Columns:
repo_id: string, length 5 to 115
size: int64, 590 to 5.01M
file_path: string, length 4 to 212
content: string, length 590 to 5.01M
repo_id: marvin-hansen/iggy-streaming-system
size: 21,551
file_path: thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/mac-x86/crypto/fipsmodule/co-586.S
content:
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__) .text .globl _bn_mul_comba8 .private_extern _bn_mul_comba8 .align 4 _bn_mul_comba8: L_bn_mul_comba8_begin: pushl %esi movl 12(%esp),%esi pushl %edi movl 20(%esp),%edi pushl %ebp pushl %ebx xorl %ebx,%ebx movl (%esi),%eax xorl %ecx,%ecx movl (%edi),%edx # ################## Calculate word 0 xorl %ebp,%ebp # mul a[0]*b[0] mull %edx addl %eax,%ebx movl 20(%esp),%eax adcl %edx,%ecx movl (%edi),%edx adcl $0,%ebp movl %ebx,(%eax) movl 4(%esi),%eax # saved r[0] # ################## Calculate word 1 xorl %ebx,%ebx # mul a[1]*b[0] mull %edx addl %eax,%ecx movl (%esi),%eax adcl %edx,%ebp movl 4(%edi),%edx adcl $0,%ebx # mul a[0]*b[1] mull %edx addl %eax,%ecx movl 20(%esp),%eax adcl %edx,%ebp movl (%edi),%edx adcl $0,%ebx movl %ecx,4(%eax) movl 8(%esi),%eax # saved r[1] # ################## Calculate word 2 xorl %ecx,%ecx # mul a[2]*b[0] mull %edx addl %eax,%ebp movl 4(%esi),%eax adcl %edx,%ebx movl 4(%edi),%edx adcl $0,%ecx # mul a[1]*b[1] mull %edx addl %eax,%ebp movl (%esi),%eax adcl %edx,%ebx movl 8(%edi),%edx adcl $0,%ecx # mul a[0]*b[2] mull %edx addl %eax,%ebp movl 20(%esp),%eax adcl %edx,%ebx movl (%edi),%edx adcl $0,%ecx movl %ebp,8(%eax) movl 12(%esi),%eax # saved r[2] # ################## Calculate word 3 xorl %ebp,%ebp # mul a[3]*b[0] mull %edx addl %eax,%ebx movl 8(%esi),%eax adcl %edx,%ecx movl 4(%edi),%edx adcl $0,%ebp # mul a[2]*b[1] mull %edx addl %eax,%ebx movl 4(%esi),%eax adcl %edx,%ecx movl 8(%edi),%edx adcl $0,%ebp # mul a[1]*b[2] mull %edx addl %eax,%ebx movl (%esi),%eax adcl %edx,%ecx movl 12(%edi),%edx adcl $0,%ebp # mul a[0]*b[3] mull %edx addl %eax,%ebx movl 20(%esp),%eax adcl %edx,%ecx movl (%edi),%edx adcl $0,%ebp movl %ebx,12(%eax) movl 16(%esi),%eax # saved r[3] # ################## Calculate word 4 xorl %ebx,%ebx # mul a[4]*b[0] mull %edx addl %eax,%ecx movl 12(%esi),%eax adcl %edx,%ebp movl 4(%edi),%edx adcl $0,%ebx # mul a[3]*b[1] mull %edx addl %eax,%ecx movl 8(%esi),%eax adcl %edx,%ebp movl 8(%edi),%edx adcl $0,%ebx # mul a[2]*b[2] mull %edx addl %eax,%ecx movl 4(%esi),%eax adcl %edx,%ebp movl 12(%edi),%edx adcl $0,%ebx # mul a[1]*b[3] mull %edx addl %eax,%ecx movl (%esi),%eax adcl %edx,%ebp movl 16(%edi),%edx adcl $0,%ebx # mul a[0]*b[4] mull %edx addl %eax,%ecx movl 20(%esp),%eax adcl %edx,%ebp movl (%edi),%edx adcl $0,%ebx movl %ecx,16(%eax) movl 20(%esi),%eax # saved r[4] # ################## Calculate word 5 xorl %ecx,%ecx # mul a[5]*b[0] mull %edx addl %eax,%ebp movl 16(%esi),%eax adcl %edx,%ebx movl 4(%edi),%edx adcl $0,%ecx # mul a[4]*b[1] mull %edx addl %eax,%ebp movl 12(%esi),%eax adcl %edx,%ebx movl 8(%edi),%edx adcl $0,%ecx # mul a[3]*b[2] mull %edx addl %eax,%ebp movl 8(%esi),%eax adcl %edx,%ebx movl 12(%edi),%edx adcl $0,%ecx # mul a[2]*b[3] mull %edx addl %eax,%ebp movl 4(%esi),%eax adcl %edx,%ebx movl 16(%edi),%edx adcl $0,%ecx # mul a[1]*b[4] mull %edx addl %eax,%ebp movl (%esi),%eax adcl %edx,%ebx movl 20(%edi),%edx adcl $0,%ecx # mul a[0]*b[5] mull %edx addl %eax,%ebp movl 20(%esp),%eax adcl %edx,%ebx movl (%edi),%edx adcl $0,%ecx movl %ebp,20(%eax) movl 24(%esi),%eax # saved r[5] # ################## Calculate word 6 xorl %ebp,%ebp # mul a[6]*b[0] mull %edx addl %eax,%ebx movl 20(%esi),%eax adcl %edx,%ecx movl 4(%edi),%edx adcl $0,%ebp # mul a[5]*b[1] mull %edx addl %eax,%ebx movl 16(%esi),%eax adcl %edx,%ecx movl 8(%edi),%edx 
adcl $0,%ebp # mul a[4]*b[2] mull %edx addl %eax,%ebx movl 12(%esi),%eax adcl %edx,%ecx movl 12(%edi),%edx adcl $0,%ebp # mul a[3]*b[3] mull %edx addl %eax,%ebx movl 8(%esi),%eax adcl %edx,%ecx movl 16(%edi),%edx adcl $0,%ebp # mul a[2]*b[4] mull %edx addl %eax,%ebx movl 4(%esi),%eax adcl %edx,%ecx movl 20(%edi),%edx adcl $0,%ebp # mul a[1]*b[5] mull %edx addl %eax,%ebx movl (%esi),%eax adcl %edx,%ecx movl 24(%edi),%edx adcl $0,%ebp # mul a[0]*b[6] mull %edx addl %eax,%ebx movl 20(%esp),%eax adcl %edx,%ecx movl (%edi),%edx adcl $0,%ebp movl %ebx,24(%eax) movl 28(%esi),%eax # saved r[6] # ################## Calculate word 7 xorl %ebx,%ebx # mul a[7]*b[0] mull %edx addl %eax,%ecx movl 24(%esi),%eax adcl %edx,%ebp movl 4(%edi),%edx adcl $0,%ebx # mul a[6]*b[1] mull %edx addl %eax,%ecx movl 20(%esi),%eax adcl %edx,%ebp movl 8(%edi),%edx adcl $0,%ebx # mul a[5]*b[2] mull %edx addl %eax,%ecx movl 16(%esi),%eax adcl %edx,%ebp movl 12(%edi),%edx adcl $0,%ebx # mul a[4]*b[3] mull %edx addl %eax,%ecx movl 12(%esi),%eax adcl %edx,%ebp movl 16(%edi),%edx adcl $0,%ebx # mul a[3]*b[4] mull %edx addl %eax,%ecx movl 8(%esi),%eax adcl %edx,%ebp movl 20(%edi),%edx adcl $0,%ebx # mul a[2]*b[5] mull %edx addl %eax,%ecx movl 4(%esi),%eax adcl %edx,%ebp movl 24(%edi),%edx adcl $0,%ebx # mul a[1]*b[6] mull %edx addl %eax,%ecx movl (%esi),%eax adcl %edx,%ebp movl 28(%edi),%edx adcl $0,%ebx # mul a[0]*b[7] mull %edx addl %eax,%ecx movl 20(%esp),%eax adcl %edx,%ebp movl 4(%edi),%edx adcl $0,%ebx movl %ecx,28(%eax) movl 28(%esi),%eax # saved r[7] # ################## Calculate word 8 xorl %ecx,%ecx # mul a[7]*b[1] mull %edx addl %eax,%ebp movl 24(%esi),%eax adcl %edx,%ebx movl 8(%edi),%edx adcl $0,%ecx # mul a[6]*b[2] mull %edx addl %eax,%ebp movl 20(%esi),%eax adcl %edx,%ebx movl 12(%edi),%edx adcl $0,%ecx # mul a[5]*b[3] mull %edx addl %eax,%ebp movl 16(%esi),%eax adcl %edx,%ebx movl 16(%edi),%edx adcl $0,%ecx # mul a[4]*b[4] mull %edx addl %eax,%ebp movl 12(%esi),%eax adcl %edx,%ebx movl 20(%edi),%edx adcl $0,%ecx # mul a[3]*b[5] mull %edx addl %eax,%ebp movl 8(%esi),%eax adcl %edx,%ebx movl 24(%edi),%edx adcl $0,%ecx # mul a[2]*b[6] mull %edx addl %eax,%ebp movl 4(%esi),%eax adcl %edx,%ebx movl 28(%edi),%edx adcl $0,%ecx # mul a[1]*b[7] mull %edx addl %eax,%ebp movl 20(%esp),%eax adcl %edx,%ebx movl 8(%edi),%edx adcl $0,%ecx movl %ebp,32(%eax) movl 28(%esi),%eax # saved r[8] # ################## Calculate word 9 xorl %ebp,%ebp # mul a[7]*b[2] mull %edx addl %eax,%ebx movl 24(%esi),%eax adcl %edx,%ecx movl 12(%edi),%edx adcl $0,%ebp # mul a[6]*b[3] mull %edx addl %eax,%ebx movl 20(%esi),%eax adcl %edx,%ecx movl 16(%edi),%edx adcl $0,%ebp # mul a[5]*b[4] mull %edx addl %eax,%ebx movl 16(%esi),%eax adcl %edx,%ecx movl 20(%edi),%edx adcl $0,%ebp # mul a[4]*b[5] mull %edx addl %eax,%ebx movl 12(%esi),%eax adcl %edx,%ecx movl 24(%edi),%edx adcl $0,%ebp # mul a[3]*b[6] mull %edx addl %eax,%ebx movl 8(%esi),%eax adcl %edx,%ecx movl 28(%edi),%edx adcl $0,%ebp # mul a[2]*b[7] mull %edx addl %eax,%ebx movl 20(%esp),%eax adcl %edx,%ecx movl 12(%edi),%edx adcl $0,%ebp movl %ebx,36(%eax) movl 28(%esi),%eax # saved r[9] # ################## Calculate word 10 xorl %ebx,%ebx # mul a[7]*b[3] mull %edx addl %eax,%ecx movl 24(%esi),%eax adcl %edx,%ebp movl 16(%edi),%edx adcl $0,%ebx # mul a[6]*b[4] mull %edx addl %eax,%ecx movl 20(%esi),%eax adcl %edx,%ebp movl 20(%edi),%edx adcl $0,%ebx # mul a[5]*b[5] mull %edx addl %eax,%ecx movl 16(%esi),%eax adcl %edx,%ebp movl 24(%edi),%edx adcl $0,%ebx # mul a[4]*b[6] mull %edx addl 
%eax,%ecx movl 12(%esi),%eax adcl %edx,%ebp movl 28(%edi),%edx adcl $0,%ebx # mul a[3]*b[7] mull %edx addl %eax,%ecx movl 20(%esp),%eax adcl %edx,%ebp movl 16(%edi),%edx adcl $0,%ebx movl %ecx,40(%eax) movl 28(%esi),%eax # saved r[10] # ################## Calculate word 11 xorl %ecx,%ecx # mul a[7]*b[4] mull %edx addl %eax,%ebp movl 24(%esi),%eax adcl %edx,%ebx movl 20(%edi),%edx adcl $0,%ecx # mul a[6]*b[5] mull %edx addl %eax,%ebp movl 20(%esi),%eax adcl %edx,%ebx movl 24(%edi),%edx adcl $0,%ecx # mul a[5]*b[6] mull %edx addl %eax,%ebp movl 16(%esi),%eax adcl %edx,%ebx movl 28(%edi),%edx adcl $0,%ecx # mul a[4]*b[7] mull %edx addl %eax,%ebp movl 20(%esp),%eax adcl %edx,%ebx movl 20(%edi),%edx adcl $0,%ecx movl %ebp,44(%eax) movl 28(%esi),%eax # saved r[11] # ################## Calculate word 12 xorl %ebp,%ebp # mul a[7]*b[5] mull %edx addl %eax,%ebx movl 24(%esi),%eax adcl %edx,%ecx movl 24(%edi),%edx adcl $0,%ebp # mul a[6]*b[6] mull %edx addl %eax,%ebx movl 20(%esi),%eax adcl %edx,%ecx movl 28(%edi),%edx adcl $0,%ebp # mul a[5]*b[7] mull %edx addl %eax,%ebx movl 20(%esp),%eax adcl %edx,%ecx movl 24(%edi),%edx adcl $0,%ebp movl %ebx,48(%eax) movl 28(%esi),%eax # saved r[12] # ################## Calculate word 13 xorl %ebx,%ebx # mul a[7]*b[6] mull %edx addl %eax,%ecx movl 24(%esi),%eax adcl %edx,%ebp movl 28(%edi),%edx adcl $0,%ebx # mul a[6]*b[7] mull %edx addl %eax,%ecx movl 20(%esp),%eax adcl %edx,%ebp movl 28(%edi),%edx adcl $0,%ebx movl %ecx,52(%eax) movl 28(%esi),%eax # saved r[13] # ################## Calculate word 14 xorl %ecx,%ecx # mul a[7]*b[7] mull %edx addl %eax,%ebp movl 20(%esp),%eax adcl %edx,%ebx adcl $0,%ecx movl %ebp,56(%eax) # saved r[14] # save r[15] movl %ebx,60(%eax) popl %ebx popl %ebp popl %edi popl %esi ret .globl _bn_mul_comba4 .private_extern _bn_mul_comba4 .align 4 _bn_mul_comba4: L_bn_mul_comba4_begin: pushl %esi movl 12(%esp),%esi pushl %edi movl 20(%esp),%edi pushl %ebp pushl %ebx xorl %ebx,%ebx movl (%esi),%eax xorl %ecx,%ecx movl (%edi),%edx # ################## Calculate word 0 xorl %ebp,%ebp # mul a[0]*b[0] mull %edx addl %eax,%ebx movl 20(%esp),%eax adcl %edx,%ecx movl (%edi),%edx adcl $0,%ebp movl %ebx,(%eax) movl 4(%esi),%eax # saved r[0] # ################## Calculate word 1 xorl %ebx,%ebx # mul a[1]*b[0] mull %edx addl %eax,%ecx movl (%esi),%eax adcl %edx,%ebp movl 4(%edi),%edx adcl $0,%ebx # mul a[0]*b[1] mull %edx addl %eax,%ecx movl 20(%esp),%eax adcl %edx,%ebp movl (%edi),%edx adcl $0,%ebx movl %ecx,4(%eax) movl 8(%esi),%eax # saved r[1] # ################## Calculate word 2 xorl %ecx,%ecx # mul a[2]*b[0] mull %edx addl %eax,%ebp movl 4(%esi),%eax adcl %edx,%ebx movl 4(%edi),%edx adcl $0,%ecx # mul a[1]*b[1] mull %edx addl %eax,%ebp movl (%esi),%eax adcl %edx,%ebx movl 8(%edi),%edx adcl $0,%ecx # mul a[0]*b[2] mull %edx addl %eax,%ebp movl 20(%esp),%eax adcl %edx,%ebx movl (%edi),%edx adcl $0,%ecx movl %ebp,8(%eax) movl 12(%esi),%eax # saved r[2] # ################## Calculate word 3 xorl %ebp,%ebp # mul a[3]*b[0] mull %edx addl %eax,%ebx movl 8(%esi),%eax adcl %edx,%ecx movl 4(%edi),%edx adcl $0,%ebp # mul a[2]*b[1] mull %edx addl %eax,%ebx movl 4(%esi),%eax adcl %edx,%ecx movl 8(%edi),%edx adcl $0,%ebp # mul a[1]*b[2] mull %edx addl %eax,%ebx movl (%esi),%eax adcl %edx,%ecx movl 12(%edi),%edx adcl $0,%ebp # mul a[0]*b[3] mull %edx addl %eax,%ebx movl 20(%esp),%eax adcl %edx,%ecx movl 4(%edi),%edx adcl $0,%ebp movl %ebx,12(%eax) movl 12(%esi),%eax # saved r[3] # ################## Calculate word 4 xorl %ebx,%ebx # mul a[3]*b[1] mull %edx 
addl %eax,%ecx movl 8(%esi),%eax adcl %edx,%ebp movl 8(%edi),%edx adcl $0,%ebx # mul a[2]*b[2] mull %edx addl %eax,%ecx movl 4(%esi),%eax adcl %edx,%ebp movl 12(%edi),%edx adcl $0,%ebx # mul a[1]*b[3] mull %edx addl %eax,%ecx movl 20(%esp),%eax adcl %edx,%ebp movl 8(%edi),%edx adcl $0,%ebx movl %ecx,16(%eax) movl 12(%esi),%eax # saved r[4] # ################## Calculate word 5 xorl %ecx,%ecx # mul a[3]*b[2] mull %edx addl %eax,%ebp movl 8(%esi),%eax adcl %edx,%ebx movl 12(%edi),%edx adcl $0,%ecx # mul a[2]*b[3] mull %edx addl %eax,%ebp movl 20(%esp),%eax adcl %edx,%ebx movl 12(%edi),%edx adcl $0,%ecx movl %ebp,20(%eax) movl 12(%esi),%eax # saved r[5] # ################## Calculate word 6 xorl %ebp,%ebp # mul a[3]*b[3] mull %edx addl %eax,%ebx movl 20(%esp),%eax adcl %edx,%ecx adcl $0,%ebp movl %ebx,24(%eax) # saved r[6] # save r[7] movl %ecx,28(%eax) popl %ebx popl %ebp popl %edi popl %esi ret .globl _bn_sqr_comba8 .private_extern _bn_sqr_comba8 .align 4 _bn_sqr_comba8: L_bn_sqr_comba8_begin: pushl %esi pushl %edi pushl %ebp pushl %ebx movl 20(%esp),%edi movl 24(%esp),%esi xorl %ebx,%ebx xorl %ecx,%ecx movl (%esi),%eax # ############### Calculate word 0 xorl %ebp,%ebp # sqr a[0]*a[0] mull %eax addl %eax,%ebx adcl %edx,%ecx movl (%esi),%edx adcl $0,%ebp movl %ebx,(%edi) movl 4(%esi),%eax # saved r[0] # ############### Calculate word 1 xorl %ebx,%ebx # sqr a[1]*a[0] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 8(%esi),%eax adcl $0,%ebx movl %ecx,4(%edi) movl (%esi),%edx # saved r[1] # ############### Calculate word 2 xorl %ecx,%ecx # sqr a[2]*a[0] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 4(%esi),%eax adcl $0,%ecx # sqr a[1]*a[1] mull %eax addl %eax,%ebp adcl %edx,%ebx movl (%esi),%edx adcl $0,%ecx movl %ebp,8(%edi) movl 12(%esi),%eax # saved r[2] # ############### Calculate word 3 xorl %ebp,%ebp # sqr a[3]*a[0] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 8(%esi),%eax adcl $0,%ebp movl 4(%esi),%edx # sqr a[2]*a[1] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 16(%esi),%eax adcl $0,%ebp movl %ebx,12(%edi) movl (%esi),%edx # saved r[3] # ############### Calculate word 4 xorl %ebx,%ebx # sqr a[4]*a[0] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 12(%esi),%eax adcl $0,%ebx movl 4(%esi),%edx # sqr a[3]*a[1] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 8(%esi),%eax adcl $0,%ebx # sqr a[2]*a[2] mull %eax addl %eax,%ecx adcl %edx,%ebp movl (%esi),%edx adcl $0,%ebx movl %ecx,16(%edi) movl 20(%esi),%eax # saved r[4] # ############### Calculate word 5 xorl %ecx,%ecx # sqr a[5]*a[0] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 16(%esi),%eax adcl $0,%ecx movl 4(%esi),%edx # sqr a[4]*a[1] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 12(%esi),%eax adcl $0,%ecx movl 8(%esi),%edx # sqr a[3]*a[2] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 24(%esi),%eax adcl $0,%ecx movl %ebp,20(%edi) movl (%esi),%edx # saved r[5] # ############### Calculate word 6 xorl %ebp,%ebp # sqr a[6]*a[0] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 20(%esi),%eax adcl $0,%ebp movl 4(%esi),%edx # sqr a[5]*a[1] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 16(%esi),%eax 
adcl $0,%ebp movl 8(%esi),%edx # sqr a[4]*a[2] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 12(%esi),%eax adcl $0,%ebp # sqr a[3]*a[3] mull %eax addl %eax,%ebx adcl %edx,%ecx movl (%esi),%edx adcl $0,%ebp movl %ebx,24(%edi) movl 28(%esi),%eax # saved r[6] # ############### Calculate word 7 xorl %ebx,%ebx # sqr a[7]*a[0] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 24(%esi),%eax adcl $0,%ebx movl 4(%esi),%edx # sqr a[6]*a[1] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 20(%esi),%eax adcl $0,%ebx movl 8(%esi),%edx # sqr a[5]*a[2] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 16(%esi),%eax adcl $0,%ebx movl 12(%esi),%edx # sqr a[4]*a[3] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 28(%esi),%eax adcl $0,%ebx movl %ecx,28(%edi) movl 4(%esi),%edx # saved r[7] # ############### Calculate word 8 xorl %ecx,%ecx # sqr a[7]*a[1] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 24(%esi),%eax adcl $0,%ecx movl 8(%esi),%edx # sqr a[6]*a[2] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 20(%esi),%eax adcl $0,%ecx movl 12(%esi),%edx # sqr a[5]*a[3] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 16(%esi),%eax adcl $0,%ecx # sqr a[4]*a[4] mull %eax addl %eax,%ebp adcl %edx,%ebx movl 8(%esi),%edx adcl $0,%ecx movl %ebp,32(%edi) movl 28(%esi),%eax # saved r[8] # ############### Calculate word 9 xorl %ebp,%ebp # sqr a[7]*a[2] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 24(%esi),%eax adcl $0,%ebp movl 12(%esi),%edx # sqr a[6]*a[3] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 20(%esi),%eax adcl $0,%ebp movl 16(%esi),%edx # sqr a[5]*a[4] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 28(%esi),%eax adcl $0,%ebp movl %ebx,36(%edi) movl 12(%esi),%edx # saved r[9] # ############### Calculate word 10 xorl %ebx,%ebx # sqr a[7]*a[3] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 24(%esi),%eax adcl $0,%ebx movl 16(%esi),%edx # sqr a[6]*a[4] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 20(%esi),%eax adcl $0,%ebx # sqr a[5]*a[5] mull %eax addl %eax,%ecx adcl %edx,%ebp movl 16(%esi),%edx adcl $0,%ebx movl %ecx,40(%edi) movl 28(%esi),%eax # saved r[10] # ############### Calculate word 11 xorl %ecx,%ecx # sqr a[7]*a[4] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 24(%esi),%eax adcl $0,%ecx movl 20(%esi),%edx # sqr a[6]*a[5] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 28(%esi),%eax adcl $0,%ecx movl %ebp,44(%edi) movl 20(%esi),%edx # saved r[11] # ############### Calculate word 12 xorl %ebp,%ebp # sqr a[7]*a[5] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 24(%esi),%eax adcl $0,%ebp # sqr a[6]*a[6] mull %eax addl %eax,%ebx adcl %edx,%ecx movl 24(%esi),%edx adcl $0,%ebp movl %ebx,48(%edi) movl 28(%esi),%eax # saved r[12] # ############### Calculate word 13 xorl %ebx,%ebx # sqr a[7]*a[6] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 28(%esi),%eax adcl $0,%ebx movl %ecx,52(%edi) # saved r[13] # ############### Calculate word 
14 xorl %ecx,%ecx # sqr a[7]*a[7] mull %eax addl %eax,%ebp adcl %edx,%ebx adcl $0,%ecx movl %ebp,56(%edi) # saved r[14] movl %ebx,60(%edi) popl %ebx popl %ebp popl %edi popl %esi ret .globl _bn_sqr_comba4 .private_extern _bn_sqr_comba4 .align 4 _bn_sqr_comba4: L_bn_sqr_comba4_begin: pushl %esi pushl %edi pushl %ebp pushl %ebx movl 20(%esp),%edi movl 24(%esp),%esi xorl %ebx,%ebx xorl %ecx,%ecx movl (%esi),%eax # ############### Calculate word 0 xorl %ebp,%ebp # sqr a[0]*a[0] mull %eax addl %eax,%ebx adcl %edx,%ecx movl (%esi),%edx adcl $0,%ebp movl %ebx,(%edi) movl 4(%esi),%eax # saved r[0] # ############### Calculate word 1 xorl %ebx,%ebx # sqr a[1]*a[0] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 8(%esi),%eax adcl $0,%ebx movl %ecx,4(%edi) movl (%esi),%edx # saved r[1] # ############### Calculate word 2 xorl %ecx,%ecx # sqr a[2]*a[0] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 4(%esi),%eax adcl $0,%ecx # sqr a[1]*a[1] mull %eax addl %eax,%ebp adcl %edx,%ebx movl (%esi),%edx adcl $0,%ecx movl %ebp,8(%edi) movl 12(%esi),%eax # saved r[2] # ############### Calculate word 3 xorl %ebp,%ebp # sqr a[3]*a[0] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 8(%esi),%eax adcl $0,%ebp movl 4(%esi),%edx # sqr a[2]*a[1] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 12(%esi),%eax adcl $0,%ebp movl %ebx,12(%edi) movl 4(%esi),%edx # saved r[3] # ############### Calculate word 4 xorl %ebx,%ebx # sqr a[3]*a[1] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 8(%esi),%eax adcl $0,%ebx # sqr a[2]*a[2] mull %eax addl %eax,%ecx adcl %edx,%ebp movl 8(%esi),%edx adcl $0,%ebx movl %ecx,16(%edi) movl 12(%esi),%eax # saved r[4] # ############### Calculate word 5 xorl %ecx,%ecx # sqr a[3]*a[2] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 12(%esi),%eax adcl $0,%ecx movl %ebp,20(%edi) # saved r[5] # ############### Calculate word 6 xorl %ebp,%ebp # sqr a[3]*a[3] mull %eax addl %eax,%ebx adcl %edx,%ecx adcl $0,%ebp movl %ebx,24(%edi) # saved r[6] movl %ecx,28(%edi) popl %ebx popl %ebp popl %edi popl %esi ret #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__)
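The bulk of co-586.S is a fully unrolled column-wise ("Comba") schoolbook multiply and square over 32-bit words. Below is a minimal C sketch of what _bn_mul_comba8 computes; the function name, prototype, and uint32_t word type are illustrative assumptions for this example, not the BoringSSL interface.

```c
#include <stdint.h>

/* Sketch of an 8x8-word Comba multiply: the 16-word product r = a * b is
 * built one output column at a time, with every partial product a[i]*b[k-i]
 * for column k folded into a three-word carry chain (c0, c1, c2). */
static void comba_mul8(uint32_t r[16], const uint32_t a[8], const uint32_t b[8]) {
    uint32_t c0 = 0, c1 = 0, c2 = 0;
    for (int k = 0; k < 15; k++) {                 /* output columns 0..14 */
        int i_min = k > 7 ? k - 7 : 0;
        int i_max = k < 7 ? k : 7;
        for (int i = i_min; i <= i_max; i++) {     /* all a[i]*b[j] with i+j == k */
            uint64_t t  = (uint64_t)a[i] * b[k - i];
            uint64_t s0 = (uint64_t)c0 + (uint32_t)t;            /* add low half   */
            uint64_t s1 = (uint64_t)c1 + (uint32_t)(t >> 32)     /* add high half  */
                        + (uint32_t)(s0 >> 32);                  /* plus the carry */
            c0 = (uint32_t)s0;
            c1 = (uint32_t)s1;
            c2 += (uint32_t)(s1 >> 32);
        }
        r[k] = c0;                                 /* column k is complete */
        c0 = c1; c1 = c2; c2 = 0;                  /* slide the carry chain */
    }
    r[15] = c0;                                    /* top word of the product */
}
```

The assembly removes both loops and keeps the three accumulator words in registers (%ebx, %ecx and %ebp rotate through the c0/c1/c2 roles), which is why each "Calculate word k" block is a straight run of mull/addl/adcl instructions. The bn_sqr_comba* routines use the same scheme, doubling each cross product a[i]*a[j] in place via the addl %eax,%eax / adcl %edx,%edx pairs.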
repo_id: marvin-hansen/iggy-streaming-system
size: 12,051
file_path: thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/mac-x86/crypto/fipsmodule/md5-586.S
content:
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__) .text .globl _md5_block_asm_data_order .private_extern _md5_block_asm_data_order .align 4 _md5_block_asm_data_order: L_md5_block_asm_data_order_begin: pushl %esi pushl %edi movl 12(%esp),%edi movl 16(%esp),%esi movl 20(%esp),%ecx pushl %ebp shll $6,%ecx pushl %ebx addl %esi,%ecx subl $64,%ecx movl (%edi),%eax pushl %ecx movl 4(%edi),%ebx movl 8(%edi),%ecx movl 12(%edi),%edx L000start: # R0 section movl %ecx,%edi movl (%esi),%ebp # R0 0 xorl %edx,%edi andl %ebx,%edi leal 3614090360(%eax,%ebp,1),%eax xorl %edx,%edi addl %edi,%eax movl %ebx,%edi roll $7,%eax movl 4(%esi),%ebp addl %ebx,%eax # R0 1 xorl %ecx,%edi andl %eax,%edi leal 3905402710(%edx,%ebp,1),%edx xorl %ecx,%edi addl %edi,%edx movl %eax,%edi roll $12,%edx movl 8(%esi),%ebp addl %eax,%edx # R0 2 xorl %ebx,%edi andl %edx,%edi leal 606105819(%ecx,%ebp,1),%ecx xorl %ebx,%edi addl %edi,%ecx movl %edx,%edi roll $17,%ecx movl 12(%esi),%ebp addl %edx,%ecx # R0 3 xorl %eax,%edi andl %ecx,%edi leal 3250441966(%ebx,%ebp,1),%ebx xorl %eax,%edi addl %edi,%ebx movl %ecx,%edi roll $22,%ebx movl 16(%esi),%ebp addl %ecx,%ebx # R0 4 xorl %edx,%edi andl %ebx,%edi leal 4118548399(%eax,%ebp,1),%eax xorl %edx,%edi addl %edi,%eax movl %ebx,%edi roll $7,%eax movl 20(%esi),%ebp addl %ebx,%eax # R0 5 xorl %ecx,%edi andl %eax,%edi leal 1200080426(%edx,%ebp,1),%edx xorl %ecx,%edi addl %edi,%edx movl %eax,%edi roll $12,%edx movl 24(%esi),%ebp addl %eax,%edx # R0 6 xorl %ebx,%edi andl %edx,%edi leal 2821735955(%ecx,%ebp,1),%ecx xorl %ebx,%edi addl %edi,%ecx movl %edx,%edi roll $17,%ecx movl 28(%esi),%ebp addl %edx,%ecx # R0 7 xorl %eax,%edi andl %ecx,%edi leal 4249261313(%ebx,%ebp,1),%ebx xorl %eax,%edi addl %edi,%ebx movl %ecx,%edi roll $22,%ebx movl 32(%esi),%ebp addl %ecx,%ebx # R0 8 xorl %edx,%edi andl %ebx,%edi leal 1770035416(%eax,%ebp,1),%eax xorl %edx,%edi addl %edi,%eax movl %ebx,%edi roll $7,%eax movl 36(%esi),%ebp addl %ebx,%eax # R0 9 xorl %ecx,%edi andl %eax,%edi leal 2336552879(%edx,%ebp,1),%edx xorl %ecx,%edi addl %edi,%edx movl %eax,%edi roll $12,%edx movl 40(%esi),%ebp addl %eax,%edx # R0 10 xorl %ebx,%edi andl %edx,%edi leal 4294925233(%ecx,%ebp,1),%ecx xorl %ebx,%edi addl %edi,%ecx movl %edx,%edi roll $17,%ecx movl 44(%esi),%ebp addl %edx,%ecx # R0 11 xorl %eax,%edi andl %ecx,%edi leal 2304563134(%ebx,%ebp,1),%ebx xorl %eax,%edi addl %edi,%ebx movl %ecx,%edi roll $22,%ebx movl 48(%esi),%ebp addl %ecx,%ebx # R0 12 xorl %edx,%edi andl %ebx,%edi leal 1804603682(%eax,%ebp,1),%eax xorl %edx,%edi addl %edi,%eax movl %ebx,%edi roll $7,%eax movl 52(%esi),%ebp addl %ebx,%eax # R0 13 xorl %ecx,%edi andl %eax,%edi leal 4254626195(%edx,%ebp,1),%edx xorl %ecx,%edi addl %edi,%edx movl %eax,%edi roll $12,%edx movl 56(%esi),%ebp addl %eax,%edx # R0 14 xorl %ebx,%edi andl %edx,%edi leal 2792965006(%ecx,%ebp,1),%ecx xorl %ebx,%edi addl %edi,%ecx movl %edx,%edi roll $17,%ecx movl 60(%esi),%ebp addl %edx,%ecx # R0 15 xorl %eax,%edi andl %ecx,%edi leal 1236535329(%ebx,%ebp,1),%ebx xorl %eax,%edi addl %edi,%ebx movl %ecx,%edi roll $22,%ebx movl 4(%esi),%ebp addl %ecx,%ebx # R1 section # R1 16 leal 4129170786(%eax,%ebp,1),%eax xorl %ebx,%edi andl %edx,%edi movl 24(%esi),%ebp xorl %ecx,%edi addl %edi,%eax movl %ebx,%edi roll $5,%eax addl %ebx,%eax # R1 17 leal 3225465664(%edx,%ebp,1),%edx xorl %eax,%edi andl %ecx,%edi movl 44(%esi),%ebp xorl 
%ebx,%edi addl %edi,%edx movl %eax,%edi roll $9,%edx addl %eax,%edx # R1 18 leal 643717713(%ecx,%ebp,1),%ecx xorl %edx,%edi andl %ebx,%edi movl (%esi),%ebp xorl %eax,%edi addl %edi,%ecx movl %edx,%edi roll $14,%ecx addl %edx,%ecx # R1 19 leal 3921069994(%ebx,%ebp,1),%ebx xorl %ecx,%edi andl %eax,%edi movl 20(%esi),%ebp xorl %edx,%edi addl %edi,%ebx movl %ecx,%edi roll $20,%ebx addl %ecx,%ebx # R1 20 leal 3593408605(%eax,%ebp,1),%eax xorl %ebx,%edi andl %edx,%edi movl 40(%esi),%ebp xorl %ecx,%edi addl %edi,%eax movl %ebx,%edi roll $5,%eax addl %ebx,%eax # R1 21 leal 38016083(%edx,%ebp,1),%edx xorl %eax,%edi andl %ecx,%edi movl 60(%esi),%ebp xorl %ebx,%edi addl %edi,%edx movl %eax,%edi roll $9,%edx addl %eax,%edx # R1 22 leal 3634488961(%ecx,%ebp,1),%ecx xorl %edx,%edi andl %ebx,%edi movl 16(%esi),%ebp xorl %eax,%edi addl %edi,%ecx movl %edx,%edi roll $14,%ecx addl %edx,%ecx # R1 23 leal 3889429448(%ebx,%ebp,1),%ebx xorl %ecx,%edi andl %eax,%edi movl 36(%esi),%ebp xorl %edx,%edi addl %edi,%ebx movl %ecx,%edi roll $20,%ebx addl %ecx,%ebx # R1 24 leal 568446438(%eax,%ebp,1),%eax xorl %ebx,%edi andl %edx,%edi movl 56(%esi),%ebp xorl %ecx,%edi addl %edi,%eax movl %ebx,%edi roll $5,%eax addl %ebx,%eax # R1 25 leal 3275163606(%edx,%ebp,1),%edx xorl %eax,%edi andl %ecx,%edi movl 12(%esi),%ebp xorl %ebx,%edi addl %edi,%edx movl %eax,%edi roll $9,%edx addl %eax,%edx # R1 26 leal 4107603335(%ecx,%ebp,1),%ecx xorl %edx,%edi andl %ebx,%edi movl 32(%esi),%ebp xorl %eax,%edi addl %edi,%ecx movl %edx,%edi roll $14,%ecx addl %edx,%ecx # R1 27 leal 1163531501(%ebx,%ebp,1),%ebx xorl %ecx,%edi andl %eax,%edi movl 52(%esi),%ebp xorl %edx,%edi addl %edi,%ebx movl %ecx,%edi roll $20,%ebx addl %ecx,%ebx # R1 28 leal 2850285829(%eax,%ebp,1),%eax xorl %ebx,%edi andl %edx,%edi movl 8(%esi),%ebp xorl %ecx,%edi addl %edi,%eax movl %ebx,%edi roll $5,%eax addl %ebx,%eax # R1 29 leal 4243563512(%edx,%ebp,1),%edx xorl %eax,%edi andl %ecx,%edi movl 28(%esi),%ebp xorl %ebx,%edi addl %edi,%edx movl %eax,%edi roll $9,%edx addl %eax,%edx # R1 30 leal 1735328473(%ecx,%ebp,1),%ecx xorl %edx,%edi andl %ebx,%edi movl 48(%esi),%ebp xorl %eax,%edi addl %edi,%ecx movl %edx,%edi roll $14,%ecx addl %edx,%ecx # R1 31 leal 2368359562(%ebx,%ebp,1),%ebx xorl %ecx,%edi andl %eax,%edi movl 20(%esi),%ebp xorl %edx,%edi addl %edi,%ebx movl %ecx,%edi roll $20,%ebx addl %ecx,%ebx # R2 section # R2 32 xorl %edx,%edi xorl %ebx,%edi leal 4294588738(%eax,%ebp,1),%eax addl %edi,%eax roll $4,%eax movl 32(%esi),%ebp movl %ebx,%edi # R2 33 leal 2272392833(%edx,%ebp,1),%edx addl %ebx,%eax xorl %ecx,%edi xorl %eax,%edi movl 44(%esi),%ebp addl %edi,%edx movl %eax,%edi roll $11,%edx addl %eax,%edx # R2 34 xorl %ebx,%edi xorl %edx,%edi leal 1839030562(%ecx,%ebp,1),%ecx addl %edi,%ecx roll $16,%ecx movl 56(%esi),%ebp movl %edx,%edi # R2 35 leal 4259657740(%ebx,%ebp,1),%ebx addl %edx,%ecx xorl %eax,%edi xorl %ecx,%edi movl 4(%esi),%ebp addl %edi,%ebx movl %ecx,%edi roll $23,%ebx addl %ecx,%ebx # R2 36 xorl %edx,%edi xorl %ebx,%edi leal 2763975236(%eax,%ebp,1),%eax addl %edi,%eax roll $4,%eax movl 16(%esi),%ebp movl %ebx,%edi # R2 37 leal 1272893353(%edx,%ebp,1),%edx addl %ebx,%eax xorl %ecx,%edi xorl %eax,%edi movl 28(%esi),%ebp addl %edi,%edx movl %eax,%edi roll $11,%edx addl %eax,%edx # R2 38 xorl %ebx,%edi xorl %edx,%edi leal 4139469664(%ecx,%ebp,1),%ecx addl %edi,%ecx roll $16,%ecx movl 40(%esi),%ebp movl %edx,%edi # R2 39 leal 3200236656(%ebx,%ebp,1),%ebx addl %edx,%ecx xorl %eax,%edi xorl %ecx,%edi movl 52(%esi),%ebp addl %edi,%ebx movl %ecx,%edi roll 
$23,%ebx addl %ecx,%ebx # R2 40 xorl %edx,%edi xorl %ebx,%edi leal 681279174(%eax,%ebp,1),%eax addl %edi,%eax roll $4,%eax movl (%esi),%ebp movl %ebx,%edi # R2 41 leal 3936430074(%edx,%ebp,1),%edx addl %ebx,%eax xorl %ecx,%edi xorl %eax,%edi movl 12(%esi),%ebp addl %edi,%edx movl %eax,%edi roll $11,%edx addl %eax,%edx # R2 42 xorl %ebx,%edi xorl %edx,%edi leal 3572445317(%ecx,%ebp,1),%ecx addl %edi,%ecx roll $16,%ecx movl 24(%esi),%ebp movl %edx,%edi # R2 43 leal 76029189(%ebx,%ebp,1),%ebx addl %edx,%ecx xorl %eax,%edi xorl %ecx,%edi movl 36(%esi),%ebp addl %edi,%ebx movl %ecx,%edi roll $23,%ebx addl %ecx,%ebx # R2 44 xorl %edx,%edi xorl %ebx,%edi leal 3654602809(%eax,%ebp,1),%eax addl %edi,%eax roll $4,%eax movl 48(%esi),%ebp movl %ebx,%edi # R2 45 leal 3873151461(%edx,%ebp,1),%edx addl %ebx,%eax xorl %ecx,%edi xorl %eax,%edi movl 60(%esi),%ebp addl %edi,%edx movl %eax,%edi roll $11,%edx addl %eax,%edx # R2 46 xorl %ebx,%edi xorl %edx,%edi leal 530742520(%ecx,%ebp,1),%ecx addl %edi,%ecx roll $16,%ecx movl 8(%esi),%ebp movl %edx,%edi # R2 47 leal 3299628645(%ebx,%ebp,1),%ebx addl %edx,%ecx xorl %eax,%edi xorl %ecx,%edi movl (%esi),%ebp addl %edi,%ebx movl $-1,%edi roll $23,%ebx addl %ecx,%ebx # R3 section # R3 48 xorl %edx,%edi orl %ebx,%edi leal 4096336452(%eax,%ebp,1),%eax xorl %ecx,%edi movl 28(%esi),%ebp addl %edi,%eax movl $-1,%edi roll $6,%eax xorl %ecx,%edi addl %ebx,%eax # R3 49 orl %eax,%edi leal 1126891415(%edx,%ebp,1),%edx xorl %ebx,%edi movl 56(%esi),%ebp addl %edi,%edx movl $-1,%edi roll $10,%edx xorl %ebx,%edi addl %eax,%edx # R3 50 orl %edx,%edi leal 2878612391(%ecx,%ebp,1),%ecx xorl %eax,%edi movl 20(%esi),%ebp addl %edi,%ecx movl $-1,%edi roll $15,%ecx xorl %eax,%edi addl %edx,%ecx # R3 51 orl %ecx,%edi leal 4237533241(%ebx,%ebp,1),%ebx xorl %edx,%edi movl 48(%esi),%ebp addl %edi,%ebx movl $-1,%edi roll $21,%ebx xorl %edx,%edi addl %ecx,%ebx # R3 52 orl %ebx,%edi leal 1700485571(%eax,%ebp,1),%eax xorl %ecx,%edi movl 12(%esi),%ebp addl %edi,%eax movl $-1,%edi roll $6,%eax xorl %ecx,%edi addl %ebx,%eax # R3 53 orl %eax,%edi leal 2399980690(%edx,%ebp,1),%edx xorl %ebx,%edi movl 40(%esi),%ebp addl %edi,%edx movl $-1,%edi roll $10,%edx xorl %ebx,%edi addl %eax,%edx # R3 54 orl %edx,%edi leal 4293915773(%ecx,%ebp,1),%ecx xorl %eax,%edi movl 4(%esi),%ebp addl %edi,%ecx movl $-1,%edi roll $15,%ecx xorl %eax,%edi addl %edx,%ecx # R3 55 orl %ecx,%edi leal 2240044497(%ebx,%ebp,1),%ebx xorl %edx,%edi movl 32(%esi),%ebp addl %edi,%ebx movl $-1,%edi roll $21,%ebx xorl %edx,%edi addl %ecx,%ebx # R3 56 orl %ebx,%edi leal 1873313359(%eax,%ebp,1),%eax xorl %ecx,%edi movl 60(%esi),%ebp addl %edi,%eax movl $-1,%edi roll $6,%eax xorl %ecx,%edi addl %ebx,%eax # R3 57 orl %eax,%edi leal 4264355552(%edx,%ebp,1),%edx xorl %ebx,%edi movl 24(%esi),%ebp addl %edi,%edx movl $-1,%edi roll $10,%edx xorl %ebx,%edi addl %eax,%edx # R3 58 orl %edx,%edi leal 2734768916(%ecx,%ebp,1),%ecx xorl %eax,%edi movl 52(%esi),%ebp addl %edi,%ecx movl $-1,%edi roll $15,%ecx xorl %eax,%edi addl %edx,%ecx # R3 59 orl %ecx,%edi leal 1309151649(%ebx,%ebp,1),%ebx xorl %edx,%edi movl 16(%esi),%ebp addl %edi,%ebx movl $-1,%edi roll $21,%ebx xorl %edx,%edi addl %ecx,%ebx # R3 60 orl %ebx,%edi leal 4149444226(%eax,%ebp,1),%eax xorl %ecx,%edi movl 44(%esi),%ebp addl %edi,%eax movl $-1,%edi roll $6,%eax xorl %ecx,%edi addl %ebx,%eax # R3 61 orl %eax,%edi leal 3174756917(%edx,%ebp,1),%edx xorl %ebx,%edi movl 8(%esi),%ebp addl %edi,%edx movl $-1,%edi roll $10,%edx xorl %ebx,%edi addl %eax,%edx # R3 62 orl %edx,%edi leal 
718787259(%ecx,%ebp,1),%ecx xorl %eax,%edi movl 36(%esi),%ebp addl %edi,%ecx movl $-1,%edi roll $15,%ecx xorl %eax,%edi addl %edx,%ecx # R3 63 orl %ecx,%edi leal 3951481745(%ebx,%ebp,1),%ebx xorl %edx,%edi movl 24(%esp),%ebp addl %edi,%ebx addl $64,%esi roll $21,%ebx movl (%ebp),%edi addl %ecx,%ebx addl %edi,%eax movl 4(%ebp),%edi addl %edi,%ebx movl 8(%ebp),%edi addl %edi,%ecx movl 12(%ebp),%edi addl %edi,%edx movl %eax,(%ebp) movl %ebx,4(%ebp) movl (%esp),%edi movl %ecx,8(%ebp) movl %edx,12(%ebp) cmpl %esi,%edi jae L000start popl %eax popl %ebx popl %ebp popl %edi popl %esi ret #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__)
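md5-586.S is a fully unrolled MD5 compression function (_md5_block_asm_data_order): %eax/%ebx/%ecx/%edx hold the a/b/c/d state, %ebp the current message word, and %edi the round-function scratch value. The structure it encodes is the standard RFC 1321 one; the C helpers below are an illustrative restatement of that structure, not code extracted from BoringSSL.

```c
#include <stdint.h>

/* Standard MD5 round helpers (RFC 1321), shown only to make the unrolled
 * R0..R3 sections above readable. The .S file computes F via the equivalent
 * z ^ (x & (y ^ z)) trick visible in its xorl/andl/xorl triplets, and folds
 * each round constant into a leal (e.g. 3614090360 == 0xd76aa478). */
static uint32_t rotl32(uint32_t v, int s) { return (v << s) | (v >> (32 - s)); }

static uint32_t md5_f(uint32_t x, uint32_t y, uint32_t z) { return z ^ (x & (y ^ z)); } /* R0 */
static uint32_t md5_g(uint32_t x, uint32_t y, uint32_t z) { return y ^ (z & (x ^ y)); } /* R1 */
static uint32_t md5_h(uint32_t x, uint32_t y, uint32_t z) { return x ^ y ^ z; }         /* R2 */
static uint32_t md5_i(uint32_t x, uint32_t y, uint32_t z) { return y ^ (x | ~z); }      /* R3 */

/* One of the 64 steps: a' = b + rotl(a + f(b,c,d) + m + k, s), where m is a
 * message word, k the round constant and s the per-step rotation amount. */
static uint32_t md5_step(uint32_t a, uint32_t b, uint32_t fval,
                         uint32_t m, uint32_t k, int s) {
    return b + rotl32(a + fval + m + k, s);
}
```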
repo_id: marvin-hansen/iggy-streaming-system
size: 49,856
file_path: thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/mac-x86/crypto/fipsmodule/aesni-x86.S
content:
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__) .text #ifdef BORINGSSL_DISPATCH_TEST #endif .globl _aes_hw_encrypt .private_extern _aes_hw_encrypt .align 4 _aes_hw_encrypt: L_aes_hw_encrypt_begin: #ifdef BORINGSSL_DISPATCH_TEST pushl %ebx pushl %edx call L000pic L000pic: popl %ebx leal _BORINGSSL_function_hit+1-L000pic(%ebx),%ebx movl $1,%edx movb %dl,(%ebx) popl %edx popl %ebx #endif movl 4(%esp),%eax movl 12(%esp),%edx movups (%eax),%xmm2 movl 240(%edx),%ecx movl 8(%esp),%eax movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 L001enc1_loop_1: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz L001enc1_loop_1 .byte 102,15,56,221,209 pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 movups %xmm2,(%eax) pxor %xmm2,%xmm2 ret .globl _aes_hw_decrypt .private_extern _aes_hw_decrypt .align 4 _aes_hw_decrypt: L_aes_hw_decrypt_begin: movl 4(%esp),%eax movl 12(%esp),%edx movups (%eax),%xmm2 movl 240(%edx),%ecx movl 8(%esp),%eax movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 L002dec1_loop_2: .byte 102,15,56,222,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz L002dec1_loop_2 .byte 102,15,56,223,209 pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 movups %xmm2,(%eax) pxor %xmm2,%xmm2 ret .private_extern __aesni_encrypt2 .align 4 __aesni_encrypt2: movups (%edx),%xmm0 shll $4,%ecx movups 16(%edx),%xmm1 xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 movups 32(%edx),%xmm0 leal 32(%edx,%ecx,1),%edx negl %ecx addl $16,%ecx L003enc2_loop: .byte 102,15,56,220,209 .byte 102,15,56,220,217 movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,220,208 .byte 102,15,56,220,216 movups -16(%edx,%ecx,1),%xmm0 jnz L003enc2_loop .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,221,208 .byte 102,15,56,221,216 ret .private_extern __aesni_decrypt2 .align 4 __aesni_decrypt2: movups (%edx),%xmm0 shll $4,%ecx movups 16(%edx),%xmm1 xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 movups 32(%edx),%xmm0 leal 32(%edx,%ecx,1),%edx negl %ecx addl $16,%ecx L004dec2_loop: .byte 102,15,56,222,209 .byte 102,15,56,222,217 movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,222,208 .byte 102,15,56,222,216 movups -16(%edx,%ecx,1),%xmm0 jnz L004dec2_loop .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,223,208 .byte 102,15,56,223,216 ret .private_extern __aesni_encrypt3 .align 4 __aesni_encrypt3: movups (%edx),%xmm0 shll $4,%ecx movups 16(%edx),%xmm1 xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 movups 32(%edx),%xmm0 leal 32(%edx,%ecx,1),%edx negl %ecx addl $16,%ecx L005enc3_loop: .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 movups -16(%edx,%ecx,1),%xmm0 jnz L005enc3_loop .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,221,208 .byte 102,15,56,221,216 .byte 102,15,56,221,224 ret .private_extern __aesni_decrypt3 .align 4 __aesni_decrypt3: movups (%edx),%xmm0 shll $4,%ecx movups 16(%edx),%xmm1 xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 movups 32(%edx),%xmm0 leal 32(%edx,%ecx,1),%edx negl %ecx addl $16,%ecx L006dec3_loop: .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,222,208 .byte 102,15,56,222,216 .byte 102,15,56,222,224 
movups -16(%edx,%ecx,1),%xmm0 jnz L006dec3_loop .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .byte 102,15,56,223,208 .byte 102,15,56,223,216 .byte 102,15,56,223,224 ret .private_extern __aesni_encrypt4 .align 4 __aesni_encrypt4: movups (%edx),%xmm0 movups 16(%edx),%xmm1 shll $4,%ecx xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 pxor %xmm0,%xmm5 movups 32(%edx),%xmm0 leal 32(%edx,%ecx,1),%edx negl %ecx .byte 15,31,64,0 addl $16,%ecx L007enc4_loop: .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 .byte 102,15,56,220,232 movups -16(%edx,%ecx,1),%xmm0 jnz L007enc4_loop .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,221,208 .byte 102,15,56,221,216 .byte 102,15,56,221,224 .byte 102,15,56,221,232 ret .private_extern __aesni_decrypt4 .align 4 __aesni_decrypt4: movups (%edx),%xmm0 movups 16(%edx),%xmm1 shll $4,%ecx xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 pxor %xmm0,%xmm5 movups 32(%edx),%xmm0 leal 32(%edx,%ecx,1),%edx negl %ecx .byte 15,31,64,0 addl $16,%ecx L008dec4_loop: .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .byte 102,15,56,222,233 movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,222,208 .byte 102,15,56,222,216 .byte 102,15,56,222,224 .byte 102,15,56,222,232 movups -16(%edx,%ecx,1),%xmm0 jnz L008dec4_loop .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .byte 102,15,56,222,233 .byte 102,15,56,223,208 .byte 102,15,56,223,216 .byte 102,15,56,223,224 .byte 102,15,56,223,232 ret .private_extern __aesni_encrypt6 .align 4 __aesni_encrypt6: movups (%edx),%xmm0 shll $4,%ecx movups 16(%edx),%xmm1 xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 .byte 102,15,56,220,209 pxor %xmm0,%xmm5 pxor %xmm0,%xmm6 .byte 102,15,56,220,217 leal 32(%edx,%ecx,1),%edx negl %ecx .byte 102,15,56,220,225 pxor %xmm0,%xmm7 movups (%edx,%ecx,1),%xmm0 addl $16,%ecx jmp L009_aesni_encrypt6_inner .align 4,0x90 L010enc6_loop: .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 L009_aesni_encrypt6_inner: .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 L_aesni_encrypt6_enter: movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 .byte 102,15,56,220,232 .byte 102,15,56,220,240 .byte 102,15,56,220,248 movups -16(%edx,%ecx,1),%xmm0 jnz L010enc6_loop .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,15,56,221,208 .byte 102,15,56,221,216 .byte 102,15,56,221,224 .byte 102,15,56,221,232 .byte 102,15,56,221,240 .byte 102,15,56,221,248 ret .private_extern __aesni_decrypt6 .align 4 __aesni_decrypt6: movups (%edx),%xmm0 shll $4,%ecx movups 16(%edx),%xmm1 xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 .byte 102,15,56,222,209 pxor %xmm0,%xmm5 pxor %xmm0,%xmm6 .byte 102,15,56,222,217 leal 32(%edx,%ecx,1),%edx negl %ecx .byte 102,15,56,222,225 pxor %xmm0,%xmm7 movups (%edx,%ecx,1),%xmm0 addl $16,%ecx jmp L011_aesni_decrypt6_inner .align 4,0x90 L012dec6_loop: .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 L011_aesni_decrypt6_inner: .byte 102,15,56,222,233 .byte 102,15,56,222,241 .byte 102,15,56,222,249 L_aesni_decrypt6_enter: movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 
102,15,56,222,208 .byte 102,15,56,222,216 .byte 102,15,56,222,224 .byte 102,15,56,222,232 .byte 102,15,56,222,240 .byte 102,15,56,222,248 movups -16(%edx,%ecx,1),%xmm0 jnz L012dec6_loop .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .byte 102,15,56,222,233 .byte 102,15,56,222,241 .byte 102,15,56,222,249 .byte 102,15,56,223,208 .byte 102,15,56,223,216 .byte 102,15,56,223,224 .byte 102,15,56,223,232 .byte 102,15,56,223,240 .byte 102,15,56,223,248 ret .globl _aes_hw_ecb_encrypt .private_extern _aes_hw_ecb_encrypt .align 4 _aes_hw_ecb_encrypt: L_aes_hw_ecb_encrypt_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl 32(%esp),%edx movl 36(%esp),%ebx andl $-16,%eax jz L013ecb_ret movl 240(%edx),%ecx testl %ebx,%ebx jz L014ecb_decrypt movl %edx,%ebp movl %ecx,%ebx cmpl $96,%eax jb L015ecb_enc_tail movdqu (%esi),%xmm2 movdqu 16(%esi),%xmm3 movdqu 32(%esi),%xmm4 movdqu 48(%esi),%xmm5 movdqu 64(%esi),%xmm6 movdqu 80(%esi),%xmm7 leal 96(%esi),%esi subl $96,%eax jmp L016ecb_enc_loop6_enter .align 4,0x90 L017ecb_enc_loop6: movups %xmm2,(%edi) movdqu (%esi),%xmm2 movups %xmm3,16(%edi) movdqu 16(%esi),%xmm3 movups %xmm4,32(%edi) movdqu 32(%esi),%xmm4 movups %xmm5,48(%edi) movdqu 48(%esi),%xmm5 movups %xmm6,64(%edi) movdqu 64(%esi),%xmm6 movups %xmm7,80(%edi) leal 96(%edi),%edi movdqu 80(%esi),%xmm7 leal 96(%esi),%esi L016ecb_enc_loop6_enter: call __aesni_encrypt6 movl %ebp,%edx movl %ebx,%ecx subl $96,%eax jnc L017ecb_enc_loop6 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) movups %xmm6,64(%edi) movups %xmm7,80(%edi) leal 96(%edi),%edi addl $96,%eax jz L013ecb_ret L015ecb_enc_tail: movups (%esi),%xmm2 cmpl $32,%eax jb L018ecb_enc_one movups 16(%esi),%xmm3 je L019ecb_enc_two movups 32(%esi),%xmm4 cmpl $64,%eax jb L020ecb_enc_three movups 48(%esi),%xmm5 je L021ecb_enc_four movups 64(%esi),%xmm6 xorps %xmm7,%xmm7 call __aesni_encrypt6 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) movups %xmm6,64(%edi) jmp L013ecb_ret .align 4,0x90 L018ecb_enc_one: movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 L022enc1_loop_3: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz L022enc1_loop_3 .byte 102,15,56,221,209 movups %xmm2,(%edi) jmp L013ecb_ret .align 4,0x90 L019ecb_enc_two: call __aesni_encrypt2 movups %xmm2,(%edi) movups %xmm3,16(%edi) jmp L013ecb_ret .align 4,0x90 L020ecb_enc_three: call __aesni_encrypt3 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) jmp L013ecb_ret .align 4,0x90 L021ecb_enc_four: call __aesni_encrypt4 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) jmp L013ecb_ret .align 4,0x90 L014ecb_decrypt: movl %edx,%ebp movl %ecx,%ebx cmpl $96,%eax jb L023ecb_dec_tail movdqu (%esi),%xmm2 movdqu 16(%esi),%xmm3 movdqu 32(%esi),%xmm4 movdqu 48(%esi),%xmm5 movdqu 64(%esi),%xmm6 movdqu 80(%esi),%xmm7 leal 96(%esi),%esi subl $96,%eax jmp L024ecb_dec_loop6_enter .align 4,0x90 L025ecb_dec_loop6: movups %xmm2,(%edi) movdqu (%esi),%xmm2 movups %xmm3,16(%edi) movdqu 16(%esi),%xmm3 movups %xmm4,32(%edi) movdqu 32(%esi),%xmm4 movups %xmm5,48(%edi) movdqu 48(%esi),%xmm5 movups %xmm6,64(%edi) movdqu 64(%esi),%xmm6 movups %xmm7,80(%edi) leal 96(%edi),%edi movdqu 80(%esi),%xmm7 leal 96(%esi),%esi L024ecb_dec_loop6_enter: call __aesni_decrypt6 movl %ebp,%edx movl %ebx,%ecx subl $96,%eax jnc L025ecb_dec_loop6 movups %xmm2,(%edi) movups %xmm3,16(%edi) 
movups %xmm4,32(%edi) movups %xmm5,48(%edi) movups %xmm6,64(%edi) movups %xmm7,80(%edi) leal 96(%edi),%edi addl $96,%eax jz L013ecb_ret L023ecb_dec_tail: movups (%esi),%xmm2 cmpl $32,%eax jb L026ecb_dec_one movups 16(%esi),%xmm3 je L027ecb_dec_two movups 32(%esi),%xmm4 cmpl $64,%eax jb L028ecb_dec_three movups 48(%esi),%xmm5 je L029ecb_dec_four movups 64(%esi),%xmm6 xorps %xmm7,%xmm7 call __aesni_decrypt6 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) movups %xmm6,64(%edi) jmp L013ecb_ret .align 4,0x90 L026ecb_dec_one: movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 L030dec1_loop_4: .byte 102,15,56,222,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz L030dec1_loop_4 .byte 102,15,56,223,209 movups %xmm2,(%edi) jmp L013ecb_ret .align 4,0x90 L027ecb_dec_two: call __aesni_decrypt2 movups %xmm2,(%edi) movups %xmm3,16(%edi) jmp L013ecb_ret .align 4,0x90 L028ecb_dec_three: call __aesni_decrypt3 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) jmp L013ecb_ret .align 4,0x90 L029ecb_dec_four: call __aesni_decrypt4 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) L013ecb_ret: pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 pxor %xmm7,%xmm7 popl %edi popl %esi popl %ebx popl %ebp ret .globl _aes_hw_ccm64_encrypt_blocks .private_extern _aes_hw_ccm64_encrypt_blocks .align 4 _aes_hw_ccm64_encrypt_blocks: L_aes_hw_ccm64_encrypt_blocks_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl 32(%esp),%edx movl 36(%esp),%ebx movl 40(%esp),%ecx movl %esp,%ebp subl $60,%esp andl $-16,%esp movl %ebp,48(%esp) movdqu (%ebx),%xmm7 movdqu (%ecx),%xmm3 movl 240(%edx),%ecx movl $202182159,(%esp) movl $134810123,4(%esp) movl $67438087,8(%esp) movl $66051,12(%esp) movl $1,%ebx xorl %ebp,%ebp movl %ebx,16(%esp) movl %ebp,20(%esp) movl %ebp,24(%esp) movl %ebp,28(%esp) shll $4,%ecx movl $16,%ebx leal (%edx),%ebp movdqa (%esp),%xmm5 movdqa %xmm7,%xmm2 leal 32(%edx,%ecx,1),%edx subl %ecx,%ebx .byte 102,15,56,0,253 L031ccm64_enc_outer: movups (%ebp),%xmm0 movl %ebx,%ecx movups (%esi),%xmm6 xorps %xmm0,%xmm2 movups 16(%ebp),%xmm1 xorps %xmm6,%xmm0 xorps %xmm0,%xmm3 movups 32(%ebp),%xmm0 L032ccm64_enc2_loop: .byte 102,15,56,220,209 .byte 102,15,56,220,217 movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,220,208 .byte 102,15,56,220,216 movups -16(%edx,%ecx,1),%xmm0 jnz L032ccm64_enc2_loop .byte 102,15,56,220,209 .byte 102,15,56,220,217 paddq 16(%esp),%xmm7 decl %eax .byte 102,15,56,221,208 .byte 102,15,56,221,216 leal 16(%esi),%esi xorps %xmm2,%xmm6 movdqa %xmm7,%xmm2 movups %xmm6,(%edi) .byte 102,15,56,0,213 leal 16(%edi),%edi jnz L031ccm64_enc_outer movl 48(%esp),%esp movl 40(%esp),%edi movups %xmm3,(%edi) pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 pxor %xmm7,%xmm7 popl %edi popl %esi popl %ebx popl %ebp ret .globl _aes_hw_ccm64_decrypt_blocks .private_extern _aes_hw_ccm64_decrypt_blocks .align 4 _aes_hw_ccm64_decrypt_blocks: L_aes_hw_ccm64_decrypt_blocks_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl 32(%esp),%edx movl 36(%esp),%ebx movl 40(%esp),%ecx movl %esp,%ebp subl $60,%esp andl $-16,%esp movl %ebp,48(%esp) movdqu (%ebx),%xmm7 movdqu (%ecx),%xmm3 movl 240(%edx),%ecx movl $202182159,(%esp) movl $134810123,4(%esp) movl 
$67438087,8(%esp) movl $66051,12(%esp) movl $1,%ebx xorl %ebp,%ebp movl %ebx,16(%esp) movl %ebp,20(%esp) movl %ebp,24(%esp) movl %ebp,28(%esp) movdqa (%esp),%xmm5 movdqa %xmm7,%xmm2 movl %edx,%ebp movl %ecx,%ebx .byte 102,15,56,0,253 movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 L033enc1_loop_5: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz L033enc1_loop_5 .byte 102,15,56,221,209 shll $4,%ebx movl $16,%ecx movups (%esi),%xmm6 paddq 16(%esp),%xmm7 leal 16(%esi),%esi subl %ebx,%ecx leal 32(%ebp,%ebx,1),%edx movl %ecx,%ebx jmp L034ccm64_dec_outer .align 4,0x90 L034ccm64_dec_outer: xorps %xmm2,%xmm6 movdqa %xmm7,%xmm2 movups %xmm6,(%edi) leal 16(%edi),%edi .byte 102,15,56,0,213 subl $1,%eax jz L035ccm64_dec_break movups (%ebp),%xmm0 movl %ebx,%ecx movups 16(%ebp),%xmm1 xorps %xmm0,%xmm6 xorps %xmm0,%xmm2 xorps %xmm6,%xmm3 movups 32(%ebp),%xmm0 L036ccm64_dec2_loop: .byte 102,15,56,220,209 .byte 102,15,56,220,217 movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,220,208 .byte 102,15,56,220,216 movups -16(%edx,%ecx,1),%xmm0 jnz L036ccm64_dec2_loop movups (%esi),%xmm6 paddq 16(%esp),%xmm7 .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,221,208 .byte 102,15,56,221,216 leal 16(%esi),%esi jmp L034ccm64_dec_outer .align 4,0x90 L035ccm64_dec_break: movl 240(%ebp),%ecx movl %ebp,%edx movups (%edx),%xmm0 movups 16(%edx),%xmm1 xorps %xmm0,%xmm6 leal 32(%edx),%edx xorps %xmm6,%xmm3 L037enc1_loop_6: .byte 102,15,56,220,217 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz L037enc1_loop_6 .byte 102,15,56,221,217 movl 48(%esp),%esp movl 40(%esp),%edi movups %xmm3,(%edi) pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 pxor %xmm7,%xmm7 popl %edi popl %esi popl %ebx popl %ebp ret .globl _aes_hw_ctr32_encrypt_blocks .private_extern _aes_hw_ctr32_encrypt_blocks .align 4 _aes_hw_ctr32_encrypt_blocks: L_aes_hw_ctr32_encrypt_blocks_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi #ifdef BORINGSSL_DISPATCH_TEST pushl %ebx pushl %edx call L038pic L038pic: popl %ebx leal _BORINGSSL_function_hit+0-L038pic(%ebx),%ebx movl $1,%edx movb %dl,(%ebx) popl %edx popl %ebx #endif movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl 32(%esp),%edx movl 36(%esp),%ebx movl %esp,%ebp subl $88,%esp andl $-16,%esp movl %ebp,80(%esp) cmpl $1,%eax jb L039ctr32_ret je L040ctr32_one_shortcut movdqu (%ebx),%xmm7 movl $202182159,(%esp) movl $134810123,4(%esp) movl $67438087,8(%esp) movl $66051,12(%esp) movl $6,%ecx xorl %ebp,%ebp movl %ecx,16(%esp) movl %ecx,20(%esp) movl %ecx,24(%esp) movl %ebp,28(%esp) .byte 102,15,58,22,251,3 .byte 102,15,58,34,253,3 movl 240(%edx),%ecx bswap %ebx pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 movdqa (%esp),%xmm2 .byte 102,15,58,34,195,0 leal 3(%ebx),%ebp .byte 102,15,58,34,205,0 incl %ebx .byte 102,15,58,34,195,1 incl %ebp .byte 102,15,58,34,205,1 incl %ebx .byte 102,15,58,34,195,2 incl %ebp .byte 102,15,58,34,205,2 movdqa %xmm0,48(%esp) .byte 102,15,56,0,194 movdqu (%edx),%xmm6 movdqa %xmm1,64(%esp) .byte 102,15,56,0,202 pshufd $192,%xmm0,%xmm2 pshufd $128,%xmm0,%xmm3 cmpl $6,%eax jb L041ctr32_tail pxor %xmm6,%xmm7 shll $4,%ecx movl $16,%ebx movdqa %xmm7,32(%esp) movl %edx,%ebp subl %ecx,%ebx leal 32(%edx,%ecx,1),%edx subl $6,%eax jmp L042ctr32_loop6 .align 4,0x90 L042ctr32_loop6: pshufd $64,%xmm0,%xmm4 movdqa 32(%esp),%xmm0 pshufd $192,%xmm1,%xmm5 pxor %xmm0,%xmm2 pshufd $128,%xmm1,%xmm6 pxor %xmm0,%xmm3 pshufd $64,%xmm1,%xmm7 movups 
16(%ebp),%xmm1 pxor %xmm0,%xmm4 pxor %xmm0,%xmm5 .byte 102,15,56,220,209 pxor %xmm0,%xmm6 pxor %xmm0,%xmm7 .byte 102,15,56,220,217 movups 32(%ebp),%xmm0 movl %ebx,%ecx .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 call L_aesni_encrypt6_enter movups (%esi),%xmm1 movups 16(%esi),%xmm0 xorps %xmm1,%xmm2 movups 32(%esi),%xmm1 xorps %xmm0,%xmm3 movups %xmm2,(%edi) movdqa 16(%esp),%xmm0 xorps %xmm1,%xmm4 movdqa 64(%esp),%xmm1 movups %xmm3,16(%edi) movups %xmm4,32(%edi) paddd %xmm0,%xmm1 paddd 48(%esp),%xmm0 movdqa (%esp),%xmm2 movups 48(%esi),%xmm3 movups 64(%esi),%xmm4 xorps %xmm3,%xmm5 movups 80(%esi),%xmm3 leal 96(%esi),%esi movdqa %xmm0,48(%esp) .byte 102,15,56,0,194 xorps %xmm4,%xmm6 movups %xmm5,48(%edi) xorps %xmm3,%xmm7 movdqa %xmm1,64(%esp) .byte 102,15,56,0,202 movups %xmm6,64(%edi) pshufd $192,%xmm0,%xmm2 movups %xmm7,80(%edi) leal 96(%edi),%edi pshufd $128,%xmm0,%xmm3 subl $6,%eax jnc L042ctr32_loop6 addl $6,%eax jz L039ctr32_ret movdqu (%ebp),%xmm7 movl %ebp,%edx pxor 32(%esp),%xmm7 movl 240(%ebp),%ecx L041ctr32_tail: por %xmm7,%xmm2 cmpl $2,%eax jb L043ctr32_one pshufd $64,%xmm0,%xmm4 por %xmm7,%xmm3 je L044ctr32_two pshufd $192,%xmm1,%xmm5 por %xmm7,%xmm4 cmpl $4,%eax jb L045ctr32_three pshufd $128,%xmm1,%xmm6 por %xmm7,%xmm5 je L046ctr32_four por %xmm7,%xmm6 call __aesni_encrypt6 movups (%esi),%xmm1 movups 16(%esi),%xmm0 xorps %xmm1,%xmm2 movups 32(%esi),%xmm1 xorps %xmm0,%xmm3 movups 48(%esi),%xmm0 xorps %xmm1,%xmm4 movups 64(%esi),%xmm1 xorps %xmm0,%xmm5 movups %xmm2,(%edi) xorps %xmm1,%xmm6 movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) movups %xmm6,64(%edi) jmp L039ctr32_ret .align 4,0x90 L040ctr32_one_shortcut: movups (%ebx),%xmm2 movl 240(%edx),%ecx L043ctr32_one: movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 L047enc1_loop_7: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz L047enc1_loop_7 .byte 102,15,56,221,209 movups (%esi),%xmm6 xorps %xmm2,%xmm6 movups %xmm6,(%edi) jmp L039ctr32_ret .align 4,0x90 L044ctr32_two: call __aesni_encrypt2 movups (%esi),%xmm5 movups 16(%esi),%xmm6 xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 movups %xmm2,(%edi) movups %xmm3,16(%edi) jmp L039ctr32_ret .align 4,0x90 L045ctr32_three: call __aesni_encrypt3 movups (%esi),%xmm5 movups 16(%esi),%xmm6 xorps %xmm5,%xmm2 movups 32(%esi),%xmm7 xorps %xmm6,%xmm3 movups %xmm2,(%edi) xorps %xmm7,%xmm4 movups %xmm3,16(%edi) movups %xmm4,32(%edi) jmp L039ctr32_ret .align 4,0x90 L046ctr32_four: call __aesni_encrypt4 movups (%esi),%xmm6 movups 16(%esi),%xmm7 movups 32(%esi),%xmm1 xorps %xmm6,%xmm2 movups 48(%esi),%xmm0 xorps %xmm7,%xmm3 movups %xmm2,(%edi) xorps %xmm1,%xmm4 movups %xmm3,16(%edi) xorps %xmm0,%xmm5 movups %xmm4,32(%edi) movups %xmm5,48(%edi) L039ctr32_ret: pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 movdqa %xmm0,32(%esp) pxor %xmm5,%xmm5 movdqa %xmm0,48(%esp) pxor %xmm6,%xmm6 movdqa %xmm0,64(%esp) pxor %xmm7,%xmm7 movl 80(%esp),%esp popl %edi popl %esi popl %ebx popl %ebp ret .globl _aes_hw_xts_encrypt .private_extern _aes_hw_xts_encrypt .align 4 _aes_hw_xts_encrypt: L_aes_hw_xts_encrypt_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 36(%esp),%edx movl 40(%esp),%esi movl 240(%edx),%ecx movups (%esi),%xmm2 movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 L048enc1_loop_8: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz L048enc1_loop_8 .byte 102,15,56,221,209 movl 
20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl 32(%esp),%edx movl %esp,%ebp subl $120,%esp movl 240(%edx),%ecx andl $-16,%esp movl $135,96(%esp) movl $0,100(%esp) movl $1,104(%esp) movl $0,108(%esp) movl %eax,112(%esp) movl %ebp,116(%esp) movdqa %xmm2,%xmm1 pxor %xmm0,%xmm0 movdqa 96(%esp),%xmm3 pcmpgtd %xmm1,%xmm0 andl $-16,%eax movl %edx,%ebp movl %ecx,%ebx subl $96,%eax jc L049xts_enc_short shll $4,%ecx movl $16,%ebx subl %ecx,%ebx leal 32(%edx,%ecx,1),%edx jmp L050xts_enc_loop6 .align 4,0x90 L050xts_enc_loop6: pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,16(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,32(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,48(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 pshufd $19,%xmm0,%xmm7 movdqa %xmm1,64(%esp) paddq %xmm1,%xmm1 movups (%ebp),%xmm0 pand %xmm3,%xmm7 movups (%esi),%xmm2 pxor %xmm1,%xmm7 movl %ebx,%ecx movdqu 16(%esi),%xmm3 xorps %xmm0,%xmm2 movdqu 32(%esi),%xmm4 pxor %xmm0,%xmm3 movdqu 48(%esi),%xmm5 pxor %xmm0,%xmm4 movdqu 64(%esi),%xmm6 pxor %xmm0,%xmm5 movdqu 80(%esi),%xmm1 pxor %xmm0,%xmm6 leal 96(%esi),%esi pxor (%esp),%xmm2 movdqa %xmm7,80(%esp) pxor %xmm1,%xmm7 movups 16(%ebp),%xmm1 pxor 16(%esp),%xmm3 pxor 32(%esp),%xmm4 .byte 102,15,56,220,209 pxor 48(%esp),%xmm5 pxor 64(%esp),%xmm6 .byte 102,15,56,220,217 pxor %xmm0,%xmm7 movups 32(%ebp),%xmm0 .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 call L_aesni_encrypt6_enter movdqa 80(%esp),%xmm1 pxor %xmm0,%xmm0 xorps (%esp),%xmm2 pcmpgtd %xmm1,%xmm0 xorps 16(%esp),%xmm3 movups %xmm2,(%edi) xorps 32(%esp),%xmm4 movups %xmm3,16(%edi) xorps 48(%esp),%xmm5 movups %xmm4,32(%edi) xorps 64(%esp),%xmm6 movups %xmm5,48(%edi) xorps %xmm1,%xmm7 movups %xmm6,64(%edi) pshufd $19,%xmm0,%xmm2 movups %xmm7,80(%edi) leal 96(%edi),%edi movdqa 96(%esp),%xmm3 pxor %xmm0,%xmm0 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 subl $96,%eax jnc L050xts_enc_loop6 movl 240(%ebp),%ecx movl %ebp,%edx movl %ecx,%ebx L049xts_enc_short: addl $96,%eax jz L051xts_enc_done6x movdqa %xmm1,%xmm5 cmpl $32,%eax jb L052xts_enc_one pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 je L053xts_enc_two pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,%xmm6 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 cmpl $64,%eax jb L054xts_enc_three pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,%xmm7 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 movdqa %xmm5,(%esp) movdqa %xmm6,16(%esp) je L055xts_enc_four movdqa %xmm7,32(%esp) pshufd $19,%xmm0,%xmm7 movdqa %xmm1,48(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm7 pxor %xmm1,%xmm7 movdqu (%esi),%xmm2 movdqu 16(%esi),%xmm3 movdqu 32(%esi),%xmm4 pxor (%esp),%xmm2 movdqu 48(%esi),%xmm5 pxor 16(%esp),%xmm3 movdqu 64(%esi),%xmm6 pxor 32(%esp),%xmm4 leal 80(%esi),%esi pxor 48(%esp),%xmm5 movdqa %xmm7,64(%esp) pxor %xmm7,%xmm6 call __aesni_encrypt6 movaps 64(%esp),%xmm1 xorps (%esp),%xmm2 xorps 16(%esp),%xmm3 xorps 32(%esp),%xmm4 movups %xmm2,(%edi) xorps 48(%esp),%xmm5 movups %xmm3,16(%edi) xorps %xmm1,%xmm6 movups %xmm4,32(%edi) movups %xmm5,48(%edi) movups 
%xmm6,64(%edi) leal 80(%edi),%edi jmp L056xts_enc_done .align 4,0x90 L052xts_enc_one: movups (%esi),%xmm2 leal 16(%esi),%esi xorps %xmm5,%xmm2 movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 L057enc1_loop_9: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz L057enc1_loop_9 .byte 102,15,56,221,209 xorps %xmm5,%xmm2 movups %xmm2,(%edi) leal 16(%edi),%edi movdqa %xmm5,%xmm1 jmp L056xts_enc_done .align 4,0x90 L053xts_enc_two: movaps %xmm1,%xmm6 movups (%esi),%xmm2 movups 16(%esi),%xmm3 leal 32(%esi),%esi xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 call __aesni_encrypt2 xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 movups %xmm2,(%edi) movups %xmm3,16(%edi) leal 32(%edi),%edi movdqa %xmm6,%xmm1 jmp L056xts_enc_done .align 4,0x90 L054xts_enc_three: movaps %xmm1,%xmm7 movups (%esi),%xmm2 movups 16(%esi),%xmm3 movups 32(%esi),%xmm4 leal 48(%esi),%esi xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 xorps %xmm7,%xmm4 call __aesni_encrypt3 xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 xorps %xmm7,%xmm4 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) leal 48(%edi),%edi movdqa %xmm7,%xmm1 jmp L056xts_enc_done .align 4,0x90 L055xts_enc_four: movaps %xmm1,%xmm6 movups (%esi),%xmm2 movups 16(%esi),%xmm3 movups 32(%esi),%xmm4 xorps (%esp),%xmm2 movups 48(%esi),%xmm5 leal 64(%esi),%esi xorps 16(%esp),%xmm3 xorps %xmm7,%xmm4 xorps %xmm6,%xmm5 call __aesni_encrypt4 xorps (%esp),%xmm2 xorps 16(%esp),%xmm3 xorps %xmm7,%xmm4 movups %xmm2,(%edi) xorps %xmm6,%xmm5 movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) leal 64(%edi),%edi movdqa %xmm6,%xmm1 jmp L056xts_enc_done .align 4,0x90 L051xts_enc_done6x: movl 112(%esp),%eax andl $15,%eax jz L058xts_enc_ret movdqa %xmm1,%xmm5 movl %eax,112(%esp) jmp L059xts_enc_steal .align 4,0x90 L056xts_enc_done: movl 112(%esp),%eax pxor %xmm0,%xmm0 andl $15,%eax jz L058xts_enc_ret pcmpgtd %xmm1,%xmm0 movl %eax,112(%esp) pshufd $19,%xmm0,%xmm5 paddq %xmm1,%xmm1 pand 96(%esp),%xmm5 pxor %xmm1,%xmm5 L059xts_enc_steal: movzbl (%esi),%ecx movzbl -16(%edi),%edx leal 1(%esi),%esi movb %cl,-16(%edi) movb %dl,(%edi) leal 1(%edi),%edi subl $1,%eax jnz L059xts_enc_steal subl 112(%esp),%edi movl %ebp,%edx movl %ebx,%ecx movups -16(%edi),%xmm2 xorps %xmm5,%xmm2 movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 L060enc1_loop_10: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz L060enc1_loop_10 .byte 102,15,56,221,209 xorps %xmm5,%xmm2 movups %xmm2,-16(%edi) L058xts_enc_ret: pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 movdqa %xmm0,(%esp) pxor %xmm3,%xmm3 movdqa %xmm0,16(%esp) pxor %xmm4,%xmm4 movdqa %xmm0,32(%esp) pxor %xmm5,%xmm5 movdqa %xmm0,48(%esp) pxor %xmm6,%xmm6 movdqa %xmm0,64(%esp) pxor %xmm7,%xmm7 movdqa %xmm0,80(%esp) movl 116(%esp),%esp popl %edi popl %esi popl %ebx popl %ebp ret .globl _aes_hw_xts_decrypt .private_extern _aes_hw_xts_decrypt .align 4 _aes_hw_xts_decrypt: L_aes_hw_xts_decrypt_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 36(%esp),%edx movl 40(%esp),%esi movl 240(%edx),%ecx movups (%esi),%xmm2 movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 L061enc1_loop_11: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz L061enc1_loop_11 .byte 102,15,56,221,209 movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl 32(%esp),%edx movl %esp,%ebp subl $120,%esp andl $-16,%esp xorl %ebx,%ebx testl $15,%eax setnz %bl shll $4,%ebx subl %ebx,%eax movl $135,96(%esp) movl $0,100(%esp) movl $1,104(%esp) 
movl $0,108(%esp) movl %eax,112(%esp) movl %ebp,116(%esp) movl 240(%edx),%ecx movl %edx,%ebp movl %ecx,%ebx movdqa %xmm2,%xmm1 pxor %xmm0,%xmm0 movdqa 96(%esp),%xmm3 pcmpgtd %xmm1,%xmm0 andl $-16,%eax subl $96,%eax jc L062xts_dec_short shll $4,%ecx movl $16,%ebx subl %ecx,%ebx leal 32(%edx,%ecx,1),%edx jmp L063xts_dec_loop6 .align 4,0x90 L063xts_dec_loop6: pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,16(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,32(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,48(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 pshufd $19,%xmm0,%xmm7 movdqa %xmm1,64(%esp) paddq %xmm1,%xmm1 movups (%ebp),%xmm0 pand %xmm3,%xmm7 movups (%esi),%xmm2 pxor %xmm1,%xmm7 movl %ebx,%ecx movdqu 16(%esi),%xmm3 xorps %xmm0,%xmm2 movdqu 32(%esi),%xmm4 pxor %xmm0,%xmm3 movdqu 48(%esi),%xmm5 pxor %xmm0,%xmm4 movdqu 64(%esi),%xmm6 pxor %xmm0,%xmm5 movdqu 80(%esi),%xmm1 pxor %xmm0,%xmm6 leal 96(%esi),%esi pxor (%esp),%xmm2 movdqa %xmm7,80(%esp) pxor %xmm1,%xmm7 movups 16(%ebp),%xmm1 pxor 16(%esp),%xmm3 pxor 32(%esp),%xmm4 .byte 102,15,56,222,209 pxor 48(%esp),%xmm5 pxor 64(%esp),%xmm6 .byte 102,15,56,222,217 pxor %xmm0,%xmm7 movups 32(%ebp),%xmm0 .byte 102,15,56,222,225 .byte 102,15,56,222,233 .byte 102,15,56,222,241 .byte 102,15,56,222,249 call L_aesni_decrypt6_enter movdqa 80(%esp),%xmm1 pxor %xmm0,%xmm0 xorps (%esp),%xmm2 pcmpgtd %xmm1,%xmm0 xorps 16(%esp),%xmm3 movups %xmm2,(%edi) xorps 32(%esp),%xmm4 movups %xmm3,16(%edi) xorps 48(%esp),%xmm5 movups %xmm4,32(%edi) xorps 64(%esp),%xmm6 movups %xmm5,48(%edi) xorps %xmm1,%xmm7 movups %xmm6,64(%edi) pshufd $19,%xmm0,%xmm2 movups %xmm7,80(%edi) leal 96(%edi),%edi movdqa 96(%esp),%xmm3 pxor %xmm0,%xmm0 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 subl $96,%eax jnc L063xts_dec_loop6 movl 240(%ebp),%ecx movl %ebp,%edx movl %ecx,%ebx L062xts_dec_short: addl $96,%eax jz L064xts_dec_done6x movdqa %xmm1,%xmm5 cmpl $32,%eax jb L065xts_dec_one pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 je L066xts_dec_two pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,%xmm6 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 cmpl $64,%eax jb L067xts_dec_three pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,%xmm7 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 movdqa %xmm5,(%esp) movdqa %xmm6,16(%esp) je L068xts_dec_four movdqa %xmm7,32(%esp) pshufd $19,%xmm0,%xmm7 movdqa %xmm1,48(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm7 pxor %xmm1,%xmm7 movdqu (%esi),%xmm2 movdqu 16(%esi),%xmm3 movdqu 32(%esi),%xmm4 pxor (%esp),%xmm2 movdqu 48(%esi),%xmm5 pxor 16(%esp),%xmm3 movdqu 64(%esi),%xmm6 pxor 32(%esp),%xmm4 leal 80(%esi),%esi pxor 48(%esp),%xmm5 movdqa %xmm7,64(%esp) pxor %xmm7,%xmm6 call __aesni_decrypt6 movaps 64(%esp),%xmm1 xorps (%esp),%xmm2 xorps 16(%esp),%xmm3 xorps 32(%esp),%xmm4 movups %xmm2,(%edi) xorps 48(%esp),%xmm5 movups %xmm3,16(%edi) xorps %xmm1,%xmm6 movups %xmm4,32(%edi) movups %xmm5,48(%edi) movups %xmm6,64(%edi) leal 80(%edi),%edi jmp L069xts_dec_done .align 4,0x90 L065xts_dec_one: movups (%esi),%xmm2 leal 16(%esi),%esi xorps %xmm5,%xmm2 movups (%edx),%xmm0 movups 
16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 L070dec1_loop_12: .byte 102,15,56,222,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz L070dec1_loop_12 .byte 102,15,56,223,209 xorps %xmm5,%xmm2 movups %xmm2,(%edi) leal 16(%edi),%edi movdqa %xmm5,%xmm1 jmp L069xts_dec_done .align 4,0x90 L066xts_dec_two: movaps %xmm1,%xmm6 movups (%esi),%xmm2 movups 16(%esi),%xmm3 leal 32(%esi),%esi xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 call __aesni_decrypt2 xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 movups %xmm2,(%edi) movups %xmm3,16(%edi) leal 32(%edi),%edi movdqa %xmm6,%xmm1 jmp L069xts_dec_done .align 4,0x90 L067xts_dec_three: movaps %xmm1,%xmm7 movups (%esi),%xmm2 movups 16(%esi),%xmm3 movups 32(%esi),%xmm4 leal 48(%esi),%esi xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 xorps %xmm7,%xmm4 call __aesni_decrypt3 xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 xorps %xmm7,%xmm4 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) leal 48(%edi),%edi movdqa %xmm7,%xmm1 jmp L069xts_dec_done .align 4,0x90 L068xts_dec_four: movaps %xmm1,%xmm6 movups (%esi),%xmm2 movups 16(%esi),%xmm3 movups 32(%esi),%xmm4 xorps (%esp),%xmm2 movups 48(%esi),%xmm5 leal 64(%esi),%esi xorps 16(%esp),%xmm3 xorps %xmm7,%xmm4 xorps %xmm6,%xmm5 call __aesni_decrypt4 xorps (%esp),%xmm2 xorps 16(%esp),%xmm3 xorps %xmm7,%xmm4 movups %xmm2,(%edi) xorps %xmm6,%xmm5 movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) leal 64(%edi),%edi movdqa %xmm6,%xmm1 jmp L069xts_dec_done .align 4,0x90 L064xts_dec_done6x: movl 112(%esp),%eax andl $15,%eax jz L071xts_dec_ret movl %eax,112(%esp) jmp L072xts_dec_only_one_more .align 4,0x90 L069xts_dec_done: movl 112(%esp),%eax pxor %xmm0,%xmm0 andl $15,%eax jz L071xts_dec_ret pcmpgtd %xmm1,%xmm0 movl %eax,112(%esp) pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa 96(%esp),%xmm3 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 L072xts_dec_only_one_more: pshufd $19,%xmm0,%xmm5 movdqa %xmm1,%xmm6 paddq %xmm1,%xmm1 pand %xmm3,%xmm5 pxor %xmm1,%xmm5 movl %ebp,%edx movl %ebx,%ecx movups (%esi),%xmm2 xorps %xmm5,%xmm2 movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 L073dec1_loop_13: .byte 102,15,56,222,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz L073dec1_loop_13 .byte 102,15,56,223,209 xorps %xmm5,%xmm2 movups %xmm2,(%edi) L074xts_dec_steal: movzbl 16(%esi),%ecx movzbl (%edi),%edx leal 1(%esi),%esi movb %cl,(%edi) movb %dl,16(%edi) leal 1(%edi),%edi subl $1,%eax jnz L074xts_dec_steal subl 112(%esp),%edi movl %ebp,%edx movl %ebx,%ecx movups (%edi),%xmm2 xorps %xmm6,%xmm2 movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 L075dec1_loop_14: .byte 102,15,56,222,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz L075dec1_loop_14 .byte 102,15,56,223,209 xorps %xmm6,%xmm2 movups %xmm2,(%edi) L071xts_dec_ret: pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 movdqa %xmm0,(%esp) pxor %xmm3,%xmm3 movdqa %xmm0,16(%esp) pxor %xmm4,%xmm4 movdqa %xmm0,32(%esp) pxor %xmm5,%xmm5 movdqa %xmm0,48(%esp) pxor %xmm6,%xmm6 movdqa %xmm0,64(%esp) pxor %xmm7,%xmm7 movdqa %xmm0,80(%esp) movl 116(%esp),%esp popl %edi popl %esi popl %ebx popl %ebp ret .globl _aes_hw_cbc_encrypt .private_extern _aes_hw_cbc_encrypt .align 4 _aes_hw_cbc_encrypt: L_aes_hw_cbc_encrypt_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi movl %esp,%ebx movl 24(%esp),%edi subl $24,%ebx movl 28(%esp),%eax andl $-16,%ebx movl 32(%esp),%edx movl 36(%esp),%ebp testl %eax,%eax jz L076cbc_abort cmpl $0,40(%esp) xchgl %esp,%ebx movups (%ebp),%xmm7 
movl 240(%edx),%ecx movl %edx,%ebp movl %ebx,16(%esp) movl %ecx,%ebx je L077cbc_decrypt movaps %xmm7,%xmm2 cmpl $16,%eax jb L078cbc_enc_tail subl $16,%eax jmp L079cbc_enc_loop .align 4,0x90 L079cbc_enc_loop: movups (%esi),%xmm7 leal 16(%esi),%esi movups (%edx),%xmm0 movups 16(%edx),%xmm1 xorps %xmm0,%xmm7 leal 32(%edx),%edx xorps %xmm7,%xmm2 L080enc1_loop_15: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz L080enc1_loop_15 .byte 102,15,56,221,209 movl %ebx,%ecx movl %ebp,%edx movups %xmm2,(%edi) leal 16(%edi),%edi subl $16,%eax jnc L079cbc_enc_loop addl $16,%eax jnz L078cbc_enc_tail movaps %xmm2,%xmm7 pxor %xmm2,%xmm2 jmp L081cbc_ret L078cbc_enc_tail: movl %eax,%ecx .long 2767451785 movl $16,%ecx subl %eax,%ecx xorl %eax,%eax .long 2868115081 leal -16(%edi),%edi movl %ebx,%ecx movl %edi,%esi movl %ebp,%edx jmp L079cbc_enc_loop .align 4,0x90 L077cbc_decrypt: cmpl $80,%eax jbe L082cbc_dec_tail movaps %xmm7,(%esp) subl $80,%eax jmp L083cbc_dec_loop6_enter .align 4,0x90 L084cbc_dec_loop6: movaps %xmm0,(%esp) movups %xmm7,(%edi) leal 16(%edi),%edi L083cbc_dec_loop6_enter: movdqu (%esi),%xmm2 movdqu 16(%esi),%xmm3 movdqu 32(%esi),%xmm4 movdqu 48(%esi),%xmm5 movdqu 64(%esi),%xmm6 movdqu 80(%esi),%xmm7 call __aesni_decrypt6 movups (%esi),%xmm1 movups 16(%esi),%xmm0 xorps (%esp),%xmm2 xorps %xmm1,%xmm3 movups 32(%esi),%xmm1 xorps %xmm0,%xmm4 movups 48(%esi),%xmm0 xorps %xmm1,%xmm5 movups 64(%esi),%xmm1 xorps %xmm0,%xmm6 movups 80(%esi),%xmm0 xorps %xmm1,%xmm7 movups %xmm2,(%edi) movups %xmm3,16(%edi) leal 96(%esi),%esi movups %xmm4,32(%edi) movl %ebx,%ecx movups %xmm5,48(%edi) movl %ebp,%edx movups %xmm6,64(%edi) leal 80(%edi),%edi subl $96,%eax ja L084cbc_dec_loop6 movaps %xmm7,%xmm2 movaps %xmm0,%xmm7 addl $80,%eax jle L085cbc_dec_clear_tail_collected movups %xmm2,(%edi) leal 16(%edi),%edi L082cbc_dec_tail: movups (%esi),%xmm2 movaps %xmm2,%xmm6 cmpl $16,%eax jbe L086cbc_dec_one movups 16(%esi),%xmm3 movaps %xmm3,%xmm5 cmpl $32,%eax jbe L087cbc_dec_two movups 32(%esi),%xmm4 cmpl $48,%eax jbe L088cbc_dec_three movups 48(%esi),%xmm5 cmpl $64,%eax jbe L089cbc_dec_four movups 64(%esi),%xmm6 movaps %xmm7,(%esp) movups (%esi),%xmm2 xorps %xmm7,%xmm7 call __aesni_decrypt6 movups (%esi),%xmm1 movups 16(%esi),%xmm0 xorps (%esp),%xmm2 xorps %xmm1,%xmm3 movups 32(%esi),%xmm1 xorps %xmm0,%xmm4 movups 48(%esi),%xmm0 xorps %xmm1,%xmm5 movups 64(%esi),%xmm7 xorps %xmm0,%xmm6 movups %xmm2,(%edi) movups %xmm3,16(%edi) pxor %xmm3,%xmm3 movups %xmm4,32(%edi) pxor %xmm4,%xmm4 movups %xmm5,48(%edi) pxor %xmm5,%xmm5 leal 64(%edi),%edi movaps %xmm6,%xmm2 pxor %xmm6,%xmm6 subl $80,%eax jmp L090cbc_dec_tail_collected .align 4,0x90 L086cbc_dec_one: movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 L091dec1_loop_16: .byte 102,15,56,222,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz L091dec1_loop_16 .byte 102,15,56,223,209 xorps %xmm7,%xmm2 movaps %xmm6,%xmm7 subl $16,%eax jmp L090cbc_dec_tail_collected .align 4,0x90 L087cbc_dec_two: call __aesni_decrypt2 xorps %xmm7,%xmm2 xorps %xmm6,%xmm3 movups %xmm2,(%edi) movaps %xmm3,%xmm2 pxor %xmm3,%xmm3 leal 16(%edi),%edi movaps %xmm5,%xmm7 subl $32,%eax jmp L090cbc_dec_tail_collected .align 4,0x90 L088cbc_dec_three: call __aesni_decrypt3 xorps %xmm7,%xmm2 xorps %xmm6,%xmm3 xorps %xmm5,%xmm4 movups %xmm2,(%edi) movaps %xmm4,%xmm2 pxor %xmm4,%xmm4 movups %xmm3,16(%edi) pxor %xmm3,%xmm3 leal 32(%edi),%edi movups 32(%esi),%xmm7 subl $48,%eax jmp L090cbc_dec_tail_collected .align 4,0x90 L089cbc_dec_four: call 
__aesni_decrypt4 movups 16(%esi),%xmm1 movups 32(%esi),%xmm0 xorps %xmm7,%xmm2 movups 48(%esi),%xmm7 xorps %xmm6,%xmm3 movups %xmm2,(%edi) xorps %xmm1,%xmm4 movups %xmm3,16(%edi) pxor %xmm3,%xmm3 xorps %xmm0,%xmm5 movups %xmm4,32(%edi) pxor %xmm4,%xmm4 leal 48(%edi),%edi movaps %xmm5,%xmm2 pxor %xmm5,%xmm5 subl $64,%eax jmp L090cbc_dec_tail_collected .align 4,0x90 L085cbc_dec_clear_tail_collected: pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 L090cbc_dec_tail_collected: andl $15,%eax jnz L092cbc_dec_tail_partial movups %xmm2,(%edi) pxor %xmm0,%xmm0 jmp L081cbc_ret .align 4,0x90 L092cbc_dec_tail_partial: movaps %xmm2,(%esp) pxor %xmm0,%xmm0 movl $16,%ecx movl %esp,%esi subl %eax,%ecx .long 2767451785 movdqa %xmm2,(%esp) L081cbc_ret: movl 16(%esp),%esp movl 36(%esp),%ebp pxor %xmm2,%xmm2 pxor %xmm1,%xmm1 movups %xmm7,(%ebp) pxor %xmm7,%xmm7 L076cbc_abort: popl %edi popl %esi popl %ebx popl %ebp ret .private_extern __aesni_set_encrypt_key .align 4 __aesni_set_encrypt_key: pushl %ebp pushl %ebx testl %eax,%eax jz L093bad_pointer testl %edx,%edx jz L093bad_pointer call L094pic L094pic: popl %ebx leal Lkey_const-L094pic(%ebx),%ebx movl L_OPENSSL_ia32cap_P$non_lazy_ptr-Lkey_const(%ebx),%ebp movups (%eax),%xmm0 xorps %xmm4,%xmm4 movl 4(%ebp),%ebp leal 16(%edx),%edx andl $268437504,%ebp cmpl $256,%ecx je L09514rounds cmpl $192,%ecx je L09612rounds cmpl $128,%ecx jne L097bad_keybits .align 4,0x90 L09810rounds: cmpl $268435456,%ebp je L09910rounds_alt movl $9,%ecx movups %xmm0,-16(%edx) .byte 102,15,58,223,200,1 call L100key_128_cold .byte 102,15,58,223,200,2 call L101key_128 .byte 102,15,58,223,200,4 call L101key_128 .byte 102,15,58,223,200,8 call L101key_128 .byte 102,15,58,223,200,16 call L101key_128 .byte 102,15,58,223,200,32 call L101key_128 .byte 102,15,58,223,200,64 call L101key_128 .byte 102,15,58,223,200,128 call L101key_128 .byte 102,15,58,223,200,27 call L101key_128 .byte 102,15,58,223,200,54 call L101key_128 movups %xmm0,(%edx) movl %ecx,80(%edx) jmp L102good_key .align 4,0x90 L101key_128: movups %xmm0,(%edx) leal 16(%edx),%edx L100key_128_cold: shufps $16,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $140,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $255,%xmm1,%xmm1 xorps %xmm1,%xmm0 ret .align 4,0x90 L09910rounds_alt: movdqa (%ebx),%xmm5 movl $8,%ecx movdqa 32(%ebx),%xmm4 movdqa %xmm0,%xmm2 movdqu %xmm0,-16(%edx) L103loop_key128: .byte 102,15,56,0,197 .byte 102,15,56,221,196 pslld $1,%xmm4 leal 16(%edx),%edx movdqa %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm3,%xmm2 pxor %xmm2,%xmm0 movdqu %xmm0,-16(%edx) movdqa %xmm0,%xmm2 decl %ecx jnz L103loop_key128 movdqa 48(%ebx),%xmm4 .byte 102,15,56,0,197 .byte 102,15,56,221,196 pslld $1,%xmm4 movdqa %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm3,%xmm2 pxor %xmm2,%xmm0 movdqu %xmm0,(%edx) movdqa %xmm0,%xmm2 .byte 102,15,56,0,197 .byte 102,15,56,221,196 movdqa %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm3,%xmm2 pxor %xmm2,%xmm0 movdqu %xmm0,16(%edx) movl $9,%ecx movl %ecx,96(%edx) jmp L102good_key .align 4,0x90 L09612rounds: movq 16(%eax),%xmm2 cmpl $268435456,%ebp je L10412rounds_alt movl $11,%ecx movups %xmm0,-16(%edx) .byte 102,15,58,223,202,1 call L105key_192a_cold .byte 102,15,58,223,202,2 call L106key_192b .byte 102,15,58,223,202,4 call L107key_192a .byte 102,15,58,223,202,8 call L106key_192b .byte 102,15,58,223,202,16 call L107key_192a .byte 102,15,58,223,202,32 
call L106key_192b .byte 102,15,58,223,202,64 call L107key_192a .byte 102,15,58,223,202,128 call L106key_192b movups %xmm0,(%edx) movl %ecx,48(%edx) jmp L102good_key .align 4,0x90 L107key_192a: movups %xmm0,(%edx) leal 16(%edx),%edx .align 4,0x90 L105key_192a_cold: movaps %xmm2,%xmm5 L108key_192b_warm: shufps $16,%xmm0,%xmm4 movdqa %xmm2,%xmm3 xorps %xmm4,%xmm0 shufps $140,%xmm0,%xmm4 pslldq $4,%xmm3 xorps %xmm4,%xmm0 pshufd $85,%xmm1,%xmm1 pxor %xmm3,%xmm2 pxor %xmm1,%xmm0 pshufd $255,%xmm0,%xmm3 pxor %xmm3,%xmm2 ret .align 4,0x90 L106key_192b: movaps %xmm0,%xmm3 shufps $68,%xmm0,%xmm5 movups %xmm5,(%edx) shufps $78,%xmm2,%xmm3 movups %xmm3,16(%edx) leal 32(%edx),%edx jmp L108key_192b_warm .align 4,0x90 L10412rounds_alt: movdqa 16(%ebx),%xmm5 movdqa 32(%ebx),%xmm4 movl $8,%ecx movdqu %xmm0,-16(%edx) L109loop_key192: movq %xmm2,(%edx) movdqa %xmm2,%xmm1 .byte 102,15,56,0,213 .byte 102,15,56,221,212 pslld $1,%xmm4 leal 24(%edx),%edx movdqa %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm3,%xmm0 pshufd $255,%xmm0,%xmm3 pxor %xmm1,%xmm3 pslldq $4,%xmm1 pxor %xmm1,%xmm3 pxor %xmm2,%xmm0 pxor %xmm3,%xmm2 movdqu %xmm0,-16(%edx) decl %ecx jnz L109loop_key192 movl $11,%ecx movl %ecx,32(%edx) jmp L102good_key .align 4,0x90 L09514rounds: movups 16(%eax),%xmm2 leal 16(%edx),%edx cmpl $268435456,%ebp je L11014rounds_alt movl $13,%ecx movups %xmm0,-32(%edx) movups %xmm2,-16(%edx) .byte 102,15,58,223,202,1 call L111key_256a_cold .byte 102,15,58,223,200,1 call L112key_256b .byte 102,15,58,223,202,2 call L113key_256a .byte 102,15,58,223,200,2 call L112key_256b .byte 102,15,58,223,202,4 call L113key_256a .byte 102,15,58,223,200,4 call L112key_256b .byte 102,15,58,223,202,8 call L113key_256a .byte 102,15,58,223,200,8 call L112key_256b .byte 102,15,58,223,202,16 call L113key_256a .byte 102,15,58,223,200,16 call L112key_256b .byte 102,15,58,223,202,32 call L113key_256a .byte 102,15,58,223,200,32 call L112key_256b .byte 102,15,58,223,202,64 call L113key_256a movups %xmm0,(%edx) movl %ecx,16(%edx) xorl %eax,%eax jmp L102good_key .align 4,0x90 L113key_256a: movups %xmm2,(%edx) leal 16(%edx),%edx L111key_256a_cold: shufps $16,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $140,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $255,%xmm1,%xmm1 xorps %xmm1,%xmm0 ret .align 4,0x90 L112key_256b: movups %xmm0,(%edx) leal 16(%edx),%edx shufps $16,%xmm2,%xmm4 xorps %xmm4,%xmm2 shufps $140,%xmm2,%xmm4 xorps %xmm4,%xmm2 shufps $170,%xmm1,%xmm1 xorps %xmm1,%xmm2 ret .align 4,0x90 L11014rounds_alt: movdqa (%ebx),%xmm5 movdqa 32(%ebx),%xmm4 movl $7,%ecx movdqu %xmm0,-32(%edx) movdqa %xmm2,%xmm1 movdqu %xmm2,-16(%edx) L114loop_key256: .byte 102,15,56,0,213 .byte 102,15,56,221,212 movdqa %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm3,%xmm0 pslld $1,%xmm4 pxor %xmm2,%xmm0 movdqu %xmm0,(%edx) decl %ecx jz L115done_key256 pshufd $255,%xmm0,%xmm2 pxor %xmm3,%xmm3 .byte 102,15,56,221,211 movdqa %xmm1,%xmm3 pslldq $4,%xmm1 pxor %xmm1,%xmm3 pslldq $4,%xmm1 pxor %xmm1,%xmm3 pslldq $4,%xmm1 pxor %xmm3,%xmm1 pxor %xmm1,%xmm2 movdqu %xmm2,16(%edx) leal 32(%edx),%edx movdqa %xmm2,%xmm1 jmp L114loop_key256 L115done_key256: movl $13,%ecx movl %ecx,16(%edx) L102good_key: pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 xorl %eax,%eax popl %ebx popl %ebp ret .align 2,0x90 L093bad_pointer: movl $-1,%eax popl %ebx popl %ebp ret .align 2,0x90 L097bad_keybits: pxor %xmm0,%xmm0 movl $-2,%eax popl %ebx popl 
%ebp ret .globl _aes_hw_set_encrypt_key .private_extern _aes_hw_set_encrypt_key .align 4 _aes_hw_set_encrypt_key: L_aes_hw_set_encrypt_key_begin: #ifdef BORINGSSL_DISPATCH_TEST pushl %ebx pushl %edx call L116pic L116pic: popl %ebx leal _BORINGSSL_function_hit+3-L116pic(%ebx),%ebx movl $1,%edx movb %dl,(%ebx) popl %edx popl %ebx #endif movl 4(%esp),%eax movl 8(%esp),%ecx movl 12(%esp),%edx call __aesni_set_encrypt_key ret .globl _aes_hw_set_decrypt_key .private_extern _aes_hw_set_decrypt_key .align 4 _aes_hw_set_decrypt_key: L_aes_hw_set_decrypt_key_begin: movl 4(%esp),%eax movl 8(%esp),%ecx movl 12(%esp),%edx call __aesni_set_encrypt_key movl 12(%esp),%edx shll $4,%ecx testl %eax,%eax jnz L117dec_key_ret leal 16(%edx,%ecx,1),%eax movups (%edx),%xmm0 movups (%eax),%xmm1 movups %xmm0,(%eax) movups %xmm1,(%edx) leal 16(%edx),%edx leal -16(%eax),%eax L118dec_key_inverse: movups (%edx),%xmm0 movups (%eax),%xmm1 .byte 102,15,56,219,192 .byte 102,15,56,219,201 leal 16(%edx),%edx leal -16(%eax),%eax movups %xmm0,16(%eax) movups %xmm1,-16(%edx) cmpl %edx,%eax ja L118dec_key_inverse movups (%edx),%xmm0 .byte 102,15,56,219,192 movups %xmm0,(%edx) pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 xorl %eax,%eax L117dec_key_ret: ret .align 6,0x90 Lkey_const: .long 202313229,202313229,202313229,202313229 .long 67569157,67569157,67569157,67569157 .long 1,1,1,1 .long 27,27,27,27 .byte 65,69,83,32,102,111,114,32,73,110,116,101,108,32,65,69 .byte 83,45,78,73,44,32,67,82,89,80,84,79,71,65,77,83 .byte 32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115 .byte 115,108,46,111,114,103,62,0 .section __IMPORT,__pointers,non_lazy_symbol_pointers L_OPENSSL_ia32cap_P$non_lazy_ptr: .indirect_symbol _OPENSSL_ia32cap_P .long 0 #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__)
marvin-hansen/iggy-streaming-system
5,508
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/mac-x86/crypto/fipsmodule/ghash-ssse3-x86.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__) .text .globl _gcm_gmult_ssse3 .private_extern _gcm_gmult_ssse3 .align 4 _gcm_gmult_ssse3: L_gcm_gmult_ssse3_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%edi movl 24(%esp),%esi movdqu (%edi),%xmm0 call L000pic_point L000pic_point: popl %eax movdqa Lreverse_bytes-L000pic_point(%eax),%xmm7 movdqa Llow4_mask-L000pic_point(%eax),%xmm2 .byte 102,15,56,0,199 movdqa %xmm2,%xmm1 pandn %xmm0,%xmm1 psrld $4,%xmm1 pand %xmm2,%xmm0 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 movl $5,%eax L001loop_row_1: movdqa (%esi),%xmm4 leal 16(%esi),%esi movdqa %xmm2,%xmm6 .byte 102,15,58,15,243,1 movdqa %xmm6,%xmm3 psrldq $1,%xmm2 movdqa %xmm4,%xmm5 .byte 102,15,56,0,224 .byte 102,15,56,0,233 pxor %xmm5,%xmm2 movdqa %xmm4,%xmm5 psllq $60,%xmm5 movdqa %xmm5,%xmm6 pslldq $8,%xmm6 pxor %xmm6,%xmm3 psrldq $8,%xmm5 pxor %xmm5,%xmm2 psrlq $4,%xmm4 pxor %xmm4,%xmm2 subl $1,%eax jnz L001loop_row_1 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $5,%xmm3 pxor %xmm3,%xmm2 pxor %xmm3,%xmm3 movl $5,%eax L002loop_row_2: movdqa (%esi),%xmm4 leal 16(%esi),%esi movdqa %xmm2,%xmm6 .byte 102,15,58,15,243,1 movdqa %xmm6,%xmm3 psrldq $1,%xmm2 movdqa %xmm4,%xmm5 .byte 102,15,56,0,224 .byte 102,15,56,0,233 pxor %xmm5,%xmm2 movdqa %xmm4,%xmm5 psllq $60,%xmm5 movdqa %xmm5,%xmm6 pslldq $8,%xmm6 pxor %xmm6,%xmm3 psrldq $8,%xmm5 pxor %xmm5,%xmm2 psrlq $4,%xmm4 pxor %xmm4,%xmm2 subl $1,%eax jnz L002loop_row_2 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $5,%xmm3 pxor %xmm3,%xmm2 pxor %xmm3,%xmm3 movl $6,%eax L003loop_row_3: movdqa (%esi),%xmm4 leal 16(%esi),%esi movdqa %xmm2,%xmm6 .byte 102,15,58,15,243,1 movdqa %xmm6,%xmm3 psrldq $1,%xmm2 movdqa %xmm4,%xmm5 .byte 102,15,56,0,224 .byte 102,15,56,0,233 pxor %xmm5,%xmm2 movdqa %xmm4,%xmm5 psllq $60,%xmm5 movdqa %xmm5,%xmm6 pslldq $8,%xmm6 pxor %xmm6,%xmm3 psrldq $8,%xmm5 pxor %xmm5,%xmm2 psrlq $4,%xmm4 pxor %xmm4,%xmm2 subl $1,%eax jnz L003loop_row_3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $5,%xmm3 pxor %xmm3,%xmm2 pxor %xmm3,%xmm3 .byte 102,15,56,0,215 movdqu %xmm2,(%edi) pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 popl %edi popl %esi popl %ebx popl %ebp ret .globl _gcm_ghash_ssse3 .private_extern _gcm_ghash_ssse3 .align 4 _gcm_ghash_ssse3: L_gcm_ghash_ssse3_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%edi movl 24(%esp),%esi movl 28(%esp),%edx movl 32(%esp),%ecx movdqu (%edi),%xmm0 call L004pic_point L004pic_point: popl %ebx movdqa Lreverse_bytes-L004pic_point(%ebx),%xmm7 andl $-16,%ecx .byte 102,15,56,0,199 pxor %xmm3,%xmm3 L005loop_ghash: movdqa Llow4_mask-L004pic_point(%ebx),%xmm2 movdqu (%edx),%xmm1 .byte 102,15,56,0,207 pxor %xmm1,%xmm0 movdqa %xmm2,%xmm1 pandn %xmm0,%xmm1 psrld $4,%xmm1 pand %xmm2,%xmm0 pxor %xmm2,%xmm2 movl $5,%eax L006loop_row_4: movdqa (%esi),%xmm4 leal 16(%esi),%esi movdqa %xmm2,%xmm6 .byte 102,15,58,15,243,1 movdqa %xmm6,%xmm3 psrldq $1,%xmm2 movdqa %xmm4,%xmm5 .byte 102,15,56,0,224 .byte 102,15,56,0,233 pxor %xmm5,%xmm2 movdqa %xmm4,%xmm5 psllq $60,%xmm5 movdqa %xmm5,%xmm6 pslldq $8,%xmm6 pxor %xmm6,%xmm3 psrldq $8,%xmm5 pxor %xmm5,%xmm2 psrlq $4,%xmm4 pxor %xmm4,%xmm2 subl $1,%eax jnz L006loop_row_4 pxor %xmm3,%xmm2 
psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $5,%xmm3 pxor %xmm3,%xmm2 pxor %xmm3,%xmm3 movl $5,%eax L007loop_row_5: movdqa (%esi),%xmm4 leal 16(%esi),%esi movdqa %xmm2,%xmm6 .byte 102,15,58,15,243,1 movdqa %xmm6,%xmm3 psrldq $1,%xmm2 movdqa %xmm4,%xmm5 .byte 102,15,56,0,224 .byte 102,15,56,0,233 pxor %xmm5,%xmm2 movdqa %xmm4,%xmm5 psllq $60,%xmm5 movdqa %xmm5,%xmm6 pslldq $8,%xmm6 pxor %xmm6,%xmm3 psrldq $8,%xmm5 pxor %xmm5,%xmm2 psrlq $4,%xmm4 pxor %xmm4,%xmm2 subl $1,%eax jnz L007loop_row_5 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $5,%xmm3 pxor %xmm3,%xmm2 pxor %xmm3,%xmm3 movl $6,%eax L008loop_row_6: movdqa (%esi),%xmm4 leal 16(%esi),%esi movdqa %xmm2,%xmm6 .byte 102,15,58,15,243,1 movdqa %xmm6,%xmm3 psrldq $1,%xmm2 movdqa %xmm4,%xmm5 .byte 102,15,56,0,224 .byte 102,15,56,0,233 pxor %xmm5,%xmm2 movdqa %xmm4,%xmm5 psllq $60,%xmm5 movdqa %xmm5,%xmm6 pslldq $8,%xmm6 pxor %xmm6,%xmm3 psrldq $8,%xmm5 pxor %xmm5,%xmm2 psrlq $4,%xmm4 pxor %xmm4,%xmm2 subl $1,%eax jnz L008loop_row_6 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $5,%xmm3 pxor %xmm3,%xmm2 pxor %xmm3,%xmm3 movdqa %xmm2,%xmm0 leal -256(%esi),%esi leal 16(%edx),%edx subl $16,%ecx jnz L005loop_ghash .byte 102,15,56,0,199 movdqu %xmm0,(%edi) pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 popl %edi popl %esi popl %ebx popl %ebp ret .align 4,0x90 Lreverse_bytes: .byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 .align 4,0x90 Llow4_mask: .long 252645135,252645135,252645135,252645135 #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__)
marvin-hansen/iggy-streaming-system
16,233
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/mac-x86/crypto/fipsmodule/bn-586.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__) .text .globl _bn_mul_add_words .private_extern _bn_mul_add_words .align 4 _bn_mul_add_words: L_bn_mul_add_words_begin: call L000PIC_me_up L000PIC_me_up: popl %eax movl L_OPENSSL_ia32cap_P$non_lazy_ptr-L000PIC_me_up(%eax),%eax btl $26,(%eax) jnc L001maw_non_sse2 movl 4(%esp),%eax movl 8(%esp),%edx movl 12(%esp),%ecx movd 16(%esp),%mm0 pxor %mm1,%mm1 jmp L002maw_sse2_entry .align 4,0x90 L003maw_sse2_unrolled: movd (%eax),%mm3 paddq %mm3,%mm1 movd (%edx),%mm2 pmuludq %mm0,%mm2 movd 4(%edx),%mm4 pmuludq %mm0,%mm4 movd 8(%edx),%mm6 pmuludq %mm0,%mm6 movd 12(%edx),%mm7 pmuludq %mm0,%mm7 paddq %mm2,%mm1 movd 4(%eax),%mm3 paddq %mm4,%mm3 movd 8(%eax),%mm5 paddq %mm6,%mm5 movd 12(%eax),%mm4 paddq %mm4,%mm7 movd %mm1,(%eax) movd 16(%edx),%mm2 pmuludq %mm0,%mm2 psrlq $32,%mm1 movd 20(%edx),%mm4 pmuludq %mm0,%mm4 paddq %mm3,%mm1 movd 24(%edx),%mm6 pmuludq %mm0,%mm6 movd %mm1,4(%eax) psrlq $32,%mm1 movd 28(%edx),%mm3 addl $32,%edx pmuludq %mm0,%mm3 paddq %mm5,%mm1 movd 16(%eax),%mm5 paddq %mm5,%mm2 movd %mm1,8(%eax) psrlq $32,%mm1 paddq %mm7,%mm1 movd 20(%eax),%mm5 paddq %mm5,%mm4 movd %mm1,12(%eax) psrlq $32,%mm1 paddq %mm2,%mm1 movd 24(%eax),%mm5 paddq %mm5,%mm6 movd %mm1,16(%eax) psrlq $32,%mm1 paddq %mm4,%mm1 movd 28(%eax),%mm5 paddq %mm5,%mm3 movd %mm1,20(%eax) psrlq $32,%mm1 paddq %mm6,%mm1 movd %mm1,24(%eax) psrlq $32,%mm1 paddq %mm3,%mm1 movd %mm1,28(%eax) leal 32(%eax),%eax psrlq $32,%mm1 subl $8,%ecx jz L004maw_sse2_exit L002maw_sse2_entry: testl $4294967288,%ecx jnz L003maw_sse2_unrolled .align 2,0x90 L005maw_sse2_loop: movd (%edx),%mm2 movd (%eax),%mm3 pmuludq %mm0,%mm2 leal 4(%edx),%edx paddq %mm3,%mm1 paddq %mm2,%mm1 movd %mm1,(%eax) subl $1,%ecx psrlq $32,%mm1 leal 4(%eax),%eax jnz L005maw_sse2_loop L004maw_sse2_exit: movd %mm1,%eax emms ret .align 4,0x90 L001maw_non_sse2: pushl %ebp pushl %ebx pushl %esi pushl %edi xorl %esi,%esi movl 20(%esp),%edi movl 28(%esp),%ecx movl 24(%esp),%ebx andl $4294967288,%ecx movl 32(%esp),%ebp pushl %ecx jz L006maw_finish .align 4,0x90 L007maw_loop: # Round 0 movl (%ebx),%eax mull %ebp addl %esi,%eax adcl $0,%edx addl (%edi),%eax adcl $0,%edx movl %eax,(%edi) movl %edx,%esi # Round 4 movl 4(%ebx),%eax mull %ebp addl %esi,%eax adcl $0,%edx addl 4(%edi),%eax adcl $0,%edx movl %eax,4(%edi) movl %edx,%esi # Round 8 movl 8(%ebx),%eax mull %ebp addl %esi,%eax adcl $0,%edx addl 8(%edi),%eax adcl $0,%edx movl %eax,8(%edi) movl %edx,%esi # Round 12 movl 12(%ebx),%eax mull %ebp addl %esi,%eax adcl $0,%edx addl 12(%edi),%eax adcl $0,%edx movl %eax,12(%edi) movl %edx,%esi # Round 16 movl 16(%ebx),%eax mull %ebp addl %esi,%eax adcl $0,%edx addl 16(%edi),%eax adcl $0,%edx movl %eax,16(%edi) movl %edx,%esi # Round 20 movl 20(%ebx),%eax mull %ebp addl %esi,%eax adcl $0,%edx addl 20(%edi),%eax adcl $0,%edx movl %eax,20(%edi) movl %edx,%esi # Round 24 movl 24(%ebx),%eax mull %ebp addl %esi,%eax adcl $0,%edx addl 24(%edi),%eax adcl $0,%edx movl %eax,24(%edi) movl %edx,%esi # Round 28 movl 28(%ebx),%eax mull %ebp addl %esi,%eax adcl $0,%edx addl 28(%edi),%eax adcl $0,%edx movl %eax,28(%edi) movl %edx,%esi subl $8,%ecx leal 32(%ebx),%ebx leal 32(%edi),%edi jnz L007maw_loop L006maw_finish: movl 32(%esp),%ecx andl $7,%ecx jnz L008maw_finish2 jmp L009maw_end L008maw_finish2: # Tail Round 0 movl (%ebx),%eax mull %ebp addl %esi,%eax adcl $0,%edx addl (%edi),%eax 
adcl $0,%edx decl %ecx movl %eax,(%edi) movl %edx,%esi jz L009maw_end # Tail Round 1 movl 4(%ebx),%eax mull %ebp addl %esi,%eax adcl $0,%edx addl 4(%edi),%eax adcl $0,%edx decl %ecx movl %eax,4(%edi) movl %edx,%esi jz L009maw_end # Tail Round 2 movl 8(%ebx),%eax mull %ebp addl %esi,%eax adcl $0,%edx addl 8(%edi),%eax adcl $0,%edx decl %ecx movl %eax,8(%edi) movl %edx,%esi jz L009maw_end # Tail Round 3 movl 12(%ebx),%eax mull %ebp addl %esi,%eax adcl $0,%edx addl 12(%edi),%eax adcl $0,%edx decl %ecx movl %eax,12(%edi) movl %edx,%esi jz L009maw_end # Tail Round 4 movl 16(%ebx),%eax mull %ebp addl %esi,%eax adcl $0,%edx addl 16(%edi),%eax adcl $0,%edx decl %ecx movl %eax,16(%edi) movl %edx,%esi jz L009maw_end # Tail Round 5 movl 20(%ebx),%eax mull %ebp addl %esi,%eax adcl $0,%edx addl 20(%edi),%eax adcl $0,%edx decl %ecx movl %eax,20(%edi) movl %edx,%esi jz L009maw_end # Tail Round 6 movl 24(%ebx),%eax mull %ebp addl %esi,%eax adcl $0,%edx addl 24(%edi),%eax adcl $0,%edx movl %eax,24(%edi) movl %edx,%esi L009maw_end: movl %esi,%eax popl %ecx popl %edi popl %esi popl %ebx popl %ebp ret .globl _bn_mul_words .private_extern _bn_mul_words .align 4 _bn_mul_words: L_bn_mul_words_begin: call L010PIC_me_up L010PIC_me_up: popl %eax movl L_OPENSSL_ia32cap_P$non_lazy_ptr-L010PIC_me_up(%eax),%eax btl $26,(%eax) jnc L011mw_non_sse2 movl 4(%esp),%eax movl 8(%esp),%edx movl 12(%esp),%ecx movd 16(%esp),%mm0 pxor %mm1,%mm1 .align 4,0x90 L012mw_sse2_loop: movd (%edx),%mm2 pmuludq %mm0,%mm2 leal 4(%edx),%edx paddq %mm2,%mm1 movd %mm1,(%eax) subl $1,%ecx psrlq $32,%mm1 leal 4(%eax),%eax jnz L012mw_sse2_loop movd %mm1,%eax emms ret .align 4,0x90 L011mw_non_sse2: pushl %ebp pushl %ebx pushl %esi pushl %edi xorl %esi,%esi movl 20(%esp),%edi movl 24(%esp),%ebx movl 28(%esp),%ebp movl 32(%esp),%ecx andl $4294967288,%ebp jz L013mw_finish L014mw_loop: # Round 0 movl (%ebx),%eax mull %ecx addl %esi,%eax adcl $0,%edx movl %eax,(%edi) movl %edx,%esi # Round 4 movl 4(%ebx),%eax mull %ecx addl %esi,%eax adcl $0,%edx movl %eax,4(%edi) movl %edx,%esi # Round 8 movl 8(%ebx),%eax mull %ecx addl %esi,%eax adcl $0,%edx movl %eax,8(%edi) movl %edx,%esi # Round 12 movl 12(%ebx),%eax mull %ecx addl %esi,%eax adcl $0,%edx movl %eax,12(%edi) movl %edx,%esi # Round 16 movl 16(%ebx),%eax mull %ecx addl %esi,%eax adcl $0,%edx movl %eax,16(%edi) movl %edx,%esi # Round 20 movl 20(%ebx),%eax mull %ecx addl %esi,%eax adcl $0,%edx movl %eax,20(%edi) movl %edx,%esi # Round 24 movl 24(%ebx),%eax mull %ecx addl %esi,%eax adcl $0,%edx movl %eax,24(%edi) movl %edx,%esi # Round 28 movl 28(%ebx),%eax mull %ecx addl %esi,%eax adcl $0,%edx movl %eax,28(%edi) movl %edx,%esi addl $32,%ebx addl $32,%edi subl $8,%ebp jz L013mw_finish jmp L014mw_loop L013mw_finish: movl 28(%esp),%ebp andl $7,%ebp jnz L015mw_finish2 jmp L016mw_end L015mw_finish2: # Tail Round 0 movl (%ebx),%eax mull %ecx addl %esi,%eax adcl $0,%edx movl %eax,(%edi) movl %edx,%esi decl %ebp jz L016mw_end # Tail Round 1 movl 4(%ebx),%eax mull %ecx addl %esi,%eax adcl $0,%edx movl %eax,4(%edi) movl %edx,%esi decl %ebp jz L016mw_end # Tail Round 2 movl 8(%ebx),%eax mull %ecx addl %esi,%eax adcl $0,%edx movl %eax,8(%edi) movl %edx,%esi decl %ebp jz L016mw_end # Tail Round 3 movl 12(%ebx),%eax mull %ecx addl %esi,%eax adcl $0,%edx movl %eax,12(%edi) movl %edx,%esi decl %ebp jz L016mw_end # Tail Round 4 movl 16(%ebx),%eax mull %ecx addl %esi,%eax adcl $0,%edx movl %eax,16(%edi) movl %edx,%esi decl %ebp jz L016mw_end # Tail Round 5 movl 20(%ebx),%eax mull %ecx addl %esi,%eax adcl $0,%edx movl 
%eax,20(%edi) movl %edx,%esi decl %ebp jz L016mw_end # Tail Round 6 movl 24(%ebx),%eax mull %ecx addl %esi,%eax adcl $0,%edx movl %eax,24(%edi) movl %edx,%esi L016mw_end: movl %esi,%eax popl %edi popl %esi popl %ebx popl %ebp ret .globl _bn_sqr_words .private_extern _bn_sqr_words .align 4 _bn_sqr_words: L_bn_sqr_words_begin: call L017PIC_me_up L017PIC_me_up: popl %eax movl L_OPENSSL_ia32cap_P$non_lazy_ptr-L017PIC_me_up(%eax),%eax btl $26,(%eax) jnc L018sqr_non_sse2 movl 4(%esp),%eax movl 8(%esp),%edx movl 12(%esp),%ecx .align 4,0x90 L019sqr_sse2_loop: movd (%edx),%mm0 pmuludq %mm0,%mm0 leal 4(%edx),%edx movq %mm0,(%eax) subl $1,%ecx leal 8(%eax),%eax jnz L019sqr_sse2_loop emms ret .align 4,0x90 L018sqr_non_sse2: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%ebx andl $4294967288,%ebx jz L020sw_finish L021sw_loop: # Round 0 movl (%edi),%eax mull %eax movl %eax,(%esi) movl %edx,4(%esi) # Round 4 movl 4(%edi),%eax mull %eax movl %eax,8(%esi) movl %edx,12(%esi) # Round 8 movl 8(%edi),%eax mull %eax movl %eax,16(%esi) movl %edx,20(%esi) # Round 12 movl 12(%edi),%eax mull %eax movl %eax,24(%esi) movl %edx,28(%esi) # Round 16 movl 16(%edi),%eax mull %eax movl %eax,32(%esi) movl %edx,36(%esi) # Round 20 movl 20(%edi),%eax mull %eax movl %eax,40(%esi) movl %edx,44(%esi) # Round 24 movl 24(%edi),%eax mull %eax movl %eax,48(%esi) movl %edx,52(%esi) # Round 28 movl 28(%edi),%eax mull %eax movl %eax,56(%esi) movl %edx,60(%esi) addl $32,%edi addl $64,%esi subl $8,%ebx jnz L021sw_loop L020sw_finish: movl 28(%esp),%ebx andl $7,%ebx jz L022sw_end # Tail Round 0 movl (%edi),%eax mull %eax movl %eax,(%esi) decl %ebx movl %edx,4(%esi) jz L022sw_end # Tail Round 1 movl 4(%edi),%eax mull %eax movl %eax,8(%esi) decl %ebx movl %edx,12(%esi) jz L022sw_end # Tail Round 2 movl 8(%edi),%eax mull %eax movl %eax,16(%esi) decl %ebx movl %edx,20(%esi) jz L022sw_end # Tail Round 3 movl 12(%edi),%eax mull %eax movl %eax,24(%esi) decl %ebx movl %edx,28(%esi) jz L022sw_end # Tail Round 4 movl 16(%edi),%eax mull %eax movl %eax,32(%esi) decl %ebx movl %edx,36(%esi) jz L022sw_end # Tail Round 5 movl 20(%edi),%eax mull %eax movl %eax,40(%esi) decl %ebx movl %edx,44(%esi) jz L022sw_end # Tail Round 6 movl 24(%edi),%eax mull %eax movl %eax,48(%esi) movl %edx,52(%esi) L022sw_end: popl %edi popl %esi popl %ebx popl %ebp ret .globl _bn_div_words .private_extern _bn_div_words .align 4 _bn_div_words: L_bn_div_words_begin: movl 4(%esp),%edx movl 8(%esp),%eax movl 12(%esp),%ecx divl %ecx ret .globl _bn_add_words .private_extern _bn_add_words .align 4 _bn_add_words: L_bn_add_words_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%ebx movl 24(%esp),%esi movl 28(%esp),%edi movl 32(%esp),%ebp xorl %eax,%eax andl $4294967288,%ebp jz L023aw_finish L024aw_loop: # Round 0 movl (%esi),%ecx movl (%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax movl %ecx,(%ebx) # Round 1 movl 4(%esi),%ecx movl 4(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax movl %ecx,4(%ebx) # Round 2 movl 8(%esi),%ecx movl 8(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax movl %ecx,8(%ebx) # Round 3 movl 12(%esi),%ecx movl 12(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax movl %ecx,12(%ebx) # Round 4 movl 16(%esi),%ecx movl 16(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax movl %ecx,16(%ebx) # Round 5 movl 20(%esi),%ecx movl 20(%edi),%edx addl 
%eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax movl %ecx,20(%ebx) # Round 6 movl 24(%esi),%ecx movl 24(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax movl %ecx,24(%ebx) # Round 7 movl 28(%esi),%ecx movl 28(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax movl %ecx,28(%ebx) addl $32,%esi addl $32,%edi addl $32,%ebx subl $8,%ebp jnz L024aw_loop L023aw_finish: movl 32(%esp),%ebp andl $7,%ebp jz L025aw_end # Tail Round 0 movl (%esi),%ecx movl (%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,(%ebx) jz L025aw_end # Tail Round 1 movl 4(%esi),%ecx movl 4(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,4(%ebx) jz L025aw_end # Tail Round 2 movl 8(%esi),%ecx movl 8(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,8(%ebx) jz L025aw_end # Tail Round 3 movl 12(%esi),%ecx movl 12(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,12(%ebx) jz L025aw_end # Tail Round 4 movl 16(%esi),%ecx movl 16(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,16(%ebx) jz L025aw_end # Tail Round 5 movl 20(%esi),%ecx movl 20(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,20(%ebx) jz L025aw_end # Tail Round 6 movl 24(%esi),%ecx movl 24(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax movl %ecx,24(%ebx) L025aw_end: popl %edi popl %esi popl %ebx popl %ebp ret .globl _bn_sub_words .private_extern _bn_sub_words .align 4 _bn_sub_words: L_bn_sub_words_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%ebx movl 24(%esp),%esi movl 28(%esp),%edi movl 32(%esp),%ebp xorl %eax,%eax andl $4294967288,%ebp jz L026aw_finish L027aw_loop: # Round 0 movl (%esi),%ecx movl (%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax movl %ecx,(%ebx) # Round 1 movl 4(%esi),%ecx movl 4(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax movl %ecx,4(%ebx) # Round 2 movl 8(%esi),%ecx movl 8(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax movl %ecx,8(%ebx) # Round 3 movl 12(%esi),%ecx movl 12(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax movl %ecx,12(%ebx) # Round 4 movl 16(%esi),%ecx movl 16(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax movl %ecx,16(%ebx) # Round 5 movl 20(%esi),%ecx movl 20(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax movl %ecx,20(%ebx) # Round 6 movl 24(%esi),%ecx movl 24(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax movl %ecx,24(%ebx) # Round 7 movl 28(%esi),%ecx movl 28(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax movl %ecx,28(%ebx) addl $32,%esi addl $32,%edi addl $32,%ebx subl $8,%ebp jnz L027aw_loop L026aw_finish: movl 32(%esp),%ebp andl $7,%ebp jz L028aw_end # Tail Round 0 movl (%esi),%ecx movl (%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,(%ebx) jz L028aw_end # Tail Round 1 movl 4(%esi),%ecx movl 4(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,4(%ebx) jz L028aw_end # Tail Round 2 movl 8(%esi),%ecx movl 
8(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,8(%ebx) jz L028aw_end # Tail Round 3 movl 12(%esi),%ecx movl 12(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,12(%ebx) jz L028aw_end # Tail Round 4 movl 16(%esi),%ecx movl 16(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,16(%ebx) jz L028aw_end # Tail Round 5 movl 20(%esi),%ecx movl 20(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,20(%ebx) jz L028aw_end # Tail Round 6 movl 24(%esi),%ecx movl 24(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax movl %ecx,24(%ebx) L028aw_end: popl %edi popl %esi popl %ebx popl %ebp ret .section __IMPORT,__pointers,non_lazy_symbol_pointers L_OPENSSL_ia32cap_P$non_lazy_ptr: .indirect_symbol _OPENSSL_ia32cap_P .long 0 #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__)
marvin-hansen/iggy-streaming-system
9,078
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/mac-x86/crypto/fipsmodule/x86-mont.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__) .text .globl _bn_mul_mont .private_extern _bn_mul_mont .align 4 _bn_mul_mont: L_bn_mul_mont_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi xorl %eax,%eax movl 40(%esp),%edi cmpl $4,%edi jl L000just_leave leal 20(%esp),%esi leal 24(%esp),%edx addl $2,%edi negl %edi leal -32(%esp,%edi,4),%ebp negl %edi movl %ebp,%eax subl %edx,%eax andl $2047,%eax subl %eax,%ebp xorl %ebp,%edx andl $2048,%edx xorl $2048,%edx subl %edx,%ebp andl $-64,%ebp movl %esp,%eax subl %ebp,%eax andl $-4096,%eax movl %esp,%edx leal (%ebp,%eax,1),%esp movl (%esp),%eax cmpl %ebp,%esp ja L001page_walk jmp L002page_walk_done .align 4,0x90 L001page_walk: leal -4096(%esp),%esp movl (%esp),%eax cmpl %ebp,%esp ja L001page_walk L002page_walk_done: movl (%esi),%eax movl 4(%esi),%ebx movl 8(%esi),%ecx movl 12(%esi),%ebp movl 16(%esi),%esi movl (%esi),%esi movl %eax,4(%esp) movl %ebx,8(%esp) movl %ecx,12(%esp) movl %ebp,16(%esp) movl %esi,20(%esp) leal -3(%edi),%ebx movl %edx,24(%esp) call L003PIC_me_up L003PIC_me_up: popl %eax movl L_OPENSSL_ia32cap_P$non_lazy_ptr-L003PIC_me_up(%eax),%eax btl $26,(%eax) jnc L004non_sse2 movl $-1,%eax movd %eax,%mm7 movl 8(%esp),%esi movl 12(%esp),%edi movl 16(%esp),%ebp xorl %edx,%edx xorl %ecx,%ecx movd (%edi),%mm4 movd (%esi),%mm5 movd (%ebp),%mm3 pmuludq %mm4,%mm5 movq %mm5,%mm2 movq %mm5,%mm0 pand %mm7,%mm0 pmuludq 20(%esp),%mm5 pmuludq %mm5,%mm3 paddq %mm0,%mm3 movd 4(%ebp),%mm1 movd 4(%esi),%mm0 psrlq $32,%mm2 psrlq $32,%mm3 incl %ecx .align 4,0x90 L0051st: pmuludq %mm4,%mm0 pmuludq %mm5,%mm1 paddq %mm0,%mm2 paddq %mm1,%mm3 movq %mm2,%mm0 pand %mm7,%mm0 movd 4(%ebp,%ecx,4),%mm1 paddq %mm0,%mm3 movd 4(%esi,%ecx,4),%mm0 psrlq $32,%mm2 movd %mm3,28(%esp,%ecx,4) psrlq $32,%mm3 leal 1(%ecx),%ecx cmpl %ebx,%ecx jl L0051st pmuludq %mm4,%mm0 pmuludq %mm5,%mm1 paddq %mm0,%mm2 paddq %mm1,%mm3 movq %mm2,%mm0 pand %mm7,%mm0 paddq %mm0,%mm3 movd %mm3,28(%esp,%ecx,4) psrlq $32,%mm2 psrlq $32,%mm3 paddq %mm2,%mm3 movq %mm3,32(%esp,%ebx,4) incl %edx L006outer: xorl %ecx,%ecx movd (%edi,%edx,4),%mm4 movd (%esi),%mm5 movd 32(%esp),%mm6 movd (%ebp),%mm3 pmuludq %mm4,%mm5 paddq %mm6,%mm5 movq %mm5,%mm0 movq %mm5,%mm2 pand %mm7,%mm0 pmuludq 20(%esp),%mm5 pmuludq %mm5,%mm3 paddq %mm0,%mm3 movd 36(%esp),%mm6 movd 4(%ebp),%mm1 movd 4(%esi),%mm0 psrlq $32,%mm2 psrlq $32,%mm3 paddq %mm6,%mm2 incl %ecx decl %ebx L007inner: pmuludq %mm4,%mm0 pmuludq %mm5,%mm1 paddq %mm0,%mm2 paddq %mm1,%mm3 movq %mm2,%mm0 movd 36(%esp,%ecx,4),%mm6 pand %mm7,%mm0 movd 4(%ebp,%ecx,4),%mm1 paddq %mm0,%mm3 movd 4(%esi,%ecx,4),%mm0 psrlq $32,%mm2 movd %mm3,28(%esp,%ecx,4) psrlq $32,%mm3 paddq %mm6,%mm2 decl %ebx leal 1(%ecx),%ecx jnz L007inner movl %ecx,%ebx pmuludq %mm4,%mm0 pmuludq %mm5,%mm1 paddq %mm0,%mm2 paddq %mm1,%mm3 movq %mm2,%mm0 pand %mm7,%mm0 paddq %mm0,%mm3 movd %mm3,28(%esp,%ecx,4) psrlq $32,%mm2 psrlq $32,%mm3 movd 36(%esp,%ebx,4),%mm6 paddq %mm2,%mm3 paddq %mm6,%mm3 movq %mm3,32(%esp,%ebx,4) leal 1(%edx),%edx cmpl %ebx,%edx jle L006outer emms jmp L008common_tail .align 4,0x90 L004non_sse2: movl 8(%esp),%esi leal 1(%ebx),%ebp movl 12(%esp),%edi xorl %ecx,%ecx movl %esi,%edx andl $1,%ebp subl %edi,%edx leal 4(%edi,%ebx,4),%eax orl %edx,%ebp movl (%edi),%edi jz L009bn_sqr_mont movl %eax,28(%esp) movl (%esi),%eax xorl %edx,%edx .align 4,0x90 L010mull: movl %edx,%ebp mull %edi addl %eax,%ebp leal 1(%ecx),%ecx 
adcl $0,%edx movl (%esi,%ecx,4),%eax cmpl %ebx,%ecx movl %ebp,28(%esp,%ecx,4) jl L010mull movl %edx,%ebp mull %edi movl 20(%esp),%edi addl %ebp,%eax movl 16(%esp),%esi adcl $0,%edx imull 32(%esp),%edi movl %eax,32(%esp,%ebx,4) xorl %ecx,%ecx movl %edx,36(%esp,%ebx,4) movl %ecx,40(%esp,%ebx,4) movl (%esi),%eax mull %edi addl 32(%esp),%eax movl 4(%esi),%eax adcl $0,%edx incl %ecx jmp L0112ndmadd .align 4,0x90 L0121stmadd: movl %edx,%ebp mull %edi addl 32(%esp,%ecx,4),%ebp leal 1(%ecx),%ecx adcl $0,%edx addl %eax,%ebp movl (%esi,%ecx,4),%eax adcl $0,%edx cmpl %ebx,%ecx movl %ebp,28(%esp,%ecx,4) jl L0121stmadd movl %edx,%ebp mull %edi addl 32(%esp,%ebx,4),%eax movl 20(%esp),%edi adcl $0,%edx movl 16(%esp),%esi addl %eax,%ebp adcl $0,%edx imull 32(%esp),%edi xorl %ecx,%ecx addl 36(%esp,%ebx,4),%edx movl %ebp,32(%esp,%ebx,4) adcl $0,%ecx movl (%esi),%eax movl %edx,36(%esp,%ebx,4) movl %ecx,40(%esp,%ebx,4) mull %edi addl 32(%esp),%eax movl 4(%esi),%eax adcl $0,%edx movl $1,%ecx .align 4,0x90 L0112ndmadd: movl %edx,%ebp mull %edi addl 32(%esp,%ecx,4),%ebp leal 1(%ecx),%ecx adcl $0,%edx addl %eax,%ebp movl (%esi,%ecx,4),%eax adcl $0,%edx cmpl %ebx,%ecx movl %ebp,24(%esp,%ecx,4) jl L0112ndmadd movl %edx,%ebp mull %edi addl 32(%esp,%ebx,4),%ebp adcl $0,%edx addl %eax,%ebp adcl $0,%edx movl %ebp,28(%esp,%ebx,4) xorl %eax,%eax movl 12(%esp),%ecx addl 36(%esp,%ebx,4),%edx adcl 40(%esp,%ebx,4),%eax leal 4(%ecx),%ecx movl %edx,32(%esp,%ebx,4) cmpl 28(%esp),%ecx movl %eax,36(%esp,%ebx,4) je L008common_tail movl (%ecx),%edi movl 8(%esp),%esi movl %ecx,12(%esp) xorl %ecx,%ecx xorl %edx,%edx movl (%esi),%eax jmp L0121stmadd .align 4,0x90 L009bn_sqr_mont: movl %ebx,(%esp) movl %ecx,12(%esp) movl %edi,%eax mull %edi movl %eax,32(%esp) movl %edx,%ebx shrl $1,%edx andl $1,%ebx incl %ecx .align 4,0x90 L013sqr: movl (%esi,%ecx,4),%eax movl %edx,%ebp mull %edi addl %ebp,%eax leal 1(%ecx),%ecx adcl $0,%edx leal (%ebx,%eax,2),%ebp shrl $31,%eax cmpl (%esp),%ecx movl %eax,%ebx movl %ebp,28(%esp,%ecx,4) jl L013sqr movl (%esi,%ecx,4),%eax movl %edx,%ebp mull %edi addl %ebp,%eax movl 20(%esp),%edi adcl $0,%edx movl 16(%esp),%esi leal (%ebx,%eax,2),%ebp imull 32(%esp),%edi shrl $31,%eax movl %ebp,32(%esp,%ecx,4) leal (%eax,%edx,2),%ebp movl (%esi),%eax shrl $31,%edx movl %ebp,36(%esp,%ecx,4) movl %edx,40(%esp,%ecx,4) mull %edi addl 32(%esp),%eax movl %ecx,%ebx adcl $0,%edx movl 4(%esi),%eax movl $1,%ecx .align 4,0x90 L0143rdmadd: movl %edx,%ebp mull %edi addl 32(%esp,%ecx,4),%ebp adcl $0,%edx addl %eax,%ebp movl 4(%esi,%ecx,4),%eax adcl $0,%edx movl %ebp,28(%esp,%ecx,4) movl %edx,%ebp mull %edi addl 36(%esp,%ecx,4),%ebp leal 2(%ecx),%ecx adcl $0,%edx addl %eax,%ebp movl (%esi,%ecx,4),%eax adcl $0,%edx cmpl %ebx,%ecx movl %ebp,24(%esp,%ecx,4) jl L0143rdmadd movl %edx,%ebp mull %edi addl 32(%esp,%ebx,4),%ebp adcl $0,%edx addl %eax,%ebp adcl $0,%edx movl %ebp,28(%esp,%ebx,4) movl 12(%esp),%ecx xorl %eax,%eax movl 8(%esp),%esi addl 36(%esp,%ebx,4),%edx adcl 40(%esp,%ebx,4),%eax movl %edx,32(%esp,%ebx,4) cmpl %ebx,%ecx movl %eax,36(%esp,%ebx,4) je L008common_tail movl 4(%esi,%ecx,4),%edi leal 1(%ecx),%ecx movl %edi,%eax movl %ecx,12(%esp) mull %edi addl 32(%esp,%ecx,4),%eax adcl $0,%edx movl %eax,32(%esp,%ecx,4) xorl %ebp,%ebp cmpl %ebx,%ecx leal 1(%ecx),%ecx je L015sqrlast movl %edx,%ebx shrl $1,%edx andl $1,%ebx .align 4,0x90 L016sqradd: movl (%esi,%ecx,4),%eax movl %edx,%ebp mull %edi addl %ebp,%eax leal (%eax,%eax,1),%ebp adcl $0,%edx shrl $31,%eax addl 32(%esp,%ecx,4),%ebp leal 1(%ecx),%ecx adcl $0,%eax addl %ebx,%ebp adcl 
$0,%eax cmpl (%esp),%ecx movl %ebp,28(%esp,%ecx,4) movl %eax,%ebx jle L016sqradd movl %edx,%ebp addl %edx,%edx shrl $31,%ebp addl %ebx,%edx adcl $0,%ebp L015sqrlast: movl 20(%esp),%edi movl 16(%esp),%esi imull 32(%esp),%edi addl 32(%esp,%ecx,4),%edx movl (%esi),%eax adcl $0,%ebp movl %edx,32(%esp,%ecx,4) movl %ebp,36(%esp,%ecx,4) mull %edi addl 32(%esp),%eax leal -1(%ecx),%ebx adcl $0,%edx movl $1,%ecx movl 4(%esi),%eax jmp L0143rdmadd .align 4,0x90 L008common_tail: movl 16(%esp),%ebp movl 4(%esp),%edi leal 32(%esp),%esi movl (%esi),%eax movl %ebx,%ecx xorl %edx,%edx .align 4,0x90 L017sub: sbbl (%ebp,%edx,4),%eax movl %eax,(%edi,%edx,4) decl %ecx movl 4(%esi,%edx,4),%eax leal 1(%edx),%edx jge L017sub sbbl $0,%eax movl $-1,%edx xorl %eax,%edx jmp L018copy .align 4,0x90 L018copy: movl 32(%esp,%ebx,4),%esi movl (%edi,%ebx,4),%ebp movl %ecx,32(%esp,%ebx,4) andl %eax,%esi andl %edx,%ebp orl %esi,%ebp movl %ebp,(%edi,%ebx,4) decl %ebx jge L018copy movl 24(%esp),%esp movl $1,%eax L000just_leave: popl %edi popl %esi popl %ebx popl %ebp ret .byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105 .byte 112,108,105,99,97,116,105,111,110,32,102,111,114,32,120,56 .byte 54,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121 .byte 32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46 .byte 111,114,103,62,0 .section __IMPORT,__pointers,non_lazy_symbol_pointers L_OPENSSL_ia32cap_P$non_lazy_ptr: .indirect_symbol _OPENSSL_ia32cap_P .long 0 #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__)
marvin-hansen/iggy-streaming-system
49,913
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/mac-x86/crypto/fipsmodule/sha512-586.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__) .text .globl _sha512_block_data_order .private_extern _sha512_block_data_order .align 4 _sha512_block_data_order: L_sha512_block_data_order_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl %esp,%ebx call L000pic_point L000pic_point: popl %ebp leal L001K512-L000pic_point(%ebp),%ebp subl $16,%esp andl $-64,%esp shll $7,%eax addl %edi,%eax movl %esi,(%esp) movl %edi,4(%esp) movl %eax,8(%esp) movl %ebx,12(%esp) movl L_OPENSSL_ia32cap_P$non_lazy_ptr-L001K512(%ebp),%edx movl (%edx),%ecx testl $67108864,%ecx jz L002loop_x86 movl 4(%edx),%edx movq (%esi),%mm0 andl $16777216,%ecx movq 8(%esi),%mm1 andl $512,%edx movq 16(%esi),%mm2 orl %edx,%ecx movq 24(%esi),%mm3 movq 32(%esi),%mm4 movq 40(%esi),%mm5 movq 48(%esi),%mm6 movq 56(%esi),%mm7 cmpl $16777728,%ecx je L003SSSE3 subl $80,%esp jmp L004loop_sse2 .align 4,0x90 L004loop_sse2: movq %mm1,8(%esp) movq %mm2,16(%esp) movq %mm3,24(%esp) movq %mm5,40(%esp) movq %mm6,48(%esp) pxor %mm1,%mm2 movq %mm7,56(%esp) movq %mm0,%mm3 movl (%edi),%eax movl 4(%edi),%ebx addl $8,%edi movl $15,%edx bswap %eax bswap %ebx jmp L00500_14_sse2 .align 4,0x90 L00500_14_sse2: movd %eax,%mm1 movl (%edi),%eax movd %ebx,%mm7 movl 4(%edi),%ebx addl $8,%edi bswap %eax bswap %ebx punpckldq %mm1,%mm7 movq %mm4,%mm1 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,32(%esp) pand %mm4,%mm5 psllq $23,%mm4 movq %mm3,%mm0 movq %mm7,72(%esp) movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 56(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 paddq (%ebp),%mm7 pxor %mm4,%mm3 movq 24(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 8(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 subl $8,%esp psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 40(%esp),%mm5 paddq %mm2,%mm3 movq %mm0,%mm2 addl $8,%ebp paddq %mm6,%mm3 movq 48(%esp),%mm6 decl %edx jnz L00500_14_sse2 movd %eax,%mm1 movd %ebx,%mm7 punpckldq %mm1,%mm7 movq %mm4,%mm1 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,32(%esp) pand %mm4,%mm5 psllq $23,%mm4 movq %mm3,%mm0 movq %mm7,72(%esp) movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 56(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 paddq (%ebp),%mm7 pxor %mm4,%mm3 movq 24(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 8(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 subl $8,%esp psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 192(%esp),%mm7 paddq %mm2,%mm3 movq %mm0,%mm2 addl $8,%ebp paddq %mm6,%mm3 pxor %mm0,%mm0 movl $32,%edx jmp L00616_79_sse2 .align 4,0x90 L00616_79_sse2: movq 88(%esp),%mm5 movq %mm7,%mm1 psrlq $1,%mm7 movq %mm5,%mm6 psrlq $6,%mm5 psllq $56,%mm1 paddq %mm3,%mm0 movq %mm7,%mm3 psrlq $6,%mm7 pxor %mm1,%mm3 psllq $7,%mm1 pxor %mm7,%mm3 psrlq $1,%mm7 pxor %mm1,%mm3 movq %mm5,%mm1 psrlq $13,%mm5 pxor %mm3,%mm7 psllq $3,%mm6 pxor %mm5,%mm1 paddq 200(%esp),%mm7 pxor 
%mm6,%mm1 psrlq $42,%mm5 paddq 128(%esp),%mm7 pxor %mm5,%mm1 psllq $42,%mm6 movq 40(%esp),%mm5 pxor %mm6,%mm1 movq 48(%esp),%mm6 paddq %mm1,%mm7 movq %mm4,%mm1 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,32(%esp) pand %mm4,%mm5 psllq $23,%mm4 movq %mm7,72(%esp) movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 56(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 paddq (%ebp),%mm7 pxor %mm4,%mm3 movq 24(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 8(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 subl $8,%esp psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 192(%esp),%mm7 paddq %mm6,%mm2 addl $8,%ebp movq 88(%esp),%mm5 movq %mm7,%mm1 psrlq $1,%mm7 movq %mm5,%mm6 psrlq $6,%mm5 psllq $56,%mm1 paddq %mm3,%mm2 movq %mm7,%mm3 psrlq $6,%mm7 pxor %mm1,%mm3 psllq $7,%mm1 pxor %mm7,%mm3 psrlq $1,%mm7 pxor %mm1,%mm3 movq %mm5,%mm1 psrlq $13,%mm5 pxor %mm3,%mm7 psllq $3,%mm6 pxor %mm5,%mm1 paddq 200(%esp),%mm7 pxor %mm6,%mm1 psrlq $42,%mm5 paddq 128(%esp),%mm7 pxor %mm5,%mm1 psllq $42,%mm6 movq 40(%esp),%mm5 pxor %mm6,%mm1 movq 48(%esp),%mm6 paddq %mm1,%mm7 movq %mm4,%mm1 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,32(%esp) pand %mm4,%mm5 psllq $23,%mm4 movq %mm7,72(%esp) movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 56(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 paddq (%ebp),%mm7 pxor %mm4,%mm3 movq 24(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 8(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 subl $8,%esp psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 192(%esp),%mm7 paddq %mm6,%mm0 addl $8,%ebp decl %edx jnz L00616_79_sse2 paddq %mm3,%mm0 movq 8(%esp),%mm1 movq 24(%esp),%mm3 movq 40(%esp),%mm5 movq 48(%esp),%mm6 movq 56(%esp),%mm7 pxor %mm1,%mm2 paddq (%esi),%mm0 paddq 8(%esi),%mm1 paddq 16(%esi),%mm2 paddq 24(%esi),%mm3 paddq 32(%esi),%mm4 paddq 40(%esi),%mm5 paddq 48(%esi),%mm6 paddq 56(%esi),%mm7 movl $640,%eax movq %mm0,(%esi) movq %mm1,8(%esi) movq %mm2,16(%esi) movq %mm3,24(%esi) movq %mm4,32(%esi) movq %mm5,40(%esi) movq %mm6,48(%esi) movq %mm7,56(%esi) leal (%esp,%eax,1),%esp subl %eax,%ebp cmpl 88(%esp),%edi jb L004loop_sse2 movl 92(%esp),%esp emms popl %edi popl %esi popl %ebx popl %ebp ret .align 5,0x90 L003SSSE3: leal -64(%esp),%edx subl $256,%esp movdqa 640(%ebp),%xmm1 movdqu (%edi),%xmm0 .byte 102,15,56,0,193 movdqa (%ebp),%xmm3 movdqa %xmm1,%xmm2 movdqu 16(%edi),%xmm1 paddq %xmm0,%xmm3 .byte 102,15,56,0,202 movdqa %xmm3,-128(%edx) movdqa 16(%ebp),%xmm4 movdqa %xmm2,%xmm3 movdqu 32(%edi),%xmm2 paddq %xmm1,%xmm4 .byte 102,15,56,0,211 movdqa %xmm4,-112(%edx) movdqa 32(%ebp),%xmm5 movdqa %xmm3,%xmm4 movdqu 48(%edi),%xmm3 paddq %xmm2,%xmm5 .byte 102,15,56,0,220 movdqa %xmm5,-96(%edx) movdqa 48(%ebp),%xmm6 movdqa %xmm4,%xmm5 movdqu 64(%edi),%xmm4 paddq %xmm3,%xmm6 .byte 102,15,56,0,229 movdqa %xmm6,-80(%edx) movdqa 64(%ebp),%xmm7 movdqa %xmm5,%xmm6 movdqu 80(%edi),%xmm5 paddq %xmm4,%xmm7 .byte 102,15,56,0,238 movdqa %xmm7,-64(%edx) movdqa %xmm0,(%edx) movdqa 80(%ebp),%xmm0 movdqa %xmm6,%xmm7 movdqu 96(%edi),%xmm6 paddq %xmm5,%xmm0 .byte 102,15,56,0,247 
movdqa %xmm0,-48(%edx) movdqa %xmm1,16(%edx) movdqa 96(%ebp),%xmm1 movdqa %xmm7,%xmm0 movdqu 112(%edi),%xmm7 paddq %xmm6,%xmm1 .byte 102,15,56,0,248 movdqa %xmm1,-32(%edx) movdqa %xmm2,32(%edx) movdqa 112(%ebp),%xmm2 movdqa (%edx),%xmm0 paddq %xmm7,%xmm2 movdqa %xmm2,-16(%edx) nop .align 5,0x90 L007loop_ssse3: movdqa 16(%edx),%xmm2 movdqa %xmm3,48(%edx) leal 128(%ebp),%ebp movq %mm1,8(%esp) movl %edi,%ebx movq %mm2,16(%esp) leal 128(%edi),%edi movq %mm3,24(%esp) cmpl %eax,%edi movq %mm5,40(%esp) cmovbl %edi,%ebx movq %mm6,48(%esp) movl $4,%ecx pxor %mm1,%mm2 movq %mm7,56(%esp) pxor %mm3,%mm3 jmp L00800_47_ssse3 .align 5,0x90 L00800_47_ssse3: movdqa %xmm5,%xmm3 movdqa %xmm2,%xmm1 .byte 102,15,58,15,208,8 movdqa %xmm4,(%edx) .byte 102,15,58,15,220,8 movdqa %xmm2,%xmm4 psrlq $7,%xmm2 paddq %xmm3,%xmm0 movdqa %xmm4,%xmm3 psrlq $1,%xmm4 psllq $56,%xmm3 pxor %xmm4,%xmm2 psrlq $7,%xmm4 pxor %xmm3,%xmm2 psllq $7,%xmm3 pxor %xmm4,%xmm2 movdqa %xmm7,%xmm4 pxor %xmm3,%xmm2 movdqa %xmm7,%xmm3 psrlq $6,%xmm4 paddq %xmm2,%xmm0 movdqa %xmm7,%xmm2 psrlq $19,%xmm3 psllq $3,%xmm2 pxor %xmm3,%xmm4 psrlq $42,%xmm3 pxor %xmm2,%xmm4 psllq $42,%xmm2 pxor %xmm3,%xmm4 movdqa 32(%edx),%xmm3 pxor %xmm2,%xmm4 movdqa (%ebp),%xmm2 movq %mm4,%mm1 paddq %xmm4,%xmm0 movq -128(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,32(%esp) paddq %xmm0,%xmm2 pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 56(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 24(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 8(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 32(%esp),%mm5 paddq %mm6,%mm2 movq 40(%esp),%mm6 movq %mm4,%mm1 movq -120(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,24(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,56(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 48(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 16(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq (%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 24(%esp),%mm5 paddq %mm6,%mm0 movq 32(%esp),%mm6 movdqa %xmm2,-128(%edx) movdqa %xmm6,%xmm4 movdqa %xmm3,%xmm2 .byte 102,15,58,15,217,8 movdqa %xmm5,16(%edx) .byte 102,15,58,15,229,8 movdqa %xmm3,%xmm5 psrlq $7,%xmm3 paddq %xmm4,%xmm1 movdqa %xmm5,%xmm4 psrlq $1,%xmm5 psllq $56,%xmm4 pxor %xmm5,%xmm3 psrlq $7,%xmm5 pxor %xmm4,%xmm3 psllq $7,%xmm4 pxor %xmm5,%xmm3 movdqa %xmm0,%xmm5 pxor %xmm4,%xmm3 movdqa %xmm0,%xmm4 psrlq $6,%xmm5 paddq %xmm3,%xmm1 movdqa %xmm0,%xmm3 psrlq $19,%xmm4 psllq $3,%xmm3 pxor %xmm4,%xmm5 psrlq $42,%xmm4 pxor %xmm3,%xmm5 psllq $42,%xmm3 pxor %xmm4,%xmm5 movdqa 48(%edx),%xmm4 pxor %xmm3,%xmm5 movdqa 16(%ebp),%xmm3 movq %mm4,%mm1 paddq %xmm5,%xmm1 movq -112(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,16(%esp) paddq %xmm1,%xmm3 pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 
movq %mm0,48(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 40(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 8(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 56(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 16(%esp),%mm5 paddq %mm6,%mm2 movq 24(%esp),%mm6 movq %mm4,%mm1 movq -104(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,8(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,40(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 32(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq (%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 48(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 8(%esp),%mm5 paddq %mm6,%mm0 movq 16(%esp),%mm6 movdqa %xmm3,-112(%edx) movdqa %xmm7,%xmm5 movdqa %xmm4,%xmm3 .byte 102,15,58,15,226,8 movdqa %xmm6,32(%edx) .byte 102,15,58,15,238,8 movdqa %xmm4,%xmm6 psrlq $7,%xmm4 paddq %xmm5,%xmm2 movdqa %xmm6,%xmm5 psrlq $1,%xmm6 psllq $56,%xmm5 pxor %xmm6,%xmm4 psrlq $7,%xmm6 pxor %xmm5,%xmm4 psllq $7,%xmm5 pxor %xmm6,%xmm4 movdqa %xmm1,%xmm6 pxor %xmm5,%xmm4 movdqa %xmm1,%xmm5 psrlq $6,%xmm6 paddq %xmm4,%xmm2 movdqa %xmm1,%xmm4 psrlq $19,%xmm5 psllq $3,%xmm4 pxor %xmm5,%xmm6 psrlq $42,%xmm5 pxor %xmm4,%xmm6 psllq $42,%xmm4 pxor %xmm5,%xmm6 movdqa (%edx),%xmm5 pxor %xmm4,%xmm6 movdqa 32(%ebp),%xmm4 movq %mm4,%mm1 paddq %xmm6,%xmm2 movq -96(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,(%esp) paddq %xmm2,%xmm4 pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,32(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 24(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 56(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 40(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq (%esp),%mm5 paddq %mm6,%mm2 movq 8(%esp),%mm6 movq %mm4,%mm1 movq -88(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,56(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,24(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 16(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 48(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 32(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 56(%esp),%mm5 paddq %mm6,%mm0 movq (%esp),%mm6 movdqa %xmm4,-96(%edx) movdqa %xmm0,%xmm6 movdqa %xmm5,%xmm4 .byte 102,15,58,15,235,8 movdqa %xmm7,48(%edx) .byte 102,15,58,15,247,8 movdqa %xmm5,%xmm7 psrlq $7,%xmm5 paddq %xmm6,%xmm3 movdqa %xmm7,%xmm6 psrlq $1,%xmm7 psllq $56,%xmm6 pxor %xmm7,%xmm5 psrlq 
$7,%xmm7 pxor %xmm6,%xmm5 psllq $7,%xmm6 pxor %xmm7,%xmm5 movdqa %xmm2,%xmm7 pxor %xmm6,%xmm5 movdqa %xmm2,%xmm6 psrlq $6,%xmm7 paddq %xmm5,%xmm3 movdqa %xmm2,%xmm5 psrlq $19,%xmm6 psllq $3,%xmm5 pxor %xmm6,%xmm7 psrlq $42,%xmm6 pxor %xmm5,%xmm7 psllq $42,%xmm5 pxor %xmm6,%xmm7 movdqa 16(%edx),%xmm6 pxor %xmm5,%xmm7 movdqa 48(%ebp),%xmm5 movq %mm4,%mm1 paddq %xmm7,%xmm3 movq -80(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,48(%esp) paddq %xmm3,%xmm5 pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,16(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 8(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 40(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 24(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 48(%esp),%mm5 paddq %mm6,%mm2 movq 56(%esp),%mm6 movq %mm4,%mm1 movq -72(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,40(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,8(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq (%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 32(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 16(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 40(%esp),%mm5 paddq %mm6,%mm0 movq 48(%esp),%mm6 movdqa %xmm5,-80(%edx) movdqa %xmm1,%xmm7 movdqa %xmm6,%xmm5 .byte 102,15,58,15,244,8 movdqa %xmm0,(%edx) .byte 102,15,58,15,248,8 movdqa %xmm6,%xmm0 psrlq $7,%xmm6 paddq %xmm7,%xmm4 movdqa %xmm0,%xmm7 psrlq $1,%xmm0 psllq $56,%xmm7 pxor %xmm0,%xmm6 psrlq $7,%xmm0 pxor %xmm7,%xmm6 psllq $7,%xmm7 pxor %xmm0,%xmm6 movdqa %xmm3,%xmm0 pxor %xmm7,%xmm6 movdqa %xmm3,%xmm7 psrlq $6,%xmm0 paddq %xmm6,%xmm4 movdqa %xmm3,%xmm6 psrlq $19,%xmm7 psllq $3,%xmm6 pxor %xmm7,%xmm0 psrlq $42,%xmm7 pxor %xmm6,%xmm0 psllq $42,%xmm6 pxor %xmm7,%xmm0 movdqa 32(%edx),%xmm7 pxor %xmm6,%xmm0 movdqa 64(%ebp),%xmm6 movq %mm4,%mm1 paddq %xmm0,%xmm4 movq -64(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,32(%esp) paddq %xmm4,%xmm6 pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 56(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 24(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 8(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 32(%esp),%mm5 paddq %mm6,%mm2 movq 40(%esp),%mm6 movq %mm4,%mm1 movq -56(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,24(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,56(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 48(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 16(%esp),%mm4 paddq %mm7,%mm3 
movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq (%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 24(%esp),%mm5 paddq %mm6,%mm0 movq 32(%esp),%mm6 movdqa %xmm6,-64(%edx) movdqa %xmm2,%xmm0 movdqa %xmm7,%xmm6 .byte 102,15,58,15,253,8 movdqa %xmm1,16(%edx) .byte 102,15,58,15,193,8 movdqa %xmm7,%xmm1 psrlq $7,%xmm7 paddq %xmm0,%xmm5 movdqa %xmm1,%xmm0 psrlq $1,%xmm1 psllq $56,%xmm0 pxor %xmm1,%xmm7 psrlq $7,%xmm1 pxor %xmm0,%xmm7 psllq $7,%xmm0 pxor %xmm1,%xmm7 movdqa %xmm4,%xmm1 pxor %xmm0,%xmm7 movdqa %xmm4,%xmm0 psrlq $6,%xmm1 paddq %xmm7,%xmm5 movdqa %xmm4,%xmm7 psrlq $19,%xmm0 psllq $3,%xmm7 pxor %xmm0,%xmm1 psrlq $42,%xmm0 pxor %xmm7,%xmm1 psllq $42,%xmm7 pxor %xmm0,%xmm1 movdqa 48(%edx),%xmm0 pxor %xmm7,%xmm1 movdqa 80(%ebp),%xmm7 movq %mm4,%mm1 paddq %xmm1,%xmm5 movq -48(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,16(%esp) paddq %xmm5,%xmm7 pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,48(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 40(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 8(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 56(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 16(%esp),%mm5 paddq %mm6,%mm2 movq 24(%esp),%mm6 movq %mm4,%mm1 movq -40(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,8(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,40(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 32(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq (%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 48(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 8(%esp),%mm5 paddq %mm6,%mm0 movq 16(%esp),%mm6 movdqa %xmm7,-48(%edx) movdqa %xmm3,%xmm1 movdqa %xmm0,%xmm7 .byte 102,15,58,15,198,8 movdqa %xmm2,32(%edx) .byte 102,15,58,15,202,8 movdqa %xmm0,%xmm2 psrlq $7,%xmm0 paddq %xmm1,%xmm6 movdqa %xmm2,%xmm1 psrlq $1,%xmm2 psllq $56,%xmm1 pxor %xmm2,%xmm0 psrlq $7,%xmm2 pxor %xmm1,%xmm0 psllq $7,%xmm1 pxor %xmm2,%xmm0 movdqa %xmm5,%xmm2 pxor %xmm1,%xmm0 movdqa %xmm5,%xmm1 psrlq $6,%xmm2 paddq %xmm0,%xmm6 movdqa %xmm5,%xmm0 psrlq $19,%xmm1 psllq $3,%xmm0 pxor %xmm1,%xmm2 psrlq $42,%xmm1 pxor %xmm0,%xmm2 psllq $42,%xmm0 pxor %xmm1,%xmm2 movdqa (%edx),%xmm1 pxor %xmm0,%xmm2 movdqa 96(%ebp),%xmm0 movq %mm4,%mm1 paddq %xmm2,%xmm6 movq -32(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,(%esp) paddq %xmm6,%xmm0 pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,32(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 24(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 56(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 40(%esp),%mm1 psrlq $6,%mm5 pxor 
%mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq (%esp),%mm5 paddq %mm6,%mm2 movq 8(%esp),%mm6 movq %mm4,%mm1 movq -24(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,56(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,24(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 16(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 48(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 32(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 56(%esp),%mm5 paddq %mm6,%mm0 movq (%esp),%mm6 movdqa %xmm0,-32(%edx) movdqa %xmm4,%xmm2 movdqa %xmm1,%xmm0 .byte 102,15,58,15,207,8 movdqa %xmm3,48(%edx) .byte 102,15,58,15,211,8 movdqa %xmm1,%xmm3 psrlq $7,%xmm1 paddq %xmm2,%xmm7 movdqa %xmm3,%xmm2 psrlq $1,%xmm3 psllq $56,%xmm2 pxor %xmm3,%xmm1 psrlq $7,%xmm3 pxor %xmm2,%xmm1 psllq $7,%xmm2 pxor %xmm3,%xmm1 movdqa %xmm6,%xmm3 pxor %xmm2,%xmm1 movdqa %xmm6,%xmm2 psrlq $6,%xmm3 paddq %xmm1,%xmm7 movdqa %xmm6,%xmm1 psrlq $19,%xmm2 psllq $3,%xmm1 pxor %xmm2,%xmm3 psrlq $42,%xmm2 pxor %xmm1,%xmm3 psllq $42,%xmm1 pxor %xmm2,%xmm3 movdqa 16(%edx),%xmm2 pxor %xmm1,%xmm3 movdqa 112(%ebp),%xmm1 movq %mm4,%mm1 paddq %xmm3,%xmm7 movq -16(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,48(%esp) paddq %xmm7,%xmm1 pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,16(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 8(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 40(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 24(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 48(%esp),%mm5 paddq %mm6,%mm2 movq 56(%esp),%mm6 movq %mm4,%mm1 movq -8(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,40(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,8(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq (%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 32(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 16(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 40(%esp),%mm5 paddq %mm6,%mm0 movq 48(%esp),%mm6 movdqa %xmm1,-16(%edx) leal 128(%ebp),%ebp decl %ecx jnz L00800_47_ssse3 movdqa (%ebp),%xmm1 leal -640(%ebp),%ebp movdqu (%ebx),%xmm0 .byte 102,15,56,0,193 movdqa (%ebp),%xmm3 movdqa %xmm1,%xmm2 movdqu 16(%ebx),%xmm1 paddq %xmm0,%xmm3 .byte 102,15,56,0,202 movq %mm4,%mm1 movq -128(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,32(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 
56(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 24(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 8(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 32(%esp),%mm5 paddq %mm6,%mm2 movq 40(%esp),%mm6 movq %mm4,%mm1 movq -120(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,24(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,56(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 48(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 16(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq (%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 24(%esp),%mm5 paddq %mm6,%mm0 movq 32(%esp),%mm6 movdqa %xmm3,-128(%edx) movdqa 16(%ebp),%xmm4 movdqa %xmm2,%xmm3 movdqu 32(%ebx),%xmm2 paddq %xmm1,%xmm4 .byte 102,15,56,0,211 movq %mm4,%mm1 movq -112(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,16(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,48(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 40(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 8(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 56(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 16(%esp),%mm5 paddq %mm6,%mm2 movq 24(%esp),%mm6 movq %mm4,%mm1 movq -104(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,8(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,40(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 32(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq (%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 48(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 8(%esp),%mm5 paddq %mm6,%mm0 movq 16(%esp),%mm6 movdqa %xmm4,-112(%edx) movdqa 32(%ebp),%xmm5 movdqa %xmm3,%xmm4 movdqu 48(%ebx),%xmm3 paddq %xmm2,%xmm5 .byte 102,15,56,0,220 movq %mm4,%mm1 movq -96(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,32(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 24(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 56(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 40(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq (%esp),%mm5 paddq %mm6,%mm2 
movq 8(%esp),%mm6 movq %mm4,%mm1 movq -88(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,56(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,24(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 16(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 48(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 32(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 56(%esp),%mm5 paddq %mm6,%mm0 movq (%esp),%mm6 movdqa %xmm5,-96(%edx) movdqa 48(%ebp),%xmm6 movdqa %xmm4,%xmm5 movdqu 64(%ebx),%xmm4 paddq %xmm3,%xmm6 .byte 102,15,56,0,229 movq %mm4,%mm1 movq -80(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,48(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,16(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 8(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 40(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 24(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 48(%esp),%mm5 paddq %mm6,%mm2 movq 56(%esp),%mm6 movq %mm4,%mm1 movq -72(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,40(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,8(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq (%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 32(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 16(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 40(%esp),%mm5 paddq %mm6,%mm0 movq 48(%esp),%mm6 movdqa %xmm6,-80(%edx) movdqa 64(%ebp),%xmm7 movdqa %xmm5,%xmm6 movdqu 80(%ebx),%xmm5 paddq %xmm4,%xmm7 .byte 102,15,56,0,238 movq %mm4,%mm1 movq -64(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,32(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 56(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 24(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 8(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 32(%esp),%mm5 paddq %mm6,%mm2 movq 40(%esp),%mm6 movq %mm4,%mm1 movq -56(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,24(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,56(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 48(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 16(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 
psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq (%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 24(%esp),%mm5 paddq %mm6,%mm0 movq 32(%esp),%mm6 movdqa %xmm7,-64(%edx) movdqa %xmm0,(%edx) movdqa 80(%ebp),%xmm0 movdqa %xmm6,%xmm7 movdqu 96(%ebx),%xmm6 paddq %xmm5,%xmm0 .byte 102,15,56,0,247 movq %mm4,%mm1 movq -48(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,16(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,48(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 40(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 8(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 56(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 16(%esp),%mm5 paddq %mm6,%mm2 movq 24(%esp),%mm6 movq %mm4,%mm1 movq -40(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,8(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,40(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 32(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq (%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 48(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 8(%esp),%mm5 paddq %mm6,%mm0 movq 16(%esp),%mm6 movdqa %xmm0,-48(%edx) movdqa %xmm1,16(%edx) movdqa 96(%ebp),%xmm1 movdqa %xmm7,%xmm0 movdqu 112(%ebx),%xmm7 paddq %xmm6,%xmm1 .byte 102,15,56,0,248 movq %mm4,%mm1 movq -32(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,32(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 24(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 56(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 40(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq (%esp),%mm5 paddq %mm6,%mm2 movq 8(%esp),%mm6 movq %mm4,%mm1 movq -24(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,56(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,24(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 16(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 48(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 32(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 56(%esp),%mm5 paddq %mm6,%mm0 movq (%esp),%mm6 movdqa %xmm1,-32(%edx) movdqa %xmm2,32(%edx) movdqa 
112(%ebp),%xmm2 movdqa (%edx),%xmm0 paddq %xmm7,%xmm2 movq %mm4,%mm1 movq -16(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,48(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,16(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 8(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 40(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 24(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 48(%esp),%mm5 paddq %mm6,%mm2 movq 56(%esp),%mm6 movq %mm4,%mm1 movq -8(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,40(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,8(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq (%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 32(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 16(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 40(%esp),%mm5 paddq %mm6,%mm0 movq 48(%esp),%mm6 movdqa %xmm2,-16(%edx) movq 8(%esp),%mm1 paddq %mm3,%mm0 movq 24(%esp),%mm3 movq 56(%esp),%mm7 pxor %mm1,%mm2 paddq (%esi),%mm0 paddq 8(%esi),%mm1 paddq 16(%esi),%mm2 paddq 24(%esi),%mm3 paddq 32(%esi),%mm4 paddq 40(%esi),%mm5 paddq 48(%esi),%mm6 paddq 56(%esi),%mm7 movq %mm0,(%esi) movq %mm1,8(%esi) movq %mm2,16(%esi) movq %mm3,24(%esi) movq %mm4,32(%esi) movq %mm5,40(%esi) movq %mm6,48(%esi) movq %mm7,56(%esi) cmpl %eax,%edi jb L007loop_ssse3 movl 76(%edx),%esp emms popl %edi popl %esi popl %ebx popl %ebp ret .align 4,0x90 L002loop_x86: movl (%edi),%eax movl 4(%edi),%ebx movl 8(%edi),%ecx movl 12(%edi),%edx bswap %eax bswap %ebx bswap %ecx bswap %edx pushl %eax pushl %ebx pushl %ecx pushl %edx movl 16(%edi),%eax movl 20(%edi),%ebx movl 24(%edi),%ecx movl 28(%edi),%edx bswap %eax bswap %ebx bswap %ecx bswap %edx pushl %eax pushl %ebx pushl %ecx pushl %edx movl 32(%edi),%eax movl 36(%edi),%ebx movl 40(%edi),%ecx movl 44(%edi),%edx bswap %eax bswap %ebx bswap %ecx bswap %edx pushl %eax pushl %ebx pushl %ecx pushl %edx movl 48(%edi),%eax movl 52(%edi),%ebx movl 56(%edi),%ecx movl 60(%edi),%edx bswap %eax bswap %ebx bswap %ecx bswap %edx pushl %eax pushl %ebx pushl %ecx pushl %edx movl 64(%edi),%eax movl 68(%edi),%ebx movl 72(%edi),%ecx movl 76(%edi),%edx bswap %eax bswap %ebx bswap %ecx bswap %edx pushl %eax pushl %ebx pushl %ecx pushl %edx movl 80(%edi),%eax movl 84(%edi),%ebx movl 88(%edi),%ecx movl 92(%edi),%edx bswap %eax bswap %ebx bswap %ecx bswap %edx pushl %eax pushl %ebx pushl %ecx pushl %edx movl 96(%edi),%eax movl 100(%edi),%ebx movl 104(%edi),%ecx movl 108(%edi),%edx bswap %eax bswap %ebx bswap %ecx bswap %edx pushl %eax pushl %ebx pushl %ecx pushl %edx movl 112(%edi),%eax movl 116(%edi),%ebx movl 120(%edi),%ecx movl 124(%edi),%edx bswap %eax bswap %ebx bswap %ecx bswap %edx pushl %eax pushl %ebx pushl %ecx pushl %edx addl $128,%edi subl $72,%esp movl %edi,204(%esp) leal 8(%esp),%edi movl $16,%ecx .long 2784229001 .align 4,0x90 L00900_15_x86: movl 40(%esp),%ecx movl 44(%esp),%edx movl %ecx,%esi shrl $9,%ecx movl 
%edx,%edi shrl $9,%edx movl %ecx,%ebx shll $14,%esi movl %edx,%eax shll $14,%edi xorl %esi,%ebx shrl $5,%ecx xorl %edi,%eax shrl $5,%edx xorl %ecx,%eax shll $4,%esi xorl %edx,%ebx shll $4,%edi xorl %esi,%ebx shrl $4,%ecx xorl %edi,%eax shrl $4,%edx xorl %ecx,%eax shll $5,%esi xorl %edx,%ebx shll $5,%edi xorl %esi,%eax xorl %edi,%ebx movl 48(%esp),%ecx movl 52(%esp),%edx movl 56(%esp),%esi movl 60(%esp),%edi addl 64(%esp),%eax adcl 68(%esp),%ebx xorl %esi,%ecx xorl %edi,%edx andl 40(%esp),%ecx andl 44(%esp),%edx addl 192(%esp),%eax adcl 196(%esp),%ebx xorl %esi,%ecx xorl %edi,%edx movl (%ebp),%esi movl 4(%ebp),%edi addl %ecx,%eax adcl %edx,%ebx movl 32(%esp),%ecx movl 36(%esp),%edx addl %esi,%eax adcl %edi,%ebx movl %eax,(%esp) movl %ebx,4(%esp) addl %ecx,%eax adcl %edx,%ebx movl 8(%esp),%ecx movl 12(%esp),%edx movl %eax,32(%esp) movl %ebx,36(%esp) movl %ecx,%esi shrl $2,%ecx movl %edx,%edi shrl $2,%edx movl %ecx,%ebx shll $4,%esi movl %edx,%eax shll $4,%edi xorl %esi,%ebx shrl $5,%ecx xorl %edi,%eax shrl $5,%edx xorl %ecx,%ebx shll $21,%esi xorl %edx,%eax shll $21,%edi xorl %esi,%eax shrl $21,%ecx xorl %edi,%ebx shrl $21,%edx xorl %ecx,%eax shll $5,%esi xorl %edx,%ebx shll $5,%edi xorl %esi,%eax xorl %edi,%ebx movl 8(%esp),%ecx movl 12(%esp),%edx movl 16(%esp),%esi movl 20(%esp),%edi addl (%esp),%eax adcl 4(%esp),%ebx orl %esi,%ecx orl %edi,%edx andl 24(%esp),%ecx andl 28(%esp),%edx andl 8(%esp),%esi andl 12(%esp),%edi orl %esi,%ecx orl %edi,%edx addl %ecx,%eax adcl %edx,%ebx movl %eax,(%esp) movl %ebx,4(%esp) movb (%ebp),%dl subl $8,%esp leal 8(%ebp),%ebp cmpb $148,%dl jne L00900_15_x86 .align 4,0x90 L01016_79_x86: movl 312(%esp),%ecx movl 316(%esp),%edx movl %ecx,%esi shrl $1,%ecx movl %edx,%edi shrl $1,%edx movl %ecx,%eax shll $24,%esi movl %edx,%ebx shll $24,%edi xorl %esi,%ebx shrl $6,%ecx xorl %edi,%eax shrl $6,%edx xorl %ecx,%eax shll $7,%esi xorl %edx,%ebx shll $1,%edi xorl %esi,%ebx shrl $1,%ecx xorl %edi,%eax shrl $1,%edx xorl %ecx,%eax shll $6,%edi xorl %edx,%ebx xorl %edi,%eax movl %eax,(%esp) movl %ebx,4(%esp) movl 208(%esp),%ecx movl 212(%esp),%edx movl %ecx,%esi shrl $6,%ecx movl %edx,%edi shrl $6,%edx movl %ecx,%eax shll $3,%esi movl %edx,%ebx shll $3,%edi xorl %esi,%eax shrl $13,%ecx xorl %edi,%ebx shrl $13,%edx xorl %ecx,%eax shll $10,%esi xorl %edx,%ebx shll $10,%edi xorl %esi,%ebx shrl $10,%ecx xorl %edi,%eax shrl $10,%edx xorl %ecx,%ebx shll $13,%edi xorl %edx,%eax xorl %edi,%eax movl 320(%esp),%ecx movl 324(%esp),%edx addl (%esp),%eax adcl 4(%esp),%ebx movl 248(%esp),%esi movl 252(%esp),%edi addl %ecx,%eax adcl %edx,%ebx addl %esi,%eax adcl %edi,%ebx movl %eax,192(%esp) movl %ebx,196(%esp) movl 40(%esp),%ecx movl 44(%esp),%edx movl %ecx,%esi shrl $9,%ecx movl %edx,%edi shrl $9,%edx movl %ecx,%ebx shll $14,%esi movl %edx,%eax shll $14,%edi xorl %esi,%ebx shrl $5,%ecx xorl %edi,%eax shrl $5,%edx xorl %ecx,%eax shll $4,%esi xorl %edx,%ebx shll $4,%edi xorl %esi,%ebx shrl $4,%ecx xorl %edi,%eax shrl $4,%edx xorl %ecx,%eax shll $5,%esi xorl %edx,%ebx shll $5,%edi xorl %esi,%eax xorl %edi,%ebx movl 48(%esp),%ecx movl 52(%esp),%edx movl 56(%esp),%esi movl 60(%esp),%edi addl 64(%esp),%eax adcl 68(%esp),%ebx xorl %esi,%ecx xorl %edi,%edx andl 40(%esp),%ecx andl 44(%esp),%edx addl 192(%esp),%eax adcl 196(%esp),%ebx xorl %esi,%ecx xorl %edi,%edx movl (%ebp),%esi movl 4(%ebp),%edi addl %ecx,%eax adcl %edx,%ebx movl 32(%esp),%ecx movl 36(%esp),%edx addl %esi,%eax adcl %edi,%ebx movl %eax,(%esp) movl %ebx,4(%esp) addl %ecx,%eax adcl %edx,%ebx movl 8(%esp),%ecx movl 12(%esp),%edx 
movl %eax,32(%esp) movl %ebx,36(%esp) movl %ecx,%esi shrl $2,%ecx movl %edx,%edi shrl $2,%edx movl %ecx,%ebx shll $4,%esi movl %edx,%eax shll $4,%edi xorl %esi,%ebx shrl $5,%ecx xorl %edi,%eax shrl $5,%edx xorl %ecx,%ebx shll $21,%esi xorl %edx,%eax shll $21,%edi xorl %esi,%eax shrl $21,%ecx xorl %edi,%ebx shrl $21,%edx xorl %ecx,%eax shll $5,%esi xorl %edx,%ebx shll $5,%edi xorl %esi,%eax xorl %edi,%ebx movl 8(%esp),%ecx movl 12(%esp),%edx movl 16(%esp),%esi movl 20(%esp),%edi addl (%esp),%eax adcl 4(%esp),%ebx orl %esi,%ecx orl %edi,%edx andl 24(%esp),%ecx andl 28(%esp),%edx andl 8(%esp),%esi andl 12(%esp),%edi orl %esi,%ecx orl %edi,%edx addl %ecx,%eax adcl %edx,%ebx movl %eax,(%esp) movl %ebx,4(%esp) movb (%ebp),%dl subl $8,%esp leal 8(%ebp),%ebp cmpb $23,%dl jne L01016_79_x86 movl 840(%esp),%esi movl 844(%esp),%edi movl (%esi),%eax movl 4(%esi),%ebx movl 8(%esi),%ecx movl 12(%esi),%edx addl 8(%esp),%eax adcl 12(%esp),%ebx movl %eax,(%esi) movl %ebx,4(%esi) addl 16(%esp),%ecx adcl 20(%esp),%edx movl %ecx,8(%esi) movl %edx,12(%esi) movl 16(%esi),%eax movl 20(%esi),%ebx movl 24(%esi),%ecx movl 28(%esi),%edx addl 24(%esp),%eax adcl 28(%esp),%ebx movl %eax,16(%esi) movl %ebx,20(%esi) addl 32(%esp),%ecx adcl 36(%esp),%edx movl %ecx,24(%esi) movl %edx,28(%esi) movl 32(%esi),%eax movl 36(%esi),%ebx movl 40(%esi),%ecx movl 44(%esi),%edx addl 40(%esp),%eax adcl 44(%esp),%ebx movl %eax,32(%esi) movl %ebx,36(%esi) addl 48(%esp),%ecx adcl 52(%esp),%edx movl %ecx,40(%esi) movl %edx,44(%esi) movl 48(%esi),%eax movl 52(%esi),%ebx movl 56(%esi),%ecx movl 60(%esi),%edx addl 56(%esp),%eax adcl 60(%esp),%ebx movl %eax,48(%esi) movl %ebx,52(%esi) addl 64(%esp),%ecx adcl 68(%esp),%edx movl %ecx,56(%esi) movl %edx,60(%esi) addl $840,%esp subl $640,%ebp cmpl 8(%esp),%edi jb L002loop_x86 movl 12(%esp),%esp popl %edi popl %esi popl %ebx popl %ebp ret .align 6,0x90 L001K512: .long 3609767458,1116352408 .long 602891725,1899447441 .long 3964484399,3049323471 .long 2173295548,3921009573 .long 4081628472,961987163 .long 3053834265,1508970993 .long 2937671579,2453635748 .long 3664609560,2870763221 .long 2734883394,3624381080 .long 1164996542,310598401 .long 1323610764,607225278 .long 3590304994,1426881987 .long 4068182383,1925078388 .long 991336113,2162078206 .long 633803317,2614888103 .long 3479774868,3248222580 .long 2666613458,3835390401 .long 944711139,4022224774 .long 2341262773,264347078 .long 2007800933,604807628 .long 1495990901,770255983 .long 1856431235,1249150122 .long 3175218132,1555081692 .long 2198950837,1996064986 .long 3999719339,2554220882 .long 766784016,2821834349 .long 2566594879,2952996808 .long 3203337956,3210313671 .long 1034457026,3336571891 .long 2466948901,3584528711 .long 3758326383,113926993 .long 168717936,338241895 .long 1188179964,666307205 .long 1546045734,773529912 .long 1522805485,1294757372 .long 2643833823,1396182291 .long 2343527390,1695183700 .long 1014477480,1986661051 .long 1206759142,2177026350 .long 344077627,2456956037 .long 1290863460,2730485921 .long 3158454273,2820302411 .long 3505952657,3259730800 .long 106217008,3345764771 .long 3606008344,3516065817 .long 1432725776,3600352804 .long 1467031594,4094571909 .long 851169720,275423344 .long 3100823752,430227734 .long 1363258195,506948616 .long 3750685593,659060556 .long 3785050280,883997877 .long 3318307427,958139571 .long 3812723403,1322822218 .long 2003034995,1537002063 .long 3602036899,1747873779 .long 1575990012,1955562222 .long 1125592928,2024104815 .long 2716904306,2227730452 .long 442776044,2361852424 .long 
593698344,2428436474 .long 3733110249,2756734187 .long 2999351573,3204031479 .long 3815920427,3329325298 .long 3928383900,3391569614 .long 566280711,3515267271 .long 3454069534,3940187606 .long 4000239992,4118630271 .long 1914138554,116418474 .long 2731055270,174292421 .long 3203993006,289380356 .long 320620315,460393269 .long 587496836,685471733 .long 1086792851,852142971 .long 365543100,1017036298 .long 2618297676,1126000580 .long 3409855158,1288033470 .long 4234509866,1501505948 .long 987167468,1607167915 .long 1246189591,1816402316 .long 67438087,66051 .long 202182159,134810123 .byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97 .byte 110,115,102,111,114,109,32,102,111,114,32,120,56,54,44,32 .byte 67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97 .byte 112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103 .byte 62,0 .section __IMPORT,__pointers,non_lazy_symbol_pointers L_OPENSSL_ia32cap_P$non_lazy_ptr: .indirect_symbol _OPENSSL_ia32cap_P .long 0 #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__)
marvin-hansen/iggy-streaming-system
6,387
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/mac-x86/crypto/fipsmodule/ghash-x86.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__) .text .globl _gcm_init_clmul .private_extern _gcm_init_clmul .align 4 _gcm_init_clmul: L_gcm_init_clmul_begin: movl 4(%esp),%edx movl 8(%esp),%eax call L000pic L000pic: popl %ecx leal Lbswap-L000pic(%ecx),%ecx movdqu (%eax),%xmm2 pshufd $78,%xmm2,%xmm2 pshufd $255,%xmm2,%xmm4 movdqa %xmm2,%xmm3 psllq $1,%xmm2 pxor %xmm5,%xmm5 psrlq $63,%xmm3 pcmpgtd %xmm4,%xmm5 pslldq $8,%xmm3 por %xmm3,%xmm2 pand 16(%ecx),%xmm5 pxor %xmm5,%xmm2 movdqa %xmm2,%xmm0 movdqa %xmm0,%xmm1 pshufd $78,%xmm0,%xmm3 pshufd $78,%xmm2,%xmm4 pxor %xmm0,%xmm3 pxor %xmm2,%xmm4 .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,220,0 xorps %xmm0,%xmm3 xorps %xmm1,%xmm3 movdqa %xmm3,%xmm4 psrldq $8,%xmm3 pslldq $8,%xmm4 pxor %xmm3,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 pshufd $78,%xmm2,%xmm3 pshufd $78,%xmm0,%xmm4 pxor %xmm2,%xmm3 movdqu %xmm2,(%edx) pxor %xmm0,%xmm4 movdqu %xmm0,16(%edx) .byte 102,15,58,15,227,8 movdqu %xmm4,32(%edx) ret .globl _gcm_gmult_clmul .private_extern _gcm_gmult_clmul .align 4 _gcm_gmult_clmul: L_gcm_gmult_clmul_begin: movl 4(%esp),%eax movl 8(%esp),%edx call L001pic L001pic: popl %ecx leal Lbswap-L001pic(%ecx),%ecx movdqu (%eax),%xmm0 movdqa (%ecx),%xmm5 movups (%edx),%xmm2 .byte 102,15,56,0,197 movups 32(%edx),%xmm4 movdqa %xmm0,%xmm1 pshufd $78,%xmm0,%xmm3 pxor %xmm0,%xmm3 .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,220,0 xorps %xmm0,%xmm3 xorps %xmm1,%xmm3 movdqa %xmm3,%xmm4 psrldq $8,%xmm3 pslldq $8,%xmm4 pxor %xmm3,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 .byte 102,15,56,0,197 movdqu %xmm0,(%eax) ret .globl _gcm_ghash_clmul .private_extern _gcm_ghash_clmul .align 4 _gcm_ghash_clmul: L_gcm_ghash_clmul_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%eax movl 24(%esp),%edx movl 28(%esp),%esi movl 32(%esp),%ebx call L002pic L002pic: popl %ecx leal Lbswap-L002pic(%ecx),%ecx movdqu (%eax),%xmm0 movdqa (%ecx),%xmm5 movdqu (%edx),%xmm2 .byte 102,15,56,0,197 subl $16,%ebx jz L003odd_tail movdqu (%esi),%xmm3 movdqu 16(%esi),%xmm6 .byte 102,15,56,0,221 .byte 102,15,56,0,245 movdqu 32(%edx),%xmm5 pxor %xmm3,%xmm0 pshufd $78,%xmm6,%xmm3 movdqa %xmm6,%xmm7 pxor %xmm6,%xmm3 leal 32(%esi),%esi .byte 102,15,58,68,242,0 .byte 102,15,58,68,250,17 .byte 102,15,58,68,221,0 movups 16(%edx),%xmm2 nop subl $32,%ebx jbe L004even_tail jmp L005mod_loop .align 5,0x90 L005mod_loop: pshufd $78,%xmm0,%xmm4 movdqa %xmm0,%xmm1 pxor %xmm0,%xmm4 nop .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,229,16 movups (%edx),%xmm2 xorps %xmm6,%xmm0 movdqa (%ecx),%xmm5 xorps %xmm7,%xmm1 movdqu (%esi),%xmm7 pxor %xmm0,%xmm3 movdqu 16(%esi),%xmm6 pxor %xmm1,%xmm3 .byte 102,15,56,0,253 pxor %xmm3,%xmm4 movdqa 
%xmm4,%xmm3 psrldq $8,%xmm4 pslldq $8,%xmm3 pxor %xmm4,%xmm1 pxor %xmm3,%xmm0 .byte 102,15,56,0,245 pxor %xmm7,%xmm1 movdqa %xmm6,%xmm7 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 .byte 102,15,58,68,242,0 movups 32(%edx),%xmm5 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 pshufd $78,%xmm7,%xmm3 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm7,%xmm3 pxor %xmm4,%xmm1 .byte 102,15,58,68,250,17 movups 16(%edx),%xmm2 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 .byte 102,15,58,68,221,0 leal 32(%esi),%esi subl $32,%ebx ja L005mod_loop L004even_tail: pshufd $78,%xmm0,%xmm4 movdqa %xmm0,%xmm1 pxor %xmm0,%xmm4 .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,229,16 movdqa (%ecx),%xmm5 xorps %xmm6,%xmm0 xorps %xmm7,%xmm1 pxor %xmm0,%xmm3 pxor %xmm1,%xmm3 pxor %xmm3,%xmm4 movdqa %xmm4,%xmm3 psrldq $8,%xmm4 pslldq $8,%xmm3 pxor %xmm4,%xmm1 pxor %xmm3,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 testl %ebx,%ebx jnz L006done movups (%edx),%xmm2 L003odd_tail: movdqu (%esi),%xmm3 .byte 102,15,56,0,221 pxor %xmm3,%xmm0 movdqa %xmm0,%xmm1 pshufd $78,%xmm0,%xmm3 pshufd $78,%xmm2,%xmm4 pxor %xmm0,%xmm3 pxor %xmm2,%xmm4 .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,220,0 xorps %xmm0,%xmm3 xorps %xmm1,%xmm3 movdqa %xmm3,%xmm4 psrldq $8,%xmm3 pslldq $8,%xmm4 pxor %xmm3,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 L006done: .byte 102,15,56,0,197 movdqu %xmm0,(%eax) popl %edi popl %esi popl %ebx popl %ebp ret .align 6,0x90 Lbswap: .byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 .byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,194 .byte 71,72,65,83,72,32,102,111,114,32,120,56,54,44,32,67 .byte 82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112 .byte 112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62 .byte 0 #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__)
marvin-hansen/iggy-streaming-system
19,084
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/mac-x86/crypto/chacha/chacha-x86.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__) .text .globl _ChaCha20_ctr32_nohw .private_extern _ChaCha20_ctr32_nohw .align 4 _ChaCha20_ctr32_nohw: L_ChaCha20_ctr32_nohw_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 32(%esp),%esi movl 36(%esp),%edi subl $132,%esp movl (%esi),%eax movl 4(%esi),%ebx movl 8(%esi),%ecx movl 12(%esi),%edx movl %eax,80(%esp) movl %ebx,84(%esp) movl %ecx,88(%esp) movl %edx,92(%esp) movl 16(%esi),%eax movl 20(%esi),%ebx movl 24(%esi),%ecx movl 28(%esi),%edx movl %eax,96(%esp) movl %ebx,100(%esp) movl %ecx,104(%esp) movl %edx,108(%esp) movl (%edi),%eax movl 4(%edi),%ebx movl 8(%edi),%ecx movl 12(%edi),%edx subl $1,%eax movl %eax,112(%esp) movl %ebx,116(%esp) movl %ecx,120(%esp) movl %edx,124(%esp) jmp L000entry .align 4,0x90 L001outer_loop: movl %ebx,156(%esp) movl %eax,152(%esp) movl %ecx,160(%esp) L000entry: movl $1634760805,%eax movl $857760878,4(%esp) movl $2036477234,8(%esp) movl $1797285236,12(%esp) movl 84(%esp),%ebx movl 88(%esp),%ebp movl 104(%esp),%ecx movl 108(%esp),%esi movl 116(%esp),%edx movl 120(%esp),%edi movl %ebx,20(%esp) movl %ebp,24(%esp) movl %ecx,40(%esp) movl %esi,44(%esp) movl %edx,52(%esp) movl %edi,56(%esp) movl 92(%esp),%ebx movl 124(%esp),%edi movl 112(%esp),%edx movl 80(%esp),%ebp movl 96(%esp),%ecx movl 100(%esp),%esi addl $1,%edx movl %ebx,28(%esp) movl %edi,60(%esp) movl %edx,112(%esp) movl $10,%ebx jmp L002loop .align 4,0x90 L002loop: addl %ebp,%eax movl %ebx,128(%esp) movl %ebp,%ebx xorl %eax,%edx roll $16,%edx addl %edx,%ecx xorl %ecx,%ebx movl 52(%esp),%edi roll $12,%ebx movl 20(%esp),%ebp addl %ebx,%eax xorl %eax,%edx movl %eax,(%esp) roll $8,%edx movl 4(%esp),%eax addl %edx,%ecx movl %edx,48(%esp) xorl %ecx,%ebx addl %ebp,%eax roll $7,%ebx xorl %eax,%edi movl %ecx,32(%esp) roll $16,%edi movl %ebx,16(%esp) addl %edi,%esi movl 40(%esp),%ecx xorl %esi,%ebp movl 56(%esp),%edx roll $12,%ebp movl 24(%esp),%ebx addl %ebp,%eax xorl %eax,%edi movl %eax,4(%esp) roll $8,%edi movl 8(%esp),%eax addl %edi,%esi movl %edi,52(%esp) xorl %esi,%ebp addl %ebx,%eax roll $7,%ebp xorl %eax,%edx movl %esi,36(%esp) roll $16,%edx movl %ebp,20(%esp) addl %edx,%ecx movl 44(%esp),%esi xorl %ecx,%ebx movl 60(%esp),%edi roll $12,%ebx movl 28(%esp),%ebp addl %ebx,%eax xorl %eax,%edx movl %eax,8(%esp) roll $8,%edx movl 12(%esp),%eax addl %edx,%ecx movl %edx,56(%esp) xorl %ecx,%ebx addl %ebp,%eax roll $7,%ebx xorl %eax,%edi roll $16,%edi movl %ebx,24(%esp) addl %edi,%esi xorl %esi,%ebp roll $12,%ebp movl 20(%esp),%ebx addl %ebp,%eax xorl %eax,%edi movl %eax,12(%esp) roll $8,%edi movl (%esp),%eax addl %edi,%esi movl %edi,%edx xorl %esi,%ebp addl %ebx,%eax roll $7,%ebp xorl %eax,%edx roll $16,%edx movl %ebp,28(%esp) addl %edx,%ecx xorl %ecx,%ebx movl 48(%esp),%edi roll $12,%ebx movl 24(%esp),%ebp addl %ebx,%eax xorl %eax,%edx movl %eax,(%esp) roll $8,%edx movl 4(%esp),%eax addl %edx,%ecx movl %edx,60(%esp) xorl %ecx,%ebx addl %ebp,%eax roll $7,%ebx xorl %eax,%edi movl %ecx,40(%esp) roll $16,%edi movl %ebx,20(%esp) addl %edi,%esi movl 32(%esp),%ecx xorl %esi,%ebp movl 52(%esp),%edx roll $12,%ebp movl 28(%esp),%ebx addl %ebp,%eax xorl %eax,%edi movl %eax,4(%esp) roll $8,%edi movl 8(%esp),%eax addl %edi,%esi movl %edi,48(%esp) xorl %esi,%ebp addl %ebx,%eax roll $7,%ebp xorl %eax,%edx movl %esi,44(%esp) roll $16,%edx movl %ebp,24(%esp) addl %edx,%ecx movl 36(%esp),%esi xorl %ecx,%ebx 
movl 56(%esp),%edi roll $12,%ebx movl 16(%esp),%ebp addl %ebx,%eax xorl %eax,%edx movl %eax,8(%esp) roll $8,%edx movl 12(%esp),%eax addl %edx,%ecx movl %edx,52(%esp) xorl %ecx,%ebx addl %ebp,%eax roll $7,%ebx xorl %eax,%edi roll $16,%edi movl %ebx,28(%esp) addl %edi,%esi xorl %esi,%ebp movl 48(%esp),%edx roll $12,%ebp movl 128(%esp),%ebx addl %ebp,%eax xorl %eax,%edi movl %eax,12(%esp) roll $8,%edi movl (%esp),%eax addl %edi,%esi movl %edi,56(%esp) xorl %esi,%ebp roll $7,%ebp decl %ebx jnz L002loop movl 160(%esp),%ebx addl $1634760805,%eax addl 80(%esp),%ebp addl 96(%esp),%ecx addl 100(%esp),%esi cmpl $64,%ebx jb L003tail movl 156(%esp),%ebx addl 112(%esp),%edx addl 120(%esp),%edi xorl (%ebx),%eax xorl 16(%ebx),%ebp movl %eax,(%esp) movl 152(%esp),%eax xorl 32(%ebx),%ecx xorl 36(%ebx),%esi xorl 48(%ebx),%edx xorl 56(%ebx),%edi movl %ebp,16(%eax) movl %ecx,32(%eax) movl %esi,36(%eax) movl %edx,48(%eax) movl %edi,56(%eax) movl 4(%esp),%ebp movl 8(%esp),%ecx movl 12(%esp),%esi movl 20(%esp),%edx movl 24(%esp),%edi addl $857760878,%ebp addl $2036477234,%ecx addl $1797285236,%esi addl 84(%esp),%edx addl 88(%esp),%edi xorl 4(%ebx),%ebp xorl 8(%ebx),%ecx xorl 12(%ebx),%esi xorl 20(%ebx),%edx xorl 24(%ebx),%edi movl %ebp,4(%eax) movl %ecx,8(%eax) movl %esi,12(%eax) movl %edx,20(%eax) movl %edi,24(%eax) movl 28(%esp),%ebp movl 40(%esp),%ecx movl 44(%esp),%esi movl 52(%esp),%edx movl 60(%esp),%edi addl 92(%esp),%ebp addl 104(%esp),%ecx addl 108(%esp),%esi addl 116(%esp),%edx addl 124(%esp),%edi xorl 28(%ebx),%ebp xorl 40(%ebx),%ecx xorl 44(%ebx),%esi xorl 52(%ebx),%edx xorl 60(%ebx),%edi leal 64(%ebx),%ebx movl %ebp,28(%eax) movl (%esp),%ebp movl %ecx,40(%eax) movl 160(%esp),%ecx movl %esi,44(%eax) movl %edx,52(%eax) movl %edi,60(%eax) movl %ebp,(%eax) leal 64(%eax),%eax subl $64,%ecx jnz L001outer_loop jmp L004done L003tail: addl 112(%esp),%edx addl 120(%esp),%edi movl %eax,(%esp) movl %ebp,16(%esp) movl %ecx,32(%esp) movl %esi,36(%esp) movl %edx,48(%esp) movl %edi,56(%esp) movl 4(%esp),%ebp movl 8(%esp),%ecx movl 12(%esp),%esi movl 20(%esp),%edx movl 24(%esp),%edi addl $857760878,%ebp addl $2036477234,%ecx addl $1797285236,%esi addl 84(%esp),%edx addl 88(%esp),%edi movl %ebp,4(%esp) movl %ecx,8(%esp) movl %esi,12(%esp) movl %edx,20(%esp) movl %edi,24(%esp) movl 28(%esp),%ebp movl 40(%esp),%ecx movl 44(%esp),%esi movl 52(%esp),%edx movl 60(%esp),%edi addl 92(%esp),%ebp addl 104(%esp),%ecx addl 108(%esp),%esi addl 116(%esp),%edx addl 124(%esp),%edi movl %ebp,28(%esp) movl 156(%esp),%ebp movl %ecx,40(%esp) movl 152(%esp),%ecx movl %esi,44(%esp) xorl %esi,%esi movl %edx,52(%esp) movl %edi,60(%esp) xorl %eax,%eax xorl %edx,%edx L005tail_loop: movb (%esi,%ebp,1),%al movb (%esp,%esi,1),%dl leal 1(%esi),%esi xorb %dl,%al movb %al,-1(%ecx,%esi,1) decl %ebx jnz L005tail_loop L004done: addl $132,%esp popl %edi popl %esi popl %ebx popl %ebp ret .globl _ChaCha20_ctr32_ssse3 .private_extern _ChaCha20_ctr32_ssse3 .align 4 _ChaCha20_ctr32_ssse3: L_ChaCha20_ctr32_ssse3_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi call Lpic_point Lpic_point: popl %eax movl 20(%esp),%edi movl 24(%esp),%esi movl 28(%esp),%ecx movl 32(%esp),%edx movl 36(%esp),%ebx movl %esp,%ebp subl $524,%esp andl $-64,%esp movl %ebp,512(%esp) leal Lssse3_data-Lpic_point(%eax),%eax movdqu (%ebx),%xmm3 cmpl $256,%ecx jb L0061x movl %edx,516(%esp) movl %ebx,520(%esp) subl $256,%ecx leal 384(%esp),%ebp movdqu (%edx),%xmm7 pshufd $0,%xmm3,%xmm0 pshufd $85,%xmm3,%xmm1 pshufd $170,%xmm3,%xmm2 pshufd $255,%xmm3,%xmm3 paddd 48(%eax),%xmm0 pshufd 
$0,%xmm7,%xmm4 pshufd $85,%xmm7,%xmm5 psubd 64(%eax),%xmm0 pshufd $170,%xmm7,%xmm6 pshufd $255,%xmm7,%xmm7 movdqa %xmm0,64(%ebp) movdqa %xmm1,80(%ebp) movdqa %xmm2,96(%ebp) movdqa %xmm3,112(%ebp) movdqu 16(%edx),%xmm3 movdqa %xmm4,-64(%ebp) movdqa %xmm5,-48(%ebp) movdqa %xmm6,-32(%ebp) movdqa %xmm7,-16(%ebp) movdqa 32(%eax),%xmm7 leal 128(%esp),%ebx pshufd $0,%xmm3,%xmm0 pshufd $85,%xmm3,%xmm1 pshufd $170,%xmm3,%xmm2 pshufd $255,%xmm3,%xmm3 pshufd $0,%xmm7,%xmm4 pshufd $85,%xmm7,%xmm5 pshufd $170,%xmm7,%xmm6 pshufd $255,%xmm7,%xmm7 movdqa %xmm0,(%ebp) movdqa %xmm1,16(%ebp) movdqa %xmm2,32(%ebp) movdqa %xmm3,48(%ebp) movdqa %xmm4,-128(%ebp) movdqa %xmm5,-112(%ebp) movdqa %xmm6,-96(%ebp) movdqa %xmm7,-80(%ebp) leal 128(%esi),%esi leal 128(%edi),%edi jmp L007outer_loop .align 4,0x90 L007outer_loop: movdqa -112(%ebp),%xmm1 movdqa -96(%ebp),%xmm2 movdqa -80(%ebp),%xmm3 movdqa -48(%ebp),%xmm5 movdqa -32(%ebp),%xmm6 movdqa -16(%ebp),%xmm7 movdqa %xmm1,-112(%ebx) movdqa %xmm2,-96(%ebx) movdqa %xmm3,-80(%ebx) movdqa %xmm5,-48(%ebx) movdqa %xmm6,-32(%ebx) movdqa %xmm7,-16(%ebx) movdqa 32(%ebp),%xmm2 movdqa 48(%ebp),%xmm3 movdqa 64(%ebp),%xmm4 movdqa 80(%ebp),%xmm5 movdqa 96(%ebp),%xmm6 movdqa 112(%ebp),%xmm7 paddd 64(%eax),%xmm4 movdqa %xmm2,32(%ebx) movdqa %xmm3,48(%ebx) movdqa %xmm4,64(%ebx) movdqa %xmm5,80(%ebx) movdqa %xmm6,96(%ebx) movdqa %xmm7,112(%ebx) movdqa %xmm4,64(%ebp) movdqa -128(%ebp),%xmm0 movdqa %xmm4,%xmm6 movdqa -64(%ebp),%xmm3 movdqa (%ebp),%xmm4 movdqa 16(%ebp),%xmm5 movl $10,%edx nop .align 4,0x90 L008loop: paddd %xmm3,%xmm0 movdqa %xmm3,%xmm2 pxor %xmm0,%xmm6 pshufb (%eax),%xmm6 paddd %xmm6,%xmm4 pxor %xmm4,%xmm2 movdqa -48(%ebx),%xmm3 movdqa %xmm2,%xmm1 pslld $12,%xmm2 psrld $20,%xmm1 por %xmm1,%xmm2 movdqa -112(%ebx),%xmm1 paddd %xmm2,%xmm0 movdqa 80(%ebx),%xmm7 pxor %xmm0,%xmm6 movdqa %xmm0,-128(%ebx) pshufb 16(%eax),%xmm6 paddd %xmm6,%xmm4 movdqa %xmm6,64(%ebx) pxor %xmm4,%xmm2 paddd %xmm3,%xmm1 movdqa %xmm2,%xmm0 pslld $7,%xmm2 psrld $25,%xmm0 pxor %xmm1,%xmm7 por %xmm0,%xmm2 movdqa %xmm4,(%ebx) pshufb (%eax),%xmm7 movdqa %xmm2,-64(%ebx) paddd %xmm7,%xmm5 movdqa 32(%ebx),%xmm4 pxor %xmm5,%xmm3 movdqa -32(%ebx),%xmm2 movdqa %xmm3,%xmm0 pslld $12,%xmm3 psrld $20,%xmm0 por %xmm0,%xmm3 movdqa -96(%ebx),%xmm0 paddd %xmm3,%xmm1 movdqa 96(%ebx),%xmm6 pxor %xmm1,%xmm7 movdqa %xmm1,-112(%ebx) pshufb 16(%eax),%xmm7 paddd %xmm7,%xmm5 movdqa %xmm7,80(%ebx) pxor %xmm5,%xmm3 paddd %xmm2,%xmm0 movdqa %xmm3,%xmm1 pslld $7,%xmm3 psrld $25,%xmm1 pxor %xmm0,%xmm6 por %xmm1,%xmm3 movdqa %xmm5,16(%ebx) pshufb (%eax),%xmm6 movdqa %xmm3,-48(%ebx) paddd %xmm6,%xmm4 movdqa 48(%ebx),%xmm5 pxor %xmm4,%xmm2 movdqa -16(%ebx),%xmm3 movdqa %xmm2,%xmm1 pslld $12,%xmm2 psrld $20,%xmm1 por %xmm1,%xmm2 movdqa -80(%ebx),%xmm1 paddd %xmm2,%xmm0 movdqa 112(%ebx),%xmm7 pxor %xmm0,%xmm6 movdqa %xmm0,-96(%ebx) pshufb 16(%eax),%xmm6 paddd %xmm6,%xmm4 movdqa %xmm6,96(%ebx) pxor %xmm4,%xmm2 paddd %xmm3,%xmm1 movdqa %xmm2,%xmm0 pslld $7,%xmm2 psrld $25,%xmm0 pxor %xmm1,%xmm7 por %xmm0,%xmm2 pshufb (%eax),%xmm7 movdqa %xmm2,-32(%ebx) paddd %xmm7,%xmm5 pxor %xmm5,%xmm3 movdqa -48(%ebx),%xmm2 movdqa %xmm3,%xmm0 pslld $12,%xmm3 psrld $20,%xmm0 por %xmm0,%xmm3 movdqa -128(%ebx),%xmm0 paddd %xmm3,%xmm1 pxor %xmm1,%xmm7 movdqa %xmm1,-80(%ebx) pshufb 16(%eax),%xmm7 paddd %xmm7,%xmm5 movdqa %xmm7,%xmm6 pxor %xmm5,%xmm3 paddd %xmm2,%xmm0 movdqa %xmm3,%xmm1 pslld $7,%xmm3 psrld $25,%xmm1 pxor %xmm0,%xmm6 por %xmm1,%xmm3 pshufb (%eax),%xmm6 movdqa %xmm3,-16(%ebx) paddd %xmm6,%xmm4 pxor %xmm4,%xmm2 movdqa -32(%ebx),%xmm3 movdqa 
%xmm2,%xmm1 pslld $12,%xmm2 psrld $20,%xmm1 por %xmm1,%xmm2 movdqa -112(%ebx),%xmm1 paddd %xmm2,%xmm0 movdqa 64(%ebx),%xmm7 pxor %xmm0,%xmm6 movdqa %xmm0,-128(%ebx) pshufb 16(%eax),%xmm6 paddd %xmm6,%xmm4 movdqa %xmm6,112(%ebx) pxor %xmm4,%xmm2 paddd %xmm3,%xmm1 movdqa %xmm2,%xmm0 pslld $7,%xmm2 psrld $25,%xmm0 pxor %xmm1,%xmm7 por %xmm0,%xmm2 movdqa %xmm4,32(%ebx) pshufb (%eax),%xmm7 movdqa %xmm2,-48(%ebx) paddd %xmm7,%xmm5 movdqa (%ebx),%xmm4 pxor %xmm5,%xmm3 movdqa -16(%ebx),%xmm2 movdqa %xmm3,%xmm0 pslld $12,%xmm3 psrld $20,%xmm0 por %xmm0,%xmm3 movdqa -96(%ebx),%xmm0 paddd %xmm3,%xmm1 movdqa 80(%ebx),%xmm6 pxor %xmm1,%xmm7 movdqa %xmm1,-112(%ebx) pshufb 16(%eax),%xmm7 paddd %xmm7,%xmm5 movdqa %xmm7,64(%ebx) pxor %xmm5,%xmm3 paddd %xmm2,%xmm0 movdqa %xmm3,%xmm1 pslld $7,%xmm3 psrld $25,%xmm1 pxor %xmm0,%xmm6 por %xmm1,%xmm3 movdqa %xmm5,48(%ebx) pshufb (%eax),%xmm6 movdqa %xmm3,-32(%ebx) paddd %xmm6,%xmm4 movdqa 16(%ebx),%xmm5 pxor %xmm4,%xmm2 movdqa -64(%ebx),%xmm3 movdqa %xmm2,%xmm1 pslld $12,%xmm2 psrld $20,%xmm1 por %xmm1,%xmm2 movdqa -80(%ebx),%xmm1 paddd %xmm2,%xmm0 movdqa 96(%ebx),%xmm7 pxor %xmm0,%xmm6 movdqa %xmm0,-96(%ebx) pshufb 16(%eax),%xmm6 paddd %xmm6,%xmm4 movdqa %xmm6,80(%ebx) pxor %xmm4,%xmm2 paddd %xmm3,%xmm1 movdqa %xmm2,%xmm0 pslld $7,%xmm2 psrld $25,%xmm0 pxor %xmm1,%xmm7 por %xmm0,%xmm2 pshufb (%eax),%xmm7 movdqa %xmm2,-16(%ebx) paddd %xmm7,%xmm5 pxor %xmm5,%xmm3 movdqa %xmm3,%xmm0 pslld $12,%xmm3 psrld $20,%xmm0 por %xmm0,%xmm3 movdqa -128(%ebx),%xmm0 paddd %xmm3,%xmm1 movdqa 64(%ebx),%xmm6 pxor %xmm1,%xmm7 movdqa %xmm1,-80(%ebx) pshufb 16(%eax),%xmm7 paddd %xmm7,%xmm5 movdqa %xmm7,96(%ebx) pxor %xmm5,%xmm3 movdqa %xmm3,%xmm1 pslld $7,%xmm3 psrld $25,%xmm1 por %xmm1,%xmm3 decl %edx jnz L008loop movdqa %xmm3,-64(%ebx) movdqa %xmm4,(%ebx) movdqa %xmm5,16(%ebx) movdqa %xmm6,64(%ebx) movdqa %xmm7,96(%ebx) movdqa -112(%ebx),%xmm1 movdqa -96(%ebx),%xmm2 movdqa -80(%ebx),%xmm3 paddd -128(%ebp),%xmm0 paddd -112(%ebp),%xmm1 paddd -96(%ebp),%xmm2 paddd -80(%ebp),%xmm3 movdqa %xmm0,%xmm6 punpckldq %xmm1,%xmm0 movdqa %xmm2,%xmm7 punpckldq %xmm3,%xmm2 punpckhdq %xmm1,%xmm6 punpckhdq %xmm3,%xmm7 movdqa %xmm0,%xmm1 punpcklqdq %xmm2,%xmm0 movdqa %xmm6,%xmm3 punpcklqdq %xmm7,%xmm6 punpckhqdq %xmm2,%xmm1 punpckhqdq %xmm7,%xmm3 movdqu -128(%esi),%xmm4 movdqu -64(%esi),%xmm5 movdqu (%esi),%xmm2 movdqu 64(%esi),%xmm7 leal 16(%esi),%esi pxor %xmm0,%xmm4 movdqa -64(%ebx),%xmm0 pxor %xmm1,%xmm5 movdqa -48(%ebx),%xmm1 pxor %xmm2,%xmm6 movdqa -32(%ebx),%xmm2 pxor %xmm3,%xmm7 movdqa -16(%ebx),%xmm3 movdqu %xmm4,-128(%edi) movdqu %xmm5,-64(%edi) movdqu %xmm6,(%edi) movdqu %xmm7,64(%edi) leal 16(%edi),%edi paddd -64(%ebp),%xmm0 paddd -48(%ebp),%xmm1 paddd -32(%ebp),%xmm2 paddd -16(%ebp),%xmm3 movdqa %xmm0,%xmm6 punpckldq %xmm1,%xmm0 movdqa %xmm2,%xmm7 punpckldq %xmm3,%xmm2 punpckhdq %xmm1,%xmm6 punpckhdq %xmm3,%xmm7 movdqa %xmm0,%xmm1 punpcklqdq %xmm2,%xmm0 movdqa %xmm6,%xmm3 punpcklqdq %xmm7,%xmm6 punpckhqdq %xmm2,%xmm1 punpckhqdq %xmm7,%xmm3 movdqu -128(%esi),%xmm4 movdqu -64(%esi),%xmm5 movdqu (%esi),%xmm2 movdqu 64(%esi),%xmm7 leal 16(%esi),%esi pxor %xmm0,%xmm4 movdqa (%ebx),%xmm0 pxor %xmm1,%xmm5 movdqa 16(%ebx),%xmm1 pxor %xmm2,%xmm6 movdqa 32(%ebx),%xmm2 pxor %xmm3,%xmm7 movdqa 48(%ebx),%xmm3 movdqu %xmm4,-128(%edi) movdqu %xmm5,-64(%edi) movdqu %xmm6,(%edi) movdqu %xmm7,64(%edi) leal 16(%edi),%edi paddd (%ebp),%xmm0 paddd 16(%ebp),%xmm1 paddd 32(%ebp),%xmm2 paddd 48(%ebp),%xmm3 movdqa %xmm0,%xmm6 punpckldq %xmm1,%xmm0 movdqa %xmm2,%xmm7 punpckldq %xmm3,%xmm2 punpckhdq %xmm1,%xmm6 
punpckhdq %xmm3,%xmm7 movdqa %xmm0,%xmm1 punpcklqdq %xmm2,%xmm0 movdqa %xmm6,%xmm3 punpcklqdq %xmm7,%xmm6 punpckhqdq %xmm2,%xmm1 punpckhqdq %xmm7,%xmm3 movdqu -128(%esi),%xmm4 movdqu -64(%esi),%xmm5 movdqu (%esi),%xmm2 movdqu 64(%esi),%xmm7 leal 16(%esi),%esi pxor %xmm0,%xmm4 movdqa 64(%ebx),%xmm0 pxor %xmm1,%xmm5 movdqa 80(%ebx),%xmm1 pxor %xmm2,%xmm6 movdqa 96(%ebx),%xmm2 pxor %xmm3,%xmm7 movdqa 112(%ebx),%xmm3 movdqu %xmm4,-128(%edi) movdqu %xmm5,-64(%edi) movdqu %xmm6,(%edi) movdqu %xmm7,64(%edi) leal 16(%edi),%edi paddd 64(%ebp),%xmm0 paddd 80(%ebp),%xmm1 paddd 96(%ebp),%xmm2 paddd 112(%ebp),%xmm3 movdqa %xmm0,%xmm6 punpckldq %xmm1,%xmm0 movdqa %xmm2,%xmm7 punpckldq %xmm3,%xmm2 punpckhdq %xmm1,%xmm6 punpckhdq %xmm3,%xmm7 movdqa %xmm0,%xmm1 punpcklqdq %xmm2,%xmm0 movdqa %xmm6,%xmm3 punpcklqdq %xmm7,%xmm6 punpckhqdq %xmm2,%xmm1 punpckhqdq %xmm7,%xmm3 movdqu -128(%esi),%xmm4 movdqu -64(%esi),%xmm5 movdqu (%esi),%xmm2 movdqu 64(%esi),%xmm7 leal 208(%esi),%esi pxor %xmm0,%xmm4 pxor %xmm1,%xmm5 pxor %xmm2,%xmm6 pxor %xmm3,%xmm7 movdqu %xmm4,-128(%edi) movdqu %xmm5,-64(%edi) movdqu %xmm6,(%edi) movdqu %xmm7,64(%edi) leal 208(%edi),%edi subl $256,%ecx jnc L007outer_loop addl $256,%ecx jz L009done movl 520(%esp),%ebx leal -128(%esi),%esi movl 516(%esp),%edx leal -128(%edi),%edi movd 64(%ebp),%xmm2 movdqu (%ebx),%xmm3 paddd 96(%eax),%xmm2 pand 112(%eax),%xmm3 por %xmm2,%xmm3 L0061x: movdqa 32(%eax),%xmm0 movdqu (%edx),%xmm1 movdqu 16(%edx),%xmm2 movdqa (%eax),%xmm6 movdqa 16(%eax),%xmm7 movl %ebp,48(%esp) movdqa %xmm0,(%esp) movdqa %xmm1,16(%esp) movdqa %xmm2,32(%esp) movdqa %xmm3,48(%esp) movl $10,%edx jmp L010loop1x .align 4,0x90 L011outer1x: movdqa 80(%eax),%xmm3 movdqa (%esp),%xmm0 movdqa 16(%esp),%xmm1 movdqa 32(%esp),%xmm2 paddd 48(%esp),%xmm3 movl $10,%edx movdqa %xmm3,48(%esp) jmp L010loop1x .align 4,0x90 L010loop1x: paddd %xmm1,%xmm0 pxor %xmm0,%xmm3 .byte 102,15,56,0,222 paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm4 psrld $20,%xmm1 pslld $12,%xmm4 por %xmm4,%xmm1 paddd %xmm1,%xmm0 pxor %xmm0,%xmm3 .byte 102,15,56,0,223 paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm4 psrld $25,%xmm1 pslld $7,%xmm4 por %xmm4,%xmm1 pshufd $78,%xmm2,%xmm2 pshufd $57,%xmm1,%xmm1 pshufd $147,%xmm3,%xmm3 nop paddd %xmm1,%xmm0 pxor %xmm0,%xmm3 .byte 102,15,56,0,222 paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm4 psrld $20,%xmm1 pslld $12,%xmm4 por %xmm4,%xmm1 paddd %xmm1,%xmm0 pxor %xmm0,%xmm3 .byte 102,15,56,0,223 paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm4 psrld $25,%xmm1 pslld $7,%xmm4 por %xmm4,%xmm1 pshufd $78,%xmm2,%xmm2 pshufd $147,%xmm1,%xmm1 pshufd $57,%xmm3,%xmm3 decl %edx jnz L010loop1x paddd (%esp),%xmm0 paddd 16(%esp),%xmm1 paddd 32(%esp),%xmm2 paddd 48(%esp),%xmm3 cmpl $64,%ecx jb L012tail movdqu (%esi),%xmm4 movdqu 16(%esi),%xmm5 pxor %xmm4,%xmm0 movdqu 32(%esi),%xmm4 pxor %xmm5,%xmm1 movdqu 48(%esi),%xmm5 pxor %xmm4,%xmm2 pxor %xmm5,%xmm3 leal 64(%esi),%esi movdqu %xmm0,(%edi) movdqu %xmm1,16(%edi) movdqu %xmm2,32(%edi) movdqu %xmm3,48(%edi) leal 64(%edi),%edi subl $64,%ecx jnz L011outer1x jmp L009done L012tail: movdqa %xmm0,(%esp) movdqa %xmm1,16(%esp) movdqa %xmm2,32(%esp) movdqa %xmm3,48(%esp) xorl %eax,%eax xorl %edx,%edx xorl %ebp,%ebp L013tail_loop: movb (%esp,%ebp,1),%al movb (%esi,%ebp,1),%dl leal 1(%ebp),%ebp xorb %dl,%al movb %al,-1(%edi,%ebp,1) decl %ecx jnz L013tail_loop L009done: movl 512(%esp),%esp popl %edi popl %esi popl %ebx popl %ebp ret .align 6,0x90 Lssse3_data: .byte 2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13 .byte 
3,0,1,2,7,4,5,6,11,8,9,10,15,12,13,14 .long 1634760805,857760878,2036477234,1797285236 .long 0,1,2,3 .long 4,4,4,4 .long 1,0,0,0 .long 4,0,0,0 .long 0,-1,-1,-1 .align 6,0x90 .byte 67,104,97,67,104,97,50,48,32,102,111,114,32,120,56,54 .byte 44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32 .byte 60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111 .byte 114,103,62,0 #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__)
marvin-hansen/iggy-streaming-system
6,847
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/ios-arm/crypto/test/trampoline-armv4.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__APPLE__) .syntax unified .text @ abi_test_trampoline loads callee-saved registers from |state|, calls |func| @ with |argv|, then saves the callee-saved registers into |state|. It returns @ the result of |func|. The |unwind| argument is unused. @ uint32_t abi_test_trampoline(void (*func)(...), CallerState *state, @ const uint32_t *argv, size_t argc, @ int unwind); .globl _abi_test_trampoline .private_extern _abi_test_trampoline .align 4 _abi_test_trampoline: @ Save parameters and all callee-saved registers. For convenience, we @ save r9 on iOS even though it's volatile. vstmdb sp!, {d8,d9,d10,d11,d12,d13,d14,d15} stmdb sp!, {r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,lr} @ Reserve stack space for six (10-4) stack parameters, plus an extra 4 @ bytes to keep it 8-byte-aligned (see AAPCS, section 5.3). sub sp, sp, #28 @ Every register in AAPCS is either non-volatile or a parameter (except @ r9 on iOS), so this code, by the actual call, loses all its scratch @ registers. First fill in stack parameters while there are registers @ to spare. cmp r3, #4 bls Lstack_args_done mov r4, sp @ r4 is the output pointer. add r5, r2, r3, lsl #2 @ Set r5 to the end of argv. add r2, r2, #16 @ Skip four arguments. Lstack_args_loop: ldr r6, [r2], #4 cmp r2, r5 str r6, [r4], #4 bne Lstack_args_loop Lstack_args_done: @ Load registers from |r1|. vldmia r1!, {d8,d9,d10,d11,d12,d13,d14,d15} #if defined(__APPLE__) @ r9 is not volatile on iOS. ldmia r1!, {r4,r5,r6,r7,r8,r10-r11} #else ldmia r1!, {r4,r5,r6,r7,r8,r9,r10,r11} #endif @ Load register parameters. This uses up our remaining registers, so we @ repurpose lr as scratch space. ldr r3, [sp, #40] @ Reload argc. ldr lr, [sp, #36] @ Load argv into lr. cmp r3, #3 bhi Larg_r3 beq Larg_r2 cmp r3, #1 bhi Larg_r1 beq Larg_r0 b Largs_done Larg_r3: ldr r3, [lr, #12] @ argv[3] Larg_r2: ldr r2, [lr, #8] @ argv[2] Larg_r1: ldr r1, [lr, #4] @ argv[1] Larg_r0: ldr r0, [lr] @ argv[0] Largs_done: @ With every other register in use, load the function pointer into lr @ and call the function. ldr lr, [sp, #28] blx lr @ r1-r3 are free for use again. The trampoline only supports @ single-return functions. Pass r4-r11 to the caller. ldr r1, [sp, #32] vstmia r1!, {d8,d9,d10,d11,d12,d13,d14,d15} #if defined(__APPLE__) @ r9 is not volatile on iOS. stmia r1!, {r4,r5,r6,r7,r8,r10-r11} #else stmia r1!, {r4,r5,r6,r7,r8,r9,r10,r11} #endif @ Unwind the stack and restore registers. add sp, sp, #44 @ 44 = 28+16 ldmia sp!, {r4,r5,r6,r7,r8,r9,r10,r11,lr} @ Skip r0-r3 (see +16 above). 
vldmia sp!, {d8,d9,d10,d11,d12,d13,d14,d15} bx lr .globl _abi_test_clobber_r0 .private_extern _abi_test_clobber_r0 .align 4 _abi_test_clobber_r0: mov r0, #0 bx lr .globl _abi_test_clobber_r1 .private_extern _abi_test_clobber_r1 .align 4 _abi_test_clobber_r1: mov r1, #0 bx lr .globl _abi_test_clobber_r2 .private_extern _abi_test_clobber_r2 .align 4 _abi_test_clobber_r2: mov r2, #0 bx lr .globl _abi_test_clobber_r3 .private_extern _abi_test_clobber_r3 .align 4 _abi_test_clobber_r3: mov r3, #0 bx lr .globl _abi_test_clobber_r4 .private_extern _abi_test_clobber_r4 .align 4 _abi_test_clobber_r4: mov r4, #0 bx lr .globl _abi_test_clobber_r5 .private_extern _abi_test_clobber_r5 .align 4 _abi_test_clobber_r5: mov r5, #0 bx lr .globl _abi_test_clobber_r6 .private_extern _abi_test_clobber_r6 .align 4 _abi_test_clobber_r6: mov r6, #0 bx lr .globl _abi_test_clobber_r7 .private_extern _abi_test_clobber_r7 .align 4 _abi_test_clobber_r7: mov r7, #0 bx lr .globl _abi_test_clobber_r8 .private_extern _abi_test_clobber_r8 .align 4 _abi_test_clobber_r8: mov r8, #0 bx lr .globl _abi_test_clobber_r9 .private_extern _abi_test_clobber_r9 .align 4 _abi_test_clobber_r9: mov r9, #0 bx lr .globl _abi_test_clobber_r10 .private_extern _abi_test_clobber_r10 .align 4 _abi_test_clobber_r10: mov r10, #0 bx lr .globl _abi_test_clobber_r11 .private_extern _abi_test_clobber_r11 .align 4 _abi_test_clobber_r11: mov r11, #0 bx lr .globl _abi_test_clobber_r12 .private_extern _abi_test_clobber_r12 .align 4 _abi_test_clobber_r12: mov r12, #0 bx lr .globl _abi_test_clobber_d0 .private_extern _abi_test_clobber_d0 .align 4 _abi_test_clobber_d0: mov r0, #0 vmov s0, r0 vmov s1, r0 bx lr .globl _abi_test_clobber_d1 .private_extern _abi_test_clobber_d1 .align 4 _abi_test_clobber_d1: mov r0, #0 vmov s2, r0 vmov s3, r0 bx lr .globl _abi_test_clobber_d2 .private_extern _abi_test_clobber_d2 .align 4 _abi_test_clobber_d2: mov r0, #0 vmov s4, r0 vmov s5, r0 bx lr .globl _abi_test_clobber_d3 .private_extern _abi_test_clobber_d3 .align 4 _abi_test_clobber_d3: mov r0, #0 vmov s6, r0 vmov s7, r0 bx lr .globl _abi_test_clobber_d4 .private_extern _abi_test_clobber_d4 .align 4 _abi_test_clobber_d4: mov r0, #0 vmov s8, r0 vmov s9, r0 bx lr .globl _abi_test_clobber_d5 .private_extern _abi_test_clobber_d5 .align 4 _abi_test_clobber_d5: mov r0, #0 vmov s10, r0 vmov s11, r0 bx lr .globl _abi_test_clobber_d6 .private_extern _abi_test_clobber_d6 .align 4 _abi_test_clobber_d6: mov r0, #0 vmov s12, r0 vmov s13, r0 bx lr .globl _abi_test_clobber_d7 .private_extern _abi_test_clobber_d7 .align 4 _abi_test_clobber_d7: mov r0, #0 vmov s14, r0 vmov s15, r0 bx lr .globl _abi_test_clobber_d8 .private_extern _abi_test_clobber_d8 .align 4 _abi_test_clobber_d8: mov r0, #0 vmov s16, r0 vmov s17, r0 bx lr .globl _abi_test_clobber_d9 .private_extern _abi_test_clobber_d9 .align 4 _abi_test_clobber_d9: mov r0, #0 vmov s18, r0 vmov s19, r0 bx lr .globl _abi_test_clobber_d10 .private_extern _abi_test_clobber_d10 .align 4 _abi_test_clobber_d10: mov r0, #0 vmov s20, r0 vmov s21, r0 bx lr .globl _abi_test_clobber_d11 .private_extern _abi_test_clobber_d11 .align 4 _abi_test_clobber_d11: mov r0, #0 vmov s22, r0 vmov s23, r0 bx lr .globl _abi_test_clobber_d12 .private_extern _abi_test_clobber_d12 .align 4 _abi_test_clobber_d12: mov r0, #0 vmov s24, r0 vmov s25, r0 bx lr .globl _abi_test_clobber_d13 .private_extern _abi_test_clobber_d13 .align 4 _abi_test_clobber_d13: mov r0, #0 vmov s26, r0 vmov s27, r0 bx lr .globl _abi_test_clobber_d14 .private_extern _abi_test_clobber_d14 .align 4 
_abi_test_clobber_d14: mov r0, #0 vmov s28, r0 vmov s29, r0 bx lr .globl _abi_test_clobber_d15 .private_extern _abi_test_clobber_d15 .align 4 _abi_test_clobber_d15: mov r0, #0 vmov s30, r0 vmov s31, r0 bx lr #endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__APPLE__)
marvin-hansen/iggy-streaming-system
19,155
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/ios-arm/crypto/fipsmodule/aesv8-armx.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__APPLE__) #include <openssl/arm_arch.h> #if __ARM_MAX_ARCH__>=7 .text .code 32 #undef __thumb2__ .align 5 Lrcon: .long 0x01,0x01,0x01,0x01 .long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d @ rotate-n-splat .long 0x1b,0x1b,0x1b,0x1b .text .globl _aes_hw_set_encrypt_key .private_extern _aes_hw_set_encrypt_key #ifdef __thumb2__ .thumb_func _aes_hw_set_encrypt_key #endif .align 5 _aes_hw_set_encrypt_key: Lenc_key: mov r3,#-1 cmp r0,#0 beq Lenc_key_abort cmp r2,#0 beq Lenc_key_abort mov r3,#-2 cmp r1,#128 blt Lenc_key_abort cmp r1,#256 bgt Lenc_key_abort tst r1,#0x3f bne Lenc_key_abort adr r3,Lrcon cmp r1,#192 veor q0,q0,q0 vld1.8 {q3},[r0]! mov r1,#8 @ reuse r1 vld1.32 {q1,q2},[r3]! blt Loop128 beq L192 b L256 .align 4 Loop128: vtbl.8 d20,{q3},d4 vtbl.8 d21,{q3},d5 vext.8 q9,q0,q3,#12 vst1.32 {q3},[r2]! .byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0 subs r1,r1,#1 veor q3,q3,q9 vext.8 q9,q0,q9,#12 veor q3,q3,q9 vext.8 q9,q0,q9,#12 veor q10,q10,q1 veor q3,q3,q9 vshl.u8 q1,q1,#1 veor q3,q3,q10 bne Loop128 vld1.32 {q1},[r3] vtbl.8 d20,{q3},d4 vtbl.8 d21,{q3},d5 vext.8 q9,q0,q3,#12 vst1.32 {q3},[r2]! .byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0 veor q3,q3,q9 vext.8 q9,q0,q9,#12 veor q3,q3,q9 vext.8 q9,q0,q9,#12 veor q10,q10,q1 veor q3,q3,q9 vshl.u8 q1,q1,#1 veor q3,q3,q10 vtbl.8 d20,{q3},d4 vtbl.8 d21,{q3},d5 vext.8 q9,q0,q3,#12 vst1.32 {q3},[r2]! .byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0 veor q3,q3,q9 vext.8 q9,q0,q9,#12 veor q3,q3,q9 vext.8 q9,q0,q9,#12 veor q10,q10,q1 veor q3,q3,q9 veor q3,q3,q10 vst1.32 {q3},[r2] add r2,r2,#0x50 mov r12,#10 b Ldone .align 4 L192: vld1.8 {d16},[r0]! vmov.i8 q10,#8 @ borrow q10 vst1.32 {q3},[r2]! vsub.i8 q2,q2,q10 @ adjust the mask Loop192: vtbl.8 d20,{q8},d4 vtbl.8 d21,{q8},d5 vext.8 q9,q0,q3,#12 vst1.32 {d16},[r2]! .byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0 subs r1,r1,#1 veor q3,q3,q9 vext.8 q9,q0,q9,#12 veor q3,q3,q9 vext.8 q9,q0,q9,#12 veor q3,q3,q9 vdup.32 q9,d7[1] veor q9,q9,q8 veor q10,q10,q1 vext.8 q8,q0,q8,#12 vshl.u8 q1,q1,#1 veor q8,q8,q9 veor q3,q3,q10 veor q8,q8,q10 vst1.32 {q3},[r2]! bne Loop192 mov r12,#12 add r2,r2,#0x20 b Ldone .align 4 L256: vld1.8 {q8},[r0] mov r1,#7 mov r12,#14 vst1.32 {q3},[r2]! Loop256: vtbl.8 d20,{q8},d4 vtbl.8 d21,{q8},d5 vext.8 q9,q0,q3,#12 vst1.32 {q8},[r2]! .byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0 subs r1,r1,#1 veor q3,q3,q9 vext.8 q9,q0,q9,#12 veor q3,q3,q9 vext.8 q9,q0,q9,#12 veor q10,q10,q1 veor q3,q3,q9 vshl.u8 q1,q1,#1 veor q3,q3,q10 vst1.32 {q3},[r2]! beq Ldone vdup.32 q10,d7[1] vext.8 q9,q0,q8,#12 .byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0 veor q8,q8,q9 vext.8 q9,q0,q9,#12 veor q8,q8,q9 vext.8 q9,q0,q9,#12 veor q8,q8,q9 veor q8,q8,q10 b Loop256 Ldone: str r12,[r2] mov r3,#0 Lenc_key_abort: mov r0,r3 @ return value bx lr .globl _aes_hw_set_decrypt_key .private_extern _aes_hw_set_decrypt_key #ifdef __thumb2__ .thumb_func _aes_hw_set_decrypt_key #endif .align 5 _aes_hw_set_decrypt_key: stmdb sp!,{r4,lr} bl Lenc_key cmp r0,#0 bne Ldec_key_abort sub r2,r2,#240 @ restore original r2 mov r4,#-16 add r0,r2,r12,lsl#4 @ end of key schedule vld1.32 {q0},[r2] vld1.32 {q1},[r0] vst1.32 {q0},[r0],r4 vst1.32 {q1},[r2]! Loop_imc: vld1.32 {q0},[r2] vld1.32 {q1},[r0] .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0 .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 vst1.32 {q0},[r0],r4 vst1.32 {q1},[r2]! 
cmp r0,r2 bhi Loop_imc vld1.32 {q0},[r2] .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0 vst1.32 {q0},[r0] eor r0,r0,r0 @ return value Ldec_key_abort: ldmia sp!,{r4,pc} .globl _aes_hw_encrypt .private_extern _aes_hw_encrypt #ifdef __thumb2__ .thumb_func _aes_hw_encrypt #endif .align 5 _aes_hw_encrypt: AARCH64_VALID_CALL_TARGET ldr r3,[r2,#240] vld1.32 {q0},[r2]! vld1.8 {q2},[r0] sub r3,r3,#2 vld1.32 {q1},[r2]! Loop_enc: .byte 0x00,0x43,0xb0,0xf3 @ aese q2,q0 .byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2 vld1.32 {q0},[r2]! subs r3,r3,#2 .byte 0x02,0x43,0xb0,0xf3 @ aese q2,q1 .byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2 vld1.32 {q1},[r2]! bgt Loop_enc .byte 0x00,0x43,0xb0,0xf3 @ aese q2,q0 .byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2 vld1.32 {q0},[r2] .byte 0x02,0x43,0xb0,0xf3 @ aese q2,q1 veor q2,q2,q0 vst1.8 {q2},[r1] bx lr .globl _aes_hw_decrypt .private_extern _aes_hw_decrypt #ifdef __thumb2__ .thumb_func _aes_hw_decrypt #endif .align 5 _aes_hw_decrypt: AARCH64_VALID_CALL_TARGET ldr r3,[r2,#240] vld1.32 {q0},[r2]! vld1.8 {q2},[r0] sub r3,r3,#2 vld1.32 {q1},[r2]! Loop_dec: .byte 0x40,0x43,0xb0,0xf3 @ aesd q2,q0 .byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2 vld1.32 {q0},[r2]! subs r3,r3,#2 .byte 0x42,0x43,0xb0,0xf3 @ aesd q2,q1 .byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2 vld1.32 {q1},[r2]! bgt Loop_dec .byte 0x40,0x43,0xb0,0xf3 @ aesd q2,q0 .byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2 vld1.32 {q0},[r2] .byte 0x42,0x43,0xb0,0xf3 @ aesd q2,q1 veor q2,q2,q0 vst1.8 {q2},[r1] bx lr .globl _aes_hw_cbc_encrypt .private_extern _aes_hw_cbc_encrypt #ifdef __thumb2__ .thumb_func _aes_hw_cbc_encrypt #endif .align 5 _aes_hw_cbc_encrypt: mov ip,sp stmdb sp!,{r4,r5,r6,r7,r8,lr} vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so ldmia ip,{r4,r5} @ load remaining args subs r2,r2,#16 mov r8,#16 blo Lcbc_abort moveq r8,#0 cmp r5,#0 @ en- or decrypting? ldr r5,[r3,#240] and r2,r2,#-16 vld1.8 {q6},[r4] vld1.8 {q0},[r0],r8 vld1.32 {q8,q9},[r3] @ load key schedule... sub r5,r5,#6 add r7,r3,r5,lsl#4 @ pointer to last 7 round keys sub r5,r5,#2 vld1.32 {q10,q11},[r7]! vld1.32 {q12,q13},[r7]! vld1.32 {q14,q15},[r7]! vld1.32 {q7},[r7] add r7,r3,#32 mov r6,r5 beq Lcbc_dec cmp r5,#2 veor q0,q0,q6 veor q5,q8,q7 beq Lcbc_enc128 vld1.32 {q2,q3},[r7] add r7,r3,#16 add r6,r3,#16*4 add r12,r3,#16*5 .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 add r14,r3,#16*6 add r3,r3,#16*7 b Lenter_cbc_enc .align 4 Loop_cbc_enc: .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 vst1.8 {q6},[r1]! 
Lenter_cbc_enc: .byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 .byte 0x04,0x03,0xb0,0xf3 @ aese q0,q2 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 vld1.32 {q8},[r6] cmp r5,#4 .byte 0x06,0x03,0xb0,0xf3 @ aese q0,q3 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 vld1.32 {q9},[r12] beq Lcbc_enc192 .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 vld1.32 {q8},[r14] .byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 vld1.32 {q9},[r3] nop Lcbc_enc192: .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 subs r2,r2,#16 .byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 moveq r8,#0 .byte 0x24,0x03,0xb0,0xf3 @ aese q0,q10 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 .byte 0x26,0x03,0xb0,0xf3 @ aese q0,q11 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 vld1.8 {q8},[r0],r8 .byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 veor q8,q8,q5 .byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 vld1.32 {q9},[r7] @ re-pre-load rndkey[1] .byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 .byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15 veor q6,q0,q7 bhs Loop_cbc_enc vst1.8 {q6},[r1]! b Lcbc_done .align 5 Lcbc_enc128: vld1.32 {q2,q3},[r7] .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 b Lenter_cbc_enc128 Loop_cbc_enc128: .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 vst1.8 {q6},[r1]! Lenter_cbc_enc128: .byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 subs r2,r2,#16 .byte 0x04,0x03,0xb0,0xf3 @ aese q0,q2 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 moveq r8,#0 .byte 0x06,0x03,0xb0,0xf3 @ aese q0,q3 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 .byte 0x24,0x03,0xb0,0xf3 @ aese q0,q10 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 .byte 0x26,0x03,0xb0,0xf3 @ aese q0,q11 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 vld1.8 {q8},[r0],r8 .byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 .byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 .byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 veor q8,q8,q5 .byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15 veor q6,q0,q7 bhs Loop_cbc_enc128 vst1.8 {q6},[r1]! b Lcbc_done .align 5 Lcbc_dec: vld1.8 {q10},[r0]! subs r2,r2,#32 @ bias add r6,r5,#2 vorr q3,q0,q0 vorr q1,q0,q0 vorr q11,q10,q10 blo Lcbc_dec_tail vorr q1,q10,q10 vld1.8 {q10},[r0]! vorr q2,q0,q0 vorr q3,q1,q1 vorr q11,q10,q10 Loop3x_cbc_dec: .byte 0x60,0x03,0xb0,0xf3 @ aesd q0,q8 .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0 .byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8 .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 .byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8 .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 vld1.32 {q8},[r7]! subs r6,r6,#2 .byte 0x62,0x03,0xb0,0xf3 @ aesd q0,q9 .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0 .byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9 .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 .byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9 .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 vld1.32 {q9},[r7]! 
bgt Loop3x_cbc_dec .byte 0x60,0x03,0xb0,0xf3 @ aesd q0,q8 .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0 .byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8 .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 .byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8 .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 veor q4,q6,q7 subs r2,r2,#0x30 veor q5,q2,q7 movlo r6,r2 @ r6, r6, is zero at this point .byte 0x62,0x03,0xb0,0xf3 @ aesd q0,q9 .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0 .byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9 .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 .byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9 .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 veor q9,q3,q7 add r0,r0,r6 @ r0 is adjusted in such way that @ at exit from the loop q1-q10 @ are loaded with last "words" vorr q6,q11,q11 mov r7,r3 .byte 0x68,0x03,0xb0,0xf3 @ aesd q0,q12 .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0 .byte 0x68,0x23,0xb0,0xf3 @ aesd q1,q12 .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 .byte 0x68,0x43,0xf0,0xf3 @ aesd q10,q12 .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 vld1.8 {q2},[r0]! .byte 0x6a,0x03,0xb0,0xf3 @ aesd q0,q13 .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0 .byte 0x6a,0x23,0xb0,0xf3 @ aesd q1,q13 .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 .byte 0x6a,0x43,0xf0,0xf3 @ aesd q10,q13 .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 vld1.8 {q3},[r0]! .byte 0x6c,0x03,0xb0,0xf3 @ aesd q0,q14 .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0 .byte 0x6c,0x23,0xb0,0xf3 @ aesd q1,q14 .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 .byte 0x6c,0x43,0xf0,0xf3 @ aesd q10,q14 .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 vld1.8 {q11},[r0]! .byte 0x6e,0x03,0xb0,0xf3 @ aesd q0,q15 .byte 0x6e,0x23,0xb0,0xf3 @ aesd q1,q15 .byte 0x6e,0x43,0xf0,0xf3 @ aesd q10,q15 vld1.32 {q8},[r7]! @ re-pre-load rndkey[0] add r6,r5,#2 veor q4,q4,q0 veor q5,q5,q1 veor q10,q10,q9 vld1.32 {q9},[r7]! @ re-pre-load rndkey[1] vst1.8 {q4},[r1]! vorr q0,q2,q2 vst1.8 {q5},[r1]! vorr q1,q3,q3 vst1.8 {q10},[r1]! vorr q10,q11,q11 bhs Loop3x_cbc_dec cmn r2,#0x30 beq Lcbc_done nop Lcbc_dec_tail: .byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8 .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 .byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8 .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 vld1.32 {q8},[r7]! subs r6,r6,#2 .byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9 .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 .byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9 .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 vld1.32 {q9},[r7]! bgt Lcbc_dec_tail .byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8 .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 .byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8 .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 .byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9 .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 .byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9 .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 .byte 0x68,0x23,0xb0,0xf3 @ aesd q1,q12 .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 .byte 0x68,0x43,0xf0,0xf3 @ aesd q10,q12 .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 cmn r2,#0x20 .byte 0x6a,0x23,0xb0,0xf3 @ aesd q1,q13 .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 .byte 0x6a,0x43,0xf0,0xf3 @ aesd q10,q13 .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 veor q5,q6,q7 .byte 0x6c,0x23,0xb0,0xf3 @ aesd q1,q14 .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 .byte 0x6c,0x43,0xf0,0xf3 @ aesd q10,q14 .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 veor q9,q3,q7 .byte 0x6e,0x23,0xb0,0xf3 @ aesd q1,q15 .byte 0x6e,0x43,0xf0,0xf3 @ aesd q10,q15 beq Lcbc_dec_one veor q5,q5,q1 veor q9,q9,q10 vorr q6,q11,q11 vst1.8 {q5},[r1]! vst1.8 {q9},[r1]! b Lcbc_done Lcbc_dec_one: veor q5,q5,q10 vorr q6,q11,q11 vst1.8 {q5},[r1]! 
Lcbc_done: vst1.8 {q6},[r4] Lcbc_abort: vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15} ldmia sp!,{r4,r5,r6,r7,r8,pc} .globl _aes_hw_ctr32_encrypt_blocks .private_extern _aes_hw_ctr32_encrypt_blocks #ifdef __thumb2__ .thumb_func _aes_hw_ctr32_encrypt_blocks #endif .align 5 _aes_hw_ctr32_encrypt_blocks: mov ip,sp stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,lr} vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so ldr r4, [ip] @ load remaining arg ldr r5,[r3,#240] ldr r8, [r4, #12] vld1.32 {q0},[r4] vld1.32 {q8,q9},[r3] @ load key schedule... sub r5,r5,#4 mov r12,#16 cmp r2,#2 add r7,r3,r5,lsl#4 @ pointer to last 5 round keys sub r5,r5,#2 vld1.32 {q12,q13},[r7]! vld1.32 {q14,q15},[r7]! vld1.32 {q7},[r7] add r7,r3,#32 mov r6,r5 @ ARM Cortex-A57 and Cortex-A72 cores running in 32-bit mode are @ affected by silicon errata #1742098 [0] and #1655431 [1], @ respectively, where the second instruction of an aese/aesmc @ instruction pair may execute twice if an interrupt is taken right @ after the first instruction consumes an input register of which a @ single 32-bit lane has been updated the last time it was modified. @ @ This function uses a counter in one 32-bit lane. The @ could write to q1 and q10 directly, but that trips this bugs. @ We write to q6 and copy to the final register as a workaround. @ @ [0] ARM-EPM-049219 v23 Cortex-A57 MPCore Software Developers Errata Notice @ [1] ARM-EPM-012079 v11.0 Cortex-A72 MPCore Software Developers Errata Notice #ifndef __ARMEB__ rev r8, r8 #endif add r10, r8, #1 vorr q6,q0,q0 rev r10, r10 vmov.32 d13[1],r10 add r8, r8, #2 vorr q1,q6,q6 bls Lctr32_tail rev r12, r8 vmov.32 d13[1],r12 sub r2,r2,#3 @ bias vorr q10,q6,q6 b Loop3x_ctr32 .align 4 Loop3x_ctr32: .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 .byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8 .byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1 .byte 0x20,0x43,0xf0,0xf3 @ aese q10,q8 .byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10 vld1.32 {q8},[r7]! subs r6,r6,#2 .byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 .byte 0x22,0x23,0xb0,0xf3 @ aese q1,q9 .byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1 .byte 0x22,0x43,0xf0,0xf3 @ aese q10,q9 .byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10 vld1.32 {q9},[r7]! bgt Loop3x_ctr32 .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 .byte 0x80,0x83,0xb0,0xf3 @ aesmc q4,q0 .byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8 .byte 0x82,0xa3,0xb0,0xf3 @ aesmc q5,q1 vld1.8 {q2},[r0]! add r9,r8,#1 .byte 0x20,0x43,0xf0,0xf3 @ aese q10,q8 .byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10 vld1.8 {q3},[r0]! rev r9,r9 .byte 0x22,0x83,0xb0,0xf3 @ aese q4,q9 .byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4 .byte 0x22,0xa3,0xb0,0xf3 @ aese q5,q9 .byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5 vld1.8 {q11},[r0]! mov r7,r3 .byte 0x22,0x43,0xf0,0xf3 @ aese q10,q9 .byte 0xa4,0x23,0xf0,0xf3 @ aesmc q9,q10 .byte 0x28,0x83,0xb0,0xf3 @ aese q4,q12 .byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4 .byte 0x28,0xa3,0xb0,0xf3 @ aese q5,q12 .byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5 veor q2,q2,q7 add r10,r8,#2 .byte 0x28,0x23,0xf0,0xf3 @ aese q9,q12 .byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9 veor q3,q3,q7 add r8,r8,#3 .byte 0x2a,0x83,0xb0,0xf3 @ aese q4,q13 .byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4 .byte 0x2a,0xa3,0xb0,0xf3 @ aese q5,q13 .byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5 @ Note the logic to update q0, q1, and q1 is written to work @ around a bug in ARM Cortex-A57 and Cortex-A72 cores running in @ 32-bit mode. See the comment above. 
veor q11,q11,q7 vmov.32 d13[1], r9 .byte 0x2a,0x23,0xf0,0xf3 @ aese q9,q13 .byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9 vorr q0,q6,q6 rev r10,r10 .byte 0x2c,0x83,0xb0,0xf3 @ aese q4,q14 .byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4 vmov.32 d13[1], r10 rev r12,r8 .byte 0x2c,0xa3,0xb0,0xf3 @ aese q5,q14 .byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5 vorr q1,q6,q6 vmov.32 d13[1], r12 .byte 0x2c,0x23,0xf0,0xf3 @ aese q9,q14 .byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9 vorr q10,q6,q6 subs r2,r2,#3 .byte 0x2e,0x83,0xb0,0xf3 @ aese q4,q15 .byte 0x2e,0xa3,0xb0,0xf3 @ aese q5,q15 .byte 0x2e,0x23,0xf0,0xf3 @ aese q9,q15 veor q2,q2,q4 vld1.32 {q8},[r7]! @ re-pre-load rndkey[0] vst1.8 {q2},[r1]! veor q3,q3,q5 mov r6,r5 vst1.8 {q3},[r1]! veor q11,q11,q9 vld1.32 {q9},[r7]! @ re-pre-load rndkey[1] vst1.8 {q11},[r1]! bhs Loop3x_ctr32 adds r2,r2,#3 beq Lctr32_done Lctr32_tail: cmp r2,#1 blt Lctr32_done @ if len = 0, go to done mov r12,#16 moveq r12,#0 .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 .byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8 .byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1 vld1.32 {q8},[r7]! subs r6,r6,#2 .byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 .byte 0x22,0x23,0xb0,0xf3 @ aese q1,q9 .byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1 vld1.32 {q9},[r7]! bgt Lctr32_tail .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 .byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8 .byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1 .byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 .byte 0x22,0x23,0xb0,0xf3 @ aese q1,q9 .byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1 vld1.8 {q2},[r0],r12 .byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 .byte 0x28,0x23,0xb0,0xf3 @ aese q1,q12 .byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1 vld1.8 {q3},[r0] .byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 .byte 0x2a,0x23,0xb0,0xf3 @ aese q1,q13 .byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1 veor q2,q2,q7 .byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 .byte 0x2c,0x23,0xb0,0xf3 @ aese q1,q14 .byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1 veor q3,q3,q7 .byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15 .byte 0x2e,0x23,0xb0,0xf3 @ aese q1,q15 veor q2,q2,q0 veor q3,q3,q1 vst1.8 {q2},[r1]! cmp r12, #0 beq Lctr32_done vst1.8 {q3},[r1] Lctr32_done: vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15} ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,pc} #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__APPLE__)
marvin-hansen/iggy-streaming-system
32,164
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/ios-arm/crypto/fipsmodule/bsaes-armv7.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__APPLE__) @ Copyright 2012-2016 The OpenSSL Project Authors. All Rights Reserved. @ @ Licensed under the OpenSSL license (the "License"). You may not use @ this file except in compliance with the License. You can obtain a copy @ in the file LICENSE in the source distribution or at @ https://www.openssl.org/source/license.html @ ==================================================================== @ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL @ project. The module is, however, dual licensed under OpenSSL and @ CRYPTOGAMS licenses depending on where you obtain it. For further @ details see http://www.openssl.org/~appro/cryptogams/. @ @ Specific modes and adaptation for Linux kernel by Ard Biesheuvel @ of Linaro. Permission to use under GPL terms is granted. @ ==================================================================== @ Bit-sliced AES for ARM NEON @ @ February 2012. @ @ This implementation is direct adaptation of bsaes-x86_64 module for @ ARM NEON. Except that this module is endian-neutral [in sense that @ it can be compiled for either endianness] by courtesy of vld1.8's @ neutrality. Initial version doesn't implement interface to OpenSSL, @ only low-level primitives and unsupported entry points, just enough @ to collect performance results, which for Cortex-A8 core are: @ @ encrypt 19.5 cycles per byte processed with 128-bit key @ decrypt 22.1 cycles per byte processed with 128-bit key @ key conv. 440 cycles per 128-bit key/0.18 of 8x block @ @ Snapdragon S4 encrypts byte in 17.6 cycles and decrypts in 19.7, @ which is [much] worse than anticipated (for further details see @ http://www.openssl.org/~appro/Snapdragon-S4.html). @ @ Cortex-A15 manages in 14.2/16.1 cycles [when integer-only code @ manages in 20.0 cycles]. @ @ When comparing to x86_64 results keep in mind that NEON unit is @ [mostly] single-issue and thus can't [fully] benefit from @ instruction-level parallelism. And when comparing to aes-armv4 @ results keep in mind key schedule conversion overhead (see @ bsaes-x86_64.pl for further details)... @ @ <appro@openssl.org> @ April-August 2013 @ Add CBC, CTR and XTS subroutines and adapt for kernel use; courtesy of Ard. #ifndef __KERNEL__ # include <openssl/arm_arch.h> # define VFP_ABI_PUSH vstmdb sp!,{d8-d15} # define VFP_ABI_POP vldmia sp!,{d8-d15} # define VFP_ABI_FRAME 0x40 #else # define VFP_ABI_PUSH # define VFP_ABI_POP # define VFP_ABI_FRAME 0 # define BSAES_ASM_EXTENDED_KEY # define XTS_CHAIN_TWEAK # define __ARM_MAX_ARCH__ 7 #endif #ifdef __thumb__ # define adrl adr #endif #if __ARM_MAX_ARCH__>=7 .text .syntax unified @ ARMv7-capable assembler is expected to handle this #if defined(__thumb2__) && !defined(__APPLE__) .thumb #else .code 32 # undef __thumb2__ #endif #ifdef __thumb2__ .thumb_func _bsaes_decrypt8 #endif .align 4 _bsaes_decrypt8: adr r6,. 
vldmia r4!, {q9} @ round 0 key #if defined(__thumb2__) || defined(__APPLE__) adr r6,LM0ISR #else add r6,r6,#LM0ISR-_bsaes_decrypt8 #endif vldmia r6!, {q8} @ LM0ISR veor q10, q0, q9 @ xor with round0 key veor q11, q1, q9 vtbl.8 d0, {q10}, d16 vtbl.8 d1, {q10}, d17 veor q12, q2, q9 vtbl.8 d2, {q11}, d16 vtbl.8 d3, {q11}, d17 veor q13, q3, q9 vtbl.8 d4, {q12}, d16 vtbl.8 d5, {q12}, d17 veor q14, q4, q9 vtbl.8 d6, {q13}, d16 vtbl.8 d7, {q13}, d17 veor q15, q5, q9 vtbl.8 d8, {q14}, d16 vtbl.8 d9, {q14}, d17 veor q10, q6, q9 vtbl.8 d10, {q15}, d16 vtbl.8 d11, {q15}, d17 veor q11, q7, q9 vtbl.8 d12, {q10}, d16 vtbl.8 d13, {q10}, d17 vtbl.8 d14, {q11}, d16 vtbl.8 d15, {q11}, d17 vmov.i8 q8,#0x55 @ compose LBS0 vmov.i8 q9,#0x33 @ compose LBS1 vshr.u64 q10, q6, #1 vshr.u64 q11, q4, #1 veor q10, q10, q7 veor q11, q11, q5 vand q10, q10, q8 vand q11, q11, q8 veor q7, q7, q10 vshl.u64 q10, q10, #1 veor q5, q5, q11 vshl.u64 q11, q11, #1 veor q6, q6, q10 veor q4, q4, q11 vshr.u64 q10, q2, #1 vshr.u64 q11, q0, #1 veor q10, q10, q3 veor q11, q11, q1 vand q10, q10, q8 vand q11, q11, q8 veor q3, q3, q10 vshl.u64 q10, q10, #1 veor q1, q1, q11 vshl.u64 q11, q11, #1 veor q2, q2, q10 veor q0, q0, q11 vmov.i8 q8,#0x0f @ compose LBS2 vshr.u64 q10, q5, #2 vshr.u64 q11, q4, #2 veor q10, q10, q7 veor q11, q11, q6 vand q10, q10, q9 vand q11, q11, q9 veor q7, q7, q10 vshl.u64 q10, q10, #2 veor q6, q6, q11 vshl.u64 q11, q11, #2 veor q5, q5, q10 veor q4, q4, q11 vshr.u64 q10, q1, #2 vshr.u64 q11, q0, #2 veor q10, q10, q3 veor q11, q11, q2 vand q10, q10, q9 vand q11, q11, q9 veor q3, q3, q10 vshl.u64 q10, q10, #2 veor q2, q2, q11 vshl.u64 q11, q11, #2 veor q1, q1, q10 veor q0, q0, q11 vshr.u64 q10, q3, #4 vshr.u64 q11, q2, #4 veor q10, q10, q7 veor q11, q11, q6 vand q10, q10, q8 vand q11, q11, q8 veor q7, q7, q10 vshl.u64 q10, q10, #4 veor q6, q6, q11 vshl.u64 q11, q11, #4 veor q3, q3, q10 veor q2, q2, q11 vshr.u64 q10, q1, #4 vshr.u64 q11, q0, #4 veor q10, q10, q5 veor q11, q11, q4 vand q10, q10, q8 vand q11, q11, q8 veor q5, q5, q10 vshl.u64 q10, q10, #4 veor q4, q4, q11 vshl.u64 q11, q11, #4 veor q1, q1, q10 veor q0, q0, q11 sub r5,r5,#1 b Ldec_sbox .align 4 Ldec_loop: vldmia r4!, {q8,q9,q10,q11} veor q8, q8, q0 veor q9, q9, q1 vtbl.8 d0, {q8}, d24 vtbl.8 d1, {q8}, d25 vldmia r4!, {q8} veor q10, q10, q2 vtbl.8 d2, {q9}, d24 vtbl.8 d3, {q9}, d25 vldmia r4!, {q9} veor q11, q11, q3 vtbl.8 d4, {q10}, d24 vtbl.8 d5, {q10}, d25 vldmia r4!, {q10} vtbl.8 d6, {q11}, d24 vtbl.8 d7, {q11}, d25 vldmia r4!, {q11} veor q8, q8, q4 veor q9, q9, q5 vtbl.8 d8, {q8}, d24 vtbl.8 d9, {q8}, d25 veor q10, q10, q6 vtbl.8 d10, {q9}, d24 vtbl.8 d11, {q9}, d25 veor q11, q11, q7 vtbl.8 d12, {q10}, d24 vtbl.8 d13, {q10}, d25 vtbl.8 d14, {q11}, d24 vtbl.8 d15, {q11}, d25 Ldec_sbox: veor q1, q1, q4 veor q3, q3, q4 veor q4, q4, q7 veor q1, q1, q6 veor q2, q2, q7 veor q6, q6, q4 veor q0, q0, q1 veor q2, q2, q5 veor q7, q7, q6 veor q3, q3, q0 veor q5, q5, q0 veor q1, q1, q3 veor q11, q3, q0 veor q10, q7, q4 veor q9, q1, q6 veor q13, q4, q0 vmov q8, q10 veor q12, q5, q2 vorr q10, q10, q9 veor q15, q11, q8 vand q14, q11, q12 vorr q11, q11, q12 veor q12, q12, q9 vand q8, q8, q9 veor q9, q6, q2 vand q15, q15, q12 vand q13, q13, q9 veor q9, q3, q7 veor q12, q1, q5 veor q11, q11, q13 veor q10, q10, q13 vand q13, q9, q12 vorr q9, q9, q12 veor q11, q11, q15 veor q8, q8, q13 veor q10, q10, q14 veor q9, q9, q15 veor q8, q8, q14 vand q12, q4, q6 veor q9, q9, q14 vand q13, q0, q2 vand q14, q7, q1 vorr q15, q3, q5 veor q11, q11, q12 veor q9, q9, q14 veor q8, q8, q15 
veor q10, q10, q13 @ Inv_GF16 0, 1, 2, 3, s0, s1, s2, s3 @ new smaller inversion vand q14, q11, q9 vmov q12, q8 veor q13, q10, q14 veor q15, q8, q14 veor q14, q8, q14 @ q14=q15 vbsl q13, q9, q8 vbsl q15, q11, q10 veor q11, q11, q10 vbsl q12, q13, q14 vbsl q8, q14, q13 vand q14, q12, q15 veor q9, q9, q8 veor q14, q14, q11 veor q12, q5, q2 veor q8, q1, q6 veor q10, q15, q14 vand q10, q10, q5 veor q5, q5, q1 vand q11, q1, q15 vand q5, q5, q14 veor q1, q11, q10 veor q5, q5, q11 veor q15, q15, q13 veor q14, q14, q9 veor q11, q15, q14 veor q10, q13, q9 vand q11, q11, q12 vand q10, q10, q2 veor q12, q12, q8 veor q2, q2, q6 vand q8, q8, q15 vand q6, q6, q13 vand q12, q12, q14 vand q2, q2, q9 veor q8, q8, q12 veor q2, q2, q6 veor q12, q12, q11 veor q6, q6, q10 veor q5, q5, q12 veor q2, q2, q12 veor q1, q1, q8 veor q6, q6, q8 veor q12, q3, q0 veor q8, q7, q4 veor q11, q15, q14 veor q10, q13, q9 vand q11, q11, q12 vand q10, q10, q0 veor q12, q12, q8 veor q0, q0, q4 vand q8, q8, q15 vand q4, q4, q13 vand q12, q12, q14 vand q0, q0, q9 veor q8, q8, q12 veor q0, q0, q4 veor q12, q12, q11 veor q4, q4, q10 veor q15, q15, q13 veor q14, q14, q9 veor q10, q15, q14 vand q10, q10, q3 veor q3, q3, q7 vand q11, q7, q15 vand q3, q3, q14 veor q7, q11, q10 veor q3, q3, q11 veor q3, q3, q12 veor q0, q0, q12 veor q7, q7, q8 veor q4, q4, q8 veor q1, q1, q7 veor q6, q6, q5 veor q4, q4, q1 veor q2, q2, q7 veor q5, q5, q7 veor q4, q4, q2 veor q7, q7, q0 veor q4, q4, q5 veor q3, q3, q6 veor q6, q6, q1 veor q3, q3, q4 veor q4, q4, q0 veor q7, q7, q3 subs r5,r5,#1 bcc Ldec_done @ multiplication by 0x05-0x00-0x04-0x00 vext.8 q8, q0, q0, #8 vext.8 q14, q3, q3, #8 vext.8 q15, q5, q5, #8 veor q8, q8, q0 vext.8 q9, q1, q1, #8 veor q14, q14, q3 vext.8 q10, q6, q6, #8 veor q15, q15, q5 vext.8 q11, q4, q4, #8 veor q9, q9, q1 vext.8 q12, q2, q2, #8 veor q10, q10, q6 vext.8 q13, q7, q7, #8 veor q11, q11, q4 veor q12, q12, q2 veor q13, q13, q7 veor q0, q0, q14 veor q1, q1, q14 veor q6, q6, q8 veor q2, q2, q10 veor q4, q4, q9 veor q1, q1, q15 veor q6, q6, q15 veor q2, q2, q14 veor q7, q7, q11 veor q4, q4, q14 veor q3, q3, q12 veor q2, q2, q15 veor q7, q7, q15 veor q5, q5, q13 vext.8 q8, q0, q0, #12 @ x0 <<< 32 vext.8 q9, q1, q1, #12 veor q0, q0, q8 @ x0 ^ (x0 <<< 32) vext.8 q10, q6, q6, #12 veor q1, q1, q9 vext.8 q11, q4, q4, #12 veor q6, q6, q10 vext.8 q12, q2, q2, #12 veor q4, q4, q11 vext.8 q13, q7, q7, #12 veor q2, q2, q12 vext.8 q14, q3, q3, #12 veor q7, q7, q13 vext.8 q15, q5, q5, #12 veor q3, q3, q14 veor q9, q9, q0 veor q5, q5, q15 vext.8 q0, q0, q0, #8 @ (x0 ^ (x0 <<< 32)) <<< 64) veor q10, q10, q1 veor q8, q8, q5 veor q9, q9, q5 vext.8 q1, q1, q1, #8 veor q13, q13, q2 veor q0, q0, q8 veor q14, q14, q7 veor q1, q1, q9 vext.8 q8, q2, q2, #8 veor q12, q12, q4 vext.8 q9, q7, q7, #8 veor q15, q15, q3 vext.8 q2, q4, q4, #8 veor q11, q11, q6 vext.8 q7, q5, q5, #8 veor q12, q12, q5 vext.8 q4, q3, q3, #8 veor q11, q11, q5 vext.8 q3, q6, q6, #8 veor q5, q9, q13 veor q11, q11, q2 veor q7, q7, q15 veor q6, q4, q14 veor q4, q8, q12 veor q2, q3, q10 vmov q3, q11 @ vmov q5, q9 vldmia r6, {q12} @ LISR ite eq @ Thumb2 thing, sanity check in ARM addeq r6,r6,#0x10 bne Ldec_loop vldmia r6, {q12} @ LISRM0 b Ldec_loop .align 4 Ldec_done: vmov.i8 q8,#0x55 @ compose LBS0 vmov.i8 q9,#0x33 @ compose LBS1 vshr.u64 q10, q3, #1 vshr.u64 q11, q2, #1 veor q10, q10, q5 veor q11, q11, q7 vand q10, q10, q8 vand q11, q11, q8 veor q5, q5, q10 vshl.u64 q10, q10, #1 veor q7, q7, q11 vshl.u64 q11, q11, #1 veor q3, q3, q10 veor q2, q2, q11 vshr.u64 q10, q6, #1 
vshr.u64 q11, q0, #1 veor q10, q10, q4 veor q11, q11, q1 vand q10, q10, q8 vand q11, q11, q8 veor q4, q4, q10 vshl.u64 q10, q10, #1 veor q1, q1, q11 vshl.u64 q11, q11, #1 veor q6, q6, q10 veor q0, q0, q11 vmov.i8 q8,#0x0f @ compose LBS2 vshr.u64 q10, q7, #2 vshr.u64 q11, q2, #2 veor q10, q10, q5 veor q11, q11, q3 vand q10, q10, q9 vand q11, q11, q9 veor q5, q5, q10 vshl.u64 q10, q10, #2 veor q3, q3, q11 vshl.u64 q11, q11, #2 veor q7, q7, q10 veor q2, q2, q11 vshr.u64 q10, q1, #2 vshr.u64 q11, q0, #2 veor q10, q10, q4 veor q11, q11, q6 vand q10, q10, q9 vand q11, q11, q9 veor q4, q4, q10 vshl.u64 q10, q10, #2 veor q6, q6, q11 vshl.u64 q11, q11, #2 veor q1, q1, q10 veor q0, q0, q11 vshr.u64 q10, q4, #4 vshr.u64 q11, q6, #4 veor q10, q10, q5 veor q11, q11, q3 vand q10, q10, q8 vand q11, q11, q8 veor q5, q5, q10 vshl.u64 q10, q10, #4 veor q3, q3, q11 vshl.u64 q11, q11, #4 veor q4, q4, q10 veor q6, q6, q11 vshr.u64 q10, q1, #4 vshr.u64 q11, q0, #4 veor q10, q10, q7 veor q11, q11, q2 vand q10, q10, q8 vand q11, q11, q8 veor q7, q7, q10 vshl.u64 q10, q10, #4 veor q2, q2, q11 vshl.u64 q11, q11, #4 veor q1, q1, q10 veor q0, q0, q11 vldmia r4, {q8} @ last round key veor q6, q6, q8 veor q4, q4, q8 veor q2, q2, q8 veor q7, q7, q8 veor q3, q3, q8 veor q5, q5, q8 veor q0, q0, q8 veor q1, q1, q8 bx lr .align 6 _bsaes_const: LM0ISR:@ InvShiftRows constants .quad 0x0a0e0206070b0f03, 0x0004080c0d010509 LISR: .quad 0x0504070602010003, 0x0f0e0d0c080b0a09 LISRM0: .quad 0x01040b0e0205080f, 0x0306090c00070a0d LM0SR:@ ShiftRows constants .quad 0x0a0e02060f03070b, 0x0004080c05090d01 LSR: .quad 0x0504070600030201, 0x0f0e0d0c0a09080b LSRM0: .quad 0x0304090e00050a0f, 0x01060b0c0207080d LM0: .quad 0x02060a0e03070b0f, 0x0004080c0105090d LREVM0SR: .quad 0x090d01050c000408, 0x03070b0f060a0e02 .byte 66,105,116,45,115,108,105,99,101,100,32,65,69,83,32,102,111,114,32,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 6 #ifdef __thumb2__ .thumb_func _bsaes_encrypt8 #endif .align 4 _bsaes_encrypt8: adr r6,. 
vldmia r4!, {q9} @ round 0 key #if defined(__thumb2__) || defined(__APPLE__) adr r6,LM0SR #else sub r6,r6,#_bsaes_encrypt8-LM0SR #endif vldmia r6!, {q8} @ LM0SR _bsaes_encrypt8_alt: veor q10, q0, q9 @ xor with round0 key veor q11, q1, q9 vtbl.8 d0, {q10}, d16 vtbl.8 d1, {q10}, d17 veor q12, q2, q9 vtbl.8 d2, {q11}, d16 vtbl.8 d3, {q11}, d17 veor q13, q3, q9 vtbl.8 d4, {q12}, d16 vtbl.8 d5, {q12}, d17 veor q14, q4, q9 vtbl.8 d6, {q13}, d16 vtbl.8 d7, {q13}, d17 veor q15, q5, q9 vtbl.8 d8, {q14}, d16 vtbl.8 d9, {q14}, d17 veor q10, q6, q9 vtbl.8 d10, {q15}, d16 vtbl.8 d11, {q15}, d17 veor q11, q7, q9 vtbl.8 d12, {q10}, d16 vtbl.8 d13, {q10}, d17 vtbl.8 d14, {q11}, d16 vtbl.8 d15, {q11}, d17 _bsaes_encrypt8_bitslice: vmov.i8 q8,#0x55 @ compose LBS0 vmov.i8 q9,#0x33 @ compose LBS1 vshr.u64 q10, q6, #1 vshr.u64 q11, q4, #1 veor q10, q10, q7 veor q11, q11, q5 vand q10, q10, q8 vand q11, q11, q8 veor q7, q7, q10 vshl.u64 q10, q10, #1 veor q5, q5, q11 vshl.u64 q11, q11, #1 veor q6, q6, q10 veor q4, q4, q11 vshr.u64 q10, q2, #1 vshr.u64 q11, q0, #1 veor q10, q10, q3 veor q11, q11, q1 vand q10, q10, q8 vand q11, q11, q8 veor q3, q3, q10 vshl.u64 q10, q10, #1 veor q1, q1, q11 vshl.u64 q11, q11, #1 veor q2, q2, q10 veor q0, q0, q11 vmov.i8 q8,#0x0f @ compose LBS2 vshr.u64 q10, q5, #2 vshr.u64 q11, q4, #2 veor q10, q10, q7 veor q11, q11, q6 vand q10, q10, q9 vand q11, q11, q9 veor q7, q7, q10 vshl.u64 q10, q10, #2 veor q6, q6, q11 vshl.u64 q11, q11, #2 veor q5, q5, q10 veor q4, q4, q11 vshr.u64 q10, q1, #2 vshr.u64 q11, q0, #2 veor q10, q10, q3 veor q11, q11, q2 vand q10, q10, q9 vand q11, q11, q9 veor q3, q3, q10 vshl.u64 q10, q10, #2 veor q2, q2, q11 vshl.u64 q11, q11, #2 veor q1, q1, q10 veor q0, q0, q11 vshr.u64 q10, q3, #4 vshr.u64 q11, q2, #4 veor q10, q10, q7 veor q11, q11, q6 vand q10, q10, q8 vand q11, q11, q8 veor q7, q7, q10 vshl.u64 q10, q10, #4 veor q6, q6, q11 vshl.u64 q11, q11, #4 veor q3, q3, q10 veor q2, q2, q11 vshr.u64 q10, q1, #4 vshr.u64 q11, q0, #4 veor q10, q10, q5 veor q11, q11, q4 vand q10, q10, q8 vand q11, q11, q8 veor q5, q5, q10 vshl.u64 q10, q10, #4 veor q4, q4, q11 vshl.u64 q11, q11, #4 veor q1, q1, q10 veor q0, q0, q11 sub r5,r5,#1 b Lenc_sbox .align 4 Lenc_loop: vldmia r4!, {q8,q9,q10,q11} veor q8, q8, q0 veor q9, q9, q1 vtbl.8 d0, {q8}, d24 vtbl.8 d1, {q8}, d25 vldmia r4!, {q8} veor q10, q10, q2 vtbl.8 d2, {q9}, d24 vtbl.8 d3, {q9}, d25 vldmia r4!, {q9} veor q11, q11, q3 vtbl.8 d4, {q10}, d24 vtbl.8 d5, {q10}, d25 vldmia r4!, {q10} vtbl.8 d6, {q11}, d24 vtbl.8 d7, {q11}, d25 vldmia r4!, {q11} veor q8, q8, q4 veor q9, q9, q5 vtbl.8 d8, {q8}, d24 vtbl.8 d9, {q8}, d25 veor q10, q10, q6 vtbl.8 d10, {q9}, d24 vtbl.8 d11, {q9}, d25 veor q11, q11, q7 vtbl.8 d12, {q10}, d24 vtbl.8 d13, {q10}, d25 vtbl.8 d14, {q11}, d24 vtbl.8 d15, {q11}, d25 Lenc_sbox: veor q2, q2, q1 veor q5, q5, q6 veor q3, q3, q0 veor q6, q6, q2 veor q5, q5, q0 veor q6, q6, q3 veor q3, q3, q7 veor q7, q7, q5 veor q3, q3, q4 veor q4, q4, q5 veor q2, q2, q7 veor q3, q3, q1 veor q1, q1, q5 veor q11, q7, q4 veor q10, q1, q2 veor q9, q5, q3 veor q13, q2, q4 vmov q8, q10 veor q12, q6, q0 vorr q10, q10, q9 veor q15, q11, q8 vand q14, q11, q12 vorr q11, q11, q12 veor q12, q12, q9 vand q8, q8, q9 veor q9, q3, q0 vand q15, q15, q12 vand q13, q13, q9 veor q9, q7, q1 veor q12, q5, q6 veor q11, q11, q13 veor q10, q10, q13 vand q13, q9, q12 vorr q9, q9, q12 veor q11, q11, q15 veor q8, q8, q13 veor q10, q10, q14 veor q9, q9, q15 veor q8, q8, q14 vand q12, q2, q3 veor q9, q9, q14 vand q13, q4, q0 vand q14, q1, q5 vorr q15, 
q7, q6 veor q11, q11, q12 veor q9, q9, q14 veor q8, q8, q15 veor q10, q10, q13 @ Inv_GF16 0, 1, 2, 3, s0, s1, s2, s3 @ new smaller inversion vand q14, q11, q9 vmov q12, q8 veor q13, q10, q14 veor q15, q8, q14 veor q14, q8, q14 @ q14=q15 vbsl q13, q9, q8 vbsl q15, q11, q10 veor q11, q11, q10 vbsl q12, q13, q14 vbsl q8, q14, q13 vand q14, q12, q15 veor q9, q9, q8 veor q14, q14, q11 veor q12, q6, q0 veor q8, q5, q3 veor q10, q15, q14 vand q10, q10, q6 veor q6, q6, q5 vand q11, q5, q15 vand q6, q6, q14 veor q5, q11, q10 veor q6, q6, q11 veor q15, q15, q13 veor q14, q14, q9 veor q11, q15, q14 veor q10, q13, q9 vand q11, q11, q12 vand q10, q10, q0 veor q12, q12, q8 veor q0, q0, q3 vand q8, q8, q15 vand q3, q3, q13 vand q12, q12, q14 vand q0, q0, q9 veor q8, q8, q12 veor q0, q0, q3 veor q12, q12, q11 veor q3, q3, q10 veor q6, q6, q12 veor q0, q0, q12 veor q5, q5, q8 veor q3, q3, q8 veor q12, q7, q4 veor q8, q1, q2 veor q11, q15, q14 veor q10, q13, q9 vand q11, q11, q12 vand q10, q10, q4 veor q12, q12, q8 veor q4, q4, q2 vand q8, q8, q15 vand q2, q2, q13 vand q12, q12, q14 vand q4, q4, q9 veor q8, q8, q12 veor q4, q4, q2 veor q12, q12, q11 veor q2, q2, q10 veor q15, q15, q13 veor q14, q14, q9 veor q10, q15, q14 vand q10, q10, q7 veor q7, q7, q1 vand q11, q1, q15 vand q7, q7, q14 veor q1, q11, q10 veor q7, q7, q11 veor q7, q7, q12 veor q4, q4, q12 veor q1, q1, q8 veor q2, q2, q8 veor q7, q7, q0 veor q1, q1, q6 veor q6, q6, q0 veor q4, q4, q7 veor q0, q0, q1 veor q1, q1, q5 veor q5, q5, q2 veor q2, q2, q3 veor q3, q3, q5 veor q4, q4, q5 veor q6, q6, q3 subs r5,r5,#1 bcc Lenc_done vext.8 q8, q0, q0, #12 @ x0 <<< 32 vext.8 q9, q1, q1, #12 veor q0, q0, q8 @ x0 ^ (x0 <<< 32) vext.8 q10, q4, q4, #12 veor q1, q1, q9 vext.8 q11, q6, q6, #12 veor q4, q4, q10 vext.8 q12, q3, q3, #12 veor q6, q6, q11 vext.8 q13, q7, q7, #12 veor q3, q3, q12 vext.8 q14, q2, q2, #12 veor q7, q7, q13 vext.8 q15, q5, q5, #12 veor q2, q2, q14 veor q9, q9, q0 veor q5, q5, q15 vext.8 q0, q0, q0, #8 @ (x0 ^ (x0 <<< 32)) <<< 64) veor q10, q10, q1 veor q8, q8, q5 veor q9, q9, q5 vext.8 q1, q1, q1, #8 veor q13, q13, q3 veor q0, q0, q8 veor q14, q14, q7 veor q1, q1, q9 vext.8 q8, q3, q3, #8 veor q12, q12, q6 vext.8 q9, q7, q7, #8 veor q15, q15, q2 vext.8 q3, q6, q6, #8 veor q11, q11, q4 vext.8 q7, q5, q5, #8 veor q12, q12, q5 vext.8 q6, q2, q2, #8 veor q11, q11, q5 vext.8 q2, q4, q4, #8 veor q5, q9, q13 veor q4, q8, q12 veor q3, q3, q11 veor q7, q7, q15 veor q6, q6, q14 @ vmov q4, q8 veor q2, q2, q10 @ vmov q5, q9 vldmia r6, {q12} @ LSR ite eq @ Thumb2 thing, samity check in ARM addeq r6,r6,#0x10 bne Lenc_loop vldmia r6, {q12} @ LSRM0 b Lenc_loop .align 4 Lenc_done: vmov.i8 q8,#0x55 @ compose LBS0 vmov.i8 q9,#0x33 @ compose LBS1 vshr.u64 q10, q2, #1 vshr.u64 q11, q3, #1 veor q10, q10, q5 veor q11, q11, q7 vand q10, q10, q8 vand q11, q11, q8 veor q5, q5, q10 vshl.u64 q10, q10, #1 veor q7, q7, q11 vshl.u64 q11, q11, #1 veor q2, q2, q10 veor q3, q3, q11 vshr.u64 q10, q4, #1 vshr.u64 q11, q0, #1 veor q10, q10, q6 veor q11, q11, q1 vand q10, q10, q8 vand q11, q11, q8 veor q6, q6, q10 vshl.u64 q10, q10, #1 veor q1, q1, q11 vshl.u64 q11, q11, #1 veor q4, q4, q10 veor q0, q0, q11 vmov.i8 q8,#0x0f @ compose LBS2 vshr.u64 q10, q7, #2 vshr.u64 q11, q3, #2 veor q10, q10, q5 veor q11, q11, q2 vand q10, q10, q9 vand q11, q11, q9 veor q5, q5, q10 vshl.u64 q10, q10, #2 veor q2, q2, q11 vshl.u64 q11, q11, #2 veor q7, q7, q10 veor q3, q3, q11 vshr.u64 q10, q1, #2 vshr.u64 q11, q0, #2 veor q10, q10, q6 veor q11, q11, q4 vand q10, q10, q9 vand q11, q11, q9 
veor q6, q6, q10 vshl.u64 q10, q10, #2 veor q4, q4, q11 vshl.u64 q11, q11, #2 veor q1, q1, q10 veor q0, q0, q11 vshr.u64 q10, q6, #4 vshr.u64 q11, q4, #4 veor q10, q10, q5 veor q11, q11, q2 vand q10, q10, q8 vand q11, q11, q8 veor q5, q5, q10 vshl.u64 q10, q10, #4 veor q2, q2, q11 vshl.u64 q11, q11, #4 veor q6, q6, q10 veor q4, q4, q11 vshr.u64 q10, q1, #4 vshr.u64 q11, q0, #4 veor q10, q10, q7 veor q11, q11, q3 vand q10, q10, q8 vand q11, q11, q8 veor q7, q7, q10 vshl.u64 q10, q10, #4 veor q3, q3, q11 vshl.u64 q11, q11, #4 veor q1, q1, q10 veor q0, q0, q11 vldmia r4, {q8} @ last round key veor q4, q4, q8 veor q6, q6, q8 veor q3, q3, q8 veor q7, q7, q8 veor q2, q2, q8 veor q5, q5, q8 veor q0, q0, q8 veor q1, q1, q8 bx lr #ifdef __thumb2__ .thumb_func _bsaes_key_convert #endif .align 4 _bsaes_key_convert: adr r6,. vld1.8 {q7}, [r4]! @ load round 0 key #if defined(__thumb2__) || defined(__APPLE__) adr r6,LM0 #else sub r6,r6,#_bsaes_key_convert-LM0 #endif vld1.8 {q15}, [r4]! @ load round 1 key vmov.i8 q8, #0x01 @ bit masks vmov.i8 q9, #0x02 vmov.i8 q10, #0x04 vmov.i8 q11, #0x08 vmov.i8 q12, #0x10 vmov.i8 q13, #0x20 vldmia r6, {q14} @ LM0 #ifdef __ARMEL__ vrev32.8 q7, q7 vrev32.8 q15, q15 #endif sub r5,r5,#1 vstmia r12!, {q7} @ save round 0 key b Lkey_loop .align 4 Lkey_loop: vtbl.8 d14,{q15},d28 vtbl.8 d15,{q15},d29 vmov.i8 q6, #0x40 vmov.i8 q15, #0x80 vtst.8 q0, q7, q8 vtst.8 q1, q7, q9 vtst.8 q2, q7, q10 vtst.8 q3, q7, q11 vtst.8 q4, q7, q12 vtst.8 q5, q7, q13 vtst.8 q6, q7, q6 vtst.8 q7, q7, q15 vld1.8 {q15}, [r4]! @ load next round key vmvn q0, q0 @ "pnot" vmvn q1, q1 vmvn q5, q5 vmvn q6, q6 #ifdef __ARMEL__ vrev32.8 q15, q15 #endif subs r5,r5,#1 vstmia r12!,{q0,q1,q2,q3,q4,q5,q6,q7} @ write bit-sliced round key bne Lkey_loop vmov.i8 q7,#0x63 @ compose L63 @ don't save last round key bx lr .globl _bsaes_cbc_encrypt .private_extern _bsaes_cbc_encrypt #ifdef __thumb2__ .thumb_func _bsaes_cbc_encrypt #endif .align 5 _bsaes_cbc_encrypt: @ In OpenSSL, this function had a fallback to aes_nohw_cbc_encrypt for @ short inputs. We patch this out, using bsaes for all input sizes. @ it is up to the caller to make sure we are called with enc == 0 mov ip, sp stmdb sp!, {r4,r5,r6,r7,r8,r9,r10, lr} VFP_ABI_PUSH ldr r8, [ip] @ IV is 1st arg on the stack mov r2, r2, lsr#4 @ len in 16 byte blocks sub sp, #0x10 @ scratch space to carry over the IV mov r9, sp @ save sp ldr r10, [r3, #240] @ get # of rounds #ifndef BSAES_ASM_EXTENDED_KEY @ allocate the key schedule on the stack sub r12, sp, r10, lsl#7 @ 128 bytes per inner round key add r12, #96 @ sifze of bit-slices key schedule @ populate the key schedule mov r4, r3 @ pass key mov r5, r10 @ pass # of rounds mov sp, r12 @ sp is sp bl _bsaes_key_convert vldmia sp, {q6} vstmia r12, {q15} @ save last round key veor q7, q7, q6 @ fix up round 0 key vstmia sp, {q7} #else ldr r12, [r3, #244] eors r12, #1 beq 0f @ populate the key schedule str r12, [r3, #244] mov r4, r3 @ pass key mov r5, r10 @ pass # of rounds add r12, r3, #248 @ pass key schedule bl _bsaes_key_convert add r4, r3, #248 vldmia r4, {q6} vstmia r12, {q15} @ save last round key veor q7, q7, q6 @ fix up round 0 key vstmia r4, {q7} .align 2 #endif vld1.8 {q15}, [r8] @ load IV b Lcbc_dec_loop .align 4 Lcbc_dec_loop: subs r2, r2, #0x8 bmi Lcbc_dec_loop_finish vld1.8 {q0,q1}, [r0]! @ load input vld1.8 {q2,q3}, [r0]! #ifndef BSAES_ASM_EXTENDED_KEY mov r4, sp @ pass the key #else add r4, r3, #248 #endif vld1.8 {q4,q5}, [r0]! 
mov r5, r10 vld1.8 {q6,q7}, [r0] sub r0, r0, #0x60 vstmia r9, {q15} @ put aside IV bl _bsaes_decrypt8 vldmia r9, {q14} @ reload IV vld1.8 {q8,q9}, [r0]! @ reload input veor q0, q0, q14 @ ^= IV vld1.8 {q10,q11}, [r0]! veor q1, q1, q8 veor q6, q6, q9 vld1.8 {q12,q13}, [r0]! veor q4, q4, q10 veor q2, q2, q11 vld1.8 {q14,q15}, [r0]! veor q7, q7, q12 vst1.8 {q0,q1}, [r1]! @ write output veor q3, q3, q13 vst1.8 {q6}, [r1]! veor q5, q5, q14 vst1.8 {q4}, [r1]! vst1.8 {q2}, [r1]! vst1.8 {q7}, [r1]! vst1.8 {q3}, [r1]! vst1.8 {q5}, [r1]! b Lcbc_dec_loop Lcbc_dec_loop_finish: adds r2, r2, #8 beq Lcbc_dec_done @ Set up most parameters for the _bsaes_decrypt8 call. #ifndef BSAES_ASM_EXTENDED_KEY mov r4, sp @ pass the key #else add r4, r3, #248 #endif mov r5, r10 vstmia r9, {q15} @ put aside IV vld1.8 {q0}, [r0]! @ load input cmp r2, #2 blo Lcbc_dec_one vld1.8 {q1}, [r0]! beq Lcbc_dec_two vld1.8 {q2}, [r0]! cmp r2, #4 blo Lcbc_dec_three vld1.8 {q3}, [r0]! beq Lcbc_dec_four vld1.8 {q4}, [r0]! cmp r2, #6 blo Lcbc_dec_five vld1.8 {q5}, [r0]! beq Lcbc_dec_six vld1.8 {q6}, [r0]! sub r0, r0, #0x70 bl _bsaes_decrypt8 vldmia r9, {q14} @ reload IV vld1.8 {q8,q9}, [r0]! @ reload input veor q0, q0, q14 @ ^= IV vld1.8 {q10,q11}, [r0]! veor q1, q1, q8 veor q6, q6, q9 vld1.8 {q12,q13}, [r0]! veor q4, q4, q10 veor q2, q2, q11 vld1.8 {q15}, [r0]! veor q7, q7, q12 vst1.8 {q0,q1}, [r1]! @ write output veor q3, q3, q13 vst1.8 {q6}, [r1]! vst1.8 {q4}, [r1]! vst1.8 {q2}, [r1]! vst1.8 {q7}, [r1]! vst1.8 {q3}, [r1]! b Lcbc_dec_done .align 4 Lcbc_dec_six: sub r0, r0, #0x60 bl _bsaes_decrypt8 vldmia r9,{q14} @ reload IV vld1.8 {q8,q9}, [r0]! @ reload input veor q0, q0, q14 @ ^= IV vld1.8 {q10,q11}, [r0]! veor q1, q1, q8 veor q6, q6, q9 vld1.8 {q12}, [r0]! veor q4, q4, q10 veor q2, q2, q11 vld1.8 {q15}, [r0]! veor q7, q7, q12 vst1.8 {q0,q1}, [r1]! @ write output vst1.8 {q6}, [r1]! vst1.8 {q4}, [r1]! vst1.8 {q2}, [r1]! vst1.8 {q7}, [r1]! b Lcbc_dec_done .align 4 Lcbc_dec_five: sub r0, r0, #0x50 bl _bsaes_decrypt8 vldmia r9, {q14} @ reload IV vld1.8 {q8,q9}, [r0]! @ reload input veor q0, q0, q14 @ ^= IV vld1.8 {q10,q11}, [r0]! veor q1, q1, q8 veor q6, q6, q9 vld1.8 {q15}, [r0]! veor q4, q4, q10 vst1.8 {q0,q1}, [r1]! @ write output veor q2, q2, q11 vst1.8 {q6}, [r1]! vst1.8 {q4}, [r1]! vst1.8 {q2}, [r1]! b Lcbc_dec_done .align 4 Lcbc_dec_four: sub r0, r0, #0x40 bl _bsaes_decrypt8 vldmia r9, {q14} @ reload IV vld1.8 {q8,q9}, [r0]! @ reload input veor q0, q0, q14 @ ^= IV vld1.8 {q10}, [r0]! veor q1, q1, q8 veor q6, q6, q9 vld1.8 {q15}, [r0]! veor q4, q4, q10 vst1.8 {q0,q1}, [r1]! @ write output vst1.8 {q6}, [r1]! vst1.8 {q4}, [r1]! b Lcbc_dec_done .align 4 Lcbc_dec_three: sub r0, r0, #0x30 bl _bsaes_decrypt8 vldmia r9, {q14} @ reload IV vld1.8 {q8,q9}, [r0]! @ reload input veor q0, q0, q14 @ ^= IV vld1.8 {q15}, [r0]! veor q1, q1, q8 veor q6, q6, q9 vst1.8 {q0,q1}, [r1]! @ write output vst1.8 {q6}, [r1]! b Lcbc_dec_done .align 4 Lcbc_dec_two: sub r0, r0, #0x20 bl _bsaes_decrypt8 vldmia r9, {q14} @ reload IV vld1.8 {q8}, [r0]! @ reload input veor q0, q0, q14 @ ^= IV vld1.8 {q15}, [r0]! @ reload input veor q1, q1, q8 vst1.8 {q0,q1}, [r1]! @ write output b Lcbc_dec_done .align 4 Lcbc_dec_one: sub r0, r0, #0x10 bl _bsaes_decrypt8 vldmia r9, {q14} @ reload IV vld1.8 {q15}, [r0]! @ reload input veor q0, q0, q14 @ ^= IV vst1.8 {q0}, [r1]! 
@ write output Lcbc_dec_done: #ifndef BSAES_ASM_EXTENDED_KEY vmov.i32 q0, #0 vmov.i32 q1, #0 Lcbc_dec_bzero:@ wipe key schedule [if any] vstmia sp!, {q0,q1} cmp sp, r9 bne Lcbc_dec_bzero #endif mov sp, r9 add sp, #0x10 @ add sp,r9,#0x10 is no good for thumb vst1.8 {q15}, [r8] @ return IV VFP_ABI_POP ldmia sp!, {r4,r5,r6,r7,r8,r9,r10, pc} .globl _bsaes_ctr32_encrypt_blocks .private_extern _bsaes_ctr32_encrypt_blocks #ifdef __thumb2__ .thumb_func _bsaes_ctr32_encrypt_blocks #endif .align 5 _bsaes_ctr32_encrypt_blocks: @ In OpenSSL, short inputs fall back to aes_nohw_* here. We patch this @ out to retain a constant-time implementation. mov ip, sp stmdb sp!, {r4,r5,r6,r7,r8,r9,r10, lr} VFP_ABI_PUSH ldr r8, [ip] @ ctr is 1st arg on the stack sub sp, sp, #0x10 @ scratch space to carry over the ctr mov r9, sp @ save sp ldr r10, [r3, #240] @ get # of rounds #ifndef BSAES_ASM_EXTENDED_KEY @ allocate the key schedule on the stack sub r12, sp, r10, lsl#7 @ 128 bytes per inner round key add r12, #96 @ size of bit-sliced key schedule @ populate the key schedule mov r4, r3 @ pass key mov r5, r10 @ pass # of rounds mov sp, r12 @ sp is sp bl _bsaes_key_convert veor q7,q7,q15 @ fix up last round key vstmia r12, {q7} @ save last round key vld1.8 {q0}, [r8] @ load counter #ifdef __APPLE__ mov r8, #:lower16:(LREVM0SR-LM0) add r8, r6, r8 #else add r8, r6, #LREVM0SR-LM0 @ borrow r8 #endif vldmia sp, {q4} @ load round0 key #else ldr r12, [r3, #244] eors r12, #1 beq 0f @ populate the key schedule str r12, [r3, #244] mov r4, r3 @ pass key mov r5, r10 @ pass # of rounds add r12, r3, #248 @ pass key schedule bl _bsaes_key_convert veor q7,q7,q15 @ fix up last round key vstmia r12, {q7} @ save last round key .align 2 add r12, r3, #248 vld1.8 {q0}, [r8] @ load counter adrl r8, LREVM0SR @ borrow r8 vldmia r12, {q4} @ load round0 key sub sp, #0x10 @ place for adjusted round0 key #endif vmov.i32 q8,#1 @ compose 1<<96 veor q9,q9,q9 vrev32.8 q0,q0 vext.8 q8,q9,q8,#4 vrev32.8 q4,q4 vadd.u32 q9,q8,q8 @ compose 2<<96 vstmia sp, {q4} @ save adjusted round0 key b Lctr_enc_loop .align 4 Lctr_enc_loop: vadd.u32 q10, q8, q9 @ compose 3<<96 vadd.u32 q1, q0, q8 @ +1 vadd.u32 q2, q0, q9 @ +2 vadd.u32 q3, q0, q10 @ +3 vadd.u32 q4, q1, q10 vadd.u32 q5, q2, q10 vadd.u32 q6, q3, q10 vadd.u32 q7, q4, q10 vadd.u32 q10, q5, q10 @ next counter @ Borrow prologue from _bsaes_encrypt8 to use the opportunity @ to flip byte order in 32-bit counter vldmia sp, {q9} @ load round0 key #ifndef BSAES_ASM_EXTENDED_KEY add r4, sp, #0x10 @ pass next round key #else add r4, r3, #264 #endif vldmia r8, {q8} @ LREVM0SR mov r5, r10 @ pass rounds vstmia r9, {q10} @ save next counter #ifdef __APPLE__ mov r6, #:lower16:(LREVM0SR-LSR) sub r6, r8, r6 #else sub r6, r8, #LREVM0SR-LSR @ pass constants #endif bl _bsaes_encrypt8_alt subs r2, r2, #8 blo Lctr_enc_loop_done vld1.8 {q8,q9}, [r0]! @ load input vld1.8 {q10,q11}, [r0]! veor q0, q8 veor q1, q9 vld1.8 {q12,q13}, [r0]! veor q4, q10 veor q6, q11 vld1.8 {q14,q15}, [r0]! veor q3, q12 vst1.8 {q0,q1}, [r1]! @ write output veor q7, q13 veor q2, q14 vst1.8 {q4}, [r1]! veor q5, q15 vst1.8 {q6}, [r1]! vmov.i32 q8, #1 @ compose 1<<96 vst1.8 {q3}, [r1]! veor q9, q9, q9 vst1.8 {q7}, [r1]! vext.8 q8, q9, q8, #4 vst1.8 {q2}, [r1]! vadd.u32 q9,q8,q8 @ compose 2<<96 vst1.8 {q5}, [r1]! vldmia r9, {q0} @ load counter bne Lctr_enc_loop b Lctr_enc_done .align 4 Lctr_enc_loop_done: add r2, r2, #8 vld1.8 {q8}, [r0]! @ load input veor q0, q8 vst1.8 {q0}, [r1]! @ write output cmp r2, #2 blo Lctr_enc_done vld1.8 {q9}, [r0]! 
veor q1, q9 vst1.8 {q1}, [r1]! beq Lctr_enc_done vld1.8 {q10}, [r0]! veor q4, q10 vst1.8 {q4}, [r1]! cmp r2, #4 blo Lctr_enc_done vld1.8 {q11}, [r0]! veor q6, q11 vst1.8 {q6}, [r1]! beq Lctr_enc_done vld1.8 {q12}, [r0]! veor q3, q12 vst1.8 {q3}, [r1]! cmp r2, #6 blo Lctr_enc_done vld1.8 {q13}, [r0]! veor q7, q13 vst1.8 {q7}, [r1]! beq Lctr_enc_done vld1.8 {q14}, [r0] veor q2, q14 vst1.8 {q2}, [r1]! Lctr_enc_done: vmov.i32 q0, #0 vmov.i32 q1, #0 #ifndef BSAES_ASM_EXTENDED_KEY Lctr_enc_bzero:@ wipe key schedule [if any] vstmia sp!, {q0,q1} cmp sp, r9 bne Lctr_enc_bzero #else vstmia sp, {q0,q1} #endif mov sp, r9 add sp, #0x10 @ add sp,r9,#0x10 is no good for thumb VFP_ABI_POP ldmia sp!, {r4,r5,r6,r7,r8,r9,r10, pc} @ return @ OpenSSL contains aes_nohw_* fallback code here. We patch this @ out to retain a constant-time implementation. #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__APPLE__)
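@ Editorial note, not part of the generated output above: _bsaes_ctr32_encrypt_blocks
@ treats only the final 32-bit word of the 16-byte counter block as a big-endian
@ counter and derives up to eight consecutive counter blocks per Lctr_enc_loop
@ iteration before XORing the bit-sliced AES output with the input. A minimal C
@ sketch of the keystream derivation it parallels (load_be32, store_be32 and
@ aes_encrypt are illustrative helpers, not aws-lc APIs):
@
@   for (size_t i = 0; i < 8; i++) {
@     uint8_t block[16];
@     memcpy(block, ctr, 16);                          /* copy the counter block */
@     uint32_t c = load_be32(ctr + 12) + (uint32_t)i;  /* wraps modulo 2^32      */
@     store_be32(block + 12, c);
@     aes_encrypt(key, block, ks + 16 * i);            /* one AES encryption     */
@     /* out[16*i .. 16*i+15] = in[16*i .. 16*i+15] ^ ks[16*i .. 16*i+15] */
@   }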
marvin-hansen/iggy-streaming-system
40,095
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/ios-arm/crypto/fipsmodule/vpaes-armv7.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__APPLE__) .syntax unified #if defined(__thumb2__) .thumb #else .code 32 #endif .text .align 7 @ totally strategic alignment _vpaes_consts: Lk_mc_forward:@ mc_forward .quad 0x0407060500030201, 0x0C0F0E0D080B0A09 .quad 0x080B0A0904070605, 0x000302010C0F0E0D .quad 0x0C0F0E0D080B0A09, 0x0407060500030201 .quad 0x000302010C0F0E0D, 0x080B0A0904070605 Lk_mc_backward:@ mc_backward .quad 0x0605040702010003, 0x0E0D0C0F0A09080B .quad 0x020100030E0D0C0F, 0x0A09080B06050407 .quad 0x0E0D0C0F0A09080B, 0x0605040702010003 .quad 0x0A09080B06050407, 0x020100030E0D0C0F Lk_sr:@ sr .quad 0x0706050403020100, 0x0F0E0D0C0B0A0908 .quad 0x030E09040F0A0500, 0x0B06010C07020D08 .quad 0x0F060D040B020900, 0x070E050C030A0108 .quad 0x0B0E0104070A0D00, 0x0306090C0F020508 @ @ "Hot" constants @ Lk_inv:@ inv, inva .quad 0x0E05060F0D080180, 0x040703090A0B0C02 .quad 0x01040A060F0B0780, 0x030D0E0C02050809 Lk_ipt:@ input transform (lo, hi) .quad 0xC2B2E8985A2A7000, 0xCABAE09052227808 .quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81 Lk_sbo:@ sbou, sbot .quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878 .quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA Lk_sb1:@ sb1u, sb1t .quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF .quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544 Lk_sb2:@ sb2u, sb2t .quad 0x69EB88400AE12900, 0xC2A163C8AB82234A .quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD .byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,65,82,77,118,55,32,78,69,79,78,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0 .align 2 .align 6 @@ @@ _aes_preheat @@ @@ Fills q9-q15 as specified below. @@ #ifdef __thumb2__ .thumb_func _vpaes_preheat #endif .align 4 _vpaes_preheat: adr r10, Lk_inv vmov.i8 q9, #0x0f @ Lk_s0F vld1.64 {q10,q11}, [r10]! @ Lk_inv add r10, r10, #64 @ Skip Lk_ipt, Lk_sbo vld1.64 {q12,q13}, [r10]! @ Lk_sb1 vld1.64 {q14,q15}, [r10] @ Lk_sb2 bx lr @@ @@ _aes_encrypt_core @@ @@ AES-encrypt q0. @@ @@ Inputs: @@ q0 = input @@ q9-q15 as in _vpaes_preheat @@ [r2] = scheduled keys @@ @@ Output in q0 @@ Clobbers q1-q5, r8-r11 @@ Preserves q6-q8 so you get some local vectors @@ @@ #ifdef __thumb2__ .thumb_func _vpaes_encrypt_core #endif .align 4 _vpaes_encrypt_core: mov r9, r2 ldr r8, [r2,#240] @ pull rounds adr r11, Lk_ipt @ vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo @ vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi vld1.64 {q2, q3}, [r11] adr r11, Lk_mc_forward+16 vld1.64 {q5}, [r9]! @ vmovdqu (%r9), %xmm5 # round0 key vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1 vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0 vtbl.8 d2, {q2}, d2 @ vpshufb %xmm1, %xmm2, %xmm1 vtbl.8 d3, {q2}, d3 vtbl.8 d4, {q3}, d0 @ vpshufb %xmm0, %xmm3, %xmm2 vtbl.8 d5, {q3}, d1 veor q0, q1, q5 @ vpxor %xmm5, %xmm1, %xmm0 veor q0, q0, q2 @ vpxor %xmm2, %xmm0, %xmm0 @ .Lenc_entry ends with a bnz instruction which is normally paired with @ subs in .Lenc_loop. tst r8, r8 b Lenc_entry .align 4 Lenc_loop: @ middle of middle round add r10, r11, #0x40 vtbl.8 d8, {q13}, d4 @ vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u vtbl.8 d9, {q13}, d5 vld1.64 {q1}, [r11]! 
@ vmovdqa -0x40(%r11,%r10), %xmm1 # Lk_mc_forward[] vtbl.8 d0, {q12}, d6 @ vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t vtbl.8 d1, {q12}, d7 veor q4, q4, q5 @ vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k vtbl.8 d10, {q15}, d4 @ vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u vtbl.8 d11, {q15}, d5 veor q0, q0, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 0 = A vtbl.8 d4, {q14}, d6 @ vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t vtbl.8 d5, {q14}, d7 vld1.64 {q4}, [r10] @ vmovdqa (%r11,%r10), %xmm4 # Lk_mc_backward[] vtbl.8 d6, {q0}, d2 @ vpshufb %xmm1, %xmm0, %xmm3 # 0 = B vtbl.8 d7, {q0}, d3 veor q2, q2, q5 @ vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A @ Write to q5 instead of q0, so the table and destination registers do @ not overlap. vtbl.8 d10, {q0}, d8 @ vpshufb %xmm4, %xmm0, %xmm0 # 3 = D vtbl.8 d11, {q0}, d9 veor q3, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B vtbl.8 d8, {q3}, d2 @ vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C vtbl.8 d9, {q3}, d3 @ Here we restore the original q0/q5 usage. veor q0, q5, q3 @ vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D and r11, r11, #~(1<<6) @ and $0x30, %r11 # ... mod 4 veor q0, q0, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D subs r8, r8, #1 @ nr-- Lenc_entry: @ top of round vand q1, q0, q9 @ vpand %xmm0, %xmm9, %xmm1 # 0 = k vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0 # 1 = i vtbl.8 d10, {q11}, d2 @ vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k vtbl.8 d11, {q11}, d3 veor q1, q1, q0 @ vpxor %xmm0, %xmm1, %xmm1 # 0 = j vtbl.8 d6, {q10}, d0 @ vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i vtbl.8 d7, {q10}, d1 vtbl.8 d8, {q10}, d2 @ vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j vtbl.8 d9, {q10}, d3 veor q3, q3, q5 @ vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k veor q4, q4, q5 @ vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k vtbl.8 d4, {q10}, d6 @ vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak vtbl.8 d5, {q10}, d7 vtbl.8 d6, {q10}, d8 @ vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak vtbl.8 d7, {q10}, d9 veor q2, q2, q1 @ vpxor %xmm1, %xmm2, %xmm2 # 2 = io veor q3, q3, q0 @ vpxor %xmm0, %xmm3, %xmm3 # 3 = jo vld1.64 {q5}, [r9]! @ vmovdqu (%r9), %xmm5 bne Lenc_loop @ middle of last round add r10, r11, #0x80 adr r11, Lk_sbo @ Read to q1 instead of q4, so the vtbl.8 instruction below does not @ overlap table and destination registers. vld1.64 {q1}, [r11]! @ vmovdqa -0x60(%r10), %xmm4 # 3 : sbou vld1.64 {q0}, [r11] @ vmovdqa -0x50(%r10), %xmm0 # 0 : sbot Lk_sbo+16 vtbl.8 d8, {q1}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou vtbl.8 d9, {q1}, d5 vld1.64 {q1}, [r10] @ vmovdqa 0x40(%r11,%r10), %xmm1 # Lk_sr[] @ Write to q2 instead of q0 below, to avoid overlapping table and @ destination registers. vtbl.8 d4, {q0}, d6 @ vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t vtbl.8 d5, {q0}, d7 veor q4, q4, q5 @ vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k veor q2, q2, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 0 = A @ Here we restore the original q0/q2 usage. vtbl.8 d0, {q2}, d2 @ vpshufb %xmm1, %xmm0, %xmm0 vtbl.8 d1, {q2}, d3 bx lr .globl _vpaes_encrypt .private_extern _vpaes_encrypt #ifdef __thumb2__ .thumb_func _vpaes_encrypt #endif .align 4 _vpaes_encrypt: @ _vpaes_encrypt_core uses r8-r11. Round up to r7-r11 to maintain stack @ alignment. stmdb sp!, {r7,r8,r9,r10,r11,lr} @ _vpaes_encrypt_core uses q4-q5 (d8-d11), which are callee-saved. 
vstmdb sp!, {d8,d9,d10,d11} vld1.64 {q0}, [r0] bl _vpaes_preheat bl _vpaes_encrypt_core vst1.64 {q0}, [r1] vldmia sp!, {d8,d9,d10,d11} ldmia sp!, {r7,r8,r9,r10,r11, pc} @ return @ @ Decryption stuff @ .align 4 _vpaes_decrypt_consts: Lk_dipt:@ decryption input transform .quad 0x0F505B040B545F00, 0x154A411E114E451A .quad 0x86E383E660056500, 0x12771772F491F194 Lk_dsbo:@ decryption sbox final output .quad 0x1387EA537EF94000, 0xC7AA6DB9D4943E2D .quad 0x12D7560F93441D00, 0xCA4B8159D8C58E9C Lk_dsb9:@ decryption sbox output *9*u, *9*t .quad 0x851C03539A86D600, 0xCAD51F504F994CC9 .quad 0xC03B1789ECD74900, 0x725E2C9EB2FBA565 Lk_dsbd:@ decryption sbox output *D*u, *D*t .quad 0x7D57CCDFE6B1A200, 0xF56E9B13882A4439 .quad 0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3 Lk_dsbb:@ decryption sbox output *B*u, *B*t .quad 0xD022649296B44200, 0x602646F6B0F2D404 .quad 0xC19498A6CD596700, 0xF3FF0C3E3255AA6B Lk_dsbe:@ decryption sbox output *E*u, *E*t .quad 0x46F2929626D4D000, 0x2242600464B4F6B0 .quad 0x0C55A6CDFFAAC100, 0x9467F36B98593E32 @@ @@ Decryption core @@ @@ Same API as encryption core, except it clobbers q12-q15 rather than using @@ the values from _vpaes_preheat. q9-q11 must still be set from @@ _vpaes_preheat. @@ #ifdef __thumb2__ .thumb_func _vpaes_decrypt_core #endif .align 4 _vpaes_decrypt_core: mov r9, r2 ldr r8, [r2,#240] @ pull rounds @ This function performs shuffles with various constants. The x86_64 @ version loads them on-demand into %xmm0-%xmm5. This does not work well @ for ARMv7 because those registers are shuffle destinations. The ARMv8 @ version preloads those constants into registers, but ARMv7 has half @ the registers to work with. Instead, we load them on-demand into @ q12-q15, registers normally use for preloaded constants. This is fine @ because decryption doesn't use those constants. The values are @ constant, so this does not interfere with potential 2x optimizations. adr r7, Lk_dipt vld1.64 {q12,q13}, [r7] @ vmovdqa Lk_dipt(%rip), %xmm2 # iptlo lsl r11, r8, #4 @ mov %rax, %r11; shl $4, %r11 eor r11, r11, #0x30 @ xor $0x30, %r11 adr r10, Lk_sr and r11, r11, #0x30 @ and $0x30, %r11 add r11, r11, r10 adr r10, Lk_mc_forward+48 vld1.64 {q4}, [r9]! @ vmovdqu (%r9), %xmm4 # round0 key vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1 vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0 vtbl.8 d4, {q12}, d2 @ vpshufb %xmm1, %xmm2, %xmm2 vtbl.8 d5, {q12}, d3 vld1.64 {q5}, [r10] @ vmovdqa Lk_mc_forward+48(%rip), %xmm5 @ vmovdqa .Lk_dipt+16(%rip), %xmm1 # ipthi vtbl.8 d0, {q13}, d0 @ vpshufb %xmm0, %xmm1, %xmm0 vtbl.8 d1, {q13}, d1 veor q2, q2, q4 @ vpxor %xmm4, %xmm2, %xmm2 veor q0, q0, q2 @ vpxor %xmm2, %xmm0, %xmm0 @ .Ldec_entry ends with a bnz instruction which is normally paired with @ subs in .Ldec_loop. tst r8, r8 b Ldec_entry .align 4 Ldec_loop: @ @ Inverse mix columns @ @ We load .Lk_dsb* into q12-q15 on-demand. See the comment at the top of @ the function. adr r10, Lk_dsb9 vld1.64 {q12,q13}, [r10]! @ vmovdqa -0x20(%r10),%xmm4 # 4 : sb9u @ vmovdqa -0x10(%r10),%xmm1 # 0 : sb9t @ Load sbd* ahead of time. vld1.64 {q14,q15}, [r10]! @ vmovdqa 0x00(%r10),%xmm4 # 4 : sbdu @ vmovdqa 0x10(%r10),%xmm1 # 0 : sbdt vtbl.8 d8, {q12}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sb9u vtbl.8 d9, {q12}, d5 vtbl.8 d2, {q13}, d6 @ vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb9t vtbl.8 d3, {q13}, d7 veor q0, q4, q0 @ vpxor %xmm4, %xmm0, %xmm0 veor q0, q0, q1 @ vpxor %xmm1, %xmm0, %xmm0 # 0 = ch @ Load sbb* ahead of time. vld1.64 {q12,q13}, [r10]! 
@ vmovdqa 0x20(%r10),%xmm4 # 4 : sbbu @ vmovdqa 0x30(%r10),%xmm1 # 0 : sbbt vtbl.8 d8, {q14}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbdu vtbl.8 d9, {q14}, d5 @ Write to q1 instead of q0, so the table and destination registers do @ not overlap. vtbl.8 d2, {q0}, d10 @ vpshufb %xmm5, %xmm0, %xmm0 # MC ch vtbl.8 d3, {q0}, d11 @ Here we restore the original q0/q1 usage. This instruction is @ reordered from the ARMv8 version so we do not clobber the vtbl.8 @ below. veor q0, q1, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 4 = ch vtbl.8 d2, {q15}, d6 @ vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbdt vtbl.8 d3, {q15}, d7 @ vmovdqa 0x20(%r10), %xmm4 # 4 : sbbu veor q0, q0, q1 @ vpxor %xmm1, %xmm0, %xmm0 # 0 = ch @ vmovdqa 0x30(%r10), %xmm1 # 0 : sbbt @ Load sbd* ahead of time. vld1.64 {q14,q15}, [r10]! @ vmovdqa 0x40(%r10),%xmm4 # 4 : sbeu @ vmovdqa 0x50(%r10),%xmm1 # 0 : sbet vtbl.8 d8, {q12}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbbu vtbl.8 d9, {q12}, d5 @ Write to q1 instead of q0, so the table and destination registers do @ not overlap. vtbl.8 d2, {q0}, d10 @ vpshufb %xmm5, %xmm0, %xmm0 # MC ch vtbl.8 d3, {q0}, d11 @ Here we restore the original q0/q1 usage. This instruction is @ reordered from the ARMv8 version so we do not clobber the vtbl.8 @ below. veor q0, q1, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 4 = ch vtbl.8 d2, {q13}, d6 @ vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbbt vtbl.8 d3, {q13}, d7 veor q0, q0, q1 @ vpxor %xmm1, %xmm0, %xmm0 # 0 = ch vtbl.8 d8, {q14}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbeu vtbl.8 d9, {q14}, d5 @ Write to q1 instead of q0, so the table and destination registers do @ not overlap. vtbl.8 d2, {q0}, d10 @ vpshufb %xmm5, %xmm0, %xmm0 # MC ch vtbl.8 d3, {q0}, d11 @ Here we restore the original q0/q1 usage. This instruction is @ reordered from the ARMv8 version so we do not clobber the vtbl.8 @ below. veor q0, q1, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 4 = ch vtbl.8 d2, {q15}, d6 @ vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbet vtbl.8 d3, {q15}, d7 vext.8 q5, q5, q5, #12 @ vpalignr $12, %xmm5, %xmm5, %xmm5 veor q0, q0, q1 @ vpxor %xmm1, %xmm0, %xmm0 # 0 = ch subs r8, r8, #1 @ sub $1,%rax # nr-- Ldec_entry: @ top of round vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1 # 0 = k vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0 # 1 = i vtbl.8 d4, {q11}, d2 @ vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k vtbl.8 d5, {q11}, d3 veor q1, q1, q0 @ vpxor %xmm0, %xmm1, %xmm1 # 0 = j vtbl.8 d6, {q10}, d0 @ vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i vtbl.8 d7, {q10}, d1 vtbl.8 d8, {q10}, d2 @ vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j vtbl.8 d9, {q10}, d3 veor q3, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k veor q4, q4, q2 @ vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k vtbl.8 d4, {q10}, d6 @ vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak vtbl.8 d5, {q10}, d7 vtbl.8 d6, {q10}, d8 @ vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak vtbl.8 d7, {q10}, d9 veor q2, q2, q1 @ vpxor %xmm1, %xmm2, %xmm2 # 2 = io veor q3, q3, q0 @ vpxor %xmm0, %xmm3, %xmm3 # 3 = jo vld1.64 {q0}, [r9]! @ vmovdqu (%r9), %xmm0 bne Ldec_loop @ middle of last round adr r10, Lk_dsbo @ Write to q1 rather than q4 to avoid overlapping table and destination. vld1.64 {q1}, [r10]! @ vmovdqa 0x60(%r10), %xmm4 # 3 : sbou vtbl.8 d8, {q1}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou vtbl.8 d9, {q1}, d5 @ Write to q2 rather than q1 to avoid overlapping table and destination. 
vld1.64 {q2}, [r10] @ vmovdqa 0x70(%r10), %xmm1 # 0 : sbot vtbl.8 d2, {q2}, d6 @ vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb1t vtbl.8 d3, {q2}, d7 vld1.64 {q2}, [r11] @ vmovdqa -0x160(%r11), %xmm2 # Lk_sr-Lk_dsbd=-0x160 veor q4, q4, q0 @ vpxor %xmm0, %xmm4, %xmm4 # 4 = sb1u + k @ Write to q1 rather than q0 so the table and destination registers @ below do not overlap. veor q1, q1, q4 @ vpxor %xmm4, %xmm1, %xmm0 # 0 = A vtbl.8 d0, {q1}, d4 @ vpshufb %xmm2, %xmm0, %xmm0 vtbl.8 d1, {q1}, d5 bx lr .globl _vpaes_decrypt .private_extern _vpaes_decrypt #ifdef __thumb2__ .thumb_func _vpaes_decrypt #endif .align 4 _vpaes_decrypt: @ _vpaes_decrypt_core uses r7-r11. stmdb sp!, {r7,r8,r9,r10,r11,lr} @ _vpaes_decrypt_core uses q4-q5 (d8-d11), which are callee-saved. vstmdb sp!, {d8,d9,d10,d11} vld1.64 {q0}, [r0] bl _vpaes_preheat bl _vpaes_decrypt_core vst1.64 {q0}, [r1] vldmia sp!, {d8,d9,d10,d11} ldmia sp!, {r7,r8,r9,r10,r11, pc} @ return @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @@ @@ @@ AES key schedule @@ @@ @@ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @ This function diverges from both x86_64 and armv7 in which constants are @ pinned. x86_64 has a common preheat function for all operations. aarch64 @ separates them because it has enough registers to pin nearly all constants. @ armv7 does not have enough registers, but needing explicit loads and stores @ also complicates using x86_64's register allocation directly. @ @ We pin some constants for convenience and leave q14 and q15 free to load @ others on demand. @ @ Key schedule constants @ .align 4 _vpaes_key_consts: Lk_dksd:@ decryption key schedule: invskew x*D .quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9 .quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E Lk_dksb:@ decryption key schedule: invskew x*B .quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99 .quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8 Lk_dkse:@ decryption key schedule: invskew x*E + 0x63 .quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086 .quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487 Lk_dks9:@ decryption key schedule: invskew x*9 .quad 0xB6116FC87ED9A700, 0x4AED933482255BFC .quad 0x4576516227143300, 0x8BB89FACE9DAFDCE Lk_rcon:@ rcon .quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81 Lk_opt:@ output transform .quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808 .quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0 Lk_deskew:@ deskew tables: inverts the sbox's "skew" .quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A .quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77 #ifdef __thumb2__ .thumb_func _vpaes_key_preheat #endif .align 4 _vpaes_key_preheat: adr r11, Lk_rcon vmov.i8 q12, #0x5b @ Lk_s63 adr r10, Lk_inv @ Must be aligned to 8 mod 16. vmov.i8 q9, #0x0f @ Lk_s0F vld1.64 {q10,q11}, [r10] @ Lk_inv vld1.64 {q8}, [r11] @ Lk_rcon bx lr #ifdef __thumb2__ .thumb_func _vpaes_schedule_core #endif .align 4 _vpaes_schedule_core: @ We only need to save lr, but ARM requires an 8-byte stack alignment, @ so save an extra register. stmdb sp!, {r3,lr} bl _vpaes_key_preheat @ load the tables adr r11, Lk_ipt @ Must be aligned to 8 mod 16. vld1.64 {q0}, [r0]! @ vmovdqu (%rdi), %xmm0 # load key (unaligned) @ input transform @ Use q4 here rather than q3 so .Lschedule_am_decrypting does not @ overlap table and destination. vmov q4, q0 @ vmovdqa %xmm0, %xmm3 bl _vpaes_schedule_transform adr r10, Lk_sr @ Must be aligned to 8 mod 16. 
vmov q7, q0 @ vmovdqa %xmm0, %xmm7 add r8, r8, r10 tst r3, r3 bne Lschedule_am_decrypting @ encrypting, output zeroth round key after transform vst1.64 {q0}, [r2] @ vmovdqu %xmm0, (%rdx) b Lschedule_go Lschedule_am_decrypting: @ decrypting, output zeroth round key after shiftrows vld1.64 {q1}, [r8] @ vmovdqa (%r8,%r10), %xmm1 vtbl.8 d6, {q4}, d2 @ vpshufb %xmm1, %xmm3, %xmm3 vtbl.8 d7, {q4}, d3 vst1.64 {q3}, [r2] @ vmovdqu %xmm3, (%rdx) eor r8, r8, #0x30 @ xor $0x30, %r8 Lschedule_go: cmp r1, #192 @ cmp $192, %esi bhi Lschedule_256 beq Lschedule_192 @ 128: fall though @@ @@ .schedule_128 @@ @@ 128-bit specific part of key schedule. @@ @@ This schedule is really simple, because all its parts @@ are accomplished by the subroutines. @@ Lschedule_128: mov r0, #10 @ mov $10, %esi Loop_schedule_128: bl _vpaes_schedule_round subs r0, r0, #1 @ dec %esi beq Lschedule_mangle_last bl _vpaes_schedule_mangle @ write output b Loop_schedule_128 @@ @@ .aes_schedule_192 @@ @@ 192-bit specific part of key schedule. @@ @@ The main body of this schedule is the same as the 128-bit @@ schedule, but with more smearing. The long, high side is @@ stored in q7 as before, and the short, low side is in @@ the high bits of q6. @@ @@ This schedule is somewhat nastier, however, because each @@ round produces 192 bits of key material, or 1.5 round keys. @@ Therefore, on each cycle we do 2 rounds and produce 3 round @@ keys. @@ .align 4 Lschedule_192: sub r0, r0, #8 vld1.64 {q0}, [r0] @ vmovdqu 8(%rdi),%xmm0 # load key part 2 (very unaligned) bl _vpaes_schedule_transform @ input transform vmov q6, q0 @ vmovdqa %xmm0, %xmm6 # save short part vmov.i8 d12, #0 @ vpxor %xmm4, %xmm4, %xmm4 # clear 4 @ vmovhlps %xmm4, %xmm6, %xmm6 # clobber low side with zeros mov r0, #4 @ mov $4, %esi Loop_schedule_192: bl _vpaes_schedule_round vext.8 q0, q6, q0, #8 @ vpalignr $8,%xmm6,%xmm0,%xmm0 bl _vpaes_schedule_mangle @ save key n bl _vpaes_schedule_192_smear bl _vpaes_schedule_mangle @ save key n+1 bl _vpaes_schedule_round subs r0, r0, #1 @ dec %esi beq Lschedule_mangle_last bl _vpaes_schedule_mangle @ save key n+2 bl _vpaes_schedule_192_smear b Loop_schedule_192 @@ @@ .aes_schedule_256 @@ @@ 256-bit specific part of key schedule. @@ @@ The structure here is very similar to the 128-bit @@ schedule, but with an additional "low side" in @@ q6. The low side's rounds are the same as the @@ high side's, except no rcon and no rotation. @@ .align 4 Lschedule_256: vld1.64 {q0}, [r0] @ vmovdqu 16(%rdi),%xmm0 # load key part 2 (unaligned) bl _vpaes_schedule_transform @ input transform mov r0, #7 @ mov $7, %esi Loop_schedule_256: bl _vpaes_schedule_mangle @ output low result vmov q6, q0 @ vmovdqa %xmm0, %xmm6 # save cur_lo in xmm6 @ high round bl _vpaes_schedule_round subs r0, r0, #1 @ dec %esi beq Lschedule_mangle_last bl _vpaes_schedule_mangle @ low round. swap xmm7 and xmm6 vdup.32 q0, d1[1] @ vpshufd $0xFF, %xmm0, %xmm0 vmov.i8 q4, #0 vmov q5, q7 @ vmovdqa %xmm7, %xmm5 vmov q7, q6 @ vmovdqa %xmm6, %xmm7 bl _vpaes_schedule_low_round vmov q7, q5 @ vmovdqa %xmm5, %xmm7 b Loop_schedule_256 @@ @@ .aes_schedule_mangle_last @@ @@ Mangler for last round of key schedule @@ Mangles q0 @@ when encrypting, outputs out(q0) ^ 63 @@ when decrypting, outputs unskew(q0) @@ @@ Always called right before return... 
jumps to cleanup and exits @@ .align 4 Lschedule_mangle_last: @ schedule last round key from xmm0 adr r11, Lk_deskew @ lea Lk_deskew(%rip),%r11 # prepare to deskew tst r3, r3 bne Lschedule_mangle_last_dec @ encrypting vld1.64 {q1}, [r8] @ vmovdqa (%r8,%r10),%xmm1 adr r11, Lk_opt @ lea Lk_opt(%rip), %r11 # prepare to output transform add r2, r2, #32 @ add $32, %rdx vmov q2, q0 vtbl.8 d0, {q2}, d2 @ vpshufb %xmm1, %xmm0, %xmm0 # output permute vtbl.8 d1, {q2}, d3 Lschedule_mangle_last_dec: sub r2, r2, #16 @ add $-16, %rdx veor q0, q0, q12 @ vpxor Lk_s63(%rip), %xmm0, %xmm0 bl _vpaes_schedule_transform @ output transform vst1.64 {q0}, [r2] @ vmovdqu %xmm0, (%rdx) # save last key @ cleanup veor q0, q0, q0 @ vpxor %xmm0, %xmm0, %xmm0 veor q1, q1, q1 @ vpxor %xmm1, %xmm1, %xmm1 veor q2, q2, q2 @ vpxor %xmm2, %xmm2, %xmm2 veor q3, q3, q3 @ vpxor %xmm3, %xmm3, %xmm3 veor q4, q4, q4 @ vpxor %xmm4, %xmm4, %xmm4 veor q5, q5, q5 @ vpxor %xmm5, %xmm5, %xmm5 veor q6, q6, q6 @ vpxor %xmm6, %xmm6, %xmm6 veor q7, q7, q7 @ vpxor %xmm7, %xmm7, %xmm7 ldmia sp!, {r3,pc} @ return @@ @@ .aes_schedule_192_smear @@ @@ Smear the short, low side in the 192-bit key schedule. @@ @@ Inputs: @@ q7: high side, b a x y @@ q6: low side, d c 0 0 @@ @@ Outputs: @@ q6: b+c+d b+c 0 0 @@ q0: b+c+d b+c b a @@ #ifdef __thumb2__ .thumb_func _vpaes_schedule_192_smear #endif .align 4 _vpaes_schedule_192_smear: vmov.i8 q1, #0 vdup.32 q0, d15[1] vshl.i64 q1, q6, #32 @ vpshufd $0x80, %xmm6, %xmm1 # d c 0 0 -> c 0 0 0 vmov d0, d15 @ vpshufd $0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a veor q6, q6, q1 @ vpxor %xmm1, %xmm6, %xmm6 # -> c+d c 0 0 veor q1, q1, q1 @ vpxor %xmm1, %xmm1, %xmm1 veor q6, q6, q0 @ vpxor %xmm0, %xmm6, %xmm6 # -> b+c+d b+c b a vmov q0, q6 @ vmovdqa %xmm6, %xmm0 vmov d12, d2 @ vmovhlps %xmm1, %xmm6, %xmm6 # clobber low side with zeros bx lr @@ @@ .aes_schedule_round @@ @@ Runs one main round of the key schedule on q0, q7 @@ @@ Specifically, runs subbytes on the high dword of q0 @@ then rotates it by one byte and xors into the low dword of @@ q7. @@ @@ Adds rcon from low byte of q8, then rotates q8 for @@ next rcon. @@ @@ Smears the dwords of q7 by xoring the low into the @@ second low, result into third, result into highest. @@ @@ Returns results in q7 = q0. @@ Clobbers q1-q4, r11. @@ #ifdef __thumb2__ .thumb_func _vpaes_schedule_round #endif .align 4 _vpaes_schedule_round: @ extract rcon from xmm8 vmov.i8 q4, #0 @ vpxor %xmm4, %xmm4, %xmm4 vext.8 q1, q8, q4, #15 @ vpalignr $15, %xmm8, %xmm4, %xmm1 vext.8 q8, q8, q8, #15 @ vpalignr $15, %xmm8, %xmm8, %xmm8 veor q7, q7, q1 @ vpxor %xmm1, %xmm7, %xmm7 @ rotate vdup.32 q0, d1[1] @ vpshufd $0xFF, %xmm0, %xmm0 vext.8 q0, q0, q0, #1 @ vpalignr $1, %xmm0, %xmm0, %xmm0 @ fall through... @ low round: same as high round, but no rotation and no rcon. _vpaes_schedule_low_round: @ The x86_64 version pins .Lk_sb1 in %xmm13 and .Lk_sb1+16 in %xmm12. @ We pin other values in _vpaes_key_preheat, so load them now. 
adr r11, Lk_sb1 vld1.64 {q14,q15}, [r11] @ smear xmm7 vext.8 q1, q4, q7, #12 @ vpslldq $4, %xmm7, %xmm1 veor q7, q7, q1 @ vpxor %xmm1, %xmm7, %xmm7 vext.8 q4, q4, q7, #8 @ vpslldq $8, %xmm7, %xmm4 @ subbytes vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1 # 0 = k vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0 # 1 = i veor q7, q7, q4 @ vpxor %xmm4, %xmm7, %xmm7 vtbl.8 d4, {q11}, d2 @ vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k vtbl.8 d5, {q11}, d3 veor q1, q1, q0 @ vpxor %xmm0, %xmm1, %xmm1 # 0 = j vtbl.8 d6, {q10}, d0 @ vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i vtbl.8 d7, {q10}, d1 veor q3, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k vtbl.8 d8, {q10}, d2 @ vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j vtbl.8 d9, {q10}, d3 veor q7, q7, q12 @ vpxor Lk_s63(%rip), %xmm7, %xmm7 vtbl.8 d6, {q10}, d6 @ vpshufb %xmm3, %xmm10, %xmm3 # 2 = 1/iak vtbl.8 d7, {q10}, d7 veor q4, q4, q2 @ vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k vtbl.8 d4, {q10}, d8 @ vpshufb %xmm4, %xmm10, %xmm2 # 3 = 1/jak vtbl.8 d5, {q10}, d9 veor q3, q3, q1 @ vpxor %xmm1, %xmm3, %xmm3 # 2 = io veor q2, q2, q0 @ vpxor %xmm0, %xmm2, %xmm2 # 3 = jo vtbl.8 d8, {q15}, d6 @ vpshufb %xmm3, %xmm13, %xmm4 # 4 = sbou vtbl.8 d9, {q15}, d7 vtbl.8 d2, {q14}, d4 @ vpshufb %xmm2, %xmm12, %xmm1 # 0 = sb1t vtbl.8 d3, {q14}, d5 veor q1, q1, q4 @ vpxor %xmm4, %xmm1, %xmm1 # 0 = sbox output @ add in smeared stuff veor q0, q1, q7 @ vpxor %xmm7, %xmm1, %xmm0 veor q7, q1, q7 @ vmovdqa %xmm0, %xmm7 bx lr @@ @@ .aes_schedule_transform @@ @@ Linear-transform q0 according to tables at [r11] @@ @@ Requires that q9 = 0x0F0F... as in preheat @@ Output in q0 @@ Clobbers q1, q2, q14, q15 @@ #ifdef __thumb2__ .thumb_func _vpaes_schedule_transform #endif .align 4 _vpaes_schedule_transform: vld1.64 {q14,q15}, [r11] @ vmovdqa (%r11), %xmm2 # lo @ vmovdqa 16(%r11), %xmm1 # hi vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1 vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0 vtbl.8 d4, {q14}, d2 @ vpshufb %xmm1, %xmm2, %xmm2 vtbl.8 d5, {q14}, d3 vtbl.8 d0, {q15}, d0 @ vpshufb %xmm0, %xmm1, %xmm0 vtbl.8 d1, {q15}, d1 veor q0, q0, q2 @ vpxor %xmm2, %xmm0, %xmm0 bx lr @@ @@ .aes_schedule_mangle @@ @@ Mangles q0 from (basis-transformed) standard version @@ to our version. @@ @@ On encrypt, @@ xor with 0x63 @@ multiply by circulant 0,1,1,1 @@ apply shiftrows transform @@ @@ On decrypt, @@ xor with 0x63 @@ multiply by "inverse mixcolumns" circulant E,B,D,9 @@ deskew @@ apply shiftrows transform @@ @@ @@ Writes out to [r2], and increments or decrements it @@ Keeps track of round number mod 4 in r8 @@ Preserves q0 @@ Clobbers q1-q5 @@ #ifdef __thumb2__ .thumb_func _vpaes_schedule_mangle #endif .align 4 _vpaes_schedule_mangle: tst r3, r3 vmov q4, q0 @ vmovdqa %xmm0, %xmm4 # save xmm0 for later adr r11, Lk_mc_forward @ Must be aligned to 8 mod 16. vld1.64 {q5}, [r11] @ vmovdqa Lk_mc_forward(%rip),%xmm5 bne Lschedule_mangle_dec @ encrypting @ Write to q2 so we do not overlap table and destination below. 
veor q2, q0, q12 @ vpxor Lk_s63(%rip), %xmm0, %xmm4 add r2, r2, #16 @ add $16, %rdx vtbl.8 d8, {q2}, d10 @ vpshufb %xmm5, %xmm4, %xmm4 vtbl.8 d9, {q2}, d11 vtbl.8 d2, {q4}, d10 @ vpshufb %xmm5, %xmm4, %xmm1 vtbl.8 d3, {q4}, d11 vtbl.8 d6, {q1}, d10 @ vpshufb %xmm5, %xmm1, %xmm3 vtbl.8 d7, {q1}, d11 veor q4, q4, q1 @ vpxor %xmm1, %xmm4, %xmm4 vld1.64 {q1}, [r8] @ vmovdqa (%r8,%r10), %xmm1 veor q3, q3, q4 @ vpxor %xmm4, %xmm3, %xmm3 b Lschedule_mangle_both .align 4 Lschedule_mangle_dec: @ inverse mix columns adr r11, Lk_dksd @ lea Lk_dksd(%rip),%r11 vshr.u8 q1, q4, #4 @ vpsrlb $4, %xmm4, %xmm1 # 1 = hi vand q4, q4, q9 @ vpand %xmm9, %xmm4, %xmm4 # 4 = lo vld1.64 {q14,q15}, [r11]! @ vmovdqa 0x00(%r11), %xmm2 @ vmovdqa 0x10(%r11), %xmm3 vtbl.8 d4, {q14}, d8 @ vpshufb %xmm4, %xmm2, %xmm2 vtbl.8 d5, {q14}, d9 vtbl.8 d6, {q15}, d2 @ vpshufb %xmm1, %xmm3, %xmm3 vtbl.8 d7, {q15}, d3 @ Load .Lk_dksb ahead of time. vld1.64 {q14,q15}, [r11]! @ vmovdqa 0x20(%r11), %xmm2 @ vmovdqa 0x30(%r11), %xmm3 @ Write to q13 so we do not overlap table and destination. veor q13, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3 vtbl.8 d6, {q13}, d10 @ vpshufb %xmm5, %xmm3, %xmm3 vtbl.8 d7, {q13}, d11 vtbl.8 d4, {q14}, d8 @ vpshufb %xmm4, %xmm2, %xmm2 vtbl.8 d5, {q14}, d9 veor q2, q2, q3 @ vpxor %xmm3, %xmm2, %xmm2 vtbl.8 d6, {q15}, d2 @ vpshufb %xmm1, %xmm3, %xmm3 vtbl.8 d7, {q15}, d3 @ Load .Lk_dkse ahead of time. vld1.64 {q14,q15}, [r11]! @ vmovdqa 0x40(%r11), %xmm2 @ vmovdqa 0x50(%r11), %xmm3 @ Write to q13 so we do not overlap table and destination. veor q13, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3 vtbl.8 d6, {q13}, d10 @ vpshufb %xmm5, %xmm3, %xmm3 vtbl.8 d7, {q13}, d11 vtbl.8 d4, {q14}, d8 @ vpshufb %xmm4, %xmm2, %xmm2 vtbl.8 d5, {q14}, d9 veor q2, q2, q3 @ vpxor %xmm3, %xmm2, %xmm2 vtbl.8 d6, {q15}, d2 @ vpshufb %xmm1, %xmm3, %xmm3 vtbl.8 d7, {q15}, d3 @ Load .Lk_dkse ahead of time. vld1.64 {q14,q15}, [r11]! @ vmovdqa 0x60(%r11), %xmm2 @ vmovdqa 0x70(%r11), %xmm4 @ Write to q13 so we do not overlap table and destination. veor q13, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3 vtbl.8 d4, {q14}, d8 @ vpshufb %xmm4, %xmm2, %xmm2 vtbl.8 d5, {q14}, d9 vtbl.8 d6, {q13}, d10 @ vpshufb %xmm5, %xmm3, %xmm3 vtbl.8 d7, {q13}, d11 vtbl.8 d8, {q15}, d2 @ vpshufb %xmm1, %xmm4, %xmm4 vtbl.8 d9, {q15}, d3 vld1.64 {q1}, [r8] @ vmovdqa (%r8,%r10), %xmm1 veor q2, q2, q3 @ vpxor %xmm3, %xmm2, %xmm2 veor q3, q4, q2 @ vpxor %xmm2, %xmm4, %xmm3 sub r2, r2, #16 @ add $-16, %rdx Lschedule_mangle_both: @ Write to q2 so table and destination do not overlap. 
vtbl.8 d4, {q3}, d2 @ vpshufb %xmm1, %xmm3, %xmm3 vtbl.8 d5, {q3}, d3 add r8, r8, #64-16 @ add $-16, %r8 and r8, r8, #~(1<<6) @ and $0x30, %r8 vst1.64 {q2}, [r2] @ vmovdqu %xmm3, (%rdx) bx lr .globl _vpaes_set_encrypt_key .private_extern _vpaes_set_encrypt_key #ifdef __thumb2__ .thumb_func _vpaes_set_encrypt_key #endif .align 4 _vpaes_set_encrypt_key: stmdb sp!, {r7,r8,r9,r10,r11, lr} vstmdb sp!, {d8,d9,d10,d11,d12,d13,d14,d15} lsr r9, r1, #5 @ shr $5,%eax add r9, r9, #5 @ $5,%eax str r9, [r2,#240] @ mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5; mov r3, #0 @ mov $0,%ecx mov r8, #0x30 @ mov $0x30,%r8d bl _vpaes_schedule_core eor r0, r0, r0 vldmia sp!, {d8,d9,d10,d11,d12,d13,d14,d15} ldmia sp!, {r7,r8,r9,r10,r11, pc} @ return .globl _vpaes_set_decrypt_key .private_extern _vpaes_set_decrypt_key #ifdef __thumb2__ .thumb_func _vpaes_set_decrypt_key #endif .align 4 _vpaes_set_decrypt_key: stmdb sp!, {r7,r8,r9,r10,r11, lr} vstmdb sp!, {d8,d9,d10,d11,d12,d13,d14,d15} lsr r9, r1, #5 @ shr $5,%eax add r9, r9, #5 @ $5,%eax str r9, [r2,#240] @ mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5; lsl r9, r9, #4 @ shl $4,%eax add r2, r2, #16 @ lea 16(%rdx,%rax),%rdx add r2, r2, r9 mov r3, #1 @ mov $1,%ecx lsr r8, r1, #1 @ shr $1,%r8d and r8, r8, #32 @ and $32,%r8d eor r8, r8, #32 @ xor $32,%r8d # nbits==192?0:32 bl _vpaes_schedule_core vldmia sp!, {d8,d9,d10,d11,d12,d13,d14,d15} ldmia sp!, {r7,r8,r9,r10,r11, pc} @ return @ Additional constants for converting to bsaes. .align 4 _vpaes_convert_consts: @ .Lk_opt_then_skew applies skew(opt(x)) XOR 0x63, where skew is the linear @ transform in the AES S-box. 0x63 is incorporated into the low half of the @ table. This was computed with the following script: @ @ def u64s_to_u128(x, y): @ return x | (y << 64) @ def u128_to_u64s(w): @ return w & ((1<<64)-1), w >> 64 @ def get_byte(w, i): @ return (w >> (i*8)) & 0xff @ def apply_table(table, b): @ lo = b & 0xf @ hi = b >> 4 @ return get_byte(table[0], lo) ^ get_byte(table[1], hi) @ def opt(b): @ table = [ @ u64s_to_u128(0xFF9F4929D6B66000, 0xF7974121DEBE6808), @ u64s_to_u128(0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0), @ ] @ return apply_table(table, b) @ def rot_byte(b, n): @ return 0xff & ((b << n) | (b >> (8-n))) @ def skew(x): @ return (x ^ rot_byte(x, 1) ^ rot_byte(x, 2) ^ rot_byte(x, 3) ^ @ rot_byte(x, 4)) @ table = [0, 0] @ for i in range(16): @ table[0] |= (skew(opt(i)) ^ 0x63) << (i*8) @ table[1] |= skew(opt(i<<4)) << (i*8) @ print(" .quad 0x%016x, 0x%016x" % u128_to_u64s(table[0])) @ print(" .quad 0x%016x, 0x%016x" % u128_to_u64s(table[1])) Lk_opt_then_skew: .quad 0x9cb8436798bc4763, 0x6440bb9f6044bf9b .quad 0x1f30062936192f00, 0xb49bad829db284ab @ .Lk_decrypt_transform is a permutation which performs an 8-bit left-rotation @ followed by a byte-swap on each 32-bit word of a vector. E.g., 0x11223344 @ becomes 0x22334411 and then 0x11443322. Lk_decrypt_transform: .quad 0x0704050603000102, 0x0f0c0d0e0b08090a @ void vpaes_encrypt_key_to_bsaes(AES_KEY *bsaes, const AES_KEY *vpaes); .globl _vpaes_encrypt_key_to_bsaes .private_extern _vpaes_encrypt_key_to_bsaes #ifdef __thumb2__ .thumb_func _vpaes_encrypt_key_to_bsaes #endif .align 4 _vpaes_encrypt_key_to_bsaes: stmdb sp!, {r11, lr} @ See _vpaes_schedule_core for the key schedule logic. In particular, @ _vpaes_schedule_transform(.Lk_ipt) (section 2.2 of the paper), @ _vpaes_schedule_mangle (section 4.3), and .Lschedule_mangle_last @ contain the transformations not in the bsaes representation. This @ function inverts those transforms. 
@ @ Note also that bsaes-armv7.pl expects aes-armv4.pl's key @ representation, which does not match the other aes_nohw_* @ implementations. The ARM aes_nohw_* stores each 32-bit word @ byteswapped, as a convenience for (unsupported) big-endian ARM, at the @ cost of extra REV and VREV32 operations in little-endian ARM. vmov.i8 q9, #0x0f @ Required by _vpaes_schedule_transform adr r2, Lk_mc_forward @ Must be aligned to 8 mod 16. add r3, r2, 0x90 @ Lk_sr+0x10-Lk_mc_forward = 0x90 (Apple's toolchain doesn't support the expression) vld1.64 {q12}, [r2] vmov.i8 q10, #0x5b @ Lk_s63 from vpaes-x86_64 adr r11, Lk_opt @ Must be aligned to 8 mod 16. vmov.i8 q11, #0x63 @ LK_s63 without Lk_ipt applied @ vpaes stores one fewer round count than bsaes, but the number of keys @ is the same. ldr r2, [r1,#240] add r2, r2, #1 str r2, [r0,#240] @ The first key is transformed with _vpaes_schedule_transform(.Lk_ipt). @ Invert this with .Lk_opt. vld1.64 {q0}, [r1]! bl _vpaes_schedule_transform vrev32.8 q0, q0 vst1.64 {q0}, [r0]! @ The middle keys have _vpaes_schedule_transform(.Lk_ipt) applied, @ followed by _vpaes_schedule_mangle. _vpaes_schedule_mangle XORs 0x63, @ multiplies by the circulant 0,1,1,1, then applies ShiftRows. Loop_enc_key_to_bsaes: vld1.64 {q0}, [r1]! @ Invert the ShiftRows step (see .Lschedule_mangle_both). Note we cycle @ r3 in the opposite direction and start at .Lk_sr+0x10 instead of 0x30. @ We use r3 rather than r8 to avoid a callee-saved register. vld1.64 {q1}, [r3] vtbl.8 d4, {q0}, d2 vtbl.8 d5, {q0}, d3 add r3, r3, #16 and r3, r3, #~(1<<6) vmov q0, q2 @ Handle the last key differently. subs r2, r2, #1 beq Loop_enc_key_to_bsaes_last @ Multiply by the circulant. This is its own inverse. vtbl.8 d2, {q0}, d24 vtbl.8 d3, {q0}, d25 vmov q0, q1 vtbl.8 d4, {q1}, d24 vtbl.8 d5, {q1}, d25 veor q0, q0, q2 vtbl.8 d2, {q2}, d24 vtbl.8 d3, {q2}, d25 veor q0, q0, q1 @ XOR and finish. veor q0, q0, q10 bl _vpaes_schedule_transform vrev32.8 q0, q0 vst1.64 {q0}, [r0]! b Loop_enc_key_to_bsaes Loop_enc_key_to_bsaes_last: @ The final key does not have a basis transform (note @ .Lschedule_mangle_last inverts the original transform). It only XORs @ 0x63 and applies ShiftRows. The latter was already inverted in the @ loop. Note that, because we act on the original representation, we use @ q11, not q10. veor q0, q0, q11 vrev32.8 q0, q0 vst1.64 {q0}, [r0] @ Wipe registers which contained key material. veor q0, q0, q0 veor q1, q1, q1 veor q2, q2, q2 ldmia sp!, {r11, pc} @ return @ void vpaes_decrypt_key_to_bsaes(AES_KEY *vpaes, const AES_KEY *bsaes); .globl _vpaes_decrypt_key_to_bsaes .private_extern _vpaes_decrypt_key_to_bsaes #ifdef __thumb2__ .thumb_func _vpaes_decrypt_key_to_bsaes #endif .align 4 _vpaes_decrypt_key_to_bsaes: stmdb sp!, {r11, lr} @ See _vpaes_schedule_core for the key schedule logic. Note vpaes @ computes the decryption key schedule in reverse. Additionally, @ aes-x86_64.pl shares some transformations, so we must only partially @ invert vpaes's transformations. In general, vpaes computes in a @ different basis (.Lk_ipt and .Lk_opt) and applies the inverses of @ MixColumns, ShiftRows, and the affine part of the AES S-box (which is @ split into a linear skew and XOR of 0x63). We undo all but MixColumns. @ @ Note also that bsaes-armv7.pl expects aes-armv4.pl's key @ representation, which does not match the other aes_nohw_* @ implementations. 
The ARM aes_nohw_* stores each 32-bit word @ byteswapped, as a convenience for (unsupported) big-endian ARM, at the @ cost of extra REV and VREV32 operations in little-endian ARM. adr r2, Lk_decrypt_transform adr r3, Lk_sr+0x30 adr r11, Lk_opt_then_skew @ Input to _vpaes_schedule_transform. vld1.64 {q12}, [r2] @ Reuse q12 from encryption. vmov.i8 q9, #0x0f @ Required by _vpaes_schedule_transform @ vpaes stores one fewer round count than bsaes, but the number of keys @ is the same. ldr r2, [r1,#240] add r2, r2, #1 str r2, [r0,#240] @ Undo the basis change and reapply the S-box affine transform. See @ .Lschedule_mangle_last. vld1.64 {q0}, [r1]! bl _vpaes_schedule_transform vrev32.8 q0, q0 vst1.64 {q0}, [r0]! @ See _vpaes_schedule_mangle for the transform on the middle keys. Note @ it simultaneously inverts MixColumns and the S-box affine transform. @ See .Lk_dksd through .Lk_dks9. Loop_dec_key_to_bsaes: vld1.64 {q0}, [r1]! @ Invert the ShiftRows step (see .Lschedule_mangle_both). Note going @ forwards cancels inverting for which direction we cycle r3. We use r3 @ rather than r8 to avoid a callee-saved register. vld1.64 {q1}, [r3] vtbl.8 d4, {q0}, d2 vtbl.8 d5, {q0}, d3 add r3, r3, #64-16 and r3, r3, #~(1<<6) vmov q0, q2 @ Handle the last key differently. subs r2, r2, #1 beq Loop_dec_key_to_bsaes_last @ Undo the basis change and reapply the S-box affine transform. bl _vpaes_schedule_transform @ Rotate each word by 8 bytes (cycle the rows) and then byte-swap. We @ combine the two operations in .Lk_decrypt_transform. @ @ TODO(davidben): Where does the rotation come from? vtbl.8 d2, {q0}, d24 vtbl.8 d3, {q0}, d25 vst1.64 {q1}, [r0]! b Loop_dec_key_to_bsaes Loop_dec_key_to_bsaes_last: @ The final key only inverts ShiftRows (already done in the loop). See @ .Lschedule_am_decrypting. Its basis is not transformed. vrev32.8 q0, q0 vst1.64 {q0}, [r0]! @ Wipe registers which contained key material. veor q0, q0, q0 veor q1, q1, q1 veor q2, q2, q2 ldmia sp!, {r11, pc} @ return .globl _vpaes_ctr32_encrypt_blocks .private_extern _vpaes_ctr32_encrypt_blocks #ifdef __thumb2__ .thumb_func _vpaes_ctr32_encrypt_blocks #endif .align 4 _vpaes_ctr32_encrypt_blocks: mov ip, sp stmdb sp!, {r7,r8,r9,r10,r11, lr} @ This function uses q4-q7 (d8-d15), which are callee-saved. vstmdb sp!, {d8,d9,d10,d11,d12,d13,d14,d15} cmp r2, #0 @ r8 is passed on the stack. ldr r8, [ip] beq Lctr32_done @ _vpaes_encrypt_core expects the key in r2, so swap r2 and r3. mov r9, r3 mov r3, r2 mov r2, r9 @ Load the IV and counter portion. ldr r7, [r8, #12] vld1.8 {q7}, [r8] bl _vpaes_preheat rev r7, r7 @ The counter is big-endian. Lctr32_loop: vmov q0, q7 vld1.8 {q6}, [r0]! @ Load input ahead of time bl _vpaes_encrypt_core veor q0, q0, q6 @ XOR input and result vst1.8 {q0}, [r1]! subs r3, r3, #1 @ Update the counter. add r7, r7, #1 rev r9, r7 vmov.32 d15[1], r9 bne Lctr32_loop Lctr32_done: vldmia sp!, {d8,d9,d10,d11,d12,d13,d14,d15} ldmia sp!, {r7,r8,r9,r10,r11, pc} @ return #endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__APPLE__)
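@ Editorial note, not part of the generated output above: every S-box step in the
@ vpaes routines is built from two 16-entry vtbl.8 lookups keyed on the low and
@ high nibble of each byte (vand with Lk_s0F, vshr.u8 by 4), combined with veor,
@ so there is no data-dependent memory access and the code stays constant-time.
@ A minimal sketch of that pattern, mirroring the apply_table helper in the
@ Lk_opt_then_skew comment above:
@
@   uint8_t apply_table(const uint8_t lo_tbl[16], const uint8_t hi_tbl[16],
@                       uint8_t b) {
@     return lo_tbl[b & 0x0f] ^ hi_tbl[b >> 4];        /* two lookups, one XOR */
@   }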
marvin-hansen/iggy-streaming-system
21,755
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/ios-arm/crypto/fipsmodule/armv4-mont.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__APPLE__) #include <openssl/arm_arch.h> @ Silence ARMv8 deprecated IT instruction warnings. This file is used by both @ ARMv7 and ARMv8 processors and does not use ARMv8 instructions. .text #if defined(__thumb2__) .syntax unified .thumb #else .code 32 #endif .globl _bn_mul_mont_nohw .private_extern _bn_mul_mont_nohw #ifdef __thumb2__ .thumb_func _bn_mul_mont_nohw #endif .align 5 _bn_mul_mont_nohw: ldr ip,[sp,#4] @ load num stmdb sp!,{r0,r2} @ sp points at argument block cmp ip,#2 mov r0,ip @ load num #ifdef __thumb2__ ittt lt #endif movlt r0,#0 addlt sp,sp,#2*4 blt Labrt stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} @ save 10 registers mov r0,r0,lsl#2 @ rescale r0 for byte count sub sp,sp,r0 @ alloca(4*num) sub sp,sp,#4 @ +extra dword sub r0,r0,#4 @ "num=num-1" add r4,r2,r0 @ &bp[num-1] add r0,sp,r0 @ r0 to point at &tp[num-1] ldr r8,[r0,#14*4] @ &n0 ldr r2,[r2] @ bp[0] ldr r5,[r1],#4 @ ap[0],ap++ ldr r6,[r3],#4 @ np[0],np++ ldr r8,[r8] @ *n0 str r4,[r0,#15*4] @ save &bp[num] umull r10,r11,r5,r2 @ ap[0]*bp[0] str r8,[r0,#14*4] @ save n0 value mul r8,r10,r8 @ "tp[0]"*n0 mov r12,#0 umlal r10,r12,r6,r8 @ np[0]*n0+"t[0]" mov r4,sp L1st: ldr r5,[r1],#4 @ ap[j],ap++ mov r10,r11 ldr r6,[r3],#4 @ np[j],np++ mov r11,#0 umlal r10,r11,r5,r2 @ ap[j]*bp[0] mov r14,#0 umlal r12,r14,r6,r8 @ np[j]*n0 adds r12,r12,r10 str r12,[r4],#4 @ tp[j-1]=,tp++ adc r12,r14,#0 cmp r4,r0 bne L1st adds r12,r12,r11 ldr r4,[r0,#13*4] @ restore bp mov r14,#0 ldr r8,[r0,#14*4] @ restore n0 adc r14,r14,#0 str r12,[r0] @ tp[num-1]= mov r7,sp str r14,[r0,#4] @ tp[num]= Louter: sub r7,r0,r7 @ "original" r0-1 value sub r1,r1,r7 @ "rewind" ap to &ap[1] ldr r2,[r4,#4]! 
@ *(++bp) sub r3,r3,r7 @ "rewind" np to &np[1] ldr r5,[r1,#-4] @ ap[0] ldr r10,[sp] @ tp[0] ldr r6,[r3,#-4] @ np[0] ldr r7,[sp,#4] @ tp[1] mov r11,#0 umlal r10,r11,r5,r2 @ ap[0]*bp[i]+tp[0] str r4,[r0,#13*4] @ save bp mul r8,r10,r8 mov r12,#0 umlal r10,r12,r6,r8 @ np[0]*n0+"tp[0]" mov r4,sp Linner: ldr r5,[r1],#4 @ ap[j],ap++ adds r10,r11,r7 @ +=tp[j] ldr r6,[r3],#4 @ np[j],np++ mov r11,#0 umlal r10,r11,r5,r2 @ ap[j]*bp[i] mov r14,#0 umlal r12,r14,r6,r8 @ np[j]*n0 adc r11,r11,#0 ldr r7,[r4,#8] @ tp[j+1] adds r12,r12,r10 str r12,[r4],#4 @ tp[j-1]=,tp++ adc r12,r14,#0 cmp r4,r0 bne Linner adds r12,r12,r11 mov r14,#0 ldr r4,[r0,#13*4] @ restore bp adc r14,r14,#0 ldr r8,[r0,#14*4] @ restore n0 adds r12,r12,r7 ldr r7,[r0,#15*4] @ restore &bp[num] adc r14,r14,#0 str r12,[r0] @ tp[num-1]= str r14,[r0,#4] @ tp[num]= cmp r4,r7 #ifdef __thumb2__ itt ne #endif movne r7,sp bne Louter ldr r2,[r0,#12*4] @ pull rp mov r5,sp add r0,r0,#4 @ r0 to point at &tp[num] sub r5,r0,r5 @ "original" num value mov r4,sp @ "rewind" r4 mov r1,r4 @ "borrow" r1 sub r3,r3,r5 @ "rewind" r3 to &np[0] subs r7,r7,r7 @ "clear" carry flag Lsub: ldr r7,[r4],#4 ldr r6,[r3],#4 sbcs r7,r7,r6 @ tp[j]-np[j] str r7,[r2],#4 @ rp[j]= teq r4,r0 @ preserve carry bne Lsub sbcs r14,r14,#0 @ upmost carry mov r4,sp @ "rewind" r4 sub r2,r2,r5 @ "rewind" r2 Lcopy: ldr r7,[r4] @ conditional copy ldr r5,[r2] str sp,[r4],#4 @ zap tp #ifdef __thumb2__ it cc #endif movcc r5,r7 str r5,[r2],#4 teq r4,r0 @ preserve carry bne Lcopy mov sp,r0 add sp,sp,#4 @ skip over tp[num+1] ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} @ restore registers add sp,sp,#2*4 @ skip over {r0,r2} mov r0,#1 Labrt: #if __ARM_ARCH>=5 bx lr @ bx lr #else tst lr,#1 moveq pc,lr @ be binary compatible with V4, yet .word 0xe12fff1e @ interoperable with Thumb ISA:-) #endif #if __ARM_MAX_ARCH__>=7 .globl _bn_mul8x_mont_neon .private_extern _bn_mul8x_mont_neon #ifdef __thumb2__ .thumb_func _bn_mul8x_mont_neon #endif .align 5 _bn_mul8x_mont_neon: mov ip,sp stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11} vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so ldmia ip,{r4,r5} @ load rest of parameter block mov ip,sp cmp r5,#8 bhi LNEON_8n @ special case for r5==8, everything is in register bank... vld1.32 {d28[0]}, [r2,:32]! veor d8,d8,d8 sub r7,sp,r5,lsl#4 vld1.32 {d0,d1,d2,d3}, [r1]! @ can't specify :32 :-( and r7,r7,#-64 vld1.32 {d30[0]}, [r4,:32] mov sp,r7 @ alloca vzip.16 d28,d8 vmull.u32 q6,d28,d0[0] vmull.u32 q7,d28,d0[1] vmull.u32 q8,d28,d1[0] vshl.i64 d29,d13,#16 vmull.u32 q9,d28,d1[1] vadd.u64 d29,d29,d12 veor d8,d8,d8 vmul.u32 d29,d29,d30 vmull.u32 q10,d28,d2[0] vld1.32 {d4,d5,d6,d7}, [r3]! vmull.u32 q11,d28,d2[1] vmull.u32 q12,d28,d3[0] vzip.16 d29,d8 vmull.u32 q13,d28,d3[1] vmlal.u32 q6,d29,d4[0] sub r9,r5,#1 vmlal.u32 q7,d29,d4[1] vmlal.u32 q8,d29,d5[0] vmlal.u32 q9,d29,d5[1] vmlal.u32 q10,d29,d6[0] vmov q5,q6 vmlal.u32 q11,d29,d6[1] vmov q6,q7 vmlal.u32 q12,d29,d7[0] vmov q7,q8 vmlal.u32 q13,d29,d7[1] vmov q8,q9 vmov q9,q10 vshr.u64 d10,d10,#16 vmov q10,q11 vmov q11,q12 vadd.u64 d10,d10,d11 vmov q12,q13 veor q13,q13 vshr.u64 d10,d10,#16 b LNEON_outer8 .align 4 LNEON_outer8: vld1.32 {d28[0]}, [r2,:32]! 
veor d8,d8,d8 vzip.16 d28,d8 vadd.u64 d12,d12,d10 vmlal.u32 q6,d28,d0[0] vmlal.u32 q7,d28,d0[1] vmlal.u32 q8,d28,d1[0] vshl.i64 d29,d13,#16 vmlal.u32 q9,d28,d1[1] vadd.u64 d29,d29,d12 veor d8,d8,d8 subs r9,r9,#1 vmul.u32 d29,d29,d30 vmlal.u32 q10,d28,d2[0] vmlal.u32 q11,d28,d2[1] vmlal.u32 q12,d28,d3[0] vzip.16 d29,d8 vmlal.u32 q13,d28,d3[1] vmlal.u32 q6,d29,d4[0] vmlal.u32 q7,d29,d4[1] vmlal.u32 q8,d29,d5[0] vmlal.u32 q9,d29,d5[1] vmlal.u32 q10,d29,d6[0] vmov q5,q6 vmlal.u32 q11,d29,d6[1] vmov q6,q7 vmlal.u32 q12,d29,d7[0] vmov q7,q8 vmlal.u32 q13,d29,d7[1] vmov q8,q9 vmov q9,q10 vshr.u64 d10,d10,#16 vmov q10,q11 vmov q11,q12 vadd.u64 d10,d10,d11 vmov q12,q13 veor q13,q13 vshr.u64 d10,d10,#16 bne LNEON_outer8 vadd.u64 d12,d12,d10 mov r7,sp vshr.u64 d10,d12,#16 mov r8,r5 vadd.u64 d13,d13,d10 add r6,sp,#96 vshr.u64 d10,d13,#16 vzip.16 d12,d13 b LNEON_tail_entry .align 4 LNEON_8n: veor q6,q6,q6 sub r7,sp,#128 veor q7,q7,q7 sub r7,r7,r5,lsl#4 veor q8,q8,q8 and r7,r7,#-64 veor q9,q9,q9 mov sp,r7 @ alloca veor q10,q10,q10 add r7,r7,#256 veor q11,q11,q11 sub r8,r5,#8 veor q12,q12,q12 veor q13,q13,q13 LNEON_8n_init: vst1.64 {q6,q7},[r7,:256]! subs r8,r8,#8 vst1.64 {q8,q9},[r7,:256]! vst1.64 {q10,q11},[r7,:256]! vst1.64 {q12,q13},[r7,:256]! bne LNEON_8n_init add r6,sp,#256 vld1.32 {d0,d1,d2,d3},[r1]! add r10,sp,#8 vld1.32 {d30[0]},[r4,:32] mov r9,r5 b LNEON_8n_outer .align 4 LNEON_8n_outer: vld1.32 {d28[0]},[r2,:32]! @ *b++ veor d8,d8,d8 vzip.16 d28,d8 add r7,sp,#128 vld1.32 {d4,d5,d6,d7},[r3]! vmlal.u32 q6,d28,d0[0] vmlal.u32 q7,d28,d0[1] veor d8,d8,d8 vmlal.u32 q8,d28,d1[0] vshl.i64 d29,d13,#16 vmlal.u32 q9,d28,d1[1] vadd.u64 d29,d29,d12 vmlal.u32 q10,d28,d2[0] vmul.u32 d29,d29,d30 vmlal.u32 q11,d28,d2[1] vst1.32 {d28},[sp,:64] @ put aside smashed b[8*i+0] vmlal.u32 q12,d28,d3[0] vzip.16 d29,d8 vmlal.u32 q13,d28,d3[1] vld1.32 {d28[0]},[r2,:32]! @ *b++ vmlal.u32 q6,d29,d4[0] veor d10,d10,d10 vmlal.u32 q7,d29,d4[1] vzip.16 d28,d10 vmlal.u32 q8,d29,d5[0] vshr.u64 d12,d12,#16 vmlal.u32 q9,d29,d5[1] vmlal.u32 q10,d29,d6[0] vadd.u64 d12,d12,d13 vmlal.u32 q11,d29,d6[1] vshr.u64 d12,d12,#16 vmlal.u32 q12,d29,d7[0] vmlal.u32 q13,d29,d7[1] vadd.u64 d14,d14,d12 vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+0] vmlal.u32 q7,d28,d0[0] vld1.64 {q6},[r6,:128]! vmlal.u32 q8,d28,d0[1] veor d8,d8,d8 vmlal.u32 q9,d28,d1[0] vshl.i64 d29,d15,#16 vmlal.u32 q10,d28,d1[1] vadd.u64 d29,d29,d14 vmlal.u32 q11,d28,d2[0] vmul.u32 d29,d29,d30 vmlal.u32 q12,d28,d2[1] vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+1] vmlal.u32 q13,d28,d3[0] vzip.16 d29,d8 vmlal.u32 q6,d28,d3[1] vld1.32 {d28[0]},[r2,:32]! @ *b++ vmlal.u32 q7,d29,d4[0] veor d10,d10,d10 vmlal.u32 q8,d29,d4[1] vzip.16 d28,d10 vmlal.u32 q9,d29,d5[0] vshr.u64 d14,d14,#16 vmlal.u32 q10,d29,d5[1] vmlal.u32 q11,d29,d6[0] vadd.u64 d14,d14,d15 vmlal.u32 q12,d29,d6[1] vshr.u64 d14,d14,#16 vmlal.u32 q13,d29,d7[0] vmlal.u32 q6,d29,d7[1] vadd.u64 d16,d16,d14 vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+1] vmlal.u32 q8,d28,d0[0] vld1.64 {q7},[r6,:128]! vmlal.u32 q9,d28,d0[1] veor d8,d8,d8 vmlal.u32 q10,d28,d1[0] vshl.i64 d29,d17,#16 vmlal.u32 q11,d28,d1[1] vadd.u64 d29,d29,d16 vmlal.u32 q12,d28,d2[0] vmul.u32 d29,d29,d30 vmlal.u32 q13,d28,d2[1] vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+2] vmlal.u32 q6,d28,d3[0] vzip.16 d29,d8 vmlal.u32 q7,d28,d3[1] vld1.32 {d28[0]},[r2,:32]! 
@ *b++ vmlal.u32 q8,d29,d4[0] veor d10,d10,d10 vmlal.u32 q9,d29,d4[1] vzip.16 d28,d10 vmlal.u32 q10,d29,d5[0] vshr.u64 d16,d16,#16 vmlal.u32 q11,d29,d5[1] vmlal.u32 q12,d29,d6[0] vadd.u64 d16,d16,d17 vmlal.u32 q13,d29,d6[1] vshr.u64 d16,d16,#16 vmlal.u32 q6,d29,d7[0] vmlal.u32 q7,d29,d7[1] vadd.u64 d18,d18,d16 vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+2] vmlal.u32 q9,d28,d0[0] vld1.64 {q8},[r6,:128]! vmlal.u32 q10,d28,d0[1] veor d8,d8,d8 vmlal.u32 q11,d28,d1[0] vshl.i64 d29,d19,#16 vmlal.u32 q12,d28,d1[1] vadd.u64 d29,d29,d18 vmlal.u32 q13,d28,d2[0] vmul.u32 d29,d29,d30 vmlal.u32 q6,d28,d2[1] vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+3] vmlal.u32 q7,d28,d3[0] vzip.16 d29,d8 vmlal.u32 q8,d28,d3[1] vld1.32 {d28[0]},[r2,:32]! @ *b++ vmlal.u32 q9,d29,d4[0] veor d10,d10,d10 vmlal.u32 q10,d29,d4[1] vzip.16 d28,d10 vmlal.u32 q11,d29,d5[0] vshr.u64 d18,d18,#16 vmlal.u32 q12,d29,d5[1] vmlal.u32 q13,d29,d6[0] vadd.u64 d18,d18,d19 vmlal.u32 q6,d29,d6[1] vshr.u64 d18,d18,#16 vmlal.u32 q7,d29,d7[0] vmlal.u32 q8,d29,d7[1] vadd.u64 d20,d20,d18 vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+3] vmlal.u32 q10,d28,d0[0] vld1.64 {q9},[r6,:128]! vmlal.u32 q11,d28,d0[1] veor d8,d8,d8 vmlal.u32 q12,d28,d1[0] vshl.i64 d29,d21,#16 vmlal.u32 q13,d28,d1[1] vadd.u64 d29,d29,d20 vmlal.u32 q6,d28,d2[0] vmul.u32 d29,d29,d30 vmlal.u32 q7,d28,d2[1] vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+4] vmlal.u32 q8,d28,d3[0] vzip.16 d29,d8 vmlal.u32 q9,d28,d3[1] vld1.32 {d28[0]},[r2,:32]! @ *b++ vmlal.u32 q10,d29,d4[0] veor d10,d10,d10 vmlal.u32 q11,d29,d4[1] vzip.16 d28,d10 vmlal.u32 q12,d29,d5[0] vshr.u64 d20,d20,#16 vmlal.u32 q13,d29,d5[1] vmlal.u32 q6,d29,d6[0] vadd.u64 d20,d20,d21 vmlal.u32 q7,d29,d6[1] vshr.u64 d20,d20,#16 vmlal.u32 q8,d29,d7[0] vmlal.u32 q9,d29,d7[1] vadd.u64 d22,d22,d20 vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+4] vmlal.u32 q11,d28,d0[0] vld1.64 {q10},[r6,:128]! vmlal.u32 q12,d28,d0[1] veor d8,d8,d8 vmlal.u32 q13,d28,d1[0] vshl.i64 d29,d23,#16 vmlal.u32 q6,d28,d1[1] vadd.u64 d29,d29,d22 vmlal.u32 q7,d28,d2[0] vmul.u32 d29,d29,d30 vmlal.u32 q8,d28,d2[1] vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+5] vmlal.u32 q9,d28,d3[0] vzip.16 d29,d8 vmlal.u32 q10,d28,d3[1] vld1.32 {d28[0]},[r2,:32]! @ *b++ vmlal.u32 q11,d29,d4[0] veor d10,d10,d10 vmlal.u32 q12,d29,d4[1] vzip.16 d28,d10 vmlal.u32 q13,d29,d5[0] vshr.u64 d22,d22,#16 vmlal.u32 q6,d29,d5[1] vmlal.u32 q7,d29,d6[0] vadd.u64 d22,d22,d23 vmlal.u32 q8,d29,d6[1] vshr.u64 d22,d22,#16 vmlal.u32 q9,d29,d7[0] vmlal.u32 q10,d29,d7[1] vadd.u64 d24,d24,d22 vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+5] vmlal.u32 q12,d28,d0[0] vld1.64 {q11},[r6,:128]! vmlal.u32 q13,d28,d0[1] veor d8,d8,d8 vmlal.u32 q6,d28,d1[0] vshl.i64 d29,d25,#16 vmlal.u32 q7,d28,d1[1] vadd.u64 d29,d29,d24 vmlal.u32 q8,d28,d2[0] vmul.u32 d29,d29,d30 vmlal.u32 q9,d28,d2[1] vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+6] vmlal.u32 q10,d28,d3[0] vzip.16 d29,d8 vmlal.u32 q11,d28,d3[1] vld1.32 {d28[0]},[r2,:32]! @ *b++ vmlal.u32 q12,d29,d4[0] veor d10,d10,d10 vmlal.u32 q13,d29,d4[1] vzip.16 d28,d10 vmlal.u32 q6,d29,d5[0] vshr.u64 d24,d24,#16 vmlal.u32 q7,d29,d5[1] vmlal.u32 q8,d29,d6[0] vadd.u64 d24,d24,d25 vmlal.u32 q9,d29,d6[1] vshr.u64 d24,d24,#16 vmlal.u32 q10,d29,d7[0] vmlal.u32 q11,d29,d7[1] vadd.u64 d26,d26,d24 vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+6] vmlal.u32 q13,d28,d0[0] vld1.64 {q12},[r6,:128]! 
vmlal.u32 q6,d28,d0[1] veor d8,d8,d8 vmlal.u32 q7,d28,d1[0] vshl.i64 d29,d27,#16 vmlal.u32 q8,d28,d1[1] vadd.u64 d29,d29,d26 vmlal.u32 q9,d28,d2[0] vmul.u32 d29,d29,d30 vmlal.u32 q10,d28,d2[1] vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+7] vmlal.u32 q11,d28,d3[0] vzip.16 d29,d8 vmlal.u32 q12,d28,d3[1] vld1.32 {d28},[sp,:64] @ pull smashed b[8*i+0] vmlal.u32 q13,d29,d4[0] vld1.32 {d0,d1,d2,d3},[r1]! vmlal.u32 q6,d29,d4[1] vmlal.u32 q7,d29,d5[0] vshr.u64 d26,d26,#16 vmlal.u32 q8,d29,d5[1] vmlal.u32 q9,d29,d6[0] vadd.u64 d26,d26,d27 vmlal.u32 q10,d29,d6[1] vshr.u64 d26,d26,#16 vmlal.u32 q11,d29,d7[0] vmlal.u32 q12,d29,d7[1] vadd.u64 d12,d12,d26 vst1.32 {d29},[r10,:64] @ put aside smashed m[8*i+7] add r10,sp,#8 @ rewind sub r8,r5,#8 b LNEON_8n_inner .align 4 LNEON_8n_inner: subs r8,r8,#8 vmlal.u32 q6,d28,d0[0] vld1.64 {q13},[r6,:128] vmlal.u32 q7,d28,d0[1] vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+0] vmlal.u32 q8,d28,d1[0] vld1.32 {d4,d5,d6,d7},[r3]! vmlal.u32 q9,d28,d1[1] it ne addne r6,r6,#16 @ don't advance in last iteration vmlal.u32 q10,d28,d2[0] vmlal.u32 q11,d28,d2[1] vmlal.u32 q12,d28,d3[0] vmlal.u32 q13,d28,d3[1] vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+1] vmlal.u32 q6,d29,d4[0] vmlal.u32 q7,d29,d4[1] vmlal.u32 q8,d29,d5[0] vmlal.u32 q9,d29,d5[1] vmlal.u32 q10,d29,d6[0] vmlal.u32 q11,d29,d6[1] vmlal.u32 q12,d29,d7[0] vmlal.u32 q13,d29,d7[1] vst1.64 {q6},[r7,:128]! vmlal.u32 q7,d28,d0[0] vld1.64 {q6},[r6,:128] vmlal.u32 q8,d28,d0[1] vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+1] vmlal.u32 q9,d28,d1[0] it ne addne r6,r6,#16 @ don't advance in last iteration vmlal.u32 q10,d28,d1[1] vmlal.u32 q11,d28,d2[0] vmlal.u32 q12,d28,d2[1] vmlal.u32 q13,d28,d3[0] vmlal.u32 q6,d28,d3[1] vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+2] vmlal.u32 q7,d29,d4[0] vmlal.u32 q8,d29,d4[1] vmlal.u32 q9,d29,d5[0] vmlal.u32 q10,d29,d5[1] vmlal.u32 q11,d29,d6[0] vmlal.u32 q12,d29,d6[1] vmlal.u32 q13,d29,d7[0] vmlal.u32 q6,d29,d7[1] vst1.64 {q7},[r7,:128]! vmlal.u32 q8,d28,d0[0] vld1.64 {q7},[r6,:128] vmlal.u32 q9,d28,d0[1] vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+2] vmlal.u32 q10,d28,d1[0] it ne addne r6,r6,#16 @ don't advance in last iteration vmlal.u32 q11,d28,d1[1] vmlal.u32 q12,d28,d2[0] vmlal.u32 q13,d28,d2[1] vmlal.u32 q6,d28,d3[0] vmlal.u32 q7,d28,d3[1] vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+3] vmlal.u32 q8,d29,d4[0] vmlal.u32 q9,d29,d4[1] vmlal.u32 q10,d29,d5[0] vmlal.u32 q11,d29,d5[1] vmlal.u32 q12,d29,d6[0] vmlal.u32 q13,d29,d6[1] vmlal.u32 q6,d29,d7[0] vmlal.u32 q7,d29,d7[1] vst1.64 {q8},[r7,:128]! vmlal.u32 q9,d28,d0[0] vld1.64 {q8},[r6,:128] vmlal.u32 q10,d28,d0[1] vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+3] vmlal.u32 q11,d28,d1[0] it ne addne r6,r6,#16 @ don't advance in last iteration vmlal.u32 q12,d28,d1[1] vmlal.u32 q13,d28,d2[0] vmlal.u32 q6,d28,d2[1] vmlal.u32 q7,d28,d3[0] vmlal.u32 q8,d28,d3[1] vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+4] vmlal.u32 q9,d29,d4[0] vmlal.u32 q10,d29,d4[1] vmlal.u32 q11,d29,d5[0] vmlal.u32 q12,d29,d5[1] vmlal.u32 q13,d29,d6[0] vmlal.u32 q6,d29,d6[1] vmlal.u32 q7,d29,d7[0] vmlal.u32 q8,d29,d7[1] vst1.64 {q9},[r7,:128]! vmlal.u32 q10,d28,d0[0] vld1.64 {q9},[r6,:128] vmlal.u32 q11,d28,d0[1] vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+4] vmlal.u32 q12,d28,d1[0] it ne addne r6,r6,#16 @ don't advance in last iteration vmlal.u32 q13,d28,d1[1] vmlal.u32 q6,d28,d2[0] vmlal.u32 q7,d28,d2[1] vmlal.u32 q8,d28,d3[0] vmlal.u32 q9,d28,d3[1] vld1.32 {d28},[r10,:64]! 
@ pull smashed b[8*i+5] vmlal.u32 q10,d29,d4[0] vmlal.u32 q11,d29,d4[1] vmlal.u32 q12,d29,d5[0] vmlal.u32 q13,d29,d5[1] vmlal.u32 q6,d29,d6[0] vmlal.u32 q7,d29,d6[1] vmlal.u32 q8,d29,d7[0] vmlal.u32 q9,d29,d7[1] vst1.64 {q10},[r7,:128]! vmlal.u32 q11,d28,d0[0] vld1.64 {q10},[r6,:128] vmlal.u32 q12,d28,d0[1] vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+5] vmlal.u32 q13,d28,d1[0] it ne addne r6,r6,#16 @ don't advance in last iteration vmlal.u32 q6,d28,d1[1] vmlal.u32 q7,d28,d2[0] vmlal.u32 q8,d28,d2[1] vmlal.u32 q9,d28,d3[0] vmlal.u32 q10,d28,d3[1] vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+6] vmlal.u32 q11,d29,d4[0] vmlal.u32 q12,d29,d4[1] vmlal.u32 q13,d29,d5[0] vmlal.u32 q6,d29,d5[1] vmlal.u32 q7,d29,d6[0] vmlal.u32 q8,d29,d6[1] vmlal.u32 q9,d29,d7[0] vmlal.u32 q10,d29,d7[1] vst1.64 {q11},[r7,:128]! vmlal.u32 q12,d28,d0[0] vld1.64 {q11},[r6,:128] vmlal.u32 q13,d28,d0[1] vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+6] vmlal.u32 q6,d28,d1[0] it ne addne r6,r6,#16 @ don't advance in last iteration vmlal.u32 q7,d28,d1[1] vmlal.u32 q8,d28,d2[0] vmlal.u32 q9,d28,d2[1] vmlal.u32 q10,d28,d3[0] vmlal.u32 q11,d28,d3[1] vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+7] vmlal.u32 q12,d29,d4[0] vmlal.u32 q13,d29,d4[1] vmlal.u32 q6,d29,d5[0] vmlal.u32 q7,d29,d5[1] vmlal.u32 q8,d29,d6[0] vmlal.u32 q9,d29,d6[1] vmlal.u32 q10,d29,d7[0] vmlal.u32 q11,d29,d7[1] vst1.64 {q12},[r7,:128]! vmlal.u32 q13,d28,d0[0] vld1.64 {q12},[r6,:128] vmlal.u32 q6,d28,d0[1] vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+7] vmlal.u32 q7,d28,d1[0] it ne addne r6,r6,#16 @ don't advance in last iteration vmlal.u32 q8,d28,d1[1] vmlal.u32 q9,d28,d2[0] vmlal.u32 q10,d28,d2[1] vmlal.u32 q11,d28,d3[0] vmlal.u32 q12,d28,d3[1] it eq subeq r1,r1,r5,lsl#2 @ rewind vmlal.u32 q13,d29,d4[0] vld1.32 {d28},[sp,:64] @ pull smashed b[8*i+0] vmlal.u32 q6,d29,d4[1] vld1.32 {d0,d1,d2,d3},[r1]! vmlal.u32 q7,d29,d5[0] add r10,sp,#8 @ rewind vmlal.u32 q8,d29,d5[1] vmlal.u32 q9,d29,d6[0] vmlal.u32 q10,d29,d6[1] vmlal.u32 q11,d29,d7[0] vst1.64 {q13},[r7,:128]! vmlal.u32 q12,d29,d7[1] bne LNEON_8n_inner add r6,sp,#128 vst1.64 {q6,q7},[r7,:256]! veor q2,q2,q2 @ d4-d5 vst1.64 {q8,q9},[r7,:256]! veor q3,q3,q3 @ d6-d7 vst1.64 {q10,q11},[r7,:256]! vst1.64 {q12},[r7,:128] subs r9,r9,#8 vld1.64 {q6,q7},[r6,:256]! vld1.64 {q8,q9},[r6,:256]! vld1.64 {q10,q11},[r6,:256]! vld1.64 {q12,q13},[r6,:256]! itt ne subne r3,r3,r5,lsl#2 @ rewind bne LNEON_8n_outer add r7,sp,#128 vst1.64 {q2,q3}, [sp,:256]! @ start wiping stack frame vshr.u64 d10,d12,#16 vst1.64 {q2,q3},[sp,:256]! vadd.u64 d13,d13,d10 vst1.64 {q2,q3}, [sp,:256]! vshr.u64 d10,d13,#16 vst1.64 {q2,q3}, [sp,:256]! vzip.16 d12,d13 mov r8,r5 b LNEON_tail_entry .align 4 LNEON_tail: vadd.u64 d12,d12,d10 vshr.u64 d10,d12,#16 vld1.64 {q8,q9}, [r6, :256]! vadd.u64 d13,d13,d10 vld1.64 {q10,q11}, [r6, :256]! vshr.u64 d10,d13,#16 vld1.64 {q12,q13}, [r6, :256]! vzip.16 d12,d13 LNEON_tail_entry: vadd.u64 d14,d14,d10 vst1.32 {d12[0]}, [r7, :32]! vshr.u64 d10,d14,#16 vadd.u64 d15,d15,d10 vshr.u64 d10,d15,#16 vzip.16 d14,d15 vadd.u64 d16,d16,d10 vst1.32 {d14[0]}, [r7, :32]! vshr.u64 d10,d16,#16 vadd.u64 d17,d17,d10 vshr.u64 d10,d17,#16 vzip.16 d16,d17 vadd.u64 d18,d18,d10 vst1.32 {d16[0]}, [r7, :32]! vshr.u64 d10,d18,#16 vadd.u64 d19,d19,d10 vshr.u64 d10,d19,#16 vzip.16 d18,d19 vadd.u64 d20,d20,d10 vst1.32 {d18[0]}, [r7, :32]! vshr.u64 d10,d20,#16 vadd.u64 d21,d21,d10 vshr.u64 d10,d21,#16 vzip.16 d20,d21 vadd.u64 d22,d22,d10 vst1.32 {d20[0]}, [r7, :32]! 
vshr.u64 d10,d22,#16 vadd.u64 d23,d23,d10 vshr.u64 d10,d23,#16 vzip.16 d22,d23 vadd.u64 d24,d24,d10 vst1.32 {d22[0]}, [r7, :32]! vshr.u64 d10,d24,#16 vadd.u64 d25,d25,d10 vshr.u64 d10,d25,#16 vzip.16 d24,d25 vadd.u64 d26,d26,d10 vst1.32 {d24[0]}, [r7, :32]! vshr.u64 d10,d26,#16 vadd.u64 d27,d27,d10 vshr.u64 d10,d27,#16 vzip.16 d26,d27 vld1.64 {q6,q7}, [r6, :256]! subs r8,r8,#8 vst1.32 {d26[0]}, [r7, :32]! bne LNEON_tail vst1.32 {d10[0]}, [r7, :32] @ top-most bit sub r3,r3,r5,lsl#2 @ rewind r3 subs r1,sp,#0 @ clear carry flag add r2,sp,r5,lsl#2 LNEON_sub: ldmia r1!, {r4,r5,r6,r7} ldmia r3!, {r8,r9,r10,r11} sbcs r8, r4,r8 sbcs r9, r5,r9 sbcs r10,r6,r10 sbcs r11,r7,r11 teq r1,r2 @ preserves carry stmia r0!, {r8,r9,r10,r11} bne LNEON_sub ldr r10, [r1] @ load top-most bit mov r11,sp veor q0,q0,q0 sub r11,r2,r11 @ this is num*4 veor q1,q1,q1 mov r1,sp sub r0,r0,r11 @ rewind r0 mov r3,r2 @ second 3/4th of frame sbcs r10,r10,#0 @ result is carry flag LNEON_copy_n_zap: ldmia r1!, {r4,r5,r6,r7} ldmia r0, {r8,r9,r10,r11} it cc movcc r8, r4 vst1.64 {q0,q1}, [r3,:256]! @ wipe itt cc movcc r9, r5 movcc r10,r6 vst1.64 {q0,q1}, [r3,:256]! @ wipe it cc movcc r11,r7 ldmia r1, {r4,r5,r6,r7} stmia r0!, {r8,r9,r10,r11} sub r1,r1,#16 ldmia r0, {r8,r9,r10,r11} it cc movcc r8, r4 vst1.64 {q0,q1}, [r1,:256]! @ wipe itt cc movcc r9, r5 movcc r10,r6 vst1.64 {q0,q1}, [r3,:256]! @ wipe it cc movcc r11,r7 teq r1,r2 @ preserves carry stmia r0!, {r8,r9,r10,r11} bne LNEON_copy_n_zap mov sp,ip vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15} ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11} bx lr @ bx lr #endif .byte 77,111,110,116,103,111,109,101,114,121,32,109,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 #endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__APPLE__)
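@ Added note (not generator output): the code above finishes Montgomery
@ multiplication in the usual way. LNEON_tail folds the 16-bit carry limbs back
@ into 32-bit words, LNEON_sub computes tmp - N with a borrow chain, and
@ LNEON_copy_n_zap then keeps tmp - N when no borrow occurred (tmp >= N) or tmp
@ otherwise, using conditional moves rather than a data-dependent branch, while
@ wiping the scratch frame with zeroed q0/q1 stores.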
marvin-hansen/iggy-streaming-system
64,769
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/ios-arm/crypto/fipsmodule/sha256-armv4.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__APPLE__) @ Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved. @ @ Licensed under the OpenSSL license (the "License"). You may not use @ this file except in compliance with the License. You can obtain a copy @ in the file LICENSE in the source distribution or at @ https://www.openssl.org/source/license.html @ ==================================================================== @ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL @ project. The module is, however, dual licensed under OpenSSL and @ CRYPTOGAMS licenses depending on where you obtain it. For further @ details see http://www.openssl.org/~appro/cryptogams/. @ @ Permission to use under GPL terms is granted. @ ==================================================================== @ SHA256 block procedure for ARMv4. May 2007. @ Performance is ~2x better than gcc 3.4 generated code and in "abso- @ lute" terms is ~2250 cycles per 64-byte block or ~35 cycles per @ byte [on single-issue Xscale PXA250 core]. @ July 2010. @ @ Rescheduling for dual-issue pipeline resulted in 22% improvement on @ Cortex A8 core and ~20 cycles per processed byte. @ February 2011. @ @ Profiler-assisted and platform-specific optimization resulted in 16% @ improvement on Cortex A8 core and ~15.4 cycles per processed byte. @ September 2013. @ @ Add NEON implementation. On Cortex A8 it was measured to process one @ byte in 12.5 cycles or 23% faster than integer-only code. Snapdragon @ S4 does it in 12.5 cycles too, but it's 50% faster than integer-only @ code (meaning that latter performs sub-optimally, nothing was done @ about it). @ May 2014. @ @ Add ARMv8 code path performing at 2.0 cpb on Apple A7. #ifndef __KERNEL__ # include <openssl/arm_arch.h> #else # define __ARM_ARCH __LINUX_ARM_ARCH__ # define __ARM_MAX_ARCH__ 7 #endif @ Silence ARMv8 deprecated IT instruction warnings. This file is used by both @ ARMv7 and ARMv8 processors. It does have ARMv8-only code, but those @ instructions are manually-encoded. (See unsha256.) 
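@ Added reference (not generator output): the rounds below implement the
@ standard FIPS 180-4 SHA-256 compression function; the inline comments name
@ its primitives, which are
@
@   Sigma1(e) = ROTR6(e)  ^ ROTR11(e) ^ ROTR25(e)
@   Sigma0(a) = ROTR2(a)  ^ ROTR13(a) ^ ROTR22(a)
@   Ch(e,f,g) = (e & f) ^ (~e & g)
@   Maj(a,b,c)= (a & b) ^ (a & c) ^ (b & c)    @ computed here as ((a^b)&(b^c))^b
@   sigma0(x) = ROTR7(x)  ^ ROTR18(x) ^ (x>>3)
@   sigma1(x) = ROTR17(x) ^ ROTR19(x) ^ (x>>10)
@
@ and each round updates, with X[] the 16-word circular message schedule:
@   T1 = h + Sigma1(e) + Ch(e,f,g) + K256[i] + X[i]
@   T2 = Sigma0(a) + Maj(a,b,c)
@   d += T1 ; h = T1 + T2 ; the register roles then rotate by one.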
.text #if defined(__thumb2__) .syntax unified .thumb #else .code 32 #endif .align 5 K256: .word 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .word 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .word 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .word 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .word 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .word 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .word 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .word 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .word 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .word 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .word 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .word 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .word 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .word 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .word 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .word 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .word 0 @ terminator .align 5 .globl _sha256_block_data_order_nohw .private_extern _sha256_block_data_order_nohw #ifdef __thumb2__ .thumb_func _sha256_block_data_order_nohw #endif _sha256_block_data_order_nohw: add r2,r1,r2,lsl#6 @ len to point at the end of inp stmdb sp!,{r0,r1,r2,r4-r11,lr} ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11} adr r14,K256 sub sp,sp,#16*4 @ alloca(X[16]) Loop: # if __ARM_ARCH>=7 ldr r2,[r1],#4 # else ldrb r2,[r1,#3] # endif eor r3,r5,r6 @ magic eor r12,r12,r12 #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 0 # if 0==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r8,r8,ror#5 add r4,r4,r12 @ h+=Maj(a,b,c) from the past eor r0,r0,r8,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 0 add r4,r4,r12 @ h+=Maj(a,b,c) from the past ldrb r12,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r12,lsl#8 ldrb r12,[r1],#4 orr r2,r2,r0,lsl#16 # if 0==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r8,r8,ror#5 orr r2,r2,r12,lsl#24 eor r0,r0,r8,ror#19 @ Sigma1(e) #endif ldr r12,[r14],#4 @ *K256++ add r11,r11,r2 @ h+=X[i] str r2,[sp,#0*4] eor r2,r9,r10 add r11,r11,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r8 add r11,r11,r12 @ h+=K256[i] eor r2,r2,r10 @ Ch(e,f,g) eor r0,r4,r4,ror#11 add r11,r11,r2 @ h+=Ch(e,f,g) #if 0==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 0<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r4,r5 @ a^b, b^c in next round #else ldr r2,[sp,#2*4] @ from future BODY_16_xx eor r12,r4,r5 @ a^b, b^c in next round ldr r1,[sp,#15*4] @ from future BODY_16_xx #endif eor r0,r0,r4,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r7,r7,r11 @ d+=h eor r3,r3,r5 @ Maj(a,b,c) add r11,r11,r0,ror#2 @ h+=Sigma0(a) @ add r11,r11,r3 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 1 # if 1==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r7,r7,ror#5 add r11,r11,r3 @ h+=Maj(a,b,c) from the past eor r0,r0,r7,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 1 add r11,r11,r3 @ h+=Maj(a,b,c) from the past ldrb r3,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r3,lsl#8 ldrb r3,[r1],#4 orr r2,r2,r0,lsl#16 # if 1==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r7,r7,ror#5 orr r2,r2,r3,lsl#24 eor r0,r0,r7,ror#19 @ Sigma1(e) #endif ldr r3,[r14],#4 @ *K256++ add r10,r10,r2 @ h+=X[i] str r2,[sp,#1*4] eor r2,r8,r9 add r10,r10,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r7 add r10,r10,r3 @ h+=K256[i] eor r2,r2,r9 @ Ch(e,f,g) eor r0,r11,r11,ror#11 add r10,r10,r2 @ h+=Ch(e,f,g) #if 1==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 1<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r11,r4 @ a^b, b^c in next round #else ldr r2,[sp,#3*4] @ from future BODY_16_xx eor r3,r11,r4 @ a^b, b^c in next round ldr r1,[sp,#0*4] @ from future BODY_16_xx #endif eor r0,r0,r11,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r6,r6,r10 @ d+=h eor r12,r12,r4 @ Maj(a,b,c) add r10,r10,r0,ror#2 @ h+=Sigma0(a) @ add r10,r10,r12 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 2 # if 2==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r6,r6,ror#5 add r10,r10,r12 @ h+=Maj(a,b,c) from the past eor r0,r0,r6,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 2 add r10,r10,r12 @ h+=Maj(a,b,c) from the past ldrb r12,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r12,lsl#8 ldrb r12,[r1],#4 orr r2,r2,r0,lsl#16 # if 2==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r6,r6,ror#5 orr r2,r2,r12,lsl#24 eor r0,r0,r6,ror#19 @ Sigma1(e) #endif ldr r12,[r14],#4 @ *K256++ add r9,r9,r2 @ h+=X[i] str r2,[sp,#2*4] eor r2,r7,r8 add r9,r9,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r6 add r9,r9,r12 @ h+=K256[i] eor r2,r2,r8 @ Ch(e,f,g) eor r0,r10,r10,ror#11 add r9,r9,r2 @ h+=Ch(e,f,g) #if 2==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 2<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r10,r11 @ a^b, b^c in next round #else ldr r2,[sp,#4*4] @ from future BODY_16_xx eor r12,r10,r11 @ a^b, b^c in next round ldr r1,[sp,#1*4] @ from future BODY_16_xx #endif eor r0,r0,r10,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r5,r5,r9 @ d+=h eor r3,r3,r11 @ Maj(a,b,c) add r9,r9,r0,ror#2 @ h+=Sigma0(a) @ add r9,r9,r3 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 3 # if 3==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r5,r5,ror#5 add r9,r9,r3 @ h+=Maj(a,b,c) from the past eor r0,r0,r5,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 3 add r9,r9,r3 @ h+=Maj(a,b,c) from the past ldrb r3,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r3,lsl#8 ldrb r3,[r1],#4 orr r2,r2,r0,lsl#16 # if 3==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r5,r5,ror#5 orr r2,r2,r3,lsl#24 eor r0,r0,r5,ror#19 @ Sigma1(e) #endif ldr r3,[r14],#4 @ *K256++ add r8,r8,r2 @ h+=X[i] str r2,[sp,#3*4] eor r2,r6,r7 add r8,r8,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r5 add r8,r8,r3 @ h+=K256[i] eor r2,r2,r7 @ Ch(e,f,g) eor r0,r9,r9,ror#11 add r8,r8,r2 @ h+=Ch(e,f,g) #if 3==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 3<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r9,r10 @ a^b, b^c in next round #else ldr r2,[sp,#5*4] @ from future BODY_16_xx eor r3,r9,r10 @ a^b, b^c in next round ldr r1,[sp,#2*4] @ from future BODY_16_xx #endif eor r0,r0,r9,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r4,r4,r8 @ d+=h eor r12,r12,r10 @ Maj(a,b,c) add r8,r8,r0,ror#2 @ h+=Sigma0(a) @ add r8,r8,r12 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 4 # if 4==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r4,r4,ror#5 add r8,r8,r12 @ h+=Maj(a,b,c) from the past eor r0,r0,r4,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 4 add r8,r8,r12 @ h+=Maj(a,b,c) from the past ldrb r12,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r12,lsl#8 ldrb r12,[r1],#4 orr r2,r2,r0,lsl#16 # if 4==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r4,r4,ror#5 orr r2,r2,r12,lsl#24 eor r0,r0,r4,ror#19 @ Sigma1(e) #endif ldr r12,[r14],#4 @ *K256++ add r7,r7,r2 @ h+=X[i] str r2,[sp,#4*4] eor r2,r5,r6 add r7,r7,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r4 add r7,r7,r12 @ h+=K256[i] eor r2,r2,r6 @ Ch(e,f,g) eor r0,r8,r8,ror#11 add r7,r7,r2 @ h+=Ch(e,f,g) #if 4==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 4<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r8,r9 @ a^b, b^c in next round #else ldr r2,[sp,#6*4] @ from future BODY_16_xx eor r12,r8,r9 @ a^b, b^c in next round ldr r1,[sp,#3*4] @ from future BODY_16_xx #endif eor r0,r0,r8,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r11,r11,r7 @ d+=h eor r3,r3,r9 @ Maj(a,b,c) add r7,r7,r0,ror#2 @ h+=Sigma0(a) @ add r7,r7,r3 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 5 # if 5==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r11,r11,ror#5 add r7,r7,r3 @ h+=Maj(a,b,c) from the past eor r0,r0,r11,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 5 add r7,r7,r3 @ h+=Maj(a,b,c) from the past ldrb r3,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r3,lsl#8 ldrb r3,[r1],#4 orr r2,r2,r0,lsl#16 # if 5==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r11,r11,ror#5 orr r2,r2,r3,lsl#24 eor r0,r0,r11,ror#19 @ Sigma1(e) #endif ldr r3,[r14],#4 @ *K256++ add r6,r6,r2 @ h+=X[i] str r2,[sp,#5*4] eor r2,r4,r5 add r6,r6,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r11 add r6,r6,r3 @ h+=K256[i] eor r2,r2,r5 @ Ch(e,f,g) eor r0,r7,r7,ror#11 add r6,r6,r2 @ h+=Ch(e,f,g) #if 5==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 5<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r7,r8 @ a^b, b^c in next round #else ldr r2,[sp,#7*4] @ from future BODY_16_xx eor r3,r7,r8 @ a^b, b^c in next round ldr r1,[sp,#4*4] @ from future BODY_16_xx #endif eor r0,r0,r7,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r10,r10,r6 @ d+=h eor r12,r12,r8 @ Maj(a,b,c) add r6,r6,r0,ror#2 @ h+=Sigma0(a) @ add r6,r6,r12 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 6 # if 6==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r10,r10,ror#5 add r6,r6,r12 @ h+=Maj(a,b,c) from the past eor r0,r0,r10,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 6 add r6,r6,r12 @ h+=Maj(a,b,c) from the past ldrb r12,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r12,lsl#8 ldrb r12,[r1],#4 orr r2,r2,r0,lsl#16 # if 6==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r10,r10,ror#5 orr r2,r2,r12,lsl#24 eor r0,r0,r10,ror#19 @ Sigma1(e) #endif ldr r12,[r14],#4 @ *K256++ add r5,r5,r2 @ h+=X[i] str r2,[sp,#6*4] eor r2,r11,r4 add r5,r5,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r10 add r5,r5,r12 @ h+=K256[i] eor r2,r2,r4 @ Ch(e,f,g) eor r0,r6,r6,ror#11 add r5,r5,r2 @ h+=Ch(e,f,g) #if 6==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 6<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r6,r7 @ a^b, b^c in next round #else ldr r2,[sp,#8*4] @ from future BODY_16_xx eor r12,r6,r7 @ a^b, b^c in next round ldr r1,[sp,#5*4] @ from future BODY_16_xx #endif eor r0,r0,r6,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r9,r9,r5 @ d+=h eor r3,r3,r7 @ Maj(a,b,c) add r5,r5,r0,ror#2 @ h+=Sigma0(a) @ add r5,r5,r3 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 7 # if 7==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r9,r9,ror#5 add r5,r5,r3 @ h+=Maj(a,b,c) from the past eor r0,r0,r9,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 7 add r5,r5,r3 @ h+=Maj(a,b,c) from the past ldrb r3,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r3,lsl#8 ldrb r3,[r1],#4 orr r2,r2,r0,lsl#16 # if 7==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r9,r9,ror#5 orr r2,r2,r3,lsl#24 eor r0,r0,r9,ror#19 @ Sigma1(e) #endif ldr r3,[r14],#4 @ *K256++ add r4,r4,r2 @ h+=X[i] str r2,[sp,#7*4] eor r2,r10,r11 add r4,r4,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r9 add r4,r4,r3 @ h+=K256[i] eor r2,r2,r11 @ Ch(e,f,g) eor r0,r5,r5,ror#11 add r4,r4,r2 @ h+=Ch(e,f,g) #if 7==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 7<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r5,r6 @ a^b, b^c in next round #else ldr r2,[sp,#9*4] @ from future BODY_16_xx eor r3,r5,r6 @ a^b, b^c in next round ldr r1,[sp,#6*4] @ from future BODY_16_xx #endif eor r0,r0,r5,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r8,r8,r4 @ d+=h eor r12,r12,r6 @ Maj(a,b,c) add r4,r4,r0,ror#2 @ h+=Sigma0(a) @ add r4,r4,r12 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 8 # if 8==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r8,r8,ror#5 add r4,r4,r12 @ h+=Maj(a,b,c) from the past eor r0,r0,r8,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 8 add r4,r4,r12 @ h+=Maj(a,b,c) from the past ldrb r12,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r12,lsl#8 ldrb r12,[r1],#4 orr r2,r2,r0,lsl#16 # if 8==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r8,r8,ror#5 orr r2,r2,r12,lsl#24 eor r0,r0,r8,ror#19 @ Sigma1(e) #endif ldr r12,[r14],#4 @ *K256++ add r11,r11,r2 @ h+=X[i] str r2,[sp,#8*4] eor r2,r9,r10 add r11,r11,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r8 add r11,r11,r12 @ h+=K256[i] eor r2,r2,r10 @ Ch(e,f,g) eor r0,r4,r4,ror#11 add r11,r11,r2 @ h+=Ch(e,f,g) #if 8==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 8<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r4,r5 @ a^b, b^c in next round #else ldr r2,[sp,#10*4] @ from future BODY_16_xx eor r12,r4,r5 @ a^b, b^c in next round ldr r1,[sp,#7*4] @ from future BODY_16_xx #endif eor r0,r0,r4,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r7,r7,r11 @ d+=h eor r3,r3,r5 @ Maj(a,b,c) add r11,r11,r0,ror#2 @ h+=Sigma0(a) @ add r11,r11,r3 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 9 # if 9==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r7,r7,ror#5 add r11,r11,r3 @ h+=Maj(a,b,c) from the past eor r0,r0,r7,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 9 add r11,r11,r3 @ h+=Maj(a,b,c) from the past ldrb r3,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r3,lsl#8 ldrb r3,[r1],#4 orr r2,r2,r0,lsl#16 # if 9==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r7,r7,ror#5 orr r2,r2,r3,lsl#24 eor r0,r0,r7,ror#19 @ Sigma1(e) #endif ldr r3,[r14],#4 @ *K256++ add r10,r10,r2 @ h+=X[i] str r2,[sp,#9*4] eor r2,r8,r9 add r10,r10,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r7 add r10,r10,r3 @ h+=K256[i] eor r2,r2,r9 @ Ch(e,f,g) eor r0,r11,r11,ror#11 add r10,r10,r2 @ h+=Ch(e,f,g) #if 9==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 9<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r11,r4 @ a^b, b^c in next round #else ldr r2,[sp,#11*4] @ from future BODY_16_xx eor r3,r11,r4 @ a^b, b^c in next round ldr r1,[sp,#8*4] @ from future BODY_16_xx #endif eor r0,r0,r11,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r6,r6,r10 @ d+=h eor r12,r12,r4 @ Maj(a,b,c) add r10,r10,r0,ror#2 @ h+=Sigma0(a) @ add r10,r10,r12 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 10 # if 10==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r6,r6,ror#5 add r10,r10,r12 @ h+=Maj(a,b,c) from the past eor r0,r0,r6,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 10 add r10,r10,r12 @ h+=Maj(a,b,c) from the past ldrb r12,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r12,lsl#8 ldrb r12,[r1],#4 orr r2,r2,r0,lsl#16 # if 10==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r6,r6,ror#5 orr r2,r2,r12,lsl#24 eor r0,r0,r6,ror#19 @ Sigma1(e) #endif ldr r12,[r14],#4 @ *K256++ add r9,r9,r2 @ h+=X[i] str r2,[sp,#10*4] eor r2,r7,r8 add r9,r9,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r6 add r9,r9,r12 @ h+=K256[i] eor r2,r2,r8 @ Ch(e,f,g) eor r0,r10,r10,ror#11 add r9,r9,r2 @ h+=Ch(e,f,g) #if 10==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 10<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r10,r11 @ a^b, b^c in next round #else ldr r2,[sp,#12*4] @ from future BODY_16_xx eor r12,r10,r11 @ a^b, b^c in next round ldr r1,[sp,#9*4] @ from future BODY_16_xx #endif eor r0,r0,r10,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r5,r5,r9 @ d+=h eor r3,r3,r11 @ Maj(a,b,c) add r9,r9,r0,ror#2 @ h+=Sigma0(a) @ add r9,r9,r3 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 11 # if 11==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r5,r5,ror#5 add r9,r9,r3 @ h+=Maj(a,b,c) from the past eor r0,r0,r5,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 11 add r9,r9,r3 @ h+=Maj(a,b,c) from the past ldrb r3,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r3,lsl#8 ldrb r3,[r1],#4 orr r2,r2,r0,lsl#16 # if 11==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r5,r5,ror#5 orr r2,r2,r3,lsl#24 eor r0,r0,r5,ror#19 @ Sigma1(e) #endif ldr r3,[r14],#4 @ *K256++ add r8,r8,r2 @ h+=X[i] str r2,[sp,#11*4] eor r2,r6,r7 add r8,r8,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r5 add r8,r8,r3 @ h+=K256[i] eor r2,r2,r7 @ Ch(e,f,g) eor r0,r9,r9,ror#11 add r8,r8,r2 @ h+=Ch(e,f,g) #if 11==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 11<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r9,r10 @ a^b, b^c in next round #else ldr r2,[sp,#13*4] @ from future BODY_16_xx eor r3,r9,r10 @ a^b, b^c in next round ldr r1,[sp,#10*4] @ from future BODY_16_xx #endif eor r0,r0,r9,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r4,r4,r8 @ d+=h eor r12,r12,r10 @ Maj(a,b,c) add r8,r8,r0,ror#2 @ h+=Sigma0(a) @ add r8,r8,r12 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 12 # if 12==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r4,r4,ror#5 add r8,r8,r12 @ h+=Maj(a,b,c) from the past eor r0,r0,r4,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 12 add r8,r8,r12 @ h+=Maj(a,b,c) from the past ldrb r12,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r12,lsl#8 ldrb r12,[r1],#4 orr r2,r2,r0,lsl#16 # if 12==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r4,r4,ror#5 orr r2,r2,r12,lsl#24 eor r0,r0,r4,ror#19 @ Sigma1(e) #endif ldr r12,[r14],#4 @ *K256++ add r7,r7,r2 @ h+=X[i] str r2,[sp,#12*4] eor r2,r5,r6 add r7,r7,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r4 add r7,r7,r12 @ h+=K256[i] eor r2,r2,r6 @ Ch(e,f,g) eor r0,r8,r8,ror#11 add r7,r7,r2 @ h+=Ch(e,f,g) #if 12==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 12<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r8,r9 @ a^b, b^c in next round #else ldr r2,[sp,#14*4] @ from future BODY_16_xx eor r12,r8,r9 @ a^b, b^c in next round ldr r1,[sp,#11*4] @ from future BODY_16_xx #endif eor r0,r0,r8,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r11,r11,r7 @ d+=h eor r3,r3,r9 @ Maj(a,b,c) add r7,r7,r0,ror#2 @ h+=Sigma0(a) @ add r7,r7,r3 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 13 # if 13==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r11,r11,ror#5 add r7,r7,r3 @ h+=Maj(a,b,c) from the past eor r0,r0,r11,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 13 add r7,r7,r3 @ h+=Maj(a,b,c) from the past ldrb r3,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r3,lsl#8 ldrb r3,[r1],#4 orr r2,r2,r0,lsl#16 # if 13==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r11,r11,ror#5 orr r2,r2,r3,lsl#24 eor r0,r0,r11,ror#19 @ Sigma1(e) #endif ldr r3,[r14],#4 @ *K256++ add r6,r6,r2 @ h+=X[i] str r2,[sp,#13*4] eor r2,r4,r5 add r6,r6,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r11 add r6,r6,r3 @ h+=K256[i] eor r2,r2,r5 @ Ch(e,f,g) eor r0,r7,r7,ror#11 add r6,r6,r2 @ h+=Ch(e,f,g) #if 13==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 13<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r7,r8 @ a^b, b^c in next round #else ldr r2,[sp,#15*4] @ from future BODY_16_xx eor r3,r7,r8 @ a^b, b^c in next round ldr r1,[sp,#12*4] @ from future BODY_16_xx #endif eor r0,r0,r7,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r10,r10,r6 @ d+=h eor r12,r12,r8 @ Maj(a,b,c) add r6,r6,r0,ror#2 @ h+=Sigma0(a) @ add r6,r6,r12 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 14 # if 14==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r10,r10,ror#5 add r6,r6,r12 @ h+=Maj(a,b,c) from the past eor r0,r0,r10,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 14 add r6,r6,r12 @ h+=Maj(a,b,c) from the past ldrb r12,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r12,lsl#8 ldrb r12,[r1],#4 orr r2,r2,r0,lsl#16 # if 14==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r10,r10,ror#5 orr r2,r2,r12,lsl#24 eor r0,r0,r10,ror#19 @ Sigma1(e) #endif ldr r12,[r14],#4 @ *K256++ add r5,r5,r2 @ h+=X[i] str r2,[sp,#14*4] eor r2,r11,r4 add r5,r5,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r10 add r5,r5,r12 @ h+=K256[i] eor r2,r2,r4 @ Ch(e,f,g) eor r0,r6,r6,ror#11 add r5,r5,r2 @ h+=Ch(e,f,g) #if 14==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 14<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r6,r7 @ a^b, b^c in next round #else ldr r2,[sp,#0*4] @ from future BODY_16_xx eor r12,r6,r7 @ a^b, b^c in next round ldr r1,[sp,#13*4] @ from future BODY_16_xx #endif eor r0,r0,r6,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r9,r9,r5 @ d+=h eor r3,r3,r7 @ Maj(a,b,c) add r5,r5,r0,ror#2 @ h+=Sigma0(a) @ add r5,r5,r3 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 15 # if 15==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r9,r9,ror#5 add r5,r5,r3 @ h+=Maj(a,b,c) from the past eor r0,r0,r9,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 15 add r5,r5,r3 @ h+=Maj(a,b,c) from the past ldrb r3,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r3,lsl#8 ldrb r3,[r1],#4 orr r2,r2,r0,lsl#16 # if 15==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r9,r9,ror#5 orr r2,r2,r3,lsl#24 eor r0,r0,r9,ror#19 @ Sigma1(e) #endif ldr r3,[r14],#4 @ *K256++ add r4,r4,r2 @ h+=X[i] str r2,[sp,#15*4] eor r2,r10,r11 add r4,r4,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r9 add r4,r4,r3 @ h+=K256[i] eor r2,r2,r11 @ Ch(e,f,g) eor r0,r5,r5,ror#11 add r4,r4,r2 @ h+=Ch(e,f,g) #if 15==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 15<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r5,r6 @ a^b, b^c in next round #else ldr r2,[sp,#1*4] @ from future BODY_16_xx eor r3,r5,r6 @ a^b, b^c in next round ldr r1,[sp,#14*4] @ from future BODY_16_xx #endif eor r0,r0,r5,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r8,r8,r4 @ d+=h eor r12,r12,r6 @ Maj(a,b,c) add r4,r4,r0,ror#2 @ h+=Sigma0(a) @ add r4,r4,r12 @ h+=Maj(a,b,c) Lrounds_16_xx: @ ldr r2,[sp,#1*4] @ 16 @ ldr r1,[sp,#14*4] mov r0,r2,ror#7 add r4,r4,r12 @ h+=Maj(a,b,c) from the past mov r12,r1,ror#17 eor r0,r0,r2,ror#18 eor r12,r12,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#0*4] eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#9*4] add r12,r12,r0 eor r0,r8,r8,ror#5 @ from BODY_00_15 add r2,r2,r12 eor r0,r0,r8,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r12,[r14],#4 @ *K256++ add r11,r11,r2 @ h+=X[i] str r2,[sp,#0*4] eor r2,r9,r10 add r11,r11,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r8 add r11,r11,r12 @ h+=K256[i] eor r2,r2,r10 @ Ch(e,f,g) eor r0,r4,r4,ror#11 add r11,r11,r2 @ h+=Ch(e,f,g) #if 16==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 16<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r4,r5 @ a^b, b^c in next round #else ldr r2,[sp,#2*4] @ from future BODY_16_xx eor r12,r4,r5 @ a^b, b^c in next round ldr r1,[sp,#15*4] @ from future BODY_16_xx #endif eor r0,r0,r4,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r7,r7,r11 @ d+=h eor r3,r3,r5 @ Maj(a,b,c) add r11,r11,r0,ror#2 @ h+=Sigma0(a) @ add r11,r11,r3 @ h+=Maj(a,b,c) @ ldr r2,[sp,#2*4] @ 17 @ ldr r1,[sp,#15*4] mov r0,r2,ror#7 add r11,r11,r3 @ h+=Maj(a,b,c) from the past mov r3,r1,ror#17 eor r0,r0,r2,ror#18 eor r3,r3,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#1*4] eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#10*4] add r3,r3,r0 eor r0,r7,r7,ror#5 @ from BODY_00_15 add r2,r2,r3 eor r0,r0,r7,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r3,[r14],#4 @ *K256++ add r10,r10,r2 @ h+=X[i] str r2,[sp,#1*4] eor r2,r8,r9 add r10,r10,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r7 add r10,r10,r3 @ h+=K256[i] eor r2,r2,r9 @ Ch(e,f,g) eor r0,r11,r11,ror#11 add r10,r10,r2 @ h+=Ch(e,f,g) #if 17==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? #endif #if 17<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r11,r4 @ a^b, b^c in next round #else ldr r2,[sp,#3*4] @ from future BODY_16_xx eor r3,r11,r4 @ a^b, b^c in next round ldr r1,[sp,#0*4] @ from future BODY_16_xx #endif eor r0,r0,r11,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r6,r6,r10 @ d+=h eor r12,r12,r4 @ Maj(a,b,c) add r10,r10,r0,ror#2 @ h+=Sigma0(a) @ add r10,r10,r12 @ h+=Maj(a,b,c) @ ldr r2,[sp,#3*4] @ 18 @ ldr r1,[sp,#0*4] mov r0,r2,ror#7 add r10,r10,r12 @ h+=Maj(a,b,c) from the past mov r12,r1,ror#17 eor r0,r0,r2,ror#18 eor r12,r12,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#2*4] eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#11*4] add r12,r12,r0 eor r0,r6,r6,ror#5 @ from BODY_00_15 add r2,r2,r12 eor r0,r0,r6,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r12,[r14],#4 @ *K256++ add r9,r9,r2 @ h+=X[i] str r2,[sp,#2*4] eor r2,r7,r8 add r9,r9,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r6 add r9,r9,r12 @ h+=K256[i] eor r2,r2,r8 @ Ch(e,f,g) eor r0,r10,r10,ror#11 add r9,r9,r2 @ h+=Ch(e,f,g) #if 18==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? 
#endif #if 18<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r10,r11 @ a^b, b^c in next round #else ldr r2,[sp,#4*4] @ from future BODY_16_xx eor r12,r10,r11 @ a^b, b^c in next round ldr r1,[sp,#1*4] @ from future BODY_16_xx #endif eor r0,r0,r10,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r5,r5,r9 @ d+=h eor r3,r3,r11 @ Maj(a,b,c) add r9,r9,r0,ror#2 @ h+=Sigma0(a) @ add r9,r9,r3 @ h+=Maj(a,b,c) @ ldr r2,[sp,#4*4] @ 19 @ ldr r1,[sp,#1*4] mov r0,r2,ror#7 add r9,r9,r3 @ h+=Maj(a,b,c) from the past mov r3,r1,ror#17 eor r0,r0,r2,ror#18 eor r3,r3,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#3*4] eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#12*4] add r3,r3,r0 eor r0,r5,r5,ror#5 @ from BODY_00_15 add r2,r2,r3 eor r0,r0,r5,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r3,[r14],#4 @ *K256++ add r8,r8,r2 @ h+=X[i] str r2,[sp,#3*4] eor r2,r6,r7 add r8,r8,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r5 add r8,r8,r3 @ h+=K256[i] eor r2,r2,r7 @ Ch(e,f,g) eor r0,r9,r9,ror#11 add r8,r8,r2 @ h+=Ch(e,f,g) #if 19==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? #endif #if 19<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r9,r10 @ a^b, b^c in next round #else ldr r2,[sp,#5*4] @ from future BODY_16_xx eor r3,r9,r10 @ a^b, b^c in next round ldr r1,[sp,#2*4] @ from future BODY_16_xx #endif eor r0,r0,r9,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r4,r4,r8 @ d+=h eor r12,r12,r10 @ Maj(a,b,c) add r8,r8,r0,ror#2 @ h+=Sigma0(a) @ add r8,r8,r12 @ h+=Maj(a,b,c) @ ldr r2,[sp,#5*4] @ 20 @ ldr r1,[sp,#2*4] mov r0,r2,ror#7 add r8,r8,r12 @ h+=Maj(a,b,c) from the past mov r12,r1,ror#17 eor r0,r0,r2,ror#18 eor r12,r12,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#4*4] eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#13*4] add r12,r12,r0 eor r0,r4,r4,ror#5 @ from BODY_00_15 add r2,r2,r12 eor r0,r0,r4,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r12,[r14],#4 @ *K256++ add r7,r7,r2 @ h+=X[i] str r2,[sp,#4*4] eor r2,r5,r6 add r7,r7,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r4 add r7,r7,r12 @ h+=K256[i] eor r2,r2,r6 @ Ch(e,f,g) eor r0,r8,r8,ror#11 add r7,r7,r2 @ h+=Ch(e,f,g) #if 20==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 20<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r8,r9 @ a^b, b^c in next round #else ldr r2,[sp,#6*4] @ from future BODY_16_xx eor r12,r8,r9 @ a^b, b^c in next round ldr r1,[sp,#3*4] @ from future BODY_16_xx #endif eor r0,r0,r8,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r11,r11,r7 @ d+=h eor r3,r3,r9 @ Maj(a,b,c) add r7,r7,r0,ror#2 @ h+=Sigma0(a) @ add r7,r7,r3 @ h+=Maj(a,b,c) @ ldr r2,[sp,#6*4] @ 21 @ ldr r1,[sp,#3*4] mov r0,r2,ror#7 add r7,r7,r3 @ h+=Maj(a,b,c) from the past mov r3,r1,ror#17 eor r0,r0,r2,ror#18 eor r3,r3,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#5*4] eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#14*4] add r3,r3,r0 eor r0,r11,r11,ror#5 @ from BODY_00_15 add r2,r2,r3 eor r0,r0,r11,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r3,[r14],#4 @ *K256++ add r6,r6,r2 @ h+=X[i] str r2,[sp,#5*4] eor r2,r4,r5 add r6,r6,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r11 add r6,r6,r3 @ h+=K256[i] eor r2,r2,r5 @ Ch(e,f,g) eor r0,r7,r7,ror#11 add r6,r6,r2 @ h+=Ch(e,f,g) #if 21==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 21<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r7,r8 @ a^b, b^c in next round #else ldr r2,[sp,#7*4] @ from future BODY_16_xx eor r3,r7,r8 @ a^b, b^c in next round ldr r1,[sp,#4*4] @ from future BODY_16_xx #endif eor r0,r0,r7,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r10,r10,r6 @ d+=h eor r12,r12,r8 @ Maj(a,b,c) add r6,r6,r0,ror#2 @ h+=Sigma0(a) @ add r6,r6,r12 @ h+=Maj(a,b,c) @ ldr r2,[sp,#7*4] @ 22 @ ldr r1,[sp,#4*4] mov r0,r2,ror#7 add r6,r6,r12 @ h+=Maj(a,b,c) from the past mov r12,r1,ror#17 eor r0,r0,r2,ror#18 eor r12,r12,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#6*4] eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#15*4] add r12,r12,r0 eor r0,r10,r10,ror#5 @ from BODY_00_15 add r2,r2,r12 eor r0,r0,r10,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r12,[r14],#4 @ *K256++ add r5,r5,r2 @ h+=X[i] str r2,[sp,#6*4] eor r2,r11,r4 add r5,r5,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r10 add r5,r5,r12 @ h+=K256[i] eor r2,r2,r4 @ Ch(e,f,g) eor r0,r6,r6,ror#11 add r5,r5,r2 @ h+=Ch(e,f,g) #if 22==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 22<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r6,r7 @ a^b, b^c in next round #else ldr r2,[sp,#8*4] @ from future BODY_16_xx eor r12,r6,r7 @ a^b, b^c in next round ldr r1,[sp,#5*4] @ from future BODY_16_xx #endif eor r0,r0,r6,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r9,r9,r5 @ d+=h eor r3,r3,r7 @ Maj(a,b,c) add r5,r5,r0,ror#2 @ h+=Sigma0(a) @ add r5,r5,r3 @ h+=Maj(a,b,c) @ ldr r2,[sp,#8*4] @ 23 @ ldr r1,[sp,#5*4] mov r0,r2,ror#7 add r5,r5,r3 @ h+=Maj(a,b,c) from the past mov r3,r1,ror#17 eor r0,r0,r2,ror#18 eor r3,r3,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#7*4] eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#0*4] add r3,r3,r0 eor r0,r9,r9,ror#5 @ from BODY_00_15 add r2,r2,r3 eor r0,r0,r9,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r3,[r14],#4 @ *K256++ add r4,r4,r2 @ h+=X[i] str r2,[sp,#7*4] eor r2,r10,r11 add r4,r4,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r9 add r4,r4,r3 @ h+=K256[i] eor r2,r2,r11 @ Ch(e,f,g) eor r0,r5,r5,ror#11 add r4,r4,r2 @ h+=Ch(e,f,g) #if 23==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? #endif #if 23<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r5,r6 @ a^b, b^c in next round #else ldr r2,[sp,#9*4] @ from future BODY_16_xx eor r3,r5,r6 @ a^b, b^c in next round ldr r1,[sp,#6*4] @ from future BODY_16_xx #endif eor r0,r0,r5,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r8,r8,r4 @ d+=h eor r12,r12,r6 @ Maj(a,b,c) add r4,r4,r0,ror#2 @ h+=Sigma0(a) @ add r4,r4,r12 @ h+=Maj(a,b,c) @ ldr r2,[sp,#9*4] @ 24 @ ldr r1,[sp,#6*4] mov r0,r2,ror#7 add r4,r4,r12 @ h+=Maj(a,b,c) from the past mov r12,r1,ror#17 eor r0,r0,r2,ror#18 eor r12,r12,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#8*4] eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#1*4] add r12,r12,r0 eor r0,r8,r8,ror#5 @ from BODY_00_15 add r2,r2,r12 eor r0,r0,r8,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r12,[r14],#4 @ *K256++ add r11,r11,r2 @ h+=X[i] str r2,[sp,#8*4] eor r2,r9,r10 add r11,r11,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r8 add r11,r11,r12 @ h+=K256[i] eor r2,r2,r10 @ Ch(e,f,g) eor r0,r4,r4,ror#11 add r11,r11,r2 @ h+=Ch(e,f,g) #if 24==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? 
#endif #if 24<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r4,r5 @ a^b, b^c in next round #else ldr r2,[sp,#10*4] @ from future BODY_16_xx eor r12,r4,r5 @ a^b, b^c in next round ldr r1,[sp,#7*4] @ from future BODY_16_xx #endif eor r0,r0,r4,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r7,r7,r11 @ d+=h eor r3,r3,r5 @ Maj(a,b,c) add r11,r11,r0,ror#2 @ h+=Sigma0(a) @ add r11,r11,r3 @ h+=Maj(a,b,c) @ ldr r2,[sp,#10*4] @ 25 @ ldr r1,[sp,#7*4] mov r0,r2,ror#7 add r11,r11,r3 @ h+=Maj(a,b,c) from the past mov r3,r1,ror#17 eor r0,r0,r2,ror#18 eor r3,r3,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#9*4] eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#2*4] add r3,r3,r0 eor r0,r7,r7,ror#5 @ from BODY_00_15 add r2,r2,r3 eor r0,r0,r7,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r3,[r14],#4 @ *K256++ add r10,r10,r2 @ h+=X[i] str r2,[sp,#9*4] eor r2,r8,r9 add r10,r10,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r7 add r10,r10,r3 @ h+=K256[i] eor r2,r2,r9 @ Ch(e,f,g) eor r0,r11,r11,ror#11 add r10,r10,r2 @ h+=Ch(e,f,g) #if 25==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? #endif #if 25<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r11,r4 @ a^b, b^c in next round #else ldr r2,[sp,#11*4] @ from future BODY_16_xx eor r3,r11,r4 @ a^b, b^c in next round ldr r1,[sp,#8*4] @ from future BODY_16_xx #endif eor r0,r0,r11,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r6,r6,r10 @ d+=h eor r12,r12,r4 @ Maj(a,b,c) add r10,r10,r0,ror#2 @ h+=Sigma0(a) @ add r10,r10,r12 @ h+=Maj(a,b,c) @ ldr r2,[sp,#11*4] @ 26 @ ldr r1,[sp,#8*4] mov r0,r2,ror#7 add r10,r10,r12 @ h+=Maj(a,b,c) from the past mov r12,r1,ror#17 eor r0,r0,r2,ror#18 eor r12,r12,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#10*4] eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#3*4] add r12,r12,r0 eor r0,r6,r6,ror#5 @ from BODY_00_15 add r2,r2,r12 eor r0,r0,r6,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r12,[r14],#4 @ *K256++ add r9,r9,r2 @ h+=X[i] str r2,[sp,#10*4] eor r2,r7,r8 add r9,r9,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r6 add r9,r9,r12 @ h+=K256[i] eor r2,r2,r8 @ Ch(e,f,g) eor r0,r10,r10,ror#11 add r9,r9,r2 @ h+=Ch(e,f,g) #if 26==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 26<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r10,r11 @ a^b, b^c in next round #else ldr r2,[sp,#12*4] @ from future BODY_16_xx eor r12,r10,r11 @ a^b, b^c in next round ldr r1,[sp,#9*4] @ from future BODY_16_xx #endif eor r0,r0,r10,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r5,r5,r9 @ d+=h eor r3,r3,r11 @ Maj(a,b,c) add r9,r9,r0,ror#2 @ h+=Sigma0(a) @ add r9,r9,r3 @ h+=Maj(a,b,c) @ ldr r2,[sp,#12*4] @ 27 @ ldr r1,[sp,#9*4] mov r0,r2,ror#7 add r9,r9,r3 @ h+=Maj(a,b,c) from the past mov r3,r1,ror#17 eor r0,r0,r2,ror#18 eor r3,r3,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#11*4] eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#4*4] add r3,r3,r0 eor r0,r5,r5,ror#5 @ from BODY_00_15 add r2,r2,r3 eor r0,r0,r5,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r3,[r14],#4 @ *K256++ add r8,r8,r2 @ h+=X[i] str r2,[sp,#11*4] eor r2,r6,r7 add r8,r8,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r5 add r8,r8,r3 @ h+=K256[i] eor r2,r2,r7 @ Ch(e,f,g) eor r0,r9,r9,ror#11 add r8,r8,r2 @ h+=Ch(e,f,g) #if 27==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 27<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r9,r10 @ a^b, b^c in next round #else ldr r2,[sp,#13*4] @ from future BODY_16_xx eor r3,r9,r10 @ a^b, b^c in next round ldr r1,[sp,#10*4] @ from future BODY_16_xx #endif eor r0,r0,r9,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r4,r4,r8 @ d+=h eor r12,r12,r10 @ Maj(a,b,c) add r8,r8,r0,ror#2 @ h+=Sigma0(a) @ add r8,r8,r12 @ h+=Maj(a,b,c) @ ldr r2,[sp,#13*4] @ 28 @ ldr r1,[sp,#10*4] mov r0,r2,ror#7 add r8,r8,r12 @ h+=Maj(a,b,c) from the past mov r12,r1,ror#17 eor r0,r0,r2,ror#18 eor r12,r12,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#12*4] eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#5*4] add r12,r12,r0 eor r0,r4,r4,ror#5 @ from BODY_00_15 add r2,r2,r12 eor r0,r0,r4,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r12,[r14],#4 @ *K256++ add r7,r7,r2 @ h+=X[i] str r2,[sp,#12*4] eor r2,r5,r6 add r7,r7,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r4 add r7,r7,r12 @ h+=K256[i] eor r2,r2,r6 @ Ch(e,f,g) eor r0,r8,r8,ror#11 add r7,r7,r2 @ h+=Ch(e,f,g) #if 28==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 28<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r8,r9 @ a^b, b^c in next round #else ldr r2,[sp,#14*4] @ from future BODY_16_xx eor r12,r8,r9 @ a^b, b^c in next round ldr r1,[sp,#11*4] @ from future BODY_16_xx #endif eor r0,r0,r8,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r11,r11,r7 @ d+=h eor r3,r3,r9 @ Maj(a,b,c) add r7,r7,r0,ror#2 @ h+=Sigma0(a) @ add r7,r7,r3 @ h+=Maj(a,b,c) @ ldr r2,[sp,#14*4] @ 29 @ ldr r1,[sp,#11*4] mov r0,r2,ror#7 add r7,r7,r3 @ h+=Maj(a,b,c) from the past mov r3,r1,ror#17 eor r0,r0,r2,ror#18 eor r3,r3,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#13*4] eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#6*4] add r3,r3,r0 eor r0,r11,r11,ror#5 @ from BODY_00_15 add r2,r2,r3 eor r0,r0,r11,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r3,[r14],#4 @ *K256++ add r6,r6,r2 @ h+=X[i] str r2,[sp,#13*4] eor r2,r4,r5 add r6,r6,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r11 add r6,r6,r3 @ h+=K256[i] eor r2,r2,r5 @ Ch(e,f,g) eor r0,r7,r7,ror#11 add r6,r6,r2 @ h+=Ch(e,f,g) #if 29==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? #endif #if 29<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r7,r8 @ a^b, b^c in next round #else ldr r2,[sp,#15*4] @ from future BODY_16_xx eor r3,r7,r8 @ a^b, b^c in next round ldr r1,[sp,#12*4] @ from future BODY_16_xx #endif eor r0,r0,r7,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r10,r10,r6 @ d+=h eor r12,r12,r8 @ Maj(a,b,c) add r6,r6,r0,ror#2 @ h+=Sigma0(a) @ add r6,r6,r12 @ h+=Maj(a,b,c) @ ldr r2,[sp,#15*4] @ 30 @ ldr r1,[sp,#12*4] mov r0,r2,ror#7 add r6,r6,r12 @ h+=Maj(a,b,c) from the past mov r12,r1,ror#17 eor r0,r0,r2,ror#18 eor r12,r12,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#14*4] eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#7*4] add r12,r12,r0 eor r0,r10,r10,ror#5 @ from BODY_00_15 add r2,r2,r12 eor r0,r0,r10,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r12,[r14],#4 @ *K256++ add r5,r5,r2 @ h+=X[i] str r2,[sp,#14*4] eor r2,r11,r4 add r5,r5,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r10 add r5,r5,r12 @ h+=K256[i] eor r2,r2,r4 @ Ch(e,f,g) eor r0,r6,r6,ror#11 add r5,r5,r2 @ h+=Ch(e,f,g) #if 30==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? 
#endif #if 30<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r6,r7 @ a^b, b^c in next round #else ldr r2,[sp,#0*4] @ from future BODY_16_xx eor r12,r6,r7 @ a^b, b^c in next round ldr r1,[sp,#13*4] @ from future BODY_16_xx #endif eor r0,r0,r6,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r9,r9,r5 @ d+=h eor r3,r3,r7 @ Maj(a,b,c) add r5,r5,r0,ror#2 @ h+=Sigma0(a) @ add r5,r5,r3 @ h+=Maj(a,b,c) @ ldr r2,[sp,#0*4] @ 31 @ ldr r1,[sp,#13*4] mov r0,r2,ror#7 add r5,r5,r3 @ h+=Maj(a,b,c) from the past mov r3,r1,ror#17 eor r0,r0,r2,ror#18 eor r3,r3,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#15*4] eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#8*4] add r3,r3,r0 eor r0,r9,r9,ror#5 @ from BODY_00_15 add r2,r2,r3 eor r0,r0,r9,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r3,[r14],#4 @ *K256++ add r4,r4,r2 @ h+=X[i] str r2,[sp,#15*4] eor r2,r10,r11 add r4,r4,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r9 add r4,r4,r3 @ h+=K256[i] eor r2,r2,r11 @ Ch(e,f,g) eor r0,r5,r5,ror#11 add r4,r4,r2 @ h+=Ch(e,f,g) #if 31==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? #endif #if 31<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r5,r6 @ a^b, b^c in next round #else ldr r2,[sp,#1*4] @ from future BODY_16_xx eor r3,r5,r6 @ a^b, b^c in next round ldr r1,[sp,#14*4] @ from future BODY_16_xx #endif eor r0,r0,r5,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r8,r8,r4 @ d+=h eor r12,r12,r6 @ Maj(a,b,c) add r4,r4,r0,ror#2 @ h+=Sigma0(a) @ add r4,r4,r12 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 ite eq @ Thumb2 thing, sanity check in ARM #endif ldreq r3,[sp,#16*4] @ pull ctx bne Lrounds_16_xx add r4,r4,r12 @ h+=Maj(a,b,c) from the past ldr r0,[r3,#0] ldr r2,[r3,#4] ldr r12,[r3,#8] add r4,r4,r0 ldr r0,[r3,#12] add r5,r5,r2 ldr r2,[r3,#16] add r6,r6,r12 ldr r12,[r3,#20] add r7,r7,r0 ldr r0,[r3,#24] add r8,r8,r2 ldr r2,[r3,#28] add r9,r9,r12 ldr r1,[sp,#17*4] @ pull inp ldr r12,[sp,#18*4] @ pull inp+len add r10,r10,r0 add r11,r11,r2 stmia r3,{r4,r5,r6,r7,r8,r9,r10,r11} cmp r1,r12 sub r14,r14,#256 @ rewind Ktbl bne Loop add sp,sp,#19*4 @ destroy frame #if __ARM_ARCH>=5 ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc} #else ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr} tst lr,#1 moveq pc,lr @ be binary compatible with V4, yet .word 0xe12fff1e @ interoperable with Thumb ISA:-) #endif #if __ARM_MAX_ARCH__>=7 LK256_shortcut_neon: @ PC is 8 bytes ahead in Arm mode and 4 bytes ahead in Thumb mode. #if defined(__thumb2__) .word K256-(LK256_add_neon+4) #else .word K256-(LK256_add_neon+8) #endif .globl _sha256_block_data_order_neon .private_extern _sha256_block_data_order_neon #ifdef __thumb2__ .thumb_func _sha256_block_data_order_neon #endif .align 5 .skip 16 _sha256_block_data_order_neon: stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} sub r11,sp,#16*4+16 @ K256 is just at the boundary of being easily referenced by an ADR from @ this function. In Arm mode, when building with __ARM_ARCH=6, it does @ not fit. By moving code around, we could make it fit, but this is too @ fragile. For simplicity, just load the offset from @ .LK256_shortcut_neon. @ @ TODO(davidben): adrl would avoid a load, but clang-assembler does not @ support it. We might be able to emulate it with a macro, but Android's @ did not work when I tried it. 
@ https://android.googlesource.com/platform/ndk/+/refs/heads/master/docs/ClangMigration.md#arm ldr r14,LK256_shortcut_neon LK256_add_neon: add r14,pc,r14 bic r11,r11,#15 @ align for 128-bit stores mov r12,sp mov sp,r11 @ alloca add r2,r1,r2,lsl#6 @ len to point at the end of inp vld1.8 {q0},[r1]! vld1.8 {q1},[r1]! vld1.8 {q2},[r1]! vld1.8 {q3},[r1]! vld1.32 {q8},[r14,:128]! vld1.32 {q9},[r14,:128]! vld1.32 {q10},[r14,:128]! vld1.32 {q11},[r14,:128]! vrev32.8 q0,q0 @ yes, even on str r0,[sp,#64] vrev32.8 q1,q1 @ big-endian str r1,[sp,#68] mov r1,sp vrev32.8 q2,q2 str r2,[sp,#72] vrev32.8 q3,q3 str r12,[sp,#76] @ save original sp vadd.i32 q8,q8,q0 vadd.i32 q9,q9,q1 vst1.32 {q8},[r1,:128]! vadd.i32 q10,q10,q2 vst1.32 {q9},[r1,:128]! vadd.i32 q11,q11,q3 vst1.32 {q10},[r1,:128]! vst1.32 {q11},[r1,:128]! ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11} sub r1,r1,#64 ldr r2,[sp,#0] eor r12,r12,r12 eor r3,r5,r6 b L_00_48 .align 4 L_00_48: vext.8 q8,q0,q1,#4 add r11,r11,r2 eor r2,r9,r10 eor r0,r8,r8,ror#5 vext.8 q9,q2,q3,#4 add r4,r4,r12 and r2,r2,r8 eor r12,r0,r8,ror#19 vshr.u32 q10,q8,#7 eor r0,r4,r4,ror#11 eor r2,r2,r10 vadd.i32 q0,q0,q9 add r11,r11,r12,ror#6 eor r12,r4,r5 vshr.u32 q9,q8,#3 eor r0,r0,r4,ror#20 add r11,r11,r2 vsli.32 q10,q8,#25 ldr r2,[sp,#4] and r3,r3,r12 vshr.u32 q11,q8,#18 add r7,r7,r11 add r11,r11,r0,ror#2 eor r3,r3,r5 veor q9,q9,q10 add r10,r10,r2 vsli.32 q11,q8,#14 eor r2,r8,r9 eor r0,r7,r7,ror#5 vshr.u32 d24,d7,#17 add r11,r11,r3 and r2,r2,r7 veor q9,q9,q11 eor r3,r0,r7,ror#19 eor r0,r11,r11,ror#11 vsli.32 d24,d7,#15 eor r2,r2,r9 add r10,r10,r3,ror#6 vshr.u32 d25,d7,#10 eor r3,r11,r4 eor r0,r0,r11,ror#20 vadd.i32 q0,q0,q9 add r10,r10,r2 ldr r2,[sp,#8] veor d25,d25,d24 and r12,r12,r3 add r6,r6,r10 vshr.u32 d24,d7,#19 add r10,r10,r0,ror#2 eor r12,r12,r4 vsli.32 d24,d7,#13 add r9,r9,r2 eor r2,r7,r8 veor d25,d25,d24 eor r0,r6,r6,ror#5 add r10,r10,r12 vadd.i32 d0,d0,d25 and r2,r2,r6 eor r12,r0,r6,ror#19 vshr.u32 d24,d0,#17 eor r0,r10,r10,ror#11 eor r2,r2,r8 vsli.32 d24,d0,#15 add r9,r9,r12,ror#6 eor r12,r10,r11 vshr.u32 d25,d0,#10 eor r0,r0,r10,ror#20 add r9,r9,r2 veor d25,d25,d24 ldr r2,[sp,#12] and r3,r3,r12 vshr.u32 d24,d0,#19 add r5,r5,r9 add r9,r9,r0,ror#2 eor r3,r3,r11 vld1.32 {q8},[r14,:128]! add r8,r8,r2 vsli.32 d24,d0,#13 eor r2,r6,r7 eor r0,r5,r5,ror#5 veor d25,d25,d24 add r9,r9,r3 and r2,r2,r5 vadd.i32 d1,d1,d25 eor r3,r0,r5,ror#19 eor r0,r9,r9,ror#11 vadd.i32 q8,q8,q0 eor r2,r2,r7 add r8,r8,r3,ror#6 eor r3,r9,r10 eor r0,r0,r9,ror#20 add r8,r8,r2 ldr r2,[sp,#16] and r12,r12,r3 add r4,r4,r8 vst1.32 {q8},[r1,:128]! 
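@ Added note (not generator output) on the LK256_shortcut_neon load used above,
@ assuming Arm mode, where reading pc yields the current instruction + 8:
@   .word K256-(LK256_add_neon+8)      @ offset stored at LK256_shortcut_neon
@   LK256_add_neon: add r14,pc,r14     @ pc reads as LK256_add_neon+8,
@                                      @ so r14 = K256 wherever the code is loaded
@ In Thumb mode the pipeline offset is 4, hence the alternative .word value.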
add r8,r8,r0,ror#2 eor r12,r12,r10 vext.8 q8,q1,q2,#4 add r7,r7,r2 eor r2,r5,r6 eor r0,r4,r4,ror#5 vext.8 q9,q3,q0,#4 add r8,r8,r12 and r2,r2,r4 eor r12,r0,r4,ror#19 vshr.u32 q10,q8,#7 eor r0,r8,r8,ror#11 eor r2,r2,r6 vadd.i32 q1,q1,q9 add r7,r7,r12,ror#6 eor r12,r8,r9 vshr.u32 q9,q8,#3 eor r0,r0,r8,ror#20 add r7,r7,r2 vsli.32 q10,q8,#25 ldr r2,[sp,#20] and r3,r3,r12 vshr.u32 q11,q8,#18 add r11,r11,r7 add r7,r7,r0,ror#2 eor r3,r3,r9 veor q9,q9,q10 add r6,r6,r2 vsli.32 q11,q8,#14 eor r2,r4,r5 eor r0,r11,r11,ror#5 vshr.u32 d24,d1,#17 add r7,r7,r3 and r2,r2,r11 veor q9,q9,q11 eor r3,r0,r11,ror#19 eor r0,r7,r7,ror#11 vsli.32 d24,d1,#15 eor r2,r2,r5 add r6,r6,r3,ror#6 vshr.u32 d25,d1,#10 eor r3,r7,r8 eor r0,r0,r7,ror#20 vadd.i32 q1,q1,q9 add r6,r6,r2 ldr r2,[sp,#24] veor d25,d25,d24 and r12,r12,r3 add r10,r10,r6 vshr.u32 d24,d1,#19 add r6,r6,r0,ror#2 eor r12,r12,r8 vsli.32 d24,d1,#13 add r5,r5,r2 eor r2,r11,r4 veor d25,d25,d24 eor r0,r10,r10,ror#5 add r6,r6,r12 vadd.i32 d2,d2,d25 and r2,r2,r10 eor r12,r0,r10,ror#19 vshr.u32 d24,d2,#17 eor r0,r6,r6,ror#11 eor r2,r2,r4 vsli.32 d24,d2,#15 add r5,r5,r12,ror#6 eor r12,r6,r7 vshr.u32 d25,d2,#10 eor r0,r0,r6,ror#20 add r5,r5,r2 veor d25,d25,d24 ldr r2,[sp,#28] and r3,r3,r12 vshr.u32 d24,d2,#19 add r9,r9,r5 add r5,r5,r0,ror#2 eor r3,r3,r7 vld1.32 {q8},[r14,:128]! add r4,r4,r2 vsli.32 d24,d2,#13 eor r2,r10,r11 eor r0,r9,r9,ror#5 veor d25,d25,d24 add r5,r5,r3 and r2,r2,r9 vadd.i32 d3,d3,d25 eor r3,r0,r9,ror#19 eor r0,r5,r5,ror#11 vadd.i32 q8,q8,q1 eor r2,r2,r11 add r4,r4,r3,ror#6 eor r3,r5,r6 eor r0,r0,r5,ror#20 add r4,r4,r2 ldr r2,[sp,#32] and r12,r12,r3 add r8,r8,r4 vst1.32 {q8},[r1,:128]! add r4,r4,r0,ror#2 eor r12,r12,r6 vext.8 q8,q2,q3,#4 add r11,r11,r2 eor r2,r9,r10 eor r0,r8,r8,ror#5 vext.8 q9,q0,q1,#4 add r4,r4,r12 and r2,r2,r8 eor r12,r0,r8,ror#19 vshr.u32 q10,q8,#7 eor r0,r4,r4,ror#11 eor r2,r2,r10 vadd.i32 q2,q2,q9 add r11,r11,r12,ror#6 eor r12,r4,r5 vshr.u32 q9,q8,#3 eor r0,r0,r4,ror#20 add r11,r11,r2 vsli.32 q10,q8,#25 ldr r2,[sp,#36] and r3,r3,r12 vshr.u32 q11,q8,#18 add r7,r7,r11 add r11,r11,r0,ror#2 eor r3,r3,r5 veor q9,q9,q10 add r10,r10,r2 vsli.32 q11,q8,#14 eor r2,r8,r9 eor r0,r7,r7,ror#5 vshr.u32 d24,d3,#17 add r11,r11,r3 and r2,r2,r7 veor q9,q9,q11 eor r3,r0,r7,ror#19 eor r0,r11,r11,ror#11 vsli.32 d24,d3,#15 eor r2,r2,r9 add r10,r10,r3,ror#6 vshr.u32 d25,d3,#10 eor r3,r11,r4 eor r0,r0,r11,ror#20 vadd.i32 q2,q2,q9 add r10,r10,r2 ldr r2,[sp,#40] veor d25,d25,d24 and r12,r12,r3 add r6,r6,r10 vshr.u32 d24,d3,#19 add r10,r10,r0,ror#2 eor r12,r12,r4 vsli.32 d24,d3,#13 add r9,r9,r2 eor r2,r7,r8 veor d25,d25,d24 eor r0,r6,r6,ror#5 add r10,r10,r12 vadd.i32 d4,d4,d25 and r2,r2,r6 eor r12,r0,r6,ror#19 vshr.u32 d24,d4,#17 eor r0,r10,r10,ror#11 eor r2,r2,r8 vsli.32 d24,d4,#15 add r9,r9,r12,ror#6 eor r12,r10,r11 vshr.u32 d25,d4,#10 eor r0,r0,r10,ror#20 add r9,r9,r2 veor d25,d25,d24 ldr r2,[sp,#44] and r3,r3,r12 vshr.u32 d24,d4,#19 add r5,r5,r9 add r9,r9,r0,ror#2 eor r3,r3,r11 vld1.32 {q8},[r14,:128]! add r8,r8,r2 vsli.32 d24,d4,#13 eor r2,r6,r7 eor r0,r5,r5,ror#5 veor d25,d25,d24 add r9,r9,r3 and r2,r2,r5 vadd.i32 d5,d5,d25 eor r3,r0,r5,ror#19 eor r0,r9,r9,ror#11 vadd.i32 q8,q8,q2 eor r2,r2,r7 add r8,r8,r3,ror#6 eor r3,r9,r10 eor r0,r0,r9,ror#20 add r8,r8,r2 ldr r2,[sp,#48] and r12,r12,r3 add r4,r4,r8 vst1.32 {q8},[r1,:128]! 
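@ Added note (not generator output): NEON has no vector rotate-right, so the
@ message-schedule code in this loop synthesizes ROTR from a shift pair, e.g.
@   vshr.u32 q10,q8,#7     @ q10 = x >> 7
@   vsli.32  q10,q8,#25    @ insert x << 25 above it  => q10 = ROTR7(x)
@ sigma0(x) then XORs ROTR7 (as above), ROTR18 (vshr #18 + vsli #14) and x >> 3.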
add r8,r8,r0,ror#2 eor r12,r12,r10 vext.8 q8,q3,q0,#4 add r7,r7,r2 eor r2,r5,r6 eor r0,r4,r4,ror#5 vext.8 q9,q1,q2,#4 add r8,r8,r12 and r2,r2,r4 eor r12,r0,r4,ror#19 vshr.u32 q10,q8,#7 eor r0,r8,r8,ror#11 eor r2,r2,r6 vadd.i32 q3,q3,q9 add r7,r7,r12,ror#6 eor r12,r8,r9 vshr.u32 q9,q8,#3 eor r0,r0,r8,ror#20 add r7,r7,r2 vsli.32 q10,q8,#25 ldr r2,[sp,#52] and r3,r3,r12 vshr.u32 q11,q8,#18 add r11,r11,r7 add r7,r7,r0,ror#2 eor r3,r3,r9 veor q9,q9,q10 add r6,r6,r2 vsli.32 q11,q8,#14 eor r2,r4,r5 eor r0,r11,r11,ror#5 vshr.u32 d24,d5,#17 add r7,r7,r3 and r2,r2,r11 veor q9,q9,q11 eor r3,r0,r11,ror#19 eor r0,r7,r7,ror#11 vsli.32 d24,d5,#15 eor r2,r2,r5 add r6,r6,r3,ror#6 vshr.u32 d25,d5,#10 eor r3,r7,r8 eor r0,r0,r7,ror#20 vadd.i32 q3,q3,q9 add r6,r6,r2 ldr r2,[sp,#56] veor d25,d25,d24 and r12,r12,r3 add r10,r10,r6 vshr.u32 d24,d5,#19 add r6,r6,r0,ror#2 eor r12,r12,r8 vsli.32 d24,d5,#13 add r5,r5,r2 eor r2,r11,r4 veor d25,d25,d24 eor r0,r10,r10,ror#5 add r6,r6,r12 vadd.i32 d6,d6,d25 and r2,r2,r10 eor r12,r0,r10,ror#19 vshr.u32 d24,d6,#17 eor r0,r6,r6,ror#11 eor r2,r2,r4 vsli.32 d24,d6,#15 add r5,r5,r12,ror#6 eor r12,r6,r7 vshr.u32 d25,d6,#10 eor r0,r0,r6,ror#20 add r5,r5,r2 veor d25,d25,d24 ldr r2,[sp,#60] and r3,r3,r12 vshr.u32 d24,d6,#19 add r9,r9,r5 add r5,r5,r0,ror#2 eor r3,r3,r7 vld1.32 {q8},[r14,:128]! add r4,r4,r2 vsli.32 d24,d6,#13 eor r2,r10,r11 eor r0,r9,r9,ror#5 veor d25,d25,d24 add r5,r5,r3 and r2,r2,r9 vadd.i32 d7,d7,d25 eor r3,r0,r9,ror#19 eor r0,r5,r5,ror#11 vadd.i32 q8,q8,q3 eor r2,r2,r11 add r4,r4,r3,ror#6 eor r3,r5,r6 eor r0,r0,r5,ror#20 add r4,r4,r2 ldr r2,[r14] and r12,r12,r3 add r8,r8,r4 vst1.32 {q8},[r1,:128]! add r4,r4,r0,ror#2 eor r12,r12,r6 teq r2,#0 @ check for K256 terminator ldr r2,[sp,#0] sub r1,r1,#64 bne L_00_48 ldr r1,[sp,#68] ldr r0,[sp,#72] sub r14,r14,#256 @ rewind r14 teq r1,r0 it eq subeq r1,r1,#64 @ avoid SEGV vld1.8 {q0},[r1]! @ load next input block vld1.8 {q1},[r1]! vld1.8 {q2},[r1]! vld1.8 {q3},[r1]! it ne strne r1,[sp,#68] mov r1,sp add r11,r11,r2 eor r2,r9,r10 eor r0,r8,r8,ror#5 add r4,r4,r12 vld1.32 {q8},[r14,:128]! and r2,r2,r8 eor r12,r0,r8,ror#19 eor r0,r4,r4,ror#11 eor r2,r2,r10 vrev32.8 q0,q0 add r11,r11,r12,ror#6 eor r12,r4,r5 eor r0,r0,r4,ror#20 add r11,r11,r2 vadd.i32 q8,q8,q0 ldr r2,[sp,#4] and r3,r3,r12 add r7,r7,r11 add r11,r11,r0,ror#2 eor r3,r3,r5 add r10,r10,r2 eor r2,r8,r9 eor r0,r7,r7,ror#5 add r11,r11,r3 and r2,r2,r7 eor r3,r0,r7,ror#19 eor r0,r11,r11,ror#11 eor r2,r2,r9 add r10,r10,r3,ror#6 eor r3,r11,r4 eor r0,r0,r11,ror#20 add r10,r10,r2 ldr r2,[sp,#8] and r12,r12,r3 add r6,r6,r10 add r10,r10,r0,ror#2 eor r12,r12,r4 add r9,r9,r2 eor r2,r7,r8 eor r0,r6,r6,ror#5 add r10,r10,r12 and r2,r2,r6 eor r12,r0,r6,ror#19 eor r0,r10,r10,ror#11 eor r2,r2,r8 add r9,r9,r12,ror#6 eor r12,r10,r11 eor r0,r0,r10,ror#20 add r9,r9,r2 ldr r2,[sp,#12] and r3,r3,r12 add r5,r5,r9 add r9,r9,r0,ror#2 eor r3,r3,r11 add r8,r8,r2 eor r2,r6,r7 eor r0,r5,r5,ror#5 add r9,r9,r3 and r2,r2,r5 eor r3,r0,r5,ror#19 eor r0,r9,r9,ror#11 eor r2,r2,r7 add r8,r8,r3,ror#6 eor r3,r9,r10 eor r0,r0,r9,ror#20 add r8,r8,r2 ldr r2,[sp,#16] and r12,r12,r3 add r4,r4,r8 add r8,r8,r0,ror#2 eor r12,r12,r10 vst1.32 {q8},[r1,:128]! add r7,r7,r2 eor r2,r5,r6 eor r0,r4,r4,ror#5 add r8,r8,r12 vld1.32 {q8},[r14,:128]! 
and r2,r2,r4 eor r12,r0,r4,ror#19 eor r0,r8,r8,ror#11 eor r2,r2,r6 vrev32.8 q1,q1 add r7,r7,r12,ror#6 eor r12,r8,r9 eor r0,r0,r8,ror#20 add r7,r7,r2 vadd.i32 q8,q8,q1 ldr r2,[sp,#20] and r3,r3,r12 add r11,r11,r7 add r7,r7,r0,ror#2 eor r3,r3,r9 add r6,r6,r2 eor r2,r4,r5 eor r0,r11,r11,ror#5 add r7,r7,r3 and r2,r2,r11 eor r3,r0,r11,ror#19 eor r0,r7,r7,ror#11 eor r2,r2,r5 add r6,r6,r3,ror#6 eor r3,r7,r8 eor r0,r0,r7,ror#20 add r6,r6,r2 ldr r2,[sp,#24] and r12,r12,r3 add r10,r10,r6 add r6,r6,r0,ror#2 eor r12,r12,r8 add r5,r5,r2 eor r2,r11,r4 eor r0,r10,r10,ror#5 add r6,r6,r12 and r2,r2,r10 eor r12,r0,r10,ror#19 eor r0,r6,r6,ror#11 eor r2,r2,r4 add r5,r5,r12,ror#6 eor r12,r6,r7 eor r0,r0,r6,ror#20 add r5,r5,r2 ldr r2,[sp,#28] and r3,r3,r12 add r9,r9,r5 add r5,r5,r0,ror#2 eor r3,r3,r7 add r4,r4,r2 eor r2,r10,r11 eor r0,r9,r9,ror#5 add r5,r5,r3 and r2,r2,r9 eor r3,r0,r9,ror#19 eor r0,r5,r5,ror#11 eor r2,r2,r11 add r4,r4,r3,ror#6 eor r3,r5,r6 eor r0,r0,r5,ror#20 add r4,r4,r2 ldr r2,[sp,#32] and r12,r12,r3 add r8,r8,r4 add r4,r4,r0,ror#2 eor r12,r12,r6 vst1.32 {q8},[r1,:128]! add r11,r11,r2 eor r2,r9,r10 eor r0,r8,r8,ror#5 add r4,r4,r12 vld1.32 {q8},[r14,:128]! and r2,r2,r8 eor r12,r0,r8,ror#19 eor r0,r4,r4,ror#11 eor r2,r2,r10 vrev32.8 q2,q2 add r11,r11,r12,ror#6 eor r12,r4,r5 eor r0,r0,r4,ror#20 add r11,r11,r2 vadd.i32 q8,q8,q2 ldr r2,[sp,#36] and r3,r3,r12 add r7,r7,r11 add r11,r11,r0,ror#2 eor r3,r3,r5 add r10,r10,r2 eor r2,r8,r9 eor r0,r7,r7,ror#5 add r11,r11,r3 and r2,r2,r7 eor r3,r0,r7,ror#19 eor r0,r11,r11,ror#11 eor r2,r2,r9 add r10,r10,r3,ror#6 eor r3,r11,r4 eor r0,r0,r11,ror#20 add r10,r10,r2 ldr r2,[sp,#40] and r12,r12,r3 add r6,r6,r10 add r10,r10,r0,ror#2 eor r12,r12,r4 add r9,r9,r2 eor r2,r7,r8 eor r0,r6,r6,ror#5 add r10,r10,r12 and r2,r2,r6 eor r12,r0,r6,ror#19 eor r0,r10,r10,ror#11 eor r2,r2,r8 add r9,r9,r12,ror#6 eor r12,r10,r11 eor r0,r0,r10,ror#20 add r9,r9,r2 ldr r2,[sp,#44] and r3,r3,r12 add r5,r5,r9 add r9,r9,r0,ror#2 eor r3,r3,r11 add r8,r8,r2 eor r2,r6,r7 eor r0,r5,r5,ror#5 add r9,r9,r3 and r2,r2,r5 eor r3,r0,r5,ror#19 eor r0,r9,r9,ror#11 eor r2,r2,r7 add r8,r8,r3,ror#6 eor r3,r9,r10 eor r0,r0,r9,ror#20 add r8,r8,r2 ldr r2,[sp,#48] and r12,r12,r3 add r4,r4,r8 add r8,r8,r0,ror#2 eor r12,r12,r10 vst1.32 {q8},[r1,:128]! add r7,r7,r2 eor r2,r5,r6 eor r0,r4,r4,ror#5 add r8,r8,r12 vld1.32 {q8},[r14,:128]! and r2,r2,r4 eor r12,r0,r4,ror#19 eor r0,r8,r8,ror#11 eor r2,r2,r6 vrev32.8 q3,q3 add r7,r7,r12,ror#6 eor r12,r8,r9 eor r0,r0,r8,ror#20 add r7,r7,r2 vadd.i32 q8,q8,q3 ldr r2,[sp,#52] and r3,r3,r12 add r11,r11,r7 add r7,r7,r0,ror#2 eor r3,r3,r9 add r6,r6,r2 eor r2,r4,r5 eor r0,r11,r11,ror#5 add r7,r7,r3 and r2,r2,r11 eor r3,r0,r11,ror#19 eor r0,r7,r7,ror#11 eor r2,r2,r5 add r6,r6,r3,ror#6 eor r3,r7,r8 eor r0,r0,r7,ror#20 add r6,r6,r2 ldr r2,[sp,#56] and r12,r12,r3 add r10,r10,r6 add r6,r6,r0,ror#2 eor r12,r12,r8 add r5,r5,r2 eor r2,r11,r4 eor r0,r10,r10,ror#5 add r6,r6,r12 and r2,r2,r10 eor r12,r0,r10,ror#19 eor r0,r6,r6,ror#11 eor r2,r2,r4 add r5,r5,r12,ror#6 eor r12,r6,r7 eor r0,r0,r6,ror#20 add r5,r5,r2 ldr r2,[sp,#60] and r3,r3,r12 add r9,r9,r5 add r5,r5,r0,ror#2 eor r3,r3,r7 add r4,r4,r2 eor r2,r10,r11 eor r0,r9,r9,ror#5 add r5,r5,r3 and r2,r2,r9 eor r3,r0,r9,ror#19 eor r0,r5,r5,ror#11 eor r2,r2,r11 add r4,r4,r3,ror#6 eor r3,r5,r6 eor r0,r0,r5,ror#20 add r4,r4,r2 ldr r2,[sp,#64] and r12,r12,r3 add r8,r8,r4 add r4,r4,r0,ror#2 eor r12,r12,r6 vst1.32 {q8},[r1,:128]! 
ldr r0,[r2,#0] add r4,r4,r12 @ h+=Maj(a,b,c) from the past ldr r12,[r2,#4] ldr r3,[r2,#8] ldr r1,[r2,#12] add r4,r4,r0 @ accumulate ldr r0,[r2,#16] add r5,r5,r12 ldr r12,[r2,#20] add r6,r6,r3 ldr r3,[r2,#24] add r7,r7,r1 ldr r1,[r2,#28] add r8,r8,r0 str r4,[r2],#4 add r9,r9,r12 str r5,[r2],#4 add r10,r10,r3 str r6,[r2],#4 add r11,r11,r1 str r7,[r2],#4 stmia r2,{r8,r9,r10,r11} ittte ne movne r1,sp ldrne r2,[sp,#0] eorne r12,r12,r12 ldreq sp,[sp,#76] @ restore original sp itt ne eorne r3,r5,r6 bne L_00_48 ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc} #endif #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) # if defined(__thumb2__) # define INST(a,b,c,d) .byte c,d|0xc,a,b # else # define INST(a,b,c,d) .byte a,b,c,d # endif LK256_shortcut_hw: @ PC is 8 bytes ahead in Arm mode and 4 bytes ahead in Thumb mode. #if defined(__thumb2__) .word K256-(LK256_add_hw+4) #else .word K256-(LK256_add_hw+8) #endif .globl _sha256_block_data_order_hw .private_extern _sha256_block_data_order_hw #ifdef __thumb2__ .thumb_func _sha256_block_data_order_hw #endif .align 5 _sha256_block_data_order_hw: @ K256 is too far to reference from one ADR command in Thumb mode. In @ Arm mode, we could make it fit by aligning the ADR offset to a 64-byte @ boundary. For simplicity, just load the offset from .LK256_shortcut_hw. ldr r3,LK256_shortcut_hw LK256_add_hw: add r3,pc,r3 vld1.32 {q0,q1},[r0] add r2,r1,r2,lsl#6 @ len to point at the end of inp b Loop_v8 .align 4 Loop_v8: vld1.8 {q8,q9},[r1]! vld1.8 {q10,q11},[r1]! vld1.32 {q12},[r3]! vrev32.8 q8,q8 vrev32.8 q9,q9 vrev32.8 q10,q10 vrev32.8 q11,q11 vmov q14,q0 @ offload vmov q15,q1 teq r1,r2 vld1.32 {q13},[r3]! vadd.i32 q12,q12,q8 INST(0xe2,0x03,0xfa,0xf3) @ sha256su0 q8,q9 vmov q2,q0 INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 INST(0xe6,0x0c,0x64,0xf3) @ sha256su1 q8,q10,q11 vld1.32 {q12},[r3]! vadd.i32 q13,q13,q9 INST(0xe4,0x23,0xfa,0xf3) @ sha256su0 q9,q10 vmov q2,q0 INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 INST(0xe0,0x2c,0x66,0xf3) @ sha256su1 q9,q11,q8 vld1.32 {q13},[r3]! vadd.i32 q12,q12,q10 INST(0xe6,0x43,0xfa,0xf3) @ sha256su0 q10,q11 vmov q2,q0 INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 INST(0xe2,0x4c,0x60,0xf3) @ sha256su1 q10,q8,q9 vld1.32 {q12},[r3]! vadd.i32 q13,q13,q11 INST(0xe0,0x63,0xfa,0xf3) @ sha256su0 q11,q8 vmov q2,q0 INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 INST(0xe4,0x6c,0x62,0xf3) @ sha256su1 q11,q9,q10 vld1.32 {q13},[r3]! vadd.i32 q12,q12,q8 INST(0xe2,0x03,0xfa,0xf3) @ sha256su0 q8,q9 vmov q2,q0 INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 INST(0xe6,0x0c,0x64,0xf3) @ sha256su1 q8,q10,q11 vld1.32 {q12},[r3]! vadd.i32 q13,q13,q9 INST(0xe4,0x23,0xfa,0xf3) @ sha256su0 q9,q10 vmov q2,q0 INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 INST(0xe0,0x2c,0x66,0xf3) @ sha256su1 q9,q11,q8 vld1.32 {q13},[r3]! vadd.i32 q12,q12,q10 INST(0xe6,0x43,0xfa,0xf3) @ sha256su0 q10,q11 vmov q2,q0 INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 INST(0xe2,0x4c,0x60,0xf3) @ sha256su1 q10,q8,q9 vld1.32 {q12},[r3]! vadd.i32 q13,q13,q11 INST(0xe0,0x63,0xfa,0xf3) @ sha256su0 q11,q8 vmov q2,q0 INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 INST(0xe4,0x6c,0x62,0xf3) @ sha256su1 q11,q9,q10 vld1.32 {q13},[r3]! 
vadd.i32 q12,q12,q8 INST(0xe2,0x03,0xfa,0xf3) @ sha256su0 q8,q9 vmov q2,q0 INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 INST(0xe6,0x0c,0x64,0xf3) @ sha256su1 q8,q10,q11 vld1.32 {q12},[r3]! vadd.i32 q13,q13,q9 INST(0xe4,0x23,0xfa,0xf3) @ sha256su0 q9,q10 vmov q2,q0 INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 INST(0xe0,0x2c,0x66,0xf3) @ sha256su1 q9,q11,q8 vld1.32 {q13},[r3]! vadd.i32 q12,q12,q10 INST(0xe6,0x43,0xfa,0xf3) @ sha256su0 q10,q11 vmov q2,q0 INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 INST(0xe2,0x4c,0x60,0xf3) @ sha256su1 q10,q8,q9 vld1.32 {q12},[r3]! vadd.i32 q13,q13,q11 INST(0xe0,0x63,0xfa,0xf3) @ sha256su0 q11,q8 vmov q2,q0 INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 INST(0xe4,0x6c,0x62,0xf3) @ sha256su1 q11,q9,q10 vld1.32 {q13},[r3]! vadd.i32 q12,q12,q8 vmov q2,q0 INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 vld1.32 {q12},[r3]! vadd.i32 q13,q13,q9 vmov q2,q0 INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 vld1.32 {q13},[r3] vadd.i32 q12,q12,q10 sub r3,r3,#256-16 @ rewind vmov q2,q0 INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 vadd.i32 q13,q13,q11 vmov q2,q0 INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 vadd.i32 q0,q0,q14 vadd.i32 q1,q1,q15 it ne bne Loop_v8 vst1.32 {q0,q1},[r0] bx lr @ bx lr #endif .byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,47,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 #endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__APPLE__)
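The hardware path that ends above encodes the ARMv8 SHA-256 instructions (sha256h, sha256h2, sha256su0, sha256su1) as raw .byte sequences through the INST macro so the file still assembles with pre-ARMv8 toolchains. As a hedged illustration only — not the routine above — the same quad-round pattern can be written with the ACLE intrinsics from <arm_neon.h>, assuming a compiler targeting the SHA-2 crypto extension (e.g. -march=armv8-a+crypto):

#include <arm_neon.h>  /* requires __ARM_FEATURE_SHA2, e.g. -march=armv8-a+crypto */

/* One quad-round of SHA-256, mirroring the vadd.i32 / sha256su0 / vmov /
 * sha256h / sha256h2 / sha256su1 pattern the INST macro encodes above.
 * state_abcd and state_efgh hold the eight working variables, w0..w3 the
 * current 16-word schedule, k the next four K256 constants. */
static inline void sha256_quad_round(uint32x4_t *state_abcd, uint32x4_t *state_efgh,
                                     uint32x4_t *w0, uint32x4_t w1, uint32x4_t w2,
                                     uint32x4_t w3, uint32x4_t k) {
    uint32x4_t wk   = vaddq_u32(*w0, k);              /* W[i..i+3] + K[i..i+3]      */
    uint32x4_t abcd = *state_abcd;                    /* offload, like vmov q2,q0   */
    *w0 = vsha256su0q_u32(*w0, w1);                   /* start schedule expansion   */
    *state_abcd = vsha256hq_u32(abcd, *state_efgh, wk);
    *state_efgh = vsha256h2q_u32(*state_efgh, abcd, wk);
    *w0 = vsha256su1q_u32(*w0, w2, w3);               /* finish schedule expansion  */
}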
marvin-hansen/iggy-streaming-system
42,677
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/ios-arm/crypto/fipsmodule/sha512-armv4.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__APPLE__) @ Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved. @ @ Licensed under the OpenSSL license (the "License"). You may not use @ this file except in compliance with the License. You can obtain a copy @ in the file LICENSE in the source distribution or at @ https://www.openssl.org/source/license.html @ ==================================================================== @ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL @ project. The module is, however, dual licensed under OpenSSL and @ CRYPTOGAMS licenses depending on where you obtain it. For further @ details see http://www.openssl.org/~appro/cryptogams/. @ @ Permission to use under GPL terms is granted. @ ==================================================================== @ SHA512 block procedure for ARMv4. September 2007. @ This code is ~4.5 (four and a half) times faster than code generated @ by gcc 3.4 and it spends ~72 clock cycles per byte [on single-issue @ Xscale PXA250 core]. @ @ July 2010. @ @ Rescheduling for dual-issue pipeline resulted in 6% improvement on @ Cortex A8 core and ~40 cycles per processed byte. @ February 2011. @ @ Profiler-assisted and platform-specific optimization resulted in 7% @ improvement on Coxtex A8 core and ~38 cycles per byte. @ March 2011. @ @ Add NEON implementation. On Cortex A8 it was measured to process @ one byte in 23.3 cycles or ~60% faster than integer-only code. @ August 2012. @ @ Improve NEON performance by 12% on Snapdragon S4. In absolute @ terms it's 22.6 cycles per byte, which is disappointing result. @ Technical writers asserted that 3-way S4 pipeline can sustain @ multiple NEON instructions per cycle, but dual NEON issue could @ not be observed, see http://www.openssl.org/~appro/Snapdragon-S4.html @ for further details. On side note Cortex-A15 processes one byte in @ 16 cycles. @ Byte order [in]dependence. ========================================= @ @ Originally caller was expected to maintain specific *dword* order in @ h[0-7], namely with most significant dword at *lower* address, which @ was reflected in below two parameters as 0 and 4. Now caller is @ expected to maintain native byte order for whole 64-bit values. #ifndef __KERNEL__ # include <openssl/arm_arch.h> # define VFP_ABI_PUSH vstmdb sp!,{d8-d15} # define VFP_ABI_POP vldmia sp!,{d8-d15} #else # define __ARM_MAX_ARCH__ 7 # define VFP_ABI_PUSH # define VFP_ABI_POP #endif @ Silence ARMv8 deprecated IT instruction warnings. This file is used by both @ ARMv7 and ARMv8 processors and does not use ARMv8 instructions. 
#ifdef __ARMEL__ # define LO 0 # define HI 4 # define WORD64(hi0,lo0,hi1,lo1) .word lo0,hi0, lo1,hi1 #else # define HI 0 # define LO 4 # define WORD64(hi0,lo0,hi1,lo1) .word hi0,lo0, hi1,lo1 #endif .text #if defined(__thumb2__) .syntax unified .thumb # define adrl adr #else .code 32 #endif .align 5 K512: WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd) WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc) WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019) WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118) WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe) WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2) WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1) WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694) WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3) WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65) WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483) WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5) WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210) WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4) WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725) WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70) WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926) WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df) WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8) WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b) WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001) WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30) WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910) WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8) WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53) WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8) WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb) WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3) WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60) WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec) WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9) WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b) WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207) WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178) WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6) WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b) WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493) WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c) WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a) WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817) .globl _sha512_block_data_order_nohw .private_extern _sha512_block_data_order_nohw #ifdef __thumb2__ .thumb_func _sha512_block_data_order_nohw #endif _sha512_block_data_order_nohw: add r2,r1,r2,lsl#7 @ len to point at the end of inp stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} adr r14,K512 sub sp,sp,#9*8 ldr r7,[r0,#32+LO] ldr r8,[r0,#32+HI] ldr r9, [r0,#48+LO] ldr r10, [r0,#48+HI] ldr r11, [r0,#56+LO] ldr r12, [r0,#56+HI] Loop: str r9, [sp,#48+0] str r10, [sp,#48+4] str r11, [sp,#56+0] str r12, [sp,#56+4] ldr r5,[r0,#0+LO] ldr r6,[r0,#0+HI] ldr r3,[r0,#8+LO] ldr r4,[r0,#8+HI] ldr r9, [r0,#16+LO] ldr r10, [r0,#16+HI] ldr r11, [r0,#24+LO] ldr r12, [r0,#24+HI] str r3,[sp,#8+0] str r4,[sp,#8+4] str r9, [sp,#16+0] str r10, [sp,#16+4] str r11, [sp,#24+0] str r12, [sp,#24+4] ldr r3,[r0,#40+LO] ldr r4,[r0,#40+HI] str r3,[sp,#40+0] str r4,[sp,#40+4] L00_15: #if __ARM_ARCH<7 ldrb r3,[r1,#7] ldrb r9, [r1,#6] ldrb r10, [r1,#5] ldrb r11, [r1,#4] ldrb r4,[r1,#3] ldrb r12, [r1,#2] orr r3,r3,r9,lsl#8 ldrb r9, [r1,#1] orr r3,r3,r10,lsl#16 ldrb r10, [r1],#8 orr r3,r3,r11,lsl#24 orr r4,r4,r12,lsl#8 orr r4,r4,r9,lsl#16 orr 
r4,r4,r10,lsl#24 #else ldr r3,[r1,#4] ldr r4,[r1],#8 #ifdef __ARMEL__ rev r3,r3 rev r4,r4 #endif #endif @ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41)) @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23 @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23 mov r9,r7,lsr#14 str r3,[sp,#64+0] mov r10,r8,lsr#14 str r4,[sp,#64+4] eor r9,r9,r8,lsl#18 ldr r11,[sp,#56+0] @ h.lo eor r10,r10,r7,lsl#18 ldr r12,[sp,#56+4] @ h.hi eor r9,r9,r7,lsr#18 eor r10,r10,r8,lsr#18 eor r9,r9,r8,lsl#14 eor r10,r10,r7,lsl#14 eor r9,r9,r8,lsr#9 eor r10,r10,r7,lsr#9 eor r9,r9,r7,lsl#23 eor r10,r10,r8,lsl#23 @ Sigma1(e) adds r3,r3,r9 ldr r9,[sp,#40+0] @ f.lo adc r4,r4,r10 @ T += Sigma1(e) ldr r10,[sp,#40+4] @ f.hi adds r3,r3,r11 ldr r11,[sp,#48+0] @ g.lo adc r4,r4,r12 @ T += h ldr r12,[sp,#48+4] @ g.hi eor r9,r9,r11 str r7,[sp,#32+0] eor r10,r10,r12 str r8,[sp,#32+4] and r9,r9,r7 str r5,[sp,#0+0] and r10,r10,r8 str r6,[sp,#0+4] eor r9,r9,r11 ldr r11,[r14,#LO] @ K[i].lo eor r10,r10,r12 @ Ch(e,f,g) ldr r12,[r14,#HI] @ K[i].hi adds r3,r3,r9 ldr r7,[sp,#24+0] @ d.lo adc r4,r4,r10 @ T += Ch(e,f,g) ldr r8,[sp,#24+4] @ d.hi adds r3,r3,r11 and r9,r11,#0xff adc r4,r4,r12 @ T += K[i] adds r7,r7,r3 ldr r11,[sp,#8+0] @ b.lo adc r8,r8,r4 @ d += T teq r9,#148 ldr r12,[sp,#16+0] @ c.lo #if __ARM_ARCH>=7 it eq @ Thumb2 thing, sanity check in ARM #endif orreq r14,r14,#1 @ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39)) @ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25 @ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25 mov r9,r5,lsr#28 mov r10,r6,lsr#28 eor r9,r9,r6,lsl#4 eor r10,r10,r5,lsl#4 eor r9,r9,r6,lsr#2 eor r10,r10,r5,lsr#2 eor r9,r9,r5,lsl#30 eor r10,r10,r6,lsl#30 eor r9,r9,r6,lsr#7 eor r10,r10,r5,lsr#7 eor r9,r9,r5,lsl#25 eor r10,r10,r6,lsl#25 @ Sigma0(a) adds r3,r3,r9 and r9,r5,r11 adc r4,r4,r10 @ T += Sigma0(a) ldr r10,[sp,#8+4] @ b.hi orr r5,r5,r11 ldr r11,[sp,#16+4] @ c.hi and r5,r5,r12 and r12,r6,r10 orr r6,r6,r10 orr r5,r5,r9 @ Maj(a,b,c).lo and r6,r6,r11 adds r5,r5,r3 orr r6,r6,r12 @ Maj(a,b,c).hi sub sp,sp,#8 adc r6,r6,r4 @ h += T tst r14,#1 add r14,r14,#8 tst r14,#1 beq L00_15 ldr r9,[sp,#184+0] ldr r10,[sp,#184+4] bic r14,r14,#1 L16_79: @ sigma0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7)) @ LO lo>>1^hi<<31 ^ lo>>8^hi<<24 ^ lo>>7^hi<<25 @ HI hi>>1^lo<<31 ^ hi>>8^lo<<24 ^ hi>>7 mov r3,r9,lsr#1 ldr r11,[sp,#80+0] mov r4,r10,lsr#1 ldr r12,[sp,#80+4] eor r3,r3,r10,lsl#31 eor r4,r4,r9,lsl#31 eor r3,r3,r9,lsr#8 eor r4,r4,r10,lsr#8 eor r3,r3,r10,lsl#24 eor r4,r4,r9,lsl#24 eor r3,r3,r9,lsr#7 eor r4,r4,r10,lsr#7 eor r3,r3,r10,lsl#25 @ sigma1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6)) @ LO lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26 @ HI hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6 mov r9,r11,lsr#19 mov r10,r12,lsr#19 eor r9,r9,r12,lsl#13 eor r10,r10,r11,lsl#13 eor r9,r9,r12,lsr#29 eor r10,r10,r11,lsr#29 eor r9,r9,r11,lsl#3 eor r10,r10,r12,lsl#3 eor r9,r9,r11,lsr#6 eor r10,r10,r12,lsr#6 ldr r11,[sp,#120+0] eor r9,r9,r12,lsl#26 ldr r12,[sp,#120+4] adds r3,r3,r9 ldr r9,[sp,#192+0] adc r4,r4,r10 ldr r10,[sp,#192+4] adds r3,r3,r11 adc r4,r4,r12 adds r3,r3,r9 adc r4,r4,r10 @ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41)) @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23 @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23 mov r9,r7,lsr#14 str r3,[sp,#64+0] mov r10,r8,lsr#14 str r4,[sp,#64+4] eor r9,r9,r8,lsl#18 ldr r11,[sp,#56+0] @ h.lo eor r10,r10,r7,lsl#18 ldr r12,[sp,#56+4] @ h.hi eor r9,r9,r7,lsr#18 eor r10,r10,r8,lsr#18 eor r9,r9,r8,lsl#14 eor r10,r10,r7,lsl#14 eor r9,r9,r8,lsr#9 eor r10,r10,r7,lsr#9 eor r9,r9,r7,lsl#23 eor 
r10,r10,r8,lsl#23 @ Sigma1(e) adds r3,r3,r9 ldr r9,[sp,#40+0] @ f.lo adc r4,r4,r10 @ T += Sigma1(e) ldr r10,[sp,#40+4] @ f.hi adds r3,r3,r11 ldr r11,[sp,#48+0] @ g.lo adc r4,r4,r12 @ T += h ldr r12,[sp,#48+4] @ g.hi eor r9,r9,r11 str r7,[sp,#32+0] eor r10,r10,r12 str r8,[sp,#32+4] and r9,r9,r7 str r5,[sp,#0+0] and r10,r10,r8 str r6,[sp,#0+4] eor r9,r9,r11 ldr r11,[r14,#LO] @ K[i].lo eor r10,r10,r12 @ Ch(e,f,g) ldr r12,[r14,#HI] @ K[i].hi adds r3,r3,r9 ldr r7,[sp,#24+0] @ d.lo adc r4,r4,r10 @ T += Ch(e,f,g) ldr r8,[sp,#24+4] @ d.hi adds r3,r3,r11 and r9,r11,#0xff adc r4,r4,r12 @ T += K[i] adds r7,r7,r3 ldr r11,[sp,#8+0] @ b.lo adc r8,r8,r4 @ d += T teq r9,#23 ldr r12,[sp,#16+0] @ c.lo #if __ARM_ARCH>=7 it eq @ Thumb2 thing, sanity check in ARM #endif orreq r14,r14,#1 @ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39)) @ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25 @ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25 mov r9,r5,lsr#28 mov r10,r6,lsr#28 eor r9,r9,r6,lsl#4 eor r10,r10,r5,lsl#4 eor r9,r9,r6,lsr#2 eor r10,r10,r5,lsr#2 eor r9,r9,r5,lsl#30 eor r10,r10,r6,lsl#30 eor r9,r9,r6,lsr#7 eor r10,r10,r5,lsr#7 eor r9,r9,r5,lsl#25 eor r10,r10,r6,lsl#25 @ Sigma0(a) adds r3,r3,r9 and r9,r5,r11 adc r4,r4,r10 @ T += Sigma0(a) ldr r10,[sp,#8+4] @ b.hi orr r5,r5,r11 ldr r11,[sp,#16+4] @ c.hi and r5,r5,r12 and r12,r6,r10 orr r6,r6,r10 orr r5,r5,r9 @ Maj(a,b,c).lo and r6,r6,r11 adds r5,r5,r3 orr r6,r6,r12 @ Maj(a,b,c).hi sub sp,sp,#8 adc r6,r6,r4 @ h += T tst r14,#1 add r14,r14,#8 #if __ARM_ARCH>=7 ittt eq @ Thumb2 thing, sanity check in ARM #endif ldreq r9,[sp,#184+0] ldreq r10,[sp,#184+4] beq L16_79 bic r14,r14,#1 ldr r3,[sp,#8+0] ldr r4,[sp,#8+4] ldr r9, [r0,#0+LO] ldr r10, [r0,#0+HI] ldr r11, [r0,#8+LO] ldr r12, [r0,#8+HI] adds r9,r5,r9 str r9, [r0,#0+LO] adc r10,r6,r10 str r10, [r0,#0+HI] adds r11,r3,r11 str r11, [r0,#8+LO] adc r12,r4,r12 str r12, [r0,#8+HI] ldr r5,[sp,#16+0] ldr r6,[sp,#16+4] ldr r3,[sp,#24+0] ldr r4,[sp,#24+4] ldr r9, [r0,#16+LO] ldr r10, [r0,#16+HI] ldr r11, [r0,#24+LO] ldr r12, [r0,#24+HI] adds r9,r5,r9 str r9, [r0,#16+LO] adc r10,r6,r10 str r10, [r0,#16+HI] adds r11,r3,r11 str r11, [r0,#24+LO] adc r12,r4,r12 str r12, [r0,#24+HI] ldr r3,[sp,#40+0] ldr r4,[sp,#40+4] ldr r9, [r0,#32+LO] ldr r10, [r0,#32+HI] ldr r11, [r0,#40+LO] ldr r12, [r0,#40+HI] adds r7,r7,r9 str r7,[r0,#32+LO] adc r8,r8,r10 str r8,[r0,#32+HI] adds r11,r3,r11 str r11, [r0,#40+LO] adc r12,r4,r12 str r12, [r0,#40+HI] ldr r5,[sp,#48+0] ldr r6,[sp,#48+4] ldr r3,[sp,#56+0] ldr r4,[sp,#56+4] ldr r9, [r0,#48+LO] ldr r10, [r0,#48+HI] ldr r11, [r0,#56+LO] ldr r12, [r0,#56+HI] adds r9,r5,r9 str r9, [r0,#48+LO] adc r10,r6,r10 str r10, [r0,#48+HI] adds r11,r3,r11 str r11, [r0,#56+LO] adc r12,r4,r12 str r12, [r0,#56+HI] add sp,sp,#640 sub r14,r14,#640 teq r1,r2 bne Loop add sp,sp,#8*9 @ destroy frame #if __ARM_ARCH>=5 ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc} #else ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} tst lr,#1 moveq pc,lr @ be binary compatible with V4, yet .word 0xe12fff1e @ interoperable with Thumb ISA:-) #endif #if __ARM_MAX_ARCH__>=7 .globl _sha512_block_data_order_neon .private_extern _sha512_block_data_order_neon #ifdef __thumb2__ .thumb_func _sha512_block_data_order_neon #endif .align 4 _sha512_block_data_order_neon: dmb @ errata #451034 on early Cortex A8 add r2,r1,r2,lsl#7 @ len to point at the end of inp adr r3,K512 VFP_ABI_PUSH vldmia r0,{d16,d17,d18,d19,d20,d21,d22,d23} @ load context Loop_neon: vshr.u64 d24,d20,#14 @ 0 #if 0<16 vld1.64 {d0},[r1]! 
@ handles unaligned #endif vshr.u64 d25,d20,#18 #if 0>0 vadd.i64 d16,d30 @ h+=Maj from the past #endif vshr.u64 d26,d20,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d20,#50 vsli.64 d25,d20,#46 vmov d29,d20 vsli.64 d26,d20,#23 #if 0<16 && defined(__ARMEL__) vrev64.8 d0,d0 #endif veor d25,d24 vbsl d29,d21,d22 @ Ch(e,f,g) vshr.u64 d24,d16,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d23 vshr.u64 d25,d16,#34 vsli.64 d24,d16,#36 vadd.i64 d27,d26 vshr.u64 d26,d16,#39 vadd.i64 d28,d0 vsli.64 d25,d16,#30 veor d30,d16,d17 vsli.64 d26,d16,#25 veor d23,d24,d25 vadd.i64 d27,d28 vbsl d30,d18,d17 @ Maj(a,b,c) veor d23,d26 @ Sigma0(a) vadd.i64 d19,d27 vadd.i64 d30,d27 @ vadd.i64 d23,d30 vshr.u64 d24,d19,#14 @ 1 #if 1<16 vld1.64 {d1},[r1]! @ handles unaligned #endif vshr.u64 d25,d19,#18 #if 1>0 vadd.i64 d23,d30 @ h+=Maj from the past #endif vshr.u64 d26,d19,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d19,#50 vsli.64 d25,d19,#46 vmov d29,d19 vsli.64 d26,d19,#23 #if 1<16 && defined(__ARMEL__) vrev64.8 d1,d1 #endif veor d25,d24 vbsl d29,d20,d21 @ Ch(e,f,g) vshr.u64 d24,d23,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d22 vshr.u64 d25,d23,#34 vsli.64 d24,d23,#36 vadd.i64 d27,d26 vshr.u64 d26,d23,#39 vadd.i64 d28,d1 vsli.64 d25,d23,#30 veor d30,d23,d16 vsli.64 d26,d23,#25 veor d22,d24,d25 vadd.i64 d27,d28 vbsl d30,d17,d16 @ Maj(a,b,c) veor d22,d26 @ Sigma0(a) vadd.i64 d18,d27 vadd.i64 d30,d27 @ vadd.i64 d22,d30 vshr.u64 d24,d18,#14 @ 2 #if 2<16 vld1.64 {d2},[r1]! @ handles unaligned #endif vshr.u64 d25,d18,#18 #if 2>0 vadd.i64 d22,d30 @ h+=Maj from the past #endif vshr.u64 d26,d18,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d18,#50 vsli.64 d25,d18,#46 vmov d29,d18 vsli.64 d26,d18,#23 #if 2<16 && defined(__ARMEL__) vrev64.8 d2,d2 #endif veor d25,d24 vbsl d29,d19,d20 @ Ch(e,f,g) vshr.u64 d24,d22,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d21 vshr.u64 d25,d22,#34 vsli.64 d24,d22,#36 vadd.i64 d27,d26 vshr.u64 d26,d22,#39 vadd.i64 d28,d2 vsli.64 d25,d22,#30 veor d30,d22,d23 vsli.64 d26,d22,#25 veor d21,d24,d25 vadd.i64 d27,d28 vbsl d30,d16,d23 @ Maj(a,b,c) veor d21,d26 @ Sigma0(a) vadd.i64 d17,d27 vadd.i64 d30,d27 @ vadd.i64 d21,d30 vshr.u64 d24,d17,#14 @ 3 #if 3<16 vld1.64 {d3},[r1]! @ handles unaligned #endif vshr.u64 d25,d17,#18 #if 3>0 vadd.i64 d21,d30 @ h+=Maj from the past #endif vshr.u64 d26,d17,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d17,#50 vsli.64 d25,d17,#46 vmov d29,d17 vsli.64 d26,d17,#23 #if 3<16 && defined(__ARMEL__) vrev64.8 d3,d3 #endif veor d25,d24 vbsl d29,d18,d19 @ Ch(e,f,g) vshr.u64 d24,d21,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d20 vshr.u64 d25,d21,#34 vsli.64 d24,d21,#36 vadd.i64 d27,d26 vshr.u64 d26,d21,#39 vadd.i64 d28,d3 vsli.64 d25,d21,#30 veor d30,d21,d22 vsli.64 d26,d21,#25 veor d20,d24,d25 vadd.i64 d27,d28 vbsl d30,d23,d22 @ Maj(a,b,c) veor d20,d26 @ Sigma0(a) vadd.i64 d16,d27 vadd.i64 d30,d27 @ vadd.i64 d20,d30 vshr.u64 d24,d16,#14 @ 4 #if 4<16 vld1.64 {d4},[r1]! @ handles unaligned #endif vshr.u64 d25,d16,#18 #if 4>0 vadd.i64 d20,d30 @ h+=Maj from the past #endif vshr.u64 d26,d16,#41 vld1.64 {d28},[r3,:64]! 
@ K[i++] vsli.64 d24,d16,#50 vsli.64 d25,d16,#46 vmov d29,d16 vsli.64 d26,d16,#23 #if 4<16 && defined(__ARMEL__) vrev64.8 d4,d4 #endif veor d25,d24 vbsl d29,d17,d18 @ Ch(e,f,g) vshr.u64 d24,d20,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d19 vshr.u64 d25,d20,#34 vsli.64 d24,d20,#36 vadd.i64 d27,d26 vshr.u64 d26,d20,#39 vadd.i64 d28,d4 vsli.64 d25,d20,#30 veor d30,d20,d21 vsli.64 d26,d20,#25 veor d19,d24,d25 vadd.i64 d27,d28 vbsl d30,d22,d21 @ Maj(a,b,c) veor d19,d26 @ Sigma0(a) vadd.i64 d23,d27 vadd.i64 d30,d27 @ vadd.i64 d19,d30 vshr.u64 d24,d23,#14 @ 5 #if 5<16 vld1.64 {d5},[r1]! @ handles unaligned #endif vshr.u64 d25,d23,#18 #if 5>0 vadd.i64 d19,d30 @ h+=Maj from the past #endif vshr.u64 d26,d23,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d23,#50 vsli.64 d25,d23,#46 vmov d29,d23 vsli.64 d26,d23,#23 #if 5<16 && defined(__ARMEL__) vrev64.8 d5,d5 #endif veor d25,d24 vbsl d29,d16,d17 @ Ch(e,f,g) vshr.u64 d24,d19,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d18 vshr.u64 d25,d19,#34 vsli.64 d24,d19,#36 vadd.i64 d27,d26 vshr.u64 d26,d19,#39 vadd.i64 d28,d5 vsli.64 d25,d19,#30 veor d30,d19,d20 vsli.64 d26,d19,#25 veor d18,d24,d25 vadd.i64 d27,d28 vbsl d30,d21,d20 @ Maj(a,b,c) veor d18,d26 @ Sigma0(a) vadd.i64 d22,d27 vadd.i64 d30,d27 @ vadd.i64 d18,d30 vshr.u64 d24,d22,#14 @ 6 #if 6<16 vld1.64 {d6},[r1]! @ handles unaligned #endif vshr.u64 d25,d22,#18 #if 6>0 vadd.i64 d18,d30 @ h+=Maj from the past #endif vshr.u64 d26,d22,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d22,#50 vsli.64 d25,d22,#46 vmov d29,d22 vsli.64 d26,d22,#23 #if 6<16 && defined(__ARMEL__) vrev64.8 d6,d6 #endif veor d25,d24 vbsl d29,d23,d16 @ Ch(e,f,g) vshr.u64 d24,d18,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d17 vshr.u64 d25,d18,#34 vsli.64 d24,d18,#36 vadd.i64 d27,d26 vshr.u64 d26,d18,#39 vadd.i64 d28,d6 vsli.64 d25,d18,#30 veor d30,d18,d19 vsli.64 d26,d18,#25 veor d17,d24,d25 vadd.i64 d27,d28 vbsl d30,d20,d19 @ Maj(a,b,c) veor d17,d26 @ Sigma0(a) vadd.i64 d21,d27 vadd.i64 d30,d27 @ vadd.i64 d17,d30 vshr.u64 d24,d21,#14 @ 7 #if 7<16 vld1.64 {d7},[r1]! @ handles unaligned #endif vshr.u64 d25,d21,#18 #if 7>0 vadd.i64 d17,d30 @ h+=Maj from the past #endif vshr.u64 d26,d21,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d21,#50 vsli.64 d25,d21,#46 vmov d29,d21 vsli.64 d26,d21,#23 #if 7<16 && defined(__ARMEL__) vrev64.8 d7,d7 #endif veor d25,d24 vbsl d29,d22,d23 @ Ch(e,f,g) vshr.u64 d24,d17,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d16 vshr.u64 d25,d17,#34 vsli.64 d24,d17,#36 vadd.i64 d27,d26 vshr.u64 d26,d17,#39 vadd.i64 d28,d7 vsli.64 d25,d17,#30 veor d30,d17,d18 vsli.64 d26,d17,#25 veor d16,d24,d25 vadd.i64 d27,d28 vbsl d30,d19,d18 @ Maj(a,b,c) veor d16,d26 @ Sigma0(a) vadd.i64 d20,d27 vadd.i64 d30,d27 @ vadd.i64 d16,d30 vshr.u64 d24,d20,#14 @ 8 #if 8<16 vld1.64 {d8},[r1]! @ handles unaligned #endif vshr.u64 d25,d20,#18 #if 8>0 vadd.i64 d16,d30 @ h+=Maj from the past #endif vshr.u64 d26,d20,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d20,#50 vsli.64 d25,d20,#46 vmov d29,d20 vsli.64 d26,d20,#23 #if 8<16 && defined(__ARMEL__) vrev64.8 d8,d8 #endif veor d25,d24 vbsl d29,d21,d22 @ Ch(e,f,g) vshr.u64 d24,d16,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d23 vshr.u64 d25,d16,#34 vsli.64 d24,d16,#36 vadd.i64 d27,d26 vshr.u64 d26,d16,#39 vadd.i64 d28,d8 vsli.64 d25,d16,#30 veor d30,d16,d17 vsli.64 d26,d16,#25 veor d23,d24,d25 vadd.i64 d27,d28 vbsl d30,d18,d17 @ Maj(a,b,c) veor d23,d26 @ Sigma0(a) vadd.i64 d19,d27 vadd.i64 d30,d27 @ vadd.i64 d23,d30 vshr.u64 d24,d19,#14 @ 9 #if 9<16 vld1.64 {d9},[r1]! 
@ handles unaligned #endif vshr.u64 d25,d19,#18 #if 9>0 vadd.i64 d23,d30 @ h+=Maj from the past #endif vshr.u64 d26,d19,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d19,#50 vsli.64 d25,d19,#46 vmov d29,d19 vsli.64 d26,d19,#23 #if 9<16 && defined(__ARMEL__) vrev64.8 d9,d9 #endif veor d25,d24 vbsl d29,d20,d21 @ Ch(e,f,g) vshr.u64 d24,d23,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d22 vshr.u64 d25,d23,#34 vsli.64 d24,d23,#36 vadd.i64 d27,d26 vshr.u64 d26,d23,#39 vadd.i64 d28,d9 vsli.64 d25,d23,#30 veor d30,d23,d16 vsli.64 d26,d23,#25 veor d22,d24,d25 vadd.i64 d27,d28 vbsl d30,d17,d16 @ Maj(a,b,c) veor d22,d26 @ Sigma0(a) vadd.i64 d18,d27 vadd.i64 d30,d27 @ vadd.i64 d22,d30 vshr.u64 d24,d18,#14 @ 10 #if 10<16 vld1.64 {d10},[r1]! @ handles unaligned #endif vshr.u64 d25,d18,#18 #if 10>0 vadd.i64 d22,d30 @ h+=Maj from the past #endif vshr.u64 d26,d18,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d18,#50 vsli.64 d25,d18,#46 vmov d29,d18 vsli.64 d26,d18,#23 #if 10<16 && defined(__ARMEL__) vrev64.8 d10,d10 #endif veor d25,d24 vbsl d29,d19,d20 @ Ch(e,f,g) vshr.u64 d24,d22,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d21 vshr.u64 d25,d22,#34 vsli.64 d24,d22,#36 vadd.i64 d27,d26 vshr.u64 d26,d22,#39 vadd.i64 d28,d10 vsli.64 d25,d22,#30 veor d30,d22,d23 vsli.64 d26,d22,#25 veor d21,d24,d25 vadd.i64 d27,d28 vbsl d30,d16,d23 @ Maj(a,b,c) veor d21,d26 @ Sigma0(a) vadd.i64 d17,d27 vadd.i64 d30,d27 @ vadd.i64 d21,d30 vshr.u64 d24,d17,#14 @ 11 #if 11<16 vld1.64 {d11},[r1]! @ handles unaligned #endif vshr.u64 d25,d17,#18 #if 11>0 vadd.i64 d21,d30 @ h+=Maj from the past #endif vshr.u64 d26,d17,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d17,#50 vsli.64 d25,d17,#46 vmov d29,d17 vsli.64 d26,d17,#23 #if 11<16 && defined(__ARMEL__) vrev64.8 d11,d11 #endif veor d25,d24 vbsl d29,d18,d19 @ Ch(e,f,g) vshr.u64 d24,d21,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d20 vshr.u64 d25,d21,#34 vsli.64 d24,d21,#36 vadd.i64 d27,d26 vshr.u64 d26,d21,#39 vadd.i64 d28,d11 vsli.64 d25,d21,#30 veor d30,d21,d22 vsli.64 d26,d21,#25 veor d20,d24,d25 vadd.i64 d27,d28 vbsl d30,d23,d22 @ Maj(a,b,c) veor d20,d26 @ Sigma0(a) vadd.i64 d16,d27 vadd.i64 d30,d27 @ vadd.i64 d20,d30 vshr.u64 d24,d16,#14 @ 12 #if 12<16 vld1.64 {d12},[r1]! @ handles unaligned #endif vshr.u64 d25,d16,#18 #if 12>0 vadd.i64 d20,d30 @ h+=Maj from the past #endif vshr.u64 d26,d16,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d16,#50 vsli.64 d25,d16,#46 vmov d29,d16 vsli.64 d26,d16,#23 #if 12<16 && defined(__ARMEL__) vrev64.8 d12,d12 #endif veor d25,d24 vbsl d29,d17,d18 @ Ch(e,f,g) vshr.u64 d24,d20,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d19 vshr.u64 d25,d20,#34 vsli.64 d24,d20,#36 vadd.i64 d27,d26 vshr.u64 d26,d20,#39 vadd.i64 d28,d12 vsli.64 d25,d20,#30 veor d30,d20,d21 vsli.64 d26,d20,#25 veor d19,d24,d25 vadd.i64 d27,d28 vbsl d30,d22,d21 @ Maj(a,b,c) veor d19,d26 @ Sigma0(a) vadd.i64 d23,d27 vadd.i64 d30,d27 @ vadd.i64 d19,d30 vshr.u64 d24,d23,#14 @ 13 #if 13<16 vld1.64 {d13},[r1]! @ handles unaligned #endif vshr.u64 d25,d23,#18 #if 13>0 vadd.i64 d19,d30 @ h+=Maj from the past #endif vshr.u64 d26,d23,#41 vld1.64 {d28},[r3,:64]! 
@ K[i++] vsli.64 d24,d23,#50 vsli.64 d25,d23,#46 vmov d29,d23 vsli.64 d26,d23,#23 #if 13<16 && defined(__ARMEL__) vrev64.8 d13,d13 #endif veor d25,d24 vbsl d29,d16,d17 @ Ch(e,f,g) vshr.u64 d24,d19,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d18 vshr.u64 d25,d19,#34 vsli.64 d24,d19,#36 vadd.i64 d27,d26 vshr.u64 d26,d19,#39 vadd.i64 d28,d13 vsli.64 d25,d19,#30 veor d30,d19,d20 vsli.64 d26,d19,#25 veor d18,d24,d25 vadd.i64 d27,d28 vbsl d30,d21,d20 @ Maj(a,b,c) veor d18,d26 @ Sigma0(a) vadd.i64 d22,d27 vadd.i64 d30,d27 @ vadd.i64 d18,d30 vshr.u64 d24,d22,#14 @ 14 #if 14<16 vld1.64 {d14},[r1]! @ handles unaligned #endif vshr.u64 d25,d22,#18 #if 14>0 vadd.i64 d18,d30 @ h+=Maj from the past #endif vshr.u64 d26,d22,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d22,#50 vsli.64 d25,d22,#46 vmov d29,d22 vsli.64 d26,d22,#23 #if 14<16 && defined(__ARMEL__) vrev64.8 d14,d14 #endif veor d25,d24 vbsl d29,d23,d16 @ Ch(e,f,g) vshr.u64 d24,d18,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d17 vshr.u64 d25,d18,#34 vsli.64 d24,d18,#36 vadd.i64 d27,d26 vshr.u64 d26,d18,#39 vadd.i64 d28,d14 vsli.64 d25,d18,#30 veor d30,d18,d19 vsli.64 d26,d18,#25 veor d17,d24,d25 vadd.i64 d27,d28 vbsl d30,d20,d19 @ Maj(a,b,c) veor d17,d26 @ Sigma0(a) vadd.i64 d21,d27 vadd.i64 d30,d27 @ vadd.i64 d17,d30 vshr.u64 d24,d21,#14 @ 15 #if 15<16 vld1.64 {d15},[r1]! @ handles unaligned #endif vshr.u64 d25,d21,#18 #if 15>0 vadd.i64 d17,d30 @ h+=Maj from the past #endif vshr.u64 d26,d21,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d21,#50 vsli.64 d25,d21,#46 vmov d29,d21 vsli.64 d26,d21,#23 #if 15<16 && defined(__ARMEL__) vrev64.8 d15,d15 #endif veor d25,d24 vbsl d29,d22,d23 @ Ch(e,f,g) vshr.u64 d24,d17,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d16 vshr.u64 d25,d17,#34 vsli.64 d24,d17,#36 vadd.i64 d27,d26 vshr.u64 d26,d17,#39 vadd.i64 d28,d15 vsli.64 d25,d17,#30 veor d30,d17,d18 vsli.64 d26,d17,#25 veor d16,d24,d25 vadd.i64 d27,d28 vbsl d30,d19,d18 @ Maj(a,b,c) veor d16,d26 @ Sigma0(a) vadd.i64 d20,d27 vadd.i64 d30,d27 @ vadd.i64 d16,d30 mov r12,#4 L16_79_neon: subs r12,#1 vshr.u64 q12,q7,#19 vshr.u64 q13,q7,#61 vadd.i64 d16,d30 @ h+=Maj from the past vshr.u64 q15,q7,#6 vsli.64 q12,q7,#45 vext.8 q14,q0,q1,#8 @ X[i+1] vsli.64 q13,q7,#3 veor q15,q12 vshr.u64 q12,q14,#1 veor q15,q13 @ sigma1(X[i+14]) vshr.u64 q13,q14,#8 vadd.i64 q0,q15 vshr.u64 q15,q14,#7 vsli.64 q12,q14,#63 vsli.64 q13,q14,#56 vext.8 q14,q4,q5,#8 @ X[i+9] veor q15,q12 vshr.u64 d24,d20,#14 @ from NEON_00_15 vadd.i64 q0,q14 vshr.u64 d25,d20,#18 @ from NEON_00_15 veor q15,q13 @ sigma0(X[i+1]) vshr.u64 d26,d20,#41 @ from NEON_00_15 vadd.i64 q0,q15 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d20,#50 vsli.64 d25,d20,#46 vmov d29,d20 vsli.64 d26,d20,#23 #if 16<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d21,d22 @ Ch(e,f,g) vshr.u64 d24,d16,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d23 vshr.u64 d25,d16,#34 vsli.64 d24,d16,#36 vadd.i64 d27,d26 vshr.u64 d26,d16,#39 vadd.i64 d28,d0 vsli.64 d25,d16,#30 veor d30,d16,d17 vsli.64 d26,d16,#25 veor d23,d24,d25 vadd.i64 d27,d28 vbsl d30,d18,d17 @ Maj(a,b,c) veor d23,d26 @ Sigma0(a) vadd.i64 d19,d27 vadd.i64 d30,d27 @ vadd.i64 d23,d30 vshr.u64 d24,d19,#14 @ 17 #if 17<16 vld1.64 {d1},[r1]! @ handles unaligned #endif vshr.u64 d25,d19,#18 #if 17>0 vadd.i64 d23,d30 @ h+=Maj from the past #endif vshr.u64 d26,d19,#41 vld1.64 {d28},[r3,:64]! 
@ K[i++] vsli.64 d24,d19,#50 vsli.64 d25,d19,#46 vmov d29,d19 vsli.64 d26,d19,#23 #if 17<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d20,d21 @ Ch(e,f,g) vshr.u64 d24,d23,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d22 vshr.u64 d25,d23,#34 vsli.64 d24,d23,#36 vadd.i64 d27,d26 vshr.u64 d26,d23,#39 vadd.i64 d28,d1 vsli.64 d25,d23,#30 veor d30,d23,d16 vsli.64 d26,d23,#25 veor d22,d24,d25 vadd.i64 d27,d28 vbsl d30,d17,d16 @ Maj(a,b,c) veor d22,d26 @ Sigma0(a) vadd.i64 d18,d27 vadd.i64 d30,d27 @ vadd.i64 d22,d30 vshr.u64 q12,q0,#19 vshr.u64 q13,q0,#61 vadd.i64 d22,d30 @ h+=Maj from the past vshr.u64 q15,q0,#6 vsli.64 q12,q0,#45 vext.8 q14,q1,q2,#8 @ X[i+1] vsli.64 q13,q0,#3 veor q15,q12 vshr.u64 q12,q14,#1 veor q15,q13 @ sigma1(X[i+14]) vshr.u64 q13,q14,#8 vadd.i64 q1,q15 vshr.u64 q15,q14,#7 vsli.64 q12,q14,#63 vsli.64 q13,q14,#56 vext.8 q14,q5,q6,#8 @ X[i+9] veor q15,q12 vshr.u64 d24,d18,#14 @ from NEON_00_15 vadd.i64 q1,q14 vshr.u64 d25,d18,#18 @ from NEON_00_15 veor q15,q13 @ sigma0(X[i+1]) vshr.u64 d26,d18,#41 @ from NEON_00_15 vadd.i64 q1,q15 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d18,#50 vsli.64 d25,d18,#46 vmov d29,d18 vsli.64 d26,d18,#23 #if 18<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d19,d20 @ Ch(e,f,g) vshr.u64 d24,d22,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d21 vshr.u64 d25,d22,#34 vsli.64 d24,d22,#36 vadd.i64 d27,d26 vshr.u64 d26,d22,#39 vadd.i64 d28,d2 vsli.64 d25,d22,#30 veor d30,d22,d23 vsli.64 d26,d22,#25 veor d21,d24,d25 vadd.i64 d27,d28 vbsl d30,d16,d23 @ Maj(a,b,c) veor d21,d26 @ Sigma0(a) vadd.i64 d17,d27 vadd.i64 d30,d27 @ vadd.i64 d21,d30 vshr.u64 d24,d17,#14 @ 19 #if 19<16 vld1.64 {d3},[r1]! @ handles unaligned #endif vshr.u64 d25,d17,#18 #if 19>0 vadd.i64 d21,d30 @ h+=Maj from the past #endif vshr.u64 d26,d17,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d17,#50 vsli.64 d25,d17,#46 vmov d29,d17 vsli.64 d26,d17,#23 #if 19<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d18,d19 @ Ch(e,f,g) vshr.u64 d24,d21,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d20 vshr.u64 d25,d21,#34 vsli.64 d24,d21,#36 vadd.i64 d27,d26 vshr.u64 d26,d21,#39 vadd.i64 d28,d3 vsli.64 d25,d21,#30 veor d30,d21,d22 vsli.64 d26,d21,#25 veor d20,d24,d25 vadd.i64 d27,d28 vbsl d30,d23,d22 @ Maj(a,b,c) veor d20,d26 @ Sigma0(a) vadd.i64 d16,d27 vadd.i64 d30,d27 @ vadd.i64 d20,d30 vshr.u64 q12,q1,#19 vshr.u64 q13,q1,#61 vadd.i64 d20,d30 @ h+=Maj from the past vshr.u64 q15,q1,#6 vsli.64 q12,q1,#45 vext.8 q14,q2,q3,#8 @ X[i+1] vsli.64 q13,q1,#3 veor q15,q12 vshr.u64 q12,q14,#1 veor q15,q13 @ sigma1(X[i+14]) vshr.u64 q13,q14,#8 vadd.i64 q2,q15 vshr.u64 q15,q14,#7 vsli.64 q12,q14,#63 vsli.64 q13,q14,#56 vext.8 q14,q6,q7,#8 @ X[i+9] veor q15,q12 vshr.u64 d24,d16,#14 @ from NEON_00_15 vadd.i64 q2,q14 vshr.u64 d25,d16,#18 @ from NEON_00_15 veor q15,q13 @ sigma0(X[i+1]) vshr.u64 d26,d16,#41 @ from NEON_00_15 vadd.i64 q2,q15 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d16,#50 vsli.64 d25,d16,#46 vmov d29,d16 vsli.64 d26,d16,#23 #if 20<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d17,d18 @ Ch(e,f,g) vshr.u64 d24,d20,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d19 vshr.u64 d25,d20,#34 vsli.64 d24,d20,#36 vadd.i64 d27,d26 vshr.u64 d26,d20,#39 vadd.i64 d28,d4 vsli.64 d25,d20,#30 veor d30,d20,d21 vsli.64 d26,d20,#25 veor d19,d24,d25 vadd.i64 d27,d28 vbsl d30,d22,d21 @ Maj(a,b,c) veor d19,d26 @ Sigma0(a) vadd.i64 d23,d27 vadd.i64 d30,d27 @ vadd.i64 d19,d30 vshr.u64 d24,d23,#14 @ 21 #if 21<16 vld1.64 {d5},[r1]! 
@ handles unaligned #endif vshr.u64 d25,d23,#18 #if 21>0 vadd.i64 d19,d30 @ h+=Maj from the past #endif vshr.u64 d26,d23,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d23,#50 vsli.64 d25,d23,#46 vmov d29,d23 vsli.64 d26,d23,#23 #if 21<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d16,d17 @ Ch(e,f,g) vshr.u64 d24,d19,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d18 vshr.u64 d25,d19,#34 vsli.64 d24,d19,#36 vadd.i64 d27,d26 vshr.u64 d26,d19,#39 vadd.i64 d28,d5 vsli.64 d25,d19,#30 veor d30,d19,d20 vsli.64 d26,d19,#25 veor d18,d24,d25 vadd.i64 d27,d28 vbsl d30,d21,d20 @ Maj(a,b,c) veor d18,d26 @ Sigma0(a) vadd.i64 d22,d27 vadd.i64 d30,d27 @ vadd.i64 d18,d30 vshr.u64 q12,q2,#19 vshr.u64 q13,q2,#61 vadd.i64 d18,d30 @ h+=Maj from the past vshr.u64 q15,q2,#6 vsli.64 q12,q2,#45 vext.8 q14,q3,q4,#8 @ X[i+1] vsli.64 q13,q2,#3 veor q15,q12 vshr.u64 q12,q14,#1 veor q15,q13 @ sigma1(X[i+14]) vshr.u64 q13,q14,#8 vadd.i64 q3,q15 vshr.u64 q15,q14,#7 vsli.64 q12,q14,#63 vsli.64 q13,q14,#56 vext.8 q14,q7,q0,#8 @ X[i+9] veor q15,q12 vshr.u64 d24,d22,#14 @ from NEON_00_15 vadd.i64 q3,q14 vshr.u64 d25,d22,#18 @ from NEON_00_15 veor q15,q13 @ sigma0(X[i+1]) vshr.u64 d26,d22,#41 @ from NEON_00_15 vadd.i64 q3,q15 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d22,#50 vsli.64 d25,d22,#46 vmov d29,d22 vsli.64 d26,d22,#23 #if 22<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d23,d16 @ Ch(e,f,g) vshr.u64 d24,d18,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d17 vshr.u64 d25,d18,#34 vsli.64 d24,d18,#36 vadd.i64 d27,d26 vshr.u64 d26,d18,#39 vadd.i64 d28,d6 vsli.64 d25,d18,#30 veor d30,d18,d19 vsli.64 d26,d18,#25 veor d17,d24,d25 vadd.i64 d27,d28 vbsl d30,d20,d19 @ Maj(a,b,c) veor d17,d26 @ Sigma0(a) vadd.i64 d21,d27 vadd.i64 d30,d27 @ vadd.i64 d17,d30 vshr.u64 d24,d21,#14 @ 23 #if 23<16 vld1.64 {d7},[r1]! @ handles unaligned #endif vshr.u64 d25,d21,#18 #if 23>0 vadd.i64 d17,d30 @ h+=Maj from the past #endif vshr.u64 d26,d21,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d21,#50 vsli.64 d25,d21,#46 vmov d29,d21 vsli.64 d26,d21,#23 #if 23<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d22,d23 @ Ch(e,f,g) vshr.u64 d24,d17,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d16 vshr.u64 d25,d17,#34 vsli.64 d24,d17,#36 vadd.i64 d27,d26 vshr.u64 d26,d17,#39 vadd.i64 d28,d7 vsli.64 d25,d17,#30 veor d30,d17,d18 vsli.64 d26,d17,#25 veor d16,d24,d25 vadd.i64 d27,d28 vbsl d30,d19,d18 @ Maj(a,b,c) veor d16,d26 @ Sigma0(a) vadd.i64 d20,d27 vadd.i64 d30,d27 @ vadd.i64 d16,d30 vshr.u64 q12,q3,#19 vshr.u64 q13,q3,#61 vadd.i64 d16,d30 @ h+=Maj from the past vshr.u64 q15,q3,#6 vsli.64 q12,q3,#45 vext.8 q14,q4,q5,#8 @ X[i+1] vsli.64 q13,q3,#3 veor q15,q12 vshr.u64 q12,q14,#1 veor q15,q13 @ sigma1(X[i+14]) vshr.u64 q13,q14,#8 vadd.i64 q4,q15 vshr.u64 q15,q14,#7 vsli.64 q12,q14,#63 vsli.64 q13,q14,#56 vext.8 q14,q0,q1,#8 @ X[i+9] veor q15,q12 vshr.u64 d24,d20,#14 @ from NEON_00_15 vadd.i64 q4,q14 vshr.u64 d25,d20,#18 @ from NEON_00_15 veor q15,q13 @ sigma0(X[i+1]) vshr.u64 d26,d20,#41 @ from NEON_00_15 vadd.i64 q4,q15 vld1.64 {d28},[r3,:64]! 
@ K[i++] vsli.64 d24,d20,#50 vsli.64 d25,d20,#46 vmov d29,d20 vsli.64 d26,d20,#23 #if 24<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d21,d22 @ Ch(e,f,g) vshr.u64 d24,d16,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d23 vshr.u64 d25,d16,#34 vsli.64 d24,d16,#36 vadd.i64 d27,d26 vshr.u64 d26,d16,#39 vadd.i64 d28,d8 vsli.64 d25,d16,#30 veor d30,d16,d17 vsli.64 d26,d16,#25 veor d23,d24,d25 vadd.i64 d27,d28 vbsl d30,d18,d17 @ Maj(a,b,c) veor d23,d26 @ Sigma0(a) vadd.i64 d19,d27 vadd.i64 d30,d27 @ vadd.i64 d23,d30 vshr.u64 d24,d19,#14 @ 25 #if 25<16 vld1.64 {d9},[r1]! @ handles unaligned #endif vshr.u64 d25,d19,#18 #if 25>0 vadd.i64 d23,d30 @ h+=Maj from the past #endif vshr.u64 d26,d19,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d19,#50 vsli.64 d25,d19,#46 vmov d29,d19 vsli.64 d26,d19,#23 #if 25<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d20,d21 @ Ch(e,f,g) vshr.u64 d24,d23,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d22 vshr.u64 d25,d23,#34 vsli.64 d24,d23,#36 vadd.i64 d27,d26 vshr.u64 d26,d23,#39 vadd.i64 d28,d9 vsli.64 d25,d23,#30 veor d30,d23,d16 vsli.64 d26,d23,#25 veor d22,d24,d25 vadd.i64 d27,d28 vbsl d30,d17,d16 @ Maj(a,b,c) veor d22,d26 @ Sigma0(a) vadd.i64 d18,d27 vadd.i64 d30,d27 @ vadd.i64 d22,d30 vshr.u64 q12,q4,#19 vshr.u64 q13,q4,#61 vadd.i64 d22,d30 @ h+=Maj from the past vshr.u64 q15,q4,#6 vsli.64 q12,q4,#45 vext.8 q14,q5,q6,#8 @ X[i+1] vsli.64 q13,q4,#3 veor q15,q12 vshr.u64 q12,q14,#1 veor q15,q13 @ sigma1(X[i+14]) vshr.u64 q13,q14,#8 vadd.i64 q5,q15 vshr.u64 q15,q14,#7 vsli.64 q12,q14,#63 vsli.64 q13,q14,#56 vext.8 q14,q1,q2,#8 @ X[i+9] veor q15,q12 vshr.u64 d24,d18,#14 @ from NEON_00_15 vadd.i64 q5,q14 vshr.u64 d25,d18,#18 @ from NEON_00_15 veor q15,q13 @ sigma0(X[i+1]) vshr.u64 d26,d18,#41 @ from NEON_00_15 vadd.i64 q5,q15 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d18,#50 vsli.64 d25,d18,#46 vmov d29,d18 vsli.64 d26,d18,#23 #if 26<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d19,d20 @ Ch(e,f,g) vshr.u64 d24,d22,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d21 vshr.u64 d25,d22,#34 vsli.64 d24,d22,#36 vadd.i64 d27,d26 vshr.u64 d26,d22,#39 vadd.i64 d28,d10 vsli.64 d25,d22,#30 veor d30,d22,d23 vsli.64 d26,d22,#25 veor d21,d24,d25 vadd.i64 d27,d28 vbsl d30,d16,d23 @ Maj(a,b,c) veor d21,d26 @ Sigma0(a) vadd.i64 d17,d27 vadd.i64 d30,d27 @ vadd.i64 d21,d30 vshr.u64 d24,d17,#14 @ 27 #if 27<16 vld1.64 {d11},[r1]! @ handles unaligned #endif vshr.u64 d25,d17,#18 #if 27>0 vadd.i64 d21,d30 @ h+=Maj from the past #endif vshr.u64 d26,d17,#41 vld1.64 {d28},[r3,:64]! 
@ K[i++] vsli.64 d24,d17,#50 vsli.64 d25,d17,#46 vmov d29,d17 vsli.64 d26,d17,#23 #if 27<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d18,d19 @ Ch(e,f,g) vshr.u64 d24,d21,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d20 vshr.u64 d25,d21,#34 vsli.64 d24,d21,#36 vadd.i64 d27,d26 vshr.u64 d26,d21,#39 vadd.i64 d28,d11 vsli.64 d25,d21,#30 veor d30,d21,d22 vsli.64 d26,d21,#25 veor d20,d24,d25 vadd.i64 d27,d28 vbsl d30,d23,d22 @ Maj(a,b,c) veor d20,d26 @ Sigma0(a) vadd.i64 d16,d27 vadd.i64 d30,d27 @ vadd.i64 d20,d30 vshr.u64 q12,q5,#19 vshr.u64 q13,q5,#61 vadd.i64 d20,d30 @ h+=Maj from the past vshr.u64 q15,q5,#6 vsli.64 q12,q5,#45 vext.8 q14,q6,q7,#8 @ X[i+1] vsli.64 q13,q5,#3 veor q15,q12 vshr.u64 q12,q14,#1 veor q15,q13 @ sigma1(X[i+14]) vshr.u64 q13,q14,#8 vadd.i64 q6,q15 vshr.u64 q15,q14,#7 vsli.64 q12,q14,#63 vsli.64 q13,q14,#56 vext.8 q14,q2,q3,#8 @ X[i+9] veor q15,q12 vshr.u64 d24,d16,#14 @ from NEON_00_15 vadd.i64 q6,q14 vshr.u64 d25,d16,#18 @ from NEON_00_15 veor q15,q13 @ sigma0(X[i+1]) vshr.u64 d26,d16,#41 @ from NEON_00_15 vadd.i64 q6,q15 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d16,#50 vsli.64 d25,d16,#46 vmov d29,d16 vsli.64 d26,d16,#23 #if 28<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d17,d18 @ Ch(e,f,g) vshr.u64 d24,d20,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d19 vshr.u64 d25,d20,#34 vsli.64 d24,d20,#36 vadd.i64 d27,d26 vshr.u64 d26,d20,#39 vadd.i64 d28,d12 vsli.64 d25,d20,#30 veor d30,d20,d21 vsli.64 d26,d20,#25 veor d19,d24,d25 vadd.i64 d27,d28 vbsl d30,d22,d21 @ Maj(a,b,c) veor d19,d26 @ Sigma0(a) vadd.i64 d23,d27 vadd.i64 d30,d27 @ vadd.i64 d19,d30 vshr.u64 d24,d23,#14 @ 29 #if 29<16 vld1.64 {d13},[r1]! @ handles unaligned #endif vshr.u64 d25,d23,#18 #if 29>0 vadd.i64 d19,d30 @ h+=Maj from the past #endif vshr.u64 d26,d23,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d23,#50 vsli.64 d25,d23,#46 vmov d29,d23 vsli.64 d26,d23,#23 #if 29<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d16,d17 @ Ch(e,f,g) vshr.u64 d24,d19,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d18 vshr.u64 d25,d19,#34 vsli.64 d24,d19,#36 vadd.i64 d27,d26 vshr.u64 d26,d19,#39 vadd.i64 d28,d13 vsli.64 d25,d19,#30 veor d30,d19,d20 vsli.64 d26,d19,#25 veor d18,d24,d25 vadd.i64 d27,d28 vbsl d30,d21,d20 @ Maj(a,b,c) veor d18,d26 @ Sigma0(a) vadd.i64 d22,d27 vadd.i64 d30,d27 @ vadd.i64 d18,d30 vshr.u64 q12,q6,#19 vshr.u64 q13,q6,#61 vadd.i64 d18,d30 @ h+=Maj from the past vshr.u64 q15,q6,#6 vsli.64 q12,q6,#45 vext.8 q14,q7,q0,#8 @ X[i+1] vsli.64 q13,q6,#3 veor q15,q12 vshr.u64 q12,q14,#1 veor q15,q13 @ sigma1(X[i+14]) vshr.u64 q13,q14,#8 vadd.i64 q7,q15 vshr.u64 q15,q14,#7 vsli.64 q12,q14,#63 vsli.64 q13,q14,#56 vext.8 q14,q3,q4,#8 @ X[i+9] veor q15,q12 vshr.u64 d24,d22,#14 @ from NEON_00_15 vadd.i64 q7,q14 vshr.u64 d25,d22,#18 @ from NEON_00_15 veor q15,q13 @ sigma0(X[i+1]) vshr.u64 d26,d22,#41 @ from NEON_00_15 vadd.i64 q7,q15 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d22,#50 vsli.64 d25,d22,#46 vmov d29,d22 vsli.64 d26,d22,#23 #if 30<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d23,d16 @ Ch(e,f,g) vshr.u64 d24,d18,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d17 vshr.u64 d25,d18,#34 vsli.64 d24,d18,#36 vadd.i64 d27,d26 vshr.u64 d26,d18,#39 vadd.i64 d28,d14 vsli.64 d25,d18,#30 veor d30,d18,d19 vsli.64 d26,d18,#25 veor d17,d24,d25 vadd.i64 d27,d28 vbsl d30,d20,d19 @ Maj(a,b,c) veor d17,d26 @ Sigma0(a) vadd.i64 d21,d27 vadd.i64 d30,d27 @ vadd.i64 d17,d30 vshr.u64 d24,d21,#14 @ 31 #if 31<16 vld1.64 {d15},[r1]! 
@ handles unaligned #endif vshr.u64 d25,d21,#18 #if 31>0 vadd.i64 d17,d30 @ h+=Maj from the past #endif vshr.u64 d26,d21,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d21,#50 vsli.64 d25,d21,#46 vmov d29,d21 vsli.64 d26,d21,#23 #if 31<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d22,d23 @ Ch(e,f,g) vshr.u64 d24,d17,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d16 vshr.u64 d25,d17,#34 vsli.64 d24,d17,#36 vadd.i64 d27,d26 vshr.u64 d26,d17,#39 vadd.i64 d28,d15 vsli.64 d25,d17,#30 veor d30,d17,d18 vsli.64 d26,d17,#25 veor d16,d24,d25 vadd.i64 d27,d28 vbsl d30,d19,d18 @ Maj(a,b,c) veor d16,d26 @ Sigma0(a) vadd.i64 d20,d27 vadd.i64 d30,d27 @ vadd.i64 d16,d30 bne L16_79_neon vadd.i64 d16,d30 @ h+=Maj from the past vldmia r0,{d24,d25,d26,d27,d28,d29,d30,d31} @ load context to temp vadd.i64 q8,q12 @ vectorized accumulate vadd.i64 q9,q13 vadd.i64 q10,q14 vadd.i64 q11,q15 vstmia r0,{d16,d17,d18,d19,d20,d21,d22,d23} @ save context teq r1,r2 sub r3,#640 @ rewind K512 bne Loop_neon VFP_ABI_POP bx lr @ .word 0xe12fff1e #endif .byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 #endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__APPLE__)
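The round comments in the file above spell out the SHA-512 functions used by both the integer and NEON paths: Sigma1 = ROTR14 ^ ROTR18 ^ ROTR41, Sigma0 = ROTR28 ^ ROTR34 ^ ROTR39, sigma0 = ROTR1 ^ ROTR8 ^ SHR7, and sigma1 = ROTR19 ^ ROTR61 ^ SHR6. A minimal C restatement of those 64-bit helpers (plain reference code, not the split lo/hi register scheme the 32-bit assembly uses):

#include <stdint.h>

static inline uint64_t rotr64(uint64_t x, unsigned n) { return (x >> n) | (x << (64 - n)); }

/* Big Sigma functions applied to the working variables e and a. */
static inline uint64_t Sigma1(uint64_t x) { return rotr64(x, 14) ^ rotr64(x, 18) ^ rotr64(x, 41); }
static inline uint64_t Sigma0(uint64_t x) { return rotr64(x, 28) ^ rotr64(x, 34) ^ rotr64(x, 39); }

/* Small sigma functions used to expand the 16-word schedule to 80 words. */
static inline uint64_t sigma0(uint64_t x) { return rotr64(x, 1)  ^ rotr64(x, 8)  ^ (x >> 7); }
static inline uint64_t sigma1(uint64_t x) { return rotr64(x, 19) ^ rotr64(x, 61) ^ (x >> 6); }

/* Ch and Maj, matching the "Ch(e,f,g)" and "Maj(a,b,c)" annotations above. */
static inline uint64_t Ch(uint64_t e, uint64_t f, uint64_t g)  { return (e & f) ^ (~e & g); }
static inline uint64_t Maj(uint64_t a, uint64_t b, uint64_t c) { return (a & b) ^ (a & c) ^ (b & c); }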
marvin-hansen/iggy-streaming-system
6,942
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/ios-arm/crypto/fipsmodule/ghashv8-armx.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__APPLE__) #include <openssl/arm_arch.h> #if __ARM_MAX_ARCH__>=7 .text .code 32 #undef __thumb2__ .globl _gcm_init_v8 .private_extern _gcm_init_v8 #ifdef __thumb2__ .thumb_func _gcm_init_v8 #endif .align 4 _gcm_init_v8: AARCH64_VALID_CALL_TARGET vld1.64 {q9},[r1] @ load input H vmov.i8 q11,#0xe1 vshl.i64 q11,q11,#57 @ 0xc2.0 vext.8 q3,q9,q9,#8 vshr.u64 q10,q11,#63 vdup.32 q9,d18[1] vext.8 q8,q10,q11,#8 @ t0=0xc2....01 vshr.u64 q10,q3,#63 vshr.s32 q9,q9,#31 @ broadcast carry bit vand q10,q10,q8 vshl.i64 q3,q3,#1 vext.8 q10,q10,q10,#8 vand q8,q8,q9 vorr q3,q3,q10 @ H<<<=1 veor q12,q3,q8 @ twisted H vext.8 q12, q12, q12, #8 vst1.64 {q12},[r0]! @ store Htable[0] @ calculate H^2 vext.8 q8,q12,q12,#8 @ Karatsuba pre-processing .byte 0xa9,0x0e,0xa9,0xf2 @ pmull2 q0,q12,q12 veor q8,q8,q12 .byte 0xa8,0x4e,0xa8,0xf2 @ pmull q2,q12,q12 .byte 0xa0,0x2e,0xa0,0xf2 @ pmull q1,q8,q8 vext.8 q9,q0,q2,#8 @ Karatsuba post-processing veor q10,q0,q2 veor q1,q1,q9 veor q1,q1,q10 .byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase vmov d4,d3 @ Xh|Xm - 256-bit result vmov d3,d0 @ Xm is rotated Xl veor q0,q1,q10 vext.8 q10,q0,q0,#8 @ 2nd phase .byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11 veor q10,q10,q2 veor q9,q0,q10 vext.8 q14,q9,q9,#8 @ Karatsuba pre-processing veor q9,q9,q14 vext.8 q13,q8,q9,#8 @ pack Karatsuba pre-processed vst1.64 {q13},[r0]! @ store Htable[1..2] vst1.64 {q14},[r0]! @ store Htable[1..2] bx lr .globl _gcm_gmult_v8 .private_extern _gcm_gmult_v8 #ifdef __thumb2__ .thumb_func _gcm_gmult_v8 #endif .align 4 _gcm_gmult_v8: AARCH64_VALID_CALL_TARGET vld1.64 {q9},[r0] @ load Xi vmov.i8 q11,#0xe1 vld1.64 {q12,q13},[r1] @ load twisted H, ... vext.8 q12,q12,q12,#8 vshl.u64 q11,q11,#57 #ifndef __ARMEB__ vrev64.8 q9,q9 #endif vext.8 q3,q9,q9,#8 .byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo veor q9,q9,q3 @ Karatsuba pre-processing .byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi .byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi) vext.8 q9,q0,q2,#8 @ Karatsuba post-processing veor q10,q0,q2 veor q1,q1,q9 veor q1,q1,q10 .byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction vmov d4,d3 @ Xh|Xm - 256-bit result vmov d3,d0 @ Xm is rotated Xl veor q0,q1,q10 vext.8 q10,q0,q0,#8 @ 2nd phase of reduction .byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11 veor q10,q10,q2 veor q0,q0,q10 #ifndef __ARMEB__ vrev64.8 q0,q0 #endif vext.8 q0,q0,q0,#8 vst1.64 {q0},[r0] @ write out Xi bx lr .globl _gcm_ghash_v8 .private_extern _gcm_ghash_v8 #ifdef __thumb2__ .thumb_func _gcm_ghash_v8 #endif .align 4 _gcm_ghash_v8: AARCH64_VALID_CALL_TARGET vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ 32-bit ABI says so vld1.64 {q0},[r0] @ load [rotated] Xi @ "[rotated]" means that @ loaded value would have @ to be rotated in order to @ make it appear as in @ algorithm specification subs r3,r3,#32 @ see if r3 is 32 or larger mov r12,#16 @ r12 is used as post- @ increment for input pointer; @ as loop is modulo-scheduled @ r12 is zeroed just in time @ to preclude overstepping @ inp[len], which means that @ last block[s] are actually @ loaded twice, but last @ copy is not processed vld1.64 {q12,q13},[r1]! @ load twisted H, ..., H^2 vext.8 q12,q12,q12,#8 vmov.i8 q11,#0xe1 vld1.64 {q14},[r1] vext.8 q14,q14,q14,#8 moveq r12,#0 @ is it time to zero r12? 
vext.8 q0,q0,q0,#8 @ rotate Xi vld1.64 {q8},[r2]! @ load [rotated] I[0] vshl.u64 q11,q11,#57 @ compose 0xc2.0 constant #ifndef __ARMEB__ vrev64.8 q8,q8 vrev64.8 q0,q0 #endif vext.8 q3,q8,q8,#8 @ rotate I[0] blo Lodd_tail_v8 @ r3 was less than 32 vld1.64 {q9},[r2],r12 @ load [rotated] I[1] #ifndef __ARMEB__ vrev64.8 q9,q9 #endif vext.8 q7,q9,q9,#8 veor q3,q3,q0 @ I[i]^=Xi .byte 0x8e,0x8e,0xa8,0xf2 @ pmull q4,q12,q7 @ H·Ii+1 veor q9,q9,q7 @ Karatsuba pre-processing .byte 0x8f,0xce,0xa9,0xf2 @ pmull2 q6,q12,q7 b Loop_mod2x_v8 .align 4 Loop_mod2x_v8: vext.8 q10,q3,q3,#8 subs r3,r3,#32 @ is there more data? .byte 0x86,0x0e,0xac,0xf2 @ pmull q0,q14,q3 @ H^2.lo·Xi.lo movlo r12,#0 @ is it time to zero r12? .byte 0xa2,0xae,0xaa,0xf2 @ pmull q5,q13,q9 veor q10,q10,q3 @ Karatsuba pre-processing .byte 0x87,0x4e,0xad,0xf2 @ pmull2 q2,q14,q3 @ H^2.hi·Xi.hi veor q0,q0,q4 @ accumulate .byte 0xa5,0x2e,0xab,0xf2 @ pmull2 q1,q13,q10 @ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi) vld1.64 {q8},[r2],r12 @ load [rotated] I[i+2] veor q2,q2,q6 moveq r12,#0 @ is it time to zero r12? veor q1,q1,q5 vext.8 q9,q0,q2,#8 @ Karatsuba post-processing veor q10,q0,q2 veor q1,q1,q9 vld1.64 {q9},[r2],r12 @ load [rotated] I[i+3] #ifndef __ARMEB__ vrev64.8 q8,q8 #endif veor q1,q1,q10 .byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction #ifndef __ARMEB__ vrev64.8 q9,q9 #endif vmov d4,d3 @ Xh|Xm - 256-bit result vmov d3,d0 @ Xm is rotated Xl vext.8 q7,q9,q9,#8 vext.8 q3,q8,q8,#8 veor q0,q1,q10 .byte 0x8e,0x8e,0xa8,0xf2 @ pmull q4,q12,q7 @ H·Ii+1 veor q3,q3,q2 @ accumulate q3 early vext.8 q10,q0,q0,#8 @ 2nd phase of reduction .byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11 veor q3,q3,q10 veor q9,q9,q7 @ Karatsuba pre-processing veor q3,q3,q0 .byte 0x8f,0xce,0xa9,0xf2 @ pmull2 q6,q12,q7 bhs Loop_mod2x_v8 @ there was at least 32 more bytes veor q2,q2,q10 vext.8 q3,q8,q8,#8 @ re-construct q3 adds r3,r3,#32 @ re-construct r3 veor q0,q0,q2 @ re-construct q0 beq Ldone_v8 @ is r3 zero? Lodd_tail_v8: vext.8 q10,q0,q0,#8 veor q3,q3,q0 @ inp^=Xi veor q9,q8,q10 @ q9 is rotated inp^Xi .byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo veor q9,q9,q3 @ Karatsuba pre-processing .byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi .byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi) vext.8 q9,q0,q2,#8 @ Karatsuba post-processing veor q10,q0,q2 veor q1,q1,q9 veor q1,q1,q10 .byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction vmov d4,d3 @ Xh|Xm - 256-bit result vmov d3,d0 @ Xm is rotated Xl veor q0,q1,q10 vext.8 q10,q0,q0,#8 @ 2nd phase of reduction .byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11 veor q10,q10,q2 veor q0,q0,q10 Ldone_v8: #ifndef __ARMEB__ vrev64.8 q0,q0 #endif vext.8 q0,q0,q0,#8 vst1.64 {q0},[r0] @ write out Xi vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ 32-bit ABI says so bx lr .byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__APPLE__)
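gcm_ghash_v8 above leans on the PMULL instructions plus the Karatsuba trick noted in its comments: a 128x128 carry-less product is built from three 64x64 products — H.lo*X.lo, H.hi*X.hi and (H.lo^H.hi)*(X.lo^X.hi) — with the middle term recovered by XORing the outer two back out. A slow but self-contained C sketch of that decomposition (a bit-serial clmul stands in for PMULL; illustration only, the final GHASH reduction step is omitted):

#include <stdint.h>

/* Bit-serial 64x64 carry-less multiply; PMULL does this in one instruction. */
static void clmul64(uint64_t a, uint64_t b, uint64_t *hi, uint64_t *lo) {
    uint64_t h = 0, l = 0;
    for (int i = 0; i < 64; i++) {
        if ((b >> i) & 1) {
            l ^= a << i;
            if (i) h ^= a >> (64 - i);
        }
    }
    *hi = h; *lo = l;
}

/* Karatsuba: a 128x128 carry-less multiply from three 64x64 multiplies.
 * r[0..3] receives the 256-bit product, least-significant word first. */
static void clmul128_karatsuba(uint64_t ah, uint64_t al,
                               uint64_t bh, uint64_t bl, uint64_t r[4]) {
    uint64_t lo_h, lo_l, hi_h, hi_l, mid_h, mid_l;
    clmul64(al, bl, &lo_h, &lo_l);               /* H.lo * X.lo                */
    clmul64(ah, bh, &hi_h, &hi_l);               /* H.hi * X.hi                */
    clmul64(al ^ ah, bl ^ bh, &mid_h, &mid_l);   /* Karatsuba pre-processing   */
    mid_l ^= lo_l ^ hi_l;                        /* Karatsuba post-processing  */
    mid_h ^= lo_h ^ hi_h;
    r[0] = lo_l;
    r[1] = lo_h ^ mid_l;
    r[2] = hi_l ^ mid_h;
    r[3] = hi_h;
}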
marvin-hansen/iggy-streaming-system
6,253
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/ios-arm/crypto/fipsmodule/ghash-armv4.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__APPLE__) #include <openssl/arm_arch.h> @ Silence ARMv8 deprecated IT instruction warnings. This file is used by both @ ARMv7 and ARMv8 processors and does not use ARMv8 instructions. (ARMv8 PMULL @ instructions are in aesv8-armx.pl.) .text #if defined(__thumb2__) || defined(__clang__) .syntax unified #define ldrplb ldrbpl #define ldrneb ldrbne #endif #if defined(__thumb2__) .thumb #else .code 32 #endif #if __ARM_MAX_ARCH__>=7 .globl _gcm_init_neon .private_extern _gcm_init_neon #ifdef __thumb2__ .thumb_func _gcm_init_neon #endif .align 4 _gcm_init_neon: vld1.64 d7,[r1]! @ load H vmov.i8 q8,#0xe1 vld1.64 d6,[r1] vshl.i64 d17,#57 vshr.u64 d16,#63 @ t0=0xc2....01 vdup.8 q9,d7[7] vshr.u64 d26,d6,#63 vshr.s8 q9,#7 @ broadcast carry bit vshl.i64 q3,q3,#1 vand q8,q8,q9 vorr d7,d26 @ H<<<=1 veor q3,q3,q8 @ twisted H vstmia r0,{q3} bx lr @ bx lr .globl _gcm_gmult_neon .private_extern _gcm_gmult_neon #ifdef __thumb2__ .thumb_func _gcm_gmult_neon #endif .align 4 _gcm_gmult_neon: vld1.64 d7,[r0]! @ load Xi vld1.64 d6,[r0]! vmov.i64 d29,#0x0000ffffffffffff vldmia r1,{d26,d27} @ load twisted H vmov.i64 d30,#0x00000000ffffffff #ifdef __ARMEL__ vrev64.8 q3,q3 #endif vmov.i64 d31,#0x000000000000ffff veor d28,d26,d27 @ Karatsuba pre-processing mov r3,#16 b Lgmult_neon .globl _gcm_ghash_neon .private_extern _gcm_ghash_neon #ifdef __thumb2__ .thumb_func _gcm_ghash_neon #endif .align 4 _gcm_ghash_neon: vld1.64 d1,[r0]! @ load Xi vld1.64 d0,[r0]! vmov.i64 d29,#0x0000ffffffffffff vldmia r1,{d26,d27} @ load twisted H vmov.i64 d30,#0x00000000ffffffff #ifdef __ARMEL__ vrev64.8 q0,q0 #endif vmov.i64 d31,#0x000000000000ffff veor d28,d26,d27 @ Karatsuba pre-processing Loop_neon: vld1.64 d7,[r2]! @ load inp vld1.64 d6,[r2]! 
#ifdef __ARMEL__ vrev64.8 q3,q3 #endif veor q3,q0 @ inp^=Xi Lgmult_neon: vext.8 d16, d26, d26, #1 @ A1 vmull.p8 q8, d16, d6 @ F = A1*B vext.8 d0, d6, d6, #1 @ B1 vmull.p8 q0, d26, d0 @ E = A*B1 vext.8 d18, d26, d26, #2 @ A2 vmull.p8 q9, d18, d6 @ H = A2*B vext.8 d22, d6, d6, #2 @ B2 vmull.p8 q11, d26, d22 @ G = A*B2 vext.8 d20, d26, d26, #3 @ A3 veor q8, q8, q0 @ L = E + F vmull.p8 q10, d20, d6 @ J = A3*B vext.8 d0, d6, d6, #3 @ B3 veor q9, q9, q11 @ M = G + H vmull.p8 q0, d26, d0 @ I = A*B3 veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8 vand d17, d17, d29 vext.8 d22, d6, d6, #4 @ B4 veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16 vand d19, d19, d30 vmull.p8 q11, d26, d22 @ K = A*B4 veor q10, q10, q0 @ N = I + J veor d16, d16, d17 veor d18, d18, d19 veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 24 vand d21, d21, d31 vext.8 q8, q8, q8, #15 veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32 vmov.i64 d23, #0 vext.8 q9, q9, q9, #14 veor d20, d20, d21 vmull.p8 q0, d26, d6 @ D = A*B vext.8 q11, q11, q11, #12 vext.8 q10, q10, q10, #13 veor q8, q8, q9 veor q10, q10, q11 veor q0, q0, q8 veor q0, q0, q10 veor d6,d6,d7 @ Karatsuba pre-processing vext.8 d16, d28, d28, #1 @ A1 vmull.p8 q8, d16, d6 @ F = A1*B vext.8 d2, d6, d6, #1 @ B1 vmull.p8 q1, d28, d2 @ E = A*B1 vext.8 d18, d28, d28, #2 @ A2 vmull.p8 q9, d18, d6 @ H = A2*B vext.8 d22, d6, d6, #2 @ B2 vmull.p8 q11, d28, d22 @ G = A*B2 vext.8 d20, d28, d28, #3 @ A3 veor q8, q8, q1 @ L = E + F vmull.p8 q10, d20, d6 @ J = A3*B vext.8 d2, d6, d6, #3 @ B3 veor q9, q9, q11 @ M = G + H vmull.p8 q1, d28, d2 @ I = A*B3 veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8 vand d17, d17, d29 vext.8 d22, d6, d6, #4 @ B4 veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16 vand d19, d19, d30 vmull.p8 q11, d28, d22 @ K = A*B4 veor q10, q10, q1 @ N = I + J veor d16, d16, d17 veor d18, d18, d19 veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 24 vand d21, d21, d31 vext.8 q8, q8, q8, #15 veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32 vmov.i64 d23, #0 vext.8 q9, q9, q9, #14 veor d20, d20, d21 vmull.p8 q1, d28, d6 @ D = A*B vext.8 q11, q11, q11, #12 vext.8 q10, q10, q10, #13 veor q8, q8, q9 veor q10, q10, q11 veor q1, q1, q8 veor q1, q1, q10 vext.8 d16, d27, d27, #1 @ A1 vmull.p8 q8, d16, d7 @ F = A1*B vext.8 d4, d7, d7, #1 @ B1 vmull.p8 q2, d27, d4 @ E = A*B1 vext.8 d18, d27, d27, #2 @ A2 vmull.p8 q9, d18, d7 @ H = A2*B vext.8 d22, d7, d7, #2 @ B2 vmull.p8 q11, d27, d22 @ G = A*B2 vext.8 d20, d27, d27, #3 @ A3 veor q8, q8, q2 @ L = E + F vmull.p8 q10, d20, d7 @ J = A3*B vext.8 d4, d7, d7, #3 @ B3 veor q9, q9, q11 @ M = G + H vmull.p8 q2, d27, d4 @ I = A*B3 veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8 vand d17, d17, d29 vext.8 d22, d7, d7, #4 @ B4 veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16 vand d19, d19, d30 vmull.p8 q11, d27, d22 @ K = A*B4 veor q10, q10, q2 @ N = I + J veor d16, d16, d17 veor d18, d18, d19 veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 24 vand d21, d21, d31 vext.8 q8, q8, q8, #15 veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32 vmov.i64 d23, #0 vext.8 q9, q9, q9, #14 veor d20, d20, d21 vmull.p8 q2, d27, d7 @ D = A*B vext.8 q11, q11, q11, #12 vext.8 q10, q10, q10, #13 veor q8, q8, q9 veor q10, q10, q11 veor q2, q2, q8 veor q2, q2, q10 veor q1,q1,q0 @ Karatsuba post-processing veor q1,q1,q2 veor d1,d1,d2 veor d4,d4,d3 @ Xh|Xl - 256-bit result @ equivalent of reduction_avx from ghash-x86_64.pl vshl.i64 q9,q0,#57 @ 1st phase vshl.i64 q10,q0,#62 veor q10,q10,q9 @ vshl.i64 q9,q0,#63 veor q10, q10, q9 @ veor d1,d1,d20 @ veor d4,d4,d21 vshr.u64 q10,q0,#1 @ 2nd phase veor q2,q2,q0 veor q0,q0,q10 
@ vshr.u64 q10,q10,#6 vshr.u64 q0,q0,#1 @ veor q0,q0,q2 @ veor q0,q0,q10 @ subs r3,#16 bne Loop_neon #ifdef __ARMEL__ vrev64.8 q0,q0 #endif sub r0,#16 vst1.64 d1,[r0]! @ write out Xi vst1.64 d0,[r0] bx lr @ bx lr #endif .byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 #endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__APPLE__)
marvin-hansen/iggy-streaming-system
31,590
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/ios-arm/crypto/fipsmodule/sha1-armv4-large.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__APPLE__) #include <openssl/arm_arch.h> .text #if defined(__thumb2__) .syntax unified .thumb #else .code 32 #endif .globl _sha1_block_data_order_nohw .private_extern _sha1_block_data_order_nohw #ifdef __thumb2__ .thumb_func _sha1_block_data_order_nohw #endif .align 5 _sha1_block_data_order_nohw: stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} add r2,r1,r2,lsl#6 @ r2 to point at the end of r1 ldmia r0,{r3,r4,r5,r6,r7} Lloop: ldr r8,LK_00_19 mov r14,sp sub sp,sp,#15*4 mov r5,r5,ror#30 mov r6,r6,ror#30 mov r7,r7,ror#30 @ [6] L_00_15: #if __ARM_ARCH<7 ldrb r10,[r1,#2] ldrb r9,[r1,#3] ldrb r11,[r1,#1] add r7,r8,r7,ror#2 @ E+=K_00_19 ldrb r12,[r1],#4 orr r9,r9,r10,lsl#8 eor r10,r5,r6 @ F_xx_xx orr r9,r9,r11,lsl#16 add r7,r7,r3,ror#27 @ E+=ROR(A,27) orr r9,r9,r12,lsl#24 #else ldr r9,[r1],#4 @ handles unaligned add r7,r8,r7,ror#2 @ E+=K_00_19 eor r10,r5,r6 @ F_xx_xx add r7,r7,r3,ror#27 @ E+=ROR(A,27) #ifdef __ARMEL__ rev r9,r9 @ byte swap #endif #endif and r10,r4,r10,ror#2 add r7,r7,r9 @ E+=X[i] eor r10,r10,r6,ror#2 @ F_00_19(B,C,D) str r9,[r14,#-4]! add r7,r7,r10 @ E+=F_00_19(B,C,D) #if __ARM_ARCH<7 ldrb r10,[r1,#2] ldrb r9,[r1,#3] ldrb r11,[r1,#1] add r6,r8,r6,ror#2 @ E+=K_00_19 ldrb r12,[r1],#4 orr r9,r9,r10,lsl#8 eor r10,r4,r5 @ F_xx_xx orr r9,r9,r11,lsl#16 add r6,r6,r7,ror#27 @ E+=ROR(A,27) orr r9,r9,r12,lsl#24 #else ldr r9,[r1],#4 @ handles unaligned add r6,r8,r6,ror#2 @ E+=K_00_19 eor r10,r4,r5 @ F_xx_xx add r6,r6,r7,ror#27 @ E+=ROR(A,27) #ifdef __ARMEL__ rev r9,r9 @ byte swap #endif #endif and r10,r3,r10,ror#2 add r6,r6,r9 @ E+=X[i] eor r10,r10,r5,ror#2 @ F_00_19(B,C,D) str r9,[r14,#-4]! add r6,r6,r10 @ E+=F_00_19(B,C,D) #if __ARM_ARCH<7 ldrb r10,[r1,#2] ldrb r9,[r1,#3] ldrb r11,[r1,#1] add r5,r8,r5,ror#2 @ E+=K_00_19 ldrb r12,[r1],#4 orr r9,r9,r10,lsl#8 eor r10,r3,r4 @ F_xx_xx orr r9,r9,r11,lsl#16 add r5,r5,r6,ror#27 @ E+=ROR(A,27) orr r9,r9,r12,lsl#24 #else ldr r9,[r1],#4 @ handles unaligned add r5,r8,r5,ror#2 @ E+=K_00_19 eor r10,r3,r4 @ F_xx_xx add r5,r5,r6,ror#27 @ E+=ROR(A,27) #ifdef __ARMEL__ rev r9,r9 @ byte swap #endif #endif and r10,r7,r10,ror#2 add r5,r5,r9 @ E+=X[i] eor r10,r10,r4,ror#2 @ F_00_19(B,C,D) str r9,[r14,#-4]! add r5,r5,r10 @ E+=F_00_19(B,C,D) #if __ARM_ARCH<7 ldrb r10,[r1,#2] ldrb r9,[r1,#3] ldrb r11,[r1,#1] add r4,r8,r4,ror#2 @ E+=K_00_19 ldrb r12,[r1],#4 orr r9,r9,r10,lsl#8 eor r10,r7,r3 @ F_xx_xx orr r9,r9,r11,lsl#16 add r4,r4,r5,ror#27 @ E+=ROR(A,27) orr r9,r9,r12,lsl#24 #else ldr r9,[r1],#4 @ handles unaligned add r4,r8,r4,ror#2 @ E+=K_00_19 eor r10,r7,r3 @ F_xx_xx add r4,r4,r5,ror#27 @ E+=ROR(A,27) #ifdef __ARMEL__ rev r9,r9 @ byte swap #endif #endif and r10,r6,r10,ror#2 add r4,r4,r9 @ E+=X[i] eor r10,r10,r3,ror#2 @ F_00_19(B,C,D) str r9,[r14,#-4]! add r4,r4,r10 @ E+=F_00_19(B,C,D) #if __ARM_ARCH<7 ldrb r10,[r1,#2] ldrb r9,[r1,#3] ldrb r11,[r1,#1] add r3,r8,r3,ror#2 @ E+=K_00_19 ldrb r12,[r1],#4 orr r9,r9,r10,lsl#8 eor r10,r6,r7 @ F_xx_xx orr r9,r9,r11,lsl#16 add r3,r3,r4,ror#27 @ E+=ROR(A,27) orr r9,r9,r12,lsl#24 #else ldr r9,[r1],#4 @ handles unaligned add r3,r8,r3,ror#2 @ E+=K_00_19 eor r10,r6,r7 @ F_xx_xx add r3,r3,r4,ror#27 @ E+=ROR(A,27) #ifdef __ARMEL__ rev r9,r9 @ byte swap #endif #endif and r10,r5,r10,ror#2 add r3,r3,r9 @ E+=X[i] eor r10,r10,r7,ror#2 @ F_00_19(B,C,D) str r9,[r14,#-4]! 
add r3,r3,r10 @ E+=F_00_19(B,C,D) #if defined(__thumb2__) mov r12,sp teq r14,r12 #else teq r14,sp #endif bne L_00_15 @ [((11+4)*5+2)*3] sub sp,sp,#25*4 #if __ARM_ARCH<7 ldrb r10,[r1,#2] ldrb r9,[r1,#3] ldrb r11,[r1,#1] add r7,r8,r7,ror#2 @ E+=K_00_19 ldrb r12,[r1],#4 orr r9,r9,r10,lsl#8 eor r10,r5,r6 @ F_xx_xx orr r9,r9,r11,lsl#16 add r7,r7,r3,ror#27 @ E+=ROR(A,27) orr r9,r9,r12,lsl#24 #else ldr r9,[r1],#4 @ handles unaligned add r7,r8,r7,ror#2 @ E+=K_00_19 eor r10,r5,r6 @ F_xx_xx add r7,r7,r3,ror#27 @ E+=ROR(A,27) #ifdef __ARMEL__ rev r9,r9 @ byte swap #endif #endif and r10,r4,r10,ror#2 add r7,r7,r9 @ E+=X[i] eor r10,r10,r6,ror#2 @ F_00_19(B,C,D) str r9,[r14,#-4]! add r7,r7,r10 @ E+=F_00_19(B,C,D) ldr r9,[r14,#15*4] ldr r10,[r14,#13*4] ldr r11,[r14,#7*4] add r6,r8,r6,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 eor r11,r11,r12 @ 1 cycle stall eor r10,r4,r5 @ F_xx_xx mov r9,r9,ror#31 add r6,r6,r7,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 str r9,[r14,#-4]! and r10,r3,r10,ror#2 @ F_xx_xx @ F_xx_xx add r6,r6,r9 @ E+=X[i] eor r10,r10,r5,ror#2 @ F_00_19(B,C,D) add r6,r6,r10 @ E+=F_00_19(B,C,D) ldr r9,[r14,#15*4] ldr r10,[r14,#13*4] ldr r11,[r14,#7*4] add r5,r8,r5,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 eor r11,r11,r12 @ 1 cycle stall eor r10,r3,r4 @ F_xx_xx mov r9,r9,ror#31 add r5,r5,r6,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 str r9,[r14,#-4]! and r10,r7,r10,ror#2 @ F_xx_xx @ F_xx_xx add r5,r5,r9 @ E+=X[i] eor r10,r10,r4,ror#2 @ F_00_19(B,C,D) add r5,r5,r10 @ E+=F_00_19(B,C,D) ldr r9,[r14,#15*4] ldr r10,[r14,#13*4] ldr r11,[r14,#7*4] add r4,r8,r4,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 eor r11,r11,r12 @ 1 cycle stall eor r10,r7,r3 @ F_xx_xx mov r9,r9,ror#31 add r4,r4,r5,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 str r9,[r14,#-4]! and r10,r6,r10,ror#2 @ F_xx_xx @ F_xx_xx add r4,r4,r9 @ E+=X[i] eor r10,r10,r3,ror#2 @ F_00_19(B,C,D) add r4,r4,r10 @ E+=F_00_19(B,C,D) ldr r9,[r14,#15*4] ldr r10,[r14,#13*4] ldr r11,[r14,#7*4] add r3,r8,r3,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 eor r11,r11,r12 @ 1 cycle stall eor r10,r6,r7 @ F_xx_xx mov r9,r9,ror#31 add r3,r3,r4,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 str r9,[r14,#-4]! and r10,r5,r10,ror#2 @ F_xx_xx @ F_xx_xx add r3,r3,r9 @ E+=X[i] eor r10,r10,r7,ror#2 @ F_00_19(B,C,D) add r3,r3,r10 @ E+=F_00_19(B,C,D) ldr r8,LK_20_39 @ [+15+16*4] cmn sp,#0 @ [+3], clear carry to denote 20_39 L_20_39_or_60_79: ldr r9,[r14,#15*4] ldr r10,[r14,#13*4] ldr r11,[r14,#7*4] add r7,r8,r7,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 eor r11,r11,r12 @ 1 cycle stall eor r10,r5,r6 @ F_xx_xx mov r9,r9,ror#31 add r7,r7,r3,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 str r9,[r14,#-4]! eor r10,r4,r10,ror#2 @ F_xx_xx @ F_xx_xx add r7,r7,r9 @ E+=X[i] add r7,r7,r10 @ E+=F_20_39(B,C,D) ldr r9,[r14,#15*4] ldr r10,[r14,#13*4] ldr r11,[r14,#7*4] add r6,r8,r6,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 eor r11,r11,r12 @ 1 cycle stall eor r10,r4,r5 @ F_xx_xx mov r9,r9,ror#31 add r6,r6,r7,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 str r9,[r14,#-4]! eor r10,r3,r10,ror#2 @ F_xx_xx @ F_xx_xx add r6,r6,r9 @ E+=X[i] add r6,r6,r10 @ E+=F_20_39(B,C,D) ldr r9,[r14,#15*4] ldr r10,[r14,#13*4] ldr r11,[r14,#7*4] add r5,r8,r5,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 eor r11,r11,r12 @ 1 cycle stall eor r10,r3,r4 @ F_xx_xx mov r9,r9,ror#31 add r5,r5,r6,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 str r9,[r14,#-4]! 
eor r10,r7,r10,ror#2 @ F_xx_xx @ F_xx_xx add r5,r5,r9 @ E+=X[i] add r5,r5,r10 @ E+=F_20_39(B,C,D) ldr r9,[r14,#15*4] ldr r10,[r14,#13*4] ldr r11,[r14,#7*4] add r4,r8,r4,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 eor r11,r11,r12 @ 1 cycle stall eor r10,r7,r3 @ F_xx_xx mov r9,r9,ror#31 add r4,r4,r5,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 str r9,[r14,#-4]! eor r10,r6,r10,ror#2 @ F_xx_xx @ F_xx_xx add r4,r4,r9 @ E+=X[i] add r4,r4,r10 @ E+=F_20_39(B,C,D) ldr r9,[r14,#15*4] ldr r10,[r14,#13*4] ldr r11,[r14,#7*4] add r3,r8,r3,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 eor r11,r11,r12 @ 1 cycle stall eor r10,r6,r7 @ F_xx_xx mov r9,r9,ror#31 add r3,r3,r4,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 str r9,[r14,#-4]! eor r10,r5,r10,ror#2 @ F_xx_xx @ F_xx_xx add r3,r3,r9 @ E+=X[i] add r3,r3,r10 @ E+=F_20_39(B,C,D) #if defined(__thumb2__) mov r12,sp teq r14,r12 #else teq r14,sp @ preserve carry #endif bne L_20_39_or_60_79 @ [+((12+3)*5+2)*4] bcs L_done @ [+((12+3)*5+2)*4], spare 300 bytes ldr r8,LK_40_59 sub sp,sp,#20*4 @ [+2] L_40_59: ldr r9,[r14,#15*4] ldr r10,[r14,#13*4] ldr r11,[r14,#7*4] add r7,r8,r7,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 eor r11,r11,r12 @ 1 cycle stall eor r10,r5,r6 @ F_xx_xx mov r9,r9,ror#31 add r7,r7,r3,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 str r9,[r14,#-4]! and r10,r4,r10,ror#2 @ F_xx_xx and r11,r5,r6 @ F_xx_xx add r7,r7,r9 @ E+=X[i] add r7,r7,r10 @ E+=F_40_59(B,C,D) add r7,r7,r11,ror#2 ldr r9,[r14,#15*4] ldr r10,[r14,#13*4] ldr r11,[r14,#7*4] add r6,r8,r6,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 eor r11,r11,r12 @ 1 cycle stall eor r10,r4,r5 @ F_xx_xx mov r9,r9,ror#31 add r6,r6,r7,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 str r9,[r14,#-4]! and r10,r3,r10,ror#2 @ F_xx_xx and r11,r4,r5 @ F_xx_xx add r6,r6,r9 @ E+=X[i] add r6,r6,r10 @ E+=F_40_59(B,C,D) add r6,r6,r11,ror#2 ldr r9,[r14,#15*4] ldr r10,[r14,#13*4] ldr r11,[r14,#7*4] add r5,r8,r5,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 eor r11,r11,r12 @ 1 cycle stall eor r10,r3,r4 @ F_xx_xx mov r9,r9,ror#31 add r5,r5,r6,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 str r9,[r14,#-4]! and r10,r7,r10,ror#2 @ F_xx_xx and r11,r3,r4 @ F_xx_xx add r5,r5,r9 @ E+=X[i] add r5,r5,r10 @ E+=F_40_59(B,C,D) add r5,r5,r11,ror#2 ldr r9,[r14,#15*4] ldr r10,[r14,#13*4] ldr r11,[r14,#7*4] add r4,r8,r4,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 eor r11,r11,r12 @ 1 cycle stall eor r10,r7,r3 @ F_xx_xx mov r9,r9,ror#31 add r4,r4,r5,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 str r9,[r14,#-4]! and r10,r6,r10,ror#2 @ F_xx_xx and r11,r7,r3 @ F_xx_xx add r4,r4,r9 @ E+=X[i] add r4,r4,r10 @ E+=F_40_59(B,C,D) add r4,r4,r11,ror#2 ldr r9,[r14,#15*4] ldr r10,[r14,#13*4] ldr r11,[r14,#7*4] add r3,r8,r3,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 eor r11,r11,r12 @ 1 cycle stall eor r10,r6,r7 @ F_xx_xx mov r9,r9,ror#31 add r3,r3,r4,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 str r9,[r14,#-4]! 
and r10,r5,r10,ror#2 @ F_xx_xx and r11,r6,r7 @ F_xx_xx add r3,r3,r9 @ E+=X[i] add r3,r3,r10 @ E+=F_40_59(B,C,D) add r3,r3,r11,ror#2 #if defined(__thumb2__) mov r12,sp teq r14,r12 #else teq r14,sp #endif bne L_40_59 @ [+((12+5)*5+2)*4] ldr r8,LK_60_79 sub sp,sp,#20*4 cmp sp,#0 @ set carry to denote 60_79 b L_20_39_or_60_79 @ [+4], spare 300 bytes L_done: add sp,sp,#80*4 @ "deallocate" stack frame ldmia r0,{r8,r9,r10,r11,r12} add r3,r8,r3 add r4,r9,r4 add r5,r10,r5,ror#2 add r6,r11,r6,ror#2 add r7,r12,r7,ror#2 stmia r0,{r3,r4,r5,r6,r7} teq r1,r2 bne Lloop @ [+18], total 1307 #if __ARM_ARCH>=5 ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc} #else ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} tst lr,#1 moveq pc,lr @ be binary compatible with V4, yet .word 0xe12fff1e @ interoperable with Thumb ISA:-) #endif .align 5 LK_00_19:.word 0x5a827999 LK_20_39:.word 0x6ed9eba1 LK_40_59:.word 0x8f1bbcdc LK_60_79:.word 0xca62c1d6 .byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,47,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 5 #if __ARM_MAX_ARCH__>=7 .globl _sha1_block_data_order_neon .private_extern _sha1_block_data_order_neon #ifdef __thumb2__ .thumb_func _sha1_block_data_order_neon #endif .align 4 _sha1_block_data_order_neon: stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} add r2,r1,r2,lsl#6 @ r2 to point at the end of r1 @ dmb @ errata #451034 on early Cortex A8 @ vstmdb sp!,{d8-d15} @ ABI specification says so mov r14,sp sub r12,sp,#64 adr r8,LK_00_19 bic r12,r12,#15 @ align for 128-bit stores ldmia r0,{r3,r4,r5,r6,r7} @ load context mov sp,r12 @ alloca vld1.8 {q0,q1},[r1]! @ handles unaligned veor q15,q15,q15 vld1.8 {q2,q3},[r1]! vld1.32 {d28[],d29[]},[r8,:32]! @ load K_00_19 vrev32.8 q0,q0 @ yes, even on vrev32.8 q1,q1 @ big-endian... vrev32.8 q2,q2 vadd.i32 q8,q0,q14 vrev32.8 q3,q3 vadd.i32 q9,q1,q14 vst1.32 {q8},[r12,:128]! vadd.i32 q10,q2,q14 vst1.32 {q9},[r12,:128]! vst1.32 {q10},[r12,:128]! ldr r9,[sp] @ big RAW stall Loop_neon: vext.8 q8,q0,q1,#8 bic r10,r6,r4 add r7,r7,r9 and r11,r5,r4 vadd.i32 q13,q3,q14 ldr r9,[sp,#4] add r7,r7,r3,ror#27 vext.8 q12,q3,q15,#4 eor r11,r11,r10 mov r4,r4,ror#2 add r7,r7,r11 veor q8,q8,q0 bic r10,r5,r3 add r6,r6,r9 veor q12,q12,q2 and r11,r4,r3 ldr r9,[sp,#8] veor q12,q12,q8 add r6,r6,r7,ror#27 eor r11,r11,r10 vst1.32 {q13},[r12,:128]! sub r12,r12,#64 mov r3,r3,ror#2 add r6,r6,r11 vext.8 q13,q15,q12,#4 bic r10,r4,r7 add r5,r5,r9 vadd.i32 q8,q12,q12 and r11,r3,r7 ldr r9,[sp,#12] vsri.32 q8,q12,#31 add r5,r5,r6,ror#27 eor r11,r11,r10 mov r7,r7,ror#2 vshr.u32 q12,q13,#30 add r5,r5,r11 bic r10,r3,r6 vshl.u32 q13,q13,#2 add r4,r4,r9 and r11,r7,r6 veor q8,q8,q12 ldr r9,[sp,#16] add r4,r4,r5,ror#27 veor q8,q8,q13 eor r11,r11,r10 mov r6,r6,ror#2 add r4,r4,r11 vext.8 q9,q1,q2,#8 bic r10,r7,r5 add r3,r3,r9 and r11,r6,r5 vadd.i32 q13,q8,q14 ldr r9,[sp,#20] vld1.32 {d28[],d29[]},[r8,:32]! add r3,r3,r4,ror#27 vext.8 q12,q8,q15,#4 eor r11,r11,r10 mov r5,r5,ror#2 add r3,r3,r11 veor q9,q9,q1 bic r10,r6,r4 add r7,r7,r9 veor q12,q12,q3 and r11,r5,r4 ldr r9,[sp,#24] veor q12,q12,q9 add r7,r7,r3,ror#27 eor r11,r11,r10 vst1.32 {q13},[r12,:128]! 
mov r4,r4,ror#2 add r7,r7,r11 vext.8 q13,q15,q12,#4 bic r10,r5,r3 add r6,r6,r9 vadd.i32 q9,q12,q12 and r11,r4,r3 ldr r9,[sp,#28] vsri.32 q9,q12,#31 add r6,r6,r7,ror#27 eor r11,r11,r10 mov r3,r3,ror#2 vshr.u32 q12,q13,#30 add r6,r6,r11 bic r10,r4,r7 vshl.u32 q13,q13,#2 add r5,r5,r9 and r11,r3,r7 veor q9,q9,q12 ldr r9,[sp,#32] add r5,r5,r6,ror#27 veor q9,q9,q13 eor r11,r11,r10 mov r7,r7,ror#2 add r5,r5,r11 vext.8 q10,q2,q3,#8 bic r10,r3,r6 add r4,r4,r9 and r11,r7,r6 vadd.i32 q13,q9,q14 ldr r9,[sp,#36] add r4,r4,r5,ror#27 vext.8 q12,q9,q15,#4 eor r11,r11,r10 mov r6,r6,ror#2 add r4,r4,r11 veor q10,q10,q2 bic r10,r7,r5 add r3,r3,r9 veor q12,q12,q8 and r11,r6,r5 ldr r9,[sp,#40] veor q12,q12,q10 add r3,r3,r4,ror#27 eor r11,r11,r10 vst1.32 {q13},[r12,:128]! mov r5,r5,ror#2 add r3,r3,r11 vext.8 q13,q15,q12,#4 bic r10,r6,r4 add r7,r7,r9 vadd.i32 q10,q12,q12 and r11,r5,r4 ldr r9,[sp,#44] vsri.32 q10,q12,#31 add r7,r7,r3,ror#27 eor r11,r11,r10 mov r4,r4,ror#2 vshr.u32 q12,q13,#30 add r7,r7,r11 bic r10,r5,r3 vshl.u32 q13,q13,#2 add r6,r6,r9 and r11,r4,r3 veor q10,q10,q12 ldr r9,[sp,#48] add r6,r6,r7,ror#27 veor q10,q10,q13 eor r11,r11,r10 mov r3,r3,ror#2 add r6,r6,r11 vext.8 q11,q3,q8,#8 bic r10,r4,r7 add r5,r5,r9 and r11,r3,r7 vadd.i32 q13,q10,q14 ldr r9,[sp,#52] add r5,r5,r6,ror#27 vext.8 q12,q10,q15,#4 eor r11,r11,r10 mov r7,r7,ror#2 add r5,r5,r11 veor q11,q11,q3 bic r10,r3,r6 add r4,r4,r9 veor q12,q12,q9 and r11,r7,r6 ldr r9,[sp,#56] veor q12,q12,q11 add r4,r4,r5,ror#27 eor r11,r11,r10 vst1.32 {q13},[r12,:128]! mov r6,r6,ror#2 add r4,r4,r11 vext.8 q13,q15,q12,#4 bic r10,r7,r5 add r3,r3,r9 vadd.i32 q11,q12,q12 and r11,r6,r5 ldr r9,[sp,#60] vsri.32 q11,q12,#31 add r3,r3,r4,ror#27 eor r11,r11,r10 mov r5,r5,ror#2 vshr.u32 q12,q13,#30 add r3,r3,r11 bic r10,r6,r4 vshl.u32 q13,q13,#2 add r7,r7,r9 and r11,r5,r4 veor q11,q11,q12 ldr r9,[sp,#0] add r7,r7,r3,ror#27 veor q11,q11,q13 eor r11,r11,r10 mov r4,r4,ror#2 add r7,r7,r11 vext.8 q12,q10,q11,#8 bic r10,r5,r3 add r6,r6,r9 and r11,r4,r3 veor q0,q0,q8 ldr r9,[sp,#4] add r6,r6,r7,ror#27 veor q0,q0,q1 eor r11,r11,r10 mov r3,r3,ror#2 vadd.i32 q13,q11,q14 add r6,r6,r11 bic r10,r4,r7 veor q12,q12,q0 add r5,r5,r9 and r11,r3,r7 vshr.u32 q0,q12,#30 ldr r9,[sp,#8] add r5,r5,r6,ror#27 vst1.32 {q13},[r12,:128]! sub r12,r12,#64 eor r11,r11,r10 mov r7,r7,ror#2 vsli.32 q0,q12,#2 add r5,r5,r11 bic r10,r3,r6 add r4,r4,r9 and r11,r7,r6 ldr r9,[sp,#12] add r4,r4,r5,ror#27 eor r11,r11,r10 mov r6,r6,ror#2 add r4,r4,r11 bic r10,r7,r5 add r3,r3,r9 and r11,r6,r5 ldr r9,[sp,#16] add r3,r3,r4,ror#27 eor r11,r11,r10 mov r5,r5,ror#2 add r3,r3,r11 vext.8 q12,q11,q0,#8 eor r10,r4,r6 add r7,r7,r9 ldr r9,[sp,#20] veor q1,q1,q9 eor r11,r10,r5 add r7,r7,r3,ror#27 veor q1,q1,q2 mov r4,r4,ror#2 add r7,r7,r11 vadd.i32 q13,q0,q14 eor r10,r3,r5 add r6,r6,r9 veor q12,q12,q1 ldr r9,[sp,#24] eor r11,r10,r4 vshr.u32 q1,q12,#30 add r6,r6,r7,ror#27 mov r3,r3,ror#2 vst1.32 {q13},[r12,:128]! add r6,r6,r11 eor r10,r7,r4 vsli.32 q1,q12,#2 add r5,r5,r9 ldr r9,[sp,#28] eor r11,r10,r3 add r5,r5,r6,ror#27 mov r7,r7,ror#2 add r5,r5,r11 eor r10,r6,r3 add r4,r4,r9 ldr r9,[sp,#32] eor r11,r10,r7 add r4,r4,r5,ror#27 mov r6,r6,ror#2 add r4,r4,r11 vext.8 q12,q0,q1,#8 eor r10,r5,r7 add r3,r3,r9 ldr r9,[sp,#36] veor q2,q2,q10 eor r11,r10,r6 add r3,r3,r4,ror#27 veor q2,q2,q3 mov r5,r5,ror#2 add r3,r3,r11 vadd.i32 q13,q1,q14 eor r10,r4,r6 vld1.32 {d28[],d29[]},[r8,:32]! add r7,r7,r9 veor q12,q12,q2 ldr r9,[sp,#40] eor r11,r10,r5 vshr.u32 q2,q12,#30 add r7,r7,r3,ror#27 mov r4,r4,ror#2 vst1.32 {q13},[r12,:128]! 
add r7,r7,r11 eor r10,r3,r5 vsli.32 q2,q12,#2 add r6,r6,r9 ldr r9,[sp,#44] eor r11,r10,r4 add r6,r6,r7,ror#27 mov r3,r3,ror#2 add r6,r6,r11 eor r10,r7,r4 add r5,r5,r9 ldr r9,[sp,#48] eor r11,r10,r3 add r5,r5,r6,ror#27 mov r7,r7,ror#2 add r5,r5,r11 vext.8 q12,q1,q2,#8 eor r10,r6,r3 add r4,r4,r9 ldr r9,[sp,#52] veor q3,q3,q11 eor r11,r10,r7 add r4,r4,r5,ror#27 veor q3,q3,q8 mov r6,r6,ror#2 add r4,r4,r11 vadd.i32 q13,q2,q14 eor r10,r5,r7 add r3,r3,r9 veor q12,q12,q3 ldr r9,[sp,#56] eor r11,r10,r6 vshr.u32 q3,q12,#30 add r3,r3,r4,ror#27 mov r5,r5,ror#2 vst1.32 {q13},[r12,:128]! add r3,r3,r11 eor r10,r4,r6 vsli.32 q3,q12,#2 add r7,r7,r9 ldr r9,[sp,#60] eor r11,r10,r5 add r7,r7,r3,ror#27 mov r4,r4,ror#2 add r7,r7,r11 eor r10,r3,r5 add r6,r6,r9 ldr r9,[sp,#0] eor r11,r10,r4 add r6,r6,r7,ror#27 mov r3,r3,ror#2 add r6,r6,r11 vext.8 q12,q2,q3,#8 eor r10,r7,r4 add r5,r5,r9 ldr r9,[sp,#4] veor q8,q8,q0 eor r11,r10,r3 add r5,r5,r6,ror#27 veor q8,q8,q9 mov r7,r7,ror#2 add r5,r5,r11 vadd.i32 q13,q3,q14 eor r10,r6,r3 add r4,r4,r9 veor q12,q12,q8 ldr r9,[sp,#8] eor r11,r10,r7 vshr.u32 q8,q12,#30 add r4,r4,r5,ror#27 mov r6,r6,ror#2 vst1.32 {q13},[r12,:128]! sub r12,r12,#64 add r4,r4,r11 eor r10,r5,r7 vsli.32 q8,q12,#2 add r3,r3,r9 ldr r9,[sp,#12] eor r11,r10,r6 add r3,r3,r4,ror#27 mov r5,r5,ror#2 add r3,r3,r11 eor r10,r4,r6 add r7,r7,r9 ldr r9,[sp,#16] eor r11,r10,r5 add r7,r7,r3,ror#27 mov r4,r4,ror#2 add r7,r7,r11 vext.8 q12,q3,q8,#8 eor r10,r3,r5 add r6,r6,r9 ldr r9,[sp,#20] veor q9,q9,q1 eor r11,r10,r4 add r6,r6,r7,ror#27 veor q9,q9,q10 mov r3,r3,ror#2 add r6,r6,r11 vadd.i32 q13,q8,q14 eor r10,r7,r4 add r5,r5,r9 veor q12,q12,q9 ldr r9,[sp,#24] eor r11,r10,r3 vshr.u32 q9,q12,#30 add r5,r5,r6,ror#27 mov r7,r7,ror#2 vst1.32 {q13},[r12,:128]! add r5,r5,r11 eor r10,r6,r3 vsli.32 q9,q12,#2 add r4,r4,r9 ldr r9,[sp,#28] eor r11,r10,r7 add r4,r4,r5,ror#27 mov r6,r6,ror#2 add r4,r4,r11 eor r10,r5,r7 add r3,r3,r9 ldr r9,[sp,#32] eor r11,r10,r6 add r3,r3,r4,ror#27 mov r5,r5,ror#2 add r3,r3,r11 vext.8 q12,q8,q9,#8 add r7,r7,r9 and r10,r5,r6 ldr r9,[sp,#36] veor q10,q10,q2 add r7,r7,r3,ror#27 eor r11,r5,r6 veor q10,q10,q11 add r7,r7,r10 and r11,r11,r4 vadd.i32 q13,q9,q14 mov r4,r4,ror#2 add r7,r7,r11 veor q12,q12,q10 add r6,r6,r9 and r10,r4,r5 vshr.u32 q10,q12,#30 ldr r9,[sp,#40] add r6,r6,r7,ror#27 vst1.32 {q13},[r12,:128]! eor r11,r4,r5 add r6,r6,r10 vsli.32 q10,q12,#2 and r11,r11,r3 mov r3,r3,ror#2 add r6,r6,r11 add r5,r5,r9 and r10,r3,r4 ldr r9,[sp,#44] add r5,r5,r6,ror#27 eor r11,r3,r4 add r5,r5,r10 and r11,r11,r7 mov r7,r7,ror#2 add r5,r5,r11 add r4,r4,r9 and r10,r7,r3 ldr r9,[sp,#48] add r4,r4,r5,ror#27 eor r11,r7,r3 add r4,r4,r10 and r11,r11,r6 mov r6,r6,ror#2 add r4,r4,r11 vext.8 q12,q9,q10,#8 add r3,r3,r9 and r10,r6,r7 ldr r9,[sp,#52] veor q11,q11,q3 add r3,r3,r4,ror#27 eor r11,r6,r7 veor q11,q11,q0 add r3,r3,r10 and r11,r11,r5 vadd.i32 q13,q10,q14 mov r5,r5,ror#2 vld1.32 {d28[],d29[]},[r8,:32]! add r3,r3,r11 veor q12,q12,q11 add r7,r7,r9 and r10,r5,r6 vshr.u32 q11,q12,#30 ldr r9,[sp,#56] add r7,r7,r3,ror#27 vst1.32 {q13},[r12,:128]! 
eor r11,r5,r6 add r7,r7,r10 vsli.32 q11,q12,#2 and r11,r11,r4 mov r4,r4,ror#2 add r7,r7,r11 add r6,r6,r9 and r10,r4,r5 ldr r9,[sp,#60] add r6,r6,r7,ror#27 eor r11,r4,r5 add r6,r6,r10 and r11,r11,r3 mov r3,r3,ror#2 add r6,r6,r11 add r5,r5,r9 and r10,r3,r4 ldr r9,[sp,#0] add r5,r5,r6,ror#27 eor r11,r3,r4 add r5,r5,r10 and r11,r11,r7 mov r7,r7,ror#2 add r5,r5,r11 vext.8 q12,q10,q11,#8 add r4,r4,r9 and r10,r7,r3 ldr r9,[sp,#4] veor q0,q0,q8 add r4,r4,r5,ror#27 eor r11,r7,r3 veor q0,q0,q1 add r4,r4,r10 and r11,r11,r6 vadd.i32 q13,q11,q14 mov r6,r6,ror#2 add r4,r4,r11 veor q12,q12,q0 add r3,r3,r9 and r10,r6,r7 vshr.u32 q0,q12,#30 ldr r9,[sp,#8] add r3,r3,r4,ror#27 vst1.32 {q13},[r12,:128]! sub r12,r12,#64 eor r11,r6,r7 add r3,r3,r10 vsli.32 q0,q12,#2 and r11,r11,r5 mov r5,r5,ror#2 add r3,r3,r11 add r7,r7,r9 and r10,r5,r6 ldr r9,[sp,#12] add r7,r7,r3,ror#27 eor r11,r5,r6 add r7,r7,r10 and r11,r11,r4 mov r4,r4,ror#2 add r7,r7,r11 add r6,r6,r9 and r10,r4,r5 ldr r9,[sp,#16] add r6,r6,r7,ror#27 eor r11,r4,r5 add r6,r6,r10 and r11,r11,r3 mov r3,r3,ror#2 add r6,r6,r11 vext.8 q12,q11,q0,#8 add r5,r5,r9 and r10,r3,r4 ldr r9,[sp,#20] veor q1,q1,q9 add r5,r5,r6,ror#27 eor r11,r3,r4 veor q1,q1,q2 add r5,r5,r10 and r11,r11,r7 vadd.i32 q13,q0,q14 mov r7,r7,ror#2 add r5,r5,r11 veor q12,q12,q1 add r4,r4,r9 and r10,r7,r3 vshr.u32 q1,q12,#30 ldr r9,[sp,#24] add r4,r4,r5,ror#27 vst1.32 {q13},[r12,:128]! eor r11,r7,r3 add r4,r4,r10 vsli.32 q1,q12,#2 and r11,r11,r6 mov r6,r6,ror#2 add r4,r4,r11 add r3,r3,r9 and r10,r6,r7 ldr r9,[sp,#28] add r3,r3,r4,ror#27 eor r11,r6,r7 add r3,r3,r10 and r11,r11,r5 mov r5,r5,ror#2 add r3,r3,r11 add r7,r7,r9 and r10,r5,r6 ldr r9,[sp,#32] add r7,r7,r3,ror#27 eor r11,r5,r6 add r7,r7,r10 and r11,r11,r4 mov r4,r4,ror#2 add r7,r7,r11 vext.8 q12,q0,q1,#8 add r6,r6,r9 and r10,r4,r5 ldr r9,[sp,#36] veor q2,q2,q10 add r6,r6,r7,ror#27 eor r11,r4,r5 veor q2,q2,q3 add r6,r6,r10 and r11,r11,r3 vadd.i32 q13,q1,q14 mov r3,r3,ror#2 add r6,r6,r11 veor q12,q12,q2 add r5,r5,r9 and r10,r3,r4 vshr.u32 q2,q12,#30 ldr r9,[sp,#40] add r5,r5,r6,ror#27 vst1.32 {q13},[r12,:128]! eor r11,r3,r4 add r5,r5,r10 vsli.32 q2,q12,#2 and r11,r11,r7 mov r7,r7,ror#2 add r5,r5,r11 add r4,r4,r9 and r10,r7,r3 ldr r9,[sp,#44] add r4,r4,r5,ror#27 eor r11,r7,r3 add r4,r4,r10 and r11,r11,r6 mov r6,r6,ror#2 add r4,r4,r11 add r3,r3,r9 and r10,r6,r7 ldr r9,[sp,#48] add r3,r3,r4,ror#27 eor r11,r6,r7 add r3,r3,r10 and r11,r11,r5 mov r5,r5,ror#2 add r3,r3,r11 vext.8 q12,q1,q2,#8 eor r10,r4,r6 add r7,r7,r9 ldr r9,[sp,#52] veor q3,q3,q11 eor r11,r10,r5 add r7,r7,r3,ror#27 veor q3,q3,q8 mov r4,r4,ror#2 add r7,r7,r11 vadd.i32 q13,q2,q14 eor r10,r3,r5 add r6,r6,r9 veor q12,q12,q3 ldr r9,[sp,#56] eor r11,r10,r4 vshr.u32 q3,q12,#30 add r6,r6,r7,ror#27 mov r3,r3,ror#2 vst1.32 {q13},[r12,:128]! add r6,r6,r11 eor r10,r7,r4 vsli.32 q3,q12,#2 add r5,r5,r9 ldr r9,[sp,#60] eor r11,r10,r3 add r5,r5,r6,ror#27 mov r7,r7,ror#2 add r5,r5,r11 eor r10,r6,r3 add r4,r4,r9 ldr r9,[sp,#0] eor r11,r10,r7 add r4,r4,r5,ror#27 mov r6,r6,ror#2 add r4,r4,r11 vadd.i32 q13,q3,q14 eor r10,r5,r7 add r3,r3,r9 vst1.32 {q13},[r12,:128]! sub r12,r12,#64 teq r1,r2 sub r8,r8,#16 it eq subeq r1,r1,#64 vld1.8 {q0,q1},[r1]! ldr r9,[sp,#4] eor r11,r10,r6 vld1.8 {q2,q3},[r1]! add r3,r3,r4,ror#27 mov r5,r5,ror#2 vld1.32 {d28[],d29[]},[r8,:32]! 
add r3,r3,r11 eor r10,r4,r6 vrev32.8 q0,q0 add r7,r7,r9 ldr r9,[sp,#8] eor r11,r10,r5 add r7,r7,r3,ror#27 mov r4,r4,ror#2 add r7,r7,r11 eor r10,r3,r5 add r6,r6,r9 ldr r9,[sp,#12] eor r11,r10,r4 add r6,r6,r7,ror#27 mov r3,r3,ror#2 add r6,r6,r11 eor r10,r7,r4 add r5,r5,r9 ldr r9,[sp,#16] eor r11,r10,r3 add r5,r5,r6,ror#27 mov r7,r7,ror#2 add r5,r5,r11 vrev32.8 q1,q1 eor r10,r6,r3 add r4,r4,r9 vadd.i32 q8,q0,q14 ldr r9,[sp,#20] eor r11,r10,r7 vst1.32 {q8},[r12,:128]! add r4,r4,r5,ror#27 mov r6,r6,ror#2 add r4,r4,r11 eor r10,r5,r7 add r3,r3,r9 ldr r9,[sp,#24] eor r11,r10,r6 add r3,r3,r4,ror#27 mov r5,r5,ror#2 add r3,r3,r11 eor r10,r4,r6 add r7,r7,r9 ldr r9,[sp,#28] eor r11,r10,r5 add r7,r7,r3,ror#27 mov r4,r4,ror#2 add r7,r7,r11 eor r10,r3,r5 add r6,r6,r9 ldr r9,[sp,#32] eor r11,r10,r4 add r6,r6,r7,ror#27 mov r3,r3,ror#2 add r6,r6,r11 vrev32.8 q2,q2 eor r10,r7,r4 add r5,r5,r9 vadd.i32 q9,q1,q14 ldr r9,[sp,#36] eor r11,r10,r3 vst1.32 {q9},[r12,:128]! add r5,r5,r6,ror#27 mov r7,r7,ror#2 add r5,r5,r11 eor r10,r6,r3 add r4,r4,r9 ldr r9,[sp,#40] eor r11,r10,r7 add r4,r4,r5,ror#27 mov r6,r6,ror#2 add r4,r4,r11 eor r10,r5,r7 add r3,r3,r9 ldr r9,[sp,#44] eor r11,r10,r6 add r3,r3,r4,ror#27 mov r5,r5,ror#2 add r3,r3,r11 eor r10,r4,r6 add r7,r7,r9 ldr r9,[sp,#48] eor r11,r10,r5 add r7,r7,r3,ror#27 mov r4,r4,ror#2 add r7,r7,r11 vrev32.8 q3,q3 eor r10,r3,r5 add r6,r6,r9 vadd.i32 q10,q2,q14 ldr r9,[sp,#52] eor r11,r10,r4 vst1.32 {q10},[r12,:128]! add r6,r6,r7,ror#27 mov r3,r3,ror#2 add r6,r6,r11 eor r10,r7,r4 add r5,r5,r9 ldr r9,[sp,#56] eor r11,r10,r3 add r5,r5,r6,ror#27 mov r7,r7,ror#2 add r5,r5,r11 eor r10,r6,r3 add r4,r4,r9 ldr r9,[sp,#60] eor r11,r10,r7 add r4,r4,r5,ror#27 mov r6,r6,ror#2 add r4,r4,r11 eor r10,r5,r7 add r3,r3,r9 eor r11,r10,r6 add r3,r3,r4,ror#27 mov r5,r5,ror#2 add r3,r3,r11 ldmia r0,{r9,r10,r11,r12} @ accumulate context add r3,r3,r9 ldr r9,[r0,#16] add r4,r4,r10 add r5,r5,r11 add r6,r6,r12 it eq moveq sp,r14 add r7,r7,r9 it ne ldrne r9,[sp] stmia r0,{r3,r4,r5,r6,r7} itt ne addne r12,sp,#3*16 bne Loop_neon @ vldmia sp!,{d8-d15} ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc} #endif #if __ARM_MAX_ARCH__>=7 # if defined(__thumb2__) # define INST(a,b,c,d) .byte c,d|0xf,a,b # else # define INST(a,b,c,d) .byte a,b,c,d|0x10 # endif .globl _sha1_block_data_order_hw .private_extern _sha1_block_data_order_hw #ifdef __thumb2__ .thumb_func _sha1_block_data_order_hw #endif .align 5 _sha1_block_data_order_hw: vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so veor q1,q1,q1 adr r3,LK_00_19 vld1.32 {q0},[r0]! vld1.32 {d2[0]},[r0] sub r0,r0,#16 vld1.32 {d16[],d17[]},[r3,:32]! vld1.32 {d18[],d19[]},[r3,:32]! vld1.32 {d20[],d21[]},[r3,:32]! vld1.32 {d22[],d23[]},[r3,:32] Loop_v8: vld1.8 {q4,q5},[r1]! vld1.8 {q6,q7},[r1]! 
vrev32.8 q4,q4 vrev32.8 q5,q5 vadd.i32 q12,q8,q4 vrev32.8 q6,q6 vmov q14,q0 @ offload subs r2,r2,#1 vadd.i32 q13,q8,q5 vrev32.8 q7,q7 INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 0 INST(0x68,0x0c,0x02,0xe2) @ sha1c q0,q1,q12 vadd.i32 q12,q8,q6 INST(0x4c,0x8c,0x3a,0xe2) @ sha1su0 q4,q5,q6 INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 1 INST(0x6a,0x0c,0x06,0xe2) @ sha1c q0,q3,q13 vadd.i32 q13,q8,q7 INST(0x8e,0x83,0xba,0xf3) @ sha1su1 q4,q7 INST(0x4e,0xac,0x3c,0xe2) @ sha1su0 q5,q6,q7 INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 2 INST(0x68,0x0c,0x04,0xe2) @ sha1c q0,q2,q12 vadd.i32 q12,q8,q4 INST(0x88,0xa3,0xba,0xf3) @ sha1su1 q5,q4 INST(0x48,0xcc,0x3e,0xe2) @ sha1su0 q6,q7,q4 INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 3 INST(0x6a,0x0c,0x06,0xe2) @ sha1c q0,q3,q13 vadd.i32 q13,q9,q5 INST(0x8a,0xc3,0xba,0xf3) @ sha1su1 q6,q5 INST(0x4a,0xec,0x38,0xe2) @ sha1su0 q7,q4,q5 INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 4 INST(0x68,0x0c,0x04,0xe2) @ sha1c q0,q2,q12 vadd.i32 q12,q9,q6 INST(0x8c,0xe3,0xba,0xf3) @ sha1su1 q7,q6 INST(0x4c,0x8c,0x3a,0xe2) @ sha1su0 q4,q5,q6 INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 5 INST(0x6a,0x0c,0x16,0xe2) @ sha1p q0,q3,q13 vadd.i32 q13,q9,q7 INST(0x8e,0x83,0xba,0xf3) @ sha1su1 q4,q7 INST(0x4e,0xac,0x3c,0xe2) @ sha1su0 q5,q6,q7 INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 6 INST(0x68,0x0c,0x14,0xe2) @ sha1p q0,q2,q12 vadd.i32 q12,q9,q4 INST(0x88,0xa3,0xba,0xf3) @ sha1su1 q5,q4 INST(0x48,0xcc,0x3e,0xe2) @ sha1su0 q6,q7,q4 INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 7 INST(0x6a,0x0c,0x16,0xe2) @ sha1p q0,q3,q13 vadd.i32 q13,q9,q5 INST(0x8a,0xc3,0xba,0xf3) @ sha1su1 q6,q5 INST(0x4a,0xec,0x38,0xe2) @ sha1su0 q7,q4,q5 INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 8 INST(0x68,0x0c,0x14,0xe2) @ sha1p q0,q2,q12 vadd.i32 q12,q10,q6 INST(0x8c,0xe3,0xba,0xf3) @ sha1su1 q7,q6 INST(0x4c,0x8c,0x3a,0xe2) @ sha1su0 q4,q5,q6 INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 9 INST(0x6a,0x0c,0x16,0xe2) @ sha1p q0,q3,q13 vadd.i32 q13,q10,q7 INST(0x8e,0x83,0xba,0xf3) @ sha1su1 q4,q7 INST(0x4e,0xac,0x3c,0xe2) @ sha1su0 q5,q6,q7 INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 10 INST(0x68,0x0c,0x24,0xe2) @ sha1m q0,q2,q12 vadd.i32 q12,q10,q4 INST(0x88,0xa3,0xba,0xf3) @ sha1su1 q5,q4 INST(0x48,0xcc,0x3e,0xe2) @ sha1su0 q6,q7,q4 INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 11 INST(0x6a,0x0c,0x26,0xe2) @ sha1m q0,q3,q13 vadd.i32 q13,q10,q5 INST(0x8a,0xc3,0xba,0xf3) @ sha1su1 q6,q5 INST(0x4a,0xec,0x38,0xe2) @ sha1su0 q7,q4,q5 INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 12 INST(0x68,0x0c,0x24,0xe2) @ sha1m q0,q2,q12 vadd.i32 q12,q10,q6 INST(0x8c,0xe3,0xba,0xf3) @ sha1su1 q7,q6 INST(0x4c,0x8c,0x3a,0xe2) @ sha1su0 q4,q5,q6 INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 13 INST(0x6a,0x0c,0x26,0xe2) @ sha1m q0,q3,q13 vadd.i32 q13,q11,q7 INST(0x8e,0x83,0xba,0xf3) @ sha1su1 q4,q7 INST(0x4e,0xac,0x3c,0xe2) @ sha1su0 q5,q6,q7 INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 14 INST(0x68,0x0c,0x24,0xe2) @ sha1m q0,q2,q12 vadd.i32 q12,q11,q4 INST(0x88,0xa3,0xba,0xf3) @ sha1su1 q5,q4 INST(0x48,0xcc,0x3e,0xe2) @ sha1su0 q6,q7,q4 INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 15 INST(0x6a,0x0c,0x16,0xe2) @ sha1p q0,q3,q13 vadd.i32 q13,q11,q5 INST(0x8a,0xc3,0xba,0xf3) @ sha1su1 q6,q5 INST(0x4a,0xec,0x38,0xe2) @ sha1su0 q7,q4,q5 INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 16 INST(0x68,0x0c,0x14,0xe2) @ sha1p q0,q2,q12 vadd.i32 q12,q11,q6 INST(0x8c,0xe3,0xba,0xf3) @ sha1su1 q7,q6 INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 17 INST(0x6a,0x0c,0x16,0xe2) @ sha1p q0,q3,q13 vadd.i32 q13,q11,q7 INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 18 INST(0x68,0x0c,0x14,0xe2) @ sha1p 
q0,q2,q12 INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 19 INST(0x6a,0x0c,0x16,0xe2) @ sha1p q0,q3,q13 vadd.i32 q1,q1,q2 vadd.i32 q0,q0,q14 bne Loop_v8 vst1.32 {q0},[r0]! vst1.32 {d2[0]},[r0] vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15} bx lr @ bx lr #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__APPLE__)
marvin-hansen/iggy-streaming-system
28,701
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/ios-arm/crypto/chacha/chacha-armv4.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__APPLE__) #include <openssl/arm_arch.h> @ Silence ARMv8 deprecated IT instruction warnings. This file is used by both @ ARMv7 and ARMv8 processors and does not use ARMv8 instructions. .text #if defined(__thumb2__) || defined(__clang__) .syntax unified #endif #if defined(__thumb2__) .thumb #else .code 32 #endif #if defined(__thumb2__) || defined(__clang__) #define ldrhsb ldrbhs #endif .align 5 Lsigma: .long 0x61707865,0x3320646e,0x79622d32,0x6b206574 @ endian-neutral Lone: .long 1,0,0,0 .globl _ChaCha20_ctr32_nohw .private_extern _ChaCha20_ctr32_nohw #ifdef __thumb2__ .thumb_func _ChaCha20_ctr32_nohw #endif .align 5 _ChaCha20_ctr32_nohw: ldr r12,[sp,#0] @ pull pointer to counter and nonce stmdb sp!,{r0,r1,r2,r4-r11,lr} adr r14,Lsigma ldmia r12,{r4,r5,r6,r7} @ load counter and nonce sub sp,sp,#4*(16) @ off-load area stmdb sp!,{r4,r5,r6,r7} @ copy counter and nonce ldmia r3,{r4,r5,r6,r7,r8,r9,r10,r11} @ load key ldmia r14,{r0,r1,r2,r3} @ load sigma stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11} @ copy key stmdb sp!,{r0,r1,r2,r3} @ copy sigma str r10,[sp,#4*(16+10)] @ off-load "rx" str r11,[sp,#4*(16+11)] @ off-load "rx" b Loop_outer_enter .align 4 Loop_outer: ldmia sp,{r0,r1,r2,r3,r4,r5,r6,r7,r8,r9} @ load key material str r11,[sp,#4*(32+2)] @ save len str r12, [sp,#4*(32+1)] @ save inp str r14, [sp,#4*(32+0)] @ save out Loop_outer_enter: ldr r11, [sp,#4*(15)] ldr r12,[sp,#4*(12)] @ modulo-scheduled load ldr r10, [sp,#4*(13)] ldr r14,[sp,#4*(14)] str r11, [sp,#4*(16+15)] mov r11,#10 b Loop .align 4 Loop: subs r11,r11,#1 add r0,r0,r4 mov r12,r12,ror#16 add r1,r1,r5 mov r10,r10,ror#16 eor r12,r12,r0,ror#16 eor r10,r10,r1,ror#16 add r8,r8,r12 mov r4,r4,ror#20 add r9,r9,r10 mov r5,r5,ror#20 eor r4,r4,r8,ror#20 eor r5,r5,r9,ror#20 add r0,r0,r4 mov r12,r12,ror#24 add r1,r1,r5 mov r10,r10,ror#24 eor r12,r12,r0,ror#24 eor r10,r10,r1,ror#24 add r8,r8,r12 mov r4,r4,ror#25 add r9,r9,r10 mov r5,r5,ror#25 str r10,[sp,#4*(16+13)] ldr r10,[sp,#4*(16+15)] eor r4,r4,r8,ror#25 eor r5,r5,r9,ror#25 str r8,[sp,#4*(16+8)] ldr r8,[sp,#4*(16+10)] add r2,r2,r6 mov r14,r14,ror#16 str r9,[sp,#4*(16+9)] ldr r9,[sp,#4*(16+11)] add r3,r3,r7 mov r10,r10,ror#16 eor r14,r14,r2,ror#16 eor r10,r10,r3,ror#16 add r8,r8,r14 mov r6,r6,ror#20 add r9,r9,r10 mov r7,r7,ror#20 eor r6,r6,r8,ror#20 eor r7,r7,r9,ror#20 add r2,r2,r6 mov r14,r14,ror#24 add r3,r3,r7 mov r10,r10,ror#24 eor r14,r14,r2,ror#24 eor r10,r10,r3,ror#24 add r8,r8,r14 mov r6,r6,ror#25 add r9,r9,r10 mov r7,r7,ror#25 eor r6,r6,r8,ror#25 eor r7,r7,r9,ror#25 add r0,r0,r5 mov r10,r10,ror#16 add r1,r1,r6 mov r12,r12,ror#16 eor r10,r10,r0,ror#16 eor r12,r12,r1,ror#16 add r8,r8,r10 mov r5,r5,ror#20 add r9,r9,r12 mov r6,r6,ror#20 eor r5,r5,r8,ror#20 eor r6,r6,r9,ror#20 add r0,r0,r5 mov r10,r10,ror#24 add r1,r1,r6 mov r12,r12,ror#24 eor r10,r10,r0,ror#24 eor r12,r12,r1,ror#24 add r8,r8,r10 mov r5,r5,ror#25 str r10,[sp,#4*(16+15)] ldr r10,[sp,#4*(16+13)] add r9,r9,r12 mov r6,r6,ror#25 eor r5,r5,r8,ror#25 eor r6,r6,r9,ror#25 str r8,[sp,#4*(16+10)] ldr r8,[sp,#4*(16+8)] add r2,r2,r7 mov r10,r10,ror#16 str r9,[sp,#4*(16+11)] ldr r9,[sp,#4*(16+9)] add r3,r3,r4 mov r14,r14,ror#16 eor r10,r10,r2,ror#16 eor r14,r14,r3,ror#16 add r8,r8,r10 mov r7,r7,ror#20 add r9,r9,r14 mov r4,r4,ror#20 eor r7,r7,r8,ror#20 eor r4,r4,r9,ror#20 add r2,r2,r7 mov r10,r10,ror#24 add r3,r3,r4 mov r14,r14,ror#24 
eor r10,r10,r2,ror#24 eor r14,r14,r3,ror#24 add r8,r8,r10 mov r7,r7,ror#25 add r9,r9,r14 mov r4,r4,ror#25 eor r7,r7,r8,ror#25 eor r4,r4,r9,ror#25 bne Loop ldr r11,[sp,#4*(32+2)] @ load len str r8, [sp,#4*(16+8)] @ modulo-scheduled store str r9, [sp,#4*(16+9)] str r12,[sp,#4*(16+12)] str r10, [sp,#4*(16+13)] str r14,[sp,#4*(16+14)] @ at this point we have first half of 512-bit result in @ rx and second half at sp+4*(16+8) cmp r11,#64 @ done yet? #ifdef __thumb2__ itete lo #endif addlo r12,sp,#4*(0) @ shortcut or ... ldrhs r12,[sp,#4*(32+1)] @ ... load inp addlo r14,sp,#4*(0) @ shortcut or ... ldrhs r14,[sp,#4*(32+0)] @ ... load out ldr r8,[sp,#4*(0)] @ load key material ldr r9,[sp,#4*(1)] #if __ARM_ARCH>=6 || !defined(__ARMEB__) # if __ARM_ARCH<7 orr r10,r12,r14 tst r10,#3 @ are input and output aligned? ldr r10,[sp,#4*(2)] bne Lunaligned cmp r11,#64 @ restore flags # else ldr r10,[sp,#4*(2)] # endif ldr r11,[sp,#4*(3)] add r0,r0,r8 @ accumulate key material add r1,r1,r9 # ifdef __thumb2__ itt hs # endif ldrhs r8,[r12],#16 @ load input ldrhs r9,[r12,#-12] add r2,r2,r10 add r3,r3,r11 # ifdef __thumb2__ itt hs # endif ldrhs r10,[r12,#-8] ldrhs r11,[r12,#-4] # if __ARM_ARCH>=6 && defined(__ARMEB__) rev r0,r0 rev r1,r1 rev r2,r2 rev r3,r3 # endif # ifdef __thumb2__ itt hs # endif eorhs r0,r0,r8 @ xor with input eorhs r1,r1,r9 add r8,sp,#4*(4) str r0,[r14],#16 @ store output # ifdef __thumb2__ itt hs # endif eorhs r2,r2,r10 eorhs r3,r3,r11 ldmia r8,{r8,r9,r10,r11} @ load key material str r1,[r14,#-12] str r2,[r14,#-8] str r3,[r14,#-4] add r4,r4,r8 @ accumulate key material add r5,r5,r9 # ifdef __thumb2__ itt hs # endif ldrhs r8,[r12],#16 @ load input ldrhs r9,[r12,#-12] add r6,r6,r10 add r7,r7,r11 # ifdef __thumb2__ itt hs # endif ldrhs r10,[r12,#-8] ldrhs r11,[r12,#-4] # if __ARM_ARCH>=6 && defined(__ARMEB__) rev r4,r4 rev r5,r5 rev r6,r6 rev r7,r7 # endif # ifdef __thumb2__ itt hs # endif eorhs r4,r4,r8 eorhs r5,r5,r9 add r8,sp,#4*(8) str r4,[r14],#16 @ store output # ifdef __thumb2__ itt hs # endif eorhs r6,r6,r10 eorhs r7,r7,r11 str r5,[r14,#-12] ldmia r8,{r8,r9,r10,r11} @ load key material str r6,[r14,#-8] add r0,sp,#4*(16+8) str r7,[r14,#-4] ldmia r0,{r0,r1,r2,r3,r4,r5,r6,r7} @ load second half add r0,r0,r8 @ accumulate key material add r1,r1,r9 # ifdef __thumb2__ itt hs # endif ldrhs r8,[r12],#16 @ load input ldrhs r9,[r12,#-12] # ifdef __thumb2__ itt hi # endif strhi r10,[sp,#4*(16+10)] @ copy "rx" while at it strhi r11,[sp,#4*(16+11)] @ copy "rx" while at it add r2,r2,r10 add r3,r3,r11 # ifdef __thumb2__ itt hs # endif ldrhs r10,[r12,#-8] ldrhs r11,[r12,#-4] # if __ARM_ARCH>=6 && defined(__ARMEB__) rev r0,r0 rev r1,r1 rev r2,r2 rev r3,r3 # endif # ifdef __thumb2__ itt hs # endif eorhs r0,r0,r8 eorhs r1,r1,r9 add r8,sp,#4*(12) str r0,[r14],#16 @ store output # ifdef __thumb2__ itt hs # endif eorhs r2,r2,r10 eorhs r3,r3,r11 str r1,[r14,#-12] ldmia r8,{r8,r9,r10,r11} @ load key material str r2,[r14,#-8] str r3,[r14,#-4] add r4,r4,r8 @ accumulate key material add r5,r5,r9 # ifdef __thumb2__ itt hi # endif addhi r8,r8,#1 @ next counter value strhi r8,[sp,#4*(12)] @ save next counter value # ifdef __thumb2__ itt hs # endif ldrhs r8,[r12],#16 @ load input ldrhs r9,[r12,#-12] add r6,r6,r10 add r7,r7,r11 # ifdef __thumb2__ itt hs # endif ldrhs r10,[r12,#-8] ldrhs r11,[r12,#-4] # if __ARM_ARCH>=6 && defined(__ARMEB__) rev r4,r4 rev r5,r5 rev r6,r6 rev r7,r7 # endif # ifdef __thumb2__ itt hs # endif eorhs r4,r4,r8 eorhs r5,r5,r9 # ifdef __thumb2__ it ne # endif ldrne r8,[sp,#4*(32+2)] @ re-load 
len # ifdef __thumb2__ itt hs # endif eorhs r6,r6,r10 eorhs r7,r7,r11 str r4,[r14],#16 @ store output str r5,[r14,#-12] # ifdef __thumb2__ it hs # endif subhs r11,r8,#64 @ len-=64 str r6,[r14,#-8] str r7,[r14,#-4] bhi Loop_outer beq Ldone # if __ARM_ARCH<7 b Ltail .align 4 Lunaligned:@ unaligned endian-neutral path cmp r11,#64 @ restore flags # endif #endif #if __ARM_ARCH<7 ldr r11,[sp,#4*(3)] add r0,r0,r8 @ accumulate key material add r1,r1,r9 add r2,r2,r10 # ifdef __thumb2__ itete lo # endif eorlo r8,r8,r8 @ zero or ... ldrhsb r8,[r12],#16 @ ... load input eorlo r9,r9,r9 ldrhsb r9,[r12,#-12] add r3,r3,r11 # ifdef __thumb2__ itete lo # endif eorlo r10,r10,r10 ldrhsb r10,[r12,#-8] eorlo r11,r11,r11 ldrhsb r11,[r12,#-4] eor r0,r8,r0 @ xor with input (or zero) eor r1,r9,r1 # ifdef __thumb2__ itt hs # endif ldrhsb r8,[r12,#-15] @ load more input ldrhsb r9,[r12,#-11] eor r2,r10,r2 strb r0,[r14],#16 @ store output eor r3,r11,r3 # ifdef __thumb2__ itt hs # endif ldrhsb r10,[r12,#-7] ldrhsb r11,[r12,#-3] strb r1,[r14,#-12] eor r0,r8,r0,lsr#8 strb r2,[r14,#-8] eor r1,r9,r1,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r8,[r12,#-14] @ load more input ldrhsb r9,[r12,#-10] strb r3,[r14,#-4] eor r2,r10,r2,lsr#8 strb r0,[r14,#-15] eor r3,r11,r3,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r10,[r12,#-6] ldrhsb r11,[r12,#-2] strb r1,[r14,#-11] eor r0,r8,r0,lsr#8 strb r2,[r14,#-7] eor r1,r9,r1,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r8,[r12,#-13] @ load more input ldrhsb r9,[r12,#-9] strb r3,[r14,#-3] eor r2,r10,r2,lsr#8 strb r0,[r14,#-14] eor r3,r11,r3,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r10,[r12,#-5] ldrhsb r11,[r12,#-1] strb r1,[r14,#-10] strb r2,[r14,#-6] eor r0,r8,r0,lsr#8 strb r3,[r14,#-2] eor r1,r9,r1,lsr#8 strb r0,[r14,#-13] eor r2,r10,r2,lsr#8 strb r1,[r14,#-9] eor r3,r11,r3,lsr#8 strb r2,[r14,#-5] strb r3,[r14,#-1] add r8,sp,#4*(4+0) ldmia r8,{r8,r9,r10,r11} @ load key material add r0,sp,#4*(16+8) add r4,r4,r8 @ accumulate key material add r5,r5,r9 add r6,r6,r10 # ifdef __thumb2__ itete lo # endif eorlo r8,r8,r8 @ zero or ... ldrhsb r8,[r12],#16 @ ... 
load input eorlo r9,r9,r9 ldrhsb r9,[r12,#-12] add r7,r7,r11 # ifdef __thumb2__ itete lo # endif eorlo r10,r10,r10 ldrhsb r10,[r12,#-8] eorlo r11,r11,r11 ldrhsb r11,[r12,#-4] eor r4,r8,r4 @ xor with input (or zero) eor r5,r9,r5 # ifdef __thumb2__ itt hs # endif ldrhsb r8,[r12,#-15] @ load more input ldrhsb r9,[r12,#-11] eor r6,r10,r6 strb r4,[r14],#16 @ store output eor r7,r11,r7 # ifdef __thumb2__ itt hs # endif ldrhsb r10,[r12,#-7] ldrhsb r11,[r12,#-3] strb r5,[r14,#-12] eor r4,r8,r4,lsr#8 strb r6,[r14,#-8] eor r5,r9,r5,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r8,[r12,#-14] @ load more input ldrhsb r9,[r12,#-10] strb r7,[r14,#-4] eor r6,r10,r6,lsr#8 strb r4,[r14,#-15] eor r7,r11,r7,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r10,[r12,#-6] ldrhsb r11,[r12,#-2] strb r5,[r14,#-11] eor r4,r8,r4,lsr#8 strb r6,[r14,#-7] eor r5,r9,r5,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r8,[r12,#-13] @ load more input ldrhsb r9,[r12,#-9] strb r7,[r14,#-3] eor r6,r10,r6,lsr#8 strb r4,[r14,#-14] eor r7,r11,r7,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r10,[r12,#-5] ldrhsb r11,[r12,#-1] strb r5,[r14,#-10] strb r6,[r14,#-6] eor r4,r8,r4,lsr#8 strb r7,[r14,#-2] eor r5,r9,r5,lsr#8 strb r4,[r14,#-13] eor r6,r10,r6,lsr#8 strb r5,[r14,#-9] eor r7,r11,r7,lsr#8 strb r6,[r14,#-5] strb r7,[r14,#-1] add r8,sp,#4*(4+4) ldmia r8,{r8,r9,r10,r11} @ load key material ldmia r0,{r0,r1,r2,r3,r4,r5,r6,r7} @ load second half # ifdef __thumb2__ itt hi # endif strhi r10,[sp,#4*(16+10)] @ copy "rx" strhi r11,[sp,#4*(16+11)] @ copy "rx" add r0,r0,r8 @ accumulate key material add r1,r1,r9 add r2,r2,r10 # ifdef __thumb2__ itete lo # endif eorlo r8,r8,r8 @ zero or ... ldrhsb r8,[r12],#16 @ ... load input eorlo r9,r9,r9 ldrhsb r9,[r12,#-12] add r3,r3,r11 # ifdef __thumb2__ itete lo # endif eorlo r10,r10,r10 ldrhsb r10,[r12,#-8] eorlo r11,r11,r11 ldrhsb r11,[r12,#-4] eor r0,r8,r0 @ xor with input (or zero) eor r1,r9,r1 # ifdef __thumb2__ itt hs # endif ldrhsb r8,[r12,#-15] @ load more input ldrhsb r9,[r12,#-11] eor r2,r10,r2 strb r0,[r14],#16 @ store output eor r3,r11,r3 # ifdef __thumb2__ itt hs # endif ldrhsb r10,[r12,#-7] ldrhsb r11,[r12,#-3] strb r1,[r14,#-12] eor r0,r8,r0,lsr#8 strb r2,[r14,#-8] eor r1,r9,r1,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r8,[r12,#-14] @ load more input ldrhsb r9,[r12,#-10] strb r3,[r14,#-4] eor r2,r10,r2,lsr#8 strb r0,[r14,#-15] eor r3,r11,r3,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r10,[r12,#-6] ldrhsb r11,[r12,#-2] strb r1,[r14,#-11] eor r0,r8,r0,lsr#8 strb r2,[r14,#-7] eor r1,r9,r1,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r8,[r12,#-13] @ load more input ldrhsb r9,[r12,#-9] strb r3,[r14,#-3] eor r2,r10,r2,lsr#8 strb r0,[r14,#-14] eor r3,r11,r3,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r10,[r12,#-5] ldrhsb r11,[r12,#-1] strb r1,[r14,#-10] strb r2,[r14,#-6] eor r0,r8,r0,lsr#8 strb r3,[r14,#-2] eor r1,r9,r1,lsr#8 strb r0,[r14,#-13] eor r2,r10,r2,lsr#8 strb r1,[r14,#-9] eor r3,r11,r3,lsr#8 strb r2,[r14,#-5] strb r3,[r14,#-1] add r8,sp,#4*(4+8) ldmia r8,{r8,r9,r10,r11} @ load key material add r4,r4,r8 @ accumulate key material # ifdef __thumb2__ itt hi # endif addhi r8,r8,#1 @ next counter value strhi r8,[sp,#4*(12)] @ save next counter value add r5,r5,r9 add r6,r6,r10 # ifdef __thumb2__ itete lo # endif eorlo r8,r8,r8 @ zero or ... ldrhsb r8,[r12],#16 @ ... 
load input eorlo r9,r9,r9 ldrhsb r9,[r12,#-12] add r7,r7,r11 # ifdef __thumb2__ itete lo # endif eorlo r10,r10,r10 ldrhsb r10,[r12,#-8] eorlo r11,r11,r11 ldrhsb r11,[r12,#-4] eor r4,r8,r4 @ xor with input (or zero) eor r5,r9,r5 # ifdef __thumb2__ itt hs # endif ldrhsb r8,[r12,#-15] @ load more input ldrhsb r9,[r12,#-11] eor r6,r10,r6 strb r4,[r14],#16 @ store output eor r7,r11,r7 # ifdef __thumb2__ itt hs # endif ldrhsb r10,[r12,#-7] ldrhsb r11,[r12,#-3] strb r5,[r14,#-12] eor r4,r8,r4,lsr#8 strb r6,[r14,#-8] eor r5,r9,r5,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r8,[r12,#-14] @ load more input ldrhsb r9,[r12,#-10] strb r7,[r14,#-4] eor r6,r10,r6,lsr#8 strb r4,[r14,#-15] eor r7,r11,r7,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r10,[r12,#-6] ldrhsb r11,[r12,#-2] strb r5,[r14,#-11] eor r4,r8,r4,lsr#8 strb r6,[r14,#-7] eor r5,r9,r5,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r8,[r12,#-13] @ load more input ldrhsb r9,[r12,#-9] strb r7,[r14,#-3] eor r6,r10,r6,lsr#8 strb r4,[r14,#-14] eor r7,r11,r7,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r10,[r12,#-5] ldrhsb r11,[r12,#-1] strb r5,[r14,#-10] strb r6,[r14,#-6] eor r4,r8,r4,lsr#8 strb r7,[r14,#-2] eor r5,r9,r5,lsr#8 strb r4,[r14,#-13] eor r6,r10,r6,lsr#8 strb r5,[r14,#-9] eor r7,r11,r7,lsr#8 strb r6,[r14,#-5] strb r7,[r14,#-1] # ifdef __thumb2__ it ne # endif ldrne r8,[sp,#4*(32+2)] @ re-load len # ifdef __thumb2__ it hs # endif subhs r11,r8,#64 @ len-=64 bhi Loop_outer beq Ldone #endif Ltail: ldr r12,[sp,#4*(32+1)] @ load inp add r9,sp,#4*(0) ldr r14,[sp,#4*(32+0)] @ load out Loop_tail: ldrb r10,[r9],#1 @ read buffer on stack ldrb r11,[r12],#1 @ read input subs r8,r8,#1 eor r11,r11,r10 strb r11,[r14],#1 @ store output bne Loop_tail Ldone: add sp,sp,#4*(32+3) ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc} #if __ARM_MAX_ARCH__>=7 .globl _ChaCha20_ctr32_neon .private_extern _ChaCha20_ctr32_neon #ifdef __thumb2__ .thumb_func _ChaCha20_ctr32_neon #endif .align 5 _ChaCha20_ctr32_neon: ldr r12,[sp,#0] @ pull pointer to counter and nonce stmdb sp!,{r0,r1,r2,r4-r11,lr} adr r14,Lsigma vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI spec says so stmdb sp!,{r0,r1,r2,r3} vld1.32 {q1,q2},[r3] @ load key ldmia r3,{r4,r5,r6,r7,r8,r9,r10,r11} @ load key sub sp,sp,#4*(16+16) vld1.32 {q3},[r12] @ load counter and nonce add r12,sp,#4*8 ldmia r14,{r0,r1,r2,r3} @ load sigma vld1.32 {q0},[r14]! 
@ load sigma vld1.32 {q12},[r14] @ one vst1.32 {q2,q3},[r12] @ copy 1/2key|counter|nonce vst1.32 {q0,q1},[sp] @ copy sigma|1/2key str r10,[sp,#4*(16+10)] @ off-load "rx" str r11,[sp,#4*(16+11)] @ off-load "rx" vshl.i32 d26,d24,#1 @ two vstr d24,[sp,#4*(16+0)] vshl.i32 d28,d24,#2 @ four vstr d26,[sp,#4*(16+2)] vmov q4,q0 vstr d28,[sp,#4*(16+4)] vmov q8,q0 vmov q5,q1 vmov q9,q1 b Loop_neon_enter .align 4 Loop_neon_outer: ldmia sp,{r0,r1,r2,r3,r4,r5,r6,r7,r8,r9} @ load key material cmp r11,#64*2 @ if len<=64*2 bls Lbreak_neon @ switch to integer-only vmov q4,q0 str r11,[sp,#4*(32+2)] @ save len vmov q8,q0 str r12, [sp,#4*(32+1)] @ save inp vmov q5,q1 str r14, [sp,#4*(32+0)] @ save out vmov q9,q1 Loop_neon_enter: ldr r11, [sp,#4*(15)] vadd.i32 q7,q3,q12 @ counter+1 ldr r12,[sp,#4*(12)] @ modulo-scheduled load vmov q6,q2 ldr r10, [sp,#4*(13)] vmov q10,q2 ldr r14,[sp,#4*(14)] vadd.i32 q11,q7,q12 @ counter+2 str r11, [sp,#4*(16+15)] mov r11,#10 add r12,r12,#3 @ counter+3 b Loop_neon .align 4 Loop_neon: subs r11,r11,#1 vadd.i32 q0,q0,q1 add r0,r0,r4 vadd.i32 q4,q4,q5 mov r12,r12,ror#16 vadd.i32 q8,q8,q9 add r1,r1,r5 veor q3,q3,q0 mov r10,r10,ror#16 veor q7,q7,q4 eor r12,r12,r0,ror#16 veor q11,q11,q8 eor r10,r10,r1,ror#16 vrev32.16 q3,q3 add r8,r8,r12 vrev32.16 q7,q7 mov r4,r4,ror#20 vrev32.16 q11,q11 add r9,r9,r10 vadd.i32 q2,q2,q3 mov r5,r5,ror#20 vadd.i32 q6,q6,q7 eor r4,r4,r8,ror#20 vadd.i32 q10,q10,q11 eor r5,r5,r9,ror#20 veor q12,q1,q2 add r0,r0,r4 veor q13,q5,q6 mov r12,r12,ror#24 veor q14,q9,q10 add r1,r1,r5 vshr.u32 q1,q12,#20 mov r10,r10,ror#24 vshr.u32 q5,q13,#20 eor r12,r12,r0,ror#24 vshr.u32 q9,q14,#20 eor r10,r10,r1,ror#24 vsli.32 q1,q12,#12 add r8,r8,r12 vsli.32 q5,q13,#12 mov r4,r4,ror#25 vsli.32 q9,q14,#12 add r9,r9,r10 vadd.i32 q0,q0,q1 mov r5,r5,ror#25 vadd.i32 q4,q4,q5 str r10,[sp,#4*(16+13)] vadd.i32 q8,q8,q9 ldr r10,[sp,#4*(16+15)] veor q12,q3,q0 eor r4,r4,r8,ror#25 veor q13,q7,q4 eor r5,r5,r9,ror#25 veor q14,q11,q8 str r8,[sp,#4*(16+8)] vshr.u32 q3,q12,#24 ldr r8,[sp,#4*(16+10)] vshr.u32 q7,q13,#24 add r2,r2,r6 vshr.u32 q11,q14,#24 mov r14,r14,ror#16 vsli.32 q3,q12,#8 str r9,[sp,#4*(16+9)] vsli.32 q7,q13,#8 ldr r9,[sp,#4*(16+11)] vsli.32 q11,q14,#8 add r3,r3,r7 vadd.i32 q2,q2,q3 mov r10,r10,ror#16 vadd.i32 q6,q6,q7 eor r14,r14,r2,ror#16 vadd.i32 q10,q10,q11 eor r10,r10,r3,ror#16 veor q12,q1,q2 add r8,r8,r14 veor q13,q5,q6 mov r6,r6,ror#20 veor q14,q9,q10 add r9,r9,r10 vshr.u32 q1,q12,#25 mov r7,r7,ror#20 vshr.u32 q5,q13,#25 eor r6,r6,r8,ror#20 vshr.u32 q9,q14,#25 eor r7,r7,r9,ror#20 vsli.32 q1,q12,#7 add r2,r2,r6 vsli.32 q5,q13,#7 mov r14,r14,ror#24 vsli.32 q9,q14,#7 add r3,r3,r7 vext.8 q2,q2,q2,#8 mov r10,r10,ror#24 vext.8 q6,q6,q6,#8 eor r14,r14,r2,ror#24 vext.8 q10,q10,q10,#8 eor r10,r10,r3,ror#24 vext.8 q1,q1,q1,#4 add r8,r8,r14 vext.8 q5,q5,q5,#4 mov r6,r6,ror#25 vext.8 q9,q9,q9,#4 add r9,r9,r10 vext.8 q3,q3,q3,#12 mov r7,r7,ror#25 vext.8 q7,q7,q7,#12 eor r6,r6,r8,ror#25 vext.8 q11,q11,q11,#12 eor r7,r7,r9,ror#25 vadd.i32 q0,q0,q1 add r0,r0,r5 vadd.i32 q4,q4,q5 mov r10,r10,ror#16 vadd.i32 q8,q8,q9 add r1,r1,r6 veor q3,q3,q0 mov r12,r12,ror#16 veor q7,q7,q4 eor r10,r10,r0,ror#16 veor q11,q11,q8 eor r12,r12,r1,ror#16 vrev32.16 q3,q3 add r8,r8,r10 vrev32.16 q7,q7 mov r5,r5,ror#20 vrev32.16 q11,q11 add r9,r9,r12 vadd.i32 q2,q2,q3 mov r6,r6,ror#20 vadd.i32 q6,q6,q7 eor r5,r5,r8,ror#20 vadd.i32 q10,q10,q11 eor r6,r6,r9,ror#20 veor q12,q1,q2 add r0,r0,r5 veor q13,q5,q6 mov r10,r10,ror#24 veor q14,q9,q10 add r1,r1,r6 vshr.u32 q1,q12,#20 mov r12,r12,ror#24 vshr.u32 q5,q13,#20 eor 
r10,r10,r0,ror#24 vshr.u32 q9,q14,#20 eor r12,r12,r1,ror#24 vsli.32 q1,q12,#12 add r8,r8,r10 vsli.32 q5,q13,#12 mov r5,r5,ror#25 vsli.32 q9,q14,#12 str r10,[sp,#4*(16+15)] vadd.i32 q0,q0,q1 ldr r10,[sp,#4*(16+13)] vadd.i32 q4,q4,q5 add r9,r9,r12 vadd.i32 q8,q8,q9 mov r6,r6,ror#25 veor q12,q3,q0 eor r5,r5,r8,ror#25 veor q13,q7,q4 eor r6,r6,r9,ror#25 veor q14,q11,q8 str r8,[sp,#4*(16+10)] vshr.u32 q3,q12,#24 ldr r8,[sp,#4*(16+8)] vshr.u32 q7,q13,#24 add r2,r2,r7 vshr.u32 q11,q14,#24 mov r10,r10,ror#16 vsli.32 q3,q12,#8 str r9,[sp,#4*(16+11)] vsli.32 q7,q13,#8 ldr r9,[sp,#4*(16+9)] vsli.32 q11,q14,#8 add r3,r3,r4 vadd.i32 q2,q2,q3 mov r14,r14,ror#16 vadd.i32 q6,q6,q7 eor r10,r10,r2,ror#16 vadd.i32 q10,q10,q11 eor r14,r14,r3,ror#16 veor q12,q1,q2 add r8,r8,r10 veor q13,q5,q6 mov r7,r7,ror#20 veor q14,q9,q10 add r9,r9,r14 vshr.u32 q1,q12,#25 mov r4,r4,ror#20 vshr.u32 q5,q13,#25 eor r7,r7,r8,ror#20 vshr.u32 q9,q14,#25 eor r4,r4,r9,ror#20 vsli.32 q1,q12,#7 add r2,r2,r7 vsli.32 q5,q13,#7 mov r10,r10,ror#24 vsli.32 q9,q14,#7 add r3,r3,r4 vext.8 q2,q2,q2,#8 mov r14,r14,ror#24 vext.8 q6,q6,q6,#8 eor r10,r10,r2,ror#24 vext.8 q10,q10,q10,#8 eor r14,r14,r3,ror#24 vext.8 q1,q1,q1,#12 add r8,r8,r10 vext.8 q5,q5,q5,#12 mov r7,r7,ror#25 vext.8 q9,q9,q9,#12 add r9,r9,r14 vext.8 q3,q3,q3,#4 mov r4,r4,ror#25 vext.8 q7,q7,q7,#4 eor r7,r7,r8,ror#25 vext.8 q11,q11,q11,#4 eor r4,r4,r9,ror#25 bne Loop_neon add r11,sp,#32 vld1.32 {q12,q13},[sp] @ load key material vld1.32 {q14,q15},[r11] ldr r11,[sp,#4*(32+2)] @ load len str r8, [sp,#4*(16+8)] @ modulo-scheduled store str r9, [sp,#4*(16+9)] str r12,[sp,#4*(16+12)] str r10, [sp,#4*(16+13)] str r14,[sp,#4*(16+14)] @ at this point we have first half of 512-bit result in @ rx and second half at sp+4*(16+8) ldr r12,[sp,#4*(32+1)] @ load inp ldr r14,[sp,#4*(32+0)] @ load out vadd.i32 q0,q0,q12 @ accumulate key material vadd.i32 q4,q4,q12 vadd.i32 q8,q8,q12 vldr d24,[sp,#4*(16+0)] @ one vadd.i32 q1,q1,q13 vadd.i32 q5,q5,q13 vadd.i32 q9,q9,q13 vldr d26,[sp,#4*(16+2)] @ two vadd.i32 q2,q2,q14 vadd.i32 q6,q6,q14 vadd.i32 q10,q10,q14 vadd.i32 d14,d14,d24 @ counter+1 vadd.i32 d22,d22,d26 @ counter+2 vadd.i32 q3,q3,q15 vadd.i32 q7,q7,q15 vadd.i32 q11,q11,q15 cmp r11,#64*4 blo Ltail_neon vld1.8 {q12,q13},[r12]! @ load input mov r11,sp vld1.8 {q14,q15},[r12]! veor q0,q0,q12 @ xor with input veor q1,q1,q13 vld1.8 {q12,q13},[r12]! veor q2,q2,q14 veor q3,q3,q15 vld1.8 {q14,q15},[r12]! veor q4,q4,q12 vst1.8 {q0,q1},[r14]! @ store output veor q5,q5,q13 vld1.8 {q12,q13},[r12]! veor q6,q6,q14 vst1.8 {q2,q3},[r14]! veor q7,q7,q15 vld1.8 {q14,q15},[r12]! veor q8,q8,q12 vld1.32 {q0,q1},[r11]! @ load for next iteration veor d25,d25,d25 vldr d24,[sp,#4*(16+4)] @ four veor q9,q9,q13 vld1.32 {q2,q3},[r11] veor q10,q10,q14 vst1.8 {q4,q5},[r14]! veor q11,q11,q15 vst1.8 {q6,q7},[r14]! vadd.i32 d6,d6,d24 @ next counter value vldr d24,[sp,#4*(16+0)] @ one ldmia sp,{r8,r9,r10,r11} @ load key material add r0,r0,r8 @ accumulate key material ldr r8,[r12],#16 @ load input vst1.8 {q8,q9},[r14]! add r1,r1,r9 ldr r9,[r12,#-12] vst1.8 {q10,q11},[r14]! 
add r2,r2,r10 ldr r10,[r12,#-8] add r3,r3,r11 ldr r11,[r12,#-4] # ifdef __ARMEB__ rev r0,r0 rev r1,r1 rev r2,r2 rev r3,r3 # endif eor r0,r0,r8 @ xor with input add r8,sp,#4*(4) eor r1,r1,r9 str r0,[r14],#16 @ store output eor r2,r2,r10 str r1,[r14,#-12] eor r3,r3,r11 ldmia r8,{r8,r9,r10,r11} @ load key material str r2,[r14,#-8] str r3,[r14,#-4] add r4,r4,r8 @ accumulate key material ldr r8,[r12],#16 @ load input add r5,r5,r9 ldr r9,[r12,#-12] add r6,r6,r10 ldr r10,[r12,#-8] add r7,r7,r11 ldr r11,[r12,#-4] # ifdef __ARMEB__ rev r4,r4 rev r5,r5 rev r6,r6 rev r7,r7 # endif eor r4,r4,r8 add r8,sp,#4*(8) eor r5,r5,r9 str r4,[r14],#16 @ store output eor r6,r6,r10 str r5,[r14,#-12] eor r7,r7,r11 ldmia r8,{r8,r9,r10,r11} @ load key material str r6,[r14,#-8] add r0,sp,#4*(16+8) str r7,[r14,#-4] ldmia r0,{r0,r1,r2,r3,r4,r5,r6,r7} @ load second half add r0,r0,r8 @ accumulate key material ldr r8,[r12],#16 @ load input add r1,r1,r9 ldr r9,[r12,#-12] # ifdef __thumb2__ it hi # endif strhi r10,[sp,#4*(16+10)] @ copy "rx" while at it add r2,r2,r10 ldr r10,[r12,#-8] # ifdef __thumb2__ it hi # endif strhi r11,[sp,#4*(16+11)] @ copy "rx" while at it add r3,r3,r11 ldr r11,[r12,#-4] # ifdef __ARMEB__ rev r0,r0 rev r1,r1 rev r2,r2 rev r3,r3 # endif eor r0,r0,r8 add r8,sp,#4*(12) eor r1,r1,r9 str r0,[r14],#16 @ store output eor r2,r2,r10 str r1,[r14,#-12] eor r3,r3,r11 ldmia r8,{r8,r9,r10,r11} @ load key material str r2,[r14,#-8] str r3,[r14,#-4] add r4,r4,r8 @ accumulate key material add r8,r8,#4 @ next counter value add r5,r5,r9 str r8,[sp,#4*(12)] @ save next counter value ldr r8,[r12],#16 @ load input add r6,r6,r10 add r4,r4,#3 @ counter+3 ldr r9,[r12,#-12] add r7,r7,r11 ldr r10,[r12,#-8] ldr r11,[r12,#-4] # ifdef __ARMEB__ rev r4,r4 rev r5,r5 rev r6,r6 rev r7,r7 # endif eor r4,r4,r8 # ifdef __thumb2__ it hi # endif ldrhi r8,[sp,#4*(32+2)] @ re-load len eor r5,r5,r9 eor r6,r6,r10 str r4,[r14],#16 @ store output eor r7,r7,r11 str r5,[r14,#-12] sub r11,r8,#64*4 @ len-=64*4 str r6,[r14,#-8] str r7,[r14,#-4] bhi Loop_neon_outer b Ldone_neon .align 4 Lbreak_neon: @ harmonize NEON and integer-only stack frames: load data @ from NEON frame, but save to integer-only one; distance @ between the two is 4*(32+4+16-32)=4*(20). str r11, [sp,#4*(20+32+2)] @ save len add r11,sp,#4*(32+4) str r12, [sp,#4*(20+32+1)] @ save inp str r14, [sp,#4*(20+32+0)] @ save out ldr r12,[sp,#4*(16+10)] ldr r14,[sp,#4*(16+11)] vldmia r11,{d8,d9,d10,d11,d12,d13,d14,d15} @ fulfill ABI requirement str r12,[sp,#4*(20+16+10)] @ copy "rx" str r14,[sp,#4*(20+16+11)] @ copy "rx" ldr r11, [sp,#4*(15)] ldr r12,[sp,#4*(12)] @ modulo-scheduled load ldr r10, [sp,#4*(13)] ldr r14,[sp,#4*(14)] str r11, [sp,#4*(20+16+15)] add r11,sp,#4*(20) vst1.32 {q0,q1},[r11]! @ copy key add sp,sp,#4*(20) @ switch frame vst1.32 {q2,q3},[r11] mov r11,#10 b Loop @ go integer-only .align 4 Ltail_neon: cmp r11,#64*3 bhs L192_or_more_neon cmp r11,#64*2 bhs L128_or_more_neon cmp r11,#64*1 bhs L64_or_more_neon add r8,sp,#4*(8) vst1.8 {q0,q1},[sp] add r10,sp,#4*(0) vst1.8 {q2,q3},[r8] b Loop_tail_neon .align 4 L64_or_more_neon: vld1.8 {q12,q13},[r12]! vld1.8 {q14,q15},[r12]! veor q0,q0,q12 veor q1,q1,q13 veor q2,q2,q14 veor q3,q3,q15 vst1.8 {q0,q1},[r14]! vst1.8 {q2,q3},[r14]! beq Ldone_neon add r8,sp,#4*(8) vst1.8 {q4,q5},[sp] add r10,sp,#4*(0) vst1.8 {q6,q7},[r8] sub r11,r11,#64*1 @ len-=64*1 b Loop_tail_neon .align 4 L128_or_more_neon: vld1.8 {q12,q13},[r12]! vld1.8 {q14,q15},[r12]! veor q0,q0,q12 veor q1,q1,q13 vld1.8 {q12,q13},[r12]! 
veor q2,q2,q14 veor q3,q3,q15 vld1.8 {q14,q15},[r12]! veor q4,q4,q12 veor q5,q5,q13 vst1.8 {q0,q1},[r14]! veor q6,q6,q14 vst1.8 {q2,q3},[r14]! veor q7,q7,q15 vst1.8 {q4,q5},[r14]! vst1.8 {q6,q7},[r14]! beq Ldone_neon add r8,sp,#4*(8) vst1.8 {q8,q9},[sp] add r10,sp,#4*(0) vst1.8 {q10,q11},[r8] sub r11,r11,#64*2 @ len-=64*2 b Loop_tail_neon .align 4 L192_or_more_neon: vld1.8 {q12,q13},[r12]! vld1.8 {q14,q15},[r12]! veor q0,q0,q12 veor q1,q1,q13 vld1.8 {q12,q13},[r12]! veor q2,q2,q14 veor q3,q3,q15 vld1.8 {q14,q15},[r12]! veor q4,q4,q12 veor q5,q5,q13 vld1.8 {q12,q13},[r12]! veor q6,q6,q14 vst1.8 {q0,q1},[r14]! veor q7,q7,q15 vld1.8 {q14,q15},[r12]! veor q8,q8,q12 vst1.8 {q2,q3},[r14]! veor q9,q9,q13 vst1.8 {q4,q5},[r14]! veor q10,q10,q14 vst1.8 {q6,q7},[r14]! veor q11,q11,q15 vst1.8 {q8,q9},[r14]! vst1.8 {q10,q11},[r14]! beq Ldone_neon ldmia sp,{r8,r9,r10,r11} @ load key material add r0,r0,r8 @ accumulate key material add r8,sp,#4*(4) add r1,r1,r9 add r2,r2,r10 add r3,r3,r11 ldmia r8,{r8,r9,r10,r11} @ load key material add r4,r4,r8 @ accumulate key material add r8,sp,#4*(8) add r5,r5,r9 add r6,r6,r10 add r7,r7,r11 ldmia r8,{r8,r9,r10,r11} @ load key material # ifdef __ARMEB__ rev r0,r0 rev r1,r1 rev r2,r2 rev r3,r3 rev r4,r4 rev r5,r5 rev r6,r6 rev r7,r7 # endif stmia sp,{r0,r1,r2,r3,r4,r5,r6,r7} add r0,sp,#4*(16+8) ldmia r0,{r0,r1,r2,r3,r4,r5,r6,r7} @ load second half add r0,r0,r8 @ accumulate key material add r8,sp,#4*(12) add r1,r1,r9 add r2,r2,r10 add r3,r3,r11 ldmia r8,{r8,r9,r10,r11} @ load key material add r4,r4,r8 @ accumulate key material add r8,sp,#4*(8) add r5,r5,r9 add r4,r4,#3 @ counter+3 add r6,r6,r10 add r7,r7,r11 ldr r11,[sp,#4*(32+2)] @ re-load len # ifdef __ARMEB__ rev r0,r0 rev r1,r1 rev r2,r2 rev r3,r3 rev r4,r4 rev r5,r5 rev r6,r6 rev r7,r7 # endif stmia r8,{r0,r1,r2,r3,r4,r5,r6,r7} add r10,sp,#4*(0) sub r11,r11,#64*3 @ len-=64*3 Loop_tail_neon: ldrb r8,[r10],#1 @ read buffer on stack ldrb r9,[r12],#1 @ read input subs r11,r11,#1 eor r8,r8,r9 strb r8,[r14],#1 @ store output bne Loop_tail_neon Ldone_neon: add sp,sp,#4*(32+4) vldmia sp,{d8,d9,d10,d11,d12,d13,d14,d15} add sp,sp,#4*(16+3) ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc} #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__APPLE__)
mathsmm/bcc
2,489
arquitetura-de-computadores/tarefa-3-ex-3.s
.code16 .text .globl _start _start: # DATA SEGMENT SETUP movw $0x1000, %ax movw %ax, %ds # DS (Data Segment) # The "real" address DS will point to after the code # above runs is 0x10000 (0x1000 * 0x10) movw $0x0000, %si movb $0xcd, %ds:(%si) # '═' --> stored at address # DS * 0X10 + SI, which is # 0x1000 * 0x10 + 0x0000 movb $0xba, %ds:1(%si) # '║' --> (DS * 0X10 + SI + 1) movb $0xc9, %ds:2(%si) # '╔' --> (DS * 0X10 + SI + 2) movb $0xbb, %ds:3(%si) # '╗' --> (DS * 0X10 + SI + 3) movb $0xc8, %ds:4(%si) # '╚' --> (DS * 0X10 + SI + 4) movb $0xbc, %ds:5(%si) # '╝' --> (DS * 0X10 + SI + 5) movb $0x20, %ds:6(%si) # ' ' (space) --> (DS * 0X10 + SI + 6) main: # TOP BORDER AND TOP CORNERS OF THE BOX movb %ds:2(%si), %al # Move the character '╔' to AL call print # Pushes the address of the current instruction # onto the stack and then jumps to print movb %ds:(%si), %al # Move the character '═' to AL movw $78, %cx # Move 78 to CX (to iterate 78 times) call loop1 movb %ds:3(%si), %al # '╗' call print # SIDE BORDERS OF THE BOX movw $22, %cx # Move 22 to CX (so loop2 iterates 22 times) call loop2 # loop2 # BOTTOM BORDER AND BOTTOM CORNERS OF THE BOX movb %ds:4(%si), %al # '╚' call print movb %ds:(%si), %al # '═' movw $78, %cx call loop1 movb %ds:5(%si), %al # '╝' call print jmp final loop1: call print loop loop1 # Decrements CX and jumps to the label # "loop1" if CX is not zero ret loop2: # LEFT SIDE BORDER movb %ds:1(%si), %al # '║' call print # FILL WITH SPACES movb %ds:6(%si), %al # ' ' (space) movw %cx, %dx # Save CX in DX movw $78, %cx # Move 78 to CX (to iterate 78 times) call loop1 movw %dx, %cx # Restore the contents of CX saved in DX # RIGHT SIDE BORDER movb %ds:1(%si), %al # '║' call print loop loop2 ret print: movb $0x0e, %ah # Move the interrupt function code to AH int $0x10 # BIOS interrupt ret # Pops from memory the address pushed by the # call instruction and jumps to # that address final: # Go to the 510th byte from position 0 . = _start + 510 # MBR boot signature .byte 0x55 .byte 0xaa
mathsmm/bcc
1,009
arquitetura-de-computadores/tarefa-3-ex-1pt1.s
.code16 .text .globl _start _start: movb $10, %bh # Move 10 (decimal) into BH movb $15, %bl # Move 15 (decimal) into BL add %bh, %bl # Add BH to BL, storing the result in BL movb %bl, %al # Move the result of the addition into AL movb $0x0e, %ah # Move the interrupt function code into AH int $0x10 # BIOS interrupt # Prints to the screen the "END OF MEDIUM" character, # which is character 0x19 (25 in decimal) of the ASCII table # It was possible to watch the code as it executed # by using the following commands in the Bochs debugger: # b 0x7c00 --> set a breakpoint at address 0x7c00 # c --> continue execution until the breakpoint # s --> execute the instructions one by one # r --> show the values in the registers # mov to 510th byte from 0 pos . = _start + 510 # MBR boot signature .byte 0x55 # MBR boot signature .byte 0xaa
matmarqs/nightmare
1,766
workdir/assembly/fib.s
global _start section .data message db "Fibonacci Sequence:", 0x0a section .text _start: mov rax, 1 ; rax: syscall number 1 mov rdi, 1 ; rdi: fd 1 for stdout mov rsi,message ; rsi: pointer to message mov rdx, 20 ; rdx: print length of 20 bytes syscall ; call write syscall to the intro message xor rax, rax ; initialize rax to 0 xor rbx, rbx ; initialize rbx to 0 inc rbx ; increment rbx to 1 loopFib: add rax, rbx ; get the next number xchg rax, rbx ; swap values push rax ; save the value of rax push rbx ; save the value of rbx mov rax, 1 ; write syscall mov rdi, 1 ; print to stdout add rbx, 0x30 ; in ASCII: '0' = 0x30, '1' = 0x31, etc. push rbx ; create pointer to rbx, at rsp mov rsi, rsp ; string to be printed mov rdx, 1 ; length to be printed, only one char syscall pop rbx ; NOP pop pop rbx ; restore the value of rbx pop rax ; restore the value of rax cmp rbx, 10 ; do rbx - 10 js loopFib ; jump if result is <0 mov rax, 60 mov rdi, 0 syscall ; global _start ; ; section .text ; _start: ; xor rax, rax ; zero rax ; xor rbx, rbx ; zero rbx ; inc rbx ; increment rbx to 1 ; push rax ; push registers to stack ; push rbx ; ; call function ; pop rbx ; pop rax ; restore registers from stack ; mov rcx, 10 ; loopFib: ; add rax, rbx ; xchg rax, rbx ; cmp rbx, 10 ; js loopFib ; ; ;xor rax, rax ; ;xor rbx, rbx ; ;inc bl ; ;add al, bl ; ;not rax ; ; ;mov rax, rsp ; ;mov rax, [rsp] ; ;mov al, 0 ; ;mov bl, 1 ; ;xchg rbx, rax
mattlafayette/openvmm-old
2,291
openhcl/openhcl_boot/src/arch/x86_64/entry.S
// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. // // Entry point that zeroes BSS, sets up the stack, enables SSE, performs // relocations, and jumps to start(). // // BSS must be zeroed because the IGVM file does not contain pages for it, and // during reboot there may be dirty data in memory. This must happen early // because the stack itself is in BSS, so BSS must be zeroed before the stack is // set up. .globl _start _start: mov rbx, rdi // Save arg rdi lea rdi, __bss_start[rip] // Put BSS base in rdi lea rcx, _end[rip] // Put BSS end in rcx sub rcx, rdi // Compute BSS len in rcx xor eax, eax // Clear eax cld // Clear the direction flag for the string operation rep stosb // Zero BSS: memset(rdi, al, rcx) mov rdi, rbx // Restore rdi lea rsp, {STACK_SIZE} + {stack}[rip] // Set stack pointer mov dword ptr {stack}[rip], {STACK_COOKIE} // Set stack cookie mov rax, cr4 // Read CR4 into rax or rax, 0x600 // Set OSFXSR and OSXMMEXCPT for SSE support mov cr4, rax // Set CR4 from rax with previous values set push rsi // caller save rsi push rdi // caller save rdi lea rdx, _DYNAMIC[rip] // The start of the dynamic section, rip-relative lea rdi, __ehdr_start[rip] // The mapped base of the image, rip-relative mov rsi, rdi // The virtual address of the image call {relocate} // call relocate to fixup relocation entries pop rdi // restore rdi (arg 0) to call start mov rsi, [rsp] // restore rsi (arg 1) to call start (leave on stack to align) jmp {start} // jump to start
mattlafayette/openvmm-old
4,222
openhcl/openhcl_boot/src/arch/aarch64/entry.S
// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. // // Entry point that zeroes BSS, sets up the stack, performs relocations, // does architecture-specific setup, and jumps to start(). // // BSS must be zeroed because the IGVM file does not contain pages for it, and // during reboot there may be dirty data in memory. This must happen early // because the stack itself is in BSS, so BSS must be zeroed before the stack is // set up. .weak _DYNAMIC .hidden _DYNAMIC .balign 0x10 .globl _start _start: // Clean BSS, avoid using x0 as it contains the IGVM parameter. // NOTE: the stack space is allocated in BSS, and can't use function calls // as the return address will be wiped out. adrp x1, __bss_start__ add x1, x1, :lo12:__bss_start__ // X1 contains the BSS start adrp x2, __bss_end__ add x2, x2, :lo12:__bss_end__ sub x2, x2, x1 // X2 contains the BSS length 1: cbz x2, 2f sub x2, x2, 1 strb wzr, [x1,x2] b 1b 2: // Set up the stack space. adrp x1, {stack} add x1, x1, :lo12:{stack} mov x2, {STACK_COOKIE_LO} // Lower 16 bits of the stack cookie movk x2, {STACK_COOKIE_HI}, lsl 16 // Higher 16 bits of the stack cookie, keep the lower bits str x2, [x1] // Store the stack cookie at the bottom add x1, x1, {STACK_SIZE} // Stack size sub x1, x1, #8 // Leave 8 bytes for the stack cookie at the top str x2, [x1] // Store the stack cookie at the top sub x1, x1, #8 // Set the stack pointer mov sp, x1 // Set the vector table up. adrp x1, _vector_table_el1 add x1, x1, :lo12:_vector_table_el1 msr VBAR_EL1, x1 isb // Push x0 to the stack, its value has to be passed to `start`. str x0, [sp, #-16]! // NEON and FP setup for EL1. The compiler can use SIMD as an // optimization because the target specific options set in the `rustc` // do not prohibit that. // This is not compiled for the `softfloat` target so enabling FP // for consistency. mrs x0, CPACR_EL1 orr x0, x0, #(3 << 20) orr x0, x0, #(3 << 16) msr CPACR_EL1, x0 isb // Call `relocate` to fixup relocation entries. The Rust compiler // produces globals for the formatting calls. adrp x0, __ehdr_start add x0, x0, :lo12:__ehdr_start mov x1, x0 adrp x2, _DYNAMIC add x2, x2, :lo12:_DYNAMIC bl {relocate} // Restore the IGVM parameter from the stack and call the main function. // Its first parameter is ignored. mov x0, xzr ldr x1, [sp], #16 bl {start} // If the main function exited, call into the Debug Interface, or // break. mov x0, 6 movk x0, 0x8600, lsl 16 smc #0 .macro EXCEPTION_ENTRY source, kind .align 7 b . mov x0, \source mov x1, \kind b . .endm // Vector table must be aligned to a 2KB boundary. .balign 0x800 _vector_table_el1: // Target and source at same exception level with source SP = SP_EL0 EXCEPTION_ENTRY #0x0, #0x0 // Synchronous exception EXCEPTION_ENTRY #0x0, #0x1 // IRQ EXCEPTION_ENTRY #0x0, #0x2 // FIQ EXCEPTION_ENTRY #0x0, #0x3 // SError // Target and source at same exception level with source SP = SP_ELx EXCEPTION_ENTRY #0x1, #0x0 // Synchronous exception EXCEPTION_ENTRY #0x1, #0x1 // IRQ EXCEPTION_ENTRY #0x1, #0x2 // FIQ EXCEPTION_ENTRY #0x1, #0x3 // SError // Source is at lower exception level running on AArch64 EXCEPTION_ENTRY #0x2, #0x0 // Synchronous exception EXCEPTION_ENTRY #0x2, #0x1 // IRQ EXCEPTION_ENTRY #0x2, #0x2 // FIQ EXCEPTION_ENTRY #0x2, #0x3 // SError // Source is at lower exception level running on AArch32 EXCEPTION_ENTRY #0x3, #0x0 // Synchronous exception EXCEPTION_ENTRY #0x3, #0x1 // IRQ EXCEPTION_ENTRY #0x3, #0x2 // FIQ EXCEPTION_ENTRY #0x3, #0x3 // SError
mattlafayette/openvmm-old
5,086
openhcl/sidecar/src/arch/x86_64/entry.S
# Copyright (C) Microsoft Corporation. All rights reserved. BASE = 0xffffff8000000000 # We will relocate ourselves to this base address PML4_INDEX = 511 STACK_PAGES = 4 STACK_TOP = 0x400000 .globl _start _start: # Save registers. The caller must have provided a stack since we `ret` # back to the caller. push rbp # old stack push rbx # physical to virtual offset push r12 # old cr3 push r13 # old pml4 entry push r14 # input parameter push r15 # input parameter 2 push rax # align the stack mov r14, rdi # Save the input parameters mov r15, rsi # Clear BSS lea rdi, __bss_start[rip] # Put BSS base in rdi lea rcx, _end[rip] # Put BSS end in rcx sub rcx, rdi # Compute BSS len in rcx xor eax, eax # Clear eax cld # Clear the direction flag for the string operation rep stosb # Zero BSS: memset(rdi, al, rcx) mov r12, cr3 # Save old cr3 mov rbx, BASE # Get the base virtual address lea rax, __ehdr_start[rip] # Get the base physical address sub rbx, rax # Compute the physical-to-virtual offset # Perform relocations. lea rdx, _DYNAMIC[rip] # The start of the dynamic section, rip-relative mov rsi, BASE # The target load address of the image lea rdi, __ehdr_start[rip] # The base address of the image call {relocate} # apply relocations (including to page tables) # Whoops, the page tables were relocated incorrectly to VAs. Re-relocate # them to PAs. lea rdi, pt_start[rip] lea rcx, pt_end[rip] 2: sub [rdi], rbx add rdi, 8 cmp rdi, rcx jne 2b # Splice the PDPT into the current page table so that we can run at our # desired base address. mov r13, [r12 + PML4_INDEX * 8] # save old pml4 entry lea rax, 3 + pdpt[rip] mov [r12 + PML4_INDEX * 8], rax # splice in pdpt mov cr3, r12 # flush tlb just in case # Change RIP to virtual addressing. lea rax, 2f[rip] # get the physical address of the label add rax, rbx # convert to virtual jmp rax # Jump to the virtual address mapping 2: # Set the new startup stack. mov rbp, rsp # save the old stack lea rsp, STACK_TOP + __ehdr_start[rip] # set the new startup stack # Drop the identity map page. lea rax, pml4[rip] # get the new pml4 sub rax, rbx # convert to physical mov cr3, rax # set new cr3 # Run the program. Preserve rax to pass back to the caller. mov rdi, r14 mov rsi, r15 call {start} # Restore the physical stack and identity map page tables. mov cr3, r12 # restore old cr3 mov rsp, rbp # restore old stack # Return to physical RIP lea rcx, 3f[rip] # get the virtual address of the label sub rcx, rbx # convert to physical jmp rcx # jump to physical 3: # Restore the old page table entry. mov [r12 + PML4_INDEX * 8], r13 # restore old pml4 entry mov cr3, r12 # flush TLB out: pop r15 pop r15 pop r14 pop r13 pop r12 pop rbx pop rbp ret .globl irq_entry irq_entry: push rax push rcx push rdx push rsi push rdi push r8 push r9 push r10 push r11 call {irq_handler} pop r11 pop r10 pop r9 pop r8 pop rdi pop rsi pop rdx pop rcx pop rax iretq .globl exc_gpf exc_gpf: mov rdi, 0xd mov rsi, rsp jmp {exception_handler} .globl exc_pf exc_pf: mov rdi, 0xe mov rsi, rsp jmp {exception_handler} # Page tables. .pushsection .data PTE_PRESENT = 1 PTE_RW = 2 PTE_LARGE = 0x80 PTE_NX = 0x8000000000000000 PTE_TABLE = PTE_PRESENT | PTE_RW # The image is mapped RWX because it's just one 2MB mapping. # FUTURE: break this down into the different sections to allow W/X to be set appropriately. 
PTE_IMAGE = PTE_PRESENT | PTE_LARGE | PTE_RW PTE_DATA = PTE_PRESENT | PTE_RW | PTE_NX .align 4096 pt_start: pml4: .fill 511, 8, 0 .quad pdpt + PTE_TABLE .align 4096 .globl pdpt pdpt: .quad pd + PTE_TABLE .align 4096 pd: .global IMAGE_PDE IMAGE_PDE: .quad __ehdr_start + PTE_IMAGE # The image. 2MB should be enough for anyone. .quad pt + PTE_TABLE # Runtime data. .align 4096 # The data page table. The layout must match the `addr_space` module. pt: .quad pt + PTE_DATA # Self map for temporary_map. .quad hypercall_input + PTE_DATA .quad hypercall_output + PTE_DATA .fill 509 - STACK_PAGES, 8, 0 .set OFFSET, 0 .rept STACK_PAGES .quad stack + OFFSET + PTE_DATA .set OFFSET, OFFSET + 0x1000 .endr .align 4096 pt_end: .align 4096 hypercall_input: .fill 4096, 1, 0 hypercall_output: .fill 4096, 1, 0 .popsection .pushsection .bss stack: .fill STACK_PAGES * 0x1000, 1, 0 .popsection
maurermi/eece7398-compilers-homework
2,105
hw5-llvm/tests/sqrt.s
; ModuleID = 'tests/sqrt.c' source_filename = "tests/sqrt.c" target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128" target triple = "arm64-apple-macosx15.0.0" @.str = private unnamed_addr constant [25 x i8] c"Square root of %f is %f\0A\00", align 1 ; Function Attrs: noinline nounwind optnone ssp uwtable(sync) define i32 @main() #0 { %1 = alloca i32, align 4 %2 = alloca float, align 4 %3 = alloca float, align 4 store i32 0, ptr %1, align 4 store float 1.600000e+01, ptr %2, align 4 %4 = load float, ptr %2, align 4 %5 = fpext float %4 to double %6 = call double @llvm.sqrt.f64(double %5) %7 = fptrunc double %6 to float store float %7, ptr %3, align 4 %8 = load float, ptr %2, align 4 %9 = fpext float %8 to double %10 = load float, ptr %3, align 4 %11 = fpext float %10 to double %12 = call i32 (ptr, ...) @printf(ptr noundef @.str, double noundef %9, double noundef %11) ret i32 0 } ; Function Attrs: nocallback nofree nosync nounwind speculatable willreturn memory(none) declare double @llvm.sqrt.f64(double) #1 declare i32 @printf(ptr noundef, ...) #2 attributes #0 = { noinline nounwind optnone ssp uwtable(sync) "frame-pointer"="non-leaf" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="apple-m1" "target-features"="+aes,+complxnum,+crc,+dotprod,+fp-armv8,+fp16fml,+fullfp16,+jsconv,+lse,+neon,+pauth,+ras,+rcpc,+rdm,+sha2,+sha3,+v8.1a,+v8.2a,+v8.3a,+v8.4a,+v8.5a,+v8a,+zcm,+zcz" } attributes #1 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) } attributes #2 = { "frame-pointer"="non-leaf" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="apple-m1" "target-features"="+aes,+complxnum,+crc,+dotprod,+fp-armv8,+fp16fml,+fullfp16,+jsconv,+lse,+neon,+pauth,+ras,+rcpc,+rdm,+sha2,+sha3,+v8.1a,+v8.2a,+v8.3a,+v8.4a,+v8.5a,+v8a,+zcm,+zcz" } !llvm.module.flags = !{!0, !1, !2, !3} !llvm.ident = !{!4} !0 = !{i32 1, !"wchar_size", i32 4} !1 = !{i32 8, !"PIC Level", i32 2} !2 = !{i32 7, !"uwtable", i32 1} !3 = !{i32 7, !"frame-pointer", i32 1} !4 = !{!"Homebrew clang version 18.1.8"}
maurermi/eece7398-compilers-homework
1,989
hw5-llvm/tests/division.s
; ModuleID = 'tests/division.c' source_filename = "tests/division.c" target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128" target triple = "arm64-apple-macosx15.0.0" @.str = private unnamed_addr constant [12 x i8] c"Result: %f\0A\00", align 1 ; Function Attrs: noinline nounwind optnone ssp uwtable(sync) define i32 @main() #0 { %1 = alloca i32, align 4 %2 = alloca float, align 4 %3 = alloca float, align 4 %4 = alloca float, align 4 store i32 0, ptr %1, align 4 store float 1.000000e+01, ptr %2, align 4 store float 4.000000e+00, ptr %3, align 4 %5 = load float, ptr %2, align 4 %6 = load float, ptr %3, align 4 %7 = fdiv float %5, %6 store float %7, ptr %4, align 4 %8 = load float, ptr %4, align 4 %9 = fpext float %8 to double %10 = fsub double %9, 1.500000e+00 %11 = fptrunc double %10 to float store float %11, ptr %4, align 4 %12 = load float, ptr %4, align 4 %13 = fpext float %12 to double %14 = call i32 (ptr, ...) @printf(ptr noundef @.str, double noundef %13) ret i32 0 } declare i32 @printf(ptr noundef, ...) #1 attributes #0 = { noinline nounwind optnone ssp uwtable(sync) "frame-pointer"="non-leaf" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="apple-m1" "target-features"="+aes,+complxnum,+crc,+dotprod,+fp-armv8,+fp16fml,+fullfp16,+jsconv,+lse,+neon,+pauth,+ras,+rcpc,+rdm,+sha2,+sha3,+v8.1a,+v8.2a,+v8.3a,+v8.4a,+v8.5a,+v8a,+zcm,+zcz" } attributes #1 = { "frame-pointer"="non-leaf" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="apple-m1" "target-features"="+aes,+complxnum,+crc,+dotprod,+fp-armv8,+fp16fml,+fullfp16,+jsconv,+lse,+neon,+pauth,+ras,+rcpc,+rdm,+sha2,+sha3,+v8.1a,+v8.2a,+v8.3a,+v8.4a,+v8.5a,+v8a,+zcm,+zcz" } !llvm.module.flags = !{!0, !1, !2, !3} !llvm.ident = !{!4} !0 = !{i32 1, !"wchar_size", i32 4} !1 = !{i32 8, !"PIC Level", i32 2} !2 = !{i32 7, !"uwtable", i32 1} !3 = !{i32 7, !"frame-pointer", i32 1} !4 = !{!"Homebrew clang version 18.1.8"}
maurermi/eece7398-compilers-homework
4,780
hw5-llvm/tests/nested-conditionals.s
; ModuleID = 'tests/nested-conditionals.c' source_filename = "tests/nested-conditionals.c" target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128" target triple = "arm64-apple-macosx15.0.0" @.str = private unnamed_addr constant [18 x i8] c"Final result: %f\0A\00", align 1 ; Function Attrs: noinline nounwind optnone ssp uwtable(sync) define i32 @main() #0 { %1 = alloca i32, align 4 %2 = alloca float, align 4 %3 = alloca float, align 4 %4 = alloca float, align 4 store i32 0, ptr %1, align 4 store float 0x400D9999A0000000, ptr %2, align 4 store float 2.500000e+00, ptr %3, align 4 store float 0.000000e+00, ptr %4, align 4 %5 = load float, ptr %2, align 4 %6 = load float, ptr %3, align 4 %7 = fcmp ogt float %5, %6 br i1 %7, label %8, label %17 8: ; preds = %0 %9 = load float, ptr %2, align 4 %10 = fpext float %9 to double %11 = call double @llvm.pow.f64(double %10, double 2.000000e+00) %12 = load float, ptr %3, align 4 %13 = fpext float %12 to double %14 = call double @llvm.sqrt.f64(double %13) %15 = fsub double %11, %14 %16 = fptrunc double %15 to float store float %16, ptr %4, align 4 br label %28 17: ; preds = %0 %18 = load float, ptr %2, align 4 %19 = load float, ptr %3, align 4 %20 = fadd float %18, %19 %21 = fpext float %20 to double %22 = call double @llvm.log.f64(double %21) %23 = load float, ptr %2, align 4 %24 = fpext float %23 to double %25 = call double @llvm.sin.f64(double %24) %26 = fmul double %22, %25 %27 = fptrunc double %26 to float store float %27, ptr %4, align 4 br label %28 28: ; preds = %17, %8 %29 = load float, ptr %4, align 4 %30 = fpext float %29 to double %31 = fcmp ogt double %30, 5.000000e+00 br i1 %31, label %32, label %37 32: ; preds = %28 %33 = load float, ptr %4, align 4 %34 = fpext float %33 to double %35 = fdiv double %34, 2.000000e+00 %36 = fptrunc double %35 to float store float %36, ptr %4, align 4 br label %52 37: ; preds = %28 %38 = load float, ptr %4, align 4 %39 = fpext float %38 to double %40 = fcmp olt double %39, -5.000000e+00 br i1 %40, label %41, label %46 41: ; preds = %37 %42 = load float, ptr %4, align 4 %43 = fpext float %42 to double %44 = fmul double %43, -1.000000e+00 %45 = fptrunc double %44 to float store float %45, ptr %4, align 4 br label %51 46: ; preds = %37 %47 = load float, ptr %4, align 4 %48 = fpext float %47 to double %49 = fadd double %48, 1.500000e+00 %50 = fptrunc double %49 to float store float %50, ptr %4, align 4 br label %51 51: ; preds = %46, %41 br label %52 52: ; preds = %51, %32 %53 = load float, ptr %4, align 4 %54 = fpext float %53 to double %55 = call i32 (ptr, ...) @printf(ptr noundef @.str, double noundef %54) ret i32 0 } ; Function Attrs: nocallback nofree nosync nounwind speculatable willreturn memory(none) declare double @llvm.pow.f64(double, double) #1 ; Function Attrs: nocallback nofree nosync nounwind speculatable willreturn memory(none) declare double @llvm.sqrt.f64(double) #1 ; Function Attrs: nocallback nofree nosync nounwind speculatable willreturn memory(none) declare double @llvm.log.f64(double) #1 ; Function Attrs: nocallback nofree nosync nounwind speculatable willreturn memory(none) declare double @llvm.sin.f64(double) #1 declare i32 @printf(ptr noundef, ...) 
#2 attributes #0 = { noinline nounwind optnone ssp uwtable(sync) "frame-pointer"="non-leaf" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="apple-m1" "target-features"="+aes,+complxnum,+crc,+dotprod,+fp-armv8,+fp16fml,+fullfp16,+jsconv,+lse,+neon,+pauth,+ras,+rcpc,+rdm,+sha2,+sha3,+v8.1a,+v8.2a,+v8.3a,+v8.4a,+v8.5a,+v8a,+zcm,+zcz" } attributes #1 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) } attributes #2 = { "frame-pointer"="non-leaf" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="apple-m1" "target-features"="+aes,+complxnum,+crc,+dotprod,+fp-armv8,+fp16fml,+fullfp16,+jsconv,+lse,+neon,+pauth,+ras,+rcpc,+rdm,+sha2,+sha3,+v8.1a,+v8.2a,+v8.3a,+v8.4a,+v8.5a,+v8a,+zcm,+zcz" } !llvm.module.flags = !{!0, !1, !2, !3} !llvm.ident = !{!4} !0 = !{i32 1, !"wchar_size", i32 4} !1 = !{i32 8, !"PIC Level", i32 2} !2 = !{i32 7, !"uwtable", i32 1} !3 = !{i32 7, !"frame-pointer", i32 1} !4 = !{!"Homebrew clang version 18.1.8"}
maurermi/eece7398-compilers-homework
3,394
hw5-llvm/tests/multiplication.s
; ModuleID = 'tests/multiplication.c' source_filename = "tests/multiplication.c" target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128" target triple = "arm64-apple-macosx15.0.0" ; Function Attrs: noinline nounwind optnone ssp uwtable(sync) define i32 @main() #0 { %1 = alloca i32, align 4 %2 = alloca float, align 4 %3 = alloca float, align 4 %4 = alloca float, align 4 %5 = alloca float, align 4 %6 = alloca float, align 4 %7 = alloca float, align 4 %8 = alloca float, align 4 %9 = alloca float, align 4 %10 = alloca float, align 4 %11 = alloca float, align 4 %12 = alloca float, align 4 %13 = alloca float, align 4 %14 = alloca float, align 4 %15 = alloca float, align 4 %16 = alloca float, align 4 %17 = alloca float, align 4 %18 = alloca float, align 4 %19 = alloca float, align 4 %20 = alloca float, align 4 %21 = alloca float, align 4 store i32 0, ptr %1, align 4 store float 0x3FF19999A0000000, ptr %2, align 4 store float 0x40019999A0000000, ptr %3, align 4 store float 0x400A666660000000, ptr %4, align 4 store float 0x40119999A0000000, ptr %5, align 4 store float 5.500000e+00, ptr %6, align 4 store float 0x401A666660000000, ptr %7, align 4 store float 0x401ECCCCC0000000, ptr %8, align 4 store float 0x40219999A0000000, ptr %9, align 4 store float 0x4023CCCCC0000000, ptr %10, align 4 store float 0x4024333340000000, ptr %11, align 4 %22 = load float, ptr %2, align 4 %23 = load float, ptr %3, align 4 %24 = fmul float %22, %23 store float %24, ptr %12, align 4 %25 = load float, ptr %4, align 4 %26 = load float, ptr %5, align 4 %27 = fmul float %25, %26 store float %27, ptr %13, align 4 %28 = load float, ptr %6, align 4 %29 = load float, ptr %7, align 4 %30 = fmul float %28, %29 store float %30, ptr %14, align 4 %31 = load float, ptr %8, align 4 %32 = load float, ptr %9, align 4 %33 = fmul float %31, %32 store float %33, ptr %15, align 4 %34 = load float, ptr %10, align 4 %35 = load float, ptr %11, align 4 %36 = fmul float %34, %35 store float %36, ptr %16, align 4 %37 = load float, ptr %2, align 4 %38 = load float, ptr %4, align 4 %39 = fmul float %37, %38 store float %39, ptr %17, align 4 %40 = load float, ptr %6, align 4 %41 = load float, ptr %8, align 4 %42 = fmul float %40, %41 store float %42, ptr %18, align 4 %43 = load float, ptr %10, align 4 %44 = load float, ptr %2, align 4 %45 = fmul float %43, %44 store float %45, ptr %19, align 4 %46 = load float, ptr %3, align 4 %47 = load float, ptr %7, align 4 %48 = fmul float %46, %47 store float %48, ptr %20, align 4 %49 = load float, ptr %5, align 4 %50 = load float, ptr %9, align 4 %51 = fmul float %49, %50 store float %51, ptr %21, align 4 ret i32 0 } attributes #0 = { noinline nounwind optnone ssp uwtable(sync) "frame-pointer"="non-leaf" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="apple-m1" "target-features"="+aes,+complxnum,+crc,+dotprod,+fp-armv8,+fp16fml,+fullfp16,+jsconv,+lse,+neon,+pauth,+ras,+rcpc,+rdm,+sha2,+sha3,+v8.1a,+v8.2a,+v8.3a,+v8.4a,+v8.5a,+v8a,+zcm,+zcz" } !llvm.module.flags = !{!0, !1, !2, !3} !llvm.ident = !{!4} !0 = !{i32 1, !"wchar_size", i32 4} !1 = !{i32 8, !"PIC Level", i32 2} !2 = !{i32 7, !"uwtable", i32 1} !3 = !{i32 7, !"frame-pointer", i32 1} !4 = !{!"Homebrew clang version 18.1.8"}
maurermi/eece7398-compilers-homework
2,446
hw5-llvm/tests/sin.s
; ModuleID = 'tests/sin.c' source_filename = "tests/sin.c" target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128" target triple = "arm64-apple-macosx15.0.0" @.str = private unnamed_addr constant [22 x i8] c"Sine: %f, Cosine: %f\0A\00", align 1 ; Function Attrs: noinline nounwind optnone ssp uwtable(sync) define i32 @main() #0 { %1 = alloca i32, align 4 %2 = alloca float, align 4 %3 = alloca float, align 4 %4 = alloca float, align 4 store i32 0, ptr %1, align 4 store float 5.000000e-01, ptr %2, align 4 %5 = load float, ptr %2, align 4 %6 = fpext float %5 to double %7 = call double @llvm.sin.f64(double %6) %8 = fptrunc double %7 to float store float %8, ptr %3, align 4 %9 = load float, ptr %2, align 4 %10 = fpext float %9 to double %11 = call double @llvm.cos.f64(double %10) %12 = fptrunc double %11 to float store float %12, ptr %4, align 4 %13 = load float, ptr %3, align 4 %14 = fpext float %13 to double %15 = load float, ptr %4, align 4 %16 = fpext float %15 to double %17 = call i32 (ptr, ...) @printf(ptr noundef @.str, double noundef %14, double noundef %16) ret i32 0 } ; Function Attrs: nocallback nofree nosync nounwind speculatable willreturn memory(none) declare double @llvm.sin.f64(double) #1 ; Function Attrs: nocallback nofree nosync nounwind speculatable willreturn memory(none) declare double @llvm.cos.f64(double) #1 declare i32 @printf(ptr noundef, ...) #2 attributes #0 = { noinline nounwind optnone ssp uwtable(sync) "frame-pointer"="non-leaf" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="apple-m1" "target-features"="+aes,+complxnum,+crc,+dotprod,+fp-armv8,+fp16fml,+fullfp16,+jsconv,+lse,+neon,+pauth,+ras,+rcpc,+rdm,+sha2,+sha3,+v8.1a,+v8.2a,+v8.3a,+v8.4a,+v8.5a,+v8a,+zcm,+zcz" } attributes #1 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) } attributes #2 = { "frame-pointer"="non-leaf" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="apple-m1" "target-features"="+aes,+complxnum,+crc,+dotprod,+fp-armv8,+fp16fml,+fullfp16,+jsconv,+lse,+neon,+pauth,+ras,+rcpc,+rdm,+sha2,+sha3,+v8.1a,+v8.2a,+v8.3a,+v8.4a,+v8.5a,+v8a,+zcm,+zcz" } !llvm.module.flags = !{!0, !1, !2, !3} !llvm.ident = !{!4} !0 = !{i32 1, !"wchar_size", i32 4} !1 = !{i32 8, !"PIC Level", i32 2} !2 = !{i32 7, !"uwtable", i32 1} !3 = !{i32 7, !"frame-pointer", i32 1} !4 = !{!"Homebrew clang version 18.1.8"}
maurermi/eece7398-compilers-homework
3,202
hw5-llvm/tests/looping_addition.s
; ModuleID = 'tests/looping_addition.c' source_filename = "tests/looping_addition.c" target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128" target triple = "arm64-apple-macosx15.0.0" @.str = private unnamed_addr constant [15 x i8] c"Final sum: %f\0A\00", align 1 ; Function Attrs: noinline nounwind optnone ssp uwtable(sync) define i32 @main() #0 { %1 = alloca i32, align 4 %2 = alloca float, align 4 %3 = alloca float, align 4 %4 = alloca i32, align 4 store i32 0, ptr %1, align 4 store float 1.000000e+00, ptr %2, align 4 store float 0.000000e+00, ptr %3, align 4 store i32 0, ptr %4, align 4 br label %5 5: ; preds = %29, %0 %6 = load i32, ptr %4, align 4 %7 = icmp slt i32 %6, 5 br i1 %7, label %8, label %32 8: ; preds = %5 %9 = load float, ptr %2, align 4 %10 = load i32, ptr %4, align 4 %11 = add nsw i32 %10, 1 %12 = sitofp i32 %11 to float %13 = fdiv float %9, %12 %14 = load float, ptr %3, align 4 %15 = fadd float %14, %13 store float %15, ptr %3, align 4 %16 = load float, ptr %3, align 4 %17 = fpext float %16 to double %18 = fcmp ogt double %17, 2.000000e+00 br i1 %18, label %19, label %24 19: ; preds = %8 %20 = load float, ptr %3, align 4 %21 = fpext float %20 to double %22 = fmul double %21, 5.000000e-01 %23 = fptrunc double %22 to float store float %23, ptr %3, align 4 br label %24 24: ; preds = %19, %8 %25 = load float, ptr %2, align 4 %26 = fpext float %25 to double %27 = fadd double %26, 1.000000e+00 %28 = fptrunc double %27 to float store float %28, ptr %2, align 4 br label %29 29: ; preds = %24 %30 = load i32, ptr %4, align 4 %31 = add nsw i32 %30, 1 store i32 %31, ptr %4, align 4 br label %5, !llvm.loop !5 32: ; preds = %5 %33 = load float, ptr %3, align 4 %34 = fpext float %33 to double %35 = call i32 (ptr, ...) @printf(ptr noundef @.str, double noundef %34) ret i32 0 } declare i32 @printf(ptr noundef, ...) #1 attributes #0 = { noinline nounwind optnone ssp uwtable(sync) "frame-pointer"="non-leaf" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="apple-m1" "target-features"="+aes,+complxnum,+crc,+dotprod,+fp-armv8,+fp16fml,+fullfp16,+jsconv,+lse,+neon,+pauth,+ras,+rcpc,+rdm,+sha2,+sha3,+v8.1a,+v8.2a,+v8.3a,+v8.4a,+v8.5a,+v8a,+zcm,+zcz" } attributes #1 = { "frame-pointer"="non-leaf" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="apple-m1" "target-features"="+aes,+complxnum,+crc,+dotprod,+fp-armv8,+fp16fml,+fullfp16,+jsconv,+lse,+neon,+pauth,+ras,+rcpc,+rdm,+sha2,+sha3,+v8.1a,+v8.2a,+v8.3a,+v8.4a,+v8.5a,+v8a,+zcm,+zcz" } !llvm.module.flags = !{!0, !1, !2, !3} !llvm.ident = !{!4} !0 = !{i32 1, !"wchar_size", i32 4} !1 = !{i32 8, !"PIC Level", i32 2} !2 = !{i32 7, !"uwtable", i32 1} !3 = !{i32 7, !"frame-pointer", i32 1} !4 = !{!"Homebrew clang version 18.1.8"} !5 = distinct !{!5, !6} !6 = !{!"llvm.loop.mustprogress"}
maurermi/eece7398-compilers-homework
1,989
hw5-llvm/tests/addition.s
; ModuleID = 'tests/addition.c' source_filename = "tests/addition.c" target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128" target triple = "arm64-apple-macosx15.0.0" @.str = private unnamed_addr constant [12 x i8] c"Result: %f\0A\00", align 1 ; Function Attrs: noinline nounwind optnone ssp uwtable(sync) define i32 @main() #0 { %1 = alloca i32, align 4 %2 = alloca float, align 4 %3 = alloca float, align 4 %4 = alloca float, align 4 store i32 0, ptr %1, align 4 store float 3.500000e+00, ptr %2, align 4 store float 2.500000e+00, ptr %3, align 4 %5 = load float, ptr %2, align 4 %6 = load float, ptr %3, align 4 %7 = fadd float %5, %6 store float %7, ptr %4, align 4 %8 = load float, ptr %4, align 4 %9 = fpext float %8 to double %10 = fmul double %9, 1.500000e+00 %11 = fptrunc double %10 to float store float %11, ptr %4, align 4 %12 = load float, ptr %4, align 4 %13 = fpext float %12 to double %14 = call i32 (ptr, ...) @printf(ptr noundef @.str, double noundef %13) ret i32 0 } declare i32 @printf(ptr noundef, ...) #1 attributes #0 = { noinline nounwind optnone ssp uwtable(sync) "frame-pointer"="non-leaf" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="apple-m1" "target-features"="+aes,+complxnum,+crc,+dotprod,+fp-armv8,+fp16fml,+fullfp16,+jsconv,+lse,+neon,+pauth,+ras,+rcpc,+rdm,+sha2,+sha3,+v8.1a,+v8.2a,+v8.3a,+v8.4a,+v8.5a,+v8a,+zcm,+zcz" } attributes #1 = { "frame-pointer"="non-leaf" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="apple-m1" "target-features"="+aes,+complxnum,+crc,+dotprod,+fp-armv8,+fp16fml,+fullfp16,+jsconv,+lse,+neon,+pauth,+ras,+rcpc,+rdm,+sha2,+sha3,+v8.1a,+v8.2a,+v8.3a,+v8.4a,+v8.5a,+v8a,+zcm,+zcz" } !llvm.module.flags = !{!0, !1, !2, !3} !llvm.ident = !{!4} !0 = !{i32 1, !"wchar_size", i32 4} !1 = !{i32 8, !"PIC Level", i32 2} !2 = !{i32 7, !"uwtable", i32 1} !3 = !{i32 7, !"frame-pointer", i32 1} !4 = !{!"Homebrew clang version 18.1.8"}
max-cura/okeanos
4,440
device/stanna/pkg/bcm2711/armstub/armstub8.S
/* Based on https://github.com/timanu8/armstubsRPI4/blob/master/armstub8.S */ /* Configuration: BCM2711, high-peripherals configuration */ #define ARM_PERI_BASE 0x4c0000000 #define BCM_PERI_BASE 0x47c000000 #define OSC_FREQ 54000000 #define ARM_LOCAL_CONTROL 0 // note: there's a "timer prescaler subtract" at +4 but it is deprecated #define ARM_LOCAL_PRESCALER 8 #define ARM_GIC_DISTB 0x41000 #define ARM_GIC_CPUB 0x42000 .org 0x0 .globl _start _start: // https://datasheets.raspberrypi.com/bcm2836/bcm2836-peripherals.pdf // Set the source of the ARM's core clock to be the 19.2MHz crystal (vs. the APB) // Also sets the core timer to increment by 1 (instead of 2) ldr x0, =ARM_PERI_BASE str wzr, [x0, #ARM_LOCAL_CONTROL] // Set the prescaler; value is calculated as 0x8000_0000 / local_prescaler; we set it to 1 mov w1, 0x80000000 str w1, [x0, #ARM_LOCAL_PRESCALER] // Set L2 cache latency to 3 ; REG=L2CTRL_EL1 mrs x0, S3_1_C11_C0_2 mov x1, #0x22 orr x0, x0, x1 msr S3_1_C11_C0_2, x0 // Used for the 64-bit system clock // NOTE: this value seems to differ between different SoCs; the 2711 uses 54MHz, but versions // used in prior boards seems to use 19.2MHz. ldr x0, =OSC_FREQ msr CNTFRQ_EL0, x0 msr CNTVOFF_EL2, xzr // Enable FP/SIMD by disabling the TFP (Trap Floating Point) field in bit 10 msr CPTR_EL3, xzr // Secure Configuration Register // Bit 13: Trap WFE // Bit 12: Trap WFI // Bit 10: (UNDOCUMENTED) - "RW" // Bit 9: Disable instruction fetch from NS memory // Bit 8: Enable HVC // Bit 7: Disable SMC // Bit 6: (RES0) Disable early termination // Bit 5: Controls CPSR.A bit writeability (COMPLEX) // Bit 4: Controls CPSR.F bit writeability (COMPLEX) // Bit 3: Take external aborts in Monitor mode rather than Abort mode // Bit 2: Take FIQs in Monitor mode rather than FIQ mode // Bit 1: Take IRQs in Monitor mode rather than IRQ mode // Bit 0: NS bit for non-monitor modes // RES0 - 11:10 // Setting: 10|8|5|4 mov x0, #0x530 msr SCR_EL3, x0 // Governs access to certain registers in lower ELs // Bit 0: CPUACTLR // Bit 1: CPUECTLR // Bit 4: L2CTLR // Bit 5: L2ECTLR // Bit 6: L2ACTLR // RES0 - 3:2 mov x0, #0x73 msr ACTLR_EL3, x0 // Bit 6: SMPEN ; REG=CPUECTLR_EL1 mov x0, #0x40 msr S3_1_C15_C2_1, x0 // (GIC setup goes here) // Set SCTLR_EL2 // 31:30 00 // 29:28 11 // 27:26 00 // 25 - EE - endianness (0=LE, 1=BE) // 24 0 // 23:22 11 // 21:20 00 // 19 - WXN - Write permission implies eXecute Never (1=W^X) // 18 1 // 17 0 // 16 1 // 15:13 000 // 12 - I - Instruction cacheability at EL2 (0=Non-cacheable at all levels, 1=no effect) // 11 1 // 10:6 00000 // 5:4 11 // 3 - SA - SP alignment checking at EL2 // 2 - C - Cacheability control for data accesses at EL2 (0=Non-cacheable at all levels, 1=no effect) // 1 - A - Alignment fault checking at EL2 (0=disable, 1=enable) // 0 - M - MMU enable for EL2 stage 1 address translation // Set: LE, !WXN, !I, !SA, !C, !A, !M ldr x0, =0x30c50830 msr SCTLR_EL2, x0 // Note: if we wanted to switch to L2, we would do it here /* mov x0, #0x3c9 msr spsr_el3, x0 adr x0, el2_post msr elr_el3, x0 eret el2_post: */ // Load CPU ID from least significant two bits of `MPIDR_EL1` mrs x7, MPIDR_EL1 and x6, x6, #3 cbz x6, primary_cpu adr x5, spin_cpu0 secondary_spin: wfe ldr x4, [x5, x6, lsl #3] mov x0, #0 secondary_halt: wfe b secondary_halt // b boot_kernel primary_cpu: ldr w4, kernel_entry32 //ldr w0, dtb_ptr32 boot_kernel: mov x1, #0 mov x2, #0 mov x3, #0 br x4 .ltorg .org 0xd8 .globl spin_cpu0 spin_cpu0: .quad 0 .org 0xe0 .globl spin_cpu1 spin_cpu1: .quad 0 .org 0xe8 .globl spin_cpu2 
spin_cpu2: .quad 0 .org 0xf0 .globl spin_cpu3 spin_cpu3: // Shared with next two symbols/.word // FW clears the next 8 bytes after reading the initial value, leaving // the location suitable for use as spin_cpu3 .org 0xf0 .globl stub_magic stub_magic: .word 0x5afe570b .org 0xf4 .globl stub_version stub_version: .word 0 .org 0xf8 .globl dtb_ptr32 dtb_ptr32: .word 0x0 .org 0xfc .globl kernel_entry32 kernel_entry32: .word 0x0
max-cura/okeanos
5,596
device/okboot/extern/elf.S
@ vim:ft=arm @ @@ @@ FILE device/okboot/extern/elf.S @@ AUTH mcura @@ DESC Relocatable micro-stub that performs a series of memory-to-memory ELF @@ PT_LOAD operations. @@ @@ CHANGELOG: @@ 31 Jan 25 (mcura) @@ Created based on stub.S for version 2 bootloader (okboot) #define _prefetch_flush(reg) \ mov reg, #0; \ mcr p15, 0, reg, c7, c5, 4 #define _cln_inv_dcache_entire(reg) \ mov reg, #0; \ mcr p15, 0, reg, c7, c14, 0 #define _inv_both_caches_entire(reg) \ mov reg, #0; \ mcr p15, 0, reg, c7, c7, 0 #define _btac_flush(reg) \ mov reg, #0; \ mcr p15, 0, reg, c7, c5, 6 #define _dsb(reg) \ mov reg, #0; \ mcr p15, 0, reg, c7, c10, 4 .globl __symbol_relocation_elf .globl __symbol_relocation_elf_end #define ENABLE_DEBUG 0 #if ENABLE_DEBUG # define putss(str) push {r0};adr r0,str;bl uart_puts;pop {r0} # define putc(chr) push {r0};mov r0, chr; bl uart_write8;pop {r0} # define putx(num) push {r0};mov r0, num; bl uart_write_u32x;pop {r0} #else # define putss(x) # define putc(x) # define putx(x) #endif # Elf32_Phdr # p_type - ignore # p_offset # p_vaddr # p_paddr - ignore # p_filesz - ignore # p_memsz # p_flags - ignore # p_align - ignore #define P_OFFSET 0x4 #define P_VADDR 0x8 #define P_MEMSZ 0x14 #define PHDR_SIZE 0x20 @ r0 = pheaders : *const Elf32_Phdr @ r1 = pheaders_count : usize @ r2 = elf "file" base : *const u8 @ r3 = entry : word __symbol_relocation_elf: mov sp, #0x8000 putc(#'\n') putss(str.begin_elf_reloc) bl copy_segments putss(str.finished_copy_segments) bl clear_caches bx r3 @ r0 = pheaders @ r1 = pheaders_count @ r2 = elf "file" base .align 4 copy_segments: @ COPY ELF SEGMENTS push {r0-r2, r4-r11, r14} mov r9, r0 mov r10, r1 mov r11, r2 .phdr_loop: putx(r9) putx(r10) ldr r0, [r9, #P_VADDR] ldr r2, [r9, #P_MEMSZ] movs r2, r2 beq .phdr_fill_zero ldr r1, [r9, #P_OFFSET] add r1, r1, r11 bl memcpy b .phdr_loop2 .phdr_fill_zero: bl memset .phdr_loop2: subs r10, r10, #1 beq .copy_segments.ret add r9, r9, #PHDR_SIZE b .phdr_loop .copy_segments.ret: pop {r0-r2, r4-r11, r15} @ CLOBBERS: r4 clear_caches: _dsb(r4) _cln_inv_dcache_entire(r4) _inv_both_caches_entire(r4) _btac_flush(r4) _prefetch_flush(r4) _dsb(r4) bx lr @ --- UTILITY ROUTINES -- @ INPUT: r0 <- #0, len=r2 @ CLOBBER: r0, r4, r5, r6, r7, r8 memset: .memset_try_coarse: mov r4, #0 mvn r8, #0xf ands r8, r2, r8 beq .memset_try_fine mov r5, #0 mov r6, #0 mov r7, #0 .memset_loop_coarse: stmia r0!, {r4, r5, r6, r7} subs r8, r8, #0x10 bne .memset_loop_coarse .memset_try_fine: ands r8, r2, #0x0f beq .memset_done .memset_loop_fine: str r4, [r0], +#4 subs r8, r8, #4 bne .memset_loop_fine .memset_done: bx lr @ INPUT: r0 <- r1, len=r2 @ clobber: r0, r1, r4, r5, r6, r7, r8 memcpy: .memcpy_try_coarse: mvn r8, #0xf ands r8, r2, r8 beq .memcpy_try_fine .memcpy_loop_coarse: ldmia r1!, {r4, r5, r6, r7} stmia r0!, {r4, r5, r6, r7} subs r8, r8, #0x10 bne .memcpy_loop_coarse .memcpy_try_fine: ands r8, r2, #0x0f beq .memcpy_done .memcpy_loop_fine: ldr r4, [r1], +#4 str r4, [r0], +#4 subs r8, r8, #4 bne .memcpy_loop_fine .memcpy_done: bx lr @ --- DEBUGGING ROUTINES --- #if ENABLE_DEBUG str.begin_elf_reloc: .asciz "[asm] beginning ELF relocation" str.finished_copy_segments: .asciz "[asm] finished copying segments" #else str.begin_elf_reloc: str.finished_copy_segments: #endif #if ENABLE_DEBUG .parled.on: push {r4, r10, r14} _dsb(r4) ldr r10, loc.gpio_base mov r4, #(1 << 27) str r4, [r10, #0x1c] _dsb(r4) pop {r4, r10, r15} .parled.off: push {r4, r10, r14} _dsb(r4) ldr r10, loc.gpio_base mov r4, #(1 << 27) str r4, [r10, #0x28] _dsb(r4) pop {r4, r10, r15} 
.actled.on: push {r4, r10, r14} _dsb(r4) ldr r10, loc.gpio_base mov r4, #1 lsl r4, r4, #(47-32) str r4, [r10, #0x2c] _dsb(r4) pop {r4, r10, r15} .actled.off: push {r4, r10, r14} _dsb(r4) ldr r10, loc.gpio_base mov r4, #1 lsl r4, r4, #(47-32) str r4, [r10, #0x20] _dsb(r4) pop {r4, r10, r15} loc.gpio_base: .word 0x20200000 #define MU_STAT 0x64 #define MU_STAT_TX_READY 1 #define MU_LSR 0x54 #define MU_LSR_FIFO_CAN_WRITE 5 #define MU_IO 0x40 uart_puts: push {r0, r1, lr} mov r1, r0 .ascii_chars: ldrb r0, [r1], +#1 cmp r0, #0 beq .newline_puts bl uart_write8 b .ascii_chars .newline_puts: mov r0, #'\n' bl uart_write8 pop {r0, r1, pc} uart_write_u32x: push {r0, r1, r2, r3, lr} mov r3, #0xf mov r2, r0 mov r1, #32 .digits: sub r1, r1, #4 and r0, r2, r3, lsl r1 mov r0, r0, lsr r1 subs r0, r0, #10 addlt r0, r0, #('0' + 10) addge r0, r0, #'a' bl uart_write8 cmp r1, #0 beq .newline32 b .digits .newline32: mov r0, #'\n' bl uart_write8 pop {r0, r1, r2, r3, pc} uart_write8: push {r1, r2, r14} _dsb(r1) ldr r2, loc.uart_base .uart_busy: ldr r1, [r2, #MU_LSR] tst r1, #(1 << MU_LSR_FIFO_CAN_WRITE) bne .uart_can_write @ if bit is set b .uart_busy .uart_can_write: str r0, [r2, #MU_IO] _dsb(r1) pop {r1, r2, r15} loc.uart_base: .word 0x20215000 #endif @ --- TERMINATING SYMBOL --- __symbol_relocation_elf_end: nop nop
max-cura/okeanos
5,247
device/okboot/extern/stub.S
@ vim:ft=arm @ @@ @@ FILE device/okboot/extern/stub.S @@ AUTH mcura @@ DESC Relocatable micro-stub that performs a `memcpy` and then jumps to an @@ address. Part of the `theseus-device` crate. Primary symbol is @@ `_relocation_stub`; relocatable segment is @@ @@ [_relocation_stub, _relocation_stub_end) @@ @@ Note that this has some additional functionality beyond merely a @@ `memcpy` and branch; read the source for additional details (any @@ description here will almost inevitably become de-synchronised with the @@ code @@ @@ CHANGELOG: @@ 15 Apr 24 (mcura) @@ Separated from boot.S in an effort to clean up code for debugging. @@ 17 Jun 24 (mcura) @@ More cleanup in preparation for bootloader v2 #define _prefetch_flush(reg) \ mov reg, #0; \ mcr p15, 0, reg, c7, c5, 4 #define _cln_inv_dcache_entire(reg) \ mov reg, #0; \ mcr p15, 0, reg, c7, c14, 0 #define _inv_both_caches_entire(reg) \ mov reg, #0; \ mcr p15, 0, reg, c7, c7, 0 #define _btac_flush(reg) \ mov reg, #0; \ mcr p15, 0, reg, c7, c5, 6 #define _dsb(reg) \ mov reg, #0; \ mcr p15, 0, reg, c7, c10, 4 .globl __symbol_relocation_stub .globl __symbol_relocation_stub_end @ @ HACKING NOTES @ 1. DO NOT MAKE ANY REFERENCE TO ANY SYMBOL NOT CONTAINED BETWEEN @ _relocation_stub AND _relocation_stub_end! DOING SO MAY CAUSE DATA @ CORRUPTION. @ @ Note that this includes as well the use of `ldr {reg}, ={value}`, even @ if `value` is not explicitly a symbol (e.g. `ldr r0, =0x8000` and @ `ldr r0, =non_local_symbol` are both equally forbidden). This is @ because `ldr {reg}, ={value}` will attempt to resolve to an immediate @ `MOV`, and, failing that, will put `{value}` in the local data pool. @ I (mcura) am personally unclear on the rules of placement for these @ values, but prior experience (via disassembly) has shown the values @ showing up after the end of the relocation boundary--and thus not being @ moved by the stub. @ @ -------------------------------------------------------------------------------------------------- @ SECTION: RELOCATION @ -------------------------------------------------------------------------------------------------- @ INPUT: r0=dest r1=src r2=len r3=jump_to __symbol_relocation_stub: @ INPUT: r0, r1, r2 @ OUTPUT: N/A @ CLOBBERS: r0, r1, r4, r5, r6, r7, r8 .fastreloc_copy: @bl .actled.on .fastreloc_try_coarse: mvn r8, #0x0f ands r8, r2, r8 beq .fastreloc_try_fine .fastreloc_loop_coarse: @ need ! 
to write the incremented value back to r1/r0 when the instruction @ finishes ldmia r1!, {r4, r5, r6, r7} stmia r0!, {r4, r5, r6, r7} subs r8, r8, #0x10 bne .fastreloc_loop_coarse .fastreloc_try_fine: ands r8, r2, #0x0f beq .fastreloc_done .fastreloc_loop_fine: ldr r4, [r1], +#4 str r4, [r0], +#4 subs r8, r8, #4 bne .fastreloc_loop_fine .fastreloc_done: @bl .actled.off b .postreloc @ INPUT: r0, r1, r2 @ OUTPUT: N/A @ CLOBBERS: r0, r1, r2, r4 .slowreloc_copy: @bl .actled.on teq r2, #0 beq .slowreloc_done .slowreloc_loop: ldr r4, [r1], +#4 str r4, [r0], +#4 subs r2, r2, #4 bne .slowreloc_loop .slowreloc_done: b .postreloc .postreloc: @ INPUT: N/A @ OUTPUT: N/A @ CLOBBERS: r4 .clear_caches: _dsb(r4) @ not _cln_inv_dcache_entire(r4) _inv_both_caches_entire(r4) _btac_flush(r4) _prefetch_flush(r4) _dsb(r4) .jump_to_loaded_program: bx r3 @ -------------------------------------------------------------------------------------------------- @ SECTION: GPIO @ -------------------------------------------------------------------------------------------------- @ INPUT: r10 @ OUTPUT: N/A @ CLOBBERS: r4 .actled.on: _dsb(r4) mov r4, #1 lsl r4, r4, #(47-32) str r4, [r10, #0x2c] _dsb(r4) bx lr .actled.off: _dsb(r4) mov r4, #1 lsl r4, r4, #(47-32) str r4, [r10, #0x20] _dsb(r4) bx lr @ INPUT: N/A @ OUTPUT: r10=loc.gpio_base @ CLOBBERS: r4, r5, r7, r10 .gpio_setup: _dsb(r4) ldr r10, loc.gpio_base #if 0 @ pin 27 output (activity LED on parthiv-pi hat) ldr r5, [r10, #0x08] mov r7, #7 mvn r7, r7, lsl #21 and r5, r5, r7 mov r7, #1 orr r5, r5, r7, lsl #21 str r5, [r10, #0x08] @ pin 47 output (activity LED on Pi board) ldr r5, [r10, #0x10] mov r7, #7 mvn r7, r7, lsl #21 and r5, r5, r7 mov r7, #1 orr r5, r5, r7, lsl #21 str r5, [r10, #0x10] @mov r4, #1 @lsl r4, r4, #(47-32) @str r4, [r10, #0x2c] @mov r4, #1 @lsl r4, r4, #(27) @str r4, [r10, #0x1c] #endif _dsb(r4) .gpio_setup.done: b .fastreloc_copy @b .slowreloc_copy @ -------------------------------------------------------------------------------------------------- @ SECTION: TAIL @ -------------------------------------------------------------------------------------------------- @ @ DATA POOL @ loc.gpio_base: .word 0x20200000 @ @ END OF STUB @ __symbol_relocation_stub_end: nop
max-cura/okeanos
1,535
device/okboot/extern/boot.S
@ vim:ft=arm @ @ FILE extern/boot.S @ DESC Generic system entry point @ #ifndef CUSTOM_STACK .extern __symbol_stack_end__ #define STACK_HIGH __symbol_stack_init__ #elif !defined(STACK_HIGH) #error "CUSTOM_STACK is defined but STACK_HIGH is not" #endif #ifndef CUSTOM_SYMBOLS #define EXTERN_KERNEL_FN __symbol_kstart #define EXTERN_REBOOT_FN __symbol_kreboot #elif !defined(EXTERN_KERNEL_FN) || !defined(EXTERN_REBOOT_FN) #error "CUSTOM_SYMBOLS is defined but one of EXTERN_KERNEL_FN or EXTERN_REBOOT_FN is not" #endif .extern EXTERN_KERNEL_FN .extern EXTERN_REBOOT_FN #define EXTERN_BSS_START __symbol_bss_start__ #define EXTERN_BSS_END __symbol_bss_end__ .extern EXTERN_BSS_START .extern EXTERN_BSS_END #define SUPER_MODE 0b10011 #define MODE_MASK 0b11111 @ Op1=0, Rd=reg, CRn=c7, CRm=c5, Op2=4 : Flush prefetch buffer #define _prefetch_flush(reg) \ mov reg, #0; \ mcr p15, 0, reg, c7, c5, 4 .section ".text.boot" .globl _start _start: @ enter supervisor mode mrs r0, cpsr and r0, r0, #(~MODE_MASK) orr r0, r0, #SUPER_MODE @ disable IRQs (A2-11) orr r0, r0, #(1 << 7) msr cpsr, r0 _prefetch_flush(r1) @ zero bss mov r0, #0 ldr r1, =EXTERN_BSS_START ldr r2, =EXTERN_BSS_END subs r2, r2, r1 bcc _start.zero_bss.L1 _start.zero_bss.L0: strb r0, [r1], #1 subs r2, r2, #1 bne _start.zero_bss.L0 _start.zero_bss.L1: ldr sp, =STACK_HIGH mov fp, #0 bl EXTERN_KERNEL_FN bl EXTERN_REBOOT_FN .loop: b .loop
mborisov1/uc-basic
21,168
ports/nucleo_f412zg/Startup/startup_stm32f412zgtx.s
/** ****************************************************************************** * @file startup_stm32f412zx.s * @author MCD Application Team * @brief STM32F412Zx Devices vector table for GCC based toolchains. * This module performs: * - Set the initial SP * - Set the initial PC == Reset_Handler, * - Set the vector table entries with the exceptions ISR address * - Branches to main in the C library (which eventually * calls main()). * After Reset the Cortex-M4 processor is in Thread mode, * priority is Privileged, and the Stack is set to Main. ****************************************************************************** * @attention * * Copyright (c) 2017 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ .syntax unified .cpu cortex-m4 .fpu softvfp .thumb .global g_pfnVectors .global Default_Handler /* start address for the initialization values of the .data section. defined in linker script */ .word _sidata /* start address for the .data section. defined in linker script */ .word _sdata /* end address for the .data section. defined in linker script */ .word _edata /* start address for the .bss section. defined in linker script */ .word _sbss /* end address for the .bss section. defined in linker script */ .word _ebss /* stack used for SystemInit_ExtMemCtl; always internal RAM used */ /** * @brief This is the code that gets called when the processor first * starts execution following a reset event. Only the absolutely * necessary set is performed, after which the application * supplied main() routine is called. * @param None * @retval : None */ .section .text.Reset_Handler .weak Reset_Handler .type Reset_Handler, %function Reset_Handler: ldr sp, =_estack /* set stack pointer */ /* Copy the data segment initializers from flash to SRAM */ ldr r0, =_sdata ldr r1, =_edata ldr r2, =_sidata movs r3, #0 b LoopCopyDataInit CopyDataInit: ldr r4, [r2, r3] str r4, [r0, r3] adds r3, r3, #4 LoopCopyDataInit: adds r4, r0, r3 cmp r4, r1 bcc CopyDataInit /* Zero fill the bss segment. */ ldr r2, =_sbss ldr r4, =_ebss movs r3, #0 b LoopFillZerobss FillZerobss: str r3, [r2] adds r2, r2, #4 LoopFillZerobss: cmp r2, r4 bcc FillZerobss /* Call the clock system initialization function.*/ bl SystemInit /* Call static constructors */ bl __libc_init_array /* Call the application's entry point.*/ bl main bx lr .size Reset_Handler, .-Reset_Handler /** * @brief This is the code that gets called when the processor receives an * unexpected interrupt. This simply enters an infinite loop, preserving * the system state for examination by a debugger. * @param None * @retval None */ .section .text.Default_Handler,"ax",%progbits Default_Handler: Infinite_Loop: b Infinite_Loop .size Default_Handler, .-Default_Handler /****************************************************************************** * * The minimal vector table for a Cortex M3. Note that the proper constructs * must be placed on this to ensure that it ends up at physical address * 0x0000.0000. 
* *******************************************************************************/ .section .isr_vector,"a",%progbits .type g_pfnVectors, %object .size g_pfnVectors, .-g_pfnVectors g_pfnVectors: .word _estack .word Reset_Handler .word NMI_Handler .word HardFault_Handler .word MemManage_Handler .word BusFault_Handler .word UsageFault_Handler .word 0 .word 0 .word 0 .word 0 .word SVC_Handler .word DebugMon_Handler .word 0 .word PendSV_Handler .word SysTick_Handler /* External Interrupts */ .word WWDG_IRQHandler /* Window WatchDog */ .word PVD_IRQHandler /* PVD through EXTI Line detection */ .word TAMP_STAMP_IRQHandler /* Tamper and TimeStamps through the EXTI line */ .word RTC_WKUP_IRQHandler /* RTC Wakeup through the EXTI line */ .word FLASH_IRQHandler /* FLASH */ .word RCC_IRQHandler /* RCC */ .word EXTI0_IRQHandler /* EXTI Line0 */ .word EXTI1_IRQHandler /* EXTI Line1 */ .word EXTI2_IRQHandler /* EXTI Line2 */ .word EXTI3_IRQHandler /* EXTI Line3 */ .word EXTI4_IRQHandler /* EXTI Line4 */ .word DMA1_Stream0_IRQHandler /* DMA1 Stream 0 */ .word DMA1_Stream1_IRQHandler /* DMA1 Stream 1 */ .word DMA1_Stream2_IRQHandler /* DMA1 Stream 2 */ .word DMA1_Stream3_IRQHandler /* DMA1 Stream 3 */ .word DMA1_Stream4_IRQHandler /* DMA1 Stream 4 */ .word DMA1_Stream5_IRQHandler /* DMA1 Stream 5 */ .word DMA1_Stream6_IRQHandler /* DMA1 Stream 6 */ .word ADC_IRQHandler /* ADC1, ADC2 and ADC3s */ .word CAN1_TX_IRQHandler /* CAN1 TX */ .word CAN1_RX0_IRQHandler /* CAN1 RX0 */ .word CAN1_RX1_IRQHandler /* CAN1 RX1 */ .word CAN1_SCE_IRQHandler /* CAN1 SCE */ .word EXTI9_5_IRQHandler /* External Line[9:5]s */ .word TIM1_BRK_TIM9_IRQHandler /* TIM1 Break and TIM9 */ .word TIM1_UP_TIM10_IRQHandler /* TIM1 Update and TIM10 */ .word TIM1_TRG_COM_TIM11_IRQHandler /* TIM1 Trigger and Commutation and TIM11 */ .word TIM1_CC_IRQHandler /* TIM1 Capture Compare */ .word TIM2_IRQHandler /* TIM2 */ .word TIM3_IRQHandler /* TIM3 */ .word TIM4_IRQHandler /* TIM4 */ .word I2C1_EV_IRQHandler /* I2C1 Event */ .word I2C1_ER_IRQHandler /* I2C1 Error */ .word I2C2_EV_IRQHandler /* I2C2 Event */ .word I2C2_ER_IRQHandler /* I2C2 Error */ .word SPI1_IRQHandler /* SPI1 */ .word SPI2_IRQHandler /* SPI2 */ .word USART1_IRQHandler /* USART1 */ .word USART2_IRQHandler /* USART2 */ .word USART3_IRQHandler /* USART3 */ .word EXTI15_10_IRQHandler /* External Line[15:10]s */ .word RTC_Alarm_IRQHandler /* RTC Alarm (A and B) through EXTI Line */ .word OTG_FS_WKUP_IRQHandler /* USB OTG FS Wakeup through EXTI line */ .word TIM8_BRK_TIM12_IRQHandler /* TIM8 Break and TIM12 */ .word TIM8_UP_TIM13_IRQHandler /* TIM8 Update and TIM13 */ .word TIM8_TRG_COM_TIM14_IRQHandler /* TIM8 Trigger and Commutation and TIM14 */ .word TIM8_CC_IRQHandler /* TIM8 Capture Compare */ .word DMA1_Stream7_IRQHandler /* DMA1 Stream7 */ .word 0 /* Reserved */ .word SDIO_IRQHandler /* SDIO */ .word TIM5_IRQHandler /* TIM5 */ .word SPI3_IRQHandler /* SPI3 */ .word 0 /* Reserved */ .word 0 /* Reserved */ .word TIM6_IRQHandler /* TIM6 */ .word TIM7_IRQHandler /* TIM7 */ .word DMA2_Stream0_IRQHandler /* DMA2 Stream 0 */ .word DMA2_Stream1_IRQHandler /* DMA2 Stream 1 */ .word DMA2_Stream2_IRQHandler /* DMA2 Stream 2 */ .word DMA2_Stream3_IRQHandler /* DMA2 Stream 3 */ .word DMA2_Stream4_IRQHandler /* DMA2 Stream 4 */ .word DFSDM1_FLT0_IRQHandler /* DFSDM1 Filter0 */ .word DFSDM1_FLT1_IRQHandler /* DFSDM1 Filter1 */ .word CAN2_TX_IRQHandler /* CAN2 TX */ .word CAN2_RX0_IRQHandler /* CAN2 RX0 */ .word CAN2_RX1_IRQHandler /* CAN2 RX1 */ .word CAN2_SCE_IRQHandler /* CAN2 SCE */ 
.word OTG_FS_IRQHandler /* USB OTG FS */ .word DMA2_Stream5_IRQHandler /* DMA2 Stream 5 */ .word DMA2_Stream6_IRQHandler /* DMA2 Stream 6 */ .word DMA2_Stream7_IRQHandler /* DMA2 Stream 7 */ .word USART6_IRQHandler /* USART6 */ .word I2C3_EV_IRQHandler /* I2C3 event */ .word I2C3_ER_IRQHandler /* I2C3 error */ .word 0 /* Reserved */ .word 0 /* Reserved */ .word 0 /* Reserved */ .word 0 /* Reserved */ .word 0 /* Reserved */ .word 0 /* Reserved */ .word RNG_IRQHandler /* RNG */ .word FPU_IRQHandler /* FPU */ .word 0 /* Reserved */ .word 0 /* Reserved */ .word SPI4_IRQHandler /* SPI4 */ .word SPI5_IRQHandler /* SPI5 */ .word 0 /* Reserved */ .word 0 /* Reserved */ .word 0 /* Reserved */ .word 0 /* Reserved */ .word 0 /* Reserved */ .word 0 /* Reserved */ .word QUADSPI_IRQHandler /* QuadSPI */ .word 0 /* Reserved */ .word 0 /* Reserved */ .word FMPI2C1_EV_IRQHandler /* FMPI2C1 Event */ .word FMPI2C1_ER_IRQHandler /* FMPI2C1 Error */ /******************************************************************************* * * Provide weak aliases for each Exception handler to the Default_Handler. * As they are weak aliases, any function with the same name will override * this definition. * *******************************************************************************/ .weak NMI_Handler .thumb_set NMI_Handler,Default_Handler .weak HardFault_Handler .thumb_set HardFault_Handler,Default_Handler .weak MemManage_Handler .thumb_set MemManage_Handler,Default_Handler .weak BusFault_Handler .thumb_set BusFault_Handler,Default_Handler .weak UsageFault_Handler .thumb_set UsageFault_Handler,Default_Handler .weak SVC_Handler .thumb_set SVC_Handler,Default_Handler .weak DebugMon_Handler .thumb_set DebugMon_Handler,Default_Handler .weak PendSV_Handler .thumb_set PendSV_Handler,Default_Handler .weak SysTick_Handler .thumb_set SysTick_Handler,Default_Handler .weak WWDG_IRQHandler .thumb_set WWDG_IRQHandler,Default_Handler .weak PVD_IRQHandler .thumb_set PVD_IRQHandler,Default_Handler .weak TAMP_STAMP_IRQHandler .thumb_set TAMP_STAMP_IRQHandler,Default_Handler .weak RTC_WKUP_IRQHandler .thumb_set RTC_WKUP_IRQHandler,Default_Handler .weak FLASH_IRQHandler .thumb_set FLASH_IRQHandler,Default_Handler .weak RCC_IRQHandler .thumb_set RCC_IRQHandler,Default_Handler .weak EXTI0_IRQHandler .thumb_set EXTI0_IRQHandler,Default_Handler .weak EXTI1_IRQHandler .thumb_set EXTI1_IRQHandler,Default_Handler .weak EXTI2_IRQHandler .thumb_set EXTI2_IRQHandler,Default_Handler .weak EXTI3_IRQHandler .thumb_set EXTI3_IRQHandler,Default_Handler .weak EXTI4_IRQHandler .thumb_set EXTI4_IRQHandler,Default_Handler .weak DMA1_Stream0_IRQHandler .thumb_set DMA1_Stream0_IRQHandler,Default_Handler .weak DMA1_Stream1_IRQHandler .thumb_set DMA1_Stream1_IRQHandler,Default_Handler .weak DMA1_Stream2_IRQHandler .thumb_set DMA1_Stream2_IRQHandler,Default_Handler .weak DMA1_Stream3_IRQHandler .thumb_set DMA1_Stream3_IRQHandler,Default_Handler .weak DMA1_Stream4_IRQHandler .thumb_set DMA1_Stream4_IRQHandler,Default_Handler .weak DMA1_Stream5_IRQHandler .thumb_set DMA1_Stream5_IRQHandler,Default_Handler .weak DMA1_Stream6_IRQHandler .thumb_set DMA1_Stream6_IRQHandler,Default_Handler .weak ADC_IRQHandler .thumb_set ADC_IRQHandler,Default_Handler .weak CAN1_TX_IRQHandler .thumb_set CAN1_TX_IRQHandler,Default_Handler .weak CAN1_RX0_IRQHandler .thumb_set CAN1_RX0_IRQHandler,Default_Handler .weak CAN1_RX1_IRQHandler .thumb_set CAN1_RX1_IRQHandler,Default_Handler .weak CAN1_SCE_IRQHandler .thumb_set CAN1_SCE_IRQHandler,Default_Handler .weak EXTI9_5_IRQHandler 
.thumb_set EXTI9_5_IRQHandler,Default_Handler .weak TIM1_BRK_TIM9_IRQHandler .thumb_set TIM1_BRK_TIM9_IRQHandler,Default_Handler .weak TIM1_UP_TIM10_IRQHandler .thumb_set TIM1_UP_TIM10_IRQHandler,Default_Handler .weak TIM1_TRG_COM_TIM11_IRQHandler .thumb_set TIM1_TRG_COM_TIM11_IRQHandler,Default_Handler .weak TIM1_CC_IRQHandler .thumb_set TIM1_CC_IRQHandler,Default_Handler .weak TIM2_IRQHandler .thumb_set TIM2_IRQHandler,Default_Handler .weak TIM3_IRQHandler .thumb_set TIM3_IRQHandler,Default_Handler .weak TIM4_IRQHandler .thumb_set TIM4_IRQHandler,Default_Handler .weak I2C1_EV_IRQHandler .thumb_set I2C1_EV_IRQHandler,Default_Handler .weak I2C1_ER_IRQHandler .thumb_set I2C1_ER_IRQHandler,Default_Handler .weak I2C2_EV_IRQHandler .thumb_set I2C2_EV_IRQHandler,Default_Handler .weak I2C2_ER_IRQHandler .thumb_set I2C2_ER_IRQHandler,Default_Handler .weak SPI1_IRQHandler .thumb_set SPI1_IRQHandler,Default_Handler .weak SPI2_IRQHandler .thumb_set SPI2_IRQHandler,Default_Handler .weak USART1_IRQHandler .thumb_set USART1_IRQHandler,Default_Handler .weak USART2_IRQHandler .thumb_set USART2_IRQHandler,Default_Handler .weak USART3_IRQHandler .thumb_set USART3_IRQHandler,Default_Handler .weak EXTI15_10_IRQHandler .thumb_set EXTI15_10_IRQHandler,Default_Handler .weak RTC_Alarm_IRQHandler .thumb_set RTC_Alarm_IRQHandler,Default_Handler .weak OTG_FS_WKUP_IRQHandler .thumb_set OTG_FS_WKUP_IRQHandler,Default_Handler .weak TIM8_BRK_TIM12_IRQHandler .thumb_set TIM8_BRK_TIM12_IRQHandler,Default_Handler .weak TIM8_UP_TIM13_IRQHandler .thumb_set TIM8_UP_TIM13_IRQHandler,Default_Handler .weak TIM8_TRG_COM_TIM14_IRQHandler .thumb_set TIM8_TRG_COM_TIM14_IRQHandler,Default_Handler .weak TIM8_CC_IRQHandler .thumb_set TIM8_CC_IRQHandler,Default_Handler .weak DMA1_Stream7_IRQHandler .thumb_set DMA1_Stream7_IRQHandler,Default_Handler .weak SDIO_IRQHandler .thumb_set SDIO_IRQHandler,Default_Handler .weak TIM5_IRQHandler .thumb_set TIM5_IRQHandler,Default_Handler .weak SPI3_IRQHandler .thumb_set SPI3_IRQHandler,Default_Handler .weak TIM6_IRQHandler .thumb_set TIM6_IRQHandler,Default_Handler .weak TIM7_IRQHandler .thumb_set TIM7_IRQHandler,Default_Handler .weak DMA2_Stream0_IRQHandler .thumb_set DMA2_Stream0_IRQHandler,Default_Handler .weak DMA2_Stream1_IRQHandler .thumb_set DMA2_Stream1_IRQHandler,Default_Handler .weak DMA2_Stream2_IRQHandler .thumb_set DMA2_Stream2_IRQHandler,Default_Handler .weak DMA2_Stream3_IRQHandler .thumb_set DMA2_Stream3_IRQHandler,Default_Handler .weak DMA2_Stream4_IRQHandler .thumb_set DMA2_Stream4_IRQHandler,Default_Handler .weak DFSDM1_FLT0_IRQHandler .thumb_set DFSDM1_FLT0_IRQHandler,Default_Handler .weak DFSDM1_FLT1_IRQHandler .thumb_set DFSDM1_FLT1_IRQHandler,Default_Handler .weak CAN2_TX_IRQHandler .thumb_set CAN2_TX_IRQHandler,Default_Handler .weak CAN2_RX0_IRQHandler .thumb_set CAN2_RX0_IRQHandler,Default_Handler .weak CAN2_RX1_IRQHandler .thumb_set CAN2_RX1_IRQHandler,Default_Handler .weak CAN2_SCE_IRQHandler .thumb_set CAN2_SCE_IRQHandler,Default_Handler .weak OTG_FS_IRQHandler .thumb_set OTG_FS_IRQHandler,Default_Handler .weak DMA2_Stream5_IRQHandler .thumb_set DMA2_Stream5_IRQHandler,Default_Handler .weak DMA2_Stream6_IRQHandler .thumb_set DMA2_Stream6_IRQHandler,Default_Handler .weak DMA2_Stream7_IRQHandler .thumb_set DMA2_Stream7_IRQHandler,Default_Handler .weak USART6_IRQHandler .thumb_set USART6_IRQHandler,Default_Handler .weak I2C3_EV_IRQHandler .thumb_set I2C3_EV_IRQHandler,Default_Handler .weak I2C3_ER_IRQHandler .thumb_set I2C3_ER_IRQHandler,Default_Handler .weak 
RNG_IRQHandler .thumb_set RNG_IRQHandler,Default_Handler .weak FPU_IRQHandler .thumb_set FPU_IRQHandler,Default_Handler .weak SPI4_IRQHandler .thumb_set SPI4_IRQHandler,Default_Handler .weak SPI5_IRQHandler .thumb_set SPI5_IRQHandler,Default_Handler .weak QUADSPI_IRQHandler .thumb_set QUADSPI_IRQHandler,Default_Handler .weak FMPI2C1_EV_IRQHandler .thumb_set FMPI2C1_EV_IRQHandler,Default_Handler .weak FMPI2C1_ER_IRQHandler .thumb_set FMPI2C1_ER_IRQHandler,Default_Handler
mbrla0/fnptr-poc
1,782
src/asm/instrument.s
.include "defs.inc" ; The instrumentation helper gadget. ; ; The code in this section is responsible for checking whether the target of ; a given indirect jump has alread been instrumented by the debugger, by doing a ; binary search through the list of known jump targets. That list is owned and ; controlled by the debugger, but resides in the memory space of the inferior. ; ; Since usually these jumps will only have a handful of distinct targets ; throughout their limetimes, doing this allows us to save quite a bit of time, ; by avoid the costly rendezvous with the debugger in the hit case, and only ; adding a comparatively small overhead to the miss case. ; ; Additionally, it only makes sense to use this gadget for indirect jumps that ; are hit multiple times. The fewer times an indirect jump instruction is used, ; the more likely it is for the ratio of hits to misses to be lower. ; .code64 .section InstrumentationHelper pushq %rax pushq %r15 pushq %r14 pushq %rbx ; Load long-lived parameters ; ; %rbx => Target jump address ; %r15 => Base address of cache ; %r14 => Number of elements in cache call getTargetAddress(%rip) movq %rax, %rbx movq cache(%rip), %r15 movq cacheLen(%rip), %r14 ; Check if we've already seen this addres. xorq %rax, %rax ; This is a new target address. Defer to the debugger. int3 popq %rbx popq %r14 popq %r15 popq %rax .section InstrumentationHelperParamsLocal cache: ; The address of the cache region used by this instrument. .quad 0 cacheLen: ; The number of addresses in the cache region. .quad 0 getTargetAddress: ; The gadget that loads the effective address of the jump ; target into %rax and calls `ret`. ; ; As an example, this assumes the address is stored in %rdx movq %rdx, %rax ret
mcdcam/mipsy-macro
1,493
examples/test_simple.preprocessed.S
# Create a register macro ($ prefix). Replaces all future occurrences of $COOL_REGISTER with $s0. #define $COOL_REGISTER $s0 # Create an address macro (@ prefix). #define @ADDR numbers($s0) # Create an immediate macro (no prefix). Mipsy already supports defining constants using the # e.g. VAL = 3 + 4 * 5 syntax. You should usually use that instead of this, it's better! #define INIT 4 # Create a directive macro (. prefix). You probably won't need to use this, but it's available. #define .DRCTV .byte # This is a raw macro (with ! prefix), no sanity checking of values is performed. # They're occasionally useful for defining full statements (instructions, directives etc.). # Don't use these unless you really know what you're doing, things will break if you're not careful. #define !RET jr $ra main: # These are scoped macros. They are removed when their label (`main__end` in this case) is reached. # This is particularly useful for giving names to registers used in a function. #defineuntil main__end $X $t0 #defineuntil main__end $Y $t1 li $t0, 4 # x = 4; li $t1, 123 # y = 123; move $s0, $t0 # cool_reg = x; sw $t1, numbers($s0) # numbers[cool_reg] = y; # $X and $Y go out of scope here, so occurrences of them after this label won't be replaced, # and they'll be able to be redefined as something else. main__end: li $v0, 0 jr $ra # return 0; .data prompt: .asciiz "Enter a number: " numbers: .byte 0, 1, 2, 3, 4, 5, 6, 7 # char numbers[8] = { 0, 1, ... 7 };
mcdcam/mipsy-macro
1,159
examples/test.preprocessed.S
# A comment # and another #![tabsize(8)] #defineuntil end THING 1 #define THING_2 0x456 #define $REG $v0 #define @ADDR 10($v0) #define !OP syscall #define .DRCTV .text main: # int main(void) { la $a0, prompt # printf("Enter a number: "); li $v0, 4 syscall li $v0, 1 # scanf("%d", number); syscall li $v0, 1; li $v0, 3;syscall; li$v0,1; li $v0, 3; syscall ; x = 10&7 * 0x3 | (5 / 2) + 2 ^ 3 -~(1) * 088976 % 000430 - 0o4 + 0b101010 li $v0, 11 li $a0, 'a' syscall li $v0, 11 li $a0, '"' syscall li $v0, 11 li $a0, '\0' syscall li $v0, 11 li $a0, '\"' syscall li $v0, 11 li $a0, ''' syscall li $v0, 11 li $a0, '\'' syscall li $v0, 11 li $a0, '\\' syscall li $v0, 11 li $a0, ' ' # why is this a thing???? syscall li $t7, 10 sw $t7, 10($v0) li $s2, 780 add $s1, $s2, 8 beq $s1, 3, end end: li $v0, 0 jr $ra # return 0 .data prompt: .asciiz "Enter a number: " multiline: .asciiz "some text and some more" evil1: .asciiz "a \"not very nice\" \n string\t \\ :(" evil2: .asciiz"a # string: 'hello'"why: .asciiz "hi??" same.line: .asciiz "hi" numbers: .word 1, 2, 3, 4 .text eof: syscall
mcdcam/mipsy-macro
1,160
examples/test.S
# A comment # and another #![tabsize(8)] #defineuntil end THING 1 #define THING_2 0x456 #define $REG $v0 #define @ADDR 10($v0) #define !OP syscall #define .DRCTV .text main: # int main(void) { la $a0, prompt # printf("Enter a number: "); li $v0, 4 syscall li $v0, THING # scanf("%d", number); syscall li $v0, 1; li $REG, 3;syscall; li$v0,1; li $REG, 3; syscall ; x = 10&7 * 0x3 | (5 / 2) + 2 ^ 3 -~(1) * 088976 % 000430 - 0o4 + 0b101010 li $v0, 11 li $a0, 'a' !OP li $v0, 11 li $a0, '"' syscall li $v0, 11 li $a0, '\0' syscall li $v0, 11 li $a0, '\"' syscall li $v0, 11 li $a0, ''' syscall li $v0, 11 li $a0, '\'' syscall li $v0, 11 li $a0, '\\' syscall li $v0, 11 li $a0, ' ' # why is this a thing???? syscall li $t7, 10 sw $t7, @ADDR li $s2, 780 add $s1, $s2, 8 beq $s1, 3, end end: li $v0, 0 jr $ra # return 0 .data prompt: .asciiz "Enter a number: " multiline: .asciiz "some text and some more" evil1: .asciiz "a \"not very nice\" \n string\t \\ :(" evil2: .asciiz"a # string: 'hello'"why: .asciiz "hi??" same.line: .asciiz "hi" numbers: .word 1, 2, 3, 4 .DRCTV eof: syscall
mcdcam/mipsy-macro
1,495
examples/test_simple.S
# Create a register macro ($ prefix). Replaces all future occurrences of $COOL_REGISTER with $s0. #define $COOL_REGISTER $s0 # Create an address macro (@ prefix). #define @ADDR numbers($s0) # Create an immediate macro (no prefix). Mipsy already supports defining constants using the # e.g. VAL = 3 + 4 * 5 syntax. You should usually use that instead of this, it's better! #define INIT 4 # Create a directive macro (. prefix). You probably won't need to use this, but it's available. #define .DRCTV .byte # This is a raw macro (with ! prefix), no sanity checking of values is performed. # They're occasionally useful for defining full statements (instructions, directives etc.). # Don't use these unless you really know what you're doing, things will break if you're not careful. #define !RET jr $ra main: # These are scoped macros. They are removed when their label (`main__end` in this case) is reached. # This is particularly useful for giving names to registers used in a function. #defineuntil main__end $X $t0 #defineuntil main__end $Y $t1 li $X, INIT # x = 4; li $Y, 123 # y = 123; move $COOL_REGISTER, $X # cool_reg = x; sw $Y, @ADDR # numbers[cool_reg] = y; # $X and $Y go out of scope here, so occurrences of them after this label won't be replaced, # and they'll be able to be redefined as something else. main__end: li $v0, 0 !RET # return 0; .data prompt: .asciiz "Enter a number: " numbers: .DRCTV 0, 1, 2, 3, 4, 5, 6, 7 # char numbers[8] = { 0, 1, ... 7 };
mdlayher/rvexec
1,312
asm/loop/loop.s
# Thank you to Prabhas Chongstitvatana for this example: # https://www.cp.eng.chula.ac.th/~prabhas/teaching/comparch/2022/Programming-RISC-V-assembly.htm j main twice: # @twice addi sp, sp, -16 # create stack frame with 4 slots sw ra, 12(sp) # first slot keeps return address sw s0, 8(sp) # second slot keeps s0 addi s0, sp, 16 # set s0 to this stack frame sw a0, -12(s0) # store passing value (x) to slot 4 lw a0, -12(s0) # get x add a0, a0, a0 # x + x return value in a0 lw ra, 12(sp) # restore return address lw s0, 8(sp) addi sp, sp, 16 # delete stack frame ret main: # @main addi sp, sp, -16 # stack frame has 4 slots sw ra, 12(sp) sw s0, 8(sp) addi s0, sp, 16 li a0, 2 # pass number 2 in a0 call twice # call twice *** sw a0, -12(s0) # put return value to a li a0, 0 lw ra, 12(sp) lw s0, 8(sp) addi sp, sp, 16 # restore sp
MediLang/medi
1,417
scripts/riscv32/baremetal/start.S
.section .init .global _start /* QEMU virt machine: RAM at 0x80000000, UART0 at 0x10000000 */ .equ RAM_BASE, 0x80000000 .equ RAM_SIZE, (128 * 1024 * 1024) .equ STACK_TOP, (RAM_BASE + RAM_SIZE - 0x100) .equ UART0_BASE, 0x10000000 /* QEMU Test Finisher (exit): address 0x100000 */ .equ TEST_FINISHER, 0x00100000 /* UART registers (16550-compatible) */ .equ UART_THR, 0x00 /* Transmit Holding Register */ .equ UART_LSR, 0x05 /* Line Status Register */ .equ LSR_THRE, 0x20 /* Transmit Holding Register Empty */ _start: /* Set up stack pointer */ la sp, stack_top /* Print "OK\n" to UART0 for visibility */ la a0, msg la a1, msg_end 1: beq a0, a1, 2f /* wait until THR empty */ 3: li t0, UART0_BASE addi t1, t0, UART_LSR lbu t2, 0(t1) andi t2, t2, LSR_THRE beqz t2, 3b /* write byte */ lbu t3, 0(a0) sb t3, UART_THR(t0) addi a0, a0, 1 j 1b 2: /* Call main() if present */ la t0, main jr t0 /* If main returns, signal PASS to QEMU Test Finisher and exit emulator */ li t0, TEST_FINISHER li t1, 0x5555 /* PASS code */ sw t1, 0(t0) /* Also request SBI legacy shutdown (requires -bios default/OpenSBI) */ li a7, 0x08 /* SBI legacy shutdown */ ecall /* Fallback if finisher not present: hang */ 4: wfi j 4b /* BSS-less simple stack top symbol */ .section .bss .balign 16 stack_top: .space 0 .section .rodata msg: .ascii "OK\n" msg_end:
memasdeligeorgakis/web-riscv-vm
2,206
code_examples/project_1/rust_riscv_2024_12_31_17_00.s
000110f4 <_start>: 110f4: 1141 addi sp,sp,-16 110f6: c606 sw ra,12(sp) 110f8: 00000097 auipc ra,0x0 110fc: 018080e7 jalr 24(ra) # 11110 <main> 11100: 00012537 lui a0,0x12 11104: 4585 li a1,1 11106: 14b52023 sw a1,320(a0) # 12140 <_ZN10rust_riscv6RESULT17ha26bc3dfb5ff0c2cE> 1110a: 40b2 lw ra,12(sp) 1110c: 0141 addi sp,sp,16 1110e: 8082 ret 00011110 <main>: 11110: 1141 addi sp,sp,-16 11112: c606 sw ra,12(sp) 11114: 4505 li a0,1 11116: 4589 li a1,2 11118: 00000097 auipc ra,0x0 1111c: 018080e7 jalr 24(ra) # 11130 <sum_2_number> 11120: 050d addi a0,a0,3 11122: 000125b7 lui a1,0x12 11126: 14a5a223 sw a0,324(a1) # 12144 <_ZN10rust_riscv8RESULT_217hd55270e3bb8c12abE> 1112a: 40b2 lw ra,12(sp) 1112c: 0141 addi sp,sp,16 1112e: 8082 ret 00011130 <sum_2_number>: 11130: 46a9 li a3,10 11132: 463d li a2,15 11134: 00d56363 bltu a0,a3,1113a <sum_2_number+0xa> 11138: 4665 li a2,25 1113a: 952e add a0,a0,a1 1113c: 9532 add a0,a0,a2 1113e: 8082 ret # #[no_mangle] # #[inline(never)] # fn main() { # let number_1: u32 = 1; # let number_2: u32 = 2; # let number_3: u32 = 3; # let sum_of_numbers = sum_2_number(number_1, number_2); # let sum_of_numbers: u32 = sum_of_numbers + number_3; # // let aaa = unsafe { add_vectors_rvv(&VARIABLE_1, &VARIABLE_1, &mut VARIABLE_1, 3) }; # unsafe { # RESULT_2 = sum_of_numbers; // Store result in a static variable to prevent optimization # } # } # #[no_mangle] # #[inline(never)] # fn sum_2_number(number_1: u32, number_2: u32) -> u32 { # let variable_1 = number_1 + number_2; # let variable_2 = if number_1 < 10 { # 15 # } # else { # 25 # }; # variable_1 + variable_2 # }
memasdeligeorgakis/web-riscv-vm
1,447
code_examples/project_1/rust_riscv_2025_02_23.s
000110f4 <_start>: 110f4: 1141 addi sp,sp,-16 110f6: c606 sw ra,12(sp) 110f8: 00000097 auipc ra,0x0 110fc: 018080e7 jalr 24(ra) # 11110 <main> 11100: 00012537 lui a0,0x12 11104: 4585 li a1,1 11106: 14b52023 sw a1,320(a0) # 12140 <_ZN10rust_riscv6RESULT17ha26bc3dfb5ff0c2cE> 1110a: 40b2 lw ra,12(sp) 1110c: 0141 addi sp,sp,16 1110e: 8082 ret 00011110 <main>: 11110: 1141 addi sp,sp,-16 11112: c606 sw ra,12(sp) 11114: 4505 li a0,1 11116: 4589 li a1,2 11118: 00000097 auipc ra,0x0 1111c: 018080e7 jalr 24(ra) # 11130 <sum_2_number> 11120: 050d addi a0,a0,3 11122: 000125b7 lui a1,0x12 11126: 14a5a223 sw a0,324(a1) # 12144 <_ZN10rust_riscv8RESULT_217hd55270e3bb8c12abE> 1112a: 40b2 lw ra,12(sp) 1112c: 0141 addi sp,sp,16 1112e: 8082 ret 00011130 <sum_2_number>: 11130: 46a9 li a3,10 11132: 463d li a2,15 11134: 00d56363 bltu a0,a3,1113a <sum_2_number+0xa> 11138: 4665 li a2,25 1113a: 952e add a0,a0,a1 1113c: 9532 add a0,a0,a2 1113e: 8082 ret
memasdeligeorgakis/web-riscv-vm
2,190
code_examples/project_1/rust_riscv.s
000110f4 <_start>: 110f4: 1141 addi sp,sp,-16 110f6: c606 sw ra,12(sp) 110f8: 00000097 auipc ra,0x0 110fc: 018080e7 jalr 24(ra) # 11110 <main> 11100: 00012537 lui a0,0x12 11104: 4585 li a1,1 11106: 14b52023 sw a1,320(a0) # 12140 <_ZN10rust_riscv6RESULT17ha26bc3dfb5ff0c2cE> 1110a: 40b2 lw ra,12(sp) 1110c: 0141 addi sp,sp,16 1110e: 8082 ret 00011110 <main>: 11110: 1141 addi sp,sp,-16 11112: c606 sw ra,12(sp) 11114: 4505 li a0,1 11116: 4589 li a1,2 11118: 00000097 auipc ra,0x0 1111c: 018080e7 jalr 24(ra) # 11130 <sum_2_number> 11120: 050d addi a0,a0,3 11122: 000125b7 lui a1,0x12 11126: 14a5a223 sw a0,324(a1) # 12144 <_ZN10rust_riscv8RESULT_217hd55270e3bb8c12abE> 1112a: 40b2 lw ra,12(sp) 1112c: 0141 addi sp,sp,16 1112e: 8082 ret 00011130 <sum_2_number>: 11130: 46a9 li a3,10 11132: 463d li a2,15 11134: 00d56363 bltu a0,a3,1113a <sum_2_number+0xa> 11138: 4665 li a2,25 1113a: 952e add a0,a0,a1 1113c: 9532 add a0,a0,a2 1113e: 8082 ret # #[no_mangle] # #[inline(never)] # fn main() { # let number_1: u32 = 1; # let number_2: u32 = 2; # let number_3: u32 = 3; # let sum_of_numbers = sum_2_number(number_1, number_2); # let sum_of_numbers: u32 = sum_of_numbers + number_3; # // let aaa = unsafe { add_vectors_rvv(&VARIABLE_1, &VARIABLE_1, &mut VARIABLE_1, 3) }; # unsafe { # RESULT_2 = sum_of_numbers; // Store result in a static variable to prevent optimization # } # } # #[no_mangle] # #[inline(never)] # fn sum_2_number(number_1: u32, number_2: u32) -> u32 { # let variable_1 = number_1 + number_2; # let variable_2 = if number_1 < 10 { # 15 # } # else { # 25 # }; # variable_1 + variable_2 # }
MeowBoy326/MacroKeyboard
15,340
Firmware/MacroKeyboard V0.1/Libraries/CMSIS/CM3/DeviceSupport/ST/STM32F10x/startup/arm/startup_stm32f10x_xl.s
;******************** (C) COPYRIGHT 2011 STMicroelectronics ******************** ;* File Name : startup_stm32f10x_xl.s ;* Author : MCD Application Team ;* Version : V3.5.1 ;* Date : 08-September-2021 ;* Description : STM32F10x XL-Density Devices vector table for MDK-ARM ;* toolchain. ;* This module performs: ;* - Set the initial SP ;* - Set the initial PC == Reset_Handler ;* - Set the vector table entries with the exceptions ISR address ;* - Configure the clock system and also configure the external ;* SRAM mounted on STM3210E-EVAL board to be used as data ;* memory (optional, to be enabled by user) ;* - Branches to __main in the C library (which eventually ;* calls main()). ;* After Reset the CortexM3 processor is in Thread mode, ;* priority is Privileged, and the Stack is set to Main. ;* <<< Use Configuration Wizard in Context Menu >>> ;******************************************************************************* ;* ;* Copyright (c) 2011 STMicroelectronics. ;* All rights reserved. ;* ;* This software is licensed under terms that can be found in the LICENSE file ;* in the root directory of this software component. ;* If no LICENSE file comes with this software, it is provided AS-IS. ; ;******************************************************************************* ; Amount of memory (in bytes) allocated for Stack ; Tailor this value to your application needs ; <h> Stack Configuration ; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8> ; </h> Stack_Size EQU 0x00000400 AREA STACK, NOINIT, READWRITE, ALIGN=3 Stack_Mem SPACE Stack_Size __initial_sp ; <h> Heap Configuration ; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8> ; </h> Heap_Size EQU 0x00000200 AREA HEAP, NOINIT, READWRITE, ALIGN=3 __heap_base Heap_Mem SPACE Heap_Size __heap_limit PRESERVE8 THUMB ; Vector Table Mapped to Address 0 at Reset AREA RESET, DATA, READONLY EXPORT __Vectors EXPORT __Vectors_End EXPORT __Vectors_Size __Vectors DCD __initial_sp ; Top of Stack DCD Reset_Handler ; Reset Handler DCD NMI_Handler ; NMI Handler DCD HardFault_Handler ; Hard Fault Handler DCD MemManage_Handler ; MPU Fault Handler DCD BusFault_Handler ; Bus Fault Handler DCD UsageFault_Handler ; Usage Fault Handler DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD SVC_Handler ; SVCall Handler DCD DebugMon_Handler ; Debug Monitor Handler DCD 0 ; Reserved DCD PendSV_Handler ; PendSV Handler DCD SysTick_Handler ; SysTick Handler ; External Interrupts DCD WWDG_IRQHandler ; Window Watchdog DCD PVD_IRQHandler ; PVD through EXTI Line detect DCD TAMPER_IRQHandler ; Tamper DCD RTC_IRQHandler ; RTC DCD FLASH_IRQHandler ; Flash DCD RCC_IRQHandler ; RCC DCD EXTI0_IRQHandler ; EXTI Line 0 DCD EXTI1_IRQHandler ; EXTI Line 1 DCD EXTI2_IRQHandler ; EXTI Line 2 DCD EXTI3_IRQHandler ; EXTI Line 3 DCD EXTI4_IRQHandler ; EXTI Line 4 DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1 DCD DMA1_Channel2_IRQHandler ; DMA1 Channel 2 DCD DMA1_Channel3_IRQHandler ; DMA1 Channel 3 DCD DMA1_Channel4_IRQHandler ; DMA1 Channel 4 DCD DMA1_Channel5_IRQHandler ; DMA1 Channel 5 DCD DMA1_Channel6_IRQHandler ; DMA1 Channel 6 DCD DMA1_Channel7_IRQHandler ; DMA1 Channel 7 DCD ADC1_2_IRQHandler ; ADC1 & ADC2 DCD USB_HP_CAN1_TX_IRQHandler ; USB High Priority or CAN1 TX DCD USB_LP_CAN1_RX0_IRQHandler ; USB Low Priority or CAN1 RX0 DCD CAN1_RX1_IRQHandler ; CAN1 RX1 DCD CAN1_SCE_IRQHandler ; CAN1 SCE DCD EXTI9_5_IRQHandler ; EXTI Line 9..5 DCD TIM1_BRK_TIM9_IRQHandler ; TIM1 Break and TIM9 DCD TIM1_UP_TIM10_IRQHandler ; TIM1 Update and TIM10 DCD TIM1_TRG_COM_TIM11_IRQHandler ; TIM1 
Trigger and Commutation and TIM11 DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare DCD TIM2_IRQHandler ; TIM2 DCD TIM3_IRQHandler ; TIM3 DCD TIM4_IRQHandler ; TIM4 DCD I2C1_EV_IRQHandler ; I2C1 Event DCD I2C1_ER_IRQHandler ; I2C1 Error DCD I2C2_EV_IRQHandler ; I2C2 Event DCD I2C2_ER_IRQHandler ; I2C2 Error DCD SPI1_IRQHandler ; SPI1 DCD SPI2_IRQHandler ; SPI2 DCD USART1_IRQHandler ; USART1 DCD USART2_IRQHandler ; USART2 DCD USART3_IRQHandler ; USART3 DCD EXTI15_10_IRQHandler ; EXTI Line 15..10 DCD RTCAlarm_IRQHandler ; RTC Alarm through EXTI Line DCD USBWakeUp_IRQHandler ; USB Wakeup from suspend DCD TIM8_BRK_TIM12_IRQHandler ; TIM8 Break and TIM12 DCD TIM8_UP_TIM13_IRQHandler ; TIM8 Update and TIM13 DCD TIM8_TRG_COM_TIM14_IRQHandler ; TIM8 Trigger and Commutation and TIM14 DCD TIM8_CC_IRQHandler ; TIM8 Capture Compare DCD ADC3_IRQHandler ; ADC3 DCD FSMC_IRQHandler ; FSMC DCD SDIO_IRQHandler ; SDIO DCD TIM5_IRQHandler ; TIM5 DCD SPI3_IRQHandler ; SPI3 DCD UART4_IRQHandler ; UART4 DCD UART5_IRQHandler ; UART5 DCD TIM6_IRQHandler ; TIM6 DCD TIM7_IRQHandler ; TIM7 DCD DMA2_Channel1_IRQHandler ; DMA2 Channel1 DCD DMA2_Channel2_IRQHandler ; DMA2 Channel2 DCD DMA2_Channel3_IRQHandler ; DMA2 Channel3 DCD DMA2_Channel4_5_IRQHandler ; DMA2 Channel4 & Channel5 __Vectors_End __Vectors_Size EQU __Vectors_End - __Vectors AREA |.text|, CODE, READONLY ; Reset handler Reset_Handler PROC EXPORT Reset_Handler [WEAK] IMPORT __main IMPORT SystemInit LDR R0, =SystemInit BLX R0 LDR R0, =__main BX R0 ENDP ; Dummy Exception Handlers (infinite loops which can be modified) NMI_Handler PROC EXPORT NMI_Handler [WEAK] B . ENDP HardFault_Handler\ PROC EXPORT HardFault_Handler [WEAK] B . ENDP MemManage_Handler\ PROC EXPORT MemManage_Handler [WEAK] B . ENDP BusFault_Handler\ PROC EXPORT BusFault_Handler [WEAK] B . ENDP UsageFault_Handler\ PROC EXPORT UsageFault_Handler [WEAK] B . ENDP SVC_Handler PROC EXPORT SVC_Handler [WEAK] B . ENDP DebugMon_Handler\ PROC EXPORT DebugMon_Handler [WEAK] B . ENDP PendSV_Handler PROC EXPORT PendSV_Handler [WEAK] B . ENDP SysTick_Handler PROC EXPORT SysTick_Handler [WEAK] B . 
ENDP Default_Handler PROC EXPORT WWDG_IRQHandler [WEAK] EXPORT PVD_IRQHandler [WEAK] EXPORT TAMPER_IRQHandler [WEAK] EXPORT RTC_IRQHandler [WEAK] EXPORT FLASH_IRQHandler [WEAK] EXPORT RCC_IRQHandler [WEAK] EXPORT EXTI0_IRQHandler [WEAK] EXPORT EXTI1_IRQHandler [WEAK] EXPORT EXTI2_IRQHandler [WEAK] EXPORT EXTI3_IRQHandler [WEAK] EXPORT EXTI4_IRQHandler [WEAK] EXPORT DMA1_Channel1_IRQHandler [WEAK] EXPORT DMA1_Channel2_IRQHandler [WEAK] EXPORT DMA1_Channel3_IRQHandler [WEAK] EXPORT DMA1_Channel4_IRQHandler [WEAK] EXPORT DMA1_Channel5_IRQHandler [WEAK] EXPORT DMA1_Channel6_IRQHandler [WEAK] EXPORT DMA1_Channel7_IRQHandler [WEAK] EXPORT ADC1_2_IRQHandler [WEAK] EXPORT USB_HP_CAN1_TX_IRQHandler [WEAK] EXPORT USB_LP_CAN1_RX0_IRQHandler [WEAK] EXPORT CAN1_RX1_IRQHandler [WEAK] EXPORT CAN1_SCE_IRQHandler [WEAK] EXPORT EXTI9_5_IRQHandler [WEAK] EXPORT TIM1_BRK_TIM9_IRQHandler [WEAK] EXPORT TIM1_UP_TIM10_IRQHandler [WEAK] EXPORT TIM1_TRG_COM_TIM11_IRQHandler [WEAK] EXPORT TIM1_CC_IRQHandler [WEAK] EXPORT TIM2_IRQHandler [WEAK] EXPORT TIM3_IRQHandler [WEAK] EXPORT TIM4_IRQHandler [WEAK] EXPORT I2C1_EV_IRQHandler [WEAK] EXPORT I2C1_ER_IRQHandler [WEAK] EXPORT I2C2_EV_IRQHandler [WEAK] EXPORT I2C2_ER_IRQHandler [WEAK] EXPORT SPI1_IRQHandler [WEAK] EXPORT SPI2_IRQHandler [WEAK] EXPORT USART1_IRQHandler [WEAK] EXPORT USART2_IRQHandler [WEAK] EXPORT USART3_IRQHandler [WEAK] EXPORT EXTI15_10_IRQHandler [WEAK] EXPORT RTCAlarm_IRQHandler [WEAK] EXPORT USBWakeUp_IRQHandler [WEAK] EXPORT TIM8_BRK_TIM12_IRQHandler [WEAK] EXPORT TIM8_UP_TIM13_IRQHandler [WEAK] EXPORT TIM8_TRG_COM_TIM14_IRQHandler [WEAK] EXPORT TIM8_CC_IRQHandler [WEAK] EXPORT ADC3_IRQHandler [WEAK] EXPORT FSMC_IRQHandler [WEAK] EXPORT SDIO_IRQHandler [WEAK] EXPORT TIM5_IRQHandler [WEAK] EXPORT SPI3_IRQHandler [WEAK] EXPORT UART4_IRQHandler [WEAK] EXPORT UART5_IRQHandler [WEAK] EXPORT TIM6_IRQHandler [WEAK] EXPORT TIM7_IRQHandler [WEAK] EXPORT DMA2_Channel1_IRQHandler [WEAK] EXPORT DMA2_Channel2_IRQHandler [WEAK] EXPORT DMA2_Channel3_IRQHandler [WEAK] EXPORT DMA2_Channel4_5_IRQHandler [WEAK] WWDG_IRQHandler PVD_IRQHandler TAMPER_IRQHandler RTC_IRQHandler FLASH_IRQHandler RCC_IRQHandler EXTI0_IRQHandler EXTI1_IRQHandler EXTI2_IRQHandler EXTI3_IRQHandler EXTI4_IRQHandler DMA1_Channel1_IRQHandler DMA1_Channel2_IRQHandler DMA1_Channel3_IRQHandler DMA1_Channel4_IRQHandler DMA1_Channel5_IRQHandler DMA1_Channel6_IRQHandler DMA1_Channel7_IRQHandler ADC1_2_IRQHandler USB_HP_CAN1_TX_IRQHandler USB_LP_CAN1_RX0_IRQHandler CAN1_RX1_IRQHandler CAN1_SCE_IRQHandler EXTI9_5_IRQHandler TIM1_BRK_TIM9_IRQHandler TIM1_UP_TIM10_IRQHandler TIM1_TRG_COM_TIM11_IRQHandler TIM1_CC_IRQHandler TIM2_IRQHandler TIM3_IRQHandler TIM4_IRQHandler I2C1_EV_IRQHandler I2C1_ER_IRQHandler I2C2_EV_IRQHandler I2C2_ER_IRQHandler SPI1_IRQHandler SPI2_IRQHandler USART1_IRQHandler USART2_IRQHandler USART3_IRQHandler EXTI15_10_IRQHandler RTCAlarm_IRQHandler USBWakeUp_IRQHandler TIM8_BRK_TIM12_IRQHandler TIM8_UP_TIM13_IRQHandler TIM8_TRG_COM_TIM14_IRQHandler TIM8_CC_IRQHandler ADC3_IRQHandler FSMC_IRQHandler SDIO_IRQHandler TIM5_IRQHandler SPI3_IRQHandler UART4_IRQHandler UART5_IRQHandler TIM6_IRQHandler TIM7_IRQHandler DMA2_Channel1_IRQHandler DMA2_Channel2_IRQHandler DMA2_Channel3_IRQHandler DMA2_Channel4_5_IRQHandler B . 
ENDP ALIGN ;******************************************************************************* ; User Stack and Heap initialization ;******************************************************************************* IF :DEF:__MICROLIB EXPORT __initial_sp EXPORT __heap_base EXPORT __heap_limit ELSE IMPORT __use_two_region_memory EXPORT __user_initial_stackheap __user_initial_stackheap LDR R0, = Heap_Mem LDR R1, =(Stack_Mem + Stack_Size) LDR R2, = (Heap_Mem + Heap_Size) LDR R3, = Stack_Mem BX LR ALIGN ENDIF END
MeowBoy326/MacroKeyboard
13,501
Firmware/MacroKeyboard V0.1/Libraries/CMSIS/CM3/DeviceSupport/ST/STM32F10x/startup/arm/startup_stm32f10x_md_vl.s
;******************** (C) COPYRIGHT 2011 STMicroelectronics ******************** ;* File Name : startup_stm32f10x_md_vl.s ;* Author : MCD Application Team ;* Version : V3.5.1 ;* Date : 08-September-2021 ;* Description : STM32F10x Medium Density Value Line Devices vector table ;* for MDK-ARM toolchain. ;* This module performs: ;* - Set the initial SP ;* - Set the initial PC == Reset_Handler ;* - Set the vector table entries with the exceptions ISR address ;* - Configure the clock system ;* - Branches to __main in the C library (which eventually ;* calls main()). ;* After Reset the CortexM3 processor is in Thread mode, ;* priority is Privileged, and the Stack is set to Main. ;* <<< Use Configuration Wizard in Context Menu >>> ;******************************************************************************* ;* ;* Copyright (c) 2011 STMicroelectronics. ;* All rights reserved. ;* ;* This software is licensed under terms that can be found in the LICENSE file ;* in the root directory of this software component. ;* If no LICENSE file comes with this software, it is provided AS-IS. ; ;******************************************************************************* ; Amount of memory (in bytes) allocated for Stack ; Tailor this value to your application needs ; <h> Stack Configuration ; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8> ; </h> Stack_Size EQU 0x00000400 AREA STACK, NOINIT, READWRITE, ALIGN=3 Stack_Mem SPACE Stack_Size __initial_sp ; <h> Heap Configuration ; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8> ; </h> Heap_Size EQU 0x00000200 AREA HEAP, NOINIT, READWRITE, ALIGN=3 __heap_base Heap_Mem SPACE Heap_Size __heap_limit PRESERVE8 THUMB ; Vector Table Mapped to Address 0 at Reset AREA RESET, DATA, READONLY EXPORT __Vectors EXPORT __Vectors_End EXPORT __Vectors_Size __Vectors DCD __initial_sp ; Top of Stack DCD Reset_Handler ; Reset Handler DCD NMI_Handler ; NMI Handler DCD HardFault_Handler ; Hard Fault Handler DCD MemManage_Handler ; MPU Fault Handler DCD BusFault_Handler ; Bus Fault Handler DCD UsageFault_Handler ; Usage Fault Handler DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD SVC_Handler ; SVCall Handler DCD DebugMon_Handler ; Debug Monitor Handler DCD 0 ; Reserved DCD PendSV_Handler ; PendSV Handler DCD SysTick_Handler ; SysTick Handler ; External Interrupts DCD WWDG_IRQHandler ; Window Watchdog DCD PVD_IRQHandler ; PVD through EXTI Line detect DCD TAMPER_IRQHandler ; Tamper DCD RTC_IRQHandler ; RTC DCD FLASH_IRQHandler ; Flash DCD RCC_IRQHandler ; RCC DCD EXTI0_IRQHandler ; EXTI Line 0 DCD EXTI1_IRQHandler ; EXTI Line 1 DCD EXTI2_IRQHandler ; EXTI Line 2 DCD EXTI3_IRQHandler ; EXTI Line 3 DCD EXTI4_IRQHandler ; EXTI Line 4 DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1 DCD DMA1_Channel2_IRQHandler ; DMA1 Channel 2 DCD DMA1_Channel3_IRQHandler ; DMA1 Channel 3 DCD DMA1_Channel4_IRQHandler ; DMA1 Channel 4 DCD DMA1_Channel5_IRQHandler ; DMA1 Channel 5 DCD DMA1_Channel6_IRQHandler ; DMA1 Channel 6 DCD DMA1_Channel7_IRQHandler ; DMA1 Channel 7 DCD ADC1_IRQHandler ; ADC1 DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD EXTI9_5_IRQHandler ; EXTI Line 9..5 DCD TIM1_BRK_TIM15_IRQHandler ; TIM1 Break and TIM15 DCD TIM1_UP_TIM16_IRQHandler ; TIM1 Update and TIM16 DCD TIM1_TRG_COM_TIM17_IRQHandler ; TIM1 Trigger and Commutation and TIM17 DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare DCD TIM2_IRQHandler ; TIM2 DCD TIM3_IRQHandler ; TIM3 DCD TIM4_IRQHandler ; TIM4 DCD I2C1_EV_IRQHandler ; I2C1 Event DCD I2C1_ER_IRQHandler ; I2C1 Error DCD 
I2C2_EV_IRQHandler ; I2C2 Event DCD I2C2_ER_IRQHandler ; I2C2 Error DCD SPI1_IRQHandler ; SPI1 DCD SPI2_IRQHandler ; SPI2 DCD USART1_IRQHandler ; USART1 DCD USART2_IRQHandler ; USART2 DCD USART3_IRQHandler ; USART3 DCD EXTI15_10_IRQHandler ; EXTI Line 15..10 DCD RTCAlarm_IRQHandler ; RTC Alarm through EXTI Line DCD CEC_IRQHandler ; HDMI-CEC DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD TIM6_DAC_IRQHandler ; TIM6 and DAC underrun DCD TIM7_IRQHandler ; TIM7 __Vectors_End __Vectors_Size EQU __Vectors_End - __Vectors AREA |.text|, CODE, READONLY ; Reset handler Reset_Handler PROC EXPORT Reset_Handler [WEAK] IMPORT __main IMPORT SystemInit LDR R0, =SystemInit BLX R0 LDR R0, =__main BX R0 ENDP ; Dummy Exception Handlers (infinite loops which can be modified) NMI_Handler PROC EXPORT NMI_Handler [WEAK] B . ENDP HardFault_Handler\ PROC EXPORT HardFault_Handler [WEAK] B . ENDP MemManage_Handler\ PROC EXPORT MemManage_Handler [WEAK] B . ENDP BusFault_Handler\ PROC EXPORT BusFault_Handler [WEAK] B . ENDP UsageFault_Handler\ PROC EXPORT UsageFault_Handler [WEAK] B . ENDP SVC_Handler PROC EXPORT SVC_Handler [WEAK] B . ENDP DebugMon_Handler\ PROC EXPORT DebugMon_Handler [WEAK] B . ENDP PendSV_Handler PROC EXPORT PendSV_Handler [WEAK] B . ENDP SysTick_Handler PROC EXPORT SysTick_Handler [WEAK] B . ENDP Default_Handler PROC EXPORT WWDG_IRQHandler [WEAK] EXPORT PVD_IRQHandler [WEAK] EXPORT TAMPER_IRQHandler [WEAK] EXPORT RTC_IRQHandler [WEAK] EXPORT FLASH_IRQHandler [WEAK] EXPORT RCC_IRQHandler [WEAK] EXPORT EXTI0_IRQHandler [WEAK] EXPORT EXTI1_IRQHandler [WEAK] EXPORT EXTI2_IRQHandler [WEAK] EXPORT EXTI3_IRQHandler [WEAK] EXPORT EXTI4_IRQHandler [WEAK] EXPORT DMA1_Channel1_IRQHandler [WEAK] EXPORT DMA1_Channel2_IRQHandler [WEAK] EXPORT DMA1_Channel3_IRQHandler [WEAK] EXPORT DMA1_Channel4_IRQHandler [WEAK] EXPORT DMA1_Channel5_IRQHandler [WEAK] EXPORT DMA1_Channel6_IRQHandler [WEAK] EXPORT DMA1_Channel7_IRQHandler [WEAK] EXPORT ADC1_IRQHandler [WEAK] EXPORT EXTI9_5_IRQHandler [WEAK] EXPORT TIM1_BRK_TIM15_IRQHandler [WEAK] EXPORT TIM1_UP_TIM16_IRQHandler [WEAK] EXPORT TIM1_TRG_COM_TIM17_IRQHandler [WEAK] EXPORT TIM1_CC_IRQHandler [WEAK] EXPORT TIM2_IRQHandler [WEAK] EXPORT TIM3_IRQHandler [WEAK] EXPORT TIM4_IRQHandler [WEAK] EXPORT I2C1_EV_IRQHandler [WEAK] EXPORT I2C1_ER_IRQHandler [WEAK] EXPORT I2C2_EV_IRQHandler [WEAK] EXPORT I2C2_ER_IRQHandler [WEAK] EXPORT SPI1_IRQHandler [WEAK] EXPORT SPI2_IRQHandler [WEAK] EXPORT USART1_IRQHandler [WEAK] EXPORT USART2_IRQHandler [WEAK] EXPORT USART3_IRQHandler [WEAK] EXPORT EXTI15_10_IRQHandler [WEAK] EXPORT RTCAlarm_IRQHandler [WEAK] EXPORT CEC_IRQHandler [WEAK] EXPORT TIM6_DAC_IRQHandler [WEAK] EXPORT TIM7_IRQHandler [WEAK] WWDG_IRQHandler PVD_IRQHandler TAMPER_IRQHandler RTC_IRQHandler FLASH_IRQHandler RCC_IRQHandler EXTI0_IRQHandler EXTI1_IRQHandler EXTI2_IRQHandler EXTI3_IRQHandler EXTI4_IRQHandler DMA1_Channel1_IRQHandler DMA1_Channel2_IRQHandler DMA1_Channel3_IRQHandler DMA1_Channel4_IRQHandler DMA1_Channel5_IRQHandler DMA1_Channel6_IRQHandler DMA1_Channel7_IRQHandler ADC1_IRQHandler EXTI9_5_IRQHandler TIM1_BRK_TIM15_IRQHandler TIM1_UP_TIM16_IRQHandler TIM1_TRG_COM_TIM17_IRQHandler TIM1_CC_IRQHandler TIM2_IRQHandler TIM3_IRQHandler TIM4_IRQHandler I2C1_EV_IRQHandler I2C1_ER_IRQHandler I2C2_EV_IRQHandler I2C2_ER_IRQHandler SPI1_IRQHandler SPI2_IRQHandler USART1_IRQHandler USART2_IRQHandler USART3_IRQHandler 
EXTI15_10_IRQHandler RTCAlarm_IRQHandler CEC_IRQHandler TIM6_DAC_IRQHandler TIM7_IRQHandler B . ENDP ALIGN ;******************************************************************************* ; User Stack and Heap initialization ;******************************************************************************* IF :DEF:__MICROLIB EXPORT __initial_sp EXPORT __heap_base EXPORT __heap_limit ELSE IMPORT __use_two_region_memory EXPORT __user_initial_stackheap __user_initial_stackheap LDR R0, = Heap_Mem LDR R1, =(Stack_Mem + Stack_Size) LDR R2, = (Heap_Mem + Heap_Size) LDR R3, = Stack_Mem BX LR ALIGN ENDIF END
MeowBoy326/MacroKeyboard
14,888
Firmware/MacroKeyboard V0.1/Libraries/CMSIS/CM3/DeviceSupport/ST/STM32F10x/startup/arm/startup_stm32f10x_hd.s
;******************** (C) COPYRIGHT 2011 STMicroelectronics ******************** ;* File Name : startup_stm32f10x_hd.s ;* Author : MCD Application Team ;* Version : V3.5.1 ;* Date : 08-September-2021 ;* Description : STM32F10x High Density Devices vector table for MDK-ARM ;* toolchain. ;* This module performs: ;* - Set the initial SP ;* - Set the initial PC == Reset_Handler ;* - Set the vector table entries with the exceptions ISR address ;* - Configure the clock system and also configure the external ;* SRAM mounted on STM3210E-EVAL board to be used as data ;* memory (optional, to be enabled by user) ;* - Branches to __main in the C library (which eventually ;* calls main()). ;* After Reset the CortexM3 processor is in Thread mode, ;* priority is Privileged, and the Stack is set to Main. ;* <<< Use Configuration Wizard in Context Menu >>> ;******************************************************************************* ;* ;* Copyright (c) 2011 STMicroelectronics. ;* All rights reserved. ;* ;* This software is licensed under terms that can be found in the LICENSE file ;* in the root directory of this software component. ;* If no LICENSE file comes with this software, it is provided AS-IS. ; ;******************************************************************************* ; Amount of memory (in bytes) allocated for Stack ; Tailor this value to your application needs ; <h> Stack Configuration ; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8> ; </h> Stack_Size EQU 0x00000400 AREA STACK, NOINIT, READWRITE, ALIGN=3 Stack_Mem SPACE Stack_Size __initial_sp ; <h> Heap Configuration ; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8> ; </h> Heap_Size EQU 0x00000200 AREA HEAP, NOINIT, READWRITE, ALIGN=3 __heap_base Heap_Mem SPACE Heap_Size __heap_limit PRESERVE8 THUMB ; Vector Table Mapped to Address 0 at Reset AREA RESET, DATA, READONLY EXPORT __Vectors EXPORT __Vectors_End EXPORT __Vectors_Size __Vectors DCD __initial_sp ; Top of Stack DCD Reset_Handler ; Reset Handler DCD NMI_Handler ; NMI Handler DCD HardFault_Handler ; Hard Fault Handler DCD MemManage_Handler ; MPU Fault Handler DCD BusFault_Handler ; Bus Fault Handler DCD UsageFault_Handler ; Usage Fault Handler DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD SVC_Handler ; SVCall Handler DCD DebugMon_Handler ; Debug Monitor Handler DCD 0 ; Reserved DCD PendSV_Handler ; PendSV Handler DCD SysTick_Handler ; SysTick Handler ; External Interrupts DCD WWDG_IRQHandler ; Window Watchdog DCD PVD_IRQHandler ; PVD through EXTI Line detect DCD TAMPER_IRQHandler ; Tamper DCD RTC_IRQHandler ; RTC DCD FLASH_IRQHandler ; Flash DCD RCC_IRQHandler ; RCC DCD EXTI0_IRQHandler ; EXTI Line 0 DCD EXTI1_IRQHandler ; EXTI Line 1 DCD EXTI2_IRQHandler ; EXTI Line 2 DCD EXTI3_IRQHandler ; EXTI Line 3 DCD EXTI4_IRQHandler ; EXTI Line 4 DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1 DCD DMA1_Channel2_IRQHandler ; DMA1 Channel 2 DCD DMA1_Channel3_IRQHandler ; DMA1 Channel 3 DCD DMA1_Channel4_IRQHandler ; DMA1 Channel 4 DCD DMA1_Channel5_IRQHandler ; DMA1 Channel 5 DCD DMA1_Channel6_IRQHandler ; DMA1 Channel 6 DCD DMA1_Channel7_IRQHandler ; DMA1 Channel 7 DCD ADC1_2_IRQHandler ; ADC1 & ADC2 DCD USB_HP_CAN1_TX_IRQHandler ; USB High Priority or CAN1 TX DCD USB_LP_CAN1_RX0_IRQHandler ; USB Low Priority or CAN1 RX0 DCD CAN1_RX1_IRQHandler ; CAN1 RX1 DCD CAN1_SCE_IRQHandler ; CAN1 SCE DCD EXTI9_5_IRQHandler ; EXTI Line 9..5 DCD TIM1_BRK_IRQHandler ; TIM1 Break DCD TIM1_UP_IRQHandler ; TIM1 Update DCD TIM1_TRG_COM_IRQHandler ; TIM1 Trigger and Commutation DCD 
TIM1_CC_IRQHandler ; TIM1 Capture Compare DCD TIM2_IRQHandler ; TIM2 DCD TIM3_IRQHandler ; TIM3 DCD TIM4_IRQHandler ; TIM4 DCD I2C1_EV_IRQHandler ; I2C1 Event DCD I2C1_ER_IRQHandler ; I2C1 Error DCD I2C2_EV_IRQHandler ; I2C2 Event DCD I2C2_ER_IRQHandler ; I2C2 Error DCD SPI1_IRQHandler ; SPI1 DCD SPI2_IRQHandler ; SPI2 DCD USART1_IRQHandler ; USART1 DCD USART2_IRQHandler ; USART2 DCD USART3_IRQHandler ; USART3 DCD EXTI15_10_IRQHandler ; EXTI Line 15..10 DCD RTCAlarm_IRQHandler ; RTC Alarm through EXTI Line DCD USBWakeUp_IRQHandler ; USB Wakeup from suspend DCD TIM8_BRK_IRQHandler ; TIM8 Break DCD TIM8_UP_IRQHandler ; TIM8 Update DCD TIM8_TRG_COM_IRQHandler ; TIM8 Trigger and Commutation DCD TIM8_CC_IRQHandler ; TIM8 Capture Compare DCD ADC3_IRQHandler ; ADC3 DCD FSMC_IRQHandler ; FSMC DCD SDIO_IRQHandler ; SDIO DCD TIM5_IRQHandler ; TIM5 DCD SPI3_IRQHandler ; SPI3 DCD UART4_IRQHandler ; UART4 DCD UART5_IRQHandler ; UART5 DCD TIM6_IRQHandler ; TIM6 DCD TIM7_IRQHandler ; TIM7 DCD DMA2_Channel1_IRQHandler ; DMA2 Channel1 DCD DMA2_Channel2_IRQHandler ; DMA2 Channel2 DCD DMA2_Channel3_IRQHandler ; DMA2 Channel3 DCD DMA2_Channel4_5_IRQHandler ; DMA2 Channel4 & Channel5 __Vectors_End __Vectors_Size EQU __Vectors_End - __Vectors AREA |.text|, CODE, READONLY ; Reset handler Reset_Handler PROC EXPORT Reset_Handler [WEAK] IMPORT __main IMPORT SystemInit LDR R0, =SystemInit BLX R0 LDR R0, =__main BX R0 ENDP ; Dummy Exception Handlers (infinite loops which can be modified) NMI_Handler PROC EXPORT NMI_Handler [WEAK] B . ENDP HardFault_Handler\ PROC EXPORT HardFault_Handler [WEAK] B . ENDP MemManage_Handler\ PROC EXPORT MemManage_Handler [WEAK] B . ENDP BusFault_Handler\ PROC EXPORT BusFault_Handler [WEAK] B . ENDP UsageFault_Handler\ PROC EXPORT UsageFault_Handler [WEAK] B . ENDP SVC_Handler PROC EXPORT SVC_Handler [WEAK] B . ENDP DebugMon_Handler\ PROC EXPORT DebugMon_Handler [WEAK] B . ENDP PendSV_Handler PROC EXPORT PendSV_Handler [WEAK] B . ENDP SysTick_Handler PROC EXPORT SysTick_Handler [WEAK] B . 
ENDP Default_Handler PROC EXPORT WWDG_IRQHandler [WEAK] EXPORT PVD_IRQHandler [WEAK] EXPORT TAMPER_IRQHandler [WEAK] EXPORT RTC_IRQHandler [WEAK] EXPORT FLASH_IRQHandler [WEAK] EXPORT RCC_IRQHandler [WEAK] EXPORT EXTI0_IRQHandler [WEAK] EXPORT EXTI1_IRQHandler [WEAK] EXPORT EXTI2_IRQHandler [WEAK] EXPORT EXTI3_IRQHandler [WEAK] EXPORT EXTI4_IRQHandler [WEAK] EXPORT DMA1_Channel1_IRQHandler [WEAK] EXPORT DMA1_Channel2_IRQHandler [WEAK] EXPORT DMA1_Channel3_IRQHandler [WEAK] EXPORT DMA1_Channel4_IRQHandler [WEAK] EXPORT DMA1_Channel5_IRQHandler [WEAK] EXPORT DMA1_Channel6_IRQHandler [WEAK] EXPORT DMA1_Channel7_IRQHandler [WEAK] EXPORT ADC1_2_IRQHandler [WEAK] EXPORT USB_HP_CAN1_TX_IRQHandler [WEAK] EXPORT USB_LP_CAN1_RX0_IRQHandler [WEAK] EXPORT CAN1_RX1_IRQHandler [WEAK] EXPORT CAN1_SCE_IRQHandler [WEAK] EXPORT EXTI9_5_IRQHandler [WEAK] EXPORT TIM1_BRK_IRQHandler [WEAK] EXPORT TIM1_UP_IRQHandler [WEAK] EXPORT TIM1_TRG_COM_IRQHandler [WEAK] EXPORT TIM1_CC_IRQHandler [WEAK] EXPORT TIM2_IRQHandler [WEAK] EXPORT TIM3_IRQHandler [WEAK] EXPORT TIM4_IRQHandler [WEAK] EXPORT I2C1_EV_IRQHandler [WEAK] EXPORT I2C1_ER_IRQHandler [WEAK] EXPORT I2C2_EV_IRQHandler [WEAK] EXPORT I2C2_ER_IRQHandler [WEAK] EXPORT SPI1_IRQHandler [WEAK] EXPORT SPI2_IRQHandler [WEAK] EXPORT USART1_IRQHandler [WEAK] EXPORT USART2_IRQHandler [WEAK] EXPORT USART3_IRQHandler [WEAK] EXPORT EXTI15_10_IRQHandler [WEAK] EXPORT RTCAlarm_IRQHandler [WEAK] EXPORT USBWakeUp_IRQHandler [WEAK] EXPORT TIM8_BRK_IRQHandler [WEAK] EXPORT TIM8_UP_IRQHandler [WEAK] EXPORT TIM8_TRG_COM_IRQHandler [WEAK] EXPORT TIM8_CC_IRQHandler [WEAK] EXPORT ADC3_IRQHandler [WEAK] EXPORT FSMC_IRQHandler [WEAK] EXPORT SDIO_IRQHandler [WEAK] EXPORT TIM5_IRQHandler [WEAK] EXPORT SPI3_IRQHandler [WEAK] EXPORT UART4_IRQHandler [WEAK] EXPORT UART5_IRQHandler [WEAK] EXPORT TIM6_IRQHandler [WEAK] EXPORT TIM7_IRQHandler [WEAK] EXPORT DMA2_Channel1_IRQHandler [WEAK] EXPORT DMA2_Channel2_IRQHandler [WEAK] EXPORT DMA2_Channel3_IRQHandler [WEAK] EXPORT DMA2_Channel4_5_IRQHandler [WEAK] WWDG_IRQHandler PVD_IRQHandler TAMPER_IRQHandler RTC_IRQHandler FLASH_IRQHandler RCC_IRQHandler EXTI0_IRQHandler EXTI1_IRQHandler EXTI2_IRQHandler EXTI3_IRQHandler EXTI4_IRQHandler DMA1_Channel1_IRQHandler DMA1_Channel2_IRQHandler DMA1_Channel3_IRQHandler DMA1_Channel4_IRQHandler DMA1_Channel5_IRQHandler DMA1_Channel6_IRQHandler DMA1_Channel7_IRQHandler ADC1_2_IRQHandler USB_HP_CAN1_TX_IRQHandler USB_LP_CAN1_RX0_IRQHandler CAN1_RX1_IRQHandler CAN1_SCE_IRQHandler EXTI9_5_IRQHandler TIM1_BRK_IRQHandler TIM1_UP_IRQHandler TIM1_TRG_COM_IRQHandler TIM1_CC_IRQHandler TIM2_IRQHandler TIM3_IRQHandler TIM4_IRQHandler I2C1_EV_IRQHandler I2C1_ER_IRQHandler I2C2_EV_IRQHandler I2C2_ER_IRQHandler SPI1_IRQHandler SPI2_IRQHandler USART1_IRQHandler USART2_IRQHandler USART3_IRQHandler EXTI15_10_IRQHandler RTCAlarm_IRQHandler USBWakeUp_IRQHandler TIM8_BRK_IRQHandler TIM8_UP_IRQHandler TIM8_TRG_COM_IRQHandler TIM8_CC_IRQHandler ADC3_IRQHandler FSMC_IRQHandler SDIO_IRQHandler TIM5_IRQHandler SPI3_IRQHandler UART4_IRQHandler UART5_IRQHandler TIM6_IRQHandler TIM7_IRQHandler DMA2_Channel1_IRQHandler DMA2_Channel2_IRQHandler DMA2_Channel3_IRQHandler DMA2_Channel4_5_IRQHandler B . 
ENDP ALIGN ;******************************************************************************* ; User Stack and Heap initialization ;******************************************************************************* IF :DEF:__MICROLIB EXPORT __initial_sp EXPORT __heap_base EXPORT __heap_limit ELSE IMPORT __use_two_region_memory EXPORT __user_initial_stackheap __user_initial_stackheap LDR R0, = Heap_Mem LDR R1, =(Stack_Mem + Stack_Size) LDR R2, = (Heap_Mem + Heap_Size) LDR R3, = Stack_Mem BX LR ALIGN ENDIF END
MeowBoy326/MacroKeyboard
15,089
Firmware/MacroKeyboard V0.1/Libraries/CMSIS/CM3/DeviceSupport/ST/STM32F10x/startup/arm/startup_stm32f10x_hd_vl.s
;******************** (C) COPYRIGHT 2011 STMicroelectronics ******************** ;* File Name : startup_stm32f10x_hd_vl.s ;* Author : MCD Application Team ;* Version : V3.5.1 ;* Date : 08-September-2021 ;* Description : STM32F10x High Density Value Line Devices vector table ;* for MDK-ARM toolchain. ;* This module performs: ;* - Set the initial SP ;* - Set the initial PC == Reset_Handler ;* - Set the vector table entries with the exceptions ISR address ;* - Configure the clock system and also configure the external ;* SRAM mounted on STM32100E-EVAL board to be used as data ;* memory (optional, to be enabled by user) ;* - Branches to __main in the C library (which eventually ;* calls main()). ;* After Reset the CortexM3 processor is in Thread mode, ;* priority is Privileged, and the Stack is set to Main. ;* <<< Use Configuration Wizard in Context Menu >>> ;******************************************************************************* ;* ;* Copyright (c) 2011 STMicroelectronics. ;* All rights reserved. ;* ;* This software is licensed under terms that can be found in the LICENSE file ;* in the root directory of this software component. ;* If no LICENSE file comes with this software, it is provided AS-IS. ; ;******************************************************************************* ; Amount of memory (in bytes) allocated for Stack ; Tailor this value to your application needs ; <h> Stack Configuration ; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8> ; </h> Stack_Size EQU 0x00000400 AREA STACK, NOINIT, READWRITE, ALIGN=3 Stack_Mem SPACE Stack_Size __initial_sp ; <h> Heap Configuration ; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8> ; </h> Heap_Size EQU 0x00000200 AREA HEAP, NOINIT, READWRITE, ALIGN=3 __heap_base Heap_Mem SPACE Heap_Size __heap_limit PRESERVE8 THUMB ; Vector Table Mapped to Address 0 at Reset AREA RESET, DATA, READONLY EXPORT __Vectors EXPORT __Vectors_End EXPORT __Vectors_Size __Vectors DCD __initial_sp ; Top of Stack DCD Reset_Handler ; Reset Handler DCD NMI_Handler ; NMI Handler DCD HardFault_Handler ; Hard Fault Handler DCD MemManage_Handler ; MPU Fault Handler DCD BusFault_Handler ; Bus Fault Handler DCD UsageFault_Handler ; Usage Fault Handler DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD SVC_Handler ; SVCall Handler DCD DebugMon_Handler ; Debug Monitor Handler DCD 0 ; Reserved DCD PendSV_Handler ; PendSV Handler DCD SysTick_Handler ; SysTick Handler ; External Interrupts DCD WWDG_IRQHandler ; Window Watchdog DCD PVD_IRQHandler ; PVD through EXTI Line detect DCD TAMPER_IRQHandler ; Tamper DCD RTC_IRQHandler ; RTC DCD FLASH_IRQHandler ; Flash DCD RCC_IRQHandler ; RCC DCD EXTI0_IRQHandler ; EXTI Line 0 DCD EXTI1_IRQHandler ; EXTI Line 1 DCD EXTI2_IRQHandler ; EXTI Line 2 DCD EXTI3_IRQHandler ; EXTI Line 3 DCD EXTI4_IRQHandler ; EXTI Line 4 DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1 DCD DMA1_Channel2_IRQHandler ; DMA1 Channel 2 DCD DMA1_Channel3_IRQHandler ; DMA1 Channel 3 DCD DMA1_Channel4_IRQHandler ; DMA1 Channel 4 DCD DMA1_Channel5_IRQHandler ; DMA1 Channel 5 DCD DMA1_Channel6_IRQHandler ; DMA1 Channel 6 DCD DMA1_Channel7_IRQHandler ; DMA1 Channel 7 DCD ADC1_IRQHandler ; ADC1 DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD EXTI9_5_IRQHandler ; EXTI Line 9..5 DCD TIM1_BRK_TIM15_IRQHandler ; TIM1 Break and TIM15 DCD TIM1_UP_TIM16_IRQHandler ; TIM1 Update and TIM16 DCD TIM1_TRG_COM_TIM17_IRQHandler ; TIM1 Trigger and Commutation and TIM17 DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare DCD TIM2_IRQHandler ; TIM2 DCD 
TIM3_IRQHandler ; TIM3 DCD TIM4_IRQHandler ; TIM4 DCD I2C1_EV_IRQHandler ; I2C1 Event DCD I2C1_ER_IRQHandler ; I2C1 Error DCD I2C2_EV_IRQHandler ; I2C2 Event DCD I2C2_ER_IRQHandler ; I2C2 Error DCD SPI1_IRQHandler ; SPI1 DCD SPI2_IRQHandler ; SPI2 DCD USART1_IRQHandler ; USART1 DCD USART2_IRQHandler ; USART2 DCD USART3_IRQHandler ; USART3 DCD EXTI15_10_IRQHandler ; EXTI Line 15..10 DCD RTCAlarm_IRQHandler ; RTC Alarm through EXTI Line DCD CEC_IRQHandler ; HDMI-CEC DCD TIM12_IRQHandler ; TIM12 DCD TIM13_IRQHandler ; TIM13 DCD TIM14_IRQHandler ; TIM14 DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD TIM5_IRQHandler ; TIM5 DCD SPI3_IRQHandler ; SPI3 DCD UART4_IRQHandler ; UART4 DCD UART5_IRQHandler ; UART5 DCD TIM6_DAC_IRQHandler ; TIM6 and DAC underrun DCD TIM7_IRQHandler ; TIM7 DCD DMA2_Channel1_IRQHandler ; DMA2 Channel1 DCD DMA2_Channel2_IRQHandler ; DMA2 Channel2 DCD DMA2_Channel3_IRQHandler ; DMA2 Channel3 DCD DMA2_Channel4_5_IRQHandler ; DMA2 Channel4 & Channel5 DCD DMA2_Channel5_IRQHandler ; DMA2 Channel5 __Vectors_End __Vectors_Size EQU __Vectors_End - __Vectors AREA |.text|, CODE, READONLY ; Reset handler Reset_Handler PROC EXPORT Reset_Handler [WEAK] IMPORT __main IMPORT SystemInit LDR R0, =SystemInit BLX R0 LDR R0, =__main BX R0 ENDP ; Dummy Exception Handlers (infinite loops which can be modified) NMI_Handler PROC EXPORT NMI_Handler [WEAK] B . ENDP HardFault_Handler\ PROC EXPORT HardFault_Handler [WEAK] B . ENDP MemManage_Handler\ PROC EXPORT MemManage_Handler [WEAK] B . ENDP BusFault_Handler\ PROC EXPORT BusFault_Handler [WEAK] B . ENDP UsageFault_Handler\ PROC EXPORT UsageFault_Handler [WEAK] B . ENDP SVC_Handler PROC EXPORT SVC_Handler [WEAK] B . ENDP DebugMon_Handler\ PROC EXPORT DebugMon_Handler [WEAK] B . ENDP PendSV_Handler PROC EXPORT PendSV_Handler [WEAK] B . ENDP SysTick_Handler PROC EXPORT SysTick_Handler [WEAK] B . 
ENDP Default_Handler PROC EXPORT WWDG_IRQHandler [WEAK] EXPORT PVD_IRQHandler [WEAK] EXPORT TAMPER_IRQHandler [WEAK] EXPORT RTC_IRQHandler [WEAK] EXPORT FLASH_IRQHandler [WEAK] EXPORT RCC_IRQHandler [WEAK] EXPORT EXTI0_IRQHandler [WEAK] EXPORT EXTI1_IRQHandler [WEAK] EXPORT EXTI2_IRQHandler [WEAK] EXPORT EXTI3_IRQHandler [WEAK] EXPORT EXTI4_IRQHandler [WEAK] EXPORT DMA1_Channel1_IRQHandler [WEAK] EXPORT DMA1_Channel2_IRQHandler [WEAK] EXPORT DMA1_Channel3_IRQHandler [WEAK] EXPORT DMA1_Channel4_IRQHandler [WEAK] EXPORT DMA1_Channel5_IRQHandler [WEAK] EXPORT DMA1_Channel6_IRQHandler [WEAK] EXPORT DMA1_Channel7_IRQHandler [WEAK] EXPORT ADC1_IRQHandler [WEAK] EXPORT EXTI9_5_IRQHandler [WEAK] EXPORT TIM1_BRK_TIM15_IRQHandler [WEAK] EXPORT TIM1_UP_TIM16_IRQHandler [WEAK] EXPORT TIM1_TRG_COM_TIM17_IRQHandler [WEAK] EXPORT TIM1_CC_IRQHandler [WEAK] EXPORT TIM2_IRQHandler [WEAK] EXPORT TIM3_IRQHandler [WEAK] EXPORT TIM4_IRQHandler [WEAK] EXPORT I2C1_EV_IRQHandler [WEAK] EXPORT I2C1_ER_IRQHandler [WEAK] EXPORT I2C2_EV_IRQHandler [WEAK] EXPORT I2C2_ER_IRQHandler [WEAK] EXPORT SPI1_IRQHandler [WEAK] EXPORT SPI2_IRQHandler [WEAK] EXPORT USART1_IRQHandler [WEAK] EXPORT USART2_IRQHandler [WEAK] EXPORT USART3_IRQHandler [WEAK] EXPORT EXTI15_10_IRQHandler [WEAK] EXPORT RTCAlarm_IRQHandler [WEAK] EXPORT CEC_IRQHandler [WEAK] EXPORT TIM12_IRQHandler [WEAK] EXPORT TIM13_IRQHandler [WEAK] EXPORT TIM14_IRQHandler [WEAK] EXPORT TIM5_IRQHandler [WEAK] EXPORT SPI3_IRQHandler [WEAK] EXPORT UART4_IRQHandler [WEAK] EXPORT UART5_IRQHandler [WEAK] EXPORT TIM6_DAC_IRQHandler [WEAK] EXPORT TIM7_IRQHandler [WEAK] EXPORT DMA2_Channel1_IRQHandler [WEAK] EXPORT DMA2_Channel2_IRQHandler [WEAK] EXPORT DMA2_Channel3_IRQHandler [WEAK] EXPORT DMA2_Channel4_5_IRQHandler [WEAK] EXPORT DMA2_Channel5_IRQHandler [WEAK] WWDG_IRQHandler PVD_IRQHandler TAMPER_IRQHandler RTC_IRQHandler FLASH_IRQHandler RCC_IRQHandler EXTI0_IRQHandler EXTI1_IRQHandler EXTI2_IRQHandler EXTI3_IRQHandler EXTI4_IRQHandler DMA1_Channel1_IRQHandler DMA1_Channel2_IRQHandler DMA1_Channel3_IRQHandler DMA1_Channel4_IRQHandler DMA1_Channel5_IRQHandler DMA1_Channel6_IRQHandler DMA1_Channel7_IRQHandler ADC1_IRQHandler EXTI9_5_IRQHandler TIM1_BRK_TIM15_IRQHandler TIM1_UP_TIM16_IRQHandler TIM1_TRG_COM_TIM17_IRQHandler TIM1_CC_IRQHandler TIM2_IRQHandler TIM3_IRQHandler TIM4_IRQHandler I2C1_EV_IRQHandler I2C1_ER_IRQHandler I2C2_EV_IRQHandler I2C2_ER_IRQHandler SPI1_IRQHandler SPI2_IRQHandler USART1_IRQHandler USART2_IRQHandler USART3_IRQHandler EXTI15_10_IRQHandler RTCAlarm_IRQHandler CEC_IRQHandler TIM12_IRQHandler TIM13_IRQHandler TIM14_IRQHandler TIM5_IRQHandler SPI3_IRQHandler UART4_IRQHandler UART5_IRQHandler TIM6_DAC_IRQHandler TIM7_IRQHandler DMA2_Channel1_IRQHandler DMA2_Channel2_IRQHandler DMA2_Channel3_IRQHandler DMA2_Channel4_5_IRQHandler DMA2_Channel5_IRQHandler B . ENDP ALIGN ;******************************************************************************* ; User Stack and Heap initialization ;******************************************************************************* IF :DEF:__MICROLIB EXPORT __initial_sp EXPORT __heap_base EXPORT __heap_limit ELSE IMPORT __use_two_region_memory EXPORT __user_initial_stackheap __user_initial_stackheap LDR R0, = Heap_Mem LDR R1, =(Stack_Mem + Stack_Size) LDR R2, = (Heap_Mem + Heap_Size) LDR R3, = Stack_Mem BX LR ALIGN ENDIF END
MeowBoy326/MacroKeyboard
12,201
Firmware/MacroKeyboard V0.1/Libraries/CMSIS/CM3/DeviceSupport/ST/STM32F10x/startup/arm/startup_stm32f10x_md.s
;******************** (C) COPYRIGHT 2011 STMicroelectronics ******************** ;* File Name : startup_stm32f10x_md.s ;* Author : MCD Application Team ;* Version : V3.5.1 ;* Date : 08-September-2021 ;* Description : STM32F10x Medium Density Devices vector table for MDK-ARM ;* toolchain. ;* This module performs: ;* - Set the initial SP ;* - Set the initial PC == Reset_Handler ;* - Set the vector table entries with the exceptions ISR address ;* - Configure the clock system ;* - Branches to __main in the C library (which eventually ;* calls main()). ;* After Reset the CortexM3 processor is in Thread mode, ;* priority is Privileged, and the Stack is set to Main. ;* <<< Use Configuration Wizard in Context Menu >>> ;******************************************************************************* ;* ;* Copyright (c) 2011 STMicroelectronics. ;* All rights reserved. ;* ;* This software is licensed under terms that can be found in the LICENSE file ;* in the root directory of this software component. ;* If no LICENSE file comes with this software, it is provided AS-IS. ; ;******************************************************************************* ; Amount of memory (in bytes) allocated for Stack ; Tailor this value to your application needs ; <h> Stack Configuration ; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8> ; </h> Stack_Size EQU 0x00000400 AREA STACK, NOINIT, READWRITE, ALIGN=3 Stack_Mem SPACE Stack_Size __initial_sp ; <h> Heap Configuration ; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8> ; </h> Heap_Size EQU 0x00000200 AREA HEAP, NOINIT, READWRITE, ALIGN=3 __heap_base Heap_Mem SPACE Heap_Size __heap_limit PRESERVE8 THUMB ; Vector Table Mapped to Address 0 at Reset AREA RESET, DATA, READONLY EXPORT __Vectors EXPORT __Vectors_End EXPORT __Vectors_Size __Vectors DCD __initial_sp ; Top of Stack DCD Reset_Handler ; Reset Handler DCD NMI_Handler ; NMI Handler DCD HardFault_Handler ; Hard Fault Handler DCD MemManage_Handler ; MPU Fault Handler DCD BusFault_Handler ; Bus Fault Handler DCD UsageFault_Handler ; Usage Fault Handler DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD SVC_Handler ; SVCall Handler DCD DebugMon_Handler ; Debug Monitor Handler DCD 0 ; Reserved DCD PendSV_Handler ; PendSV Handler DCD SysTick_Handler ; SysTick Handler ; External Interrupts DCD WWDG_IRQHandler ; Window Watchdog DCD PVD_IRQHandler ; PVD through EXTI Line detect DCD TAMPER_IRQHandler ; Tamper DCD RTC_IRQHandler ; RTC DCD FLASH_IRQHandler ; Flash DCD RCC_IRQHandler ; RCC DCD EXTI0_IRQHandler ; EXTI Line 0 DCD EXTI1_IRQHandler ; EXTI Line 1 DCD EXTI2_IRQHandler ; EXTI Line 2 DCD EXTI3_IRQHandler ; EXTI Line 3 DCD EXTI4_IRQHandler ; EXTI Line 4 DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1 DCD DMA1_Channel2_IRQHandler ; DMA1 Channel 2 DCD DMA1_Channel3_IRQHandler ; DMA1 Channel 3 DCD DMA1_Channel4_IRQHandler ; DMA1 Channel 4 DCD DMA1_Channel5_IRQHandler ; DMA1 Channel 5 DCD DMA1_Channel6_IRQHandler ; DMA1 Channel 6 DCD DMA1_Channel7_IRQHandler ; DMA1 Channel 7 DCD ADC1_2_IRQHandler ; ADC1_2 DCD USB_HP_CAN1_TX_IRQHandler ; USB High Priority or CAN1 TX DCD USB_LP_CAN1_RX0_IRQHandler ; USB Low Priority or CAN1 RX0 DCD CAN1_RX1_IRQHandler ; CAN1 RX1 DCD CAN1_SCE_IRQHandler ; CAN1 SCE DCD EXTI9_5_IRQHandler ; EXTI Line 9..5 DCD TIM1_BRK_IRQHandler ; TIM1 Break DCD TIM1_UP_IRQHandler ; TIM1 Update DCD TIM1_TRG_COM_IRQHandler ; TIM1 Trigger and Commutation DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare DCD TIM2_IRQHandler ; TIM2 DCD TIM3_IRQHandler ; TIM3 DCD TIM4_IRQHandler ; TIM4 DCD I2C1_EV_IRQHandler 
; I2C1 Event DCD I2C1_ER_IRQHandler ; I2C1 Error DCD I2C2_EV_IRQHandler ; I2C2 Event DCD I2C2_ER_IRQHandler ; I2C2 Error DCD SPI1_IRQHandler ; SPI1 DCD SPI2_IRQHandler ; SPI2 DCD USART1_IRQHandler ; USART1 DCD USART2_IRQHandler ; USART2 DCD USART3_IRQHandler ; USART3 DCD EXTI15_10_IRQHandler ; EXTI Line 15..10 DCD RTCAlarm_IRQHandler ; RTC Alarm through EXTI Line DCD USBWakeUp_IRQHandler ; USB Wakeup from suspend __Vectors_End __Vectors_Size EQU __Vectors_End - __Vectors AREA |.text|, CODE, READONLY ; Reset handler Reset_Handler PROC EXPORT Reset_Handler [WEAK] IMPORT __main IMPORT SystemInit LDR R0, =SystemInit BLX R0 LDR R0, =__main BX R0 ENDP ; Dummy Exception Handlers (infinite loops which can be modified) NMI_Handler PROC EXPORT NMI_Handler [WEAK] B . ENDP HardFault_Handler\ PROC EXPORT HardFault_Handler [WEAK] B . ENDP MemManage_Handler\ PROC EXPORT MemManage_Handler [WEAK] B . ENDP BusFault_Handler\ PROC EXPORT BusFault_Handler [WEAK] B . ENDP UsageFault_Handler\ PROC EXPORT UsageFault_Handler [WEAK] B . ENDP SVC_Handler PROC EXPORT SVC_Handler [WEAK] B . ENDP DebugMon_Handler\ PROC EXPORT DebugMon_Handler [WEAK] B . ENDP PendSV_Handler PROC EXPORT PendSV_Handler [WEAK] B . ENDP SysTick_Handler PROC EXPORT SysTick_Handler [WEAK] B . ENDP Default_Handler PROC EXPORT WWDG_IRQHandler [WEAK] EXPORT PVD_IRQHandler [WEAK] EXPORT TAMPER_IRQHandler [WEAK] EXPORT RTC_IRQHandler [WEAK] EXPORT FLASH_IRQHandler [WEAK] EXPORT RCC_IRQHandler [WEAK] EXPORT EXTI0_IRQHandler [WEAK] EXPORT EXTI1_IRQHandler [WEAK] EXPORT EXTI2_IRQHandler [WEAK] EXPORT EXTI3_IRQHandler [WEAK] EXPORT EXTI4_IRQHandler [WEAK] EXPORT DMA1_Channel1_IRQHandler [WEAK] EXPORT DMA1_Channel2_IRQHandler [WEAK] EXPORT DMA1_Channel3_IRQHandler [WEAK] EXPORT DMA1_Channel4_IRQHandler [WEAK] EXPORT DMA1_Channel5_IRQHandler [WEAK] EXPORT DMA1_Channel6_IRQHandler [WEAK] EXPORT DMA1_Channel7_IRQHandler [WEAK] EXPORT ADC1_2_IRQHandler [WEAK] EXPORT USB_HP_CAN1_TX_IRQHandler [WEAK] EXPORT USB_LP_CAN1_RX0_IRQHandler [WEAK] EXPORT CAN1_RX1_IRQHandler [WEAK] EXPORT CAN1_SCE_IRQHandler [WEAK] EXPORT EXTI9_5_IRQHandler [WEAK] EXPORT TIM1_BRK_IRQHandler [WEAK] EXPORT TIM1_UP_IRQHandler [WEAK] EXPORT TIM1_TRG_COM_IRQHandler [WEAK] EXPORT TIM1_CC_IRQHandler [WEAK] EXPORT TIM2_IRQHandler [WEAK] EXPORT TIM3_IRQHandler [WEAK] EXPORT TIM4_IRQHandler [WEAK] EXPORT I2C1_EV_IRQHandler [WEAK] EXPORT I2C1_ER_IRQHandler [WEAK] EXPORT I2C2_EV_IRQHandler [WEAK] EXPORT I2C2_ER_IRQHandler [WEAK] EXPORT SPI1_IRQHandler [WEAK] EXPORT SPI2_IRQHandler [WEAK] EXPORT USART1_IRQHandler [WEAK] EXPORT USART2_IRQHandler [WEAK] EXPORT USART3_IRQHandler [WEAK] EXPORT EXTI15_10_IRQHandler [WEAK] EXPORT RTCAlarm_IRQHandler [WEAK] EXPORT USBWakeUp_IRQHandler [WEAK] WWDG_IRQHandler PVD_IRQHandler TAMPER_IRQHandler RTC_IRQHandler FLASH_IRQHandler RCC_IRQHandler EXTI0_IRQHandler EXTI1_IRQHandler EXTI2_IRQHandler EXTI3_IRQHandler EXTI4_IRQHandler DMA1_Channel1_IRQHandler DMA1_Channel2_IRQHandler DMA1_Channel3_IRQHandler DMA1_Channel4_IRQHandler DMA1_Channel5_IRQHandler DMA1_Channel6_IRQHandler DMA1_Channel7_IRQHandler ADC1_2_IRQHandler USB_HP_CAN1_TX_IRQHandler USB_LP_CAN1_RX0_IRQHandler CAN1_RX1_IRQHandler CAN1_SCE_IRQHandler EXTI9_5_IRQHandler TIM1_BRK_IRQHandler TIM1_UP_IRQHandler TIM1_TRG_COM_IRQHandler TIM1_CC_IRQHandler TIM2_IRQHandler TIM3_IRQHandler TIM4_IRQHandler I2C1_EV_IRQHandler I2C1_ER_IRQHandler I2C2_EV_IRQHandler I2C2_ER_IRQHandler SPI1_IRQHandler SPI2_IRQHandler USART1_IRQHandler USART2_IRQHandler USART3_IRQHandler EXTI15_10_IRQHandler RTCAlarm_IRQHandler 
USBWakeUp_IRQHandler B . ENDP ALIGN ;******************************************************************************* ; User Stack and Heap initialization ;******************************************************************************* IF :DEF:__MICROLIB EXPORT __initial_sp EXPORT __heap_base EXPORT __heap_limit ELSE IMPORT __use_two_region_memory EXPORT __user_initial_stackheap __user_initial_stackheap LDR R0, = Heap_Mem LDR R1, =(Stack_Mem + Stack_Size) LDR R2, = (Heap_Mem + Heap_Size) LDR R3, = Stack_Mem BX LR ALIGN ENDIF END
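Every handler in the startup file above is exported [WEAK] and falls through to the Default_Handler infinite loop, so an application replaces one simply by defining a non-weak C function with the same name; the linker then prefers the application symbol. A minimal sketch using two names taken from the vector table above; the millisecond counter and the empty TIM2 body are invented for illustration, not part of the original firmware.

#include <stdint.h>

/* Hypothetical millisecond counter bumped by the SysTick override. */
static volatile uint32_t g_ms_ticks;

/* A non-weak definition takes precedence over the [WEAK] export in the
 * startup file, so this replaces the Default_Handler loop for SysTick only. */
void SysTick_Handler(void)
{
    g_ms_ticks++;
}

/* Same idea for a peripheral interrupt; the body is a placeholder. */
void TIM2_IRQHandler(void)
{
    /* acknowledge the timer flag in the peripheral, then handle the event */
}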
MeowBoy326/MacroKeyboard
11,822
Firmware/MacroKeyboard V0.1/Libraries/CMSIS/CM3/DeviceSupport/ST/STM32F10x/startup/arm/startup_stm32f10x_ld.s
;******************** (C) COPYRIGHT 2011 STMicroelectronics ******************** ;* File Name : startup_stm32f10x_ld.s ;* Author : MCD Application Team ;* Version : V3.5.1 ;* Date : 08-September-2021 ;* Description : STM32F10x Low Density Devices vector table for MDK-ARM ;* toolchain. ;* This module performs: ;* - Set the initial SP ;* - Set the initial PC == Reset_Handler ;* - Set the vector table entries with the exceptions ISR address ;* - Configure the clock system ;* - Branches to __main in the C library (which eventually ;* calls main()). ;* After Reset the CortexM3 processor is in Thread mode, ;* priority is Privileged, and the Stack is set to Main. ;* <<< Use Configuration Wizard in Context Menu >>> ;******************************************************************************* ;* ;* Copyright (c) 2011 STMicroelectronics. ;* All rights reserved. ;* ;* This software is licensed under terms that can be found in the LICENSE file ;* in the root directory of this software component. ;* If no LICENSE file comes with this software, it is provided AS-IS. ; ;******************************************************************************* ; Amount of memory (in bytes) allocated for Stack ; Tailor this value to your application needs ; <h> Stack Configuration ; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8> ; </h> Stack_Size EQU 0x00000400 AREA STACK, NOINIT, READWRITE, ALIGN=3 Stack_Mem SPACE Stack_Size __initial_sp ; <h> Heap Configuration ; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8> ; </h> Heap_Size EQU 0x00000200 AREA HEAP, NOINIT, READWRITE, ALIGN=3 __heap_base Heap_Mem SPACE Heap_Size __heap_limit PRESERVE8 THUMB ; Vector Table Mapped to Address 0 at Reset AREA RESET, DATA, READONLY EXPORT __Vectors EXPORT __Vectors_End EXPORT __Vectors_Size __Vectors DCD __initial_sp ; Top of Stack DCD Reset_Handler ; Reset Handler DCD NMI_Handler ; NMI Handler DCD HardFault_Handler ; Hard Fault Handler DCD MemManage_Handler ; MPU Fault Handler DCD BusFault_Handler ; Bus Fault Handler DCD UsageFault_Handler ; Usage Fault Handler DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD SVC_Handler ; SVCall Handler DCD DebugMon_Handler ; Debug Monitor Handler DCD 0 ; Reserved DCD PendSV_Handler ; PendSV Handler DCD SysTick_Handler ; SysTick Handler ; External Interrupts DCD WWDG_IRQHandler ; Window Watchdog DCD PVD_IRQHandler ; PVD through EXTI Line detect DCD TAMPER_IRQHandler ; Tamper DCD RTC_IRQHandler ; RTC DCD FLASH_IRQHandler ; Flash DCD RCC_IRQHandler ; RCC DCD EXTI0_IRQHandler ; EXTI Line 0 DCD EXTI1_IRQHandler ; EXTI Line 1 DCD EXTI2_IRQHandler ; EXTI Line 2 DCD EXTI3_IRQHandler ; EXTI Line 3 DCD EXTI4_IRQHandler ; EXTI Line 4 DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1 DCD DMA1_Channel2_IRQHandler ; DMA1 Channel 2 DCD DMA1_Channel3_IRQHandler ; DMA1 Channel 3 DCD DMA1_Channel4_IRQHandler ; DMA1 Channel 4 DCD DMA1_Channel5_IRQHandler ; DMA1 Channel 5 DCD DMA1_Channel6_IRQHandler ; DMA1 Channel 6 DCD DMA1_Channel7_IRQHandler ; DMA1 Channel 7 DCD ADC1_2_IRQHandler ; ADC1_2 DCD USB_HP_CAN1_TX_IRQHandler ; USB High Priority or CAN1 TX DCD USB_LP_CAN1_RX0_IRQHandler ; USB Low Priority or CAN1 RX0 DCD CAN1_RX1_IRQHandler ; CAN1 RX1 DCD CAN1_SCE_IRQHandler ; CAN1 SCE DCD EXTI9_5_IRQHandler ; EXTI Line 9..5 DCD TIM1_BRK_IRQHandler ; TIM1 Break DCD TIM1_UP_IRQHandler ; TIM1 Update DCD TIM1_TRG_COM_IRQHandler ; TIM1 Trigger and Commutation DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare DCD TIM2_IRQHandler ; TIM2 DCD TIM3_IRQHandler ; TIM3 DCD 0 ; Reserved DCD I2C1_EV_IRQHandler ; I2C1 Event 
DCD I2C1_ER_IRQHandler ; I2C1 Error DCD 0 ; Reserved DCD 0 ; Reserved DCD SPI1_IRQHandler ; SPI1 DCD 0 ; Reserved DCD USART1_IRQHandler ; USART1 DCD USART2_IRQHandler ; USART2 DCD 0 ; Reserved DCD EXTI15_10_IRQHandler ; EXTI Line 15..10 DCD RTCAlarm_IRQHandler ; RTC Alarm through EXTI Line DCD USBWakeUp_IRQHandler ; USB Wakeup from suspend __Vectors_End __Vectors_Size EQU __Vectors_End - __Vectors AREA |.text|, CODE, READONLY ; Reset handler routine Reset_Handler PROC EXPORT Reset_Handler [WEAK] IMPORT __main IMPORT SystemInit LDR R0, =SystemInit BLX R0 LDR R0, =__main BX R0 ENDP ; Dummy Exception Handlers (infinite loops which can be modified) NMI_Handler PROC EXPORT NMI_Handler [WEAK] B . ENDP HardFault_Handler\ PROC EXPORT HardFault_Handler [WEAK] B . ENDP MemManage_Handler\ PROC EXPORT MemManage_Handler [WEAK] B . ENDP BusFault_Handler\ PROC EXPORT BusFault_Handler [WEAK] B . ENDP UsageFault_Handler\ PROC EXPORT UsageFault_Handler [WEAK] B . ENDP SVC_Handler PROC EXPORT SVC_Handler [WEAK] B . ENDP DebugMon_Handler\ PROC EXPORT DebugMon_Handler [WEAK] B . ENDP PendSV_Handler PROC EXPORT PendSV_Handler [WEAK] B . ENDP SysTick_Handler PROC EXPORT SysTick_Handler [WEAK] B . ENDP Default_Handler PROC EXPORT WWDG_IRQHandler [WEAK] EXPORT PVD_IRQHandler [WEAK] EXPORT TAMPER_IRQHandler [WEAK] EXPORT RTC_IRQHandler [WEAK] EXPORT FLASH_IRQHandler [WEAK] EXPORT RCC_IRQHandler [WEAK] EXPORT EXTI0_IRQHandler [WEAK] EXPORT EXTI1_IRQHandler [WEAK] EXPORT EXTI2_IRQHandler [WEAK] EXPORT EXTI3_IRQHandler [WEAK] EXPORT EXTI4_IRQHandler [WEAK] EXPORT DMA1_Channel1_IRQHandler [WEAK] EXPORT DMA1_Channel2_IRQHandler [WEAK] EXPORT DMA1_Channel3_IRQHandler [WEAK] EXPORT DMA1_Channel4_IRQHandler [WEAK] EXPORT DMA1_Channel5_IRQHandler [WEAK] EXPORT DMA1_Channel6_IRQHandler [WEAK] EXPORT DMA1_Channel7_IRQHandler [WEAK] EXPORT ADC1_2_IRQHandler [WEAK] EXPORT USB_HP_CAN1_TX_IRQHandler [WEAK] EXPORT USB_LP_CAN1_RX0_IRQHandler [WEAK] EXPORT CAN1_RX1_IRQHandler [WEAK] EXPORT CAN1_SCE_IRQHandler [WEAK] EXPORT EXTI9_5_IRQHandler [WEAK] EXPORT TIM1_BRK_IRQHandler [WEAK] EXPORT TIM1_UP_IRQHandler [WEAK] EXPORT TIM1_TRG_COM_IRQHandler [WEAK] EXPORT TIM1_CC_IRQHandler [WEAK] EXPORT TIM2_IRQHandler [WEAK] EXPORT TIM3_IRQHandler [WEAK] EXPORT I2C1_EV_IRQHandler [WEAK] EXPORT I2C1_ER_IRQHandler [WEAK] EXPORT SPI1_IRQHandler [WEAK] EXPORT USART1_IRQHandler [WEAK] EXPORT USART2_IRQHandler [WEAK] EXPORT EXTI15_10_IRQHandler [WEAK] EXPORT RTCAlarm_IRQHandler [WEAK] EXPORT USBWakeUp_IRQHandler [WEAK] WWDG_IRQHandler PVD_IRQHandler TAMPER_IRQHandler RTC_IRQHandler FLASH_IRQHandler RCC_IRQHandler EXTI0_IRQHandler EXTI1_IRQHandler EXTI2_IRQHandler EXTI3_IRQHandler EXTI4_IRQHandler DMA1_Channel1_IRQHandler DMA1_Channel2_IRQHandler DMA1_Channel3_IRQHandler DMA1_Channel4_IRQHandler DMA1_Channel5_IRQHandler DMA1_Channel6_IRQHandler DMA1_Channel7_IRQHandler ADC1_2_IRQHandler USB_HP_CAN1_TX_IRQHandler USB_LP_CAN1_RX0_IRQHandler CAN1_RX1_IRQHandler CAN1_SCE_IRQHandler EXTI9_5_IRQHandler TIM1_BRK_IRQHandler TIM1_UP_IRQHandler TIM1_TRG_COM_IRQHandler TIM1_CC_IRQHandler TIM2_IRQHandler TIM3_IRQHandler I2C1_EV_IRQHandler I2C1_ER_IRQHandler SPI1_IRQHandler USART1_IRQHandler USART2_IRQHandler EXTI15_10_IRQHandler RTCAlarm_IRQHandler USBWakeUp_IRQHandler B . 
ENDP ALIGN ;******************************************************************************* ; User Stack and Heap initialization ;******************************************************************************* IF :DEF:__MICROLIB EXPORT __initial_sp EXPORT __heap_base EXPORT __heap_limit ELSE IMPORT __use_two_region_memory EXPORT __user_initial_stackheap __user_initial_stackheap LDR R0, = Heap_Mem LDR R1, =(Stack_Mem + Stack_Size) LDR R2, = (Heap_Mem + Heap_Size) LDR R3, = Stack_Mem BX LR ALIGN ENDIF END
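In the reset sequence above, SystemInit is called before the branch to __main, so it runs before the C library has scatter-loaded initialized data; it should therefore not depend on the values of initialized globals. A minimal sketch of a stub that satisfies the IMPORT, assuming the project does not use the ST-supplied system_stm32f10x.c (which would normally configure the PLL and flash wait states here).

/* Placeholder SystemInit(): leaves the part on its default internal clock.
 * Kept free of initialized global data because scatter-loading has not yet
 * happened when Reset_Handler calls it. */
void SystemInit(void)
{
    /* intentionally empty; clock-tree configuration would go here */
}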
MeowBoy326/MacroKeyboard
15,141
Firmware/MacroKeyboard V0.1/Libraries/CMSIS/CM3/DeviceSupport/ST/STM32F10x/startup/arm/startup_stm32f10x_cl.s
;******************** (C) COPYRIGHT 2011 STMicroelectronics ******************** ;* File Name : startup_stm32f10x_cl.s ;* Author : MCD Application Team ;* Version : V3.5.1 ;* Date : 08-September-2021 ;* Description : STM32F10x Connectivity line devices vector table for MDK-ARM ;* toolchain. ;* This module performs: ;* - Set the initial SP ;* - Set the initial PC == Reset_Handler ;* - Set the vector table entries with the exceptions ISR address ;* - Configure the clock system ;* - Branches to __main in the C library (which eventually ;* calls main()). ;* After Reset the CortexM3 processor is in Thread mode, ;* priority is Privileged, and the Stack is set to Main. ;* <<< Use Configuration Wizard in Context Menu >>> ;******************************************************************************* ;* ;* Copyright (c) 2011 STMicroelectronics. ;* All rights reserved. ;* ;* This software is licensed under terms that can be found in the LICENSE file ;* in the root directory of this software component. ;* If no LICENSE file comes with this software, it is provided AS-IS. ; ;******************************************************************************* ; Amount of memory (in bytes) allocated for Stack ; Tailor this value to your application needs ; <h> Stack Configuration ; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8> ; </h> Stack_Size EQU 0x00000400 AREA STACK, NOINIT, READWRITE, ALIGN=3 Stack_Mem SPACE Stack_Size __initial_sp ; <h> Heap Configuration ; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8> ; </h> Heap_Size EQU 0x00000200 AREA HEAP, NOINIT, READWRITE, ALIGN=3 __heap_base Heap_Mem SPACE Heap_Size __heap_limit PRESERVE8 THUMB ; Vector Table Mapped to Address 0 at Reset AREA RESET, DATA, READONLY EXPORT __Vectors EXPORT __Vectors_End EXPORT __Vectors_Size __Vectors DCD __initial_sp ; Top of Stack DCD Reset_Handler ; Reset Handler DCD NMI_Handler ; NMI Handler DCD HardFault_Handler ; Hard Fault Handler DCD MemManage_Handler ; MPU Fault Handler DCD BusFault_Handler ; Bus Fault Handler DCD UsageFault_Handler ; Usage Fault Handler DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD SVC_Handler ; SVCall Handler DCD DebugMon_Handler ; Debug Monitor Handler DCD 0 ; Reserved DCD PendSV_Handler ; PendSV Handler DCD SysTick_Handler ; SysTick Handler ; External Interrupts DCD WWDG_IRQHandler ; Window Watchdog DCD PVD_IRQHandler ; PVD through EXTI Line detect DCD TAMPER_IRQHandler ; Tamper DCD RTC_IRQHandler ; RTC DCD FLASH_IRQHandler ; Flash DCD RCC_IRQHandler ; RCC DCD EXTI0_IRQHandler ; EXTI Line 0 DCD EXTI1_IRQHandler ; EXTI Line 1 DCD EXTI2_IRQHandler ; EXTI Line 2 DCD EXTI3_IRQHandler ; EXTI Line 3 DCD EXTI4_IRQHandler ; EXTI Line 4 DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1 DCD DMA1_Channel2_IRQHandler ; DMA1 Channel 2 DCD DMA1_Channel3_IRQHandler ; DMA1 Channel 3 DCD DMA1_Channel4_IRQHandler ; DMA1 Channel 4 DCD DMA1_Channel5_IRQHandler ; DMA1 Channel 5 DCD DMA1_Channel6_IRQHandler ; DMA1 Channel 6 DCD DMA1_Channel7_IRQHandler ; DMA1 Channel 7 DCD ADC1_2_IRQHandler ; ADC1 and ADC2 DCD CAN1_TX_IRQHandler ; CAN1 TX DCD CAN1_RX0_IRQHandler ; CAN1 RX0 DCD CAN1_RX1_IRQHandler ; CAN1 RX1 DCD CAN1_SCE_IRQHandler ; CAN1 SCE DCD EXTI9_5_IRQHandler ; EXTI Line 9..5 DCD TIM1_BRK_IRQHandler ; TIM1 Break DCD TIM1_UP_IRQHandler ; TIM1 Update DCD TIM1_TRG_COM_IRQHandler ; TIM1 Trigger and Commutation DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare DCD TIM2_IRQHandler ; TIM2 DCD TIM3_IRQHandler ; TIM3 DCD TIM4_IRQHandler ; TIM4 DCD I2C1_EV_IRQHandler ; I2C1 Event DCD I2C1_ER_IRQHandler ; I2C1 
Error DCD I2C2_EV_IRQHandler ; I2C2 Event DCD I2C2_ER_IRQHandler ; I2C1 Error DCD SPI1_IRQHandler ; SPI1 DCD SPI2_IRQHandler ; SPI2 DCD USART1_IRQHandler ; USART1 DCD USART2_IRQHandler ; USART2 DCD USART3_IRQHandler ; USART3 DCD EXTI15_10_IRQHandler ; EXTI Line 15..10 DCD RTCAlarm_IRQHandler ; RTC alarm through EXTI line DCD OTG_FS_WKUP_IRQHandler ; USB OTG FS Wakeup through EXTI line DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD TIM5_IRQHandler ; TIM5 DCD SPI3_IRQHandler ; SPI3 DCD UART4_IRQHandler ; UART4 DCD UART5_IRQHandler ; UART5 DCD TIM6_IRQHandler ; TIM6 DCD TIM7_IRQHandler ; TIM7 DCD DMA2_Channel1_IRQHandler ; DMA2 Channel1 DCD DMA2_Channel2_IRQHandler ; DMA2 Channel2 DCD DMA2_Channel3_IRQHandler ; DMA2 Channel3 DCD DMA2_Channel4_IRQHandler ; DMA2 Channel4 DCD DMA2_Channel5_IRQHandler ; DMA2 Channel5 DCD ETH_IRQHandler ; Ethernet DCD ETH_WKUP_IRQHandler ; Ethernet Wakeup through EXTI line DCD CAN2_TX_IRQHandler ; CAN2 TX DCD CAN2_RX0_IRQHandler ; CAN2 RX0 DCD CAN2_RX1_IRQHandler ; CAN2 RX1 DCD CAN2_SCE_IRQHandler ; CAN2 SCE DCD OTG_FS_IRQHandler ; USB OTG FS __Vectors_End __Vectors_Size EQU __Vectors_End - __Vectors AREA |.text|, CODE, READONLY ; Reset handler Reset_Handler PROC EXPORT Reset_Handler [WEAK] IMPORT SystemInit IMPORT __main LDR R0, =SystemInit BLX R0 LDR R0, =__main BX R0 ENDP ; Dummy Exception Handlers (infinite loops which can be modified) NMI_Handler PROC EXPORT NMI_Handler [WEAK] B . ENDP HardFault_Handler\ PROC EXPORT HardFault_Handler [WEAK] B . ENDP MemManage_Handler\ PROC EXPORT MemManage_Handler [WEAK] B . ENDP BusFault_Handler\ PROC EXPORT BusFault_Handler [WEAK] B . ENDP UsageFault_Handler\ PROC EXPORT UsageFault_Handler [WEAK] B . ENDP SVC_Handler PROC EXPORT SVC_Handler [WEAK] B . ENDP DebugMon_Handler\ PROC EXPORT DebugMon_Handler [WEAK] B . ENDP PendSV_Handler PROC EXPORT PendSV_Handler [WEAK] B . ENDP SysTick_Handler PROC EXPORT SysTick_Handler [WEAK] B . 
ENDP Default_Handler PROC EXPORT WWDG_IRQHandler [WEAK] EXPORT PVD_IRQHandler [WEAK] EXPORT TAMPER_IRQHandler [WEAK] EXPORT RTC_IRQHandler [WEAK] EXPORT FLASH_IRQHandler [WEAK] EXPORT RCC_IRQHandler [WEAK] EXPORT EXTI0_IRQHandler [WEAK] EXPORT EXTI1_IRQHandler [WEAK] EXPORT EXTI2_IRQHandler [WEAK] EXPORT EXTI3_IRQHandler [WEAK] EXPORT EXTI4_IRQHandler [WEAK] EXPORT DMA1_Channel1_IRQHandler [WEAK] EXPORT DMA1_Channel2_IRQHandler [WEAK] EXPORT DMA1_Channel3_IRQHandler [WEAK] EXPORT DMA1_Channel4_IRQHandler [WEAK] EXPORT DMA1_Channel5_IRQHandler [WEAK] EXPORT DMA1_Channel6_IRQHandler [WEAK] EXPORT DMA1_Channel7_IRQHandler [WEAK] EXPORT ADC1_2_IRQHandler [WEAK] EXPORT CAN1_TX_IRQHandler [WEAK] EXPORT CAN1_RX0_IRQHandler [WEAK] EXPORT CAN1_RX1_IRQHandler [WEAK] EXPORT CAN1_SCE_IRQHandler [WEAK] EXPORT EXTI9_5_IRQHandler [WEAK] EXPORT TIM1_BRK_IRQHandler [WEAK] EXPORT TIM1_UP_IRQHandler [WEAK] EXPORT TIM1_TRG_COM_IRQHandler [WEAK] EXPORT TIM1_CC_IRQHandler [WEAK] EXPORT TIM2_IRQHandler [WEAK] EXPORT TIM3_IRQHandler [WEAK] EXPORT TIM4_IRQHandler [WEAK] EXPORT I2C1_EV_IRQHandler [WEAK] EXPORT I2C1_ER_IRQHandler [WEAK] EXPORT I2C2_EV_IRQHandler [WEAK] EXPORT I2C2_ER_IRQHandler [WEAK] EXPORT SPI1_IRQHandler [WEAK] EXPORT SPI2_IRQHandler [WEAK] EXPORT USART1_IRQHandler [WEAK] EXPORT USART2_IRQHandler [WEAK] EXPORT USART3_IRQHandler [WEAK] EXPORT EXTI15_10_IRQHandler [WEAK] EXPORT RTCAlarm_IRQHandler [WEAK] EXPORT OTG_FS_WKUP_IRQHandler [WEAK] EXPORT TIM5_IRQHandler [WEAK] EXPORT SPI3_IRQHandler [WEAK] EXPORT UART4_IRQHandler [WEAK] EXPORT UART5_IRQHandler [WEAK] EXPORT TIM6_IRQHandler [WEAK] EXPORT TIM7_IRQHandler [WEAK] EXPORT DMA2_Channel1_IRQHandler [WEAK] EXPORT DMA2_Channel2_IRQHandler [WEAK] EXPORT DMA2_Channel3_IRQHandler [WEAK] EXPORT DMA2_Channel4_IRQHandler [WEAK] EXPORT DMA2_Channel5_IRQHandler [WEAK] EXPORT ETH_IRQHandler [WEAK] EXPORT ETH_WKUP_IRQHandler [WEAK] EXPORT CAN2_TX_IRQHandler [WEAK] EXPORT CAN2_RX0_IRQHandler [WEAK] EXPORT CAN2_RX1_IRQHandler [WEAK] EXPORT CAN2_SCE_IRQHandler [WEAK] EXPORT OTG_FS_IRQHandler [WEAK] WWDG_IRQHandler PVD_IRQHandler TAMPER_IRQHandler RTC_IRQHandler FLASH_IRQHandler RCC_IRQHandler EXTI0_IRQHandler EXTI1_IRQHandler EXTI2_IRQHandler EXTI3_IRQHandler EXTI4_IRQHandler DMA1_Channel1_IRQHandler DMA1_Channel2_IRQHandler DMA1_Channel3_IRQHandler DMA1_Channel4_IRQHandler DMA1_Channel5_IRQHandler DMA1_Channel6_IRQHandler DMA1_Channel7_IRQHandler ADC1_2_IRQHandler CAN1_TX_IRQHandler CAN1_RX0_IRQHandler CAN1_RX1_IRQHandler CAN1_SCE_IRQHandler EXTI9_5_IRQHandler TIM1_BRK_IRQHandler TIM1_UP_IRQHandler TIM1_TRG_COM_IRQHandler TIM1_CC_IRQHandler TIM2_IRQHandler TIM3_IRQHandler TIM4_IRQHandler I2C1_EV_IRQHandler I2C1_ER_IRQHandler I2C2_EV_IRQHandler I2C2_ER_IRQHandler SPI1_IRQHandler SPI2_IRQHandler USART1_IRQHandler USART2_IRQHandler USART3_IRQHandler EXTI15_10_IRQHandler RTCAlarm_IRQHandler OTG_FS_WKUP_IRQHandler TIM5_IRQHandler SPI3_IRQHandler UART4_IRQHandler UART5_IRQHandler TIM6_IRQHandler TIM7_IRQHandler DMA2_Channel1_IRQHandler DMA2_Channel2_IRQHandler DMA2_Channel3_IRQHandler DMA2_Channel4_IRQHandler DMA2_Channel5_IRQHandler ETH_IRQHandler ETH_WKUP_IRQHandler CAN2_TX_IRQHandler CAN2_RX0_IRQHandler CAN2_RX1_IRQHandler CAN2_SCE_IRQHandler OTG_FS_IRQHandler B . 
ENDP ALIGN ;******************************************************************************* ; User Stack and Heap initialization ;******************************************************************************* IF :DEF:__MICROLIB EXPORT __initial_sp EXPORT __heap_base EXPORT __heap_limit ELSE IMPORT __use_two_region_memory EXPORT __user_initial_stackheap __user_initial_stackheap LDR R0, = Heap_Mem LDR R1, =(Stack_Mem + Stack_Size) LDR R2, = (Heap_Mem + Heap_Size) LDR R3, = Stack_Mem BX LR ALIGN ENDIF END
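Projects that copy this connectivity-line vector table into SRAM, for example to patch individual entries at run time, also have to point the Cortex-M3 at the copy through the SCB VTOR register at 0xE000ED08. The sketch below is illustrative only: the 84-word length comes from counting the table above (16 core plus 68 device entries), the 512-byte alignment follows the ARMv7-M rule for a table of that size, and the function name and buffer are invented.

#include <stdint.h>
#include <string.h>

#define SCB_VTOR      (*(volatile uint32_t *)0xE000ED08u)
#define VECTOR_WORDS  84u   /* 16 core + 68 external entries, per the table above */

/* Assumed SRAM copy; a table this size must be 512-byte aligned for VTOR. */
static uint32_t ram_vectors[VECTOR_WORDS] __attribute__((aligned(512)));

extern const uint32_t __Vectors[];   /* exported by the startup file above */

void relocate_vector_table(void)
{
    memcpy(ram_vectors, __Vectors, sizeof ram_vectors);
    SCB_VTOR = (uint32_t)ram_vectors;   /* mask interrupts around this in real code */
}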
MeowBoy326/MacroKeyboard
13,095
Firmware/MacroKeyboard V0.1/Libraries/CMSIS/CM3/DeviceSupport/ST/STM32F10x/startup/arm/startup_stm32f10x_ld_vl.s
;******************** (C) COPYRIGHT 2011 STMicroelectronics ******************** ;* File Name : startup_stm32f10x_ld_vl.s ;* Author : MCD Application Team ;* Version : V3.5.1 ;* Date : 08-September-2021 ;* Description : STM32F10x Low Density Value Line Devices vector table ;* for MDK-ARM toolchain. ;* This module performs: ;* - Set the initial SP ;* - Set the initial PC == Reset_Handler ;* - Set the vector table entries with the exceptions ISR address ;* - Configure the clock system ;* - Branches to __main in the C library (which eventually ;* calls main()). ;* After Reset the CortexM3 processor is in Thread mode, ;* priority is Privileged, and the Stack is set to Main. ;* <<< Use Configuration Wizard in Context Menu >>> ;******************************************************************************* ;* ;* Copyright (c) 2011 STMicroelectronics. ;* All rights reserved. ;* ;* This software is licensed under terms that can be found in the LICENSE file ;* in the root directory of this software component. ;* If no LICENSE file comes with this software, it is provided AS-IS. ; ;******************************************************************************* ; Amount of memory (in bytes) allocated for Stack ; Tailor this value to your application needs ; <h> Stack Configuration ; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8> ; </h> Stack_Size EQU 0x00000400 AREA STACK, NOINIT, READWRITE, ALIGN=3 Stack_Mem SPACE Stack_Size __initial_sp ; <h> Heap Configuration ; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8> ; </h> Heap_Size EQU 0x00000200 AREA HEAP, NOINIT, READWRITE, ALIGN=3 __heap_base Heap_Mem SPACE Heap_Size __heap_limit PRESERVE8 THUMB ; Vector Table Mapped to Address 0 at Reset AREA RESET, DATA, READONLY EXPORT __Vectors EXPORT __Vectors_End EXPORT __Vectors_Size __Vectors DCD __initial_sp ; Top of Stack DCD Reset_Handler ; Reset Handler DCD NMI_Handler ; NMI Handler DCD HardFault_Handler ; Hard Fault Handler DCD MemManage_Handler ; MPU Fault Handler DCD BusFault_Handler ; Bus Fault Handler DCD UsageFault_Handler ; Usage Fault Handler DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD SVC_Handler ; SVCall Handler DCD DebugMon_Handler ; Debug Monitor Handler DCD 0 ; Reserved DCD PendSV_Handler ; PendSV Handler DCD SysTick_Handler ; SysTick Handler ; External Interrupts DCD WWDG_IRQHandler ; Window Watchdog DCD PVD_IRQHandler ; PVD through EXTI Line detect DCD TAMPER_IRQHandler ; Tamper DCD RTC_IRQHandler ; RTC DCD FLASH_IRQHandler ; Flash DCD RCC_IRQHandler ; RCC DCD EXTI0_IRQHandler ; EXTI Line 0 DCD EXTI1_IRQHandler ; EXTI Line 1 DCD EXTI2_IRQHandler ; EXTI Line 2 DCD EXTI3_IRQHandler ; EXTI Line 3 DCD EXTI4_IRQHandler ; EXTI Line 4 DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1 DCD DMA1_Channel2_IRQHandler ; DMA1 Channel 2 DCD DMA1_Channel3_IRQHandler ; DMA1 Channel 3 DCD DMA1_Channel4_IRQHandler ; DMA1 Channel 4 DCD DMA1_Channel5_IRQHandler ; DMA1 Channel 5 DCD DMA1_Channel6_IRQHandler ; DMA1 Channel 6 DCD DMA1_Channel7_IRQHandler ; DMA1 Channel 7 DCD ADC1_IRQHandler ; ADC1 DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD EXTI9_5_IRQHandler ; EXTI Line 9..5 DCD TIM1_BRK_TIM15_IRQHandler ; TIM1 Break and TIM15 DCD TIM1_UP_TIM16_IRQHandler ; TIM1 Update and TIM16 DCD TIM1_TRG_COM_TIM17_IRQHandler ; TIM1 Trigger and Commutation and TIM17 DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare DCD TIM2_IRQHandler ; TIM2 DCD TIM3_IRQHandler ; TIM3 DCD 0 ; Reserved DCD I2C1_EV_IRQHandler ; I2C1 Event DCD I2C1_ER_IRQHandler ; I2C1 Error DCD 0 ; Reserved DCD 0 ; 
Reserved DCD SPI1_IRQHandler ; SPI1 DCD 0 ; Reserved DCD USART1_IRQHandler ; USART1 DCD USART2_IRQHandler ; USART2 DCD 0 ; Reserved DCD EXTI15_10_IRQHandler ; EXTI Line 15..10 DCD RTCAlarm_IRQHandler ; RTC Alarm through EXTI Line DCD CEC_IRQHandler ; HDMI-CEC DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD TIM6_DAC_IRQHandler ; TIM6 and DAC underrun DCD TIM7_IRQHandler ; TIM7 __Vectors_End __Vectors_Size EQU __Vectors_End - __Vectors AREA |.text|, CODE, READONLY ; Reset handler Reset_Handler PROC EXPORT Reset_Handler [WEAK] IMPORT __main IMPORT SystemInit LDR R0, =SystemInit BLX R0 LDR R0, =__main BX R0 ENDP ; Dummy Exception Handlers (infinite loops which can be modified) NMI_Handler PROC EXPORT NMI_Handler [WEAK] B . ENDP HardFault_Handler\ PROC EXPORT HardFault_Handler [WEAK] B . ENDP MemManage_Handler\ PROC EXPORT MemManage_Handler [WEAK] B . ENDP BusFault_Handler\ PROC EXPORT BusFault_Handler [WEAK] B . ENDP UsageFault_Handler\ PROC EXPORT UsageFault_Handler [WEAK] B . ENDP SVC_Handler PROC EXPORT SVC_Handler [WEAK] B . ENDP DebugMon_Handler\ PROC EXPORT DebugMon_Handler [WEAK] B . ENDP PendSV_Handler PROC EXPORT PendSV_Handler [WEAK] B . ENDP SysTick_Handler PROC EXPORT SysTick_Handler [WEAK] B . ENDP Default_Handler PROC EXPORT WWDG_IRQHandler [WEAK] EXPORT PVD_IRQHandler [WEAK] EXPORT TAMPER_IRQHandler [WEAK] EXPORT RTC_IRQHandler [WEAK] EXPORT FLASH_IRQHandler [WEAK] EXPORT RCC_IRQHandler [WEAK] EXPORT EXTI0_IRQHandler [WEAK] EXPORT EXTI1_IRQHandler [WEAK] EXPORT EXTI2_IRQHandler [WEAK] EXPORT EXTI3_IRQHandler [WEAK] EXPORT EXTI4_IRQHandler [WEAK] EXPORT DMA1_Channel1_IRQHandler [WEAK] EXPORT DMA1_Channel2_IRQHandler [WEAK] EXPORT DMA1_Channel3_IRQHandler [WEAK] EXPORT DMA1_Channel4_IRQHandler [WEAK] EXPORT DMA1_Channel5_IRQHandler [WEAK] EXPORT DMA1_Channel6_IRQHandler [WEAK] EXPORT DMA1_Channel7_IRQHandler [WEAK] EXPORT ADC1_IRQHandler [WEAK] EXPORT EXTI9_5_IRQHandler [WEAK] EXPORT TIM1_BRK_TIM15_IRQHandler [WEAK] EXPORT TIM1_UP_TIM16_IRQHandler [WEAK] EXPORT TIM1_TRG_COM_TIM17_IRQHandler [WEAK] EXPORT TIM1_CC_IRQHandler [WEAK] EXPORT TIM2_IRQHandler [WEAK] EXPORT TIM3_IRQHandler [WEAK] EXPORT I2C1_EV_IRQHandler [WEAK] EXPORT I2C1_ER_IRQHandler [WEAK] EXPORT SPI1_IRQHandler [WEAK] EXPORT USART1_IRQHandler [WEAK] EXPORT USART2_IRQHandler [WEAK] EXPORT EXTI15_10_IRQHandler [WEAK] EXPORT RTCAlarm_IRQHandler [WEAK] EXPORT CEC_IRQHandler [WEAK] EXPORT TIM6_DAC_IRQHandler [WEAK] EXPORT TIM7_IRQHandler [WEAK] WWDG_IRQHandler PVD_IRQHandler TAMPER_IRQHandler RTC_IRQHandler FLASH_IRQHandler RCC_IRQHandler EXTI0_IRQHandler EXTI1_IRQHandler EXTI2_IRQHandler EXTI3_IRQHandler EXTI4_IRQHandler DMA1_Channel1_IRQHandler DMA1_Channel2_IRQHandler DMA1_Channel3_IRQHandler DMA1_Channel4_IRQHandler DMA1_Channel5_IRQHandler DMA1_Channel6_IRQHandler DMA1_Channel7_IRQHandler ADC1_IRQHandler EXTI9_5_IRQHandler TIM1_BRK_TIM15_IRQHandler TIM1_UP_TIM16_IRQHandler TIM1_TRG_COM_TIM17_IRQHandler TIM1_CC_IRQHandler TIM2_IRQHandler TIM3_IRQHandler I2C1_EV_IRQHandler I2C1_ER_IRQHandler SPI1_IRQHandler USART1_IRQHandler USART2_IRQHandler EXTI15_10_IRQHandler RTCAlarm_IRQHandler CEC_IRQHandler TIM6_DAC_IRQHandler TIM7_IRQHandler B . 
ENDP ALIGN ;******************************************************************************* ; User Stack and Heap initialization ;******************************************************************************* IF :DEF:__MICROLIB EXPORT __initial_sp EXPORT __heap_base EXPORT __heap_limit ELSE IMPORT __use_two_region_memory EXPORT __user_initial_stackheap __user_initial_stackheap LDR R0, = Heap_Mem LDR R1, =(Stack_Mem + Stack_Size) LDR R2, = (Heap_Mem + Heap_Size) LDR R3, = Stack_Mem BX LR ALIGN ENDIF END
MeowBoy326/MacroKeyboard
12,193
Firmware/MacroKeyboard_V0.2/startup/startup_stm32f103xe.s
/** *************** (C) COPYRIGHT 2017 STMicroelectronics ************************ * @file startup_stm32f103xe.s * @author MCD Application Team * @brief STM32F103xE Devices vector table for Atollic toolchain. * This module performs: * - Set the initial SP * - Set the initial PC == Reset_Handler, * - Set the vector table entries with the exceptions ISR address * - Configure the clock system * - Configure external SRAM mounted on STM3210E-EVAL board * to be used as data memory (optional, to be enabled by user) * - Branches to main in the C library (which eventually * calls main()). * After Reset the Cortex-M3 processor is in Thread mode, * priority is Privileged, and the Stack is set to Main. ****************************************************************************** * @attention * * <h2><center>&copy; Copyright (c) 2017 STMicroelectronics. * All rights reserved.</center></h2> * * This software component is licensed by ST under BSD 3-Clause license, * the "License"; You may not use this file except in compliance with the * License. You may obtain a copy of the License at: * opensource.org/licenses/BSD-3-Clause * ****************************************************************************** */ .syntax unified .cpu cortex-m3 .fpu softvfp .thumb .global g_pfnVectors .global Default_Handler /* start address for the initialization values of the .data section. defined in linker script */ .word _sidata /* start address for the .data section. defined in linker script */ .word _sdata /* end address for the .data section. defined in linker script */ .word _edata /* start address for the .bss section. defined in linker script */ .word _sbss /* end address for the .bss section. defined in linker script */ .word _ebss .equ BootRAM, 0xF1E0F85F /** * @brief This is the code that gets called when the processor first * starts execution following a reset event. Only the absolutely * necessary set is performed, after which the application * supplied main() routine is called. * @param None * @retval : None */ .section .text.Reset_Handler .weak Reset_Handler .type Reset_Handler, %function Reset_Handler: /* Copy the data segment initializers from flash to SRAM */ ldr r0, =_sdata ldr r1, =_edata ldr r2, =_sidata movs r3, #0 b LoopCopyDataInit CopyDataInit: ldr r4, [r2, r3] str r4, [r0, r3] adds r3, r3, #4 LoopCopyDataInit: adds r4, r0, r3 cmp r4, r1 bcc CopyDataInit /* Zero fill the bss segment. */ ldr r2, =_sbss ldr r4, =_ebss movs r3, #0 b LoopFillZerobss FillZerobss: str r3, [r2] adds r2, r2, #4 LoopFillZerobss: cmp r2, r4 bcc FillZerobss /* Call the clock system intitialization function.*/ bl SystemInit /* Call static constructors */ bl __libc_init_array /* Call the application's entry point.*/ bl main bx lr .size Reset_Handler, .-Reset_Handler /** * @brief This is the code that gets called when the processor receives an * unexpected interrupt. This simply enters an infinite loop, preserving * the system state for examination by a debugger. * * @param None * @retval : None */ .section .text.Default_Handler,"ax",%progbits Default_Handler: Infinite_Loop: b Infinite_Loop .size Default_Handler, .-Default_Handler /****************************************************************************** * * The minimal vector table for a Cortex M3. Note that the proper constructs * must be placed on this to ensure that it ends up at physical address * 0x0000.0000. 
* ******************************************************************************/ .section .isr_vector,"a",%progbits .type g_pfnVectors, %object .size g_pfnVectors, .-g_pfnVectors g_pfnVectors: .word _estack .word Reset_Handler .word NMI_Handler .word HardFault_Handler .word MemManage_Handler .word BusFault_Handler .word UsageFault_Handler .word 0 .word 0 .word 0 .word 0 .word SVC_Handler .word DebugMon_Handler .word 0 .word PendSV_Handler .word SysTick_Handler .word WWDG_IRQHandler .word PVD_IRQHandler .word TAMPER_IRQHandler .word RTC_IRQHandler .word FLASH_IRQHandler .word RCC_IRQHandler .word EXTI0_IRQHandler .word EXTI1_IRQHandler .word EXTI2_IRQHandler .word EXTI3_IRQHandler .word EXTI4_IRQHandler .word DMA1_Channel1_IRQHandler .word DMA1_Channel2_IRQHandler .word DMA1_Channel3_IRQHandler .word DMA1_Channel4_IRQHandler .word DMA1_Channel5_IRQHandler .word DMA1_Channel6_IRQHandler .word DMA1_Channel7_IRQHandler .word ADC1_2_IRQHandler .word USB_HP_CAN1_TX_IRQHandler .word USB_LP_CAN1_RX0_IRQHandler .word CAN1_RX1_IRQHandler .word CAN1_SCE_IRQHandler .word EXTI9_5_IRQHandler .word TIM1_BRK_IRQHandler .word TIM1_UP_IRQHandler .word TIM1_TRG_COM_IRQHandler .word TIM1_CC_IRQHandler .word TIM2_IRQHandler .word TIM3_IRQHandler .word TIM4_IRQHandler .word I2C1_EV_IRQHandler .word I2C1_ER_IRQHandler .word I2C2_EV_IRQHandler .word I2C2_ER_IRQHandler .word SPI1_IRQHandler .word SPI2_IRQHandler .word USART1_IRQHandler .word USART2_IRQHandler .word USART3_IRQHandler .word EXTI15_10_IRQHandler .word RTC_Alarm_IRQHandler .word USBWakeUp_IRQHandler .word TIM8_BRK_IRQHandler .word TIM8_UP_IRQHandler .word TIM8_TRG_COM_IRQHandler .word TIM8_CC_IRQHandler .word ADC3_IRQHandler .word FSMC_IRQHandler .word SDIO_IRQHandler .word TIM5_IRQHandler .word SPI3_IRQHandler .word UART4_IRQHandler .word UART5_IRQHandler .word TIM6_IRQHandler .word TIM7_IRQHandler .word DMA2_Channel1_IRQHandler .word DMA2_Channel2_IRQHandler .word DMA2_Channel3_IRQHandler .word DMA2_Channel4_5_IRQHandler .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word BootRAM /* @0x1E0. This is for boot in RAM mode for STM32F10x High Density devices. */ /******************************************************************************* * * Provide weak aliases for each Exception handler to the Default_Handler. * As they are weak aliases, any function with the same name will override * this definition. 
* *******************************************************************************/ .weak NMI_Handler .thumb_set NMI_Handler,Default_Handler .weak HardFault_Handler .thumb_set HardFault_Handler,Default_Handler .weak MemManage_Handler .thumb_set MemManage_Handler,Default_Handler .weak BusFault_Handler .thumb_set BusFault_Handler,Default_Handler .weak UsageFault_Handler .thumb_set UsageFault_Handler,Default_Handler .weak SVC_Handler .thumb_set SVC_Handler,Default_Handler .weak DebugMon_Handler .thumb_set DebugMon_Handler,Default_Handler .weak PendSV_Handler .thumb_set PendSV_Handler,Default_Handler .weak SysTick_Handler .thumb_set SysTick_Handler,Default_Handler .weak WWDG_IRQHandler .thumb_set WWDG_IRQHandler,Default_Handler .weak PVD_IRQHandler .thumb_set PVD_IRQHandler,Default_Handler .weak TAMPER_IRQHandler .thumb_set TAMPER_IRQHandler,Default_Handler .weak RTC_IRQHandler .thumb_set RTC_IRQHandler,Default_Handler .weak FLASH_IRQHandler .thumb_set FLASH_IRQHandler,Default_Handler .weak RCC_IRQHandler .thumb_set RCC_IRQHandler,Default_Handler .weak EXTI0_IRQHandler .thumb_set EXTI0_IRQHandler,Default_Handler .weak EXTI1_IRQHandler .thumb_set EXTI1_IRQHandler,Default_Handler .weak EXTI2_IRQHandler .thumb_set EXTI2_IRQHandler,Default_Handler .weak EXTI3_IRQHandler .thumb_set EXTI3_IRQHandler,Default_Handler .weak EXTI4_IRQHandler .thumb_set EXTI4_IRQHandler,Default_Handler .weak DMA1_Channel1_IRQHandler .thumb_set DMA1_Channel1_IRQHandler,Default_Handler .weak DMA1_Channel2_IRQHandler .thumb_set DMA1_Channel2_IRQHandler,Default_Handler .weak DMA1_Channel3_IRQHandler .thumb_set DMA1_Channel3_IRQHandler,Default_Handler .weak DMA1_Channel4_IRQHandler .thumb_set DMA1_Channel4_IRQHandler,Default_Handler .weak DMA1_Channel5_IRQHandler .thumb_set DMA1_Channel5_IRQHandler,Default_Handler .weak DMA1_Channel6_IRQHandler .thumb_set DMA1_Channel6_IRQHandler,Default_Handler .weak DMA1_Channel7_IRQHandler .thumb_set DMA1_Channel7_IRQHandler,Default_Handler .weak ADC1_2_IRQHandler .thumb_set ADC1_2_IRQHandler,Default_Handler .weak USB_HP_CAN1_TX_IRQHandler .thumb_set USB_HP_CAN1_TX_IRQHandler,Default_Handler .weak USB_LP_CAN1_RX0_IRQHandler .thumb_set USB_LP_CAN1_RX0_IRQHandler,Default_Handler .weak CAN1_RX1_IRQHandler .thumb_set CAN1_RX1_IRQHandler,Default_Handler .weak CAN1_SCE_IRQHandler .thumb_set CAN1_SCE_IRQHandler,Default_Handler .weak EXTI9_5_IRQHandler .thumb_set EXTI9_5_IRQHandler,Default_Handler .weak TIM1_BRK_IRQHandler .thumb_set TIM1_BRK_IRQHandler,Default_Handler .weak TIM1_UP_IRQHandler .thumb_set TIM1_UP_IRQHandler,Default_Handler .weak TIM1_TRG_COM_IRQHandler .thumb_set TIM1_TRG_COM_IRQHandler,Default_Handler .weak TIM1_CC_IRQHandler .thumb_set TIM1_CC_IRQHandler,Default_Handler .weak TIM2_IRQHandler .thumb_set TIM2_IRQHandler,Default_Handler .weak TIM3_IRQHandler .thumb_set TIM3_IRQHandler,Default_Handler .weak TIM4_IRQHandler .thumb_set TIM4_IRQHandler,Default_Handler .weak I2C1_EV_IRQHandler .thumb_set I2C1_EV_IRQHandler,Default_Handler .weak I2C1_ER_IRQHandler .thumb_set I2C1_ER_IRQHandler,Default_Handler .weak I2C2_EV_IRQHandler .thumb_set I2C2_EV_IRQHandler,Default_Handler .weak I2C2_ER_IRQHandler .thumb_set I2C2_ER_IRQHandler,Default_Handler .weak SPI1_IRQHandler .thumb_set SPI1_IRQHandler,Default_Handler .weak SPI2_IRQHandler .thumb_set SPI2_IRQHandler,Default_Handler .weak USART1_IRQHandler .thumb_set USART1_IRQHandler,Default_Handler .weak USART2_IRQHandler .thumb_set USART2_IRQHandler,Default_Handler .weak USART3_IRQHandler .thumb_set USART3_IRQHandler,Default_Handler .weak 
EXTI15_10_IRQHandler .thumb_set EXTI15_10_IRQHandler,Default_Handler .weak RTC_Alarm_IRQHandler .thumb_set RTC_Alarm_IRQHandler,Default_Handler .weak USBWakeUp_IRQHandler .thumb_set USBWakeUp_IRQHandler,Default_Handler .weak TIM8_BRK_IRQHandler .thumb_set TIM8_BRK_IRQHandler,Default_Handler .weak TIM8_UP_IRQHandler .thumb_set TIM8_UP_IRQHandler,Default_Handler .weak TIM8_TRG_COM_IRQHandler .thumb_set TIM8_TRG_COM_IRQHandler,Default_Handler .weak TIM8_CC_IRQHandler .thumb_set TIM8_CC_IRQHandler,Default_Handler .weak ADC3_IRQHandler .thumb_set ADC3_IRQHandler,Default_Handler .weak FSMC_IRQHandler .thumb_set FSMC_IRQHandler,Default_Handler .weak SDIO_IRQHandler .thumb_set SDIO_IRQHandler,Default_Handler .weak TIM5_IRQHandler .thumb_set TIM5_IRQHandler,Default_Handler .weak SPI3_IRQHandler .thumb_set SPI3_IRQHandler,Default_Handler .weak UART4_IRQHandler .thumb_set UART4_IRQHandler,Default_Handler .weak UART5_IRQHandler .thumb_set UART5_IRQHandler,Default_Handler .weak TIM6_IRQHandler .thumb_set TIM6_IRQHandler,Default_Handler .weak TIM7_IRQHandler .thumb_set TIM7_IRQHandler,Default_Handler .weak DMA2_Channel1_IRQHandler .thumb_set DMA2_Channel1_IRQHandler,Default_Handler .weak DMA2_Channel2_IRQHandler .thumb_set DMA2_Channel2_IRQHandler,Default_Handler .weak DMA2_Channel3_IRQHandler .thumb_set DMA2_Channel3_IRQHandler,Default_Handler .weak DMA2_Channel4_5_IRQHandler .thumb_set DMA2_Channel4_5_IRQHandler,Default_Handler /************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
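For readers more comfortable in C, the copy and zero-fill loops in the Reset_Handler above amount to the following; the _sidata/_sdata/_edata/_sbss/_ebss symbols come from the linker script exactly as in the assembly. This is only an illustration of the logic: the startup file keeps it in assembly so that nothing depends on .data or .bss being ready, and a C version is safe only if it avoids that dependency itself.

#include <stdint.h>

extern uint32_t _sidata;   /* load address of .data in flash */
extern uint32_t _sdata;    /* start of .data in RAM          */
extern uint32_t _edata;    /* end of .data in RAM            */
extern uint32_t _sbss;     /* start of .bss                  */
extern uint32_t _ebss;     /* end of .bss                    */

static void init_data_and_bss(void)
{
    uint32_t *src = &_sidata;
    uint32_t *dst = &_sdata;

    while (dst < &_edata)        /* mirrors CopyDataInit / LoopCopyDataInit */
        *dst++ = *src++;

    for (dst = &_sbss; dst < &_ebss; )   /* mirrors FillZerobss / LoopFillZerobss */
        *dst++ = 0u;
}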
mgild/solana-pubkey-compare
2,981
src/asm/cmp_pubkey_eq.s
//! Optimized BPF assembly implementation for 32-byte public key comparison
//!
//! This assembly function provides maximum performance for comparing Solana
//! public keys by leveraging BPF's 64-bit memory operations and conditional
//! jumps for early exit optimization.
//!
//! ## Performance Characteristics
//! - **Best case**: 11 instructions (keys differ in first 8 bytes)
//! - **Worst case**: 19 instructions (keys are identical)
//! - **Memory ops**: 2-8 loads depending on where difference is found
//! - **Branches**: 1-4 conditional jumps with early termination
//!
//! ## Instruction Breakdown
//! - 2x `ldxdw` per 8-byte chunk (load 64-bit values)
//! - 1x `jne` per chunk (conditional jump on not-equal)
//! - 1x `lddw` + `exit` for return value
//!
//! ## Algorithm
//! 1. Load 8 bytes from each key at offset 0, compare, exit if different
//! 2. Load 8 bytes from each key at offset 8, compare, exit if different
//! 3. Load 8 bytes from each key at offset 16, compare, exit if different
//! 4. Load 8 bytes from each key at offset 24, compare, exit if different
//! 5. Return true (1) if all chunks match
//!
//! ## Register Usage
//! - r0: Return value (0 = false, 1 = true)
//! - r1: Pointer to first key (lhs_ptr parameter)
//! - r2: Pointer to second key (rhs_ptr parameter)
//! - r3: Temporary for first key's 8-byte chunk
//! - r4: Temporary for second key's 8-byte chunk

.section .text
.globl __solana_pubkey_compare__fast_eq
.type __solana_pubkey_compare__fast_eq, @function

__solana_pubkey_compare__fast_eq:
    // Function parameters: r1 = lhs_ptr, r2 = rhs_ptr
    // Returns: r0 = 1 if equal, 0 if not equal

    // Compare bytes 0-7: Load first 64-bit chunk from both keys
    ldxdw r3, [r1+0]        // r3 = first 8 bytes of lhs
    ldxdw r4, [r2+0]        // r4 = first 8 bytes of rhs
    jne r3, r4, not_equal   // Early exit if chunks differ

    // Compare bytes 8-15: Load second 64-bit chunk from both keys
    ldxdw r3, [r1+8]        // r3 = bytes 8-15 of lhs
    ldxdw r4, [r2+8]        // r4 = bytes 8-15 of rhs
    jne r3, r4, not_equal   // Early exit if chunks differ

    // Compare bytes 16-23: Load third 64-bit chunk from both keys
    ldxdw r3, [r1+16]       // r3 = bytes 16-23 of lhs
    ldxdw r4, [r2+16]       // r4 = bytes 16-23 of rhs
    jne r3, r4, not_equal   // Early exit if chunks differ

    // Compare bytes 24-31: Load fourth 64-bit chunk from both keys
    ldxdw r3, [r1+24]       // r3 = bytes 24-31 of lhs
    ldxdw r4, [r2+24]       // r4 = bytes 24-31 of rhs
    jne r3, r4, not_equal   // Early exit if chunks differ

    // All 32 bytes match - return true
    lddw r0, 1              // Load immediate value 1 into return register
    exit                    // Return to caller

not_equal:
    // Keys differ - return false
    lddw r0, 0              // Load immediate value 0 into return register
    exit                    // Return to caller

.size __solana_pubkey_compare__fast_eq, .-__solana_pubkey_compare__fast_eq
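A portable C rendering of the same early-exit strategy can serve as a reference implementation or for host-side tests. The function name below and the use of memcpy (to avoid the alignment assumptions the BPF ldxdw version is allowed to make) are choices for this sketch, not part of the original crate.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Compare two 32-byte public keys as four 64-bit chunks, bailing out on the
 * first chunk that differs, mirroring the structure of the BPF routine above. */
static bool pubkey_eq(const uint8_t lhs[32], const uint8_t rhs[32])
{
    for (size_t off = 0; off < 32; off += 8) {
        uint64_t a, b;
        memcpy(&a, lhs + off, sizeof a);
        memcpy(&b, rhs + off, sizeof b);
        if (a != b)
            return false;   /* early exit, like jne ... not_equal */
    }
    return true;
}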
mgjv/M6502
2,632
assembly/address_modes.test.s
; Test 6502 address modes .include "test.inc" ; test we can use a label as address LDA #$ab STA variable LDX #$00 LDX variable VRFY :+ JMP :++ : TestStart $ff TestAddress variable, $ab TestX $ab TestEnd ; Test specific address modes : LDX #$de ; Immediate addressing mode STX $10 ; Zero page addressing mode STX $0100 ; Absolute addressing mode LDY #$04 STX $10,Y ; Zero page,Y addressing mode VRFY :+ JMP :++ : TestStart $01 TestX $de TestY $04 TestAddress $0100, $de TestAddress $0010, $de TestAddress $0014, $de TestEnd ; test specific address modes : LDY #$ab ; Immediate addressing mode STY $70 ; Zero page addressing mode STY $0200 ; Absolute addressing mode LDX #$03 STY $60,X ; Zero page,X addressing mode VRFY :+ JMP :++ : TestStart $02 TestY $ab TestX $03 TestAddress $0200, $ab TestAddress $0070, $ab TestAddress $0063, $ab TestEnd ; test specific address modes : LDA #$7a ; Immediate addressing mode STA $70 ; Zero page addressing mode STA $0300 ; Absolute addressing mode LDX #$02 LDY #$04 STA $60,X ; Zero page,X addressing mode STA $60,Y ; Zero page,Y addressing mode STA $0310, X ; Absolute,X addressing mode STA $0310, Y ; Absolute,X addressing mode VRFY :+ JMP :++ : TestStart $03 TestA $7a TestX $02 TestY $04 TestAddress $0300, $7a TestAddress $0070, $7a TestAddress $0062, $7a TestAddress $0064, $7a TestAddress $0312, $7a TestAddress $0314, $7a TestEnd ; Test relative mode, conditional branching only : LDA #$ff LDX #$ff LDY #$ff CLC BCC loc2 FAIL loc1: LDX #$02 CLC BCC loc3 FAIL loc2: LDA #$01 CLC BCC loc1 FAIL loc3: LDY #$03 VRFY :+ JMP :++ : TestStart $04 TestA $01 TestX $02 TestY $03 TestEnd ; Indirect addressing ZeroPage,X pre-index : LDA #$20 ; low byte STA $13 LDA #$40 ; high byte STA $14 LDX #$03 LDA #$ee STA ($10,X) ; Indirect addressing Zeropage,Y post-index LDA #$60 ; low byte STA $20 LDA #$40 ; high byte STA $21 LDY #$03 LDA #$cc STA ($20),Y VRFY :+ JMP :++ : TestStart $05 TestAddress $4020, $ee TestAddress $4063, $cc TestEnd ; End of all tests : HALT .data variable: .byte $00
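A property worth keeping in mind when reading the indexed cases above is that the 6502's zero-page indexed modes compute their effective address modulo 256, so they can never leave page zero, whereas the absolute indexed modes simply add the index and may cross a page. A small sketch of how an emulator might express the two calculations; the helper names are invented.

#include <stdint.h>

/* Zero page,X / zero page,Y: wraps within $0000-$00FF. */
static uint16_t ea_zeropage_indexed(uint8_t operand, uint8_t index)
{
    return (uint8_t)(operand + index);
}

/* Absolute,X / absolute,Y: plain 16-bit addition, may cross into the next page. */
static uint16_t ea_absolute_indexed(uint16_t operand, uint8_t index)
{
    return (uint16_t)(operand + index);
}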
mgjv/M6502
1,882
assembly/flags.test.s
; Test setting of flags .include "test.inc" ; TODO Add tests for carry flag ; TODO Add tests for decimal and interrupt flags ; Ensure LDA sets zero flag LDA #$00 VRFY :+ JMP :++ : TestStart $01 TestZeroSet TestEnd ; Ensure LDA clears zero flag : LDA #$01 VRFY :+ JMP :++ : TestStart $02 TestZeroClear TestEnd ; Ensure LDA sets negative flag : LDA #$FF VRFY :+ JMP :++ : TestStart $03 TestNegativeSet TestEnd ; Ensure LDA clears negative flag : LDA #$01 VRFY :+ JMP :++ : TestStart $04 TestNegativeClear TestEnd ; Ensure Overflow flag gets set correctly ; 1 + 1 = 2 : CLC LDA #$01 ADC #$01 VRFY :+ JMP :++ : TestStart $10 TestOverflowClear TestEnd ; 1 + -1 = 0; 1 + 255 = 0 : CLC LDA #$01 ADC #$ff VRFY :+ JMP :++ : TestStart $11 TestOverflowClear TestEnd ; 127 + 1 = 128 : CLC LDA #$7f ADC #$01 VRFY :+ JMP :++ : TestStart $12 TestOverflowSet TestEnd ; -128 + -1 = -129 : CLC LDA #$80 ADC #$ff VRFY :+ JMP :++ : TestStart $13 TestOverflowSet TestEnd ; Check we can use CLV : CLC LDA #$7f ADC #$01 ; Overflow is set here CLV VRFY :+ JMP :++ : TestStart $14 TestOverflowClear TestEnd ; 0 - 1 = -1, V clear : SEC LDA #$00 SBC #$01 VRFY :+ JMP :++ : TestStart $15 TestOverflowClear TestEnd ; -128 - 1 = -129, V set : SEC LDA #$80 SBC #$01 VRFY :+ JMP :++ : TestStart $16 TestOverflowSet TestEnd ; 127 - -1 = 128, V set : SEC LDA #$7F SBC #$FF VRFY :+ JMP :++ : TestStart $17 TestOverflowSet TestEnd ; End of all tests : HALT
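The overflow cases tested above (127 + 1 sets V, 1 + 255 clears it, and so on) follow the usual signed-overflow rule: V is set when both operands have the same sign but the result's sign differs. A sketch of that rule as emulator-style C, using the common bitwise formula; the function name is invented.

#include <stdbool.h>
#include <stdint.h>

/* ADC-style overflow: set when A and M agree in sign but the sum does not.
 * Matches the vectors above, e.g. 0x7F + 0x01 sets V, 0x01 + 0xFF clears it. */
static bool adc_overflow(uint8_t a, uint8_t m, uint8_t carry_in, uint8_t *result)
{
    uint16_t sum = (uint16_t)a + m + carry_in;
    *result = (uint8_t)sum;
    return (~(a ^ m) & (a ^ *result) & 0x80u) != 0;
}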
mgjv/M6502
1,921
assembly/add_with_carry.test.s
; Tests for ADC and SBC ; Also see some tests for flags in 'flags' test ; TODO This needs many more tests .include "test.inc" ; Basic ADC flag setting CLC LDA #$00 ADC #$01 ADC #$01 VRFY :+ JMP :++ : TestStart $01 TestA $02 TestCarryClear TestOverflowClear TestZeroClear TestNegativeClear TestEnd ; ADC Test that we roll over, and that rthe correct flags are set : CLC LDA #$ff ADC #$01 VRFY :+ JMP :++ : TestStart $02 TestA $00 TestCarrySet TestOverflowClear TestZeroSet TestNegativeClear TestEnd ; ADC Check that carry flag is properly used, and reset : SEC ADC #$00 VRFY :+ JMP :++ : TestStart $03 TestA $01 TestCarryClear TestEnd ; ADC Test that we can clear the carry flag : CLC LDA #$ff ADC #$01 CLC ADC #$01 VRFY :+ JMP :++ : TestStart $04 TestA $01 TestCarryClear TestEnd ; SBC: 1 - 1 = 0; C set : SEC LDA #$01 SBC #$01 VRFY :+ JMP :++ : TestStart $10 TestA $00 TestZeroSet TestCarrySet TestOverflowClear TestNegativeClear TestEnd ; SBC 1 - 2 = -1/ff; C clear : SEC LDA #$01 SBC #$02 VRFY :+ JMP :++ : TestStart $11 TestA $ff TestZeroClear TestCarryClear TestOverflowClear TestNegativeSet TestEnd ; SBC -128 - 1 = -129; V set, C clear : SEC LDA #$80 SBC #$01 VRFY :+ JMP :++ : TestStart $12 TestA $7f TestZeroClear TestCarrySet TestOverflowSet TestNegativeClear TestEnd ; Check that carry flag is properly used, and reset : CLC LDA #$02 SBC #$00 VRFY :+ JMP :++ : TestStart $13 TestA $01 TestCarrySet TestEnd ; End of all tests : HALT
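The carry behaviour exercised above (ADC adds the carry in and sets carry when the result exceeds $FF; SBC treats a clear carry as a borrow) is commonly implemented by reusing the ADC data path with the operand inverted, since A - M - (1 - C) equals A + ~M + C in two's complement. A sketch under that assumption, in binary (non-decimal) mode only; the struct and function names are invented.

#include <stdint.h>

typedef struct {
    uint8_t a;
    uint8_t carry;   /* 0 or 1 */
} acc_t;

/* Binary-mode ADC: add memory plus carry-in, carry-out when the sum exceeds $FF. */
static void adc(acc_t *cpu, uint8_t m)
{
    uint16_t sum = (uint16_t)cpu->a + m + cpu->carry;
    cpu->carry = (sum > 0xFFu);
    cpu->a = (uint8_t)sum;
}

/* SBC reuses ADC with the operand complemented: A - M - (1 - C) == A + ~M + C.
 * E.g. SEC; A=$01; SBC #$01 gives A=$00 with carry set, as test $10 expects. */
static void sbc(acc_t *cpu, uint8_t m)
{
    adc(cpu, (uint8_t)~m);
}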
mgjv/M6502
3,622
assembly/comparison.test.s
; Comparison operation tests ; CMP, CPX, CPY ; ; See http://www.6502.org/tutorials/compare_instructions.html ; for comprehensive explanation of logic .include "test.inc" NZCmask .set %10000011 ; TODO Address mode tests for at least one of these ; CMP: A > memory, pos result -> C set LDA #$01 STA r1 LDA #$02 CMP r1 STA a1 ; store accumulator for check StStatus s1, NZCmask ; CMP: A > memory, neg result -> N and C set LDA #$ff CMP r1 ; r1 should still have $01 in it StStatus s2, NZCmask ; CMP: A < memory pos result -> none set LDA #$ff STA r2 LDA #$01 CMP r2 StStatus s3, NZCmask ; CMP: A < memory neg result -> N set LDA #$02 STA r3 LDA #$01 CMP r3 StStatus s4, NZCmask ; CMP: A == memory -> Z, C set LDA #$ff STA r4 CMP r4 StStatus s5, NZCmask VRFY :+ JMP :++ : TestStart $01 TestAddress s1, $01 ; C set TestAddress a1, $02 ; should be unaffected TestAddress r1, $01 ; should be unaffected TestAddress s2, $81 ; N and C set TestAddress s3, $00 ; none of the flags should be set TestAddress s4, $80 ; Just N set TestAddress s5, $03 ; Z, C set TestEnd ; CPX: X > memory, pos result -> C set : LDX #$01 STX r1 LDX #$02 CPX r1 STX a1 ; store X for check StStatus s1, NZCmask ; CPX: X > memory, neg result -> N and C set LDX #$ff CPX r1 ; r1 should still have $01 in it StStatus s2, NZCmask ; CPX: X < memory pos result -> none set LDX #$ff STX r2 LDX #$01 CPX r2 StStatus s3, NZCmask ; CPX: X < memory neg result -> N set LDX #$02 STX r3 LDX #$01 CPX r3 StStatus s4, NZCmask ; CPX: X == memory -> Z, C set LDX #$ff STX r4 CPX r4 StStatus s5, NZCmask VRFY :+ JMP :++ : TestStart $01 TestAddress s1, $01 ; C set TestAddress a1, $02 ; should be unaffected TestAddress r1, $01 ; should be unaffected TestAddress s2, $81 ; N and C set TestAddress s3, $00 ; none of the flags should be set TestAddress s4, $80 ; Just N set TestAddress s5, $03 ; Z, C set TestEnd ; CPY: Y > memory, pos result -> C set : LDY #$01 STY r1 LDY #$02 CPY r1 STY a1 ; store X for check StStatus s1, NZCmask ; CPY: Y > memory, neg result -> N and C set LDY #$ff CPY r1 ; r1 should still have $01 in it StStatus s2, NZCmask ; CPY: Y < memory pos result -> none set LDY #$ff STY r2 LDY #$01 CPY r2 StStatus s3, NZCmask ; CPY: Y < memory neg result -> N set LDY #$02 STY r3 LDY #$01 CPY r3 StStatus s4, NZCmask ; CPY: Y == memory -> Z, C set LDY #$ff STY r4 CPY r4 StStatus s5, NZCmask VRFY :+ JMP :++ : TestStart $01 TestAddress s1, $01 ; C set TestAddress a1, $02 ; should be unaffected TestAddress r1, $01 ; should be unaffected TestAddress s2, $81 ; N and C set TestAddress s3, $00 ; none of the flags should be set TestAddress s4, $80 ; Just N set TestAddress s5, $03 ; Z, C set TestEnd ; End of all tests : HALT .data ; Some result variables to provide work memory r1: .byte $de r2: .byte $ad r3: .byte $be r4: .byte $af ; Some variables to store stack status in s1: .byte $de s2: .byte $ad s3: .byte $be s4: .byte $ef s5: .byte $ee ; Some variables to store accumulator in a1: .byte $de a2: .byte $ad a3: .byte $be a4: .byte $ef
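All three blocks above check the same rule: CMP, CPX and CPY compute register minus memory without borrow, discard the result, and update only N, Z and C. C is set when the register is greater than or equal to the operand, Z when they are equal, and N comes from bit 7 of the 8-bit difference. A sketch of that rule; the flag struct and function name are invented.

#include <stdbool.h>
#include <stdint.h>

typedef struct { bool n, z, c; } cmp_flags_t;

/* E.g. reg=$02, mem=$01 sets only C; reg=$ff, mem=$01 sets N and C;
 * reg == mem sets Z and C, matching the expected status bytes above. */
static cmp_flags_t compare(uint8_t reg, uint8_t mem)
{
    uint8_t diff = (uint8_t)(reg - mem);
    cmp_flags_t f = {
        .n = (diff & 0x80u) != 0,
        .z = reg == mem,
        .c = reg >= mem,
    };
    return f;
}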
mgjv/M6502
3,045
assembly/stack.test.s
; Test stack operation and the stack instructions ; PHA, PLA, PHP, PLP ; TSX, TXS .include "test.inc" ; Try to preserve current stack pointer. This assumes TXS and TSX work TSX STX sp_saved ; basic stack test #1: PHA ; NOTE: Ensure the first two tests remain together LDA #$ee PHA LDA #$dd PHA LDA #$11 PHA VRFY :+ JMP :++ : TestStart $01 TestStack $01, $11 TestStack $02, $dd TestStack $03, $ee TestEnd ; basic stack test #2: PLA ; NOTE: Ensure the first two tests remain together : LDA #$00 PLA ; We should now have $11 in A PLA ; We should now have $dd in A VRFY :+ ; remove the last test element from the stack PLA JMP :++ : TestStart $02 TestA $dd TestEnd ; Test TXS, TSX : LDX #$7f ; unlikely we'll be overwriting anything here, assuming stack started at $ff TXS LDX #$00 TSX VRFY :+ JMP :++ : TestStart $10 TestStackPointer $7f TestX $7f TestEnd ; Test PLP : LDA #%11111111 PHA ; clear all status flags we can clear LDA #01 ADC #01 ; This should have cleared Carry, Zero, Overflow and Negative CLI CLD PLP ; Pull the all-bits-set from stack and set status VRFY :+ JMP :++ : TestStart $10 TestCarrySet TestNegativeSet TestZeroSet TestOverflowSet TestInterruptSet ; The value of the BRK flag is indeterminate TestEnd ; Test PHP 1 : LDA #%11111111 PHA PLP ; All flags, except BRK should now be set (see previous test) StStatus a1, %11001111 ; Store A but mask out bits 4 and 5 PHP ; So, push them again PLA ; and pull them into A AND #%11001111 ; mask out bits 4 and 5 VRFY :+ JMP :++ : TestStart $20 TestAddress a1, %11001111 ; all but ignored and brk should be set TestA %11001111 TestEnd ; Test PHP 2 : LDA #$00 PHA PLP ; All flags should now be clear StStatus a1, %11001111 ; Store A but mask out bits 4 and 5 PHP ; Push them again PLA ; and pull them into A AND #%11001111 ; mask out bits 4 and 5 VRFY :+ JMP :++ : TestStart $21 TestAddress a1, %00 ; all bits should be clear TestA %00 TestEnd ; Test PHP 3 : LDA #$91 PHA PLP StStatus a1, %11001111 ; Store A but mask out bits 4 and 5 PHP ; Push them again PLA ; and pull them into A AND #%11001111 ; mask out bits 4 and 5 VRFY :+ JMP :++ : TestStart $22 TestAddress a1, $81 TestA $81 TestEnd ; End of all tests ; Restore the saved stack pointer : LDX sp_saved TXS HALT .data sp_saved: .byte $aa ; Some result variables to prevent needing too many test blocks r1: .byte $de r2: .byte $ad r3: .byte $be r4: .byte $af a1: .byte $de a2: .byte $ad a3: .byte $be a4: .byte $ef
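The TestStack checks above appear to index down from the current top of the stack, with offset $01 holding the byte pushed most recently. That follows from the 6502 stack living in page one and growing downward: PHA stores at $0100+SP and then decrements SP, while PLA increments SP first and then loads. A sketch of the two operations against a flat memory array; the struct and field names are invented and flag updates are omitted.

#include <stdint.h>

typedef struct {
    uint8_t a;
    uint8_t sp;                /* offset into page $01 */
    uint8_t mem[0x10000];
} cpu_t;

/* PHA: store A at $0100+SP, then post-decrement SP (stack grows downward). */
static void pha(cpu_t *c)
{
    c->mem[0x0100u + c->sp] = c->a;
    c->sp--;
}

/* PLA: pre-increment SP, then load A from $0100+SP (N/Z updates omitted here). */
static void pla(cpu_t *c)
{
    c->sp++;
    c->a = c->mem[0x0100u + c->sp];
}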
mgjv/M6502
1,188
assembly/increment.test.s
; Test for increments and decrements ; INX, INY, INC ; DEX, DEY, DEC .include "test.inc" ; Test increments LDX #$10 INX LDY #$20 INY LDA #$a0 STA $10 INC $10 STA s1 INC s1 VRFY :+ JMP :++ : TestStart $01 TestX $11 TestY $21 TestAddress $0010, $a1 TestAddress s1, $a1 TestEnd ; Test decrements : LDX #$10 DEX LDY #$20 DEY LDA #$a0 STA $10 DEC $10 STA s1 DEC s1 VRFY :+ JMP :++ : TestStart $01 TestX $0f TestY $1f TestAddress $0010, $9f TestAddress s1, $9f TestEnd ; Test remaining indexed address modes for INC and DEC : LDA #$10 STA s1 STA $10 LDA #$20 STA s2 STA $11 LDA #$30 STA s3 STA $12 LDX #$01 INC s1,X INC $10,X INX DEC s1,X DEC $10,X VRFY :+ JMP :++ : TestStart $03 TestAddress s1, $10 TestAddress s2, $21 TestAddress s3, $2f TestAddress $0010, $10 TestAddress $0011, $21 TestAddress $0012, $2f TestEnd ; End of all tests : HALT .data s1: .byte $00 s2: .byte $00 s3: .byte $00
mgjv/M6502
1,780
assembly/framework.test.s
; Test the framework itself .include "test.inc" ; Test StStatus LDA #$7f ADC #$01 ; This should set V and N and clear Z (result $80 = -128) STA a1 SEC SED SEI StStatus r1 StStatus r2, %01000001 ; Test we can pass our own mask LDA #$00 ; set Z, clear N. C and V should still be set StStatus r3 CLV StStatus r4 VRFY :+ JMP :++ : TestStart $01 TestAddress a1, $80 ; ensure macro has not affected A TestAddress r1, %11000001 TestAddress r2, %01000001 TestAddress r3, %01000011 TestAddress r4, %00000011 TestEnd ; Test that StStatus leaves the flags alone : LDA #$7f ADC #$01 ; This should set V and N and clear Z (result $80 = -128) STA a1 SEC SED SEI VRFY :+ StStatus a4 VRFY :++ JMP :+++ : TestStart $02 TestCarrySet TestDecimalSet TestInterruptSet TestOverflowSet TestNegativeSet TestZeroClear TestEnd : TestStart $03 TestCarrySet TestDecimalSet TestInterruptSet TestOverflowSet TestNegativeSet TestZeroClear TestEnd ; Test memory clearing : LDA #$ff ; put a marker value in the test locations STA r1 STA a4 ClearMemory r1, 8 VRFY :+ ClearMemory r2, 6, $ae VRFY :++ JMP :+++ : TestStart $10 TestAddress r1, $00 TestAddress a4, $00 TestEnd : TestStart $11 TestAddress r1, $00 TestAddress r2, $ae TestAddress a3, $ae TestAddress a4, $00 TestEnd ; End of all tests : HALT .data ; Some result variables to prevent needing too many test blocks r1: .byte $de r2: .byte $ad r3: .byte $be r4: .byte $af a1: .byte $de a2: .byte $ad a3: .byte $be a4: .byte $ef
mgjv/M6502
3,862
assembly/logical.test.s
; Logical operations ; AND, EOR, ORA .include "test.inc" ; AND: basic test for value and flags LDA #$80 ; clears zero, sets negative AND #$00 ; sets zero, clears negative - 00 VRFY :+ JMP :++ : TestStart $01 TestA $00 TestZeroSet TestNegativeClear TestEnd ; AND: basic test for value and flags : LDA #$ff LDX #$00 ; sets zero, clears negative AND #$ff ; clears zero, sets negative - ff VRFY :+ JMP :++ : TestStart $02 TestA $ff TestZeroClear TestNegativeSet TestEnd ; EOR: basic test for value and flags : LDA #$ff ; clears zero, sets negative EOR #$ff ; sets zero, clears negative 00 VRFY :+ JMP :++ : TestStart $03 TestA $00 TestZeroSet TestNegativeClear TestEnd ; EOR: basic test for value and flags : LDA #$00 ; sets zero, clears negative EOR #$ff ; clears zero, sets negative - ff VRFY :+ JMP :++ : TestStart $04 TestA $ff TestZeroClear TestNegativeSet TestEnd ; ORA: basic test for value and flags : LDA #$00 ; sets zero, clears negative LDX #$ff ; clears zero, sets negative ORA #$00 ; sets zero, clears negative 00 VRFY :+ JMP :++ : TestStart $05 TestA $00 TestZeroSet TestNegativeClear TestEnd ; ORA: basic test for value and flags : LDA #$00 ; sets zero, clears negative ORA #$ff ; clears zero, sets negative - ff VRFY :+ JMP :++ : TestStart $06 TestA $ff TestZeroClear TestNegativeSet TestEnd ; AND: some address modes and values : LDA #$0f AND #$ff STA r1 ; absolute address mode LDA #$f0 STA r2 LDA #$0f AND r2 STA r3 ; zero page LDA #%10000001 STA $10 LDA #%10001001 AND $10 STA r4 ; indirect absolute LDA #$01 STA a1 LDA #$03 STA a2 LDA #$ff LDX #$01 AND a1,X ; Should be $03 now STA a1 VRFY :+ JMP :++ : TestStart $10 ; absolute address mode tests TestAddress r1, $0f TestAddress r2, $f0 TestAddress r3, $00 ; zero page TestAddress r4, %10000001 ; indirect absolute TestAddress a1, $03 TestAddress a2, $03 TestEnd ; EOR: some address modes and values : LDA #$0f EOR #$ff STA r1 ; absolute address mode LDA #$f0 STA r2 LDA #$0f EOR r2 STA r3 ; zero page LDA #%10000001 STA $10 LDA #%10001001 EOR $10 STA r4 ; indirect absolute LDA #$01 STA a1 LDA #$03 STA a2 LDA #$ff LDX #$01 EOR a1,X STA a1 VRFY :+ JMP :++ : TestStart $11 ; absolute address mode tests TestAddress r1, $f0 TestAddress r2, $f0 TestAddress r3, $ff ; zero page TestAddress r4, %00001000 ; indirect absolute TestAddress a1, $fc TestAddress a2, $03 TestEnd ; ORA: some address modes and values : LDA #$0f ORA #$7a STA r1 ; absolute address mode LDA #$f0 STA r2 LDA #$0f ORA r2 STA r3 ; zero page LDA #%10000001 STA $10 LDA #%10001001 ORA $10 STA r4 ; indirect absolute LDA #$01 STA a1 LDA #$03 STA a2 LDA #$30 LDX #$01 ORA a1,X STA a1 VRFY :+ JMP :++ : TestStart $12 ; absolute address mode tests TestAddress r1, $7f TestAddress r2, $f0 TestAddress r3, $ff ; zero page TestAddress r4, %10001001 ; indirect absolute TestAddress a1, $33 TestAddress a2, $03 TestEnd ; End of all tests : HALT .data ; Some result variables to prevent needing too many test blocks r1: .byte $de r2: .byte $ad r3: .byte $be r4: .byte $af a1: .byte $de a2: .byte $ad a3: .byte $be a4: .byte $ef
mgjv/M6502
1,362
assembly/other.test.s
; Operators that don't fit any other category ; BIT, NOP ; Note: We're not going to test NOP. No need .include "test.inc" ; BIT tests LDA #$f0 STA $50 LDA #$0f STA r1 CLC LDA #$ff BIT $50 StStatus s1 LDA #$0f BIT $50 StStatus s2 LDA #$ff BIT r1 StStatus s3 LDA #$10 BIT r1 StStatus s4 LDA #$00 STA $60 LDA #$ff STA r2 LDA #$ff BIT $60 StStatus s5 LDA #$00 BIT r2 StStatus s6 LDA #$01 STA r3 LDA #$01 BIT r3 StStatus s7 LDA #$10 BIT r3 StStatus s8 VRFY :+ JMP :++ : TestStart $01 TestAddress s1, %11000000 TestAddress s2, %11000010 TestAddress s3, %00000000 TestAddress s4, %00000010 TestAddress s5, %00000010 TestAddress s6, %11000010 TestAddress s7, %00000000 TestAddress s8, %00000010 TestEnd ; Well, NOP is here. Not sure how to test that it doesn't do anything : NOP VRFY :+ JMP :++ : TestStart $02 TestEnd ; End of all tests : HALT .data ; Some result variables to prevent needing too many test blocks r1: .byte $de r2: .byte $ad r3: .byte $be r4: .byte $af s1: .byte $de s2: .byte $ad s3: .byte $be s4: .byte $ef s5: .byte $de s6: .byte $ad s7: .byte $be s8: .byte $ef
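The BIT expectations above encode the instruction's unusual flag behaviour: N and V are copied from bits 7 and 6 of the memory operand, while Z reflects A AND memory and the accumulator itself is left untouched. A hedged Python sketch of that rule (bit_flags is an illustrative name), cross-checked against the s1, s2 and s3 expectations.

def bit_flags(a, m):
    """Flag effect of 6502 BIT: N/V come from memory, Z from A & memory."""
    return {
        "N": (m >> 7) & 1,
        "V": (m >> 6) & 1,
        "Z": 1 if (a & m) & 0xFF == 0 else 0,
    }

assert bit_flags(0xFF, 0xF0) == {"N": 1, "V": 1, "Z": 0}  # s1: %11000000
assert bit_flags(0x0F, 0xF0) == {"N": 1, "V": 1, "Z": 1}  # s2: %11000010
assert bit_flags(0xFF, 0x0F) == {"N": 0, "V": 0, "Z": 0}  # s3: %00000000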
mgjv/M6502
5,606
assembly/bitshift.test.s
; Bitshift operations ; ASL, LSR, ROL, ROR .include "test.inc" ; ASL: basic test for value and flags LDA #$80 ; clears zero, sets negative CLC ASL ; sets zero, clears negative, sets carry VRFY :+ JMP :++ : TestStart $01 TestA $00 TestCarrySet TestZeroSet TestNegativeClear TestEnd ; ASL: basic test for value and flags : LDA #$7f LDX #$00 ; sets zero, clears negative SEC ASL ; clears zero, clears carry, sets negative - fe VRFY :+ JMP :++ : TestStart $02 TestA $fe TestCarryClear TestZeroClear TestNegativeSet TestEnd ; LSR: basic test for value and flags : LDA #$01 ; clears zero, clears negative LDX #$ff ; clears zero, sets negative CLC LSR ; sets zero, clears negative 00 VRFY :+ JMP :++ : TestStart $03 TestA $00 TestZeroSet TestNegativeClear TestCarrySet TestEnd ; LSR: basic test for value and flags : LDA #$fe LDX #$00 ; sets zero, clears negative SEC LSR ; clears carry, clears zero, clears negative - 7f VRFY :+ JMP :++ : TestStart $04 TestA $7f TestZeroClear TestNegativeClear TestCarryClear TestEnd ; LSR: ensure negative is indeed unset : LDA #$fe ; clears zero, sets negative SEC LSR ; clears carry, clears zero, clears negative - 7f VRFY :+ JMP :++ : TestStart $05 TestA $7f TestZeroClear TestNegativeClear TestCarryClear TestEnd ; ROL: basic test for value and flags : LDA #$80 ; clears zero, sets negative CLC ROL ; sets zero, clears negative, sets carry VRFY :+ JMP :++ : TestStart $06 TestA $00 TestCarrySet TestZeroSet TestNegativeClear TestEnd ; ROL: basic test for value and flags : LDA #$7f LDX #$00 ; sets zero, clears negative SEC ; sets carry ROL ; clears zero, clears carry, sets negative - ff VRFY :+ JMP :++ : TestStart $07 TestA $ff TestCarryClear TestZeroClear TestNegativeSet TestEnd ; ROR: basic test for value and flags : LDA #$01 ; clears zero, clears negative LDX #$ff ; clears zero, sets negative CLC ROR ; sets zero, clears negative, sets carry - 00 VRFY :+ JMP :++ : TestStart $08 TestA $00 TestZeroSet TestNegativeClear TestCarrySet TestEnd ; ROR: basic test for value and flags : LDA #$fe LDX #$00 ; sets zero, clears negative SEC ROR ; clears carry, clears zero, sets negative - ff VRFY :+ JMP :++ : TestStart $09 TestA $ff TestZeroClear TestNegativeSet TestCarryClear TestEnd ; ASL: some address modes and values : LDA #$0f ASL STA r1 ; absolute address mode LDA #$f0 STA r2 ASL r2 ; zero page LDA #%10000001 STA $10 ASL $10 ; indirect absolute LDA #$01 STA a1 LDA #$03 STA a2 LDX #$01 ASL a1,X VRFY :+ JMP :++ : TestStart $10 ; absolute address mode tests TestAddress r1, $1e TestAddress r2, $e0 ; zero page TestAddress $0010, %00000010 ; indirect absolute TestAddress a1, $01 TestAddress a2, $06 TestEnd ; LSR: some address modes and values : LDA #$0f LSR STA r1 ; absolute address mode LDA #$f0 STA r2 LSR r2 ; zero page LDA #%10000001 STA $10 LSR $10 ; indirect absolute LDA #$80 STA a1 LDA #$c0 STA a2 LDX #$01 LSR a1,X VRFY :+ JMP :++ : TestStart $11 ; absolute address mode tests TestAddress r1, $07 TestAddress r2, $78 ; zero page TestAddress $0010, %01000000 ; indirect absolute TestAddress a1, $80 TestAddress a2, $60 TestEnd ; ROL: some address modes and values : LDA #$0f CLC ROL STA r1 LDA #$0f SEC ROL STA r2 ; absolute address mode LDA #$f0 STA r3 CLC ROL r3 LDA #$f0 STA r4 SEC ROL r4 ; zero page LDA #%10000001 STA $10 SEC ROL $10 ; indirect absolute LDA #$80 STA a1 LDA #$0c STA a2 LDX #$01 CLC ROL a1,X VRFY :+ JMP :++ : TestStart $12 ; absolute address mode tests TestAddress r1, $1e TestAddress r2, $1f TestAddress r3, $e0 TestAddress r4, $e1 ; zero page TestAddress $0010, %00000011 ; 
indirect absolute TestAddress a1, $80 TestAddress a2, $18 TestEnd ; ROR: some address modes and values : LDA #$0f CLC ROR STA r1 LDA #$0f SEC ROR STA r2 ; absolute address mode LDA #$f0 STA r3 CLC ROR r3 LDA #$f0 STA r4 SEC ROR r4 ; zero page LDA #%10000001 STA $10 SEC ROR $10 ; indirect absolute LDA #$80 STA a1 LDA #$0c STA a2 LDX #$01 CLC ROR a1,X VRFY :+ JMP :++ : TestStart $13 ; absolute address mode tests TestAddress r1, $07 TestAddress r2, $87 TestAddress r3, $78 TestAddress r4, $f8 ; zero page TestAddress $0010, %11000000 ; indirect absolute TestAddress a1, $80 TestAddress a2, $06 TestEnd ; End of all tests : HALT .data ; Some result variables to prevent needing too many test blocks r1: .byte $de r2: .byte $ad r3: .byte $be r4: .byte $ef a1: .byte $de a2: .byte $ad a3: .byte $be a4: .byte $ef
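ASL and LSR shift a zero into the vacated bit, while ROL and ROR rotate the old carry in and push the displaced bit out to carry, which is what the mixed CLC/SEC cases in tests $12 and $13 are probing. A minimal Python sketch of the two rotates (rol/ror are illustrative names), checked against a couple of those expectations.

def rol(value, carry_in):
    """8-bit ROL: bit 7 goes to carry, carry_in becomes bit 0."""
    carry_out = (value >> 7) & 1
    return ((value << 1) | carry_in) & 0xFF, carry_out

def ror(value, carry_in):
    """8-bit ROR: bit 0 goes to carry, carry_in becomes bit 7."""
    carry_out = value & 1
    return (carry_in << 7) | (value >> 1), carry_out

assert rol(0x0F, 1) == (0x1F, 0)   # test $12: SEC / ROL on $0f -> r2 = $1f
assert rol(0x81, 1) == (0x03, 1)   # test $12: zero page $10 -> %00000011
assert ror(0x0F, 1) == (0x87, 1)   # test $13: SEC / ROR on $0f -> r2 = $87
assert ror(0x81, 1) == (0xC0, 1)   # test $13: zero page $10 -> %11000000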
mgjv/M6502
1,831
assembly/transfer.test.s
; Test the transfer instructions ; LDA, LDX, LDY ; STA, STX, STY ; TAX, TAY, TXA, TYA ; TSX, TXS .include "test.inc" ; load immediate (LDA, LDX, LDY) LDA #$01 LDX #$02 LDY #$03 VRFY :+ JMP :++ : TestStart $01 TestA $01 TestX $02 TestY $03 TestEnd ; Various storage tests (STA, STX, STY) : LDA #$40 LDX #$41 LDY #$42 ; Store in zero page STA $a0 STX $a1 STY $a2 ; Store in absolute address STA $3010 STX $3011 STY $3012 VRFY :+ JMP :++ : TestStart $02 TestAddress $00a0, $40 TestAddress $00a1, $41 TestAddress $00a2, $42 TestAddress $3010, $40 TestAddress $3011, $41 TestAddress $3012, $42 TestEnd ; Indexed addressing (STA, STX, STY) : LDA #$80 LDX #$00 STA $3020,X INX STA $3020,X INX STA $90,X INX STA $90,X LDX #$00 LDY #$8d STY $95,X INX STY $95,X LDY #$00 LDX #$33 STX $a0,Y INY STX $a0,Y VRFY :+ JMP :++ : TestStart $03 TestAddress $3020, $80 TestAddress $3021, $80 TestAddress $0092, $80 TestAddress $0093, $80 TestAddress $0095, $8d TestAddress $0096, $8d TestAddress $00a0, $33 TestAddress $00a1, $33 TestEnd ; TAX, TAY, TXA, TYA : LDX #$66 TXA STA $10 LDY #$77 TYA STA $11 LDA #$aa TAX LDA #$bb TAY VRFY :+ JMP :++ : TestStart $05 TestAddress $0010, $66 TestAddress $0011, $77 TestX $aa TestY $bb TestEnd ; TODO Work out tests for this. May need CMP to work ; TSX, TXS : TSX VRFY :+ JMP :++ : TestStart $10 ; How do I test that the stack pointer was actually copied to X? TestEnd ; End of all tests : HALT
mgjv/M6502
4,012
assembly/branches.test.s
; Test branching instructions .include "test.inc" ; This uses assumptions verified by the 'flags' test ; Conditional branch tests ; Test BEQ LDX #$ff ; load a canonical value in X and Y LDY #$ff LDA #$00 ; set the zero flag BEQ :+ FAIL ; this should be skipped FAIL : LDX #$03 ; this should be executed. LDA #$01 ; clear the zero flag BEQ :+ JMP :++ : FAIL ; should not jump here FAIL : LDY #$04 ; this should be executed. VRFY :+ JMP :++ : TestStart $01 TestX $03 TestY $04 TestEnd ; Test BNE : LDX #$ff ; load a canonical value in X and Y LDY #$ff LDA #$01 ; clear the zero flag BNE :+ FAIL ; this should be skipped FAIL : LDX #$03 ; this should be executed. LDA #$00 ; set the zero flag BNE :+ JMP :++ : FAIL ; should not jump here FAIL : LDY #$04 ; this should be executed VRFY :+ JMP :++ : TestStart $02 TestX $03 TestY $04 TestEnd ; Test BCS : LDX #$ff ; load a canonical value in X and Y LDY #$ff SEC ; set the carry flag BCS :+ FAIL ; this should be skipped FAIL : LDX #$03 ; this should be executed. CLC ; clear the carry flag BCS :+ JMP :++ : FAIL ; should not jump here FAIL : LDY #$04 ; this should be executed. VRFY :+ JMP :++ : TestStart $03 TestX $03 TestY $04 TestEnd ; Test BCC : LDX #$ff ; load a canonical value in X and Y LDY #$ff CLC ; clear the carry flag BCC :+ FAIL ; this should be skipped FAIL : LDX #$03 ; this should be executed. SEC ; set the carry flag BCC :+ JMP :++ : FAIL ; should not jump here FAIL : LDY #$04 ; this should be executed. VRFY :+ JMP :++ : TestStart $03 TestX $03 TestY $04 TestEnd ; Test BMI : LDX #$ff ; load a canonical value in X and Y LDY #$ff LDA #$ff ; set the negative flag BMI :+ FAIL ; this should be skipped FAIL : LDX #$03 ; this should be executed. LDA #$01 ; clear the negative flag BMI :+ JMP :++ : FAIL ; should not jump here FAIL : LDY #$04 ; this should be executed. VRFY :+ JMP :++ : TestStart $01 TestX $03 TestY $04 TestEnd ; Test BPL : LDX #$ff ; load a canonical value in X and Y LDY #$ff LDA #$01 ; clear the negative flag BPL :+ FAIL ; this should be skipped FAIL : LDX #$03 ; this should be executed. LDA #$ff ; set the negative flag BPL :+ JMP :++ : FAIL ; should not jump here FAIL : LDY #$04 ; this should be executed VRFY :+ JMP :++ : TestStart $02 TestX $03 TestY $04 TestEnd ; Test BVS : LDX #$ff ; load a canonical value in X and Y LDY #$ff LDA #$7f ; set the overflow flag ADC #$01 BVS :+ FAIL ; this should be skipped FAIL : LDX #$03 ; this should be executed. CLV ; clear the overflow flag BVS :+ JMP :++ : FAIL ; should not jump here FAIL : LDY #$04 ; this should be executed VRFY :+ JMP :++ : TestStart $02 TestX $03 TestY $04 TestEnd ; Test BVC : LDX #$ff ; load a canonical value in X and Y LDY #$ff CLV ; clear the overflow flag BVC :+ FAIL ; this should be skipped FAIL : LDX #$03 ; this should be executed. LDA #$7f ; set the overflow flag ADC #$01 BVC :+ JMP :++ : FAIL ; should not jump here FAIL : LDY #$04 ; this should be executed VRFY :+ JMP :++ : TestStart $02 TestX $03 TestY $04 TestEnd ; End of all tests : HALT
michaelscutari/ferros
1,980
src/strap.S
.section .text .globl s_trap_entry .align 2 # stvec base must be 4B-aligned s_trap_entry: // save s-mode "trapframe" addi sp, sp, -256 sd ra, 0(sp) // save return address sd sp, 8(sp) // save stack pointer sd gp, 16(sp) // save global pointer sd tp, 24(sp) // save thread pointer sd t0, 32(sp) // save temporaries sd t1, 40(sp) sd t2, 48(sp) sd t3, 56(sp) sd t4, 64(sp) sd t5, 72(sp) sd t6, 80(sp) sd s0, 88(sp) // save saved registers sd s1, 96(sp) sd s2, 104(sp) sd s3, 112(sp) sd s4, 120(sp) sd s5, 128(sp) sd s6, 136(sp) sd s7, 144(sp) sd s8, 152(sp) sd s9, 160(sp) sd s10, 168(sp) sd s11, 176(sp) sd a0, 184(sp) // save function arguments / return values sd a1, 192(sp) sd a2, 200(sp) sd a3, 208(sp) sd a4, 216(sp) sd a5, 224(sp) sd a6, 232(sp) sd a7, 240(sp) // handle the trap in rust call s_trap_handler // restore s-mode "trapframe" ld ra, 0(sp) // restore return address ld sp, 8(sp) // restore stack pointer ld gp, 16(sp) // restore global pointer // not this one, in case we changed to a different thread ld t0, 32(sp) // restore temporaries ld t1, 40(sp) ld t2, 48(sp) ld t3, 56(sp) ld t4, 64(sp) ld t5, 72(sp) ld t6, 80(sp) ld s0, 88(sp) // restore saved registers ld s1, 96(sp) ld s2, 104(sp) ld s3, 112(sp) ld s4, 120(sp) ld s5, 128(sp) ld s6, 136(sp) ld s7, 144(sp) ld s8, 152(sp) ld s9, 160(sp) ld s10, 168(sp) ld s11, 176(sp) ld a0, 184(sp) // restore function arguments / return values ld a1, 192(sp) ld a2, 200(sp) ld a3, 208(sp) ld a4, 216(sp) ld a5, 224(sp) ld a6, 232(sp) ld a7, 240(sp) addi sp, sp, 256 // return from trap sret
mid-kid/fusee-launcher
1,253
intermezzo.S
//
// Payload launcher stub.
//
.globl _start
.section ".text"
_start:
// First, we'll need to move ourselves _out_ of the target area.
// We'll copy down into the IRAM.
ldr r0, =INTERMEZZO_RELOCATED_ADDRESS
ldr r1, =post_relocation
ldr r2, =intermezzo_end
sub r2, r2, r1
bl copy
// Jump to the start of RAM, which should now contain the post-relocation code.
ldr r0, =INTERMEZZO_RELOCATED_ADDRESS
bx r0
.align 4
post_relocation:
// Next, we'll copy our payload down to the appropriate relocation address.
ldr r0, =RELOCATION_TARGET
ldr r1, =PAYLOAD_START_ADDR
ldr r2, =BEFORE_SPRAY_LENGTH
bl copy
ldr r0, =RELOCATION_TARGET
ldr r1, =BEFORE_SPRAY_LENGTH
add r0, r0, r1
ldr r1, =STACK_SPRAY_END
ldr r2, =AFTER_SPRAY_LENGTH
bl copy
// Finally, jump into the relocated target.
ldr r0, =ENTRY_POINT_ADDRESS
bx r0
//
// Simple block copy.
// r0 = destination address
// r1 = source address
// r2 = length in bytes
// Destroys r0-r3.
//
copy:
// Copy the word...
ldr r3, [r1], #4
str r3, [r0], #4
// And continue while we have words left to copy.
subs r2, r2, #4
bne copy
// Once we're done, return.
bx lr
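The copy routine above is a post-indexed, word-at-a-time block copy: r0 and r1 advance by 4 each iteration and r2 counts down to zero, so the length is assumed to be a multiple of 4. A rough Python equivalent of that loop, illustrative only (word_copy and the toy buffers are not part of the payload):

def word_copy(dst, src, dst_off, src_off, length):
    """Copy `length` bytes (a multiple of 4) one 32-bit word at a time."""
    assert length % 4 == 0
    for i in range(0, length, 4):
        dst[dst_off + i : dst_off + i + 4] = src[src_off + i : src_off + i + 4]

ram = bytearray(64)
payload = bytes(range(16))
word_copy(ram, payload, 8, 0, 16)   # like: r0 = dst, r1 = src, r2 = 16; bl copy
assert ram[8:24] == payload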
millerpandrew/sunbeam
2,608
runtime/compiled_code.s
section .data HEAP: times 1024 dq 0 section .text global start_here extern print_snake_val extern snake_error extern ischar extern isstring extern isarray main: mov rax, 581 mov QWORD [rsp + -8], rax mov rax, 813 mov QWORD [rsp + -16], rax mov rax, 869 mov QWORD [rsp + -24], rax mov rax, 869 mov QWORD [rsp + -32], rax mov rax, 893 mov QWORD [rsp + -40], rax mov rax, 261 mov QWORD [rsp + -48], rax mov rax, 957 mov QWORD [rsp + -56], rax mov rax, 893 mov QWORD [rsp + -64], rax mov rax, 917 mov QWORD [rsp + -72], rax mov rax, 869 mov QWORD [rsp + -80], rax mov rax, 805 mov QWORD [rsp + -88], rax mov QWORD [r15 + 0], 11 mov r9, QWORD [rsp + -8] mov QWORD [r15 + 8], r9 mov r9, QWORD [rsp + -16] mov QWORD [r15 + 16], r9 mov r9, QWORD [rsp + -24] mov QWORD [r15 + 24], r9 mov r9, QWORD [rsp + -32] mov QWORD [r15 + 32], r9 mov r9, QWORD [rsp + -40] mov QWORD [r15 + 40], r9 mov r9, QWORD [rsp + -48] mov QWORD [r15 + 48], r9 mov r9, QWORD [rsp + -56] mov QWORD [r15 + 56], r9 mov r9, QWORD [rsp + -64] mov QWORD [r15 + 64], r9 mov r9, QWORD [rsp + -72] mov QWORD [r15 + 72], r9 mov r9, QWORD [rsp + -80] mov QWORD [r15 + 80], r9 mov r9, QWORD [rsp + -88] mov QWORD [r15 + 88], r9 mov rax, r15 add r15, 96 add rax, 3 mov QWORD [rsp + -8], rax mov rdi, QWORD [rsp + -8] sub rsp, 16 call print_snake_val add rsp, 16 ret start_here: push r15 sub rsp, 8 lea r15, [rel HEAP] call main add rsp, 8 pop r15 ret overflow_err: mov rdi, 0 call snake_error arith_err: mov rdi, 1 call snake_error cmp_err: mov rdi, 2 call snake_error log_err: mov rdi, 3 call snake_error if_err: mov rdi, 4 call snake_error range_dec_err: mov rdi, 5 call snake_error index_oob_err: mov rdi, 6 call snake_error index_non_both: mov rdi, 7 call snake_error set_non_char: mov rdi, 8 call snake_error index_nan: mov rdi, 9 call snake_error length_not_array_error: mov rdi, 10 call snake_error
miro662/losgatos
1,451
kernel/src/entry.S
.section .text.boot
/// Sets processor in RISC-V ABI compliant state and starts Rust kernel code
.global entrypoint
entrypoint:
// disable interrupts
csrw sie, zero
// reset memory mapping
csrw satp, zero
// clear bss
la t1, _bss_start
la t2, _bss_end
1:
sd zero, (t1)
addi t1, t1, 8
blt t1, t2, 1b
// initialize stack
la sp, _stack_end
// load 0 to sscratch, making it a null pointer
csrwi sscratch, 0
// a0, a1 - hart id & dtb pointer - are being preserved
// and passed to entrypoint_rs as arguments
tail entrypoint_rs

.section .text
.global trap_handler
.align 4
trap_handler:
// store caller-saved registers
addi sp, sp, -16*8
sd ra, 0*8(sp)
sd a0, 1*8(sp)
sd a1, 2*8(sp)
sd a2, 3*8(sp)
sd a3, 4*8(sp)
sd a4, 5*8(sp)
sd a5, 6*8(sp)
sd a6, 7*8(sp)
sd a7, 8*8(sp)
sd t0, 9*8(sp)
sd t1, 10*8(sp)
sd t2, 11*8(sp)
sd t3, 12*8(sp)
sd t4, 13*8(sp)
sd t5, 14*8(sp)
sd t6, 15*8(sp)
csrr a0, scause
call trap_handler_rs
// restore caller-saved registers
ld ra, 0*8(sp)
ld a0, 1*8(sp)
ld a1, 2*8(sp)
ld a2, 3*8(sp)
ld a3, 4*8(sp)
ld a4, 5*8(sp)
ld a5, 6*8(sp)
ld a6, 7*8(sp)
ld a7, 8*8(sp)
ld t0, 9*8(sp)
ld t1, 10*8(sp)
ld t2, 11*8(sp)
ld t3, 12*8(sp)
ld t4, 13*8(sp)
ld t5, 14*8(sp)
ld t6, 15*8(sp)
addi sp, sp, 16*8
sret
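The bss-clearing loop in the entrypoint is meant to zero the range [_bss_start, _bss_end) in 8-byte steps, looping back while the cursor is still below the end. A tiny Python model of that intent (clear_bss and the toy buffer are illustrative, not part of the kernel):

def clear_bss(mem, start, end):
    """Zero [start, end) in 8-byte steps, mirroring the loop's intent."""
    cur = start
    while cur < end:
        mem[cur:cur + 8] = bytes(8)
        cur += 8

mem = bytearray(b"\xff" * 64)
clear_bss(mem, 16, 48)
assert mem[16:48] == bytes(32) and mem[:16] == b"\xff" * 16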
mirthfulLee/RPG
11,809
library/std/src/sys/pal/sgx/abi/entry.S
/* This symbol is used at runtime to figure out the virtual address that the */ /* enclave is loaded at. */ .section absolute .global IMAGE_BASE IMAGE_BASE: .section ".note.x86_64-fortanix-unknown-sgx", "", @note .align 4 .long 1f - 0f /* name length (not including padding) */ .long 3f - 2f /* desc length (not including padding) */ .long 1 /* type = NT_VERSION */ 0: .asciz "toolchain-version" /* name */ 1: .align 4 2: .long 1 /* desc - toolchain version number, 32-bit LE */ 3: .align 4 .section .rodata /* The XSAVE area needs to be a large chunk of readable memory, but since we are */ /* going to restore everything to its initial state (XSTATE_BV=0), only certain */ /* parts need to have a defined value. In particular: */ /* */ /* * MXCSR in the legacy area. This register is always restored if RFBM[1] or */ /* RFBM[2] is set, regardless of the value of XSTATE_BV */ /* * XSAVE header */ .align 64 .Lxsave_clear: .org .+24 .Lxsave_mxcsr: .short 0x1fbf /* We can store a bunch of data in the gap between MXCSR and the XSAVE header */ /* The following symbols point at read-only data that will be filled in by the */ /* post-linker. */ /* When using this macro, don't forget to adjust the linker version script! */ .macro globvar name:req size:req .global \name .protected \name .align \size .size \name , \size \name : .org .+\size .endm /* The base address (relative to enclave start) of the heap area */ globvar HEAP_BASE 8 /* The heap size in bytes */ globvar HEAP_SIZE 8 /* Value of the RELA entry in the dynamic table */ globvar RELA 8 /* Value of the RELACOUNT entry in the dynamic table */ globvar RELACOUNT 8 /* The enclave size in bytes */ globvar ENCLAVE_SIZE 8 /* The base address (relative to enclave start) of the enclave configuration area */ globvar CFGDATA_BASE 8 /* Non-zero if debugging is enabled, zero otherwise */ globvar DEBUG 1 /* The base address (relative to enclave start) of the enclave text section */ globvar TEXT_BASE 8 /* The size in bytes of enclave text section */ globvar TEXT_SIZE 8 /* The base address (relative to enclave start) of the enclave .eh_frame_hdr section */ globvar EH_FRM_HDR_OFFSET 8 /* The size in bytes of enclave .eh_frame_hdr section */ globvar EH_FRM_HDR_LEN 8 /* The base address (relative to enclave start) of the enclave .eh_frame section */ globvar EH_FRM_OFFSET 8 /* The size in bytes of enclave .eh_frame section */ globvar EH_FRM_LEN 8 .org .Lxsave_clear+512 .Lxsave_header: .int 0, 0 /* XSTATE_BV */ .int 0, 0 /* XCOMP_BV */ .org .+48 /* reserved bits */ .data .Laborted: .byte 0 /* TCS local storage section */ .equ tcsls_tos, 0x00 /* initialized by loader to *offset* from image base to TOS */ .equ tcsls_flags, 0x08 /* initialized by loader */ .equ tcsls_flag_secondary, 0 /* initialized by loader; 0 = standard TCS, 1 = secondary TCS */ .equ tcsls_flag_init_once, 1 /* initialized by loader to 0 */ /* 14 unused bits */ .equ tcsls_user_fcw, 0x0a .equ tcsls_user_mxcsr, 0x0c .equ tcsls_last_rsp, 0x10 /* initialized by loader to 0 */ .equ tcsls_panic_last_rsp, 0x18 /* initialized by loader to 0 */ .equ tcsls_debug_panic_buf_ptr, 0x20 /* initialized by loader to 0 */ .equ tcsls_user_rsp, 0x28 .equ tcsls_user_retip, 0x30 .equ tcsls_user_rbp, 0x38 .equ tcsls_user_r12, 0x40 .equ tcsls_user_r13, 0x48 .equ tcsls_user_r14, 0x50 .equ tcsls_user_r15, 0x58 .equ tcsls_tls_ptr, 0x60 .equ tcsls_tcs_addr, 0x68 .macro load_tcsls_flag_secondary_bool reg:req comments:vararg .ifne tcsls_flag_secondary /* to convert to a bool, must be the first bit */ .abort .endif mov 
$(1<<tcsls_flag_secondary),%e\reg and %gs:tcsls_flags,%\reg .endm /* We place the ELF entry point in a separate section so it can be removed by elf2sgxs */ .section .text_no_sgx, "ax" .Lelf_entry_error_msg: .ascii "Error: This file is an SGX enclave which cannot be executed as a standard Linux binary.\nSee the installation guide at https://edp.fortanix.com/docs/installation/guide/ on how to use 'cargo run' or follow the steps at https://edp.fortanix.com/docs/tasks/deployment/ for manual deployment.\n" .Lelf_entry_error_msg_end: .global elf_entry .type elf_entry,function elf_entry: /* print error message */ movq $2,%rdi /* write to stderr (fd 2) */ lea .Lelf_entry_error_msg(%rip),%rsi movq $.Lelf_entry_error_msg_end-.Lelf_entry_error_msg,%rdx .Lelf_entry_call: movq $1,%rax /* write() syscall */ syscall test %rax,%rax jle .Lelf_exit /* exit on error */ add %rax,%rsi sub %rax,%rdx /* all chars written? */ jnz .Lelf_entry_call .Lelf_exit: movq $60,%rax /* exit() syscall */ movq $1,%rdi /* exit code 1 */ syscall ud2 /* should not be reached */ /* end elf_entry */ /* This code needs to be called *after* the enclave stack has been setup. */ /* There are 3 places where this needs to happen, so this is put in a macro. */ .macro entry_sanitize_final /* Sanitize rflags received from user */ /* - DF flag: x86-64 ABI requires DF to be unset at function entry/exit */ /* - AC flag: AEX on misaligned memory accesses leaks side channel info */ pushfq andq $~0x40400, (%rsp) popfq /* check for abort */ bt $0,.Laborted(%rip) jc .Lreentry_panic .endm .text .global sgx_entry .type sgx_entry,function sgx_entry: /* save user registers */ mov %rcx,%gs:tcsls_user_retip mov %rsp,%gs:tcsls_user_rsp mov %rbp,%gs:tcsls_user_rbp mov %r12,%gs:tcsls_user_r12 mov %r13,%gs:tcsls_user_r13 mov %r14,%gs:tcsls_user_r14 mov %r15,%gs:tcsls_user_r15 mov %rbx,%gs:tcsls_tcs_addr stmxcsr %gs:tcsls_user_mxcsr fnstcw %gs:tcsls_user_fcw /* check for debug buffer pointer */ testb $0xff,DEBUG(%rip) jz .Lskip_debug_init mov %r10,%gs:tcsls_debug_panic_buf_ptr .Lskip_debug_init: /* reset cpu state */ mov %rdx, %r10 mov $-1, %rax mov $-1, %rdx xrstor .Lxsave_clear(%rip) lfence mov %r10, %rdx /* check if returning from usercall */ mov %gs:tcsls_last_rsp,%r11 test %r11,%r11 jnz .Lusercall_ret /* setup stack */ mov %gs:tcsls_tos,%rsp /* initially, RSP is not set to the correct value */ /* here. This is fixed below under "adjust stack". 
*/ /* check for thread init */ bts $tcsls_flag_init_once,%gs:tcsls_flags jc .Lskip_init /* adjust stack */ lea IMAGE_BASE(%rip),%rax add %rax,%rsp mov %rsp,%gs:tcsls_tos entry_sanitize_final /* call tcs_init */ /* store caller-saved registers in callee-saved registers */ mov %rdi,%rbx mov %rsi,%r12 mov %rdx,%r13 mov %r8,%r14 mov %r9,%r15 load_tcsls_flag_secondary_bool di /* RDI = tcs_init() argument: secondary: bool */ call tcs_init /* reload caller-saved registers */ mov %rbx,%rdi mov %r12,%rsi mov %r13,%rdx mov %r14,%r8 mov %r15,%r9 jmp .Lafter_init .Lskip_init: entry_sanitize_final .Lafter_init: /* call into main entry point */ load_tcsls_flag_secondary_bool cx /* RCX = entry() argument: secondary: bool */ call entry /* RDI, RSI, RDX, R8, R9 passed in from userspace */ mov %rax,%rsi /* RSI = return value */ /* NOP: mov %rdx,%rdx */ /* RDX = return value */ xor %rdi,%rdi /* RDI = normal exit */ .Lexit: /* clear general purpose register state */ /* RAX overwritten by ENCLU */ /* RBX set later */ /* RCX overwritten by ENCLU */ /* RDX contains return value */ /* RSP set later */ /* RBP set later */ /* RDI contains exit mode */ /* RSI contains return value */ xor %r8,%r8 xor %r9,%r9 xor %r10,%r10 xor %r11,%r11 /* R12 ~ R15 set by sgx_exit */ .Lsgx_exit: /* clear extended register state */ mov %rdx, %rcx /* save RDX */ mov $-1, %rax mov %rax, %rdx xrstor .Lxsave_clear(%rip) mov %rcx, %rdx /* restore RDX */ /* clear flags */ pushq $0 popfq /* restore user registers */ mov %gs:tcsls_user_r12,%r12 mov %gs:tcsls_user_r13,%r13 mov %gs:tcsls_user_r14,%r14 mov %gs:tcsls_user_r15,%r15 mov %gs:tcsls_user_retip,%rbx mov %gs:tcsls_user_rsp,%rsp mov %gs:tcsls_user_rbp,%rbp fldcw %gs:tcsls_user_fcw ldmxcsr %gs:tcsls_user_mxcsr /* exit enclave */ mov $0x4,%eax /* EEXIT */ enclu /* end sgx_entry */ .Lreentry_panic: orq $8,%rsp jmp abort_reentry /* This *MUST* be called with 6 parameters, otherwise register information */ /* might leak! */ .global usercall usercall: test %rcx,%rcx /* check `abort` function argument */ jnz .Lusercall_abort /* abort is set, jump to abort code (unlikely forward conditional) */ jmp .Lusercall_save_state /* non-aborting usercall */ .Lusercall_abort: /* set aborted bit */ movb $1,.Laborted(%rip) /* save registers in DEBUG mode, so that debugger can reconstruct the stack */ testb $0xff,DEBUG(%rip) jz .Lusercall_noreturn .Lusercall_save_state: /* save callee-saved state */ push %r15 push %r14 push %r13 push %r12 push %rbp push %rbx sub $8, %rsp fstcw 4(%rsp) stmxcsr (%rsp) movq %rsp,%gs:tcsls_last_rsp .Lusercall_noreturn: /* clear general purpose register state */ /* RAX overwritten by ENCLU */ /* RBX set by sgx_exit */ /* RCX overwritten by ENCLU */ /* RDX contains parameter */ /* RSP set by sgx_exit */ /* RBP set by sgx_exit */ /* RDI contains parameter */ /* RSI contains parameter */ /* R8 contains parameter */ /* R9 contains parameter */ xor %r10,%r10 xor %r11,%r11 /* R12 ~ R15 set by sgx_exit */ /* extended registers/flags cleared by sgx_exit */ /* exit */ jmp .Lsgx_exit .Lusercall_ret: movq $0,%gs:tcsls_last_rsp /* restore callee-saved state, cf. "save" above */ mov %r11,%rsp /* MCDT mitigation requires an lfence after ldmxcsr _before_ any of the affected */ /* vector instructions is used. We omit the lfence here as one is required before */ /* the jmp instruction anyway. 
*/ ldmxcsr (%rsp) fldcw 4(%rsp) add $8, %rsp entry_sanitize_final pop %rbx pop %rbp pop %r12 pop %r13 pop %r14 pop %r15 /* return */ mov %rsi,%rax /* RAX = return value */ /* NOP: mov %rdx,%rdx */ /* RDX = return value */ pop %r11 lfence jmp *%r11 /* The following functions need to be defined externally: ``` // Called by entry code on re-entry after exit extern "C" fn abort_reentry() -> !; // Called once when a TCS is first entered extern "C" fn tcs_init(secondary: bool); // Standard TCS entrypoint extern "C" fn entry(p1: u64, p2: u64, p3: u64, secondary: bool, p4: u64, p5: u64) -> (u64, u64); ``` */ .global get_tcs_addr get_tcs_addr: mov %gs:tcsls_tcs_addr,%rax pop %r11 lfence jmp *%r11 .global get_tls_ptr get_tls_ptr: mov %gs:tcsls_tls_ptr,%rax pop %r11 lfence jmp *%r11 .global set_tls_ptr set_tls_ptr: mov %rdi,%gs:tcsls_tls_ptr pop %r11 lfence jmp *%r11 .global take_debug_panic_buf_ptr take_debug_panic_buf_ptr: xor %rax,%rax xchg %gs:tcsls_debug_panic_buf_ptr,%rax pop %r11 lfence jmp *%r11
miwumiwumilumilelu/rCore-Tutorial
1,726
os/src/link_app.S
.align 3 .section .data .global _num_app _num_app: .quad 10 .quad app_0_start .quad app_1_start .quad app_2_start .quad app_3_start .quad app_4_start .quad app_5_start .quad app_6_start .quad app_7_start .quad app_8_start .quad app_9_start .quad app_9_end .section .data .global app_0_start .global app_0_end app_0_start: .incbin "../user/build/bin/ch2b_bad_address.bin" app_0_end: .section .data .global app_1_start .global app_1_end app_1_start: .incbin "../user/build/bin/ch2b_bad_instructions.bin" app_1_end: .section .data .global app_2_start .global app_2_end app_2_start: .incbin "../user/build/bin/ch2b_bad_register.bin" app_2_end: .section .data .global app_3_start .global app_3_end app_3_start: .incbin "../user/build/bin/ch2b_hello_world.bin" app_3_end: .section .data .global app_4_start .global app_4_end app_4_start: .incbin "../user/build/bin/ch2b_power_3.bin" app_4_end: .section .data .global app_5_start .global app_5_end app_5_start: .incbin "../user/build/bin/ch2b_power_5.bin" app_5_end: .section .data .global app_6_start .global app_6_end app_6_start: .incbin "../user/build/bin/ch2b_power_7.bin" app_6_end: .section .data .global app_7_start .global app_7_end app_7_start: .incbin "../user/build/bin/ch3b_yield0.bin" app_7_end: .section .data .global app_8_start .global app_8_end app_8_start: .incbin "../user/build/bin/ch3b_yield1.bin" app_8_end: .section .data .global app_9_start .global app_9_end app_9_start: .incbin "../user/build/bin/ch3b_yield2.bin" app_9_end:
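The _num_app table above is a count followed by num_app start addresses plus one final end address, so app i occupies [table[1 + i], table[2 + i]). A small Python sketch of slicing app ranges out of such a table; the kernel itself reads this table in Rust, and app_ranges plus the toy addresses here are purely illustrative.

def app_ranges(table):
    """table = [num_app, start_0, ..., start_{n-1}, end_{n-1}] as in _num_app."""
    num_app = table[0]
    bounds = table[1 : 1 + num_app + 1]          # n start addresses plus the final end
    return [(bounds[i], bounds[i + 1]) for i in range(num_app)]

# Toy addresses standing in for app_0_start .. app_9_end
table = [3, 0x8040_0000, 0x8040_2000, 0x8040_5000, 0x8040_9000]
assert app_ranges(table) == [
    (0x8040_0000, 0x8040_2000),
    (0x8040_2000, 0x8040_5000),
    (0x8040_5000, 0x8040_9000),
]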
mktmansour/MKT-KSA-Geolocation-Security
5,583
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/common/keccak2x/feat.S
/* MIT License Copyright (c) 2020 Bas Westerbaan Copyright (c) 2023: Hanno Becker, Vincent Hwang, Matthias J. Kannwischer, Bo-Yin Yang, and Shang-Yi Yang Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #if (__APPLE__ && __ARM_FEATURE_CRYPTO) || (__ARM_FEATURE_SHA3) .macro round // Execute theta, but without xoring into the state yet. // Compute parities p[i] = a[i] ^ a[5+i] ^ ... ^ a[20+i]. eor3 v25.16b, v0.16b, v5.16b, v10.16b eor3 v26.16b, v1.16b, v6.16b, v11.16b eor3 v27.16b, v2.16b, v7.16b, v12.16b eor3 v28.16b, v3.16b, v8.16b, v13.16b eor3 v29.16b, v4.16b, v9.16b, v14.16b eor3 v25.16b, v25.16b, v15.16b, v20.16b eor3 v26.16b, v26.16b, v16.16b, v21.16b eor3 v27.16b, v27.16b, v17.16b, v22.16b eor3 v28.16b, v28.16b, v18.16b, v23.16b eor3 v29.16b, v29.16b, v19.16b, v24.16b rax1 v30.2d, v29.2d, v26.2d // d[0] = rotl(p[1], 1) ^ p[4] rax1 v29.2d, v27.2d, v29.2d // d[3] = rotl(p[4], 1) ^ p[2] rax1 v27.2d, v25.2d, v27.2d // d[1] = rotl(p[2], 1) ^ p[0] rax1 v25.2d, v28.2d, v25.2d // d[4] = rotl(p[0], 1) ^ p[3] rax1 v28.2d, v26.2d, v28.2d // d[2] = rotl(p[3], 1) ^ p[1] // Xor parities from step theta into the state at the same time // as executing rho and pi. 
eor v0.16b, v0.16b, v30.16b mov v31.16b, v1.16b xar v1.2d, v6.2d, v27.2d, 20 xar v6.2d, v9.2d, v25.2d, 44 xar v9.2d, v22.2d, v28.2d, 3 xar v22.2d, v14.2d, v25.2d, 25 xar v14.2d, v20.2d, v30.2d, 46 xar v20.2d, v2.2d, v28.2d, 2 xar v2.2d, v12.2d, v28.2d, 21 xar v12.2d, v13.2d, v29.2d, 39 xar v13.2d, v19.2d, v25.2d, 56 xar v19.2d, v23.2d, v29.2d, 8 xar v23.2d, v15.2d, v30.2d, 23 xar v15.2d, v4.2d, v25.2d, 37 xar v4.2d, v24.2d, v25.2d, 50 xar v24.2d, v21.2d, v27.2d, 62 xar v21.2d, v8.2d, v29.2d, 9 xar v8.2d, v16.2d, v27.2d, 19 xar v16.2d, v5.2d, v30.2d, 28 xar v5.2d, v3.2d, v29.2d, 36 xar v3.2d, v18.2d, v29.2d, 43 xar v18.2d, v17.2d, v28.2d, 49 xar v17.2d, v11.2d, v27.2d, 54 xar v11.2d, v7.2d, v28.2d, 58 xar v7.2d, v10.2d, v30.2d, 61 xar v10.2d, v31.2d, v27.2d, 63 // Chi bcax v25.16b, v0.16b, v2.16b, v1.16b bcax v26.16b, v1.16b, v3.16b, v2.16b bcax v2.16b, v2.16b, v4.16b, v3.16b bcax v3.16b, v3.16b, v0.16b, v4.16b bcax v4.16b, v4.16b, v1.16b, v0.16b mov v0.16b, v25.16b mov v1.16b, v26.16b bcax v25.16b, v5.16b, v7.16b, v6.16b bcax v26.16b, v6.16b, v8.16b, v7.16b bcax v7.16b, v7.16b, v9.16b, v8.16b bcax v8.16b, v8.16b, v5.16b, v9.16b bcax v9.16b, v9.16b, v6.16b, v5.16b mov v5.16b, v25.16b mov v6.16b, v26.16b bcax v25.16b, v10.16b, v12.16b, v11.16b bcax v26.16b, v11.16b, v13.16b, v12.16b bcax v12.16b, v12.16b, v14.16b, v13.16b bcax v13.16b, v13.16b, v10.16b, v14.16b bcax v14.16b, v14.16b, v11.16b, v10.16b mov v10.16b, v25.16b mov v11.16b, v26.16b bcax v25.16b, v15.16b, v17.16b, v16.16b bcax v26.16b, v16.16b, v18.16b, v17.16b bcax v17.16b, v17.16b, v19.16b, v18.16b bcax v18.16b, v18.16b, v15.16b, v19.16b bcax v19.16b, v19.16b, v16.16b, v15.16b mov v15.16b, v25.16b mov v16.16b, v26.16b bcax v25.16b, v20.16b, v22.16b, v21.16b bcax v26.16b, v21.16b, v23.16b, v22.16b bcax v22.16b, v22.16b, v24.16b, v23.16b bcax v23.16b, v23.16b, v20.16b, v24.16b bcax v24.16b, v24.16b, v21.16b, v20.16b mov v20.16b, v25.16b mov v21.16b, v26.16b // iota ld1r {v25.2d}, [x1], #8 eor v0.16b, v0.16b, v25.16b .endm .align 4 .global f1600x2 .global _f1600x2 f1600x2: _f1600x2: stp d8, d9, [sp,#-16]! stp d10, d11, [sp,#-16]! stp d12, d13, [sp,#-16]! stp d14, d15, [sp,#-16]! mov x2, x0 mov x3, #24 ld1 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0], #64 ld1 {v4.2d, v5.2d, v6.2d, v7.2d}, [x0], #64 ld1 {v8.2d, v9.2d, v10.2d, v11.2d}, [x0], #64 ld1 {v12.2d, v13.2d, v14.2d, v15.2d}, [x0], #64 ld1 {v16.2d, v17.2d, v18.2d, v19.2d}, [x0], #64 ld1 {v20.2d, v21.2d, v22.2d, v23.2d}, [x0], #64 ld1 {v24.2d}, [x0] loop: round subs x3, x3, #1 cbnz x3, loop mov x0, x2 st1 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0], #64 st1 {v4.2d, v5.2d, v6.2d, v7.2d}, [x0], #64 st1 {v8.2d, v9.2d, v10.2d, v11.2d}, [x0], #64 st1 {v12.2d, v13.2d, v14.2d, v15.2d}, [x0], #64 st1 {v16.2d, v17.2d, v18.2d, v19.2d}, [x0], #64 st1 {v20.2d, v21.2d, v22.2d, v23.2d}, [x0], #64 st1 {v24.2d}, [x0] ldp d14, d15, [sp], #16 ldp d12, d13, [sp], #16 ldp d10, d11, [sp], #16 ldp d8, d9, [sp], #16 ret lr #endif
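The eor3/rax1 sequence above computes the theta step of Keccak-f[1600] for two states at once: column parities p[x], then d[x] = p[x-1] XOR rotl(p[x+1], 1), folded into the state while rho and pi are applied. A scalar Python sketch of just the theta step, using the same lane indexing a[x + 5*y] as the comments; this is a single-state illustration under those assumptions, not the 2-way vectorised code.

MASK64 = (1 << 64) - 1

def rotl64(v, r):
    return ((v << r) | (v >> (64 - r))) & MASK64

def theta(a):
    """Keccak theta on a 25-lane state indexed as a[x + 5*y]."""
    p = [a[x] ^ a[x + 5] ^ a[x + 10] ^ a[x + 15] ^ a[x + 20] for x in range(5)]
    d = [p[(x + 4) % 5] ^ rotl64(p[(x + 1) % 5], 1) for x in range(5)]
    return [a[i] ^ d[i % 5] for i in range(25)]

# Sanity check: theta is linear, so an all-zero state stays all zero
assert theta([0] * 25) == [0] * 25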
mktmansour/MKT-KSA-Geolocation-Security
76,935
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6960119f/avx2/vec256_ama_asm.S
#include "namespace.h" #define vec256_ama_asm CRYPTO_NAMESPACE(vec256_ama_asm) #define _vec256_ama_asm _CRYPTO_NAMESPACE(vec256_ama_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 a0 # qhasm: reg256 a1 # qhasm: reg256 a2 # qhasm: reg256 a3 # qhasm: reg256 a4 # qhasm: reg256 a5 # qhasm: reg256 a6 # qhasm: reg256 a7 # qhasm: reg256 a8 # qhasm: reg256 a9 # qhasm: reg256 a10 # qhasm: reg256 a11 # qhasm: reg256 a12 # qhasm: reg256 b0 # qhasm: reg256 b1 # qhasm: reg256 r0 # qhasm: reg256 r1 # qhasm: reg256 r2 # qhasm: reg256 r3 # qhasm: reg256 r4 # qhasm: reg256 r5 # qhasm: reg256 r6 # qhasm: reg256 r7 # qhasm: reg256 r8 # qhasm: reg256 r9 # qhasm: reg256 r10 # qhasm: reg256 r11 # qhasm: reg256 r12 # qhasm: reg256 r13 # qhasm: reg256 r14 # qhasm: reg256 r15 # qhasm: reg256 r16 # qhasm: reg256 r17 # qhasm: reg256 r18 # qhasm: reg256 r19 # qhasm: reg256 r20 # qhasm: reg256 r21 # qhasm: reg256 r22 # qhasm: reg256 r23 # qhasm: reg256 r24 # qhasm: reg256 r # qhasm: enter vec256_ama_asm .p2align 5 .global _vec256_ama_asm .global vec256_ama_asm _vec256_ama_asm: vec256_ama_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: b0 = mem256[ input_2 + 0 ] # asm 1: vmovupd 0(<input_2=int64#3),>b0=reg256#1 # asm 2: vmovupd 0(<input_2=%rdx),>b0=%ymm0 vmovupd 0( % rdx), % ymm0 # qhasm: a12 = mem256[ input_0 + 384 ] # asm 1: vmovupd 384(<input_0=int64#1),>a12=reg256#2 # asm 2: vmovupd 384(<input_0=%rdi),>a12=%ymm1 vmovupd 384( % rdi), % ymm1 # qhasm: a12 = a12 ^ mem256[ input_1 + 384 ] # asm 1: vpxor 384(<input_1=int64#2),<a12=reg256#2,>a12=reg256#2 # asm 2: vpxor 384(<input_1=%rsi),<a12=%ymm1,>a12=%ymm1 vpxor 384( % rsi), % ymm1, % ymm1 # qhasm: mem256[ input_0 + 384 ] = a12 # asm 1: vmovupd <a12=reg256#2,384(<input_0=int64#1) # asm 2: vmovupd <a12=%ymm1,384(<input_0=%rdi) vmovupd % ymm1, 384( % rdi) # qhasm: r12 = a12 & b0 # asm 1: vpand <a12=reg256#2,<b0=reg256#1,>r12=reg256#3 # asm 2: vpand <a12=%ymm1,<b0=%ymm0,>r12=%ymm2 vpand % ymm1, % ymm0, % ymm2 # qhasm: r13 = a12 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a12=reg256#2,>r13=reg256#4 # asm 2: vpand 32(<input_2=%rdx),<a12=%ymm1,>r13=%ymm3 vpand 32( % rdx), % ymm1, % ymm3 # qhasm: r14 = a12 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a12=reg256#2,>r14=reg256#5 # asm 2: vpand 64(<input_2=%rdx),<a12=%ymm1,>r14=%ymm4 vpand 64( % rdx), % ymm1, % ymm4 # qhasm: r15 = a12 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a12=reg256#2,>r15=reg256#6 # asm 2: vpand 96(<input_2=%rdx),<a12=%ymm1,>r15=%ymm5 vpand 96( % rdx), % ymm1, % ymm5 # qhasm: r16 = a12 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a12=reg256#2,>r16=reg256#7 # asm 2: vpand 128(<input_2=%rdx),<a12=%ymm1,>r16=%ymm6 vpand 128( % rdx), % ymm1, % ymm6 # qhasm: r17 = a12 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a12=reg256#2,>r17=reg256#8 # asm 2: vpand 160(<input_2=%rdx),<a12=%ymm1,>r17=%ymm7 vpand 160( % rdx), % ymm1, % ymm7 # qhasm: r18 = a12 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a12=reg256#2,>r18=reg256#9 # asm 2: vpand 192(<input_2=%rdx),<a12=%ymm1,>r18=%ymm8 vpand 192( % rdx), % ymm1, % ymm8 # qhasm: r19 = a12 & mem256[input_2 + 224] # 
asm 1: vpand 224(<input_2=int64#3),<a12=reg256#2,>r19=reg256#10 # asm 2: vpand 224(<input_2=%rdx),<a12=%ymm1,>r19=%ymm9 vpand 224( % rdx), % ymm1, % ymm9 # qhasm: r20 = a12 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a12=reg256#2,>r20=reg256#11 # asm 2: vpand 256(<input_2=%rdx),<a12=%ymm1,>r20=%ymm10 vpand 256( % rdx), % ymm1, % ymm10 # qhasm: r21 = a12 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a12=reg256#2,>r21=reg256#12 # asm 2: vpand 288(<input_2=%rdx),<a12=%ymm1,>r21=%ymm11 vpand 288( % rdx), % ymm1, % ymm11 # qhasm: r22 = a12 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a12=reg256#2,>r22=reg256#13 # asm 2: vpand 320(<input_2=%rdx),<a12=%ymm1,>r22=%ymm12 vpand 320( % rdx), % ymm1, % ymm12 # qhasm: r23 = a12 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a12=reg256#2,>r23=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a12=%ymm1,>r23=%ymm13 vpand 352( % rdx), % ymm1, % ymm13 # qhasm: r24 = a12 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a12=reg256#2,>r24=reg256#2 # asm 2: vpand 384(<input_2=%rdx),<a12=%ymm1,>r24=%ymm1 vpand 384( % rdx), % ymm1, % ymm1 # qhasm: r15 ^= r24 # asm 1: vpxor <r24=reg256#2,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r24=%ymm1,<r15=%ymm5,<r15=%ymm5 vpxor % ymm1, % ymm5, % ymm5 # qhasm: r14 ^= r24 # asm 1: vpxor <r24=reg256#2,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r24=%ymm1,<r14=%ymm4,<r14=%ymm4 vpxor % ymm1, % ymm4, % ymm4 # qhasm: r12 ^= r24 # asm 1: vpxor <r24=reg256#2,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r24=%ymm1,<r12=%ymm2,<r12=%ymm2 vpxor % ymm1, % ymm2, % ymm2 # qhasm: r11 = r24 # asm 1: vmovapd <r24=reg256#2,>r11=reg256#2 # asm 2: vmovapd <r24=%ymm1,>r11=%ymm1 vmovapd % ymm1, % ymm1 # qhasm: a11 = mem256[ input_0 + 352 ] # asm 1: vmovupd 352(<input_0=int64#1),>a11=reg256#15 # asm 2: vmovupd 352(<input_0=%rdi),>a11=%ymm14 vmovupd 352( % rdi), % ymm14 # qhasm: a11 = a11 ^ mem256[ input_1 + 352 ] # asm 1: vpxor 352(<input_1=int64#2),<a11=reg256#15,>a11=reg256#15 # asm 2: vpxor 352(<input_1=%rsi),<a11=%ymm14,>a11=%ymm14 vpxor 352( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 352 ] = a11 # asm 1: vmovupd <a11=reg256#15,352(<input_0=int64#1) # asm 2: vmovupd <a11=%ymm14,352(<input_0=%rdi) vmovupd % ymm14, 352( % rdi) # qhasm: r = a11 & b0 # asm 1: vpand <a11=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a11=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a11 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a11 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a11 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a11 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a11 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a11 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a11 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a11 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a11 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a11 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a11 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r22 ^= r # asm 1: vpxor <r=reg256#16,<r22=reg256#13,<r22=reg256#13 # asm 2: vpxor <r=%ymm15,<r22=%ymm12,<r22=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a11 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a11=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a11=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r23 ^= r # asm 1: vpxor <r=reg256#15,<r23=reg256#14,<r23=reg256#14 # asm 2: vpxor <r=%ymm14,<r23=%ymm13,<r23=%ymm13 vpxor % ymm14, % ymm13, % ymm13 # qhasm: r14 ^= r23 # asm 1: vpxor <r23=reg256#14,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r23=%ymm13,<r14=%ymm4,<r14=%ymm4 vpxor % ymm13, % ymm4, % ymm4 # qhasm: r13 ^= r23 # asm 1: vpxor <r23=reg256#14,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r23=%ymm13,<r13=%ymm3,<r13=%ymm3 vpxor % ymm13, % ymm3, % 
ymm3 # qhasm: r11 ^= r23 # asm 1: vpxor <r23=reg256#14,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r23=%ymm13,<r11=%ymm1,<r11=%ymm1 vpxor % ymm13, % ymm1, % ymm1 # qhasm: r10 = r23 # asm 1: vmovapd <r23=reg256#14,>r10=reg256#14 # asm 2: vmovapd <r23=%ymm13,>r10=%ymm13 vmovapd % ymm13, % ymm13 # qhasm: a10 = mem256[ input_0 + 320 ] # asm 1: vmovupd 320(<input_0=int64#1),>a10=reg256#15 # asm 2: vmovupd 320(<input_0=%rdi),>a10=%ymm14 vmovupd 320( % rdi), % ymm14 # qhasm: a10 = a10 ^ mem256[ input_1 + 320 ] # asm 1: vpxor 320(<input_1=int64#2),<a10=reg256#15,>a10=reg256#15 # asm 2: vpxor 320(<input_1=%rsi),<a10=%ymm14,>a10=%ymm14 vpxor 320( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 320 ] = a10 # asm 1: vmovupd <a10=reg256#15,320(<input_0=int64#1) # asm 2: vmovupd <a10=%ymm14,320(<input_0=%rdi) vmovupd % ymm14, 320( % rdi) # qhasm: r = a10 & b0 # asm 1: vpand <a10=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a10=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a10 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a10 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a10 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a10 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a10 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a10 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a10 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 
vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a10 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a10 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a10 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a10 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a10 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a10=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a10=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r22 ^= r # asm 1: vpxor <r=reg256#15,<r22=reg256#13,<r22=reg256#13 # asm 2: vpxor <r=%ymm14,<r22=%ymm12,<r22=%ymm12 vpxor % ymm14, % ymm12, % ymm12 # qhasm: r13 ^= r22 # asm 1: vpxor <r22=reg256#13,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r22=%ymm12,<r13=%ymm3,<r13=%ymm3 vpxor % ymm12, % ymm3, % ymm3 # qhasm: r12 ^= r22 # asm 1: vpxor <r22=reg256#13,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r22=%ymm12,<r12=%ymm2,<r12=%ymm2 vpxor % ymm12, % ymm2, % ymm2 # qhasm: r10 ^= r22 # asm 1: vpxor <r22=reg256#13,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r22=%ymm12,<r10=%ymm13,<r10=%ymm13 vpxor % ymm12, % ymm13, % ymm13 # qhasm: r9 = r22 # asm 1: vmovapd <r22=reg256#13,>r9=reg256#13 # asm 2: vmovapd <r22=%ymm12,>r9=%ymm12 vmovapd % ymm12, % ymm12 # qhasm: a9 = mem256[ input_0 + 288 ] # asm 1: vmovupd 288(<input_0=int64#1),>a9=reg256#15 # asm 2: vmovupd 288(<input_0=%rdi),>a9=%ymm14 vmovupd 288( % rdi), % ymm14 # qhasm: a9 = a9 ^ mem256[ input_1 + 288 ] # asm 1: vpxor 288(<input_1=int64#2),<a9=reg256#15,>a9=reg256#15 # asm 2: vpxor 288(<input_1=%rsi),<a9=%ymm14,>a9=%ymm14 vpxor 288( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 288 ] = a9 # asm 1: vmovupd <a9=reg256#15,288(<input_0=int64#1) # asm 2: vmovupd <a9=%ymm14,288(<input_0=%rdi) vmovupd % ymm14, 288( % rdi) # qhasm: r = a9 & b0 # asm 1: vpand <a9=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a9=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a9 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor 
<r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a9 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a9 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a9 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a9 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a9 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a9 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a9 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a9 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a9 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a9 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a9 & 
mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a9=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a9=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#15,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm14,<r21=%ymm11,<r21=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: r12 ^= r21 # asm 1: vpxor <r21=reg256#12,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r21=%ymm11,<r12=%ymm2,<r12=%ymm2 vpxor % ymm11, % ymm2, % ymm2 # qhasm: r11 ^= r21 # asm 1: vpxor <r21=reg256#12,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r21=%ymm11,<r11=%ymm1,<r11=%ymm1 vpxor % ymm11, % ymm1, % ymm1 # qhasm: r9 ^= r21 # asm 1: vpxor <r21=reg256#12,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r21=%ymm11,<r9=%ymm12,<r9=%ymm12 vpxor % ymm11, % ymm12, % ymm12 # qhasm: r8 = r21 # asm 1: vmovapd <r21=reg256#12,>r8=reg256#12 # asm 2: vmovapd <r21=%ymm11,>r8=%ymm11 vmovapd % ymm11, % ymm11 # qhasm: a8 = mem256[ input_0 + 256 ] # asm 1: vmovupd 256(<input_0=int64#1),>a8=reg256#15 # asm 2: vmovupd 256(<input_0=%rdi),>a8=%ymm14 vmovupd 256( % rdi), % ymm14 # qhasm: a8 = a8 ^ mem256[ input_1 + 256 ] # asm 1: vpxor 256(<input_1=int64#2),<a8=reg256#15,>a8=reg256#15 # asm 2: vpxor 256(<input_1=%rsi),<a8=%ymm14,>a8=%ymm14 vpxor 256( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 256 ] = a8 # asm 1: vmovupd <a8=reg256#15,256(<input_0=int64#1) # asm 2: vmovupd <a8=%ymm14,256(<input_0=%rdi) vmovupd % ymm14, 256( % rdi) # qhasm: r = a8 & b0 # asm 1: vpand <a8=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a8=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a8 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a8 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a8 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a8 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a8 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a8 & mem256[input_2 + 192] # asm 1: vpand 
192(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a8 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a8 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a8 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a8 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a8 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a8 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a8=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a8=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#15,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm14,<r20=%ymm10,<r20=%ymm10 vpxor % ymm14, % ymm10, % ymm10 # qhasm: r11 ^= r20 # asm 1: vpxor <r20=reg256#11,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r20=%ymm10,<r11=%ymm1,<r11=%ymm1 vpxor % ymm10, % ymm1, % ymm1 # qhasm: r10 ^= r20 # asm 1: vpxor <r20=reg256#11,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r20=%ymm10,<r10=%ymm13,<r10=%ymm13 vpxor % ymm10, % ymm13, % ymm13 # qhasm: r8 ^= r20 # asm 1: vpxor <r20=reg256#11,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r20=%ymm10,<r8=%ymm11,<r8=%ymm11 vpxor % ymm10, % ymm11, % ymm11 # qhasm: r7 = r20 # asm 1: vmovapd <r20=reg256#11,>r7=reg256#11 # asm 2: vmovapd <r20=%ymm10,>r7=%ymm10 vmovapd % ymm10, % ymm10 # qhasm: a7 = mem256[ input_0 + 224 ] # asm 1: vmovupd 224(<input_0=int64#1),>a7=reg256#15 # asm 2: vmovupd 224(<input_0=%rdi),>a7=%ymm14 vmovupd 224( % rdi), % ymm14 # qhasm: a7 = a7 ^ mem256[ input_1 + 224 ] # asm 1: vpxor 224(<input_1=int64#2),<a7=reg256#15,>a7=reg256#15 # asm 2: vpxor 224(<input_1=%rsi),<a7=%ymm14,>a7=%ymm14 vpxor 224( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 224 ] = a7 # asm 1: vmovupd <a7=reg256#15,224(<input_0=int64#1) # asm 2: vmovupd <a7=%ymm14,224(<input_0=%rdi) vmovupd % ymm14, 224( % rdi) # qhasm: r = a7 & b0 # asm 1: vpand 
<a7=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a7=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a7 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a7 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a7 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a7 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a7 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a7 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a7 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a7 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a7 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a7 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 320( % 
rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a7 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a7 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a7=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a7=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#15,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm14,<r19=%ymm9,<r19=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: r10 ^= r19 # asm 1: vpxor <r19=reg256#10,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r19=%ymm9,<r10=%ymm13,<r10=%ymm13 vpxor % ymm9, % ymm13, % ymm13 # qhasm: r9 ^= r19 # asm 1: vpxor <r19=reg256#10,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r19=%ymm9,<r9=%ymm12,<r9=%ymm12 vpxor % ymm9, % ymm12, % ymm12 # qhasm: r7 ^= r19 # asm 1: vpxor <r19=reg256#10,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r19=%ymm9,<r7=%ymm10,<r7=%ymm10 vpxor % ymm9, % ymm10, % ymm10 # qhasm: r6 = r19 # asm 1: vmovapd <r19=reg256#10,>r6=reg256#10 # asm 2: vmovapd <r19=%ymm9,>r6=%ymm9 vmovapd % ymm9, % ymm9 # qhasm: a6 = mem256[ input_0 + 192 ] # asm 1: vmovupd 192(<input_0=int64#1),>a6=reg256#15 # asm 2: vmovupd 192(<input_0=%rdi),>a6=%ymm14 vmovupd 192( % rdi), % ymm14 # qhasm: a6 = a6 ^ mem256[ input_1 + 192 ] # asm 1: vpxor 192(<input_1=int64#2),<a6=reg256#15,>a6=reg256#15 # asm 2: vpxor 192(<input_1=%rsi),<a6=%ymm14,>a6=%ymm14 vpxor 192( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 192 ] = a6 # asm 1: vmovupd <a6=reg256#15,192(<input_0=int64#1) # asm 2: vmovupd <a6=%ymm14,192(<input_0=%rdi) vmovupd % ymm14, 192( % rdi) # qhasm: r = a6 & b0 # asm 1: vpand <a6=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a6=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a6 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a6 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a6 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a6 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: 
vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a6 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a6 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a6 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a6 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a6 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a6 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a6 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a6 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a6=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a6=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#15,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm14,<r18=%ymm8,<r18=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: r9 ^= r18 # asm 1: vpxor <r18=reg256#9,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r18=%ymm8,<r9=%ymm12,<r9=%ymm12 vpxor % ymm8, % ymm12, % ymm12 # qhasm: r8 ^= r18 # asm 1: vpxor <r18=reg256#9,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r18=%ymm8,<r8=%ymm11,<r8=%ymm11 vpxor % ymm8, % ymm11, % ymm11 # qhasm: r6 ^= r18 # asm 1: vpxor <r18=reg256#9,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r18=%ymm8,<r6=%ymm9,<r6=%ymm9 vpxor % ymm8, % ymm9, % ymm9 # qhasm: r5 = r18 # asm 1: vmovapd <r18=reg256#9,>r5=reg256#9 # asm 2: vmovapd <r18=%ymm8,>r5=%ymm8 vmovapd % ymm8, % ymm8 # qhasm: a5 = mem256[ input_0 + 160 ] # asm 1: vmovupd 
160(<input_0=int64#1),>a5=reg256#15 # asm 2: vmovupd 160(<input_0=%rdi),>a5=%ymm14 vmovupd 160( % rdi), % ymm14 # qhasm: a5 = a5 ^ mem256[ input_1 + 160 ] # asm 1: vpxor 160(<input_1=int64#2),<a5=reg256#15,>a5=reg256#15 # asm 2: vpxor 160(<input_1=%rsi),<a5=%ymm14,>a5=%ymm14 vpxor 160( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 160 ] = a5 # asm 1: vmovupd <a5=reg256#15,160(<input_0=int64#1) # asm 2: vmovupd <a5=%ymm14,160(<input_0=%rdi) vmovupd % ymm14, 160( % rdi) # qhasm: r = a5 & b0 # asm 1: vpand <a5=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a5=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a5 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a5 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a5 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a5 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a5 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a5 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a5 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a5 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = 
a5 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a5 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a5 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a5 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a5=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a5=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#15,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm14,<r17=%ymm7,<r17=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: r8 ^= r17 # asm 1: vpxor <r17=reg256#8,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r17=%ymm7,<r8=%ymm11,<r8=%ymm11 vpxor % ymm7, % ymm11, % ymm11 # qhasm: r7 ^= r17 # asm 1: vpxor <r17=reg256#8,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r17=%ymm7,<r7=%ymm10,<r7=%ymm10 vpxor % ymm7, % ymm10, % ymm10 # qhasm: r5 ^= r17 # asm 1: vpxor <r17=reg256#8,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r17=%ymm7,<r5=%ymm8,<r5=%ymm8 vpxor % ymm7, % ymm8, % ymm8 # qhasm: r4 = r17 # asm 1: vmovapd <r17=reg256#8,>r4=reg256#8 # asm 2: vmovapd <r17=%ymm7,>r4=%ymm7 vmovapd % ymm7, % ymm7 # qhasm: a4 = mem256[ input_0 + 128 ] # asm 1: vmovupd 128(<input_0=int64#1),>a4=reg256#15 # asm 2: vmovupd 128(<input_0=%rdi),>a4=%ymm14 vmovupd 128( % rdi), % ymm14 # qhasm: a4 = a4 ^ mem256[ input_1 + 128 ] # asm 1: vpxor 128(<input_1=int64#2),<a4=reg256#15,>a4=reg256#15 # asm 2: vpxor 128(<input_1=%rsi),<a4=%ymm14,>a4=%ymm14 vpxor 128( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 128 ] = a4 # asm 1: vmovupd <a4=reg256#15,128(<input_0=int64#1) # asm 2: vmovupd <a4=%ymm14,128(<input_0=%rdi) vmovupd % ymm14, 128( % rdi) # qhasm: r = a4 & b0 # asm 1: vpand <a4=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a4=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a4 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a4 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a4 & mem256[input_2 + 96] # asm 1: vpand 
96(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a4 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a4 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a4 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a4 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a4 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a4 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a4 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a4 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a4 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a4=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a4=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#15,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm14,<r16=%ymm6,<r16=%ymm6 vpxor % ymm14, % ymm6, % ymm6 # qhasm: r7 ^= r16 # asm 1: vpxor <r16=reg256#7,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r16=%ymm6,<r7=%ymm10,<r7=%ymm10 vpxor % ymm6, % 
ymm10, % ymm10 # qhasm: r6 ^= r16 # asm 1: vpxor <r16=reg256#7,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r16=%ymm6,<r6=%ymm9,<r6=%ymm9 vpxor % ymm6, % ymm9, % ymm9 # qhasm: r4 ^= r16 # asm 1: vpxor <r16=reg256#7,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r16=%ymm6,<r4=%ymm7,<r4=%ymm7 vpxor % ymm6, % ymm7, % ymm7 # qhasm: r3 = r16 # asm 1: vmovapd <r16=reg256#7,>r3=reg256#7 # asm 2: vmovapd <r16=%ymm6,>r3=%ymm6 vmovapd % ymm6, % ymm6 # qhasm: a3 = mem256[ input_0 + 96 ] # asm 1: vmovupd 96(<input_0=int64#1),>a3=reg256#15 # asm 2: vmovupd 96(<input_0=%rdi),>a3=%ymm14 vmovupd 96( % rdi), % ymm14 # qhasm: a3 = a3 ^ mem256[ input_1 + 96 ] # asm 1: vpxor 96(<input_1=int64#2),<a3=reg256#15,>a3=reg256#15 # asm 2: vpxor 96(<input_1=%rsi),<a3=%ymm14,>a3=%ymm14 vpxor 96( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 96 ] = a3 # asm 1: vmovupd <a3=reg256#15,96(<input_0=int64#1) # asm 2: vmovupd <a3=%ymm14,96(<input_0=%rdi) vmovupd % ymm14, 96( % rdi) # qhasm: r = a3 & b0 # asm 1: vpand <a3=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a3=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a3 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a3 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a3 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a3 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a3 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a3 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a3 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor 
<r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a3 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a3 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a3 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a3 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a3 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a3=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a3=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#15,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm14,<r15=%ymm5,<r15=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: r6 ^= r15 # asm 1: vpxor <r15=reg256#6,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r15=%ymm5,<r6=%ymm9,<r6=%ymm9 vpxor % ymm5, % ymm9, % ymm9 # qhasm: r5 ^= r15 # asm 1: vpxor <r15=reg256#6,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r15=%ymm5,<r5=%ymm8,<r5=%ymm8 vpxor % ymm5, % ymm8, % ymm8 # qhasm: r3 ^= r15 # asm 1: vpxor <r15=reg256#6,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r15=%ymm5,<r3=%ymm6,<r3=%ymm6 vpxor % ymm5, % ymm6, % ymm6 # qhasm: r2 = r15 # asm 1: vmovapd <r15=reg256#6,>r2=reg256#6 # asm 2: vmovapd <r15=%ymm5,>r2=%ymm5 vmovapd % ymm5, % ymm5 # qhasm: a2 = mem256[ input_0 + 64 ] # asm 1: vmovupd 64(<input_0=int64#1),>a2=reg256#15 # asm 2: vmovupd 64(<input_0=%rdi),>a2=%ymm14 vmovupd 64( % rdi), % ymm14 # qhasm: a2 = a2 ^ mem256[ input_1 + 64 ] # asm 1: vpxor 64(<input_1=int64#2),<a2=reg256#15,>a2=reg256#15 # asm 2: vpxor 64(<input_1=%rsi),<a2=%ymm14,>a2=%ymm14 vpxor 64( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 64 ] = a2 # asm 1: vmovupd <a2=reg256#15,64(<input_0=int64#1) # asm 2: vmovupd <a2=%ymm14,64(<input_0=%rdi) vmovupd % ymm14, 64( % rdi) # qhasm: r = a2 & b0 # asm 1: vpand <a2=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a2=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a2 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 
vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a2 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a2 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a2 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a2 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a2 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a2 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a2 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a2 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a2 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a2 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a2 & mem256[input_2 + 384] # asm 1: vpand 
384(<input_2=int64#3),<a2=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a2=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm14,<r14=%ymm4,<r14=%ymm4 vpxor % ymm14, % ymm4, % ymm4 # qhasm: r5 ^= r14 # asm 1: vpxor <r14=reg256#5,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r14=%ymm4,<r5=%ymm8,<r5=%ymm8 vpxor % ymm4, % ymm8, % ymm8 # qhasm: r4 ^= r14 # asm 1: vpxor <r14=reg256#5,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r14=%ymm4,<r4=%ymm7,<r4=%ymm7 vpxor % ymm4, % ymm7, % ymm7 # qhasm: r2 ^= r14 # asm 1: vpxor <r14=reg256#5,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r14=%ymm4,<r2=%ymm5,<r2=%ymm5 vpxor % ymm4, % ymm5, % ymm5 # qhasm: r1 = r14 # asm 1: vmovapd <r14=reg256#5,>r1=reg256#5 # asm 2: vmovapd <r14=%ymm4,>r1=%ymm4 vmovapd % ymm4, % ymm4 # qhasm: a1 = mem256[ input_0 + 32 ] # asm 1: vmovupd 32(<input_0=int64#1),>a1=reg256#15 # asm 2: vmovupd 32(<input_0=%rdi),>a1=%ymm14 vmovupd 32( % rdi), % ymm14 # qhasm: a1 = a1 ^ mem256[ input_1 + 32 ] # asm 1: vpxor 32(<input_1=int64#2),<a1=reg256#15,>a1=reg256#15 # asm 2: vpxor 32(<input_1=%rsi),<a1=%ymm14,>a1=%ymm14 vpxor 32( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 32 ] = a1 # asm 1: vmovupd <a1=reg256#15,32(<input_0=int64#1) # asm 2: vmovupd <a1=%ymm14,32(<input_0=%rdi) vmovupd % ymm14, 32( % rdi) # qhasm: r = a1 & b0 # asm 1: vpand <a1=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a1=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#16,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm15,<r1=%ymm4,<r1=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a1 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a1 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a1 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a1 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a1 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a1 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 192( % rdx), 
% ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a1 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a1 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a1 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a1 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a1 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a1 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a1=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a1=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm14,<r13=%ymm3,<r13=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: r4 ^= r13 # asm 1: vpxor <r13=reg256#4,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r13=%ymm3,<r4=%ymm7,<r4=%ymm7 vpxor % ymm3, % ymm7, % ymm7 # qhasm: r3 ^= r13 # asm 1: vpxor <r13=reg256#4,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r13=%ymm3,<r3=%ymm6,<r3=%ymm6 vpxor % ymm3, % ymm6, % ymm6 # qhasm: r1 ^= r13 # asm 1: vpxor <r13=reg256#4,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r13=%ymm3,<r1=%ymm4,<r1=%ymm4 vpxor % ymm3, % ymm4, % ymm4 # qhasm: r0 = r13 # asm 1: vmovapd <r13=reg256#4,>r0=reg256#4 # asm 2: vmovapd <r13=%ymm3,>r0=%ymm3 vmovapd % ymm3, % ymm3 # qhasm: a0 = mem256[ input_0 + 0 ] # asm 1: vmovupd 0(<input_0=int64#1),>a0=reg256#15 # asm 2: vmovupd 0(<input_0=%rdi),>a0=%ymm14 vmovupd 0( % rdi), % ymm14 # qhasm: a0 = a0 ^ mem256[ input_1 + 0 ] # asm 1: vpxor 0(<input_1=int64#2),<a0=reg256#15,>a0=reg256#15 # asm 2: vpxor 0(<input_1=%rsi),<a0=%ymm14,>a0=%ymm14 vpxor 0( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 0 ] = a0 # asm 1: vmovupd <a0=reg256#15,0(<input_0=int64#1) # asm 2: vmovupd <a0=%ymm14,0(<input_0=%rdi) vmovupd % ymm14, 0( % rdi) # qhasm: r = a0 & b0 # asm 1: vpand <a0=reg256#15,<b0=reg256#1,>r=reg256#1 # asm 2: vpand <a0=%ymm14,<b0=%ymm0,>r=%ymm0 vpand % ymm14, % ymm0, % ymm0 # qhasm: r0 ^= r # asm 1: vpxor <r=reg256#1,<r0=reg256#4,<r0=reg256#4 # 
asm 2: vpxor <r=%ymm0,<r0=%ymm3,<r0=%ymm3 vpxor % ymm0, % ymm3, % ymm3 # qhasm: r = a0 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 32(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 32( % rdx), % ymm14, % ymm0 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#1,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm0,<r1=%ymm4,<r1=%ymm4 vpxor % ymm0, % ymm4, % ymm4 # qhasm: r = a0 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 64(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 64( % rdx), % ymm14, % ymm0 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#1,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm0,<r2=%ymm5,<r2=%ymm5 vpxor % ymm0, % ymm5, % ymm5 # qhasm: r = a0 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 96(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 96( % rdx), % ymm14, % ymm0 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#1,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm0,<r3=%ymm6,<r3=%ymm6 vpxor % ymm0, % ymm6, % ymm6 # qhasm: r = a0 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 128(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 128( % rdx), % ymm14, % ymm0 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#1,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm0,<r4=%ymm7,<r4=%ymm7 vpxor % ymm0, % ymm7, % ymm7 # qhasm: r = a0 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 160(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 160( % rdx), % ymm14, % ymm0 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#1,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm0,<r5=%ymm8,<r5=%ymm8 vpxor % ymm0, % ymm8, % ymm8 # qhasm: r = a0 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 192(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 192( % rdx), % ymm14, % ymm0 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#1,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm0,<r6=%ymm9,<r6=%ymm9 vpxor % ymm0, % ymm9, % ymm9 # qhasm: r = a0 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 224(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 224( % rdx), % ymm14, % ymm0 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#1,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm0,<r7=%ymm10,<r7=%ymm10 vpxor % ymm0, % ymm10, % ymm10 # qhasm: r = a0 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 256(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 256( % rdx), % ymm14, % ymm0 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#1,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm0,<r8=%ymm11,<r8=%ymm11 vpxor % ymm0, % ymm11, % ymm11 # qhasm: r = a0 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 288(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 288( % rdx), % ymm14, % ymm0 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#1,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm0,<r9=%ymm12,<r9=%ymm12 vpxor % ymm0, % ymm12, % ymm12 # qhasm: r = a0 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 320(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 320( % rdx), % ymm14, % ymm0 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#1,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm0,<r10=%ymm13,<r10=%ymm13 vpxor % ymm0, % ymm13, % ymm13 # qhasm: r = a0 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: 
vpand 352(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 352( % rdx), % ymm14, % ymm0 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#1,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm0,<r11=%ymm1,<r11=%ymm1 vpxor % ymm0, % ymm1, % ymm1 # qhasm: r = a0 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 384(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 384( % rdx), % ymm14, % ymm0 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#1,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm0,<r12=%ymm2,<r12=%ymm2 vpxor % ymm0, % ymm2, % ymm2 # qhasm: r12 = r12 ^ mem256[ input_1 + 384 ] # asm 1: vpxor 384(<input_1=int64#2),<r12=reg256#3,>r12=reg256#1 # asm 2: vpxor 384(<input_1=%rsi),<r12=%ymm2,>r12=%ymm0 vpxor 384( % rsi), % ymm2, % ymm0 # qhasm: mem256[ input_1 + 384 ] = r12 # asm 1: vmovupd <r12=reg256#1,384(<input_1=int64#2) # asm 2: vmovupd <r12=%ymm0,384(<input_1=%rsi) vmovupd % ymm0, 384( % rsi) # qhasm: r11 = r11 ^ mem256[ input_1 + 352 ] # asm 1: vpxor 352(<input_1=int64#2),<r11=reg256#2,>r11=reg256#1 # asm 2: vpxor 352(<input_1=%rsi),<r11=%ymm1,>r11=%ymm0 vpxor 352( % rsi), % ymm1, % ymm0 # qhasm: mem256[ input_1 + 352 ] = r11 # asm 1: vmovupd <r11=reg256#1,352(<input_1=int64#2) # asm 2: vmovupd <r11=%ymm0,352(<input_1=%rsi) vmovupd % ymm0, 352( % rsi) # qhasm: r10 = r10 ^ mem256[ input_1 + 320 ] # asm 1: vpxor 320(<input_1=int64#2),<r10=reg256#14,>r10=reg256#1 # asm 2: vpxor 320(<input_1=%rsi),<r10=%ymm13,>r10=%ymm0 vpxor 320( % rsi), % ymm13, % ymm0 # qhasm: mem256[ input_1 + 320 ] = r10 # asm 1: vmovupd <r10=reg256#1,320(<input_1=int64#2) # asm 2: vmovupd <r10=%ymm0,320(<input_1=%rsi) vmovupd % ymm0, 320( % rsi) # qhasm: r9 = r9 ^ mem256[ input_1 + 288 ] # asm 1: vpxor 288(<input_1=int64#2),<r9=reg256#13,>r9=reg256#1 # asm 2: vpxor 288(<input_1=%rsi),<r9=%ymm12,>r9=%ymm0 vpxor 288( % rsi), % ymm12, % ymm0 # qhasm: mem256[ input_1 + 288 ] = r9 # asm 1: vmovupd <r9=reg256#1,288(<input_1=int64#2) # asm 2: vmovupd <r9=%ymm0,288(<input_1=%rsi) vmovupd % ymm0, 288( % rsi) # qhasm: r8 = r8 ^ mem256[ input_1 + 256 ] # asm 1: vpxor 256(<input_1=int64#2),<r8=reg256#12,>r8=reg256#1 # asm 2: vpxor 256(<input_1=%rsi),<r8=%ymm11,>r8=%ymm0 vpxor 256( % rsi), % ymm11, % ymm0 # qhasm: mem256[ input_1 + 256 ] = r8 # asm 1: vmovupd <r8=reg256#1,256(<input_1=int64#2) # asm 2: vmovupd <r8=%ymm0,256(<input_1=%rsi) vmovupd % ymm0, 256( % rsi) # qhasm: r7 = r7 ^ mem256[ input_1 + 224 ] # asm 1: vpxor 224(<input_1=int64#2),<r7=reg256#11,>r7=reg256#1 # asm 2: vpxor 224(<input_1=%rsi),<r7=%ymm10,>r7=%ymm0 vpxor 224( % rsi), % ymm10, % ymm0 # qhasm: mem256[ input_1 + 224 ] = r7 # asm 1: vmovupd <r7=reg256#1,224(<input_1=int64#2) # asm 2: vmovupd <r7=%ymm0,224(<input_1=%rsi) vmovupd % ymm0, 224( % rsi) # qhasm: r6 = r6 ^ mem256[ input_1 + 192 ] # asm 1: vpxor 192(<input_1=int64#2),<r6=reg256#10,>r6=reg256#1 # asm 2: vpxor 192(<input_1=%rsi),<r6=%ymm9,>r6=%ymm0 vpxor 192( % rsi), % ymm9, % ymm0 # qhasm: mem256[ input_1 + 192 ] = r6 # asm 1: vmovupd <r6=reg256#1,192(<input_1=int64#2) # asm 2: vmovupd <r6=%ymm0,192(<input_1=%rsi) vmovupd % ymm0, 192( % rsi) # qhasm: r5 = r5 ^ mem256[ input_1 + 160 ] # asm 1: vpxor 160(<input_1=int64#2),<r5=reg256#9,>r5=reg256#1 # asm 2: vpxor 160(<input_1=%rsi),<r5=%ymm8,>r5=%ymm0 vpxor 160( % rsi), % ymm8, % ymm0 # qhasm: mem256[ input_1 + 160 ] = r5 # asm 1: vmovupd <r5=reg256#1,160(<input_1=int64#2) # asm 2: vmovupd <r5=%ymm0,160(<input_1=%rsi) vmovupd % ymm0, 160( % rsi) # qhasm: r4 = r4 ^ mem256[ input_1 + 128 ] # asm 1: vpxor 
128(<input_1=int64#2),<r4=reg256#8,>r4=reg256#1
# asm 2: vpxor 128(<input_1=%rsi),<r4=%ymm7,>r4=%ymm0
vpxor 128(%rsi),%ymm7,%ymm0

# qhasm: mem256[ input_1 + 128 ] = r4
# asm 1: vmovupd <r4=reg256#1,128(<input_1=int64#2)
# asm 2: vmovupd <r4=%ymm0,128(<input_1=%rsi)
vmovupd %ymm0,128(%rsi)

# qhasm: r3 = r3 ^ mem256[ input_1 + 96 ]
# asm 1: vpxor 96(<input_1=int64#2),<r3=reg256#7,>r3=reg256#1
# asm 2: vpxor 96(<input_1=%rsi),<r3=%ymm6,>r3=%ymm0
vpxor 96(%rsi),%ymm6,%ymm0

# qhasm: mem256[ input_1 + 96 ] = r3
# asm 1: vmovupd <r3=reg256#1,96(<input_1=int64#2)
# asm 2: vmovupd <r3=%ymm0,96(<input_1=%rsi)
vmovupd %ymm0,96(%rsi)

# qhasm: r2 = r2 ^ mem256[ input_1 + 64 ]
# asm 1: vpxor 64(<input_1=int64#2),<r2=reg256#6,>r2=reg256#1
# asm 2: vpxor 64(<input_1=%rsi),<r2=%ymm5,>r2=%ymm0
vpxor 64(%rsi),%ymm5,%ymm0

# qhasm: mem256[ input_1 + 64 ] = r2
# asm 1: vmovupd <r2=reg256#1,64(<input_1=int64#2)
# asm 2: vmovupd <r2=%ymm0,64(<input_1=%rsi)
vmovupd %ymm0,64(%rsi)

# qhasm: r1 = r1 ^ mem256[ input_1 + 32 ]
# asm 1: vpxor 32(<input_1=int64#2),<r1=reg256#5,>r1=reg256#1
# asm 2: vpxor 32(<input_1=%rsi),<r1=%ymm4,>r1=%ymm0
vpxor 32(%rsi),%ymm4,%ymm0

# qhasm: mem256[ input_1 + 32 ] = r1
# asm 1: vmovupd <r1=reg256#1,32(<input_1=int64#2)
# asm 2: vmovupd <r1=%ymm0,32(<input_1=%rsi)
vmovupd %ymm0,32(%rsi)

# qhasm: r0 = r0 ^ mem256[ input_1 + 0 ]
# asm 1: vpxor 0(<input_1=int64#2),<r0=reg256#4,>r0=reg256#1
# asm 2: vpxor 0(<input_1=%rsi),<r0=%ymm3,>r0=%ymm0
vpxor 0(%rsi),%ymm3,%ymm0

# qhasm: mem256[ input_1 + 0 ] = r0
# asm 1: vmovupd <r0=reg256#1,0(<input_1=int64#2)
# asm 2: vmovupd <r0=%ymm0,0(<input_1=%rsi)
vmovupd %ymm0,0(%rsi)

# qhasm: return
add %r11,%rsp
ret
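The routine that ends at the ret above is qhasm-generated AVX2 code. Judging from the # qhasm comments, it performs a fused add/multiply/add on bitsliced GF(2^13) elements: the 13 ymm-sized limbs at input_0 are XORed with the limbs at input_1 and stored back to input_0, that sum is multiplied by the 13 limbs at input_2 as a polynomial over GF(2) (the long runs of vpand/vpxor), the 25 partial-product limbs are folded down using x^13 = x^4 + x^3 + x + 1 (each rK with K >= 13 is XORed into rK-13, rK-12, rK-10 and rK-9, as in the r21 and r20 steps above), and the reduced product is XORed into the limbs at input_1. The sketch below is only an illustration of that structure under those assumptions, not code taken from this crate: it uses uint64_t limbs (64 field elements in parallel) instead of 256-bit vectors, performs the reduction in one pass at the end rather than interleaving it with the accumulation, and the name bitsliced_ama is invented.

/* Illustrative sketch only, not part of the crate above.  A portable,
   64-lane analogue of what the AVX2 routine appears to compute, inferred
   from the qhasm comments.  Each GF(2^13) element is bitsliced: bit i of
   limb[j] holds coefficient j of element i.  Field assumed to be
   GF(2)[x] / (x^13 + x^4 + x^3 + x + 1); bitsliced_ama is a made-up name. */
#include <stdint.h>
#include <stddef.h>

#define GFBITS 13

static void bitsliced_ama(uint64_t a[GFBITS],       /* in/out: a ^= b         */
                          uint64_t b[GFBITS],       /* in/out: b ^= (a^b) * c */
                          const uint64_t c[GFBITS]) /* in                     */
{
    uint64_t r[2 * GFBITS - 1] = {0};
    size_t i, j;

    /* a ^= b: the assembly stores this sum back to input_0 */
    for (i = 0; i < GFBITS; i++)
        a[i] ^= b[i];

    /* schoolbook carry-less multiply over GF(2): the vpand/vpxor runs */
    for (i = 0; i < GFBITS; i++)
        for (j = 0; j < GFBITS; j++)
            r[i + j] ^= a[i] & c[j];

    /* fold the high limbs down, using x^13 = x^4 + x^3 + x + 1 */
    for (i = 2 * GFBITS - 2; i >= GFBITS; i--) {
        r[i - GFBITS + 4] ^= r[i];
        r[i - GFBITS + 3] ^= r[i];
        r[i - GFBITS + 1] ^= r[i];
        r[i - GFBITS] ^= r[i];
    }

    /* b ^= product: the assembly XORs the result into input_1 */
    for (i = 0; i < GFBITS; i++)
        b[i] ^= r[i];
}

Because every coefficient lives in its own bit position, one vpand/vpxor pair in the assembly advances one of the 13 x 13 partial products for 256 field elements at once, which is why the generated file is a single unrolled sequence with no loops or branches.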
mktmansour/MKT-KSA-Geolocation-Security
262,634
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6960119f/avx2/transpose_64x64_asm.S
#include "namespace.h"
#define MASK0_0 CRYPTO_NAMESPACE(MASK0_0)
#define _MASK0_0 _CRYPTO_NAMESPACE(MASK0_0)
#define MASK0_1 CRYPTO_NAMESPACE(MASK0_1)
#define _MASK0_1 _CRYPTO_NAMESPACE(MASK0_1)
#define MASK1_0 CRYPTO_NAMESPACE(MASK1_0)
#define _MASK1_0 _CRYPTO_NAMESPACE(MASK1_0)
#define MASK1_1 CRYPTO_NAMESPACE(MASK1_1)
#define _MASK1_1 _CRYPTO_NAMESPACE(MASK1_1)
#define MASK2_0 CRYPTO_NAMESPACE(MASK2_0)
#define _MASK2_0 _CRYPTO_NAMESPACE(MASK2_0)
#define MASK2_1 CRYPTO_NAMESPACE(MASK2_1)
#define _MASK2_1 _CRYPTO_NAMESPACE(MASK2_1)
#define MASK3_0 CRYPTO_NAMESPACE(MASK3_0)
#define _MASK3_0 _CRYPTO_NAMESPACE(MASK3_0)
#define MASK3_1 CRYPTO_NAMESPACE(MASK3_1)
#define _MASK3_1 _CRYPTO_NAMESPACE(MASK3_1)
#define MASK4_0 CRYPTO_NAMESPACE(MASK4_0)
#define _MASK4_0 _CRYPTO_NAMESPACE(MASK4_0)
#define MASK4_1 CRYPTO_NAMESPACE(MASK4_1)
#define _MASK4_1 _CRYPTO_NAMESPACE(MASK4_1)
#define MASK5_0 CRYPTO_NAMESPACE(MASK5_0)
#define _MASK5_0 _CRYPTO_NAMESPACE(MASK5_0)
#define MASK5_1 CRYPTO_NAMESPACE(MASK5_1)
#define _MASK5_1 _CRYPTO_NAMESPACE(MASK5_1)
#define transpose_64x64_asm CRYPTO_NAMESPACE(transpose_64x64_asm)
#define _transpose_64x64_asm _CRYPTO_NAMESPACE(transpose_64x64_asm)

# qhasm: int64 input_0
# qhasm: int64 input_1
# qhasm: int64 input_2
# qhasm: int64 input_3
# qhasm: int64 input_4
# qhasm: int64 input_5
# qhasm: stack64 input_6
# qhasm: stack64 input_7
# qhasm: int64 caller_r11
# qhasm: int64 caller_r12
# qhasm: int64 caller_r13
# qhasm: int64 caller_r14
# qhasm: int64 caller_r15
# qhasm: int64 caller_rbx
# qhasm: int64 caller_rbp
# qhasm: reg128 r0
# qhasm: reg128 r1
# qhasm: reg128 r2
# qhasm: reg128 r3
# qhasm: reg128 r4
# qhasm: reg128 r5
# qhasm: reg128 r6
# qhasm: reg128 r7
# qhasm: reg128 t0
# qhasm: reg128 t1
# qhasm: reg128 v00
# qhasm: reg128 v01
# qhasm: reg128 v10
# qhasm: reg128 v11
# qhasm: int64 buf
# qhasm: reg128 mask0
# qhasm: reg128 mask1
# qhasm: reg128 mask2
# qhasm: reg128 mask3
# qhasm: reg128 mask4
# qhasm: reg128 mask5

# qhasm: enter transpose_64x64_asm
.p2align 5
.global _transpose_64x64_asm
.global transpose_64x64_asm
_transpose_64x64_asm:
transpose_64x64_asm:
mov %rsp,%r11
and $31,%r11
add $0,%r11
sub %r11,%rsp

# qhasm: mask0 aligned= mem128[ MASK5_0 ]
# asm 1: movdqa MASK5_0(%rip),>mask0=reg128#1
# asm 2: movdqa MASK5_0(%rip),>mask0=%xmm0
movdqa MASK5_0(%rip),%xmm0

# qhasm: mask1 aligned= mem128[ MASK5_1 ]
# asm 1: movdqa MASK5_1(%rip),>mask1=reg128#2
# asm 2: movdqa MASK5_1(%rip),>mask1=%xmm1
movdqa MASK5_1(%rip),%xmm1

# qhasm: mask2 aligned= mem128[ MASK4_0 ]
# asm 1: movdqa MASK4_0(%rip),>mask2=reg128#3
# asm 2: movdqa MASK4_0(%rip),>mask2=%xmm2
movdqa MASK4_0(%rip),%xmm2

# qhasm: mask3 aligned= mem128[ MASK4_1 ]
# asm 1: movdqa MASK4_1(%rip),>mask3=reg128#4
# asm 2: movdqa MASK4_1(%rip),>mask3=%xmm3
movdqa MASK4_1(%rip),%xmm3

# qhasm: mask4 aligned= mem128[ MASK3_0 ]
# asm 1: movdqa MASK3_0(%rip),>mask4=reg128#5
# asm 2: movdqa MASK3_0(%rip),>mask4=%xmm4
movdqa MASK3_0(%rip),%xmm4

# qhasm: mask5 aligned= mem128[ MASK3_1 ]
# asm 1: movdqa MASK3_1(%rip),>mask5=reg128#6
# asm 2: movdqa MASK3_1(%rip),>mask5=%xmm5
movdqa MASK3_1(%rip),%xmm5

# qhasm: r0 = mem64[ input_0 + 0 ] x2
# asm 1: movddup 0(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 0(<input_0=%rdi),>r0=%xmm6
movddup 0(%rdi),%xmm6

# qhasm: r1 = mem64[ input_0 + 64 ] x2
# asm 1: movddup 64(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 64(<input_0=%rdi),>r1=%xmm7
movddup 64(%rdi),%xmm7

# qhasm: r2 = mem64[ input_0 + 128 ] x2
# asm 1: movddup
128(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 128(<input_0=%rdi),>r2=%xmm8 movddup 128( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 192 ] x2 # asm 1: movddup 192(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 192(<input_0=%rdi),>r3=%xmm9 movddup 192( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 256 ] x2 # asm 1: movddup 256(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 256(<input_0=%rdi),>r4=%xmm10 movddup 256( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 320 ] x2 # asm 1: movddup 320(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 320(<input_0=%rdi),>r5=%xmm11 movddup 320( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 384 ] x2 # asm 1: movddup 384(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 384(<input_0=%rdi),>r6=%xmm12 movddup 384( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 448 ] x2 # asm 1: movddup 448(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 448(<input_0=%rdi),>r7=%xmm13 movddup 448( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor 
<v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 
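# Annotation (not part of the qhasm output): this is the distance-16 level of
# the first pass.  The pairing changes from (r0,r4), (r1,r5), (r2,r6), (r3,r7)
# used at distance 32 to (r0,r2), (r1,r3), (r4,r6), (r5,r7), and vpslld/vpsrld
# by 16 with mask2/mask3 (MASK4_0/MASK4_1) exchange the 16-bit halves of each
# 32-bit lane.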
# asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # 
qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 0 ] = buf # asm 1: movq <buf=int64#2,0(<input_0=int64#1) # asm 2: movq <buf=%rsi,0(<input_0=%rdi) movq % rsi, 0( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 64 ] = buf # asm 1: movq <buf=int64#2,64(<input_0=int64#1) # asm 2: movq <buf=%rsi,64(<input_0=%rdi) movq % rsi, 64( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 128 ] = buf # asm 1: movq <buf=int64#2,128(<input_0=int64#1) # asm 2: movq <buf=%rsi,128(<input_0=%rdi) movq % rsi, 128( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: mem64[ input_0 + 192 ] = buf # asm 1: movq <buf=int64#2,192(<input_0=int64#1) # asm 2: movq <buf=%rsi,192(<input_0=%rdi) movq % rsi, 192( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 256 ] = buf # asm 1: movq <buf=int64#2,256(<input_0=int64#1) # asm 2: movq <buf=%rsi,256(<input_0=%rdi) movq % rsi, 256( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 320 ] = buf # asm 1: movq <buf=int64#2,320(<input_0=int64#1) # asm 2: movq <buf=%rsi,320(<input_0=%rdi) movq % rsi, 320( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi 
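# Annotation (not part of the qhasm output): the surrounding pextrq $0x0 /
# movq pairs copy the low quadword of each transposed register back over the
# row it was loaded from (offsets 0, 64, ..., 448); only the low 64 bits of
# each xmm register are written back.  The movddup block that follows begins
# the same three-level pass for the next row group (offsets 8, 72, ..., 456).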
pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 384 ] = buf # asm 1: movq <buf=int64#2,384(<input_0=int64#1) # asm 2: movq <buf=%rsi,384(<input_0=%rdi) movq % rsi, 384( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 448 ] = buf # asm 1: movq <buf=int64#2,448(<input_0=int64#1) # asm 2: movq <buf=%rsi,448(<input_0=%rdi) movq % rsi, 448( % rdi) # qhasm: r0 = mem64[ input_0 + 8 ] x2 # asm 1: movddup 8(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 8(<input_0=%rdi),>r0=%xmm6 movddup 8( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 72 ] x2 # asm 1: movddup 72(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 72(<input_0=%rdi),>r1=%xmm7 movddup 72( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 136 ] x2 # asm 1: movddup 136(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 136(<input_0=%rdi),>r2=%xmm8 movddup 136( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 200 ] x2 # asm 1: movddup 200(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 200(<input_0=%rdi),>r3=%xmm9 movddup 200( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 264 ] x2 # asm 1: movddup 264(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 264(<input_0=%rdi),>r4=%xmm10 movddup 264( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 328 ] x2 # asm 1: movddup 328(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 328(<input_0=%rdi),>r5=%xmm11 movddup 328( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 392 ] x2 # asm 1: movddup 392(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 392(<input_0=%rdi),>r6=%xmm12 movddup 392( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 456 ] x2 # asm 1: movddup 456(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 456(<input_0=%rdi),>r7=%xmm13 movddup 456( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 
= v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand 
<mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 
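# Annotation (not part of the qhasm output): distance-8 level of the second
# pass; vpsllw/vpsrlw by 8 move byte-sized blocks between row pairs that are
# 8 rows apart, and mask4/mask5 (MASK3_0/MASK3_1) keep the complementary
# bytes of each 16-bit lane.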
vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 8 ] = buf # asm 1: movq <buf=int64#2,8(<input_0=int64#1) # asm 2: movq <buf=%rsi,8(<input_0=%rdi) movq % rsi, 8( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 72 ] = buf # asm 1: movq <buf=int64#2,72(<input_0=int64#1) # asm 2: movq <buf=%rsi,72(<input_0=%rdi) movq % rsi, 72( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 136 ] = buf # asm 1: movq <buf=int64#2,136(<input_0=int64#1) # asm 2: movq <buf=%rsi,136(<input_0=%rdi) movq % rsi, 136( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: 
mem64[ input_0 + 200 ] = buf # asm 1: movq <buf=int64#2,200(<input_0=int64#1) # asm 2: movq <buf=%rsi,200(<input_0=%rdi) movq % rsi, 200( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 264 ] = buf # asm 1: movq <buf=int64#2,264(<input_0=int64#1) # asm 2: movq <buf=%rsi,264(<input_0=%rdi) movq % rsi, 264( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 328 ] = buf # asm 1: movq <buf=int64#2,328(<input_0=int64#1) # asm 2: movq <buf=%rsi,328(<input_0=%rdi) movq % rsi, 328( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 392 ] = buf # asm 1: movq <buf=int64#2,392(<input_0=int64#1) # asm 2: movq <buf=%rsi,392(<input_0=%rdi) movq % rsi, 392( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 456 ] = buf # asm 1: movq <buf=int64#2,456(<input_0=int64#1) # asm 2: movq <buf=%rsi,456(<input_0=%rdi) movq % rsi, 456( % rdi) # qhasm: r0 = mem64[ input_0 + 16 ] x2 # asm 1: movddup 16(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 16(<input_0=%rdi),>r0=%xmm6 movddup 16( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 80 ] x2 # asm 1: movddup 80(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 80(<input_0=%rdi),>r1=%xmm7 movddup 80( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 144 ] x2 # asm 1: movddup 144(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 144(<input_0=%rdi),>r2=%xmm8 movddup 144( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 208 ] x2 # asm 1: movddup 208(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 208(<input_0=%rdi),>r3=%xmm9 movddup 208( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 272 ] x2 # asm 1: movddup 272(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 272(<input_0=%rdi),>r4=%xmm10 movddup 272( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 336 ] x2 # asm 1: movddup 336(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 336(<input_0=%rdi),>r5=%xmm11 movddup 336( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 400 ] x2 # asm 1: movddup 400(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 400(<input_0=%rdi),>r6=%xmm12 movddup 400( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 464 ] x2 # asm 1: movddup 464(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 464(<input_0=%rdi),>r7=%xmm13 movddup 464( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor 
<v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 
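# Annotation (not part of the qhasm output): same butterfly structure for the
# third row group (offsets 16, 80, ..., 464), here at distance 16 using
# mask2/mask3 (MASK4_0/MASK4_1).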
# qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 
2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 16 ] = buf # 
asm 1: movq <buf=int64#2,16(<input_0=int64#1) # asm 2: movq <buf=%rsi,16(<input_0=%rdi) movq % rsi, 16( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 80 ] = buf # asm 1: movq <buf=int64#2,80(<input_0=int64#1) # asm 2: movq <buf=%rsi,80(<input_0=%rdi) movq % rsi, 80( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 144 ] = buf # asm 1: movq <buf=int64#2,144(<input_0=int64#1) # asm 2: movq <buf=%rsi,144(<input_0=%rdi) movq % rsi, 144( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: mem64[ input_0 + 208 ] = buf # asm 1: movq <buf=int64#2,208(<input_0=int64#1) # asm 2: movq <buf=%rsi,208(<input_0=%rdi) movq % rsi, 208( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 272 ] = buf # asm 1: movq <buf=int64#2,272(<input_0=int64#1) # asm 2: movq <buf=%rsi,272(<input_0=%rdi) movq % rsi, 272( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 336 ] = buf # asm 1: movq <buf=int64#2,336(<input_0=int64#1) # asm 2: movq <buf=%rsi,336(<input_0=%rdi) movq % rsi, 336( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 400 ] = buf # asm 1: movq <buf=int64#2,400(<input_0=int64#1) # asm 2: movq <buf=%rsi,400(<input_0=%rdi) movq % rsi, 400( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 464 ] = buf # asm 1: movq <buf=int64#2,464(<input_0=int64#1) # asm 2: movq <buf=%rsi,464(<input_0=%rdi) movq % rsi, 464( % rdi) # qhasm: r0 = mem64[ input_0 + 24 ] x2 # asm 1: movddup 24(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 24(<input_0=%rdi),>r0=%xmm6 movddup 24( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 88 ] x2 # asm 1: movddup 88(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 88(<input_0=%rdi),>r1=%xmm7 movddup 88( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 152 ] x2 # asm 1: movddup 152(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 152(<input_0=%rdi),>r2=%xmm8 movddup 152( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 216 ] x2 # asm 1: movddup 216(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 216(<input_0=%rdi),>r3=%xmm9 movddup 216( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 280 ] x2 # asm 1: movddup 280(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 280(<input_0=%rdi),>r4=%xmm10 movddup 280( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 344 ] x2 # asm 1: movddup 344(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 344(<input_0=%rdi),>r5=%xmm11 movddup 344( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 408 ] x2 # asm 1: movddup 408(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 408(<input_0=%rdi),>r6=%xmm12 movddup 408( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 472 ] x2 # asm 1: movddup 472(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 472(<input_0=%rdi),>r7=%xmm13 movddup 472( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand 
<mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor 
<v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # 
qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw 
$8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 24 ] = buf # asm 1: movq <buf=int64#2,24(<input_0=int64#1) # asm 2: movq <buf=%rsi,24(<input_0=%rdi) movq % rsi, 24( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 88 ] = buf # asm 1: movq <buf=int64#2,88(<input_0=int64#1) # asm 2: movq <buf=%rsi,88(<input_0=%rdi) movq % rsi, 88( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 152 ] = buf # asm 1: movq <buf=int64#2,152(<input_0=int64#1) # asm 2: movq <buf=%rsi,152(<input_0=%rdi) movq % rsi, 152( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: mem64[ input_0 + 216 ] = buf # asm 1: movq <buf=int64#2,216(<input_0=int64#1) # asm 2: movq <buf=%rsi,216(<input_0=%rdi) movq % rsi, 216( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 280 ] = buf # asm 1: movq <buf=int64#2,280(<input_0=int64#1) # asm 2: movq <buf=%rsi,280(<input_0=%rdi) movq % rsi, 280( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 344 ] = buf # asm 1: movq <buf=int64#2,344(<input_0=int64#1) # asm 2: movq <buf=%rsi,344(<input_0=%rdi) movq % rsi, 344( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 408 ] = buf # asm 1: movq <buf=int64#2,408(<input_0=int64#1) # asm 2: movq <buf=%rsi,408(<input_0=%rdi) movq % rsi, 408( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 472 ] = buf # asm 1: movq <buf=int64#2,472(<input_0=int64#1) # asm 2: movq <buf=%rsi,472(<input_0=%rdi) movq % rsi, 472( % rdi) # qhasm: r0 = mem64[ input_0 + 32 ] x2 # asm 1: movddup 32(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 32(<input_0=%rdi),>r0=%xmm6 movddup 32( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 96 ] x2 # asm 1: movddup 96(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 96(<input_0=%rdi),>r1=%xmm7 movddup 96( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 160 ] x2 # asm 1: movddup 160(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 160(<input_0=%rdi),>r2=%xmm8 movddup 160( % rdi), % xmm8 # qhasm: r3 = 
mem64[ input_0 + 224 ] x2 # asm 1: movddup 224(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 224(<input_0=%rdi),>r3=%xmm9 movddup 224( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 288 ] x2 # asm 1: movddup 288(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 288(<input_0=%rdi),>r4=%xmm10 movddup 288( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 352 ] x2 # asm 1: movddup 352(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 352(<input_0=%rdi),>r5=%xmm11 movddup 352( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 416 ] x2 # asm 1: movddup 416(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 416(<input_0=%rdi),>r6=%xmm12 movddup 416( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 480 ] x2 # asm 1: movddup 480(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 480(<input_0=%rdi),>r7=%xmm13 movddup 480( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: 
vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 
2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 
unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 32 ] = buf # asm 1: movq <buf=int64#2,32(<input_0=int64#1) # asm 2: movq <buf=%rsi,32(<input_0=%rdi) movq % rsi, 32( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 96 ] = buf # asm 1: movq <buf=int64#2,96(<input_0=int64#1) # asm 2: movq <buf=%rsi,96(<input_0=%rdi) movq % rsi, 96( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 160 ] = buf # asm 1: movq <buf=int64#2,160(<input_0=int64#1) # asm 2: movq <buf=%rsi,160(<input_0=%rdi) movq % rsi, 160( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: mem64[ input_0 + 224 ] = buf # asm 1: movq <buf=int64#2,224(<input_0=int64#1) # asm 2: movq <buf=%rsi,224(<input_0=%rdi) movq % rsi, 224( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 288 ] = buf # asm 1: movq <buf=int64#2,288(<input_0=int64#1) # asm 2: movq <buf=%rsi,288(<input_0=%rdi) movq % rsi, 288( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 352 ] = buf # asm 1: movq <buf=int64#2,352(<input_0=int64#1) # asm 2: movq <buf=%rsi,352(<input_0=%rdi) movq % rsi, 352( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 416 ] = buf # asm 1: movq <buf=int64#2,416(<input_0=int64#1) # asm 2: movq <buf=%rsi,416(<input_0=%rdi) 
movq % rsi, 416( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 480 ] = buf # asm 1: movq <buf=int64#2,480(<input_0=int64#1) # asm 2: movq <buf=%rsi,480(<input_0=%rdi) movq % rsi, 480( % rdi) # qhasm: r0 = mem64[ input_0 + 40 ] x2 # asm 1: movddup 40(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 40(<input_0=%rdi),>r0=%xmm6 movddup 40( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 104 ] x2 # asm 1: movddup 104(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 104(<input_0=%rdi),>r1=%xmm7 movddup 104( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 168 ] x2 # asm 1: movddup 168(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 168(<input_0=%rdi),>r2=%xmm8 movddup 168( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 232 ] x2 # asm 1: movddup 232(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 232(<input_0=%rdi),>r3=%xmm9 movddup 232( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 296 ] x2 # asm 1: movddup 296(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 296(<input_0=%rdi),>r4=%xmm10 movddup 296( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 360 ] x2 # asm 1: movddup 360(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 360(<input_0=%rdi),>r5=%xmm11 movddup 360( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 424 ] x2 # asm 1: movddup 424(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 424(<input_0=%rdi),>r6=%xmm12 movddup 424( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 488 ] x2 # asm 1: movddup 488(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 488(<input_0=%rdi),>r7=%xmm13 movddup 488( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # 
qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor 
<v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % 
xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 40 ] = buf # asm 1: movq <buf=int64#2,40(<input_0=int64#1) # asm 2: movq <buf=%rsi,40(<input_0=%rdi) movq % rsi, 40( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 104 ] = buf # asm 1: movq <buf=int64#2,104(<input_0=int64#1) # asm 2: movq <buf=%rsi,104(<input_0=%rdi) movq % rsi, 104( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 168 ] = buf # asm 1: movq <buf=int64#2,168(<input_0=int64#1) # asm 2: movq <buf=%rsi,168(<input_0=%rdi) movq % rsi, 168( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: mem64[ input_0 + 232 ] = buf # asm 1: movq <buf=int64#2,232(<input_0=int64#1) # asm 2: movq <buf=%rsi,232(<input_0=%rdi) movq % rsi, 232( % rdi) # qhasm: 
buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 296 ] = buf # asm 1: movq <buf=int64#2,296(<input_0=int64#1) # asm 2: movq <buf=%rsi,296(<input_0=%rdi) movq % rsi, 296( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 360 ] = buf # asm 1: movq <buf=int64#2,360(<input_0=int64#1) # asm 2: movq <buf=%rsi,360(<input_0=%rdi) movq % rsi, 360( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 424 ] = buf # asm 1: movq <buf=int64#2,424(<input_0=int64#1) # asm 2: movq <buf=%rsi,424(<input_0=%rdi) movq % rsi, 424( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 488 ] = buf # asm 1: movq <buf=int64#2,488(<input_0=int64#1) # asm 2: movq <buf=%rsi,488(<input_0=%rdi) movq % rsi, 488( % rdi) # qhasm: r0 = mem64[ input_0 + 48 ] x2 # asm 1: movddup 48(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 48(<input_0=%rdi),>r0=%xmm6 movddup 48( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 112 ] x2 # asm 1: movddup 112(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 112(<input_0=%rdi),>r1=%xmm7 movddup 112( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 176 ] x2 # asm 1: movddup 176(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 176(<input_0=%rdi),>r2=%xmm8 movddup 176( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 240 ] x2 # asm 1: movddup 240(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 240(<input_0=%rdi),>r3=%xmm9 movddup 240( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 304 ] x2 # asm 1: movddup 304(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 304(<input_0=%rdi),>r4=%xmm10 movddup 304( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 368 ] x2 # asm 1: movddup 368(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 368(<input_0=%rdi),>r5=%xmm11 movddup 368( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 432 ] x2 # asm 1: movddup 432(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 432(<input_0=%rdi),>r6=%xmm12 movddup 432( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 496 ] x2 # asm 1: movddup 496(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 496(<input_0=%rdi),>r7=%xmm13 movddup 496( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand 
<mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 
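#
# The qhasm-generated groups above and below all repeat one masked-interleave
# ("butterfly") step, the building block of a bit-matrix transpose: two rows
# rA and rB are recombined, for a shift distance s of 32, 16 or 8 bits here
# (and 4 and 2 bits further down, where the operands are pre-masked before
# shifting), roughly as
#
#     rA' = (rA & mask_low)  | (rB << s)
#     rB' = (rA >> s)        | (rB & mask_high)
#
# Each group of eight rows r0..r7 is loaded 64 bits at a time with movddup,
# pushed through these mask/shift/OR levels, and the low 64 bits of every
# result are written back with pextrq/movq to the offsets they were read from.
#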
# qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: 
vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 48 ] = buf # asm 1: movq <buf=int64#2,48(<input_0=int64#1) # asm 2: movq <buf=%rsi,48(<input_0=%rdi) movq % rsi, 48( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq 
$0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 112 ] = buf # asm 1: movq <buf=int64#2,112(<input_0=int64#1) # asm 2: movq <buf=%rsi,112(<input_0=%rdi) movq % rsi, 112( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 176 ] = buf # asm 1: movq <buf=int64#2,176(<input_0=int64#1) # asm 2: movq <buf=%rsi,176(<input_0=%rdi) movq % rsi, 176( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: mem64[ input_0 + 240 ] = buf # asm 1: movq <buf=int64#2,240(<input_0=int64#1) # asm 2: movq <buf=%rsi,240(<input_0=%rdi) movq % rsi, 240( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 304 ] = buf # asm 1: movq <buf=int64#2,304(<input_0=int64#1) # asm 2: movq <buf=%rsi,304(<input_0=%rdi) movq % rsi, 304( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 368 ] = buf # asm 1: movq <buf=int64#2,368(<input_0=int64#1) # asm 2: movq <buf=%rsi,368(<input_0=%rdi) movq % rsi, 368( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 432 ] = buf # asm 1: movq <buf=int64#2,432(<input_0=int64#1) # asm 2: movq <buf=%rsi,432(<input_0=%rdi) movq % rsi, 432( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 496 ] = buf # asm 1: movq <buf=int64#2,496(<input_0=int64#1) # asm 2: movq <buf=%rsi,496(<input_0=%rdi) movq % rsi, 496( % rdi) # qhasm: r0 = mem64[ input_0 + 56 ] x2 # asm 1: movddup 56(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 56(<input_0=%rdi),>r0=%xmm6 movddup 56( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 120 ] x2 # asm 1: movddup 120(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 120(<input_0=%rdi),>r1=%xmm7 movddup 120( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 184 ] x2 # asm 1: movddup 184(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 184(<input_0=%rdi),>r2=%xmm8 movddup 184( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 248 ] x2 # asm 1: movddup 248(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 248(<input_0=%rdi),>r3=%xmm9 movddup 248( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 312 ] x2 # asm 1: movddup 312(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 312(<input_0=%rdi),>r4=%xmm10 movddup 312( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 376 ] x2 # asm 1: movddup 376(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 376(<input_0=%rdi),>r5=%xmm11 movddup 376( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 440 ] x2 # asm 1: movddup 440(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 440(<input_0=%rdi),>r6=%xmm12 movddup 440( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 504 ] x2 # asm 1: movddup 504(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 504(<input_0=%rdi),>r7=%xmm13 movddup 504( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq 
$32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#1 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm0 vpand % xmm0, % xmm9, % xmm0 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#13 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm12 vpsllq $32, % xmm13, % xmm12 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#2 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm1 vpand % xmm1, % xmm13, % xmm1 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#13,<v00=reg128#1,>r3=reg128#1 # asm 2: vpor <v10=%xmm12,<v00=%xmm0,>r3=%xmm0 vpor % xmm12, % xmm0, % xmm0 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#10,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm9,>r7=%xmm1 vpor 
% xmm1, % xmm9, % xmm1 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#10 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm9 vpand % xmm2, % xmm14, % xmm9 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#13 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm12 vpslld $16, % xmm11, % xmm12 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#14 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm13 vpsrld $16, % xmm14, % xmm13 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#13,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm12,<v00=%xmm9,>r0=%xmm9 vpor % xmm12, % xmm9, % xmm9 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#14,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm13,>r2=%xmm11 vpor % xmm11, % xmm13, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm12 vpand % xmm2, % xmm10, % xmm12 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#1,>v10=reg128#14 # asm 2: vpslld $16,<r3=%xmm0,>v10=%xmm13 vpslld $16, % xmm0, % xmm13 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask3=%xmm3,<r3=%xmm0,>v11=%xmm0 vpand % xmm3, % xmm0, % xmm0 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#13,>r1=reg128#13 # asm 2: vpor <v10=%xmm13,<v00=%xmm12,>r1=%xmm12 vpor % xmm13, % xmm12, % xmm12 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#11,>r3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm10,>r3=%xmm0 vpor % xmm0, % xmm10, % xmm0 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#11 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm10 vpand % xmm2, % xmm6, % xmm10 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#14 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm13 vpslld $16, % xmm8, % xmm13 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#11,>r4=reg128#11 # asm 2: vpor <v10=%xmm13,<v00=%xmm10,>r4=%xmm10 vpor % xmm13, % xmm10, % xmm10 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#3 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm2 vpand % xmm2, % xmm7, % xmm2 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#2,>v10=reg128#9 # asm 2: vpslld $16,<r7=%xmm1,>v10=%xmm8 vpslld $16, % xmm1, % xmm8 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask3=%xmm3,<r7=%xmm1,>v11=%xmm1 vpand % xmm3, % xmm1, % xmm1 # qhasm: r5 = v00 | v10 # asm 1: vpor 
<v10=reg128#9,<v00=reg128#3,>r5=reg128#3 # asm 2: vpor <v10=%xmm8,<v00=%xmm2,>r5=%xmm2 vpor % xmm8, % xmm2, % xmm2 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#8,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm7,>r7=%xmm1 vpor % xmm1, % xmm7, % xmm1 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#10,>v00=reg128#4 # asm 2: vpand <mask4=%xmm4,<r0=%xmm9,>v00=%xmm3 vpand % xmm4, % xmm9, % xmm3 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#13,>v10=reg128#8 # asm 2: vpsllw $8,<r1=%xmm12,>v10=%xmm7 vpsllw $8, % xmm12, % xmm7 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#10,>v01=reg128#9 # asm 2: vpsrlw $8,<r0=%xmm9,>v01=%xmm8 vpsrlw $8, % xmm9, % xmm8 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#13,>v11=reg128#10 # asm 2: vpand <mask5=%xmm5,<r1=%xmm12,>v11=%xmm9 vpand % xmm5, % xmm12, % xmm9 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#8,<v00=reg128#4,>r0=reg128#4 # asm 2: vpor <v10=%xmm7,<v00=%xmm3,>r0=%xmm3 vpor % xmm7, % xmm3, % xmm3 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#9,>r1=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm8,>r1=%xmm7 vpor % xmm9, % xmm8, % xmm7 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#9 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm8 vpand % xmm4, % xmm11, % xmm8 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#1,>v10=reg128#10 # asm 2: vpsllw $8,<r3=%xmm0,>v10=%xmm9 vpsllw $8, % xmm0, % xmm9 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask5=%xmm5,<r3=%xmm0,>v11=%xmm0 vpand % xmm5, % xmm0, % xmm0 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#10,<v00=reg128#9,>r2=reg128#9 # asm 2: vpor <v10=%xmm9,<v00=%xmm8,>r2=%xmm8 vpor % xmm9, % xmm8, % xmm8 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#12,>r3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm11,>r3=%xmm0 vpor % xmm0, % xmm11, % xmm0 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#11,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r4=%xmm10,>v00=%xmm9 vpand % xmm4, % xmm10, % xmm9 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#3,>v10=reg128#12 # asm 2: vpsllw $8,<r5=%xmm2,>v10=%xmm11 vpsllw $8, % xmm2, % xmm11 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#11,>v01=reg128#11 # asm 2: vpsrlw $8,<r4=%xmm10,>v01=%xmm10 vpsrlw $8, % xmm10, % xmm10 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#3,>v11=reg128#3 # asm 2: vpand <mask5=%xmm5,<r5=%xmm2,>v11=%xmm2 vpand % xmm5, % xmm2, % xmm2 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#12,<v00=reg128#10,>r4=reg128#10 # asm 2: vpor <v10=%xmm11,<v00=%xmm9,>r4=%xmm9 vpor % xmm11, % xmm9, % xmm9 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#3,<v01=reg128#11,>r5=reg128#3 # asm 2: vpor <v11=%xmm2,<v01=%xmm10,>r5=%xmm2 vpor % xmm2, % xmm10, % xmm2 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#5 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm4 vpand % xmm4, % xmm6, % xmm4 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#2,>v10=reg128#11 # asm 2: vpsllw $8,<r7=%xmm1,>v10=%xmm10 vpsllw $8, % xmm1, % xmm10 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand 
<mask5=reg128#6,<r7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask5=%xmm5,<r7=%xmm1,>v11=%xmm1 vpand % xmm5, % xmm1, % xmm1 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#11,<v00=reg128#5,>r6=reg128#5 # asm 2: vpor <v10=%xmm10,<v00=%xmm4,>r6=%xmm4 vpor % xmm10, % xmm4, % xmm4 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#7,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm6,>r7=%xmm1 vpor % xmm1, % xmm6, % xmm1 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#4,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm3,>buf=%rsi pextrq $0x0, % xmm3, % rsi # qhasm: mem64[ input_0 + 56 ] = buf # asm 1: movq <buf=int64#2,56(<input_0=int64#1) # asm 2: movq <buf=%rsi,56(<input_0=%rdi) movq % rsi, 56( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#8,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm7,>buf=%rsi pextrq $0x0, % xmm7, % rsi # qhasm: mem64[ input_0 + 120 ] = buf # asm 1: movq <buf=int64#2,120(<input_0=int64#1) # asm 2: movq <buf=%rsi,120(<input_0=%rdi) movq % rsi, 120( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 184 ] = buf # asm 1: movq <buf=int64#2,184(<input_0=int64#1) # asm 2: movq <buf=%rsi,184(<input_0=%rdi) movq % rsi, 184( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#1,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm0,>buf=%rsi pextrq $0x0, % xmm0, % rsi # qhasm: mem64[ input_0 + 248 ] = buf # asm 1: movq <buf=int64#2,248(<input_0=int64#1) # asm 2: movq <buf=%rsi,248(<input_0=%rdi) movq % rsi, 248( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 312 ] = buf # asm 1: movq <buf=int64#2,312(<input_0=int64#1) # asm 2: movq <buf=%rsi,312(<input_0=%rdi) movq % rsi, 312( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#3,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm2,>buf=%rsi pextrq $0x0, % xmm2, % rsi # qhasm: mem64[ input_0 + 376 ] = buf # asm 1: movq <buf=int64#2,376(<input_0=int64#1) # asm 2: movq <buf=%rsi,376(<input_0=%rdi) movq % rsi, 376( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#5,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm4,>buf=%rsi pextrq $0x0, % xmm4, % rsi # qhasm: mem64[ input_0 + 440 ] = buf # asm 1: movq <buf=int64#2,440(<input_0=int64#1) # asm 2: movq <buf=%rsi,440(<input_0=%rdi) movq % rsi, 440( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#2,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm1,>buf=%rsi pextrq $0x0, % xmm1, % rsi # qhasm: mem64[ input_0 + 504 ] = buf # asm 1: movq <buf=int64#2,504(<input_0=int64#1) # asm 2: movq <buf=%rsi,504(<input_0=%rdi) movq % rsi, 504( % rdi) # qhasm: mask0 aligned= mem128[ MASK2_0 ] # asm 1: movdqa MASK2_0(%rip),>mask0=reg128#1 # asm 2: movdqa MASK2_0(%rip),>mask0=%xmm0 movdqa MASK2_0( % rip), % xmm0 # qhasm: mask1 aligned= mem128[ MASK2_1 ] # asm 1: movdqa MASK2_1(%rip),>mask1=reg128#2 # asm 2: movdqa MASK2_1(%rip),>mask1=%xmm1 movdqa MASK2_1( % rip), % xmm1 # qhasm: mask2 aligned= mem128[ MASK1_0 ] # asm 1: movdqa MASK1_0(%rip),>mask2=reg128#3 # asm 2: movdqa MASK1_0(%rip),>mask2=%xmm2 movdqa MASK1_0( % rip), % xmm2 # qhasm: mask3 aligned= mem128[ MASK1_1 ] # asm 1: movdqa MASK1_1(%rip),>mask3=reg128#4 # asm 2: movdqa MASK1_1(%rip),>mask3=%xmm3 movdqa MASK1_1( % rip), % xmm3 # qhasm: mask4 aligned= mem128[ MASK0_0 ] # asm 1: movdqa MASK0_0(%rip),>mask4=reg128#5 # asm 2: movdqa MASK0_0(%rip),>mask4=%xmm4 movdqa MASK0_0( % rip), % 
xmm4 # qhasm: mask5 aligned= mem128[ MASK0_1 ] # asm 1: movdqa MASK0_1(%rip),>mask5=reg128#6 # asm 2: movdqa MASK0_1(%rip),>mask5=%xmm5 movdqa MASK0_1( % rip), % xmm5 # qhasm: r0 = mem64[ input_0 + 0 ] x2 # asm 1: movddup 0(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 0(<input_0=%rdi),>r0=%xmm6 movddup 0( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 8 ] x2 # asm 1: movddup 8(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 8(<input_0=%rdi),>r1=%xmm7 movddup 8( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 16 ] x2 # asm 1: movddup 16(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 16(<input_0=%rdi),>r2=%xmm8 movddup 16( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 24 ] x2 # asm 1: movddup 24(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 24(<input_0=%rdi),>r3=%xmm9 movddup 24( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 32 ] x2 # asm 1: movddup 32(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 32(<input_0=%rdi),>r4=%xmm10 movddup 32( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 40 ] x2 # asm 1: movddup 40(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 40(<input_0=%rdi),>r5=%xmm11 movddup 40( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 48 ] x2 # asm 1: movddup 48(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 48(<input_0=%rdi),>r6=%xmm12 movddup 48( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 56 ] x2 # asm 1: movddup 56(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 56(<input_0=%rdi),>r7=%xmm13 movddup 56( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | 
# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand %xmm0,%xmm6,%xmm14

# qhasm: v10 = r4 & mask0
# asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15
vpand %xmm0,%xmm10,%xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15

# qhasm: v01 = r0 & mask1
# asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7
# asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6
vpand %xmm1,%xmm6,%xmm6

# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand %xmm1,%xmm10,%xmm10

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#7
# asm 2: psrlq $4,<v01=%xmm6
psrlq $4,%xmm6

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor %xmm10,%xmm6,%xmm6

# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand %xmm0,%xmm7,%xmm10

# qhasm: v10 = r5 & mask0
# asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15
vpand %xmm0,%xmm11,%xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15

# qhasm: v01 = r1 & mask1
# asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8
# asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7
vpand %xmm1,%xmm7,%xmm7

# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand %xmm1,%xmm11,%xmm11

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#8
# asm 2: psrlq $4,<v01=%xmm7
psrlq $4,%xmm7

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor %xmm15,%xmm10,%xmm10

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor %xmm11,%xmm7,%xmm7

# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand %xmm0,%xmm8,%xmm11

# qhasm: v10 = r6 & mask0
# asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15
vpand %xmm0,%xmm12,%xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15

# qhasm: v01 = r2 & mask1
# asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9
# asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8
vpand %xmm1,%xmm8,%xmm8

# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand %xmm1,%xmm12,%xmm12

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#9
# asm 2: psrlq $4,<v01=%xmm8
psrlq $4,%xmm8

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor %xmm12,%xmm8,%xmm8

# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand %xmm0,%xmm9,%xmm12

# qhasm: v10 = r7 & mask0
# asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15
vpand %xmm0,%xmm13,%xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15

# qhasm: v01 = r3 & mask1
# asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10
# asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9
vpand %xmm1,%xmm9,%xmm9

# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand %xmm1,%xmm13,%xmm13

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#10
# asm 2: psrlq $4,<v01=%xmm9
psrlq $4,%xmm9

# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor %xmm13,%xmm9,%xmm9

# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand %xmm2,%xmm14,%xmm13

# qhasm: v10 = r2 & mask2
# asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15
vpand %xmm2,%xmm11,%xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = r0 & mask3
# asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15
# asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14
vpand %xmm3,%xmm14,%xmm14

# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand %xmm3,%xmm11,%xmm11

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#15
# asm 2: psrlq
$2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq 
$2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # 
qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 0 ] = t0 # asm 1: movdqu <t0=reg128#8,0(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,0(<input_0=%rdi) movdqu % xmm7, 0( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 16 ] = t0 # asm 1: movdqu <t0=reg128#8,16(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,16(<input_0=%rdi) movdqu % xmm7, 16( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 32 ] = t0 # asm 1: movdqu <t0=reg128#8,32(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,32(<input_0=%rdi) movdqu % xmm7, 32( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 48 ] = t0 # asm 1: movdqu <t0=reg128#7,48(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,48(<input_0=%rdi) movdqu % xmm6, 48( % rdi) # qhasm: r0 = mem64[ input_0 + 64 ] x2 # asm 1: movddup 64(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 64(<input_0=%rdi),>r0=%xmm6 movddup 64( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 72 ] x2 # asm 1: movddup 72(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 72(<input_0=%rdi),>r1=%xmm7 movddup 72( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 80 ] x2 # asm 1: movddup 80(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 80(<input_0=%rdi),>r2=%xmm8 movddup 80( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 88 ] x2 # asm 1: movddup 88(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 88(<input_0=%rdi),>r3=%xmm9 
movddup 88( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 96 ] x2 # asm 1: movddup 96(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 96(<input_0=%rdi),>r4=%xmm10 movddup 96( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 104 ] x2 # asm 1: movddup 104(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 104(<input_0=%rdi),>r5=%xmm11 movddup 104( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 112 ] x2 # asm 1: movddup 112(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 112(<input_0=%rdi),>r6=%xmm12 movddup 112( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 120 ] x2 # asm 1: movddup 120(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 120(<input_0=%rdi),>r7=%xmm13 movddup 120( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: 
vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 
psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq 
$1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 
1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 64 ] = t0 # asm 1: movdqu <t0=reg128#8,64(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,64(<input_0=%rdi) movdqu % xmm7, 64( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 80 ] = t0 # asm 1: movdqu <t0=reg128#8,80(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,80(<input_0=%rdi) movdqu % xmm7, 80( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 96 ] = t0 # asm 1: movdqu <t0=reg128#8,96(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,96(<input_0=%rdi) movdqu % xmm7, 96( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 112 ] = t0 # asm 1: movdqu <t0=reg128#7,112(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,112(<input_0=%rdi) movdqu % xmm6, 112( % rdi) # qhasm: r0 = mem64[ input_0 + 128 ] x2 # asm 1: movddup 128(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 128(<input_0=%rdi),>r0=%xmm6 movddup 128( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 136 ] x2 # asm 1: movddup 136(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 136(<input_0=%rdi),>r1=%xmm7 movddup 136( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 144 ] x2 # asm 1: movddup 144(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 144(<input_0=%rdi),>r2=%xmm8 movddup 144( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 152 ] x2 # asm 1: movddup 152(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 152(<input_0=%rdi),>r3=%xmm9 movddup 152( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 160 ] x2 # asm 1: movddup 160(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 160(<input_0=%rdi),>r4=%xmm10 movddup 160( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 168 ] x2 # asm 1: movddup 168(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 168(<input_0=%rdi),>r5=%xmm11 movddup 168( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 176 ] x2 # asm 1: movddup 176(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 176(<input_0=%rdi),>r6=%xmm12 movddup 176( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 184 ] x2 # asm 1: movddup 184(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 184(<input_0=%rdi),>r7=%xmm13 movddup 184( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand 
<mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & 
mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor 
<v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: 
vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor 
<v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 128 ] = t0 # asm 1: movdqu <t0=reg128#8,128(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,128(<input_0=%rdi) movdqu % xmm7, 128( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 144 ] = t0 # asm 1: movdqu <t0=reg128#8,144(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,144(<input_0=%rdi) movdqu % xmm7, 144( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 160 ] = t0 # asm 1: movdqu <t0=reg128#8,160(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,160(<input_0=%rdi) movdqu % xmm7, 160( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 176 ] = t0 # asm 1: movdqu <t0=reg128#7,176(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,176(<input_0=%rdi) movdqu % xmm6, 176( % rdi) # qhasm: r0 = mem64[ input_0 + 192 ] x2 # asm 1: movddup 192(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 192(<input_0=%rdi),>r0=%xmm6 movddup 192( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 200 ] x2 # asm 1: movddup 200(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 200(<input_0=%rdi),>r1=%xmm7 movddup 200( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 208 ] x2 # asm 1: movddup 208(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 208(<input_0=%rdi),>r2=%xmm8 movddup 208( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 216 ] x2 # asm 1: movddup 216(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 216(<input_0=%rdi),>r3=%xmm9 movddup 216( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 224 ] x2 # asm 1: movddup 224(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 224(<input_0=%rdi),>r4=%xmm10 movddup 224( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 232 ] x2 # asm 1: movddup 232(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 232(<input_0=%rdi),>r5=%xmm11 movddup 232( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 240 ] x2 # asm 1: movddup 240(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 240(<input_0=%rdi),>r6=%xmm12 movddup 240( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 248 ] x2 # asm 1: movddup 248(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 248(<input_0=%rdi),>r7=%xmm13 movddup 248( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % 
xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand 
<mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand 
<mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & 
mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 192 ] = t0 # asm 1: movdqu <t0=reg128#8,192(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,192(<input_0=%rdi) movdqu % xmm7, 192( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 208 ] = t0 # asm 1: movdqu 
<t0=reg128#8,208(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,208(<input_0=%rdi) movdqu % xmm7, 208( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 224 ] = t0 # asm 1: movdqu <t0=reg128#8,224(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,224(<input_0=%rdi) movdqu % xmm7, 224( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 240 ] = t0 # asm 1: movdqu <t0=reg128#7,240(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,240(<input_0=%rdi) movdqu % xmm6, 240( % rdi) # qhasm: r0 = mem64[ input_0 + 256 ] x2 # asm 1: movddup 256(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 256(<input_0=%rdi),>r0=%xmm6 movddup 256( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 264 ] x2 # asm 1: movddup 264(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 264(<input_0=%rdi),>r1=%xmm7 movddup 264( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 272 ] x2 # asm 1: movddup 272(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 272(<input_0=%rdi),>r2=%xmm8 movddup 272( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 280 ] x2 # asm 1: movddup 280(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 280(<input_0=%rdi),>r3=%xmm9 movddup 280( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 288 ] x2 # asm 1: movddup 288(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 288(<input_0=%rdi),>r4=%xmm10 movddup 288( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 296 ] x2 # asm 1: movddup 296(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 296(<input_0=%rdi),>r5=%xmm11 movddup 296( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 304 ] x2 # asm 1: movddup 304(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 304(<input_0=%rdi),>r6=%xmm12 movddup 304( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 312 ] x2 # asm 1: movddup 312(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 312(<input_0=%rdi),>r7=%xmm13 movddup 312( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand 
<mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 
v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand 
<mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand 
<mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 256 ] = t0 # asm 1: movdqu <t0=reg128#8,256(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,256(<input_0=%rdi) movdqu % xmm7, 256( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 272 ] = t0 # asm 1: movdqu <t0=reg128#8,272(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,272(<input_0=%rdi) movdqu % xmm7, 272( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 288 ] = t0 # asm 1: movdqu <t0=reg128#8,288(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,288(<input_0=%rdi) movdqu % xmm7, 288( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 304 ] = t0 
# asm 1: movdqu <t0=reg128#7,304(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,304(<input_0=%rdi) movdqu % xmm6, 304( % rdi) # qhasm: r0 = mem64[ input_0 + 320 ] x2 # asm 1: movddup 320(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 320(<input_0=%rdi),>r0=%xmm6 movddup 320( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 328 ] x2 # asm 1: movddup 328(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 328(<input_0=%rdi),>r1=%xmm7 movddup 328( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 336 ] x2 # asm 1: movddup 336(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 336(<input_0=%rdi),>r2=%xmm8 movddup 336( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 344 ] x2 # asm 1: movddup 344(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 344(<input_0=%rdi),>r3=%xmm9 movddup 344( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 352 ] x2 # asm 1: movddup 352(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 352(<input_0=%rdi),>r4=%xmm10 movddup 352( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 360 ] x2 # asm 1: movddup 360(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 360(<input_0=%rdi),>r5=%xmm11 movddup 360( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 368 ] x2 # asm 1: movddup 368(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 368(<input_0=%rdi),>r6=%xmm12 movddup 368( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 376 ] x2 # asm 1: movddup 376(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 376(<input_0=%rdi),>r7=%xmm13 movddup 376( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 
psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # 
asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 
unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 320 ] = t0 # asm 1: movdqu <t0=reg128#8,320(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,320(<input_0=%rdi) movdqu % xmm7, 320( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 336 ] = t0 # asm 1: movdqu <t0=reg128#8,336(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,336(<input_0=%rdi) movdqu % xmm7, 336( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 352 ] = t0 # asm 1: movdqu <t0=reg128#8,352(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,352(<input_0=%rdi) movdqu % xmm7, 352( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 368 ] = t0 # asm 1: movdqu <t0=reg128#7,368(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,368(<input_0=%rdi) movdqu % xmm6, 368( % rdi) # qhasm: r0 = mem64[ input_0 + 384 ] x2 # asm 1: movddup 384(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 384(<input_0=%rdi),>r0=%xmm6 movddup 384( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 392 ] x2 # asm 1: movddup 392(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 392(<input_0=%rdi),>r1=%xmm7 movddup 392( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 400 ] x2 # asm 1: movddup 400(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 400(<input_0=%rdi),>r2=%xmm8 movddup 400( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 408 ] x2 # asm 1: movddup 408(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 
408(<input_0=%rdi),>r3=%xmm9 movddup 408( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 416 ] x2 # asm 1: movddup 416(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 416(<input_0=%rdi),>r4=%xmm10 movddup 416( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 424 ] x2 # asm 1: movddup 424(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 424(<input_0=%rdi),>r5=%xmm11 movddup 424( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 432 ] x2 # asm 1: movddup 432(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 432(<input_0=%rdi),>r6=%xmm12 movddup 432( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 440 ] x2 # asm 1: movddup 440(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 440(<input_0=%rdi),>r7=%xmm13 movddup 440( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # 
qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # 
asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq 
$1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % 
xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 384 ] = t0 # asm 1: movdqu <t0=reg128#8,384(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,384(<input_0=%rdi) movdqu % xmm7, 384( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 400 ] = t0 # asm 1: movdqu <t0=reg128#8,400(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,400(<input_0=%rdi) movdqu % xmm7, 400( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 416 ] = t0 # asm 1: movdqu <t0=reg128#8,416(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,416(<input_0=%rdi) movdqu % xmm7, 416( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 432 ] = t0 # asm 1: movdqu <t0=reg128#7,432(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,432(<input_0=%rdi) movdqu % xmm6, 432( % rdi) # qhasm: r0 = mem64[ input_0 + 448 ] x2 # asm 1: movddup 448(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 448(<input_0=%rdi),>r0=%xmm6 movddup 448( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 456 ] x2 # asm 1: movddup 456(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 456(<input_0=%rdi),>r1=%xmm7 movddup 456( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 464 ] x2 # asm 1: movddup 464(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 464(<input_0=%rdi),>r2=%xmm8 movddup 464( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 472 ] x2 # asm 1: movddup 472(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 472(<input_0=%rdi),>r3=%xmm9 movddup 472( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 480 ] x2 # asm 1: movddup 480(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 480(<input_0=%rdi),>r4=%xmm10 movddup 480( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 488 ] x2 # asm 1: movddup 488(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 488(<input_0=%rdi),>r5=%xmm11 movddup 488( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 496 ] x2 # asm 1: movddup 496(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 496(<input_0=%rdi),>r6=%xmm12 movddup 496( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 504 ] x2 # asm 1: movddup 504(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 504(<input_0=%rdi),>r7=%xmm13 movddup 504( % rdi), % xmm13 # 
qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % 
xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#1 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm0 vpand % xmm0, % xmm13, % xmm0 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#1 # asm 2: psllq $4,<v10=%xmm0 psllq $4, % xmm0 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#2 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm1 vpand % xmm1, % xmm13, % xmm1 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#1,<v00=reg128#13,>r3=reg128#1 # asm 2: vpor <v10=%xmm0,<v00=%xmm12,>r3=%xmm0 vpor % xmm0, % xmm12, % xmm0 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#10,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm9,>r7=%xmm1 vpor % xmm1, % xmm9, % xmm1 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#10 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm9 vpand % xmm2, % xmm14, % xmm9 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#13 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm12 vpand % xmm2, % xmm11, % xmm12 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#13 # asm 2: psllq $2,<v10=%xmm12 psllq $2, % xmm12 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#14 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm13 vpand % xmm3, % xmm14, % xmm13 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#14 # asm 2: psrlq $2,<v01=%xmm13 psrlq $2, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#13,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm12,<v00=%xmm9,>r0=%xmm9 vpor % xmm12, % xmm9, % xmm9 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#14,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm13,>r2=%xmm11 vpor % xmm11, % xmm13, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm12 vpand % xmm2, % xmm10, % xmm12 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#1,>v10=reg128#14 # asm 2: vpand <mask2=%xmm2,<r3=%xmm0,>v10=%xmm13 vpand % xmm2, % xmm0, % xmm13 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#14 # asm 2: psllq $2,<v10=%xmm13 psllq $2, % xmm13 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask3=%xmm3,<r3=%xmm0,>v11=%xmm0 vpand % xmm3, % xmm0, % xmm0 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#13,>r1=reg128#13 # asm 2: vpor <v10=%xmm13,<v00=%xmm12,>r1=%xmm12 vpor % xmm13, % xmm12, % xmm12 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#11,>r3=reg128#1 # asm 2: vpor 
<v11=%xmm0,<v01=%xmm10,>r3=%xmm0 vpor % xmm0, % xmm10, % xmm0 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#11 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm10 vpand % xmm2, % xmm6, % xmm10 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#14 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm13 vpand % xmm2, % xmm8, % xmm13 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#14 # asm 2: psllq $2,<v10=%xmm13 psllq $2, % xmm13 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#11,>r4=reg128#11 # asm 2: vpor <v10=%xmm13,<v00=%xmm10,>r4=%xmm10 vpor % xmm13, % xmm10, % xmm10 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#2,>v10=reg128#3 # asm 2: vpand <mask2=%xmm2,<r7=%xmm1,>v10=%xmm2 vpand % xmm2, % xmm1, % xmm2 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#3 # asm 2: psllq $2,<v10=%xmm2 psllq $2, % xmm2 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask3=%xmm3,<r7=%xmm1,>v11=%xmm1 vpand % xmm3, % xmm1, % xmm1 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#3,<v00=reg128#9,>r5=reg128#3 # asm 2: vpor <v10=%xmm2,<v00=%xmm8,>r5=%xmm2 vpor % xmm2, % xmm8, % xmm2 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#8,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm7,>r7=%xmm1 vpor % xmm1, % xmm7, % xmm1 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#10,>v00=reg128#4 # asm 2: vpand <mask4=%xmm4,<r0=%xmm9,>v00=%xmm3 vpand % xmm4, % xmm9, % xmm3 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#13,>v10=reg128#8 # asm 2: vpand <mask4=%xmm4,<r1=%xmm12,>v10=%xmm7 vpand % xmm4, % xmm12, % xmm7 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#8 # asm 2: psllq $1,<v10=%xmm7 psllq $1, % xmm7 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#10,>v01=reg128#9 # asm 2: vpand <mask5=%xmm5,<r0=%xmm9,>v01=%xmm8 vpand % xmm5, % xmm9, % xmm8 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#13,>v11=reg128#10 # asm 2: vpand <mask5=%xmm5,<r1=%xmm12,>v11=%xmm9 vpand % xmm5, % xmm12, % xmm9 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#9 # asm 2: psrlq $1,<v01=%xmm8 psrlq $1, % xmm8 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#8,<v00=reg128#4,>r0=reg128#4 # asm 2: vpor <v10=%xmm7,<v00=%xmm3,>r0=%xmm3 vpor % xmm7, % xmm3, % xmm3 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#9,>r1=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm8,>r1=%xmm7 vpor % 
xmm9, % xmm8, % xmm7 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#9 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm8 vpand % xmm4, % xmm11, % xmm8 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#1,>v10=reg128#10 # asm 2: vpand <mask4=%xmm4,<r3=%xmm0,>v10=%xmm9 vpand % xmm4, % xmm0, % xmm9 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#10 # asm 2: psllq $1,<v10=%xmm9 psllq $1, % xmm9 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask5=%xmm5,<r3=%xmm0,>v11=%xmm0 vpand % xmm5, % xmm0, % xmm0 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#10,<v00=reg128#9,>r2=reg128#9 # asm 2: vpor <v10=%xmm9,<v00=%xmm8,>r2=%xmm8 vpor % xmm9, % xmm8, % xmm8 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#12,>r3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm11,>r3=%xmm0 vpor % xmm0, % xmm11, % xmm0 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#11,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r4=%xmm10,>v00=%xmm9 vpand % xmm4, % xmm10, % xmm9 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#3,>v10=reg128#12 # asm 2: vpand <mask4=%xmm4,<r5=%xmm2,>v10=%xmm11 vpand % xmm4, % xmm2, % xmm11 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#12 # asm 2: psllq $1,<v10=%xmm11 psllq $1, % xmm11 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#11,>v01=reg128#11 # asm 2: vpand <mask5=%xmm5,<r4=%xmm10,>v01=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#3,>v11=reg128#3 # asm 2: vpand <mask5=%xmm5,<r5=%xmm2,>v11=%xmm2 vpand % xmm5, % xmm2, % xmm2 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#11 # asm 2: psrlq $1,<v01=%xmm10 psrlq $1, % xmm10 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#12,<v00=reg128#10,>r4=reg128#10 # asm 2: vpor <v10=%xmm11,<v00=%xmm9,>r4=%xmm9 vpor % xmm11, % xmm9, % xmm9 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#3,<v01=reg128#11,>r5=reg128#3 # asm 2: vpor <v11=%xmm2,<v01=%xmm10,>r5=%xmm2 vpor % xmm2, % xmm10, % xmm2 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#11 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm10 vpand % xmm4, % xmm6, % xmm10 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#2,>v10=reg128#5 # asm 2: vpand <mask4=%xmm4,<r7=%xmm1,>v10=%xmm4 vpand % xmm4, % xmm1, % xmm4 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#5 # asm 2: psllq $1,<v10=%xmm4 psllq $1, % xmm4 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask5=%xmm5,<r7=%xmm1,>v11=%xmm1 vpand % xmm5, % xmm1, % xmm1 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#5,<v00=reg128#11,>r6=reg128#5 # asm 2: vpor <v10=%xmm4,<v00=%xmm10,>r6=%xmm4 vpor % xmm4, % xmm10, % xmm4 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#7,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm6,>r7=%xmm1 vpor % xmm1, % xmm6, % 
xmm1 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#8,<r0=reg128#4,>t0=reg128#4 # asm 2: vpunpcklqdq <r1=%xmm7,<r0=%xmm3,>t0=%xmm3 vpunpcklqdq % xmm7, % xmm3, % xmm3 # qhasm: mem128[ input_0 + 448 ] = t0 # asm 1: movdqu <t0=reg128#4,448(<input_0=int64#1) # asm 2: movdqu <t0=%xmm3,448(<input_0=%rdi) movdqu % xmm3, 448( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#1,<r2=reg128#9,>t0=reg128#1 # asm 2: vpunpcklqdq <r3=%xmm0,<r2=%xmm8,>t0=%xmm0 vpunpcklqdq % xmm0, % xmm8, % xmm0 # qhasm: mem128[ input_0 + 464 ] = t0 # asm 1: movdqu <t0=reg128#1,464(<input_0=int64#1) # asm 2: movdqu <t0=%xmm0,464(<input_0=%rdi) movdqu % xmm0, 464( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#3,<r4=reg128#10,>t0=reg128#1 # asm 2: vpunpcklqdq <r5=%xmm2,<r4=%xmm9,>t0=%xmm0 vpunpcklqdq % xmm2, % xmm9, % xmm0 # qhasm: mem128[ input_0 + 480 ] = t0 # asm 1: movdqu <t0=reg128#1,480(<input_0=int64#1) # asm 2: movdqu <t0=%xmm0,480(<input_0=%rdi) movdqu % xmm0, 480( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#2,<r6=reg128#5,>t0=reg128#1 # asm 2: vpunpcklqdq <r7=%xmm1,<r6=%xmm4,>t0=%xmm0 vpunpcklqdq % xmm1, % xmm4, % xmm0 # qhasm: mem128[ input_0 + 496 ] = t0 # asm 1: movdqu <t0=reg128#1,496(<input_0=int64#1) # asm 2: movdqu <t0=%xmm0,496(<input_0=%rdi) movdqu % xmm0, 496( % rdi) # qhasm: return add % r11, % rsp ret
mktmansour/MKT-KSA-Geolocation-Security
2,712
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6960119f/avx2/consts.S
#include "namespace.h" #if defined(__APPLE__) #define ASM_HIDDEN .private_extern #else #define ASM_HIDDEN .hidden #endif #define MASK0_0 CRYPTO_NAMESPACE(MASK0_0) #define _MASK0_0 _CRYPTO_NAMESPACE(MASK0_0) #define MASK0_1 CRYPTO_NAMESPACE(MASK0_1) #define _MASK0_1 _CRYPTO_NAMESPACE(MASK0_1) #define MASK1_0 CRYPTO_NAMESPACE(MASK1_0) #define _MASK1_0 _CRYPTO_NAMESPACE(MASK1_0) #define MASK1_1 CRYPTO_NAMESPACE(MASK1_1) #define _MASK1_1 _CRYPTO_NAMESPACE(MASK1_1) #define MASK2_0 CRYPTO_NAMESPACE(MASK2_0) #define _MASK2_0 _CRYPTO_NAMESPACE(MASK2_0) #define MASK2_1 CRYPTO_NAMESPACE(MASK2_1) #define _MASK2_1 _CRYPTO_NAMESPACE(MASK2_1) #define MASK3_0 CRYPTO_NAMESPACE(MASK3_0) #define _MASK3_0 _CRYPTO_NAMESPACE(MASK3_0) #define MASK3_1 CRYPTO_NAMESPACE(MASK3_1) #define _MASK3_1 _CRYPTO_NAMESPACE(MASK3_1) #define MASK4_0 CRYPTO_NAMESPACE(MASK4_0) #define _MASK4_0 _CRYPTO_NAMESPACE(MASK4_0) #define MASK4_1 CRYPTO_NAMESPACE(MASK4_1) #define _MASK4_1 _CRYPTO_NAMESPACE(MASK4_1) #define MASK5_0 CRYPTO_NAMESPACE(MASK5_0) #define _MASK5_0 _CRYPTO_NAMESPACE(MASK5_0) #define MASK5_1 CRYPTO_NAMESPACE(MASK5_1) #define _MASK5_1 _CRYPTO_NAMESPACE(MASK5_1) .data ASM_HIDDEN MASK0_0 ASM_HIDDEN MASK0_1 ASM_HIDDEN MASK1_0 ASM_HIDDEN MASK1_1 ASM_HIDDEN MASK2_0 ASM_HIDDEN MASK2_1 ASM_HIDDEN MASK3_0 ASM_HIDDEN MASK3_1 ASM_HIDDEN MASK4_0 ASM_HIDDEN MASK4_1 ASM_HIDDEN MASK5_0 ASM_HIDDEN MASK5_1 .globl MASK0_0 .globl MASK0_1 .globl MASK1_0 .globl MASK1_1 .globl MASK2_0 .globl MASK2_1 .globl MASK3_0 .globl MASK3_1 .globl MASK4_0 .globl MASK4_1 .globl MASK5_0 .globl MASK5_1 .p2align 5 MASK0_0: .quad 0x5555555555555555, 0x5555555555555555, 0x5555555555555555, 0x5555555555555555 MASK0_1: .quad 0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA MASK1_0: .quad 0x3333333333333333, 0x3333333333333333, 0x3333333333333333, 0x3333333333333333 MASK1_1: .quad 0xCCCCCCCCCCCCCCCC, 0xCCCCCCCCCCCCCCCC, 0xCCCCCCCCCCCCCCCC, 0xCCCCCCCCCCCCCCCC MASK2_0: .quad 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F MASK2_1: .quad 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0 MASK3_0: .quad 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF MASK3_1: .quad 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00 MASK4_0: .quad 0x0000FFFF0000FFFF, 0x0000FFFF0000FFFF, 0x0000FFFF0000FFFF, 0x0000FFFF0000FFFF MASK4_1: .quad 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000 MASK5_0: .quad 0x00000000FFFFFFFF, 0x00000000FFFFFFFF, 0x00000000FFFFFFFF, 0x00000000FFFFFFFF MASK5_1: .quad 0xFFFFFFFF00000000, 0xFFFFFFFF00000000, 0xFFFFFFFF00000000, 0xFFFFFFFF00000000
mktmansour/MKT-KSA-Geolocation-Security
14,915
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6960119f/avx2/update_asm.S
#include "namespace.h" #define update_asm CRYPTO_NAMESPACE(update_asm) #define _update_asm _CRYPTO_NAMESPACE(update_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: int64 s0 # qhasm: int64 s1 # qhasm: int64 s2 # qhasm: enter update_asm .p2align 5 .global _update_asm .global update_asm _update_asm: update_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: s2 = input_1 # asm 1: mov <input_1=int64#2,>s2=int64#2 # asm 2: mov <input_1=%rsi,>s2=%rsi mov % rsi, % rsi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ 
input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 
0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd 
$1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq 
<s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: return add % r11, % rsp ret
mktmansour/MKT-KSA-Geolocation-Security
53,565
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6960119f/avx2/vec128_mul_asm.S
#include "namespace.h" #define vec128_mul_asm CRYPTO_NAMESPACE(vec128_mul_asm) #define _vec128_mul_asm _CRYPTO_NAMESPACE(vec128_mul_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 b0 # qhasm: reg256 b1 # qhasm: reg256 b2 # qhasm: reg256 b3 # qhasm: reg256 b4 # qhasm: reg256 b5 # qhasm: reg256 b6 # qhasm: reg256 b7 # qhasm: reg256 b8 # qhasm: reg256 b9 # qhasm: reg256 b10 # qhasm: reg256 b11 # qhasm: reg256 b12 # qhasm: reg256 a0 # qhasm: reg256 a1 # qhasm: reg256 a2 # qhasm: reg256 a3 # qhasm: reg256 a4 # qhasm: reg256 a5 # qhasm: reg256 a6 # qhasm: reg256 r0 # qhasm: reg256 r1 # qhasm: reg256 r2 # qhasm: reg256 r3 # qhasm: reg256 r4 # qhasm: reg256 r5 # qhasm: reg256 r6 # qhasm: reg256 r7 # qhasm: reg256 r8 # qhasm: reg256 r9 # qhasm: reg256 r10 # qhasm: reg256 r11 # qhasm: reg256 r12 # qhasm: reg256 r13 # qhasm: reg256 r14 # qhasm: reg256 r15 # qhasm: reg256 r16 # qhasm: reg256 r17 # qhasm: reg256 r18 # qhasm: reg256 r19 # qhasm: reg256 r20 # qhasm: reg256 r21 # qhasm: reg256 r22 # qhasm: reg256 r23 # qhasm: reg256 r24 # qhasm: reg256 r # qhasm: reg128 h0 # qhasm: reg128 h1 # qhasm: reg128 h2 # qhasm: reg128 h3 # qhasm: reg128 h4 # qhasm: reg128 h5 # qhasm: reg128 h6 # qhasm: reg128 h7 # qhasm: reg128 h8 # qhasm: reg128 h9 # qhasm: reg128 h10 # qhasm: reg128 h11 # qhasm: reg128 h12 # qhasm: reg128 h13 # qhasm: reg128 h14 # qhasm: reg128 h15 # qhasm: reg128 h16 # qhasm: reg128 h17 # qhasm: reg128 h18 # qhasm: reg128 h19 # qhasm: reg128 h20 # qhasm: reg128 h21 # qhasm: reg128 h22 # qhasm: reg128 h23 # qhasm: reg128 h24 # qhasm: stack4864 buf # qhasm: int64 ptr # qhasm: int64 tmp # qhasm: enter vec128_mul_asm .p2align 5 .global _vec128_mul_asm .global vec128_mul_asm _vec128_mul_asm: vec128_mul_asm: mov % rsp, % r11 and $31, % r11 add $608, % r11 sub % r11, % rsp # qhasm: ptr = &buf # asm 1: leaq <buf=stack4864#1,>ptr=int64#5 # asm 2: leaq <buf=0(%rsp),>ptr=%r8 leaq 0( % rsp), % r8 # qhasm: tmp = input_3 # asm 1: mov <input_3=int64#4,>tmp=int64#6 # asm 2: mov <input_3=%rcx,>tmp=%r9 mov % rcx, % r9 # qhasm: tmp *= 12 # asm 1: imulq $12,<tmp=int64#6,>tmp=int64#6 # asm 2: imulq $12,<tmp=%r9,>tmp=%r9 imulq $12, % r9, % r9 # qhasm: input_2 += tmp # asm 1: add <tmp=int64#6,<input_2=int64#3 # asm 2: add <tmp=%r9,<input_2=%rdx add % r9, % rdx # qhasm: b12 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b12=reg256#1 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b12=%ymm0 vbroadcasti128 0( % rdx), % ymm0 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: a6 = a6 ^ a6 # asm 1: vpxor <a6=reg256#2,<a6=reg256#2,>a6=reg256#2 # asm 2: vpxor <a6=%ymm1,<a6=%ymm1,>a6=%ymm1 vpxor % ymm1, % ymm1, % ymm1 # qhasm: a6[0] = mem128[ input_1 + 96 ] # asm 1: vinsertf128 $0x0,96(<input_1=int64#2),<a6=reg256#2,<a6=reg256#2 # asm 2: vinsertf128 $0x0,96(<input_1=%rsi),<a6=%ymm1,<a6=%ymm1 vinsertf128 $0x0, 96( % rsi), % ymm1, % ymm1 # qhasm: r18 = b12 & a6 # asm 1: vpand <b12=reg256#1,<a6=reg256#2,>r18=reg256#3 # asm 2: vpand <b12=%ymm0,<a6=%ymm1,>r18=%ymm2 vpand % ymm0, % ymm1, % ymm2 # qhasm: mem256[ ptr + 576 ] = r18 # asm 1: vmovupd <r18=reg256#3,576(<ptr=int64#5) # asm 2: 
vmovupd <r18=%ymm2,576(<ptr=%r8) vmovupd % ymm2, 576( % r8) # qhasm: a5[0] = mem128[ input_1 + 80 ] # asm 1: vinsertf128 $0x0,80(<input_1=int64#2),<a5=reg256#3,<a5=reg256#3 # asm 2: vinsertf128 $0x0,80(<input_1=%rsi),<a5=%ymm2,<a5=%ymm2 vinsertf128 $0x0, 80( % rsi), % ymm2, % ymm2 # qhasm: a5[1] = mem128[ input_1 + 192 ] # asm 1: vinsertf128 $0x1,192(<input_1=int64#2),<a5=reg256#3,<a5=reg256#3 # asm 2: vinsertf128 $0x1,192(<input_1=%rsi),<a5=%ymm2,<a5=%ymm2 vinsertf128 $0x1, 192( % rsi), % ymm2, % ymm2 # qhasm: r17 = b12 & a5 # asm 1: vpand <b12=reg256#1,<a5=reg256#3,>r17=reg256#4 # asm 2: vpand <b12=%ymm0,<a5=%ymm2,>r17=%ymm3 vpand % ymm0, % ymm2, % ymm3 # qhasm: a4[0] = mem128[ input_1 + 64 ] # asm 1: vinsertf128 $0x0,64(<input_1=int64#2),<a4=reg256#5,<a4=reg256#5 # asm 2: vinsertf128 $0x0,64(<input_1=%rsi),<a4=%ymm4,<a4=%ymm4 vinsertf128 $0x0, 64( % rsi), % ymm4, % ymm4 # qhasm: a4[1] = mem128[ input_1 + 176 ] # asm 1: vinsertf128 $0x1,176(<input_1=int64#2),<a4=reg256#5,<a4=reg256#5 # asm 2: vinsertf128 $0x1,176(<input_1=%rsi),<a4=%ymm4,<a4=%ymm4 vinsertf128 $0x1, 176( % rsi), % ymm4, % ymm4 # qhasm: r16 = b12 & a4 # asm 1: vpand <b12=reg256#1,<a4=reg256#5,>r16=reg256#6 # asm 2: vpand <b12=%ymm0,<a4=%ymm4,>r16=%ymm5 vpand % ymm0, % ymm4, % ymm5 # qhasm: a3[0] = mem128[ input_1 + 48 ] # asm 1: vinsertf128 $0x0,48(<input_1=int64#2),<a3=reg256#7,<a3=reg256#7 # asm 2: vinsertf128 $0x0,48(<input_1=%rsi),<a3=%ymm6,<a3=%ymm6 vinsertf128 $0x0, 48( % rsi), % ymm6, % ymm6 # qhasm: a3[1] = mem128[ input_1 + 160 ] # asm 1: vinsertf128 $0x1,160(<input_1=int64#2),<a3=reg256#7,<a3=reg256#7 # asm 2: vinsertf128 $0x1,160(<input_1=%rsi),<a3=%ymm6,<a3=%ymm6 vinsertf128 $0x1, 160( % rsi), % ymm6, % ymm6 # qhasm: r15 = b12 & a3 # asm 1: vpand <b12=reg256#1,<a3=reg256#7,>r15=reg256#8 # asm 2: vpand <b12=%ymm0,<a3=%ymm6,>r15=%ymm7 vpand % ymm0, % ymm6, % ymm7 # qhasm: a2[0] = mem128[ input_1 + 32 ] # asm 1: vinsertf128 $0x0,32(<input_1=int64#2),<a2=reg256#9,<a2=reg256#9 # asm 2: vinsertf128 $0x0,32(<input_1=%rsi),<a2=%ymm8,<a2=%ymm8 vinsertf128 $0x0, 32( % rsi), % ymm8, % ymm8 # qhasm: a2[1] = mem128[ input_1 + 144 ] # asm 1: vinsertf128 $0x1,144(<input_1=int64#2),<a2=reg256#9,<a2=reg256#9 # asm 2: vinsertf128 $0x1,144(<input_1=%rsi),<a2=%ymm8,<a2=%ymm8 vinsertf128 $0x1, 144( % rsi), % ymm8, % ymm8 # qhasm: r14 = b12 & a2 # asm 1: vpand <b12=reg256#1,<a2=reg256#9,>r14=reg256#10 # asm 2: vpand <b12=%ymm0,<a2=%ymm8,>r14=%ymm9 vpand % ymm0, % ymm8, % ymm9 # qhasm: a1[0] = mem128[ input_1 + 16 ] # asm 1: vinsertf128 $0x0,16(<input_1=int64#2),<a1=reg256#11,<a1=reg256#11 # asm 2: vinsertf128 $0x0,16(<input_1=%rsi),<a1=%ymm10,<a1=%ymm10 vinsertf128 $0x0, 16( % rsi), % ymm10, % ymm10 # qhasm: a1[1] = mem128[ input_1 + 128 ] # asm 1: vinsertf128 $0x1,128(<input_1=int64#2),<a1=reg256#11,<a1=reg256#11 # asm 2: vinsertf128 $0x1,128(<input_1=%rsi),<a1=%ymm10,<a1=%ymm10 vinsertf128 $0x1, 128( % rsi), % ymm10, % ymm10 # qhasm: r13 = b12 & a1 # asm 1: vpand <b12=reg256#1,<a1=reg256#11,>r13=reg256#12 # asm 2: vpand <b12=%ymm0,<a1=%ymm10,>r13=%ymm11 vpand % ymm0, % ymm10, % ymm11 # qhasm: a0[0] = mem128[ input_1 + 0 ] # asm 1: vinsertf128 $0x0,0(<input_1=int64#2),<a0=reg256#13,<a0=reg256#13 # asm 2: vinsertf128 $0x0,0(<input_1=%rsi),<a0=%ymm12,<a0=%ymm12 vinsertf128 $0x0, 0( % rsi), % ymm12, % ymm12 # qhasm: a0[1] = mem128[ input_1 + 112 ] # asm 1: vinsertf128 $0x1,112(<input_1=int64#2),<a0=reg256#13,<a0=reg256#13 # asm 2: vinsertf128 $0x1,112(<input_1=%rsi),<a0=%ymm12,<a0=%ymm12 vinsertf128 $0x1, 112( % rsi), % ymm12, % ymm12 
# qhasm: r12 = b12 & a0 # asm 1: vpand <b12=reg256#1,<a0=reg256#13,>r12=reg256#1 # asm 2: vpand <b12=%ymm0,<a0=%ymm12,>r12=%ymm0 vpand % ymm0, % ymm12, % ymm0 # qhasm: b11 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b11=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b11=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b11 & a6 # asm 1: vpand <b11=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b11=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#15,<r17=reg256#4,<r17=reg256#4 # asm 2: vpxor <r=%ymm14,<r17=%ymm3,<r17=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: mem256[ ptr + 544 ] = r17 # asm 1: vmovupd <r17=reg256#4,544(<ptr=int64#5) # asm 2: vmovupd <r17=%ymm3,544(<ptr=%r8) vmovupd % ymm3, 544( % r8) # qhasm: r = b11 & a5 # asm 1: vpand <b11=reg256#14,<a5=reg256#3,>r=reg256#4 # asm 2: vpand <b11=%ymm13,<a5=%ymm2,>r=%ymm3 vpand % ymm13, % ymm2, % ymm3 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#4,<r16=reg256#6,<r16=reg256#6 # asm 2: vpxor <r=%ymm3,<r16=%ymm5,<r16=%ymm5 vpxor % ymm3, % ymm5, % ymm5 # qhasm: r = b11 & a4 # asm 1: vpand <b11=reg256#14,<a4=reg256#5,>r=reg256#4 # asm 2: vpand <b11=%ymm13,<a4=%ymm4,>r=%ymm3 vpand % ymm13, % ymm4, % ymm3 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#4,<r15=reg256#8,<r15=reg256#8 # asm 2: vpxor <r=%ymm3,<r15=%ymm7,<r15=%ymm7 vpxor % ymm3, % ymm7, % ymm7 # qhasm: r = b11 & a3 # asm 1: vpand <b11=reg256#14,<a3=reg256#7,>r=reg256#4 # asm 2: vpand <b11=%ymm13,<a3=%ymm6,>r=%ymm3 vpand % ymm13, % ymm6, % ymm3 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#4,<r14=reg256#10,<r14=reg256#10 # asm 2: vpxor <r=%ymm3,<r14=%ymm9,<r14=%ymm9 vpxor % ymm3, % ymm9, % ymm9 # qhasm: r = b11 & a2 # asm 1: vpand <b11=reg256#14,<a2=reg256#9,>r=reg256#4 # asm 2: vpand <b11=%ymm13,<a2=%ymm8,>r=%ymm3 vpand % ymm13, % ymm8, % ymm3 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#4,<r13=reg256#12,<r13=reg256#12 # asm 2: vpxor <r=%ymm3,<r13=%ymm11,<r13=%ymm11 vpxor % ymm3, % ymm11, % ymm11 # qhasm: r = b11 & a1 # asm 1: vpand <b11=reg256#14,<a1=reg256#11,>r=reg256#4 # asm 2: vpand <b11=%ymm13,<a1=%ymm10,>r=%ymm3 vpand % ymm13, % ymm10, % ymm3 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#4,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm3,<r12=%ymm0,<r12=%ymm0 vpxor % ymm3, % ymm0, % ymm0 # qhasm: r11 = b11 & a0 # asm 1: vpand <b11=reg256#14,<a0=reg256#13,>r11=reg256#4 # asm 2: vpand <b11=%ymm13,<a0=%ymm12,>r11=%ymm3 vpand % ymm13, % ymm12, % ymm3 # qhasm: b10 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b10=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b10=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b10 & a6 # asm 1: vpand <b10=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b10=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#15,<r16=reg256#6,<r16=reg256#6 # asm 2: vpxor <r=%ymm14,<r16=%ymm5,<r16=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: mem256[ ptr + 512 ] = r16 # asm 1: vmovupd <r16=reg256#6,512(<ptr=int64#5) # asm 2: vmovupd <r16=%ymm5,512(<ptr=%r8) vmovupd % ymm5, 512( % r8) # qhasm: r = b10 & a5 # asm 1: vpand <b10=reg256#14,<a5=reg256#3,>r=reg256#6 # asm 2: vpand <b10=%ymm13,<a5=%ymm2,>r=%ymm5 vpand % ymm13, % ymm2, % ymm5 # 
qhasm: r15 ^= r # asm 1: vpxor <r=reg256#6,<r15=reg256#8,<r15=reg256#8 # asm 2: vpxor <r=%ymm5,<r15=%ymm7,<r15=%ymm7 vpxor % ymm5, % ymm7, % ymm7 # qhasm: r = b10 & a4 # asm 1: vpand <b10=reg256#14,<a4=reg256#5,>r=reg256#6 # asm 2: vpand <b10=%ymm13,<a4=%ymm4,>r=%ymm5 vpand % ymm13, % ymm4, % ymm5 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#6,<r14=reg256#10,<r14=reg256#10 # asm 2: vpxor <r=%ymm5,<r14=%ymm9,<r14=%ymm9 vpxor % ymm5, % ymm9, % ymm9 # qhasm: r = b10 & a3 # asm 1: vpand <b10=reg256#14,<a3=reg256#7,>r=reg256#6 # asm 2: vpand <b10=%ymm13,<a3=%ymm6,>r=%ymm5 vpand % ymm13, % ymm6, % ymm5 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#6,<r13=reg256#12,<r13=reg256#12 # asm 2: vpxor <r=%ymm5,<r13=%ymm11,<r13=%ymm11 vpxor % ymm5, % ymm11, % ymm11 # qhasm: r = b10 & a2 # asm 1: vpand <b10=reg256#14,<a2=reg256#9,>r=reg256#6 # asm 2: vpand <b10=%ymm13,<a2=%ymm8,>r=%ymm5 vpand % ymm13, % ymm8, % ymm5 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#6,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm5,<r12=%ymm0,<r12=%ymm0 vpxor % ymm5, % ymm0, % ymm0 # qhasm: r = b10 & a1 # asm 1: vpand <b10=reg256#14,<a1=reg256#11,>r=reg256#6 # asm 2: vpand <b10=%ymm13,<a1=%ymm10,>r=%ymm5 vpand % ymm13, % ymm10, % ymm5 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#6,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm5,<r11=%ymm3,<r11=%ymm3 vpxor % ymm5, % ymm3, % ymm3 # qhasm: r10 = b10 & a0 # asm 1: vpand <b10=reg256#14,<a0=reg256#13,>r10=reg256#6 # asm 2: vpand <b10=%ymm13,<a0=%ymm12,>r10=%ymm5 vpand % ymm13, % ymm12, % ymm5 # qhasm: b9 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b9=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b9=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b9 & a6 # asm 1: vpand <b9=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b9=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#15,<r15=reg256#8,<r15=reg256#8 # asm 2: vpxor <r=%ymm14,<r15=%ymm7,<r15=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: mem256[ ptr + 480 ] = r15 # asm 1: vmovupd <r15=reg256#8,480(<ptr=int64#5) # asm 2: vmovupd <r15=%ymm7,480(<ptr=%r8) vmovupd % ymm7, 480( % r8) # qhasm: r = b9 & a5 # asm 1: vpand <b9=reg256#14,<a5=reg256#3,>r=reg256#8 # asm 2: vpand <b9=%ymm13,<a5=%ymm2,>r=%ymm7 vpand % ymm13, % ymm2, % ymm7 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#8,<r14=reg256#10,<r14=reg256#10 # asm 2: vpxor <r=%ymm7,<r14=%ymm9,<r14=%ymm9 vpxor % ymm7, % ymm9, % ymm9 # qhasm: r = b9 & a4 # asm 1: vpand <b9=reg256#14,<a4=reg256#5,>r=reg256#8 # asm 2: vpand <b9=%ymm13,<a4=%ymm4,>r=%ymm7 vpand % ymm13, % ymm4, % ymm7 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#8,<r13=reg256#12,<r13=reg256#12 # asm 2: vpxor <r=%ymm7,<r13=%ymm11,<r13=%ymm11 vpxor % ymm7, % ymm11, % ymm11 # qhasm: r = b9 & a3 # asm 1: vpand <b9=reg256#14,<a3=reg256#7,>r=reg256#8 # asm 2: vpand <b9=%ymm13,<a3=%ymm6,>r=%ymm7 vpand % ymm13, % ymm6, % ymm7 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#8,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm7,<r12=%ymm0,<r12=%ymm0 vpxor % ymm7, % ymm0, % ymm0 # qhasm: r = b9 & a2 # asm 1: vpand <b9=reg256#14,<a2=reg256#9,>r=reg256#8 # asm 2: vpand <b9=%ymm13,<a2=%ymm8,>r=%ymm7 vpand % ymm13, % ymm8, % ymm7 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#8,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm7,<r11=%ymm3,<r11=%ymm3 vpxor % ymm7, % ymm3, % ymm3 # qhasm: r = b9 & a1 # asm 1: vpand 
<b9=reg256#14,<a1=reg256#11,>r=reg256#8 # asm 2: vpand <b9=%ymm13,<a1=%ymm10,>r=%ymm7 vpand % ymm13, % ymm10, % ymm7 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#8,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm7,<r10=%ymm5,<r10=%ymm5 vpxor % ymm7, % ymm5, % ymm5 # qhasm: r9 = b9 & a0 # asm 1: vpand <b9=reg256#14,<a0=reg256#13,>r9=reg256#8 # asm 2: vpand <b9=%ymm13,<a0=%ymm12,>r9=%ymm7 vpand % ymm13, % ymm12, % ymm7 # qhasm: b8 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b8=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b8=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b8 & a6 # asm 1: vpand <b8=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b8=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#10,<r14=reg256#10 # asm 2: vpxor <r=%ymm14,<r14=%ymm9,<r14=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: mem256[ ptr + 448 ] = r14 # asm 1: vmovupd <r14=reg256#10,448(<ptr=int64#5) # asm 2: vmovupd <r14=%ymm9,448(<ptr=%r8) vmovupd % ymm9, 448( % r8) # qhasm: r = b8 & a5 # asm 1: vpand <b8=reg256#14,<a5=reg256#3,>r=reg256#10 # asm 2: vpand <b8=%ymm13,<a5=%ymm2,>r=%ymm9 vpand % ymm13, % ymm2, % ymm9 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#10,<r13=reg256#12,<r13=reg256#12 # asm 2: vpxor <r=%ymm9,<r13=%ymm11,<r13=%ymm11 vpxor % ymm9, % ymm11, % ymm11 # qhasm: r = b8 & a4 # asm 1: vpand <b8=reg256#14,<a4=reg256#5,>r=reg256#10 # asm 2: vpand <b8=%ymm13,<a4=%ymm4,>r=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#10,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm9,<r12=%ymm0,<r12=%ymm0 vpxor % ymm9, % ymm0, % ymm0 # qhasm: r = b8 & a3 # asm 1: vpand <b8=reg256#14,<a3=reg256#7,>r=reg256#10 # asm 2: vpand <b8=%ymm13,<a3=%ymm6,>r=%ymm9 vpand % ymm13, % ymm6, % ymm9 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#10,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm9,<r11=%ymm3,<r11=%ymm3 vpxor % ymm9, % ymm3, % ymm3 # qhasm: r = b8 & a2 # asm 1: vpand <b8=reg256#14,<a2=reg256#9,>r=reg256#10 # asm 2: vpand <b8=%ymm13,<a2=%ymm8,>r=%ymm9 vpand % ymm13, % ymm8, % ymm9 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#10,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm9,<r10=%ymm5,<r10=%ymm5 vpxor % ymm9, % ymm5, % ymm5 # qhasm: r = b8 & a1 # asm 1: vpand <b8=reg256#14,<a1=reg256#11,>r=reg256#10 # asm 2: vpand <b8=%ymm13,<a1=%ymm10,>r=%ymm9 vpand % ymm13, % ymm10, % ymm9 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#10,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm9,<r9=%ymm7,<r9=%ymm7 vpxor % ymm9, % ymm7, % ymm7 # qhasm: r8 = b8 & a0 # asm 1: vpand <b8=reg256#14,<a0=reg256#13,>r8=reg256#10 # asm 2: vpand <b8=%ymm13,<a0=%ymm12,>r8=%ymm9 vpand % ymm13, % ymm12, % ymm9 # qhasm: b7 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b7=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b7=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b7 & a6 # asm 1: vpand <b7=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b7=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#12,<r13=reg256#12 # asm 2: vpxor <r=%ymm14,<r13=%ymm11,<r13=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: mem256[ ptr + 416 ] = r13 # asm 1: vmovupd 
<r13=reg256#12,416(<ptr=int64#5) # asm 2: vmovupd <r13=%ymm11,416(<ptr=%r8) vmovupd % ymm11, 416( % r8) # qhasm: r = b7 & a5 # asm 1: vpand <b7=reg256#14,<a5=reg256#3,>r=reg256#12 # asm 2: vpand <b7=%ymm13,<a5=%ymm2,>r=%ymm11 vpand % ymm13, % ymm2, % ymm11 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#12,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm11,<r12=%ymm0,<r12=%ymm0 vpxor % ymm11, % ymm0, % ymm0 # qhasm: r = b7 & a4 # asm 1: vpand <b7=reg256#14,<a4=reg256#5,>r=reg256#12 # asm 2: vpand <b7=%ymm13,<a4=%ymm4,>r=%ymm11 vpand % ymm13, % ymm4, % ymm11 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#12,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm11,<r11=%ymm3,<r11=%ymm3 vpxor % ymm11, % ymm3, % ymm3 # qhasm: r = b7 & a3 # asm 1: vpand <b7=reg256#14,<a3=reg256#7,>r=reg256#12 # asm 2: vpand <b7=%ymm13,<a3=%ymm6,>r=%ymm11 vpand % ymm13, % ymm6, % ymm11 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#12,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm11,<r10=%ymm5,<r10=%ymm5 vpxor % ymm11, % ymm5, % ymm5 # qhasm: r = b7 & a2 # asm 1: vpand <b7=reg256#14,<a2=reg256#9,>r=reg256#12 # asm 2: vpand <b7=%ymm13,<a2=%ymm8,>r=%ymm11 vpand % ymm13, % ymm8, % ymm11 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#12,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm11,<r9=%ymm7,<r9=%ymm7 vpxor % ymm11, % ymm7, % ymm7 # qhasm: r = b7 & a1 # asm 1: vpand <b7=reg256#14,<a1=reg256#11,>r=reg256#12 # asm 2: vpand <b7=%ymm13,<a1=%ymm10,>r=%ymm11 vpand % ymm13, % ymm10, % ymm11 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#12,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm11,<r8=%ymm9,<r8=%ymm9 vpxor % ymm11, % ymm9, % ymm9 # qhasm: r7 = b7 & a0 # asm 1: vpand <b7=reg256#14,<a0=reg256#13,>r7=reg256#12 # asm 2: vpand <b7=%ymm13,<a0=%ymm12,>r7=%ymm11 vpand % ymm13, % ymm12, % ymm11 # qhasm: b6 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b6=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b6=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b6 & a6 # asm 1: vpand <b6=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b6=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#15,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm14,<r12=%ymm0,<r12=%ymm0 vpxor % ymm14, % ymm0, % ymm0 # qhasm: mem256[ ptr + 384 ] = r12 # asm 1: vmovupd <r12=reg256#1,384(<ptr=int64#5) # asm 2: vmovupd <r12=%ymm0,384(<ptr=%r8) vmovupd % ymm0, 384( % r8) # qhasm: r = b6 & a5 # asm 1: vpand <b6=reg256#14,<a5=reg256#3,>r=reg256#1 # asm 2: vpand <b6=%ymm13,<a5=%ymm2,>r=%ymm0 vpand % ymm13, % ymm2, % ymm0 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#1,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm0,<r11=%ymm3,<r11=%ymm3 vpxor % ymm0, % ymm3, % ymm3 # qhasm: r = b6 & a4 # asm 1: vpand <b6=reg256#14,<a4=reg256#5,>r=reg256#1 # asm 2: vpand <b6=%ymm13,<a4=%ymm4,>r=%ymm0 vpand % ymm13, % ymm4, % ymm0 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#1,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm0,<r10=%ymm5,<r10=%ymm5 vpxor % ymm0, % ymm5, % ymm5 # qhasm: r = b6 & a3 # asm 1: vpand <b6=reg256#14,<a3=reg256#7,>r=reg256#1 # asm 2: vpand <b6=%ymm13,<a3=%ymm6,>r=%ymm0 vpand % ymm13, % ymm6, % ymm0 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#1,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm0,<r9=%ymm7,<r9=%ymm7 vpxor % ymm0, % ymm7, % ymm7 # qhasm: r = b6 & a2 # asm 1: vpand <b6=reg256#14,<a2=reg256#9,>r=reg256#1 # asm 2: vpand 
<b6=%ymm13,<a2=%ymm8,>r=%ymm0 vpand % ymm13, % ymm8, % ymm0 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#1,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm0,<r8=%ymm9,<r8=%ymm9 vpxor % ymm0, % ymm9, % ymm9 # qhasm: r = b6 & a1 # asm 1: vpand <b6=reg256#14,<a1=reg256#11,>r=reg256#1 # asm 2: vpand <b6=%ymm13,<a1=%ymm10,>r=%ymm0 vpand % ymm13, % ymm10, % ymm0 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#1,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm0,<r7=%ymm11,<r7=%ymm11 vpxor % ymm0, % ymm11, % ymm11 # qhasm: r6 = b6 & a0 # asm 1: vpand <b6=reg256#14,<a0=reg256#13,>r6=reg256#1 # asm 2: vpand <b6=%ymm13,<a0=%ymm12,>r6=%ymm0 vpand % ymm13, % ymm12, % ymm0 # qhasm: b5 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b5=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b5=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b5 & a6 # asm 1: vpand <b5=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b5=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#15,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm14,<r11=%ymm3,<r11=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: mem256[ ptr + 352 ] = r11 # asm 1: vmovupd <r11=reg256#4,352(<ptr=int64#5) # asm 2: vmovupd <r11=%ymm3,352(<ptr=%r8) vmovupd % ymm3, 352( % r8) # qhasm: r = b5 & a5 # asm 1: vpand <b5=reg256#14,<a5=reg256#3,>r=reg256#4 # asm 2: vpand <b5=%ymm13,<a5=%ymm2,>r=%ymm3 vpand % ymm13, % ymm2, % ymm3 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#4,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm3,<r10=%ymm5,<r10=%ymm5 vpxor % ymm3, % ymm5, % ymm5 # qhasm: r = b5 & a4 # asm 1: vpand <b5=reg256#14,<a4=reg256#5,>r=reg256#4 # asm 2: vpand <b5=%ymm13,<a4=%ymm4,>r=%ymm3 vpand % ymm13, % ymm4, % ymm3 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#4,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm3,<r9=%ymm7,<r9=%ymm7 vpxor % ymm3, % ymm7, % ymm7 # qhasm: r = b5 & a3 # asm 1: vpand <b5=reg256#14,<a3=reg256#7,>r=reg256#4 # asm 2: vpand <b5=%ymm13,<a3=%ymm6,>r=%ymm3 vpand % ymm13, % ymm6, % ymm3 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#4,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm3,<r8=%ymm9,<r8=%ymm9 vpxor % ymm3, % ymm9, % ymm9 # qhasm: r = b5 & a2 # asm 1: vpand <b5=reg256#14,<a2=reg256#9,>r=reg256#4 # asm 2: vpand <b5=%ymm13,<a2=%ymm8,>r=%ymm3 vpand % ymm13, % ymm8, % ymm3 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#4,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm3,<r7=%ymm11,<r7=%ymm11 vpxor % ymm3, % ymm11, % ymm11 # qhasm: r = b5 & a1 # asm 1: vpand <b5=reg256#14,<a1=reg256#11,>r=reg256#4 # asm 2: vpand <b5=%ymm13,<a1=%ymm10,>r=%ymm3 vpand % ymm13, % ymm10, % ymm3 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#4,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm3,<r6=%ymm0,<r6=%ymm0 vpxor % ymm3, % ymm0, % ymm0 # qhasm: r5 = b5 & a0 # asm 1: vpand <b5=reg256#14,<a0=reg256#13,>r5=reg256#4 # asm 2: vpand <b5=%ymm13,<a0=%ymm12,>r5=%ymm3 vpand % ymm13, % ymm12, % ymm3 # qhasm: b4 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b4=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b4=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b4 & a6 # asm 1: vpand <b4=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b4=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: 
r10 ^= r # asm 1: vpxor <r=reg256#15,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm14,<r10=%ymm5,<r10=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: mem256[ ptr + 320 ] = r10 # asm 1: vmovupd <r10=reg256#6,320(<ptr=int64#5) # asm 2: vmovupd <r10=%ymm5,320(<ptr=%r8) vmovupd % ymm5, 320( % r8) # qhasm: r = b4 & a5 # asm 1: vpand <b4=reg256#14,<a5=reg256#3,>r=reg256#6 # asm 2: vpand <b4=%ymm13,<a5=%ymm2,>r=%ymm5 vpand % ymm13, % ymm2, % ymm5 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#6,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm5,<r9=%ymm7,<r9=%ymm7 vpxor % ymm5, % ymm7, % ymm7 # qhasm: r = b4 & a4 # asm 1: vpand <b4=reg256#14,<a4=reg256#5,>r=reg256#6 # asm 2: vpand <b4=%ymm13,<a4=%ymm4,>r=%ymm5 vpand % ymm13, % ymm4, % ymm5 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#6,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm5,<r8=%ymm9,<r8=%ymm9 vpxor % ymm5, % ymm9, % ymm9 # qhasm: r = b4 & a3 # asm 1: vpand <b4=reg256#14,<a3=reg256#7,>r=reg256#6 # asm 2: vpand <b4=%ymm13,<a3=%ymm6,>r=%ymm5 vpand % ymm13, % ymm6, % ymm5 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#6,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm5,<r7=%ymm11,<r7=%ymm11 vpxor % ymm5, % ymm11, % ymm11 # qhasm: r = b4 & a2 # asm 1: vpand <b4=reg256#14,<a2=reg256#9,>r=reg256#6 # asm 2: vpand <b4=%ymm13,<a2=%ymm8,>r=%ymm5 vpand % ymm13, % ymm8, % ymm5 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#6,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm5,<r6=%ymm0,<r6=%ymm0 vpxor % ymm5, % ymm0, % ymm0 # qhasm: r = b4 & a1 # asm 1: vpand <b4=reg256#14,<a1=reg256#11,>r=reg256#6 # asm 2: vpand <b4=%ymm13,<a1=%ymm10,>r=%ymm5 vpand % ymm13, % ymm10, % ymm5 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#6,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm5,<r5=%ymm3,<r5=%ymm3 vpxor % ymm5, % ymm3, % ymm3 # qhasm: r4 = b4 & a0 # asm 1: vpand <b4=reg256#14,<a0=reg256#13,>r4=reg256#6 # asm 2: vpand <b4=%ymm13,<a0=%ymm12,>r4=%ymm5 vpand % ymm13, % ymm12, % ymm5 # qhasm: b3 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b3=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b3=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b3 & a6 # asm 1: vpand <b3=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b3=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#15,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm14,<r9=%ymm7,<r9=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: mem256[ ptr + 288 ] = r9 # asm 1: vmovupd <r9=reg256#8,288(<ptr=int64#5) # asm 2: vmovupd <r9=%ymm7,288(<ptr=%r8) vmovupd % ymm7, 288( % r8) # qhasm: r = b3 & a5 # asm 1: vpand <b3=reg256#14,<a5=reg256#3,>r=reg256#8 # asm 2: vpand <b3=%ymm13,<a5=%ymm2,>r=%ymm7 vpand % ymm13, % ymm2, % ymm7 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#8,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm7,<r8=%ymm9,<r8=%ymm9 vpxor % ymm7, % ymm9, % ymm9 # qhasm: r = b3 & a4 # asm 1: vpand <b3=reg256#14,<a4=reg256#5,>r=reg256#8 # asm 2: vpand <b3=%ymm13,<a4=%ymm4,>r=%ymm7 vpand % ymm13, % ymm4, % ymm7 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#8,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm7,<r7=%ymm11,<r7=%ymm11 vpxor % ymm7, % ymm11, % ymm11 # qhasm: r = b3 & a3 # asm 1: vpand <b3=reg256#14,<a3=reg256#7,>r=reg256#8 # asm 2: vpand <b3=%ymm13,<a3=%ymm6,>r=%ymm7 vpand % ymm13, % ymm6, % ymm7 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#8,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm7,<r6=%ymm0,<r6=%ymm0 
vpxor % ymm7, % ymm0, % ymm0 # qhasm: r = b3 & a2 # asm 1: vpand <b3=reg256#14,<a2=reg256#9,>r=reg256#8 # asm 2: vpand <b3=%ymm13,<a2=%ymm8,>r=%ymm7 vpand % ymm13, % ymm8, % ymm7 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#8,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm7,<r5=%ymm3,<r5=%ymm3 vpxor % ymm7, % ymm3, % ymm3 # qhasm: r = b3 & a1 # asm 1: vpand <b3=reg256#14,<a1=reg256#11,>r=reg256#8 # asm 2: vpand <b3=%ymm13,<a1=%ymm10,>r=%ymm7 vpand % ymm13, % ymm10, % ymm7 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#8,<r4=reg256#6,<r4=reg256#6 # asm 2: vpxor <r=%ymm7,<r4=%ymm5,<r4=%ymm5 vpxor % ymm7, % ymm5, % ymm5 # qhasm: r3 = b3 & a0 # asm 1: vpand <b3=reg256#14,<a0=reg256#13,>r3=reg256#8 # asm 2: vpand <b3=%ymm13,<a0=%ymm12,>r3=%ymm7 vpand % ymm13, % ymm12, % ymm7 # qhasm: b2 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b2=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b2=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b2 & a6 # asm 1: vpand <b2=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b2=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#15,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm14,<r8=%ymm9,<r8=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: mem256[ ptr + 256 ] = r8 # asm 1: vmovupd <r8=reg256#10,256(<ptr=int64#5) # asm 2: vmovupd <r8=%ymm9,256(<ptr=%r8) vmovupd % ymm9, 256( % r8) # qhasm: r = b2 & a5 # asm 1: vpand <b2=reg256#14,<a5=reg256#3,>r=reg256#10 # asm 2: vpand <b2=%ymm13,<a5=%ymm2,>r=%ymm9 vpand % ymm13, % ymm2, % ymm9 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#10,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm9,<r7=%ymm11,<r7=%ymm11 vpxor % ymm9, % ymm11, % ymm11 # qhasm: r = b2 & a4 # asm 1: vpand <b2=reg256#14,<a4=reg256#5,>r=reg256#10 # asm 2: vpand <b2=%ymm13,<a4=%ymm4,>r=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#10,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm9,<r6=%ymm0,<r6=%ymm0 vpxor % ymm9, % ymm0, % ymm0 # qhasm: r = b2 & a3 # asm 1: vpand <b2=reg256#14,<a3=reg256#7,>r=reg256#10 # asm 2: vpand <b2=%ymm13,<a3=%ymm6,>r=%ymm9 vpand % ymm13, % ymm6, % ymm9 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#10,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm9,<r5=%ymm3,<r5=%ymm3 vpxor % ymm9, % ymm3, % ymm3 # qhasm: r = b2 & a2 # asm 1: vpand <b2=reg256#14,<a2=reg256#9,>r=reg256#10 # asm 2: vpand <b2=%ymm13,<a2=%ymm8,>r=%ymm9 vpand % ymm13, % ymm8, % ymm9 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#10,<r4=reg256#6,<r4=reg256#6 # asm 2: vpxor <r=%ymm9,<r4=%ymm5,<r4=%ymm5 vpxor % ymm9, % ymm5, % ymm5 # qhasm: r = b2 & a1 # asm 1: vpand <b2=reg256#14,<a1=reg256#11,>r=reg256#10 # asm 2: vpand <b2=%ymm13,<a1=%ymm10,>r=%ymm9 vpand % ymm13, % ymm10, % ymm9 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#10,<r3=reg256#8,<r3=reg256#8 # asm 2: vpxor <r=%ymm9,<r3=%ymm7,<r3=%ymm7 vpxor % ymm9, % ymm7, % ymm7 # qhasm: r2 = b2 & a0 # asm 1: vpand <b2=reg256#14,<a0=reg256#13,>r2=reg256#10 # asm 2: vpand <b2=%ymm13,<a0=%ymm12,>r2=%ymm9 vpand % ymm13, % ymm12, % ymm9 # qhasm: b1 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b1=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b1=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b1 & a6 # asm 1: vpand 
<b1=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b1=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#15,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm14,<r7=%ymm11,<r7=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: mem256[ ptr + 224 ] = r7 # asm 1: vmovupd <r7=reg256#12,224(<ptr=int64#5) # asm 2: vmovupd <r7=%ymm11,224(<ptr=%r8) vmovupd % ymm11, 224( % r8) # qhasm: r = b1 & a5 # asm 1: vpand <b1=reg256#14,<a5=reg256#3,>r=reg256#12 # asm 2: vpand <b1=%ymm13,<a5=%ymm2,>r=%ymm11 vpand % ymm13, % ymm2, % ymm11 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#12,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm11,<r6=%ymm0,<r6=%ymm0 vpxor % ymm11, % ymm0, % ymm0 # qhasm: r = b1 & a4 # asm 1: vpand <b1=reg256#14,<a4=reg256#5,>r=reg256#12 # asm 2: vpand <b1=%ymm13,<a4=%ymm4,>r=%ymm11 vpand % ymm13, % ymm4, % ymm11 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#12,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm11,<r5=%ymm3,<r5=%ymm3 vpxor % ymm11, % ymm3, % ymm3 # qhasm: r = b1 & a3 # asm 1: vpand <b1=reg256#14,<a3=reg256#7,>r=reg256#12 # asm 2: vpand <b1=%ymm13,<a3=%ymm6,>r=%ymm11 vpand % ymm13, % ymm6, % ymm11 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#12,<r4=reg256#6,<r4=reg256#6 # asm 2: vpxor <r=%ymm11,<r4=%ymm5,<r4=%ymm5 vpxor % ymm11, % ymm5, % ymm5 # qhasm: r = b1 & a2 # asm 1: vpand <b1=reg256#14,<a2=reg256#9,>r=reg256#12 # asm 2: vpand <b1=%ymm13,<a2=%ymm8,>r=%ymm11 vpand % ymm13, % ymm8, % ymm11 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#12,<r3=reg256#8,<r3=reg256#8 # asm 2: vpxor <r=%ymm11,<r3=%ymm7,<r3=%ymm7 vpxor % ymm11, % ymm7, % ymm7 # qhasm: r = b1 & a1 # asm 1: vpand <b1=reg256#14,<a1=reg256#11,>r=reg256#12 # asm 2: vpand <b1=%ymm13,<a1=%ymm10,>r=%ymm11 vpand % ymm13, % ymm10, % ymm11 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#12,<r2=reg256#10,<r2=reg256#10 # asm 2: vpxor <r=%ymm11,<r2=%ymm9,<r2=%ymm9 vpxor % ymm11, % ymm9, % ymm9 # qhasm: r1 = b1 & a0 # asm 1: vpand <b1=reg256#14,<a0=reg256#13,>r1=reg256#12 # asm 2: vpand <b1=%ymm13,<a0=%ymm12,>r1=%ymm11 vpand % ymm13, % ymm12, % ymm11 # qhasm: b0 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b0=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b0=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b0 & a6 # asm 1: vpand <b0=reg256#14,<a6=reg256#2,>r=reg256#2 # asm 2: vpand <b0=%ymm13,<a6=%ymm1,>r=%ymm1 vpand % ymm13, % ymm1, % ymm1 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#2,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm1,<r6=%ymm0,<r6=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: mem256[ ptr + 192 ] = r6 # asm 1: vmovupd <r6=reg256#1,192(<ptr=int64#5) # asm 2: vmovupd <r6=%ymm0,192(<ptr=%r8) vmovupd % ymm0, 192( % r8) # qhasm: r = b0 & a5 # asm 1: vpand <b0=reg256#14,<a5=reg256#3,>r=reg256#1 # asm 2: vpand <b0=%ymm13,<a5=%ymm2,>r=%ymm0 vpand % ymm13, % ymm2, % ymm0 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#1,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm0,<r5=%ymm3,<r5=%ymm3 vpxor % ymm0, % ymm3, % ymm3 # qhasm: r = b0 & a4 # asm 1: vpand <b0=reg256#14,<a4=reg256#5,>r=reg256#1 # asm 2: vpand <b0=%ymm13,<a4=%ymm4,>r=%ymm0 vpand % ymm13, % ymm4, % ymm0 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#1,<r4=reg256#6,<r4=reg256#6 # asm 2: vpxor <r=%ymm0,<r4=%ymm5,<r4=%ymm5 vpxor % ymm0, % ymm5, % ymm5 # qhasm: r = b0 & a3 # asm 1: vpand <b0=reg256#14,<a3=reg256#7,>r=reg256#1 # asm 2: vpand <b0=%ymm13,<a3=%ymm6,>r=%ymm0 vpand 
% ymm13, % ymm6, % ymm0 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#1,<r3=reg256#8,<r3=reg256#8 # asm 2: vpxor <r=%ymm0,<r3=%ymm7,<r3=%ymm7 vpxor % ymm0, % ymm7, % ymm7 # qhasm: r = b0 & a2 # asm 1: vpand <b0=reg256#14,<a2=reg256#9,>r=reg256#1 # asm 2: vpand <b0=%ymm13,<a2=%ymm8,>r=%ymm0 vpand % ymm13, % ymm8, % ymm0 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#1,<r2=reg256#10,<r2=reg256#10 # asm 2: vpxor <r=%ymm0,<r2=%ymm9,<r2=%ymm9 vpxor % ymm0, % ymm9, % ymm9 # qhasm: r = b0 & a1 # asm 1: vpand <b0=reg256#14,<a1=reg256#11,>r=reg256#1 # asm 2: vpand <b0=%ymm13,<a1=%ymm10,>r=%ymm0 vpand % ymm13, % ymm10, % ymm0 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#1,<r1=reg256#12,<r1=reg256#12 # asm 2: vpxor <r=%ymm0,<r1=%ymm11,<r1=%ymm11 vpxor % ymm0, % ymm11, % ymm11 # qhasm: r0 = b0 & a0 # asm 1: vpand <b0=reg256#14,<a0=reg256#13,>r0=reg256#1 # asm 2: vpand <b0=%ymm13,<a0=%ymm12,>r0=%ymm0 vpand % ymm13, % ymm12, % ymm0 # qhasm: mem256[ ptr + 160 ] = r5 # asm 1: vmovupd <r5=reg256#4,160(<ptr=int64#5) # asm 2: vmovupd <r5=%ymm3,160(<ptr=%r8) vmovupd % ymm3, 160( % r8) # qhasm: mem256[ ptr + 128 ] = r4 # asm 1: vmovupd <r4=reg256#6,128(<ptr=int64#5) # asm 2: vmovupd <r4=%ymm5,128(<ptr=%r8) vmovupd % ymm5, 128( % r8) # qhasm: mem256[ ptr + 96 ] = r3 # asm 1: vmovupd <r3=reg256#8,96(<ptr=int64#5) # asm 2: vmovupd <r3=%ymm7,96(<ptr=%r8) vmovupd % ymm7, 96( % r8) # qhasm: mem256[ ptr + 64 ] = r2 # asm 1: vmovupd <r2=reg256#10,64(<ptr=int64#5) # asm 2: vmovupd <r2=%ymm9,64(<ptr=%r8) vmovupd % ymm9, 64( % r8) # qhasm: mem256[ ptr + 32 ] = r1 # asm 1: vmovupd <r1=reg256#12,32(<ptr=int64#5) # asm 2: vmovupd <r1=%ymm11,32(<ptr=%r8) vmovupd % ymm11, 32( % r8) # qhasm: mem256[ ptr + 0 ] = r0 # asm 1: vmovupd <r0=reg256#1,0(<ptr=int64#5) # asm 2: vmovupd <r0=%ymm0,0(<ptr=%r8) vmovupd % ymm0, 0( % r8) # qhasm: vzeroupper vzeroupper # qhasm: h24 = mem128[ ptr + 560 ] # asm 1: movdqu 560(<ptr=int64#5),>h24=reg128#1 # asm 2: movdqu 560(<ptr=%r8),>h24=%xmm0 movdqu 560( % r8), % xmm0 # qhasm: h11 = h24 # asm 1: movdqa <h24=reg128#1,>h11=reg128#2 # asm 2: movdqa <h24=%xmm0,>h11=%xmm1 movdqa % xmm0, % xmm1 # qhasm: h12 = h24 # asm 1: movdqa <h24=reg128#1,>h12=reg128#3 # asm 2: movdqa <h24=%xmm0,>h12=%xmm2 movdqa % xmm0, % xmm2 # qhasm: h14 = h24 # asm 1: movdqa <h24=reg128#1,>h14=reg128#4 # asm 2: movdqa <h24=%xmm0,>h14=%xmm3 movdqa % xmm0, % xmm3 # qhasm: h15 = h24 # asm 1: movdqa <h24=reg128#1,>h15=reg128#1 # asm 2: movdqa <h24=%xmm0,>h15=%xmm0 movdqa % xmm0, % xmm0 # qhasm: h23 = mem128[ ptr + 528 ] # asm 1: movdqu 528(<ptr=int64#5),>h23=reg128#5 # asm 2: movdqu 528(<ptr=%r8),>h23=%xmm4 movdqu 528( % r8), % xmm4 # qhasm: h10 = h23 # asm 1: movdqa <h23=reg128#5,>h10=reg128#6 # asm 2: movdqa <h23=%xmm4,>h10=%xmm5 movdqa % xmm4, % xmm5 # qhasm: h11 = h11 ^ h23 # asm 1: vpxor <h23=reg128#5,<h11=reg128#2,>h11=reg128#2 # asm 2: vpxor <h23=%xmm4,<h11=%xmm1,>h11=%xmm1 vpxor % xmm4, % xmm1, % xmm1 # qhasm: h13 = h23 # asm 1: movdqa <h23=reg128#5,>h13=reg128#7 # asm 2: movdqa <h23=%xmm4,>h13=%xmm6 movdqa % xmm4, % xmm6 # qhasm: h14 = h14 ^ h23 # asm 1: vpxor <h23=reg128#5,<h14=reg128#4,>h14=reg128#4 # asm 2: vpxor <h23=%xmm4,<h14=%xmm3,>h14=%xmm3 vpxor % xmm4, % xmm3, % xmm3 # qhasm: h22 = mem128[ ptr + 496 ] # asm 1: movdqu 496(<ptr=int64#5),>h22=reg128#5 # asm 2: movdqu 496(<ptr=%r8),>h22=%xmm4 movdqu 496( % r8), % xmm4 # qhasm: h9 = h22 # asm 1: movdqa <h22=reg128#5,>h9=reg128#8 # asm 2: movdqa <h22=%xmm4,>h9=%xmm7 movdqa % xmm4, % xmm7 # qhasm: h10 = h10 ^ h22 # asm 1: vpxor <h22=reg128#5,<h10=reg128#6,>h10=reg128#6 # asm 2: 
vpxor <h22=%xmm4,<h10=%xmm5,>h10=%xmm5 vpxor % xmm4, % xmm5, % xmm5 # qhasm: h12 = h12 ^ h22 # asm 1: vpxor <h22=reg128#5,<h12=reg128#3,>h12=reg128#3 # asm 2: vpxor <h22=%xmm4,<h12=%xmm2,>h12=%xmm2 vpxor % xmm4, % xmm2, % xmm2 # qhasm: h13 = h13 ^ h22 # asm 1: vpxor <h22=reg128#5,<h13=reg128#7,>h13=reg128#5 # asm 2: vpxor <h22=%xmm4,<h13=%xmm6,>h13=%xmm4 vpxor % xmm4, % xmm6, % xmm4 # qhasm: h21 = mem128[ ptr + 464 ] # asm 1: movdqu 464(<ptr=int64#5),>h21=reg128#7 # asm 2: movdqu 464(<ptr=%r8),>h21=%xmm6 movdqu 464( % r8), % xmm6 # qhasm: h8 = h21 # asm 1: movdqa <h21=reg128#7,>h8=reg128#9 # asm 2: movdqa <h21=%xmm6,>h8=%xmm8 movdqa % xmm6, % xmm8 # qhasm: h9 = h9 ^ h21 # asm 1: vpxor <h21=reg128#7,<h9=reg128#8,>h9=reg128#8 # asm 2: vpxor <h21=%xmm6,<h9=%xmm7,>h9=%xmm7 vpxor % xmm6, % xmm7, % xmm7 # qhasm: h11 = h11 ^ h21 # asm 1: vpxor <h21=reg128#7,<h11=reg128#2,>h11=reg128#2 # asm 2: vpxor <h21=%xmm6,<h11=%xmm1,>h11=%xmm1 vpxor % xmm6, % xmm1, % xmm1 # qhasm: h12 = h12 ^ h21 # asm 1: vpxor <h21=reg128#7,<h12=reg128#3,>h12=reg128#3 # asm 2: vpxor <h21=%xmm6,<h12=%xmm2,>h12=%xmm2 vpxor % xmm6, % xmm2, % xmm2 # qhasm: h20 = mem128[ ptr + 432 ] # asm 1: movdqu 432(<ptr=int64#5),>h20=reg128#7 # asm 2: movdqu 432(<ptr=%r8),>h20=%xmm6 movdqu 432( % r8), % xmm6 # qhasm: h7 = h20 # asm 1: movdqa <h20=reg128#7,>h7=reg128#10 # asm 2: movdqa <h20=%xmm6,>h7=%xmm9 movdqa % xmm6, % xmm9 # qhasm: h8 = h8 ^ h20 # asm 1: vpxor <h20=reg128#7,<h8=reg128#9,>h8=reg128#9 # asm 2: vpxor <h20=%xmm6,<h8=%xmm8,>h8=%xmm8 vpxor % xmm6, % xmm8, % xmm8 # qhasm: h10 = h10 ^ h20 # asm 1: vpxor <h20=reg128#7,<h10=reg128#6,>h10=reg128#6 # asm 2: vpxor <h20=%xmm6,<h10=%xmm5,>h10=%xmm5 vpxor % xmm6, % xmm5, % xmm5 # qhasm: h11 = h11 ^ h20 # asm 1: vpxor <h20=reg128#7,<h11=reg128#2,>h11=reg128#2 # asm 2: vpxor <h20=%xmm6,<h11=%xmm1,>h11=%xmm1 vpxor % xmm6, % xmm1, % xmm1 # qhasm: h19 = mem128[ ptr + 400 ] # asm 1: movdqu 400(<ptr=int64#5),>h19=reg128#7 # asm 2: movdqu 400(<ptr=%r8),>h19=%xmm6 movdqu 400( % r8), % xmm6 # qhasm: h6 = h19 # asm 1: movdqa <h19=reg128#7,>h6=reg128#11 # asm 2: movdqa <h19=%xmm6,>h6=%xmm10 movdqa % xmm6, % xmm10 # qhasm: h7 = h7 ^ h19 # asm 1: vpxor <h19=reg128#7,<h7=reg128#10,>h7=reg128#10 # asm 2: vpxor <h19=%xmm6,<h7=%xmm9,>h7=%xmm9 vpxor % xmm6, % xmm9, % xmm9 # qhasm: h9 = h9 ^ h19 # asm 1: vpxor <h19=reg128#7,<h9=reg128#8,>h9=reg128#8 # asm 2: vpxor <h19=%xmm6,<h9=%xmm7,>h9=%xmm7 vpxor % xmm6, % xmm7, % xmm7 # qhasm: h10 = h10 ^ h19 # asm 1: vpxor <h19=reg128#7,<h10=reg128#6,>h10=reg128#6 # asm 2: vpxor <h19=%xmm6,<h10=%xmm5,>h10=%xmm5 vpxor % xmm6, % xmm5, % xmm5 # qhasm: h18 = mem128[ ptr + 368 ] # asm 1: movdqu 368(<ptr=int64#5),>h18=reg128#7 # asm 2: movdqu 368(<ptr=%r8),>h18=%xmm6 movdqu 368( % r8), % xmm6 # qhasm: h18 = h18 ^ mem128[ ptr + 576 ] # asm 1: vpxor 576(<ptr=int64#5),<h18=reg128#7,>h18=reg128#7 # asm 2: vpxor 576(<ptr=%r8),<h18=%xmm6,>h18=%xmm6 vpxor 576( % r8), % xmm6, % xmm6 # qhasm: h5 = h18 # asm 1: movdqa <h18=reg128#7,>h5=reg128#12 # asm 2: movdqa <h18=%xmm6,>h5=%xmm11 movdqa % xmm6, % xmm11 # qhasm: h6 = h6 ^ h18 # asm 1: vpxor <h18=reg128#7,<h6=reg128#11,>h6=reg128#11 # asm 2: vpxor <h18=%xmm6,<h6=%xmm10,>h6=%xmm10 vpxor % xmm6, % xmm10, % xmm10 # qhasm: h8 = h8 ^ h18 # asm 1: vpxor <h18=reg128#7,<h8=reg128#9,>h8=reg128#9 # asm 2: vpxor <h18=%xmm6,<h8=%xmm8,>h8=%xmm8 vpxor % xmm6, % xmm8, % xmm8 # qhasm: h9 = h9 ^ h18 # asm 1: vpxor <h18=reg128#7,<h9=reg128#8,>h9=reg128#7 # asm 2: vpxor <h18=%xmm6,<h9=%xmm7,>h9=%xmm6 vpxor % xmm6, % xmm7, % xmm6 # qhasm: h17 = 
mem128[ ptr + 336 ] # asm 1: movdqu 336(<ptr=int64#5),>h17=reg128#8 # asm 2: movdqu 336(<ptr=%r8),>h17=%xmm7 movdqu 336( % r8), % xmm7 # qhasm: h17 = h17 ^ mem128[ ptr + 544 ] # asm 1: vpxor 544(<ptr=int64#5),<h17=reg128#8,>h17=reg128#8 # asm 2: vpxor 544(<ptr=%r8),<h17=%xmm7,>h17=%xmm7 vpxor 544( % r8), % xmm7, % xmm7 # qhasm: h4 = h17 # asm 1: movdqa <h17=reg128#8,>h4=reg128#13 # asm 2: movdqa <h17=%xmm7,>h4=%xmm12 movdqa % xmm7, % xmm12 # qhasm: h5 = h5 ^ h17 # asm 1: vpxor <h17=reg128#8,<h5=reg128#12,>h5=reg128#12 # asm 2: vpxor <h17=%xmm7,<h5=%xmm11,>h5=%xmm11 vpxor % xmm7, % xmm11, % xmm11 # qhasm: h7 = h7 ^ h17 # asm 1: vpxor <h17=reg128#8,<h7=reg128#10,>h7=reg128#10 # asm 2: vpxor <h17=%xmm7,<h7=%xmm9,>h7=%xmm9 vpxor % xmm7, % xmm9, % xmm9 # qhasm: h8 = h8 ^ h17 # asm 1: vpxor <h17=reg128#8,<h8=reg128#9,>h8=reg128#8 # asm 2: vpxor <h17=%xmm7,<h8=%xmm8,>h8=%xmm7 vpxor % xmm7, % xmm8, % xmm7 # qhasm: h16 = mem128[ ptr + 304 ] # asm 1: movdqu 304(<ptr=int64#5),>h16=reg128#9 # asm 2: movdqu 304(<ptr=%r8),>h16=%xmm8 movdqu 304( % r8), % xmm8 # qhasm: h16 = h16 ^ mem128[ ptr + 512 ] # asm 1: vpxor 512(<ptr=int64#5),<h16=reg128#9,>h16=reg128#9 # asm 2: vpxor 512(<ptr=%r8),<h16=%xmm8,>h16=%xmm8 vpxor 512( % r8), % xmm8, % xmm8 # qhasm: h3 = h16 # asm 1: movdqa <h16=reg128#9,>h3=reg128#14 # asm 2: movdqa <h16=%xmm8,>h3=%xmm13 movdqa % xmm8, % xmm13 # qhasm: h4 = h4 ^ h16 # asm 1: vpxor <h16=reg128#9,<h4=reg128#13,>h4=reg128#13 # asm 2: vpxor <h16=%xmm8,<h4=%xmm12,>h4=%xmm12 vpxor % xmm8, % xmm12, % xmm12 # qhasm: h6 = h6 ^ h16 # asm 1: vpxor <h16=reg128#9,<h6=reg128#11,>h6=reg128#11 # asm 2: vpxor <h16=%xmm8,<h6=%xmm10,>h6=%xmm10 vpxor % xmm8, % xmm10, % xmm10 # qhasm: h7 = h7 ^ h16 # asm 1: vpxor <h16=reg128#9,<h7=reg128#10,>h7=reg128#9 # asm 2: vpxor <h16=%xmm8,<h7=%xmm9,>h7=%xmm8 vpxor % xmm8, % xmm9, % xmm8 # qhasm: h15 = h15 ^ mem128[ ptr + 272 ] # asm 1: vpxor 272(<ptr=int64#5),<h15=reg128#1,>h15=reg128#1 # asm 2: vpxor 272(<ptr=%r8),<h15=%xmm0,>h15=%xmm0 vpxor 272( % r8), % xmm0, % xmm0 # qhasm: h15 = h15 ^ mem128[ ptr + 480 ] # asm 1: vpxor 480(<ptr=int64#5),<h15=reg128#1,>h15=reg128#1 # asm 2: vpxor 480(<ptr=%r8),<h15=%xmm0,>h15=%xmm0 vpxor 480( % r8), % xmm0, % xmm0 # qhasm: h2 = h15 # asm 1: movdqa <h15=reg128#1,>h2=reg128#10 # asm 2: movdqa <h15=%xmm0,>h2=%xmm9 movdqa % xmm0, % xmm9 # qhasm: h3 = h3 ^ h15 # asm 1: vpxor <h15=reg128#1,<h3=reg128#14,>h3=reg128#14 # asm 2: vpxor <h15=%xmm0,<h3=%xmm13,>h3=%xmm13 vpxor % xmm0, % xmm13, % xmm13 # qhasm: h5 = h5 ^ h15 # asm 1: vpxor <h15=reg128#1,<h5=reg128#12,>h5=reg128#12 # asm 2: vpxor <h15=%xmm0,<h5=%xmm11,>h5=%xmm11 vpxor % xmm0, % xmm11, % xmm11 # qhasm: h6 = h6 ^ h15 # asm 1: vpxor <h15=reg128#1,<h6=reg128#11,>h6=reg128#1 # asm 2: vpxor <h15=%xmm0,<h6=%xmm10,>h6=%xmm0 vpxor % xmm0, % xmm10, % xmm0 # qhasm: h14 = h14 ^ mem128[ ptr + 240 ] # asm 1: vpxor 240(<ptr=int64#5),<h14=reg128#4,>h14=reg128#4 # asm 2: vpxor 240(<ptr=%r8),<h14=%xmm3,>h14=%xmm3 vpxor 240( % r8), % xmm3, % xmm3 # qhasm: h14 = h14 ^ mem128[ ptr + 448 ] # asm 1: vpxor 448(<ptr=int64#5),<h14=reg128#4,>h14=reg128#4 # asm 2: vpxor 448(<ptr=%r8),<h14=%xmm3,>h14=%xmm3 vpxor 448( % r8), % xmm3, % xmm3 # qhasm: h1 = h14 # asm 1: movdqa <h14=reg128#4,>h1=reg128#11 # asm 2: movdqa <h14=%xmm3,>h1=%xmm10 movdqa % xmm3, % xmm10 # qhasm: h2 = h2 ^ h14 # asm 1: vpxor <h14=reg128#4,<h2=reg128#10,>h2=reg128#10 # asm 2: vpxor <h14=%xmm3,<h2=%xmm9,>h2=%xmm9 vpxor % xmm3, % xmm9, % xmm9 # qhasm: h4 = h4 ^ h14 # asm 1: vpxor <h14=reg128#4,<h4=reg128#13,>h4=reg128#13 # asm 2: vpxor 
<h14=%xmm3,<h4=%xmm12,>h4=%xmm12 vpxor % xmm3, % xmm12, % xmm12 # qhasm: h5 = h5 ^ h14 # asm 1: vpxor <h14=reg128#4,<h5=reg128#12,>h5=reg128#4 # asm 2: vpxor <h14=%xmm3,<h5=%xmm11,>h5=%xmm3 vpxor % xmm3, % xmm11, % xmm3 # qhasm: h13 = h13 ^ mem128[ ptr + 208 ] # asm 1: vpxor 208(<ptr=int64#5),<h13=reg128#5,>h13=reg128#5 # asm 2: vpxor 208(<ptr=%r8),<h13=%xmm4,>h13=%xmm4 vpxor 208( % r8), % xmm4, % xmm4 # qhasm: h13 = h13 ^ mem128[ ptr + 416 ] # asm 1: vpxor 416(<ptr=int64#5),<h13=reg128#5,>h13=reg128#5 # asm 2: vpxor 416(<ptr=%r8),<h13=%xmm4,>h13=%xmm4 vpxor 416( % r8), % xmm4, % xmm4 # qhasm: h0 = h13 # asm 1: movdqa <h13=reg128#5,>h0=reg128#12 # asm 2: movdqa <h13=%xmm4,>h0=%xmm11 movdqa % xmm4, % xmm11 # qhasm: h1 = h1 ^ h13 # asm 1: vpxor <h13=reg128#5,<h1=reg128#11,>h1=reg128#11 # asm 2: vpxor <h13=%xmm4,<h1=%xmm10,>h1=%xmm10 vpxor % xmm4, % xmm10, % xmm10 # qhasm: h3 = h3 ^ h13 # asm 1: vpxor <h13=reg128#5,<h3=reg128#14,>h3=reg128#14 # asm 2: vpxor <h13=%xmm4,<h3=%xmm13,>h3=%xmm13 vpxor % xmm4, % xmm13, % xmm13 # qhasm: h4 = h4 ^ h13 # asm 1: vpxor <h13=reg128#5,<h4=reg128#13,>h4=reg128#5 # asm 2: vpxor <h13=%xmm4,<h4=%xmm12,>h4=%xmm4 vpxor % xmm4, % xmm12, % xmm4 # qhasm: h12 = h12 ^ mem128[ ptr + 384 ] # asm 1: vpxor 384(<ptr=int64#5),<h12=reg128#3,>h12=reg128#3 # asm 2: vpxor 384(<ptr=%r8),<h12=%xmm2,>h12=%xmm2 vpxor 384( % r8), % xmm2, % xmm2 # qhasm: h12 = h12 ^ mem128[ ptr + 176 ] # asm 1: vpxor 176(<ptr=int64#5),<h12=reg128#3,>h12=reg128#3 # asm 2: vpxor 176(<ptr=%r8),<h12=%xmm2,>h12=%xmm2 vpxor 176( % r8), % xmm2, % xmm2 # qhasm: mem128[ input_0 + 192 ] = h12 # asm 1: movdqu <h12=reg128#3,192(<input_0=int64#1) # asm 2: movdqu <h12=%xmm2,192(<input_0=%rdi) movdqu % xmm2, 192( % rdi) # qhasm: h11 = h11 ^ mem128[ ptr + 352 ] # asm 1: vpxor 352(<ptr=int64#5),<h11=reg128#2,>h11=reg128#2 # asm 2: vpxor 352(<ptr=%r8),<h11=%xmm1,>h11=%xmm1 vpxor 352( % r8), % xmm1, % xmm1 # qhasm: h11 = h11 ^ mem128[ ptr + 144 ] # asm 1: vpxor 144(<ptr=int64#5),<h11=reg128#2,>h11=reg128#2 # asm 2: vpxor 144(<ptr=%r8),<h11=%xmm1,>h11=%xmm1 vpxor 144( % r8), % xmm1, % xmm1 # qhasm: mem128[ input_0 + 176 ] = h11 # asm 1: movdqu <h11=reg128#2,176(<input_0=int64#1) # asm 2: movdqu <h11=%xmm1,176(<input_0=%rdi) movdqu % xmm1, 176( % rdi) # qhasm: h10 = h10 ^ mem128[ ptr + 320 ] # asm 1: vpxor 320(<ptr=int64#5),<h10=reg128#6,>h10=reg128#2 # asm 2: vpxor 320(<ptr=%r8),<h10=%xmm5,>h10=%xmm1 vpxor 320( % r8), % xmm5, % xmm1 # qhasm: h10 = h10 ^ mem128[ ptr + 112 ] # asm 1: vpxor 112(<ptr=int64#5),<h10=reg128#2,>h10=reg128#2 # asm 2: vpxor 112(<ptr=%r8),<h10=%xmm1,>h10=%xmm1 vpxor 112( % r8), % xmm1, % xmm1 # qhasm: mem128[ input_0 + 160 ] = h10 # asm 1: movdqu <h10=reg128#2,160(<input_0=int64#1) # asm 2: movdqu <h10=%xmm1,160(<input_0=%rdi) movdqu % xmm1, 160( % rdi) # qhasm: h9 = h9 ^ mem128[ ptr + 288 ] # asm 1: vpxor 288(<ptr=int64#5),<h9=reg128#7,>h9=reg128#2 # asm 2: vpxor 288(<ptr=%r8),<h9=%xmm6,>h9=%xmm1 vpxor 288( % r8), % xmm6, % xmm1 # qhasm: h9 = h9 ^ mem128[ ptr + 80 ] # asm 1: vpxor 80(<ptr=int64#5),<h9=reg128#2,>h9=reg128#2 # asm 2: vpxor 80(<ptr=%r8),<h9=%xmm1,>h9=%xmm1 vpxor 80( % r8), % xmm1, % xmm1 # qhasm: mem128[ input_0 + 144 ] = h9 # asm 1: movdqu <h9=reg128#2,144(<input_0=int64#1) # asm 2: movdqu <h9=%xmm1,144(<input_0=%rdi) movdqu % xmm1, 144( % rdi) # qhasm: h8 = h8 ^ mem128[ ptr + 256 ] # asm 1: vpxor 256(<ptr=int64#5),<h8=reg128#8,>h8=reg128#2 # asm 2: vpxor 256(<ptr=%r8),<h8=%xmm7,>h8=%xmm1 vpxor 256( % r8), % xmm7, % xmm1 # qhasm: h8 = h8 ^ mem128[ ptr + 48 ] # asm 1: vpxor 
48(<ptr=int64#5),<h8=reg128#2,>h8=reg128#2 # asm 2: vpxor 48(<ptr=%r8),<h8=%xmm1,>h8=%xmm1 vpxor 48( % r8), % xmm1, % xmm1 # qhasm: mem128[ input_0 + 128 ] = h8 # asm 1: movdqu <h8=reg128#2,128(<input_0=int64#1) # asm 2: movdqu <h8=%xmm1,128(<input_0=%rdi) movdqu % xmm1, 128( % rdi) # qhasm: h7 = h7 ^ mem128[ ptr + 224 ] # asm 1: vpxor 224(<ptr=int64#5),<h7=reg128#9,>h7=reg128#2 # asm 2: vpxor 224(<ptr=%r8),<h7=%xmm8,>h7=%xmm1 vpxor 224( % r8), % xmm8, % xmm1 # qhasm: h7 = h7 ^ mem128[ ptr + 16 ] # asm 1: vpxor 16(<ptr=int64#5),<h7=reg128#2,>h7=reg128#2 # asm 2: vpxor 16(<ptr=%r8),<h7=%xmm1,>h7=%xmm1 vpxor 16( % r8), % xmm1, % xmm1 # qhasm: mem128[ input_0 + 112 ] = h7 # asm 1: movdqu <h7=reg128#2,112(<input_0=int64#1) # asm 2: movdqu <h7=%xmm1,112(<input_0=%rdi) movdqu % xmm1, 112( % rdi) # qhasm: h6 = h6 ^ mem128[ ptr + 192 ] # asm 1: vpxor 192(<ptr=int64#5),<h6=reg128#1,>h6=reg128#1 # asm 2: vpxor 192(<ptr=%r8),<h6=%xmm0,>h6=%xmm0 vpxor 192( % r8), % xmm0, % xmm0 # qhasm: mem128[ input_0 + 96 ] = h6 # asm 1: movdqu <h6=reg128#1,96(<input_0=int64#1) # asm 2: movdqu <h6=%xmm0,96(<input_0=%rdi) movdqu % xmm0, 96( % rdi) # qhasm: h5 = h5 ^ mem128[ ptr + 160 ] # asm 1: vpxor 160(<ptr=int64#5),<h5=reg128#4,>h5=reg128#1 # asm 2: vpxor 160(<ptr=%r8),<h5=%xmm3,>h5=%xmm0 vpxor 160( % r8), % xmm3, % xmm0 # qhasm: mem128[ input_0 + 80 ] = h5 # asm 1: movdqu <h5=reg128#1,80(<input_0=int64#1) # asm 2: movdqu <h5=%xmm0,80(<input_0=%rdi) movdqu % xmm0, 80( % rdi) # qhasm: h4 = h4 ^ mem128[ ptr + 128 ] # asm 1: vpxor 128(<ptr=int64#5),<h4=reg128#5,>h4=reg128#1 # asm 2: vpxor 128(<ptr=%r8),<h4=%xmm4,>h4=%xmm0 vpxor 128( % r8), % xmm4, % xmm0 # qhasm: mem128[ input_0 + 64 ] = h4 # asm 1: movdqu <h4=reg128#1,64(<input_0=int64#1) # asm 2: movdqu <h4=%xmm0,64(<input_0=%rdi) movdqu % xmm0, 64( % rdi) # qhasm: h3 = h3 ^ mem128[ ptr + 96 ] # asm 1: vpxor 96(<ptr=int64#5),<h3=reg128#14,>h3=reg128#1 # asm 2: vpxor 96(<ptr=%r8),<h3=%xmm13,>h3=%xmm0 vpxor 96( % r8), % xmm13, % xmm0 # qhasm: mem128[ input_0 + 48 ] = h3 # asm 1: movdqu <h3=reg128#1,48(<input_0=int64#1) # asm 2: movdqu <h3=%xmm0,48(<input_0=%rdi) movdqu % xmm0, 48( % rdi) # qhasm: h2 = h2 ^ mem128[ ptr + 64 ] # asm 1: vpxor 64(<ptr=int64#5),<h2=reg128#10,>h2=reg128#1 # asm 2: vpxor 64(<ptr=%r8),<h2=%xmm9,>h2=%xmm0 vpxor 64( % r8), % xmm9, % xmm0 # qhasm: mem128[ input_0 + 32 ] = h2 # asm 1: movdqu <h2=reg128#1,32(<input_0=int64#1) # asm 2: movdqu <h2=%xmm0,32(<input_0=%rdi) movdqu % xmm0, 32( % rdi) # qhasm: h1 = h1 ^ mem128[ ptr + 32 ] # asm 1: vpxor 32(<ptr=int64#5),<h1=reg128#11,>h1=reg128#1 # asm 2: vpxor 32(<ptr=%r8),<h1=%xmm10,>h1=%xmm0 vpxor 32( % r8), % xmm10, % xmm0 # qhasm: mem128[ input_0 + 16 ] = h1 # asm 1: movdqu <h1=reg128#1,16(<input_0=int64#1) # asm 2: movdqu <h1=%xmm0,16(<input_0=%rdi) movdqu % xmm0, 16( % rdi) # qhasm: h0 = h0 ^ mem128[ ptr + 0 ] # asm 1: vpxor 0(<ptr=int64#5),<h0=reg128#12,>h0=reg128#1 # asm 2: vpxor 0(<ptr=%r8),<h0=%xmm11,>h0=%xmm0 vpxor 0( % r8), % xmm11, % xmm0 # qhasm: mem128[ input_0 + 0 ] = h0 # asm 1: movdqu <h0=reg128#1,0(<input_0=int64#1) # asm 2: movdqu <h0=%xmm0,0(<input_0=%rdi) movdqu % xmm0, 0( % rdi) # qhasm: return add % r11, % rsp ret
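As a rough orientation only (a hedged sketch, not the PQClean source): the long vpand/vpxor cascade in the record above accumulates the partial products of a bit-sliced carry-less multiplication, r[i+j] ^= a[i] & b[j], before the stored halves are folded back together. The limb count, word width, and names below are illustrative assumptions.

#include <stdint.h>

#define LIMBS 7   /* assumption: the routine above multiplies 7 x 7 limb groups */

/* Schoolbook bit-sliced multiplication over GF(2): each limb holds one
   coefficient bit position across a whole vector of field elements, so a
   product term is an AND and accumulation is an XOR. */
static void bitsliced_mul_sketch(uint64_t r[2 * LIMBS - 1],
                                 const uint64_t a[LIMBS],
                                 const uint64_t b[LIMBS]) {
    for (int k = 0; k < 2 * LIMBS - 1; k++)
        r[k] = 0;
    for (int i = 0; i < LIMBS; i++)
        for (int j = 0; j < LIMBS; j++)
            r[i + j] ^= a[i] & b[j];   /* vpand followed by vpxor in the assembly */
}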
mktmansour/MKT-KSA-Geolocation-Security
11,545
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6960119f/avx2/vec_reduce_asm.S
#include "namespace.h" #define vec_reduce_asm CRYPTO_NAMESPACE(vec_reduce_asm) #define _vec_reduce_asm _CRYPTO_NAMESPACE(vec_reduce_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: int64 t0 # qhasm: int64 t1 # qhasm: int64 c # qhasm: int64 r # qhasm: enter vec_reduce_asm .p2align 5 .global _vec_reduce_asm .global vec_reduce_asm _vec_reduce_asm: vec_reduce_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: r = 0 # asm 1: mov $0,>r=int64#7 # asm 2: mov $0,>r=%rax mov $0, % rax # qhasm: t0 = mem64[ input_0 + 192 ] # asm 1: movq 192(<input_0=int64#1),>t0=int64#2 # asm 2: movq 192(<input_0=%rdi),>t0=%rsi movq 192( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 200 ] # asm 1: movq 200(<input_0=int64#1),>t1=int64#3 # asm 2: movq 200(<input_0=%rdi),>t1=%rdx movq 200( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 176 ] # asm 1: movq 176(<input_0=int64#1),>t0=int64#2 # asm 2: movq 176(<input_0=%rdi),>t0=%rsi movq 176( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 184 ] # asm 1: movq 184(<input_0=int64#1),>t1=int64#3 # asm 2: movq 184(<input_0=%rdi),>t1=%rdx movq 184( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 160 ] # asm 1: movq 160(<input_0=int64#1),>t0=int64#2 # asm 2: movq 160(<input_0=%rdi),>t0=%rsi movq 160( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 168 ] # asm 1: movq 168(<input_0=int64#1),>t1=int64#3 # asm 2: movq 168(<input_0=%rdi),>t1=%rdx movq 168( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 144 ] # asm 1: movq 144(<input_0=int64#1),>t0=int64#2 # asm 2: movq 144(<input_0=%rdi),>t0=%rsi movq 144( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 152 ] # asm 1: movq 152(<input_0=int64#1),>t1=int64#3 # asm 2: movq 152(<input_0=%rdi),>t1=%rdx movq 152( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor 
<t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 128 ] # asm 1: movq 128(<input_0=int64#1),>t0=int64#2 # asm 2: movq 128(<input_0=%rdi),>t0=%rsi movq 128( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 136 ] # asm 1: movq 136(<input_0=int64#1),>t1=int64#3 # asm 2: movq 136(<input_0=%rdi),>t1=%rdx movq 136( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 112 ] # asm 1: movq 112(<input_0=int64#1),>t0=int64#2 # asm 2: movq 112(<input_0=%rdi),>t0=%rsi movq 112( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 120 ] # asm 1: movq 120(<input_0=int64#1),>t1=int64#3 # asm 2: movq 120(<input_0=%rdi),>t1=%rdx movq 120( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 96 ] # asm 1: movq 96(<input_0=int64#1),>t0=int64#2 # asm 2: movq 96(<input_0=%rdi),>t0=%rsi movq 96( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 104 ] # asm 1: movq 104(<input_0=int64#1),>t1=int64#3 # asm 2: movq 104(<input_0=%rdi),>t1=%rdx movq 104( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 80 ] # asm 1: movq 80(<input_0=int64#1),>t0=int64#2 # asm 2: movq 80(<input_0=%rdi),>t0=%rsi movq 80( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 88 ] # asm 1: movq 88(<input_0=int64#1),>t1=int64#3 # asm 2: movq 88(<input_0=%rdi),>t1=%rdx movq 88( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or 
<c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 64 ] # asm 1: movq 64(<input_0=int64#1),>t0=int64#2 # asm 2: movq 64(<input_0=%rdi),>t0=%rsi movq 64( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 72 ] # asm 1: movq 72(<input_0=int64#1),>t1=int64#3 # asm 2: movq 72(<input_0=%rdi),>t1=%rdx movq 72( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 48 ] # asm 1: movq 48(<input_0=int64#1),>t0=int64#2 # asm 2: movq 48(<input_0=%rdi),>t0=%rsi movq 48( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 56 ] # asm 1: movq 56(<input_0=int64#1),>t1=int64#3 # asm 2: movq 56(<input_0=%rdi),>t1=%rdx movq 56( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 32 ] # asm 1: movq 32(<input_0=int64#1),>t0=int64#2 # asm 2: movq 32(<input_0=%rdi),>t0=%rsi movq 32( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 40 ] # asm 1: movq 40(<input_0=int64#1),>t1=int64#3 # asm 2: movq 40(<input_0=%rdi),>t1=%rdx movq 40( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 16 ] # asm 1: movq 16(<input_0=int64#1),>t0=int64#2 # asm 2: movq 16(<input_0=%rdi),>t0=%rsi movq 16( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 24 ] # asm 1: movq 24(<input_0=int64#1),>t1=int64#3 # asm 2: movq 24(<input_0=%rdi),>t1=%rdx movq 24( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>t0=int64#2 # asm 2: movq 0(<input_0=%rdi),>t0=%rsi movq 0( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>t1=int64#1 # asm 2: movq 8(<input_0=%rdi),>t1=%rdi movq 8( % rdi), % rdi # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#1,<t0=int64#2 # asm 2: xor 
<t1=%rdi,<t0=%rsi xor % rdi, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#1 # asm 2: popcnt <t0=%rsi, >c=%rdi popcnt % rsi, % rdi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#1d # asm 2: and $1,<c=%edi and $1, % edi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#1,<r=int64#7 # asm 2: or <c=%rdi,<r=%rax or % rdi, % rax # qhasm: return r add % r11, % rsp ret
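For orientation, the vec_reduce_asm record above folds each of 13 consecutive 16-byte chunks to a single parity bit (xor of the two 64-bit words, popcnt, and-with-1) and packs the bits into one result, working from the highest-offset chunk down. A minimal C sketch of that behaviour, assuming the same layout; the function and helper names are made up for illustration:

#include <stdint.h>

/* Parity of a 64-bit word (the assembly uses popcnt ... then and $1). */
static uint64_t parity64_sketch(uint64_t x) {
    x ^= x >> 32; x ^= x >> 16; x ^= x >> 8;
    x ^= x >> 4;  x ^= x >> 2;  x ^= x >> 1;
    return x & 1;
}

/* in holds 13 chunks of two 64-bit words; bit i of the result is the parity
   of chunk i.  The chunk at the highest offset is processed first, so it
   lands in the most significant of the 13 result bits, as in the assembly. */
static uint64_t vec_reduce_sketch(const uint64_t in[26]) {
    uint64_t r = 0;
    for (int i = 12; i >= 0; i--)
        r = (r << 1) | parity64_sketch(in[2 * i] ^ in[2 * i + 1]);
    return r;
}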
mktmansour/MKT-KSA-Geolocation-Security
24,351
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6960119f/avx2/syndrome_asm.S
#include "namespace.h" #define syndrome_asm CRYPTO_NAMESPACE(syndrome_asm) #define _syndrome_asm _CRYPTO_NAMESPACE(syndrome_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: int64 b64 # qhasm: int64 synd # qhasm: int64 addr # qhasm: int64 c # qhasm: int64 c_all # qhasm: int64 row # qhasm: reg256 pp # qhasm: reg256 ee # qhasm: reg256 ss # qhasm: int64 b0 # qhasm: int64 b1 # qhasm: int64 i # qhasm: int64 p # qhasm: int64 e # qhasm: int64 s # qhasm: int64 tmp # qhasm: stack64 back # qhasm: int64 buf_ptr # qhasm: stack256 buf # qhasm: enter syndrome_asm .p2align 5 .global _syndrome_asm .global syndrome_asm _syndrome_asm: syndrome_asm: mov % rsp, % r11 and $31, % r11 add $64, % r11 sub % r11, % rsp # qhasm: input_2 += 193 # asm 1: add $193,<input_2=int64#3 # asm 2: add $193,<input_2=%rdx add $193, % rdx # qhasm: *(uint8 *) (input_0 + 193) = 0 # asm 1: movb $0,193(<input_0=int64#1) # asm 2: movb $0,193(<input_0=%rdi) movb $0, 193( % rdi) # qhasm: tmp = *(uint8 *) (input_2 + 0) # asm 1: movzbq 0(<input_2=int64#3),>tmp=int64#4 # asm 2: movzbq 0(<input_2=%rdx),>tmp=%rcx movzbq 0( % rdx), % rcx # qhasm: back = tmp # asm 1: movq <tmp=int64#4,>back=stack64#1 # asm 2: movq <tmp=%rcx,>back=32(%rsp) movq % rcx, 32( % rsp) # qhasm: i = 0 # asm 1: mov $0,>i=int64#4 # asm 2: mov $0,>i=%rcx mov $0, % rcx # qhasm: inner1: ._inner1: # qhasm: addr = input_2 + i # asm 1: lea (<input_2=int64#3,<i=int64#4),>addr=int64#5 # asm 2: lea (<input_2=%rdx,<i=%rcx),>addr=%r8 lea ( % rdx, % rcx), % r8 # qhasm: b0 = *(uint8 *) (addr + 0) # asm 1: movzbq 0(<addr=int64#5),>b0=int64#6 # asm 2: movzbq 0(<addr=%r8),>b0=%r9 movzbq 0( % r8), % r9 # qhasm: b1 = *(uint8 *) (addr + 1) # asm 1: movzbq 1(<addr=int64#5),>b1=int64#7 # asm 2: movzbq 1(<addr=%r8),>b1=%rax movzbq 1( % r8), % rax # qhasm: (uint64) b0 >>= 3 # asm 1: shr $3,<b0=int64#6 # asm 2: shr $3,<b0=%r9 shr $3, % r9 # qhasm: b1 <<= 5 # asm 1: shl $5,<b1=int64#7 # asm 2: shl $5,<b1=%rax shl $5, % rax # qhasm: b0 |= b1 # asm 1: or <b1=int64#7,<b0=int64#6 # asm 2: or <b1=%rax,<b0=%r9 or % rax, % r9 # qhasm: *(uint8 *) (addr + 0) = b0 # asm 1: movb <b0=int64#6b,0(<addr=int64#5) # asm 2: movb <b0=%r9b,0(<addr=%r8) movb % r9b, 0( % r8) # qhasm: i += 1 # asm 1: add $1,<i=int64#4 # asm 2: add $1,<i=%rcx add $1, % rcx # qhasm: =? 
i-676 # asm 1: cmp $676,<i=int64#4 # asm 2: cmp $676,<i=%rcx cmp $676, % rcx # comment:fp stack unchanged by jump # qhasm: goto inner1 if != jne ._inner1 # qhasm: b0 = *(uint8 *) (addr + 1) # asm 1: movzbq 1(<addr=int64#5),>b0=int64#4 # asm 2: movzbq 1(<addr=%r8),>b0=%rcx movzbq 1( % r8), % rcx # qhasm: (uint64) b0 >>= 3 # asm 1: shr $3,<b0=int64#4 # asm 2: shr $3,<b0=%rcx shr $3, % rcx # qhasm: *(uint8 *) (addr + 1) = b0 # asm 1: movb <b0=int64#4b,1(<addr=int64#5) # asm 2: movb <b0=%cl,1(<addr=%r8) movb % cl, 1( % r8) # qhasm: input_1 += 1047319 # asm 1: add $1047319,<input_1=int64#2 # asm 2: add $1047319,<input_1=%rsi add $1047319, % rsi # qhasm: buf_ptr = &buf # asm 1: leaq <buf=stack256#1,>buf_ptr=int64#4 # asm 2: leaq <buf=0(%rsp),>buf_ptr=%rcx leaq 0( % rsp), % rcx # qhasm: row = 1547 # asm 1: mov $1547,>row=int64#5 # asm 2: mov $1547,>row=%r8 mov $1547, % r8 # qhasm: loop: ._loop: # qhasm: row -= 1 # asm 1: sub $1,<row=int64#5 # asm 2: sub $1,<row=%r8 sub $1, % r8 # qhasm: input_1 -= 677 # asm 1: sub $677,<input_1=int64#2 # asm 2: sub $677,<input_1=%rsi sub $677, % rsi # qhasm: ss = mem256[ input_1 + 0 ] # asm 1: vmovupd 0(<input_1=int64#2),>ss=reg256#1 # asm 2: vmovupd 0(<input_1=%rsi),>ss=%ymm0 vmovupd 0( % rsi), % ymm0 # qhasm: ee = mem256[ input_2 + 0 ] # asm 1: vmovupd 0(<input_2=int64#3),>ee=reg256#2 # asm 2: vmovupd 0(<input_2=%rdx),>ee=%ymm1 vmovupd 0( % rdx), % ymm1 # qhasm: ss &= ee # asm 1: vpand <ee=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpand <ee=%ymm1,<ss=%ymm0,<ss=%ymm0 vpand % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 32 ] # asm 1: vmovupd 32(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 32(<input_1=%rsi),>pp=%ymm1 vmovupd 32( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 32 ] # asm 1: vmovupd 32(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 32(<input_2=%rdx),>ee=%ymm2 vmovupd 32( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 64 ] # asm 1: vmovupd 64(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 64(<input_1=%rsi),>pp=%ymm1 vmovupd 64( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 64 ] # asm 1: vmovupd 64(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 64(<input_2=%rdx),>ee=%ymm2 vmovupd 64( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 96 ] # asm 1: vmovupd 96(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 96(<input_1=%rsi),>pp=%ymm1 vmovupd 96( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 96 ] # asm 1: vmovupd 96(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 96(<input_2=%rdx),>ee=%ymm2 vmovupd 96( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 128 ] # asm 1: vmovupd 128(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 128(<input_1=%rsi),>pp=%ymm1 vmovupd 128( % rsi), % ymm1 # 
qhasm: ee = mem256[ input_2 + 128 ] # asm 1: vmovupd 128(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 128(<input_2=%rdx),>ee=%ymm2 vmovupd 128( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 160 ] # asm 1: vmovupd 160(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 160(<input_1=%rsi),>pp=%ymm1 vmovupd 160( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 160 ] # asm 1: vmovupd 160(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 160(<input_2=%rdx),>ee=%ymm2 vmovupd 160( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 192 ] # asm 1: vmovupd 192(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 192(<input_1=%rsi),>pp=%ymm1 vmovupd 192( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 192 ] # asm 1: vmovupd 192(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 192(<input_2=%rdx),>ee=%ymm2 vmovupd 192( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 224 ] # asm 1: vmovupd 224(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 224(<input_1=%rsi),>pp=%ymm1 vmovupd 224( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 224 ] # asm 1: vmovupd 224(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 224(<input_2=%rdx),>ee=%ymm2 vmovupd 224( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 256 ] # asm 1: vmovupd 256(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 256(<input_1=%rsi),>pp=%ymm1 vmovupd 256( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 256 ] # asm 1: vmovupd 256(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 256(<input_2=%rdx),>ee=%ymm2 vmovupd 256( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 288 ] # asm 1: vmovupd 288(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 288(<input_1=%rsi),>pp=%ymm1 vmovupd 288( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 288 ] # asm 1: vmovupd 288(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 288(<input_2=%rdx),>ee=%ymm2 vmovupd 288( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 
# qhasm: pp = mem256[ input_1 + 320 ] # asm 1: vmovupd 320(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 320(<input_1=%rsi),>pp=%ymm1 vmovupd 320( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 320 ] # asm 1: vmovupd 320(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 320(<input_2=%rdx),>ee=%ymm2 vmovupd 320( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 352 ] # asm 1: vmovupd 352(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 352(<input_1=%rsi),>pp=%ymm1 vmovupd 352( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 352 ] # asm 1: vmovupd 352(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 352(<input_2=%rdx),>ee=%ymm2 vmovupd 352( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 384 ] # asm 1: vmovupd 384(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 384(<input_1=%rsi),>pp=%ymm1 vmovupd 384( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 384 ] # asm 1: vmovupd 384(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 384(<input_2=%rdx),>ee=%ymm2 vmovupd 384( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 416 ] # asm 1: vmovupd 416(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 416(<input_1=%rsi),>pp=%ymm1 vmovupd 416( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 416 ] # asm 1: vmovupd 416(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 416(<input_2=%rdx),>ee=%ymm2 vmovupd 416( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 448 ] # asm 1: vmovupd 448(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 448(<input_1=%rsi),>pp=%ymm1 vmovupd 448( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 448 ] # asm 1: vmovupd 448(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 448(<input_2=%rdx),>ee=%ymm2 vmovupd 448( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 480 ] # asm 1: vmovupd 480(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 480(<input_1=%rsi),>pp=%ymm1 vmovupd 480( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 480 ] # asm 1: vmovupd 480(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 480(<input_2=%rdx),>ee=%ymm2 vmovupd 480( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % 
ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 512 ] # asm 1: vmovupd 512(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 512(<input_1=%rsi),>pp=%ymm1 vmovupd 512( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 512 ] # asm 1: vmovupd 512(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 512(<input_2=%rdx),>ee=%ymm2 vmovupd 512( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 544 ] # asm 1: vmovupd 544(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 544(<input_1=%rsi),>pp=%ymm1 vmovupd 544( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 544 ] # asm 1: vmovupd 544(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 544(<input_2=%rdx),>ee=%ymm2 vmovupd 544( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 576 ] # asm 1: vmovupd 576(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 576(<input_1=%rsi),>pp=%ymm1 vmovupd 576( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 576 ] # asm 1: vmovupd 576(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 576(<input_2=%rdx),>ee=%ymm2 vmovupd 576( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 608 ] # asm 1: vmovupd 608(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 608(<input_1=%rsi),>pp=%ymm1 vmovupd 608( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 608 ] # asm 1: vmovupd 608(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 608(<input_2=%rdx),>ee=%ymm2 vmovupd 608( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 640 ] # asm 1: vmovupd 640(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 640(<input_1=%rsi),>pp=%ymm1 vmovupd 640( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 640 ] # asm 1: vmovupd 640(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 640(<input_2=%rdx),>ee=%ymm2 vmovupd 640( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: buf = ss # asm 1: vmovapd <ss=reg256#1,>buf=stack256#1 # asm 2: vmovapd <ss=%ymm0,>buf=0(%rsp) vmovapd % ymm0, 0( % rsp) # qhasm: s = *(uint32 *) (input_1 + 672) # asm 1: movl 672(<input_1=int64#2),>s=int64#6d # asm 2: movl 672(<input_1=%rsi),>s=%r9d movl 672( % rsi), % r9d # 
qhasm: e = *(uint32 *) (input_2 + 672) # asm 1: movl 672(<input_2=int64#3),>e=int64#7d # asm 2: movl 672(<input_2=%rdx),>e=%eax movl 672( % rdx), % eax # qhasm: s &= e # asm 1: and <e=int64#7,<s=int64#6 # asm 2: and <e=%rax,<s=%r9 and % rax, % r9 # qhasm: p = *(uint8 *) (input_1 + 676) # asm 1: movzbq 676(<input_1=int64#2),>p=int64#7 # asm 2: movzbq 676(<input_1=%rsi),>p=%rax movzbq 676( % rsi), % rax # qhasm: e = *(uint8 *) (input_2 + 676) # asm 1: movzbq 676(<input_2=int64#3),>e=int64#8 # asm 2: movzbq 676(<input_2=%rdx),>e=%r10 movzbq 676( % rdx), % r10 # qhasm: p &= e # asm 1: and <e=int64#8,<p=int64#7 # asm 2: and <e=%r10,<p=%rax and % r10, % rax # qhasm: s ^= p # asm 1: xor <p=int64#7,<s=int64#6 # asm 2: xor <p=%rax,<s=%r9 xor % rax, % r9 # qhasm: c_all = count(s) # asm 1: popcnt <s=int64#6, >c_all=int64#6 # asm 2: popcnt <s=%r9, >c_all=%r9 popcnt % r9, % r9 # qhasm: b64 = mem64[ buf_ptr + 0 ] # asm 1: movq 0(<buf_ptr=int64#4),>b64=int64#7 # asm 2: movq 0(<buf_ptr=%rcx),>b64=%rax movq 0( % rcx), % rax # qhasm: c = count(b64) # asm 1: popcnt <b64=int64#7, >c=int64#7 # asm 2: popcnt <b64=%rax, >c=%rax popcnt % rax, % rax # qhasm: c_all ^= c # asm 1: xor <c=int64#7,<c_all=int64#6 # asm 2: xor <c=%rax,<c_all=%r9 xor % rax, % r9 # qhasm: b64 = mem64[ buf_ptr + 8 ] # asm 1: movq 8(<buf_ptr=int64#4),>b64=int64#7 # asm 2: movq 8(<buf_ptr=%rcx),>b64=%rax movq 8( % rcx), % rax # qhasm: c = count(b64) # asm 1: popcnt <b64=int64#7, >c=int64#7 # asm 2: popcnt <b64=%rax, >c=%rax popcnt % rax, % rax # qhasm: c_all ^= c # asm 1: xor <c=int64#7,<c_all=int64#6 # asm 2: xor <c=%rax,<c_all=%r9 xor % rax, % r9 # qhasm: b64 = mem64[ buf_ptr + 16 ] # asm 1: movq 16(<buf_ptr=int64#4),>b64=int64#7 # asm 2: movq 16(<buf_ptr=%rcx),>b64=%rax movq 16( % rcx), % rax # qhasm: c = count(b64) # asm 1: popcnt <b64=int64#7, >c=int64#7 # asm 2: popcnt <b64=%rax, >c=%rax popcnt % rax, % rax # qhasm: c_all ^= c # asm 1: xor <c=int64#7,<c_all=int64#6 # asm 2: xor <c=%rax,<c_all=%r9 xor % rax, % r9 # qhasm: b64 = mem64[ buf_ptr + 24 ] # asm 1: movq 24(<buf_ptr=int64#4),>b64=int64#7 # asm 2: movq 24(<buf_ptr=%rcx),>b64=%rax movq 24( % rcx), % rax # qhasm: c = count(b64) # asm 1: popcnt <b64=int64#7, >c=int64#7 # asm 2: popcnt <b64=%rax, >c=%rax popcnt % rax, % rax # qhasm: c_all ^= c # asm 1: xor <c=int64#7,<c_all=int64#6 # asm 2: xor <c=%rax,<c_all=%r9 xor % rax, % r9 # qhasm: addr = row # asm 1: mov <row=int64#5,>addr=int64#7 # asm 2: mov <row=%r8,>addr=%rax mov % r8, % rax # qhasm: (uint64) addr >>= 3 # asm 1: shr $3,<addr=int64#7 # asm 2: shr $3,<addr=%rax shr $3, % rax # qhasm: addr += input_0 # asm 1: add <input_0=int64#1,<addr=int64#7 # asm 2: add <input_0=%rdi,<addr=%rax add % rdi, % rax # qhasm: synd = *(uint8 *) (addr + 0) # asm 1: movzbq 0(<addr=int64#7),>synd=int64#8 # asm 2: movzbq 0(<addr=%rax),>synd=%r10 movzbq 0( % rax), % r10 # qhasm: synd <<= 1 # asm 1: shl $1,<synd=int64#8 # asm 2: shl $1,<synd=%r10 shl $1, % r10 # qhasm: (uint32) c_all &= 1 # asm 1: and $1,<c_all=int64#6d # asm 2: and $1,<c_all=%r9d and $1, % r9d # qhasm: synd |= c_all # asm 1: or <c_all=int64#6,<synd=int64#8 # asm 2: or <c_all=%r9,<synd=%r10 or % r9, % r10 # qhasm: *(uint8 *) (addr + 0) = synd # asm 1: movb <synd=int64#8b,0(<addr=int64#7) # asm 2: movb <synd=%r10b,0(<addr=%rax) movb % r10b, 0( % rax) # qhasm: =? 
row-0 # asm 1: cmp $0,<row=int64#5 # asm 2: cmp $0,<row=%r8 cmp $0, % r8 # comment:fp stack unchanged by jump # qhasm: goto loop if != jne ._loop # qhasm: i = 676 # asm 1: mov $676,>i=int64#2 # asm 2: mov $676,>i=%rsi mov $676, % rsi # qhasm: inner2: ._inner2: # qhasm: i -= 1 # asm 1: sub $1,<i=int64#2 # asm 2: sub $1,<i=%rsi sub $1, % rsi # qhasm: addr = input_2 + i # asm 1: lea (<input_2=int64#3,<i=int64#2),>addr=int64#4 # asm 2: lea (<input_2=%rdx,<i=%rsi),>addr=%rcx lea ( % rdx, % rsi), % rcx # qhasm: b0 = *(uint8 *) (addr + 0) # asm 1: movzbq 0(<addr=int64#4),>b0=int64#5 # asm 2: movzbq 0(<addr=%rcx),>b0=%r8 movzbq 0( % rcx), % r8 # qhasm: b1 = *(uint8 *) (addr + 1) # asm 1: movzbq 1(<addr=int64#4),>b1=int64#6 # asm 2: movzbq 1(<addr=%rcx),>b1=%r9 movzbq 1( % rcx), % r9 # qhasm: (uint64) b0 >>= 5 # asm 1: shr $5,<b0=int64#5 # asm 2: shr $5,<b0=%r8 shr $5, % r8 # qhasm: b1 <<= 3 # asm 1: shl $3,<b1=int64#6 # asm 2: shl $3,<b1=%r9 shl $3, % r9 # qhasm: b1 |= b0 # asm 1: or <b0=int64#5,<b1=int64#6 # asm 2: or <b0=%r8,<b1=%r9 or % r8, % r9 # qhasm: *(uint8 *) (addr + 1) = b1 # asm 1: movb <b1=int64#6b,1(<addr=int64#4) # asm 2: movb <b1=%r9b,1(<addr=%rcx) movb % r9b, 1( % rcx) # qhasm: =? i-0 # asm 1: cmp $0,<i=int64#2 # asm 2: cmp $0,<i=%rsi cmp $0, % rsi # comment:fp stack unchanged by jump # qhasm: goto inner2 if != jne ._inner2 # qhasm: tmp = back # asm 1: movq <back=stack64#1,>tmp=int64#2 # asm 2: movq <back=32(%rsp),>tmp=%rsi movq 32( % rsp), % rsi # qhasm: *(uint8 *) (input_2 + 0) = tmp # asm 1: movb <tmp=int64#2b,0(<input_2=int64#3) # asm 2: movb <tmp=%sil,0(<input_2=%rdx) movb % sil, 0( % rdx) # qhasm: input_2 -= 193 # asm 1: sub $193,<input_2=int64#3 # asm 2: sub $193,<input_2=%rdx sub $193, % rdx # qhasm: i = 0 # asm 1: mov $0,>i=int64#2 # asm 2: mov $0,>i=%rsi mov $0, % rsi # qhasm: inner3: ._inner3: # qhasm: s = *(uint8 *) (input_0 + 0) # asm 1: movzbq 0(<input_0=int64#1),>s=int64#4 # asm 2: movzbq 0(<input_0=%rdi),>s=%rcx movzbq 0( % rdi), % rcx # qhasm: e = *(uint8 *) (input_2 + 0) # asm 1: movzbq 0(<input_2=int64#3),>e=int64#5 # asm 2: movzbq 0(<input_2=%rdx),>e=%r8 movzbq 0( % rdx), % r8 # qhasm: s ^= e # asm 1: xor <e=int64#5,<s=int64#4 # asm 2: xor <e=%r8,<s=%rcx xor % r8, % rcx # qhasm: *(uint8 *) (input_0 + 0) = s # asm 1: movb <s=int64#4b,0(<input_0=int64#1) # asm 2: movb <s=%cl,0(<input_0=%rdi) movb % cl, 0( % rdi) # qhasm: i += 1 # asm 1: add $1,<i=int64#2 # asm 2: add $1,<i=%rsi add $1, % rsi # qhasm: input_0 += 1 # asm 1: add $1,<input_0=int64#1 # asm 2: add $1,<input_0=%rdi add $1, % rdi # qhasm: input_2 += 1 # asm 1: add $1,<input_2=int64#3 # asm 2: add $1,<input_2=%rdx add $1, % rdx # qhasm: =? i-193 # asm 1: cmp $193,<i=int64#2 # asm 2: cmp $193,<i=%rsi cmp $193, % rsi # comment:fp stack unchanged by jump # qhasm: goto inner3 if != jne ._inner3 # qhasm: s = *(uint8 *) (input_0 + 0) # asm 1: movzbq 0(<input_0=int64#1),>s=int64#2 # asm 2: movzbq 0(<input_0=%rdi),>s=%rsi movzbq 0( % rdi), % rsi # qhasm: e = *(uint8 *) (input_2 + 0) # asm 1: movzbq 0(<input_2=int64#3),>e=int64#3 # asm 2: movzbq 0(<input_2=%rdx),>e=%rdx movzbq 0( % rdx), % rdx # qhasm: (uint32) e &= 7 # asm 1: and $7,<e=int64#3d # asm 2: and $7,<e=%edx and $7, % edx # qhasm: s ^= e # asm 1: xor <e=int64#3,<s=int64#2 # asm 2: xor <e=%rdx,<s=%rsi xor % rdx, % rsi # qhasm: *(uint8 *) (input_0 + 0) = s # asm 1: movb <s=int64#2b,0(<input_0=int64#1) # asm 2: movb <s=%sil,0(<input_0=%rdi) movb % sil, 0( % rdi) # qhasm: return add % r11, % rsp ret
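For orientation, the core step of the syndrome_asm record above is a GF(2) dot product: each syndrome bit is the parity of the AND of one matrix row with the re-aligned error vector, computed 256 bits at a time with vpand/vpxor and folded with popcnt. A hedged C sketch of that single step, with an assumed row length and made-up names; the surrounding in-place bit-shifting of the error buffer and the final XOR of the error tail are not reproduced here:

#include <stdint.h>
#include <stddef.h>

#define ROW_WORDS 11   /* assumption: number of 64-bit words per row in this sketch */

/* One syndrome bit: parity of popcount(row & e). */
static int syndrome_bit_sketch(const uint64_t row[ROW_WORDS],
                               const uint64_t e[ROW_WORDS]) {
    uint64_t acc = 0;
    for (size_t i = 0; i < ROW_WORDS; i++)
        acc ^= row[i] & e[i];   /* vpand followed by vpxor in the assembly */
    acc ^= acc >> 32; acc ^= acc >> 16; acc ^= acc >> 8;   /* fold down to the */
    acc ^= acc >> 4;  acc ^= acc >> 2;  acc ^= acc >> 1;   /* parity, as popcnt & 1 does */
    return (int)(acc & 1);
}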
mktmansour/MKT-KSA-Geolocation-Security
254,430
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6960119f/avx2/transpose_64x128_sp_asm.S
#include "namespace.h" #define MASK0_0 CRYPTO_NAMESPACE(MASK0_0) #define _MASK0_0 _CRYPTO_NAMESPACE(MASK0_0) #define MASK0_1 CRYPTO_NAMESPACE(MASK0_1) #define _MASK0_1 _CRYPTO_NAMESPACE(MASK0_1) #define MASK1_0 CRYPTO_NAMESPACE(MASK1_0) #define _MASK1_0 _CRYPTO_NAMESPACE(MASK1_0) #define MASK1_1 CRYPTO_NAMESPACE(MASK1_1) #define _MASK1_1 _CRYPTO_NAMESPACE(MASK1_1) #define MASK2_0 CRYPTO_NAMESPACE(MASK2_0) #define _MASK2_0 _CRYPTO_NAMESPACE(MASK2_0) #define MASK2_1 CRYPTO_NAMESPACE(MASK2_1) #define _MASK2_1 _CRYPTO_NAMESPACE(MASK2_1) #define MASK3_0 CRYPTO_NAMESPACE(MASK3_0) #define _MASK3_0 _CRYPTO_NAMESPACE(MASK3_0) #define MASK3_1 CRYPTO_NAMESPACE(MASK3_1) #define _MASK3_1 _CRYPTO_NAMESPACE(MASK3_1) #define MASK4_0 CRYPTO_NAMESPACE(MASK4_0) #define _MASK4_0 _CRYPTO_NAMESPACE(MASK4_0) #define MASK4_1 CRYPTO_NAMESPACE(MASK4_1) #define _MASK4_1 _CRYPTO_NAMESPACE(MASK4_1) #define MASK5_0 CRYPTO_NAMESPACE(MASK5_0) #define _MASK5_0 _CRYPTO_NAMESPACE(MASK5_0) #define MASK5_1 CRYPTO_NAMESPACE(MASK5_1) #define _MASK5_1 _CRYPTO_NAMESPACE(MASK5_1) #define transpose_64x128_sp_asm CRYPTO_NAMESPACE(transpose_64x128_sp_asm) #define _transpose_64x128_sp_asm _CRYPTO_NAMESPACE(transpose_64x128_sp_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg128 x0 # qhasm: reg128 x1 # qhasm: reg128 x2 # qhasm: reg128 x3 # qhasm: reg128 x4 # qhasm: reg128 x5 # qhasm: reg128 x6 # qhasm: reg128 x7 # qhasm: reg128 t0 # qhasm: reg128 t1 # qhasm: reg128 v00 # qhasm: reg128 v01 # qhasm: reg128 v10 # qhasm: reg128 v11 # qhasm: reg128 mask0 # qhasm: reg128 mask1 # qhasm: reg128 mask2 # qhasm: reg128 mask3 # qhasm: reg128 mask4 # qhasm: reg128 mask5 # qhasm: enter transpose_64x128_sp_asm .p2align 5 .global _transpose_64x128_sp_asm .global transpose_64x128_sp_asm _transpose_64x128_sp_asm: transpose_64x128_sp_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: mask0 aligned= mem128[ MASK5_0 ] # asm 1: movdqa MASK5_0(%rip),>mask0=reg128#1 # asm 2: movdqa MASK5_0(%rip),>mask0=%xmm0 movdqa MASK5_0( % rip), % xmm0 # qhasm: mask1 aligned= mem128[ MASK5_1 ] # asm 1: movdqa MASK5_1(%rip),>mask1=reg128#2 # asm 2: movdqa MASK5_1(%rip),>mask1=%xmm1 movdqa MASK5_1( % rip), % xmm1 # qhasm: mask2 aligned= mem128[ MASK4_0 ] # asm 1: movdqa MASK4_0(%rip),>mask2=reg128#3 # asm 2: movdqa MASK4_0(%rip),>mask2=%xmm2 movdqa MASK4_0( % rip), % xmm2 # qhasm: mask3 aligned= mem128[ MASK4_1 ] # asm 1: movdqa MASK4_1(%rip),>mask3=reg128#4 # asm 2: movdqa MASK4_1(%rip),>mask3=%xmm3 movdqa MASK4_1( % rip), % xmm3 # qhasm: mask4 aligned= mem128[ MASK3_0 ] # asm 1: movdqa MASK3_0(%rip),>mask4=reg128#5 # asm 2: movdqa MASK3_0(%rip),>mask4=%xmm4 movdqa MASK3_0( % rip), % xmm4 # qhasm: mask5 aligned= mem128[ MASK3_1 ] # asm 1: movdqa MASK3_1(%rip),>mask5=reg128#6 # asm 2: movdqa MASK3_1(%rip),>mask5=%xmm5 movdqa MASK3_1( % rip), % xmm5 # qhasm: x0 = mem128[ input_0 + 0 ] # asm 1: movdqu 0(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 0(<input_0=%rdi),>x0=%xmm6 movdqu 0( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 128 ] # asm 1: movdqu 128(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 128(<input_0=%rdi),>x1=%xmm7 movdqu 128( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 256 ] # asm 1: movdqu 
256(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 256(<input_0=%rdi),>x2=%xmm8 movdqu 256( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 384 ] # asm 1: movdqu 384(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 384(<input_0=%rdi),>x3=%xmm9 movdqu 384( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 512 ] # asm 1: movdqu 512(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 512(<input_0=%rdi),>x4=%xmm10 movdqu 512( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 640 ] # asm 1: movdqu 640(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 640(<input_0=%rdi),>x5=%xmm11 movdqu 640( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 768 ] # asm 1: movdqu 768(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 768(<input_0=%rdi),>x6=%xmm12 movdqu 768( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 896 ] # asm 1: movdqu 896(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 896(<input_0=%rdi),>x7=%xmm13 movdqu 896( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor 
<v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6 vpsrld $16, % 
xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = x5 << 8 # asm 1: vpsllw 
$8,<x5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 0 ] = x0 # asm 1: movdqu <x0=reg128#10,0(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,0(<input_0=%rdi) movdqu % xmm9, 0( % rdi) # qhasm: mem128[ input_0 + 128 ] = x1 # asm 1: movdqu <x1=reg128#14,128(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,128(<input_0=%rdi) movdqu % xmm13, 128( % rdi) # qhasm: mem128[ input_0 + 256 ] = x2 # asm 1: movdqu <x2=reg128#15,256(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,256(<input_0=%rdi) movdqu % xmm14, 256( % rdi) # qhasm: mem128[ input_0 + 384 ] = x3 # asm 1: movdqu <x3=reg128#11,384(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,384(<input_0=%rdi) movdqu % xmm10, 384( % rdi) # qhasm: mem128[ input_0 + 512 ] = x4 # asm 1: movdqu <x4=reg128#12,512(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,512(<input_0=%rdi) movdqu % xmm11, 512( % rdi) # qhasm: mem128[ input_0 + 640 ] = x5 # asm 1: movdqu <x5=reg128#9,640(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,640(<input_0=%rdi) movdqu % xmm8, 640( % rdi) # qhasm: mem128[ input_0 + 768 ] = x6 # asm 1: movdqu <x6=reg128#13,768(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,768(<input_0=%rdi) movdqu % xmm12, 768( % rdi) # qhasm: mem128[ input_0 + 896 ] = x7 # asm 1: movdqu <x7=reg128#7,896(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,896(<input_0=%rdi) movdqu % xmm6, 896( % rdi) # qhasm: x0 = mem128[ input_0 + 16 ] # asm 1: movdqu 16(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 16(<input_0=%rdi),>x0=%xmm6 movdqu 16( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 144 ] # asm 1: movdqu 144(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 144(<input_0=%rdi),>x1=%xmm7 movdqu 144( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 272 ] # asm 1: movdqu 272(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 272(<input_0=%rdi),>x2=%xmm8 movdqu 272( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 400 ] # asm 1: movdqu 400(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 
400(<input_0=%rdi),>x3=%xmm9 movdqu 400( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 528 ] # asm 1: movdqu 528(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 528(<input_0=%rdi),>x4=%xmm10 movdqu 528( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 656 ] # asm 1: movdqu 656(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 656(<input_0=%rdi),>x5=%xmm11 movdqu 656( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 784 ] # asm 1: movdqu 784(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 784(<input_0=%rdi),>x6=%xmm12 movdqu 784( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 912 ] # asm 1: movdqu 912(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 912(<input_0=%rdi),>x7=%xmm13 movdqu 912( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % 
xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor 
<v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12 vpsrlw 
$8, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 16 ] = x0 # asm 1: movdqu <x0=reg128#10,16(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,16(<input_0=%rdi) movdqu % xmm9, 16( % rdi) # qhasm: mem128[ input_0 + 144 ] = x1 # asm 1: movdqu <x1=reg128#14,144(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,144(<input_0=%rdi) movdqu % xmm13, 144( % rdi) # qhasm: mem128[ input_0 + 272 ] = x2 # asm 1: movdqu <x2=reg128#15,272(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,272(<input_0=%rdi) movdqu % xmm14, 272( % rdi) # qhasm: mem128[ input_0 + 400 ] = x3 # asm 1: movdqu <x3=reg128#11,400(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,400(<input_0=%rdi) movdqu % xmm10, 400( % rdi) # qhasm: mem128[ input_0 + 528 ] = x4 # asm 1: movdqu <x4=reg128#12,528(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,528(<input_0=%rdi) movdqu % xmm11, 528( % rdi) # qhasm: mem128[ input_0 + 656 ] = x5 # asm 1: movdqu <x5=reg128#9,656(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,656(<input_0=%rdi) movdqu % xmm8, 656( % rdi) # qhasm: mem128[ input_0 + 784 ] = x6 # asm 1: movdqu <x6=reg128#13,784(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,784(<input_0=%rdi) movdqu % xmm12, 784( % rdi) # qhasm: mem128[ input_0 + 912 ] = x7 # asm 1: movdqu <x7=reg128#7,912(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,912(<input_0=%rdi) movdqu % xmm6, 912( % rdi) # qhasm: x0 = mem128[ input_0 + 32 ] # asm 1: movdqu 32(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 32(<input_0=%rdi),>x0=%xmm6 movdqu 32( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 160 ] # asm 1: movdqu 160(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 160(<input_0=%rdi),>x1=%xmm7 movdqu 160( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 288 ] # asm 1: movdqu 288(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 288(<input_0=%rdi),>x2=%xmm8 movdqu 288( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 416 ] # asm 1: movdqu 416(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 416(<input_0=%rdi),>x3=%xmm9 movdqu 416( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 544 ] # asm 1: movdqu 544(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 544(<input_0=%rdi),>x4=%xmm10 movdqu 544( % rdi), % xmm10 # qhasm: x5 = 
mem128[ input_0 + 672 ] # asm 1: movdqu 672(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 672(<input_0=%rdi),>x5=%xmm11 movdqu 672( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 800 ] # asm 1: movdqu 800(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 800(<input_0=%rdi),>x6=%xmm12 movdqu 800( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 928 ] # asm 1: movdqu 928(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 928(<input_0=%rdi),>x7=%xmm13 movdqu 928( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = x7 << 32 # asm 1: vpsllq 
$32,<x7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor 
<v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: 
vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 32 ] = x0 # asm 1: movdqu <x0=reg128#10,32(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,32(<input_0=%rdi) movdqu % xmm9, 32( % rdi) # qhasm: mem128[ input_0 + 160 ] = x1 # asm 1: movdqu <x1=reg128#14,160(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,160(<input_0=%rdi) movdqu % xmm13, 160( % rdi) # qhasm: mem128[ input_0 + 288 ] = x2 # asm 1: movdqu <x2=reg128#15,288(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,288(<input_0=%rdi) movdqu % xmm14, 288( % rdi) # qhasm: mem128[ input_0 + 416 ] = x3 # asm 1: movdqu <x3=reg128#11,416(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,416(<input_0=%rdi) movdqu % xmm10, 416( % rdi) # qhasm: mem128[ input_0 + 544 ] = x4 # asm 1: movdqu <x4=reg128#12,544(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,544(<input_0=%rdi) movdqu % xmm11, 544( % rdi) # qhasm: mem128[ input_0 + 672 ] = x5 # asm 1: movdqu <x5=reg128#9,672(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,672(<input_0=%rdi) movdqu % xmm8, 672( % rdi) # qhasm: mem128[ input_0 + 800 ] = x6 # asm 1: movdqu <x6=reg128#13,800(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,800(<input_0=%rdi) movdqu % xmm12, 800( % rdi) # qhasm: mem128[ input_0 + 928 ] = x7 # asm 1: movdqu <x7=reg128#7,928(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,928(<input_0=%rdi) movdqu % xmm6, 928( % rdi) # qhasm: x0 = mem128[ input_0 + 48 ] # asm 1: movdqu 48(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 48(<input_0=%rdi),>x0=%xmm6 movdqu 48( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 176 ] # asm 1: movdqu 176(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 176(<input_0=%rdi),>x1=%xmm7 movdqu 176( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 304 ] # asm 1: movdqu 304(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 304(<input_0=%rdi),>x2=%xmm8 movdqu 304( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 432 ] # asm 1: movdqu 432(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 432(<input_0=%rdi),>x3=%xmm9 movdqu 432( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 560 ] # asm 1: movdqu 560(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 560(<input_0=%rdi),>x4=%xmm10 movdqu 560( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 688 ] # asm 1: movdqu 688(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 688(<input_0=%rdi),>x5=%xmm11 movdqu 688( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 816 ] # asm 1: movdqu 
816(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 816(<input_0=%rdi),>x6=%xmm12 movdqu 816( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 944 ] # asm 1: movdqu 944(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 944(<input_0=%rdi),>x7=%xmm13 movdqu 944( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq 
$32,<x3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % 
xmm7, % xmm8 # qhasm: 4x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 48 ] = x0 # asm 1: movdqu <x0=reg128#10,48(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,48(<input_0=%rdi) movdqu % xmm9, 48( % rdi) # qhasm: mem128[ input_0 + 176 ] = x1 # asm 1: movdqu <x1=reg128#14,176(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,176(<input_0=%rdi) movdqu % xmm13, 176( % rdi) # qhasm: mem128[ input_0 + 304 ] = x2 # asm 1: movdqu <x2=reg128#15,304(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,304(<input_0=%rdi) movdqu % xmm14, 304( % rdi) # qhasm: mem128[ input_0 + 432 ] = x3 # asm 1: movdqu <x3=reg128#11,432(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,432(<input_0=%rdi) movdqu % xmm10, 432( % rdi) # qhasm: mem128[ input_0 + 560 ] = x4 # asm 1: movdqu <x4=reg128#12,560(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,560(<input_0=%rdi) movdqu % xmm11, 560( % rdi) # qhasm: mem128[ input_0 + 688 ] = x5 # asm 1: movdqu <x5=reg128#9,688(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,688(<input_0=%rdi) movdqu % xmm8, 688( % rdi) # qhasm: mem128[ input_0 + 816 ] = x6 # asm 1: movdqu <x6=reg128#13,816(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,816(<input_0=%rdi) movdqu % xmm12, 816( % rdi) # qhasm: mem128[ input_0 + 944 ] = x7 # asm 1: movdqu <x7=reg128#7,944(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,944(<input_0=%rdi) movdqu % xmm6, 944( % rdi) # qhasm: x0 = mem128[ input_0 + 64 ] # asm 1: movdqu 64(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 64(<input_0=%rdi),>x0=%xmm6 movdqu 64( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 192 ] # asm 1: movdqu 192(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 192(<input_0=%rdi),>x1=%xmm7 movdqu 192( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 320 ] # asm 1: movdqu 320(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 320(<input_0=%rdi),>x2=%xmm8 movdqu 320( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 448 ] # asm 1: movdqu 448(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 448(<input_0=%rdi),>x3=%xmm9 movdqu 448( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 576 ] # asm 1: movdqu 576(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 576(<input_0=%rdi),>x4=%xmm10 movdqu 576( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 704 ] # asm 1: movdqu 704(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 704(<input_0=%rdi),>x5=%xmm11 movdqu 704( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 832 ] # asm 1: movdqu 832(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 832(<input_0=%rdi),>x6=%xmm12 movdqu 832( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 960 ] # asm 1: movdqu 960(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 
960(<input_0=%rdi),>x7=%xmm13 movdqu 960( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: x3 = 
v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = x5 unsigned>> 16 # asm 1: vpsrld 
$16,<x5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 
vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 64 ] = x0 # asm 1: movdqu <x0=reg128#10,64(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,64(<input_0=%rdi) movdqu % xmm9, 64( % rdi) # qhasm: mem128[ input_0 + 192 ] = x1 # asm 1: movdqu <x1=reg128#14,192(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,192(<input_0=%rdi) movdqu % xmm13, 192( % rdi) # qhasm: mem128[ input_0 + 320 ] = x2 # asm 1: movdqu <x2=reg128#15,320(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,320(<input_0=%rdi) movdqu % xmm14, 320( % rdi) # qhasm: mem128[ input_0 + 448 ] = x3 # asm 1: movdqu <x3=reg128#11,448(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,448(<input_0=%rdi) movdqu % xmm10, 448( % rdi) # qhasm: mem128[ input_0 + 576 ] = x4 # asm 1: movdqu <x4=reg128#12,576(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,576(<input_0=%rdi) movdqu % xmm11, 576( % rdi) # qhasm: mem128[ input_0 + 704 ] = x5 # asm 1: movdqu <x5=reg128#9,704(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,704(<input_0=%rdi) movdqu % xmm8, 704( % rdi) # qhasm: mem128[ input_0 + 832 ] = x6 # asm 1: movdqu <x6=reg128#13,832(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,832(<input_0=%rdi) movdqu % xmm12, 832( % rdi) # qhasm: mem128[ input_0 + 960 ] = x7 # asm 1: movdqu <x7=reg128#7,960(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,960(<input_0=%rdi) movdqu % xmm6, 960( % rdi) # qhasm: x0 = mem128[ input_0 + 80 ] # asm 1: movdqu 80(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 80(<input_0=%rdi),>x0=%xmm6 movdqu 80( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 208 ] # asm 1: movdqu 208(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 208(<input_0=%rdi),>x1=%xmm7 movdqu 208( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 336 ] # asm 1: movdqu 336(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 336(<input_0=%rdi),>x2=%xmm8 movdqu 336( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 464 ] # asm 1: movdqu 464(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 464(<input_0=%rdi),>x3=%xmm9 movdqu 464( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 592 ] # asm 1: movdqu 592(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 592(<input_0=%rdi),>x4=%xmm10 movdqu 592( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 720 ] # asm 1: movdqu 720(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 720(<input_0=%rdi),>x5=%xmm11 movdqu 720( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 848 ] # asm 1: movdqu 848(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 848(<input_0=%rdi),>x6=%xmm12 movdqu 848( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 976 ] # asm 1: movdqu 976(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 976(<input_0=%rdi),>x7=%xmm13 movdqu 976( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % 
xmm14 # qhasm: 2x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor 
<v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand 
<mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = x6 
unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 80 ] = x0 # asm 1: movdqu <x0=reg128#10,80(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,80(<input_0=%rdi) movdqu % xmm9, 80( % rdi) # qhasm: mem128[ input_0 + 208 ] = x1 # asm 1: movdqu <x1=reg128#14,208(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,208(<input_0=%rdi) movdqu % xmm13, 208( % rdi) # qhasm: mem128[ input_0 + 336 ] = x2 # asm 1: movdqu <x2=reg128#15,336(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,336(<input_0=%rdi) movdqu % xmm14, 336( % rdi) # qhasm: mem128[ input_0 + 464 ] = x3 # asm 1: movdqu <x3=reg128#11,464(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,464(<input_0=%rdi) movdqu % xmm10, 464( % rdi) # qhasm: mem128[ input_0 + 592 ] = x4 # asm 1: movdqu <x4=reg128#12,592(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,592(<input_0=%rdi) movdqu % xmm11, 592( % rdi) # qhasm: mem128[ input_0 + 720 ] = x5 # asm 1: movdqu <x5=reg128#9,720(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,720(<input_0=%rdi) movdqu % xmm8, 720( % rdi) # qhasm: mem128[ input_0 + 848 ] = x6 # asm 1: movdqu <x6=reg128#13,848(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,848(<input_0=%rdi) movdqu % xmm12, 848( % rdi) # qhasm: mem128[ input_0 + 976 ] = x7 # asm 1: movdqu <x7=reg128#7,976(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,976(<input_0=%rdi) movdqu % xmm6, 976( % rdi) # qhasm: x0 = mem128[ input_0 + 96 ] # asm 1: movdqu 96(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 96(<input_0=%rdi),>x0=%xmm6 movdqu 96( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 224 ] # asm 1: movdqu 224(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 224(<input_0=%rdi),>x1=%xmm7 movdqu 224( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 352 ] # asm 1: movdqu 352(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 352(<input_0=%rdi),>x2=%xmm8 movdqu 352( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 480 ] # asm 1: movdqu 480(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 480(<input_0=%rdi),>x3=%xmm9 movdqu 480( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 608 ] # asm 1: movdqu 608(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 608(<input_0=%rdi),>x4=%xmm10 movdqu 608( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 736 ] # asm 1: movdqu 736(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 736(<input_0=%rdi),>x5=%xmm11 movdqu 736( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 864 ] # asm 1: movdqu 864(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 864(<input_0=%rdi),>x6=%xmm12 movdqu 864( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 992 ] # asm 1: movdqu 992(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 992(<input_0=%rdi),>x7=%xmm13 movdqu 992( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = x0 unsigned>> 32 # asm 
1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand 
<mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 
= v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand 
<mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 96 ] = x0 # asm 1: movdqu <x0=reg128#10,96(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,96(<input_0=%rdi) movdqu % xmm9, 96( % rdi) # qhasm: mem128[ input_0 + 224 ] = x1 # asm 1: movdqu <x1=reg128#14,224(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,224(<input_0=%rdi) movdqu % xmm13, 224( % rdi) # qhasm: mem128[ input_0 + 352 ] = x2 # asm 1: movdqu <x2=reg128#15,352(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,352(<input_0=%rdi) movdqu % xmm14, 352( % rdi) # qhasm: mem128[ input_0 + 480 ] = x3 # asm 1: movdqu <x3=reg128#11,480(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,480(<input_0=%rdi) movdqu % xmm10, 480( % rdi) # qhasm: mem128[ input_0 + 608 ] = x4 # asm 1: movdqu <x4=reg128#12,608(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,608(<input_0=%rdi) movdqu % xmm11, 608( % rdi) # qhasm: mem128[ input_0 + 736 ] = x5 # asm 1: movdqu <x5=reg128#9,736(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,736(<input_0=%rdi) movdqu % xmm8, 736( % rdi) # qhasm: mem128[ input_0 + 864 ] = x6 # asm 1: movdqu <x6=reg128#13,864(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,864(<input_0=%rdi) movdqu % xmm12, 864( % rdi) # qhasm: mem128[ input_0 + 992 ] = x7 # asm 1: movdqu <x7=reg128#7,992(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,992(<input_0=%rdi) movdqu % xmm6, 992( % rdi) # qhasm: x0 = mem128[ input_0 + 112 ] # asm 1: movdqu 112(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 112(<input_0=%rdi),>x0=%xmm6 movdqu 112( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 240 ] # asm 1: movdqu 240(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 240(<input_0=%rdi),>x1=%xmm7 movdqu 240( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 368 ] # asm 1: movdqu 368(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 368(<input_0=%rdi),>x2=%xmm8 movdqu 368( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 496 ] # asm 1: movdqu 496(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 496(<input_0=%rdi),>x3=%xmm9 movdqu 496( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 624 ] # asm 1: movdqu 624(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 624(<input_0=%rdi),>x4=%xmm10 movdqu 624( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 752 ] # asm 1: movdqu 752(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 752(<input_0=%rdi),>x5=%xmm11 movdqu 752( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 880 ] # asm 1: movdqu 880(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 880(<input_0=%rdi),>x6=%xmm12 movdqu 880( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 1008 ] # asm 1: movdqu 1008(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 1008(<input_0=%rdi),>x7=%xmm13 movdqu 1008( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand 
<mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#1 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm0 vpand % xmm0, % xmm9, % xmm0 # qhasm: 2x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#13 # asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm12 vpsllq $32, % xmm13, % xmm12 # qhasm: 2x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#2 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm1 vpand % xmm1, % xmm13, % xmm1 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#13,<v00=reg128#1,>x3=reg128#1 # asm 2: vpor <v10=%xmm12,<v00=%xmm0,>x3=%xmm0 vpor % xmm12, % xmm0, % xmm0 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#10,>x7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm9,>x7=%xmm1 vpor % xmm1, % xmm9, % xmm1 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#10 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm9 vpand % xmm2, % xmm14, % xmm9 # qhasm: 4x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#13 # asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm12 vpslld $16, % xmm11, % xmm12 # qhasm: 4x v01 = 
x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#14 # asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm13 vpsrld $16, % xmm14, % xmm13 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#13,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm12,<v00=%xmm9,>x0=%xmm9 vpor % xmm12, % xmm9, % xmm9 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#14,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm13,>x2=%xmm11 vpor % xmm11, % xmm13, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm12 vpand % xmm2, % xmm10, % xmm12 # qhasm: 4x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg128#1,>v10=reg128#14 # asm 2: vpslld $16,<x3=%xmm0,>v10=%xmm13 vpslld $16, % xmm0, % xmm13 # qhasm: 4x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask3=%xmm3,<x3=%xmm0,>v11=%xmm0 vpand % xmm3, % xmm0, % xmm0 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#13,>x1=reg128#13 # asm 2: vpor <v10=%xmm13,<v00=%xmm12,>x1=%xmm12 vpor % xmm13, % xmm12, % xmm12 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#11,>x3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm10,>x3=%xmm0 vpor % xmm0, % xmm10, % xmm0 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#11 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm10 vpand % xmm2, % xmm6, % xmm10 # qhasm: 4x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#14 # asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm13 vpslld $16, % xmm8, % xmm13 # qhasm: 4x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#11,>x4=reg128#11 # asm 2: vpor <v10=%xmm13,<v00=%xmm10,>x4=%xmm10 vpor % xmm13, % xmm10, % xmm10 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#3 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm2 vpand % xmm2, % xmm7, % xmm2 # qhasm: 4x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg128#2,>v10=reg128#9 # asm 2: vpslld $16,<x7=%xmm1,>v10=%xmm8 vpslld $16, % xmm1, % xmm8 # qhasm: 4x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask3=%xmm3,<x7=%xmm1,>v11=%xmm1 vpand % xmm3, % xmm1, % xmm1 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#9,<v00=reg128#3,>x5=reg128#3 # asm 2: vpor <v10=%xmm8,<v00=%xmm2,>x5=%xmm2 vpor % xmm8, % xmm2, % xmm2 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#8,>x7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm7,>x7=%xmm1 vpor % xmm1, % xmm7, % xmm1 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#10,>v00=reg128#4 # asm 2: vpand 
<mask4=%xmm4,<x0=%xmm9,>v00=%xmm3 vpand % xmm4, % xmm9, % xmm3 # qhasm: 8x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg128#13,>v10=reg128#8 # asm 2: vpsllw $8,<x1=%xmm12,>v10=%xmm7 vpsllw $8, % xmm12, % xmm7 # qhasm: 8x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg128#10,>v01=reg128#9 # asm 2: vpsrlw $8,<x0=%xmm9,>v01=%xmm8 vpsrlw $8, % xmm9, % xmm8 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#13,>v11=reg128#10 # asm 2: vpand <mask5=%xmm5,<x1=%xmm12,>v11=%xmm9 vpand % xmm5, % xmm12, % xmm9 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#8,<v00=reg128#4,>x0=reg128#4 # asm 2: vpor <v10=%xmm7,<v00=%xmm3,>x0=%xmm3 vpor % xmm7, % xmm3, % xmm3 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#9,>x1=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm8,>x1=%xmm7 vpor % xmm9, % xmm8, % xmm7 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#9 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm8 vpand % xmm4, % xmm11, % xmm8 # qhasm: 8x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg128#1,>v10=reg128#10 # asm 2: vpsllw $8,<x3=%xmm0,>v10=%xmm9 vpsllw $8, % xmm0, % xmm9 # qhasm: 8x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask5=%xmm5,<x3=%xmm0,>v11=%xmm0 vpand % xmm5, % xmm0, % xmm0 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#10,<v00=reg128#9,>x2=reg128#9 # asm 2: vpor <v10=%xmm9,<v00=%xmm8,>x2=%xmm8 vpor % xmm9, % xmm8, % xmm8 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#12,>x3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm11,>x3=%xmm0 vpor % xmm0, % xmm11, % xmm0 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#11,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x4=%xmm10,>v00=%xmm9 vpand % xmm4, % xmm10, % xmm9 # qhasm: 8x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg128#3,>v10=reg128#12 # asm 2: vpsllw $8,<x5=%xmm2,>v10=%xmm11 vpsllw $8, % xmm2, % xmm11 # qhasm: 8x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg128#11,>v01=reg128#11 # asm 2: vpsrlw $8,<x4=%xmm10,>v01=%xmm10 vpsrlw $8, % xmm10, % xmm10 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#3,>v11=reg128#3 # asm 2: vpand <mask5=%xmm5,<x5=%xmm2,>v11=%xmm2 vpand % xmm5, % xmm2, % xmm2 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#12,<v00=reg128#10,>x4=reg128#10 # asm 2: vpor <v10=%xmm11,<v00=%xmm9,>x4=%xmm9 vpor % xmm11, % xmm9, % xmm9 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#3,<v01=reg128#11,>x5=reg128#3 # asm 2: vpor <v11=%xmm2,<v01=%xmm10,>x5=%xmm2 vpor % xmm2, % xmm10, % xmm2 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#5 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm4 vpand % xmm4, % xmm6, % xmm4 # qhasm: 8x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg128#2,>v10=reg128#11 # asm 2: vpsllw $8,<x7=%xmm1,>v10=%xmm10 vpsllw $8, % xmm1, % xmm10 # qhasm: 8x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask5=%xmm5,<x7=%xmm1,>v11=%xmm1 vpand % xmm5, % xmm1, % xmm1 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#11,<v00=reg128#5,>x6=reg128#5 # asm 2: vpor <v10=%xmm10,<v00=%xmm4,>x6=%xmm4 vpor % xmm10, % xmm4, % xmm4 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#7,>x7=reg128#2 # asm 2: vpor 
<v11=%xmm1,<v01=%xmm6,>x7=%xmm1
vpor %xmm1,%xmm6,%xmm1
# qhasm: mem128[ input_0 + 112 ] = x0
# asm 1: movdqu <x0=reg128#4,112(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm3,112(<input_0=%rdi)
movdqu %xmm3,112(%rdi)
# qhasm: mem128[ input_0 + 240 ] = x1
# asm 1: movdqu <x1=reg128#8,240(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm7,240(<input_0=%rdi)
movdqu %xmm7,240(%rdi)
# qhasm: mem128[ input_0 + 368 ] = x2
# asm 1: movdqu <x2=reg128#9,368(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm8,368(<input_0=%rdi)
movdqu %xmm8,368(%rdi)
# qhasm: mem128[ input_0 + 496 ] = x3
# asm 1: movdqu <x3=reg128#1,496(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm0,496(<input_0=%rdi)
movdqu %xmm0,496(%rdi)
# qhasm: mem128[ input_0 + 624 ] = x4
# asm 1: movdqu <x4=reg128#10,624(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm9,624(<input_0=%rdi)
movdqu %xmm9,624(%rdi)
# qhasm: mem128[ input_0 + 752 ] = x5
# asm 1: movdqu <x5=reg128#3,752(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm2,752(<input_0=%rdi)
movdqu %xmm2,752(%rdi)
# qhasm: mem128[ input_0 + 880 ] = x6
# asm 1: movdqu <x6=reg128#5,880(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm4,880(<input_0=%rdi)
movdqu %xmm4,880(%rdi)
# qhasm: mem128[ input_0 + 1008 ] = x7
# asm 1: movdqu <x7=reg128#2,1008(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm1,1008(<input_0=%rdi)
movdqu %xmm1,1008(%rdi)
# qhasm: mask0 aligned= mem128[ MASK2_0 ]
# asm 1: movdqa MASK2_0(%rip),>mask0=reg128#1
# asm 2: movdqa MASK2_0(%rip),>mask0=%xmm0
movdqa MASK2_0(%rip),%xmm0
# qhasm: mask1 aligned= mem128[ MASK2_1 ]
# asm 1: movdqa MASK2_1(%rip),>mask1=reg128#2
# asm 2: movdqa MASK2_1(%rip),>mask1=%xmm1
movdqa MASK2_1(%rip),%xmm1
# qhasm: mask2 aligned= mem128[ MASK1_0 ]
# asm 1: movdqa MASK1_0(%rip),>mask2=reg128#3
# asm 2: movdqa MASK1_0(%rip),>mask2=%xmm2
movdqa MASK1_0(%rip),%xmm2
# qhasm: mask3 aligned= mem128[ MASK1_1 ]
# asm 1: movdqa MASK1_1(%rip),>mask3=reg128#4
# asm 2: movdqa MASK1_1(%rip),>mask3=%xmm3
movdqa MASK1_1(%rip),%xmm3
# qhasm: mask4 aligned= mem128[ MASK0_0 ]
# asm 1: movdqa MASK0_0(%rip),>mask4=reg128#5
# asm 2: movdqa MASK0_0(%rip),>mask4=%xmm4
movdqa MASK0_0(%rip),%xmm4
# qhasm: mask5 aligned= mem128[ MASK0_1 ]
# asm 1: movdqa MASK0_1(%rip),>mask5=reg128#6
# asm 2: movdqa MASK0_1(%rip),>mask5=%xmm5
movdqa MASK0_1(%rip),%xmm5
# qhasm: x0 = mem128[ input_0 + 0 ]
# asm 1: movdqu 0(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 0(<input_0=%rdi),>x0=%xmm6
movdqu 0(%rdi),%xmm6
# qhasm: x1 = mem128[ input_0 + 16 ]
# asm 1: movdqu 16(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 16(<input_0=%rdi),>x1=%xmm7
movdqu 16(%rdi),%xmm7
# qhasm: x2 = mem128[ input_0 + 32 ]
# asm 1: movdqu 32(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 32(<input_0=%rdi),>x2=%xmm8
movdqu 32(%rdi),%xmm8
# qhasm: x3 = mem128[ input_0 + 48 ]
# asm 1: movdqu 48(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 48(<input_0=%rdi),>x3=%xmm9
movdqu 48(%rdi),%xmm9
# qhasm: x4 = mem128[ input_0 + 64 ]
# asm 1: movdqu 64(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 64(<input_0=%rdi),>x4=%xmm10
movdqu 64(%rdi),%xmm10
# qhasm: x5 = mem128[ input_0 + 80 ]
# asm 1: movdqu 80(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 80(<input_0=%rdi),>x5=%xmm11
movdqu 80(%rdi),%xmm11
# qhasm: x6 = mem128[ input_0 + 96 ]
# asm 1: movdqu 96(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 96(<input_0=%rdi),>x6=%xmm12
movdqu 96(%rdi),%xmm12
# qhasm: x7 = mem128[ input_0 + 112 ]
# asm 1: movdqu 112(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu
112(<input_0=%rdi),>x7=%xmm13 movdqu 112( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor 
<v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % 
xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <mask2=reg128#3,<x7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <mask4=reg128#5,<x1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <mask5=reg128#6,<x0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<x0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 
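#
# The vpand/psllq/psrlq/vpor groups above all follow the same masked swap
# pattern. A sketch (assuming each mask pair selects complementary bit
# groups, e.g. 0x5555…/0xAAAA… for the 1-bit stage, 0x3333…/0xCCCC… for the
# 2-bit stage, 0x0F0F…/0xF0F0… for the 4-bit stage) is:
#
#   new_lo = (lo & mask_lo) | ((hi & mask_lo) << s)
#   new_hi = ((lo & mask_hi) >> s) | (hi & mask_hi)
#
# Applying this for s = 4, 2, 1 to the register pairs (x0,x4)…, (x0,x2)…,
# (x0,x1)… is the standard masked-shift step used for bit-matrix
# transposition / bit interleaving of the eight 128-bit inputs; the mask
# constants come from the MASK*_0/MASK*_1 tables loaded above.
#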
vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <mask4=reg128#5,<x5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: 
vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 0 ] = x0 # asm 1: movdqu <x0=reg128#10,0(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,0(<input_0=%rdi) movdqu % xmm9, 0( % rdi) # qhasm: mem128[ input_0 + 16 ] = x1 # asm 1: movdqu <x1=reg128#14,16(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,16(<input_0=%rdi) movdqu % xmm13, 16( % rdi) # qhasm: mem128[ input_0 + 32 ] = x2 # asm 1: movdqu <x2=reg128#15,32(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,32(<input_0=%rdi) movdqu % xmm14, 32( % rdi) # qhasm: mem128[ input_0 + 48 ] = x3 # asm 1: movdqu <x3=reg128#11,48(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,48(<input_0=%rdi) movdqu % xmm10, 48( % rdi) # qhasm: mem128[ input_0 + 64 ] = x4 # asm 1: movdqu <x4=reg128#12,64(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,64(<input_0=%rdi) movdqu % xmm11, 64( % rdi) # qhasm: mem128[ input_0 + 80 ] = x5 # asm 1: movdqu <x5=reg128#9,80(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,80(<input_0=%rdi) movdqu % xmm8, 80( % rdi) # qhasm: mem128[ input_0 + 96 ] = x6 # asm 1: movdqu <x6=reg128#13,96(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,96(<input_0=%rdi) movdqu % xmm12, 96( % rdi) # qhasm: mem128[ input_0 + 112 ] = x7 # asm 1: movdqu <x7=reg128#7,112(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,112(<input_0=%rdi) movdqu % xmm6, 112( % rdi) # qhasm: x0 = mem128[ input_0 + 128 ] # asm 1: movdqu 128(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 128(<input_0=%rdi),>x0=%xmm6 movdqu 128( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 144 ] # asm 1: movdqu 144(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 144(<input_0=%rdi),>x1=%xmm7 movdqu 144( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 160 ] # asm 1: movdqu 160(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 160(<input_0=%rdi),>x2=%xmm8 movdqu 160( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 176 ] # asm 1: movdqu 176(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 176(<input_0=%rdi),>x3=%xmm9 movdqu 176( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 192 ] # asm 1: movdqu 192(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 192(<input_0=%rdi),>x4=%xmm10 movdqu 192( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 208 ] # asm 1: movdqu 208(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 208(<input_0=%rdi),>x5=%xmm11 movdqu 208( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 224 ] # asm 1: movdqu 224(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 224(<input_0=%rdi),>x6=%xmm12 movdqu 224( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 240 ] # asm 1: movdqu 240(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 240(<input_0=%rdi),>x7=%xmm13 movdqu 240( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, 
% xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand 
<mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand 
<mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <mask2=reg128#3,<x7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <mask4=reg128#5,<x1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <mask5=reg128#6,<x0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<x0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = x3 & 
mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <mask4=reg128#5,<x5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 128 ] = x0 # asm 1: movdqu <x0=reg128#10,128(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,128(<input_0=%rdi) movdqu % xmm9, 128( % rdi) # qhasm: mem128[ input_0 + 144 ] = x1 # asm 1: movdqu <x1=reg128#14,144(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,144(<input_0=%rdi) movdqu % xmm13, 144( % rdi) # qhasm: mem128[ input_0 + 160 ] = x2 # asm 1: movdqu <x2=reg128#15,160(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,160(<input_0=%rdi) movdqu % xmm14, 160( % rdi) # qhasm: mem128[ input_0 + 176 ] = x3 # asm 1: movdqu <x3=reg128#11,176(<input_0=int64#1) # asm 2: movdqu 
<x3=%xmm10,176(<input_0=%rdi) movdqu % xmm10, 176( % rdi) # qhasm: mem128[ input_0 + 192 ] = x4 # asm 1: movdqu <x4=reg128#12,192(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,192(<input_0=%rdi) movdqu % xmm11, 192( % rdi) # qhasm: mem128[ input_0 + 208 ] = x5 # asm 1: movdqu <x5=reg128#9,208(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,208(<input_0=%rdi) movdqu % xmm8, 208( % rdi) # qhasm: mem128[ input_0 + 224 ] = x6 # asm 1: movdqu <x6=reg128#13,224(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,224(<input_0=%rdi) movdqu % xmm12, 224( % rdi) # qhasm: mem128[ input_0 + 240 ] = x7 # asm 1: movdqu <x7=reg128#7,240(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,240(<input_0=%rdi) movdqu % xmm6, 240( % rdi) # qhasm: x0 = mem128[ input_0 + 256 ] # asm 1: movdqu 256(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 256(<input_0=%rdi),>x0=%xmm6 movdqu 256( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 272 ] # asm 1: movdqu 272(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 272(<input_0=%rdi),>x1=%xmm7 movdqu 272( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 288 ] # asm 1: movdqu 288(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 288(<input_0=%rdi),>x2=%xmm8 movdqu 288( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 304 ] # asm 1: movdqu 304(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 304(<input_0=%rdi),>x3=%xmm9 movdqu 304( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 320 ] # asm 1: movdqu 320(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 320(<input_0=%rdi),>x4=%xmm10 movdqu 320( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 336 ] # asm 1: movdqu 336(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 336(<input_0=%rdi),>x5=%xmm11 movdqu 336( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 352 ] # asm 1: movdqu 352(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 352(<input_0=%rdi),>x6=%xmm12 movdqu 352( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 368 ] # asm 1: movdqu 368(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 368(<input_0=%rdi),>x7=%xmm13 movdqu 368( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 
4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15 vpand % xmm2, % 
xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <mask2=reg128#3,<x7=reg128#10,>v10=reg128#16 # asm 2: vpand 
<mask2=%xmm2,<x7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <mask4=reg128#5,<x1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <mask5=reg128#6,<x0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<x0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand 
<mask4=reg128#5,<x5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 256 ] = x0 # asm 1: movdqu <x0=reg128#10,256(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,256(<input_0=%rdi) movdqu % xmm9, 256( % rdi) # qhasm: mem128[ input_0 + 272 ] = x1 # asm 1: movdqu <x1=reg128#14,272(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,272(<input_0=%rdi) movdqu % xmm13, 272( % rdi) # qhasm: mem128[ input_0 + 288 ] = x2 # asm 1: movdqu <x2=reg128#15,288(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,288(<input_0=%rdi) movdqu % xmm14, 288( % rdi) # qhasm: mem128[ input_0 + 304 ] = x3 # asm 1: movdqu <x3=reg128#11,304(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,304(<input_0=%rdi) movdqu % xmm10, 304( % rdi) # qhasm: mem128[ input_0 + 320 ] = x4 # asm 1: movdqu <x4=reg128#12,320(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,320(<input_0=%rdi) movdqu % xmm11, 320( % rdi) # qhasm: mem128[ input_0 + 336 ] = x5 # asm 1: movdqu <x5=reg128#9,336(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,336(<input_0=%rdi) movdqu % xmm8, 336( % rdi) # qhasm: mem128[ input_0 + 352 ] = x6 # asm 1: movdqu <x6=reg128#13,352(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,352(<input_0=%rdi) movdqu % xmm12, 352( % rdi) # qhasm: mem128[ input_0 + 368 ] = x7 # asm 1: movdqu <x7=reg128#7,368(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,368(<input_0=%rdi) movdqu % xmm6, 368( % rdi) # qhasm: x0 = mem128[ input_0 + 384 ] # asm 1: movdqu 
384(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 384(<input_0=%rdi),>x0=%xmm6 movdqu 384( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 400 ] # asm 1: movdqu 400(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 400(<input_0=%rdi),>x1=%xmm7 movdqu 400( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 416 ] # asm 1: movdqu 416(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 416(<input_0=%rdi),>x2=%xmm8 movdqu 416( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 432 ] # asm 1: movdqu 432(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 432(<input_0=%rdi),>x3=%xmm9 movdqu 432( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 448 ] # asm 1: movdqu 448(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 448(<input_0=%rdi),>x4=%xmm10 movdqu 448( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 464 ] # asm 1: movdqu 464(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 464(<input_0=%rdi),>x5=%xmm11 movdqu 464( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 480 ] # asm 1: movdqu 480(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 480(<input_0=%rdi),>x6=%xmm12 movdqu 480( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 496 ] # asm 1: movdqu 496(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 496(<input_0=%rdi),>x7=%xmm13 movdqu 496( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 
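#
# The same sequence repeats, unchanged except for the offsets, for each
# successive 128-byte slice of the buffer at input_0: load x0..x7 from
# offsets +0, +16, ..., +112 of the slice, apply the 4-/2-/1-bit masked
# swaps, and store the results back to the same offsets. The slices handled
# so far start at +0, +128 and +256; the slice being processed here starts
# at +384, and the pattern appears to continue through the rest of the
# buffer.
#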
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor 
<v11=reg128#12,<v01=reg128#15,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11
vpor %xmm11,%xmm14,%xmm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14
vpand %xmm2,%xmm10,%xmm14

# qhasm: v10 = x3 & mask2
# asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15
vpand %xmm2,%xmm12,%xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = x1 & mask3
# asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11
# asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10
vpand %xmm3,%xmm10,%xmm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12
vpand %xmm3,%xmm12,%xmm12

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#11
# asm 2: psrlq $2,<v01=%xmm10
psrlq $2,%xmm10

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10
vpor %xmm12,%xmm10,%xmm10

# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12
vpand %xmm2,%xmm6,%xmm12

# qhasm: v10 = x6 & mask2
# asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm15
vpand %xmm2,%xmm8,%xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = x4 & mask3
# asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7
# asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6
vpand %xmm3,%xmm6,%xmm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#7
# asm 2: psrlq $2,<v01=%xmm6
psrlq $2,%xmm6

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor %xmm8,%xmm6,%xmm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8
vpand %xmm2,%xmm7,%xmm8

# qhasm: v10 = x7 & mask2
# asm 1: vpand <mask2=reg128#3,<x7=reg128#10,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x7=%xmm9,>v10=%xmm15
vpand %xmm2,%xmm9,%xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = x5 & mask3
# asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8
# asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7
vpand %xmm3,%xmm7,%xmm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9
vpand %xmm3,%xmm9,%xmm9

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#8
# asm 2: psrlq $2,<v01=%xmm7
psrlq $2,%xmm7

# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8
vpor %xmm15,%xmm8,%xmm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7
vpor %xmm9,%xmm7,%xmm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9
vpand %xmm4,%xmm13,%xmm9

# qhasm: v10 = x1 & mask4
# asm 1: vpand <mask4=reg128#5,<x1=reg128#15,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x1=%xmm14,>v10=%xmm15
vpand %xmm4,%xmm14,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = x0 & mask5
# asm 1: vpand <mask5=reg128#6,<x0=reg128#14,>v01=reg128#14
# asm 2: vpand <mask5=%xmm5,<x0=%xmm13,>v01=%xmm13
vpand %xmm5,%xmm13,%xmm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14
vpand %xmm5,%xmm14,%xmm14

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#14
# asm 2: psrlq $1,<v01=%xmm13
psrlq $1,%xmm13

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9
vpor %xmm15,%xmm9,%xmm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13
vpor %xmm14,%xmm13,%xmm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14
vpand %xmm4,%xmm11,%xmm14

# qhasm: v10 = x3 & mask4
# asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15
vpand %xmm4,%xmm10,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = x2 & mask5
# asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12
# asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11
vpand %xmm5,%xmm11,%xmm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10
vpand %xmm5,%xmm10,%xmm10

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#12
# asm 2: psrlq $1,<v01=%xmm11
psrlq $1,%xmm11

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10
vpor %xmm10,%xmm11,%xmm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11
vpand %xmm4,%xmm12,%xmm11

# qhasm: v10 = x5 & mask4
# asm 1: vpand <mask4=reg128#5,<x5=reg128#9,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15
vpand %xmm4,%xmm8,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = x4 & mask5
# asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13
# asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12
vpand %xmm5,%xmm12,%xmm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8
vpand %xmm5,%xmm8,%xmm8

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#13
# asm 2: psrlq $1,<v01=%xmm12
psrlq $1,%xmm12

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12
# asm 2: vpor
<v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 384 ] = x0 # asm 1: movdqu <x0=reg128#10,384(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,384(<input_0=%rdi) movdqu % xmm9, 384( % rdi) # qhasm: mem128[ input_0 + 400 ] = x1 # asm 1: movdqu <x1=reg128#14,400(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,400(<input_0=%rdi) movdqu % xmm13, 400( % rdi) # qhasm: mem128[ input_0 + 416 ] = x2 # asm 1: movdqu <x2=reg128#15,416(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,416(<input_0=%rdi) movdqu % xmm14, 416( % rdi) # qhasm: mem128[ input_0 + 432 ] = x3 # asm 1: movdqu <x3=reg128#11,432(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,432(<input_0=%rdi) movdqu % xmm10, 432( % rdi) # qhasm: mem128[ input_0 + 448 ] = x4 # asm 1: movdqu <x4=reg128#12,448(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,448(<input_0=%rdi) movdqu % xmm11, 448( % rdi) # qhasm: mem128[ input_0 + 464 ] = x5 # asm 1: movdqu <x5=reg128#9,464(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,464(<input_0=%rdi) movdqu % xmm8, 464( % rdi) # qhasm: mem128[ input_0 + 480 ] = x6 # asm 1: movdqu <x6=reg128#13,480(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,480(<input_0=%rdi) movdqu % xmm12, 480( % rdi) # qhasm: mem128[ input_0 + 496 ] = x7 # asm 1: movdqu <x7=reg128#7,496(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,496(<input_0=%rdi) movdqu % xmm6, 496( % rdi) # qhasm: x0 = mem128[ input_0 + 512 ] # asm 1: movdqu 512(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 512(<input_0=%rdi),>x0=%xmm6 movdqu 512( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 528 ] # asm 1: movdqu 528(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 528(<input_0=%rdi),>x1=%xmm7 movdqu 528( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 544 ] # asm 1: movdqu 544(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 544(<input_0=%rdi),>x2=%xmm8 movdqu 544( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 560 ] # asm 1: movdqu 560(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 560(<input_0=%rdi),>x3=%xmm9 movdqu 560( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 576 ] # asm 1: movdqu 576(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 576(<input_0=%rdi),>x4=%xmm10 movdqu 576( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 592 ] # 
asm 1: movdqu 592(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 592(<input_0=%rdi),>x5=%xmm11 movdqu 592( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 608 ] # asm 1: movdqu 608(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 608(<input_0=%rdi),>x6=%xmm12 movdqu 608( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 624 ] # asm 1: movdqu 624(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 624(<input_0=%rdi),>x7=%xmm13 movdqu 624( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand 
<mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand 
<mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <mask2=reg128#3,<x7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <mask4=reg128#5,<x1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <mask5=reg128#6,<x0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<x0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 
1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <mask4=reg128#5,<x5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6 vpand % xmm5, % 
xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 512 ] = x0 # asm 1: movdqu <x0=reg128#10,512(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,512(<input_0=%rdi) movdqu % xmm9, 512( % rdi) # qhasm: mem128[ input_0 + 528 ] = x1 # asm 1: movdqu <x1=reg128#14,528(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,528(<input_0=%rdi) movdqu % xmm13, 528( % rdi) # qhasm: mem128[ input_0 + 544 ] = x2 # asm 1: movdqu <x2=reg128#15,544(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,544(<input_0=%rdi) movdqu % xmm14, 544( % rdi) # qhasm: mem128[ input_0 + 560 ] = x3 # asm 1: movdqu <x3=reg128#11,560(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,560(<input_0=%rdi) movdqu % xmm10, 560( % rdi) # qhasm: mem128[ input_0 + 576 ] = x4 # asm 1: movdqu <x4=reg128#12,576(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,576(<input_0=%rdi) movdqu % xmm11, 576( % rdi) # qhasm: mem128[ input_0 + 592 ] = x5 # asm 1: movdqu <x5=reg128#9,592(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,592(<input_0=%rdi) movdqu % xmm8, 592( % rdi) # qhasm: mem128[ input_0 + 608 ] = x6 # asm 1: movdqu <x6=reg128#13,608(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,608(<input_0=%rdi) movdqu % xmm12, 608( % rdi) # qhasm: mem128[ input_0 + 624 ] = x7 # asm 1: movdqu <x7=reg128#7,624(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,624(<input_0=%rdi) movdqu % xmm6, 624( % rdi) # qhasm: x0 = mem128[ input_0 + 640 ] # asm 1: movdqu 640(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 640(<input_0=%rdi),>x0=%xmm6 movdqu 640( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 656 ] # asm 1: movdqu 656(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 656(<input_0=%rdi),>x1=%xmm7 movdqu 656( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 672 ] # asm 1: movdqu 672(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 672(<input_0=%rdi),>x2=%xmm8 movdqu 672( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 688 ] # asm 1: movdqu 688(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 688(<input_0=%rdi),>x3=%xmm9 movdqu 688( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 704 ] # asm 1: movdqu 704(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 704(<input_0=%rdi),>x4=%xmm10 movdqu 704( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 720 ] # asm 1: movdqu 720(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 720(<input_0=%rdi),>x5=%xmm11 movdqu 720( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 736 ] # asm 1: movdqu 736(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 736(<input_0=%rdi),>x6=%xmm12 movdqu 736( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 752 ] # asm 1: movdqu 752(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 752(<input_0=%rdi),>x7=%xmm13 movdqu 752( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 
<<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % 
xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#16 # asm 2: vpand 
<mask2=%xmm2,<x6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <mask2=reg128#3,<x7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <mask4=reg128#5,<x1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <mask5=reg128#6,<x0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<x0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16 # asm 
2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <mask4=reg128#5,<x5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 640 ] = x0 # asm 1: movdqu <x0=reg128#10,640(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,640(<input_0=%rdi) movdqu % xmm9, 640( % rdi) # qhasm: mem128[ input_0 + 656 ] = x1 # asm 1: movdqu 
<x1=reg128#14,656(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,656(<input_0=%rdi) movdqu % xmm13, 656( % rdi) # qhasm: mem128[ input_0 + 672 ] = x2 # asm 1: movdqu <x2=reg128#15,672(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,672(<input_0=%rdi) movdqu % xmm14, 672( % rdi) # qhasm: mem128[ input_0 + 688 ] = x3 # asm 1: movdqu <x3=reg128#11,688(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,688(<input_0=%rdi) movdqu % xmm10, 688( % rdi) # qhasm: mem128[ input_0 + 704 ] = x4 # asm 1: movdqu <x4=reg128#12,704(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,704(<input_0=%rdi) movdqu % xmm11, 704( % rdi) # qhasm: mem128[ input_0 + 720 ] = x5 # asm 1: movdqu <x5=reg128#9,720(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,720(<input_0=%rdi) movdqu % xmm8, 720( % rdi) # qhasm: mem128[ input_0 + 736 ] = x6 # asm 1: movdqu <x6=reg128#13,736(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,736(<input_0=%rdi) movdqu % xmm12, 736( % rdi) # qhasm: mem128[ input_0 + 752 ] = x7 # asm 1: movdqu <x7=reg128#7,752(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,752(<input_0=%rdi) movdqu % xmm6, 752( % rdi) # qhasm: x0 = mem128[ input_0 + 768 ] # asm 1: movdqu 768(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 768(<input_0=%rdi),>x0=%xmm6 movdqu 768( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 784 ] # asm 1: movdqu 784(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 784(<input_0=%rdi),>x1=%xmm7 movdqu 784( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 800 ] # asm 1: movdqu 800(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 800(<input_0=%rdi),>x2=%xmm8 movdqu 800( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 816 ] # asm 1: movdqu 816(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 816(<input_0=%rdi),>x3=%xmm9 movdqu 816( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 832 ] # asm 1: movdqu 832(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 832(<input_0=%rdi),>x4=%xmm10 movdqu 832( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 848 ] # asm 1: movdqu 848(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 848(<input_0=%rdi),>x5=%xmm11 movdqu 848( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 864 ] # asm 1: movdqu 864(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 864(<input_0=%rdi),>x6=%xmm12 movdqu 864( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 880 ] # asm 1: movdqu 880(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 880(<input_0=%rdi),>x7=%xmm13 movdqu 880( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 
vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor 
<v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor 
<v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <mask2=reg128#3,<x7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <mask4=reg128#5,<x1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <mask5=reg128#6,<x0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<x0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 
= v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <mask4=reg128#5,<x5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 768 ] = x0 # asm 1: movdqu <x0=reg128#10,768(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,768(<input_0=%rdi) movdqu % xmm9, 768( % rdi) # qhasm: mem128[ input_0 + 784 ] = x1 # asm 1: movdqu <x1=reg128#14,784(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,784(<input_0=%rdi) movdqu % xmm13, 784( % rdi) # qhasm: mem128[ input_0 + 800 ] = x2 # asm 1: movdqu <x2=reg128#15,800(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,800(<input_0=%rdi) movdqu % xmm14, 800( % rdi) # qhasm: mem128[ input_0 + 816 ] = x3 # asm 1: movdqu <x3=reg128#11,816(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,816(<input_0=%rdi) movdqu % xmm10, 816( % rdi) # qhasm: mem128[ input_0 + 832 ] = x4 # asm 1: movdqu <x4=reg128#12,832(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,832(<input_0=%rdi) movdqu % xmm11, 832( % rdi) # qhasm: mem128[ input_0 + 848 ] = x5 # asm 1: movdqu <x5=reg128#9,848(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,848(<input_0=%rdi) movdqu % xmm8, 848( % rdi) # qhasm: mem128[ input_0 + 864 ] = x6 # asm 1: 
movdqu <x6=reg128#13,864(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,864(<input_0=%rdi) movdqu % xmm12, 864( % rdi) # qhasm: mem128[ input_0 + 880 ] = x7 # asm 1: movdqu <x7=reg128#7,880(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,880(<input_0=%rdi) movdqu % xmm6, 880( % rdi) # qhasm: x0 = mem128[ input_0 + 896 ] # asm 1: movdqu 896(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 896(<input_0=%rdi),>x0=%xmm6 movdqu 896( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 912 ] # asm 1: movdqu 912(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 912(<input_0=%rdi),>x1=%xmm7 movdqu 912( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 928 ] # asm 1: movdqu 928(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 928(<input_0=%rdi),>x2=%xmm8 movdqu 928( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 944 ] # asm 1: movdqu 944(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 944(<input_0=%rdi),>x3=%xmm9 movdqu 944( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 960 ] # asm 1: movdqu 960(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 960(<input_0=%rdi),>x4=%xmm10 movdqu 960( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 976 ] # asm 1: movdqu 976(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 976(<input_0=%rdi),>x5=%xmm11 movdqu 976( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 992 ] # asm 1: movdqu 992(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 992(<input_0=%rdi),>x6=%xmm12 movdqu 992( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 1008 ] # asm 1: movdqu 1008(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 1008(<input_0=%rdi),>x7=%xmm13 movdqu 1008( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x 
v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#1 # asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm0 vpand % xmm0, % xmm13, % xmm0 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#1 # asm 2: psllq $4,<v10=%xmm0 psllq $4, % xmm0 # qhasm: v01 = x3 & mask1 # asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#2 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm1 vpand % xmm1, % xmm13, % xmm1 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#1,<v00=reg128#13,>x3=reg128#1 # asm 2: vpor <v10=%xmm0,<v00=%xmm12,>x3=%xmm0 vpor % xmm0, % xmm12, % xmm0 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#10,>x7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm9,>x7=%xmm1 vpor % xmm1, % xmm9, % xmm1 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#10 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm9 vpand % xmm2, % xmm14, % xmm9 # qhasm: v10 = x2 & mask2 # asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#13 # asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm12 vpand % xmm2, % xmm11, % xmm12 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#13 # asm 2: psllq $2,<v10=%xmm12 psllq $2, % xmm12 # qhasm: v01 = x0 & mask3 # asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#14 # asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm13 vpand % xmm3, % xmm14, % xmm13 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % 
xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#14 # asm 2: psrlq $2,<v01=%xmm13 psrlq $2, % xmm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#13,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm12,<v00=%xmm9,>x0=%xmm9 vpor % xmm12, % xmm9, % xmm9 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#14,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm13,>x2=%xmm11 vpor % xmm11, % xmm13, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm12 vpand % xmm2, % xmm10, % xmm12 # qhasm: v10 = x3 & mask2 # asm 1: vpand <mask2=reg128#3,<x3=reg128#1,>v10=reg128#14 # asm 2: vpand <mask2=%xmm2,<x3=%xmm0,>v10=%xmm13 vpand % xmm2, % xmm0, % xmm13 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#14 # asm 2: psllq $2,<v10=%xmm13 psllq $2, % xmm13 # qhasm: v01 = x1 & mask3 # asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask3=%xmm3,<x3=%xmm0,>v11=%xmm0 vpand % xmm3, % xmm0, % xmm0 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#13,>x1=reg128#13 # asm 2: vpor <v10=%xmm13,<v00=%xmm12,>x1=%xmm12 vpor % xmm13, % xmm12, % xmm12 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#11,>x3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm10,>x3=%xmm0 vpor % xmm0, % xmm10, % xmm0 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#11 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm10 vpand % xmm2, % xmm6, % xmm10 # qhasm: v10 = x6 & mask2 # asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#14 # asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm13 vpand % xmm2, % xmm8, % xmm13 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#14 # asm 2: psllq $2,<v10=%xmm13 psllq $2, % xmm13 # qhasm: v01 = x4 & mask3 # asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#11,>x4=reg128#11 # asm 2: vpor <v10=%xmm13,<v00=%xmm10,>x4=%xmm10 vpor % xmm13, % xmm10, % xmm10 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <mask2=reg128#3,<x7=reg128#2,>v10=reg128#3 # asm 2: vpand <mask2=%xmm2,<x7=%xmm1,>v10=%xmm2 vpand % xmm2, % xmm1, % xmm2 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#3 # asm 2: psllq $2,<v10=%xmm2 psllq $2, % xmm2 # qhasm: v01 = x5 & mask3 # asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask3=%xmm3,<x7=%xmm1,>v11=%xmm1 vpand % xmm3, % xmm1, % 
xmm1 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#3,<v00=reg128#9,>x5=reg128#3 # asm 2: vpor <v10=%xmm2,<v00=%xmm8,>x5=%xmm2 vpor % xmm2, % xmm8, % xmm2 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#8,>x7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm7,>x7=%xmm1 vpor % xmm1, % xmm7, % xmm1 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#10,>v00=reg128#4 # asm 2: vpand <mask4=%xmm4,<x0=%xmm9,>v00=%xmm3 vpand % xmm4, % xmm9, % xmm3 # qhasm: v10 = x1 & mask4 # asm 1: vpand <mask4=reg128#5,<x1=reg128#13,>v10=reg128#8 # asm 2: vpand <mask4=%xmm4,<x1=%xmm12,>v10=%xmm7 vpand % xmm4, % xmm12, % xmm7 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#8 # asm 2: psllq $1,<v10=%xmm7 psllq $1, % xmm7 # qhasm: v01 = x0 & mask5 # asm 1: vpand <mask5=reg128#6,<x0=reg128#10,>v01=reg128#9 # asm 2: vpand <mask5=%xmm5,<x0=%xmm9,>v01=%xmm8 vpand % xmm5, % xmm9, % xmm8 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#13,>v11=reg128#10 # asm 2: vpand <mask5=%xmm5,<x1=%xmm12,>v11=%xmm9 vpand % xmm5, % xmm12, % xmm9 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#9 # asm 2: psrlq $1,<v01=%xmm8 psrlq $1, % xmm8 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#8,<v00=reg128#4,>x0=reg128#4 # asm 2: vpor <v10=%xmm7,<v00=%xmm3,>x0=%xmm3 vpor % xmm7, % xmm3, % xmm3 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#9,>x1=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm8,>x1=%xmm7 vpor % xmm9, % xmm8, % xmm7 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#9 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm8 vpand % xmm4, % xmm11, % xmm8 # qhasm: v10 = x3 & mask4 # asm 1: vpand <mask4=reg128#5,<x3=reg128#1,>v10=reg128#10 # asm 2: vpand <mask4=%xmm4,<x3=%xmm0,>v10=%xmm9 vpand % xmm4, % xmm0, % xmm9 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#10 # asm 2: psllq $1,<v10=%xmm9 psllq $1, % xmm9 # qhasm: v01 = x2 & mask5 # asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask5=%xmm5,<x3=%xmm0,>v11=%xmm0 vpand % xmm5, % xmm0, % xmm0 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#10,<v00=reg128#9,>x2=reg128#9 # asm 2: vpor <v10=%xmm9,<v00=%xmm8,>x2=%xmm8 vpor % xmm9, % xmm8, % xmm8 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#12,>x3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm11,>x3=%xmm0 vpor % xmm0, % xmm11, % xmm0 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#11,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x4=%xmm10,>v00=%xmm9 vpand % xmm4, % xmm10, % xmm9 # qhasm: v10 = x5 & mask4 # asm 1: vpand <mask4=reg128#5,<x5=reg128#3,>v10=reg128#12 # asm 2: vpand <mask4=%xmm4,<x5=%xmm2,>v10=%xmm11 vpand % xmm4, % xmm2, % xmm11 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#12 # asm 2: psllq $1,<v10=%xmm11 psllq $1, % xmm11 # qhasm: v01 = x4 & mask5 # asm 1: vpand <mask5=reg128#6,<x4=reg128#11,>v01=reg128#11 # asm 2: vpand <mask5=%xmm5,<x4=%xmm10,>v01=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#3,>v11=reg128#3 # asm 2: vpand <mask5=%xmm5,<x5=%xmm2,>v11=%xmm2 vpand % xmm5, % xmm2, % xmm2 # qhasm: 2x v01 
unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#11 # asm 2: psrlq $1,<v01=%xmm10 psrlq $1, % xmm10 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#12,<v00=reg128#10,>x4=reg128#10 # asm 2: vpor <v10=%xmm11,<v00=%xmm9,>x4=%xmm9 vpor % xmm11, % xmm9, % xmm9 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#3,<v01=reg128#11,>x5=reg128#3 # asm 2: vpor <v11=%xmm2,<v01=%xmm10,>x5=%xmm2 vpor % xmm2, % xmm10, % xmm2 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#11 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm10 vpand % xmm4, % xmm6, % xmm10 # qhasm: v10 = x7 & mask4 # asm 1: vpand <mask4=reg128#5,<x7=reg128#2,>v10=reg128#5 # asm 2: vpand <mask4=%xmm4,<x7=%xmm1,>v10=%xmm4 vpand % xmm4, % xmm1, % xmm4 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#5 # asm 2: psllq $1,<v10=%xmm4 psllq $1, % xmm4 # qhasm: v01 = x6 & mask5 # asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask5=%xmm5,<x7=%xmm1,>v11=%xmm1 vpand % xmm5, % xmm1, % xmm1 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#5,<v00=reg128#11,>x6=reg128#5 # asm 2: vpor <v10=%xmm4,<v00=%xmm10,>x6=%xmm4 vpor % xmm4, % xmm10, % xmm4 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#7,>x7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm6,>x7=%xmm1 vpor % xmm1, % xmm6, % xmm1 # qhasm: mem128[ input_0 + 896 ] = x0 # asm 1: movdqu <x0=reg128#4,896(<input_0=int64#1) # asm 2: movdqu <x0=%xmm3,896(<input_0=%rdi) movdqu % xmm3, 896( % rdi) # qhasm: mem128[ input_0 + 912 ] = x1 # asm 1: movdqu <x1=reg128#8,912(<input_0=int64#1) # asm 2: movdqu <x1=%xmm7,912(<input_0=%rdi) movdqu % xmm7, 912( % rdi) # qhasm: mem128[ input_0 + 928 ] = x2 # asm 1: movdqu <x2=reg128#9,928(<input_0=int64#1) # asm 2: movdqu <x2=%xmm8,928(<input_0=%rdi) movdqu % xmm8, 928( % rdi) # qhasm: mem128[ input_0 + 944 ] = x3 # asm 1: movdqu <x3=reg128#1,944(<input_0=int64#1) # asm 2: movdqu <x3=%xmm0,944(<input_0=%rdi) movdqu % xmm0, 944( % rdi) # qhasm: mem128[ input_0 + 960 ] = x4 # asm 1: movdqu <x4=reg128#10,960(<input_0=int64#1) # asm 2: movdqu <x4=%xmm9,960(<input_0=%rdi) movdqu % xmm9, 960( % rdi) # qhasm: mem128[ input_0 + 976 ] = x5 # asm 1: movdqu <x5=reg128#3,976(<input_0=int64#1) # asm 2: movdqu <x5=%xmm2,976(<input_0=%rdi) movdqu % xmm2, 976( % rdi) # qhasm: mem128[ input_0 + 992 ] = x6 # asm 1: movdqu <x6=reg128#5,992(<input_0=int64#1) # asm 2: movdqu <x6=%xmm4,992(<input_0=%rdi) movdqu % xmm4, 992( % rdi) # qhasm: mem128[ input_0 + 1008 ] = x7 # asm 1: movdqu <x7=reg128#2,1008(<input_0=int64#1) # asm 2: movdqu <x7=%xmm1,1008(<input_0=%rdi) movdqu % xmm1, 1008( % rdi) # qhasm: return add % r11, % rsp ret
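# Descriptive note on the routine ending here (a hedged reading of the code above,
# not part of the generated output): the visible tail appears to be the final pass
# of a bit-interleaving/transpose butterfly. Earlier passes pair registers with
# mask0/mask1 and 4-bit shifts (psllq/psrlq $4), then mask2/mask3 with 2-bit
# shifts, and finally mask4/mask5 with 1-bit shifts, swapping ever-smaller bit
# groups between the pairs x0..x7. The results are stored back to input_0 (%rdi)
# at offsets 768..1008, the stack adjustment from the prologue is undone with
# add %r11,%rsp, and the function returns.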
mktmansour/MKT-KSA-Geolocation-Security
69,549
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6960119f/avx2/vec256_mul_asm.S
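# Overview (a hedged sketch of what the generated code below computes, inferred
# from its own qhasm comments; names in the C sketch are hypothetical): the
# routine multiplies two bit-sliced field-element vectors. Each operand is 13
# limbs of 256 bits (a0..a12 from input_1, b limbs from input_2, at 32-byte
# offsets 0..384); vpand acts as the per-bit GF(2) multiply and vpxor as the
# GF(2) add, giving a 25-limb product r0..r24. High limbs are folded back as
# r[k] -> r[k-13+4], r[k-13+3], r[k-13+1], r[k-13], which is consistent with
# reduction modulo x^13 + x^4 + x^3 + x + 1, the GF(2^13) polynomial used by
# mceliece6960119. The 13 reduced limbs are presumably written to input_0.
# Minimal C sketch of the same computation, using 64-bit limbs in place of the
# 256-bit vectors:
#
#   #include <stdint.h>
#
#   /* 64-bit stand-in for the 256-bit limbs used by the asm. */
#   static void vec_mul_sketch(uint64_t h[13], const uint64_t f[13],
#                              const uint64_t g[13]) {
#       uint64_t buf[25] = {0};
#       int i, j;
#       for (i = 0; i < 13; i++)        /* schoolbook product: AND = mul, XOR = add */
#           for (j = 0; j < 13; j++)
#               buf[i + j] ^= f[i] & g[j];
#       for (i = 24; i >= 13; i--) {    /* reduce mod x^13 + x^4 + x^3 + x + 1 */
#           buf[i - 13 + 4] ^= buf[i];
#           buf[i - 13 + 3] ^= buf[i];
#           buf[i - 13 + 1] ^= buf[i];
#           buf[i - 13]     ^= buf[i];
#       }
#       for (i = 0; i < 13; i++)
#           h[i] = buf[i];
#   }
#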
#include "namespace.h" #define vec256_mul_asm CRYPTO_NAMESPACE(vec256_mul_asm) #define _vec256_mul_asm _CRYPTO_NAMESPACE(vec256_mul_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 a0 # qhasm: reg256 a1 # qhasm: reg256 a2 # qhasm: reg256 a3 # qhasm: reg256 a4 # qhasm: reg256 a5 # qhasm: reg256 a6 # qhasm: reg256 a7 # qhasm: reg256 a8 # qhasm: reg256 a9 # qhasm: reg256 a10 # qhasm: reg256 a11 # qhasm: reg256 a12 # qhasm: reg256 b0 # qhasm: reg256 b1 # qhasm: reg256 r0 # qhasm: reg256 r1 # qhasm: reg256 r2 # qhasm: reg256 r3 # qhasm: reg256 r4 # qhasm: reg256 r5 # qhasm: reg256 r6 # qhasm: reg256 r7 # qhasm: reg256 r8 # qhasm: reg256 r9 # qhasm: reg256 r10 # qhasm: reg256 r11 # qhasm: reg256 r12 # qhasm: reg256 r13 # qhasm: reg256 r14 # qhasm: reg256 r15 # qhasm: reg256 r16 # qhasm: reg256 r17 # qhasm: reg256 r18 # qhasm: reg256 r19 # qhasm: reg256 r20 # qhasm: reg256 r21 # qhasm: reg256 r22 # qhasm: reg256 r23 # qhasm: reg256 r24 # qhasm: reg256 r # qhasm: enter vec256_mul_asm .p2align 5 .global _vec256_mul_asm .global vec256_mul_asm _vec256_mul_asm: vec256_mul_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: b0 = mem256[ input_2 + 0 ] # asm 1: vmovupd 0(<input_2=int64#3),>b0=reg256#1 # asm 2: vmovupd 0(<input_2=%rdx),>b0=%ymm0 vmovupd 0( % rdx), % ymm0 # qhasm: a12 = mem256[ input_1 + 384 ] # asm 1: vmovupd 384(<input_1=int64#2),>a12=reg256#2 # asm 2: vmovupd 384(<input_1=%rsi),>a12=%ymm1 vmovupd 384( % rsi), % ymm1 # qhasm: r12 = a12 & b0 # asm 1: vpand <a12=reg256#2,<b0=reg256#1,>r12=reg256#3 # asm 2: vpand <a12=%ymm1,<b0=%ymm0,>r12=%ymm2 vpand % ymm1, % ymm0, % ymm2 # qhasm: r13 = a12 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a12=reg256#2,>r13=reg256#4 # asm 2: vpand 32(<input_2=%rdx),<a12=%ymm1,>r13=%ymm3 vpand 32( % rdx), % ymm1, % ymm3 # qhasm: r14 = a12 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a12=reg256#2,>r14=reg256#5 # asm 2: vpand 64(<input_2=%rdx),<a12=%ymm1,>r14=%ymm4 vpand 64( % rdx), % ymm1, % ymm4 # qhasm: r15 = a12 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a12=reg256#2,>r15=reg256#6 # asm 2: vpand 96(<input_2=%rdx),<a12=%ymm1,>r15=%ymm5 vpand 96( % rdx), % ymm1, % ymm5 # qhasm: r16 = a12 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a12=reg256#2,>r16=reg256#7 # asm 2: vpand 128(<input_2=%rdx),<a12=%ymm1,>r16=%ymm6 vpand 128( % rdx), % ymm1, % ymm6 # qhasm: r17 = a12 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a12=reg256#2,>r17=reg256#8 # asm 2: vpand 160(<input_2=%rdx),<a12=%ymm1,>r17=%ymm7 vpand 160( % rdx), % ymm1, % ymm7 # qhasm: r18 = a12 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a12=reg256#2,>r18=reg256#9 # asm 2: vpand 192(<input_2=%rdx),<a12=%ymm1,>r18=%ymm8 vpand 192( % rdx), % ymm1, % ymm8 # qhasm: r19 = a12 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a12=reg256#2,>r19=reg256#10 # asm 2: vpand 224(<input_2=%rdx),<a12=%ymm1,>r19=%ymm9 vpand 224( % rdx), % ymm1, % ymm9 # qhasm: r20 = a12 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a12=reg256#2,>r20=reg256#11 # asm 2: vpand 256(<input_2=%rdx),<a12=%ymm1,>r20=%ymm10 vpand 256( % rdx), % ymm1, % ymm10 # qhasm: r21 
= a12 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a12=reg256#2,>r21=reg256#12 # asm 2: vpand 288(<input_2=%rdx),<a12=%ymm1,>r21=%ymm11 vpand 288( % rdx), % ymm1, % ymm11 # qhasm: r22 = a12 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a12=reg256#2,>r22=reg256#13 # asm 2: vpand 320(<input_2=%rdx),<a12=%ymm1,>r22=%ymm12 vpand 320( % rdx), % ymm1, % ymm12 # qhasm: r23 = a12 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a12=reg256#2,>r23=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a12=%ymm1,>r23=%ymm13 vpand 352( % rdx), % ymm1, % ymm13 # qhasm: r24 = a12 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a12=reg256#2,>r24=reg256#2 # asm 2: vpand 384(<input_2=%rdx),<a12=%ymm1,>r24=%ymm1 vpand 384( % rdx), % ymm1, % ymm1 # qhasm: r15 ^= r24 # asm 1: vpxor <r24=reg256#2,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r24=%ymm1,<r15=%ymm5,<r15=%ymm5 vpxor % ymm1, % ymm5, % ymm5 # qhasm: r14 ^= r24 # asm 1: vpxor <r24=reg256#2,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r24=%ymm1,<r14=%ymm4,<r14=%ymm4 vpxor % ymm1, % ymm4, % ymm4 # qhasm: r12 ^= r24 # asm 1: vpxor <r24=reg256#2,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r24=%ymm1,<r12=%ymm2,<r12=%ymm2 vpxor % ymm1, % ymm2, % ymm2 # qhasm: r11 = r24 # asm 1: vmovapd <r24=reg256#2,>r11=reg256#2 # asm 2: vmovapd <r24=%ymm1,>r11=%ymm1 vmovapd % ymm1, % ymm1 # qhasm: a11 = mem256[ input_1 + 352 ] # asm 1: vmovupd 352(<input_1=int64#2),>a11=reg256#15 # asm 2: vmovupd 352(<input_1=%rsi),>a11=%ymm14 vmovupd 352( % rsi), % ymm14 # qhasm: r = a11 & b0 # asm 1: vpand <a11=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a11=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a11 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a11 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a11 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a11 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a11 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor 
<r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a11 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a11 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a11 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a11 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a11 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a11 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r22 ^= r # asm 1: vpxor <r=reg256#16,<r22=reg256#13,<r22=reg256#13 # asm 2: vpxor <r=%ymm15,<r22=%ymm12,<r22=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a11 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a11=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a11=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r23 ^= r # asm 1: vpxor <r=reg256#15,<r23=reg256#14,<r23=reg256#14 # asm 2: vpxor <r=%ymm14,<r23=%ymm13,<r23=%ymm13 vpxor % ymm14, % ymm13, % ymm13 # qhasm: r14 ^= r23 # asm 1: vpxor <r23=reg256#14,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r23=%ymm13,<r14=%ymm4,<r14=%ymm4 vpxor % ymm13, % ymm4, % ymm4 # qhasm: r13 ^= r23 # asm 1: vpxor <r23=reg256#14,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r23=%ymm13,<r13=%ymm3,<r13=%ymm3 vpxor % ymm13, % ymm3, % ymm3 # qhasm: r11 ^= r23 # asm 1: vpxor <r23=reg256#14,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r23=%ymm13,<r11=%ymm1,<r11=%ymm1 vpxor % ymm13, % ymm1, % ymm1 # qhasm: r10 = r23 # asm 1: vmovapd <r23=reg256#14,>r10=reg256#14 # asm 2: vmovapd <r23=%ymm13,>r10=%ymm13 vmovapd % ymm13, % ymm13 # qhasm: a10 = mem256[ input_1 + 320 ] # asm 1: vmovupd 320(<input_1=int64#2),>a10=reg256#15 # asm 2: vmovupd 320(<input_1=%rsi),>a10=%ymm14 vmovupd 320( % rsi), % ymm14 # qhasm: r = a10 & b0 # asm 1: vpand <a10=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a10=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor 
<r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a10 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a10 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a10 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a10 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a10 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a10 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a10 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a10 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a10 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a10 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # 
qhasm: r = a10 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a10 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a10=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a10=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r22 ^= r # asm 1: vpxor <r=reg256#15,<r22=reg256#13,<r22=reg256#13 # asm 2: vpxor <r=%ymm14,<r22=%ymm12,<r22=%ymm12 vpxor % ymm14, % ymm12, % ymm12 # qhasm: r13 ^= r22 # asm 1: vpxor <r22=reg256#13,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r22=%ymm12,<r13=%ymm3,<r13=%ymm3 vpxor % ymm12, % ymm3, % ymm3 # qhasm: r12 ^= r22 # asm 1: vpxor <r22=reg256#13,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r22=%ymm12,<r12=%ymm2,<r12=%ymm2 vpxor % ymm12, % ymm2, % ymm2 # qhasm: r10 ^= r22 # asm 1: vpxor <r22=reg256#13,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r22=%ymm12,<r10=%ymm13,<r10=%ymm13 vpxor % ymm12, % ymm13, % ymm13 # qhasm: r9 = r22 # asm 1: vmovapd <r22=reg256#13,>r9=reg256#13 # asm 2: vmovapd <r22=%ymm12,>r9=%ymm12 vmovapd % ymm12, % ymm12 # qhasm: a9 = mem256[ input_1 + 288 ] # asm 1: vmovupd 288(<input_1=int64#2),>a9=reg256#15 # asm 2: vmovupd 288(<input_1=%rsi),>a9=%ymm14 vmovupd 288( % rsi), % ymm14 # qhasm: r = a9 & b0 # asm 1: vpand <a9=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a9=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a9 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a9 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a9 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a9 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a9 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a9 & mem256[input_2 + 192] # 
asm 1: vpand 192(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a9 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a9 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a9 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a9 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a9 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a9 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a9=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a9=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#15,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm14,<r21=%ymm11,<r21=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: r12 ^= r21 # asm 1: vpxor <r21=reg256#12,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r21=%ymm11,<r12=%ymm2,<r12=%ymm2 vpxor % ymm11, % ymm2, % ymm2 # qhasm: r11 ^= r21 # asm 1: vpxor <r21=reg256#12,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r21=%ymm11,<r11=%ymm1,<r11=%ymm1 vpxor % ymm11, % ymm1, % ymm1 # qhasm: r9 ^= r21 # asm 1: vpxor <r21=reg256#12,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r21=%ymm11,<r9=%ymm12,<r9=%ymm12 vpxor % ymm11, % ymm12, % ymm12 # qhasm: r8 = r21 # asm 1: vmovapd <r21=reg256#12,>r8=reg256#12 # asm 2: vmovapd <r21=%ymm11,>r8=%ymm11 vmovapd % ymm11, % ymm11 # qhasm: a8 = mem256[ input_1 + 256 ] # asm 1: vmovupd 256(<input_1=int64#2),>a8=reg256#15 # asm 2: vmovupd 256(<input_1=%rsi),>a8=%ymm14 vmovupd 256( % rsi), % ymm14 # qhasm: r = a8 & b0 # asm 1: vpand <a8=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a8=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a8 & mem256[input_2 + 32] # asm 1: vpand 
32(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a8 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a8 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a8 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a8 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a8 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a8 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a8 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a8 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a8 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a8 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 
352(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a8 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a8=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a8=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#15,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm14,<r20=%ymm10,<r20=%ymm10 vpxor % ymm14, % ymm10, % ymm10 # qhasm: r11 ^= r20 # asm 1: vpxor <r20=reg256#11,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r20=%ymm10,<r11=%ymm1,<r11=%ymm1 vpxor % ymm10, % ymm1, % ymm1 # qhasm: r10 ^= r20 # asm 1: vpxor <r20=reg256#11,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r20=%ymm10,<r10=%ymm13,<r10=%ymm13 vpxor % ymm10, % ymm13, % ymm13 # qhasm: r8 ^= r20 # asm 1: vpxor <r20=reg256#11,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r20=%ymm10,<r8=%ymm11,<r8=%ymm11 vpxor % ymm10, % ymm11, % ymm11 # qhasm: r7 = r20 # asm 1: vmovapd <r20=reg256#11,>r7=reg256#11 # asm 2: vmovapd <r20=%ymm10,>r7=%ymm10 vmovapd % ymm10, % ymm10 # qhasm: a7 = mem256[ input_1 + 224 ] # asm 1: vmovupd 224(<input_1=int64#2),>a7=reg256#15 # asm 2: vmovupd 224(<input_1=%rsi),>a7=%ymm14 vmovupd 224( % rsi), % ymm14 # qhasm: r = a7 & b0 # asm 1: vpand <a7=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a7=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a7 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a7 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a7 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a7 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a7 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a7 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 
192( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a7 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a7 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a7 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a7 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a7 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a7 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a7=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a7=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#15,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm14,<r19=%ymm9,<r19=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: r10 ^= r19 # asm 1: vpxor <r19=reg256#10,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r19=%ymm9,<r10=%ymm13,<r10=%ymm13 vpxor % ymm9, % ymm13, % ymm13 # qhasm: r9 ^= r19 # asm 1: vpxor <r19=reg256#10,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r19=%ymm9,<r9=%ymm12,<r9=%ymm12 vpxor % ymm9, % ymm12, % ymm12 # qhasm: r7 ^= r19 # asm 1: vpxor <r19=reg256#10,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r19=%ymm9,<r7=%ymm10,<r7=%ymm10 vpxor % ymm9, % ymm10, % ymm10 # qhasm: r6 = r19 # asm 1: vmovapd <r19=reg256#10,>r6=reg256#10 # asm 2: vmovapd <r19=%ymm9,>r6=%ymm9 vmovapd % ymm9, % ymm9 # qhasm: a6 = mem256[ input_1 + 192 ] # asm 1: vmovupd 192(<input_1=int64#2),>a6=reg256#15 # asm 2: vmovupd 192(<input_1=%rsi),>a6=%ymm14 vmovupd 192( % rsi), % ymm14 # qhasm: r = a6 & b0 # asm 1: vpand <a6=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a6=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a6 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor 
<r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a6 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a6 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a6 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a6 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a6 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a6 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a6 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a6 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a6 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a6 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 
vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a6 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a6=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a6=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#15,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm14,<r18=%ymm8,<r18=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: r9 ^= r18 # asm 1: vpxor <r18=reg256#9,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r18=%ymm8,<r9=%ymm12,<r9=%ymm12 vpxor % ymm8, % ymm12, % ymm12 # qhasm: r8 ^= r18 # asm 1: vpxor <r18=reg256#9,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r18=%ymm8,<r8=%ymm11,<r8=%ymm11 vpxor % ymm8, % ymm11, % ymm11 # qhasm: r6 ^= r18 # asm 1: vpxor <r18=reg256#9,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r18=%ymm8,<r6=%ymm9,<r6=%ymm9 vpxor % ymm8, % ymm9, % ymm9 # qhasm: r5 = r18 # asm 1: vmovapd <r18=reg256#9,>r5=reg256#9 # asm 2: vmovapd <r18=%ymm8,>r5=%ymm8 vmovapd % ymm8, % ymm8 # qhasm: a5 = mem256[ input_1 + 160 ] # asm 1: vmovupd 160(<input_1=int64#2),>a5=reg256#15 # asm 2: vmovupd 160(<input_1=%rsi),>a5=%ymm14 vmovupd 160( % rsi), % ymm14 # qhasm: r = a5 & b0 # asm 1: vpand <a5=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a5=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a5 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a5 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a5 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a5 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a5 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a5 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a5 & mem256[input_2 + 224] # asm 1: vpand 
224(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a5 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a5 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a5 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a5 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a5 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a5=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a5=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#15,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm14,<r17=%ymm7,<r17=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: r8 ^= r17 # asm 1: vpxor <r17=reg256#8,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r17=%ymm7,<r8=%ymm11,<r8=%ymm11 vpxor % ymm7, % ymm11, % ymm11 # qhasm: r7 ^= r17 # asm 1: vpxor <r17=reg256#8,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r17=%ymm7,<r7=%ymm10,<r7=%ymm10 vpxor % ymm7, % ymm10, % ymm10 # qhasm: r5 ^= r17 # asm 1: vpxor <r17=reg256#8,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r17=%ymm7,<r5=%ymm8,<r5=%ymm8 vpxor % ymm7, % ymm8, % ymm8 # qhasm: r4 = r17 # asm 1: vmovapd <r17=reg256#8,>r4=reg256#8 # asm 2: vmovapd <r17=%ymm7,>r4=%ymm7 vmovapd % ymm7, % ymm7 # qhasm: a4 = mem256[ input_1 + 128 ] # asm 1: vmovupd 128(<input_1=int64#2),>a4=reg256#15 # asm 2: vmovupd 128(<input_1=%rsi),>a4=%ymm14 vmovupd 128( % rsi), % ymm14 # qhasm: r = a4 & b0 # asm 1: vpand <a4=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a4=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a4 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a4 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 
64(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a4 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a4 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a4 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a4 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a4 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a4 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a4 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a4 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a4 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a4 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a4=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a4=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r16 ^= r 
# asm 1: vpxor <r=reg256#15,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm14,<r16=%ymm6,<r16=%ymm6 vpxor % ymm14, % ymm6, % ymm6 # qhasm: r7 ^= r16 # asm 1: vpxor <r16=reg256#7,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r16=%ymm6,<r7=%ymm10,<r7=%ymm10 vpxor % ymm6, % ymm10, % ymm10 # qhasm: r6 ^= r16 # asm 1: vpxor <r16=reg256#7,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r16=%ymm6,<r6=%ymm9,<r6=%ymm9 vpxor % ymm6, % ymm9, % ymm9 # qhasm: r4 ^= r16 # asm 1: vpxor <r16=reg256#7,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r16=%ymm6,<r4=%ymm7,<r4=%ymm7 vpxor % ymm6, % ymm7, % ymm7 # qhasm: r3 = r16 # asm 1: vmovapd <r16=reg256#7,>r3=reg256#7 # asm 2: vmovapd <r16=%ymm6,>r3=%ymm6 vmovapd % ymm6, % ymm6 # qhasm: a3 = mem256[ input_1 + 96 ] # asm 1: vmovupd 96(<input_1=int64#2),>a3=reg256#15 # asm 2: vmovupd 96(<input_1=%rsi),>a3=%ymm14 vmovupd 96( % rsi), % ymm14 # qhasm: r = a3 & b0 # asm 1: vpand <a3=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a3=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a3 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a3 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a3 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a3 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a3 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a3 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a3 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % 
ymm15, % ymm13, % ymm13 # qhasm: r = a3 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a3 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a3 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a3 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a3 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a3=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a3=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#15,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm14,<r15=%ymm5,<r15=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: r6 ^= r15 # asm 1: vpxor <r15=reg256#6,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r15=%ymm5,<r6=%ymm9,<r6=%ymm9 vpxor % ymm5, % ymm9, % ymm9 # qhasm: r5 ^= r15 # asm 1: vpxor <r15=reg256#6,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r15=%ymm5,<r5=%ymm8,<r5=%ymm8 vpxor % ymm5, % ymm8, % ymm8 # qhasm: r3 ^= r15 # asm 1: vpxor <r15=reg256#6,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r15=%ymm5,<r3=%ymm6,<r3=%ymm6 vpxor % ymm5, % ymm6, % ymm6 # qhasm: r2 = r15 # asm 1: vmovapd <r15=reg256#6,>r2=reg256#6 # asm 2: vmovapd <r15=%ymm5,>r2=%ymm5 vmovapd % ymm5, % ymm5 # qhasm: a2 = mem256[ input_1 + 64 ] # asm 1: vmovupd 64(<input_1=int64#2),>a2=reg256#15 # asm 2: vmovupd 64(<input_1=%rsi),>a2=%ymm14 vmovupd 64( % rsi), % ymm14 # qhasm: r = a2 & b0 # asm 1: vpand <a2=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a2=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a2 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a2 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a2 & mem256[input_2 + 96] # asm 1: vpand 
96(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a2 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a2 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a2 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a2 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a2 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a2 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a2 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a2 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a2 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a2=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a2=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm14,<r14=%ymm4,<r14=%ymm4 vpxor % ymm14, % ymm4, % ymm4 # qhasm: r5 ^= r14 # asm 1: vpxor <r14=reg256#5,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r14=%ymm4,<r5=%ymm8,<r5=%ymm8 vpxor % ymm4, % ymm8, % ymm8 # 
qhasm: r4 ^= r14 # asm 1: vpxor <r14=reg256#5,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r14=%ymm4,<r4=%ymm7,<r4=%ymm7 vpxor % ymm4, % ymm7, % ymm7 # qhasm: r2 ^= r14 # asm 1: vpxor <r14=reg256#5,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r14=%ymm4,<r2=%ymm5,<r2=%ymm5 vpxor % ymm4, % ymm5, % ymm5 # qhasm: r1 = r14 # asm 1: vmovapd <r14=reg256#5,>r1=reg256#5 # asm 2: vmovapd <r14=%ymm4,>r1=%ymm4 vmovapd % ymm4, % ymm4 # qhasm: a1 = mem256[ input_1 + 32 ] # asm 1: vmovupd 32(<input_1=int64#2),>a1=reg256#15 # asm 2: vmovupd 32(<input_1=%rsi),>a1=%ymm14 vmovupd 32( % rsi), % ymm14 # qhasm: r = a1 & b0 # asm 1: vpand <a1=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a1=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#16,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm15,<r1=%ymm4,<r1=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a1 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a1 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a1 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a1 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a1 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a1 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a1 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a1 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor 
<r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a1 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a1 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a1 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a1 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a1=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a1=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm14,<r13=%ymm3,<r13=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: r4 ^= r13 # asm 1: vpxor <r13=reg256#4,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r13=%ymm3,<r4=%ymm7,<r4=%ymm7 vpxor % ymm3, % ymm7, % ymm7 # qhasm: r3 ^= r13 # asm 1: vpxor <r13=reg256#4,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r13=%ymm3,<r3=%ymm6,<r3=%ymm6 vpxor % ymm3, % ymm6, % ymm6 # qhasm: r1 ^= r13 # asm 1: vpxor <r13=reg256#4,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r13=%ymm3,<r1=%ymm4,<r1=%ymm4 vpxor % ymm3, % ymm4, % ymm4 # qhasm: r0 = r13 # asm 1: vmovapd <r13=reg256#4,>r0=reg256#4 # asm 2: vmovapd <r13=%ymm3,>r0=%ymm3 vmovapd % ymm3, % ymm3 # qhasm: a0 = mem256[ input_1 + 0 ] # asm 1: vmovupd 0(<input_1=int64#2),>a0=reg256#15 # asm 2: vmovupd 0(<input_1=%rsi),>a0=%ymm14 vmovupd 0( % rsi), % ymm14 # qhasm: r = a0 & b0 # asm 1: vpand <a0=reg256#15,<b0=reg256#1,>r=reg256#1 # asm 2: vpand <a0=%ymm14,<b0=%ymm0,>r=%ymm0 vpand % ymm14, % ymm0, % ymm0 # qhasm: r0 ^= r # asm 1: vpxor <r=reg256#1,<r0=reg256#4,<r0=reg256#4 # asm 2: vpxor <r=%ymm0,<r0=%ymm3,<r0=%ymm3 vpxor % ymm0, % ymm3, % ymm3 # qhasm: r = a0 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 32(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 32( % rdx), % ymm14, % ymm0 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#1,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm0,<r1=%ymm4,<r1=%ymm4 vpxor % ymm0, % ymm4, % ymm4 # qhasm: r = a0 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 64(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 64( % rdx), % ymm14, % ymm0 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#1,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm0,<r2=%ymm5,<r2=%ymm5 vpxor % ymm0, % ymm5, % ymm5 # qhasm: r = a0 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 96(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 96( % rdx), % ymm14, % ymm0 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#1,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm0,<r3=%ymm6,<r3=%ymm6 vpxor % ymm0, % ymm6, % ymm6 # qhasm: r = a0 & mem256[input_2 + 128] # asm 1: vpand 
128(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 128(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 128( % rdx), % ymm14, % ymm0 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#1,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm0,<r4=%ymm7,<r4=%ymm7 vpxor % ymm0, % ymm7, % ymm7 # qhasm: r = a0 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 160(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 160( % rdx), % ymm14, % ymm0 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#1,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm0,<r5=%ymm8,<r5=%ymm8 vpxor % ymm0, % ymm8, % ymm8 # qhasm: r = a0 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 192(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 192( % rdx), % ymm14, % ymm0 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#1,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm0,<r6=%ymm9,<r6=%ymm9 vpxor % ymm0, % ymm9, % ymm9 # qhasm: r = a0 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 224(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 224( % rdx), % ymm14, % ymm0 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#1,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm0,<r7=%ymm10,<r7=%ymm10 vpxor % ymm0, % ymm10, % ymm10 # qhasm: r = a0 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 256(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 256( % rdx), % ymm14, % ymm0 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#1,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm0,<r8=%ymm11,<r8=%ymm11 vpxor % ymm0, % ymm11, % ymm11 # qhasm: r = a0 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 288(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 288( % rdx), % ymm14, % ymm0 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#1,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm0,<r9=%ymm12,<r9=%ymm12 vpxor % ymm0, % ymm12, % ymm12 # qhasm: r = a0 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 320(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 320( % rdx), % ymm14, % ymm0 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#1,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm0,<r10=%ymm13,<r10=%ymm13 vpxor % ymm0, % ymm13, % ymm13 # qhasm: r = a0 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 352(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 352( % rdx), % ymm14, % ymm0 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#1,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm0,<r11=%ymm1,<r11=%ymm1 vpxor % ymm0, % ymm1, % ymm1 # qhasm: r = a0 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 384(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 384( % rdx), % ymm14, % ymm0 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#1,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm0,<r12=%ymm2,<r12=%ymm2 vpxor % ymm0, % ymm2, % ymm2 # qhasm: mem256[ input_0 + 384 ] = r12 # asm 1: vmovupd <r12=reg256#3,384(<input_0=int64#1) # asm 2: vmovupd <r12=%ymm2,384(<input_0=%rdi) vmovupd % ymm2, 384( % rdi) # qhasm: mem256[ input_0 + 352 ] = r11 # asm 1: vmovupd <r11=reg256#2,352(<input_0=int64#1) # asm 2: vmovupd <r11=%ymm1,352(<input_0=%rdi) vmovupd % ymm1, 352( % rdi) # qhasm: mem256[ input_0 + 320 ] = r10 # asm 1: vmovupd <r10=reg256#14,320(<input_0=int64#1) # asm 2: vmovupd <r10=%ymm13,320(<input_0=%rdi) vmovupd % ymm13, 320( % rdi) # qhasm: mem256[ input_0 + 288 ] = r9 # asm 1: 
# asm 1: vmovupd <r9=reg256#13,288(<input_0=int64#1)
# asm 2: vmovupd <r9=%ymm12,288(<input_0=%rdi)
vmovupd %ymm12,288(%rdi)

# qhasm: mem256[ input_0 + 256 ] = r8
# asm 1: vmovupd <r8=reg256#12,256(<input_0=int64#1)
# asm 2: vmovupd <r8=%ymm11,256(<input_0=%rdi)
vmovupd %ymm11,256(%rdi)

# qhasm: mem256[ input_0 + 224 ] = r7
# asm 1: vmovupd <r7=reg256#11,224(<input_0=int64#1)
# asm 2: vmovupd <r7=%ymm10,224(<input_0=%rdi)
vmovupd %ymm10,224(%rdi)

# qhasm: mem256[ input_0 + 192 ] = r6
# asm 1: vmovupd <r6=reg256#10,192(<input_0=int64#1)
# asm 2: vmovupd <r6=%ymm9,192(<input_0=%rdi)
vmovupd %ymm9,192(%rdi)

# qhasm: mem256[ input_0 + 160 ] = r5
# asm 1: vmovupd <r5=reg256#9,160(<input_0=int64#1)
# asm 2: vmovupd <r5=%ymm8,160(<input_0=%rdi)
vmovupd %ymm8,160(%rdi)

# qhasm: mem256[ input_0 + 128 ] = r4
# asm 1: vmovupd <r4=reg256#8,128(<input_0=int64#1)
# asm 2: vmovupd <r4=%ymm7,128(<input_0=%rdi)
vmovupd %ymm7,128(%rdi)

# qhasm: mem256[ input_0 + 96 ] = r3
# asm 1: vmovupd <r3=reg256#7,96(<input_0=int64#1)
# asm 2: vmovupd <r3=%ymm6,96(<input_0=%rdi)
vmovupd %ymm6,96(%rdi)

# qhasm: mem256[ input_0 + 64 ] = r2
# asm 1: vmovupd <r2=reg256#6,64(<input_0=int64#1)
# asm 2: vmovupd <r2=%ymm5,64(<input_0=%rdi)
vmovupd %ymm5,64(%rdi)

# qhasm: mem256[ input_0 + 32 ] = r1
# asm 1: vmovupd <r1=reg256#5,32(<input_0=int64#1)
# asm 2: vmovupd <r1=%ymm4,32(<input_0=%rdi)
vmovupd %ymm4,32(%rdi)

# qhasm: mem256[ input_0 + 0 ] = r0
# asm 1: vmovupd <r0=reg256#4,0(<input_0=int64#1)
# asm 2: vmovupd <r0=%ymm3,0(<input_0=%rdi)
vmovupd %ymm3,0(%rdi)

# qhasm: return
add %r11,%rsp
ret
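The listing above ends the vectorized field multiplication: the qhasm comments describe a schoolbook carry-less multiply (r[i+j] ^= a[i] & b[j] over 13 bitsliced 256-bit limbs) followed by a reduction in which each high limb is folded back into four lower limbs (the "r7 ^= r16; r6 ^= r16; r4 ^= r16; r3 = r16" pattern). Below is a minimal scalar C sketch of that structure, using 64-bit limbs in place of the ymm registers and assuming the reduction polynomial implied by that pattern, x^13 + x^4 + x^3 + x + 1; the function name and limb type are illustrative, not part of the source.

/* Hypothetical scalar model of the bitsliced multiply-and-reduce pattern
 * shown in the qhasm comments above. Each uint64_t stands in for one
 * 256-bit bit-plane: AND multiplies corresponding coefficient bits of many
 * field elements at once, XOR adds them in GF(2). */
#include <stdint.h>

void vec_mul_sketch(uint64_t h[13], const uint64_t f[13], const uint64_t g[13]) {
    uint64_t buf[25] = {0};

    /* schoolbook carry-less multiplication: buf[i+j] ^= f[i] & g[j] */
    for (int i = 0; i < 13; i++)
        for (int j = 0; j < 13; j++)
            buf[i + j] ^= f[i] & g[j];

    /* fold x^(13+k) back as x^(k+4) + x^(k+3) + x^(k+1) + x^k, mirroring the
     * "r7 ^= r16; r6 ^= r16; r4 ^= r16; r3 = r16" steps in the listing
     * (assumed reduction polynomial x^13 + x^4 + x^3 + x + 1) */
    for (int k = 24; k >= 13; k--) {
        buf[k - 13 + 4] ^= buf[k];
        buf[k - 13 + 3] ^= buf[k];
        buf[k - 13 + 1] ^= buf[k];
        buf[k - 13 + 0] ^= buf[k];
    }

    for (int i = 0; i < 13; i++)
        h[i] = buf[i];
}

In the assembly, each buf[i] is a 256-bit ymm register and the inner products are the vpand/vpxor pairs; the sketch only illustrates the dataflow, not the register allocation or the operand scheduling of the generated code.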
mktmansour/MKT-KSA-Geolocation-Security
264,233
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6960119f/avx2/transpose_64x256_sp_asm.S
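The transpose_64x256_sp_asm.S listing that follows is qhasm-generated AVX2 code. Each pass keeps half of the bits of one register with a mask, shifts the partner register so its bits land in the vacated positions, and ORs the pieces back together (the recurring "v00 = x & mask0; v10 = y << s; v01 = x >> s; v11 = y & mask1; x = v00 | v10; y = v01 | v11" pattern, with s = 32, 16, 8 as the passes narrow). A minimal C sketch of one such exchange step on 64-bit words is given below; the helper name, parameter names, and example constants are assumptions for illustration and do not appear in the source.

/* Hypothetical scalar model of one mask/shift/OR exchange pass from the
 * listing below, on 64-bit words instead of 256-bit ymm registers. */
#include <stdint.h>

static void exchange_pass(uint64_t *lo, uint64_t *hi,
                          uint64_t mask_lo, uint64_t mask_hi, unsigned s) {
    uint64_t v00 = *lo & mask_lo;   /* bits of lo that stay in place */
    uint64_t v10 = *hi << s;        /* bits of hi moving into lo     */
    uint64_t v01 = *lo >> s;        /* bits of lo moving into hi     */
    uint64_t v11 = *hi & mask_hi;   /* bits of hi that stay in place */
    *lo = v00 | v10;
    *hi = v01 | v11;
}

For example, calling exchange_pass with s = 32, mask_lo = 0x00000000FFFFFFFFull and mask_hi = 0xFFFFFFFF00000000ull swaps the high 32 bits of lo with the low 32 bits of hi, which is what the first mask0/mask1 pass below does independently in each 64-bit lane of the ymm registers; repeating the pattern with narrower shifts and masks yields the bit-matrix transpose.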
#include "namespace.h" #define MASK0_0 CRYPTO_NAMESPACE(MASK0_0) #define _MASK0_0 _CRYPTO_NAMESPACE(MASK0_0) #define MASK0_1 CRYPTO_NAMESPACE(MASK0_1) #define _MASK0_1 _CRYPTO_NAMESPACE(MASK0_1) #define MASK1_0 CRYPTO_NAMESPACE(MASK1_0) #define _MASK1_0 _CRYPTO_NAMESPACE(MASK1_0) #define MASK1_1 CRYPTO_NAMESPACE(MASK1_1) #define _MASK1_1 _CRYPTO_NAMESPACE(MASK1_1) #define MASK2_0 CRYPTO_NAMESPACE(MASK2_0) #define _MASK2_0 _CRYPTO_NAMESPACE(MASK2_0) #define MASK2_1 CRYPTO_NAMESPACE(MASK2_1) #define _MASK2_1 _CRYPTO_NAMESPACE(MASK2_1) #define MASK3_0 CRYPTO_NAMESPACE(MASK3_0) #define _MASK3_0 _CRYPTO_NAMESPACE(MASK3_0) #define MASK3_1 CRYPTO_NAMESPACE(MASK3_1) #define _MASK3_1 _CRYPTO_NAMESPACE(MASK3_1) #define MASK4_0 CRYPTO_NAMESPACE(MASK4_0) #define _MASK4_0 _CRYPTO_NAMESPACE(MASK4_0) #define MASK4_1 CRYPTO_NAMESPACE(MASK4_1) #define _MASK4_1 _CRYPTO_NAMESPACE(MASK4_1) #define MASK5_0 CRYPTO_NAMESPACE(MASK5_0) #define _MASK5_0 _CRYPTO_NAMESPACE(MASK5_0) #define MASK5_1 CRYPTO_NAMESPACE(MASK5_1) #define _MASK5_1 _CRYPTO_NAMESPACE(MASK5_1) #define transpose_64x256_sp_asm CRYPTO_NAMESPACE(transpose_64x256_sp_asm) #define _transpose_64x256_sp_asm _CRYPTO_NAMESPACE(transpose_64x256_sp_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 x0 # qhasm: reg256 x1 # qhasm: reg256 x2 # qhasm: reg256 x3 # qhasm: reg256 x4 # qhasm: reg256 x5 # qhasm: reg256 x6 # qhasm: reg256 x7 # qhasm: reg256 t0 # qhasm: reg256 t1 # qhasm: reg256 v00 # qhasm: reg256 v01 # qhasm: reg256 v10 # qhasm: reg256 v11 # qhasm: reg256 mask0 # qhasm: reg256 mask1 # qhasm: reg256 mask2 # qhasm: reg256 mask3 # qhasm: reg256 mask4 # qhasm: reg256 mask5 # qhasm: enter transpose_64x256_sp_asm .p2align 5 .global _transpose_64x256_sp_asm .global transpose_64x256_sp_asm _transpose_64x256_sp_asm: transpose_64x256_sp_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: mask0 aligned= mem256[ MASK5_0 ] # asm 1: vmovapd MASK5_0(%rip),>mask0=reg256#1 # asm 2: vmovapd MASK5_0(%rip),>mask0=%ymm0 vmovapd MASK5_0( % rip), % ymm0 # qhasm: mask1 aligned= mem256[ MASK5_1 ] # asm 1: vmovapd MASK5_1(%rip),>mask1=reg256#2 # asm 2: vmovapd MASK5_1(%rip),>mask1=%ymm1 vmovapd MASK5_1( % rip), % ymm1 # qhasm: mask2 aligned= mem256[ MASK4_0 ] # asm 1: vmovapd MASK4_0(%rip),>mask2=reg256#3 # asm 2: vmovapd MASK4_0(%rip),>mask2=%ymm2 vmovapd MASK4_0( % rip), % ymm2 # qhasm: mask3 aligned= mem256[ MASK4_1 ] # asm 1: vmovapd MASK4_1(%rip),>mask3=reg256#4 # asm 2: vmovapd MASK4_1(%rip),>mask3=%ymm3 vmovapd MASK4_1( % rip), % ymm3 # qhasm: mask4 aligned= mem256[ MASK3_0 ] # asm 1: vmovapd MASK3_0(%rip),>mask4=reg256#5 # asm 2: vmovapd MASK3_0(%rip),>mask4=%ymm4 vmovapd MASK3_0( % rip), % ymm4 # qhasm: mask5 aligned= mem256[ MASK3_1 ] # asm 1: vmovapd MASK3_1(%rip),>mask5=reg256#6 # asm 2: vmovapd MASK3_1(%rip),>mask5=%ymm5 vmovapd MASK3_1( % rip), % ymm5 # qhasm: x0 = mem256[ input_0 + 0 ] # asm 1: vmovupd 0(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 0(<input_0=%rdi),>x0=%ymm6 vmovupd 0( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 256 ] # asm 1: vmovupd 256(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 256(<input_0=%rdi),>x1=%ymm7 vmovupd 256( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 512 ] 
# asm 1: vmovupd 512(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 512(<input_0=%rdi),>x2=%ymm8 vmovupd 512( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 768 ] # asm 1: vmovupd 768(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 768(<input_0=%rdi),>x3=%ymm9 vmovupd 768( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1024 ] # asm 1: vmovupd 1024(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1024(<input_0=%rdi),>x4=%ymm10 vmovupd 1024( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1280 ] # asm 1: vmovupd 1280(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1280(<input_0=%rdi),>x5=%ymm11 vmovupd 1280( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1536 ] # asm 1: vmovupd 1536(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1536(<input_0=%rdi),>x6=%ymm12 vmovupd 1536( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1792 ] # asm 1: vmovupd 1792(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1792(<input_0=%rdi),>x7=%ymm13 vmovupd 1792( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor 
<v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 
# asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 
# qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 0 ] = x0 # asm 1: vmovupd <x0=reg256#10,0(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,0(<input_0=%rdi) vmovupd % ymm9, 0( % rdi) # qhasm: mem256[ input_0 + 256 ] = x1 # asm 1: vmovupd <x1=reg256#14,256(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,256(<input_0=%rdi) vmovupd % ymm13, 256( % rdi) # qhasm: mem256[ input_0 + 512 ] = x2 # asm 1: vmovupd <x2=reg256#15,512(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,512(<input_0=%rdi) vmovupd % ymm14, 512( % rdi) # qhasm: mem256[ input_0 + 768 ] = x3 # asm 1: vmovupd <x3=reg256#11,768(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,768(<input_0=%rdi) vmovupd % ymm10, 768( % rdi) # qhasm: mem256[ input_0 + 1024 ] = x4 # asm 1: vmovupd <x4=reg256#12,1024(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1024(<input_0=%rdi) vmovupd % ymm11, 1024( % rdi) # qhasm: mem256[ input_0 + 1280 ] = x5 # asm 1: vmovupd <x5=reg256#9,1280(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1280(<input_0=%rdi) vmovupd % ymm8, 1280( % rdi) # qhasm: mem256[ input_0 + 1536 ] = x6 # asm 1: vmovupd <x6=reg256#13,1536(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1536(<input_0=%rdi) vmovupd % ymm12, 1536( % rdi) # qhasm: mem256[ input_0 + 1792 ] = x7 # asm 1: vmovupd <x7=reg256#7,1792(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1792(<input_0=%rdi) vmovupd % ymm6, 1792( % rdi) # qhasm: x0 = mem256[ input_0 + 32 ] # asm 1: vmovupd 32(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 32(<input_0=%rdi),>x0=%ymm6 vmovupd 32( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 288 ] # asm 1: vmovupd 288(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 288(<input_0=%rdi),>x1=%ymm7 vmovupd 288( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 544 ] # asm 1: vmovupd 544(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 544(<input_0=%rdi),>x2=%ymm8 vmovupd 544( % rdi), % ymm8 # qhasm: x3 = mem256[ 
input_0 + 800 ] # asm 1: vmovupd 800(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 800(<input_0=%rdi),>x3=%ymm9 vmovupd 800( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1056 ] # asm 1: vmovupd 1056(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1056(<input_0=%rdi),>x4=%ymm10 vmovupd 1056( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1312 ] # asm 1: vmovupd 1312(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1312(<input_0=%rdi),>x5=%ymm11 vmovupd 1312( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1568 ] # asm 1: vmovupd 1568(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1568(<input_0=%rdi),>x6=%ymm12 vmovupd 1568( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1824 ] # asm 1: vmovupd 1824(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1824(<input_0=%rdi),>x7=%ymm13 vmovupd 1824( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: 
vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 
2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x 
v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 32 ] = x0 # asm 1: vmovupd <x0=reg256#10,32(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,32(<input_0=%rdi) vmovupd % ymm9, 32( % rdi) # qhasm: mem256[ input_0 + 288 ] = x1 # asm 1: vmovupd <x1=reg256#14,288(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,288(<input_0=%rdi) vmovupd % ymm13, 288( % rdi) # qhasm: mem256[ input_0 + 544 ] = x2 # asm 1: vmovupd <x2=reg256#15,544(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,544(<input_0=%rdi) vmovupd % ymm14, 544( % rdi) # qhasm: mem256[ input_0 + 800 ] = x3 # asm 1: vmovupd <x3=reg256#11,800(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,800(<input_0=%rdi) vmovupd % ymm10, 800( % rdi) # qhasm: mem256[ input_0 + 1056 ] = x4 # asm 1: vmovupd <x4=reg256#12,1056(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1056(<input_0=%rdi) vmovupd % ymm11, 1056( % rdi) # qhasm: mem256[ input_0 + 1312 ] = x5 # asm 1: vmovupd <x5=reg256#9,1312(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1312(<input_0=%rdi) vmovupd % ymm8, 1312( % rdi) # qhasm: mem256[ input_0 + 1568 ] = x6 # asm 1: vmovupd <x6=reg256#13,1568(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1568(<input_0=%rdi) vmovupd % ymm12, 1568( % rdi) # qhasm: mem256[ input_0 + 1824 ] = x7 # asm 1: vmovupd <x7=reg256#7,1824(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1824(<input_0=%rdi) vmovupd % ymm6, 1824( % rdi) # qhasm: x0 = mem256[ input_0 + 64 ] # asm 1: vmovupd 64(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 64(<input_0=%rdi),>x0=%ymm6 vmovupd 64( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 320 ] # asm 1: vmovupd 320(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 320(<input_0=%rdi),>x1=%ymm7 vmovupd 320( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 576 ] # asm 1: vmovupd 576(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 576(<input_0=%rdi),>x2=%ymm8 vmovupd 576( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 832 ] # asm 1: vmovupd 832(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 832(<input_0=%rdi),>x3=%ymm9 vmovupd 832( % rdi), % ymm9 # qhasm: 
x4 = mem256[ input_0 + 1088 ]
# asm 1: vmovupd 1088(<input_0=int64#1),>x4=reg256#11
# asm 2: vmovupd 1088(<input_0=%rdi),>x4=%ymm10
vmovupd 1088(%rdi),%ymm10
# qhasm: x5 = mem256[ input_0 + 1344 ]
# asm 1: vmovupd 1344(<input_0=int64#1),>x5=reg256#12
# asm 2: vmovupd 1344(<input_0=%rdi),>x5=%ymm11
vmovupd 1344(%rdi),%ymm11
# qhasm: x6 = mem256[ input_0 + 1600 ]
# asm 1: vmovupd 1600(<input_0=int64#1),>x6=reg256#13
# asm 2: vmovupd 1600(<input_0=%rdi),>x6=%ymm12
vmovupd 1600(%rdi),%ymm12
# qhasm: x7 = mem256[ input_0 + 1856 ]
# asm 1: vmovupd 1856(<input_0=int64#1),>x7=reg256#14
# asm 2: vmovupd 1856(<input_0=%rdi),>x7=%ymm13
vmovupd 1856(%rdi),%ymm13
# qhasm: v00 = x0 & mask0
# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
vpand %ymm6,%ymm0,%ymm14
# qhasm: 4x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16
# asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15
vpsllq $32,%ymm10,%ymm15
# qhasm: 4x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7
# asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6
vpsrlq $32,%ymm6,%ymm6
# qhasm: v11 = x4 & mask1
# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
vpand %ymm10,%ymm1,%ymm10
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
vpor %ymm14,%ymm15,%ymm14
# qhasm: x4 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
vpor %ymm6,%ymm10,%ymm6
# qhasm: v00 = x1 & mask0
# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
vpand %ymm7,%ymm0,%ymm10
# qhasm: 4x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16
# asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15
vpsllq $32,%ymm11,%ymm15
# qhasm: 4x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8
# asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7
vpsrlq $32,%ymm7,%ymm7
# qhasm: v11 = x5 & mask1
# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
vpand %ymm11,%ymm1,%ymm11
# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
vpor %ymm10,%ymm15,%ymm10
# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
vpor %ymm7,%ymm11,%ymm7
# qhasm: v00 = x2 & mask0
# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
vpand %ymm8,%ymm0,%ymm11
# qhasm: 4x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16
# asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15
vpsllq $32,%ymm12,%ymm15
# qhasm: 4x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9
# asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8
vpsrlq $32,%ymm8,%ymm8
# qhasm: v11 = x6 & mask1
# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
vpand %ymm12,%ymm1,%ymm12
# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
vpor %ymm11,%ymm15,%ymm11
# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
# asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
vpor %ymm8,%ymm12,%ymm8
# qhasm: v00 = x3 & mask0
# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
vpand %ymm9,%ymm0,%ymm12
# qhasm: 4x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16
# asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15
vpsllq $32,%ymm13,%ymm15
# qhasm: 4x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10
# asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9
vpsrlq $32,%ymm9,%ymm9
# qhasm: v11 = x7 & mask1
# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
vpand %ymm13,%ymm1,%ymm13
# qhasm: x3 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
vpor %ymm12,%ymm15,%ymm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
# asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
vpor %ymm9,%ymm13,%ymm9
# qhasm: v00 = x0 & mask2
# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
vpand %ymm14,%ymm2,%ymm13
# qhasm: 8x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16
# asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15
vpslld $16,%ymm11,%ymm15
# qhasm: 8x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15
# asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14
vpsrld $16,%ymm14,%ymm14
# qhasm: v11 = x2 & mask3
# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
vpand %ymm11,%ymm3,%ymm11
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
# asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
vpor %ymm13,%ymm15,%ymm13
# qhasm: x2 = v01 | v11
# asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
# asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
vpor %ymm14,%ymm11,%ymm11
# qhasm: v00 = x1 & mask2
# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
vpand %ymm10,%ymm2,%ymm14
# qhasm: 8x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16
# asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15
vpslld $16,%ymm12,%ymm15
# qhasm: 8x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11
# asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10
vpsrld $16,%ymm10,%ymm10
# qhasm: v11 = x3 & mask3
# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
vpand %ymm12,%ymm3,%ymm12
# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
vpor %ymm14,%ymm15,%ymm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
# asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
vpor %ymm10,%ymm12,%ymm10
# qhasm: v00 = x4 & mask2
# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
vpand %ymm6,%ymm2,%ymm12
# qhasm: 8x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16
# asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15
vpslld $16,%ymm8,%ymm15
# qhasm: 8x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7
# asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6
vpsrld $16,%ymm6,%ymm6
# qhasm: v11 = x6 & mask3
# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
vpand %ymm8,%ymm3,%ymm8
# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = 
x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 64 ] = x0 # asm 1: vmovupd <x0=reg256#10,64(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,64(<input_0=%rdi) vmovupd % ymm9, 64( % rdi) # qhasm: mem256[ input_0 + 320 ] = x1 # asm 1: vmovupd <x1=reg256#14,320(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,320(<input_0=%rdi) vmovupd % ymm13, 320( % rdi) # qhasm: mem256[ input_0 + 576 ] = x2 # asm 1: vmovupd <x2=reg256#15,576(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,576(<input_0=%rdi) vmovupd % ymm14, 576( % rdi) # qhasm: mem256[ input_0 + 832 ] = x3 # asm 1: vmovupd <x3=reg256#11,832(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,832(<input_0=%rdi) vmovupd % ymm10, 832( % rdi) # qhasm: mem256[ input_0 + 1088 ] = x4 # asm 1: vmovupd <x4=reg256#12,1088(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1088(<input_0=%rdi) vmovupd % ymm11, 1088( % rdi) # qhasm: mem256[ input_0 + 1344 ] = x5 # asm 1: vmovupd <x5=reg256#9,1344(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1344(<input_0=%rdi) vmovupd % ymm8, 1344( % rdi) # qhasm: mem256[ input_0 + 1600 ] = x6 # asm 1: vmovupd <x6=reg256#13,1600(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1600(<input_0=%rdi) vmovupd % ymm12, 1600( % rdi) # qhasm: mem256[ input_0 + 1856 ] = x7 # asm 1: vmovupd <x7=reg256#7,1856(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1856(<input_0=%rdi) vmovupd % ymm6, 1856( % rdi) # qhasm: x0 = mem256[ input_0 + 96 ] # asm 1: vmovupd 96(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 96(<input_0=%rdi),>x0=%ymm6 vmovupd 96( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 352 ] # asm 1: vmovupd 352(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 352(<input_0=%rdi),>x1=%ymm7 vmovupd 352( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 608 ] # asm 1: vmovupd 608(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 608(<input_0=%rdi),>x2=%ymm8 vmovupd 608( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 864 ] # asm 1: vmovupd 864(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 864(<input_0=%rdi),>x3=%ymm9 vmovupd 864( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1120 ] # asm 1: vmovupd 1120(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1120(<input_0=%rdi),>x4=%ymm10 vmovupd 1120( % rdi), % 
ymm10 # qhasm: x5 = mem256[ input_0 + 1376 ] # asm 1: vmovupd 1376(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1376(<input_0=%rdi),>x5=%ymm11 vmovupd 1376( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1632 ] # asm 1: vmovupd 1632(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1632(<input_0=%rdi),>x6=%ymm12 vmovupd 1632( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1888 ] # asm 1: vmovupd 1888(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1888(<input_0=%rdi),>x7=%ymm13 vmovupd 1888( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 
<< 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor 
<v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 
vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 96 ] = x0 # asm 1: vmovupd <x0=reg256#10,96(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,96(<input_0=%rdi) vmovupd % ymm9, 96( % rdi) # qhasm: mem256[ input_0 + 352 ] = x1 # asm 1: vmovupd <x1=reg256#14,352(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,352(<input_0=%rdi) vmovupd % ymm13, 352( % rdi) # qhasm: mem256[ input_0 + 608 ] = x2 # asm 1: vmovupd <x2=reg256#15,608(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,608(<input_0=%rdi) vmovupd % ymm14, 608( % rdi) # qhasm: mem256[ input_0 + 864 ] = x3 # asm 1: vmovupd <x3=reg256#11,864(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,864(<input_0=%rdi) vmovupd % ymm10, 864( % rdi) # qhasm: mem256[ input_0 + 1120 ] = x4 # asm 1: vmovupd <x4=reg256#12,1120(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1120(<input_0=%rdi) vmovupd % ymm11, 1120( % rdi) # qhasm: mem256[ input_0 + 1376 ] = x5 # asm 1: vmovupd <x5=reg256#9,1376(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1376(<input_0=%rdi) vmovupd % ymm8, 1376( % rdi) # qhasm: mem256[ input_0 + 1632 ] = x6 # asm 1: vmovupd <x6=reg256#13,1632(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1632(<input_0=%rdi) vmovupd % ymm12, 1632( % rdi) # qhasm: mem256[ input_0 + 1888 ] = x7 # asm 1: vmovupd <x7=reg256#7,1888(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1888(<input_0=%rdi) vmovupd % ymm6, 1888( % rdi) # qhasm: x0 = mem256[ input_0 + 128 ] # asm 1: vmovupd 128(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 128(<input_0=%rdi),>x0=%ymm6 vmovupd 128( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 384 ] # asm 1: vmovupd 384(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 384(<input_0=%rdi),>x1=%ymm7 vmovupd 384( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 640 ] # asm 1: vmovupd 640(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 640(<input_0=%rdi),>x2=%ymm8 vmovupd 640( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 896 ] # asm 1: vmovupd 896(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 896(<input_0=%rdi),>x3=%ymm9 vmovupd 896( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1152 ] # asm 1: vmovupd 1152(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1152(<input_0=%rdi),>x4=%ymm10 vmovupd 1152( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1408 ] # asm 1: vmovupd 1408(<input_0=int64#1),>x5=reg256#12 # asm 2: 
vmovupd 1408(<input_0=%rdi),>x5=%ymm11 vmovupd 1408( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1664 ] # asm 1: vmovupd 1664(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1664(<input_0=%rdi),>x6=%ymm12 vmovupd 1664( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1920 ] # asm 1: vmovupd 1920(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1920(<input_0=%rdi),>x7=%ymm13 vmovupd 1920( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, 
% ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 
1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor 
<v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 128 ] = x0 # asm 1: vmovupd <x0=reg256#10,128(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,128(<input_0=%rdi) vmovupd % ymm9, 128( % rdi) # qhasm: mem256[ input_0 + 384 ] = x1 # asm 1: vmovupd <x1=reg256#14,384(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,384(<input_0=%rdi) vmovupd % ymm13, 384( % rdi) # qhasm: mem256[ input_0 + 640 ] = x2 # asm 1: vmovupd <x2=reg256#15,640(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,640(<input_0=%rdi) vmovupd % ymm14, 640( % rdi) # qhasm: mem256[ input_0 + 896 ] = x3 # asm 1: vmovupd <x3=reg256#11,896(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,896(<input_0=%rdi) vmovupd % ymm10, 896( % rdi) # qhasm: mem256[ input_0 + 1152 ] = x4 # asm 1: vmovupd <x4=reg256#12,1152(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1152(<input_0=%rdi) vmovupd % ymm11, 1152( % rdi) # qhasm: mem256[ input_0 + 1408 ] = x5 # asm 1: vmovupd <x5=reg256#9,1408(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1408(<input_0=%rdi) vmovupd % ymm8, 1408( % rdi) # qhasm: mem256[ input_0 + 1664 ] = x6 # asm 1: vmovupd <x6=reg256#13,1664(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1664(<input_0=%rdi) vmovupd % ymm12, 1664( % rdi) # qhasm: mem256[ input_0 + 1920 ] = x7 # asm 1: vmovupd <x7=reg256#7,1920(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1920(<input_0=%rdi) vmovupd % ymm6, 1920( % rdi) # qhasm: x0 = mem256[ input_0 + 160 ] # asm 1: vmovupd 160(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 160(<input_0=%rdi),>x0=%ymm6 vmovupd 160( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 416 ] # asm 1: vmovupd 416(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 416(<input_0=%rdi),>x1=%ymm7 vmovupd 416( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 672 ] # asm 1: vmovupd 672(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 672(<input_0=%rdi),>x2=%ymm8 vmovupd 672( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 928 ] # asm 1: vmovupd 928(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 928(<input_0=%rdi),>x3=%ymm9 vmovupd 928( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1184 ] # asm 1: vmovupd 1184(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1184(<input_0=%rdi),>x4=%ymm10 vmovupd 1184( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1440 ] # asm 1: vmovupd 1440(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1440(<input_0=%rdi),>x5=%ymm11 vmovupd 1440( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1696 ] # asm 1: vmovupd 
1696(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1696(<input_0=%rdi),>x6=%ymm12 vmovupd 1696( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1952 ] # asm 1: vmovupd 1952(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1952(<input_0=%rdi),>x7=%ymm13 vmovupd 1952( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq 
$32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % 
ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor 
<v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 160 ] = x0 # asm 1: vmovupd <x0=reg256#10,160(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,160(<input_0=%rdi) vmovupd % ymm9, 160( % rdi) # qhasm: mem256[ input_0 + 416 ] = x1 # asm 1: vmovupd <x1=reg256#14,416(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,416(<input_0=%rdi) vmovupd % ymm13, 416( % rdi) # qhasm: mem256[ input_0 + 672 ] = x2 # asm 1: vmovupd <x2=reg256#15,672(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,672(<input_0=%rdi) vmovupd % ymm14, 672( % rdi) # qhasm: mem256[ input_0 + 928 ] = x3 # asm 1: vmovupd <x3=reg256#11,928(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,928(<input_0=%rdi) vmovupd % ymm10, 928( % rdi) # qhasm: mem256[ input_0 + 1184 ] = x4 # asm 1: vmovupd <x4=reg256#12,1184(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1184(<input_0=%rdi) vmovupd % ymm11, 1184( % rdi) # qhasm: mem256[ input_0 + 1440 ] = x5 # asm 1: vmovupd <x5=reg256#9,1440(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1440(<input_0=%rdi) vmovupd % ymm8, 1440( % rdi) # qhasm: mem256[ input_0 + 1696 ] = x6 # asm 1: vmovupd <x6=reg256#13,1696(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1696(<input_0=%rdi) vmovupd % ymm12, 1696( % rdi) # qhasm: mem256[ input_0 + 1952 ] = x7 # asm 1: vmovupd <x7=reg256#7,1952(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1952(<input_0=%rdi) vmovupd % ymm6, 1952( % rdi) # qhasm: x0 = mem256[ input_0 + 192 ] # asm 1: vmovupd 192(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 192(<input_0=%rdi),>x0=%ymm6 vmovupd 192( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 448 ] # asm 1: vmovupd 448(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 448(<input_0=%rdi),>x1=%ymm7 vmovupd 448( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 704 ] # asm 1: vmovupd 704(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 704(<input_0=%rdi),>x2=%ymm8 vmovupd 704( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 960 ] # asm 1: vmovupd 960(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 960(<input_0=%rdi),>x3=%ymm9 vmovupd 960( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1216 ] # asm 1: vmovupd 1216(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1216(<input_0=%rdi),>x4=%ymm10 vmovupd 1216( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1472 ] # asm 1: vmovupd 1472(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1472(<input_0=%rdi),>x5=%ymm11 vmovupd 1472( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1728 ] # asm 1: vmovupd 1728(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1728(<input_0=%rdi),>x6=%ymm12 vmovupd 1728( % rdi), 
% ymm12 # qhasm: x7 = mem256[ input_0 + 1984 ] # asm 1: vmovupd 1984(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1984(<input_0=%rdi),>x7=%ymm13 vmovupd 1984( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand 
<x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # 
asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: 
v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 192 ] = x0 # asm 1: vmovupd <x0=reg256#10,192(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,192(<input_0=%rdi) vmovupd % ymm9, 192( % rdi) # qhasm: mem256[ input_0 + 448 ] = x1 # asm 1: vmovupd <x1=reg256#14,448(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,448(<input_0=%rdi) vmovupd % ymm13, 448( % rdi) # qhasm: mem256[ input_0 + 704 ] = x2 # asm 1: vmovupd <x2=reg256#15,704(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,704(<input_0=%rdi) vmovupd % ymm14, 704( % rdi) # qhasm: mem256[ input_0 + 960 ] = x3 # asm 1: vmovupd <x3=reg256#11,960(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,960(<input_0=%rdi) vmovupd % ymm10, 960( % rdi) # qhasm: mem256[ input_0 + 1216 ] = x4 # asm 1: vmovupd <x4=reg256#12,1216(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1216(<input_0=%rdi) vmovupd % ymm11, 1216( % rdi) # qhasm: mem256[ input_0 + 1472 ] = x5 # asm 1: vmovupd <x5=reg256#9,1472(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1472(<input_0=%rdi) vmovupd % ymm8, 1472( % rdi) # qhasm: mem256[ input_0 + 1728 ] = x6 # asm 1: vmovupd <x6=reg256#13,1728(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1728(<input_0=%rdi) vmovupd % ymm12, 1728( % rdi) # qhasm: mem256[ input_0 + 1984 ] = x7 # asm 1: vmovupd <x7=reg256#7,1984(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1984(<input_0=%rdi) vmovupd % ymm6, 1984( % rdi) # qhasm: x0 = mem256[ input_0 + 224 ] # asm 1: vmovupd 224(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 224(<input_0=%rdi),>x0=%ymm6 vmovupd 224( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 480 ] # asm 1: vmovupd 480(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 480(<input_0=%rdi),>x1=%ymm7 vmovupd 480( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 736 ] # asm 1: vmovupd 736(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 736(<input_0=%rdi),>x2=%ymm8 vmovupd 736( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 992 ] # asm 1: vmovupd 992(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 992(<input_0=%rdi),>x3=%ymm9 vmovupd 992( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1248 ] # asm 1: vmovupd 1248(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1248(<input_0=%rdi),>x4=%ymm10 vmovupd 1248( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1504 ] # asm 1: vmovupd 1504(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1504(<input_0=%rdi),>x5=%ymm11 vmovupd 1504( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1760 ] # asm 1: vmovupd 1760(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1760(<input_0=%rdi),>x6=%ymm12 vmovupd 1760( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 2016 ] # asm 1: vmovupd 2016(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 
2016(<input_0=%rdi),>x7=%ymm13 vmovupd 2016( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#1 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm0 vpand % ymm9, % ymm0, % ymm0 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#13 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm12 vpsllq $32, % ymm13, % ymm12 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#2 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm1 vpand % ymm13, % ymm1, % ymm1 # qhasm: x3 = v00 
| v10 # asm 1: vpor <v00=reg256#1,<v10=reg256#13,>x3=reg256#1 # asm 2: vpor <v00=%ymm0,<v10=%ymm12,>x3=%ymm0 vpor % ymm0, % ymm12, % ymm0 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#2,>x7=reg256#2 # asm 2: vpor <v01=%ymm9,<v11=%ymm1,>x7=%ymm1 vpor % ymm9, % ymm1, % ymm1 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#10 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm9 vpand % ymm14, % ymm2, % ymm9 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#13 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm12 vpslld $16, % ymm11, % ymm12 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#14 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm13 vpsrld $16, % ymm14, % ymm13 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#13,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm12,>x0=%ymm9 vpor % ymm9, % ymm12, % ymm9 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm13,<v11=%ymm11,>x2=%ymm11 vpor % ymm13, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm12 vpand % ymm10, % ymm2, % ymm12 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#1,>v10=reg256#14 # asm 2: vpslld $16,<x3=%ymm0,>v10=%ymm13 vpslld $16, % ymm0, % ymm13 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#1,<mask3=reg256#4,>v11=reg256#1 # asm 2: vpand <x3=%ymm0,<mask3=%ymm3,>v11=%ymm0 vpand % ymm0, % ymm3, % ymm0 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#14,>x1=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm13,>x1=%ymm12 vpor % ymm12, % ymm13, % ymm12 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#1,>x3=reg256#1 # asm 2: vpor <v01=%ymm10,<v11=%ymm0,>x3=%ymm0 vpor % ymm10, % ymm0, % ymm0 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#11 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm10 vpand % ymm6, % ymm2, % ymm10 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#14 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm13 vpslld $16, % ymm8, % ymm13 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#14,>x4=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm13,>x4=%ymm10 vpor % ymm10, % ymm13, % ymm10 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#3 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm2 vpand % ymm7, % ymm2, % ymm2 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#2,>v10=reg256#9 # asm 2: vpslld $16,<x7=%ymm1,>v10=%ymm8 vpslld $16, % ymm1, % ymm8 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld 
$16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#2,<mask3=reg256#4,>v11=reg256#2 # asm 2: vpand <x7=%ymm1,<mask3=%ymm3,>v11=%ymm1 vpand % ymm1, % ymm3, % ymm1 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#3,<v10=reg256#9,>x5=reg256#3 # asm 2: vpor <v00=%ymm2,<v10=%ymm8,>x5=%ymm2 vpor % ymm2, % ymm8, % ymm2 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#2,>x7=reg256#2 # asm 2: vpor <v01=%ymm7,<v11=%ymm1,>x7=%ymm1 vpor % ymm7, % ymm1, % ymm1 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#10,<mask4=reg256#5,>v00=reg256#4 # asm 2: vpand <x0=%ymm9,<mask4=%ymm4,>v00=%ymm3 vpand % ymm9, % ymm4, % ymm3 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#13,>v10=reg256#8 # asm 2: vpsllw $8,<x1=%ymm12,>v10=%ymm7 vpsllw $8, % ymm12, % ymm7 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#10,>v01=reg256#9 # asm 2: vpsrlw $8,<x0=%ymm9,>v01=%ymm8 vpsrlw $8, % ymm9, % ymm8 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#13,<mask5=reg256#6,>v11=reg256#10 # asm 2: vpand <x1=%ymm12,<mask5=%ymm5,>v11=%ymm9 vpand % ymm12, % ymm5, % ymm9 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#4,<v10=reg256#8,>x0=reg256#4 # asm 2: vpor <v00=%ymm3,<v10=%ymm7,>x0=%ymm3 vpor % ymm3, % ymm7, % ymm3 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#10,>x1=reg256#8 # asm 2: vpor <v01=%ymm8,<v11=%ymm9,>x1=%ymm7 vpor % ymm8, % ymm9, % ymm7 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#9 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm8 vpand % ymm11, % ymm4, % ymm8 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#1,>v10=reg256#10 # asm 2: vpsllw $8,<x3=%ymm0,>v10=%ymm9 vpsllw $8, % ymm0, % ymm9 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#1,<mask5=reg256#6,>v11=reg256#1 # asm 2: vpand <x3=%ymm0,<mask5=%ymm5,>v11=%ymm0 vpand % ymm0, % ymm5, % ymm0 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#10,>x2=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm9,>x2=%ymm8 vpor % ymm8, % ymm9, % ymm8 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#1,>x3=reg256#1 # asm 2: vpor <v01=%ymm11,<v11=%ymm0,>x3=%ymm0 vpor % ymm11, % ymm0, % ymm0 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#11,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x4=%ymm10,<mask4=%ymm4,>v00=%ymm9 vpand % ymm10, % ymm4, % ymm9 # qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#3,>v10=reg256#12 # asm 2: vpsllw $8,<x5=%ymm2,>v10=%ymm11 vpsllw $8, % ymm2, % ymm11 # qhasm: 16x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#11,>v01=reg256#11 # asm 2: vpsrlw $8,<x4=%ymm10,>v01=%ymm10 vpsrlw $8, % ymm10, % ymm10 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#3,<mask5=reg256#6,>v11=reg256#3 # asm 2: vpand <x5=%ymm2,<mask5=%ymm5,>v11=%ymm2 vpand % ymm2, % ymm5, % ymm2 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#12,>x4=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm11,>x4=%ymm9 vpor % ymm9, % ymm11, % ymm9 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#3,>x5=reg256#3 # asm 2: vpor <v01=%ymm10,<v11=%ymm2,>x5=%ymm2 vpor % ymm10, % ymm2, % ymm2 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#5 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm4 vpand % ymm6, % ymm4, % ymm4 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#2,>v10=reg256#11 # asm 
2: vpsllw $8,<x7=%ymm1,>v10=%ymm10
vpsllw $8,%ymm1,%ymm10
# qhasm: 16x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7
# asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6
vpsrlw $8,%ymm6,%ymm6
# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#2,<mask5=reg256#6,>v11=reg256#2
# asm 2: vpand <x7=%ymm1,<mask5=%ymm5,>v11=%ymm1
vpand %ymm1,%ymm5,%ymm1
# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#5,<v10=reg256#11,>x6=reg256#5
# asm 2: vpor <v00=%ymm4,<v10=%ymm10,>x6=%ymm4
vpor %ymm4,%ymm10,%ymm4
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#2,>x7=reg256#2
# asm 2: vpor <v01=%ymm6,<v11=%ymm1,>x7=%ymm1
vpor %ymm6,%ymm1,%ymm1
# qhasm: mem256[ input_0 + 224 ] = x0
# asm 1: vmovupd <x0=reg256#4,224(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm3,224(<input_0=%rdi)
vmovupd %ymm3,224(%rdi)
# qhasm: mem256[ input_0 + 480 ] = x1
# asm 1: vmovupd <x1=reg256#8,480(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm7,480(<input_0=%rdi)
vmovupd %ymm7,480(%rdi)
# qhasm: mem256[ input_0 + 736 ] = x2
# asm 1: vmovupd <x2=reg256#9,736(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm8,736(<input_0=%rdi)
vmovupd %ymm8,736(%rdi)
# qhasm: mem256[ input_0 + 992 ] = x3
# asm 1: vmovupd <x3=reg256#1,992(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm0,992(<input_0=%rdi)
vmovupd %ymm0,992(%rdi)
# qhasm: mem256[ input_0 + 1248 ] = x4
# asm 1: vmovupd <x4=reg256#10,1248(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm9,1248(<input_0=%rdi)
vmovupd %ymm9,1248(%rdi)
# qhasm: mem256[ input_0 + 1504 ] = x5
# asm 1: vmovupd <x5=reg256#3,1504(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm2,1504(<input_0=%rdi)
vmovupd %ymm2,1504(%rdi)
# qhasm: mem256[ input_0 + 1760 ] = x6
# asm 1: vmovupd <x6=reg256#5,1760(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm4,1760(<input_0=%rdi)
vmovupd %ymm4,1760(%rdi)
# qhasm: mem256[ input_0 + 2016 ] = x7
# asm 1: vmovupd <x7=reg256#2,2016(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm1,2016(<input_0=%rdi)
vmovupd %ymm1,2016(%rdi)
# qhasm: mask0 aligned= mem256[ MASK2_0 ]
# asm 1: vmovapd MASK2_0(%rip),>mask0=reg256#1
# asm 2: vmovapd MASK2_0(%rip),>mask0=%ymm0
vmovapd MASK2_0(%rip),%ymm0
# qhasm: mask1 aligned= mem256[ MASK2_1 ]
# asm 1: vmovapd MASK2_1(%rip),>mask1=reg256#2
# asm 2: vmovapd MASK2_1(%rip),>mask1=%ymm1
vmovapd MASK2_1(%rip),%ymm1
# qhasm: mask2 aligned= mem256[ MASK1_0 ]
# asm 1: vmovapd MASK1_0(%rip),>mask2=reg256#3
# asm 2: vmovapd MASK1_0(%rip),>mask2=%ymm2
vmovapd MASK1_0(%rip),%ymm2
# qhasm: mask3 aligned= mem256[ MASK1_1 ]
# asm 1: vmovapd MASK1_1(%rip),>mask3=reg256#4
# asm 2: vmovapd MASK1_1(%rip),>mask3=%ymm3
vmovapd MASK1_1(%rip),%ymm3
# qhasm: mask4 aligned= mem256[ MASK0_0 ]
# asm 1: vmovapd MASK0_0(%rip),>mask4=reg256#5
# asm 2: vmovapd MASK0_0(%rip),>mask4=%ymm4
vmovapd MASK0_0(%rip),%ymm4
# qhasm: mask5 aligned= mem256[ MASK0_1 ]
# asm 1: vmovapd MASK0_1(%rip),>mask5=reg256#6
# asm 2: vmovapd MASK0_1(%rip),>mask5=%ymm5
vmovapd MASK0_1(%rip),%ymm5
# qhasm: x0 = mem256[ input_0 + 0 ]
# asm 1: vmovupd 0(<input_0=int64#1),>x0=reg256#7
# asm 2: vmovupd 0(<input_0=%rdi),>x0=%ymm6
vmovupd 0(%rdi),%ymm6
# qhasm: x1 = mem256[ input_0 + 32 ]
# asm 1: vmovupd 32(<input_0=int64#1),>x1=reg256#8
# asm 2: vmovupd 32(<input_0=%rdi),>x1=%ymm7
vmovupd 32(%rdi),%ymm7
# qhasm: x2 = mem256[ input_0 + 64 ]
# asm 1: vmovupd 64(<input_0=int64#1),>x2=reg256#9
# asm 2: vmovupd 64(<input_0=%rdi),>x2=%ymm8
vmovupd 64(%rdi),%ymm8
# qhasm: x3 = mem256[ input_0 + 96 ]
# asm 1: vmovupd
96(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 96(<input_0=%rdi),>x3=%ymm9 vmovupd 96( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 128 ] # asm 1: vmovupd 128(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 128(<input_0=%rdi),>x4=%ymm10 vmovupd 128( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 160 ] # asm 1: vmovupd 160(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 160(<input_0=%rdi),>x5=%ymm11 vmovupd 160( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 192 ] # asm 1: vmovupd 192(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 192(<input_0=%rdi),>x6=%ymm12 vmovupd 192( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 224 ] # asm 1: vmovupd 224(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 224(<input_0=%rdi),>x7=%ymm13 vmovupd 224( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15 vpand % ymm10, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7 # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6 vpand % ymm6, % ymm1, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6 vpsrlq $4, % ymm6, % ymm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15 vpand % ymm11, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8 # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7 vpand % ymm7, % ymm1, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7 vpsrlq $4, % ymm7, % ymm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand 
<x6=reg256#13,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15 vpand % ymm12, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9 # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8 vpand % ymm8, % ymm1, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8 vpsrlq $4, % ymm8, % ymm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15 vpand % ymm13, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10 # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9 vpand % ymm9, % ymm1, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10 # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9 vpsrlq $4, % ymm9, % ymm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15 vpand % ymm11, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15 # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14 vpand % ymm14, % ymm3, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15 # asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14 vpsrlq $2, % ymm14, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: 
vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15 vpand % ymm12, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11 # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10 vpand % ymm10, % ymm3, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10 vpsrlq $2, % ymm10, % ymm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15 vpand % ymm8, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7 # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6 vpand % ymm6, % ymm3, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6 vpsrlq $2, % ymm6, % ymm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15 vpand % ymm9, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8 # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7 vpand % ymm7, % ymm3, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7 vpsrlq $2, % ymm7, % ymm7 
# qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15 vpand % ymm14, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14 # asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13 vpand % ymm13, % ymm5, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14 # asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13 vpsrlq $1, % ymm13, % ymm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15 vpand % ymm10, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12 # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11 vpand % ymm11, % ymm5, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12 # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11 vpsrlq $1, % ymm11, % ymm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15 vpand % ymm8, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13 # asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12 vpand % ymm12, % ymm5, % ymm12 # qhasm: v11 = x5 & mask5 # asm 
1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
vpand %ymm8,%ymm5,%ymm8
# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13
# asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12
vpsrlq $1,%ymm12,%ymm12
# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
vpor %ymm11,%ymm15,%ymm11
# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
# asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
vpor %ymm12,%ymm8,%ymm8
# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
vpand %ymm6,%ymm4,%ymm12
# qhasm: v10 = x7 & mask4
# asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15
vpand %ymm7,%ymm4,%ymm15
# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1,%ymm15,%ymm15
# qhasm: v01 = x6 & mask5
# asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7
# asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6
vpand %ymm6,%ymm5,%ymm6
# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
vpand %ymm7,%ymm5,%ymm7
# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6
vpsrlq $1,%ymm6,%ymm6
# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
vpor %ymm12,%ymm15,%ymm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
vpor %ymm6,%ymm7,%ymm6
# qhasm: mem256[ input_0 + 0 ] = x0
# asm 1: vmovupd <x0=reg256#10,0(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm9,0(<input_0=%rdi)
vmovupd %ymm9,0(%rdi)
# qhasm: mem256[ input_0 + 32 ] = x1
# asm 1: vmovupd <x1=reg256#14,32(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm13,32(<input_0=%rdi)
vmovupd %ymm13,32(%rdi)
# qhasm: mem256[ input_0 + 64 ] = x2
# asm 1: vmovupd <x2=reg256#15,64(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm14,64(<input_0=%rdi)
vmovupd %ymm14,64(%rdi)
# qhasm: mem256[ input_0 + 96 ] = x3
# asm 1: vmovupd <x3=reg256#11,96(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm10,96(<input_0=%rdi)
vmovupd %ymm10,96(%rdi)
# qhasm: mem256[ input_0 + 128 ] = x4
# asm 1: vmovupd <x4=reg256#12,128(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm11,128(<input_0=%rdi)
vmovupd %ymm11,128(%rdi)
# qhasm: mem256[ input_0 + 160 ] = x5
# asm 1: vmovupd <x5=reg256#9,160(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm8,160(<input_0=%rdi)
vmovupd %ymm8,160(%rdi)
# qhasm: mem256[ input_0 + 192 ] = x6
# asm 1: vmovupd <x6=reg256#13,192(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm12,192(<input_0=%rdi)
vmovupd %ymm12,192(%rdi)
# qhasm: mem256[ input_0 + 224 ] = x7
# asm 1: vmovupd <x7=reg256#7,224(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm6,224(<input_0=%rdi)
vmovupd %ymm6,224(%rdi)
# qhasm: x0 = mem256[ input_0 + 256 ]
# asm 1: vmovupd 256(<input_0=int64#1),>x0=reg256#7
# asm 2: vmovupd 256(<input_0=%rdi),>x0=%ymm6
vmovupd 256(%rdi),%ymm6
# qhasm: x1 = mem256[ input_0 + 288 ]
# asm 1: vmovupd 288(<input_0=int64#1),>x1=reg256#8
# asm 2: vmovupd 288(<input_0=%rdi),>x1=%ymm7
vmovupd 288(%rdi),%ymm7
# qhasm: x2 = mem256[ input_0 + 320 ]
# asm 1: vmovupd
320(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 320(<input_0=%rdi),>x2=%ymm8 vmovupd 320( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 352 ] # asm 1: vmovupd 352(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 352(<input_0=%rdi),>x3=%ymm9 vmovupd 352( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 384 ] # asm 1: vmovupd 384(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 384(<input_0=%rdi),>x4=%ymm10 vmovupd 384( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 416 ] # asm 1: vmovupd 416(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 416(<input_0=%rdi),>x5=%ymm11 vmovupd 416( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 448 ] # asm 1: vmovupd 448(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 448(<input_0=%rdi),>x6=%ymm12 vmovupd 448( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 480 ] # asm 1: vmovupd 480(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 480(<input_0=%rdi),>x7=%ymm13 vmovupd 480( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15 vpand % ymm10, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7 # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6 vpand % ymm6, % ymm1, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6 vpsrlq $4, % ymm6, % ymm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15 vpand % ymm11, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8 # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7 vpand % ymm7, % ymm1, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7 vpsrlq $4, % ymm7, % ymm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand 
<x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15 vpand % ymm12, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9 # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8 vpand % ymm8, % ymm1, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8 vpsrlq $4, % ymm8, % ymm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15 vpand % ymm13, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10 # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9 vpand % ymm9, % ymm1, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10 # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9 vpsrlq $4, % ymm9, % ymm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15 vpand % ymm11, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15 # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14 vpand % ymm14, % ymm3, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15 # asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14 vpsrlq $2, % ymm14, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: 
vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15 vpand % ymm12, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11 # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10 vpand % ymm10, % ymm3, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10 vpsrlq $2, % ymm10, % ymm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15 vpand % ymm8, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7 # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6 vpand % ymm6, % ymm3, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6 vpsrlq $2, % ymm6, % ymm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15 vpand % ymm9, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8 # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7 vpand % ymm7, % ymm3, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % 
ymm9, % ymm3, % ymm9 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7 vpsrlq $2, % ymm7, % ymm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15 vpand % ymm14, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14 # asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13 vpand % ymm13, % ymm5, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14 # asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13 vpsrlq $1, % ymm13, % ymm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15 vpand % ymm10, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12 # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11 vpand % ymm11, % ymm5, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12 # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11 vpsrlq $1, % ymm11, % ymm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15 vpand % ymm8, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x4 & mask5 # asm 1: 
vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13 # asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12 vpand % ymm12, % ymm5, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13 # asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12 vpsrlq $1, % ymm12, % ymm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15 vpand % ymm7, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7 # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6 vpand % ymm6, % ymm5, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6 vpsrlq $1, % ymm6, % ymm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 256 ] = x0 # asm 1: vmovupd <x0=reg256#10,256(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,256(<input_0=%rdi) vmovupd % ymm9, 256( % rdi) # qhasm: mem256[ input_0 + 288 ] = x1 # asm 1: vmovupd <x1=reg256#14,288(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,288(<input_0=%rdi) vmovupd % ymm13, 288( % rdi) # qhasm: mem256[ input_0 + 320 ] = x2 # asm 1: vmovupd <x2=reg256#15,320(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,320(<input_0=%rdi) vmovupd % ymm14, 320( % rdi) # qhasm: mem256[ input_0 + 352 ] = x3 # asm 1: vmovupd <x3=reg256#11,352(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,352(<input_0=%rdi) vmovupd % ymm10, 352( % rdi) # qhasm: mem256[ input_0 + 384 ] = x4 # asm 1: vmovupd <x4=reg256#12,384(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,384(<input_0=%rdi) vmovupd % ymm11, 384( % rdi) # qhasm: mem256[ input_0 + 416 ] = x5 # asm 1: vmovupd <x5=reg256#9,416(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,416(<input_0=%rdi) vmovupd % ymm8, 416( % rdi) # qhasm: mem256[ input_0 + 448 ] = x6 # asm 1: vmovupd <x6=reg256#13,448(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,448(<input_0=%rdi) vmovupd % ymm12, 448( % rdi) # qhasm: mem256[ input_0 + 480 ] = x7 # asm 1: vmovupd <x7=reg256#7,480(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,480(<input_0=%rdi) vmovupd % ymm6, 480( % rdi) # qhasm: x0 = mem256[ input_0 + 512 ] # asm 1: vmovupd 512(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 512(<input_0=%rdi),>x0=%ymm6 vmovupd 512( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 544 ] # asm 
1: vmovupd 544(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 544(<input_0=%rdi),>x1=%ymm7 vmovupd 544( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 576 ] # asm 1: vmovupd 576(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 576(<input_0=%rdi),>x2=%ymm8 vmovupd 576( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 608 ] # asm 1: vmovupd 608(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 608(<input_0=%rdi),>x3=%ymm9 vmovupd 608( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 640 ] # asm 1: vmovupd 640(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 640(<input_0=%rdi),>x4=%ymm10 vmovupd 640( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 672 ] # asm 1: vmovupd 672(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 672(<input_0=%rdi),>x5=%ymm11 vmovupd 672( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 704 ] # asm 1: vmovupd 704(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 704(<input_0=%rdi),>x6=%ymm12 vmovupd 704( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 736 ] # asm 1: vmovupd 736(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 736(<input_0=%rdi),>x7=%ymm13 vmovupd 736( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15 vpand % ymm10, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7 # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6 vpand % ymm6, % ymm1, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6 vpsrlq $4, % ymm6, % ymm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15 vpand % ymm11, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8 # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7 vpand % ymm7, % ymm1, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7 vpsrlq $4, % ymm7, % ymm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor 
<v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15 vpand % ymm12, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9 # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8 vpand % ymm8, % ymm1, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8 vpsrlq $4, % ymm8, % ymm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15 vpand % ymm13, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10 # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9 vpand % ymm9, % ymm1, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10 # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9 vpsrlq $4, % ymm9, % ymm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15 vpand % ymm11, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15 # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14 vpand % ymm14, % ymm3, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15 # 
asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14 vpsrlq $2, % ymm14, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15 vpand % ymm12, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11 # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10 vpand % ymm10, % ymm3, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10 vpsrlq $2, % ymm10, % ymm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15 vpand % ymm8, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7 # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6 vpand % ymm6, % ymm3, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6 vpsrlq $2, % ymm6, % ymm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15 vpand % ymm9, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8 # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7 vpand % ymm7, % 
ymm3, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7 vpsrlq $2, % ymm7, % ymm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15 vpand % ymm14, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14 # asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13 vpand % ymm13, % ymm5, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14 # asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13 vpsrlq $1, % ymm13, % ymm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15 vpand % ymm10, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12 # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11 vpand % ymm11, % ymm5, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12 # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11 vpsrlq $1, % ymm11, % ymm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15 vpand % ymm8, % ymm4, % ymm15 # qhasm: 4x v10 <<= 
1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13 # asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12 vpand % ymm12, % ymm5, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13 # asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12 vpsrlq $1, % ymm12, % ymm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15 vpand % ymm7, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7 # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6 vpand % ymm6, % ymm5, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6 vpsrlq $1, % ymm6, % ymm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 512 ] = x0 # asm 1: vmovupd <x0=reg256#10,512(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,512(<input_0=%rdi) vmovupd % ymm9, 512( % rdi) # qhasm: mem256[ input_0 + 544 ] = x1 # asm 1: vmovupd <x1=reg256#14,544(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,544(<input_0=%rdi) vmovupd % ymm13, 544( % rdi) # qhasm: mem256[ input_0 + 576 ] = x2 # asm 1: vmovupd <x2=reg256#15,576(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,576(<input_0=%rdi) vmovupd % ymm14, 576( % rdi) # qhasm: mem256[ input_0 + 608 ] = x3 # asm 1: vmovupd <x3=reg256#11,608(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,608(<input_0=%rdi) vmovupd % ymm10, 608( % rdi) # qhasm: mem256[ input_0 + 640 ] = x4 # asm 1: vmovupd <x4=reg256#12,640(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,640(<input_0=%rdi) vmovupd % ymm11, 640( % rdi) # qhasm: mem256[ input_0 + 672 ] = x5 # asm 1: vmovupd <x5=reg256#9,672(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,672(<input_0=%rdi) vmovupd % ymm8, 672( % rdi) # qhasm: mem256[ input_0 + 704 ] = x6 # asm 1: vmovupd <x6=reg256#13,704(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,704(<input_0=%rdi) vmovupd % ymm12, 704( % rdi) # qhasm: mem256[ input_0 + 736 ] = x7 # asm 1: vmovupd <x7=reg256#7,736(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,736(<input_0=%rdi) vmovupd % ymm6, 736( % rdi) # qhasm: x0 = mem256[ input_0 + 768 ] # asm 1: 
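# ------------------------------------------------------------------
# Note (added, descriptive only): each repeated block in this listing
# performs the same in-place mask-and-shift interleave on a group of
# eight 256-bit words x0..x7, loaded from input_0 + k .. k+224 and
# stored back to the same offsets, for k = 0, 256, 512, 768, ...:
#   stage 1: pairs (x0,x4) (x1,x5) (x2,x6) (x3,x7), masks mask0/mask1, shift 4
#   stage 2: pairs (x0,x2) (x1,x3) (x4,x6) (x5,x7), masks mask2/mask3, shift 2
#   stage 3: pairs (x0,x1) (x2,x3) (x4,x5) (x6,x7), masks mask4/mask5, shift 1
# Per pair (lo, hi) each stage computes, with the shifts applied per
# 64-bit lane (vpsllq/vpsrlq):
#   lo' = (lo & maskA) | ((hi & maskA) << s)
#   hi' = ((lo & maskB) >> s) | (hi & maskB)
# This is the usual bit-interleave/transpose idiom. mask0..mask5 are
# constants kept in %ymm0..%ymm5, loaded earlier in the routine (not
# shown here); their exact values (presumably the 0x0f/0xf0-, 0x33/0xcc-
# and 0x55/0xaa-style patterns matching the shift widths) are an
# assumption, not taken from this listing.
# ------------------------------------------------------------------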
vmovupd 768(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 768(<input_0=%rdi),>x0=%ymm6 vmovupd 768( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 800 ] # asm 1: vmovupd 800(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 800(<input_0=%rdi),>x1=%ymm7 vmovupd 800( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 832 ] # asm 1: vmovupd 832(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 832(<input_0=%rdi),>x2=%ymm8 vmovupd 832( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 864 ] # asm 1: vmovupd 864(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 864(<input_0=%rdi),>x3=%ymm9 vmovupd 864( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 896 ] # asm 1: vmovupd 896(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 896(<input_0=%rdi),>x4=%ymm10 vmovupd 896( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 928 ] # asm 1: vmovupd 928(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 928(<input_0=%rdi),>x5=%ymm11 vmovupd 928( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 960 ] # asm 1: vmovupd 960(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 960(<input_0=%rdi),>x6=%ymm12 vmovupd 960( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 992 ] # asm 1: vmovupd 992(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 992(<input_0=%rdi),>x7=%ymm13 vmovupd 992( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15 vpand % ymm10, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7 # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6 vpand % ymm6, % ymm1, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6 vpsrlq $4, % ymm6, % ymm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15 vpand % ymm11, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8 # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7 vpand % ymm7, % ymm1, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7 vpsrlq $4, % ymm7, % ymm7 # qhasm: x1 = v00 | v10 # asm 1: vpor 
<v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15 vpand % ymm12, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9 # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8 vpand % ymm8, % ymm1, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8 vpsrlq $4, % ymm8, % ymm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15 vpand % ymm13, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10 # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9 vpand % ymm9, % ymm1, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10 # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9 vpsrlq $4, % ymm9, % ymm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15 vpand % ymm11, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15 # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14 vpand % ymm14, % ymm3, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # 
asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15 # asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14 vpsrlq $2, % ymm14, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15 vpand % ymm12, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11 # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10 vpand % ymm10, % ymm3, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10 vpsrlq $2, % ymm10, % ymm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15 vpand % ymm8, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7 # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6 vpand % ymm6, % ymm3, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6 vpsrlq $2, % ymm6, % ymm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15 vpand % ymm9, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % 
ymm15, % ymm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8 # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7 vpand % ymm7, % ymm3, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7 vpsrlq $2, % ymm7, % ymm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15 vpand % ymm14, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14 # asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13 vpand % ymm13, % ymm5, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14 # asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13 vpsrlq $1, % ymm13, % ymm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15 vpand % ymm10, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12 # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11 vpand % ymm11, % ymm5, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12 # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11 vpsrlq $1, % ymm11, % ymm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: v10 = x5 & 
mask4 # asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15 vpand % ymm8, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13 # asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12 vpand % ymm12, % ymm5, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13 # asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12 vpsrlq $1, % ymm12, % ymm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15 vpand % ymm7, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7 # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6 vpand % ymm6, % ymm5, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6 vpsrlq $1, % ymm6, % ymm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 768 ] = x0 # asm 1: vmovupd <x0=reg256#10,768(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,768(<input_0=%rdi) vmovupd % ymm9, 768( % rdi) # qhasm: mem256[ input_0 + 800 ] = x1 # asm 1: vmovupd <x1=reg256#14,800(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,800(<input_0=%rdi) vmovupd % ymm13, 800( % rdi) # qhasm: mem256[ input_0 + 832 ] = x2 # asm 1: vmovupd <x2=reg256#15,832(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,832(<input_0=%rdi) vmovupd % ymm14, 832( % rdi) # qhasm: mem256[ input_0 + 864 ] = x3 # asm 1: vmovupd <x3=reg256#11,864(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,864(<input_0=%rdi) vmovupd % ymm10, 864( % rdi) # qhasm: mem256[ input_0 + 896 ] = x4 # asm 1: vmovupd <x4=reg256#12,896(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,896(<input_0=%rdi) vmovupd % ymm11, 896( % rdi) # qhasm: mem256[ input_0 + 928 ] = x5 # asm 1: vmovupd <x5=reg256#9,928(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,928(<input_0=%rdi) vmovupd % ymm8, 928( % rdi) # qhasm: mem256[ input_0 + 960 ] = x6 # asm 1: vmovupd <x6=reg256#13,960(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,960(<input_0=%rdi) vmovupd % ymm12, 960( % rdi) # qhasm: mem256[ input_0 + 992 ] = x7 # asm 1: 
vmovupd <x7=reg256#7,992(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,992(<input_0=%rdi) vmovupd % ymm6, 992( % rdi) # qhasm: x0 = mem256[ input_0 + 1024 ] # asm 1: vmovupd 1024(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 1024(<input_0=%rdi),>x0=%ymm6 vmovupd 1024( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 1056 ] # asm 1: vmovupd 1056(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 1056(<input_0=%rdi),>x1=%ymm7 vmovupd 1056( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 1088 ] # asm 1: vmovupd 1088(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 1088(<input_0=%rdi),>x2=%ymm8 vmovupd 1088( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 1120 ] # asm 1: vmovupd 1120(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 1120(<input_0=%rdi),>x3=%ymm9 vmovupd 1120( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1152 ] # asm 1: vmovupd 1152(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1152(<input_0=%rdi),>x4=%ymm10 vmovupd 1152( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1184 ] # asm 1: vmovupd 1184(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1184(<input_0=%rdi),>x5=%ymm11 vmovupd 1184( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1216 ] # asm 1: vmovupd 1216(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1216(<input_0=%rdi),>x6=%ymm12 vmovupd 1216( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1248 ] # asm 1: vmovupd 1248(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1248(<input_0=%rdi),>x7=%ymm13 vmovupd 1248( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15 vpand % ymm10, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7 # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6 vpand % ymm6, % ymm1, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6 vpsrlq $4, % ymm6, % ymm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15 vpand % ymm11, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8 # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7 vpand % ymm7, % ymm1, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: 
4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7 vpsrlq $4, % ymm7, % ymm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15 vpand % ymm12, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9 # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8 vpand % ymm8, % ymm1, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8 vpsrlq $4, % ymm8, % ymm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15 vpand % ymm13, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10 # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9 vpand % ymm9, % ymm1, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10 # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9 vpsrlq $4, % ymm9, % ymm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15 vpand % ymm11, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand 
<x0=reg256#15,<mask3=reg256#4,>v01=reg256#15 # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14 vpand % ymm14, % ymm3, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15 # asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14 vpsrlq $2, % ymm14, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15 vpand % ymm12, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11 # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10 vpand % ymm10, % ymm3, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10 vpsrlq $2, % ymm10, % ymm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15 vpand % ymm8, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7 # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6 vpand % ymm6, % ymm3, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6 vpsrlq $2, % ymm6, % ymm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand 
<x7=reg256#10,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15 vpand % ymm9, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8 # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7 vpand % ymm7, % ymm3, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7 vpsrlq $2, % ymm7, % ymm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15 vpand % ymm14, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14 # asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13 vpand % ymm13, % ymm5, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14 # asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13 vpsrlq $1, % ymm13, % ymm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15 vpand % ymm10, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12 # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11 vpand % ymm11, % ymm5, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12 # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11 vpsrlq $1, % ymm11, % ymm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor 
<v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15 vpand % ymm8, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13 # asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12 vpand % ymm12, % ymm5, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13 # asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12 vpsrlq $1, % ymm12, % ymm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15 vpand % ymm7, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7 # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6 vpand % ymm6, % ymm5, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6 vpsrlq $1, % ymm6, % ymm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 1024 ] = x0 # asm 1: vmovupd <x0=reg256#10,1024(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,1024(<input_0=%rdi) vmovupd % ymm9, 1024( % rdi) # qhasm: mem256[ input_0 + 1056 ] = x1 # asm 1: vmovupd <x1=reg256#14,1056(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,1056(<input_0=%rdi) vmovupd % ymm13, 1056( % rdi) # qhasm: mem256[ input_0 + 1088 ] = x2 # asm 1: vmovupd <x2=reg256#15,1088(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,1088(<input_0=%rdi) vmovupd % ymm14, 1088( % rdi) # qhasm: mem256[ input_0 + 1120 ] = x3 # asm 1: vmovupd <x3=reg256#11,1120(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,1120(<input_0=%rdi) vmovupd % ymm10, 1120( % rdi) # qhasm: mem256[ input_0 + 1152 ] = x4 # asm 1: vmovupd <x4=reg256#12,1152(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1152(<input_0=%rdi) vmovupd % ymm11, 1152( % rdi) # qhasm: mem256[ input_0 + 1184 ] = x5 # asm 1: vmovupd <x5=reg256#9,1184(<input_0=int64#1) # asm 2: 
vmovupd <x5=%ymm8,1184(<input_0=%rdi) vmovupd % ymm8, 1184( % rdi) # qhasm: mem256[ input_0 + 1216 ] = x6 # asm 1: vmovupd <x6=reg256#13,1216(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1216(<input_0=%rdi) vmovupd % ymm12, 1216( % rdi) # qhasm: mem256[ input_0 + 1248 ] = x7 # asm 1: vmovupd <x7=reg256#7,1248(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1248(<input_0=%rdi) vmovupd % ymm6, 1248( % rdi) # qhasm: x0 = mem256[ input_0 + 1280 ] # asm 1: vmovupd 1280(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 1280(<input_0=%rdi),>x0=%ymm6 vmovupd 1280( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 1312 ] # asm 1: vmovupd 1312(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 1312(<input_0=%rdi),>x1=%ymm7 vmovupd 1312( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 1344 ] # asm 1: vmovupd 1344(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 1344(<input_0=%rdi),>x2=%ymm8 vmovupd 1344( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 1376 ] # asm 1: vmovupd 1376(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 1376(<input_0=%rdi),>x3=%ymm9 vmovupd 1376( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1408 ] # asm 1: vmovupd 1408(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1408(<input_0=%rdi),>x4=%ymm10 vmovupd 1408( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1440 ] # asm 1: vmovupd 1440(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1440(<input_0=%rdi),>x5=%ymm11 vmovupd 1440( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1472 ] # asm 1: vmovupd 1472(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1472(<input_0=%rdi),>x6=%ymm12 vmovupd 1472( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1504 ] # asm 1: vmovupd 1504(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1504(<input_0=%rdi),>x7=%ymm13 vmovupd 1504( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15 vpand % ymm10, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7 # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6 vpand % ymm6, % ymm1, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6 vpsrlq $4, % ymm6, % ymm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15 vpand % ymm11, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand 
<x1=reg256#8,<mask1=reg256#2,>v01=reg256#8 # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7 vpand % ymm7, % ymm1, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7 vpsrlq $4, % ymm7, % ymm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15 vpand % ymm12, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9 # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8 vpand % ymm8, % ymm1, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8 vpsrlq $4, % ymm8, % ymm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15 vpand % ymm13, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10 # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9 vpand % ymm9, % ymm1, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10 # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9 vpsrlq $4, % ymm9, % ymm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16 # asm 2: 
vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15 vpand % ymm11, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15 # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14 vpand % ymm14, % ymm3, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15 # asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14 vpsrlq $2, % ymm14, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15 vpand % ymm12, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11 # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10 vpand % ymm10, % ymm3, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10 vpsrlq $2, % ymm10, % ymm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15 vpand % ymm8, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7 # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6 vpand % ymm6, % ymm3, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6 vpsrlq $2, % ymm6, % ymm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % 
ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15 vpand % ymm9, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8 # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7 vpand % ymm7, % ymm3, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7 vpsrlq $2, % ymm7, % ymm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15 vpand % ymm14, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14 # asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13 vpand % ymm13, % ymm5, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14 # asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13 vpsrlq $1, % ymm13, % ymm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15 vpand % ymm10, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12 # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11 vpand % ymm11, % ymm5, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12 # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11 vpsrlq $1, % ymm11, % ymm11 # qhasm: x2 = v00 | v10 # asm 1: vpor 
<v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15 vpand % ymm8, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13 # asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12 vpand % ymm12, % ymm5, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13 # asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12 vpsrlq $1, % ymm12, % ymm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15 vpand % ymm7, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7 # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6 vpand % ymm6, % ymm5, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6 vpsrlq $1, % ymm6, % ymm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 1280 ] = x0 # asm 1: vmovupd <x0=reg256#10,1280(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,1280(<input_0=%rdi) vmovupd % ymm9, 1280( % rdi) # qhasm: mem256[ input_0 + 1312 ] = x1 # asm 1: vmovupd <x1=reg256#14,1312(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,1312(<input_0=%rdi) vmovupd % ymm13, 1312( % rdi) # qhasm: mem256[ input_0 + 1344 ] = x2 # asm 1: vmovupd <x2=reg256#15,1344(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,1344(<input_0=%rdi) vmovupd % ymm14, 1344( % rdi) # qhasm: mem256[ input_0 + 1376 ] = x3 # asm 1: vmovupd <x3=reg256#11,1376(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,1376(<input_0=%rdi) vmovupd % ymm10, 1376( % rdi) # qhasm: mem256[ input_0 + 1408 ] = x4 # asm 1: vmovupd 
<x4=reg256#12,1408(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1408(<input_0=%rdi) vmovupd % ymm11, 1408( % rdi) # qhasm: mem256[ input_0 + 1440 ] = x5 # asm 1: vmovupd <x5=reg256#9,1440(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1440(<input_0=%rdi) vmovupd % ymm8, 1440( % rdi) # qhasm: mem256[ input_0 + 1472 ] = x6 # asm 1: vmovupd <x6=reg256#13,1472(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1472(<input_0=%rdi) vmovupd % ymm12, 1472( % rdi) # qhasm: mem256[ input_0 + 1504 ] = x7 # asm 1: vmovupd <x7=reg256#7,1504(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1504(<input_0=%rdi) vmovupd % ymm6, 1504( % rdi) # qhasm: x0 = mem256[ input_0 + 1536 ] # asm 1: vmovupd 1536(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 1536(<input_0=%rdi),>x0=%ymm6 vmovupd 1536( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 1568 ] # asm 1: vmovupd 1568(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 1568(<input_0=%rdi),>x1=%ymm7 vmovupd 1568( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 1600 ] # asm 1: vmovupd 1600(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 1600(<input_0=%rdi),>x2=%ymm8 vmovupd 1600( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 1632 ] # asm 1: vmovupd 1632(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 1632(<input_0=%rdi),>x3=%ymm9 vmovupd 1632( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1664 ] # asm 1: vmovupd 1664(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1664(<input_0=%rdi),>x4=%ymm10 vmovupd 1664( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1696 ] # asm 1: vmovupd 1696(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1696(<input_0=%rdi),>x5=%ymm11 vmovupd 1696( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1728 ] # asm 1: vmovupd 1728(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1728(<input_0=%rdi),>x6=%ymm12 vmovupd 1728( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1760 ] # asm 1: vmovupd 1760(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1760(<input_0=%rdi),>x7=%ymm13 vmovupd 1760( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15 vpand % ymm10, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7 # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6 vpand % ymm6, % ymm1, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6 vpsrlq $4, % ymm6, % ymm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15 vpand 
% ymm11, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8 # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7 vpand % ymm7, % ymm1, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7 vpsrlq $4, % ymm7, % ymm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15 vpand % ymm12, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9 # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8 vpand % ymm8, % ymm1, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8 vpsrlq $4, % ymm8, % ymm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15 vpand % ymm13, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10 # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9 vpand % ymm9, % ymm1, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10 # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9 vpsrlq $4, % ymm9, % ymm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand 
<x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15 vpand % ymm11, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15 # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14 vpand % ymm14, % ymm3, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15 # asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14 vpsrlq $2, % ymm14, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15 vpand % ymm12, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11 # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10 vpand % ymm10, % ymm3, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10 vpsrlq $2, % ymm10, % ymm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15 vpand % ymm8, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7 # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6 vpand % ymm6, % ymm3, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6 vpsrlq $2, % ymm6, % ymm6 # qhasm: x4 = v00 | v10 # asm 1: vpor 
<v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15 vpand % ymm9, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8 # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7 vpand % ymm7, % ymm3, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7 vpsrlq $2, % ymm7, % ymm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15 vpand % ymm14, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14 # asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13 vpand % ymm13, % ymm5, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14 # asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13 vpsrlq $1, % ymm13, % ymm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15 vpand % ymm10, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12 # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11 vpand % ymm11, % ymm5, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: 
vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12 # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11 vpsrlq $1, % ymm11, % ymm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15 vpand % ymm8, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13 # asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12 vpand % ymm12, % ymm5, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13 # asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12 vpsrlq $1, % ymm12, % ymm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15 vpand % ymm7, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7 # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6 vpand % ymm6, % ymm5, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6 vpsrlq $1, % ymm6, % ymm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 1536 ] = x0 # asm 1: vmovupd <x0=reg256#10,1536(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,1536(<input_0=%rdi) vmovupd % ymm9, 1536( % rdi) # qhasm: mem256[ input_0 + 1568 ] = x1 # asm 1: vmovupd <x1=reg256#14,1568(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,1568(<input_0=%rdi) vmovupd % ymm13, 1568( % rdi) # qhasm: mem256[ input_0 + 1600 ] = x2 # asm 1: vmovupd <x2=reg256#15,1600(<input_0=int64#1) # asm 2: vmovupd 
<x2=%ymm14,1600(<input_0=%rdi) vmovupd % ymm14, 1600( % rdi) # qhasm: mem256[ input_0 + 1632 ] = x3 # asm 1: vmovupd <x3=reg256#11,1632(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,1632(<input_0=%rdi) vmovupd % ymm10, 1632( % rdi) # qhasm: mem256[ input_0 + 1664 ] = x4 # asm 1: vmovupd <x4=reg256#12,1664(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1664(<input_0=%rdi) vmovupd % ymm11, 1664( % rdi) # qhasm: mem256[ input_0 + 1696 ] = x5 # asm 1: vmovupd <x5=reg256#9,1696(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1696(<input_0=%rdi) vmovupd % ymm8, 1696( % rdi) # qhasm: mem256[ input_0 + 1728 ] = x6 # asm 1: vmovupd <x6=reg256#13,1728(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1728(<input_0=%rdi) vmovupd % ymm12, 1728( % rdi) # qhasm: mem256[ input_0 + 1760 ] = x7 # asm 1: vmovupd <x7=reg256#7,1760(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1760(<input_0=%rdi) vmovupd % ymm6, 1760( % rdi) # qhasm: x0 = mem256[ input_0 + 1792 ] # asm 1: vmovupd 1792(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 1792(<input_0=%rdi),>x0=%ymm6 vmovupd 1792( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 1824 ] # asm 1: vmovupd 1824(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 1824(<input_0=%rdi),>x1=%ymm7 vmovupd 1824( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 1856 ] # asm 1: vmovupd 1856(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 1856(<input_0=%rdi),>x2=%ymm8 vmovupd 1856( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 1888 ] # asm 1: vmovupd 1888(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 1888(<input_0=%rdi),>x3=%ymm9 vmovupd 1888( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1920 ] # asm 1: vmovupd 1920(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1920(<input_0=%rdi),>x4=%ymm10 vmovupd 1920( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1952 ] # asm 1: vmovupd 1952(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1952(<input_0=%rdi),>x5=%ymm11 vmovupd 1952( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1984 ] # asm 1: vmovupd 1984(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1984(<input_0=%rdi),>x6=%ymm12 vmovupd 1984( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 2016 ] # asm 1: vmovupd 2016(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 2016(<input_0=%rdi),>x7=%ymm13 vmovupd 2016( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15 vpand % ymm10, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7 # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6 vpand % ymm6, % ymm1, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6 vpsrlq $4, % ymm6, % ymm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & 
mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15 vpand % ymm11, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8 # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7 vpand % ymm7, % ymm1, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7 vpsrlq $4, % ymm7, % ymm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15 vpand % ymm12, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9 # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8 vpand % ymm8, % ymm1, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8 vpsrlq $4, % ymm8, % ymm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#1 # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm0 vpand % ymm13, % ymm0, % ymm0 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#1,<v10=reg256#1 # asm 2: vpsllq $4,<v10=%ymm0,<v10=%ymm0 vpsllq $4, % ymm0, % ymm0 # qhasm: v01 = x3 & mask1 # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10 # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9 vpand % ymm9, % ymm1, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#2 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm1 vpand % ymm13, % ymm1, % ymm1 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10 # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9 vpsrlq $4, % ymm9, % ymm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#1,>x3=reg256#1 # asm 2: vpor 
<v00=%ymm12,<v10=%ymm0,>x3=%ymm0 vpor % ymm12, % ymm0, % ymm0 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#2,>x7=reg256#2 # asm 2: vpor <v01=%ymm9,<v11=%ymm1,>x7=%ymm1 vpor % ymm9, % ymm1, % ymm1 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#10 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm9 vpand % ymm14, % ymm2, % ymm9 # qhasm: v10 = x2 & mask2 # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#13 # asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm12 vpand % ymm11, % ymm2, % ymm12 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#13,<v10=reg256#13 # asm 2: vpsllq $2,<v10=%ymm12,<v10=%ymm12 vpsllq $2, % ymm12, % ymm12 # qhasm: v01 = x0 & mask3 # asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#14 # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm13 vpand % ymm14, % ymm3, % ymm13 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#14,<v01=reg256#14 # asm 2: vpsrlq $2,<v01=%ymm13,<v01=%ymm13 vpsrlq $2, % ymm13, % ymm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#13,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm12,>x0=%ymm9 vpor % ymm9, % ymm12, % ymm9 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm13,<v11=%ymm11,>x2=%ymm11 vpor % ymm13, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm12 vpand % ymm10, % ymm2, % ymm12 # qhasm: v10 = x3 & mask2 # asm 1: vpand <x3=reg256#1,<mask2=reg256#3,>v10=reg256#14 # asm 2: vpand <x3=%ymm0,<mask2=%ymm2,>v10=%ymm13 vpand % ymm0, % ymm2, % ymm13 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#14,<v10=reg256#14 # asm 2: vpsllq $2,<v10=%ymm13,<v10=%ymm13 vpsllq $2, % ymm13, % ymm13 # qhasm: v01 = x1 & mask3 # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11 # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10 vpand % ymm10, % ymm3, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#1,<mask3=reg256#4,>v11=reg256#1 # asm 2: vpand <x3=%ymm0,<mask3=%ymm3,>v11=%ymm0 vpand % ymm0, % ymm3, % ymm0 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10 vpsrlq $2, % ymm10, % ymm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#14,>x1=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm13,>x1=%ymm12 vpor % ymm12, % ymm13, % ymm12 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#1,>x3=reg256#1 # asm 2: vpor <v01=%ymm10,<v11=%ymm0,>x3=%ymm0 vpor % ymm10, % ymm0, % ymm0 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#11 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm10 vpand % ymm6, % ymm2, % ymm10 # qhasm: v10 = x6 & mask2 # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#14 # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm13 vpand % ymm8, % ymm2, % ymm13 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#14,<v10=reg256#14 # asm 2: vpsllq $2,<v10=%ymm13,<v10=%ymm13 vpsllq $2, % ymm13, % ymm13 # qhasm: v01 = x4 & mask3 # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7 # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6 vpand % ymm6, % ymm3, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % 
ymm3, % ymm8 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6 vpsrlq $2, % ymm6, % ymm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#14,>x4=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm13,>x4=%ymm10 vpor % ymm10, % ymm13, % ymm10 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <x7=reg256#2,<mask2=reg256#3,>v10=reg256#3 # asm 2: vpand <x7=%ymm1,<mask2=%ymm2,>v10=%ymm2 vpand % ymm1, % ymm2, % ymm2 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#3,<v10=reg256#3 # asm 2: vpsllq $2,<v10=%ymm2,<v10=%ymm2 vpsllq $2, % ymm2, % ymm2 # qhasm: v01 = x5 & mask3 # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8 # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7 vpand % ymm7, % ymm3, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#2,<mask3=reg256#4,>v11=reg256#2 # asm 2: vpand <x7=%ymm1,<mask3=%ymm3,>v11=%ymm1 vpand % ymm1, % ymm3, % ymm1 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7 vpsrlq $2, % ymm7, % ymm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#3,>x5=reg256#3 # asm 2: vpor <v00=%ymm8,<v10=%ymm2,>x5=%ymm2 vpor % ymm8, % ymm2, % ymm2 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#2,>x7=reg256#2 # asm 2: vpor <v01=%ymm7,<v11=%ymm1,>x7=%ymm1 vpor % ymm7, % ymm1, % ymm1 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#10,<mask4=reg256#5,>v00=reg256#4 # asm 2: vpand <x0=%ymm9,<mask4=%ymm4,>v00=%ymm3 vpand % ymm9, % ymm4, % ymm3 # qhasm: v10 = x1 & mask4 # asm 1: vpand <x1=reg256#13,<mask4=reg256#5,>v10=reg256#8 # asm 2: vpand <x1=%ymm12,<mask4=%ymm4,>v10=%ymm7 vpand % ymm12, % ymm4, % ymm7 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#8,<v10=reg256#8 # asm 2: vpsllq $1,<v10=%ymm7,<v10=%ymm7 vpsllq $1, % ymm7, % ymm7 # qhasm: v01 = x0 & mask5 # asm 1: vpand <x0=reg256#10,<mask5=reg256#6,>v01=reg256#9 # asm 2: vpand <x0=%ymm9,<mask5=%ymm5,>v01=%ymm8 vpand % ymm9, % ymm5, % ymm8 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#13,<mask5=reg256#6,>v11=reg256#10 # asm 2: vpand <x1=%ymm12,<mask5=%ymm5,>v11=%ymm9 vpand % ymm12, % ymm5, % ymm9 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $1,<v01=%ymm8,<v01=%ymm8 vpsrlq $1, % ymm8, % ymm8 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#4,<v10=reg256#8,>x0=reg256#4 # asm 2: vpor <v00=%ymm3,<v10=%ymm7,>x0=%ymm3 vpor % ymm3, % ymm7, % ymm3 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#10,>x1=reg256#8 # asm 2: vpor <v01=%ymm8,<v11=%ymm9,>x1=%ymm7 vpor % ymm8, % ymm9, % ymm7 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#9 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm8 vpand % ymm11, % ymm4, % ymm8 # qhasm: v10 = x3 & mask4 # asm 1: vpand <x3=reg256#1,<mask4=reg256#5,>v10=reg256#10 # asm 2: vpand <x3=%ymm0,<mask4=%ymm4,>v10=%ymm9 vpand % ymm0, % ymm4, % ymm9 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#10,<v10=reg256#10 # asm 2: vpsllq $1,<v10=%ymm9,<v10=%ymm9 vpsllq $1, % ymm9, % ymm9 # qhasm: v01 = x2 & mask5 # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12 # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11 
vpand % ymm11, % ymm5, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#1,<mask5=reg256#6,>v11=reg256#1 # asm 2: vpand <x3=%ymm0,<mask5=%ymm5,>v11=%ymm0 vpand % ymm0, % ymm5, % ymm0 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12 # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11 vpsrlq $1, % ymm11, % ymm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#10,>x2=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm9,>x2=%ymm8 vpor % ymm8, % ymm9, % ymm8 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#1,>x3=reg256#1 # asm 2: vpor <v01=%ymm11,<v11=%ymm0,>x3=%ymm0 vpor % ymm11, % ymm0, % ymm0 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#11,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x4=%ymm10,<mask4=%ymm4,>v00=%ymm9 vpand % ymm10, % ymm4, % ymm9 # qhasm: v10 = x5 & mask4 # asm 1: vpand <x5=reg256#3,<mask4=reg256#5,>v10=reg256#12 # asm 2: vpand <x5=%ymm2,<mask4=%ymm4,>v10=%ymm11 vpand % ymm2, % ymm4, % ymm11 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#12,<v10=reg256#12 # asm 2: vpsllq $1,<v10=%ymm11,<v10=%ymm11 vpsllq $1, % ymm11, % ymm11 # qhasm: v01 = x4 & mask5 # asm 1: vpand <x4=reg256#11,<mask5=reg256#6,>v01=reg256#11 # asm 2: vpand <x4=%ymm10,<mask5=%ymm5,>v01=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#3,<mask5=reg256#6,>v11=reg256#3 # asm 2: vpand <x5=%ymm2,<mask5=%ymm5,>v11=%ymm2 vpand % ymm2, % ymm5, % ymm2 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $1,<v01=%ymm10,<v01=%ymm10 vpsrlq $1, % ymm10, % ymm10 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#12,>x4=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm11,>x4=%ymm9 vpor % ymm9, % ymm11, % ymm9 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#3,>x5=reg256#3 # asm 2: vpor <v01=%ymm10,<v11=%ymm2,>x5=%ymm2 vpor % ymm10, % ymm2, % ymm2 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#11 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm10 vpand % ymm6, % ymm4, % ymm10 # qhasm: v10 = x7 & mask4 # asm 1: vpand <x7=reg256#2,<mask4=reg256#5,>v10=reg256#5 # asm 2: vpand <x7=%ymm1,<mask4=%ymm4,>v10=%ymm4 vpand % ymm1, % ymm4, % ymm4 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#5,<v10=reg256#5 # asm 2: vpsllq $1,<v10=%ymm4,<v10=%ymm4 vpsllq $1, % ymm4, % ymm4 # qhasm: v01 = x6 & mask5 # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7 # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6 vpand % ymm6, % ymm5, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#2,<mask5=reg256#6,>v11=reg256#2 # asm 2: vpand <x7=%ymm1,<mask5=%ymm5,>v11=%ymm1 vpand % ymm1, % ymm5, % ymm1 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6 vpsrlq $1, % ymm6, % ymm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#5,>x6=reg256#5 # asm 2: vpor <v00=%ymm10,<v10=%ymm4,>x6=%ymm4 vpor % ymm10, % ymm4, % ymm4 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#2,>x7=reg256#2 # asm 2: vpor <v01=%ymm6,<v11=%ymm1,>x7=%ymm1 vpor % ymm6, % ymm1, % ymm1 # qhasm: mem256[ input_0 + 1792 ] = x0 # asm 1: vmovupd <x0=reg256#4,1792(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm3,1792(<input_0=%rdi) vmovupd % ymm3, 1792( % rdi) # qhasm: mem256[ input_0 + 1824 ] = x1 # asm 1: vmovupd <x1=reg256#8,1824(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm7,1824(<input_0=%rdi) vmovupd % ymm7, 1824( % rdi) # qhasm: mem256[ input_0 + 1856 ] = x2 # asm 1: vmovupd 
<x2=reg256#9,1856(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm8,1856(<input_0=%rdi) vmovupd % ymm8, 1856( % rdi) # qhasm: mem256[ input_0 + 1888 ] = x3 # asm 1: vmovupd <x3=reg256#1,1888(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm0,1888(<input_0=%rdi) vmovupd % ymm0, 1888( % rdi) # qhasm: mem256[ input_0 + 1920 ] = x4 # asm 1: vmovupd <x4=reg256#10,1920(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm9,1920(<input_0=%rdi) vmovupd % ymm9, 1920( % rdi) # qhasm: mem256[ input_0 + 1952 ] = x5 # asm 1: vmovupd <x5=reg256#3,1952(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm2,1952(<input_0=%rdi) vmovupd % ymm2, 1952( % rdi) # qhasm: mem256[ input_0 + 1984 ] = x6 # asm 1: vmovupd <x6=reg256#5,1984(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm4,1984(<input_0=%rdi) vmovupd % ymm4, 1984( % rdi) # qhasm: mem256[ input_0 + 2016 ] = x7 # asm 1: vmovupd <x7=reg256#2,2016(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm1,2016(<input_0=%rdi) vmovupd % ymm1, 2016( % rdi) # qhasm: return add % r11, % rsp ret
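The routine that ends with the `ret` above is a qhasm-generated AVX2 bit-interleaving pass: in three rounds it pairs ymm registers that are 4, 2 and 1 positions apart, ANDs each pair against complementary masks (mask0..mask5, loaded from constants earlier in the file, outside this excerpt), shifts one half left and the other half right by 4, 2 or 1 bit positions, and ORs the halves back together before storing the results. Below is a minimal C sketch of that mask/shift/OR exchange on 64-bit words; it is an illustration under assumptions, not code from this repository — the helper name interleave_step, the mask constants (0x0f0f…, 0x3333…, 0x5555…) and the driver in main are invented here, and only the register-pairing pattern is taken from the assembly.

#include <stdint.h>
#include <stdio.h>

/* One exchange step of the mask/shift/OR network in the assembly above,
 * on 64-bit words instead of 256-bit ymm registers.  lo_mask selects the
 * bit groups that stay in place; the complementary mask (lo_mask << s)
 * selects the groups that move to the partner word.
 *
 *   a' = (a & lo_mask) | ((b & lo_mask) << s)   -- vpand / vpsllq / vpor
 *   b' = ((a & hi_mask) >> s) | (b & hi_mask)   -- vpand / vpsrlq / vpor
 */
static void interleave_step(uint64_t *a, uint64_t *b, uint64_t lo_mask, unsigned s)
{
    uint64_t hi_mask = lo_mask << s;
    uint64_t na = (*a & lo_mask) | ((*b & lo_mask) << s);
    uint64_t nb = ((*a & hi_mask) >> s) | (*b & hi_mask);
    *a = na;
    *b = nb;
}

int main(void)
{
    /* Eight words standing in for ymm6..ymm13 (x0..x7 in the qhasm names). */
    uint64_t x[8] = { 0x0123456789abcdefULL, 0xfedcba9876543210ULL,
                      0x00000000ffffffffULL, 0xffffffff00000000ULL,
                      0x5555555555555555ULL, 0xaaaaaaaaaaaaaaaaULL,
                      0x3333333333333333ULL, 0xccccccccccccccccULL };

    /* Same pairing pattern as the assembly: registers 4 apart with a 4-bit
     * shift, then 2 apart with a 2-bit shift, then adjacent with a 1-bit
     * shift.  The mask constants here are assumptions for illustration. */
    for (int i = 0; i < 4; i++)
        interleave_step(&x[i], &x[i + 4], 0x0f0f0f0f0f0f0f0fULL, 4);
    for (int i = 0; i < 8; i++)
        if (!(i & 2))
            interleave_step(&x[i], &x[i + 2], 0x3333333333333333ULL, 2);
    for (int i = 0; i < 8; i += 2)
        interleave_step(&x[i], &x[i + 1], 0x5555555555555555ULL, 1);

    for (int i = 0; i < 8; i++)
        printf("x[%d] = %016llx\n", i, (unsigned long long)x[i]);
    return 0;
}

The same exchange is often written as an XOR-based delta swap; the AND/shift/OR form sketched here maps one-to-one onto the vpand / vpsllq / vpsrlq / vpor sequence in the generated assembly, which keeps every lane operation independent and constant-time.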
mktmansour/MKT-KSA-Geolocation-Security
76,827
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6960119f/avx2/vec256_maa_asm.S
#include "namespace.h" #define vec256_maa_asm CRYPTO_NAMESPACE(vec256_maa_asm) #define _vec256_maa_asm _CRYPTO_NAMESPACE(vec256_maa_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 a0 # qhasm: reg256 a1 # qhasm: reg256 a2 # qhasm: reg256 a3 # qhasm: reg256 a4 # qhasm: reg256 a5 # qhasm: reg256 a6 # qhasm: reg256 a7 # qhasm: reg256 a8 # qhasm: reg256 a9 # qhasm: reg256 a10 # qhasm: reg256 a11 # qhasm: reg256 a12 # qhasm: reg256 b0 # qhasm: reg256 b1 # qhasm: reg256 r0 # qhasm: reg256 r1 # qhasm: reg256 r2 # qhasm: reg256 r3 # qhasm: reg256 r4 # qhasm: reg256 r5 # qhasm: reg256 r6 # qhasm: reg256 r7 # qhasm: reg256 r8 # qhasm: reg256 r9 # qhasm: reg256 r10 # qhasm: reg256 r11 # qhasm: reg256 r12 # qhasm: reg256 r13 # qhasm: reg256 r14 # qhasm: reg256 r15 # qhasm: reg256 r16 # qhasm: reg256 r17 # qhasm: reg256 r18 # qhasm: reg256 r19 # qhasm: reg256 r20 # qhasm: reg256 r21 # qhasm: reg256 r22 # qhasm: reg256 r23 # qhasm: reg256 r24 # qhasm: reg256 r # qhasm: enter vec256_maa_asm .p2align 5 .global _vec256_maa_asm .global vec256_maa_asm _vec256_maa_asm: vec256_maa_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: b0 = mem256[ input_2 + 0 ] # asm 1: vmovupd 0(<input_2=int64#3),>b0=reg256#1 # asm 2: vmovupd 0(<input_2=%rdx),>b0=%ymm0 vmovupd 0( % rdx), % ymm0 # qhasm: a12 = mem256[ input_1 + 384 ] # asm 1: vmovupd 384(<input_1=int64#2),>a12=reg256#2 # asm 2: vmovupd 384(<input_1=%rsi),>a12=%ymm1 vmovupd 384( % rsi), % ymm1 # qhasm: r12 = a12 & b0 # asm 1: vpand <a12=reg256#2,<b0=reg256#1,>r12=reg256#3 # asm 2: vpand <a12=%ymm1,<b0=%ymm0,>r12=%ymm2 vpand % ymm1, % ymm0, % ymm2 # qhasm: r13 = a12 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a12=reg256#2,>r13=reg256#4 # asm 2: vpand 32(<input_2=%rdx),<a12=%ymm1,>r13=%ymm3 vpand 32( % rdx), % ymm1, % ymm3 # qhasm: r14 = a12 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a12=reg256#2,>r14=reg256#5 # asm 2: vpand 64(<input_2=%rdx),<a12=%ymm1,>r14=%ymm4 vpand 64( % rdx), % ymm1, % ymm4 # qhasm: r15 = a12 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a12=reg256#2,>r15=reg256#6 # asm 2: vpand 96(<input_2=%rdx),<a12=%ymm1,>r15=%ymm5 vpand 96( % rdx), % ymm1, % ymm5 # qhasm: r16 = a12 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a12=reg256#2,>r16=reg256#7 # asm 2: vpand 128(<input_2=%rdx),<a12=%ymm1,>r16=%ymm6 vpand 128( % rdx), % ymm1, % ymm6 # qhasm: r17 = a12 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a12=reg256#2,>r17=reg256#8 # asm 2: vpand 160(<input_2=%rdx),<a12=%ymm1,>r17=%ymm7 vpand 160( % rdx), % ymm1, % ymm7 # qhasm: r18 = a12 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a12=reg256#2,>r18=reg256#9 # asm 2: vpand 192(<input_2=%rdx),<a12=%ymm1,>r18=%ymm8 vpand 192( % rdx), % ymm1, % ymm8 # qhasm: r19 = a12 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a12=reg256#2,>r19=reg256#10 # asm 2: vpand 224(<input_2=%rdx),<a12=%ymm1,>r19=%ymm9 vpand 224( % rdx), % ymm1, % ymm9 # qhasm: r20 = a12 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a12=reg256#2,>r20=reg256#11 # asm 2: vpand 256(<input_2=%rdx),<a12=%ymm1,>r20=%ymm10 vpand 256( % rdx), % ymm1, % ymm10 # qhasm: r21 
= a12 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a12=reg256#2,>r21=reg256#12 # asm 2: vpand 288(<input_2=%rdx),<a12=%ymm1,>r21=%ymm11 vpand 288( % rdx), % ymm1, % ymm11 # qhasm: r22 = a12 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a12=reg256#2,>r22=reg256#13 # asm 2: vpand 320(<input_2=%rdx),<a12=%ymm1,>r22=%ymm12 vpand 320( % rdx), % ymm1, % ymm12 # qhasm: r23 = a12 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a12=reg256#2,>r23=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a12=%ymm1,>r23=%ymm13 vpand 352( % rdx), % ymm1, % ymm13 # qhasm: r24 = a12 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a12=reg256#2,>r24=reg256#2 # asm 2: vpand 384(<input_2=%rdx),<a12=%ymm1,>r24=%ymm1 vpand 384( % rdx), % ymm1, % ymm1 # qhasm: r15 ^= r24 # asm 1: vpxor <r24=reg256#2,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r24=%ymm1,<r15=%ymm5,<r15=%ymm5 vpxor % ymm1, % ymm5, % ymm5 # qhasm: r14 ^= r24 # asm 1: vpxor <r24=reg256#2,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r24=%ymm1,<r14=%ymm4,<r14=%ymm4 vpxor % ymm1, % ymm4, % ymm4 # qhasm: r12 ^= r24 # asm 1: vpxor <r24=reg256#2,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r24=%ymm1,<r12=%ymm2,<r12=%ymm2 vpxor % ymm1, % ymm2, % ymm2 # qhasm: r11 = r24 # asm 1: vmovapd <r24=reg256#2,>r11=reg256#2 # asm 2: vmovapd <r24=%ymm1,>r11=%ymm1 vmovapd % ymm1, % ymm1 # qhasm: a11 = mem256[ input_1 + 352 ] # asm 1: vmovupd 352(<input_1=int64#2),>a11=reg256#15 # asm 2: vmovupd 352(<input_1=%rsi),>a11=%ymm14 vmovupd 352( % rsi), % ymm14 # qhasm: r = a11 & b0 # asm 1: vpand <a11=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a11=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a11 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a11 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a11 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a11 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a11 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor 
<r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a11 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a11 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a11 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a11 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a11 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a11 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r22 ^= r # asm 1: vpxor <r=reg256#16,<r22=reg256#13,<r22=reg256#13 # asm 2: vpxor <r=%ymm15,<r22=%ymm12,<r22=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a11 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a11=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a11=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r23 ^= r # asm 1: vpxor <r=reg256#15,<r23=reg256#14,<r23=reg256#14 # asm 2: vpxor <r=%ymm14,<r23=%ymm13,<r23=%ymm13 vpxor % ymm14, % ymm13, % ymm13 # qhasm: r14 ^= r23 # asm 1: vpxor <r23=reg256#14,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r23=%ymm13,<r14=%ymm4,<r14=%ymm4 vpxor % ymm13, % ymm4, % ymm4 # qhasm: r13 ^= r23 # asm 1: vpxor <r23=reg256#14,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r23=%ymm13,<r13=%ymm3,<r13=%ymm3 vpxor % ymm13, % ymm3, % ymm3 # qhasm: r11 ^= r23 # asm 1: vpxor <r23=reg256#14,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r23=%ymm13,<r11=%ymm1,<r11=%ymm1 vpxor % ymm13, % ymm1, % ymm1 # qhasm: r10 = r23 # asm 1: vmovapd <r23=reg256#14,>r10=reg256#14 # asm 2: vmovapd <r23=%ymm13,>r10=%ymm13 vmovapd % ymm13, % ymm13 # qhasm: a10 = mem256[ input_1 + 320 ] # asm 1: vmovupd 320(<input_1=int64#2),>a10=reg256#15 # asm 2: vmovupd 320(<input_1=%rsi),>a10=%ymm14 vmovupd 320( % rsi), % ymm14 # qhasm: r = a10 & b0 # asm 1: vpand <a10=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a10=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor 
<r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a10 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a10 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a10 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a10 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a10 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a10 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a10 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a10 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a10 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a10 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # 
qhasm: r = a10 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a10 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a10=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a10=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r22 ^= r # asm 1: vpxor <r=reg256#15,<r22=reg256#13,<r22=reg256#13 # asm 2: vpxor <r=%ymm14,<r22=%ymm12,<r22=%ymm12 vpxor % ymm14, % ymm12, % ymm12 # qhasm: r13 ^= r22 # asm 1: vpxor <r22=reg256#13,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r22=%ymm12,<r13=%ymm3,<r13=%ymm3 vpxor % ymm12, % ymm3, % ymm3 # qhasm: r12 ^= r22 # asm 1: vpxor <r22=reg256#13,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r22=%ymm12,<r12=%ymm2,<r12=%ymm2 vpxor % ymm12, % ymm2, % ymm2 # qhasm: r10 ^= r22 # asm 1: vpxor <r22=reg256#13,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r22=%ymm12,<r10=%ymm13,<r10=%ymm13 vpxor % ymm12, % ymm13, % ymm13 # qhasm: r9 = r22 # asm 1: vmovapd <r22=reg256#13,>r9=reg256#13 # asm 2: vmovapd <r22=%ymm12,>r9=%ymm12 vmovapd % ymm12, % ymm12 # qhasm: a9 = mem256[ input_1 + 288 ] # asm 1: vmovupd 288(<input_1=int64#2),>a9=reg256#15 # asm 2: vmovupd 288(<input_1=%rsi),>a9=%ymm14 vmovupd 288( % rsi), % ymm14 # qhasm: r = a9 & b0 # asm 1: vpand <a9=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a9=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a9 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a9 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a9 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a9 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a9 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a9 & mem256[input_2 + 192] # 
asm 1: vpand 192(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a9 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a9 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a9 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a9 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a9 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a9 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a9=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a9=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#15,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm14,<r21=%ymm11,<r21=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: r12 ^= r21 # asm 1: vpxor <r21=reg256#12,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r21=%ymm11,<r12=%ymm2,<r12=%ymm2 vpxor % ymm11, % ymm2, % ymm2 # qhasm: r11 ^= r21 # asm 1: vpxor <r21=reg256#12,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r21=%ymm11,<r11=%ymm1,<r11=%ymm1 vpxor % ymm11, % ymm1, % ymm1 # qhasm: r9 ^= r21 # asm 1: vpxor <r21=reg256#12,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r21=%ymm11,<r9=%ymm12,<r9=%ymm12 vpxor % ymm11, % ymm12, % ymm12 # qhasm: r8 = r21 # asm 1: vmovapd <r21=reg256#12,>r8=reg256#12 # asm 2: vmovapd <r21=%ymm11,>r8=%ymm11 vmovapd % ymm11, % ymm11 # qhasm: a8 = mem256[ input_1 + 256 ] # asm 1: vmovupd 256(<input_1=int64#2),>a8=reg256#15 # asm 2: vmovupd 256(<input_1=%rsi),>a8=%ymm14 vmovupd 256( % rsi), % ymm14 # qhasm: r = a8 & b0 # asm 1: vpand <a8=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a8=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a8 & mem256[input_2 + 32] # asm 1: vpand 
32(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a8 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a8 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a8 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a8 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a8 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a8 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a8 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a8 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a8 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a8 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 
352(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a8 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a8=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a8=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#15,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm14,<r20=%ymm10,<r20=%ymm10 vpxor % ymm14, % ymm10, % ymm10 # qhasm: r11 ^= r20 # asm 1: vpxor <r20=reg256#11,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r20=%ymm10,<r11=%ymm1,<r11=%ymm1 vpxor % ymm10, % ymm1, % ymm1 # qhasm: r10 ^= r20 # asm 1: vpxor <r20=reg256#11,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r20=%ymm10,<r10=%ymm13,<r10=%ymm13 vpxor % ymm10, % ymm13, % ymm13 # qhasm: r8 ^= r20 # asm 1: vpxor <r20=reg256#11,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r20=%ymm10,<r8=%ymm11,<r8=%ymm11 vpxor % ymm10, % ymm11, % ymm11 # qhasm: r7 = r20 # asm 1: vmovapd <r20=reg256#11,>r7=reg256#11 # asm 2: vmovapd <r20=%ymm10,>r7=%ymm10 vmovapd % ymm10, % ymm10 # qhasm: a7 = mem256[ input_1 + 224 ] # asm 1: vmovupd 224(<input_1=int64#2),>a7=reg256#15 # asm 2: vmovupd 224(<input_1=%rsi),>a7=%ymm14 vmovupd 224( % rsi), % ymm14 # qhasm: r = a7 & b0 # asm 1: vpand <a7=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a7=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a7 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a7 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a7 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a7 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a7 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a7 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 
192( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a7 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a7 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a7 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a7 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a7 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a7 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a7=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a7=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#15,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm14,<r19=%ymm9,<r19=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: r10 ^= r19 # asm 1: vpxor <r19=reg256#10,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r19=%ymm9,<r10=%ymm13,<r10=%ymm13 vpxor % ymm9, % ymm13, % ymm13 # qhasm: r9 ^= r19 # asm 1: vpxor <r19=reg256#10,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r19=%ymm9,<r9=%ymm12,<r9=%ymm12 vpxor % ymm9, % ymm12, % ymm12 # qhasm: r7 ^= r19 # asm 1: vpxor <r19=reg256#10,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r19=%ymm9,<r7=%ymm10,<r7=%ymm10 vpxor % ymm9, % ymm10, % ymm10 # qhasm: r6 = r19 # asm 1: vmovapd <r19=reg256#10,>r6=reg256#10 # asm 2: vmovapd <r19=%ymm9,>r6=%ymm9 vmovapd % ymm9, % ymm9 # qhasm: a6 = mem256[ input_1 + 192 ] # asm 1: vmovupd 192(<input_1=int64#2),>a6=reg256#15 # asm 2: vmovupd 192(<input_1=%rsi),>a6=%ymm14 vmovupd 192( % rsi), % ymm14 # qhasm: r = a6 & b0 # asm 1: vpand <a6=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a6=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a6 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor 
<r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a6 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a6 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a6 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a6 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a6 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a6 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a6 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a6 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a6 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a6 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 
vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a6 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a6=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a6=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#15,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm14,<r18=%ymm8,<r18=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: r9 ^= r18 # asm 1: vpxor <r18=reg256#9,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r18=%ymm8,<r9=%ymm12,<r9=%ymm12 vpxor % ymm8, % ymm12, % ymm12 # qhasm: r8 ^= r18 # asm 1: vpxor <r18=reg256#9,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r18=%ymm8,<r8=%ymm11,<r8=%ymm11 vpxor % ymm8, % ymm11, % ymm11 # qhasm: r6 ^= r18 # asm 1: vpxor <r18=reg256#9,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r18=%ymm8,<r6=%ymm9,<r6=%ymm9 vpxor % ymm8, % ymm9, % ymm9 # qhasm: r5 = r18 # asm 1: vmovapd <r18=reg256#9,>r5=reg256#9 # asm 2: vmovapd <r18=%ymm8,>r5=%ymm8 vmovapd % ymm8, % ymm8 # qhasm: a5 = mem256[ input_1 + 160 ] # asm 1: vmovupd 160(<input_1=int64#2),>a5=reg256#15 # asm 2: vmovupd 160(<input_1=%rsi),>a5=%ymm14 vmovupd 160( % rsi), % ymm14 # qhasm: r = a5 & b0 # asm 1: vpand <a5=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a5=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a5 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a5 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a5 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a5 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a5 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a5 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a5 & mem256[input_2 + 224] # asm 1: vpand 
224(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a5 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a5 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a5 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a5 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a5 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a5=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a5=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#15,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm14,<r17=%ymm7,<r17=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: r8 ^= r17 # asm 1: vpxor <r17=reg256#8,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r17=%ymm7,<r8=%ymm11,<r8=%ymm11 vpxor % ymm7, % ymm11, % ymm11 # qhasm: r7 ^= r17 # asm 1: vpxor <r17=reg256#8,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r17=%ymm7,<r7=%ymm10,<r7=%ymm10 vpxor % ymm7, % ymm10, % ymm10 # qhasm: r5 ^= r17 # asm 1: vpxor <r17=reg256#8,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r17=%ymm7,<r5=%ymm8,<r5=%ymm8 vpxor % ymm7, % ymm8, % ymm8 # qhasm: r4 = r17 # asm 1: vmovapd <r17=reg256#8,>r4=reg256#8 # asm 2: vmovapd <r17=%ymm7,>r4=%ymm7 vmovapd % ymm7, % ymm7 # qhasm: a4 = mem256[ input_1 + 128 ] # asm 1: vmovupd 128(<input_1=int64#2),>a4=reg256#15 # asm 2: vmovupd 128(<input_1=%rsi),>a4=%ymm14 vmovupd 128( % rsi), % ymm14 # qhasm: r = a4 & b0 # asm 1: vpand <a4=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a4=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a4 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a4 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 
64(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a4 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a4 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a4 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a4 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a4 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a4 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a4 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a4 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a4 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a4 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a4=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a4=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r16 ^= r 
# asm 1: vpxor <r=reg256#15,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm14,<r16=%ymm6,<r16=%ymm6 vpxor % ymm14, % ymm6, % ymm6 # qhasm: r7 ^= r16 # asm 1: vpxor <r16=reg256#7,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r16=%ymm6,<r7=%ymm10,<r7=%ymm10 vpxor % ymm6, % ymm10, % ymm10 # qhasm: r6 ^= r16 # asm 1: vpxor <r16=reg256#7,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r16=%ymm6,<r6=%ymm9,<r6=%ymm9 vpxor % ymm6, % ymm9, % ymm9 # qhasm: r4 ^= r16 # asm 1: vpxor <r16=reg256#7,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r16=%ymm6,<r4=%ymm7,<r4=%ymm7 vpxor % ymm6, % ymm7, % ymm7 # qhasm: r3 = r16 # asm 1: vmovapd <r16=reg256#7,>r3=reg256#7 # asm 2: vmovapd <r16=%ymm6,>r3=%ymm6 vmovapd % ymm6, % ymm6 # qhasm: a3 = mem256[ input_1 + 96 ] # asm 1: vmovupd 96(<input_1=int64#2),>a3=reg256#15 # asm 2: vmovupd 96(<input_1=%rsi),>a3=%ymm14 vmovupd 96( % rsi), % ymm14 # qhasm: r = a3 & b0 # asm 1: vpand <a3=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a3=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a3 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a3 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a3 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a3 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a3 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a3 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a3 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % 
ymm15, % ymm13, % ymm13 # qhasm: r = a3 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a3 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a3 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a3 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a3 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a3=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a3=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#15,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm14,<r15=%ymm5,<r15=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: r6 ^= r15 # asm 1: vpxor <r15=reg256#6,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r15=%ymm5,<r6=%ymm9,<r6=%ymm9 vpxor % ymm5, % ymm9, % ymm9 # qhasm: r5 ^= r15 # asm 1: vpxor <r15=reg256#6,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r15=%ymm5,<r5=%ymm8,<r5=%ymm8 vpxor % ymm5, % ymm8, % ymm8 # qhasm: r3 ^= r15 # asm 1: vpxor <r15=reg256#6,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r15=%ymm5,<r3=%ymm6,<r3=%ymm6 vpxor % ymm5, % ymm6, % ymm6 # qhasm: r2 = r15 # asm 1: vmovapd <r15=reg256#6,>r2=reg256#6 # asm 2: vmovapd <r15=%ymm5,>r2=%ymm5 vmovapd % ymm5, % ymm5 # qhasm: a2 = mem256[ input_1 + 64 ] # asm 1: vmovupd 64(<input_1=int64#2),>a2=reg256#15 # asm 2: vmovupd 64(<input_1=%rsi),>a2=%ymm14 vmovupd 64( % rsi), % ymm14 # qhasm: r = a2 & b0 # asm 1: vpand <a2=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a2=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a2 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a2 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a2 & mem256[input_2 + 96] # asm 1: vpand 
96(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a2 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a2 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a2 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a2 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a2 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a2 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a2 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a2 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a2 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a2=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a2=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm14,<r14=%ymm4,<r14=%ymm4 vpxor % ymm14, % ymm4, % ymm4 # qhasm: r5 ^= r14 # asm 1: vpxor <r14=reg256#5,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r14=%ymm4,<r5=%ymm8,<r5=%ymm8 vpxor % ymm4, % ymm8, % ymm8 # 
qhasm: r4 ^= r14 # asm 1: vpxor <r14=reg256#5,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r14=%ymm4,<r4=%ymm7,<r4=%ymm7 vpxor % ymm4, % ymm7, % ymm7 # qhasm: r2 ^= r14 # asm 1: vpxor <r14=reg256#5,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r14=%ymm4,<r2=%ymm5,<r2=%ymm5 vpxor % ymm4, % ymm5, % ymm5 # qhasm: r1 = r14 # asm 1: vmovapd <r14=reg256#5,>r1=reg256#5 # asm 2: vmovapd <r14=%ymm4,>r1=%ymm4 vmovapd % ymm4, % ymm4 # qhasm: a1 = mem256[ input_1 + 32 ] # asm 1: vmovupd 32(<input_1=int64#2),>a1=reg256#15 # asm 2: vmovupd 32(<input_1=%rsi),>a1=%ymm14 vmovupd 32( % rsi), % ymm14 # qhasm: r = a1 & b0 # asm 1: vpand <a1=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a1=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#16,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm15,<r1=%ymm4,<r1=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a1 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a1 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a1 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a1 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a1 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a1 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a1 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a1 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor 
<r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a1 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a1 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a1 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a1 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a1=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a1=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm14,<r13=%ymm3,<r13=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: r4 ^= r13 # asm 1: vpxor <r13=reg256#4,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r13=%ymm3,<r4=%ymm7,<r4=%ymm7 vpxor % ymm3, % ymm7, % ymm7 # qhasm: r3 ^= r13 # asm 1: vpxor <r13=reg256#4,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r13=%ymm3,<r3=%ymm6,<r3=%ymm6 vpxor % ymm3, % ymm6, % ymm6 # qhasm: r1 ^= r13 # asm 1: vpxor <r13=reg256#4,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r13=%ymm3,<r1=%ymm4,<r1=%ymm4 vpxor % ymm3, % ymm4, % ymm4 # qhasm: r0 = r13 # asm 1: vmovapd <r13=reg256#4,>r0=reg256#4 # asm 2: vmovapd <r13=%ymm3,>r0=%ymm3 vmovapd % ymm3, % ymm3 # qhasm: a0 = mem256[ input_1 + 0 ] # asm 1: vmovupd 0(<input_1=int64#2),>a0=reg256#15 # asm 2: vmovupd 0(<input_1=%rsi),>a0=%ymm14 vmovupd 0( % rsi), % ymm14 # qhasm: r = a0 & b0 # asm 1: vpand <a0=reg256#15,<b0=reg256#1,>r=reg256#1 # asm 2: vpand <a0=%ymm14,<b0=%ymm0,>r=%ymm0 vpand % ymm14, % ymm0, % ymm0 # qhasm: r0 ^= r # asm 1: vpxor <r=reg256#1,<r0=reg256#4,<r0=reg256#4 # asm 2: vpxor <r=%ymm0,<r0=%ymm3,<r0=%ymm3 vpxor % ymm0, % ymm3, % ymm3 # qhasm: r = a0 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 32(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 32( % rdx), % ymm14, % ymm0 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#1,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm0,<r1=%ymm4,<r1=%ymm4 vpxor % ymm0, % ymm4, % ymm4 # qhasm: r = a0 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 64(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 64( % rdx), % ymm14, % ymm0 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#1,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm0,<r2=%ymm5,<r2=%ymm5 vpxor % ymm0, % ymm5, % ymm5 # qhasm: r = a0 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 96(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 96( % rdx), % ymm14, % ymm0 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#1,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm0,<r3=%ymm6,<r3=%ymm6 vpxor % ymm0, % ymm6, % ymm6 # qhasm: r = a0 & mem256[input_2 + 128] # asm 1: vpand 
128(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 128(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 128( % rdx), % ymm14, % ymm0 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#1,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm0,<r4=%ymm7,<r4=%ymm7 vpxor % ymm0, % ymm7, % ymm7 # qhasm: r = a0 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 160(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 160( % rdx), % ymm14, % ymm0 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#1,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm0,<r5=%ymm8,<r5=%ymm8 vpxor % ymm0, % ymm8, % ymm8 # qhasm: r = a0 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 192(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 192( % rdx), % ymm14, % ymm0 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#1,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm0,<r6=%ymm9,<r6=%ymm9 vpxor % ymm0, % ymm9, % ymm9 # qhasm: r = a0 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 224(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 224( % rdx), % ymm14, % ymm0 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#1,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm0,<r7=%ymm10,<r7=%ymm10 vpxor % ymm0, % ymm10, % ymm10 # qhasm: r = a0 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 256(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 256( % rdx), % ymm14, % ymm0 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#1,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm0,<r8=%ymm11,<r8=%ymm11 vpxor % ymm0, % ymm11, % ymm11 # qhasm: r = a0 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 288(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 288( % rdx), % ymm14, % ymm0 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#1,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm0,<r9=%ymm12,<r9=%ymm12 vpxor % ymm0, % ymm12, % ymm12 # qhasm: r = a0 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 320(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 320( % rdx), % ymm14, % ymm0 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#1,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm0,<r10=%ymm13,<r10=%ymm13 vpxor % ymm0, % ymm13, % ymm13 # qhasm: r = a0 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 352(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 352( % rdx), % ymm14, % ymm0 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#1,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm0,<r11=%ymm1,<r11=%ymm1 vpxor % ymm0, % ymm1, % ymm1 # qhasm: r = a0 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 384(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 384( % rdx), % ymm14, % ymm0 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#1,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm0,<r12=%ymm2,<r12=%ymm2 vpxor % ymm0, % ymm2, % ymm2 # qhasm: r12 = r12 ^ mem256[ input_0 + 384 ] # asm 1: vpxor 384(<input_0=int64#1),<r12=reg256#3,>r12=reg256#1 # asm 2: vpxor 384(<input_0=%rdi),<r12=%ymm2,>r12=%ymm0 vpxor 384( % rdi), % ymm2, % ymm0 # qhasm: mem256[ input_0 + 384 ] = r12 # asm 1: vmovupd <r12=reg256#1,384(<input_0=int64#1) # asm 2: vmovupd <r12=%ymm0,384(<input_0=%rdi) vmovupd % ymm0, 384( % rdi) # qhasm: r12 = r12 ^ mem256[ input_1 + 384 ] # asm 1: vpxor 384(<input_1=int64#2),<r12=reg256#1,>r12=reg256#1 # asm 2: vpxor 384(<input_1=%rsi),<r12=%ymm0,>r12=%ymm0 vpxor 384( % rsi), % 
ymm0, % ymm0
# qhasm: mem256[ input_1 + 384 ] = r12
vmovupd %ymm0,384(%rsi)
# qhasm: r11 = r11 ^ mem256[ input_0 + 352 ]
vpxor 352(%rdi),%ymm1,%ymm0
# qhasm: mem256[ input_0 + 352 ] = r11
vmovupd %ymm0,352(%rdi)
# qhasm: r11 = r11 ^ mem256[ input_1 + 352 ]
vpxor 352(%rsi),%ymm0,%ymm0
# qhasm: mem256[ input_1 + 352 ] = r11
vmovupd %ymm0,352(%rsi)
# qhasm: r10 = r10 ^ mem256[ input_0 + 320 ]
vpxor 320(%rdi),%ymm13,%ymm0
# qhasm: mem256[ input_0 + 320 ] = r10
vmovupd %ymm0,320(%rdi)
# qhasm: r10 = r10 ^ mem256[ input_1 + 320 ]
vpxor 320(%rsi),%ymm0,%ymm0
# qhasm: mem256[ input_1 + 320 ] = r10
vmovupd %ymm0,320(%rsi)
# qhasm: r9 = r9 ^ mem256[ input_0 + 288 ]
vpxor 288(%rdi),%ymm12,%ymm0
# qhasm: mem256[ input_0 + 288 ] = r9
vmovupd %ymm0,288(%rdi)
# qhasm: r9 = r9 ^ mem256[ input_1 + 288 ]
vpxor 288(%rsi),%ymm0,%ymm0
# qhasm: mem256[ input_1 + 288 ] = r9
vmovupd %ymm0,288(%rsi)
# qhasm: r8 = r8 ^ mem256[ input_0 + 256 ]
vpxor 256(%rdi),%ymm11,%ymm0
# qhasm: mem256[ input_0 + 256 ] = r8
vmovupd %ymm0,256(%rdi)
# qhasm: r8 = r8 ^ mem256[ input_1 + 256 ]
vpxor 256(%rsi),%ymm0,%ymm0
# qhasm: mem256[ input_1 + 256 ] = r8
vmovupd %ymm0,256(%rsi)
# qhasm: r7 = r7 ^ mem256[ input_0 + 224 ]
vpxor 224(%rdi),%ymm10,%ymm0
# qhasm: mem256[ input_0 + 224 ] = r7
vmovupd %ymm0,224(%rdi)
# qhasm: r7 = r7 ^ mem256[ input_1 + 224 ]
vpxor 224(%rsi),%ymm0,%ymm0
# qhasm: mem256[ input_1 + 224 ] = r7
vmovupd %ymm0,224(%rsi)
# qhasm: r6 = r6 ^ mem256[ input_0 + 192 ]
vpxor 192(%rdi),%ymm9,%ymm0
# qhasm: mem256[ input_0 + 192 ] = r6
vmovupd %ymm0,192(%rdi)
# qhasm: r6 = r6 ^ mem256[ input_1 + 192 ]
vpxor 192(%rsi),%ymm0,%ymm0
# qhasm: mem256[ input_1 + 192 ] = r6
vmovupd %ymm0,192(%rsi)
# qhasm: r5 = r5 ^ mem256[ input_0 + 160 ]
vpxor 160(%rdi),%ymm8,%ymm0
# qhasm: mem256[ input_0 + 160 ] = r5
vmovupd %ymm0,160(%rdi)
# qhasm: r5 = r5 ^ mem256[ input_1 + 160 ]
vpxor 160(%rsi),%ymm0,%ymm0
# qhasm: mem256[ input_1 + 160 ] = r5
vmovupd %ymm0,160(%rsi)
# qhasm: r4 = r4 ^ mem256[ input_0 + 128 ]
vpxor 128(%rdi),%ymm7,%ymm0
# qhasm: mem256[ input_0 + 128 ] = r4
vmovupd %ymm0,128(%rdi)
# qhasm: r4 = r4 ^ mem256[ input_1 + 128 ]
vpxor 128(%rsi),%ymm0,%ymm0
# qhasm: mem256[ input_1 + 128 ] = r4
vmovupd %ymm0,128(%rsi)
# qhasm: r3 = r3 ^ mem256[ input_0 + 96 ]
vpxor 96(%rdi),%ymm6,%ymm0
# qhasm: mem256[ input_0 + 96 ] = r3
vmovupd %ymm0,96(%rdi)
# qhasm: r3 = r3 ^ mem256[ input_1 + 96 ]
vpxor 96(%rsi),%ymm0,%ymm0
# qhasm: mem256[ input_1 + 96 ] = r3
vmovupd %ymm0,96(%rsi)
# qhasm: r2 = r2 ^ mem256[ input_0 + 64 ]
vpxor 64(%rdi),%ymm5,%ymm0
# qhasm: mem256[ input_0 + 64 ] = r2
vmovupd %ymm0,64(%rdi)
# qhasm: r2 = r2 ^ mem256[ input_1 + 64 ]
vpxor 64(%rsi),%ymm0,%ymm0
# qhasm: mem256[ input_1 + 64 ] = r2
vmovupd %ymm0,64(%rsi)
# qhasm: r1 = r1 ^ mem256[ input_0 + 32 ]
vpxor 32(%rdi),%ymm4,%ymm0
# qhasm: mem256[ input_0 + 32 ] = r1
vmovupd %ymm0,32(%rdi)
# qhasm: r1 = r1 ^ mem256[ input_1 + 32 ]
vpxor 32(%rsi),%ymm0,%ymm0
# qhasm: mem256[ input_1 + 32 ] = r1
vmovupd %ymm0,32(%rsi)
# qhasm: r0 = r0 ^ mem256[ input_0 + 0 ]
vpxor 0(%rdi),%ymm3,%ymm0
# qhasm: mem256[ input_0 + 0 ] = r0
vmovupd %ymm0,0(%rdi)
# qhasm: r0 = r0 ^ mem256[ input_1 + 0 ]
vpxor 0(%rsi),%ymm0,%ymm0
# qhasm: mem256[ input_1 + 0 ] = r0
vmovupd %ymm0,0(%rsi)
# qhasm: return
add %r11,%rsp
ret
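As a reading aid for the qhasm-generated epilogue above, here is a minimal C intrinsics sketch (not part of the original source; the function and parameter names are illustrative): each 256-bit result word, already held in a ymm register, is XORed into the buffer at input_0 in place, and the updated word is then XORed into the buffer at input_1.

#include <stddef.h>
#include <immintrin.h>

/* Illustrative sketch of the per-word store pattern above:
 *   in0[off] ^= r;  in1[off] ^= in0[off];
 * assuming r is one of the 256-bit values the routine keeps in registers. */
static void fold_word(__m256i r, unsigned char *in0, unsigned char *in1, size_t off)
{
    __m256i t = _mm256_xor_si256(r, _mm256_loadu_si256((const __m256i *)(in0 + off)));
    _mm256_storeu_si256((__m256i *)(in0 + off), t);
    t = _mm256_xor_si256(t, _mm256_loadu_si256((const __m256i *)(in1 + off)));
    _mm256_storeu_si256((__m256i *)(in1 + off), t);
}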
mktmansour/MKT-KSA-Geolocation-Security
2,723
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/ml-kem-1024/avx2/basemul.S
#include "cdecl.h" .macro schoolbook off vmovdqa _16XQINV*2(%rcx),%ymm0 vmovdqa (64*\off+ 0)*2(%rsi),%ymm1 # a0 vmovdqa (64*\off+16)*2(%rsi),%ymm2 # b0 vmovdqa (64*\off+32)*2(%rsi),%ymm3 # a1 vmovdqa (64*\off+48)*2(%rsi),%ymm4 # b1 vpmullw %ymm0,%ymm1,%ymm9 # a0.lo vpmullw %ymm0,%ymm2,%ymm10 # b0.lo vpmullw %ymm0,%ymm3,%ymm11 # a1.lo vpmullw %ymm0,%ymm4,%ymm12 # b1.lo vmovdqa (64*\off+ 0)*2(%rdx),%ymm5 # c0 vmovdqa (64*\off+16)*2(%rdx),%ymm6 # d0 vpmulhw %ymm5,%ymm1,%ymm13 # a0c0.hi vpmulhw %ymm6,%ymm1,%ymm1 # a0d0.hi vpmulhw %ymm5,%ymm2,%ymm14 # b0c0.hi vpmulhw %ymm6,%ymm2,%ymm2 # b0d0.hi vmovdqa (64*\off+32)*2(%rdx),%ymm7 # c1 vmovdqa (64*\off+48)*2(%rdx),%ymm8 # d1 vpmulhw %ymm7,%ymm3,%ymm15 # a1c1.hi vpmulhw %ymm8,%ymm3,%ymm3 # a1d1.hi vpmulhw %ymm7,%ymm4,%ymm0 # b1c1.hi vpmulhw %ymm8,%ymm4,%ymm4 # b1d1.hi vmovdqa %ymm13,(%rsp) vpmullw %ymm5,%ymm9,%ymm13 # a0c0.lo vpmullw %ymm6,%ymm9,%ymm9 # a0d0.lo vpmullw %ymm5,%ymm10,%ymm5 # b0c0.lo vpmullw %ymm6,%ymm10,%ymm10 # b0d0.lo vpmullw %ymm7,%ymm11,%ymm6 # a1c1.lo vpmullw %ymm8,%ymm11,%ymm11 # a1d1.lo vpmullw %ymm7,%ymm12,%ymm7 # b1c1.lo vpmullw %ymm8,%ymm12,%ymm12 # b1d1.lo vmovdqa _16XQ*2(%rcx),%ymm8 vpmulhw %ymm8,%ymm13,%ymm13 vpmulhw %ymm8,%ymm9,%ymm9 vpmulhw %ymm8,%ymm5,%ymm5 vpmulhw %ymm8,%ymm10,%ymm10 vpmulhw %ymm8,%ymm6,%ymm6 vpmulhw %ymm8,%ymm11,%ymm11 vpmulhw %ymm8,%ymm7,%ymm7 vpmulhw %ymm8,%ymm12,%ymm12 vpsubw (%rsp),%ymm13,%ymm13 # -a0c0 vpsubw %ymm9,%ymm1,%ymm9 # a0d0 vpsubw %ymm5,%ymm14,%ymm5 # b0c0 vpsubw %ymm10,%ymm2,%ymm10 # b0d0 vpsubw %ymm6,%ymm15,%ymm6 # a1c1 vpsubw %ymm11,%ymm3,%ymm11 # a1d1 vpsubw %ymm7,%ymm0,%ymm7 # b1c1 vpsubw %ymm12,%ymm4,%ymm12 # b1d1 vmovdqa (%r9),%ymm0 vmovdqa 32(%r9),%ymm1 vpmullw %ymm0,%ymm10,%ymm2 vpmullw %ymm0,%ymm12,%ymm3 vpmulhw %ymm1,%ymm10,%ymm10 vpmulhw %ymm1,%ymm12,%ymm12 vpmulhw %ymm8,%ymm2,%ymm2 vpmulhw %ymm8,%ymm3,%ymm3 vpsubw %ymm2,%ymm10,%ymm10 # rb0d0 vpsubw %ymm3,%ymm12,%ymm12 # rb1d1 vpaddw %ymm5,%ymm9,%ymm9 vpaddw %ymm7,%ymm11,%ymm11 vpsubw %ymm13,%ymm10,%ymm13 vpsubw %ymm12,%ymm6,%ymm6 vmovdqa %ymm13,(64*\off+ 0)*2(%rdi) vmovdqa %ymm9,(64*\off+16)*2(%rdi) vmovdqa %ymm6,(64*\off+32)*2(%rdi) vmovdqa %ymm11,(64*\off+48)*2(%rdi) .endm .text .global cdecl(PQCLEAN_MLKEM1024_AVX2_basemul_avx) .global _cdecl(PQCLEAN_MLKEM1024_AVX2_basemul_avx) cdecl(PQCLEAN_MLKEM1024_AVX2_basemul_avx): _cdecl(PQCLEAN_MLKEM1024_AVX2_basemul_avx): mov %rsp,%r8 and $-32,%rsp sub $32,%rsp lea (_ZETAS_EXP+176)*2(%rcx),%r9 schoolbook 0 add $32*2,%r9 schoolbook 1 add $192*2,%r9 schoolbook 2 add $32*2,%r9 schoolbook 3 mov %r8,%rsp ret
mktmansour/MKT-KSA-Geolocation-Security
4,688
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/ml-kem-1024/avx2/shuffle.S
#include "cdecl.h" .include "fq.inc" .include "shuffle.inc" /* nttpack_avx: #load vmovdqa (%rdi),%ymm4 vmovdqa 32(%rdi),%ymm5 vmovdqa 64(%rdi),%ymm6 vmovdqa 96(%rdi),%ymm7 vmovdqa 128(%rdi),%ymm8 vmovdqa 160(%rdi),%ymm9 vmovdqa 192(%rdi),%ymm10 vmovdqa 224(%rdi),%ymm11 shuffle1 4,5,3,5 shuffle1 6,7,4,7 shuffle1 8,9,6,9 shuffle1 10,11,8,11 shuffle2 3,4,10,4 shuffle2 6,8,3,8 shuffle2 5,7,6,7 shuffle2 9,11,5,11 shuffle4 10,3,9,3 shuffle4 6,5,10,5 shuffle4 4,8,6,8 shuffle4 7,11,4,11 shuffle8 9,10,7,10 shuffle8 6,4,9,4 shuffle8 3,5,6,5 shuffle8 8,11,3,11 #store vmovdqa %ymm7,(%rdi) vmovdqa %ymm9,32(%rdi) vmovdqa %ymm6,64(%rdi) vmovdqa %ymm3,96(%rdi) vmovdqa %ymm10,128(%rdi) vmovdqa %ymm4,160(%rdi) vmovdqa %ymm5,192(%rdi) vmovdqa %ymm11,224(%rdi) ret */ .text nttunpack128_avx: #load vmovdqa (%rdi),%ymm4 vmovdqa 32(%rdi),%ymm5 vmovdqa 64(%rdi),%ymm6 vmovdqa 96(%rdi),%ymm7 vmovdqa 128(%rdi),%ymm8 vmovdqa 160(%rdi),%ymm9 vmovdqa 192(%rdi),%ymm10 vmovdqa 224(%rdi),%ymm11 shuffle8 4,8,3,8 shuffle8 5,9,4,9 shuffle8 6,10,5,10 shuffle8 7,11,6,11 shuffle4 3,5,7,5 shuffle4 8,10,3,10 shuffle4 4,6,8,6 shuffle4 9,11,4,11 shuffle2 7,8,9,8 shuffle2 5,6,7,6 shuffle2 3,4,5,4 shuffle2 10,11,3,11 shuffle1 9,5,10,5 shuffle1 8,4,9,4 shuffle1 7,3,8,3 shuffle1 6,11,7,11 #store vmovdqa %ymm10,(%rdi) vmovdqa %ymm5,32(%rdi) vmovdqa %ymm9,64(%rdi) vmovdqa %ymm4,96(%rdi) vmovdqa %ymm8,128(%rdi) vmovdqa %ymm3,160(%rdi) vmovdqa %ymm7,192(%rdi) vmovdqa %ymm11,224(%rdi) ret .global cdecl(PQCLEAN_MLKEM1024_AVX2_nttunpack_avx) .global _cdecl(PQCLEAN_MLKEM1024_AVX2_nttunpack_avx) cdecl(PQCLEAN_MLKEM1024_AVX2_nttunpack_avx): _cdecl(PQCLEAN_MLKEM1024_AVX2_nttunpack_avx): call nttunpack128_avx add $256,%rdi call nttunpack128_avx ret ntttobytes128_avx: #load vmovdqa (%rsi),%ymm5 vmovdqa 32(%rsi),%ymm6 vmovdqa 64(%rsi),%ymm7 vmovdqa 96(%rsi),%ymm8 vmovdqa 128(%rsi),%ymm9 vmovdqa 160(%rsi),%ymm10 vmovdqa 192(%rsi),%ymm11 vmovdqa 224(%rsi),%ymm12 #csubq csubq 5,13 csubq 6,13 csubq 7,13 csubq 8,13 csubq 9,13 csubq 10,13 csubq 11,13 csubq 12,13 #bitpack vpsllw $12,%ymm6,%ymm4 vpor %ymm4,%ymm5,%ymm4 vpsrlw $4,%ymm6,%ymm5 vpsllw $8,%ymm7,%ymm6 vpor %ymm5,%ymm6,%ymm5 vpsrlw $8,%ymm7,%ymm6 vpsllw $4,%ymm8,%ymm7 vpor %ymm6,%ymm7,%ymm6 vpsllw $12,%ymm10,%ymm7 vpor %ymm7,%ymm9,%ymm7 vpsrlw $4,%ymm10,%ymm8 vpsllw $8,%ymm11,%ymm9 vpor %ymm8,%ymm9,%ymm8 vpsrlw $8,%ymm11,%ymm9 vpsllw $4,%ymm12,%ymm10 vpor %ymm9,%ymm10,%ymm9 shuffle1 4,5,3,5 shuffle1 6,7,4,7 shuffle1 8,9,6,9 shuffle2 3,4,8,4 shuffle2 6,5,3,5 shuffle2 7,9,6,9 shuffle4 8,3,7,3 shuffle4 6,4,8,4 shuffle4 5,9,6,9 shuffle8 7,8,5,8 shuffle8 6,3,7,3 shuffle8 4,9,6,9 #store vmovdqu %ymm5,(%rdi) vmovdqu %ymm7,32(%rdi) vmovdqu %ymm6,64(%rdi) vmovdqu %ymm8,96(%rdi) vmovdqu %ymm3,128(%rdi) vmovdqu %ymm9,160(%rdi) ret .global cdecl(PQCLEAN_MLKEM1024_AVX2_ntttobytes_avx) .global _cdecl(PQCLEAN_MLKEM1024_AVX2_ntttobytes_avx) cdecl(PQCLEAN_MLKEM1024_AVX2_ntttobytes_avx): _cdecl(PQCLEAN_MLKEM1024_AVX2_ntttobytes_avx): #consts vmovdqa _16XQ*2(%rdx),%ymm0 call ntttobytes128_avx add $256,%rsi add $192,%rdi call ntttobytes128_avx ret nttfrombytes128_avx: #load vmovdqu (%rsi),%ymm4 vmovdqu 32(%rsi),%ymm5 vmovdqu 64(%rsi),%ymm6 vmovdqu 96(%rsi),%ymm7 vmovdqu 128(%rsi),%ymm8 vmovdqu 160(%rsi),%ymm9 shuffle8 4,7,3,7 shuffle8 5,8,4,8 shuffle8 6,9,5,9 shuffle4 3,8,6,8 shuffle4 7,5,3,5 shuffle4 4,9,7,9 shuffle2 6,5,4,5 shuffle2 8,7,6,7 shuffle2 3,9,8,9 shuffle1 4,7,10,7 shuffle1 5,8,4,8 shuffle1 6,9,5,9 #bitunpack vpsrlw $12,%ymm10,%ymm11 vpsllw $4,%ymm7,%ymm12 vpor %ymm11,%ymm12,%ymm11 vpand %ymm0,%ymm10,%ymm10 
vpand %ymm0,%ymm11,%ymm11 vpsrlw $8,%ymm7,%ymm12 vpsllw $8,%ymm4,%ymm13 vpor %ymm12,%ymm13,%ymm12 vpand %ymm0,%ymm12,%ymm12 vpsrlw $4,%ymm4,%ymm13 vpand %ymm0,%ymm13,%ymm13 vpsrlw $12,%ymm8,%ymm14 vpsllw $4,%ymm5,%ymm15 vpor %ymm14,%ymm15,%ymm14 vpand %ymm0,%ymm8,%ymm8 vpand %ymm0,%ymm14,%ymm14 vpsrlw $8,%ymm5,%ymm15 vpsllw $8,%ymm9,%ymm1 vpor %ymm15,%ymm1,%ymm15 vpand %ymm0,%ymm15,%ymm15 vpsrlw $4,%ymm9,%ymm1 vpand %ymm0,%ymm1,%ymm1 #store vmovdqa %ymm10,(%rdi) vmovdqa %ymm11,32(%rdi) vmovdqa %ymm12,64(%rdi) vmovdqa %ymm13,96(%rdi) vmovdqa %ymm8,128(%rdi) vmovdqa %ymm14,160(%rdi) vmovdqa %ymm15,192(%rdi) vmovdqa %ymm1,224(%rdi) ret .global cdecl(PQCLEAN_MLKEM1024_AVX2_nttfrombytes_avx) .global _cdecl(PQCLEAN_MLKEM1024_AVX2_nttfrombytes_avx) cdecl(PQCLEAN_MLKEM1024_AVX2_nttfrombytes_avx): _cdecl(PQCLEAN_MLKEM1024_AVX2_nttfrombytes_avx): #consts vmovdqa _16XMASK*2(%rdx),%ymm0 call nttfrombytes128_avx add $256,%rdi add $192,%rsi call nttfrombytes128_avx ret
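The bitpack block in ntttobytes128_avx above packs 12-bit coefficients sixteen at a time; the scalar equivalent, in reference-code style, packs two reduced coefficients into three bytes (the helper name here is illustrative).

#include <stdint.h>

/* Illustrative scalar sketch of the packing done vectorized above:
 * two coefficients t0, t1, already reduced to [0, 3329), into 3 bytes. */
static void pack2(uint8_t r[3], uint16_t t0, uint16_t t1)
{
    r[0] = (uint8_t)t0;
    r[1] = (uint8_t)((t0 >> 8) | (t1 << 4));
    r[2] = (uint8_t)(t1 >> 4);
}

nttfrombytes128_avx undoes this with the complementary shifts and the _16XMASK constant.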
mktmansour/MKT-KSA-Geolocation-Security
1,805
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/ml-kem-1024/avx2/fq.S
#include "cdecl.h" .include "fq.inc" .text reduce128_avx: #load vmovdqa (%rdi),%ymm2 vmovdqa 32(%rdi),%ymm3 vmovdqa 64(%rdi),%ymm4 vmovdqa 96(%rdi),%ymm5 vmovdqa 128(%rdi),%ymm6 vmovdqa 160(%rdi),%ymm7 vmovdqa 192(%rdi),%ymm8 vmovdqa 224(%rdi),%ymm9 red16 2 red16 3 red16 4 red16 5 red16 6 red16 7 red16 8 red16 9 #store vmovdqa %ymm2,(%rdi) vmovdqa %ymm3,32(%rdi) vmovdqa %ymm4,64(%rdi) vmovdqa %ymm5,96(%rdi) vmovdqa %ymm6,128(%rdi) vmovdqa %ymm7,160(%rdi) vmovdqa %ymm8,192(%rdi) vmovdqa %ymm9,224(%rdi) ret .global cdecl(PQCLEAN_MLKEM1024_AVX2_reduce_avx) .global _cdecl(PQCLEAN_MLKEM1024_AVX2_reduce_avx) cdecl(PQCLEAN_MLKEM1024_AVX2_reduce_avx): _cdecl(PQCLEAN_MLKEM1024_AVX2_reduce_avx): #consts vmovdqa _16XQ*2(%rsi),%ymm0 vmovdqa _16XV*2(%rsi),%ymm1 call reduce128_avx add $256,%rdi call reduce128_avx ret tomont128_avx: #load vmovdqa (%rdi),%ymm3 vmovdqa 32(%rdi),%ymm4 vmovdqa 64(%rdi),%ymm5 vmovdqa 96(%rdi),%ymm6 vmovdqa 128(%rdi),%ymm7 vmovdqa 160(%rdi),%ymm8 vmovdqa 192(%rdi),%ymm9 vmovdqa 224(%rdi),%ymm10 fqmulprecomp 1,2,3,11 fqmulprecomp 1,2,4,12 fqmulprecomp 1,2,5,13 fqmulprecomp 1,2,6,14 fqmulprecomp 1,2,7,15 fqmulprecomp 1,2,8,11 fqmulprecomp 1,2,9,12 fqmulprecomp 1,2,10,13 #store vmovdqa %ymm3,(%rdi) vmovdqa %ymm4,32(%rdi) vmovdqa %ymm5,64(%rdi) vmovdqa %ymm6,96(%rdi) vmovdqa %ymm7,128(%rdi) vmovdqa %ymm8,160(%rdi) vmovdqa %ymm9,192(%rdi) vmovdqa %ymm10,224(%rdi) ret .global cdecl(PQCLEAN_MLKEM1024_AVX2_tomont_avx) .global _cdecl(PQCLEAN_MLKEM1024_AVX2_tomont_avx) cdecl(PQCLEAN_MLKEM1024_AVX2_tomont_avx): _cdecl(PQCLEAN_MLKEM1024_AVX2_tomont_avx): #consts vmovdqa _16XQ*2(%rsi),%ymm0 vmovdqa _16XMONTSQLO*2(%rsi),%ymm1 vmovdqa _16XMONTSQHI*2(%rsi),%ymm2 call tomont128_avx add $256,%rdi call tomont128_avx ret
mktmansour/MKT-KSA-Geolocation-Security
4,182
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/ml-kem-1024/avx2/ntt.S
#include "cdecl.h" .include "shuffle.inc" .macro mul rh0,rh1,rh2,rh3,zl0=15,zl1=15,zh0=2,zh1=2 vpmullw %ymm\zl0,%ymm\rh0,%ymm12 vpmullw %ymm\zl0,%ymm\rh1,%ymm13 vpmullw %ymm\zl1,%ymm\rh2,%ymm14 vpmullw %ymm\zl1,%ymm\rh3,%ymm15 vpmulhw %ymm\zh0,%ymm\rh0,%ymm\rh0 vpmulhw %ymm\zh0,%ymm\rh1,%ymm\rh1 vpmulhw %ymm\zh1,%ymm\rh2,%ymm\rh2 vpmulhw %ymm\zh1,%ymm\rh3,%ymm\rh3 .endm .macro reduce vpmulhw %ymm0,%ymm12,%ymm12 vpmulhw %ymm0,%ymm13,%ymm13 vpmulhw %ymm0,%ymm14,%ymm14 vpmulhw %ymm0,%ymm15,%ymm15 .endm .macro update rln,rl0,rl1,rl2,rl3,rh0,rh1,rh2,rh3 vpaddw %ymm\rh0,%ymm\rl0,%ymm\rln vpsubw %ymm\rh0,%ymm\rl0,%ymm\rh0 vpaddw %ymm\rh1,%ymm\rl1,%ymm\rl0 vpsubw %ymm\rh1,%ymm\rl1,%ymm\rh1 vpaddw %ymm\rh2,%ymm\rl2,%ymm\rl1 vpsubw %ymm\rh2,%ymm\rl2,%ymm\rh2 vpaddw %ymm\rh3,%ymm\rl3,%ymm\rl2 vpsubw %ymm\rh3,%ymm\rl3,%ymm\rh3 vpsubw %ymm12,%ymm\rln,%ymm\rln vpaddw %ymm12,%ymm\rh0,%ymm\rh0 vpsubw %ymm13,%ymm\rl0,%ymm\rl0 vpaddw %ymm13,%ymm\rh1,%ymm\rh1 vpsubw %ymm14,%ymm\rl1,%ymm\rl1 vpaddw %ymm14,%ymm\rh2,%ymm\rh2 vpsubw %ymm15,%ymm\rl2,%ymm\rl2 vpaddw %ymm15,%ymm\rh3,%ymm\rh3 .endm .macro level0 off vpbroadcastq (_ZETAS_EXP+0)*2(%rsi),%ymm15 vmovdqa (64*\off+128)*2(%rdi),%ymm8 vmovdqa (64*\off+144)*2(%rdi),%ymm9 vmovdqa (64*\off+160)*2(%rdi),%ymm10 vmovdqa (64*\off+176)*2(%rdi),%ymm11 vpbroadcastq (_ZETAS_EXP+4)*2(%rsi),%ymm2 mul 8,9,10,11 vmovdqa (64*\off+ 0)*2(%rdi),%ymm4 vmovdqa (64*\off+ 16)*2(%rdi),%ymm5 vmovdqa (64*\off+ 32)*2(%rdi),%ymm6 vmovdqa (64*\off+ 48)*2(%rdi),%ymm7 reduce update 3,4,5,6,7,8,9,10,11 vmovdqa %ymm3,(64*\off+ 0)*2(%rdi) vmovdqa %ymm4,(64*\off+ 16)*2(%rdi) vmovdqa %ymm5,(64*\off+ 32)*2(%rdi) vmovdqa %ymm6,(64*\off+ 48)*2(%rdi) vmovdqa %ymm8,(64*\off+128)*2(%rdi) vmovdqa %ymm9,(64*\off+144)*2(%rdi) vmovdqa %ymm10,(64*\off+160)*2(%rdi) vmovdqa %ymm11,(64*\off+176)*2(%rdi) .endm .macro levels1t6 off /* level 1 */ vmovdqa (_ZETAS_EXP+224*\off+16)*2(%rsi),%ymm15 vmovdqa (128*\off+ 64)*2(%rdi),%ymm8 vmovdqa (128*\off+ 80)*2(%rdi),%ymm9 vmovdqa (128*\off+ 96)*2(%rdi),%ymm10 vmovdqa (128*\off+112)*2(%rdi),%ymm11 vmovdqa (_ZETAS_EXP+224*\off+32)*2(%rsi),%ymm2 mul 8,9,10,11 vmovdqa (128*\off+ 0)*2(%rdi),%ymm4 vmovdqa (128*\off+ 16)*2(%rdi),%ymm5 vmovdqa (128*\off+ 32)*2(%rdi),%ymm6 vmovdqa (128*\off+ 48)*2(%rdi),%ymm7 reduce update 3,4,5,6,7,8,9,10,11 /* level 2 */ shuffle8 5,10,7,10 shuffle8 6,11,5,11 vmovdqa (_ZETAS_EXP+224*\off+48)*2(%rsi),%ymm15 vmovdqa (_ZETAS_EXP+224*\off+64)*2(%rsi),%ymm2 mul 7,10,5,11 shuffle8 3,8,6,8 shuffle8 4,9,3,9 reduce update 4,6,8,3,9,7,10,5,11 /* level 3 */ shuffle4 8,5,9,5 shuffle4 3,11,8,11 vmovdqa (_ZETAS_EXP+224*\off+80)*2(%rsi),%ymm15 vmovdqa (_ZETAS_EXP+224*\off+96)*2(%rsi),%ymm2 mul 9,5,8,11 shuffle4 4,7,3,7 shuffle4 6,10,4,10 reduce update 6,3,7,4,10,9,5,8,11 /* level 4 */ shuffle2 7,8,10,8 shuffle2 4,11,7,11 vmovdqa (_ZETAS_EXP+224*\off+112)*2(%rsi),%ymm15 vmovdqa (_ZETAS_EXP+224*\off+128)*2(%rsi),%ymm2 mul 10,8,7,11 shuffle2 6,9,4,9 shuffle2 3,5,6,5 reduce update 3,4,9,6,5,10,8,7,11 /* level 5 */ shuffle1 9,7,5,7 shuffle1 6,11,9,11 vmovdqa (_ZETAS_EXP+224*\off+144)*2(%rsi),%ymm15 vmovdqa (_ZETAS_EXP+224*\off+160)*2(%rsi),%ymm2 mul 5,7,9,11 shuffle1 3,10,6,10 shuffle1 4,8,3,8 reduce update 4,6,10,3,8,5,7,9,11 /* level 6 */ vmovdqa (_ZETAS_EXP+224*\off+176)*2(%rsi),%ymm14 vmovdqa (_ZETAS_EXP+224*\off+208)*2(%rsi),%ymm15 vmovdqa (_ZETAS_EXP+224*\off+192)*2(%rsi),%ymm8 vmovdqa (_ZETAS_EXP+224*\off+224)*2(%rsi),%ymm2 mul 10,3,9,11,14,15,8,2 reduce update 8,4,6,5,7,10,3,9,11 vmovdqa %ymm8,(128*\off+ 0)*2(%rdi) vmovdqa %ymm4,(128*\off+ 16)*2(%rdi) 
vmovdqa %ymm10,(128*\off+ 32)*2(%rdi) vmovdqa %ymm3,(128*\off+ 48)*2(%rdi) vmovdqa %ymm6,(128*\off+ 64)*2(%rdi) vmovdqa %ymm5,(128*\off+ 80)*2(%rdi) vmovdqa %ymm9,(128*\off+ 96)*2(%rdi) vmovdqa %ymm11,(128*\off+112)*2(%rdi) .endm .text .global cdecl(PQCLEAN_MLKEM1024_AVX2_ntt_avx) .global _cdecl(PQCLEAN_MLKEM1024_AVX2_ntt_avx) cdecl(PQCLEAN_MLKEM1024_AVX2_ntt_avx): _cdecl(PQCLEAN_MLKEM1024_AVX2_ntt_avx): vmovdqa _16XQ*2(%rsi),%ymm0 level0 0 level0 1 levels1t6 0 levels1t6 1 ret
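level0 and levels1t6 above unroll the seven butterfly layers of the forward negacyclic NTT while keeping a register-resident block of coefficients per pass. A plain-arithmetic scalar sketch of the same layer structure follows (illustrative only: it skips the Montgomery form and lazy reductions, and recomputes the twiddles from the 256th root of unity 17 instead of reading the _ZETAS_EXP table; all identifiers are made up for this sketch).

#include <stdint.h>

#define Q 3329

/* Illustrative plain-arithmetic sketch, not the table-driven AVX2 code above. */

static int16_t mulmod(int16_t a, int16_t b) { return (int16_t)(((int32_t)a * b) % Q); }

static unsigned brv7(unsigned k)           /* bit-reverse a 7-bit index */
{
    unsigned r = 0;
    for (unsigned i = 0; i < 7; i++) r |= ((k >> i) & 1u) << (6 - i);
    return r;
}

static int16_t zeta_pow(unsigned e)        /* 17^e mod q; 17 is a 256th root of unity */
{
    int32_t r = 1, b = 17;
    for (; e; e >>= 1, b = (b * b) % Q)
        if (e & 1) r = (r * b) % Q;
    return (int16_t)r;
}

void ntt_plain(int16_t r[256])             /* in-place forward NTT, 7 layers */
{
    unsigned k = 1;
    for (unsigned len = 128; len >= 2; len >>= 1)
        for (unsigned start = 0; start < 256; start += 2 * len) {
            int16_t zeta = zeta_pow(brv7(k++));
            for (unsigned j = start; j < start + len; j++) {
                int16_t t = mulmod(zeta, r[j + len]);
                r[j + len] = (int16_t)((r[j] - t) % Q);
                r[j]       = (int16_t)((r[j] + t) % Q);
            }
        }
}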
mktmansour/MKT-KSA-Geolocation-Security
4,791
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/ml-kem-1024/avx2/invntt.S
#include "cdecl.h" .include "shuffle.inc" .include "fq.inc" .macro butterfly rl0,rl1,rl2,rl3,rh0,rh1,rh2,rh3,zl0=2,zl1=2,zh0=3,zh1=3 vpsubw %ymm\rl0,%ymm\rh0,%ymm12 vpaddw %ymm\rh0,%ymm\rl0,%ymm\rl0 vpsubw %ymm\rl1,%ymm\rh1,%ymm13 vpmullw %ymm\zl0,%ymm12,%ymm\rh0 vpaddw %ymm\rh1,%ymm\rl1,%ymm\rl1 vpsubw %ymm\rl2,%ymm\rh2,%ymm14 vpmullw %ymm\zl0,%ymm13,%ymm\rh1 vpaddw %ymm\rh2,%ymm\rl2,%ymm\rl2 vpsubw %ymm\rl3,%ymm\rh3,%ymm15 vpmullw %ymm\zl1,%ymm14,%ymm\rh2 vpaddw %ymm\rh3,%ymm\rl3,%ymm\rl3 vpmullw %ymm\zl1,%ymm15,%ymm\rh3 vpmulhw %ymm\zh0,%ymm12,%ymm12 vpmulhw %ymm\zh0,%ymm13,%ymm13 vpmulhw %ymm\zh1,%ymm14,%ymm14 vpmulhw %ymm\zh1,%ymm15,%ymm15 vpmulhw %ymm0,%ymm\rh0,%ymm\rh0 vpmulhw %ymm0,%ymm\rh1,%ymm\rh1 vpmulhw %ymm0,%ymm\rh2,%ymm\rh2 vpmulhw %ymm0,%ymm\rh3,%ymm\rh3 # # vpsubw %ymm\rh0,%ymm12,%ymm\rh0 vpsubw %ymm\rh1,%ymm13,%ymm\rh1 vpsubw %ymm\rh2,%ymm14,%ymm\rh2 vpsubw %ymm\rh3,%ymm15,%ymm\rh3 .endm .macro intt_levels0t5 off /* level 0 */ vmovdqa _16XFLO*2(%rsi),%ymm2 vmovdqa _16XFHI*2(%rsi),%ymm3 vmovdqa (128*\off+ 0)*2(%rdi),%ymm4 vmovdqa (128*\off+ 32)*2(%rdi),%ymm6 vmovdqa (128*\off+ 16)*2(%rdi),%ymm5 vmovdqa (128*\off+ 48)*2(%rdi),%ymm7 fqmulprecomp 2,3,4 fqmulprecomp 2,3,6 fqmulprecomp 2,3,5 fqmulprecomp 2,3,7 vmovdqa (128*\off+ 64)*2(%rdi),%ymm8 vmovdqa (128*\off+ 96)*2(%rdi),%ymm10 vmovdqa (128*\off+ 80)*2(%rdi),%ymm9 vmovdqa (128*\off+112)*2(%rdi),%ymm11 fqmulprecomp 2,3,8 fqmulprecomp 2,3,10 fqmulprecomp 2,3,9 fqmulprecomp 2,3,11 vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+208)*2(%rsi),%ymm15 vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+176)*2(%rsi),%ymm1 vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+224)*2(%rsi),%ymm2 vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+192)*2(%rsi),%ymm3 vmovdqa _REVIDXB*2(%rsi),%ymm12 vpshufb %ymm12,%ymm15,%ymm15 vpshufb %ymm12,%ymm1,%ymm1 vpshufb %ymm12,%ymm2,%ymm2 vpshufb %ymm12,%ymm3,%ymm3 butterfly 4,5,8,9,6,7,10,11,15,1,2,3 /* level 1 */ vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+144)*2(%rsi),%ymm2 vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+160)*2(%rsi),%ymm3 vmovdqa _REVIDXB*2(%rsi),%ymm1 vpshufb %ymm1,%ymm2,%ymm2 vpshufb %ymm1,%ymm3,%ymm3 butterfly 4,5,6,7,8,9,10,11,2,2,3,3 shuffle1 4,5,3,5 shuffle1 6,7,4,7 shuffle1 8,9,6,9 shuffle1 10,11,8,11 /* level 2 */ vmovdqa _REVIDXD*2(%rsi),%ymm12 vpermd (_ZETAS_EXP+(1-\off)*224+112)*2(%rsi),%ymm12,%ymm2 vpermd (_ZETAS_EXP+(1-\off)*224+128)*2(%rsi),%ymm12,%ymm10 butterfly 3,4,6,8,5,7,9,11,2,2,10,10 vmovdqa _16XV*2(%rsi),%ymm1 red16 3 shuffle2 3,4,10,4 shuffle2 6,8,3,8 shuffle2 5,7,6,7 shuffle2 9,11,5,11 /* level 3 */ vpermq $0x1B,(_ZETAS_EXP+(1-\off)*224+80)*2(%rsi),%ymm2 vpermq $0x1B,(_ZETAS_EXP+(1-\off)*224+96)*2(%rsi),%ymm9 butterfly 10,3,6,5,4,8,7,11,2,2,9,9 shuffle4 10,3,9,3 shuffle4 6,5,10,5 shuffle4 4,8,6,8 shuffle4 7,11,4,11 /* level 4 */ vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+48)*2(%rsi),%ymm2 vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+64)*2(%rsi),%ymm7 butterfly 9,10,6,4,3,5,8,11,2,2,7,7 red16 9 shuffle8 9,10,7,10 shuffle8 6,4,9,4 shuffle8 3,5,6,5 shuffle8 8,11,3,11 /* level 5 */ vmovdqa (_ZETAS_EXP+(1-\off)*224+16)*2(%rsi),%ymm2 vmovdqa (_ZETAS_EXP+(1-\off)*224+32)*2(%rsi),%ymm8 butterfly 7,9,6,3,10,4,5,11,2,2,8,8 vmovdqa %ymm7,(128*\off+ 0)*2(%rdi) vmovdqa %ymm9,(128*\off+ 16)*2(%rdi) vmovdqa %ymm6,(128*\off+ 32)*2(%rdi) vmovdqa %ymm3,(128*\off+ 48)*2(%rdi) vmovdqa %ymm10,(128*\off+ 64)*2(%rdi) vmovdqa %ymm4,(128*\off+ 80)*2(%rdi) vmovdqa %ymm5,(128*\off+ 96)*2(%rdi) vmovdqa %ymm11,(128*\off+112)*2(%rdi) .endm .macro intt_level6 off /* level 6 */ vmovdqa (64*\off+ 0)*2(%rdi),%ymm4 vmovdqa (64*\off+128)*2(%rdi),%ymm8 vmovdqa 
(64*\off+ 16)*2(%rdi),%ymm5 vmovdqa (64*\off+144)*2(%rdi),%ymm9 vpbroadcastq (_ZETAS_EXP+0)*2(%rsi),%ymm2 vmovdqa (64*\off+ 32)*2(%rdi),%ymm6 vmovdqa (64*\off+160)*2(%rdi),%ymm10 vmovdqa (64*\off+ 48)*2(%rdi),%ymm7 vmovdqa (64*\off+176)*2(%rdi),%ymm11 vpbroadcastq (_ZETAS_EXP+4)*2(%rsi),%ymm3 butterfly 4,5,6,7,8,9,10,11 .if \off == 0 red16 4 .endif vmovdqa %ymm4,(64*\off+ 0)*2(%rdi) vmovdqa %ymm5,(64*\off+ 16)*2(%rdi) vmovdqa %ymm6,(64*\off+ 32)*2(%rdi) vmovdqa %ymm7,(64*\off+ 48)*2(%rdi) vmovdqa %ymm8,(64*\off+128)*2(%rdi) vmovdqa %ymm9,(64*\off+144)*2(%rdi) vmovdqa %ymm10,(64*\off+160)*2(%rdi) vmovdqa %ymm11,(64*\off+176)*2(%rdi) .endm .text .global cdecl(PQCLEAN_MLKEM1024_AVX2_invntt_avx) .global _cdecl(PQCLEAN_MLKEM1024_AVX2_invntt_avx) cdecl(PQCLEAN_MLKEM1024_AVX2_invntt_avx): _cdecl(PQCLEAN_MLKEM1024_AVX2_invntt_avx): vmovdqa _16XQ*2(%rsi),%ymm0 intt_levels0t5 0 intt_levels0t5 1 intt_level6 0 intt_level6 1 ret
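The butterfly macro above is the Gentleman-Sande step used by intt_levels0t5 and intt_level6; per coefficient pair it computes roughly the following (a fragmentary reference-style sketch that assumes the barrett_reduce() and fqmul() helpers sketched earlier in this dump).

#include <stdint.h>

int16_t barrett_reduce(int16_t a);      /* assumed, as sketched above */
int16_t fqmul(int16_t a, int16_t b);    /* assumed Montgomery product, as sketched above */

/* Illustrative sketch: one inverse-NTT butterfly on (r[j], r[j+len]) with twiddle zeta. */
static void gs_butterfly(int16_t *rj, int16_t *rjlen, int16_t zeta)
{
    int16_t t = *rj;
    *rj    = barrett_reduce((int16_t)(t + *rjlen));   /* sum, kept small   */
    *rjlen = fqmul(zeta, (int16_t)(*rjlen - t));      /* zeta * difference */
}

The fqmulprecomp calls at level 0 and the red16 steps in the middle appear to fold in the scaling constants (_16XFLO/_16XFHI) and keep coefficients small between layers.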
mktmansour/MKT-KSA-Geolocation-Security
12,896
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/ml-kem-1024/aarch64/__asm_iNTT.S
/* * We offer * CC0 1.0 Universal or the following MIT License for this file. * You may freely choose one of them that applies. * * MIT License * * Copyright (c) 2023: Hanno Becker, Vincent Hwang, Matthias J. Kannwischer, Bo-Yin Yang, and Shang-Yi Yang * Copyright (c) 2023: Vincent Hwang * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "macros.inc" .align 2 .global PQCLEAN_MLKEM1024_AARCH64__asm_intt_SIMD_bot .global _PQCLEAN_MLKEM1024_AARCH64__asm_intt_SIMD_bot PQCLEAN_MLKEM1024_AARCH64__asm_intt_SIMD_bot: _PQCLEAN_MLKEM1024_AARCH64__asm_intt_SIMD_bot: push_all Q .req w20 BarrettM .req w21 src0 .req x0 src1 .req x1 table .req x28 counter .req x19 ldrsh Q, [x2, #0] ldrsh BarrettM, [x2, #8] add table, x1, #64 add src0, x0, #256*0 add src1, x0, #256*1 mov v0.H[0], Q mov v0.H[1], BarrettM ldr q28, [src0, #1*16] ldr q29, [src1, #1*16] ldr q30, [src0, #3*16] ldr q31, [src1, #3*16] trn_4x4_2l4 v28, v29, v30, v31, v20, v21, v22, v23, src0, src1, q24, q25, q26, q27, #0*16, #0*16, #2*16, #2*16 trn_4x4_2l4 v24, v25, v26, v27, v20, v21, v22, v23, table, table, q12, q13, q14, q15, #12*16, #13*16, #14*16, #15*16 do_butterfly_vec_bot v28, v30, v18, v19, v29, v31, v0, v12, v13, v14, v15 do_butterfly_vec_mix_rev_l4 \ v18, v19, v29, v31, \ v24, v26, v16, v17, v25, v27, v0, v12, v13, v14, v15, \ table, \ q8, q9, q10, q11, \ #8*16, #9*16, #10*16, #11*16 do_butterfly_vec_mix_rev_l4 \ v16, v17, v25, v27, \ v28, v29, v18, v19, v30, v31, v0, v8, v9, v10, v11, \ table, \ q4, q5, q6, q7, \ #4*16, #5*16, #6*16, #7*16 do_butterfly_vec_mix_rev_l3 \ v18, v19, v30, v31, \ v24, v25, v16, v17, v26, v27, v0, v6, v7, v6, v7, \ table, \ q1, q2, q3, \ #1*16, #2*16, #3*16 do_butterfly_vec_mix_rev v24, v25, v16, v17, v26, v27, v24, v25, v18, v19, v28, v29, v0, v4, v5, v4, v5, v2, v3, v2, v3 do_butterfly_vec_mix_rev v24, v25, v18, v19, v28, v29, v26, v27, v16, v17, v30, v31, v0, v2, v3, v2, v3, v2, v3, v2, v3 do_butterfly_vec_top v26, v27, v16, v17, v30, v31, v0, v2, v3, v2, v3 oo_barrett v24, v25, v26, v27, v16, v17, v18, v19, v28, v29, v30, v31, v20, v21, v22, v23, v0, #11, v0 add table, table, #256 trn_4x4 v28, v29, v30, v31, v16, v17, v18, v19 trn_4x4_2s4 v24, v25, v26, v27, v16, v17, v18, v19, src0, src1, q28, q29, q30, q31, #1*16, #1*16, #3*16, #3*16 mov counter, #3 _intt_bot_loop: str q24, [src0, #0*16] ldr q28, [src0, #(64+1*16)] str q25, [src1, #0*16] ldr q29, [src1, #(64+1*16)] str q26, [src0, #2*16] ldr q30, [src0, #(64+3*16)] str q27, [src1, #2*16] ldr q31, [src1, #(64+3*16)] add src0, src0, #64 add src1, src1, #64 
trn_4x4_2l4 v28, v29, v30, v31, v20, v21, v22, v23, src0, src1, q24, q25, q26, q27, #0*16, #0*16, #2*16, #2*16 trn_4x4_2l4 v24, v25, v26, v27, v20, v21, v22, v23, table, table, q12, q13, q14, q15, #12*16, #13*16, #14*16, #15*16 do_butterfly_vec_bot v28, v30, v18, v19, v29, v31, v0, v12, v13, v14, v15 do_butterfly_vec_mix_rev_l4 \ v18, v19, v29, v31, \ v24, v26, v16, v17, v25, v27, v0, v12, v13, v14, v15, \ table, \ q8, q9, q10, q11, \ #8*16, #9*16, #10*16, #11*16 do_butterfly_vec_mix_rev_l4 \ v16, v17, v25, v27, \ v28, v29, v18, v19, v30, v31, v0, v8, v9, v10, v11, \ table, \ q4, q5, q6, q7, \ #4*16, #5*16, #6*16, #7*16 do_butterfly_vec_mix_rev_l3 \ v18, v19, v30, v31, \ v24, v25, v16, v17, v26, v27, v0, v6, v7, v6, v7, \ table, \ q1, q2, q3, \ #1*16, #2*16, #3*16 do_butterfly_vec_mix_rev v24, v25, v16, v17, v26, v27, v24, v25, v18, v19, v28, v29, v0, v4, v5, v4, v5, v2, v3, v2, v3 do_butterfly_vec_mix_rev v24, v25, v18, v19, v28, v29, v26, v27, v16, v17, v30, v31, v0, v2, v3, v2, v3, v2, v3, v2, v3 do_butterfly_vec_top v26, v27, v16, v17, v30, v31, v0, v2, v3, v2, v3 oo_barrett v24, v25, v26, v27, v16, v17, v18, v19, v28, v29, v30, v31, v20, v21, v22, v23, v0, #11, v0 add table, table, #256 trn_4x4 v28, v29, v30, v31, v16, v17, v18, v19 trn_4x4_2s4 v24, v25, v26, v27, v16, v17, v18, v19, src0, src1, q28, q29, q30, q31, #1*16, #1*16, #3*16, #3*16 sub counter, counter, #1 cbnz counter, _intt_bot_loop str q24, [src0, #0*16] str q25, [src1, #0*16] str q26, [src0, #2*16] str q27, [src1, #2*16] .unreq Q .unreq BarrettM .unreq src0 .unreq src1 .unreq table .unreq counter pop_all ret .align 2 .global PQCLEAN_MLKEM1024_AARCH64__asm_intt_SIMD_top .global _PQCLEAN_MLKEM1024_AARCH64__asm_intt_SIMD_top PQCLEAN_MLKEM1024_AARCH64__asm_intt_SIMD_top: _PQCLEAN_MLKEM1024_AARCH64__asm_intt_SIMD_top: push_all Q .req w20 BarrettM .req w21 invN .req w22 invN_f .req w23 src .req x0 table .req x1 counter .req x19 ldrsh Q, [x2, #0] ldrsh BarrettM, [x2, #8] ldr invN, [x2, #10] ldr invN_f, [x2, #14] mov v4.S[0], invN mov v4.S[1], invN_f ldr q0, [table, #0*16] mov v0.H[0], Q ldr q1, [table, #1*16] ldr q2, [table, #2*16] ldr q3, [table, #3*16] ldr q16, [src, # 8*32] ldr q17, [src, # 9*32] ldr q18, [src, #10*32] ldr q19, [src, #11*32] ldr q20, [src, #12*32] ldr q21, [src, #13*32] ldr q22, [src, #14*32] ldr q23, [src, #15*32] qo_butterfly_botll \ v16, v18, v20, v22, v28, v29, v30, v31, v17, v19, v21, v23, \ src, \ q8, q9, q10, q11, \ #0*32, #1*32, #2*32, #3*32, \ src, \ q12, q13, q14, q15, \ #4*32, #5*32, #6*32, #7*32 qo_butterfly_mix_rev v16, v18, v20, v22, v28, v29, v30, v31, v17, v19, v21, v23, v8, v10, v12, v14, v24, v25, v26, v27, v9, v11, v13, v15, v0, v3, 0, 1, v3, 2, 3, v3, 4, 5, v3, 6, 7, v3, 0, 1, v3, 2, 3, v3, 4, 5, v3, 6, 7 qo_butterfly_mix_rev v8, v10, v12, v14, v24, v25, v26, v27, v9, v11, v13, v15, v16, v17, v20, v21, v28, v29, v30, v31, v18, v19, v22, v23, v0, v2, 0, 1, v2, 2, 3, v2, 4, 5, v2, 6, 7, v1, 4, 5, v1, 4, 5, v1, 6, 7, v1, 6, 7 qo_butterfly_mix_rev v16, v17, v20, v21, v28, v29, v30, v31, v18, v19, v22, v23, v8, v9, v12, v13, v24, v25, v26, v27, v10, v11, v14, v15, v0, v1, 4, 5, v1, 4, 5, v1, 6, 7, v1, 6, 7, v1, 0, 1, v1, 0, 1, v1, 2, 3, v1, 2, 3 qo_butterfly_mix_rev v8, v9, v12, v13, v24, v25, v26, v27, v10, v11, v14, v15, v16, v17, v18, v19, v28, v29, v30, v31, v20, v21, v22, v23, v0, v1, 0, 1, v1, 0, 1, v1, 2, 3, v1, 2, 3, v0, 6, 7, v0, 6, 7, v0, 6, 7, v0, 6, 7 qo_butterfly_mix_rev v16, v17, v18, v19, v28, v29, v30, v31, v20, v21, v22, v23, v8, v9, v10, v11, v24, v25, v26, v27, v12, v13, 
v14, v15, v0, v0, 6, 7, v0, 6, 7, v0, 6, 7, v0, 6, 7, v0, 4, 5, v0, 4, 5, v0, 4, 5, v0, 4, 5 qo_butterfly_mix_rev v8, v9, v10, v11, v24, v25, v26, v27, v12, v13, v14, v15, v8, v9, v10, v11, v28, v29, v30, v31, v16, v17, v18, v19, v0, v0, 4, 5, v0, 4, 5, v0, 4, 5, v0, 4, 5, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3 qo_butterfly_mix_rev v8, v9, v10, v11, v28, v29, v30, v31, v16, v17, v18, v19, v12, v13, v14, v15, v24, v25, v26, v27, v20, v21, v22, v23, v0, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3 qo_butterfly_topsl \ v24, v25, v26, v27, v20, v21, v22, v23, v0, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3, \ src, \ q16, q17, q18, q19, \ #8*32, #9*32, #10*32, #11*32, \ src, \ q16, q17, q18, q19, \ #(16+8*32), #(16+9*32), #(16+10*32), #(16+11*32) qo_montgomery_mul_insl \ v8, v9, v10, v11, v28, v29, v30, v31, v0, v4, 1, 0, v4, 1, 0, v4, 1, 0, v4, 1, 0, \ src, \ q20, q21, q22, q23, \ #12*32, #13*32, #14*32, #15*32, \ src, \ q20, q21, q22, q23, \ #(16+12*32), #(16+13*32), #(16+14*32), #(16+15*32) qo_butterfly_botsl_mul \ v16, v18, v20, v22, v28, v29, v30, v31, v17, v19, v21, v23, \ src, \ q8, q9, q10, q11, \ #0*32, #1*32, #2*32, #3*32, \ src, \ q8, q9, q10, q11, \ #(16+0*32), #(16+1*32), #(16+2*32), #(16+3*32), \ v12, v13, v14, v15, v24, v25, v26, v27, \ v0, v4, 1, 0, v4, 1, 0, v4, 1, 0, v4, 1, 0 str q12, [src, # 4*32] ldr q12, [src, #(16+ 4*32)] str q13, [src, # 5*32] ldr q13, [src, #(16+ 5*32)] str q14, [src, # 6*32] ldr q14, [src, #(16+ 6*32)] str q15, [src, # 7*32] ldr q15, [src, #(16+ 7*32)] qo_butterfly_mix_rev v16, v18, v20, v22, v28, v29, v30, v31, v17, v19, v21, v23, v8, v10, v12, v14, v24, v25, v26, v27, v9, v11, v13, v15, v0, v3, 0, 1, v3, 2, 3, v3, 4, 5, v3, 6, 7, v3, 0, 1, v3, 2, 3, v3, 4, 5, v3, 6, 7 qo_butterfly_mix_rev v8, v10, v12, v14, v24, v25, v26, v27, v9, v11, v13, v15, v16, v17, v20, v21, v28, v29, v30, v31, v18, v19, v22, v23, v0, v2, 0, 1, v2, 2, 3, v2, 4, 5, v2, 6, 7, v1, 4, 5, v1, 4, 5, v1, 6, 7, v1, 6, 7 qo_butterfly_mix_rev v16, v17, v20, v21, v28, v29, v30, v31, v18, v19, v22, v23, v8, v9, v12, v13, v24, v25, v26, v27, v10, v11, v14, v15, v0, v1, 4, 5, v1, 4, 5, v1, 6, 7, v1, 6, 7, v1, 0, 1, v1, 0, 1, v1, 2, 3, v1, 2, 3 qo_butterfly_mix_rev v8, v9, v12, v13, v24, v25, v26, v27, v10, v11, v14, v15, v16, v17, v18, v19, v28, v29, v30, v31, v20, v21, v22, v23, v0, v1, 0, 1, v1, 0, 1, v1, 2, 3, v1, 2, 3, v0, 6, 7, v0, 6, 7, v0, 6, 7, v0, 6, 7 qo_butterfly_mix_rev v16, v17, v18, v19, v28, v29, v30, v31, v20, v21, v22, v23, v8, v9, v10, v11, v24, v25, v26, v27, v12, v13, v14, v15, v0, v0, 6, 7, v0, 6, 7, v0, 6, 7, v0, 6, 7, v0, 4, 5, v0, 4, 5, v0, 4, 5, v0, 4, 5 qo_butterfly_mix_rev v8, v9, v10, v11, v24, v25, v26, v27, v12, v13, v14, v15, v8, v9, v10, v11, v28, v29, v30, v31, v16, v17, v18, v19, v0, v0, 4, 5, v0, 4, 5, v0, 4, 5, v0, 4, 5, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3 qo_butterfly_mix_rev v8, v9, v10, v11, v28, v29, v30, v31, v16, v17, v18, v19, v12, v13, v14, v15, v24, v25, v26, v27, v20, v21, v22, v23, v0, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3 qo_butterfly_tops \ v24, v25, v26, v27, v20, v21, v22, v23, v0, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3, \ src, \ q16, q17, q18, q19, \ #(16+8*32), #(16+9*32), #(16+10*32), #(16+11*32) qo_montgomery_mul_ins \ v8, v9, v10, v11, v28, v29, v30, v31, v0, v4, 1, 0, v4, 1, 0, v4, 1, 0, v4, 1, 0, \ src, \ q20, q21, q22, q23, \ #(16+12*32), #(16+13*32), #(16+14*32), #(16+15*32) qo_montgomery_mul_ins \ v12, v13, v14, v15, v24, v25, v26, v27, v0, v4, 1, 0, v4, 1, 0, 
v4, 1, 0, v4, 1, 0, \ src, \ q8, q9, q10, q11, \ #(16+0*32), #(16+1*32), #(16+2*32), #(16+3*32) str q12, [src, #(16+ 4*32)] str q13, [src, #(16+ 5*32)] str q14, [src, #(16+ 6*32)] str q15, [src, #(16+ 7*32)] .unreq Q .unreq BarrettM .unreq invN .unreq invN_f .unreq src .unreq counter pop_all ret
mktmansour/MKT-KSA-Geolocation-Security
23,900
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/ml-kem-1024/aarch64/__asm_base_mul.S
/* * We offer * CC0 1.0 Universal or the following MIT License for this file. * You may freely choose one of them that applies. * * MIT License * * Copyright (c) 2023: Hanno Becker, Vincent Hwang, Matthias J. Kannwischer, Bo-Yin Yang, and Shang-Yi Yang * Copyright (c) 2023: Vincent Hwang * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "macros.inc" #include "params.h" .align 2 .global PQCLEAN_MLKEM1024_AARCH64__asm_point_mul_extended .global _PQCLEAN_MLKEM1024_AARCH64__asm_point_mul_extended PQCLEAN_MLKEM1024_AARCH64__asm_point_mul_extended: _PQCLEAN_MLKEM1024_AARCH64__asm_point_mul_extended: push_all Q .req w20 des .req x0 src1 .req x1 src2ex .req x2 counter .req x19 ldrsh Q, [x3] dup v28.8H, Q ldr q0, [src1, #0*16] ldr q1, [src1, #1*16] ldr q2, [src1, #2*16] ldr q3, [src1, #3*16] ldr q4, [src1, #4*16] ldr q5, [src1, #5*16] ldr q6, [src1, #6*16] ldr q7, [src1, #7*16] add src1, src1, #8*16 uzp2 v1.8H, v0.8H, v1.8H uzp2 v3.8H, v2.8H, v3.8H uzp2 v5.8H, v4.8H, v5.8H uzp2 v7.8H, v6.8H, v7.8H ldr q8, [src2ex, #0*16] ldr q10, [src2ex, #2*16] ldr q12, [src2ex, #4*16] ldr q14, [src2ex, #6*16] ldr q9, [src2ex, #1*16] ldr q11, [src2ex, #3*16] ldr q13, [src2ex, #5*16] ldr q15, [src2ex, #7*16] add src2ex, src2ex, #8*16 ldr q16, [src1, #0*16] sqrdmulh v0.8H, v1.8H, v8.8H ldr q17, [src1, #1*16] sqrdmulh v2.8H, v3.8H, v10.8H ldr q18, [src1, #2*16] sqrdmulh v4.8H, v5.8H, v12.8H ldr q19, [src1, #3*16] sqrdmulh v6.8H, v7.8H, v14.8H ldr q20, [src1, #4*16] mul v1.8H, v1.8H, v9.8H uzp2 v17.8H, v16.8H, v17.8H ldr q21, [src1, #5*16] mul v3.8H, v3.8H, v11.8H uzp2 v19.8H, v18.8H, v19.8H ldr q22, [src1, #6*16] mul v5.8H, v5.8H, v13.8H uzp2 v21.8H, v20.8H, v21.8H ldr q23, [src1, #7*16] mul v7.8H, v7.8H, v15.8H uzp2 v23.8H, v22.8H, v23.8H add src1, src1, #8*16 ldr q8, [src2ex, #0*16] mls v1.8H, v0.8H, v28.8H ldr q10, [src2ex, #2*16] mls v3.8H, v2.8H, v28.8H ldr q12, [src2ex, #4*16] mls v5.8H, v4.8H, v28.8H ldr q14, [src2ex, #6*16] mls v7.8H, v6.8H, v28.8H ldr q9, [src2ex, #1*16] str q1, [des, #0*16] ldr q11, [src2ex, #3*16] str q3, [des, #1*16] ldr q13, [src2ex, #5*16] str q5, [des, #2*16] ldr q15, [src2ex, #7*16] str q7, [des, #3*16] add des, des, #4*16 add src2ex, src2ex, #8*16 ldr q0, [src1, #0*16] sqrdmulh v16.8H, v17.8H, v8.8H ldr q1, [src1, #1*16] sqrdmulh v18.8H, v19.8H, v10.8H ldr q2, [src1, #2*16] sqrdmulh v20.8H, v21.8H, v12.8H ldr q3, [src1, #3*16] sqrdmulh v22.8H, v23.8H, v14.8H ldr q4, [src1, #4*16] mul v17.8H, v17.8H, v9.8H uzp2 v1.8H, v0.8H, v1.8H ldr q5, [src1, #5*16] mul v19.8H, v19.8H, v11.8H uzp2 
v3.8H, v2.8H, v3.8H ldr q6, [src1, #6*16] mul v21.8H, v21.8H, v13.8H uzp2 v5.8H, v4.8H, v5.8H ldr q7, [src1, #7*16] mul v23.8H, v23.8H, v15.8H uzp2 v7.8H, v6.8H, v7.8H add src1, src1, #8*16 ldr q8, [src2ex, #0*16] mls v17.8H, v16.8H, v28.8H ldr q10, [src2ex, #2*16] mls v19.8H, v18.8H, v28.8H ldr q12, [src2ex, #4*16] mls v21.8H, v20.8H, v28.8H ldr q14, [src2ex, #6*16] mls v23.8H, v22.8H, v28.8H ldr q9, [src2ex, #1*16] str q17, [des, #0*16] ldr q11, [src2ex, #3*16] str q19, [des, #1*16] ldr q13, [src2ex, #5*16] str q21, [des, #2*16] ldr q15, [src2ex, #7*16] str q23, [des, #3*16] add des, des, #4*16 add src2ex, src2ex, #8*16 ldr q16, [src1, #0*16] sqrdmulh v0.8H, v1.8H, v8.8H ldr q17, [src1, #1*16] sqrdmulh v2.8H, v3.8H, v10.8H ldr q18, [src1, #2*16] sqrdmulh v4.8H, v5.8H, v12.8H ldr q19, [src1, #3*16] sqrdmulh v6.8H, v7.8H, v14.8H ldr q20, [src1, #4*16] mul v1.8H, v1.8H, v9.8H uzp2 v17.8H, v16.8H, v17.8H ldr q21, [src1, #5*16] mul v3.8H, v3.8H, v11.8H uzp2 v19.8H, v18.8H, v19.8H ldr q22, [src1, #6*16] mul v5.8H, v5.8H, v13.8H uzp2 v21.8H, v20.8H, v21.8H ldr q23, [src1, #7*16] mul v7.8H, v7.8H, v15.8H uzp2 v23.8H, v22.8H, v23.8H add src1, src1, #8*16 ldr q8, [src2ex, #0*16] mls v1.8H, v0.8H, v28.8H ldr q10, [src2ex, #2*16] mls v3.8H, v2.8H, v28.8H ldr q12, [src2ex, #4*16] mls v5.8H, v4.8H, v28.8H ldr q14, [src2ex, #6*16] mls v7.8H, v6.8H, v28.8H ldr q9, [src2ex, #1*16] str q1, [des, #0*16] ldr q11, [src2ex, #3*16] str q3, [des, #1*16] ldr q13, [src2ex, #5*16] str q5, [des, #2*16] ldr q15, [src2ex, #7*16] str q7, [des, #3*16] add des, des, #4*16 add src2ex, src2ex, #8*16 sqrdmulh v16.8H, v17.8H, v8.8H sqrdmulh v18.8H, v19.8H, v10.8H sqrdmulh v20.8H, v21.8H, v12.8H sqrdmulh v22.8H, v23.8H, v14.8H mul v17.8H, v17.8H, v9.8H mul v19.8H, v19.8H, v11.8H mul v21.8H, v21.8H, v13.8H mul v23.8H, v23.8H, v15.8H mls v17.8H, v16.8H, v28.8H mls v19.8H, v18.8H, v28.8H mls v21.8H, v20.8H, v28.8H mls v23.8H, v22.8H, v28.8H str q17, [des, #0*16] str q19, [des, #1*16] str q21, [des, #2*16] str q23, [des, #3*16] add des, des, #4*16 .unreq Q .unreq des .unreq src1 .unreq src2ex .unreq counter pop_all ret .align 2 .global PQCLEAN_MLKEM1024_AARCH64__asm_asymmetric_mul .global _PQCLEAN_MLKEM1024_AARCH64__asm_asymmetric_mul PQCLEAN_MLKEM1024_AARCH64__asm_asymmetric_mul: _PQCLEAN_MLKEM1024_AARCH64__asm_asymmetric_mul: push_all des .req x11 src1_0 .req x0 src2_0 .req x1 src2asy_0 .req x2 src1_1 .req x4 src2_1 .req x5 src2asy_1 .req x6 src1_2 .req x8 src2_2 .req x9 src2asy_2 .req x10 src1_3 .req x12 src2_3 .req x13 src2asy_3 .req x14 counter .req x19 ldr s4, [x3] add des, x4, #0 add src1_1, src1_0, #512*1 add src2_1, src2_0, #512*1 add src2asy_1, src2asy_0, #256*1 #if KYBER_K > 2 add src1_2, src1_0, #512*2 add src2_2, src2_0, #512*2 add src2asy_2, src2asy_0, #256*2 #endif #if KYBER_K > 3 add src1_3, src1_0, #512*3 add src2_3, src2_0, #512*3 add src2asy_3, src2asy_0, #256*3 #endif ldr q20, [src1_0, #0*16] ldr q21, [src1_0, #1*16] ldr q22, [src2_0, #0*16] ldr q23, [src2_0, #1*16] add src1_0, src1_0, #32 add src2_0, src2_0, #32 uzp1 v0.8H, v20.8H, v21.8H uzp2 v1.8H, v20.8H, v21.8H uzp1 v2.8H, v22.8H, v23.8H uzp2 v3.8H, v22.8H, v23.8H ld1 {v28.8H}, [src2asy_0], #16 smull v16.4S, v0.4H, v2.4H ldr q20, [src1_1, #0*16] smull2 v18.4S, v0.8H, v2.8H ldr q21, [src1_1, #1*16] smull v17.4S, v0.4H, v3.4H ldr q22, [src2_1, #0*16] smull2 v19.4S, v0.8H, v3.8H ldr q23, [src2_1, #1*16] add src1_1, src1_1, #32 add src2_1, src2_1, #32 smlal v16.4S, v1.4H, v28.4H uzp1 v8.8H, v20.8H, v21.8H smlal2 v18.4S, v1.8H, v28.8H uzp2 v9.8H, v20.8H, 
v21.8H smlal v17.4S, v1.4H, v2.4H uzp1 v10.8H, v22.8H, v23.8H smlal2 v19.4S, v1.8H, v2.8H uzp2 v11.8H, v22.8H, v23.8H ld1 {v29.8H}, [src2asy_1], #16 #if KYBER_K > 2 smlal v16.4S, v8.4H, v10.4H ldr q20, [src1_2, #0*16] smlal2 v18.4S, v8.8H, v10.8H ldr q21, [src1_2, #1*16] smlal v17.4S, v8.4H, v11.4H ldr q22, [src2_2, #0*16] smlal2 v19.4S, v8.8H, v11.8H ldr q23, [src2_2, #1*16] add src1_2, src1_2, #32 add src2_2, src2_2, #32 smlal v16.4S, v9.4H, v29.4H uzp1 v12.8H, v20.8H, v21.8H smlal2 v18.4S, v9.8H, v29.8H uzp2 v13.8H, v20.8H, v21.8H smlal v17.4S, v9.4H, v10.4H uzp1 v14.8H, v22.8H, v23.8H smlal2 v19.4S, v9.8H, v10.8H uzp2 v15.8H, v22.8H, v23.8H ld1 {v30.8H}, [src2asy_2], #16 #if KYBER_K > 3 smlal v16.4S, v12.4H, v14.4H ldr q20, [src1_3, #0*16] smlal2 v18.4S, v12.8H, v14.8H ldr q21, [src1_3, #1*16] smlal v17.4S, v12.4H, v15.4H ldr q22, [src2_3, #0*16] smlal2 v19.4S, v12.8H, v15.8H ldr q23, [src2_3, #1*16] add src1_3, src1_3, #32 add src2_3, src2_3, #32 smlal v16.4S, v13.4H, v30.4H uzp1 v24.8H, v20.8H, v21.8H smlal2 v18.4S, v13.8H, v30.8H uzp2 v25.8H, v20.8H, v21.8H smlal v17.4S, v13.4H, v14.4H uzp1 v26.8H, v22.8H, v23.8H smlal2 v19.4S, v13.8H, v14.8H uzp2 v27.8H, v22.8H, v23.8H ld1 {v31.8H}, [src2asy_3], #16 smlal v16.4S, v24.4H, v26.4H smlal2 v18.4S, v24.8H, v26.8H smlal v17.4S, v24.4H, v27.4H smlal2 v19.4S, v24.8H, v27.8H smlal v16.4S, v25.4H, v31.4H smlal2 v18.4S, v25.8H, v31.8H smlal v17.4S, v25.4H, v26.4H smlal2 v19.4S, v25.8H, v26.8H #else smlal v16.4S, v12.4H, v14.4H smlal2 v18.4S, v12.8H, v14.8H smlal v17.4S, v12.4H, v15.4H smlal2 v19.4S, v12.8H, v15.8H smlal v16.4S, v13.4H, v30.4H smlal2 v18.4S, v13.8H, v30.8H smlal v17.4S, v13.4H, v14.4H smlal2 v19.4S, v13.8H, v14.8H #endif #else smlal v16.4S, v8.4H, v10.4H smlal2 v18.4S, v8.8H, v10.8H smlal v17.4S, v8.4H, v11.4H smlal2 v19.4S, v8.8H, v11.8H smlal v16.4S, v9.4H, v29.4H smlal2 v18.4S, v9.8H, v29.8H smlal v17.4S, v9.4H, v10.4H smlal2 v19.4S, v9.8H, v10.8H #endif // TODO:interleaving mov counter, #15 _asymmetric_mul_loop: ldr q20, [src1_0, #0*16] uzp1 v6.8H, v16.8H, v18.8H ldr q21, [src1_0, #1*16] uzp1 v7.8H, v17.8H, v19.8H ldr q22, [src2_0, #0*16] mul v6.8H, v6.8H, v4.H[1] ldr q23, [src2_0, #1*16] mul v7.8H, v7.8H, v4.H[1] add src1_0, src1_0, #32 add src2_0, src2_0, #32 smlal v16.4S, v6.4H, v4.H[0] uzp1 v0.8H, v20.8H, v21.8H smlal2 v18.4S, v6.8H, v4.H[0] uzp2 v1.8H, v20.8H, v21.8H smlal v17.4S, v7.4H, v4.H[0] uzp1 v2.8H, v22.8H, v23.8H smlal2 v19.4S, v7.8H, v4.H[0] uzp2 v3.8H, v22.8H, v23.8H ld1 {v28.8H}, [src2asy_0], #16 uzp2 v6.8H, v16.8H, v18.8H uzp2 v7.8H, v17.8H, v19.8H st2 { v6.8H, v7.8H}, [des], #32 smull v16.4S, v0.4H, v2.4H ldr q20, [src1_1, #0*16] smull2 v18.4S, v0.8H, v2.8H ldr q21, [src1_1, #1*16] smull v17.4S, v0.4H, v3.4H ldr q22, [src2_1, #0*16] smull2 v19.4S, v0.8H, v3.8H ldr q23, [src2_1, #1*16] add src1_1, src1_1, #32 add src2_1, src2_1, #32 smlal v16.4S, v1.4H, v28.4H uzp1 v8.8H, v20.8H, v21.8H smlal2 v18.4S, v1.8H, v28.8H uzp2 v9.8H, v20.8H, v21.8H smlal v17.4S, v1.4H, v2.4H uzp1 v10.8H, v22.8H, v23.8H smlal2 v19.4S, v1.8H, v2.8H uzp2 v11.8H, v22.8H, v23.8H ld1 {v29.8H}, [src2asy_1], #16 #if KYBER_K > 2 smlal v16.4S, v8.4H, v10.4H ldr q20, [src1_2, #0*16] smlal2 v18.4S, v8.8H, v10.8H ldr q21, [src1_2, #1*16] smlal v17.4S, v8.4H, v11.4H ldr q22, [src2_2, #0*16] smlal2 v19.4S, v8.8H, v11.8H ldr q23, [src2_2, #1*16] add src1_2, src1_2, #32 add src2_2, src2_2, #32 smlal v16.4S, v9.4H, v29.4H uzp1 v12.8H, v20.8H, v21.8H smlal2 v18.4S, v9.8H, v29.8H uzp2 v13.8H, v20.8H, v21.8H smlal v17.4S, v9.4H, v10.4H uzp1 v14.8H, 
v22.8H, v23.8H smlal2 v19.4S, v9.8H, v10.8H uzp2 v15.8H, v22.8H, v23.8H ld1 {v30.8H}, [src2asy_2], #16 #if KYBER_K > 3 smlal v16.4S, v12.4H, v14.4H ldr q20, [src1_3, #0*16] smlal2 v18.4S, v12.8H, v14.8H ldr q21, [src1_3, #1*16] smlal v17.4S, v12.4H, v15.4H ldr q22, [src2_3, #0*16] smlal2 v19.4S, v12.8H, v15.8H ldr q23, [src2_3, #1*16] add src1_3, src1_3, #32 add src2_3, src2_3, #32 smlal v16.4S, v13.4H, v30.4H uzp1 v24.8H, v20.8H, v21.8H smlal2 v18.4S, v13.8H, v30.8H uzp2 v25.8H, v20.8H, v21.8H smlal v17.4S, v13.4H, v14.4H uzp1 v26.8H, v22.8H, v23.8H smlal2 v19.4S, v13.8H, v14.8H uzp2 v27.8H, v22.8H, v23.8H ld1 {v31.8H}, [src2asy_3], #16 smlal v16.4S, v24.4H, v26.4H smlal2 v18.4S, v24.8H, v26.8H smlal v17.4S, v24.4H, v27.4H smlal2 v19.4S, v24.8H, v27.8H smlal v16.4S, v25.4H, v31.4H smlal2 v18.4S, v25.8H, v31.8H smlal v17.4S, v25.4H, v26.4H smlal2 v19.4S, v25.8H, v26.8H #else smlal v16.4S, v12.4H, v14.4H smlal2 v18.4S, v12.8H, v14.8H smlal v17.4S, v12.4H, v15.4H smlal2 v19.4S, v12.8H, v15.8H smlal v16.4S, v13.4H, v30.4H smlal2 v18.4S, v13.8H, v30.8H smlal v17.4S, v13.4H, v14.4H smlal2 v19.4S, v13.8H, v14.8H #endif #else smlal v16.4S, v8.4H, v10.4H smlal2 v18.4S, v8.8H, v10.8H smlal v17.4S, v8.4H, v11.4H smlal2 v19.4S, v8.8H, v11.8H smlal v16.4S, v9.4H, v29.4H smlal2 v18.4S, v9.8H, v29.8H smlal v17.4S, v9.4H, v10.4H smlal2 v19.4S, v9.8H, v10.8H #endif sub counter, counter, #1 cbnz counter, _asymmetric_mul_loop uzp1 v6.8H, v16.8H, v18.8H uzp1 v7.8H, v17.8H, v19.8H mul v6.8H, v6.8H, v4.H[1] mul v7.8H, v7.8H, v4.H[1] smlal v16.4S, v6.4H, v4.H[0] smlal2 v18.4S, v6.8H, v4.H[0] smlal v17.4S, v7.4H, v4.H[0] smlal2 v19.4S, v7.8H, v4.H[0] uzp2 v6.8H, v16.8H, v18.8H uzp2 v7.8H, v17.8H, v19.8H st2 { v6.8H, v7.8H}, [des], #32 .unreq des .unreq src1_0 .unreq src2_0 .unreq src2asy_0 .unreq src1_1 .unreq src2_1 .unreq src2asy_1 .unreq src1_2 .unreq src2_2 .unreq src2asy_2 .unreq src1_3 .unreq src2_3 .unreq src2asy_3 .unreq counter pop_all ret .align 2 .global PQCLEAN_MLKEM1024_AARCH64__asm_asymmetric_mul_montgomery .global _PQCLEAN_MLKEM1024_AARCH64__asm_asymmetric_mul_montgomery PQCLEAN_MLKEM1024_AARCH64__asm_asymmetric_mul_montgomery: _PQCLEAN_MLKEM1024_AARCH64__asm_asymmetric_mul_montgomery: push_all des .req x11 src1_0 .req x0 src2_0 .req x1 src2asy_0 .req x2 src1_1 .req x4 src2_1 .req x5 src2asy_1 .req x6 src1_2 .req x8 src2_2 .req x9 src2asy_2 .req x10 src1_3 .req x12 src2_3 .req x13 src2asy_3 .req x14 counter .req x19 ldr q4, [x3] add des, x4, #0 add src1_1, src1_0, #512*1 add src2_1, src2_0, #512*1 add src2asy_1, src2asy_0, #256*1 #if KYBER_K > 2 add src1_2, src1_0, #512*2 add src2_2, src2_0, #512*2 add src2asy_2, src2asy_0, #256*2 #endif #if KYBER_K > 3 add src1_3, src1_0, #512*3 add src2_3, src2_0, #512*3 add src2asy_3, src2asy_0, #256*3 #endif ldr q20, [src1_0, #0*16] ldr q21, [src1_0, #1*16] ldr q22, [src2_0, #0*16] ldr q23, [src2_0, #1*16] add src1_0, src1_0, #32 add src2_0, src2_0, #32 uzp1 v0.8H, v20.8H, v21.8H uzp2 v1.8H, v20.8H, v21.8H uzp1 v2.8H, v22.8H, v23.8H uzp2 v3.8H, v22.8H, v23.8H ld1 {v28.8H}, [src2asy_0], #16 smull v16.4S, v0.4H, v2.4H ldr q20, [src1_1, #0*16] smull2 v18.4S, v0.8H, v2.8H ldr q21, [src1_1, #1*16] smull v17.4S, v0.4H, v3.4H ldr q22, [src2_1, #0*16] smull2 v19.4S, v0.8H, v3.8H ldr q23, [src2_1, #1*16] add src1_1, src1_1, #32 add src2_1, src2_1, #32 smlal v16.4S, v1.4H, v28.4H uzp1 v8.8H, v20.8H, v21.8H smlal2 v18.4S, v1.8H, v28.8H uzp2 v9.8H, v20.8H, v21.8H smlal v17.4S, v1.4H, v2.4H uzp1 v10.8H, v22.8H, v23.8H smlal2 v19.4S, v1.8H, v2.8H uzp2 v11.8H, v22.8H, v23.8H 
ld1 {v29.8H}, [src2asy_1], #16 #if KYBER_K > 2 smlal v16.4S, v8.4H, v10.4H ldr q20, [src1_2, #0*16] smlal2 v18.4S, v8.8H, v10.8H ldr q21, [src1_2, #1*16] smlal v17.4S, v8.4H, v11.4H ldr q22, [src2_2, #0*16] smlal2 v19.4S, v8.8H, v11.8H ldr q23, [src2_2, #1*16] add src1_2, src1_2, #32 add src2_2, src2_2, #32 smlal v16.4S, v9.4H, v29.4H uzp1 v12.8H, v20.8H, v21.8H smlal2 v18.4S, v9.8H, v29.8H uzp2 v13.8H, v20.8H, v21.8H smlal v17.4S, v9.4H, v10.4H uzp1 v14.8H, v22.8H, v23.8H smlal2 v19.4S, v9.8H, v10.8H uzp2 v15.8H, v22.8H, v23.8H ld1 {v30.8H}, [src2asy_2], #16 #if KYBER_K > 3 smlal v16.4S, v12.4H, v14.4H ldr q20, [src1_3, #0*16] smlal2 v18.4S, v12.8H, v14.8H ldr q21, [src1_3, #1*16] smlal v17.4S, v12.4H, v15.4H ldr q22, [src2_3, #0*16] smlal2 v19.4S, v12.8H, v15.8H ldr q23, [src2_3, #1*16] add src1_3, src1_3, #32 add src2_3, src2_3, #32 smlal v16.4S, v13.4H, v30.4H uzp1 v24.8H, v20.8H, v21.8H smlal2 v18.4S, v13.8H, v30.8H uzp2 v25.8H, v20.8H, v21.8H smlal v17.4S, v13.4H, v14.4H uzp1 v26.8H, v22.8H, v23.8H smlal2 v19.4S, v13.8H, v14.8H uzp2 v27.8H, v22.8H, v23.8H ld1 {v31.8H}, [src2asy_3], #16 smlal v16.4S, v24.4H, v26.4H smlal2 v18.4S, v24.8H, v26.8H smlal v17.4S, v24.4H, v27.4H smlal2 v19.4S, v24.8H, v27.8H smlal v16.4S, v25.4H, v31.4H smlal2 v18.4S, v25.8H, v31.8H smlal v17.4S, v25.4H, v26.4H smlal2 v19.4S, v25.8H, v26.8H #else smlal v16.4S, v12.4H, v14.4H smlal2 v18.4S, v12.8H, v14.8H smlal v17.4S, v12.4H, v15.4H smlal2 v19.4S, v12.8H, v15.8H smlal v16.4S, v13.4H, v30.4H smlal2 v18.4S, v13.8H, v30.8H smlal v17.4S, v13.4H, v14.4H smlal2 v19.4S, v13.8H, v14.8H #endif #else smlal v16.4S, v8.4H, v10.4H smlal2 v18.4S, v8.8H, v10.8H smlal v17.4S, v8.4H, v11.4H smlal2 v19.4S, v8.8H, v11.8H smlal v16.4S, v9.4H, v29.4H smlal2 v18.4S, v9.8H, v29.8H smlal v17.4S, v9.4H, v10.4H smlal2 v19.4S, v9.8H, v10.8H #endif mov counter, #15 _asymmetric_mul_montgomery_loop: uzp1 v6.8H, v16.8H, v18.8H uzp1 v7.8H, v17.8H, v19.8H mul v6.8H, v6.8H, v4.H[1] mul v7.8H, v7.8H, v4.H[1] ldr q20, [src1_0, #0*16] smlal v16.4S, v6.4H, v4.H[0] ldr q21, [src1_0, #1*16] smlal2 v18.4S, v6.8H, v4.H[0] ldr q22, [src2_0, #0*16] smlal v17.4S, v7.4H, v4.H[0] ldr q23, [src2_0, #1*16] smlal2 v19.4S, v7.8H, v4.H[0] add src1_0, src1_0, #32 add src2_0, src2_0, #32 uzp2 v6.8H, v16.8H, v18.8H uzp2 v7.8H, v17.8H, v19.8H uzp1 v0.8H, v20.8H, v21.8H sqrdmulh v16.8H, v6.8H, v4.H[4] uzp2 v1.8H, v20.8H, v21.8H sqrdmulh v17.8H, v7.8H, v4.H[4] uzp1 v2.8H, v22.8H, v23.8H mul v6.8H, v6.8H, v4.H[5] uzp2 v3.8H, v22.8H, v23.8H mul v7.8H, v7.8H, v4.H[5] mls v6.8H, v16.8H, v4.H[0] mls v7.8H, v17.8H, v4.H[0] st2 { v6.8H, v7.8H}, [des], #32 ld1 {v28.8H}, [src2asy_0], #16 smull v16.4S, v0.4H, v2.4H ldr q20, [src1_1, #0*16] smull2 v18.4S, v0.8H, v2.8H ldr q21, [src1_1, #1*16] smull v17.4S, v0.4H, v3.4H ldr q22, [src2_1, #0*16] smull2 v19.4S, v0.8H, v3.8H ldr q23, [src2_1, #1*16] add src1_1, src1_1, #32 add src2_1, src2_1, #32 smlal v16.4S, v1.4H, v28.4H uzp1 v8.8H, v20.8H, v21.8H smlal2 v18.4S, v1.8H, v28.8H uzp2 v9.8H, v20.8H, v21.8H smlal v17.4S, v1.4H, v2.4H uzp1 v10.8H, v22.8H, v23.8H smlal2 v19.4S, v1.8H, v2.8H uzp2 v11.8H, v22.8H, v23.8H ld1 {v29.8H}, [src2asy_1], #16 #if KYBER_K > 2 smlal v16.4S, v8.4H, v10.4H ldr q20, [src1_2, #0*16] smlal2 v18.4S, v8.8H, v10.8H ldr q21, [src1_2, #1*16] smlal v17.4S, v8.4H, v11.4H ldr q22, [src2_2, #0*16] smlal2 v19.4S, v8.8H, v11.8H ldr q23, [src2_2, #1*16] add src1_2, src1_2, #32 add src2_2, src2_2, #32 smlal v16.4S, v9.4H, v29.4H uzp1 v12.8H, v20.8H, v21.8H smlal2 v18.4S, v9.8H, v29.8H uzp2 v13.8H, v20.8H, v21.8H 
smlal v17.4S, v9.4H, v10.4H uzp1 v14.8H, v22.8H, v23.8H smlal2 v19.4S, v9.8H, v10.8H uzp2 v15.8H, v22.8H, v23.8H ld1 {v30.8H}, [src2asy_2], #16 #if KYBER_K > 3 smlal v16.4S, v12.4H, v14.4H ldr q20, [src1_3, #0*16] smlal2 v18.4S, v12.8H, v14.8H ldr q21, [src1_3, #1*16] smlal v17.4S, v12.4H, v15.4H ldr q22, [src2_3, #0*16] smlal2 v19.4S, v12.8H, v15.8H ldr q23, [src2_3, #1*16] add src1_3, src1_3, #32 add src2_3, src2_3, #32 smlal v16.4S, v13.4H, v30.4H uzp1 v24.8H, v20.8H, v21.8H smlal2 v18.4S, v13.8H, v30.8H uzp2 v25.8H, v20.8H, v21.8H smlal v17.4S, v13.4H, v14.4H uzp1 v26.8H, v22.8H, v23.8H smlal2 v19.4S, v13.8H, v14.8H uzp2 v27.8H, v22.8H, v23.8H ld1 {v31.8H}, [src2asy_3], #16 smlal v16.4S, v24.4H, v26.4H smlal2 v18.4S, v24.8H, v26.8H smlal v17.4S, v24.4H, v27.4H smlal2 v19.4S, v24.8H, v27.8H smlal v16.4S, v25.4H, v31.4H smlal2 v18.4S, v25.8H, v31.8H smlal v17.4S, v25.4H, v26.4H smlal2 v19.4S, v25.8H, v26.8H #else smlal v16.4S, v12.4H, v14.4H smlal2 v18.4S, v12.8H, v14.8H smlal v17.4S, v12.4H, v15.4H smlal2 v19.4S, v12.8H, v15.8H smlal v16.4S, v13.4H, v30.4H smlal2 v18.4S, v13.8H, v30.8H smlal v17.4S, v13.4H, v14.4H smlal2 v19.4S, v13.8H, v14.8H #endif #else smlal v16.4S, v8.4H, v10.4H smlal2 v18.4S, v8.8H, v10.8H smlal v17.4S, v8.4H, v11.4H smlal2 v19.4S, v8.8H, v11.8H smlal v16.4S, v9.4H, v29.4H smlal2 v18.4S, v9.8H, v29.8H smlal v17.4S, v9.4H, v10.4H smlal2 v19.4S, v9.8H, v10.8H #endif sub counter, counter, #1 cbnz counter, _asymmetric_mul_montgomery_loop uzp1 v6.8H, v16.8H, v18.8H uzp1 v7.8H, v17.8H, v19.8H mul v6.8H, v6.8H, v4.H[1] mul v7.8H, v7.8H, v4.H[1] smlal v16.4S, v6.4H, v4.H[0] smlal2 v18.4S, v6.8H, v4.H[0] smlal v17.4S, v7.4H, v4.H[0] smlal2 v19.4S, v7.8H, v4.H[0] uzp2 v6.8H, v16.8H, v18.8H uzp2 v7.8H, v17.8H, v19.8H sqrdmulh v16.8H, v6.8H, v4.H[4] sqrdmulh v17.8H, v7.8H, v4.H[4] mul v6.8H, v6.8H, v4.H[5] mul v7.8H, v7.8H, v4.H[5] mls v6.8H, v16.8H, v4.H[0] mls v7.8H, v17.8H, v4.H[0] st2 { v6.8H, v7.8H}, [des], #32 .unreq des .unreq src1_0 .unreq src2_0 .unreq src2asy_0 .unreq src1_1 .unreq src2_1 .unreq src2asy_1 .unreq src1_2 .unreq src2_2 .unreq src2asy_2 .unreq src1_3 .unreq src2_3 .unreq src2asy_3 .unreq counter pop_all ret
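In scalar terms, __asm_point_mul_extended appears to precompute, per coefficient pair, the odd coefficient of b multiplied by the twiddle, and __asm_asymmetric_mul then accumulates the KYBER_K = 4 vector entries in 32-bit lanes before reducing. A hedged C sketch of that accumulation follows (function and parameter names are illustrative, and the final reduction done with the constants in v4 is omitted).

#include <stdint.h>

#define KYBER_K 4   /* ml-kem-1024 */

/* Illustrative sketch: c[0] + c[1]*x = sum over i of
 * (a_i[0] + a_i[1]*x) * (b_i[0] + b_i[1]*x) mod (x^2 - zeta),
 * where bzeta[i] = b_i[1] * zeta was precomputed (cf. point_mul_extended above). */
static void asym_accumulate(int32_t c[2],
                            const int16_t a[KYBER_K][2],
                            const int16_t b[KYBER_K][2],
                            const int16_t bzeta[KYBER_K])
{
    c[0] = 0;
    c[1] = 0;
    for (int i = 0; i < KYBER_K; i++) {
        c[0] += (int32_t)a[i][0] * b[i][0] + (int32_t)a[i][1] * bzeta[i];
        c[1] += (int32_t)a[i][0] * b[i][1] + (int32_t)a[i][1] * b[i][0];
    }
}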
mktmansour/MKT-KSA-Geolocation-Security
12,806
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/ml-kem-1024/aarch64/__asm_NTT.S
/* * We offer * CC0 1.0 Universal or the following MIT License for this file. * You may freely choose one of them that applies. * * MIT License * * Copyright (c) 2023: Hanno Becker, Vincent Hwang, Matthias J. Kannwischer, Bo-Yin Yang, and Shang-Yi Yang * Copyright (c) 2023: Vincent Hwang * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "macros.inc" .align 2 .global PQCLEAN_MLKEM1024_AARCH64__asm_ntt_SIMD_top .global _PQCLEAN_MLKEM1024_AARCH64__asm_ntt_SIMD_top PQCLEAN_MLKEM1024_AARCH64__asm_ntt_SIMD_top: _PQCLEAN_MLKEM1024_AARCH64__asm_ntt_SIMD_top: push_simd Q .req w8 src .req x0 table .req x1 counter .req x11 ldrsh Q, [x2, #0] ldr q0, [table, # 0*16] ldr q1, [table, # 1*16] ldr q2, [table, # 2*16] ldr q3, [table, # 3*16] mov v0.H[0], Q ldr q13, [src, # 9*32] ldr q15, [src, #11*32] ldr q17, [src, #13*32] ldr q19, [src, #15*32] qo_butterfly_topl \ v13, v15, v17, v19, v28, v29, v30, v31, \ v0, \ v0, 2, 3, v0, 2, 3, v0, 2, 3, v0, 2, 3, \ src, \ q5, q7, q9, q11, \ #1*32, #3*32, #5*32, #7*32 qo_butterfly_mixll \ v5, v7, v9, v11, v13, v15, v17, v19, v28, v29, v30, v31, \ v12, v14, v16, v18, v20, v21, v22, v23, \ v0, \ v0, 2, 3, v0, 2, 3, v0, 2, 3, v0, 2, 3, \ src, \ q12, q14, q16, q18, \ #8*32, #10*32, #12*32, #14*32, \ src, \ q4, q6, q8, q10, \ #0*32, #2*32, #4*32, #6*32 qo_butterfly_mix \ v4, v6, v8, v10, v12, v14, v16, v18, v20, v21, v22, v23, \ v5, v7, v13, v15, v9, v11, v17, v19, v28, v29, v30, v31, \ v0, \ v0, 2, 3, v0, 2, 3, v0, 2, 3, v0, 2, 3, \ v0, 4, 5, v0, 4, 5, v0, 6, 7, v0, 6, 7 qo_butterfly_mix \ v5, v7, v13, v15, v9, v11, v17, v19, v28, v29, v30, v31, \ v4, v6, v12, v14, v8, v10, v16, v18, v20, v21, v22, v23, \ v0, \ v0, 4, 5, v0, 4, 5, v0, 6, 7, v0, 6, 7, \ v0, 4, 5, v0, 4, 5, v0, 6, 7, v0, 6, 7 qo_butterfly_mix \ v4, v6, v12, v14, v8, v10, v16, v18, v20, v21, v22, v23, \ v5, v9, v13, v17, v7, v11, v15, v19, v28, v29, v30, v31, \ v0, \ v0, 4, 5, v0, 4, 5, v0, 6, 7, v0, 6, 7, \ v1, 0, 1, v1, 2, 3, v1, 4, 5, v1, 6, 7 qo_butterfly_mix \ v5, v9, v13, v17, v7, v11, v15, v19, v28, v29, v30, v31, \ v4, v8, v12, v16, v6, v10, v14, v18, v20, v21, v22, v23, \ v0, \ v1, 0, 1, v1, 2, 3, v1, 4, 5, v1, 6, 7, \ v1, 0, 1, v1, 2, 3, v1, 4, 5, v1, 6, 7 qo_butterfly_mix \ v4, v8, v12, v16, v6, v10, v14, v18, v20, v21, v22, v23, \ v4, v6, v8, v10, v5, v7, v9, v11, v28, v29, v30, v31, \ v0, \ v1, 0, 1, v1, 2, 3, v1, 4, 5, v1, 6, 7, \ v2, 0, 1, v2, 2, 3, v2, 4, 5, v2, 6, 7 qo_butterfly_mixsls \ v4, v6, v8, v10, v5, v7, v9, v11, v28, v29, v30, v31, \ v13, v15, v17, v19, v20, v21, v22, v23, \ v0, \ v3, 
0, 1, v3, 2, 3, v3, 4, 5, v3, 6, 7, \ src, \ q5, q7, q9, q11, \ #1*32, #3*32, #5*32, #7*32, \ src, \ q5, q7, q9, q11, \ #(16+1*32), #(16+3*32), #(16+5*32), #(16+7*32), \ src, \ q4, q6, q8, q10, \ #0*32, #2*32, #4*32, #6*32 qo_butterfly_botsls \ v12, v14, v16, v18, v13, v15, v17, v19, v20, v21, v22, v23, \ src, \ q13, q15, q17, q19, \ #9*32, #11*32, #13*32, #15*32, \ src, \ q13, q15, q17, q19, \ #(16+9*32), #(16+11*32), #(16+13*32), #(16+15*32), \ src, \ q12, q14, q16, q18, \ #8*32, #10*32, #12*32, #14*32 qo_butterfly_topl \ v13, v15, v17, v19, v28, v29, v30, v31, \ v0, \ v0, 2, 3, v0, 2, 3, v0, 2, 3, v0, 2, 3, \ src, \ q12, q14, q16, q18, \ #(16+8*32), #(16+10*32), #(16+12*32), #(16+14*32) qo_butterfly_mixl \ v5, v7, v9, v11, v13, v15, v17, v19, v28, v29, v30, v31, \ v12, v14, v16, v18, v20, v21, v22, v23, \ v0, \ v0, 2, 3, v0, 2, 3, v0, 2, 3, v0, 2, 3, \ src, \ q4, q6, q8, q10, \ #(16+0*32), #(16+2*32), #(16+4*32), #(16+6*32) qo_butterfly_mix \ v4, v6, v8, v10, v12, v14, v16, v18, v20, v21, v22, v23, \ v5, v7, v13, v15, v9, v11, v17, v19, v28, v29, v30, v31, \ v0, \ v0, 2, 3, v0, 2, 3, v0, 2, 3, v0, 2, 3, \ v0, 4, 5, v0, 4, 5, v0, 6, 7, v0, 6, 7 qo_butterfly_mix \ v5, v7, v13, v15, v9, v11, v17, v19, v28, v29, v30, v31, \ v4, v6, v12, v14, v8, v10, v16, v18, v20, v21, v22, v23, \ v0, \ v0, 4, 5, v0, 4, 5, v0, 6, 7, v0, 6, 7, \ v0, 4, 5, v0, 4, 5, v0, 6, 7, v0, 6, 7 qo_butterfly_mix \ v4, v6, v12, v14, v8, v10, v16, v18, v20, v21, v22, v23, \ v5, v9, v13, v17, v7, v11, v15, v19, v28, v29, v30, v31, \ v0, \ v0, 4, 5, v0, 4, 5, v0, 6, 7, v0, 6, 7, \ v1, 0, 1, v1, 2, 3, v1, 4, 5, v1, 6, 7 qo_butterfly_mix \ v5, v9, v13, v17, v7, v11, v15, v19, v28, v29, v30, v31, \ v4, v8, v12, v16, v6, v10, v14, v18, v20, v21, v22, v23, \ v0, \ v1, 0, 1, v1, 2, 3, v1, 4, 5, v1, 6, 7, \ v1, 0, 1, v1, 2, 3, v1, 4, 5, v1, 6, 7 qo_butterfly_mix \ v4, v8, v12, v16, v6, v10, v14, v18, v20, v21, v22, v23, \ v4, v6, v8, v10, v5, v7, v9, v11, v28, v29, v30, v31, \ v0, \ v1, 0, 1, v1, 2, 3, v1, 4, 5, v1, 6, 7, \ v2, 0, 1, v2, 2, 3, v2, 4, 5, v2, 6, 7 qo_butterfly_mixss \ v4, v6, v8, v10, v5, v7, v9, v11, v28, v29, v30, v31, \ v13, v15, v17, v19, v20, v21, v22, v23, \ v0, \ v3, 0, 1, v3, 2, 3, v3, 4, 5, v3, 6, 7, \ src, \ q5, q7, q9, q11, \ #(16+1*32), #(16+3*32), #(16+5*32), #(16+7*32), \ src, \ q4, q6, q8, q10, \ #(16+0*32), #(16+2*32), #(16+4*32), #(16+6*32) qo_butterfly_botss \ v12, v14, v16, v18, v13, v15, v17, v19, v20, v21, v22, v23, \ src, \ q13, q15, q17, q19, \ #(16+9*32), #(16+11*32), #(16+13*32), #(16+15*32), \ src, \ q12, q14, q16, q18, \ #(16+8*32), #(16+10*32), #(16+12*32), #(16+14*32) .unreq Q .unreq src .unreq table .unreq counter pop_simd ret .align 2 .global PQCLEAN_MLKEM1024_AARCH64__asm_ntt_SIMD_bot .global _PQCLEAN_MLKEM1024_AARCH64__asm_ntt_SIMD_bot PQCLEAN_MLKEM1024_AARCH64__asm_ntt_SIMD_bot: _PQCLEAN_MLKEM1024_AARCH64__asm_ntt_SIMD_bot: push_simd Q .req w8 BarrettM .req w9 src0 .req x0 src1 .req x1 table .req x10 counter .req x11 ldrsh Q, [x2, #0] ldrsh BarrettM, [x2, #8] add table, x1, #64 add src0, x0, #256*0 add src1, x0, #256*1 mov v0.H[0], Q mov v0.H[1], BarrettM ldr q28, [src0, # 1*16] ldr q29, [src1, # 1*16] ldr q30, [src0, # 3*16] ldr q31, [src1, # 3*16] trn_4x4_l3 v28, v29, v30, v31, v20, v21, v22, v23, table, q1, q2, q3, #1*16, #2*16, #3*16 do_butterfly_vec_top_2ltrn_4x4 \ v29, v31, v18, v19, v0, v2, v3, v2, v3, \ src0, src1, \ q24, q25, q26, q27, #0*16, #0*16, #2*16, #2*16, \ v24, v25, v26, v27, v20, v21, v22, v23 do_butterfly_vec_mixl \ v25, v27, v29, v31, v18, v19, \ v28, v30, v16, 
v17, \ v0, \ v2, v3, v2, v3, \ table, \ q4, q5, q6, q7, #4*16, #5*16, #6*16, #7*16 do_butterfly_vec_mixl \ v24, v26, v28, v30, v16, v17, \ v27, v31, v18, v19, \ v0, \ v4, v5, v6, v7, \ table, \ q8, q9, q10, q11, #8*16, #9*16, #10*16, #11*16 do_butterfly_vec_mixl \ v25, v29, v27, v31, v18, v19, \ v26, v30, v16, v17, \ v0, \ v4, v5, v6, v7, \ table, \ q12, q13, q14, q15, #12*16, #13*16, #14*16, #15*16 add table, table, #256 do_butterfly_vec_mix v24, v28, v26, v30, v16, v17, v24, v26, v25, v27, v18, v19, v0, v4, v5, v6, v7, v8, v9, v10, v11 do_butterfly_vec_mix v24, v26, v25, v27, v18, v19, v28, v30, v29, v31, v16, v17, v0, v8, v9, v10, v11, v12, v13, v14, v15 do_butterfly_vec_bot_oo_barrett_trn_4x4 \ v28, v30, v29, v31, v16, v17, \ v24, v25, v26, v27, v20, v21, v22, v23, v28, v29, v30, v31, v16, v17, v18, v19, v0, #11, v0 trn_4x4_2s4 v28, v29, v30, v31, v16, v17, v18, v19, src0, src1, q24, q25, q26, q27, #0*16, #0*16, #2*16, #2*16 mov counter, #3 _ntt_bot_loop: str q28, [src0, # 1*16] ldr q28, [src0, #(64+1*16)] str q29, [src1, # 1*16] ldr q29, [src1, #(64+1*16)] str q30, [src0, # 3*16] ldr q30, [src0, #(64+3*16)] str q31, [src1, # 3*16] ldr q31, [src1, #(64+3*16)] add src0, src0, #64 add src1, src1, #64 trn_4x4_l3 v28, v29, v30, v31, v20, v21, v22, v23, table, q1, q2, q3, #1*16, #2*16, #3*16 do_butterfly_vec_top_2ltrn_4x4 \ v29, v31, v18, v19, v0, v2, v3, v2, v3, \ src0, src1, \ q24, q25, q26, q27, #0*16, #0*16, #2*16, #2*16, \ v24, v25, v26, v27, v20, v21, v22, v23 do_butterfly_vec_mixl \ v25, v27, v29, v31, v18, v19, \ v28, v30, v16, v17, \ v0, \ v2, v3, v2, v3, \ table, \ q4, q5, q6, q7, #4*16, #5*16, #6*16, #7*16 do_butterfly_vec_mixl \ v24, v26, v28, v30, v16, v17, \ v27, v31, v18, v19, \ v0, \ v4, v5, v6, v7, \ table, \ q8, q9, q10, q11, #8*16, #9*16, #10*16, #11*16 do_butterfly_vec_mixl \ v25, v29, v27, v31, v18, v19, \ v26, v30, v16, v17, \ v0, \ v4, v5, v6, v7, \ table, \ q12, q13, q14, q15, #12*16, #13*16, #14*16, #15*16 add table, table, #256 do_butterfly_vec_mix v24, v28, v26, v30, v16, v17, v24, v26, v25, v27, v18, v19, v0, v4, v5, v6, v7, v8, v9, v10, v11 do_butterfly_vec_mix v24, v26, v25, v27, v18, v19, v28, v30, v29, v31, v16, v17, v0, v8, v9, v10, v11, v12, v13, v14, v15 do_butterfly_vec_bot_oo_barrett_trn_4x4 \ v28, v30, v29, v31, v16, v17, \ v24, v25, v26, v27, v20, v21, v22, v23, v28, v29, v30, v31, v16, v17, v18, v19, v0, #11, v0 trn_4x4_2s4 v28, v29, v30, v31, v16, v17, v18, v19, src0, src1, q24, q25, q26, q27, #0*16, #0*16, #2*16, #2*16 sub counter, counter, #1 cbnz counter, _ntt_bot_loop str q28, [src0, # 1*16] str q29, [src1, # 1*16] str q30, [src0, # 3*16] str q31, [src1, # 3*16] add src0, src0, #64 add src1, src1, #64 .unreq Q .unreq BarrettM .unreq src0 .unreq src1 .unreq table .unreq counter pop_simd ret
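The qo_butterfly_* macros in the NTT code above vectorize the Cooley-Tukey butterfly: each invocation applies the twiddle multiplication plus add/sub step to several pairs of NEON registers while interleaving the loads and stores of the neighbouring batches. As a point of reference, here is a minimal scalar C sketch of that butterfly, assuming the standard ML-KEM reference arithmetic for q = 3329; the names fqmul, montgomery_reduce and ct_butterfly are illustrative and are not part of this file.

#include <stdint.h>

#define KYBER_Q 3329
#define QINV   -3327   /* q^-1 mod 2^16, as in the ML-KEM reference code */

/* Montgomery reduction: for |a| < q*2^15, returns a * 2^-16 mod q, centered. */
static int16_t montgomery_reduce(int32_t a) {
    int16_t t = (int16_t)((int16_t)a * (int16_t)QINV);
    return (int16_t)((a - (int32_t)t * KYBER_Q) >> 16);
}

/* Multiply two coefficients and reduce back into 16 bits. */
static int16_t fqmul(int16_t a, int16_t b) {
    return montgomery_reduce((int32_t)a * b);
}

/* One Cooley-Tukey butterfly on the pair (r[j], r[j+len]) with twiddle zeta.
 * The NEON macros above apply this to 8 packed 16-bit coefficients per
 * instruction and to four register pairs per macro invocation. */
void ct_butterfly(int16_t r[256], int j, int len, int16_t zeta) {
    int16_t t  = fqmul(zeta, r[j + len]);
    r[j + len] = (int16_t)(r[j] - t);
    r[j]       = (int16_t)(r[j] + t);
}

In the reference code the twiddle factors are stored premultiplied by 2^16 mod q, so the Montgomery reduction inside fqmul cancels the extra factor and the butterfly stays entirely within 16-bit lanes.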
mktmansour/MKT-KSA-Geolocation-Security
7,475
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/ml-kem-1024/aarch64/__asm_poly.S
/* * We offer * CC0 1.0 Universal or the following MIT License for this file. * You may freely choose one of them that applies. * * MIT License * * Copyright (c) 2023: Hanno Becker, Vincent Hwang, Matthias J. Kannwischer, Bo-Yin Yang, and Shang-Yi Yang * Copyright (c) 2023: Vincent Hwang * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "macros.inc" .align 2 .global PQCLEAN_MLKEM1024_AARCH64__asm_add_reduce .global _PQCLEAN_MLKEM1024_AARCH64__asm_add_reduce PQCLEAN_MLKEM1024_AARCH64__asm_add_reduce: _PQCLEAN_MLKEM1024_AARCH64__asm_add_reduce: mov w4, #3329 mov w5, #25519 add x2, x0, #0 dup v0.8H, w4 dup v1.8H, w5 ld1 {v24.8H, v25.8H, v26.8H, v27.8H}, [x1], #64 ld1 {v28.8H, v29.8H, v30.8H, v31.8H}, [x1], #64 ld1 {v16.8H, v17.8H, v18.8H, v19.8H}, [x2], #64 ld1 {v20.8H, v21.8H, v22.8H, v23.8H}, [x2], #64 add v4.8H, v16.8H, v24.8H add v5.8H, v17.8H, v25.8H add v6.8H, v18.8H, v26.8H add v7.8H, v19.8H, v27.8H add v16.8H, v20.8H, v28.8H add v17.8H, v21.8H, v29.8H add v18.8H, v22.8H, v30.8H add v19.8H, v23.8H, v31.8H oo_barrett v4, v5, v6, v7, v20, v21, v22, v23, v16, v17, v18, v19, v24, v25, v26, v27, v1, #11, v0 mov x15, #3 _add_reduce_loop: st1 { v4.8H, v5.8H, v6.8H, v7.8H}, [x0], #64 ld1 {v24.8H, v25.8H, v26.8H, v27.8H}, [x1], #64 st1 {v16.8H, v17.8H, v18.8H, v19.8H}, [x0], #64 ld1 {v28.8H, v29.8H, v30.8H, v31.8H}, [x1], #64 ld1 {v16.8H, v17.8H, v18.8H, v19.8H}, [x2], #64 ld1 {v20.8H, v21.8H, v22.8H, v23.8H}, [x2], #64 add v4.8H, v16.8H, v24.8H add v5.8H, v17.8H, v25.8H add v6.8H, v18.8H, v26.8H add v7.8H, v19.8H, v27.8H add v16.8H, v20.8H, v28.8H add v17.8H, v21.8H, v29.8H add v18.8H, v22.8H, v30.8H add v19.8H, v23.8H, v31.8H oo_barrett v4, v5, v6, v7, v20, v21, v22, v23, v16, v17, v18, v19, v24, v25, v26, v27, v1, #11, v0 sub x15, x15, #1 cbnz x15, _add_reduce_loop st1 { v4.8H, v5.8H, v6.8H, v7.8H}, [x0], #64 st1 {v16.8H, v17.8H, v18.8H, v19.8H}, [x0], #64 ret .align 2 .global PQCLEAN_MLKEM1024_AARCH64__asm_sub_reduce .global _PQCLEAN_MLKEM1024_AARCH64__asm_sub_reduce PQCLEAN_MLKEM1024_AARCH64__asm_sub_reduce: _PQCLEAN_MLKEM1024_AARCH64__asm_sub_reduce: mov w4, #3329 mov w5, #25519 add x2, x0, #0 dup v0.8H, w4 dup v1.8H, w5 ld1 {v24.8H, v25.8H, v26.8H, v27.8H}, [x1], #64 ld1 {v28.8H, v29.8H, v30.8H, v31.8H}, [x1], #64 ld1 {v16.8H, v17.8H, v18.8H, v19.8H}, [x2], #64 ld1 {v20.8H, v21.8H, v22.8H, v23.8H}, [x2], #64 sub v4.8H, v16.8H, v24.8H sub v5.8H, v17.8H, v25.8H sub v6.8H, v18.8H, v26.8H sub v7.8H, v19.8H, v27.8H sub v16.8H, v20.8H, v28.8H sub v17.8H, v21.8H, v29.8H sub v18.8H, v22.8H, 
v30.8H sub v19.8H, v23.8H, v31.8H oo_barrett v4, v5, v6, v7, v20, v21, v22, v23, v16, v17, v18, v19, v24, v25, v26, v27, v1, #11, v0 mov x15, #3 _sub_reduce_loop: st1 { v4.8H, v5.8H, v6.8H, v7.8H}, [x0], #64 ld1 {v24.8H, v25.8H, v26.8H, v27.8H}, [x1], #64 st1 {v16.8H, v17.8H, v18.8H, v19.8H}, [x0], #64 ld1 {v28.8H, v29.8H, v30.8H, v31.8H}, [x1], #64 ld1 {v16.8H, v17.8H, v18.8H, v19.8H}, [x2], #64 ld1 {v20.8H, v21.8H, v22.8H, v23.8H}, [x2], #64 sub v4.8H, v16.8H, v24.8H sub v5.8H, v17.8H, v25.8H sub v6.8H, v18.8H, v26.8H sub v7.8H, v19.8H, v27.8H sub v16.8H, v20.8H, v28.8H sub v17.8H, v21.8H, v29.8H sub v18.8H, v22.8H, v30.8H sub v19.8H, v23.8H, v31.8H oo_barrett v4, v5, v6, v7, v20, v21, v22, v23, v16, v17, v18, v19, v24, v25, v26, v27, v1, #11, v0 sub x15, x15, #1 cbnz x15, _sub_reduce_loop st1 { v4.8H, v5.8H, v6.8H, v7.8H}, [x0], #64 st1 {v16.8H, v17.8H, v18.8H, v19.8H}, [x0], #64 ret .align 2 .global PQCLEAN_MLKEM1024_AARCH64__asm_add_add_reduce .global _PQCLEAN_MLKEM1024_AARCH64__asm_add_add_reduce PQCLEAN_MLKEM1024_AARCH64__asm_add_add_reduce: _PQCLEAN_MLKEM1024_AARCH64__asm_add_add_reduce: mov w4, #3329 mov w5, #25519 add x3, x0, #0 dup v0.8H, w4 dup v1.8H, w5 ld1 { v4.8H, v5.8H, v6.8H, v7.8H}, [x3], #64 ld1 {v20.8H, v21.8H, v22.8H, v23.8H}, [x3], #64 ld1 {v16.8H, v17.8H, v18.8H, v19.8H}, [x1], #64 ld1 {v24.8H, v25.8H, v26.8H, v27.8H}, [x1], #64 add v4.8H, v4.8H, v16.8H add v5.8H, v5.8H, v17.8H ld1 {v16.8H, v17.8H}, [x2], #32 add v6.8H, v6.8H, v18.8H add v7.8H, v7.8H, v19.8H ld1 {v18.8H, v19.8H}, [x2], #32 add v20.8H, v20.8H, v24.8H add v21.8H, v21.8H, v25.8H ld1 {v24.8H, v25.8H}, [x2], #32 add v22.8H, v22.8H, v26.8H add v23.8H, v23.8H, v27.8H ld1 {v26.8H, v27.8H}, [x2], #32 add v4.8H, v4.8H, v16.8H add v5.8H, v5.8H, v17.8H add v6.8H, v6.8H, v18.8H add v7.8H, v7.8H, v19.8H add v20.8H, v20.8H, v24.8H add v21.8H, v21.8H, v25.8H add v22.8H, v22.8H, v26.8H add v23.8H, v23.8H, v27.8H oo_barrett v4, v5, v6, v7, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v1, #11, v0 mov x15, #3 _add_add_reduce_loop: st1 { v4.8H, v5.8H, v6.8H, v7.8H}, [x0], #64 ld1 { v4.8H, v5.8H, v6.8H, v7.8H}, [x3], #64 st1 {v20.8H, v21.8H, v22.8H, v23.8H}, [x0], #64 ld1 {v20.8H, v21.8H, v22.8H, v23.8H}, [x3], #64 ld1 {v16.8H, v17.8H, v18.8H, v19.8H}, [x1], #64 ld1 {v24.8H, v25.8H, v26.8H, v27.8H}, [x1], #64 add v4.8H, v4.8H, v16.8H add v5.8H, v5.8H, v17.8H ld1 {v16.8H, v17.8H}, [x2], #32 add v6.8H, v6.8H, v18.8H add v7.8H, v7.8H, v19.8H ld1 {v18.8H, v19.8H}, [x2], #32 add v20.8H, v20.8H, v24.8H add v21.8H, v21.8H, v25.8H ld1 {v24.8H, v25.8H}, [x2], #32 add v22.8H, v22.8H, v26.8H add v23.8H, v23.8H, v27.8H ld1 {v26.8H, v27.8H}, [x2], #32 add v4.8H, v4.8H, v16.8H add v5.8H, v5.8H, v17.8H add v6.8H, v6.8H, v18.8H add v7.8H, v7.8H, v19.8H add v20.8H, v20.8H, v24.8H add v21.8H, v21.8H, v25.8H add v22.8H, v22.8H, v26.8H add v23.8H, v23.8H, v27.8H oo_barrett v4, v5, v6, v7, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v1, #11, v0 sub x15, x15, #1 cbnz x15, _add_add_reduce_loop st1 { v4.8H, v5.8H, v6.8H, v7.8H}, [x0], #64 st1 {v20.8H, v21.8H, v22.8H, v23.8H}, [x0], #64 ret
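Each *_reduce routine above streams 64 coefficients per loop iteration through NEON registers, performs the pointwise add or sub, and then applies the oo_barrett macro to keep the 16-bit coefficients reduced modulo q = 3329. A minimal scalar C sketch of the same operations, assuming the reference-style Barrett reduction (barrett_reduce, poly_add_reduce and poly_add_add_reduce are illustrative names, and the scalar constant 20159 = round(2^26/q) is not the packed constant loaded by the NEON macro):

#include <stdint.h>

#define KYBER_Q 3329
#define KYBER_N 256

/* Barrett reduction: returns a representative of a mod q near the centre. */
static int16_t barrett_reduce(int16_t a) {
    const int16_t v = ((1 << 26) + KYBER_Q / 2) / KYBER_Q;        /* 20159 */
    int16_t t = (int16_t)(((int32_t)v * a + (1 << 25)) >> 26);
    return (int16_t)(a - t * KYBER_Q);
}

/* Scalar counterpart of asm_add_reduce: r[i] = reduce(r[i] + b[i]). */
void poly_add_reduce(int16_t r[KYBER_N], const int16_t b[KYBER_N]) {
    for (int i = 0; i < KYBER_N; i++)
        r[i] = barrett_reduce((int16_t)(r[i] + b[i]));
}

/* Scalar counterpart of asm_add_add_reduce: r[i] = reduce(r[i] + b[i] + c[i]). */
void poly_add_add_reduce(int16_t r[KYBER_N], const int16_t b[KYBER_N],
                         const int16_t c[KYBER_N]) {
    for (int i = 0; i < KYBER_N; i++)
        r[i] = barrett_reduce((int16_t)(r[i] + b[i] + c[i]));
}

The assembly peels the first and last batches out of the _add_reduce_loop so that the stores of one batch overlap the loads of the next; the scalar sketch keeps the simple one-pass form.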
mktmansour/MKT-KSA-Geolocation-Security
29,012
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece348864/avx2/vec_mul_asm.S
#include "namespace.h" #define vec_mul_asm CRYPTO_NAMESPACE(vec_mul_asm) #define _vec_mul_asm _CRYPTO_NAMESPACE(vec_mul_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 s0 # qhasm: reg256 s1 # qhasm: reg256 s2 # qhasm: reg256 s3 # qhasm: reg256 s4 # qhasm: reg256 s5 # qhasm: reg256 t0 # qhasm: reg256 t1 # qhasm: reg256 t2 # qhasm: reg256 b0 # qhasm: reg256 b1 # qhasm: reg256 b2 # qhasm: reg256 b3 # qhasm: reg256 b4 # qhasm: reg256 b5 # qhasm: reg256 a0 # qhasm: reg256 a1 # qhasm: reg256 a2 # qhasm: reg256 a3 # qhasm: reg256 a4 # qhasm: reg256 a5 # qhasm: reg256 r0 # qhasm: reg256 r1 # qhasm: reg256 r2 # qhasm: reg256 r3 # qhasm: reg256 r4 # qhasm: reg256 r5 # qhasm: reg256 r6 # qhasm: reg256 r7 # qhasm: reg256 r8 # qhasm: reg256 r9 # qhasm: reg256 r10 # qhasm: reg256 r11 # qhasm: reg256 r12 # qhasm: reg256 r13 # qhasm: reg256 r14 # qhasm: reg256 r15 # qhasm: reg256 r16 # qhasm: reg256 r17 # qhasm: reg256 r18 # qhasm: reg256 r19 # qhasm: reg256 r20 # qhasm: reg256 r21 # qhasm: reg256 r22 # qhasm: reg256 r # qhasm: int64 h0 # qhasm: int64 h1 # qhasm: int64 h2 # qhasm: int64 h3 # qhasm: int64 h4 # qhasm: int64 h5 # qhasm: int64 h6 # qhasm: int64 h7 # qhasm: int64 h8 # qhasm: int64 h9 # qhasm: int64 h10 # qhasm: int64 h11 # qhasm: int64 h12 # qhasm: int64 h13 # qhasm: int64 h14 # qhasm: int64 h15 # qhasm: int64 h16 # qhasm: int64 h17 # qhasm: int64 h18 # qhasm: int64 h19 # qhasm: int64 h20 # qhasm: int64 h21 # qhasm: int64 h22 # qhasm: stack4864 buf # qhasm: int64 ptr # qhasm: int64 tmp # qhasm: stack64 r11_stack # qhasm: stack64 r12_stack # qhasm: stack64 r13_stack # qhasm: stack64 r14_stack # qhasm: stack64 r15_stack # qhasm: stack64 rbx_stack # qhasm: stack64 rbp_stack # qhasm: enter vec_mul_asm .p2align 5 .global _vec_mul_asm .global vec_mul_asm _vec_mul_asm: vec_mul_asm: mov % rsp, % r11 and $31, % r11 add $672, % r11 sub % r11, % rsp # qhasm: r11_stack = caller_r11 # asm 1: movq <caller_r11=int64#9,>r11_stack=stack64#1 # asm 2: movq <caller_r11=%r11,>r11_stack=608(%rsp) movq % r11, 608( % rsp) # qhasm: r12_stack = caller_r12 # asm 1: movq <caller_r12=int64#10,>r12_stack=stack64#2 # asm 2: movq <caller_r12=%r12,>r12_stack=616(%rsp) movq % r12, 616( % rsp) # qhasm: r13_stack = caller_r13 # asm 1: movq <caller_r13=int64#11,>r13_stack=stack64#3 # asm 2: movq <caller_r13=%r13,>r13_stack=624(%rsp) movq % r13, 624( % rsp) # qhasm: r14_stack = caller_r14 # asm 1: movq <caller_r14=int64#12,>r14_stack=stack64#4 # asm 2: movq <caller_r14=%r14,>r14_stack=632(%rsp) movq % r14, 632( % rsp) # qhasm: r15_stack = caller_r15 # asm 1: movq <caller_r15=int64#13,>r15_stack=stack64#5 # asm 2: movq <caller_r15=%r15,>r15_stack=640(%rsp) movq % r15, 640( % rsp) # qhasm: rbx_stack = caller_rbx # asm 1: movq <caller_rbx=int64#14,>rbx_stack=stack64#6 # asm 2: movq <caller_rbx=%rbx,>rbx_stack=648(%rsp) movq % rbx, 648( % rsp) # qhasm: ptr = &buf # asm 1: leaq <buf=stack4864#1,>ptr=int64#4 # asm 2: leaq <buf=0(%rsp),>ptr=%rcx leaq 0( % rsp), % rcx # qhasm: s0 = mem256[ input_1 + 0 ] # asm 1: vmovupd 0(<input_1=int64#2),>s0=reg256#1 # asm 2: vmovupd 0(<input_1=%rsi),>s0=%ymm0 vmovupd 0( % rsi), % ymm0 # qhasm: s1 = mem256[ input_1 + 32 ] # asm 1: vmovupd 
32(<input_1=int64#2),>s1=reg256#2 # asm 2: vmovupd 32(<input_1=%rsi),>s1=%ymm1 vmovupd 32( % rsi), % ymm1 # qhasm: s2 = mem256[ input_1 + 64 ] # asm 1: vmovupd 64(<input_1=int64#2),>s2=reg256#3 # asm 2: vmovupd 64(<input_1=%rsi),>s2=%ymm2 vmovupd 64( % rsi), % ymm2 # qhasm: t0 = mem256[ input_2 + 0 ] # asm 1: vmovupd 0(<input_2=int64#3),>t0=reg256#4 # asm 2: vmovupd 0(<input_2=%rdx),>t0=%ymm3 vmovupd 0( % rdx), % ymm3 # qhasm: t1 = mem256[ input_2 + 32 ] # asm 1: vmovupd 32(<input_2=int64#3),>t1=reg256#5 # asm 2: vmovupd 32(<input_2=%rdx),>t1=%ymm4 vmovupd 32( % rdx), % ymm4 # qhasm: t2 = mem256[ input_2 + 64 ] # asm 1: vmovupd 64(<input_2=int64#3),>t2=reg256#6 # asm 2: vmovupd 64(<input_2=%rdx),>t2=%ymm5 vmovupd 64( % rdx), % ymm5 # qhasm: a5[0,1,2,3] = s2[2,2,3,3] # asm 1: vpermq $0xfa,<s2=reg256#3,>a5=reg256#7 # asm 2: vpermq $0xfa,<s2=%ymm2,>a5=%ymm6 vpermq $0xfa, % ymm2, % ymm6 # qhasm: b5[0,1,2,3] = t2[2,3,2,3] # asm 1: vpermq $0xee,<t2=reg256#6,>b5=reg256#8 # asm 2: vpermq $0xee,<t2=%ymm5,>b5=%ymm7 vpermq $0xee, % ymm5, % ymm7 # qhasm: r10 = a5 & b5 # asm 1: vpand <a5=reg256#7,<b5=reg256#8,>r10=reg256#9 # asm 2: vpand <a5=%ymm6,<b5=%ymm7,>r10=%ymm8 vpand % ymm6, % ymm7, % ymm8 # qhasm: mem256[ ptr + 320 ] = r10 # asm 1: vmovupd <r10=reg256#9,320(<ptr=int64#4) # asm 2: vmovupd <r10=%ymm8,320(<ptr=%rcx) vmovupd % ymm8, 320( % rcx) # qhasm: b4[0,1,2,3] = t2[0,1,0,1] # asm 1: vpermq $0x44,<t2=reg256#6,>b4=reg256#6 # asm 2: vpermq $0x44,<t2=%ymm5,>b4=%ymm5 vpermq $0x44, % ymm5, % ymm5 # qhasm: r9 = a5 & b4 # asm 1: vpand <a5=reg256#7,<b4=reg256#6,>r9=reg256#9 # asm 2: vpand <a5=%ymm6,<b4=%ymm5,>r9=%ymm8 vpand % ymm6, % ymm5, % ymm8 # qhasm: b3[0,1,2,3] = t1[2,3,2,3] # asm 1: vpermq $0xee,<t1=reg256#5,>b3=reg256#10 # asm 2: vpermq $0xee,<t1=%ymm4,>b3=%ymm9 vpermq $0xee, % ymm4, % ymm9 # qhasm: r8 = a5 & b3 # asm 1: vpand <a5=reg256#7,<b3=reg256#10,>r8=reg256#11 # asm 2: vpand <a5=%ymm6,<b3=%ymm9,>r8=%ymm10 vpand % ymm6, % ymm9, % ymm10 # qhasm: b2[0,1,2,3] = t1[0,1,0,1] # asm 1: vpermq $0x44,<t1=reg256#5,>b2=reg256#5 # asm 2: vpermq $0x44,<t1=%ymm4,>b2=%ymm4 vpermq $0x44, % ymm4, % ymm4 # qhasm: r7 = a5 & b2 # asm 1: vpand <a5=reg256#7,<b2=reg256#5,>r7=reg256#12 # asm 2: vpand <a5=%ymm6,<b2=%ymm4,>r7=%ymm11 vpand % ymm6, % ymm4, % ymm11 # qhasm: b1[0,1,2,3] = t0[2,3,2,3] # asm 1: vpermq $0xee,<t0=reg256#4,>b1=reg256#13 # asm 2: vpermq $0xee,<t0=%ymm3,>b1=%ymm12 vpermq $0xee, % ymm3, % ymm12 # qhasm: r6 = a5 & b1 # asm 1: vpand <a5=reg256#7,<b1=reg256#13,>r6=reg256#14 # asm 2: vpand <a5=%ymm6,<b1=%ymm12,>r6=%ymm13 vpand % ymm6, % ymm12, % ymm13 # qhasm: b0[0,1,2,3] = t0[0,1,0,1] # asm 1: vpermq $0x44,<t0=reg256#4,>b0=reg256#4 # asm 2: vpermq $0x44,<t0=%ymm3,>b0=%ymm3 vpermq $0x44, % ymm3, % ymm3 # qhasm: r5 = a5 & b0 # asm 1: vpand <a5=reg256#7,<b0=reg256#4,>r5=reg256#7 # asm 2: vpand <a5=%ymm6,<b0=%ymm3,>r5=%ymm6 vpand % ymm6, % ymm3, % ymm6 # qhasm: a4[0,1,2,3] = s2[0,0,1,1] # asm 1: vpermq $0x50,<s2=reg256#3,>a4=reg256#3 # asm 2: vpermq $0x50,<s2=%ymm2,>a4=%ymm2 vpermq $0x50, % ymm2, % ymm2 # qhasm: r = a4 & b5 # asm 1: vpand <a4=reg256#3,<b5=reg256#8,>r=reg256#15 # asm 2: vpand <a4=%ymm2,<b5=%ymm7,>r=%ymm14 vpand % ymm2, % ymm7, % ymm14 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#15,<r9=reg256#9,<r9=reg256#9 # asm 2: vpxor <r=%ymm14,<r9=%ymm8,<r9=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: mem256[ ptr + 288 ] = r9 # asm 1: vmovupd <r9=reg256#9,288(<ptr=int64#4) # asm 2: vmovupd <r9=%ymm8,288(<ptr=%rcx) vmovupd % ymm8, 288( % rcx) # qhasm: r = a4 & b4 # asm 1: vpand 
<a4=reg256#3,<b4=reg256#6,>r=reg256#9 # asm 2: vpand <a4=%ymm2,<b4=%ymm5,>r=%ymm8 vpand % ymm2, % ymm5, % ymm8 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#9,<r8=reg256#11,<r8=reg256#11 # asm 2: vpxor <r=%ymm8,<r8=%ymm10,<r8=%ymm10 vpxor % ymm8, % ymm10, % ymm10 # qhasm: r = a4 & b3 # asm 1: vpand <a4=reg256#3,<b3=reg256#10,>r=reg256#9 # asm 2: vpand <a4=%ymm2,<b3=%ymm9,>r=%ymm8 vpand % ymm2, % ymm9, % ymm8 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#9,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm8,<r7=%ymm11,<r7=%ymm11 vpxor % ymm8, % ymm11, % ymm11 # qhasm: r = a4 & b2 # asm 1: vpand <a4=reg256#3,<b2=reg256#5,>r=reg256#9 # asm 2: vpand <a4=%ymm2,<b2=%ymm4,>r=%ymm8 vpand % ymm2, % ymm4, % ymm8 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#9,<r6=reg256#14,<r6=reg256#14 # asm 2: vpxor <r=%ymm8,<r6=%ymm13,<r6=%ymm13 vpxor % ymm8, % ymm13, % ymm13 # qhasm: r = a4 & b1 # asm 1: vpand <a4=reg256#3,<b1=reg256#13,>r=reg256#9 # asm 2: vpand <a4=%ymm2,<b1=%ymm12,>r=%ymm8 vpand % ymm2, % ymm12, % ymm8 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#9,<r5=reg256#7,<r5=reg256#7 # asm 2: vpxor <r=%ymm8,<r5=%ymm6,<r5=%ymm6 vpxor % ymm8, % ymm6, % ymm6 # qhasm: r4 = a4 & b0 # asm 1: vpand <a4=reg256#3,<b0=reg256#4,>r4=reg256#3 # asm 2: vpand <a4=%ymm2,<b0=%ymm3,>r4=%ymm2 vpand % ymm2, % ymm3, % ymm2 # qhasm: a3[0,1,2,3] = s1[2,2,3,3] # asm 1: vpermq $0xfa,<s1=reg256#2,>a3=reg256#9 # asm 2: vpermq $0xfa,<s1=%ymm1,>a3=%ymm8 vpermq $0xfa, % ymm1, % ymm8 # qhasm: r = a3 & b5 # asm 1: vpand <a3=reg256#9,<b5=reg256#8,>r=reg256#15 # asm 2: vpand <a3=%ymm8,<b5=%ymm7,>r=%ymm14 vpand % ymm8, % ymm7, % ymm14 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#15,<r8=reg256#11,<r8=reg256#11 # asm 2: vpxor <r=%ymm14,<r8=%ymm10,<r8=%ymm10 vpxor % ymm14, % ymm10, % ymm10 # qhasm: mem256[ ptr + 256 ] = r8 # asm 1: vmovupd <r8=reg256#11,256(<ptr=int64#4) # asm 2: vmovupd <r8=%ymm10,256(<ptr=%rcx) vmovupd % ymm10, 256( % rcx) # qhasm: r = a3 & b4 # asm 1: vpand <a3=reg256#9,<b4=reg256#6,>r=reg256#11 # asm 2: vpand <a3=%ymm8,<b4=%ymm5,>r=%ymm10 vpand % ymm8, % ymm5, % ymm10 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#11,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm10,<r7=%ymm11,<r7=%ymm11 vpxor % ymm10, % ymm11, % ymm11 # qhasm: r = a3 & b3 # asm 1: vpand <a3=reg256#9,<b3=reg256#10,>r=reg256#11 # asm 2: vpand <a3=%ymm8,<b3=%ymm9,>r=%ymm10 vpand % ymm8, % ymm9, % ymm10 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#11,<r6=reg256#14,<r6=reg256#14 # asm 2: vpxor <r=%ymm10,<r6=%ymm13,<r6=%ymm13 vpxor % ymm10, % ymm13, % ymm13 # qhasm: r = a3 & b2 # asm 1: vpand <a3=reg256#9,<b2=reg256#5,>r=reg256#11 # asm 2: vpand <a3=%ymm8,<b2=%ymm4,>r=%ymm10 vpand % ymm8, % ymm4, % ymm10 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#11,<r5=reg256#7,<r5=reg256#7 # asm 2: vpxor <r=%ymm10,<r5=%ymm6,<r5=%ymm6 vpxor % ymm10, % ymm6, % ymm6 # qhasm: r = a3 & b1 # asm 1: vpand <a3=reg256#9,<b1=reg256#13,>r=reg256#11 # asm 2: vpand <a3=%ymm8,<b1=%ymm12,>r=%ymm10 vpand % ymm8, % ymm12, % ymm10 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#11,<r4=reg256#3,<r4=reg256#3 # asm 2: vpxor <r=%ymm10,<r4=%ymm2,<r4=%ymm2 vpxor % ymm10, % ymm2, % ymm2 # qhasm: r3 = a3 & b0 # asm 1: vpand <a3=reg256#9,<b0=reg256#4,>r3=reg256#9 # asm 2: vpand <a3=%ymm8,<b0=%ymm3,>r3=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: a2[0,1,2,3] = s1[0,0,1,1] # asm 1: vpermq $0x50,<s1=reg256#2,>a2=reg256#2 # asm 2: vpermq $0x50,<s1=%ymm1,>a2=%ymm1 vpermq $0x50, % ymm1, % ymm1 # qhasm: r = a2 & b5 # asm 1: vpand <a2=reg256#2,<b5=reg256#8,>r=reg256#11 # asm 2: vpand <a2=%ymm1,<b5=%ymm7,>r=%ymm10 vpand % ymm1, % ymm7, % 
ymm10 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#11,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm10,<r7=%ymm11,<r7=%ymm11 vpxor % ymm10, % ymm11, % ymm11 # qhasm: mem256[ ptr + 224 ] = r7 # asm 1: vmovupd <r7=reg256#12,224(<ptr=int64#4) # asm 2: vmovupd <r7=%ymm11,224(<ptr=%rcx) vmovupd % ymm11, 224( % rcx) # qhasm: r = a2 & b4 # asm 1: vpand <a2=reg256#2,<b4=reg256#6,>r=reg256#11 # asm 2: vpand <a2=%ymm1,<b4=%ymm5,>r=%ymm10 vpand % ymm1, % ymm5, % ymm10 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#11,<r6=reg256#14,<r6=reg256#14 # asm 2: vpxor <r=%ymm10,<r6=%ymm13,<r6=%ymm13 vpxor % ymm10, % ymm13, % ymm13 # qhasm: r = a2 & b3 # asm 1: vpand <a2=reg256#2,<b3=reg256#10,>r=reg256#11 # asm 2: vpand <a2=%ymm1,<b3=%ymm9,>r=%ymm10 vpand % ymm1, % ymm9, % ymm10 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#11,<r5=reg256#7,<r5=reg256#7 # asm 2: vpxor <r=%ymm10,<r5=%ymm6,<r5=%ymm6 vpxor % ymm10, % ymm6, % ymm6 # qhasm: r = a2 & b2 # asm 1: vpand <a2=reg256#2,<b2=reg256#5,>r=reg256#11 # asm 2: vpand <a2=%ymm1,<b2=%ymm4,>r=%ymm10 vpand % ymm1, % ymm4, % ymm10 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#11,<r4=reg256#3,<r4=reg256#3 # asm 2: vpxor <r=%ymm10,<r4=%ymm2,<r4=%ymm2 vpxor % ymm10, % ymm2, % ymm2 # qhasm: r = a2 & b1 # asm 1: vpand <a2=reg256#2,<b1=reg256#13,>r=reg256#11 # asm 2: vpand <a2=%ymm1,<b1=%ymm12,>r=%ymm10 vpand % ymm1, % ymm12, % ymm10 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#11,<r3=reg256#9,<r3=reg256#9 # asm 2: vpxor <r=%ymm10,<r3=%ymm8,<r3=%ymm8 vpxor % ymm10, % ymm8, % ymm8 # qhasm: r2 = a2 & b0 # asm 1: vpand <a2=reg256#2,<b0=reg256#4,>r2=reg256#2 # asm 2: vpand <a2=%ymm1,<b0=%ymm3,>r2=%ymm1 vpand % ymm1, % ymm3, % ymm1 # qhasm: a1[0,1,2,3] = s0[2,2,3,3] # asm 1: vpermq $0xfa,<s0=reg256#1,>a1=reg256#11 # asm 2: vpermq $0xfa,<s0=%ymm0,>a1=%ymm10 vpermq $0xfa, % ymm0, % ymm10 # qhasm: r = a1 & b5 # asm 1: vpand <a1=reg256#11,<b5=reg256#8,>r=reg256#12 # asm 2: vpand <a1=%ymm10,<b5=%ymm7,>r=%ymm11 vpand % ymm10, % ymm7, % ymm11 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#12,<r6=reg256#14,<r6=reg256#14 # asm 2: vpxor <r=%ymm11,<r6=%ymm13,<r6=%ymm13 vpxor % ymm11, % ymm13, % ymm13 # qhasm: mem256[ ptr + 192 ] = r6 # asm 1: vmovupd <r6=reg256#14,192(<ptr=int64#4) # asm 2: vmovupd <r6=%ymm13,192(<ptr=%rcx) vmovupd % ymm13, 192( % rcx) # qhasm: r = a1 & b4 # asm 1: vpand <a1=reg256#11,<b4=reg256#6,>r=reg256#12 # asm 2: vpand <a1=%ymm10,<b4=%ymm5,>r=%ymm11 vpand % ymm10, % ymm5, % ymm11 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#12,<r5=reg256#7,<r5=reg256#7 # asm 2: vpxor <r=%ymm11,<r5=%ymm6,<r5=%ymm6 vpxor % ymm11, % ymm6, % ymm6 # qhasm: r = a1 & b3 # asm 1: vpand <a1=reg256#11,<b3=reg256#10,>r=reg256#12 # asm 2: vpand <a1=%ymm10,<b3=%ymm9,>r=%ymm11 vpand % ymm10, % ymm9, % ymm11 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#12,<r4=reg256#3,<r4=reg256#3 # asm 2: vpxor <r=%ymm11,<r4=%ymm2,<r4=%ymm2 vpxor % ymm11, % ymm2, % ymm2 # qhasm: r = a1 & b2 # asm 1: vpand <a1=reg256#11,<b2=reg256#5,>r=reg256#12 # asm 2: vpand <a1=%ymm10,<b2=%ymm4,>r=%ymm11 vpand % ymm10, % ymm4, % ymm11 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#12,<r3=reg256#9,<r3=reg256#9 # asm 2: vpxor <r=%ymm11,<r3=%ymm8,<r3=%ymm8 vpxor % ymm11, % ymm8, % ymm8 # qhasm: r = a1 & b1 # asm 1: vpand <a1=reg256#11,<b1=reg256#13,>r=reg256#12 # asm 2: vpand <a1=%ymm10,<b1=%ymm12,>r=%ymm11 vpand % ymm10, % ymm12, % ymm11 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#12,<r2=reg256#2,<r2=reg256#2 # asm 2: vpxor <r=%ymm11,<r2=%ymm1,<r2=%ymm1 vpxor % ymm11, % ymm1, % ymm1 # qhasm: r1 = a1 & b0 # asm 1: vpand <a1=reg256#11,<b0=reg256#4,>r1=reg256#11 # 
asm 2: vpand <a1=%ymm10,<b0=%ymm3,>r1=%ymm10 vpand % ymm10, % ymm3, % ymm10 # qhasm: a0[0,1,2,3] = s0[0,0,1,1] # asm 1: vpermq $0x50,<s0=reg256#1,>a0=reg256#1 # asm 2: vpermq $0x50,<s0=%ymm0,>a0=%ymm0 vpermq $0x50, % ymm0, % ymm0 # qhasm: r = a0 & b5 # asm 1: vpand <a0=reg256#1,<b5=reg256#8,>r=reg256#8 # asm 2: vpand <a0=%ymm0,<b5=%ymm7,>r=%ymm7 vpand % ymm0, % ymm7, % ymm7 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#8,<r5=reg256#7,<r5=reg256#7 # asm 2: vpxor <r=%ymm7,<r5=%ymm6,<r5=%ymm6 vpxor % ymm7, % ymm6, % ymm6 # qhasm: mem256[ ptr + 160 ] = r5 # asm 1: vmovupd <r5=reg256#7,160(<ptr=int64#4) # asm 2: vmovupd <r5=%ymm6,160(<ptr=%rcx) vmovupd % ymm6, 160( % rcx) # qhasm: r = a0 & b4 # asm 1: vpand <a0=reg256#1,<b4=reg256#6,>r=reg256#6 # asm 2: vpand <a0=%ymm0,<b4=%ymm5,>r=%ymm5 vpand % ymm0, % ymm5, % ymm5 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#6,<r4=reg256#3,<r4=reg256#3 # asm 2: vpxor <r=%ymm5,<r4=%ymm2,<r4=%ymm2 vpxor % ymm5, % ymm2, % ymm2 # qhasm: r = a0 & b3 # asm 1: vpand <a0=reg256#1,<b3=reg256#10,>r=reg256#6 # asm 2: vpand <a0=%ymm0,<b3=%ymm9,>r=%ymm5 vpand % ymm0, % ymm9, % ymm5 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#6,<r3=reg256#9,<r3=reg256#9 # asm 2: vpxor <r=%ymm5,<r3=%ymm8,<r3=%ymm8 vpxor % ymm5, % ymm8, % ymm8 # qhasm: r = a0 & b2 # asm 1: vpand <a0=reg256#1,<b2=reg256#5,>r=reg256#5 # asm 2: vpand <a0=%ymm0,<b2=%ymm4,>r=%ymm4 vpand % ymm0, % ymm4, % ymm4 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#5,<r2=reg256#2,<r2=reg256#2 # asm 2: vpxor <r=%ymm4,<r2=%ymm1,<r2=%ymm1 vpxor % ymm4, % ymm1, % ymm1 # qhasm: r = a0 & b1 # asm 1: vpand <a0=reg256#1,<b1=reg256#13,>r=reg256#5 # asm 2: vpand <a0=%ymm0,<b1=%ymm12,>r=%ymm4 vpand % ymm0, % ymm12, % ymm4 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#5,<r1=reg256#11,<r1=reg256#11 # asm 2: vpxor <r=%ymm4,<r1=%ymm10,<r1=%ymm10 vpxor % ymm4, % ymm10, % ymm10 # qhasm: r0 = a0 & b0 # asm 1: vpand <a0=reg256#1,<b0=reg256#4,>r0=reg256#1 # asm 2: vpand <a0=%ymm0,<b0=%ymm3,>r0=%ymm0 vpand % ymm0, % ymm3, % ymm0 # qhasm: mem256[ ptr + 128 ] = r4 # asm 1: vmovupd <r4=reg256#3,128(<ptr=int64#4) # asm 2: vmovupd <r4=%ymm2,128(<ptr=%rcx) vmovupd % ymm2, 128( % rcx) # qhasm: mem256[ ptr + 96 ] = r3 # asm 1: vmovupd <r3=reg256#9,96(<ptr=int64#4) # asm 2: vmovupd <r3=%ymm8,96(<ptr=%rcx) vmovupd % ymm8, 96( % rcx) # qhasm: mem256[ ptr + 64 ] = r2 # asm 1: vmovupd <r2=reg256#2,64(<ptr=int64#4) # asm 2: vmovupd <r2=%ymm1,64(<ptr=%rcx) vmovupd % ymm1, 64( % rcx) # qhasm: mem256[ ptr + 32 ] = r1 # asm 1: vmovupd <r1=reg256#11,32(<ptr=int64#4) # asm 2: vmovupd <r1=%ymm10,32(<ptr=%rcx) vmovupd % ymm10, 32( % rcx) # qhasm: mem256[ ptr + 0 ] = r0 # asm 1: vmovupd <r0=reg256#1,0(<ptr=int64#4) # asm 2: vmovupd <r0=%ymm0,0(<ptr=%rcx) vmovupd % ymm0, 0( % rcx) # qhasm: h22 = mem64[ ptr + 344 ] # asm 1: movq 344(<ptr=int64#4),>h22=int64#2 # asm 2: movq 344(<ptr=%rcx),>h22=%rsi movq 344( % rcx), % rsi # qhasm: h13 = h22 # asm 1: mov <h22=int64#2,>h13=int64#3 # asm 2: mov <h22=%rsi,>h13=%rdx mov % rsi, % rdx # qhasm: h10 = h22 # asm 1: mov <h22=int64#2,>h10=int64#2 # asm 2: mov <h22=%rsi,>h10=%rsi mov % rsi, % rsi # qhasm: h21 = mem64[ ptr + 336 ] # asm 1: movq 336(<ptr=int64#4),>h21=int64#5 # asm 2: movq 336(<ptr=%rcx),>h21=%r8 movq 336( % rcx), % r8 # qhasm: h21 ^= *(uint64 *) ( ptr + 328 ) # asm 1: xorq 328(<ptr=int64#4),<h21=int64#5 # asm 2: xorq 328(<ptr=%rcx),<h21=%r8 xorq 328( % rcx), % r8 # qhasm: h12 = h21 # asm 1: mov <h21=int64#5,>h12=int64#6 # asm 2: mov <h21=%r8,>h12=%r9 mov % r8, % r9 # qhasm: h9 = h21 # asm 1: mov <h21=int64#5,>h9=int64#5 # asm 2: mov 
<h21=%r8,>h9=%r8 mov % r8, % r8 # qhasm: h20 = mem64[ ptr + 312 ] # asm 1: movq 312(<ptr=int64#4),>h20=int64#7 # asm 2: movq 312(<ptr=%rcx),>h20=%rax movq 312( % rcx), % rax # qhasm: h20 ^= *(uint64 *) ( ptr + 320 ) # asm 1: xorq 320(<ptr=int64#4),<h20=int64#7 # asm 2: xorq 320(<ptr=%rcx),<h20=%rax xorq 320( % rcx), % rax # qhasm: h11 = h20 # asm 1: mov <h20=int64#7,>h11=int64#8 # asm 2: mov <h20=%rax,>h11=%r10 mov % rax, % r10 # qhasm: h8 = h20 # asm 1: mov <h20=int64#7,>h8=int64#7 # asm 2: mov <h20=%rax,>h8=%rax mov % rax, % rax # qhasm: h19 = mem64[ ptr + 304 ] # asm 1: movq 304(<ptr=int64#4),>h19=int64#9 # asm 2: movq 304(<ptr=%rcx),>h19=%r11 movq 304( % rcx), % r11 # qhasm: h19 ^= *(uint64 *) ( ptr + 296 ) # asm 1: xorq 296(<ptr=int64#4),<h19=int64#9 # asm 2: xorq 296(<ptr=%rcx),<h19=%r11 xorq 296( % rcx), % r11 # qhasm: h10 ^= h19 # asm 1: xor <h19=int64#9,<h10=int64#2 # asm 2: xor <h19=%r11,<h10=%rsi xor % r11, % rsi # qhasm: h7 = h19 # asm 1: mov <h19=int64#9,>h7=int64#9 # asm 2: mov <h19=%r11,>h7=%r11 mov % r11, % r11 # qhasm: h18 = mem64[ ptr + 280 ] # asm 1: movq 280(<ptr=int64#4),>h18=int64#10 # asm 2: movq 280(<ptr=%rcx),>h18=%r12 movq 280( % rcx), % r12 # qhasm: h18 ^= *(uint64 *) ( ptr + 288 ) # asm 1: xorq 288(<ptr=int64#4),<h18=int64#10 # asm 2: xorq 288(<ptr=%rcx),<h18=%r12 xorq 288( % rcx), % r12 # qhasm: h9 ^= h18 # asm 1: xor <h18=int64#10,<h9=int64#5 # asm 2: xor <h18=%r12,<h9=%r8 xor % r12, % r8 # qhasm: h6 = h18 # asm 1: mov <h18=int64#10,>h6=int64#10 # asm 2: mov <h18=%r12,>h6=%r12 mov % r12, % r12 # qhasm: h17 = mem64[ ptr + 272 ] # asm 1: movq 272(<ptr=int64#4),>h17=int64#11 # asm 2: movq 272(<ptr=%rcx),>h17=%r13 movq 272( % rcx), % r13 # qhasm: h17 ^= *(uint64 *) ( ptr + 264 ) # asm 1: xorq 264(<ptr=int64#4),<h17=int64#11 # asm 2: xorq 264(<ptr=%rcx),<h17=%r13 xorq 264( % rcx), % r13 # qhasm: h8 ^= h17 # asm 1: xor <h17=int64#11,<h8=int64#7 # asm 2: xor <h17=%r13,<h8=%rax xor % r13, % rax # qhasm: h5 = h17 # asm 1: mov <h17=int64#11,>h5=int64#11 # asm 2: mov <h17=%r13,>h5=%r13 mov % r13, % r13 # qhasm: h16 = mem64[ ptr + 248 ] # asm 1: movq 248(<ptr=int64#4),>h16=int64#12 # asm 2: movq 248(<ptr=%rcx),>h16=%r14 movq 248( % rcx), % r14 # qhasm: h16 ^= *(uint64 *) ( ptr + 256 ) # asm 1: xorq 256(<ptr=int64#4),<h16=int64#12 # asm 2: xorq 256(<ptr=%rcx),<h16=%r14 xorq 256( % rcx), % r14 # qhasm: h7 ^= h16 # asm 1: xor <h16=int64#12,<h7=int64#9 # asm 2: xor <h16=%r14,<h7=%r11 xor % r14, % r11 # qhasm: h4 = h16 # asm 1: mov <h16=int64#12,>h4=int64#12 # asm 2: mov <h16=%r14,>h4=%r14 mov % r14, % r14 # qhasm: h15 = mem64[ ptr + 240 ] # asm 1: movq 240(<ptr=int64#4),>h15=int64#13 # asm 2: movq 240(<ptr=%rcx),>h15=%r15 movq 240( % rcx), % r15 # qhasm: h15 ^= *(uint64 *) ( ptr + 232 ) # asm 1: xorq 232(<ptr=int64#4),<h15=int64#13 # asm 2: xorq 232(<ptr=%rcx),<h15=%r15 xorq 232( % rcx), % r15 # qhasm: h6 ^= h15 # asm 1: xor <h15=int64#13,<h6=int64#10 # asm 2: xor <h15=%r15,<h6=%r12 xor % r15, % r12 # qhasm: h3 = h15 # asm 1: mov <h15=int64#13,>h3=int64#13 # asm 2: mov <h15=%r15,>h3=%r15 mov % r15, % r15 # qhasm: h14 = mem64[ ptr + 216 ] # asm 1: movq 216(<ptr=int64#4),>h14=int64#14 # asm 2: movq 216(<ptr=%rcx),>h14=%rbx movq 216( % rcx), % rbx # qhasm: h14 ^= *(uint64 *) ( ptr + 224 ) # asm 1: xorq 224(<ptr=int64#4),<h14=int64#14 # asm 2: xorq 224(<ptr=%rcx),<h14=%rbx xorq 224( % rcx), % rbx # qhasm: h5 ^= h14 # asm 1: xor <h14=int64#14,<h5=int64#11 # asm 2: xor <h14=%rbx,<h5=%r13 xor % rbx, % r13 # qhasm: h2 = h14 # asm 1: mov <h14=int64#14,>h2=int64#14 # asm 2: mov 
<h14=%rbx,>h2=%rbx mov % rbx, % rbx # qhasm: h13 ^= *(uint64 *) ( ptr + 208 ) # asm 1: xorq 208(<ptr=int64#4),<h13=int64#3 # asm 2: xorq 208(<ptr=%rcx),<h13=%rdx xorq 208( % rcx), % rdx # qhasm: h13 ^= *(uint64 *) ( ptr + 200 ) # asm 1: xorq 200(<ptr=int64#4),<h13=int64#3 # asm 2: xorq 200(<ptr=%rcx),<h13=%rdx xorq 200( % rcx), % rdx # qhasm: h4 ^= h13 # asm 1: xor <h13=int64#3,<h4=int64#12 # asm 2: xor <h13=%rdx,<h4=%r14 xor % rdx, % r14 # qhasm: h1 = h13 # asm 1: mov <h13=int64#3,>h1=int64#3 # asm 2: mov <h13=%rdx,>h1=%rdx mov % rdx, % rdx # qhasm: h12 ^= *(uint64 *) ( ptr + 184 ) # asm 1: xorq 184(<ptr=int64#4),<h12=int64#6 # asm 2: xorq 184(<ptr=%rcx),<h12=%r9 xorq 184( % rcx), % r9 # qhasm: h12 ^= *(uint64 *) ( ptr + 192 ) # asm 1: xorq 192(<ptr=int64#4),<h12=int64#6 # asm 2: xorq 192(<ptr=%rcx),<h12=%r9 xorq 192( % rcx), % r9 # qhasm: h3 ^= h12 # asm 1: xor <h12=int64#6,<h3=int64#13 # asm 2: xor <h12=%r9,<h3=%r15 xor % r9, % r15 # qhasm: h0 = h12 # asm 1: mov <h12=int64#6,>h0=int64#6 # asm 2: mov <h12=%r9,>h0=%r9 mov % r9, % r9 # qhasm: h11 ^= *(uint64 *) ( ptr + 176 ) # asm 1: xorq 176(<ptr=int64#4),<h11=int64#8 # asm 2: xorq 176(<ptr=%rcx),<h11=%r10 xorq 176( % rcx), % r10 # qhasm: h11 ^= *(uint64 *) ( ptr + 168 ) # asm 1: xorq 168(<ptr=int64#4),<h11=int64#8 # asm 2: xorq 168(<ptr=%rcx),<h11=%r10 xorq 168( % rcx), % r10 # qhasm: mem64[ input_0 + 88 ] = h11 # asm 1: movq <h11=int64#8,88(<input_0=int64#1) # asm 2: movq <h11=%r10,88(<input_0=%rdi) movq % r10, 88( % rdi) # qhasm: h10 ^= *(uint64 *) ( ptr + 152 ) # asm 1: xorq 152(<ptr=int64#4),<h10=int64#2 # asm 2: xorq 152(<ptr=%rcx),<h10=%rsi xorq 152( % rcx), % rsi # qhasm: h10 ^= *(uint64 *) ( ptr + 160 ) # asm 1: xorq 160(<ptr=int64#4),<h10=int64#2 # asm 2: xorq 160(<ptr=%rcx),<h10=%rsi xorq 160( % rcx), % rsi # qhasm: mem64[ input_0 + 80 ] = h10 # asm 1: movq <h10=int64#2,80(<input_0=int64#1) # asm 2: movq <h10=%rsi,80(<input_0=%rdi) movq % rsi, 80( % rdi) # qhasm: h9 ^= *(uint64 *) ( ptr + 144 ) # asm 1: xorq 144(<ptr=int64#4),<h9=int64#5 # asm 2: xorq 144(<ptr=%rcx),<h9=%r8 xorq 144( % rcx), % r8 # qhasm: h9 ^= *(uint64 *) ( ptr + 136 ) # asm 1: xorq 136(<ptr=int64#4),<h9=int64#5 # asm 2: xorq 136(<ptr=%rcx),<h9=%r8 xorq 136( % rcx), % r8 # qhasm: mem64[ input_0 + 72 ] = h9 # asm 1: movq <h9=int64#5,72(<input_0=int64#1) # asm 2: movq <h9=%r8,72(<input_0=%rdi) movq % r8, 72( % rdi) # qhasm: h8 ^= *(uint64 *) ( ptr + 120 ) # asm 1: xorq 120(<ptr=int64#4),<h8=int64#7 # asm 2: xorq 120(<ptr=%rcx),<h8=%rax xorq 120( % rcx), % rax # qhasm: h8 ^= *(uint64 *) ( ptr + 128 ) # asm 1: xorq 128(<ptr=int64#4),<h8=int64#7 # asm 2: xorq 128(<ptr=%rcx),<h8=%rax xorq 128( % rcx), % rax # qhasm: mem64[ input_0 + 64 ] = h8 # asm 1: movq <h8=int64#7,64(<input_0=int64#1) # asm 2: movq <h8=%rax,64(<input_0=%rdi) movq % rax, 64( % rdi) # qhasm: h7 ^= *(uint64 *) ( ptr + 112 ) # asm 1: xorq 112(<ptr=int64#4),<h7=int64#9 # asm 2: xorq 112(<ptr=%rcx),<h7=%r11 xorq 112( % rcx), % r11 # qhasm: h7 ^= *(uint64 *) ( ptr + 104 ) # asm 1: xorq 104(<ptr=int64#4),<h7=int64#9 # asm 2: xorq 104(<ptr=%rcx),<h7=%r11 xorq 104( % rcx), % r11 # qhasm: mem64[ input_0 + 56 ] = h7 # asm 1: movq <h7=int64#9,56(<input_0=int64#1) # asm 2: movq <h7=%r11,56(<input_0=%rdi) movq % r11, 56( % rdi) # qhasm: h6 ^= *(uint64 *) ( ptr + 88 ) # asm 1: xorq 88(<ptr=int64#4),<h6=int64#10 # asm 2: xorq 88(<ptr=%rcx),<h6=%r12 xorq 88( % rcx), % r12 # qhasm: h6 ^= *(uint64 *) ( ptr + 96 ) # asm 1: xorq 96(<ptr=int64#4),<h6=int64#10 # asm 2: xorq 96(<ptr=%rcx),<h6=%r12 xorq 96( % rcx), % r12 
# qhasm: mem64[ input_0 + 48 ] = h6 # asm 1: movq <h6=int64#10,48(<input_0=int64#1) # asm 2: movq <h6=%r12,48(<input_0=%rdi) movq % r12, 48( % rdi) # qhasm: h5 ^= *(uint64 *) ( ptr + 80 ) # asm 1: xorq 80(<ptr=int64#4),<h5=int64#11 # asm 2: xorq 80(<ptr=%rcx),<h5=%r13 xorq 80( % rcx), % r13 # qhasm: h5 ^= *(uint64 *) ( ptr + 72 ) # asm 1: xorq 72(<ptr=int64#4),<h5=int64#11 # asm 2: xorq 72(<ptr=%rcx),<h5=%r13 xorq 72( % rcx), % r13 # qhasm: mem64[ input_0 + 40 ] = h5 # asm 1: movq <h5=int64#11,40(<input_0=int64#1) # asm 2: movq <h5=%r13,40(<input_0=%rdi) movq % r13, 40( % rdi) # qhasm: h4 ^= *(uint64 *) ( ptr + 56 ) # asm 1: xorq 56(<ptr=int64#4),<h4=int64#12 # asm 2: xorq 56(<ptr=%rcx),<h4=%r14 xorq 56( % rcx), % r14 # qhasm: h4 ^= *(uint64 *) ( ptr + 64 ) # asm 1: xorq 64(<ptr=int64#4),<h4=int64#12 # asm 2: xorq 64(<ptr=%rcx),<h4=%r14 xorq 64( % rcx), % r14 # qhasm: mem64[ input_0 + 32 ] = h4 # asm 1: movq <h4=int64#12,32(<input_0=int64#1) # asm 2: movq <h4=%r14,32(<input_0=%rdi) movq % r14, 32( % rdi) # qhasm: h3 ^= *(uint64 *) ( ptr + 48 ) # asm 1: xorq 48(<ptr=int64#4),<h3=int64#13 # asm 2: xorq 48(<ptr=%rcx),<h3=%r15 xorq 48( % rcx), % r15 # qhasm: h3 ^= *(uint64 *) ( ptr + 40 ) # asm 1: xorq 40(<ptr=int64#4),<h3=int64#13 # asm 2: xorq 40(<ptr=%rcx),<h3=%r15 xorq 40( % rcx), % r15 # qhasm: mem64[ input_0 + 24 ] = h3 # asm 1: movq <h3=int64#13,24(<input_0=int64#1) # asm 2: movq <h3=%r15,24(<input_0=%rdi) movq % r15, 24( % rdi) # qhasm: h2 ^= *(uint64 *) ( ptr + 24 ) # asm 1: xorq 24(<ptr=int64#4),<h2=int64#14 # asm 2: xorq 24(<ptr=%rcx),<h2=%rbx xorq 24( % rcx), % rbx # qhasm: h2 ^= *(uint64 *) ( ptr + 32 ) # asm 1: xorq 32(<ptr=int64#4),<h2=int64#14 # asm 2: xorq 32(<ptr=%rcx),<h2=%rbx xorq 32( % rcx), % rbx # qhasm: mem64[ input_0 + 16 ] = h2 # asm 1: movq <h2=int64#14,16(<input_0=int64#1) # asm 2: movq <h2=%rbx,16(<input_0=%rdi) movq % rbx, 16( % rdi) # qhasm: h1 ^= *(uint64 *) ( ptr + 16 ) # asm 1: xorq 16(<ptr=int64#4),<h1=int64#3 # asm 2: xorq 16(<ptr=%rcx),<h1=%rdx xorq 16( % rcx), % rdx # qhasm: h1 ^= *(uint64 *) ( ptr + 8 ) # asm 1: xorq 8(<ptr=int64#4),<h1=int64#3 # asm 2: xorq 8(<ptr=%rcx),<h1=%rdx xorq 8( % rcx), % rdx # qhasm: mem64[ input_0 + 8 ] = h1 # asm 1: movq <h1=int64#3,8(<input_0=int64#1) # asm 2: movq <h1=%rdx,8(<input_0=%rdi) movq % rdx, 8( % rdi) # qhasm: h0 ^= *(uint64 *) ( ptr + 0 ) # asm 1: xorq 0(<ptr=int64#4),<h0=int64#6 # asm 2: xorq 0(<ptr=%rcx),<h0=%r9 xorq 0( % rcx), % r9 # qhasm: mem64[ input_0 + 0 ] = h0 # asm 1: movq <h0=int64#6,0(<input_0=int64#1) # asm 2: movq <h0=%r9,0(<input_0=%rdi) movq % r9, 0( % rdi) # qhasm: caller_r11 = r11_stack # asm 1: movq <r11_stack=stack64#1,>caller_r11=int64#9 # asm 2: movq <r11_stack=608(%rsp),>caller_r11=%r11 movq 608( % rsp), % r11 # qhasm: caller_r12 = r12_stack # asm 1: movq <r12_stack=stack64#2,>caller_r12=int64#10 # asm 2: movq <r12_stack=616(%rsp),>caller_r12=%r12 movq 616( % rsp), % r12 # qhasm: caller_r13 = r13_stack # asm 1: movq <r13_stack=stack64#3,>caller_r13=int64#11 # asm 2: movq <r13_stack=624(%rsp),>caller_r13=%r13 movq 624( % rsp), % r13 # qhasm: caller_r14 = r14_stack # asm 1: movq <r14_stack=stack64#4,>caller_r14=int64#12 # asm 2: movq <r14_stack=632(%rsp),>caller_r14=%r14 movq 632( % rsp), % r14 # qhasm: caller_r15 = r15_stack # asm 1: movq <r15_stack=stack64#5,>caller_r15=int64#13 # asm 2: movq <r15_stack=640(%rsp),>caller_r15=%r15 movq 640( % rsp), % r15 # qhasm: caller_rbx = rbx_stack # asm 1: movq <rbx_stack=stack64#6,>caller_rbx=int64#14 # asm 2: movq <rbx_stack=648(%rsp),>caller_rbx=%rbx 
movq 648( % rsp), % rbx # qhasm: return add % r11, % rsp ret
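vec_mul_asm multiplies bitsliced GF(2^12) field elements: the twelve 64-bit input words each carry one bit position of 64 independent field elements, the AVX2 body accumulates the 23 schoolbook partial products with vpand/vpxor into the stack buffer, and the scalar tail folds the high-degree words back down before writing the 12 result words. A minimal portable C sketch with the same structure, assuming the reduction polynomial x^12 + x^3 + 1 (which is what the fold pattern above, h13 into h4 and h1 and h12 into h3 and h0, corresponds to); gfbits_vec_mul is an illustrative name, not an interface of this file.

#include <stdint.h>

#define GFBITS 12   /* GF(2^12), reduction polynomial x^12 + x^3 + 1 */

/* Bitsliced multiplication of 64 GF(2^12) elements at once:
 * a[i], b[i], h[i] hold bit i of all 64 field elements. */
void gfbits_vec_mul(uint64_t h[GFBITS],
                    const uint64_t a[GFBITS],
                    const uint64_t b[GFBITS]) {
    uint64_t buf[2 * GFBITS - 1] = {0};

    /* schoolbook product: carry-less, so AND/XOR stand in for MUL/ADD */
    for (int i = 0; i < GFBITS; i++)
        for (int j = 0; j < GFBITS; j++)
            buf[i + j] ^= a[i] & b[j];

    /* fold degrees 22..12 down using x^12 = x^3 + 1 */
    for (int i = 2 * GFBITS - 2; i >= GFBITS; i--) {
        buf[i - GFBITS + 3] ^= buf[i];
        buf[i - GFBITS + 0] ^= buf[i];
    }

    for (int i = 0; i < GFBITS; i++)
        h[i] = buf[i];
}

Because the field has characteristic 2, the partial products need only AND and XOR, which is why the whole multiplication vectorizes into vpand/vpxor with no carries to manage.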
mktmansour/MKT-KSA-Geolocation-Security
29,381
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece348864/avx2/vec_mul_sp_asm.S
#include "namespace.h" #define vec_mul_sp_asm CRYPTO_NAMESPACE(vec_mul_sp_asm) #define _vec_mul_sp_asm _CRYPTO_NAMESPACE(vec_mul_sp_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 s0 # qhasm: reg256 s1 # qhasm: reg256 s2 # qhasm: reg256 s3 # qhasm: reg256 s4 # qhasm: reg256 s5 # qhasm: reg256 b0 # qhasm: reg256 b1 # qhasm: reg256 b2 # qhasm: reg256 b3 # qhasm: reg256 b4 # qhasm: reg256 b5 # qhasm: reg256 a0 # qhasm: reg256 a1 # qhasm: reg256 a2 # qhasm: reg256 a3 # qhasm: reg256 a4 # qhasm: reg256 a5 # qhasm: reg256 r0 # qhasm: reg256 r1 # qhasm: reg256 r2 # qhasm: reg256 r3 # qhasm: reg256 r4 # qhasm: reg256 r5 # qhasm: reg256 r6 # qhasm: reg256 r7 # qhasm: reg256 r8 # qhasm: reg256 r9 # qhasm: reg256 r10 # qhasm: reg256 r11 # qhasm: reg256 r12 # qhasm: reg256 r13 # qhasm: reg256 r14 # qhasm: reg256 r15 # qhasm: reg256 r16 # qhasm: reg256 r17 # qhasm: reg256 r18 # qhasm: reg256 r19 # qhasm: reg256 r20 # qhasm: reg256 r21 # qhasm: reg256 r22 # qhasm: reg256 r # qhasm: int64 h0 # qhasm: int64 h1 # qhasm: int64 h2 # qhasm: int64 h3 # qhasm: int64 h4 # qhasm: int64 h5 # qhasm: int64 h6 # qhasm: int64 h7 # qhasm: int64 h8 # qhasm: int64 h9 # qhasm: int64 h10 # qhasm: int64 h11 # qhasm: int64 h12 # qhasm: int64 h13 # qhasm: int64 h14 # qhasm: int64 h15 # qhasm: int64 h16 # qhasm: int64 h17 # qhasm: int64 h18 # qhasm: int64 h19 # qhasm: int64 h20 # qhasm: int64 h21 # qhasm: int64 h22 # qhasm: stack4864 buf # qhasm: int64 ptr # qhasm: int64 tmp # qhasm: stack64 r11_stack # qhasm: stack64 r12_stack # qhasm: stack64 r13_stack # qhasm: stack64 r14_stack # qhasm: stack64 r15_stack # qhasm: stack64 rbx_stack # qhasm: stack64 rbp_stack # qhasm: enter vec_mul_sp_asm .p2align 5 .global _vec_mul_sp_asm .global vec_mul_sp_asm _vec_mul_sp_asm: vec_mul_sp_asm: mov % rsp, % r11 and $31, % r11 add $672, % r11 sub % r11, % rsp # qhasm: r11_stack = caller_r11 # asm 1: movq <caller_r11=int64#9,>r11_stack=stack64#1 # asm 2: movq <caller_r11=%r11,>r11_stack=608(%rsp) movq % r11, 608( % rsp) # qhasm: r12_stack = caller_r12 # asm 1: movq <caller_r12=int64#10,>r12_stack=stack64#2 # asm 2: movq <caller_r12=%r12,>r12_stack=616(%rsp) movq % r12, 616( % rsp) # qhasm: r13_stack = caller_r13 # asm 1: movq <caller_r13=int64#11,>r13_stack=stack64#3 # asm 2: movq <caller_r13=%r13,>r13_stack=624(%rsp) movq % r13, 624( % rsp) # qhasm: r14_stack = caller_r14 # asm 1: movq <caller_r14=int64#12,>r14_stack=stack64#4 # asm 2: movq <caller_r14=%r14,>r14_stack=632(%rsp) movq % r14, 632( % rsp) # qhasm: r15_stack = caller_r15 # asm 1: movq <caller_r15=int64#13,>r15_stack=stack64#5 # asm 2: movq <caller_r15=%r15,>r15_stack=640(%rsp) movq % r15, 640( % rsp) # qhasm: rbx_stack = caller_rbx # asm 1: movq <caller_rbx=int64#14,>rbx_stack=stack64#6 # asm 2: movq <caller_rbx=%rbx,>rbx_stack=648(%rsp) movq % rbx, 648( % rsp) # qhasm: ptr = &buf # asm 1: leaq <buf=stack4864#1,>ptr=int64#4 # asm 2: leaq <buf=0(%rsp),>ptr=%rcx leaq 0( % rsp), % rcx # qhasm: s0 = mem256[ input_1 + 0 ] # asm 1: vmovupd 0(<input_1=int64#2),>s0=reg256#1 # asm 2: vmovupd 0(<input_1=%rsi),>s0=%ymm0 vmovupd 0( % rsi), % ymm0 # qhasm: s1 = mem256[ input_1 + 32 ] # asm 1: vmovupd 32(<input_1=int64#2),>s1=reg256#2 # asm 2: vmovupd 
32(<input_1=%rsi),>s1=%ymm1 vmovupd 32( % rsi), % ymm1 # qhasm: s2 = mem256[ input_1 + 64 ] # asm 1: vmovupd 64(<input_1=int64#2),>s2=reg256#3 # asm 2: vmovupd 64(<input_1=%rsi),>s2=%ymm2 vmovupd 64( % rsi), % ymm2 # qhasm: a5[0,1,2,3] = s2[2,2,3,3] # asm 1: vpermq $0xfa,<s2=reg256#3,>a5=reg256#4 # asm 2: vpermq $0xfa,<s2=%ymm2,>a5=%ymm3 vpermq $0xfa, % ymm2, % ymm3 # qhasm: r = mem256[ input_2 + 160 ] # asm 1: vmovupd 160(<input_2=int64#3),>r=reg256#5 # asm 2: vmovupd 160(<input_2=%rdx),>r=%ymm4 vmovupd 160( % rdx), % ymm4 # qhasm: b5[0,1,2,3] = r[1,3,1,3] # asm 1: vpermq $0xdd,<r=reg256#5,>b5=reg256#5 # asm 2: vpermq $0xdd,<r=%ymm4,>b5=%ymm4 vpermq $0xdd, % ymm4, % ymm4 # qhasm: r10 = a5 & b5 # asm 1: vpand <a5=reg256#4,<b5=reg256#5,>r10=reg256#6 # asm 2: vpand <a5=%ymm3,<b5=%ymm4,>r10=%ymm5 vpand % ymm3, % ymm4, % ymm5 # qhasm: mem256[ ptr + 320 ] = r10 # asm 1: vmovupd <r10=reg256#6,320(<ptr=int64#4) # asm 2: vmovupd <r10=%ymm5,320(<ptr=%rcx) vmovupd % ymm5, 320( % rcx) # qhasm: r = mem256[ input_2 + 128 ] # asm 1: vmovupd 128(<input_2=int64#3),>r=reg256#6 # asm 2: vmovupd 128(<input_2=%rdx),>r=%ymm5 vmovupd 128( % rdx), % ymm5 # qhasm: b4[0,1,2,3] = r[1,3,1,3] # asm 1: vpermq $0xdd,<r=reg256#6,>b4=reg256#6 # asm 2: vpermq $0xdd,<r=%ymm5,>b4=%ymm5 vpermq $0xdd, % ymm5, % ymm5 # qhasm: r9 = a5 & b4 # asm 1: vpand <a5=reg256#4,<b4=reg256#6,>r9=reg256#7 # asm 2: vpand <a5=%ymm3,<b4=%ymm5,>r9=%ymm6 vpand % ymm3, % ymm5, % ymm6 # qhasm: r = mem256[ input_2 + 96 ] # asm 1: vmovupd 96(<input_2=int64#3),>r=reg256#8 # asm 2: vmovupd 96(<input_2=%rdx),>r=%ymm7 vmovupd 96( % rdx), % ymm7 # qhasm: b3[0,1,2,3] = r[1,3,1,3] # asm 1: vpermq $0xdd,<r=reg256#8,>b3=reg256#8 # asm 2: vpermq $0xdd,<r=%ymm7,>b3=%ymm7 vpermq $0xdd, % ymm7, % ymm7 # qhasm: r8 = a5 & b3 # asm 1: vpand <a5=reg256#4,<b3=reg256#8,>r8=reg256#9 # asm 2: vpand <a5=%ymm3,<b3=%ymm7,>r8=%ymm8 vpand % ymm3, % ymm7, % ymm8 # qhasm: r = mem256[ input_2 + 64 ] # asm 1: vmovupd 64(<input_2=int64#3),>r=reg256#10 # asm 2: vmovupd 64(<input_2=%rdx),>r=%ymm9 vmovupd 64( % rdx), % ymm9 # qhasm: b2[0,1,2,3] = r[1,3,1,3] # asm 1: vpermq $0xdd,<r=reg256#10,>b2=reg256#10 # asm 2: vpermq $0xdd,<r=%ymm9,>b2=%ymm9 vpermq $0xdd, % ymm9, % ymm9 # qhasm: r7 = a5 & b2 # asm 1: vpand <a5=reg256#4,<b2=reg256#10,>r7=reg256#11 # asm 2: vpand <a5=%ymm3,<b2=%ymm9,>r7=%ymm10 vpand % ymm3, % ymm9, % ymm10 # qhasm: r = mem256[ input_2 + 32 ] # asm 1: vmovupd 32(<input_2=int64#3),>r=reg256#12 # asm 2: vmovupd 32(<input_2=%rdx),>r=%ymm11 vmovupd 32( % rdx), % ymm11 # qhasm: b1[0,1,2,3] = r[1,3,1,3] # asm 1: vpermq $0xdd,<r=reg256#12,>b1=reg256#12 # asm 2: vpermq $0xdd,<r=%ymm11,>b1=%ymm11 vpermq $0xdd, % ymm11, % ymm11 # qhasm: r6 = a5 & b1 # asm 1: vpand <a5=reg256#4,<b1=reg256#12,>r6=reg256#13 # asm 2: vpand <a5=%ymm3,<b1=%ymm11,>r6=%ymm12 vpand % ymm3, % ymm11, % ymm12 # qhasm: r = mem256[ input_2 + 0 ] # asm 1: vmovupd 0(<input_2=int64#3),>r=reg256#14 # asm 2: vmovupd 0(<input_2=%rdx),>r=%ymm13 vmovupd 0( % rdx), % ymm13 # qhasm: b0[0,1,2,3] = r[1,3,1,3] # asm 1: vpermq $0xdd,<r=reg256#14,>b0=reg256#14 # asm 2: vpermq $0xdd,<r=%ymm13,>b0=%ymm13 vpermq $0xdd, % ymm13, % ymm13 # qhasm: r5 = a5 & b0 # asm 1: vpand <a5=reg256#4,<b0=reg256#14,>r5=reg256#4 # asm 2: vpand <a5=%ymm3,<b0=%ymm13,>r5=%ymm3 vpand % ymm3, % ymm13, % ymm3 # qhasm: a4[0,1,2,3] = s2[0,0,1,1] # asm 1: vpermq $0x50,<s2=reg256#3,>a4=reg256#3 # asm 2: vpermq $0x50,<s2=%ymm2,>a4=%ymm2 vpermq $0x50, % ymm2, % ymm2 # qhasm: r = a4 & b5 # asm 1: vpand <a4=reg256#3,<b5=reg256#5,>r=reg256#15 # asm 2: 
vpand <a4=%ymm2,<b5=%ymm4,>r=%ymm14 vpand % ymm2, % ymm4, % ymm14 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#15,<r9=reg256#7,<r9=reg256#7 # asm 2: vpxor <r=%ymm14,<r9=%ymm6,<r9=%ymm6 vpxor % ymm14, % ymm6, % ymm6 # qhasm: mem256[ ptr + 288 ] = r9 # asm 1: vmovupd <r9=reg256#7,288(<ptr=int64#4) # asm 2: vmovupd <r9=%ymm6,288(<ptr=%rcx) vmovupd % ymm6, 288( % rcx) # qhasm: r = a4 & b4 # asm 1: vpand <a4=reg256#3,<b4=reg256#6,>r=reg256#7 # asm 2: vpand <a4=%ymm2,<b4=%ymm5,>r=%ymm6 vpand % ymm2, % ymm5, % ymm6 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#7,<r8=reg256#9,<r8=reg256#9 # asm 2: vpxor <r=%ymm6,<r8=%ymm8,<r8=%ymm8 vpxor % ymm6, % ymm8, % ymm8 # qhasm: r = a4 & b3 # asm 1: vpand <a4=reg256#3,<b3=reg256#8,>r=reg256#7 # asm 2: vpand <a4=%ymm2,<b3=%ymm7,>r=%ymm6 vpand % ymm2, % ymm7, % ymm6 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#7,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm6,<r7=%ymm10,<r7=%ymm10 vpxor % ymm6, % ymm10, % ymm10 # qhasm: r = a4 & b2 # asm 1: vpand <a4=reg256#3,<b2=reg256#10,>r=reg256#7 # asm 2: vpand <a4=%ymm2,<b2=%ymm9,>r=%ymm6 vpand % ymm2, % ymm9, % ymm6 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#7,<r6=reg256#13,<r6=reg256#13 # asm 2: vpxor <r=%ymm6,<r6=%ymm12,<r6=%ymm12 vpxor % ymm6, % ymm12, % ymm12 # qhasm: r = a4 & b1 # asm 1: vpand <a4=reg256#3,<b1=reg256#12,>r=reg256#7 # asm 2: vpand <a4=%ymm2,<b1=%ymm11,>r=%ymm6 vpand % ymm2, % ymm11, % ymm6 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#7,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm6,<r5=%ymm3,<r5=%ymm3 vpxor % ymm6, % ymm3, % ymm3 # qhasm: r4 = a4 & b0 # asm 1: vpand <a4=reg256#3,<b0=reg256#14,>r4=reg256#3 # asm 2: vpand <a4=%ymm2,<b0=%ymm13,>r4=%ymm2 vpand % ymm2, % ymm13, % ymm2 # qhasm: a3[0,1,2,3] = s1[2,2,3,3] # asm 1: vpermq $0xfa,<s1=reg256#2,>a3=reg256#7 # asm 2: vpermq $0xfa,<s1=%ymm1,>a3=%ymm6 vpermq $0xfa, % ymm1, % ymm6 # qhasm: r = a3 & b5 # asm 1: vpand <a3=reg256#7,<b5=reg256#5,>r=reg256#15 # asm 2: vpand <a3=%ymm6,<b5=%ymm4,>r=%ymm14 vpand % ymm6, % ymm4, % ymm14 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#15,<r8=reg256#9,<r8=reg256#9 # asm 2: vpxor <r=%ymm14,<r8=%ymm8,<r8=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: mem256[ ptr + 256 ] = r8 # asm 1: vmovupd <r8=reg256#9,256(<ptr=int64#4) # asm 2: vmovupd <r8=%ymm8,256(<ptr=%rcx) vmovupd % ymm8, 256( % rcx) # qhasm: r = a3 & b4 # asm 1: vpand <a3=reg256#7,<b4=reg256#6,>r=reg256#9 # asm 2: vpand <a3=%ymm6,<b4=%ymm5,>r=%ymm8 vpand % ymm6, % ymm5, % ymm8 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#9,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm8,<r7=%ymm10,<r7=%ymm10 vpxor % ymm8, % ymm10, % ymm10 # qhasm: r = a3 & b3 # asm 1: vpand <a3=reg256#7,<b3=reg256#8,>r=reg256#9 # asm 2: vpand <a3=%ymm6,<b3=%ymm7,>r=%ymm8 vpand % ymm6, % ymm7, % ymm8 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#9,<r6=reg256#13,<r6=reg256#13 # asm 2: vpxor <r=%ymm8,<r6=%ymm12,<r6=%ymm12 vpxor % ymm8, % ymm12, % ymm12 # qhasm: r = a3 & b2 # asm 1: vpand <a3=reg256#7,<b2=reg256#10,>r=reg256#9 # asm 2: vpand <a3=%ymm6,<b2=%ymm9,>r=%ymm8 vpand % ymm6, % ymm9, % ymm8 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#9,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm8,<r5=%ymm3,<r5=%ymm3 vpxor % ymm8, % ymm3, % ymm3 # qhasm: r = a3 & b1 # asm 1: vpand <a3=reg256#7,<b1=reg256#12,>r=reg256#9 # asm 2: vpand <a3=%ymm6,<b1=%ymm11,>r=%ymm8 vpand % ymm6, % ymm11, % ymm8 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#9,<r4=reg256#3,<r4=reg256#3 # asm 2: vpxor <r=%ymm8,<r4=%ymm2,<r4=%ymm2 vpxor % ymm8, % ymm2, % ymm2 # qhasm: r3 = a3 & b0 # asm 1: vpand <a3=reg256#7,<b0=reg256#14,>r3=reg256#7 # asm 
2: vpand <a3=%ymm6,<b0=%ymm13,>r3=%ymm6 vpand % ymm6, % ymm13, % ymm6 # qhasm: a2[0,1,2,3] = s1[0,0,1,1] # asm 1: vpermq $0x50,<s1=reg256#2,>a2=reg256#2 # asm 2: vpermq $0x50,<s1=%ymm1,>a2=%ymm1 vpermq $0x50, % ymm1, % ymm1 # qhasm: r = a2 & b5 # asm 1: vpand <a2=reg256#2,<b5=reg256#5,>r=reg256#9 # asm 2: vpand <a2=%ymm1,<b5=%ymm4,>r=%ymm8 vpand % ymm1, % ymm4, % ymm8 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#9,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm8,<r7=%ymm10,<r7=%ymm10 vpxor % ymm8, % ymm10, % ymm10 # qhasm: mem256[ ptr + 224 ] = r7 # asm 1: vmovupd <r7=reg256#11,224(<ptr=int64#4) # asm 2: vmovupd <r7=%ymm10,224(<ptr=%rcx) vmovupd % ymm10, 224( % rcx) # qhasm: r = a2 & b4 # asm 1: vpand <a2=reg256#2,<b4=reg256#6,>r=reg256#9 # asm 2: vpand <a2=%ymm1,<b4=%ymm5,>r=%ymm8 vpand % ymm1, % ymm5, % ymm8 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#9,<r6=reg256#13,<r6=reg256#13 # asm 2: vpxor <r=%ymm8,<r6=%ymm12,<r6=%ymm12 vpxor % ymm8, % ymm12, % ymm12 # qhasm: r = a2 & b3 # asm 1: vpand <a2=reg256#2,<b3=reg256#8,>r=reg256#9 # asm 2: vpand <a2=%ymm1,<b3=%ymm7,>r=%ymm8 vpand % ymm1, % ymm7, % ymm8 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#9,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm8,<r5=%ymm3,<r5=%ymm3 vpxor % ymm8, % ymm3, % ymm3 # qhasm: r = a2 & b2 # asm 1: vpand <a2=reg256#2,<b2=reg256#10,>r=reg256#9 # asm 2: vpand <a2=%ymm1,<b2=%ymm9,>r=%ymm8 vpand % ymm1, % ymm9, % ymm8 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#9,<r4=reg256#3,<r4=reg256#3 # asm 2: vpxor <r=%ymm8,<r4=%ymm2,<r4=%ymm2 vpxor % ymm8, % ymm2, % ymm2 # qhasm: r = a2 & b1 # asm 1: vpand <a2=reg256#2,<b1=reg256#12,>r=reg256#9 # asm 2: vpand <a2=%ymm1,<b1=%ymm11,>r=%ymm8 vpand % ymm1, % ymm11, % ymm8 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#9,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm8,<r3=%ymm6,<r3=%ymm6 vpxor % ymm8, % ymm6, % ymm6 # qhasm: r2 = a2 & b0 # asm 1: vpand <a2=reg256#2,<b0=reg256#14,>r2=reg256#2 # asm 2: vpand <a2=%ymm1,<b0=%ymm13,>r2=%ymm1 vpand % ymm1, % ymm13, % ymm1 # qhasm: a1[0,1,2,3] = s0[2,2,3,3] # asm 1: vpermq $0xfa,<s0=reg256#1,>a1=reg256#9 # asm 2: vpermq $0xfa,<s0=%ymm0,>a1=%ymm8 vpermq $0xfa, % ymm0, % ymm8 # qhasm: r = a1 & b5 # asm 1: vpand <a1=reg256#9,<b5=reg256#5,>r=reg256#11 # asm 2: vpand <a1=%ymm8,<b5=%ymm4,>r=%ymm10 vpand % ymm8, % ymm4, % ymm10 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#11,<r6=reg256#13,<r6=reg256#13 # asm 2: vpxor <r=%ymm10,<r6=%ymm12,<r6=%ymm12 vpxor % ymm10, % ymm12, % ymm12 # qhasm: mem256[ ptr + 192 ] = r6 # asm 1: vmovupd <r6=reg256#13,192(<ptr=int64#4) # asm 2: vmovupd <r6=%ymm12,192(<ptr=%rcx) vmovupd % ymm12, 192( % rcx) # qhasm: r = a1 & b4 # asm 1: vpand <a1=reg256#9,<b4=reg256#6,>r=reg256#11 # asm 2: vpand <a1=%ymm8,<b4=%ymm5,>r=%ymm10 vpand % ymm8, % ymm5, % ymm10 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#11,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm10,<r5=%ymm3,<r5=%ymm3 vpxor % ymm10, % ymm3, % ymm3 # qhasm: r = a1 & b3 # asm 1: vpand <a1=reg256#9,<b3=reg256#8,>r=reg256#11 # asm 2: vpand <a1=%ymm8,<b3=%ymm7,>r=%ymm10 vpand % ymm8, % ymm7, % ymm10 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#11,<r4=reg256#3,<r4=reg256#3 # asm 2: vpxor <r=%ymm10,<r4=%ymm2,<r4=%ymm2 vpxor % ymm10, % ymm2, % ymm2 # qhasm: r = a1 & b2 # asm 1: vpand <a1=reg256#9,<b2=reg256#10,>r=reg256#11 # asm 2: vpand <a1=%ymm8,<b2=%ymm9,>r=%ymm10 vpand % ymm8, % ymm9, % ymm10 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#11,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm10,<r3=%ymm6,<r3=%ymm6 vpxor % ymm10, % ymm6, % ymm6 # qhasm: r = a1 & b1 # asm 1: vpand 
<a1=reg256#9,<b1=reg256#12,>r=reg256#11 # asm 2: vpand <a1=%ymm8,<b1=%ymm11,>r=%ymm10 vpand % ymm8, % ymm11, % ymm10 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#11,<r2=reg256#2,<r2=reg256#2 # asm 2: vpxor <r=%ymm10,<r2=%ymm1,<r2=%ymm1 vpxor % ymm10, % ymm1, % ymm1 # qhasm: r1 = a1 & b0 # asm 1: vpand <a1=reg256#9,<b0=reg256#14,>r1=reg256#9 # asm 2: vpand <a1=%ymm8,<b0=%ymm13,>r1=%ymm8 vpand % ymm8, % ymm13, % ymm8 # qhasm: a0[0,1,2,3] = s0[0,0,1,1] # asm 1: vpermq $0x50,<s0=reg256#1,>a0=reg256#1 # asm 2: vpermq $0x50,<s0=%ymm0,>a0=%ymm0 vpermq $0x50, % ymm0, % ymm0 # qhasm: r = a0 & b5 # asm 1: vpand <a0=reg256#1,<b5=reg256#5,>r=reg256#5 # asm 2: vpand <a0=%ymm0,<b5=%ymm4,>r=%ymm4 vpand % ymm0, % ymm4, % ymm4 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#5,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm4,<r5=%ymm3,<r5=%ymm3 vpxor % ymm4, % ymm3, % ymm3 # qhasm: mem256[ ptr + 160 ] = r5 # asm 1: vmovupd <r5=reg256#4,160(<ptr=int64#4) # asm 2: vmovupd <r5=%ymm3,160(<ptr=%rcx) vmovupd % ymm3, 160( % rcx) # qhasm: r = a0 & b4 # asm 1: vpand <a0=reg256#1,<b4=reg256#6,>r=reg256#4 # asm 2: vpand <a0=%ymm0,<b4=%ymm5,>r=%ymm3 vpand % ymm0, % ymm5, % ymm3 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#4,<r4=reg256#3,<r4=reg256#3 # asm 2: vpxor <r=%ymm3,<r4=%ymm2,<r4=%ymm2 vpxor % ymm3, % ymm2, % ymm2 # qhasm: r = a0 & b3 # asm 1: vpand <a0=reg256#1,<b3=reg256#8,>r=reg256#4 # asm 2: vpand <a0=%ymm0,<b3=%ymm7,>r=%ymm3 vpand % ymm0, % ymm7, % ymm3 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#4,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm3,<r3=%ymm6,<r3=%ymm6 vpxor % ymm3, % ymm6, % ymm6 # qhasm: r = a0 & b2 # asm 1: vpand <a0=reg256#1,<b2=reg256#10,>r=reg256#4 # asm 2: vpand <a0=%ymm0,<b2=%ymm9,>r=%ymm3 vpand % ymm0, % ymm9, % ymm3 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#4,<r2=reg256#2,<r2=reg256#2 # asm 2: vpxor <r=%ymm3,<r2=%ymm1,<r2=%ymm1 vpxor % ymm3, % ymm1, % ymm1 # qhasm: r = a0 & b1 # asm 1: vpand <a0=reg256#1,<b1=reg256#12,>r=reg256#4 # asm 2: vpand <a0=%ymm0,<b1=%ymm11,>r=%ymm3 vpand % ymm0, % ymm11, % ymm3 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#4,<r1=reg256#9,<r1=reg256#9 # asm 2: vpxor <r=%ymm3,<r1=%ymm8,<r1=%ymm8 vpxor % ymm3, % ymm8, % ymm8 # qhasm: r0 = a0 & b0 # asm 1: vpand <a0=reg256#1,<b0=reg256#14,>r0=reg256#1 # asm 2: vpand <a0=%ymm0,<b0=%ymm13,>r0=%ymm0 vpand % ymm0, % ymm13, % ymm0 # qhasm: mem256[ ptr + 128 ] = r4 # asm 1: vmovupd <r4=reg256#3,128(<ptr=int64#4) # asm 2: vmovupd <r4=%ymm2,128(<ptr=%rcx) vmovupd % ymm2, 128( % rcx) # qhasm: mem256[ ptr + 96 ] = r3 # asm 1: vmovupd <r3=reg256#7,96(<ptr=int64#4) # asm 2: vmovupd <r3=%ymm6,96(<ptr=%rcx) vmovupd % ymm6, 96( % rcx) # qhasm: mem256[ ptr + 64 ] = r2 # asm 1: vmovupd <r2=reg256#2,64(<ptr=int64#4) # asm 2: vmovupd <r2=%ymm1,64(<ptr=%rcx) vmovupd % ymm1, 64( % rcx) # qhasm: mem256[ ptr + 32 ] = r1 # asm 1: vmovupd <r1=reg256#9,32(<ptr=int64#4) # asm 2: vmovupd <r1=%ymm8,32(<ptr=%rcx) vmovupd % ymm8, 32( % rcx) # qhasm: mem256[ ptr + 0 ] = r0 # asm 1: vmovupd <r0=reg256#1,0(<ptr=int64#4) # asm 2: vmovupd <r0=%ymm0,0(<ptr=%rcx) vmovupd % ymm0, 0( % rcx) # qhasm: h22 = mem64[ ptr + 344 ] # asm 1: movq 344(<ptr=int64#4),>h22=int64#2 # asm 2: movq 344(<ptr=%rcx),>h22=%rsi movq 344( % rcx), % rsi # qhasm: h13 = h22 # asm 1: mov <h22=int64#2,>h13=int64#3 # asm 2: mov <h22=%rsi,>h13=%rdx mov % rsi, % rdx # qhasm: h10 = h22 # asm 1: mov <h22=int64#2,>h10=int64#2 # asm 2: mov <h22=%rsi,>h10=%rsi mov % rsi, % rsi # qhasm: h21 = mem64[ ptr + 336 ] # asm 1: movq 336(<ptr=int64#4),>h21=int64#5 # asm 2: movq 336(<ptr=%rcx),>h21=%r8 movq 336( 
% rcx), % r8 # qhasm: h21 ^= *(uint64 *) ( ptr + 328 ) # asm 1: xorq 328(<ptr=int64#4),<h21=int64#5 # asm 2: xorq 328(<ptr=%rcx),<h21=%r8 xorq 328( % rcx), % r8 # qhasm: h12 = h21 # asm 1: mov <h21=int64#5,>h12=int64#6 # asm 2: mov <h21=%r8,>h12=%r9 mov % r8, % r9 # qhasm: h9 = h21 # asm 1: mov <h21=int64#5,>h9=int64#5 # asm 2: mov <h21=%r8,>h9=%r8 mov % r8, % r8 # qhasm: h20 = mem64[ ptr + 312 ] # asm 1: movq 312(<ptr=int64#4),>h20=int64#7 # asm 2: movq 312(<ptr=%rcx),>h20=%rax movq 312( % rcx), % rax # qhasm: h20 ^= *(uint64 *) ( ptr + 320 ) # asm 1: xorq 320(<ptr=int64#4),<h20=int64#7 # asm 2: xorq 320(<ptr=%rcx),<h20=%rax xorq 320( % rcx), % rax # qhasm: h11 = h20 # asm 1: mov <h20=int64#7,>h11=int64#8 # asm 2: mov <h20=%rax,>h11=%r10 mov % rax, % r10 # qhasm: h8 = h20 # asm 1: mov <h20=int64#7,>h8=int64#7 # asm 2: mov <h20=%rax,>h8=%rax mov % rax, % rax # qhasm: h19 = mem64[ ptr + 304 ] # asm 1: movq 304(<ptr=int64#4),>h19=int64#9 # asm 2: movq 304(<ptr=%rcx),>h19=%r11 movq 304( % rcx), % r11 # qhasm: h19 ^= *(uint64 *) ( ptr + 296 ) # asm 1: xorq 296(<ptr=int64#4),<h19=int64#9 # asm 2: xorq 296(<ptr=%rcx),<h19=%r11 xorq 296( % rcx), % r11 # qhasm: h10 ^= h19 # asm 1: xor <h19=int64#9,<h10=int64#2 # asm 2: xor <h19=%r11,<h10=%rsi xor % r11, % rsi # qhasm: h7 = h19 # asm 1: mov <h19=int64#9,>h7=int64#9 # asm 2: mov <h19=%r11,>h7=%r11 mov % r11, % r11 # qhasm: h18 = mem64[ ptr + 280 ] # asm 1: movq 280(<ptr=int64#4),>h18=int64#10 # asm 2: movq 280(<ptr=%rcx),>h18=%r12 movq 280( % rcx), % r12 # qhasm: h18 ^= *(uint64 *) ( ptr + 288 ) # asm 1: xorq 288(<ptr=int64#4),<h18=int64#10 # asm 2: xorq 288(<ptr=%rcx),<h18=%r12 xorq 288( % rcx), % r12 # qhasm: h9 ^= h18 # asm 1: xor <h18=int64#10,<h9=int64#5 # asm 2: xor <h18=%r12,<h9=%r8 xor % r12, % r8 # qhasm: h6 = h18 # asm 1: mov <h18=int64#10,>h6=int64#10 # asm 2: mov <h18=%r12,>h6=%r12 mov % r12, % r12 # qhasm: h17 = mem64[ ptr + 272 ] # asm 1: movq 272(<ptr=int64#4),>h17=int64#11 # asm 2: movq 272(<ptr=%rcx),>h17=%r13 movq 272( % rcx), % r13 # qhasm: h17 ^= *(uint64 *) ( ptr + 264 ) # asm 1: xorq 264(<ptr=int64#4),<h17=int64#11 # asm 2: xorq 264(<ptr=%rcx),<h17=%r13 xorq 264( % rcx), % r13 # qhasm: h8 ^= h17 # asm 1: xor <h17=int64#11,<h8=int64#7 # asm 2: xor <h17=%r13,<h8=%rax xor % r13, % rax # qhasm: h5 = h17 # asm 1: mov <h17=int64#11,>h5=int64#11 # asm 2: mov <h17=%r13,>h5=%r13 mov % r13, % r13 # qhasm: h16 = mem64[ ptr + 248 ] # asm 1: movq 248(<ptr=int64#4),>h16=int64#12 # asm 2: movq 248(<ptr=%rcx),>h16=%r14 movq 248( % rcx), % r14 # qhasm: h16 ^= *(uint64 *) ( ptr + 256 ) # asm 1: xorq 256(<ptr=int64#4),<h16=int64#12 # asm 2: xorq 256(<ptr=%rcx),<h16=%r14 xorq 256( % rcx), % r14 # qhasm: h7 ^= h16 # asm 1: xor <h16=int64#12,<h7=int64#9 # asm 2: xor <h16=%r14,<h7=%r11 xor % r14, % r11 # qhasm: h4 = h16 # asm 1: mov <h16=int64#12,>h4=int64#12 # asm 2: mov <h16=%r14,>h4=%r14 mov % r14, % r14 # qhasm: h15 = mem64[ ptr + 240 ] # asm 1: movq 240(<ptr=int64#4),>h15=int64#13 # asm 2: movq 240(<ptr=%rcx),>h15=%r15 movq 240( % rcx), % r15 # qhasm: h15 ^= *(uint64 *) ( ptr + 232 ) # asm 1: xorq 232(<ptr=int64#4),<h15=int64#13 # asm 2: xorq 232(<ptr=%rcx),<h15=%r15 xorq 232( % rcx), % r15 # qhasm: h6 ^= h15 # asm 1: xor <h15=int64#13,<h6=int64#10 # asm 2: xor <h15=%r15,<h6=%r12 xor % r15, % r12 # qhasm: h3 = h15 # asm 1: mov <h15=int64#13,>h3=int64#13 # asm 2: mov <h15=%r15,>h3=%r15 mov % r15, % r15 # qhasm: h14 = mem64[ ptr + 216 ] # asm 1: movq 216(<ptr=int64#4),>h14=int64#14 # asm 2: movq 216(<ptr=%rcx),>h14=%rbx movq 216( % rcx), % rbx # 
qhasm: h14 ^= *(uint64 *) ( ptr + 224 ) # asm 1: xorq 224(<ptr=int64#4),<h14=int64#14 # asm 2: xorq 224(<ptr=%rcx),<h14=%rbx xorq 224( % rcx), % rbx # qhasm: h5 ^= h14 # asm 1: xor <h14=int64#14,<h5=int64#11 # asm 2: xor <h14=%rbx,<h5=%r13 xor % rbx, % r13 # qhasm: h2 = h14 # asm 1: mov <h14=int64#14,>h2=int64#14 # asm 2: mov <h14=%rbx,>h2=%rbx mov % rbx, % rbx # qhasm: h13 ^= *(uint64 *) ( ptr + 208 ) # asm 1: xorq 208(<ptr=int64#4),<h13=int64#3 # asm 2: xorq 208(<ptr=%rcx),<h13=%rdx xorq 208( % rcx), % rdx # qhasm: h13 ^= *(uint64 *) ( ptr + 200 ) # asm 1: xorq 200(<ptr=int64#4),<h13=int64#3 # asm 2: xorq 200(<ptr=%rcx),<h13=%rdx xorq 200( % rcx), % rdx # qhasm: h4 ^= h13 # asm 1: xor <h13=int64#3,<h4=int64#12 # asm 2: xor <h13=%rdx,<h4=%r14 xor % rdx, % r14 # qhasm: h1 = h13 # asm 1: mov <h13=int64#3,>h1=int64#3 # asm 2: mov <h13=%rdx,>h1=%rdx mov % rdx, % rdx # qhasm: h12 ^= *(uint64 *) ( ptr + 184 ) # asm 1: xorq 184(<ptr=int64#4),<h12=int64#6 # asm 2: xorq 184(<ptr=%rcx),<h12=%r9 xorq 184( % rcx), % r9 # qhasm: h12 ^= *(uint64 *) ( ptr + 192 ) # asm 1: xorq 192(<ptr=int64#4),<h12=int64#6 # asm 2: xorq 192(<ptr=%rcx),<h12=%r9 xorq 192( % rcx), % r9 # qhasm: h3 ^= h12 # asm 1: xor <h12=int64#6,<h3=int64#13 # asm 2: xor <h12=%r9,<h3=%r15 xor % r9, % r15 # qhasm: h0 = h12 # asm 1: mov <h12=int64#6,>h0=int64#6 # asm 2: mov <h12=%r9,>h0=%r9 mov % r9, % r9 # qhasm: h11 ^= *(uint64 *) ( ptr + 176 ) # asm 1: xorq 176(<ptr=int64#4),<h11=int64#8 # asm 2: xorq 176(<ptr=%rcx),<h11=%r10 xorq 176( % rcx), % r10 # qhasm: h11 ^= *(uint64 *) ( ptr + 168 ) # asm 1: xorq 168(<ptr=int64#4),<h11=int64#8 # asm 2: xorq 168(<ptr=%rcx),<h11=%r10 xorq 168( % rcx), % r10 # qhasm: mem64[ input_0 + 88 ] = h11 # asm 1: movq <h11=int64#8,88(<input_0=int64#1) # asm 2: movq <h11=%r10,88(<input_0=%rdi) movq % r10, 88( % rdi) # qhasm: h10 ^= *(uint64 *) ( ptr + 152 ) # asm 1: xorq 152(<ptr=int64#4),<h10=int64#2 # asm 2: xorq 152(<ptr=%rcx),<h10=%rsi xorq 152( % rcx), % rsi # qhasm: h10 ^= *(uint64 *) ( ptr + 160 ) # asm 1: xorq 160(<ptr=int64#4),<h10=int64#2 # asm 2: xorq 160(<ptr=%rcx),<h10=%rsi xorq 160( % rcx), % rsi # qhasm: mem64[ input_0 + 80 ] = h10 # asm 1: movq <h10=int64#2,80(<input_0=int64#1) # asm 2: movq <h10=%rsi,80(<input_0=%rdi) movq % rsi, 80( % rdi) # qhasm: h9 ^= *(uint64 *) ( ptr + 144 ) # asm 1: xorq 144(<ptr=int64#4),<h9=int64#5 # asm 2: xorq 144(<ptr=%rcx),<h9=%r8 xorq 144( % rcx), % r8 # qhasm: h9 ^= *(uint64 *) ( ptr + 136 ) # asm 1: xorq 136(<ptr=int64#4),<h9=int64#5 # asm 2: xorq 136(<ptr=%rcx),<h9=%r8 xorq 136( % rcx), % r8 # qhasm: mem64[ input_0 + 72 ] = h9 # asm 1: movq <h9=int64#5,72(<input_0=int64#1) # asm 2: movq <h9=%r8,72(<input_0=%rdi) movq % r8, 72( % rdi) # qhasm: h8 ^= *(uint64 *) ( ptr + 120 ) # asm 1: xorq 120(<ptr=int64#4),<h8=int64#7 # asm 2: xorq 120(<ptr=%rcx),<h8=%rax xorq 120( % rcx), % rax # qhasm: h8 ^= *(uint64 *) ( ptr + 128 ) # asm 1: xorq 128(<ptr=int64#4),<h8=int64#7 # asm 2: xorq 128(<ptr=%rcx),<h8=%rax xorq 128( % rcx), % rax # qhasm: mem64[ input_0 + 64 ] = h8 # asm 1: movq <h8=int64#7,64(<input_0=int64#1) # asm 2: movq <h8=%rax,64(<input_0=%rdi) movq % rax, 64( % rdi) # qhasm: h7 ^= *(uint64 *) ( ptr + 112 ) # asm 1: xorq 112(<ptr=int64#4),<h7=int64#9 # asm 2: xorq 112(<ptr=%rcx),<h7=%r11 xorq 112( % rcx), % r11 # qhasm: h7 ^= *(uint64 *) ( ptr + 104 ) # asm 1: xorq 104(<ptr=int64#4),<h7=int64#9 # asm 2: xorq 104(<ptr=%rcx),<h7=%r11 xorq 104( % rcx), % r11 # qhasm: mem64[ input_0 + 56 ] = h7 # asm 1: movq <h7=int64#9,56(<input_0=int64#1) # asm 2: movq 
<h7=%r11,56(<input_0=%rdi) movq % r11, 56( % rdi) # qhasm: h6 ^= *(uint64 *) ( ptr + 88 ) # asm 1: xorq 88(<ptr=int64#4),<h6=int64#10 # asm 2: xorq 88(<ptr=%rcx),<h6=%r12 xorq 88( % rcx), % r12 # qhasm: h6 ^= *(uint64 *) ( ptr + 96 ) # asm 1: xorq 96(<ptr=int64#4),<h6=int64#10 # asm 2: xorq 96(<ptr=%rcx),<h6=%r12 xorq 96( % rcx), % r12 # qhasm: mem64[ input_0 + 48 ] = h6 # asm 1: movq <h6=int64#10,48(<input_0=int64#1) # asm 2: movq <h6=%r12,48(<input_0=%rdi) movq % r12, 48( % rdi) # qhasm: h5 ^= *(uint64 *) ( ptr + 80 ) # asm 1: xorq 80(<ptr=int64#4),<h5=int64#11 # asm 2: xorq 80(<ptr=%rcx),<h5=%r13 xorq 80( % rcx), % r13 # qhasm: h5 ^= *(uint64 *) ( ptr + 72 ) # asm 1: xorq 72(<ptr=int64#4),<h5=int64#11 # asm 2: xorq 72(<ptr=%rcx),<h5=%r13 xorq 72( % rcx), % r13 # qhasm: mem64[ input_0 + 40 ] = h5 # asm 1: movq <h5=int64#11,40(<input_0=int64#1) # asm 2: movq <h5=%r13,40(<input_0=%rdi) movq % r13, 40( % rdi) # qhasm: h4 ^= *(uint64 *) ( ptr + 56 ) # asm 1: xorq 56(<ptr=int64#4),<h4=int64#12 # asm 2: xorq 56(<ptr=%rcx),<h4=%r14 xorq 56( % rcx), % r14 # qhasm: h4 ^= *(uint64 *) ( ptr + 64 ) # asm 1: xorq 64(<ptr=int64#4),<h4=int64#12 # asm 2: xorq 64(<ptr=%rcx),<h4=%r14 xorq 64( % rcx), % r14 # qhasm: mem64[ input_0 + 32 ] = h4 # asm 1: movq <h4=int64#12,32(<input_0=int64#1) # asm 2: movq <h4=%r14,32(<input_0=%rdi) movq % r14, 32( % rdi) # qhasm: h3 ^= *(uint64 *) ( ptr + 48 ) # asm 1: xorq 48(<ptr=int64#4),<h3=int64#13 # asm 2: xorq 48(<ptr=%rcx),<h3=%r15 xorq 48( % rcx), % r15 # qhasm: h3 ^= *(uint64 *) ( ptr + 40 ) # asm 1: xorq 40(<ptr=int64#4),<h3=int64#13 # asm 2: xorq 40(<ptr=%rcx),<h3=%r15 xorq 40( % rcx), % r15 # qhasm: mem64[ input_0 + 24 ] = h3 # asm 1: movq <h3=int64#13,24(<input_0=int64#1) # asm 2: movq <h3=%r15,24(<input_0=%rdi) movq % r15, 24( % rdi) # qhasm: h2 ^= *(uint64 *) ( ptr + 24 ) # asm 1: xorq 24(<ptr=int64#4),<h2=int64#14 # asm 2: xorq 24(<ptr=%rcx),<h2=%rbx xorq 24( % rcx), % rbx # qhasm: h2 ^= *(uint64 *) ( ptr + 32 ) # asm 1: xorq 32(<ptr=int64#4),<h2=int64#14 # asm 2: xorq 32(<ptr=%rcx),<h2=%rbx xorq 32( % rcx), % rbx # qhasm: mem64[ input_0 + 16 ] = h2 # asm 1: movq <h2=int64#14,16(<input_0=int64#1) # asm 2: movq <h2=%rbx,16(<input_0=%rdi) movq % rbx, 16( % rdi) # qhasm: h1 ^= *(uint64 *) ( ptr + 16 ) # asm 1: xorq 16(<ptr=int64#4),<h1=int64#3 # asm 2: xorq 16(<ptr=%rcx),<h1=%rdx xorq 16( % rcx), % rdx # qhasm: h1 ^= *(uint64 *) ( ptr + 8 ) # asm 1: xorq 8(<ptr=int64#4),<h1=int64#3 # asm 2: xorq 8(<ptr=%rcx),<h1=%rdx xorq 8( % rcx), % rdx # qhasm: mem64[ input_0 + 8 ] = h1 # asm 1: movq <h1=int64#3,8(<input_0=int64#1) # asm 2: movq <h1=%rdx,8(<input_0=%rdi) movq % rdx, 8( % rdi) # qhasm: h0 ^= *(uint64 *) ( ptr + 0 ) # asm 1: xorq 0(<ptr=int64#4),<h0=int64#6 # asm 2: xorq 0(<ptr=%rcx),<h0=%r9 xorq 0( % rcx), % r9 # qhasm: mem64[ input_0 + 0 ] = h0 # asm 1: movq <h0=int64#6,0(<input_0=int64#1) # asm 2: movq <h0=%r9,0(<input_0=%rdi) movq % r9, 0( % rdi) # qhasm: caller_r11 = r11_stack # asm 1: movq <r11_stack=stack64#1,>caller_r11=int64#9 # asm 2: movq <r11_stack=608(%rsp),>caller_r11=%r11 movq 608( % rsp), % r11 # qhasm: caller_r12 = r12_stack # asm 1: movq <r12_stack=stack64#2,>caller_r12=int64#10 # asm 2: movq <r12_stack=616(%rsp),>caller_r12=%r12 movq 616( % rsp), % r12 # qhasm: caller_r13 = r13_stack # asm 1: movq <r13_stack=stack64#3,>caller_r13=int64#11 # asm 2: movq <r13_stack=624(%rsp),>caller_r13=%r13 movq 624( % rsp), % r13 # qhasm: caller_r14 = r14_stack # asm 1: movq <r14_stack=stack64#4,>caller_r14=int64#12 # asm 2: movq 
<r14_stack=632(%rsp),>caller_r14=%r14
movq 632(%rsp),%r14

# qhasm: caller_r15 = r15_stack
# asm 1: movq <r15_stack=stack64#5,>caller_r15=int64#13
# asm 2: movq <r15_stack=640(%rsp),>caller_r15=%r15
movq 640(%rsp),%r15

# qhasm: caller_rbx = rbx_stack
# asm 1: movq <rbx_stack=stack64#6,>caller_rbx=int64#14
# asm 2: movq <rbx_stack=648(%rsp),>caller_rbx=%rbx
movq 648(%rsp),%rbx

# qhasm: return
add %r11,%rsp
ret
mktmansour/MKT-KSA-Geolocation-Security
262,634
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece348864/avx2/transpose_64x64_asm.S
#include "namespace.h" #define MASK0_0 CRYPTO_NAMESPACE(MASK0_0) #define _MASK0_0 _CRYPTO_NAMESPACE(MASK0_0) #define MASK0_1 CRYPTO_NAMESPACE(MASK0_1) #define _MASK0_1 _CRYPTO_NAMESPACE(MASK0_1) #define MASK1_0 CRYPTO_NAMESPACE(MASK1_0) #define _MASK1_0 _CRYPTO_NAMESPACE(MASK1_0) #define MASK1_1 CRYPTO_NAMESPACE(MASK1_1) #define _MASK1_1 _CRYPTO_NAMESPACE(MASK1_1) #define MASK2_0 CRYPTO_NAMESPACE(MASK2_0) #define _MASK2_0 _CRYPTO_NAMESPACE(MASK2_0) #define MASK2_1 CRYPTO_NAMESPACE(MASK2_1) #define _MASK2_1 _CRYPTO_NAMESPACE(MASK2_1) #define MASK3_0 CRYPTO_NAMESPACE(MASK3_0) #define _MASK3_0 _CRYPTO_NAMESPACE(MASK3_0) #define MASK3_1 CRYPTO_NAMESPACE(MASK3_1) #define _MASK3_1 _CRYPTO_NAMESPACE(MASK3_1) #define MASK4_0 CRYPTO_NAMESPACE(MASK4_0) #define _MASK4_0 _CRYPTO_NAMESPACE(MASK4_0) #define MASK4_1 CRYPTO_NAMESPACE(MASK4_1) #define _MASK4_1 _CRYPTO_NAMESPACE(MASK4_1) #define MASK5_0 CRYPTO_NAMESPACE(MASK5_0) #define _MASK5_0 _CRYPTO_NAMESPACE(MASK5_0) #define MASK5_1 CRYPTO_NAMESPACE(MASK5_1) #define _MASK5_1 _CRYPTO_NAMESPACE(MASK5_1) #define transpose_64x64_asm CRYPTO_NAMESPACE(transpose_64x64_asm) #define _transpose_64x64_asm _CRYPTO_NAMESPACE(transpose_64x64_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg128 r0 # qhasm: reg128 r1 # qhasm: reg128 r2 # qhasm: reg128 r3 # qhasm: reg128 r4 # qhasm: reg128 r5 # qhasm: reg128 r6 # qhasm: reg128 r7 # qhasm: reg128 t0 # qhasm: reg128 t1 # qhasm: reg128 v00 # qhasm: reg128 v01 # qhasm: reg128 v10 # qhasm: reg128 v11 # qhasm: int64 buf # qhasm: reg128 mask0 # qhasm: reg128 mask1 # qhasm: reg128 mask2 # qhasm: reg128 mask3 # qhasm: reg128 mask4 # qhasm: reg128 mask5 # qhasm: enter transpose_64x64_asm .p2align 5 .global _transpose_64x64_asm .global transpose_64x64_asm _transpose_64x64_asm: transpose_64x64_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: mask0 aligned= mem128[ MASK5_0 ] # asm 1: movdqa MASK5_0(%rip),>mask0=reg128#1 # asm 2: movdqa MASK5_0(%rip),>mask0=%xmm0 movdqa MASK5_0( % rip), % xmm0 # qhasm: mask1 aligned= mem128[ MASK5_1 ] # asm 1: movdqa MASK5_1(%rip),>mask1=reg128#2 # asm 2: movdqa MASK5_1(%rip),>mask1=%xmm1 movdqa MASK5_1( % rip), % xmm1 # qhasm: mask2 aligned= mem128[ MASK4_0 ] # asm 1: movdqa MASK4_0(%rip),>mask2=reg128#3 # asm 2: movdqa MASK4_0(%rip),>mask2=%xmm2 movdqa MASK4_0( % rip), % xmm2 # qhasm: mask3 aligned= mem128[ MASK4_1 ] # asm 1: movdqa MASK4_1(%rip),>mask3=reg128#4 # asm 2: movdqa MASK4_1(%rip),>mask3=%xmm3 movdqa MASK4_1( % rip), % xmm3 # qhasm: mask4 aligned= mem128[ MASK3_0 ] # asm 1: movdqa MASK3_0(%rip),>mask4=reg128#5 # asm 2: movdqa MASK3_0(%rip),>mask4=%xmm4 movdqa MASK3_0( % rip), % xmm4 # qhasm: mask5 aligned= mem128[ MASK3_1 ] # asm 1: movdqa MASK3_1(%rip),>mask5=reg128#6 # asm 2: movdqa MASK3_1(%rip),>mask5=%xmm5 movdqa MASK3_1( % rip), % xmm5 # qhasm: r0 = mem64[ input_0 + 0 ] x2 # asm 1: movddup 0(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 0(<input_0=%rdi),>r0=%xmm6 movddup 0( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 64 ] x2 # asm 1: movddup 64(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 64(<input_0=%rdi),>r1=%xmm7 movddup 64( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 128 ] x2 # asm 1: movddup 
128(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 128(<input_0=%rdi),>r2=%xmm8 movddup 128( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 192 ] x2 # asm 1: movddup 192(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 192(<input_0=%rdi),>r3=%xmm9 movddup 192( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 256 ] x2 # asm 1: movddup 256(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 256(<input_0=%rdi),>r4=%xmm10 movddup 256( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 320 ] x2 # asm 1: movddup 320(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 320(<input_0=%rdi),>r5=%xmm11 movddup 320( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 384 ] x2 # asm 1: movddup 384(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 384(<input_0=%rdi),>r6=%xmm12 movddup 384( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 448 ] x2 # asm 1: movddup 448(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 448(<input_0=%rdi),>r7=%xmm13 movddup 448( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor 
<v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 
# asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # 
qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 0 ] = buf # asm 1: movq <buf=int64#2,0(<input_0=int64#1) # asm 2: movq <buf=%rsi,0(<input_0=%rdi) movq % rsi, 0( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 64 ] = buf # asm 1: movq <buf=int64#2,64(<input_0=int64#1) # asm 2: movq <buf=%rsi,64(<input_0=%rdi) movq % rsi, 64( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 128 ] = buf # asm 1: movq <buf=int64#2,128(<input_0=int64#1) # asm 2: movq <buf=%rsi,128(<input_0=%rdi) movq % rsi, 128( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: mem64[ input_0 + 192 ] = buf # asm 1: movq <buf=int64#2,192(<input_0=int64#1) # asm 2: movq <buf=%rsi,192(<input_0=%rdi) movq % rsi, 192( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 256 ] = buf # asm 1: movq <buf=int64#2,256(<input_0=int64#1) # asm 2: movq <buf=%rsi,256(<input_0=%rdi) movq % rsi, 256( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 320 ] = buf # asm 1: movq <buf=int64#2,320(<input_0=int64#1) # asm 2: movq <buf=%rsi,320(<input_0=%rdi) movq % rsi, 320( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi 
pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 384 ] = buf # asm 1: movq <buf=int64#2,384(<input_0=int64#1) # asm 2: movq <buf=%rsi,384(<input_0=%rdi) movq % rsi, 384( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 448 ] = buf # asm 1: movq <buf=int64#2,448(<input_0=int64#1) # asm 2: movq <buf=%rsi,448(<input_0=%rdi) movq % rsi, 448( % rdi) # qhasm: r0 = mem64[ input_0 + 8 ] x2 # asm 1: movddup 8(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 8(<input_0=%rdi),>r0=%xmm6 movddup 8( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 72 ] x2 # asm 1: movddup 72(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 72(<input_0=%rdi),>r1=%xmm7 movddup 72( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 136 ] x2 # asm 1: movddup 136(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 136(<input_0=%rdi),>r2=%xmm8 movddup 136( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 200 ] x2 # asm 1: movddup 200(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 200(<input_0=%rdi),>r3=%xmm9 movddup 200( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 264 ] x2 # asm 1: movddup 264(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 264(<input_0=%rdi),>r4=%xmm10 movddup 264( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 328 ] x2 # asm 1: movddup 328(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 328(<input_0=%rdi),>r5=%xmm11 movddup 328( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 392 ] x2 # asm 1: movddup 392(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 392(<input_0=%rdi),>r6=%xmm12 movddup 392( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 456 ] x2 # asm 1: movddup 456(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 456(<input_0=%rdi),>r7=%xmm13 movddup 456( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 
= v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand 
<mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 
vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 8 ] = buf # asm 1: movq <buf=int64#2,8(<input_0=int64#1) # asm 2: movq <buf=%rsi,8(<input_0=%rdi) movq % rsi, 8( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 72 ] = buf # asm 1: movq <buf=int64#2,72(<input_0=int64#1) # asm 2: movq <buf=%rsi,72(<input_0=%rdi) movq % rsi, 72( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 136 ] = buf # asm 1: movq <buf=int64#2,136(<input_0=int64#1) # asm 2: movq <buf=%rsi,136(<input_0=%rdi) movq % rsi, 136( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: 
mem64[ input_0 + 200 ] = buf # asm 1: movq <buf=int64#2,200(<input_0=int64#1) # asm 2: movq <buf=%rsi,200(<input_0=%rdi) movq % rsi, 200( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 264 ] = buf # asm 1: movq <buf=int64#2,264(<input_0=int64#1) # asm 2: movq <buf=%rsi,264(<input_0=%rdi) movq % rsi, 264( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 328 ] = buf # asm 1: movq <buf=int64#2,328(<input_0=int64#1) # asm 2: movq <buf=%rsi,328(<input_0=%rdi) movq % rsi, 328( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 392 ] = buf # asm 1: movq <buf=int64#2,392(<input_0=int64#1) # asm 2: movq <buf=%rsi,392(<input_0=%rdi) movq % rsi, 392( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 456 ] = buf # asm 1: movq <buf=int64#2,456(<input_0=int64#1) # asm 2: movq <buf=%rsi,456(<input_0=%rdi) movq % rsi, 456( % rdi) # qhasm: r0 = mem64[ input_0 + 16 ] x2 # asm 1: movddup 16(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 16(<input_0=%rdi),>r0=%xmm6 movddup 16( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 80 ] x2 # asm 1: movddup 80(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 80(<input_0=%rdi),>r1=%xmm7 movddup 80( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 144 ] x2 # asm 1: movddup 144(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 144(<input_0=%rdi),>r2=%xmm8 movddup 144( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 208 ] x2 # asm 1: movddup 208(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 208(<input_0=%rdi),>r3=%xmm9 movddup 208( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 272 ] x2 # asm 1: movddup 272(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 272(<input_0=%rdi),>r4=%xmm10 movddup 272( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 336 ] x2 # asm 1: movddup 336(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 336(<input_0=%rdi),>r5=%xmm11 movddup 336( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 400 ] x2 # asm 1: movddup 400(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 400(<input_0=%rdi),>r6=%xmm12 movddup 400( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 464 ] x2 # asm 1: movddup 464(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 464(<input_0=%rdi),>r7=%xmm13 movddup 464( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor 
<v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 
# qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 
2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 16 ] = buf # 
asm 1: movq <buf=int64#2,16(<input_0=int64#1) # asm 2: movq <buf=%rsi,16(<input_0=%rdi) movq % rsi, 16( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 80 ] = buf # asm 1: movq <buf=int64#2,80(<input_0=int64#1) # asm 2: movq <buf=%rsi,80(<input_0=%rdi) movq % rsi, 80( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 144 ] = buf # asm 1: movq <buf=int64#2,144(<input_0=int64#1) # asm 2: movq <buf=%rsi,144(<input_0=%rdi) movq % rsi, 144( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: mem64[ input_0 + 208 ] = buf # asm 1: movq <buf=int64#2,208(<input_0=int64#1) # asm 2: movq <buf=%rsi,208(<input_0=%rdi) movq % rsi, 208( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 272 ] = buf # asm 1: movq <buf=int64#2,272(<input_0=int64#1) # asm 2: movq <buf=%rsi,272(<input_0=%rdi) movq % rsi, 272( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 336 ] = buf # asm 1: movq <buf=int64#2,336(<input_0=int64#1) # asm 2: movq <buf=%rsi,336(<input_0=%rdi) movq % rsi, 336( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 400 ] = buf # asm 1: movq <buf=int64#2,400(<input_0=int64#1) # asm 2: movq <buf=%rsi,400(<input_0=%rdi) movq % rsi, 400( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 464 ] = buf # asm 1: movq <buf=int64#2,464(<input_0=int64#1) # asm 2: movq <buf=%rsi,464(<input_0=%rdi) movq % rsi, 464( % rdi) # qhasm: r0 = mem64[ input_0 + 24 ] x2 # asm 1: movddup 24(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 24(<input_0=%rdi),>r0=%xmm6 movddup 24( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 88 ] x2 # asm 1: movddup 88(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 88(<input_0=%rdi),>r1=%xmm7 movddup 88( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 152 ] x2 # asm 1: movddup 152(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 152(<input_0=%rdi),>r2=%xmm8 movddup 152( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 216 ] x2 # asm 1: movddup 216(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 216(<input_0=%rdi),>r3=%xmm9 movddup 216( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 280 ] x2 # asm 1: movddup 280(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 280(<input_0=%rdi),>r4=%xmm10 movddup 280( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 344 ] x2 # asm 1: movddup 344(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 344(<input_0=%rdi),>r5=%xmm11 movddup 344( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 408 ] x2 # asm 1: movddup 408(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 408(<input_0=%rdi),>r6=%xmm12 movddup 408( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 472 ] x2 # asm 1: movddup 472(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 472(<input_0=%rdi),>r7=%xmm13 movddup 472( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand 
<mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor 
<v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # 
qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw 
$8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 24 ] = buf # asm 1: movq <buf=int64#2,24(<input_0=int64#1) # asm 2: movq <buf=%rsi,24(<input_0=%rdi) movq % rsi, 24( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 88 ] = buf # asm 1: movq <buf=int64#2,88(<input_0=int64#1) # asm 2: movq <buf=%rsi,88(<input_0=%rdi) movq % rsi, 88( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 152 ] = buf # asm 1: movq <buf=int64#2,152(<input_0=int64#1) # asm 2: movq <buf=%rsi,152(<input_0=%rdi) movq % rsi, 152( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: mem64[ input_0 + 216 ] = buf # asm 1: movq <buf=int64#2,216(<input_0=int64#1) # asm 2: movq <buf=%rsi,216(<input_0=%rdi) movq % rsi, 216( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 280 ] = buf # asm 1: movq <buf=int64#2,280(<input_0=int64#1) # asm 2: movq <buf=%rsi,280(<input_0=%rdi) movq % rsi, 280( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 344 ] = buf # asm 1: movq <buf=int64#2,344(<input_0=int64#1) # asm 2: movq <buf=%rsi,344(<input_0=%rdi) movq % rsi, 344( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 408 ] = buf # asm 1: movq <buf=int64#2,408(<input_0=int64#1) # asm 2: movq <buf=%rsi,408(<input_0=%rdi) movq % rsi, 408( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 472 ] = buf # asm 1: movq <buf=int64#2,472(<input_0=int64#1) # asm 2: movq <buf=%rsi,472(<input_0=%rdi) movq % rsi, 472( % rdi) # qhasm: r0 = mem64[ input_0 + 32 ] x2 # asm 1: movddup 32(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 32(<input_0=%rdi),>r0=%xmm6 movddup 32( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 96 ] x2 # asm 1: movddup 96(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 96(<input_0=%rdi),>r1=%xmm7 movddup 96( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 160 ] x2 # asm 1: movddup 160(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 160(<input_0=%rdi),>r2=%xmm8 movddup 160( % rdi), % xmm8 # qhasm: r3 = 
mem64[ input_0 + 224 ] x2 # asm 1: movddup 224(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 224(<input_0=%rdi),>r3=%xmm9 movddup 224( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 288 ] x2 # asm 1: movddup 288(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 288(<input_0=%rdi),>r4=%xmm10 movddup 288( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 352 ] x2 # asm 1: movddup 352(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 352(<input_0=%rdi),>r5=%xmm11 movddup 352( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 416 ] x2 # asm 1: movddup 416(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 416(<input_0=%rdi),>r6=%xmm12 movddup 416( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 480 ] x2 # asm 1: movddup 480(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 480(<input_0=%rdi),>r7=%xmm13 movddup 480( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: 
vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 
2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 
unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 32 ] = buf # asm 1: movq <buf=int64#2,32(<input_0=int64#1) # asm 2: movq <buf=%rsi,32(<input_0=%rdi) movq % rsi, 32( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 96 ] = buf # asm 1: movq <buf=int64#2,96(<input_0=int64#1) # asm 2: movq <buf=%rsi,96(<input_0=%rdi) movq % rsi, 96( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 160 ] = buf # asm 1: movq <buf=int64#2,160(<input_0=int64#1) # asm 2: movq <buf=%rsi,160(<input_0=%rdi) movq % rsi, 160( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: mem64[ input_0 + 224 ] = buf # asm 1: movq <buf=int64#2,224(<input_0=int64#1) # asm 2: movq <buf=%rsi,224(<input_0=%rdi) movq % rsi, 224( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 288 ] = buf # asm 1: movq <buf=int64#2,288(<input_0=int64#1) # asm 2: movq <buf=%rsi,288(<input_0=%rdi) movq % rsi, 288( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 352 ] = buf # asm 1: movq <buf=int64#2,352(<input_0=int64#1) # asm 2: movq <buf=%rsi,352(<input_0=%rdi) movq % rsi, 352( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 416 ] = buf # asm 1: movq <buf=int64#2,416(<input_0=int64#1) # asm 2: movq <buf=%rsi,416(<input_0=%rdi) 
movq % rsi, 416( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 480 ] = buf # asm 1: movq <buf=int64#2,480(<input_0=int64#1) # asm 2: movq <buf=%rsi,480(<input_0=%rdi) movq % rsi, 480( % rdi) # qhasm: r0 = mem64[ input_0 + 40 ] x2 # asm 1: movddup 40(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 40(<input_0=%rdi),>r0=%xmm6 movddup 40( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 104 ] x2 # asm 1: movddup 104(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 104(<input_0=%rdi),>r1=%xmm7 movddup 104( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 168 ] x2 # asm 1: movddup 168(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 168(<input_0=%rdi),>r2=%xmm8 movddup 168( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 232 ] x2 # asm 1: movddup 232(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 232(<input_0=%rdi),>r3=%xmm9 movddup 232( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 296 ] x2 # asm 1: movddup 296(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 296(<input_0=%rdi),>r4=%xmm10 movddup 296( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 360 ] x2 # asm 1: movddup 360(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 360(<input_0=%rdi),>r5=%xmm11 movddup 360( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 424 ] x2 # asm 1: movddup 424(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 424(<input_0=%rdi),>r6=%xmm12 movddup 424( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 488 ] x2 # asm 1: movddup 488(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 488(<input_0=%rdi),>r7=%xmm13 movddup 488( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # 
qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor 
<v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % 
xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 40 ] = buf # asm 1: movq <buf=int64#2,40(<input_0=int64#1) # asm 2: movq <buf=%rsi,40(<input_0=%rdi) movq % rsi, 40( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 104 ] = buf # asm 1: movq <buf=int64#2,104(<input_0=int64#1) # asm 2: movq <buf=%rsi,104(<input_0=%rdi) movq % rsi, 104( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 168 ] = buf # asm 1: movq <buf=int64#2,168(<input_0=int64#1) # asm 2: movq <buf=%rsi,168(<input_0=%rdi) movq % rsi, 168( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: mem64[ input_0 + 232 ] = buf # asm 1: movq <buf=int64#2,232(<input_0=int64#1) # asm 2: movq <buf=%rsi,232(<input_0=%rdi) movq % rsi, 232( % rdi) # qhasm: 
buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 296 ] = buf # asm 1: movq <buf=int64#2,296(<input_0=int64#1) # asm 2: movq <buf=%rsi,296(<input_0=%rdi) movq % rsi, 296( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 360 ] = buf # asm 1: movq <buf=int64#2,360(<input_0=int64#1) # asm 2: movq <buf=%rsi,360(<input_0=%rdi) movq % rsi, 360( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 424 ] = buf # asm 1: movq <buf=int64#2,424(<input_0=int64#1) # asm 2: movq <buf=%rsi,424(<input_0=%rdi) movq % rsi, 424( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 488 ] = buf # asm 1: movq <buf=int64#2,488(<input_0=int64#1) # asm 2: movq <buf=%rsi,488(<input_0=%rdi) movq % rsi, 488( % rdi) # qhasm: r0 = mem64[ input_0 + 48 ] x2 # asm 1: movddup 48(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 48(<input_0=%rdi),>r0=%xmm6 movddup 48( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 112 ] x2 # asm 1: movddup 112(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 112(<input_0=%rdi),>r1=%xmm7 movddup 112( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 176 ] x2 # asm 1: movddup 176(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 176(<input_0=%rdi),>r2=%xmm8 movddup 176( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 240 ] x2 # asm 1: movddup 240(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 240(<input_0=%rdi),>r3=%xmm9 movddup 240( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 304 ] x2 # asm 1: movddup 304(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 304(<input_0=%rdi),>r4=%xmm10 movddup 304( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 368 ] x2 # asm 1: movddup 368(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 368(<input_0=%rdi),>r5=%xmm11 movddup 368( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 432 ] x2 # asm 1: movddup 432(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 432(<input_0=%rdi),>r6=%xmm12 movddup 432( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 496 ] x2 # asm 1: movddup 496(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 496(<input_0=%rdi),>r7=%xmm13 movddup 496( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand 
<mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 
# qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: 
vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 48 ] = buf # asm 1: movq <buf=int64#2,48(<input_0=int64#1) # asm 2: movq <buf=%rsi,48(<input_0=%rdi) movq % rsi, 48( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq 
$0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 112 ] = buf # asm 1: movq <buf=int64#2,112(<input_0=int64#1) # asm 2: movq <buf=%rsi,112(<input_0=%rdi) movq % rsi, 112( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 176 ] = buf # asm 1: movq <buf=int64#2,176(<input_0=int64#1) # asm 2: movq <buf=%rsi,176(<input_0=%rdi) movq % rsi, 176( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: mem64[ input_0 + 240 ] = buf # asm 1: movq <buf=int64#2,240(<input_0=int64#1) # asm 2: movq <buf=%rsi,240(<input_0=%rdi) movq % rsi, 240( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 304 ] = buf # asm 1: movq <buf=int64#2,304(<input_0=int64#1) # asm 2: movq <buf=%rsi,304(<input_0=%rdi) movq % rsi, 304( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 368 ] = buf # asm 1: movq <buf=int64#2,368(<input_0=int64#1) # asm 2: movq <buf=%rsi,368(<input_0=%rdi) movq % rsi, 368( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 432 ] = buf # asm 1: movq <buf=int64#2,432(<input_0=int64#1) # asm 2: movq <buf=%rsi,432(<input_0=%rdi) movq % rsi, 432( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 496 ] = buf # asm 1: movq <buf=int64#2,496(<input_0=int64#1) # asm 2: movq <buf=%rsi,496(<input_0=%rdi) movq % rsi, 496( % rdi) # qhasm: r0 = mem64[ input_0 + 56 ] x2 # asm 1: movddup 56(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 56(<input_0=%rdi),>r0=%xmm6 movddup 56( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 120 ] x2 # asm 1: movddup 120(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 120(<input_0=%rdi),>r1=%xmm7 movddup 120( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 184 ] x2 # asm 1: movddup 184(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 184(<input_0=%rdi),>r2=%xmm8 movddup 184( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 248 ] x2 # asm 1: movddup 248(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 248(<input_0=%rdi),>r3=%xmm9 movddup 248( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 312 ] x2 # asm 1: movddup 312(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 312(<input_0=%rdi),>r4=%xmm10 movddup 312( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 376 ] x2 # asm 1: movddup 376(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 376(<input_0=%rdi),>r5=%xmm11 movddup 376( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 440 ] x2 # asm 1: movddup 440(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 440(<input_0=%rdi),>r6=%xmm12 movddup 440( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 504 ] x2 # asm 1: movddup 504(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 504(<input_0=%rdi),>r7=%xmm13 movddup 504( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq 
$32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#1 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm0 vpand % xmm0, % xmm9, % xmm0 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#13 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm12 vpsllq $32, % xmm13, % xmm12 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#2 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm1 vpand % xmm1, % xmm13, % xmm1 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#13,<v00=reg128#1,>r3=reg128#1 # asm 2: vpor <v10=%xmm12,<v00=%xmm0,>r3=%xmm0 vpor % xmm12, % xmm0, % xmm0 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#10,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm9,>r7=%xmm1 vpor 
% xmm1, % xmm9, % xmm1 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#10 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm9 vpand % xmm2, % xmm14, % xmm9 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#13 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm12 vpslld $16, % xmm11, % xmm12 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#14 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm13 vpsrld $16, % xmm14, % xmm13 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#13,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm12,<v00=%xmm9,>r0=%xmm9 vpor % xmm12, % xmm9, % xmm9 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#14,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm13,>r2=%xmm11 vpor % xmm11, % xmm13, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm12 vpand % xmm2, % xmm10, % xmm12 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#1,>v10=reg128#14 # asm 2: vpslld $16,<r3=%xmm0,>v10=%xmm13 vpslld $16, % xmm0, % xmm13 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask3=%xmm3,<r3=%xmm0,>v11=%xmm0 vpand % xmm3, % xmm0, % xmm0 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#13,>r1=reg128#13 # asm 2: vpor <v10=%xmm13,<v00=%xmm12,>r1=%xmm12 vpor % xmm13, % xmm12, % xmm12 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#11,>r3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm10,>r3=%xmm0 vpor % xmm0, % xmm10, % xmm0 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#11 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm10 vpand % xmm2, % xmm6, % xmm10 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#14 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm13 vpslld $16, % xmm8, % xmm13 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#11,>r4=reg128#11 # asm 2: vpor <v10=%xmm13,<v00=%xmm10,>r4=%xmm10 vpor % xmm13, % xmm10, % xmm10 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#3 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm2 vpand % xmm2, % xmm7, % xmm2 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#2,>v10=reg128#9 # asm 2: vpslld $16,<r7=%xmm1,>v10=%xmm8 vpslld $16, % xmm1, % xmm8 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask3=%xmm3,<r7=%xmm1,>v11=%xmm1 vpand % xmm3, % xmm1, % xmm1 # qhasm: r5 = v00 | v10 # asm 1: vpor 
<v10=reg128#9,<v00=reg128#3,>r5=reg128#3 # asm 2: vpor <v10=%xmm8,<v00=%xmm2,>r5=%xmm2 vpor % xmm8, % xmm2, % xmm2 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#8,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm7,>r7=%xmm1 vpor % xmm1, % xmm7, % xmm1 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#10,>v00=reg128#4 # asm 2: vpand <mask4=%xmm4,<r0=%xmm9,>v00=%xmm3 vpand % xmm4, % xmm9, % xmm3 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#13,>v10=reg128#8 # asm 2: vpsllw $8,<r1=%xmm12,>v10=%xmm7 vpsllw $8, % xmm12, % xmm7 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#10,>v01=reg128#9 # asm 2: vpsrlw $8,<r0=%xmm9,>v01=%xmm8 vpsrlw $8, % xmm9, % xmm8 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#13,>v11=reg128#10 # asm 2: vpand <mask5=%xmm5,<r1=%xmm12,>v11=%xmm9 vpand % xmm5, % xmm12, % xmm9 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#8,<v00=reg128#4,>r0=reg128#4 # asm 2: vpor <v10=%xmm7,<v00=%xmm3,>r0=%xmm3 vpor % xmm7, % xmm3, % xmm3 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#9,>r1=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm8,>r1=%xmm7 vpor % xmm9, % xmm8, % xmm7 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#9 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm8 vpand % xmm4, % xmm11, % xmm8 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#1,>v10=reg128#10 # asm 2: vpsllw $8,<r3=%xmm0,>v10=%xmm9 vpsllw $8, % xmm0, % xmm9 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask5=%xmm5,<r3=%xmm0,>v11=%xmm0 vpand % xmm5, % xmm0, % xmm0 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#10,<v00=reg128#9,>r2=reg128#9 # asm 2: vpor <v10=%xmm9,<v00=%xmm8,>r2=%xmm8 vpor % xmm9, % xmm8, % xmm8 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#12,>r3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm11,>r3=%xmm0 vpor % xmm0, % xmm11, % xmm0 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#11,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r4=%xmm10,>v00=%xmm9 vpand % xmm4, % xmm10, % xmm9 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#3,>v10=reg128#12 # asm 2: vpsllw $8,<r5=%xmm2,>v10=%xmm11 vpsllw $8, % xmm2, % xmm11 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#11,>v01=reg128#11 # asm 2: vpsrlw $8,<r4=%xmm10,>v01=%xmm10 vpsrlw $8, % xmm10, % xmm10 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#3,>v11=reg128#3 # asm 2: vpand <mask5=%xmm5,<r5=%xmm2,>v11=%xmm2 vpand % xmm5, % xmm2, % xmm2 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#12,<v00=reg128#10,>r4=reg128#10 # asm 2: vpor <v10=%xmm11,<v00=%xmm9,>r4=%xmm9 vpor % xmm11, % xmm9, % xmm9 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#3,<v01=reg128#11,>r5=reg128#3 # asm 2: vpor <v11=%xmm2,<v01=%xmm10,>r5=%xmm2 vpor % xmm2, % xmm10, % xmm2 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#5 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm4 vpand % xmm4, % xmm6, % xmm4 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#2,>v10=reg128#11 # asm 2: vpsllw $8,<r7=%xmm1,>v10=%xmm10 vpsllw $8, % xmm1, % xmm10 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand 
<mask5=reg128#6,<r7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask5=%xmm5,<r7=%xmm1,>v11=%xmm1 vpand % xmm5, % xmm1, % xmm1 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#11,<v00=reg128#5,>r6=reg128#5 # asm 2: vpor <v10=%xmm10,<v00=%xmm4,>r6=%xmm4 vpor % xmm10, % xmm4, % xmm4 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#7,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm6,>r7=%xmm1 vpor % xmm1, % xmm6, % xmm1 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#4,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm3,>buf=%rsi pextrq $0x0, % xmm3, % rsi # qhasm: mem64[ input_0 + 56 ] = buf # asm 1: movq <buf=int64#2,56(<input_0=int64#1) # asm 2: movq <buf=%rsi,56(<input_0=%rdi) movq % rsi, 56( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#8,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm7,>buf=%rsi pextrq $0x0, % xmm7, % rsi # qhasm: mem64[ input_0 + 120 ] = buf # asm 1: movq <buf=int64#2,120(<input_0=int64#1) # asm 2: movq <buf=%rsi,120(<input_0=%rdi) movq % rsi, 120( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 184 ] = buf # asm 1: movq <buf=int64#2,184(<input_0=int64#1) # asm 2: movq <buf=%rsi,184(<input_0=%rdi) movq % rsi, 184( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#1,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm0,>buf=%rsi pextrq $0x0, % xmm0, % rsi # qhasm: mem64[ input_0 + 248 ] = buf # asm 1: movq <buf=int64#2,248(<input_0=int64#1) # asm 2: movq <buf=%rsi,248(<input_0=%rdi) movq % rsi, 248( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 312 ] = buf # asm 1: movq <buf=int64#2,312(<input_0=int64#1) # asm 2: movq <buf=%rsi,312(<input_0=%rdi) movq % rsi, 312( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#3,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm2,>buf=%rsi pextrq $0x0, % xmm2, % rsi # qhasm: mem64[ input_0 + 376 ] = buf # asm 1: movq <buf=int64#2,376(<input_0=int64#1) # asm 2: movq <buf=%rsi,376(<input_0=%rdi) movq % rsi, 376( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#5,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm4,>buf=%rsi pextrq $0x0, % xmm4, % rsi # qhasm: mem64[ input_0 + 440 ] = buf # asm 1: movq <buf=int64#2,440(<input_0=int64#1) # asm 2: movq <buf=%rsi,440(<input_0=%rdi) movq % rsi, 440( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#2,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm1,>buf=%rsi pextrq $0x0, % xmm1, % rsi # qhasm: mem64[ input_0 + 504 ] = buf # asm 1: movq <buf=int64#2,504(<input_0=int64#1) # asm 2: movq <buf=%rsi,504(<input_0=%rdi) movq % rsi, 504( % rdi) # qhasm: mask0 aligned= mem128[ MASK2_0 ] # asm 1: movdqa MASK2_0(%rip),>mask0=reg128#1 # asm 2: movdqa MASK2_0(%rip),>mask0=%xmm0 movdqa MASK2_0( % rip), % xmm0 # qhasm: mask1 aligned= mem128[ MASK2_1 ] # asm 1: movdqa MASK2_1(%rip),>mask1=reg128#2 # asm 2: movdqa MASK2_1(%rip),>mask1=%xmm1 movdqa MASK2_1( % rip), % xmm1 # qhasm: mask2 aligned= mem128[ MASK1_0 ] # asm 1: movdqa MASK1_0(%rip),>mask2=reg128#3 # asm 2: movdqa MASK1_0(%rip),>mask2=%xmm2 movdqa MASK1_0( % rip), % xmm2 # qhasm: mask3 aligned= mem128[ MASK1_1 ] # asm 1: movdqa MASK1_1(%rip),>mask3=reg128#4 # asm 2: movdqa MASK1_1(%rip),>mask3=%xmm3 movdqa MASK1_1( % rip), % xmm3 # qhasm: mask4 aligned= mem128[ MASK0_0 ] # asm 1: movdqa MASK0_0(%rip),>mask4=reg128#5 # asm 2: movdqa MASK0_0(%rip),>mask4=%xmm4 movdqa MASK0_0( % rip), % 
xmm4 # qhasm: mask5 aligned= mem128[ MASK0_1 ] # asm 1: movdqa MASK0_1(%rip),>mask5=reg128#6 # asm 2: movdqa MASK0_1(%rip),>mask5=%xmm5 movdqa MASK0_1( % rip), % xmm5 # qhasm: r0 = mem64[ input_0 + 0 ] x2 # asm 1: movddup 0(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 0(<input_0=%rdi),>r0=%xmm6 movddup 0( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 8 ] x2 # asm 1: movddup 8(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 8(<input_0=%rdi),>r1=%xmm7 movddup 8( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 16 ] x2 # asm 1: movddup 16(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 16(<input_0=%rdi),>r2=%xmm8 movddup 16( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 24 ] x2 # asm 1: movddup 24(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 24(<input_0=%rdi),>r3=%xmm9 movddup 24( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 32 ] x2 # asm 1: movddup 32(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 32(<input_0=%rdi),>r4=%xmm10 movddup 32( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 40 ] x2 # asm 1: movddup 40(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 40(<input_0=%rdi),>r5=%xmm11 movddup 40( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 48 ] x2 # asm 1: movddup 48(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 48(<input_0=%rdi),>r6=%xmm12 movddup 48( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 56 ] x2 # asm 1: movddup 56(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 56(<input_0=%rdi),>r7=%xmm13 movddup 56( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | 
v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq 
$2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq 
$2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # 
qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 0 ] = t0 # asm 1: movdqu <t0=reg128#8,0(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,0(<input_0=%rdi) movdqu % xmm7, 0( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 16 ] = t0 # asm 1: movdqu <t0=reg128#8,16(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,16(<input_0=%rdi) movdqu % xmm7, 16( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 32 ] = t0 # asm 1: movdqu <t0=reg128#8,32(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,32(<input_0=%rdi) movdqu % xmm7, 32( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 48 ] = t0 # asm 1: movdqu <t0=reg128#7,48(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,48(<input_0=%rdi) movdqu % xmm6, 48( % rdi) # qhasm: r0 = mem64[ input_0 + 64 ] x2 # asm 1: movddup 64(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 64(<input_0=%rdi),>r0=%xmm6 movddup 64( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 72 ] x2 # asm 1: movddup 72(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 72(<input_0=%rdi),>r1=%xmm7 movddup 72( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 80 ] x2 # asm 1: movddup 80(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 80(<input_0=%rdi),>r2=%xmm8 movddup 80( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 88 ] x2 # asm 1: movddup 88(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 88(<input_0=%rdi),>r3=%xmm9 
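# Explanatory comment (added for readability; it is not part of the qhasm
# output, and the interpretation below is inferred from the visible code):
# the movddup loads around this point broadcast the eight 64-bit rows at
# byte offsets 64..120 of input_0 into xmm registers.  Three masked swap
# rounds then follow, at bit strides 4, 2 and 1, using the mask0/mask1,
# mask2/mask3 and mask4/mask5 pairs loaded above (MASK2_*, MASK1_*,
# MASK0_*), and the rows are written back as four 128-bit vpunpcklqdq
# stores.  In C-style pseudocode, each swap round transforms a row pair
# (a, b) roughly as:
#     a' = (a & mask_lo) | ((b & mask_lo) << s);
#     b' = ((a & mask_hi) >> s) | (b & mask_hi);
# with s in {4, 2, 1}; mask_lo/mask_hi are placeholder names for the
# corresponding MASK*_0 / MASK*_1 constants, whose actual values are not
# shown in this excerpt.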
movddup 88( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 96 ] x2 # asm 1: movddup 96(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 96(<input_0=%rdi),>r4=%xmm10 movddup 96( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 104 ] x2 # asm 1: movddup 104(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 104(<input_0=%rdi),>r5=%xmm11 movddup 104( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 112 ] x2 # asm 1: movddup 112(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 112(<input_0=%rdi),>r6=%xmm12 movddup 112( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 120 ] x2 # asm 1: movddup 120(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 120(<input_0=%rdi),>r7=%xmm13 movddup 120( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: 
vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 
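# Explanatory comment (added for readability; not part of the qhasm output):
# within each group of eight rows, the swap rounds pair the registers as
# (r0,r4),(r1,r5),(r2,r6),(r3,r7) at stride 4, then (r0,r2),(r1,r3),
# (r4,r6),(r5,r7) at stride 2 (the psllq $2 steps here), and finally
# (r0,r1),(r2,r3),(r4,r5),(r6,r7) at stride 1.  Combined with the
# 32/16/8-bit rounds performed earlier in this routine, the overall shape
# matches an in-register transpose of a 64x64 bit matrix stored as 64
# consecutive quadwords of input_0; that identification is an inference
# from the code shape, not something stated in this excerpt.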
psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq 
$1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 
1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 64 ] = t0 # asm 1: movdqu <t0=reg128#8,64(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,64(<input_0=%rdi) movdqu % xmm7, 64( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 80 ] = t0 # asm 1: movdqu <t0=reg128#8,80(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,80(<input_0=%rdi) movdqu % xmm7, 80( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 96 ] = t0 # asm 1: movdqu <t0=reg128#8,96(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,96(<input_0=%rdi) movdqu % xmm7, 96( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 112 ] = t0 # asm 1: movdqu <t0=reg128#7,112(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,112(<input_0=%rdi) movdqu % xmm6, 112( % rdi) # qhasm: r0 = mem64[ input_0 + 128 ] x2 # asm 1: movddup 128(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 128(<input_0=%rdi),>r0=%xmm6 movddup 128( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 136 ] x2 # asm 1: movddup 136(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 136(<input_0=%rdi),>r1=%xmm7 movddup 136( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 144 ] x2 # asm 1: movddup 144(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 144(<input_0=%rdi),>r2=%xmm8 movddup 144( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 152 ] x2 # asm 1: movddup 152(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 152(<input_0=%rdi),>r3=%xmm9 movddup 152( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 160 ] x2 # asm 1: movddup 160(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 160(<input_0=%rdi),>r4=%xmm10 movddup 160( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 168 ] x2 # asm 1: movddup 168(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 168(<input_0=%rdi),>r5=%xmm11 movddup 168( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 176 ] x2 # asm 1: movddup 176(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 176(<input_0=%rdi),>r6=%xmm12 movddup 176( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 184 ] x2 # asm 1: movddup 184(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 184(<input_0=%rdi),>r7=%xmm13 movddup 184( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand 
<mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & 
mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor 
# <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor %xmm12,%xmm10,%xmm10

# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand %xmm2,%xmm6,%xmm12

# qhasm: v10 = r6 & mask2
# asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15
vpand %xmm2,%xmm8,%xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = r4 & mask3
# asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7
# asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6
vpand %xmm3,%xmm6,%xmm6

# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#7
# asm 2: psrlq $2,<v01=%xmm6
psrlq $2,%xmm6

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8,%xmm6,%xmm6

# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand %xmm2,%xmm7,%xmm8

# qhasm: v10 = r7 & mask2
# asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15
vpand %xmm2,%xmm9,%xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = r5 & mask3
# asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8
# asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7
vpand %xmm3,%xmm7,%xmm7

# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand %xmm3,%xmm9,%xmm9

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#8
# asm 2: psrlq $2,<v01=%xmm7
psrlq $2,%xmm7

# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor %xmm15,%xmm8,%xmm8

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor %xmm9,%xmm7,%xmm7

# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand %xmm4,%xmm13,%xmm9

# qhasm: v10 = r1 & mask4
# asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15
vpand %xmm4,%xmm14,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = r0 & mask5
# asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14
# asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13
vpand %xmm5,%xmm13,%xmm13

# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand %xmm5,%xmm14,%xmm14

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#14
# asm 2: psrlq $1,<v01=%xmm13
psrlq $1,%xmm13

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor %xmm15,%xmm9,%xmm9

# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor %xmm14,%xmm13,%xmm13

# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand %xmm4,%xmm11,%xmm14

# qhasm: v10 = r3 & mask4
# asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15
vpand %xmm4,%xmm10,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = r2 & mask5
# asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12
# asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11
vpand %xmm5,%xmm11,%xmm11

# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand %xmm5,%xmm10,%xmm10

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#12
# asm 2: psrlq $1,<v01=%xmm11
psrlq $1,%xmm11

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor %xmm10,%xmm11,%xmm10

# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand %xmm4,%xmm12,%xmm11

# qhasm: v10 = r5 & mask4
# asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15
vpand %xmm4,%xmm8,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = r4 & mask5
# asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13
# asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12
vpand %xmm5,%xmm12,%xmm12

# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand %xmm5,%xmm8,%xmm8

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#13
# asm 2: psrlq $1,<v01=%xmm12
psrlq $1,%xmm12

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor %xmm8,%xmm12,%xmm8

# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand %xmm4,%xmm6,%xmm12

# qhasm: v10 = r7 & mask4
# asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15
vpand %xmm4,%xmm7,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = r6 & mask5
# asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6
vpand %xmm5,%xmm6,%xmm6

# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand %xmm5,%xmm7,%xmm7

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#7
# asm 2: psrlq $1,<v01=%xmm6
psrlq $1,%xmm6

# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor
<v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 128 ] = t0 # asm 1: movdqu <t0=reg128#8,128(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,128(<input_0=%rdi) movdqu % xmm7, 128( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 144 ] = t0 # asm 1: movdqu <t0=reg128#8,144(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,144(<input_0=%rdi) movdqu % xmm7, 144( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 160 ] = t0 # asm 1: movdqu <t0=reg128#8,160(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,160(<input_0=%rdi) movdqu % xmm7, 160( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 176 ] = t0 # asm 1: movdqu <t0=reg128#7,176(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,176(<input_0=%rdi) movdqu % xmm6, 176( % rdi) # qhasm: r0 = mem64[ input_0 + 192 ] x2 # asm 1: movddup 192(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 192(<input_0=%rdi),>r0=%xmm6 movddup 192( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 200 ] x2 # asm 1: movddup 200(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 200(<input_0=%rdi),>r1=%xmm7 movddup 200( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 208 ] x2 # asm 1: movddup 208(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 208(<input_0=%rdi),>r2=%xmm8 movddup 208( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 216 ] x2 # asm 1: movddup 216(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 216(<input_0=%rdi),>r3=%xmm9 movddup 216( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 224 ] x2 # asm 1: movddup 224(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 224(<input_0=%rdi),>r4=%xmm10 movddup 224( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 232 ] x2 # asm 1: movddup 232(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 232(<input_0=%rdi),>r5=%xmm11 movddup 232( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 240 ] x2 # asm 1: movddup 240(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 240(<input_0=%rdi),>r6=%xmm12 movddup 240( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 248 ] x2 # asm 1: movddup 248(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 248(<input_0=%rdi),>r7=%xmm13 movddup 248( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % 
xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand 
<mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand 
<mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & 
mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 192 ] = t0 # asm 1: movdqu <t0=reg128#8,192(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,192(<input_0=%rdi) movdqu % xmm7, 192( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 208 ] = t0 # asm 1: movdqu 
<t0=reg128#8,208(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,208(<input_0=%rdi) movdqu % xmm7, 208( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 224 ] = t0 # asm 1: movdqu <t0=reg128#8,224(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,224(<input_0=%rdi) movdqu % xmm7, 224( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 240 ] = t0 # asm 1: movdqu <t0=reg128#7,240(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,240(<input_0=%rdi) movdqu % xmm6, 240( % rdi) # qhasm: r0 = mem64[ input_0 + 256 ] x2 # asm 1: movddup 256(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 256(<input_0=%rdi),>r0=%xmm6 movddup 256( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 264 ] x2 # asm 1: movddup 264(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 264(<input_0=%rdi),>r1=%xmm7 movddup 264( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 272 ] x2 # asm 1: movddup 272(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 272(<input_0=%rdi),>r2=%xmm8 movddup 272( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 280 ] x2 # asm 1: movddup 280(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 280(<input_0=%rdi),>r3=%xmm9 movddup 280( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 288 ] x2 # asm 1: movddup 288(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 288(<input_0=%rdi),>r4=%xmm10 movddup 288( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 296 ] x2 # asm 1: movddup 296(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 296(<input_0=%rdi),>r5=%xmm11 movddup 296( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 304 ] x2 # asm 1: movddup 304(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 304(<input_0=%rdi),>r6=%xmm12 movddup 304( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 312 ] x2 # asm 1: movddup 312(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 312(<input_0=%rdi),>r7=%xmm13 movddup 312( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand 
<mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 
v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand 
<mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand 
<mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 256 ] = t0 # asm 1: movdqu <t0=reg128#8,256(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,256(<input_0=%rdi) movdqu % xmm7, 256( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 272 ] = t0 # asm 1: movdqu <t0=reg128#8,272(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,272(<input_0=%rdi) movdqu % xmm7, 272( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 288 ] = t0 # asm 1: movdqu <t0=reg128#8,288(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,288(<input_0=%rdi) movdqu % xmm7, 288( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 304 ] = t0 
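#
# The surrounding stanzas all follow one pattern, generated from the qhasm
# lines quoted in the comments: two working registers are combined through a
# pair of complementary masks and a shift (4, then 2, then 1 bits).  In rough
# scalar terms (illustrative sketch only; maskA/maskB/s are not names used in
# this file, and the mask constants held in xmm0-xmm5 are set up earlier):
#
#     new_lo = (lo & maskA) | ((hi & maskA) << s);
#     new_hi = ((lo & maskB) >> s) | (hi & maskB);
#
# i.e. s-bit groups are exchanged between the two registers.  Applied for
# s = 4, 2, 1 and followed by the vpunpcklqdq/movdqu stores, this appears to
# transpose the bits of each block loaded by the movddup instructions above.
#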
# asm 1: movdqu <t0=reg128#7,304(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,304(<input_0=%rdi) movdqu % xmm6, 304( % rdi) # qhasm: r0 = mem64[ input_0 + 320 ] x2 # asm 1: movddup 320(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 320(<input_0=%rdi),>r0=%xmm6 movddup 320( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 328 ] x2 # asm 1: movddup 328(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 328(<input_0=%rdi),>r1=%xmm7 movddup 328( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 336 ] x2 # asm 1: movddup 336(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 336(<input_0=%rdi),>r2=%xmm8 movddup 336( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 344 ] x2 # asm 1: movddup 344(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 344(<input_0=%rdi),>r3=%xmm9 movddup 344( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 352 ] x2 # asm 1: movddup 352(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 352(<input_0=%rdi),>r4=%xmm10 movddup 352( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 360 ] x2 # asm 1: movddup 360(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 360(<input_0=%rdi),>r5=%xmm11 movddup 360( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 368 ] x2 # asm 1: movddup 368(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 368(<input_0=%rdi),>r6=%xmm12 movddup 368( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 376 ] x2 # asm 1: movddup 376(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 376(<input_0=%rdi),>r7=%xmm13 movddup 376( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 
psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # 
asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 
unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 320 ] = t0 # asm 1: movdqu <t0=reg128#8,320(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,320(<input_0=%rdi) movdqu % xmm7, 320( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 336 ] = t0 # asm 1: movdqu <t0=reg128#8,336(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,336(<input_0=%rdi) movdqu % xmm7, 336( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 352 ] = t0 # asm 1: movdqu <t0=reg128#8,352(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,352(<input_0=%rdi) movdqu % xmm7, 352( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 368 ] = t0 # asm 1: movdqu <t0=reg128#7,368(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,368(<input_0=%rdi) movdqu % xmm6, 368( % rdi) # qhasm: r0 = mem64[ input_0 + 384 ] x2 # asm 1: movddup 384(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 384(<input_0=%rdi),>r0=%xmm6 movddup 384( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 392 ] x2 # asm 1: movddup 392(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 392(<input_0=%rdi),>r1=%xmm7 movddup 392( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 400 ] x2 # asm 1: movddup 400(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 400(<input_0=%rdi),>r2=%xmm8 movddup 400( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 408 ] x2 # asm 1: movddup 408(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 
movddup 408(%rdi),%xmm9
# qhasm: r4 = mem64[ input_0 + 416 ] x2
movddup 416(%rdi),%xmm10
# qhasm: r5 = mem64[ input_0 + 424 ] x2
movddup 424(%rdi),%xmm11
# qhasm: r6 = mem64[ input_0 + 432 ] x2
movddup 432(%rdi),%xmm12
# qhasm: r7 = mem64[ input_0 + 440 ] x2
movddup 440(%rdi),%xmm13
# qhasm: v00 = r0 & mask0
vpand %xmm0,%xmm6,%xmm14
# qhasm: v10 = r4 & mask0
vpand %xmm0,%xmm10,%xmm15
# qhasm: 2x v10 <<= 4
psllq $4,%xmm15
# qhasm: v01 = r0 & mask1
vpand %xmm1,%xmm6,%xmm6
# qhasm: v11 = r4 & mask1
vpand %xmm1,%xmm10,%xmm10
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm6
# qhasm: r0 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: r4 = v01 | v11
vpor %xmm10,%xmm6,%xmm6
# qhasm: v00 = r1 & mask0
vpand %xmm0,%xmm7,%xmm10
# qhasm: v10 = r5 & mask0
vpand %xmm0,%xmm11,%xmm15
# qhasm: 2x v10 <<= 4
psllq $4,%xmm15
# qhasm: v01 = r1 & mask1
vpand %xmm1,%xmm7,%xmm7
# qhasm: v11 = r5 & mask1
vpand %xmm1,%xmm11,%xmm11
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm7
# qhasm: r1 = v00 | v10
vpor %xmm15,%xmm10,%xmm10
# qhasm: r5 = v01 | v11
vpor %xmm11,%xmm7,%xmm7
# qhasm: v00 = r2 & mask0
vpand %xmm0,%xmm8,%xmm11
# qhasm: v10 = r6 & mask0
vpand %xmm0,%xmm12,%xmm15
# qhasm: 2x v10 <<= 4
psllq $4,%xmm15
# qhasm: v01 = r2 & mask1
vpand %xmm1,%xmm8,%xmm8
# qhasm: v11 = r6 & mask1
vpand %xmm1,%xmm12,%xmm12
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm8
# qhasm: r2 = v00 | v10
vpor %xmm15,%xmm11,%xmm11
# qhasm: r6 = v01 | v11
vpor %xmm12,%xmm8,%xmm8
# qhasm: v00 = r3 & mask0
vpand %xmm0,%xmm9,%xmm12
# qhasm: v10 = r7 & mask0
vpand %xmm0,%xmm13,%xmm15
# qhasm: 2x v10 <<= 4
psllq $4,%xmm15
# qhasm: v01 = r3 & mask1
vpand %xmm1,%xmm9,%xmm9
# qhasm: v11 = r7 & mask1
vpand %xmm1,%xmm13,%xmm13
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm9
# qhasm: r3 = v00 | v10
vpor %xmm15,%xmm12,%xmm12
# qhasm: r7 = v01 | v11
vpor %xmm13,%xmm9,%xmm9
# qhasm: v00 = r0 & mask2
vpand %xmm2,%xmm14,%xmm13
# qhasm: v10 = r2 & mask2
vpand %xmm2,%xmm11,%xmm15
# qhasm: 2x v10 <<= 2
psllq $2,%xmm15
# qhasm: v01 = r0 & mask3
vpand %xmm3,%xmm14,%xmm14
# qhasm: v11 = r2 & mask3
vpand %xmm3,%xmm11,%xmm11
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm14
# qhasm: r0 = v00 | v10
vpor %xmm15,%xmm13,%xmm13
# qhasm: r2 = v01 | v11
vpor %xmm11,%xmm14,%xmm11
# qhasm: v00 = r1 & mask2
vpand %xmm2,%xmm10,%xmm14
# qhasm: v10 = r3 & mask2
vpand %xmm2,%xmm12,%xmm15
# qhasm: 2x v10 <<= 2
psllq $2,%xmm15
# qhasm: v01 = r1 & mask3
vpand %xmm3,%xmm10,%xmm10
# qhasm: v11 = r3 & mask3
vpand %xmm3,%xmm12,%xmm12
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm10
# qhasm: r1 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: r3 = v01 | v11
vpor %xmm12,%xmm10,%xmm10
# qhasm: v00 = r4 & mask2
vpand %xmm2,%xmm6,%xmm12
# qhasm: v10 = r6 & mask2
vpand %xmm2,%xmm8,%xmm15
# qhasm: 2x v10 <<= 2
psllq $2,%xmm15
# qhasm: v01 = r4 & mask3
vpand %xmm3,%xmm6,%xmm6
# qhasm: v11 = r6 & mask3
vpand %xmm3,%xmm8,%xmm8
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm6
# qhasm: r4 = v00 | v10
vpor %xmm15,%xmm12,%xmm12
# qhasm: r6 = v01 | v11
vpor %xmm8,%xmm6,%xmm6
# qhasm: v00 = r5 & mask2
vpand %xmm2,%xmm7,%xmm8
# qhasm: v10 = r7 & mask2
vpand %xmm2,%xmm9,%xmm15
# qhasm: 2x v10 <<= 2
psllq $2,%xmm15
# qhasm: v01 = r5 & mask3
vpand %xmm3,%xmm7,%xmm7
# qhasm: v11 = r7 & mask3
vpand %xmm3,%xmm9,%xmm9
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm7
# qhasm: r5 = v00 | v10
vpor %xmm15,%xmm8,%xmm8
# qhasm: r7 = v01 | v11
vpor %xmm9,%xmm7,%xmm7
# qhasm: v00 = r0 & mask4
vpand %xmm4,%xmm13,%xmm9
# qhasm: v10 = r1 & mask4
vpand %xmm4,%xmm14,%xmm15
# qhasm: 2x v10 <<= 1
psllq $1,%xmm15
# qhasm: v01 = r0 & mask5
vpand %xmm5,%xmm13,%xmm13
# qhasm: v11 = r1 & mask5
vpand %xmm5,%xmm14,%xmm14
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm13
# qhasm: r0 = v00 | v10
vpor %xmm15,%xmm9,%xmm9
# qhasm: r1 = v01 | v11
vpor %xmm14,%xmm13,%xmm13
# qhasm: v00 = r2 & mask4
vpand %xmm4,%xmm11,%xmm14
# qhasm: v10 = r3 & mask4
vpand %xmm4,%xmm10,%xmm15
# qhasm: 2x v10 <<= 1
psllq $1,%xmm15
# qhasm: v01 = r2 & mask5
vpand %xmm5,%xmm11,%xmm11
# qhasm: v11 = r3 & mask5
vpand %xmm5,%xmm10,%xmm10
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm11
# qhasm: r2 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: r3 = v01 | v11
vpor %xmm10,%xmm11,%xmm10
# qhasm: v00 = r4 & mask4
vpand %xmm4,%xmm12,%xmm11
# qhasm: v10 = r5 & mask4
vpand %xmm4,%xmm8,%xmm15
# qhasm: 2x v10 <<= 1
psllq $1,%xmm15
# qhasm: v01 = r4 & mask5
vpand %xmm5,%xmm12,%xmm12
# qhasm: v11 = r5 & mask5
vpand %xmm5,%xmm8,%xmm8
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm12
# qhasm: r4 = v00 | v10
vpor %xmm15,%xmm11,%xmm11
# qhasm: r5 = v01 | v11
vpor %xmm8,%xmm12,%xmm8
# qhasm: v00 = r6 & mask4
vpand %xmm4,%xmm6,%xmm12
# qhasm: v10 = r7 & mask4
vpand %xmm4,%xmm7,%xmm15
# qhasm: 2x v10 <<= 1
psllq $1,%xmm15
# qhasm: v01 = r6 & mask5
vpand %xmm5,%xmm6,%xmm6
# qhasm: v11 = r7 & mask5
vpand %xmm5,%xmm7,%xmm7
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm6
# qhasm: r6 = v00 | v10
vpor %xmm15,%xmm12,%xmm12
# qhasm: r7 = v01 | v11
vpor %xmm7,%xmm6,%xmm6
# qhasm: t0 = r0[0]r1[0]
vpunpcklqdq %xmm13,%xmm9,%xmm7
# qhasm: mem128[ input_0 + 384 ] = t0
movdqu %xmm7,384(%rdi)
# qhasm: t0 = r2[0]r3[0]
vpunpcklqdq %xmm10,%xmm14,%xmm7
# qhasm: mem128[ input_0 + 400 ] = t0
movdqu %xmm7,400(%rdi)
# qhasm: t0 = r4[0]r5[0]
vpunpcklqdq %xmm8,%xmm11,%xmm7
# qhasm: mem128[ input_0 + 416 ] = t0
movdqu %xmm7,416(%rdi)
# qhasm: t0 = r6[0]r7[0]
vpunpcklqdq %xmm6,%xmm12,%xmm6
# qhasm: mem128[ input_0 + 432 ] = t0
movdqu %xmm6,432(%rdi)
# qhasm: r0 = mem64[ input_0 + 448 ] x2
movddup 448(%rdi),%xmm6
# qhasm: r1 = mem64[ input_0 + 456 ] x2
movddup 456(%rdi),%xmm7
# qhasm: r2 = mem64[ input_0 + 464 ] x2
movddup 464(%rdi),%xmm8
# qhasm: r3 = mem64[ input_0 + 472 ] x2
movddup 472(%rdi),%xmm9
# qhasm: r4 = mem64[ input_0 + 480 ] x2
movddup 480(%rdi),%xmm10
# qhasm: r5 = mem64[ input_0 + 488 ] x2
movddup 488(%rdi),%xmm11
# qhasm: r6 = mem64[ input_0 + 496 ] x2
movddup 496(%rdi),%xmm12
# qhasm: r7 = mem64[ input_0 + 504 ] x2
movddup 504(%rdi),%xmm13
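# Note: the three mask stages below appear to implement a bit-level transpose
# of the eight broadcast words loaded above: mask0/mask1 exchange 4-bit groups
# between the pairs (r0,r4)..(r3,r7), mask2/mask3 then exchange 2-bit groups,
# and mask4/mask5 exchange single bits, after which the low quadwords are
# repacked with vpunpcklqdq and stored back to input_0 + 448..496.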
# qhasm: v00 = r0 & mask0
vpand %xmm0,%xmm6,%xmm14
# qhasm: v10 = r4 & mask0
vpand %xmm0,%xmm10,%xmm15
# qhasm: 2x v10 <<= 4
psllq $4,%xmm15
# qhasm: v01 = r0 & mask1
vpand %xmm1,%xmm6,%xmm6
# qhasm: v11 = r4 & mask1
vpand %xmm1,%xmm10,%xmm10
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm6
# qhasm: r0 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: r4 = v01 | v11
vpor %xmm10,%xmm6,%xmm6
# qhasm: v00 = r1 & mask0
vpand %xmm0,%xmm7,%xmm10
# qhasm: v10 = r5 & mask0
vpand %xmm0,%xmm11,%xmm15
# qhasm: 2x v10 <<= 4
psllq $4,%xmm15
# qhasm: v01 = r1 & mask1
vpand %xmm1,%xmm7,%xmm7
# qhasm: v11 = r5 & mask1
vpand %xmm1,%xmm11,%xmm11
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm7
# qhasm: r1 = v00 | v10
vpor %xmm15,%xmm10,%xmm10
# qhasm: r5 = v01 | v11
vpor %xmm11,%xmm7,%xmm7
# qhasm: v00 = r2 & mask0
vpand %xmm0,%xmm8,%xmm11
# qhasm: v10 = r6 & mask0
vpand %xmm0,%xmm12,%xmm15
# qhasm: 2x v10 <<= 4
psllq $4,%xmm15
# qhasm: v01 = r2 & mask1
vpand %xmm1,%xmm8,%xmm8
# qhasm: v11 = r6 & mask1
vpand %xmm1,%xmm12,%xmm12
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm8
# qhasm: r2 = v00 | v10
vpor %xmm15,%xmm11,%xmm11
# qhasm: r6 = v01 | v11
vpor %xmm12,%xmm8,%xmm8
# qhasm: v00 = r3 & mask0
vpand %xmm0,%xmm9,%xmm12
# qhasm: v10 = r7 & mask0
vpand %xmm0,%xmm13,%xmm0
# qhasm: 2x v10 <<= 4
psllq $4,%xmm0
# qhasm: v01 = r3 & mask1
vpand %xmm1,%xmm9,%xmm9
# qhasm: v11 = r7 & mask1
vpand %xmm1,%xmm13,%xmm1
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm9
# qhasm: r3 = v00 | v10
vpor %xmm0,%xmm12,%xmm0
# qhasm: r7 = v01 | v11
vpor %xmm1,%xmm9,%xmm1
# qhasm: v00 = r0 & mask2
vpand %xmm2,%xmm14,%xmm9
# qhasm: v10 = r2 & mask2
vpand %xmm2,%xmm11,%xmm12
# qhasm: 2x v10 <<= 2
psllq $2,%xmm12
# qhasm: v01 = r0 & mask3
vpand %xmm3,%xmm14,%xmm13
# qhasm: v11 = r2 & mask3
vpand %xmm3,%xmm11,%xmm11
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm13
# qhasm: r0 = v00 | v10
vpor %xmm12,%xmm9,%xmm9
# qhasm: r2 = v01 | v11
vpor %xmm11,%xmm13,%xmm11
# qhasm: v00 = r1 & mask2
vpand %xmm2,%xmm10,%xmm12
# qhasm: v10 = r3 & mask2
vpand %xmm2,%xmm0,%xmm13
# qhasm: 2x v10 <<= 2
psllq $2,%xmm13
# qhasm: v01 = r1 & mask3
vpand %xmm3,%xmm10,%xmm10
# qhasm: v11 = r3 & mask3
vpand %xmm3,%xmm0,%xmm0
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm10
# qhasm: r1 = v00 | v10
vpor %xmm13,%xmm12,%xmm12
# qhasm: r3 = v01 | v11
vpor %xmm0,%xmm10,%xmm0
# qhasm: v00 = r4 & mask2
vpand %xmm2,%xmm6,%xmm10
# qhasm: v10 = r6 & mask2
vpand %xmm2,%xmm8,%xmm13
# qhasm: 2x v10 <<= 2
psllq $2,%xmm13
# qhasm: v01 = r4 & mask3
vpand %xmm3,%xmm6,%xmm6
# qhasm: v11 = r6 & mask3
vpand %xmm3,%xmm8,%xmm8
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm6
# qhasm: r4 = v00 | v10
vpor %xmm13,%xmm10,%xmm10
# qhasm: r6 = v01 | v11
vpor %xmm8,%xmm6,%xmm6
# qhasm: v00 = r5 & mask2
vpand %xmm2,%xmm7,%xmm8
# qhasm: v10 = r7 & mask2
vpand %xmm2,%xmm1,%xmm2
# qhasm: 2x v10 <<= 2
psllq $2,%xmm2
# qhasm: v01 = r5 & mask3
vpand %xmm3,%xmm7,%xmm7
# qhasm: v11 = r7 & mask3
vpand %xmm3,%xmm1,%xmm1
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm7
# qhasm: r5 = v00 | v10
vpor %xmm2,%xmm8,%xmm2
# qhasm: r7 = v01 | v11
vpor %xmm1,%xmm7,%xmm1
# qhasm: v00 = r0 & mask4
vpand %xmm4,%xmm9,%xmm3
# qhasm: v10 = r1 & mask4
vpand %xmm4,%xmm12,%xmm7
# qhasm: 2x v10 <<= 1
psllq $1,%xmm7
# qhasm: v01 = r0 & mask5
vpand %xmm5,%xmm9,%xmm8
# qhasm: v11 = r1 & mask5
vpand %xmm5,%xmm12,%xmm9
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm8
# qhasm: r0 = v00 | v10
vpor %xmm7,%xmm3,%xmm3
# qhasm: r1 = v01 | v11
vpor %xmm9,%xmm8,%xmm7
# qhasm: v00 = r2 & mask4
vpand %xmm4,%xmm11,%xmm8
# qhasm: v10 = r3 & mask4
vpand %xmm4,%xmm0,%xmm9
# qhasm: 2x v10 <<= 1
psllq $1,%xmm9
# qhasm: v01 = r2 & mask5
vpand %xmm5,%xmm11,%xmm11
# qhasm: v11 = r3 & mask5
vpand %xmm5,%xmm0,%xmm0
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm11
# qhasm: r2 = v00 | v10
vpor %xmm9,%xmm8,%xmm8
# qhasm: r3 = v01 | v11
vpor %xmm0,%xmm11,%xmm0
# qhasm: v00 = r4 & mask4
vpand %xmm4,%xmm10,%xmm9
# qhasm: v10 = r5 & mask4
vpand %xmm4,%xmm2,%xmm11
# qhasm: 2x v10 <<= 1
psllq $1,%xmm11
# qhasm: v01 = r4 & mask5
vpand %xmm5,%xmm10,%xmm10
# qhasm: v11 = r5 & mask5
vpand %xmm5,%xmm2,%xmm2
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm10
# qhasm: r4 = v00 | v10
vpor %xmm11,%xmm9,%xmm9
# qhasm: r5 = v01 | v11
vpor %xmm2,%xmm10,%xmm2
# qhasm: v00 = r6 & mask4
vpand %xmm4,%xmm6,%xmm10
# qhasm: v10 = r7 & mask4
vpand %xmm4,%xmm1,%xmm4
# qhasm: 2x v10 <<= 1
psllq $1,%xmm4
# qhasm: v01 = r6 & mask5
vpand %xmm5,%xmm6,%xmm6
# qhasm: v11 = r7 & mask5
vpand %xmm5,%xmm1,%xmm1
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm6
# qhasm: r6 = v00 | v10
vpor %xmm4,%xmm10,%xmm4
# qhasm: r7 = v01 | v11
vpor %xmm1,%xmm6,%xmm1
# qhasm: t0 = r0[0]r1[0]
vpunpcklqdq %xmm7,%xmm3,%xmm3
# qhasm: mem128[ input_0 + 448 ] = t0
movdqu %xmm3,448(%rdi)
# qhasm: t0 = r2[0]r3[0]
vpunpcklqdq %xmm0,%xmm8,%xmm0
# qhasm: mem128[ input_0 + 464 ] = t0
movdqu %xmm0,464(%rdi)
# qhasm: t0 = r4[0]r5[0]
vpunpcklqdq %xmm2,%xmm9,%xmm0
# qhasm: mem128[ input_0 + 480 ] = t0
movdqu %xmm0,480(%rdi)
# qhasm: t0 = r6[0]r7[0]
vpunpcklqdq %xmm1,%xmm4,%xmm0
# qhasm: mem128[ input_0 + 496 ] = t0
movdqu %xmm0,496(%rdi)
# qhasm: return
add %r11,%rsp
ret