Columns: repo_id (string, 5 to 115 chars), size (int64, 590 to 5.01M), file_path (string, 4 to 212 chars), content (string, 590 to 5.01M chars)
cryptix-network/cryptix-miner-cpu
10,572
src/asm/keccakf1600_x86-64-mingw64.s
# Source: https://github.com/dot-asm/cryptogams/blob/master/x86_64/keccak1600-x86_64.pl .text .def __KeccakF1600; .scl 3; .type 32; .endef .p2align 5 __KeccakF1600: .byte 0xf3,0x0f,0x1e,0xfa movq 60(%rdi),%rax movq 68(%rdi),%rbx movq 76(%rdi),%rcx movq 84(%rdi),%rdx movq 92(%rdi),%rbp jmp .Loop .p2align 5 .Loop: movq -100(%rdi),%r8 movq -52(%rdi),%r9 movq -4(%rdi),%r10 movq 44(%rdi),%r11 xorq -84(%rdi),%rcx xorq -76(%rdi),%rdx xorq %r8,%rax xorq -92(%rdi),%rbx xorq -44(%rdi),%rcx xorq -60(%rdi),%rax movq %rbp,%r12 xorq -68(%rdi),%rbp xorq %r10,%rcx xorq -20(%rdi),%rax xorq -36(%rdi),%rdx xorq %r9,%rbx xorq -28(%rdi),%rbp xorq 36(%rdi),%rcx xorq 20(%rdi),%rax xorq 4(%rdi),%rdx xorq -12(%rdi),%rbx xorq 12(%rdi),%rbp movq %rcx,%r13 rolq $1,%rcx xorq %rax,%rcx xorq %r11,%rdx rolq $1,%rax xorq %rdx,%rax xorq 28(%rdi),%rbx rolq $1,%rdx xorq %rbx,%rdx xorq 52(%rdi),%rbp rolq $1,%rbx xorq %rbp,%rbx rolq $1,%rbp xorq %r13,%rbp xorq %rcx,%r9 xorq %rdx,%r10 rolq $44,%r9 xorq %rbp,%r11 xorq %rax,%r12 rolq $43,%r10 xorq %rbx,%r8 movq %r9,%r13 rolq $21,%r11 orq %r10,%r9 xorq %r8,%r9 rolq $14,%r12 xorq (%r15),%r9 leaq 8(%r15),%r15 movq %r12,%r14 andq %r11,%r12 movq %r9,-100(%rsi) xorq %r10,%r12 notq %r10 movq %r12,-84(%rsi) orq %r11,%r10 movq 76(%rdi),%r12 xorq %r13,%r10 movq %r10,-92(%rsi) andq %r8,%r13 movq -28(%rdi),%r9 xorq %r14,%r13 movq -20(%rdi),%r10 movq %r13,-68(%rsi) orq %r8,%r14 movq -76(%rdi),%r8 xorq %r11,%r14 movq 28(%rdi),%r11 movq %r14,-76(%rsi) xorq %rbp,%r8 xorq %rdx,%r12 rolq $28,%r8 xorq %rcx,%r11 xorq %rax,%r9 rolq $61,%r12 rolq $45,%r11 xorq %rbx,%r10 rolq $20,%r9 movq %r8,%r13 orq %r12,%r8 rolq $3,%r10 xorq %r11,%r8 movq %r8,-36(%rsi) movq %r9,%r14 andq %r13,%r9 movq -92(%rdi),%r8 xorq %r12,%r9 notq %r12 movq %r9,-28(%rsi) orq %r11,%r12 movq -44(%rdi),%r9 xorq %r10,%r12 movq %r12,-44(%rsi) andq %r10,%r11 movq 60(%rdi),%r12 xorq %r14,%r11 movq %r11,-52(%rsi) orq %r10,%r14 movq 4(%rdi),%r10 xorq %r13,%r14 movq 52(%rdi),%r11 movq %r14,-60(%rsi) xorq %rbp,%r10 xorq %rax,%r11 rolq $25,%r10 xorq %rdx,%r9 rolq $8,%r11 xorq %rbx,%r12 rolq $6,%r9 xorq %rcx,%r8 rolq $18,%r12 movq %r10,%r13 andq %r11,%r10 rolq $1,%r8 notq %r11 xorq %r9,%r10 movq %r10,-12(%rsi) movq %r12,%r14 andq %r11,%r12 movq -12(%rdi),%r10 xorq %r13,%r12 movq %r12,-4(%rsi) orq %r9,%r13 movq 84(%rdi),%r12 xorq %r8,%r13 movq %r13,-20(%rsi) andq %r8,%r9 xorq %r14,%r9 movq %r9,12(%rsi) orq %r8,%r14 movq -60(%rdi),%r9 xorq %r11,%r14 movq 36(%rdi),%r11 movq %r14,4(%rsi) movq -68(%rdi),%r8 xorq %rcx,%r10 xorq %rdx,%r11 rolq $10,%r10 xorq %rbx,%r9 rolq $15,%r11 xorq %rbp,%r12 rolq $36,%r9 xorq %rax,%r8 rolq $56,%r12 movq %r10,%r13 orq %r11,%r10 rolq $27,%r8 notq %r11 xorq %r9,%r10 movq %r10,28(%rsi) movq %r12,%r14 orq %r11,%r12 xorq %r13,%r12 movq %r12,36(%rsi) andq %r9,%r13 xorq %r8,%r13 movq %r13,20(%rsi) orq %r8,%r9 xorq %r14,%r9 movq %r9,52(%rsi) andq %r14,%r8 xorq %r11,%r8 movq %r8,44(%rsi) xorq -84(%rdi),%rdx xorq -36(%rdi),%rbp rolq $62,%rdx xorq 68(%rdi),%rcx rolq $55,%rbp xorq 12(%rdi),%rax rolq $2,%rcx xorq 20(%rdi),%rbx xchgq %rsi,%rdi rolq $39,%rax rolq $41,%rbx movq %rdx,%r13 andq %rbp,%rdx notq %rbp xorq %rcx,%rdx movq %rdx,92(%rdi) movq %rax,%r14 andq %rbp,%rax xorq %r13,%rax movq %rax,60(%rdi) orq %rcx,%r13 xorq %rbx,%r13 movq %r13,84(%rdi) andq %rbx,%rcx xorq %r14,%rcx movq %rcx,76(%rdi) orq %r14,%rbx xorq %rbp,%rbx movq %rbx,68(%rdi) movq %rdx,%rbp movq %r13,%rdx testq $255,%r15 jnz .Loop leaq -192(%r15),%r15 .byte 0xf3,0xc3 .globl KeccakF1600 .def KeccakF1600; .scl 2; .type 32; .endef .p2align 5 KeccakF1600: 
.byte 0xf3,0x0f,0x1e,0xfa movq %rdi,8(%rsp) movq %rsi,16(%rsp) movq %rsp,%r11 .LSEH_begin_KeccakF1600: movq %rcx,%rdi pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 leaq 100(%rdi),%rdi subq $200,%rsp .LSEH_body_KeccakF1600: notq -92(%rdi) notq -84(%rdi) notq -36(%rdi) notq -4(%rdi) notq 36(%rdi) notq 60(%rdi) leaq iotas(%rip),%r15 leaq 100(%rsp),%rsi call __KeccakF1600 notq -92(%rdi) notq -84(%rdi) notq -36(%rdi) notq -4(%rdi) notq 36(%rdi) notq 60(%rdi) leaq -100(%rdi),%rdi leaq 248(%rsp),%r11 movq -48(%r11),%r15 movq -40(%r11),%r14 movq -32(%r11),%r13 movq -24(%r11),%r12 movq -16(%r11),%rbp movq -8(%r11),%rbx leaq (%r11),%rsp .LSEH_epilogue_KeccakF1600: mov 8(%r11),%rdi mov 16(%r11),%rsi .byte 0xf3,0xc3 .LSEH_end_KeccakF1600: .globl SHA3_absorb .def SHA3_absorb; .scl 2; .type 32; .endef .p2align 5 SHA3_absorb: .byte 0xf3,0x0f,0x1e,0xfa movq %rdi,8(%rsp) movq %rsi,16(%rsp) movq %rsp,%r11 .LSEH_begin_SHA3_absorb: movq %rcx,%rdi movq %rdx,%rsi movq %r8,%rdx movq %r9,%rcx pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 leaq 100(%rdi),%rdi subq $232,%rsp .LSEH_body_SHA3_absorb: movq %rsi,%r9 leaq 100(%rsp),%rsi notq -92(%rdi) notq -84(%rdi) notq -36(%rdi) notq -4(%rdi) notq 36(%rdi) notq 60(%rdi) leaq iotas(%rip),%r15 movq %rcx,216-100(%rsi) .Loop_absorb: cmpq %rcx,%rdx jc .Ldone_absorb shrq $3,%rcx leaq -100(%rdi),%r8 .Lblock_absorb: movq (%r9),%rax leaq 8(%r9),%r9 xorq (%r8),%rax leaq 8(%r8),%r8 subq $8,%rdx movq %rax,-8(%r8) subq $1,%rcx jnz .Lblock_absorb movq %r9,200-100(%rsi) movq %rdx,208-100(%rsi) call __KeccakF1600 movq 200-100(%rsi),%r9 movq 208-100(%rsi),%rdx movq 216-100(%rsi),%rcx jmp .Loop_absorb .p2align 5 .Ldone_absorb: movq %rdx,%rax notq -92(%rdi) notq -84(%rdi) notq -36(%rdi) notq -4(%rdi) notq 36(%rdi) notq 60(%rdi) leaq 280(%rsp),%r11 movq -48(%r11),%r15 movq -40(%r11),%r14 movq -32(%r11),%r13 movq -24(%r11),%r12 movq -16(%r11),%rbp movq -8(%r11),%rbx leaq (%r11),%rsp .LSEH_epilogue_SHA3_absorb: mov 8(%r11),%rdi mov 16(%r11),%rsi .byte 0xf3,0xc3 .LSEH_end_SHA3_absorb: .globl SHA3_squeeze .def SHA3_squeeze; .scl 2; .type 32; .endef .p2align 5 SHA3_squeeze: .byte 0xf3,0x0f,0x1e,0xfa movq %rdi,8(%rsp) movq %rsi,16(%rsp) movq %rsp,%r11 .LSEH_begin_SHA3_squeeze: movq %rcx,%rdi movq %rdx,%rsi movq %r8,%rdx movq %r9,%rcx pushq %r12 pushq %r13 pushq %r14 subq $32,%rsp .LSEH_body_SHA3_squeeze: shrq $3,%rcx movq %rdi,%r8 movq %rsi,%r12 movq %rdx,%r13 movq %rcx,%r14 jmp .Loop_squeeze .p2align 5 .Loop_squeeze: cmpq $8,%r13 jb .Ltail_squeeze movq (%r8),%rax leaq 8(%r8),%r8 movq %rax,(%r12) leaq 8(%r12),%r12 subq $8,%r13 jz .Ldone_squeeze subq $1,%rcx jnz .Loop_squeeze movq %rdi,%rcx call KeccakF1600 movq %rdi,%r8 movq %r14,%rcx jmp .Loop_squeeze .Ltail_squeeze: movq %r8,%rsi movq %r12,%rdi movq %r13,%rcx .byte 0xf3,0xa4 .Ldone_squeeze: movq 32(%rsp),%r14 movq 40(%rsp),%r13 movq 48(%rsp),%r12 addq $56,%rsp .LSEH_epilogue_SHA3_squeeze: mov 8(%rsp),%rdi mov 16(%rsp),%rsi .byte 0xf3,0xc3 .LSEH_end_SHA3_squeeze: .p2align 8 .quad 0,0,0,0,0,0,0,0 iotas: .quad 0x0000000000000001 .quad 0x0000000000008082 .quad 0x800000000000808a .quad 0x8000000080008000 .quad 0x000000000000808b .quad 0x0000000080000001 .quad 0x8000000080008081 .quad 0x8000000000008009 .quad 0x000000000000008a .quad 0x0000000000000088 .quad 0x0000000080008009 .quad 0x000000008000000a .quad 0x000000008000808b .quad 0x800000000000008b .quad 0x8000000000008089 .quad 0x8000000000008003 .quad 0x8000000000008002 .quad 0x8000000000000080 .quad 0x000000000000800a .quad 0x800000008000000a .quad 
0x8000000080008081 .quad 0x8000000000008080 .quad 0x0000000080000001 .quad 0x8000000080008008 .byte 75,101,99,99,97,107,45,49,54,48,48,32,97,98,115,111,114,98,32,97,110,100,32,115,113,117,101,101,122,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .section .pdata .p2align 2 .rva .LSEH_begin_KeccakF1600 .rva .LSEH_body_KeccakF1600 .rva .LSEH_info_KeccakF1600_prologue .rva .LSEH_body_KeccakF1600 .rva .LSEH_epilogue_KeccakF1600 .rva .LSEH_info_KeccakF1600_body .rva .LSEH_epilogue_KeccakF1600 .rva .LSEH_end_KeccakF1600 .rva .LSEH_info_KeccakF1600_epilogue .rva .LSEH_begin_SHA3_absorb .rva .LSEH_body_SHA3_absorb .rva .LSEH_info_SHA3_absorb_prologue .rva .LSEH_body_SHA3_absorb .rva .LSEH_epilogue_SHA3_absorb .rva .LSEH_info_SHA3_absorb_body .rva .LSEH_epilogue_SHA3_absorb .rva .LSEH_end_SHA3_absorb .rva .LSEH_info_SHA3_absorb_epilogue .rva .LSEH_begin_SHA3_squeeze .rva .LSEH_body_SHA3_squeeze .rva .LSEH_info_SHA3_squeeze_prologue .rva .LSEH_body_SHA3_squeeze .rva .LSEH_epilogue_SHA3_squeeze .rva .LSEH_info_SHA3_squeeze_body .rva .LSEH_epilogue_SHA3_squeeze .rva .LSEH_end_SHA3_squeeze .rva .LSEH_info_SHA3_squeeze_epilogue .section .xdata .p2align 3 .LSEH_info_KeccakF1600_prologue: .byte 1,0,5,0x0b .byte 0,0x74,1,0 .byte 0,0x64,2,0 .byte 0,0xb3 .byte 0,0 .long 0,0 .LSEH_info_KeccakF1600_body: .byte 1,0,18,0 .byte 0x00,0xf4,0x19,0x00 .byte 0x00,0xe4,0x1a,0x00 .byte 0x00,0xd4,0x1b,0x00 .byte 0x00,0xc4,0x1c,0x00 .byte 0x00,0x54,0x1d,0x00 .byte 0x00,0x34,0x1e,0x00 .byte 0x00,0x74,0x20,0x00 .byte 0x00,0x64,0x21,0x00 .byte 0x00,0x01,0x1f,0x00 .byte 0x00,0x00,0x00,0x00 .byte 0x00,0x00,0x00,0x00 .LSEH_info_KeccakF1600_epilogue: .byte 1,0,5,11 .byte 0x00,0x74,0x01,0x00 .byte 0x00,0x64,0x02,0x00 .byte 0x00,0xb3 .byte 0x00,0x00,0x00,0x00,0x00,0x00 .byte 0x00,0x00,0x00,0x00 .LSEH_info_SHA3_absorb_prologue: .byte 1,0,5,0x0b .byte 0,0x74,1,0 .byte 0,0x64,2,0 .byte 0,0xb3 .byte 0,0 .long 0,0 .LSEH_info_SHA3_absorb_body: .byte 1,0,18,0 .byte 0x00,0xf4,0x1d,0x00 .byte 0x00,0xe4,0x1e,0x00 .byte 0x00,0xd4,0x1f,0x00 .byte 0x00,0xc4,0x20,0x00 .byte 0x00,0x54,0x21,0x00 .byte 0x00,0x34,0x22,0x00 .byte 0x00,0x74,0x24,0x00 .byte 0x00,0x64,0x25,0x00 .byte 0x00,0x01,0x23,0x00 .byte 0x00,0x00,0x00,0x00 .byte 0x00,0x00,0x00,0x00 .LSEH_info_SHA3_absorb_epilogue: .byte 1,0,5,11 .byte 0x00,0x74,0x01,0x00 .byte 0x00,0x64,0x02,0x00 .byte 0x00,0xb3 .byte 0x00,0x00,0x00,0x00,0x00,0x00 .byte 0x00,0x00,0x00,0x00 .LSEH_info_SHA3_squeeze_prologue: .byte 1,0,5,0x0b .byte 0,0x74,1,0 .byte 0,0x64,2,0 .byte 0,0xb3 .byte 0,0 .long 0,0 .LSEH_info_SHA3_squeeze_body: .byte 1,0,11,0 .byte 0x00,0xe4,0x04,0x00 .byte 0x00,0xd4,0x05,0x00 .byte 0x00,0xc4,0x06,0x00 .byte 0x00,0x74,0x08,0x00 .byte 0x00,0x64,0x09,0x00 .byte 0x00,0x62 .byte 0x00,0x00,0x00,0x00,0x00,0x00 .LSEH_info_SHA3_squeeze_epilogue: .byte 1,0,4,0 .byte 0x00,0x74,0x01,0x00 .byte 0x00,0x64,0x02,0x00 .byte 0x00,0x00,0x00,0x00
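A note on the entry points this file exports: KeccakF1600, SHA3_absorb and SHA3_squeeze appear to follow the cryptogams/OpenSSL convention (the state is uint64_t A[5][5], r is the rate in bytes, and SHA3_absorb returns the length of the unprocessed tail). Below is a minimal C driver sketch assuming those prototypes and the standard SHA-3 0x06 domain padding; neither the prototypes nor the padding step are shown in this repository, so treat the whole thing as an assumption.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Assumed prototypes, following the cryptogams/OpenSSL convention. */
size_t SHA3_absorb(uint64_t A[5][5], const unsigned char *inp, size_t len, size_t r);
void   SHA3_squeeze(uint64_t A[5][5], unsigned char *out, size_t len, size_t r);

/* SHA3-256 style usage: rate r = 136 bytes, 32-byte digest. */
void sha3_256(unsigned char out[32], const unsigned char *msg, size_t len)
{
    uint64_t A[5][5];
    unsigned char block[136];
    size_t rem;

    memset(A, 0, sizeof(A));
    rem = SHA3_absorb(A, msg, len, 136);   /* consumes whole blocks, returns tail length */

    memset(block, 0, sizeof(block));
    memcpy(block, msg + (len - rem), rem); /* buffer the unprocessed tail */
    block[rem]  = 0x06;                    /* assumed SHA-3 domain bits + pad10*1 start */
    block[135] |= 0x80;                    /* pad10*1 end */
    SHA3_absorb(A, block, sizeof(block), 136);

    SHA3_squeeze(A, out, 32, 136);
}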
cypppper/core-from-dust
12,192
os/src/link_app.S
.align 3 .section .data .global _num_app _num_app: .quad 52 .quad app_0_start .quad app_1_start .quad app_2_start .quad app_3_start .quad app_4_start .quad app_5_start .quad app_6_start .quad app_7_start .quad app_8_start .quad app_9_start .quad app_10_start .quad app_11_start .quad app_12_start .quad app_13_start .quad app_14_start .quad app_15_start .quad app_16_start .quad app_17_start .quad app_18_start .quad app_19_start .quad app_20_start .quad app_21_start .quad app_22_start .quad app_23_start .quad app_24_start .quad app_25_start .quad app_26_start .quad app_27_start .quad app_28_start .quad app_29_start .quad app_30_start .quad app_31_start .quad app_32_start .quad app_33_start .quad app_34_start .quad app_35_start .quad app_36_start .quad app_37_start .quad app_38_start .quad app_39_start .quad app_40_start .quad app_41_start .quad app_42_start .quad app_43_start .quad app_44_start .quad app_45_start .quad app_46_start .quad app_47_start .quad app_48_start .quad app_49_start .quad app_50_start .quad app_51_start .quad app_51_end .global _app_names _app_names: .string "adder" .string "adder_atomic" .string "adder_mutex_blocking" .string "adder_mutex_spin" .string "adder_peterson_spin" .string "adder_peterson_yield" .string "adder_simple_spin" .string "adder_simple_yield" .string "barrier_condvar" .string "barrier_fail" .string "cat" .string "cmdline_args" .string "condsync_condvar" .string "condsync_sem" .string "count_lines" .string "early_exit" .string "early_exit2" .string "eisenberg" .string "exit" .string "fantastic_text" .string "filetest_simple" .string "forktest" .string "forktest2" .string "forktest_simple" .string "forktree" .string "hello_world" .string "huge_write" .string "infloop" .string "initproc" .string "matrix" .string "mpsc_sem" .string "peterson" .string "phil_din_mutex" .string "pipe_large_test" .string "pipetest" .string "priv_csr" .string "priv_inst" .string "run_pipe_test" .string "sleep" .string "sleep_simple" .string "stack_overflow" .string "stackful_coroutine" .string "stackless_coroutine" .string "store_fault" .string "sync_sem" .string "threads" .string "threads_arg" .string "until_timeout" .string "user_shell" .string "usertests" .string "usertests_simple" .string "yield" .section .data .global app_0_start .global app_0_end .align 3 app_0_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/adder" app_0_end: .section .data .global app_1_start .global app_1_end .align 3 app_1_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/adder_atomic" app_1_end: .section .data .global app_2_start .global app_2_end .align 3 app_2_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/adder_mutex_blocking" app_2_end: .section .data .global app_3_start .global app_3_end .align 3 app_3_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/adder_mutex_spin" app_3_end: .section .data .global app_4_start .global app_4_end .align 3 app_4_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/adder_peterson_spin" app_4_end: .section .data .global app_5_start .global app_5_end .align 3 app_5_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/adder_peterson_yield" app_5_end: .section .data .global app_6_start .global app_6_end .align 3 app_6_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/adder_simple_spin" app_6_end: .section .data .global app_7_start .global app_7_end .align 3 app_7_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/adder_simple_yield" app_7_end: 
.section .data .global app_8_start .global app_8_end .align 3 app_8_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/barrier_condvar" app_8_end: .section .data .global app_9_start .global app_9_end .align 3 app_9_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/barrier_fail" app_9_end: .section .data .global app_10_start .global app_10_end .align 3 app_10_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/cat" app_10_end: .section .data .global app_11_start .global app_11_end .align 3 app_11_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/cmdline_args" app_11_end: .section .data .global app_12_start .global app_12_end .align 3 app_12_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/condsync_condvar" app_12_end: .section .data .global app_13_start .global app_13_end .align 3 app_13_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/condsync_sem" app_13_end: .section .data .global app_14_start .global app_14_end .align 3 app_14_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/count_lines" app_14_end: .section .data .global app_15_start .global app_15_end .align 3 app_15_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/early_exit" app_15_end: .section .data .global app_16_start .global app_16_end .align 3 app_16_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/early_exit2" app_16_end: .section .data .global app_17_start .global app_17_end .align 3 app_17_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/eisenberg" app_17_end: .section .data .global app_18_start .global app_18_end .align 3 app_18_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/exit" app_18_end: .section .data .global app_19_start .global app_19_end .align 3 app_19_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/fantastic_text" app_19_end: .section .data .global app_20_start .global app_20_end .align 3 app_20_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/filetest_simple" app_20_end: .section .data .global app_21_start .global app_21_end .align 3 app_21_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/forktest" app_21_end: .section .data .global app_22_start .global app_22_end .align 3 app_22_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/forktest2" app_22_end: .section .data .global app_23_start .global app_23_end .align 3 app_23_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/forktest_simple" app_23_end: .section .data .global app_24_start .global app_24_end .align 3 app_24_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/forktree" app_24_end: .section .data .global app_25_start .global app_25_end .align 3 app_25_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/hello_world" app_25_end: .section .data .global app_26_start .global app_26_end .align 3 app_26_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/huge_write" app_26_end: .section .data .global app_27_start .global app_27_end .align 3 app_27_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/infloop" app_27_end: .section .data .global app_28_start .global app_28_end .align 3 app_28_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/initproc" app_28_end: .section .data .global app_29_start .global app_29_end .align 3 app_29_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/matrix" app_29_end: .section .data 
.global app_30_start .global app_30_end .align 3 app_30_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/mpsc_sem" app_30_end: .section .data .global app_31_start .global app_31_end .align 3 app_31_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/peterson" app_31_end: .section .data .global app_32_start .global app_32_end .align 3 app_32_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/phil_din_mutex" app_32_end: .section .data .global app_33_start .global app_33_end .align 3 app_33_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/pipe_large_test" app_33_end: .section .data .global app_34_start .global app_34_end .align 3 app_34_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/pipetest" app_34_end: .section .data .global app_35_start .global app_35_end .align 3 app_35_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/priv_csr" app_35_end: .section .data .global app_36_start .global app_36_end .align 3 app_36_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/priv_inst" app_36_end: .section .data .global app_37_start .global app_37_end .align 3 app_37_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/run_pipe_test" app_37_end: .section .data .global app_38_start .global app_38_end .align 3 app_38_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/sleep" app_38_end: .section .data .global app_39_start .global app_39_end .align 3 app_39_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/sleep_simple" app_39_end: .section .data .global app_40_start .global app_40_end .align 3 app_40_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/stack_overflow" app_40_end: .section .data .global app_41_start .global app_41_end .align 3 app_41_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/stackful_coroutine" app_41_end: .section .data .global app_42_start .global app_42_end .align 3 app_42_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/stackless_coroutine" app_42_end: .section .data .global app_43_start .global app_43_end .align 3 app_43_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/store_fault" app_43_end: .section .data .global app_44_start .global app_44_end .align 3 app_44_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/sync_sem" app_44_end: .section .data .global app_45_start .global app_45_end .align 3 app_45_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/threads" app_45_end: .section .data .global app_46_start .global app_46_end .align 3 app_46_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/threads_arg" app_46_end: .section .data .global app_47_start .global app_47_end .align 3 app_47_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/until_timeout" app_47_end: .section .data .global app_48_start .global app_48_end .align 3 app_48_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/user_shell" app_48_end: .section .data .global app_49_start .global app_49_end .align 3 app_49_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/usertests" app_49_end: .section .data .global app_50_start .global app_50_end .align 3 app_50_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/usertests_simple" app_50_end: .section .data .global app_51_start .global app_51_end .align 3 app_51_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/yield" app_51_end:
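The table emitted above stores only the app count, one start address per app, and a single final end address, so app i occupies [table[1+i], table[2+i]). A hedged C sketch of walking that table follows; the real kernel in this repo is Rust, so the struct and function names here are illustrative only.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

extern uint64_t _num_app;   /* .quad 52, followed by the address table */
extern char     _app_names; /* first byte of the packed NUL-terminated names */

typedef struct { const char *name; const uint8_t *start; size_t len; } app_t;

app_t get_app(size_t i)
{
    const uint64_t *tab = &_num_app;  /* tab[0] = count, tab[1..count+1] = addresses */
    size_t n = (size_t)tab[0];
    const char *name = &_app_names;
    app_t a = {0};

    if (i >= n)
        return a;
    for (size_t k = 0; k < i; k++)    /* skip the first i names */
        name += strlen(name) + 1;
    a.name  = name;
    a.start = (const uint8_t *)tab[1 + i];
    a.len   = (size_t)(tab[2 + i] - tab[1 + i]); /* next start (or final end) bounds app i */
    return a;
}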
cypppper/core-from-dust
1,640
os/src/trap/trap.S
.altmacro
.macro SAVE_GP n
    sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
    ld x\n, \n*8(sp)
.endm
    .section .text.trampoline
    .globl __alltraps
    .globl __restore
    .align 2
__alltraps:
    csrrw sp, sscratch, sp
    # now sp->*TrapContext in user space, sscratch->user stack
    # save other general purpose registers
    sd x1, 1*8(sp)
    # skip sp(x2), we will save it later
    sd x3, 3*8(sp)
    # skip tp(x4), application does not use it
    # save x5~x31
    .set n, 5
    .rept 27
        SAVE_GP %n
        .set n, n+1
    .endr
    # we can use t0/t1/t2 freely, because they have been saved in TrapContext
    csrr t0, sstatus
    csrr t1, sepc
    sd t0, 32*8(sp)
    sd t1, 33*8(sp)
    # read user stack from sscratch and save it in TrapContext
    csrr t2, sscratch
    sd t2, 2*8(sp)
    # load kernel_satp into t0
    ld t0, 34*8(sp)
    # load trap_handler into t1
    ld t1, 36*8(sp)
    # move to kernel_sp
    ld sp, 35*8(sp)
    # switch to kernel space
    csrw satp, t0
    sfence.vma
    # jump to trap_handler
    jr t1

__restore:
    # a0: *TrapContext in user space(Constant); a1: user space token
    # switch to user space
    csrw satp, a1
    sfence.vma
    csrw sscratch, a0
    mv sp, a0
    # now sp points to TrapContext in user space, start restoring based on it
    # restore sstatus/sepc
    ld t0, 32*8(sp)
    ld t1, 33*8(sp)
    csrw sstatus, t0
    csrw sepc, t1
    # restore general purpose registers except x0/sp/tp
    ld x1, 1*8(sp)
    ld x3, 3*8(sp)
    .set n, 5
    .rept 27
        LOAD_GP %n
        .set n, n+1
    .endr
    # back to user stack
    ld sp, 2*8(sp)
    sret
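The fixed offsets used above (slots 0-31 for the general registers, 32*8 for sstatus, 33*8 for sepc, 34*8 through 36*8 for the kernel fields) imply a TrapContext layout like the C mirror below. The actual kernel defines this struct in Rust; this is only to make the offsets explicit.

#include <stdint.h>

struct trap_context {
    uint64_t x[32];        /* x0..x31; user sp lands in slot 2 via sscratch */
    uint64_t sstatus;      /* 32*8(sp) */
    uint64_t sepc;         /* 33*8(sp) */
    uint64_t kernel_satp;  /* 34*8(sp): kernel address-space token loaded into satp */
    uint64_t kernel_sp;    /* 35*8(sp): kernel stack __alltraps switches to */
    uint64_t trap_handler; /* 36*8(sp): target of the final jr t1 */
};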
Darokahn/blocks-language
1,234
assembly test stuff/asem.s
global _start

.text:
_start:
    push 42
    call uitoa
return:
    call print
    call end

end:
    mov ebx, [esp+4]    ; before operation: push exit code
    mov eax, 1
    int 0x80

print:
    mov eax, 4
    mov ebx, 1
    mov ecx, msg
    mov edx, len
    int 0x80
    ret

uitoa:
    ; +4: num
    ; -4: digits
    ; -8: comp
    ; -12: has_found_digit
    mov ebp, esp
    push msg
    push 1000000
    push 0
    mov eax, [ebp + 4]
    cmp eax, [ebp-8]
    jl divide_comp
    jmp else
divide_comp:
    mov eax, [ebp-8]
    mov esi, 10
    div esi
    mov [ebp-8], eax
increment_digit:
    add eax, 1
    mov byte [eax], 48
    jmp then
else:
    jge subtract_comp
subtract_comp:
    sub eax, ecx
    jge increment_val
increment_val:
    add byte [eax], 1
then:
    cmp ebp, 0
    jle return
    jmp uitoa

;if num < comp:
;    goto divide code
;    if has_found_digit:
;        goto increment digit code
;else:
;    sub comp from num
;    increment value at current place
;if num:
;    restart

.data:
    msg resb 80
    len equ $ - msg
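The trailing comments sketch the intended algorithm: compare the number against a descending power of ten, subtract it repeatedly while bumping the current digit, and move to the next place. Here is a hedged C rendering of that intent, purely as an illustration of the described algorithm rather than a fix for the assembly above.

#include <stdio.h>

static void uitoa(unsigned num, char *out)
{
    unsigned comp = 1000000;          /* largest power of ten pushed in asem.s */
    int has_found_digit = 0;

    while (comp) {
        char digit = '0';
        while (num >= comp) {         /* subtract comp and bump the current place */
            num -= comp;
            digit++;
        }
        if (digit != '0' || has_found_digit || comp == 1) {
            *out++ = digit;           /* emit once a significant digit was seen */
            has_found_digit = 1;
        }
        comp /= 10;                   /* next lower place */
    }
    *out = '\0';
}

int main(void) { char buf[16]; uitoa(42, buf); puts(buf); return 0; }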
dda119141/ultrascale_bootloader
4,222
src/main/xfsbl_exit.S
/****************************************************************************** * * Copyright (c) 2015 - 2020 Xilinx, Inc. All rights reserved. * SPDX-License-Identifier: MIT *******************************************************************************/ /*****************************************************************************/ /** * * @file xfsbl_exit.s * * This is the main file which contains exit code for the FSBL. * * <pre> * MODIFICATION HISTORY: * * Ver Who Date Changes * ----- ---- -------- ------------------------------------------------------- * 1.00 kc 11/13/13 Initial release * mus 02/26/19 Added support for armclang compiler * dp 06/25/20 Added armclang support for Cortex-R5 * * </pre> * * @note * ******************************************************************************/ /***************************** Include Files *********************************/ /************************** Constant Definitions *****************************/ /**************************** Type Definitions *******************************/ /***************** Macros (Inline Functions) Definitions *********************/ /************************** Function Prototypes ******************************/ #if ! defined (__clang__) .globl XFsbl_Exit /************************** Variable Definitions *****************************/ XFsbl_Exit: #ifdef ARMA53_64 mov x30, x0 /* move the destination address into x30 register */ tlbi ALLE3 /* invalidate All E3 translation tables */ ic IALLU /* invalidate I Cache All to PoU, Inner Shareable */ dsb sy isb /* make sure it completes */ mrs x5, SCTLR_EL3 /* Read control register */ mov x6, #0x1005 /* D, I , M bits disable */ bic x5, x5, x6 /* Disable MMU, L1 and L2 I/D cache */ msr SCTLR_EL3, x5 /* */ isb cmp x1, #0 /* exit to wfe */ beq XFsbl_Loop cmp x1, #1 /* x1 is 1 - exit in aarch64 */ beq XFsbl_StartApp mov x2, #2 /* request for warm reset and aarch32 */ dsb sy isb msr RMR_EL3,x2 /* write to reset management register */ isb #endif XFsbl_Loop: wfe /* wait for event */ b XFsbl_Loop .Ldone: b .Ldone /* Paranoia: we should never get here */ XFsbl_StartApp: #ifdef ARMA53_64 br x30 /* branch to */ #else bx lr; #endif .end #else EXPORT XFsbl_Exit AREA |.exit|, CODE XFsbl_Exit #ifdef ARMR5 mov lr, r0 ;move the destination address into link register mcr p15,0,r0,c7,c5,0 ;Invalidate Instruction cache mcr p15,0,r0,c7,c5,6 ;Invalidate branch predictor array dsb isb ;make sure it completes mrc p15,0,r4,c1,c0,0 ;Read SCTLR bic r4, r4, #0x04 ;disable L1 I Cache bic r4, r4, #0x1000 ;Disable L1 D Cache mcr p15,0,r4,c1,c0,0 ;disable the DCache, ICache isb ;make sure it completes ;set exception vector to HIVEC ;this is done because, in LOVEC we can not disable the MPU as ;OCM region is not present in default MPU regions when in LOVEC mrc p15, 0, r0, c1, c0, 0 ;Read SCTLR orr r0, r0, #0x2000 mcr p15, 0, r0, c1, c0, 0 isb ;disable the MPU mrc p15,0,r4,c1,c0,0 ;Read SCTLR bic r4, r4, #0x01 mcr p15,0,r4,c1,c0,0 isb cmp r1, #0 ;exit to wfe beq XFsbl_Loop cmp r1, #2 ;x1 is 2 - exit in aarch32 beq XFsbl_StartApp ;x1 is 1 - exit in aarch64 mov r2, #3 ;request for warm reset and aarch64 dsb isb mcr p15, 0, r2, c12, c0, 2 ;write to reset management register isb #else mov x30, x0 ; move the destination address into x30 register tlbi ALLE3 ; invalidate All E3 translation tables ic IALLU ; invalidate I Cache All to PoU, Inner Shareable dsb sy isb ; make sure it completes mrs x5, SCTLR_EL3 ; Read control register mov x6, #0x1005 ; D, I , M bits disable bic x5, x5, x6 ; Disable MMU, L1 and L2 I/D 
cache msr SCTLR_EL3, x5 isb cmp x1, #0 ; exit to wfe beq XFsbl_Loop cmp x1, #1 ; x1 is 1 - exit in aarch64 beq XFsbl_StartApp ;x1 is 2 - exit in aarch32 mov x2, #2 ; request for warm reset and aarch32 dsb sy isb msr RMR_EL3,x2 ; write to reset management register isb #endif XFsbl_Loop wfe ;wait for event b XFsbl_Loop XFsbl_StartApp #ifdef ARMR5 bx lr; #else br x30 ; branch to #endif END #endif
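As the assembly reads, x0 carries the handoff address and x1 selects the exit mode (0: park in a wfe loop, 1: branch to the loaded image in AArch64, otherwise: request a warm reset into AArch32 via RMR_EL3). A hedged sketch of the C-side contract follows; the prototype and flag names are assumptions for illustration, not taken from the FSBL headers.

#include <stdint.h>

#define XFSBL_EXIT_LOOP     0U   /* stay in the wfe loop */
#define XFSBL_EXIT_AARCH64  1U   /* branch to the handoff address in AArch64 */
#define XFSBL_EXIT_AARCH32  2U   /* warm reset, next stage runs in AArch32 */

void XFsbl_Exit(uint64_t HandoffAddress, uint32_t Flags);  /* assumed prototype */

static void handoff(uint64_t entry)
{
    /* Caches and the MMU are disabled inside XFsbl_Exit before the jump. */
    XFsbl_Exit(entry, XFSBL_EXIT_AARCH64);
}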
dda119141/ultrascale_bootloader
2,107
src/lib/bootup/invalidate_caches.S
/******************************************************************************
*
******************************************************************************/

#include "xparameters.h"
#include "bspconfig.h"

    .globl MMUTableL0
    .globl MMUTableL1
    .globl MMUTableL2

    .section .boot,"ax"

.global invalidate_dcaches
invalidate_dcaches:
    dmb  ISH
    mrs  x0, CLIDR_EL1                  //; x0 = CLIDR
    ubfx w2, w0, #24, #3                //; w2 = CLIDR.LoC
    cmp  w2, #0                         //; LoC is 0?
    b.eq invalidateCaches_end           //; No cleaning required and enable MMU
    mov  w1, #0                         //; w1 = level iterator

invalidateCaches_flush_level:
    add  w3, w1, w1, lsl #1             //; w3 = w1 * 3 (right-shift for cache type)
    lsr  w3, w0, w3                     //; w3 = w0 >> w3
    ubfx w3, w3, #0, #3                 //; w3 = cache type of this level
    cmp  w3, #2                         //; No cache at this level?
    b.lt invalidateCaches_next_level

    lsl  w4, w1, #1
    msr  CSSELR_EL1, x4                 //; Select current cache level in CSSELR
    isb                                 //; ISB required to reflect new CSIDR
    mrs  x4, CCSIDR_EL1                 //; w4 = CSIDR

    ubfx w3, w4, #0, #3
    add  w3, w3, #2                     //; w3 = log2(line size)
    ubfx w5, w4, #13, #15
    ubfx w4, w4, #3, #10                //; w4 = Way number
    clz  w6, w4                         //; w6 = 32 - log2(number of ways)

invalidateCaches_next_level:
    add  w1, w1, #1                     //; Next level
    cmp  w2, w1
    b.gt invalidateCaches_flush_level

invalidateCaches_flush_set:
    mov  w8, w4                         //; w8 = Way number
invalidateCaches_flush_way:
    lsl  w7, w1, #1                     //; Fill level field
    lsl  w9, w5, w3
    orr  w7, w7, w9                     //; Fill index field
    lsl  w9, w8, w6
    orr  w7, w7, w9                     //; Fill way field
    dc   CISW, x7                       //; Invalidate by set/way to point of coherency
    subs w8, w8, #1                     //; Decrement way
    b.ge invalidateCaches_flush_way
    subs w5, w5, #1                     //; Decrement set
    b.ge invalidateCaches_flush_set

invalidateCaches_end:
    ret

.end
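For readability, here is a hedged C rendering of the same set/way walk, written against the architecturally documented CLIDR/CCSIDR field encoding (which differs in small details from the assembly above). The system-register accesses are shown as assumed helper functions, so this is an illustration rather than a drop-in replacement.

#include <stdint.h>

extern uint64_t read_clidr(void);              /* assumed wrapper for mrs CLIDR_EL1 */
extern uint64_t read_ccsidr(void);             /* assumed wrapper for mrs CCSIDR_EL1 */
extern void     select_cache(uint32_t csselr); /* msr CSSELR_EL1, then isb */
extern void     dc_cisw(uint64_t setway);      /* dc cisw, <reg> */

void invalidate_dcaches_sketch(void)
{
    uint64_t clidr = read_clidr();
    uint32_t loc = (uint32_t)(clidr >> 24) & 0x7;          /* level of coherency */

    for (uint32_t level = 0; level < loc; level++) {
        uint32_t ctype = (uint32_t)(clidr >> (level * 3)) & 0x7;
        if (ctype < 2)                                      /* no data/unified cache here */
            continue;

        select_cache(level << 1);                           /* select data/unified cache */
        uint64_t ccsidr = read_ccsidr();
        uint32_t set_shift = ((uint32_t)ccsidr & 0x7) + 4;  /* log2(line size in bytes) */
        uint32_t ways = (uint32_t)(ccsidr >> 3) & 0x3FF;    /* associativity - 1 */
        uint32_t sets = (uint32_t)(ccsidr >> 13) & 0x7FFF;  /* number of sets - 1 */
        uint32_t way_shift = ways ? (uint32_t)__builtin_clz(ways) : 0;

        for (int32_t set = (int32_t)sets; set >= 0; set--)
            for (int32_t way = (int32_t)ways; way >= 0; way--)
                dc_cisw(((uint64_t)level << 1) |
                        ((uint64_t)set << set_shift) |
                        ((uint64_t)way << way_shift));
    }
}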
dda119141/ultrascale_bootloader
2,008
src/lib/bootup/xil-crt0.S
/******************************************************************************
* Copyright (C) 2014 - 2022 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file xil-crt0.S
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver   Who  Date     Changes
* ----- ---- -------- ---------------------------------------------------
* 5.00  pkp  05/21/14 Initial version
* 5.04  pkp  12/18/15 Initialized global constructor for C++ applications
* 5.04  pkp  01/05/16 Set the reset vector register RVBAR equivalent to
*                     vector table base address
* 6.02  pkp  01/22/17 Added support for EL1 non-secure
* 6.6   srm  10/18/17 Added timer configuration using XTime_StartTTCTimer API.
*                     Now the TTC instance as specified by the user will be
*                     started.
* 6.6   mus  01/29/18 Initialized the xen PV console for Cortexa53 64 bit
*                     EL1 NS BSP.
* 7.2   sd   02/23/20 Clock Init is called
* 7.2   sd   02/23/20 Clock code added under XCLOCKING flag
* 7.7   mus  01/06/22 Added call to Xil_SetTlbAttributes to set correct
*                     attributes for GIC in case of Xen domU guest application.
*                     It fixes CR#974078.
* 8.0   mus  07/06/21 Added support for VERSAL NET
* 8.0   mus  06/27/22 Enabled PMU counter.
* </pre>
*
* @note
*
* None.
*
******************************************************************************/

#include "xparameters.h"
#include "bspconfig.h"

    .file "xil-crt0.S"
    .globl _startup

_startup:
    mov x0, #0

    /* clear bss */
    ldr x5, =__bss_start__
    mov x9, #0
    ldr x9, =__bss_end__
    sub x6, x9, x5          /* calculate bss size in bytes */
    lsr x6, x6, #3          /* str xzr clears 8 bytes per pass, so count double words */
    /* ldr w6, =__bss_size */
1:
    cbz w6, 2f
    str xzr, [x5], #8
    sub w6, w6, #1
    cbnz w6, 1b

    /* make sure argc and argv are valid */
2:
    mov x0, #0
    mov x1, #0

    bl main                 /* Jump to main C code */
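A hedged C equivalent of the startup contract above: __bss_start__ and __bss_end__ come from the linker script, the region between them is zeroed, and main() is entered with argc = argv = 0.

#include <string.h>

extern char __bss_start__[], __bss_end__[];   /* provided by the linker script */
extern int main(int argc, char **argv);

void startup_sketch(void)
{
    memset(__bss_start__, 0, (size_t)(__bss_end__ - __bss_start__));
    main(0, 0);   /* argc and argv are deliberately zeroed, as in the assembly */
}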
dda119141/ultrascale_bootloader
7,534
src/lib/bootup/asm_vectors.S
/****************************************************************************** * Copyright (c) 2014 - 2021 Xilinx, Inc. All rights reserved. * SPDX-License-Identifier: MIT ******************************************************************************/ /*****************************************************************************/ /** * @file asm_vectors.S * * This file contains the initial vector table for the Cortex A53 processor * * <pre> * MODIFICATION HISTORY: * * Ver Who Date Changes * ----- ------- -------- --------------------------------------------------- * @note * * None. * ******************************************************************************/ #include "bspconfig.h" .org 0 .text .globl _boot .globl _vector_table .globl FIQInterrupt .globl IRQInterrupt .globl SErrorInterrupt .globl SynchronousInterrupt .globl FPUStatus /* * FPUContextSize is the size of the array where floating point registers are * stored when required. The default size corresponds to the case when there is no * nested interrupt. If there are nested interrupts in application which are using * floating point operation, the size of FPUContextSize need to be increased as per * requirement */ .set FPUContextSize, 528 .macro macro_saveregister stp X0,X1, [sp,#-0x10]! stp X2,X3, [sp,#-0x10]! stp X4,X5, [sp,#-0x10]! stp X6,X7, [sp,#-0x10]! stp X8,X9, [sp,#-0x10]! stp X10,X11, [sp,#-0x10]! stp X12,X13, [sp,#-0x10]! stp X14,X15, [sp,#-0x10]! stp X16,X17, [sp,#-0x10]! stp X18,X19, [sp,#-0x10]! stp X29,X30, [sp,#-0x10]! .endm .macro macro_restoreregister ldp X29,X30, [sp], #0x10 ldp X18,X19, [sp], #0x10 ldp X16,X17, [sp], #0x10 ldp X14,X15, [sp], #0x10 ldp X12,X13, [sp], #0x10 ldp X10,X11, [sp], #0x10 ldp X8,X9, [sp], #0x10 ldp X6,X7, [sp], #0x10 ldp X4,X5, [sp], #0x10 ldp X2,X3, [sp], #0x10 ldp X0,X1, [sp], #0x10 .endm .macro macro_savefloatregister /* Load the floating point context array address from FPUContextBase */ ldr x1,=FPUContextBase ldr x0, [x1] /* Save all the floating point register to the array */ stp q0,q1, [x0], #0x20 stp q2,q3, [x0], #0x20 stp q4,q5, [x0], #0x20 stp q6,q7, [x0], #0x20 stp q8,q9, [x0], #0x20 stp q10,q11, [x0], #0x20 stp q12,q13, [x0], #0x20 stp q14,q15, [x0], #0x20 stp q16,q17, [x0], #0x20 stp q18,q19, [x0], #0x20 stp q20,q21, [x0], #0x20 stp q22,q23, [x0], #0x20 stp q24,q25, [x0], #0x20 stp q26,q27, [x0], #0x20 stp q28,q29, [x0], #0x20 stp q30,q31, [x0], #0x20 mrs x2, FPCR mrs x3, FPSR stp x2, x3, [x0], #0x10 /* Save current address of floating point context array to FPUContextBase */ str x0, [x1] .endm .macro macro_restorefloatregister /* Restore the address of floating point context array from FPUContextBase */ ldr x1,=FPUContextBase ldr x0, [x1] /* Restore all the floating point register from the array */ ldp x2, x3, [x0,#-0x10]! msr FPCR, x2 msr FPSR, x3 ldp q30,q31, [x0,#-0x20]! ldp q28,q29, [x0,#-0x20]! ldp q26,q27, [x0,#-0x20]! ldp q24,q25, [x0,#-0x20]! ldp q22,q23, [x0,#-0x20]! ldp q20,q21, [x0,#-0x20]! ldp q18,q19, [x0,#-0x20]! ldp q16,q17, [x0,#-0x20]! ldp q14,q15, [x0,#-0x20]! ldp q12,q13, [x0,#-0x20]! ldp q10,q11, [x0,#-0x20]! ldp q8,q9, [x0,#-0x20]! ldp q6,q7, [x0,#-0x20]! ldp q4,q5, [x0,#-0x20]! ldp q2,q3, [x0,#-0x20]! ldp q0,q1, [x0,#-0x20]! 
/* Save current address of floating point context array to FPUContextBase */ str x0, [x1] .endm .macro macro_exception_return eret .endm /*Each entry is 128 bytes long */ .org 0 .section .vectors, "a" _vector_table: .set VBAR, _vector_table .org VBAR b _boot .org (VBAR + 0x200) b SynchronousInterruptHandler .org (VBAR + 0x280) b IRQInterruptHandler .org (VBAR + 0x300) b FIQInterruptHandler .org (VBAR + 0x380) b SErrorInterruptHandler /* The exception handler for the synchronous * exception from a lower EL or same EL */ SynchronousInterruptHandler: macro_saveregister /* Check if the Synchronous abort is occurred due to floating point access. */ .if (EL3 == 1) mrs x0, ESR_EL3 .endif and x0, x0, #(0x3F << 26) mov x1, #(0x7 << 26) cmp x0, x1 /* If exception is not due to floating point access go to synchronous handler */ bne subroutine_synchronoushandler /* * If exception occurred due to floating point access, Enable the floating point * access i.e. do not trap floating point instruction */ .if (EL3 == 1) mrs x1,CPTR_EL3 bic x1, x1, #(0x1<<10) msr CPTR_EL3, x1 .endif isb /* If the floating point access was previously enabled, store FPU context * registers(storefloat). */ ldr x0, =FPUStatus ldrb w1,[x0] cbnz w1, subroutine_storefloat /* * If the floating point access was not enabled previously, save the status of * floating point accessibility i.e. enabled and store floating point context * array address(FPUContext) to FPUContextBase. */ mov w1, #0x1 strb w1, [x0] ldr x0, =FPUContext ldr x1, =FPUContextBase str x0,[x1] b subroutine_restorecontext /* The exception handler for the IRQ exception * exception from a lower EL or same EL */ IRQInterruptHandler: macro_saveregister /* Save the status of SPSR, ELR and CPTR to stack */ .if (EL3 == 1) mrs x0, CPTR_EL3 mrs x1, ELR_EL3 mrs x2, SPSR_EL3 .endif stp x0, x1, [sp,#-0x10]! str x2, [sp,#-0x10]! /* Trap floating point access */ .if (EL3 == 1) mrs x1,CPTR_EL3 orr x1, x1, #(0x1<<10) msr CPTR_EL3, x1 .endif isb bl IRQInterrupt /* * If floating point access is enabled during interrupt handling, * restore floating point registers. */ .if (EL3 == 1) mrs x0, CPTR_EL3 ands x0, x0, #(0x1<<10) bne RestorePrevState .endif macro_restorefloatregister /* Restore the status of SPSR, ELR and CPTR from stack */ RestorePrevState: ldr x2,[sp],0x10 ldp x0, x1, [sp],0x10 .if (EL3 == 1) msr CPTR_EL3, x0 msr ELR_EL3, x1 msr SPSR_EL3, x2 .endif macro_restoreregister macro_exception_return /* The exception handler for the FIQ * exception from a lower EL or same EL */ FIQInterruptHandler: macro_saveregister /* Save the status of SPSR, ELR and CPTR to stack */ .if (EL3 == 1) mrs x0, CPTR_EL3 mrs x1, ELR_EL3 mrs x2, SPSR_EL3 .endif stp x0, x1, [sp,#-0x10]! str x2, [sp,#-0x10]! /* Trap floating point access */ .if (EL3 == 1) mrs x1,CPTR_EL3 orr x1, x1, #(0x1<<10) msr CPTR_EL3, x1 isb bl FIQInterrupt .endif /* * If floating point access is enabled during interrupt handling, * restore floating point registers. 
*/ .if (EL3 == 1) mrs x0, CPTR_EL3 ands x0, x0, #(0x1<<10) bne subroutine_restorePrevStatefiq .endif macro_restorefloatregister /* The exception handler for the system error * exception from a lower EL or same EL */ SErrorInterruptHandler: macro_saveregister bl SErrorInterrupt macro_restoreregister macro_exception_return /* store floating point context */ subroutine_storefloat: macro_savefloatregister b subroutine_restorecontext /* synchronous handler */ subroutine_synchronoushandler: bl SynchronousInterrupt subroutine_restorecontext: macro_restoreregister macro_exception_return /* Restore the status of SPSR, ELR and CPTR from stack */ subroutine_restorePrevStatefiq: ldr x2,[sp],0x10 ldp x0, x1, [sp],0x10 .if (EL3 == 1) msr CPTR_EL3, x0 msr ELR_EL3, x1 msr SPSR_EL3, x2 .endif macro_restoreregister macro_exception_return .align 8 /* Array to store floating point registers */ FPUContext: .skip FPUContextSize /* Stores address for floating point context array */ FPUContextBase: .skip 8 FPUStatus: .skip 1 .end
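The synchronous-exception path above implements lazy FPU management at EL3: a trap whose ESR exception class is 0x07 (trapped FP/SIMD access) re-enables the FPU by clearing CPTR_EL3.TFP, saving the live FP registers first if some context already owned them. A hedged C outline of that decision logic follows; the register-access helpers and the handler signature are assumptions.

#include <stdint.h>

#define ESR_EC_MASK   (0x3Fu << 26)
#define ESR_EC_FPSIMD (0x07u << 26)
#define CPTR_TFP      (1u << 10)

extern uint64_t read_esr_el3(void);            /* assumed mrs wrapper */
extern uint64_t read_cptr_el3(void);
extern void     write_cptr_el3(uint64_t v);
extern void     save_fpu_context(void);        /* stores q0-q31, FPCR, FPSR */
extern void     SynchronousInterrupt(void);    /* C handler named in the .S (signature assumed) */

static int fpu_status;                          /* mirrors the FPUStatus byte */

void synchronous_exception_sketch(void)
{
    if ((read_esr_el3() & ESR_EC_MASK) != ESR_EC_FPSIMD) {
        SynchronousInterrupt();                 /* not an FP trap: ordinary handler */
        return;
    }
    write_cptr_el3(read_cptr_el3() & ~CPTR_TFP);/* stop trapping FP instructions */
    if (fpu_status)
        save_fpu_context();                     /* a previous owner's registers are live */
    else
        fpu_status = 1;                         /* first use: just mark the FPU as live */
}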
dda119141/ultrascale_bootloader
7,832
src/lib/bootup/boot.S
/****************************************************************************** * Copyright (c) 2014 - 2022 Xilinx, Inc. All rights reserved. * SPDX-License-Identifier: MIT ******************************************************************************/ /*****************************************************************************/ /** * @file boot.S * * @addtogroup a53_64_boot_code Cortex A53 64bit Processor Boot Code * @{ * <h2> boot.S </h2> * * The boot code performs minimum configuration which is required for an * application. Cortex-A53 starts by checking current exception level. If the * current exception level is EL3 and BSP is built for EL3, it will do * initialization required for application execution at EL3. Below is a * sequence illustrating what all configuration is performed before control * reaches to main function for EL3 execution. * * 1. Program vector table base for exception handling * 2. Set reset vector table base address * 3. Program stack pointer for EL3 * 4. Routing of interrupts to EL3 * 5. Enable ECC protection * 6. Program generic counter frequency * 7. Invalidate instruction cache, data cache and TLBs * 8. Configure MMU registers and program base address of translation table * 9. Transfer control to _start which clears BSS sections and runs global * constructor before jumping to main application * * If the current exception level is EL1 and BSP is also built for EL1_NONSECURE * it will perform initialization required for application execution at EL1 * non-secure. For all other combination, the execution will go into infinite * loop. Below is a sequence illustrating what all configuration is performed * before control reaches to main function for EL1 execution. * * 1. Program vector table base for exception handling * 2. Program stack pointer for EL1 * 3. Invalidate instruction cache, data cache and TLBs * 4. Configure MMU registers and program base address of translation table * 5. Transfer control to _start which clears BSS sections and runs global * constructor before jumping to main application * * <pre> * MODIFICATION HISTORY: * * Ver Who Date Changes * ----- ------- -------- --------------------------------------------------- * 5.00 pkp 05/21/14 Initial version * 6.00 pkp 07/25/16 Program the counter frequency * 6.02 pkp 01/22/17 Added support for EL1 non-secure * 6.02 pkp 01/24/17 Clearing status of FPUStatus variable to ensure it * holds correct value. * 6.3 mus 04/20/17 CPU Cache protection bit in the L2CTLR_EL1 will be in * set state on reset. So, setting that bit through boot * code is redundant, hence removed the code which sets * CPU cache protection bit. * 6.4 mus 08/11/17 Implemented ARM erratum 855873.It fixes * CR#982209. * 6.6 mus 01/19/18 Added isb after writing to the cpacr_el1/cptr_el3, * to ensure floating-point unit is disabled, before * any subsequent instruction. * 7.0 mus 03/26/18 Updated TCR_EL3/TCR_EL1 as per versal address map * 7.3 mus 04/24/20 Corrected CPACR_EL1 handling at EL1 NS * 8.0 mus 07/06/21 Added support for CortexA78 processor in VERSAL NET SoC * 8.0 mus 10/05/21 Default translation table for VERSAL NET has been configured * for 256 TB address space, due to this page tables size * exceeds OCM size, hence executable size is too large * to fit into OCM. This patch adds option to reduce * page table size, when OCM_ELF flag is defined in * compiler flags, translation table would be configured * for 1 TB address space. It would help to reduce * executable size. 
* * </pre> * ******************************************************************************/ #include "xparameters.h" #include "bspconfig.h" #include "xil_errata.h" .globl MMUTableL0 .globl MMUTableL1 .globl MMUTableL2 .global _prestart .global _boot .global __el3_stack .global _vector_table .global invalidate_dcaches .set EL3_stack, __el3_stack .set TT_S1_FAULT, 0x0 .set TT_S1_TABLE, 0x3 .set L0Table, MMUTableL0 .set L1Table, MMUTableL1 .set L2Table, MMUTableL2 .set vector_base, _vector_table .set rvbar_base, 0xFD5C0040 .set counterfreq, XPAR_CPU_CORTEXA53_0_TIMESTAMP_CLK_FREQ .set MODE_EL1, 0x5 .set DAIF_BIT, 0x1C0 .section .boot,"ax" /* this initializes the various processor modes */ _prestart: _boot: mov x0, #0 mov x1, #0 mov x2, #0 mov x3, #0 mov x4, #0 mov x5, #0 mov x6, #0 mov x7, #0 mov x8, #0 mov x9, #0 mov x10, #0 mov x11, #0 mov x12, #0 mov x13, #0 mov x14, #0 mov x15, #0 mov x16, #0 mov x17, #0 mov x18, #0 mov x19, #0 mov x20, #0 mov x21, #0 mov x22, #0 mov x23, #0 mov x24, #0 mov x25, #0 mov x26, #0 mov x27, #0 mov x28, #0 mov x29, #0 mov x30, #0 OKToRun: mrs x0, currentEL cmp x0, #0xC beq InitEL3 b error // go to error if current exception level is neither EL3 nor EL1 error: b error InitEL3: /*Set vector table base address*/ ldr x1, = vector_base msr VBAR_EL3,x1 /* Set reset vector address */ /* Get the cpu ID */ mrs x0, MPIDR_EL1 and x0, x0, #0xFF mov w0, w0 ldr w2, =rvbar_base /* calculate the rvbar base address for particular CPU core */ mov w3, #0x8 mul w0, w0, w3 add w2, w2, w0 /* store vector base address to RVBAR */ str x1, [x2] /*Define stack pointer for current exception level*/ ldr x2,=EL3_stack mov sp,x2 /* Enable Trapping of SIMD/FPU register for standalone BSP */ mov x0, #0 orr x0, x0, #(0x1 << 10) msr CPTR_EL3, x0 isb /* * Clear FPUStatus variable to make sure that it contains current * status of FPU i.e. disabled. In case of a warm restart execution * when bss sections are not cleared, it may contain previously updated * value which does not hold true now. */ ldr x0,=FPUStatus str xzr, [x0] /* Asynchronous exception routing determines which exception level is used to * handle an exception. * To route an asynchronous exception to EL3, SCR_EL3 needs to be set */ /* Configure SCR_EL3 */ mov w1, #0 //; Initial value of register is unknown orr w1, w1, #(1 << 11) //; Set ST bit (Secure EL1 can access CNTPS_TVAL_EL1, CNTPS_CTL_EL1 & CNTPS_CVAL_EL1) orr w1, w1, #(1 << 10) //; Set RW bit (EL1 is AArch64, as this is the Secure world) orr w1, w1, #(1 << 3) //; Set EA bit (SError routed to EL3) orr w1, w1, #(1 << 2) //; Set FIQ bit (FIQs routed to EL3) orr w1, w1, #(1 << 1) //; Set IRQ bit (IRQs routed to EL3) msr SCR_EL3, x1 /*configure cpu auxiliary control register EL1 */ ldr x0,=0x80CA000 // L1 Data prefetch control - 5, Enable device split throttle, 2 independent data prefetch streams #if CONFIG_ARM_ERRATA_855873 /* * Set ENDCCASCI bit in CPUACTLR_EL1 register, to execute data * cache clean operations as data cache clean and invalidate * */ orr x0, x0, #(1 << 44) //; Set ENDCCASCI bit #endif msr S3_1_C15_C2_0, x0 //CPUACTLR_EL1 /* program the counter frequency */ ldr x0,=counterfreq msr CNTFRQ_EL0, x0 /*Enable hardware coherency between cores*/ mrs x0, S3_1_c15_c2_1 //Read EL1 CPU Extended Control Register orr x0, x0, #(1 << 6) //Set the SMPEN bit msr S3_1_c15_c2_1, x0 //Write EL1 CPU Extended Control Register isb tlbi ALLE3 ic IALLU //; Invalidate I cache to PoU bl invalidate_dcaches dsb sy isb b _startup //jump to start
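Two values the boot code computes can be stated compactly: the per-core RVBAR register address (rvbar_base + 8 * cpu_id, with cpu_id taken from MPIDR_EL1[7:0]) and the SCR_EL3 routing word. The constants below are copied from the assembly; the helper functions are illustrative only.

#include <stdint.h>

#define RVBAR_BASE 0xFD5C0040u

static inline uintptr_t rvbar_address(uint64_t mpidr)
{
    uint32_t cpu_id = (uint32_t)(mpidr & 0xFF);
    return RVBAR_BASE + 8u * cpu_id;   /* one 64-bit RVBAR slot per core */
}

static inline uint32_t scr_el3_value(void)
{
    return (1u << 11)   /* ST:  Secure EL1 can access the secure physical timer */
         | (1u << 10)   /* RW:  EL1 executes in AArch64 */
         | (1u << 3)    /* EA:  route SError to EL3 */
         | (1u << 2)    /* FIQ: route FIQs to EL3 */
         | (1u << 1);   /* IRQ: route IRQs to EL3 */
}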
dda119141/ultrascale_bootloader
10,384
src/lib/bootup/xfsbl_translation_table_a53_64.S
/****************************************************************************** * * Copyright (c) 2014 - 2020 Xilinx, Inc. All rights reserved. * SPDX-License-Identifier: MIT ******************************************************************************/ /*****************************************************************************/ /** * @file xfsbl_translation_table.S * * This file contains the initialization for the MMU table in RAM * needed by the Cortex A53 processor (64-bit) * This file is FSBL's local copy of the BSP's file * * <pre> * MODIFICATION HISTORY: * * Ver Who Date Changes * ----- ---- -------- --------------------------------------------------- * 5.00 pkp 05/21/14 Initial version * 5.04 pkp 12/18/15 Updated the address map according to proper address map * 6.0 mus 07/20/16 Added warning for ddrless HW design * 6.2 pkp 11/14/16 DDR memory in 0x800000000 - 0xFFFFFFFFF range is marked * as normal writeback for the size defined in hdf and rest * of the memory in that 32GB range is marked as reserved. * * @note * * None. * ******************************************************************************/ #if ! defined (__clang__) #include "xparameters.h" .globl MMUTableL0 .globl MMUTableL1 .globl MMUTableL2 .set reserved, 0x0 /* Fault*/ .set Memory, 0x405 | (3 << 8) | (0x0) /* normal writeback write allocate inner shared read write */ .set Device, 0x409 | (1 << 53)| (1 << 54) |(0x0) /* strongly ordered read write non executable*/ .section .mmu_tbl0,"a" MMUTableL0: .set SECT, MMUTableL1 /* 0x0000_0000 - 0x7F_FFFF_FFFF */ .8byte SECT + 0x3 .set SECT, MMUTableL1+0x1000 /* 0x80_0000_0000 - 0xFF_FFFF_FFFF */ .8byte SECT + 0x3 .section .mmu_tbl1,"a" MMUTableL1: .set SECT, MMUTableL2 /* 0x0000_0000 - 0x3FFF_FFFF */ .8byte SECT + 0x3 /* 1GB DDR */ .rept 0x3 /* 0x4000_0000 - 0xFFFF_FFFF */ .set SECT, SECT + 0x1000 /*1GB DDR, 1GB PL, 2GB other devices n memory */ .8byte SECT + 0x3 .endr .set SECT,0x100000000 .rept 0xC /* 0x0001_0000_0000 - 0x0003_FFFF_FFFF */ .8byte SECT + reserved /* 12GB Reserved */ .set SECT, SECT + 0x40000000 .endr .rept 0x10 /* 0x0004_0000_0000 - 0x0007_FFFF_FFFF */ .8byte SECT + Device /* 8GB PL, 8GB PCIe */ .set SECT, SECT + 0x40000000 .endr #ifdef XPAR_PSU_DDR_1_S_AXI_BASEADDR .set DDR_1_START, XPAR_PSU_DDR_1_S_AXI_BASEADDR .set DDR_1_END, XPAR_PSU_DDR_1_S_AXI_HIGHADDR .set DDR_1_SIZE, (DDR_1_END - DDR_1_START)+1 .if DDR_1_SIZE > 0x800000000 /* If DDR size is larger than 32GB, truncate to 32GB */ .set DDR_1_REG, 0x20 .else .set DDR_1_REG, DDR_1_SIZE/0x40000000 .endif #else .set DDR_1_REG, 0 #warning "There's no DDR_1 in the HW design. 
MMU translation table marks 32 GB DDR address space as undefined" #endif .set UNDEF_1_REG, 0x20 - DDR_1_REG .rept DDR_1_REG /* DDR based on size in hdf*/ .8byte SECT + reserved /* this region marked as Memory after DDR init */ .set SECT, SECT+0x40000000 .endr .rept UNDEF_1_REG /* reserved for region where ddr is absent */ .8byte SECT + reserved .set SECT, SECT + 0x40000000 .endr .rept 0x1C0 /* 0x0010_0000_0000 - 0x007F_FFFF_FFFF */ .8byte SECT + Device /* 448 GB PL */ .set SECT, SECT + 0x40000000 .endr .rept 0x100 /* 0x0080_0000_0000 - 0x00BF_FFFF_FFFF */ .8byte SECT + Device /* 256GB PCIe */ .set SECT, SECT + 0x40000000 .endr .rept 0x100 /* 0x00C0_0000_0000 - 0x00FF_FFFF_FFFF */ .8byte SECT + reserved /* 256GB reserved */ .set SECT, SECT + 0x40000000 .endr .section .mmu_tbl2,"a" MMUTableL2: .set SECT, 0 #ifdef XPAR_PSU_DDR_0_S_AXI_BASEADDR .set DDR_0_START, XPAR_PSU_DDR_0_S_AXI_BASEADDR .set DDR_0_END, XPAR_PSU_DDR_0_S_AXI_HIGHADDR .set DDR_0_SIZE, (DDR_0_END - DDR_0_START)+1 .if DDR_0_SIZE > 0x80000000 /* If DDR size is larger than 2GB, truncate to 2GB */ .set DDR_0_REG, 0x400 .else .set DDR_0_REG, DDR_0_SIZE/0x200000 .endif #else .set DDR_0_REG, 0 #warning "There's no DDR_0 in the HW design. MMU translation table marks 2 GB DDR address space as undefined" #endif .set UNDEF_0_REG, 0x400 - DDR_0_REG .rept DDR_0_REG /* DDR based on size in hdf*/ .8byte SECT + reserved /* this region marked as Memory after DDR init */ .set SECT, SECT+0x200000 .endr .rept UNDEF_0_REG /* reserved for region where ddr is absent */ .8byte SECT + reserved .set SECT, SECT+0x200000 .endr .rept 0x0200 /* 0x8000_0000 - 0xBFFF_FFFF */ .8byte SECT + Device /* 1GB lower PL */ .set SECT, SECT+0x200000 .endr .rept 0x0100 /* 0xC000_0000 - 0xDFFF_FFFF */ .8byte SECT + Device /* 512MB QSPI */ .set SECT, SECT+0x200000 .endr .rept 0x080 /* 0xE000_0000 - 0xEFFF_FFFF */ .8byte SECT + Device /* 256MB lower PCIe */ .set SECT, SECT+0x200000 .endr .rept 0x040 /* 0xF000_0000 - 0xF7FF_FFFF */ .8byte SECT + reserved /* 128MB Reserved */ .set SECT, SECT+0x200000 .endr .rept 0x8 /* 0xF800_0000 - 0xF8FF_FFFF */ .8byte SECT + Device /* 16MB coresight */ .set SECT, SECT+0x200000 .endr /* 1MB RPU LLP is marked for 2MB region as the minimum block size in translation table is 2MB and adjacent 63MB reserved region is converted to 62MB */ .rept 0x1 /* 0xF900_0000 - 0xF91F_FFFF */ .8byte SECT + Device /* 2MB RPU low latency port */ .set SECT, SECT+0x200000 .endr .rept 0x1F /* 0xF920_0000 - 0xFCFF_FFFF */ .8byte SECT + reserved /* 62MB Reserved */ .set SECT, SECT+0x200000 .endr .rept 0x8 /* 0xFD00_0000 - 0xFDFF_FFFF */ .8byte SECT + Device /* 16MB FPS */ .set SECT, SECT+0x200000 .endr .rept 0xE /* 0xFE00_0000 - 0xFFBF_FFFF */ .8byte SECT + Device /* 28MB LPS */ .set SECT, SECT+0x200000 .endr /* 0xFFC0_0000 - 0xFFDF_FFFF */ .8byte SECT + Device /*2MB PMU/CSU */ .set SECT, SECT+0x200000 /* 0xFFE0_0000 - 0xFFFF_FFFF*/ .8byte SECT + Memory /*2MB OCM/TCM*/ .end #else #include "xparameters.h" EXPORT MMUTableL0 EXPORT MMUTableL1 EXPORT MMUTableL2 GBLA abscnt GBLA count GBLA sect ; Fault Reserved EQU 0 Memory EQU 0x405:OR:(3:SHL:8):OR:0x0 ; Normal writeback write allocate inner shared read write Device EQU 0x409:OR:(1:SHL:53):OR:(1:SHL:54):OR:0x0 ; Strongly ordered read write non executable AREA |.mmu_tbl0|, CODE, ALIGN=12 MMUTableL0 DCQU MMUTableL1+0x3 ; 0x0000_0000 - 0x7F_FFFF_FFFF DCQU MMUTableL1+0x1000+0x3 ; 0x80_0000_0000 - 0xFF_FFFF_FFFF AREA |.mmu_tbl1|, CODE, ALIGN=12 MMUTableL1 ; ; 0x4000_0000 - 0xFFFF_FFFF ; 1GB DDR, 1GB PL, 2GB other devices n 
memory ; count SETA 0 WHILE count<0x4 DCQU MMUTableL2+count*0x1000+0x3 count SETA count+1 WEND Fixlocl1 EQU 0x100000000 abscnt SETA 0 ; ; 0x0001_0000_0000 - 0x0003_FFFF_FFFF ; 12GB Reserved ; count SETA 0 WHILE count<0xc DCQU Fixlocl1+abscnt*0x40000000+Reserved count SETA count+1 abscnt SETA abscnt+1 WEND ; ; 0x0004_0000_0000 - 0x0007_FFFF_FFFF ; 8GB PL, 8GB PCIe ; count SETA 0 WHILE count<0x10 DCQU Fixlocl1+abscnt*0x40000000+Device count SETA count+1 abscnt SETA abscnt+1 WEND DDR_1_START EQU 0x800000000 DDR_1_END EQU 0x87FFFFFFF DDR_1_SIZE EQU (DDR_1_END - DDR_1_START + 1) DDR_1_REG EQU DDR_1_SIZE / 0x40000000 UNDEF_1_REG EQU (0x20 - DDR_1_REG) ; DDR based on size in hdf count SETA 0 WHILE count<DDR_1_REG DCQU Fixlocl1+abscnt*0x40000000+Reserved count SETA count+1 abscnt SETA abscnt+1 WEND ; Reserved for region where ddr is absent count SETA 0 WHILE count<UNDEF_1_REG DCQU Fixlocl1+abscnt*0x40000000+Reserved count SETA count+1 abscnt SETA abscnt+1 WEND ; ; 0x0010_0000_0000 - 0x007F_FFFF_FFFF ; 448 GB PL ; count SETA 0 WHILE count<0x1C0 DCQU Fixlocl1 + abscnt * 0x40000000 + Device count SETA count+1 abscnt SETA abscnt+1 WEND ; ; 0x0080_0000_0000 - 0x00BF_FFFF_FFFF ; 256GB PCIe ; count SETA 0 WHILE count<0x100 DCQU Fixlocl1+abscnt*0x40000000+Device count SETA count+1 abscnt SETA abscnt+1 WEND ; ; 0x00C0_0000_0000 - 0x00FF_FFFF_FFFF ; 256GB Reserved ; count SETA 0 WHILE count<0x100 DCQU Fixlocl1+abscnt*0x40000000+Reserved count SETA count+1 abscnt SETA abscnt+1 WEND AREA |.mmu_tbl2|, CODE, ALIGN=12 MMUTableL2 abscnt SETA 0 DDR_0_START EQU 0x00000000 DDR_0_END EQU 0x7FFFFFFF DDR_0_SIZE EQU (DDR_0_END - DDR_0_START + 1) DDR_0_REG EQU DDR_0_SIZE / 0x200000 UNDEF_0_REG EQU (0x400 - DDR_0_REG) ; DDR based on size in hdf count SETA 0 WHILE count<DDR_0_REG DCQU abscnt*0x200000+Reserved ; this region marked as Memory after DDR init count SETA count+1 abscnt SETA abscnt+1 WEND ; Reserved for region where ddr is absent count SETA 0 WHILE count<UNDEF_0_REG DCQU abscnt*0x200000+Reserved count SETA count+1 abscnt SETA abscnt+1 WEND ; ; 0x8000_0000 - 0xBFFF_FFFF ; 1GB lower PL ; count SETA 0 WHILE count<0x0200 DCQU abscnt*0x200000+Device count SETA count+1 abscnt SETA abscnt+1 WEND ; ; 0xC000_0000 - 0xDFFF_FFFF ; 512MB QSPI ; count SETA 0 WHILE count<0x0100 DCQU abscnt*0x200000+Device count SETA count+1 abscnt SETA abscnt+1 WEND ; ; 0xE000_0000 - 0xEFFF_FFFF ; 256MB lower PCIe ; count SETA 0 WHILE count<0x080 DCQU abscnt*0x200000+Device count SETA count+1 abscnt SETA abscnt+1 WEND ; ; 0xF000_0000 - 0xF7FF_FFFF ; 128MB Reserved ; count SETA 0 WHILE count<0x040 DCQU abscnt*0x200000+Reserved count SETA count+1 abscnt SETA abscnt+1 WEND ; ; 0xF800_0000 - 0xF8FF_FFFF ; 16MB Coresight ; count SETA 0 WHILE count<0x8 DCQU abscnt*0x200000+Device count SETA count+1 abscnt SETA abscnt+1 WEND ; ; 1MB RPU LLP is marked for 2MB region as the minimum block size in translation ; table is 2MB and adjacent 63MB reserved region is converted to 62MB ; ; ; 0xF900_0000 - 0xF91F_FFFF ; 2MB RPU low latency port ; count SETA 0 WHILE count<0x1 DCQU abscnt*0x200000+Device count SETA count+1 abscnt SETA abscnt+1 WEND ; ; 0xF920_0000 - 0xFCFF_FFFF ; 62MB Reserved ; count SETA 0 WHILE count<0x1f DCQU abscnt*0x200000+Reserved count SETA count+1 abscnt SETA abscnt+1 WEND ; ; 0xFD00_0000 - 0xFDFF_FFFF ; 16MB FPS ; count SETA 0 WHILE count<0x8 DCQU abscnt*0x200000+Device count SETA count+1 abscnt SETA abscnt+1 WEND ; ; 0xFE00_0000 - 0xFFBF_FFFF ; 28MB LPS ; count SETA 0 WHILE count<0xE DCQU abscnt*0x200000+Device count SETA count+1 abscnt 
SETA abscnt+1 WEND ; ; 0xFFC0_0000 - 0xFFDF_FFFF ; 2MB PMU/CSU ; DCQU abscnt*0x200000+Device abscnt SETA abscnt+1 ; ; 0xFFE0_0000 - 0xFFFF_FFFF ; 2MB OCM/TCM ; DCQU abscnt*0x200000+Memory END ; ; @} End of "addtogroup a53_64_boot_code" ; #endif
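Each 8-byte descriptor emitted above is simply a region base plus one of the attribute constants defined at the top of the file. A hedged C sketch of composing a 2MB L2 block descriptor with those exact constants; it adds no encoding beyond what the assembly already uses.

#include <stdint.h>

#define ATTR_RESERVED 0x0ULL                                    /* fault */
#define ATTR_MEMORY   (0x405ULL | (3ULL << 8))                  /* normal write-back, inner shareable, RW */
#define ATTR_DEVICE   (0x409ULL | (1ULL << 53) | (1ULL << 54))  /* device, non-executable (PXN + UXN) */

/* 2MB L2 block descriptor for the region starting at 'base' (must be 2MB aligned). */
static inline uint64_t l2_block(uint64_t base, uint64_t attr)
{
    return (base & ~0x1FFFFFULL) | attr;
}

/* e.g. the OCM/TCM entry at the end of MMUTableL2:
 *   uint64_t ocm = l2_block(0xFFE00000ULL, ATTR_MEMORY);
 */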
dddimcha/embodiOS
5,453
src/embodi/compiler/native/boot.S
/* EMBODIOS Native Boot Code - Multiboot2 Compliant */ .section .multiboot2 .align 8 multiboot2_header: .long 0xe85250d6 /* Magic number */ .long 0 /* Architecture (i386) */ .long multiboot2_header_end - multiboot2_header .long -(0xe85250d6 + 0 + (multiboot2_header_end - multiboot2_header)) /* Information request tag */ .align 8 .short 1 /* Type */ .short 0 /* Flags */ .long 8 /* Size */ /* End tag */ .align 8 .short 0 /* Type */ .short 0 /* Flags */ .long 8 /* Size */ multiboot2_header_end: .section .text .global _start .extern kernel_main _start: /* We're in 32-bit mode initially */ .code32 /* Disable interrupts */ cli /* Save multiboot info */ mov %eax, multiboot_magic mov %ebx, multiboot_info /* Set up stack */ mov $stack_top, %esp /* Clear direction flag */ cld /* Check if we're on x86_64 capable CPU */ call check_cpuid call check_long_mode /* Set up paging for 64-bit mode */ call setup_page_tables call enable_paging /* Load GDT */ lgdt gdt_descriptor /* Jump to 64-bit code */ ljmp $0x08, $start64 .code64 start64: /* Reload segment registers */ mov $0x10, %ax mov %ax, %ds mov %ax, %es mov %ax, %fs mov %ax, %gs mov %ax, %ss /* Set up 64-bit stack */ mov $stack_top, %rsp /* Clear BSS section */ xor %rax, %rax mov $__bss_start, %rdi mov $__bss_end, %rcx sub %rdi, %rcx rep stosb /* Enable CPU features for AI workloads */ call enable_cpu_features /* Initialize EMBODIOS kernel */ call kernel_main /* Halt if kernel returns */ cli 1: hlt jmp 1b /* Check for CPUID support */ .code32 check_cpuid: pushfl pop %eax mov %eax, %ecx xor $0x200000, %eax /* Toggle ID bit */ push %eax popfl pushfl pop %eax push %ecx popfl cmp %ecx, %eax je no_cpuid ret no_cpuid: mov $no_cpuid_msg, %esi call print_string_32 hlt /* Check for long mode support */ check_long_mode: mov $0x80000000, %eax cpuid cmp $0x80000001, %eax jb no_long_mode mov $0x80000001, %eax cpuid test $(1 << 29), %edx /* Check LM bit */ jz no_long_mode ret no_long_mode: mov $no_long_mode_msg, %esi call print_string_32 hlt /* Set up identity-mapped page tables */ setup_page_tables: /* Clear page tables */ mov $page_table_l4, %edi mov %edi, %cr3 xor %eax, %eax mov $4096*3, %ecx rep stosl mov %cr3, %edi /* PML4[0] -> PDPT */ mov $page_table_l3, %eax or $0x03, %eax /* Present + Writable */ mov %eax, (%edi) /* PDPT[0] -> PDT */ mov $page_table_l2, %eax or $0x03, %eax mov %eax, page_table_l3 /* PDT[0] -> 2MB page (identity map first 2MB) */ mov $0x83, %eax /* Present + Writable + Huge */ mov %eax, page_table_l2 ret /* Enable paging and long mode */ enable_paging: /* Enable PAE */ mov %cr4, %eax or $(1 << 5), %eax mov %eax, %cr4 /* Set long mode bit in EFER */ mov $0xC0000080, %ecx rdmsr or $(1 << 8), %eax wrmsr /* Enable paging */ mov %cr0, %eax or $(1 << 31), %eax mov %eax, %cr0 ret /* Print string in 32-bit mode (for debugging) */ print_string_32: pusha mov $0xb8000, %edi /* VGA buffer */ .loop: lodsb test %al, %al jz .done mov $0x0f, %ah /* White on black */ stosw jmp .loop .done: popa ret /* Enable CPU features for AI workloads */ .code64 enable_cpu_features: /* Enable SSE */ mov %cr0, %rax and $~0x04, %rax /* Clear EM */ or $0x02, %rax /* Set MP */ mov %rax, %cr0 mov %cr4, %rax or $0x600, %rax /* Set OSFXSR and OSXMMEXCPT */ mov %rax, %cr4 /* Check and enable AVX if available */ mov $1, %eax cpuid test $(1 << 28), %ecx /* Check AVX bit */ jz .no_avx /* Enable AVX */ xor %rcx, %rcx xgetbv or $0x07, %rax /* Enable SSE, AVX, and x87 */ xsetbv .no_avx: /* Check and enable AVX-512 if available */ mov $7, %eax xor %ecx, %ecx cpuid test $(1 << 16), %ebx 
/* Check AVX512F bit */ jz .no_avx512 /* Enable AVX-512 */ xor %rcx, %rcx xgetbv or $0xe0, %rax /* Enable AVX-512 */ xsetbv .no_avx512: ret /* GDT for 64-bit mode */ .section .rodata .align 16 gdt: .quad 0 /* Null descriptor */ .quad 0x00AF9A000000FFFF /* 64-bit code segment */ .quad 0x00AF92000000FFFF /* 64-bit data segment */ gdt_end: gdt_descriptor: .word gdt_end - gdt - 1 .quad gdt /* Error messages */ no_cpuid_msg: .asciz "ERROR: CPUID not supported" no_long_mode_msg: .asciz "ERROR: 64-bit mode not supported" /* BSS section */ .section .bss .align 16 stack_bottom: .space 1048576 /* 1MB stack */ stack_top: .align 4096 page_table_l4: .space 4096 page_table_l3: .space 4096 page_table_l2: .space 4096 /* Multiboot info storage */ .section .data .align 8 multiboot_magic: .long 0 multiboot_info: .long 0
dddimcha/embodiOS
5,544
kernel/arch/x86_64/boot.S
/* EMBODIOS x86_64 Boot Code - Extended from compiler version */ .section .multiboot2 .align 8 multiboot2_header: .long 0xe85250d6 /* Magic number */ .long 0 /* Architecture (i386) */ .long multiboot2_header_end - multiboot2_header .long -(0xe85250d6 + 0 + (multiboot2_header_end - multiboot2_header)) /* Information request tag */ .align 8 .short 1 /* Type */ .short 0 /* Flags */ .long 8 /* Size */ /* End tag */ .align 8 .short 0 /* Type */ .short 0 /* Flags */ .long 8 /* Size */ multiboot2_header_end: .section .text.boot .global _start .extern kernel_main _start: /* We're in 32-bit mode initially */ .code32 /* Disable interrupts */ cli /* Save multiboot info */ mov %eax, multiboot_magic mov %ebx, multiboot_info /* Set up temporary stack */ mov $boot_stack_top, %esp /* Clear direction flag */ cld /* Check CPU features */ call check_cpuid call check_long_mode /* Set up paging for 64-bit mode */ call setup_page_tables call enable_paging /* Load GDT */ lgdt gdt64_descriptor /* Jump to 64-bit code */ ljmp $0x08, $start64 .code64 .global start64 start64: /* Reload segment registers */ mov $0x10, %ax mov %ax, %ds mov %ax, %es mov %ax, %fs mov %ax, %gs mov %ax, %ss /* Set up kernel stack */ mov $kernel_stack_top, %rsp /* Clear frame pointer */ xor %rbp, %rbp /* Enable CPU features for AI workloads */ call enable_cpu_features /* Jump to higher half */ mov $higher_half_start, %rax jmp *%rax .section .text higher_half_start: /* Reload stack pointer to higher half */ mov $kernel_stack_top, %rsp /* Stack is already set up at physical address */ /* Call kernel main */ call kernel_main /* Should not return */ cli 1: hlt jmp 1b /* Check for CPUID support */ .section .text.boot .code32 check_cpuid: pushfl pop %eax mov %eax, %ecx xor $0x200000, %eax push %eax popfl pushfl pop %eax push %ecx popfl cmp %ecx, %eax je no_cpuid ret no_cpuid: mov $no_cpuid_msg, %esi call print_error_32 hlt /* Check for long mode support */ check_long_mode: mov $0x80000000, %eax cpuid cmp $0x80000001, %eax jb no_long_mode mov $0x80000001, %eax cpuid test $(1 << 29), %edx jz no_long_mode ret no_long_mode: mov $no_long_mode_msg, %esi call print_error_32 hlt /* Set up 4-level page tables */ setup_page_tables: /* Clear page tables */ mov $pml4_table, %edi mov %edi, %cr3 xor %eax, %eax mov $4096*5, %ecx rep stosl mov %cr3, %edi /* PML4[0] -> PDPT (identity map) */ mov $pdpt_table, %eax or $0x03, %eax mov %eax, (%edi) /* PML4[511] -> PDPT (higher half) */ mov $pdpt_table, %eax or $0x03, %eax mov %eax, 511*8(%edi) /* PDPT[0] -> PDT */ mov $pdt_table, %eax or $0x03, %eax mov %eax, pdpt_table /* PDPT[510] -> PDT (for higher half) */ mov $pdt_table, %eax or $0x03, %eax mov %eax, pdpt_table + 510*8 /* Map first 1GB using 2MB pages */ mov $pdt_table, %edi mov $0x83, %eax /* Present + Writable + Huge */ mov $512, %ecx .map_pages: mov %eax, (%edi) add $0x200000, %eax add $8, %edi loop .map_pages ret /* Enable paging and long mode */ enable_paging: /* Enable PAE */ mov %cr4, %eax or $(1 << 5), %eax mov %eax, %cr4 /* Set long mode bit in EFER */ mov $0xC0000080, %ecx rdmsr or $(1 << 8), %eax wrmsr /* Enable paging */ mov %cr0, %eax or $(1 << 31), %eax mov %eax, %cr0 ret /* Print error in 32-bit mode */ print_error_32: pusha mov $0xb8000, %edi .loop: lodsb test %al, %al jz .done mov $0x4f, %ah /* Red on black */ stosw jmp .loop .done: popa ret /* Enable CPU features */ .code64 enable_cpu_features: /* Enable SSE */ mov %cr0, %rax and $~0x04, %rax or $0x02, %rax mov %rax, %cr0 mov %cr4, %rax or $0x600, %rax mov %rax, %cr4 /* Check and enable AVX */ 
mov $1, %eax cpuid test $(1 << 28), %ecx jz .no_avx xor %rcx, %rcx xgetbv or $0x07, %rax xsetbv .no_avx: ret /* Global Descriptor Table */ .section .rodata .align 16 gdt64: .quad 0 /* Null descriptor */ .quad 0x00AF9A000000FFFF /* Code segment */ .quad 0x00AF92000000FFFF /* Data segment */ gdt64_end: gdt64_descriptor: .word gdt64_end - gdt64 - 1 .quad gdt64 /* Error messages */ no_cpuid_msg: .asciz "ERROR: CPUID not supported" no_long_mode_msg: .asciz "ERROR: 64-bit mode not supported" /* BSS section */ .section .bss .align 4096 pml4_table: .space 4096 pdpt_table: .space 4096 pdt_table: .space 4096 pt_table: .space 4096 .align 16 boot_stack_bottom: .space 16384 boot_stack_top: .align 16 .global kernel_stack_bottom kernel_stack_bottom: .space 65536 .global kernel_stack_top kernel_stack_top: /* Data section */ .section .data .align 8 multiboot_magic: .long 0 multiboot_info: .long 0
Deepdive543443/unsafe_asm
1,673
A32/Chapter6/lib/src/stddev.s
.text zero_double: .double 0.0 // void double_stddev(double *arr, int size, double *mean, double *std); .global double_stddev double_stddev: push {r4,r5} cmp r1,#0 ble fin // size > 0 mov r4,#0 // idx init mov r5,r0 // ptr vldr.f64 d0,zero_double // mean init vldr.f64 d1,zero_double // std init mean_loop: vldmia r5!,{d2} vadd.f64 d0,d0,d2 // mean += arr[idx] add r4,#1 cmp r4,r1 blt mean_loop vmov s5,r1 vcvt.f64.s32 d3,s5 vdiv.f64 d0,d0,d3 vstr.f64 d0,[r2] // mean /= size mov r4,#0 // idx init mov r5,r0 stddev_loop: vldmia r5!,{d2} vsub.f64 d3,d0,d2 // d3 = mean - arr[idx] vfma.f64 d1,d3,d3 // d1 += d3 * d3 add r4,#1 cmp r4,r1 blt stddev_loop vmov s5,r1 vcvt.f64.s32 d3,s5 vdiv.f64 d1,d1,d3 vsqrt.f64 d1,d1 vstr.f64 d1,[r3] // d1 /= size fin: pop {r4,r5} bx lr
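For comparison, a minimal C sketch of what double_stddev computes (mean, then population standard deviation), based only on the prototype in the comment above; the _ref name is illustrative and not part of the repo:

#include <math.h>

/* C reference for double_stddev: mean and population standard deviation.
 * Mirrors the assembly's early return when size <= 0. */
void double_stddev_ref(const double *arr, int size, double *mean, double *std) {
    if (size <= 0) return;
    double m = 0.0, acc = 0.0;
    for (int i = 0; i < size; i++) m += arr[i];
    m /= size;
    for (int i = 0; i < size; i++) acc += (m - arr[i]) * (m - arr[i]);
    *mean = m;
    *std = sqrt(acc / size);
}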
Deepdive543443/unsafe_asm
1,029
A32/Chapter2/lib/src/mul_asm.s
.text
.global asm_mul
// int asm_mul(int a, int b);
asm_mul:
    /*
        It's okay to use r14 here. r14, a.k.a. lr, is the link register that
        handles the function's call and return. r14 can also be used as a
        general-purpose register.
    */
    mul r0,r0,r1
    bx r14

.global smull_asm
// long long smull_asm(int a, int b);
smull_asm:
    /*
        SMULL stands for "signed multiply long" and returns a 64-bit result.
        It takes two 32-bit registers to store that 64-bit value. The third and
        fourth args of smull are the operands to multiply (r0 and r1); the
        result is stored in r0 (lower 32 bits) and r1 (higher 32 bits).
    */
    smull r0,r1,r0,r1 // smull lower 32 bit 2store, higher 32 bit 2store, [2Mul, 2Mul]
    bx lr

.global umull_asm
// unsigned long long umull_asm(unsigned int a, unsigned int b);
umull_asm:
    /* Unsigned multiply long, similar to SMULL above, but it takes unsigned args */
    umull r0,r1,r0,r1
    bx lr
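As a quick sanity check, here is a hedged C caller sketch for the three routines above; the prototypes are copied from the comments, and per the AAPCS the 64-bit results are returned in r0 (low word) and r1 (high word). The main() harness is an assumption for illustration only:

#include <stdio.h>

int asm_mul(int a, int b);
long long smull_asm(int a, int b);
unsigned long long umull_asm(unsigned int a, unsigned int b);

int main(void) {
    /* 64-bit results come back in r0 (low) and r1 (high) per the AAPCS. */
    printf("%d\n",   asm_mul(6, 7));                 /* 42 */
    printf("%lld\n", smull_asm(-100000, 100000));    /* -10000000000 */
    printf("%llu\n", umull_asm(0xFFFFFFFFu, 2u));    /* 8589934590 */
    return 0;
}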
Deepdive543443/unsafe_asm
1,303
A32/Chapter2/lib/src/shift_rotate.s
.text
.global MovRegA
MovRegA:
    // int MovRegA(unsigned int a, unsigned int *b);
    push {r4-r7}
    mov r7,r1               // Storing *b to r7
    /*
        This part of the code performs different forms of shifting and two
        forms of rotation.
    */
    mov r2,r0,asr #2
    mov r3,r0,lsl #4
    mov r4,r0,lsr #5
    mov r5,r0,ror #3
    mov r6,r0,rrx
    str r2,[r7]
    str r3,[r7,#4]
    str r4,[r7,#8]
    str r5,[r7,#12]
    str r6,[r7,#16]
    pop {r4-r7}
    bx lr

.global MovRegB
MovRegB:
    // int MovRegB(unsigned int a, unsigned int *b);
    push {r4-r7}
    mov r7,r1               // Storing *b to r7
    /*
        This part of the code performs different forms of shifting and two
        forms of rotation.
    */
    mov r2,r0,asr #2
    mov r3,r0,lsr #2
    mov r4,r0,lsr #2
    mov r5,r0,ror #2
    mov r6,r0,rrx
    str r2,[r7]
    str r3,[r7,#4]
    str r4,[r7,#8]
    str r5,[r7,#12]
    str r6,[r7,#16]
    pop {r4-r7}
    bx lr
Deepdive543443/unsafe_asm
2,233
A32/Chapter3/4.LocalVarsFP/local_var_fp.s
.text .global local_vars_fp local_vars_fp: // int local_vars_fp(uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e); push {r4,fp,lr} // lr is used as a scratch register here mov fp,sp sub sp,#16 // [temp1, temp2, temp3, temp4, r4 <- fp, fp, lr, e, *result] mov lr,#2 add r4,r0,r1 sdiv r4,r4,lr str r4,[fp,#-16] // Storing temp 1 add r4,r1,r2 sdiv r4,r4,lr str r4,[fp,#-12] // Storing temp 2 add r4,r2,r3 sdiv r4,r4,lr str r4,[fp,#-8] // Storing temp 3 ldr r1,[fp,#12] add r4,r3,r1 sdiv r4,r4,lr str r4,[fp,#-4] // Storing temp 4 // Convolution 1 ldr r0,[fp,#-16] ldr r1,[fp,#-12] ldr r2,[fp,#-8] ldr r3,[fp,#-4] add r4,r0,r1 sdiv r4,r4,lr str r4,[fp,#-16] // Storing temp 1 add r4,r1,r2 sdiv r4,r4,lr str r4,[fp,#-12] // Storing temp 2 add r4,r2,r3 sdiv r4,r4,lr str r4,[fp,#-8] // Storing temp 3 @ // Convolution 2 ldr r0,[fp,#-16] ldr r1,[fp,#-12] ldr r2,[fp,#-8] add r4,r0,r1 sdiv r4,r4,lr str r4,[fp,#-16] // Storing temp 1 add r4,r1,r2 sdiv r4,r4,lr str r4,[fp,#-12] // Storing temp 2 // Convolution 3 ldr r0,[fp,#-16] ldr r1,[fp,#-12] add r4,r0,r1 sdiv r4,r4,lr ldr r0,[fp,#16] str r4,[r0] // Deallocating add sp,#16 pop {r4,fp,pc}
Deepdive543443/unsafe_asm
2,200
A32/Chapter3/3.LocalVars/local_vars.s
.text
.global local_vars
local_vars:
    // int local_vars(uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e);
    push {r4,r5}
    sub sp,#16              // Allocate four 32-bit integers
                            // [tmp1, tmp2, tmp3, tmp4, r4, r5, e]
                            // I think this is what's called "压栈" (pushing onto the stack) in Chinese?
    mov r5,#2

    add r4,r0,r1
    sdiv r4,r4,r5
    str r4,[sp,#0]          // Storing temp 1
    add r4,r1,r2
    sdiv r4,r4,r5
    str r4,[sp,#4]          // Storing temp 2
    add r4,r2,r3
    sdiv r4,r4,r5
    str r4,[sp,#8]          // Storing temp 3
    ldr r1,[sp,#24]
    add r4,r3,r1
    sdiv r4,r4,r5
    str r4,[sp,#12]         // Storing temp 4

    // Start convolution
    ldr r0,[sp,#0]
    ldr r1,[sp,#4]
    ldr r2,[sp,#8]
    ldr r3,[sp,#12]
    add r4,r0,r1
    sdiv r4,r4,r5
    str r4,[sp,#0]          // Storing temp 1
    add r4,r1,r2
    sdiv r4,r4,r5
    str r4,[sp,#4]          // Storing temp 2
    add r4,r2,r3
    sdiv r4,r4,r5
    str r4,[sp,#8]          // Storing temp 3

    // convolution 2
    ldr r0,[sp,#0]
    ldr r1,[sp,#4]
    ldr r2,[sp,#8]
    add r4,r0,r1
    sdiv r4,r4,r5
    str r4,[sp,#0]          // Storing temp 1
    add r4,r1,r2
    sdiv r4,r4,r5
    str r4,[sp,#4]          // Storing temp 2

    // convolution 3
    ldr r0,[sp,#0]
    ldr r1,[sp,#4]
    add r0,r0,r1
    sdiv r0,r0,r5

    // Deallocate temp vars and restore registers
    add sp,#16
    pop {r4,r5}
    bx lr
Deepdive543443/unsafe_asm
1,250
A32/Chapter3/2.MixedInteger/sum_cube.s
.text
.global sum_cube
sum_cube:
    // int sum_cube(uint32_t a, uint16_t d, uint8_t g, uint8_t h, uint8_t i, uint16_t e, uint16_t f, uint32_t b, uint32_t c);
    push {r4}
    mul r4,r0,r0        // r4 = a * a;     (mul r0,r0,r0 seems to lead to some issue here;
    mul r0,r4,r0        // r0 = a * a * a;  not sure why it didn't happen before)
    mul r4,r1,r1
    mla r0,r4,r1,r0
    mul r4,r2,r2
    mla r0,r4,r2,r0
    mul r4,r3,r3
    mla r0,r4,r3,r0
    ldrb r1,[sp,#4]     // i (start from 4 because of the push)
    mul r4,r1,r1
    mla r0,r4,r1,r0
    ldrh r1,[sp,#8]     // e (all values are aligned to 32 bits, so +4 each time)
    mul r4,r1,r1
    mla r0,r4,r1,r0
    ldrh r1,[sp,#12]    // f
    mul r4,r1,r1
    mla r0,r4,r1,r0
    ldr r1,[sp,#16]     // b
    mul r4,r1,r1
    mla r0,r4,r1,r0
    ldr r1,[sp,#20]     // c
    mul r4,r1,r1
    mla r0,r4,r1,r0
    pop {r4}
    bx lr
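A hedged C reference for sum_cube above, using the prototype from the comment; like the assembly, intermediate products simply wrap modulo 2^32 (the _ref name is illustrative):

#include <stdint.h>

/* C reference for sum_cube: the cube of every argument is accumulated. */
int sum_cube_ref(uint32_t a, uint16_t d, uint8_t g, uint8_t h, uint8_t i,
                 uint16_t e, uint16_t f, uint32_t b, uint32_t c) {
    uint32_t sum = a*a*a
                 + (uint32_t)d*d*d + (uint32_t)g*g*g + (uint32_t)h*h*h
                 + (uint32_t)i*i*i + (uint32_t)e*e*e + (uint32_t)f*f*f
                 + b*b*b + c*c*c;
    return (int)sum;   /* truncated to 32 bits, as in the assembly */
}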
Deepdive543443/unsafe_asm
1,383
A32/Chapter4/5.ldmia/ldm.s
.text
.global reverse_asm
// void reverse_asm(int *dst, int *src, int n);[r3=idx, r4-r7:load, r8-r11: store ip]
reverse_asm:
    push {r4-r11}
    cmp r2,#0           //
    ble fin
    add r1,r2,lsl #2
    sub r1,#4           // Moving the *src pointer to the end of the arr
    mov r3,#0           // idx init
ldm_loop:
    sub r2,r3
    cmp r2,#4
    add r2,r3           // Restore
    ble res_loop
    ldmda r1!,{r4-r7}   // Load 4 from src, moving src backward by 4 elements
    mov r11,r4
    mov r10,r5
    mov r9,r6
    mov r8,r7
    stmia r0!,{r8-r11}  // Reverse and store to dst, moving dst forward by 4 elements
    add r3,#4           // Update idx
    bal ldm_loop
res_loop:
    cmp r3,r2
    bge fin
    ldr r4,[r1]
    str r4,[r0]
    add r0,#4           // dst++
    sub r1,#4           // src--
    add r3,#1           // Update idx
    bal res_loop
fin:
    pop {r4-r11}
    bx lr
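A hypothetical C reference for reverse_asm above, useful for checking the LDMDA/STMIA block path plus the scalar tail loop (the _ref name is illustrative):

/* Copy src into dst in reverse order; the assembly handles 4 elements per
 * LDMDA/STMIA pair and falls back to a scalar loop for the remainder. */
void reverse_ref(int *dst, const int *src, int n) {
    for (int i = 0; i < n; i++) {
        dst[i] = src[n - 1 - i];
    }
}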
Deepdive543443/unsafe_asm
1,570
A32/Chapter4/1.ArrayArithmetic/arr_sum.s
.text
.global calc_sum_asm
/* Slightly improved loop compared to C306 */
// int calc_sum_asm(int *nums, int n); [r0: ptr, r1: num, r2: cur_vals, r3: sum]
calc_sum_asm:
    mov r3,#0
    cmp r1,#0
    ble Finish              // Early stop
    mov r2,r0
loop:
    ldr r2,[r0],#4          // load and advance the pointer
    add r3,r3,r2
    subs r1,r1,#1           // compare n's value against 0 and update the APSR flags
    bgt loop
Finish:
    mov r0,r3
    bx lr

.global calc_sum_64_asm
// uint64_t calc_sum_64_asm(uint32_t *nums, uint32_t n); [r0: ptr, r1: num, r2: cur_vals, r3: sum(lower), r4: sum(higher), r5: idx]
calc_sum_64_asm:
    push {r4,r5}
    mov r3,#0
    mov r4,#0
    cmp r1,#0
    ble Finish_64
    mov r5,#0
loop_64:
    ldr r2,[r0,r5,lsl #2]   // r2 = *(r0 + (r5 << 2))
    adds r3,r2,r3
    adc r4,r4,#0            // 64-bit add: propagate the carry into the high word
    add r5,#1
    cmp r5,r1
    blt loop_64
Finish_64:
    mov r0,r3
    mov r1,r4
    pop {r4,r5}             // Forgetting this line makes CalcSum_64() output different values:
    bx lr                   // r4/r5 are callee-saved, so the caller expects them restored
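A hedged C reference for calc_sum_64_asm above; widening the accumulator to 64 bits is exactly why the assembly pairs ADDS with ADC into the high word (the _ref name is illustrative):

#include <stdint.h>

/* Sum of uint32_t values accumulated into a uint64_t. */
uint64_t calc_sum_64_ref(const uint32_t *nums, uint32_t n) {
    uint64_t sum = 0;
    for (uint32_t i = 0; i < n; i++) {
        sum += nums[i];   /* low-word add; carry propagates into the high word */
    }
    return sum;
}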
Deepdive543443/unsafe_asm
1,077
A32/Chapter4/3.Mat/mat_square.s
.text
.global mat_square_asm
// void mat_square_asm(int *dst_mat, int *src_mat, int width, int height); [r4=i, r5=j, r6=position, r7=cur_val]
mat_square_asm:
    push {r4-r7}
    mov r6,#0
    mov r4,#0               // int i = 0
loop_w:
    cmp r4,r3
    bge fin                 // i < height
    mov r5,#0               // int j = 0
loop_h:
    cmp r5,r2
    addge r4,#1             // j >= width
    bge loop_w
    mul r6,r2,r4            // r6 = i * width
    add r6,r6,r5            // r6 += j
    ldr r7,[r1,r6,lsl #2]   // r7 = src_mat[r6]
    mul r7,r7,r7            // r7 = r7 ^ 2
    str r7,[r0,r6,lsl #2]   // dst_mat[r6] = r7
    add r5,#1               // j++
    bal loop_h              // Using 'bl' here gives a segmentation fault ('bl' clobbers lr)
fin:
    pop {r4-r7}
    bx lr
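A hypothetical C reference for mat_square_asm above, showing the i*width+j indexing the assembly computes in r6 (the _ref name is illustrative):

void mat_square_ref(int *dst_mat, const int *src_mat, int width, int height) {
    for (int i = 0; i < height; i++) {
        for (int j = 0; j < width; j++) {
            int pos = i * width + j;                 /* same index the asm builds in r6 */
            dst_mat[pos] = src_mat[pos] * src_mat[pos];
        }
    }
}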
Deepdive543443/unsafe_asm
3,760
A32/Chapter7/lib/src/vec_add.s
// The implementations below appear to have some misalign // with the ground truth results. Not an very successful approuch // Waiting for debugging in future .text zero_single: .single 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 // float sum_f32(float *src, int length); // r2=ptr, r3=idx, r4=quo, r5=rem r3Q0(store)(f32, f32, f32, f32) Q1(add)(f32, f32, f32, f32) .global sum_f32 sum_f32: push {r4,r5} cmp r1,#0 ble fin_f32 mov r2,#4 sdiv r4,r1,r2 // quo refer to num of simd ops mul r3,r4,r2 sub r5,r1,r3 // rem for num of scalers ops ldr r2,=zero_single vldm r2,{q0,q1} // Init Quad 0 and 1 vector mov r2,r0 // r2 = *src mov r3,#0 // r3 = idx loop_f32x4: cmp r3,r4 // Check if we need to jump to scaler loop bge pre_loop_f32 vldm r2,{q1} // Load 4 float32 from mem, perform SIMD adding... vadd.f32 q0,q0,q1 add r3,#1 add r2,#16 // Moving 4 data forward b loop_f32x4 pre_loop_f32: mov r3,#0 // Reset index vadd.f32 d0,d0,d1 // Sum the content of Q0 vadd.f32 s0,s0,s1 loop_f32: cmp r3,r5 bge fin_f32 vldr.f32 s1,[r2] vadd.f32 s0,s0,s1 add r3,#1 // Update idx add r2,#4 // Update pointer b loop_f32 fin_f32: pop {r4,r5} bx lr // uint16_t sum_u16(uint16_t *src, int length); .global sum_u16 sum_u16: push {r4,r5} sub sp,#8 cmp r1,#0 ble fin_u16 mov r2,#8 // Most of the part remain identical sdiv r4,r1,r2 // Except we have 8 num in a vector here mul r3,r4,r2 sub r5,r1,r3 ldr r2,=zero_single vldm r2,{q0,q1} // Init Quad 0 and 1 vector mov r2,r0 // r2 = *src mov r3,#0 // r3 = idx loop_u16x8: cmp r3,r4 bge pre_loop_u16 vldm r2,{q1} // Load 8 u16 to vec vadd.u16 q0,q0,q1 add r3,#1 add r2,#16 b loop_u16x8 pre_loop_u16: mov r3,#0 vadd.u16 d0,d0,d1 vstr.u16 d0,[sp] ldrh r0,[sp] ldrh r4,[sp,#2] add r0,r0,r4 ldrh r4,[sp,#4] add r0,r0,r4 ldrh r4,[sp,#6] add r0,r0,r4 loop_u16: cmp r3,r5 bge fin_u16 ldrh r4,[r2] add r0,r0,r4 add r3,#1 // Update idx add r2,#2 // Update pointer b loop_u16 fin_u16: add sp,#8 pop {r4,r5} bx lr
Deepdive543443/unsafe_asm
1,882
A32/Chapter7/lib/src/vec_cops.s
// typedef {
@   F32_CVT_I32
@   I32_CVT_F32
@   F32_CVT_U32
@   U32_CVT_F32
@   F32_CMP
@   U32_CMP
@   NUM_OPS
// } cvtEnum;
.text
// Addr
cvtEnum:
    .word F32_CVT_I32   // (0x0)
    .word I32_CVT_F32   // (0x4)
    .word F32_CVT_U32   // (0x8)
    .word U32_CVT_F32   // (0xc)
    .word F32_CMP       // ...
    .word U32_CMP
.equ NUM_OPS,(. - cvtEnum) / 4

// int vec_cvt(void *src_ptr, void *dst_ptr, void *out_ptr, cvtEnum ops);
.global vec_cvt
vec_cvt:
    push {r4}
    cmp r3,#NUM_OPS
    bge err
    adr r4,cvtEnum
    ldr r4,[r4,r3,lsl #2]
    vldm r0,{q0}        // load f32 vec
    vldm r1,{q1}        // load s32 vec
    bx r4

F32_CVT_I32:
    vcvt.s32.f32 q1,q0
    vstm r1,{q1}
    b fin
I32_CVT_F32:
    vcvt.f32.s32 q0,q1
    vstm r0,{q0}
    b fin
F32_CVT_U32:
    vcvt.u32.f32 q1,q0
    vstm r1,{q1}
    b fin
U32_CVT_F32:
    vcvt.f32.u32 q0,q1
    vstm r0,{q0}
    b fin
F32_CMP:
    vcgt.f32 q2,q0,q1
    vstm r2,{q2}
    b fin
U32_CMP:
    vcgt.u32 q2,q0,q1
    vstm r2,{q2}
    b fin

err:
    mov r0,#-1          // return -1 here instead of falling through into fin,
    pop {r4}            // which would overwrite r0 with 0
    bx lr
fin:
    mov r0,#0
    pop {r4}
    bx lr
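The commented-out typedef above suggests the C-side enum the fourth argument is expected to carry; a sketch of how that header might look (this declaration is an assumption for illustration, not code taken from the repo):

/* C-side view of the dispatch values used by the .word jump table above. */
typedef enum {
    F32_CVT_I32,
    I32_CVT_F32,
    F32_CVT_U32,
    U32_CVT_F32,
    F32_CMP,
    U32_CMP,
    NUM_OPS
} cvtEnum;

int vec_cvt(void *src_ptr, void *dst_ptr, void *out_ptr, cvtEnum ops);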
defghij/disassembler
3,108
src/tests/files/file2.s
[BITS 32] ; nasm file2.s -o file2.o ; ndisasm -u file2.o > file2.out ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; xor eax, eax add eax, ecx add eax, edx push ebp mov ebp, esp push edx push ecx mov eax, 041424344h mov edx, dword [ dword ebp + 08h] ; The first dword refers to the ; memory access, the second refers ; to the size of the ; immediate (0x00000008). mov ecx, dword [ dword ebp + 0ch] ; The first dword refers to the ; memory access, the second refers ; to the size of the ; immediate (0x0000000c). add ecx, edx mov eax, ecx pop edx pop ecx pop ebp retn 08h ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; mov dword [ byte esi - 4 ], edi ; expected output in disassembler ; 00000000: 89 7E FC mov [esi-0x4],edi ; -OR- ; 00000000: 89 7E FC mov [esi + 0xfffffffc],edi ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; push ebp mov ebp, esp push edx push ecx cmp ecx, edx jz label_error mov eax, 041424344h mov edx, dword [ byte ebp + 08h] ; By default, the assembler will ; likely make 0x08 a byte, but the ; byte qualifier guarantees it. mov ecx, dword [ byte ebp + 0ch] ; By default, the assembler will ; likely make 0x0c a byte, but the ; byte qualifier guarantees it. add ecx, edx mov eax, ecx label_error: pop edx pop ecx pop ebp retn 08h ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; push ebp push edi retn my_label: mov [eax], edi push ebp push edi push ebp jmp my_label ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;clflush esi ; expected disassmebler ouput db 0x0F ; 00000000: 0F db 0x0f db 0xAE ; 00000001: AE db 0xae db 0xFE ; 00000002: FE db 0xfe ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; db 0x8d ; lea edi, ecx (invalid so need to emit) db 0xf9 ; expected output of disassembler: ;00000000: 8d db 0x8d ;00000001: f9 db 0xf9 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; call with missing bytes db 0xe8 ; expected output from disassembler: db 0x00 db 0x00 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
DharitriOne/wasmer
1,732
lib/runtime-core/image-loading-linux-x86-64.s
# NOTE: Keep this consistent with `fault.rs`. .globl run_on_alternative_stack run_on_alternative_stack: # (stack_end, stack_begin) # We need to ensure 16-byte alignment here. pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx pushq %rbp movq %rsp, -16(%rdi) leaq run_on_alternative_stack.returning(%rip), %rax movq %rax, -24(%rdi) movq %rsi, %rsp movq (%rsp), %xmm0 add $8, %rsp movq (%rsp), %xmm1 add $8, %rsp movq (%rsp), %xmm2 add $8, %rsp movq (%rsp), %xmm3 add $8, %rsp movq (%rsp), %xmm4 add $8, %rsp movq (%rsp), %xmm5 add $8, %rsp movq (%rsp), %xmm6 add $8, %rsp movq (%rsp), %xmm7 add $8, %rsp movq (%rsp), %xmm8 add $8, %rsp movq (%rsp), %xmm9 add $8, %rsp movq (%rsp), %xmm10 add $8, %rsp movq (%rsp), %xmm11 add $8, %rsp movq (%rsp), %xmm12 add $8, %rsp movq (%rsp), %xmm13 add $8, %rsp movq (%rsp), %xmm14 add $8, %rsp movq (%rsp), %xmm15 add $8, %rsp popq %rbp popq %rax popq %rbx popq %rcx popq %rdx popq %rdi popq %rsi popq %r8 popq %r9 popq %r10 popq %r11 popq %r12 popq %r13 popq %r14 popq %r15 retq run_on_alternative_stack.returning: movq (%rsp), %rsp popq %rbp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 retq # For switching into a backend without information about where registers are preserved. .globl register_preservation_trampoline register_preservation_trampoline: subq $8, %rsp pushq %rax pushq %rcx pushq %rdx pushq %rdi pushq %rsi pushq %r8 pushq %r9 pushq %r10 callq get_boundary_register_preservation@PLT # Keep this consistent with BoundaryRegisterPreservation movq %r15, 0(%rax) movq %r14, 8(%rax) movq %r13, 16(%rax) movq %r12, 24(%rax) movq %rbx, 32(%rax) popq %r10 popq %r9 popq %r8 popq %rsi popq %rdi popq %rdx popq %rcx popq %rax addq $8, %rsp jmpq *%rax
DharitriOne/wasmer
1,732
lib/runtime-core/image-loading-freebsd-x86-64.s
# NOTE: Keep this consistent with `fault.rs`. .globl run_on_alternative_stack run_on_alternative_stack: # (stack_end, stack_begin) # We need to ensure 16-byte alignment here. pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx pushq %rbp movq %rsp, -16(%rdi) leaq run_on_alternative_stack.returning(%rip), %rax movq %rax, -24(%rdi) movq %rsi, %rsp movq (%rsp), %xmm0 add $8, %rsp movq (%rsp), %xmm1 add $8, %rsp movq (%rsp), %xmm2 add $8, %rsp movq (%rsp), %xmm3 add $8, %rsp movq (%rsp), %xmm4 add $8, %rsp movq (%rsp), %xmm5 add $8, %rsp movq (%rsp), %xmm6 add $8, %rsp movq (%rsp), %xmm7 add $8, %rsp movq (%rsp), %xmm8 add $8, %rsp movq (%rsp), %xmm9 add $8, %rsp movq (%rsp), %xmm10 add $8, %rsp movq (%rsp), %xmm11 add $8, %rsp movq (%rsp), %xmm12 add $8, %rsp movq (%rsp), %xmm13 add $8, %rsp movq (%rsp), %xmm14 add $8, %rsp movq (%rsp), %xmm15 add $8, %rsp popq %rbp popq %rax popq %rbx popq %rcx popq %rdx popq %rdi popq %rsi popq %r8 popq %r9 popq %r10 popq %r11 popq %r12 popq %r13 popq %r14 popq %r15 retq run_on_alternative_stack.returning: movq (%rsp), %rsp popq %rbp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 retq # For switching into a backend without information about where registers are preserved. .globl register_preservation_trampoline register_preservation_trampoline: subq $8, %rsp pushq %rax pushq %rcx pushq %rdx pushq %rdi pushq %rsi pushq %r8 pushq %r9 pushq %r10 callq get_boundary_register_preservation@PLT # Keep this consistent with BoundaryRegisterPreservation movq %r15, 0(%rax) movq %r14, 8(%rax) movq %r13, 16(%rax) movq %r12, 24(%rax) movq %rbx, 32(%rax) popq %r10 popq %r9 popq %r8 popq %rsi popq %rdi popq %rdx popq %rcx popq %rax addq $8, %rsp jmpq *%rax
DharitriOne/wasmer
1,735
lib/runtime-core/image-loading-macos-x86-64.s
# NOTE: Keep this consistent with `fault.rs`. .globl _run_on_alternative_stack _run_on_alternative_stack: # (stack_end, stack_begin) # We need to ensure 16-byte alignment here. pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx pushq %rbp movq %rsp, -16(%rdi) leaq _run_on_alternative_stack.returning(%rip), %rax movq %rax, -24(%rdi) movq %rsi, %rsp movq (%rsp), %xmm0 add $8, %rsp movq (%rsp), %xmm1 add $8, %rsp movq (%rsp), %xmm2 add $8, %rsp movq (%rsp), %xmm3 add $8, %rsp movq (%rsp), %xmm4 add $8, %rsp movq (%rsp), %xmm5 add $8, %rsp movq (%rsp), %xmm6 add $8, %rsp movq (%rsp), %xmm7 add $8, %rsp movq (%rsp), %xmm8 add $8, %rsp movq (%rsp), %xmm9 add $8, %rsp movq (%rsp), %xmm10 add $8, %rsp movq (%rsp), %xmm11 add $8, %rsp movq (%rsp), %xmm12 add $8, %rsp movq (%rsp), %xmm13 add $8, %rsp movq (%rsp), %xmm14 add $8, %rsp movq (%rsp), %xmm15 add $8, %rsp popq %rbp popq %rax popq %rbx popq %rcx popq %rdx popq %rdi popq %rsi popq %r8 popq %r9 popq %r10 popq %r11 popq %r12 popq %r13 popq %r14 popq %r15 retq _run_on_alternative_stack.returning: movq (%rsp), %rsp popq %rbp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 retq # For switching into a backend without information about where registers are preserved. .globl _register_preservation_trampoline _register_preservation_trampoline: subq $8, %rsp pushq %rax pushq %rcx pushq %rdx pushq %rdi pushq %rsi pushq %r8 pushq %r9 pushq %r10 callq _get_boundary_register_preservation # Keep this consistent with BoundaryRegisterPreservation movq %r15, 0(%rax) movq %r14, 8(%rax) movq %r13, 16(%rax) movq %r12, 24(%rax) movq %rbx, 32(%rax) popq %r10 popq %r9 popq %r8 popq %rsi popq %rdi popq %rdx popq %rcx popq %rax addq $8, %rsp jmpq *%rax
diandianjunA/rCore_sys
1,494
os/src/link_app.S
.align 3 .section .data .global _num_app _num_app: .quad 7 .quad app_0_start .quad app_1_start .quad app_2_start .quad app_3_start .quad app_4_start .quad app_5_start .quad app_6_start .quad app_6_end .section .data .global app_0_start .global app_0_end .align 3 app_0_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/00power_3" app_0_end: .section .data .global app_1_start .global app_1_end .align 3 app_1_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/01power_5" app_1_end: .section .data .global app_2_start .global app_2_end .align 3 app_2_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/02power_7" app_2_end: .section .data .global app_3_start .global app_3_end .align 3 app_3_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/03sleep" app_3_end: .section .data .global app_4_start .global app_4_end .align 3 app_4_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/04load_fault" app_4_end: .section .data .global app_5_start .global app_5_end .align 3 app_5_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/05store_fault" app_5_end: .section .data .global app_6_start .global app_6_end .align 3 app_6_start: .incbin "../user/target/riscv64gc-unknown-none-elf/release/sbrk_test" app_6_end:
diandianjunA/rCore_sys
1,569
os/src/trap/trap.S
.altmacro
.macro SAVE_GP n
    sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
    ld x\n, \n*8(sp)
.endm
    .section .text.trampoline
    .globl __alltraps
    .globl __restore
    .align 2
__alltraps:
    # On entry to __alltraps, sp points to the top of the user stack, and the
    # sscratch register points to the location in the application's address
    # space where the trap context is stored
    csrrw sp, sscratch, sp
    # Now sp points to the TrapContext and sscratch holds the original stack pointer.
    # Save the original general-purpose registers and CSRs into the TrapContext.
    sd x1, 1*8(sp)
    sd x3, 3*8(sp)
    # Save x5-x31 in a loop
    .set n, 5
    .rept 27
        SAVE_GP %n
        .set n, n+1
    .endr
    # Save the sstatus and sepc registers
    # t0-t1 are temporary registers that have already been saved, so they can be used directly
    csrr t0, sstatus
    csrr t1, sepc
    sd t0, 32*8(sp)
    sd t1, 33*8(sp)
    # Save the user-mode stack pointer into the trap context
    csrr t2, sscratch
    sd t2, 2*8(sp)
    # Load kernel_satp, the starting physical address of the kernel page table
    ld t0, 34*8(sp)
    # Load the virtual address of the trap_handler entry point
    ld t1, 36*8(sp)
    # Load the virtual address of the kernel stack top
    ld sp, 35*8(sp)
    # Switch the address space to the kernel's
    csrw satp, t0
    sfence.vma
    # Jump to the trap handler
    jr t1

# __restore is called in two cases:
# 1. returning from kernel mode to user mode
# 2. starting to run a user-mode program for the first time
__restore:
    # a0 is the first argument and points to the TrapContext; a1 is the second
    # argument and holds the satp value of the user address space
    # Switch back to the application address space
    csrw satp, a1
    sfence.vma
    # Save the address of the TrapContext into the sscratch register
    # so that __alltraps can find the TrapContext through sscratch next time
    csrw sscratch, a0
    # Point sp at the trap context and restore the general-purpose registers and CSRs from it
    mv sp, a0
    # Restore the sstatus and sepc registers
    ld t0, 32*8(sp)
    ld t1, 33*8(sp)
    csrw sstatus, t0
    csrw sepc, t1
    # Restore x1 and x3
    ld x1, 1*8(sp)
    ld x3, 3*8(sp)
    # Restore x5-x31 in a loop
    .set n, 5
    .rept 27
        LOAD_GP %n
        .set n, n+1
    .endr
    # Restore the user-mode stack pointer
    ld sp, 2*8(sp)
    # Drop from supervisor privilege back to user mode
    sret
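The 8-byte offsets used above imply the following trap-context layout. The rCore project defines this structure in Rust (TrapContext); the C rendering below is only an illustrative sketch of the field order, not the project's own definition:

#include <stdint.h>

struct trap_context {
    uint64_t x[32];        /* x0-x31 slots at n*8(sp); x2 (user sp) is stored via sscratch */
    uint64_t sstatus;      /* 32*8(sp) */
    uint64_t sepc;         /* 33*8(sp) */
    uint64_t kernel_satp;  /* 34*8(sp): kernel page-table token */
    uint64_t kernel_sp;    /* 35*8(sp): virtual address of the kernel stack top */
    uint64_t trap_handler; /* 36*8(sp): virtual address of the trap handler entry */
};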
Diff-fusion/dhm-2024
3,497
flag-generator-extended/deploy/challenge/main/ulp-fsm/decrypt.S
#include "sdkconfig.h" #include "soc/rtc_cntl_reg.h" #include "soc/rtc_io_reg.h" #include "soc/soc_ulp.h" #include "soc/sens_reg.h" .data key_add: .short 35835, 34792, 315, 58643, 57287, 19882, 28091, 14325 /* Define variables, which go into .bss section (zero-initialized data) */ .bss .global key key: .fill 8, 2 .global iv iv: .fill 8, 2 .global data data: .fill 2048, 2 .global data_len data_len: .long 0 data_ctr: .long 0 mix_block_tmp: .long 0 gen_block_key: .long 0 gen_block_iv: .long 0 gen_block_ctr: .long 0 add_blocks_ret: .long 0 gen_block_ret: .long 0 /* Code goes into .text section */ .text .global entry entry: move r1, iv move r2, key move r0, ret1 jump gen_block ret1: // load addr of data + ctr move r3, data_ctr ld r2, r3, 0 move r1, data add r1, r1, r2 move r2, iv move r0, ret2 jump add_blocks ret2: // inc counter move r3, data_ctr ld r2, r3, 0 add r2, r2, 4 st r2, r3, 0 // compare with data_len move r3, data_len ld r1, r3, 0 // data_ctr - data_len sub r0, r2, r1 // will set overflow as long as data_ctr is less than data_len jump entry, OV exit: WRITE_RTC_FIELD(RTC_CNTL_ULP_CP_TIMER_REG, RTC_CNTL_ULP_CP_SLP_TIMER_EN, 0) wake halt // function add_blocks: // r0: ret address // r1: block1 src and dst // r2: block2 src // save return address move r3, add_blocks_ret st r0, r3, 0 stage_rst add_blocks_loop: ldl r0, r1, 0 ldl r3, r2, 0 add r0, r0, r3 stl r0, r1, 0 ldh r0, r1, 0 ldh r3, r2, 0 add r0, r0, r3 sth r0, r1, 0 add r1, r1, 1 add r2, r2, 1 stage_inc 1 jumps add_blocks_loop, 4, LT // return move r3, add_blocks_ret ld r0, r3, 0 jump r0 // function mix_block: // r0: ret address // r1: block // save last value and load prev ldh r2, r1, 15 //move r3, mix_block_tmp //stl r2, r3, 0 stage_rst mix_block_loop: // load curr ldl r3, r1, 0 // add and store add r2, r2, r3 stl r2, r1, 0 // set prev = curr move r2, r3 // same with high bits ldh r3, r1, 0 add r2, r2, r3 sth r2, r1, 0 move r2, r3 add r1, r1, 1 stage_inc 1 jumps mix_block_loop, 4, LT // return jump r0 // function gen_block: // r0: ret address // r1: iv // r2: key // save return address move r3, gen_block_ret st r0, r3, 0 // save iv move r3, gen_block_iv st r1, r3, 0 // save key move r3, gen_block_key st r2, r3, 0 // add iv, key move r0, gen_block_ret1 jump add_blocks gen_block_ret1: // set ctr zero move r3, gen_block_ctr move r0, 0 st r0, r3, 0 gen_block_loop: // mix iv move r3, gen_block_iv ld r1, r3, 0 move r0, gen_block_ret2 jump mix_block gen_block_ret2: // add key, key_add move r3, gen_block_key ld r1, r3, 0 move r2, key_add move r0, gen_block_ret3 jump add_blocks gen_block_ret3: // add iv, key move r3, gen_block_iv ld r1, r3, 0 move r3, gen_block_key ld r2, r3, 0 move r0, gen_block_ret4 jump add_blocks gen_block_ret4: // inc ctr move r3, gen_block_ctr ld r0, r3, 0 add r0, r0, 1 st r0, r3, 0 // loop 8 times jumpr gen_block_loop, 8, LT // return move r3, gen_block_ret ld r0, r3, 0 jump r0
dionysusliu/Snake-Compiler
15,048
runtime/compiled_code.s
section .text global start_here extern snake_error extern print_snake_val start_here: call main ret main: mov rax, 12 ;;; Let mov [rsp + -8], rax ;;; FunDefs103_body mov rax, [rsp + -8] ;;; Let mov [rsp + -16], rax mov rax, [rsp + -8] ;;; Let mov [rsp + -24], rax ;;; InCall mov rax, [rsp + -16] mov [rsp + -88], rax mov rax, [rsp + -24] mov [rsp + -96], rax mov rax, [rsp + -88] mov [rsp + -8], rax mov rax, [rsp + -96] mov [rsp + -16], rax jmp collatz#66 ret ;;; FunDefs103_decls when_odd#66: mov rax, 186 ;;; Let mov [rsp + -24], rax ;;; Prim1 mov rax, [rsp + -24] ;;; Print mov rdi, rax sub rsp, 80 call print_snake_val add rsp, 80 ;;; Let mov [rsp + -24], rax mov rax, [rsp + -8] ;;; Let mov [rsp + -32], rax mov rax, 6 ;;; Let mov [rsp + -40], rax mov rax, [rsp + -16] ;;; Let mov [rsp + -48], rax ;;; Prim2 mov rax, [rsp + -40] ;;; Check Whether Num mov rdi, 0x0000000000000000 mov rsi, rax mov rbx, 0x0000000000000001 test rbx, rax jnz snake_err mov r10, [rsp + -48] ;;; Check Whether Num mov rdi, 0x0000000000000000 mov rsi, r10 mov rbx, 0x0000000000000001 test rbx, r10 jnz snake_err ;;; Mul sar rax, 0x00000001 imul rax, r10 ;;; Check overflow mov rdi, 0x0000000000000004 mov rsi, rax jo snake_err ;;; Let mov [rsp + -40], rax mov rax, 2 ;;; Let mov [rsp + -48], rax ;;; Prim2 mov rax, [rsp + -40] ;;; Check Whether Num mov rdi, 0x0000000000000000 mov rsi, rax mov rbx, 0x0000000000000001 test rbx, rax jnz snake_err mov r10, [rsp + -48] ;;; Check Whether Num mov rdi, 0x0000000000000000 mov rsi, r10 mov rbx, 0x0000000000000001 test rbx, r10 jnz snake_err ;;; Add add rax, r10 ;;; Check overflow mov rdi, 0x0000000000000004 mov rsi, rax jo snake_err ;;; Let mov [rsp + -40], rax ;;; InCall mov rax, [rsp + -32] mov [rsp + -88], rax mov rax, [rsp + -40] mov [rsp + -96], rax mov rax, [rsp + -88] mov [rsp + -8], rax mov rax, [rsp + -96] mov [rsp + -16], rax jmp collatz#66 ret when_even#66: mov rax, 188 ;;; Let mov [rsp + -24], rax ;;; Prim1 mov rax, [rsp + -24] ;;; Print mov rdi, rax sub rsp, 80 call print_snake_val add rsp, 80 ;;; Let mov [rsp + -24], rax mov rax, [rsp + -8] ;;; Let mov [rsp + -32], rax mov rax, [rsp + -16] ;;; Let mov [rsp + -40], rax mov rax, 4 ;;; Let mov [rsp + -48], rax mov rax, 0 ;;; Let mov [rsp + -56], rax ;;; ExCall mov rax, [rsp + -40] mov [rsp + -88], rax mov rax, [rsp + -48] mov [rsp + -96], rax mov rax, [rsp + -56] mov [rsp + -104], rax sub rsp, 72 call div#21 add rsp, 72 ;;; Let mov [rsp + -40], rax ;;; InCall mov rax, [rsp + -32] mov [rsp + -88], rax mov rax, [rsp + -40] mov [rsp + -96], rax mov rax, [rsp + -88] mov [rsp + -8], rax mov rax, [rsp + -96] mov [rsp + -16], rax jmp collatz#66 ret collatz#66: mov rax, [rsp + -16] ;;; Let mov [rsp + -24], rax ;;; Prim1 mov rax, [rsp + -24] ;;; Print mov rdi, rax sub rsp, 80 call print_snake_val add rsp, 80 ;;; Let mov [rsp + -24], rax mov rax, [rsp + -8] ;;; Let mov [rsp + -32], rax mov rax, [rsp + -16] ;;; Let mov [rsp + -40], rax ;;; ExCall mov rax, [rsp + -32] mov [rsp + -88], rax mov rax, [rsp + -40] mov [rsp + -96], rax sub rsp, 72 call base_case#47 add rsp, 72 ;;; Let mov [rsp + -32], rax ;;; If mov rax, [rsp + -32] ;;; Check Whether Bool mov rdi, 0x0000000000000002 mov rsi, rax mov rbx, 0x0000000000000001 test rbx, rax jz snake_err mov r10, 0x7fffffffffffffff cmp rax, r10 je if_false#146 mov rax, 0 jmp done#146 if_false#146: mov rax, [rsp + -8] ;;; Let mov [rsp + -40], rax mov rax, [rsp + -16] ;;; Let mov [rsp + -48], rax ;;; ExCall mov rax, [rsp + -40] mov [rsp + -88], rax mov rax, [rsp + -48] mov [rsp + -96], rax sub rsp, 72 
call is_even#3 add rsp, 72 ;;; Let mov [rsp + -40], rax ;;; If mov rax, [rsp + -40] ;;; Check Whether Bool mov rdi, 0x0000000000000002 mov rsi, rax mov rbx, 0x0000000000000001 test rbx, rax jz snake_err mov r10, 0x7fffffffffffffff cmp rax, r10 je if_false#154 mov rax, [rsp + -8] ;;; Let mov [rsp + -48], rax mov rax, [rsp + -16] ;;; Let mov [rsp + -56], rax ;;; InCall mov rax, [rsp + -48] mov [rsp + -88], rax mov rax, [rsp + -56] mov [rsp + -96], rax mov rax, [rsp + -88] mov [rsp + -8], rax mov rax, [rsp + -96] mov [rsp + -16], rax jmp when_even#66 jmp done#154 if_false#154: mov rax, [rsp + -8] ;;; Let mov [rsp + -48], rax mov rax, [rsp + -16] ;;; Let mov [rsp + -56], rax ;;; InCall mov rax, [rsp + -48] mov [rsp + -88], rax mov rax, [rsp + -56] mov [rsp + -96], rax mov rax, [rsp + -88] mov [rsp + -8], rax mov rax, [rsp + -96] mov [rsp + -16], rax jmp when_odd#66 done#154: done#146: ret ret ;;; Global FunDecls is_even#3: mov rax, 180 ;;; Let mov [rsp + -24], rax ;;; Prim1 mov rax, [rsp + -24] ;;; Print mov rdi, rax sub rsp, 80 call print_snake_val add rsp, 80 ;;; Let mov [rsp + -24], rax mov rax, [rsp + -16] ;;; Let mov [rsp + -32], rax mov rax, 0 ;;; Let mov [rsp + -40], rax ;;; Prim2 mov rax, [rsp + -32] mov r10, [rsp + -40] ;;; Compare cmp rax, r10 mov rax, 0xffffffffffffffff je equal#11 mov rax, 0x7fffffffffffffff equal#11: ;;; Let mov [rsp + -32], rax ;;; If mov rax, [rsp + -32] ;;; Check Whether Bool mov rdi, 0x0000000000000002 mov rsi, rax mov rbx, 0x0000000000000001 test rbx, rax jz snake_err mov r10, 0x7fffffffffffffff cmp rax, r10 je if_false#12 mov rax, 0xffffffffffffffff jmp done#12 if_false#12: mov rax, [rsp + -16] ;;; Let mov [rsp + -40], rax mov rax, 2 ;;; Let mov [rsp + -48], rax ;;; Prim2 mov rax, [rsp + -40] mov r10, [rsp + -48] ;;; Compare cmp rax, r10 mov rax, 0xffffffffffffffff je equal#19 mov rax, 0x7fffffffffffffff equal#19: ;;; Let mov [rsp + -40], rax ;;; If mov rax, [rsp + -40] ;;; Check Whether Bool mov rdi, 0x0000000000000002 mov rsi, rax mov rbx, 0x0000000000000001 test rbx, rax jz snake_err mov r10, 0x7fffffffffffffff cmp rax, r10 je if_false#20 mov rax, 0x7fffffffffffffff jmp done#20 if_false#20: mov rax, [rsp + -8] ;;; Let mov [rsp + -48], rax mov rax, [rsp + -16] ;;; Let mov [rsp + -56], rax mov rax, 4 ;;; Let mov [rsp + -64], rax ;;; Prim2 mov rax, [rsp + -56] ;;; Check Whether Num mov rdi, 0x0000000000000000 mov rsi, rax mov rbx, 0x0000000000000001 test rbx, rax jnz snake_err mov r10, [rsp + -64] ;;; Check Whether Num mov rdi, 0x0000000000000000 mov rsi, r10 mov rbx, 0x0000000000000001 test rbx, r10 jnz snake_err ;;; Sub sub rax, r10 ;;; Check overflow mov rdi, 0x0000000000000004 mov rsi, rax jo snake_err ;;; Let mov [rsp + -56], rax ;;; ExCall mov rax, [rsp + -48] mov [rsp + -88], rax mov rax, [rsp + -56] mov [rsp + -96], rax mov rax, [rsp + -88] mov [rsp + -8], rax mov rax, [rsp + -96] mov [rsp + -16], rax jmp is_even#3 done#20: done#12: ret div#21: mov rax, 182 ;;; Let mov [rsp + -40], rax ;;; Prim1 mov rax, [rsp + -40] ;;; Print mov rdi, rax sub rsp, 128 call print_snake_val add rsp, 128 ;;; Let mov [rsp + -40], rax mov rax, [rsp + -16] ;;; Let mov [rsp + -48], rax ;;; Prim1 mov rax, [rsp + -48] ;;; Print mov rdi, rax sub rsp, 128 call print_snake_val add rsp, 128 ;;; Let mov [rsp + -48], rax mov rax, [rsp + -24] ;;; Let mov [rsp + -56], rax ;;; Prim1 mov rax, [rsp + -56] ;;; Print mov rdi, rax sub rsp, 128 call print_snake_val add rsp, 128 ;;; Let mov [rsp + -56], rax mov rax, [rsp + -32] ;;; Let mov [rsp + -64], rax ;;; Prim1 mov rax, [rsp + -64] ;;; 
Print mov rdi, rax sub rsp, 128 call print_snake_val add rsp, 128 ;;; Let mov [rsp + -64], rax mov rax, [rsp + -24] ;;; Let mov [rsp + -72], rax mov rax, [rsp + -32] ;;; Let mov [rsp + -80], rax ;;; Prim2 mov rax, [rsp + -72] ;;; Check Whether Num mov rdi, 0x0000000000000000 mov rsi, rax mov rbx, 0x0000000000000001 test rbx, rax jnz snake_err mov r10, [rsp + -80] ;;; Check Whether Num mov rdi, 0x0000000000000000 mov rsi, r10 mov rbx, 0x0000000000000001 test rbx, r10 jnz snake_err ;;; Mul sar rax, 0x00000001 imul rax, r10 ;;; Check overflow mov rdi, 0x0000000000000004 mov rsi, rax jo snake_err ;;; Let mov [rsp + -72], rax mov rax, [rsp + -16] ;;; Let mov [rsp + -80], rax ;;; Prim2 mov rax, [rsp + -72] mov r10, [rsp + -80] ;;; Compare cmp rax, r10 mov rax, 0xffffffffffffffff je equal#56 mov rax, 0x7fffffffffffffff equal#56: ;;; Let mov [rsp + -72], rax ;;; If mov rax, [rsp + -72] ;;; Check Whether Bool mov rdi, 0x0000000000000002 mov rsi, rax mov rbx, 0x0000000000000001 test rbx, rax jz snake_err mov r10, 0x7fffffffffffffff cmp rax, r10 je if_false#57 mov rax, [rsp + -32] jmp done#57 if_false#57: mov rax, [rsp + -8] ;;; Let mov [rsp + -80], rax mov rax, [rsp + -16] ;;; Let mov [rsp + -88], rax mov rax, [rsp + -24] ;;; Let mov [rsp + -96], rax mov rax, [rsp + -32] ;;; Let mov [rsp + -104], rax mov rax, 2 ;;; Let mov [rsp + -112], rax ;;; Prim2 mov rax, [rsp + -104] ;;; Check Whether Num mov rdi, 0x0000000000000000 mov rsi, rax mov rbx, 0x0000000000000001 test rbx, rax jnz snake_err mov r10, [rsp + -112] ;;; Check Whether Num mov rdi, 0x0000000000000000 mov rsi, r10 mov rbx, 0x0000000000000001 test rbx, r10 jnz snake_err ;;; Add add rax, r10 ;;; Check overflow mov rdi, 0x0000000000000004 mov rsi, rax jo snake_err ;;; Let mov [rsp + -104], rax ;;; ExCall mov rax, [rsp + -80] mov [rsp + -136], rax mov rax, [rsp + -88] mov [rsp + -144], rax mov rax, [rsp + -96] mov [rsp + -152], rax mov rax, [rsp + -104] mov [rsp + -160], rax mov rax, [rsp + -136] mov [rsp + -8], rax mov rax, [rsp + -144] mov [rsp + -16], rax mov rax, [rsp + -152] mov [rsp + -24], rax mov rax, [rsp + -160] mov [rsp + -32], rax jmp div#21 done#57: ret base_case#47: mov rax, 184 ;;; Let mov [rsp + -24], rax ;;; Prim1 mov rax, [rsp + -24] ;;; Print mov rdi, rax sub rsp, 64 call print_snake_val add rsp, 64 ;;; Let mov [rsp + -24], rax mov rax, [rsp + -16] ;;; Let mov [rsp + -32], rax mov rax, 2 ;;; Let mov [rsp + -40], rax ;;; Prim2 mov rax, [rsp + -32] mov r10, [rsp + -40] ;;; Compare cmp rax, r10 mov rax, 0xffffffffffffffff je equal#82 mov rax, 0x7fffffffffffffff equal#82: ;;; Let mov [rsp + -32], rax mov rax, [rsp + -16] ;;; Let mov [rsp + -40], rax mov rax, 4 ;;; Let mov [rsp + -48], rax ;;; Prim2 mov rax, [rsp + -40] mov r10, [rsp + -48] ;;; Compare cmp rax, r10 mov rax, 0xffffffffffffffff je equal#88 mov rax, 0x7fffffffffffffff equal#88: ;;; Let mov [rsp + -40], rax ;;; Prim2 mov rax, [rsp + -32] ;;; Check Whether Bool mov rdi, 0x0000000000000003 mov rsi, rax mov rbx, 0x0000000000000001 test rbx, rax jz snake_err mov r10, [rsp + -40] ;;; Check Whether Bool mov rdi, 0x0000000000000003 mov rsi, r10 mov rbx, 0x0000000000000001 test rbx, r10 jz snake_err or rax, r10 ;;; Let mov [rsp + -32], rax ;;; If mov rax, [rsp + -32] ;;; Check Whether Bool mov rdi, 0x0000000000000002 mov rsi, rax mov rbx, 0x0000000000000001 test rbx, rax jz snake_err mov r10, 0x7fffffffffffffff cmp rax, r10 je if_false#90 mov rax, 0xffffffffffffffff jmp done#90 if_false#90: mov rax, [rsp + -16] ;;; Let mov [rsp + -40], rax mov rax, 8 ;;; Let mov [rsp + -48], 
rax ;;; Prim2 mov rax, [rsp + -40] mov r10, [rsp + -48] ;;; Compare cmp rax, r10 mov rax, 0xffffffffffffffff je equal#97 mov rax, 0x7fffffffffffffff equal#97: ;;; Let mov [rsp + -40], rax ;;; If mov rax, [rsp + -40] ;;; Check Whether Bool mov rdi, 0x0000000000000002 mov rsi, rax mov rbx, 0x0000000000000001 test rbx, rax jz snake_err mov r10, 0x7fffffffffffffff cmp rax, r10 je if_false#98 mov rax, 0xffffffffffffffff jmp done#98 if_false#98: mov rax, 0x7fffffffffffffff done#98: done#90: ret snake_err: call snake_error
diptobiswasanime4/Learn_Rust
9,700
basic_concepts/getting_started/hello_world/main.s
.text .def @feat.00; .scl 3; .type 0; .endef .globl @feat.00 .set @feat.00, 0 .file "main.aaccb3f0a8cfdec6-cgu.0" .def _ZN3std10sys_common9backtrace28__rust_begin_short_backtrace17h4d28f88769ff0699E; .scl 3; .type 32; .endef .section .text,"xr",one_only,_ZN3std10sys_common9backtrace28__rust_begin_short_backtrace17h4d28f88769ff0699E .p2align 4, 0x90 _ZN3std10sys_common9backtrace28__rust_begin_short_backtrace17h4d28f88769ff0699E: .seh_proc _ZN3std10sys_common9backtrace28__rust_begin_short_backtrace17h4d28f88769ff0699E subq $40, %rsp .seh_stackalloc 40 .seh_endprologue callq _ZN4core3ops8function6FnOnce9call_once17h075c253ec62ed82aE #APP #NO_APP nop addq $40, %rsp retq .seh_endproc .def _ZN3std2rt10lang_start17ha354c19399061396E; .scl 2; .type 32; .endef .section .text,"xr",one_only,_ZN3std2rt10lang_start17ha354c19399061396E .globl _ZN3std2rt10lang_start17ha354c19399061396E .p2align 4, 0x90 _ZN3std2rt10lang_start17ha354c19399061396E: .seh_proc _ZN3std2rt10lang_start17ha354c19399061396E subq $56, %rsp .seh_stackalloc 56 .seh_endprologue movb %r9b, %al movq %r8, %r9 movq %rdx, %r8 movq %rcx, 48(%rsp) leaq 48(%rsp), %rcx leaq __unnamed_1(%rip), %rdx movb %al, 32(%rsp) callq _ZN3std2rt19lang_start_internal17h125867de5e07cdbcE movq %rax, 40(%rsp) movq 40(%rsp), %rax addq $56, %rsp retq .seh_endproc .def _ZN3std2rt10lang_start28_$u7b$$u7b$closure$u7d$$u7d$17heaac80a69f899197E; .scl 3; .type 32; .endef .section .text,"xr",one_only,_ZN3std2rt10lang_start28_$u7b$$u7b$closure$u7d$$u7d$17heaac80a69f899197E .p2align 4, 0x90 _ZN3std2rt10lang_start28_$u7b$$u7b$closure$u7d$$u7d$17heaac80a69f899197E: .seh_proc _ZN3std2rt10lang_start28_$u7b$$u7b$closure$u7d$$u7d$17heaac80a69f899197E subq $40, %rsp .seh_stackalloc 40 .seh_endprologue movq (%rcx), %rcx callq _ZN3std10sys_common9backtrace28__rust_begin_short_backtrace17h4d28f88769ff0699E callq _ZN54_$LT$$LP$$RP$$u20$as$u20$std..process..Termination$GT$6report17h805bfd6df14db643E nop addq $40, %rsp retq .seh_endproc .def _ZN4core3fmt9Arguments9new_const17h4b2ecf046b247fb0E; .scl 3; .type 32; .endef .section .text,"xr",one_only,_ZN4core3fmt9Arguments9new_const17h4b2ecf046b247fb0E .p2align 4, 0x90 _ZN4core3fmt9Arguments9new_const17h4b2ecf046b247fb0E: .seh_proc _ZN4core3fmt9Arguments9new_const17h4b2ecf046b247fb0E subq $136, %rsp .seh_stackalloc 136 .seh_endprologue movq %r8, 40(%rsp) movq %rdx, 48(%rsp) movq %rcx, 56(%rsp) movq %rcx, 64(%rsp) cmpq $1, %r8 ja .LBB3_2 movq 64(%rsp), %rax movq 56(%rsp), %rcx movq 40(%rsp), %rdx movq 48(%rsp), %r8 movq $0, 120(%rsp) movq %r8, (%rcx) movq %rdx, 8(%rcx) movq 120(%rsp), %r8 movq 128(%rsp), %rdx movq %r8, 32(%rcx) movq %rdx, 40(%rcx) leaq __unnamed_2(%rip), %rdx movq %rdx, 16(%rcx) movq $0, 24(%rcx) addq $136, %rsp retq .LBB3_2: leaq __unnamed_3(%rip), %rdx leaq 72(%rsp), %rcx movq %rcx, 32(%rsp) movl $1, %r8d callq _ZN4core3fmt9Arguments9new_const17h4b2ecf046b247fb0E movq 32(%rsp), %rcx leaq __unnamed_4(%rip), %rdx callq _ZN4core9panicking9panic_fmt17hf9ca4b93f563e888E ud2 .seh_endproc .def _ZN4core3ops8function6FnOnce40call_once$u7b$$u7b$vtable.shim$u7d$$u7d$17h9698054b1f0a54f3E; .scl 3; .type 32; .endef .section .text,"xr",one_only,_ZN4core3ops8function6FnOnce40call_once$u7b$$u7b$vtable.shim$u7d$$u7d$17h9698054b1f0a54f3E .p2align 4, 0x90 _ZN4core3ops8function6FnOnce40call_once$u7b$$u7b$vtable.shim$u7d$$u7d$17h9698054b1f0a54f3E: .seh_proc _ZN4core3ops8function6FnOnce40call_once$u7b$$u7b$vtable.shim$u7d$$u7d$17h9698054b1f0a54f3E subq $40, %rsp .seh_stackalloc 40 .seh_endprologue movq (%rcx), %rcx callq 
_ZN4core3ops8function6FnOnce9call_once17hd41aba6c4937dc53E nop addq $40, %rsp retq .seh_endproc .def _ZN4core3ops8function6FnOnce9call_once17h075c253ec62ed82aE; .scl 3; .type 32; .endef .section .text,"xr",one_only,_ZN4core3ops8function6FnOnce9call_once17h075c253ec62ed82aE .p2align 4, 0x90 _ZN4core3ops8function6FnOnce9call_once17h075c253ec62ed82aE: .seh_proc _ZN4core3ops8function6FnOnce9call_once17h075c253ec62ed82aE subq $40, %rsp .seh_stackalloc 40 .seh_endprologue callq *%rcx nop addq $40, %rsp retq .seh_endproc .def _ZN4core3ops8function6FnOnce9call_once17hd41aba6c4937dc53E; .scl 3; .type 32; .endef .section .text,"xr",one_only,_ZN4core3ops8function6FnOnce9call_once17hd41aba6c4937dc53E .p2align 4, 0x90 _ZN4core3ops8function6FnOnce9call_once17hd41aba6c4937dc53E: .Lfunc_begin0: .seh_proc _ZN4core3ops8function6FnOnce9call_once17hd41aba6c4937dc53E .seh_handler __CxxFrameHandler3, @unwind, @except pushq %rbp .seh_pushreg %rbp subq $64, %rsp .seh_stackalloc 64 leaq 64(%rsp), %rbp .seh_setframe %rbp, 64 .seh_endprologue movq $-2, -8(%rbp) movq %rcx, -16(%rbp) .Ltmp0: leaq -16(%rbp), %rcx callq _ZN3std2rt10lang_start28_$u7b$$u7b$closure$u7d$$u7d$17heaac80a69f899197E .Ltmp1: movl %eax, -20(%rbp) jmp .LBB6_2 .LBB6_2: movl -20(%rbp), %eax addq $64, %rsp popq %rbp retq .seh_handlerdata .long ($cppxdata$_ZN4core3ops8function6FnOnce9call_once17hd41aba6c4937dc53E)@IMGREL .section .text,"xr",one_only,_ZN4core3ops8function6FnOnce9call_once17hd41aba6c4937dc53E .seh_endproc .def "?dtor$1@?0?_ZN4core3ops8function6FnOnce9call_once17hd41aba6c4937dc53E@4HA"; .scl 3; .type 32; .endef .p2align 4, 0x90 "?dtor$1@?0?_ZN4core3ops8function6FnOnce9call_once17hd41aba6c4937dc53E@4HA": .seh_proc "?dtor$1@?0?_ZN4core3ops8function6FnOnce9call_once17hd41aba6c4937dc53E@4HA" .LBB6_1: movq %rdx, 16(%rsp) pushq %rbp .seh_pushreg %rbp subq $32, %rsp .seh_stackalloc 32 leaq 64(%rdx), %rbp .seh_endprologue addq $32, %rsp popq %rbp retq .Lfunc_end0: .seh_handlerdata .section .text,"xr",one_only,_ZN4core3ops8function6FnOnce9call_once17hd41aba6c4937dc53E .seh_endproc .section .xdata,"dr",associative,_ZN4core3ops8function6FnOnce9call_once17hd41aba6c4937dc53E .p2align 2, 0x0 $cppxdata$_ZN4core3ops8function6FnOnce9call_once17hd41aba6c4937dc53E: .long 429065506 .long 1 .long ($stateUnwindMap$_ZN4core3ops8function6FnOnce9call_once17hd41aba6c4937dc53E)@IMGREL .long 0 .long 0 .long 3 .long ($ip2state$_ZN4core3ops8function6FnOnce9call_once17hd41aba6c4937dc53E)@IMGREL .long 56 .long 0 .long 1 $stateUnwindMap$_ZN4core3ops8function6FnOnce9call_once17hd41aba6c4937dc53E: .long -1 .long "?dtor$1@?0?_ZN4core3ops8function6FnOnce9call_once17hd41aba6c4937dc53E@4HA"@IMGREL $ip2state$_ZN4core3ops8function6FnOnce9call_once17hd41aba6c4937dc53E: .long .Lfunc_begin0@IMGREL .long -1 .long .Ltmp0@IMGREL+1 .long 0 .long .Ltmp1@IMGREL+1 .long -1 .section .text,"xr",one_only,_ZN4core3ops8function6FnOnce9call_once17hd41aba6c4937dc53E .def _ZN4core3ptr85drop_in_place$LT$std..rt..lang_start$LT$$LP$$RP$$GT$..$u7b$$u7b$closure$u7d$$u7d$$GT$17h4487fcdff88bdf98E; .scl 3; .type 32; .endef .section .text,"xr",one_only,_ZN4core3ptr85drop_in_place$LT$std..rt..lang_start$LT$$LP$$RP$$GT$..$u7b$$u7b$closure$u7d$$u7d$$GT$17h4487fcdff88bdf98E .p2align 4, 0x90 _ZN4core3ptr85drop_in_place$LT$std..rt..lang_start$LT$$LP$$RP$$GT$..$u7b$$u7b$closure$u7d$$u7d$$GT$17h4487fcdff88bdf98E: retq .def _ZN54_$LT$$LP$$RP$$u20$as$u20$std..process..Termination$GT$6report17h805bfd6df14db643E; .scl 3; .type 32; .endef .section 
.text,"xr",one_only,_ZN54_$LT$$LP$$RP$$u20$as$u20$std..process..Termination$GT$6report17h805bfd6df14db643E .p2align 4, 0x90 _ZN54_$LT$$LP$$RP$$u20$as$u20$std..process..Termination$GT$6report17h805bfd6df14db643E: xorl %eax, %eax retq .def _ZN4main4main17hd80a274883898d96E; .scl 3; .type 32; .endef .section .text,"xr",one_only,_ZN4main4main17hd80a274883898d96E .p2align 4, 0x90 _ZN4main4main17hd80a274883898d96E: .seh_proc _ZN4main4main17hd80a274883898d96E subq $88, %rsp .seh_stackalloc 88 .seh_endprologue leaq 40(%rsp), %rcx leaq __unnamed_5(%rip), %rdx movl $1, %r8d callq _ZN4core3fmt9Arguments9new_const17h4b2ecf046b247fb0E leaq 40(%rsp), %rcx callq _ZN3std2io5stdio6_print17h52999c199d5d532aE nop addq $88, %rsp retq .seh_endproc .def main; .scl 2; .type 32; .endef .section .text,"xr",one_only,main .globl main .p2align 4, 0x90 main: .seh_proc main subq $40, %rsp .seh_stackalloc 40 .seh_endprologue movq %rdx, %r8 movslq %ecx, %rdx leaq _ZN4main4main17hd80a274883898d96E(%rip), %rcx xorl %r9d, %r9d callq _ZN3std2rt10lang_start17ha354c19399061396E nop addq $40, %rsp retq .seh_endproc .section .rdata,"dr",one_only,__unnamed_1 .p2align 3, 0x0 __unnamed_1: .quad _ZN4core3ptr85drop_in_place$LT$std..rt..lang_start$LT$$LP$$RP$$GT$..$u7b$$u7b$closure$u7d$$u7d$$GT$17h4487fcdff88bdf98E .asciz "\b\000\000\000\000\000\000\000\b\000\000\000\000\000\000" .quad _ZN4core3ops8function6FnOnce40call_once$u7b$$u7b$vtable.shim$u7d$$u7d$17h9698054b1f0a54f3E .quad _ZN3std2rt10lang_start28_$u7b$$u7b$closure$u7d$$u7d$17heaac80a69f899197E .quad _ZN3std2rt10lang_start28_$u7b$$u7b$closure$u7d$$u7d$17heaac80a69f899197E .section .rdata,"dr",one_only,__unnamed_2 .p2align 3, 0x0 __unnamed_2: .section .rdata,"dr",one_only,__unnamed_6 __unnamed_6: .ascii "invalid args" .section .rdata,"dr",one_only,__unnamed_3 .p2align 3, 0x0 __unnamed_3: .quad __unnamed_6 .asciz "\f\000\000\000\000\000\000" .section .rdata,"dr",one_only,__unnamed_7 __unnamed_7: .ascii "/rustc/79e9716c980570bfd1f666e3b16ac583f0168962\\library\\core\\src\\fmt\\mod.rs" .section .rdata,"dr",one_only,__unnamed_4 .p2align 3, 0x0 __unnamed_4: .quad __unnamed_7 .asciz "K\000\000\000\000\000\000\000?\001\000\000\r\000\000" .section .rdata,"dr",one_only,__unnamed_8 __unnamed_8: .ascii "Hello World\n" .section .rdata,"dr",one_only,__unnamed_5 .p2align 3, 0x0 __unnamed_5: .quad __unnamed_8 .asciz "\f\000\000\000\000\000\000"
EclesioMeloJunior/sand
1,311
aarch64.S
.data msg: .ascii "Sum = " .equ len, . - msg .bss buffer: .zero 16 .text .global _start .align 4 _start: mov x0, #5 mov x1, #10 add x2, x0, x1 adrp x0, buffer@PAGE add x0, x0, buffer@PAGEOFF mov x1, x2 bl itoa mov x0, #1 adrp x1, msg@PAGE add x1, x1, msg@PAGEOFF mov x2, #len mov x16, #4 svc 0 mov x0, #1 adrp x1, buffer@PAGE add x1, x1, buffer@PAGEOFF mov x2, #16 mov x16, #4 svc 0 mov x0, #0 mov x16, #1 svc 0 // Convert integer to ASCII (itoa function) itoa: mov x2, #10 // Base 10 mov x3, #15 // Index to store result (buffer size - 1) mov w4, #0x30 // ASCII offset ('0') itoa_loop: udiv x5, x1, x2 // Divide x1 by 10, x5 = x1 / 10 msub x6, x5, x2, x1 // Remainder = x1 - (x5 * 10) add w6, w6, w4 // Convert remainder to ASCII ('0' + remainder) strb w6, [x0, x3] // Store the ASCII character in buffer sub x3, x3, #1 // Move to the next position in the buffer mov x1, x5 // Update x1 to the quotient cbnz x1, itoa_loop // If x1 is not zero, continue looping // Null-terminate the string strb wzr, [x0, x3] // Null-terminate the string ret // Return from function
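A hypothetical C equivalent of the itoa loop above: digits are produced least-significant first, so they are written from the end of the buffer toward the front (the _ref name is illustrative):

/* Write the decimal digits of value into the tail of buf and return a
 * pointer to the first digit. */
char *itoa_ref(char *buf, unsigned long value, int buflen) {
    int pos = buflen - 1;
    do {
        buf[pos--] = (char)('0' + value % 10);   /* store digit as ASCII */
        value /= 10;
    } while (value != 0 && pos >= 0);
    return &buf[pos + 1];
}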
edl-lang/edl
4,337
library/compiler-builtins/compiler-builtins/src/hexagon/dfsqrt.s
.text .global __hexagon_sqrtdf2 .type __hexagon_sqrtdf2,@function .global __hexagon_sqrt .type __hexagon_sqrt,@function .global __qdsp_sqrtdf2 ; .set __qdsp_sqrtdf2, __hexagon_sqrtdf2; .type __qdsp_sqrtdf2,@function .global __qdsp_sqrt ; .set __qdsp_sqrt, __hexagon_sqrt; .type __qdsp_sqrt,@function .global __hexagon_fast_sqrtdf2 ; .set __hexagon_fast_sqrtdf2, __hexagon_sqrtdf2; .type __hexagon_fast_sqrtdf2,@function .global __hexagon_fast_sqrt ; .set __hexagon_fast_sqrt, __hexagon_sqrt; .type __hexagon_fast_sqrt,@function .global __hexagon_fast2_sqrtdf2 ; .set __hexagon_fast2_sqrtdf2, __hexagon_sqrtdf2; .type __hexagon_fast2_sqrtdf2,@function .global __hexagon_fast2_sqrt ; .set __hexagon_fast2_sqrt, __hexagon_sqrt; .type __hexagon_fast2_sqrt,@function .type sqrt,@function .p2align 5 __hexagon_sqrtdf2: __hexagon_sqrt: { r15:14 = extractu(r1:0,#23 +1,#52 -23) r28 = extractu(r1,#11,#52 -32) r5:4 = combine(##0x3f000004,#1) } { p2 = dfclass(r1:0,#0x02) p2 = cmp.gt(r1,#-1) if (!p2.new) jump:nt .Lsqrt_abnormal r9 = or(r5,r14) } .Ldenormal_restart: { r11:10 = r1:0 r7,p0 = sfinvsqrta(r9) r5 = and(r5,#-16) r3:2 = #0 } { r3 += sfmpy(r7,r9):lib r2 += sfmpy(r7,r5):lib r6 = r5 r9 = and(r28,#1) } { r6 -= sfmpy(r3,r2):lib r11 = insert(r4,#11 +1,#52 -32) p1 = cmp.gtu(r9,#0) } { r3 += sfmpy(r3,r6):lib r2 += sfmpy(r2,r6):lib r6 = r5 r9 = mux(p1,#8,#9) } { r6 -= sfmpy(r3,r2):lib r11:10 = asl(r11:10,r9) r9 = mux(p1,#3,#2) } { r2 += sfmpy(r2,r6):lib r15:14 = asl(r11:10,r9) } { r2 = and(r2,##0x007fffff) } { r2 = add(r2,##0x00800000 - 3) r9 = mux(p1,#7,#8) } { r8 = asl(r2,r9) r9 = mux(p1,#15-(1+1),#15-(1+0)) } { r13:12 = mpyu(r8,r15) } { r1:0 = asl(r11:10,#15) r15:14 = mpyu(r13,r13) p1 = cmp.eq(r0,r0) } { r1:0 -= asl(r15:14,#15) r15:14 = mpyu(r13,r12) p2 = cmp.eq(r0,r0) } { r1:0 -= lsr(r15:14,#16) p3 = cmp.eq(r0,r0) } { r1:0 = mpyu(r1,r8) } { r13:12 += lsr(r1:0,r9) r9 = add(r9,#16) r1:0 = asl(r11:10,#31) } { r15:14 = mpyu(r13,r13) r1:0 -= mpyu(r13,r12) } { r1:0 -= asl(r15:14,#31) r15:14 = mpyu(r12,r12) } { r1:0 -= lsr(r15:14,#33) } { r1:0 = mpyu(r1,r8) } { r13:12 += lsr(r1:0,r9) r9 = add(r9,#16) r1:0 = asl(r11:10,#47) } { r15:14 = mpyu(r13,r13) } { r1:0 -= asl(r15:14,#47) r15:14 = mpyu(r13,r12) } { r1:0 -= asl(r15:14,#16) r15:14 = mpyu(r12,r12) } { r1:0 -= lsr(r15:14,#17) } { r1:0 = mpyu(r1,r8) } { r13:12 += lsr(r1:0,r9) } { r3:2 = mpyu(r13,r12) r5:4 = mpyu(r12,r12) r15:14 = #0 r1:0 = #0 } { r3:2 += lsr(r5:4,#33) r5:4 += asl(r3:2,#33) p1 = cmp.eq(r0,r0) } { r7:6 = mpyu(r13,r13) r1:0 = sub(r1:0,r5:4,p1):carry r9:8 = #1 } { r7:6 += lsr(r3:2,#31) r9:8 += asl(r13:12,#1) } { r15:14 = sub(r11:10,r7:6,p1):carry r5:4 = sub(r1:0,r9:8,p2):carry r7:6 = #1 r11:10 = #0 } { r3:2 = sub(r15:14,r11:10,p2):carry r7:6 = add(r13:12,r7:6) r28 = add(r28,#-0x3ff) } { if (p2) r13:12 = r7:6 if (p2) r1:0 = r5:4 if (p2) r15:14 = r3:2 } { r5:4 = sub(r1:0,r9:8,p3):carry r7:6 = #1 r28 = asr(r28,#1) } { r3:2 = sub(r15:14,r11:10,p3):carry r7:6 = add(r13:12,r7:6) } { if (p3) r13:12 = r7:6 if (p3) r1:0 = r5:4 r2 = #1 } { p0 = cmp.eq(r1:0,r11:10) if (!p0.new) r12 = or(r12,r2) r3 = cl0(r13:12) r28 = add(r28,#-63) } { r1:0 = convert_ud2df(r13:12) r28 = add(r28,r3) } { r1 += asl(r28,#52 -32) jumpr r31 } .Lsqrt_abnormal: { p0 = dfclass(r1:0,#0x01) if (p0.new) jumpr:t r31 } { p0 = dfclass(r1:0,#0x10) if (p0.new) jump:nt .Lsqrt_nan } { p0 = cmp.gt(r1,#-1) if (!p0.new) jump:nt .Lsqrt_invalid_neg if (!p0.new) r28 = ##0x7F800001 } { p0 = dfclass(r1:0,#0x08) if (p0.new) jumpr:nt r31 } { r1:0 = extractu(r1:0,#52,#0) } { r28 = add(clb(r1:0),#-11) } { r1:0 = 
asl(r1:0,r28) r28 = sub(#1,r28) } { r1 = insert(r28,#1,#52 -32) } { r3:2 = extractu(r1:0,#23 +1,#52 -23) r5 = ##0x3f000004 } { r9 = or(r5,r2) r5 = and(r5,#-16) jump .Ldenormal_restart } .Lsqrt_nan: { r28 = convert_df2sf(r1:0) r1:0 = #-1 jumpr r31 } .Lsqrt_invalid_neg: { r1:0 = convert_sf2df(r28) jumpr r31 } .size __hexagon_sqrt,.-__hexagon_sqrt .size __hexagon_sqrtdf2,.-__hexagon_sqrtdf2
edl-lang/edl
3,885
library/compiler-builtins/compiler-builtins/src/hexagon/fastmath2_ldlib_asm.s
.text .global __hexagon_fast2ldadd_asm .type __hexagon_fast2ldadd_asm, @function __hexagon_fast2ldadd_asm: .falign { R4 = memw(r29+#8) R5 = memw(r29+#24) r7 = r0 } { R6 = sub(R4, R5):sat P0 = CMP.GT(R4, R5); if ( P0.new) R8 = add(R4, #1) if (!P0.new) R8 = add(R5, #1) } { R6 = abs(R6):sat if ( P0) R4 = #1 if (!P0) R5 = #1 R9 = #62 } { R6 = MIN(R6, R9) R1:0 = memd(r29+#0) R3:2 = memd(r29+#16) } { if (!P0) R4 = add(R6, #1) if ( P0) R5 = add(R6, #1) } { R1:0 = ASR(R1:0, R4) R3:2 = ASR(R3:2, R5) } { R1:0 = add(R1:0, R3:2) R3:2 = #0 } { R4 = clb(R1:0) R9.L =#0x0001 } { R8 -= add(R4, #-1) R4 = add(R4, #-1) p0 = cmp.gt(R4, #58) R9.H =#0x8000 } { if(!p0)memw(r7+#8) = R8 R1:0 = ASL(R1:0, R4) if(p0) jump .Ldenorma1 } { memd(r7+#0) = R1:0 jumpr r31 } .Ldenorma1: memd(r7+#0) = R3:2 { memw(r7+#8) = R9 jumpr r31 } .text .global __hexagon_fast2ldsub_asm .type __hexagon_fast2ldsub_asm, @function __hexagon_fast2ldsub_asm: .falign { R4 = memw(r29+#8) R5 = memw(r29+#24) r7 = r0 } { R6 = sub(R4, R5):sat P0 = CMP.GT(R4, R5); if ( P0.new) R8 = add(R4, #1) if (!P0.new) R8 = add(R5, #1) } { R6 = abs(R6):sat if ( P0) R4 = #1 if (!P0) R5 = #1 R9 = #62 } { R6 = min(R6, R9) R1:0 = memd(r29+#0) R3:2 = memd(r29+#16) } { if (!P0) R4 = add(R6, #1) if ( P0) R5 = add(R6, #1) } { R1:0 = ASR(R1:0, R4) R3:2 = ASR(R3:2, R5) } { R1:0 = sub(R1:0, R3:2) R3:2 = #0 } { R4 = clb(R1:0) R9.L =#0x0001 } { R8 -= add(R4, #-1) R4 = add(R4, #-1) p0 = cmp.gt(R4, #58) R9.H =#0x8000 } { if(!p0)memw(r7+#8) = R8 R1:0 = asl(R1:0, R4) if(p0) jump .Ldenorma_s } { memd(r7+#0) = R1:0 jumpr r31 } .Ldenorma_s: memd(r7+#0) = R3:2 { memw(r7+#8) = R9 jumpr r31 } .text .global __hexagon_fast2ldmpy_asm .type __hexagon_fast2ldmpy_asm, @function __hexagon_fast2ldmpy_asm: .falign { R15:14 = memd(r29+#0) R3:2 = memd(r29+#16) R13:12 = #0 } { R8= extractu(R2, #31, #1) R9= extractu(R14, #31, #1) R13.H = #0x8000 } { R11:10 = mpy(R15, R3) R7:6 = mpy(R15, R8) R4 = memw(r29+#8) R5 = memw(r29+#24) } { R11:10 = add(R11:10, R11:10) R7:6 += mpy(R3, R9) } { R7:6 = asr(R7:6, #30) R8.L = #0x0001 p1 = cmp.eq(R15:14, R3:2) } { R7:6 = add(R7:6, R11:10) R4= add(R4, R5) p2 = cmp.eq(R3:2, R13:12) } { R9 = clb(R7:6) R8.H = #0x8000 p1 = and(p1, p2) } { R4-= add(R9, #-1) R9 = add(R9, #-1) if(p1) jump .Lsat1 } { R7:6 = asl(R7:6, R9) memw(R0+#8) = R4 p0 = cmp.gt(R9, #58) if(p0.new) jump:NT .Ldenorm1 } { memd(R0+#0) = R7:6 jumpr r31 } .Lsat1: { R13:12 = #0 R4+= add(R9, #1) } { R13.H = #0x4000 memw(R0+#8) = R4 } { memd(R0+#0) = R13:12 jumpr r31 } .Ldenorm1: { memw(R0+#8) = R8 R15:14 = #0 } { memd(R0+#0) = R15:14 jumpr r31 }
edl-lang/edl
4,378
library/compiler-builtins/compiler-builtins/src/hexagon/dfmul.s
.text .global __hexagon_muldf3 .type __hexagon_muldf3,@function .global __qdsp_muldf3 ; .set __qdsp_muldf3, __hexagon_muldf3 .global __hexagon_fast_muldf3 ; .set __hexagon_fast_muldf3, __hexagon_muldf3 .global __hexagon_fast2_muldf3 ; .set __hexagon_fast2_muldf3, __hexagon_muldf3 .p2align 5 __hexagon_muldf3: { p0 = dfclass(r1:0,#2) p0 = dfclass(r3:2,#2) r13:12 = combine(##0x40000000,#0) } { r13:12 = insert(r1:0,#52,#11 -1) r5:4 = asl(r3:2,#11 -1) r28 = #-1024 r9:8 = #1 } { r7:6 = mpyu(r4,r13) r5:4 = insert(r9:8,#2,#62) } { r15:14 = mpyu(r12,r4) r7:6 += mpyu(r12,r5) } { r7:6 += lsr(r15:14,#32) r11:10 = mpyu(r13,r5) r5:4 = combine(##1024 +1024 -4,#0) } { r11:10 += lsr(r7:6,#32) if (!p0) jump .Lmul_abnormal p1 = cmp.eq(r14,#0) p1 = cmp.eq(r6,#0) } { if (!p1) r10 = or(r10,r8) r6 = extractu(r1,#11,#20) r7 = extractu(r3,#11,#20) } { r15:14 = neg(r11:10) r6 += add(r28,r7) r28 = xor(r1,r3) } { if (!p2.new) r11:10 = r15:14 p2 = cmp.gt(r28,#-1) p0 = !cmp.gt(r6,r5) p0 = cmp.gt(r6,r4) if (!p0.new) jump:nt .Lmul_ovf_unf } { r1:0 = convert_d2df(r11:10) r6 = add(r6,#-1024 -58) } { r1 += asl(r6,#20) jumpr r31 } .falign .Lpossible_unf1: { p0 = cmp.eq(r0,#0) p0 = bitsclr(r1,r4) if (!p0.new) jumpr:t r31 r5 = #0x7fff } { p0 = bitsset(r13,r5) r4 = USR r5 = #0x030 } { if (p0) r4 = or(r4,r5) } { USR = r4 } { p0 = dfcmp.eq(r1:0,r1:0) jumpr r31 } .falign .Lmul_ovf_unf: { r1:0 = convert_d2df(r11:10) r13:12 = abs(r11:10) r7 = add(r6,#-1024 -58) } { r1 += asl(r7,#20) r7 = extractu(r1,#11,#20) r4 = ##0x7FEFFFFF } { r7 += add(r6,##-1024 -58) r5 = #0 } { p0 = cmp.gt(r7,##1024 +1024 -2) if (p0.new) jump:nt .Lmul_ovf } { p0 = cmp.gt(r7,#0) if (p0.new) jump:nt .Lpossible_unf1 r5 = sub(r6,r5) r28 = #63 } { r4 = #0 r5 = sub(#5,r5) } { p3 = cmp.gt(r11,#-1) r5 = min(r5,r28) r11:10 = r13:12 } { r28 = USR r15:14 = extractu(r11:10,r5:4) } { r11:10 = asr(r11:10,r5) r4 = #0x0030 r1 = insert(r9,#11,#20) } { p0 = cmp.gtu(r9:8,r15:14) if (!p0.new) r10 = or(r10,r8) r11 = setbit(r11,#20 +3) } { r15:14 = neg(r11:10) p1 = bitsclr(r10,#0x7) if (!p1.new) r28 = or(r4,r28) } { if (!p3) r11:10 = r15:14 USR = r28 } { r1:0 = convert_d2df(r11:10) p0 = dfcmp.eq(r1:0,r1:0) } { r1 = insert(r9,#11 -1,#20 +1) jumpr r31 } .falign .Lmul_ovf: { r28 = USR r13:12 = combine(##0x7fefffff,#-1) r1:0 = r11:10 } { r14 = extractu(r28,#2,#22) r28 = or(r28,#0x28) r5:4 = combine(##0x7ff00000,#0) } { USR = r28 r14 ^= lsr(r1,#31) r28 = r14 } { p0 = !cmp.eq(r28,#1) p0 = !cmp.eq(r14,#2) if (p0.new) r13:12 = r5:4 p0 = dfcmp.eq(r1:0,r1:0) } { r1:0 = insert(r13:12,#63,#0) jumpr r31 } .Lmul_abnormal: { r13:12 = extractu(r1:0,#63,#0) r5:4 = extractu(r3:2,#63,#0) } { p3 = cmp.gtu(r13:12,r5:4) if (!p3.new) r1:0 = r3:2 if (!p3.new) r3:2 = r1:0 } { p0 = dfclass(r1:0,#0x0f) if (!p0.new) jump:nt .Linvalid_nan if (!p3) r13:12 = r5:4 if (!p3) r5:4 = r13:12 } { p1 = dfclass(r1:0,#0x08) p1 = dfclass(r3:2,#0x0e) } { p0 = dfclass(r1:0,#0x08) p0 = dfclass(r3:2,#0x01) } { if (p1) jump .Ltrue_inf p2 = dfclass(r3:2,#0x01) } { if (p0) jump .Linvalid_zeroinf if (p2) jump .Ltrue_zero r28 = ##0x7c000000 } { p0 = bitsclr(r1,r28) if (p0.new) jump:nt .Lmul_tiny } { r28 = cl0(r5:4) } { r28 = add(r28,#-11) } { r5:4 = asl(r5:4,r28) } { r3:2 = insert(r5:4,#63,#0) r1 -= asl(r28,#20) } jump __hexagon_muldf3 .Lmul_tiny: { r28 = USR r1:0 = xor(r1:0,r3:2) } { r28 = or(r28,#0x30) r1:0 = insert(r9:8,#63,#0) r5 = extractu(r28,#2,#22) } { USR = r28 p0 = cmp.gt(r5,#1) if (!p0.new) r0 = #0 r5 ^= lsr(r1,#31) } { p0 = cmp.eq(r5,#3) if (!p0.new) r0 = #0 jumpr r31 } .Linvalid_zeroinf: { r28 = USR } { r1:0 = #-1 r28 
= or(r28,#2) } { USR = r28 } { p0 = dfcmp.uo(r1:0,r1:0) jumpr r31 } .Linvalid_nan: { p0 = dfclass(r3:2,#0x0f) r28 = convert_df2sf(r1:0) if (p0.new) r3:2 = r1:0 } { r2 = convert_df2sf(r3:2) r1:0 = #-1 jumpr r31 } .falign .Ltrue_zero: { r1:0 = r3:2 r3:2 = r1:0 } .Ltrue_inf: { r3 = extract(r3,#1,#31) } { r1 ^= asl(r3,#31) jumpr r31 } .size __hexagon_muldf3,.-__hexagon_muldf3
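The run of mpyu/lsr instructions at the top of __hexagon_muldf3 is the standard 64x64 -> 128 multiply built from 32-bit halves; the assembly keeps only the upper half of the significand product and folds the discarded low words into a sticky bit for rounding. A sketch of that partial-product scheme in plain C:

#include <stdint.h>

/* 64x64 -> 128 unsigned multiply from four 32-bit partial products, the same
 * scheme the mpyu sequence above applies to the two significands. */
static void mul64x64_128(uint64_t a, uint64_t b, uint64_t *hi, uint64_t *lo)
{
    uint64_t aL = (uint32_t)a, aH = a >> 32;
    uint64_t bL = (uint32_t)b, bH = b >> 32;

    uint64_t ll = aL * bL;                       /* bits   0..63  */
    uint64_t lh = aL * bH;                       /* bits  32..95  */
    uint64_t hl = aH * bL;                       /* bits  32..95  */
    uint64_t hh = aH * bH;                       /* bits  64..127 */

    uint64_t mid = (ll >> 32) + (uint32_t)lh + (uint32_t)hl;
    *lo = (mid << 32) | (uint32_t)ll;
    *hi = hh + (lh >> 32) + (hl >> 32) + (mid >> 32);
}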
edl-lang/edl
7,236
library/compiler-builtins/compiler-builtins/src/hexagon/dffma.s
.text .global __hexagon_fmadf4 .type __hexagon_fmadf4,@function .global __hexagon_fmadf5 .type __hexagon_fmadf5,@function .global __qdsp_fmadf5 ; .set __qdsp_fmadf5, __hexagon_fmadf5 .p2align 5 __hexagon_fmadf4: __hexagon_fmadf5: fma: { p0 = dfclass(r1:0,#2) p0 = dfclass(r3:2,#2) r13:12 = #0 r15:14 = #0 } { r13:12 = insert(r1:0,#52,#11 -3) r15:14 = insert(r3:2,#52,#11 -3) r7 = ##0x10000000 allocframe(#32) } { r9:8 = mpyu(r12,r14) if (!p0) jump .Lfma_abnormal_ab r13 = or(r13,r7) r15 = or(r15,r7) } { p0 = dfclass(r5:4,#2) if (!p0.new) jump:nt .Lfma_abnormal_c r11:10 = combine(r7,#0) r7:6 = combine(#0,r9) } .Lfma_abnormal_c_restart: { r7:6 += mpyu(r14,r13) r11:10 = insert(r5:4,#52,#11 -3) memd(r29+#0) = r17:16 memd(r29+#8) = r19:18 } { r7:6 += mpyu(r12,r15) r19:18 = neg(r11:10) p0 = cmp.gt(r5,#-1) r28 = xor(r1,r3) } { r18 = extractu(r1,#11,#20) r19 = extractu(r3,#11,#20) r17:16 = combine(#0,r7) if (!p0) r11:10 = r19:18 } { r17:16 += mpyu(r13,r15) r9:8 = combine(r6,r8) r18 = add(r18,r19) r19 = extractu(r5,#11,#20) } { r18 = add(r18,#-1023 +(4)) p3 = !cmp.gt(r28,#-1) r7:6 = #0 r15:14 = #0 } { r7:6 = sub(r7:6,r9:8,p3):carry p0 = !cmp.gt(r28,#-1) p1 = cmp.gt(r19,r18) if (p1.new) r19:18 = combine(r18,r19) } { r15:14 = sub(r15:14,r17:16,p3):carry if (p0) r9:8 = r7:6 r7:6 = #0 r19 = sub(r18,r19) } { if (p0) r17:16 = r15:14 p0 = cmp.gt(r19,#63) if (p1) r9:8 = r7:6 if (p1) r7:6 = r9:8 } { if (p1) r17:16 = r11:10 if (p1) r11:10 = r17:16 if (p0) r19 = add(r19,#-64) r28 = #63 } { if (p0) r7:6 = r11:10 r28 = asr(r11,#31) r13 = min(r19,r28) r12 = #0 } { if (p0) r11:10 = combine(r28,r28) r5:4 = extract(r7:6,r13:12) r7:6 = lsr(r7:6,r13) r12 = sub(#64,r13) } { r15:14 = #0 r28 = #-2 r7:6 |= lsl(r11:10,r12) r11:10 = asr(r11:10,r13) } { p3 = cmp.gtu(r5:4,r15:14) if (p3.new) r6 = and(r6,r28) r15:14 = #1 r5:4 = #0 } { r9:8 = add(r7:6,r9:8,p3):carry } { r17:16 = add(r11:10,r17:16,p3):carry r28 = #62 } { r12 = add(clb(r17:16),#-2) if (!cmp.eq(r12.new,r28)) jump:t 1f } { r11:10 = extractu(r9:8,#62,#2) r9:8 = asl(r9:8,#62) r18 = add(r18,#-62) } { r17:16 = insert(r11:10,#62,#0) } { r12 = add(clb(r17:16),#-2) } .falign 1: { r11:10 = asl(r17:16,r12) r5:4 |= asl(r9:8,r12) r13 = sub(#64,r12) r18 = sub(r18,r12) } { r11:10 |= lsr(r9:8,r13) p2 = cmp.gtu(r15:14,r5:4) r28 = #1023 +1023 -2 } { if (!p2) r10 = or(r10,r14) p0 = !cmp.gt(r18,r28) p0 = cmp.gt(r18,#1) if (!p0.new) jump:nt .Lfma_ovf_unf } { p0 = cmp.gtu(r15:14,r11:10) r1:0 = convert_d2df(r11:10) r18 = add(r18,#-1023 -60) r17:16 = memd(r29+#0) } { r1 += asl(r18,#20) r19:18 = memd(r29+#8) if (!p0) dealloc_return } .Ladd_yields_zero: { r28 = USR r1:0 = #0 } { r28 = extractu(r28,#2,#22) r17:16 = memd(r29+#0) r19:18 = memd(r29+#8) } { p0 = cmp.eq(r28,#2) if (p0.new) r1 = ##0x80000000 dealloc_return } .Lfma_ovf_unf: { p0 = cmp.gtu(r15:14,r11:10) if (p0.new) jump:nt .Ladd_yields_zero } { r1:0 = convert_d2df(r11:10) r18 = add(r18,#-1023 -60) r28 = r18 } { r1 += asl(r18,#20) r7 = extractu(r1,#11,#20) } { r6 = add(r18,r7) r17:16 = memd(r29+#0) r19:18 = memd(r29+#8) r9:8 = abs(r11:10) } { p0 = cmp.gt(r6,##1023 +1023) if (p0.new) jump:nt .Lfma_ovf } { p0 = cmp.gt(r6,#0) if (p0.new) jump:nt .Lpossible_unf0 } { r7 = add(clb(r9:8),#-2) r6 = sub(#1+5,r28) p3 = cmp.gt(r11,#-1) } { r6 = add(r6,r7) r9:8 = asl(r9:8,r7) r1 = USR r28 = #63 } { r7 = min(r6,r28) r6 = #0 r0 = #0x0030 } { r3:2 = extractu(r9:8,r7:6) r9:8 = asr(r9:8,r7) } { p0 = cmp.gtu(r15:14,r3:2) if (!p0.new) r8 = or(r8,r14) r9 = setbit(r9,#20 +3) } { r11:10 = neg(r9:8) p1 = bitsclr(r8,#(1<<3)-1) if (!p1.new) r1 = or(r1,r0) 
r3:2 = #0 } { if (p3) r11:10 = r9:8 USR = r1 r28 = #-1023 -(52 +3) } { r1:0 = convert_d2df(r11:10) } { r1 += asl(r28,#20) dealloc_return } .Lpossible_unf0: { r28 = ##0x7fefffff r9:8 = abs(r11:10) } { p0 = cmp.eq(r0,#0) p0 = bitsclr(r1,r28) if (!p0.new) dealloc_return:t r28 = #0x7fff } { p0 = bitsset(r9,r28) r3 = USR r2 = #0x0030 } { if (p0) r3 = or(r3,r2) } { USR = r3 } { p0 = dfcmp.eq(r1:0,r1:0) dealloc_return } .Lfma_ovf: { r28 = USR r11:10 = combine(##0x7fefffff,#-1) r1:0 = r11:10 } { r9:8 = combine(##0x7ff00000,#0) r3 = extractu(r28,#2,#22) r28 = or(r28,#0x28) } { USR = r28 r3 ^= lsr(r1,#31) r2 = r3 } { p0 = !cmp.eq(r2,#1) p0 = !cmp.eq(r3,#2) } { p0 = dfcmp.eq(r9:8,r9:8) if (p0.new) r11:10 = r9:8 } { r1:0 = insert(r11:10,#63,#0) dealloc_return } .Lfma_abnormal_ab: { r9:8 = extractu(r1:0,#63,#0) r11:10 = extractu(r3:2,#63,#0) deallocframe } { p3 = cmp.gtu(r9:8,r11:10) if (!p3.new) r1:0 = r3:2 if (!p3.new) r3:2 = r1:0 } { p0 = dfclass(r1:0,#0x0f) if (!p0.new) jump:nt .Lnan if (!p3) r9:8 = r11:10 if (!p3) r11:10 = r9:8 } { p1 = dfclass(r1:0,#0x08) p1 = dfclass(r3:2,#0x0e) } { p0 = dfclass(r1:0,#0x08) p0 = dfclass(r3:2,#0x01) } { if (p1) jump .Lab_inf p2 = dfclass(r3:2,#0x01) } { if (p0) jump .Linvalid if (p2) jump .Lab_true_zero r28 = ##0x7c000000 } { p0 = bitsclr(r1,r28) if (p0.new) jump:nt .Lfma_ab_tiny } { r28 = add(clb(r11:10),#-11) } { r11:10 = asl(r11:10,r28) } { r3:2 = insert(r11:10,#63,#0) r1 -= asl(r28,#20) } jump fma .Lfma_ab_tiny: r9:8 = combine(##0x00100000,#0) { r1:0 = insert(r9:8,#63,#0) r3:2 = insert(r9:8,#63,#0) } jump fma .Lab_inf: { r3:2 = lsr(r3:2,#63) p0 = dfclass(r5:4,#0x10) } { r1:0 ^= asl(r3:2,#63) if (p0) jump .Lnan } { p1 = dfclass(r5:4,#0x08) if (p1.new) jump:nt .Lfma_inf_plus_inf } { jumpr r31 } .falign .Lfma_inf_plus_inf: { p0 = dfcmp.eq(r1:0,r5:4) if (!p0.new) jump:nt .Linvalid } { jumpr r31 } .Lnan: { p0 = dfclass(r3:2,#0x10) p1 = dfclass(r5:4,#0x10) if (!p0.new) r3:2 = r1:0 if (!p1.new) r5:4 = r1:0 } { r3 = convert_df2sf(r3:2) r2 = convert_df2sf(r5:4) } { r3 = convert_df2sf(r1:0) r1:0 = #-1 jumpr r31 } .Linvalid: { r28 = ##0x7f800001 } { r1:0 = convert_sf2df(r28) jumpr r31 } .Lab_true_zero: { p0 = dfclass(r5:4,#0x10) if (p0.new) jump:nt .Lnan if (p0.new) r1:0 = r5:4 } { p0 = dfcmp.eq(r3:2,r5:4) r1 = lsr(r1,#31) } { r3 ^= asl(r1,#31) if (!p0) r1:0 = r5:4 if (!p0) jumpr r31 } { p0 = cmp.eq(r3:2,r5:4) if (p0.new) jumpr:t r31 r1:0 = r3:2 } { r28 = USR } { r28 = extractu(r28,#2,#22) r1:0 = #0 } { p0 = cmp.eq(r28,#2) if (p0.new) r1 = ##0x80000000 jumpr r31 } .falign .Lfma_abnormal_c: { p0 = dfclass(r5:4,#0x10) if (p0.new) jump:nt .Lnan if (p0.new) r1:0 = r5:4 deallocframe } { p0 = dfclass(r5:4,#0x08) if (p0.new) r1:0 = r5:4 if (p0.new) jumpr:nt r31 } { p0 = dfclass(r5:4,#0x01) if (p0.new) jump:nt __hexagon_muldf3 r28 = #1 } { allocframe(#32) r11:10 = #0 r5 = insert(r28,#11,#20) jump .Lfma_abnormal_c_restart } .size fma,.-fma
edl-lang/edl
4,801
library/compiler-builtins/compiler-builtins/src/hexagon/dfaddsub.s
.text .global __hexagon_adddf3 .global __hexagon_subdf3 .type __hexagon_adddf3, @function .type __hexagon_subdf3, @function .global __qdsp_adddf3 ; .set __qdsp_adddf3, __hexagon_adddf3 .global __hexagon_fast_adddf3 ; .set __hexagon_fast_adddf3, __hexagon_adddf3 .global __hexagon_fast2_adddf3 ; .set __hexagon_fast2_adddf3, __hexagon_adddf3 .global __qdsp_subdf3 ; .set __qdsp_subdf3, __hexagon_subdf3 .global __hexagon_fast_subdf3 ; .set __hexagon_fast_subdf3, __hexagon_subdf3 .global __hexagon_fast2_subdf3 ; .set __hexagon_fast2_subdf3, __hexagon_subdf3 .p2align 5 __hexagon_adddf3: { r4 = extractu(r1,#11,#20) r5 = extractu(r3,#11,#20) r13:12 = combine(##0x20000000,#0) } { p3 = dfclass(r1:0,#2) p3 = dfclass(r3:2,#2) r9:8 = r13:12 p2 = cmp.gtu(r5,r4) } { if (!p3) jump .Ladd_abnormal if (p2) r1:0 = r3:2 if (p2) r3:2 = r1:0 if (p2) r5:4 = combine(r4,r5) } { r13:12 = insert(r1:0,#52,#11 -2) r9:8 = insert(r3:2,#52,#11 -2) r15 = sub(r4,r5) r7:6 = combine(#62,#1) } .Ladd_continue: { r15 = min(r15,r7) r11:10 = neg(r13:12) p2 = cmp.gt(r1,#-1) r14 = #0 } { if (!p2) r13:12 = r11:10 r11:10 = extractu(r9:8,r15:14) r9:8 = ASR(r9:8,r15) r15:14 = #0 } { p1 = cmp.eq(r11:10,r15:14) if (!p1.new) r8 = or(r8,r6) r5 = add(r4,#-1024 -60) p3 = cmp.gt(r3,#-1) } { r13:12 = add(r13:12,r9:8) r11:10 = sub(r13:12,r9:8) r7:6 = combine(#54,##2045) } { p0 = cmp.gtu(r4,r7) p0 = !cmp.gtu(r4,r6) if (!p0.new) jump:nt .Ladd_ovf_unf if (!p3) r13:12 = r11:10 } { r1:0 = convert_d2df(r13:12) p0 = cmp.eq(r13,#0) p0 = cmp.eq(r12,#0) if (p0.new) jump:nt .Ladd_zero } { r1 += asl(r5,#20) jumpr r31 } .falign __hexagon_subdf3: { r3 = togglebit(r3,#31) jump __qdsp_adddf3 } .falign .Ladd_zero: { r28 = USR r1:0 = #0 r3 = #1 } { r28 = extractu(r28,#2,#22) r3 = asl(r3,#31) } { p0 = cmp.eq(r28,#2) if (p0.new) r1 = xor(r1,r3) jumpr r31 } .falign .Ladd_ovf_unf: { r1:0 = convert_d2df(r13:12) p0 = cmp.eq(r13,#0) p0 = cmp.eq(r12,#0) if (p0.new) jump:nt .Ladd_zero } { r28 = extractu(r1,#11,#20) r1 += asl(r5,#20) } { r5 = add(r5,r28) r3:2 = combine(##0x00100000,#0) } { p0 = cmp.gt(r5,##1024 +1024 -2) if (p0.new) jump:nt .Ladd_ovf } { p0 = cmp.gt(r5,#0) if (p0.new) jumpr:t r31 r28 = sub(#1,r5) } { r3:2 = insert(r1:0,#52,#0) r1:0 = r13:12 } { r3:2 = lsr(r3:2,r28) } { r1:0 = insert(r3:2,#63,#0) jumpr r31 } .falign .Ladd_ovf: { r1:0 = r13:12 r28 = USR r13:12 = combine(##0x7fefffff,#-1) } { r5 = extractu(r28,#2,#22) r28 = or(r28,#0x28) r9:8 = combine(##0x7ff00000,#0) } { USR = r28 r5 ^= lsr(r1,#31) r28 = r5 } { p0 = !cmp.eq(r28,#1) p0 = !cmp.eq(r5,#2) if (p0.new) r13:12 = r9:8 } { r1:0 = insert(r13:12,#63,#0) } { p0 = dfcmp.eq(r1:0,r1:0) jumpr r31 } .Ladd_abnormal: { r13:12 = extractu(r1:0,#63,#0) r9:8 = extractu(r3:2,#63,#0) } { p3 = cmp.gtu(r13:12,r9:8) if (!p3.new) r1:0 = r3:2 if (!p3.new) r3:2 = r1:0 } { p0 = dfclass(r1:0,#0x0f) if (!p0.new) jump:nt .Linvalid_nan_add if (!p3) r13:12 = r9:8 if (!p3) r9:8 = r13:12 } { p1 = dfclass(r1:0,#0x08) if (p1.new) jump:nt .Linf_add } { p2 = dfclass(r3:2,#0x01) if (p2.new) jump:nt .LB_zero r13:12 = #0 } { p0 = dfclass(r1:0,#4) if (p0.new) jump:nt .Ladd_two_subnormal r13:12 = combine(##0x20000000,#0) } { r4 = extractu(r1,#11,#20) r5 = #1 r9:8 = asl(r9:8,#11 -2) } { r13:12 = insert(r1:0,#52,#11 -2) r15 = sub(r4,r5) r7:6 = combine(#62,#1) jump .Ladd_continue } .Ladd_two_subnormal: { r13:12 = extractu(r1:0,#63,#0) r9:8 = extractu(r3:2,#63,#0) } { r13:12 = neg(r13:12) r9:8 = neg(r9:8) p0 = cmp.gt(r1,#-1) p1 = cmp.gt(r3,#-1) } { if (p0) r13:12 = r1:0 if (p1) r9:8 = r3:2 } { r13:12 = add(r13:12,r9:8) } { r9:8 = neg(r13:12) 
p0 = cmp.gt(r13,#-1) r3:2 = #0 } { if (!p0) r1:0 = r9:8 if (p0) r1:0 = r13:12 r3 = ##0x80000000 } { if (!p0) r1 = or(r1,r3) p0 = dfcmp.eq(r1:0,r3:2) if (p0.new) jump:nt .Lzero_plus_zero } { jumpr r31 } .Linvalid_nan_add: { r28 = convert_df2sf(r1:0) p0 = dfclass(r3:2,#0x0f) if (p0.new) r3:2 = r1:0 } { r2 = convert_df2sf(r3:2) r1:0 = #-1 jumpr r31 } .falign .LB_zero: { p0 = dfcmp.eq(r13:12,r1:0) if (!p0.new) jumpr:t r31 } .Lzero_plus_zero: { p0 = cmp.eq(r1:0,r3:2) if (p0.new) jumpr:t r31 } { r28 = USR } { r28 = extractu(r28,#2,#22) r1:0 = #0 } { p0 = cmp.eq(r28,#2) if (p0.new) r1 = ##0x80000000 jumpr r31 } .Linf_add: { p0 = !cmp.eq(r1,r3) p0 = dfclass(r3:2,#8) if (!p0.new) jumpr:t r31 } { r2 = ##0x7f800001 } { r1:0 = convert_sf2df(r2) jumpr r31 } .size __hexagon_adddf3,.-__hexagon_adddf3
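Before the significands are added, __hexagon_adddf3 shifts the operand with the smaller exponent right by the exponent difference (clamped to 62 with min) and, when the extractu of the bits about to be shifted out is non-zero, ORs a sticky bit into the shifted value so that rounding still sees the lost precision. A small C sketch of that alignment step, on bare unsigned significands rather than full doubles:

#include <stdint.h>

/* Align a significand for addition: shift right by the exponent difference
 * and keep a sticky bit for anything shifted out, as the assembly does. */
static uint64_t align_with_sticky(uint64_t sig, unsigned shift)
{
    if (shift > 62)
        shift = 62;                              /* the asm clamps with min(#62) */
    uint64_t dropped = shift ? sig & ((1ULL << shift) - 1) : 0;
    uint64_t aligned = sig >> shift;
    if (dropped)
        aligned |= 1;                            /* sticky: "something was lost" */
    return aligned;
}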
edl-lang/edl
1,295
library/compiler-builtins/compiler-builtins/src/hexagon/memcpy_forward_vp4cp4n2.s
.text .globl hexagon_memcpy_forward_vp4cp4n2 .balign 32 .type hexagon_memcpy_forward_vp4cp4n2,@function hexagon_memcpy_forward_vp4cp4n2: { r3 = sub(##4096, r1) r5 = lsr(r2, #3) } { r3 = extractu(r3, #10, #2) r4 = extractu(r3, #7, #5) } { r3 = minu(r2, r3) r4 = minu(r5, r4) } { r4 = or(r4, ##2105344) p0 = cmp.eq(r3, #0) if (p0.new) jump:nt .Lskipprolog } l2fetch(r1, r4) { loop0(.Lprolog, r3) r2 = sub(r2, r3) } .falign .Lprolog: { r4 = memw(r1++#4) memw(r0++#4) = r4.new } :endloop0 .Lskipprolog: { r3 = lsr(r2, #10) if (cmp.eq(r3.new, #0)) jump:nt .Lskipmain } { loop1(.Lout, r3) r2 = extractu(r2, #10, #0) r3 = ##2105472 } .falign .Lout: l2fetch(r1, r3) loop0(.Lpage, #512) .falign .Lpage: r5:4 = memd(r1++#8) { memw(r0++#8) = r4 memw(r0+#4) = r5 } :endloop0:endloop1 .Lskipmain: { r3 = ##2105344 r4 = lsr(r2, #3) p0 = cmp.eq(r2, #0) if (p0.new) jumpr:nt r31 } { r3 = or(r3, r4) loop0(.Lepilog, r2) } l2fetch(r1, r3) .falign .Lepilog: { r4 = memw(r1++#4) memw(r0++#4) = r4.new } :endloop0 jumpr r31 .size hexagon_memcpy_forward_vp4cp4n2, . - hexagon_memcpy_forward_vp4cp4n2
edl-lang/edl
5,659
library/compiler-builtins/compiler-builtins/src/hexagon/dfdiv.s
.text .global __hexagon_divdf3 .type __hexagon_divdf3,@function .global __qdsp_divdf3 ; .set __qdsp_divdf3, __hexagon_divdf3 .global __hexagon_fast_divdf3 ; .set __hexagon_fast_divdf3, __hexagon_divdf3 .global __hexagon_fast2_divdf3 ; .set __hexagon_fast2_divdf3, __hexagon_divdf3 .p2align 5 __hexagon_divdf3: { p2 = dfclass(r1:0,#0x02) p2 = dfclass(r3:2,#0x02) r13:12 = combine(r3,r1) r28 = xor(r1,r3) } { if (!p2) jump .Ldiv_abnormal r7:6 = extractu(r3:2,#23,#52 -23) r8 = ##0x3f800001 } { r9 = or(r8,r6) r13 = extractu(r13,#11,#52 -32) r12 = extractu(r12,#11,#52 -32) p3 = cmp.gt(r28,#-1) } .Ldenorm_continue: { r11,p0 = sfrecipa(r8,r9) r10 = and(r8,#-2) r28 = #1 r12 = sub(r12,r13) } { r10 -= sfmpy(r11,r9):lib r1 = insert(r28,#11 +1,#52 -32) r13 = ##0x00800000 << 3 } { r11 += sfmpy(r11,r10):lib r3 = insert(r28,#11 +1,#52 -32) r10 = and(r8,#-2) } { r10 -= sfmpy(r11,r9):lib r5 = #-0x3ff +1 r4 = #0x3ff -1 } { r11 += sfmpy(r11,r10):lib p1 = cmp.gt(r12,r5) p1 = !cmp.gt(r12,r4) } { r13 = insert(r11,#23,#3) r5:4 = #0 r12 = add(r12,#-61) } { r13 = add(r13,#((-3) << 3)) } { r7:6 = mpyu(r13,r1); r1:0 = asl(r1:0,# ( 15 )); }; { r6 = # 0; r1:0 -= mpyu(r7,r2); r15:14 = mpyu(r7,r3); }; { r5:4 += ASL(r7:6, # ( 14 )); r1:0 -= asl(r15:14, # 32); } { r7:6 = mpyu(r13,r1); r1:0 = asl(r1:0,# ( 15 )); }; { r6 = # 0; r1:0 -= mpyu(r7,r2); r15:14 = mpyu(r7,r3); }; { r5:4 += ASR(r7:6, # ( 1 )); r1:0 -= asl(r15:14, # 32); } { r7:6 = mpyu(r13,r1); r1:0 = asl(r1:0,# ( 15 )); }; { r6 = # 0; r1:0 -= mpyu(r7,r2); r15:14 = mpyu(r7,r3); }; { r5:4 += ASR(r7:6, # ( 16 )); r1:0 -= asl(r15:14, # 32); } { r7:6 = mpyu(r13,r1); r1:0 = asl(r1:0,# ( 15 )); }; { r6 = # 0; r1:0 -= mpyu(r7,r2); r15:14 = mpyu(r7,r3); }; { r5:4 += ASR(r7:6, # ( 31 )); r1:0 -= asl(r15:14, # 32); r7:6=# ( 0 ); } { r15:14 = sub(r1:0,r3:2) p0 = cmp.gtu(r3:2,r1:0) if (!p0.new) r6 = #2 } { r5:4 = add(r5:4,r7:6) if (!p0) r1:0 = r15:14 r15:14 = #0 } { p0 = cmp.eq(r1:0,r15:14) if (!p0.new) r4 = or(r4,r28) } { r7:6 = neg(r5:4) } { if (!p3) r5:4 = r7:6 } { r1:0 = convert_d2df(r5:4) if (!p1) jump .Ldiv_ovf_unf } { r1 += asl(r12,#52 -32) jumpr r31 } .Ldiv_ovf_unf: { r1 += asl(r12,#52 -32) r13 = extractu(r1,#11,#52 -32) } { r7:6 = abs(r5:4) r12 = add(r12,r13) } { p0 = cmp.gt(r12,##0x3ff +0x3ff) if (p0.new) jump:nt .Ldiv_ovf } { p0 = cmp.gt(r12,#0) if (p0.new) jump:nt .Lpossible_unf2 } { r13 = add(clb(r7:6),#-1) r12 = sub(#7,r12) r10 = USR r11 = #63 } { r13 = min(r12,r11) r11 = or(r10,#0x030) r7:6 = asl(r7:6,r13) r12 = #0 } { r15:14 = extractu(r7:6,r13:12) r7:6 = lsr(r7:6,r13) r3:2 = #1 } { p0 = cmp.gtu(r3:2,r15:14) if (!p0.new) r6 = or(r2,r6) r7 = setbit(r7,#52 -32+4) } { r5:4 = neg(r7:6) p0 = bitsclr(r6,#(1<<4)-1) if (!p0.new) r10 = r11 } { USR = r10 if (p3) r5:4 = r7:6 r10 = #-0x3ff -(52 +4) } { r1:0 = convert_d2df(r5:4) } { r1 += asl(r10,#52 -32) jumpr r31 } .Lpossible_unf2: { r3:2 = extractu(r1:0,#63,#0) r15:14 = combine(##0x00100000,#0) r10 = #0x7FFF } { p0 = dfcmp.eq(r15:14,r3:2) p0 = bitsset(r7,r10) } { if (!p0) jumpr r31 r10 = USR } { r10 = or(r10,#0x30) } { USR = r10 } { p0 = dfcmp.eq(r1:0,r1:0) jumpr r31 } .Ldiv_ovf: { r10 = USR r3:2 = combine(##0x7fefffff,#-1) r1 = mux(p3,#0,#-1) } { r7:6 = combine(##0x7ff00000,#0) r5 = extractu(r10,#2,#22) r10 = or(r10,#0x28) } { USR = r10 r5 ^= lsr(r1,#31) r4 = r5 } { p0 = !cmp.eq(r4,#1) p0 = !cmp.eq(r5,#2) if (p0.new) r3:2 = r7:6 p0 = dfcmp.eq(r3:2,r3:2) } { r1:0 = insert(r3:2,#63,#0) jumpr r31 } .Ldiv_abnormal: { p0 = dfclass(r1:0,#0x0F) p0 = dfclass(r3:2,#0x0F) p3 = cmp.gt(r28,#-1) } { p1 = dfclass(r1:0,#0x08) p1 = 
dfclass(r3:2,#0x08) } { p2 = dfclass(r1:0,#0x01) p2 = dfclass(r3:2,#0x01) } { if (!p0) jump .Ldiv_nan if (p1) jump .Ldiv_invalid } { if (p2) jump .Ldiv_invalid } { p2 = dfclass(r1:0,#(0x0F ^ 0x01)) p2 = dfclass(r3:2,#(0x0F ^ 0x08)) } { p1 = dfclass(r1:0,#(0x0F ^ 0x08)) p1 = dfclass(r3:2,#(0x0F ^ 0x01)) } { if (!p2) jump .Ldiv_zero_result if (!p1) jump .Ldiv_inf_result } { p0 = dfclass(r1:0,#0x02) p1 = dfclass(r3:2,#0x02) r10 = ##0x00100000 } { r13:12 = combine(r3,r1) r1 = insert(r10,#11 +1,#52 -32) r3 = insert(r10,#11 +1,#52 -32) } { if (p0) r1 = or(r1,r10) if (p1) r3 = or(r3,r10) } { r5 = add(clb(r1:0),#-11) r4 = add(clb(r3:2),#-11) r10 = #1 } { r12 = extractu(r12,#11,#52 -32) r13 = extractu(r13,#11,#52 -32) } { r1:0 = asl(r1:0,r5) r3:2 = asl(r3:2,r4) if (!p0) r12 = sub(r10,r5) if (!p1) r13 = sub(r10,r4) } { r7:6 = extractu(r3:2,#23,#52 -23) } { r9 = or(r8,r6) jump .Ldenorm_continue } .Ldiv_zero_result: { r1 = xor(r1,r3) r3:2 = #0 } { r1:0 = insert(r3:2,#63,#0) jumpr r31 } .Ldiv_inf_result: { p2 = dfclass(r3:2,#0x01) p2 = dfclass(r1:0,#(0x0F ^ 0x08)) } { r10 = USR if (!p2) jump 1f r1 = xor(r1,r3) } { r10 = or(r10,#0x04) } { USR = r10 } 1: { r3:2 = combine(##0x7ff00000,#0) p0 = dfcmp.uo(r3:2,r3:2) } { r1:0 = insert(r3:2,#63,#0) jumpr r31 } .Ldiv_nan: { p0 = dfclass(r1:0,#0x10) p1 = dfclass(r3:2,#0x10) if (!p0.new) r1:0 = r3:2 if (!p1.new) r3:2 = r1:0 } { r5 = convert_df2sf(r1:0) r4 = convert_df2sf(r3:2) } { r1:0 = #-1 jumpr r31 } .Ldiv_invalid: { r10 = ##0x7f800001 } { r1:0 = convert_sf2df(r10) jumpr r31 } .size __hexagon_divdf3,.-__hexagon_divdf3
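The opening of __hexagon_divdf3 is a textbook Newton-Raphson reciprocal: sfrecipa supplies a rough seed x for 1/d, and each sfmpy pair computes err = 1 - d*x and then x += x*err, roughly doubling the number of correct bits per step before the fixed-point quotient refinement takes over. A sketch of just that iteration (single precision, as in the assembly):

/* Newton-Raphson refinement of a reciprocal seed, the pattern behind the
 * "r10 -= sfmpy(r11,r9); r11 += sfmpy(r11,r10)" pairs above. */
static float refine_reciprocal(float d, float seed, int steps)
{
    float x = seed;
    for (int i = 0; i < steps; i++) {
        float err = 1.0f - d * x;     /* residual: how far d*x is from 1 */
        x = x + x * err;              /* quadratically convergent update */
    }
    return x;
}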
edl-lang/edl
5,120
library/compiler-builtins/compiler-builtins/src/hexagon/fastmath2_dlib_asm.s
.text .global __hexagon_fast2_dadd_asm .type __hexagon_fast2_dadd_asm, @function __hexagon_fast2_dadd_asm: .falign { R7:6 = VABSDIFFH(R1:0, R3:2) R9 = #62 R4 = SXTH(R0) R5 = SXTH(R2) } { R6 = SXTH(R6) P0 = CMP.GT(R4, R5); if ( P0.new) R8 = add(R4, #1) if (!P0.new) R8 = add(R5, #1) } { if ( P0) R4 = #1 if (!P0) R5 = #1 R0.L = #0 R6 = MIN(R6, R9) } { if (!P0) R4 = add(R6, #1) if ( P0) R5 = add(R6, #1) R2.L = #0 R11:10 = #0 } { R1:0 = ASR(R1:0, R4) R3:2 = ASR(R3:2, R5) } { R1:0 = add(R1:0, R3:2) R10.L = #0x8001 } { R4 = clb(R1:0) R9 = #58 } { R4 = add(R4, #-1) p0 = cmp.gt(R4, R9) } { R1:0 = ASL(R1:0, R4) R8 = SUB(R8, R4) if(p0) jump .Ldenorma } { R0 = insert(R8, #16, #0) jumpr r31 } .Ldenorma: { R1:0 = R11:10 jumpr r31 } .text .global __hexagon_fast2_dsub_asm .type __hexagon_fast2_dsub_asm, @function __hexagon_fast2_dsub_asm: .falign { R7:6 = VABSDIFFH(R1:0, R3:2) R9 = #62 R4 = SXTH(R0) R5 = SXTH(R2) } { R6 = SXTH(R6) P0 = CMP.GT(R4, R5); if ( P0.new) R8 = add(R4, #1) if (!P0.new) R8 = add(R5, #1) } { if ( P0) R4 = #1 if (!P0) R5 = #1 R0.L = #0 R6 = MIN(R6, R9) } { if (!P0) R4 = add(R6, #1) if ( P0) R5 = add(R6, #1) R2.L = #0 R11:10 = #0 } { R1:0 = ASR(R1:0, R4) R3:2 = ASR(R3:2, R5) } { R1:0 = sub(R1:0, R3:2) R10.L = #0x8001 } { R4 = clb(R1:0) R9 = #58 } { R4 = add(R4, #-1) p0 = cmp.gt(R4, R9) } { R1:0 = ASL(R1:0, R4) R8 = SUB(R8, R4) if(p0) jump .Ldenorm } { R0 = insert(R8, #16, #0) jumpr r31 } .Ldenorm: { R1:0 = R11:10 jumpr r31 } .text .global __hexagon_fast2_dmpy_asm .type __hexagon_fast2_dmpy_asm, @function __hexagon_fast2_dmpy_asm: .falign { R13= lsr(R2, #16) R5 = sxth(R2) R4 = sxth(R0) R12= lsr(R0, #16) } { R11:10 = mpy(R1, R3) R7:6 = mpy(R1, R13) R0.L = #0x0 R15:14 = #0 } { R11:10 = add(R11:10, R11:10) R7:6 += mpy(R3, R12) R2.L = #0x0 R15.H = #0x8000 } { R7:6 = asr(R7:6, #15) R12.L = #0x8001 p1 = cmp.eq(R1:0, R3:2) } { R7:6 = add(R7:6, R11:10) R8 = add(R4, R5) p2 = cmp.eq(R1:0, R15:14) } { R9 = clb(R7:6) R3:2 = abs(R7:6) R11 = #58 } { p1 = and(p1, p2) R8 = sub(R8, R9) R9 = add(R9, #-1) p0 = cmp.gt(R9, R11) } { R8 = add(R8, #1) R1:0 = asl(R7:6, R9) if(p1) jump .Lsat } { R0 = insert(R8,#16, #0) if(!p0) jumpr r31 } { R0 = insert(R12,#16, #0) jumpr r31 } .Lsat: { R1:0 = #-1 } { R1:0 = lsr(R1:0, #1) } { R0 = insert(R8,#16, #0) jumpr r31 } .text .global __hexagon_fast2_qd2f_asm .type __hexagon_fast2_qd2f_asm, @function __hexagon_fast2_qd2f_asm: .falign { R3 = abs(R1):sat R4 = sxth(R0) R5 = #0x40 R6.L = #0xffc0 } { R0 = extractu(R3, #8, #0) p2 = cmp.gt(R4, #126) p3 = cmp.ge(R4, #-126) R6.H = #0x7fff } { p1 = cmp.eq(R0,#0x40) if(p1.new) R5 = #0 R4 = add(R4, #126) if(!p3) jump .Lmin } { p0 = bitsset(R3, R6) R0.L = #0x0000 R2 = add(R3, R5) R7 = lsr(R6, #8) } { if(p0) R4 = add(R4, #1) if(p0) R3 = #0 R2 = lsr(R2, #7) R0.H = #0x8000 } { R0 = and(R0, R1) R6 &= asl(R4, #23) if(!p0) R3 = and(R2, R7) if(p2) jump .Lmax } { R0 += add(R6, R3) jumpr r31 } .Lmax: { R0.L = #0xffff; } { R0.H = #0x7f7f; jumpr r31 } .Lmin: { R0 = #0x0 jumpr r31 } .text .global __hexagon_fast2_f2qd_asm .type __hexagon_fast2_f2qd_asm, @function __hexagon_fast2_f2qd_asm: .falign { R1 = asl(R0, #7) p0 = tstbit(R0, #31) R5:4 = #0 R3 = add(R0,R0) } { R1 = setbit(R1, #30) R0= extractu(R0,#8,#23) R4.L = #0x8001 p1 = cmp.eq(R3, #0) } { R1= extractu(R1, #31, #0) R0= add(R0, #-126) R2 = #0 if(p1) jump .Lminqd } { R0 = zxth(R0) if(p0) R1= sub(R2, R1) jumpr r31 } .Lminqd: { R1:0 = R5:4 jumpr r31 }
edl-lang/edl
11,809
library/std/src/sys/pal/sgx/abi/entry.S
/* This symbol is used at runtime to figure out the virtual address that the */ /* enclave is loaded at. */ .section absolute .global IMAGE_BASE IMAGE_BASE: .section ".note.x86_64-fortanix-unknown-sgx", "", @note .align 4 .long 1f - 0f /* name length (not including padding) */ .long 3f - 2f /* desc length (not including padding) */ .long 1 /* type = NT_VERSION */ 0: .asciz "toolchain-version" /* name */ 1: .align 4 2: .long 1 /* desc - toolchain version number, 32-bit LE */ 3: .align 4 .section .rodata /* The XSAVE area needs to be a large chunk of readable memory, but since we are */ /* going to restore everything to its initial state (XSTATE_BV=0), only certain */ /* parts need to have a defined value. In particular: */ /* */ /* * MXCSR in the legacy area. This register is always restored if RFBM[1] or */ /* RFBM[2] is set, regardless of the value of XSTATE_BV */ /* * XSAVE header */ .align 64 .Lxsave_clear: .org .+24 .Lxsave_mxcsr: .short 0x1fbf /* We can store a bunch of data in the gap between MXCSR and the XSAVE header */ /* The following symbols point at read-only data that will be filled in by the */ /* post-linker. */ /* When using this macro, don't forget to adjust the linker version script! */ .macro globvar name:req size:req .global \name .protected \name .align \size .size \name , \size \name : .org .+\size .endm /* The base address (relative to enclave start) of the heap area */ globvar HEAP_BASE 8 /* The heap size in bytes */ globvar HEAP_SIZE 8 /* Value of the RELA entry in the dynamic table */ globvar RELA 8 /* Value of the RELACOUNT entry in the dynamic table */ globvar RELACOUNT 8 /* The enclave size in bytes */ globvar ENCLAVE_SIZE 8 /* The base address (relative to enclave start) of the enclave configuration area */ globvar CFGDATA_BASE 8 /* Non-zero if debugging is enabled, zero otherwise */ globvar DEBUG 1 /* The base address (relative to enclave start) of the enclave text section */ globvar TEXT_BASE 8 /* The size in bytes of enclave text section */ globvar TEXT_SIZE 8 /* The base address (relative to enclave start) of the enclave .eh_frame_hdr section */ globvar EH_FRM_HDR_OFFSET 8 /* The size in bytes of enclave .eh_frame_hdr section */ globvar EH_FRM_HDR_LEN 8 /* The base address (relative to enclave start) of the enclave .eh_frame section */ globvar EH_FRM_OFFSET 8 /* The size in bytes of enclave .eh_frame section */ globvar EH_FRM_LEN 8 .org .Lxsave_clear+512 .Lxsave_header: .int 0, 0 /* XSTATE_BV */ .int 0, 0 /* XCOMP_BV */ .org .+48 /* reserved bits */ .data .Laborted: .byte 0 /* TCS local storage section */ .equ tcsls_tos, 0x00 /* initialized by loader to *offset* from image base to TOS */ .equ tcsls_flags, 0x08 /* initialized by loader */ .equ tcsls_flag_secondary, 0 /* initialized by loader; 0 = standard TCS, 1 = secondary TCS */ .equ tcsls_flag_init_once, 1 /* initialized by loader to 0 */ /* 14 unused bits */ .equ tcsls_user_fcw, 0x0a .equ tcsls_user_mxcsr, 0x0c .equ tcsls_last_rsp, 0x10 /* initialized by loader to 0 */ .equ tcsls_panic_last_rsp, 0x18 /* initialized by loader to 0 */ .equ tcsls_debug_panic_buf_ptr, 0x20 /* initialized by loader to 0 */ .equ tcsls_user_rsp, 0x28 .equ tcsls_user_retip, 0x30 .equ tcsls_user_rbp, 0x38 .equ tcsls_user_r12, 0x40 .equ tcsls_user_r13, 0x48 .equ tcsls_user_r14, 0x50 .equ tcsls_user_r15, 0x58 .equ tcsls_tls_ptr, 0x60 .equ tcsls_tcs_addr, 0x68 .macro load_tcsls_flag_secondary_bool reg:req comments:vararg .ifne tcsls_flag_secondary /* to convert to a bool, must be the first bit */ .abort .endif mov 
$(1<<tcsls_flag_secondary),%e\reg and %gs:tcsls_flags,%\reg .endm /* We place the ELF entry point in a separate section so it can be removed by elf2sgxs */ .section .text_no_sgx, "ax" .Lelf_entry_error_msg: .ascii "Error: This file is an SGX enclave which cannot be executed as a standard Linux binary.\nSee the installation guide at https://edp.fortanix.com/docs/installation/guide/ on how to use 'cargo run' or follow the steps at https://edp.fortanix.com/docs/tasks/deployment/ for manual deployment.\n" .Lelf_entry_error_msg_end: .global elf_entry .type elf_entry,function elf_entry: /* print error message */ movq $2,%rdi /* write to stderr (fd 2) */ lea .Lelf_entry_error_msg(%rip),%rsi movq $.Lelf_entry_error_msg_end-.Lelf_entry_error_msg,%rdx .Lelf_entry_call: movq $1,%rax /* write() syscall */ syscall test %rax,%rax jle .Lelf_exit /* exit on error */ add %rax,%rsi sub %rax,%rdx /* all chars written? */ jnz .Lelf_entry_call .Lelf_exit: movq $60,%rax /* exit() syscall */ movq $1,%rdi /* exit code 1 */ syscall ud2 /* should not be reached */ /* end elf_entry */ /* This code needs to be called *after* the enclave stack has been setup. */ /* There are 3 places where this needs to happen, so this is put in a macro. */ .macro entry_sanitize_final /* Sanitize rflags received from user */ /* - DF flag: x86-64 ABI requires DF to be unset at function entry/exit */ /* - AC flag: AEX on misaligned memory accesses leaks side channel info */ pushfq andq $~0x40400, (%rsp) popfq /* check for abort */ bt $0,.Laborted(%rip) jc .Lreentry_panic .endm .text .global sgx_entry .type sgx_entry,function sgx_entry: /* save user registers */ mov %rcx,%gs:tcsls_user_retip mov %rsp,%gs:tcsls_user_rsp mov %rbp,%gs:tcsls_user_rbp mov %r12,%gs:tcsls_user_r12 mov %r13,%gs:tcsls_user_r13 mov %r14,%gs:tcsls_user_r14 mov %r15,%gs:tcsls_user_r15 mov %rbx,%gs:tcsls_tcs_addr stmxcsr %gs:tcsls_user_mxcsr fnstcw %gs:tcsls_user_fcw /* check for debug buffer pointer */ testb $0xff,DEBUG(%rip) jz .Lskip_debug_init mov %r10,%gs:tcsls_debug_panic_buf_ptr .Lskip_debug_init: /* reset cpu state */ mov %rdx, %r10 mov $-1, %rax mov $-1, %rdx xrstor .Lxsave_clear(%rip) lfence mov %r10, %rdx /* check if returning from usercall */ mov %gs:tcsls_last_rsp,%r11 test %r11,%r11 jnz .Lusercall_ret /* setup stack */ mov %gs:tcsls_tos,%rsp /* initially, RSP is not set to the correct value */ /* here. This is fixed below under "adjust stack". 
*/ /* check for thread init */ bts $tcsls_flag_init_once,%gs:tcsls_flags jc .Lskip_init /* adjust stack */ lea IMAGE_BASE(%rip),%rax add %rax,%rsp mov %rsp,%gs:tcsls_tos entry_sanitize_final /* call tcs_init */ /* store caller-saved registers in callee-saved registers */ mov %rdi,%rbx mov %rsi,%r12 mov %rdx,%r13 mov %r8,%r14 mov %r9,%r15 load_tcsls_flag_secondary_bool di /* RDI = tcs_init() argument: secondary: bool */ call tcs_init /* reload caller-saved registers */ mov %rbx,%rdi mov %r12,%rsi mov %r13,%rdx mov %r14,%r8 mov %r15,%r9 jmp .Lafter_init .Lskip_init: entry_sanitize_final .Lafter_init: /* call into main entry point */ load_tcsls_flag_secondary_bool cx /* RCX = entry() argument: secondary: bool */ call entry /* RDI, RSI, RDX, R8, R9 passed in from userspace */ mov %rax,%rsi /* RSI = return value */ /* NOP: mov %rdx,%rdx */ /* RDX = return value */ xor %rdi,%rdi /* RDI = normal exit */ .Lexit: /* clear general purpose register state */ /* RAX overwritten by ENCLU */ /* RBX set later */ /* RCX overwritten by ENCLU */ /* RDX contains return value */ /* RSP set later */ /* RBP set later */ /* RDI contains exit mode */ /* RSI contains return value */ xor %r8,%r8 xor %r9,%r9 xor %r10,%r10 xor %r11,%r11 /* R12 ~ R15 set by sgx_exit */ .Lsgx_exit: /* clear extended register state */ mov %rdx, %rcx /* save RDX */ mov $-1, %rax mov %rax, %rdx xrstor .Lxsave_clear(%rip) mov %rcx, %rdx /* restore RDX */ /* clear flags */ pushq $0 popfq /* restore user registers */ mov %gs:tcsls_user_r12,%r12 mov %gs:tcsls_user_r13,%r13 mov %gs:tcsls_user_r14,%r14 mov %gs:tcsls_user_r15,%r15 mov %gs:tcsls_user_retip,%rbx mov %gs:tcsls_user_rsp,%rsp mov %gs:tcsls_user_rbp,%rbp fldcw %gs:tcsls_user_fcw ldmxcsr %gs:tcsls_user_mxcsr /* exit enclave */ mov $0x4,%eax /* EEXIT */ enclu /* end sgx_entry */ .Lreentry_panic: orq $8,%rsp jmp abort_reentry /* This *MUST* be called with 6 parameters, otherwise register information */ /* might leak! */ .global usercall usercall: test %rcx,%rcx /* check `abort` function argument */ jnz .Lusercall_abort /* abort is set, jump to abort code (unlikely forward conditional) */ jmp .Lusercall_save_state /* non-aborting usercall */ .Lusercall_abort: /* set aborted bit */ movb $1,.Laborted(%rip) /* save registers in DEBUG mode, so that debugger can reconstruct the stack */ testb $0xff,DEBUG(%rip) jz .Lusercall_noreturn .Lusercall_save_state: /* save callee-saved state */ push %r15 push %r14 push %r13 push %r12 push %rbp push %rbx sub $8, %rsp fstcw 4(%rsp) stmxcsr (%rsp) movq %rsp,%gs:tcsls_last_rsp .Lusercall_noreturn: /* clear general purpose register state */ /* RAX overwritten by ENCLU */ /* RBX set by sgx_exit */ /* RCX overwritten by ENCLU */ /* RDX contains parameter */ /* RSP set by sgx_exit */ /* RBP set by sgx_exit */ /* RDI contains parameter */ /* RSI contains parameter */ /* R8 contains parameter */ /* R9 contains parameter */ xor %r10,%r10 xor %r11,%r11 /* R12 ~ R15 set by sgx_exit */ /* extended registers/flags cleared by sgx_exit */ /* exit */ jmp .Lsgx_exit .Lusercall_ret: movq $0,%gs:tcsls_last_rsp /* restore callee-saved state, cf. "save" above */ mov %r11,%rsp /* MCDT mitigation requires an lfence after ldmxcsr _before_ any of the affected */ /* vector instructions is used. We omit the lfence here as one is required before */ /* the jmp instruction anyway. 
*/ ldmxcsr (%rsp) fldcw 4(%rsp) add $8, %rsp entry_sanitize_final pop %rbx pop %rbp pop %r12 pop %r13 pop %r14 pop %r15 /* return */ mov %rsi,%rax /* RAX = return value */ /* NOP: mov %rdx,%rdx */ /* RDX = return value */ pop %r11 lfence jmp *%r11 /* The following functions need to be defined externally: ``` // Called by entry code on re-entry after exit extern "C" fn abort_reentry() -> !; // Called once when a TCS is first entered extern "C" fn tcs_init(secondary: bool); // Standard TCS entrypoint extern "C" fn entry(p1: u64, p2: u64, p3: u64, secondary: bool, p4: u64, p5: u64) -> (u64, u64); ``` */ .global get_tcs_addr get_tcs_addr: mov %gs:tcsls_tcs_addr,%rax pop %r11 lfence jmp *%r11 .global get_tls_ptr get_tls_ptr: mov %gs:tcsls_tls_ptr,%rax pop %r11 lfence jmp *%r11 .global set_tls_ptr set_tls_ptr: mov %rdi,%gs:tcsls_tls_ptr pop %r11 lfence jmp *%r11 .global take_debug_panic_buf_ptr take_debug_panic_buf_ptr: xor %rax,%rax xchg %gs:tcsls_debug_panic_buf_ptr,%rax pop %r11 lfence jmp *%r11
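One constant in entry_sanitize_final is worth spelling out: the andq mask $~0x40400 clears exactly the two RFLAGS bits the comments mention, DF (bit 10) and AC (bit 18). A trivial C check of that arithmetic:

#include <assert.h>
#include <stdint.h>

/* DF (direction flag) is RFLAGS bit 10, AC (alignment check) is bit 18;
 * together they form the 0x40400 mask cleared by entry_sanitize_final. */
int main(void)
{
    const uint64_t DF = 1ULL << 10;
    const uint64_t AC = 1ULL << 18;
    assert((DF | AC) == 0x40400);
    return 0;
}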
ekene-e/MinimISA
1,670
chip8/cpu.s
; CPU management: functions to access registers and stack (kinda repetitive). ; cpu_getPC() -> r0 = PC cpu_getPC: leti r0 0x88080 ; PC getctr a0 r1 setctr a0 r0 readze a0 16 r0 setctr a0 r1 return ; cpu_setPC(r1 = new_pc) cpu_setPC: leti r0 0x88080 ; PC getctr a0 r2 setctr a0 r0 write a0 16 r1 setctr a0 r2 return ; cpu_getI() -> r0 = I cpu_getI: leti r0 0x88090 ; I getctr a0 r1 setctr a0 r0 readze a0 16 r0 setctr a0 r1 return ; cpu_setI(r1 = new_i) cpu_setI: leti r0 0x88090 ; I getctr a0 r2 setctr a0 r0 write a0 16 r1 setctr a0 r2 return ; cpu_opcode() -> r0 = next opcode and PC += 2 cpu_opcode: push 64 r7 push 64 r6 call cpu_getPC let r6 r0 let r1 r0 call mem_opcode add3i r1 r6 2 let r6 r0 call cpu_setPC let r0 r6 pop 64 r6 pop 64 r7 return ; cpu_getReg(r1 = reg) -> r0 = v[r1] cpu_getReg: leti r0 0x88000 ; Register base shift left r1 3 add2 r0 r1 getctr a0 r1 setctr a0 r0 readze a0 8 r0 setctr a0 r1 return ; cpu_setReg(r1 = reg, r2 = val) cpu_setReg: leti r0 0x88000 ; Register base shift left r1 3 add2 r0 r1 getctr a0 r1 setctr a0 r0 write a0 8 r2 setctr a0 r1 return ; cpu_push(r1 = pc) cpu_push: leti r0 0x881a0 ; SP getctr a0 r3 setctr a0 r0 readze a0 16 r2 add2i r2 1 setctr a0 r0 write a0 16 r2 leti r0 0x880a0 ; Stack base sub2i r2 1 shift left r2 4 add2 r0 r2 setctr a0 r0 write a0 16 r1 setctr a0 r3 return ; cpu_pop() -> r0 = pc cpu_pop: leti r0 0x881a0 ; SP getctr a0 r3 setctr a0 r0 readze a0 16 r2 sub2i r2 1 setctr a0 r0 write a0 16 r2 leti r0 0x880a0 ; Stack base shift left r2 4 add2 r0 r2 setctr a0 r0 readze a0 16 r0 setctr a0 r3 return
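All of the interpreter state touched by cpu.s (and by mem.s and main.s below) lives at fixed bit addresses; the a0/a1 counters address individual bits, which is why register indexes are shifted left by 3 and stack slots by 4 before being added to the base. The layout, reconstructed from the literal constants used in these files (offsets are bit addresses, so this sketch is a set of C constants rather than a struct):

/* Reconstructed from the addresses used in cpu.s, mem.s, main.s and draw.s. */
#define CHIP8_MEM_BASE    0x80000u   /* CHIP-8 memory, 8 bits per byte          */
#define CHIP8_V_BASE      0x88000u   /* V0..VF, 8 bits each (index << 3)        */
#define CHIP8_PC          0x88080u   /* 16-bit program counter                  */
#define CHIP8_I           0x88090u   /* 16-bit index register                   */
#define CHIP8_STACK_BASE  0x880a0u   /* 16-bit return addresses (index << 4)    */
#define CHIP8_SP          0x881a0u   /* 16-bit stack pointer (entry count)      */
#define CHIP8_KEYS        0x881b0u   /* key buffer, 1 bit per key               */
#define CHIP8_DELAY_TIMER 0x881e0u   /* 8-bit delay timer                       */
#define CHIP8_SOUND_TIMER 0x881f0u   /* 8-bit sound timer                       */
#define CHIP8_HP48_FLAGS  0x88200u   /* HP48 flag registers (fx75/fx85)         */
#define CHIP8_VRAM_BASE   0x100000u  /* framebuffer, 16 bits per pixel          */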
ekene-e/MinimISA
8,811
chip8/main.s
; Main program main: ; PC = 0x200 leti r1 0x200 call cpu_setPC ; Clear the screen to black leti r1 0x0000 call clear_screen ; Load the character data in the beginning of the memory call load_hexa ; Set the frequency counter (limiter) to 0 leti r6 0x1450 leti r0 0 setctr a1 r6 write a1 8 r0 _von_neumann: ; Wait until we can execute an instruction call wait ; Fetch an instruction code, and PC += 2 call cpu_opcode ; Execute this opcode. This is more or less a gigantic switch/case with ; a lot of poorly-written instruction decoding let r4 r0 ; These few lines are intended for debugging purposes. The or2i r0 0 ; instruction will only be executed when a specific opcode (which is ; specified in cmpi) is found. Placing a breakpoint on this or2i allows ; to quickly "jump" to a point in the chip8 program while debugging. debug: cmpi r4 0x6e17 jumpif neq debug_end debug_marker: or2i r0 0 debug_end: _0000: ; Halt program (?) cmpi r4 0x0000 jumpif eq _main_end _00e0: ; Clear screen cmpi r4 0x00e0 jumpif neq _00ee leti r1 0 call clear_screen jump _von_neumann _00ee: ; Return cmpi r4 0x00ee jumpif neq _0nnn call cpu_pop let r1 r0 call cpu_setPC jump _von_neumann _0nnn: ; Ignore RCA 1082 programs cmpi r4 0x1000 jumpif lt _von_neumann _1nnn: ; Goto cmpi r4 0x2000 jumpif ge _2nnn and3i r1 r4 0xfff call cpu_setPC jump _von_neumann _2nnn: ; Call cmpi r4 0x3000 jumpif ge _3xnn call cpu_getPC let r1 r0 call cpu_push and3 r1 r4 0xfff call cpu_setPC jump _von_neumann _3xnn: ; Snif vx = nn cmpi r4 0x4000 jumpif ge _4xnn let r1 r4 shift right r1 8 and2i r1 0xf call cpu_getReg and3i r1 r4 0xff cmp r0 r1 jumpif neq _von_neumann call cpu_getPC add3i r1 r0 2 call cpu_setPC jump _von_neumann _4xnn: ; Snif vx != nn cmpi r4 0x5000 jumpif ge _5xy0 let r1 r4 shift right r1 8 and2i r1 0xf call cpu_getReg and3i r1 r4 0xff cmp r0 r1 jumpif eq _von_neumann call cpu_getPC add3i r1 r0 2 call cpu_setPC jump _von_neumann _5xy0: ; Snif vx == vy cmpi r4 0x6000 jumpif ge _6xnn let r1 r4 shift right r1 8 and2i r1 0xf call cpu_getReg let r5 r0 and3i r1 r4 0xf0 shift right r1 4 call cpu_getReg cmp r5 r0 jumpif neq _von_neumann call cpu_getPC add3i r1 r0 2 call cpu_setPC jump _von_neumann _6xnn: ; vx = nn cmpi r4 0x7000 jumpif ge _7xnn let r1 r4 shift right r1 8 and2i r1 0xf and3i r2 r4 0xff call cpu_setReg jump _von_neumann _7xnn: ; vx += nn (no carry) cmpi r4 0x8000 jumpif ge _8xyn_entry let r1 r4 shift right r1 8 and2i r1 0xf let r5 r1 call cpu_getReg and3i r2 r4 0xff add2 r2 r0 let r1 r5 call cpu_setReg jump _von_neumann _8xyn_entry: cmpi r4 0x9000 jumpif ge _9xy0 and3i r6 r4 0xf shift right r4 4 and3i r5 r4 0xf shift right r4 4 and2i r4 0xf _8xy0: ; vx = vy cmpi r6 0 jumpif gt _8xy1 let r1 r5 call cpu_getReg let r1 r4 let r2 r0 call cpu_setReg jump _von_neumann _8xy1: ; vx |= vy cmpi r6 1 jumpif gt _8xy2 let r1 r5 call cpu_getReg let r5 r0 let r1 r4 call cpu_getReg let r1 r4 or3 r2 r0 r5 call cpu_setReg jump _von_neumann _8xy2: ; vx &= vy cmpi r6 2 jumpif gt _8xy3 let r1 r5 call cpu_getReg let r5 r0 let r1 r4 call cpu_getReg let r1 r4 and3 r2 r0 r5 call cpu_setReg jump _von_neumann _8xy3: ; vx |= vy cmpi r6 3 jumpif gt _8xy4 let r1 r5 call cpu_getReg let r5 r0 let r1 r4 call cpu_getReg let r1 r4 xor3 r2 r0 r5 call cpu_setReg jump _von_neumann _8xy4: ; vx += vy cmpi r6 4 jumpif gt _8xy5 let r1 r5 call cpu_getReg let r5 r0 let r1 r4 call cpu_getReg let r1 r4 add2 r5 r0 let r2 r5 call cpu_setReg ; Calculate carry shift right r5 8 leti r1 15 let r2 r5 call cpu_setReg jump _von_neumann _8xy5: ; vx -= vy cmpi r6 5 jumpif gt _8xy6 let r1 r5 
call cpu_getReg let r5 r0 let r1 r4 call cpu_getReg let r1 r4 sub3 r5 r0 r5 let r2 r5 call cpu_setReg ; Calculate carry shift right r5 8 and3i r2 r5 1 xor3i r2 r2 1 leti r1 15 call cpu_setReg jump _von_neumann _8xy6: ; vx = vy = vy >> 1 cmpi r6 6 jumpif gt _8xy7 let r1 r5 call cpu_getReg let r6 r0 ; Calculate carry and3i r2 r6 1 leti r1 15 call cpu_setReg shift right r6 1 leti r1 r4 leti r2 r6 call cpu_setReg let r1 r5 leti r2 r6 call cpu_setReg jump _von_neumann _8xy7: ; vx = vy - vx cmpi r6 7 jumpif gt _8xye let r1 r5 call cpu_getReg let r5 r0 let r1 r4 call cpu_getReg let r1 r4 sub2 r5 r0 let r2 r5 call cpu_setReg ; Calculate borrow shift right r5 8 and3i r2 r5 1 xor3i r2 r2 1 leti r1 15 call cpu_setReg jump _von_neumann _8xye: ; vx = vy = vy << 1 cmpi r6 0xe jumpif neq _von_neumann let r1 r5 call cpu_getReg let r6 r0 ; Calculate carry let r2 r6 shift right r2 7 and2i r2 1 leti r1 15 call cpu_setReg shift left r6 1 leti r1 r4 leti r2 r6 call cpu_setReg let r1 r5 leti r2 r6 call cpu_setReg jump _von_neumann _9xy0: ; Snif vx != vy cmpi r4 0xa000 jumpif ge _annn let r1 r4 shift right r1 8 and2i r1 0xf call cpu_getReg let r5 r0 and3i r1 r4 0xf0 shift right r1 4 call cpu_getReg cmp r5 r0 jumpif eq _von_neumann call cpu_getPC add3i r1 r0 2 call cpu_setPC jump _von_neumann _annn: ; I = nnn cmpi r4 0xb000 jumpif ge _bnnn and3i r1 r4 0xfff call cpu_setI jump _von_neumann _bnnn: ; PC = v0 + nnn cmpi r4 0xc000 jumpif ge _cxnn leti r1 0 call cpu_getReg and3i r1 r4 0xfff add2 r1 r0 call cpu_setPC jump _von_neumann _cxnn: ; vx = rand() & 0xnn cmpi r4 0xd000 jumpif ge _dxyn rand r0 and3 r2 r0 r4 and2i r2 0xff shift right r4 8 and3i r1 r4 0xf call cpu_setReg jump _von_neumann _dxyn: ; Draw sprite at vx, vy with height n cmpi r4 0xe000 jumpif ge _ennn and3i r6 r4 0xf shift right r4 4 and3i r5 r4 0xf shift right r4 4 and2i r4 0xf let r1 r4 call cpu_getReg let r4 r0 let r1 r5 call cpu_getReg let r2 r0 let r1 r4 let r3 r6 call draw jump _von_neumann ; Frow now on, we'll need the low nibble to distinguish between instructions _ennn: and3i r5 r4 0xff let r0 r4 shift right r4 8 and2i r4 0xf cmpi r0 0xf000 jumpif ge _fx07 _ex9e: ; snif key(vx) is pressed cmpi r5 0x9e jumpif neq _exa1 let r1 r4 call cpu_getReg add2i r0 0x881b0 ; Key buffer getctr a0 r2 setctr a0 r0 readze a0 1 r1 setctr a0 r2 cmpi r1 0 jumpif z _von_neumann call cpu_getPC add3i r1 r0 2 call cpu_setPC jump _von_neumann _exa1: ; snif key(vx) is released cmpi r5 0xa1 jumpif neq _von_neumann let r1 r4 call cpu_getReg add2i r0 0x881b0 ; Key buffer getctr a0 r2 setctr a0 r0 readze a0 1 r1 setctr a0 r2 cmpi r1 0 jumpif nz _von_neumann call cpu_getPC add3i r1 r0 2 call cpu_setPC jump _von_neumann _fx07: ; vx = delay timer cmpi r5 0x07 jumpif neq _fx0a leti r0 0x881e0 ; Delay timer getctr a0 r1 setctr a0 r0 readze a0 8 r2 setctr a0 r1 let r1 r4 call cpu_setReg jump _von_neumann _fx0a: ; vx = getkey() cmpi r5 0x0a jumpif neq _fx15 call getkey let r1 r4 let r2 r0 call cpu_setReg jump _von_neumann _fx15: ; delay timer = vx cmpi r5 0x15 jumpif neq _fx18 let r1 r4 call cpu_getReg leti r1 0x881e0 ; Delay timer getctr a0 r2 setctr a0 r1 write a0 8 r0 setctr a0 r2 jump _von_neumann _fx18: ; sound timer = vx cmpi r5 0x18 jumpif neq _fx1e let r1 r4 call cpu_getReg leti r1 0x881f0 ; Sound timer getctr a0 r2 setctr a0 r1 write a0 8 r0 setctr a0 r2 jump _von_neumann _fx1e: ; I += vx cmpi r5 0x1e jumpif neq _fx29 let r1 r4 call cpu_getReg let r4 r0 call cpu_getI add2 r4 r0 and3i r1 r4 0xfff call cpu_setI ; Calculate carry shift right r4 12 leti r1 15 and2i r2 r4 1 
call cpu_setReg jump _von_neumann _fx29: ; I = hexa sprite address (vx) cmpi r5 0x29 jumpif neq _fx33 let r1 r4 call cpu_getReg let r1 r0 shift left r0 2 add2 r1 r0 call cpu_setI jump _von_neumann _fx33: ; Load BCD representation of vx into memory at I, I+1, I+2 cmpi r5 0x33 jumpif neq _fx55 let r1 r4 call cpu_getReg let r1 r0 call bcd let r4 r0 call cpu_getI shift left r0 3 add2i r0 0x80000 ; Start of memory getctr a0 r1 setctr a0 r0 write a0 8 r4 shift right r4 8 write a0 8 r4 shift right r4 8 write a0 8 r4 setctr a0 r1 jump _von_neumann _fx55: ; Save registers to memory at I cmpi r5 0x55 jumpif neq _fx65 let r1 r4 call mem_dump jump _von_neumann _fx65: ; Load registers from memory at I cmpi r5 0x65 jumpif neq _fx75 let r1 r4 call mem_load jump _von_neumann _fx75: ; Save registers to flags register (HP48) cmpi r5 0x75 jumpif neq _fx85 let r1 r4 call mem_flags_dump jump _von_neumann _fx85: ; Load register from flags register (HP48) cmpi r5 0x85 jumpif neq _von_neumann let r1 r4 call mem_flags_load jump _von_neumann _main_end: jump -13 .include cpu.s .include mem.s .include util.s .include draw.s .include keyboard.s
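The arithmetic handlers above all follow the same pattern: perform the 8-bit operation in a wider register, write the low byte back through cpu_setReg, then derive VF from the bits shifted out. _8xy4, for instance, takes its carry from bit 8 of the 9-bit sum. A C sketch of that one opcode:

#include <stdint.h>

/* VX += VY with VF = carry, mirroring the "shift the sum right by 8" trick
 * used by the _8xy4 handler; v[] stands for the V0..VF register file. */
static void op_8xy4(uint8_t v[16], unsigned x, unsigned y)
{
    unsigned sum = (unsigned)v[x] + v[y];   /* at most 9 bits             */
    v[x] = (uint8_t)sum;                    /* low 8 bits back into VX    */
    v[15] = (uint8_t)(sum >> 8);            /* VF = carry out (0 or 1)    */
}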
ekene-e/MinimISA
2,890
chip8/draw.s
; clear_screen() clear_screen: ; Load VRAM pointer and counter leti r0 0x100000 ; VRAM base leti r3 160 getctr a0 r2 setctr a0 r0 let r0 r1 shift left r0 16 or2 r1 r0 let r0 r1 shift left r0 32 or2 r1 r0 _clear_screen_loop: write a0 64 r1 write a0 64 r1 write a0 64 r1 write a0 64 r1 write a0 64 r1 write a0 64 r1 write a0 64 r1 write a0 64 r1 sub2i r3 1 jumpif nz _clear_screen_loop setctr a0 r2 return ; load_hexa() -> load character table at 0x80000 load_hexa: push 64 r7 call _load_hexa_lea pop 64 r7 leti r1 0x80000 ; Start of memory getctr a0 r2 getctr a1 r3 setctr a0 r0 setctr a1 r1 readze a0 64 r0 write a1 64 r0 readze a0 64 r0 write a1 64 r0 readze a0 64 r0 write a1 64 r0 readze a0 64 r0 write a1 64 r0 readze a0 64 r0 write a1 64 r0 readze a0 64 r0 write a1 64 r0 readze a0 64 r0 write a1 64 r0 readze a0 64 r0 write a1 64 r0 readze a0 64 r0 write a1 64 r0 readze a0 64 r0 write a1 64 r0 setctr a0 r2 setctr a1 r3 return _load_hexa_lea: getctr pc r0 add2i r0 24 return _load_hexa_data: .const 640 #1111000010010000100100001001000011110000001000000110000000100000001000000111000011110000000100001111000010000000111100001111000000010000111100000001000011110000100100001001000011110000000100000001000011110000100000001111000000010000111100001111000010000000111100001001000011110000111100000001000000100000010000000100000011110000100100001111000010010000111100001111000010010000111100000001000011110000111100001001000011110000100100001001000011100000100100001110000010010000111000001111000010000000100000001000000011110000111000001001000010010000100100001110000011110000100000001111000010000000111100001111000010000000111100001000000010000000 ; draw(r1 = x, r2 = y, r3 = n) -> draw sprite from memory at I draw: getctr a0 r0 push 64 r0 getctr a1 r0 push 64 r0 push 64 r4 push 64 r5 push 64 r7 ; Get destination pointer leti r0 0x100000 ; VRAM base shift left r2 10 add2 r0 r2 shift left r1 4 add2 r0 r1 setctr a1 r0 ; Number of pixels to write let r4 r3 ; Get source pointer call cpu_getI shift left r0 3 add2i r0 0x80000 ; Memory base setctr a0 r0 leti r1 8 let r5 0 ; Perform the drawing _draw_loop: readze a0 1 r0 cmpi r0 0 jumpif z _draw_clear ; Before setting the pixel, heed for collisions (r5) getctr a1 r2 readze a1 16 r3 or2 r5 r3 setctr a1 r2 ; Flip the color of the pixel xor3i r0 r3 0xffff write a1 16 r0 jump _draw_next _draw_clear: getctr a1 r2 add2i r2 16 setctr a1 r2 _draw_next: sub2i r1 1 jumpif nz _draw_loop _draw_newline: getctr a1 r2 add2i r2 896 setctr a1 r2 leti r1 8 sub2i r4 1 jumpif nz _draw_loop _draw_end: ; Collision results leti r1 15 leti r2 1 cmpi r5 0 jumpif nz _draw_collision leti r2 0 _draw_collision: call cpu_setReg pop 64 r7 pop 64 r5 pop 64 r4 pop 64 r0 setctr a1 r0 pop 64 r0 setctr a0 r0 return
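draw() implements the usual CHIP-8 DXYN semantics: each sprite row is 8 pixels wide, set bits flip (XOR) the corresponding framebuffer pixels, and VF reports whether any lit pixel was turned off. A sketch of that logic on a plain one-bit 64x32 framebuffer; the assembly instead flips 16-bit colour cells and simply advances its VRAM pointer, so the clipping check here is a simplification:

#include <stdint.h>

/* XOR sprite drawing with collision detection; the return value is what the
 * caller stores in VF. */
static int draw_sprite(uint8_t fb[32][64], const uint8_t *sprite,
                       unsigned x, unsigned y, unsigned n)
{
    int collision = 0;
    for (unsigned row = 0; row < n; row++) {
        for (unsigned col = 0; col < 8; col++) {
            if (!(sprite[row] & (0x80u >> col)))
                continue;                        /* clear sprite bits do nothing */
            if (y + row >= 32 || x + col >= 64)
                continue;                        /* simplification: clip         */
            uint8_t *px = &fb[y + row][x + col];
            if (*px)
                collision = 1;                   /* erasing a lit pixel          */
            *px ^= 1;                            /* flip the pixel               */
        }
    }
    return collision;
}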
ekene-e/MinimISA
1,107
chip8/util.s
; Various utility functions ; mult(r1 = x, r2 = y) -> r0 = x * y mult: leti r0 0 _mult_nonzero: shift right r1 1 jumpif nc _mult_next add2 r0 r2 _mult_next: shift left r2 1 cmpi r1 0 jumpif nz _mult_nonzero return ; bcd(r1 = x) -> r0 = BCD representation of x (24 bits) bcd: push 64 r4 push 64 r5 push 64 r6 push 64 r7 leti r4 0 leti r5 3 leti r6 r1 _bcd_one: ; r0 = r6 / 10 let r1 r6 leti r2 0x199a call mult shift right r0 16 let r2 r0 ; r1 = r6 % 10 shift left r0 1 let r1 r0 shift left r0 2 add2 r1 r0 sub3 r1 r6 r1 ; Add a digit to r4 shift left r4 8 or2 r4 r1 let r6 r2 sub2i r5 1 jumpif nz _bcd_one let r0 r4 pop 64 r7 pop 64 r6 pop 64 r5 pop 64 r4 return ; wait() -> wait for available execution cycles wait: push 64 r6 getctr a1 r0 push 64 r0 leti r6 0x88240 _wait_loop: setctr a1 r6 readze a1 8 r0 readze a1 8 r1 cmp r0 r1 jumpif neq _wait_end sleep 0 jump _wait_loop _wait_end: ; Increase the number of instructions executed so far add2i r1 1 add2i r6 8 setctr a1 r6 write a1 8 r1 pop 64 r0 setctr a1 r0 pop 64 r6 return
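bcd() divides by 10 without a divide instruction: multiply by the 16-bit fixed-point reciprocal 0x199A (about 65536/10), shift right by 16, then rebuild q*10 as (q<<1)+(q<<3) to get the remainder; for the 8-bit register values it is fed, the quotient is exact. A C sketch of the same loop. Note the packing order: the units digit ends up in the top byte and the hundreds digit in the low byte, which is the order the fx33 handler then writes to memory at I:

#include <stdint.h>

/* Three-digit BCD via reciprocal multiplication, as in bcd() above. */
static uint32_t bcd3(uint8_t x)
{
    uint32_t v = x, out = 0;
    for (int digit = 0; digit < 3; digit++) {
        uint32_t q = (v * 0x199Au) >> 16;        /* v / 10, exact for v <= 255 */
        uint32_t r = v - ((q << 1) + (q << 3));  /* v - q*10 = v % 10          */
        out = (out << 8) | r;                    /* pack one decimal digit     */
        v = q;
    }
    return out;        /* units << 16 | tens << 8 | hundreds */
}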
ekene-e/MinimISA
1,865
chip8/mem.s
; Memory management: basic memory access (CHIP-8 is basic in all regards). ; mem_opcode(r1 = PC) -> r0 = 16-bit opcode taken from PC mem_opcode: leti r0 0x80000 ; Start of memory shift left r1 3 add2 r0 r1 getctr a0 r1 setctr a0 r0 readze a0 16 r0 setctr a0 r1 return ; mem_dump(r1 = x) -> stores v0..vx at the location given by I mem_dump: push 64 r7 push 64 r4 add3i r4 r1 1 call cpu_getI add3i r1 r0 r4 shift left r0 3 add2i r0 0x80000 ; Memory base leti r3 0x88000 ; Register base getctr a0 r2 setctr a0 r0 getctr a1 r0 setctr a1 r3 _mem_dump_one: readze a1 8 r3 write a0 8 r3 sub2i r4 1 jumpif nz _mem_dump_one setctr a0 r2 setctr a1 r0 call cpu_setI pop 64 r4 pop 64 r7 return ; mem_load(r1 = x) -> load v0..vx from the location given by I mem_load: push 64 r7 push 64 r4 add3i r4 r1 1 call cpu_getI add3i r1 r0 r4 shift left r0 3 add2i r0 0x80000 ; Memory base leti r3 0x88000 ; Register base getctr a0 r2 setctr a0 r0 getctr a1 r0 setctr a1 r3 _mem_load_one: readze a0 8 r3 write a1 8 r3 sub2i r4 1 jumpif nz _mem_load_one setctr a0 r2 setctr a1 r0 call cpu_setI pop 64 r4 pop 64 r7 return ; mem_flags_dump(r1 = x) -> store v0..vx to flags register (HP48) mem_flags_dump: leti r0 0x88000 ; Register base getctr a0 r2 setctr a0 r0 leti r0 0x88200 ; HP48 flags getctr a1 r3 setctr a1 r0 add2i r1 1 _mem_flags_dump_one: readze a0 8 r0 write a1 8 r0 sub2i r1 1 jumpif nz _mem_flags_dump_one setctr a0 r2 setctr a0 r3 return ; mem_flags_load(r1 = x) -> load v0..vx from flags register (HP48) mem_flags_load: leti r0 0x88000 ; Register base getctr a0 r2 setctr a0 r0 leti r0 0x88200 ; HP48 flags getctr a1 r3 setctr a1 r0 add2i r1 1 _mem_flags_load_one: readze a1 8 r0 write a0 8 r0 sub2i r1 1 jumpif nz _mem_flags_load_one setctr a0 r2 setctr a0 r3 return
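mem_dump and mem_load are the fx55/fx65 opcodes: copy V0..VX to or from CHIP-8 memory starting at I. Both routines also call cpu_setI with I + X + 1, so I is left pointing just past the copied block, the original CHIP-8 behaviour. A sketch of the store direction:

#include <stdint.h>

/* fx55 as implemented by mem_dump: dump V0..VX to memory at I, then advance I. */
static void op_fx55(uint8_t mem[4096], const uint8_t v[16],
                    uint16_t *I, unsigned x)
{
    for (unsigned i = 0; i <= x; i++)
        mem[*I + i] = v[i];
    *I = (uint16_t)(*I + x + 1);   /* I ends up past the block, as above */
}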
ekene-e/MinimISA
7,096
prog/lib_font.s
;--- ; Font library ; No dependencies. 16-bit colors. ;--- ; putc() ; Puts a character on the screen; (x, y) is the coordinate of the top- ; left corner. ; ; @args x, y, c ; @stack color(16) putc: ; Quickly change coordinate system (before function starts) leti r0 122 sub3 r2 r0 r2 ; Get a pointer to the glyph data like a boss. I know that I should ; save r1-r3, but I don't care that much (since it's font_lea) push 64 r7 call font_lea pop 64 r7 ; Add offset (35 * c) add2 r0 r3 add2 r0 r3 add2 r0 r3 shift left r3 5 add2 r0 r3 ; Get a pointer to the appropriate video RAM location leti r3 0x10000 ; Add 160 * y (* 16) shift left r2 9 add2 r3 r2 shift left r2 2 add2 r3 r2 ; Add x (* 16) shift left r1 4 add2 r3 r1 pop 16 r1 getctr a0 r2 setctr a0 r0 getctr a1 r0 setctr a1 r3 ; At this point: ; r0 = saved a1, r1 = color, r2 = saved a0 ; a0 = source glyph, a1 = destination VRAM ; And in the end of the routine: ; r3 = x-counter, r4 = glyph bits (sometimes a1), r5 = y-counter push 64 r4 push 64 r5 leti r5 7 _putc_line: leti r3 5 _putc_pixel: readze a0 1 r4 cmpi r4 0 jumpif z _putc_clear _putc_set: write a1 16 r1 jump _putc_pixel_end _putc_clear: getctr a1 r4 add2i r4 16 setctr a1 r4 _putc_pixel_end: sub2i r3 1 jumpif nz _putc_pixel leti r3 155 shift left r3 4 getctr a1 r4 add2i r4 r3 setctr a1 r4 sub2i r5 1 jumpif nz _putc_line ; Restore context and leave pop 64 r5 pop 64 r4 setctr a0 r2 setctr a1 r0 return ; puts() ; Writes a zero-terminated string on the screen by repeatedly calling ; putc() for each of the characters. ; ; @args x, y, str ; @stack color(16) puts: pop 16 r0 push 64 r7 push 64 r6 push 64 r5 push 64 r4 let r6 r0 getctr a0 r4 push 64 r4 setctr a0 r3 let r4 r1 let r5 r2 _puts_one: readze a0 8 r3 cmpi r3 0 jumpif z _puts_end let r1 r4 let r2 r5 add2i r4 6 push 16 r6 call putc jump _puts_one _puts_end: pop 64 r4 setctr a0 r4 pop 64 r4 pop 64 r5 pop 64 r6 pop 64 r7 return ; Here be dragons (fonts) ; Encoding: ; - Each character (5 * 7) takes 35 bits ; - Each glyph is stored from top to bottom, then from left to right ; - All ASCII characters are present (thus 128 * 35 = 4480), in order ; Algorithm to draw a character (c, x0, y0, color): ; y <- y0 ; Get a pointer to font + 35 * c ; Repeat 7 times ; x <- x0 ; Repeat 5 times ; Fetch a bit ; If it's 1, plot color at (x, y), otherwise do nothing ; x <- x + 1 ; y <- y + 1 font_lea: getctr pc r0 ; Add the size of this routine add2i r0 24 return font: .const 4480 
#11111100011000110001100011000111111000000001000110011100011000010000000000001000011000111001100010000000000000000101111100100111110100000000000001000101010001000101010001000000000000100000001111100000001000000000000001001111000001111100010000000011000000011100011000110001100111100000000000111101000011100100001111000000001000001011111000100010000000000010000100101010011111101000001000001000100010001111100000111110000001000001000001011111000001111100000000000010001110101010010000100000000000000100001001010101110001000000000000001000100011111010000010000000010000010001110000010111110001011110010001010011100000101111100010111101000001000111010001111111000001110000100010001110100011111110000011100010001010011101000111111100000111001010000000111010001111111000001110001000101000000011000010000100011100101000000000000110000100001000111000100010100000001110100011000101110010100000001110100011000110001011100000001110100001000001110001001110000000000000110110010100101001001101000000011001001011100100101101100100000001110100011111110001100010111000000001000111010101101010111000100000000000011111010100101001010100110000000000000000000000000000000000000100001000010000100001000000000100010100101000000000000000000000000000101001010111110101011111010100101000100011111010001110001011111000100110001100100010001000100010011000110110010010101000100010101100100110101100001000100000000000000000000000000100010001000010000100000100000100100000100000100001000010001000100000000001001010101110011101010100100000000010000100111110010000100000000000000000000000000000110000100010000000000000000011111000000000000000000000000000000000000000001100011000000000001000100010001000100000000001110100011001110101110011000101110001000110000100001000010000100011100111010001000010001000100010001111111111000100010000010000011000101110000100011001010100101111100010000101111110000111100000100001100010111000110010001000011110100011000101110111110000100010001000010000100001000111010001100010111010001100010111001110100011000101111000010001001100000000110001100000000110001100000000000001100011000000001100001000100000010001000100010000010000010000010000000000011111000001111100000000000100000100000100000100010001000100001110100010000100010001000000000100011101000100001011011010110101011100111010001100011000111111100011000111110100011000111110100011000111110011101000110000100001000010001011101111010001100011000110001100011111011111100001000011110100001000011111111111000010000111101000010000100000111010001100001011110001100010111110001100011000111111100011000110001011100010000100001000010000100011100011100010000100001000010100100110010001100101010011000101001001010001100001000010000100001000010000111111000111011101011000110001100011000110001110011010110011100011000110001011101000110001100011000110001011101111010001100011000111110100001000001110100011000110001100010111000111111101000110001100011111010010100010111010001100000111000001100010111011111001000010000100001000010000100100011000110001100011000110001011101000110001100011000110001010100010010001100011010110101101011010101010100011000101010001000101010001100011000110001100010101000100001000010011111000010001000100010001000011111011100100001000010000100001000011100000010000010000010000010000010000001110000100001000010000100001001110001000101010001000000000000000000000000000000000000000000000000001111101100010000010000000000000000000000000000000001110000010111110001011111000010000111101000110001100011111000000000000111010001100001000101110000010000101111100011000110001011110000000000011101000
11111110000011100001100100001000111000100001000010000000000000111110001011110000101110100001000010110110011000110001100010010000000011000010000100001000111000010000000011000010000101001001100010000100001001010100110001010010010110000100001000010000100001000111000000000001101010101101011010110101000000000010110110011000110001100010000000000011101000110001100010111000000000001111010001111101000010000000000000001111100010111100001000010000000000101101100110000100001000000000000000111110000011100000111110010001110001000010000100001001001100000000000100011000110001100110110100000000001000110001100010101000100000000000010001101011010110101010100000000000110010011000100011001001100000000001000101001001100010011000000000000011111000100010001000111110011000100001000100000100001000011000100001000010000100001000010000100011000010000100000100010000100011000000000000011011011000000000000000011111100011000110001100011000111111
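The encoding notes and the drawing algorithm spelled out in the comments of putc translate almost line for line into C. A sketch, with get_font_bit() and plot() as hypothetical stand-ins for the bit-addressed font read (readze a0 1) and the 16-bit VRAM write:

#include <stdint.h>

extern int  get_font_bit(unsigned bit_index);              /* hypothetical */
extern void plot(unsigned x, unsigned y, uint16_t color);   /* hypothetical */

/* Draw the 5x7 glyph for ASCII code c: 35 bits per glyph, stored row by row. */
static void putc_sketch(unsigned x0, unsigned y0, unsigned c, uint16_t color)
{
    unsigned base = 35u * c;                      /* font + 35 * c            */
    for (unsigned row = 0; row < 7; row++)
        for (unsigned col = 0; col < 5; col++)
            if (get_font_bit(base + row * 5 + col))
                plot(x0 + col, y0 + row, color);  /* 0 bits leave background  */
}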
ekene-e/MinimISA
1,316
prog/mull.s
;-----------------------------------------------------------------------------; ; Signed 64 * 64 -> 128 multiplication ; ;-----------------------------------------------------------------------------; ; Initialization (program input) leti r0 -0x374bc563deb482 leti r1 0x97b6af21f376 ; r0 * r1 = 0xffffffdf'3ad8dfa1'591295b1'6f3f6614 (negative) ; Main program ([r3r2] = r0 * [r6r1]) leti r2 0 leti r3 0 leti r6 0 ; Manage signs (r4 < 0 iff we need to swap signs after multiplying) xor3 r4 r0 r1 ; We need to have 0 in a register to calculate rx = 0 - rx leti r5 0 cmpi r0 0 jumpif sgt r0_positive sub3 r0 r5 r0 r0_positive: cmpi r1 0 jumpif sgt r1_positive sub3 r1 r5 r1 r1_positive: ; Calculate the product! ; The size of this loop is 149 bits. loop: shift right r0 1 jumpif nc next ; Add with carry [r6r1] to [r3r2] add2 r3 r6 add2 r2 r1 jumpif nc next add2i r3 1 next: ; Shift [r6r1] left 1 place shift left r6 1 shift left r1 1 jumpif nc nc1 or2i r6 1 nc1: cmpi r0 0 jumpif nz loop ; Now swap signs if it was required shift left r4 1 jumpif nc halt ; Perform subtraction with carry sub3 r2 r5 r2 jumpif nc nc2 add2i r3 1 nc2: sub3 r3 r5 r3 ; Halt program (the emulator will detect this and avoid looping forever) halt: jump -13
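The shift-and-add loop in mull.s can be read as the C sketch below (purely illustrative; the function name, the hi:lo output pair and the variable names are assumptions, not part of the repository). It follows the same steps as the assembly: record the result sign with an XOR, take absolute values, add the shifted multiplicand into a 128-bit accumulator whenever the bit shifted out of the multiplier is set, and negate the accumulator at the end if the operand signs differed. Called with the two constants from the header, it should reproduce the 128-bit value quoted in the comment there.

    #include <stdint.h>

    /* Sketch of the algorithm in prog/mull.s: signed 64x64 -> 128 multiply by
       shift-and-add, with the product kept as a hi:lo pair of 64-bit words. */
    static void mul64_to_128(int64_t a, int64_t b, uint64_t *hi, uint64_t *lo)
    {
        int negate = (a < 0) != (b < 0);            /* sign of result, like xor3 r4 r0 r1 */
        uint64_t m      = (a < 0) ? -(uint64_t)a : (uint64_t)a;  /* |r0| */
        uint64_t add_lo = (b < 0) ? -(uint64_t)b : (uint64_t)b;  /* |r1|, low half of [r6r1] */
        uint64_t add_hi = 0;                                     /* high half of [r6r1] */
        uint64_t h = 0, l = 0;                                   /* [r3r2] accumulator */

        while (m != 0) {
            if (m & 1) {                 /* bit shifted out of r0 was set */
                h += add_hi;
                uint64_t old = l;
                l += add_lo;
                if (l < old)             /* carry out of the low addition */
                    h += 1;
            }
            add_hi = (add_hi << 1) | (add_lo >> 63);  /* shift [r6r1] left one place */
            add_lo <<= 1;
            m >>= 1;
        }
        if (negate) {                    /* two's-complement negate the 128-bit result */
            h = ~h + (l == 0);
            l = (uint64_t)0 - l;
        }
        *hi = h;
        *lo = l;
    }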
ekene-e/MinimISA
5,400
prog/lib_draw.s
;--- ; Drawing library ; All of the colors in this module are in 16-bit format. ;--- ; clear_screen() ; Clears the whole screen in an efficient way. ; ; @arg color clear_screen: ; Load VRAM pointer and counter leti r0 0x10000 leti r3 640 getctr a0 r2 setctr a0 r0 let r0 r1 shift left r0 16 or2 r1 r0 let r0 r1 shift left r0 32 or2 r1 r0 _loop: write a0 64 r1 write a0 64 r1 write a0 64 r1 write a0 64 r1 write a0 64 r1 write a0 64 r1 write a0 64 r1 write a0 64 r1 sub2i r3 1 jumpif nz _loop setctr a0 r2 return ; plot() ; Changes the color of a pixel. Returns a pointer to the pixel at ; location (x, y) (provided it exists). ; ; @args x, y, color ; @ret Pointer to the cell at (x, y) plot: ; Bit offset in VRAM area leti r0 127 sub3 r2 r0 r2 shift left r2 5 add2 r1 r2 shift left r2 2 add2 r1 r2 shift left r1 4 ; VRAM pointer leti r0 0x10000 add2 r1 r0 ; Load it to memory, and perform the plot getctr a0 r2 setctr a0 r1 write a0 16 r3 setctr a0 r2 return ; draw() ; Draws a line between (x1, y1) and (x2, y2) (both included). This ; functions tries to circumvent the limitations imposed by the stack and ; the calling conventions to efficiently manipulate a lot of variables. ; ; @args x1, y1, x2 ; @stack y2(16), color(16) draw: ; Tricky swapping: get y2 and color in a single pop, then push r4 ; through r7, eventually push back color at the top and keep only y2. pop 32 r0 push 64 r7 push 64 r6 push 64 r5 push 64 r4 push 16 r0 shift right r0 16 let r4 r0 ; Turn x2 into dx and y2 into dy. We don't want to change the ; coordinate system since plot does it for us sub2 r3 r1 sub2 r4 r2 ; Here is the state of things at this point: ; Registers (free) x y dx dy (free) (free) (free) ; Stack color(16) r4(64) r5(64) r6(64) r7(64) ; Plot the first pixel pop 16 r0 push 64 r3 push 64 r2 push 64 r1 push 16 r0 let r3 r0 call plot pop 16 r0 pop 64 r1 pop 64 r2 pop 64 r3 push 16 r0 ; Distinguish vertical lines (one pixel per y) from horizontal lines ; (one pixel per x). We need to compare abs(dx) and abs(dy), but ; calculating both would be too long. So I use the following trick: ; abs(dx) > abs(dy) <=> sgn(dx + dy) = sgn(dx - dy) ; which avoids branches. leti r0 0 add3 r5 r3 r4 sub3 r6 r3 r4 xor3 r5 r5 r6 shift left r5 1 jumpif c _draw_vert ; Here is the storage used in the rest of the function. For the ; vertical section, r6 is sx instead of sy (but it works the same way) ; Registers (calculations) x y dx dy cumul sy i ; Stack color(16) [things...] r4(64) r5(64) r6(64) r7(64) ; First situation: the line is horizontal _draw_horiz: ; Switch endpoints to ensure dx >= 0, saving a variable cmpi r3 -1 jumpif sgt _draw_horiz_init add2 r1 r3 add2 r2 r4 sub3 r3 r0 r3 sub3 r4 r0 r4 _draw_horiz_init: ; Calculate sy (in terms of +1 / -1, no 0 case is required) into r6 let r6 r4 asr3 r6 r6 63 add2 r6 r6 add2i r6 1 ; Take absolute value of dy cmpi r4 -1 jumpif sgt _draw_horiz_init2 sub3 r4 r0 r4 _draw_horiz_init2: let r5 r3 shift right r5 1 let r7 r3 _draw_horiz_loop: add2i r1 1 add2 r5 r4 cmp r3 r5 jumpif ge _draw_horiz_plot sub2 r5 r3 add2 r2 r6 _draw_horiz_plot: ; Save pretty much everything, perform the plot, and get the values ; back. 
Having r0 as a buffer is very useful at this point pop 16 r0 push 64 r7 push 64 r3 push 64 r2 push 64 r1 push 16 r0 let r3 r0 call plot pop 16 r0 pop 64 r1 pop 64 r2 pop 64 r3 pop 64 r7 push 16 r0 sub2i r7 1 jumpif nz _draw_horiz_loop jump _draw_end ; Second situation: the line is vertical _draw_vert: ; Make sure dy >= 0 cmpi r4 -1 jumpif sgt _draw_vert_init add2 r1 r3 add2 r2 r4 sub3 r3 r0 r3 sub3 r4 r0 r4 _draw_vert_init: ; This time, we need sx rather than sy let r6 r3 asr3 r6 r6 63 add2 r6 r6 add2i r6 1 ; Take absolute value of dx cmpi r3 -1 jumpif sgt _draw_vert_init2 sub3 r3 r0 r3 _draw_vert_init2: let r5 r4 shift right r5 1 let r7 r4 _draw_vert_loop: add2i r2 1 add2 r5 r3 cmp r4 r5 jumpif ge _draw_vert_plot sub2 r5 r4 add2 r1 r6 _draw_vert_plot: ; Same thing as before, there's a lot of things to save and restore pop 16 r0 push 64 r7 push 64 r3 push 64 r2 push 64 r1 push 16 r0 let r3 r0 call plot pop 16 r0 pop 64 r1 pop 64 r2 pop 64 r3 pop 64 r7 push 16 r0 sub2i r7 1 jumpif nz _draw_vert_loop ; End of function: restore and leave _draw_end: pop 16 r0 pop 64 r4 pop 64 r5 pop 64 r6 pop 64 r7 return ; fill() ; Fills a rectangle defined by its top-left corner (x1, y1) and its ; bottom-right corner (x2, y2). Both endpoints are included. ; ; @args x1, y1, x2 ; @stack y2(16), color(16) fill: ; Pop parameters and push callee-saved registers pop 32 r0 push 64 r5 push 64 r4 let r4 r0 shift right r4 16 and3i r5 r0 0xffff sub2 r3 r1 sub2 r4 r2 ; Change coordinate system xor3i r2 r2 127 sub2 r2 r4 let r0 r1 sub2i r2 1 shift left r2 5 add2 r0 r2 shift left r2 2 add2 r0 r2 shift left r0 4 add2i r0 0x10000 getctr a0 r1 setctr a0 r0 ; State of CPU for this function: ; Registers vram saved_a0 x_ctr w y_ctr color (r6) (r7) ; Stack r5(65) r4(65) _fill_row: add2i r0 2560 setctr a0 r0 let r2 r3 _fill_pixel: write a0 16 r5 sub2i r2 1 jumpif nz _fill_pixel sub2i r4 1 jumpif nz _fill_row _fill_end: setctr a0 r1 pop 64 r4 pop 64 r5 return
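The horizontal branch of draw() above is essentially an integer error-accumulator line algorithm: cumul starts at dx/2, dy is added at every x step, and whenever cumul exceeds dx it is reduced by dx and y moves one step in the direction sy. A rough C sketch of that branch follows (the function name and the plot callback are invented for illustration; the vertical branch works the same way with x and y swapped).

    /* Sketch of the _draw_horiz loop of draw() in lib_draw.s.
       dx >= 0 is assumed, as ensured by the endpoint swap at _draw_horiz_init. */
    void draw_horiz(int x, int y, int dx, int dy, void (*plot)(int x, int y))
    {
        int sy = (dy < 0) ? -1 : 1;   /* sign of dy, like the asr3/add2/add2i trick */
        if (dy < 0)
            dy = -dy;                 /* abs(dy) */
        int cumul = dx / 2;           /* r5 starts at dx >> 1 */

        plot(x, y);                   /* the first pixel is plotted before the loop */
        for (int i = dx; i > 0; i--) {  /* r7 counts down from dx */
            x += 1;
            cumul += dy;
            if (cumul > dx) {         /* "cmp r3 r5; jumpif ge" skips this when dx >= cumul */
                cumul -= dx;
                y += sy;
            }
            plot(x, y);
        }
    }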
ekene-e/MinimISA
6,174
prog/test_exhaust.s
; To test the instructions, execute the program step-by-step and check the ; values of the simulator's registers, PC and flags when suggested. ; Untested instructions: readze, readse, write, push (memory-related) ; Note: small values are represented as decimal; 64-bit values are hexadecimal ; let and leti leti r0 0 leti r1 1 leti r2 -1 ; CHECK that 1 is stored on 8 bits (10_00000001) and -1 is stored on 2 ; bits (0_1) in assembled code. (The ISA says signed 1-bit.) ; If they're not, a correct simulator will set r1 = r2 = -1. let r3 r2 leti r4 0x4f3c leti r5 -0x76528 leti r6 0x73926fc86c76b765 let r7 r4 ; CHECK that all values correspond (if your registers are smaller than ; 64 bits, drop the higher bits) ; add2, add2i, sub2, sub2i add2 r4 r2 add2i r4 15 add2i r4 0x87937cb4 ; CHECK r4 = 00000000'8793cbfe add2 r6 r6 ; CHECK zero clear, negative set, carry clear, overflow set add2 r6 r6 ; CHECK zero clear, negative set, carry set, overflow clear sub2 r3 r3 ; CHECK zero set, negative clear, carry *clear*, overflow clear ; If you implemented a carry instead of a borrow, you will know it here leti r5 0x100 sub2i r5 0x7f3c ; CHECK r5 = ffffffff'ffff81c4, zero clear, negative set, carry set, ; overflow clear ; cmp, cmpi cmpi r5 0 ; CHECK zero clear, negative set, carry clear, overflow clear ; Note: this one is important, make sure it's carry *clear* (you should ; not catch the carry coming out of calculating -0) leti r2 15 leti r3 40 cmp r2 r3 ; CHECK zero clear, negative set, carry set, overflow clear leti r2 -15 cmp r2 r3 ; CHECK zero clear, negative set, carry clear, overflow clear cmpi r3 40 ; CHECK zero set, negative clear, carry clear, overflow clear leti r3 0x7fffffffffffffff cmp r2 r3 ; CHECK zero clear, negative clear, carry clear, overflow set ; shift leti r0 3 shift left r0 61 ; CHECK r0 = 60000000'00000000, zero clear, negative clear, carry clear let r1 r0 shift left r0 2 ; CHECK r0 = 80000000'00000000, zero clear, negative set, carry set shift left r0 1 ; CHECK r0 = 0, zero set, negative clear, carry set shift left r0 1 ; CHECK r0 = 0, zero set, negative clear, carry clear shift right r1 60 ; CHECK r1 = 6, zero clear, negative clear, carry clear shift right r1 2 ; CHECK r1 = 1, zero clear, negative clear, carry set shift right r1 1 ; CHECK r1 = 0, zero set, negative clear, carry set shift right r1 1 ; CHECK r1 = 0, zero set, negative clear, carry clear ; readze, readse ; Not tested (a memory view would be useful for these) ; jump, jumpif jump 0 ; CHECK that it does nothing (PC after this instruction should be PC ; before this instruction + 13) leti r0 1 jump 10 add2 r0 r0 ; CHECK r0 = 1 leti r4 -80 leti r5 -78 cmp r4 r5 ; CHECK zero clear, negative set, carry set, overflow clear leti r0 1 leti r1 1 leti r2 1 leti r3 1 leti r4 1 leti r5 1 leti r6 1 leti r7 1 jumpif eq 9 leti r0 0 jumpif neq 9 leti r1 0 jumpif sgt 9 leti r2 0 jumpif slt 9 leti r3 0 jumpif gt 9 leti r4 0 jumpif ge 9 leti r5 0 jumpif lt 9 leti r6 0 jumpif v 9 leti r7 0 ; CHECK that r0..r7 = 0, 1, 0, 1, 0, 0, 1, 0 ; or2, or2i, and2, and2i leti r0 6 leti r1 12 or2 r1 r0 ; CHECK r1 = 14, zero clear, negative clear, carry clear or2i r0 0xff ; CHECK r0 = 0xff, zero clear, negative clear, carry clear leti r4 -1 or2 r0 r4 ; CHECK r0 = -1, zero clear, negative set, carry clear leti r0 6 and2 r1 r0 ; CHECK r1 = 6, zero clear, negative clear, carry clear and2i r0 0xfa ; CHECK r0 = 2, zero clear, negative clear, carry clear and2i r1 128 ; CHECK r1 = 0, zero set, negative clear, carry clear ; call, return ; This section assumes that 
your program starts at address 0. Because I used no ; labels, your compiler must choose the minimal argument size for all ; constants, otherwise the call addresses will get messed up. ; You will need more "interactive" execution for the following tests. Pay ; extra attention to the values of PC and r7 at each step. main: ; If you can get the address of the add2i below, write it down leti r0 0 call 0x545 add2i r0 100 ; When you reach this point, check that r0 = 112 jump 0x6c func1: ; Sub-function 1, calls sub-function 2 twice then returns ; CHECK r7 = address of the add2i above let r5 r7 add2i r0 10 call 0x5a1 call 0x5a1 let r7 r5 return func2: ; Sub-function 2 add2i r0 1 return ; setctr, getctr getctr a0 r7 add2i r7 4 setctr a0 r7 ; CHECK a0 = <your initial value> + 4 leti r0 24 setctr a1 r0 ; CHECK a1 = 24 getctr sp r4 sub2i r4 8 setctr sp r4 ; CHECK sp = <your initial value> - 8 ; push, pop ; Not tested (a memory view would be useful for these) ; add3, add3i, sub3, sub3i leti r0 10 leti r1 -15 add3 r2 r1 r0 ; CHECK r2 = ffffffff'fffffffb, zero clear, negative set, carry clear, ; overflow clear add3i r2 r0 5 ; CHECK r2 = 15, zero clear, negative clear, carry clear, overflow ; clear leti r2 -0x7fffffffffffffff leti r3 2 sub3 r3 r2 r3 ; CHECK r3 = -r2, zero clear, negative clear, carry clear, overflow set sub3i r0 r1 20 ; CHECK r0 = ffffffff'ffffffdd, zero clear, negative set, carry clear, ; overflow clear ; or3, or3i, and3, and3i leti r4 15 leti r5 70 or3 r4 r4 r5 ; CHECK r4 = 79, zero clear, negative clear, carry clear or3i r4 r4 0xff ; CHECK r4 = 0xff, zero clear, negative clear, carry clear leti r7 -1 or3 r4 r4 r7 ; CHECK r4 = -1, zero clear, negative set, carry clear leti r6 12 and3 r5 r4 r6 ; CHECK r5 = 12, zero clear, negative clear, carry clear and3i r5 r6 2 ; CHECK r5 = 0, zero set, negative clear, carry clear ; xor3, xor3i leti r7 12 leti r5 28 xor3 r6 r5 r7 ; CHECK r6 = 16, zero clear, negative clear, carry clear xor3i r6 r6 0x10 ; CHECK r6 = 0, zero set, negative clear, carry clear ; asr3 leti r5 15 asr3 r7 r5 2 ; CHECK r7 = 3, zero clear, negative clear, carry set asr3 r7 r7 2 ; CHECK r7 = 0, zero set, negative clear, carry set leti r5 -15 asr3 r7 r5 1 ; CHECK r7 = -8, zero clear, negative set, carry set asr3 r7 r7 22 ; CHECK r7 = -1, zero clear, negative set, carry set ; You may need this to stop the program jump -13
ekene-e/MinimISA
1,843
prog/test_all.s
let r4 2 add r1 r2 add r1 r2 push 1 r4 sub r3 5 getctr sp r0 cmp r3 r5 shift left r5 3 push 0x40 r0 pop 0x20 r0 and r2 3 ; I like noodles add r2 r3 5 add r0 r0 add r0 2000 add r0 r0 r0 add r0 r0 2000 sub r0 r0 sub r0 2000 sub r0 r0 r0 sub r0 r0 2000 cmp r0 r0 cmp r0 10000 cmp r0 -1123000 let r0 r0 let r0 10000 let r0 -1123000 shift left r0 1 shift left r0 32 shift right r0 0 shift right r0 23 readze pc 64 r0 readse a0 32 r0 setctr a1 r0 getctr pc r0 jump 34821318 jump -34821318 jump z 34821318 jump eq -34821318 jump nz 34821318 jump neq -34821318 jump slt 34821318 jump sgt -34821318 jump gt 34821318 jump ge -34821318 jump nc 34821318 jump lt -34821318 jump c -34821318 jump v -34821318 or r0 r0 or r0 10000 or r0 1023011123000 or r0 r0 r0 or r0 r0 10000 or r0 r0 1023011123000 and r0 r0 and r0 10000 and r0 1023011123000 and r0 r0 r0 and r0 r0 10000 and r0 r0 1023011123000 write sp 8 r0 call 58129 push 4 r0 pop 16 r0 return ; I like ponies xor r0 r0 r0 xor r0 r0 10000 xor r0 r0 1023011123000 test: call test
ekene-e/MinimISA
1,950
prog/div.s
;-----------------------------------------------------------------------------; ; Signed 64 / 64 -> 64 division ; ;-----------------------------------------------------------------------------; ; Initialization (program input) leti r0 -0x538ba20c467c034b leti r1 0x6527dbd63 ; Negative 64 / Positive 64 division, r0 / r1 = 0xffffffff'f2c9133c ; Main program (r2 = r0 / r1) leti r2 0 leti r3 1 ; Manage signs (r4 < 0 iff we need to swap signs after dividing) xor3 r4 r0 r1 ; We need to have 0 in a register to calculate rx = 0 - rx leti r5 0 cmpi r0 0 jumpif sgt r0_positive sub3 r0 r5 r0 r0_positive: cmpi r1 0 jumpif sgt r1_positive sub3 r1 r5 r1 r1_positive: ; First, we need to shift r1 left so that its most significant bit reaches ; position 63. This naive method is costly; however, we cannot just compute ; lg(r1) and shift left by 64 - lg(r1) because we can only perform constant ; shifts. A logarithmic method is presented below. shft: cmpi r1 0 jumpif slt nonzero shift left r3 1 shift left r1 1 jump shft ; Alternative method in C syntax (logarithmic complexity): ; x = r1; ; if(r1 & 0xffffffff == r1) x <<= 32; ; else r1 >>= 32; ; if(r1 & 0xffff == r1) x <<= 16; ; else r1 >>= 16; ; if(r1 & 0xff == r1) x <<= 8; ; else r1 >>= 8; ; ... ; if(r1 & 0x1 == r1) x <<= 1; ; r1 = x; ; Then we remove the divisor from the dividend when possible, and we shift ; right before trying again. This loop runs exactly 63 - lg(r1) times. ; The size of this loop is 80 bits. nonzero: ; If r0 >= r1, increase the result and decrease r0 cmp r0 r1 jumpif lt next add2 r2 r3 sub2 r0 r1 next: ; In all cases, try with a smaller divisor in the next iteration shift right r1 1 shift right r3 1 jumpif nz nonzero ; Now swap signs if it was required shift left r4 1 jumpif nc halt sub3 r2 r5 r2 ; Halt program (the emulator will detect this and avoid looping forever) halt: jump -13
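The main part of div.s is a restoring division: normalise the divisor by shifting it left until its top bit is set, then walk back down, subtracting it from the dividend whenever possible and accumulating the corresponding bit in the quotient. A C sketch of the same idea, with invented names, is shown below; like the original, it does not handle a zero divisor.

    #include <stdint.h>

    /* Sketch of prog/div.s: signed 64 / 64 -> 64 restoring division. */
    static int64_t div64(int64_t num, int64_t den)
    {
        int negate = (num < 0) != (den < 0);          /* xor3 r4 r0 r1 */
        uint64_t n = (num < 0) ? -(uint64_t)num : (uint64_t)num;  /* |r0| */
        uint64_t d = (den < 0) ? -(uint64_t)den : (uint64_t)den;  /* |r1| */
        uint64_t bit = 1;                             /* r3 */
        uint64_t q = 0;                               /* r2 */

        while ((d >> 63) == 0) {                      /* shift until the MSB of r1 is set */
            d <<= 1;
            bit <<= 1;
        }
        while (bit != 0) {
            if (n >= d) {                             /* subtract when the dividend allows it */
                q += bit;
                n -= d;
            }
            d >>= 1;                                  /* smaller divisor next iteration */
            bit >>= 1;
        }
        int64_t sq = (int64_t)q;
        return negate ? -sq : sq;
    }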
ekene-e/MinimISA
1,081
prog/muls.s
;-----------------------------------------------------------------------------; ; Signed 64 * 64 -> 64 multiplication ; ;-----------------------------------------------------------------------------; ; Initializing operands. leti r0 -0x738b6c leti r1 0xc4b3213e ; r0 * r1 = 0xffa73867'd2874fd8 ; Getting the sign of the result. xor3 r4 r0 r1 ; Used to calculate sub r1 0 r1 (ie. taking the opposite). leti r3 0 ; r0 = |r0| cmpi r0 0 jumpif sgt r0_is_positive sub3 r0 r3 r0 r0_is_positive: ; r1 = |r1| cmpi r1 0 jumpif sgt r1_is_positive sub3 r1 r3 r1 r1_is_positive: ; Calculating product of two positive integers. nonzero: shift right r0 1 jump nc next add2 r2 r1 next: shift left r1 1 cmp r0 0 jump nz nonzero ; Adjusting the sign of the result. shift left r4 1 jumpif nc r2_is_nonnegative sub3 r2 r3 r2 r2_is_nonnegative: ; End of the program. halt: jump halt
ekene-e/MinimISA
3,156
prog/drawing.s
;-----------------------------------------------------------------------------; ; Graphical rendering using memory-mapped video RAM ; ;-----------------------------------------------------------------------------; ; Specify an "entry point" jump main ; This file exposes the clear_screen, plot, fill and draw functions .include lib_draw.s ; This file exposes the putc and puts function .include lib_font.s main: ; Clear the screen leti r1 0x2105 call clear_screen ; Plot a line of pixels leti r4 1 _plot_one: leti r1 r4 leti r2 1 leti r3 0xdefa call plot add2i r4 2 cmpi r4 159 jumpif neq _plot_one ; Write "SM & AD", character by character, then "Hello, World!" using a string let r5 r3 ; 'S' push 16 r5 leti r1 4 leti r2 7 leti r3 0x53 call putc ; 'M' push 16 r5 leti r1 10 leti r2 7 leti r3 0x4d call putc ; '&' push 16 r5 leti r1 22 leti r2 7 leti r3 0x26 call putc ; 'A' push 16 r5 leti r1 34 leti r2 7 leti r3 0x41 call putc ; 'D' push 16 r5 leti r1 40 leti r2 7 leti r3 0x44 call putc ; _str_lea retrieves the address of the string for us (see below) call _str_lea leti r3 0xdefa push 16 r3 leti r1 4 leti r2 18 let r3 r0 call puts ; Display a filled rectangle leti r1 10 leti r2 98 leti r3 42 leti r0 0xdefa push 16 r0 leti r0 118 push 16 r0 call fill ; Draw some lines in 8 directions ; NEE leti r1 40 leti r2 60 leti r3 60 leti r0 0xdefa push 16 r0 leti r0 70 push 16 r0 call draw ; NNE leti r1 40 leti r2 60 leti r3 50 leti r0 0xdefa push 16 r0 leti r0 80 push 16 r0 call draw ; NNW leti r1 40 leti r2 60 leti r3 30 leti r0 0xdefa push 16 r0 leti r0 80 push 16 r0 call draw ; NWW leti r1 40 leti r2 60 leti r3 20 leti r0 0xdefa push 16 r0 leti r0 70 push 16 r0 call draw ; SWW leti r1 40 leti r2 60 leti r3 20 leti r0 0xdefa push 16 r0 leti r0 50 push 16 r0 call draw ; SSW leti r1 40 leti r2 60 leti r3 30 leti r0 0xdefa push 16 r0 leti r0 40 push 16 r0 call draw ; SSE leti r1 40 leti r2 60 leti r3 50 leti r0 0xdefa push 16 r0 leti r0 40 push 16 r0 call draw ; SEE leti r1 40 leti r2 60 leti r3 60 leti r0 0xdefa push 16 r0 leti r0 50 push 16 r0 call draw ; Display a pentagon ; These are the coordinates of the vertices. Each byte is a coordinate; ; x values are in r4, y values are in r5. leti r4 0x6d8a7f5c516d leti r5 0x6e593737596e leti r6 5 _line_one: ; Get the coordinate of the current point and3i r1 r4 0xff and3i r2 r5 0xff shift right r4 8 shift right r5 8 ; Draw a line from the current point to the next one and3i r3 r4 0xff leti r0 0xdefa push 16 r0 and3i r0 r5 0xff push 16 r0 call draw sub2i r6 1 jumpif nz _line_one ; Stop the program (the emulator will detect this and avoid looping forever) jump -13 ; This is the string "Hello, World!", along with a "load" routine to retrieve ; its address _str_lea: getctr pc r0 add2i r0 24 return _str: ; '#' means binary (the ISA does not specify) .const 112 #0100100001100101011011000110110001101111001011000010000001010111011011110111001001101100011001000010000100000000
ekureina/oxiv6
4,352
oxiv6-kernel/src/trampoline.S
# Copyright 2024 Claire Moore # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # low-level code to handle traps from user space into # the kernel, and returns from kernel to user. # # the kernel maps the page holding this code # at the same virtual address (TRAMPOLINE) # in user and kernel space so that it continues # to work when it switches page tables. # kernel.ld causes this code to start at # a page boundary. # #include "riscv.h" #include "memlayout.h" .section trampsec .globl trampoline trampoline: .align 4 .globl uservec uservec: # # trap.c sets stvec to point here, so # traps from user space start here, # in supervisor mode, but with a # user page table. # # save user a0 in sscratch so # a0 can be used to get at TRAPFRAME. csrw sscratch, a0 # each process has a separate p->trapframe memory area, # but it's mapped to the same virtual address # (TRAPFRAME) in every process's user page table. li a0, {TRAPFRAME} # save the user registers in TRAPFRAME sd ra, 40(a0) sd sp, 48(a0) sd gp, 56(a0) sd tp, 64(a0) sd t0, 72(a0) sd t1, 80(a0) sd t2, 88(a0) sd s0, 96(a0) sd s1, 104(a0) sd a1, 120(a0) sd a2, 128(a0) sd a3, 136(a0) sd a4, 144(a0) sd a5, 152(a0) sd a6, 160(a0) sd a7, 168(a0) sd s2, 176(a0) sd s3, 184(a0) sd s4, 192(a0) sd s5, 200(a0) sd s6, 208(a0) sd s7, 216(a0) sd s8, 224(a0) sd s9, 232(a0) sd s10, 240(a0) sd s11, 248(a0) sd t3, 256(a0) sd t4, 264(a0) sd t5, 272(a0) sd t6, 280(a0) # save the user a0 in p->trapframe->a0 csrr t0, sscratch sd t0, 112(a0) # initialize kernel stack pointer, from p->trapframe->kernel_sp ld sp, 8(a0) # make tp hold the current hartid, from p->trapframe->kernel_hartid ld tp, 32(a0) # load the address of usertrap(), from p->trapframe->kernel_trap ld t0, 16(a0) # fetch the kernel page table address, from p->trapframe->kernel_satp. ld t1, 0(a0) # wait for any previous memory operations to complete, so that # they use the user page table. sfence.vma zero, zero # install the kernel page table. csrw satp, t1 # flush now-stale user entries from the TLB. sfence.vma zero, zero # jump to usertrap(), which does not return jr t0 .globl userret userret: # userret(pagetable) # called by usertrapret() in trap.c to # switch from kernel to user. # a0: user page table, for satp. # switch to the user page table. sfence.vma zero, zero csrw satp, a0 sfence.vma zero, zero li a0, {TRAPFRAME} # restore all but a0 from TRAPFRAME ld ra, 40(a0) ld sp, 48(a0) ld gp, 56(a0) ld tp, 64(a0) ld t0, 72(a0) ld t1, 80(a0) ld t2, 88(a0) ld s0, 96(a0) ld s1, 104(a0) ld a1, 120(a0) ld a2, 128(a0) ld a3, 136(a0) ld a4, 144(a0) ld a5, 152(a0) ld a6, 160(a0) ld a7, 168(a0) ld s2, 176(a0) ld s3, 184(a0) ld s4, 192(a0) ld s5, 200(a0) ld s6, 208(a0) ld s7, 216(a0) ld s8, 224(a0) ld s9, 232(a0) ld s10, 240(a0) ld s11, 248(a0) ld t3, 256(a0) ld t4, 264(a0) ld t5, 272(a0) ld t6, 280(a0) # restore user a0 ld a0, 112(a0) # return to user mode and user pc. # usertrapret() set up sstatus and sepc. sret
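The load/store offsets used by uservec and userret imply a trapframe layout like the following C sketch. Field names are taken from the comments above where they exist; the slot at offset 24 is not touched by this code (in xv6-style kernels it holds the saved user program counter), and the actual definition in the oxiv6 sources may differ.

    #include <stdint.h>

    /* Layout implied by the offsets in uservec/userret. */
    struct trapframe {
        /*   0 */ uint64_t kernel_satp;    /* kernel page table */
        /*   8 */ uint64_t kernel_sp;      /* top of the process's kernel stack */
        /*  16 */ uint64_t kernel_trap;    /* address of usertrap() */
        /*  24 */ uint64_t epc;            /* not referenced by this code */
        /*  32 */ uint64_t kernel_hartid;  /* saved kernel tp */
        /*  40 */ uint64_t ra;
        /*  48 */ uint64_t sp;
        /*  56 */ uint64_t gp;
        /*  64 */ uint64_t tp;
        /*  72 */ uint64_t t0, t1, t2;                              /* 72, 80, 88 */
        /*  96 */ uint64_t s0, s1;                                  /* 96, 104 */
        /* 112 */ uint64_t a0, a1, a2, a3, a4, a5, a6, a7;          /* 112 .. 168 */
        /* 176 */ uint64_t s2, s3, s4, s5, s6, s7, s8, s9, s10, s11; /* 176 .. 248 */
        /* 256 */ uint64_t t3, t4, t5, t6;                          /* 256 .. 280 */
    };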
EleisonC/Green-Thread-Library
1,112
src/krono_context/asm/aarch64.s
.global context_switch context_switch: ;Save callee-saved registers (x19 to x29) ;from the 'from' kronocontext (pointed to by x0) str x19, [x0, #0] str x20, [x0, #8] str x21, [x0, #16] str x22, [x0, #24] str x23, [x0, #32] str x24, [x0, #40] str x25, [x0, #48] str x26, [x0, #56] str x27, [x0, #64] str x28, [x0, #72] str x29, [x0, #80] ;save the current stack pointer and link register mov x9, sp ;Move current SP(stack_pointer) to temporary register x9 ;here is where the extra memory we stored comes in handy str x9, [x0, #88] ;Store SP at offset 88 in the context struct ;return address str lr, [x0, #96] ;Store the link register at offset 96 ;Load callee-saved registers from the 'to' kronocontext ;pointed to by x1 ldr x19, [x1, #0] ldr x20, [x1, #8] ldr x21, [x1, #16] ldr x22, [x1, #24] ldr x23, [x1, #32] ldr x24, [x1, #40] ldr x25, [x1, #48] ldr x26, [x1, #56] ldr x27, [x1, #64] ldr x28, [x1, #72] ldr x29, [x1, #80] ;load stack pointer and link register from the 'to' context ldr x9, [x1, #88] mov sp, x9 ldr lr, [x1, #96] ;Return to the new context ret
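The offsets in context_switch imply a context record of eleven callee-saved registers followed by the saved stack pointer and link register. A C view of that layout, with a guessed struct name and the signature the routine effectively implements (the real definition in the Rust sources may differ):

    #include <stdint.h>

    /* Offsets used above: x19..x29 at bytes 0..80, saved sp at 88, saved lr at 96. */
    struct krono_context {            /* struct name guessed from "kronocontext" */
        uint64_t x19_to_x29[11];      /* offsets 0, 8, ..., 80 */
        uint64_t sp;                  /* offset 88 */
        uint64_t lr;                  /* offset 96: return address used by ret */
    };

    /* Effective signature: save into `from` (x0), restore from `to` (x1). */
    extern void context_switch(struct krono_context *from, struct krono_context *to);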
ElliotLockerman/pdp-11
3,058
examples/byte_queue.s
; Queue ; 0 buf: &u8 Underlying buffer. ; 2 head: u16 Index in to buf. ; 4 tail: u16 Index in to buf. ; 6 cap: u16 Length of buf in bytes. ; 10 len: u16 Number of elements in queue. QUEUE_BUF = 0 QUEUE_HEAD = 2 QUEUE_TAIL = 4 QUEUE_CAP = 6 QUEUE_LEN = 10 STATUS_Z_SHIFT = 177776 ; -1 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; fn byte_queue_push(r0 queue: &Queue, r1 val: u8) -> r0 success: bool ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; byte_queue_push: mov r2, -(sp) ; If full, return false. cmp QUEUE_CAP(r0), QUEUE_LEN(r0) beq 3f ; Move r1 to buf[tail], increment len. mov QUEUE_BUF(r0), r2 add QUEUE_TAIL(r0), r2 movb r1, (r2) inc QUEUE_LEN(r0) ; Increment tail and wrap if needed. inc QUEUE_TAIL(r0) cmp QUEUE_CAP(r0), QUEUE_TAIL(r0) bne 1f clr QUEUE_TAIL(r0) ; Wrap tail ; Success, set return value to 1. 1: mov #1, r0 ; Return. 2: mov (sp)+, r2 rts pc ; Full, set return value to 0. 3: mov #0, r0 br 2b ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; fn byte_queue_pop(r0 queue: &Queue) -> (r0 success: bool, r1 val: u8) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; byte_queue_pop: ; If empty, return false tst QUEUE_LEN(r0) beq 3f ; Move buf[head] to r1, decrement len mov QUEUE_BUF(r0), r1 add QUEUE_HEAD(r0), r1 movb (r1), r1 dec QUEUE_LEN(r0) ; Increment head and wrap if needed inc QUEUE_HEAD(r0) cmp QUEUE_CAP(r0), QUEUE_HEAD(r0) bne 1f clr QUEUE_HEAD(r0) ; Wrap head ; Success, set return value to 1. 1: mov #1, r0 ; Return. 2: rts pc ; Empty, set return value to 0. 3: mov #0, r0 br 2b ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; fn byte_queue_len(r0 queue: &Queue) -> r0 len: u16 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; byte_queue_len: mov QUEUE_LEN(r0), r0 rts pc ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; fn byte_queue_full(r0 queue: &Queue) -> r0 full: bool ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; byte_queue_full: cmp QUEUE_CAP(r0), QUEUE_LEN(r0) mov @#STATUS, r0 ash #STATUS_Z_SHIFT, r0 bic #177776, r0 rts pc ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; fn byte_queue_empty(r0 queue: &Queue) -> r0 empty: bool ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; byte_queue_empty: tst QUEUE_LEN(r0) mov @#STATUS, r0 ash #STATUS_Z_SHIFT, r0 bic #177776, r0 rts pc
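The struct layout and the push/pop routines above translate almost directly into C. The sketch below (names invented, field order mirroring the QUEUE_* offsets) shows the same ring-buffer logic: refuse a push when len equals cap, refuse a pop when len is zero, and wrap head/tail back to zero when they reach cap.

    #include <stdint.h>
    #include <stdbool.h>

    /* Approximate C model of the byte queue the PDP-11 routines operate on. */
    struct byte_queue {
        uint8_t *buf;
        uint16_t head, tail, cap, len;
    };

    static bool byte_queue_push(struct byte_queue *q, uint8_t val)
    {
        if (q->len == q->cap)          /* full: return false */
            return false;
        q->buf[q->tail] = val;
        q->len++;
        if (++q->tail == q->cap)       /* wrap tail */
            q->tail = 0;
        return true;
    }

    static bool byte_queue_pop(struct byte_queue *q, uint8_t *val)
    {
        if (q->len == 0)               /* empty: return false */
            return false;
        *val = q->buf[q->head];
        q->len--;
        if (++q->head == q->cap)       /* wrap head */
            q->head = 0;
        return true;
    }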
ElliotLockerman/pdp-11
1,543
examples/timer_ticks.s
; timer_ticks.s ; Prints digits 0 - 9 (one every 2^8 ticks), followed by a newline, then halts. STACK_TOP = 150000 LKS = 177546 LKS_INT_ENB = 100 TPS = 177564 TPB = TPS + 2 TPS_READY_MASK = 177 . = 100 .word clock, 300 . = 400 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; fn _start() ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; _start: mov #STACK_TOP, sp mov #LKS_INT_ENB, @#LKS ; Enable clock interrupts. ; Just spin; the rest of the program happens in clock() in response to interrupts. loop: wait br loop ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; fn clock() ; Handles clock interrupt. Every 2^8 ticks, prints count and increments it. ; After 9, prints \n and halts. ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; clock: mov r0, -(sp) mov r1, -(sp) mov r2, -(sp) mov r3, -(sp) mov r4, -(sp) mov r5, -(sp) mov LKS, r0 ; clear clock bit ; Increment counter and print it. inc count mov count, r0 add #'0, r0 jsr pc, putc ; If we haven't reached 9 yet, just return. cmp #9., count bgt done ; If we have just printed 9, print \n and halt. mov #12, r0 ; '\n' jsr pc, putc halt done: mov (sp)+, r5 mov (sp)+, r4 mov (sp)+, r3 mov (sp)+, r2 mov (sp)+, r1 mov (sp)+, r0 rti ; Counter to print. count: .word 0
ElliotLockerman/pdp-11
1,033
examples/echo_spin.s
STACK_TOP = 150000 . = 400 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; fn _start() ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; _start: mov #STACK_TOP, sp mov #buf, r1 ; Main loop. 1: jsr pc, getc ; If we're done with the line, print it. cmpb #'\n, r0 beq 2f ; If we hit the line length limit, drop characters other than \n and loop. cmpb r1, #buf_end beq 1b ; If we have room, save character in buffer, echo it and loop. movb r0, (r1)+ jsr pc, putc br 1b ; Print the buffer, then loop. 2: ; Echo the newline terminating the original line. movb #'\n, r0 jsr pc, putc ; Save the terminating newline to the buffer and print the line. movb #'\n, (r1)+ mov #buf, r0 jsr pc, putline mov #buf, r1 br 1b BUFLEN = 72. buf: . = . + BUFLEN buf_end: .word 0 ; extra space for a newline
ElliotLockerman/pdp-11
1,274
examples/teletype_spin.s
TPS = 177564 TPB = TPS + 2 TPS_READY_CMASK = 177 TKS = 177560 TKB = TKS + 2 TKS_DONE_CMASK = 177577 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; fn putc(r0 char: u8) ; Blocks until ready to print, then prints r0. ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; putc: ; Loop until the teletype is ready to accept another character. bicb #TPS_READY_CMASK, @#TPS beq putc movb r0, @#TPB rts pc ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; fn putline(r0 start: char*, r1 end: char*) ; print (r0) through (r1) (exclusive). ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; putline: mov r2, -(sp) mov r1, r2 mov r0, r1 1: cmp r1, r2 beq 2f movb (r1)+, r0 jsr pc, putc br 1b 2: mov (sp)+, r2 rts pc ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; fn read() -> r0 char: u8 ; Blocks until char available; returns read char in r0. ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; getc: bic #TKS_DONE_CMASK, @#TKS beq getc movb @#TKB, r0 rts pc
ElliotLockerman/pdp-11
2,199
examples/fib.s
.even _start: mov #150000, sp mov #0, r1 1: ; Call fib. mov r1, r0 jsr pc, fib ; Print the result. jsr pc, printu ; Print a newline. mov #'\n, r0 jsr pc, putc ; Increment the induction variable and loop until it hits 10. inc r1 cmp #10., r1 bne 1b halt ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; fib(r0 num: u16) -> u16 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; fib: ; Base case 1: fib(0) = 0. cmp #0, r0 beq 1f ; Base case 2: fib(1) = 1. cmp #1, r0 beq 1f ; Recursive case. ; Save variables we're using. mov r1, -(sp) mov r2, -(sp) ; fib(num - 1). dec r0 mov r0, r1 jsr pc, fib ; fib(num - 2). mov r0, r2 ; Save fib(num - 1) in r2. mov r1, r0 dec r0 jsr pc, fib add r2, r0 mov (sp)+, r2 mov (sp)+, r1 1: rts pc ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; fn printu(r0 num: u16) ; Prints unsigned num in decimal. ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; printu: mov r1, -(sp) mov r2, -(sp) ; Lower half of 32-bit dividend in r0, the argument. clr r1 ; Upper half of 32-bit dividend, which we're not using. mov sp, r2 ; Save top of stack (one past last digit). 1: clr r1 ; Upper half of 32-bit dividend, which we're not using. div #10., r0 ; Quotient in r0, remainder in r1 mov r1, -(sp) ; Save remainder. cmp #0, r0 ; If quotient isn't 0, loop. bne 1b ; Now we have all the decimal digits on the stack in the range [sp, r2), and there must be at ; least one digit. 2: mov (sp)+, r0 ; Pop a char. add #48., r0 ; Convert to ascii. jsr pc, putc ; Print it. cmp sp, r2 ; Not at the end? bne 2b ; continue mov (sp)+, r2 mov (sp)+, r1 rts pc
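printu converts the number to decimal by repeated division by 10, pushing each remainder on the stack and popping them afterwards so the most significant digit is printed first; the do/while shape guarantees at least one digit for zero. A C sketch of the same routine (the putc callback stands in for the assembly putc):

    #include <stdint.h>

    /* C rendering of printu in fib.s, for illustration only. */
    void printu(uint16_t num, void (*putc_fn)(char))
    {
        char stack[6];                  /* a 16-bit value has at most 5 digits */
        int top = 0;
        do {
            stack[top++] = (char)('0' + num % 10);  /* push remainder */
            num /= 10;
        } while (num != 0);             /* always emits at least one digit */
        while (top > 0)
            putc_fn(stack[--top]);      /* pop: most significant digit first */
    }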
ElliotLockerman/pdp-11
5,456
examples/echo_interrupt.s
STACK_TOP = 150000 TPS = 177564 TPB = TPS + 2 TPS_READY_CMASK = 177 TPS_INT_ENB = 100 TKS = 177560 TKB = TKS + 2 TKS_DONE_CMASK = 177577 TKS_INT_ENB = 100 LINE_LEN = 72. KEYBOARD_BUF_LEN = LINE_LEN + 1 LINE_BUF_LEN = LINE_LEN + 1 ; line + \n PRINT_BUF_LEN = LINE_LEN + 1 STATUS = 177776 PRIO7 = 340 . = 60 .word keyboard, PRIO7 .word printer, PRIO7 . = 400 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; fn _start() ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; _start: mov #STACK_TOP, sp bis #TKS_INT_ENB, @#TKS bis #TPS_INT_ENB, @#TPS 1: wait 2: ; Pop our new char from the keyboard queue (with interrupts disabled for synchronization). bis #PRIO7, @#STATUS mov #keyboard_queue, r0 jsr pc, byte_queue_pop bic #PRIO7, @#STATUS ; If the queue was empty, wait for an interrupt. tst r0 beq 1b ; Check if the character was \n; it gets special handling. cmpb #'\n, r1 beq 3f ; If we got a character, check if we have room for it. If we already have a ; line-lengths worth in the queue, drop the character. ; We only echo ; characters we have room for, so the user will see that the character was ; dropped. mov #line_queue, r0 jsr pc, byte_queue_len cmp r0, #LINE_LEN bge 1b ; Echo the character and push for later. mov r1, r0 jsr pc, printer_push mov #line_queue, r0 jsr pc, byte_queue_push ; We checked the length, so this can't fail. br 2b 3: ; If the character was \n, we have room reserved, echo it, push it and print the line. movb r1, r0 jsr pc, printer_push mov #line_queue, r0 jsr pc, byte_queue_push mov #line_queue, r0 jsr pc, printer_push_queue br 2b line_queue: .word line_buf ; buf .word 0 ; head .word 0 ; tail .word LINE_BUF_LEN ; cap .word 0 ; len line_buf: . = . + LINE_BUF_LEN .even ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; fn print_push_queue() ; Pop all elements from line queue and push on print queue. ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; printer_push_queue: mov r1, -(sp) 1: mov #line_queue, r0 jsr pc, byte_queue_pop tst r0 beq 2f movb r1, r0 jsr pc, printer_push br 1b 2: mov (sp)+, r1 rts pc ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; fn printer_push(r0 char: u8) ; Enqueues character to be printed, waiting if full. ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; printer_push: mov r1, -(sp) movb r0, r1 1: mov #print_queue, r0 bis #PRIO7, @#STATUS jsr pc, byte_queue_push bic #PRIO7, @#STATUS tst r0 bne 2f ; Its full. Wait for an interrupt and try again. wait br 1b 2: ; The printer interrupt will disable interrupts once the queue is empty. ; enable interrupts in case that has occured to start up printing again. bis #TPS_INT_ENB, @#TPS mov (sp)+, r1 rts pc ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; fn printer() ; Received printer interrupt, printing a character from print_queue (if present). ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; printer: mov r0, -(sp) mov r1, -(sp) mov r2, -(sp) mov r3, -(sp) mov r4, -(sp) mov r5, -(sp) ; Pop a character to print. mov #print_queue, r0 jsr pc, byte_queue_pop tst r0 bne 1f ; print queue was empty. Disabled interrupts so when printer_push is called, ; it can reenabled interrupts and get this called again. bic #TPS_INT_ENB, @#TPS br 2f 1: ; We go a character. 
movb r1, @#TPB 2: mov (sp)+, r5 mov (sp)+, r4 mov (sp)+, r3 mov (sp)+, r2 mov (sp)+, r1 mov (sp)+, r0 rti print_queue: .word print_buf ; buf .word 0 ; head .word 0 ; tail .word PRINT_BUF_LEN ; cap .word 0 ; len print_buf: . = . + PRINT_BUF_LEN .even ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; fn keyboard() ; Received keyboard interrupt, push new character on to keyboard_queue. ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; keyboard: mov r0, -(sp) mov r1, -(sp) mov r2, -(sp) mov r3, -(sp) mov r4, -(sp) mov r5, -(sp) ; Read character and push on to queue. movb @#TKB, r1 mov #keyboard_queue, r0 jsr pc, byte_queue_push ; Ignore failure, they're nothing to do, but that's why the queue is oversized. mov (sp)+, r5 mov (sp)+, r4 mov (sp)+, r3 mov (sp)+, r2 mov (sp)+, r1 mov (sp)+, r0 rti keyboard_queue: .word keyboard_buf ; buf .word 0 ; head .word 0 ; tail .word KEYBOARD_BUF_LEN ; cap .word 0 ; len keyboard_buf: . = . + KEYBOARD_BUF_LEN .even
ElliotLockerman/pdp-11
3,546
examples/threads.s
; threads.s ; Two threads run concurrently and print their thread id. STACK_0 = 150000 STACK_1 = 140000 LKS = 177546 LKS_INT_ENB = 100 TPS = 177564 TPB = TPS + 2 TPS_READY_MASK = 177 STATUS = 177776; PRIO7 = 340 SWAP_PERIOD = 20 ; In ticks . = 100 .word clock, 300 ; Clock interrupt vector . = 400 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; fn _start() ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; _start: ; Set up thread 1 for a return from clock(). mov #STACK_1, sp mov #0, -(sp) ; dummy return address mov #0, -(sp) ; ps mov #run, -(sp) ; pc mov #'1, -(sp) ; r0: tid (as char) to print mov #0, -(sp) ; r1 mov #0, -(sp) ; r2 mov #0, -(sp) ; r3 mov #0, -(sp) ; r4 mov #0, -(sp) ; r5 mov sp, tcb_1 ; Set up thread 0; it will be jumped to directly when it starts rather than ; returning from clock(). mov #STACK_0, sp mov #0, -(sp) ; Dummy return address. mov #'0, r0 ; Tid (as char) to print. mov #LKS_INT_ENB, @#LKS ; Enable clock interrupts. ; Launch thread 0 (never returns). br run ; Index of currently running thread in tcb array. tid: .word 0 ; Thread Control Block (tcb) array. tcbs: tcb_0: .word 0 ; saved sp tcb_1: .word 0 ; saved sp ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; fn run(tid: r0) ; Main function for each thread - loop and print thread id every 2^16 iterations. ; tid is the thread id in its ascii digit. ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; run: ; loop counter in r1. clr r1 ; loop forever, printing tid once every 2^8 iterations. 1: inc r1 cmpb #0, r1 bne 1b jsr pc, putc br 1b ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; fn clock() ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; clock: mov r0, -(sp) mov r1, -(sp) mov r2, -(sp) mov r3, -(sp) mov r4, -(sp) mov r5, -(sp) mov LKS, r0 ; clear clock bit ; Increment tick counter; if it hasn't rolled over, just return. incb ticks cmpb #SWAP_PERIOD, ticks bne 1f ; Every time the tick counter rolls over, swap threads. mov #0, ticks ; Save currently running thread's sp mov tid, r0 asl r0 ; index *= sizeof(tcb) mov sp, tcbs(r0) ; Toggle tid between 0 and 1 inc tid bic #177776, tid ; Restore sp of new thread mov tid, r0 asl r0 ; index *= sizeof(tcb) mov tcbs(r0), sp 1: mov (sp)+, r5 mov (sp)+, r4 mov (sp)+, r3 mov (sp)+, r2 mov (sp)+, r1 mov (sp)+, r0 rti ; Total number of timer ticks, wrapping. ticks: .word 0 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; putc(char) ; char to print in r0 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; putc: mov @#STATUS, -(sp) 1: ; Loop until the teleprinter is ready to accept another character. bicb #PRIO7, @#STATUS ; Enable interrupts bicb #TPS_READY_MASK, @#TPS beq 1b ; Mask interrupts and check one last time bis #PRIO7, @#STATUS bicb #TPS_READY_MASK, @#TPS beq 1b ; Actually print the character movb r0, @#TPB mov (sp)+, @#STATUS rts pc
ElliotLockerman/pdp-11
1,102
examples/hello.s
; hello.s ; Prints hello, world!\n . = 64 .word tp_ready, 200 . = 400 STACK_TOP = 150000 TPS = 177564 TPB = TPS + 2 TPS_READY_MASK = 177 TPS_INT_ENB = 100 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; fn _start() ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; _start: mov #STACK_TOP, sp mov #TPS_INT_ENB, @#TPS loop: wait br loop msg: .ascii "hello, world!" .byte '\n, '\0 .even next: .word msg ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; tp_ready() ; Teleprinter ready to accept another character; print it! ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; tp_ready: mov r0, -(sp) mov r1, -(sp) mov r2, -(sp) mov r3, -(sp) mov r4, -(sp) mov r5, -(sp) movb @next, r0 inc next cmp #0, r0 bne cont halt cont: movb r0, @#TPB mov (sp)+, r5 mov (sp)+, r4 mov (sp)+, r3 mov (sp)+, r2 mov (sp)+, r1 mov (sp)+, r0 rti
elmiliano/portfolio
25,888
fruit-rgb/Core/Startup/startup_stm32f767zitx.s
/** ****************************************************************************** * @file startup_stm32f767xx.s * @author MCD Application Team * @brief STM32F767xx Devices vector table for GCC based toolchain. * This module performs: * - Set the initial SP * - Set the initial PC == Reset_Handler, * - Set the vector table entries with the exceptions ISR address * - Branches to main in the C library (which eventually * calls main()). * After Reset the Cortex-M7 processor is in Thread mode, * priority is Privileged, and the Stack is set to Main. ****************************************************************************** * @attention * * Copyright (c) 2016 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ .syntax unified .cpu cortex-m7 .fpu softvfp .thumb .global g_pfnVectors .global Default_Handler /* start address for the initialization values of the .data section. defined in linker script */ .word _sidata /* start address for the .data section. defined in linker script */ .word _sdata /* end address for the .data section. defined in linker script */ .word _edata /* start address for the .bss section. defined in linker script */ .word _sbss /* end address for the .bss section. defined in linker script */ .word _ebss /* stack used for SystemInit_ExtMemCtl; always internal RAM used */ /** * @brief This is the code that gets called when the processor first * starts execution following a reset event. Only the absolutely * necessary set is performed, after which the application * supplied main() routine is called. * @param None * @retval : None */ .section .text.Reset_Handler .weak Reset_Handler .type Reset_Handler, %function Reset_Handler: ldr sp, =_estack /* set stack pointer */ /* Copy the data segment initializers from flash to SRAM */ ldr r0, =_sdata ldr r1, =_edata ldr r2, =_sidata movs r3, #0 b LoopCopyDataInit CopyDataInit: ldr r4, [r2, r3] str r4, [r0, r3] adds r3, r3, #4 LoopCopyDataInit: adds r4, r0, r3 cmp r4, r1 bcc CopyDataInit /* Zero fill the bss segment. */ ldr r2, =_sbss ldr r4, =_ebss movs r3, #0 b LoopFillZerobss FillZerobss: str r3, [r2] adds r2, r2, #4 LoopFillZerobss: cmp r2, r4 bcc FillZerobss /* Call the clock system initialization function.*/ bl SystemInit /* Call static constructors */ bl __libc_init_array /* Call the application's entry point.*/ bl main bx lr .size Reset_Handler, .-Reset_Handler /** * @brief This is the code that gets called when the processor receives an * unexpected interrupt. This simply enters an infinite loop, preserving * the system state for examination by a debugger. * @param None * @retval None */ .section .text.Default_Handler,"ax",%progbits Default_Handler: Infinite_Loop: b Infinite_Loop .size Default_Handler, .-Default_Handler /****************************************************************************** * * The minimal vector table for a Cortex M7. Note that the proper constructs * must be placed on this to ensure that it ends up at physical address * 0x0000.0000. 
* *******************************************************************************/ .section .isr_vector,"a",%progbits .type g_pfnVectors, %object .size g_pfnVectors, .-g_pfnVectors g_pfnVectors: .word _estack .word Reset_Handler .word NMI_Handler .word HardFault_Handler .word MemManage_Handler .word BusFault_Handler .word UsageFault_Handler .word 0 .word 0 .word 0 .word 0 .word SVC_Handler .word DebugMon_Handler .word 0 .word PendSV_Handler .word SysTick_Handler /* External Interrupts */ .word WWDG_IRQHandler /* Window WatchDog */ .word PVD_IRQHandler /* PVD through EXTI Line detection */ .word TAMP_STAMP_IRQHandler /* Tamper and TimeStamps through the EXTI line */ .word RTC_WKUP_IRQHandler /* RTC Wakeup through the EXTI line */ .word FLASH_IRQHandler /* FLASH */ .word RCC_IRQHandler /* RCC */ .word EXTI0_IRQHandler /* EXTI Line0 */ .word EXTI1_IRQHandler /* EXTI Line1 */ .word EXTI2_IRQHandler /* EXTI Line2 */ .word EXTI3_IRQHandler /* EXTI Line3 */ .word EXTI4_IRQHandler /* EXTI Line4 */ .word DMA1_Stream0_IRQHandler /* DMA1 Stream 0 */ .word DMA1_Stream1_IRQHandler /* DMA1 Stream 1 */ .word DMA1_Stream2_IRQHandler /* DMA1 Stream 2 */ .word DMA1_Stream3_IRQHandler /* DMA1 Stream 3 */ .word DMA1_Stream4_IRQHandler /* DMA1 Stream 4 */ .word DMA1_Stream5_IRQHandler /* DMA1 Stream 5 */ .word DMA1_Stream6_IRQHandler /* DMA1 Stream 6 */ .word ADC_IRQHandler /* ADC1, ADC2 and ADC3s */ .word CAN1_TX_IRQHandler /* CAN1 TX */ .word CAN1_RX0_IRQHandler /* CAN1 RX0 */ .word CAN1_RX1_IRQHandler /* CAN1 RX1 */ .word CAN1_SCE_IRQHandler /* CAN1 SCE */ .word EXTI9_5_IRQHandler /* External Line[9:5]s */ .word TIM1_BRK_TIM9_IRQHandler /* TIM1 Break and TIM9 */ .word TIM1_UP_TIM10_IRQHandler /* TIM1 Update and TIM10 */ .word TIM1_TRG_COM_TIM11_IRQHandler /* TIM1 Trigger and Commutation and TIM11 */ .word TIM1_CC_IRQHandler /* TIM1 Capture Compare */ .word TIM2_IRQHandler /* TIM2 */ .word TIM3_IRQHandler /* TIM3 */ .word TIM4_IRQHandler /* TIM4 */ .word I2C1_EV_IRQHandler /* I2C1 Event */ .word I2C1_ER_IRQHandler /* I2C1 Error */ .word I2C2_EV_IRQHandler /* I2C2 Event */ .word I2C2_ER_IRQHandler /* I2C2 Error */ .word SPI1_IRQHandler /* SPI1 */ .word SPI2_IRQHandler /* SPI2 */ .word USART1_IRQHandler /* USART1 */ .word USART2_IRQHandler /* USART2 */ .word USART3_IRQHandler /* USART3 */ .word EXTI15_10_IRQHandler /* External Line[15:10]s */ .word RTC_Alarm_IRQHandler /* RTC Alarm (A and B) through EXTI Line */ .word OTG_FS_WKUP_IRQHandler /* USB OTG FS Wakeup through EXTI line */ .word TIM8_BRK_TIM12_IRQHandler /* TIM8 Break and TIM12 */ .word TIM8_UP_TIM13_IRQHandler /* TIM8 Update and TIM13 */ .word TIM8_TRG_COM_TIM14_IRQHandler /* TIM8 Trigger and Commutation and TIM14 */ .word TIM8_CC_IRQHandler /* TIM8 Capture Compare */ .word DMA1_Stream7_IRQHandler /* DMA1 Stream7 */ .word FMC_IRQHandler /* FMC */ .word SDMMC1_IRQHandler /* SDMMC1 */ .word TIM5_IRQHandler /* TIM5 */ .word SPI3_IRQHandler /* SPI3 */ .word UART4_IRQHandler /* UART4 */ .word UART5_IRQHandler /* UART5 */ .word TIM6_DAC_IRQHandler /* TIM6 and DAC1&2 underrun errors */ .word TIM7_IRQHandler /* TIM7 */ .word DMA2_Stream0_IRQHandler /* DMA2 Stream 0 */ .word DMA2_Stream1_IRQHandler /* DMA2 Stream 1 */ .word DMA2_Stream2_IRQHandler /* DMA2 Stream 2 */ .word DMA2_Stream3_IRQHandler /* DMA2 Stream 3 */ .word DMA2_Stream4_IRQHandler /* DMA2 Stream 4 */ .word ETH_IRQHandler /* Ethernet */ .word ETH_WKUP_IRQHandler /* Ethernet Wakeup through EXTI line */ .word CAN2_TX_IRQHandler /* CAN2 TX */ .word CAN2_RX0_IRQHandler /* CAN2 RX0 */ .word 
CAN2_RX1_IRQHandler /* CAN2 RX1 */ .word CAN2_SCE_IRQHandler /* CAN2 SCE */ .word OTG_FS_IRQHandler /* USB OTG FS */ .word DMA2_Stream5_IRQHandler /* DMA2 Stream 5 */ .word DMA2_Stream6_IRQHandler /* DMA2 Stream 6 */ .word DMA2_Stream7_IRQHandler /* DMA2 Stream 7 */ .word USART6_IRQHandler /* USART6 */ .word I2C3_EV_IRQHandler /* I2C3 event */ .word I2C3_ER_IRQHandler /* I2C3 error */ .word OTG_HS_EP1_OUT_IRQHandler /* USB OTG HS End Point 1 Out */ .word OTG_HS_EP1_IN_IRQHandler /* USB OTG HS End Point 1 In */ .word OTG_HS_WKUP_IRQHandler /* USB OTG HS Wakeup through EXTI */ .word OTG_HS_IRQHandler /* USB OTG HS */ .word DCMI_IRQHandler /* DCMI */ .word 0 /* Reserved */ .word RNG_IRQHandler /* RNG */ .word FPU_IRQHandler /* FPU */ .word UART7_IRQHandler /* UART7 */ .word UART8_IRQHandler /* UART8 */ .word SPI4_IRQHandler /* SPI4 */ .word SPI5_IRQHandler /* SPI5 */ .word SPI6_IRQHandler /* SPI6 */ .word SAI1_IRQHandler /* SAI1 */ .word LTDC_IRQHandler /* LTDC */ .word LTDC_ER_IRQHandler /* LTDC error */ .word DMA2D_IRQHandler /* DMA2D */ .word SAI2_IRQHandler /* SAI2 */ .word QUADSPI_IRQHandler /* QUADSPI */ .word LPTIM1_IRQHandler /* LPTIM1 */ .word CEC_IRQHandler /* HDMI_CEC */ .word I2C4_EV_IRQHandler /* I2C4 Event */ .word I2C4_ER_IRQHandler /* I2C4 Error */ .word SPDIF_RX_IRQHandler /* SPDIF_RX */ .word 0 /* Reserved */ .word DFSDM1_FLT0_IRQHandler /* DFSDM1 Filter 0 global Interrupt */ .word DFSDM1_FLT1_IRQHandler /* DFSDM1 Filter 1 global Interrupt */ .word DFSDM1_FLT2_IRQHandler /* DFSDM1 Filter 2 global Interrupt */ .word DFSDM1_FLT3_IRQHandler /* DFSDM1 Filter 3 global Interrupt */ .word SDMMC2_IRQHandler /* SDMMC2 */ .word CAN3_TX_IRQHandler /* CAN3 TX */ .word CAN3_RX0_IRQHandler /* CAN3 RX0 */ .word CAN3_RX1_IRQHandler /* CAN3 RX1 */ .word CAN3_SCE_IRQHandler /* CAN3 SCE */ .word JPEG_IRQHandler /* JPEG */ .word MDIOS_IRQHandler /* MDIOS */ /******************************************************************************* * * Provide weak aliases for each Exception handler to the Default_Handler. * As they are weak aliases, any function with the same name will override * this definition. 
* *******************************************************************************/ .weak NMI_Handler .thumb_set NMI_Handler,Default_Handler .weak HardFault_Handler .thumb_set HardFault_Handler,Default_Handler .weak MemManage_Handler .thumb_set MemManage_Handler,Default_Handler .weak BusFault_Handler .thumb_set BusFault_Handler,Default_Handler .weak UsageFault_Handler .thumb_set UsageFault_Handler,Default_Handler .weak SVC_Handler .thumb_set SVC_Handler,Default_Handler .weak DebugMon_Handler .thumb_set DebugMon_Handler,Default_Handler .weak PendSV_Handler .thumb_set PendSV_Handler,Default_Handler .weak SysTick_Handler .thumb_set SysTick_Handler,Default_Handler .weak WWDG_IRQHandler .thumb_set WWDG_IRQHandler,Default_Handler .weak PVD_IRQHandler .thumb_set PVD_IRQHandler,Default_Handler .weak TAMP_STAMP_IRQHandler .thumb_set TAMP_STAMP_IRQHandler,Default_Handler .weak RTC_WKUP_IRQHandler .thumb_set RTC_WKUP_IRQHandler,Default_Handler .weak FLASH_IRQHandler .thumb_set FLASH_IRQHandler,Default_Handler .weak RCC_IRQHandler .thumb_set RCC_IRQHandler,Default_Handler .weak EXTI0_IRQHandler .thumb_set EXTI0_IRQHandler,Default_Handler .weak EXTI1_IRQHandler .thumb_set EXTI1_IRQHandler,Default_Handler .weak EXTI2_IRQHandler .thumb_set EXTI2_IRQHandler,Default_Handler .weak EXTI3_IRQHandler .thumb_set EXTI3_IRQHandler,Default_Handler .weak EXTI4_IRQHandler .thumb_set EXTI4_IRQHandler,Default_Handler .weak DMA1_Stream0_IRQHandler .thumb_set DMA1_Stream0_IRQHandler,Default_Handler .weak DMA1_Stream1_IRQHandler .thumb_set DMA1_Stream1_IRQHandler,Default_Handler .weak DMA1_Stream2_IRQHandler .thumb_set DMA1_Stream2_IRQHandler,Default_Handler .weak DMA1_Stream3_IRQHandler .thumb_set DMA1_Stream3_IRQHandler,Default_Handler .weak DMA1_Stream4_IRQHandler .thumb_set DMA1_Stream4_IRQHandler,Default_Handler .weak DMA1_Stream5_IRQHandler .thumb_set DMA1_Stream5_IRQHandler,Default_Handler .weak DMA1_Stream6_IRQHandler .thumb_set DMA1_Stream6_IRQHandler,Default_Handler .weak ADC_IRQHandler .thumb_set ADC_IRQHandler,Default_Handler .weak CAN1_TX_IRQHandler .thumb_set CAN1_TX_IRQHandler,Default_Handler .weak CAN1_RX0_IRQHandler .thumb_set CAN1_RX0_IRQHandler,Default_Handler .weak CAN1_RX1_IRQHandler .thumb_set CAN1_RX1_IRQHandler,Default_Handler .weak CAN1_SCE_IRQHandler .thumb_set CAN1_SCE_IRQHandler,Default_Handler .weak EXTI9_5_IRQHandler .thumb_set EXTI9_5_IRQHandler,Default_Handler .weak TIM1_BRK_TIM9_IRQHandler .thumb_set TIM1_BRK_TIM9_IRQHandler,Default_Handler .weak TIM1_UP_TIM10_IRQHandler .thumb_set TIM1_UP_TIM10_IRQHandler,Default_Handler .weak TIM1_TRG_COM_TIM11_IRQHandler .thumb_set TIM1_TRG_COM_TIM11_IRQHandler,Default_Handler .weak TIM1_CC_IRQHandler .thumb_set TIM1_CC_IRQHandler,Default_Handler .weak TIM2_IRQHandler .thumb_set TIM2_IRQHandler,Default_Handler .weak TIM3_IRQHandler .thumb_set TIM3_IRQHandler,Default_Handler .weak TIM4_IRQHandler .thumb_set TIM4_IRQHandler,Default_Handler .weak I2C1_EV_IRQHandler .thumb_set I2C1_EV_IRQHandler,Default_Handler .weak I2C1_ER_IRQHandler .thumb_set I2C1_ER_IRQHandler,Default_Handler .weak I2C2_EV_IRQHandler .thumb_set I2C2_EV_IRQHandler,Default_Handler .weak I2C2_ER_IRQHandler .thumb_set I2C2_ER_IRQHandler,Default_Handler .weak SPI1_IRQHandler .thumb_set SPI1_IRQHandler,Default_Handler .weak SPI2_IRQHandler .thumb_set SPI2_IRQHandler,Default_Handler .weak USART1_IRQHandler .thumb_set USART1_IRQHandler,Default_Handler .weak USART2_IRQHandler .thumb_set USART2_IRQHandler,Default_Handler .weak USART3_IRQHandler .thumb_set USART3_IRQHandler,Default_Handler .weak 
EXTI15_10_IRQHandler .thumb_set EXTI15_10_IRQHandler,Default_Handler .weak RTC_Alarm_IRQHandler .thumb_set RTC_Alarm_IRQHandler,Default_Handler .weak OTG_FS_WKUP_IRQHandler .thumb_set OTG_FS_WKUP_IRQHandler,Default_Handler .weak TIM8_BRK_TIM12_IRQHandler .thumb_set TIM8_BRK_TIM12_IRQHandler,Default_Handler .weak TIM8_UP_TIM13_IRQHandler .thumb_set TIM8_UP_TIM13_IRQHandler,Default_Handler .weak TIM8_TRG_COM_TIM14_IRQHandler .thumb_set TIM8_TRG_COM_TIM14_IRQHandler,Default_Handler .weak TIM8_CC_IRQHandler .thumb_set TIM8_CC_IRQHandler,Default_Handler .weak DMA1_Stream7_IRQHandler .thumb_set DMA1_Stream7_IRQHandler,Default_Handler .weak FMC_IRQHandler .thumb_set FMC_IRQHandler,Default_Handler .weak SDMMC1_IRQHandler .thumb_set SDMMC1_IRQHandler,Default_Handler .weak TIM5_IRQHandler .thumb_set TIM5_IRQHandler,Default_Handler .weak SPI3_IRQHandler .thumb_set SPI3_IRQHandler,Default_Handler .weak UART4_IRQHandler .thumb_set UART4_IRQHandler,Default_Handler .weak UART5_IRQHandler .thumb_set UART5_IRQHandler,Default_Handler .weak TIM6_DAC_IRQHandler .thumb_set TIM6_DAC_IRQHandler,Default_Handler .weak TIM7_IRQHandler .thumb_set TIM7_IRQHandler,Default_Handler .weak DMA2_Stream0_IRQHandler .thumb_set DMA2_Stream0_IRQHandler,Default_Handler .weak DMA2_Stream1_IRQHandler .thumb_set DMA2_Stream1_IRQHandler,Default_Handler .weak DMA2_Stream2_IRQHandler .thumb_set DMA2_Stream2_IRQHandler,Default_Handler .weak DMA2_Stream3_IRQHandler .thumb_set DMA2_Stream3_IRQHandler,Default_Handler .weak DMA2_Stream4_IRQHandler .thumb_set DMA2_Stream4_IRQHandler,Default_Handler .weak ETH_IRQHandler .thumb_set ETH_IRQHandler,Default_Handler .weak ETH_WKUP_IRQHandler .thumb_set ETH_WKUP_IRQHandler,Default_Handler .weak CAN2_TX_IRQHandler .thumb_set CAN2_TX_IRQHandler,Default_Handler .weak CAN2_RX0_IRQHandler .thumb_set CAN2_RX0_IRQHandler,Default_Handler .weak CAN2_RX1_IRQHandler .thumb_set CAN2_RX1_IRQHandler,Default_Handler .weak CAN2_SCE_IRQHandler .thumb_set CAN2_SCE_IRQHandler,Default_Handler .weak OTG_FS_IRQHandler .thumb_set OTG_FS_IRQHandler,Default_Handler .weak DMA2_Stream5_IRQHandler .thumb_set DMA2_Stream5_IRQHandler,Default_Handler .weak DMA2_Stream6_IRQHandler .thumb_set DMA2_Stream6_IRQHandler,Default_Handler .weak DMA2_Stream7_IRQHandler .thumb_set DMA2_Stream7_IRQHandler,Default_Handler .weak USART6_IRQHandler .thumb_set USART6_IRQHandler,Default_Handler .weak I2C3_EV_IRQHandler .thumb_set I2C3_EV_IRQHandler,Default_Handler .weak I2C3_ER_IRQHandler .thumb_set I2C3_ER_IRQHandler,Default_Handler .weak OTG_HS_EP1_OUT_IRQHandler .thumb_set OTG_HS_EP1_OUT_IRQHandler,Default_Handler .weak OTG_HS_EP1_IN_IRQHandler .thumb_set OTG_HS_EP1_IN_IRQHandler,Default_Handler .weak OTG_HS_WKUP_IRQHandler .thumb_set OTG_HS_WKUP_IRQHandler,Default_Handler .weak OTG_HS_IRQHandler .thumb_set OTG_HS_IRQHandler,Default_Handler .weak DCMI_IRQHandler .thumb_set DCMI_IRQHandler,Default_Handler .weak RNG_IRQHandler .thumb_set RNG_IRQHandler,Default_Handler .weak FPU_IRQHandler .thumb_set FPU_IRQHandler,Default_Handler .weak UART7_IRQHandler .thumb_set UART7_IRQHandler,Default_Handler .weak UART8_IRQHandler .thumb_set UART8_IRQHandler,Default_Handler .weak SPI4_IRQHandler .thumb_set SPI4_IRQHandler,Default_Handler .weak SPI5_IRQHandler .thumb_set SPI5_IRQHandler,Default_Handler .weak SPI6_IRQHandler .thumb_set SPI6_IRQHandler,Default_Handler .weak SAI1_IRQHandler .thumb_set SAI1_IRQHandler,Default_Handler .weak LTDC_IRQHandler .thumb_set LTDC_IRQHandler,Default_Handler .weak LTDC_ER_IRQHandler .thumb_set 
LTDC_ER_IRQHandler,Default_Handler .weak DMA2D_IRQHandler .thumb_set DMA2D_IRQHandler,Default_Handler .weak SAI2_IRQHandler .thumb_set SAI2_IRQHandler,Default_Handler .weak QUADSPI_IRQHandler .thumb_set QUADSPI_IRQHandler,Default_Handler .weak LPTIM1_IRQHandler .thumb_set LPTIM1_IRQHandler,Default_Handler .weak CEC_IRQHandler .thumb_set CEC_IRQHandler,Default_Handler .weak I2C4_EV_IRQHandler .thumb_set I2C4_EV_IRQHandler,Default_Handler .weak I2C4_ER_IRQHandler .thumb_set I2C4_ER_IRQHandler,Default_Handler .weak SPDIF_RX_IRQHandler .thumb_set SPDIF_RX_IRQHandler,Default_Handler .weak DFSDM1_FLT0_IRQHandler .thumb_set DFSDM1_FLT0_IRQHandler,Default_Handler .weak DFSDM1_FLT1_IRQHandler .thumb_set DFSDM1_FLT1_IRQHandler,Default_Handler .weak DFSDM1_FLT2_IRQHandler .thumb_set DFSDM1_FLT2_IRQHandler,Default_Handler .weak DFSDM1_FLT3_IRQHandler .thumb_set DFSDM1_FLT3_IRQHandler,Default_Handler .weak SDMMC2_IRQHandler .thumb_set SDMMC2_IRQHandler,Default_Handler .weak CAN3_TX_IRQHandler .thumb_set CAN3_TX_IRQHandler,Default_Handler .weak CAN3_RX0_IRQHandler .thumb_set CAN3_RX0_IRQHandler,Default_Handler .weak CAN3_RX1_IRQHandler .thumb_set CAN3_RX1_IRQHandler,Default_Handler .weak CAN3_SCE_IRQHandler .thumb_set CAN3_SCE_IRQHandler,Default_Handler .weak JPEG_IRQHandler .thumb_set JPEG_IRQHandler,Default_Handler .weak MDIOS_IRQHandler .thumb_set MDIOS_IRQHandler,Default_Handler
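Before calling main(), Reset_Handler performs the usual C runtime setup: copy the initialised .data image from flash (starting at _sidata) into RAM between _sdata and _edata, zero-fill .bss between _sbss and _ebss, then call SystemInit and __libc_init_array. In C terms, roughly (function name invented for illustration):

    /* Linker-script symbols referenced by the startup code above. */
    extern unsigned int _sidata, _sdata, _edata, _sbss, _ebss;

    void reset_handler_equivalent(void)
    {
        unsigned int *src = &_sidata;
        /* Copy initialised data from flash to SRAM. */
        for (unsigned int *dst = &_sdata; dst < &_edata; )
            *dst++ = *src++;
        /* Zero-fill the .bss segment. */
        for (unsigned int *dst = &_sbss; dst < &_ebss; )
            *dst++ = 0;
        /* Then: SystemInit(), __libc_init_array(), main(). */
    }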
ElShroomster/bril_to_riscv
1,261
bril-riscv/riscv.S
# Function: main
.main:
# Arguments to main
addi x2, x2, -8
addi x18, x0, 633
sw x18, 0(x2)
addi x18, x0, 844
sw x18, 4(x2)
addi x2, x2, -28
sw x1, 0(x2)
addi x18, x0, 0
sw x18, 4(x2)
lw x19, 32(x2)
sw x19, 8(x2)
lw x19, 28(x2)
sw x19, 12(x2)
.cmp.val:
lw x19, 8(x2)
lw x20, 12(x2)
blt x19, x20, .lt_0
addi x18, x0, 0
jal x0, .exit_cond_0
.lt_0:
addi x18, x0, 1
.exit_cond_0:
sw x18, 16(x2)
lw x19, 16(x2)
beq x19, x0, .else.1
jal x0, .if.1
.if.1:
lw x19, 12(x2)
lw x20, 8(x2)
sub x18, x19, x20
sw x18, 20(x2)
jal x0, .loop.bound
.else.1:
lw x19, 8(x2)
lw x20, 12(x2)
sub x18, x19, x20
sw x18, 20(x2)
jal x0, .loop.bound
.loop.bound:
lw x19, 20(x2)
lw x20, 4(x2)
beq x19, x20, .eq_1
addi x18, x0, 0
jal x0, .exit_cond_1
.eq_1:
addi x18, x0, 1
.exit_cond_1:
sw x18, 24(x2)
lw x19, 24(x2)
beq x19, x0, .update.val
jal x0, .program.end
.update.val:
lw x19, 16(x2)
beq x19, x0, .else.2
jal x0, .if.2
.if.2:
lw x19, 20(x2)
sw x19, 12(x2)
jal x0, .cmp.val
.else.2:
lw x19, 20(x2)
sw x19, 8(x2)
jal x0, .cmp.val
.program.end:
lw x11, 12(x2)
addi x10, x0, 1
ecall
addi x11, x0, '\n'
addi x10, x0, 11
ecall
addi x11, x0, 0
addi x10, x0, 17
ecall
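The generated riscv.S above repeatedly takes the difference of the two constants 633 and 844 until the difference is zero, then prints the surviving value and a newline through ecalls (the call-number convention in x10/x11 is specific to this project's runtime). In other words it is a subtraction-based GCD. A rough C equivalent of the control flow is sketched below; the variable names are chosen here for readability and do not appear in the generated code.

#include <stdio.h>

int main(void)
{
    int a = 844, b = 633;            /* the two constants stored on the stack */
    for (;;) {
        int lt = a < b;              /* .cmp.val */
        int d  = lt ? b - a : a - b; /* .if.1 / .else.1 */
        if (d == 0)                  /* .loop.bound */
            break;
        if (lt)                      /* .update.val */
            b = d;
        else
            a = d;
    }
    printf("%d\n", b);               /* .program.end: print value, newline, exit */
    return 0;
}

Run as-is, this should print 211, the GCD of 633 and 844, which is also what the assembly above should output on its target runtime.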
ElSargo/wezpy
43,257
wezterm-src/deps/cairo/pixman/pixman/pixman-arm-simd-asm.S
/* * Copyright © 2012 Raspberry Pi Foundation * Copyright © 2012 RISC OS Open Ltd * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of the copyright holders not be used in * advertising or publicity pertaining to distribution of the software without * specific, written prior permission. The copyright holders make no * representations about the suitability of this software for any purpose. It * is provided "as is" without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS * SOFTWARE. * * Author: Ben Avison (bavison@riscosopen.org) * */ /* Prevent the stack from becoming executable */ #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif .text .arch armv6 .object_arch armv4 .arm .altmacro .p2align 2 #include "pixman-arm-asm.h" #include "pixman-arm-simd-asm.h" /* A head macro should do all processing which results in an output of up to * 16 bytes, as far as the final load instruction. The corresponding tail macro * should complete the processing of the up-to-16 bytes. The calling macro will * sometimes choose to insert a preload or a decrement of X between them. 
* cond ARM condition code for code block * numbytes Number of output bytes that should be generated this time * firstreg First WK register in which to place output * unaligned_src Whether to use non-wordaligned loads of source image * unaligned_mask Whether to use non-wordaligned loads of mask image * preload If outputting 16 bytes causes 64 bytes to be read, whether an extra preload should be output */ .macro blit_init line_saved_regs STRIDE_D, STRIDE_S .endm .macro blit_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload pixld cond, numbytes, firstreg, SRC, unaligned_src .endm .macro blit_inner_loop process_head, process_tail, unaligned_src, unaligned_mask, dst_alignment WK4 .req STRIDE_D WK5 .req STRIDE_S WK6 .req MASK WK7 .req STRIDE_M 110: pixld , 16, 0, SRC, unaligned_src pixld , 16, 4, SRC, unaligned_src pld [SRC, SCRATCH] pixst , 16, 0, DST pixst , 16, 4, DST subs X, X, #32*8/src_bpp bhs 110b .unreq WK4 .unreq WK5 .unreq WK6 .unreq WK7 .endm generate_composite_function \ pixman_composite_src_8888_8888_asm_armv6, 32, 0, 32, \ FLAG_DST_WRITEONLY | FLAG_COND_EXEC | FLAG_SPILL_LINE_VARS_WIDE | FLAG_PROCESS_PRESERVES_SCRATCH, \ 4, /* prefetch distance */ \ blit_init, \ nop_macro, /* newline */ \ nop_macro, /* cleanup */ \ blit_process_head, \ nop_macro, /* process tail */ \ blit_inner_loop generate_composite_function \ pixman_composite_src_0565_0565_asm_armv6, 16, 0, 16, \ FLAG_DST_WRITEONLY | FLAG_COND_EXEC | FLAG_SPILL_LINE_VARS_WIDE | FLAG_PROCESS_PRESERVES_SCRATCH, \ 4, /* prefetch distance */ \ blit_init, \ nop_macro, /* newline */ \ nop_macro, /* cleanup */ \ blit_process_head, \ nop_macro, /* process tail */ \ blit_inner_loop generate_composite_function \ pixman_composite_src_8_8_asm_armv6, 8, 0, 8, \ FLAG_DST_WRITEONLY | FLAG_COND_EXEC | FLAG_SPILL_LINE_VARS_WIDE | FLAG_PROCESS_PRESERVES_SCRATCH, \ 3, /* prefetch distance */ \ blit_init, \ nop_macro, /* newline */ \ nop_macro, /* cleanup */ \ blit_process_head, \ nop_macro, /* process tail */ \ blit_inner_loop /******************************************************************************/ .macro src_n_8888_init ldr SRC, [sp, #ARGS_STACK_OFFSET] mov STRIDE_S, SRC mov MASK, SRC mov STRIDE_M, SRC .endm .macro src_n_0565_init ldrh SRC, [sp, #ARGS_STACK_OFFSET] orr SRC, SRC, lsl #16 mov STRIDE_S, SRC mov MASK, SRC mov STRIDE_M, SRC .endm .macro src_n_8_init ldrb SRC, [sp, #ARGS_STACK_OFFSET] orr SRC, SRC, lsl #8 orr SRC, SRC, lsl #16 mov STRIDE_S, SRC mov MASK, SRC mov STRIDE_M, SRC .endm .macro fill_process_tail cond, numbytes, firstreg WK4 .req SRC WK5 .req STRIDE_S WK6 .req MASK WK7 .req STRIDE_M pixst cond, numbytes, 4, DST .unreq WK4 .unreq WK5 .unreq WK6 .unreq WK7 .endm generate_composite_function \ pixman_composite_src_n_8888_asm_armv6, 0, 0, 32, \ FLAG_DST_WRITEONLY | FLAG_COND_EXEC | FLAG_PROCESS_PRESERVES_PSR | FLAG_PROCESS_DOES_STORE | FLAG_PROCESS_PRESERVES_SCRATCH \ 0, /* prefetch distance doesn't apply */ \ src_n_8888_init \ nop_macro, /* newline */ \ nop_macro /* cleanup */ \ nop_macro /* process head */ \ fill_process_tail generate_composite_function \ pixman_composite_src_n_0565_asm_armv6, 0, 0, 16, \ FLAG_DST_WRITEONLY | FLAG_COND_EXEC | FLAG_PROCESS_PRESERVES_PSR | FLAG_PROCESS_DOES_STORE | FLAG_PROCESS_PRESERVES_SCRATCH \ 0, /* prefetch distance doesn't apply */ \ src_n_0565_init \ nop_macro, /* newline */ \ nop_macro /* cleanup */ \ nop_macro /* process head */ \ fill_process_tail generate_composite_function \ pixman_composite_src_n_8_asm_armv6, 0, 0, 8, \ FLAG_DST_WRITEONLY | 
FLAG_COND_EXEC | FLAG_PROCESS_PRESERVES_PSR | FLAG_PROCESS_DOES_STORE | FLAG_PROCESS_PRESERVES_SCRATCH \ 0, /* prefetch distance doesn't apply */ \ src_n_8_init \ nop_macro, /* newline */ \ nop_macro /* cleanup */ \ nop_macro /* process head */ \ fill_process_tail /******************************************************************************/ .macro src_x888_8888_pixel, cond, reg orr&cond WK&reg, WK&reg, #0xFF000000 .endm .macro pixman_composite_src_x888_8888_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload pixld cond, numbytes, firstreg, SRC, unaligned_src .endm .macro pixman_composite_src_x888_8888_process_tail cond, numbytes, firstreg src_x888_8888_pixel cond, %(firstreg+0) .if numbytes >= 8 src_x888_8888_pixel cond, %(firstreg+1) .if numbytes == 16 src_x888_8888_pixel cond, %(firstreg+2) src_x888_8888_pixel cond, %(firstreg+3) .endif .endif .endm generate_composite_function \ pixman_composite_src_x888_8888_asm_armv6, 32, 0, 32, \ FLAG_DST_WRITEONLY | FLAG_COND_EXEC | FLAG_PROCESS_PRESERVES_SCRATCH, \ 3, /* prefetch distance */ \ nop_macro, /* init */ \ nop_macro, /* newline */ \ nop_macro, /* cleanup */ \ pixman_composite_src_x888_8888_process_head, \ pixman_composite_src_x888_8888_process_tail /******************************************************************************/ .macro src_0565_8888_init /* Hold loop invariants in MASK and STRIDE_M */ ldr MASK, =0x07E007E0 mov STRIDE_M, #0xFF000000 /* Set GE[3:0] to 1010 so SEL instructions do what we want */ ldr SCRATCH, =0x80008000 uadd8 SCRATCH, SCRATCH, SCRATCH .endm .macro src_0565_8888_2pixels, reg1, reg2 and SCRATCH, WK&reg1, MASK @ 00000GGGGGG0000000000gggggg00000 bic WK&reg2, WK&reg1, MASK @ RRRRR000000BBBBBrrrrr000000bbbbb orr SCRATCH, SCRATCH, SCRATCH, lsr #6 @ 00000GGGGGGGGGGGG0000ggggggggggg mov WK&reg1, WK&reg2, lsl #16 @ rrrrr000000bbbbb0000000000000000 mov SCRATCH, SCRATCH, ror #19 @ GGGG0000ggggggggggg00000GGGGGGGG bic WK&reg2, WK&reg2, WK&reg1, lsr #16 @ RRRRR000000BBBBB0000000000000000 orr WK&reg1, WK&reg1, WK&reg1, lsr #5 @ rrrrrrrrrr0bbbbbbbbbb00000000000 orr WK&reg2, WK&reg2, WK&reg2, lsr #5 @ RRRRRRRRRR0BBBBBBBBBB00000000000 pkhtb WK&reg1, WK&reg1, WK&reg1, asr #5 @ rrrrrrrr--------bbbbbbbb-------- sel WK&reg1, WK&reg1, SCRATCH @ rrrrrrrrggggggggbbbbbbbb-------- mov SCRATCH, SCRATCH, ror #16 @ ggg00000GGGGGGGGGGGG0000gggggggg pkhtb WK&reg2, WK&reg2, WK&reg2, asr #5 @ RRRRRRRR--------BBBBBBBB-------- sel WK&reg2, WK&reg2, SCRATCH @ RRRRRRRRGGGGGGGGBBBBBBBB-------- orr WK&reg1, STRIDE_M, WK&reg1, lsr #8 @ 11111111rrrrrrrrggggggggbbbbbbbb orr WK&reg2, STRIDE_M, WK&reg2, lsr #8 @ 11111111RRRRRRRRGGGGGGGGBBBBBBBB .endm /* This version doesn't need STRIDE_M, but is one instruction longer. It would however be preferable for an XRGB target, since we could knock off the last 2 instructions, but is that a common case? 
and SCRATCH, WK&reg1, MASK @ 00000GGGGGG0000000000gggggg00000 bic WK&reg1, WK&reg1, MASK @ RRRRR000000BBBBBrrrrr000000bbbbb orr SCRATCH, SCRATCH, SCRATCH, lsr #6 @ 00000GGGGGGGGGGGG0000ggggggggggg mov WK&reg2, WK&reg1, lsr #16 @ 0000000000000000RRRRR000000BBBBB mov SCRATCH, SCRATCH, ror #27 @ GGGGGGGGGGGG0000ggggggggggg00000 bic WK&reg1, WK&reg1, WK&reg2, lsl #16 @ 0000000000000000rrrrr000000bbbbb mov WK&reg2, WK&reg2, lsl #3 @ 0000000000000RRRRR000000BBBBB000 mov WK&reg1, WK&reg1, lsl #3 @ 0000000000000rrrrr000000bbbbb000 orr WK&reg2, WK&reg2, WK&reg2, lsr #5 @ 0000000000000RRRRRRRRRR0BBBBBBBB orr WK&reg1, WK&reg1, WK&reg1, lsr #5 @ 0000000000000rrrrrrrrrr0bbbbbbbb pkhbt WK&reg2, WK&reg2, WK&reg2, lsl #5 @ --------RRRRRRRR--------BBBBBBBB pkhbt WK&reg1, WK&reg1, WK&reg1, lsl #5 @ --------rrrrrrrr--------bbbbbbbb sel WK&reg2, SCRATCH, WK&reg2 @ --------RRRRRRRRGGGGGGGGBBBBBBBB sel WK&reg1, SCRATCH, WK&reg1 @ --------rrrrrrrrggggggggbbbbbbbb orr WK&reg2, WK&reg2, #0xFF000000 @ 11111111RRRRRRRRGGGGGGGGBBBBBBBB orr WK&reg1, WK&reg1, #0xFF000000 @ 11111111rrrrrrrrggggggggbbbbbbbb */ .macro src_0565_8888_1pixel, reg bic SCRATCH, WK&reg, MASK @ 0000000000000000rrrrr000000bbbbb and WK&reg, WK&reg, MASK @ 000000000000000000000gggggg00000 mov SCRATCH, SCRATCH, lsl #3 @ 0000000000000rrrrr000000bbbbb000 mov WK&reg, WK&reg, lsl #5 @ 0000000000000000gggggg0000000000 orr SCRATCH, SCRATCH, SCRATCH, lsr #5 @ 0000000000000rrrrrrrrrr0bbbbbbbb orr WK&reg, WK&reg, WK&reg, lsr #6 @ 000000000000000gggggggggggg00000 pkhbt SCRATCH, SCRATCH, SCRATCH, lsl #5 @ --------rrrrrrrr--------bbbbbbbb sel WK&reg, WK&reg, SCRATCH @ --------rrrrrrrrggggggggbbbbbbbb orr WK&reg, WK&reg, #0xFF000000 @ 11111111rrrrrrrrggggggggbbbbbbbb .endm .macro src_0565_8888_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload .if numbytes == 16 pixldst ld,, 8, firstreg, %(firstreg+2),,, SRC, unaligned_src .elseif numbytes == 8 pixld , 4, firstreg, SRC, unaligned_src .elseif numbytes == 4 pixld , 2, firstreg, SRC, unaligned_src .endif .endm .macro src_0565_8888_process_tail cond, numbytes, firstreg .if numbytes == 16 src_0565_8888_2pixels firstreg, %(firstreg+1) src_0565_8888_2pixels %(firstreg+2), %(firstreg+3) .elseif numbytes == 8 src_0565_8888_2pixels firstreg, %(firstreg+1) .else src_0565_8888_1pixel firstreg .endif .endm generate_composite_function \ pixman_composite_src_0565_8888_asm_armv6, 16, 0, 32, \ FLAG_DST_WRITEONLY | FLAG_BRANCH_OVER, \ 3, /* prefetch distance */ \ src_0565_8888_init, \ nop_macro, /* newline */ \ nop_macro, /* cleanup */ \ src_0565_8888_process_head, \ src_0565_8888_process_tail /******************************************************************************/ .macro src_x888_0565_init /* Hold loop invariant in MASK */ ldr MASK, =0x001F001F line_saved_regs STRIDE_S, ORIG_W .endm .macro src_x888_0565_1pixel s, d and WK&d, MASK, WK&s, lsr #3 @ 00000000000rrrrr00000000000bbbbb and STRIDE_S, WK&s, #0xFC00 @ 0000000000000000gggggg0000000000 orr WK&d, WK&d, WK&d, lsr #5 @ 00000000000-----rrrrr000000bbbbb orr WK&d, WK&d, STRIDE_S, lsr #5 @ 00000000000-----rrrrrggggggbbbbb /* Top 16 bits are discarded during the following STRH */ .endm .macro src_x888_0565_2pixels slo, shi, d, tmp and SCRATCH, WK&shi, #0xFC00 @ 0000000000000000GGGGGG0000000000 and WK&tmp, MASK, WK&shi, lsr #3 @ 00000000000RRRRR00000000000BBBBB and WK&shi, MASK, WK&slo, lsr #3 @ 00000000000rrrrr00000000000bbbbb orr WK&tmp, WK&tmp, WK&tmp, lsr #5 @ 00000000000-----RRRRR000000BBBBB orr WK&tmp, WK&tmp, SCRATCH, lsr #5 @ 
00000000000-----RRRRRGGGGGGBBBBB and SCRATCH, WK&slo, #0xFC00 @ 0000000000000000gggggg0000000000 orr WK&shi, WK&shi, WK&shi, lsr #5 @ 00000000000-----rrrrr000000bbbbb orr WK&shi, WK&shi, SCRATCH, lsr #5 @ 00000000000-----rrrrrggggggbbbbb pkhbt WK&d, WK&shi, WK&tmp, lsl #16 @ RRRRRGGGGGGBBBBBrrrrrggggggbbbbb .endm .macro src_x888_0565_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload WK4 .req STRIDE_S WK5 .req STRIDE_M WK6 .req WK3 WK7 .req ORIG_W .if numbytes == 16 pixld , 16, 4, SRC, 0 src_x888_0565_2pixels 4, 5, 0, 0 pixld , 8, 4, SRC, 0 src_x888_0565_2pixels 6, 7, 1, 1 pixld , 8, 6, SRC, 0 .else pixld , numbytes*2, 4, SRC, 0 .endif .endm .macro src_x888_0565_process_tail cond, numbytes, firstreg .if numbytes == 16 src_x888_0565_2pixels 4, 5, 2, 2 src_x888_0565_2pixels 6, 7, 3, 4 .elseif numbytes == 8 src_x888_0565_2pixels 4, 5, 1, 1 src_x888_0565_2pixels 6, 7, 2, 2 .elseif numbytes == 4 src_x888_0565_2pixels 4, 5, 1, 1 .else src_x888_0565_1pixel 4, 1 .endif .if numbytes == 16 pixst , numbytes, 0, DST .else pixst , numbytes, 1, DST .endif .unreq WK4 .unreq WK5 .unreq WK6 .unreq WK7 .endm generate_composite_function \ pixman_composite_src_x888_0565_asm_armv6, 32, 0, 16, \ FLAG_DST_WRITEONLY | FLAG_BRANCH_OVER | FLAG_PROCESS_DOES_STORE | FLAG_SPILL_LINE_VARS | FLAG_PROCESS_CORRUPTS_SCRATCH, \ 3, /* prefetch distance */ \ src_x888_0565_init, \ nop_macro, /* newline */ \ nop_macro, /* cleanup */ \ src_x888_0565_process_head, \ src_x888_0565_process_tail /******************************************************************************/ .macro add_8_8_8pixels cond, dst1, dst2 uqadd8&cond WK&dst1, WK&dst1, MASK uqadd8&cond WK&dst2, WK&dst2, STRIDE_M .endm .macro add_8_8_4pixels cond, dst uqadd8&cond WK&dst, WK&dst, MASK .endm .macro add_8_8_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload WK4 .req MASK WK5 .req STRIDE_M .if numbytes == 16 pixld cond, 8, 4, SRC, unaligned_src pixld cond, 16, firstreg, DST, 0 add_8_8_8pixels cond, firstreg, %(firstreg+1) pixld cond, 8, 4, SRC, unaligned_src .else pixld cond, numbytes, 4, SRC, unaligned_src pixld cond, numbytes, firstreg, DST, 0 .endif .unreq WK4 .unreq WK5 .endm .macro add_8_8_process_tail cond, numbytes, firstreg .if numbytes == 16 add_8_8_8pixels cond, %(firstreg+2), %(firstreg+3) .elseif numbytes == 8 add_8_8_8pixels cond, firstreg, %(firstreg+1) .else add_8_8_4pixels cond, firstreg .endif .endm generate_composite_function \ pixman_composite_add_8_8_asm_armv6, 8, 0, 8, \ FLAG_DST_READWRITE | FLAG_BRANCH_OVER | FLAG_PROCESS_PRESERVES_SCRATCH, \ 2, /* prefetch distance */ \ nop_macro, /* init */ \ nop_macro, /* newline */ \ nop_macro, /* cleanup */ \ add_8_8_process_head, \ add_8_8_process_tail /******************************************************************************/ .macro over_8888_8888_init /* Hold loop invariant in MASK */ ldr MASK, =0x00800080 /* Set GE[3:0] to 0101 so SEL instructions do what we want */ uadd8 SCRATCH, MASK, MASK line_saved_regs STRIDE_D, STRIDE_S, ORIG_W .endm .macro over_8888_8888_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload WK4 .req STRIDE_D WK5 .req STRIDE_S WK6 .req STRIDE_M WK7 .req ORIG_W pixld , numbytes, %(4+firstreg), SRC, unaligned_src pixld , numbytes, firstreg, DST, 0 .unreq WK4 .unreq WK5 .unreq WK6 .unreq WK7 .endm .macro over_8888_8888_check_transparent numbytes, reg0, reg1, reg2, reg3 /* Since these colours a premultiplied by alpha, only 0 indicates transparent (any other colour with 0 in the alpha byte is 
luminous) */ teq WK&reg0, #0 .if numbytes > 4 teqeq WK&reg1, #0 .if numbytes > 8 teqeq WK&reg2, #0 teqeq WK&reg3, #0 .endif .endif .endm .macro over_8888_8888_prepare next mov WK&next, WK&next, lsr #24 .endm .macro over_8888_8888_1pixel src, dst, offset, next /* src = destination component multiplier */ rsb WK&src, WK&src, #255 /* Split even/odd bytes of dst into SCRATCH/dst */ uxtb16 SCRATCH, WK&dst uxtb16 WK&dst, WK&dst, ror #8 /* Multiply through, adding 0.5 to the upper byte of result for rounding */ mla SCRATCH, SCRATCH, WK&src, MASK mla WK&dst, WK&dst, WK&src, MASK /* Where we would have had a stall between the result of the first MLA and the shifter input, * reload the complete source pixel */ ldr WK&src, [SRC, #offset] /* Multiply by 257/256 to approximate 256/255 */ uxtab16 SCRATCH, SCRATCH, SCRATCH, ror #8 /* In this stall, start processing the next pixel */ .if offset < -4 mov WK&next, WK&next, lsr #24 .endif uxtab16 WK&dst, WK&dst, WK&dst, ror #8 /* Recombine even/odd bytes of multiplied destination */ mov SCRATCH, SCRATCH, ror #8 sel WK&dst, SCRATCH, WK&dst /* Saturated add of source to multiplied destination */ uqadd8 WK&dst, WK&dst, WK&src .endm .macro over_8888_8888_process_tail cond, numbytes, firstreg WK4 .req STRIDE_D WK5 .req STRIDE_S WK6 .req STRIDE_M WK7 .req ORIG_W over_8888_8888_check_transparent numbytes, %(4+firstreg), %(5+firstreg), %(6+firstreg), %(7+firstreg) beq 10f over_8888_8888_prepare %(4+firstreg) .set PROCESS_REG, firstreg .set PROCESS_OFF, -numbytes .rept numbytes / 4 over_8888_8888_1pixel %(4+PROCESS_REG), %(0+PROCESS_REG), PROCESS_OFF, %(5+PROCESS_REG) .set PROCESS_REG, PROCESS_REG+1 .set PROCESS_OFF, PROCESS_OFF+4 .endr pixst , numbytes, firstreg, DST 10: .unreq WK4 .unreq WK5 .unreq WK6 .unreq WK7 .endm generate_composite_function \ pixman_composite_over_8888_8888_asm_armv6, 32, 0, 32 \ FLAG_DST_READWRITE | FLAG_BRANCH_OVER | FLAG_PROCESS_CORRUPTS_PSR | FLAG_PROCESS_DOES_STORE | FLAG_SPILL_LINE_VARS \ 2, /* prefetch distance */ \ over_8888_8888_init, \ nop_macro, /* newline */ \ nop_macro, /* cleanup */ \ over_8888_8888_process_head, \ over_8888_8888_process_tail /******************************************************************************/ /* Multiply each byte of a word by a byte. * Useful when there aren't any obvious ways to fill the stalls with other instructions. 
* word Register containing 4 bytes * byte Register containing byte multiplier (bits 8-31 must be 0) * tmp Scratch register * half Register containing the constant 0x00800080 * GE[3:0] bits must contain 0101 */ .macro mul_8888_8 word, byte, tmp, half /* Split even/odd bytes of word apart */ uxtb16 tmp, word uxtb16 word, word, ror #8 /* Multiply bytes together with rounding, then by 257/256 */ mla tmp, tmp, byte, half mla word, word, byte, half /* 1 stall follows */ uxtab16 tmp, tmp, tmp, ror #8 /* 1 stall follows */ uxtab16 word, word, word, ror #8 /* Recombine bytes */ mov tmp, tmp, ror #8 sel word, tmp, word .endm /******************************************************************************/ .macro over_8888_n_8888_init /* Mask is constant */ ldr MASK, [sp, #ARGS_STACK_OFFSET+8] /* Hold loop invariant in STRIDE_M */ ldr STRIDE_M, =0x00800080 /* We only want the alpha bits of the constant mask */ mov MASK, MASK, lsr #24 /* Set GE[3:0] to 0101 so SEL instructions do what we want */ uadd8 SCRATCH, STRIDE_M, STRIDE_M line_saved_regs Y, STRIDE_D, STRIDE_S, ORIG_W .endm .macro over_8888_n_8888_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload WK4 .req Y WK5 .req STRIDE_D WK6 .req STRIDE_S WK7 .req ORIG_W pixld , numbytes, %(4+(firstreg%2)), SRC, unaligned_src pixld , numbytes, firstreg, DST, 0 .unreq WK4 .unreq WK5 .unreq WK6 .unreq WK7 .endm .macro over_8888_n_8888_1pixel src, dst mul_8888_8 WK&src, MASK, SCRATCH, STRIDE_M sub WK7, WK6, WK&src, lsr #24 mul_8888_8 WK&dst, WK7, SCRATCH, STRIDE_M uqadd8 WK&dst, WK&dst, WK&src .endm .macro over_8888_n_8888_process_tail cond, numbytes, firstreg WK4 .req Y WK5 .req STRIDE_D WK6 .req STRIDE_S WK7 .req ORIG_W over_8888_8888_check_transparent numbytes, %(4+(firstreg%2)), %(5+(firstreg%2)), %(6+firstreg), %(7+firstreg) beq 10f mov WK6, #255 .set PROCESS_REG, firstreg .rept numbytes / 4 .if numbytes == 16 && PROCESS_REG == 2 /* We're using WK6 and WK7 as temporaries, so half way through * 4 pixels, reload the second two source pixels but this time * into WK4 and WK5 */ ldmdb SRC, {WK4, WK5} .endif over_8888_n_8888_1pixel %(4+(PROCESS_REG%2)), %(PROCESS_REG) .set PROCESS_REG, PROCESS_REG+1 .endr pixst , numbytes, firstreg, DST 10: .unreq WK4 .unreq WK5 .unreq WK6 .unreq WK7 .endm generate_composite_function \ pixman_composite_over_8888_n_8888_asm_armv6, 32, 0, 32 \ FLAG_DST_READWRITE | FLAG_BRANCH_OVER | FLAG_PROCESS_CORRUPTS_PSR | FLAG_PROCESS_DOES_STORE | FLAG_SPILL_LINE_VARS \ 2, /* prefetch distance */ \ over_8888_n_8888_init, \ nop_macro, /* newline */ \ nop_macro, /* cleanup */ \ over_8888_n_8888_process_head, \ over_8888_n_8888_process_tail /******************************************************************************/ .macro over_n_8_8888_init /* Source is constant, but splitting it into even/odd bytes is a loop invariant */ ldr SRC, [sp, #ARGS_STACK_OFFSET] /* Not enough registers to hold this constant, but we still use it here to set GE[3:0] */ ldr SCRATCH, =0x00800080 uxtb16 STRIDE_S, SRC uxtb16 SRC, SRC, ror #8 /* Set GE[3:0] to 0101 so SEL instructions do what we want */ uadd8 SCRATCH, SCRATCH, SCRATCH line_saved_regs Y, STRIDE_D, STRIDE_M, ORIG_W .endm .macro over_n_8_8888_newline ldr STRIDE_D, =0x00800080 b 1f .ltorg 1: .endm .macro over_n_8_8888_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload WK4 .req STRIDE_M pixld , numbytes/4, 4, MASK, unaligned_mask pixld , numbytes, firstreg, DST, 0 .unreq WK4 .endm .macro over_n_8_8888_1pixel src, dst uxtb Y, WK4, ror #src*8 /* Trailing 
part of multiplication of source */ mla SCRATCH, STRIDE_S, Y, STRIDE_D mla Y, SRC, Y, STRIDE_D mov ORIG_W, #255 uxtab16 SCRATCH, SCRATCH, SCRATCH, ror #8 uxtab16 Y, Y, Y, ror #8 mov SCRATCH, SCRATCH, ror #8 sub ORIG_W, ORIG_W, Y, lsr #24 sel Y, SCRATCH, Y /* Then multiply the destination */ mul_8888_8 WK&dst, ORIG_W, SCRATCH, STRIDE_D uqadd8 WK&dst, WK&dst, Y .endm .macro over_n_8_8888_process_tail cond, numbytes, firstreg WK4 .req STRIDE_M teq WK4, #0 beq 10f .set PROCESS_REG, firstreg .rept numbytes / 4 over_n_8_8888_1pixel %(PROCESS_REG-firstreg), %(PROCESS_REG) .set PROCESS_REG, PROCESS_REG+1 .endr pixst , numbytes, firstreg, DST 10: .unreq WK4 .endm generate_composite_function \ pixman_composite_over_n_8_8888_asm_armv6, 0, 8, 32 \ FLAG_DST_READWRITE | FLAG_BRANCH_OVER | FLAG_PROCESS_CORRUPTS_PSR | FLAG_PROCESS_DOES_STORE | FLAG_SPILL_LINE_VARS \ 2, /* prefetch distance */ \ over_n_8_8888_init, \ over_n_8_8888_newline, \ nop_macro, /* cleanup */ \ over_n_8_8888_process_head, \ over_n_8_8888_process_tail /******************************************************************************/ .macro over_reverse_n_8888_init ldr SRC, [sp, #ARGS_STACK_OFFSET] ldr MASK, =0x00800080 /* Split source pixel into RB/AG parts */ uxtb16 STRIDE_S, SRC uxtb16 STRIDE_M, SRC, ror #8 /* Set GE[3:0] to 0101 so SEL instructions do what we want */ uadd8 SCRATCH, MASK, MASK line_saved_regs STRIDE_D, ORIG_W .endm .macro over_reverse_n_8888_newline mov STRIDE_D, #0xFF .endm .macro over_reverse_n_8888_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload pixld , numbytes, firstreg, DST, 0 .endm .macro over_reverse_n_8888_1pixel d, is_only teq WK&d, #0 beq 8f /* replace with source */ bics ORIG_W, STRIDE_D, WK&d, lsr #24 .if is_only == 1 beq 49f /* skip store */ .else beq 9f /* write same value back */ .endif mla SCRATCH, STRIDE_S, ORIG_W, MASK /* red/blue */ mla ORIG_W, STRIDE_M, ORIG_W, MASK /* alpha/green */ uxtab16 SCRATCH, SCRATCH, SCRATCH, ror #8 uxtab16 ORIG_W, ORIG_W, ORIG_W, ror #8 mov SCRATCH, SCRATCH, ror #8 sel ORIG_W, SCRATCH, ORIG_W uqadd8 WK&d, WK&d, ORIG_W b 9f 8: mov WK&d, SRC 9: .endm .macro over_reverse_n_8888_tail numbytes, reg1, reg2, reg3, reg4 .if numbytes == 4 over_reverse_n_8888_1pixel reg1, 1 .else and SCRATCH, WK&reg1, WK&reg2 .if numbytes == 16 and SCRATCH, SCRATCH, WK&reg3 and SCRATCH, SCRATCH, WK&reg4 .endif mvns SCRATCH, SCRATCH, asr #24 beq 49f /* skip store if all opaque */ over_reverse_n_8888_1pixel reg1, 0 over_reverse_n_8888_1pixel reg2, 0 .if numbytes == 16 over_reverse_n_8888_1pixel reg3, 0 over_reverse_n_8888_1pixel reg4, 0 .endif .endif pixst , numbytes, reg1, DST 49: .endm .macro over_reverse_n_8888_process_tail cond, numbytes, firstreg over_reverse_n_8888_tail numbytes, firstreg, %(firstreg+1), %(firstreg+2), %(firstreg+3) .endm generate_composite_function \ pixman_composite_over_reverse_n_8888_asm_armv6, 0, 0, 32 \ FLAG_DST_READWRITE | FLAG_BRANCH_OVER | FLAG_PROCESS_CORRUPTS_PSR | FLAG_PROCESS_DOES_STORE | FLAG_SPILL_LINE_VARS | FLAG_PROCESS_CORRUPTS_SCRATCH, \ 3, /* prefetch distance */ \ over_reverse_n_8888_init, \ over_reverse_n_8888_newline, \ nop_macro, /* cleanup */ \ over_reverse_n_8888_process_head, \ over_reverse_n_8888_process_tail /******************************************************************************/ .macro over_white_8888_8888_ca_init HALF .req SRC TMP0 .req STRIDE_D TMP1 .req STRIDE_S TMP2 .req STRIDE_M TMP3 .req ORIG_W WK4 .req SCRATCH line_saved_regs STRIDE_D, STRIDE_M, ORIG_W ldr SCRATCH, =0x800080 mov HALF, #0x80 /* Set 
GE[3:0] to 0101 so SEL instructions do what we want */ uadd8 SCRATCH, SCRATCH, SCRATCH .set DST_PRELOAD_BIAS, 8 .endm .macro over_white_8888_8888_ca_cleanup .set DST_PRELOAD_BIAS, 0 .unreq HALF .unreq TMP0 .unreq TMP1 .unreq TMP2 .unreq TMP3 .unreq WK4 .endm .macro over_white_8888_8888_ca_combine m, d uxtb16 TMP1, TMP0 /* rb_notmask */ uxtb16 TMP2, d /* rb_dest; 1 stall follows */ smlatt TMP3, TMP2, TMP1, HALF /* red */ smlabb TMP2, TMP2, TMP1, HALF /* blue */ uxtb16 TMP0, TMP0, ror #8 /* ag_notmask */ uxtb16 TMP1, d, ror #8 /* ag_dest; 1 stall follows */ smlatt d, TMP1, TMP0, HALF /* alpha */ smlabb TMP1, TMP1, TMP0, HALF /* green */ pkhbt TMP0, TMP2, TMP3, lsl #16 /* rb; 1 stall follows */ pkhbt TMP1, TMP1, d, lsl #16 /* ag */ uxtab16 TMP0, TMP0, TMP0, ror #8 uxtab16 TMP1, TMP1, TMP1, ror #8 mov TMP0, TMP0, ror #8 sel d, TMP0, TMP1 uqadd8 d, d, m /* d is a late result */ .endm .macro over_white_8888_8888_ca_1pixel_head pixld , 4, 1, MASK, 0 pixld , 4, 3, DST, 0 .endm .macro over_white_8888_8888_ca_1pixel_tail mvn TMP0, WK1 teq WK1, WK1, asr #32 bne 01f bcc 03f mov WK3, WK1 b 02f 01: over_white_8888_8888_ca_combine WK1, WK3 02: pixst , 4, 3, DST 03: .endm .macro over_white_8888_8888_ca_2pixels_head pixld , 8, 1, MASK, 0 .endm .macro over_white_8888_8888_ca_2pixels_tail pixld , 8, 3, DST mvn TMP0, WK1 teq WK1, WK1, asr #32 bne 01f movcs WK3, WK1 bcs 02f teq WK2, #0 beq 05f b 02f 01: over_white_8888_8888_ca_combine WK1, WK3 02: mvn TMP0, WK2 teq WK2, WK2, asr #32 bne 03f movcs WK4, WK2 b 04f 03: over_white_8888_8888_ca_combine WK2, WK4 04: pixst , 8, 3, DST 05: .endm .macro over_white_8888_8888_ca_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload .if numbytes == 4 over_white_8888_8888_ca_1pixel_head .else .if numbytes == 16 over_white_8888_8888_ca_2pixels_head over_white_8888_8888_ca_2pixels_tail .endif over_white_8888_8888_ca_2pixels_head .endif .endm .macro over_white_8888_8888_ca_process_tail cond, numbytes, firstreg .if numbytes == 4 over_white_8888_8888_ca_1pixel_tail .else over_white_8888_8888_ca_2pixels_tail .endif .endm generate_composite_function \ pixman_composite_over_white_8888_8888_ca_asm_armv6, 0, 32, 32 \ FLAG_DST_READWRITE | FLAG_BRANCH_OVER | FLAG_PROCESS_CORRUPTS_PSR | FLAG_PROCESS_DOES_STORE | FLAG_SPILL_LINE_VARS | FLAG_PROCESS_CORRUPTS_SCRATCH \ 2, /* prefetch distance */ \ over_white_8888_8888_ca_init, \ nop_macro, /* newline */ \ over_white_8888_8888_ca_cleanup, \ over_white_8888_8888_ca_process_head, \ over_white_8888_8888_ca_process_tail .macro over_n_8888_8888_ca_init /* Set up constants. 
RB_SRC and AG_SRC are in registers; * RB_FLDS, A_SRC, and the two HALF values need to go on the * stack (and the ful SRC value is already there) */ ldr SCRATCH, [sp, #ARGS_STACK_OFFSET] mov WK0, #0x00FF0000 orr WK0, WK0, #0xFF /* RB_FLDS (0x00FF00FF) */ mov WK1, #0x80 /* HALF default value */ mov WK2, SCRATCH, lsr #24 /* A_SRC */ orr WK3, WK1, WK1, lsl #16 /* HALF alternate value (0x00800080) */ push {WK0-WK3} .set ARGS_STACK_OFFSET, ARGS_STACK_OFFSET+16 uxtb16 SRC, SCRATCH uxtb16 STRIDE_S, SCRATCH, ror #8 /* Set GE[3:0] to 0101 so SEL instructions do what we want */ uadd8 SCRATCH, WK3, WK3 .unreq WK0 .unreq WK1 .unreq WK2 .unreq WK3 WK0 .req Y WK1 .req STRIDE_D RB_SRC .req SRC AG_SRC .req STRIDE_S WK2 .req STRIDE_M RB_FLDS .req r8 /* the reloaded constants have to be at consecutive registers starting at an even one */ A_SRC .req r8 HALF .req r9 WK3 .req r10 WK4 .req r11 WK5 .req SCRATCH WK6 .req ORIG_W line_saved_regs Y, STRIDE_D, STRIDE_M, ORIG_W .endm .macro over_n_8888_8888_ca_cleanup add sp, sp, #16 .set ARGS_STACK_OFFSET, ARGS_STACK_OFFSET-16 .unreq WK0 .unreq WK1 .unreq RB_SRC .unreq AG_SRC .unreq WK2 .unreq RB_FLDS .unreq A_SRC .unreq HALF .unreq WK3 .unreq WK4 .unreq WK5 .unreq WK6 WK0 .req r8 WK1 .req r9 WK2 .req r10 WK3 .req r11 .endm .macro over_n_8888_8888_ca_1pixel_head pixld , 4, 6, MASK, 0 pixld , 4, 0, DST, 0 .endm .macro over_n_8888_8888_ca_1pixel_tail ldrd A_SRC, HALF, [sp, #LOCALS_STACK_OFFSET+8] uxtb16 WK1, WK6 /* rb_mask (first step of hard case placed in what would otherwise be a stall) */ teq WK6, WK6, asr #32 /* Zc if transparent, ZC if opaque */ bne 20f bcc 40f /* Mask is fully opaque (all channels) */ ldr WK6, [sp, #ARGS_STACK_OFFSET] /* get SRC back */ eors A_SRC, A_SRC, #0xFF bne 10f /* Source is also opaque - same as src_8888_8888 */ mov WK0, WK6 b 30f 10: /* Same as over_8888_8888 */ mul_8888_8 WK0, A_SRC, WK5, HALF uqadd8 WK0, WK0, WK6 b 30f 20: /* No simplifications possible - do it the hard way */ uxtb16 WK2, WK6, ror #8 /* ag_mask */ mla WK3, WK1, A_SRC, HALF /* rb_mul; 2 cycles */ mla WK4, WK2, A_SRC, HALF /* ag_mul; 2 cycles */ ldrd RB_FLDS, HALF, [sp, #LOCALS_STACK_OFFSET] uxtb16 WK5, WK0 /* rb_dest */ uxtab16 WK3, WK3, WK3, ror #8 uxtb16 WK6, WK0, ror #8 /* ag_dest */ uxtab16 WK4, WK4, WK4, ror #8 smlatt WK0, RB_SRC, WK1, HALF /* red1 */ smlabb WK1, RB_SRC, WK1, HALF /* blue1 */ bic WK3, RB_FLDS, WK3, lsr #8 bic WK4, RB_FLDS, WK4, lsr #8 pkhbt WK1, WK1, WK0, lsl #16 /* rb1 */ smlatt WK0, WK5, WK3, HALF /* red2 */ smlabb WK3, WK5, WK3, HALF /* blue2 */ uxtab16 WK1, WK1, WK1, ror #8 smlatt WK5, AG_SRC, WK2, HALF /* alpha1 */ pkhbt WK3, WK3, WK0, lsl #16 /* rb2 */ smlabb WK0, AG_SRC, WK2, HALF /* green1 */ smlatt WK2, WK6, WK4, HALF /* alpha2 */ smlabb WK4, WK6, WK4, HALF /* green2 */ pkhbt WK0, WK0, WK5, lsl #16 /* ag1 */ uxtab16 WK3, WK3, WK3, ror #8 pkhbt WK4, WK4, WK2, lsl #16 /* ag2 */ uxtab16 WK0, WK0, WK0, ror #8 uxtab16 WK4, WK4, WK4, ror #8 mov WK1, WK1, ror #8 mov WK3, WK3, ror #8 sel WK2, WK1, WK0 /* recombine source*mask */ sel WK1, WK3, WK4 /* recombine dest*(1-source_alpha*mask) */ uqadd8 WK0, WK1, WK2 /* followed by 1 stall */ 30: /* The destination buffer is already in the L1 cache, so * there's little point in amalgamating writes */ pixst , 4, 0, DST 40: .endm .macro over_n_8888_8888_ca_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload .rept (numbytes / 4) - 1 over_n_8888_8888_ca_1pixel_head over_n_8888_8888_ca_1pixel_tail .endr over_n_8888_8888_ca_1pixel_head .endm .macro over_n_8888_8888_ca_process_tail 
cond, numbytes, firstreg over_n_8888_8888_ca_1pixel_tail .endm pixman_asm_function pixman_composite_over_n_8888_8888_ca_asm_armv6 ldr ip, [sp] cmp ip, #-1 beq pixman_composite_over_white_8888_8888_ca_asm_armv6 /* else drop through... */ .endfunc generate_composite_function \ pixman_composite_over_n_8888_8888_ca_asm_armv6_helper, 0, 32, 32 \ FLAG_DST_READWRITE | FLAG_BRANCH_OVER | FLAG_PROCESS_CORRUPTS_PSR | FLAG_PROCESS_DOES_STORE | FLAG_SPILL_LINE_VARS | FLAG_PROCESS_CORRUPTS_SCRATCH | FLAG_PROCESS_CORRUPTS_WK0 \ 2, /* prefetch distance */ \ over_n_8888_8888_ca_init, \ nop_macro, /* newline */ \ over_n_8888_8888_ca_cleanup, \ over_n_8888_8888_ca_process_head, \ over_n_8888_8888_ca_process_tail /******************************************************************************/ .macro in_reverse_8888_8888_init /* Hold loop invariant in MASK */ ldr MASK, =0x00800080 /* Set GE[3:0] to 0101 so SEL instructions do what we want */ uadd8 SCRATCH, MASK, MASK /* Offset the source pointer: we only need the alpha bytes */ add SRC, SRC, #3 line_saved_regs ORIG_W .endm .macro in_reverse_8888_8888_head numbytes, reg1, reg2, reg3 ldrb ORIG_W, [SRC], #4 .if numbytes >= 8 ldrb WK&reg1, [SRC], #4 .if numbytes == 16 ldrb WK&reg2, [SRC], #4 ldrb WK&reg3, [SRC], #4 .endif .endif add DST, DST, #numbytes .endm .macro in_reverse_8888_8888_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload in_reverse_8888_8888_head numbytes, firstreg, %(firstreg+1), %(firstreg+2) .endm .macro in_reverse_8888_8888_1pixel s, d, offset, is_only .if is_only != 1 movs s, ORIG_W .if offset != 0 ldrb ORIG_W, [SRC, #offset] .endif beq 01f teq STRIDE_M, #0xFF beq 02f .endif uxtb16 SCRATCH, d /* rb_dest */ uxtb16 d, d, ror #8 /* ag_dest */ mla SCRATCH, SCRATCH, s, MASK mla d, d, s, MASK uxtab16 SCRATCH, SCRATCH, SCRATCH, ror #8 uxtab16 d, d, d, ror #8 mov SCRATCH, SCRATCH, ror #8 sel d, SCRATCH, d b 02f .if offset == 0 48: /* Last mov d,#0 of the set - used as part of shortcut for * source values all 0 */ .endif 01: mov d, #0 02: .endm .macro in_reverse_8888_8888_tail numbytes, reg1, reg2, reg3, reg4 .if numbytes == 4 teq ORIG_W, ORIG_W, asr #32 ldrne WK&reg1, [DST, #-4] .elseif numbytes == 8 teq ORIG_W, WK&reg1 teqeq ORIG_W, ORIG_W, asr #32 /* all 0 or all -1? */ ldmnedb DST, {WK&reg1-WK&reg2} .else teq ORIG_W, WK&reg1 teqeq ORIG_W, WK&reg2 teqeq ORIG_W, WK&reg3 teqeq ORIG_W, ORIG_W, asr #32 /* all 0 or all -1? 
*/ ldmnedb DST, {WK&reg1-WK&reg4} .endif cmnne DST, #0 /* clear C if NE */ bcs 49f /* no writes to dest if source all -1 */ beq 48f /* set dest to all 0 if source all 0 */ .if numbytes == 4 in_reverse_8888_8888_1pixel ORIG_W, WK&reg1, 0, 1 str WK&reg1, [DST, #-4] .elseif numbytes == 8 in_reverse_8888_8888_1pixel STRIDE_M, WK&reg1, -4, 0 in_reverse_8888_8888_1pixel STRIDE_M, WK&reg2, 0, 0 stmdb DST, {WK&reg1-WK&reg2} .else in_reverse_8888_8888_1pixel STRIDE_M, WK&reg1, -12, 0 in_reverse_8888_8888_1pixel STRIDE_M, WK&reg2, -8, 0 in_reverse_8888_8888_1pixel STRIDE_M, WK&reg3, -4, 0 in_reverse_8888_8888_1pixel STRIDE_M, WK&reg4, 0, 0 stmdb DST, {WK&reg1-WK&reg4} .endif 49: .endm .macro in_reverse_8888_8888_process_tail cond, numbytes, firstreg in_reverse_8888_8888_tail numbytes, firstreg, %(firstreg+1), %(firstreg+2), %(firstreg+3) .endm generate_composite_function \ pixman_composite_in_reverse_8888_8888_asm_armv6, 32, 0, 32 \ FLAG_DST_READWRITE | FLAG_BRANCH_OVER | FLAG_PROCESS_CORRUPTS_PSR | FLAG_PROCESS_DOES_STORE | FLAG_SPILL_LINE_VARS | FLAG_PROCESS_CORRUPTS_SCRATCH | FLAG_NO_PRELOAD_DST \ 2, /* prefetch distance */ \ in_reverse_8888_8888_init, \ nop_macro, /* newline */ \ nop_macro, /* cleanup */ \ in_reverse_8888_8888_process_head, \ in_reverse_8888_8888_process_tail /******************************************************************************/ .macro over_n_8888_init ldr SRC, [sp, #ARGS_STACK_OFFSET] /* Hold loop invariant in MASK */ ldr MASK, =0x00800080 /* Hold multiplier for destination in STRIDE_M */ mov STRIDE_M, #255 sub STRIDE_M, STRIDE_M, SRC, lsr #24 /* Set GE[3:0] to 0101 so SEL instructions do what we want */ uadd8 SCRATCH, MASK, MASK .endm .macro over_n_8888_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload pixld , numbytes, firstreg, DST, 0 .endm .macro over_n_8888_1pixel dst mul_8888_8 WK&dst, STRIDE_M, SCRATCH, MASK uqadd8 WK&dst, WK&dst, SRC .endm .macro over_n_8888_process_tail cond, numbytes, firstreg .set PROCESS_REG, firstreg .rept numbytes / 4 over_n_8888_1pixel %(PROCESS_REG) .set PROCESS_REG, PROCESS_REG+1 .endr pixst , numbytes, firstreg, DST .endm generate_composite_function \ pixman_composite_over_n_8888_asm_armv6, 0, 0, 32 \ FLAG_DST_READWRITE | FLAG_BRANCH_OVER | FLAG_PROCESS_DOES_STORE \ 2, /* prefetch distance */ \ over_n_8888_init, \ nop_macro, /* newline */ \ nop_macro, /* cleanup */ \ over_n_8888_process_head, \ over_n_8888_process_tail /******************************************************************************/
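The ARMv6 file above leans on one arithmetic identity throughout (mul_8888_8, over_8888_8888_1pixel, over_n_8888_1pixel): a byte product is divided by 255 by adding 0x80 and then multiplying by 257/256, as its own comments note. The following scalar C sketch shows the premultiplied OVER operator built on that identity. It is a readability aid only, not a bit-for-bit restatement of the UQADD8/SEL register scheduling above.

#include <stdint.h>

/* Rounded division of a byte product by 255, matching the
 * "add 0x80, then multiply by 257/256" trick used above. */
static uint8_t mul_div_255(uint8_t a, uint8_t b)
{
    uint32_t t = (uint32_t)a * b + 0x80;
    return (uint8_t)((t + (t >> 8)) >> 8);
}

/* Premultiplied-alpha OVER of one a8r8g8b8 pixel onto another:
 * dest = src + dest * (255 - src_alpha) / 255 per channel,
 * with per-channel saturation as UQADD8 provides. */
static uint32_t over_8888_8888_pixel(uint32_t src, uint32_t dst)
{
    uint32_t inv_alpha = 255u - (src >> 24);
    uint32_t out = 0;

    for (int shift = 0; shift < 32; shift += 8) {
        uint32_t s = (src >> shift) & 0xFF;
        uint32_t c = s + mul_div_255((uint8_t)((dst >> shift) & 0xFF),
                                     (uint8_t)inv_alpha);
        if (c > 255)
            c = 255;
        out |= c << shift;
    }
    return out;
}

The over_8888_8888_check_transparent test above is just a short-circuit for the all-zero-source case, for which this function returns dst unchanged.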
ElSargo/wezpy
139,688
wezterm-src/deps/cairo/pixman/pixman/pixman-arma64-neon-asm.S
/* * Copyright © 2009 Nokia Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Author: Siarhei Siamashka (siarhei.siamashka@nokia.com) */ /* * This file contains implementations of NEON optimized pixel processing * functions. There is no full and detailed tutorial, but some functions * (those which are exposing some new or interesting features) are * extensively commented and can be used as examples. * * You may want to have a look at the comments for following functions: * - pixman_composite_over_8888_0565_asm_neon * - pixman_composite_over_n_8_0565_asm_neon */ /* Prevent the stack from becoming executable for no reason... */ #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif .text .arch armv8-a .altmacro .p2align 2 #include "pixman-private.h" #include "pixman-arm-asm.h" #include "pixman-arma64-neon-asm.h" /* Global configuration options and preferences */ /* * The code can optionally make use of unaligned memory accesses to improve * performance of handling leading/trailing pixels for each scanline. * Configuration variable RESPECT_STRICT_ALIGNMENT can be set to 0 for * example in linux if unaligned memory accesses are not configured to * generate.exceptions. */ .set RESPECT_STRICT_ALIGNMENT, 1 /* * Set default prefetch type. There is a choice between the following options: * * PREFETCH_TYPE_NONE (may be useful for the ARM cores where PLD is set to work * as NOP to workaround some HW bugs or for whatever other reason) * * PREFETCH_TYPE_SIMPLE (may be useful for simple single-issue ARM cores where * advanced prefetch intruduces heavy overhead) * * PREFETCH_TYPE_ADVANCED (useful for superscalar cores such as ARM Cortex-A8 * which can run ARM and NEON instructions simultaneously so that extra ARM * instructions do not add (many) extra cycles, but improve prefetch efficiency) * * Note: some types of function can't support advanced prefetch and fallback * to simple one (those which handle 24bpp pixels) */ .set PREFETCH_TYPE_DEFAULT, PREFETCH_TYPE_ADVANCED /* Prefetch distance in pixels for simple prefetch */ .set PREFETCH_DISTANCE_SIMPLE, 64 /* * Implementation of pixman_composite_over_8888_0565_asm_neon * * This function takes a8r8g8b8 source buffer, r5g6b5 destination buffer and * performs OVER compositing operation. Function fast_composite_over_8888_0565 * from pixman-fast-path.c does the same in C and can be used as a reference. 
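Before the NEON implementation, it may help to see the referenced operation in scalar form. The C sketch below follows the description of "a8r8g8b8 source OVER r5g6b5 destination" given above; it is not the actual fast_composite_over_8888_0565 from pixman-fast-path.c, and its 5/6-bit channel expansion uses plain bit replication rather than the exact SHRN/SRI sequence used further down.

#include <stdint.h>

/* One pixel of premultiplied OVER: a8r8g8b8 source onto an r5g6b5 destination. */
static uint16_t over_8888_0565_pixel(uint32_t src, uint16_t dst)
{
    /* Expand the 5/6/5 destination channels to 8 bits by bit replication. */
    uint32_t dr = (dst >> 11) & 0x1F;  dr = (dr << 3) | (dr >> 2);
    uint32_t dg = (dst >>  5) & 0x3F;  dg = (dg << 2) | (dg >> 4);
    uint32_t db =  dst        & 0x1F;  db = (db << 3) | (db >> 2);

    uint32_t ia = 255u - (src >> 24);  /* inverted source alpha */
    uint32_t t;

    /* dest * (255 - alpha) / 255 with rounding (roughly what URSHR + RADDHN
     * compute below), then a saturating add of the source channel. */
    t = dr * ia + 0x80;  dr = (t + (t >> 8)) >> 8;
    t = dg * ia + 0x80;  dg = (t + (t >> 8)) >> 8;
    t = db * ia + 0x80;  db = (t + (t >> 8)) >> 8;

    uint32_t r = ((src >> 16) & 0xFF) + dr;  if (r > 255) r = 255;
    uint32_t g = ((src >>  8) & 0xFF) + dg;  if (g > 255) g = 255;
    uint32_t b = ( src        & 0xFF) + db;  if (b > 255) b = 255;

    /* Repack to r5g6b5 by dropping the low bits again. */
    return (uint16_t)(((r & 0xF8) << 8) | ((g & 0xFC) << 3) | (b >> 3));
}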
* * First we need to have some NEON assembly code which can do the actual * operation on the pixels and provide it to the template macro. * * Template macro quite conveniently takes care of emitting all the necessary * code for memory reading and writing (including quite tricky cases of * handling unaligned leading/trailing pixels), so we only need to deal with * the data in NEON registers. * * NEON registers allocation in general is recommented to be the following: * v0, v1, v2, v3 - contain loaded source pixel data * v4, v5, v6, v7 - contain loaded destination pixels (if they are needed) * v24, v25, v26, v27 - contain loading mask pixel data (if mask is used) * v28, v29, v30, v31 - place for storing the result (destination pixels) * * As can be seen above, four 64-bit NEON registers are used for keeping * intermediate pixel data and up to 8 pixels can be processed in one step * for 32bpp formats (16 pixels for 16bpp, 32 pixels for 8bpp). * * This particular function uses the following registers allocation: * v0, v1, v2, v3 - contain loaded source pixel data * v4, v5 - contain loaded destination pixels (they are needed) * v28, v29 - place for storing the result (destination pixels) */ /* * Step one. We need to have some code to do some arithmetics on pixel data. * This is implemented as a pair of macros: '*_head' and '*_tail'. When used * back-to-back, they take pixel data from {v0, v1, v2, v3} and {v4, v5}, * perform all the needed calculations and write the result to {v28, v29}. * The rationale for having two macros and not just one will be explained * later. In practice, any single monolitic function which does the work can * be split into two parts in any arbitrary way without affecting correctness. * * There is one special trick here too. Common template macro can optionally * make our life a bit easier by doing R, G, B, A color components * deinterleaving for 32bpp pixel formats (and this feature is used in * 'pixman_composite_over_8888_0565_asm_neon' function). So it means that * instead of having 8 packed pixels in {v0, v1, v2, v3} registers, we * actually use v0 register for blue channel (a vector of eight 8-bit * values), v1 register for green, v2 for red and v3 for alpha. This * simple conversion can be also done with a few NEON instructions: * * Packed to planar conversion: // vuzp8 is a wrapper macro * vuzp8 v0, v1 * vuzp8 v2, v3 * vuzp8 v1, v3 * vuzp8 v0, v2 * * Planar to packed conversion: // vzip8 is a wrapper macro * vzip8 v0, v2 * vzip8 v1, v3 * vzip8 v2, v3 * vzip8 v0, v1 * * But pixel can be loaded directly in planar format using LD4 / b NEON * instruction. It is 1 cycle slower than LD1 / s, so this is not always * desirable, that's why deinterleaving is optional. 
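For readers unfamiliar with the planar ("deinterleaved") layout mentioned above, here is a scalar C sketch of the transformation that LD4 (or the vuzp8 sequence) performs on 32bpp data, assuming little-endian storage of the 32-bit pixel values. The function name and buffers are illustrative only.

#include <stddef.h>
#include <stdint.h>

/* Packed (interleaved): b0 g0 r0 a0  b1 g1 r1 a1 ...
 * Planar (deinterleaved): one array per channel, matching the template's
 * convention of blue in v0, green in v1, red in v2 and alpha in v3. */
static void deinterleave_a8r8g8b8(const uint8_t *packed, size_t npixels,
                                  uint8_t *b, uint8_t *g,
                                  uint8_t *r, uint8_t *a)
{
    for (size_t i = 0; i < npixels; i++) {
        b[i] = packed[4 * i + 0];
        g[i] = packed[4 * i + 1];
        r[i] = packed[4 * i + 2];
        a[i] = packed[4 * i + 3];
    }
}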
* * But anyway, here is the code: */ .macro pixman_composite_over_8888_0565_process_pixblock_head /* convert 8 r5g6b5 pixel data from {v4} to planar 8-bit format and put data into v6 - red, v7 - green, v30 - blue */ mov v4.d[1], v5.d[0] shrn v6.8b, v4.8h, #8 shrn v7.8b, v4.8h, #3 sli v4.8h, v4.8h, #5 sri v6.8b, v6.8b, #5 mvn v3.8b, v3.8b /* invert source alpha */ sri v7.8b, v7.8b, #6 shrn v30.8b, v4.8h, #2 /* now do alpha blending, storing results in 8-bit planar format into v20 - red, v23 - green, v22 - blue */ umull v10.8h, v3.8b, v6.8b umull v11.8h, v3.8b, v7.8b umull v12.8h, v3.8b, v30.8b urshr v17.8h, v10.8h, #8 urshr v18.8h, v11.8h, #8 urshr v19.8h, v12.8h, #8 raddhn v20.8b, v10.8h, v17.8h raddhn v23.8b, v11.8h, v18.8h raddhn v22.8b, v12.8h, v19.8h .endm .macro pixman_composite_over_8888_0565_process_pixblock_tail /* ... continue alpha blending */ uqadd v17.8b, v2.8b, v20.8b uqadd v18.8b, v0.8b, v22.8b uqadd v19.8b, v1.8b, v23.8b /* convert the result to r5g6b5 and store it into {v14} */ ushll v14.8h, v17.8b, #7 sli v14.8h, v14.8h, #1 ushll v8.8h, v19.8b, #7 sli v8.8h, v8.8h, #1 ushll v9.8h, v18.8b, #7 sli v9.8h, v9.8h, #1 sri v14.8h, v8.8h, #5 sri v14.8h, v9.8h, #11 mov v28.d[0], v14.d[0] mov v29.d[0], v14.d[1] .endm /* * OK, now we got almost everything that we need. Using the above two * macros, the work can be done right. But now we want to optimize * it a bit. ARM Cortex-A8 is an in-order core, and benefits really * a lot from good code scheduling and software pipelining. * * Let's construct some code, which will run in the core main loop. * Some pseudo-code of the main loop will look like this: * head * while (...) { * tail * head * } * tail * * It may look a bit weird, but this setup allows to hide instruction * latencies better and also utilize dual-issue capability more * efficiently (make pairs of load-store and ALU instructions). * * So what we need now is a '*_tail_head' macro, which will be used * in the core main loop. A trivial straightforward implementation * of this macro would look like this: * * pixman_composite_over_8888_0565_process_pixblock_tail * st1 {v28.4h, v29.4h}, [DST_W], #32 * ld1 {v4.4h, v5.4h}, [DST_R], #16 * ld4 {v0.2s, v1.2s, v2.2s, v3.2s}, [SRC], #32 * pixman_composite_over_8888_0565_process_pixblock_head * cache_preload 8, 8 * * Now it also got some VLD/VST instructions. We simply can't move from * processing one block of pixels to the other one with just arithmetics. * The previously processed data needs to be written to memory and new * data needs to be fetched. Fortunately, this main loop does not deal * with partial leading/trailing pixels and can load/store a full block * of pixels in a bulk. Additionally, destination buffer is already * 16 bytes aligned here (which is good for performance). * * New things here are DST_R, DST_W, SRC and MASK identifiers. These * are the aliases for ARM registers which are used as pointers for * accessing data. We maintain separate pointers for reading and writing * destination buffer (DST_R and DST_W). * * Another new thing is 'cache_preload' macro. It is used for prefetching * data into CPU L2 cache and improve performance when dealing with large * images which are far larger than cache size. It uses one argument * (actually two, but they need to be the same here) - number of pixels * in a block. Looking into 'pixman-arm-neon-asm.h' can provide some * details about this macro. 
Moreover, if good performance is needed * the code from this macro needs to be copied into '*_tail_head' macro * and mixed with the rest of code for optimal instructions scheduling. * We are actually doing it below. * * Now after all the explanations, here is the optimized code. * Different instruction streams (originaling from '*_head', '*_tail' * and 'cache_preload' macro) use different indentation levels for * better readability. Actually taking the code from one of these * indentation levels and ignoring a few LD/ST instructions would * result in exactly the code from '*_head', '*_tail' or 'cache_preload' * macro! */ #if 1 .macro pixman_composite_over_8888_0565_process_pixblock_tail_head uqadd v17.8b, v2.8b, v20.8b ld1 {v4.4h, v5.4h}, [DST_R], #16 mov v4.d[1], v5.d[0] uqadd v18.8b, v0.8b, v22.8b uqadd v19.8b, v1.8b, v23.8b shrn v6.8b, v4.8h, #8 fetch_src_pixblock shrn v7.8b, v4.8h, #3 sli v4.8h, v4.8h, #5 ushll v14.8h, v17.8b, #7 sli v14.8h, v14.8h, #1 PF add PF_X, PF_X, #8 ushll v8.8h, v19.8b, #7 sli v8.8h, v8.8h, #1 PF tst PF_CTL, #0xF sri v6.8b, v6.8b, #5 PF beq 10f PF add PF_X, PF_X, #8 10: mvn v3.8b, v3.8b PF beq 10f PF sub PF_CTL, PF_CTL, #1 10: sri v7.8b, v7.8b, #6 shrn v30.8b, v4.8h, #2 umull v10.8h, v3.8b, v6.8b PF lsl DUMMY, PF_X, #src_bpp_shift PF prfm PREFETCH_MODE, [PF_SRC, DUMMY] umull v11.8h, v3.8b, v7.8b umull v12.8h, v3.8b, v30.8b PF lsl DUMMY, PF_X, #dst_bpp_shift PF prfm PREFETCH_MODE, [PF_DST, DUMMY] sri v14.8h, v8.8h, #5 PF cmp PF_X, ORIG_W ushll v9.8h, v18.8b, #7 sli v9.8h, v9.8h, #1 urshr v17.8h, v10.8h, #8 PF ble 10f PF sub PF_X, PF_X, ORIG_W 10: urshr v19.8h, v11.8h, #8 urshr v18.8h, v12.8h, #8 PF ble 10f PF subs PF_CTL, PF_CTL, #0x10 10: sri v14.8h, v9.8h, #11 mov v28.d[0], v14.d[0] mov v29.d[0], v14.d[1] PF ble 10f PF lsl DUMMY, SRC_STRIDE, #src_bpp_shift PF ldrsb DUMMY, [PF_SRC, DUMMY] PF add PF_SRC, PF_SRC, #1 10: raddhn v20.8b, v10.8h, v17.8h raddhn v23.8b, v11.8h, v19.8h PF ble 10f PF lsl DUMMY, DST_STRIDE, #dst_bpp_shift PF ldrsb DUMMY, [PF_DST, DUMMY] PF add PF_DST, PF_SRC, #1 10: raddhn v22.8b, v12.8h, v18.8h st1 {v14.8h}, [DST_W], #16 .endm #else /* If we did not care much about the performance, we would just use this... */ .macro pixman_composite_over_8888_0565_process_pixblock_tail_head pixman_composite_over_8888_0565_process_pixblock_tail st1 {v14.8h}, [DST_W], #16 ld1 {v4.4h, v4.5h}, [DST_R], #16 fetch_src_pixblock pixman_composite_over_8888_0565_process_pixblock_head cache_preload 8, 8 .endm #endif /* * And now the final part. We are using 'generate_composite_function' macro * to put all the stuff together. We are specifying the name of the function * which we want to get, number of bits per pixel for the source, mask and * destination (0 if unused, like mask in this case). Next come some bit * flags: * FLAG_DST_READWRITE - tells that the destination buffer is both read * and written, for write-only buffer we would use * FLAG_DST_WRITEONLY flag instead * FLAG_DEINTERLEAVE_32BPP - tells that we prefer to work with planar data * and separate color channels for 32bpp format. * The next things are: * - the number of pixels processed per iteration (8 in this case, because * that's the maximum what can fit into four 64-bit NEON registers). * - prefetch distance, measured in pixel blocks. In this case it is 5 times * by 8 pixels. That would be 40 pixels, or up to 160 bytes. Optimal * prefetch distance can be selected by running some benchmarks. 
* * After that we specify some macros, these are 'default_init', * 'default_cleanup' here which are empty (but it is possible to have custom * init/cleanup macros to be able to save/restore some extra NEON registers * like d8-d15 or do anything else) followed by * 'pixman_composite_over_8888_0565_process_pixblock_head', * 'pixman_composite_over_8888_0565_process_pixblock_tail' and * 'pixman_composite_over_8888_0565_process_pixblock_tail_head' * which we got implemented above. * * The last part is the NEON registers allocation scheme. */ generate_composite_function \ pixman_composite_over_8888_0565_asm_neon, 32, 0, 16, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ default_init, \ default_cleanup, \ pixman_composite_over_8888_0565_process_pixblock_head, \ pixman_composite_over_8888_0565_process_pixblock_tail, \ pixman_composite_over_8888_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 24 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_over_n_0565_process_pixblock_head /* convert 8 r5g6b5 pixel data from {v4} to planar 8-bit format and put data into v6 - red, v7 - green, v30 - blue */ mov v4.d[1], v5.d[0] shrn v6.8b, v4.8h, #8 shrn v7.8b, v4.8h, #3 sli v4.8h, v4.8h, #5 sri v6.8b, v6.8b, #5 sri v7.8b, v7.8b, #6 shrn v30.8b, v4.8h, #2 /* now do alpha blending, storing results in 8-bit planar format into v20 - red, v23 - green, v22 - blue */ umull v10.8h, v3.8b, v6.8b umull v11.8h, v3.8b, v7.8b umull v12.8h, v3.8b, v30.8b urshr v13.8h, v10.8h, #8 urshr v14.8h, v11.8h, #8 urshr v15.8h, v12.8h, #8 raddhn v20.8b, v10.8h, v13.8h raddhn v23.8b, v11.8h, v14.8h raddhn v22.8b, v12.8h, v15.8h .endm .macro pixman_composite_over_n_0565_process_pixblock_tail /* ... 
continue alpha blending */ uqadd v17.8b, v2.8b, v20.8b uqadd v18.8b, v0.8b, v22.8b uqadd v19.8b, v1.8b, v23.8b /* convert the result to r5g6b5 and store it into {v14} */ ushll v14.8h, v17.8b, #7 sli v14.8h, v14.8h, #1 ushll v8.8h, v19.8b, #7 sli v8.8h, v8.8h, #1 ushll v9.8h, v18.8b, #7 sli v9.8h, v9.8h, #1 sri v14.8h, v8.8h, #5 sri v14.8h, v9.8h, #11 mov v28.d[0], v14.d[0] mov v29.d[0], v14.d[1] .endm /* TODO: expand macros and do better instructions scheduling */ .macro pixman_composite_over_n_0565_process_pixblock_tail_head pixman_composite_over_n_0565_process_pixblock_tail ld1 {v4.4h, v5.4h}, [DST_R], #16 st1 {v14.8h}, [DST_W], #16 pixman_composite_over_n_0565_process_pixblock_head cache_preload 8, 8 .endm .macro pixman_composite_over_n_0565_init mov v3.s[0], w4 dup v0.8b, v3.b[0] dup v1.8b, v3.b[1] dup v2.8b, v3.b[2] dup v3.8b, v3.b[3] mvn v3.8b, v3.8b /* invert source alpha */ .endm generate_composite_function \ pixman_composite_over_n_0565_asm_neon, 0, 0, 16, \ FLAG_DST_READWRITE, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_over_n_0565_init, \ default_cleanup, \ pixman_composite_over_n_0565_process_pixblock_head, \ pixman_composite_over_n_0565_process_pixblock_tail, \ pixman_composite_over_n_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 24 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_src_8888_0565_process_pixblock_head ushll v8.8h, v1.8b, #7 sli v8.8h, v8.8h, #1 ushll v14.8h, v2.8b, #7 sli v14.8h, v14.8h, #1 ushll v9.8h, v0.8b, #7 sli v9.8h, v9.8h, #1 .endm .macro pixman_composite_src_8888_0565_process_pixblock_tail sri v14.8h, v8.8h, #5 sri v14.8h, v9.8h, #11 mov v28.d[0], v14.d[0] mov v29.d[0], v14.d[1] .endm .macro pixman_composite_src_8888_0565_process_pixblock_tail_head sri v14.8h, v8.8h, #5 PF add PF_X, PF_X, #8 PF tst PF_CTL, #0xF fetch_src_pixblock PF beq 10f PF add PF_X, PF_X, #8 PF sub PF_CTL, PF_CTL, #1 10: sri v14.8h, v9.8h, #11 mov v28.d[0], v14.d[0] mov v29.d[0], v14.d[1] PF cmp PF_X, ORIG_W PF lsl DUMMY, PF_X, #src_bpp_shift PF prfm PREFETCH_MODE, [PF_SRC, DUMMY] ushll v8.8h, v1.8b, #7 sli v8.8h, v8.8h, #1 st1 {v14.8h}, [DST_W], #16 PF ble 10f PF sub PF_X, PF_X, ORIG_W PF subs PF_CTL, PF_CTL, #0x10 10: ushll v14.8h, v2.8b, #7 sli v14.8h, v14.8h, #1 PF ble 10f PF lsl DUMMY, SRC_STRIDE, #src_bpp_shift PF ldrsb DUMMY, [PF_SRC, DUMMY] PF add PF_SRC, PF_SRC, #1 10: ushll v9.8h, v0.8b, #7 sli v9.8h, v9.8h, #1 .endm generate_composite_function \ pixman_composite_src_8888_0565_asm_neon, 32, 0, 16, \ FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 10, /* prefetch distance */ \ default_init, \ default_cleanup, \ pixman_composite_src_8888_0565_process_pixblock_head, \ pixman_composite_src_8888_0565_process_pixblock_tail, \ pixman_composite_src_8888_0565_process_pixblock_tail_head /******************************************************************************/ .macro pixman_composite_src_0565_8888_process_pixblock_head mov v0.d[1], v1.d[0] shrn v30.8b, v0.8h, #8 shrn v29.8b, v0.8h, #3 sli v0.8h, v0.8h, #5 movi v31.8b, #255 sri v30.8b, v30.8b, #5 sri v29.8b, v29.8b, #6 shrn v28.8b, v0.8h, #2 .endm .macro pixman_composite_src_0565_8888_process_pixblock_tail .endm /* TODO: expand macros and do better instructions scheduling */ .macro pixman_composite_src_0565_8888_process_pixblock_tail_head 
pixman_composite_src_0565_8888_process_pixblock_tail st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32 fetch_src_pixblock pixman_composite_src_0565_8888_process_pixblock_head cache_preload 8, 8 .endm generate_composite_function \ pixman_composite_src_0565_8888_asm_neon, 16, 0, 32, \ FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 10, /* prefetch distance */ \ default_init, \ default_cleanup, \ pixman_composite_src_0565_8888_process_pixblock_head, \ pixman_composite_src_0565_8888_process_pixblock_tail, \ pixman_composite_src_0565_8888_process_pixblock_tail_head /******************************************************************************/ .macro pixman_composite_add_8_8_process_pixblock_head uqadd v28.8b, v0.8b, v4.8b uqadd v29.8b, v1.8b, v5.8b uqadd v30.8b, v2.8b, v6.8b uqadd v31.8b, v3.8b, v7.8b .endm .macro pixman_composite_add_8_8_process_pixblock_tail .endm .macro pixman_composite_add_8_8_process_pixblock_tail_head fetch_src_pixblock PF add PF_X, PF_X, #32 PF tst PF_CTL, #0xF ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32 PF beq 10f PF add PF_X, PF_X, #32 PF sub PF_CTL, PF_CTL, #1 10: st1 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32 PF cmp PF_X, ORIG_W PF lsl DUMMY, PF_X, #src_bpp_shift PF prfm PREFETCH_MODE, [PF_SRC, DUMMY] PF lsl DUMMY, PF_X, #dst_bpp_shift PF prfm PREFETCH_MODE, [PF_DST, DUMMY] PF ble 10f PF sub PF_X, PF_X, ORIG_W PF subs PF_CTL, PF_CTL, #0x10 10: uqadd v28.8b, v0.8b, v4.8b PF ble 10f PF lsl DUMMY, SRC_STRIDE, #src_bpp_shift PF ldrsb DUMMY, [PF_SRC, DUMMY] PF add PF_SRC, PF_SRC, #1 PF lsl DUMMY, DST_STRIDE, #dst_bpp_shift PF ldrsb DUMMY, [PF_DST, DUMMY] PF add PF_DST, PF_DST, #1 10: uqadd v29.8b, v1.8b, v5.8b uqadd v30.8b, v2.8b, v6.8b uqadd v31.8b, v3.8b, v7.8b .endm generate_composite_function \ pixman_composite_add_8_8_asm_neon, 8, 0, 8, \ FLAG_DST_READWRITE, \ 32, /* number of pixels, processed in a single block */ \ 10, /* prefetch distance */ \ default_init, \ default_cleanup, \ pixman_composite_add_8_8_process_pixblock_head, \ pixman_composite_add_8_8_process_pixblock_tail, \ pixman_composite_add_8_8_process_pixblock_tail_head /******************************************************************************/ .macro pixman_composite_add_8888_8888_process_pixblock_tail_head fetch_src_pixblock PF add PF_X, PF_X, #8 PF tst PF_CTL, #0xF ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32 PF beq 10f PF add PF_X, PF_X, #8 PF sub PF_CTL, PF_CTL, #1 10: st1 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32 PF cmp PF_X, ORIG_W PF lsl DUMMY, PF_X, #src_bpp_shift PF prfm PREFETCH_MODE, [PF_SRC, DUMMY] PF lsl DUMMY, PF_X, #dst_bpp_shift PF prfm PREFETCH_MODE, [PF_DST, DUMMY] PF ble 10f PF sub PF_X, PF_X, ORIG_W PF subs PF_CTL, PF_CTL, #0x10 10: uqadd v28.8b, v0.8b, v4.8b PF ble 10f PF lsl DUMMY, SRC_STRIDE, #src_bpp_shift PF ldrsb DUMMY, [PF_SRC, DUMMY] PF add PF_SRC, PF_SRC, #1 PF lsl DUMMY, DST_STRIDE, #dst_bpp_shift PF ldrsb DUMMY, [PF_DST, DUMMY] PF add PF_DST, PF_DST, #1 10: uqadd v29.8b, v1.8b, v5.8b uqadd v30.8b, v2.8b, v6.8b uqadd v31.8b, v3.8b, v7.8b .endm generate_composite_function \ pixman_composite_add_8888_8888_asm_neon, 32, 0, 32, \ FLAG_DST_READWRITE, \ 8, /* number of pixels, processed in a single block */ \ 10, /* prefetch distance */ \ default_init, \ default_cleanup, \ pixman_composite_add_8_8_process_pixblock_head, \ pixman_composite_add_8_8_process_pixblock_tail, \ pixman_composite_add_8888_8888_process_pixblock_tail_head generate_composite_function_single_scanline \ 
pixman_composite_scanline_add_asm_neon, 32, 0, 32, \ FLAG_DST_READWRITE, \ 8, /* number of pixels, processed in a single block */ \ default_init, \ default_cleanup, \ pixman_composite_add_8_8_process_pixblock_head, \ pixman_composite_add_8_8_process_pixblock_tail, \ pixman_composite_add_8888_8888_process_pixblock_tail_head /******************************************************************************/ .macro pixman_composite_out_reverse_8888_8888_process_pixblock_head mvn v24.8b, v3.8b /* get inverted alpha */ /* do alpha blending */ umull v8.8h, v24.8b, v4.8b umull v9.8h, v24.8b, v5.8b umull v10.8h, v24.8b, v6.8b umull v11.8h, v24.8b, v7.8b .endm .macro pixman_composite_out_reverse_8888_8888_process_pixblock_tail urshr v14.8h, v8.8h, #8 urshr v15.8h, v9.8h, #8 urshr v16.8h, v10.8h, #8 urshr v17.8h, v11.8h, #8 raddhn v28.8b, v14.8h, v8.8h raddhn v29.8b, v15.8h, v9.8h raddhn v30.8b, v16.8h, v10.8h raddhn v31.8b, v17.8h, v11.8h .endm .macro pixman_composite_out_reverse_8888_8888_process_pixblock_tail_head ld4 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32 urshr v14.8h, v8.8h, #8 PF add PF_X, PF_X, #8 PF tst PF_CTL, #0xF urshr v15.8h, v9.8h, #8 urshr v16.8h, v10.8h, #8 urshr v17.8h, v11.8h, #8 PF beq 10f PF add PF_X, PF_X, #8 PF sub PF_CTL, PF_CTL, #1 10: raddhn v28.8b, v14.8h, v8.8h raddhn v29.8b, v15.8h, v9.8h PF cmp PF_X, ORIG_W raddhn v30.8b, v16.8h, v10.8h raddhn v31.8b, v17.8h, v11.8h fetch_src_pixblock PF lsl DUMMY, PF_X, #src_bpp_shift PF prfm PREFETCH_MODE, [PF_SRC, DUMMY] mvn v22.8b, v3.8b PF lsl DUMMY, PF_X, #dst_bpp_shift PF prfm PREFETCH_MODE, [PF_DST, DUMMY] st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32 PF ble 10f PF sub PF_X, PF_X, ORIG_W 10: umull v8.8h, v22.8b, v4.8b PF ble 10f PF subs PF_CTL, PF_CTL, #0x10 10: umull v9.8h, v22.8b, v5.8b PF ble 10f PF lsl DUMMY, SRC_STRIDE, #src_bpp_shift PF ldrsb DUMMY, [PF_SRC, DUMMY] PF add PF_SRC, PF_SRC, #1 10: umull v10.8h, v22.8b, v6.8b PF ble 10f PF lsl DUMMY, DST_STRIDE, #dst_bpp_shift PF ldrsb DUMMY, [PF_DST, DUMMY] PF add PF_DST, PF_DST, #1 10: umull v11.8h, v22.8b, v7.8b .endm generate_composite_function_single_scanline \ pixman_composite_scanline_out_reverse_asm_neon, 32, 0, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ default_init, \ default_cleanup, \ pixman_composite_out_reverse_8888_8888_process_pixblock_head, \ pixman_composite_out_reverse_8888_8888_process_pixblock_tail, \ pixman_composite_out_reverse_8888_8888_process_pixblock_tail_head /******************************************************************************/ .macro pixman_composite_over_8888_8888_process_pixblock_head pixman_composite_out_reverse_8888_8888_process_pixblock_head .endm .macro pixman_composite_over_8888_8888_process_pixblock_tail pixman_composite_out_reverse_8888_8888_process_pixblock_tail uqadd v28.8b, v0.8b, v28.8b uqadd v29.8b, v1.8b, v29.8b uqadd v30.8b, v2.8b, v30.8b uqadd v31.8b, v3.8b, v31.8b .endm .macro pixman_composite_over_8888_8888_process_pixblock_tail_head ld4 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32 urshr v14.8h, v8.8h, #8 PF add PF_X, PF_X, #8 PF tst PF_CTL, #0xF urshr v15.8h, v9.8h, #8 urshr v16.8h, v10.8h, #8 urshr v17.8h, v11.8h, #8 PF beq 10f PF add PF_X, PF_X, #8 PF sub PF_CTL, PF_CTL, #1 10: raddhn v28.8b, v14.8h, v8.8h raddhn v29.8b, v15.8h, v9.8h PF cmp PF_X, ORIG_W raddhn v30.8b, v16.8h, v10.8h raddhn v31.8b, v17.8h, v11.8h uqadd v28.8b, v0.8b, v28.8b uqadd v29.8b, v1.8b, v29.8b uqadd v30.8b, v2.8b, v30.8b uqadd v31.8b, v3.8b, v31.8b fetch_src_pixblock PF 
lsl DUMMY, PF_X, #src_bpp_shift PF prfm PREFETCH_MODE, [PF_SRC, DUMMY] mvn v22.8b, v3.8b PF lsl DUMMY, PF_X, #dst_bpp_shift PF prfm PREFETCH_MODE, [PF_DST, DUMMY] st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32 PF ble 10f PF sub PF_X, PF_X, ORIG_W 10: umull v8.8h, v22.8b, v4.8b PF ble 10f PF subs PF_CTL, PF_CTL, #0x10 10: umull v9.8h, v22.8b, v5.8b PF ble 10f PF lsl DUMMY, SRC_STRIDE, #src_bpp_shift PF ldrsb DUMMY, [PF_SRC, DUMMY] PF add PF_SRC, PF_SRC, #1 10: umull v10.8h, v22.8b, v6.8b PF ble 10f PF lsl DUMMY, DST_STRIDE, #dst_bpp_shift PF ldrsb DUMMY, [PF_DST, DUMMY] PF add PF_DST, PF_DST, #1 10: umull v11.8h, v22.8b, v7.8b .endm generate_composite_function \ pixman_composite_over_8888_8888_asm_neon, 32, 0, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ default_init, \ default_cleanup, \ pixman_composite_over_8888_8888_process_pixblock_head, \ pixman_composite_over_8888_8888_process_pixblock_tail, \ pixman_composite_over_8888_8888_process_pixblock_tail_head generate_composite_function_single_scanline \ pixman_composite_scanline_over_asm_neon, 32, 0, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ default_init, \ default_cleanup, \ pixman_composite_over_8888_8888_process_pixblock_head, \ pixman_composite_over_8888_8888_process_pixblock_tail, \ pixman_composite_over_8888_8888_process_pixblock_tail_head /******************************************************************************/ .macro pixman_composite_over_n_8888_process_pixblock_head /* deinterleaved source pixels in {v0, v1, v2, v3} */ /* inverted alpha in {v24} */ /* destination pixels in {v4, v5, v6, v7} */ umull v8.8h, v24.8b, v4.8b umull v9.8h, v24.8b, v5.8b umull v10.8h, v24.8b, v6.8b umull v11.8h, v24.8b, v7.8b .endm .macro pixman_composite_over_n_8888_process_pixblock_tail urshr v14.8h, v8.8h, #8 urshr v15.8h, v9.8h, #8 urshr v16.8h, v10.8h, #8 urshr v17.8h, v11.8h, #8 raddhn v28.8b, v14.8h, v8.8h raddhn v29.8b, v15.8h, v9.8h raddhn v30.8b, v16.8h, v10.8h raddhn v31.8b, v17.8h, v11.8h uqadd v28.8b, v0.8b, v28.8b uqadd v29.8b, v1.8b, v29.8b uqadd v30.8b, v2.8b, v30.8b uqadd v31.8b, v3.8b, v31.8b .endm .macro pixman_composite_over_n_8888_process_pixblock_tail_head urshr v14.8h, v8.8h, #8 urshr v15.8h, v9.8h, #8 urshr v16.8h, v10.8h, #8 urshr v17.8h, v11.8h, #8 raddhn v28.8b, v14.8h, v8.8h raddhn v29.8b, v15.8h, v9.8h raddhn v30.8b, v16.8h, v10.8h raddhn v31.8b, v17.8h, v11.8h ld4 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32 uqadd v28.8b, v0.8b, v28.8b PF add PF_X, PF_X, #8 PF tst PF_CTL, #0x0F PF beq 10f PF add PF_X, PF_X, #8 PF sub PF_CTL, PF_CTL, #1 10: uqadd v29.8b, v1.8b, v29.8b uqadd v30.8b, v2.8b, v30.8b uqadd v31.8b, v3.8b, v31.8b PF cmp PF_X, ORIG_W umull v8.8h, v24.8b, v4.8b PF lsl DUMMY, PF_X, #dst_bpp_shift PF prfm PREFETCH_MODE, [PF_DST, DUMMY] umull v9.8h, v24.8b, v5.8b PF ble 10f PF sub PF_X, PF_X, ORIG_W 10: umull v10.8h, v24.8b, v6.8b PF subs PF_CTL, PF_CTL, #0x10 umull v11.8h, v24.8b, v7.8b PF ble 10f PF lsl DUMMY, DST_STRIDE, #dst_bpp_shift PF ldrsb DUMMY, [PF_DST, DUMMY] PF add PF_DST, PF_DST, #1 10: st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32 .endm .macro pixman_composite_over_n_8888_init mov v3.s[0], w4 dup v0.8b, v3.b[0] dup v1.8b, v3.b[1] dup v2.8b, v3.b[2] dup v3.8b, v3.b[3] mvn v24.8b, v3.8b /* get inverted alpha */ .endm generate_composite_function \ pixman_composite_over_n_8888_asm_neon, 0, 0, 32, \ FLAG_DST_READWRITE | 
FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_over_n_8888_init, \ default_cleanup, \ pixman_composite_over_8888_8888_process_pixblock_head, \ pixman_composite_over_8888_8888_process_pixblock_tail, \ pixman_composite_over_n_8888_process_pixblock_tail_head /******************************************************************************/ .macro pixman_composite_over_reverse_n_8888_process_pixblock_tail_head urshr v14.8h, v8.8h, #8 PF add PF_X, PF_X, #8 PF tst PF_CTL, #0xF urshr v15.8h, v9.8h, #8 urshr v12.8h, v10.8h, #8 urshr v13.8h, v11.8h, #8 PF beq 10f PF add PF_X, PF_X, #8 PF sub PF_CTL, PF_CTL, #1 10: raddhn v28.8b, v14.8h, v8.8h raddhn v29.8b, v15.8h, v9.8h PF cmp PF_X, ORIG_W raddhn v30.8b, v12.8h, v10.8h raddhn v31.8b, v13.8h, v11.8h uqadd v28.8b, v0.8b, v28.8b uqadd v29.8b, v1.8b, v29.8b uqadd v30.8b, v2.8b, v30.8b uqadd v31.8b, v3.8b, v31.8b ld4 {v0.8b, v1.8b, v2.8b, v3.8b}, [DST_R], #32 mvn v22.8b, v3.8b PF lsl DUMMY, PF_X, #dst_bpp_shift PF prfm PREFETCH_MODE, [PF_DST, DUMMY] st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32 PF blt 10f PF sub PF_X, PF_X, ORIG_W 10: umull v8.8h, v22.8b, v4.8b PF blt 10f PF subs PF_CTL, PF_CTL, #0x10 10: umull v9.8h, v22.8b, v5.8b umull v10.8h, v22.8b, v6.8b PF blt 10f PF lsl DUMMY, DST_STRIDE, #dst_bpp_shift PF ldrsb DUMMY, [PF_DST, DUMMY] PF add PF_DST, PF_DST, #1 10: umull v11.8h, v22.8b, v7.8b .endm .macro pixman_composite_over_reverse_n_8888_init mov v7.s[0], w4 dup v4.8b, v7.b[0] dup v5.8b, v7.b[1] dup v6.8b, v7.b[2] dup v7.8b, v7.b[3] .endm generate_composite_function \ pixman_composite_over_reverse_n_8888_asm_neon, 0, 0, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_over_reverse_n_8888_init, \ default_cleanup, \ pixman_composite_over_8888_8888_process_pixblock_head, \ pixman_composite_over_8888_8888_process_pixblock_tail, \ pixman_composite_over_reverse_n_8888_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 0, /* dst_r_basereg */ \ 4, /* src_basereg */ \ 24 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_over_8888_8_0565_process_pixblock_head umull v0.8h, v24.8b, v8.8b /* IN for SRC pixels (part1) */ umull v1.8h, v24.8b, v9.8b umull v2.8h, v24.8b, v10.8b umull v3.8h, v24.8b, v11.8b mov v4.d[1], v5.d[0] shrn v25.8b, v4.8h, #8 /* convert DST_R data to 32-bpp (part1) */ shrn v26.8b, v4.8h, #3 sli v4.8h, v4.8h, #5 urshr v17.8h, v0.8h, #8 /* IN for SRC pixels (part2) */ urshr v18.8h, v1.8h, #8 urshr v19.8h, v2.8h, #8 urshr v20.8h, v3.8h, #8 raddhn v0.8b, v0.8h, v17.8h raddhn v1.8b, v1.8h, v18.8h raddhn v2.8b, v2.8h, v19.8h raddhn v3.8b, v3.8h, v20.8h sri v25.8b, v25.8b, #5 /* convert DST_R data to 32-bpp (part2) */ sri v26.8b, v26.8b, #6 mvn v3.8b, v3.8b shrn v30.8b, v4.8h, #2 umull v18.8h, v3.8b, v25.8b /* now do alpha blending */ umull v19.8h, v3.8b, v26.8b umull v20.8h, v3.8b, v30.8b .endm .macro pixman_composite_over_8888_8_0565_process_pixblock_tail /* 3 cycle bubble (after vmull.u8) */ urshr v5.8h, v18.8h, #8 urshr v6.8h, v19.8h, #8 urshr v7.8h, v20.8h, #8 raddhn v17.8b, v18.8h, v5.8h raddhn v19.8b, v19.8h, v6.8h raddhn v18.8b, v20.8h, v7.8h uqadd v5.8b, v2.8b, v17.8b /* 1 cycle bubble */ uqadd v6.8b, v0.8b, v18.8b uqadd v7.8b, v1.8b, v19.8b ushll v14.8h, v5.8b, #7 /* convert to 16bpp */ sli v14.8h, v14.8h, #1 ushll v18.8h, v7.8b, #7 sli v18.8h, v18.8h, #1 ushll 
v19.8h, v6.8b, #7 sli v19.8h, v19.8h, #1 sri v14.8h, v18.8h, #5 /* 1 cycle bubble */ sri v14.8h, v19.8h, #11 mov v28.d[0], v14.d[0] mov v29.d[0], v14.d[1] .endm .macro pixman_composite_over_8888_8_0565_process_pixblock_tail_head #if 0 ld1 {v4.8h}, [DST_R], #16 shrn v25.8b, v4.8h, #8 fetch_mask_pixblock shrn v26.8b, v4.8h, #3 fetch_src_pixblock umull v22.8h, v24.8b, v10.8b urshr v13.8h, v18.8h, #8 urshr v11.8h, v19.8h, #8 urshr v15.8h, v20.8h, #8 raddhn v17.8b, v18.8h, v13.8h raddhn v19.8b, v19.8h, v11.8h raddhn v18.8b, v20.8h, v15.8h uqadd v17.8b, v2.8b, v17.8b umull v21.8h, v24.8b, v9.8b uqadd v18.8b, v0.8b, v18.8b uqadd v19.8b, v1.8b, v19.8b ushll v14.8h, v17.8b, #7 sli v14.8h, v14.8h, #1 umull v20.8h, v24.8b, v8.8b ushll v18.8h, v18.8b, #7 sli v18.8h, v18.8h, #1 ushll v19.8h, v19.8b, #7 sli v19.8h, v19.8h, #1 sri v14.8h, v18.8h, #5 umull v23.8h, v24.8b, v11.8b sri v14.8h, v19.8h, #11 mov v28.d[0], v14.d[0] mov v29.d[0], v14.d[1] cache_preload 8, 8 sli v4.8h, v4.8h, #5 urshr v16.8h, v20.8h, #8 urshr v17.8h, v21.8h, #8 urshr v18.8h, v22.8h, #8 urshr v19.8h, v23.8h, #8 raddhn v0.8b, v20.8h, v16.8h raddhn v1.8b, v21.8h, v17.8h raddhn v2.8b, v22.8h, v18.8h raddhn v3.8b, v23.8h, v19.8h sri v25.8b, v25.8b, #5 sri v26.8b, v26.8b, #6 mvn v3.8b, v3.8b shrn v30.8b, v4.8h, #2 st1 {v14.8h}, [DST_W], #16 umull v18.8h, v3.8b, v25.8b umull v19.8h, v3.8b, v26.8b umull v20.8h, v3.8b, v30.8b #else pixman_composite_over_8888_8_0565_process_pixblock_tail st1 {v28.4h, v29.4h}, [DST_W], #16 ld1 {v4.4h, v5.4h}, [DST_R], #16 fetch_mask_pixblock fetch_src_pixblock pixman_composite_over_8888_8_0565_process_pixblock_head #endif .endm generate_composite_function \ pixman_composite_over_8888_8_0565_asm_neon, 32, 8, 16, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ default_init_need_all_regs, \ default_cleanup_need_all_regs, \ pixman_composite_over_8888_8_0565_process_pixblock_head, \ pixman_composite_over_8888_8_0565_process_pixblock_tail, \ pixman_composite_over_8888_8_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 8, /* src_basereg */ \ 24 /* mask_basereg */ /******************************************************************************/ /* * This function needs a special initialization of solid mask. * Solid source pixel data is fetched from stack at ARGS_STACK_OFFSET * offset, split into color components and replicated in d8-d11 * registers. Additionally, this function needs all the NEON registers, * so it has to save d8-d15 registers which are callee saved according * to ABI. These registers are restored from 'cleanup' macro. All the * other NEON registers are caller saved, so can be clobbered freely * without introducing any problems. 
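 * Note (AArch64 port): the solid colour actually arrives in the w4 argument
 * register here; the 'init' macro below moves it into a vector lane and
 * splits it into the v8-v11 per-channel byte vectors with dup.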
*/ .macro pixman_composite_over_n_8_0565_init mov v11.s[0], w4 dup v8.8b, v11.b[0] dup v9.8b, v11.b[1] dup v10.8b, v11.b[2] dup v11.8b, v11.b[3] .endm .macro pixman_composite_over_n_8_0565_cleanup .endm generate_composite_function \ pixman_composite_over_n_8_0565_asm_neon, 0, 8, 16, \ FLAG_DST_READWRITE, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_over_n_8_0565_init, \ pixman_composite_over_n_8_0565_cleanup, \ pixman_composite_over_8888_8_0565_process_pixblock_head, \ pixman_composite_over_8888_8_0565_process_pixblock_tail, \ pixman_composite_over_8888_8_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 8, /* src_basereg */ \ 24 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_over_8888_n_0565_init mov v24.s[0], w6 dup v24.8b, v24.b[3] .endm .macro pixman_composite_over_8888_n_0565_cleanup .endm generate_composite_function \ pixman_composite_over_8888_n_0565_asm_neon, 32, 0, 16, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_over_8888_n_0565_init, \ pixman_composite_over_8888_n_0565_cleanup, \ pixman_composite_over_8888_8_0565_process_pixblock_head, \ pixman_composite_over_8888_8_0565_process_pixblock_tail, \ pixman_composite_over_8888_8_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 8, /* src_basereg */ \ 24 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_src_0565_0565_process_pixblock_head .endm .macro pixman_composite_src_0565_0565_process_pixblock_tail .endm .macro pixman_composite_src_0565_0565_process_pixblock_tail_head st1 {v0.4h, v1.4h, v2.4h, v3.4h}, [DST_W], #32 fetch_src_pixblock cache_preload 16, 16 .endm generate_composite_function \ pixman_composite_src_0565_0565_asm_neon, 16, 0, 16, \ FLAG_DST_WRITEONLY, \ 16, /* number of pixels, processed in a single block */ \ 10, /* prefetch distance */ \ default_init, \ default_cleanup, \ pixman_composite_src_0565_0565_process_pixblock_head, \ pixman_composite_src_0565_0565_process_pixblock_tail, \ pixman_composite_src_0565_0565_process_pixblock_tail_head, \ 0, /* dst_w_basereg */ \ 0, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 0 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_src_n_8_process_pixblock_head .endm .macro pixman_composite_src_n_8_process_pixblock_tail .endm .macro pixman_composite_src_n_8_process_pixblock_tail_head st1 {v0.8b, v1.8b, v2.8b, v3.8b}, [DST_W], 32 .endm .macro pixman_composite_src_n_8_init mov v0.s[0], w4 dup v3.8b, v0.b[0] dup v2.8b, v0.b[0] dup v1.8b, v0.b[0] dup v0.8b, v0.b[0] .endm .macro pixman_composite_src_n_8_cleanup .endm generate_composite_function \ pixman_composite_src_n_8_asm_neon, 0, 0, 8, \ FLAG_DST_WRITEONLY, \ 32, /* number of pixels, processed in a single block */ \ 0, /* prefetch distance */ \ pixman_composite_src_n_8_init, \ pixman_composite_src_n_8_cleanup, \ pixman_composite_src_n_8_process_pixblock_head, \ pixman_composite_src_n_8_process_pixblock_tail, \ pixman_composite_src_n_8_process_pixblock_tail_head, \ 0, /* dst_w_basereg */ \ 0, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 0 /* mask_basereg */ /******************************************************************************/ .macro 
pixman_composite_src_n_0565_process_pixblock_head .endm .macro pixman_composite_src_n_0565_process_pixblock_tail .endm .macro pixman_composite_src_n_0565_process_pixblock_tail_head st1 {v0.4h, v1.4h, v2.4h, v3.4h}, [DST_W], #32 .endm .macro pixman_composite_src_n_0565_init mov v0.s[0], w4 dup v3.4h, v0.h[0] dup v2.4h, v0.h[0] dup v1.4h, v0.h[0] dup v0.4h, v0.h[0] .endm .macro pixman_composite_src_n_0565_cleanup .endm generate_composite_function \ pixman_composite_src_n_0565_asm_neon, 0, 0, 16, \ FLAG_DST_WRITEONLY, \ 16, /* number of pixels, processed in a single block */ \ 0, /* prefetch distance */ \ pixman_composite_src_n_0565_init, \ pixman_composite_src_n_0565_cleanup, \ pixman_composite_src_n_0565_process_pixblock_head, \ pixman_composite_src_n_0565_process_pixblock_tail, \ pixman_composite_src_n_0565_process_pixblock_tail_head, \ 0, /* dst_w_basereg */ \ 0, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 0 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_src_n_8888_process_pixblock_head .endm .macro pixman_composite_src_n_8888_process_pixblock_tail .endm .macro pixman_composite_src_n_8888_process_pixblock_tail_head st1 {v0.2s, v1.2s, v2.2s, v3.2s}, [DST_W], #32 .endm .macro pixman_composite_src_n_8888_init mov v0.s[0], w4 dup v3.2s, v0.s[0] dup v2.2s, v0.s[0] dup v1.2s, v0.s[0] dup v0.2s, v0.s[0] .endm .macro pixman_composite_src_n_8888_cleanup .endm generate_composite_function \ pixman_composite_src_n_8888_asm_neon, 0, 0, 32, \ FLAG_DST_WRITEONLY, \ 8, /* number of pixels, processed in a single block */ \ 0, /* prefetch distance */ \ pixman_composite_src_n_8888_init, \ pixman_composite_src_n_8888_cleanup, \ pixman_composite_src_n_8888_process_pixblock_head, \ pixman_composite_src_n_8888_process_pixblock_tail, \ pixman_composite_src_n_8888_process_pixblock_tail_head, \ 0, /* dst_w_basereg */ \ 0, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 0 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_src_8888_8888_process_pixblock_head .endm .macro pixman_composite_src_8888_8888_process_pixblock_tail .endm .macro pixman_composite_src_8888_8888_process_pixblock_tail_head st1 {v0.2s, v1.2s, v2.2s, v3.2s}, [DST_W], #32 fetch_src_pixblock cache_preload 8, 8 .endm generate_composite_function \ pixman_composite_src_8888_8888_asm_neon, 32, 0, 32, \ FLAG_DST_WRITEONLY, \ 8, /* number of pixels, processed in a single block */ \ 10, /* prefetch distance */ \ default_init, \ default_cleanup, \ pixman_composite_src_8888_8888_process_pixblock_head, \ pixman_composite_src_8888_8888_process_pixblock_tail, \ pixman_composite_src_8888_8888_process_pixblock_tail_head, \ 0, /* dst_w_basereg */ \ 0, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 0 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_src_x888_8888_process_pixblock_head orr v0.8b, v0.8b, v4.8b orr v1.8b, v1.8b, v4.8b orr v2.8b, v2.8b, v4.8b orr v3.8b, v3.8b, v4.8b .endm .macro pixman_composite_src_x888_8888_process_pixblock_tail .endm .macro pixman_composite_src_x888_8888_process_pixblock_tail_head st1 {v0.2s, v1.2s, v2.2s, v3.2s}, [DST_W], #32 fetch_src_pixblock orr v0.8b, v0.8b, v4.8b orr v1.8b, v1.8b, v4.8b orr v2.8b, v2.8b, v4.8b orr v3.8b, v3.8b, v4.8b cache_preload 8, 8 .endm .macro pixman_composite_src_x888_8888_init movi v4.2s, #0xff, lsl 24 .endm generate_composite_function \ pixman_composite_src_x888_8888_asm_neon, 32, 0, 32, 
\ FLAG_DST_WRITEONLY, \ 8, /* number of pixels, processed in a single block */ \ 10, /* prefetch distance */ \ pixman_composite_src_x888_8888_init, \ default_cleanup, \ pixman_composite_src_x888_8888_process_pixblock_head, \ pixman_composite_src_x888_8888_process_pixblock_tail, \ pixman_composite_src_x888_8888_process_pixblock_tail_head, \ 0, /* dst_w_basereg */ \ 0, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 0 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_src_n_8_8888_process_pixblock_head /* expecting solid source in {v0, v1, v2, v3} */ /* mask is in v24 (v25, v26, v27 are unused) */ /* in */ umull v8.8h, v24.8b, v0.8b umull v9.8h, v24.8b, v1.8b umull v10.8h, v24.8b, v2.8b umull v11.8h, v24.8b, v3.8b ursra v8.8h, v8.8h, #8 ursra v9.8h, v9.8h, #8 ursra v10.8h, v10.8h, #8 ursra v11.8h, v11.8h, #8 .endm .macro pixman_composite_src_n_8_8888_process_pixblock_tail rshrn v28.8b, v8.8h, #8 rshrn v29.8b, v9.8h, #8 rshrn v30.8b, v10.8h, #8 rshrn v31.8b, v11.8h, #8 .endm .macro pixman_composite_src_n_8_8888_process_pixblock_tail_head fetch_mask_pixblock PF add PF_X, PF_X, #8 rshrn v28.8b, v8.8h, #8 PF tst PF_CTL, #0x0F rshrn v29.8b, v9.8h, #8 PF beq 10f PF add PF_X, PF_X, #8 10: rshrn v30.8b, v10.8h, #8 PF beq 10f PF sub PF_CTL, PF_CTL, #1 10: rshrn v31.8b, v11.8h, #8 PF cmp PF_X, ORIG_W umull v8.8h, v24.8b, v0.8b PF lsl DUMMY, PF_X, #mask_bpp_shift PF prfm PREFETCH_MODE, [PF_MASK, DUMMY] umull v9.8h, v24.8b, v1.8b PF ble 10f PF sub PF_X, PF_X, ORIG_W 10: umull v10.8h, v24.8b, v2.8b PF ble 10f PF subs PF_CTL, PF_CTL, #0x10 10: umull v11.8h, v24.8b, v3.8b PF ble 10f PF lsl DUMMY, MASK_STRIDE, #mask_bpp_shift PF ldrsb DUMMY, [PF_MASK, DUMMY] PF add PF_MASK, PF_MASK, #1 10: st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32 ursra v8.8h, v8.8h, #8 ursra v9.8h, v9.8h, #8 ursra v10.8h, v10.8h, #8 ursra v11.8h, v11.8h, #8 .endm .macro pixman_composite_src_n_8_8888_init mov v3.s[0], w4 dup v0.8b, v3.b[0] dup v1.8b, v3.b[1] dup v2.8b, v3.b[2] dup v3.8b, v3.b[3] .endm .macro pixman_composite_src_n_8_8888_cleanup .endm generate_composite_function \ pixman_composite_src_n_8_8888_asm_neon, 0, 8, 32, \ FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_src_n_8_8888_init, \ pixman_composite_src_n_8_8888_cleanup, \ pixman_composite_src_n_8_8888_process_pixblock_head, \ pixman_composite_src_n_8_8888_process_pixblock_tail, \ pixman_composite_src_n_8_8888_process_pixblock_tail_head, \ /******************************************************************************/ .macro pixman_composite_src_n_8_8_process_pixblock_head umull v0.8h, v24.8b, v16.8b umull v1.8h, v25.8b, v16.8b umull v2.8h, v26.8b, v16.8b umull v3.8h, v27.8b, v16.8b ursra v0.8h, v0.8h, #8 ursra v1.8h, v1.8h, #8 ursra v2.8h, v2.8h, #8 ursra v3.8h, v3.8h, #8 .endm .macro pixman_composite_src_n_8_8_process_pixblock_tail rshrn v28.8b, v0.8h, #8 rshrn v29.8b, v1.8h, #8 rshrn v30.8b, v2.8h, #8 rshrn v31.8b, v3.8h, #8 .endm .macro pixman_composite_src_n_8_8_process_pixblock_tail_head fetch_mask_pixblock PF add PF_X, PF_X, #8 rshrn v28.8b, v0.8h, #8 PF tst PF_CTL, #0x0F rshrn v29.8b, v1.8h, #8 PF beq 10f PF add PF_X, PF_X, #8 10: rshrn v30.8b, v2.8h, #8 PF beq 10f PF sub PF_CTL, PF_CTL, #1 10: rshrn v31.8b, v3.8h, #8 PF cmp PF_X, ORIG_W umull v0.8h, v24.8b, v16.8b PF lsl DUMMY, PF_X, mask_bpp_shift PF prfm PREFETCH_MODE, [PF_MASK, DUMMY] umull v1.8h, v25.8b, v16.8b PF ble 10f PF sub 
PF_X, PF_X, ORIG_W 10: umull v2.8h, v26.8b, v16.8b PF ble 10f PF subs PF_CTL, PF_CTL, #0x10 10: umull v3.8h, v27.8b, v16.8b PF ble 10f PF lsl DUMMY, MASK_STRIDE, #mask_bpp_shift PF ldrsb DUMMY, [PF_MASK, DUMMY] PF add PF_MASK, PF_MASK, #1 10: st1 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32 ursra v0.8h, v0.8h, #8 ursra v1.8h, v1.8h, #8 ursra v2.8h, v2.8h, #8 ursra v3.8h, v3.8h, #8 .endm .macro pixman_composite_src_n_8_8_init mov v16.s[0], w4 dup v16.8b, v16.b[3] .endm .macro pixman_composite_src_n_8_8_cleanup .endm generate_composite_function \ pixman_composite_src_n_8_8_asm_neon, 0, 8, 8, \ FLAG_DST_WRITEONLY, \ 32, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_src_n_8_8_init, \ pixman_composite_src_n_8_8_cleanup, \ pixman_composite_src_n_8_8_process_pixblock_head, \ pixman_composite_src_n_8_8_process_pixblock_tail, \ pixman_composite_src_n_8_8_process_pixblock_tail_head /******************************************************************************/ .macro pixman_composite_over_n_8_8888_process_pixblock_head /* expecting deinterleaved source data in {v8, v9, v10, v11} */ /* v8 - blue, v9 - green, v10 - red, v11 - alpha */ /* and destination data in {v4, v5, v6, v7} */ /* mask is in v24 (v25, v26, v27 are unused) */ /* in */ umull v12.8h, v24.8b, v8.8b umull v13.8h, v24.8b, v9.8b umull v14.8h, v24.8b, v10.8b umull v15.8h, v24.8b, v11.8b urshr v16.8h, v12.8h, #8 urshr v17.8h, v13.8h, #8 urshr v18.8h, v14.8h, #8 urshr v19.8h, v15.8h, #8 raddhn v0.8b, v12.8h, v16.8h raddhn v1.8b, v13.8h, v17.8h raddhn v2.8b, v14.8h, v18.8h raddhn v3.8b, v15.8h, v19.8h mvn v25.8b, v3.8b /* get inverted alpha */ /* source: v0 - blue, v1 - green, v2 - red, v3 - alpha */ /* destination: v4 - blue, v5 - green, v6 - red, v7 - alpha */ /* now do alpha blending */ umull v12.8h, v25.8b, v4.8b umull v13.8h, v25.8b, v5.8b umull v14.8h, v25.8b, v6.8b umull v15.8h, v25.8b, v7.8b .endm .macro pixman_composite_over_n_8_8888_process_pixblock_tail urshr v16.8h, v12.8h, #8 urshr v17.8h, v13.8h, #8 urshr v18.8h, v14.8h, #8 urshr v19.8h, v15.8h, #8 raddhn v28.8b, v16.8h, v12.8h raddhn v29.8b, v17.8h, v13.8h raddhn v30.8b, v18.8h, v14.8h raddhn v31.8b, v19.8h, v15.8h uqadd v28.8b, v0.8b, v28.8b uqadd v29.8b, v1.8b, v29.8b uqadd v30.8b, v2.8b, v30.8b uqadd v31.8b, v3.8b, v31.8b .endm .macro pixman_composite_over_n_8_8888_process_pixblock_tail_head urshr v16.8h, v12.8h, #8 ld4 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32 urshr v17.8h, v13.8h, #8 fetch_mask_pixblock urshr v18.8h, v14.8h, #8 PF add PF_X, PF_X, #8 urshr v19.8h, v15.8h, #8 PF tst PF_CTL, #0x0F raddhn v28.8b, v16.8h, v12.8h PF beq 10f PF add PF_X, PF_X, #8 10: raddhn v29.8b, v17.8h, v13.8h PF beq 10f PF sub PF_CTL, PF_CTL, #1 10: raddhn v30.8b, v18.8h, v14.8h PF cmp PF_X, ORIG_W raddhn v31.8b, v19.8h, v15.8h PF lsl DUMMY, PF_X, #dst_bpp_shift PF prfm PREFETCH_MODE, [PF_DST, DUMMY] umull v16.8h, v24.8b, v8.8b PF lsl DUMMY, PF_X, #mask_bpp_shift PF prfm PREFETCH_MODE, [PF_MASK, DUMMY] umull v17.8h, v24.8b, v9.8b PF ble 10f PF sub PF_X, PF_X, ORIG_W 10: umull v18.8h, v24.8b, v10.8b PF ble 10f PF subs PF_CTL, PF_CTL, #0x10 10: umull v19.8h, v24.8b, v11.8b PF ble 10f PF lsl DUMMY, DST_STRIDE, #dst_bpp_shift PF ldrsb DUMMY, [PF_DST, DUMMY] PF add PF_DST, PF_DST, #1 10: uqadd v28.8b, v0.8b, v28.8b PF ble 10f PF lsl DUMMY, MASK_STRIDE, #mask_bpp_shift PF ldrsb DUMMY, [PF_MASK, DUMMY] PF add PF_MASK, PF_MASK, #1 10: uqadd v29.8b, v1.8b, v29.8b uqadd v30.8b, v2.8b, v30.8b uqadd v31.8b, v3.8b, v31.8b urshr v12.8h, v16.8h, #8 
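/* The urshr #8 / raddhn pairs below finish the widening umull multiplies
 * above: together they compute (a*b + 128 + ((a*b + 128) >> 8)) >> 8, i.e.
 * a*b/255 rounded to nearest, the same rounding as pixman's MUL_UN8 helper.
 * Illustrative C sketch (assumes <stdint.h>; not part of this file):
 *
 *   static inline uint8_t mul_un8(uint8_t a, uint8_t b)
 *   {
 *       uint16_t t = (uint16_t)a * b + 0x80;    // product plus rounding bias
 *       return (uint8_t)((t + (t >> 8)) >> 8);  // exact (a*b)/255, rounded
 *   }
 */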
urshr v13.8h, v17.8h, #8 urshr v14.8h, v18.8h, #8 urshr v15.8h, v19.8h, #8 raddhn v0.8b, v16.8h, v12.8h raddhn v1.8b, v17.8h, v13.8h raddhn v2.8b, v18.8h, v14.8h raddhn v3.8b, v19.8h, v15.8h st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32 mvn v25.8b, v3.8b umull v12.8h, v25.8b, v4.8b umull v13.8h, v25.8b, v5.8b umull v14.8h, v25.8b, v6.8b umull v15.8h, v25.8b, v7.8b .endm .macro pixman_composite_over_n_8_8888_init mov v11.s[0], w4 dup v8.8b, v11.b[0] dup v9.8b, v11.b[1] dup v10.8b, v11.b[2] dup v11.8b, v11.b[3] .endm .macro pixman_composite_over_n_8_8888_cleanup .endm generate_composite_function \ pixman_composite_over_n_8_8888_asm_neon, 0, 8, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_over_n_8_8888_init, \ pixman_composite_over_n_8_8888_cleanup, \ pixman_composite_over_n_8_8888_process_pixblock_head, \ pixman_composite_over_n_8_8888_process_pixblock_tail, \ pixman_composite_over_n_8_8888_process_pixblock_tail_head /******************************************************************************/ .macro pixman_composite_over_n_8_8_process_pixblock_head umull v0.8h, v24.8b, v8.8b umull v1.8h, v25.8b, v8.8b umull v2.8h, v26.8b, v8.8b umull v3.8h, v27.8b, v8.8b urshr v10.8h, v0.8h, #8 urshr v11.8h, v1.8h, #8 urshr v12.8h, v2.8h, #8 urshr v13.8h, v3.8h, #8 raddhn v0.8b, v0.8h, v10.8h raddhn v1.8b, v1.8h, v11.8h raddhn v2.8b, v2.8h, v12.8h raddhn v3.8b, v3.8h, v13.8h mvn v24.8b, v0.8b mvn v25.8b, v1.8b mvn v26.8b, v2.8b mvn v27.8b, v3.8b umull v10.8h, v24.8b, v4.8b umull v11.8h, v25.8b, v5.8b umull v12.8h, v26.8b, v6.8b umull v13.8h, v27.8b, v7.8b .endm .macro pixman_composite_over_n_8_8_process_pixblock_tail urshr v14.8h, v10.8h, #8 urshr v15.8h, v11.8h, #8 urshr v16.8h, v12.8h, #8 urshr v17.8h, v13.8h, #8 raddhn v28.8b, v14.8h, v10.8h raddhn v29.8b, v15.8h, v11.8h raddhn v30.8b, v16.8h, v12.8h raddhn v31.8b, v17.8h, v13.8h uqadd v28.8b, v0.8b, v28.8b uqadd v29.8b, v1.8b, v29.8b uqadd v30.8b, v2.8b, v30.8b uqadd v31.8b, v3.8b, v31.8b .endm /* TODO: expand macros and do better instructions scheduling */ .macro pixman_composite_over_n_8_8_process_pixblock_tail_head ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32 pixman_composite_over_n_8_8_process_pixblock_tail fetch_mask_pixblock cache_preload 32, 32 st1 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32 pixman_composite_over_n_8_8_process_pixblock_head .endm .macro pixman_composite_over_n_8_8_init mov v8.s[0], w4 dup v8.8b, v8.b[3] .endm .macro pixman_composite_over_n_8_8_cleanup .endm generate_composite_function \ pixman_composite_over_n_8_8_asm_neon, 0, 8, 8, \ FLAG_DST_READWRITE, \ 32, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_over_n_8_8_init, \ pixman_composite_over_n_8_8_cleanup, \ pixman_composite_over_n_8_8_process_pixblock_head, \ pixman_composite_over_n_8_8_process_pixblock_tail, \ pixman_composite_over_n_8_8_process_pixblock_tail_head /******************************************************************************/ .macro pixman_composite_over_n_8888_8888_ca_process_pixblock_head /* * 'combine_mask_ca' replacement * * input: solid src (n) in {v8, v9, v10, v11} * dest in {v4, v5, v6, v7 } * mask in {v24, v25, v26, v27} * output: updated src in {v0, v1, v2, v3 } * updated mask in {v24, v25, v26, v3 } */ umull v0.8h, v24.8b, v8.8b umull v1.8h, v25.8b, v9.8b umull v2.8h, v26.8b, v10.8b umull v3.8h, v27.8b, v11.8b umull v12.8h, v11.8b, v25.8b umull v13.8h, v11.8b, v24.8b 
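/* The umull v12/v13/v14 sequence computes srca * mask for the three mask
 * colour channels.  The alpha channel of the updated mask equals srca * ma,
 * which is exactly the updated source alpha already being formed in v3, so
 * it is reused instead of being multiplied a second time. */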
umull v14.8h, v11.8b, v26.8b urshr v15.8h, v0.8h, #8 urshr v16.8h, v1.8h, #8 urshr v17.8h, v2.8h, #8 raddhn v0.8b, v0.8h, v15.8h raddhn v1.8b, v1.8h, v16.8h raddhn v2.8b, v2.8h, v17.8h urshr v15.8h, v13.8h, #8 urshr v16.8h, v12.8h, #8 urshr v17.8h, v14.8h, #8 urshr v18.8h, v3.8h, #8 raddhn v24.8b, v13.8h, v15.8h raddhn v25.8b, v12.8h, v16.8h raddhn v26.8b, v14.8h, v17.8h raddhn v3.8b, v3.8h, v18.8h /* * 'combine_over_ca' replacement * * output: updated dest in {v28, v29, v30, v31} */ mvn v24.8b, v24.8b mvn v25.8b, v25.8b mvn v26.8b, v26.8b mvn v27.8b, v3.8b umull v12.8h, v24.8b, v4.8b umull v13.8h, v25.8b, v5.8b umull v14.8h, v26.8b, v6.8b umull v15.8h, v27.8b, v7.8b .endm .macro pixman_composite_over_n_8888_8888_ca_process_pixblock_tail /* ... continue 'combine_over_ca' replacement */ urshr v16.8h, v12.8h, #8 urshr v17.8h, v13.8h, #8 urshr v18.8h, v14.8h, #8 urshr v19.8h, v15.8h, #8 raddhn v28.8b, v16.8h, v12.8h raddhn v29.8b, v17.8h, v13.8h raddhn v30.8b, v18.8h, v14.8h raddhn v31.8b, v19.8h, v15.8h uqadd v28.8b, v0.8b, v28.8b uqadd v29.8b, v1.8b, v29.8b uqadd v30.8b, v2.8b, v30.8b uqadd v31.8b, v3.8b, v31.8b .endm .macro pixman_composite_over_n_8888_8888_ca_process_pixblock_tail_head urshr v16.8h, v12.8h, #8 urshr v17.8h, v13.8h, #8 ld4 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32 urshr v18.8h, v14.8h, #8 urshr v19.8h, v15.8h, #8 raddhn v28.8b, v16.8h, v12.8h raddhn v29.8b, v17.8h, v13.8h raddhn v30.8b, v18.8h, v14.8h raddhn v31.8b, v19.8h, v15.8h fetch_mask_pixblock uqadd v28.8b, v0.8b, v28.8b uqadd v29.8b, v1.8b, v29.8b uqadd v30.8b, v2.8b, v30.8b uqadd v31.8b, v3.8b, v31.8b cache_preload 8, 8 pixman_composite_over_n_8888_8888_ca_process_pixblock_head st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32 .endm .macro pixman_composite_over_n_8888_8888_ca_init mov v13.s[0], w4 dup v8.8b, v13.b[0] dup v9.8b, v13.b[1] dup v10.8b, v13.b[2] dup v11.8b, v13.b[3] .endm .macro pixman_composite_over_n_8888_8888_ca_cleanup .endm generate_composite_function \ pixman_composite_over_n_8888_8888_ca_asm_neon, 0, 32, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_over_n_8888_8888_ca_init, \ pixman_composite_over_n_8888_8888_ca_cleanup, \ pixman_composite_over_n_8888_8888_ca_process_pixblock_head, \ pixman_composite_over_n_8888_8888_ca_process_pixblock_tail, \ pixman_composite_over_n_8888_8888_ca_process_pixblock_tail_head /******************************************************************************/ .macro pixman_composite_over_n_8888_0565_ca_process_pixblock_head /* * 'combine_mask_ca' replacement * * input: solid src (n) in {v8, v9, v10, v11} [B, G, R, A] * mask in {v24, v25, v26} [B, G, R] * output: updated src in {v0, v1, v2 } [B, G, R] * updated mask in {v24, v25, v26} [B, G, R] */ umull v0.8h, v24.8b, v8.8b umull v1.8h, v25.8b, v9.8b umull v2.8h, v26.8b, v10.8b umull v12.8h, v11.8b, v24.8b umull v13.8h, v11.8b, v25.8b umull v14.8h, v11.8b, v26.8b urshr v15.8h, v0.8h, #8 urshr v16.8h, v1.8h, #8 urshr v17.8h, v2.8h, #8 raddhn v0.8b, v0.8h, v15.8h raddhn v1.8b, v1.8h, v16.8h raddhn v2.8b, v2.8h, v17.8h urshr v19.8h, v12.8h, #8 urshr v20.8h, v13.8h, #8 urshr v21.8h, v14.8h, #8 raddhn v24.8b, v12.8h, v19.8h raddhn v25.8b, v13.8h, v20.8h /* * convert 8 r5g6b5 pixel data from {v4} to planar 8-bit format * and put data into v16 - blue, v17 - green, v18 - red */ mov v4.d[1], v5.d[0] shrn v17.8b, v4.8h, #3 shrn v18.8b, v4.8h, #8 raddhn v26.8b, v14.8h, v21.8h sli v4.8h, v4.8h, #5 sri v18.8b, v18.8b, 
#5 sri v17.8b, v17.8b, #6 /* * 'combine_over_ca' replacement * * output: updated dest in v16 - blue, v17 - green, v18 - red */ mvn v24.8b, v24.8b mvn v25.8b, v25.8b shrn v16.8b, v4.8h, #2 mvn v26.8b, v26.8b umull v5.8h, v16.8b, v24.8b umull v6.8h, v17.8b, v25.8b umull v7.8h, v18.8b, v26.8b .endm .macro pixman_composite_over_n_8888_0565_ca_process_pixblock_tail /* ... continue 'combine_over_ca' replacement */ urshr v13.8h, v5.8h, #8 urshr v14.8h, v6.8h, #8 urshr v15.8h, v7.8h, #8 raddhn v16.8b, v13.8h, v5.8h raddhn v17.8b, v14.8h, v6.8h raddhn v18.8b, v15.8h, v7.8h uqadd v16.8b, v0.8b, v16.8b uqadd v17.8b, v1.8b, v17.8b uqadd v18.8b, v2.8b, v18.8b /* * convert the results in v16, v17, v18 to r5g6b5 and store * them into {v14} */ ushll v14.8h, v18.8b, #7 sli v14.8h, v14.8h, #1 ushll v12.8h, v17.8b, #7 sli v12.8h, v12.8h, #1 ushll v13.8h, v16.8b, #7 sli v13.8h, v13.8h, #1 sri v14.8h, v12.8h, #5 sri v14.8h, v13.8h, #11 mov v28.d[0], v14.d[0] mov v29.d[0], v14.d[1] .endm .macro pixman_composite_over_n_8888_0565_ca_process_pixblock_tail_head fetch_mask_pixblock urshr v13.8h, v5.8h, #8 urshr v14.8h, v6.8h, #8 ld1 {v4.8h}, [DST_R], #16 urshr v15.8h, v7.8h, #8 raddhn v16.8b, v13.8h, v5.8h raddhn v17.8b, v14.8h, v6.8h raddhn v18.8b, v15.8h, v7.8h mov v5.d[0], v4.d[1] /* process_pixblock_head */ /* * 'combine_mask_ca' replacement * * input: solid src (n) in {v8, v9, v10, v11} [B, G, R, A] * mask in {v24, v25, v26} [B, G, R] * output: updated src in {v0, v1, v2 } [B, G, R] * updated mask in {v24, v25, v26} [B, G, R] */ uqadd v16.8b, v0.8b, v16.8b uqadd v17.8b, v1.8b, v17.8b uqadd v18.8b, v2.8b, v18.8b umull v0.8h, v24.8b, v8.8b umull v1.8h, v25.8b, v9.8b umull v2.8h, v26.8b, v10.8b /* * convert the result in v16, v17, v18 to r5g6b5 and store * it into {v14} */ ushll v14.8h, v18.8b, #7 sli v14.8h, v14.8h, #1 ushll v18.8h, v16.8b, #7 sli v18.8h, v18.8h, #1 ushll v19.8h, v17.8b, #7 sli v19.8h, v19.8h, #1 umull v12.8h, v11.8b, v24.8b sri v14.8h, v19.8h, #5 umull v13.8h, v11.8b, v25.8b umull v15.8h, v11.8b, v26.8b sri v14.8h, v18.8h, #11 mov v28.d[0], v14.d[0] mov v29.d[0], v14.d[1] cache_preload 8, 8 urshr v16.8h, v0.8h, #8 urshr v17.8h, v1.8h, #8 urshr v18.8h, v2.8h, #8 raddhn v0.8b, v0.8h, v16.8h raddhn v1.8b, v1.8h, v17.8h raddhn v2.8b, v2.8h, v18.8h urshr v19.8h, v12.8h, #8 urshr v20.8h, v13.8h, #8 urshr v21.8h, v15.8h, #8 raddhn v24.8b, v12.8h, v19.8h raddhn v25.8b, v13.8h, v20.8h /* * convert 8 r5g6b5 pixel data from {v4, v5} to planar * 8-bit format and put data into v16 - blue, v17 - green, * v18 - red */ mov v4.d[1], v5.d[0] shrn v17.8b, v4.8h, #3 shrn v18.8b, v4.8h, #8 raddhn v26.8b, v15.8h, v21.8h sli v4.8h, v4.8h, #5 sri v17.8b, v17.8b, #6 sri v18.8b, v18.8b, #5 /* * 'combine_over_ca' replacement * * output: updated dest in v16 - blue, v17 - green, v18 - red */ mvn v24.8b, v24.8b mvn v25.8b, v25.8b shrn v16.8b, v4.8h, #2 mvn v26.8b, v26.8b umull v5.8h, v16.8b, v24.8b umull v6.8h, v17.8b, v25.8b umull v7.8h, v18.8b, v26.8b st1 {v14.8h}, [DST_W], #16 .endm .macro pixman_composite_over_n_8888_0565_ca_init mov v13.s[0], w4 dup v8.8b, v13.b[0] dup v9.8b, v13.b[1] dup v10.8b, v13.b[2] dup v11.8b, v13.b[3] .endm .macro pixman_composite_over_n_8888_0565_ca_cleanup .endm generate_composite_function \ pixman_composite_over_n_8888_0565_ca_asm_neon, 0, 32, 16, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_over_n_8888_0565_ca_init, \ pixman_composite_over_n_8888_0565_ca_cleanup, \ 
pixman_composite_over_n_8888_0565_ca_process_pixblock_head, \ pixman_composite_over_n_8888_0565_ca_process_pixblock_tail, \ pixman_composite_over_n_8888_0565_ca_process_pixblock_tail_head /******************************************************************************/ .macro pixman_composite_in_n_8_process_pixblock_head /* expecting source data in {v0, v1, v2, v3} */ /* and destination data in {v4, v5, v6, v7} */ umull v8.8h, v4.8b, v3.8b umull v9.8h, v5.8b, v3.8b umull v10.8h, v6.8b, v3.8b umull v11.8h, v7.8b, v3.8b .endm .macro pixman_composite_in_n_8_process_pixblock_tail urshr v14.8h, v8.8h, #8 urshr v15.8h, v9.8h, #8 urshr v12.8h, v10.8h, #8 urshr v13.8h, v11.8h, #8 raddhn v28.8b, v8.8h, v14.8h raddhn v29.8b, v9.8h, v15.8h raddhn v30.8b, v10.8h, v12.8h raddhn v31.8b, v11.8h, v13.8h .endm .macro pixman_composite_in_n_8_process_pixblock_tail_head pixman_composite_in_n_8_process_pixblock_tail ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32 cache_preload 32, 32 pixman_composite_in_n_8_process_pixblock_head st1 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32 .endm .macro pixman_composite_in_n_8_init mov v3.s[0], w4 dup v3.8b, v3.b[3] .endm .macro pixman_composite_in_n_8_cleanup .endm generate_composite_function \ pixman_composite_in_n_8_asm_neon, 0, 0, 8, \ FLAG_DST_READWRITE, \ 32, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_in_n_8_init, \ pixman_composite_in_n_8_cleanup, \ pixman_composite_in_n_8_process_pixblock_head, \ pixman_composite_in_n_8_process_pixblock_tail, \ pixman_composite_in_n_8_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 24 /* mask_basereg */ .macro pixman_composite_add_n_8_8_process_pixblock_head /* expecting source data in {v8, v9, v10, v11} */ /* v8 - blue, v9 - green, v10 - red, v11 - alpha */ /* and destination data in {v4, v5, v6, v7} */ /* mask is in v24, v25, v26, v27 */ umull v0.8h, v24.8b, v11.8b umull v1.8h, v25.8b, v11.8b umull v2.8h, v26.8b, v11.8b umull v3.8h, v27.8b, v11.8b urshr v12.8h, v0.8h, #8 urshr v13.8h, v1.8h, #8 urshr v14.8h, v2.8h, #8 urshr v15.8h, v3.8h, #8 raddhn v0.8b, v0.8h, v12.8h raddhn v1.8b, v1.8h, v13.8h raddhn v2.8b, v2.8h, v14.8h raddhn v3.8b, v3.8h, v15.8h uqadd v28.8b, v0.8b, v4.8b uqadd v29.8b, v1.8b, v5.8b uqadd v30.8b, v2.8b, v6.8b uqadd v31.8b, v3.8b, v7.8b .endm .macro pixman_composite_add_n_8_8_process_pixblock_tail .endm /* TODO: expand macros and do better instructions scheduling */ .macro pixman_composite_add_n_8_8_process_pixblock_tail_head pixman_composite_add_n_8_8_process_pixblock_tail st1 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32 ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32 fetch_mask_pixblock cache_preload 32, 32 pixman_composite_add_n_8_8_process_pixblock_head .endm .macro pixman_composite_add_n_8_8_init mov v11.s[0], w4 dup v11.8b, v11.b[3] .endm .macro pixman_composite_add_n_8_8_cleanup .endm generate_composite_function \ pixman_composite_add_n_8_8_asm_neon, 0, 8, 8, \ FLAG_DST_READWRITE, \ 32, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_add_n_8_8_init, \ pixman_composite_add_n_8_8_cleanup, \ pixman_composite_add_n_8_8_process_pixblock_head, \ pixman_composite_add_n_8_8_process_pixblock_tail, \ pixman_composite_add_n_8_8_process_pixblock_tail_head /******************************************************************************/ .macro pixman_composite_add_8_8_8_process_pixblock_head /* expecting source data in {v0, v1, v2, v3} */ /* destination 
data in {v4, v5, v6, v7} */ /* mask in {v24, v25, v26, v27} */ umull v8.8h, v24.8b, v0.8b umull v9.8h, v25.8b, v1.8b umull v10.8h, v26.8b, v2.8b umull v11.8h, v27.8b, v3.8b urshr v0.8h, v8.8h, #8 urshr v1.8h, v9.8h, #8 urshr v12.8h, v10.8h, #8 urshr v13.8h, v11.8h, #8 raddhn v0.8b, v0.8h, v8.8h raddhn v1.8b, v1.8h, v9.8h raddhn v2.8b, v12.8h, v10.8h raddhn v3.8b, v13.8h, v11.8h uqadd v28.8b, v0.8b, v4.8b uqadd v29.8b, v1.8b, v5.8b uqadd v30.8b, v2.8b, v6.8b uqadd v31.8b, v3.8b, v7.8b .endm .macro pixman_composite_add_8_8_8_process_pixblock_tail .endm /* TODO: expand macros and do better instructions scheduling */ .macro pixman_composite_add_8_8_8_process_pixblock_tail_head pixman_composite_add_8_8_8_process_pixblock_tail st1 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32 ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32 fetch_mask_pixblock fetch_src_pixblock cache_preload 32, 32 pixman_composite_add_8_8_8_process_pixblock_head .endm .macro pixman_composite_add_8_8_8_init .endm .macro pixman_composite_add_8_8_8_cleanup .endm generate_composite_function \ pixman_composite_add_8_8_8_asm_neon, 8, 8, 8, \ FLAG_DST_READWRITE, \ 32, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_add_8_8_8_init, \ pixman_composite_add_8_8_8_cleanup, \ pixman_composite_add_8_8_8_process_pixblock_head, \ pixman_composite_add_8_8_8_process_pixblock_tail, \ pixman_composite_add_8_8_8_process_pixblock_tail_head /******************************************************************************/ .macro pixman_composite_add_8888_8888_8888_process_pixblock_head /* expecting source data in {v0, v1, v2, v3} */ /* destination data in {v4, v5, v6, v7} */ /* mask in {v24, v25, v26, v27} */ umull v8.8h, v27.8b, v0.8b umull v9.8h, v27.8b, v1.8b umull v10.8h, v27.8b, v2.8b umull v11.8h, v27.8b, v3.8b /* 1 cycle bubble */ ursra v8.8h, v8.8h, #8 ursra v9.8h, v9.8h, #8 ursra v10.8h, v10.8h, #8 ursra v11.8h, v11.8h, #8 .endm .macro pixman_composite_add_8888_8888_8888_process_pixblock_tail /* 2 cycle bubble */ rshrn v28.8b, v8.8h, #8 rshrn v29.8b, v9.8h, #8 rshrn v30.8b, v10.8h, #8 rshrn v31.8b, v11.8h, #8 uqadd v28.8b, v4.8b, v28.8b uqadd v29.8b, v5.8b, v29.8b uqadd v30.8b, v6.8b, v30.8b uqadd v31.8b, v7.8b, v31.8b .endm .macro pixman_composite_add_8888_8888_8888_process_pixblock_tail_head fetch_src_pixblock rshrn v28.8b, v8.8h, #8 fetch_mask_pixblock rshrn v29.8b, v9.8h, #8 umull v8.8h, v27.8b, v0.8b rshrn v30.8b, v10.8h, #8 umull v9.8h, v27.8b, v1.8b rshrn v31.8b, v11.8h, #8 umull v10.8h, v27.8b, v2.8b umull v11.8h, v27.8b, v3.8b uqadd v28.8b, v4.8b, v28.8b uqadd v29.8b, v5.8b, v29.8b uqadd v30.8b, v6.8b, v30.8b uqadd v31.8b, v7.8b, v31.8b ursra v8.8h, v8.8h, #8 ld4 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32 ursra v9.8h, v9.8h, #8 st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32 ursra v10.8h, v10.8h, #8 cache_preload 8, 8 ursra v11.8h, v11.8h, #8 .endm generate_composite_function \ pixman_composite_add_8888_8888_8888_asm_neon, 32, 32, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 10, /* prefetch distance */ \ default_init, \ default_cleanup, \ pixman_composite_add_8888_8888_8888_process_pixblock_head, \ pixman_composite_add_8888_8888_8888_process_pixblock_tail, \ pixman_composite_add_8888_8888_8888_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 24 /* mask_basereg */ generate_composite_function_single_scanline \ pixman_composite_scanline_add_mask_asm_neon, 32, 32, 
32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ default_init, \ default_cleanup, \ pixman_composite_add_8888_8888_8888_process_pixblock_head, \ pixman_composite_add_8888_8888_8888_process_pixblock_tail, \ pixman_composite_add_8888_8888_8888_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 24 /* mask_basereg */ /******************************************************************************/ generate_composite_function \ pixman_composite_add_8888_8_8888_asm_neon, 32, 8, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ default_init, \ default_cleanup, \ pixman_composite_add_8888_8888_8888_process_pixblock_head, \ pixman_composite_add_8888_8888_8888_process_pixblock_tail, \ pixman_composite_add_8888_8888_8888_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 27 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_add_n_8_8888_init mov v3.s[0], w4 dup v0.8b, v3.b[0] dup v1.8b, v3.b[1] dup v2.8b, v3.b[2] dup v3.8b, v3.b[3] .endm .macro pixman_composite_add_n_8_8888_cleanup .endm generate_composite_function \ pixman_composite_add_n_8_8888_asm_neon, 0, 8, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_add_n_8_8888_init, \ pixman_composite_add_n_8_8888_cleanup, \ pixman_composite_add_8888_8888_8888_process_pixblock_head, \ pixman_composite_add_8888_8888_8888_process_pixblock_tail, \ pixman_composite_add_8888_8888_8888_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 27 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_add_8888_n_8888_init mov v27.s[0], w6 dup v27.8b, v27.b[3] .endm .macro pixman_composite_add_8888_n_8888_cleanup .endm generate_composite_function \ pixman_composite_add_8888_n_8888_asm_neon, 32, 0, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_add_8888_n_8888_init, \ pixman_composite_add_8888_n_8888_cleanup, \ pixman_composite_add_8888_8888_8888_process_pixblock_head, \ pixman_composite_add_8888_8888_8888_process_pixblock_tail, \ pixman_composite_add_8888_8888_8888_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 27 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_out_reverse_8888_n_8888_process_pixblock_head /* expecting source data in {v0, v1, v2, v3} */ /* destination data in {v4, v5, v6, v7} */ /* solid mask is in v15 */ /* 'in' */ umull v11.8h, v15.8b, v3.8b umull v10.8h, v15.8b, v2.8b umull v9.8h, v15.8b, v1.8b umull v8.8h, v15.8b, v0.8b urshr v16.8h, v11.8h, #8 urshr v14.8h, v10.8h, #8 urshr v13.8h, v9.8h, #8 urshr v12.8h, v8.8h, #8 raddhn v3.8b, v11.8h, v16.8h raddhn v2.8b, v10.8h, v14.8h raddhn v1.8b, v9.8h, v13.8h raddhn v0.8b, v8.8h, v12.8h mvn v24.8b, v3.8b /* get inverted alpha */ /* now do alpha blending */ umull v8.8h, v24.8b, v4.8b umull v9.8h, v24.8b, v5.8b umull v10.8h, v24.8b, v6.8b umull v11.8h, v24.8b, v7.8b .endm .macro 
pixman_composite_out_reverse_8888_n_8888_process_pixblock_tail urshr v16.8h, v8.8h, #8 urshr v17.8h, v9.8h, #8 urshr v18.8h, v10.8h, #8 urshr v19.8h, v11.8h, #8 raddhn v28.8b, v16.8h, v8.8h raddhn v29.8b, v17.8h, v9.8h raddhn v30.8b, v18.8h, v10.8h raddhn v31.8b, v19.8h, v11.8h .endm /* TODO: expand macros and do better instructions scheduling */ .macro pixman_composite_out_reverse_8888_8888_8888_process_pixblock_tail_head ld4 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32 pixman_composite_out_reverse_8888_n_8888_process_pixblock_tail fetch_src_pixblock cache_preload 8, 8 fetch_mask_pixblock pixman_composite_out_reverse_8888_n_8888_process_pixblock_head st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32 .endm generate_composite_function_single_scanline \ pixman_composite_scanline_out_reverse_mask_asm_neon, 32, 32, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ default_init_need_all_regs, \ default_cleanup_need_all_regs, \ pixman_composite_out_reverse_8888_n_8888_process_pixblock_head, \ pixman_composite_out_reverse_8888_n_8888_process_pixblock_tail, \ pixman_composite_out_reverse_8888_8888_8888_process_pixblock_tail_head \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 12 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_over_8888_n_8888_process_pixblock_head pixman_composite_out_reverse_8888_n_8888_process_pixblock_head .endm .macro pixman_composite_over_8888_n_8888_process_pixblock_tail pixman_composite_out_reverse_8888_n_8888_process_pixblock_tail uqadd v28.8b, v0.8b, v28.8b uqadd v29.8b, v1.8b, v29.8b uqadd v30.8b, v2.8b, v30.8b uqadd v31.8b, v3.8b, v31.8b .endm /* TODO: expand macros and do better instructions scheduling */ .macro pixman_composite_over_8888_n_8888_process_pixblock_tail_head ld4 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32 pixman_composite_over_8888_n_8888_process_pixblock_tail fetch_src_pixblock cache_preload 8, 8 pixman_composite_over_8888_n_8888_process_pixblock_head st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32 .endm .macro pixman_composite_over_8888_n_8888_init mov v15.s[0], w6 dup v15.8b, v15.b[3] .endm .macro pixman_composite_over_8888_n_8888_cleanup .endm generate_composite_function \ pixman_composite_over_8888_n_8888_asm_neon, 32, 0, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_over_8888_n_8888_init, \ pixman_composite_over_8888_n_8888_cleanup, \ pixman_composite_over_8888_n_8888_process_pixblock_head, \ pixman_composite_over_8888_n_8888_process_pixblock_tail, \ pixman_composite_over_8888_n_8888_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 12 /* mask_basereg */ /******************************************************************************/ /* TODO: expand macros and do better instructions scheduling */ .macro pixman_composite_over_8888_8888_8888_process_pixblock_tail_head ld4 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32 pixman_composite_over_8888_n_8888_process_pixblock_tail fetch_src_pixblock cache_preload 8, 8 fetch_mask_pixblock pixman_composite_over_8888_n_8888_process_pixblock_head st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32 .endm generate_composite_function \ pixman_composite_over_8888_8888_8888_asm_neon, 32, 32, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ 
\ 5, /* prefetch distance */ \ default_init_need_all_regs, \ default_cleanup_need_all_regs, \ pixman_composite_over_8888_n_8888_process_pixblock_head, \ pixman_composite_over_8888_n_8888_process_pixblock_tail, \ pixman_composite_over_8888_8888_8888_process_pixblock_tail_head \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 12 /* mask_basereg */ generate_composite_function_single_scanline \ pixman_composite_scanline_over_mask_asm_neon, 32, 32, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ default_init_need_all_regs, \ default_cleanup_need_all_regs, \ pixman_composite_over_8888_n_8888_process_pixblock_head, \ pixman_composite_over_8888_n_8888_process_pixblock_tail, \ pixman_composite_over_8888_8888_8888_process_pixblock_tail_head \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 12 /* mask_basereg */ /******************************************************************************/ /* TODO: expand macros and do better instructions scheduling */ .macro pixman_composite_over_8888_8_8888_process_pixblock_tail_head ld4 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32 pixman_composite_over_8888_n_8888_process_pixblock_tail fetch_src_pixblock cache_preload 8, 8 fetch_mask_pixblock pixman_composite_over_8888_n_8888_process_pixblock_head st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32 .endm generate_composite_function \ pixman_composite_over_8888_8_8888_asm_neon, 32, 8, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ default_init_need_all_regs, \ default_cleanup_need_all_regs, \ pixman_composite_over_8888_n_8888_process_pixblock_head, \ pixman_composite_over_8888_n_8888_process_pixblock_tail, \ pixman_composite_over_8888_8_8888_process_pixblock_tail_head \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 15 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_src_0888_0888_process_pixblock_head .endm .macro pixman_composite_src_0888_0888_process_pixblock_tail .endm .macro pixman_composite_src_0888_0888_process_pixblock_tail_head st3 {v0.8b, v1.8b, v2.8b}, [DST_W], #24 fetch_src_pixblock cache_preload 8, 8 .endm generate_composite_function \ pixman_composite_src_0888_0888_asm_neon, 24, 0, 24, \ FLAG_DST_WRITEONLY, \ 8, /* number of pixels, processed in a single block */ \ 10, /* prefetch distance */ \ default_init, \ default_cleanup, \ pixman_composite_src_0888_0888_process_pixblock_head, \ pixman_composite_src_0888_0888_process_pixblock_tail, \ pixman_composite_src_0888_0888_process_pixblock_tail_head, \ 0, /* dst_w_basereg */ \ 0, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 0 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_src_0888_8888_rev_process_pixblock_head mov v31.8b, v2.8b mov v2.8b, v0.8b mov v0.8b, v31.8b .endm .macro pixman_composite_src_0888_8888_rev_process_pixblock_tail .endm .macro pixman_composite_src_0888_8888_rev_process_pixblock_tail_head st4 {v0.8b, v1.8b, v2.8b, v3.8b}, [DST_W], #32 fetch_src_pixblock mov v31.8b, v2.8b mov v2.8b, v0.8b mov v0.8b, v31.8b cache_preload 8, 8 .endm .macro pixman_composite_src_0888_8888_rev_init eor v3.8b, v3.8b, v3.8b .endm generate_composite_function \ pixman_composite_src_0888_8888_rev_asm_neon, 24, 0, 32, \ FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of 
pixels, processed in a single block */ \ 10, /* prefetch distance */ \ pixman_composite_src_0888_8888_rev_init, \ default_cleanup, \ pixman_composite_src_0888_8888_rev_process_pixblock_head, \ pixman_composite_src_0888_8888_rev_process_pixblock_tail, \ pixman_composite_src_0888_8888_rev_process_pixblock_tail_head, \ 0, /* dst_w_basereg */ \ 0, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 0 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_src_0888_0565_rev_process_pixblock_head ushll v8.8h, v1.8b, #7 sli v8.8h, v8.8h, #1 ushll v9.8h, v2.8b, #7 sli v9.8h, v9.8h, #1 .endm .macro pixman_composite_src_0888_0565_rev_process_pixblock_tail ushll v14.8h, v0.8b, #7 sli v14.8h, v14.8h, #1 sri v14.8h, v8.8h, #5 sri v14.8h, v9.8h, #11 mov v28.d[0], v14.d[0] mov v29.d[0], v14.d[1] .endm .macro pixman_composite_src_0888_0565_rev_process_pixblock_tail_head ushll v14.8h, v0.8b, #7 sli v14.8h, v14.8h, #1 fetch_src_pixblock sri v14.8h, v8.8h, #5 sri v14.8h, v9.8h, #11 mov v28.d[0], v14.d[0] mov v29.d[0], v14.d[1] ushll v8.8h, v1.8b, #7 sli v8.8h, v8.8h, #1 st1 {v14.8h}, [DST_W], #16 ushll v9.8h, v2.8b, #7 sli v9.8h, v9.8h, #1 .endm generate_composite_function \ pixman_composite_src_0888_0565_rev_asm_neon, 24, 0, 16, \ FLAG_DST_WRITEONLY, \ 8, /* number of pixels, processed in a single block */ \ 10, /* prefetch distance */ \ default_init, \ default_cleanup, \ pixman_composite_src_0888_0565_rev_process_pixblock_head, \ pixman_composite_src_0888_0565_rev_process_pixblock_tail, \ pixman_composite_src_0888_0565_rev_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 0, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 0 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_src_pixbuf_8888_process_pixblock_head umull v8.8h, v3.8b, v0.8b umull v9.8h, v3.8b, v1.8b umull v10.8h, v3.8b, v2.8b .endm .macro pixman_composite_src_pixbuf_8888_process_pixblock_tail urshr v11.8h, v8.8h, #8 mov v30.8b, v31.8b mov v31.8b, v3.8b mov v3.8b, v30.8b urshr v12.8h, v9.8h, #8 urshr v13.8h, v10.8h, #8 raddhn v30.8b, v11.8h, v8.8h raddhn v29.8b, v12.8h, v9.8h raddhn v28.8b, v13.8h, v10.8h .endm .macro pixman_composite_src_pixbuf_8888_process_pixblock_tail_head urshr v11.8h, v8.8h, #8 mov v30.8b, v31.8b mov v31.8b, v3.8b mov v3.8b, v31.8b urshr v12.8h, v9.8h, #8 urshr v13.8h, v10.8h, #8 fetch_src_pixblock raddhn v30.8b, v11.8h, v8.8h PF add PF_X, PF_X, #8 PF tst PF_CTL, #0xF PF beq 10f PF add PF_X, PF_X, #8 PF sub PF_CTL, PF_CTL, #1 10: raddhn v29.8b, v12.8h, v9.8h raddhn v28.8b, v13.8h, v10.8h umull v8.8h, v3.8b, v0.8b umull v9.8h, v3.8b, v1.8b umull v10.8h, v3.8b, v2.8b st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32 PF cmp PF_X, ORIG_W PF lsl DUMMY, PF_X, src_bpp_shift PF prfm PREFETCH_MODE, [PF_SRC, DUMMY] PF ble 10f PF sub PF_X, PF_X, ORIG_W PF subs PF_CTL, PF_CTL, #0x10 PF ble 10f PF lsl DUMMY, SRC_STRIDE, #src_bpp_shift PF ldrsb DUMMY, [PF_SRC, DUMMY] PF add PF_SRC, PF_SRC, #1 10: .endm generate_composite_function \ pixman_composite_src_pixbuf_8888_asm_neon, 32, 0, 32, \ FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 10, /* prefetch distance */ \ default_init, \ default_cleanup, \ pixman_composite_src_pixbuf_8888_process_pixblock_head, \ pixman_composite_src_pixbuf_8888_process_pixblock_tail, \ pixman_composite_src_pixbuf_8888_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 0, /* dst_r_basereg */ \ 0, /* src_basereg */ 
\ 0 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_src_rpixbuf_8888_process_pixblock_head umull v8.8h, v3.8b, v0.8b umull v9.8h, v3.8b, v1.8b umull v10.8h, v3.8b, v2.8b .endm .macro pixman_composite_src_rpixbuf_8888_process_pixblock_tail urshr v11.8h, v8.8h, #8 mov v30.8b, v31.8b mov v31.8b, v3.8b mov v3.8b, v30.8b urshr v12.8h, v9.8h, #8 urshr v13.8h, v10.8h, #8 raddhn v28.8b, v11.8h, v8.8h raddhn v29.8b, v12.8h, v9.8h raddhn v30.8b, v13.8h, v10.8h .endm .macro pixman_composite_src_rpixbuf_8888_process_pixblock_tail_head urshr v11.8h, v8.8h, #8 mov v30.8b, v31.8b mov v31.8b, v3.8b mov v3.8b, v30.8b urshr v12.8h, v9.8h, #8 urshr v13.8h, v10.8h, #8 fetch_src_pixblock raddhn v28.8b, v11.8h, v8.8h PF add PF_X, PF_X, #8 PF tst PF_CTL, #0xF PF beq 10f PF add PF_X, PF_X, #8 PF sub PF_CTL, PF_CTL, #1 10: raddhn v29.8b, v12.8h, v9.8h raddhn v30.8b, v13.8h, v10.8h umull v8.8h, v3.8b, v0.8b umull v9.8h, v3.8b, v1.8b umull v10.8h, v3.8b, v2.8b st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32 PF cmp PF_X, ORIG_W PF lsl DUMMY, PF_X, src_bpp_shift PF prfm PREFETCH_MODE, [PF_SRC, DUMMY] PF ble 10f PF sub PF_X, PF_X, ORIG_W PF subs PF_CTL, PF_CTL, #0x10 PF ble 10f PF lsl DUMMY, SRC_STRIDE, #src_bpp_shift PF ldrsb DUMMY, [PF_SRC, DUMMY] PF add PF_SRC, PF_SRC, #1 10: .endm generate_composite_function \ pixman_composite_src_rpixbuf_8888_asm_neon, 32, 0, 32, \ FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 10, /* prefetch distance */ \ default_init, \ default_cleanup, \ pixman_composite_src_rpixbuf_8888_process_pixblock_head, \ pixman_composite_src_rpixbuf_8888_process_pixblock_tail, \ pixman_composite_src_rpixbuf_8888_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 0, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 0 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_over_0565_8_0565_process_pixblock_head /* mask is in v15 */ mov v4.d[0], v8.d[0] mov v4.d[1], v9.d[0] mov v13.d[0], v10.d[0] mov v13.d[1], v11.d[0] convert_0565_to_x888 v4, v2, v1, v0 convert_0565_to_x888 v13, v6, v5, v4 /* source pixel data is in {v0, v1, v2, XX} */ /* destination pixel data is in {v4, v5, v6, XX} */ mvn v7.8b, v15.8b umull v10.8h, v15.8b, v2.8b umull v9.8h, v15.8b, v1.8b umull v8.8h, v15.8b, v0.8b umull v11.8h, v7.8b, v4.8b umull v12.8h, v7.8b, v5.8b umull v13.8h, v7.8b, v6.8b urshr v19.8h, v10.8h, #8 urshr v18.8h, v9.8h, #8 urshr v17.8h, v8.8h, #8 raddhn v2.8b, v10.8h, v19.8h raddhn v1.8b, v9.8h, v18.8h raddhn v0.8b, v8.8h, v17.8h .endm .macro pixman_composite_over_0565_8_0565_process_pixblock_tail urshr v17.8h, v11.8h, #8 urshr v18.8h, v12.8h, #8 urshr v19.8h, v13.8h, #8 raddhn v28.8b, v17.8h, v11.8h raddhn v29.8b, v18.8h, v12.8h raddhn v30.8b, v19.8h, v13.8h uqadd v0.8b, v0.8b, v28.8b uqadd v1.8b, v1.8b, v29.8b uqadd v2.8b, v2.8b, v30.8b /* 32bpp result is in {v0, v1, v2, XX} */ convert_8888_to_0565 v2, v1, v0, v14, v30, v13 mov v28.d[0], v14.d[0] mov v29.d[0], v14.d[1] .endm /* TODO: expand macros and do better instructions scheduling */ .macro pixman_composite_over_0565_8_0565_process_pixblock_tail_head fetch_mask_pixblock pixman_composite_over_0565_8_0565_process_pixblock_tail fetch_src_pixblock ld1 {v10.4h, v11.4h}, [DST_R], #16 cache_preload 8, 8 pixman_composite_over_0565_8_0565_process_pixblock_head st1 {v14.8h}, [DST_W], #16 .endm generate_composite_function \ pixman_composite_over_0565_8_0565_asm_neon, 
16, 8, 16, \ FLAG_DST_READWRITE, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ default_init_need_all_regs, \ default_cleanup_need_all_regs, \ pixman_composite_over_0565_8_0565_process_pixblock_head, \ pixman_composite_over_0565_8_0565_process_pixblock_tail, \ pixman_composite_over_0565_8_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 10, /* dst_r_basereg */ \ 8, /* src_basereg */ \ 15 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_over_0565_n_0565_init mov v15.s[0], w6 dup v15.8b, v15.b[3] .endm .macro pixman_composite_over_0565_n_0565_cleanup .endm generate_composite_function \ pixman_composite_over_0565_n_0565_asm_neon, 16, 0, 16, \ FLAG_DST_READWRITE, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_over_0565_n_0565_init, \ pixman_composite_over_0565_n_0565_cleanup, \ pixman_composite_over_0565_8_0565_process_pixblock_head, \ pixman_composite_over_0565_8_0565_process_pixblock_tail, \ pixman_composite_over_0565_8_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 10, /* dst_r_basereg */ \ 8, /* src_basereg */ \ 15 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_add_0565_8_0565_process_pixblock_head /* mask is in v15 */ mov v4.d[0], v8.d[0] mov v4.d[1], v9.d[0] mov v13.d[0], v10.d[0] mov v13.d[1], v11.d[0] convert_0565_to_x888 v4, v2, v1, v0 convert_0565_to_x888 v13, v6, v5, v4 /* source pixel data is in {v0, v1, v2, XX} */ /* destination pixel data is in {v4, v5, v6, XX} */ umull v9.8h, v15.8b, v2.8b umull v8.8h, v15.8b, v1.8b umull v7.8h, v15.8b, v0.8b urshr v12.8h, v9.8h, #8 urshr v11.8h, v8.8h, #8 urshr v10.8h, v7.8h, #8 raddhn v2.8b, v9.8h, v12.8h raddhn v1.8b, v8.8h, v11.8h raddhn v0.8b, v7.8h, v10.8h .endm .macro pixman_composite_add_0565_8_0565_process_pixblock_tail uqadd v0.8b, v0.8b, v4.8b uqadd v1.8b, v1.8b, v5.8b uqadd v2.8b, v2.8b, v6.8b /* 32bpp result is in {v0, v1, v2, XX} */ convert_8888_to_0565 v2, v1, v0, v14, v30, v13 mov v28.d[0], v14.d[0] mov v29.d[0], v14.d[1] .endm /* TODO: expand macros and do better instructions scheduling */ .macro pixman_composite_add_0565_8_0565_process_pixblock_tail_head fetch_mask_pixblock pixman_composite_add_0565_8_0565_process_pixblock_tail fetch_src_pixblock ld1 {v10.4h, v11.4h}, [DST_R], #16 cache_preload 8, 8 pixman_composite_add_0565_8_0565_process_pixblock_head st1 {v14.8h}, [DST_W], #16 .endm generate_composite_function \ pixman_composite_add_0565_8_0565_asm_neon, 16, 8, 16, \ FLAG_DST_READWRITE, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ default_init_need_all_regs, \ default_cleanup_need_all_regs, \ pixman_composite_add_0565_8_0565_process_pixblock_head, \ pixman_composite_add_0565_8_0565_process_pixblock_tail, \ pixman_composite_add_0565_8_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 10, /* dst_r_basereg */ \ 8, /* src_basereg */ \ 15 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_out_reverse_8_0565_process_pixblock_head /* mask is in v15 */ mov v12.d[0], v10.d[0] mov v12.d[1], v11.d[0] convert_0565_to_x888 v12, v6, v5, v4 /* destination pixel data is in {v4, v5, v6, xx} */ mvn v24.8b, v15.8b /* get inverted alpha */ /* now do alpha blending */ umull v8.8h, v24.8b, v4.8b umull v9.8h, v24.8b, v5.8b umull v10.8h, v24.8b, v6.8b 
.endm .macro pixman_composite_out_reverse_8_0565_process_pixblock_tail urshr v11.8h, v8.8h, #8 urshr v12.8h, v9.8h, #8 urshr v13.8h, v10.8h, #8 raddhn v0.8b, v11.8h, v8.8h raddhn v1.8b, v12.8h, v9.8h raddhn v2.8b, v13.8h, v10.8h /* 32bpp result is in {v0, v1, v2, XX} */ convert_8888_to_0565 v2, v1, v0, v14, v12, v3 mov v28.d[0], v14.d[0] mov v29.d[0], v14.d[1] .endm /* TODO: expand macros and do better instructions scheduling */ .macro pixman_composite_out_reverse_8_0565_process_pixblock_tail_head fetch_src_pixblock pixman_composite_out_reverse_8_0565_process_pixblock_tail ld1 {v10.4h, v11.4h}, [DST_R], #16 cache_preload 8, 8 pixman_composite_out_reverse_8_0565_process_pixblock_head st1 {v14.8h}, [DST_W], #16 .endm generate_composite_function \ pixman_composite_out_reverse_8_0565_asm_neon, 8, 0, 16, \ FLAG_DST_READWRITE, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ default_init_need_all_regs, \ default_cleanup_need_all_regs, \ pixman_composite_out_reverse_8_0565_process_pixblock_head, \ pixman_composite_out_reverse_8_0565_process_pixblock_tail, \ pixman_composite_out_reverse_8_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 10, /* dst_r_basereg */ \ 15, /* src_basereg */ \ 0 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_out_reverse_8_8888_process_pixblock_head /* src is in v0 */ /* destination pixel data is in {v4, v5, v6, v7} */ mvn v1.8b, v0.8b /* get inverted alpha */ /* now do alpha blending */ umull v8.8h, v1.8b, v4.8b umull v9.8h, v1.8b, v5.8b umull v10.8h, v1.8b, v6.8b umull v11.8h, v1.8b, v7.8b .endm .macro pixman_composite_out_reverse_8_8888_process_pixblock_tail urshr v14.8h, v8.8h, #8 urshr v15.8h, v9.8h, #8 urshr v12.8h, v10.8h, #8 urshr v13.8h, v11.8h, #8 raddhn v28.8b, v14.8h, v8.8h raddhn v29.8b, v15.8h, v9.8h raddhn v30.8b, v12.8h, v10.8h raddhn v31.8b, v13.8h, v11.8h /* 32bpp result is in {v28, v29, v30, v31} */ .endm /* TODO: expand macros and do better instructions scheduling */ .macro pixman_composite_out_reverse_8_8888_process_pixblock_tail_head fetch_src_pixblock pixman_composite_out_reverse_8_8888_process_pixblock_tail ld4 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32 cache_preload 8, 8 pixman_composite_out_reverse_8_8888_process_pixblock_head st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32 .endm generate_composite_function \ pixman_composite_out_reverse_8_8888_asm_neon, 8, 0, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ default_init, \ default_cleanup, \ pixman_composite_out_reverse_8_8888_process_pixblock_head, \ pixman_composite_out_reverse_8_8888_process_pixblock_tail, \ pixman_composite_out_reverse_8_8888_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 0 /* mask_basereg */ /******************************************************************************/ generate_composite_function_nearest_scanline \ pixman_scaled_nearest_scanline_8888_8888_OVER_asm_neon, 32, 0, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ default_init, \ default_cleanup, \ pixman_composite_over_8888_8888_process_pixblock_head, \ pixman_composite_over_8888_8888_process_pixblock_tail, \ pixman_composite_over_8888_8888_process_pixblock_tail_head generate_composite_function_nearest_scanline \ pixman_scaled_nearest_scanline_8888_0565_OVER_asm_neon, 32, 0, 
16, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ default_init, \ default_cleanup, \ pixman_composite_over_8888_0565_process_pixblock_head, \ pixman_composite_over_8888_0565_process_pixblock_tail, \ pixman_composite_over_8888_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 24 /* mask_basereg */ generate_composite_function_nearest_scanline \ pixman_scaled_nearest_scanline_8888_0565_SRC_asm_neon, 32, 0, 16, \ FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ default_init, \ default_cleanup, \ pixman_composite_src_8888_0565_process_pixblock_head, \ pixman_composite_src_8888_0565_process_pixblock_tail, \ pixman_composite_src_8888_0565_process_pixblock_tail_head generate_composite_function_nearest_scanline \ pixman_scaled_nearest_scanline_0565_8888_SRC_asm_neon, 16, 0, 32, \ FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ default_init, \ default_cleanup, \ pixman_composite_src_0565_8888_process_pixblock_head, \ pixman_composite_src_0565_8888_process_pixblock_tail, \ pixman_composite_src_0565_8888_process_pixblock_tail_head generate_composite_function_nearest_scanline \ pixman_scaled_nearest_scanline_8888_8_0565_OVER_asm_neon, 32, 8, 16, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ default_init_need_all_regs, \ default_cleanup_need_all_regs, \ pixman_composite_over_8888_8_0565_process_pixblock_head, \ pixman_composite_over_8888_8_0565_process_pixblock_tail, \ pixman_composite_over_8888_8_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 8, /* src_basereg */ \ 24 /* mask_basereg */ generate_composite_function_nearest_scanline \ pixman_scaled_nearest_scanline_0565_8_0565_OVER_asm_neon, 16, 8, 16, \ FLAG_DST_READWRITE, \ 8, /* number of pixels, processed in a single block */ \ default_init_need_all_regs, \ default_cleanup_need_all_regs, \ pixman_composite_over_0565_8_0565_process_pixblock_head, \ pixman_composite_over_0565_8_0565_process_pixblock_tail, \ pixman_composite_over_0565_8_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 10, /* dst_r_basereg */ \ 8, /* src_basereg */ \ 15 /* mask_basereg */ /******************************************************************************/ /* * Bilinear scaling support code which tries to provide pixel fetching, color * format conversion, and interpolation as separate macros which can be used * as the basic building blocks for constructing bilinear scanline functions. 
*/ .macro bilinear_load_8888 reg1, reg2, tmp asr TMP1, X, #16 add X, X, UX add TMP1, TOP, TMP1, lsl #2 ld1 {&reg1&.2s}, [TMP1], STRIDE ld1 {&reg2&.2s}, [TMP1] .endm .macro bilinear_load_0565 reg1, reg2, tmp asr TMP1, X, #16 add X, X, UX add TMP1, TOP, TMP1, lsl #1 ld1 {&reg2&.s}[0], [TMP1], STRIDE ld1 {&reg2&.s}[1], [TMP1] convert_four_0565_to_x888_packed reg2, reg1, reg2, tmp .endm .macro bilinear_load_and_vertical_interpolate_two_8888 \ acc1, acc2, reg1, reg2, reg3, reg4, tmp1, tmp2 bilinear_load_8888 reg1, reg2, tmp1 umull &acc1&.8h, &reg1&.8b, v28.8b umlal &acc1&.8h, &reg2&.8b, v29.8b bilinear_load_8888 reg3, reg4, tmp2 umull &acc2&.8h, &reg3&.8b, v28.8b umlal &acc2&.8h, &reg4&.8b, v29.8b .endm .macro bilinear_load_and_vertical_interpolate_four_8888 \ xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi \ yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi bilinear_load_and_vertical_interpolate_two_8888 \ xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi bilinear_load_and_vertical_interpolate_two_8888 \ yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi .endm .macro vzip reg1, reg2 umov TMP4, v31.d[0] zip1 v31.8b, reg1, reg2 zip2 reg2, reg1, reg2 mov reg1, v31.8b mov v31.d[0], TMP4 .endm .macro vuzp reg1, reg2 umov TMP4, v31.d[0] uzp1 v31.8b, reg1, reg2 uzp2 reg2, reg1, reg2 mov reg1, v31.8b mov v31.d[0], TMP4 .endm .macro bilinear_load_and_vertical_interpolate_two_0565 \ acc1, acc2, reg1, reg2, reg3, reg4, acc2lo, acc2hi asr TMP1, X, #16 add X, X, UX add TMP1, TOP, TMP1, lsl #1 asr TMP2, X, #16 add X, X, UX add TMP2, TOP, TMP2, lsl #1 ld1 {&acc2&.s}[0], [TMP1], STRIDE ld1 {&acc2&.s}[2], [TMP2], STRIDE ld1 {&acc2&.s}[1], [TMP1] ld1 {&acc2&.s}[3], [TMP2] convert_0565_to_x888 acc2, reg3, reg2, reg1 vzip &reg1&.8b, &reg3&.8b vzip &reg2&.8b, &reg4&.8b vzip &reg3&.8b, &reg4&.8b vzip &reg1&.8b, &reg2&.8b umull &acc1&.8h, &reg1&.8b, v28.8b umlal &acc1&.8h, &reg2&.8b, v29.8b umull &acc2&.8h, &reg3&.8b, v28.8b umlal &acc2&.8h, &reg4&.8b, v29.8b .endm .macro bilinear_load_and_vertical_interpolate_four_0565 \ xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi \ yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi asr TMP1, X, #16 add X, X, UX add TMP1, TOP, TMP1, lsl #1 asr TMP2, X, #16 add X, X, UX add TMP2, TOP, TMP2, lsl #1 ld1 {&xacc2&.s}[0], [TMP1], STRIDE ld1 {&xacc2&.s}[2], [TMP2], STRIDE ld1 {&xacc2&.s}[1], [TMP1] ld1 {&xacc2&.s}[3], [TMP2] convert_0565_to_x888 xacc2, xreg3, xreg2, xreg1 asr TMP1, X, #16 add X, X, UX add TMP1, TOP, TMP1, lsl #1 asr TMP2, X, #16 add X, X, UX add TMP2, TOP, TMP2, lsl #1 ld1 {&yacc2&.s}[0], [TMP1], STRIDE vzip &xreg1&.8b, &xreg3&.8b ld1 {&yacc2&.s}[2], [TMP2], STRIDE vzip &xreg2&.8b, &xreg4&.8b ld1 {&yacc2&.s}[1], [TMP1] vzip &xreg3&.8b, &xreg4&.8b ld1 {&yacc2&.s}[3], [TMP2] vzip &xreg1&.8b, &xreg2&.8b convert_0565_to_x888 yacc2, yreg3, yreg2, yreg1 umull &xacc1&.8h, &xreg1&.8b, v28.8b vzip &yreg1&.8b, &yreg3&.8b umlal &xacc1&.8h, &xreg2&.8b, v29.8b vzip &yreg2&.8b, &yreg4&.8b umull &xacc2&.8h, &xreg3&.8b, v28.8b vzip &yreg3&.8b, &yreg4&.8b umlal &xacc2&.8h, &xreg4&.8b, v29.8b vzip &yreg1&.8b, &yreg2&.8b umull &yacc1&.8h, &yreg1&.8b, v28.8b umlal &yacc1&.8h, &yreg2&.8b, v29.8b umull &yacc2&.8h, &yreg3&.8b, v28.8b umlal &yacc2&.8h, &yreg4&.8b, v29.8b .endm .macro bilinear_store_8888 numpix, tmp1, tmp2 .if numpix == 4 st1 {v0.2s, v1.2s}, [OUT], #16 .elseif numpix == 2 st1 {v0.2s}, [OUT], #8 .elseif numpix == 1 st1 {v0.s}[0], [OUT], #4 .else .error bilinear_store_8888 numpix is unsupported .endif .endm .macro 
bilinear_store_0565 numpix, tmp1, tmp2 vuzp v0.8b, v1.8b vuzp v2.8b, v3.8b vuzp v1.8b, v3.8b vuzp v0.8b, v2.8b convert_8888_to_0565 v2, v1, v0, v1, tmp1, tmp2 .if numpix == 4 st1 {v1.4h}, [OUT], #8 .elseif numpix == 2 st1 {v1.s}[0], [OUT], #4 .elseif numpix == 1 st1 {v1.h}[0], [OUT], #2 .else .error bilinear_store_0565 numpix is unsupported .endif .endm .macro bilinear_interpolate_last_pixel src_fmt, dst_fmt bilinear_load_&src_fmt v0, v1, v2 umull v2.8h, v0.8b, v28.8b umlal v2.8h, v1.8b, v29.8b /* 5 cycles bubble */ ushll v0.4s, v2.4h, #BILINEAR_INTERPOLATION_BITS umlsl v0.4s, v2.4h, v15.h[0] umlal2 v0.4s, v2.8h, v15.h[0] /* 5 cycles bubble */ shrn v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS) /* 3 cycles bubble */ xtn v0.8b, v0.8h /* 1 cycle bubble */ bilinear_store_&dst_fmt 1, v3, v4 .endm .macro bilinear_interpolate_two_pixels src_fmt, dst_fmt bilinear_load_and_vertical_interpolate_two_&src_fmt \ v1, v11, v2, v3, v20, v21, v22, v23 ushll v0.4s, v1.4h, #BILINEAR_INTERPOLATION_BITS umlsl v0.4s, v1.4h, v15.h[0] umlal2 v0.4s, v1.8h, v15.h[0] ushll v10.4s, v11.4h, #BILINEAR_INTERPOLATION_BITS umlsl v10.4s, v11.4h, v15.h[4] umlal2 v10.4s, v11.8h, v15.h[4] shrn v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS) shrn2 v0.8h, v10.4s, #(2 * BILINEAR_INTERPOLATION_BITS) ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS) add v12.8h, v12.8h, v13.8h xtn v0.8b, v0.8h bilinear_store_&dst_fmt 2, v3, v4 .endm .macro bilinear_interpolate_four_pixels src_fmt, dst_fmt bilinear_load_and_vertical_interpolate_four_&src_fmt \ v1, v11, v14, v20, v16, v17, v22, v23 \ v3, v9, v24, v25, v26, v27, v18, v19 prfm PREFETCH_MODE, [TMP1, PF_OFFS] sub TMP1, TMP1, STRIDE ushll v0.4s, v1.4h, #BILINEAR_INTERPOLATION_BITS umlsl v0.4s, v1.4h, v15.h[0] umlal2 v0.4s, v1.8h, v15.h[0] ushll v10.4s, v11.4h, #BILINEAR_INTERPOLATION_BITS umlsl v10.4s, v11.4h, v15.h[4] umlal2 v10.4s, v11.8h, v15.h[4] ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS) ushll v2.4s, v3.4h, #BILINEAR_INTERPOLATION_BITS umlsl v2.4s, v3.4h, v15.h[0] umlal2 v2.4s, v3.8h, v15.h[0] ushll v8.4s, v9.4h, #BILINEAR_INTERPOLATION_BITS prfm PREFETCH_MODE, [TMP2, PF_OFFS] umlsl v8.4s, v9.4h, v15.h[4] umlal2 v8.4s, v9.8h, v15.h[4] add v12.8h, v12.8h, v13.8h shrn v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS) shrn2 v0.8h, v10.4s, #(2 * BILINEAR_INTERPOLATION_BITS) shrn v2.4h, v2.4s, #(2 * BILINEAR_INTERPOLATION_BITS) shrn2 v2.8h, v8.4s, #(2 * BILINEAR_INTERPOLATION_BITS) ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS) xtn v0.8b, v0.8h xtn v1.8b, v2.8h add v12.8h, v12.8h, v13.8h bilinear_store_&dst_fmt 4, v3, v4 .endm .macro bilinear_interpolate_four_pixels_head src_fmt, dst_fmt .ifdef have_bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt&_head .else bilinear_interpolate_four_pixels src_fmt, dst_fmt .endif .endm .macro bilinear_interpolate_four_pixels_tail src_fmt, dst_fmt .ifdef have_bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt&_tail .endif .endm .macro bilinear_interpolate_four_pixels_tail_head src_fmt, dst_fmt .ifdef have_bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt&_tail_head .else bilinear_interpolate_four_pixels src_fmt, dst_fmt .endif .endm .macro bilinear_interpolate_eight_pixels_head src_fmt, dst_fmt .ifdef have_bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt&_head .else bilinear_interpolate_four_pixels_head 
src_fmt, dst_fmt bilinear_interpolate_four_pixels_tail_head src_fmt, dst_fmt .endif .endm .macro bilinear_interpolate_eight_pixels_tail src_fmt, dst_fmt .ifdef have_bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt&_tail .else bilinear_interpolate_four_pixels_tail src_fmt, dst_fmt .endif .endm .macro bilinear_interpolate_eight_pixels_tail_head src_fmt, dst_fmt .ifdef have_bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt&_tail_head .else bilinear_interpolate_four_pixels_tail_head src_fmt, dst_fmt bilinear_interpolate_four_pixels_tail_head src_fmt, dst_fmt .endif .endm .set BILINEAR_FLAG_UNROLL_4, 0 .set BILINEAR_FLAG_UNROLL_8, 1 .set BILINEAR_FLAG_USE_ALL_NEON_REGS, 2 /* * Main template macro for generating NEON optimized bilinear scanline * functions. * * Bilinear scanline scaler macro template uses the following arguments: * fname - name of the function to generate * src_fmt - source color format (8888 or 0565) * dst_fmt - destination color format (8888 or 0565) * bpp_shift - (1 << bpp_shift) is the size of source pixel in bytes * prefetch_distance - prefetch in the source image by that many * pixels ahead */ .macro generate_bilinear_scanline_func fname, src_fmt, dst_fmt, \ src_bpp_shift, dst_bpp_shift, \ prefetch_distance, flags pixman_asm_function fname OUT .req x0 TOP .req x1 BOTTOM .req x2 WT .req x3 WB .req x4 X .req x5 UX .req x6 WIDTH .req x7 TMP1 .req x8 TMP2 .req x9 PF_OFFS .req x10 TMP3 .req x11 TMP4 .req x12 STRIDE .req x13 sxtw x3, w3 sxtw x4, w4 sxtw x5, w5 sxtw x6, w6 sxtw x7, w7 stp x29, x30, [sp, -16]! mov x29, sp sub sp, sp, 112 /* push all registers */ sub x29, x29, 64 st1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], #32 st1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], #32 stp x8, x9, [x29, -80] stp x10, x11, [x29, -96] stp x12, x13, [x29, -112] mov PF_OFFS, #prefetch_distance mul PF_OFFS, PF_OFFS, UX subs STRIDE, BOTTOM, TOP .unreq BOTTOM cmp WIDTH, #0 ble 300f dup v12.8h, w5 dup v13.8h, w6 dup v28.8b, w3 dup v29.8b, w4 mov v25.d[0], v12.d[1] mov v26.d[0], v13.d[0] add v25.4h, v25.4h, v26.4h mov v12.d[1], v25.d[0] /* ensure good destination alignment */ cmp WIDTH, #1 blt 100f tst OUT, #(1 << dst_bpp_shift) beq 100f ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS) add v12.8h, v12.8h, v13.8h bilinear_interpolate_last_pixel src_fmt, dst_fmt sub WIDTH, WIDTH, #1 100: add v13.8h, v13.8h, v13.8h ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS) add v12.8h, v12.8h, v13.8h cmp WIDTH, #2 blt 100f tst OUT, #(1 << (dst_bpp_shift + 1)) beq 100f bilinear_interpolate_two_pixels src_fmt, dst_fmt sub WIDTH, WIDTH, #2 100: .if ((flags) & BILINEAR_FLAG_UNROLL_8) != 0 /*********** 8 pixels per iteration *****************/ cmp WIDTH, #4 blt 100f tst OUT, #(1 << (dst_bpp_shift + 2)) beq 100f bilinear_interpolate_four_pixels src_fmt, dst_fmt sub WIDTH, WIDTH, #4 100: subs WIDTH, WIDTH, #8 blt 100f asr PF_OFFS, PF_OFFS, #(16 - src_bpp_shift) bilinear_interpolate_eight_pixels_head src_fmt, dst_fmt subs WIDTH, WIDTH, #8 blt 500f 1000: bilinear_interpolate_eight_pixels_tail_head src_fmt, dst_fmt subs WIDTH, WIDTH, #8 bge 1000b 500: bilinear_interpolate_eight_pixels_tail src_fmt, dst_fmt 100: tst WIDTH, #4 beq 200f bilinear_interpolate_four_pixels src_fmt, dst_fmt 200: .else /*********** 4 pixels per iteration *****************/ subs WIDTH, WIDTH, #4 blt 100f asr PF_OFFS, PF_OFFS, #(16 - src_bpp_shift) bilinear_interpolate_four_pixels_head src_fmt, dst_fmt subs WIDTH, WIDTH, #4 
blt 500f 1000: bilinear_interpolate_four_pixels_tail_head src_fmt, dst_fmt subs WIDTH, WIDTH, #4 bge 1000b 500: bilinear_interpolate_four_pixels_tail src_fmt, dst_fmt 100: /****************************************************/ .endif /* handle the remaining trailing pixels */ tst WIDTH, #2 beq 200f bilinear_interpolate_two_pixels src_fmt, dst_fmt 200: tst WIDTH, #1 beq 300f bilinear_interpolate_last_pixel src_fmt, dst_fmt 300: sub x29, x29, 64 ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], #32 ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], #32 ldp x8, x9, [x29, -80] ldp x10, x11, [x29, -96] ldp x12, x13, [x29, -104] mov sp, x29 ldp x29, x30, [sp], 16 ret .unreq OUT .unreq TOP .unreq WT .unreq WB .unreq X .unreq UX .unreq WIDTH .unreq TMP1 .unreq TMP2 .unreq PF_OFFS .unreq TMP3 .unreq TMP4 .unreq STRIDE .endfunc .endm /*****************************************************************************/ .set have_bilinear_interpolate_four_pixels_8888_8888, 1 .macro bilinear_interpolate_four_pixels_8888_8888_head asr TMP1, X, #16 add X, X, UX add TMP1, TOP, TMP1, lsl #2 asr TMP2, X, #16 add X, X, UX add TMP2, TOP, TMP2, lsl #2 ld1 {v22.2s}, [TMP1], STRIDE ld1 {v23.2s}, [TMP1] asr TMP3, X, #16 add X, X, UX add TMP3, TOP, TMP3, lsl #2 umull v8.8h, v22.8b, v28.8b umlal v8.8h, v23.8b, v29.8b ld1 {v22.2s}, [TMP2], STRIDE ld1 {v23.2s}, [TMP2] asr TMP4, X, #16 add X, X, UX add TMP4, TOP, TMP4, lsl #2 umull v9.8h, v22.8b, v28.8b umlal v9.8h, v23.8b, v29.8b ld1 {v22.2s}, [TMP3], STRIDE ld1 {v23.2s}, [TMP3] umull v10.8h, v22.8b, v28.8b umlal v10.8h, v23.8b, v29.8b ushll v0.4s, v8.4h, #BILINEAR_INTERPOLATION_BITS umlsl v0.4s, v8.4h, v15.h[0] umlal2 v0.4s, v8.8h, v15.h[0] prfm PREFETCH_MODE, [TMP4, PF_OFFS] ld1 {v16.2s}, [TMP4], STRIDE ld1 {v17.2s}, [TMP4] prfm PREFETCH_MODE, [TMP4, PF_OFFS] umull v11.8h, v16.8b, v28.8b umlal v11.8h, v17.8b, v29.8b ushll v1.4s, v9.4h, #BILINEAR_INTERPOLATION_BITS umlsl v1.4s, v9.4h, v15.h[4] .endm .macro bilinear_interpolate_four_pixels_8888_8888_tail umlal2 v1.4s, v9.8h, v15.h[4] ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS) ushll v2.4s, v10.4h, #BILINEAR_INTERPOLATION_BITS umlsl v2.4s, v10.4h, v15.h[0] umlal2 v2.4s, v10.8h, v15.h[0] ushll v3.4s, v11.4h, #BILINEAR_INTERPOLATION_BITS umlsl v3.4s, v11.4h, v15.h[4] umlal2 v3.4s, v11.8h, v15.h[4] add v12.8h, v12.8h, v13.8h shrn v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS) shrn2 v0.8h, v1.4s, #(2 * BILINEAR_INTERPOLATION_BITS) shrn v2.4h, v2.4s, #(2 * BILINEAR_INTERPOLATION_BITS) ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS) shrn2 v2.8h, v3.4s, #(2 * BILINEAR_INTERPOLATION_BITS) xtn v6.8b, v0.8h xtn v7.8b, v2.8h add v12.8h, v12.8h, v13.8h st1 {v6.2s, v7.2s}, [OUT], #16 .endm .macro bilinear_interpolate_four_pixels_8888_8888_tail_head asr TMP1, X, #16 add X, X, UX add TMP1, TOP, TMP1, lsl #2 asr TMP2, X, #16 add X, X, UX add TMP2, TOP, TMP2, lsl #2 umlal2 v1.4s, v9.8h, v15.h[4] ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS) ushll v2.4s, v10.4h, #BILINEAR_INTERPOLATION_BITS umlsl v2.4s, v10.4h, v15.h[0] umlal2 v2.4s, v10.8h, v15.h[0] ushll v3.4s, v11.4h, #BILINEAR_INTERPOLATION_BITS ld1 {v20.2s}, [TMP1], STRIDE umlsl v3.4s, v11.4h, v15.h[4] umlal2 v3.4s, v11.8h, v15.h[4] ld1 {v21.2s}, [TMP1] umull v8.8h, v20.8b, v28.8b umlal v8.8h, v21.8b, v29.8b shrn v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS) shrn2 v0.8h, v1.4s, #(2 * BILINEAR_INTERPOLATION_BITS) shrn v4.4h, v2.4s, #(2 * BILINEAR_INTERPOLATION_BITS) ld1 {v22.2s}, [TMP2], STRIDE shrn2 v4.8h, v3.4s, #(2 * BILINEAR_INTERPOLATION_BITS) add v12.8h, 
v12.8h, v13.8h ld1 {v23.2s}, [TMP2] umull v9.8h, v22.8b, v28.8b asr TMP3, X, #16 add X, X, UX add TMP3, TOP, TMP3, lsl #2 asr TMP4, X, #16 add X, X, UX add TMP4, TOP, TMP4, lsl #2 umlal v9.8h, v23.8b, v29.8b ld1 {v22.2s}, [TMP3], STRIDE ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS) ld1 {v23.2s}, [TMP3] umull v10.8h, v22.8b, v28.8b umlal v10.8h, v23.8b, v29.8b xtn v6.8b, v0.8h ushll v0.4s, v8.4h, #BILINEAR_INTERPOLATION_BITS xtn v7.8b, v4.8h umlsl v0.4s, v8.4h, v15.h[0] umlal2 v0.4s, v8.8h, v15.h[0] prfm PREFETCH_MODE, [TMP4, PF_OFFS] ld1 {v16.2s}, [TMP4], STRIDE add v12.8h, v12.8h, v13.8h ld1 {v17.2s}, [TMP4] prfm PREFETCH_MODE, [TMP4, PF_OFFS] umull v11.8h, v16.8b, v28.8b umlal v11.8h, v17.8b, v29.8b st1 {v6.2s, v7.2s}, [OUT], #16 ushll v1.4s, v9.4h, #BILINEAR_INTERPOLATION_BITS umlsl v1.4s, v9.4h, v15.h[4] .endm /*****************************************************************************/ generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_8888_8888_SRC_asm_neon, 8888, 8888, \ 2, 2, 28, BILINEAR_FLAG_UNROLL_4 generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_8888_0565_SRC_asm_neon, 8888, 0565, \ 2, 1, 28, BILINEAR_FLAG_UNROLL_8 | BILINEAR_FLAG_USE_ALL_NEON_REGS generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_0565_x888_SRC_asm_neon, 0565, 8888, \ 1, 2, 28, BILINEAR_FLAG_UNROLL_4 generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_0565_0565_SRC_asm_neon, 0565, 0565, \ 1, 1, 28, BILINEAR_FLAG_UNROLL_4
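For readers following the bilinear scanline template above, here is a minimal scalar sketch of what each generated function computes per destination pixel. It is illustrative C only: the function name and the BILINEAR_BITS macro are made up, and the assumption that the vertical weights wt + wb sum to 1 << BILINEAR_BITS is inferred from the shift amounts used in the assembly (shrn #(2 * BILINEAR_INTERPOLATION_BITS)), not from a pixman header.

#include <stdint.h>

#define BILINEAR_BITS 7   /* assumed to match BILINEAR_INTERPOLATION_BITS */

/* Interpolate one a8r8g8b8 pixel from a 2x2 neighbourhood.
 * tl/tr are the two source pixels on the top scanline, bl/br on the bottom
 * one; wt + wb must equal (1 << BILINEAR_BITS); distx is the fractional
 * part of the x coordinate, also in BILINEAR_BITS fixed point. */
static uint32_t
bilinear_pixel_8888 (uint32_t tl, uint32_t tr,
                     uint32_t bl, uint32_t br,
                     unsigned wt, unsigned wb, unsigned distx)
{
    uint32_t out = 0;
    for (int shift = 0; shift < 32; shift += 8)
    {
        /* vertical pass (the umull/umlal with v28/v29 in the code above) */
        unsigned left  = ((tl >> shift) & 0xff) * wt + ((bl >> shift) & 0xff) * wb;
        unsigned right = ((tr >> shift) & 0xff) * wt + ((br >> shift) & 0xff) * wb;

        /* horizontal pass (ushll/umlsl/umlal2), then drop the combined
         * fixed-point scale of 2 * BILINEAR_BITS bits */
        unsigned c = left * ((1u << BILINEAR_BITS) - distx) + right * distx;
        out |= ((c >> (2 * BILINEAR_BITS)) & 0xff) << shift;
    }
    return out;
}

The real scanline functions additionally advance X by UX per output pixel, prefetch ahead in the source, and handle 0565 sources and destinations by expanding to and packing from this same 8-bit-per-channel arithmetic.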
ElSargo/wezpy
10,556
wezterm-src/deps/cairo/pixman/pixman/pixman-mips-memcpy-asm.S
/* * Copyright (c) 2012 * MIPS Technologies, Inc., California. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "pixman-mips-dspr2-asm.h" /* * This routine could be optimized for MIPS64. The current code only * uses MIPS32 instructions. */ #ifdef EB # define LWHI lwl /* high part is left in big-endian */ # define SWHI swl /* high part is left in big-endian */ # define LWLO lwr /* low part is right in big-endian */ # define SWLO swr /* low part is right in big-endian */ #else # define LWHI lwr /* high part is right in little-endian */ # define SWHI swr /* high part is right in little-endian */ # define LWLO lwl /* low part is left in little-endian */ # define SWLO swl /* low part is left in little-endian */ #endif LEAF_MIPS32R2(pixman_mips_fast_memcpy) slti AT, a2, 8 bne AT, zero, $last8 move v0, a0 /* memcpy returns the dst pointer */ /* Test if the src and dst are word-aligned, or can be made word-aligned */ xor t8, a1, a0 andi t8, t8, 0x3 /* t8 is a0/a1 word-displacement */ bne t8, zero, $unaligned negu a3, a0 andi a3, a3, 0x3 /* we need to copy a3 bytes to make a0/a1 aligned */ beq a3, zero, $chk16w /* when a3=0 then the dst (a0) is word-aligned */ subu a2, a2, a3 /* now a2 is the remaining byte count */ LWHI t8, 0(a1) addu a1, a1, a3 SWHI t8, 0(a0) addu a0, a0, a3 /* Now the dst/src are mutually word-aligned with word-aligned addresses */ $chk16w: andi t8, a2, 0x3f /* any whole 64-byte chunks?
*/ /* t8 is the byte count after 64-byte chunks */ beq a2, t8, $chk8w /* if a2==t8, no 64-byte chunks */ /* There will be at most 1 32-byte chunk after it */ subu a3, a2, t8 /* subtract from a2 the reminder */ /* Here a3 counts bytes in 16w chunks */ addu a3, a0, a3 /* Now a3 is the final dst after 64-byte chunks */ addu t0, a0, a2 /* t0 is the "past the end" address */ /* * When in the loop we exercise "pref 30, x(a0)", the a0+x should not be past * the "t0-32" address * This means: for x=128 the last "safe" a0 address is "t0-160" * Alternatively, for x=64 the last "safe" a0 address is "t0-96" * In the current version we use "pref 30, 128(a0)", so "t0-160" is the limit */ subu t9, t0, 160 /* t9 is the "last safe pref 30, 128(a0)" address */ pref 0, 0(a1) /* bring the first line of src, addr 0 */ pref 0, 32(a1) /* bring the second line of src, addr 32 */ pref 0, 64(a1) /* bring the third line of src, addr 64 */ pref 30, 32(a0) /* safe, as we have at least 64 bytes ahead */ /* In case the a0 > t9 don't use "pref 30" at all */ sgtu v1, a0, t9 bgtz v1, $loop16w /* skip "pref 30, 64(a0)" for too short arrays */ nop /* otherwise, start with using pref30 */ pref 30, 64(a0) $loop16w: pref 0, 96(a1) lw t0, 0(a1) bgtz v1, $skip_pref30_96 /* skip "pref 30, 96(a0)" */ lw t1, 4(a1) pref 30, 96(a0) /* continue setting up the dest, addr 96 */ $skip_pref30_96: lw t2, 8(a1) lw t3, 12(a1) lw t4, 16(a1) lw t5, 20(a1) lw t6, 24(a1) lw t7, 28(a1) pref 0, 128(a1) /* bring the next lines of src, addr 128 */ sw t0, 0(a0) sw t1, 4(a0) sw t2, 8(a0) sw t3, 12(a0) sw t4, 16(a0) sw t5, 20(a0) sw t6, 24(a0) sw t7, 28(a0) lw t0, 32(a1) bgtz v1, $skip_pref30_128 /* skip "pref 30, 128(a0)" */ lw t1, 36(a1) pref 30, 128(a0) /* continue setting up the dest, addr 128 */ $skip_pref30_128: lw t2, 40(a1) lw t3, 44(a1) lw t4, 48(a1) lw t5, 52(a1) lw t6, 56(a1) lw t7, 60(a1) pref 0, 160(a1) /* bring the next lines of src, addr 160 */ sw t0, 32(a0) sw t1, 36(a0) sw t2, 40(a0) sw t3, 44(a0) sw t4, 48(a0) sw t5, 52(a0) sw t6, 56(a0) sw t7, 60(a0) addiu a0, a0, 64 /* adding 64 to dest */ sgtu v1, a0, t9 bne a0, a3, $loop16w addiu a1, a1, 64 /* adding 64 to src */ move a2, t8 /* Here we have src and dest word-aligned but less than 64-bytes to go */ $chk8w: pref 0, 0x0(a1) andi t8, a2, 0x1f /* is there a 32-byte chunk? */ /* the t8 is the reminder count past 32-bytes */ beq a2, t8, $chk1w /* when a2=t8, no 32-byte chunk */ nop lw t0, 0(a1) lw t1, 4(a1) lw t2, 8(a1) lw t3, 12(a1) lw t4, 16(a1) lw t5, 20(a1) lw t6, 24(a1) lw t7, 28(a1) addiu a1, a1, 32 sw t0, 0(a0) sw t1, 4(a0) sw t2, 8(a0) sw t3, 12(a0) sw t4, 16(a0) sw t5, 20(a0) sw t6, 24(a0) sw t7, 28(a0) addiu a0, a0, 32 $chk1w: andi a2, t8, 0x3 /* now a2 is the reminder past 1w chunks */ beq a2, t8, $last8 subu a3, t8, a2 /* a3 is count of bytes in 1w chunks */ addu a3, a0, a3 /* now a3 is the dst address past the 1w chunks */ /* copying in words (4-byte chunks) */ $wordCopy_loop: lw t3, 0(a1) /* the first t3 may be equal t0 ... optimize? 
*/ addiu a1, a1, 4 addiu a0, a0, 4 bne a0, a3, $wordCopy_loop sw t3, -4(a0) /* For the last (<8) bytes */ $last8: blez a2, leave addu a3, a0, a2 /* a3 is the last dst address */ $last8loop: lb v1, 0(a1) addiu a1, a1, 1 addiu a0, a0, 1 bne a0, a3, $last8loop sb v1, -1(a0) leave: j ra nop /* * UNALIGNED case */ $unaligned: /* got here with a3="negu a0" */ andi a3, a3, 0x3 /* test if the a0 is word aligned */ beqz a3, $ua_chk16w subu a2, a2, a3 /* bytes left after initial a3 bytes */ LWHI v1, 0(a1) LWLO v1, 3(a1) addu a1, a1, a3 /* a3 may be here 1, 2 or 3 */ SWHI v1, 0(a0) addu a0, a0, a3 /* below the dst will be word aligned (NOTE1) */ $ua_chk16w: andi t8, a2, 0x3f /* any whole 64-byte chunks? */ /* t8 is the byte count after 64-byte chunks */ beq a2, t8, $ua_chk8w /* if a2==t8, no 64-byte chunks */ /* There will be at most 1 32-byte chunk after it */ subu a3, a2, t8 /* subtract from a2 the reminder */ /* Here a3 counts bytes in 16w chunks */ addu a3, a0, a3 /* Now a3 is the final dst after 64-byte chunks */ addu t0, a0, a2 /* t0 is the "past the end" address */ subu t9, t0, 160 /* t9 is the "last safe pref 30, 128(a0)" address */ pref 0, 0(a1) /* bring the first line of src, addr 0 */ pref 0, 32(a1) /* bring the second line of src, addr 32 */ pref 0, 64(a1) /* bring the third line of src, addr 64 */ pref 30, 32(a0) /* safe, as we have at least 64 bytes ahead */ /* In case the a0 > t9 don't use "pref 30" at all */ sgtu v1, a0, t9 bgtz v1, $ua_loop16w /* skip "pref 30, 64(a0)" for too short arrays */ nop /* otherwise, start with using pref30 */ pref 30, 64(a0) $ua_loop16w: pref 0, 96(a1) LWHI t0, 0(a1) LWLO t0, 3(a1) LWHI t1, 4(a1) bgtz v1, $ua_skip_pref30_96 LWLO t1, 7(a1) pref 30, 96(a0) /* continue setting up the dest, addr 96 */ $ua_skip_pref30_96: LWHI t2, 8(a1) LWLO t2, 11(a1) LWHI t3, 12(a1) LWLO t3, 15(a1) LWHI t4, 16(a1) LWLO t4, 19(a1) LWHI t5, 20(a1) LWLO t5, 23(a1) LWHI t6, 24(a1) LWLO t6, 27(a1) LWHI t7, 28(a1) LWLO t7, 31(a1) pref 0, 128(a1) /* bring the next lines of src, addr 128 */ sw t0, 0(a0) sw t1, 4(a0) sw t2, 8(a0) sw t3, 12(a0) sw t4, 16(a0) sw t5, 20(a0) sw t6, 24(a0) sw t7, 28(a0) LWHI t0, 32(a1) LWLO t0, 35(a1) LWHI t1, 36(a1) bgtz v1, $ua_skip_pref30_128 LWLO t1, 39(a1) pref 30, 128(a0) /* continue setting up the dest, addr 128 */ $ua_skip_pref30_128: LWHI t2, 40(a1) LWLO t2, 43(a1) LWHI t3, 44(a1) LWLO t3, 47(a1) LWHI t4, 48(a1) LWLO t4, 51(a1) LWHI t5, 52(a1) LWLO t5, 55(a1) LWHI t6, 56(a1) LWLO t6, 59(a1) LWHI t7, 60(a1) LWLO t7, 63(a1) pref 0, 160(a1) /* bring the next lines of src, addr 160 */ sw t0, 32(a0) sw t1, 36(a0) sw t2, 40(a0) sw t3, 44(a0) sw t4, 48(a0) sw t5, 52(a0) sw t6, 56(a0) sw t7, 60(a0) addiu a0, a0, 64 /* adding 64 to dest */ sgtu v1, a0, t9 bne a0, a3, $ua_loop16w addiu a1, a1, 64 /* adding 64 to src */ move a2, t8 /* Here we have src and dest word-aligned but less than 64-bytes to go */ $ua_chk8w: pref 0, 0x0(a1) andi t8, a2, 0x1f /* is there a 32-byte chunk? 
*/ /* the t8 is the reminder count */ beq a2, t8, $ua_chk1w /* when a2=t8, no 32-byte chunk */ LWHI t0, 0(a1) LWLO t0, 3(a1) LWHI t1, 4(a1) LWLO t1, 7(a1) LWHI t2, 8(a1) LWLO t2, 11(a1) LWHI t3, 12(a1) LWLO t3, 15(a1) LWHI t4, 16(a1) LWLO t4, 19(a1) LWHI t5, 20(a1) LWLO t5, 23(a1) LWHI t6, 24(a1) LWLO t6, 27(a1) LWHI t7, 28(a1) LWLO t7, 31(a1) addiu a1, a1, 32 sw t0, 0(a0) sw t1, 4(a0) sw t2, 8(a0) sw t3, 12(a0) sw t4, 16(a0) sw t5, 20(a0) sw t6, 24(a0) sw t7, 28(a0) addiu a0, a0, 32 $ua_chk1w: andi a2, t8, 0x3 /* now a2 is the reminder past 1w chunks */ beq a2, t8, $ua_smallCopy subu a3, t8, a2 /* a3 is count of bytes in 1w chunks */ addu a3, a0, a3 /* now a3 is the dst address past the 1w chunks */ /* copying in words (4-byte chunks) */ $ua_wordCopy_loop: LWHI v1, 0(a1) LWLO v1, 3(a1) addiu a1, a1, 4 addiu a0, a0, 4 /* note: dst=a0 is word aligned here, see NOTE1 */ bne a0, a3, $ua_wordCopy_loop sw v1, -4(a0) /* Now less than 4 bytes (value in a2) left to copy */ $ua_smallCopy: beqz a2, leave addu a3, a0, a2 /* a3 is the last dst address */ $ua_smallCopy_loop: lb v1, 0(a1) addiu a1, a1, 1 addiu a0, a0, 1 bne a0, a3, $ua_smallCopy_loop sb v1, -1(a0) j ra nop END(pixman_mips_fast_memcpy)
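Read back into C, the structure of the routine above is roughly the sketch below. It is illustrative only: it keeps the align-then-64/32/4/1-byte block structure but omits the "pref 0" / "pref 30" cache hints and the lwl/lwr path for mutually misaligned buffers (which here simply falls through to the byte loop); the function name is made up.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

void *
fast_memcpy_sketch (void *dst, const void *src, size_t len)
{
    uint8_t *d = dst;
    const uint8_t *s = src;

    if (len >= 8 && ((((uintptr_t) d) ^ ((uintptr_t) s)) & 3) == 0)
    {
        /* copy up to 3 bytes so that dst (and therefore src) becomes word aligned */
        size_t head = (0 - (uintptr_t) d) & 3;
        for (size_t i = 0; i < head; i++)
            *d++ = *s++;
        len -= head;

        /* 64-byte chunks; the assembly adds source/destination prefetches here */
        for (; len >= 64; len -= 64, d += 64, s += 64)
            memcpy (d, s, 64);

        /* at most one 32-byte chunk */
        if (len >= 32)
        {
            memcpy (d, s, 32);
            d += 32; s += 32; len -= 32;
        }

        /* whole words */
        for (; len >= 4; len -= 4, d += 4, s += 4)
            memcpy (d, s, 4);
    }

    /* short copies, trailing bytes, or mutually misaligned buffers */
    while (len--)
        *d++ = *s++;

    return dst;
}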
ElSargo/wezpy
128,848
wezterm-src/deps/cairo/pixman/pixman/pixman-arm-neon-asm.S
/* * Copyright © 2009 Nokia Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Author: Siarhei Siamashka (siarhei.siamashka@nokia.com) */ /* * This file contains implementations of NEON optimized pixel processing * functions. There is no full and detailed tutorial, but some functions * (those which are exposing some new or interesting features) are * extensively commented and can be used as examples. * * You may want to have a look at the comments for following functions: * - pixman_composite_over_8888_0565_asm_neon * - pixman_composite_over_n_8_0565_asm_neon */ /* Prevent the stack from becoming executable for no reason... */ #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif .text .fpu neon .arch armv7a .object_arch armv4 .eabi_attribute 10, 0 /* suppress Tag_FP_arch */ .eabi_attribute 12, 0 /* suppress Tag_Advanced_SIMD_arch */ .arm .altmacro .p2align 2 #include "pixman-private.h" #include "pixman-arm-asm.h" #include "pixman-arm-neon-asm.h" /* Global configuration options and preferences */ /* * The code can optionally make use of unaligned memory accesses to improve * performance of handling leading/trailing pixels for each scanline. * Configuration variable RESPECT_STRICT_ALIGNMENT can be set to 0 for * example in linux if unaligned memory accesses are not configured to * generate.exceptions. */ .set RESPECT_STRICT_ALIGNMENT, 1 /* * Set default prefetch type. There is a choice between the following options: * * PREFETCH_TYPE_NONE (may be useful for the ARM cores where PLD is set to work * as NOP to workaround some HW bugs or for whatever other reason) * * PREFETCH_TYPE_SIMPLE (may be useful for simple single-issue ARM cores where * advanced prefetch intruduces heavy overhead) * * PREFETCH_TYPE_ADVANCED (useful for superscalar cores such as ARM Cortex-A8 * which can run ARM and NEON instructions simultaneously so that extra ARM * instructions do not add (many) extra cycles, but improve prefetch efficiency) * * Note: some types of function can't support advanced prefetch and fallback * to simple one (those which handle 24bpp pixels) */ .set PREFETCH_TYPE_DEFAULT, PREFETCH_TYPE_ADVANCED /* Prefetch distance in pixels for simple prefetch */ .set PREFETCH_DISTANCE_SIMPLE, 64 /* * Implementation of pixman_composite_over_8888_0565_asm_neon * * This function takes a8r8g8b8 source buffer, r5g6b5 destination buffer and * performs OVER compositing operation. 
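 * (With premultiplied alpha, OVER is simply, per channel:
 * dst' = src + (255 - alpha(src)) * dst / 255, with the division by 255
 * done with rounding; that is exactly what the macros below compute.)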
Function fast_composite_over_8888_0565 * from pixman-fast-path.c does the same in C and can be used as a reference. * * First we need to have some NEON assembly code which can do the actual * operation on the pixels and provide it to the template macro. * * Template macro quite conveniently takes care of emitting all the necessary * code for memory reading and writing (including quite tricky cases of * handling unaligned leading/trailing pixels), so we only need to deal with * the data in NEON registers. * * NEON register allocation in general is recommended to be the following: * d0, d1, d2, d3 - contain loaded source pixel data * d4, d5, d6, d7 - contain loaded destination pixels (if they are needed) * d24, d25, d26, d27 - contain loaded mask pixel data (if mask is used) * d28, d29, d30, d31 - place for storing the result (destination pixels) * * As can be seen above, four 64-bit NEON registers are used for keeping * intermediate pixel data and up to 8 pixels can be processed in one step * for 32bpp formats (16 pixels for 16bpp, 32 pixels for 8bpp). * * This particular function uses the following register allocation: * d0, d1, d2, d3 - contain loaded source pixel data * d4, d5 - contain loaded destination pixels (they are needed) * d28, d29 - place for storing the result (destination pixels) */ /* * Step one. We need to have some code to do some arithmetic on pixel data. * This is implemented as a pair of macros: '*_head' and '*_tail'. When used * back-to-back, they take pixel data from {d0, d1, d2, d3} and {d4, d5}, * perform all the needed calculations and write the result to {d28, d29}. * The rationale for having two macros and not just one will be explained * later. In practice, any single monolithic function which does the work can * be split into two parts in any arbitrary way without affecting correctness. * * There is one special trick here too. The common template macro can optionally * make our life a bit easier by doing R, G, B, A color components * deinterleaving for 32bpp pixel formats (and this feature is used in * 'pixman_composite_over_8888_0565_asm_neon' function). So it means that * instead of having 8 packed pixels in {d0, d1, d2, d3} registers, we * actually use d0 register for blue channel (a vector of eight 8-bit * values), d1 register for green, d2 for red and d3 for alpha. This * simple conversion can also be done with a few NEON instructions: * * Packed to planar conversion: * vuzp.8 d0, d1 * vuzp.8 d2, d3 * vuzp.8 d1, d3 * vuzp.8 d0, d2 * * Planar to packed conversion: * vzip.8 d0, d2 * vzip.8 d1, d3 * vzip.8 d2, d3 * vzip.8 d0, d1 * * But pixels can be loaded directly in planar format using VLD4.8 NEON * instruction. It is 1 cycle slower than VLD1.32, so this is not always * desirable, that's why deinterleaving is optional.
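 *
 * As an aside, the vmull.u8 + vrshr.u16 + vraddhn.u16 triplets used in the
 * macros below implement "multiply two 8-bit values and divide by 255 with
 * rounding". A rough scalar equivalent (illustrative C, not a pixman API):
 *
 *     uint8_t mul_un8 (uint8_t x, uint8_t a)
 *     {
 *         uint16_t t = (uint16_t) (x * a);            // vmull.u8
 *         uint16_t r = (uint16_t) ((t + 128) >> 8);   // vrshr.u16 ..., #8
 *         return (uint8_t) ((t + r + 128) >> 8);      // vraddhn.u16
 *     }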
* * But anyway, here is the code: */ .macro pixman_composite_over_8888_0565_process_pixblock_head /* convert 8 r5g6b5 pixel data from {d4, d5} to planar 8-bit format and put data into d6 - red, d7 - green, d30 - blue */ vshrn.u16 d6, q2, #8 vshrn.u16 d7, q2, #3 vsli.u16 q2, q2, #5 vsri.u8 d6, d6, #5 vmvn.8 d3, d3 /* invert source alpha */ vsri.u8 d7, d7, #6 vshrn.u16 d30, q2, #2 /* now do alpha blending, storing results in 8-bit planar format into d16 - red, d19 - green, d18 - blue */ vmull.u8 q10, d3, d6 vmull.u8 q11, d3, d7 vmull.u8 q12, d3, d30 vrshr.u16 q13, q10, #8 vrshr.u16 q3, q11, #8 vrshr.u16 q15, q12, #8 vraddhn.u16 d20, q10, q13 vraddhn.u16 d23, q11, q3 vraddhn.u16 d22, q12, q15 .endm .macro pixman_composite_over_8888_0565_process_pixblock_tail /* ... continue alpha blending */ vqadd.u8 d16, d2, d20 vqadd.u8 q9, q0, q11 /* convert the result to r5g6b5 and store it into {d28, d29} */ vshll.u8 q14, d16, #8 vshll.u8 q8, d19, #8 vshll.u8 q9, d18, #8 vsri.u16 q14, q8, #5 vsri.u16 q14, q9, #11 .endm /* * OK, now we got almost everything that we need. Using the above two * macros, the work can be done right. But now we want to optimize * it a bit. ARM Cortex-A8 is an in-order core, and benefits really * a lot from good code scheduling and software pipelining. * * Let's construct some code, which will run in the core main loop. * Some pseudo-code of the main loop will look like this: * head * while (...) { * tail * head * } * tail * * It may look a bit weird, but this setup allows to hide instruction * latencies better and also utilize dual-issue capability more * efficiently (make pairs of load-store and ALU instructions). * * So what we need now is a '*_tail_head' macro, which will be used * in the core main loop. A trivial straightforward implementation * of this macro would look like this: * * pixman_composite_over_8888_0565_process_pixblock_tail * vst1.16 {d28, d29}, [DST_W, :128]! * vld1.16 {d4, d5}, [DST_R, :128]! * vld4.32 {d0, d1, d2, d3}, [SRC]! * pixman_composite_over_8888_0565_process_pixblock_head * cache_preload 8, 8 * * Now it also got some VLD/VST instructions. We simply can't move from * processing one block of pixels to the other one with just arithmetics. * The previously processed data needs to be written to memory and new * data needs to be fetched. Fortunately, this main loop does not deal * with partial leading/trailing pixels and can load/store a full block * of pixels in a bulk. Additionally, destination buffer is already * 16 bytes aligned here (which is good for performance). * * New things here are DST_R, DST_W, SRC and MASK identifiers. These * are the aliases for ARM registers which are used as pointers for * accessing data. We maintain separate pointers for reading and writing * destination buffer (DST_R and DST_W). * * Another new thing is 'cache_preload' macro. It is used for prefetching * data into CPU L2 cache and improve performance when dealing with large * images which are far larger than cache size. It uses one argument * (actually two, but they need to be the same here) - number of pixels * in a block. Looking into 'pixman-arm-neon-asm.h' can provide some * details about this macro. Moreover, if good performance is needed * the code from this macro needs to be copied into '*_tail_head' macro * and mixed with the rest of code for optimal instructions scheduling. * We are actually doing it below. * * Now after all the explanations, here is the optimized code. 
* Different instruction streams (originaling from '*_head', '*_tail' * and 'cache_preload' macro) use different indentation levels for * better readability. Actually taking the code from one of these * indentation levels and ignoring a few VLD/VST instructions would * result in exactly the code from '*_head', '*_tail' or 'cache_preload' * macro! */ #if 1 .macro pixman_composite_over_8888_0565_process_pixblock_tail_head vqadd.u8 d16, d2, d20 vld1.16 {d4, d5}, [DST_R, :128]! vqadd.u8 q9, q0, q11 vshrn.u16 d6, q2, #8 fetch_src_pixblock vshrn.u16 d7, q2, #3 vsli.u16 q2, q2, #5 vshll.u8 q14, d16, #8 PF add PF_X, PF_X, #8 vshll.u8 q8, d19, #8 PF tst PF_CTL, #0xF vsri.u8 d6, d6, #5 PF addne PF_X, PF_X, #8 vmvn.8 d3, d3 PF subne PF_CTL, PF_CTL, #1 vsri.u8 d7, d7, #6 vshrn.u16 d30, q2, #2 vmull.u8 q10, d3, d6 PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift] vmull.u8 q11, d3, d7 vmull.u8 q12, d3, d30 PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift] vsri.u16 q14, q8, #5 PF cmp PF_X, ORIG_W vshll.u8 q9, d18, #8 vrshr.u16 q13, q10, #8 PF subge PF_X, PF_X, ORIG_W vrshr.u16 q3, q11, #8 vrshr.u16 q15, q12, #8 PF subges PF_CTL, PF_CTL, #0x10 vsri.u16 q14, q9, #11 PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]! vraddhn.u16 d20, q10, q13 vraddhn.u16 d23, q11, q3 PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]! vraddhn.u16 d22, q12, q15 vst1.16 {d28, d29}, [DST_W, :128]! .endm #else /* If we did not care much about the performance, we would just use this... */ .macro pixman_composite_over_8888_0565_process_pixblock_tail_head pixman_composite_over_8888_0565_process_pixblock_tail vst1.16 {d28, d29}, [DST_W, :128]! vld1.16 {d4, d5}, [DST_R, :128]! fetch_src_pixblock pixman_composite_over_8888_0565_process_pixblock_head cache_preload 8, 8 .endm #endif /* * And now the final part. We are using 'generate_composite_function' macro * to put all the stuff together. We are specifying the name of the function * which we want to get, number of bits per pixel for the source, mask and * destination (0 if unused, like mask in this case). Next come some bit * flags: * FLAG_DST_READWRITE - tells that the destination buffer is both read * and written, for write-only buffer we would use * FLAG_DST_WRITEONLY flag instead * FLAG_DEINTERLEAVE_32BPP - tells that we prefer to work with planar data * and separate color channels for 32bpp format. * The next things are: * - the number of pixels processed per iteration (8 in this case, because * that's the maximum what can fit into four 64-bit NEON registers). * - prefetch distance, measured in pixel blocks. In this case it is 5 times * by 8 pixels. That would be 40 pixels, or up to 160 bytes. Optimal * prefetch distance can be selected by running some benchmarks. * * After that we specify some macros, these are 'default_init', * 'default_cleanup' here which are empty (but it is possible to have custom * init/cleanup macros to be able to save/restore some extra NEON registers * like d8-d15 or do anything else) followed by * 'pixman_composite_over_8888_0565_process_pixblock_head', * 'pixman_composite_over_8888_0565_process_pixblock_tail' and * 'pixman_composite_over_8888_0565_process_pixblock_tail_head' * which we got implemented above. * * The last part is the NEON registers allocation scheme. 
*/ generate_composite_function \ pixman_composite_over_8888_0565_asm_neon, 32, 0, 16, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ default_init, \ default_cleanup, \ pixman_composite_over_8888_0565_process_pixblock_head, \ pixman_composite_over_8888_0565_process_pixblock_tail, \ pixman_composite_over_8888_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 24 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_over_n_0565_process_pixblock_head /* convert 8 r5g6b5 pixel data from {d4, d5} to planar 8-bit format and put data into d6 - red, d7 - green, d30 - blue */ vshrn.u16 d6, q2, #8 vshrn.u16 d7, q2, #3 vsli.u16 q2, q2, #5 vsri.u8 d6, d6, #5 vsri.u8 d7, d7, #6 vshrn.u16 d30, q2, #2 /* now do alpha blending, storing results in 8-bit planar format into d16 - red, d19 - green, d18 - blue */ vmull.u8 q10, d3, d6 vmull.u8 q11, d3, d7 vmull.u8 q12, d3, d30 vrshr.u16 q13, q10, #8 vrshr.u16 q3, q11, #8 vrshr.u16 q15, q12, #8 vraddhn.u16 d20, q10, q13 vraddhn.u16 d23, q11, q3 vraddhn.u16 d22, q12, q15 .endm .macro pixman_composite_over_n_0565_process_pixblock_tail /* ... continue alpha blending */ vqadd.u8 d16, d2, d20 vqadd.u8 q9, q0, q11 /* convert the result to r5g6b5 and store it into {d28, d29} */ vshll.u8 q14, d16, #8 vshll.u8 q8, d19, #8 vshll.u8 q9, d18, #8 vsri.u16 q14, q8, #5 vsri.u16 q14, q9, #11 .endm /* TODO: expand macros and do better instructions scheduling */ .macro pixman_composite_over_n_0565_process_pixblock_tail_head pixman_composite_over_n_0565_process_pixblock_tail vld1.16 {d4, d5}, [DST_R, :128]! vst1.16 {d28, d29}, [DST_W, :128]! pixman_composite_over_n_0565_process_pixblock_head cache_preload 8, 8 .endm .macro pixman_composite_over_n_0565_init add DUMMY, sp, #ARGS_STACK_OFFSET vld1.32 {d3[0]}, [DUMMY] vdup.8 d0, d3[0] vdup.8 d1, d3[1] vdup.8 d2, d3[2] vdup.8 d3, d3[3] vmvn.8 d3, d3 /* invert source alpha */ .endm generate_composite_function \ pixman_composite_over_n_0565_asm_neon, 0, 0, 16, \ FLAG_DST_READWRITE, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_over_n_0565_init, \ default_cleanup, \ pixman_composite_over_n_0565_process_pixblock_head, \ pixman_composite_over_n_0565_process_pixblock_tail, \ pixman_composite_over_n_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 24 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_src_8888_0565_process_pixblock_head vshll.u8 q8, d1, #8 vshll.u8 q14, d2, #8 vshll.u8 q9, d0, #8 .endm .macro pixman_composite_src_8888_0565_process_pixblock_tail vsri.u16 q14, q8, #5 vsri.u16 q14, q9, #11 .endm .macro pixman_composite_src_8888_0565_process_pixblock_tail_head vsri.u16 q14, q8, #5 PF add PF_X, PF_X, #8 PF tst PF_CTL, #0xF fetch_src_pixblock PF addne PF_X, PF_X, #8 PF subne PF_CTL, PF_CTL, #1 vsri.u16 q14, q9, #11 PF cmp PF_X, ORIG_W PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift] vshll.u8 q8, d1, #8 vst1.16 {d28, d29}, [DST_W, :128]! PF subge PF_X, PF_X, ORIG_W PF subges PF_CTL, PF_CTL, #0x10 vshll.u8 q14, d2, #8 PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]! 
vshll.u8 q9, d0, #8 .endm generate_composite_function \ pixman_composite_src_8888_0565_asm_neon, 32, 0, 16, \ FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 10, /* prefetch distance */ \ default_init, \ default_cleanup, \ pixman_composite_src_8888_0565_process_pixblock_head, \ pixman_composite_src_8888_0565_process_pixblock_tail, \ pixman_composite_src_8888_0565_process_pixblock_tail_head /******************************************************************************/ .macro pixman_composite_src_0565_8888_process_pixblock_head vshrn.u16 d30, q0, #8 vshrn.u16 d29, q0, #3 vsli.u16 q0, q0, #5 vmov.u8 d31, #255 vsri.u8 d30, d30, #5 vsri.u8 d29, d29, #6 vshrn.u16 d28, q0, #2 .endm .macro pixman_composite_src_0565_8888_process_pixblock_tail .endm /* TODO: expand macros and do better instructions scheduling */ .macro pixman_composite_src_0565_8888_process_pixblock_tail_head pixman_composite_src_0565_8888_process_pixblock_tail vst4.8 {d28, d29, d30, d31}, [DST_W, :128]! fetch_src_pixblock pixman_composite_src_0565_8888_process_pixblock_head cache_preload 8, 8 .endm generate_composite_function \ pixman_composite_src_0565_8888_asm_neon, 16, 0, 32, \ FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 10, /* prefetch distance */ \ default_init, \ default_cleanup, \ pixman_composite_src_0565_8888_process_pixblock_head, \ pixman_composite_src_0565_8888_process_pixblock_tail, \ pixman_composite_src_0565_8888_process_pixblock_tail_head /******************************************************************************/ .macro pixman_composite_add_8_8_process_pixblock_head vqadd.u8 q14, q0, q2 vqadd.u8 q15, q1, q3 .endm .macro pixman_composite_add_8_8_process_pixblock_tail .endm .macro pixman_composite_add_8_8_process_pixblock_tail_head fetch_src_pixblock PF add PF_X, PF_X, #32 PF tst PF_CTL, #0xF vld1.8 {d4, d5, d6, d7}, [DST_R, :128]! PF addne PF_X, PF_X, #32 PF subne PF_CTL, PF_CTL, #1 vst1.8 {d28, d29, d30, d31}, [DST_W, :128]! PF cmp PF_X, ORIG_W PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift] PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift] PF subge PF_X, PF_X, ORIG_W PF subges PF_CTL, PF_CTL, #0x10 vqadd.u8 q14, q0, q2 PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]! PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]! vqadd.u8 q15, q1, q3 .endm generate_composite_function \ pixman_composite_add_8_8_asm_neon, 8, 0, 8, \ FLAG_DST_READWRITE, \ 32, /* number of pixels, processed in a single block */ \ 10, /* prefetch distance */ \ default_init, \ default_cleanup, \ pixman_composite_add_8_8_process_pixblock_head, \ pixman_composite_add_8_8_process_pixblock_tail, \ pixman_composite_add_8_8_process_pixblock_tail_head /******************************************************************************/ .macro pixman_composite_add_8888_8888_process_pixblock_tail_head fetch_src_pixblock PF add PF_X, PF_X, #8 PF tst PF_CTL, #0xF vld1.32 {d4, d5, d6, d7}, [DST_R, :128]! PF addne PF_X, PF_X, #8 PF subne PF_CTL, PF_CTL, #1 vst1.32 {d28, d29, d30, d31}, [DST_W, :128]! PF cmp PF_X, ORIG_W PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift] PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift] PF subge PF_X, PF_X, ORIG_W PF subges PF_CTL, PF_CTL, #0x10 vqadd.u8 q14, q0, q2 PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]! PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]! 
vqadd.u8 q15, q1, q3 .endm generate_composite_function \ pixman_composite_add_8888_8888_asm_neon, 32, 0, 32, \ FLAG_DST_READWRITE, \ 8, /* number of pixels, processed in a single block */ \ 10, /* prefetch distance */ \ default_init, \ default_cleanup, \ pixman_composite_add_8_8_process_pixblock_head, \ pixman_composite_add_8_8_process_pixblock_tail, \ pixman_composite_add_8888_8888_process_pixblock_tail_head generate_composite_function_single_scanline \ pixman_composite_scanline_add_asm_neon, 32, 0, 32, \ FLAG_DST_READWRITE, \ 8, /* number of pixels, processed in a single block */ \ default_init, \ default_cleanup, \ pixman_composite_add_8_8_process_pixblock_head, \ pixman_composite_add_8_8_process_pixblock_tail, \ pixman_composite_add_8888_8888_process_pixblock_tail_head /******************************************************************************/ .macro pixman_composite_out_reverse_8888_8888_process_pixblock_head vmvn.8 d24, d3 /* get inverted alpha */ /* do alpha blending */ vmull.u8 q8, d24, d4 vmull.u8 q9, d24, d5 vmull.u8 q10, d24, d6 vmull.u8 q11, d24, d7 .endm .macro pixman_composite_out_reverse_8888_8888_process_pixblock_tail vrshr.u16 q14, q8, #8 vrshr.u16 q15, q9, #8 vrshr.u16 q12, q10, #8 vrshr.u16 q13, q11, #8 vraddhn.u16 d28, q14, q8 vraddhn.u16 d29, q15, q9 vraddhn.u16 d30, q12, q10 vraddhn.u16 d31, q13, q11 .endm .macro pixman_composite_out_reverse_8888_8888_process_pixblock_tail_head vld4.8 {d4, d5, d6, d7}, [DST_R, :128]! vrshr.u16 q14, q8, #8 PF add PF_X, PF_X, #8 PF tst PF_CTL, #0xF vrshr.u16 q15, q9, #8 vrshr.u16 q12, q10, #8 vrshr.u16 q13, q11, #8 PF addne PF_X, PF_X, #8 PF subne PF_CTL, PF_CTL, #1 vraddhn.u16 d28, q14, q8 vraddhn.u16 d29, q15, q9 PF cmp PF_X, ORIG_W vraddhn.u16 d30, q12, q10 vraddhn.u16 d31, q13, q11 fetch_src_pixblock PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift] vmvn.8 d22, d3 PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift] vst4.8 {d28, d29, d30, d31}, [DST_W, :128]! PF subge PF_X, PF_X, ORIG_W vmull.u8 q8, d22, d4 PF subges PF_CTL, PF_CTL, #0x10 vmull.u8 q9, d22, d5 PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]! vmull.u8 q10, d22, d6 PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]! vmull.u8 q11, d22, d7 .endm generate_composite_function_single_scanline \ pixman_composite_scanline_out_reverse_asm_neon, 32, 0, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ default_init, \ default_cleanup, \ pixman_composite_out_reverse_8888_8888_process_pixblock_head, \ pixman_composite_out_reverse_8888_8888_process_pixblock_tail, \ pixman_composite_out_reverse_8888_8888_process_pixblock_tail_head /******************************************************************************/ .macro pixman_composite_over_8888_8888_process_pixblock_head pixman_composite_out_reverse_8888_8888_process_pixblock_head .endm .macro pixman_composite_over_8888_8888_process_pixblock_tail pixman_composite_out_reverse_8888_8888_process_pixblock_tail vqadd.u8 q14, q0, q14 vqadd.u8 q15, q1, q15 .endm .macro pixman_composite_over_8888_8888_process_pixblock_tail_head vld4.8 {d4, d5, d6, d7}, [DST_R, :128]! 
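/*
 * over_8888_8888 is the premultiplied OVER operator:
 * dest = src + dest * (255 - src_alpha) / 255, with the final add done as a
 * saturating vqadd.u8. Illustrative per-channel C sketch (helper name not
 * from this file):
 *
 *     static inline uint8_t over_channel(uint8_t s, uint8_t sa, uint8_t d)
 *     {
 *         uint32_t t = (uint32_t)(255 - sa) * d;     // vmvn.8 + vmull.u8
 *         t = (t + ((t + 128) >> 8) + 128) >> 8;     // vrshr.u16 + vraddhn.u16
 *         t += s;                                    // vqadd.u8
 *         return t > 255 ? 255 : (uint8_t)t;
 *     }
 *
 * This tail_head macro interleaves the narrowing and add of one 8-pixel
 * block with the destination load, source fetch and multiplies of the next
 * block to hide instruction latencies.
 */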
vrshr.u16 q14, q8, #8 PF add PF_X, PF_X, #8 PF tst PF_CTL, #0xF vrshr.u16 q15, q9, #8 vrshr.u16 q12, q10, #8 vrshr.u16 q13, q11, #8 PF addne PF_X, PF_X, #8 PF subne PF_CTL, PF_CTL, #1 vraddhn.u16 d28, q14, q8 vraddhn.u16 d29, q15, q9 PF cmp PF_X, ORIG_W vraddhn.u16 d30, q12, q10 vraddhn.u16 d31, q13, q11 vqadd.u8 q14, q0, q14 vqadd.u8 q15, q1, q15 fetch_src_pixblock PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift] vmvn.8 d22, d3 PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift] vst4.8 {d28, d29, d30, d31}, [DST_W, :128]! PF subge PF_X, PF_X, ORIG_W vmull.u8 q8, d22, d4 PF subges PF_CTL, PF_CTL, #0x10 vmull.u8 q9, d22, d5 PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]! vmull.u8 q10, d22, d6 PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]! vmull.u8 q11, d22, d7 .endm generate_composite_function \ pixman_composite_over_8888_8888_asm_neon, 32, 0, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ default_init, \ default_cleanup, \ pixman_composite_over_8888_8888_process_pixblock_head, \ pixman_composite_over_8888_8888_process_pixblock_tail, \ pixman_composite_over_8888_8888_process_pixblock_tail_head generate_composite_function_single_scanline \ pixman_composite_scanline_over_asm_neon, 32, 0, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ default_init, \ default_cleanup, \ pixman_composite_over_8888_8888_process_pixblock_head, \ pixman_composite_over_8888_8888_process_pixblock_tail, \ pixman_composite_over_8888_8888_process_pixblock_tail_head /******************************************************************************/ .macro pixman_composite_over_n_8888_process_pixblock_head /* deinterleaved source pixels in {d0, d1, d2, d3} */ /* inverted alpha in {d24} */ /* destination pixels in {d4, d5, d6, d7} */ vmull.u8 q8, d24, d4 vmull.u8 q9, d24, d5 vmull.u8 q10, d24, d6 vmull.u8 q11, d24, d7 .endm .macro pixman_composite_over_n_8888_process_pixblock_tail vrshr.u16 q14, q8, #8 vrshr.u16 q15, q9, #8 vrshr.u16 q2, q10, #8 vrshr.u16 q3, q11, #8 vraddhn.u16 d28, q14, q8 vraddhn.u16 d29, q15, q9 vraddhn.u16 d30, q2, q10 vraddhn.u16 d31, q3, q11 vqadd.u8 q14, q0, q14 vqadd.u8 q15, q1, q15 .endm .macro pixman_composite_over_n_8888_process_pixblock_tail_head vrshr.u16 q14, q8, #8 vrshr.u16 q15, q9, #8 vrshr.u16 q2, q10, #8 vrshr.u16 q3, q11, #8 vraddhn.u16 d28, q14, q8 vraddhn.u16 d29, q15, q9 vraddhn.u16 d30, q2, q10 vraddhn.u16 d31, q3, q11 vld4.8 {d4, d5, d6, d7}, [DST_R, :128]! vqadd.u8 q14, q0, q14 PF add PF_X, PF_X, #8 PF tst PF_CTL, #0x0F PF addne PF_X, PF_X, #8 PF subne PF_CTL, PF_CTL, #1 vqadd.u8 q15, q1, q15 PF cmp PF_X, ORIG_W vmull.u8 q8, d24, d4 PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift] vmull.u8 q9, d24, d5 PF subge PF_X, PF_X, ORIG_W vmull.u8 q10, d24, d6 PF subges PF_CTL, PF_CTL, #0x10 vmull.u8 q11, d24, d7 PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]! vst4.8 {d28, d29, d30, d31}, [DST_W, :128]! 
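/*
 * The recurring vmull.u8 -> vrshr.u16 #8 -> vraddhn.u16 pattern used
 * throughout these macros is a correctly rounded division of an 8x8-bit
 * product by 255. Illustrative C sketch (helper name not from this file):
 *
 *     static inline uint8_t mul_8x8_div_255(uint8_t a, uint8_t b)
 *     {
 *         uint32_t t = (uint32_t)a * b;            // vmull.u8
 *         uint32_t r = (t + 128) >> 8;             // vrshr.u16 #8
 *         return (uint8_t)((t + r + 128) >> 8);    // vraddhn.u16
 *     }
 *
 * vraddhn adds its operands plus the rounding constant 128 and keeps only
 * the high byte, which is why no separate final shift appears in the code.
 */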
.endm .macro pixman_composite_over_n_8888_init add DUMMY, sp, #ARGS_STACK_OFFSET vld1.32 {d3[0]}, [DUMMY] vdup.8 d0, d3[0] vdup.8 d1, d3[1] vdup.8 d2, d3[2] vdup.8 d3, d3[3] vmvn.8 d24, d3 /* get inverted alpha */ .endm generate_composite_function \ pixman_composite_over_n_8888_asm_neon, 0, 0, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_over_n_8888_init, \ default_cleanup, \ pixman_composite_over_8888_8888_process_pixblock_head, \ pixman_composite_over_8888_8888_process_pixblock_tail, \ pixman_composite_over_n_8888_process_pixblock_tail_head /******************************************************************************/ .macro pixman_composite_over_reverse_n_8888_process_pixblock_tail_head vrshr.u16 q14, q8, #8 PF add PF_X, PF_X, #8 PF tst PF_CTL, #0xF vrshr.u16 q15, q9, #8 vrshr.u16 q12, q10, #8 vrshr.u16 q13, q11, #8 PF addne PF_X, PF_X, #8 PF subne PF_CTL, PF_CTL, #1 vraddhn.u16 d28, q14, q8 vraddhn.u16 d29, q15, q9 PF cmp PF_X, ORIG_W vraddhn.u16 d30, q12, q10 vraddhn.u16 d31, q13, q11 vqadd.u8 q14, q0, q14 vqadd.u8 q15, q1, q15 vld4.8 {d0, d1, d2, d3}, [DST_R, :128]! vmvn.8 d22, d3 PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift] vst4.8 {d28, d29, d30, d31}, [DST_W, :128]! PF subge PF_X, PF_X, ORIG_W vmull.u8 q8, d22, d4 PF subges PF_CTL, PF_CTL, #0x10 vmull.u8 q9, d22, d5 vmull.u8 q10, d22, d6 PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]! vmull.u8 q11, d22, d7 .endm .macro pixman_composite_over_reverse_n_8888_init add DUMMY, sp, #ARGS_STACK_OFFSET vld1.32 {d7[0]}, [DUMMY] vdup.8 d4, d7[0] vdup.8 d5, d7[1] vdup.8 d6, d7[2] vdup.8 d7, d7[3] .endm generate_composite_function \ pixman_composite_over_reverse_n_8888_asm_neon, 0, 0, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_over_reverse_n_8888_init, \ default_cleanup, \ pixman_composite_over_8888_8888_process_pixblock_head, \ pixman_composite_over_8888_8888_process_pixblock_tail, \ pixman_composite_over_reverse_n_8888_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 0, /* dst_r_basereg */ \ 4, /* src_basereg */ \ 24 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_over_8888_8_0565_process_pixblock_head vmull.u8 q0, d24, d8 /* IN for SRC pixels (part1) */ vmull.u8 q1, d24, d9 vmull.u8 q6, d24, d10 vmull.u8 q7, d24, d11 vshrn.u16 d6, q2, #8 /* convert DST_R data to 32-bpp (part1) */ vshrn.u16 d7, q2, #3 vsli.u16 q2, q2, #5 vrshr.u16 q8, q0, #8 /* IN for SRC pixels (part2) */ vrshr.u16 q9, q1, #8 vrshr.u16 q10, q6, #8 vrshr.u16 q11, q7, #8 vraddhn.u16 d0, q0, q8 vraddhn.u16 d1, q1, q9 vraddhn.u16 d2, q6, q10 vraddhn.u16 d3, q7, q11 vsri.u8 d6, d6, #5 /* convert DST_R data to 32-bpp (part2) */ vsri.u8 d7, d7, #6 vmvn.8 d3, d3 vshrn.u16 d30, q2, #2 vmull.u8 q8, d3, d6 /* now do alpha blending */ vmull.u8 q9, d3, d7 vmull.u8 q10, d3, d30 .endm .macro pixman_composite_over_8888_8_0565_process_pixblock_tail /* 3 cycle bubble (after vmull.u8) */ vrshr.u16 q13, q8, #8 vrshr.u16 q11, q9, #8 vrshr.u16 q15, q10, #8 vraddhn.u16 d16, q8, q13 vraddhn.u16 d27, q9, q11 vraddhn.u16 d26, q10, q15 vqadd.u8 d16, d2, d16 /* 1 cycle bubble */ vqadd.u8 q9, q0, q13 vshll.u8 q14, d16, #8 /* convert to 16bpp */ vshll.u8 q8, d19, #8 vshll.u8 q9, d18, #8 vsri.u16 q14, q8, #5 /* 1 cycle bubble */ vsri.u16 q14, q9, #11 .endm .macro 
pixman_composite_over_8888_8_0565_process_pixblock_tail_head vld1.16 {d4, d5}, [DST_R, :128]! vshrn.u16 d6, q2, #8 fetch_mask_pixblock vshrn.u16 d7, q2, #3 fetch_src_pixblock vmull.u8 q6, d24, d10 vrshr.u16 q13, q8, #8 vrshr.u16 q11, q9, #8 vrshr.u16 q15, q10, #8 vraddhn.u16 d16, q8, q13 vraddhn.u16 d27, q9, q11 vraddhn.u16 d26, q10, q15 vqadd.u8 d16, d2, d16 vmull.u8 q1, d24, d9 vqadd.u8 q9, q0, q13 vshll.u8 q14, d16, #8 vmull.u8 q0, d24, d8 vshll.u8 q8, d19, #8 vshll.u8 q9, d18, #8 vsri.u16 q14, q8, #5 vmull.u8 q7, d24, d11 vsri.u16 q14, q9, #11 cache_preload 8, 8 vsli.u16 q2, q2, #5 vrshr.u16 q8, q0, #8 vrshr.u16 q9, q1, #8 vrshr.u16 q10, q6, #8 vrshr.u16 q11, q7, #8 vraddhn.u16 d0, q0, q8 vraddhn.u16 d1, q1, q9 vraddhn.u16 d2, q6, q10 vraddhn.u16 d3, q7, q11 vsri.u8 d6, d6, #5 vsri.u8 d7, d7, #6 vmvn.8 d3, d3 vshrn.u16 d30, q2, #2 vst1.16 {d28, d29}, [DST_W, :128]! vmull.u8 q8, d3, d6 vmull.u8 q9, d3, d7 vmull.u8 q10, d3, d30 .endm generate_composite_function \ pixman_composite_over_8888_8_0565_asm_neon, 32, 8, 16, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ default_init_need_all_regs, \ default_cleanup_need_all_regs, \ pixman_composite_over_8888_8_0565_process_pixblock_head, \ pixman_composite_over_8888_8_0565_process_pixblock_tail, \ pixman_composite_over_8888_8_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 8, /* src_basereg */ \ 24 /* mask_basereg */ /******************************************************************************/ /* * This function needs a special initialization of solid mask. * Solid source pixel data is fetched from stack at ARGS_STACK_OFFSET * offset, split into color components and replicated in d8-d11 * registers. Additionally, this function needs all the NEON registers, * so it has to save d8-d15 registers which are callee saved according * to ABI. These registers are restored from 'cleanup' macro. All the * other NEON registers are caller saved, so can be clobbered freely * without introducing any problems. 
*/

.macro pixman_composite_over_n_8_0565_init
    add         DUMMY, sp, #ARGS_STACK_OFFSET
    vpush       {d8-d15}
    vld1.32     {d11[0]}, [DUMMY]
    vdup.8      d8, d11[0]
    vdup.8      d9, d11[1]
    vdup.8      d10, d11[2]
    vdup.8      d11, d11[3]
.endm

.macro pixman_composite_over_n_8_0565_cleanup
    vpop        {d8-d15}
.endm

generate_composite_function \
    pixman_composite_over_n_8_0565_asm_neon, 0, 8, 16, \
    FLAG_DST_READWRITE, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_over_n_8_0565_init, \
    pixman_composite_over_n_8_0565_cleanup, \
    pixman_composite_over_8888_8_0565_process_pixblock_head, \
    pixman_composite_over_8888_8_0565_process_pixblock_tail, \
    pixman_composite_over_8888_8_0565_process_pixblock_tail_head

/******************************************************************************/

.macro pixman_composite_over_8888_n_0565_init
    add         DUMMY, sp, #(ARGS_STACK_OFFSET + 8)
    vpush       {d8-d15}
    vld1.32     {d24[0]}, [DUMMY]
    vdup.8      d24, d24[3]
.endm

.macro pixman_composite_over_8888_n_0565_cleanup
    vpop        {d8-d15}
.endm

generate_composite_function \
    pixman_composite_over_8888_n_0565_asm_neon, 32, 0, 16, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_over_8888_n_0565_init, \
    pixman_composite_over_8888_n_0565_cleanup, \
    pixman_composite_over_8888_8_0565_process_pixblock_head, \
    pixman_composite_over_8888_8_0565_process_pixblock_tail, \
    pixman_composite_over_8888_8_0565_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    4, /* dst_r_basereg */ \
    8, /* src_basereg */ \
    24 /* mask_basereg */

/******************************************************************************/

.macro pixman_composite_src_0565_0565_process_pixblock_head
.endm

.macro pixman_composite_src_0565_0565_process_pixblock_tail
.endm

.macro pixman_composite_src_0565_0565_process_pixblock_tail_head
    vst1.16     {d0, d1, d2, d3}, [DST_W, :128]!
    fetch_src_pixblock
    cache_preload 16, 16
.endm

generate_composite_function \
    pixman_composite_src_0565_0565_asm_neon, 16, 0, 16, \
    FLAG_DST_WRITEONLY, \
    16, /* number of pixels, processed in a single block */ \
    10, /* prefetch distance */ \
    default_init, \
    default_cleanup, \
    pixman_composite_src_0565_0565_process_pixblock_head, \
    pixman_composite_src_0565_0565_process_pixblock_tail, \
    pixman_composite_src_0565_0565_process_pixblock_tail_head, \
    0, /* dst_w_basereg */ \
    0, /* dst_r_basereg */ \
    0, /* src_basereg */ \
    0 /* mask_basereg */

/******************************************************************************/

.macro pixman_composite_src_n_8_process_pixblock_head
.endm

.macro pixman_composite_src_n_8_process_pixblock_tail
.endm

.macro pixman_composite_src_n_8_process_pixblock_tail_head
    vst1.8      {d0, d1, d2, d3}, [DST_W, :128]!
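/*
 * The solid-fill init macros that follow load the 32-bit solid value from
 * the argument area and widen it with shift-left-and-insert (vsli) steps
 * that double the replicated width each time. Assuming the low bits of the
 * loaded word already hold the destination-format solid value (one byte
 * here, 16 or 32 bits for the 0565/8888 variants), the idea in scalar form
 * is, as an illustrative C sketch:
 *
 *     uint64_t v = solid;    // low byte = a8 solid value (assumption)
 *     v |= v << 8;           // 2 copies  (vsli.u64 #8)
 *     v |= v << 16;          // 4 copies  (vsli.u64 #16)
 *     v |= v << 32;          // 8 copies  (vsli.u64 #32)
 *
 * The wider variants simply start one or two steps later.
 */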
.endm .macro pixman_composite_src_n_8_init add DUMMY, sp, #ARGS_STACK_OFFSET vld1.32 {d0[0]}, [DUMMY] vsli.u64 d0, d0, #8 vsli.u64 d0, d0, #16 vsli.u64 d0, d0, #32 vorr d1, d0, d0 vorr q1, q0, q0 .endm .macro pixman_composite_src_n_8_cleanup .endm generate_composite_function \ pixman_composite_src_n_8_asm_neon, 0, 0, 8, \ FLAG_DST_WRITEONLY, \ 32, /* number of pixels, processed in a single block */ \ 0, /* prefetch distance */ \ pixman_composite_src_n_8_init, \ pixman_composite_src_n_8_cleanup, \ pixman_composite_src_n_8_process_pixblock_head, \ pixman_composite_src_n_8_process_pixblock_tail, \ pixman_composite_src_n_8_process_pixblock_tail_head, \ 0, /* dst_w_basereg */ \ 0, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 0 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_src_n_0565_process_pixblock_head .endm .macro pixman_composite_src_n_0565_process_pixblock_tail .endm .macro pixman_composite_src_n_0565_process_pixblock_tail_head vst1.16 {d0, d1, d2, d3}, [DST_W, :128]! .endm .macro pixman_composite_src_n_0565_init add DUMMY, sp, #ARGS_STACK_OFFSET vld1.32 {d0[0]}, [DUMMY] vsli.u64 d0, d0, #16 vsli.u64 d0, d0, #32 vorr d1, d0, d0 vorr q1, q0, q0 .endm .macro pixman_composite_src_n_0565_cleanup .endm generate_composite_function \ pixman_composite_src_n_0565_asm_neon, 0, 0, 16, \ FLAG_DST_WRITEONLY, \ 16, /* number of pixels, processed in a single block */ \ 0, /* prefetch distance */ \ pixman_composite_src_n_0565_init, \ pixman_composite_src_n_0565_cleanup, \ pixman_composite_src_n_0565_process_pixblock_head, \ pixman_composite_src_n_0565_process_pixblock_tail, \ pixman_composite_src_n_0565_process_pixblock_tail_head, \ 0, /* dst_w_basereg */ \ 0, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 0 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_src_n_8888_process_pixblock_head .endm .macro pixman_composite_src_n_8888_process_pixblock_tail .endm .macro pixman_composite_src_n_8888_process_pixblock_tail_head vst1.32 {d0, d1, d2, d3}, [DST_W, :128]! .endm .macro pixman_composite_src_n_8888_init add DUMMY, sp, #ARGS_STACK_OFFSET vld1.32 {d0[0]}, [DUMMY] vsli.u64 d0, d0, #32 vorr d1, d0, d0 vorr q1, q0, q0 .endm .macro pixman_composite_src_n_8888_cleanup .endm generate_composite_function \ pixman_composite_src_n_8888_asm_neon, 0, 0, 32, \ FLAG_DST_WRITEONLY, \ 8, /* number of pixels, processed in a single block */ \ 0, /* prefetch distance */ \ pixman_composite_src_n_8888_init, \ pixman_composite_src_n_8888_cleanup, \ pixman_composite_src_n_8888_process_pixblock_head, \ pixman_composite_src_n_8888_process_pixblock_tail, \ pixman_composite_src_n_8888_process_pixblock_tail_head, \ 0, /* dst_w_basereg */ \ 0, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 0 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_src_8888_8888_process_pixblock_head .endm .macro pixman_composite_src_8888_8888_process_pixblock_tail .endm .macro pixman_composite_src_8888_8888_process_pixblock_tail_head vst1.32 {d0, d1, d2, d3}, [DST_W, :128]! 
fetch_src_pixblock cache_preload 8, 8 .endm generate_composite_function \ pixman_composite_src_8888_8888_asm_neon, 32, 0, 32, \ FLAG_DST_WRITEONLY, \ 8, /* number of pixels, processed in a single block */ \ 10, /* prefetch distance */ \ default_init, \ default_cleanup, \ pixman_composite_src_8888_8888_process_pixblock_head, \ pixman_composite_src_8888_8888_process_pixblock_tail, \ pixman_composite_src_8888_8888_process_pixblock_tail_head, \ 0, /* dst_w_basereg */ \ 0, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 0 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_src_x888_8888_process_pixblock_head vorr q0, q0, q2 vorr q1, q1, q2 .endm .macro pixman_composite_src_x888_8888_process_pixblock_tail .endm .macro pixman_composite_src_x888_8888_process_pixblock_tail_head vst1.32 {d0, d1, d2, d3}, [DST_W, :128]! fetch_src_pixblock vorr q0, q0, q2 vorr q1, q1, q2 cache_preload 8, 8 .endm .macro pixman_composite_src_x888_8888_init vmov.u8 q2, #0xFF vshl.u32 q2, q2, #24 .endm generate_composite_function \ pixman_composite_src_x888_8888_asm_neon, 32, 0, 32, \ FLAG_DST_WRITEONLY, \ 8, /* number of pixels, processed in a single block */ \ 10, /* prefetch distance */ \ pixman_composite_src_x888_8888_init, \ default_cleanup, \ pixman_composite_src_x888_8888_process_pixblock_head, \ pixman_composite_src_x888_8888_process_pixblock_tail, \ pixman_composite_src_x888_8888_process_pixblock_tail_head, \ 0, /* dst_w_basereg */ \ 0, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 0 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_src_n_8_8888_process_pixblock_head /* expecting solid source in {d0, d1, d2, d3} */ /* mask is in d24 (d25, d26, d27 are unused) */ /* in */ vmull.u8 q8, d24, d0 vmull.u8 q9, d24, d1 vmull.u8 q10, d24, d2 vmull.u8 q11, d24, d3 vrsra.u16 q8, q8, #8 vrsra.u16 q9, q9, #8 vrsra.u16 q10, q10, #8 vrsra.u16 q11, q11, #8 .endm .macro pixman_composite_src_n_8_8888_process_pixblock_tail vrshrn.u16 d28, q8, #8 vrshrn.u16 d29, q9, #8 vrshrn.u16 d30, q10, #8 vrshrn.u16 d31, q11, #8 .endm .macro pixman_composite_src_n_8_8888_process_pixblock_tail_head fetch_mask_pixblock PF add PF_X, PF_X, #8 vrshrn.u16 d28, q8, #8 PF tst PF_CTL, #0x0F vrshrn.u16 d29, q9, #8 PF addne PF_X, PF_X, #8 vrshrn.u16 d30, q10, #8 PF subne PF_CTL, PF_CTL, #1 vrshrn.u16 d31, q11, #8 PF cmp PF_X, ORIG_W vmull.u8 q8, d24, d0 PF pld, [PF_MASK, PF_X, lsl #mask_bpp_shift] vmull.u8 q9, d24, d1 PF subge PF_X, PF_X, ORIG_W vmull.u8 q10, d24, d2 PF subges PF_CTL, PF_CTL, #0x10 vmull.u8 q11, d24, d3 PF ldrgeb DUMMY, [PF_MASK, MASK_STRIDE, lsl #mask_bpp_shift]! vst4.8 {d28, d29, d30, d31}, [DST_W, :128]! 
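/*
 * The PF-prefixed lines above are the read-ahead prefetching used by these
 * fast paths: a cursor (PF_X) runs ahead of the pixels being processed, a
 * pld touches the cache line at that offset, and once the cursor passes the
 * end of the scanline it wraps while the base pointer is pushed to the next
 * row by a dummy byte load with writeback. Rough per-block C sketch
 * (PF_CTL bookkeeping omitted; names illustrative):
 *
 *     pf_x += pixels_per_block;
 *     prefetch(mask_ptr + (pf_x << mask_bpp_shift));
 *     if (pf_x >= orig_w) {
 *         pf_x     -= orig_w;
 *         mask_ptr += mask_stride << mask_bpp_shift;  // ldrgeb ...! side effect
 *     }
 */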
vrsra.u16 q8, q8, #8 vrsra.u16 q9, q9, #8 vrsra.u16 q10, q10, #8 vrsra.u16 q11, q11, #8 .endm .macro pixman_composite_src_n_8_8888_init add DUMMY, sp, #ARGS_STACK_OFFSET vld1.32 {d3[0]}, [DUMMY] vdup.8 d0, d3[0] vdup.8 d1, d3[1] vdup.8 d2, d3[2] vdup.8 d3, d3[3] .endm .macro pixman_composite_src_n_8_8888_cleanup .endm generate_composite_function \ pixman_composite_src_n_8_8888_asm_neon, 0, 8, 32, \ FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_src_n_8_8888_init, \ pixman_composite_src_n_8_8888_cleanup, \ pixman_composite_src_n_8_8888_process_pixblock_head, \ pixman_composite_src_n_8_8888_process_pixblock_tail, \ pixman_composite_src_n_8_8888_process_pixblock_tail_head, \ /******************************************************************************/ .macro pixman_composite_src_n_8_8_process_pixblock_head vmull.u8 q0, d24, d16 vmull.u8 q1, d25, d16 vmull.u8 q2, d26, d16 vmull.u8 q3, d27, d16 vrsra.u16 q0, q0, #8 vrsra.u16 q1, q1, #8 vrsra.u16 q2, q2, #8 vrsra.u16 q3, q3, #8 .endm .macro pixman_composite_src_n_8_8_process_pixblock_tail vrshrn.u16 d28, q0, #8 vrshrn.u16 d29, q1, #8 vrshrn.u16 d30, q2, #8 vrshrn.u16 d31, q3, #8 .endm .macro pixman_composite_src_n_8_8_process_pixblock_tail_head fetch_mask_pixblock PF add PF_X, PF_X, #8 vrshrn.u16 d28, q0, #8 PF tst PF_CTL, #0x0F vrshrn.u16 d29, q1, #8 PF addne PF_X, PF_X, #8 vrshrn.u16 d30, q2, #8 PF subne PF_CTL, PF_CTL, #1 vrshrn.u16 d31, q3, #8 PF cmp PF_X, ORIG_W vmull.u8 q0, d24, d16 PF pld, [PF_MASK, PF_X, lsl #mask_bpp_shift] vmull.u8 q1, d25, d16 PF subge PF_X, PF_X, ORIG_W vmull.u8 q2, d26, d16 PF subges PF_CTL, PF_CTL, #0x10 vmull.u8 q3, d27, d16 PF ldrgeb DUMMY, [PF_MASK, MASK_STRIDE, lsl #mask_bpp_shift]! vst1.8 {d28, d29, d30, d31}, [DST_W, :128]! 
vrsra.u16 q0, q0, #8 vrsra.u16 q1, q1, #8 vrsra.u16 q2, q2, #8 vrsra.u16 q3, q3, #8 .endm .macro pixman_composite_src_n_8_8_init add DUMMY, sp, #ARGS_STACK_OFFSET vld1.32 {d16[0]}, [DUMMY] vdup.8 d16, d16[3] .endm .macro pixman_composite_src_n_8_8_cleanup .endm generate_composite_function \ pixman_composite_src_n_8_8_asm_neon, 0, 8, 8, \ FLAG_DST_WRITEONLY, \ 32, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_src_n_8_8_init, \ pixman_composite_src_n_8_8_cleanup, \ pixman_composite_src_n_8_8_process_pixblock_head, \ pixman_composite_src_n_8_8_process_pixblock_tail, \ pixman_composite_src_n_8_8_process_pixblock_tail_head /******************************************************************************/ .macro pixman_composite_over_n_8_8888_process_pixblock_head /* expecting deinterleaved source data in {d8, d9, d10, d11} */ /* d8 - blue, d9 - green, d10 - red, d11 - alpha */ /* and destination data in {d4, d5, d6, d7} */ /* mask is in d24 (d25, d26, d27 are unused) */ /* in */ vmull.u8 q6, d24, d8 vmull.u8 q7, d24, d9 vmull.u8 q8, d24, d10 vmull.u8 q9, d24, d11 vrshr.u16 q10, q6, #8 vrshr.u16 q11, q7, #8 vrshr.u16 q12, q8, #8 vrshr.u16 q13, q9, #8 vraddhn.u16 d0, q6, q10 vraddhn.u16 d1, q7, q11 vraddhn.u16 d2, q8, q12 vraddhn.u16 d3, q9, q13 vmvn.8 d25, d3 /* get inverted alpha */ /* source: d0 - blue, d1 - green, d2 - red, d3 - alpha */ /* destination: d4 - blue, d5 - green, d6 - red, d7 - alpha */ /* now do alpha blending */ vmull.u8 q8, d25, d4 vmull.u8 q9, d25, d5 vmull.u8 q10, d25, d6 vmull.u8 q11, d25, d7 .endm .macro pixman_composite_over_n_8_8888_process_pixblock_tail vrshr.u16 q14, q8, #8 vrshr.u16 q15, q9, #8 vrshr.u16 q6, q10, #8 vrshr.u16 q7, q11, #8 vraddhn.u16 d28, q14, q8 vraddhn.u16 d29, q15, q9 vraddhn.u16 d30, q6, q10 vraddhn.u16 d31, q7, q11 vqadd.u8 q14, q0, q14 vqadd.u8 q15, q1, q15 .endm .macro pixman_composite_over_n_8_8888_process_pixblock_tail_head vrshr.u16 q14, q8, #8 vld4.8 {d4, d5, d6, d7}, [DST_R, :128]! vrshr.u16 q15, q9, #8 fetch_mask_pixblock vrshr.u16 q6, q10, #8 PF add PF_X, PF_X, #8 vrshr.u16 q7, q11, #8 PF tst PF_CTL, #0x0F vraddhn.u16 d28, q14, q8 PF addne PF_X, PF_X, #8 vraddhn.u16 d29, q15, q9 PF subne PF_CTL, PF_CTL, #1 vraddhn.u16 d30, q6, q10 PF cmp PF_X, ORIG_W vraddhn.u16 d31, q7, q11 PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift] vmull.u8 q6, d24, d8 PF pld, [PF_MASK, PF_X, lsl #mask_bpp_shift] vmull.u8 q7, d24, d9 PF subge PF_X, PF_X, ORIG_W vmull.u8 q8, d24, d10 PF subges PF_CTL, PF_CTL, #0x10 vmull.u8 q9, d24, d11 PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]! vqadd.u8 q14, q0, q14 PF ldrgeb DUMMY, [PF_MASK, MASK_STRIDE, lsl #mask_bpp_shift]! vqadd.u8 q15, q1, q15 vrshr.u16 q10, q6, #8 vrshr.u16 q11, q7, #8 vrshr.u16 q12, q8, #8 vrshr.u16 q13, q9, #8 vraddhn.u16 d0, q6, q10 vraddhn.u16 d1, q7, q11 vraddhn.u16 d2, q8, q12 vraddhn.u16 d3, q9, q13 vst4.8 {d28, d29, d30, d31}, [DST_W, :128]! 
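/*
 * over_n_8_8888 first applies the a8 mask to the solid source and then
 * performs a normal premultiplied OVER against the destination. Per
 * channel, as an illustrative C sketch (mul_8x8_div_255 and sat_add_u8 as
 * in the earlier sketches):
 *
 *     uint8_t m  = mask[i];                          // a8 mask value
 *     uint8_t sa = mul_8x8_div_255(src_a, m);        // src IN mask, alpha
 *     uint8_t sc = mul_8x8_div_255(src_c, m);        // ... and each channel
 *     dst_c[i]   = sat_add_u8(sc, mul_8x8_div_255(255 - sa, dst_c[i]));
 *
 * In the register allocation used here the masked source ends up in d0-d3
 * and its inverted alpha in d25 before the destination multiplies start.
 */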
vmvn.8 d25, d3 vmull.u8 q8, d25, d4 vmull.u8 q9, d25, d5 vmull.u8 q10, d25, d6 vmull.u8 q11, d25, d7 .endm .macro pixman_composite_over_n_8_8888_init add DUMMY, sp, #ARGS_STACK_OFFSET vpush {d8-d15} vld1.32 {d11[0]}, [DUMMY] vdup.8 d8, d11[0] vdup.8 d9, d11[1] vdup.8 d10, d11[2] vdup.8 d11, d11[3] .endm .macro pixman_composite_over_n_8_8888_cleanup vpop {d8-d15} .endm generate_composite_function \ pixman_composite_over_n_8_8888_asm_neon, 0, 8, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_over_n_8_8888_init, \ pixman_composite_over_n_8_8888_cleanup, \ pixman_composite_over_n_8_8888_process_pixblock_head, \ pixman_composite_over_n_8_8888_process_pixblock_tail, \ pixman_composite_over_n_8_8888_process_pixblock_tail_head /******************************************************************************/ .macro pixman_composite_over_n_8_8_process_pixblock_head vmull.u8 q0, d24, d8 vmull.u8 q1, d25, d8 vmull.u8 q6, d26, d8 vmull.u8 q7, d27, d8 vrshr.u16 q10, q0, #8 vrshr.u16 q11, q1, #8 vrshr.u16 q12, q6, #8 vrshr.u16 q13, q7, #8 vraddhn.u16 d0, q0, q10 vraddhn.u16 d1, q1, q11 vraddhn.u16 d2, q6, q12 vraddhn.u16 d3, q7, q13 vmvn.8 q12, q0 vmvn.8 q13, q1 vmull.u8 q8, d24, d4 vmull.u8 q9, d25, d5 vmull.u8 q10, d26, d6 vmull.u8 q11, d27, d7 .endm .macro pixman_composite_over_n_8_8_process_pixblock_tail vrshr.u16 q14, q8, #8 vrshr.u16 q15, q9, #8 vrshr.u16 q12, q10, #8 vrshr.u16 q13, q11, #8 vraddhn.u16 d28, q14, q8 vraddhn.u16 d29, q15, q9 vraddhn.u16 d30, q12, q10 vraddhn.u16 d31, q13, q11 vqadd.u8 q14, q0, q14 vqadd.u8 q15, q1, q15 .endm /* TODO: expand macros and do better instructions scheduling */ .macro pixman_composite_over_n_8_8_process_pixblock_tail_head vld1.8 {d4, d5, d6, d7}, [DST_R, :128]! pixman_composite_over_n_8_8_process_pixblock_tail fetch_mask_pixblock cache_preload 32, 32 vst1.8 {d28, d29, d30, d31}, [DST_W, :128]! 
pixman_composite_over_n_8_8_process_pixblock_head .endm .macro pixman_composite_over_n_8_8_init add DUMMY, sp, #ARGS_STACK_OFFSET vpush {d8-d15} vld1.32 {d8[0]}, [DUMMY] vdup.8 d8, d8[3] .endm .macro pixman_composite_over_n_8_8_cleanup vpop {d8-d15} .endm generate_composite_function \ pixman_composite_over_n_8_8_asm_neon, 0, 8, 8, \ FLAG_DST_READWRITE, \ 32, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_over_n_8_8_init, \ pixman_composite_over_n_8_8_cleanup, \ pixman_composite_over_n_8_8_process_pixblock_head, \ pixman_composite_over_n_8_8_process_pixblock_tail, \ pixman_composite_over_n_8_8_process_pixblock_tail_head /******************************************************************************/ .macro pixman_composite_over_n_8888_8888_ca_process_pixblock_head /* * 'combine_mask_ca' replacement * * input: solid src (n) in {d8, d9, d10, d11} * dest in {d4, d5, d6, d7 } * mask in {d24, d25, d26, d27} * output: updated src in {d0, d1, d2, d3 } * updated mask in {d24, d25, d26, d3 } */ vmull.u8 q0, d24, d8 vmull.u8 q1, d25, d9 vmull.u8 q6, d26, d10 vmull.u8 q7, d27, d11 vmull.u8 q9, d11, d25 vmull.u8 q12, d11, d24 vmull.u8 q13, d11, d26 vrshr.u16 q8, q0, #8 vrshr.u16 q10, q1, #8 vrshr.u16 q11, q6, #8 vraddhn.u16 d0, q0, q8 vraddhn.u16 d1, q1, q10 vraddhn.u16 d2, q6, q11 vrshr.u16 q11, q12, #8 vrshr.u16 q8, q9, #8 vrshr.u16 q6, q13, #8 vrshr.u16 q10, q7, #8 vraddhn.u16 d24, q12, q11 vraddhn.u16 d25, q9, q8 vraddhn.u16 d26, q13, q6 vraddhn.u16 d3, q7, q10 /* * 'combine_over_ca' replacement * * output: updated dest in {d28, d29, d30, d31} */ vmvn.8 q12, q12 vmvn.8 d26, d26 vmull.u8 q8, d24, d4 vmull.u8 q9, d25, d5 vmvn.8 d27, d3 vmull.u8 q10, d26, d6 vmull.u8 q11, d27, d7 .endm .macro pixman_composite_over_n_8888_8888_ca_process_pixblock_tail /* ... continue 'combine_over_ca' replacement */ vrshr.u16 q14, q8, #8 vrshr.u16 q15, q9, #8 vrshr.u16 q6, q10, #8 vrshr.u16 q7, q11, #8 vraddhn.u16 d28, q14, q8 vraddhn.u16 d29, q15, q9 vraddhn.u16 d30, q6, q10 vraddhn.u16 d31, q7, q11 vqadd.u8 q14, q0, q14 vqadd.u8 q15, q1, q15 .endm .macro pixman_composite_over_n_8888_8888_ca_process_pixblock_tail_head vrshr.u16 q14, q8, #8 vrshr.u16 q15, q9, #8 vld4.8 {d4, d5, d6, d7}, [DST_R, :128]! vrshr.u16 q6, q10, #8 vrshr.u16 q7, q11, #8 vraddhn.u16 d28, q14, q8 vraddhn.u16 d29, q15, q9 vraddhn.u16 d30, q6, q10 vraddhn.u16 d31, q7, q11 fetch_mask_pixblock vqadd.u8 q14, q0, q14 vqadd.u8 q15, q1, q15 cache_preload 8, 8 pixman_composite_over_n_8888_8888_ca_process_pixblock_head vst4.8 {d28, d29, d30, d31}, [DST_W, :128]! 
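/*
 * The _ca ("component alpha") variant above keeps a separate mask value per
 * colour channel. The two steps these macros replace are, per channel, as
 * an illustrative C sketch (helpers as in the earlier sketches):
 *
 *     // combine_mask_ca
 *     uint8_t sc = mul_8x8_div_255(src_c, mask_c);   // src  IN mask
 *     uint8_t mc = mul_8x8_div_255(src_a, mask_c);   // mask IN src alpha
 *     // combine_over_ca
 *     dst_c = sat_add_u8(sc, mul_8x8_div_255(255 - mc, dst_c));
 *
 * That is why both a modified source (d0-d3) and a modified mask
 * (d24-d26 plus d3) are produced before the destination multiplies.
 */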
.endm .macro pixman_composite_over_n_8888_8888_ca_init add DUMMY, sp, #ARGS_STACK_OFFSET vpush {d8-d15} vld1.32 {d11[0]}, [DUMMY] vdup.8 d8, d11[0] vdup.8 d9, d11[1] vdup.8 d10, d11[2] vdup.8 d11, d11[3] .endm .macro pixman_composite_over_n_8888_8888_ca_cleanup vpop {d8-d15} .endm generate_composite_function \ pixman_composite_over_n_8888_8888_ca_asm_neon, 0, 32, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_over_n_8888_8888_ca_init, \ pixman_composite_over_n_8888_8888_ca_cleanup, \ pixman_composite_over_n_8888_8888_ca_process_pixblock_head, \ pixman_composite_over_n_8888_8888_ca_process_pixblock_tail, \ pixman_composite_over_n_8888_8888_ca_process_pixblock_tail_head /******************************************************************************/ .macro pixman_composite_over_n_8888_0565_ca_process_pixblock_head /* * 'combine_mask_ca' replacement * * input: solid src (n) in {d8, d9, d10, d11} [B, G, R, A] * mask in {d24, d25, d26} [B, G, R] * output: updated src in {d0, d1, d2 } [B, G, R] * updated mask in {d24, d25, d26} [B, G, R] */ vmull.u8 q0, d24, d8 vmull.u8 q1, d25, d9 vmull.u8 q6, d26, d10 vmull.u8 q9, d11, d25 vmull.u8 q12, d11, d24 vmull.u8 q13, d11, d26 vrshr.u16 q8, q0, #8 vrshr.u16 q10, q1, #8 vrshr.u16 q11, q6, #8 vraddhn.u16 d0, q0, q8 vraddhn.u16 d1, q1, q10 vraddhn.u16 d2, q6, q11 vrshr.u16 q11, q12, #8 vrshr.u16 q8, q9, #8 vrshr.u16 q6, q13, #8 vraddhn.u16 d24, q12, q11 vraddhn.u16 d25, q9, q8 /* * convert 8 r5g6b5 pixel data from {d4, d5} to planar 8-bit format * and put data into d16 - blue, d17 - green, d18 - red */ vshrn.u16 d17, q2, #3 vshrn.u16 d18, q2, #8 vraddhn.u16 d26, q13, q6 vsli.u16 q2, q2, #5 vsri.u8 d18, d18, #5 vsri.u8 d17, d17, #6 /* * 'combine_over_ca' replacement * * output: updated dest in d16 - blue, d17 - green, d18 - red */ vmvn.8 q12, q12 vshrn.u16 d16, q2, #2 vmvn.8 d26, d26 vmull.u8 q6, d16, d24 vmull.u8 q7, d17, d25 vmull.u8 q11, d18, d26 .endm .macro pixman_composite_over_n_8888_0565_ca_process_pixblock_tail /* ... continue 'combine_over_ca' replacement */ vrshr.u16 q10, q6, #8 vrshr.u16 q14, q7, #8 vrshr.u16 q15, q11, #8 vraddhn.u16 d16, q10, q6 vraddhn.u16 d17, q14, q7 vraddhn.u16 d18, q15, q11 vqadd.u8 q8, q0, q8 vqadd.u8 d18, d2, d18 /* * convert the results in d16, d17, d18 to r5g6b5 and store * them into {d28, d29} */ vshll.u8 q14, d18, #8 vshll.u8 q10, d17, #8 vshll.u8 q15, d16, #8 vsri.u16 q14, q10, #5 vsri.u16 q14, q15, #11 .endm .macro pixman_composite_over_n_8888_0565_ca_process_pixblock_tail_head fetch_mask_pixblock vrshr.u16 q10, q6, #8 vrshr.u16 q14, q7, #8 vld1.16 {d4, d5}, [DST_R, :128]! 
vrshr.u16 q15, q11, #8 vraddhn.u16 d16, q10, q6 vraddhn.u16 d17, q14, q7 vraddhn.u16 d22, q15, q11 /* process_pixblock_head */ /* * 'combine_mask_ca' replacement * * input: solid src (n) in {d8, d9, d10, d11} [B, G, R, A] * mask in {d24, d25, d26} [B, G, R] * output: updated src in {d0, d1, d2 } [B, G, R] * updated mask in {d24, d25, d26} [B, G, R] */ vmull.u8 q6, d26, d10 vqadd.u8 q8, q0, q8 vmull.u8 q0, d24, d8 vqadd.u8 d22, d2, d22 vmull.u8 q1, d25, d9 /* * convert the result in d16, d17, d22 to r5g6b5 and store * it into {d28, d29} */ vshll.u8 q14, d22, #8 vshll.u8 q10, d17, #8 vshll.u8 q15, d16, #8 vmull.u8 q9, d11, d25 vsri.u16 q14, q10, #5 vmull.u8 q12, d11, d24 vmull.u8 q13, d11, d26 vsri.u16 q14, q15, #11 cache_preload 8, 8 vrshr.u16 q8, q0, #8 vrshr.u16 q10, q1, #8 vrshr.u16 q11, q6, #8 vraddhn.u16 d0, q0, q8 vraddhn.u16 d1, q1, q10 vraddhn.u16 d2, q6, q11 vrshr.u16 q11, q12, #8 vrshr.u16 q8, q9, #8 vrshr.u16 q6, q13, #8 vraddhn.u16 d24, q12, q11 vraddhn.u16 d25, q9, q8 /* * convert 8 r5g6b5 pixel data from {d4, d5} to planar * 8-bit format and put data into d16 - blue, d17 - green, * d18 - red */ vshrn.u16 d17, q2, #3 vshrn.u16 d18, q2, #8 vraddhn.u16 d26, q13, q6 vsli.u16 q2, q2, #5 vsri.u8 d17, d17, #6 vsri.u8 d18, d18, #5 /* * 'combine_over_ca' replacement * * output: updated dest in d16 - blue, d17 - green, d18 - red */ vmvn.8 q12, q12 vshrn.u16 d16, q2, #2 vmvn.8 d26, d26 vmull.u8 q7, d17, d25 vmull.u8 q6, d16, d24 vmull.u8 q11, d18, d26 vst1.16 {d28, d29}, [DST_W, :128]! .endm .macro pixman_composite_over_n_8888_0565_ca_init add DUMMY, sp, #ARGS_STACK_OFFSET vpush {d8-d15} vld1.32 {d11[0]}, [DUMMY] vdup.8 d8, d11[0] vdup.8 d9, d11[1] vdup.8 d10, d11[2] vdup.8 d11, d11[3] .endm .macro pixman_composite_over_n_8888_0565_ca_cleanup vpop {d8-d15} .endm generate_composite_function \ pixman_composite_over_n_8888_0565_ca_asm_neon, 0, 32, 16, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_over_n_8888_0565_ca_init, \ pixman_composite_over_n_8888_0565_ca_cleanup, \ pixman_composite_over_n_8888_0565_ca_process_pixblock_head, \ pixman_composite_over_n_8888_0565_ca_process_pixblock_tail, \ pixman_composite_over_n_8888_0565_ca_process_pixblock_tail_head /******************************************************************************/ .macro pixman_composite_in_n_8_process_pixblock_head /* expecting source data in {d0, d1, d2, d3} */ /* and destination data in {d4, d5, d6, d7} */ vmull.u8 q8, d4, d3 vmull.u8 q9, d5, d3 vmull.u8 q10, d6, d3 vmull.u8 q11, d7, d3 .endm .macro pixman_composite_in_n_8_process_pixblock_tail vrshr.u16 q14, q8, #8 vrshr.u16 q15, q9, #8 vrshr.u16 q12, q10, #8 vrshr.u16 q13, q11, #8 vraddhn.u16 d28, q8, q14 vraddhn.u16 d29, q9, q15 vraddhn.u16 d30, q10, q12 vraddhn.u16 d31, q11, q13 .endm .macro pixman_composite_in_n_8_process_pixblock_tail_head pixman_composite_in_n_8_process_pixblock_tail vld1.8 {d4, d5, d6, d7}, [DST_R, :128]! cache_preload 32, 32 pixman_composite_in_n_8_process_pixblock_head vst1.8 {d28, d29, d30, d31}, [DST_W, :128]! 
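/*
 * The head/tail/tail_head triple used above is the software-pipelining
 * contract shared by all of these fast paths: 'head' starts work on a pixel
 * block, 'tail' finishes it, and 'tail_head' finishes block N while already
 * starting block N+1 (either literally tail-then-head, as in the simple
 * macros marked TODO, or as a hand-interleaved schedule). Conceptually the
 * generated inner loop looks like this illustrative C sketch:
 *
 *     process_pixblock_head();              // start the first block
 *     while (--nblocks)
 *         process_pixblock_tail_head();     // finish one block, start the next
 *     process_pixblock_tail();              // drain the last block
 *
 * Keeping loads, multiplies and stores of adjacent blocks in flight at the
 * same time is what hides NEON latencies on in-order cores.
 */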
.endm .macro pixman_composite_in_n_8_init add DUMMY, sp, #ARGS_STACK_OFFSET vld1.32 {d3[0]}, [DUMMY] vdup.8 d3, d3[3] .endm .macro pixman_composite_in_n_8_cleanup .endm generate_composite_function \ pixman_composite_in_n_8_asm_neon, 0, 0, 8, \ FLAG_DST_READWRITE, \ 32, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_in_n_8_init, \ pixman_composite_in_n_8_cleanup, \ pixman_composite_in_n_8_process_pixblock_head, \ pixman_composite_in_n_8_process_pixblock_tail, \ pixman_composite_in_n_8_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 24 /* mask_basereg */ .macro pixman_composite_add_n_8_8_process_pixblock_head /* expecting source data in {d8, d9, d10, d11} */ /* d8 - blue, d9 - green, d10 - red, d11 - alpha */ /* and destination data in {d4, d5, d6, d7} */ /* mask is in d24, d25, d26, d27 */ vmull.u8 q0, d24, d11 vmull.u8 q1, d25, d11 vmull.u8 q6, d26, d11 vmull.u8 q7, d27, d11 vrshr.u16 q10, q0, #8 vrshr.u16 q11, q1, #8 vrshr.u16 q12, q6, #8 vrshr.u16 q13, q7, #8 vraddhn.u16 d0, q0, q10 vraddhn.u16 d1, q1, q11 vraddhn.u16 d2, q6, q12 vraddhn.u16 d3, q7, q13 vqadd.u8 q14, q0, q2 vqadd.u8 q15, q1, q3 .endm .macro pixman_composite_add_n_8_8_process_pixblock_tail .endm /* TODO: expand macros and do better instructions scheduling */ .macro pixman_composite_add_n_8_8_process_pixblock_tail_head pixman_composite_add_n_8_8_process_pixblock_tail vst1.8 {d28, d29, d30, d31}, [DST_W, :128]! vld1.8 {d4, d5, d6, d7}, [DST_R, :128]! fetch_mask_pixblock cache_preload 32, 32 pixman_composite_add_n_8_8_process_pixblock_head .endm .macro pixman_composite_add_n_8_8_init add DUMMY, sp, #ARGS_STACK_OFFSET vpush {d8-d15} vld1.32 {d11[0]}, [DUMMY] vdup.8 d11, d11[3] .endm .macro pixman_composite_add_n_8_8_cleanup vpop {d8-d15} .endm generate_composite_function \ pixman_composite_add_n_8_8_asm_neon, 0, 8, 8, \ FLAG_DST_READWRITE, \ 32, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_add_n_8_8_init, \ pixman_composite_add_n_8_8_cleanup, \ pixman_composite_add_n_8_8_process_pixblock_head, \ pixman_composite_add_n_8_8_process_pixblock_tail, \ pixman_composite_add_n_8_8_process_pixblock_tail_head /******************************************************************************/ .macro pixman_composite_add_8_8_8_process_pixblock_head /* expecting source data in {d0, d1, d2, d3} */ /* destination data in {d4, d5, d6, d7} */ /* mask in {d24, d25, d26, d27} */ vmull.u8 q8, d24, d0 vmull.u8 q9, d25, d1 vmull.u8 q10, d26, d2 vmull.u8 q11, d27, d3 vrshr.u16 q0, q8, #8 vrshr.u16 q1, q9, #8 vrshr.u16 q12, q10, #8 vrshr.u16 q13, q11, #8 vraddhn.u16 d0, q0, q8 vraddhn.u16 d1, q1, q9 vraddhn.u16 d2, q12, q10 vraddhn.u16 d3, q13, q11 vqadd.u8 q14, q0, q2 vqadd.u8 q15, q1, q3 .endm .macro pixman_composite_add_8_8_8_process_pixblock_tail .endm /* TODO: expand macros and do better instructions scheduling */ .macro pixman_composite_add_8_8_8_process_pixblock_tail_head pixman_composite_add_8_8_8_process_pixblock_tail vst1.8 {d28, d29, d30, d31}, [DST_W, :128]! vld1.8 {d4, d5, d6, d7}, [DST_R, :128]! 
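/*
 * add_8_8_8 (and the 8888 variants below) scale the source by the mask
 * (by the mask's alpha in the 8888-mask case) and then add to the
 * destination with unsigned saturation. Per byte lane, as an illustrative
 * C sketch (helpers as in the earlier sketches):
 *
 *     dst[i] = sat_add_u8(dst[i], mul_8x8_div_255(src[i], mask[i]));
 *
 * The unmasked add_8_8 / add_8888_8888 paths earlier in the file skip the
 * multiply and reduce to the plain saturating add.
 */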
fetch_mask_pixblock fetch_src_pixblock cache_preload 32, 32 pixman_composite_add_8_8_8_process_pixblock_head .endm .macro pixman_composite_add_8_8_8_init .endm .macro pixman_composite_add_8_8_8_cleanup .endm generate_composite_function \ pixman_composite_add_8_8_8_asm_neon, 8, 8, 8, \ FLAG_DST_READWRITE, \ 32, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_add_8_8_8_init, \ pixman_composite_add_8_8_8_cleanup, \ pixman_composite_add_8_8_8_process_pixblock_head, \ pixman_composite_add_8_8_8_process_pixblock_tail, \ pixman_composite_add_8_8_8_process_pixblock_tail_head /******************************************************************************/ .macro pixman_composite_add_8888_8888_8888_process_pixblock_head /* expecting source data in {d0, d1, d2, d3} */ /* destination data in {d4, d5, d6, d7} */ /* mask in {d24, d25, d26, d27} */ vmull.u8 q8, d27, d0 vmull.u8 q9, d27, d1 vmull.u8 q10, d27, d2 vmull.u8 q11, d27, d3 /* 1 cycle bubble */ vrsra.u16 q8, q8, #8 vrsra.u16 q9, q9, #8 vrsra.u16 q10, q10, #8 vrsra.u16 q11, q11, #8 .endm .macro pixman_composite_add_8888_8888_8888_process_pixblock_tail /* 2 cycle bubble */ vrshrn.u16 d28, q8, #8 vrshrn.u16 d29, q9, #8 vrshrn.u16 d30, q10, #8 vrshrn.u16 d31, q11, #8 vqadd.u8 q14, q2, q14 /* 1 cycle bubble */ vqadd.u8 q15, q3, q15 .endm .macro pixman_composite_add_8888_8888_8888_process_pixblock_tail_head fetch_src_pixblock vrshrn.u16 d28, q8, #8 fetch_mask_pixblock vrshrn.u16 d29, q9, #8 vmull.u8 q8, d27, d0 vrshrn.u16 d30, q10, #8 vmull.u8 q9, d27, d1 vrshrn.u16 d31, q11, #8 vmull.u8 q10, d27, d2 vqadd.u8 q14, q2, q14 vmull.u8 q11, d27, d3 vqadd.u8 q15, q3, q15 vrsra.u16 q8, q8, #8 vld4.8 {d4, d5, d6, d7}, [DST_R, :128]! vrsra.u16 q9, q9, #8 vst4.8 {d28, d29, d30, d31}, [DST_W, :128]! 
vrsra.u16 q10, q10, #8 cache_preload 8, 8 vrsra.u16 q11, q11, #8 .endm generate_composite_function \ pixman_composite_add_8888_8888_8888_asm_neon, 32, 32, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 10, /* prefetch distance */ \ default_init, \ default_cleanup, \ pixman_composite_add_8888_8888_8888_process_pixblock_head, \ pixman_composite_add_8888_8888_8888_process_pixblock_tail, \ pixman_composite_add_8888_8888_8888_process_pixblock_tail_head generate_composite_function_single_scanline \ pixman_composite_scanline_add_mask_asm_neon, 32, 32, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ default_init, \ default_cleanup, \ pixman_composite_add_8888_8888_8888_process_pixblock_head, \ pixman_composite_add_8888_8888_8888_process_pixblock_tail, \ pixman_composite_add_8888_8888_8888_process_pixblock_tail_head /******************************************************************************/ generate_composite_function \ pixman_composite_add_8888_8_8888_asm_neon, 32, 8, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ default_init, \ default_cleanup, \ pixman_composite_add_8888_8888_8888_process_pixblock_head, \ pixman_composite_add_8888_8888_8888_process_pixblock_tail, \ pixman_composite_add_8888_8888_8888_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 27 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_add_n_8_8888_init add DUMMY, sp, #ARGS_STACK_OFFSET vld1.32 {d3[0]}, [DUMMY] vdup.8 d0, d3[0] vdup.8 d1, d3[1] vdup.8 d2, d3[2] vdup.8 d3, d3[3] .endm .macro pixman_composite_add_n_8_8888_cleanup .endm generate_composite_function \ pixman_composite_add_n_8_8888_asm_neon, 0, 8, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_add_n_8_8888_init, \ pixman_composite_add_n_8_8888_cleanup, \ pixman_composite_add_8888_8888_8888_process_pixblock_head, \ pixman_composite_add_8888_8888_8888_process_pixblock_tail, \ pixman_composite_add_8888_8888_8888_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 27 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_add_8888_n_8888_init add DUMMY, sp, #(ARGS_STACK_OFFSET + 8) vld1.32 {d27[0]}, [DUMMY] vdup.8 d27, d27[3] .endm .macro pixman_composite_add_8888_n_8888_cleanup .endm generate_composite_function \ pixman_composite_add_8888_n_8888_asm_neon, 32, 0, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_add_8888_n_8888_init, \ pixman_composite_add_8888_n_8888_cleanup, \ pixman_composite_add_8888_8888_8888_process_pixblock_head, \ pixman_composite_add_8888_8888_8888_process_pixblock_tail, \ pixman_composite_add_8888_8888_8888_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 27 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_out_reverse_8888_n_8888_process_pixblock_head /* expecting source data in {d0, d1, d2, d3} */ /* destination data in {d4, d5, d6, d7} */ 
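/*
 * This head implements OUT_REVERSE: the destination survives only where the
 * masked source is transparent, dest = dest * (255 - alpha(src IN mask)) / 255,
 * and no source term is added afterwards. Per channel, as an illustrative C
 * sketch (helpers as in the earlier sketches):
 *
 *     uint8_t sa = mul_8x8_div_255(src_a, mask_a);    // src IN mask
 *     dst_c      = mul_8x8_div_255(255 - sa, dst_c);
 *
 * The over_8888_n_8888 macros further down reuse exactly this head/tail and
 * only add the masked source back on top (vqadd) to turn it into OVER.
 */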
/* solid mask is in d15 */ /* 'in' */ vmull.u8 q8, d15, d3 vmull.u8 q6, d15, d2 vmull.u8 q5, d15, d1 vmull.u8 q4, d15, d0 vrshr.u16 q13, q8, #8 vrshr.u16 q12, q6, #8 vrshr.u16 q11, q5, #8 vrshr.u16 q10, q4, #8 vraddhn.u16 d3, q8, q13 vraddhn.u16 d2, q6, q12 vraddhn.u16 d1, q5, q11 vraddhn.u16 d0, q4, q10 vmvn.8 d24, d3 /* get inverted alpha */ /* now do alpha blending */ vmull.u8 q8, d24, d4 vmull.u8 q9, d24, d5 vmull.u8 q10, d24, d6 vmull.u8 q11, d24, d7 .endm .macro pixman_composite_out_reverse_8888_n_8888_process_pixblock_tail vrshr.u16 q14, q8, #8 vrshr.u16 q15, q9, #8 vrshr.u16 q12, q10, #8 vrshr.u16 q13, q11, #8 vraddhn.u16 d28, q14, q8 vraddhn.u16 d29, q15, q9 vraddhn.u16 d30, q12, q10 vraddhn.u16 d31, q13, q11 .endm /* TODO: expand macros and do better instructions scheduling */ .macro pixman_composite_out_reverse_8888_8888_8888_process_pixblock_tail_head vld4.8 {d4, d5, d6, d7}, [DST_R, :128]! pixman_composite_out_reverse_8888_n_8888_process_pixblock_tail fetch_src_pixblock cache_preload 8, 8 fetch_mask_pixblock pixman_composite_out_reverse_8888_n_8888_process_pixblock_head vst4.8 {d28, d29, d30, d31}, [DST_W, :128]! .endm generate_composite_function_single_scanline \ pixman_composite_scanline_out_reverse_mask_asm_neon, 32, 32, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ default_init_need_all_regs, \ default_cleanup_need_all_regs, \ pixman_composite_out_reverse_8888_n_8888_process_pixblock_head, \ pixman_composite_out_reverse_8888_n_8888_process_pixblock_tail, \ pixman_composite_out_reverse_8888_8888_8888_process_pixblock_tail_head \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 12 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_over_8888_n_8888_process_pixblock_head pixman_composite_out_reverse_8888_n_8888_process_pixblock_head .endm .macro pixman_composite_over_8888_n_8888_process_pixblock_tail pixman_composite_out_reverse_8888_n_8888_process_pixblock_tail vqadd.u8 q14, q0, q14 vqadd.u8 q15, q1, q15 .endm /* TODO: expand macros and do better instructions scheduling */ .macro pixman_composite_over_8888_n_8888_process_pixblock_tail_head vld4.8 {d4, d5, d6, d7}, [DST_R, :128]! pixman_composite_over_8888_n_8888_process_pixblock_tail fetch_src_pixblock cache_preload 8, 8 pixman_composite_over_8888_n_8888_process_pixblock_head vst4.8 {d28, d29, d30, d31}, [DST_W, :128]! .endm .macro pixman_composite_over_8888_n_8888_init add DUMMY, sp, #48 vpush {d8-d15} vld1.32 {d15[0]}, [DUMMY] vdup.8 d15, d15[3] .endm .macro pixman_composite_over_8888_n_8888_cleanup vpop {d8-d15} .endm generate_composite_function \ pixman_composite_over_8888_n_8888_asm_neon, 32, 0, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_over_8888_n_8888_init, \ pixman_composite_over_8888_n_8888_cleanup, \ pixman_composite_over_8888_n_8888_process_pixblock_head, \ pixman_composite_over_8888_n_8888_process_pixblock_tail, \ pixman_composite_over_8888_n_8888_process_pixblock_tail_head /******************************************************************************/ /* TODO: expand macros and do better instructions scheduling */ .macro pixman_composite_over_8888_8888_8888_process_pixblock_tail_head vld4.8 {d4, d5, d6, d7}, [DST_R, :128]! 
pixman_composite_over_8888_n_8888_process_pixblock_tail fetch_src_pixblock cache_preload 8, 8 fetch_mask_pixblock pixman_composite_over_8888_n_8888_process_pixblock_head vst4.8 {d28, d29, d30, d31}, [DST_W, :128]! .endm generate_composite_function \ pixman_composite_over_8888_8888_8888_asm_neon, 32, 32, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ default_init_need_all_regs, \ default_cleanup_need_all_regs, \ pixman_composite_over_8888_n_8888_process_pixblock_head, \ pixman_composite_over_8888_n_8888_process_pixblock_tail, \ pixman_composite_over_8888_8888_8888_process_pixblock_tail_head \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 12 /* mask_basereg */ generate_composite_function_single_scanline \ pixman_composite_scanline_over_mask_asm_neon, 32, 32, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ default_init_need_all_regs, \ default_cleanup_need_all_regs, \ pixman_composite_over_8888_n_8888_process_pixblock_head, \ pixman_composite_over_8888_n_8888_process_pixblock_tail, \ pixman_composite_over_8888_8888_8888_process_pixblock_tail_head \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 12 /* mask_basereg */ /******************************************************************************/ /* TODO: expand macros and do better instructions scheduling */ .macro pixman_composite_over_8888_8_8888_process_pixblock_tail_head vld4.8 {d4, d5, d6, d7}, [DST_R, :128]! pixman_composite_over_8888_n_8888_process_pixblock_tail fetch_src_pixblock cache_preload 8, 8 fetch_mask_pixblock pixman_composite_over_8888_n_8888_process_pixblock_head vst4.8 {d28, d29, d30, d31}, [DST_W, :128]! .endm generate_composite_function \ pixman_composite_over_8888_8_8888_asm_neon, 32, 8, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ default_init_need_all_regs, \ default_cleanup_need_all_regs, \ pixman_composite_over_8888_n_8888_process_pixblock_head, \ pixman_composite_over_8888_n_8888_process_pixblock_tail, \ pixman_composite_over_8888_8_8888_process_pixblock_tail_head \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 15 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_src_0888_0888_process_pixblock_head .endm .macro pixman_composite_src_0888_0888_process_pixblock_tail .endm .macro pixman_composite_src_0888_0888_process_pixblock_tail_head vst3.8 {d0, d1, d2}, [DST_W]! 
fetch_src_pixblock cache_preload 8, 8 .endm generate_composite_function \ pixman_composite_src_0888_0888_asm_neon, 24, 0, 24, \ FLAG_DST_WRITEONLY, \ 8, /* number of pixels, processed in a single block */ \ 10, /* prefetch distance */ \ default_init, \ default_cleanup, \ pixman_composite_src_0888_0888_process_pixblock_head, \ pixman_composite_src_0888_0888_process_pixblock_tail, \ pixman_composite_src_0888_0888_process_pixblock_tail_head, \ 0, /* dst_w_basereg */ \ 0, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 0 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_src_0888_8888_rev_process_pixblock_head vswp d0, d2 .endm .macro pixman_composite_src_0888_8888_rev_process_pixblock_tail .endm .macro pixman_composite_src_0888_8888_rev_process_pixblock_tail_head vst4.8 {d0, d1, d2, d3}, [DST_W]! fetch_src_pixblock vswp d0, d2 cache_preload 8, 8 .endm .macro pixman_composite_src_0888_8888_rev_init veor d3, d3, d3 .endm generate_composite_function \ pixman_composite_src_0888_8888_rev_asm_neon, 24, 0, 32, \ FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 10, /* prefetch distance */ \ pixman_composite_src_0888_8888_rev_init, \ default_cleanup, \ pixman_composite_src_0888_8888_rev_process_pixblock_head, \ pixman_composite_src_0888_8888_rev_process_pixblock_tail, \ pixman_composite_src_0888_8888_rev_process_pixblock_tail_head, \ 0, /* dst_w_basereg */ \ 0, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 0 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_src_0888_0565_rev_process_pixblock_head vshll.u8 q8, d1, #8 vshll.u8 q9, d2, #8 .endm .macro pixman_composite_src_0888_0565_rev_process_pixblock_tail vshll.u8 q14, d0, #8 vsri.u16 q14, q8, #5 vsri.u16 q14, q9, #11 .endm .macro pixman_composite_src_0888_0565_rev_process_pixblock_tail_head vshll.u8 q14, d0, #8 fetch_src_pixblock vsri.u16 q14, q8, #5 vsri.u16 q14, q9, #11 vshll.u8 q8, d1, #8 vst1.16 {d28, d29}, [DST_W, :128]! vshll.u8 q9, d2, #8 .endm generate_composite_function \ pixman_composite_src_0888_0565_rev_asm_neon, 24, 0, 16, \ FLAG_DST_WRITEONLY, \ 8, /* number of pixels, processed in a single block */ \ 10, /* prefetch distance */ \ default_init, \ default_cleanup, \ pixman_composite_src_0888_0565_rev_process_pixblock_head, \ pixman_composite_src_0888_0565_rev_process_pixblock_tail, \ pixman_composite_src_0888_0565_rev_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 0, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 0 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_src_pixbuf_8888_process_pixblock_head vmull.u8 q8, d3, d0 vmull.u8 q9, d3, d1 vmull.u8 q10, d3, d2 .endm .macro pixman_composite_src_pixbuf_8888_process_pixblock_tail vrshr.u16 q11, q8, #8 vswp d3, d31 vrshr.u16 q12, q9, #8 vrshr.u16 q13, q10, #8 vraddhn.u16 d30, q11, q8 vraddhn.u16 d29, q12, q9 vraddhn.u16 d28, q13, q10 .endm .macro pixman_composite_src_pixbuf_8888_process_pixblock_tail_head vrshr.u16 q11, q8, #8 vswp d3, d31 vrshr.u16 q12, q9, #8 vrshr.u16 q13, q10, #8 fetch_src_pixblock vraddhn.u16 d30, q11, q8 PF add PF_X, PF_X, #8 PF tst PF_CTL, #0xF PF addne PF_X, PF_X, #8 PF subne PF_CTL, PF_CTL, #1 vraddhn.u16 d29, q12, q9 vraddhn.u16 d28, q13, q10 vmull.u8 q8, d3, d0 vmull.u8 q9, d3, d1 vmull.u8 q10, d3, d2 vst4.8 {d28, d29, d30, d31}, [DST_W, :128]! 
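/*
 * src_pixbuf_8888 and the src_rpixbuf_8888 variant defined just below
 * convert a non-premultiplied pixbuf pixel to the premultiplied format:
 * each colour channel is multiplied by the alpha byte (d3) with the usual
 * rounded division by 255, while alpha itself passes through unchanged
 * (the vswp of d3 and d31). The two variants differ only in the channel
 * order written out. Illustrative C sketch (helper as in the earlier
 * sketches):
 *
 *     out_a = a;
 *     out_r = mul_8x8_div_255(a, r);
 *     out_g = mul_8x8_div_255(a, g);
 *     out_b = mul_8x8_div_255(a, b);
 */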
PF cmp PF_X, ORIG_W PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift] PF subge PF_X, PF_X, ORIG_W PF subges PF_CTL, PF_CTL, #0x10 PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]! .endm generate_composite_function \ pixman_composite_src_pixbuf_8888_asm_neon, 32, 0, 32, \ FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 10, /* prefetch distance */ \ default_init, \ default_cleanup, \ pixman_composite_src_pixbuf_8888_process_pixblock_head, \ pixman_composite_src_pixbuf_8888_process_pixblock_tail, \ pixman_composite_src_pixbuf_8888_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 0, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 0 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_src_rpixbuf_8888_process_pixblock_head vmull.u8 q8, d3, d0 vmull.u8 q9, d3, d1 vmull.u8 q10, d3, d2 .endm .macro pixman_composite_src_rpixbuf_8888_process_pixblock_tail vrshr.u16 q11, q8, #8 vswp d3, d31 vrshr.u16 q12, q9, #8 vrshr.u16 q13, q10, #8 vraddhn.u16 d28, q11, q8 vraddhn.u16 d29, q12, q9 vraddhn.u16 d30, q13, q10 .endm .macro pixman_composite_src_rpixbuf_8888_process_pixblock_tail_head vrshr.u16 q11, q8, #8 vswp d3, d31 vrshr.u16 q12, q9, #8 vrshr.u16 q13, q10, #8 fetch_src_pixblock vraddhn.u16 d28, q11, q8 PF add PF_X, PF_X, #8 PF tst PF_CTL, #0xF PF addne PF_X, PF_X, #8 PF subne PF_CTL, PF_CTL, #1 vraddhn.u16 d29, q12, q9 vraddhn.u16 d30, q13, q10 vmull.u8 q8, d3, d0 vmull.u8 q9, d3, d1 vmull.u8 q10, d3, d2 vst4.8 {d28, d29, d30, d31}, [DST_W, :128]! PF cmp PF_X, ORIG_W PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift] PF subge PF_X, PF_X, ORIG_W PF subges PF_CTL, PF_CTL, #0x10 PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]! .endm generate_composite_function \ pixman_composite_src_rpixbuf_8888_asm_neon, 32, 0, 32, \ FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 10, /* prefetch distance */ \ default_init, \ default_cleanup, \ pixman_composite_src_rpixbuf_8888_process_pixblock_head, \ pixman_composite_src_rpixbuf_8888_process_pixblock_tail, \ pixman_composite_src_rpixbuf_8888_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 0, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 0 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_over_0565_8_0565_process_pixblock_head /* mask is in d15 */ convert_0565_to_x888 q4, d2, d1, d0 convert_0565_to_x888 q5, d6, d5, d4 /* source pixel data is in {d0, d1, d2, XX} */ /* destination pixel data is in {d4, d5, d6, XX} */ vmvn.8 d7, d15 vmull.u8 q6, d15, d2 vmull.u8 q5, d15, d1 vmull.u8 q4, d15, d0 vmull.u8 q8, d7, d4 vmull.u8 q9, d7, d5 vmull.u8 q13, d7, d6 vrshr.u16 q12, q6, #8 vrshr.u16 q11, q5, #8 vrshr.u16 q10, q4, #8 vraddhn.u16 d2, q6, q12 vraddhn.u16 d1, q5, q11 vraddhn.u16 d0, q4, q10 .endm .macro pixman_composite_over_0565_8_0565_process_pixblock_tail vrshr.u16 q14, q8, #8 vrshr.u16 q15, q9, #8 vrshr.u16 q12, q13, #8 vraddhn.u16 d28, q14, q8 vraddhn.u16 d29, q15, q9 vraddhn.u16 d30, q12, q13 vqadd.u8 q0, q0, q14 vqadd.u8 q1, q1, q15 /* 32bpp result is in {d0, d1, d2, XX} */ convert_8888_to_0565 d2, d1, d0, q14, q15, q3 .endm /* TODO: expand macros and do better instructions scheduling */ .macro pixman_composite_over_0565_8_0565_process_pixblock_tail_head fetch_mask_pixblock pixman_composite_over_0565_8_0565_process_pixblock_tail fetch_src_pixblock vld1.16 {d10, d11}, [DST_R, :128]! 
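/*
 * convert_0565_to_x888, used by the r5g6b5 paths here, expands each 5- or
 * 6-bit field to 8 bits by replicating its top bits into the freed low
 * bits, so 0x1f maps to 0xff and 0x00 stays 0x00. Illustrative C sketch:
 *
 *     uint8_t r5 = (p >> 11) & 0x1f, g6 = (p >> 5) & 0x3f, b5 = p & 0x1f;
 *     uint8_t r  = (r5 << 3) | (r5 >> 2);
 *     uint8_t g  = (g6 << 2) | (g6 >> 4);
 *     uint8_t b  = (b5 << 3) | (b5 >> 2);
 *
 * The NEON form splits the fields with vshrn/vsli and replicates the upper
 * bits with vsri.
 */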
cache_preload 8, 8 pixman_composite_over_0565_8_0565_process_pixblock_head vst1.16 {d28, d29}, [DST_W, :128]! .endm generate_composite_function \ pixman_composite_over_0565_8_0565_asm_neon, 16, 8, 16, \ FLAG_DST_READWRITE, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ default_init_need_all_regs, \ default_cleanup_need_all_regs, \ pixman_composite_over_0565_8_0565_process_pixblock_head, \ pixman_composite_over_0565_8_0565_process_pixblock_tail, \ pixman_composite_over_0565_8_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 10, /* dst_r_basereg */ \ 8, /* src_basereg */ \ 15 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_over_0565_n_0565_init add DUMMY, sp, #(ARGS_STACK_OFFSET + 8) vpush {d8-d15} vld1.32 {d15[0]}, [DUMMY] vdup.8 d15, d15[3] .endm .macro pixman_composite_over_0565_n_0565_cleanup vpop {d8-d15} .endm generate_composite_function \ pixman_composite_over_0565_n_0565_asm_neon, 16, 0, 16, \ FLAG_DST_READWRITE, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_over_0565_n_0565_init, \ pixman_composite_over_0565_n_0565_cleanup, \ pixman_composite_over_0565_8_0565_process_pixblock_head, \ pixman_composite_over_0565_8_0565_process_pixblock_tail, \ pixman_composite_over_0565_8_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 10, /* dst_r_basereg */ \ 8, /* src_basereg */ \ 15 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_add_0565_8_0565_process_pixblock_head /* mask is in d15 */ convert_0565_to_x888 q4, d2, d1, d0 convert_0565_to_x888 q5, d6, d5, d4 /* source pixel data is in {d0, d1, d2, XX} */ /* destination pixel data is in {d4, d5, d6, XX} */ vmull.u8 q6, d15, d2 vmull.u8 q5, d15, d1 vmull.u8 q4, d15, d0 vrshr.u16 q12, q6, #8 vrshr.u16 q11, q5, #8 vrshr.u16 q10, q4, #8 vraddhn.u16 d2, q6, q12 vraddhn.u16 d1, q5, q11 vraddhn.u16 d0, q4, q10 .endm .macro pixman_composite_add_0565_8_0565_process_pixblock_tail vqadd.u8 q0, q0, q2 vqadd.u8 q1, q1, q3 /* 32bpp result is in {d0, d1, d2, XX} */ convert_8888_to_0565 d2, d1, d0, q14, q15, q3 .endm /* TODO: expand macros and do better instructions scheduling */ .macro pixman_composite_add_0565_8_0565_process_pixblock_tail_head fetch_mask_pixblock pixman_composite_add_0565_8_0565_process_pixblock_tail fetch_src_pixblock vld1.16 {d10, d11}, [DST_R, :128]! cache_preload 8, 8 pixman_composite_add_0565_8_0565_process_pixblock_head vst1.16 {d28, d29}, [DST_W, :128]! 
.endm generate_composite_function \ pixman_composite_add_0565_8_0565_asm_neon, 16, 8, 16, \ FLAG_DST_READWRITE, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ default_init_need_all_regs, \ default_cleanup_need_all_regs, \ pixman_composite_add_0565_8_0565_process_pixblock_head, \ pixman_composite_add_0565_8_0565_process_pixblock_tail, \ pixman_composite_add_0565_8_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 10, /* dst_r_basereg */ \ 8, /* src_basereg */ \ 15 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_out_reverse_8_0565_process_pixblock_head /* mask is in d15 */ convert_0565_to_x888 q5, d6, d5, d4 /* destination pixel data is in {d4, d5, d6, xx} */ vmvn.8 d24, d15 /* get inverted alpha */ /* now do alpha blending */ vmull.u8 q8, d24, d4 vmull.u8 q9, d24, d5 vmull.u8 q10, d24, d6 .endm .macro pixman_composite_out_reverse_8_0565_process_pixblock_tail vrshr.u16 q14, q8, #8 vrshr.u16 q15, q9, #8 vrshr.u16 q12, q10, #8 vraddhn.u16 d0, q14, q8 vraddhn.u16 d1, q15, q9 vraddhn.u16 d2, q12, q10 /* 32bpp result is in {d0, d1, d2, XX} */ convert_8888_to_0565 d2, d1, d0, q14, q15, q3 .endm /* TODO: expand macros and do better instructions scheduling */ .macro pixman_composite_out_reverse_8_0565_process_pixblock_tail_head fetch_src_pixblock pixman_composite_out_reverse_8_0565_process_pixblock_tail vld1.16 {d10, d11}, [DST_R, :128]! cache_preload 8, 8 pixman_composite_out_reverse_8_0565_process_pixblock_head vst1.16 {d28, d29}, [DST_W, :128]! .endm generate_composite_function \ pixman_composite_out_reverse_8_0565_asm_neon, 8, 0, 16, \ FLAG_DST_READWRITE, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ default_init_need_all_regs, \ default_cleanup_need_all_regs, \ pixman_composite_out_reverse_8_0565_process_pixblock_head, \ pixman_composite_out_reverse_8_0565_process_pixblock_tail, \ pixman_composite_out_reverse_8_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 10, /* dst_r_basereg */ \ 15, /* src_basereg */ \ 0 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_out_reverse_8_8888_process_pixblock_head /* src is in d0 */ /* destination pixel data is in {d4, d5, d6, d7} */ vmvn.8 d1, d0 /* get inverted alpha */ /* now do alpha blending */ vmull.u8 q8, d1, d4 vmull.u8 q9, d1, d5 vmull.u8 q10, d1, d6 vmull.u8 q11, d1, d7 .endm .macro pixman_composite_out_reverse_8_8888_process_pixblock_tail vrshr.u16 q14, q8, #8 vrshr.u16 q15, q9, #8 vrshr.u16 q12, q10, #8 vrshr.u16 q13, q11, #8 vraddhn.u16 d28, q14, q8 vraddhn.u16 d29, q15, q9 vraddhn.u16 d30, q12, q10 vraddhn.u16 d31, q13, q11 /* 32bpp result is in {d28, d29, d30, d31} */ .endm /* TODO: expand macros and do better instructions scheduling */ .macro pixman_composite_out_reverse_8_8888_process_pixblock_tail_head fetch_src_pixblock pixman_composite_out_reverse_8_8888_process_pixblock_tail vld4.8 {d4, d5, d6, d7}, [DST_R, :128]! cache_preload 8, 8 pixman_composite_out_reverse_8_8888_process_pixblock_head vst4.8 {d28, d29, d30, d31}, [DST_W, :128]! 
.endm generate_composite_function \ pixman_composite_out_reverse_8_8888_asm_neon, 8, 0, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ default_init, \ default_cleanup, \ pixman_composite_out_reverse_8_8888_process_pixblock_head, \ pixman_composite_out_reverse_8_8888_process_pixblock_tail, \ pixman_composite_out_reverse_8_8888_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 0 /* mask_basereg */ /******************************************************************************/ generate_composite_function_nearest_scanline \ pixman_scaled_nearest_scanline_8888_8888_OVER_asm_neon, 32, 0, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ default_init, \ default_cleanup, \ pixman_composite_over_8888_8888_process_pixblock_head, \ pixman_composite_over_8888_8888_process_pixblock_tail, \ pixman_composite_over_8888_8888_process_pixblock_tail_head generate_composite_function_nearest_scanline \ pixman_scaled_nearest_scanline_8888_0565_OVER_asm_neon, 32, 0, 16, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ default_init, \ default_cleanup, \ pixman_composite_over_8888_0565_process_pixblock_head, \ pixman_composite_over_8888_0565_process_pixblock_tail, \ pixman_composite_over_8888_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 24 /* mask_basereg */ generate_composite_function_nearest_scanline \ pixman_scaled_nearest_scanline_8888_0565_SRC_asm_neon, 32, 0, 16, \ FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ default_init, \ default_cleanup, \ pixman_composite_src_8888_0565_process_pixblock_head, \ pixman_composite_src_8888_0565_process_pixblock_tail, \ pixman_composite_src_8888_0565_process_pixblock_tail_head generate_composite_function_nearest_scanline \ pixman_scaled_nearest_scanline_0565_8888_SRC_asm_neon, 16, 0, 32, \ FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ default_init, \ default_cleanup, \ pixman_composite_src_0565_8888_process_pixblock_head, \ pixman_composite_src_0565_8888_process_pixblock_tail, \ pixman_composite_src_0565_8888_process_pixblock_tail_head generate_composite_function_nearest_scanline \ pixman_scaled_nearest_scanline_8888_8_0565_OVER_asm_neon, 32, 8, 16, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ default_init_need_all_regs, \ default_cleanup_need_all_regs, \ pixman_composite_over_8888_8_0565_process_pixblock_head, \ pixman_composite_over_8888_8_0565_process_pixblock_tail, \ pixman_composite_over_8888_8_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 8, /* src_basereg */ \ 24 /* mask_basereg */ generate_composite_function_nearest_scanline \ pixman_scaled_nearest_scanline_0565_8_0565_OVER_asm_neon, 16, 8, 16, \ FLAG_DST_READWRITE, \ 8, /* number of pixels, processed in a single block */ \ default_init_need_all_regs, \ default_cleanup_need_all_regs, \ pixman_composite_over_0565_8_0565_process_pixblock_head, \ pixman_composite_over_0565_8_0565_process_pixblock_tail, \ pixman_composite_over_0565_8_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 10, /* dst_r_basereg */ \ 8, /* src_basereg */ \ 15 /* mask_basereg */ 
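/*
 * A quick reference for the arithmetic pattern used by the composite
 * macros above: the per-channel multiplies are done as vmull.u8 followed
 * by vrshr.u16 #8 and vraddhn.u16, which together compute a rounded
 * (a * b) / 255 on each byte lane (both VRSHR #8 and VRADDHN add the 0x80
 * rounding constant before narrowing).  A minimal C sketch of that math,
 * plus the per-channel blend that over_0565_8_0565 performs; the helper
 * names (mul_div_255, blend_channel) are illustrative only and are not
 * part of pixman:
 *
 *   #include <stdint.h>
 *
 *   // rounded (a * b) / 255, matching vmull.u8 + vrshr.u16 #8 + vraddhn.u16
 *   static inline uint8_t mul_div_255 (uint8_t a, uint8_t b)
 *   {
 *       uint16_t t = (uint16_t) a * b + 0x80;
 *       return (uint8_t) ((t + (t >> 8)) >> 8);
 *   }
 *
 *   // per-channel model of over_0565_8_0565: the r5g6b5 source carries no
 *   // alpha, so compositing through an a8 mask reduces to a blend of the
 *   // widened source and destination channels; vqadd.u8 in the _tail macro
 *   // is the final saturating add
 *   static inline uint8_t blend_channel (uint8_t s, uint8_t d, uint8_t m)
 *   {
 *       unsigned v = mul_div_255 (s, m) + mul_div_255 (d, 255 - m);
 *       return (uint8_t) (v > 255 ? 255 : v);
 *   }
 */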
/******************************************************************************/ /* * Bilinear scaling support code which tries to provide pixel fetching, color * format conversion, and interpolation as separate macros which can be used * as the basic building blocks for constructing bilinear scanline functions. */ .macro bilinear_load_8888 reg1, reg2, tmp mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #2 vld1.32 {reg1}, [TMP1], STRIDE vld1.32 {reg2}, [TMP1] .endm .macro bilinear_load_0565 reg1, reg2, tmp mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #1 vld1.32 {reg2[0]}, [TMP1], STRIDE vld1.32 {reg2[1]}, [TMP1] convert_four_0565_to_x888_packed reg2, reg1, reg2, tmp .endm .macro bilinear_load_and_vertical_interpolate_two_8888 \ acc1, acc2, reg1, reg2, reg3, reg4, tmp1, tmp2 bilinear_load_8888 reg1, reg2, tmp1 vmull.u8 acc1, reg1, d28 vmlal.u8 acc1, reg2, d29 bilinear_load_8888 reg3, reg4, tmp2 vmull.u8 acc2, reg3, d28 vmlal.u8 acc2, reg4, d29 .endm .macro bilinear_load_and_vertical_interpolate_four_8888 \ xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi \ yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi bilinear_load_and_vertical_interpolate_two_8888 \ xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi bilinear_load_and_vertical_interpolate_two_8888 \ yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi .endm .macro bilinear_load_and_vertical_interpolate_two_0565 \ acc1, acc2, reg1, reg2, reg3, reg4, acc2lo, acc2hi mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #1 mov TMP2, X, asr #16 add X, X, UX add TMP2, TOP, TMP2, asl #1 vld1.32 {acc2lo[0]}, [TMP1], STRIDE vld1.32 {acc2hi[0]}, [TMP2], STRIDE vld1.32 {acc2lo[1]}, [TMP1] vld1.32 {acc2hi[1]}, [TMP2] convert_0565_to_x888 acc2, reg3, reg2, reg1 vzip.u8 reg1, reg3 vzip.u8 reg2, reg4 vzip.u8 reg3, reg4 vzip.u8 reg1, reg2 vmull.u8 acc1, reg1, d28 vmlal.u8 acc1, reg2, d29 vmull.u8 acc2, reg3, d28 vmlal.u8 acc2, reg4, d29 .endm .macro bilinear_load_and_vertical_interpolate_four_0565 \ xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi \ yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #1 mov TMP2, X, asr #16 add X, X, UX add TMP2, TOP, TMP2, asl #1 vld1.32 {xacc2lo[0]}, [TMP1], STRIDE vld1.32 {xacc2hi[0]}, [TMP2], STRIDE vld1.32 {xacc2lo[1]}, [TMP1] vld1.32 {xacc2hi[1]}, [TMP2] convert_0565_to_x888 xacc2, xreg3, xreg2, xreg1 mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #1 mov TMP2, X, asr #16 add X, X, UX add TMP2, TOP, TMP2, asl #1 vld1.32 {yacc2lo[0]}, [TMP1], STRIDE vzip.u8 xreg1, xreg3 vld1.32 {yacc2hi[0]}, [TMP2], STRIDE vzip.u8 xreg2, xreg4 vld1.32 {yacc2lo[1]}, [TMP1] vzip.u8 xreg3, xreg4 vld1.32 {yacc2hi[1]}, [TMP2] vzip.u8 xreg1, xreg2 convert_0565_to_x888 yacc2, yreg3, yreg2, yreg1 vmull.u8 xacc1, xreg1, d28 vzip.u8 yreg1, yreg3 vmlal.u8 xacc1, xreg2, d29 vzip.u8 yreg2, yreg4 vmull.u8 xacc2, xreg3, d28 vzip.u8 yreg3, yreg4 vmlal.u8 xacc2, xreg4, d29 vzip.u8 yreg1, yreg2 vmull.u8 yacc1, yreg1, d28 vmlal.u8 yacc1, yreg2, d29 vmull.u8 yacc2, yreg3, d28 vmlal.u8 yacc2, yreg4, d29 .endm .macro bilinear_store_8888 numpix, tmp1, tmp2 .if numpix == 4 vst1.32 {d0, d1}, [OUT, :128]! .elseif numpix == 2 vst1.32 {d0}, [OUT, :64]! .elseif numpix == 1 vst1.32 {d0[0]}, [OUT, :32]! 
.else .error bilinear_store_8888 numpix is unsupported .endif .endm .macro bilinear_store_0565 numpix, tmp1, tmp2 vuzp.u8 d0, d1 vuzp.u8 d2, d3 vuzp.u8 d1, d3 vuzp.u8 d0, d2 convert_8888_to_0565 d2, d1, d0, q1, tmp1, tmp2 .if numpix == 4 vst1.16 {d2}, [OUT, :64]! .elseif numpix == 2 vst1.32 {d2[0]}, [OUT, :32]! .elseif numpix == 1 vst1.16 {d2[0]}, [OUT, :16]! .else .error bilinear_store_0565 numpix is unsupported .endif .endm .macro bilinear_interpolate_last_pixel src_fmt, dst_fmt bilinear_load_&src_fmt d0, d1, d2 vmull.u8 q1, d0, d28 vmlal.u8 q1, d1, d29 /* 5 cycles bubble */ vshll.u16 q0, d2, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q0, d2, d30 vmlal.u16 q0, d3, d30 /* 5 cycles bubble */ vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS) /* 3 cycles bubble */ vmovn.u16 d0, q0 /* 1 cycle bubble */ bilinear_store_&dst_fmt 1, q2, q3 .endm .macro bilinear_interpolate_two_pixels src_fmt, dst_fmt bilinear_load_and_vertical_interpolate_two_&src_fmt \ q1, q11, d0, d1, d20, d21, d22, d23 vshll.u16 q0, d2, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q0, d2, d30 vmlal.u16 q0, d3, d30 vshll.u16 q10, d22, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q10, d22, d31 vmlal.u16 q10, d23, d31 vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d1, q10, #(2 * BILINEAR_INTERPOLATION_BITS) vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vadd.u16 q12, q12, q13 vmovn.u16 d0, q0 bilinear_store_&dst_fmt 2, q2, q3 .endm .macro bilinear_interpolate_four_pixels src_fmt, dst_fmt bilinear_load_and_vertical_interpolate_four_&src_fmt \ q1, q11, d0, d1, d20, d21, d22, d23 \ q3, q9, d4, d5, d16, d17, d18, d19 pld [TMP1, PF_OFFS] sub TMP1, TMP1, STRIDE vshll.u16 q0, d2, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q0, d2, d30 vmlal.u16 q0, d3, d30 vshll.u16 q10, d22, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q10, d22, d31 vmlal.u16 q10, d23, d31 vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vshll.u16 q2, d6, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q2, d6, d30 vmlal.u16 q2, d7, d30 vshll.u16 q8, d18, #BILINEAR_INTERPOLATION_BITS pld [TMP2, PF_OFFS] vmlsl.u16 q8, d18, d31 vmlal.u16 q8, d19, d31 vadd.u16 q12, q12, q13 vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d1, q10, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d5, q8, #(2 * BILINEAR_INTERPOLATION_BITS) vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vmovn.u16 d0, q0 vmovn.u16 d1, q2 vadd.u16 q12, q12, q13 bilinear_store_&dst_fmt 4, q2, q3 .endm .macro bilinear_interpolate_four_pixels_head src_fmt, dst_fmt .ifdef have_bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt&_head .else bilinear_interpolate_four_pixels src_fmt, dst_fmt .endif .endm .macro bilinear_interpolate_four_pixels_tail src_fmt, dst_fmt .ifdef have_bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt&_tail .endif .endm .macro bilinear_interpolate_four_pixels_tail_head src_fmt, dst_fmt .ifdef have_bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt&_tail_head .else bilinear_interpolate_four_pixels src_fmt, dst_fmt .endif .endm .macro bilinear_interpolate_eight_pixels_head src_fmt, dst_fmt .ifdef have_bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt&_head .else bilinear_interpolate_four_pixels_head src_fmt, dst_fmt bilinear_interpolate_four_pixels_tail_head src_fmt, dst_fmt .endif .endm .macro 
bilinear_interpolate_eight_pixels_tail src_fmt, dst_fmt .ifdef have_bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt&_tail .else bilinear_interpolate_four_pixels_tail src_fmt, dst_fmt .endif .endm .macro bilinear_interpolate_eight_pixels_tail_head src_fmt, dst_fmt .ifdef have_bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt&_tail_head .else bilinear_interpolate_four_pixels_tail_head src_fmt, dst_fmt bilinear_interpolate_four_pixels_tail_head src_fmt, dst_fmt .endif .endm .set BILINEAR_FLAG_UNROLL_4, 0 .set BILINEAR_FLAG_UNROLL_8, 1 .set BILINEAR_FLAG_USE_ALL_NEON_REGS, 2 /* * Main template macro for generating NEON optimized bilinear scanline * functions. * * Bilinear scanline scaler macro template uses the following arguments: * fname - name of the function to generate * src_fmt - source color format (8888 or 0565) * dst_fmt - destination color format (8888 or 0565) * bpp_shift - (1 << bpp_shift) is the size of source pixel in bytes * prefetch_distance - prefetch in the source image by that many * pixels ahead */ .macro generate_bilinear_scanline_func fname, src_fmt, dst_fmt, \ src_bpp_shift, dst_bpp_shift, \ prefetch_distance, flags pixman_asm_function fname OUT .req r0 TOP .req r1 BOTTOM .req r2 WT .req r3 WB .req r4 X .req r5 UX .req r6 WIDTH .req ip TMP1 .req r3 TMP2 .req r4 PF_OFFS .req r7 TMP3 .req r8 TMP4 .req r9 STRIDE .req r2 mov ip, sp push {r4, r5, r6, r7, r8, r9} mov PF_OFFS, #prefetch_distance ldmia ip, {WB, X, UX, WIDTH} mul PF_OFFS, PF_OFFS, UX .if ((flags) & BILINEAR_FLAG_USE_ALL_NEON_REGS) != 0 vpush {d8-d15} .endif sub STRIDE, BOTTOM, TOP .unreq BOTTOM cmp WIDTH, #0 ble 3f vdup.u16 q12, X vdup.u16 q13, UX vdup.u8 d28, WT vdup.u8 d29, WB vadd.u16 d25, d25, d26 /* ensure good destination alignment */ cmp WIDTH, #1 blt 0f tst OUT, #(1 << dst_bpp_shift) beq 0f vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vadd.u16 q12, q12, q13 bilinear_interpolate_last_pixel src_fmt, dst_fmt sub WIDTH, WIDTH, #1 0: vadd.u16 q13, q13, q13 vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vadd.u16 q12, q12, q13 cmp WIDTH, #2 blt 0f tst OUT, #(1 << (dst_bpp_shift + 1)) beq 0f bilinear_interpolate_two_pixels src_fmt, dst_fmt sub WIDTH, WIDTH, #2 0: .if ((flags) & BILINEAR_FLAG_UNROLL_8) != 0 /*********** 8 pixels per iteration *****************/ cmp WIDTH, #4 blt 0f tst OUT, #(1 << (dst_bpp_shift + 2)) beq 0f bilinear_interpolate_four_pixels src_fmt, dst_fmt sub WIDTH, WIDTH, #4 0: subs WIDTH, WIDTH, #8 blt 1f mov PF_OFFS, PF_OFFS, asr #(16 - src_bpp_shift) bilinear_interpolate_eight_pixels_head src_fmt, dst_fmt subs WIDTH, WIDTH, #8 blt 5f 0: bilinear_interpolate_eight_pixels_tail_head src_fmt, dst_fmt subs WIDTH, WIDTH, #8 bge 0b 5: bilinear_interpolate_eight_pixels_tail src_fmt, dst_fmt 1: tst WIDTH, #4 beq 2f bilinear_interpolate_four_pixels src_fmt, dst_fmt 2: .else /*********** 4 pixels per iteration *****************/ subs WIDTH, WIDTH, #4 blt 1f mov PF_OFFS, PF_OFFS, asr #(16 - src_bpp_shift) bilinear_interpolate_four_pixels_head src_fmt, dst_fmt subs WIDTH, WIDTH, #4 blt 5f 0: bilinear_interpolate_four_pixels_tail_head src_fmt, dst_fmt subs WIDTH, WIDTH, #4 bge 0b 5: bilinear_interpolate_four_pixels_tail src_fmt, dst_fmt 1: /****************************************************/ .endif /* handle the remaining trailing pixels */ tst WIDTH, #2 beq 2f bilinear_interpolate_two_pixels src_fmt, dst_fmt 2: tst WIDTH, #1 beq 3f bilinear_interpolate_last_pixel src_fmt, 
dst_fmt 3: .if ((flags) & BILINEAR_FLAG_USE_ALL_NEON_REGS) != 0 vpop {d8-d15} .endif pop {r4, r5, r6, r7, r8, r9} bx lr .unreq OUT .unreq TOP .unreq WT .unreq WB .unreq X .unreq UX .unreq WIDTH .unreq TMP1 .unreq TMP2 .unreq PF_OFFS .unreq TMP3 .unreq TMP4 .unreq STRIDE .endfunc .endm /*****************************************************************************/ .set have_bilinear_interpolate_four_pixels_8888_8888, 1 .macro bilinear_interpolate_four_pixels_8888_8888_head mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #2 mov TMP2, X, asr #16 add X, X, UX add TMP2, TOP, TMP2, asl #2 vld1.32 {d22}, [TMP1], STRIDE vld1.32 {d23}, [TMP1] mov TMP3, X, asr #16 add X, X, UX add TMP3, TOP, TMP3, asl #2 vmull.u8 q8, d22, d28 vmlal.u8 q8, d23, d29 vld1.32 {d22}, [TMP2], STRIDE vld1.32 {d23}, [TMP2] mov TMP4, X, asr #16 add X, X, UX add TMP4, TOP, TMP4, asl #2 vmull.u8 q9, d22, d28 vmlal.u8 q9, d23, d29 vld1.32 {d22}, [TMP3], STRIDE vld1.32 {d23}, [TMP3] vmull.u8 q10, d22, d28 vmlal.u8 q10, d23, d29 vshll.u16 q0, d16, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q0, d16, d30 vmlal.u16 q0, d17, d30 pld [TMP4, PF_OFFS] vld1.32 {d16}, [TMP4], STRIDE vld1.32 {d17}, [TMP4] pld [TMP4, PF_OFFS] vmull.u8 q11, d16, d28 vmlal.u8 q11, d17, d29 vshll.u16 q1, d18, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q1, d18, d31 .endm .macro bilinear_interpolate_four_pixels_8888_8888_tail vmlal.u16 q1, d19, d31 vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vshll.u16 q2, d20, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q2, d20, d30 vmlal.u16 q2, d21, d30 vshll.u16 q3, d22, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q3, d22, d31 vmlal.u16 q3, d23, d31 vadd.u16 q12, q12, q13 vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS) vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vshrn.u32 d5, q3, #(2 * BILINEAR_INTERPOLATION_BITS) vmovn.u16 d6, q0 vmovn.u16 d7, q2 vadd.u16 q12, q12, q13 vst1.32 {d6, d7}, [OUT, :128]! .endm .macro bilinear_interpolate_four_pixels_8888_8888_tail_head mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #2 mov TMP2, X, asr #16 add X, X, UX add TMP2, TOP, TMP2, asl #2 vmlal.u16 q1, d19, d31 vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vshll.u16 q2, d20, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q2, d20, d30 vmlal.u16 q2, d21, d30 vshll.u16 q3, d22, #BILINEAR_INTERPOLATION_BITS vld1.32 {d20}, [TMP1], STRIDE vmlsl.u16 q3, d22, d31 vmlal.u16 q3, d23, d31 vld1.32 {d21}, [TMP1] vmull.u8 q8, d20, d28 vmlal.u8 q8, d21, d29 vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS) vld1.32 {d22}, [TMP2], STRIDE vshrn.u32 d5, q3, #(2 * BILINEAR_INTERPOLATION_BITS) vadd.u16 q12, q12, q13 vld1.32 {d23}, [TMP2] vmull.u8 q9, d22, d28 mov TMP3, X, asr #16 add X, X, UX add TMP3, TOP, TMP3, asl #2 mov TMP4, X, asr #16 add X, X, UX add TMP4, TOP, TMP4, asl #2 vmlal.u8 q9, d23, d29 vld1.32 {d22}, [TMP3], STRIDE vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vld1.32 {d23}, [TMP3] vmull.u8 q10, d22, d28 vmlal.u8 q10, d23, d29 vmovn.u16 d6, q0 vshll.u16 q0, d16, #BILINEAR_INTERPOLATION_BITS vmovn.u16 d7, q2 vmlsl.u16 q0, d16, d30 vmlal.u16 q0, d17, d30 pld [TMP4, PF_OFFS] vld1.32 {d16}, [TMP4], STRIDE vadd.u16 q12, q12, q13 vld1.32 {d17}, [TMP4] pld [TMP4, PF_OFFS] vmull.u8 q11, d16, d28 vmlal.u8 q11, d17, d29 vst1.32 {d6, d7}, [OUT, :128]! 
vshll.u16 q1, d18, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q1, d18, d31 .endm /*****************************************************************************/ .set have_bilinear_interpolate_eight_pixels_8888_0565, 1 .macro bilinear_interpolate_eight_pixels_8888_0565_head mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #2 mov TMP2, X, asr #16 add X, X, UX add TMP2, TOP, TMP2, asl #2 vld1.32 {d20}, [TMP1], STRIDE vld1.32 {d21}, [TMP1] vmull.u8 q8, d20, d28 vmlal.u8 q8, d21, d29 vld1.32 {d22}, [TMP2], STRIDE vld1.32 {d23}, [TMP2] vmull.u8 q9, d22, d28 mov TMP3, X, asr #16 add X, X, UX add TMP3, TOP, TMP3, asl #2 mov TMP4, X, asr #16 add X, X, UX add TMP4, TOP, TMP4, asl #2 vmlal.u8 q9, d23, d29 vld1.32 {d22}, [TMP3], STRIDE vld1.32 {d23}, [TMP3] vmull.u8 q10, d22, d28 vmlal.u8 q10, d23, d29 vshll.u16 q0, d16, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q0, d16, d30 vmlal.u16 q0, d17, d30 pld [TMP4, PF_OFFS] vld1.32 {d16}, [TMP4], STRIDE vld1.32 {d17}, [TMP4] pld [TMP4, PF_OFFS] vmull.u8 q11, d16, d28 vmlal.u8 q11, d17, d29 vshll.u16 q1, d18, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q1, d18, d31 mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #2 mov TMP2, X, asr #16 add X, X, UX add TMP2, TOP, TMP2, asl #2 vmlal.u16 q1, d19, d31 vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vshll.u16 q2, d20, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q2, d20, d30 vmlal.u16 q2, d21, d30 vshll.u16 q3, d22, #BILINEAR_INTERPOLATION_BITS vld1.32 {d20}, [TMP1], STRIDE vmlsl.u16 q3, d22, d31 vmlal.u16 q3, d23, d31 vld1.32 {d21}, [TMP1] vmull.u8 q8, d20, d28 vmlal.u8 q8, d21, d29 vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS) vld1.32 {d22}, [TMP2], STRIDE vshrn.u32 d5, q3, #(2 * BILINEAR_INTERPOLATION_BITS) vadd.u16 q12, q12, q13 vld1.32 {d23}, [TMP2] vmull.u8 q9, d22, d28 mov TMP3, X, asr #16 add X, X, UX add TMP3, TOP, TMP3, asl #2 mov TMP4, X, asr #16 add X, X, UX add TMP4, TOP, TMP4, asl #2 vmlal.u8 q9, d23, d29 vld1.32 {d22}, [TMP3], STRIDE vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vld1.32 {d23}, [TMP3] vmull.u8 q10, d22, d28 vmlal.u8 q10, d23, d29 vmovn.u16 d8, q0 vshll.u16 q0, d16, #BILINEAR_INTERPOLATION_BITS vmovn.u16 d9, q2 vmlsl.u16 q0, d16, d30 vmlal.u16 q0, d17, d30 pld [TMP4, PF_OFFS] vld1.32 {d16}, [TMP4], STRIDE vadd.u16 q12, q12, q13 vld1.32 {d17}, [TMP4] pld [TMP4, PF_OFFS] vmull.u8 q11, d16, d28 vmlal.u8 q11, d17, d29 vshll.u16 q1, d18, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q1, d18, d31 .endm .macro bilinear_interpolate_eight_pixels_8888_0565_tail vmlal.u16 q1, d19, d31 vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vshll.u16 q2, d20, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q2, d20, d30 vmlal.u16 q2, d21, d30 vshll.u16 q3, d22, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q3, d22, d31 vmlal.u16 q3, d23, d31 vadd.u16 q12, q12, q13 vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS) vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vshrn.u32 d5, q3, #(2 * BILINEAR_INTERPOLATION_BITS) vmovn.u16 d10, q0 vmovn.u16 d11, q2 vadd.u16 q12, q12, q13 vuzp.u8 d8, d9 vuzp.u8 d10, d11 vuzp.u8 d9, d11 vuzp.u8 d8, d10 vshll.u8 q6, d9, #8 vshll.u8 q5, d10, #8 vshll.u8 q7, d8, #8 vsri.u16 q5, q6, #5 vsri.u16 q5, q7, #11 vst1.32 {d10, d11}, [OUT, :128]! 
.endm .macro bilinear_interpolate_eight_pixels_8888_0565_tail_head mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #2 mov TMP2, X, asr #16 add X, X, UX add TMP2, TOP, TMP2, asl #2 vmlal.u16 q1, d19, d31 vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vuzp.u8 d8, d9 vshll.u16 q2, d20, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q2, d20, d30 vmlal.u16 q2, d21, d30 vshll.u16 q3, d22, #BILINEAR_INTERPOLATION_BITS vld1.32 {d20}, [TMP1], STRIDE vmlsl.u16 q3, d22, d31 vmlal.u16 q3, d23, d31 vld1.32 {d21}, [TMP1] vmull.u8 q8, d20, d28 vmlal.u8 q8, d21, d29 vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS) vld1.32 {d22}, [TMP2], STRIDE vshrn.u32 d5, q3, #(2 * BILINEAR_INTERPOLATION_BITS) vadd.u16 q12, q12, q13 vld1.32 {d23}, [TMP2] vmull.u8 q9, d22, d28 mov TMP3, X, asr #16 add X, X, UX add TMP3, TOP, TMP3, asl #2 mov TMP4, X, asr #16 add X, X, UX add TMP4, TOP, TMP4, asl #2 vmlal.u8 q9, d23, d29 vld1.32 {d22}, [TMP3], STRIDE vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vld1.32 {d23}, [TMP3] vmull.u8 q10, d22, d28 vmlal.u8 q10, d23, d29 vmovn.u16 d10, q0 vshll.u16 q0, d16, #BILINEAR_INTERPOLATION_BITS vmovn.u16 d11, q2 vmlsl.u16 q0, d16, d30 vmlal.u16 q0, d17, d30 pld [TMP4, PF_OFFS] vld1.32 {d16}, [TMP4], STRIDE vadd.u16 q12, q12, q13 vld1.32 {d17}, [TMP4] pld [TMP4, PF_OFFS] vmull.u8 q11, d16, d28 vmlal.u8 q11, d17, d29 vuzp.u8 d10, d11 vshll.u16 q1, d18, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q1, d18, d31 mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #2 mov TMP2, X, asr #16 add X, X, UX add TMP2, TOP, TMP2, asl #2 vmlal.u16 q1, d19, d31 vuzp.u8 d9, d11 vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vshll.u16 q2, d20, #BILINEAR_INTERPOLATION_BITS vuzp.u8 d8, d10 vmlsl.u16 q2, d20, d30 vmlal.u16 q2, d21, d30 vshll.u16 q3, d22, #BILINEAR_INTERPOLATION_BITS vld1.32 {d20}, [TMP1], STRIDE vmlsl.u16 q3, d22, d31 vmlal.u16 q3, d23, d31 vld1.32 {d21}, [TMP1] vmull.u8 q8, d20, d28 vmlal.u8 q8, d21, d29 vshll.u8 q6, d9, #8 vshll.u8 q5, d10, #8 vshll.u8 q7, d8, #8 vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS) vsri.u16 q5, q6, #5 vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS) vsri.u16 q5, q7, #11 vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS) vld1.32 {d22}, [TMP2], STRIDE vshrn.u32 d5, q3, #(2 * BILINEAR_INTERPOLATION_BITS) vadd.u16 q12, q12, q13 vld1.32 {d23}, [TMP2] vmull.u8 q9, d22, d28 mov TMP3, X, asr #16 add X, X, UX add TMP3, TOP, TMP3, asl #2 mov TMP4, X, asr #16 add X, X, UX add TMP4, TOP, TMP4, asl #2 vmlal.u8 q9, d23, d29 vld1.32 {d22}, [TMP3], STRIDE vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vld1.32 {d23}, [TMP3] vmull.u8 q10, d22, d28 vmlal.u8 q10, d23, d29 vmovn.u16 d8, q0 vshll.u16 q0, d16, #BILINEAR_INTERPOLATION_BITS vmovn.u16 d9, q2 vmlsl.u16 q0, d16, d30 vmlal.u16 q0, d17, d30 pld [TMP4, PF_OFFS] vld1.32 {d16}, [TMP4], STRIDE vadd.u16 q12, q12, q13 vld1.32 {d17}, [TMP4] pld [TMP4, PF_OFFS] vmull.u8 q11, d16, d28 vmlal.u8 q11, d17, d29 vshll.u16 q1, d18, #BILINEAR_INTERPOLATION_BITS vst1.32 {d10, d11}, [OUT, :128]! 
vmlsl.u16 q1, d18, d31 .endm /*****************************************************************************/ generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_8888_8888_SRC_asm_neon, 8888, 8888, \ 2, 2, 28, BILINEAR_FLAG_UNROLL_4 generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_8888_0565_SRC_asm_neon, 8888, 0565, \ 2, 1, 28, BILINEAR_FLAG_UNROLL_8 | BILINEAR_FLAG_USE_ALL_NEON_REGS generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_0565_x888_SRC_asm_neon, 0565, 8888, \ 1, 2, 28, BILINEAR_FLAG_UNROLL_4 generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_0565_0565_SRC_asm_neon, 0565, 0565, \ 1, 1, 28, BILINEAR_FLAG_UNROLL_4
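/*
 * Reference note on the bilinear math above: the generated scanline
 * functions walk a 16.16 fixed-point source coordinate (X stepped by UX),
 * the vertical weights WT/WB live in d28/d29, and the horizontal fraction
 * distx is the top BILINEAR_INTERPOLATION_BITS bits of X (taken by the
 * vshr.u16 #(16 - BILINEAR_INTERPOLATION_BITS) instructions).  A hedged C
 * sketch of what one channel of one output pixel works out to, assuming
 * wt + wb == 1 << BITS and 0 <= distx < 1 << BITS, which is the scaling
 * the final vshrn #(2 * BITS) implies; BITS, bilinear_channel and the
 * argument names are illustrative, not pixman API:
 *
 *   #include <stdint.h>
 *
 *   #define BITS 7   // BILINEAR_INTERPOLATION_BITS (see pixman-private.h)
 *
 *   static inline uint8_t
 *   bilinear_channel (uint8_t tl, uint8_t tr, uint8_t bl, uint8_t br,
 *                     unsigned wt, unsigned wb, unsigned distx)
 *   {
 *       uint32_t left  = tl * wt + bl * wb;           // vertical pass (vmull/vmlal)
 *       uint32_t right = tr * wt + br * wb;
 *       uint32_t h = (left << BITS)                   // horizontal pass
 *                  - left * distx + right * distx;    // (vshll/vmlsl/vmlal)
 *       return (uint8_t) (h >> (2 * BITS));           // vshrn, truncating
 *   }
 */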
ElSargo/wezpy
45,185
wezterm-src/deps/cairo/pixman/pixman/pixman-arm-neon-asm-bilinear.S
/* * Copyright © 2011 SCore Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Author: Siarhei Siamashka (siarhei.siamashka@nokia.com) * Author: Taekyun Kim (tkq.kim@samsung.com) */ /* * This file contains scaled bilinear scanline functions implemented * using older siarhei's bilinear macro template. * * << General scanline function procedures >> * 1. bilinear interpolate source pixels * 2. load mask pixels * 3. load destination pixels * 4. duplicate mask to fill whole register * 5. interleave source & destination pixels * 6. apply mask to source pixels * 7. combine source & destination pixels * 8, Deinterleave final result * 9. store destination pixels * * All registers with single number (i.e. src0, tmp0) are 64-bits registers. * Registers with double numbers(src01, dst01) are 128-bits registers. * All temp registers can be used freely outside the code block. * Assume that symbol(register .req) OUT and MASK are defined at caller of these macro blocks. * * Remarks * There can be lots of pipeline stalls inside code block and between code blocks. * Further optimizations will be done by new macro templates using head/tail_head/tail scheme. */ /* Prevent the stack from becoming executable for no reason... */ #if defined(__linux__) && defined (__ELF__) .section .note.GNU-stack,"",%progbits #endif .text .fpu neon .arch armv7a .object_arch armv4 .eabi_attribute 10, 0 .eabi_attribute 12, 0 .arm .altmacro .p2align 2 #include "pixman-private.h" #include "pixman-arm-asm.h" #include "pixman-arm-neon-asm.h" /* * Bilinear macros from pixman-arm-neon-asm.S */ /* * Bilinear scaling support code which tries to provide pixel fetching, color * format conversion, and interpolation as separate macros which can be used * as the basic building blocks for constructing bilinear scanline functions. 
*/ .macro bilinear_load_8888 reg1, reg2, tmp mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #2 vld1.32 {reg1}, [TMP1], STRIDE vld1.32 {reg2}, [TMP1] .endm .macro bilinear_load_0565 reg1, reg2, tmp mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #1 vld1.32 {reg2[0]}, [TMP1], STRIDE vld1.32 {reg2[1]}, [TMP1] convert_four_0565_to_x888_packed reg2, reg1, reg2, tmp .endm .macro bilinear_load_and_vertical_interpolate_two_8888 \ acc1, acc2, reg1, reg2, reg3, reg4, tmp1, tmp2 bilinear_load_8888 reg1, reg2, tmp1 vmull.u8 acc1, reg1, d28 vmlal.u8 acc1, reg2, d29 bilinear_load_8888 reg3, reg4, tmp2 vmull.u8 acc2, reg3, d28 vmlal.u8 acc2, reg4, d29 .endm .macro bilinear_load_and_vertical_interpolate_four_8888 \ xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi \ yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi bilinear_load_and_vertical_interpolate_two_8888 \ xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi bilinear_load_and_vertical_interpolate_two_8888 \ yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi .endm .macro bilinear_load_and_vertical_interpolate_two_0565 \ acc1, acc2, reg1, reg2, reg3, reg4, acc2lo, acc2hi mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #1 mov TMP2, X, asr #16 add X, X, UX add TMP2, TOP, TMP2, asl #1 vld1.32 {acc2lo[0]}, [TMP1], STRIDE vld1.32 {acc2hi[0]}, [TMP2], STRIDE vld1.32 {acc2lo[1]}, [TMP1] vld1.32 {acc2hi[1]}, [TMP2] convert_0565_to_x888 acc2, reg3, reg2, reg1 vzip.u8 reg1, reg3 vzip.u8 reg2, reg4 vzip.u8 reg3, reg4 vzip.u8 reg1, reg2 vmull.u8 acc1, reg1, d28 vmlal.u8 acc1, reg2, d29 vmull.u8 acc2, reg3, d28 vmlal.u8 acc2, reg4, d29 .endm .macro bilinear_load_and_vertical_interpolate_four_0565 \ xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi \ yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #1 mov TMP2, X, asr #16 add X, X, UX add TMP2, TOP, TMP2, asl #1 vld1.32 {xacc2lo[0]}, [TMP1], STRIDE vld1.32 {xacc2hi[0]}, [TMP2], STRIDE vld1.32 {xacc2lo[1]}, [TMP1] vld1.32 {xacc2hi[1]}, [TMP2] convert_0565_to_x888 xacc2, xreg3, xreg2, xreg1 mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #1 mov TMP2, X, asr #16 add X, X, UX add TMP2, TOP, TMP2, asl #1 vld1.32 {yacc2lo[0]}, [TMP1], STRIDE vzip.u8 xreg1, xreg3 vld1.32 {yacc2hi[0]}, [TMP2], STRIDE vzip.u8 xreg2, xreg4 vld1.32 {yacc2lo[1]}, [TMP1] vzip.u8 xreg3, xreg4 vld1.32 {yacc2hi[1]}, [TMP2] vzip.u8 xreg1, xreg2 convert_0565_to_x888 yacc2, yreg3, yreg2, yreg1 vmull.u8 xacc1, xreg1, d28 vzip.u8 yreg1, yreg3 vmlal.u8 xacc1, xreg2, d29 vzip.u8 yreg2, yreg4 vmull.u8 xacc2, xreg3, d28 vzip.u8 yreg3, yreg4 vmlal.u8 xacc2, xreg4, d29 vzip.u8 yreg1, yreg2 vmull.u8 yacc1, yreg1, d28 vmlal.u8 yacc1, yreg2, d29 vmull.u8 yacc2, yreg3, d28 vmlal.u8 yacc2, yreg4, d29 .endm .macro bilinear_store_8888 numpix, tmp1, tmp2 .if numpix == 4 vst1.32 {d0, d1}, [OUT]! .elseif numpix == 2 vst1.32 {d0}, [OUT]! .elseif numpix == 1 vst1.32 {d0[0]}, [OUT, :32]! .else .error bilinear_store_8888 numpix is unsupported .endif .endm .macro bilinear_store_0565 numpix, tmp1, tmp2 vuzp.u8 d0, d1 vuzp.u8 d2, d3 vuzp.u8 d1, d3 vuzp.u8 d0, d2 convert_8888_to_0565 d2, d1, d0, q1, tmp1, tmp2 .if numpix == 4 vst1.16 {d2}, [OUT]! .elseif numpix == 2 vst1.32 {d2[0]}, [OUT]! .elseif numpix == 1 vst1.16 {d2[0]}, [OUT]! .else .error bilinear_store_0565 numpix is unsupported .endif .endm /* * Macros for loading mask pixels into register 'mask'. * vdup must be done in somewhere else. 
*/ .macro bilinear_load_mask_x numpix, mask .endm .macro bilinear_load_mask_8 numpix, mask .if numpix == 4 vld1.32 {mask[0]}, [MASK]! .elseif numpix == 2 vld1.16 {mask[0]}, [MASK]! .elseif numpix == 1 vld1.8 {mask[0]}, [MASK]! .else .error bilinear_load_mask_8 numpix is unsupported .endif pld [MASK, #prefetch_offset] .endm .macro bilinear_load_mask mask_fmt, numpix, mask bilinear_load_mask_&mask_fmt numpix, mask .endm /* * Macros for loading destination pixels into register 'dst0' and 'dst1'. * Interleave should be done somewhere else. */ .macro bilinear_load_dst_0565_src numpix, dst0, dst1, dst01 .endm .macro bilinear_load_dst_8888_src numpix, dst0, dst1, dst01 .endm .macro bilinear_load_dst_8888 numpix, dst0, dst1, dst01 .if numpix == 4 vld1.32 {dst0, dst1}, [OUT] .elseif numpix == 2 vld1.32 {dst0}, [OUT] .elseif numpix == 1 vld1.32 {dst0[0]}, [OUT] .else .error bilinear_load_dst_8888 numpix is unsupported .endif pld [OUT, #(prefetch_offset * 4)] .endm .macro bilinear_load_dst_8888_over numpix, dst0, dst1, dst01 bilinear_load_dst_8888 numpix, dst0, dst1, dst01 .endm .macro bilinear_load_dst_8888_add numpix, dst0, dst1, dst01 bilinear_load_dst_8888 numpix, dst0, dst1, dst01 .endm .macro bilinear_load_dst dst_fmt, op, numpix, dst0, dst1, dst01 bilinear_load_dst_&dst_fmt&_&op numpix, dst0, dst1, dst01 .endm /* * Macros for duplicating partially loaded mask to fill entire register. * We will apply mask to interleaved source pixels, that is * (r0, r1, r2, r3, g0, g1, g2, g3) x (m0, m1, m2, m3, m0, m1, m2, m3) * (b0, b1, b2, b3, a0, a1, a2, a3) x (m0, m1, m2, m3, m0, m1, m2, m3) * So, we need to duplicate loaded mask into whole register. * * For two pixel case * (r0, r1, x, x, g0, g1, x, x) x (m0, m1, m0, m1, m0, m1, m0, m1) * (b0, b1, x, x, a0, a1, x, x) x (m0, m1, m0, m1, m0, m1, m0, m1) * We can do some optimizations for this including last pixel cases. */ .macro bilinear_duplicate_mask_x numpix, mask .endm .macro bilinear_duplicate_mask_8 numpix, mask .if numpix == 4 vdup.32 mask, mask[0] .elseif numpix == 2 vdup.16 mask, mask[0] .elseif numpix == 1 vdup.8 mask, mask[0] .else .error bilinear_duplicate_mask_8 is unsupported .endif .endm .macro bilinear_duplicate_mask mask_fmt, numpix, mask bilinear_duplicate_mask_&mask_fmt numpix, mask .endm /* * Macros for interleaving src and dst pixels to rrrr gggg bbbb aaaa form. * Interleave should be done when maks is enabled or operator is 'over'. */ .macro bilinear_interleave src0, src1, dst0, dst1 vuzp.8 src0, src1 vuzp.8 dst0, dst1 vuzp.8 src0, src1 vuzp.8 dst0, dst1 .endm .macro bilinear_interleave_src_dst_x_src \ numpix, src0, src1, src01, dst0, dst1, dst01 .endm .macro bilinear_interleave_src_dst_x_over \ numpix, src0, src1, src01, dst0, dst1, dst01 bilinear_interleave src0, src1, dst0, dst1 .endm .macro bilinear_interleave_src_dst_x_add \ numpix, src0, src1, src01, dst0, dst1, dst01 .endm .macro bilinear_interleave_src_dst_8_src \ numpix, src0, src1, src01, dst0, dst1, dst01 bilinear_interleave src0, src1, dst0, dst1 .endm .macro bilinear_interleave_src_dst_8_over \ numpix, src0, src1, src01, dst0, dst1, dst01 bilinear_interleave src0, src1, dst0, dst1 .endm .macro bilinear_interleave_src_dst_8_add \ numpix, src0, src1, src01, dst0, dst1, dst01 bilinear_interleave src0, src1, dst0, dst1 .endm .macro bilinear_interleave_src_dst \ mask_fmt, op, numpix, src0, src1, src01, dst0, dst1, dst01 bilinear_interleave_src_dst_&mask_fmt&_&op \ numpix, src0, src1, src01, dst0, dst1, dst01 .endm /* * Macros for applying masks to src pixels. 
(see combine_mask_u() function) * src, dst should be in interleaved form. * mask register should be in form (m0, m1, m2, m3). */ .macro bilinear_apply_mask_to_src_x \ numpix, src0, src1, src01, mask, \ tmp01, tmp23, tmp45, tmp67 .endm .macro bilinear_apply_mask_to_src_8 \ numpix, src0, src1, src01, mask, \ tmp01, tmp23, tmp45, tmp67 vmull.u8 tmp01, src0, mask vmull.u8 tmp23, src1, mask /* bubbles */ vrshr.u16 tmp45, tmp01, #8 vrshr.u16 tmp67, tmp23, #8 /* bubbles */ vraddhn.u16 src0, tmp45, tmp01 vraddhn.u16 src1, tmp67, tmp23 .endm .macro bilinear_apply_mask_to_src \ mask_fmt, numpix, src0, src1, src01, mask, \ tmp01, tmp23, tmp45, tmp67 bilinear_apply_mask_to_src_&mask_fmt \ numpix, src0, src1, src01, mask, \ tmp01, tmp23, tmp45, tmp67 .endm /* * Macros for combining src and destination pixels. * Interleave or not is depending on operator 'op'. */ .macro bilinear_combine_src \ numpix, src0, src1, src01, dst0, dst1, dst01, \ tmp01, tmp23, tmp45, tmp67, tmp8 .endm .macro bilinear_combine_over \ numpix, src0, src1, src01, dst0, dst1, dst01, \ tmp01, tmp23, tmp45, tmp67, tmp8 vdup.32 tmp8, src1[1] /* bubbles */ vmvn.8 tmp8, tmp8 /* bubbles */ vmull.u8 tmp01, dst0, tmp8 /* bubbles */ vmull.u8 tmp23, dst1, tmp8 /* bubbles */ vrshr.u16 tmp45, tmp01, #8 vrshr.u16 tmp67, tmp23, #8 /* bubbles */ vraddhn.u16 dst0, tmp45, tmp01 vraddhn.u16 dst1, tmp67, tmp23 /* bubbles */ vqadd.u8 src01, dst01, src01 .endm .macro bilinear_combine_add \ numpix, src0, src1, src01, dst0, dst1, dst01, \ tmp01, tmp23, tmp45, tmp67, tmp8 vqadd.u8 src01, dst01, src01 .endm .macro bilinear_combine \ op, numpix, src0, src1, src01, dst0, dst1, dst01, \ tmp01, tmp23, tmp45, tmp67, tmp8 bilinear_combine_&op \ numpix, src0, src1, src01, dst0, dst1, dst01, \ tmp01, tmp23, tmp45, tmp67, tmp8 .endm /* * Macros for final deinterleaving of destination pixels if needed. 
*/ .macro bilinear_deinterleave numpix, dst0, dst1, dst01 vuzp.8 dst0, dst1 /* bubbles */ vuzp.8 dst0, dst1 .endm .macro bilinear_deinterleave_dst_x_src numpix, dst0, dst1, dst01 .endm .macro bilinear_deinterleave_dst_x_over numpix, dst0, dst1, dst01 bilinear_deinterleave numpix, dst0, dst1, dst01 .endm .macro bilinear_deinterleave_dst_x_add numpix, dst0, dst1, dst01 .endm .macro bilinear_deinterleave_dst_8_src numpix, dst0, dst1, dst01 bilinear_deinterleave numpix, dst0, dst1, dst01 .endm .macro bilinear_deinterleave_dst_8_over numpix, dst0, dst1, dst01 bilinear_deinterleave numpix, dst0, dst1, dst01 .endm .macro bilinear_deinterleave_dst_8_add numpix, dst0, dst1, dst01 bilinear_deinterleave numpix, dst0, dst1, dst01 .endm .macro bilinear_deinterleave_dst mask_fmt, op, numpix, dst0, dst1, dst01 bilinear_deinterleave_dst_&mask_fmt&_&op numpix, dst0, dst1, dst01 .endm .macro bilinear_interpolate_last_pixel src_fmt, mask_fmt, dst_fmt, op bilinear_load_&src_fmt d0, d1, d2 bilinear_load_mask mask_fmt, 1, d4 bilinear_load_dst dst_fmt, op, 1, d18, d19, q9 vmull.u8 q1, d0, d28 vmlal.u8 q1, d1, d29 /* 5 cycles bubble */ vshll.u16 q0, d2, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q0, d2, d30 vmlal.u16 q0, d3, d30 /* 5 cycles bubble */ bilinear_duplicate_mask mask_fmt, 1, d4 vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS) /* 3 cycles bubble */ vmovn.u16 d0, q0 /* 1 cycle bubble */ bilinear_interleave_src_dst \ mask_fmt, op, 1, d0, d1, q0, d18, d19, q9 bilinear_apply_mask_to_src \ mask_fmt, 1, d0, d1, q0, d4, \ q3, q8, q10, q11 bilinear_combine \ op, 1, d0, d1, q0, d18, d19, q9, \ q3, q8, q10, q11, d5 bilinear_deinterleave_dst mask_fmt, op, 1, d0, d1, q0 bilinear_store_&dst_fmt 1, q2, q3 .endm .macro bilinear_interpolate_two_pixels src_fmt, mask_fmt, dst_fmt, op bilinear_load_and_vertical_interpolate_two_&src_fmt \ q1, q11, d0, d1, d20, d21, d22, d23 bilinear_load_mask mask_fmt, 2, d4 bilinear_load_dst dst_fmt, op, 2, d18, d19, q9 vshll.u16 q0, d2, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q0, d2, d30 vmlal.u16 q0, d3, d30 vshll.u16 q10, d22, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q10, d22, d31 vmlal.u16 q10, d23, d31 vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d1, q10, #(2 * BILINEAR_INTERPOLATION_BITS) bilinear_duplicate_mask mask_fmt, 2, d4 vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vadd.u16 q12, q12, q13 vmovn.u16 d0, q0 bilinear_interleave_src_dst \ mask_fmt, op, 2, d0, d1, q0, d18, d19, q9 bilinear_apply_mask_to_src \ mask_fmt, 2, d0, d1, q0, d4, \ q3, q8, q10, q11 bilinear_combine \ op, 2, d0, d1, q0, d18, d19, q9, \ q3, q8, q10, q11, d5 bilinear_deinterleave_dst mask_fmt, op, 2, d0, d1, q0 bilinear_store_&dst_fmt 2, q2, q3 .endm .macro bilinear_interpolate_four_pixels src_fmt, mask_fmt, dst_fmt, op bilinear_load_and_vertical_interpolate_four_&src_fmt \ q1, q11, d0, d1, d20, d21, d22, d23 \ q3, q9, d4, d5, d16, d17, d18, d19 pld [TMP1, PF_OFFS] sub TMP1, TMP1, STRIDE vshll.u16 q0, d2, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q0, d2, d30 vmlal.u16 q0, d3, d30 vshll.u16 q10, d22, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q10, d22, d31 vmlal.u16 q10, d23, d31 vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vshll.u16 q2, d6, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q2, d6, d30 vmlal.u16 q2, d7, d30 vshll.u16 q8, d18, #BILINEAR_INTERPOLATION_BITS bilinear_load_mask mask_fmt, 4, d22 bilinear_load_dst dst_fmt, op, 4, d2, d3, q1 pld [TMP1, PF_OFFS] vmlsl.u16 q8, d18, d31 vmlal.u16 q8, d19, d31 vadd.u16 q12, q12, q13 vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS) 
vshrn.u32 d1, q10, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d5, q8, #(2 * BILINEAR_INTERPOLATION_BITS) bilinear_duplicate_mask mask_fmt, 4, d22 vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vmovn.u16 d0, q0 vmovn.u16 d1, q2 vadd.u16 q12, q12, q13 bilinear_interleave_src_dst \ mask_fmt, op, 4, d0, d1, q0, d2, d3, q1 bilinear_apply_mask_to_src \ mask_fmt, 4, d0, d1, q0, d22, \ q3, q8, q9, q10 bilinear_combine \ op, 4, d0, d1, q0, d2, d3, q1, \ q3, q8, q9, q10, d23 bilinear_deinterleave_dst mask_fmt, op, 4, d0, d1, q0 bilinear_store_&dst_fmt 4, q2, q3 .endm .set BILINEAR_FLAG_USE_MASK, 1 .set BILINEAR_FLAG_USE_ALL_NEON_REGS, 2 /* * Main template macro for generating NEON optimized bilinear scanline functions. * * Bilinear scanline generator macro take folling arguments: * fname - name of the function to generate * src_fmt - source color format (8888 or 0565) * dst_fmt - destination color format (8888 or 0565) * src/dst_bpp_shift - (1 << bpp_shift) is the size of src/dst pixel in bytes * process_last_pixel - code block that interpolate one pixel and does not * update horizontal weight * process_two_pixels - code block that interpolate two pixels and update * horizontal weight * process_four_pixels - code block that interpolate four pixels and update * horizontal weight * process_pixblock_head - head part of middle loop * process_pixblock_tail - tail part of middle loop * process_pixblock_tail_head - tail_head of middle loop * pixblock_size - number of pixels processed in a single middle loop * prefetch_distance - prefetch in the source image by that many pixels ahead */ .macro generate_bilinear_scanline_func \ fname, \ src_fmt, dst_fmt, src_bpp_shift, dst_bpp_shift, \ bilinear_process_last_pixel, \ bilinear_process_two_pixels, \ bilinear_process_four_pixels, \ bilinear_process_pixblock_head, \ bilinear_process_pixblock_tail, \ bilinear_process_pixblock_tail_head, \ pixblock_size, \ prefetch_distance, \ flags pixman_asm_function fname .if pixblock_size == 8 .elseif pixblock_size == 4 .else .error unsupported pixblock size .endif .if ((flags) & BILINEAR_FLAG_USE_MASK) == 0 OUT .req r0 TOP .req r1 BOTTOM .req r2 WT .req r3 WB .req r4 X .req r5 UX .req r6 WIDTH .req ip TMP1 .req r3 TMP2 .req r4 PF_OFFS .req r7 TMP3 .req r8 TMP4 .req r9 STRIDE .req r2 mov ip, sp push {r4, r5, r6, r7, r8, r9} mov PF_OFFS, #prefetch_distance ldmia ip, {WB, X, UX, WIDTH} .else OUT .req r0 MASK .req r1 TOP .req r2 BOTTOM .req r3 WT .req r4 WB .req r5 X .req r6 UX .req r7 WIDTH .req ip TMP1 .req r4 TMP2 .req r5 PF_OFFS .req r8 TMP3 .req r9 TMP4 .req r10 STRIDE .req r3 .set prefetch_offset, prefetch_distance mov ip, sp push {r4, r5, r6, r7, r8, r9, r10, ip} mov PF_OFFS, #prefetch_distance ldmia ip, {WT, WB, X, UX, WIDTH} .endif mul PF_OFFS, PF_OFFS, UX .if ((flags) & BILINEAR_FLAG_USE_ALL_NEON_REGS) != 0 vpush {d8-d15} .endif sub STRIDE, BOTTOM, TOP .unreq BOTTOM cmp WIDTH, #0 ble 3f vdup.u16 q12, X vdup.u16 q13, UX vdup.u8 d28, WT vdup.u8 d29, WB vadd.u16 d25, d25, d26 /* ensure good destination alignment */ cmp WIDTH, #1 blt 0f tst OUT, #(1 << dst_bpp_shift) beq 0f vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vadd.u16 q12, q12, q13 bilinear_process_last_pixel sub WIDTH, WIDTH, #1 0: vadd.u16 q13, q13, q13 vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vadd.u16 q12, q12, q13 cmp WIDTH, #2 blt 0f tst OUT, #(1 << (dst_bpp_shift + 1)) beq 0f bilinear_process_two_pixels sub WIDTH, WIDTH, #2 0: .if pixblock_size == 8 cmp WIDTH, #4 blt 0f tst 
OUT, #(1 << (dst_bpp_shift + 2)) beq 0f bilinear_process_four_pixels sub WIDTH, WIDTH, #4 0: .endif subs WIDTH, WIDTH, #pixblock_size blt 1f mov PF_OFFS, PF_OFFS, asr #(16 - src_bpp_shift) bilinear_process_pixblock_head subs WIDTH, WIDTH, #pixblock_size blt 5f 0: bilinear_process_pixblock_tail_head subs WIDTH, WIDTH, #pixblock_size bge 0b 5: bilinear_process_pixblock_tail 1: .if pixblock_size == 8 tst WIDTH, #4 beq 2f bilinear_process_four_pixels 2: .endif /* handle the remaining trailing pixels */ tst WIDTH, #2 beq 2f bilinear_process_two_pixels 2: tst WIDTH, #1 beq 3f bilinear_process_last_pixel 3: .if ((flags) & BILINEAR_FLAG_USE_ALL_NEON_REGS) != 0 vpop {d8-d15} .endif .if ((flags) & BILINEAR_FLAG_USE_MASK) == 0 pop {r4, r5, r6, r7, r8, r9} .else pop {r4, r5, r6, r7, r8, r9, r10, ip} .endif bx lr .unreq OUT .unreq TOP .unreq WT .unreq WB .unreq X .unreq UX .unreq WIDTH .unreq TMP1 .unreq TMP2 .unreq PF_OFFS .unreq TMP3 .unreq TMP4 .unreq STRIDE .if ((flags) & BILINEAR_FLAG_USE_MASK) != 0 .unreq MASK .endif .endfunc .endm /* src_8888_8_8888 */ .macro bilinear_src_8888_8_8888_process_last_pixel bilinear_interpolate_last_pixel 8888, 8, 8888, src .endm .macro bilinear_src_8888_8_8888_process_two_pixels bilinear_interpolate_two_pixels 8888, 8, 8888, src .endm .macro bilinear_src_8888_8_8888_process_four_pixels bilinear_interpolate_four_pixels 8888, 8, 8888, src .endm .macro bilinear_src_8888_8_8888_process_pixblock_head bilinear_src_8888_8_8888_process_four_pixels .endm .macro bilinear_src_8888_8_8888_process_pixblock_tail .endm .macro bilinear_src_8888_8_8888_process_pixblock_tail_head bilinear_src_8888_8_8888_process_pixblock_tail bilinear_src_8888_8_8888_process_pixblock_head .endm /* src_8888_8_0565 */ .macro bilinear_src_8888_8_0565_process_last_pixel bilinear_interpolate_last_pixel 8888, 8, 0565, src .endm .macro bilinear_src_8888_8_0565_process_two_pixels bilinear_interpolate_two_pixels 8888, 8, 0565, src .endm .macro bilinear_src_8888_8_0565_process_four_pixels bilinear_interpolate_four_pixels 8888, 8, 0565, src .endm .macro bilinear_src_8888_8_0565_process_pixblock_head bilinear_src_8888_8_0565_process_four_pixels .endm .macro bilinear_src_8888_8_0565_process_pixblock_tail .endm .macro bilinear_src_8888_8_0565_process_pixblock_tail_head bilinear_src_8888_8_0565_process_pixblock_tail bilinear_src_8888_8_0565_process_pixblock_head .endm /* src_0565_8_x888 */ .macro bilinear_src_0565_8_x888_process_last_pixel bilinear_interpolate_last_pixel 0565, 8, 8888, src .endm .macro bilinear_src_0565_8_x888_process_two_pixels bilinear_interpolate_two_pixels 0565, 8, 8888, src .endm .macro bilinear_src_0565_8_x888_process_four_pixels bilinear_interpolate_four_pixels 0565, 8, 8888, src .endm .macro bilinear_src_0565_8_x888_process_pixblock_head bilinear_src_0565_8_x888_process_four_pixels .endm .macro bilinear_src_0565_8_x888_process_pixblock_tail .endm .macro bilinear_src_0565_8_x888_process_pixblock_tail_head bilinear_src_0565_8_x888_process_pixblock_tail bilinear_src_0565_8_x888_process_pixblock_head .endm /* src_0565_8_0565 */ .macro bilinear_src_0565_8_0565_process_last_pixel bilinear_interpolate_last_pixel 0565, 8, 0565, src .endm .macro bilinear_src_0565_8_0565_process_two_pixels bilinear_interpolate_two_pixels 0565, 8, 0565, src .endm .macro bilinear_src_0565_8_0565_process_four_pixels bilinear_interpolate_four_pixels 0565, 8, 0565, src .endm .macro bilinear_src_0565_8_0565_process_pixblock_head bilinear_src_0565_8_0565_process_four_pixels .endm .macro 
bilinear_src_0565_8_0565_process_pixblock_tail .endm .macro bilinear_src_0565_8_0565_process_pixblock_tail_head bilinear_src_0565_8_0565_process_pixblock_tail bilinear_src_0565_8_0565_process_pixblock_head .endm /* over_8888_8888 */ .macro bilinear_over_8888_8888_process_last_pixel bilinear_interpolate_last_pixel 8888, x, 8888, over .endm .macro bilinear_over_8888_8888_process_two_pixels bilinear_interpolate_two_pixels 8888, x, 8888, over .endm .macro bilinear_over_8888_8888_process_four_pixels bilinear_interpolate_four_pixels 8888, x, 8888, over .endm .macro bilinear_over_8888_8888_process_pixblock_head mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #2 mov TMP2, X, asr #16 add X, X, UX add TMP2, TOP, TMP2, asl #2 vld1.32 {d22}, [TMP1], STRIDE vld1.32 {d23}, [TMP1] mov TMP3, X, asr #16 add X, X, UX add TMP3, TOP, TMP3, asl #2 vmull.u8 q8, d22, d28 vmlal.u8 q8, d23, d29 vld1.32 {d22}, [TMP2], STRIDE vld1.32 {d23}, [TMP2] mov TMP4, X, asr #16 add X, X, UX add TMP4, TOP, TMP4, asl #2 vmull.u8 q9, d22, d28 vmlal.u8 q9, d23, d29 vld1.32 {d22}, [TMP3], STRIDE vld1.32 {d23}, [TMP3] vmull.u8 q10, d22, d28 vmlal.u8 q10, d23, d29 vshll.u16 q0, d16, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q0, d16, d30 vmlal.u16 q0, d17, d30 pld [TMP4, PF_OFFS] vld1.32 {d16}, [TMP4], STRIDE vld1.32 {d17}, [TMP4] pld [TMP4, PF_OFFS] vmull.u8 q11, d16, d28 vmlal.u8 q11, d17, d29 vshll.u16 q1, d18, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q1, d18, d31 vmlal.u16 q1, d19, d31 vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vadd.u16 q12, q12, q13 .endm .macro bilinear_over_8888_8888_process_pixblock_tail vshll.u16 q2, d20, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q2, d20, d30 vmlal.u16 q2, d21, d30 vshll.u16 q3, d22, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q3, d22, d31 vmlal.u16 q3, d23, d31 vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS) vld1.32 {d2, d3}, [OUT, :128] pld [OUT, #(prefetch_offset * 4)] vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS) vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vshrn.u32 d5, q3, #(2 * BILINEAR_INTERPOLATION_BITS) vmovn.u16 d6, q0 vmovn.u16 d7, q2 vuzp.8 d6, d7 vuzp.8 d2, d3 vuzp.8 d6, d7 vuzp.8 d2, d3 vdup.32 d4, d7[1] vmvn.8 d4, d4 vmull.u8 q11, d2, d4 vmull.u8 q2, d3, d4 vrshr.u16 q1, q11, #8 vrshr.u16 q10, q2, #8 vraddhn.u16 d2, q1, q11 vraddhn.u16 d3, q10, q2 vqadd.u8 q3, q1, q3 vuzp.8 d6, d7 vuzp.8 d6, d7 vadd.u16 q12, q12, q13 vst1.32 {d6, d7}, [OUT, :128]! 
.endm .macro bilinear_over_8888_8888_process_pixblock_tail_head vshll.u16 q2, d20, #BILINEAR_INTERPOLATION_BITS mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #2 vmlsl.u16 q2, d20, d30 mov TMP2, X, asr #16 add X, X, UX add TMP2, TOP, TMP2, asl #2 vmlal.u16 q2, d21, d30 vshll.u16 q3, d22, #BILINEAR_INTERPOLATION_BITS vld1.32 {d20}, [TMP1], STRIDE vmlsl.u16 q3, d22, d31 vmlal.u16 q3, d23, d31 vld1.32 {d21}, [TMP1] vmull.u8 q8, d20, d28 vmlal.u8 q8, d21, d29 vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS) vld1.32 {d2, d3}, [OUT, :128] pld [OUT, PF_OFFS] vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS) vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vld1.32 {d22}, [TMP2], STRIDE vshrn.u32 d5, q3, #(2 * BILINEAR_INTERPOLATION_BITS) vmovn.u16 d6, q0 vld1.32 {d23}, [TMP2] vmull.u8 q9, d22, d28 mov TMP3, X, asr #16 add X, X, UX add TMP3, TOP, TMP3, asl #2 mov TMP4, X, asr #16 add X, X, UX add TMP4, TOP, TMP4, asl #2 vmlal.u8 q9, d23, d29 vmovn.u16 d7, q2 vld1.32 {d22}, [TMP3], STRIDE vuzp.8 d6, d7 vuzp.8 d2, d3 vuzp.8 d6, d7 vuzp.8 d2, d3 vdup.32 d4, d7[1] vld1.32 {d23}, [TMP3] vmvn.8 d4, d4 vmull.u8 q10, d22, d28 vmlal.u8 q10, d23, d29 vmull.u8 q11, d2, d4 vmull.u8 q2, d3, d4 vshll.u16 q0, d16, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q0, d16, d30 vrshr.u16 q1, q11, #8 vmlal.u16 q0, d17, d30 vrshr.u16 q8, q2, #8 vraddhn.u16 d2, q1, q11 vraddhn.u16 d3, q8, q2 pld [TMP4, PF_OFFS] vld1.32 {d16}, [TMP4], STRIDE vqadd.u8 q3, q1, q3 vld1.32 {d17}, [TMP4] pld [TMP4, PF_OFFS] vmull.u8 q11, d16, d28 vmlal.u8 q11, d17, d29 vuzp.8 d6, d7 vshll.u16 q1, d18, #BILINEAR_INTERPOLATION_BITS vuzp.8 d6, d7 vmlsl.u16 q1, d18, d31 vadd.u16 q12, q12, q13 vmlal.u16 q1, d19, d31 vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vadd.u16 q12, q12, q13 vst1.32 {d6, d7}, [OUT, :128]! .endm /* over_8888_8_8888 */ .macro bilinear_over_8888_8_8888_process_last_pixel bilinear_interpolate_last_pixel 8888, 8, 8888, over .endm .macro bilinear_over_8888_8_8888_process_two_pixels bilinear_interpolate_two_pixels 8888, 8, 8888, over .endm .macro bilinear_over_8888_8_8888_process_four_pixels bilinear_interpolate_four_pixels 8888, 8, 8888, over .endm .macro bilinear_over_8888_8_8888_process_pixblock_head mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #2 vld1.32 {d0}, [TMP1], STRIDE mov TMP2, X, asr #16 add X, X, UX add TMP2, TOP, TMP2, asl #2 vld1.32 {d1}, [TMP1] mov TMP3, X, asr #16 add X, X, UX add TMP3, TOP, TMP3, asl #2 vld1.32 {d2}, [TMP2], STRIDE mov TMP4, X, asr #16 add X, X, UX add TMP4, TOP, TMP4, asl #2 vld1.32 {d3}, [TMP2] vmull.u8 q2, d0, d28 vmull.u8 q3, d2, d28 vmlal.u8 q2, d1, d29 vmlal.u8 q3, d3, d29 vshll.u16 q0, d4, #BILINEAR_INTERPOLATION_BITS vshll.u16 q1, d6, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q0, d4, d30 vmlsl.u16 q1, d6, d31 vmlal.u16 q0, d5, d30 vmlal.u16 q1, d7, d31 vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS) vld1.32 {d2}, [TMP3], STRIDE vld1.32 {d3}, [TMP3] pld [TMP4, PF_OFFS] vld1.32 {d4}, [TMP4], STRIDE vld1.32 {d5}, [TMP4] pld [TMP4, PF_OFFS] vmull.u8 q3, d2, d28 vmlal.u8 q3, d3, d29 vmull.u8 q1, d4, d28 vmlal.u8 q1, d5, d29 vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vld1.32 {d22[0]}, [MASK]! 
pld [MASK, #prefetch_offset] vadd.u16 q12, q12, q13 vmovn.u16 d16, q0 .endm .macro bilinear_over_8888_8_8888_process_pixblock_tail vshll.u16 q9, d6, #BILINEAR_INTERPOLATION_BITS vshll.u16 q10, d2, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q9, d6, d30 vmlsl.u16 q10, d2, d31 vmlal.u16 q9, d7, d30 vmlal.u16 q10, d3, d31 vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vadd.u16 q12, q12, q13 vdup.32 d22, d22[0] vshrn.u32 d18, q9, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d19, q10, #(2 * BILINEAR_INTERPOLATION_BITS) vmovn.u16 d17, q9 vld1.32 {d18, d19}, [OUT, :128] pld [OUT, PF_OFFS] vuzp.8 d16, d17 vuzp.8 d18, d19 vuzp.8 d16, d17 vuzp.8 d18, d19 vmull.u8 q10, d16, d22 vmull.u8 q11, d17, d22 vrsra.u16 q10, q10, #8 vrsra.u16 q11, q11, #8 vrshrn.u16 d16, q10, #8 vrshrn.u16 d17, q11, #8 vdup.32 d22, d17[1] vmvn.8 d22, d22 vmull.u8 q10, d18, d22 vmull.u8 q11, d19, d22 vrshr.u16 q9, q10, #8 vrshr.u16 q0, q11, #8 vraddhn.u16 d18, q9, q10 vraddhn.u16 d19, q0, q11 vqadd.u8 q9, q8, q9 vuzp.8 d18, d19 vuzp.8 d18, d19 vst1.32 {d18, d19}, [OUT, :128]! .endm .macro bilinear_over_8888_8_8888_process_pixblock_tail_head vshll.u16 q9, d6, #BILINEAR_INTERPOLATION_BITS mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #2 vshll.u16 q10, d2, #BILINEAR_INTERPOLATION_BITS vld1.32 {d0}, [TMP1], STRIDE mov TMP2, X, asr #16 add X, X, UX add TMP2, TOP, TMP2, asl #2 vmlsl.u16 q9, d6, d30 vmlsl.u16 q10, d2, d31 vld1.32 {d1}, [TMP1] mov TMP3, X, asr #16 add X, X, UX add TMP3, TOP, TMP3, asl #2 vmlal.u16 q9, d7, d30 vmlal.u16 q10, d3, d31 vld1.32 {d2}, [TMP2], STRIDE mov TMP4, X, asr #16 add X, X, UX add TMP4, TOP, TMP4, asl #2 vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vadd.u16 q12, q12, q13 vld1.32 {d3}, [TMP2] vdup.32 d22, d22[0] vshrn.u32 d18, q9, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d19, q10, #(2 * BILINEAR_INTERPOLATION_BITS) vmull.u8 q2, d0, d28 vmull.u8 q3, d2, d28 vmovn.u16 d17, q9 vld1.32 {d18, d19}, [OUT, :128] pld [OUT, #(prefetch_offset * 4)] vmlal.u8 q2, d1, d29 vmlal.u8 q3, d3, d29 vuzp.8 d16, d17 vuzp.8 d18, d19 vshll.u16 q0, d4, #BILINEAR_INTERPOLATION_BITS vshll.u16 q1, d6, #BILINEAR_INTERPOLATION_BITS vuzp.8 d16, d17 vuzp.8 d18, d19 vmlsl.u16 q0, d4, d30 vmlsl.u16 q1, d6, d31 vmull.u8 q10, d16, d22 vmull.u8 q11, d17, d22 vmlal.u16 q0, d5, d30 vmlal.u16 q1, d7, d31 vrsra.u16 q10, q10, #8 vrsra.u16 q11, q11, #8 vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS) vrshrn.u16 d16, q10, #8 vrshrn.u16 d17, q11, #8 vld1.32 {d2}, [TMP3], STRIDE vdup.32 d22, d17[1] vld1.32 {d3}, [TMP3] vmvn.8 d22, d22 pld [TMP4, PF_OFFS] vld1.32 {d4}, [TMP4], STRIDE vmull.u8 q10, d18, d22 vmull.u8 q11, d19, d22 vld1.32 {d5}, [TMP4] pld [TMP4, PF_OFFS] vmull.u8 q3, d2, d28 vrshr.u16 q9, q10, #8 vrshr.u16 q15, q11, #8 vmlal.u8 q3, d3, d29 vmull.u8 q1, d4, d28 vraddhn.u16 d18, q9, q10 vraddhn.u16 d19, q15, q11 vmlal.u8 q1, d5, d29 vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vqadd.u8 q9, q8, q9 vld1.32 {d22[0]}, [MASK]! vuzp.8 d18, d19 vadd.u16 q12, q12, q13 vuzp.8 d18, d19 vmovn.u16 d16, q0 vst1.32 {d18, d19}, [OUT, :128]! 
.endm /* add_8888_8888 */ .macro bilinear_add_8888_8888_process_last_pixel bilinear_interpolate_last_pixel 8888, x, 8888, add .endm .macro bilinear_add_8888_8888_process_two_pixels bilinear_interpolate_two_pixels 8888, x, 8888, add .endm .macro bilinear_add_8888_8888_process_four_pixels bilinear_interpolate_four_pixels 8888, x, 8888, add .endm .macro bilinear_add_8888_8888_process_pixblock_head bilinear_add_8888_8888_process_four_pixels .endm .macro bilinear_add_8888_8888_process_pixblock_tail .endm .macro bilinear_add_8888_8888_process_pixblock_tail_head bilinear_add_8888_8888_process_pixblock_tail bilinear_add_8888_8888_process_pixblock_head .endm /* add_8888_8_8888 */ .macro bilinear_add_8888_8_8888_process_last_pixel bilinear_interpolate_last_pixel 8888, 8, 8888, add .endm .macro bilinear_add_8888_8_8888_process_two_pixels bilinear_interpolate_two_pixels 8888, 8, 8888, add .endm .macro bilinear_add_8888_8_8888_process_four_pixels bilinear_interpolate_four_pixels 8888, 8, 8888, add .endm .macro bilinear_add_8888_8_8888_process_pixblock_head bilinear_add_8888_8_8888_process_four_pixels .endm .macro bilinear_add_8888_8_8888_process_pixblock_tail .endm .macro bilinear_add_8888_8_8888_process_pixblock_tail_head bilinear_add_8888_8_8888_process_pixblock_tail bilinear_add_8888_8_8888_process_pixblock_head .endm /* Bilinear scanline functions */ generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_8888_8_8888_SRC_asm_neon, \ 8888, 8888, 2, 2, \ bilinear_src_8888_8_8888_process_last_pixel, \ bilinear_src_8888_8_8888_process_two_pixels, \ bilinear_src_8888_8_8888_process_four_pixels, \ bilinear_src_8888_8_8888_process_pixblock_head, \ bilinear_src_8888_8_8888_process_pixblock_tail, \ bilinear_src_8888_8_8888_process_pixblock_tail_head, \ 4, 28, BILINEAR_FLAG_USE_MASK generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_8888_8_0565_SRC_asm_neon, \ 8888, 0565, 2, 1, \ bilinear_src_8888_8_0565_process_last_pixel, \ bilinear_src_8888_8_0565_process_two_pixels, \ bilinear_src_8888_8_0565_process_four_pixels, \ bilinear_src_8888_8_0565_process_pixblock_head, \ bilinear_src_8888_8_0565_process_pixblock_tail, \ bilinear_src_8888_8_0565_process_pixblock_tail_head, \ 4, 28, BILINEAR_FLAG_USE_MASK generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_0565_8_x888_SRC_asm_neon, \ 0565, 8888, 1, 2, \ bilinear_src_0565_8_x888_process_last_pixel, \ bilinear_src_0565_8_x888_process_two_pixels, \ bilinear_src_0565_8_x888_process_four_pixels, \ bilinear_src_0565_8_x888_process_pixblock_head, \ bilinear_src_0565_8_x888_process_pixblock_tail, \ bilinear_src_0565_8_x888_process_pixblock_tail_head, \ 4, 28, BILINEAR_FLAG_USE_MASK generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_0565_8_0565_SRC_asm_neon, \ 0565, 0565, 1, 1, \ bilinear_src_0565_8_0565_process_last_pixel, \ bilinear_src_0565_8_0565_process_two_pixels, \ bilinear_src_0565_8_0565_process_four_pixels, \ bilinear_src_0565_8_0565_process_pixblock_head, \ bilinear_src_0565_8_0565_process_pixblock_tail, \ bilinear_src_0565_8_0565_process_pixblock_tail_head, \ 4, 28, BILINEAR_FLAG_USE_MASK generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_8888_8888_OVER_asm_neon, \ 8888, 8888, 2, 2, \ bilinear_over_8888_8888_process_last_pixel, \ bilinear_over_8888_8888_process_two_pixels, \ bilinear_over_8888_8888_process_four_pixels, \ bilinear_over_8888_8888_process_pixblock_head, \ bilinear_over_8888_8888_process_pixblock_tail, \ bilinear_over_8888_8888_process_pixblock_tail_head, \ 4, 28, 0 
generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_8888_8_8888_OVER_asm_neon, \ 8888, 8888, 2, 2, \ bilinear_over_8888_8_8888_process_last_pixel, \ bilinear_over_8888_8_8888_process_two_pixels, \ bilinear_over_8888_8_8888_process_four_pixels, \ bilinear_over_8888_8_8888_process_pixblock_head, \ bilinear_over_8888_8_8888_process_pixblock_tail, \ bilinear_over_8888_8_8888_process_pixblock_tail_head, \ 4, 28, BILINEAR_FLAG_USE_MASK generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_8888_8888_ADD_asm_neon, \ 8888, 8888, 2, 2, \ bilinear_add_8888_8888_process_last_pixel, \ bilinear_add_8888_8888_process_two_pixels, \ bilinear_add_8888_8888_process_four_pixels, \ bilinear_add_8888_8888_process_pixblock_head, \ bilinear_add_8888_8888_process_pixblock_tail, \ bilinear_add_8888_8888_process_pixblock_tail_head, \ 4, 28, 0 generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_8888_8_8888_ADD_asm_neon, \ 8888, 8888, 2, 2, \ bilinear_add_8888_8_8888_process_last_pixel, \ bilinear_add_8888_8_8888_process_two_pixels, \ bilinear_add_8888_8_8888_process_four_pixels, \ bilinear_add_8888_8_8888_process_pixblock_head, \ bilinear_add_8888_8_8888_process_pixblock_tail, \ bilinear_add_8888_8_8888_process_pixblock_tail_head, \ 4, 28, BILINEAR_FLAG_USE_MASK
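All of the scanline macros in the file above reduce to the same fixed-point arithmetic: a vertical blend of the top and bottom source rows with byte weights (the vmull/vmlal against d28/d29), then a horizontal blend driven by the fractional part of a 16.16 x coordinate (the vshll/vmlsl/vmlal followed by vshrn #(2 * BILINEAR_INTERPOLATION_BITS)), with x advanced by ux per pixel (the vadd.u16 q12, q12, q13 steps). The scalar C model below is only an illustration of that arithmetic, not the library's API; the function name is made up, edge clamping is omitted, and it assumes the vertical weights wt and wb sum to 1 << BILINEAR_INTERPOLATION_BITS, which is what the final narrowing shift implies.

#include <stdint.h>

#define BILINEAR_INTERPOLATION_BITS 7
#define BILINEAR_RANGE (1 << BILINEAR_INTERPOLATION_BITS)

/* Illustrative scalar model of one bilinear a8r8g8b8 SRC pixel.
 * top/bottom point at the two source rows, x is 16.16 fixed point,
 * wt + wb == BILINEAR_RANGE.  No clamping at the right edge. */
static uint32_t
bilinear_pixel_8888 (const uint32_t *top, const uint32_t *bottom,
                     int32_t x, uint32_t wt, uint32_t wb)
{
    int      xi = x >> 16;
    uint32_t fx = ((uint32_t) x >> (16 - BILINEAR_INTERPOLATION_BITS)) &
                  (BILINEAR_RANGE - 1);
    uint32_t out = 0;

    for (int s = 0; s < 32; s += 8)
    {
        uint32_t tl = (top[xi]        >> s) & 0xff;
        uint32_t tr = (top[xi + 1]    >> s) & 0xff;
        uint32_t bl = (bottom[xi]     >> s) & 0xff;
        uint32_t br = (bottom[xi + 1] >> s) & 0xff;

        /* vertical pass: the umull/umlal with the wt/wb byte weights */
        uint32_t left  = tl * wt + bl * wb;
        uint32_t right = tr * wt + br * wb;

        /* horizontal pass plus normalisation: the shll/mlsl/mlal
         * sequence followed by the shrn #(2 * BITS) narrowing */
        uint32_t c = (left * (BILINEAR_RANGE - fx) + right * fx)
                     >> (2 * BILINEAR_INTERPOLATION_BITS);

        out |= c << s;
    }
    return out;
}

A scanline is then just this per-pixel step repeated while x accumulates ux, which is why the NEON code can keep four x values and four fx values live in q12/q15 and process a pixblock per loop iteration.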
ElSargo/wezpy
4,651
wezterm-src/deps/cairo/pixman/pixman/pixman-arm-simd-asm-scaled.S
/* * Copyright © 2008 Mozilla Corporation * Copyright © 2010 Nokia Corporation * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of Mozilla Corporation not be used in * advertising or publicity pertaining to distribution of the software without * specific, written prior permission. Mozilla Corporation makes no * representations about the suitability of this software for any purpose. It * is provided "as is" without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS * SOFTWARE. * * Author: Jeff Muizelaar (jeff@infidigm.net) * */ /* Prevent the stack from becoming executable */ #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif .text .arch armv6 .object_arch armv4 .arm .altmacro .p2align 2 #include "pixman-arm-asm.h" /* * Note: This code is only using armv5te instructions (not even armv6), * but is scheduled for ARM Cortex-A8 pipeline. So it might need to * be split into a few variants, tuned for each microarchitecture. * * TODO: In order to get good performance on ARM9/ARM11 cores (which don't * have efficient write combining), it needs to be changed to use 16-byte * aligned writes using STM instruction. 
* * Nearest scanline scaler macro template uses the following arguments: * fname - name of the function to generate * bpp_shift - (1 << bpp_shift) is the size of pixel in bytes * t - type suffix for LDR/STR instructions * prefetch_distance - prefetch in the source image by that many * pixels ahead * prefetch_braking_distance - stop prefetching when that many pixels are * remaining before the end of scanline */ .macro generate_nearest_scanline_func fname, bpp_shift, t, \ prefetch_distance, \ prefetch_braking_distance pixman_asm_function fname W .req r0 DST .req r1 SRC .req r2 VX .req r3 UNIT_X .req ip TMP1 .req r4 TMP2 .req r5 VXMASK .req r6 PF_OFFS .req r7 SRC_WIDTH_FIXED .req r8 ldr UNIT_X, [sp] push {r4, r5, r6, r7, r8, r10} mvn VXMASK, #((1 << bpp_shift) - 1) ldr SRC_WIDTH_FIXED, [sp, #28] /* define helper macro */ .macro scale_2_pixels ldr&t TMP1, [SRC, TMP1] and TMP2, VXMASK, VX, asr #(16 - bpp_shift) adds VX, VX, UNIT_X str&t TMP1, [DST], #(1 << bpp_shift) 9: subpls VX, VX, SRC_WIDTH_FIXED bpl 9b ldr&t TMP2, [SRC, TMP2] and TMP1, VXMASK, VX, asr #(16 - bpp_shift) adds VX, VX, UNIT_X str&t TMP2, [DST], #(1 << bpp_shift) 9: subpls VX, VX, SRC_WIDTH_FIXED bpl 9b .endm /* now do the scaling */ and TMP1, VXMASK, VX, asr #(16 - bpp_shift) adds VX, VX, UNIT_X 9: subpls VX, VX, SRC_WIDTH_FIXED bpl 9b subs W, W, #(8 + prefetch_braking_distance) blt 2f /* calculate prefetch offset */ mov PF_OFFS, #prefetch_distance mla PF_OFFS, UNIT_X, PF_OFFS, VX 1: /* main loop, process 8 pixels per iteration with prefetch */ pld [SRC, PF_OFFS, asr #(16 - bpp_shift)] add PF_OFFS, UNIT_X, lsl #3 scale_2_pixels scale_2_pixels scale_2_pixels scale_2_pixels subs W, W, #8 bge 1b 2: subs W, W, #(4 - 8 - prefetch_braking_distance) blt 2f 1: /* process the remaining pixels */ scale_2_pixels scale_2_pixels subs W, W, #4 bge 1b 2: tst W, #2 beq 2f scale_2_pixels 2: tst W, #1 ldrne&t TMP1, [SRC, TMP1] strne&t TMP1, [DST] /* cleanup helper macro */ .purgem scale_2_pixels .unreq DST .unreq SRC .unreq W .unreq VX .unreq UNIT_X .unreq TMP1 .unreq TMP2 .unreq VXMASK .unreq PF_OFFS .unreq SRC_WIDTH_FIXED /* return */ pop {r4, r5, r6, r7, r8, r10} bx lr .endfunc .endm generate_nearest_scanline_func \ pixman_scaled_nearest_scanline_0565_0565_SRC_asm_armv6, 1, h, 80, 32 generate_nearest_scanline_func \ pixman_scaled_nearest_scanline_8888_8888_SRC_asm_armv6, 2, , 48, 32
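In contrast to the bilinear files, the armv6 file above only performs nearest-neighbour fetches: each output pixel loads src[vx >> 16], advances vx by unit_x, and the subpls/bpl pair wraps vx against SRC_WIDTH_FIXED for NORMAL repeat (the assembly keeps vx biased so that the wrap is a simple conditional subtract). The C model below is a loose sketch of that per-pixel effect under the assumption that vx stays non-negative and src_width_fixed is positive; the function name is illustrative only.

#include <stdint.h>

/* Loose C model of the nearest scanline loop generated above.
 * vx and unit_x are 16.16 fixed point; src_width_fixed is the source
 * width in the same format and is assumed to be > 0. */
static void
nearest_scanline_8888 (uint32_t *dst, const uint32_t *src, int w,
                       uint32_t vx, uint32_t unit_x,
                       uint32_t src_width_fixed)
{
    while (w--)
    {
        *dst++ = src[vx >> 16];
        vx += unit_x;
        while (vx >= src_width_fixed)   /* NORMAL repeat wrap, cf. subpls/bpl */
            vx -= src_width_fixed;
    }
}

The generated functions additionally unroll this loop eight pixels at a time and issue pld prefetches prefetch_distance pixels ahead, stopping the prefetch prefetch_braking_distance pixels before the end of the scanline.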
ElSargo/wezpy
43,532
wezterm-src/deps/cairo/pixman/pixman/pixman-arma64-neon-asm-bilinear.S
/* * Copyright © 2011 SCore Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Author: Siarhei Siamashka (siarhei.siamashka@nokia.com) * Author: Taekyun Kim (tkq.kim@samsung.com) */ /* * This file contains scaled bilinear scanline functions implemented * using older siarhei's bilinear macro template. * * << General scanline function procedures >> * 1. bilinear interpolate source pixels * 2. load mask pixels * 3. load destination pixels * 4. duplicate mask to fill whole register * 5. interleave source & destination pixels * 6. apply mask to source pixels * 7. combine source & destination pixels * 8, Deinterleave final result * 9. store destination pixels * * All registers with single number (i.e. src0, tmp0) are 64-bits registers. * Registers with double numbers(src01, dst01) are 128-bits registers. * All temp registers can be used freely outside the code block. * Assume that symbol(register .req) OUT and MASK are defined at caller of these macro blocks. * * Remarks * There can be lots of pipeline stalls inside code block and between code blocks. * Further optimizations will be done by new macro templates using head/tail_head/tail scheme. */ /* Prevent the stack from becoming executable for no reason... */ #if defined(__linux__) && defined (__ELF__) .section .note.GNU-stack,"",%progbits #endif .text .arch armv8-a .altmacro .p2align 2 #include "pixman-private.h" #include "pixman-arm-asm.h" #include "pixman-arma64-neon-asm.h" /* * Bilinear macros from pixman-arm-neon-asm.S */ /* * Bilinear scaling support code which tries to provide pixel fetching, color * format conversion, and interpolation as separate macros which can be used * as the basic building blocks for constructing bilinear scanline functions. 
*/ .macro bilinear_load_8888 reg1, reg2, tmp asr WTMP1, X, #16 add X, X, UX add TMP1, TOP, TMP1, lsl #2 ld1 {&reg1&.2s}, [TMP1], STRIDE ld1 {&reg2&.2s}, [TMP1] .endm .macro bilinear_load_0565 reg1, reg2, tmp asr WTMP1, X, #16 add X, X, UX add TMP1, TOP, TMP1, lsl #1 ld1 {&reg2&.s}[0], [TMP1], STRIDE ld1 {&reg2&.s}[1], [TMP1] convert_four_0565_to_x888_packed reg2, reg1, reg2, tmp .endm .macro bilinear_load_and_vertical_interpolate_two_8888 \ acc1, acc2, reg1, reg2, reg3, reg4, tmp1, tmp2 bilinear_load_8888 reg1, reg2, tmp1 umull &acc1&.8h, &reg1&.8b, v28.8b umlal &acc1&.8h, &reg2&.8b, v29.8b bilinear_load_8888 reg3, reg4, tmp2 umull &acc2&.8h, &reg3&.8b, v28.8b umlal &acc2&.8h, &reg4&.8b, v29.8b .endm .macro bilinear_load_and_vertical_interpolate_four_8888 \ xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi \ yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi bilinear_load_and_vertical_interpolate_two_8888 \ xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi bilinear_load_and_vertical_interpolate_two_8888 \ yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi .endm .macro vzip reg1, reg2 zip1 v24.8b, reg1, reg2 zip2 reg2, reg1, reg2 mov reg1, v24.8b .endm .macro vuzp reg1, reg2 uzp1 v24.8b, reg1, reg2 uzp2 reg2, reg1, reg2 mov reg1, v24.8b .endm .macro bilinear_load_and_vertical_interpolate_two_0565 \ acc1, acc2, reg1, reg2, reg3, reg4, acc2lo, acc2hi asr WTMP1, X, #16 add X, X, UX add TMP1, TOP, TMP1, lsl #1 asr WTMP2, X, #16 add X, X, UX add TMP2, TOP, TMP2, lsl #1 ld1 {&acc2&.s}[0], [TMP1], STRIDE ld1 {&acc2&.s}[2], [TMP2], STRIDE ld1 {&acc2&.s}[1], [TMP1] ld1 {&acc2&.s}[3], [TMP2] convert_0565_to_x888 acc2, reg3, reg2, reg1 vzip &reg1&.8b, &reg3&.8b vzip &reg2&.8b, &reg4&.8b vzip &reg3&.8b, &reg4&.8b vzip &reg1&.8b, &reg2&.8b umull &acc1&.8h, &reg1&.8b, v28.8b umlal &acc1&.8h, &reg2&.8b, v29.8b umull &acc2&.8h, &reg3&.8b, v28.8b umlal &acc2&.8h, &reg4&.8b, v29.8b .endm .macro bilinear_load_and_vertical_interpolate_four_0565 \ xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi \ yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi asr WTMP1, X, #16 add X, X, UX add TMP1, TOP, TMP1, lsl #1 asr WTMP2, X, #16 add X, X, UX add TMP2, TOP, TMP2, lsl #1 ld1 {&xacc2&.s}[0], [TMP1], STRIDE ld1 {&xacc2&.s}[2], [TMP2], STRIDE ld1 {&xacc2&.s}[1], [TMP1] ld1 {&xacc2&.s}[3], [TMP2] convert_0565_to_x888 xacc2, xreg3, xreg2, xreg1 asr WTMP1, X, #16 add X, X, UX add TMP1, TOP, TMP1, lsl #1 asr WTMP2, X, #16 add X, X, UX add TMP2, TOP, TMP2, lsl #1 ld1 {&yacc2&.s}[0], [TMP1], STRIDE vzip &xreg1&.8b, &xreg3&.8b ld1 {&yacc2&.s}[2], [TMP2], STRIDE vzip &xreg2&.8b, &xreg4&.8b ld1 {&yacc2&.s}[1], [TMP1] vzip &xreg3&.8b, &xreg4&.8b ld1 {&yacc2&.s}[3], [TMP2] vzip &xreg1&.8b, &xreg2&.8b convert_0565_to_x888 yacc2, yreg3, yreg2, yreg1 umull &xacc1&.8h, &xreg1&.8b, v28.8b vzip &yreg1&.8b, &yreg3&.8b umlal &xacc1&.8h, &xreg2&.8b, v29.8b vzip &yreg2&.8b, &yreg4&.8b umull &xacc2&.8h, &xreg3&.8b, v28.8b vzip &yreg3&.8b, &yreg4&.8b umlal &xacc2&.8h, &xreg4&.8b, v29.8b vzip &yreg1&.8b, &yreg2&.8b umull &yacc1&.8h, &yreg1&.8b, v28.8b umlal &yacc1&.8h, &yreg2&.8b, v29.8b umull &yacc2&.8h, &yreg3&.8b, v28.8b umlal &yacc2&.8h, &yreg4&.8b, v29.8b .endm .macro bilinear_store_8888 numpix, tmp1, tmp2 .if numpix == 4 st1 {v0.2s, v1.2s}, [OUT], #16 .elseif numpix == 2 st1 {v0.2s}, [OUT], #8 .elseif numpix == 1 st1 {v0.s}[0], [OUT], #4 .else .error bilinear_store_8888 numpix is unsupported .endif .endm .macro bilinear_store_0565 numpix, tmp1, tmp2 vuzp v0.8b, v1.8b vuzp v2.8b, v3.8b 
vuzp v1.8b, v3.8b vuzp v0.8b, v2.8b convert_8888_to_0565 v2, v1, v0, v1, tmp1, tmp2 .if numpix == 4 st1 {v1.4h}, [OUT], #8 .elseif numpix == 2 st1 {v1.s}[0], [OUT], #4 .elseif numpix == 1 st1 {v1.h}[0], [OUT], #2 .else .error bilinear_store_0565 numpix is unsupported .endif .endm /* * Macros for loading mask pixels into register 'mask'. * dup must be done in somewhere else. */ .macro bilinear_load_mask_x numpix, mask .endm .macro bilinear_load_mask_8 numpix, mask .if numpix == 4 ld1 {&mask&.s}[0], [MASK], #4 .elseif numpix == 2 ld1 {&mask&.h}[0], [MASK], #2 .elseif numpix == 1 ld1 {&mask&.b}[0], [MASK], #1 .else .error bilinear_load_mask_8 numpix is unsupported .endif prfm PREFETCH_MODE, [MASK, #prefetch_offset] .endm .macro bilinear_load_mask mask_fmt, numpix, mask bilinear_load_mask_&mask_fmt numpix, mask .endm /* * Macros for loading destination pixels into register 'dst0' and 'dst1'. * Interleave should be done somewhere else. */ .macro bilinear_load_dst_0565_src numpix, dst0, dst1, dst01 .endm .macro bilinear_load_dst_8888_src numpix, dst0, dst1, dst01 .endm .macro bilinear_load_dst_8888 numpix, dst0, dst1, dst01 .if numpix == 4 ld1 {&dst0&.2s, &dst1&.2s}, [OUT] .elseif numpix == 2 ld1 {&dst0&.2s}, [OUT] .elseif numpix == 1 ld1 {&dst0&.s}[0], [OUT] .else .error bilinear_load_dst_8888 numpix is unsupported .endif mov &dst01&.d[0], &dst0&.d[0] mov &dst01&.d[1], &dst1&.d[0] prfm PREFETCH_MODE, [OUT, #(prefetch_offset * 4)] .endm .macro bilinear_load_dst_8888_over numpix, dst0, dst1, dst01 bilinear_load_dst_8888 numpix, dst0, dst1, dst01 .endm .macro bilinear_load_dst_8888_add numpix, dst0, dst1, dst01 bilinear_load_dst_8888 numpix, dst0, dst1, dst01 .endm .macro bilinear_load_dst dst_fmt, op, numpix, dst0, dst1, dst01 bilinear_load_dst_&dst_fmt&_&op numpix, dst0, dst1, dst01 .endm /* * Macros for duplicating partially loaded mask to fill entire register. * We will apply mask to interleaved source pixels, that is * (r0, r1, r2, r3, g0, g1, g2, g3) x (m0, m1, m2, m3, m0, m1, m2, m3) * (b0, b1, b2, b3, a0, a1, a2, a3) x (m0, m1, m2, m3, m0, m1, m2, m3) * So, we need to duplicate loaded mask into whole register. * * For two pixel case * (r0, r1, x, x, g0, g1, x, x) x (m0, m1, m0, m1, m0, m1, m0, m1) * (b0, b1, x, x, a0, a1, x, x) x (m0, m1, m0, m1, m0, m1, m0, m1) * We can do some optimizations for this including last pixel cases. */ .macro bilinear_duplicate_mask_x numpix, mask .endm .macro bilinear_duplicate_mask_8 numpix, mask .if numpix == 4 dup &mask&.2s, &mask&.s[0] .elseif numpix == 2 dup &mask&.4h, &mask&.h[0] .elseif numpix == 1 dup &mask&.8b, &mask&.b[0] .else .error bilinear_duplicate_mask_8 is unsupported .endif .endm .macro bilinear_duplicate_mask mask_fmt, numpix, mask bilinear_duplicate_mask_&mask_fmt numpix, mask .endm /* * Macros for interleaving src and dst pixels to rrrr gggg bbbb aaaa form. * Interleave should be done when maks is enabled or operator is 'over'. 
*/ .macro bilinear_interleave src0, src1, src01, dst0, dst1, dst01 vuzp &src0&.8b, &src1&.8b vuzp &dst0&.8b, &dst1&.8b vuzp &src0&.8b, &src1&.8b vuzp &dst0&.8b, &dst1&.8b mov &src01&.d[1], &src1&.d[0] mov &src01&.d[0], &src0&.d[0] mov &dst01&.d[1], &dst1&.d[0] mov &dst01&.d[0], &dst0&.d[0] .endm .macro bilinear_interleave_src_dst_x_src \ numpix, src0, src1, src01, dst0, dst1, dst01 .endm .macro bilinear_interleave_src_dst_x_over \ numpix, src0, src1, src01, dst0, dst1, dst01 bilinear_interleave src0, src1, src01, dst0, dst1, dst01 .endm .macro bilinear_interleave_src_dst_x_add \ numpix, src0, src1, src01, dst0, dst1, dst01 bilinear_interleave src0, src1, src01, dst0, dst1, dst01 .endm .macro bilinear_interleave_src_dst_8_src \ numpix, src0, src1, src01, dst0, dst1, dst01 bilinear_interleave src0, src1, src01, dst0, dst1, dst01 .endm .macro bilinear_interleave_src_dst_8_over \ numpix, src0, src1, src01, dst0, dst1, dst01 bilinear_interleave src0, src1, src01, dst0, dst1, dst01 .endm .macro bilinear_interleave_src_dst_8_add \ numpix, src0, src1, src01, dst0, dst1, dst01 bilinear_interleave src0, src1, src01, dst0, dst1, dst01 .endm .macro bilinear_interleave_src_dst \ mask_fmt, op, numpix, src0, src1, src01, dst0, dst1, dst01 bilinear_interleave_src_dst_&mask_fmt&_&op \ numpix, src0, src1, src01, dst0, dst1, dst01 .endm /* * Macros for applying masks to src pixels. (see combine_mask_u() function) * src, dst should be in interleaved form. * mask register should be in form (m0, m1, m2, m3). */ .macro bilinear_apply_mask_to_src_x \ numpix, src0, src1, src01, mask, \ tmp01, tmp23, tmp45, tmp67 .endm .macro bilinear_apply_mask_to_src_8 \ numpix, src0, src1, src01, mask, \ tmp01, tmp23, tmp45, tmp67 umull &tmp01&.8h, &src0&.8b, &mask&.8b umull &tmp23&.8h, &src1&.8b, &mask&.8b /* bubbles */ urshr &tmp45&.8h, &tmp01&.8h, #8 urshr &tmp67&.8h, &tmp23&.8h, #8 /* bubbles */ raddhn &src0&.8b, &tmp45&.8h, &tmp01&.8h raddhn &src1&.8b, &tmp67&.8h, &tmp23&.8h mov &src01&.d[0], &src0&.d[0] mov &src01&.d[1], &src1&.d[0] .endm .macro bilinear_apply_mask_to_src \ mask_fmt, numpix, src0, src1, src01, mask, \ tmp01, tmp23, tmp45, tmp67 bilinear_apply_mask_to_src_&mask_fmt \ numpix, src0, src1, src01, mask, \ tmp01, tmp23, tmp45, tmp67 .endm /* * Macros for combining src and destination pixels. * Interleave or not is depending on operator 'op'. 
*/ .macro bilinear_combine_src \ numpix, src0, src1, src01, dst0, dst1, dst01, \ tmp01, tmp23, tmp45, tmp67, tmp8 .endm .macro bilinear_combine_over \ numpix, src0, src1, src01, dst0, dst1, dst01, \ tmp01, tmp23, tmp45, tmp67, tmp8 dup &tmp8&.2s, &src1&.s[1] /* bubbles */ mvn &tmp8&.8b, &tmp8&.8b /* bubbles */ umull &tmp01&.8h, &dst0&.8b, &tmp8&.8b /* bubbles */ umull &tmp23&.8h, &dst1&.8b, &tmp8&.8b /* bubbles */ urshr &tmp45&.8h, &tmp01&.8h, #8 urshr &tmp67&.8h, &tmp23&.8h, #8 /* bubbles */ raddhn &dst0&.8b, &tmp45&.8h, &tmp01&.8h raddhn &dst1&.8b, &tmp67&.8h, &tmp23&.8h mov &dst01&.d[0], &dst0&.d[0] mov &dst01&.d[1], &dst1&.d[0] /* bubbles */ uqadd &src0&.8b, &dst0&.8b, &src0&.8b uqadd &src1&.8b, &dst1&.8b, &src1&.8b mov &src01&.d[0], &src0&.d[0] mov &src01&.d[1], &src1&.d[0] .endm .macro bilinear_combine_add \ numpix, src0, src1, src01, dst0, dst1, dst01, \ tmp01, tmp23, tmp45, tmp67, tmp8 uqadd &src0&.8b, &dst0&.8b, &src0&.8b uqadd &src1&.8b, &dst1&.8b, &src1&.8b mov &src01&.d[0], &src0&.d[0] mov &src01&.d[1], &src1&.d[0] .endm .macro bilinear_combine \ op, numpix, src0, src1, src01, dst0, dst1, dst01, \ tmp01, tmp23, tmp45, tmp67, tmp8 bilinear_combine_&op \ numpix, src0, src1, src01, dst0, dst1, dst01, \ tmp01, tmp23, tmp45, tmp67, tmp8 .endm /* * Macros for final deinterleaving of destination pixels if needed. */ .macro bilinear_deinterleave numpix, dst0, dst1, dst01 vuzp &dst0&.8b, &dst1&.8b /* bubbles */ vuzp &dst0&.8b, &dst1&.8b mov &dst01&.d[0], &dst0&.d[0] mov &dst01&.d[1], &dst1&.d[0] .endm .macro bilinear_deinterleave_dst_x_src numpix, dst0, dst1, dst01 .endm .macro bilinear_deinterleave_dst_x_over numpix, dst0, dst1, dst01 bilinear_deinterleave numpix, dst0, dst1, dst01 .endm .macro bilinear_deinterleave_dst_x_add numpix, dst0, dst1, dst01 bilinear_deinterleave numpix, dst0, dst1, dst01 .endm .macro bilinear_deinterleave_dst_8_src numpix, dst0, dst1, dst01 bilinear_deinterleave numpix, dst0, dst1, dst01 .endm .macro bilinear_deinterleave_dst_8_over numpix, dst0, dst1, dst01 bilinear_deinterleave numpix, dst0, dst1, dst01 .endm .macro bilinear_deinterleave_dst_8_add numpix, dst0, dst1, dst01 bilinear_deinterleave numpix, dst0, dst1, dst01 .endm .macro bilinear_deinterleave_dst mask_fmt, op, numpix, dst0, dst1, dst01 bilinear_deinterleave_dst_&mask_fmt&_&op numpix, dst0, dst1, dst01 .endm .macro bilinear_interpolate_last_pixel src_fmt, mask_fmt, dst_fmt, op bilinear_load_&src_fmt v0, v1, v2 bilinear_load_mask mask_fmt, 1, v4 bilinear_load_dst dst_fmt, op, 1, v18, v19, v9 umull v2.8h, v0.8b, v28.8b umlal v2.8h, v1.8b, v29.8b /* 5 cycles bubble */ ushll v0.4s, v2.4h, #BILINEAR_INTERPOLATION_BITS umlsl v0.4s, v2.4h, v15.h[0] umlal2 v0.4s, v2.8h, v15.h[0] /* 5 cycles bubble */ bilinear_duplicate_mask mask_fmt, 1, v4 shrn v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS) /* 3 cycles bubble */ xtn v0.8b, v0.8h /* 1 cycle bubble */ bilinear_interleave_src_dst \ mask_fmt, op, 1, v0, v1, v0, v18, v19, v9 bilinear_apply_mask_to_src \ mask_fmt, 1, v0, v1, v0, v4, \ v3, v8, v10, v11 bilinear_combine \ op, 1, v0, v1, v0, v18, v19, v9, \ v3, v8, v10, v11, v5 bilinear_deinterleave_dst mask_fmt, op, 1, v0, v1, v0 bilinear_store_&dst_fmt 1, v17, v18 .endm .macro bilinear_interpolate_two_pixels src_fmt, mask_fmt, dst_fmt, op bilinear_load_and_vertical_interpolate_two_&src_fmt \ v1, v11, v18, v19, v20, v21, v22, v23 bilinear_load_mask mask_fmt, 2, v4 bilinear_load_dst dst_fmt, op, 2, v18, v19, v9 ushll v0.4s, v1.4h, #BILINEAR_INTERPOLATION_BITS umlsl v0.4s, v1.4h, v15.h[0] umlal2 v0.4s, v1.8h, 
v15.h[0] ushll v10.4s, v11.4h, #BILINEAR_INTERPOLATION_BITS umlsl v10.4s, v11.4h, v15.h[4] umlal2 v10.4s, v11.8h, v15.h[4] shrn v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS) shrn2 v0.8h, v10.4s, #(2 * BILINEAR_INTERPOLATION_BITS) bilinear_duplicate_mask mask_fmt, 2, v4 ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS) add v12.8h, v12.8h, v13.8h xtn v0.8b, v0.8h bilinear_interleave_src_dst \ mask_fmt, op, 2, v0, v1, v0, v18, v19, v9 bilinear_apply_mask_to_src \ mask_fmt, 2, v0, v1, v0, v4, \ v3, v8, v10, v11 bilinear_combine \ op, 2, v0, v1, v0, v18, v19, v9, \ v3, v8, v10, v11, v5 bilinear_deinterleave_dst mask_fmt, op, 2, v0, v1, v0 bilinear_store_&dst_fmt 2, v16, v17 .endm .macro bilinear_interpolate_four_pixels src_fmt, mask_fmt, dst_fmt, op bilinear_load_and_vertical_interpolate_four_&src_fmt \ v1, v11, v4, v5, v6, v7, v22, v23 \ v3, v9, v16, v17, v20, v21, v18, v19 prfm PREFETCH_MODE, [TMP1, PF_OFFS] sub TMP1, TMP1, STRIDE prfm PREFETCH_MODE, [TMP1, PF_OFFS] ushll v0.4s, v1.4h, #BILINEAR_INTERPOLATION_BITS umlsl v0.4s, v1.4h, v15.h[0] umlal2 v0.4s, v1.8h, v15.h[0] ushll v10.4s, v11.4h, #BILINEAR_INTERPOLATION_BITS umlsl v10.4s, v11.4h, v15.h[4] umlal2 v10.4s, v11.8h, v15.h[4] ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS) ushll v2.4s, v3.4h, #BILINEAR_INTERPOLATION_BITS umlsl v2.4s, v3.4h, v15.h[0] umlal2 v2.4s, v3.8h, v15.h[0] ushll v8.4s, v9.4h, #BILINEAR_INTERPOLATION_BITS umlsl v8.4s, v9.4h, v15.h[4] umlal2 v8.4s, v9.8h, v15.h[4] add v12.8h, v12.8h, v13.8h shrn v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS) shrn2 v0.8h, v10.4s, #(2 * BILINEAR_INTERPOLATION_BITS) shrn v2.4h, v2.4s, #(2 * BILINEAR_INTERPOLATION_BITS) shrn2 v2.8h, v8.4s, #(2 * BILINEAR_INTERPOLATION_BITS) bilinear_load_mask mask_fmt, 4, v4 bilinear_duplicate_mask mask_fmt, 4, v4 ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS) xtn v0.8b, v0.8h xtn v1.8b, v2.8h add v12.8h, v12.8h, v13.8h bilinear_load_dst dst_fmt, op, 4, v2, v3, v21 bilinear_interleave_src_dst \ mask_fmt, op, 4, v0, v1, v0, v2, v3, v11 bilinear_apply_mask_to_src \ mask_fmt, 4, v0, v1, v0, v4, \ v6, v8, v9, v10 bilinear_combine \ op, 4, v0, v1, v0, v2, v3, v1, \ v6, v8, v9, v10, v23 bilinear_deinterleave_dst mask_fmt, op, 4, v0, v1, v0 bilinear_store_&dst_fmt 4, v6, v7 .endm .set BILINEAR_FLAG_USE_MASK, 1 .set BILINEAR_FLAG_USE_ALL_NEON_REGS, 2 /* * Main template macro for generating NEON optimized bilinear scanline functions. 
* * Bilinear scanline generator macro take folling arguments: * fname - name of the function to generate * src_fmt - source color format (8888 or 0565) * dst_fmt - destination color format (8888 or 0565) * src/dst_bpp_shift - (1 << bpp_shift) is the size of src/dst pixel in bytes * process_last_pixel - code block that interpolate one pixel and does not * update horizontal weight * process_two_pixels - code block that interpolate two pixels and update * horizontal weight * process_four_pixels - code block that interpolate four pixels and update * horizontal weight * process_pixblock_head - head part of middle loop * process_pixblock_tail - tail part of middle loop * process_pixblock_tail_head - tail_head of middle loop * pixblock_size - number of pixels processed in a single middle loop * prefetch_distance - prefetch in the source image by that many pixels ahead */ .macro generate_bilinear_scanline_func \ fname, \ src_fmt, dst_fmt, src_bpp_shift, dst_bpp_shift, \ bilinear_process_last_pixel, \ bilinear_process_two_pixels, \ bilinear_process_four_pixels, \ bilinear_process_pixblock_head, \ bilinear_process_pixblock_tail, \ bilinear_process_pixblock_tail_head, \ pixblock_size, \ prefetch_distance, \ flags pixman_asm_function fname .if pixblock_size == 8 .elseif pixblock_size == 4 .else .error unsupported pixblock size .endif .if ((flags) & BILINEAR_FLAG_USE_MASK) == 0 OUT .req x0 TOP .req x1 BOTTOM .req x2 WT .req x3 WWT .req w3 WB .req x4 WWB .req w4 X .req w5 UX .req w6 WIDTH .req x7 TMP1 .req x10 WTMP1 .req w10 TMP2 .req x11 WTMP2 .req w11 PF_OFFS .req x12 TMP3 .req x13 WTMP3 .req w13 TMP4 .req x14 WTMP4 .req w14 STRIDE .req x15 DUMMY .req x30 stp x29, x30, [sp, -16]! mov x29, sp sub sp, sp, 112 sub x29, x29, 64 st1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], 32 st1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], 32 stp x10, x11, [x29, -80] stp x12, x13, [x29, -96] stp x14, x15, [x29, -112] .else OUT .req x0 MASK .req x1 TOP .req x2 BOTTOM .req x3 WT .req x4 WWT .req w4 WB .req x5 WWB .req w5 X .req w6 UX .req w7 WIDTH .req x8 TMP1 .req x10 WTMP1 .req w10 TMP2 .req x11 WTMP2 .req w11 PF_OFFS .req x12 TMP3 .req x13 WTMP3 .req w13 TMP4 .req x14 WTMP4 .req w14 STRIDE .req x15 DUMMY .req x30 .set prefetch_offset, prefetch_distance stp x29, x30, [sp, -16]! 
mov x29, sp sub x29, x29, 64 st1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], 32 st1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], 32 stp x10, x11, [x29, -80] stp x12, x13, [x29, -96] stp x14, x15, [x29, -112] str x8, [x29, -120] ldr w8, [x29, 16] sub sp, sp, 120 .endif mov WTMP1, #prefetch_distance umull PF_OFFS, WTMP1, UX sub STRIDE, BOTTOM, TOP .unreq BOTTOM cmp WIDTH, #0 ble 300f dup v12.8h, X dup v13.8h, UX dup v28.8b, WWT dup v29.8b, WWB mov v25.d[0], v12.d[1] mov v26.d[0], v13.d[0] add v25.4h, v25.4h, v26.4h mov v12.d[1], v25.d[0] /* ensure good destination alignment */ cmp WIDTH, #1 blt 100f tst OUT, #(1 << dst_bpp_shift) beq 100f ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS) add v12.8h, v12.8h, v13.8h bilinear_process_last_pixel sub WIDTH, WIDTH, #1 100: add v13.8h, v13.8h, v13.8h ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS) add v12.8h, v12.8h, v13.8h cmp WIDTH, #2 blt 100f tst OUT, #(1 << (dst_bpp_shift + 1)) beq 100f bilinear_process_two_pixels sub WIDTH, WIDTH, #2 100: .if pixblock_size == 8 cmp WIDTH, #4 blt 100f tst OUT, #(1 << (dst_bpp_shift + 2)) beq 100f bilinear_process_four_pixels sub WIDTH, WIDTH, #4 100: .endif subs WIDTH, WIDTH, #pixblock_size blt 100f asr PF_OFFS, PF_OFFS, #(16 - src_bpp_shift) bilinear_process_pixblock_head subs WIDTH, WIDTH, #pixblock_size blt 500f 0: bilinear_process_pixblock_tail_head subs WIDTH, WIDTH, #pixblock_size bge 0b 500: bilinear_process_pixblock_tail 100: .if pixblock_size == 8 tst WIDTH, #4 beq 200f bilinear_process_four_pixels 200: .endif /* handle the remaining trailing pixels */ tst WIDTH, #2 beq 200f bilinear_process_two_pixels 200: tst WIDTH, #1 beq 300f bilinear_process_last_pixel 300: .if ((flags) & BILINEAR_FLAG_USE_MASK) == 0 sub x29, x29, 64 ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], 32 ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], 32 ldp x10, x11, [x29, -80] ldp x12, x13, [x29, -96] ldp x14, x15, [x29, -112] mov sp, x29 ldp x29, x30, [sp], 16 .else sub x29, x29, 64 ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], 32 ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], 32 ldp x10, x11, [x29, -80] ldp x12, x13, [x29, -96] ldp x14, x15, [x29, -112] ldr x8, [x29, -120] mov sp, x29 ldp x29, x30, [sp], 16 .endif ret .unreq OUT .unreq TOP .unreq WT .unreq WWT .unreq WB .unreq WWB .unreq X .unreq UX .unreq WIDTH .unreq TMP1 .unreq WTMP1 .unreq TMP2 .unreq PF_OFFS .unreq TMP3 .unreq TMP4 .unreq STRIDE .if ((flags) & BILINEAR_FLAG_USE_MASK) != 0 .unreq MASK .endif .endfunc .endm /* src_8888_8_8888 */ .macro bilinear_src_8888_8_8888_process_last_pixel bilinear_interpolate_last_pixel 8888, 8, 8888, src .endm .macro bilinear_src_8888_8_8888_process_two_pixels bilinear_interpolate_two_pixels 8888, 8, 8888, src .endm .macro bilinear_src_8888_8_8888_process_four_pixels bilinear_interpolate_four_pixels 8888, 8, 8888, src .endm .macro bilinear_src_8888_8_8888_process_pixblock_head bilinear_src_8888_8_8888_process_four_pixels .endm .macro bilinear_src_8888_8_8888_process_pixblock_tail .endm .macro bilinear_src_8888_8_8888_process_pixblock_tail_head bilinear_src_8888_8_8888_process_pixblock_tail bilinear_src_8888_8_8888_process_pixblock_head .endm /* src_8888_8_0565 */ .macro bilinear_src_8888_8_0565_process_last_pixel bilinear_interpolate_last_pixel 8888, 8, 0565, src .endm .macro bilinear_src_8888_8_0565_process_two_pixels bilinear_interpolate_two_pixels 8888, 8, 0565, src .endm .macro bilinear_src_8888_8_0565_process_four_pixels bilinear_interpolate_four_pixels 8888, 8, 0565, src .endm .macro bilinear_src_8888_8_0565_process_pixblock_head 
bilinear_src_8888_8_0565_process_four_pixels .endm .macro bilinear_src_8888_8_0565_process_pixblock_tail .endm .macro bilinear_src_8888_8_0565_process_pixblock_tail_head bilinear_src_8888_8_0565_process_pixblock_tail bilinear_src_8888_8_0565_process_pixblock_head .endm /* src_0565_8_x888 */ .macro bilinear_src_0565_8_x888_process_last_pixel bilinear_interpolate_last_pixel 0565, 8, 8888, src .endm .macro bilinear_src_0565_8_x888_process_two_pixels bilinear_interpolate_two_pixels 0565, 8, 8888, src .endm .macro bilinear_src_0565_8_x888_process_four_pixels bilinear_interpolate_four_pixels 0565, 8, 8888, src .endm .macro bilinear_src_0565_8_x888_process_pixblock_head bilinear_src_0565_8_x888_process_four_pixels .endm .macro bilinear_src_0565_8_x888_process_pixblock_tail .endm .macro bilinear_src_0565_8_x888_process_pixblock_tail_head bilinear_src_0565_8_x888_process_pixblock_tail bilinear_src_0565_8_x888_process_pixblock_head .endm /* src_0565_8_0565 */ .macro bilinear_src_0565_8_0565_process_last_pixel bilinear_interpolate_last_pixel 0565, 8, 0565, src .endm .macro bilinear_src_0565_8_0565_process_two_pixels bilinear_interpolate_two_pixels 0565, 8, 0565, src .endm .macro bilinear_src_0565_8_0565_process_four_pixels bilinear_interpolate_four_pixels 0565, 8, 0565, src .endm .macro bilinear_src_0565_8_0565_process_pixblock_head bilinear_src_0565_8_0565_process_four_pixels .endm .macro bilinear_src_0565_8_0565_process_pixblock_tail .endm .macro bilinear_src_0565_8_0565_process_pixblock_tail_head bilinear_src_0565_8_0565_process_pixblock_tail bilinear_src_0565_8_0565_process_pixblock_head .endm /* over_8888_8888 */ .macro bilinear_over_8888_8888_process_last_pixel bilinear_interpolate_last_pixel 8888, x, 8888, over .endm .macro bilinear_over_8888_8888_process_two_pixels bilinear_interpolate_two_pixels 8888, x, 8888, over .endm .macro bilinear_over_8888_8888_process_four_pixels bilinear_interpolate_four_pixels 8888, x, 8888, over .endm .macro bilinear_over_8888_8888_process_pixblock_head asr WTMP1, X, #16 add X, X, UX add TMP1, TOP, TMP1, lsl #2 asr WTMP2, X, #16 add X, X, UX add TMP2, TOP, TMP2, lsl #2 ld1 {v22.2s}, [TMP1], STRIDE ld1 {v23.2s}, [TMP1] asr WTMP3, X, #16 add X, X, UX add TMP3, TOP, TMP3, lsl #2 umull v8.8h, v22.8b, v28.8b umlal v8.8h, v23.8b, v29.8b ld1 {v22.2s}, [TMP2], STRIDE ld1 {v23.2s}, [TMP2] asr WTMP4, X, #16 add X, X, UX add TMP4, TOP, TMP4, lsl #2 umull v9.8h, v22.8b, v28.8b umlal v9.8h, v23.8b, v29.8b ld1 {v22.2s}, [TMP3], STRIDE ld1 {v23.2s}, [TMP3] umull v10.8h, v22.8b, v28.8b umlal v10.8h, v23.8b, v29.8b ushll v0.4s, v8.4h, #BILINEAR_INTERPOLATION_BITS umlsl v0.4s, v8.4h, v15.h[0] umlal2 v0.4s, v8.8h, v15.h[0] prfm PREFETCH_MODE, [TMP4, PF_OFFS] ld1 {v16.2s}, [TMP4], STRIDE ld1 {v17.2s}, [TMP4] prfm PREFETCH_MODE, [TMP4, PF_OFFS] umull v11.8h, v16.8b, v28.8b umlal v11.8h, v17.8b, v29.8b ushll v1.4s, v9.4h, #BILINEAR_INTERPOLATION_BITS umlsl v1.4s, v9.4h, v15.h[4] umlal2 v1.4s, v9.8h, v15.h[4] ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS) add v12.8h, v12.8h, v13.8h .endm .macro bilinear_over_8888_8888_process_pixblock_tail ushll v2.4s, v10.4h, #BILINEAR_INTERPOLATION_BITS umlsl v2.4s, v10.4h, v15.h[0] umlal2 v2.4s, v10.8h, v15.h[0] ushll v3.4s, v11.4h, #BILINEAR_INTERPOLATION_BITS umlsl v3.4s, v11.4h, v15.h[4] umlal2 v3.4s, v11.8h, v15.h[4] shrn v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS) shrn2 v0.8h, v1.4s, #(2 * BILINEAR_INTERPOLATION_BITS) shrn v2.4h, v2.4s, #(2 * BILINEAR_INTERPOLATION_BITS) ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS) 
shrn2 v2.8h, v3.4s, #(2 * BILINEAR_INTERPOLATION_BITS) xtn v6.8b, v0.8h xtn v7.8b, v2.8h ld1 {v2.2s, v3.2s}, [OUT] prfm PREFETCH_MODE, [OUT, #(prefetch_offset * 4)] vuzp v6.8b, v7.8b vuzp v2.8b, v3.8b vuzp v6.8b, v7.8b vuzp v2.8b, v3.8b dup v4.2s, v7.s[1] mvn v4.8b, v4.8b umull v11.8h, v2.8b, v4.8b umull v2.8h, v3.8b, v4.8b urshr v1.8h, v11.8h, #8 urshr v10.8h, v2.8h, #8 raddhn v3.8b, v10.8h, v2.8h raddhn v2.8b, v1.8h, v11.8h uqadd v6.8b, v2.8b, v6.8b uqadd v7.8b, v3.8b, v7.8b vuzp v6.8b, v7.8b vuzp v6.8b, v7.8b add v12.8h, v12.8h, v13.8h st1 {v6.2s, v7.2s}, [OUT], #16 .endm .macro bilinear_over_8888_8888_process_pixblock_tail_head ushll v2.4s, v10.4h, #BILINEAR_INTERPOLATION_BITS asr WTMP1, X, #16 add X, X, UX add TMP1, TOP, TMP1, lsl #2 umlsl v2.4s, v10.4h, v15.h[0] asr WTMP2, X, #16 add X, X, UX add TMP2, TOP, TMP2, lsl #2 umlal2 v2.4s, v10.8h, v15.h[0] ushll v3.4s, v11.4h, #BILINEAR_INTERPOLATION_BITS ld1 {v20.2s}, [TMP1], STRIDE umlsl v3.4s, v11.4h, v15.h[4] umlal2 v3.4s, v11.8h, v15.h[4] ld1 {v21.2s}, [TMP1] umull v8.8h, v20.8b, v28.8b umlal v8.8h, v21.8b, v29.8b shrn v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS) shrn2 v0.8h, v1.4s, #(2 * BILINEAR_INTERPOLATION_BITS) shrn v2.4h, v2.4s, #(2 * BILINEAR_INTERPOLATION_BITS) ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS) ld1 {v22.2s}, [TMP2], STRIDE shrn2 v2.8h, v3.4s, #(2 * BILINEAR_INTERPOLATION_BITS) xtn v6.8b, v0.8h ld1 {v23.2s}, [TMP2] umull v9.8h, v22.8b, v28.8b asr WTMP3, X, #16 add X, X, UX add TMP3, TOP, TMP3, lsl #2 asr WTMP4, X, #16 add X, X, UX add TMP4, TOP, TMP4, lsl #2 umlal v9.8h, v23.8b, v29.8b xtn v7.8b, v2.8h ld1 {v2.2s, v3.2s}, [OUT] prfm PREFETCH_MODE, [OUT, PF_OFFS] ld1 {v22.2s}, [TMP3], STRIDE vuzp v6.8b, v7.8b vuzp v2.8b, v3.8b vuzp v6.8b, v7.8b vuzp v2.8b, v3.8b dup v4.2s, v7.s[1] ld1 {v23.2s}, [TMP3] mvn v4.8b, v4.8b umull v10.8h, v22.8b, v28.8b umlal v10.8h, v23.8b, v29.8b umull v11.8h, v2.8b, v4.8b umull v2.8h, v3.8b, v4.8b ushll v0.4s, v8.4h, #BILINEAR_INTERPOLATION_BITS umlsl v0.4s, v8.4h, v15.h[0] urshr v1.8h, v11.8h, #8 umlal2 v0.4s, v8.8h, v15.h[0] urshr v8.8h, v2.8h, #8 raddhn v3.8b, v8.8h, v2.8h raddhn v2.8b, v1.8h, v11.8h prfm PREFETCH_MODE, [TMP4, PF_OFFS] ld1 {v16.2s}, [TMP4], STRIDE uqadd v6.8b, v2.8b, v6.8b uqadd v7.8b, v3.8b, v7.8b ld1 {v17.2s}, [TMP4] prfm PREFETCH_MODE, [TMP4, PF_OFFS] umull v11.8h, v16.8b, v28.8b umlal v11.8h, v17.8b, v29.8b vuzp v6.8b, v7.8b ushll v1.4s, v9.4h, #BILINEAR_INTERPOLATION_BITS vuzp v6.8b, v7.8b umlsl v1.4s, v9.4h, v15.h[4] add v12.8h, v12.8h, v13.8h umlal2 v1.4s, v9.8h, v15.h[4] ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS) add v12.8h, v12.8h, v13.8h st1 {v6.2s, v7.2s}, [OUT], #16 .endm /* over_8888_8_8888 */ .macro bilinear_over_8888_8_8888_process_last_pixel bilinear_interpolate_last_pixel 8888, 8, 8888, over .endm .macro bilinear_over_8888_8_8888_process_two_pixels bilinear_interpolate_two_pixels 8888, 8, 8888, over .endm .macro bilinear_over_8888_8_8888_process_four_pixels bilinear_interpolate_two_pixels 8888, 8, 8888, over bilinear_interpolate_two_pixels 8888, 8, 8888, over .endm .macro bilinear_over_8888_8_8888_process_pixblock_head bilinear_over_8888_8_8888_process_four_pixels .endm .macro bilinear_over_8888_8_8888_process_pixblock_tail .endm .macro bilinear_over_8888_8_8888_process_pixblock_tail_head bilinear_over_8888_8_8888_process_pixblock_tail bilinear_over_8888_8_8888_process_pixblock_head .endm /* add_8888_8888 */ .macro bilinear_add_8888_8888_process_last_pixel bilinear_interpolate_last_pixel 8888, x, 8888, add .endm .macro 
bilinear_add_8888_8888_process_two_pixels bilinear_interpolate_two_pixels 8888, x, 8888, add .endm .macro bilinear_add_8888_8888_process_four_pixels bilinear_interpolate_two_pixels 8888, x, 8888, add bilinear_interpolate_two_pixels 8888, x, 8888, add .endm .macro bilinear_add_8888_8888_process_pixblock_head bilinear_add_8888_8888_process_four_pixels .endm .macro bilinear_add_8888_8888_process_pixblock_tail .endm .macro bilinear_add_8888_8888_process_pixblock_tail_head bilinear_add_8888_8888_process_pixblock_tail bilinear_add_8888_8888_process_pixblock_head .endm /* add_8888_8_8888 */ .macro bilinear_add_8888_8_8888_process_last_pixel bilinear_interpolate_last_pixel 8888, 8, 8888, add .endm .macro bilinear_add_8888_8_8888_process_two_pixels bilinear_interpolate_two_pixels 8888, 8, 8888, add .endm .macro bilinear_add_8888_8_8888_process_four_pixels bilinear_interpolate_four_pixels 8888, 8, 8888, add .endm .macro bilinear_add_8888_8_8888_process_pixblock_head bilinear_add_8888_8_8888_process_four_pixels .endm .macro bilinear_add_8888_8_8888_process_pixblock_tail .endm .macro bilinear_add_8888_8_8888_process_pixblock_tail_head bilinear_add_8888_8_8888_process_pixblock_tail bilinear_add_8888_8_8888_process_pixblock_head .endm /* Bilinear scanline functions */ generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_8888_8_8888_SRC_asm_neon, \ 8888, 8888, 2, 2, \ bilinear_src_8888_8_8888_process_last_pixel, \ bilinear_src_8888_8_8888_process_two_pixels, \ bilinear_src_8888_8_8888_process_four_pixels, \ bilinear_src_8888_8_8888_process_pixblock_head, \ bilinear_src_8888_8_8888_process_pixblock_tail, \ bilinear_src_8888_8_8888_process_pixblock_tail_head, \ 4, 28, BILINEAR_FLAG_USE_MASK generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_8888_8_0565_SRC_asm_neon, \ 8888, 0565, 2, 1, \ bilinear_src_8888_8_0565_process_last_pixel, \ bilinear_src_8888_8_0565_process_two_pixels, \ bilinear_src_8888_8_0565_process_four_pixels, \ bilinear_src_8888_8_0565_process_pixblock_head, \ bilinear_src_8888_8_0565_process_pixblock_tail, \ bilinear_src_8888_8_0565_process_pixblock_tail_head, \ 4, 28, BILINEAR_FLAG_USE_MASK generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_0565_8_x888_SRC_asm_neon, \ 0565, 8888, 1, 2, \ bilinear_src_0565_8_x888_process_last_pixel, \ bilinear_src_0565_8_x888_process_two_pixels, \ bilinear_src_0565_8_x888_process_four_pixels, \ bilinear_src_0565_8_x888_process_pixblock_head, \ bilinear_src_0565_8_x888_process_pixblock_tail, \ bilinear_src_0565_8_x888_process_pixblock_tail_head, \ 4, 28, BILINEAR_FLAG_USE_MASK generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_0565_8_0565_SRC_asm_neon, \ 0565, 0565, 1, 1, \ bilinear_src_0565_8_0565_process_last_pixel, \ bilinear_src_0565_8_0565_process_two_pixels, \ bilinear_src_0565_8_0565_process_four_pixels, \ bilinear_src_0565_8_0565_process_pixblock_head, \ bilinear_src_0565_8_0565_process_pixblock_tail, \ bilinear_src_0565_8_0565_process_pixblock_tail_head, \ 4, 28, BILINEAR_FLAG_USE_MASK generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_8888_8888_OVER_asm_neon, \ 8888, 8888, 2, 2, \ bilinear_over_8888_8888_process_last_pixel, \ bilinear_over_8888_8888_process_two_pixels, \ bilinear_over_8888_8888_process_four_pixels, \ bilinear_over_8888_8888_process_pixblock_head, \ bilinear_over_8888_8888_process_pixblock_tail, \ bilinear_over_8888_8888_process_pixblock_tail_head, \ 4, 28, 0 generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_8888_8_8888_OVER_asm_neon, \ 8888, 
8888, 2, 2, \ bilinear_over_8888_8_8888_process_last_pixel, \ bilinear_over_8888_8_8888_process_two_pixels, \ bilinear_over_8888_8_8888_process_four_pixels, \ bilinear_over_8888_8_8888_process_pixblock_head, \ bilinear_over_8888_8_8888_process_pixblock_tail, \ bilinear_over_8888_8_8888_process_pixblock_tail_head, \ 4, 28, BILINEAR_FLAG_USE_MASK generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_8888_8888_ADD_asm_neon, \ 8888, 8888, 2, 2, \ bilinear_add_8888_8888_process_last_pixel, \ bilinear_add_8888_8888_process_two_pixels, \ bilinear_add_8888_8888_process_four_pixels, \ bilinear_add_8888_8888_process_pixblock_head, \ bilinear_add_8888_8888_process_pixblock_tail, \ bilinear_add_8888_8888_process_pixblock_tail_head, \ 4, 28, 0 generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_8888_8_8888_ADD_asm_neon, \ 8888, 8888, 2, 2, \ bilinear_add_8888_8_8888_process_last_pixel, \ bilinear_add_8888_8_8888_process_two_pixels, \ bilinear_add_8888_8_8888_process_four_pixels, \ bilinear_add_8888_8_8888_process_pixblock_head, \ bilinear_add_8888_8_8888_process_pixblock_tail, \ bilinear_add_8888_8_8888_process_pixblock_tail_head, \ 4, 28, BILINEAR_FLAG_USE_MASK
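The header comment of the aarch64 file above enumerates the per-pixel pipeline (interpolate source, load mask and destination, apply the mask, combine, store). The sketch below is a scalar illustration of the mask-and-combine half of that pipeline for an a8 mask with the OVER operator, assuming the usual rounded x*y/255 multiply that the urshr/raddhn pairs implement and the saturating add that uqadd provides; the helper names are made up for the example and are not pixman API.

#include <stdint.h>

/* Rounded x*y/255, the scalar equivalent of the urshr #8 + raddhn idiom. */
static inline uint8_t
mul_un8 (uint8_t a, uint8_t b)
{
    uint32_t t = (uint32_t) a * b + 0x80;
    return (uint8_t) ((t + (t >> 8)) >> 8);
}

/* Saturating byte add, the scalar equivalent of uqadd. */
static inline uint8_t
add_un8 (uint8_t a, uint8_t b)
{
    uint32_t t = (uint32_t) a + b;
    return (uint8_t) (t > 255 ? 255 : t);
}

/* One a8r8g8b8 pixel: apply the a8 mask m to the (premultiplied) source,
 * then composite OVER the destination. */
static uint32_t
combine_over_mask (uint32_t src, uint32_t dst, uint8_t m)
{
    uint8_t  src_a = mul_un8 ((uint8_t) (src >> 24), m);  /* masked alpha */
    uint32_t out   = 0;

    for (int s = 0; s < 32; s += 8)
    {
        uint8_t sc = mul_un8 ((uint8_t) ((src >> s) & 0xff), m);
        uint8_t dc = (uint8_t) ((dst >> s) & 0xff);
        uint8_t oc = add_un8 (mul_un8 (dc, (uint8_t) (255 - src_a)), sc);
        out |= (uint32_t) oc << s;
    }
    return out;
}

The ADD variants skip the (255 - src_a) weighting and simply saturate-add the masked source onto the destination, which is why their pixblock macros are nothing more than the generic two/four-pixel interpolation blocks.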
ElSargo/wezpy
120,733
wezterm-src/deps/cairo/pixman/pixman/pixman-mips-dspr2-asm.S
/* * Copyright (c) 2012 * MIPS Technologies, Inc., California. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Author: Nemanja Lukic (nemanja.lukic@rt-rk.com) */ #include "pixman-private.h" #include "pixman-mips-dspr2-asm.h" LEAF_MIPS_DSPR2(pixman_fill_buff16_mips) /* * a0 - *dest * a1 - count (bytes) * a2 - value to fill buffer with */ beqz a1, 3f andi t1, a0, 0x0002 beqz t1, 0f /* check if address is 4-byte aligned */ nop sh a2, 0(a0) addiu a0, a0, 2 addiu a1, a1, -2 0: srl t1, a1, 5 /* t1 how many multiples of 32 bytes */ replv.ph a2, a2 /* replicate fill value (16bit) in a2 */ beqz t1, 2f nop 1: addiu t1, t1, -1 beqz t1, 11f addiu a1, a1, -32 pref 30, 32(a0) sw a2, 0(a0) sw a2, 4(a0) sw a2, 8(a0) sw a2, 12(a0) sw a2, 16(a0) sw a2, 20(a0) sw a2, 24(a0) sw a2, 28(a0) b 1b addiu a0, a0, 32 11: sw a2, 0(a0) sw a2, 4(a0) sw a2, 8(a0) sw a2, 12(a0) sw a2, 16(a0) sw a2, 20(a0) sw a2, 24(a0) sw a2, 28(a0) addiu a0, a0, 32 2: blez a1, 3f addiu a1, a1, -2 sh a2, 0(a0) b 2b addiu a0, a0, 2 3: jr ra nop END(pixman_fill_buff16_mips) LEAF_MIPS32R2(pixman_fill_buff32_mips) /* * a0 - *dest * a1 - count (bytes) * a2 - value to fill buffer with */ beqz a1, 3f nop srl t1, a1, 5 /* t1 how many multiples of 32 bytes */ beqz t1, 2f nop 1: addiu t1, t1, -1 beqz t1, 11f addiu a1, a1, -32 pref 30, 32(a0) sw a2, 0(a0) sw a2, 4(a0) sw a2, 8(a0) sw a2, 12(a0) sw a2, 16(a0) sw a2, 20(a0) sw a2, 24(a0) sw a2, 28(a0) b 1b addiu a0, a0, 32 11: sw a2, 0(a0) sw a2, 4(a0) sw a2, 8(a0) sw a2, 12(a0) sw a2, 16(a0) sw a2, 20(a0) sw a2, 24(a0) sw a2, 28(a0) addiu a0, a0, 32 2: blez a1, 3f addiu a1, a1, -4 sw a2, 0(a0) b 2b addiu a0, a0, 4 3: jr ra nop END(pixman_fill_buff32_mips) LEAF_MIPS_DSPR2(pixman_composite_src_8888_0565_asm_mips) /* * a0 - dst (r5g6b5) * a1 - src (a8r8g8b8) * a2 - w */ beqz a2, 3f nop addiu t1, a2, -1 beqz t1, 2f nop li t4, 0xf800f800 li t5, 0x07e007e0 li t6, 0x001f001f 1: lw t0, 0(a1) lw t1, 4(a1) addiu a1, a1, 8 addiu a2, a2, -2 CONVERT_2x8888_TO_2x0565 t0, t1, t2, t3, t4, t5, t6, t7, t8 sh t2, 0(a0) sh t3, 2(a0) addiu t2, a2, -1 bgtz t2, 1b addiu a0, a0, 4 2: beqz a2, 3f nop lw t0, 0(a1) 
CONVERT_1x8888_TO_1x0565 t0, t1, t2, t3 sh t1, 0(a0) 3: j ra nop END(pixman_composite_src_8888_0565_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_src_0565_8888_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (r5g6b5) * a2 - w */ beqz a2, 3f nop addiu t1, a2, -1 beqz t1, 2f nop li t4, 0x07e007e0 li t5, 0x001F001F 1: lhu t0, 0(a1) lhu t1, 2(a1) addiu a1, a1, 4 addiu a2, a2, -2 CONVERT_2x0565_TO_2x8888 t0, t1, t2, t3, t4, t5, t6, t7, t8, t9 sw t2, 0(a0) sw t3, 4(a0) addiu t2, a2, -1 bgtz t2, 1b addiu a0, a0, 8 2: beqz a2, 3f nop lhu t0, 0(a1) CONVERT_1x0565_TO_1x8888 t0, t1, t2, t3 sw t1, 0(a0) 3: j ra nop END(pixman_composite_src_0565_8888_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_src_x888_8888_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (x8r8g8b8) * a2 - w */ beqz a2, 4f nop li t9, 0xff000000 srl t8, a2, 3 /* t1 = how many multiples of 8 src pixels */ beqz t8, 3f /* branch if less than 8 src pixels */ nop 1: addiu t8, t8, -1 beqz t8, 2f addiu a2, a2, -8 pref 0, 32(a1) lw t0, 0(a1) lw t1, 4(a1) lw t2, 8(a1) lw t3, 12(a1) lw t4, 16(a1) lw t5, 20(a1) lw t6, 24(a1) lw t7, 28(a1) addiu a1, a1, 32 or t0, t0, t9 or t1, t1, t9 or t2, t2, t9 or t3, t3, t9 or t4, t4, t9 or t5, t5, t9 or t6, t6, t9 or t7, t7, t9 pref 30, 32(a0) sw t0, 0(a0) sw t1, 4(a0) sw t2, 8(a0) sw t3, 12(a0) sw t4, 16(a0) sw t5, 20(a0) sw t6, 24(a0) sw t7, 28(a0) b 1b addiu a0, a0, 32 2: lw t0, 0(a1) lw t1, 4(a1) lw t2, 8(a1) lw t3, 12(a1) lw t4, 16(a1) lw t5, 20(a1) lw t6, 24(a1) lw t7, 28(a1) addiu a1, a1, 32 or t0, t0, t9 or t1, t1, t9 or t2, t2, t9 or t3, t3, t9 or t4, t4, t9 or t5, t5, t9 or t6, t6, t9 or t7, t7, t9 sw t0, 0(a0) sw t1, 4(a0) sw t2, 8(a0) sw t3, 12(a0) sw t4, 16(a0) sw t5, 20(a0) sw t6, 24(a0) sw t7, 28(a0) beqz a2, 4f addiu a0, a0, 32 3: lw t0, 0(a1) addiu a1, a1, 4 addiu a2, a2, -1 or t1, t0, t9 sw t1, 0(a0) bnez a2, 3b addiu a0, a0, 4 4: jr ra nop END(pixman_composite_src_x888_8888_asm_mips) #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) LEAF_MIPS_DSPR2(pixman_composite_src_0888_8888_rev_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (b8g8r8) * a2 - w */ beqz a2, 6f nop lui t8, 0xff00; srl t9, a2, 2 /* t9 = how many multiples of 4 src pixels */ beqz t9, 4f /* branch if less than 4 src pixels */ nop li t0, 0x1 li t1, 0x2 li t2, 0x3 andi t3, a1, 0x3 beq t3, t0, 1f nop beq t3, t1, 2f nop beq t3, t2, 3f nop 0: beqz t9, 4f addiu t9, t9, -1 lw t0, 0(a1) /* t0 = R2 | B1 | G1 | R1 */ lw t1, 4(a1) /* t1 = G3 | R3 | B2 | G2 */ lw t2, 8(a1) /* t2 = B4 | G4 | R4 | B3 */ addiu a1, a1, 12 addiu a2, a2, -4 wsbh t0, t0 /* t0 = B1 | R2 | R1 | G1 */ wsbh t1, t1 /* t1 = R3 | G3 | G2 | B2 */ wsbh t2, t2 /* t2 = G4 | B4 | B3 | R4 */ packrl.ph t3, t1, t0 /* t3 = G2 | B2 | B1 | R2 */ packrl.ph t4, t0, t0 /* t4 = R1 | G1 | B1 | R2 */ rotr t3, t3, 16 /* t3 = B1 | R2 | G2 | B2 */ or t3, t3, t8 /* t3 = FF | R2 | G2 | B2 */ srl t4, t4, 8 /* t4 = 0 | R1 | G1 | B1 */ or t4, t4, t8 /* t4 = FF | R1 | G1 | B1 */ packrl.ph t5, t2, t1 /* t5 = B3 | R4 | R3 | G3 */ rotr t5, t5, 24 /* t5 = R4 | R3 | G3 | B3 */ or t5, t5, t8 /* t5 = FF | R3 | G3 | B3 */ rotr t2, t2, 16 /* t2 = B3 | R4 | G4 | B4 */ or t2, t2, t8 /* t5 = FF | R3 | G3 | B3 */ sw t4, 0(a0) sw t3, 4(a0) sw t5, 8(a0) sw t2, 12(a0) b 0b addiu a0, a0, 16 1: lbu t6, 0(a1) /* t6 = 0 | 0 | 0 | R1 */ lhu t7, 1(a1) /* t7 = 0 | 0 | B1 | G1 */ sll t6, t6, 16 /* t6 = 0 | R1 | 0 | 0 */ wsbh t7, t7 /* t7 = 0 | 0 | G1 | B1 */ or t7, t6, t7 /* t7 = 0 | R1 | G1 | B1 */ 11: beqz t9, 4f addiu t9, t9, -1 lw t0, 3(a1) /* t0 = R3 | B2 | G2 | R2 */ lw t1, 7(a1) /* t1 = 
G4 | R4 | B3 | G3 */ lw t2, 11(a1) /* t2 = B5 | G5 | R5 | B4 */ addiu a1, a1, 12 addiu a2, a2, -4 wsbh t0, t0 /* t0 = B2 | R3 | R2 | G2 */ wsbh t1, t1 /* t1 = R4 | G4 | G3 | B3 */ wsbh t2, t2 /* t2 = G5 | B5 | B4 | R5 */ packrl.ph t3, t1, t0 /* t3 = G3 | B3 | B2 | R3 */ packrl.ph t4, t2, t1 /* t4 = B4 | R5 | R4 | G4 */ rotr t0, t0, 24 /* t0 = R3 | R2 | G2 | B2 */ rotr t3, t3, 16 /* t3 = B2 | R3 | G3 | B3 */ rotr t4, t4, 24 /* t4 = R5 | R4 | G4 | B4 */ or t7, t7, t8 /* t7 = FF | R1 | G1 | B1 */ or t0, t0, t8 /* t0 = FF | R2 | G2 | B2 */ or t3, t3, t8 /* t1 = FF | R3 | G3 | B3 */ or t4, t4, t8 /* t3 = FF | R4 | G4 | B4 */ sw t7, 0(a0) sw t0, 4(a0) sw t3, 8(a0) sw t4, 12(a0) rotr t7, t2, 16 /* t7 = xx | R5 | G5 | B5 */ b 11b addiu a0, a0, 16 2: lhu t7, 0(a1) /* t7 = 0 | 0 | G1 | R1 */ wsbh t7, t7 /* t7 = 0 | 0 | R1 | G1 */ 21: beqz t9, 4f addiu t9, t9, -1 lw t0, 2(a1) /* t0 = B2 | G2 | R2 | B1 */ lw t1, 6(a1) /* t1 = R4 | B3 | G3 | R3 */ lw t2, 10(a1) /* t2 = G5 | R5 | B4 | G4 */ addiu a1, a1, 12 addiu a2, a2, -4 wsbh t0, t0 /* t0 = G2 | B2 | B1 | R2 */ wsbh t1, t1 /* t1 = B3 | R4 | R3 | G3 */ wsbh t2, t2 /* t2 = R5 | G5 | G4 | B4 */ precr_sra.ph.w t7, t0, 0 /* t7 = R1 | G1 | B1 | R2 */ rotr t0, t0, 16 /* t0 = B1 | R2 | G2 | B2 */ packrl.ph t3, t2, t1 /* t3 = G4 | B4 | B3 | R4 */ rotr t1, t1, 24 /* t1 = R4 | R3 | G3 | B3 */ srl t7, t7, 8 /* t7 = 0 | R1 | G1 | B1 */ rotr t3, t3, 16 /* t3 = B3 | R4 | G4 | B4 */ or t7, t7, t8 /* t7 = FF | R1 | G1 | B1 */ or t0, t0, t8 /* t0 = FF | R2 | G2 | B2 */ or t1, t1, t8 /* t1 = FF | R3 | G3 | B3 */ or t3, t3, t8 /* t3 = FF | R4 | G4 | B4 */ sw t7, 0(a0) sw t0, 4(a0) sw t1, 8(a0) sw t3, 12(a0) srl t7, t2, 16 /* t7 = 0 | 0 | R5 | G5 */ b 21b addiu a0, a0, 16 3: lbu t7, 0(a1) /* t7 = 0 | 0 | 0 | R1 */ 31: beqz t9, 4f addiu t9, t9, -1 lw t0, 1(a1) /* t0 = G2 | R2 | B1 | G1 */ lw t1, 5(a1) /* t1 = B3 | G3 | R3 | B2 */ lw t2, 9(a1) /* t2 = R5 | B4 | G4 | R4 */ addiu a1, a1, 12 addiu a2, a2, -4 wsbh t0, t0 /* t0 = R2 | G2 | G1 | B1 */ wsbh t1, t1 /* t1 = G3 | B3 | B2 | R3 */ wsbh t2, t2 /* t2 = B4 | R5 | R4 | G4 */ precr_sra.ph.w t7, t0, 0 /* t7 = xx | R1 | G1 | B1 */ packrl.ph t3, t1, t0 /* t3 = B2 | R3 | R2 | G2 */ rotr t1, t1, 16 /* t1 = B2 | R3 | G3 | B3 */ rotr t4, t2, 24 /* t4 = R5 | R4 | G4 | B4 */ rotr t3, t3, 24 /* t3 = R3 | R2 | G2 | B2 */ or t7, t7, t8 /* t7 = FF | R1 | G1 | B1 */ or t3, t3, t8 /* t3 = FF | R2 | G2 | B2 */ or t1, t1, t8 /* t1 = FF | R3 | G3 | B3 */ or t4, t4, t8 /* t4 = FF | R4 | G4 | B4 */ sw t7, 0(a0) sw t3, 4(a0) sw t1, 8(a0) sw t4, 12(a0) srl t7, t2, 16 /* t7 = 0 | 0 | xx | R5 */ b 31b addiu a0, a0, 16 4: beqz a2, 6f nop 5: lbu t0, 0(a1) /* t0 = 0 | 0 | 0 | R */ lbu t1, 1(a1) /* t1 = 0 | 0 | 0 | G */ lbu t2, 2(a1) /* t2 = 0 | 0 | 0 | B */ addiu a1, a1, 3 sll t0, t0, 16 /* t2 = 0 | R | 0 | 0 */ sll t1, t1, 8 /* t1 = 0 | 0 | G | 0 */ or t2, t2, t1 /* t2 = 0 | 0 | G | B */ or t2, t2, t0 /* t2 = 0 | R | G | B */ or t2, t2, t8 /* t2 = FF | R | G | B */ sw t2, 0(a0) addiu a2, a2, -1 bnez a2, 5b addiu a0, a0, 4 6: j ra nop END(pixman_composite_src_0888_8888_rev_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_src_0888_0565_rev_asm_mips) /* * a0 - dst (r5g6b5) * a1 - src (b8g8r8) * a2 - w */ SAVE_REGS_ON_STACK 0, v0, v1 beqz a2, 6f nop li t6, 0xf800f800 li t7, 0x07e007e0 li t8, 0x001F001F srl t9, a2, 2 /* t9 = how many multiples of 4 src pixels */ beqz t9, 4f /* branch if less than 4 src pixels */ nop li t0, 0x1 li t1, 0x2 li t2, 0x3 andi t3, a1, 0x3 beq t3, t0, 1f nop beq t3, t1, 2f nop beq t3, t2, 3f nop 0: beqz t9, 4f addiu t9, t9, -1 lw t0, 
0(a1) /* t0 = R2 | B1 | G1 | R1 */ lw t1, 4(a1) /* t1 = G3 | R3 | B2 | G2 */ lw t2, 8(a1) /* t2 = B4 | G4 | R4 | B3 */ addiu a1, a1, 12 addiu a2, a2, -4 wsbh t0, t0 /* t0 = B1 | R2 | R1 | G1 */ wsbh t1, t1 /* t1 = R3 | G3 | G2 | B2 */ wsbh t2, t2 /* t2 = G4 | B4 | B3 | R4 */ packrl.ph t3, t1, t0 /* t3 = G2 | B2 | B1 | R2 */ packrl.ph t4, t0, t0 /* t4 = R1 | G1 | B1 | R2 */ rotr t3, t3, 16 /* t3 = B1 | R2 | G2 | B2 */ srl t4, t4, 8 /* t4 = 0 | R1 | G1 | B1 */ packrl.ph t5, t2, t1 /* t5 = B3 | R4 | R3 | G3 */ rotr t5, t5, 24 /* t5 = R4 | R3 | G3 | B3 */ rotr t2, t2, 16 /* t2 = B3 | R4 | G4 | B4 */ CONVERT_2x8888_TO_2x0565 t4, t3, t4, t3, t6, t7, t8, v0, v1 CONVERT_2x8888_TO_2x0565 t5, t2, t5, t2, t6, t7, t8, v0, v1 sh t4, 0(a0) sh t3, 2(a0) sh t5, 4(a0) sh t2, 6(a0) b 0b addiu a0, a0, 8 1: lbu t4, 0(a1) /* t4 = 0 | 0 | 0 | R1 */ lhu t5, 1(a1) /* t5 = 0 | 0 | B1 | G1 */ sll t4, t4, 16 /* t4 = 0 | R1 | 0 | 0 */ wsbh t5, t5 /* t5 = 0 | 0 | G1 | B1 */ or t5, t4, t5 /* t5 = 0 | R1 | G1 | B1 */ 11: beqz t9, 4f addiu t9, t9, -1 lw t0, 3(a1) /* t0 = R3 | B2 | G2 | R2 */ lw t1, 7(a1) /* t1 = G4 | R4 | B3 | G3 */ lw t2, 11(a1) /* t2 = B5 | G5 | R5 | B4 */ addiu a1, a1, 12 addiu a2, a2, -4 wsbh t0, t0 /* t0 = B2 | R3 | R2 | G2 */ wsbh t1, t1 /* t1 = R4 | G4 | G3 | B3 */ wsbh t2, t2 /* t2 = G5 | B5 | B4 | R5 */ packrl.ph t3, t1, t0 /* t3 = G3 | B3 | B2 | R3 */ packrl.ph t4, t2, t1 /* t4 = B4 | R5 | R4 | G4 */ rotr t0, t0, 24 /* t0 = R3 | R2 | G2 | B2 */ rotr t3, t3, 16 /* t3 = B2 | R3 | G3 | B3 */ rotr t4, t4, 24 /* t4 = R5 | R4 | G4 | B4 */ CONVERT_2x8888_TO_2x0565 t5, t0, t5, t0, t6, t7, t8, v0, v1 CONVERT_2x8888_TO_2x0565 t3, t4, t3, t4, t6, t7, t8, v0, v1 sh t5, 0(a0) sh t0, 2(a0) sh t3, 4(a0) sh t4, 6(a0) rotr t5, t2, 16 /* t5 = xx | R5 | G5 | B5 */ b 11b addiu a0, a0, 8 2: lhu t5, 0(a1) /* t5 = 0 | 0 | G1 | R1 */ wsbh t5, t5 /* t5 = 0 | 0 | R1 | G1 */ 21: beqz t9, 4f addiu t9, t9, -1 lw t0, 2(a1) /* t0 = B2 | G2 | R2 | B1 */ lw t1, 6(a1) /* t1 = R4 | B3 | G3 | R3 */ lw t2, 10(a1) /* t2 = G5 | R5 | B4 | G4 */ addiu a1, a1, 12 addiu a2, a2, -4 wsbh t0, t0 /* t0 = G2 | B2 | B1 | R2 */ wsbh t1, t1 /* t1 = B3 | R4 | R3 | G3 */ wsbh t2, t2 /* t2 = R5 | G5 | G4 | B4 */ precr_sra.ph.w t5, t0, 0 /* t5 = R1 | G1 | B1 | R2 */ rotr t0, t0, 16 /* t0 = B1 | R2 | G2 | B2 */ packrl.ph t3, t2, t1 /* t3 = G4 | B4 | B3 | R4 */ rotr t1, t1, 24 /* t1 = R4 | R3 | G3 | B3 */ srl t5, t5, 8 /* t5 = 0 | R1 | G1 | B1 */ rotr t3, t3, 16 /* t3 = B3 | R4 | G4 | B4 */ CONVERT_2x8888_TO_2x0565 t5, t0, t5, t0, t6, t7, t8, v0, v1 CONVERT_2x8888_TO_2x0565 t1, t3, t1, t3, t6, t7, t8, v0, v1 sh t5, 0(a0) sh t0, 2(a0) sh t1, 4(a0) sh t3, 6(a0) srl t5, t2, 16 /* t5 = 0 | 0 | R5 | G5 */ b 21b addiu a0, a0, 8 3: lbu t5, 0(a1) /* t5 = 0 | 0 | 0 | R1 */ 31: beqz t9, 4f addiu t9, t9, -1 lw t0, 1(a1) /* t0 = G2 | R2 | B1 | G1 */ lw t1, 5(a1) /* t1 = B3 | G3 | R3 | B2 */ lw t2, 9(a1) /* t2 = R5 | B4 | G4 | R4 */ addiu a1, a1, 12 addiu a2, a2, -4 wsbh t0, t0 /* t0 = R2 | G2 | G1 | B1 */ wsbh t1, t1 /* t1 = G3 | B3 | B2 | R3 */ wsbh t2, t2 /* t2 = B4 | R5 | R4 | G4 */ precr_sra.ph.w t5, t0, 0 /* t5 = xx | R1 | G1 | B1 */ packrl.ph t3, t1, t0 /* t3 = B2 | R3 | R2 | G2 */ rotr t1, t1, 16 /* t1 = B2 | R3 | G3 | B3 */ rotr t4, t2, 24 /* t4 = R5 | R4 | G4 | B4 */ rotr t3, t3, 24 /* t3 = R3 | R2 | G2 | B2 */ CONVERT_2x8888_TO_2x0565 t5, t3, t5, t3, t6, t7, t8, v0, v1 CONVERT_2x8888_TO_2x0565 t1, t4, t1, t4, t6, t7, t8, v0, v1 sh t5, 0(a0) sh t3, 2(a0) sh t1, 4(a0) sh t4, 6(a0) srl t5, t2, 16 /* t5 = 0 | 0 | xx | R5 */ b 31b addiu a0, a0, 8 4: beqz 
a2, 6f nop 5: lbu t0, 0(a1) /* t0 = 0 | 0 | 0 | R */ lbu t1, 1(a1) /* t1 = 0 | 0 | 0 | G */ lbu t2, 2(a1) /* t2 = 0 | 0 | 0 | B */ addiu a1, a1, 3 sll t0, t0, 16 /* t2 = 0 | R | 0 | 0 */ sll t1, t1, 8 /* t1 = 0 | 0 | G | 0 */ or t2, t2, t1 /* t2 = 0 | 0 | G | B */ or t2, t2, t0 /* t2 = 0 | R | G | B */ CONVERT_1x8888_TO_1x0565 t2, t3, t4, t5 sh t3, 0(a0) addiu a2, a2, -1 bnez a2, 5b addiu a0, a0, 2 6: RESTORE_REGS_FROM_STACK 0, v0, v1 j ra nop END(pixman_composite_src_0888_0565_rev_asm_mips) #endif LEAF_MIPS_DSPR2(pixman_composite_src_pixbuf_8888_asm_mips) /* * a0 - dst (a8b8g8r8) * a1 - src (a8r8g8b8) * a2 - w */ SAVE_REGS_ON_STACK 0, v0 li v0, 0x00ff00ff beqz a2, 3f nop addiu t1, a2, -1 beqz t1, 2f nop 1: lw t0, 0(a1) lw t1, 4(a1) addiu a1, a1, 8 addiu a2, a2, -2 srl t2, t0, 24 srl t3, t1, 24 MIPS_2xUN8x4_MUL_2xUN8 t0, t1, t2, t3, t0, t1, v0, t4, t5, t6, t7, t8, t9 sll t0, t0, 8 sll t1, t1, 8 andi t2, t2, 0xff andi t3, t3, 0xff or t0, t0, t2 or t1, t1, t3 wsbh t0, t0 wsbh t1, t1 rotr t0, t0, 16 rotr t1, t1, 16 sw t0, 0(a0) sw t1, 4(a0) addiu t2, a2, -1 bgtz t2, 1b addiu a0, a0, 8 2: beqz a2, 3f nop lw t0, 0(a1) srl t1, t0, 24 MIPS_UN8x4_MUL_UN8 t0, t1, t0, v0, t3, t4, t5 sll t0, t0, 8 andi t1, t1, 0xff or t0, t0, t1 wsbh t0, t0 rotr t0, t0, 16 sw t0, 0(a0) 3: RESTORE_REGS_FROM_STACK 0, v0 j ra nop END(pixman_composite_src_pixbuf_8888_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_src_rpixbuf_8888_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (a8r8g8b8) * a2 - w */ SAVE_REGS_ON_STACK 0, v0 li v0, 0x00ff00ff beqz a2, 3f nop addiu t1, a2, -1 beqz t1, 2f nop 1: lw t0, 0(a1) lw t1, 4(a1) addiu a1, a1, 8 addiu a2, a2, -2 srl t2, t0, 24 srl t3, t1, 24 MIPS_2xUN8x4_MUL_2xUN8 t0, t1, t2, t3, t0, t1, v0, t4, t5, t6, t7, t8, t9 sll t0, t0, 8 sll t1, t1, 8 andi t2, t2, 0xff andi t3, t3, 0xff or t0, t0, t2 or t1, t1, t3 rotr t0, t0, 8 rotr t1, t1, 8 sw t0, 0(a0) sw t1, 4(a0) addiu t2, a2, -1 bgtz t2, 1b addiu a0, a0, 8 2: beqz a2, 3f nop lw t0, 0(a1) srl t1, t0, 24 MIPS_UN8x4_MUL_UN8 t0, t1, t0, v0, t3, t4, t5 sll t0, t0, 8 andi t1, t1, 0xff or t0, t0, t1 rotr t0, t0, 8 sw t0, 0(a0) 3: RESTORE_REGS_FROM_STACK 0, v0 j ra nop END(pixman_composite_src_rpixbuf_8888_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_src_n_8_8888_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (32bit constant) * a2 - mask (a8) * a3 - w */ SAVE_REGS_ON_STACK 0, v0 li v0, 0x00ff00ff beqz a3, 3f nop addiu t1, a3, -1 beqz t1, 2f nop 1: /* a1 = source (32bit constant) */ lbu t0, 0(a2) /* t2 = mask (a8) */ lbu t1, 1(a2) /* t3 = mask (a8) */ addiu a2, a2, 2 MIPS_2xUN8x4_MUL_2xUN8 a1, a1, t0, t1, t2, t3, v0, t4, t5, t6, t7, t8, t9 sw t2, 0(a0) sw t3, 4(a0) addiu a3, a3, -2 addiu t2, a3, -1 bgtz t2, 1b addiu a0, a0, 8 beqz a3, 3f nop 2: lbu t0, 0(a2) addiu a2, a2, 1 MIPS_UN8x4_MUL_UN8 a1, t0, t1, v0, t3, t4, t5 sw t1, 0(a0) addiu a3, a3, -1 addiu a0, a0, 4 3: RESTORE_REGS_FROM_STACK 0, v0 j ra nop END(pixman_composite_src_n_8_8888_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_src_n_8_8_asm_mips) /* * a0 - dst (a8) * a1 - src (32bit constant) * a2 - mask (a8) * a3 - w */ li t9, 0x00ff00ff beqz a3, 3f nop srl t7, a3, 2 /* t7 = how many multiples of 4 dst pixels */ beqz t7, 1f /* branch if less than 4 src pixels */ nop srl t8, a1, 24 replv.ph t8, t8 0: beqz t7, 1f addiu t7, t7, -1 lbu t0, 0(a2) lbu t1, 1(a2) lbu t2, 2(a2) lbu t3, 3(a2) addiu a2, a2, 4 precr_sra.ph.w t1, t0, 0 precr_sra.ph.w t3, t2, 0 precr.qb.ph t0, t3, t1 muleu_s.ph.qbl t2, t0, t8 muleu_s.ph.qbr t3, t0, t8 shra_r.ph t4, t2, 8 shra_r.ph t5, t3, 8 and t4, t4, t9 and t5, t5, t9 addq.ph 
t2, t2, t4 addq.ph t3, t3, t5 shra_r.ph t2, t2, 8 shra_r.ph t3, t3, 8 precr.qb.ph t2, t2, t3 sb t2, 0(a0) srl t2, t2, 8 sb t2, 1(a0) srl t2, t2, 8 sb t2, 2(a0) srl t2, t2, 8 sb t2, 3(a0) addiu a3, a3, -4 b 0b addiu a0, a0, 4 1: beqz a3, 3f nop srl t8, a1, 24 2: lbu t0, 0(a2) addiu a2, a2, 1 mul t2, t0, t8 shra_r.ph t3, t2, 8 andi t3, t3, 0x00ff addq.ph t2, t2, t3 shra_r.ph t2, t2, 8 sb t2, 0(a0) addiu a3, a3, -1 bnez a3, 2b addiu a0, a0, 1 3: j ra nop END(pixman_composite_src_n_8_8_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_over_n_8888_8888_ca_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (32bit constant) * a2 - mask (a8r8g8b8) * a3 - w */ beqz a3, 8f nop SAVE_REGS_ON_STACK 8, s0, s1, s2, s3, s4, s5 li t6, 0xff addiu t7, zero, -1 /* t7 = 0xffffffff */ srl t8, a1, 24 /* t8 = srca */ li t9, 0x00ff00ff addiu t1, a3, -1 beqz t1, 4f /* last pixel */ nop 0: lw t0, 0(a2) /* t0 = mask */ lw t1, 4(a2) /* t1 = mask */ addiu a3, a3, -2 /* w = w - 2 */ or t2, t0, t1 beqz t2, 3f /* if (t0 == 0) && (t1 == 0) */ addiu a2, a2, 8 and t2, t0, t1 beq t2, t7, 1f /* if (t0 == 0xffffffff) && (t1 == 0xffffffff) */ nop //if(ma) lw t2, 0(a0) /* t2 = dst */ lw t3, 4(a0) /* t3 = dst */ MIPS_2xUN8x4_MUL_2xUN8x4 a1, a1, t0, t1, t4, t5, t9, s0, s1, s2, s3, s4, s5 MIPS_2xUN8x4_MUL_2xUN8 t0, t1, t8, t8, t0, t1, t9, s0, s1, s2, s3, s4, s5 not t0, t0 not t1, t1 MIPS_2xUN8x4_MUL_2xUN8x4 t2, t3, t0, t1, t2, t3, t9, s0, s1, s2, s3, s4, s5 addu_s.qb t2, t4, t2 addu_s.qb t3, t5, t3 sw t2, 0(a0) sw t3, 4(a0) addiu t1, a3, -1 bgtz t1, 0b addiu a0, a0, 8 b 4f nop 1: //if (t0 == 0xffffffff) && (t1 == 0xffffffff): beq t8, t6, 2f /* if (srca == 0xff) */ nop lw t2, 0(a0) /* t2 = dst */ lw t3, 4(a0) /* t3 = dst */ not t0, a1 not t1, a1 srl t0, t0, 24 srl t1, t1, 24 MIPS_2xUN8x4_MUL_2xUN8 t2, t3, t0, t1, t2, t3, t9, s0, s1, s2, s3, s4, s5 addu_s.qb t2, a1, t2 addu_s.qb t3, a1, t3 sw t2, 0(a0) sw t3, 4(a0) addiu t1, a3, -1 bgtz t1, 0b addiu a0, a0, 8 b 4f nop 2: sw a1, 0(a0) sw a1, 4(a0) 3: addiu t1, a3, -1 bgtz t1, 0b addiu a0, a0, 8 4: beqz a3, 7f nop /* a1 = src */ lw t0, 0(a2) /* t0 = mask */ beqz t0, 7f /* if (t0 == 0) */ nop beq t0, t7, 5f /* if (t0 == 0xffffffff) */ nop //if(ma) lw t1, 0(a0) /* t1 = dst */ MIPS_UN8x4_MUL_UN8x4 a1, t0, t2, t9, t3, t4, t5, s0 MIPS_UN8x4_MUL_UN8 t0, t8, t0, t9, t3, t4, t5 not t0, t0 MIPS_UN8x4_MUL_UN8x4 t1, t0, t1, t9, t3, t4, t5, s0 addu_s.qb t1, t2, t1 sw t1, 0(a0) RESTORE_REGS_FROM_STACK 8, s0, s1, s2, s3, s4, s5 j ra nop 5: //if (t0 == 0xffffffff) beq t8, t6, 6f /* if (srca == 0xff) */ nop lw t1, 0(a0) /* t1 = dst */ not t0, a1 srl t0, t0, 24 MIPS_UN8x4_MUL_UN8 t1, t0, t1, t9, t2, t3, t4 addu_s.qb t1, a1, t1 sw t1, 0(a0) RESTORE_REGS_FROM_STACK 8, s0, s1, s2, s3, s4, s5 j ra nop 6: sw a1, 0(a0) 7: RESTORE_REGS_FROM_STACK 8, s0, s1, s2, s3, s4, s5 8: j ra nop END(pixman_composite_over_n_8888_8888_ca_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_over_n_8888_0565_ca_asm_mips) /* * a0 - dst (r5g6b5) * a1 - src (32bit constant) * a2 - mask (a8r8g8b8) * a3 - w */ beqz a3, 8f nop SAVE_REGS_ON_STACK 20, s0, s1, s2, s3, s4, s5, s6, s7, s8 li t6, 0xff addiu t7, zero, -1 /* t7 = 0xffffffff */ srl t8, a1, 24 /* t8 = srca */ li t9, 0x00ff00ff li s6, 0xf800f800 li s7, 0x07e007e0 li s8, 0x001F001F addiu t1, a3, -1 beqz t1, 4f /* last pixel */ nop 0: lw t0, 0(a2) /* t0 = mask */ lw t1, 4(a2) /* t1 = mask */ addiu a3, a3, -2 /* w = w - 2 */ or t2, t0, t1 beqz t2, 3f /* if (t0 == 0) && (t1 == 0) */ addiu a2, a2, 8 and t2, t0, t1 beq t2, t7, 1f /* if (t0 == 0xffffffff) && (t1 == 0xffffffff) */ nop //if(ma) lhu t2, 
0(a0) /* t2 = dst */ lhu t3, 2(a0) /* t3 = dst */ MIPS_2xUN8x4_MUL_2xUN8x4 a1, a1, t0, t1, t4, t5, t9, s0, s1, s2, s3, s4, s5 MIPS_2xUN8x4_MUL_2xUN8 t0, t1, t8, t8, t0, t1, t9, s0, s1, s2, s3, s4, s5 not t0, t0 not t1, t1 CONVERT_2x0565_TO_2x8888 t2, t3, t2, t3, s7, s8, s0, s1, s2, s3 MIPS_2xUN8x4_MUL_2xUN8x4 t2, t3, t0, t1, t2, t3, t9, s0, s1, s2, s3, s4, s5 addu_s.qb t2, t4, t2 addu_s.qb t3, t5, t3 CONVERT_2x8888_TO_2x0565 t2, t3, t2, t3, s6, s7, s8, s0, s1 sh t2, 0(a0) sh t3, 2(a0) addiu t1, a3, -1 bgtz t1, 0b addiu a0, a0, 4 b 4f nop 1: //if (t0 == 0xffffffff) && (t1 == 0xffffffff): beq t8, t6, 2f /* if (srca == 0xff) */ nop lhu t2, 0(a0) /* t2 = dst */ lhu t3, 2(a0) /* t3 = dst */ not t0, a1 not t1, a1 srl t0, t0, 24 srl t1, t1, 24 CONVERT_2x0565_TO_2x8888 t2, t3, t2, t3, s7, s8, s0, s1, s2, s3 MIPS_2xUN8x4_MUL_2xUN8 t2, t3, t0, t1, t2, t3, t9, s0, s1, s2, s3, s4, s5 addu_s.qb t2, a1, t2 addu_s.qb t3, a1, t3 CONVERT_2x8888_TO_2x0565 t2, t3, t2, t3, s6, s7, s8, s0, s1 sh t2, 0(a0) sh t3, 2(a0) addiu t1, a3, -1 bgtz t1, 0b addiu a0, a0, 4 b 4f nop 2: CONVERT_1x8888_TO_1x0565 a1, t2, s0, s1 sh t2, 0(a0) sh t2, 2(a0) 3: addiu t1, a3, -1 bgtz t1, 0b addiu a0, a0, 4 4: beqz a3, 7f nop /* a1 = src */ lw t0, 0(a2) /* t0 = mask */ beqz t0, 7f /* if (t0 == 0) */ nop beq t0, t7, 5f /* if (t0 == 0xffffffff) */ nop //if(ma) lhu t1, 0(a0) /* t1 = dst */ MIPS_UN8x4_MUL_UN8x4 a1, t0, t2, t9, t3, t4, t5, s0 MIPS_UN8x4_MUL_UN8 t0, t8, t0, t9, t3, t4, t5 not t0, t0 CONVERT_1x0565_TO_1x8888 t1, s1, s2, s3 MIPS_UN8x4_MUL_UN8x4 s1, t0, s1, t9, t3, t4, t5, s0 addu_s.qb s1, t2, s1 CONVERT_1x8888_TO_1x0565 s1, t1, s0, s2 sh t1, 0(a0) RESTORE_REGS_FROM_STACK 20, s0, s1, s2, s3, s4, s5, s6, s7, s8 j ra nop 5: //if (t0 == 0xffffffff) beq t8, t6, 6f /* if (srca == 0xff) */ nop lhu t1, 0(a0) /* t1 = dst */ not t0, a1 srl t0, t0, 24 CONVERT_1x0565_TO_1x8888 t1, s1, s2, s3 MIPS_UN8x4_MUL_UN8 s1, t0, s1, t9, t2, t3, t4 addu_s.qb s1, a1, s1 CONVERT_1x8888_TO_1x0565 s1, t1, s0, s2 sh t1, 0(a0) RESTORE_REGS_FROM_STACK 20, s0, s1, s2, s3, s4, s5, s6, s7, s8 j ra nop 6: CONVERT_1x8888_TO_1x0565 a1, t1, s0, s2 sh t1, 0(a0) 7: RESTORE_REGS_FROM_STACK 20, s0, s1, s2, s3, s4, s5, s6, s7, s8 8: j ra nop END(pixman_composite_over_n_8888_0565_ca_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_over_n_8_8_asm_mips) /* * a0 - dst (a8) * a1 - src (32bit constant) * a2 - mask (a8) * a3 - w */ SAVE_REGS_ON_STACK 0, v0 li t9, 0x00ff00ff beqz a3, 3f nop srl v0, a3, 2 /* v0 = how many multiples of 4 dst pixels */ beqz v0, 1f /* branch if less than 4 src pixels */ nop srl t8, a1, 24 replv.ph t8, t8 0: beqz v0, 1f addiu v0, v0, -1 lbu t0, 0(a2) lbu t1, 1(a2) lbu t2, 2(a2) lbu t3, 3(a2) lbu t4, 0(a0) lbu t5, 1(a0) lbu t6, 2(a0) lbu t7, 3(a0) addiu a2, a2, 4 precr_sra.ph.w t1, t0, 0 precr_sra.ph.w t3, t2, 0 precr_sra.ph.w t5, t4, 0 precr_sra.ph.w t7, t6, 0 precr.qb.ph t0, t3, t1 precr.qb.ph t1, t7, t5 muleu_s.ph.qbl t2, t0, t8 muleu_s.ph.qbr t3, t0, t8 shra_r.ph t4, t2, 8 shra_r.ph t5, t3, 8 and t4, t4, t9 and t5, t5, t9 addq.ph t2, t2, t4 addq.ph t3, t3, t5 shra_r.ph t2, t2, 8 shra_r.ph t3, t3, 8 precr.qb.ph t0, t2, t3 not t6, t0 preceu.ph.qbl t7, t6 preceu.ph.qbr t6, t6 muleu_s.ph.qbl t2, t1, t7 muleu_s.ph.qbr t3, t1, t6 shra_r.ph t4, t2, 8 shra_r.ph t5, t3, 8 and t4, t4, t9 and t5, t5, t9 addq.ph t2, t2, t4 addq.ph t3, t3, t5 shra_r.ph t2, t2, 8 shra_r.ph t3, t3, 8 precr.qb.ph t1, t2, t3 addu_s.qb t2, t0, t1 sb t2, 0(a0) srl t2, t2, 8 sb t2, 1(a0) srl t2, t2, 8 sb t2, 2(a0) srl t2, t2, 8 sb t2, 3(a0) addiu a3, a3, -4 b 0b addiu a0, a0, 4 1: 
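/*
 * Editor's note (illustrative, not part of the original source): the scalar
 * tail that follows handles one a8 pixel per iteration and is the plain
 * OVER operator.  In C-like terms (helper names assumed, not real macros):
 *
 *     s = DIV_255 (m * srca);          // mul / shra_r.ph / addq.ph / shra_r.ph
 *     d = DIV_255 (d * (255 - s));     // same rounded divide-by-255 pattern
 *     *dst = SAT_ADD_U8 (s, d);        // addu_s.qb
 *
 * where DIV_255(x) is the usual rounding approximation of x / 255,
 * i.e. (x + ((x + 128) >> 8) + 128) >> 8.
 */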
beqz a3, 3f nop srl t8, a1, 24 2: lbu t0, 0(a2) lbu t1, 0(a0) addiu a2, a2, 1 mul t2, t0, t8 shra_r.ph t3, t2, 8 andi t3, t3, 0x00ff addq.ph t2, t2, t3 shra_r.ph t2, t2, 8 not t3, t2 andi t3, t3, 0x00ff mul t4, t1, t3 shra_r.ph t5, t4, 8 andi t5, t5, 0x00ff addq.ph t4, t4, t5 shra_r.ph t4, t4, 8 andi t4, t4, 0x00ff addu_s.qb t2, t2, t4 sb t2, 0(a0) addiu a3, a3, -1 bnez a3, 2b addiu a0, a0, 1 3: RESTORE_REGS_FROM_STACK 0, v0 j ra nop END(pixman_composite_over_n_8_8_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_over_n_8_8888_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (32bit constant) * a2 - mask (a8) * a3 - w */ SAVE_REGS_ON_STACK 4, s0, s1, s2, s3, s4 beqz a3, 4f nop li t4, 0x00ff00ff li t5, 0xff addiu t0, a3, -1 beqz t0, 3f /* last pixel */ srl t6, a1, 24 /* t6 = srca */ not s4, a1 beq t5, t6, 2f /* if (srca == 0xff) */ srl s4, s4, 24 1: /* a1 = src */ lbu t0, 0(a2) /* t0 = mask */ lbu t1, 1(a2) /* t1 = mask */ or t2, t0, t1 beqz t2, 111f /* if (t0 == 0) && (t1 == 0) */ addiu a2, a2, 2 and t3, t0, t1 lw t2, 0(a0) /* t2 = dst */ beq t3, t5, 11f /* if (t0 == 0xff) && (t1 == 0xff) */ lw t3, 4(a0) /* t3 = dst */ MIPS_2xUN8x4_MUL_2xUN8 a1, a1, t0, t1, s0, s1, t4, t6, t7, t8, t9, s2, s3 not s2, s0 not s3, s1 srl s2, s2, 24 srl s3, s3, 24 MIPS_2xUN8x4_MUL_2xUN8 t2, t3, s2, s3, t2, t3, t4, t0, t1, t6, t7, t8, t9 addu_s.qb s2, t2, s0 addu_s.qb s3, t3, s1 sw s2, 0(a0) b 111f sw s3, 4(a0) 11: MIPS_2xUN8x4_MUL_2xUN8 t2, t3, s4, s4, t2, t3, t4, t0, t1, t6, t7, t8, t9 addu_s.qb s2, t2, a1 addu_s.qb s3, t3, a1 sw s2, 0(a0) sw s3, 4(a0) 111: addiu a3, a3, -2 addiu t0, a3, -1 bgtz t0, 1b addiu a0, a0, 8 b 3f nop 2: /* a1 = src */ lbu t0, 0(a2) /* t0 = mask */ lbu t1, 1(a2) /* t1 = mask */ or t2, t0, t1 beqz t2, 222f /* if (t0 == 0) && (t1 == 0) */ addiu a2, a2, 2 and t3, t0, t1 beq t3, t5, 22f /* if (t0 == 0xff) && (t1 == 0xff) */ nop lw t2, 0(a0) /* t2 = dst */ lw t3, 4(a0) /* t3 = dst */ OVER_2x8888_2x8_2x8888 a1, a1, t0, t1, t2, t3, \ t6, t7, t4, t8, t9, s0, s1, s2, s3 sw t6, 0(a0) b 222f sw t7, 4(a0) 22: sw a1, 0(a0) sw a1, 4(a0) 222: addiu a3, a3, -2 addiu t0, a3, -1 bgtz t0, 2b addiu a0, a0, 8 3: blez a3, 4f nop /* a1 = src */ lbu t0, 0(a2) /* t0 = mask */ beqz t0, 4f /* if (t0 == 0) */ addiu a2, a2, 1 move t3, a1 beq t0, t5, 31f /* if (t0 == 0xff) */ lw t1, 0(a0) /* t1 = dst */ MIPS_UN8x4_MUL_UN8 a1, t0, t3, t4, t6, t7, t8 31: not t2, t3 srl t2, t2, 24 MIPS_UN8x4_MUL_UN8 t1, t2, t1, t4, t6, t7, t8 addu_s.qb t2, t1, t3 sw t2, 0(a0) 4: RESTORE_REGS_FROM_STACK 4, s0, s1, s2, s3, s4 j ra nop END(pixman_composite_over_n_8_8888_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_over_n_8_0565_asm_mips) /* * a0 - dst (r5g6b5) * a1 - src (32bit constant) * a2 - mask (a8) * a3 - w */ SAVE_REGS_ON_STACK 24, v0, s0, s1, s2, s3, s4, s5, s6, s7, s8 beqz a3, 4f nop li t4, 0x00ff00ff li t5, 0xff li t6, 0xf800f800 li t7, 0x07e007e0 li t8, 0x001F001F addiu t1, a3, -1 beqz t1, 3f /* last pixel */ srl t0, a1, 24 /* t0 = srca */ not v0, a1 beq t0, t5, 2f /* if (srca == 0xff) */ srl v0, v0, 24 1: /* a1 = src */ lbu t0, 0(a2) /* t0 = mask */ lbu t1, 1(a2) /* t1 = mask */ or t2, t0, t1 beqz t2, 111f /* if (t0 == 0) && (t1 == 0) */ addiu a2, a2, 2 lhu t2, 0(a0) /* t2 = dst */ lhu t3, 2(a0) /* t3 = dst */ CONVERT_2x0565_TO_2x8888 t2, t3, s0, s1, t7, t8, t9, s2, s3, s4 and t9, t0, t1 beq t9, t5, 11f /* if (t0 == 0xff) && (t1 == 0xff) */ nop MIPS_2xUN8x4_MUL_2xUN8 a1, a1, t0, t1, s2, s3, t4, t9, s4, s5, s6, s7, s8 not s4, s2 not s5, s3 srl s4, s4, 24 srl s5, s5, 24 MIPS_2xUN8x4_MUL_2xUN8 s0, s1, s4, s5, s0, s1, t4, t9, t0, t1, s6, 
s7, s8 addu_s.qb s4, s2, s0 addu_s.qb s5, s3, s1 CONVERT_2x8888_TO_2x0565 s4, s5, t2, t3, t6, t7, t8, s0, s1 sh t2, 0(a0) b 111f sh t3, 2(a0) 11: MIPS_2xUN8x4_MUL_2xUN8 s0, s1, v0, v0, s0, s1, t4, t9, t0, t1, s6, s7, s8 addu_s.qb s4, a1, s0 addu_s.qb s5, a1, s1 CONVERT_2x8888_TO_2x0565 s4, s5, t2, t3, t6, t7, t8, s0, s1 sh t2, 0(a0) sh t3, 2(a0) 111: addiu a3, a3, -2 addiu t0, a3, -1 bgtz t0, 1b addiu a0, a0, 4 b 3f nop 2: CONVERT_1x8888_TO_1x0565 a1, s0, s1, s2 21: /* a1 = src */ lbu t0, 0(a2) /* t0 = mask */ lbu t1, 1(a2) /* t1 = mask */ or t2, t0, t1 beqz t2, 222f /* if (t0 == 0) && (t1 == 0) */ addiu a2, a2, 2 and t9, t0, t1 move s2, s0 beq t9, t5, 22f /* if (t0 == 0xff) && (t2 == 0xff) */ move s3, s0 lhu t2, 0(a0) /* t2 = dst */ lhu t3, 2(a0) /* t3 = dst */ CONVERT_2x0565_TO_2x8888 t2, t3, s2, s3, t7, t8, s4, s5, s6, s7 OVER_2x8888_2x8_2x8888 a1, a1, t0, t1, s2, s3, \ t2, t3, t4, t9, s4, s5, s6, s7, s8 CONVERT_2x8888_TO_2x0565 t2, t3, s2, s3, t6, t7, t8, s4, s5 22: sh s2, 0(a0) sh s3, 2(a0) 222: addiu a3, a3, -2 addiu t0, a3, -1 bgtz t0, 21b addiu a0, a0, 4 3: blez a3, 4f nop /* a1 = src */ lbu t0, 0(a2) /* t0 = mask */ beqz t0, 4f /* if (t0 == 0) */ nop lhu t1, 0(a0) /* t1 = dst */ CONVERT_1x0565_TO_1x8888 t1, t2, t3, t7 beq t0, t5, 31f /* if (t0 == 0xff) */ move t3, a1 MIPS_UN8x4_MUL_UN8 a1, t0, t3, t4, t7, t8, t9 31: not t6, t3 srl t6, t6, 24 MIPS_UN8x4_MUL_UN8 t2, t6, t2, t4, t7, t8, t9 addu_s.qb t1, t2, t3 CONVERT_1x8888_TO_1x0565 t1, t2, t3, t7 sh t2, 0(a0) 4: RESTORE_REGS_FROM_STACK 24, v0, s0, s1, s2, s3, s4, s5, s6, s7, s8 j ra nop END(pixman_composite_over_n_8_0565_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_over_8888_n_8888_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (a8r8g8b8) * a2 - mask (32bit constant) * a3 - w */ SAVE_REGS_ON_STACK 0, s0 li t4, 0x00ff00ff beqz a3, 3f nop addiu t1, a3, -1 srl a2, a2, 24 beqz t1, 2f nop 1: lw t0, 0(a1) /* t0 = source (a8r8g8b8) */ lw t1, 4(a1) /* t1 = source (a8r8g8b8) */ /* a2 = mask (32bit constant) */ lw t2, 0(a0) /* t2 = destination (a8r8g8b8) */ lw t3, 4(a0) /* t3 = destination (a8r8g8b8) */ addiu a1, a1, 8 OVER_2x8888_2x8_2x8888 t0, t1, a2, a2, t2, t3, \ t5, t6, t4, t7, t8, t9, t0, t1, s0 sw t5, 0(a0) sw t6, 4(a0) addiu a3, a3, -2 addiu t1, a3, -1 bgtz t1, 1b addiu a0, a0, 8 2: beqz a3, 3f nop lw t0, 0(a1) /* t0 = source (a8r8g8b8) */ /* a2 = mask (32bit constant) */ lw t1, 0(a0) /* t1 = destination (a8r8g8b8) */ OVER_8888_8_8888 t0, a2, t1, t3, t4, t5, t6, t7, t8 sw t3, 0(a0) 3: RESTORE_REGS_FROM_STACK 0, s0 j ra nop END(pixman_composite_over_8888_n_8888_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_over_8888_n_0565_asm_mips) /* * a0 - dst (r5g6b5) * a1 - src (a8r8g8b8) * a2 - mask (32bit constant) * a3 - w */ SAVE_REGS_ON_STACK 0, s0, s1, s2, s3 li t6, 0x00ff00ff li t7, 0xf800f800 li t8, 0x07e007e0 li t9, 0x001F001F beqz a3, 3f nop srl a2, a2, 24 addiu t1, a3, -1 beqz t1, 2f nop 1: lw t0, 0(a1) /* t0 = source (a8r8g8b8) */ lw t1, 4(a1) /* t1 = source (a8r8g8b8) */ /* a2 = mask (32bit constant) */ lhu t2, 0(a0) /* t2 = destination (r5g6b5) */ lhu t3, 2(a0) /* t2 = destination (r5g6b5) */ addiu a1, a1, 8 CONVERT_2x0565_TO_2x8888 t2, t3, t4, t5, t8, t9, s0, s1, t2, t3 OVER_2x8888_2x8_2x8888 t0, t1, a2, a2, t4, t5, \ t2, t3, t6, t0, t1, s0, s1, s2, s3 CONVERT_2x8888_TO_2x0565 t2, t3, t4, t5, t7, t8, t9, s0, s1 sh t4, 0(a0) sh t5, 2(a0) addiu a3, a3, -2 addiu t1, a3, -1 bgtz t1, 1b addiu a0, a0, 4 2: beqz a3, 3f nop lw t0, 0(a1) /* t0 = source (a8r8g8b8) */ /* a2 = mask (32bit constant) */ lhu t1, 0(a0) /* t1 = destination (r5g6b5) */ 
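/*
 * Editor's note (illustrative, not part of the original source): the
 * single-pixel tail below widens the r5g6b5 destination to a8r8g8b8,
 * applies OVER_8888_8_8888 with the constant 8-bit mask held in a2, and
 * packs the result back to r5g6b5.  A C sketch of the widening step, using
 * the common bit-replicating 565 -> 888 expansion (the CONVERT_* macros in
 * the accompanying header are authoritative and may differ in detail,
 * e.g. in how the alpha byte is handled):
 *
 *     static inline uint32_t expand_0565 (uint16_t p)
 *     {
 *         uint32_t r = (p >> 11) & 0x1f, g = (p >> 5) & 0x3f, b = p & 0x1f;
 *         r = (r << 3) | (r >> 2);
 *         g = (g << 2) | (g >> 4);
 *         b = (b << 3) | (b >> 2);
 *         return (r << 16) | (g << 8) | b;
 *     }
 */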
CONVERT_1x0565_TO_1x8888 t1, t2, t4, t5 OVER_8888_8_8888 t0, a2, t2, t1, t6, t3, t4, t5, t7 CONVERT_1x8888_TO_1x0565 t1, t3, t4, t5 sh t3, 0(a0) 3: RESTORE_REGS_FROM_STACK 0, s0, s1, s2, s3 j ra nop END(pixman_composite_over_8888_n_0565_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_over_0565_n_0565_asm_mips) /* * a0 - dst (r5g6b5) * a1 - src (r5g6b5) * a2 - mask (32bit constant) * a3 - w */ SAVE_REGS_ON_STACK 20, s0, s1, s2, s3, s4, s5 li t6, 0x00ff00ff li t7, 0xf800f800 li t8, 0x07e007e0 li t9, 0x001F001F beqz a3, 3f nop srl a2, a2, 24 addiu t1, a3, -1 beqz t1, 2f nop 1: lhu t0, 0(a1) /* t0 = source (r5g6b5) */ lhu t1, 2(a1) /* t1 = source (r5g6b5) */ /* a2 = mask (32bit constant) */ lhu t2, 0(a0) /* t2 = destination (r5g6b5) */ lhu t3, 2(a0) /* t3 = destination (r5g6b5) */ addiu a1, a1, 4 CONVERT_2x0565_TO_2x8888 t0, t1, t4, t5, t8, t9, s0, s1, s2, s3 CONVERT_2x0565_TO_2x8888 t2, t3, s0, s1, t8, t9, s2, s3, s4, s5 OVER_2x8888_2x8_2x8888 t4, t5, a2, a2, s0, s1, \ t0, t1, t6, s2, s3, s4, s5, t4, t5 CONVERT_2x8888_TO_2x0565 t0, t1, s0, s1, t7, t8, t9, s2, s3 sh s0, 0(a0) sh s1, 2(a0) addiu a3, a3, -2 addiu t1, a3, -1 bgtz t1, 1b addiu a0, a0, 4 2: beqz a3, 3f nop lhu t0, 0(a1) /* t0 = source (r5g6b5) */ /* a2 = mask (32bit constant) */ lhu t1, 0(a0) /* t1 = destination (r5g6b5) */ CONVERT_1x0565_TO_1x8888 t0, t2, t4, t5 CONVERT_1x0565_TO_1x8888 t1, t3, t4, t5 OVER_8888_8_8888 t2, a2, t3, t0, t6, t1, t4, t5, t7 CONVERT_1x8888_TO_1x0565 t0, t3, t4, t5 sh t3, 0(a0) 3: RESTORE_REGS_FROM_STACK 20, s0, s1, s2, s3, s4, s5 j ra nop END(pixman_composite_over_0565_n_0565_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_over_8888_8_8888_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (a8r8g8b8) * a2 - mask (a8) * a3 - w */ SAVE_REGS_ON_STACK 0, s0, s1 li t4, 0x00ff00ff beqz a3, 3f nop addiu t1, a3, -1 beqz t1, 2f nop 1: lw t0, 0(a1) /* t0 = source (a8r8g8b8) */ lw t1, 4(a1) /* t1 = source (a8r8g8b8) */ lbu t2, 0(a2) /* t2 = mask (a8) */ lbu t3, 1(a2) /* t3 = mask (a8) */ lw t5, 0(a0) /* t5 = destination (a8r8g8b8) */ lw t6, 4(a0) /* t6 = destination (a8r8g8b8) */ addiu a1, a1, 8 addiu a2, a2, 2 OVER_2x8888_2x8_2x8888 t0, t1, t2, t3, t5, t6, \ t7, t8, t4, t9, s0, s1, t0, t1, t2 sw t7, 0(a0) sw t8, 4(a0) addiu a3, a3, -2 addiu t1, a3, -1 bgtz t1, 1b addiu a0, a0, 8 2: beqz a3, 3f nop lw t0, 0(a1) /* t0 = source (a8r8g8b8) */ lbu t1, 0(a2) /* t1 = mask (a8) */ lw t2, 0(a0) /* t2 = destination (a8r8g8b8) */ OVER_8888_8_8888 t0, t1, t2, t3, t4, t5, t6, t7, t8 sw t3, 0(a0) 3: RESTORE_REGS_FROM_STACK 0, s0, s1 j ra nop END(pixman_composite_over_8888_8_8888_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_over_8888_8_0565_asm_mips) /* * a0 - dst (r5g6b5) * a1 - src (a8r8g8b8) * a2 - mask (a8) * a3 - w */ SAVE_REGS_ON_STACK 20, s0, s1, s2, s3, s4, s5 li t6, 0x00ff00ff li t7, 0xf800f800 li t8, 0x07e007e0 li t9, 0x001F001F beqz a3, 3f nop addiu t1, a3, -1 beqz t1, 2f nop 1: lw t0, 0(a1) /* t0 = source (a8r8g8b8) */ lw t1, 4(a1) /* t1 = source (a8r8g8b8) */ lbu t2, 0(a2) /* t2 = mask (a8) */ lbu t3, 1(a2) /* t3 = mask (a8) */ lhu t4, 0(a0) /* t4 = destination (r5g6b5) */ lhu t5, 2(a0) /* t5 = destination (r5g6b5) */ addiu a1, a1, 8 addiu a2, a2, 2 CONVERT_2x0565_TO_2x8888 t4, t5, s0, s1, t8, t9, s2, s3, s4, s5 OVER_2x8888_2x8_2x8888 t0, t1, t2, t3, s0, s1, \ t4, t5, t6, s2, s3, s4, s5, t0, t1 CONVERT_2x8888_TO_2x0565 t4, t5, s0, s1, t7, t8, t9, s2, s3 sh s0, 0(a0) sh s1, 2(a0) addiu a3, a3, -2 addiu t1, a3, -1 bgtz t1, 1b addiu a0, a0, 4 2: beqz a3, 3f nop lw t0, 0(a1) /* t0 = source (a8r8g8b8) */ lbu t1, 0(a2) /* t1 = mask (a8) 
*/ lhu t2, 0(a0) /* t2 = destination (r5g6b5) */ CONVERT_1x0565_TO_1x8888 t2, t3, t4, t5 OVER_8888_8_8888 t0, t1, t3, t2, t6, t4, t5, t7, t8 CONVERT_1x8888_TO_1x0565 t2, t3, t4, t5 sh t3, 0(a0) 3: RESTORE_REGS_FROM_STACK 20, s0, s1, s2, s3, s4, s5 j ra nop END(pixman_composite_over_8888_8_0565_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_over_0565_8_0565_asm_mips) /* * a0 - dst (r5g6b5) * a1 - src (r5g6b5) * a2 - mask (a8) * a3 - w */ SAVE_REGS_ON_STACK 20, s0, s1, s2, s3, s4, s5 li t4, 0xf800f800 li t5, 0x07e007e0 li t6, 0x001F001F li t7, 0x00ff00ff beqz a3, 3f nop addiu t1, a3, -1 beqz t1, 2f nop 1: lhu t0, 0(a1) /* t0 = source (r5g6b5) */ lhu t1, 2(a1) /* t1 = source (r5g6b5) */ lbu t2, 0(a2) /* t2 = mask (a8) */ lbu t3, 1(a2) /* t3 = mask (a8) */ lhu t8, 0(a0) /* t8 = destination (r5g6b5) */ lhu t9, 2(a0) /* t9 = destination (r5g6b5) */ addiu a1, a1, 4 addiu a2, a2, 2 CONVERT_2x0565_TO_2x8888 t0, t1, s0, s1, t5, t6, s2, s3, s4, s5 CONVERT_2x0565_TO_2x8888 t8, t9, s2, s3, t5, t6, s4, s5, t0, t1 OVER_2x8888_2x8_2x8888 s0, s1, t2, t3, s2, s3, \ t0, t1, t7, s4, s5, t8, t9, s0, s1 CONVERT_2x8888_TO_2x0565 t0, t1, s0, s1, t4, t5, t6, s2, s3 sh s0, 0(a0) sh s1, 2(a0) addiu a3, a3, -2 addiu t1, a3, -1 bgtz t1, 1b addiu a0, a0, 4 2: beqz a3, 3f nop lhu t0, 0(a1) /* t0 = source (r5g6b5) */ lbu t1, 0(a2) /* t1 = mask (a8) */ lhu t2, 0(a0) /* t2 = destination (r5g6b5) */ CONVERT_1x0565_TO_1x8888 t0, t3, t4, t5 CONVERT_1x0565_TO_1x8888 t2, t4, t5, t6 OVER_8888_8_8888 t3, t1, t4, t0, t7, t2, t5, t6, t8 CONVERT_1x8888_TO_1x0565 t0, t3, t4, t5 sh t3, 0(a0) 3: RESTORE_REGS_FROM_STACK 20, s0, s1, s2, s3, s4, s5 j ra nop END(pixman_composite_over_0565_8_0565_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_over_8888_8888_8888_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (a8r8g8b8) * a2 - mask (a8r8g8b8) * a3 - w */ SAVE_REGS_ON_STACK 0, s0, s1, s2 li t4, 0x00ff00ff beqz a3, 3f nop addiu t1, a3, -1 beqz t1, 2f nop 1: lw t0, 0(a1) /* t0 = source (a8r8g8b8) */ lw t1, 4(a1) /* t1 = source (a8r8g8b8) */ lw t2, 0(a2) /* t2 = mask (a8r8g8b8) */ lw t3, 4(a2) /* t3 = mask (a8r8g8b8) */ lw t5, 0(a0) /* t5 = destination (a8r8g8b8) */ lw t6, 4(a0) /* t6 = destination (a8r8g8b8) */ addiu a1, a1, 8 addiu a2, a2, 8 srl t2, t2, 24 srl t3, t3, 24 OVER_2x8888_2x8_2x8888 t0, t1, t2, t3, t5, t6, t7, t8, t4, t9, s0, s1, s2, t0, t1 sw t7, 0(a0) sw t8, 4(a0) addiu a3, a3, -2 addiu t1, a3, -1 bgtz t1, 1b addiu a0, a0, 8 2: beqz a3, 3f nop lw t0, 0(a1) /* t0 = source (a8r8g8b8) */ lw t1, 0(a2) /* t1 = mask (a8r8g8b8) */ lw t2, 0(a0) /* t2 = destination (a8r8g8b8) */ srl t1, t1, 24 OVER_8888_8_8888 t0, t1, t2, t3, t4, t5, t6, t7, t8 sw t3, 0(a0) 3: RESTORE_REGS_FROM_STACK 0, s0, s1, s2 j ra nop END(pixman_composite_over_8888_8888_8888_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_over_8888_8888_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (a8r8g8b8) * a2 - w */ SAVE_REGS_ON_STACK 0, s0, s1, s2 li t4, 0x00ff00ff beqz a2, 3f nop addiu t1, a2, -1 beqz t1, 2f nop 1: lw t0, 0(a1) /* t0 = source (a8r8g8b8) */ lw t1, 4(a1) /* t1 = source (a8r8g8b8) */ lw t2, 0(a0) /* t2 = destination (a8r8g8b8) */ lw t3, 4(a0) /* t3 = destination (a8r8g8b8) */ addiu a1, a1, 8 not t5, t0 srl t5, t5, 24 not t6, t1 srl t6, t6, 24 or t7, t5, t6 beqz t7, 11f or t8, t0, t1 beqz t8, 12f MIPS_2xUN8x4_MUL_2xUN8 t2, t3, t5, t6, t7, t8, t4, t9, s0, s1, s2, t2, t3 addu_s.qb t0, t7, t0 addu_s.qb t1, t8, t1 11: sw t0, 0(a0) sw t1, 4(a0) 12: addiu a2, a2, -2 addiu t1, a2, -1 bgtz t1, 1b addiu a0, a0, 8 2: beqz a2, 3f nop lw t0, 0(a1) /* t0 = source (a8r8g8b8) */ lw t1, 0(a0) 
/* t1 = destination (a8r8g8b8) */ addiu a1, a1, 4 not t2, t0 srl t2, t2, 24 beqz t2, 21f nop beqz t0, 3f MIPS_UN8x4_MUL_UN8 t1, t2, t3, t4, t5, t6, t7 addu_s.qb t0, t3, t0 21: sw t0, 0(a0) 3: RESTORE_REGS_FROM_STACK 0, s0, s1, s2 j ra nop END(pixman_composite_over_8888_8888_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_over_8888_0565_asm_mips) /* * a0 - dst (r5g6b5) * a1 - src (a8r8g8b8) * a2 - w */ SAVE_REGS_ON_STACK 8, s0, s1, s2, s3, s4, s5 li t4, 0x00ff00ff li s3, 0xf800f800 li s4, 0x07e007e0 li s5, 0x001F001F beqz a2, 3f nop addiu t1, a2, -1 beqz t1, 2f nop 1: lw t0, 0(a1) /* t0 = source (a8r8g8b8) */ lw t1, 4(a1) /* t1 = source (a8r8g8b8) */ lhu t2, 0(a0) /* t2 = destination (r5g6b5) */ lhu t3, 2(a0) /* t3 = destination (r5g6b5) */ addiu a1, a1, 8 not t5, t0 srl t5, t5, 24 not t6, t1 srl t6, t6, 24 or t7, t5, t6 beqz t7, 11f or t8, t0, t1 beqz t8, 12f CONVERT_2x0565_TO_2x8888 t2, t3, s0, s1, s4, s5, t7, t8, t9, s2 MIPS_2xUN8x4_MUL_2xUN8 s0, s1, t5, t6, t7, t8, t4, t9, t2, t3, s2, s0, s1 addu_s.qb t0, t7, t0 addu_s.qb t1, t8, t1 11: CONVERT_2x8888_TO_2x0565 t0, t1, t7, t8, s3, s4, s5, t2, t3 sh t7, 0(a0) sh t8, 2(a0) 12: addiu a2, a2, -2 addiu t1, a2, -1 bgtz t1, 1b addiu a0, a0, 4 2: beqz a2, 3f nop lw t0, 0(a1) /* t0 = source (a8r8g8b8) */ lhu t1, 0(a0) /* t1 = destination (r5g6b5) */ addiu a1, a1, 4 not t2, t0 srl t2, t2, 24 beqz t2, 21f nop beqz t0, 3f CONVERT_1x0565_TO_1x8888 t1, s0, t8, t9 MIPS_UN8x4_MUL_UN8 s0, t2, t3, t4, t5, t6, t7 addu_s.qb t0, t3, t0 21: CONVERT_1x8888_TO_1x0565 t0, s0, t8, t9 sh s0, 0(a0) 3: RESTORE_REGS_FROM_STACK 8, s0, s1, s2, s3, s4, s5 j ra nop END(pixman_composite_over_8888_0565_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_over_n_0565_asm_mips) /* * a0 - dst (r5g6b5) * a1 - src (32bit constant) * a2 - w */ beqz a2, 5f nop not t0, a1 srl t0, t0, 24 bgtz t0, 1f nop CONVERT_1x8888_TO_1x0565 a1, t1, t2, t3 0: sh t1, 0(a0) addiu a2, a2, -1 bgtz a2, 0b addiu a0, a0, 2 j ra nop 1: SAVE_REGS_ON_STACK 0, s0, s1, s2 li t4, 0x00ff00ff li t5, 0xf800f800 li t6, 0x07e007e0 li t7, 0x001F001F addiu t1, a2, -1 beqz t1, 3f nop 2: lhu t1, 0(a0) /* t1 = destination (r5g6b5) */ lhu t2, 2(a0) /* t2 = destination (r5g6b5) */ CONVERT_2x0565_TO_2x8888 t1, t2, t3, t8, t6, t7, t9, s0, s1, s2 MIPS_2xUN8x4_MUL_2xUN8 t3, t8, t0, t0, t1, t2, t4, t9, s0, s1, s2, t3, t8 addu_s.qb t1, t1, a1 addu_s.qb t2, t2, a1 CONVERT_2x8888_TO_2x0565 t1, t2, t3, t8, t5, t6, t7, s0, s1 sh t3, 0(a0) sh t8, 2(a0) addiu a2, a2, -2 addiu t1, a2, -1 bgtz t1, 2b addiu a0, a0, 4 3: beqz a2, 4f nop lhu t1, 0(a0) /* t1 = destination (r5g6b5) */ CONVERT_1x0565_TO_1x8888 t1, t2, s0, s1 MIPS_UN8x4_MUL_UN8 t2, t0, t1, t4, s0, s1, s2 addu_s.qb t1, t1, a1 CONVERT_1x8888_TO_1x0565 t1, t2, s0, s1 sh t2, 0(a0) 4: RESTORE_REGS_FROM_STACK 0, s0, s1, s2 5: j ra nop END(pixman_composite_over_n_0565_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_over_n_8888_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (32bit constant) * a2 - w */ beqz a2, 5f nop not t0, a1 srl t0, t0, 24 bgtz t0, 1f nop 0: sw a1, 0(a0) addiu a2, a2, -1 bgtz a2, 0b addiu a0, a0, 4 j ra nop 1: SAVE_REGS_ON_STACK 0, s0, s1, s2 li t4, 0x00ff00ff addiu t1, a2, -1 beqz t1, 3f nop 2: lw t2, 0(a0) /* t2 = destination (a8r8g8b8) */ lw t3, 4(a0) /* t3 = destination (a8r8g8b8) */ MIPS_2xUN8x4_MUL_2xUN8 t2, t3, t0, t0, t7, t8, t4, t9, s0, s1, s2, t2, t3 addu_s.qb t7, t7, a1 addu_s.qb t8, t8, a1 sw t7, 0(a0) sw t8, 4(a0) addiu a2, a2, -2 addiu t1, a2, -1 bgtz t1, 2b addiu a0, a0, 8 3: beqz a2, 4f nop lw t1, 0(a0) /* t1 = destination (a8r8g8b8) */ MIPS_UN8x4_MUL_UN8 t1, t0, 
t3, t4, t5, t6, t7 addu_s.qb t3, t3, a1 sw t3, 0(a0) 4: RESTORE_REGS_FROM_STACK 0, s0, s1, s2 5: j ra nop END(pixman_composite_over_n_8888_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_add_8_8_8_asm_mips) /* * a0 - dst (a8) * a1 - src (a8) * a2 - mask (a8) * a3 - w */ SAVE_REGS_ON_STACK 0, v0, v1 li t9, 0x00ff00ff beqz a3, 3f nop srl v0, a3, 2 /* v0 = how many multiples of 4 dst pixels */ beqz v0, 1f /* branch if less than 4 src pixels */ nop 0: beqz v0, 1f addiu v0, v0, -1 lbu t0, 0(a2) lbu t1, 1(a2) lbu t2, 2(a2) lbu t3, 3(a2) lbu t4, 0(a0) lbu t5, 1(a0) lbu t6, 2(a0) lbu t7, 3(a0) addiu a2, a2, 4 precr_sra.ph.w t1, t0, 0 precr_sra.ph.w t3, t2, 0 precr_sra.ph.w t5, t4, 0 precr_sra.ph.w t7, t6, 0 precr.qb.ph t0, t3, t1 precr.qb.ph t1, t7, t5 lbu t4, 0(a1) lbu v1, 1(a1) lbu t7, 2(a1) lbu t8, 3(a1) addiu a1, a1, 4 precr_sra.ph.w v1, t4, 0 precr_sra.ph.w t8, t7, 0 muleu_s.ph.qbl t2, t0, t8 muleu_s.ph.qbr t3, t0, v1 shra_r.ph t4, t2, 8 shra_r.ph t5, t3, 8 and t4, t4, t9 and t5, t5, t9 addq.ph t2, t2, t4 addq.ph t3, t3, t5 shra_r.ph t2, t2, 8 shra_r.ph t3, t3, 8 precr.qb.ph t0, t2, t3 addu_s.qb t2, t0, t1 sb t2, 0(a0) srl t2, t2, 8 sb t2, 1(a0) srl t2, t2, 8 sb t2, 2(a0) srl t2, t2, 8 sb t2, 3(a0) addiu a3, a3, -4 b 0b addiu a0, a0, 4 1: beqz a3, 3f nop 2: lbu t8, 0(a1) lbu t0, 0(a2) lbu t1, 0(a0) addiu a1, a1, 1 addiu a2, a2, 1 mul t2, t0, t8 shra_r.ph t3, t2, 8 andi t3, t3, 0xff addq.ph t2, t2, t3 shra_r.ph t2, t2, 8 andi t2, t2, 0xff addu_s.qb t2, t2, t1 sb t2, 0(a0) addiu a3, a3, -1 bnez a3, 2b addiu a0, a0, 1 3: RESTORE_REGS_FROM_STACK 0, v0, v1 j ra nop END(pixman_composite_add_8_8_8_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_add_n_8_8_asm_mips) /* * a0 - dst (a8) * a1 - src (32bit constant) * a2 - mask (a8) * a3 - w */ SAVE_REGS_ON_STACK 0, v0 li t9, 0x00ff00ff beqz a3, 3f nop srl v0, a3, 2 /* v0 = how many multiples of 4 dst pixels */ beqz v0, 1f /* branch if less than 4 src pixels */ nop srl t8, a1, 24 replv.ph t8, t8 0: beqz v0, 1f addiu v0, v0, -1 lbu t0, 0(a2) lbu t1, 1(a2) lbu t2, 2(a2) lbu t3, 3(a2) lbu t4, 0(a0) lbu t5, 1(a0) lbu t6, 2(a0) lbu t7, 3(a0) addiu a2, a2, 4 precr_sra.ph.w t1, t0, 0 precr_sra.ph.w t3, t2, 0 precr_sra.ph.w t5, t4, 0 precr_sra.ph.w t7, t6, 0 precr.qb.ph t0, t3, t1 precr.qb.ph t1, t7, t5 muleu_s.ph.qbl t2, t0, t8 muleu_s.ph.qbr t3, t0, t8 shra_r.ph t4, t2, 8 shra_r.ph t5, t3, 8 and t4, t4, t9 and t5, t5, t9 addq.ph t2, t2, t4 addq.ph t3, t3, t5 shra_r.ph t2, t2, 8 shra_r.ph t3, t3, 8 precr.qb.ph t0, t2, t3 addu_s.qb t2, t0, t1 sb t2, 0(a0) srl t2, t2, 8 sb t2, 1(a0) srl t2, t2, 8 sb t2, 2(a0) srl t2, t2, 8 sb t2, 3(a0) addiu a3, a3, -4 b 0b addiu a0, a0, 4 1: beqz a3, 3f nop srl t8, a1, 24 2: lbu t0, 0(a2) lbu t1, 0(a0) addiu a2, a2, 1 mul t2, t0, t8 shra_r.ph t3, t2, 8 andi t3, t3, 0xff addq.ph t2, t2, t3 shra_r.ph t2, t2, 8 andi t2, t2, 0xff addu_s.qb t2, t2, t1 sb t2, 0(a0) addiu a3, a3, -1 bnez a3, 2b addiu a0, a0, 1 3: RESTORE_REGS_FROM_STACK 0, v0 j ra nop END(pixman_composite_add_n_8_8_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_add_n_8_8888_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (32bit constant) * a2 - mask (a8) * a3 - w */ SAVE_REGS_ON_STACK 0, s0, s1, s2 li t4, 0x00ff00ff beqz a3, 3f nop addiu t1, a3, -1 beqz t1, 2f nop 1: /* a1 = source (32bit constant) */ lbu t0, 0(a2) /* t0 = mask (a8) */ lbu t1, 1(a2) /* t1 = mask (a8) */ lw t2, 0(a0) /* t2 = destination (a8r8g8b8) */ lw t3, 4(a0) /* t3 = destination (a8r8g8b8) */ addiu a2, a2, 2 MIPS_2xUN8x4_MUL_2xUN8_ADD_2xUN8x4 a1, a1, \ t0, t1, \ t2, t3, \ t5, t6, \ t4, t7, t8, t9, s0, s1, s2 sw t5, 
0(a0) sw t6, 4(a0) addiu a3, a3, -2 addiu t1, a3, -1 bgtz t1, 1b addiu a0, a0, 8 2: beqz a3, 3f nop /* a1 = source (32bit constant) */ lbu t0, 0(a2) /* t0 = mask (a8) */ lw t1, 0(a0) /* t1 = destination (a8r8g8b8) */ MIPS_UN8x4_MUL_UN8_ADD_UN8x4 a1, t0, t1, t2, t4, t3, t5, t6 sw t2, 0(a0) 3: RESTORE_REGS_FROM_STACK 0, s0, s1, s2 j ra nop END(pixman_composite_add_n_8_8888_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_add_0565_8_0565_asm_mips) /* * a0 - dst (r5g6b5) * a1 - src (r5g6b5) * a2 - mask (a8) * a3 - w */ SAVE_REGS_ON_STACK 20, s0, s1, s2, s3, s4, s5, s6, s7 li t4, 0xf800f800 li t5, 0x07e007e0 li t6, 0x001F001F li t7, 0x00ff00ff beqz a3, 3f nop addiu t1, a3, -1 beqz t1, 2f nop 1: lhu t0, 0(a1) /* t0 = source (r5g6b5) */ lhu t1, 2(a1) /* t1 = source (r5g6b5) */ lbu t2, 0(a2) /* t2 = mask (a8) */ lbu t3, 1(a2) /* t3 = mask (a8) */ lhu t8, 0(a0) /* t8 = destination (r5g6b5) */ lhu t9, 2(a0) /* t9 = destination (r5g6b5) */ addiu a1, a1, 4 addiu a2, a2, 2 CONVERT_2x0565_TO_2x8888 t0, t1, s0, s1, t5, t6, s2, s3, s4, s5 CONVERT_2x0565_TO_2x8888 t8, t9, s2, s3, t5, t6, s4, s5, s6, s7 MIPS_2xUN8x4_MUL_2xUN8_ADD_2xUN8x4 s0, s1, \ t2, t3, \ s2, s3, \ t0, t1, \ t7, s4, s5, s6, s7, t8, t9 CONVERT_2x8888_TO_2x0565 t0, t1, s0, s1, t4, t5, t6, s2, s3 sh s0, 0(a0) sh s1, 2(a0) addiu a3, a3, -2 addiu t1, a3, -1 bgtz t1, 1b addiu a0, a0, 4 2: beqz a3, 3f nop lhu t0, 0(a1) /* t0 = source (r5g6b5) */ lbu t1, 0(a2) /* t1 = mask (a8) */ lhu t2, 0(a0) /* t2 = destination (r5g6b5) */ CONVERT_1x0565_TO_1x8888 t0, t3, t4, t5 CONVERT_1x0565_TO_1x8888 t2, t4, t5, t6 MIPS_UN8x4_MUL_UN8_ADD_UN8x4 t3, t1, t4, t0, t7, t2, t5, t6 CONVERT_1x8888_TO_1x0565 t0, t3, t4, t5 sh t3, 0(a0) 3: RESTORE_REGS_FROM_STACK 20, s0, s1, s2, s3, s4, s5, s6, s7 j ra nop END(pixman_composite_add_0565_8_0565_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_add_8888_8_8888_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (a8r8g8b8) * a2 - mask (a8) * a3 - w */ SAVE_REGS_ON_STACK 0, s0, s1, s2 li t4, 0x00ff00ff beqz a3, 3f nop addiu t1, a3, -1 beqz t1, 2f nop 1: lw t0, 0(a1) /* t0 = source (a8r8g8b8) */ lw t1, 4(a1) /* t1 = source (a8r8g8b8) */ lbu t2, 0(a2) /* t2 = mask (a8) */ lbu t3, 1(a2) /* t3 = mask (a8) */ lw t5, 0(a0) /* t5 = destination (a8r8g8b8) */ lw t6, 4(a0) /* t6 = destination (a8r8g8b8) */ addiu a1, a1, 8 addiu a2, a2, 2 MIPS_2xUN8x4_MUL_2xUN8_ADD_2xUN8x4 t0, t1, \ t2, t3, \ t5, t6, \ t7, t8, \ t4, t9, s0, s1, s2, t0, t1 sw t7, 0(a0) sw t8, 4(a0) addiu a3, a3, -2 addiu t1, a3, -1 bgtz t1, 1b addiu a0, a0, 8 2: beqz a3, 3f nop lw t0, 0(a1) /* t0 = source (a8r8g8b8) */ lbu t1, 0(a2) /* t1 = mask (a8) */ lw t2, 0(a0) /* t2 = destination (a8r8g8b8) */ MIPS_UN8x4_MUL_UN8_ADD_UN8x4 t0, t1, t2, t3, t4, t5, t6, t7 sw t3, 0(a0) 3: RESTORE_REGS_FROM_STACK 0, s0, s1, s2 j ra nop END(pixman_composite_add_8888_8_8888_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_add_8888_n_8888_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (a8r8g8b8) * a2 - mask (32bit constant) * a3 - w */ SAVE_REGS_ON_STACK 0, s0, s1, s2 li t4, 0x00ff00ff beqz a3, 3f nop srl a2, a2, 24 addiu t1, a3, -1 beqz t1, 2f nop 1: lw t0, 0(a1) /* t0 = source (a8r8g8b8) */ lw t1, 4(a1) /* t1 = source (a8r8g8b8) */ /* a2 = mask (32bit constant) */ lw t2, 0(a0) /* t2 = destination (a8r8g8b8) */ lw t3, 4(a0) /* t3 = destination (a8r8g8b8) */ addiu a1, a1, 8 MIPS_2xUN8x4_MUL_2xUN8_ADD_2xUN8x4 t0, t1, \ a2, a2, \ t2, t3, \ t5, t6, \ t4, t7, t8, t9, s0, s1, s2 sw t5, 0(a0) sw t6, 4(a0) addiu a3, a3, -2 addiu t1, a3, -1 bgtz t1, 1b addiu a0, a0, 8 2: beqz a3, 3f nop lw t0, 0(a1) /* t0 = source 
(a8r8g8b8) */ /* a2 = mask (32bit constant) */ lw t1, 0(a0) /* t1 = destination (a8r8g8b8) */ MIPS_UN8x4_MUL_UN8_ADD_UN8x4 t0, a2, t1, t3, t4, t5, t6, t7 sw t3, 0(a0) 3: RESTORE_REGS_FROM_STACK 0, s0, s1, s2 j ra nop END(pixman_composite_add_8888_n_8888_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_add_8888_8888_8888_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (a8r8g8b8) * a2 - mask (a8r8g8b8) * a3 - w */ SAVE_REGS_ON_STACK 0, s0, s1, s2 li t4, 0x00ff00ff beqz a3, 3f nop addiu t1, a3, -1 beqz t1, 2f nop 1: lw t0, 0(a1) /* t0 = source (a8r8g8b8) */ lw t1, 4(a1) /* t1 = source (a8r8g8b8) */ lw t2, 0(a2) /* t2 = mask (a8r8g8b8) */ lw t3, 4(a2) /* t3 = mask (a8r8g8b8) */ lw t5, 0(a0) /* t5 = destination (a8r8g8b8) */ lw t6, 4(a0) /* t6 = destination (a8r8g8b8) */ addiu a1, a1, 8 addiu a2, a2, 8 srl t2, t2, 24 srl t3, t3, 24 MIPS_2xUN8x4_MUL_2xUN8_ADD_2xUN8x4 t0, t1, \ t2, t3, \ t5, t6, \ t7, t8, \ t4, t9, s0, s1, s2, t0, t1 sw t7, 0(a0) sw t8, 4(a0) addiu a3, a3, -2 addiu t1, a3, -1 bgtz t1, 1b addiu a0, a0, 8 2: beqz a3, 3f nop lw t0, 0(a1) /* t0 = source (a8r8g8b8) */ lw t1, 0(a2) /* t1 = mask (a8r8g8b8) */ lw t2, 0(a0) /* t2 = destination (a8r8g8b8) */ srl t1, t1, 24 MIPS_UN8x4_MUL_UN8_ADD_UN8x4 t0, t1, t2, t3, t4, t5, t6, t7 sw t3, 0(a0) 3: RESTORE_REGS_FROM_STACK 0, s0, s1, s2 j ra nop END(pixman_composite_add_8888_8888_8888_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_add_8_8_asm_mips) /* * a0 - dst (a8) * a1 - src (a8) * a2 - w */ beqz a2, 3f nop srl t9, a2, 2 /* t9 = how many multiples of 4 dst pixels */ beqz t9, 1f /* branch if less than 4 src pixels */ nop 0: beqz t9, 1f addiu t9, t9, -1 lbu t0, 0(a1) lbu t1, 1(a1) lbu t2, 2(a1) lbu t3, 3(a1) lbu t4, 0(a0) lbu t5, 1(a0) lbu t6, 2(a0) lbu t7, 3(a0) addiu a1, a1, 4 precr_sra.ph.w t1, t0, 0 precr_sra.ph.w t3, t2, 0 precr_sra.ph.w t5, t4, 0 precr_sra.ph.w t7, t6, 0 precr.qb.ph t0, t3, t1 precr.qb.ph t1, t7, t5 addu_s.qb t2, t0, t1 sb t2, 0(a0) srl t2, t2, 8 sb t2, 1(a0) srl t2, t2, 8 sb t2, 2(a0) srl t2, t2, 8 sb t2, 3(a0) addiu a2, a2, -4 b 0b addiu a0, a0, 4 1: beqz a2, 3f nop 2: lbu t0, 0(a1) lbu t1, 0(a0) addiu a1, a1, 1 addu_s.qb t2, t0, t1 sb t2, 0(a0) addiu a2, a2, -1 bnez a2, 2b addiu a0, a0, 1 3: j ra nop END(pixman_composite_add_8_8_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_add_8888_8888_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (a8r8g8b8) * a2 - w */ beqz a2, 4f nop srl t9, a2, 2 /* t1 = how many multiples of 4 src pixels */ beqz t9, 3f /* branch if less than 4 src pixels */ nop 1: addiu t9, t9, -1 beqz t9, 2f addiu a2, a2, -4 lw t0, 0(a1) lw t1, 4(a1) lw t2, 8(a1) lw t3, 12(a1) lw t4, 0(a0) lw t5, 4(a0) lw t6, 8(a0) lw t7, 12(a0) addiu a1, a1, 16 addu_s.qb t4, t4, t0 addu_s.qb t5, t5, t1 addu_s.qb t6, t6, t2 addu_s.qb t7, t7, t3 sw t4, 0(a0) sw t5, 4(a0) sw t6, 8(a0) sw t7, 12(a0) b 1b addiu a0, a0, 16 2: lw t0, 0(a1) lw t1, 4(a1) lw t2, 8(a1) lw t3, 12(a1) lw t4, 0(a0) lw t5, 4(a0) lw t6, 8(a0) lw t7, 12(a0) addiu a1, a1, 16 addu_s.qb t4, t4, t0 addu_s.qb t5, t5, t1 addu_s.qb t6, t6, t2 addu_s.qb t7, t7, t3 sw t4, 0(a0) sw t5, 4(a0) sw t6, 8(a0) sw t7, 12(a0) beqz a2, 4f addiu a0, a0, 16 3: lw t0, 0(a1) lw t1, 0(a0) addiu a1, a1, 4 addiu a2, a2, -1 addu_s.qb t1, t1, t0 sw t1, 0(a0) bnez a2, 3b addiu a0, a0, 4 4: jr ra nop END(pixman_composite_add_8888_8888_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_out_reverse_8_0565_asm_mips) /* * a0 - dst (r5g6b5) * a1 - src (a8) * a2 - w */ beqz a2, 4f nop SAVE_REGS_ON_STACK 0, s0, s1, s2, s3 li t2, 0xf800f800 li t3, 0x07e007e0 li t4, 0x001F001F li t5, 0x00ff00ff addiu t1, a2, -1 beqz 
t1, 2f nop 1: lbu t0, 0(a1) /* t0 = source (a8) */ lbu t1, 1(a1) /* t1 = source (a8) */ lhu t6, 0(a0) /* t6 = destination (r5g6b5) */ lhu t7, 2(a0) /* t7 = destination (r5g6b5) */ addiu a1, a1, 2 not t0, t0 not t1, t1 andi t0, 0xff /* t0 = neg source1 */ andi t1, 0xff /* t1 = neg source2 */ CONVERT_2x0565_TO_2x8888 t6, t7, t8, t9, t3, t4, s0, s1, s2, s3 MIPS_2xUN8x4_MUL_2xUN8 t8, t9, t0, t1, t6, t7, t5, s0, s1, s2, s3, t8, t9 CONVERT_2x8888_TO_2x0565 t6, t7, t8, t9, t2, t3, t4, s0, s1 sh t8, 0(a0) sh t9, 2(a0) addiu a2, a2, -2 addiu t1, a2, -1 bgtz t1, 1b addiu a0, a0, 4 2: beqz a2, 3f nop lbu t0, 0(a1) /* t0 = source (a8) */ lhu t1, 0(a0) /* t1 = destination (r5g6b5) */ not t0, t0 andi t0, 0xff /* t0 = neg source */ CONVERT_1x0565_TO_1x8888 t1, t2, t3, t4 MIPS_UN8x4_MUL_UN8 t2, t0, t1, t5, t3, t4, t6 CONVERT_1x8888_TO_1x0565 t1, t2, t3, t4 sh t2, 0(a0) 3: RESTORE_REGS_FROM_STACK 0, s0, s1, s2, s3 4: j ra nop END(pixman_composite_out_reverse_8_0565_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_out_reverse_8_8888_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (a8) * a2 - w */ beqz a2, 3f nop li t4, 0x00ff00ff addiu t1, a2, -1 beqz t1, 2f nop 1: lbu t0, 0(a1) /* t0 = source (a8) */ lbu t1, 1(a1) /* t1 = source (a8) */ lw t2, 0(a0) /* t2 = destination (a8r8g8b8) */ lw t3, 4(a0) /* t3 = destination (a8r8g8b8) */ addiu a1, a1, 2 not t0, t0 not t1, t1 andi t0, 0xff /* t0 = neg source */ andi t1, 0xff /* t1 = neg source */ MIPS_2xUN8x4_MUL_2xUN8 t2, t3, t0, t1, t5, t6, t4, t7, t8, t9, t2, t3, t0 sw t5, 0(a0) sw t6, 4(a0) addiu a2, a2, -2 addiu t1, a2, -1 bgtz t1, 1b addiu a0, a0, 8 2: beqz a2, 3f nop lbu t0, 0(a1) /* t0 = source (a8) */ lw t1, 0(a0) /* t1 = destination (a8r8g8b8) */ not t0, t0 andi t0, 0xff /* t0 = neg source */ MIPS_UN8x4_MUL_UN8 t1, t0, t2, t4, t3, t5, t6 sw t2, 0(a0) 3: j ra nop END(pixman_composite_out_reverse_8_8888_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_over_reverse_n_8888_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (32bit constant) * a2 - w */ beqz a2, 5f nop SAVE_REGS_ON_STACK 20, s0, s1, s2, s3, s4, s5, s6, s7 li t0, 0x00ff00ff srl t9, a2, 2 /* t9 = how many multiples of 4 src pixels */ beqz t9, 2f /* branch if less than 4 src pixels */ nop 1: beqz t9, 2f addiu t9, t9, -1 lw t1, 0(a0) lw t2, 4(a0) lw t3, 8(a0) lw t4, 12(a0) addiu a2, a2, -4 not t5, t1 not t6, t2 not t7, t3 not t8, t4 srl t5, t5, 24 srl t6, t6, 24 srl t7, t7, 24 srl t8, t8, 24 replv.ph t5, t5 replv.ph t6, t6 replv.ph t7, t7 replv.ph t8, t8 muleu_s.ph.qbl s0, a1, t5 muleu_s.ph.qbr s1, a1, t5 muleu_s.ph.qbl s2, a1, t6 muleu_s.ph.qbr s3, a1, t6 muleu_s.ph.qbl s4, a1, t7 muleu_s.ph.qbr s5, a1, t7 muleu_s.ph.qbl s6, a1, t8 muleu_s.ph.qbr s7, a1, t8 shra_r.ph t5, s0, 8 shra_r.ph t6, s1, 8 shra_r.ph t7, s2, 8 shra_r.ph t8, s3, 8 and t5, t5, t0 and t6, t6, t0 and t7, t7, t0 and t8, t8, t0 addq.ph s0, s0, t5 addq.ph s1, s1, t6 addq.ph s2, s2, t7 addq.ph s3, s3, t8 shra_r.ph s0, s0, 8 shra_r.ph s1, s1, 8 shra_r.ph s2, s2, 8 shra_r.ph s3, s3, 8 shra_r.ph t5, s4, 8 shra_r.ph t6, s5, 8 shra_r.ph t7, s6, 8 shra_r.ph t8, s7, 8 and t5, t5, t0 and t6, t6, t0 and t7, t7, t0 and t8, t8, t0 addq.ph s4, s4, t5 addq.ph s5, s5, t6 addq.ph s6, s6, t7 addq.ph s7, s7, t8 shra_r.ph s4, s4, 8 shra_r.ph s5, s5, 8 shra_r.ph s6, s6, 8 shra_r.ph s7, s7, 8 precr.qb.ph t5, s0, s1 precr.qb.ph t6, s2, s3 precr.qb.ph t7, s4, s5 precr.qb.ph t8, s6, s7 addu_s.qb t5, t1, t5 addu_s.qb t6, t2, t6 addu_s.qb t7, t3, t7 addu_s.qb t8, t4, t8 sw t5, 0(a0) sw t6, 4(a0) sw t7, 8(a0) sw t8, 12(a0) b 1b addiu a0, a0, 16 2: beqz a2, 4f nop 3: lw t1, 
0(a0) not t2, t1 srl t2, t2, 24 replv.ph t2, t2 muleu_s.ph.qbl t4, a1, t2 muleu_s.ph.qbr t5, a1, t2 shra_r.ph t6, t4, 8 shra_r.ph t7, t5, 8 and t6,t6,t0 and t7,t7,t0 addq.ph t8, t4, t6 addq.ph t9, t5, t7 shra_r.ph t8, t8, 8 shra_r.ph t9, t9, 8 precr.qb.ph t9, t8, t9 addu_s.qb t9, t1, t9 sw t9, 0(a0) addiu a2, a2, -1 bnez a2, 3b addiu a0, a0, 4 4: RESTORE_REGS_FROM_STACK 20, s0, s1, s2, s3, s4, s5, s6, s7 5: j ra nop END(pixman_composite_over_reverse_n_8888_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_in_n_8_asm_mips) /* * a0 - dst (a8) * a1 - src (32bit constant) * a2 - w */ li t9, 0x00ff00ff beqz a2, 3f nop srl t7, a2, 2 /* t7 = how many multiples of 4 dst pixels */ beqz t7, 1f /* branch if less than 4 src pixels */ nop srl t8, a1, 24 replv.ph t8, t8 0: beqz t7, 1f addiu t7, t7, -1 lbu t0, 0(a0) lbu t1, 1(a0) lbu t2, 2(a0) lbu t3, 3(a0) precr_sra.ph.w t1, t0, 0 precr_sra.ph.w t3, t2, 0 precr.qb.ph t0, t3, t1 muleu_s.ph.qbl t2, t0, t8 muleu_s.ph.qbr t3, t0, t8 shra_r.ph t4, t2, 8 shra_r.ph t5, t3, 8 and t4, t4, t9 and t5, t5, t9 addq.ph t2, t2, t4 addq.ph t3, t3, t5 shra_r.ph t2, t2, 8 shra_r.ph t3, t3, 8 precr.qb.ph t2, t2, t3 sb t2, 0(a0) srl t2, t2, 8 sb t2, 1(a0) srl t2, t2, 8 sb t2, 2(a0) srl t2, t2, 8 sb t2, 3(a0) addiu a2, a2, -4 b 0b addiu a0, a0, 4 1: beqz a2, 3f nop srl t8, a1, 24 2: lbu t0, 0(a0) mul t2, t0, t8 shra_r.ph t3, t2, 8 andi t3, t3, 0x00ff addq.ph t2, t2, t3 shra_r.ph t2, t2, 8 sb t2, 0(a0) addiu a2, a2, -1 bnez a2, 2b addiu a0, a0, 1 3: j ra nop END(pixman_composite_in_n_8_asm_mips) LEAF_MIPS_DSPR2(pixman_scaled_nearest_scanline_8888_8888_OVER_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (a8r8g8b8) * a2 - w * a3 - vx * 16(sp) - unit_x */ SAVE_REGS_ON_STACK 0, s0, s1, s2, s3 lw t8, 16(sp) /* t8 = unit_x */ li t6, 0x00ff00ff beqz a2, 3f nop addiu t1, a2, -1 beqz t1, 2f nop 1: sra t0, a3, 16 /* t0 = vx >> 16 */ sll t0, t0, 2 /* t0 = t0 * 4 (a8r8g8b8) */ addu t0, a1, t0 lw t0, 0(t0) /* t0 = source (a8r8g8b8) */ addu a3, a3, t8 /* a3 = vx + unit_x */ sra t1, a3, 16 /* t0 = vx >> 16 */ sll t1, t1, 2 /* t0 = t0 * 4 (a8r8g8b8) */ addu t1, a1, t1 lw t1, 0(t1) /* t1 = source (a8r8g8b8) */ addu a3, a3, t8 /* a3 = vx + unit_x */ lw t2, 0(a0) /* t2 = destination (a8r8g8b8) */ lw t3, 4(a0) /* t3 = destination (a8r8g8b8) */ OVER_2x8888_2x8888 t0, t1, t2, t3, t4, t5, t6, t7, t9, s0, s1, s2, s3 sw t4, 0(a0) sw t5, 4(a0) addiu a2, a2, -2 addiu t1, a2, -1 bgtz t1, 1b addiu a0, a0, 8 2: beqz a2, 3f nop sra t0, a3, 16 /* t0 = vx >> 16 */ sll t0, t0, 2 /* t0 = t0 * 4 (a8r8g8b8) */ addu t0, a1, t0 lw t0, 0(t0) /* t0 = source (a8r8g8b8) */ lw t1, 0(a0) /* t1 = destination (a8r8g8b8) */ addu a3, a3, t8 /* a3 = vx + unit_x */ OVER_8888_8888 t0, t1, t2, t6, t4, t5, t3, t7 sw t2, 0(a0) 3: RESTORE_REGS_FROM_STACK 0, s0, s1, s2, s3 j ra nop END(pixman_scaled_nearest_scanline_8888_8888_OVER_asm_mips) LEAF_MIPS_DSPR2(pixman_scaled_nearest_scanline_8888_0565_OVER_asm_mips) /* * a0 - dst (r5g6b5) * a1 - src (a8r8g8b8) * a2 - w * a3 - vx * 16(sp) - unit_x */ SAVE_REGS_ON_STACK 24, s0, s1, s2, s3, s4, v0, v1 lw t8, 40(sp) /* t8 = unit_x */ li t4, 0x00ff00ff li t5, 0xf800f800 li t6, 0x07e007e0 li t7, 0x001F001F beqz a2, 3f nop addiu t1, a2, -1 beqz t1, 2f nop 1: sra t0, a3, 16 /* t0 = vx >> 16 */ sll t0, t0, 2 /* t0 = t0 * 4 (a8r8g8b8) */ addu t0, a1, t0 lw t0, 0(t0) /* t0 = source (a8r8g8b8) */ addu a3, a3, t8 /* a3 = vx + unit_x */ sra t1, a3, 16 /* t0 = vx >> 16 */ sll t1, t1, 2 /* t0 = t0 * 4 (a8r8g8b8) */ addu t1, a1, t1 lw t1, 0(t1) /* t1 = source (a8r8g8b8) */ addu a3, a3, t8 /* a3 = vx + unit_x 
*/ lhu t2, 0(a0) /* t2 = destination (r5g6b5) */ lhu t3, 2(a0) /* t3 = destination (r5g6b5) */ CONVERT_2x0565_TO_2x8888 t2, t3, v0, v1, t6, t7, s0, s1, s2, s3 OVER_2x8888_2x8888 t0, t1, v0, v1, t2, t3, t4, t9, s0, s1, s2, s3, s4 CONVERT_2x8888_TO_2x0565 t2, t3, v0, v1, t5, t6, t7, t9, s2 sh v0, 0(a0) sh v1, 2(a0) addiu a2, a2, -2 addiu t1, a2, -1 bgtz t1, 1b addiu a0, a0, 4 2: beqz a2, 3f nop sra t0, a3, 16 /* t0 = vx >> 16 */ sll t0, t0, 2 /* t0 = t0 * 4 (a8r8g8b8) */ addu t0, a1, t0 lw t0, 0(t0) /* t0 = source (a8r8g8b8) */ lhu t1, 0(a0) /* t1 = destination (r5g6b5) */ addu a3, a3, t8 /* a3 = vx + unit_x */ CONVERT_1x0565_TO_1x8888 t1, t2, t5, t6 OVER_8888_8888 t0, t2, t1, t4, t3, t5, t6, t7 CONVERT_1x8888_TO_1x0565 t1, t2, t5, t6 sh t2, 0(a0) 3: RESTORE_REGS_FROM_STACK 24, s0, s1, s2, s3, s4, v0, v1 j ra nop END(pixman_scaled_nearest_scanline_8888_0565_OVER_asm_mips) LEAF_MIPS_DSPR2(pixman_scaled_nearest_scanline_0565_8888_SRC_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (r5g6b5) * a2 - w * a3 - vx * 16(sp) - unit_x */ SAVE_REGS_ON_STACK 0, v0 beqz a2, 3f nop lw v0, 16(sp) /* v0 = unit_x */ addiu t1, a2, -1 beqz t1, 2f nop li t4, 0x07e007e0 li t5, 0x001F001F 1: sra t0, a3, 16 /* t0 = vx >> 16 */ sll t0, t0, 1 /* t0 = t0 * 2 ((r5g6b5)) */ addu t0, a1, t0 lhu t0, 0(t0) /* t0 = source ((r5g6b5)) */ addu a3, a3, v0 /* a3 = vx + unit_x */ sra t1, a3, 16 /* t1 = vx >> 16 */ sll t1, t1, 1 /* t1 = t1 * 2 ((r5g6b5)) */ addu t1, a1, t1 lhu t1, 0(t1) /* t1 = source ((r5g6b5)) */ addu a3, a3, v0 /* a3 = vx + unit_x */ addiu a2, a2, -2 CONVERT_2x0565_TO_2x8888 t0, t1, t2, t3, t4, t5, t6, t7, t8, t9 sw t2, 0(a0) sw t3, 4(a0) addiu t2, a2, -1 bgtz t2, 1b addiu a0, a0, 8 2: beqz a2, 3f nop sra t0, a3, 16 /* t0 = vx >> 16 */ sll t0, t0, 1 /* t0 = t0 * 2 ((r5g6b5)) */ addu t0, a1, t0 lhu t0, 0(t0) /* t0 = source ((r5g6b5)) */ CONVERT_1x0565_TO_1x8888 t0, t1, t2, t3 sw t1, 0(a0) 3: RESTORE_REGS_FROM_STACK 0, v0 j ra nop END(pixman_scaled_nearest_scanline_0565_8888_SRC_asm_mips) LEAF_MIPS_DSPR2(pixman_scaled_nearest_scanline_8888_8_0565_OVER_asm_mips) /* * a0 - dst (r5g6b5) * a1 - src (a8r8g8b8) * a2 - mask (a8) * a3 - w * 16(sp) - vx * 20(sp) - unit_x */ beqz a3, 4f nop SAVE_REGS_ON_STACK 20, v0, v1, s0, s1, s2, s3, s4, s5 lw v0, 36(sp) /* v0 = vx */ lw v1, 40(sp) /* v1 = unit_x */ li t6, 0x00ff00ff li t7, 0xf800f800 li t8, 0x07e007e0 li t9, 0x001F001F addiu t1, a3, -1 beqz t1, 2f nop 1: sra t0, v0, 16 /* t0 = vx >> 16 */ sll t0, t0, 2 /* t0 = t0 * 4 (a8r8g8b8) */ addu t0, a1, t0 lw t0, 0(t0) /* t0 = source (a8r8g8b8) */ addu v0, v0, v1 /* v0 = vx + unit_x */ sra t1, v0, 16 /* t1 = vx >> 16 */ sll t1, t1, 2 /* t1 = t1 * 4 (a8r8g8b8) */ addu t1, a1, t1 lw t1, 0(t1) /* t1 = source (a8r8g8b8) */ addu v0, v0, v1 /* v0 = vx + unit_x */ lbu t2, 0(a2) /* t2 = mask (a8) */ lbu t3, 1(a2) /* t3 = mask (a8) */ lhu t4, 0(a0) /* t4 = destination (r5g6b5) */ lhu t5, 2(a0) /* t5 = destination (r5g6b5) */ addiu a2, a2, 2 CONVERT_2x0565_TO_2x8888 t4, t5, s0, s1, t8, t9, s2, s3, s4, s5 OVER_2x8888_2x8_2x8888 t0, t1, \ t2, t3, \ s0, s1, \ t4, t5, \ t6, s2, s3, s4, s5, t2, t3 CONVERT_2x8888_TO_2x0565 t4, t5, s0, s1, t7, t8, t9, s2, s3 sh s0, 0(a0) sh s1, 2(a0) addiu a3, a3, -2 addiu t1, a3, -1 bgtz t1, 1b addiu a0, a0, 4 2: beqz a3, 3f nop sra t0, v0, 16 /* t0 = vx >> 16 */ sll t0, t0, 2 /* t0 = t0 * 4 (a8r8g8b8) */ addu t0, a1, t0 lw t0, 0(t0) /* t0 = source (a8r8g8b8) */ lbu t1, 0(a2) /* t1 = mask (a8) */ lhu t2, 0(a0) /* t2 = destination (r5g6b5) */ CONVERT_1x0565_TO_1x8888 t2, t3, t4, t5 OVER_8888_8_8888 t0, t1, t3, t2, 
t6, t4, t5, t7, t8 CONVERT_1x8888_TO_1x0565 t2, t3, t4, t5 sh t3, 0(a0) 3: RESTORE_REGS_FROM_STACK 20, v0, v1, s0, s1, s2, s3, s4, s5 4: j ra nop END(pixman_scaled_nearest_scanline_8888_8_0565_OVER_asm_mips) LEAF_MIPS_DSPR2(pixman_scaled_nearest_scanline_0565_8_0565_OVER_asm_mips) /* * a0 - dst (r5g6b5) * a1 - src (r5g6b5) * a2 - mask (a8) * a3 - w * 16(sp) - vx * 20(sp) - unit_x */ beqz a3, 4f nop SAVE_REGS_ON_STACK 20, v0, v1, s0, s1, s2, s3, s4, s5 lw v0, 36(sp) /* v0 = vx */ lw v1, 40(sp) /* v1 = unit_x */ li t4, 0xf800f800 li t5, 0x07e007e0 li t6, 0x001F001F li t7, 0x00ff00ff addiu t1, a3, -1 beqz t1, 2f nop 1: sra t0, v0, 16 /* t0 = vx >> 16 */ sll t0, t0, 1 /* t0 = t0 * 2 (r5g6b5) */ addu t0, a1, t0 lhu t0, 0(t0) /* t0 = source (r5g6b5) */ addu v0, v0, v1 /* v0 = vx + unit_x */ sra t1, v0, 16 /* t1 = vx >> 16 */ sll t1, t1, 1 /* t1 = t1 * 2 (r5g6b5) */ addu t1, a1, t1 lhu t1, 0(t1) /* t1 = source (r5g6b5) */ addu v0, v0, v1 /* v0 = vx + unit_x */ lbu t2, 0(a2) /* t2 = mask (a8) */ lbu t3, 1(a2) /* t3 = mask (a8) */ lhu t8, 0(a0) /* t8 = destination (r5g6b5) */ lhu t9, 2(a0) /* t9 = destination (r5g6b5) */ addiu a2, a2, 2 CONVERT_2x0565_TO_2x8888 t0, t1, s0, s1, t5, t6, s2, s3, s4, s5 CONVERT_2x0565_TO_2x8888 t8, t9, s2, s3, t5, t6, s4, s5, t0, t1 OVER_2x8888_2x8_2x8888 s0, s1, \ t2, t3, \ s2, s3, \ t0, t1, \ t7, t8, t9, s4, s5, s0, s1 CONVERT_2x8888_TO_2x0565 t0, t1, s0, s1, t4, t5, t6, s2, s3 sh s0, 0(a0) sh s1, 2(a0) addiu a3, a3, -2 addiu t1, a3, -1 bgtz t1, 1b addiu a0, a0, 4 2: beqz a3, 3f nop sra t0, v0, 16 /* t0 = vx >> 16 */ sll t0, t0, 1 /* t0 = t0 * 2 (r5g6b5) */ addu t0, a1, t0 lhu t0, 0(t0) /* t0 = source (r5g6b5) */ lbu t1, 0(a2) /* t1 = mask (a8) */ lhu t2, 0(a0) /* t2 = destination (r5g6b5) */ CONVERT_1x0565_TO_1x8888 t0, t3, t4, t5 CONVERT_1x0565_TO_1x8888 t2, t4, t5, t6 OVER_8888_8_8888 t3, t1, t4, t0, t7, t2, t5, t6, t8 CONVERT_1x8888_TO_1x0565 t0, t3, t4, t5 sh t3, 0(a0) 3: RESTORE_REGS_FROM_STACK 20, v0, v1, s0, s1, s2, s3, s4, s5 4: j ra nop END(pixman_scaled_nearest_scanline_0565_8_0565_OVER_asm_mips) LEAF_MIPS_DSPR2(pixman_scaled_bilinear_scanline_8888_8888_SRC_asm_mips) /* * a0 - *dst * a1 - *src_top * a2 - *src_bottom * a3 - w * 16(sp) - wt * 20(sp) - wb * 24(sp) - vx * 28(sp) - unit_x */ beqz a3, 1f nop SAVE_REGS_ON_STACK 20, v0, s0, s1, s2, s3, s4, s5, s6, s7 lw s0, 36(sp) /* s0 = wt */ lw s1, 40(sp) /* s1 = wb */ lw s2, 44(sp) /* s2 = vx */ lw s3, 48(sp) /* s3 = unit_x */ li v0, BILINEAR_INTERPOLATION_RANGE sll s0, s0, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) sll s1, s1, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) 0: andi t4, s2, 0xffff /* t4 = (short)vx */ srl t4, t4, (16 - BILINEAR_INTERPOLATION_BITS) /* t4 = vx >> 8 */ subu t5, v0, t4 /* t5 = ( 256 - (vx>>8)) */ mul s4, s0, t5 /* s4 = wt*(256-(vx>>8)) */ mul s5, s0, t4 /* s5 = wt*(vx>>8) */ mul s6, s1, t5 /* s6 = wb*(256-(vx>>8)) */ mul s7, s1, t4 /* s7 = wb*(vx>>8) */ sra t9, s2, 16 sll t9, t9, 2 addiu t8, t9, 4 lwx t0, t9(a1) /* t0 = tl */ lwx t1, t8(a1) /* t1 = tr */ addiu a3, a3, -1 lwx t2, t9(a2) /* t2 = bl */ lwx t3, t8(a2) /* t3 = br */ BILINEAR_INTERPOLATE_SINGLE_PIXEL t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, s4, s5, s6, s7 addu s2, s2, s3 /* vx += unit_x; */ sw t0, 0(a0) bnez a3, 0b addiu a0, a0, 4 RESTORE_REGS_FROM_STACK 20, v0, s0, s1, s2, s3, s4, s5, s6, s7 1: j ra nop END(pixman_scaled_bilinear_scanline_8888_8888_SRC_asm_mips) LEAF_MIPS_DSPR2(pixman_scaled_bilinear_scanline_8888_0565_SRC_asm_mips) /* * a0 - *dst * a1 - *src_top * a2 - *src_bottom * a3 - w * 16(sp) - wt * 20(sp) - wb * 24(sp) - 
vx * 28(sp) - unit_x */ beqz a3, 1f nop SAVE_REGS_ON_STACK 20, v0, s0, s1, s2, s3, s4, s5, s6, s7 lw s0, 36(sp) /* s0 = wt */ lw s1, 40(sp) /* s1 = wb */ lw s2, 44(sp) /* s2 = vx */ lw s3, 48(sp) /* s3 = unit_x */ li v0, BILINEAR_INTERPOLATION_RANGE sll s0, s0, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) sll s1, s1, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) 0: andi t4, s2, 0xffff /* t4 = (short)vx */ srl t4, t4, (16 - BILINEAR_INTERPOLATION_BITS) /* t4 = vx >> 8 */ subu t5, v0, t4 /* t5 = ( 256 - (vx>>8)) */ mul s4, s0, t5 /* s4 = wt*(256-(vx>>8)) */ mul s5, s0, t4 /* s5 = wt*(vx>>8) */ mul s6, s1, t5 /* s6 = wb*(256-(vx>>8)) */ mul s7, s1, t4 /* s7 = wb*(vx>>8) */ sra t9, s2, 16 sll t9, t9, 2 addiu t8, t9, 4 lwx t0, t9(a1) /* t0 = tl */ lwx t1, t8(a1) /* t1 = tr */ addiu a3, a3, -1 lwx t2, t9(a2) /* t2 = bl */ lwx t3, t8(a2) /* t3 = br */ BILINEAR_INTERPOLATE_SINGLE_PIXEL t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, s4, s5, s6, s7 CONVERT_1x8888_TO_1x0565 t0, t1, t2, t3 addu s2, s2, s3 /* vx += unit_x; */ sh t1, 0(a0) bnez a3, 0b addiu a0, a0, 2 RESTORE_REGS_FROM_STACK 20, v0, s0, s1, s2, s3, s4, s5, s6, s7 1: j ra nop END(pixman_scaled_bilinear_scanline_8888_0565_SRC_asm_mips) LEAF_MIPS_DSPR2(pixman_scaled_bilinear_scanline_0565_8888_SRC_asm_mips) /* * a0 - *dst * a1 - *src_top * a2 - *src_bottom * a3 - w * 16(sp) - wt * 20(sp) - wb * 24(sp) - vx * 28(sp) - unit_x */ beqz a3, 1f nop SAVE_REGS_ON_STACK 28, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8 lw s0, 44(sp) /* s0 = wt */ lw s1, 48(sp) /* s1 = wb */ lw s2, 52(sp) /* s2 = vx */ lw s3, 56(sp) /* s3 = unit_x */ li v0, BILINEAR_INTERPOLATION_RANGE li v1, 0x07e007e0 li s8, 0x001f001f sll s0, s0, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) sll s1, s1, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) 0: andi t4, s2, 0xffff /* t4 = (short)vx */ srl t4, t4, (16 - BILINEAR_INTERPOLATION_BITS) /* t4 = vx >> 8 */ subu t5, v0, t4 /* t5 = ( 256 - (vx>>8)) */ mul s4, s0, t5 /* s4 = wt*(256-(vx>>8)) */ mul s5, s0, t4 /* s5 = wt*(vx>>8) */ mul s6, s1, t5 /* s6 = wb*(256-(vx>>8)) */ mul s7, s1, t4 /* s7 = wb*(vx>>8) */ sra t9, s2, 16 sll t9, t9, 1 addiu t8, t9, 2 lhx t0, t9(a1) /* t0 = tl */ lhx t1, t8(a1) /* t1 = tr */ andi t1, t1, 0xffff addiu a3, a3, -1 lhx t2, t9(a2) /* t2 = bl */ lhx t3, t8(a2) /* t3 = br */ andi t3, t3, 0xffff CONVERT_2x0565_TO_2x8888 t0, t1, t0, t1, v1, s8, t4, t5, t6, t7 CONVERT_2x0565_TO_2x8888 t2, t3, t2, t3, v1, s8, t4, t5, t6, t7 BILINEAR_INTERPOLATE_SINGLE_PIXEL t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, s4, s5, s6, s7 addu s2, s2, s3 /* vx += unit_x; */ sw t0, 0(a0) bnez a3, 0b addiu a0, a0, 4 RESTORE_REGS_FROM_STACK 28, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8 1: j ra nop END(pixman_scaled_bilinear_scanline_0565_8888_SRC_asm_mips) LEAF_MIPS_DSPR2(pixman_scaled_bilinear_scanline_0565_0565_SRC_asm_mips) /* * a0 - *dst * a1 - *src_top * a2 - *src_bottom * a3 - w * 16(sp) - wt * 20(sp) - wb * 24(sp) - vx * 28(sp) - unit_x */ beqz a3, 1f nop SAVE_REGS_ON_STACK 28, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8 lw s0, 44(sp) /* s0 = wt */ lw s1, 48(sp) /* s1 = wb */ lw s2, 52(sp) /* s2 = vx */ lw s3, 56(sp) /* s3 = unit_x */ li v0, BILINEAR_INTERPOLATION_RANGE li v1, 0x07e007e0 li s8, 0x001f001f sll s0, s0, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) sll s1, s1, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) 0: andi t4, s2, 0xffff /* t4 = (short)vx */ srl t4, t4, (16 - BILINEAR_INTERPOLATION_BITS) /* t4 = vx >> 8 */ subu t5, v0, t4 /* t5 = ( 256 - (vx>>8)) */ mul s4, s0, t5 /* s4 = wt*(256-(vx>>8)) */ mul s5, s0, t4 /* s5 = wt*(vx>>8) */ mul s6, s1, t5 /* s6 = 
wb*(256-(vx>>8)) */ mul s7, s1, t4 /* s7 = wb*(vx>>8) */ sra t9, s2, 16 sll t9, t9, 1 addiu t8, t9, 2 lhx t0, t9(a1) /* t0 = tl */ lhx t1, t8(a1) /* t1 = tr */ andi t1, t1, 0xffff addiu a3, a3, -1 lhx t2, t9(a2) /* t2 = bl */ lhx t3, t8(a2) /* t3 = br */ andi t3, t3, 0xffff CONVERT_2x0565_TO_2x8888 t0, t1, t0, t1, v1, s8, t4, t5, t6, t7 CONVERT_2x0565_TO_2x8888 t2, t3, t2, t3, v1, s8, t4, t5, t6, t7 BILINEAR_INTERPOLATE_SINGLE_PIXEL t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, s4, s5, s6, s7 CONVERT_1x8888_TO_1x0565 t0, t1, t2, t3 addu s2, s2, s3 /* vx += unit_x; */ sh t1, 0(a0) bnez a3, 0b addiu a0, a0, 2 RESTORE_REGS_FROM_STACK 28, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8 1: j ra nop END(pixman_scaled_bilinear_scanline_0565_0565_SRC_asm_mips) LEAF_MIPS_DSPR2(pixman_scaled_bilinear_scanline_8888_8888_OVER_asm_mips) /* * a0 - *dst * a1 - *src_top * a2 - *src_bottom * a3 - w * 16(sp) - wt * 20(sp) - wb * 24(sp) - vx * 28(sp) - unit_x */ beqz a3, 1f nop SAVE_REGS_ON_STACK 24, v0, s0, s1, s2, s3, s4, s5, s6, s7, s8 lw s0, 40(sp) /* s0 = wt */ lw s1, 44(sp) /* s1 = wb */ lw s2, 48(sp) /* s2 = vx */ lw s3, 52(sp) /* s3 = unit_x */ li v0, BILINEAR_INTERPOLATION_RANGE li s8, 0x00ff00ff sll s0, s0, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) sll s1, s1, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) 0: andi t4, s2, 0xffff /* t4 = (short)vx */ srl t4, t4, (16 - BILINEAR_INTERPOLATION_BITS) /* t4 = vx >> 8 */ subu t5, v0, t4 /* t5 = ( 256 - (vx>>8)) */ mul s4, s0, t5 /* s4 = wt*(256-(vx>>8)) */ mul s5, s0, t4 /* s5 = wt*(vx>>8) */ mul s6, s1, t5 /* s6 = wb*(256-(vx>>8)) */ mul s7, s1, t4 /* s7 = wb*(vx>>8) */ sra t9, s2, 16 sll t9, t9, 2 addiu t8, t9, 4 lwx t0, t9(a1) /* t0 = tl */ lwx t1, t8(a1) /* t1 = tr */ addiu a3, a3, -1 lwx t2, t9(a2) /* t2 = bl */ lwx t3, t8(a2) /* t3 = br */ BILINEAR_INTERPOLATE_SINGLE_PIXEL t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, s4, s5, s6, s7 lw t1, 0(a0) /* t1 = dest */ OVER_8888_8888 t0, t1, t2, s8, t3, t4, t5, t6 addu s2, s2, s3 /* vx += unit_x; */ sw t2, 0(a0) bnez a3, 0b addiu a0, a0, 4 RESTORE_REGS_FROM_STACK 24, v0, s0, s1, s2, s3, s4, s5, s6, s7, s8 1: j ra nop END(pixman_scaled_bilinear_scanline_8888_8888_OVER_asm_mips) LEAF_MIPS_DSPR2(pixman_scaled_bilinear_scanline_8888_8888_ADD_asm_mips) /* * a0 - *dst * a1 - *src_top * a2 - *src_bottom * a3 - w * 16(sp) - wt * 20(sp) - wb * 24(sp) - vx * 28(sp) - unit_x */ beqz a3, 1f nop SAVE_REGS_ON_STACK 20, v0, s0, s1, s2, s3, s4, s5, s6, s7 lw s0, 36(sp) /* s0 = wt */ lw s1, 40(sp) /* s1 = wb */ lw s2, 44(sp) /* s2 = vx */ lw s3, 48(sp) /* s3 = unit_x */ li v0, BILINEAR_INTERPOLATION_RANGE sll s0, s0, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) sll s1, s1, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) 0: andi t4, s2, 0xffff /* t4 = (short)vx */ srl t4, t4, (16 - BILINEAR_INTERPOLATION_BITS) /* t4 = vx >> 8 */ subu t5, v0, t4 /* t5 = ( 256 - (vx>>8)) */ mul s4, s0, t5 /* s4 = wt*(256-(vx>>8)) */ mul s5, s0, t4 /* s5 = wt*(vx>>8) */ mul s6, s1, t5 /* s6 = wb*(256-(vx>>8)) */ mul s7, s1, t4 /* s7 = wb*(vx>>8) */ sra t9, s2, 16 sll t9, t9, 2 addiu t8, t9, 4 lwx t0, t9(a1) /* t0 = tl */ lwx t1, t8(a1) /* t1 = tr */ addiu a3, a3, -1 lwx t2, t9(a2) /* t2 = bl */ lwx t3, t8(a2) /* t3 = br */ BILINEAR_INTERPOLATE_SINGLE_PIXEL t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, s4, s5, s6, s7 lw t1, 0(a0) addu_s.qb t2, t0, t1 addu s2, s2, s3 /* vx += unit_x; */ sw t2, 0(a0) bnez a3, 0b addiu a0, a0, 4 RESTORE_REGS_FROM_STACK 20, v0, s0, s1, s2, s3, s4, s5, s6, s7 1: j ra nop END(pixman_scaled_bilinear_scanline_8888_8888_ADD_asm_mips) 
LEAF_MIPS_DSPR2(pixman_scaled_bilinear_scanline_8888_8_8888_SRC_asm_mips) /* * a0 - *dst * a1 - *mask * a2 - *src_top * a3 - *src_bottom * 16(sp) - wt * 20(sp) - wb * 24(sp) - vx * 28(sp) - unit_x * 32(sp) - w */ lw v1, 32(sp) beqz v1, 1f nop SAVE_REGS_ON_STACK 28, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8 lw s0, 44(sp) /* s0 = wt */ lw s1, 48(sp) /* s1 = wb */ lw s2, 52(sp) /* s2 = vx */ lw s3, 56(sp) /* s3 = unit_x */ li v0, BILINEAR_INTERPOLATION_RANGE li s8, 0x00ff00ff sll s0, s0, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) sll s1, s1, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) 0: andi t4, s2, 0xffff /* t4 = (short)vx */ srl t4, t4, (16 - BILINEAR_INTERPOLATION_BITS) /* t4 = vx >> 8 */ subu t5, v0, t4 /* t5 = ( 256 - (vx>>8)) */ mul s4, s0, t5 /* s4 = wt*(256-(vx>>8)) */ mul s5, s0, t4 /* s5 = wt*(vx>>8) */ mul s6, s1, t5 /* s6 = wb*(256-(vx>>8)) */ mul s7, s1, t4 /* s7 = wb*(vx>>8) */ sra t9, s2, 16 sll t9, t9, 2 addiu t8, t9, 4 lwx t0, t9(a2) /* t0 = tl */ lwx t1, t8(a2) /* t1 = tr */ addiu v1, v1, -1 lwx t2, t9(a3) /* t2 = bl */ lwx t3, t8(a3) /* t3 = br */ BILINEAR_INTERPOLATE_SINGLE_PIXEL t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, s4, s5, s6, s7 lbu t1, 0(a1) /* t1 = mask */ addiu a1, a1, 1 MIPS_UN8x4_MUL_UN8 t0, t1, t0, s8, t2, t3, t4 addu s2, s2, s3 /* vx += unit_x; */ sw t0, 0(a0) bnez v1, 0b addiu a0, a0, 4 RESTORE_REGS_FROM_STACK 28, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8 1: j ra nop END(pixman_scaled_bilinear_scanline_8888_8_8888_SRC_asm_mips) LEAF_MIPS_DSPR2(pixman_scaled_bilinear_scanline_8888_8_0565_SRC_asm_mips) /* * a0 - *dst * a1 - *mask * a2 - *src_top * a3 - *src_bottom * 16(sp) - wt * 20(sp) - wb * 24(sp) - vx * 28(sp) - unit_x * 32(sp) - w */ lw v1, 32(sp) beqz v1, 1f nop SAVE_REGS_ON_STACK 28, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8 lw s0, 44(sp) /* s0 = wt */ lw s1, 48(sp) /* s1 = wb */ lw s2, 52(sp) /* s2 = vx */ lw s3, 56(sp) /* s3 = unit_x */ li v0, BILINEAR_INTERPOLATION_RANGE li s8, 0x00ff00ff sll s0, s0, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) sll s1, s1, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) 0: andi t4, s2, 0xffff /* t4 = (short)vx */ srl t4, t4, (16 - BILINEAR_INTERPOLATION_BITS) /* t4 = vx >> 8 */ subu t5, v0, t4 /* t5 = ( 256 - (vx>>8)) */ mul s4, s0, t5 /* s4 = wt*(256-(vx>>8)) */ mul s5, s0, t4 /* s5 = wt*(vx>>8) */ mul s6, s1, t5 /* s6 = wb*(256-(vx>>8)) */ mul s7, s1, t4 /* s7 = wb*(vx>>8) */ sra t9, s2, 16 sll t9, t9, 2 addiu t8, t9, 4 lwx t0, t9(a2) /* t0 = tl */ lwx t1, t8(a2) /* t1 = tr */ addiu v1, v1, -1 lwx t2, t9(a3) /* t2 = bl */ lwx t3, t8(a3) /* t3 = br */ BILINEAR_INTERPOLATE_SINGLE_PIXEL t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, s4, s5, s6, s7 lbu t1, 0(a1) /* t1 = mask */ addiu a1, a1, 1 MIPS_UN8x4_MUL_UN8 t0, t1, t0, s8, t2, t3, t4 CONVERT_1x8888_TO_1x0565 t0, t1, t2, t3 addu s2, s2, s3 /* vx += unit_x; */ sh t1, 0(a0) bnez v1, 0b addiu a0, a0, 2 RESTORE_REGS_FROM_STACK 28, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8 1: j ra nop END(pixman_scaled_bilinear_scanline_8888_8_0565_SRC_asm_mips) LEAF_MIPS_DSPR2(pixman_scaled_bilinear_scanline_0565_8_x888_SRC_asm_mips) /* * a0 - *dst * a1 - *mask * a2 - *src_top * a3 - *src_bottom * 16(sp) - wt * 20(sp) - wb * 24(sp) - vx * 28(sp) - unit_x * 32(sp) - w */ lw t0, 32(sp) beqz t0, 1f nop SAVE_REGS_ON_STACK 32, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8, ra lw s0, 48(sp) /* s0 = wt */ lw s1, 52(sp) /* s1 = wb */ lw s2, 56(sp) /* s2 = vx */ lw s3, 60(sp) /* s3 = unit_x */ lw ra, 64(sp) /* ra = w */ li v0, 0x00ff00ff li v1, 0x07e007e0 li s8, 0x001f001f sll s0, s0, (2 * (8 - 
BILINEAR_INTERPOLATION_BITS)) sll s1, s1, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) 0: andi t4, s2, 0xffff /* t4 = (short)vx */ srl t4, t4, (16 - BILINEAR_INTERPOLATION_BITS) /* t4 = vx >> 8 */ li t5, BILINEAR_INTERPOLATION_RANGE subu t5, t5, t4 /* t5 = ( 256 - (vx>>8)) */ mul s4, s0, t5 /* s4 = wt*(256-(vx>>8)) */ mul s5, s0, t4 /* s5 = wt*(vx>>8) */ mul s6, s1, t5 /* s6 = wb*(256-(vx>>8)) */ mul s7, s1, t4 /* s7 = wb*(vx>>8) */ sra t9, s2, 16 sll t9, t9, 1 addiu t8, t9, 2 lhx t0, t9(a2) /* t0 = tl */ lhx t1, t8(a2) /* t1 = tr */ andi t1, t1, 0xffff addiu ra, ra, -1 lhx t2, t9(a3) /* t2 = bl */ lhx t3, t8(a3) /* t3 = br */ andi t3, t3, 0xffff CONVERT_2x0565_TO_2x8888 t0, t1, t0, t1, v1, s8, t4, t5, t6, t7 CONVERT_2x0565_TO_2x8888 t2, t3, t2, t3, v1, s8, t4, t5, t6, t7 BILINEAR_INTERPOLATE_SINGLE_PIXEL t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, s4, s5, s6, s7 lbu t1, 0(a1) /* t1 = mask */ addiu a1, a1, 1 MIPS_UN8x4_MUL_UN8 t0, t1, t0, v0, t2, t3, t4 addu s2, s2, s3 /* vx += unit_x; */ sw t0, 0(a0) bnez ra, 0b addiu a0, a0, 4 RESTORE_REGS_FROM_STACK 32, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8, ra 1: j ra nop END(pixman_scaled_bilinear_scanline_0565_8_x888_SRC_asm_mips) LEAF_MIPS_DSPR2(pixman_scaled_bilinear_scanline_0565_8_0565_SRC_asm_mips) /* * a0 - *dst * a1 - *mask * a2 - *src_top * a3 - *src_bottom * 16(sp) - wt * 20(sp) - wb * 24(sp) - vx * 28(sp) - unit_x * 32(sp) - w */ lw t0, 32(sp) beqz t0, 1f nop SAVE_REGS_ON_STACK 32, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8, ra lw s0, 48(sp) /* s0 = wt */ lw s1, 52(sp) /* s1 = wb */ lw s2, 56(sp) /* s2 = vx */ lw s3, 60(sp) /* s3 = unit_x */ lw ra, 64(sp) /* ra = w */ li v0, 0x00ff00ff li v1, 0x07e007e0 li s8, 0x001f001f sll s0, s0, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) sll s1, s1, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) 0: andi t4, s2, 0xffff /* t4 = (short)vx */ srl t4, t4, (16 - BILINEAR_INTERPOLATION_BITS) /* t4 = vx >> 8 */ li t5, BILINEAR_INTERPOLATION_RANGE subu t5, t5, t4 /* t5 = ( 256 - (vx>>8)) */ mul s4, s0, t5 /* s4 = wt*(256-(vx>>8)) */ mul s5, s0, t4 /* s5 = wt*(vx>>8) */ mul s6, s1, t5 /* s6 = wb*(256-(vx>>8)) */ mul s7, s1, t4 /* s7 = wb*(vx>>8) */ sra t9, s2, 16 sll t9, t9, 1 addiu t8, t9, 2 lhx t0, t9(a2) /* t0 = tl */ lhx t1, t8(a2) /* t1 = tr */ andi t1, t1, 0xffff addiu ra, ra, -1 lhx t2, t9(a3) /* t2 = bl */ lhx t3, t8(a3) /* t3 = br */ andi t3, t3, 0xffff CONVERT_2x0565_TO_2x8888 t0, t1, t0, t1, v1, s8, t4, t5, t6, t7 CONVERT_2x0565_TO_2x8888 t2, t3, t2, t3, v1, s8, t4, t5, t6, t7 BILINEAR_INTERPOLATE_SINGLE_PIXEL t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, s4, s5, s6, s7 lbu t1, 0(a1) /* t1 = mask */ addiu a1, a1, 1 MIPS_UN8x4_MUL_UN8 t0, t1, t0, v0, t2, t3, t4 CONVERT_1x8888_TO_1x0565 t0, t1, t2, t3 addu s2, s2, s3 /* vx += unit_x; */ sh t1, 0(a0) bnez ra, 0b addiu a0, a0, 2 RESTORE_REGS_FROM_STACK 32, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8, ra 1: j ra nop END(pixman_scaled_bilinear_scanline_0565_8_0565_SRC_asm_mips) LEAF_MIPS_DSPR2(pixman_scaled_bilinear_scanline_8888_8_8888_OVER_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - mask (a8) * a2 - src_top (a8r8g8b8) * a3 - src_bottom (a8r8g8b8) * 16(sp) - wt * 20(sp) - wb * 24(sp) - vx * 28(sp) - unit_x * 32(sp) - w */ SAVE_REGS_ON_STACK 28, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8 lw v1, 60(sp) /* v1 = w(sp + 32 + 28 save regs stack offset)*/ beqz v1, 1f nop lw s0, 44(sp) /* s0 = wt */ lw s1, 48(sp) /* s1 = wb */ lw s2, 52(sp) /* s2 = vx */ lw s3, 56(sp) /* s3 = unit_x */ li v0, BILINEAR_INTERPOLATION_RANGE li s8, 0x00ff00ff sll s0, s0, (2 * (8 - 
BILINEAR_INTERPOLATION_BITS)) sll s1, s1, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) 0: andi t4, s2, 0xffff /* t4 = (short)vx */ srl t4, t4, (16 - BILINEAR_INTERPOLATION_BITS) /* t4 = vx >> 8 */ subu t5, v0, t4 /* t5 = ( 256 - (vx>>8)) */ mul s4, s0, t5 /* s4 = wt*(256-(vx>>8)) */ mul s5, s0, t4 /* s5 = wt*(vx>>8) */ mul s6, s1, t5 /* s6 = wb*(256-(vx>>8)) */ mul s7, s1, t4 /* s7 = wb*(vx>>8) */ sra t9, s2, 16 sll t9, t9, 2 addiu t8, t9, 4 lwx t0, t9(a2) /* t0 = tl */ lwx t1, t8(a2) /* t1 = tr */ addiu v1, v1, -1 lwx t2, t9(a3) /* t2 = bl */ lwx t3, t8(a3) /* t3 = br */ BILINEAR_INTERPOLATE_SINGLE_PIXEL t0, t1, t2, t3, \ t4, t5, t6, t7, t8, t9, s4, s5, s6, s7 lbu t1, 0(a1) /* t1 = mask */ lw t2, 0(a0) /* t2 = dst */ addiu a1, a1, 1 OVER_8888_8_8888 t0, t1, t2, t0, s8, t3, t4, t5, t6 addu s2, s2, s3 /* vx += unit_x; */ sw t0, 0(a0) bnez v1, 0b addiu a0, a0, 4 1: RESTORE_REGS_FROM_STACK 28, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8 j ra nop END(pixman_scaled_bilinear_scanline_8888_8_8888_OVER_asm_mips) LEAF_MIPS_DSPR2(pixman_scaled_bilinear_scanline_8888_8_8888_ADD_asm_mips) /* * a0 - *dst * a1 - *mask * a2 - *src_top * a3 - *src_bottom * 16(sp) - wt * 20(sp) - wb * 24(sp) - vx * 28(sp) - unit_x * 32(sp) - w */ lw v1, 32(sp) beqz v1, 1f nop SAVE_REGS_ON_STACK 28, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8 lw s0, 44(sp) /* s0 = wt */ lw s1, 48(sp) /* s1 = wb */ lw s2, 52(sp) /* s2 = vx */ lw s3, 56(sp) /* s3 = unit_x */ li v0, BILINEAR_INTERPOLATION_RANGE li s8, 0x00ff00ff sll s0, s0, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) sll s1, s1, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) 0: andi t4, s2, 0xffff /* t4 = (short)vx */ srl t4, t4, (16 - BILINEAR_INTERPOLATION_BITS) /* t4 = vx >> 8 */ subu t5, v0, t4 /* t5 = ( 256 - (vx>>8)) */ mul s4, s0, t5 /* s4 = wt*(256-(vx>>8)) */ mul s5, s0, t4 /* s5 = wt*(vx>>8) */ mul s6, s1, t5 /* s6 = wb*(256-(vx>>8)) */ mul s7, s1, t4 /* s7 = wb*(vx>>8) */ sra t9, s2, 16 sll t9, t9, 2 addiu t8, t9, 4 lwx t0, t9(a2) /* t0 = tl */ lwx t1, t8(a2) /* t1 = tr */ addiu v1, v1, -1 lwx t2, t9(a3) /* t2 = bl */ lwx t3, t8(a3) /* t3 = br */ BILINEAR_INTERPOLATE_SINGLE_PIXEL t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, s4, s5, s6, s7 lbu t1, 0(a1) /* t1 = mask */ lw t2, 0(a0) /* t2 = dst */ addiu a1, a1, 1 MIPS_UN8x4_MUL_UN8_ADD_UN8x4 t0, t1, t2, t0, s8, t3, t4, t5 addu s2, s2, s3 /* vx += unit_x; */ sw t0, 0(a0) bnez v1, 0b addiu a0, a0, 4 RESTORE_REGS_FROM_STACK 28, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8 1: j ra nop END(pixman_scaled_bilinear_scanline_8888_8_8888_ADD_asm_mips)
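Each of the bilinear scanline routines above follows the same per-pixel recipe: take the integer part of vx as the source column, reduce the fractional part to BILINEAR_INTERPOLATION_BITS bits, form four weights from wt/wb and that fraction (the mul results in s4..s7), fetch tl/tr/bl/br, and blend them with BILINEAR_INTERPOLATE_SINGLE_PIXEL before converting to the destination format. Below is a minimal Rust sketch of that blend for one a8r8g8b8 pixel, assuming the usual 7-bit precision (BILINEAR_INTERPOLATION_BITS = 7) and vertical weights already scaled so that wt + wb equals the range; the names and the per-channel loop are illustrative, the DSPr2 code does the same arithmetic on packed registers.

// Sketch only: one bilinear sample over a8r8g8b8 pixels, assuming
// BILINEAR_INTERPOLATION_BITS = 7 and wt + wb == RANGE.
const BITS: u32 = 7;
const RANGE: u32 = 1 << BITS;

fn bilinear_pixel(tl: u32, tr: u32, bl: u32, br: u32, wt: u32, wb: u32, vx: u32) -> u32 {
    // Fractional x position, reduced to BITS bits (the andi/srl pair above).
    let fx = (vx & 0xffff) >> (16 - BITS);
    // The four corner weights computed into s4..s7 above.
    let (w_tl, w_tr) = (wt * (RANGE - fx), wt * fx);
    let (w_bl, w_br) = (wb * (RANGE - fx), wb * fx);
    let mut out = 0u32;
    for shift in [0u32, 8, 16, 24] {
        let ch = |p: u32| (p >> shift) & 0xff;
        // Weighted sum of the four corners, dropping the 2*BITS fraction bits.
        let v = (ch(tl) * w_tl + ch(tr) * w_tr + ch(bl) * w_bl + ch(br) * w_br) >> (2 * BITS);
        out |= (v & 0xff) << shift;
    }
    out
}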
Erio-Harrison/rust-os
1,769
src/arch/riscv/kernelvec.S
.section .text

# interrupts and exceptions while in supervisor
# mode come here.
.globl kernelvec
.align 4
kernelvec:
    # make room to save registers.
    addi sp, sp, -256

    # save the registers.
    sd ra, 0(sp)
    sd gp, 16(sp)
    sd tp, 24(sp)
    sd t0, 32(sp)
    sd t1, 40(sp)
    sd t2, 48(sp)
    sd s0, 56(sp)
    sd s1, 64(sp)
    sd a0, 72(sp)
    sd a1, 80(sp)
    sd a2, 88(sp)
    sd a3, 96(sp)
    sd a4, 104(sp)
    sd a5, 112(sp)
    sd a6, 120(sp)
    sd a7, 128(sp)
    sd s2, 136(sp)
    sd s3, 144(sp)
    sd s4, 152(sp)
    sd s5, 160(sp)
    sd s6, 168(sp)
    sd s7, 176(sp)
    sd s8, 184(sp)
    sd s9, 192(sp)
    sd s10, 200(sp)
    sd s11, 208(sp)
    sd t3, 216(sp)
    sd t4, 224(sp)
    sd t5, 232(sp)
    sd t6, 240(sp)

    # call the C trap handler in trap.c
    call kerneltrap

    # restore registers.
    ld ra, 0(sp)
    ld gp, 16(sp)
    ld tp, 24(sp)
    ld t0, 32(sp)
    ld t1, 40(sp)
    ld t2, 48(sp)
    ld s0, 56(sp)
    ld s1, 64(sp)
    ld a0, 72(sp)
    ld a1, 80(sp)
    ld a2, 88(sp)
    ld a3, 96(sp)
    ld a4, 104(sp)
    ld a5, 112(sp)
    ld a6, 120(sp)
    ld a7, 128(sp)
    ld s2, 136(sp)
    ld s3, 144(sp)
    ld s4, 152(sp)
    ld s5, 160(sp)
    ld s6, 168(sp)
    ld s7, 176(sp)
    ld s8, 184(sp)
    ld s9, 192(sp)
    ld s10, 200(sp)
    ld s11, 208(sp)
    ld t3, 216(sp)
    ld t4, 224(sp)
    ld t5, 232(sp)
    ld t6, 240(sp)

    addi sp, sp, 256

    # return to whatever we were doing in the kernel.
    sret
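kernelvec carves 256 bytes off the current kernel stack, spills every integer register except sp (offset 8 is left unused), calls the trap handler, then restores everything and returns with sret. The comment about trap.c is inherited from xv6; in this repository the handler is presumably implemented in Rust. A hedged sketch of the Rust side this stub expects, assuming the usual xv6-style split where stvec points at kernelvec and kerneltrap takes no arguments (all names other than kernelvec and kerneltrap are illustrative):

use core::arch::asm;

extern "C" {
    // The assembly entry point above.
    fn kernelvec();
}

// Sketch of the handler the stub calls: no arguments, everything it needs
// lives in the supervisor CSRs (scause, sepc, sstatus) and on the kernel stack.
#[no_mangle]
pub extern "C" fn kerneltrap() {
    // read scause/sepc here and dispatch to device / timer / exception code
}

// Point supervisor traps at kernelvec; the `.align 4` above matters because
// stvec requires a 4-byte-aligned address in direct mode.
unsafe fn install_kernel_trap_vector() {
    asm!("csrw stvec, {0}", in(reg) kernelvec as usize);
}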
Erio-Harrison/rust-os
3,036
src/arch/riscv/trampoline.S
.section trampsec .globl trampoline .globl usertrap trampoline: .align 4 .globl uservec uservec: # save user a0 in sscratch so # a0 can be used to get at TRAPFRAME. csrw sscratch, a0 # each process has a separate p->trapframe memory area, # but it's mapped to the same virtual address # (TRAPFRAME) in every process's user page table. li a0, TRAPFRAME # save the user registers in TRAPFRAME sd ra, 40(a0) sd sp, 48(a0) sd gp, 56(a0) sd tp, 64(a0) sd t0, 72(a0) sd t1, 80(a0) sd t2, 88(a0) sd s0, 96(a0) sd s1, 104(a0) sd a1, 120(a0) sd a2, 128(a0) sd a3, 136(a0) sd a4, 144(a0) sd a5, 152(a0) sd a6, 160(a0) sd a7, 168(a0) sd s2, 176(a0) sd s3, 184(a0) sd s4, 192(a0) sd s5, 200(a0) sd s6, 208(a0) sd s7, 216(a0) sd s8, 224(a0) sd s9, 232(a0) sd s10, 240(a0) sd s11, 248(a0) sd t3, 256(a0) sd t4, 264(a0) sd t5, 272(a0) sd t6, 280(a0) # save the user a0 in p->trapframe->a0 csrr t0, sscratch sd t0, 112(a0) # initialize kernel stack pointer, from p->trapframe->kernel_sp ld sp, 8(a0) # make tp hold the current hartid, from p->trapframe->kernel_hartid ld tp, 32(a0) # load the address of usertrap(), from p->trapframe->kernel_trap ld t0, 16(a0) # fetch the kernel page table address, from p->trapframe->kernel_satp. ld t1, 0(a0) # wait for any previous memory operations to complete sfence.vma zero, zero # install the kernel page table. csrw satp, t1 # flush now-stale user entries from the TLB. sfence.vma zero, zero # jump to usertrap(), which does not return jr t0 .globl userret userret: # userret(pagetable) # a0: user page table, for satp. # switch to the user page table. sfence.vma zero, zero csrw satp, a0 sfence.vma zero, zero li a0, TRAPFRAME # restore all but a0 from TRAPFRAME ld ra, 40(a0) ld sp, 48(a0) ld gp, 56(a0) ld tp, 64(a0) ld t0, 72(a0) ld t1, 80(a0) ld t2, 88(a0) ld s0, 96(a0) ld s1, 104(a0) ld a1, 120(a0) ld a2, 128(a0) ld a3, 136(a0) ld a4, 144(a0) ld a5, 152(a0) ld a6, 160(a0) ld a7, 168(a0) ld s2, 176(a0) ld s3, 184(a0) ld s4, 192(a0) ld s5, 200(a0) ld s6, 208(a0) ld s7, 216(a0) ld s8, 224(a0) ld s9, 232(a0) ld s10, 240(a0) ld s11, 248(a0) ld t3, 256(a0) ld t4, 264(a0) ld t5, 272(a0) ld t6, 280(a0) # restore user a0 ld a0, 112(a0) # return to user mode and user pc. sret
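uservec stashes user a0 in sscratch, spills the remaining user registers into the fixed TRAPFRAME page, then loads the kernel stack pointer, hartid, usertrap address and kernel satp from the first slots of the same structure; userret runs the sequence in reverse. The load/store offsets imply a trapframe layout like the sketch below (field names follow xv6 conventions and the comments above; offset 24 is not touched in this file and is assumed to hold the saved user program counter).

// Trapframe layout implied by the offsets used in uservec/userret above.
// Offsets 0..=32 are maintained by the kernel; 40..=280 hold user registers.
#[repr(C)]
struct TrapFrame {
    kernel_satp: u64,   //   0: ld t1, 0(a0)
    kernel_sp: u64,     //   8: ld sp, 8(a0)
    kernel_trap: u64,   //  16: ld t0, 16(a0) -> address of usertrap()
    epc: u64,           //  24: assumed saved user pc (not referenced here)
    kernel_hartid: u64, //  32: ld tp, 32(a0)
    ra: u64,            //  40
    sp: u64,            //  48
    gp: u64,            //  56
    tp: u64,            //  64
    t0: u64,            //  72
    t1: u64,            //  80
    t2: u64,            //  88
    s0: u64,            //  96
    s1: u64,            // 104
    a0: u64,            // 112: user a0, staged via sscratch
    a1: u64,            // 120
    a2: u64,            // 128
    a3: u64,            // 136
    a4: u64,            // 144
    a5: u64,            // 152
    a6: u64,            // 160
    a7: u64,            // 168
    s2: u64,            // 176
    s3: u64,            // 184
    s4: u64,            // 192
    s5: u64,            // 200
    s6: u64,            // 208
    s7: u64,            // 216
    s8: u64,            // 224
    s9: u64,            // 232
    s10: u64,           // 240
    s11: u64,           // 248
    t3: u64,            // 256
    t4: u64,            // 264
    t5: u64,            // 272
    t6: u64,            // 280
}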
esyywar/rust_books
9,889
chapter_1/hello_world/main.s
.file "main.4f53ca21a4bdf013-cgu.0" .section .text._ZN3std2rt10lang_start17hb927c81c6b10103cE,"ax",@progbits .hidden _ZN3std2rt10lang_start17hb927c81c6b10103cE .globl _ZN3std2rt10lang_start17hb927c81c6b10103cE .p2align 4 .type _ZN3std2rt10lang_start17hb927c81c6b10103cE,@function _ZN3std2rt10lang_start17hb927c81c6b10103cE: .cfi_startproc pushq %rax .cfi_def_cfa_offset 16 movl %ecx, %eax movq %rdx, %rcx movq %rsi, %rdx movq %rdi, (%rsp) movq %rsp, %rdi leaq .Lanon.25f61a6dee3dd1753cfa72cbcf4c5df7.0(%rip), %rsi movzbl %al, %r8d callq *_ZN3std2rt19lang_start_internal17ha8ef919ae4984948E@GOTPCREL(%rip) popq %rcx .cfi_def_cfa_offset 8 retq .Lfunc_end0: .size _ZN3std2rt10lang_start17hb927c81c6b10103cE, .Lfunc_end0-_ZN3std2rt10lang_start17hb927c81c6b10103cE .cfi_endproc .section ".text._ZN3std2rt10lang_start28_$u7b$$u7b$closure$u7d$$u7d$17hea4213339228e556E","ax",@progbits .p2align 4 .type _ZN3std2rt10lang_start28_$u7b$$u7b$closure$u7d$$u7d$17hea4213339228e556E,@function _ZN3std2rt10lang_start28_$u7b$$u7b$closure$u7d$$u7d$17hea4213339228e556E: .cfi_startproc pushq %rax .cfi_def_cfa_offset 16 movq (%rdi), %rdi callq _ZN3std3sys9backtrace28__rust_begin_short_backtrace17h6cb232b6391f0ff5E callq _ZN54_$LT$$LP$$RP$$u20$as$u20$std..process..Termination$GT$6report17hd3b5c0ed204c73c9E movzbl %al, %eax popq %rcx .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size _ZN3std2rt10lang_start28_$u7b$$u7b$closure$u7d$$u7d$17hea4213339228e556E, .Lfunc_end1-_ZN3std2rt10lang_start28_$u7b$$u7b$closure$u7d$$u7d$17hea4213339228e556E .cfi_endproc .section .text._ZN3std3sys9backtrace28__rust_begin_short_backtrace17h6cb232b6391f0ff5E,"ax",@progbits .p2align 4 .type _ZN3std3sys9backtrace28__rust_begin_short_backtrace17h6cb232b6391f0ff5E,@function _ZN3std3sys9backtrace28__rust_begin_short_backtrace17h6cb232b6391f0ff5E: .cfi_startproc pushq %rax .cfi_def_cfa_offset 16 callq _ZN4core3ops8function6FnOnce9call_once17h09a44c220a263972E #APP #NO_APP popq %rax .cfi_def_cfa_offset 8 retq .Lfunc_end2: .size _ZN3std3sys9backtrace28__rust_begin_short_backtrace17h6cb232b6391f0ff5E, .Lfunc_end2-_ZN3std3sys9backtrace28__rust_begin_short_backtrace17h6cb232b6391f0ff5E .cfi_endproc .section ".text._ZN4core3fmt2rt38_$LT$impl$u20$core..fmt..Arguments$GT$9new_const17h550f1769c56aa16cE","ax",@progbits .p2align 4 .type _ZN4core3fmt2rt38_$LT$impl$u20$core..fmt..Arguments$GT$9new_const17h550f1769c56aa16cE,@function _ZN4core3fmt2rt38_$LT$impl$u20$core..fmt..Arguments$GT$9new_const17h550f1769c56aa16cE: .cfi_startproc movq %rdi, %rax movq %rsi, (%rdi) movq $1, 8(%rdi) movq .Lanon.25f61a6dee3dd1753cfa72cbcf4c5df7.1(%rip), %rdx movq .Lanon.25f61a6dee3dd1753cfa72cbcf4c5df7.1+8(%rip), %rcx movq %rdx, 32(%rdi) movq %rcx, 40(%rdi) movl $8, %ecx movq %rcx, 16(%rdi) movq $0, 24(%rdi) retq .Lfunc_end3: .size _ZN4core3fmt2rt38_$LT$impl$u20$core..fmt..Arguments$GT$9new_const17h550f1769c56aa16cE, .Lfunc_end3-_ZN4core3fmt2rt38_$LT$impl$u20$core..fmt..Arguments$GT$9new_const17h550f1769c56aa16cE .cfi_endproc .section ".text._ZN4core3ops8function6FnOnce40call_once$u7b$$u7b$vtable.shim$u7d$$u7d$17h1e564a62685d0748E","ax",@progbits .p2align 4 .type _ZN4core3ops8function6FnOnce40call_once$u7b$$u7b$vtable.shim$u7d$$u7d$17h1e564a62685d0748E,@function _ZN4core3ops8function6FnOnce40call_once$u7b$$u7b$vtable.shim$u7d$$u7d$17h1e564a62685d0748E: .cfi_startproc pushq %rax .cfi_def_cfa_offset 16 movq (%rdi), %rdi callq _ZN4core3ops8function6FnOnce9call_once17ha36a47d17391cb97E popq %rcx .cfi_def_cfa_offset 8 retq .Lfunc_end4: .size 
_ZN4core3ops8function6FnOnce40call_once$u7b$$u7b$vtable.shim$u7d$$u7d$17h1e564a62685d0748E, .Lfunc_end4-_ZN4core3ops8function6FnOnce40call_once$u7b$$u7b$vtable.shim$u7d$$u7d$17h1e564a62685d0748E .cfi_endproc .section .text._ZN4core3ops8function6FnOnce9call_once17h09a44c220a263972E,"ax",@progbits .p2align 4 .type _ZN4core3ops8function6FnOnce9call_once17h09a44c220a263972E,@function _ZN4core3ops8function6FnOnce9call_once17h09a44c220a263972E: .cfi_startproc pushq %rax .cfi_def_cfa_offset 16 callq *%rdi popq %rax .cfi_def_cfa_offset 8 retq .Lfunc_end5: .size _ZN4core3ops8function6FnOnce9call_once17h09a44c220a263972E, .Lfunc_end5-_ZN4core3ops8function6FnOnce9call_once17h09a44c220a263972E .cfi_endproc .section .text._ZN4core3ops8function6FnOnce9call_once17ha36a47d17391cb97E,"ax",@progbits .p2align 4 .type _ZN4core3ops8function6FnOnce9call_once17ha36a47d17391cb97E,@function _ZN4core3ops8function6FnOnce9call_once17ha36a47d17391cb97E: .Lfunc_begin0: .cfi_startproc .cfi_personality 155, DW.ref.rust_eh_personality .cfi_lsda 27, .Lexception0 subq $40, %rsp .cfi_def_cfa_offset 48 movq %rdi, 8(%rsp) .Ltmp0: leaq 8(%rsp), %rdi callq _ZN3std2rt10lang_start28_$u7b$$u7b$closure$u7d$$u7d$17hea4213339228e556E .Ltmp1: movl %eax, 4(%rsp) jmp .LBB6_3 .LBB6_1: movq 24(%rsp), %rdi callq _Unwind_Resume@PLT .LBB6_2: .Ltmp2: movq %rax, %rcx movl %edx, %eax movq %rcx, 24(%rsp) movl %eax, 32(%rsp) jmp .LBB6_1 .LBB6_3: movl 4(%rsp), %eax addq $40, %rsp .cfi_def_cfa_offset 8 retq .Lfunc_end6: .size _ZN4core3ops8function6FnOnce9call_once17ha36a47d17391cb97E, .Lfunc_end6-_ZN4core3ops8function6FnOnce9call_once17ha36a47d17391cb97E .cfi_endproc .section .gcc_except_table._ZN4core3ops8function6FnOnce9call_once17ha36a47d17391cb97E,"a",@progbits .p2align 2, 0x0 GCC_except_table6: .Lexception0: .byte 255 .byte 255 .byte 1 .uleb128 .Lcst_end0-.Lcst_begin0 .Lcst_begin0: .uleb128 .Ltmp0-.Lfunc_begin0 .uleb128 .Ltmp1-.Ltmp0 .uleb128 .Ltmp2-.Lfunc_begin0 .byte 0 .uleb128 .Ltmp1-.Lfunc_begin0 .uleb128 .Lfunc_end6-.Ltmp1 .byte 0 .byte 0 .Lcst_end0: .p2align 2, 0x0 .section ".text._ZN4core3ptr85drop_in_place$LT$std..rt..lang_start$LT$$LP$$RP$$GT$..$u7b$$u7b$closure$u7d$$u7d$$GT$17ha5bc91ec65b56c72E","ax",@progbits .p2align 4 .type _ZN4core3ptr85drop_in_place$LT$std..rt..lang_start$LT$$LP$$RP$$GT$..$u7b$$u7b$closure$u7d$$u7d$$GT$17ha5bc91ec65b56c72E,@function _ZN4core3ptr85drop_in_place$LT$std..rt..lang_start$LT$$LP$$RP$$GT$..$u7b$$u7b$closure$u7d$$u7d$$GT$17ha5bc91ec65b56c72E: .cfi_startproc retq .Lfunc_end7: .size _ZN4core3ptr85drop_in_place$LT$std..rt..lang_start$LT$$LP$$RP$$GT$..$u7b$$u7b$closure$u7d$$u7d$$GT$17ha5bc91ec65b56c72E, .Lfunc_end7-_ZN4core3ptr85drop_in_place$LT$std..rt..lang_start$LT$$LP$$RP$$GT$..$u7b$$u7b$closure$u7d$$u7d$$GT$17ha5bc91ec65b56c72E .cfi_endproc .section ".text._ZN54_$LT$$LP$$RP$$u20$as$u20$std..process..Termination$GT$6report17hd3b5c0ed204c73c9E","ax",@progbits .p2align 4 .type _ZN54_$LT$$LP$$RP$$u20$as$u20$std..process..Termination$GT$6report17hd3b5c0ed204c73c9E,@function _ZN54_$LT$$LP$$RP$$u20$as$u20$std..process..Termination$GT$6report17hd3b5c0ed204c73c9E: .cfi_startproc xorl %eax, %eax retq .Lfunc_end8: .size _ZN54_$LT$$LP$$RP$$u20$as$u20$std..process..Termination$GT$6report17hd3b5c0ed204c73c9E, .Lfunc_end8-_ZN54_$LT$$LP$$RP$$u20$as$u20$std..process..Termination$GT$6report17hd3b5c0ed204c73c9E .cfi_endproc .section .text._ZN4main4main17h8f1c9e3495794b54E,"ax",@progbits .p2align 4 .type _ZN4main4main17h8f1c9e3495794b54E,@function _ZN4main4main17h8f1c9e3495794b54E: .cfi_startproc subq $56, %rsp 
.cfi_def_cfa_offset 64 leaq 8(%rsp), %rdi leaq .Lanon.25f61a6dee3dd1753cfa72cbcf4c5df7.3(%rip), %rsi callq _ZN4core3fmt2rt38_$LT$impl$u20$core..fmt..Arguments$GT$9new_const17h550f1769c56aa16cE leaq 8(%rsp), %rdi callq *_ZN3std2io5stdio6_print17h915f3273edec6464E@GOTPCREL(%rip) addq $56, %rsp .cfi_def_cfa_offset 8 retq .Lfunc_end9: .size _ZN4main4main17h8f1c9e3495794b54E, .Lfunc_end9-_ZN4main4main17h8f1c9e3495794b54E .cfi_endproc .section .text.main,"ax",@progbits .globl main .p2align 4 .type main,@function main: .cfi_startproc pushq %rax .cfi_def_cfa_offset 16 movq %rsi, %rdx movslq %edi, %rsi leaq _ZN4main4main17h8f1c9e3495794b54E(%rip), %rdi xorl %ecx, %ecx callq _ZN3std2rt10lang_start17hb927c81c6b10103cE popq %rcx .cfi_def_cfa_offset 8 retq .Lfunc_end10: .size main, .Lfunc_end10-main .cfi_endproc .type .Lanon.25f61a6dee3dd1753cfa72cbcf4c5df7.0,@object .section .data.rel.ro..Lanon.25f61a6dee3dd1753cfa72cbcf4c5df7.0,"aw",@progbits .p2align 3, 0x0 .Lanon.25f61a6dee3dd1753cfa72cbcf4c5df7.0: .asciz "\000\000\000\000\000\000\000\000\b\000\000\000\000\000\000\000\b\000\000\000\000\000\000" .quad _ZN4core3ops8function6FnOnce40call_once$u7b$$u7b$vtable.shim$u7d$$u7d$17h1e564a62685d0748E .quad _ZN3std2rt10lang_start28_$u7b$$u7b$closure$u7d$$u7d$17hea4213339228e556E .quad _ZN3std2rt10lang_start28_$u7b$$u7b$closure$u7d$$u7d$17hea4213339228e556E .size .Lanon.25f61a6dee3dd1753cfa72cbcf4c5df7.0, 48 .type .Lanon.25f61a6dee3dd1753cfa72cbcf4c5df7.1,@object .section .rodata.cst16,"aM",@progbits,16 .p2align 3, 0x0 .Lanon.25f61a6dee3dd1753cfa72cbcf4c5df7.1: .zero 8 .zero 8 .size .Lanon.25f61a6dee3dd1753cfa72cbcf4c5df7.1, 16 .type .Lanon.25f61a6dee3dd1753cfa72cbcf4c5df7.2,@object .section .rodata..Lanon.25f61a6dee3dd1753cfa72cbcf4c5df7.2,"a",@progbits .Lanon.25f61a6dee3dd1753cfa72cbcf4c5df7.2: .ascii "Hello, world!\n" .size .Lanon.25f61a6dee3dd1753cfa72cbcf4c5df7.2, 14 .type .Lanon.25f61a6dee3dd1753cfa72cbcf4c5df7.3,@object .section .data.rel.ro..Lanon.25f61a6dee3dd1753cfa72cbcf4c5df7.3,"aw",@progbits .p2align 3, 0x0 .Lanon.25f61a6dee3dd1753cfa72cbcf4c5df7.3: .quad .Lanon.25f61a6dee3dd1753cfa72cbcf4c5df7.2 .asciz "\016\000\000\000\000\000\000" .size .Lanon.25f61a6dee3dd1753cfa72cbcf4c5df7.3, 16 .hidden DW.ref.rust_eh_personality .weak DW.ref.rust_eh_personality .section .data.DW.ref.rust_eh_personality,"awG",@progbits,DW.ref.rust_eh_personality,comdat .p2align 3, 0x0 .type DW.ref.rust_eh_personality,@object .size DW.ref.rust_eh_personality, 8 DW.ref.rust_eh_personality: .quad rust_eh_personality .ident "rustc version 1.88.0 (6b00bc388 2025-06-23)" .section ".note.GNU-stack","",@progbits
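The listing above is what rustc emits for the book's first program: main hands std::rt::lang_start a pointer to the Rust main, lang_start runs it through the generated closure shims, and the only payload is the 14-byte "Hello, world!\n" literal passed to core::fmt::Arguments::new_const. The source that produces it is presumably just:

fn main() {
    println!("Hello, world!");
}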
farlepet/rlboot
3,354
stage1/stage1.s
.code16 /* Source for boot sector. Must fit within 512 bytes. */ stack_top = 0x1000 /* Top of temporary stack (~2 KiB) */ IS_FLOPPY = 1 /* Need to reserve some bytes for the FAT12 data */ boot_sector_start: jmp start nop /* FAT header, bytes 3-29 */ fat.name: .ascii "lboot " /* 0x03 */ /* BIOS parameter block */ fat.bytes_per_sector: .skip 2 /* 0x0b */ fat.sectors_per_cluster: .skip 1 /* 0x0d */ fat.reserved_sectors: .skip 2 /* 0x0e */ fat.fat_copies: .skip 1 /* 0x10 */ fat.root_dir_entries: .skip 2 /* 0x11 */ fat.total_sectors: .skip 2 /* 0x13 */ fat.media_desc_type: .skip 1 /* 0x15 */ fat.sectors_per_fat: .skip 2 /* 0x16 */ fat.sectors_per_track: .skip 2 /* 0x18 */ fat.head_count: .skip 2 /* 0x1a */ fat.hidden_sectors: .skip 2 /* 0x1c */ /* @note The above only accounts for minimal FAT12. FAT16/32 take up more space here. */ .skip (90 - (. - boot_sector_start)) .global start start: cli /* Setup segment registers */ xorw %ax, %ax movw %ax, %ds movw %ax, %es movw %ax, %fs movw %ax, %gs /* Relocate to 0x1000 */ movw $0x7c00, %si movw $0x1000, %di movw $512, %cx 1: lodsb stosb dec %cx jnz 1b /* Jump to the relocated code, and set CS to 0 */ ljmp $0, $1f 1: /* Setup stack */ movw $stack_top, %sp movw %sp, %bp sti /* Save boot drive */ .if !IS_FLOPPY cmp $0, %dl jne .store_dl movb $0x80, %dl /* BIOS may have passed us a bogus drive */ .endif .store_dl: movb %dl, boot_drive /* Set video mode and clear screen */ movw $0x0003, %ax /* 80x25 */ int $0x10 /* Print boot message */ movw $boot_message, %si call msg_print /* Save address to jump to before it gets modified. */ movw (bootldr.stage2_addr), %bx movw %bx, (stage2_addr_bkp) .reset_drive: movb $0x00, %ah /* Reset drive */ movb boot_drive, %dl /* Boot drive */ int $0x13 /* Read stage 2 loader into memory */ call read_sector_map /* Jmp into stage2 */ movw (stage2_addr_bkp), %ax jmp *%ax .include "inc/read_sector.inc" .include "inc/read_sector_map.inc" disk_error: movw $disk_err_message, %si call msg_print jmp . /* Print message. * * Parameters: * %si: Pointer to string to print. */ msg_print: pusha movb $0x0E, %ah /* Write character */ xorw %bx, %bx /* Page, no color for text mode */ .loop: lodsb /* Load character and increment %si */ cmpb $0, %al /* Exit if character is null */ je .end int $0x10 /* Print character */ jmp .loop .end: popa ret boot_drive: .skip 1 /* Drive the BIOS tells us we booted from */ stage2_addr_bkp: .skip 2 boot_message: .asciz "Stage1.\r\n" disk_err_message: .asciz "Disk read error." sector_read_success_message: .asciz "." /* Place the following at the end of the boot sector */ .skip (506 - (. - boot_sector_start)) /* Bootloader-specific header data. Data here will eventually be populated by the build tool. */ bootldr.stage2_map_sector: .word 0x0000 /* Sector containing stage 2 sector map, 0 indexed */ bootldr.stage2_addr: .word 0x1200 /* Address to which to load stage 2 loader */ /* Boot sector magic number */ .word 0xAA55
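Stage 1 relocates itself from 0x7c00 to 0x1000, reads stage 2 through a sector map, and jumps to the saved stage 2 address. The two words at offsets 506 and 508 (bootldr.stage2_map_sector and bootldr.stage2_addr) are placeholders that, per the comment above, the build tool eventually populates, followed by the mandatory 0xAA55 signature at offset 510. Below is a hedged Rust sketch of that patch step; the tool structure, file name and example values are assumptions, only the offsets and the little-endian layout come from the source.

use std::fs;

// Patch the two bootloader-specific words at the end of a 512-byte stage-1
// image. Offsets mirror the `.skip (506 - ...)` layout above; x86 real mode
// is little-endian throughout.
fn patch_stage1(image: &mut [u8], stage2_map_sector: u16, stage2_addr: u16) {
    assert_eq!(image.len(), 512, "stage 1 must be exactly one sector");
    image[506..508].copy_from_slice(&stage2_map_sector.to_le_bytes());
    image[508..510].copy_from_slice(&stage2_addr.to_le_bytes());
    // The boot signature is already emitted by the `.word 0xAA55` directive,
    // but it is cheap to double-check before writing the image back out.
    assert_eq!(&image[510..512], &0xAA55u16.to_le_bytes());
}

fn main() -> std::io::Result<()> {
    let mut image = fs::read("stage1.bin")?;   // hypothetical file name
    patch_stage1(&mut image, 0x0001, 0x1200);  // example values only
    fs::write("stage1.bin", &image)
}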
farlepet/rlboot
1,328
src/intr/int_wrappers.s
.extern interrupt_wrapper

.macro isr_wrapper num err
.global isr_wrapper_\num
.type isr_wrapper_\num, @function
isr_wrapper_\num:
.if !\err
    push $0           # Errcode
.endif
    pusha
    push $\num
    call interrupt_wrapper
    addl $4, %esp     # Interrupt number
    popa
.if !\err
    addl $4, %esp     # Errcode
.endif
    iret
.size isr_wrapper_\num, (. - isr_wrapper_\num)
.endm

# Exceptions
isr_wrapper 0, 0
isr_wrapper 1, 0
isr_wrapper 2, 0
isr_wrapper 3, 0
isr_wrapper 4, 0
isr_wrapper 5, 0
isr_wrapper 6, 0
isr_wrapper 7, 0
isr_wrapper 8, 1
isr_wrapper 9, 0
isr_wrapper 10, 1
isr_wrapper 11, 1
isr_wrapper 12, 1
isr_wrapper 13, 1
isr_wrapper 14, 1
isr_wrapper 15, 0
isr_wrapper 16, 0
isr_wrapper 17, 1
isr_wrapper 18, 0
isr_wrapper 19, 0
isr_wrapper 20, 0
isr_wrapper 21, 0
isr_wrapper 22, 0
isr_wrapper 23, 0
isr_wrapper 24, 0
isr_wrapper 25, 0
isr_wrapper 26, 0
isr_wrapper 27, 0
isr_wrapper 28, 0
isr_wrapper 29, 0
isr_wrapper 30, 1
isr_wrapper 31, 0

# IRQs
isr_wrapper 32, 0
isr_wrapper 33, 0
isr_wrapper 34, 0
isr_wrapper 35, 0
isr_wrapper 36, 0
isr_wrapper 37, 0
isr_wrapper 38, 0
isr_wrapper 39, 0
isr_wrapper 40, 0
isr_wrapper 41, 0
isr_wrapper 42, 0
isr_wrapper 43, 0
isr_wrapper 44, 0
isr_wrapper 45, 0
isr_wrapper 46, 0
isr_wrapper 47, 0
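The macro expands to 48 stubs; the exception vectors for which the CPU pushes a hardware error code (8, 10-14, 17 and 30) skip the dummy push $0, so interrupt_wrapper always sees the same stack shape, and the vector number is passed as a single cdecl argument. A sketch of the Rust-side dispatcher that signature implies (the split between exceptions and remapped IRQs follows the comments above; handler names are illustrative):

// Sketch only: the stubs above call this with the vector number as the sole
// cdecl argument (push $\num / call / addl $4, %esp).
#[no_mangle]
pub extern "C" fn interrupt_wrapper(num: u32) {
    match num {
        0..=31 => handle_exception(num),  // CPU exceptions
        32..=47 => handle_irq(num - 32),  // remapped PIC IRQs
        _ => {}                           // not generated by the stubs
    }
}

fn handle_exception(_vector: u32) { /* sketch */ }
fn handle_irq(_irq: u32) { /* sketch */ }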
farlepet/rlboot
3,874
src/startup/startup.s
.code16 /* Entrypoint for stage 2 */ .section .entrypoint .extern ruststart .global start .type start, @function start: /* Print boot message */ movw $boot_message, %si call msg_print /* Switching to protected mode, following recommendations laid out in * Intel's Software Developer Manual, Vol. 3A, section 9.9.1 - Switching to * Protected Mode. */ /* 1. Disable interrupts, and NMI */ cli /* Disable NMI */ inb $0x70, %al orb $0x80, %al outb %al, $0x80 inb $0x71, %al /* 2. Setup GDT */ lgdt (gdtr) /* 3. Enable PE in CR0 */ movl %cr0, %eax orl $1, %eax movl %eax, %cr0 /* 4. Far jump */ ljmpl $0x08, $1f .code32 1: /* 5-7. N/A */ /* 8. Set up task state segment */ movw $0x28, %ax ltr %ax /* 9. Reload segment registers DS, SS, ES, FS, GS */ movw $0x10, %ax movw %ax, %ds movw %ax, %ss movw %ax, %es movw %ax, %fs movw %ax, %gs /* 10. Setup IDT */ /* 11. Re-enable interrupts and NMI */ // These are handled by the Rust code /* Enable NMI */ /*inb $0x70, %al andb $0x7F, %al outb %al, $0x80 inb $0x71, %al*/ /* Enable A20 line @todo Attempt multiple methods, perhaps in C */ inb $0x92, %al testb $0x02, %al jnz 1f orb $0x02, %al outb %al, $0x92 1: /* Clear any junk in the high side of edx (drive number) */ andl $0xff, %edx /* Call into C code */ pushl %edx call ruststart jmp . .size start, (. - start) /* Print message. * * Parameters: * %si: Pointer to string to print. */ .type msg_print, @function msg_print: pusha movb $0x0E, %ah /* Write character */ xorw %bx, %bx /* Page, no color for text mode */ .loop: lodsb /* Load character and increment %si */ cmpb $0, %al /* Exit if character is null */ je .end int $0x10 /* Print character */ jmp .loop .end: popa ret .size msg_print, (. - msg_print) boot_message: .asciz "\r\nStage2.\r\n" gdtr: .word ((gdt_end - gdt) - 1) /* Limit */ .long gdt /* Base */ .align 8 /* @note Flat memory model with no protection */ gdt: /* 0x00: Null descriptor */ .quad 0x00000000 /* 0x08: 32-bit Code segment */ .long 0x0000FFFF .long 0x00CF9A00 /* 0x10: 32-bit Data segment */ .long 0x0000FFFF .long 0x00CF9200 /* 0x18: 16-bit Code segment */ .long 0x0000FFFF .long 0x000F9A00 /* 0x20: 16-bit Data segment */ .long 0x0000FFFF .long 0x000F9200 /* 0x28: Task segment */ .word ((tss_end - tss) - 1) .word tss /* @note Since we are in the lower 16-bits of memory, we can just use the address here */ .long 0x00408900 gdt_end: /* @note Within the bootloader, we will never be in any ring other than 0 */ tss: .long 0x00000000 /* Prev task link */ /* @note Since we are never leaving ring 0, I don't _think_ we need to set ESP0 */ .long 0x00000000 /* ESP0 */ .long 0x00000010 /* SS0 */ .long 0x00000000 /* ESP1 */ .long 0x00000010 /* SS1 */ .long 0x00000000 /* ESP2 */ .long 0x00000010 /* SS2 */ .long 0x00000000 /* CR3 */ .long 0x00000000 /* EIP */ .long 0x00000000 /* EFLAGS */ .long 0x00000000 /* EAX */ .long 0x00000000 /* ECX */ .long 0x00000000 /* EDX */ .long 0x00000000 /* EBX */ .long 0x00000000 /* ESP */ .long 0x00000000 /* EBP */ .long 0x00000000 /* ESI */ .long 0x00000000 /* EDI */ .long 0x00000010 /* ES */ .long 0x0000000C /* CS */ .long 0x00000010 /* SS */ .long 0x00000010 /* DS */ .long 0x00000010 /* FS */ .long 0x00000010 /* GS */ .long 0x00000000 /* LDT selector */ .word 0x0000 /* Debug trap enable */ .word ((tss_end - tss) - 1) /* IO Map Base Address */ tss_end:
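The protected-mode switch relies on the flat GDT at the end of the file; each .long pair is one 8-byte descriptor packing limit, base, access byte and flags into fixed bit positions. The sketch below rebuilds two of the descriptors above from their parts to make the encoding explicit; the packing rules are the standard x86 ones, nothing here is specific to this bootloader.

// Pack a GDT segment descriptor from base, 20-bit limit, access byte and the
// 4-bit flags nibble (G, D/B, L, AVL).
fn gdt_descriptor(base: u32, limit: u32, access: u8, flags: u8) -> u64 {
    (limit as u64 & 0xFFFF)                       // limit[15:0]
        | ((base as u64 & 0x00FF_FFFF) << 16)     // base[23:0]
        | ((access as u64) << 40)                 // access byte
        | (((limit as u64 >> 16) & 0xF) << 48)    // limit[19:16]
        | (((flags as u64) & 0xF) << 52)          // flags
        | (((base as u64 >> 24) & 0xFF) << 56)    // base[31:24]
}

fn main() {
    // 0x08: flat 32-bit code segment (G=1, D=1, present, ring 0, code, readable)
    assert_eq!(gdt_descriptor(0, 0xF_FFFF, 0x9A, 0xC), 0x00CF_9A00_0000_FFFF);
    // 0x18: 16-bit code segment used on the way back to real mode (flags = 0)
    assert_eq!(gdt_descriptor(0, 0xF_FFFF, 0x9A, 0x0), 0x000F_9A00_0000_FFFF);
}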
farlepet/rlboot
3,367
src/bios/bios_asm.s
.code32 .extern bios_idt /* int bios_call(bios_call_t *) */ .global bios_call_asm .type bios_call_asm, @function bios_call_asm: pushal /* Save BIOS call parameter pointer */ movl 36(%esp), %eax movl %eax, (_bios_call_ptr) /* Save current IDTR and GDTR */ sidt (_saved_idtr) sgdt (_saved_gdtr) /* * Enter real mode */ /* 1. Disable interrupts */ /* Interrupt disable is done in bios_call() */ /* Disable NMI */ inb $0x70, %al orb $0x80, %al outb %al, $0x80 inb $0x71, %al /* 2. Disable paging (N/A) */ /* 3. Transfer control to segment with limit of 0xFFFF */ ljmp $0x18, $1f .code16 1: /* 4. Set data segments to one with limit of 0xFFFF */ movw $0x20, %ax movw %ax, %ds movw %ax, %ss movw %ax, %es movw %ax, %fs movw %ax, %gs /* 5. Load real mode IDT */ lidt (_realmode_idtr) /* 6. Clear PE flag in CR0 */ movl %cr0, %eax andl $0xFFFFFFFE, %eax movl %eax, %cr0 /* 7. Far jump into real-mode code */ ljmp $0x00, $1f 1: /* 8. Setup data segment registers */ xorw %ax, %ax movw %ax, %ds movw %ax, %ss movw %ax, %es movw %ax, %fs movw %ax, %gs /* 9. Re-enable interrupts */ sti /* Enable NMI */ inb $0x70, %al andb $0x7F, %al outb %al, $0x80 inb $0x71, %al /* * Call BIOS function */ movw (_bios_call_ptr), %di /* Load interrupt number */ movb (%di), %al movb %al, (_int_id) /* Load parameters */ movl 4(%di), %eax movl 8(%di), %ebx movl 12(%di), %ecx movl 16(%di), %edx movl 20(%di), %esi movl 24(%di), %edi /* @note Not loading EFLAGS, not sure any interrupts use any of those bits as input */ /* @note INT only accepts immediates, so we need to modify the code */ .byte 0xCD /* INT */ _int_id: .byte 0x00 /* imm8 */ /* Save results */ pushw %bp movw (_bios_call_ptr), %bp movl %eax, 4(%bp) movl %ebx, 8(%bp) movl %ecx, 12(%bp) movl %edx, 16(%bp) movl %esi, 20(%bp) movl %edi, 24(%bp) /* Save EFLAGS */ pushfl popl %eax movl %eax, 28(%bp) popw %bp /* * Enter protected mode */ /* 1. Disable interrupts */ cli /* 2. Setup GDT */ lgdt (_saved_gdtr) /* 3. Enable PE in CR0 */ movl %cr0, %eax orl $1, %eax movl %eax, %cr0 /* 4. Far jump */ ljmpl $0x08, $1f .code32 1: /* 5-7. N/A */ /* 8. Set up task state segment */ /* @todo This is currently causing a GPF - Not sure it's strictly required * though since we set it up earlier. */ /*movw $0x28, %ax ltr %ax*/ /* 9. Reload segment registers DS, SS, ES, FS, GS */ movw $0x10, %ax movw %ax, %ds movw %ax, %ss movw %ax, %es movw %ax, %fs movw %ax, %gs /* 10. Setup IDT */ lidt (_saved_idtr) /* 11. Re-enable interrupts */ /* Interrupt enable is done in bios_call() */ /* Enable NMI */ /*inb $0x70, %al andb $0x7F, %al outb %al, $0x80 inb $0x71, %al*/ popal ret .size bios_call_asm, (. - bios_call_asm) _bios_call_ptr: .skip 4 _saved_idtr: .skip 6 _saved_gdtr: .skip 6 _realmode_idtr: .word 0x03FF .long 0x00000000
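bios_call_asm takes a pointer to a parameter block whose first byte is the interrupt number (copied into the self-modifying int imm8) and whose dwords at offsets 4 through 24 carry eax/ebx/ecx/edx/esi/edi both into and out of the BIOS call, with the post-call EFLAGS written back at offset 28. A layout sketch matching those offsets (field names are guesses based on the comments; only the offsets come from the code):

// Parameter block implied by the offsets bios_call_asm reads and writes.
#[repr(C)]
struct BiosCall {
    int_n: u8,     //  0: copied into the self-modified `int imm8`
    _pad: [u8; 3], //  1..=3: alignment padding
    eax: u32,      //  4: in/out
    ebx: u32,      //  8: in/out
    ecx: u32,      // 12: in/out
    edx: u32,      // 16: in/out
    esi: u32,      // 20: in/out
    edi: u32,      // 24: in/out
    eflags: u32,   // 28: out only, sampled via pushfl/popl after the INT
}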
fatiimajamiil/rustpad-custom
40,185
.cargo/registry/src/index.crates.io-6f17d22bba15001f/ring-0.17.14/pregenerated/chacha-armv8-ios64.S
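The generated AArch64 ChaCha20 below keeps the "expand 32-byte k" constant in Lsigma and, in the scalar path, interleaves four quarter-rounds across the w registers per loop iteration; the ror #16/#20/#24/#25 amounts are simply the right-rotate forms of the standard left rotations by 16, 12, 8 and 7. For reference, one quarter-round in plain Rust (standard RFC 8439 arithmetic, nothing specific to this port):

// One ChaCha20 quarter-round. The assembly below runs four of these in
// parallel per iteration, using `ror #(32 - n)` in place of each `rotl n`.
fn quarter_round(state: &mut [u32; 16], a: usize, b: usize, c: usize, d: usize) {
    state[a] = state[a].wrapping_add(state[b]);
    state[d] = (state[d] ^ state[a]).rotate_left(16);
    state[c] = state[c].wrapping_add(state[d]);
    state[b] = (state[b] ^ state[c]).rotate_left(12);
    state[a] = state[a].wrapping_add(state[b]);
    state[d] = (state[d] ^ state[a]).rotate_left(8);
    state[c] = state[c].wrapping_add(state[d]);
    state[b] = (state[b] ^ state[c]).rotate_left(7);
}

The mov x4,#10 / Loop structure in the scalar path corresponds to ten of these double-rounds (a column pass followed by a diagonal pass), after which the input block is added back in before the keystream is XORed with the input.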
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__) .section __TEXT,__const .align 5 Lsigma: .quad 0x3320646e61707865,0x6b20657479622d32 // endian-neutral Lone: .long 1,0,0,0 .byte 67,104,97,67,104,97,50,48,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .text .globl _ChaCha20_ctr32_nohw .private_extern _ChaCha20_ctr32_nohw .align 5 _ChaCha20_ctr32_nohw: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-96]! add x29,sp,#0 adrp x5,Lsigma@PAGE add x5,x5,Lsigma@PAGEOFF stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] sub sp,sp,#64 ldp x22,x23,[x5] // load sigma ldp x24,x25,[x3] // load key ldp x26,x27,[x3,#16] ldp x28,x30,[x4] // load counter #ifdef __AARCH64EB__ ror x24,x24,#32 ror x25,x25,#32 ror x26,x26,#32 ror x27,x27,#32 ror x28,x28,#32 ror x30,x30,#32 #endif Loop_outer: mov w5,w22 // unpack key block lsr x6,x22,#32 mov w7,w23 lsr x8,x23,#32 mov w9,w24 lsr x10,x24,#32 mov w11,w25 lsr x12,x25,#32 mov w13,w26 lsr x14,x26,#32 mov w15,w27 lsr x16,x27,#32 mov w17,w28 lsr x19,x28,#32 mov w20,w30 lsr x21,x30,#32 mov x4,#10 subs x2,x2,#64 Loop: sub x4,x4,#1 add w5,w5,w9 add w6,w6,w10 add w7,w7,w11 add w8,w8,w12 eor w17,w17,w5 eor w19,w19,w6 eor w20,w20,w7 eor w21,w21,w8 ror w17,w17,#16 ror w19,w19,#16 ror w20,w20,#16 ror w21,w21,#16 add w13,w13,w17 add w14,w14,w19 add w15,w15,w20 add w16,w16,w21 eor w9,w9,w13 eor w10,w10,w14 eor w11,w11,w15 eor w12,w12,w16 ror w9,w9,#20 ror w10,w10,#20 ror w11,w11,#20 ror w12,w12,#20 add w5,w5,w9 add w6,w6,w10 add w7,w7,w11 add w8,w8,w12 eor w17,w17,w5 eor w19,w19,w6 eor w20,w20,w7 eor w21,w21,w8 ror w17,w17,#24 ror w19,w19,#24 ror w20,w20,#24 ror w21,w21,#24 add w13,w13,w17 add w14,w14,w19 add w15,w15,w20 add w16,w16,w21 eor w9,w9,w13 eor w10,w10,w14 eor w11,w11,w15 eor w12,w12,w16 ror w9,w9,#25 ror w10,w10,#25 ror w11,w11,#25 ror w12,w12,#25 add w5,w5,w10 add w6,w6,w11 add w7,w7,w12 add w8,w8,w9 eor w21,w21,w5 eor w17,w17,w6 eor w19,w19,w7 eor w20,w20,w8 ror w21,w21,#16 ror w17,w17,#16 ror w19,w19,#16 ror w20,w20,#16 add w15,w15,w21 add w16,w16,w17 add w13,w13,w19 add w14,w14,w20 eor w10,w10,w15 eor w11,w11,w16 eor w12,w12,w13 eor w9,w9,w14 ror w10,w10,#20 ror w11,w11,#20 ror w12,w12,#20 ror w9,w9,#20 add w5,w5,w10 add w6,w6,w11 add w7,w7,w12 add w8,w8,w9 eor w21,w21,w5 eor w17,w17,w6 eor w19,w19,w7 eor w20,w20,w8 ror w21,w21,#24 ror w17,w17,#24 ror w19,w19,#24 ror w20,w20,#24 add w15,w15,w21 add w16,w16,w17 add w13,w13,w19 add w14,w14,w20 eor w10,w10,w15 eor w11,w11,w16 eor w12,w12,w13 eor w9,w9,w14 ror w10,w10,#25 ror w11,w11,#25 ror w12,w12,#25 ror w9,w9,#25 cbnz x4,Loop add w5,w5,w22 // accumulate key block add x6,x6,x22,lsr#32 add w7,w7,w23 add x8,x8,x23,lsr#32 add w9,w9,w24 add x10,x10,x24,lsr#32 add w11,w11,w25 add x12,x12,x25,lsr#32 add w13,w13,w26 add x14,x14,x26,lsr#32 add w15,w15,w27 add x16,x16,x27,lsr#32 add w17,w17,w28 add x19,x19,x28,lsr#32 add w20,w20,w30 add x21,x21,x30,lsr#32 b.lo Ltail add x5,x5,x6,lsl#32 // pack add x7,x7,x8,lsl#32 ldp x6,x8,[x1,#0] // load input add x9,x9,x10,lsl#32 add x11,x11,x12,lsl#32 ldp x10,x12,[x1,#16] add x13,x13,x14,lsl#32 add x15,x15,x16,lsl#32 ldp x14,x16,[x1,#32] add x17,x17,x19,lsl#32 add x20,x20,x21,lsl#32 ldp x19,x21,[x1,#48] add x1,x1,#64 #ifdef __AARCH64EB__ rev x5,x5 
rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif eor x5,x5,x6 eor x7,x7,x8 eor x9,x9,x10 eor x11,x11,x12 eor x13,x13,x14 eor x15,x15,x16 eor x17,x17,x19 eor x20,x20,x21 stp x5,x7,[x0,#0] // store output add x28,x28,#1 // increment counter stp x9,x11,[x0,#16] stp x13,x15,[x0,#32] stp x17,x20,[x0,#48] add x0,x0,#64 b.hi Loop_outer ldp x19,x20,[x29,#16] add sp,sp,#64 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .align 4 Ltail: add x2,x2,#64 Less_than_64: sub x0,x0,#1 add x1,x1,x2 add x0,x0,x2 add x4,sp,x2 neg x2,x2 add x5,x5,x6,lsl#32 // pack add x7,x7,x8,lsl#32 add x9,x9,x10,lsl#32 add x11,x11,x12,lsl#32 add x13,x13,x14,lsl#32 add x15,x15,x16,lsl#32 add x17,x17,x19,lsl#32 add x20,x20,x21,lsl#32 #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif stp x5,x7,[sp,#0] stp x9,x11,[sp,#16] stp x13,x15,[sp,#32] stp x17,x20,[sp,#48] Loop_tail: ldrb w10,[x1,x2] ldrb w11,[x4,x2] add x2,x2,#1 eor w10,w10,w11 strb w10,[x0,x2] cbnz x2,Loop_tail stp xzr,xzr,[sp,#0] stp xzr,xzr,[sp,#16] stp xzr,xzr,[sp,#32] stp xzr,xzr,[sp,#48] ldp x19,x20,[x29,#16] add sp,sp,#64 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .globl _ChaCha20_ctr32_neon .private_extern _ChaCha20_ctr32_neon .align 5 _ChaCha20_ctr32_neon: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-96]! add x29,sp,#0 adrp x5,Lsigma@PAGE add x5,x5,Lsigma@PAGEOFF stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] cmp x2,#512 b.hs L512_or_more_neon sub sp,sp,#64 ldp x22,x23,[x5] // load sigma ld1 {v24.4s},[x5],#16 ldp x24,x25,[x3] // load key ldp x26,x27,[x3,#16] ld1 {v25.4s,v26.4s},[x3] ldp x28,x30,[x4] // load counter ld1 {v27.4s},[x4] ld1 {v31.4s},[x5] #ifdef __AARCH64EB__ rev64 v24.4s,v24.4s ror x24,x24,#32 ror x25,x25,#32 ror x26,x26,#32 ror x27,x27,#32 ror x28,x28,#32 ror x30,x30,#32 #endif add v27.4s,v27.4s,v31.4s // += 1 add v28.4s,v27.4s,v31.4s add v29.4s,v28.4s,v31.4s shl v31.4s,v31.4s,#2 // 1 -> 4 Loop_outer_neon: mov w5,w22 // unpack key block lsr x6,x22,#32 mov v0.16b,v24.16b mov w7,w23 lsr x8,x23,#32 mov v4.16b,v24.16b mov w9,w24 lsr x10,x24,#32 mov v16.16b,v24.16b mov w11,w25 mov v1.16b,v25.16b lsr x12,x25,#32 mov v5.16b,v25.16b mov w13,w26 mov v17.16b,v25.16b lsr x14,x26,#32 mov v3.16b,v27.16b mov w15,w27 mov v7.16b,v28.16b lsr x16,x27,#32 mov v19.16b,v29.16b mov w17,w28 mov v2.16b,v26.16b lsr x19,x28,#32 mov v6.16b,v26.16b mov w20,w30 mov v18.16b,v26.16b lsr x21,x30,#32 mov x4,#10 subs x2,x2,#256 Loop_neon: sub x4,x4,#1 add v0.4s,v0.4s,v1.4s add w5,w5,w9 add v4.4s,v4.4s,v5.4s add w6,w6,w10 add v16.4s,v16.4s,v17.4s add w7,w7,w11 eor v3.16b,v3.16b,v0.16b add w8,w8,w12 eor v7.16b,v7.16b,v4.16b eor w17,w17,w5 eor v19.16b,v19.16b,v16.16b eor w19,w19,w6 rev32 v3.8h,v3.8h eor w20,w20,w7 rev32 v7.8h,v7.8h eor w21,w21,w8 rev32 v19.8h,v19.8h ror w17,w17,#16 add v2.4s,v2.4s,v3.4s ror w19,w19,#16 add v6.4s,v6.4s,v7.4s ror w20,w20,#16 add v18.4s,v18.4s,v19.4s ror w21,w21,#16 eor v20.16b,v1.16b,v2.16b add w13,w13,w17 eor v21.16b,v5.16b,v6.16b add w14,w14,w19 eor v22.16b,v17.16b,v18.16b add w15,w15,w20 ushr v1.4s,v20.4s,#20 add w16,w16,w21 ushr v5.4s,v21.4s,#20 eor w9,w9,w13 ushr v17.4s,v22.4s,#20 eor w10,w10,w14 sli v1.4s,v20.4s,#12 eor w11,w11,w15 sli v5.4s,v21.4s,#12 eor w12,w12,w16 sli v17.4s,v22.4s,#12 ror 
w9,w9,#20 add v0.4s,v0.4s,v1.4s ror w10,w10,#20 add v4.4s,v4.4s,v5.4s ror w11,w11,#20 add v16.4s,v16.4s,v17.4s ror w12,w12,#20 eor v20.16b,v3.16b,v0.16b add w5,w5,w9 eor v21.16b,v7.16b,v4.16b add w6,w6,w10 eor v22.16b,v19.16b,v16.16b add w7,w7,w11 ushr v3.4s,v20.4s,#24 add w8,w8,w12 ushr v7.4s,v21.4s,#24 eor w17,w17,w5 ushr v19.4s,v22.4s,#24 eor w19,w19,w6 sli v3.4s,v20.4s,#8 eor w20,w20,w7 sli v7.4s,v21.4s,#8 eor w21,w21,w8 sli v19.4s,v22.4s,#8 ror w17,w17,#24 add v2.4s,v2.4s,v3.4s ror w19,w19,#24 add v6.4s,v6.4s,v7.4s ror w20,w20,#24 add v18.4s,v18.4s,v19.4s ror w21,w21,#24 eor v20.16b,v1.16b,v2.16b add w13,w13,w17 eor v21.16b,v5.16b,v6.16b add w14,w14,w19 eor v22.16b,v17.16b,v18.16b add w15,w15,w20 ushr v1.4s,v20.4s,#25 add w16,w16,w21 ushr v5.4s,v21.4s,#25 eor w9,w9,w13 ushr v17.4s,v22.4s,#25 eor w10,w10,w14 sli v1.4s,v20.4s,#7 eor w11,w11,w15 sli v5.4s,v21.4s,#7 eor w12,w12,w16 sli v17.4s,v22.4s,#7 ror w9,w9,#25 ext v2.16b,v2.16b,v2.16b,#8 ror w10,w10,#25 ext v6.16b,v6.16b,v6.16b,#8 ror w11,w11,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w12,w12,#25 ext v3.16b,v3.16b,v3.16b,#12 ext v7.16b,v7.16b,v7.16b,#12 ext v19.16b,v19.16b,v19.16b,#12 ext v1.16b,v1.16b,v1.16b,#4 ext v5.16b,v5.16b,v5.16b,#4 ext v17.16b,v17.16b,v17.16b,#4 add v0.4s,v0.4s,v1.4s add w5,w5,w10 add v4.4s,v4.4s,v5.4s add w6,w6,w11 add v16.4s,v16.4s,v17.4s add w7,w7,w12 eor v3.16b,v3.16b,v0.16b add w8,w8,w9 eor v7.16b,v7.16b,v4.16b eor w21,w21,w5 eor v19.16b,v19.16b,v16.16b eor w17,w17,w6 rev32 v3.8h,v3.8h eor w19,w19,w7 rev32 v7.8h,v7.8h eor w20,w20,w8 rev32 v19.8h,v19.8h ror w21,w21,#16 add v2.4s,v2.4s,v3.4s ror w17,w17,#16 add v6.4s,v6.4s,v7.4s ror w19,w19,#16 add v18.4s,v18.4s,v19.4s ror w20,w20,#16 eor v20.16b,v1.16b,v2.16b add w15,w15,w21 eor v21.16b,v5.16b,v6.16b add w16,w16,w17 eor v22.16b,v17.16b,v18.16b add w13,w13,w19 ushr v1.4s,v20.4s,#20 add w14,w14,w20 ushr v5.4s,v21.4s,#20 eor w10,w10,w15 ushr v17.4s,v22.4s,#20 eor w11,w11,w16 sli v1.4s,v20.4s,#12 eor w12,w12,w13 sli v5.4s,v21.4s,#12 eor w9,w9,w14 sli v17.4s,v22.4s,#12 ror w10,w10,#20 add v0.4s,v0.4s,v1.4s ror w11,w11,#20 add v4.4s,v4.4s,v5.4s ror w12,w12,#20 add v16.4s,v16.4s,v17.4s ror w9,w9,#20 eor v20.16b,v3.16b,v0.16b add w5,w5,w10 eor v21.16b,v7.16b,v4.16b add w6,w6,w11 eor v22.16b,v19.16b,v16.16b add w7,w7,w12 ushr v3.4s,v20.4s,#24 add w8,w8,w9 ushr v7.4s,v21.4s,#24 eor w21,w21,w5 ushr v19.4s,v22.4s,#24 eor w17,w17,w6 sli v3.4s,v20.4s,#8 eor w19,w19,w7 sli v7.4s,v21.4s,#8 eor w20,w20,w8 sli v19.4s,v22.4s,#8 ror w21,w21,#24 add v2.4s,v2.4s,v3.4s ror w17,w17,#24 add v6.4s,v6.4s,v7.4s ror w19,w19,#24 add v18.4s,v18.4s,v19.4s ror w20,w20,#24 eor v20.16b,v1.16b,v2.16b add w15,w15,w21 eor v21.16b,v5.16b,v6.16b add w16,w16,w17 eor v22.16b,v17.16b,v18.16b add w13,w13,w19 ushr v1.4s,v20.4s,#25 add w14,w14,w20 ushr v5.4s,v21.4s,#25 eor w10,w10,w15 ushr v17.4s,v22.4s,#25 eor w11,w11,w16 sli v1.4s,v20.4s,#7 eor w12,w12,w13 sli v5.4s,v21.4s,#7 eor w9,w9,w14 sli v17.4s,v22.4s,#7 ror w10,w10,#25 ext v2.16b,v2.16b,v2.16b,#8 ror w11,w11,#25 ext v6.16b,v6.16b,v6.16b,#8 ror w12,w12,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w9,w9,#25 ext v3.16b,v3.16b,v3.16b,#4 ext v7.16b,v7.16b,v7.16b,#4 ext v19.16b,v19.16b,v19.16b,#4 ext v1.16b,v1.16b,v1.16b,#12 ext v5.16b,v5.16b,v5.16b,#12 ext v17.16b,v17.16b,v17.16b,#12 cbnz x4,Loop_neon add w5,w5,w22 // accumulate key block add v0.4s,v0.4s,v24.4s add x6,x6,x22,lsr#32 add v4.4s,v4.4s,v24.4s add w7,w7,w23 add v16.4s,v16.4s,v24.4s add x8,x8,x23,lsr#32 add v2.4s,v2.4s,v26.4s add w9,w9,w24 add v6.4s,v6.4s,v26.4s add x10,x10,x24,lsr#32 add 
v18.4s,v18.4s,v26.4s add w11,w11,w25 add v3.4s,v3.4s,v27.4s add x12,x12,x25,lsr#32 add w13,w13,w26 add v7.4s,v7.4s,v28.4s add x14,x14,x26,lsr#32 add w15,w15,w27 add v19.4s,v19.4s,v29.4s add x16,x16,x27,lsr#32 add w17,w17,w28 add v1.4s,v1.4s,v25.4s add x19,x19,x28,lsr#32 add w20,w20,w30 add v5.4s,v5.4s,v25.4s add x21,x21,x30,lsr#32 add v17.4s,v17.4s,v25.4s b.lo Ltail_neon add x5,x5,x6,lsl#32 // pack add x7,x7,x8,lsl#32 ldp x6,x8,[x1,#0] // load input add x9,x9,x10,lsl#32 add x11,x11,x12,lsl#32 ldp x10,x12,[x1,#16] add x13,x13,x14,lsl#32 add x15,x15,x16,lsl#32 ldp x14,x16,[x1,#32] add x17,x17,x19,lsl#32 add x20,x20,x21,lsl#32 ldp x19,x21,[x1,#48] add x1,x1,#64 #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 eor x5,x5,x6 eor x7,x7,x8 eor x9,x9,x10 eor x11,x11,x12 eor x13,x13,x14 eor v0.16b,v0.16b,v20.16b eor x15,x15,x16 eor v1.16b,v1.16b,v21.16b eor x17,x17,x19 eor v2.16b,v2.16b,v22.16b eor x20,x20,x21 eor v3.16b,v3.16b,v23.16b ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 stp x5,x7,[x0,#0] // store output add x28,x28,#4 // increment counter stp x9,x11,[x0,#16] add v27.4s,v27.4s,v31.4s // += 4 stp x13,x15,[x0,#32] add v28.4s,v28.4s,v31.4s stp x17,x20,[x0,#48] add v29.4s,v29.4s,v31.4s add x0,x0,#64 st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64 ld1 {v0.16b,v1.16b,v2.16b,v3.16b},[x1],#64 eor v4.16b,v4.16b,v20.16b eor v5.16b,v5.16b,v21.16b eor v6.16b,v6.16b,v22.16b eor v7.16b,v7.16b,v23.16b st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64 eor v16.16b,v16.16b,v0.16b eor v17.16b,v17.16b,v1.16b eor v18.16b,v18.16b,v2.16b eor v19.16b,v19.16b,v3.16b st1 {v16.16b,v17.16b,v18.16b,v19.16b},[x0],#64 b.hi Loop_outer_neon ldp x19,x20,[x29,#16] add sp,sp,#64 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret Ltail_neon: add x2,x2,#256 cmp x2,#64 b.lo Less_than_64 add x5,x5,x6,lsl#32 // pack add x7,x7,x8,lsl#32 ldp x6,x8,[x1,#0] // load input add x9,x9,x10,lsl#32 add x11,x11,x12,lsl#32 ldp x10,x12,[x1,#16] add x13,x13,x14,lsl#32 add x15,x15,x16,lsl#32 ldp x14,x16,[x1,#32] add x17,x17,x19,lsl#32 add x20,x20,x21,lsl#32 ldp x19,x21,[x1,#48] add x1,x1,#64 #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif eor x5,x5,x6 eor x7,x7,x8 eor x9,x9,x10 eor x11,x11,x12 eor x13,x13,x14 eor x15,x15,x16 eor x17,x17,x19 eor x20,x20,x21 stp x5,x7,[x0,#0] // store output add x28,x28,#4 // increment counter stp x9,x11,[x0,#16] stp x13,x15,[x0,#32] stp x17,x20,[x0,#48] add x0,x0,#64 b.eq Ldone_neon sub x2,x2,#64 cmp x2,#64 b.lo Less_than_128 ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 eor v0.16b,v0.16b,v20.16b eor v1.16b,v1.16b,v21.16b eor v2.16b,v2.16b,v22.16b eor v3.16b,v3.16b,v23.16b st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64 b.eq Ldone_neon sub x2,x2,#64 cmp x2,#64 b.lo Less_than_192 ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 eor v4.16b,v4.16b,v20.16b eor v5.16b,v5.16b,v21.16b eor v6.16b,v6.16b,v22.16b eor v7.16b,v7.16b,v23.16b st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64 b.eq Ldone_neon sub x2,x2,#64 st1 {v16.16b,v17.16b,v18.16b,v19.16b},[sp] b Last_neon Less_than_128: st1 {v0.16b,v1.16b,v2.16b,v3.16b},[sp] b Last_neon Less_than_192: st1 {v4.16b,v5.16b,v6.16b,v7.16b},[sp] b Last_neon .align 4 Last_neon: sub x0,x0,#1 add x1,x1,x2 add x0,x0,x2 add x4,sp,x2 neg x2,x2 Loop_tail_neon: ldrb w10,[x1,x2] ldrb w11,[x4,x2] add x2,x2,#1 eor w10,w10,w11 strb 
w10,[x0,x2] cbnz x2,Loop_tail_neon stp xzr,xzr,[sp,#0] stp xzr,xzr,[sp,#16] stp xzr,xzr,[sp,#32] stp xzr,xzr,[sp,#48] Ldone_neon: ldp x19,x20,[x29,#16] add sp,sp,#64 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .align 5 ChaCha20_512_neon: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-96]! add x29,sp,#0 adrp x5,Lsigma@PAGE add x5,x5,Lsigma@PAGEOFF stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] L512_or_more_neon: sub sp,sp,#128+64 ldp x22,x23,[x5] // load sigma ld1 {v24.4s},[x5],#16 ldp x24,x25,[x3] // load key ldp x26,x27,[x3,#16] ld1 {v25.4s,v26.4s},[x3] ldp x28,x30,[x4] // load counter ld1 {v27.4s},[x4] ld1 {v31.4s},[x5] #ifdef __AARCH64EB__ rev64 v24.4s,v24.4s ror x24,x24,#32 ror x25,x25,#32 ror x26,x26,#32 ror x27,x27,#32 ror x28,x28,#32 ror x30,x30,#32 #endif add v27.4s,v27.4s,v31.4s // += 1 stp q24,q25,[sp,#0] // off-load key block, invariant part add v27.4s,v27.4s,v31.4s // not typo str q26,[sp,#32] add v28.4s,v27.4s,v31.4s add v29.4s,v28.4s,v31.4s add v30.4s,v29.4s,v31.4s shl v31.4s,v31.4s,#2 // 1 -> 4 stp d8,d9,[sp,#128+0] // meet ABI requirements stp d10,d11,[sp,#128+16] stp d12,d13,[sp,#128+32] stp d14,d15,[sp,#128+48] sub x2,x2,#512 // not typo Loop_outer_512_neon: mov v0.16b,v24.16b mov v4.16b,v24.16b mov v8.16b,v24.16b mov v12.16b,v24.16b mov v16.16b,v24.16b mov v20.16b,v24.16b mov v1.16b,v25.16b mov w5,w22 // unpack key block mov v5.16b,v25.16b lsr x6,x22,#32 mov v9.16b,v25.16b mov w7,w23 mov v13.16b,v25.16b lsr x8,x23,#32 mov v17.16b,v25.16b mov w9,w24 mov v21.16b,v25.16b lsr x10,x24,#32 mov v3.16b,v27.16b mov w11,w25 mov v7.16b,v28.16b lsr x12,x25,#32 mov v11.16b,v29.16b mov w13,w26 mov v15.16b,v30.16b lsr x14,x26,#32 mov v2.16b,v26.16b mov w15,w27 mov v6.16b,v26.16b lsr x16,x27,#32 add v19.4s,v3.4s,v31.4s // +4 mov w17,w28 add v23.4s,v7.4s,v31.4s // +4 lsr x19,x28,#32 mov v10.16b,v26.16b mov w20,w30 mov v14.16b,v26.16b lsr x21,x30,#32 mov v18.16b,v26.16b stp q27,q28,[sp,#48] // off-load key block, variable part mov v22.16b,v26.16b str q29,[sp,#80] mov x4,#5 subs x2,x2,#512 Loop_upper_neon: sub x4,x4,#1 add v0.4s,v0.4s,v1.4s add w5,w5,w9 add v4.4s,v4.4s,v5.4s add w6,w6,w10 add v8.4s,v8.4s,v9.4s add w7,w7,w11 add v12.4s,v12.4s,v13.4s add w8,w8,w12 add v16.4s,v16.4s,v17.4s eor w17,w17,w5 add v20.4s,v20.4s,v21.4s eor w19,w19,w6 eor v3.16b,v3.16b,v0.16b eor w20,w20,w7 eor v7.16b,v7.16b,v4.16b eor w21,w21,w8 eor v11.16b,v11.16b,v8.16b ror w17,w17,#16 eor v15.16b,v15.16b,v12.16b ror w19,w19,#16 eor v19.16b,v19.16b,v16.16b ror w20,w20,#16 eor v23.16b,v23.16b,v20.16b ror w21,w21,#16 rev32 v3.8h,v3.8h add w13,w13,w17 rev32 v7.8h,v7.8h add w14,w14,w19 rev32 v11.8h,v11.8h add w15,w15,w20 rev32 v15.8h,v15.8h add w16,w16,w21 rev32 v19.8h,v19.8h eor w9,w9,w13 rev32 v23.8h,v23.8h eor w10,w10,w14 add v2.4s,v2.4s,v3.4s eor w11,w11,w15 add v6.4s,v6.4s,v7.4s eor w12,w12,w16 add v10.4s,v10.4s,v11.4s ror w9,w9,#20 add v14.4s,v14.4s,v15.4s ror w10,w10,#20 add v18.4s,v18.4s,v19.4s ror w11,w11,#20 add v22.4s,v22.4s,v23.4s ror w12,w12,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w9 eor v25.16b,v5.16b,v6.16b add w6,w6,w10 eor v26.16b,v9.16b,v10.16b add w7,w7,w11 eor v27.16b,v13.16b,v14.16b add w8,w8,w12 eor v28.16b,v17.16b,v18.16b eor w17,w17,w5 eor v29.16b,v21.16b,v22.16b eor w19,w19,w6 ushr v1.4s,v24.4s,#20 eor w20,w20,w7 ushr v5.4s,v25.4s,#20 eor w21,w21,w8 ushr v9.4s,v26.4s,#20 ror w17,w17,#24 ushr v13.4s,v27.4s,#20 ror w19,w19,#24 ushr 
v17.4s,v28.4s,#20 ror w20,w20,#24 ushr v21.4s,v29.4s,#20 ror w21,w21,#24 sli v1.4s,v24.4s,#12 add w13,w13,w17 sli v5.4s,v25.4s,#12 add w14,w14,w19 sli v9.4s,v26.4s,#12 add w15,w15,w20 sli v13.4s,v27.4s,#12 add w16,w16,w21 sli v17.4s,v28.4s,#12 eor w9,w9,w13 sli v21.4s,v29.4s,#12 eor w10,w10,w14 add v0.4s,v0.4s,v1.4s eor w11,w11,w15 add v4.4s,v4.4s,v5.4s eor w12,w12,w16 add v8.4s,v8.4s,v9.4s ror w9,w9,#25 add v12.4s,v12.4s,v13.4s ror w10,w10,#25 add v16.4s,v16.4s,v17.4s ror w11,w11,#25 add v20.4s,v20.4s,v21.4s ror w12,w12,#25 eor v24.16b,v3.16b,v0.16b add w5,w5,w10 eor v25.16b,v7.16b,v4.16b add w6,w6,w11 eor v26.16b,v11.16b,v8.16b add w7,w7,w12 eor v27.16b,v15.16b,v12.16b add w8,w8,w9 eor v28.16b,v19.16b,v16.16b eor w21,w21,w5 eor v29.16b,v23.16b,v20.16b eor w17,w17,w6 ushr v3.4s,v24.4s,#24 eor w19,w19,w7 ushr v7.4s,v25.4s,#24 eor w20,w20,w8 ushr v11.4s,v26.4s,#24 ror w21,w21,#16 ushr v15.4s,v27.4s,#24 ror w17,w17,#16 ushr v19.4s,v28.4s,#24 ror w19,w19,#16 ushr v23.4s,v29.4s,#24 ror w20,w20,#16 sli v3.4s,v24.4s,#8 add w15,w15,w21 sli v7.4s,v25.4s,#8 add w16,w16,w17 sli v11.4s,v26.4s,#8 add w13,w13,w19 sli v15.4s,v27.4s,#8 add w14,w14,w20 sli v19.4s,v28.4s,#8 eor w10,w10,w15 sli v23.4s,v29.4s,#8 eor w11,w11,w16 add v2.4s,v2.4s,v3.4s eor w12,w12,w13 add v6.4s,v6.4s,v7.4s eor w9,w9,w14 add v10.4s,v10.4s,v11.4s ror w10,w10,#20 add v14.4s,v14.4s,v15.4s ror w11,w11,#20 add v18.4s,v18.4s,v19.4s ror w12,w12,#20 add v22.4s,v22.4s,v23.4s ror w9,w9,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w10 eor v25.16b,v5.16b,v6.16b add w6,w6,w11 eor v26.16b,v9.16b,v10.16b add w7,w7,w12 eor v27.16b,v13.16b,v14.16b add w8,w8,w9 eor v28.16b,v17.16b,v18.16b eor w21,w21,w5 eor v29.16b,v21.16b,v22.16b eor w17,w17,w6 ushr v1.4s,v24.4s,#25 eor w19,w19,w7 ushr v5.4s,v25.4s,#25 eor w20,w20,w8 ushr v9.4s,v26.4s,#25 ror w21,w21,#24 ushr v13.4s,v27.4s,#25 ror w17,w17,#24 ushr v17.4s,v28.4s,#25 ror w19,w19,#24 ushr v21.4s,v29.4s,#25 ror w20,w20,#24 sli v1.4s,v24.4s,#7 add w15,w15,w21 sli v5.4s,v25.4s,#7 add w16,w16,w17 sli v9.4s,v26.4s,#7 add w13,w13,w19 sli v13.4s,v27.4s,#7 add w14,w14,w20 sli v17.4s,v28.4s,#7 eor w10,w10,w15 sli v21.4s,v29.4s,#7 eor w11,w11,w16 ext v2.16b,v2.16b,v2.16b,#8 eor w12,w12,w13 ext v6.16b,v6.16b,v6.16b,#8 eor w9,w9,w14 ext v10.16b,v10.16b,v10.16b,#8 ror w10,w10,#25 ext v14.16b,v14.16b,v14.16b,#8 ror w11,w11,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w12,w12,#25 ext v22.16b,v22.16b,v22.16b,#8 ror w9,w9,#25 ext v3.16b,v3.16b,v3.16b,#12 ext v7.16b,v7.16b,v7.16b,#12 ext v11.16b,v11.16b,v11.16b,#12 ext v15.16b,v15.16b,v15.16b,#12 ext v19.16b,v19.16b,v19.16b,#12 ext v23.16b,v23.16b,v23.16b,#12 ext v1.16b,v1.16b,v1.16b,#4 ext v5.16b,v5.16b,v5.16b,#4 ext v9.16b,v9.16b,v9.16b,#4 ext v13.16b,v13.16b,v13.16b,#4 ext v17.16b,v17.16b,v17.16b,#4 ext v21.16b,v21.16b,v21.16b,#4 add v0.4s,v0.4s,v1.4s add w5,w5,w9 add v4.4s,v4.4s,v5.4s add w6,w6,w10 add v8.4s,v8.4s,v9.4s add w7,w7,w11 add v12.4s,v12.4s,v13.4s add w8,w8,w12 add v16.4s,v16.4s,v17.4s eor w17,w17,w5 add v20.4s,v20.4s,v21.4s eor w19,w19,w6 eor v3.16b,v3.16b,v0.16b eor w20,w20,w7 eor v7.16b,v7.16b,v4.16b eor w21,w21,w8 eor v11.16b,v11.16b,v8.16b ror w17,w17,#16 eor v15.16b,v15.16b,v12.16b ror w19,w19,#16 eor v19.16b,v19.16b,v16.16b ror w20,w20,#16 eor v23.16b,v23.16b,v20.16b ror w21,w21,#16 rev32 v3.8h,v3.8h add w13,w13,w17 rev32 v7.8h,v7.8h add w14,w14,w19 rev32 v11.8h,v11.8h add w15,w15,w20 rev32 v15.8h,v15.8h add w16,w16,w21 rev32 v19.8h,v19.8h eor w9,w9,w13 rev32 v23.8h,v23.8h eor w10,w10,w14 add v2.4s,v2.4s,v3.4s eor w11,w11,w15 add v6.4s,v6.4s,v7.4s eor 
w12,w12,w16 add v10.4s,v10.4s,v11.4s ror w9,w9,#20 add v14.4s,v14.4s,v15.4s ror w10,w10,#20 add v18.4s,v18.4s,v19.4s ror w11,w11,#20 add v22.4s,v22.4s,v23.4s ror w12,w12,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w9 eor v25.16b,v5.16b,v6.16b add w6,w6,w10 eor v26.16b,v9.16b,v10.16b add w7,w7,w11 eor v27.16b,v13.16b,v14.16b add w8,w8,w12 eor v28.16b,v17.16b,v18.16b eor w17,w17,w5 eor v29.16b,v21.16b,v22.16b eor w19,w19,w6 ushr v1.4s,v24.4s,#20 eor w20,w20,w7 ushr v5.4s,v25.4s,#20 eor w21,w21,w8 ushr v9.4s,v26.4s,#20 ror w17,w17,#24 ushr v13.4s,v27.4s,#20 ror w19,w19,#24 ushr v17.4s,v28.4s,#20 ror w20,w20,#24 ushr v21.4s,v29.4s,#20 ror w21,w21,#24 sli v1.4s,v24.4s,#12 add w13,w13,w17 sli v5.4s,v25.4s,#12 add w14,w14,w19 sli v9.4s,v26.4s,#12 add w15,w15,w20 sli v13.4s,v27.4s,#12 add w16,w16,w21 sli v17.4s,v28.4s,#12 eor w9,w9,w13 sli v21.4s,v29.4s,#12 eor w10,w10,w14 add v0.4s,v0.4s,v1.4s eor w11,w11,w15 add v4.4s,v4.4s,v5.4s eor w12,w12,w16 add v8.4s,v8.4s,v9.4s ror w9,w9,#25 add v12.4s,v12.4s,v13.4s ror w10,w10,#25 add v16.4s,v16.4s,v17.4s ror w11,w11,#25 add v20.4s,v20.4s,v21.4s ror w12,w12,#25 eor v24.16b,v3.16b,v0.16b add w5,w5,w10 eor v25.16b,v7.16b,v4.16b add w6,w6,w11 eor v26.16b,v11.16b,v8.16b add w7,w7,w12 eor v27.16b,v15.16b,v12.16b add w8,w8,w9 eor v28.16b,v19.16b,v16.16b eor w21,w21,w5 eor v29.16b,v23.16b,v20.16b eor w17,w17,w6 ushr v3.4s,v24.4s,#24 eor w19,w19,w7 ushr v7.4s,v25.4s,#24 eor w20,w20,w8 ushr v11.4s,v26.4s,#24 ror w21,w21,#16 ushr v15.4s,v27.4s,#24 ror w17,w17,#16 ushr v19.4s,v28.4s,#24 ror w19,w19,#16 ushr v23.4s,v29.4s,#24 ror w20,w20,#16 sli v3.4s,v24.4s,#8 add w15,w15,w21 sli v7.4s,v25.4s,#8 add w16,w16,w17 sli v11.4s,v26.4s,#8 add w13,w13,w19 sli v15.4s,v27.4s,#8 add w14,w14,w20 sli v19.4s,v28.4s,#8 eor w10,w10,w15 sli v23.4s,v29.4s,#8 eor w11,w11,w16 add v2.4s,v2.4s,v3.4s eor w12,w12,w13 add v6.4s,v6.4s,v7.4s eor w9,w9,w14 add v10.4s,v10.4s,v11.4s ror w10,w10,#20 add v14.4s,v14.4s,v15.4s ror w11,w11,#20 add v18.4s,v18.4s,v19.4s ror w12,w12,#20 add v22.4s,v22.4s,v23.4s ror w9,w9,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w10 eor v25.16b,v5.16b,v6.16b add w6,w6,w11 eor v26.16b,v9.16b,v10.16b add w7,w7,w12 eor v27.16b,v13.16b,v14.16b add w8,w8,w9 eor v28.16b,v17.16b,v18.16b eor w21,w21,w5 eor v29.16b,v21.16b,v22.16b eor w17,w17,w6 ushr v1.4s,v24.4s,#25 eor w19,w19,w7 ushr v5.4s,v25.4s,#25 eor w20,w20,w8 ushr v9.4s,v26.4s,#25 ror w21,w21,#24 ushr v13.4s,v27.4s,#25 ror w17,w17,#24 ushr v17.4s,v28.4s,#25 ror w19,w19,#24 ushr v21.4s,v29.4s,#25 ror w20,w20,#24 sli v1.4s,v24.4s,#7 add w15,w15,w21 sli v5.4s,v25.4s,#7 add w16,w16,w17 sli v9.4s,v26.4s,#7 add w13,w13,w19 sli v13.4s,v27.4s,#7 add w14,w14,w20 sli v17.4s,v28.4s,#7 eor w10,w10,w15 sli v21.4s,v29.4s,#7 eor w11,w11,w16 ext v2.16b,v2.16b,v2.16b,#8 eor w12,w12,w13 ext v6.16b,v6.16b,v6.16b,#8 eor w9,w9,w14 ext v10.16b,v10.16b,v10.16b,#8 ror w10,w10,#25 ext v14.16b,v14.16b,v14.16b,#8 ror w11,w11,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w12,w12,#25 ext v22.16b,v22.16b,v22.16b,#8 ror w9,w9,#25 ext v3.16b,v3.16b,v3.16b,#4 ext v7.16b,v7.16b,v7.16b,#4 ext v11.16b,v11.16b,v11.16b,#4 ext v15.16b,v15.16b,v15.16b,#4 ext v19.16b,v19.16b,v19.16b,#4 ext v23.16b,v23.16b,v23.16b,#4 ext v1.16b,v1.16b,v1.16b,#12 ext v5.16b,v5.16b,v5.16b,#12 ext v9.16b,v9.16b,v9.16b,#12 ext v13.16b,v13.16b,v13.16b,#12 ext v17.16b,v17.16b,v17.16b,#12 ext v21.16b,v21.16b,v21.16b,#12 cbnz x4,Loop_upper_neon add w5,w5,w22 // accumulate key block add x6,x6,x22,lsr#32 add w7,w7,w23 add x8,x8,x23,lsr#32 add w9,w9,w24 add x10,x10,x24,lsr#32 add w11,w11,w25 add 
x12,x12,x25,lsr#32 add w13,w13,w26 add x14,x14,x26,lsr#32 add w15,w15,w27 add x16,x16,x27,lsr#32 add w17,w17,w28 add x19,x19,x28,lsr#32 add w20,w20,w30 add x21,x21,x30,lsr#32 add x5,x5,x6,lsl#32 // pack add x7,x7,x8,lsl#32 ldp x6,x8,[x1,#0] // load input add x9,x9,x10,lsl#32 add x11,x11,x12,lsl#32 ldp x10,x12,[x1,#16] add x13,x13,x14,lsl#32 add x15,x15,x16,lsl#32 ldp x14,x16,[x1,#32] add x17,x17,x19,lsl#32 add x20,x20,x21,lsl#32 ldp x19,x21,[x1,#48] add x1,x1,#64 #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif eor x5,x5,x6 eor x7,x7,x8 eor x9,x9,x10 eor x11,x11,x12 eor x13,x13,x14 eor x15,x15,x16 eor x17,x17,x19 eor x20,x20,x21 stp x5,x7,[x0,#0] // store output add x28,x28,#1 // increment counter mov w5,w22 // unpack key block lsr x6,x22,#32 stp x9,x11,[x0,#16] mov w7,w23 lsr x8,x23,#32 stp x13,x15,[x0,#32] mov w9,w24 lsr x10,x24,#32 stp x17,x20,[x0,#48] add x0,x0,#64 mov w11,w25 lsr x12,x25,#32 mov w13,w26 lsr x14,x26,#32 mov w15,w27 lsr x16,x27,#32 mov w17,w28 lsr x19,x28,#32 mov w20,w30 lsr x21,x30,#32 mov x4,#5 Loop_lower_neon: sub x4,x4,#1 add v0.4s,v0.4s,v1.4s add w5,w5,w9 add v4.4s,v4.4s,v5.4s add w6,w6,w10 add v8.4s,v8.4s,v9.4s add w7,w7,w11 add v12.4s,v12.4s,v13.4s add w8,w8,w12 add v16.4s,v16.4s,v17.4s eor w17,w17,w5 add v20.4s,v20.4s,v21.4s eor w19,w19,w6 eor v3.16b,v3.16b,v0.16b eor w20,w20,w7 eor v7.16b,v7.16b,v4.16b eor w21,w21,w8 eor v11.16b,v11.16b,v8.16b ror w17,w17,#16 eor v15.16b,v15.16b,v12.16b ror w19,w19,#16 eor v19.16b,v19.16b,v16.16b ror w20,w20,#16 eor v23.16b,v23.16b,v20.16b ror w21,w21,#16 rev32 v3.8h,v3.8h add w13,w13,w17 rev32 v7.8h,v7.8h add w14,w14,w19 rev32 v11.8h,v11.8h add w15,w15,w20 rev32 v15.8h,v15.8h add w16,w16,w21 rev32 v19.8h,v19.8h eor w9,w9,w13 rev32 v23.8h,v23.8h eor w10,w10,w14 add v2.4s,v2.4s,v3.4s eor w11,w11,w15 add v6.4s,v6.4s,v7.4s eor w12,w12,w16 add v10.4s,v10.4s,v11.4s ror w9,w9,#20 add v14.4s,v14.4s,v15.4s ror w10,w10,#20 add v18.4s,v18.4s,v19.4s ror w11,w11,#20 add v22.4s,v22.4s,v23.4s ror w12,w12,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w9 eor v25.16b,v5.16b,v6.16b add w6,w6,w10 eor v26.16b,v9.16b,v10.16b add w7,w7,w11 eor v27.16b,v13.16b,v14.16b add w8,w8,w12 eor v28.16b,v17.16b,v18.16b eor w17,w17,w5 eor v29.16b,v21.16b,v22.16b eor w19,w19,w6 ushr v1.4s,v24.4s,#20 eor w20,w20,w7 ushr v5.4s,v25.4s,#20 eor w21,w21,w8 ushr v9.4s,v26.4s,#20 ror w17,w17,#24 ushr v13.4s,v27.4s,#20 ror w19,w19,#24 ushr v17.4s,v28.4s,#20 ror w20,w20,#24 ushr v21.4s,v29.4s,#20 ror w21,w21,#24 sli v1.4s,v24.4s,#12 add w13,w13,w17 sli v5.4s,v25.4s,#12 add w14,w14,w19 sli v9.4s,v26.4s,#12 add w15,w15,w20 sli v13.4s,v27.4s,#12 add w16,w16,w21 sli v17.4s,v28.4s,#12 eor w9,w9,w13 sli v21.4s,v29.4s,#12 eor w10,w10,w14 add v0.4s,v0.4s,v1.4s eor w11,w11,w15 add v4.4s,v4.4s,v5.4s eor w12,w12,w16 add v8.4s,v8.4s,v9.4s ror w9,w9,#25 add v12.4s,v12.4s,v13.4s ror w10,w10,#25 add v16.4s,v16.4s,v17.4s ror w11,w11,#25 add v20.4s,v20.4s,v21.4s ror w12,w12,#25 eor v24.16b,v3.16b,v0.16b add w5,w5,w10 eor v25.16b,v7.16b,v4.16b add w6,w6,w11 eor v26.16b,v11.16b,v8.16b add w7,w7,w12 eor v27.16b,v15.16b,v12.16b add w8,w8,w9 eor v28.16b,v19.16b,v16.16b eor w21,w21,w5 eor v29.16b,v23.16b,v20.16b eor w17,w17,w6 ushr v3.4s,v24.4s,#24 eor w19,w19,w7 ushr v7.4s,v25.4s,#24 eor w20,w20,w8 ushr v11.4s,v26.4s,#24 ror w21,w21,#16 ushr v15.4s,v27.4s,#24 ror w17,w17,#16 ushr v19.4s,v28.4s,#24 ror w19,w19,#16 ushr v23.4s,v29.4s,#24 ror w20,w20,#16 sli v3.4s,v24.4s,#8 add w15,w15,w21 sli v7.4s,v25.4s,#8 add w16,w16,w17 sli 
v11.4s,v26.4s,#8 add w13,w13,w19 sli v15.4s,v27.4s,#8 add w14,w14,w20 sli v19.4s,v28.4s,#8 eor w10,w10,w15 sli v23.4s,v29.4s,#8 eor w11,w11,w16 add v2.4s,v2.4s,v3.4s eor w12,w12,w13 add v6.4s,v6.4s,v7.4s eor w9,w9,w14 add v10.4s,v10.4s,v11.4s ror w10,w10,#20 add v14.4s,v14.4s,v15.4s ror w11,w11,#20 add v18.4s,v18.4s,v19.4s ror w12,w12,#20 add v22.4s,v22.4s,v23.4s ror w9,w9,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w10 eor v25.16b,v5.16b,v6.16b add w6,w6,w11 eor v26.16b,v9.16b,v10.16b add w7,w7,w12 eor v27.16b,v13.16b,v14.16b add w8,w8,w9 eor v28.16b,v17.16b,v18.16b eor w21,w21,w5 eor v29.16b,v21.16b,v22.16b eor w17,w17,w6 ushr v1.4s,v24.4s,#25 eor w19,w19,w7 ushr v5.4s,v25.4s,#25 eor w20,w20,w8 ushr v9.4s,v26.4s,#25 ror w21,w21,#24 ushr v13.4s,v27.4s,#25 ror w17,w17,#24 ushr v17.4s,v28.4s,#25 ror w19,w19,#24 ushr v21.4s,v29.4s,#25 ror w20,w20,#24 sli v1.4s,v24.4s,#7 add w15,w15,w21 sli v5.4s,v25.4s,#7 add w16,w16,w17 sli v9.4s,v26.4s,#7 add w13,w13,w19 sli v13.4s,v27.4s,#7 add w14,w14,w20 sli v17.4s,v28.4s,#7 eor w10,w10,w15 sli v21.4s,v29.4s,#7 eor w11,w11,w16 ext v2.16b,v2.16b,v2.16b,#8 eor w12,w12,w13 ext v6.16b,v6.16b,v6.16b,#8 eor w9,w9,w14 ext v10.16b,v10.16b,v10.16b,#8 ror w10,w10,#25 ext v14.16b,v14.16b,v14.16b,#8 ror w11,w11,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w12,w12,#25 ext v22.16b,v22.16b,v22.16b,#8 ror w9,w9,#25 ext v3.16b,v3.16b,v3.16b,#12 ext v7.16b,v7.16b,v7.16b,#12 ext v11.16b,v11.16b,v11.16b,#12 ext v15.16b,v15.16b,v15.16b,#12 ext v19.16b,v19.16b,v19.16b,#12 ext v23.16b,v23.16b,v23.16b,#12 ext v1.16b,v1.16b,v1.16b,#4 ext v5.16b,v5.16b,v5.16b,#4 ext v9.16b,v9.16b,v9.16b,#4 ext v13.16b,v13.16b,v13.16b,#4 ext v17.16b,v17.16b,v17.16b,#4 ext v21.16b,v21.16b,v21.16b,#4 add v0.4s,v0.4s,v1.4s add w5,w5,w9 add v4.4s,v4.4s,v5.4s add w6,w6,w10 add v8.4s,v8.4s,v9.4s add w7,w7,w11 add v12.4s,v12.4s,v13.4s add w8,w8,w12 add v16.4s,v16.4s,v17.4s eor w17,w17,w5 add v20.4s,v20.4s,v21.4s eor w19,w19,w6 eor v3.16b,v3.16b,v0.16b eor w20,w20,w7 eor v7.16b,v7.16b,v4.16b eor w21,w21,w8 eor v11.16b,v11.16b,v8.16b ror w17,w17,#16 eor v15.16b,v15.16b,v12.16b ror w19,w19,#16 eor v19.16b,v19.16b,v16.16b ror w20,w20,#16 eor v23.16b,v23.16b,v20.16b ror w21,w21,#16 rev32 v3.8h,v3.8h add w13,w13,w17 rev32 v7.8h,v7.8h add w14,w14,w19 rev32 v11.8h,v11.8h add w15,w15,w20 rev32 v15.8h,v15.8h add w16,w16,w21 rev32 v19.8h,v19.8h eor w9,w9,w13 rev32 v23.8h,v23.8h eor w10,w10,w14 add v2.4s,v2.4s,v3.4s eor w11,w11,w15 add v6.4s,v6.4s,v7.4s eor w12,w12,w16 add v10.4s,v10.4s,v11.4s ror w9,w9,#20 add v14.4s,v14.4s,v15.4s ror w10,w10,#20 add v18.4s,v18.4s,v19.4s ror w11,w11,#20 add v22.4s,v22.4s,v23.4s ror w12,w12,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w9 eor v25.16b,v5.16b,v6.16b add w6,w6,w10 eor v26.16b,v9.16b,v10.16b add w7,w7,w11 eor v27.16b,v13.16b,v14.16b add w8,w8,w12 eor v28.16b,v17.16b,v18.16b eor w17,w17,w5 eor v29.16b,v21.16b,v22.16b eor w19,w19,w6 ushr v1.4s,v24.4s,#20 eor w20,w20,w7 ushr v5.4s,v25.4s,#20 eor w21,w21,w8 ushr v9.4s,v26.4s,#20 ror w17,w17,#24 ushr v13.4s,v27.4s,#20 ror w19,w19,#24 ushr v17.4s,v28.4s,#20 ror w20,w20,#24 ushr v21.4s,v29.4s,#20 ror w21,w21,#24 sli v1.4s,v24.4s,#12 add w13,w13,w17 sli v5.4s,v25.4s,#12 add w14,w14,w19 sli v9.4s,v26.4s,#12 add w15,w15,w20 sli v13.4s,v27.4s,#12 add w16,w16,w21 sli v17.4s,v28.4s,#12 eor w9,w9,w13 sli v21.4s,v29.4s,#12 eor w10,w10,w14 add v0.4s,v0.4s,v1.4s eor w11,w11,w15 add v4.4s,v4.4s,v5.4s eor w12,w12,w16 add v8.4s,v8.4s,v9.4s ror w9,w9,#25 add v12.4s,v12.4s,v13.4s ror w10,w10,#25 add v16.4s,v16.4s,v17.4s ror w11,w11,#25 add 
v20.4s,v20.4s,v21.4s ror w12,w12,#25 eor v24.16b,v3.16b,v0.16b add w5,w5,w10 eor v25.16b,v7.16b,v4.16b add w6,w6,w11 eor v26.16b,v11.16b,v8.16b add w7,w7,w12 eor v27.16b,v15.16b,v12.16b add w8,w8,w9 eor v28.16b,v19.16b,v16.16b eor w21,w21,w5 eor v29.16b,v23.16b,v20.16b eor w17,w17,w6 ushr v3.4s,v24.4s,#24 eor w19,w19,w7 ushr v7.4s,v25.4s,#24 eor w20,w20,w8 ushr v11.4s,v26.4s,#24 ror w21,w21,#16 ushr v15.4s,v27.4s,#24 ror w17,w17,#16 ushr v19.4s,v28.4s,#24 ror w19,w19,#16 ushr v23.4s,v29.4s,#24 ror w20,w20,#16 sli v3.4s,v24.4s,#8 add w15,w15,w21 sli v7.4s,v25.4s,#8 add w16,w16,w17 sli v11.4s,v26.4s,#8 add w13,w13,w19 sli v15.4s,v27.4s,#8 add w14,w14,w20 sli v19.4s,v28.4s,#8 eor w10,w10,w15 sli v23.4s,v29.4s,#8 eor w11,w11,w16 add v2.4s,v2.4s,v3.4s eor w12,w12,w13 add v6.4s,v6.4s,v7.4s eor w9,w9,w14 add v10.4s,v10.4s,v11.4s ror w10,w10,#20 add v14.4s,v14.4s,v15.4s ror w11,w11,#20 add v18.4s,v18.4s,v19.4s ror w12,w12,#20 add v22.4s,v22.4s,v23.4s ror w9,w9,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w10 eor v25.16b,v5.16b,v6.16b add w6,w6,w11 eor v26.16b,v9.16b,v10.16b add w7,w7,w12 eor v27.16b,v13.16b,v14.16b add w8,w8,w9 eor v28.16b,v17.16b,v18.16b eor w21,w21,w5 eor v29.16b,v21.16b,v22.16b eor w17,w17,w6 ushr v1.4s,v24.4s,#25 eor w19,w19,w7 ushr v5.4s,v25.4s,#25 eor w20,w20,w8 ushr v9.4s,v26.4s,#25 ror w21,w21,#24 ushr v13.4s,v27.4s,#25 ror w17,w17,#24 ushr v17.4s,v28.4s,#25 ror w19,w19,#24 ushr v21.4s,v29.4s,#25 ror w20,w20,#24 sli v1.4s,v24.4s,#7 add w15,w15,w21 sli v5.4s,v25.4s,#7 add w16,w16,w17 sli v9.4s,v26.4s,#7 add w13,w13,w19 sli v13.4s,v27.4s,#7 add w14,w14,w20 sli v17.4s,v28.4s,#7 eor w10,w10,w15 sli v21.4s,v29.4s,#7 eor w11,w11,w16 ext v2.16b,v2.16b,v2.16b,#8 eor w12,w12,w13 ext v6.16b,v6.16b,v6.16b,#8 eor w9,w9,w14 ext v10.16b,v10.16b,v10.16b,#8 ror w10,w10,#25 ext v14.16b,v14.16b,v14.16b,#8 ror w11,w11,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w12,w12,#25 ext v22.16b,v22.16b,v22.16b,#8 ror w9,w9,#25 ext v3.16b,v3.16b,v3.16b,#4 ext v7.16b,v7.16b,v7.16b,#4 ext v11.16b,v11.16b,v11.16b,#4 ext v15.16b,v15.16b,v15.16b,#4 ext v19.16b,v19.16b,v19.16b,#4 ext v23.16b,v23.16b,v23.16b,#4 ext v1.16b,v1.16b,v1.16b,#12 ext v5.16b,v5.16b,v5.16b,#12 ext v9.16b,v9.16b,v9.16b,#12 ext v13.16b,v13.16b,v13.16b,#12 ext v17.16b,v17.16b,v17.16b,#12 ext v21.16b,v21.16b,v21.16b,#12 cbnz x4,Loop_lower_neon add w5,w5,w22 // accumulate key block ldp q24,q25,[sp,#0] add x6,x6,x22,lsr#32 ldp q26,q27,[sp,#32] add w7,w7,w23 ldp q28,q29,[sp,#64] add x8,x8,x23,lsr#32 add v0.4s,v0.4s,v24.4s add w9,w9,w24 add v4.4s,v4.4s,v24.4s add x10,x10,x24,lsr#32 add v8.4s,v8.4s,v24.4s add w11,w11,w25 add v12.4s,v12.4s,v24.4s add x12,x12,x25,lsr#32 add v16.4s,v16.4s,v24.4s add w13,w13,w26 add v20.4s,v20.4s,v24.4s add x14,x14,x26,lsr#32 add v2.4s,v2.4s,v26.4s add w15,w15,w27 add v6.4s,v6.4s,v26.4s add x16,x16,x27,lsr#32 add v10.4s,v10.4s,v26.4s add w17,w17,w28 add v14.4s,v14.4s,v26.4s add x19,x19,x28,lsr#32 add v18.4s,v18.4s,v26.4s add w20,w20,w30 add v22.4s,v22.4s,v26.4s add x21,x21,x30,lsr#32 add v19.4s,v19.4s,v31.4s // +4 add x5,x5,x6,lsl#32 // pack add v23.4s,v23.4s,v31.4s // +4 add x7,x7,x8,lsl#32 add v3.4s,v3.4s,v27.4s ldp x6,x8,[x1,#0] // load input add v7.4s,v7.4s,v28.4s add x9,x9,x10,lsl#32 add v11.4s,v11.4s,v29.4s add x11,x11,x12,lsl#32 add v15.4s,v15.4s,v30.4s ldp x10,x12,[x1,#16] add v19.4s,v19.4s,v27.4s add x13,x13,x14,lsl#32 add v23.4s,v23.4s,v28.4s add x15,x15,x16,lsl#32 add v1.4s,v1.4s,v25.4s ldp x14,x16,[x1,#32] add v5.4s,v5.4s,v25.4s add x17,x17,x19,lsl#32 add v9.4s,v9.4s,v25.4s add x20,x20,x21,lsl#32 add 
v13.4s,v13.4s,v25.4s ldp x19,x21,[x1,#48] add v17.4s,v17.4s,v25.4s add x1,x1,#64 add v21.4s,v21.4s,v25.4s #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif ld1 {v24.16b,v25.16b,v26.16b,v27.16b},[x1],#64 eor x5,x5,x6 eor x7,x7,x8 eor x9,x9,x10 eor x11,x11,x12 eor x13,x13,x14 eor v0.16b,v0.16b,v24.16b eor x15,x15,x16 eor v1.16b,v1.16b,v25.16b eor x17,x17,x19 eor v2.16b,v2.16b,v26.16b eor x20,x20,x21 eor v3.16b,v3.16b,v27.16b ld1 {v24.16b,v25.16b,v26.16b,v27.16b},[x1],#64 stp x5,x7,[x0,#0] // store output add x28,x28,#7 // increment counter stp x9,x11,[x0,#16] stp x13,x15,[x0,#32] stp x17,x20,[x0,#48] add x0,x0,#64 st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64 ld1 {v0.16b,v1.16b,v2.16b,v3.16b},[x1],#64 eor v4.16b,v4.16b,v24.16b eor v5.16b,v5.16b,v25.16b eor v6.16b,v6.16b,v26.16b eor v7.16b,v7.16b,v27.16b st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64 ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64 eor v8.16b,v8.16b,v0.16b ldp q24,q25,[sp,#0] eor v9.16b,v9.16b,v1.16b ldp q26,q27,[sp,#32] eor v10.16b,v10.16b,v2.16b eor v11.16b,v11.16b,v3.16b st1 {v8.16b,v9.16b,v10.16b,v11.16b},[x0],#64 ld1 {v8.16b,v9.16b,v10.16b,v11.16b},[x1],#64 eor v12.16b,v12.16b,v4.16b eor v13.16b,v13.16b,v5.16b eor v14.16b,v14.16b,v6.16b eor v15.16b,v15.16b,v7.16b st1 {v12.16b,v13.16b,v14.16b,v15.16b},[x0],#64 ld1 {v12.16b,v13.16b,v14.16b,v15.16b},[x1],#64 eor v16.16b,v16.16b,v8.16b eor v17.16b,v17.16b,v9.16b eor v18.16b,v18.16b,v10.16b eor v19.16b,v19.16b,v11.16b st1 {v16.16b,v17.16b,v18.16b,v19.16b},[x0],#64 shl v0.4s,v31.4s,#1 // 4 -> 8 eor v20.16b,v20.16b,v12.16b eor v21.16b,v21.16b,v13.16b eor v22.16b,v22.16b,v14.16b eor v23.16b,v23.16b,v15.16b st1 {v20.16b,v21.16b,v22.16b,v23.16b},[x0],#64 add v27.4s,v27.4s,v0.4s // += 8 add v28.4s,v28.4s,v0.4s add v29.4s,v29.4s,v0.4s add v30.4s,v30.4s,v0.4s b.hs Loop_outer_512_neon adds x2,x2,#512 ushr v0.4s,v31.4s,#2 // 4 -> 1 ldp d8,d9,[sp,#128+0] // meet ABI requirements ldp d10,d11,[sp,#128+16] ldp d12,d13,[sp,#128+32] ldp d14,d15,[sp,#128+48] stp q24,q31,[sp,#0] // wipe off-load area stp q24,q31,[sp,#32] stp q24,q31,[sp,#64] b.eq Ldone_512_neon cmp x2,#192 sub v27.4s,v27.4s,v0.4s // -= 1 sub v28.4s,v28.4s,v0.4s sub v29.4s,v29.4s,v0.4s add sp,sp,#128 b.hs Loop_outer_neon eor v25.16b,v25.16b,v25.16b eor v26.16b,v26.16b,v26.16b eor v27.16b,v27.16b,v27.16b eor v28.16b,v28.16b,v28.16b eor v29.16b,v29.16b,v29.16b eor v30.16b,v30.16b,v30.16b b Loop_outer Ldone_512_neon: ldp x19,x20,[x29,#16] add sp,sp,#128+64 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
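The scalar add/eor/ror sequences interleaved with the NEON vector work above (rotations by 16, 20, 24 and 25, i.e. left-rotates by 16, 12, 8 and 7) are the standard ChaCha20 quarter round applied to one column of the state while the vector registers process additional blocks in parallel. As a point of reference only, here is a minimal Rust sketch of that quarter round; the function name and the 16-word array are illustrative, not an API exposed by ring.

// Illustrative sketch of the ChaCha20 quarter round (RFC 8439). The
// interleaved scalar instructions above compute these same four
// add / xor / rotate steps on one column of the working state.
fn quarter_round(state: &mut [u32; 16], a: usize, b: usize, c: usize, d: usize) {
    state[a] = state[a].wrapping_add(state[b]);
    state[d] = (state[d] ^ state[a]).rotate_left(16);
    state[c] = state[c].wrapping_add(state[d]);
    state[b] = (state[b] ^ state[c]).rotate_left(12);
    state[a] = state[a].wrapping_add(state[b]);
    state[d] = (state[d] ^ state[a]).rotate_left(8);
    state[c] = state[c].wrapping_add(state[d]);
    state[b] = (state[b] ^ state[c]).rotate_left(7);
}

Twenty rounds (ten column/diagonal double rounds) of this operation, followed by adding the initial state back in, yield one 64-byte keystream block.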
fatiimajamiil/rustpad-custom
18,316
.cargo/registry/src/index.crates.io-6f17d22bba15001f/ring-0.17.14/pregenerated/aesni-gcm-x86_64-macosx.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__) .text .p2align 5 _aesni_ctr32_ghash_6x: vmovdqu 32(%r11),%xmm2 subq $6,%rdx vpxor %xmm4,%xmm4,%xmm4 vmovdqu 0-128(%rcx),%xmm15 vpaddb %xmm2,%xmm1,%xmm10 vpaddb %xmm2,%xmm10,%xmm11 vpaddb %xmm2,%xmm11,%xmm12 vpaddb %xmm2,%xmm12,%xmm13 vpaddb %xmm2,%xmm13,%xmm14 vpxor %xmm15,%xmm1,%xmm9 vmovdqu %xmm4,16+8(%rsp) jmp L$oop6x .p2align 5 L$oop6x: addl $100663296,%ebx jc L$handle_ctr32 vmovdqu 0-32(%r9),%xmm3 vpaddb %xmm2,%xmm14,%xmm1 vpxor %xmm15,%xmm10,%xmm10 vpxor %xmm15,%xmm11,%xmm11 L$resume_ctr32: vmovdqu %xmm1,(%r8) vpclmulqdq $0x10,%xmm3,%xmm7,%xmm5 vpxor %xmm15,%xmm12,%xmm12 vmovups 16-128(%rcx),%xmm2 vpclmulqdq $0x01,%xmm3,%xmm7,%xmm6 xorq %r12,%r12 cmpq %r14,%r15 vaesenc %xmm2,%xmm9,%xmm9 vmovdqu 48+8(%rsp),%xmm0 vpxor %xmm15,%xmm13,%xmm13 vpclmulqdq $0x00,%xmm3,%xmm7,%xmm1 vaesenc %xmm2,%xmm10,%xmm10 vpxor %xmm15,%xmm14,%xmm14 setnc %r12b vpclmulqdq $0x11,%xmm3,%xmm7,%xmm7 vaesenc %xmm2,%xmm11,%xmm11 vmovdqu 16-32(%r9),%xmm3 negq %r12 vaesenc %xmm2,%xmm12,%xmm12 vpxor %xmm5,%xmm6,%xmm6 vpclmulqdq $0x00,%xmm3,%xmm0,%xmm5 vpxor %xmm4,%xmm8,%xmm8 vaesenc %xmm2,%xmm13,%xmm13 vpxor %xmm5,%xmm1,%xmm4 andq $0x60,%r12 vmovups 32-128(%rcx),%xmm15 vpclmulqdq $0x10,%xmm3,%xmm0,%xmm1 vaesenc %xmm2,%xmm14,%xmm14 vpclmulqdq $0x01,%xmm3,%xmm0,%xmm2 leaq (%r14,%r12,1),%r14 vaesenc %xmm15,%xmm9,%xmm9 vpxor 16+8(%rsp),%xmm8,%xmm8 vpclmulqdq $0x11,%xmm3,%xmm0,%xmm3 vmovdqu 64+8(%rsp),%xmm0 vaesenc %xmm15,%xmm10,%xmm10 movbeq 88(%r14),%r13 vaesenc %xmm15,%xmm11,%xmm11 movbeq 80(%r14),%r12 vaesenc %xmm15,%xmm12,%xmm12 movq %r13,32+8(%rsp) vaesenc %xmm15,%xmm13,%xmm13 movq %r12,40+8(%rsp) vmovdqu 48-32(%r9),%xmm5 vaesenc %xmm15,%xmm14,%xmm14 vmovups 48-128(%rcx),%xmm15 vpxor %xmm1,%xmm6,%xmm6 vpclmulqdq $0x00,%xmm5,%xmm0,%xmm1 vaesenc %xmm15,%xmm9,%xmm9 vpxor %xmm2,%xmm6,%xmm6 vpclmulqdq $0x10,%xmm5,%xmm0,%xmm2 vaesenc %xmm15,%xmm10,%xmm10 vpxor %xmm3,%xmm7,%xmm7 vpclmulqdq $0x01,%xmm5,%xmm0,%xmm3 vaesenc %xmm15,%xmm11,%xmm11 vpclmulqdq $0x11,%xmm5,%xmm0,%xmm5 vmovdqu 80+8(%rsp),%xmm0 vaesenc %xmm15,%xmm12,%xmm12 vaesenc %xmm15,%xmm13,%xmm13 vpxor %xmm1,%xmm4,%xmm4 vmovdqu 64-32(%r9),%xmm1 vaesenc %xmm15,%xmm14,%xmm14 vmovups 64-128(%rcx),%xmm15 vpxor %xmm2,%xmm6,%xmm6 vpclmulqdq $0x00,%xmm1,%xmm0,%xmm2 vaesenc %xmm15,%xmm9,%xmm9 vpxor %xmm3,%xmm6,%xmm6 vpclmulqdq $0x10,%xmm1,%xmm0,%xmm3 vaesenc %xmm15,%xmm10,%xmm10 movbeq 72(%r14),%r13 vpxor %xmm5,%xmm7,%xmm7 vpclmulqdq $0x01,%xmm1,%xmm0,%xmm5 vaesenc %xmm15,%xmm11,%xmm11 movbeq 64(%r14),%r12 vpclmulqdq $0x11,%xmm1,%xmm0,%xmm1 vmovdqu 96+8(%rsp),%xmm0 vaesenc %xmm15,%xmm12,%xmm12 movq %r13,48+8(%rsp) vaesenc %xmm15,%xmm13,%xmm13 movq %r12,56+8(%rsp) vpxor %xmm2,%xmm4,%xmm4 vmovdqu 96-32(%r9),%xmm2 vaesenc %xmm15,%xmm14,%xmm14 vmovups 80-128(%rcx),%xmm15 vpxor %xmm3,%xmm6,%xmm6 vpclmulqdq $0x00,%xmm2,%xmm0,%xmm3 vaesenc %xmm15,%xmm9,%xmm9 vpxor %xmm5,%xmm6,%xmm6 vpclmulqdq $0x10,%xmm2,%xmm0,%xmm5 vaesenc %xmm15,%xmm10,%xmm10 movbeq 56(%r14),%r13 vpxor %xmm1,%xmm7,%xmm7 vpclmulqdq $0x01,%xmm2,%xmm0,%xmm1 vpxor 112+8(%rsp),%xmm8,%xmm8 vaesenc %xmm15,%xmm11,%xmm11 movbeq 48(%r14),%r12 vpclmulqdq $0x11,%xmm2,%xmm0,%xmm2 vaesenc %xmm15,%xmm12,%xmm12 movq %r13,64+8(%rsp) vaesenc %xmm15,%xmm13,%xmm13 movq %r12,72+8(%rsp) vpxor %xmm3,%xmm4,%xmm4 vmovdqu 112-32(%r9),%xmm3 vaesenc %xmm15,%xmm14,%xmm14 vmovups 96-128(%rcx),%xmm15 vpxor 
%xmm5,%xmm6,%xmm6 vpclmulqdq $0x10,%xmm3,%xmm8,%xmm5 vaesenc %xmm15,%xmm9,%xmm9 vpxor %xmm1,%xmm6,%xmm6 vpclmulqdq $0x01,%xmm3,%xmm8,%xmm1 vaesenc %xmm15,%xmm10,%xmm10 movbeq 40(%r14),%r13 vpxor %xmm2,%xmm7,%xmm7 vpclmulqdq $0x00,%xmm3,%xmm8,%xmm2 vaesenc %xmm15,%xmm11,%xmm11 movbeq 32(%r14),%r12 vpclmulqdq $0x11,%xmm3,%xmm8,%xmm8 vaesenc %xmm15,%xmm12,%xmm12 movq %r13,80+8(%rsp) vaesenc %xmm15,%xmm13,%xmm13 movq %r12,88+8(%rsp) vpxor %xmm5,%xmm6,%xmm6 vaesenc %xmm15,%xmm14,%xmm14 vpxor %xmm1,%xmm6,%xmm6 vmovups 112-128(%rcx),%xmm15 vpslldq $8,%xmm6,%xmm5 vpxor %xmm2,%xmm4,%xmm4 vmovdqu 16(%r11),%xmm3 vaesenc %xmm15,%xmm9,%xmm9 vpxor %xmm8,%xmm7,%xmm7 vaesenc %xmm15,%xmm10,%xmm10 vpxor %xmm5,%xmm4,%xmm4 movbeq 24(%r14),%r13 vaesenc %xmm15,%xmm11,%xmm11 movbeq 16(%r14),%r12 vpalignr $8,%xmm4,%xmm4,%xmm0 vpclmulqdq $0x10,%xmm3,%xmm4,%xmm4 movq %r13,96+8(%rsp) vaesenc %xmm15,%xmm12,%xmm12 movq %r12,104+8(%rsp) vaesenc %xmm15,%xmm13,%xmm13 vmovups 128-128(%rcx),%xmm1 vaesenc %xmm15,%xmm14,%xmm14 vaesenc %xmm1,%xmm9,%xmm9 vmovups 144-128(%rcx),%xmm15 vaesenc %xmm1,%xmm10,%xmm10 vpsrldq $8,%xmm6,%xmm6 vaesenc %xmm1,%xmm11,%xmm11 vpxor %xmm6,%xmm7,%xmm7 vaesenc %xmm1,%xmm12,%xmm12 vpxor %xmm0,%xmm4,%xmm4 movbeq 8(%r14),%r13 vaesenc %xmm1,%xmm13,%xmm13 movbeq 0(%r14),%r12 vaesenc %xmm1,%xmm14,%xmm14 vmovups 160-128(%rcx),%xmm1 cmpl $11,%r10d jb L$enc_tail vaesenc %xmm15,%xmm9,%xmm9 vaesenc %xmm15,%xmm10,%xmm10 vaesenc %xmm15,%xmm11,%xmm11 vaesenc %xmm15,%xmm12,%xmm12 vaesenc %xmm15,%xmm13,%xmm13 vaesenc %xmm15,%xmm14,%xmm14 vaesenc %xmm1,%xmm9,%xmm9 vaesenc %xmm1,%xmm10,%xmm10 vaesenc %xmm1,%xmm11,%xmm11 vaesenc %xmm1,%xmm12,%xmm12 vaesenc %xmm1,%xmm13,%xmm13 vmovups 176-128(%rcx),%xmm15 vaesenc %xmm1,%xmm14,%xmm14 vmovups 192-128(%rcx),%xmm1 vaesenc %xmm15,%xmm9,%xmm9 vaesenc %xmm15,%xmm10,%xmm10 vaesenc %xmm15,%xmm11,%xmm11 vaesenc %xmm15,%xmm12,%xmm12 vaesenc %xmm15,%xmm13,%xmm13 vaesenc %xmm15,%xmm14,%xmm14 vaesenc %xmm1,%xmm9,%xmm9 vaesenc %xmm1,%xmm10,%xmm10 vaesenc %xmm1,%xmm11,%xmm11 vaesenc %xmm1,%xmm12,%xmm12 vaesenc %xmm1,%xmm13,%xmm13 vmovups 208-128(%rcx),%xmm15 vaesenc %xmm1,%xmm14,%xmm14 vmovups 224-128(%rcx),%xmm1 jmp L$enc_tail .p2align 5 L$handle_ctr32: vmovdqu (%r11),%xmm0 vpshufb %xmm0,%xmm1,%xmm6 vmovdqu 48(%r11),%xmm5 vpaddd 64(%r11),%xmm6,%xmm10 vpaddd %xmm5,%xmm6,%xmm11 vmovdqu 0-32(%r9),%xmm3 vpaddd %xmm5,%xmm10,%xmm12 vpshufb %xmm0,%xmm10,%xmm10 vpaddd %xmm5,%xmm11,%xmm13 vpshufb %xmm0,%xmm11,%xmm11 vpxor %xmm15,%xmm10,%xmm10 vpaddd %xmm5,%xmm12,%xmm14 vpshufb %xmm0,%xmm12,%xmm12 vpxor %xmm15,%xmm11,%xmm11 vpaddd %xmm5,%xmm13,%xmm1 vpshufb %xmm0,%xmm13,%xmm13 vpshufb %xmm0,%xmm14,%xmm14 vpshufb %xmm0,%xmm1,%xmm1 jmp L$resume_ctr32 .p2align 5 L$enc_tail: vaesenc %xmm15,%xmm9,%xmm9 vmovdqu %xmm7,16+8(%rsp) vpalignr $8,%xmm4,%xmm4,%xmm8 vaesenc %xmm15,%xmm10,%xmm10 vpclmulqdq $0x10,%xmm3,%xmm4,%xmm4 vpxor 0(%rdi),%xmm1,%xmm2 vaesenc %xmm15,%xmm11,%xmm11 vpxor 16(%rdi),%xmm1,%xmm0 vaesenc %xmm15,%xmm12,%xmm12 vpxor 32(%rdi),%xmm1,%xmm5 vaesenc %xmm15,%xmm13,%xmm13 vpxor 48(%rdi),%xmm1,%xmm6 vaesenc %xmm15,%xmm14,%xmm14 vpxor 64(%rdi),%xmm1,%xmm7 vpxor 80(%rdi),%xmm1,%xmm3 vmovdqu (%r8),%xmm1 vaesenclast %xmm2,%xmm9,%xmm9 vmovdqu 32(%r11),%xmm2 vaesenclast %xmm0,%xmm10,%xmm10 vpaddb %xmm2,%xmm1,%xmm0 movq %r13,112+8(%rsp) leaq 96(%rdi),%rdi prefetcht0 512(%rdi) prefetcht0 576(%rdi) vaesenclast %xmm5,%xmm11,%xmm11 vpaddb %xmm2,%xmm0,%xmm5 movq %r12,120+8(%rsp) leaq 96(%rsi),%rsi vmovdqu 0-128(%rcx),%xmm15 vaesenclast %xmm6,%xmm12,%xmm12 vpaddb %xmm2,%xmm5,%xmm6 vaesenclast 
%xmm7,%xmm13,%xmm13 vpaddb %xmm2,%xmm6,%xmm7 vaesenclast %xmm3,%xmm14,%xmm14 vpaddb %xmm2,%xmm7,%xmm3 addq $0x60,%rax subq $0x6,%rdx jc L$6x_done vmovups %xmm9,-96(%rsi) vpxor %xmm15,%xmm1,%xmm9 vmovups %xmm10,-80(%rsi) vmovdqa %xmm0,%xmm10 vmovups %xmm11,-64(%rsi) vmovdqa %xmm5,%xmm11 vmovups %xmm12,-48(%rsi) vmovdqa %xmm6,%xmm12 vmovups %xmm13,-32(%rsi) vmovdqa %xmm7,%xmm13 vmovups %xmm14,-16(%rsi) vmovdqa %xmm3,%xmm14 vmovdqu 32+8(%rsp),%xmm7 jmp L$oop6x L$6x_done: vpxor 16+8(%rsp),%xmm8,%xmm8 vpxor %xmm4,%xmm8,%xmm8 ret .globl _aesni_gcm_decrypt .private_extern _aesni_gcm_decrypt .p2align 5 _aesni_gcm_decrypt: _CET_ENDBR xorq %rax,%rax cmpq $0x60,%rdx jb L$gcm_dec_abort pushq %rbp movq %rsp,%rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 vzeroupper movq 16(%rbp),%r12 vmovdqu (%r8),%xmm1 addq $-128,%rsp movl 12(%r8),%ebx leaq L$bswap_mask(%rip),%r11 leaq -128(%rcx),%r14 movq $0xf80,%r15 vmovdqu (%r12),%xmm8 andq $-128,%rsp vmovdqu (%r11),%xmm0 leaq 128(%rcx),%rcx leaq 32(%r9),%r9 movl 240-128(%rcx),%r10d vpshufb %xmm0,%xmm8,%xmm8 andq %r15,%r14 andq %rsp,%r15 subq %r14,%r15 jc L$dec_no_key_aliasing cmpq $768,%r15 jnc L$dec_no_key_aliasing subq %r15,%rsp L$dec_no_key_aliasing: vmovdqu 80(%rdi),%xmm7 movq %rdi,%r14 vmovdqu 64(%rdi),%xmm4 leaq -192(%rdi,%rdx,1),%r15 vmovdqu 48(%rdi),%xmm5 shrq $4,%rdx xorq %rax,%rax vmovdqu 32(%rdi),%xmm6 vpshufb %xmm0,%xmm7,%xmm7 vmovdqu 16(%rdi),%xmm2 vpshufb %xmm0,%xmm4,%xmm4 vmovdqu (%rdi),%xmm3 vpshufb %xmm0,%xmm5,%xmm5 vmovdqu %xmm4,48(%rsp) vpshufb %xmm0,%xmm6,%xmm6 vmovdqu %xmm5,64(%rsp) vpshufb %xmm0,%xmm2,%xmm2 vmovdqu %xmm6,80(%rsp) vpshufb %xmm0,%xmm3,%xmm3 vmovdqu %xmm2,96(%rsp) vmovdqu %xmm3,112(%rsp) call _aesni_ctr32_ghash_6x movq 16(%rbp),%r12 vmovups %xmm9,-96(%rsi) vmovups %xmm10,-80(%rsi) vmovups %xmm11,-64(%rsi) vmovups %xmm12,-48(%rsi) vmovups %xmm13,-32(%rsi) vmovups %xmm14,-16(%rsi) vpshufb (%r11),%xmm8,%xmm8 vmovdqu %xmm8,(%r12) vzeroupper leaq -40(%rbp),%rsp popq %r15 popq %r14 popq %r13 popq %r12 popq %rbx popq %rbp L$gcm_dec_abort: ret .p2align 5 _aesni_ctr32_6x: vmovdqu 0-128(%rcx),%xmm4 vmovdqu 32(%r11),%xmm2 leaq -1(%r10),%r13 vmovups 16-128(%rcx),%xmm15 leaq 32-128(%rcx),%r12 vpxor %xmm4,%xmm1,%xmm9 addl $100663296,%ebx jc L$handle_ctr32_2 vpaddb %xmm2,%xmm1,%xmm10 vpaddb %xmm2,%xmm10,%xmm11 vpxor %xmm4,%xmm10,%xmm10 vpaddb %xmm2,%xmm11,%xmm12 vpxor %xmm4,%xmm11,%xmm11 vpaddb %xmm2,%xmm12,%xmm13 vpxor %xmm4,%xmm12,%xmm12 vpaddb %xmm2,%xmm13,%xmm14 vpxor %xmm4,%xmm13,%xmm13 vpaddb %xmm2,%xmm14,%xmm1 vpxor %xmm4,%xmm14,%xmm14 jmp L$oop_ctr32 .p2align 4 L$oop_ctr32: vaesenc %xmm15,%xmm9,%xmm9 vaesenc %xmm15,%xmm10,%xmm10 vaesenc %xmm15,%xmm11,%xmm11 vaesenc %xmm15,%xmm12,%xmm12 vaesenc %xmm15,%xmm13,%xmm13 vaesenc %xmm15,%xmm14,%xmm14 vmovups (%r12),%xmm15 leaq 16(%r12),%r12 decl %r13d jnz L$oop_ctr32 vmovdqu (%r12),%xmm3 vaesenc %xmm15,%xmm9,%xmm9 vpxor 0(%rdi),%xmm3,%xmm4 vaesenc %xmm15,%xmm10,%xmm10 vpxor 16(%rdi),%xmm3,%xmm5 vaesenc %xmm15,%xmm11,%xmm11 vpxor 32(%rdi),%xmm3,%xmm6 vaesenc %xmm15,%xmm12,%xmm12 vpxor 48(%rdi),%xmm3,%xmm8 vaesenc %xmm15,%xmm13,%xmm13 vpxor 64(%rdi),%xmm3,%xmm2 vaesenc %xmm15,%xmm14,%xmm14 vpxor 80(%rdi),%xmm3,%xmm3 leaq 96(%rdi),%rdi vaesenclast %xmm4,%xmm9,%xmm9 vaesenclast %xmm5,%xmm10,%xmm10 vaesenclast %xmm6,%xmm11,%xmm11 vaesenclast %xmm8,%xmm12,%xmm12 vaesenclast %xmm2,%xmm13,%xmm13 vaesenclast %xmm3,%xmm14,%xmm14 vmovups %xmm9,0(%rsi) vmovups %xmm10,16(%rsi) vmovups %xmm11,32(%rsi) vmovups %xmm12,48(%rsi) vmovups %xmm13,64(%rsi) vmovups %xmm14,80(%rsi) leaq 96(%rsi),%rsi ret 
.p2align 5 L$handle_ctr32_2: vpshufb %xmm0,%xmm1,%xmm6 vmovdqu 48(%r11),%xmm5 vpaddd 64(%r11),%xmm6,%xmm10 vpaddd %xmm5,%xmm6,%xmm11 vpaddd %xmm5,%xmm10,%xmm12 vpshufb %xmm0,%xmm10,%xmm10 vpaddd %xmm5,%xmm11,%xmm13 vpshufb %xmm0,%xmm11,%xmm11 vpxor %xmm4,%xmm10,%xmm10 vpaddd %xmm5,%xmm12,%xmm14 vpshufb %xmm0,%xmm12,%xmm12 vpxor %xmm4,%xmm11,%xmm11 vpaddd %xmm5,%xmm13,%xmm1 vpshufb %xmm0,%xmm13,%xmm13 vpxor %xmm4,%xmm12,%xmm12 vpshufb %xmm0,%xmm14,%xmm14 vpxor %xmm4,%xmm13,%xmm13 vpshufb %xmm0,%xmm1,%xmm1 vpxor %xmm4,%xmm14,%xmm14 jmp L$oop_ctr32 .globl _aesni_gcm_encrypt .private_extern _aesni_gcm_encrypt .p2align 5 _aesni_gcm_encrypt: _CET_ENDBR #ifdef BORINGSSL_DISPATCH_TEST movb $1,_BORINGSSL_function_hit+2(%rip) #endif xorq %rax,%rax cmpq $288,%rdx jb L$gcm_enc_abort pushq %rbp movq %rsp,%rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 vzeroupper vmovdqu (%r8),%xmm1 addq $-128,%rsp movl 12(%r8),%ebx leaq L$bswap_mask(%rip),%r11 leaq -128(%rcx),%r14 movq $0xf80,%r15 leaq 128(%rcx),%rcx vmovdqu (%r11),%xmm0 andq $-128,%rsp movl 240-128(%rcx),%r10d andq %r15,%r14 andq %rsp,%r15 subq %r14,%r15 jc L$enc_no_key_aliasing cmpq $768,%r15 jnc L$enc_no_key_aliasing subq %r15,%rsp L$enc_no_key_aliasing: movq %rsi,%r14 leaq -192(%rsi,%rdx,1),%r15 shrq $4,%rdx call _aesni_ctr32_6x vpshufb %xmm0,%xmm9,%xmm8 vpshufb %xmm0,%xmm10,%xmm2 vmovdqu %xmm8,112(%rsp) vpshufb %xmm0,%xmm11,%xmm4 vmovdqu %xmm2,96(%rsp) vpshufb %xmm0,%xmm12,%xmm5 vmovdqu %xmm4,80(%rsp) vpshufb %xmm0,%xmm13,%xmm6 vmovdqu %xmm5,64(%rsp) vpshufb %xmm0,%xmm14,%xmm7 vmovdqu %xmm6,48(%rsp) call _aesni_ctr32_6x movq 16(%rbp),%r12 leaq 32(%r9),%r9 vmovdqu (%r12),%xmm8 subq $12,%rdx movq $192,%rax vpshufb %xmm0,%xmm8,%xmm8 call _aesni_ctr32_ghash_6x vmovdqu 32(%rsp),%xmm7 vmovdqu (%r11),%xmm0 vmovdqu 0-32(%r9),%xmm3 vpunpckhqdq %xmm7,%xmm7,%xmm1 vmovdqu 32-32(%r9),%xmm15 vmovups %xmm9,-96(%rsi) vpshufb %xmm0,%xmm9,%xmm9 vpxor %xmm7,%xmm1,%xmm1 vmovups %xmm10,-80(%rsi) vpshufb %xmm0,%xmm10,%xmm10 vmovups %xmm11,-64(%rsi) vpshufb %xmm0,%xmm11,%xmm11 vmovups %xmm12,-48(%rsi) vpshufb %xmm0,%xmm12,%xmm12 vmovups %xmm13,-32(%rsi) vpshufb %xmm0,%xmm13,%xmm13 vmovups %xmm14,-16(%rsi) vpshufb %xmm0,%xmm14,%xmm14 vmovdqu %xmm9,16(%rsp) vmovdqu 48(%rsp),%xmm6 vmovdqu 16-32(%r9),%xmm0 vpunpckhqdq %xmm6,%xmm6,%xmm2 vpclmulqdq $0x00,%xmm3,%xmm7,%xmm5 vpxor %xmm6,%xmm2,%xmm2 vpclmulqdq $0x11,%xmm3,%xmm7,%xmm7 vpclmulqdq $0x00,%xmm15,%xmm1,%xmm1 vmovdqu 64(%rsp),%xmm9 vpclmulqdq $0x00,%xmm0,%xmm6,%xmm4 vmovdqu 48-32(%r9),%xmm3 vpxor %xmm5,%xmm4,%xmm4 vpunpckhqdq %xmm9,%xmm9,%xmm5 vpclmulqdq $0x11,%xmm0,%xmm6,%xmm6 vpxor %xmm9,%xmm5,%xmm5 vpxor %xmm7,%xmm6,%xmm6 vpclmulqdq $0x10,%xmm15,%xmm2,%xmm2 vmovdqu 80-32(%r9),%xmm15 vpxor %xmm1,%xmm2,%xmm2 vmovdqu 80(%rsp),%xmm1 vpclmulqdq $0x00,%xmm3,%xmm9,%xmm7 vmovdqu 64-32(%r9),%xmm0 vpxor %xmm4,%xmm7,%xmm7 vpunpckhqdq %xmm1,%xmm1,%xmm4 vpclmulqdq $0x11,%xmm3,%xmm9,%xmm9 vpxor %xmm1,%xmm4,%xmm4 vpxor %xmm6,%xmm9,%xmm9 vpclmulqdq $0x00,%xmm15,%xmm5,%xmm5 vpxor %xmm2,%xmm5,%xmm5 vmovdqu 96(%rsp),%xmm2 vpclmulqdq $0x00,%xmm0,%xmm1,%xmm6 vmovdqu 96-32(%r9),%xmm3 vpxor %xmm7,%xmm6,%xmm6 vpunpckhqdq %xmm2,%xmm2,%xmm7 vpclmulqdq $0x11,%xmm0,%xmm1,%xmm1 vpxor %xmm2,%xmm7,%xmm7 vpxor %xmm9,%xmm1,%xmm1 vpclmulqdq $0x10,%xmm15,%xmm4,%xmm4 vmovdqu 128-32(%r9),%xmm15 vpxor %xmm5,%xmm4,%xmm4 vpxor 112(%rsp),%xmm8,%xmm8 vpclmulqdq $0x00,%xmm3,%xmm2,%xmm5 vmovdqu 112-32(%r9),%xmm0 vpunpckhqdq %xmm8,%xmm8,%xmm9 vpxor %xmm6,%xmm5,%xmm5 vpclmulqdq $0x11,%xmm3,%xmm2,%xmm2 vpxor %xmm8,%xmm9,%xmm9 vpxor %xmm1,%xmm2,%xmm2 
vpclmulqdq $0x00,%xmm15,%xmm7,%xmm7 vpxor %xmm4,%xmm7,%xmm4 vpclmulqdq $0x00,%xmm0,%xmm8,%xmm6 vmovdqu 0-32(%r9),%xmm3 vpunpckhqdq %xmm14,%xmm14,%xmm1 vpclmulqdq $0x11,%xmm0,%xmm8,%xmm8 vpxor %xmm14,%xmm1,%xmm1 vpxor %xmm5,%xmm6,%xmm5 vpclmulqdq $0x10,%xmm15,%xmm9,%xmm9 vmovdqu 32-32(%r9),%xmm15 vpxor %xmm2,%xmm8,%xmm7 vpxor %xmm4,%xmm9,%xmm6 vmovdqu 16-32(%r9),%xmm0 vpxor %xmm5,%xmm7,%xmm9 vpclmulqdq $0x00,%xmm3,%xmm14,%xmm4 vpxor %xmm9,%xmm6,%xmm6 vpunpckhqdq %xmm13,%xmm13,%xmm2 vpclmulqdq $0x11,%xmm3,%xmm14,%xmm14 vpxor %xmm13,%xmm2,%xmm2 vpslldq $8,%xmm6,%xmm9 vpclmulqdq $0x00,%xmm15,%xmm1,%xmm1 vpxor %xmm9,%xmm5,%xmm8 vpsrldq $8,%xmm6,%xmm6 vpxor %xmm6,%xmm7,%xmm7 vpclmulqdq $0x00,%xmm0,%xmm13,%xmm5 vmovdqu 48-32(%r9),%xmm3 vpxor %xmm4,%xmm5,%xmm5 vpunpckhqdq %xmm12,%xmm12,%xmm9 vpclmulqdq $0x11,%xmm0,%xmm13,%xmm13 vpxor %xmm12,%xmm9,%xmm9 vpxor %xmm14,%xmm13,%xmm13 vpalignr $8,%xmm8,%xmm8,%xmm14 vpclmulqdq $0x10,%xmm15,%xmm2,%xmm2 vmovdqu 80-32(%r9),%xmm15 vpxor %xmm1,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm3,%xmm12,%xmm4 vmovdqu 64-32(%r9),%xmm0 vpxor %xmm5,%xmm4,%xmm4 vpunpckhqdq %xmm11,%xmm11,%xmm1 vpclmulqdq $0x11,%xmm3,%xmm12,%xmm12 vpxor %xmm11,%xmm1,%xmm1 vpxor %xmm13,%xmm12,%xmm12 vxorps 16(%rsp),%xmm7,%xmm7 vpclmulqdq $0x00,%xmm15,%xmm9,%xmm9 vpxor %xmm2,%xmm9,%xmm9 vpclmulqdq $0x10,16(%r11),%xmm8,%xmm8 vxorps %xmm14,%xmm8,%xmm8 vpclmulqdq $0x00,%xmm0,%xmm11,%xmm5 vmovdqu 96-32(%r9),%xmm3 vpxor %xmm4,%xmm5,%xmm5 vpunpckhqdq %xmm10,%xmm10,%xmm2 vpclmulqdq $0x11,%xmm0,%xmm11,%xmm11 vpxor %xmm10,%xmm2,%xmm2 vpalignr $8,%xmm8,%xmm8,%xmm14 vpxor %xmm12,%xmm11,%xmm11 vpclmulqdq $0x10,%xmm15,%xmm1,%xmm1 vmovdqu 128-32(%r9),%xmm15 vpxor %xmm9,%xmm1,%xmm1 vxorps %xmm7,%xmm14,%xmm14 vpclmulqdq $0x10,16(%r11),%xmm8,%xmm8 vxorps %xmm14,%xmm8,%xmm8 vpclmulqdq $0x00,%xmm3,%xmm10,%xmm4 vmovdqu 112-32(%r9),%xmm0 vpxor %xmm5,%xmm4,%xmm4 vpunpckhqdq %xmm8,%xmm8,%xmm9 vpclmulqdq $0x11,%xmm3,%xmm10,%xmm10 vpxor %xmm8,%xmm9,%xmm9 vpxor %xmm11,%xmm10,%xmm10 vpclmulqdq $0x00,%xmm15,%xmm2,%xmm2 vpxor %xmm1,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm0,%xmm8,%xmm5 vpclmulqdq $0x11,%xmm0,%xmm8,%xmm7 vpxor %xmm4,%xmm5,%xmm5 vpclmulqdq $0x10,%xmm15,%xmm9,%xmm6 vpxor %xmm10,%xmm7,%xmm7 vpxor %xmm2,%xmm6,%xmm6 vpxor %xmm5,%xmm7,%xmm4 vpxor %xmm4,%xmm6,%xmm6 vpslldq $8,%xmm6,%xmm1 vmovdqu 16(%r11),%xmm3 vpsrldq $8,%xmm6,%xmm6 vpxor %xmm1,%xmm5,%xmm8 vpxor %xmm6,%xmm7,%xmm7 vpalignr $8,%xmm8,%xmm8,%xmm2 vpclmulqdq $0x10,%xmm3,%xmm8,%xmm8 vpxor %xmm2,%xmm8,%xmm8 vpalignr $8,%xmm8,%xmm8,%xmm2 vpclmulqdq $0x10,%xmm3,%xmm8,%xmm8 vpxor %xmm7,%xmm2,%xmm2 vpxor %xmm2,%xmm8,%xmm8 movq 16(%rbp),%r12 vpshufb (%r11),%xmm8,%xmm8 vmovdqu %xmm8,(%r12) vzeroupper leaq -40(%rbp),%rsp popq %r15 popq %r14 popq %r13 popq %r12 popq %rbx popq %rbp L$gcm_enc_abort: ret .section __DATA,__const .p2align 6 L$bswap_mask: .byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 L$poly: .byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2 L$one_msb: .byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 L$two_lsb: .byte 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 L$one_lsb: .byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 .byte 65,69,83,45,78,73,32,71,67,77,32,109,111,100,117,108,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .p2align 6 .text #endif
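The _aesni_ctr32_ghash_6x routine above keeps six AES-CTR blocks in flight per iteration and appears to use a byte-wise vpaddb increment as its fast path, falling back to L$handle_ctr32 (byte swap plus full 32-bit vpaddd) when the low counter byte is about to carry. The underlying rule is GCM's inc32: only the last four bytes of the 16-byte counter block form a big-endian counter, and it wraps modulo 2^32. A hedged Rust sketch of that rule follows; inc32 is a name chosen here for illustration, not a function exported by ring.

// Illustrative sketch of GCM's 32-bit counter increment (NIST SP 800-38D
// "inc32"): bytes 12..16 of the counter block are a big-endian u32 that
// wraps around; the first 12 bytes are left untouched.
fn inc32(counter_block: &mut [u8; 16]) {
    let ctr = u32::from_be_bytes(counter_block[12..16].try_into().unwrap());
    counter_block[12..16].copy_from_slice(&ctr.wrapping_add(1).to_be_bytes());
}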
fatiimajamiil/rustpad-custom
10,863
.cargo/registry/src/index.crates.io-6f17d22bba15001f/ring-0.17.14/pregenerated/ghash-neon-armv8-win64.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) .text .globl gcm_init_neon .def gcm_init_neon .type 32 .endef .align 4 gcm_init_neon: AARCH64_VALID_CALL_TARGET // This function is adapted from gcm_init_v8. xC2 is t3. ld1 {v17.2d}, [x1] // load H movi v19.16b, #0xe1 shl v19.2d, v19.2d, #57 // 0xc2.0 ext v3.16b, v17.16b, v17.16b, #8 ushr v18.2d, v19.2d, #63 dup v17.4s, v17.s[1] ext v16.16b, v18.16b, v19.16b, #8 // t0=0xc2....01 ushr v18.2d, v3.2d, #63 sshr v17.4s, v17.4s, #31 // broadcast carry bit and v18.16b, v18.16b, v16.16b shl v3.2d, v3.2d, #1 ext v18.16b, v18.16b, v18.16b, #8 and v16.16b, v16.16b, v17.16b orr v3.16b, v3.16b, v18.16b // H<<<=1 eor v5.16b, v3.16b, v16.16b // twisted H st1 {v5.2d}, [x0] // store Htable[0] ret .globl gcm_gmult_neon .def gcm_gmult_neon .type 32 .endef .align 4 gcm_gmult_neon: AARCH64_VALID_CALL_TARGET ld1 {v3.16b}, [x0] // load Xi ld1 {v5.1d}, [x1], #8 // load twisted H ld1 {v6.1d}, [x1] adrp x9, Lmasks // load constants add x9, x9, :lo12:Lmasks ld1 {v24.2d, v25.2d}, [x9] rev64 v3.16b, v3.16b // byteswap Xi ext v3.16b, v3.16b, v3.16b, #8 eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing mov x3, #16 b Lgmult_neon .globl gcm_ghash_neon .def gcm_ghash_neon .type 32 .endef .align 4 gcm_ghash_neon: AARCH64_VALID_CALL_TARGET ld1 {v0.16b}, [x0] // load Xi ld1 {v5.1d}, [x1], #8 // load twisted H ld1 {v6.1d}, [x1] adrp x9, Lmasks // load constants add x9, x9, :lo12:Lmasks ld1 {v24.2d, v25.2d}, [x9] rev64 v0.16b, v0.16b // byteswap Xi ext v0.16b, v0.16b, v0.16b, #8 eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing Loop_neon: ld1 {v3.16b}, [x2], #16 // load inp rev64 v3.16b, v3.16b // byteswap inp ext v3.16b, v3.16b, v3.16b, #8 eor v3.16b, v3.16b, v0.16b // inp ^= Xi Lgmult_neon: // Split the input into v3 and v4. (The upper halves are unused, // so it is okay to leave them alone.) ins v4.d[0], v3.d[1] ext v16.8b, v5.8b, v5.8b, #1 // A1 pmull v16.8h, v16.8b, v3.8b // F = A1*B ext v0.8b, v3.8b, v3.8b, #1 // B1 pmull v0.8h, v5.8b, v0.8b // E = A*B1 ext v17.8b, v5.8b, v5.8b, #2 // A2 pmull v17.8h, v17.8b, v3.8b // H = A2*B ext v19.8b, v3.8b, v3.8b, #2 // B2 pmull v19.8h, v5.8b, v19.8b // G = A*B2 ext v18.8b, v5.8b, v5.8b, #3 // A3 eor v16.16b, v16.16b, v0.16b // L = E + F pmull v18.8h, v18.8b, v3.8b // J = A3*B ext v0.8b, v3.8b, v3.8b, #3 // B3 eor v17.16b, v17.16b, v19.16b // M = G + H pmull v0.8h, v5.8b, v0.8b // I = A*B3 // Here we diverge from the 32-bit version. It computes the following // (instructions reordered for clarity): // // veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L) // vand $t0#hi, $t0#hi, $k48 // veor $t0#lo, $t0#lo, $t0#hi // // veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M) // vand $t1#hi, $t1#hi, $k32 // veor $t1#lo, $t1#lo, $t1#hi // // veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N) // vand $t2#hi, $t2#hi, $k16 // veor $t2#lo, $t2#lo, $t2#hi // // veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K) // vmov.i64 $t3#hi, #0 // // $kN is a mask with the bottom N bits set. AArch64 cannot compute on // upper halves of SIMD registers, so we must split each half into // separate registers. To compensate, we pair computations up and // parallelize. ext v19.8b, v3.8b, v3.8b, #4 // B4 eor v18.16b, v18.16b, v0.16b // N = I + J pmull v19.8h, v5.8b, v19.8b // K = A*B4 // This can probably be scheduled more efficiently. For now, we just // pair up independent instructions. 
zip1 v20.2d, v16.2d, v17.2d zip1 v22.2d, v18.2d, v19.2d zip2 v21.2d, v16.2d, v17.2d zip2 v23.2d, v18.2d, v19.2d eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b and v21.16b, v21.16b, v24.16b and v23.16b, v23.16b, v25.16b eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b zip1 v16.2d, v20.2d, v21.2d zip1 v18.2d, v22.2d, v23.2d zip2 v17.2d, v20.2d, v21.2d zip2 v19.2d, v22.2d, v23.2d ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8 ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16 pmull v0.8h, v5.8b, v3.8b // D = A*B ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32 ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24 eor v16.16b, v16.16b, v17.16b eor v18.16b, v18.16b, v19.16b eor v0.16b, v0.16b, v16.16b eor v0.16b, v0.16b, v18.16b eor v3.8b, v3.8b, v4.8b // Karatsuba pre-processing ext v16.8b, v7.8b, v7.8b, #1 // A1 pmull v16.8h, v16.8b, v3.8b // F = A1*B ext v1.8b, v3.8b, v3.8b, #1 // B1 pmull v1.8h, v7.8b, v1.8b // E = A*B1 ext v17.8b, v7.8b, v7.8b, #2 // A2 pmull v17.8h, v17.8b, v3.8b // H = A2*B ext v19.8b, v3.8b, v3.8b, #2 // B2 pmull v19.8h, v7.8b, v19.8b // G = A*B2 ext v18.8b, v7.8b, v7.8b, #3 // A3 eor v16.16b, v16.16b, v1.16b // L = E + F pmull v18.8h, v18.8b, v3.8b // J = A3*B ext v1.8b, v3.8b, v3.8b, #3 // B3 eor v17.16b, v17.16b, v19.16b // M = G + H pmull v1.8h, v7.8b, v1.8b // I = A*B3 // Here we diverge from the 32-bit version. It computes the following // (instructions reordered for clarity): // // veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L) // vand $t0#hi, $t0#hi, $k48 // veor $t0#lo, $t0#lo, $t0#hi // // veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M) // vand $t1#hi, $t1#hi, $k32 // veor $t1#lo, $t1#lo, $t1#hi // // veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N) // vand $t2#hi, $t2#hi, $k16 // veor $t2#lo, $t2#lo, $t2#hi // // veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K) // vmov.i64 $t3#hi, #0 // // $kN is a mask with the bottom N bits set. AArch64 cannot compute on // upper halves of SIMD registers, so we must split each half into // separate registers. To compensate, we pair computations up and // parallelize. ext v19.8b, v3.8b, v3.8b, #4 // B4 eor v18.16b, v18.16b, v1.16b // N = I + J pmull v19.8h, v7.8b, v19.8b // K = A*B4 // This can probably be scheduled more efficiently. For now, we just // pair up independent instructions. 
zip1 v20.2d, v16.2d, v17.2d zip1 v22.2d, v18.2d, v19.2d zip2 v21.2d, v16.2d, v17.2d zip2 v23.2d, v18.2d, v19.2d eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b and v21.16b, v21.16b, v24.16b and v23.16b, v23.16b, v25.16b eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b zip1 v16.2d, v20.2d, v21.2d zip1 v18.2d, v22.2d, v23.2d zip2 v17.2d, v20.2d, v21.2d zip2 v19.2d, v22.2d, v23.2d ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8 ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16 pmull v1.8h, v7.8b, v3.8b // D = A*B ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32 ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24 eor v16.16b, v16.16b, v17.16b eor v18.16b, v18.16b, v19.16b eor v1.16b, v1.16b, v16.16b eor v1.16b, v1.16b, v18.16b ext v16.8b, v6.8b, v6.8b, #1 // A1 pmull v16.8h, v16.8b, v4.8b // F = A1*B ext v2.8b, v4.8b, v4.8b, #1 // B1 pmull v2.8h, v6.8b, v2.8b // E = A*B1 ext v17.8b, v6.8b, v6.8b, #2 // A2 pmull v17.8h, v17.8b, v4.8b // H = A2*B ext v19.8b, v4.8b, v4.8b, #2 // B2 pmull v19.8h, v6.8b, v19.8b // G = A*B2 ext v18.8b, v6.8b, v6.8b, #3 // A3 eor v16.16b, v16.16b, v2.16b // L = E + F pmull v18.8h, v18.8b, v4.8b // J = A3*B ext v2.8b, v4.8b, v4.8b, #3 // B3 eor v17.16b, v17.16b, v19.16b // M = G + H pmull v2.8h, v6.8b, v2.8b // I = A*B3 // Here we diverge from the 32-bit version. It computes the following // (instructions reordered for clarity): // // veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L) // vand $t0#hi, $t0#hi, $k48 // veor $t0#lo, $t0#lo, $t0#hi // // veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M) // vand $t1#hi, $t1#hi, $k32 // veor $t1#lo, $t1#lo, $t1#hi // // veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N) // vand $t2#hi, $t2#hi, $k16 // veor $t2#lo, $t2#lo, $t2#hi // // veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K) // vmov.i64 $t3#hi, #0 // // $kN is a mask with the bottom N bits set. AArch64 cannot compute on // upper halves of SIMD registers, so we must split each half into // separate registers. To compensate, we pair computations up and // parallelize. ext v19.8b, v4.8b, v4.8b, #4 // B4 eor v18.16b, v18.16b, v2.16b // N = I + J pmull v19.8h, v6.8b, v19.8b // K = A*B4 // This can probably be scheduled more efficiently. For now, we just // pair up independent instructions. zip1 v20.2d, v16.2d, v17.2d zip1 v22.2d, v18.2d, v19.2d zip2 v21.2d, v16.2d, v17.2d zip2 v23.2d, v18.2d, v19.2d eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b and v21.16b, v21.16b, v24.16b and v23.16b, v23.16b, v25.16b eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b zip1 v16.2d, v20.2d, v21.2d zip1 v18.2d, v22.2d, v23.2d zip2 v17.2d, v20.2d, v21.2d zip2 v19.2d, v22.2d, v23.2d ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8 ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16 pmull v2.8h, v6.8b, v4.8b // D = A*B ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32 ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24 eor v16.16b, v16.16b, v17.16b eor v18.16b, v18.16b, v19.16b eor v2.16b, v2.16b, v16.16b eor v2.16b, v2.16b, v18.16b ext v16.16b, v0.16b, v2.16b, #8 eor v1.16b, v1.16b, v0.16b // Karatsuba post-processing eor v1.16b, v1.16b, v2.16b eor v1.16b, v1.16b, v16.16b // Xm overlaps Xh.lo and Xl.hi ins v0.d[1], v1.d[0] // Xh|Xl - 256-bit result // This is a no-op due to the ins instruction below. 
// ins v2.d[0], v1.d[1] // equivalent of reduction_avx from ghash-x86_64.pl shl v17.2d, v0.2d, #57 // 1st phase shl v18.2d, v0.2d, #62 eor v18.16b, v18.16b, v17.16b // shl v17.2d, v0.2d, #63 eor v18.16b, v18.16b, v17.16b // // Note Xm contains {Xl.d[1], Xh.d[0]}. eor v18.16b, v18.16b, v1.16b ins v0.d[1], v18.d[0] // Xl.d[1] ^= t2.d[0] ins v2.d[0], v18.d[1] // Xh.d[0] ^= t2.d[1] ushr v18.2d, v0.2d, #1 // 2nd phase eor v2.16b, v2.16b,v0.16b eor v0.16b, v0.16b,v18.16b // ushr v18.2d, v18.2d, #6 ushr v0.2d, v0.2d, #1 // eor v0.16b, v0.16b, v2.16b // eor v0.16b, v0.16b, v18.16b // subs x3, x3, #16 bne Loop_neon rev64 v0.16b, v0.16b // byteswap Xi and write ext v0.16b, v0.16b, v0.16b, #8 st1 {v0.16b}, [x0] ret .section .rodata .align 4 Lmasks: .quad 0x0000ffffffffffff // k48 .quad 0x00000000ffffffff // k32 .quad 0x000000000000ffff // k16 .quad 0x0000000000000000 // k0 .byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,100,101,114,105,118,101,100,32,102,114,111,109,32,65,82,77,118,52,32,118,101,114,115,105,111,110,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
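The Loop_neon body above XORs each 16-byte input block into Xi and then multiplies by the (twisted) hash key with 8-bit pmull pieces, the k48/k32/k16 masks, and shifts standing in for a full 64-bit carry-less multiply. Functionally this is the textbook GHASH update Xi = (Xi ^ block) * H in GF(2^128). Below is a slow, illustrative Rust sketch of that update using the bit-by-bit multiplication from NIST SP 800-38D rather than ring's pmull approach; the function names are chosen here for illustration, and this version is neither constant-time nor fast.

// Bit-by-bit multiplication in GF(2^128) with the GCM reduction
// polynomial x^128 + x^7 + x^2 + x + 1 (R = 0xe1 followed by 120 zero bits).
fn gf128_mul(x: &[u8; 16], y: &[u8; 16]) -> [u8; 16] {
    const R: u128 = 0xe1 << 120;
    let x = u128::from_be_bytes(*x);
    let mut v = u128::from_be_bytes(*y);
    let mut z: u128 = 0;
    for i in 0..128 {
        if (x >> (127 - i)) & 1 == 1 {
            z ^= v; // add V whenever bit i of X (MSB first) is set
        }
        let carry = v & 1 == 1;
        v >>= 1;
        if carry {
            v ^= R; // reduce when a coefficient falls off the low end
        }
    }
    z.to_be_bytes()
}

// One GHASH block: Xi = (Xi ^ block) * H, matching the "inp ^= Xi" step
// and the multiply performed by Lgmult_neon above.
fn ghash_update(xi: &mut [u8; 16], h: &[u8; 16], block: &[u8; 16]) {
    for i in 0..16 {
        xi[i] ^= block[i];
    }
    *xi = gf128_mul(xi, h);
}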
fatiimajamiil/rustpad-custom
190,544
.cargo/registry/src/index.crates.io-6f17d22bba15001f/ring-0.17.14/pregenerated/chacha20_poly1305_x86_64-macosx.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__) .section __DATA,__const .p2align 6 chacha20_poly1305_constants: L$chacha20_consts: .byte 'e','x','p','a','n','d',' ','3','2','-','b','y','t','e',' ','k' .byte 'e','x','p','a','n','d',' ','3','2','-','b','y','t','e',' ','k' L$rol8: .byte 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14 .byte 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14 L$rol16: .byte 2,3,0,1, 6,7,4,5, 10,11,8,9, 14,15,12,13 .byte 2,3,0,1, 6,7,4,5, 10,11,8,9, 14,15,12,13 L$avx2_init: .long 0,0,0,0 L$sse_inc: .long 1,0,0,0 L$avx2_inc: .long 2,0,0,0,2,0,0,0 L$clamp: .quad 0x0FFFFFFC0FFFFFFF, 0x0FFFFFFC0FFFFFFC .quad 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF .p2align 4 L$and_masks: .byte 0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff .text .p2align 6 poly_hash_ad_internal: xorq %r10,%r10 xorq %r11,%r11 xorq %r12,%r12 cmpq $13,%r8 jne L$hash_ad_loop L$poly_fast_tls_ad: movq (%rcx),%r10 movq 5(%rcx),%r11 shrq $24,%r11 movq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 ret L$hash_ad_loop: cmpq $16,%r8 jb L$hash_ad_tail addq 0+0(%rcx),%r10 adcq 8+0(%rcx),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq 
$2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rcx),%rcx subq $16,%r8 jmp L$hash_ad_loop L$hash_ad_tail: cmpq $0,%r8 je L$hash_ad_done xorq %r13,%r13 xorq %r14,%r14 xorq %r15,%r15 addq %r8,%rcx L$hash_ad_tail_loop: shldq $8,%r13,%r14 shlq $8,%r13 movzbq -1(%rcx),%r15 xorq %r15,%r13 decq %rcx decq %r8 jne L$hash_ad_tail_loop addq %r13,%r10 adcq %r14,%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 L$hash_ad_done: ret .globl _chacha20_poly1305_open_sse41 .private_extern _chacha20_poly1305_open_sse41 .p2align 6 _chacha20_poly1305_open_sse41: _CET_ENDBR pushq %rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 pushq %r9 subq $288 + 0 + 32,%rsp leaq 32(%rsp),%rbp andq $-32,%rbp movq %rdx,%rbx movq %r8,0+0+32(%rbp) movq %rbx,8+0+32(%rbp) cmpq $128,%rbx jbe L$open_sse_128 movdqa L$chacha20_consts(%rip),%xmm0 movdqu 0(%r9),%xmm4 movdqu 16(%r9),%xmm8 movdqu 32(%r9),%xmm12 movdqa %xmm12,%xmm7 movdqa %xmm4,0+48(%rbp) movdqa %xmm8,0+64(%rbp) movdqa %xmm12,0+96(%rbp) movq $10,%r10 L$open_sse_init_rounds: paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 decq %r10 jne L$open_sse_init_rounds paddd L$chacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 pand L$clamp(%rip),%xmm0 movdqa %xmm0,0+0(%rbp) movdqa %xmm4,0+16(%rbp) movq %r8,%r8 call poly_hash_ad_internal L$open_sse_main_loop: cmpq $256,%rbx jb L$open_sse_tail movdqa L$chacha20_consts(%rip),%xmm0 movdqa 0+48(%rbp),%xmm4 movdqa 0+64(%rbp),%xmm8 movdqa %xmm0,%xmm1 movdqa %xmm4,%xmm5 movdqa %xmm8,%xmm9 movdqa %xmm0,%xmm2 movdqa %xmm4,%xmm6 movdqa %xmm8,%xmm10 movdqa %xmm0,%xmm3 movdqa %xmm4,%xmm7 movdqa %xmm8,%xmm11 movdqa 0+96(%rbp),%xmm15 paddd L$sse_inc(%rip),%xmm15 movdqa %xmm15,%xmm14 paddd L$sse_inc(%rip),%xmm14 movdqa %xmm14,%xmm13 paddd L$sse_inc(%rip),%xmm13 movdqa %xmm13,%xmm12 paddd L$sse_inc(%rip),%xmm12 movdqa %xmm12,0+96(%rbp) movdqa %xmm13,0+112(%rbp) movdqa %xmm14,0+128(%rbp) movdqa %xmm15,0+144(%rbp) movq $4,%rcx movq %rsi,%r8 L$open_sse_main_loop_rounds: movdqa %xmm8,0+80(%rbp) movdqa L$rol16(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 
.byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 addq 0+0(%r8),%r10 adcq 8+0(%r8),%r11 adcq $1,%r12 leaq 16(%r8),%r8 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm4 pxor %xmm8,%xmm4 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movdqa L$rol8(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm4 pxor %xmm8,%xmm4 movdqa 0+80(%rbp),%xmm8 imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 .byte 102,15,58,15,255,4 .byte 102,69,15,58,15,219,8 .byte 102,69,15,58,15,255,12 .byte 102,15,58,15,246,4 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,12 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 movdqa %xmm8,0+80(%rbp) movdqa L$rol16(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm4 pxor %xmm8,%xmm4 movdqa L$rol8(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm7 pxor 
%xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm4 pxor %xmm8,%xmm4 movdqa 0+80(%rbp),%xmm8 .byte 102,15,58,15,255,12 .byte 102,69,15,58,15,219,8 .byte 102,69,15,58,15,255,4 .byte 102,15,58,15,246,12 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,4 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 decq %rcx jge L$open_sse_main_loop_rounds addq 0+0(%r8),%r10 adcq 8+0(%r8),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%r8),%r8 cmpq $-6,%rcx jg L$open_sse_main_loop_rounds paddd L$chacha20_consts(%rip),%xmm3 paddd 0+48(%rbp),%xmm7 paddd 0+64(%rbp),%xmm11 paddd 0+144(%rbp),%xmm15 paddd L$chacha20_consts(%rip),%xmm2 paddd 0+48(%rbp),%xmm6 paddd 0+64(%rbp),%xmm10 paddd 0+128(%rbp),%xmm14 paddd L$chacha20_consts(%rip),%xmm1 paddd 0+48(%rbp),%xmm5 paddd 0+64(%rbp),%xmm9 paddd 0+112(%rbp),%xmm13 paddd L$chacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 movdqa %xmm12,0+80(%rbp) movdqu 0 + 0(%rsi),%xmm12 pxor %xmm3,%xmm12 movdqu %xmm12,0 + 0(%rdi) movdqu 16 + 0(%rsi),%xmm12 pxor %xmm7,%xmm12 movdqu %xmm12,16 + 0(%rdi) movdqu 32 + 0(%rsi),%xmm12 pxor %xmm11,%xmm12 movdqu %xmm12,32 + 0(%rdi) movdqu 48 + 0(%rsi),%xmm12 pxor %xmm15,%xmm12 movdqu %xmm12,48 + 0(%rdi) movdqu 0 + 64(%rsi),%xmm3 movdqu 16 + 64(%rsi),%xmm7 movdqu 32 + 64(%rsi),%xmm11 movdqu 48 + 64(%rsi),%xmm15 pxor %xmm3,%xmm2 pxor %xmm7,%xmm6 pxor %xmm11,%xmm10 pxor %xmm14,%xmm15 movdqu %xmm2,0 + 64(%rdi) movdqu %xmm6,16 + 64(%rdi) movdqu %xmm10,32 + 64(%rdi) movdqu %xmm15,48 + 64(%rdi) movdqu 0 + 128(%rsi),%xmm3 movdqu 16 + 128(%rsi),%xmm7 movdqu 32 + 128(%rsi),%xmm11 movdqu 48 + 128(%rsi),%xmm15 pxor %xmm3,%xmm1 pxor %xmm7,%xmm5 pxor %xmm11,%xmm9 pxor %xmm13,%xmm15 movdqu %xmm1,0 + 128(%rdi) movdqu %xmm5,16 + 128(%rdi) movdqu %xmm9,32 + 128(%rdi) movdqu %xmm15,48 + 128(%rdi) movdqu 0 + 192(%rsi),%xmm3 movdqu 16 + 192(%rsi),%xmm7 movdqu 32 + 192(%rsi),%xmm11 movdqu 48 + 192(%rsi),%xmm15 pxor %xmm3,%xmm0 pxor %xmm7,%xmm4 pxor %xmm11,%xmm8 pxor 0+80(%rbp),%xmm15 movdqu %xmm0,0 + 192(%rdi) movdqu %xmm4,16 + 192(%rdi) movdqu %xmm8,32 + 192(%rdi) movdqu %xmm15,48 + 192(%rdi) leaq 256(%rsi),%rsi leaq 256(%rdi),%rdi subq $256,%rbx jmp L$open_sse_main_loop L$open_sse_tail: testq %rbx,%rbx jz L$open_sse_finalize cmpq $192,%rbx ja L$open_sse_tail_256 cmpq $128,%rbx ja L$open_sse_tail_192 cmpq $64,%rbx ja L$open_sse_tail_128 movdqa L$chacha20_consts(%rip),%xmm0 movdqa 0+48(%rbp),%xmm4 movdqa 0+64(%rbp),%xmm8 movdqa 0+96(%rbp),%xmm12 paddd L$sse_inc(%rip),%xmm12 movdqa %xmm12,0+96(%rbp) xorq %r8,%r8 movq %rbx,%rcx cmpq $16,%rcx jb L$open_sse_tail_64_rounds L$open_sse_tail_64_rounds_and_x1hash: addq 0+0(%rsi,%r8,1),%r10 adcq 8+0(%rsi,%r8,1),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq 
%rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 subq $16,%rcx L$open_sse_tail_64_rounds: addq $16,%r8 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 cmpq $16,%rcx jae L$open_sse_tail_64_rounds_and_x1hash cmpq $160,%r8 jne L$open_sse_tail_64_rounds paddd L$chacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 jmp L$open_sse_tail_64_dec_loop L$open_sse_tail_128: movdqa L$chacha20_consts(%rip),%xmm0 movdqa 0+48(%rbp),%xmm4 movdqa 0+64(%rbp),%xmm8 movdqa %xmm0,%xmm1 movdqa %xmm4,%xmm5 movdqa %xmm8,%xmm9 movdqa 0+96(%rbp),%xmm13 paddd L$sse_inc(%rip),%xmm13 movdqa %xmm13,%xmm12 paddd L$sse_inc(%rip),%xmm12 movdqa %xmm12,0+96(%rbp) movdqa %xmm13,0+112(%rbp) movq %rbx,%rcx andq $-16,%rcx xorq %r8,%r8 L$open_sse_tail_128_rounds_and_x1hash: addq 0+0(%rsi,%r8,1),%r10 adcq 8+0(%rsi,%r8,1),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 L$open_sse_tail_128_rounds: addq $16,%r8 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb 
L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 cmpq %rcx,%r8 jb L$open_sse_tail_128_rounds_and_x1hash cmpq $160,%r8 jne L$open_sse_tail_128_rounds paddd L$chacha20_consts(%rip),%xmm1 paddd 0+48(%rbp),%xmm5 paddd 0+64(%rbp),%xmm9 paddd 0+112(%rbp),%xmm13 paddd L$chacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 movdqu 0 + 0(%rsi),%xmm3 movdqu 16 + 0(%rsi),%xmm7 movdqu 32 + 0(%rsi),%xmm11 movdqu 48 + 0(%rsi),%xmm15 pxor %xmm3,%xmm1 pxor %xmm7,%xmm5 pxor %xmm11,%xmm9 pxor %xmm13,%xmm15 movdqu %xmm1,0 + 0(%rdi) movdqu %xmm5,16 + 0(%rdi) movdqu %xmm9,32 + 0(%rdi) movdqu %xmm15,48 + 0(%rdi) subq $64,%rbx leaq 64(%rsi),%rsi leaq 64(%rdi),%rdi jmp L$open_sse_tail_64_dec_loop L$open_sse_tail_192: movdqa L$chacha20_consts(%rip),%xmm0 movdqa 0+48(%rbp),%xmm4 movdqa 0+64(%rbp),%xmm8 movdqa %xmm0,%xmm1 movdqa %xmm4,%xmm5 movdqa %xmm8,%xmm9 movdqa %xmm0,%xmm2 movdqa %xmm4,%xmm6 movdqa %xmm8,%xmm10 movdqa 0+96(%rbp),%xmm14 paddd L$sse_inc(%rip),%xmm14 movdqa %xmm14,%xmm13 paddd L$sse_inc(%rip),%xmm13 movdqa %xmm13,%xmm12 paddd L$sse_inc(%rip),%xmm12 movdqa %xmm12,0+96(%rbp) movdqa %xmm13,0+112(%rbp) movdqa %xmm14,0+128(%rbp) movq %rbx,%rcx movq $160,%r8 cmpq $160,%rcx cmovgq %r8,%rcx andq $-16,%rcx xorq %r8,%r8 L$open_sse_tail_192_rounds_and_x1hash: addq 0+0(%rsi,%r8,1),%r10 adcq 8+0(%rsi,%r8,1),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 L$open_sse_tail_192_rounds: addq $16,%r8 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 paddd %xmm6,%xmm2 
pxor %xmm2,%xmm14 pshufb L$rol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $12,%xmm3 psrld $20,%xmm6 pxor %xmm3,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $7,%xmm3 psrld $25,%xmm6 pxor %xmm3,%xmm6 .byte 102,15,58,15,246,4 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,12 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $12,%xmm3 psrld $20,%xmm6 pxor %xmm3,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $7,%xmm3 psrld $25,%xmm6 pxor %xmm3,%xmm6 .byte 102,15,58,15,246,12 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,4 cmpq %rcx,%r8 jb L$open_sse_tail_192_rounds_and_x1hash cmpq $160,%r8 jne L$open_sse_tail_192_rounds cmpq $176,%rbx jb L$open_sse_tail_192_finish addq 0+160(%rsi),%r10 adcq 8+160(%rsi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 cmpq $192,%rbx jb L$open_sse_tail_192_finish addq 0+176(%rsi),%r10 adcq 8+176(%rsi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 L$open_sse_tail_192_finish: paddd L$chacha20_consts(%rip),%xmm2 paddd 0+48(%rbp),%xmm6 paddd 0+64(%rbp),%xmm10 paddd 0+128(%rbp),%xmm14 paddd L$chacha20_consts(%rip),%xmm1 paddd 0+48(%rbp),%xmm5 paddd 0+64(%rbp),%xmm9 paddd 0+112(%rbp),%xmm13 paddd L$chacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 movdqu 0 + 0(%rsi),%xmm3 movdqu 16 + 0(%rsi),%xmm7 movdqu 32 + 0(%rsi),%xmm11 movdqu 48 + 0(%rsi),%xmm15 pxor %xmm3,%xmm2 pxor %xmm7,%xmm6 pxor 
%xmm11,%xmm10 pxor %xmm14,%xmm15 movdqu %xmm2,0 + 0(%rdi) movdqu %xmm6,16 + 0(%rdi) movdqu %xmm10,32 + 0(%rdi) movdqu %xmm15,48 + 0(%rdi) movdqu 0 + 64(%rsi),%xmm3 movdqu 16 + 64(%rsi),%xmm7 movdqu 32 + 64(%rsi),%xmm11 movdqu 48 + 64(%rsi),%xmm15 pxor %xmm3,%xmm1 pxor %xmm7,%xmm5 pxor %xmm11,%xmm9 pxor %xmm13,%xmm15 movdqu %xmm1,0 + 64(%rdi) movdqu %xmm5,16 + 64(%rdi) movdqu %xmm9,32 + 64(%rdi) movdqu %xmm15,48 + 64(%rdi) subq $128,%rbx leaq 128(%rsi),%rsi leaq 128(%rdi),%rdi jmp L$open_sse_tail_64_dec_loop L$open_sse_tail_256: movdqa L$chacha20_consts(%rip),%xmm0 movdqa 0+48(%rbp),%xmm4 movdqa 0+64(%rbp),%xmm8 movdqa %xmm0,%xmm1 movdqa %xmm4,%xmm5 movdqa %xmm8,%xmm9 movdqa %xmm0,%xmm2 movdqa %xmm4,%xmm6 movdqa %xmm8,%xmm10 movdqa %xmm0,%xmm3 movdqa %xmm4,%xmm7 movdqa %xmm8,%xmm11 movdqa 0+96(%rbp),%xmm15 paddd L$sse_inc(%rip),%xmm15 movdqa %xmm15,%xmm14 paddd L$sse_inc(%rip),%xmm14 movdqa %xmm14,%xmm13 paddd L$sse_inc(%rip),%xmm13 movdqa %xmm13,%xmm12 paddd L$sse_inc(%rip),%xmm12 movdqa %xmm12,0+96(%rbp) movdqa %xmm13,0+112(%rbp) movdqa %xmm14,0+128(%rbp) movdqa %xmm15,0+144(%rbp) xorq %r8,%r8 L$open_sse_tail_256_rounds_and_x1hash: addq 0+0(%rsi,%r8,1),%r10 adcq 8+0(%rsi,%r8,1),%r11 adcq $1,%r12 movdqa %xmm11,0+80(%rbp) paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm11 pslld $12,%xmm11 psrld $20,%xmm4 pxor %xmm11,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm11 pslld $7,%xmm11 psrld $25,%xmm4 pxor %xmm11,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm11 pslld $12,%xmm11 psrld $20,%xmm5 pxor %xmm11,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm11 pslld $7,%xmm11 psrld $25,%xmm5 pxor %xmm11,%xmm5 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm11 pslld $12,%xmm11 psrld $20,%xmm6 pxor %xmm11,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm11 pslld $7,%xmm11 psrld $25,%xmm6 pxor %xmm11,%xmm6 .byte 102,15,58,15,246,4 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,12 movdqa 0+80(%rbp),%xmm11 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movdqa %xmm9,0+80(%rbp) paddd %xmm7,%xmm3 pxor %xmm3,%xmm15 pshufb L$rol16(%rip),%xmm15 paddd %xmm15,%xmm11 pxor %xmm11,%xmm7 movdqa %xmm7,%xmm9 pslld $12,%xmm9 psrld $20,%xmm7 pxor %xmm9,%xmm7 paddd %xmm7,%xmm3 pxor %xmm3,%xmm15 pshufb L$rol8(%rip),%xmm15 paddd %xmm15,%xmm11 pxor %xmm11,%xmm7 movdqa %xmm7,%xmm9 pslld $7,%xmm9 psrld $25,%xmm7 pxor %xmm9,%xmm7 .byte 102,15,58,15,255,4 .byte 102,69,15,58,15,219,8 .byte 102,69,15,58,15,255,12 movdqa 0+80(%rbp),%xmm9 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx movdqa %xmm11,0+80(%rbp) paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm11 pslld $12,%xmm11 psrld $20,%xmm4 pxor %xmm11,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb 
L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm11 pslld $7,%xmm11 psrld $25,%xmm4 pxor %xmm11,%xmm4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm11 pslld $12,%xmm11 psrld $20,%xmm5 pxor %xmm11,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm11 pslld $7,%xmm11 psrld $25,%xmm5 pxor %xmm11,%xmm5 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm11 pslld $12,%xmm11 psrld $20,%xmm6 pxor %xmm11,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm11 pslld $7,%xmm11 psrld $25,%xmm6 pxor %xmm11,%xmm6 .byte 102,15,58,15,246,12 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,4 movdqa 0+80(%rbp),%xmm11 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 movdqa %xmm9,0+80(%rbp) paddd %xmm7,%xmm3 pxor %xmm3,%xmm15 pshufb L$rol16(%rip),%xmm15 paddd %xmm15,%xmm11 pxor %xmm11,%xmm7 movdqa %xmm7,%xmm9 pslld $12,%xmm9 psrld $20,%xmm7 pxor %xmm9,%xmm7 paddd %xmm7,%xmm3 pxor %xmm3,%xmm15 pshufb L$rol8(%rip),%xmm15 paddd %xmm15,%xmm11 pxor %xmm11,%xmm7 movdqa %xmm7,%xmm9 pslld $7,%xmm9 psrld $25,%xmm7 pxor %xmm9,%xmm7 .byte 102,15,58,15,255,12 .byte 102,69,15,58,15,219,8 .byte 102,69,15,58,15,255,4 movdqa 0+80(%rbp),%xmm9 addq $16,%r8 cmpq $160,%r8 jb L$open_sse_tail_256_rounds_and_x1hash movq %rbx,%rcx andq $-16,%rcx L$open_sse_tail_256_hash: addq 0+0(%rsi,%r8,1),%r10 adcq 8+0(%rsi,%r8,1),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 addq $16,%r8 cmpq %rcx,%r8 jb L$open_sse_tail_256_hash paddd L$chacha20_consts(%rip),%xmm3 paddd 0+48(%rbp),%xmm7 paddd 0+64(%rbp),%xmm11 paddd 0+144(%rbp),%xmm15 paddd L$chacha20_consts(%rip),%xmm2 paddd 0+48(%rbp),%xmm6 paddd 0+64(%rbp),%xmm10 paddd 0+128(%rbp),%xmm14 paddd L$chacha20_consts(%rip),%xmm1 paddd 0+48(%rbp),%xmm5 paddd 0+64(%rbp),%xmm9 paddd 0+112(%rbp),%xmm13 paddd L$chacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 movdqa %xmm12,0+80(%rbp) movdqu 0 + 0(%rsi),%xmm12 pxor %xmm3,%xmm12 movdqu %xmm12,0 + 0(%rdi) movdqu 16 + 0(%rsi),%xmm12 pxor %xmm7,%xmm12 movdqu %xmm12,16 + 0(%rdi) movdqu 32 + 0(%rsi),%xmm12 pxor %xmm11,%xmm12 movdqu %xmm12,32 + 0(%rdi) movdqu 48 + 0(%rsi),%xmm12 pxor %xmm15,%xmm12 movdqu %xmm12,48 + 0(%rdi) movdqu 0 + 64(%rsi),%xmm3 movdqu 16 + 64(%rsi),%xmm7 movdqu 32 + 64(%rsi),%xmm11 movdqu 48 + 64(%rsi),%xmm15 pxor %xmm3,%xmm2 pxor %xmm7,%xmm6 pxor %xmm11,%xmm10 pxor %xmm14,%xmm15 movdqu %xmm2,0 + 64(%rdi) movdqu %xmm6,16 
+ 64(%rdi) movdqu %xmm10,32 + 64(%rdi) movdqu %xmm15,48 + 64(%rdi) movdqu 0 + 128(%rsi),%xmm3 movdqu 16 + 128(%rsi),%xmm7 movdqu 32 + 128(%rsi),%xmm11 movdqu 48 + 128(%rsi),%xmm15 pxor %xmm3,%xmm1 pxor %xmm7,%xmm5 pxor %xmm11,%xmm9 pxor %xmm13,%xmm15 movdqu %xmm1,0 + 128(%rdi) movdqu %xmm5,16 + 128(%rdi) movdqu %xmm9,32 + 128(%rdi) movdqu %xmm15,48 + 128(%rdi) movdqa 0+80(%rbp),%xmm12 subq $192,%rbx leaq 192(%rsi),%rsi leaq 192(%rdi),%rdi L$open_sse_tail_64_dec_loop: cmpq $16,%rbx jb L$open_sse_tail_16_init subq $16,%rbx movdqu (%rsi),%xmm3 pxor %xmm3,%xmm0 movdqu %xmm0,(%rdi) leaq 16(%rsi),%rsi leaq 16(%rdi),%rdi movdqa %xmm4,%xmm0 movdqa %xmm8,%xmm4 movdqa %xmm12,%xmm8 jmp L$open_sse_tail_64_dec_loop L$open_sse_tail_16_init: movdqa %xmm0,%xmm1 L$open_sse_tail_16: testq %rbx,%rbx jz L$open_sse_finalize pxor %xmm3,%xmm3 leaq -1(%rsi,%rbx,1),%rsi movq %rbx,%r8 L$open_sse_tail_16_compose: pslldq $1,%xmm3 pinsrb $0,(%rsi),%xmm3 subq $1,%rsi subq $1,%r8 jnz L$open_sse_tail_16_compose .byte 102,73,15,126,221 pextrq $1,%xmm3,%r14 pxor %xmm1,%xmm3 L$open_sse_tail_16_extract: pextrb $0,%xmm3,(%rdi) psrldq $1,%xmm3 addq $1,%rdi subq $1,%rbx jne L$open_sse_tail_16_extract addq %r13,%r10 adcq %r14,%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 L$open_sse_finalize: addq 0+0+32(%rbp),%r10 adcq 8+0+32(%rbp),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 movq %r10,%r13 movq %r11,%r14 movq %r12,%r15 subq $-5,%r10 sbbq $-1,%r11 sbbq $3,%r12 cmovcq %r13,%r10 cmovcq %r14,%r11 cmovcq %r15,%r12 addq 0+0+16(%rbp),%r10 adcq 8+0+16(%rbp),%r11 addq $288 + 0 + 32,%rsp popq %r9 movq %r10,(%r9) movq %r11,8(%r9) popq %r15 popq %r14 popq %r13 popq %r12 popq %rbx popq %rbp ret L$open_sse_128: movdqu L$chacha20_consts(%rip),%xmm0 movdqa %xmm0,%xmm1 movdqa %xmm0,%xmm2 movdqu 0(%r9),%xmm4 movdqa %xmm4,%xmm5 movdqa %xmm4,%xmm6 movdqu 16(%r9),%xmm8 movdqa %xmm8,%xmm9 movdqa %xmm8,%xmm10 movdqu 32(%r9),%xmm12 movdqa %xmm12,%xmm13 paddd L$sse_inc(%rip),%xmm13 movdqa %xmm13,%xmm14 paddd L$sse_inc(%rip),%xmm14 movdqa %xmm4,%xmm7 movdqa %xmm8,%xmm11 movdqa %xmm13,%xmm15 movq $10,%r10 L$open_sse_128_rounds: paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 
102,69,15,58,15,228,12 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $12,%xmm3 psrld $20,%xmm6 pxor %xmm3,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $7,%xmm3 psrld $25,%xmm6 pxor %xmm3,%xmm6 .byte 102,15,58,15,246,4 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,12 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $12,%xmm3 psrld $20,%xmm6 pxor %xmm3,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $7,%xmm3 psrld $25,%xmm6 pxor %xmm3,%xmm6 .byte 102,15,58,15,246,12 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,4 decq %r10 jnz L$open_sse_128_rounds paddd L$chacha20_consts(%rip),%xmm0 paddd L$chacha20_consts(%rip),%xmm1 paddd L$chacha20_consts(%rip),%xmm2 paddd %xmm7,%xmm4 paddd %xmm7,%xmm5 paddd %xmm7,%xmm6 paddd %xmm11,%xmm9 paddd %xmm11,%xmm10 paddd %xmm15,%xmm13 paddd L$sse_inc(%rip),%xmm15 paddd %xmm15,%xmm14 pand L$clamp(%rip),%xmm0 movdqa %xmm0,0+0(%rbp) movdqa %xmm4,0+16(%rbp) movq %r8,%r8 call poly_hash_ad_internal L$open_sse_128_xor_hash: cmpq $16,%rbx jb L$open_sse_tail_16 subq $16,%rbx addq 0+0(%rsi),%r10 adcq 8+0(%rsi),%r11 adcq $1,%r12 movdqu 0(%rsi),%xmm3 pxor %xmm3,%xmm1 movdqu %xmm1,0(%rdi) leaq 16(%rsi),%rsi leaq 16(%rdi),%rdi movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 movdqa %xmm5,%xmm1 movdqa %xmm9,%xmm5 movdqa %xmm13,%xmm9 movdqa %xmm2,%xmm13 movdqa %xmm6,%xmm2 movdqa %xmm10,%xmm6 movdqa %xmm14,%xmm10 jmp L$open_sse_128_xor_hash .globl _chacha20_poly1305_seal_sse41 .private_extern _chacha20_poly1305_seal_sse41 
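# _chacha20_poly1305_seal_sse41: SSE4.1 seal path. Generates the ChaCha20
# keystream, XORs it over the plaintext, and absorbs the resulting
# ciphertext into the Poly1305 accumulator in a single pass.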
.p2align 6 _chacha20_poly1305_seal_sse41: _CET_ENDBR pushq %rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 pushq %r9 subq $288 + 0 + 32,%rsp leaq 32(%rsp),%rbp andq $-32,%rbp movq 56(%r9),%rbx addq %rdx,%rbx movq %r8,0+0+32(%rbp) movq %rbx,8+0+32(%rbp) movq %rdx,%rbx cmpq $128,%rbx jbe L$seal_sse_128 movdqa L$chacha20_consts(%rip),%xmm0 movdqu 0(%r9),%xmm4 movdqu 16(%r9),%xmm8 movdqu 32(%r9),%xmm12 movdqa %xmm0,%xmm1 movdqa %xmm0,%xmm2 movdqa %xmm0,%xmm3 movdqa %xmm4,%xmm5 movdqa %xmm4,%xmm6 movdqa %xmm4,%xmm7 movdqa %xmm8,%xmm9 movdqa %xmm8,%xmm10 movdqa %xmm8,%xmm11 movdqa %xmm12,%xmm15 paddd L$sse_inc(%rip),%xmm12 movdqa %xmm12,%xmm14 paddd L$sse_inc(%rip),%xmm12 movdqa %xmm12,%xmm13 paddd L$sse_inc(%rip),%xmm12 movdqa %xmm4,0+48(%rbp) movdqa %xmm8,0+64(%rbp) movdqa %xmm12,0+96(%rbp) movdqa %xmm13,0+112(%rbp) movdqa %xmm14,0+128(%rbp) movdqa %xmm15,0+144(%rbp) movq $10,%r10 L$seal_sse_init_rounds: movdqa %xmm8,0+80(%rbp) movdqa L$rol16(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm4 pxor %xmm8,%xmm4 movdqa L$rol8(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm4 pxor %xmm8,%xmm4 movdqa 0+80(%rbp),%xmm8 .byte 102,15,58,15,255,4 .byte 102,69,15,58,15,219,8 .byte 102,69,15,58,15,255,12 .byte 102,15,58,15,246,4 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,12 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 movdqa %xmm8,0+80(%rbp) movdqa L$rol16(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $20,%xmm8 pslld 
$32-20,%xmm4 pxor %xmm8,%xmm4 movdqa L$rol8(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm4 pxor %xmm8,%xmm4 movdqa 0+80(%rbp),%xmm8 .byte 102,15,58,15,255,12 .byte 102,69,15,58,15,219,8 .byte 102,69,15,58,15,255,4 .byte 102,15,58,15,246,12 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,4 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 decq %r10 jnz L$seal_sse_init_rounds paddd L$chacha20_consts(%rip),%xmm3 paddd 0+48(%rbp),%xmm7 paddd 0+64(%rbp),%xmm11 paddd 0+144(%rbp),%xmm15 paddd L$chacha20_consts(%rip),%xmm2 paddd 0+48(%rbp),%xmm6 paddd 0+64(%rbp),%xmm10 paddd 0+128(%rbp),%xmm14 paddd L$chacha20_consts(%rip),%xmm1 paddd 0+48(%rbp),%xmm5 paddd 0+64(%rbp),%xmm9 paddd 0+112(%rbp),%xmm13 paddd L$chacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 pand L$clamp(%rip),%xmm3 movdqa %xmm3,0+0(%rbp) movdqa %xmm7,0+16(%rbp) movq %r8,%r8 call poly_hash_ad_internal movdqu 0 + 0(%rsi),%xmm3 movdqu 16 + 0(%rsi),%xmm7 movdqu 32 + 0(%rsi),%xmm11 movdqu 48 + 0(%rsi),%xmm15 pxor %xmm3,%xmm2 pxor %xmm7,%xmm6 pxor %xmm11,%xmm10 pxor %xmm14,%xmm15 movdqu %xmm2,0 + 0(%rdi) movdqu %xmm6,16 + 0(%rdi) movdqu %xmm10,32 + 0(%rdi) movdqu %xmm15,48 + 0(%rdi) movdqu 0 + 64(%rsi),%xmm3 movdqu 16 + 64(%rsi),%xmm7 movdqu 32 + 64(%rsi),%xmm11 movdqu 48 + 64(%rsi),%xmm15 pxor %xmm3,%xmm1 pxor %xmm7,%xmm5 pxor %xmm11,%xmm9 pxor %xmm13,%xmm15 movdqu %xmm1,0 + 64(%rdi) movdqu %xmm5,16 + 64(%rdi) movdqu %xmm9,32 + 64(%rdi) movdqu %xmm15,48 + 64(%rdi) cmpq $192,%rbx ja L$seal_sse_main_init movq $128,%rcx subq $128,%rbx leaq 128(%rsi),%rsi jmp L$seal_sse_128_tail_hash L$seal_sse_main_init: movdqu 0 + 128(%rsi),%xmm3 movdqu 16 + 128(%rsi),%xmm7 movdqu 32 + 128(%rsi),%xmm11 movdqu 48 + 128(%rsi),%xmm15 pxor %xmm3,%xmm0 pxor %xmm7,%xmm4 pxor %xmm11,%xmm8 pxor %xmm12,%xmm15 movdqu %xmm0,0 + 128(%rdi) movdqu %xmm4,16 + 128(%rdi) movdqu %xmm8,32 + 128(%rdi) movdqu %xmm15,48 + 128(%rdi) movq $192,%rcx subq $192,%rbx leaq 192(%rsi),%rsi movq $2,%rcx movq $8,%r8 cmpq $64,%rbx jbe L$seal_sse_tail_64 cmpq $128,%rbx jbe L$seal_sse_tail_128 cmpq $192,%rbx jbe L$seal_sse_tail_192 L$seal_sse_main_loop: movdqa L$chacha20_consts(%rip),%xmm0 movdqa 0+48(%rbp),%xmm4 movdqa 0+64(%rbp),%xmm8 movdqa %xmm0,%xmm1 movdqa %xmm4,%xmm5 movdqa %xmm8,%xmm9 movdqa %xmm0,%xmm2 movdqa %xmm4,%xmm6 movdqa %xmm8,%xmm10 movdqa %xmm0,%xmm3 movdqa %xmm4,%xmm7 movdqa %xmm8,%xmm11 movdqa 0+96(%rbp),%xmm15 paddd L$sse_inc(%rip),%xmm15 movdqa %xmm15,%xmm14 paddd L$sse_inc(%rip),%xmm14 movdqa %xmm14,%xmm13 paddd L$sse_inc(%rip),%xmm13 movdqa %xmm13,%xmm12 paddd L$sse_inc(%rip),%xmm12 movdqa %xmm12,0+96(%rbp) movdqa %xmm13,0+112(%rbp) movdqa %xmm14,0+128(%rbp) movdqa %xmm15,0+144(%rbp) .p2align 5 L$seal_sse_main_rounds: movdqa %xmm8,0+80(%rbp) 
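# Main seal round body: quarter-rounds over four parallel ChaCha20 blocks
# (256 bytes per iteration) interleaved with Poly1305 absorption of
# ciphertext already written to the output buffer at (%rdi).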
movdqa L$rol16(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm4 pxor %xmm8,%xmm4 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movdqa L$rol8(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm4 pxor %xmm8,%xmm4 movdqa 0+80(%rbp),%xmm8 imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 .byte 102,15,58,15,255,4 .byte 102,69,15,58,15,219,8 .byte 102,69,15,58,15,255,12 .byte 102,15,58,15,246,4 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,12 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 movdqa %xmm8,0+80(%rbp) movdqa L$rol16(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm4 pxor %xmm8,%xmm4 movdqa L$rol8(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd 
%xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm4 pxor %xmm8,%xmm4 movdqa 0+80(%rbp),%xmm8 .byte 102,15,58,15,255,12 .byte 102,69,15,58,15,219,8 .byte 102,69,15,58,15,255,4 .byte 102,15,58,15,246,12 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,4 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 leaq 16(%rdi),%rdi decq %r8 jge L$seal_sse_main_rounds addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi decq %rcx jg L$seal_sse_main_rounds paddd L$chacha20_consts(%rip),%xmm3 paddd 0+48(%rbp),%xmm7 paddd 0+64(%rbp),%xmm11 paddd 0+144(%rbp),%xmm15 paddd L$chacha20_consts(%rip),%xmm2 paddd 0+48(%rbp),%xmm6 paddd 0+64(%rbp),%xmm10 paddd 0+128(%rbp),%xmm14 paddd L$chacha20_consts(%rip),%xmm1 paddd 0+48(%rbp),%xmm5 paddd 0+64(%rbp),%xmm9 paddd 0+112(%rbp),%xmm13 paddd L$chacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 movdqa %xmm14,0+80(%rbp) movdqa %xmm14,0+80(%rbp) movdqu 0 + 0(%rsi),%xmm14 pxor %xmm3,%xmm14 movdqu %xmm14,0 + 0(%rdi) movdqu 16 + 0(%rsi),%xmm14 pxor %xmm7,%xmm14 movdqu %xmm14,16 + 0(%rdi) movdqu 32 + 0(%rsi),%xmm14 pxor %xmm11,%xmm14 movdqu %xmm14,32 + 0(%rdi) movdqu 48 + 0(%rsi),%xmm14 pxor %xmm15,%xmm14 movdqu %xmm14,48 + 0(%rdi) movdqa 0+80(%rbp),%xmm14 movdqu 0 + 64(%rsi),%xmm3 movdqu 16 + 64(%rsi),%xmm7 movdqu 32 + 64(%rsi),%xmm11 movdqu 48 + 64(%rsi),%xmm15 pxor %xmm3,%xmm2 pxor %xmm7,%xmm6 pxor %xmm11,%xmm10 pxor %xmm14,%xmm15 movdqu %xmm2,0 + 64(%rdi) movdqu %xmm6,16 + 64(%rdi) movdqu %xmm10,32 + 64(%rdi) movdqu %xmm15,48 + 64(%rdi) movdqu 0 + 128(%rsi),%xmm3 movdqu 16 + 128(%rsi),%xmm7 movdqu 32 + 128(%rsi),%xmm11 movdqu 48 + 128(%rsi),%xmm15 pxor %xmm3,%xmm1 pxor %xmm7,%xmm5 pxor %xmm11,%xmm9 pxor %xmm13,%xmm15 movdqu %xmm1,0 + 128(%rdi) movdqu %xmm5,16 + 128(%rdi) movdqu %xmm9,32 + 128(%rdi) movdqu %xmm15,48 + 128(%rdi) cmpq $256,%rbx ja L$seal_sse_main_loop_xor movq $192,%rcx subq $192,%rbx leaq 192(%rsi),%rsi jmp L$seal_sse_128_tail_hash L$seal_sse_main_loop_xor: movdqu 0 + 192(%rsi),%xmm3 movdqu 16 + 192(%rsi),%xmm7 movdqu 32 + 192(%rsi),%xmm11 movdqu 48 + 192(%rsi),%xmm15 pxor %xmm3,%xmm0 pxor %xmm7,%xmm4 pxor %xmm11,%xmm8 pxor %xmm12,%xmm15 movdqu %xmm0,0 + 192(%rdi) movdqu %xmm4,16 + 192(%rdi) movdqu %xmm8,32 + 192(%rdi) movdqu %xmm15,48 + 192(%rdi) leaq 256(%rsi),%rsi subq $256,%rbx movq $6,%rcx movq $4,%r8 cmpq $192,%rbx jg L$seal_sse_main_loop movq %rbx,%rcx testq %rbx,%rbx je L$seal_sse_128_tail_hash movq $6,%rcx cmpq $128,%rbx ja L$seal_sse_tail_192 cmpq $64,%rbx ja 
L$seal_sse_tail_128 L$seal_sse_tail_64: movdqa L$chacha20_consts(%rip),%xmm0 movdqa 0+48(%rbp),%xmm4 movdqa 0+64(%rbp),%xmm8 movdqa 0+96(%rbp),%xmm12 paddd L$sse_inc(%rip),%xmm12 movdqa %xmm12,0+96(%rbp) L$seal_sse_tail_64_rounds_and_x2hash: addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi L$seal_sse_tail_64_rounds_and_x1hash: paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi decq %rcx jg L$seal_sse_tail_64_rounds_and_x2hash decq %r8 jge L$seal_sse_tail_64_rounds_and_x1hash paddd L$chacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 jmp L$seal_sse_128_tail_xor L$seal_sse_tail_128: movdqa L$chacha20_consts(%rip),%xmm0 movdqa 0+48(%rbp),%xmm4 movdqa 0+64(%rbp),%xmm8 movdqa %xmm0,%xmm1 movdqa %xmm4,%xmm5 movdqa %xmm8,%xmm9 movdqa 0+96(%rbp),%xmm13 paddd L$sse_inc(%rip),%xmm13 movdqa %xmm13,%xmm12 paddd L$sse_inc(%rip),%xmm12 movdqa %xmm12,0+96(%rbp) movdqa %xmm13,0+112(%rbp) L$seal_sse_tail_128_rounds_and_x2hash: addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 
16(%rdi),%rdi L$seal_sse_tail_128_rounds_and_x1hash: paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 leaq 16(%rdi),%rdi decq %rcx jg L$seal_sse_tail_128_rounds_and_x2hash decq %r8 jge L$seal_sse_tail_128_rounds_and_x1hash paddd L$chacha20_consts(%rip),%xmm1 paddd 0+48(%rbp),%xmm5 paddd 0+64(%rbp),%xmm9 paddd 0+112(%rbp),%xmm13 paddd L$chacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 movdqu 0 + 0(%rsi),%xmm3 movdqu 16 + 0(%rsi),%xmm7 movdqu 32 + 0(%rsi),%xmm11 movdqu 48 + 0(%rsi),%xmm15 pxor %xmm3,%xmm1 pxor %xmm7,%xmm5 pxor %xmm11,%xmm9 pxor %xmm13,%xmm15 movdqu %xmm1,0 + 0(%rdi) movdqu %xmm5,16 + 0(%rdi) movdqu %xmm9,32 + 0(%rdi) movdqu %xmm15,48 + 0(%rdi) movq $64,%rcx subq $64,%rbx leaq 64(%rsi),%rsi jmp L$seal_sse_128_tail_hash L$seal_sse_tail_192: movdqa L$chacha20_consts(%rip),%xmm0 movdqa 0+48(%rbp),%xmm4 movdqa 0+64(%rbp),%xmm8 movdqa %xmm0,%xmm1 movdqa %xmm4,%xmm5 movdqa %xmm8,%xmm9 movdqa %xmm0,%xmm2 movdqa %xmm4,%xmm6 movdqa %xmm8,%xmm10 movdqa 0+96(%rbp),%xmm14 paddd L$sse_inc(%rip),%xmm14 movdqa %xmm14,%xmm13 paddd L$sse_inc(%rip),%xmm13 movdqa %xmm13,%xmm12 paddd L$sse_inc(%rip),%xmm12 movdqa %xmm12,0+96(%rbp) movdqa %xmm13,0+112(%rbp) movdqa %xmm14,0+128(%rbp) L$seal_sse_tail_192_rounds_and_x2hash: addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 
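# Poly1305 block step: multiply the accumulator (r10:r11:r12) by the clamped
# key half r stored at 0(%rbp)/8(%rbp), then fold the bits above 2^130 back
# into the low limbs (the shrdq / andq $-4 sequence) to reduce mod 2^130 - 5.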
movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi L$seal_sse_tail_192_rounds_and_x1hash: paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $12,%xmm3 psrld $20,%xmm6 pxor %xmm3,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $7,%xmm3 psrld $25,%xmm6 pxor %xmm3,%xmm6 .byte 102,15,58,15,246,4 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,12 addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $12,%xmm3 psrld $20,%xmm6 pxor %xmm3,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor 
%xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $7,%xmm3 psrld $25,%xmm6 pxor %xmm3,%xmm6 .byte 102,15,58,15,246,12 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,4 leaq 16(%rdi),%rdi decq %rcx jg L$seal_sse_tail_192_rounds_and_x2hash decq %r8 jge L$seal_sse_tail_192_rounds_and_x1hash paddd L$chacha20_consts(%rip),%xmm2 paddd 0+48(%rbp),%xmm6 paddd 0+64(%rbp),%xmm10 paddd 0+128(%rbp),%xmm14 paddd L$chacha20_consts(%rip),%xmm1 paddd 0+48(%rbp),%xmm5 paddd 0+64(%rbp),%xmm9 paddd 0+112(%rbp),%xmm13 paddd L$chacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 movdqu 0 + 0(%rsi),%xmm3 movdqu 16 + 0(%rsi),%xmm7 movdqu 32 + 0(%rsi),%xmm11 movdqu 48 + 0(%rsi),%xmm15 pxor %xmm3,%xmm2 pxor %xmm7,%xmm6 pxor %xmm11,%xmm10 pxor %xmm14,%xmm15 movdqu %xmm2,0 + 0(%rdi) movdqu %xmm6,16 + 0(%rdi) movdqu %xmm10,32 + 0(%rdi) movdqu %xmm15,48 + 0(%rdi) movdqu 0 + 64(%rsi),%xmm3 movdqu 16 + 64(%rsi),%xmm7 movdqu 32 + 64(%rsi),%xmm11 movdqu 48 + 64(%rsi),%xmm15 pxor %xmm3,%xmm1 pxor %xmm7,%xmm5 pxor %xmm11,%xmm9 pxor %xmm13,%xmm15 movdqu %xmm1,0 + 64(%rdi) movdqu %xmm5,16 + 64(%rdi) movdqu %xmm9,32 + 64(%rdi) movdqu %xmm15,48 + 64(%rdi) movq $128,%rcx subq $128,%rbx leaq 128(%rsi),%rsi L$seal_sse_128_tail_hash: cmpq $16,%rcx jb L$seal_sse_128_tail_xor addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 subq $16,%rcx leaq 16(%rdi),%rdi jmp L$seal_sse_128_tail_hash L$seal_sse_128_tail_xor: cmpq $16,%rbx jb L$seal_sse_tail_16 subq $16,%rbx movdqu 0(%rsi),%xmm3 pxor %xmm3,%xmm0 movdqu %xmm0,0(%rdi) addq 0(%rdi),%r10 adcq 8(%rdi),%r11 adcq $1,%r12 leaq 16(%rsi),%rsi leaq 16(%rdi),%rdi movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 movdqa %xmm4,%xmm0 movdqa %xmm8,%xmm4 movdqa %xmm12,%xmm8 movdqa %xmm1,%xmm12 movdqa %xmm5,%xmm1 movdqa %xmm9,%xmm5 movdqa %xmm13,%xmm9 jmp L$seal_sse_128_tail_xor L$seal_sse_tail_16: testq %rbx,%rbx jz L$process_blocks_of_extra_in movq %rbx,%r8 movq %rbx,%rcx leaq -1(%rsi,%rbx,1),%rsi pxor %xmm15,%xmm15 L$seal_sse_tail_16_compose: pslldq $1,%xmm15 pinsrb $0,(%rsi),%xmm15 leaq -1(%rsi),%rsi decq %rcx jne L$seal_sse_tail_16_compose pxor %xmm0,%xmm15 movq %rbx,%rcx movdqu %xmm15,%xmm0 L$seal_sse_tail_16_extract: pextrb $0,%xmm0,(%rdi) psrldq $1,%xmm0 addq $1,%rdi subq $1,%rcx jnz L$seal_sse_tail_16_extract movq 288 + 0 + 32(%rsp),%r9 movq 56(%r9),%r14 movq 48(%r9),%r13 testq %r14,%r14 jz L$process_partial_block movq $16,%r15 subq %rbx,%r15 cmpq %r15,%r14 jge L$load_extra_in movq %r14,%r15 L$load_extra_in: leaq 
-1(%r13,%r15,1),%rsi addq %r15,%r13 subq %r15,%r14 movq %r13,48(%r9) movq %r14,56(%r9) addq %r15,%r8 pxor %xmm11,%xmm11 L$load_extra_load_loop: pslldq $1,%xmm11 pinsrb $0,(%rsi),%xmm11 leaq -1(%rsi),%rsi subq $1,%r15 jnz L$load_extra_load_loop movq %rbx,%r15 L$load_extra_shift_loop: pslldq $1,%xmm11 subq $1,%r15 jnz L$load_extra_shift_loop leaq L$and_masks(%rip),%r15 shlq $4,%rbx pand -16(%r15,%rbx,1),%xmm15 por %xmm11,%xmm15 .byte 102,77,15,126,253 pextrq $1,%xmm15,%r14 addq %r13,%r10 adcq %r14,%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 L$process_blocks_of_extra_in: movq 288+32+0 (%rsp),%r9 movq 48(%r9),%rsi movq 56(%r9),%r8 movq %r8,%rcx shrq $4,%r8 L$process_extra_hash_loop: jz process_extra_in_trailer addq 0+0(%rsi),%r10 adcq 8+0(%rsi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rsi),%rsi subq $1,%r8 jmp L$process_extra_hash_loop process_extra_in_trailer: andq $15,%rcx movq %rcx,%rbx jz L$do_length_block leaq -1(%rsi,%rcx,1),%rsi L$process_extra_in_trailer_load: pslldq $1,%xmm15 pinsrb $0,(%rsi),%xmm15 leaq -1(%rsi),%rsi subq $1,%rcx jnz L$process_extra_in_trailer_load L$process_partial_block: leaq L$and_masks(%rip),%r15 shlq $4,%rbx pand -16(%r15,%rbx,1),%xmm15 .byte 102,77,15,126,253 pextrq $1,%xmm15,%r14 addq %r13,%r10 adcq %r14,%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 L$do_length_block: addq 0+0+32(%rbp),%r10 adcq 8+0+32(%rbp),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 movq %r10,%r13 movq %r11,%r14 movq %r12,%r15 
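# Tag finalization: compute acc - (2^130 - 5) via the negated-immediate
# subtract chain, select the properly reduced value with cmovc, then add the
# second key half s (at 16(%rbp)) to form the 16-byte Poly1305 tag.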
subq $-5,%r10 sbbq $-1,%r11 sbbq $3,%r12 cmovcq %r13,%r10 cmovcq %r14,%r11 cmovcq %r15,%r12 addq 0+0+16(%rbp),%r10 adcq 8+0+16(%rbp),%r11 addq $288 + 0 + 32,%rsp popq %r9 movq %r10,(%r9) movq %r11,8(%r9) popq %r15 popq %r14 popq %r13 popq %r12 popq %rbx popq %rbp ret L$seal_sse_128: movdqu L$chacha20_consts(%rip),%xmm0 movdqa %xmm0,%xmm1 movdqa %xmm0,%xmm2 movdqu 0(%r9),%xmm4 movdqa %xmm4,%xmm5 movdqa %xmm4,%xmm6 movdqu 16(%r9),%xmm8 movdqa %xmm8,%xmm9 movdqa %xmm8,%xmm10 movdqu 32(%r9),%xmm14 movdqa %xmm14,%xmm12 paddd L$sse_inc(%rip),%xmm12 movdqa %xmm12,%xmm13 paddd L$sse_inc(%rip),%xmm13 movdqa %xmm4,%xmm7 movdqa %xmm8,%xmm11 movdqa %xmm12,%xmm15 movq $10,%r10 L$seal_sse_128_rounds: paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $12,%xmm3 psrld $20,%xmm6 pxor %xmm3,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $7,%xmm3 psrld $25,%xmm6 pxor %xmm3,%xmm6 .byte 102,15,58,15,246,4 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,12 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $12,%xmm3 psrld $20,%xmm6 pxor %xmm3,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $7,%xmm3 psrld $25,%xmm6 pxor %xmm3,%xmm6 .byte 102,15,58,15,246,12 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,4 decq %r10 jnz L$seal_sse_128_rounds paddd L$chacha20_consts(%rip),%xmm0 paddd L$chacha20_consts(%rip),%xmm1 paddd L$chacha20_consts(%rip),%xmm2 paddd %xmm7,%xmm4 paddd %xmm7,%xmm5 paddd %xmm7,%xmm6 paddd %xmm11,%xmm8 paddd %xmm11,%xmm9 paddd %xmm15,%xmm12 paddd L$sse_inc(%rip),%xmm15 paddd %xmm15,%xmm13 pand L$clamp(%rip),%xmm2 movdqa %xmm2,0+0(%rbp) 
movdqa %xmm6,0+16(%rbp) movq %r8,%r8 call poly_hash_ad_internal jmp L$seal_sse_128_tail_xor .globl _chacha20_poly1305_open_avx2 .private_extern _chacha20_poly1305_open_avx2 .p2align 6 _chacha20_poly1305_open_avx2: _CET_ENDBR pushq %rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 pushq %r9 subq $288 + 0 + 32,%rsp leaq 32(%rsp),%rbp andq $-32,%rbp movq %rdx,%rbx movq %r8,0+0+32(%rbp) movq %rbx,8+0+32(%rbp) vzeroupper vmovdqa L$chacha20_consts(%rip),%ymm0 vbroadcasti128 0(%r9),%ymm4 vbroadcasti128 16(%r9),%ymm8 vbroadcasti128 32(%r9),%ymm12 vpaddd L$avx2_init(%rip),%ymm12,%ymm12 cmpq $192,%rbx jbe L$open_avx2_192 cmpq $320,%rbx jbe L$open_avx2_320 vmovdqa %ymm4,0+64(%rbp) vmovdqa %ymm8,0+96(%rbp) vmovdqa %ymm12,0+160(%rbp) movq $10,%r10 L$open_avx2_init_rounds: vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 decq %r10 jne L$open_avx2_init_rounds vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 vpand L$clamp(%rip),%ymm3,%ymm3 vmovdqa %ymm3,0+0(%rbp) vperm2i128 $0x13,%ymm0,%ymm4,%ymm0 vperm2i128 $0x13,%ymm8,%ymm12,%ymm4 movq %r8,%r8 call poly_hash_ad_internal xorq %rcx,%rcx L$open_avx2_init_hash: addq 0+0(%rsi,%rcx,1),%r10 adcq 8+0(%rsi,%rcx,1),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 addq $16,%rcx cmpq $64,%rcx jne L$open_avx2_init_hash vpxor 0(%rsi),%ymm0,%ymm0 vpxor 32(%rsi),%ymm4,%ymm4 vmovdqu %ymm0,0(%rdi) vmovdqu %ymm4,32(%rdi) leaq 64(%rsi),%rsi leaq 64(%rdi),%rdi subq $64,%rbx L$open_avx2_main_loop: cmpq $512,%rbx jb L$open_avx2_main_loop_done vmovdqa L$chacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa %ymm0,%ymm1 vmovdqa %ymm4,%ymm5 vmovdqa %ymm8,%ymm9 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm10 vmovdqa %ymm0,%ymm3 vmovdqa %ymm4,%ymm7 vmovdqa %ymm8,%ymm11 vmovdqa L$avx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm15 vpaddd %ymm15,%ymm12,%ymm14 vpaddd %ymm14,%ymm12,%ymm13 vpaddd %ymm13,%ymm12,%ymm12 vmovdqa %ymm15,0+256(%rbp) vmovdqa 
%ymm14,0+224(%rbp) vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm12,0+160(%rbp) xorq %rcx,%rcx L$open_avx2_main_loop_rounds: addq 0+0(%rsi,%rcx,1),%r10 adcq 8+0(%rsi,%rcx,1),%r11 adcq $1,%r12 vmovdqa %ymm8,0+128(%rbp) vmovdqa L$rol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 addq %rax,%r15 adcq %rdx,%r9 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa L$rol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 addq 0+16(%rsi,%rcx,1),%r10 adcq 8+16(%rsi,%rcx,1),%r11 adcq $1,%r12 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $4,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $12,%ymm15,%ymm15,%ymm15 vpalignr $4,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm14,%ymm14,%ymm14 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx vpalignr $4,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $4,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm12,%ymm12,%ymm12 vmovdqa %ymm8,0+128(%rbp) vmovdqa L$rol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 addq %rax,%r15 adcq %rdx,%r9 vpshufb %ymm8,%ymm13,%ymm13 vpshufb 
%ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 addq 0+32(%rsi,%rcx,1),%r10 adcq 8+32(%rsi,%rcx,1),%r11 adcq $1,%r12 leaq 48(%rcx),%rcx vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa L$rol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 addq %rax,%r15 adcq %rdx,%r9 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $12,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $4,%ymm15,%ymm15,%ymm15 vpalignr $12,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $12,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $12,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpalignr $4,%ymm12,%ymm12,%ymm12 cmpq $60*8,%rcx jne L$open_avx2_main_loop_rounds vpaddd L$chacha20_consts(%rip),%ymm3,%ymm3 vpaddd 0+64(%rbp),%ymm7,%ymm7 vpaddd 0+96(%rbp),%ymm11,%ymm11 vpaddd 0+256(%rbp),%ymm15,%ymm15 vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2 vpaddd 0+64(%rbp),%ymm6,%ymm6 vpaddd 0+96(%rbp),%ymm10,%ymm10 vpaddd 0+224(%rbp),%ymm14,%ymm14 vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1 vpaddd 0+64(%rbp),%ymm5,%ymm5 vpaddd 0+96(%rbp),%ymm9,%ymm9 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vmovdqa %ymm0,0+128(%rbp) addq 0+60*8(%rsi),%r10 adcq 8+60*8(%rsi),%r11 adcq $1,%r12 vperm2i128 $0x02,%ymm3,%ymm7,%ymm0 vperm2i128 $0x13,%ymm3,%ymm7,%ymm7 vperm2i128 $0x02,%ymm11,%ymm15,%ymm3 vperm2i128 $0x13,%ymm11,%ymm15,%ymm11 vpxor 0+0(%rsi),%ymm0,%ymm0 vpxor 32+0(%rsi),%ymm3,%ymm3 vpxor 64+0(%rsi),%ymm7,%ymm7 vpxor 96+0(%rsi),%ymm11,%ymm11 vmovdqu 
%ymm0,0+0(%rdi) vmovdqu %ymm3,32+0(%rdi) vmovdqu %ymm7,64+0(%rdi) vmovdqu %ymm11,96+0(%rdi) vmovdqa 0+128(%rbp),%ymm0 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vperm2i128 $0x02,%ymm2,%ymm6,%ymm3 vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 vpxor 0+128(%rsi),%ymm3,%ymm3 vpxor 32+128(%rsi),%ymm2,%ymm2 vpxor 64+128(%rsi),%ymm6,%ymm6 vpxor 96+128(%rsi),%ymm10,%ymm10 vmovdqu %ymm3,0+128(%rdi) vmovdqu %ymm2,32+128(%rdi) vmovdqu %ymm6,64+128(%rdi) vmovdqu %ymm10,96+128(%rdi) addq 0+60*8+16(%rsi),%r10 adcq 8+60*8+16(%rsi),%r11 adcq $1,%r12 vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 vpxor 0+256(%rsi),%ymm3,%ymm3 vpxor 32+256(%rsi),%ymm1,%ymm1 vpxor 64+256(%rsi),%ymm5,%ymm5 vpxor 96+256(%rsi),%ymm9,%ymm9 vmovdqu %ymm3,0+256(%rdi) vmovdqu %ymm1,32+256(%rdi) vmovdqu %ymm5,64+256(%rdi) vmovdqu %ymm9,96+256(%rdi) movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 vperm2i128 $0x13,%ymm0,%ymm4,%ymm4 vperm2i128 $0x02,%ymm8,%ymm12,%ymm0 vperm2i128 $0x13,%ymm8,%ymm12,%ymm8 vpxor 0+384(%rsi),%ymm3,%ymm3 vpxor 32+384(%rsi),%ymm0,%ymm0 vpxor 64+384(%rsi),%ymm4,%ymm4 vpxor 96+384(%rsi),%ymm8,%ymm8 vmovdqu %ymm3,0+384(%rdi) vmovdqu %ymm0,32+384(%rdi) vmovdqu %ymm4,64+384(%rdi) vmovdqu %ymm8,96+384(%rdi) leaq 512(%rsi),%rsi leaq 512(%rdi),%rdi subq $512,%rbx jmp L$open_avx2_main_loop L$open_avx2_main_loop_done: testq %rbx,%rbx vzeroupper je L$open_sse_finalize cmpq $384,%rbx ja L$open_avx2_tail_512 cmpq $256,%rbx ja L$open_avx2_tail_384 cmpq $128,%rbx ja L$open_avx2_tail_256 vmovdqa L$chacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa L$avx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm12 vmovdqa %ymm12,0+160(%rbp) xorq %r8,%r8 movq %rbx,%rcx andq $-16,%rcx testq %rcx,%rcx je L$open_avx2_tail_128_rounds L$open_avx2_tail_128_rounds_and_x1hash: addq 0+0(%rsi,%r8,1),%r10 adcq 8+0(%rsi,%r8,1),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq 
%r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 L$open_avx2_tail_128_rounds: addq $16,%r8 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 cmpq %rcx,%r8 jb L$open_avx2_tail_128_rounds_and_x1hash cmpq $160,%r8 jne L$open_avx2_tail_128_rounds vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 vmovdqa %ymm3,%ymm8 jmp L$open_avx2_tail_128_xor L$open_avx2_tail_256: vmovdqa L$chacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa %ymm0,%ymm1 vmovdqa %ymm4,%ymm5 vmovdqa %ymm8,%ymm9 vmovdqa L$avx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm13 vpaddd %ymm13,%ymm12,%ymm12 vmovdqa %ymm12,0+160(%rbp) vmovdqa %ymm13,0+192(%rbp) movq %rbx,0+128(%rbp) movq %rbx,%rcx subq $128,%rcx shrq $4,%rcx movq $10,%r8 cmpq $10,%rcx cmovgq %r8,%rcx movq %rsi,%rbx xorq %r8,%r8 L$open_avx2_tail_256_rounds_and_x1hash: addq 0+0(%rbx),%r10 adcq 8+0(%rbx),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rbx),%rbx L$open_avx2_tail_256_rounds: vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr 
$12,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm5,%ymm5,%ymm5 incq %r8 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm5,%ymm5,%ymm5 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb L$rol16(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpsrld $20,%ymm6,%ymm3 vpslld $12,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb L$rol8(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm3 vpsrld $25,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm6,%ymm6,%ymm6 cmpq %rcx,%r8 jb L$open_avx2_tail_256_rounds_and_x1hash cmpq $10,%r8 jne L$open_avx2_tail_256_rounds movq %rbx,%r8 subq %rsi,%rbx movq %rbx,%rcx movq 0+128(%rbp),%rbx L$open_avx2_tail_256_hash: addq $16,%rcx cmpq %rbx,%rcx jg L$open_avx2_tail_256_done addq 0+0(%r8),%r10 adcq 8+0(%r8),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%r8),%r8 jmp L$open_avx2_tail_256_hash L$open_avx2_tail_256_done: vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1 vpaddd 0+64(%rbp),%ymm5,%ymm5 vpaddd 0+96(%rbp),%ymm9,%ymm9 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 vpxor 0+0(%rsi),%ymm3,%ymm3 vpxor 32+0(%rsi),%ymm1,%ymm1 vpxor 64+0(%rsi),%ymm5,%ymm5 vpxor 96+0(%rsi),%ymm9,%ymm9 vmovdqu %ymm3,0+0(%rdi) vmovdqu %ymm1,32+0(%rdi) vmovdqu %ymm5,64+0(%rdi) vmovdqu %ymm9,96+0(%rdi) vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 vmovdqa %ymm3,%ymm8 leaq 128(%rsi),%rsi leaq 128(%rdi),%rdi subq $128,%rbx jmp L$open_avx2_tail_128_xor L$open_avx2_tail_384: vmovdqa L$chacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa %ymm0,%ymm1 vmovdqa %ymm4,%ymm5 vmovdqa %ymm8,%ymm9 vmovdqa %ymm0,%ymm2 vmovdqa 
%ymm4,%ymm6 vmovdqa %ymm8,%ymm10 vmovdqa L$avx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm14 vpaddd %ymm14,%ymm12,%ymm13 vpaddd %ymm13,%ymm12,%ymm12 vmovdqa %ymm12,0+160(%rbp) vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm14,0+224(%rbp) movq %rbx,0+128(%rbp) movq %rbx,%rcx subq $256,%rcx shrq $4,%rcx addq $6,%rcx movq $10,%r8 cmpq $10,%rcx cmovgq %r8,%rcx movq %rsi,%rbx xorq %r8,%r8 L$open_avx2_tail_384_rounds_and_x2hash: addq 0+0(%rbx),%r10 adcq 8+0(%rbx),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rbx),%rbx L$open_avx2_tail_384_rounds_and_x1hash: vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb L$rol16(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpsrld $20,%ymm6,%ymm3 vpslld $12,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb L$rol8(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm3 vpsrld $25,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpalignr $12,%ymm14,%ymm14,%ymm14 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm6,%ymm6,%ymm6 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm5,%ymm5,%ymm5 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 addq 0+0(%rbx),%r10 adcq 8+0(%rbx),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rbx),%rbx incq %r8 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb L$rol16(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpsrld $20,%ymm6,%ymm3 vpslld $12,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb L$rol8(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm3 vpsrld 
$25,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm6,%ymm6,%ymm6 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm5,%ymm5,%ymm5 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 cmpq %rcx,%r8 jb L$open_avx2_tail_384_rounds_and_x2hash cmpq $10,%r8 jne L$open_avx2_tail_384_rounds_and_x1hash movq %rbx,%r8 subq %rsi,%rbx movq %rbx,%rcx movq 0+128(%rbp),%rbx L$open_avx2_384_tail_hash: addq $16,%rcx cmpq %rbx,%rcx jg L$open_avx2_384_tail_done addq 0+0(%r8),%r10 adcq 8+0(%r8),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%r8),%r8 jmp L$open_avx2_384_tail_hash L$open_avx2_384_tail_done: vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2 vpaddd 0+64(%rbp),%ymm6,%ymm6 vpaddd 0+96(%rbp),%ymm10,%ymm10 vpaddd 0+224(%rbp),%ymm14,%ymm14 vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1 vpaddd 0+64(%rbp),%ymm5,%ymm5 vpaddd 0+96(%rbp),%ymm9,%ymm9 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vperm2i128 $0x02,%ymm2,%ymm6,%ymm3 vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 vpxor 0+0(%rsi),%ymm3,%ymm3 vpxor 32+0(%rsi),%ymm2,%ymm2 vpxor 64+0(%rsi),%ymm6,%ymm6 vpxor 96+0(%rsi),%ymm10,%ymm10 vmovdqu %ymm3,0+0(%rdi) vmovdqu %ymm2,32+0(%rdi) vmovdqu %ymm6,64+0(%rdi) vmovdqu %ymm10,96+0(%rdi) vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 vpxor 0+128(%rsi),%ymm3,%ymm3 vpxor 32+128(%rsi),%ymm1,%ymm1 vpxor 64+128(%rsi),%ymm5,%ymm5 vpxor 96+128(%rsi),%ymm9,%ymm9 vmovdqu %ymm3,0+128(%rdi) vmovdqu %ymm1,32+128(%rdi) vmovdqu %ymm5,64+128(%rdi) vmovdqu %ymm9,96+128(%rdi) vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 vmovdqa %ymm3,%ymm8 leaq 256(%rsi),%rsi leaq 256(%rdi),%rdi subq $256,%rbx jmp L$open_avx2_tail_128_xor L$open_avx2_tail_512: vmovdqa L$chacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa %ymm0,%ymm1 vmovdqa %ymm4,%ymm5 
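# L$open_avx2_tail_512 (continued below): four parallel ChaCha20 states are
# prepared, their per-lane counters saved at 160..256(%rbp), and the round
# loops interleave Poly1305 blocks of the remaining ciphertext with the
# vector double-rounds.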
vmovdqa %ymm8,%ymm9 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm10 vmovdqa %ymm0,%ymm3 vmovdqa %ymm4,%ymm7 vmovdqa %ymm8,%ymm11 vmovdqa L$avx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm15 vpaddd %ymm15,%ymm12,%ymm14 vpaddd %ymm14,%ymm12,%ymm13 vpaddd %ymm13,%ymm12,%ymm12 vmovdqa %ymm15,0+256(%rbp) vmovdqa %ymm14,0+224(%rbp) vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm12,0+160(%rbp) xorq %rcx,%rcx movq %rsi,%r8 L$open_avx2_tail_512_rounds_and_x2hash: addq 0+0(%r8),%r10 adcq 8+0(%r8),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%r8),%r8 L$open_avx2_tail_512_rounds_and_x1hash: vmovdqa %ymm8,0+128(%rbp) vmovdqa L$rol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa L$rol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 addq 0+0(%r8),%r10 adcq 8+0(%r8),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $4,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $12,%ymm15,%ymm15,%ymm15 
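# Second half of the tail-512 double round: the state words move between
# column and diagonal form with vpalignr, the rol16/12/8/7 quarter-round
# pattern repeats, and further Poly1305 blocks are folded in; ten double
# rounds are run in total, counted in rcx.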
vpalignr $4,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm14,%ymm14,%ymm14 vpalignr $4,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $4,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm12,%ymm12,%ymm12 vmovdqa %ymm8,0+128(%rbp) vmovdqa L$rol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 addq 0+16(%r8),%r10 adcq 8+16(%r8),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 32(%r8),%r8 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa L$rol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $12,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $4,%ymm15,%ymm15,%ymm15 vpalignr $12,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $12,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $12,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm12,%ymm12,%ymm12 incq %rcx cmpq $4,%rcx jl L$open_avx2_tail_512_rounds_and_x2hash cmpq $10,%rcx jne L$open_avx2_tail_512_rounds_and_x1hash movq %rbx,%rcx subq $384,%rcx andq $-16,%rcx L$open_avx2_tail_512_hash: testq %rcx,%rcx je L$open_avx2_tail_512_done addq 0+0(%r8),%r10 adcq 8+0(%r8),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq 
%rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%r8),%r8 subq $16,%rcx jmp L$open_avx2_tail_512_hash L$open_avx2_tail_512_done: vpaddd L$chacha20_consts(%rip),%ymm3,%ymm3 vpaddd 0+64(%rbp),%ymm7,%ymm7 vpaddd 0+96(%rbp),%ymm11,%ymm11 vpaddd 0+256(%rbp),%ymm15,%ymm15 vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2 vpaddd 0+64(%rbp),%ymm6,%ymm6 vpaddd 0+96(%rbp),%ymm10,%ymm10 vpaddd 0+224(%rbp),%ymm14,%ymm14 vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1 vpaddd 0+64(%rbp),%ymm5,%ymm5 vpaddd 0+96(%rbp),%ymm9,%ymm9 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vmovdqa %ymm0,0+128(%rbp) vperm2i128 $0x02,%ymm3,%ymm7,%ymm0 vperm2i128 $0x13,%ymm3,%ymm7,%ymm7 vperm2i128 $0x02,%ymm11,%ymm15,%ymm3 vperm2i128 $0x13,%ymm11,%ymm15,%ymm11 vpxor 0+0(%rsi),%ymm0,%ymm0 vpxor 32+0(%rsi),%ymm3,%ymm3 vpxor 64+0(%rsi),%ymm7,%ymm7 vpxor 96+0(%rsi),%ymm11,%ymm11 vmovdqu %ymm0,0+0(%rdi) vmovdqu %ymm3,32+0(%rdi) vmovdqu %ymm7,64+0(%rdi) vmovdqu %ymm11,96+0(%rdi) vmovdqa 0+128(%rbp),%ymm0 vperm2i128 $0x02,%ymm2,%ymm6,%ymm3 vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 vpxor 0+128(%rsi),%ymm3,%ymm3 vpxor 32+128(%rsi),%ymm2,%ymm2 vpxor 64+128(%rsi),%ymm6,%ymm6 vpxor 96+128(%rsi),%ymm10,%ymm10 vmovdqu %ymm3,0+128(%rdi) vmovdqu %ymm2,32+128(%rdi) vmovdqu %ymm6,64+128(%rdi) vmovdqu %ymm10,96+128(%rdi) vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 vpxor 0+256(%rsi),%ymm3,%ymm3 vpxor 32+256(%rsi),%ymm1,%ymm1 vpxor 64+256(%rsi),%ymm5,%ymm5 vpxor 96+256(%rsi),%ymm9,%ymm9 vmovdqu %ymm3,0+256(%rdi) vmovdqu %ymm1,32+256(%rdi) vmovdqu %ymm5,64+256(%rdi) vmovdqu %ymm9,96+256(%rdi) vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 vmovdqa %ymm3,%ymm8 leaq 384(%rsi),%rsi leaq 384(%rdi),%rdi subq $384,%rbx L$open_avx2_tail_128_xor: cmpq $32,%rbx jb L$open_avx2_tail_32_xor subq $32,%rbx vpxor (%rsi),%ymm0,%ymm0 vmovdqu %ymm0,(%rdi) leaq 32(%rsi),%rsi leaq 32(%rdi),%rdi vmovdqa %ymm4,%ymm0 vmovdqa %ymm8,%ymm4 vmovdqa %ymm12,%ymm8 jmp L$open_avx2_tail_128_xor L$open_avx2_tail_32_xor: cmpq $16,%rbx vmovdqa %xmm0,%xmm1 jb L$open_avx2_exit subq $16,%rbx vpxor (%rsi),%xmm0,%xmm1 vmovdqu %xmm1,(%rdi) leaq 16(%rsi),%rsi leaq 16(%rdi),%rdi vperm2i128 $0x11,%ymm0,%ymm0,%ymm0 vmovdqa %xmm0,%xmm1 L$open_avx2_exit: vzeroupper jmp L$open_sse_tail_16 L$open_avx2_192: vmovdqa %ymm0,%ymm1 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm5 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm9 vmovdqa %ymm8,%ymm10 vpaddd L$avx2_inc(%rip),%ymm12,%ymm13 vmovdqa %ymm12,%ymm11 vmovdqa %ymm13,%ymm15 movq $10,%r10 L$open_avx2_192_rounds: vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 vpaddd 
%ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm5,%ymm5,%ymm5 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm5,%ymm5,%ymm5 decq %r10 jne L$open_avx2_192_rounds vpaddd %ymm2,%ymm0,%ymm0 vpaddd %ymm2,%ymm1,%ymm1 vpaddd %ymm6,%ymm4,%ymm4 vpaddd %ymm6,%ymm5,%ymm5 vpaddd %ymm10,%ymm8,%ymm8 vpaddd %ymm10,%ymm9,%ymm9 vpaddd %ymm11,%ymm12,%ymm12 vpaddd %ymm15,%ymm13,%ymm13 vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 vpand L$clamp(%rip),%ymm3,%ymm3 vmovdqa %ymm3,0+0(%rbp) vperm2i128 $0x13,%ymm0,%ymm4,%ymm0 vperm2i128 $0x13,%ymm8,%ymm12,%ymm4 vperm2i128 $0x02,%ymm1,%ymm5,%ymm8 vperm2i128 $0x02,%ymm9,%ymm13,%ymm12 vperm2i128 $0x13,%ymm1,%ymm5,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm5 L$open_avx2_short: movq %r8,%r8 call poly_hash_ad_internal L$open_avx2_short_hash_and_xor_loop: cmpq $32,%rbx jb L$open_avx2_short_tail_32 subq $32,%rbx addq 0+0(%rsi),%r10 adcq 8+0(%rsi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 addq 0+16(%rsi),%r10 adcq 8+16(%rsi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpxor (%rsi),%ymm0,%ymm0 vmovdqu %ymm0,(%rdi) leaq 32(%rsi),%rsi leaq 32(%rdi),%rdi vmovdqa %ymm4,%ymm0 vmovdqa %ymm8,%ymm4 vmovdqa 
%ymm12,%ymm8 vmovdqa %ymm1,%ymm12 vmovdqa %ymm5,%ymm1 vmovdqa %ymm9,%ymm5 vmovdqa %ymm13,%ymm9 vmovdqa %ymm2,%ymm13 vmovdqa %ymm6,%ymm2 jmp L$open_avx2_short_hash_and_xor_loop L$open_avx2_short_tail_32: cmpq $16,%rbx vmovdqa %xmm0,%xmm1 jb L$open_avx2_short_tail_32_exit subq $16,%rbx addq 0+0(%rsi),%r10 adcq 8+0(%rsi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpxor (%rsi),%xmm0,%xmm3 vmovdqu %xmm3,(%rdi) leaq 16(%rsi),%rsi leaq 16(%rdi),%rdi vextracti128 $1,%ymm0,%xmm1 L$open_avx2_short_tail_32_exit: vzeroupper jmp L$open_sse_tail_16 L$open_avx2_320: vmovdqa %ymm0,%ymm1 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm5 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm9 vmovdqa %ymm8,%ymm10 vpaddd L$avx2_inc(%rip),%ymm12,%ymm13 vpaddd L$avx2_inc(%rip),%ymm13,%ymm14 vmovdqa %ymm4,%ymm7 vmovdqa %ymm8,%ymm11 vmovdqa %ymm12,0+160(%rbp) vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm14,0+224(%rbp) movq $10,%r10 L$open_avx2_320_rounds: vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm5,%ymm5,%ymm5 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb L$rol16(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpsrld $20,%ymm6,%ymm3 vpslld $12,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb L$rol8(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm3 vpsrld $25,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpalignr $12,%ymm14,%ymm14,%ymm14 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm6,%ymm6,%ymm6 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb 
L$rol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm5,%ymm5,%ymm5 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb L$rol16(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpsrld $20,%ymm6,%ymm3 vpslld $12,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb L$rol8(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm3 vpsrld $25,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm6,%ymm6,%ymm6 decq %r10 jne L$open_avx2_320_rounds vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0 vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1 vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2 vpaddd %ymm7,%ymm4,%ymm4 vpaddd %ymm7,%ymm5,%ymm5 vpaddd %ymm7,%ymm6,%ymm6 vpaddd %ymm11,%ymm8,%ymm8 vpaddd %ymm11,%ymm9,%ymm9 vpaddd %ymm11,%ymm10,%ymm10 vpaddd 0+160(%rbp),%ymm12,%ymm12 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd 0+224(%rbp),%ymm14,%ymm14 vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 vpand L$clamp(%rip),%ymm3,%ymm3 vmovdqa %ymm3,0+0(%rbp) vperm2i128 $0x13,%ymm0,%ymm4,%ymm0 vperm2i128 $0x13,%ymm8,%ymm12,%ymm4 vperm2i128 $0x02,%ymm1,%ymm5,%ymm8 vperm2i128 $0x02,%ymm9,%ymm13,%ymm12 vperm2i128 $0x13,%ymm1,%ymm5,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm5 vperm2i128 $0x02,%ymm2,%ymm6,%ymm9 vperm2i128 $0x02,%ymm10,%ymm14,%ymm13 vperm2i128 $0x13,%ymm2,%ymm6,%ymm2 vperm2i128 $0x13,%ymm10,%ymm14,%ymm6 jmp L$open_avx2_short .globl _chacha20_poly1305_seal_avx2 .private_extern _chacha20_poly1305_seal_avx2 .p2align 6 _chacha20_poly1305_seal_avx2: _CET_ENDBR pushq %rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 pushq %r9 subq $288 + 0 + 32,%rsp leaq 32(%rsp),%rbp andq $-32,%rbp movq 56(%r9),%rbx addq %rdx,%rbx movq %r8,0+0+32(%rbp) movq %rbx,8+0+32(%rbp) movq %rdx,%rbx vzeroupper vmovdqa L$chacha20_consts(%rip),%ymm0 vbroadcasti128 0(%r9),%ymm4 vbroadcasti128 16(%r9),%ymm8 vbroadcasti128 32(%r9),%ymm12 vpaddd L$avx2_init(%rip),%ymm12,%ymm12 cmpq $192,%rbx jbe L$seal_avx2_192 cmpq $320,%rbx jbe L$seal_avx2_320 vmovdqa %ymm0,%ymm1 vmovdqa %ymm0,%ymm2 vmovdqa %ymm0,%ymm3 vmovdqa %ymm4,%ymm5 vmovdqa %ymm4,%ymm6 vmovdqa %ymm4,%ymm7 vmovdqa %ymm4,0+64(%rbp) vmovdqa %ymm8,%ymm9 vmovdqa %ymm8,%ymm10 vmovdqa %ymm8,%ymm11 vmovdqa %ymm8,0+96(%rbp) vmovdqa %ymm12,%ymm15 vpaddd L$avx2_inc(%rip),%ymm15,%ymm14 vpaddd L$avx2_inc(%rip),%ymm14,%ymm13 vpaddd L$avx2_inc(%rip),%ymm13,%ymm12 vmovdqa %ymm12,0+160(%rbp) vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm14,0+224(%rbp) vmovdqa %ymm15,0+256(%rbp) movq $10,%r10 L$seal_avx2_init_rounds: vmovdqa %ymm8,0+128(%rbp) vmovdqa L$rol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor 
%ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa L$rol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $4,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $12,%ymm15,%ymm15,%ymm15 vpalignr $4,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm14,%ymm14,%ymm14 vpalignr $4,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $4,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm12,%ymm12,%ymm12 vmovdqa %ymm8,0+128(%rbp) vmovdqa L$rol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa L$rol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $12,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $4,%ymm15,%ymm15,%ymm15 vpalignr $12,%ymm6,%ymm6,%ymm6 vpalignr 
$8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $12,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $12,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm12,%ymm12,%ymm12 decq %r10 jnz L$seal_avx2_init_rounds vpaddd L$chacha20_consts(%rip),%ymm3,%ymm3 vpaddd 0+64(%rbp),%ymm7,%ymm7 vpaddd 0+96(%rbp),%ymm11,%ymm11 vpaddd 0+256(%rbp),%ymm15,%ymm15 vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2 vpaddd 0+64(%rbp),%ymm6,%ymm6 vpaddd 0+96(%rbp),%ymm10,%ymm10 vpaddd 0+224(%rbp),%ymm14,%ymm14 vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1 vpaddd 0+64(%rbp),%ymm5,%ymm5 vpaddd 0+96(%rbp),%ymm9,%ymm9 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vperm2i128 $0x13,%ymm11,%ymm15,%ymm11 vperm2i128 $0x02,%ymm3,%ymm7,%ymm15 vperm2i128 $0x13,%ymm3,%ymm7,%ymm3 vpand L$clamp(%rip),%ymm15,%ymm15 vmovdqa %ymm15,0+0(%rbp) movq %r8,%r8 call poly_hash_ad_internal vpxor 0(%rsi),%ymm3,%ymm3 vpxor 32(%rsi),%ymm11,%ymm11 vmovdqu %ymm3,0(%rdi) vmovdqu %ymm11,32(%rdi) vperm2i128 $0x02,%ymm2,%ymm6,%ymm15 vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 vpxor 0+64(%rsi),%ymm15,%ymm15 vpxor 32+64(%rsi),%ymm2,%ymm2 vpxor 64+64(%rsi),%ymm6,%ymm6 vpxor 96+64(%rsi),%ymm10,%ymm10 vmovdqu %ymm15,0+64(%rdi) vmovdqu %ymm2,32+64(%rdi) vmovdqu %ymm6,64+64(%rdi) vmovdqu %ymm10,96+64(%rdi) vperm2i128 $0x02,%ymm1,%ymm5,%ymm15 vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 vpxor 0+192(%rsi),%ymm15,%ymm15 vpxor 32+192(%rsi),%ymm1,%ymm1 vpxor 64+192(%rsi),%ymm5,%ymm5 vpxor 96+192(%rsi),%ymm9,%ymm9 vmovdqu %ymm15,0+192(%rdi) vmovdqu %ymm1,32+192(%rdi) vmovdqu %ymm5,64+192(%rdi) vmovdqu %ymm9,96+192(%rdi) vperm2i128 $0x13,%ymm0,%ymm4,%ymm15 vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 vmovdqa %ymm15,%ymm8 leaq 320(%rsi),%rsi subq $320,%rbx movq $320,%rcx cmpq $128,%rbx jbe L$seal_avx2_short_hash_remainder vpxor 0(%rsi),%ymm0,%ymm0 vpxor 32(%rsi),%ymm4,%ymm4 vpxor 64(%rsi),%ymm8,%ymm8 vpxor 96(%rsi),%ymm12,%ymm12 vmovdqu %ymm0,320(%rdi) vmovdqu %ymm4,352(%rdi) vmovdqu %ymm8,384(%rdi) vmovdqu %ymm12,416(%rdi) leaq 128(%rsi),%rsi subq $128,%rbx movq $8,%rcx movq $2,%r8 cmpq $128,%rbx jbe L$seal_avx2_tail_128 cmpq $256,%rbx jbe L$seal_avx2_tail_256 cmpq $384,%rbx jbe L$seal_avx2_tail_384 cmpq $512,%rbx jbe L$seal_avx2_tail_512 vmovdqa L$chacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa %ymm0,%ymm1 vmovdqa %ymm4,%ymm5 vmovdqa %ymm8,%ymm9 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm10 vmovdqa %ymm0,%ymm3 vmovdqa %ymm4,%ymm7 vmovdqa %ymm8,%ymm11 vmovdqa L$avx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm15 vpaddd %ymm15,%ymm12,%ymm14 vpaddd %ymm14,%ymm12,%ymm13 vpaddd %ymm13,%ymm12,%ymm12 vmovdqa %ymm15,0+256(%rbp) vmovdqa %ymm14,0+224(%rbp) vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm12,0+160(%rbp) vmovdqa %ymm8,0+128(%rbp) vmovdqa L$rol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd 
%ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa L$rol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $4,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $12,%ymm15,%ymm15,%ymm15 vpalignr $4,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm14,%ymm14,%ymm14 vpalignr $4,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $4,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm12,%ymm12,%ymm12 vmovdqa %ymm8,0+128(%rbp) vmovdqa L$rol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa L$rol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 
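# The double-rounds below start the next 512-byte keystream batch for the
# AVX2 seal path; L$seal_avx2_main_loop_rounds then interleaves the
# remaining rounds with Poly1305 over the ciphertext already written (hash
# loads come from %rdi rather than %rsi, since sealing authenticates
# ciphertext).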
vpalignr $12,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $4,%ymm15,%ymm15,%ymm15 vpalignr $12,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $12,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $12,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm12,%ymm12,%ymm12 vmovdqa %ymm8,0+128(%rbp) vmovdqa L$rol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa L$rol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 subq $16,%rdi movq $9,%rcx jmp L$seal_avx2_main_loop_rounds_entry .p2align 5 L$seal_avx2_main_loop: vmovdqa L$chacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa %ymm0,%ymm1 vmovdqa %ymm4,%ymm5 vmovdqa %ymm8,%ymm9 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm10 vmovdqa %ymm0,%ymm3 vmovdqa %ymm4,%ymm7 vmovdqa %ymm8,%ymm11 vmovdqa L$avx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm15 vpaddd %ymm15,%ymm12,%ymm14 vpaddd %ymm14,%ymm12,%ymm13 vpaddd %ymm13,%ymm12,%ymm12 vmovdqa %ymm15,0+256(%rbp) vmovdqa %ymm14,0+224(%rbp) vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm12,0+160(%rbp) movq $10,%rcx .p2align 5 L$seal_avx2_main_loop_rounds: addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 vmovdqa %ymm8,0+128(%rbp) vmovdqa L$rol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 addq %rax,%r15 adcq %rdx,%r9 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa L$rol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq 
$3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 L$seal_avx2_main_loop_rounds_entry: vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 addq 0+16(%rdi),%r10 adcq 8+16(%rdi),%r11 adcq $1,%r12 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $4,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $12,%ymm15,%ymm15,%ymm15 vpalignr $4,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm14,%ymm14,%ymm14 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx vpalignr $4,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $4,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm12,%ymm12,%ymm12 vmovdqa %ymm8,0+128(%rbp) vmovdqa L$rol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 addq %rax,%r15 adcq %rdx,%r9 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 addq 0+32(%rdi),%r10 adcq 8+32(%rdi),%r11 adcq $1,%r12 leaq 48(%rdi),%rdi vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa L$rol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq 
%r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 addq %rax,%r15 adcq %rdx,%r9 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $12,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $4,%ymm15,%ymm15,%ymm15 vpalignr $12,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $12,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $12,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpalignr $4,%ymm12,%ymm12,%ymm12 decq %rcx jne L$seal_avx2_main_loop_rounds vpaddd L$chacha20_consts(%rip),%ymm3,%ymm3 vpaddd 0+64(%rbp),%ymm7,%ymm7 vpaddd 0+96(%rbp),%ymm11,%ymm11 vpaddd 0+256(%rbp),%ymm15,%ymm15 vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2 vpaddd 0+64(%rbp),%ymm6,%ymm6 vpaddd 0+96(%rbp),%ymm10,%ymm10 vpaddd 0+224(%rbp),%ymm14,%ymm14 vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1 vpaddd 0+64(%rbp),%ymm5,%ymm5 vpaddd 0+96(%rbp),%ymm9,%ymm9 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vmovdqa %ymm0,0+128(%rbp) addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 addq 0+16(%rdi),%r10 adcq 8+16(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 32(%rdi),%rdi vperm2i128 $0x02,%ymm3,%ymm7,%ymm0 vperm2i128 $0x13,%ymm3,%ymm7,%ymm7 vperm2i128 $0x02,%ymm11,%ymm15,%ymm3 vperm2i128 $0x13,%ymm11,%ymm15,%ymm11 vpxor 0+0(%rsi),%ymm0,%ymm0 vpxor 32+0(%rsi),%ymm3,%ymm3 vpxor 64+0(%rsi),%ymm7,%ymm7 vpxor 96+0(%rsi),%ymm11,%ymm11 vmovdqu %ymm0,0+0(%rdi) vmovdqu %ymm3,32+0(%rdi) vmovdqu %ymm7,64+0(%rdi) vmovdqu %ymm11,96+0(%rdi) vmovdqa 0+128(%rbp),%ymm0 vperm2i128 $0x02,%ymm2,%ymm6,%ymm3 vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 vpxor 0+128(%rsi),%ymm3,%ymm3 vpxor 32+128(%rsi),%ymm2,%ymm2 vpxor 64+128(%rsi),%ymm6,%ymm6 vpxor 96+128(%rsi),%ymm10,%ymm10 vmovdqu %ymm3,0+128(%rdi) vmovdqu %ymm2,32+128(%rdi) vmovdqu %ymm6,64+128(%rdi) vmovdqu %ymm10,96+128(%rdi) vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 vperm2i128 
$0x13,%ymm1,%ymm5,%ymm5 vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 vpxor 0+256(%rsi),%ymm3,%ymm3 vpxor 32+256(%rsi),%ymm1,%ymm1 vpxor 64+256(%rsi),%ymm5,%ymm5 vpxor 96+256(%rsi),%ymm9,%ymm9 vmovdqu %ymm3,0+256(%rdi) vmovdqu %ymm1,32+256(%rdi) vmovdqu %ymm5,64+256(%rdi) vmovdqu %ymm9,96+256(%rdi) vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 vperm2i128 $0x13,%ymm0,%ymm4,%ymm4 vperm2i128 $0x02,%ymm8,%ymm12,%ymm0 vperm2i128 $0x13,%ymm8,%ymm12,%ymm8 vpxor 0+384(%rsi),%ymm3,%ymm3 vpxor 32+384(%rsi),%ymm0,%ymm0 vpxor 64+384(%rsi),%ymm4,%ymm4 vpxor 96+384(%rsi),%ymm8,%ymm8 vmovdqu %ymm3,0+384(%rdi) vmovdqu %ymm0,32+384(%rdi) vmovdqu %ymm4,64+384(%rdi) vmovdqu %ymm8,96+384(%rdi) leaq 512(%rsi),%rsi subq $512,%rbx cmpq $512,%rbx jg L$seal_avx2_main_loop addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 addq 0+16(%rdi),%r10 adcq 8+16(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 32(%rdi),%rdi movq $10,%rcx xorq %r8,%r8 cmpq $384,%rbx ja L$seal_avx2_tail_512 cmpq $256,%rbx ja L$seal_avx2_tail_384 cmpq $128,%rbx ja L$seal_avx2_tail_256 L$seal_avx2_tail_128: vmovdqa L$chacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa L$avx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm12 vmovdqa %ymm12,0+160(%rbp) L$seal_avx2_tail_128_rounds_and_3xhash: addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi L$seal_avx2_tail_128_rounds_and_2xhash: vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq 
%r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 addq 0+16(%rdi),%r10 adcq 8+16(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 32(%rdi),%rdi decq %rcx jg L$seal_avx2_tail_128_rounds_and_3xhash decq %r8 jge L$seal_avx2_tail_128_rounds_and_2xhash vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 vmovdqa %ymm3,%ymm8 jmp L$seal_avx2_short_loop L$seal_avx2_tail_256: vmovdqa L$chacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa %ymm0,%ymm1 vmovdqa %ymm4,%ymm5 vmovdqa %ymm8,%ymm9 vmovdqa L$avx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm13 vpaddd %ymm13,%ymm12,%ymm12 vmovdqa %ymm12,0+160(%rbp) vmovdqa %ymm13,0+192(%rbp) L$seal_avx2_tail_256_rounds_and_3xhash: addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi L$seal_avx2_tail_256_rounds_and_2xhash: vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd 
%ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm5,%ymm5,%ymm5 addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm5,%ymm5,%ymm5 addq 0+16(%rdi),%r10 adcq 8+16(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 32(%rdi),%rdi decq %rcx jg L$seal_avx2_tail_256_rounds_and_3xhash decq %r8 jge L$seal_avx2_tail_256_rounds_and_2xhash vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1 vpaddd 0+64(%rbp),%ymm5,%ymm5 vpaddd 0+96(%rbp),%ymm9,%ymm9 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 vpxor 0+0(%rsi),%ymm3,%ymm3 vpxor 32+0(%rsi),%ymm1,%ymm1 vpxor 64+0(%rsi),%ymm5,%ymm5 vpxor 96+0(%rsi),%ymm9,%ymm9 vmovdqu %ymm3,0+0(%rdi) vmovdqu %ymm1,32+0(%rdi) vmovdqu %ymm5,64+0(%rdi) vmovdqu %ymm9,96+0(%rdi) vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 vmovdqa %ymm3,%ymm8 movq $128,%rcx leaq 128(%rsi),%rsi subq $128,%rbx jmp L$seal_avx2_short_hash_remainder L$seal_avx2_tail_384: vmovdqa 
L$chacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa %ymm0,%ymm1 vmovdqa %ymm4,%ymm5 vmovdqa %ymm8,%ymm9 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm10 vmovdqa L$avx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm14 vpaddd %ymm14,%ymm12,%ymm13 vpaddd %ymm13,%ymm12,%ymm12 vmovdqa %ymm12,0+160(%rbp) vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm14,0+224(%rbp) L$seal_avx2_tail_384_rounds_and_3xhash: addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi L$seal_avx2_tail_384_rounds_and_2xhash: vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm5,%ymm5,%ymm5 addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb L$rol16(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpsrld $20,%ymm6,%ymm3 vpslld $12,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb L$rol8(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm3 vpsrld $25,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpalignr $12,%ymm14,%ymm14,%ymm14 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm6,%ymm6,%ymm6 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 
vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 addq 0+16(%rdi),%r10 adcq 8+16(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm5,%ymm5,%ymm5 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb L$rol16(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpsrld $20,%ymm6,%ymm3 vpslld $12,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb L$rol8(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm3 vpsrld $25,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm6,%ymm6,%ymm6 leaq 32(%rdi),%rdi decq %rcx jg L$seal_avx2_tail_384_rounds_and_3xhash decq %r8 jge L$seal_avx2_tail_384_rounds_and_2xhash vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2 vpaddd 0+64(%rbp),%ymm6,%ymm6 vpaddd 0+96(%rbp),%ymm10,%ymm10 vpaddd 0+224(%rbp),%ymm14,%ymm14 vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1 vpaddd 0+64(%rbp),%ymm5,%ymm5 vpaddd 0+96(%rbp),%ymm9,%ymm9 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vperm2i128 $0x02,%ymm2,%ymm6,%ymm3 vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 vpxor 0+0(%rsi),%ymm3,%ymm3 vpxor 32+0(%rsi),%ymm2,%ymm2 vpxor 64+0(%rsi),%ymm6,%ymm6 vpxor 96+0(%rsi),%ymm10,%ymm10 vmovdqu %ymm3,0+0(%rdi) vmovdqu %ymm2,32+0(%rdi) vmovdqu %ymm6,64+0(%rdi) vmovdqu %ymm10,96+0(%rdi) vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 vpxor 0+128(%rsi),%ymm3,%ymm3 vpxor 32+128(%rsi),%ymm1,%ymm1 vpxor 64+128(%rsi),%ymm5,%ymm5 vpxor 96+128(%rsi),%ymm9,%ymm9 vmovdqu %ymm3,0+128(%rdi) vmovdqu %ymm1,32+128(%rdi) vmovdqu %ymm5,64+128(%rdi) vmovdqu %ymm9,96+128(%rdi) vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 vmovdqa %ymm3,%ymm8 movq $256,%rcx leaq 256(%rsi),%rsi subq $256,%rbx jmp L$seal_avx2_short_hash_remainder L$seal_avx2_tail_512: vmovdqa L$chacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa %ymm0,%ymm1 vmovdqa %ymm4,%ymm5 vmovdqa %ymm8,%ymm9 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm6 vmovdqa 
%ymm8,%ymm10 vmovdqa %ymm0,%ymm3 vmovdqa %ymm4,%ymm7 vmovdqa %ymm8,%ymm11 vmovdqa L$avx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm15 vpaddd %ymm15,%ymm12,%ymm14 vpaddd %ymm14,%ymm12,%ymm13 vpaddd %ymm13,%ymm12,%ymm12 vmovdqa %ymm15,0+256(%rbp) vmovdqa %ymm14,0+224(%rbp) vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm12,0+160(%rbp) L$seal_avx2_tail_512_rounds_and_3xhash: addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi L$seal_avx2_tail_512_rounds_and_2xhash: vmovdqa %ymm8,0+128(%rbp) vmovdqa L$rol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa L$rol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $4,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $12,%ymm15,%ymm15,%ymm15 vpalignr $4,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm14,%ymm14,%ymm14 vpalignr $4,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $4,%ymm4,%ymm4,%ymm4 addq %rax,%r15 adcq %rdx,%r9 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm12,%ymm12,%ymm12 vmovdqa %ymm8,0+128(%rbp) vmovdqa L$rol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd 
%ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa L$rol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 addq 0+16(%rdi),%r10 adcq 8+16(%rdi),%r11 adcq $1,%r12 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $12,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $4,%ymm15,%ymm15,%ymm15 vpalignr $12,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $12,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $12,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm12,%ymm12,%ymm12 addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 32(%rdi),%rdi decq %rcx jg L$seal_avx2_tail_512_rounds_and_3xhash decq %r8 jge L$seal_avx2_tail_512_rounds_and_2xhash vpaddd L$chacha20_consts(%rip),%ymm3,%ymm3 vpaddd 0+64(%rbp),%ymm7,%ymm7 vpaddd 0+96(%rbp),%ymm11,%ymm11 vpaddd 0+256(%rbp),%ymm15,%ymm15 vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2 vpaddd 0+64(%rbp),%ymm6,%ymm6 vpaddd 0+96(%rbp),%ymm10,%ymm10 vpaddd 0+224(%rbp),%ymm14,%ymm14 vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1 vpaddd 0+64(%rbp),%ymm5,%ymm5 vpaddd 0+96(%rbp),%ymm9,%ymm9 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vmovdqa %ymm0,0+128(%rbp) vperm2i128 
$0x02,%ymm3,%ymm7,%ymm0 vperm2i128 $0x13,%ymm3,%ymm7,%ymm7 vperm2i128 $0x02,%ymm11,%ymm15,%ymm3 vperm2i128 $0x13,%ymm11,%ymm15,%ymm11 vpxor 0+0(%rsi),%ymm0,%ymm0 vpxor 32+0(%rsi),%ymm3,%ymm3 vpxor 64+0(%rsi),%ymm7,%ymm7 vpxor 96+0(%rsi),%ymm11,%ymm11 vmovdqu %ymm0,0+0(%rdi) vmovdqu %ymm3,32+0(%rdi) vmovdqu %ymm7,64+0(%rdi) vmovdqu %ymm11,96+0(%rdi) vmovdqa 0+128(%rbp),%ymm0 vperm2i128 $0x02,%ymm2,%ymm6,%ymm3 vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 vpxor 0+128(%rsi),%ymm3,%ymm3 vpxor 32+128(%rsi),%ymm2,%ymm2 vpxor 64+128(%rsi),%ymm6,%ymm6 vpxor 96+128(%rsi),%ymm10,%ymm10 vmovdqu %ymm3,0+128(%rdi) vmovdqu %ymm2,32+128(%rdi) vmovdqu %ymm6,64+128(%rdi) vmovdqu %ymm10,96+128(%rdi) vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 vpxor 0+256(%rsi),%ymm3,%ymm3 vpxor 32+256(%rsi),%ymm1,%ymm1 vpxor 64+256(%rsi),%ymm5,%ymm5 vpxor 96+256(%rsi),%ymm9,%ymm9 vmovdqu %ymm3,0+256(%rdi) vmovdqu %ymm1,32+256(%rdi) vmovdqu %ymm5,64+256(%rdi) vmovdqu %ymm9,96+256(%rdi) vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 vmovdqa %ymm3,%ymm8 movq $384,%rcx leaq 384(%rsi),%rsi subq $384,%rbx jmp L$seal_avx2_short_hash_remainder L$seal_avx2_320: vmovdqa %ymm0,%ymm1 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm5 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm9 vmovdqa %ymm8,%ymm10 vpaddd L$avx2_inc(%rip),%ymm12,%ymm13 vpaddd L$avx2_inc(%rip),%ymm13,%ymm14 vmovdqa %ymm4,%ymm7 vmovdqa %ymm8,%ymm11 vmovdqa %ymm12,0+160(%rbp) vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm14,0+224(%rbp) movq $10,%r10 L$seal_avx2_320_rounds: vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm5,%ymm5,%ymm5 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb L$rol16(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpsrld $20,%ymm6,%ymm3 vpslld $12,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb L$rol8(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm3 vpsrld $25,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpalignr $12,%ymm14,%ymm14,%ymm14 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm6,%ymm6,%ymm6 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb 
L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm5,%ymm5,%ymm5 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb L$rol16(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpsrld $20,%ymm6,%ymm3 vpslld $12,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb L$rol8(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm3 vpsrld $25,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm6,%ymm6,%ymm6 decq %r10 jne L$seal_avx2_320_rounds vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0 vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1 vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2 vpaddd %ymm7,%ymm4,%ymm4 vpaddd %ymm7,%ymm5,%ymm5 vpaddd %ymm7,%ymm6,%ymm6 vpaddd %ymm11,%ymm8,%ymm8 vpaddd %ymm11,%ymm9,%ymm9 vpaddd %ymm11,%ymm10,%ymm10 vpaddd 0+160(%rbp),%ymm12,%ymm12 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd 0+224(%rbp),%ymm14,%ymm14 vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 vpand L$clamp(%rip),%ymm3,%ymm3 vmovdqa %ymm3,0+0(%rbp) vperm2i128 $0x13,%ymm0,%ymm4,%ymm0 vperm2i128 $0x13,%ymm8,%ymm12,%ymm4 vperm2i128 $0x02,%ymm1,%ymm5,%ymm8 vperm2i128 $0x02,%ymm9,%ymm13,%ymm12 vperm2i128 $0x13,%ymm1,%ymm5,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm5 vperm2i128 $0x02,%ymm2,%ymm6,%ymm9 vperm2i128 $0x02,%ymm10,%ymm14,%ymm13 vperm2i128 $0x13,%ymm2,%ymm6,%ymm2 vperm2i128 $0x13,%ymm10,%ymm14,%ymm6 jmp L$seal_avx2_short L$seal_avx2_192: vmovdqa %ymm0,%ymm1 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm5 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm9 vmovdqa %ymm8,%ymm10 vpaddd L$avx2_inc(%rip),%ymm12,%ymm13 vmovdqa %ymm12,%ymm11 vmovdqa %ymm13,%ymm15 movq $10,%r10 L$seal_avx2_192_rounds: vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm5,%ymm5,%ymm5 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor 
%ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm5,%ymm5,%ymm5 decq %r10 jne L$seal_avx2_192_rounds vpaddd %ymm2,%ymm0,%ymm0 vpaddd %ymm2,%ymm1,%ymm1 vpaddd %ymm6,%ymm4,%ymm4 vpaddd %ymm6,%ymm5,%ymm5 vpaddd %ymm10,%ymm8,%ymm8 vpaddd %ymm10,%ymm9,%ymm9 vpaddd %ymm11,%ymm12,%ymm12 vpaddd %ymm15,%ymm13,%ymm13 vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 vpand L$clamp(%rip),%ymm3,%ymm3 vmovdqa %ymm3,0+0(%rbp) vperm2i128 $0x13,%ymm0,%ymm4,%ymm0 vperm2i128 $0x13,%ymm8,%ymm12,%ymm4 vperm2i128 $0x02,%ymm1,%ymm5,%ymm8 vperm2i128 $0x02,%ymm9,%ymm13,%ymm12 vperm2i128 $0x13,%ymm1,%ymm5,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm5 L$seal_avx2_short: movq %r8,%r8 call poly_hash_ad_internal xorq %rcx,%rcx L$seal_avx2_short_hash_remainder: cmpq $16,%rcx jb L$seal_avx2_short_loop addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 subq $16,%rcx addq $16,%rdi jmp L$seal_avx2_short_hash_remainder L$seal_avx2_short_loop: cmpq $32,%rbx jb L$seal_avx2_short_tail subq $32,%rbx vpxor (%rsi),%ymm0,%ymm0 vmovdqu %ymm0,(%rdi) leaq 32(%rsi),%rsi addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 addq 0+16(%rdi),%r10 adcq 8+16(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 
adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 32(%rdi),%rdi vmovdqa %ymm4,%ymm0 vmovdqa %ymm8,%ymm4 vmovdqa %ymm12,%ymm8 vmovdqa %ymm1,%ymm12 vmovdqa %ymm5,%ymm1 vmovdqa %ymm9,%ymm5 vmovdqa %ymm13,%ymm9 vmovdqa %ymm2,%ymm13 vmovdqa %ymm6,%ymm2 jmp L$seal_avx2_short_loop L$seal_avx2_short_tail: cmpq $16,%rbx jb L$seal_avx2_exit subq $16,%rbx vpxor (%rsi),%xmm0,%xmm3 vmovdqu %xmm3,(%rdi) leaq 16(%rsi),%rsi addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi vextracti128 $1,%ymm0,%xmm0 L$seal_avx2_exit: vzeroupper jmp L$seal_sse_tail_16 #endif
fatiimajamiil/rustpad-custom
10,875
.cargo/registry/src/index.crates.io-6f17d22bba15001f/ring-0.17.14/pregenerated/ghash-neon-armv8-ios64.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__) .text .globl _gcm_init_neon .private_extern _gcm_init_neon .align 4 _gcm_init_neon: AARCH64_VALID_CALL_TARGET // This function is adapted from gcm_init_v8. xC2 is t3. ld1 {v17.2d}, [x1] // load H movi v19.16b, #0xe1 shl v19.2d, v19.2d, #57 // 0xc2.0 ext v3.16b, v17.16b, v17.16b, #8 ushr v18.2d, v19.2d, #63 dup v17.4s, v17.s[1] ext v16.16b, v18.16b, v19.16b, #8 // t0=0xc2....01 ushr v18.2d, v3.2d, #63 sshr v17.4s, v17.4s, #31 // broadcast carry bit and v18.16b, v18.16b, v16.16b shl v3.2d, v3.2d, #1 ext v18.16b, v18.16b, v18.16b, #8 and v16.16b, v16.16b, v17.16b orr v3.16b, v3.16b, v18.16b // H<<<=1 eor v5.16b, v3.16b, v16.16b // twisted H st1 {v5.2d}, [x0] // store Htable[0] ret .globl _gcm_gmult_neon .private_extern _gcm_gmult_neon .align 4 _gcm_gmult_neon: AARCH64_VALID_CALL_TARGET ld1 {v3.16b}, [x0] // load Xi ld1 {v5.1d}, [x1], #8 // load twisted H ld1 {v6.1d}, [x1] adrp x9, Lmasks@PAGE // load constants add x9, x9, Lmasks@PAGEOFF ld1 {v24.2d, v25.2d}, [x9] rev64 v3.16b, v3.16b // byteswap Xi ext v3.16b, v3.16b, v3.16b, #8 eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing mov x3, #16 b Lgmult_neon .globl _gcm_ghash_neon .private_extern _gcm_ghash_neon .align 4 _gcm_ghash_neon: AARCH64_VALID_CALL_TARGET ld1 {v0.16b}, [x0] // load Xi ld1 {v5.1d}, [x1], #8 // load twisted H ld1 {v6.1d}, [x1] adrp x9, Lmasks@PAGE // load constants add x9, x9, Lmasks@PAGEOFF ld1 {v24.2d, v25.2d}, [x9] rev64 v0.16b, v0.16b // byteswap Xi ext v0.16b, v0.16b, v0.16b, #8 eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing Loop_neon: ld1 {v3.16b}, [x2], #16 // load inp rev64 v3.16b, v3.16b // byteswap inp ext v3.16b, v3.16b, v3.16b, #8 eor v3.16b, v3.16b, v0.16b // inp ^= Xi Lgmult_neon: // Split the input into v3 and v4. (The upper halves are unused, // so it is okay to leave them alone.) ins v4.d[0], v3.d[1] ext v16.8b, v5.8b, v5.8b, #1 // A1 pmull v16.8h, v16.8b, v3.8b // F = A1*B ext v0.8b, v3.8b, v3.8b, #1 // B1 pmull v0.8h, v5.8b, v0.8b // E = A*B1 ext v17.8b, v5.8b, v5.8b, #2 // A2 pmull v17.8h, v17.8b, v3.8b // H = A2*B ext v19.8b, v3.8b, v3.8b, #2 // B2 pmull v19.8h, v5.8b, v19.8b // G = A*B2 ext v18.8b, v5.8b, v5.8b, #3 // A3 eor v16.16b, v16.16b, v0.16b // L = E + F pmull v18.8h, v18.8b, v3.8b // J = A3*B ext v0.8b, v3.8b, v3.8b, #3 // B3 eor v17.16b, v17.16b, v19.16b // M = G + H pmull v0.8h, v5.8b, v0.8b // I = A*B3 // Here we diverge from the 32-bit version. It computes the following // (instructions reordered for clarity): // // veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L) // vand $t0#hi, $t0#hi, $k48 // veor $t0#lo, $t0#lo, $t0#hi // // veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M) // vand $t1#hi, $t1#hi, $k32 // veor $t1#lo, $t1#lo, $t1#hi // // veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N) // vand $t2#hi, $t2#hi, $k16 // veor $t2#lo, $t2#lo, $t2#hi // // veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K) // vmov.i64 $t3#hi, #0 // // $kN is a mask with the bottom N bits set. AArch64 cannot compute on // upper halves of SIMD registers, so we must split each half into // separate registers. To compensate, we pair computations up and // parallelize. ext v19.8b, v3.8b, v3.8b, #4 // B4 eor v18.16b, v18.16b, v0.16b // N = I + J pmull v19.8h, v5.8b, v19.8b // K = A*B4 // This can probably be scheduled more efficiently. For now, we just // pair up independent instructions. 
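// The zip1/zip2 shuffles below pair {t0,t1} and {t2,t3} so that the
// "lo ^= hi, hi &= mask, lo ^= hi" folds described above run on two terms at
// a time, using the k48/k32/k16/k0 masks preloaded into v24/v25.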
zip1 v20.2d, v16.2d, v17.2d zip1 v22.2d, v18.2d, v19.2d zip2 v21.2d, v16.2d, v17.2d zip2 v23.2d, v18.2d, v19.2d eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b and v21.16b, v21.16b, v24.16b and v23.16b, v23.16b, v25.16b eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b zip1 v16.2d, v20.2d, v21.2d zip1 v18.2d, v22.2d, v23.2d zip2 v17.2d, v20.2d, v21.2d zip2 v19.2d, v22.2d, v23.2d ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8 ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16 pmull v0.8h, v5.8b, v3.8b // D = A*B ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32 ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24 eor v16.16b, v16.16b, v17.16b eor v18.16b, v18.16b, v19.16b eor v0.16b, v0.16b, v16.16b eor v0.16b, v0.16b, v18.16b eor v3.8b, v3.8b, v4.8b // Karatsuba pre-processing ext v16.8b, v7.8b, v7.8b, #1 // A1 pmull v16.8h, v16.8b, v3.8b // F = A1*B ext v1.8b, v3.8b, v3.8b, #1 // B1 pmull v1.8h, v7.8b, v1.8b // E = A*B1 ext v17.8b, v7.8b, v7.8b, #2 // A2 pmull v17.8h, v17.8b, v3.8b // H = A2*B ext v19.8b, v3.8b, v3.8b, #2 // B2 pmull v19.8h, v7.8b, v19.8b // G = A*B2 ext v18.8b, v7.8b, v7.8b, #3 // A3 eor v16.16b, v16.16b, v1.16b // L = E + F pmull v18.8h, v18.8b, v3.8b // J = A3*B ext v1.8b, v3.8b, v3.8b, #3 // B3 eor v17.16b, v17.16b, v19.16b // M = G + H pmull v1.8h, v7.8b, v1.8b // I = A*B3 // Here we diverge from the 32-bit version. It computes the following // (instructions reordered for clarity): // // veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L) // vand $t0#hi, $t0#hi, $k48 // veor $t0#lo, $t0#lo, $t0#hi // // veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M) // vand $t1#hi, $t1#hi, $k32 // veor $t1#lo, $t1#lo, $t1#hi // // veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N) // vand $t2#hi, $t2#hi, $k16 // veor $t2#lo, $t2#lo, $t2#hi // // veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K) // vmov.i64 $t3#hi, #0 // // $kN is a mask with the bottom N bits set. AArch64 cannot compute on // upper halves of SIMD registers, so we must split each half into // separate registers. To compensate, we pair computations up and // parallelize. ext v19.8b, v3.8b, v3.8b, #4 // B4 eor v18.16b, v18.16b, v1.16b // N = I + J pmull v19.8h, v7.8b, v19.8b // K = A*B4 // This can probably be scheduled more efficiently. For now, we just // pair up independent instructions. 
zip1 v20.2d, v16.2d, v17.2d zip1 v22.2d, v18.2d, v19.2d zip2 v21.2d, v16.2d, v17.2d zip2 v23.2d, v18.2d, v19.2d eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b and v21.16b, v21.16b, v24.16b and v23.16b, v23.16b, v25.16b eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b zip1 v16.2d, v20.2d, v21.2d zip1 v18.2d, v22.2d, v23.2d zip2 v17.2d, v20.2d, v21.2d zip2 v19.2d, v22.2d, v23.2d ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8 ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16 pmull v1.8h, v7.8b, v3.8b // D = A*B ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32 ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24 eor v16.16b, v16.16b, v17.16b eor v18.16b, v18.16b, v19.16b eor v1.16b, v1.16b, v16.16b eor v1.16b, v1.16b, v18.16b ext v16.8b, v6.8b, v6.8b, #1 // A1 pmull v16.8h, v16.8b, v4.8b // F = A1*B ext v2.8b, v4.8b, v4.8b, #1 // B1 pmull v2.8h, v6.8b, v2.8b // E = A*B1 ext v17.8b, v6.8b, v6.8b, #2 // A2 pmull v17.8h, v17.8b, v4.8b // H = A2*B ext v19.8b, v4.8b, v4.8b, #2 // B2 pmull v19.8h, v6.8b, v19.8b // G = A*B2 ext v18.8b, v6.8b, v6.8b, #3 // A3 eor v16.16b, v16.16b, v2.16b // L = E + F pmull v18.8h, v18.8b, v4.8b // J = A3*B ext v2.8b, v4.8b, v4.8b, #3 // B3 eor v17.16b, v17.16b, v19.16b // M = G + H pmull v2.8h, v6.8b, v2.8b // I = A*B3 // Here we diverge from the 32-bit version. It computes the following // (instructions reordered for clarity): // // veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L) // vand $t0#hi, $t0#hi, $k48 // veor $t0#lo, $t0#lo, $t0#hi // // veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M) // vand $t1#hi, $t1#hi, $k32 // veor $t1#lo, $t1#lo, $t1#hi // // veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N) // vand $t2#hi, $t2#hi, $k16 // veor $t2#lo, $t2#lo, $t2#hi // // veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K) // vmov.i64 $t3#hi, #0 // // $kN is a mask with the bottom N bits set. AArch64 cannot compute on // upper halves of SIMD registers, so we must split each half into // separate registers. To compensate, we pair computations up and // parallelize. ext v19.8b, v4.8b, v4.8b, #4 // B4 eor v18.16b, v18.16b, v2.16b // N = I + J pmull v19.8h, v6.8b, v19.8b // K = A*B4 // This can probably be scheduled more efficiently. For now, we just // pair up independent instructions. zip1 v20.2d, v16.2d, v17.2d zip1 v22.2d, v18.2d, v19.2d zip2 v21.2d, v16.2d, v17.2d zip2 v23.2d, v18.2d, v19.2d eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b and v21.16b, v21.16b, v24.16b and v23.16b, v23.16b, v25.16b eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b zip1 v16.2d, v20.2d, v21.2d zip1 v18.2d, v22.2d, v23.2d zip2 v17.2d, v20.2d, v21.2d zip2 v19.2d, v22.2d, v23.2d ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8 ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16 pmull v2.8h, v6.8b, v4.8b // D = A*B ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32 ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24 eor v16.16b, v16.16b, v17.16b eor v18.16b, v18.16b, v19.16b eor v2.16b, v2.16b, v16.16b eor v2.16b, v2.16b, v18.16b ext v16.16b, v0.16b, v2.16b, #8 eor v1.16b, v1.16b, v0.16b // Karatsuba post-processing eor v1.16b, v1.16b, v2.16b eor v1.16b, v1.16b, v16.16b // Xm overlaps Xh.lo and Xl.hi ins v0.d[1], v1.d[0] // Xh|Xl - 256-bit result // This is a no-op due to the ins instruction below. 
// ins v2.d[0], v1.d[1] // equivalent of reduction_avx from ghash-x86_64.pl shl v17.2d, v0.2d, #57 // 1st phase shl v18.2d, v0.2d, #62 eor v18.16b, v18.16b, v17.16b // shl v17.2d, v0.2d, #63 eor v18.16b, v18.16b, v17.16b // // Note Xm contains {Xl.d[1], Xh.d[0]}. eor v18.16b, v18.16b, v1.16b ins v0.d[1], v18.d[0] // Xl.d[1] ^= t2.d[0] ins v2.d[0], v18.d[1] // Xh.d[0] ^= t2.d[1] ushr v18.2d, v0.2d, #1 // 2nd phase eor v2.16b, v2.16b,v0.16b eor v0.16b, v0.16b,v18.16b // ushr v18.2d, v18.2d, #6 ushr v0.2d, v0.2d, #1 // eor v0.16b, v0.16b, v2.16b // eor v0.16b, v0.16b, v18.16b // subs x3, x3, #16 bne Loop_neon rev64 v0.16b, v0.16b // byteswap Xi and write ext v0.16b, v0.16b, v0.16b, #8 st1 {v0.16b}, [x0] ret .section __TEXT,__const .align 4 Lmasks: .quad 0x0000ffffffffffff // k48 .quad 0x00000000ffffffff // k32 .quad 0x000000000000ffff // k16 .quad 0x0000000000000000 // k0 .byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,100,101,114,105,118,101,100,32,102,114,111,109,32,65,82,77,118,52,32,118,101,114,115,105,111,110,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
fatiimajamiil/rustpad-custom
17,785
.cargo/registry/src/index.crates.io-6f17d22bba15001f/ring-0.17.14/pregenerated/bsaes-armv7-linux32.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__) @ Copyright 2012-2016 The OpenSSL Project Authors. All Rights Reserved. @ @ Licensed under the Apache License, Version 2.0 (the "License"); @ you may not use this file except in compliance with the License. @ You may obtain a copy of the License at @ @ https://www.apache.org/licenses/LICENSE-2.0 @ @ Unless required by applicable law or agreed to in writing, software @ distributed under the License is distributed on an "AS IS" BASIS, @ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @ See the License for the specific language governing permissions and @ limitations under the License. @ ==================================================================== @ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL @ project. @ @ Specific modes and adaptation for Linux kernel by Ard Biesheuvel @ of Linaro. @ ==================================================================== @ Bit-sliced AES for ARM NEON @ @ February 2012. @ @ This implementation is direct adaptation of bsaes-x86_64 module for @ ARM NEON. Except that this module is endian-neutral [in sense that @ it can be compiled for either endianness] by courtesy of vld1.8's @ neutrality. Initial version doesn't implement interface to OpenSSL, @ only low-level primitives and unsupported entry points, just enough @ to collect performance results, which for Cortex-A8 core are: @ @ encrypt 19.5 cycles per byte processed with 128-bit key @ decrypt 22.1 cycles per byte processed with 128-bit key @ key conv. 440 cycles per 128-bit key/0.18 of 8x block @ @ Snapdragon S4 encrypts byte in 17.6 cycles and decrypts in 19.7, @ which is [much] worse than anticipated (for further details see @ http://www.openssl.org/~appro/Snapdragon-S4.html). @ @ Cortex-A15 manages in 14.2/16.1 cycles [when integer-only code @ manages in 20.0 cycles]. @ @ When comparing to x86_64 results keep in mind that NEON unit is @ [mostly] single-issue and thus can't [fully] benefit from @ instruction-level parallelism. And when comparing to aes-armv4 @ results keep in mind key schedule conversion overhead (see @ bsaes-x86_64.pl for further details)... @ @ <appro@openssl.org> @ April-August 2013 @ Add CBC, CTR and XTS subroutines and adapt for kernel use; courtesy of Ard. 
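The bit-slicing described in the header above is carried out further down by _bsaes_encrypt8_bitslice, which exchanges bit groups between register pairs using the masks 0x55…, 0x33… and 0x0f…. That exchange is the classic SWAPMOVE step; a minimal sketch on 64-bit lanes (the NEON code applies the same operation to 128-bit q-registers, eight blocks at a time):

// SWAPMOVE: swap the bits of `a` selected by `mask << shift` with the
// bits of `b` selected by `mask`. Three passes with the shift/mask pairs
// (1, 0x5555…), (2, 0x3333…) and (4, 0x0f0f…) move the state into
// bit-sliced form, mirroring the vshr/veor/vand/vshl/veor sequences in
// _bsaes_encrypt8_bitslice below.
fn swapmove(a: &mut u64, b: &mut u64, shift: u32, mask: u64) {
    let t = ((*a >> shift) ^ *b) & mask;
    *b ^= t;
    *a ^= t << shift;
}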
#ifndef __KERNEL__ # define VFP_ABI_PUSH vstmdb sp!,{d8-d15} # define VFP_ABI_POP vldmia sp!,{d8-d15} # define VFP_ABI_FRAME 0x40 #else # define VFP_ABI_PUSH # define VFP_ABI_POP # define VFP_ABI_FRAME 0 # define BSAES_ASM_EXTENDED_KEY # define __ARM_MAX_ARCH__ 7 #endif #ifdef __thumb__ # define adrl adr #endif #if __ARM_MAX_ARCH__>=7 .arch armv7-a .fpu neon .text .syntax unified @ ARMv7-capable assembler is expected to handle this #if defined(__thumb2__) && !defined(__APPLE__) .thumb #else .code 32 # undef __thumb2__ #endif .type _bsaes_const,%object .align 6 _bsaes_const: .LM0ISR:@ InvShiftRows constants .quad 0x0a0e0206070b0f03, 0x0004080c0d010509 .LISR: .quad 0x0504070602010003, 0x0f0e0d0c080b0a09 .LISRM0: .quad 0x01040b0e0205080f, 0x0306090c00070a0d .LM0SR:@ ShiftRows constants .quad 0x0a0e02060f03070b, 0x0004080c05090d01 .LSR: .quad 0x0504070600030201, 0x0f0e0d0c0a09080b .LSRM0: .quad 0x0304090e00050a0f, 0x01060b0c0207080d .LM0: .quad 0x02060a0e03070b0f, 0x0004080c0105090d .LREVM0SR: .quad 0x090d01050c000408, 0x03070b0f060a0e02 .byte 66,105,116,45,115,108,105,99,101,100,32,65,69,83,32,102,111,114,32,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 6 .size _bsaes_const,.-_bsaes_const .type _bsaes_encrypt8,%function .align 4 _bsaes_encrypt8: adr r6,. vldmia r4!, {q9} @ round 0 key #if defined(__thumb2__) || defined(__APPLE__) adr r6,.LM0SR #else sub r6,r6,#_bsaes_encrypt8-.LM0SR #endif vldmia r6!, {q8} @ .LM0SR _bsaes_encrypt8_alt: veor q10, q0, q9 @ xor with round0 key veor q11, q1, q9 vtbl.8 d0, {q10}, d16 vtbl.8 d1, {q10}, d17 veor q12, q2, q9 vtbl.8 d2, {q11}, d16 vtbl.8 d3, {q11}, d17 veor q13, q3, q9 vtbl.8 d4, {q12}, d16 vtbl.8 d5, {q12}, d17 veor q14, q4, q9 vtbl.8 d6, {q13}, d16 vtbl.8 d7, {q13}, d17 veor q15, q5, q9 vtbl.8 d8, {q14}, d16 vtbl.8 d9, {q14}, d17 veor q10, q6, q9 vtbl.8 d10, {q15}, d16 vtbl.8 d11, {q15}, d17 veor q11, q7, q9 vtbl.8 d12, {q10}, d16 vtbl.8 d13, {q10}, d17 vtbl.8 d14, {q11}, d16 vtbl.8 d15, {q11}, d17 _bsaes_encrypt8_bitslice: vmov.i8 q8,#0x55 @ compose .LBS0 vmov.i8 q9,#0x33 @ compose .LBS1 vshr.u64 q10, q6, #1 vshr.u64 q11, q4, #1 veor q10, q10, q7 veor q11, q11, q5 vand q10, q10, q8 vand q11, q11, q8 veor q7, q7, q10 vshl.u64 q10, q10, #1 veor q5, q5, q11 vshl.u64 q11, q11, #1 veor q6, q6, q10 veor q4, q4, q11 vshr.u64 q10, q2, #1 vshr.u64 q11, q0, #1 veor q10, q10, q3 veor q11, q11, q1 vand q10, q10, q8 vand q11, q11, q8 veor q3, q3, q10 vshl.u64 q10, q10, #1 veor q1, q1, q11 vshl.u64 q11, q11, #1 veor q2, q2, q10 veor q0, q0, q11 vmov.i8 q8,#0x0f @ compose .LBS2 vshr.u64 q10, q5, #2 vshr.u64 q11, q4, #2 veor q10, q10, q7 veor q11, q11, q6 vand q10, q10, q9 vand q11, q11, q9 veor q7, q7, q10 vshl.u64 q10, q10, #2 veor q6, q6, q11 vshl.u64 q11, q11, #2 veor q5, q5, q10 veor q4, q4, q11 vshr.u64 q10, q1, #2 vshr.u64 q11, q0, #2 veor q10, q10, q3 veor q11, q11, q2 vand q10, q10, q9 vand q11, q11, q9 veor q3, q3, q10 vshl.u64 q10, q10, #2 veor q2, q2, q11 vshl.u64 q11, q11, #2 veor q1, q1, q10 veor q0, q0, q11 vshr.u64 q10, q3, #4 vshr.u64 q11, q2, #4 veor q10, q10, q7 veor q11, q11, q6 vand q10, q10, q8 vand q11, q11, q8 veor q7, q7, q10 vshl.u64 q10, q10, #4 veor q6, q6, q11 vshl.u64 q11, q11, #4 veor q3, q3, q10 veor q2, q2, q11 vshr.u64 q10, q1, #4 vshr.u64 q11, q0, #4 veor q10, q10, q5 veor q11, q11, q4 vand q10, q10, q8 vand q11, q11, q8 veor q5, q5, q10 vshl.u64 q10, q10, #4 veor q4, q4, q11 vshl.u64 q11, q11, #4 veor q1, q1, q10 veor q0, q0, 
q11 sub r5,r5,#1 b .Lenc_sbox .align 4 .Lenc_loop: vldmia r4!, {q8,q9,q10,q11} veor q8, q8, q0 veor q9, q9, q1 vtbl.8 d0, {q8}, d24 vtbl.8 d1, {q8}, d25 vldmia r4!, {q8} veor q10, q10, q2 vtbl.8 d2, {q9}, d24 vtbl.8 d3, {q9}, d25 vldmia r4!, {q9} veor q11, q11, q3 vtbl.8 d4, {q10}, d24 vtbl.8 d5, {q10}, d25 vldmia r4!, {q10} vtbl.8 d6, {q11}, d24 vtbl.8 d7, {q11}, d25 vldmia r4!, {q11} veor q8, q8, q4 veor q9, q9, q5 vtbl.8 d8, {q8}, d24 vtbl.8 d9, {q8}, d25 veor q10, q10, q6 vtbl.8 d10, {q9}, d24 vtbl.8 d11, {q9}, d25 veor q11, q11, q7 vtbl.8 d12, {q10}, d24 vtbl.8 d13, {q10}, d25 vtbl.8 d14, {q11}, d24 vtbl.8 d15, {q11}, d25 .Lenc_sbox: veor q2, q2, q1 veor q5, q5, q6 veor q3, q3, q0 veor q6, q6, q2 veor q5, q5, q0 veor q6, q6, q3 veor q3, q3, q7 veor q7, q7, q5 veor q3, q3, q4 veor q4, q4, q5 veor q2, q2, q7 veor q3, q3, q1 veor q1, q1, q5 veor q11, q7, q4 veor q10, q1, q2 veor q9, q5, q3 veor q13, q2, q4 vmov q8, q10 veor q12, q6, q0 vorr q10, q10, q9 veor q15, q11, q8 vand q14, q11, q12 vorr q11, q11, q12 veor q12, q12, q9 vand q8, q8, q9 veor q9, q3, q0 vand q15, q15, q12 vand q13, q13, q9 veor q9, q7, q1 veor q12, q5, q6 veor q11, q11, q13 veor q10, q10, q13 vand q13, q9, q12 vorr q9, q9, q12 veor q11, q11, q15 veor q8, q8, q13 veor q10, q10, q14 veor q9, q9, q15 veor q8, q8, q14 vand q12, q2, q3 veor q9, q9, q14 vand q13, q4, q0 vand q14, q1, q5 vorr q15, q7, q6 veor q11, q11, q12 veor q9, q9, q14 veor q8, q8, q15 veor q10, q10, q13 @ Inv_GF16 0, 1, 2, 3, s0, s1, s2, s3 @ new smaller inversion vand q14, q11, q9 vmov q12, q8 veor q13, q10, q14 veor q15, q8, q14 veor q14, q8, q14 @ q14=q15 vbsl q13, q9, q8 vbsl q15, q11, q10 veor q11, q11, q10 vbsl q12, q13, q14 vbsl q8, q14, q13 vand q14, q12, q15 veor q9, q9, q8 veor q14, q14, q11 veor q12, q6, q0 veor q8, q5, q3 veor q10, q15, q14 vand q10, q10, q6 veor q6, q6, q5 vand q11, q5, q15 vand q6, q6, q14 veor q5, q11, q10 veor q6, q6, q11 veor q15, q15, q13 veor q14, q14, q9 veor q11, q15, q14 veor q10, q13, q9 vand q11, q11, q12 vand q10, q10, q0 veor q12, q12, q8 veor q0, q0, q3 vand q8, q8, q15 vand q3, q3, q13 vand q12, q12, q14 vand q0, q0, q9 veor q8, q8, q12 veor q0, q0, q3 veor q12, q12, q11 veor q3, q3, q10 veor q6, q6, q12 veor q0, q0, q12 veor q5, q5, q8 veor q3, q3, q8 veor q12, q7, q4 veor q8, q1, q2 veor q11, q15, q14 veor q10, q13, q9 vand q11, q11, q12 vand q10, q10, q4 veor q12, q12, q8 veor q4, q4, q2 vand q8, q8, q15 vand q2, q2, q13 vand q12, q12, q14 vand q4, q4, q9 veor q8, q8, q12 veor q4, q4, q2 veor q12, q12, q11 veor q2, q2, q10 veor q15, q15, q13 veor q14, q14, q9 veor q10, q15, q14 vand q10, q10, q7 veor q7, q7, q1 vand q11, q1, q15 vand q7, q7, q14 veor q1, q11, q10 veor q7, q7, q11 veor q7, q7, q12 veor q4, q4, q12 veor q1, q1, q8 veor q2, q2, q8 veor q7, q7, q0 veor q1, q1, q6 veor q6, q6, q0 veor q4, q4, q7 veor q0, q0, q1 veor q1, q1, q5 veor q5, q5, q2 veor q2, q2, q3 veor q3, q3, q5 veor q4, q4, q5 veor q6, q6, q3 subs r5,r5,#1 bcc .Lenc_done vext.8 q8, q0, q0, #12 @ x0 <<< 32 vext.8 q9, q1, q1, #12 veor q0, q0, q8 @ x0 ^ (x0 <<< 32) vext.8 q10, q4, q4, #12 veor q1, q1, q9 vext.8 q11, q6, q6, #12 veor q4, q4, q10 vext.8 q12, q3, q3, #12 veor q6, q6, q11 vext.8 q13, q7, q7, #12 veor q3, q3, q12 vext.8 q14, q2, q2, #12 veor q7, q7, q13 vext.8 q15, q5, q5, #12 veor q2, q2, q14 veor q9, q9, q0 veor q5, q5, q15 vext.8 q0, q0, q0, #8 @ (x0 ^ (x0 <<< 32)) <<< 64) veor q10, q10, q1 veor q8, q8, q5 veor q9, q9, q5 vext.8 q1, q1, q1, #8 veor q13, q13, q3 veor q0, q0, q8 veor q14, q14, q7 veor q1, q1, q9 vext.8 
q8, q3, q3, #8 veor q12, q12, q6 vext.8 q9, q7, q7, #8 veor q15, q15, q2 vext.8 q3, q6, q6, #8 veor q11, q11, q4 vext.8 q7, q5, q5, #8 veor q12, q12, q5 vext.8 q6, q2, q2, #8 veor q11, q11, q5 vext.8 q2, q4, q4, #8 veor q5, q9, q13 veor q4, q8, q12 veor q3, q3, q11 veor q7, q7, q15 veor q6, q6, q14 @ vmov q4, q8 veor q2, q2, q10 @ vmov q5, q9 vldmia r6, {q12} @ .LSR ite eq @ Thumb2 thing, samity check in ARM addeq r6,r6,#0x10 bne .Lenc_loop vldmia r6, {q12} @ .LSRM0 b .Lenc_loop .align 4 .Lenc_done: vmov.i8 q8,#0x55 @ compose .LBS0 vmov.i8 q9,#0x33 @ compose .LBS1 vshr.u64 q10, q2, #1 vshr.u64 q11, q3, #1 veor q10, q10, q5 veor q11, q11, q7 vand q10, q10, q8 vand q11, q11, q8 veor q5, q5, q10 vshl.u64 q10, q10, #1 veor q7, q7, q11 vshl.u64 q11, q11, #1 veor q2, q2, q10 veor q3, q3, q11 vshr.u64 q10, q4, #1 vshr.u64 q11, q0, #1 veor q10, q10, q6 veor q11, q11, q1 vand q10, q10, q8 vand q11, q11, q8 veor q6, q6, q10 vshl.u64 q10, q10, #1 veor q1, q1, q11 vshl.u64 q11, q11, #1 veor q4, q4, q10 veor q0, q0, q11 vmov.i8 q8,#0x0f @ compose .LBS2 vshr.u64 q10, q7, #2 vshr.u64 q11, q3, #2 veor q10, q10, q5 veor q11, q11, q2 vand q10, q10, q9 vand q11, q11, q9 veor q5, q5, q10 vshl.u64 q10, q10, #2 veor q2, q2, q11 vshl.u64 q11, q11, #2 veor q7, q7, q10 veor q3, q3, q11 vshr.u64 q10, q1, #2 vshr.u64 q11, q0, #2 veor q10, q10, q6 veor q11, q11, q4 vand q10, q10, q9 vand q11, q11, q9 veor q6, q6, q10 vshl.u64 q10, q10, #2 veor q4, q4, q11 vshl.u64 q11, q11, #2 veor q1, q1, q10 veor q0, q0, q11 vshr.u64 q10, q6, #4 vshr.u64 q11, q4, #4 veor q10, q10, q5 veor q11, q11, q2 vand q10, q10, q8 vand q11, q11, q8 veor q5, q5, q10 vshl.u64 q10, q10, #4 veor q2, q2, q11 vshl.u64 q11, q11, #4 veor q6, q6, q10 veor q4, q4, q11 vshr.u64 q10, q1, #4 vshr.u64 q11, q0, #4 veor q10, q10, q7 veor q11, q11, q3 vand q10, q10, q8 vand q11, q11, q8 veor q7, q7, q10 vshl.u64 q10, q10, #4 veor q3, q3, q11 vshl.u64 q11, q11, #4 veor q1, q1, q10 veor q0, q0, q11 vldmia r4, {q8} @ last round key veor q4, q4, q8 veor q6, q6, q8 veor q3, q3, q8 veor q7, q7, q8 veor q2, q2, q8 veor q5, q5, q8 veor q0, q0, q8 veor q1, q1, q8 bx lr .size _bsaes_encrypt8,.-_bsaes_encrypt8 .type _bsaes_key_convert,%function .align 4 _bsaes_key_convert: adr r6,. vld1.8 {q7}, [r4]! @ load round 0 key #if defined(__thumb2__) || defined(__APPLE__) adr r6,.LM0 #else sub r6,r6,#_bsaes_key_convert-.LM0 #endif vld1.8 {q15}, [r4]! @ load round 1 key vmov.i8 q8, #0x01 @ bit masks vmov.i8 q9, #0x02 vmov.i8 q10, #0x04 vmov.i8 q11, #0x08 vmov.i8 q12, #0x10 vmov.i8 q13, #0x20 vldmia r6, {q14} @ .LM0 #ifdef __ARMEL__ vrev32.8 q7, q7 vrev32.8 q15, q15 #endif sub r5,r5,#1 vstmia r12!, {q7} @ save round 0 key b .Lkey_loop .align 4 .Lkey_loop: vtbl.8 d14,{q15},d28 vtbl.8 d15,{q15},d29 vmov.i8 q6, #0x40 vmov.i8 q15, #0x80 vtst.8 q0, q7, q8 vtst.8 q1, q7, q9 vtst.8 q2, q7, q10 vtst.8 q3, q7, q11 vtst.8 q4, q7, q12 vtst.8 q5, q7, q13 vtst.8 q6, q7, q6 vtst.8 q7, q7, q15 vld1.8 {q15}, [r4]! @ load next round key vmvn q0, q0 @ "pnot" vmvn q1, q1 vmvn q5, q5 vmvn q6, q6 #ifdef __ARMEL__ vrev32.8 q15, q15 #endif subs r5,r5,#1 vstmia r12!,{q0,q1,q2,q3,q4,q5,q6,q7} @ write bit-sliced round key bne .Lkey_loop vmov.i8 q7,#0x63 @ compose .L63 @ don't save last round key bx lr .size _bsaes_key_convert,.-_bsaes_key_convert .globl bsaes_ctr32_encrypt_blocks .hidden bsaes_ctr32_encrypt_blocks .type bsaes_ctr32_encrypt_blocks,%function .align 5 bsaes_ctr32_encrypt_blocks: @ In OpenSSL, short inputs fall back to aes_nohw_* here. 
We patch this @ out to retain a constant-time implementation. mov ip, sp stmdb sp!, {r4,r5,r6,r7,r8,r9,r10, lr} VFP_ABI_PUSH ldr r8, [ip] @ ctr is 1st arg on the stack sub sp, sp, #0x10 @ scratch space to carry over the ctr mov r9, sp @ save sp ldr r10, [r3, #240] @ get # of rounds #ifndef BSAES_ASM_EXTENDED_KEY @ allocate the key schedule on the stack sub r12, sp, r10, lsl#7 @ 128 bytes per inner round key add r12, #96 @ size of bit-sliced key schedule @ populate the key schedule mov r4, r3 @ pass key mov r5, r10 @ pass # of rounds mov sp, r12 @ sp is sp bl _bsaes_key_convert veor q7,q7,q15 @ fix up last round key vstmia r12, {q7} @ save last round key vld1.8 {q0}, [r8] @ load counter #ifdef __APPLE__ mov r8, #:lower16:(.LREVM0SR-.LM0) add r8, r6, r8 #else add r8, r6, #.LREVM0SR-.LM0 @ borrow r8 #endif vldmia sp, {q4} @ load round0 key #else ldr r12, [r3, #244] eors r12, #1 beq 0f @ populate the key schedule str r12, [r3, #244] mov r4, r3 @ pass key mov r5, r10 @ pass # of rounds add r12, r3, #248 @ pass key schedule bl _bsaes_key_convert veor q7,q7,q15 @ fix up last round key vstmia r12, {q7} @ save last round key .align 2 add r12, r3, #248 vld1.8 {q0}, [r8] @ load counter adrl r8, .LREVM0SR @ borrow r8 vldmia r12, {q4} @ load round0 key sub sp, #0x10 @ place for adjusted round0 key #endif vmov.i32 q8,#1 @ compose 1<<96 veor q9,q9,q9 vrev32.8 q0,q0 vext.8 q8,q9,q8,#4 vrev32.8 q4,q4 vadd.u32 q9,q8,q8 @ compose 2<<96 vstmia sp, {q4} @ save adjusted round0 key b .Lctr_enc_loop .align 4 .Lctr_enc_loop: vadd.u32 q10, q8, q9 @ compose 3<<96 vadd.u32 q1, q0, q8 @ +1 vadd.u32 q2, q0, q9 @ +2 vadd.u32 q3, q0, q10 @ +3 vadd.u32 q4, q1, q10 vadd.u32 q5, q2, q10 vadd.u32 q6, q3, q10 vadd.u32 q7, q4, q10 vadd.u32 q10, q5, q10 @ next counter @ Borrow prologue from _bsaes_encrypt8 to use the opportunity @ to flip byte order in 32-bit counter vldmia sp, {q9} @ load round0 key #ifndef BSAES_ASM_EXTENDED_KEY add r4, sp, #0x10 @ pass next round key #else add r4, r3, #264 #endif vldmia r8, {q8} @ .LREVM0SR mov r5, r10 @ pass rounds vstmia r9, {q10} @ save next counter #ifdef __APPLE__ mov r6, #:lower16:(.LREVM0SR-.LSR) sub r6, r8, r6 #else sub r6, r8, #.LREVM0SR-.LSR @ pass constants #endif bl _bsaes_encrypt8_alt subs r2, r2, #8 blo .Lctr_enc_loop_done vld1.8 {q8,q9}, [r0]! @ load input vld1.8 {q10,q11}, [r0]! veor q0, q8 veor q1, q9 vld1.8 {q12,q13}, [r0]! veor q4, q10 veor q6, q11 vld1.8 {q14,q15}, [r0]! veor q3, q12 vst1.8 {q0,q1}, [r1]! @ write output veor q7, q13 veor q2, q14 vst1.8 {q4}, [r1]! veor q5, q15 vst1.8 {q6}, [r1]! vmov.i32 q8, #1 @ compose 1<<96 vst1.8 {q3}, [r1]! veor q9, q9, q9 vst1.8 {q7}, [r1]! vext.8 q8, q9, q8, #4 vst1.8 {q2}, [r1]! vadd.u32 q9,q8,q8 @ compose 2<<96 vst1.8 {q5}, [r1]! vldmia r9, {q0} @ load counter bne .Lctr_enc_loop b .Lctr_enc_done .align 4 .Lctr_enc_loop_done: add r2, r2, #8 vld1.8 {q8}, [r0]! @ load input veor q0, q8 vst1.8 {q0}, [r1]! @ write output cmp r2, #2 blo .Lctr_enc_done vld1.8 {q9}, [r0]! veor q1, q9 vst1.8 {q1}, [r1]! beq .Lctr_enc_done vld1.8 {q10}, [r0]! veor q4, q10 vst1.8 {q4}, [r1]! cmp r2, #4 blo .Lctr_enc_done vld1.8 {q11}, [r0]! veor q6, q11 vst1.8 {q6}, [r1]! beq .Lctr_enc_done vld1.8 {q12}, [r0]! veor q3, q12 vst1.8 {q3}, [r1]! cmp r2, #6 blo .Lctr_enc_done vld1.8 {q13}, [r0]! veor q7, q13 vst1.8 {q7}, [r1]! beq .Lctr_enc_done vld1.8 {q14}, [r0] veor q2, q14 vst1.8 {q2}, [r1]! 
.Lctr_enc_done: vmov.i32 q0, #0 vmov.i32 q1, #0 #ifndef BSAES_ASM_EXTENDED_KEY .Lctr_enc_bzero:@ wipe key schedule [if any] vstmia sp!, {q0,q1} cmp sp, r9 bne .Lctr_enc_bzero #else vstmia sp, {q0,q1} #endif mov sp, r9 add sp, #0x10 @ add sp,r9,#0x10 is no good for thumb VFP_ABI_POP ldmia sp!, {r4,r5,r6,r7,r8,r9,r10, pc} @ return @ OpenSSL contains aes_nohw_* fallback code here. We patch this @ out to retain a constant-time implementation. .size bsaes_ctr32_encrypt_blocks,.-bsaes_ctr32_encrypt_blocks #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__)
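bsaes_ctr32_encrypt_blocks above keeps a 32-bit big-endian counter in the final word of the counter block (the "compose 1<<96" increments after the byte-order flip) and encrypts eight counter blocks per loop iteration. A minimal single-block sketch of that CTR32 construction, with the AES block encryption abstracted behind a hypothetical closure:

// CTR mode with a 32-bit big-endian counter in bytes 12..16 of the IV,
// in the spirit of bsaes_ctr32_encrypt_blocks (the assembly processes
// eight blocks at a time; this sketch does one). `block_encrypt` stands
// in for the bit-sliced AES block function and is an assumption here.
fn ctr32_xor(block_encrypt: impl Fn(&[u8; 16]) -> [u8; 16],
             iv: &[u8; 16], data: &mut [u8]) {
    let mut counter = u32::from_be_bytes([iv[12], iv[13], iv[14], iv[15]]);
    let mut block = *iv;
    for chunk in data.chunks_mut(16) {
        block[12..16].copy_from_slice(&counter.to_be_bytes());
        let keystream = block_encrypt(&block);
        for (b, k) in chunk.iter_mut().zip(keystream.iter()) {
            *b ^= *k; // XOR keystream into the data in place
        }
        counter = counter.wrapping_add(1);
    }
}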
fatiimajamiil/rustpad-custom
70,675
.cargo/registry/src/index.crates.io-6f17d22bba15001f/ring-0.17.14/pregenerated/sha256-x86_64-elf.S
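The record below is the pregenerated x86-64 SHA-256 code (scalar, SHA extension, SSSE3 and AVX paths). As a preview of what the unrolled .Lloop and .Lrounds_16_xx bodies compute, one compression round with a constant from the K256 table can be sketched as follows; this is the textbook formulation of the round, not the register scheduling used in the listing:

// One SHA-256 compression round: `state` holds (a..h), `k` is a K256
// round constant, `w` the current message-schedule word. The scalar
// assembly below evaluates the same Sigma, Ch and Maj terms in
// registers such as %r13d and %r14d.
fn sha256_round(state: &mut [u32; 8], k: u32, w: u32) {
    let [a, b, c, d, e, f, g, h] = *state;
    let big_s1 = e.rotate_right(6) ^ e.rotate_right(11) ^ e.rotate_right(25);
    let ch = (e & f) ^ (!e & g);
    let t1 = h
        .wrapping_add(big_s1)
        .wrapping_add(ch)
        .wrapping_add(k)
        .wrapping_add(w);
    let big_s0 = a.rotate_right(2) ^ a.rotate_right(13) ^ a.rotate_right(22);
    let maj = (a & b) ^ (a & c) ^ (b & c);
    let t2 = big_s0.wrapping_add(maj);
    *state = [t1.wrapping_add(t2), a, b, c, d.wrapping_add(t1), e, f, g];
}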
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__) .text .globl sha256_block_data_order_nohw .hidden sha256_block_data_order_nohw .type sha256_block_data_order_nohw,@function .align 16 sha256_block_data_order_nohw: .cfi_startproc _CET_ENDBR movq %rsp,%rax .cfi_def_cfa_register %rax pushq %rbx .cfi_offset %rbx,-16 pushq %rbp .cfi_offset %rbp,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset %r13,-40 pushq %r14 .cfi_offset %r14,-48 pushq %r15 .cfi_offset %r15,-56 shlq $4,%rdx subq $64+32,%rsp leaq (%rsi,%rdx,4),%rdx andq $-64,%rsp movq %rdi,64+0(%rsp) movq %rsi,64+8(%rsp) movq %rdx,64+16(%rsp) movq %rax,88(%rsp) .cfi_escape 0x0f,0x06,0x77,0xd8,0x00,0x06,0x23,0x08 .Lprologue: movl 0(%rdi),%eax movl 4(%rdi),%ebx movl 8(%rdi),%ecx movl 12(%rdi),%edx movl 16(%rdi),%r8d movl 20(%rdi),%r9d movl 24(%rdi),%r10d movl 28(%rdi),%r11d jmp .Lloop .align 16 .Lloop: movl %ebx,%edi leaq K256(%rip),%rbp xorl %ecx,%edi movl 0(%rsi),%r12d movl %r8d,%r13d movl %eax,%r14d bswapl %r12d rorl $14,%r13d movl %r9d,%r15d xorl %r8d,%r13d rorl $9,%r14d xorl %r10d,%r15d movl %r12d,0(%rsp) xorl %eax,%r14d andl %r8d,%r15d rorl $5,%r13d addl %r11d,%r12d xorl %r10d,%r15d rorl $11,%r14d xorl %r8d,%r13d addl %r15d,%r12d movl %eax,%r15d addl (%rbp),%r12d xorl %eax,%r14d xorl %ebx,%r15d rorl $6,%r13d movl %ebx,%r11d andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%r11d addl %r12d,%edx addl %r12d,%r11d leaq 4(%rbp),%rbp addl %r14d,%r11d movl 4(%rsi),%r12d movl %edx,%r13d movl %r11d,%r14d bswapl %r12d rorl $14,%r13d movl %r8d,%edi xorl %edx,%r13d rorl $9,%r14d xorl %r9d,%edi movl %r12d,4(%rsp) xorl %r11d,%r14d andl %edx,%edi rorl $5,%r13d addl %r10d,%r12d xorl %r9d,%edi rorl $11,%r14d xorl %edx,%r13d addl %edi,%r12d movl %r11d,%edi addl (%rbp),%r12d xorl %r11d,%r14d xorl %eax,%edi rorl $6,%r13d movl %eax,%r10d andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%r10d addl %r12d,%ecx addl %r12d,%r10d leaq 4(%rbp),%rbp addl %r14d,%r10d movl 8(%rsi),%r12d movl %ecx,%r13d movl %r10d,%r14d bswapl %r12d rorl $14,%r13d movl %edx,%r15d xorl %ecx,%r13d rorl $9,%r14d xorl %r8d,%r15d movl %r12d,8(%rsp) xorl %r10d,%r14d andl %ecx,%r15d rorl $5,%r13d addl %r9d,%r12d xorl %r8d,%r15d rorl $11,%r14d xorl %ecx,%r13d addl %r15d,%r12d movl %r10d,%r15d addl (%rbp),%r12d xorl %r10d,%r14d xorl %r11d,%r15d rorl $6,%r13d movl %r11d,%r9d andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%r9d addl %r12d,%ebx addl %r12d,%r9d leaq 4(%rbp),%rbp addl %r14d,%r9d movl 12(%rsi),%r12d movl %ebx,%r13d movl %r9d,%r14d bswapl %r12d rorl $14,%r13d movl %ecx,%edi xorl %ebx,%r13d rorl $9,%r14d xorl %edx,%edi movl %r12d,12(%rsp) xorl %r9d,%r14d andl %ebx,%edi rorl $5,%r13d addl %r8d,%r12d xorl %edx,%edi rorl $11,%r14d xorl %ebx,%r13d addl %edi,%r12d movl %r9d,%edi addl (%rbp),%r12d xorl %r9d,%r14d xorl %r10d,%edi rorl $6,%r13d movl %r10d,%r8d andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%r8d addl %r12d,%eax addl %r12d,%r8d leaq 20(%rbp),%rbp addl %r14d,%r8d movl 16(%rsi),%r12d movl %eax,%r13d movl %r8d,%r14d bswapl %r12d rorl $14,%r13d movl %ebx,%r15d xorl %eax,%r13d rorl $9,%r14d xorl %ecx,%r15d movl %r12d,16(%rsp) xorl %r8d,%r14d andl %eax,%r15d rorl $5,%r13d addl %edx,%r12d xorl %ecx,%r15d rorl $11,%r14d xorl %eax,%r13d addl %r15d,%r12d movl %r8d,%r15d addl (%rbp),%r12d xorl %r8d,%r14d xorl %r9d,%r15d rorl $6,%r13d movl %r9d,%edx andl %r15d,%edi rorl 
$2,%r14d addl %r13d,%r12d xorl %edi,%edx addl %r12d,%r11d addl %r12d,%edx leaq 4(%rbp),%rbp addl %r14d,%edx movl 20(%rsi),%r12d movl %r11d,%r13d movl %edx,%r14d bswapl %r12d rorl $14,%r13d movl %eax,%edi xorl %r11d,%r13d rorl $9,%r14d xorl %ebx,%edi movl %r12d,20(%rsp) xorl %edx,%r14d andl %r11d,%edi rorl $5,%r13d addl %ecx,%r12d xorl %ebx,%edi rorl $11,%r14d xorl %r11d,%r13d addl %edi,%r12d movl %edx,%edi addl (%rbp),%r12d xorl %edx,%r14d xorl %r8d,%edi rorl $6,%r13d movl %r8d,%ecx andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%ecx addl %r12d,%r10d addl %r12d,%ecx leaq 4(%rbp),%rbp addl %r14d,%ecx movl 24(%rsi),%r12d movl %r10d,%r13d movl %ecx,%r14d bswapl %r12d rorl $14,%r13d movl %r11d,%r15d xorl %r10d,%r13d rorl $9,%r14d xorl %eax,%r15d movl %r12d,24(%rsp) xorl %ecx,%r14d andl %r10d,%r15d rorl $5,%r13d addl %ebx,%r12d xorl %eax,%r15d rorl $11,%r14d xorl %r10d,%r13d addl %r15d,%r12d movl %ecx,%r15d addl (%rbp),%r12d xorl %ecx,%r14d xorl %edx,%r15d rorl $6,%r13d movl %edx,%ebx andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%ebx addl %r12d,%r9d addl %r12d,%ebx leaq 4(%rbp),%rbp addl %r14d,%ebx movl 28(%rsi),%r12d movl %r9d,%r13d movl %ebx,%r14d bswapl %r12d rorl $14,%r13d movl %r10d,%edi xorl %r9d,%r13d rorl $9,%r14d xorl %r11d,%edi movl %r12d,28(%rsp) xorl %ebx,%r14d andl %r9d,%edi rorl $5,%r13d addl %eax,%r12d xorl %r11d,%edi rorl $11,%r14d xorl %r9d,%r13d addl %edi,%r12d movl %ebx,%edi addl (%rbp),%r12d xorl %ebx,%r14d xorl %ecx,%edi rorl $6,%r13d movl %ecx,%eax andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%eax addl %r12d,%r8d addl %r12d,%eax leaq 20(%rbp),%rbp addl %r14d,%eax movl 32(%rsi),%r12d movl %r8d,%r13d movl %eax,%r14d bswapl %r12d rorl $14,%r13d movl %r9d,%r15d xorl %r8d,%r13d rorl $9,%r14d xorl %r10d,%r15d movl %r12d,32(%rsp) xorl %eax,%r14d andl %r8d,%r15d rorl $5,%r13d addl %r11d,%r12d xorl %r10d,%r15d rorl $11,%r14d xorl %r8d,%r13d addl %r15d,%r12d movl %eax,%r15d addl (%rbp),%r12d xorl %eax,%r14d xorl %ebx,%r15d rorl $6,%r13d movl %ebx,%r11d andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%r11d addl %r12d,%edx addl %r12d,%r11d leaq 4(%rbp),%rbp addl %r14d,%r11d movl 36(%rsi),%r12d movl %edx,%r13d movl %r11d,%r14d bswapl %r12d rorl $14,%r13d movl %r8d,%edi xorl %edx,%r13d rorl $9,%r14d xorl %r9d,%edi movl %r12d,36(%rsp) xorl %r11d,%r14d andl %edx,%edi rorl $5,%r13d addl %r10d,%r12d xorl %r9d,%edi rorl $11,%r14d xorl %edx,%r13d addl %edi,%r12d movl %r11d,%edi addl (%rbp),%r12d xorl %r11d,%r14d xorl %eax,%edi rorl $6,%r13d movl %eax,%r10d andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%r10d addl %r12d,%ecx addl %r12d,%r10d leaq 4(%rbp),%rbp addl %r14d,%r10d movl 40(%rsi),%r12d movl %ecx,%r13d movl %r10d,%r14d bswapl %r12d rorl $14,%r13d movl %edx,%r15d xorl %ecx,%r13d rorl $9,%r14d xorl %r8d,%r15d movl %r12d,40(%rsp) xorl %r10d,%r14d andl %ecx,%r15d rorl $5,%r13d addl %r9d,%r12d xorl %r8d,%r15d rorl $11,%r14d xorl %ecx,%r13d addl %r15d,%r12d movl %r10d,%r15d addl (%rbp),%r12d xorl %r10d,%r14d xorl %r11d,%r15d rorl $6,%r13d movl %r11d,%r9d andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%r9d addl %r12d,%ebx addl %r12d,%r9d leaq 4(%rbp),%rbp addl %r14d,%r9d movl 44(%rsi),%r12d movl %ebx,%r13d movl %r9d,%r14d bswapl %r12d rorl $14,%r13d movl %ecx,%edi xorl %ebx,%r13d rorl $9,%r14d xorl %edx,%edi movl %r12d,44(%rsp) xorl %r9d,%r14d andl %ebx,%edi rorl $5,%r13d addl %r8d,%r12d xorl %edx,%edi rorl $11,%r14d xorl %ebx,%r13d addl %edi,%r12d movl %r9d,%edi addl (%rbp),%r12d xorl %r9d,%r14d xorl %r10d,%edi rorl $6,%r13d 
movl %r10d,%r8d andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%r8d addl %r12d,%eax addl %r12d,%r8d leaq 20(%rbp),%rbp addl %r14d,%r8d movl 48(%rsi),%r12d movl %eax,%r13d movl %r8d,%r14d bswapl %r12d rorl $14,%r13d movl %ebx,%r15d xorl %eax,%r13d rorl $9,%r14d xorl %ecx,%r15d movl %r12d,48(%rsp) xorl %r8d,%r14d andl %eax,%r15d rorl $5,%r13d addl %edx,%r12d xorl %ecx,%r15d rorl $11,%r14d xorl %eax,%r13d addl %r15d,%r12d movl %r8d,%r15d addl (%rbp),%r12d xorl %r8d,%r14d xorl %r9d,%r15d rorl $6,%r13d movl %r9d,%edx andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%edx addl %r12d,%r11d addl %r12d,%edx leaq 4(%rbp),%rbp addl %r14d,%edx movl 52(%rsi),%r12d movl %r11d,%r13d movl %edx,%r14d bswapl %r12d rorl $14,%r13d movl %eax,%edi xorl %r11d,%r13d rorl $9,%r14d xorl %ebx,%edi movl %r12d,52(%rsp) xorl %edx,%r14d andl %r11d,%edi rorl $5,%r13d addl %ecx,%r12d xorl %ebx,%edi rorl $11,%r14d xorl %r11d,%r13d addl %edi,%r12d movl %edx,%edi addl (%rbp),%r12d xorl %edx,%r14d xorl %r8d,%edi rorl $6,%r13d movl %r8d,%ecx andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%ecx addl %r12d,%r10d addl %r12d,%ecx leaq 4(%rbp),%rbp addl %r14d,%ecx movl 56(%rsi),%r12d movl %r10d,%r13d movl %ecx,%r14d bswapl %r12d rorl $14,%r13d movl %r11d,%r15d xorl %r10d,%r13d rorl $9,%r14d xorl %eax,%r15d movl %r12d,56(%rsp) xorl %ecx,%r14d andl %r10d,%r15d rorl $5,%r13d addl %ebx,%r12d xorl %eax,%r15d rorl $11,%r14d xorl %r10d,%r13d addl %r15d,%r12d movl %ecx,%r15d addl (%rbp),%r12d xorl %ecx,%r14d xorl %edx,%r15d rorl $6,%r13d movl %edx,%ebx andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%ebx addl %r12d,%r9d addl %r12d,%ebx leaq 4(%rbp),%rbp addl %r14d,%ebx movl 60(%rsi),%r12d movl %r9d,%r13d movl %ebx,%r14d bswapl %r12d rorl $14,%r13d movl %r10d,%edi xorl %r9d,%r13d rorl $9,%r14d xorl %r11d,%edi movl %r12d,60(%rsp) xorl %ebx,%r14d andl %r9d,%edi rorl $5,%r13d addl %eax,%r12d xorl %r11d,%edi rorl $11,%r14d xorl %r9d,%r13d addl %edi,%r12d movl %ebx,%edi addl (%rbp),%r12d xorl %ebx,%r14d xorl %ecx,%edi rorl $6,%r13d movl %ecx,%eax andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%eax addl %r12d,%r8d addl %r12d,%eax leaq 20(%rbp),%rbp jmp .Lrounds_16_xx .align 16 .Lrounds_16_xx: movl 4(%rsp),%r13d movl 56(%rsp),%r15d movl %r13d,%r12d rorl $11,%r13d addl %r14d,%eax movl %r15d,%r14d rorl $2,%r15d xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%r15d shrl $10,%r14d rorl $17,%r15d xorl %r13d,%r12d xorl %r14d,%r15d addl 36(%rsp),%r12d addl 0(%rsp),%r12d movl %r8d,%r13d addl %r15d,%r12d movl %eax,%r14d rorl $14,%r13d movl %r9d,%r15d xorl %r8d,%r13d rorl $9,%r14d xorl %r10d,%r15d movl %r12d,0(%rsp) xorl %eax,%r14d andl %r8d,%r15d rorl $5,%r13d addl %r11d,%r12d xorl %r10d,%r15d rorl $11,%r14d xorl %r8d,%r13d addl %r15d,%r12d movl %eax,%r15d addl (%rbp),%r12d xorl %eax,%r14d xorl %ebx,%r15d rorl $6,%r13d movl %ebx,%r11d andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%r11d addl %r12d,%edx addl %r12d,%r11d leaq 4(%rbp),%rbp movl 8(%rsp),%r13d movl 60(%rsp),%edi movl %r13d,%r12d rorl $11,%r13d addl %r14d,%r11d movl %edi,%r14d rorl $2,%edi xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%edi shrl $10,%r14d rorl $17,%edi xorl %r13d,%r12d xorl %r14d,%edi addl 40(%rsp),%r12d addl 4(%rsp),%r12d movl %edx,%r13d addl %edi,%r12d movl %r11d,%r14d rorl $14,%r13d movl %r8d,%edi xorl %edx,%r13d rorl $9,%r14d xorl %r9d,%edi movl %r12d,4(%rsp) xorl %r11d,%r14d andl %edx,%edi rorl $5,%r13d addl %r10d,%r12d xorl %r9d,%edi rorl $11,%r14d xorl %edx,%r13d addl %edi,%r12d movl %r11d,%edi 
addl (%rbp),%r12d xorl %r11d,%r14d xorl %eax,%edi rorl $6,%r13d movl %eax,%r10d andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%r10d addl %r12d,%ecx addl %r12d,%r10d leaq 4(%rbp),%rbp movl 12(%rsp),%r13d movl 0(%rsp),%r15d movl %r13d,%r12d rorl $11,%r13d addl %r14d,%r10d movl %r15d,%r14d rorl $2,%r15d xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%r15d shrl $10,%r14d rorl $17,%r15d xorl %r13d,%r12d xorl %r14d,%r15d addl 44(%rsp),%r12d addl 8(%rsp),%r12d movl %ecx,%r13d addl %r15d,%r12d movl %r10d,%r14d rorl $14,%r13d movl %edx,%r15d xorl %ecx,%r13d rorl $9,%r14d xorl %r8d,%r15d movl %r12d,8(%rsp) xorl %r10d,%r14d andl %ecx,%r15d rorl $5,%r13d addl %r9d,%r12d xorl %r8d,%r15d rorl $11,%r14d xorl %ecx,%r13d addl %r15d,%r12d movl %r10d,%r15d addl (%rbp),%r12d xorl %r10d,%r14d xorl %r11d,%r15d rorl $6,%r13d movl %r11d,%r9d andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%r9d addl %r12d,%ebx addl %r12d,%r9d leaq 4(%rbp),%rbp movl 16(%rsp),%r13d movl 4(%rsp),%edi movl %r13d,%r12d rorl $11,%r13d addl %r14d,%r9d movl %edi,%r14d rorl $2,%edi xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%edi shrl $10,%r14d rorl $17,%edi xorl %r13d,%r12d xorl %r14d,%edi addl 48(%rsp),%r12d addl 12(%rsp),%r12d movl %ebx,%r13d addl %edi,%r12d movl %r9d,%r14d rorl $14,%r13d movl %ecx,%edi xorl %ebx,%r13d rorl $9,%r14d xorl %edx,%edi movl %r12d,12(%rsp) xorl %r9d,%r14d andl %ebx,%edi rorl $5,%r13d addl %r8d,%r12d xorl %edx,%edi rorl $11,%r14d xorl %ebx,%r13d addl %edi,%r12d movl %r9d,%edi addl (%rbp),%r12d xorl %r9d,%r14d xorl %r10d,%edi rorl $6,%r13d movl %r10d,%r8d andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%r8d addl %r12d,%eax addl %r12d,%r8d leaq 20(%rbp),%rbp movl 20(%rsp),%r13d movl 8(%rsp),%r15d movl %r13d,%r12d rorl $11,%r13d addl %r14d,%r8d movl %r15d,%r14d rorl $2,%r15d xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%r15d shrl $10,%r14d rorl $17,%r15d xorl %r13d,%r12d xorl %r14d,%r15d addl 52(%rsp),%r12d addl 16(%rsp),%r12d movl %eax,%r13d addl %r15d,%r12d movl %r8d,%r14d rorl $14,%r13d movl %ebx,%r15d xorl %eax,%r13d rorl $9,%r14d xorl %ecx,%r15d movl %r12d,16(%rsp) xorl %r8d,%r14d andl %eax,%r15d rorl $5,%r13d addl %edx,%r12d xorl %ecx,%r15d rorl $11,%r14d xorl %eax,%r13d addl %r15d,%r12d movl %r8d,%r15d addl (%rbp),%r12d xorl %r8d,%r14d xorl %r9d,%r15d rorl $6,%r13d movl %r9d,%edx andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%edx addl %r12d,%r11d addl %r12d,%edx leaq 4(%rbp),%rbp movl 24(%rsp),%r13d movl 12(%rsp),%edi movl %r13d,%r12d rorl $11,%r13d addl %r14d,%edx movl %edi,%r14d rorl $2,%edi xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%edi shrl $10,%r14d rorl $17,%edi xorl %r13d,%r12d xorl %r14d,%edi addl 56(%rsp),%r12d addl 20(%rsp),%r12d movl %r11d,%r13d addl %edi,%r12d movl %edx,%r14d rorl $14,%r13d movl %eax,%edi xorl %r11d,%r13d rorl $9,%r14d xorl %ebx,%edi movl %r12d,20(%rsp) xorl %edx,%r14d andl %r11d,%edi rorl $5,%r13d addl %ecx,%r12d xorl %ebx,%edi rorl $11,%r14d xorl %r11d,%r13d addl %edi,%r12d movl %edx,%edi addl (%rbp),%r12d xorl %edx,%r14d xorl %r8d,%edi rorl $6,%r13d movl %r8d,%ecx andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%ecx addl %r12d,%r10d addl %r12d,%ecx leaq 4(%rbp),%rbp movl 28(%rsp),%r13d movl 16(%rsp),%r15d movl %r13d,%r12d rorl $11,%r13d addl %r14d,%ecx movl %r15d,%r14d rorl $2,%r15d xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%r15d shrl $10,%r14d rorl $17,%r15d xorl %r13d,%r12d xorl %r14d,%r15d addl 60(%rsp),%r12d addl 24(%rsp),%r12d movl %r10d,%r13d addl %r15d,%r12d 
movl %ecx,%r14d rorl $14,%r13d movl %r11d,%r15d xorl %r10d,%r13d rorl $9,%r14d xorl %eax,%r15d movl %r12d,24(%rsp) xorl %ecx,%r14d andl %r10d,%r15d rorl $5,%r13d addl %ebx,%r12d xorl %eax,%r15d rorl $11,%r14d xorl %r10d,%r13d addl %r15d,%r12d movl %ecx,%r15d addl (%rbp),%r12d xorl %ecx,%r14d xorl %edx,%r15d rorl $6,%r13d movl %edx,%ebx andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%ebx addl %r12d,%r9d addl %r12d,%ebx leaq 4(%rbp),%rbp movl 32(%rsp),%r13d movl 20(%rsp),%edi movl %r13d,%r12d rorl $11,%r13d addl %r14d,%ebx movl %edi,%r14d rorl $2,%edi xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%edi shrl $10,%r14d rorl $17,%edi xorl %r13d,%r12d xorl %r14d,%edi addl 0(%rsp),%r12d addl 28(%rsp),%r12d movl %r9d,%r13d addl %edi,%r12d movl %ebx,%r14d rorl $14,%r13d movl %r10d,%edi xorl %r9d,%r13d rorl $9,%r14d xorl %r11d,%edi movl %r12d,28(%rsp) xorl %ebx,%r14d andl %r9d,%edi rorl $5,%r13d addl %eax,%r12d xorl %r11d,%edi rorl $11,%r14d xorl %r9d,%r13d addl %edi,%r12d movl %ebx,%edi addl (%rbp),%r12d xorl %ebx,%r14d xorl %ecx,%edi rorl $6,%r13d movl %ecx,%eax andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%eax addl %r12d,%r8d addl %r12d,%eax leaq 20(%rbp),%rbp movl 36(%rsp),%r13d movl 24(%rsp),%r15d movl %r13d,%r12d rorl $11,%r13d addl %r14d,%eax movl %r15d,%r14d rorl $2,%r15d xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%r15d shrl $10,%r14d rorl $17,%r15d xorl %r13d,%r12d xorl %r14d,%r15d addl 4(%rsp),%r12d addl 32(%rsp),%r12d movl %r8d,%r13d addl %r15d,%r12d movl %eax,%r14d rorl $14,%r13d movl %r9d,%r15d xorl %r8d,%r13d rorl $9,%r14d xorl %r10d,%r15d movl %r12d,32(%rsp) xorl %eax,%r14d andl %r8d,%r15d rorl $5,%r13d addl %r11d,%r12d xorl %r10d,%r15d rorl $11,%r14d xorl %r8d,%r13d addl %r15d,%r12d movl %eax,%r15d addl (%rbp),%r12d xorl %eax,%r14d xorl %ebx,%r15d rorl $6,%r13d movl %ebx,%r11d andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%r11d addl %r12d,%edx addl %r12d,%r11d leaq 4(%rbp),%rbp movl 40(%rsp),%r13d movl 28(%rsp),%edi movl %r13d,%r12d rorl $11,%r13d addl %r14d,%r11d movl %edi,%r14d rorl $2,%edi xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%edi shrl $10,%r14d rorl $17,%edi xorl %r13d,%r12d xorl %r14d,%edi addl 8(%rsp),%r12d addl 36(%rsp),%r12d movl %edx,%r13d addl %edi,%r12d movl %r11d,%r14d rorl $14,%r13d movl %r8d,%edi xorl %edx,%r13d rorl $9,%r14d xorl %r9d,%edi movl %r12d,36(%rsp) xorl %r11d,%r14d andl %edx,%edi rorl $5,%r13d addl %r10d,%r12d xorl %r9d,%edi rorl $11,%r14d xorl %edx,%r13d addl %edi,%r12d movl %r11d,%edi addl (%rbp),%r12d xorl %r11d,%r14d xorl %eax,%edi rorl $6,%r13d movl %eax,%r10d andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%r10d addl %r12d,%ecx addl %r12d,%r10d leaq 4(%rbp),%rbp movl 44(%rsp),%r13d movl 32(%rsp),%r15d movl %r13d,%r12d rorl $11,%r13d addl %r14d,%r10d movl %r15d,%r14d rorl $2,%r15d xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%r15d shrl $10,%r14d rorl $17,%r15d xorl %r13d,%r12d xorl %r14d,%r15d addl 12(%rsp),%r12d addl 40(%rsp),%r12d movl %ecx,%r13d addl %r15d,%r12d movl %r10d,%r14d rorl $14,%r13d movl %edx,%r15d xorl %ecx,%r13d rorl $9,%r14d xorl %r8d,%r15d movl %r12d,40(%rsp) xorl %r10d,%r14d andl %ecx,%r15d rorl $5,%r13d addl %r9d,%r12d xorl %r8d,%r15d rorl $11,%r14d xorl %ecx,%r13d addl %r15d,%r12d movl %r10d,%r15d addl (%rbp),%r12d xorl %r10d,%r14d xorl %r11d,%r15d rorl $6,%r13d movl %r11d,%r9d andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%r9d addl %r12d,%ebx addl %r12d,%r9d leaq 4(%rbp),%rbp movl 48(%rsp),%r13d movl 36(%rsp),%edi movl 
%r13d,%r12d rorl $11,%r13d addl %r14d,%r9d movl %edi,%r14d rorl $2,%edi xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%edi shrl $10,%r14d rorl $17,%edi xorl %r13d,%r12d xorl %r14d,%edi addl 16(%rsp),%r12d addl 44(%rsp),%r12d movl %ebx,%r13d addl %edi,%r12d movl %r9d,%r14d rorl $14,%r13d movl %ecx,%edi xorl %ebx,%r13d rorl $9,%r14d xorl %edx,%edi movl %r12d,44(%rsp) xorl %r9d,%r14d andl %ebx,%edi rorl $5,%r13d addl %r8d,%r12d xorl %edx,%edi rorl $11,%r14d xorl %ebx,%r13d addl %edi,%r12d movl %r9d,%edi addl (%rbp),%r12d xorl %r9d,%r14d xorl %r10d,%edi rorl $6,%r13d movl %r10d,%r8d andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%r8d addl %r12d,%eax addl %r12d,%r8d leaq 20(%rbp),%rbp movl 52(%rsp),%r13d movl 40(%rsp),%r15d movl %r13d,%r12d rorl $11,%r13d addl %r14d,%r8d movl %r15d,%r14d rorl $2,%r15d xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%r15d shrl $10,%r14d rorl $17,%r15d xorl %r13d,%r12d xorl %r14d,%r15d addl 20(%rsp),%r12d addl 48(%rsp),%r12d movl %eax,%r13d addl %r15d,%r12d movl %r8d,%r14d rorl $14,%r13d movl %ebx,%r15d xorl %eax,%r13d rorl $9,%r14d xorl %ecx,%r15d movl %r12d,48(%rsp) xorl %r8d,%r14d andl %eax,%r15d rorl $5,%r13d addl %edx,%r12d xorl %ecx,%r15d rorl $11,%r14d xorl %eax,%r13d addl %r15d,%r12d movl %r8d,%r15d addl (%rbp),%r12d xorl %r8d,%r14d xorl %r9d,%r15d rorl $6,%r13d movl %r9d,%edx andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%edx addl %r12d,%r11d addl %r12d,%edx leaq 4(%rbp),%rbp movl 56(%rsp),%r13d movl 44(%rsp),%edi movl %r13d,%r12d rorl $11,%r13d addl %r14d,%edx movl %edi,%r14d rorl $2,%edi xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%edi shrl $10,%r14d rorl $17,%edi xorl %r13d,%r12d xorl %r14d,%edi addl 24(%rsp),%r12d addl 52(%rsp),%r12d movl %r11d,%r13d addl %edi,%r12d movl %edx,%r14d rorl $14,%r13d movl %eax,%edi xorl %r11d,%r13d rorl $9,%r14d xorl %ebx,%edi movl %r12d,52(%rsp) xorl %edx,%r14d andl %r11d,%edi rorl $5,%r13d addl %ecx,%r12d xorl %ebx,%edi rorl $11,%r14d xorl %r11d,%r13d addl %edi,%r12d movl %edx,%edi addl (%rbp),%r12d xorl %edx,%r14d xorl %r8d,%edi rorl $6,%r13d movl %r8d,%ecx andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%ecx addl %r12d,%r10d addl %r12d,%ecx leaq 4(%rbp),%rbp movl 60(%rsp),%r13d movl 48(%rsp),%r15d movl %r13d,%r12d rorl $11,%r13d addl %r14d,%ecx movl %r15d,%r14d rorl $2,%r15d xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%r15d shrl $10,%r14d rorl $17,%r15d xorl %r13d,%r12d xorl %r14d,%r15d addl 28(%rsp),%r12d addl 56(%rsp),%r12d movl %r10d,%r13d addl %r15d,%r12d movl %ecx,%r14d rorl $14,%r13d movl %r11d,%r15d xorl %r10d,%r13d rorl $9,%r14d xorl %eax,%r15d movl %r12d,56(%rsp) xorl %ecx,%r14d andl %r10d,%r15d rorl $5,%r13d addl %ebx,%r12d xorl %eax,%r15d rorl $11,%r14d xorl %r10d,%r13d addl %r15d,%r12d movl %ecx,%r15d addl (%rbp),%r12d xorl %ecx,%r14d xorl %edx,%r15d rorl $6,%r13d movl %edx,%ebx andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%ebx addl %r12d,%r9d addl %r12d,%ebx leaq 4(%rbp),%rbp movl 0(%rsp),%r13d movl 52(%rsp),%edi movl %r13d,%r12d rorl $11,%r13d addl %r14d,%ebx movl %edi,%r14d rorl $2,%edi xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%edi shrl $10,%r14d rorl $17,%edi xorl %r13d,%r12d xorl %r14d,%edi addl 32(%rsp),%r12d addl 60(%rsp),%r12d movl %r9d,%r13d addl %edi,%r12d movl %ebx,%r14d rorl $14,%r13d movl %r10d,%edi xorl %r9d,%r13d rorl $9,%r14d xorl %r11d,%edi movl %r12d,60(%rsp) xorl %ebx,%r14d andl %r9d,%edi rorl $5,%r13d addl %eax,%r12d xorl %r11d,%edi rorl $11,%r14d xorl %r9d,%r13d addl %edi,%r12d movl 
%ebx,%edi addl (%rbp),%r12d xorl %ebx,%r14d xorl %ecx,%edi rorl $6,%r13d movl %ecx,%eax andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%eax addl %r12d,%r8d addl %r12d,%eax leaq 20(%rbp),%rbp cmpb $0,3(%rbp) jnz .Lrounds_16_xx movq 64+0(%rsp),%rdi addl %r14d,%eax leaq 64(%rsi),%rsi addl 0(%rdi),%eax addl 4(%rdi),%ebx addl 8(%rdi),%ecx addl 12(%rdi),%edx addl 16(%rdi),%r8d addl 20(%rdi),%r9d addl 24(%rdi),%r10d addl 28(%rdi),%r11d cmpq 64+16(%rsp),%rsi movl %eax,0(%rdi) movl %ebx,4(%rdi) movl %ecx,8(%rdi) movl %edx,12(%rdi) movl %r8d,16(%rdi) movl %r9d,20(%rdi) movl %r10d,24(%rdi) movl %r11d,28(%rdi) jb .Lloop movq 88(%rsp),%rsi .cfi_def_cfa %rsi,8 movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbp .cfi_restore %rbp movq -8(%rsi),%rbx .cfi_restore %rbx leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Lepilogue: ret .cfi_endproc .size sha256_block_data_order_nohw,.-sha256_block_data_order_nohw .section .rodata .align 64 .type K256,@object K256: .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f .long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f .long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff .long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff .long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908 .long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908 .byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .text .globl sha256_block_data_order_hw .hidden sha256_block_data_order_hw .type sha256_block_data_order_hw,@function .align 64 sha256_block_data_order_hw: .cfi_startproc _CET_ENDBR leaq K256+128(%rip),%rcx movdqu (%rdi),%xmm1 movdqu 16(%rdi),%xmm2 movdqa 512-128(%rcx),%xmm7 pshufd $0x1b,%xmm1,%xmm0 pshufd $0xb1,%xmm1,%xmm1 
pshufd $0x1b,%xmm2,%xmm2 movdqa %xmm7,%xmm8 .byte 102,15,58,15,202,8 punpcklqdq %xmm0,%xmm2 jmp .Loop_shaext .align 16 .Loop_shaext: movdqu (%rsi),%xmm3 movdqu 16(%rsi),%xmm4 movdqu 32(%rsi),%xmm5 .byte 102,15,56,0,223 movdqu 48(%rsi),%xmm6 movdqa 0-128(%rcx),%xmm0 paddd %xmm3,%xmm0 .byte 102,15,56,0,231 movdqa %xmm2,%xmm10 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 nop movdqa %xmm1,%xmm9 .byte 15,56,203,202 movdqa 32-128(%rcx),%xmm0 paddd %xmm4,%xmm0 .byte 102,15,56,0,239 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 leaq 64(%rsi),%rsi .byte 15,56,204,220 .byte 15,56,203,202 movdqa 64-128(%rcx),%xmm0 paddd %xmm5,%xmm0 .byte 102,15,56,0,247 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm6,%xmm7 .byte 102,15,58,15,253,4 nop paddd %xmm7,%xmm3 .byte 15,56,204,229 .byte 15,56,203,202 movdqa 96-128(%rcx),%xmm0 paddd %xmm6,%xmm0 .byte 15,56,205,222 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm3,%xmm7 .byte 102,15,58,15,254,4 nop paddd %xmm7,%xmm4 .byte 15,56,204,238 .byte 15,56,203,202 movdqa 128-128(%rcx),%xmm0 paddd %xmm3,%xmm0 .byte 15,56,205,227 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm4,%xmm7 .byte 102,15,58,15,251,4 nop paddd %xmm7,%xmm5 .byte 15,56,204,243 .byte 15,56,203,202 movdqa 160-128(%rcx),%xmm0 paddd %xmm4,%xmm0 .byte 15,56,205,236 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm5,%xmm7 .byte 102,15,58,15,252,4 nop paddd %xmm7,%xmm6 .byte 15,56,204,220 .byte 15,56,203,202 movdqa 192-128(%rcx),%xmm0 paddd %xmm5,%xmm0 .byte 15,56,205,245 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm6,%xmm7 .byte 102,15,58,15,253,4 nop paddd %xmm7,%xmm3 .byte 15,56,204,229 .byte 15,56,203,202 movdqa 224-128(%rcx),%xmm0 paddd %xmm6,%xmm0 .byte 15,56,205,222 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm3,%xmm7 .byte 102,15,58,15,254,4 nop paddd %xmm7,%xmm4 .byte 15,56,204,238 .byte 15,56,203,202 movdqa 256-128(%rcx),%xmm0 paddd %xmm3,%xmm0 .byte 15,56,205,227 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm4,%xmm7 .byte 102,15,58,15,251,4 nop paddd %xmm7,%xmm5 .byte 15,56,204,243 .byte 15,56,203,202 movdqa 288-128(%rcx),%xmm0 paddd %xmm4,%xmm0 .byte 15,56,205,236 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm5,%xmm7 .byte 102,15,58,15,252,4 nop paddd %xmm7,%xmm6 .byte 15,56,204,220 .byte 15,56,203,202 movdqa 320-128(%rcx),%xmm0 paddd %xmm5,%xmm0 .byte 15,56,205,245 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm6,%xmm7 .byte 102,15,58,15,253,4 nop paddd %xmm7,%xmm3 .byte 15,56,204,229 .byte 15,56,203,202 movdqa 352-128(%rcx),%xmm0 paddd %xmm6,%xmm0 .byte 15,56,205,222 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm3,%xmm7 .byte 102,15,58,15,254,4 nop paddd %xmm7,%xmm4 .byte 15,56,204,238 .byte 15,56,203,202 movdqa 384-128(%rcx),%xmm0 paddd %xmm3,%xmm0 .byte 15,56,205,227 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm4,%xmm7 .byte 102,15,58,15,251,4 nop paddd %xmm7,%xmm5 .byte 15,56,204,243 .byte 15,56,203,202 movdqa 416-128(%rcx),%xmm0 paddd %xmm4,%xmm0 .byte 15,56,205,236 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm5,%xmm7 .byte 102,15,58,15,252,4 .byte 15,56,203,202 paddd %xmm7,%xmm6 movdqa 448-128(%rcx),%xmm0 paddd %xmm5,%xmm0 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 .byte 15,56,205,245 movdqa %xmm8,%xmm7 .byte 15,56,203,202 movdqa 480-128(%rcx),%xmm0 paddd %xmm6,%xmm0 nop .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 decq %rdx nop .byte 15,56,203,202 paddd %xmm10,%xmm2 paddd %xmm9,%xmm1 jnz .Loop_shaext pshufd $0xb1,%xmm2,%xmm2 pshufd $0x1b,%xmm1,%xmm7 pshufd 
$0xb1,%xmm1,%xmm1 punpckhqdq %xmm2,%xmm1 .byte 102,15,58,15,215,8 movdqu %xmm1,(%rdi) movdqu %xmm2,16(%rdi) ret .cfi_endproc .size sha256_block_data_order_hw,.-sha256_block_data_order_hw .globl sha256_block_data_order_ssse3 .hidden sha256_block_data_order_ssse3 .type sha256_block_data_order_ssse3,@function .align 64 sha256_block_data_order_ssse3: .cfi_startproc _CET_ENDBR movq %rsp,%rax .cfi_def_cfa_register %rax pushq %rbx .cfi_offset %rbx,-16 pushq %rbp .cfi_offset %rbp,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset %r13,-40 pushq %r14 .cfi_offset %r14,-48 pushq %r15 .cfi_offset %r15,-56 shlq $4,%rdx subq $96,%rsp leaq (%rsi,%rdx,4),%rdx andq $-64,%rsp movq %rdi,64+0(%rsp) movq %rsi,64+8(%rsp) movq %rdx,64+16(%rsp) movq %rax,88(%rsp) .cfi_escape 0x0f,0x06,0x77,0xd8,0x00,0x06,0x23,0x08 .Lprologue_ssse3: movl 0(%rdi),%eax movl 4(%rdi),%ebx movl 8(%rdi),%ecx movl 12(%rdi),%edx movl 16(%rdi),%r8d movl 20(%rdi),%r9d movl 24(%rdi),%r10d movl 28(%rdi),%r11d jmp .Lloop_ssse3 .align 16 .Lloop_ssse3: movdqa K256+512(%rip),%xmm7 movdqu 0(%rsi),%xmm0 movdqu 16(%rsi),%xmm1 movdqu 32(%rsi),%xmm2 .byte 102,15,56,0,199 movdqu 48(%rsi),%xmm3 leaq K256(%rip),%rbp .byte 102,15,56,0,207 movdqa 0(%rbp),%xmm4 movdqa 32(%rbp),%xmm5 .byte 102,15,56,0,215 paddd %xmm0,%xmm4 movdqa 64(%rbp),%xmm6 .byte 102,15,56,0,223 movdqa 96(%rbp),%xmm7 paddd %xmm1,%xmm5 paddd %xmm2,%xmm6 paddd %xmm3,%xmm7 movdqa %xmm4,0(%rsp) movl %eax,%r14d movdqa %xmm5,16(%rsp) movl %ebx,%edi movdqa %xmm6,32(%rsp) xorl %ecx,%edi movdqa %xmm7,48(%rsp) movl %r8d,%r13d jmp .Lssse3_00_47 .align 16 .Lssse3_00_47: subq $-128,%rbp rorl $14,%r13d movdqa %xmm1,%xmm4 movl %r14d,%eax movl %r9d,%r12d movdqa %xmm3,%xmm7 rorl $9,%r14d xorl %r8d,%r13d xorl %r10d,%r12d rorl $5,%r13d xorl %eax,%r14d .byte 102,15,58,15,224,4 andl %r8d,%r12d xorl %r8d,%r13d .byte 102,15,58,15,250,4 addl 0(%rsp),%r11d movl %eax,%r15d xorl %r10d,%r12d rorl $11,%r14d movdqa %xmm4,%xmm5 xorl %ebx,%r15d addl %r12d,%r11d movdqa %xmm4,%xmm6 rorl $6,%r13d andl %r15d,%edi psrld $3,%xmm4 xorl %eax,%r14d addl %r13d,%r11d xorl %ebx,%edi paddd %xmm7,%xmm0 rorl $2,%r14d addl %r11d,%edx psrld $7,%xmm6 addl %edi,%r11d movl %edx,%r13d pshufd $250,%xmm3,%xmm7 addl %r11d,%r14d rorl $14,%r13d pslld $14,%xmm5 movl %r14d,%r11d movl %r8d,%r12d pxor %xmm6,%xmm4 rorl $9,%r14d xorl %edx,%r13d xorl %r9d,%r12d rorl $5,%r13d psrld $11,%xmm6 xorl %r11d,%r14d pxor %xmm5,%xmm4 andl %edx,%r12d xorl %edx,%r13d pslld $11,%xmm5 addl 4(%rsp),%r10d movl %r11d,%edi pxor %xmm6,%xmm4 xorl %r9d,%r12d rorl $11,%r14d movdqa %xmm7,%xmm6 xorl %eax,%edi addl %r12d,%r10d pxor %xmm5,%xmm4 rorl $6,%r13d andl %edi,%r15d xorl %r11d,%r14d psrld $10,%xmm7 addl %r13d,%r10d xorl %eax,%r15d paddd %xmm4,%xmm0 rorl $2,%r14d addl %r10d,%ecx psrlq $17,%xmm6 addl %r15d,%r10d movl %ecx,%r13d addl %r10d,%r14d pxor %xmm6,%xmm7 rorl $14,%r13d movl %r14d,%r10d movl %edx,%r12d rorl $9,%r14d psrlq $2,%xmm6 xorl %ecx,%r13d xorl %r8d,%r12d pxor %xmm6,%xmm7 rorl $5,%r13d xorl %r10d,%r14d andl %ecx,%r12d pshufd $128,%xmm7,%xmm7 xorl %ecx,%r13d addl 8(%rsp),%r9d movl %r10d,%r15d psrldq $8,%xmm7 xorl %r8d,%r12d rorl $11,%r14d xorl %r11d,%r15d addl %r12d,%r9d rorl $6,%r13d paddd %xmm7,%xmm0 andl %r15d,%edi xorl %r10d,%r14d addl %r13d,%r9d pshufd $80,%xmm0,%xmm7 xorl %r11d,%edi rorl $2,%r14d addl %r9d,%ebx movdqa %xmm7,%xmm6 addl %edi,%r9d movl %ebx,%r13d psrld $10,%xmm7 addl %r9d,%r14d rorl $14,%r13d psrlq $17,%xmm6 movl %r14d,%r9d movl %ecx,%r12d pxor %xmm6,%xmm7 rorl $9,%r14d xorl %ebx,%r13d xorl %edx,%r12d rorl $5,%r13d xorl %r9d,%r14d 
psrlq $2,%xmm6 andl %ebx,%r12d xorl %ebx,%r13d addl 12(%rsp),%r8d pxor %xmm6,%xmm7 movl %r9d,%edi xorl %edx,%r12d rorl $11,%r14d pshufd $8,%xmm7,%xmm7 xorl %r10d,%edi addl %r12d,%r8d movdqa 0(%rbp),%xmm6 rorl $6,%r13d andl %edi,%r15d pslldq $8,%xmm7 xorl %r9d,%r14d addl %r13d,%r8d xorl %r10d,%r15d paddd %xmm7,%xmm0 rorl $2,%r14d addl %r8d,%eax addl %r15d,%r8d paddd %xmm0,%xmm6 movl %eax,%r13d addl %r8d,%r14d movdqa %xmm6,0(%rsp) rorl $14,%r13d movdqa %xmm2,%xmm4 movl %r14d,%r8d movl %ebx,%r12d movdqa %xmm0,%xmm7 rorl $9,%r14d xorl %eax,%r13d xorl %ecx,%r12d rorl $5,%r13d xorl %r8d,%r14d .byte 102,15,58,15,225,4 andl %eax,%r12d xorl %eax,%r13d .byte 102,15,58,15,251,4 addl 16(%rsp),%edx movl %r8d,%r15d xorl %ecx,%r12d rorl $11,%r14d movdqa %xmm4,%xmm5 xorl %r9d,%r15d addl %r12d,%edx movdqa %xmm4,%xmm6 rorl $6,%r13d andl %r15d,%edi psrld $3,%xmm4 xorl %r8d,%r14d addl %r13d,%edx xorl %r9d,%edi paddd %xmm7,%xmm1 rorl $2,%r14d addl %edx,%r11d psrld $7,%xmm6 addl %edi,%edx movl %r11d,%r13d pshufd $250,%xmm0,%xmm7 addl %edx,%r14d rorl $14,%r13d pslld $14,%xmm5 movl %r14d,%edx movl %eax,%r12d pxor %xmm6,%xmm4 rorl $9,%r14d xorl %r11d,%r13d xorl %ebx,%r12d rorl $5,%r13d psrld $11,%xmm6 xorl %edx,%r14d pxor %xmm5,%xmm4 andl %r11d,%r12d xorl %r11d,%r13d pslld $11,%xmm5 addl 20(%rsp),%ecx movl %edx,%edi pxor %xmm6,%xmm4 xorl %ebx,%r12d rorl $11,%r14d movdqa %xmm7,%xmm6 xorl %r8d,%edi addl %r12d,%ecx pxor %xmm5,%xmm4 rorl $6,%r13d andl %edi,%r15d xorl %edx,%r14d psrld $10,%xmm7 addl %r13d,%ecx xorl %r8d,%r15d paddd %xmm4,%xmm1 rorl $2,%r14d addl %ecx,%r10d psrlq $17,%xmm6 addl %r15d,%ecx movl %r10d,%r13d addl %ecx,%r14d pxor %xmm6,%xmm7 rorl $14,%r13d movl %r14d,%ecx movl %r11d,%r12d rorl $9,%r14d psrlq $2,%xmm6 xorl %r10d,%r13d xorl %eax,%r12d pxor %xmm6,%xmm7 rorl $5,%r13d xorl %ecx,%r14d andl %r10d,%r12d pshufd $128,%xmm7,%xmm7 xorl %r10d,%r13d addl 24(%rsp),%ebx movl %ecx,%r15d psrldq $8,%xmm7 xorl %eax,%r12d rorl $11,%r14d xorl %edx,%r15d addl %r12d,%ebx rorl $6,%r13d paddd %xmm7,%xmm1 andl %r15d,%edi xorl %ecx,%r14d addl %r13d,%ebx pshufd $80,%xmm1,%xmm7 xorl %edx,%edi rorl $2,%r14d addl %ebx,%r9d movdqa %xmm7,%xmm6 addl %edi,%ebx movl %r9d,%r13d psrld $10,%xmm7 addl %ebx,%r14d rorl $14,%r13d psrlq $17,%xmm6 movl %r14d,%ebx movl %r10d,%r12d pxor %xmm6,%xmm7 rorl $9,%r14d xorl %r9d,%r13d xorl %r11d,%r12d rorl $5,%r13d xorl %ebx,%r14d psrlq $2,%xmm6 andl %r9d,%r12d xorl %r9d,%r13d addl 28(%rsp),%eax pxor %xmm6,%xmm7 movl %ebx,%edi xorl %r11d,%r12d rorl $11,%r14d pshufd $8,%xmm7,%xmm7 xorl %ecx,%edi addl %r12d,%eax movdqa 32(%rbp),%xmm6 rorl $6,%r13d andl %edi,%r15d pslldq $8,%xmm7 xorl %ebx,%r14d addl %r13d,%eax xorl %ecx,%r15d paddd %xmm7,%xmm1 rorl $2,%r14d addl %eax,%r8d addl %r15d,%eax paddd %xmm1,%xmm6 movl %r8d,%r13d addl %eax,%r14d movdqa %xmm6,16(%rsp) rorl $14,%r13d movdqa %xmm3,%xmm4 movl %r14d,%eax movl %r9d,%r12d movdqa %xmm1,%xmm7 rorl $9,%r14d xorl %r8d,%r13d xorl %r10d,%r12d rorl $5,%r13d xorl %eax,%r14d .byte 102,15,58,15,226,4 andl %r8d,%r12d xorl %r8d,%r13d .byte 102,15,58,15,248,4 addl 32(%rsp),%r11d movl %eax,%r15d xorl %r10d,%r12d rorl $11,%r14d movdqa %xmm4,%xmm5 xorl %ebx,%r15d addl %r12d,%r11d movdqa %xmm4,%xmm6 rorl $6,%r13d andl %r15d,%edi psrld $3,%xmm4 xorl %eax,%r14d addl %r13d,%r11d xorl %ebx,%edi paddd %xmm7,%xmm2 rorl $2,%r14d addl %r11d,%edx psrld $7,%xmm6 addl %edi,%r11d movl %edx,%r13d pshufd $250,%xmm1,%xmm7 addl %r11d,%r14d rorl $14,%r13d pslld $14,%xmm5 movl %r14d,%r11d movl %r8d,%r12d pxor %xmm6,%xmm4 rorl $9,%r14d xorl %edx,%r13d xorl %r9d,%r12d rorl $5,%r13d 
psrld $11,%xmm6 xorl %r11d,%r14d pxor %xmm5,%xmm4 andl %edx,%r12d xorl %edx,%r13d pslld $11,%xmm5 addl 36(%rsp),%r10d movl %r11d,%edi pxor %xmm6,%xmm4 xorl %r9d,%r12d rorl $11,%r14d movdqa %xmm7,%xmm6 xorl %eax,%edi addl %r12d,%r10d pxor %xmm5,%xmm4 rorl $6,%r13d andl %edi,%r15d xorl %r11d,%r14d psrld $10,%xmm7 addl %r13d,%r10d xorl %eax,%r15d paddd %xmm4,%xmm2 rorl $2,%r14d addl %r10d,%ecx psrlq $17,%xmm6 addl %r15d,%r10d movl %ecx,%r13d addl %r10d,%r14d pxor %xmm6,%xmm7 rorl $14,%r13d movl %r14d,%r10d movl %edx,%r12d rorl $9,%r14d psrlq $2,%xmm6 xorl %ecx,%r13d xorl %r8d,%r12d pxor %xmm6,%xmm7 rorl $5,%r13d xorl %r10d,%r14d andl %ecx,%r12d pshufd $128,%xmm7,%xmm7 xorl %ecx,%r13d addl 40(%rsp),%r9d movl %r10d,%r15d psrldq $8,%xmm7 xorl %r8d,%r12d rorl $11,%r14d xorl %r11d,%r15d addl %r12d,%r9d rorl $6,%r13d paddd %xmm7,%xmm2 andl %r15d,%edi xorl %r10d,%r14d addl %r13d,%r9d pshufd $80,%xmm2,%xmm7 xorl %r11d,%edi rorl $2,%r14d addl %r9d,%ebx movdqa %xmm7,%xmm6 addl %edi,%r9d movl %ebx,%r13d psrld $10,%xmm7 addl %r9d,%r14d rorl $14,%r13d psrlq $17,%xmm6 movl %r14d,%r9d movl %ecx,%r12d pxor %xmm6,%xmm7 rorl $9,%r14d xorl %ebx,%r13d xorl %edx,%r12d rorl $5,%r13d xorl %r9d,%r14d psrlq $2,%xmm6 andl %ebx,%r12d xorl %ebx,%r13d addl 44(%rsp),%r8d pxor %xmm6,%xmm7 movl %r9d,%edi xorl %edx,%r12d rorl $11,%r14d pshufd $8,%xmm7,%xmm7 xorl %r10d,%edi addl %r12d,%r8d movdqa 64(%rbp),%xmm6 rorl $6,%r13d andl %edi,%r15d pslldq $8,%xmm7 xorl %r9d,%r14d addl %r13d,%r8d xorl %r10d,%r15d paddd %xmm7,%xmm2 rorl $2,%r14d addl %r8d,%eax addl %r15d,%r8d paddd %xmm2,%xmm6 movl %eax,%r13d addl %r8d,%r14d movdqa %xmm6,32(%rsp) rorl $14,%r13d movdqa %xmm0,%xmm4 movl %r14d,%r8d movl %ebx,%r12d movdqa %xmm2,%xmm7 rorl $9,%r14d xorl %eax,%r13d xorl %ecx,%r12d rorl $5,%r13d xorl %r8d,%r14d .byte 102,15,58,15,227,4 andl %eax,%r12d xorl %eax,%r13d .byte 102,15,58,15,249,4 addl 48(%rsp),%edx movl %r8d,%r15d xorl %ecx,%r12d rorl $11,%r14d movdqa %xmm4,%xmm5 xorl %r9d,%r15d addl %r12d,%edx movdqa %xmm4,%xmm6 rorl $6,%r13d andl %r15d,%edi psrld $3,%xmm4 xorl %r8d,%r14d addl %r13d,%edx xorl %r9d,%edi paddd %xmm7,%xmm3 rorl $2,%r14d addl %edx,%r11d psrld $7,%xmm6 addl %edi,%edx movl %r11d,%r13d pshufd $250,%xmm2,%xmm7 addl %edx,%r14d rorl $14,%r13d pslld $14,%xmm5 movl %r14d,%edx movl %eax,%r12d pxor %xmm6,%xmm4 rorl $9,%r14d xorl %r11d,%r13d xorl %ebx,%r12d rorl $5,%r13d psrld $11,%xmm6 xorl %edx,%r14d pxor %xmm5,%xmm4 andl %r11d,%r12d xorl %r11d,%r13d pslld $11,%xmm5 addl 52(%rsp),%ecx movl %edx,%edi pxor %xmm6,%xmm4 xorl %ebx,%r12d rorl $11,%r14d movdqa %xmm7,%xmm6 xorl %r8d,%edi addl %r12d,%ecx pxor %xmm5,%xmm4 rorl $6,%r13d andl %edi,%r15d xorl %edx,%r14d psrld $10,%xmm7 addl %r13d,%ecx xorl %r8d,%r15d paddd %xmm4,%xmm3 rorl $2,%r14d addl %ecx,%r10d psrlq $17,%xmm6 addl %r15d,%ecx movl %r10d,%r13d addl %ecx,%r14d pxor %xmm6,%xmm7 rorl $14,%r13d movl %r14d,%ecx movl %r11d,%r12d rorl $9,%r14d psrlq $2,%xmm6 xorl %r10d,%r13d xorl %eax,%r12d pxor %xmm6,%xmm7 rorl $5,%r13d xorl %ecx,%r14d andl %r10d,%r12d pshufd $128,%xmm7,%xmm7 xorl %r10d,%r13d addl 56(%rsp),%ebx movl %ecx,%r15d psrldq $8,%xmm7 xorl %eax,%r12d rorl $11,%r14d xorl %edx,%r15d addl %r12d,%ebx rorl $6,%r13d paddd %xmm7,%xmm3 andl %r15d,%edi xorl %ecx,%r14d addl %r13d,%ebx pshufd $80,%xmm3,%xmm7 xorl %edx,%edi rorl $2,%r14d addl %ebx,%r9d movdqa %xmm7,%xmm6 addl %edi,%ebx movl %r9d,%r13d psrld $10,%xmm7 addl %ebx,%r14d rorl $14,%r13d psrlq $17,%xmm6 movl %r14d,%ebx movl %r10d,%r12d pxor %xmm6,%xmm7 rorl $9,%r14d xorl %r9d,%r13d xorl %r11d,%r12d rorl $5,%r13d xorl 
%ebx,%r14d psrlq $2,%xmm6 andl %r9d,%r12d xorl %r9d,%r13d addl 60(%rsp),%eax pxor %xmm6,%xmm7 movl %ebx,%edi xorl %r11d,%r12d rorl $11,%r14d pshufd $8,%xmm7,%xmm7 xorl %ecx,%edi addl %r12d,%eax movdqa 96(%rbp),%xmm6 rorl $6,%r13d andl %edi,%r15d pslldq $8,%xmm7 xorl %ebx,%r14d addl %r13d,%eax xorl %ecx,%r15d paddd %xmm7,%xmm3 rorl $2,%r14d addl %eax,%r8d addl %r15d,%eax paddd %xmm3,%xmm6 movl %r8d,%r13d addl %eax,%r14d movdqa %xmm6,48(%rsp) cmpb $0,131(%rbp) jne .Lssse3_00_47 rorl $14,%r13d movl %r14d,%eax movl %r9d,%r12d rorl $9,%r14d xorl %r8d,%r13d xorl %r10d,%r12d rorl $5,%r13d xorl %eax,%r14d andl %r8d,%r12d xorl %r8d,%r13d addl 0(%rsp),%r11d movl %eax,%r15d xorl %r10d,%r12d rorl $11,%r14d xorl %ebx,%r15d addl %r12d,%r11d rorl $6,%r13d andl %r15d,%edi xorl %eax,%r14d addl %r13d,%r11d xorl %ebx,%edi rorl $2,%r14d addl %r11d,%edx addl %edi,%r11d movl %edx,%r13d addl %r11d,%r14d rorl $14,%r13d movl %r14d,%r11d movl %r8d,%r12d rorl $9,%r14d xorl %edx,%r13d xorl %r9d,%r12d rorl $5,%r13d xorl %r11d,%r14d andl %edx,%r12d xorl %edx,%r13d addl 4(%rsp),%r10d movl %r11d,%edi xorl %r9d,%r12d rorl $11,%r14d xorl %eax,%edi addl %r12d,%r10d rorl $6,%r13d andl %edi,%r15d xorl %r11d,%r14d addl %r13d,%r10d xorl %eax,%r15d rorl $2,%r14d addl %r10d,%ecx addl %r15d,%r10d movl %ecx,%r13d addl %r10d,%r14d rorl $14,%r13d movl %r14d,%r10d movl %edx,%r12d rorl $9,%r14d xorl %ecx,%r13d xorl %r8d,%r12d rorl $5,%r13d xorl %r10d,%r14d andl %ecx,%r12d xorl %ecx,%r13d addl 8(%rsp),%r9d movl %r10d,%r15d xorl %r8d,%r12d rorl $11,%r14d xorl %r11d,%r15d addl %r12d,%r9d rorl $6,%r13d andl %r15d,%edi xorl %r10d,%r14d addl %r13d,%r9d xorl %r11d,%edi rorl $2,%r14d addl %r9d,%ebx addl %edi,%r9d movl %ebx,%r13d addl %r9d,%r14d rorl $14,%r13d movl %r14d,%r9d movl %ecx,%r12d rorl $9,%r14d xorl %ebx,%r13d xorl %edx,%r12d rorl $5,%r13d xorl %r9d,%r14d andl %ebx,%r12d xorl %ebx,%r13d addl 12(%rsp),%r8d movl %r9d,%edi xorl %edx,%r12d rorl $11,%r14d xorl %r10d,%edi addl %r12d,%r8d rorl $6,%r13d andl %edi,%r15d xorl %r9d,%r14d addl %r13d,%r8d xorl %r10d,%r15d rorl $2,%r14d addl %r8d,%eax addl %r15d,%r8d movl %eax,%r13d addl %r8d,%r14d rorl $14,%r13d movl %r14d,%r8d movl %ebx,%r12d rorl $9,%r14d xorl %eax,%r13d xorl %ecx,%r12d rorl $5,%r13d xorl %r8d,%r14d andl %eax,%r12d xorl %eax,%r13d addl 16(%rsp),%edx movl %r8d,%r15d xorl %ecx,%r12d rorl $11,%r14d xorl %r9d,%r15d addl %r12d,%edx rorl $6,%r13d andl %r15d,%edi xorl %r8d,%r14d addl %r13d,%edx xorl %r9d,%edi rorl $2,%r14d addl %edx,%r11d addl %edi,%edx movl %r11d,%r13d addl %edx,%r14d rorl $14,%r13d movl %r14d,%edx movl %eax,%r12d rorl $9,%r14d xorl %r11d,%r13d xorl %ebx,%r12d rorl $5,%r13d xorl %edx,%r14d andl %r11d,%r12d xorl %r11d,%r13d addl 20(%rsp),%ecx movl %edx,%edi xorl %ebx,%r12d rorl $11,%r14d xorl %r8d,%edi addl %r12d,%ecx rorl $6,%r13d andl %edi,%r15d xorl %edx,%r14d addl %r13d,%ecx xorl %r8d,%r15d rorl $2,%r14d addl %ecx,%r10d addl %r15d,%ecx movl %r10d,%r13d addl %ecx,%r14d rorl $14,%r13d movl %r14d,%ecx movl %r11d,%r12d rorl $9,%r14d xorl %r10d,%r13d xorl %eax,%r12d rorl $5,%r13d xorl %ecx,%r14d andl %r10d,%r12d xorl %r10d,%r13d addl 24(%rsp),%ebx movl %ecx,%r15d xorl %eax,%r12d rorl $11,%r14d xorl %edx,%r15d addl %r12d,%ebx rorl $6,%r13d andl %r15d,%edi xorl %ecx,%r14d addl %r13d,%ebx xorl %edx,%edi rorl $2,%r14d addl %ebx,%r9d addl %edi,%ebx movl %r9d,%r13d addl %ebx,%r14d rorl $14,%r13d movl %r14d,%ebx movl %r10d,%r12d rorl $9,%r14d xorl %r9d,%r13d xorl %r11d,%r12d rorl $5,%r13d xorl %ebx,%r14d andl %r9d,%r12d xorl %r9d,%r13d addl 28(%rsp),%eax movl %ebx,%edi xorl 
%r11d,%r12d rorl $11,%r14d xorl %ecx,%edi addl %r12d,%eax rorl $6,%r13d andl %edi,%r15d xorl %ebx,%r14d addl %r13d,%eax xorl %ecx,%r15d rorl $2,%r14d addl %eax,%r8d addl %r15d,%eax movl %r8d,%r13d addl %eax,%r14d rorl $14,%r13d movl %r14d,%eax movl %r9d,%r12d rorl $9,%r14d xorl %r8d,%r13d xorl %r10d,%r12d rorl $5,%r13d xorl %eax,%r14d andl %r8d,%r12d xorl %r8d,%r13d addl 32(%rsp),%r11d movl %eax,%r15d xorl %r10d,%r12d rorl $11,%r14d xorl %ebx,%r15d addl %r12d,%r11d rorl $6,%r13d andl %r15d,%edi xorl %eax,%r14d addl %r13d,%r11d xorl %ebx,%edi rorl $2,%r14d addl %r11d,%edx addl %edi,%r11d movl %edx,%r13d addl %r11d,%r14d rorl $14,%r13d movl %r14d,%r11d movl %r8d,%r12d rorl $9,%r14d xorl %edx,%r13d xorl %r9d,%r12d rorl $5,%r13d xorl %r11d,%r14d andl %edx,%r12d xorl %edx,%r13d addl 36(%rsp),%r10d movl %r11d,%edi xorl %r9d,%r12d rorl $11,%r14d xorl %eax,%edi addl %r12d,%r10d rorl $6,%r13d andl %edi,%r15d xorl %r11d,%r14d addl %r13d,%r10d xorl %eax,%r15d rorl $2,%r14d addl %r10d,%ecx addl %r15d,%r10d movl %ecx,%r13d addl %r10d,%r14d rorl $14,%r13d movl %r14d,%r10d movl %edx,%r12d rorl $9,%r14d xorl %ecx,%r13d xorl %r8d,%r12d rorl $5,%r13d xorl %r10d,%r14d andl %ecx,%r12d xorl %ecx,%r13d addl 40(%rsp),%r9d movl %r10d,%r15d xorl %r8d,%r12d rorl $11,%r14d xorl %r11d,%r15d addl %r12d,%r9d rorl $6,%r13d andl %r15d,%edi xorl %r10d,%r14d addl %r13d,%r9d xorl %r11d,%edi rorl $2,%r14d addl %r9d,%ebx addl %edi,%r9d movl %ebx,%r13d addl %r9d,%r14d rorl $14,%r13d movl %r14d,%r9d movl %ecx,%r12d rorl $9,%r14d xorl %ebx,%r13d xorl %edx,%r12d rorl $5,%r13d xorl %r9d,%r14d andl %ebx,%r12d xorl %ebx,%r13d addl 44(%rsp),%r8d movl %r9d,%edi xorl %edx,%r12d rorl $11,%r14d xorl %r10d,%edi addl %r12d,%r8d rorl $6,%r13d andl %edi,%r15d xorl %r9d,%r14d addl %r13d,%r8d xorl %r10d,%r15d rorl $2,%r14d addl %r8d,%eax addl %r15d,%r8d movl %eax,%r13d addl %r8d,%r14d rorl $14,%r13d movl %r14d,%r8d movl %ebx,%r12d rorl $9,%r14d xorl %eax,%r13d xorl %ecx,%r12d rorl $5,%r13d xorl %r8d,%r14d andl %eax,%r12d xorl %eax,%r13d addl 48(%rsp),%edx movl %r8d,%r15d xorl %ecx,%r12d rorl $11,%r14d xorl %r9d,%r15d addl %r12d,%edx rorl $6,%r13d andl %r15d,%edi xorl %r8d,%r14d addl %r13d,%edx xorl %r9d,%edi rorl $2,%r14d addl %edx,%r11d addl %edi,%edx movl %r11d,%r13d addl %edx,%r14d rorl $14,%r13d movl %r14d,%edx movl %eax,%r12d rorl $9,%r14d xorl %r11d,%r13d xorl %ebx,%r12d rorl $5,%r13d xorl %edx,%r14d andl %r11d,%r12d xorl %r11d,%r13d addl 52(%rsp),%ecx movl %edx,%edi xorl %ebx,%r12d rorl $11,%r14d xorl %r8d,%edi addl %r12d,%ecx rorl $6,%r13d andl %edi,%r15d xorl %edx,%r14d addl %r13d,%ecx xorl %r8d,%r15d rorl $2,%r14d addl %ecx,%r10d addl %r15d,%ecx movl %r10d,%r13d addl %ecx,%r14d rorl $14,%r13d movl %r14d,%ecx movl %r11d,%r12d rorl $9,%r14d xorl %r10d,%r13d xorl %eax,%r12d rorl $5,%r13d xorl %ecx,%r14d andl %r10d,%r12d xorl %r10d,%r13d addl 56(%rsp),%ebx movl %ecx,%r15d xorl %eax,%r12d rorl $11,%r14d xorl %edx,%r15d addl %r12d,%ebx rorl $6,%r13d andl %r15d,%edi xorl %ecx,%r14d addl %r13d,%ebx xorl %edx,%edi rorl $2,%r14d addl %ebx,%r9d addl %edi,%ebx movl %r9d,%r13d addl %ebx,%r14d rorl $14,%r13d movl %r14d,%ebx movl %r10d,%r12d rorl $9,%r14d xorl %r9d,%r13d xorl %r11d,%r12d rorl $5,%r13d xorl %ebx,%r14d andl %r9d,%r12d xorl %r9d,%r13d addl 60(%rsp),%eax movl %ebx,%edi xorl %r11d,%r12d rorl $11,%r14d xorl %ecx,%edi addl %r12d,%eax rorl $6,%r13d andl %edi,%r15d xorl %ebx,%r14d addl %r13d,%eax xorl %ecx,%r15d rorl $2,%r14d addl %eax,%r8d addl %r15d,%eax movl %r8d,%r13d addl %eax,%r14d movq 64+0(%rsp),%rdi movl %r14d,%eax addl 
0(%rdi),%eax leaq 64(%rsi),%rsi addl 4(%rdi),%ebx addl 8(%rdi),%ecx addl 12(%rdi),%edx addl 16(%rdi),%r8d addl 20(%rdi),%r9d addl 24(%rdi),%r10d addl 28(%rdi),%r11d cmpq 64+16(%rsp),%rsi movl %eax,0(%rdi) movl %ebx,4(%rdi) movl %ecx,8(%rdi) movl %edx,12(%rdi) movl %r8d,16(%rdi) movl %r9d,20(%rdi) movl %r10d,24(%rdi) movl %r11d,28(%rdi) jb .Lloop_ssse3 movq 88(%rsp),%rsi .cfi_def_cfa %rsi,8 movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbp .cfi_restore %rbp movq -8(%rsi),%rbx .cfi_restore %rbx leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Lepilogue_ssse3: ret .cfi_endproc .size sha256_block_data_order_ssse3,.-sha256_block_data_order_ssse3 .globl sha256_block_data_order_avx .hidden sha256_block_data_order_avx .type sha256_block_data_order_avx,@function .align 64 sha256_block_data_order_avx: .cfi_startproc _CET_ENDBR movq %rsp,%rax .cfi_def_cfa_register %rax pushq %rbx .cfi_offset %rbx,-16 pushq %rbp .cfi_offset %rbp,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset %r13,-40 pushq %r14 .cfi_offset %r14,-48 pushq %r15 .cfi_offset %r15,-56 shlq $4,%rdx subq $96,%rsp leaq (%rsi,%rdx,4),%rdx andq $-64,%rsp movq %rdi,64+0(%rsp) movq %rsi,64+8(%rsp) movq %rdx,64+16(%rsp) movq %rax,88(%rsp) .cfi_escape 0x0f,0x06,0x77,0xd8,0x00,0x06,0x23,0x08 .Lprologue_avx: vzeroupper movl 0(%rdi),%eax movl 4(%rdi),%ebx movl 8(%rdi),%ecx movl 12(%rdi),%edx movl 16(%rdi),%r8d movl 20(%rdi),%r9d movl 24(%rdi),%r10d movl 28(%rdi),%r11d vmovdqa K256+512+32(%rip),%xmm8 vmovdqa K256+512+64(%rip),%xmm9 jmp .Lloop_avx .align 16 .Lloop_avx: vmovdqa K256+512(%rip),%xmm7 vmovdqu 0(%rsi),%xmm0 vmovdqu 16(%rsi),%xmm1 vmovdqu 32(%rsi),%xmm2 vmovdqu 48(%rsi),%xmm3 vpshufb %xmm7,%xmm0,%xmm0 leaq K256(%rip),%rbp vpshufb %xmm7,%xmm1,%xmm1 vpshufb %xmm7,%xmm2,%xmm2 vpaddd 0(%rbp),%xmm0,%xmm4 vpshufb %xmm7,%xmm3,%xmm3 vpaddd 32(%rbp),%xmm1,%xmm5 vpaddd 64(%rbp),%xmm2,%xmm6 vpaddd 96(%rbp),%xmm3,%xmm7 vmovdqa %xmm4,0(%rsp) movl %eax,%r14d vmovdqa %xmm5,16(%rsp) movl %ebx,%edi vmovdqa %xmm6,32(%rsp) xorl %ecx,%edi vmovdqa %xmm7,48(%rsp) movl %r8d,%r13d jmp .Lavx_00_47 .align 16 .Lavx_00_47: subq $-128,%rbp vpalignr $4,%xmm0,%xmm1,%xmm4 shrdl $14,%r13d,%r13d movl %r14d,%eax movl %r9d,%r12d vpalignr $4,%xmm2,%xmm3,%xmm7 shrdl $9,%r14d,%r14d xorl %r8d,%r13d xorl %r10d,%r12d vpsrld $7,%xmm4,%xmm6 shrdl $5,%r13d,%r13d xorl %eax,%r14d andl %r8d,%r12d vpaddd %xmm7,%xmm0,%xmm0 xorl %r8d,%r13d addl 0(%rsp),%r11d movl %eax,%r15d vpsrld $3,%xmm4,%xmm7 xorl %r10d,%r12d shrdl $11,%r14d,%r14d xorl %ebx,%r15d vpslld $14,%xmm4,%xmm5 addl %r12d,%r11d shrdl $6,%r13d,%r13d andl %r15d,%edi vpxor %xmm6,%xmm7,%xmm4 xorl %eax,%r14d addl %r13d,%r11d xorl %ebx,%edi vpshufd $250,%xmm3,%xmm7 shrdl $2,%r14d,%r14d addl %r11d,%edx addl %edi,%r11d vpsrld $11,%xmm6,%xmm6 movl %edx,%r13d addl %r11d,%r14d shrdl $14,%r13d,%r13d vpxor %xmm5,%xmm4,%xmm4 movl %r14d,%r11d movl %r8d,%r12d shrdl $9,%r14d,%r14d vpslld $11,%xmm5,%xmm5 xorl %edx,%r13d xorl %r9d,%r12d shrdl $5,%r13d,%r13d vpxor %xmm6,%xmm4,%xmm4 xorl %r11d,%r14d andl %edx,%r12d xorl %edx,%r13d vpsrld $10,%xmm7,%xmm6 addl 4(%rsp),%r10d movl %r11d,%edi xorl %r9d,%r12d vpxor %xmm5,%xmm4,%xmm4 shrdl $11,%r14d,%r14d xorl %eax,%edi addl %r12d,%r10d vpsrlq $17,%xmm7,%xmm7 shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %r11d,%r14d vpaddd %xmm4,%xmm0,%xmm0 addl %r13d,%r10d xorl %eax,%r15d shrdl $2,%r14d,%r14d vpxor %xmm7,%xmm6,%xmm6 addl %r10d,%ecx addl %r15d,%r10d movl %ecx,%r13d vpsrlq 
$2,%xmm7,%xmm7 addl %r10d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r10d vpxor %xmm7,%xmm6,%xmm6 movl %edx,%r12d shrdl $9,%r14d,%r14d xorl %ecx,%r13d vpshufb %xmm8,%xmm6,%xmm6 xorl %r8d,%r12d shrdl $5,%r13d,%r13d xorl %r10d,%r14d vpaddd %xmm6,%xmm0,%xmm0 andl %ecx,%r12d xorl %ecx,%r13d addl 8(%rsp),%r9d vpshufd $80,%xmm0,%xmm7 movl %r10d,%r15d xorl %r8d,%r12d shrdl $11,%r14d,%r14d vpsrld $10,%xmm7,%xmm6 xorl %r11d,%r15d addl %r12d,%r9d shrdl $6,%r13d,%r13d vpsrlq $17,%xmm7,%xmm7 andl %r15d,%edi xorl %r10d,%r14d addl %r13d,%r9d vpxor %xmm7,%xmm6,%xmm6 xorl %r11d,%edi shrdl $2,%r14d,%r14d addl %r9d,%ebx vpsrlq $2,%xmm7,%xmm7 addl %edi,%r9d movl %ebx,%r13d addl %r9d,%r14d vpxor %xmm7,%xmm6,%xmm6 shrdl $14,%r13d,%r13d movl %r14d,%r9d movl %ecx,%r12d vpshufb %xmm9,%xmm6,%xmm6 shrdl $9,%r14d,%r14d xorl %ebx,%r13d xorl %edx,%r12d vpaddd %xmm6,%xmm0,%xmm0 shrdl $5,%r13d,%r13d xorl %r9d,%r14d andl %ebx,%r12d vpaddd 0(%rbp),%xmm0,%xmm6 xorl %ebx,%r13d addl 12(%rsp),%r8d movl %r9d,%edi xorl %edx,%r12d shrdl $11,%r14d,%r14d xorl %r10d,%edi addl %r12d,%r8d shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %r9d,%r14d addl %r13d,%r8d xorl %r10d,%r15d shrdl $2,%r14d,%r14d addl %r8d,%eax addl %r15d,%r8d movl %eax,%r13d addl %r8d,%r14d vmovdqa %xmm6,0(%rsp) vpalignr $4,%xmm1,%xmm2,%xmm4 shrdl $14,%r13d,%r13d movl %r14d,%r8d movl %ebx,%r12d vpalignr $4,%xmm3,%xmm0,%xmm7 shrdl $9,%r14d,%r14d xorl %eax,%r13d xorl %ecx,%r12d vpsrld $7,%xmm4,%xmm6 shrdl $5,%r13d,%r13d xorl %r8d,%r14d andl %eax,%r12d vpaddd %xmm7,%xmm1,%xmm1 xorl %eax,%r13d addl 16(%rsp),%edx movl %r8d,%r15d vpsrld $3,%xmm4,%xmm7 xorl %ecx,%r12d shrdl $11,%r14d,%r14d xorl %r9d,%r15d vpslld $14,%xmm4,%xmm5 addl %r12d,%edx shrdl $6,%r13d,%r13d andl %r15d,%edi vpxor %xmm6,%xmm7,%xmm4 xorl %r8d,%r14d addl %r13d,%edx xorl %r9d,%edi vpshufd $250,%xmm0,%xmm7 shrdl $2,%r14d,%r14d addl %edx,%r11d addl %edi,%edx vpsrld $11,%xmm6,%xmm6 movl %r11d,%r13d addl %edx,%r14d shrdl $14,%r13d,%r13d vpxor %xmm5,%xmm4,%xmm4 movl %r14d,%edx movl %eax,%r12d shrdl $9,%r14d,%r14d vpslld $11,%xmm5,%xmm5 xorl %r11d,%r13d xorl %ebx,%r12d shrdl $5,%r13d,%r13d vpxor %xmm6,%xmm4,%xmm4 xorl %edx,%r14d andl %r11d,%r12d xorl %r11d,%r13d vpsrld $10,%xmm7,%xmm6 addl 20(%rsp),%ecx movl %edx,%edi xorl %ebx,%r12d vpxor %xmm5,%xmm4,%xmm4 shrdl $11,%r14d,%r14d xorl %r8d,%edi addl %r12d,%ecx vpsrlq $17,%xmm7,%xmm7 shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %edx,%r14d vpaddd %xmm4,%xmm1,%xmm1 addl %r13d,%ecx xorl %r8d,%r15d shrdl $2,%r14d,%r14d vpxor %xmm7,%xmm6,%xmm6 addl %ecx,%r10d addl %r15d,%ecx movl %r10d,%r13d vpsrlq $2,%xmm7,%xmm7 addl %ecx,%r14d shrdl $14,%r13d,%r13d movl %r14d,%ecx vpxor %xmm7,%xmm6,%xmm6 movl %r11d,%r12d shrdl $9,%r14d,%r14d xorl %r10d,%r13d vpshufb %xmm8,%xmm6,%xmm6 xorl %eax,%r12d shrdl $5,%r13d,%r13d xorl %ecx,%r14d vpaddd %xmm6,%xmm1,%xmm1 andl %r10d,%r12d xorl %r10d,%r13d addl 24(%rsp),%ebx vpshufd $80,%xmm1,%xmm7 movl %ecx,%r15d xorl %eax,%r12d shrdl $11,%r14d,%r14d vpsrld $10,%xmm7,%xmm6 xorl %edx,%r15d addl %r12d,%ebx shrdl $6,%r13d,%r13d vpsrlq $17,%xmm7,%xmm7 andl %r15d,%edi xorl %ecx,%r14d addl %r13d,%ebx vpxor %xmm7,%xmm6,%xmm6 xorl %edx,%edi shrdl $2,%r14d,%r14d addl %ebx,%r9d vpsrlq $2,%xmm7,%xmm7 addl %edi,%ebx movl %r9d,%r13d addl %ebx,%r14d vpxor %xmm7,%xmm6,%xmm6 shrdl $14,%r13d,%r13d movl %r14d,%ebx movl %r10d,%r12d vpshufb %xmm9,%xmm6,%xmm6 shrdl $9,%r14d,%r14d xorl %r9d,%r13d xorl %r11d,%r12d vpaddd %xmm6,%xmm1,%xmm1 shrdl $5,%r13d,%r13d xorl %ebx,%r14d andl %r9d,%r12d vpaddd 32(%rbp),%xmm1,%xmm6 xorl %r9d,%r13d addl 28(%rsp),%eax movl %ebx,%edi xorl 
%r11d,%r12d shrdl $11,%r14d,%r14d xorl %ecx,%edi addl %r12d,%eax shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %ebx,%r14d addl %r13d,%eax xorl %ecx,%r15d shrdl $2,%r14d,%r14d addl %eax,%r8d addl %r15d,%eax movl %r8d,%r13d addl %eax,%r14d vmovdqa %xmm6,16(%rsp) vpalignr $4,%xmm2,%xmm3,%xmm4 shrdl $14,%r13d,%r13d movl %r14d,%eax movl %r9d,%r12d vpalignr $4,%xmm0,%xmm1,%xmm7 shrdl $9,%r14d,%r14d xorl %r8d,%r13d xorl %r10d,%r12d vpsrld $7,%xmm4,%xmm6 shrdl $5,%r13d,%r13d xorl %eax,%r14d andl %r8d,%r12d vpaddd %xmm7,%xmm2,%xmm2 xorl %r8d,%r13d addl 32(%rsp),%r11d movl %eax,%r15d vpsrld $3,%xmm4,%xmm7 xorl %r10d,%r12d shrdl $11,%r14d,%r14d xorl %ebx,%r15d vpslld $14,%xmm4,%xmm5 addl %r12d,%r11d shrdl $6,%r13d,%r13d andl %r15d,%edi vpxor %xmm6,%xmm7,%xmm4 xorl %eax,%r14d addl %r13d,%r11d xorl %ebx,%edi vpshufd $250,%xmm1,%xmm7 shrdl $2,%r14d,%r14d addl %r11d,%edx addl %edi,%r11d vpsrld $11,%xmm6,%xmm6 movl %edx,%r13d addl %r11d,%r14d shrdl $14,%r13d,%r13d vpxor %xmm5,%xmm4,%xmm4 movl %r14d,%r11d movl %r8d,%r12d shrdl $9,%r14d,%r14d vpslld $11,%xmm5,%xmm5 xorl %edx,%r13d xorl %r9d,%r12d shrdl $5,%r13d,%r13d vpxor %xmm6,%xmm4,%xmm4 xorl %r11d,%r14d andl %edx,%r12d xorl %edx,%r13d vpsrld $10,%xmm7,%xmm6 addl 36(%rsp),%r10d movl %r11d,%edi xorl %r9d,%r12d vpxor %xmm5,%xmm4,%xmm4 shrdl $11,%r14d,%r14d xorl %eax,%edi addl %r12d,%r10d vpsrlq $17,%xmm7,%xmm7 shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %r11d,%r14d vpaddd %xmm4,%xmm2,%xmm2 addl %r13d,%r10d xorl %eax,%r15d shrdl $2,%r14d,%r14d vpxor %xmm7,%xmm6,%xmm6 addl %r10d,%ecx addl %r15d,%r10d movl %ecx,%r13d vpsrlq $2,%xmm7,%xmm7 addl %r10d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r10d vpxor %xmm7,%xmm6,%xmm6 movl %edx,%r12d shrdl $9,%r14d,%r14d xorl %ecx,%r13d vpshufb %xmm8,%xmm6,%xmm6 xorl %r8d,%r12d shrdl $5,%r13d,%r13d xorl %r10d,%r14d vpaddd %xmm6,%xmm2,%xmm2 andl %ecx,%r12d xorl %ecx,%r13d addl 40(%rsp),%r9d vpshufd $80,%xmm2,%xmm7 movl %r10d,%r15d xorl %r8d,%r12d shrdl $11,%r14d,%r14d vpsrld $10,%xmm7,%xmm6 xorl %r11d,%r15d addl %r12d,%r9d shrdl $6,%r13d,%r13d vpsrlq $17,%xmm7,%xmm7 andl %r15d,%edi xorl %r10d,%r14d addl %r13d,%r9d vpxor %xmm7,%xmm6,%xmm6 xorl %r11d,%edi shrdl $2,%r14d,%r14d addl %r9d,%ebx vpsrlq $2,%xmm7,%xmm7 addl %edi,%r9d movl %ebx,%r13d addl %r9d,%r14d vpxor %xmm7,%xmm6,%xmm6 shrdl $14,%r13d,%r13d movl %r14d,%r9d movl %ecx,%r12d vpshufb %xmm9,%xmm6,%xmm6 shrdl $9,%r14d,%r14d xorl %ebx,%r13d xorl %edx,%r12d vpaddd %xmm6,%xmm2,%xmm2 shrdl $5,%r13d,%r13d xorl %r9d,%r14d andl %ebx,%r12d vpaddd 64(%rbp),%xmm2,%xmm6 xorl %ebx,%r13d addl 44(%rsp),%r8d movl %r9d,%edi xorl %edx,%r12d shrdl $11,%r14d,%r14d xorl %r10d,%edi addl %r12d,%r8d shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %r9d,%r14d addl %r13d,%r8d xorl %r10d,%r15d shrdl $2,%r14d,%r14d addl %r8d,%eax addl %r15d,%r8d movl %eax,%r13d addl %r8d,%r14d vmovdqa %xmm6,32(%rsp) vpalignr $4,%xmm3,%xmm0,%xmm4 shrdl $14,%r13d,%r13d movl %r14d,%r8d movl %ebx,%r12d vpalignr $4,%xmm1,%xmm2,%xmm7 shrdl $9,%r14d,%r14d xorl %eax,%r13d xorl %ecx,%r12d vpsrld $7,%xmm4,%xmm6 shrdl $5,%r13d,%r13d xorl %r8d,%r14d andl %eax,%r12d vpaddd %xmm7,%xmm3,%xmm3 xorl %eax,%r13d addl 48(%rsp),%edx movl %r8d,%r15d vpsrld $3,%xmm4,%xmm7 xorl %ecx,%r12d shrdl $11,%r14d,%r14d xorl %r9d,%r15d vpslld $14,%xmm4,%xmm5 addl %r12d,%edx shrdl $6,%r13d,%r13d andl %r15d,%edi vpxor %xmm6,%xmm7,%xmm4 xorl %r8d,%r14d addl %r13d,%edx xorl %r9d,%edi vpshufd $250,%xmm2,%xmm7 shrdl $2,%r14d,%r14d addl %edx,%r11d addl %edi,%edx vpsrld $11,%xmm6,%xmm6 movl %r11d,%r13d addl %edx,%r14d shrdl $14,%r13d,%r13d vpxor %xmm5,%xmm4,%xmm4 movl 
%r14d,%edx movl %eax,%r12d shrdl $9,%r14d,%r14d vpslld $11,%xmm5,%xmm5 xorl %r11d,%r13d xorl %ebx,%r12d shrdl $5,%r13d,%r13d vpxor %xmm6,%xmm4,%xmm4 xorl %edx,%r14d andl %r11d,%r12d xorl %r11d,%r13d vpsrld $10,%xmm7,%xmm6 addl 52(%rsp),%ecx movl %edx,%edi xorl %ebx,%r12d vpxor %xmm5,%xmm4,%xmm4 shrdl $11,%r14d,%r14d xorl %r8d,%edi addl %r12d,%ecx vpsrlq $17,%xmm7,%xmm7 shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %edx,%r14d vpaddd %xmm4,%xmm3,%xmm3 addl %r13d,%ecx xorl %r8d,%r15d shrdl $2,%r14d,%r14d vpxor %xmm7,%xmm6,%xmm6 addl %ecx,%r10d addl %r15d,%ecx movl %r10d,%r13d vpsrlq $2,%xmm7,%xmm7 addl %ecx,%r14d shrdl $14,%r13d,%r13d movl %r14d,%ecx vpxor %xmm7,%xmm6,%xmm6 movl %r11d,%r12d shrdl $9,%r14d,%r14d xorl %r10d,%r13d vpshufb %xmm8,%xmm6,%xmm6 xorl %eax,%r12d shrdl $5,%r13d,%r13d xorl %ecx,%r14d vpaddd %xmm6,%xmm3,%xmm3 andl %r10d,%r12d xorl %r10d,%r13d addl 56(%rsp),%ebx vpshufd $80,%xmm3,%xmm7 movl %ecx,%r15d xorl %eax,%r12d shrdl $11,%r14d,%r14d vpsrld $10,%xmm7,%xmm6 xorl %edx,%r15d addl %r12d,%ebx shrdl $6,%r13d,%r13d vpsrlq $17,%xmm7,%xmm7 andl %r15d,%edi xorl %ecx,%r14d addl %r13d,%ebx vpxor %xmm7,%xmm6,%xmm6 xorl %edx,%edi shrdl $2,%r14d,%r14d addl %ebx,%r9d vpsrlq $2,%xmm7,%xmm7 addl %edi,%ebx movl %r9d,%r13d addl %ebx,%r14d vpxor %xmm7,%xmm6,%xmm6 shrdl $14,%r13d,%r13d movl %r14d,%ebx movl %r10d,%r12d vpshufb %xmm9,%xmm6,%xmm6 shrdl $9,%r14d,%r14d xorl %r9d,%r13d xorl %r11d,%r12d vpaddd %xmm6,%xmm3,%xmm3 shrdl $5,%r13d,%r13d xorl %ebx,%r14d andl %r9d,%r12d vpaddd 96(%rbp),%xmm3,%xmm6 xorl %r9d,%r13d addl 60(%rsp),%eax movl %ebx,%edi xorl %r11d,%r12d shrdl $11,%r14d,%r14d xorl %ecx,%edi addl %r12d,%eax shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %ebx,%r14d addl %r13d,%eax xorl %ecx,%r15d shrdl $2,%r14d,%r14d addl %eax,%r8d addl %r15d,%eax movl %r8d,%r13d addl %eax,%r14d vmovdqa %xmm6,48(%rsp) cmpb $0,131(%rbp) jne .Lavx_00_47 shrdl $14,%r13d,%r13d movl %r14d,%eax movl %r9d,%r12d shrdl $9,%r14d,%r14d xorl %r8d,%r13d xorl %r10d,%r12d shrdl $5,%r13d,%r13d xorl %eax,%r14d andl %r8d,%r12d xorl %r8d,%r13d addl 0(%rsp),%r11d movl %eax,%r15d xorl %r10d,%r12d shrdl $11,%r14d,%r14d xorl %ebx,%r15d addl %r12d,%r11d shrdl $6,%r13d,%r13d andl %r15d,%edi xorl %eax,%r14d addl %r13d,%r11d xorl %ebx,%edi shrdl $2,%r14d,%r14d addl %r11d,%edx addl %edi,%r11d movl %edx,%r13d addl %r11d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r11d movl %r8d,%r12d shrdl $9,%r14d,%r14d xorl %edx,%r13d xorl %r9d,%r12d shrdl $5,%r13d,%r13d xorl %r11d,%r14d andl %edx,%r12d xorl %edx,%r13d addl 4(%rsp),%r10d movl %r11d,%edi xorl %r9d,%r12d shrdl $11,%r14d,%r14d xorl %eax,%edi addl %r12d,%r10d shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %r11d,%r14d addl %r13d,%r10d xorl %eax,%r15d shrdl $2,%r14d,%r14d addl %r10d,%ecx addl %r15d,%r10d movl %ecx,%r13d addl %r10d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r10d movl %edx,%r12d shrdl $9,%r14d,%r14d xorl %ecx,%r13d xorl %r8d,%r12d shrdl $5,%r13d,%r13d xorl %r10d,%r14d andl %ecx,%r12d xorl %ecx,%r13d addl 8(%rsp),%r9d movl %r10d,%r15d xorl %r8d,%r12d shrdl $11,%r14d,%r14d xorl %r11d,%r15d addl %r12d,%r9d shrdl $6,%r13d,%r13d andl %r15d,%edi xorl %r10d,%r14d addl %r13d,%r9d xorl %r11d,%edi shrdl $2,%r14d,%r14d addl %r9d,%ebx addl %edi,%r9d movl %ebx,%r13d addl %r9d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r9d movl %ecx,%r12d shrdl $9,%r14d,%r14d xorl %ebx,%r13d xorl %edx,%r12d shrdl $5,%r13d,%r13d xorl %r9d,%r14d andl %ebx,%r12d xorl %ebx,%r13d addl 12(%rsp),%r8d movl %r9d,%edi xorl %edx,%r12d shrdl $11,%r14d,%r14d xorl %r10d,%edi addl %r12d,%r8d shrdl $6,%r13d,%r13d andl %edi,%r15d xorl 
%r9d,%r14d addl %r13d,%r8d xorl %r10d,%r15d shrdl $2,%r14d,%r14d addl %r8d,%eax addl %r15d,%r8d movl %eax,%r13d addl %r8d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r8d movl %ebx,%r12d shrdl $9,%r14d,%r14d xorl %eax,%r13d xorl %ecx,%r12d shrdl $5,%r13d,%r13d xorl %r8d,%r14d andl %eax,%r12d xorl %eax,%r13d addl 16(%rsp),%edx movl %r8d,%r15d xorl %ecx,%r12d shrdl $11,%r14d,%r14d xorl %r9d,%r15d addl %r12d,%edx shrdl $6,%r13d,%r13d andl %r15d,%edi xorl %r8d,%r14d addl %r13d,%edx xorl %r9d,%edi shrdl $2,%r14d,%r14d addl %edx,%r11d addl %edi,%edx movl %r11d,%r13d addl %edx,%r14d shrdl $14,%r13d,%r13d movl %r14d,%edx movl %eax,%r12d shrdl $9,%r14d,%r14d xorl %r11d,%r13d xorl %ebx,%r12d shrdl $5,%r13d,%r13d xorl %edx,%r14d andl %r11d,%r12d xorl %r11d,%r13d addl 20(%rsp),%ecx movl %edx,%edi xorl %ebx,%r12d shrdl $11,%r14d,%r14d xorl %r8d,%edi addl %r12d,%ecx shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %edx,%r14d addl %r13d,%ecx xorl %r8d,%r15d shrdl $2,%r14d,%r14d addl %ecx,%r10d addl %r15d,%ecx movl %r10d,%r13d addl %ecx,%r14d shrdl $14,%r13d,%r13d movl %r14d,%ecx movl %r11d,%r12d shrdl $9,%r14d,%r14d xorl %r10d,%r13d xorl %eax,%r12d shrdl $5,%r13d,%r13d xorl %ecx,%r14d andl %r10d,%r12d xorl %r10d,%r13d addl 24(%rsp),%ebx movl %ecx,%r15d xorl %eax,%r12d shrdl $11,%r14d,%r14d xorl %edx,%r15d addl %r12d,%ebx shrdl $6,%r13d,%r13d andl %r15d,%edi xorl %ecx,%r14d addl %r13d,%ebx xorl %edx,%edi shrdl $2,%r14d,%r14d addl %ebx,%r9d addl %edi,%ebx movl %r9d,%r13d addl %ebx,%r14d shrdl $14,%r13d,%r13d movl %r14d,%ebx movl %r10d,%r12d shrdl $9,%r14d,%r14d xorl %r9d,%r13d xorl %r11d,%r12d shrdl $5,%r13d,%r13d xorl %ebx,%r14d andl %r9d,%r12d xorl %r9d,%r13d addl 28(%rsp),%eax movl %ebx,%edi xorl %r11d,%r12d shrdl $11,%r14d,%r14d xorl %ecx,%edi addl %r12d,%eax shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %ebx,%r14d addl %r13d,%eax xorl %ecx,%r15d shrdl $2,%r14d,%r14d addl %eax,%r8d addl %r15d,%eax movl %r8d,%r13d addl %eax,%r14d shrdl $14,%r13d,%r13d movl %r14d,%eax movl %r9d,%r12d shrdl $9,%r14d,%r14d xorl %r8d,%r13d xorl %r10d,%r12d shrdl $5,%r13d,%r13d xorl %eax,%r14d andl %r8d,%r12d xorl %r8d,%r13d addl 32(%rsp),%r11d movl %eax,%r15d xorl %r10d,%r12d shrdl $11,%r14d,%r14d xorl %ebx,%r15d addl %r12d,%r11d shrdl $6,%r13d,%r13d andl %r15d,%edi xorl %eax,%r14d addl %r13d,%r11d xorl %ebx,%edi shrdl $2,%r14d,%r14d addl %r11d,%edx addl %edi,%r11d movl %edx,%r13d addl %r11d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r11d movl %r8d,%r12d shrdl $9,%r14d,%r14d xorl %edx,%r13d xorl %r9d,%r12d shrdl $5,%r13d,%r13d xorl %r11d,%r14d andl %edx,%r12d xorl %edx,%r13d addl 36(%rsp),%r10d movl %r11d,%edi xorl %r9d,%r12d shrdl $11,%r14d,%r14d xorl %eax,%edi addl %r12d,%r10d shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %r11d,%r14d addl %r13d,%r10d xorl %eax,%r15d shrdl $2,%r14d,%r14d addl %r10d,%ecx addl %r15d,%r10d movl %ecx,%r13d addl %r10d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r10d movl %edx,%r12d shrdl $9,%r14d,%r14d xorl %ecx,%r13d xorl %r8d,%r12d shrdl $5,%r13d,%r13d xorl %r10d,%r14d andl %ecx,%r12d xorl %ecx,%r13d addl 40(%rsp),%r9d movl %r10d,%r15d xorl %r8d,%r12d shrdl $11,%r14d,%r14d xorl %r11d,%r15d addl %r12d,%r9d shrdl $6,%r13d,%r13d andl %r15d,%edi xorl %r10d,%r14d addl %r13d,%r9d xorl %r11d,%edi shrdl $2,%r14d,%r14d addl %r9d,%ebx addl %edi,%r9d movl %ebx,%r13d addl %r9d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r9d movl %ecx,%r12d shrdl $9,%r14d,%r14d xorl %ebx,%r13d xorl %edx,%r12d shrdl $5,%r13d,%r13d xorl %r9d,%r14d andl %ebx,%r12d xorl %ebx,%r13d addl 44(%rsp),%r8d movl %r9d,%edi xorl %edx,%r12d shrdl $11,%r14d,%r14d xorl 
%r10d,%edi addl %r12d,%r8d shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %r9d,%r14d addl %r13d,%r8d xorl %r10d,%r15d shrdl $2,%r14d,%r14d addl %r8d,%eax addl %r15d,%r8d movl %eax,%r13d addl %r8d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r8d movl %ebx,%r12d shrdl $9,%r14d,%r14d xorl %eax,%r13d xorl %ecx,%r12d shrdl $5,%r13d,%r13d xorl %r8d,%r14d andl %eax,%r12d xorl %eax,%r13d addl 48(%rsp),%edx movl %r8d,%r15d xorl %ecx,%r12d shrdl $11,%r14d,%r14d xorl %r9d,%r15d addl %r12d,%edx shrdl $6,%r13d,%r13d andl %r15d,%edi xorl %r8d,%r14d addl %r13d,%edx xorl %r9d,%edi shrdl $2,%r14d,%r14d addl %edx,%r11d addl %edi,%edx movl %r11d,%r13d addl %edx,%r14d shrdl $14,%r13d,%r13d movl %r14d,%edx movl %eax,%r12d shrdl $9,%r14d,%r14d xorl %r11d,%r13d xorl %ebx,%r12d shrdl $5,%r13d,%r13d xorl %edx,%r14d andl %r11d,%r12d xorl %r11d,%r13d addl 52(%rsp),%ecx movl %edx,%edi xorl %ebx,%r12d shrdl $11,%r14d,%r14d xorl %r8d,%edi addl %r12d,%ecx shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %edx,%r14d addl %r13d,%ecx xorl %r8d,%r15d shrdl $2,%r14d,%r14d addl %ecx,%r10d addl %r15d,%ecx movl %r10d,%r13d addl %ecx,%r14d shrdl $14,%r13d,%r13d movl %r14d,%ecx movl %r11d,%r12d shrdl $9,%r14d,%r14d xorl %r10d,%r13d xorl %eax,%r12d shrdl $5,%r13d,%r13d xorl %ecx,%r14d andl %r10d,%r12d xorl %r10d,%r13d addl 56(%rsp),%ebx movl %ecx,%r15d xorl %eax,%r12d shrdl $11,%r14d,%r14d xorl %edx,%r15d addl %r12d,%ebx shrdl $6,%r13d,%r13d andl %r15d,%edi xorl %ecx,%r14d addl %r13d,%ebx xorl %edx,%edi shrdl $2,%r14d,%r14d addl %ebx,%r9d addl %edi,%ebx movl %r9d,%r13d addl %ebx,%r14d shrdl $14,%r13d,%r13d movl %r14d,%ebx movl %r10d,%r12d shrdl $9,%r14d,%r14d xorl %r9d,%r13d xorl %r11d,%r12d shrdl $5,%r13d,%r13d xorl %ebx,%r14d andl %r9d,%r12d xorl %r9d,%r13d addl 60(%rsp),%eax movl %ebx,%edi xorl %r11d,%r12d shrdl $11,%r14d,%r14d xorl %ecx,%edi addl %r12d,%eax shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %ebx,%r14d addl %r13d,%eax xorl %ecx,%r15d shrdl $2,%r14d,%r14d addl %eax,%r8d addl %r15d,%eax movl %r8d,%r13d addl %eax,%r14d movq 64+0(%rsp),%rdi movl %r14d,%eax addl 0(%rdi),%eax leaq 64(%rsi),%rsi addl 4(%rdi),%ebx addl 8(%rdi),%ecx addl 12(%rdi),%edx addl 16(%rdi),%r8d addl 20(%rdi),%r9d addl 24(%rdi),%r10d addl 28(%rdi),%r11d cmpq 64+16(%rsp),%rsi movl %eax,0(%rdi) movl %ebx,4(%rdi) movl %ecx,8(%rdi) movl %edx,12(%rdi) movl %r8d,16(%rdi) movl %r9d,20(%rdi) movl %r10d,24(%rdi) movl %r11d,28(%rdi) jb .Lloop_avx movq 88(%rsp),%rsi .cfi_def_cfa %rsi,8 vzeroupper movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbp .cfi_restore %rbp movq -8(%rsi),%rbx .cfi_restore %rbx leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Lepilogue_avx: ret .cfi_endproc .size sha256_block_data_order_avx,.-sha256_block_data_order_avx #endif
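In the SSSE3 and AVX blocks above, the XMM registers carry the SHA-256 message schedule: the vpalignr/vpsrld/vpslld/vpshufd sequences expand four schedule words at a time (the sigma0/sigma1 terms), while the general-purpose registers run the round function itself through composed rotates. For orientation, the update the scalar side performs each round is the standard SHA-256 step. Below is a minimal Rust rendering of that step, a sketch for clarity only; the helper names (sha256_round and friends) are illustrative and are not part of this generated file or of any library API.

fn big_sigma0(x: u32) -> u32 {
    x.rotate_right(2) ^ x.rotate_right(13) ^ x.rotate_right(22)
}
fn big_sigma1(x: u32) -> u32 {
    x.rotate_right(6) ^ x.rotate_right(11) ^ x.rotate_right(25)
}
fn ch(x: u32, y: u32, z: u32) -> u32 {
    (x & y) ^ (!x & z)
}
fn maj(x: u32, y: u32, z: u32) -> u32 {
    (x & y) ^ (x & z) ^ (y & z)
}

// One SHA-256 round: fold schedule word `w` and round constant `k` into the
// working state (a..h), the same update the scalar register sequence above computes.
fn sha256_round(state: &mut [u32; 8], w: u32, k: u32) {
    let [a, b, c, d, e, f, g, h] = *state;
    let t1 = h
        .wrapping_add(big_sigma1(e))
        .wrapping_add(ch(e, f, g))
        .wrapping_add(k)
        .wrapping_add(w);
    let t2 = big_sigma0(a).wrapping_add(maj(a, b, c));
    *state = [t1.wrapping_add(t2), a, b, c, d.wrapping_add(t1), e, f, g];
}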
fatiimajamiil/rustpad-custom
4,266
.cargo/registry/src/index.crates.io-6f17d22bba15001f/ring-0.17.14/pregenerated/ghashv8-armx-linux64.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__) #if __ARM_MAX_ARCH__>=7 .text .arch armv8-a+crypto .globl gcm_init_clmul .hidden gcm_init_clmul .type gcm_init_clmul,%function .align 4 gcm_init_clmul: AARCH64_VALID_CALL_TARGET ld1 {v17.2d},[x1] //load input H movi v19.16b,#0xe1 shl v19.2d,v19.2d,#57 //0xc2.0 ext v3.16b,v17.16b,v17.16b,#8 ushr v18.2d,v19.2d,#63 dup v17.4s,v17.s[1] ext v16.16b,v18.16b,v19.16b,#8 //t0=0xc2....01 ushr v18.2d,v3.2d,#63 sshr v17.4s,v17.4s,#31 //broadcast carry bit and v18.16b,v18.16b,v16.16b shl v3.2d,v3.2d,#1 ext v18.16b,v18.16b,v18.16b,#8 and v16.16b,v16.16b,v17.16b orr v3.16b,v3.16b,v18.16b //H<<<=1 eor v20.16b,v3.16b,v16.16b //twisted H st1 {v20.2d},[x0],#16 //store Htable[0] //calculate H^2 ext v16.16b,v20.16b,v20.16b,#8 //Karatsuba pre-processing pmull v0.1q,v20.1d,v20.1d eor v16.16b,v16.16b,v20.16b pmull2 v2.1q,v20.2d,v20.2d pmull v1.1q,v16.1d,v16.1d ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v17.16b eor v1.16b,v1.16b,v18.16b pmull v18.1q,v0.1d,v19.1d //1st phase ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] eor v0.16b,v1.16b,v18.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase pmull v0.1q,v0.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v22.16b,v0.16b,v18.16b ext v17.16b,v22.16b,v22.16b,#8 //Karatsuba pre-processing eor v17.16b,v17.16b,v22.16b ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed st1 {v21.2d,v22.2d},[x0],#32 //store Htable[1..2] //calculate H^3 and H^4 pmull v0.1q,v20.1d, v22.1d pmull v5.1q,v22.1d,v22.1d pmull2 v2.1q,v20.2d, v22.2d pmull2 v7.1q,v22.2d,v22.2d pmull v1.1q,v16.1d,v17.1d pmull v6.1q,v17.1d,v17.1d ext v16.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing ext v17.16b,v5.16b,v7.16b,#8 eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v16.16b eor v4.16b,v5.16b,v7.16b eor v6.16b,v6.16b,v17.16b eor v1.16b,v1.16b,v18.16b pmull v18.1q,v0.1d,v19.1d //1st phase eor v6.16b,v6.16b,v4.16b pmull v4.1q,v5.1d,v19.1d ins v2.d[0],v1.d[1] ins v7.d[0],v6.d[1] ins v1.d[1],v0.d[0] ins v6.d[1],v5.d[0] eor v0.16b,v1.16b,v18.16b eor v5.16b,v6.16b,v4.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase ext v4.16b,v5.16b,v5.16b,#8 pmull v0.1q,v0.1d,v19.1d pmull v5.1q,v5.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v4.16b,v4.16b,v7.16b eor v20.16b, v0.16b,v18.16b //H^3 eor v22.16b,v5.16b,v4.16b //H^4 ext v16.16b,v20.16b, v20.16b,#8 //Karatsuba pre-processing ext v17.16b,v22.16b,v22.16b,#8 eor v16.16b,v16.16b,v20.16b eor v17.16b,v17.16b,v22.16b ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed st1 {v20.2d,v21.2d,v22.2d},[x0] //store Htable[3..5] ret .size gcm_init_clmul,.-gcm_init_clmul .globl gcm_gmult_clmul .hidden gcm_gmult_clmul .type gcm_gmult_clmul,%function .align 4 gcm_gmult_clmul: AARCH64_VALID_CALL_TARGET ld1 {v17.2d},[x0] //load Xi movi v19.16b,#0xe1 ld1 {v20.2d,v21.2d},[x1] //load twisted H, ... 
shl v19.2d,v19.2d,#57 #ifndef __AARCH64EB__ rev64 v17.16b,v17.16b #endif ext v3.16b,v17.16b,v17.16b,#8 pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi) ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v17.16b eor v1.16b,v1.16b,v18.16b pmull v18.1q,v0.1d,v19.1d //1st phase of reduction ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] eor v0.16b,v1.16b,v18.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction pmull v0.1q,v0.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v0.16b,v0.16b,v18.16b #ifndef __AARCH64EB__ rev64 v0.16b,v0.16b #endif ext v0.16b,v0.16b,v0.16b,#8 st1 {v0.2d},[x0] //write out Xi ret .size gcm_gmult_clmul,.-gcm_gmult_clmul .byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
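gcm_init_clmul above stores H, H^2, H^3 and H^4 in a "twisted" representation, together with the Karatsuba pre-processed halves, so that gcm_gmult_clmul can form each GF(2^128) product from three PMULL multiplies plus a two-phase reduction against the 0xe1 constant. As a point of reference, the same product can also be written as the textbook bit-serial multiply from the GCM specification. The Rust sketch below does that in the plain (untwisted) bit order; the name ghash_mul is hypothetical, and this is a functional reference only, not the code path used by the assembly.

// Bit-serial GF(2^128) multiply as specified for GHASH (NIST SP 800-38D),
// with blocks interpreted as big-endian 128-bit integers.
fn ghash_mul(x: u128, y: u128) -> u128 {
    // R encodes the reduction polynomial x^128 + x^7 + x^2 + x + 1.
    const R: u128 = 0xE1u128 << 120;
    let mut z = 0u128;
    let mut v = y;
    for i in 0..128 {
        // Take the bits of x from the most significant end, per the spec.
        if ((x >> (127 - i)) & 1) == 1 {
            z ^= v;
        }
        // Shift v (multiplication by x in GCM's bit order); reduce if a bit falls off.
        let lsb = v & 1;
        v >>= 1;
        if lsb == 1 {
            v ^= R;
        }
    }
    z
}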
fatiimajamiil/rustpad-custom
4,229
.cargo/registry/src/index.crates.io-6f17d22bba15001f/ring-0.17.14/pregenerated/x86-mont-elf.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) .text .globl bn_mul_mont .hidden bn_mul_mont .type bn_mul_mont,@function .align 16 bn_mul_mont: .L_bn_mul_mont_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi xorl %eax,%eax movl 40(%esp),%edi leal 20(%esp),%esi leal 24(%esp),%edx addl $2,%edi negl %edi leal -32(%esp,%edi,4),%ebp negl %edi movl %ebp,%eax subl %edx,%eax andl $2047,%eax subl %eax,%ebp xorl %ebp,%edx andl $2048,%edx xorl $2048,%edx subl %edx,%ebp andl $-64,%ebp movl %esp,%eax subl %ebp,%eax andl $-4096,%eax movl %esp,%edx leal (%ebp,%eax,1),%esp movl (%esp),%eax cmpl %ebp,%esp ja .L000page_walk jmp .L001page_walk_done .align 16 .L000page_walk: leal -4096(%esp),%esp movl (%esp),%eax cmpl %ebp,%esp ja .L000page_walk .L001page_walk_done: movl (%esi),%eax movl 4(%esi),%ebx movl 8(%esi),%ecx movl 12(%esi),%ebp movl 16(%esi),%esi movl (%esi),%esi movl %eax,4(%esp) movl %ebx,8(%esp) movl %ecx,12(%esp) movl %ebp,16(%esp) movl %esi,20(%esp) leal -3(%edi),%ebx movl %edx,24(%esp) movl $-1,%eax movd %eax,%mm7 movl 8(%esp),%esi movl 12(%esp),%edi movl 16(%esp),%ebp xorl %edx,%edx xorl %ecx,%ecx movd (%edi),%mm4 movd (%esi),%mm5 movd (%ebp),%mm3 pmuludq %mm4,%mm5 movq %mm5,%mm2 movq %mm5,%mm0 pand %mm7,%mm0 pmuludq 20(%esp),%mm5 pmuludq %mm5,%mm3 paddq %mm0,%mm3 movd 4(%ebp),%mm1 movd 4(%esi),%mm0 psrlq $32,%mm2 psrlq $32,%mm3 incl %ecx .align 16 .L0021st: pmuludq %mm4,%mm0 pmuludq %mm5,%mm1 paddq %mm0,%mm2 paddq %mm1,%mm3 movq %mm2,%mm0 pand %mm7,%mm0 movd 4(%ebp,%ecx,4),%mm1 paddq %mm0,%mm3 movd 4(%esi,%ecx,4),%mm0 psrlq $32,%mm2 movd %mm3,28(%esp,%ecx,4) psrlq $32,%mm3 leal 1(%ecx),%ecx cmpl %ebx,%ecx jl .L0021st pmuludq %mm4,%mm0 pmuludq %mm5,%mm1 paddq %mm0,%mm2 paddq %mm1,%mm3 movq %mm2,%mm0 pand %mm7,%mm0 paddq %mm0,%mm3 movd %mm3,28(%esp,%ecx,4) psrlq $32,%mm2 psrlq $32,%mm3 paddq %mm2,%mm3 movq %mm3,32(%esp,%ebx,4) incl %edx .L003outer: xorl %ecx,%ecx movd (%edi,%edx,4),%mm4 movd (%esi),%mm5 movd 32(%esp),%mm6 movd (%ebp),%mm3 pmuludq %mm4,%mm5 paddq %mm6,%mm5 movq %mm5,%mm0 movq %mm5,%mm2 pand %mm7,%mm0 pmuludq 20(%esp),%mm5 pmuludq %mm5,%mm3 paddq %mm0,%mm3 movd 36(%esp),%mm6 movd 4(%ebp),%mm1 movd 4(%esi),%mm0 psrlq $32,%mm2 psrlq $32,%mm3 paddq %mm6,%mm2 incl %ecx decl %ebx .L004inner: pmuludq %mm4,%mm0 pmuludq %mm5,%mm1 paddq %mm0,%mm2 paddq %mm1,%mm3 movq %mm2,%mm0 movd 36(%esp,%ecx,4),%mm6 pand %mm7,%mm0 movd 4(%ebp,%ecx,4),%mm1 paddq %mm0,%mm3 movd 4(%esi,%ecx,4),%mm0 psrlq $32,%mm2 movd %mm3,28(%esp,%ecx,4) psrlq $32,%mm3 paddq %mm6,%mm2 decl %ebx leal 1(%ecx),%ecx jnz .L004inner movl %ecx,%ebx pmuludq %mm4,%mm0 pmuludq %mm5,%mm1 paddq %mm0,%mm2 paddq %mm1,%mm3 movq %mm2,%mm0 pand %mm7,%mm0 paddq %mm0,%mm3 movd %mm3,28(%esp,%ecx,4) psrlq $32,%mm2 psrlq $32,%mm3 movd 36(%esp,%ebx,4),%mm6 paddq %mm2,%mm3 paddq %mm6,%mm3 movq %mm3,32(%esp,%ebx,4) leal 1(%edx),%edx cmpl %ebx,%edx jle .L003outer emms jmp .L005common_tail .align 16 .L005common_tail: movl 16(%esp),%ebp movl 4(%esp),%edi leal 32(%esp),%esi movl (%esi),%eax movl %ebx,%ecx xorl %edx,%edx .align 16 .L006sub: sbbl (%ebp,%edx,4),%eax movl %eax,(%edi,%edx,4) decl %ecx movl 4(%esi,%edx,4),%eax leal 1(%edx),%edx jge .L006sub sbbl $0,%eax movl $-1,%edx xorl %eax,%edx jmp .L007copy .align 16 .L007copy: movl 32(%esp,%ebx,4),%esi movl (%edi,%ebx,4),%ebp movl %ecx,32(%esp,%ebx,4) andl %eax,%esi andl %edx,%ebp orl %esi,%ebp movl %ebp,(%edi,%ebx,4) decl %ebx 
jge .L007copy movl 24(%esp),%esp movl $1,%eax popl %edi popl %esi popl %ebx popl %ebp ret .size bn_mul_mont,.-.L_bn_mul_mont_begin .byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105 .byte 112,108,105,99,97,116,105,111,110,32,102,111,114,32,120,56 .byte 54,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121 .byte 32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46 .byte 111,114,103,62,0 #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
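bn_mul_mont above is a word-serial Montgomery multiplication: with 32-bit limbs it returns a*b*R^-1 mod n for R = 2^(32*num), using the per-word inverse n0 = -n^-1 mod 2^32 to clear one low limb per outer iteration (the pmuludq loops), and it ends with the conditional subtraction at .L006sub/.L007copy. The Rust sketch below performs the same computation in a simpler, non-interleaved form, a schoolbook product followed by word-by-word reduction; mont_mul is a hypothetical reference name, and this mirrors neither ring's API nor the interleaved loop structure of the assembly.

// Reference Montgomery product: a * b * R^-1 mod n, R = 2^(32 * n.len()),
// little-endian 32-bit limbs, n odd, a < n, b < n, n0 = -n[0]^-1 mod 2^32.
fn mont_mul(a: &[u32], b: &[u32], n: &[u32], n0: u32) -> Vec<u32> {
    let s = n.len();
    // 1. Schoolbook product t = a * b over 2*s limbs (plus one spare limb).
    let mut t = vec![0u32; 2 * s + 1];
    for i in 0..s {
        let mut carry = 0u64;
        for j in 0..s {
            let v = t[i + j] as u64 + a[i] as u64 * b[j] as u64 + carry;
            t[i + j] = v as u32;
            carry = v >> 32;
        }
        t[i + s] = carry as u32;
    }
    // 2. Word-by-word reduction: adding m * n * 2^(32*i) zeroes limb i.
    for i in 0..s {
        let m = t[i].wrapping_mul(n0);
        let mut carry = 0u64;
        for j in 0..s {
            let v = t[i + j] as u64 + m as u64 * n[j] as u64 + carry;
            t[i + j] = v as u32;
            carry = v >> 32;
        }
        // Propagate the remaining carry into limb i + s and upward.
        let mut k = i + s;
        while carry != 0 {
            let v = t[k] as u64 + carry;
            t[k] = v as u32;
            carry = v >> 32;
            k += 1;
        }
    }
    // 3. The reduced value is t[s..2s] with a possible extra bit in t[2s];
    //    subtract n once if it is still >= n (cf. .L006sub / .L007copy above).
    let mut r: Vec<u32> = t[s..2 * s].to_vec();
    let mut d = vec![0u32; s];
    let mut borrow = 0u64;
    for j in 0..s {
        let v = (r[j] as u64).wrapping_sub(n[j] as u64 + borrow);
        d[j] = v as u32;
        borrow = (v >> 63) & 1;
    }
    if t[2 * s] != 0 || borrow == 0 {
        r = d;
    }
    r
}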
fatiimajamiil/rustpad-custom
42,856
.cargo/registry/src/index.crates.io-6f17d22bba15001f/ring-0.17.14/pregenerated/sha512-armv4-linux32.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__) @ Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved. @ @ Licensed under the Apache License, Version 2.0 (the "License"); @ you may not use this file except in compliance with the License. @ You may obtain a copy of the License at @ @ https://www.apache.org/licenses/LICENSE-2.0 @ @ Unless required by applicable law or agreed to in writing, software @ distributed under the License is distributed on an "AS IS" BASIS, @ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @ See the License for the specific language governing permissions and @ limitations under the License. @ ==================================================================== @ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL @ project. @ ==================================================================== @ SHA512 block procedure for ARMv4. September 2007. @ This code is ~4.5 (four and a half) times faster than code generated @ by gcc 3.4 and it spends ~72 clock cycles per byte [on single-issue @ Xscale PXA250 core]. @ @ July 2010. @ @ Rescheduling for dual-issue pipeline resulted in 6% improvement on @ Cortex A8 core and ~40 cycles per processed byte. @ February 2011. @ @ Profiler-assisted and platform-specific optimization resulted in 7% @ improvement on Coxtex A8 core and ~38 cycles per byte. @ March 2011. @ @ Add NEON implementation. On Cortex A8 it was measured to process @ one byte in 23.3 cycles or ~60% faster than integer-only code. @ August 2012. @ @ Improve NEON performance by 12% on Snapdragon S4. In absolute @ terms it's 22.6 cycles per byte, which is disappointing result. @ Technical writers asserted that 3-way S4 pipeline can sustain @ multiple NEON instructions per cycle, but dual NEON issue could @ not be observed, see http://www.openssl.org/~appro/Snapdragon-S4.html @ for further details. On side note Cortex-A15 processes one byte in @ 16 cycles. @ Byte order [in]dependence. ========================================= @ @ Originally caller was expected to maintain specific *dword* order in @ h[0-7], namely with most significant dword at *lower* address, which @ was reflected in below two parameters as 0 and 4. Now caller is @ expected to maintain native byte order for whole 64-bit values. #ifndef __KERNEL__ # define VFP_ABI_PUSH vstmdb sp!,{d8-d15} # define VFP_ABI_POP vldmia sp!,{d8-d15} #else # define __ARM_MAX_ARCH__ 7 # define VFP_ABI_PUSH # define VFP_ABI_POP #endif @ Silence ARMv8 deprecated IT instruction warnings. This file is used by both @ ARMv7 and ARMv8 processors and does not use ARMv8 instructions. 
.arch armv7-a #ifdef __ARMEL__ # define LO 0 # define HI 4 # define WORD64(hi0,lo0,hi1,lo1) .word lo0,hi0, lo1,hi1 #else # define HI 0 # define LO 4 # define WORD64(hi0,lo0,hi1,lo1) .word hi0,lo0, hi1,lo1 #endif .text #if defined(__thumb2__) .syntax unified .thumb # define adrl adr #else .code 32 #endif .type K512,%object .align 5 K512: WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd) WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc) WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019) WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118) WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe) WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2) WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1) WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694) WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3) WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65) WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483) WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5) WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210) WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4) WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725) WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70) WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926) WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df) WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8) WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b) WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001) WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30) WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910) WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8) WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53) WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8) WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb) WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3) WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60) WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec) WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9) WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b) WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207) WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178) WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6) WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b) WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493) WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c) WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a) WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817) .size K512,.-K512 .globl sha512_block_data_order_nohw .hidden sha512_block_data_order_nohw .type sha512_block_data_order_nohw,%function sha512_block_data_order_nohw: add r2,r1,r2,lsl#7 @ len to point at the end of inp stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} adr r14,K512 sub sp,sp,#9*8 ldr r7,[r0,#32+LO] ldr r8,[r0,#32+HI] ldr r9, [r0,#48+LO] ldr r10, [r0,#48+HI] ldr r11, [r0,#56+LO] ldr r12, [r0,#56+HI] .Loop: str r9, [sp,#48+0] str r10, [sp,#48+4] str r11, [sp,#56+0] str r12, [sp,#56+4] ldr r5,[r0,#0+LO] ldr r6,[r0,#0+HI] ldr r3,[r0,#8+LO] ldr r4,[r0,#8+HI] ldr r9, [r0,#16+LO] ldr r10, [r0,#16+HI] ldr r11, [r0,#24+LO] ldr r12, [r0,#24+HI] str r3,[sp,#8+0] str r4,[sp,#8+4] str r9, [sp,#16+0] str r10, [sp,#16+4] str r11, [sp,#24+0] str r12, [sp,#24+4] ldr r3,[r0,#40+LO] ldr r4,[r0,#40+HI] str r3,[sp,#40+0] str r4,[sp,#40+4] .L00_15: #if __ARM_ARCH<7 ldrb r3,[r1,#7] ldrb r9, [r1,#6] ldrb r10, [r1,#5] ldrb r11, [r1,#4] ldrb r4,[r1,#3] ldrb r12, [r1,#2] orr r3,r3,r9,lsl#8 ldrb r9, [r1,#1] orr r3,r3,r10,lsl#16 ldrb r10, [r1],#8 orr r3,r3,r11,lsl#24 orr r4,r4,r12,lsl#8 orr 
r4,r4,r9,lsl#16 orr r4,r4,r10,lsl#24 #else ldr r3,[r1,#4] ldr r4,[r1],#8 #ifdef __ARMEL__ rev r3,r3 rev r4,r4 #endif #endif @ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41)) @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23 @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23 mov r9,r7,lsr#14 str r3,[sp,#64+0] mov r10,r8,lsr#14 str r4,[sp,#64+4] eor r9,r9,r8,lsl#18 ldr r11,[sp,#56+0] @ h.lo eor r10,r10,r7,lsl#18 ldr r12,[sp,#56+4] @ h.hi eor r9,r9,r7,lsr#18 eor r10,r10,r8,lsr#18 eor r9,r9,r8,lsl#14 eor r10,r10,r7,lsl#14 eor r9,r9,r8,lsr#9 eor r10,r10,r7,lsr#9 eor r9,r9,r7,lsl#23 eor r10,r10,r8,lsl#23 @ Sigma1(e) adds r3,r3,r9 ldr r9,[sp,#40+0] @ f.lo adc r4,r4,r10 @ T += Sigma1(e) ldr r10,[sp,#40+4] @ f.hi adds r3,r3,r11 ldr r11,[sp,#48+0] @ g.lo adc r4,r4,r12 @ T += h ldr r12,[sp,#48+4] @ g.hi eor r9,r9,r11 str r7,[sp,#32+0] eor r10,r10,r12 str r8,[sp,#32+4] and r9,r9,r7 str r5,[sp,#0+0] and r10,r10,r8 str r6,[sp,#0+4] eor r9,r9,r11 ldr r11,[r14,#LO] @ K[i].lo eor r10,r10,r12 @ Ch(e,f,g) ldr r12,[r14,#HI] @ K[i].hi adds r3,r3,r9 ldr r7,[sp,#24+0] @ d.lo adc r4,r4,r10 @ T += Ch(e,f,g) ldr r8,[sp,#24+4] @ d.hi adds r3,r3,r11 and r9,r11,#0xff adc r4,r4,r12 @ T += K[i] adds r7,r7,r3 ldr r11,[sp,#8+0] @ b.lo adc r8,r8,r4 @ d += T teq r9,#148 ldr r12,[sp,#16+0] @ c.lo #if __ARM_ARCH>=7 it eq @ Thumb2 thing, sanity check in ARM #endif orreq r14,r14,#1 @ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39)) @ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25 @ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25 mov r9,r5,lsr#28 mov r10,r6,lsr#28 eor r9,r9,r6,lsl#4 eor r10,r10,r5,lsl#4 eor r9,r9,r6,lsr#2 eor r10,r10,r5,lsr#2 eor r9,r9,r5,lsl#30 eor r10,r10,r6,lsl#30 eor r9,r9,r6,lsr#7 eor r10,r10,r5,lsr#7 eor r9,r9,r5,lsl#25 eor r10,r10,r6,lsl#25 @ Sigma0(a) adds r3,r3,r9 and r9,r5,r11 adc r4,r4,r10 @ T += Sigma0(a) ldr r10,[sp,#8+4] @ b.hi orr r5,r5,r11 ldr r11,[sp,#16+4] @ c.hi and r5,r5,r12 and r12,r6,r10 orr r6,r6,r10 orr r5,r5,r9 @ Maj(a,b,c).lo and r6,r6,r11 adds r5,r5,r3 orr r6,r6,r12 @ Maj(a,b,c).hi sub sp,sp,#8 adc r6,r6,r4 @ h += T tst r14,#1 add r14,r14,#8 tst r14,#1 beq .L00_15 ldr r9,[sp,#184+0] ldr r10,[sp,#184+4] bic r14,r14,#1 .L16_79: @ sigma0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7)) @ LO lo>>1^hi<<31 ^ lo>>8^hi<<24 ^ lo>>7^hi<<25 @ HI hi>>1^lo<<31 ^ hi>>8^lo<<24 ^ hi>>7 mov r3,r9,lsr#1 ldr r11,[sp,#80+0] mov r4,r10,lsr#1 ldr r12,[sp,#80+4] eor r3,r3,r10,lsl#31 eor r4,r4,r9,lsl#31 eor r3,r3,r9,lsr#8 eor r4,r4,r10,lsr#8 eor r3,r3,r10,lsl#24 eor r4,r4,r9,lsl#24 eor r3,r3,r9,lsr#7 eor r4,r4,r10,lsr#7 eor r3,r3,r10,lsl#25 @ sigma1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6)) @ LO lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26 @ HI hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6 mov r9,r11,lsr#19 mov r10,r12,lsr#19 eor r9,r9,r12,lsl#13 eor r10,r10,r11,lsl#13 eor r9,r9,r12,lsr#29 eor r10,r10,r11,lsr#29 eor r9,r9,r11,lsl#3 eor r10,r10,r12,lsl#3 eor r9,r9,r11,lsr#6 eor r10,r10,r12,lsr#6 ldr r11,[sp,#120+0] eor r9,r9,r12,lsl#26 ldr r12,[sp,#120+4] adds r3,r3,r9 ldr r9,[sp,#192+0] adc r4,r4,r10 ldr r10,[sp,#192+4] adds r3,r3,r11 adc r4,r4,r12 adds r3,r3,r9 adc r4,r4,r10 @ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41)) @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23 @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23 mov r9,r7,lsr#14 str r3,[sp,#64+0] mov r10,r8,lsr#14 str r4,[sp,#64+4] eor r9,r9,r8,lsl#18 ldr r11,[sp,#56+0] @ h.lo eor r10,r10,r7,lsl#18 ldr r12,[sp,#56+4] @ h.hi eor r9,r9,r7,lsr#18 eor r10,r10,r8,lsr#18 eor r9,r9,r8,lsl#14 eor r10,r10,r7,lsl#14 eor r9,r9,r8,lsr#9 eor r10,r10,r7,lsr#9 
eor r9,r9,r7,lsl#23 eor r10,r10,r8,lsl#23 @ Sigma1(e) adds r3,r3,r9 ldr r9,[sp,#40+0] @ f.lo adc r4,r4,r10 @ T += Sigma1(e) ldr r10,[sp,#40+4] @ f.hi adds r3,r3,r11 ldr r11,[sp,#48+0] @ g.lo adc r4,r4,r12 @ T += h ldr r12,[sp,#48+4] @ g.hi eor r9,r9,r11 str r7,[sp,#32+0] eor r10,r10,r12 str r8,[sp,#32+4] and r9,r9,r7 str r5,[sp,#0+0] and r10,r10,r8 str r6,[sp,#0+4] eor r9,r9,r11 ldr r11,[r14,#LO] @ K[i].lo eor r10,r10,r12 @ Ch(e,f,g) ldr r12,[r14,#HI] @ K[i].hi adds r3,r3,r9 ldr r7,[sp,#24+0] @ d.lo adc r4,r4,r10 @ T += Ch(e,f,g) ldr r8,[sp,#24+4] @ d.hi adds r3,r3,r11 and r9,r11,#0xff adc r4,r4,r12 @ T += K[i] adds r7,r7,r3 ldr r11,[sp,#8+0] @ b.lo adc r8,r8,r4 @ d += T teq r9,#23 ldr r12,[sp,#16+0] @ c.lo #if __ARM_ARCH>=7 it eq @ Thumb2 thing, sanity check in ARM #endif orreq r14,r14,#1 @ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39)) @ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25 @ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25 mov r9,r5,lsr#28 mov r10,r6,lsr#28 eor r9,r9,r6,lsl#4 eor r10,r10,r5,lsl#4 eor r9,r9,r6,lsr#2 eor r10,r10,r5,lsr#2 eor r9,r9,r5,lsl#30 eor r10,r10,r6,lsl#30 eor r9,r9,r6,lsr#7 eor r10,r10,r5,lsr#7 eor r9,r9,r5,lsl#25 eor r10,r10,r6,lsl#25 @ Sigma0(a) adds r3,r3,r9 and r9,r5,r11 adc r4,r4,r10 @ T += Sigma0(a) ldr r10,[sp,#8+4] @ b.hi orr r5,r5,r11 ldr r11,[sp,#16+4] @ c.hi and r5,r5,r12 and r12,r6,r10 orr r6,r6,r10 orr r5,r5,r9 @ Maj(a,b,c).lo and r6,r6,r11 adds r5,r5,r3 orr r6,r6,r12 @ Maj(a,b,c).hi sub sp,sp,#8 adc r6,r6,r4 @ h += T tst r14,#1 add r14,r14,#8 #if __ARM_ARCH>=7 ittt eq @ Thumb2 thing, sanity check in ARM #endif ldreq r9,[sp,#184+0] ldreq r10,[sp,#184+4] beq .L16_79 bic r14,r14,#1 ldr r3,[sp,#8+0] ldr r4,[sp,#8+4] ldr r9, [r0,#0+LO] ldr r10, [r0,#0+HI] ldr r11, [r0,#8+LO] ldr r12, [r0,#8+HI] adds r9,r5,r9 str r9, [r0,#0+LO] adc r10,r6,r10 str r10, [r0,#0+HI] adds r11,r3,r11 str r11, [r0,#8+LO] adc r12,r4,r12 str r12, [r0,#8+HI] ldr r5,[sp,#16+0] ldr r6,[sp,#16+4] ldr r3,[sp,#24+0] ldr r4,[sp,#24+4] ldr r9, [r0,#16+LO] ldr r10, [r0,#16+HI] ldr r11, [r0,#24+LO] ldr r12, [r0,#24+HI] adds r9,r5,r9 str r9, [r0,#16+LO] adc r10,r6,r10 str r10, [r0,#16+HI] adds r11,r3,r11 str r11, [r0,#24+LO] adc r12,r4,r12 str r12, [r0,#24+HI] ldr r3,[sp,#40+0] ldr r4,[sp,#40+4] ldr r9, [r0,#32+LO] ldr r10, [r0,#32+HI] ldr r11, [r0,#40+LO] ldr r12, [r0,#40+HI] adds r7,r7,r9 str r7,[r0,#32+LO] adc r8,r8,r10 str r8,[r0,#32+HI] adds r11,r3,r11 str r11, [r0,#40+LO] adc r12,r4,r12 str r12, [r0,#40+HI] ldr r5,[sp,#48+0] ldr r6,[sp,#48+4] ldr r3,[sp,#56+0] ldr r4,[sp,#56+4] ldr r9, [r0,#48+LO] ldr r10, [r0,#48+HI] ldr r11, [r0,#56+LO] ldr r12, [r0,#56+HI] adds r9,r5,r9 str r9, [r0,#48+LO] adc r10,r6,r10 str r10, [r0,#48+HI] adds r11,r3,r11 str r11, [r0,#56+LO] adc r12,r4,r12 str r12, [r0,#56+HI] add sp,sp,#640 sub r14,r14,#640 teq r1,r2 bne .Loop add sp,sp,#8*9 @ destroy frame #if __ARM_ARCH>=5 ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc} #else ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} tst lr,#1 moveq pc,lr @ be binary compatible with V4, yet .word 0xe12fff1e @ interoperable with Thumb ISA:-) #endif .size sha512_block_data_order_nohw,.-sha512_block_data_order_nohw #if __ARM_MAX_ARCH__>=7 .arch armv7-a .fpu neon .globl sha512_block_data_order_neon .hidden sha512_block_data_order_neon .type sha512_block_data_order_neon,%function .align 4 sha512_block_data_order_neon: dmb @ errata #451034 on early Cortex A8 add r2,r1,r2,lsl#7 @ len to point at the end of inp adr r3,K512 VFP_ABI_PUSH vldmia r0,{d16,d17,d18,d19,d20,d21,d22,d23} @ load context .Loop_neon: vshr.u64 
d24,d20,#14 @ 0 #if 0<16 vld1.64 {d0},[r1]! @ handles unaligned #endif vshr.u64 d25,d20,#18 #if 0>0 vadd.i64 d16,d30 @ h+=Maj from the past #endif vshr.u64 d26,d20,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d20,#50 vsli.64 d25,d20,#46 vmov d29,d20 vsli.64 d26,d20,#23 #if 0<16 && defined(__ARMEL__) vrev64.8 d0,d0 #endif veor d25,d24 vbsl d29,d21,d22 @ Ch(e,f,g) vshr.u64 d24,d16,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d23 vshr.u64 d25,d16,#34 vsli.64 d24,d16,#36 vadd.i64 d27,d26 vshr.u64 d26,d16,#39 vadd.i64 d28,d0 vsli.64 d25,d16,#30 veor d30,d16,d17 vsli.64 d26,d16,#25 veor d23,d24,d25 vadd.i64 d27,d28 vbsl d30,d18,d17 @ Maj(a,b,c) veor d23,d26 @ Sigma0(a) vadd.i64 d19,d27 vadd.i64 d30,d27 @ vadd.i64 d23,d30 vshr.u64 d24,d19,#14 @ 1 #if 1<16 vld1.64 {d1},[r1]! @ handles unaligned #endif vshr.u64 d25,d19,#18 #if 1>0 vadd.i64 d23,d30 @ h+=Maj from the past #endif vshr.u64 d26,d19,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d19,#50 vsli.64 d25,d19,#46 vmov d29,d19 vsli.64 d26,d19,#23 #if 1<16 && defined(__ARMEL__) vrev64.8 d1,d1 #endif veor d25,d24 vbsl d29,d20,d21 @ Ch(e,f,g) vshr.u64 d24,d23,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d22 vshr.u64 d25,d23,#34 vsli.64 d24,d23,#36 vadd.i64 d27,d26 vshr.u64 d26,d23,#39 vadd.i64 d28,d1 vsli.64 d25,d23,#30 veor d30,d23,d16 vsli.64 d26,d23,#25 veor d22,d24,d25 vadd.i64 d27,d28 vbsl d30,d17,d16 @ Maj(a,b,c) veor d22,d26 @ Sigma0(a) vadd.i64 d18,d27 vadd.i64 d30,d27 @ vadd.i64 d22,d30 vshr.u64 d24,d18,#14 @ 2 #if 2<16 vld1.64 {d2},[r1]! @ handles unaligned #endif vshr.u64 d25,d18,#18 #if 2>0 vadd.i64 d22,d30 @ h+=Maj from the past #endif vshr.u64 d26,d18,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d18,#50 vsli.64 d25,d18,#46 vmov d29,d18 vsli.64 d26,d18,#23 #if 2<16 && defined(__ARMEL__) vrev64.8 d2,d2 #endif veor d25,d24 vbsl d29,d19,d20 @ Ch(e,f,g) vshr.u64 d24,d22,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d21 vshr.u64 d25,d22,#34 vsli.64 d24,d22,#36 vadd.i64 d27,d26 vshr.u64 d26,d22,#39 vadd.i64 d28,d2 vsli.64 d25,d22,#30 veor d30,d22,d23 vsli.64 d26,d22,#25 veor d21,d24,d25 vadd.i64 d27,d28 vbsl d30,d16,d23 @ Maj(a,b,c) veor d21,d26 @ Sigma0(a) vadd.i64 d17,d27 vadd.i64 d30,d27 @ vadd.i64 d21,d30 vshr.u64 d24,d17,#14 @ 3 #if 3<16 vld1.64 {d3},[r1]! @ handles unaligned #endif vshr.u64 d25,d17,#18 #if 3>0 vadd.i64 d21,d30 @ h+=Maj from the past #endif vshr.u64 d26,d17,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d17,#50 vsli.64 d25,d17,#46 vmov d29,d17 vsli.64 d26,d17,#23 #if 3<16 && defined(__ARMEL__) vrev64.8 d3,d3 #endif veor d25,d24 vbsl d29,d18,d19 @ Ch(e,f,g) vshr.u64 d24,d21,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d20 vshr.u64 d25,d21,#34 vsli.64 d24,d21,#36 vadd.i64 d27,d26 vshr.u64 d26,d21,#39 vadd.i64 d28,d3 vsli.64 d25,d21,#30 veor d30,d21,d22 vsli.64 d26,d21,#25 veor d20,d24,d25 vadd.i64 d27,d28 vbsl d30,d23,d22 @ Maj(a,b,c) veor d20,d26 @ Sigma0(a) vadd.i64 d16,d27 vadd.i64 d30,d27 @ vadd.i64 d20,d30 vshr.u64 d24,d16,#14 @ 4 #if 4<16 vld1.64 {d4},[r1]! @ handles unaligned #endif vshr.u64 d25,d16,#18 #if 4>0 vadd.i64 d20,d30 @ h+=Maj from the past #endif vshr.u64 d26,d16,#41 vld1.64 {d28},[r3,:64]! 
@ K[i++] vsli.64 d24,d16,#50 vsli.64 d25,d16,#46 vmov d29,d16 vsli.64 d26,d16,#23 #if 4<16 && defined(__ARMEL__) vrev64.8 d4,d4 #endif veor d25,d24 vbsl d29,d17,d18 @ Ch(e,f,g) vshr.u64 d24,d20,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d19 vshr.u64 d25,d20,#34 vsli.64 d24,d20,#36 vadd.i64 d27,d26 vshr.u64 d26,d20,#39 vadd.i64 d28,d4 vsli.64 d25,d20,#30 veor d30,d20,d21 vsli.64 d26,d20,#25 veor d19,d24,d25 vadd.i64 d27,d28 vbsl d30,d22,d21 @ Maj(a,b,c) veor d19,d26 @ Sigma0(a) vadd.i64 d23,d27 vadd.i64 d30,d27 @ vadd.i64 d19,d30 vshr.u64 d24,d23,#14 @ 5 #if 5<16 vld1.64 {d5},[r1]! @ handles unaligned #endif vshr.u64 d25,d23,#18 #if 5>0 vadd.i64 d19,d30 @ h+=Maj from the past #endif vshr.u64 d26,d23,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d23,#50 vsli.64 d25,d23,#46 vmov d29,d23 vsli.64 d26,d23,#23 #if 5<16 && defined(__ARMEL__) vrev64.8 d5,d5 #endif veor d25,d24 vbsl d29,d16,d17 @ Ch(e,f,g) vshr.u64 d24,d19,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d18 vshr.u64 d25,d19,#34 vsli.64 d24,d19,#36 vadd.i64 d27,d26 vshr.u64 d26,d19,#39 vadd.i64 d28,d5 vsli.64 d25,d19,#30 veor d30,d19,d20 vsli.64 d26,d19,#25 veor d18,d24,d25 vadd.i64 d27,d28 vbsl d30,d21,d20 @ Maj(a,b,c) veor d18,d26 @ Sigma0(a) vadd.i64 d22,d27 vadd.i64 d30,d27 @ vadd.i64 d18,d30 vshr.u64 d24,d22,#14 @ 6 #if 6<16 vld1.64 {d6},[r1]! @ handles unaligned #endif vshr.u64 d25,d22,#18 #if 6>0 vadd.i64 d18,d30 @ h+=Maj from the past #endif vshr.u64 d26,d22,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d22,#50 vsli.64 d25,d22,#46 vmov d29,d22 vsli.64 d26,d22,#23 #if 6<16 && defined(__ARMEL__) vrev64.8 d6,d6 #endif veor d25,d24 vbsl d29,d23,d16 @ Ch(e,f,g) vshr.u64 d24,d18,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d17 vshr.u64 d25,d18,#34 vsli.64 d24,d18,#36 vadd.i64 d27,d26 vshr.u64 d26,d18,#39 vadd.i64 d28,d6 vsli.64 d25,d18,#30 veor d30,d18,d19 vsli.64 d26,d18,#25 veor d17,d24,d25 vadd.i64 d27,d28 vbsl d30,d20,d19 @ Maj(a,b,c) veor d17,d26 @ Sigma0(a) vadd.i64 d21,d27 vadd.i64 d30,d27 @ vadd.i64 d17,d30 vshr.u64 d24,d21,#14 @ 7 #if 7<16 vld1.64 {d7},[r1]! @ handles unaligned #endif vshr.u64 d25,d21,#18 #if 7>0 vadd.i64 d17,d30 @ h+=Maj from the past #endif vshr.u64 d26,d21,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d21,#50 vsli.64 d25,d21,#46 vmov d29,d21 vsli.64 d26,d21,#23 #if 7<16 && defined(__ARMEL__) vrev64.8 d7,d7 #endif veor d25,d24 vbsl d29,d22,d23 @ Ch(e,f,g) vshr.u64 d24,d17,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d16 vshr.u64 d25,d17,#34 vsli.64 d24,d17,#36 vadd.i64 d27,d26 vshr.u64 d26,d17,#39 vadd.i64 d28,d7 vsli.64 d25,d17,#30 veor d30,d17,d18 vsli.64 d26,d17,#25 veor d16,d24,d25 vadd.i64 d27,d28 vbsl d30,d19,d18 @ Maj(a,b,c) veor d16,d26 @ Sigma0(a) vadd.i64 d20,d27 vadd.i64 d30,d27 @ vadd.i64 d16,d30 vshr.u64 d24,d20,#14 @ 8 #if 8<16 vld1.64 {d8},[r1]! @ handles unaligned #endif vshr.u64 d25,d20,#18 #if 8>0 vadd.i64 d16,d30 @ h+=Maj from the past #endif vshr.u64 d26,d20,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d20,#50 vsli.64 d25,d20,#46 vmov d29,d20 vsli.64 d26,d20,#23 #if 8<16 && defined(__ARMEL__) vrev64.8 d8,d8 #endif veor d25,d24 vbsl d29,d21,d22 @ Ch(e,f,g) vshr.u64 d24,d16,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d23 vshr.u64 d25,d16,#34 vsli.64 d24,d16,#36 vadd.i64 d27,d26 vshr.u64 d26,d16,#39 vadd.i64 d28,d8 vsli.64 d25,d16,#30 veor d30,d16,d17 vsli.64 d26,d16,#25 veor d23,d24,d25 vadd.i64 d27,d28 vbsl d30,d18,d17 @ Maj(a,b,c) veor d23,d26 @ Sigma0(a) vadd.i64 d19,d27 vadd.i64 d30,d27 @ vadd.i64 d23,d30 vshr.u64 d24,d19,#14 @ 9 #if 9<16 vld1.64 {d9},[r1]! 
@ handles unaligned #endif vshr.u64 d25,d19,#18 #if 9>0 vadd.i64 d23,d30 @ h+=Maj from the past #endif vshr.u64 d26,d19,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d19,#50 vsli.64 d25,d19,#46 vmov d29,d19 vsli.64 d26,d19,#23 #if 9<16 && defined(__ARMEL__) vrev64.8 d9,d9 #endif veor d25,d24 vbsl d29,d20,d21 @ Ch(e,f,g) vshr.u64 d24,d23,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d22 vshr.u64 d25,d23,#34 vsli.64 d24,d23,#36 vadd.i64 d27,d26 vshr.u64 d26,d23,#39 vadd.i64 d28,d9 vsli.64 d25,d23,#30 veor d30,d23,d16 vsli.64 d26,d23,#25 veor d22,d24,d25 vadd.i64 d27,d28 vbsl d30,d17,d16 @ Maj(a,b,c) veor d22,d26 @ Sigma0(a) vadd.i64 d18,d27 vadd.i64 d30,d27 @ vadd.i64 d22,d30 vshr.u64 d24,d18,#14 @ 10 #if 10<16 vld1.64 {d10},[r1]! @ handles unaligned #endif vshr.u64 d25,d18,#18 #if 10>0 vadd.i64 d22,d30 @ h+=Maj from the past #endif vshr.u64 d26,d18,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d18,#50 vsli.64 d25,d18,#46 vmov d29,d18 vsli.64 d26,d18,#23 #if 10<16 && defined(__ARMEL__) vrev64.8 d10,d10 #endif veor d25,d24 vbsl d29,d19,d20 @ Ch(e,f,g) vshr.u64 d24,d22,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d21 vshr.u64 d25,d22,#34 vsli.64 d24,d22,#36 vadd.i64 d27,d26 vshr.u64 d26,d22,#39 vadd.i64 d28,d10 vsli.64 d25,d22,#30 veor d30,d22,d23 vsli.64 d26,d22,#25 veor d21,d24,d25 vadd.i64 d27,d28 vbsl d30,d16,d23 @ Maj(a,b,c) veor d21,d26 @ Sigma0(a) vadd.i64 d17,d27 vadd.i64 d30,d27 @ vadd.i64 d21,d30 vshr.u64 d24,d17,#14 @ 11 #if 11<16 vld1.64 {d11},[r1]! @ handles unaligned #endif vshr.u64 d25,d17,#18 #if 11>0 vadd.i64 d21,d30 @ h+=Maj from the past #endif vshr.u64 d26,d17,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d17,#50 vsli.64 d25,d17,#46 vmov d29,d17 vsli.64 d26,d17,#23 #if 11<16 && defined(__ARMEL__) vrev64.8 d11,d11 #endif veor d25,d24 vbsl d29,d18,d19 @ Ch(e,f,g) vshr.u64 d24,d21,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d20 vshr.u64 d25,d21,#34 vsli.64 d24,d21,#36 vadd.i64 d27,d26 vshr.u64 d26,d21,#39 vadd.i64 d28,d11 vsli.64 d25,d21,#30 veor d30,d21,d22 vsli.64 d26,d21,#25 veor d20,d24,d25 vadd.i64 d27,d28 vbsl d30,d23,d22 @ Maj(a,b,c) veor d20,d26 @ Sigma0(a) vadd.i64 d16,d27 vadd.i64 d30,d27 @ vadd.i64 d20,d30 vshr.u64 d24,d16,#14 @ 12 #if 12<16 vld1.64 {d12},[r1]! @ handles unaligned #endif vshr.u64 d25,d16,#18 #if 12>0 vadd.i64 d20,d30 @ h+=Maj from the past #endif vshr.u64 d26,d16,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d16,#50 vsli.64 d25,d16,#46 vmov d29,d16 vsli.64 d26,d16,#23 #if 12<16 && defined(__ARMEL__) vrev64.8 d12,d12 #endif veor d25,d24 vbsl d29,d17,d18 @ Ch(e,f,g) vshr.u64 d24,d20,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d19 vshr.u64 d25,d20,#34 vsli.64 d24,d20,#36 vadd.i64 d27,d26 vshr.u64 d26,d20,#39 vadd.i64 d28,d12 vsli.64 d25,d20,#30 veor d30,d20,d21 vsli.64 d26,d20,#25 veor d19,d24,d25 vadd.i64 d27,d28 vbsl d30,d22,d21 @ Maj(a,b,c) veor d19,d26 @ Sigma0(a) vadd.i64 d23,d27 vadd.i64 d30,d27 @ vadd.i64 d19,d30 vshr.u64 d24,d23,#14 @ 13 #if 13<16 vld1.64 {d13},[r1]! @ handles unaligned #endif vshr.u64 d25,d23,#18 #if 13>0 vadd.i64 d19,d30 @ h+=Maj from the past #endif vshr.u64 d26,d23,#41 vld1.64 {d28},[r3,:64]! 
@ K[i++] vsli.64 d24,d23,#50 vsli.64 d25,d23,#46 vmov d29,d23 vsli.64 d26,d23,#23 #if 13<16 && defined(__ARMEL__) vrev64.8 d13,d13 #endif veor d25,d24 vbsl d29,d16,d17 @ Ch(e,f,g) vshr.u64 d24,d19,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d18 vshr.u64 d25,d19,#34 vsli.64 d24,d19,#36 vadd.i64 d27,d26 vshr.u64 d26,d19,#39 vadd.i64 d28,d13 vsli.64 d25,d19,#30 veor d30,d19,d20 vsli.64 d26,d19,#25 veor d18,d24,d25 vadd.i64 d27,d28 vbsl d30,d21,d20 @ Maj(a,b,c) veor d18,d26 @ Sigma0(a) vadd.i64 d22,d27 vadd.i64 d30,d27 @ vadd.i64 d18,d30 vshr.u64 d24,d22,#14 @ 14 #if 14<16 vld1.64 {d14},[r1]! @ handles unaligned #endif vshr.u64 d25,d22,#18 #if 14>0 vadd.i64 d18,d30 @ h+=Maj from the past #endif vshr.u64 d26,d22,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d22,#50 vsli.64 d25,d22,#46 vmov d29,d22 vsli.64 d26,d22,#23 #if 14<16 && defined(__ARMEL__) vrev64.8 d14,d14 #endif veor d25,d24 vbsl d29,d23,d16 @ Ch(e,f,g) vshr.u64 d24,d18,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d17 vshr.u64 d25,d18,#34 vsli.64 d24,d18,#36 vadd.i64 d27,d26 vshr.u64 d26,d18,#39 vadd.i64 d28,d14 vsli.64 d25,d18,#30 veor d30,d18,d19 vsli.64 d26,d18,#25 veor d17,d24,d25 vadd.i64 d27,d28 vbsl d30,d20,d19 @ Maj(a,b,c) veor d17,d26 @ Sigma0(a) vadd.i64 d21,d27 vadd.i64 d30,d27 @ vadd.i64 d17,d30 vshr.u64 d24,d21,#14 @ 15 #if 15<16 vld1.64 {d15},[r1]! @ handles unaligned #endif vshr.u64 d25,d21,#18 #if 15>0 vadd.i64 d17,d30 @ h+=Maj from the past #endif vshr.u64 d26,d21,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d21,#50 vsli.64 d25,d21,#46 vmov d29,d21 vsli.64 d26,d21,#23 #if 15<16 && defined(__ARMEL__) vrev64.8 d15,d15 #endif veor d25,d24 vbsl d29,d22,d23 @ Ch(e,f,g) vshr.u64 d24,d17,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d16 vshr.u64 d25,d17,#34 vsli.64 d24,d17,#36 vadd.i64 d27,d26 vshr.u64 d26,d17,#39 vadd.i64 d28,d15 vsli.64 d25,d17,#30 veor d30,d17,d18 vsli.64 d26,d17,#25 veor d16,d24,d25 vadd.i64 d27,d28 vbsl d30,d19,d18 @ Maj(a,b,c) veor d16,d26 @ Sigma0(a) vadd.i64 d20,d27 vadd.i64 d30,d27 @ vadd.i64 d16,d30 mov r12,#4 .L16_79_neon: subs r12,#1 vshr.u64 q12,q7,#19 vshr.u64 q13,q7,#61 vadd.i64 d16,d30 @ h+=Maj from the past vshr.u64 q15,q7,#6 vsli.64 q12,q7,#45 vext.8 q14,q0,q1,#8 @ X[i+1] vsli.64 q13,q7,#3 veor q15,q12 vshr.u64 q12,q14,#1 veor q15,q13 @ sigma1(X[i+14]) vshr.u64 q13,q14,#8 vadd.i64 q0,q15 vshr.u64 q15,q14,#7 vsli.64 q12,q14,#63 vsli.64 q13,q14,#56 vext.8 q14,q4,q5,#8 @ X[i+9] veor q15,q12 vshr.u64 d24,d20,#14 @ from NEON_00_15 vadd.i64 q0,q14 vshr.u64 d25,d20,#18 @ from NEON_00_15 veor q15,q13 @ sigma0(X[i+1]) vshr.u64 d26,d20,#41 @ from NEON_00_15 vadd.i64 q0,q15 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d20,#50 vsli.64 d25,d20,#46 vmov d29,d20 vsli.64 d26,d20,#23 #if 16<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d21,d22 @ Ch(e,f,g) vshr.u64 d24,d16,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d23 vshr.u64 d25,d16,#34 vsli.64 d24,d16,#36 vadd.i64 d27,d26 vshr.u64 d26,d16,#39 vadd.i64 d28,d0 vsli.64 d25,d16,#30 veor d30,d16,d17 vsli.64 d26,d16,#25 veor d23,d24,d25 vadd.i64 d27,d28 vbsl d30,d18,d17 @ Maj(a,b,c) veor d23,d26 @ Sigma0(a) vadd.i64 d19,d27 vadd.i64 d30,d27 @ vadd.i64 d23,d30 vshr.u64 d24,d19,#14 @ 17 #if 17<16 vld1.64 {d1},[r1]! @ handles unaligned #endif vshr.u64 d25,d19,#18 #if 17>0 vadd.i64 d23,d30 @ h+=Maj from the past #endif vshr.u64 d26,d19,#41 vld1.64 {d28},[r3,:64]! 
@ K[i++] vsli.64 d24,d19,#50 vsli.64 d25,d19,#46 vmov d29,d19 vsli.64 d26,d19,#23 #if 17<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d20,d21 @ Ch(e,f,g) vshr.u64 d24,d23,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d22 vshr.u64 d25,d23,#34 vsli.64 d24,d23,#36 vadd.i64 d27,d26 vshr.u64 d26,d23,#39 vadd.i64 d28,d1 vsli.64 d25,d23,#30 veor d30,d23,d16 vsli.64 d26,d23,#25 veor d22,d24,d25 vadd.i64 d27,d28 vbsl d30,d17,d16 @ Maj(a,b,c) veor d22,d26 @ Sigma0(a) vadd.i64 d18,d27 vadd.i64 d30,d27 @ vadd.i64 d22,d30 vshr.u64 q12,q0,#19 vshr.u64 q13,q0,#61 vadd.i64 d22,d30 @ h+=Maj from the past vshr.u64 q15,q0,#6 vsli.64 q12,q0,#45 vext.8 q14,q1,q2,#8 @ X[i+1] vsli.64 q13,q0,#3 veor q15,q12 vshr.u64 q12,q14,#1 veor q15,q13 @ sigma1(X[i+14]) vshr.u64 q13,q14,#8 vadd.i64 q1,q15 vshr.u64 q15,q14,#7 vsli.64 q12,q14,#63 vsli.64 q13,q14,#56 vext.8 q14,q5,q6,#8 @ X[i+9] veor q15,q12 vshr.u64 d24,d18,#14 @ from NEON_00_15 vadd.i64 q1,q14 vshr.u64 d25,d18,#18 @ from NEON_00_15 veor q15,q13 @ sigma0(X[i+1]) vshr.u64 d26,d18,#41 @ from NEON_00_15 vadd.i64 q1,q15 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d18,#50 vsli.64 d25,d18,#46 vmov d29,d18 vsli.64 d26,d18,#23 #if 18<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d19,d20 @ Ch(e,f,g) vshr.u64 d24,d22,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d21 vshr.u64 d25,d22,#34 vsli.64 d24,d22,#36 vadd.i64 d27,d26 vshr.u64 d26,d22,#39 vadd.i64 d28,d2 vsli.64 d25,d22,#30 veor d30,d22,d23 vsli.64 d26,d22,#25 veor d21,d24,d25 vadd.i64 d27,d28 vbsl d30,d16,d23 @ Maj(a,b,c) veor d21,d26 @ Sigma0(a) vadd.i64 d17,d27 vadd.i64 d30,d27 @ vadd.i64 d21,d30 vshr.u64 d24,d17,#14 @ 19 #if 19<16 vld1.64 {d3},[r1]! @ handles unaligned #endif vshr.u64 d25,d17,#18 #if 19>0 vadd.i64 d21,d30 @ h+=Maj from the past #endif vshr.u64 d26,d17,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d17,#50 vsli.64 d25,d17,#46 vmov d29,d17 vsli.64 d26,d17,#23 #if 19<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d18,d19 @ Ch(e,f,g) vshr.u64 d24,d21,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d20 vshr.u64 d25,d21,#34 vsli.64 d24,d21,#36 vadd.i64 d27,d26 vshr.u64 d26,d21,#39 vadd.i64 d28,d3 vsli.64 d25,d21,#30 veor d30,d21,d22 vsli.64 d26,d21,#25 veor d20,d24,d25 vadd.i64 d27,d28 vbsl d30,d23,d22 @ Maj(a,b,c) veor d20,d26 @ Sigma0(a) vadd.i64 d16,d27 vadd.i64 d30,d27 @ vadd.i64 d20,d30 vshr.u64 q12,q1,#19 vshr.u64 q13,q1,#61 vadd.i64 d20,d30 @ h+=Maj from the past vshr.u64 q15,q1,#6 vsli.64 q12,q1,#45 vext.8 q14,q2,q3,#8 @ X[i+1] vsli.64 q13,q1,#3 veor q15,q12 vshr.u64 q12,q14,#1 veor q15,q13 @ sigma1(X[i+14]) vshr.u64 q13,q14,#8 vadd.i64 q2,q15 vshr.u64 q15,q14,#7 vsli.64 q12,q14,#63 vsli.64 q13,q14,#56 vext.8 q14,q6,q7,#8 @ X[i+9] veor q15,q12 vshr.u64 d24,d16,#14 @ from NEON_00_15 vadd.i64 q2,q14 vshr.u64 d25,d16,#18 @ from NEON_00_15 veor q15,q13 @ sigma0(X[i+1]) vshr.u64 d26,d16,#41 @ from NEON_00_15 vadd.i64 q2,q15 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d16,#50 vsli.64 d25,d16,#46 vmov d29,d16 vsli.64 d26,d16,#23 #if 20<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d17,d18 @ Ch(e,f,g) vshr.u64 d24,d20,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d19 vshr.u64 d25,d20,#34 vsli.64 d24,d20,#36 vadd.i64 d27,d26 vshr.u64 d26,d20,#39 vadd.i64 d28,d4 vsli.64 d25,d20,#30 veor d30,d20,d21 vsli.64 d26,d20,#25 veor d19,d24,d25 vadd.i64 d27,d28 vbsl d30,d22,d21 @ Maj(a,b,c) veor d19,d26 @ Sigma0(a) vadd.i64 d23,d27 vadd.i64 d30,d27 @ vadd.i64 d19,d30 vshr.u64 d24,d23,#14 @ 21 #if 21<16 vld1.64 {d5},[r1]! 
@ handles unaligned #endif vshr.u64 d25,d23,#18 #if 21>0 vadd.i64 d19,d30 @ h+=Maj from the past #endif vshr.u64 d26,d23,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d23,#50 vsli.64 d25,d23,#46 vmov d29,d23 vsli.64 d26,d23,#23 #if 21<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d16,d17 @ Ch(e,f,g) vshr.u64 d24,d19,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d18 vshr.u64 d25,d19,#34 vsli.64 d24,d19,#36 vadd.i64 d27,d26 vshr.u64 d26,d19,#39 vadd.i64 d28,d5 vsli.64 d25,d19,#30 veor d30,d19,d20 vsli.64 d26,d19,#25 veor d18,d24,d25 vadd.i64 d27,d28 vbsl d30,d21,d20 @ Maj(a,b,c) veor d18,d26 @ Sigma0(a) vadd.i64 d22,d27 vadd.i64 d30,d27 @ vadd.i64 d18,d30 vshr.u64 q12,q2,#19 vshr.u64 q13,q2,#61 vadd.i64 d18,d30 @ h+=Maj from the past vshr.u64 q15,q2,#6 vsli.64 q12,q2,#45 vext.8 q14,q3,q4,#8 @ X[i+1] vsli.64 q13,q2,#3 veor q15,q12 vshr.u64 q12,q14,#1 veor q15,q13 @ sigma1(X[i+14]) vshr.u64 q13,q14,#8 vadd.i64 q3,q15 vshr.u64 q15,q14,#7 vsli.64 q12,q14,#63 vsli.64 q13,q14,#56 vext.8 q14,q7,q0,#8 @ X[i+9] veor q15,q12 vshr.u64 d24,d22,#14 @ from NEON_00_15 vadd.i64 q3,q14 vshr.u64 d25,d22,#18 @ from NEON_00_15 veor q15,q13 @ sigma0(X[i+1]) vshr.u64 d26,d22,#41 @ from NEON_00_15 vadd.i64 q3,q15 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d22,#50 vsli.64 d25,d22,#46 vmov d29,d22 vsli.64 d26,d22,#23 #if 22<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d23,d16 @ Ch(e,f,g) vshr.u64 d24,d18,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d17 vshr.u64 d25,d18,#34 vsli.64 d24,d18,#36 vadd.i64 d27,d26 vshr.u64 d26,d18,#39 vadd.i64 d28,d6 vsli.64 d25,d18,#30 veor d30,d18,d19 vsli.64 d26,d18,#25 veor d17,d24,d25 vadd.i64 d27,d28 vbsl d30,d20,d19 @ Maj(a,b,c) veor d17,d26 @ Sigma0(a) vadd.i64 d21,d27 vadd.i64 d30,d27 @ vadd.i64 d17,d30 vshr.u64 d24,d21,#14 @ 23 #if 23<16 vld1.64 {d7},[r1]! @ handles unaligned #endif vshr.u64 d25,d21,#18 #if 23>0 vadd.i64 d17,d30 @ h+=Maj from the past #endif vshr.u64 d26,d21,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d21,#50 vsli.64 d25,d21,#46 vmov d29,d21 vsli.64 d26,d21,#23 #if 23<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d22,d23 @ Ch(e,f,g) vshr.u64 d24,d17,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d16 vshr.u64 d25,d17,#34 vsli.64 d24,d17,#36 vadd.i64 d27,d26 vshr.u64 d26,d17,#39 vadd.i64 d28,d7 vsli.64 d25,d17,#30 veor d30,d17,d18 vsli.64 d26,d17,#25 veor d16,d24,d25 vadd.i64 d27,d28 vbsl d30,d19,d18 @ Maj(a,b,c) veor d16,d26 @ Sigma0(a) vadd.i64 d20,d27 vadd.i64 d30,d27 @ vadd.i64 d16,d30 vshr.u64 q12,q3,#19 vshr.u64 q13,q3,#61 vadd.i64 d16,d30 @ h+=Maj from the past vshr.u64 q15,q3,#6 vsli.64 q12,q3,#45 vext.8 q14,q4,q5,#8 @ X[i+1] vsli.64 q13,q3,#3 veor q15,q12 vshr.u64 q12,q14,#1 veor q15,q13 @ sigma1(X[i+14]) vshr.u64 q13,q14,#8 vadd.i64 q4,q15 vshr.u64 q15,q14,#7 vsli.64 q12,q14,#63 vsli.64 q13,q14,#56 vext.8 q14,q0,q1,#8 @ X[i+9] veor q15,q12 vshr.u64 d24,d20,#14 @ from NEON_00_15 vadd.i64 q4,q14 vshr.u64 d25,d20,#18 @ from NEON_00_15 veor q15,q13 @ sigma0(X[i+1]) vshr.u64 d26,d20,#41 @ from NEON_00_15 vadd.i64 q4,q15 vld1.64 {d28},[r3,:64]! 
@ K[i++] vsli.64 d24,d20,#50 vsli.64 d25,d20,#46 vmov d29,d20 vsli.64 d26,d20,#23 #if 24<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d21,d22 @ Ch(e,f,g) vshr.u64 d24,d16,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d23 vshr.u64 d25,d16,#34 vsli.64 d24,d16,#36 vadd.i64 d27,d26 vshr.u64 d26,d16,#39 vadd.i64 d28,d8 vsli.64 d25,d16,#30 veor d30,d16,d17 vsli.64 d26,d16,#25 veor d23,d24,d25 vadd.i64 d27,d28 vbsl d30,d18,d17 @ Maj(a,b,c) veor d23,d26 @ Sigma0(a) vadd.i64 d19,d27 vadd.i64 d30,d27 @ vadd.i64 d23,d30 vshr.u64 d24,d19,#14 @ 25 #if 25<16 vld1.64 {d9},[r1]! @ handles unaligned #endif vshr.u64 d25,d19,#18 #if 25>0 vadd.i64 d23,d30 @ h+=Maj from the past #endif vshr.u64 d26,d19,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d19,#50 vsli.64 d25,d19,#46 vmov d29,d19 vsli.64 d26,d19,#23 #if 25<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d20,d21 @ Ch(e,f,g) vshr.u64 d24,d23,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d22 vshr.u64 d25,d23,#34 vsli.64 d24,d23,#36 vadd.i64 d27,d26 vshr.u64 d26,d23,#39 vadd.i64 d28,d9 vsli.64 d25,d23,#30 veor d30,d23,d16 vsli.64 d26,d23,#25 veor d22,d24,d25 vadd.i64 d27,d28 vbsl d30,d17,d16 @ Maj(a,b,c) veor d22,d26 @ Sigma0(a) vadd.i64 d18,d27 vadd.i64 d30,d27 @ vadd.i64 d22,d30 vshr.u64 q12,q4,#19 vshr.u64 q13,q4,#61 vadd.i64 d22,d30 @ h+=Maj from the past vshr.u64 q15,q4,#6 vsli.64 q12,q4,#45 vext.8 q14,q5,q6,#8 @ X[i+1] vsli.64 q13,q4,#3 veor q15,q12 vshr.u64 q12,q14,#1 veor q15,q13 @ sigma1(X[i+14]) vshr.u64 q13,q14,#8 vadd.i64 q5,q15 vshr.u64 q15,q14,#7 vsli.64 q12,q14,#63 vsli.64 q13,q14,#56 vext.8 q14,q1,q2,#8 @ X[i+9] veor q15,q12 vshr.u64 d24,d18,#14 @ from NEON_00_15 vadd.i64 q5,q14 vshr.u64 d25,d18,#18 @ from NEON_00_15 veor q15,q13 @ sigma0(X[i+1]) vshr.u64 d26,d18,#41 @ from NEON_00_15 vadd.i64 q5,q15 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d18,#50 vsli.64 d25,d18,#46 vmov d29,d18 vsli.64 d26,d18,#23 #if 26<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d19,d20 @ Ch(e,f,g) vshr.u64 d24,d22,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d21 vshr.u64 d25,d22,#34 vsli.64 d24,d22,#36 vadd.i64 d27,d26 vshr.u64 d26,d22,#39 vadd.i64 d28,d10 vsli.64 d25,d22,#30 veor d30,d22,d23 vsli.64 d26,d22,#25 veor d21,d24,d25 vadd.i64 d27,d28 vbsl d30,d16,d23 @ Maj(a,b,c) veor d21,d26 @ Sigma0(a) vadd.i64 d17,d27 vadd.i64 d30,d27 @ vadd.i64 d21,d30 vshr.u64 d24,d17,#14 @ 27 #if 27<16 vld1.64 {d11},[r1]! @ handles unaligned #endif vshr.u64 d25,d17,#18 #if 27>0 vadd.i64 d21,d30 @ h+=Maj from the past #endif vshr.u64 d26,d17,#41 vld1.64 {d28},[r3,:64]! 
@ K[i++] vsli.64 d24,d17,#50 vsli.64 d25,d17,#46 vmov d29,d17 vsli.64 d26,d17,#23 #if 27<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d18,d19 @ Ch(e,f,g) vshr.u64 d24,d21,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d20 vshr.u64 d25,d21,#34 vsli.64 d24,d21,#36 vadd.i64 d27,d26 vshr.u64 d26,d21,#39 vadd.i64 d28,d11 vsli.64 d25,d21,#30 veor d30,d21,d22 vsli.64 d26,d21,#25 veor d20,d24,d25 vadd.i64 d27,d28 vbsl d30,d23,d22 @ Maj(a,b,c) veor d20,d26 @ Sigma0(a) vadd.i64 d16,d27 vadd.i64 d30,d27 @ vadd.i64 d20,d30 vshr.u64 q12,q5,#19 vshr.u64 q13,q5,#61 vadd.i64 d20,d30 @ h+=Maj from the past vshr.u64 q15,q5,#6 vsli.64 q12,q5,#45 vext.8 q14,q6,q7,#8 @ X[i+1] vsli.64 q13,q5,#3 veor q15,q12 vshr.u64 q12,q14,#1 veor q15,q13 @ sigma1(X[i+14]) vshr.u64 q13,q14,#8 vadd.i64 q6,q15 vshr.u64 q15,q14,#7 vsli.64 q12,q14,#63 vsli.64 q13,q14,#56 vext.8 q14,q2,q3,#8 @ X[i+9] veor q15,q12 vshr.u64 d24,d16,#14 @ from NEON_00_15 vadd.i64 q6,q14 vshr.u64 d25,d16,#18 @ from NEON_00_15 veor q15,q13 @ sigma0(X[i+1]) vshr.u64 d26,d16,#41 @ from NEON_00_15 vadd.i64 q6,q15 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d16,#50 vsli.64 d25,d16,#46 vmov d29,d16 vsli.64 d26,d16,#23 #if 28<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d17,d18 @ Ch(e,f,g) vshr.u64 d24,d20,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d19 vshr.u64 d25,d20,#34 vsli.64 d24,d20,#36 vadd.i64 d27,d26 vshr.u64 d26,d20,#39 vadd.i64 d28,d12 vsli.64 d25,d20,#30 veor d30,d20,d21 vsli.64 d26,d20,#25 veor d19,d24,d25 vadd.i64 d27,d28 vbsl d30,d22,d21 @ Maj(a,b,c) veor d19,d26 @ Sigma0(a) vadd.i64 d23,d27 vadd.i64 d30,d27 @ vadd.i64 d19,d30 vshr.u64 d24,d23,#14 @ 29 #if 29<16 vld1.64 {d13},[r1]! @ handles unaligned #endif vshr.u64 d25,d23,#18 #if 29>0 vadd.i64 d19,d30 @ h+=Maj from the past #endif vshr.u64 d26,d23,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d23,#50 vsli.64 d25,d23,#46 vmov d29,d23 vsli.64 d26,d23,#23 #if 29<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d16,d17 @ Ch(e,f,g) vshr.u64 d24,d19,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d18 vshr.u64 d25,d19,#34 vsli.64 d24,d19,#36 vadd.i64 d27,d26 vshr.u64 d26,d19,#39 vadd.i64 d28,d13 vsli.64 d25,d19,#30 veor d30,d19,d20 vsli.64 d26,d19,#25 veor d18,d24,d25 vadd.i64 d27,d28 vbsl d30,d21,d20 @ Maj(a,b,c) veor d18,d26 @ Sigma0(a) vadd.i64 d22,d27 vadd.i64 d30,d27 @ vadd.i64 d18,d30 vshr.u64 q12,q6,#19 vshr.u64 q13,q6,#61 vadd.i64 d18,d30 @ h+=Maj from the past vshr.u64 q15,q6,#6 vsli.64 q12,q6,#45 vext.8 q14,q7,q0,#8 @ X[i+1] vsli.64 q13,q6,#3 veor q15,q12 vshr.u64 q12,q14,#1 veor q15,q13 @ sigma1(X[i+14]) vshr.u64 q13,q14,#8 vadd.i64 q7,q15 vshr.u64 q15,q14,#7 vsli.64 q12,q14,#63 vsli.64 q13,q14,#56 vext.8 q14,q3,q4,#8 @ X[i+9] veor q15,q12 vshr.u64 d24,d22,#14 @ from NEON_00_15 vadd.i64 q7,q14 vshr.u64 d25,d22,#18 @ from NEON_00_15 veor q15,q13 @ sigma0(X[i+1]) vshr.u64 d26,d22,#41 @ from NEON_00_15 vadd.i64 q7,q15 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d22,#50 vsli.64 d25,d22,#46 vmov d29,d22 vsli.64 d26,d22,#23 #if 30<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d23,d16 @ Ch(e,f,g) vshr.u64 d24,d18,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d17 vshr.u64 d25,d18,#34 vsli.64 d24,d18,#36 vadd.i64 d27,d26 vshr.u64 d26,d18,#39 vadd.i64 d28,d14 vsli.64 d25,d18,#30 veor d30,d18,d19 vsli.64 d26,d18,#25 veor d17,d24,d25 vadd.i64 d27,d28 vbsl d30,d20,d19 @ Maj(a,b,c) veor d17,d26 @ Sigma0(a) vadd.i64 d21,d27 vadd.i64 d30,d27 @ vadd.i64 d17,d30 vshr.u64 d24,d21,#14 @ 31 #if 31<16 vld1.64 {d15},[r1]! 
@ handles unaligned #endif vshr.u64 d25,d21,#18 #if 31>0 vadd.i64 d17,d30 @ h+=Maj from the past #endif vshr.u64 d26,d21,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d21,#50 vsli.64 d25,d21,#46 vmov d29,d21 vsli.64 d26,d21,#23 #if 31<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d22,d23 @ Ch(e,f,g) vshr.u64 d24,d17,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d16 vshr.u64 d25,d17,#34 vsli.64 d24,d17,#36 vadd.i64 d27,d26 vshr.u64 d26,d17,#39 vadd.i64 d28,d15 vsli.64 d25,d17,#30 veor d30,d17,d18 vsli.64 d26,d17,#25 veor d16,d24,d25 vadd.i64 d27,d28 vbsl d30,d19,d18 @ Maj(a,b,c) veor d16,d26 @ Sigma0(a) vadd.i64 d20,d27 vadd.i64 d30,d27 @ vadd.i64 d16,d30 bne .L16_79_neon vadd.i64 d16,d30 @ h+=Maj from the past vldmia r0,{d24,d25,d26,d27,d28,d29,d30,d31} @ load context to temp vadd.i64 q8,q12 @ vectorized accumulate vadd.i64 q9,q13 vadd.i64 q10,q14 vadd.i64 q11,q15 vstmia r0,{d16,d17,d18,d19,d20,d21,d22,d23} @ save context teq r1,r2 sub r3,#640 @ rewind K512 bne .Loop_neon VFP_ABI_POP bx lr @ .word 0xe12fff1e .size sha512_block_data_order_neon,.-sha512_block_data_order_neon #endif .byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 #endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__)
fatiimajamiil/rustpad-custom
24,471
.cargo/registry/src/index.crates.io-6f17d22bba15001f/ring-0.17.14/pregenerated/aes-gcm-avx2-x86_64-macosx.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__) .section __DATA,__const .p2align 4 L$bswap_mask: .quad 0x08090a0b0c0d0e0f, 0x0001020304050607 L$gfpoly: .quad 1, 0xc200000000000000 L$gfpoly_and_internal_carrybit: .quad 1, 0xc200000000000001 .p2align 5 L$ctr_pattern: .quad 0, 0 .quad 1, 0 L$inc_2blocks: .quad 2, 0 .quad 2, 0 .text .globl _gcm_init_vpclmulqdq_avx2 .private_extern _gcm_init_vpclmulqdq_avx2 .p2align 5 _gcm_init_vpclmulqdq_avx2: _CET_ENDBR vpshufd $0x4e,(%rsi),%xmm3 vpshufd $0xd3,%xmm3,%xmm0 vpsrad $31,%xmm0,%xmm0 vpaddq %xmm3,%xmm3,%xmm3 vpand L$gfpoly_and_internal_carrybit(%rip),%xmm0,%xmm0 vpxor %xmm0,%xmm3,%xmm3 vbroadcasti128 L$gfpoly(%rip),%ymm6 vpclmulqdq $0x00,%xmm3,%xmm3,%xmm0 vpclmulqdq $0x01,%xmm3,%xmm3,%xmm1 vpclmulqdq $0x10,%xmm3,%xmm3,%xmm2 vpxor %xmm2,%xmm1,%xmm1 vpclmulqdq $0x01,%xmm0,%xmm6,%xmm2 vpshufd $0x4e,%xmm0,%xmm0 vpxor %xmm0,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm3,%xmm3,%xmm5 vpclmulqdq $0x01,%xmm1,%xmm6,%xmm0 vpshufd $0x4e,%xmm1,%xmm1 vpxor %xmm1,%xmm5,%xmm5 vpxor %xmm0,%xmm5,%xmm5 vinserti128 $1,%xmm3,%ymm5,%ymm3 vinserti128 $1,%xmm5,%ymm5,%ymm5 .byte 0xc4,0xe3,0x65,0x44,0xc5,0x00 .byte 0xc4,0xe3,0x65,0x44,0xcd,0x01 .byte 0xc4,0xe3,0x65,0x44,0xd5,0x10 vpxor %ymm2,%ymm1,%ymm1 .byte 0xc4,0xe3,0x4d,0x44,0xd0,0x01 vpshufd $0x4e,%ymm0,%ymm0 vpxor %ymm0,%ymm1,%ymm1 vpxor %ymm2,%ymm1,%ymm1 .byte 0xc4,0xe3,0x65,0x44,0xe5,0x11 .byte 0xc4,0xe3,0x4d,0x44,0xc1,0x01 vpshufd $0x4e,%ymm1,%ymm1 vpxor %ymm1,%ymm4,%ymm4 vpxor %ymm0,%ymm4,%ymm4 vmovdqu %ymm3,96(%rdi) vmovdqu %ymm4,64(%rdi) vpunpcklqdq %ymm3,%ymm4,%ymm0 vpunpckhqdq %ymm3,%ymm4,%ymm1 vpxor %ymm1,%ymm0,%ymm0 vmovdqu %ymm0,128+32(%rdi) .byte 0xc4,0xe3,0x5d,0x44,0xc5,0x00 .byte 0xc4,0xe3,0x5d,0x44,0xcd,0x01 .byte 0xc4,0xe3,0x5d,0x44,0xd5,0x10 vpxor %ymm2,%ymm1,%ymm1 .byte 0xc4,0xe3,0x4d,0x44,0xd0,0x01 vpshufd $0x4e,%ymm0,%ymm0 vpxor %ymm0,%ymm1,%ymm1 vpxor %ymm2,%ymm1,%ymm1 .byte 0xc4,0xe3,0x5d,0x44,0xdd,0x11 .byte 0xc4,0xe3,0x4d,0x44,0xc1,0x01 vpshufd $0x4e,%ymm1,%ymm1 vpxor %ymm1,%ymm3,%ymm3 vpxor %ymm0,%ymm3,%ymm3 .byte 0xc4,0xe3,0x65,0x44,0xc5,0x00 .byte 0xc4,0xe3,0x65,0x44,0xcd,0x01 .byte 0xc4,0xe3,0x65,0x44,0xd5,0x10 vpxor %ymm2,%ymm1,%ymm1 .byte 0xc4,0xe3,0x4d,0x44,0xd0,0x01 vpshufd $0x4e,%ymm0,%ymm0 vpxor %ymm0,%ymm1,%ymm1 vpxor %ymm2,%ymm1,%ymm1 .byte 0xc4,0xe3,0x65,0x44,0xe5,0x11 .byte 0xc4,0xe3,0x4d,0x44,0xc1,0x01 vpshufd $0x4e,%ymm1,%ymm1 vpxor %ymm1,%ymm4,%ymm4 vpxor %ymm0,%ymm4,%ymm4 vmovdqu %ymm3,32(%rdi) vmovdqu %ymm4,0(%rdi) vpunpcklqdq %ymm3,%ymm4,%ymm0 vpunpckhqdq %ymm3,%ymm4,%ymm1 vpxor %ymm1,%ymm0,%ymm0 vmovdqu %ymm0,128(%rdi) vzeroupper ret .globl _gcm_ghash_vpclmulqdq_avx2_1 .private_extern _gcm_ghash_vpclmulqdq_avx2_1 .p2align 5 _gcm_ghash_vpclmulqdq_avx2_1: _CET_ENDBR vmovdqu L$bswap_mask(%rip),%xmm6 vmovdqu L$gfpoly(%rip),%xmm7 vmovdqu (%rdi),%xmm5 vpshufb %xmm6,%xmm5,%xmm5 L$ghash_lastblock: vmovdqu (%rdx),%xmm0 vpshufb %xmm6,%xmm0,%xmm0 vpxor %xmm0,%xmm5,%xmm5 vmovdqu 128-16(%rsi),%xmm0 vpclmulqdq $0x00,%xmm0,%xmm5,%xmm1 vpclmulqdq $0x01,%xmm0,%xmm5,%xmm2 vpclmulqdq $0x10,%xmm0,%xmm5,%xmm3 vpxor %xmm3,%xmm2,%xmm2 vpclmulqdq $0x01,%xmm1,%xmm7,%xmm3 vpshufd $0x4e,%xmm1,%xmm1 vpxor %xmm1,%xmm2,%xmm2 vpxor %xmm3,%xmm2,%xmm2 vpclmulqdq $0x11,%xmm0,%xmm5,%xmm5 vpclmulqdq $0x01,%xmm2,%xmm7,%xmm1 vpshufd $0x4e,%xmm2,%xmm2 vpxor %xmm2,%xmm5,%xmm5 vpxor %xmm1,%xmm5,%xmm5 L$ghash_done: vpshufb 
%xmm6,%xmm5,%xmm5 vmovdqu %xmm5,(%rdi) vzeroupper ret .globl _aes_gcm_enc_update_vaes_avx2 .private_extern _aes_gcm_enc_update_vaes_avx2 .p2align 5 _aes_gcm_enc_update_vaes_avx2: _CET_ENDBR pushq %r12 movq 16(%rsp),%r12 #ifdef BORINGSSL_DISPATCH_TEST movb $1,_BORINGSSL_function_hit+8(%rip) #endif vbroadcasti128 L$bswap_mask(%rip),%ymm0 vmovdqu (%r12),%xmm1 vpshufb %xmm0,%xmm1,%xmm1 vbroadcasti128 (%r8),%ymm11 vpshufb %ymm0,%ymm11,%ymm11 movl 240(%rcx),%r10d leal -20(,%r10,4),%r10d leaq 96(%rcx,%r10,4),%r11 vbroadcasti128 (%rcx),%ymm9 vbroadcasti128 (%r11),%ymm10 vpaddd L$ctr_pattern(%rip),%ymm11,%ymm11 cmpq $127,%rdx jbe L$crypt_loop_4x_done__func1 vmovdqu 128(%r9),%ymm7 vmovdqu 128+32(%r9),%ymm8 vmovdqu L$inc_2blocks(%rip),%ymm2 vpshufb %ymm0,%ymm11,%ymm12 vpaddd %ymm2,%ymm11,%ymm11 vpshufb %ymm0,%ymm11,%ymm13 vpaddd %ymm2,%ymm11,%ymm11 vpshufb %ymm0,%ymm11,%ymm14 vpaddd %ymm2,%ymm11,%ymm11 vpshufb %ymm0,%ymm11,%ymm15 vpaddd %ymm2,%ymm11,%ymm11 vpxor %ymm9,%ymm12,%ymm12 vpxor %ymm9,%ymm13,%ymm13 vpxor %ymm9,%ymm14,%ymm14 vpxor %ymm9,%ymm15,%ymm15 leaq 16(%rcx),%rax L$vaesenc_loop_first_4_vecs__func1: vbroadcasti128 (%rax),%ymm2 .byte 0xc4,0x62,0x1d,0xdc,0xe2 .byte 0xc4,0x62,0x15,0xdc,0xea .byte 0xc4,0x62,0x0d,0xdc,0xf2 .byte 0xc4,0x62,0x05,0xdc,0xfa addq $16,%rax cmpq %rax,%r11 jne L$vaesenc_loop_first_4_vecs__func1 vpxor 0(%rdi),%ymm10,%ymm2 vpxor 32(%rdi),%ymm10,%ymm3 vpxor 64(%rdi),%ymm10,%ymm5 vpxor 96(%rdi),%ymm10,%ymm6 .byte 0xc4,0x62,0x1d,0xdd,0xe2 .byte 0xc4,0x62,0x15,0xdd,0xeb .byte 0xc4,0x62,0x0d,0xdd,0xf5 .byte 0xc4,0x62,0x05,0xdd,0xfe vmovdqu %ymm12,0(%rsi) vmovdqu %ymm13,32(%rsi) vmovdqu %ymm14,64(%rsi) vmovdqu %ymm15,96(%rsi) subq $-128,%rdi addq $-128,%rdx cmpq $127,%rdx jbe L$ghash_last_ciphertext_4x__func1 .p2align 4 L$crypt_loop_4x__func1: vmovdqu L$inc_2blocks(%rip),%ymm2 vpshufb %ymm0,%ymm11,%ymm12 vpaddd %ymm2,%ymm11,%ymm11 vpshufb %ymm0,%ymm11,%ymm13 vpaddd %ymm2,%ymm11,%ymm11 vpshufb %ymm0,%ymm11,%ymm14 vpaddd %ymm2,%ymm11,%ymm11 vpshufb %ymm0,%ymm11,%ymm15 vpaddd %ymm2,%ymm11,%ymm11 vpxor %ymm9,%ymm12,%ymm12 vpxor %ymm9,%ymm13,%ymm13 vpxor %ymm9,%ymm14,%ymm14 vpxor %ymm9,%ymm15,%ymm15 cmpl $24,%r10d jl L$aes128__func1 je L$aes192__func1 vbroadcasti128 -208(%r11),%ymm2 .byte 0xc4,0x62,0x1d,0xdc,0xe2 .byte 0xc4,0x62,0x15,0xdc,0xea .byte 0xc4,0x62,0x0d,0xdc,0xf2 .byte 0xc4,0x62,0x05,0xdc,0xfa vbroadcasti128 -192(%r11),%ymm2 .byte 0xc4,0x62,0x1d,0xdc,0xe2 .byte 0xc4,0x62,0x15,0xdc,0xea .byte 0xc4,0x62,0x0d,0xdc,0xf2 .byte 0xc4,0x62,0x05,0xdc,0xfa L$aes192__func1: vbroadcasti128 -176(%r11),%ymm2 .byte 0xc4,0x62,0x1d,0xdc,0xe2 .byte 0xc4,0x62,0x15,0xdc,0xea .byte 0xc4,0x62,0x0d,0xdc,0xf2 .byte 0xc4,0x62,0x05,0xdc,0xfa vbroadcasti128 -160(%r11),%ymm2 .byte 0xc4,0x62,0x1d,0xdc,0xe2 .byte 0xc4,0x62,0x15,0xdc,0xea .byte 0xc4,0x62,0x0d,0xdc,0xf2 .byte 0xc4,0x62,0x05,0xdc,0xfa L$aes128__func1: prefetcht0 512(%rdi) prefetcht0 512+64(%rdi) vmovdqu 0(%rsi),%ymm3 vpshufb %ymm0,%ymm3,%ymm3 vmovdqu 0(%r9),%ymm4 vpxor %ymm1,%ymm3,%ymm3 .byte 0xc4,0xe3,0x65,0x44,0xec,0x00 .byte 0xc4,0xe3,0x65,0x44,0xcc,0x11 vpunpckhqdq %ymm3,%ymm3,%ymm2 vpxor %ymm3,%ymm2,%ymm2 .byte 0xc4,0xe3,0x6d,0x44,0xf7,0x00 vbroadcasti128 -144(%r11),%ymm2 .byte 0xc4,0x62,0x1d,0xdc,0xe2 .byte 0xc4,0x62,0x15,0xdc,0xea .byte 0xc4,0x62,0x0d,0xdc,0xf2 .byte 0xc4,0x62,0x05,0xdc,0xfa vbroadcasti128 -128(%r11),%ymm2 .byte 0xc4,0x62,0x1d,0xdc,0xe2 .byte 0xc4,0x62,0x15,0xdc,0xea .byte 0xc4,0x62,0x0d,0xdc,0xf2 .byte 0xc4,0x62,0x05,0xdc,0xfa vmovdqu 32(%rsi),%ymm3 vpshufb %ymm0,%ymm3,%ymm3 vmovdqu 32(%r9),%ymm4 .byte 
0xc4,0xe3,0x65,0x44,0xd4,0x00 vpxor %ymm2,%ymm5,%ymm5 .byte 0xc4,0xe3,0x65,0x44,0xd4,0x11 vpxor %ymm2,%ymm1,%ymm1 vpunpckhqdq %ymm3,%ymm3,%ymm2 vpxor %ymm3,%ymm2,%ymm2 .byte 0xc4,0xe3,0x6d,0x44,0xd7,0x10 vpxor %ymm2,%ymm6,%ymm6 vbroadcasti128 -112(%r11),%ymm2 .byte 0xc4,0x62,0x1d,0xdc,0xe2 .byte 0xc4,0x62,0x15,0xdc,0xea .byte 0xc4,0x62,0x0d,0xdc,0xf2 .byte 0xc4,0x62,0x05,0xdc,0xfa vmovdqu 64(%rsi),%ymm3 vpshufb %ymm0,%ymm3,%ymm3 vmovdqu 64(%r9),%ymm4 vbroadcasti128 -96(%r11),%ymm2 .byte 0xc4,0x62,0x1d,0xdc,0xe2 .byte 0xc4,0x62,0x15,0xdc,0xea .byte 0xc4,0x62,0x0d,0xdc,0xf2 .byte 0xc4,0x62,0x05,0xdc,0xfa .byte 0xc4,0xe3,0x65,0x44,0xd4,0x00 vpxor %ymm2,%ymm5,%ymm5 .byte 0xc4,0xe3,0x65,0x44,0xd4,0x11 vpxor %ymm2,%ymm1,%ymm1 vbroadcasti128 -80(%r11),%ymm2 .byte 0xc4,0x62,0x1d,0xdc,0xe2 .byte 0xc4,0x62,0x15,0xdc,0xea .byte 0xc4,0x62,0x0d,0xdc,0xf2 .byte 0xc4,0x62,0x05,0xdc,0xfa vpunpckhqdq %ymm3,%ymm3,%ymm2 vpxor %ymm3,%ymm2,%ymm2 .byte 0xc4,0xc3,0x6d,0x44,0xd0,0x00 vpxor %ymm2,%ymm6,%ymm6 vmovdqu 96(%rsi),%ymm3 vpshufb %ymm0,%ymm3,%ymm3 vbroadcasti128 -64(%r11),%ymm2 .byte 0xc4,0x62,0x1d,0xdc,0xe2 .byte 0xc4,0x62,0x15,0xdc,0xea .byte 0xc4,0x62,0x0d,0xdc,0xf2 .byte 0xc4,0x62,0x05,0xdc,0xfa vmovdqu 96(%r9),%ymm4 .byte 0xc4,0xe3,0x65,0x44,0xd4,0x00 vpxor %ymm2,%ymm5,%ymm5 .byte 0xc4,0xe3,0x65,0x44,0xd4,0x11 vpxor %ymm2,%ymm1,%ymm1 vpunpckhqdq %ymm3,%ymm3,%ymm2 vpxor %ymm3,%ymm2,%ymm2 .byte 0xc4,0xc3,0x6d,0x44,0xd0,0x10 vpxor %ymm2,%ymm6,%ymm6 vbroadcasti128 -48(%r11),%ymm2 .byte 0xc4,0x62,0x1d,0xdc,0xe2 .byte 0xc4,0x62,0x15,0xdc,0xea .byte 0xc4,0x62,0x0d,0xdc,0xf2 .byte 0xc4,0x62,0x05,0xdc,0xfa vpxor %ymm5,%ymm6,%ymm6 vpxor %ymm1,%ymm6,%ymm6 vbroadcasti128 L$gfpoly(%rip),%ymm4 .byte 0xc4,0xe3,0x5d,0x44,0xd5,0x01 vpshufd $0x4e,%ymm5,%ymm5 vpxor %ymm5,%ymm6,%ymm6 vpxor %ymm2,%ymm6,%ymm6 vbroadcasti128 -32(%r11),%ymm2 .byte 0xc4,0x62,0x1d,0xdc,0xe2 .byte 0xc4,0x62,0x15,0xdc,0xea .byte 0xc4,0x62,0x0d,0xdc,0xf2 .byte 0xc4,0x62,0x05,0xdc,0xfa .byte 0xc4,0xe3,0x5d,0x44,0xd6,0x01 vpshufd $0x4e,%ymm6,%ymm6 vpxor %ymm6,%ymm1,%ymm1 vpxor %ymm2,%ymm1,%ymm1 vbroadcasti128 -16(%r11),%ymm2 .byte 0xc4,0x62,0x1d,0xdc,0xe2 .byte 0xc4,0x62,0x15,0xdc,0xea .byte 0xc4,0x62,0x0d,0xdc,0xf2 .byte 0xc4,0x62,0x05,0xdc,0xfa vextracti128 $1,%ymm1,%xmm2 vpxor %xmm2,%xmm1,%xmm1 subq $-128,%rsi vpxor 0(%rdi),%ymm10,%ymm2 vpxor 32(%rdi),%ymm10,%ymm3 vpxor 64(%rdi),%ymm10,%ymm5 vpxor 96(%rdi),%ymm10,%ymm6 .byte 0xc4,0x62,0x1d,0xdd,0xe2 .byte 0xc4,0x62,0x15,0xdd,0xeb .byte 0xc4,0x62,0x0d,0xdd,0xf5 .byte 0xc4,0x62,0x05,0xdd,0xfe vmovdqu %ymm12,0(%rsi) vmovdqu %ymm13,32(%rsi) vmovdqu %ymm14,64(%rsi) vmovdqu %ymm15,96(%rsi) subq $-128,%rdi addq $-128,%rdx cmpq $127,%rdx ja L$crypt_loop_4x__func1 L$ghash_last_ciphertext_4x__func1: vmovdqu 0(%rsi),%ymm3 vpshufb %ymm0,%ymm3,%ymm3 vmovdqu 0(%r9),%ymm4 vpxor %ymm1,%ymm3,%ymm3 .byte 0xc4,0xe3,0x65,0x44,0xec,0x00 .byte 0xc4,0xe3,0x65,0x44,0xcc,0x11 vpunpckhqdq %ymm3,%ymm3,%ymm2 vpxor %ymm3,%ymm2,%ymm2 .byte 0xc4,0xe3,0x6d,0x44,0xf7,0x00 vmovdqu 32(%rsi),%ymm3 vpshufb %ymm0,%ymm3,%ymm3 vmovdqu 32(%r9),%ymm4 .byte 0xc4,0xe3,0x65,0x44,0xd4,0x00 vpxor %ymm2,%ymm5,%ymm5 .byte 0xc4,0xe3,0x65,0x44,0xd4,0x11 vpxor %ymm2,%ymm1,%ymm1 vpunpckhqdq %ymm3,%ymm3,%ymm2 vpxor %ymm3,%ymm2,%ymm2 .byte 0xc4,0xe3,0x6d,0x44,0xd7,0x10 vpxor %ymm2,%ymm6,%ymm6 vmovdqu 64(%rsi),%ymm3 vpshufb %ymm0,%ymm3,%ymm3 vmovdqu 64(%r9),%ymm4 .byte 0xc4,0xe3,0x65,0x44,0xd4,0x00 vpxor %ymm2,%ymm5,%ymm5 .byte 0xc4,0xe3,0x65,0x44,0xd4,0x11 vpxor %ymm2,%ymm1,%ymm1 vpunpckhqdq %ymm3,%ymm3,%ymm2 vpxor %ymm3,%ymm2,%ymm2 .byte 
0xc4,0xc3,0x6d,0x44,0xd0,0x00 vpxor %ymm2,%ymm6,%ymm6 vmovdqu 96(%rsi),%ymm3 vpshufb %ymm0,%ymm3,%ymm3 vmovdqu 96(%r9),%ymm4 .byte 0xc4,0xe3,0x65,0x44,0xd4,0x00 vpxor %ymm2,%ymm5,%ymm5 .byte 0xc4,0xe3,0x65,0x44,0xd4,0x11 vpxor %ymm2,%ymm1,%ymm1 vpunpckhqdq %ymm3,%ymm3,%ymm2 vpxor %ymm3,%ymm2,%ymm2 .byte 0xc4,0xc3,0x6d,0x44,0xd0,0x10 vpxor %ymm2,%ymm6,%ymm6 vpxor %ymm5,%ymm6,%ymm6 vpxor %ymm1,%ymm6,%ymm6 vbroadcasti128 L$gfpoly(%rip),%ymm4 .byte 0xc4,0xe3,0x5d,0x44,0xd5,0x01 vpshufd $0x4e,%ymm5,%ymm5 vpxor %ymm5,%ymm6,%ymm6 vpxor %ymm2,%ymm6,%ymm6 .byte 0xc4,0xe3,0x5d,0x44,0xd6,0x01 vpshufd $0x4e,%ymm6,%ymm6 vpxor %ymm6,%ymm1,%ymm1 vpxor %ymm2,%ymm1,%ymm1 vextracti128 $1,%ymm1,%xmm2 vpxor %xmm2,%xmm1,%xmm1 subq $-128,%rsi L$crypt_loop_4x_done__func1: testq %rdx,%rdx jz L$done__func1 leaq 128(%r9),%r8 subq %rdx,%r8 vpxor %xmm5,%xmm5,%xmm5 vpxor %xmm6,%xmm6,%xmm6 vpxor %xmm7,%xmm7,%xmm7 cmpq $64,%rdx jb L$lessthan64bytes__func1 vpshufb %ymm0,%ymm11,%ymm12 vpaddd L$inc_2blocks(%rip),%ymm11,%ymm11 vpshufb %ymm0,%ymm11,%ymm13 vpaddd L$inc_2blocks(%rip),%ymm11,%ymm11 vpxor %ymm9,%ymm12,%ymm12 vpxor %ymm9,%ymm13,%ymm13 leaq 16(%rcx),%rax L$vaesenc_loop_tail_1__func1: vbroadcasti128 (%rax),%ymm2 .byte 0xc4,0x62,0x1d,0xdc,0xe2 .byte 0xc4,0x62,0x15,0xdc,0xea addq $16,%rax cmpq %rax,%r11 jne L$vaesenc_loop_tail_1__func1 .byte 0xc4,0x42,0x1d,0xdd,0xe2 .byte 0xc4,0x42,0x15,0xdd,0xea vmovdqu 0(%rdi),%ymm2 vmovdqu 32(%rdi),%ymm3 vpxor %ymm2,%ymm12,%ymm12 vpxor %ymm3,%ymm13,%ymm13 vmovdqu %ymm12,0(%rsi) vmovdqu %ymm13,32(%rsi) vpshufb %ymm0,%ymm12,%ymm12 vpshufb %ymm0,%ymm13,%ymm13 vpxor %ymm1,%ymm12,%ymm12 vmovdqu (%r8),%ymm2 vmovdqu 32(%r8),%ymm3 .byte 0xc4,0xe3,0x1d,0x44,0xea,0x00 .byte 0xc4,0xe3,0x1d,0x44,0xf2,0x01 .byte 0xc4,0xe3,0x1d,0x44,0xe2,0x10 vpxor %ymm4,%ymm6,%ymm6 .byte 0xc4,0xe3,0x1d,0x44,0xfa,0x11 .byte 0xc4,0xe3,0x15,0x44,0xe3,0x00 vpxor %ymm4,%ymm5,%ymm5 .byte 0xc4,0xe3,0x15,0x44,0xe3,0x01 vpxor %ymm4,%ymm6,%ymm6 .byte 0xc4,0xe3,0x15,0x44,0xe3,0x10 vpxor %ymm4,%ymm6,%ymm6 .byte 0xc4,0xe3,0x15,0x44,0xe3,0x11 vpxor %ymm4,%ymm7,%ymm7 addq $64,%r8 addq $64,%rdi addq $64,%rsi subq $64,%rdx jz L$reduce__func1 vpxor %xmm1,%xmm1,%xmm1 L$lessthan64bytes__func1: vpshufb %ymm0,%ymm11,%ymm12 vpaddd L$inc_2blocks(%rip),%ymm11,%ymm11 vpshufb %ymm0,%ymm11,%ymm13 vpxor %ymm9,%ymm12,%ymm12 vpxor %ymm9,%ymm13,%ymm13 leaq 16(%rcx),%rax L$vaesenc_loop_tail_2__func1: vbroadcasti128 (%rax),%ymm2 .byte 0xc4,0x62,0x1d,0xdc,0xe2 .byte 0xc4,0x62,0x15,0xdc,0xea addq $16,%rax cmpq %rax,%r11 jne L$vaesenc_loop_tail_2__func1 .byte 0xc4,0x42,0x1d,0xdd,0xe2 .byte 0xc4,0x42,0x15,0xdd,0xea cmpq $32,%rdx jb L$xor_one_block__func1 je L$xor_two_blocks__func1 L$xor_three_blocks__func1: vmovdqu 0(%rdi),%ymm2 vmovdqu 32(%rdi),%xmm3 vpxor %ymm2,%ymm12,%ymm12 vpxor %xmm3,%xmm13,%xmm13 vmovdqu %ymm12,0(%rsi) vmovdqu %xmm13,32(%rsi) vpshufb %ymm0,%ymm12,%ymm12 vpshufb %xmm0,%xmm13,%xmm13 vpxor %ymm1,%ymm12,%ymm12 vmovdqu (%r8),%ymm2 vmovdqu 32(%r8),%xmm3 vpclmulqdq $0x00,%xmm3,%xmm13,%xmm4 vpxor %ymm4,%ymm5,%ymm5 vpclmulqdq $0x01,%xmm3,%xmm13,%xmm4 vpxor %ymm4,%ymm6,%ymm6 vpclmulqdq $0x10,%xmm3,%xmm13,%xmm4 vpxor %ymm4,%ymm6,%ymm6 vpclmulqdq $0x11,%xmm3,%xmm13,%xmm4 vpxor %ymm4,%ymm7,%ymm7 jmp L$ghash_mul_one_vec_unreduced__func1 L$xor_two_blocks__func1: vmovdqu (%rdi),%ymm2 vpxor %ymm2,%ymm12,%ymm12 vmovdqu %ymm12,(%rsi) vpshufb %ymm0,%ymm12,%ymm12 vpxor %ymm1,%ymm12,%ymm12 vmovdqu (%r8),%ymm2 jmp L$ghash_mul_one_vec_unreduced__func1 L$xor_one_block__func1: vmovdqu (%rdi),%xmm2 vpxor %xmm2,%xmm12,%xmm12 vmovdqu %xmm12,(%rsi) 
vpshufb %xmm0,%xmm12,%xmm12 vpxor %xmm1,%xmm12,%xmm12 vmovdqu (%r8),%xmm2 L$ghash_mul_one_vec_unreduced__func1: .byte 0xc4,0xe3,0x1d,0x44,0xe2,0x00 vpxor %ymm4,%ymm5,%ymm5 .byte 0xc4,0xe3,0x1d,0x44,0xe2,0x01 vpxor %ymm4,%ymm6,%ymm6 .byte 0xc4,0xe3,0x1d,0x44,0xe2,0x10 vpxor %ymm4,%ymm6,%ymm6 .byte 0xc4,0xe3,0x1d,0x44,0xe2,0x11 vpxor %ymm4,%ymm7,%ymm7 L$reduce__func1: vbroadcasti128 L$gfpoly(%rip),%ymm2 .byte 0xc4,0xe3,0x6d,0x44,0xdd,0x01 vpshufd $0x4e,%ymm5,%ymm5 vpxor %ymm5,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 .byte 0xc4,0xe3,0x6d,0x44,0xde,0x01 vpshufd $0x4e,%ymm6,%ymm6 vpxor %ymm6,%ymm7,%ymm7 vpxor %ymm3,%ymm7,%ymm7 vextracti128 $1,%ymm7,%xmm1 vpxor %xmm7,%xmm1,%xmm1 L$done__func1: vpshufb %xmm0,%xmm1,%xmm1 vmovdqu %xmm1,(%r12) vzeroupper popq %r12 ret .globl _aes_gcm_dec_update_vaes_avx2 .private_extern _aes_gcm_dec_update_vaes_avx2 .p2align 5 _aes_gcm_dec_update_vaes_avx2: _CET_ENDBR pushq %r12 movq 16(%rsp),%r12 vbroadcasti128 L$bswap_mask(%rip),%ymm0 vmovdqu (%r12),%xmm1 vpshufb %xmm0,%xmm1,%xmm1 vbroadcasti128 (%r8),%ymm11 vpshufb %ymm0,%ymm11,%ymm11 movl 240(%rcx),%r10d leal -20(,%r10,4),%r10d leaq 96(%rcx,%r10,4),%r11 vbroadcasti128 (%rcx),%ymm9 vbroadcasti128 (%r11),%ymm10 vpaddd L$ctr_pattern(%rip),%ymm11,%ymm11 cmpq $127,%rdx jbe L$crypt_loop_4x_done__func2 vmovdqu 128(%r9),%ymm7 vmovdqu 128+32(%r9),%ymm8 .p2align 4 L$crypt_loop_4x__func2: vmovdqu L$inc_2blocks(%rip),%ymm2 vpshufb %ymm0,%ymm11,%ymm12 vpaddd %ymm2,%ymm11,%ymm11 vpshufb %ymm0,%ymm11,%ymm13 vpaddd %ymm2,%ymm11,%ymm11 vpshufb %ymm0,%ymm11,%ymm14 vpaddd %ymm2,%ymm11,%ymm11 vpshufb %ymm0,%ymm11,%ymm15 vpaddd %ymm2,%ymm11,%ymm11 vpxor %ymm9,%ymm12,%ymm12 vpxor %ymm9,%ymm13,%ymm13 vpxor %ymm9,%ymm14,%ymm14 vpxor %ymm9,%ymm15,%ymm15 cmpl $24,%r10d jl L$aes128__func2 je L$aes192__func2 vbroadcasti128 -208(%r11),%ymm2 .byte 0xc4,0x62,0x1d,0xdc,0xe2 .byte 0xc4,0x62,0x15,0xdc,0xea .byte 0xc4,0x62,0x0d,0xdc,0xf2 .byte 0xc4,0x62,0x05,0xdc,0xfa vbroadcasti128 -192(%r11),%ymm2 .byte 0xc4,0x62,0x1d,0xdc,0xe2 .byte 0xc4,0x62,0x15,0xdc,0xea .byte 0xc4,0x62,0x0d,0xdc,0xf2 .byte 0xc4,0x62,0x05,0xdc,0xfa L$aes192__func2: vbroadcasti128 -176(%r11),%ymm2 .byte 0xc4,0x62,0x1d,0xdc,0xe2 .byte 0xc4,0x62,0x15,0xdc,0xea .byte 0xc4,0x62,0x0d,0xdc,0xf2 .byte 0xc4,0x62,0x05,0xdc,0xfa vbroadcasti128 -160(%r11),%ymm2 .byte 0xc4,0x62,0x1d,0xdc,0xe2 .byte 0xc4,0x62,0x15,0xdc,0xea .byte 0xc4,0x62,0x0d,0xdc,0xf2 .byte 0xc4,0x62,0x05,0xdc,0xfa L$aes128__func2: prefetcht0 512(%rdi) prefetcht0 512+64(%rdi) vmovdqu 0(%rdi),%ymm3 vpshufb %ymm0,%ymm3,%ymm3 vmovdqu 0(%r9),%ymm4 vpxor %ymm1,%ymm3,%ymm3 .byte 0xc4,0xe3,0x65,0x44,0xec,0x00 .byte 0xc4,0xe3,0x65,0x44,0xcc,0x11 vpunpckhqdq %ymm3,%ymm3,%ymm2 vpxor %ymm3,%ymm2,%ymm2 .byte 0xc4,0xe3,0x6d,0x44,0xf7,0x00 vbroadcasti128 -144(%r11),%ymm2 .byte 0xc4,0x62,0x1d,0xdc,0xe2 .byte 0xc4,0x62,0x15,0xdc,0xea .byte 0xc4,0x62,0x0d,0xdc,0xf2 .byte 0xc4,0x62,0x05,0xdc,0xfa vbroadcasti128 -128(%r11),%ymm2 .byte 0xc4,0x62,0x1d,0xdc,0xe2 .byte 0xc4,0x62,0x15,0xdc,0xea .byte 0xc4,0x62,0x0d,0xdc,0xf2 .byte 0xc4,0x62,0x05,0xdc,0xfa vmovdqu 32(%rdi),%ymm3 vpshufb %ymm0,%ymm3,%ymm3 vmovdqu 32(%r9),%ymm4 .byte 0xc4,0xe3,0x65,0x44,0xd4,0x00 vpxor %ymm2,%ymm5,%ymm5 .byte 0xc4,0xe3,0x65,0x44,0xd4,0x11 vpxor %ymm2,%ymm1,%ymm1 vpunpckhqdq %ymm3,%ymm3,%ymm2 vpxor %ymm3,%ymm2,%ymm2 .byte 0xc4,0xe3,0x6d,0x44,0xd7,0x10 vpxor %ymm2,%ymm6,%ymm6 vbroadcasti128 -112(%r11),%ymm2 .byte 0xc4,0x62,0x1d,0xdc,0xe2 .byte 0xc4,0x62,0x15,0xdc,0xea .byte 0xc4,0x62,0x0d,0xdc,0xf2 .byte 0xc4,0x62,0x05,0xdc,0xfa vmovdqu 64(%rdi),%ymm3 vpshufb 
%ymm0,%ymm3,%ymm3 vmovdqu 64(%r9),%ymm4 vbroadcasti128 -96(%r11),%ymm2 .byte 0xc4,0x62,0x1d,0xdc,0xe2 .byte 0xc4,0x62,0x15,0xdc,0xea .byte 0xc4,0x62,0x0d,0xdc,0xf2 .byte 0xc4,0x62,0x05,0xdc,0xfa .byte 0xc4,0xe3,0x65,0x44,0xd4,0x00 vpxor %ymm2,%ymm5,%ymm5 .byte 0xc4,0xe3,0x65,0x44,0xd4,0x11 vpxor %ymm2,%ymm1,%ymm1 vbroadcasti128 -80(%r11),%ymm2 .byte 0xc4,0x62,0x1d,0xdc,0xe2 .byte 0xc4,0x62,0x15,0xdc,0xea .byte 0xc4,0x62,0x0d,0xdc,0xf2 .byte 0xc4,0x62,0x05,0xdc,0xfa vpunpckhqdq %ymm3,%ymm3,%ymm2 vpxor %ymm3,%ymm2,%ymm2 .byte 0xc4,0xc3,0x6d,0x44,0xd0,0x00 vpxor %ymm2,%ymm6,%ymm6 vmovdqu 96(%rdi),%ymm3 vpshufb %ymm0,%ymm3,%ymm3 vbroadcasti128 -64(%r11),%ymm2 .byte 0xc4,0x62,0x1d,0xdc,0xe2 .byte 0xc4,0x62,0x15,0xdc,0xea .byte 0xc4,0x62,0x0d,0xdc,0xf2 .byte 0xc4,0x62,0x05,0xdc,0xfa vmovdqu 96(%r9),%ymm4 .byte 0xc4,0xe3,0x65,0x44,0xd4,0x00 vpxor %ymm2,%ymm5,%ymm5 .byte 0xc4,0xe3,0x65,0x44,0xd4,0x11 vpxor %ymm2,%ymm1,%ymm1 vpunpckhqdq %ymm3,%ymm3,%ymm2 vpxor %ymm3,%ymm2,%ymm2 .byte 0xc4,0xc3,0x6d,0x44,0xd0,0x10 vpxor %ymm2,%ymm6,%ymm6 vbroadcasti128 -48(%r11),%ymm2 .byte 0xc4,0x62,0x1d,0xdc,0xe2 .byte 0xc4,0x62,0x15,0xdc,0xea .byte 0xc4,0x62,0x0d,0xdc,0xf2 .byte 0xc4,0x62,0x05,0xdc,0xfa vpxor %ymm5,%ymm6,%ymm6 vpxor %ymm1,%ymm6,%ymm6 vbroadcasti128 L$gfpoly(%rip),%ymm4 .byte 0xc4,0xe3,0x5d,0x44,0xd5,0x01 vpshufd $0x4e,%ymm5,%ymm5 vpxor %ymm5,%ymm6,%ymm6 vpxor %ymm2,%ymm6,%ymm6 vbroadcasti128 -32(%r11),%ymm2 .byte 0xc4,0x62,0x1d,0xdc,0xe2 .byte 0xc4,0x62,0x15,0xdc,0xea .byte 0xc4,0x62,0x0d,0xdc,0xf2 .byte 0xc4,0x62,0x05,0xdc,0xfa .byte 0xc4,0xe3,0x5d,0x44,0xd6,0x01 vpshufd $0x4e,%ymm6,%ymm6 vpxor %ymm6,%ymm1,%ymm1 vpxor %ymm2,%ymm1,%ymm1 vbroadcasti128 -16(%r11),%ymm2 .byte 0xc4,0x62,0x1d,0xdc,0xe2 .byte 0xc4,0x62,0x15,0xdc,0xea .byte 0xc4,0x62,0x0d,0xdc,0xf2 .byte 0xc4,0x62,0x05,0xdc,0xfa vextracti128 $1,%ymm1,%xmm2 vpxor %xmm2,%xmm1,%xmm1 vpxor 0(%rdi),%ymm10,%ymm2 vpxor 32(%rdi),%ymm10,%ymm3 vpxor 64(%rdi),%ymm10,%ymm5 vpxor 96(%rdi),%ymm10,%ymm6 .byte 0xc4,0x62,0x1d,0xdd,0xe2 .byte 0xc4,0x62,0x15,0xdd,0xeb .byte 0xc4,0x62,0x0d,0xdd,0xf5 .byte 0xc4,0x62,0x05,0xdd,0xfe vmovdqu %ymm12,0(%rsi) vmovdqu %ymm13,32(%rsi) vmovdqu %ymm14,64(%rsi) vmovdqu %ymm15,96(%rsi) subq $-128,%rdi subq $-128,%rsi addq $-128,%rdx cmpq $127,%rdx ja L$crypt_loop_4x__func2 L$crypt_loop_4x_done__func2: testq %rdx,%rdx jz L$done__func2 leaq 128(%r9),%r8 subq %rdx,%r8 vpxor %xmm5,%xmm5,%xmm5 vpxor %xmm6,%xmm6,%xmm6 vpxor %xmm7,%xmm7,%xmm7 cmpq $64,%rdx jb L$lessthan64bytes__func2 vpshufb %ymm0,%ymm11,%ymm12 vpaddd L$inc_2blocks(%rip),%ymm11,%ymm11 vpshufb %ymm0,%ymm11,%ymm13 vpaddd L$inc_2blocks(%rip),%ymm11,%ymm11 vpxor %ymm9,%ymm12,%ymm12 vpxor %ymm9,%ymm13,%ymm13 leaq 16(%rcx),%rax L$vaesenc_loop_tail_1__func2: vbroadcasti128 (%rax),%ymm2 .byte 0xc4,0x62,0x1d,0xdc,0xe2 .byte 0xc4,0x62,0x15,0xdc,0xea addq $16,%rax cmpq %rax,%r11 jne L$vaesenc_loop_tail_1__func2 .byte 0xc4,0x42,0x1d,0xdd,0xe2 .byte 0xc4,0x42,0x15,0xdd,0xea vmovdqu 0(%rdi),%ymm2 vmovdqu 32(%rdi),%ymm3 vpxor %ymm2,%ymm12,%ymm12 vpxor %ymm3,%ymm13,%ymm13 vmovdqu %ymm12,0(%rsi) vmovdqu %ymm13,32(%rsi) vpshufb %ymm0,%ymm2,%ymm12 vpshufb %ymm0,%ymm3,%ymm13 vpxor %ymm1,%ymm12,%ymm12 vmovdqu (%r8),%ymm2 vmovdqu 32(%r8),%ymm3 .byte 0xc4,0xe3,0x1d,0x44,0xea,0x00 .byte 0xc4,0xe3,0x1d,0x44,0xf2,0x01 .byte 0xc4,0xe3,0x1d,0x44,0xe2,0x10 vpxor %ymm4,%ymm6,%ymm6 .byte 0xc4,0xe3,0x1d,0x44,0xfa,0x11 .byte 0xc4,0xe3,0x15,0x44,0xe3,0x00 vpxor %ymm4,%ymm5,%ymm5 .byte 0xc4,0xe3,0x15,0x44,0xe3,0x01 vpxor %ymm4,%ymm6,%ymm6 .byte 0xc4,0xe3,0x15,0x44,0xe3,0x10 vpxor 
%ymm4,%ymm6,%ymm6 .byte 0xc4,0xe3,0x15,0x44,0xe3,0x11 vpxor %ymm4,%ymm7,%ymm7 addq $64,%r8 addq $64,%rdi addq $64,%rsi subq $64,%rdx jz L$reduce__func2 vpxor %xmm1,%xmm1,%xmm1 L$lessthan64bytes__func2: vpshufb %ymm0,%ymm11,%ymm12 vpaddd L$inc_2blocks(%rip),%ymm11,%ymm11 vpshufb %ymm0,%ymm11,%ymm13 vpxor %ymm9,%ymm12,%ymm12 vpxor %ymm9,%ymm13,%ymm13 leaq 16(%rcx),%rax L$vaesenc_loop_tail_2__func2: vbroadcasti128 (%rax),%ymm2 .byte 0xc4,0x62,0x1d,0xdc,0xe2 .byte 0xc4,0x62,0x15,0xdc,0xea addq $16,%rax cmpq %rax,%r11 jne L$vaesenc_loop_tail_2__func2 .byte 0xc4,0x42,0x1d,0xdd,0xe2 .byte 0xc4,0x42,0x15,0xdd,0xea cmpq $32,%rdx jb L$xor_one_block__func2 je L$xor_two_blocks__func2 L$xor_three_blocks__func2: vmovdqu 0(%rdi),%ymm2 vmovdqu 32(%rdi),%xmm3 vpxor %ymm2,%ymm12,%ymm12 vpxor %xmm3,%xmm13,%xmm13 vmovdqu %ymm12,0(%rsi) vmovdqu %xmm13,32(%rsi) vpshufb %ymm0,%ymm2,%ymm12 vpshufb %xmm0,%xmm3,%xmm13 vpxor %ymm1,%ymm12,%ymm12 vmovdqu (%r8),%ymm2 vmovdqu 32(%r8),%xmm3 vpclmulqdq $0x00,%xmm3,%xmm13,%xmm4 vpxor %ymm4,%ymm5,%ymm5 vpclmulqdq $0x01,%xmm3,%xmm13,%xmm4 vpxor %ymm4,%ymm6,%ymm6 vpclmulqdq $0x10,%xmm3,%xmm13,%xmm4 vpxor %ymm4,%ymm6,%ymm6 vpclmulqdq $0x11,%xmm3,%xmm13,%xmm4 vpxor %ymm4,%ymm7,%ymm7 jmp L$ghash_mul_one_vec_unreduced__func2 L$xor_two_blocks__func2: vmovdqu (%rdi),%ymm2 vpxor %ymm2,%ymm12,%ymm12 vmovdqu %ymm12,(%rsi) vpshufb %ymm0,%ymm2,%ymm12 vpxor %ymm1,%ymm12,%ymm12 vmovdqu (%r8),%ymm2 jmp L$ghash_mul_one_vec_unreduced__func2 L$xor_one_block__func2: vmovdqu (%rdi),%xmm2 vpxor %xmm2,%xmm12,%xmm12 vmovdqu %xmm12,(%rsi) vpshufb %xmm0,%xmm2,%xmm12 vpxor %xmm1,%xmm12,%xmm12 vmovdqu (%r8),%xmm2 L$ghash_mul_one_vec_unreduced__func2: .byte 0xc4,0xe3,0x1d,0x44,0xe2,0x00 vpxor %ymm4,%ymm5,%ymm5 .byte 0xc4,0xe3,0x1d,0x44,0xe2,0x01 vpxor %ymm4,%ymm6,%ymm6 .byte 0xc4,0xe3,0x1d,0x44,0xe2,0x10 vpxor %ymm4,%ymm6,%ymm6 .byte 0xc4,0xe3,0x1d,0x44,0xe2,0x11 vpxor %ymm4,%ymm7,%ymm7 L$reduce__func2: vbroadcasti128 L$gfpoly(%rip),%ymm2 .byte 0xc4,0xe3,0x6d,0x44,0xdd,0x01 vpshufd $0x4e,%ymm5,%ymm5 vpxor %ymm5,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 .byte 0xc4,0xe3,0x6d,0x44,0xde,0x01 vpshufd $0x4e,%ymm6,%ymm6 vpxor %ymm6,%ymm7,%ymm7 vpxor %ymm3,%ymm7,%ymm7 vextracti128 $1,%ymm7,%xmm1 vpxor %xmm7,%xmm1,%xmm1 L$done__func2: vpshufb %xmm0,%xmm1,%xmm1 vmovdqu %xmm1,(%r12) vzeroupper popq %r12 ret #endif
fatiimajamiil/rustpad-custom
20,965
.cargo/registry/src/index.crates.io-6f17d22bba15001f/ring-0.17.14/pregenerated/aesni-x86_64-elf.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__) .text .type _aesni_encrypt2,@function .align 16 _aesni_encrypt2: .cfi_startproc movups (%rcx),%xmm0 shll $4,%eax movups 16(%rcx),%xmm1 xorps %xmm0,%xmm2 xorps %xmm0,%xmm3 movups 32(%rcx),%xmm0 leaq 32(%rcx,%rax,1),%rcx negq %rax addq $16,%rax .Lenc_loop2: .byte 102,15,56,220,209 .byte 102,15,56,220,217 movups (%rcx,%rax,1),%xmm1 addq $32,%rax .byte 102,15,56,220,208 .byte 102,15,56,220,216 movups -16(%rcx,%rax,1),%xmm0 jnz .Lenc_loop2 .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,221,208 .byte 102,15,56,221,216 ret .cfi_endproc .size _aesni_encrypt2,.-_aesni_encrypt2 .type _aesni_encrypt3,@function .align 16 _aesni_encrypt3: .cfi_startproc movups (%rcx),%xmm0 shll $4,%eax movups 16(%rcx),%xmm1 xorps %xmm0,%xmm2 xorps %xmm0,%xmm3 xorps %xmm0,%xmm4 movups 32(%rcx),%xmm0 leaq 32(%rcx,%rax,1),%rcx negq %rax addq $16,%rax .Lenc_loop3: .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 movups (%rcx,%rax,1),%xmm1 addq $32,%rax .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 movups -16(%rcx,%rax,1),%xmm0 jnz .Lenc_loop3 .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,221,208 .byte 102,15,56,221,216 .byte 102,15,56,221,224 ret .cfi_endproc .size _aesni_encrypt3,.-_aesni_encrypt3 .type _aesni_encrypt4,@function .align 16 _aesni_encrypt4: .cfi_startproc movups (%rcx),%xmm0 shll $4,%eax movups 16(%rcx),%xmm1 xorps %xmm0,%xmm2 xorps %xmm0,%xmm3 xorps %xmm0,%xmm4 xorps %xmm0,%xmm5 movups 32(%rcx),%xmm0 leaq 32(%rcx,%rax,1),%rcx negq %rax .byte 0x0f,0x1f,0x00 addq $16,%rax .Lenc_loop4: .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 movups (%rcx,%rax,1),%xmm1 addq $32,%rax .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 .byte 102,15,56,220,232 movups -16(%rcx,%rax,1),%xmm0 jnz .Lenc_loop4 .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,221,208 .byte 102,15,56,221,216 .byte 102,15,56,221,224 .byte 102,15,56,221,232 ret .cfi_endproc .size _aesni_encrypt4,.-_aesni_encrypt4 .type _aesni_encrypt6,@function .align 16 _aesni_encrypt6: .cfi_startproc movups (%rcx),%xmm0 shll $4,%eax movups 16(%rcx),%xmm1 xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 .byte 102,15,56,220,209 leaq 32(%rcx,%rax,1),%rcx negq %rax .byte 102,15,56,220,217 pxor %xmm0,%xmm5 pxor %xmm0,%xmm6 .byte 102,15,56,220,225 pxor %xmm0,%xmm7 movups (%rcx,%rax,1),%xmm0 addq $16,%rax jmp .Lenc_loop6_enter .align 16 .Lenc_loop6: .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .Lenc_loop6_enter: .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 movups (%rcx,%rax,1),%xmm1 addq $32,%rax .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 .byte 102,15,56,220,232 .byte 102,15,56,220,240 .byte 102,15,56,220,248 movups -16(%rcx,%rax,1),%xmm0 jnz .Lenc_loop6 .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,15,56,221,208 .byte 102,15,56,221,216 .byte 102,15,56,221,224 .byte 102,15,56,221,232 .byte 102,15,56,221,240 .byte 102,15,56,221,248 ret .cfi_endproc .size _aesni_encrypt6,.-_aesni_encrypt6 .type _aesni_encrypt8,@function .align 
16 _aesni_encrypt8: .cfi_startproc movups (%rcx),%xmm0 shll $4,%eax movups 16(%rcx),%xmm1 xorps %xmm0,%xmm2 xorps %xmm0,%xmm3 pxor %xmm0,%xmm4 pxor %xmm0,%xmm5 pxor %xmm0,%xmm6 leaq 32(%rcx,%rax,1),%rcx negq %rax .byte 102,15,56,220,209 pxor %xmm0,%xmm7 pxor %xmm0,%xmm8 .byte 102,15,56,220,217 pxor %xmm0,%xmm9 movups (%rcx,%rax,1),%xmm0 addq $16,%rax jmp .Lenc_loop8_inner .align 16 .Lenc_loop8: .byte 102,15,56,220,209 .byte 102,15,56,220,217 .Lenc_loop8_inner: .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,68,15,56,220,193 .byte 102,68,15,56,220,201 .Lenc_loop8_enter: movups (%rcx,%rax,1),%xmm1 addq $32,%rax .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 .byte 102,15,56,220,232 .byte 102,15,56,220,240 .byte 102,15,56,220,248 .byte 102,68,15,56,220,192 .byte 102,68,15,56,220,200 movups -16(%rcx,%rax,1),%xmm0 jnz .Lenc_loop8 .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,68,15,56,220,193 .byte 102,68,15,56,220,201 .byte 102,15,56,221,208 .byte 102,15,56,221,216 .byte 102,15,56,221,224 .byte 102,15,56,221,232 .byte 102,15,56,221,240 .byte 102,15,56,221,248 .byte 102,68,15,56,221,192 .byte 102,68,15,56,221,200 ret .cfi_endproc .size _aesni_encrypt8,.-_aesni_encrypt8 .globl aes_hw_ctr32_encrypt_blocks .hidden aes_hw_ctr32_encrypt_blocks .type aes_hw_ctr32_encrypt_blocks,@function .align 16 aes_hw_ctr32_encrypt_blocks: .cfi_startproc _CET_ENDBR #ifdef BORINGSSL_DISPATCH_TEST movb $1,BORINGSSL_function_hit(%rip) #endif cmpq $1,%rdx jne .Lctr32_bulk movups (%r8),%xmm2 movups (%rdi),%xmm3 movl 240(%rcx),%edx movups (%rcx),%xmm0 movups 16(%rcx),%xmm1 leaq 32(%rcx),%rcx xorps %xmm0,%xmm2 .Loop_enc1_1: .byte 102,15,56,220,209 decl %edx movups (%rcx),%xmm1 leaq 16(%rcx),%rcx jnz .Loop_enc1_1 .byte 102,15,56,221,209 pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 xorps %xmm3,%xmm2 pxor %xmm3,%xmm3 movups %xmm2,(%rsi) xorps %xmm2,%xmm2 jmp .Lctr32_epilogue .align 16 .Lctr32_bulk: leaq (%rsp),%r11 .cfi_def_cfa_register %r11 pushq %rbp .cfi_offset %rbp,-16 subq $128,%rsp andq $-16,%rsp movdqu (%r8),%xmm2 movdqu (%rcx),%xmm0 movl 12(%r8),%r8d pxor %xmm0,%xmm2 movl 12(%rcx),%ebp movdqa %xmm2,0(%rsp) bswapl %r8d movdqa %xmm2,%xmm3 movdqa %xmm2,%xmm4 movdqa %xmm2,%xmm5 movdqa %xmm2,64(%rsp) movdqa %xmm2,80(%rsp) movdqa %xmm2,96(%rsp) movq %rdx,%r10 movdqa %xmm2,112(%rsp) leaq 1(%r8),%rax leaq 2(%r8),%rdx bswapl %eax bswapl %edx xorl %ebp,%eax xorl %ebp,%edx .byte 102,15,58,34,216,3 leaq 3(%r8),%rax movdqa %xmm3,16(%rsp) .byte 102,15,58,34,226,3 bswapl %eax movq %r10,%rdx leaq 4(%r8),%r10 movdqa %xmm4,32(%rsp) xorl %ebp,%eax bswapl %r10d .byte 102,15,58,34,232,3 xorl %ebp,%r10d movdqa %xmm5,48(%rsp) leaq 5(%r8),%r9 movl %r10d,64+12(%rsp) bswapl %r9d leaq 6(%r8),%r10 movl 240(%rcx),%eax xorl %ebp,%r9d bswapl %r10d movl %r9d,80+12(%rsp) xorl %ebp,%r10d leaq 7(%r8),%r9 movl %r10d,96+12(%rsp) bswapl %r9d xorl %ebp,%r9d movl %r9d,112+12(%rsp) movups 16(%rcx),%xmm1 movdqa 64(%rsp),%xmm6 movdqa 80(%rsp),%xmm7 cmpq $8,%rdx jb .Lctr32_tail leaq 128(%rcx),%rcx subq $8,%rdx jmp .Lctr32_loop8 .align 32 .Lctr32_loop8: addl $8,%r8d movdqa 96(%rsp),%xmm8 .byte 102,15,56,220,209 movl %r8d,%r9d movdqa 112(%rsp),%xmm9 .byte 102,15,56,220,217 bswapl %r9d movups 32-128(%rcx),%xmm0 .byte 102,15,56,220,225 xorl %ebp,%r9d nop .byte 102,15,56,220,233 movl %r9d,0+12(%rsp) leaq 1(%r8),%r9 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,68,15,56,220,193 
.byte 102,68,15,56,220,201 movups 48-128(%rcx),%xmm1 bswapl %r9d .byte 102,15,56,220,208 .byte 102,15,56,220,216 xorl %ebp,%r9d .byte 0x66,0x90 .byte 102,15,56,220,224 .byte 102,15,56,220,232 movl %r9d,16+12(%rsp) leaq 2(%r8),%r9 .byte 102,15,56,220,240 .byte 102,15,56,220,248 .byte 102,68,15,56,220,192 .byte 102,68,15,56,220,200 movups 64-128(%rcx),%xmm0 bswapl %r9d .byte 102,15,56,220,209 .byte 102,15,56,220,217 xorl %ebp,%r9d .byte 0x66,0x90 .byte 102,15,56,220,225 .byte 102,15,56,220,233 movl %r9d,32+12(%rsp) leaq 3(%r8),%r9 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,68,15,56,220,193 .byte 102,68,15,56,220,201 movups 80-128(%rcx),%xmm1 bswapl %r9d .byte 102,15,56,220,208 .byte 102,15,56,220,216 xorl %ebp,%r9d .byte 0x66,0x90 .byte 102,15,56,220,224 .byte 102,15,56,220,232 movl %r9d,48+12(%rsp) leaq 4(%r8),%r9 .byte 102,15,56,220,240 .byte 102,15,56,220,248 .byte 102,68,15,56,220,192 .byte 102,68,15,56,220,200 movups 96-128(%rcx),%xmm0 bswapl %r9d .byte 102,15,56,220,209 .byte 102,15,56,220,217 xorl %ebp,%r9d .byte 0x66,0x90 .byte 102,15,56,220,225 .byte 102,15,56,220,233 movl %r9d,64+12(%rsp) leaq 5(%r8),%r9 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,68,15,56,220,193 .byte 102,68,15,56,220,201 movups 112-128(%rcx),%xmm1 bswapl %r9d .byte 102,15,56,220,208 .byte 102,15,56,220,216 xorl %ebp,%r9d .byte 0x66,0x90 .byte 102,15,56,220,224 .byte 102,15,56,220,232 movl %r9d,80+12(%rsp) leaq 6(%r8),%r9 .byte 102,15,56,220,240 .byte 102,15,56,220,248 .byte 102,68,15,56,220,192 .byte 102,68,15,56,220,200 movups 128-128(%rcx),%xmm0 bswapl %r9d .byte 102,15,56,220,209 .byte 102,15,56,220,217 xorl %ebp,%r9d .byte 0x66,0x90 .byte 102,15,56,220,225 .byte 102,15,56,220,233 movl %r9d,96+12(%rsp) leaq 7(%r8),%r9 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,68,15,56,220,193 .byte 102,68,15,56,220,201 movups 144-128(%rcx),%xmm1 bswapl %r9d .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 xorl %ebp,%r9d movdqu 0(%rdi),%xmm10 .byte 102,15,56,220,232 movl %r9d,112+12(%rsp) cmpl $11,%eax .byte 102,15,56,220,240 .byte 102,15,56,220,248 .byte 102,68,15,56,220,192 .byte 102,68,15,56,220,200 movups 160-128(%rcx),%xmm0 jb .Lctr32_enc_done .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,68,15,56,220,193 .byte 102,68,15,56,220,201 movups 176-128(%rcx),%xmm1 .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 .byte 102,15,56,220,232 .byte 102,15,56,220,240 .byte 102,15,56,220,248 .byte 102,68,15,56,220,192 .byte 102,68,15,56,220,200 movups 192-128(%rcx),%xmm0 .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,68,15,56,220,193 .byte 102,68,15,56,220,201 movups 208-128(%rcx),%xmm1 .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 .byte 102,15,56,220,232 .byte 102,15,56,220,240 .byte 102,15,56,220,248 .byte 102,68,15,56,220,192 .byte 102,68,15,56,220,200 movups 224-128(%rcx),%xmm0 jmp .Lctr32_enc_done .align 16 .Lctr32_enc_done: movdqu 16(%rdi),%xmm11 pxor %xmm0,%xmm10 movdqu 32(%rdi),%xmm12 pxor %xmm0,%xmm11 movdqu 48(%rdi),%xmm13 pxor %xmm0,%xmm12 movdqu 64(%rdi),%xmm14 pxor %xmm0,%xmm13 movdqu 80(%rdi),%xmm15 pxor %xmm0,%xmm14 prefetcht0 448(%rdi) prefetcht0 512(%rdi) pxor %xmm0,%xmm15 .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,220,241 
.byte 102,15,56,220,249 .byte 102,68,15,56,220,193 .byte 102,68,15,56,220,201 movdqu 96(%rdi),%xmm1 leaq 128(%rdi),%rdi .byte 102,65,15,56,221,210 pxor %xmm0,%xmm1 movdqu 112-128(%rdi),%xmm10 .byte 102,65,15,56,221,219 pxor %xmm0,%xmm10 movdqa 0(%rsp),%xmm11 .byte 102,65,15,56,221,228 .byte 102,65,15,56,221,237 movdqa 16(%rsp),%xmm12 movdqa 32(%rsp),%xmm13 .byte 102,65,15,56,221,246 .byte 102,65,15,56,221,255 movdqa 48(%rsp),%xmm14 movdqa 64(%rsp),%xmm15 .byte 102,68,15,56,221,193 movdqa 80(%rsp),%xmm0 movups 16-128(%rcx),%xmm1 .byte 102,69,15,56,221,202 movups %xmm2,(%rsi) movdqa %xmm11,%xmm2 movups %xmm3,16(%rsi) movdqa %xmm12,%xmm3 movups %xmm4,32(%rsi) movdqa %xmm13,%xmm4 movups %xmm5,48(%rsi) movdqa %xmm14,%xmm5 movups %xmm6,64(%rsi) movdqa %xmm15,%xmm6 movups %xmm7,80(%rsi) movdqa %xmm0,%xmm7 movups %xmm8,96(%rsi) movups %xmm9,112(%rsi) leaq 128(%rsi),%rsi subq $8,%rdx jnc .Lctr32_loop8 addq $8,%rdx jz .Lctr32_done leaq -128(%rcx),%rcx .Lctr32_tail: leaq 16(%rcx),%rcx cmpq $4,%rdx jb .Lctr32_loop3 je .Lctr32_loop4 shll $4,%eax movdqa 96(%rsp),%xmm8 pxor %xmm9,%xmm9 movups 16(%rcx),%xmm0 .byte 102,15,56,220,209 .byte 102,15,56,220,217 leaq 32-16(%rcx,%rax,1),%rcx negq %rax .byte 102,15,56,220,225 addq $16,%rax movups (%rdi),%xmm10 .byte 102,15,56,220,233 .byte 102,15,56,220,241 movups 16(%rdi),%xmm11 movups 32(%rdi),%xmm12 .byte 102,15,56,220,249 .byte 102,68,15,56,220,193 call .Lenc_loop8_enter movdqu 48(%rdi),%xmm13 pxor %xmm10,%xmm2 movdqu 64(%rdi),%xmm10 pxor %xmm11,%xmm3 movdqu %xmm2,(%rsi) pxor %xmm12,%xmm4 movdqu %xmm3,16(%rsi) pxor %xmm13,%xmm5 movdqu %xmm4,32(%rsi) pxor %xmm10,%xmm6 movdqu %xmm5,48(%rsi) movdqu %xmm6,64(%rsi) cmpq $6,%rdx jb .Lctr32_done movups 80(%rdi),%xmm11 xorps %xmm11,%xmm7 movups %xmm7,80(%rsi) je .Lctr32_done movups 96(%rdi),%xmm12 xorps %xmm12,%xmm8 movups %xmm8,96(%rsi) jmp .Lctr32_done .align 32 .Lctr32_loop4: .byte 102,15,56,220,209 leaq 16(%rcx),%rcx decl %eax .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 movups (%rcx),%xmm1 jnz .Lctr32_loop4 .byte 102,15,56,221,209 .byte 102,15,56,221,217 movups (%rdi),%xmm10 movups 16(%rdi),%xmm11 .byte 102,15,56,221,225 .byte 102,15,56,221,233 movups 32(%rdi),%xmm12 movups 48(%rdi),%xmm13 xorps %xmm10,%xmm2 movups %xmm2,(%rsi) xorps %xmm11,%xmm3 movups %xmm3,16(%rsi) pxor %xmm12,%xmm4 movdqu %xmm4,32(%rsi) pxor %xmm13,%xmm5 movdqu %xmm5,48(%rsi) jmp .Lctr32_done .align 32 .Lctr32_loop3: .byte 102,15,56,220,209 leaq 16(%rcx),%rcx decl %eax .byte 102,15,56,220,217 .byte 102,15,56,220,225 movups (%rcx),%xmm1 jnz .Lctr32_loop3 .byte 102,15,56,221,209 .byte 102,15,56,221,217 .byte 102,15,56,221,225 movups (%rdi),%xmm10 xorps %xmm10,%xmm2 movups %xmm2,(%rsi) cmpq $2,%rdx jb .Lctr32_done movups 16(%rdi),%xmm11 xorps %xmm11,%xmm3 movups %xmm3,16(%rsi) je .Lctr32_done movups 32(%rdi),%xmm12 xorps %xmm12,%xmm4 movups %xmm4,32(%rsi) .Lctr32_done: xorps %xmm0,%xmm0 xorl %ebp,%ebp pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 pxor %xmm7,%xmm7 movaps %xmm0,0(%rsp) pxor %xmm8,%xmm8 movaps %xmm0,16(%rsp) pxor %xmm9,%xmm9 movaps %xmm0,32(%rsp) pxor %xmm10,%xmm10 movaps %xmm0,48(%rsp) pxor %xmm11,%xmm11 movaps %xmm0,64(%rsp) pxor %xmm12,%xmm12 movaps %xmm0,80(%rsp) pxor %xmm13,%xmm13 movaps %xmm0,96(%rsp) pxor %xmm14,%xmm14 movaps %xmm0,112(%rsp) pxor %xmm15,%xmm15 movq -8(%r11),%rbp .cfi_restore %rbp leaq (%r11),%rsp .cfi_def_cfa_register %rsp .Lctr32_epilogue: ret .cfi_endproc .size aes_hw_ctr32_encrypt_blocks,.-aes_hw_ctr32_encrypt_blocks .globl 
aes_hw_set_encrypt_key_base .hidden aes_hw_set_encrypt_key_base .type aes_hw_set_encrypt_key_base,@function .align 16 aes_hw_set_encrypt_key_base: .cfi_startproc _CET_ENDBR #ifdef BORINGSSL_DISPATCH_TEST movb $1,BORINGSSL_function_hit+3(%rip) #endif subq $8,%rsp .cfi_adjust_cfa_offset 8 movups (%rdi),%xmm0 xorps %xmm4,%xmm4 leaq 16(%rdx),%rax cmpl $256,%esi je .L14rounds cmpl $128,%esi jne .Lbad_keybits .L10rounds: movl $9,%esi movups %xmm0,(%rdx) .byte 102,15,58,223,200,1 call .Lkey_expansion_128_cold .byte 102,15,58,223,200,2 call .Lkey_expansion_128 .byte 102,15,58,223,200,4 call .Lkey_expansion_128 .byte 102,15,58,223,200,8 call .Lkey_expansion_128 .byte 102,15,58,223,200,16 call .Lkey_expansion_128 .byte 102,15,58,223,200,32 call .Lkey_expansion_128 .byte 102,15,58,223,200,64 call .Lkey_expansion_128 .byte 102,15,58,223,200,128 call .Lkey_expansion_128 .byte 102,15,58,223,200,27 call .Lkey_expansion_128 .byte 102,15,58,223,200,54 call .Lkey_expansion_128 movups %xmm0,(%rax) movl %esi,80(%rax) xorl %eax,%eax jmp .Lenc_key_ret .align 16 .L14rounds: movups 16(%rdi),%xmm2 movl $13,%esi leaq 16(%rax),%rax movups %xmm0,(%rdx) movups %xmm2,16(%rdx) .byte 102,15,58,223,202,1 call .Lkey_expansion_256a_cold .byte 102,15,58,223,200,1 call .Lkey_expansion_256b .byte 102,15,58,223,202,2 call .Lkey_expansion_256a .byte 102,15,58,223,200,2 call .Lkey_expansion_256b .byte 102,15,58,223,202,4 call .Lkey_expansion_256a .byte 102,15,58,223,200,4 call .Lkey_expansion_256b .byte 102,15,58,223,202,8 call .Lkey_expansion_256a .byte 102,15,58,223,200,8 call .Lkey_expansion_256b .byte 102,15,58,223,202,16 call .Lkey_expansion_256a .byte 102,15,58,223,200,16 call .Lkey_expansion_256b .byte 102,15,58,223,202,32 call .Lkey_expansion_256a .byte 102,15,58,223,200,32 call .Lkey_expansion_256b .byte 102,15,58,223,202,64 call .Lkey_expansion_256a movups %xmm0,(%rax) movl %esi,16(%rax) xorq %rax,%rax jmp .Lenc_key_ret .align 16 .Lbad_keybits: movq $-2,%rax .Lenc_key_ret: pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 addq $8,%rsp .cfi_adjust_cfa_offset -8 ret .cfi_endproc .align 16 .Lkey_expansion_128: .cfi_startproc movups %xmm0,(%rax) leaq 16(%rax),%rax .Lkey_expansion_128_cold: shufps $16,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $140,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $255,%xmm1,%xmm1 xorps %xmm1,%xmm0 ret .cfi_endproc .align 16 .Lkey_expansion_256a: .cfi_startproc movups %xmm2,(%rax) leaq 16(%rax),%rax .Lkey_expansion_256a_cold: shufps $16,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $140,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $255,%xmm1,%xmm1 xorps %xmm1,%xmm0 ret .cfi_endproc .align 16 .Lkey_expansion_256b: .cfi_startproc movups %xmm0,(%rax) leaq 16(%rax),%rax shufps $16,%xmm2,%xmm4 xorps %xmm4,%xmm2 shufps $140,%xmm2,%xmm4 xorps %xmm4,%xmm2 shufps $170,%xmm1,%xmm1 xorps %xmm1,%xmm2 ret .cfi_endproc .size aes_hw_set_encrypt_key_base,.-aes_hw_set_encrypt_key_base .globl aes_hw_set_encrypt_key_alt .hidden aes_hw_set_encrypt_key_alt .type aes_hw_set_encrypt_key_alt,@function .align 16 aes_hw_set_encrypt_key_alt: .cfi_startproc _CET_ENDBR #ifdef BORINGSSL_DISPATCH_TEST movb $1,BORINGSSL_function_hit+3(%rip) #endif subq $8,%rsp .cfi_adjust_cfa_offset 8 movups (%rdi),%xmm0 xorps %xmm4,%xmm4 leaq 16(%rdx),%rax cmpl $256,%esi je .L14rounds_alt cmpl $128,%esi jne .Lbad_keybits_alt movl $9,%esi movdqa .Lkey_rotate(%rip),%xmm5 movl $8,%r10d movdqa .Lkey_rcon1(%rip),%xmm4 movdqa %xmm0,%xmm2 movdqu %xmm0,(%rdx) jmp .Loop_key128 .align 16 .Loop_key128: .byte 102,15,56,0,197 .byte 
102,15,56,221,196 pslld $1,%xmm4 leaq 16(%rax),%rax movdqa %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm3,%xmm2 pxor %xmm2,%xmm0 movdqu %xmm0,-16(%rax) movdqa %xmm0,%xmm2 decl %r10d jnz .Loop_key128 movdqa .Lkey_rcon1b(%rip),%xmm4 .byte 102,15,56,0,197 .byte 102,15,56,221,196 pslld $1,%xmm4 movdqa %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm3,%xmm2 pxor %xmm2,%xmm0 movdqu %xmm0,(%rax) movdqa %xmm0,%xmm2 .byte 102,15,56,0,197 .byte 102,15,56,221,196 movdqa %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm3,%xmm2 pxor %xmm2,%xmm0 movdqu %xmm0,16(%rax) movl %esi,96(%rax) xorl %eax,%eax jmp .Lenc_key_ret_alt .align 16 .L14rounds_alt: movups 16(%rdi),%xmm2 movl $13,%esi leaq 16(%rax),%rax movdqa .Lkey_rotate(%rip),%xmm5 movdqa .Lkey_rcon1(%rip),%xmm4 movl $7,%r10d movdqu %xmm0,0(%rdx) movdqa %xmm2,%xmm1 movdqu %xmm2,16(%rdx) jmp .Loop_key256 .align 16 .Loop_key256: .byte 102,15,56,0,213 .byte 102,15,56,221,212 movdqa %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm3,%xmm0 pslld $1,%xmm4 pxor %xmm2,%xmm0 movdqu %xmm0,(%rax) decl %r10d jz .Ldone_key256 pshufd $0xff,%xmm0,%xmm2 pxor %xmm3,%xmm3 .byte 102,15,56,221,211 movdqa %xmm1,%xmm3 pslldq $4,%xmm1 pxor %xmm1,%xmm3 pslldq $4,%xmm1 pxor %xmm1,%xmm3 pslldq $4,%xmm1 pxor %xmm3,%xmm1 pxor %xmm1,%xmm2 movdqu %xmm2,16(%rax) leaq 32(%rax),%rax movdqa %xmm2,%xmm1 jmp .Loop_key256 .Ldone_key256: movl %esi,16(%rax) xorl %eax,%eax jmp .Lenc_key_ret_alt .align 16 .Lbad_keybits_alt: movq $-2,%rax .Lenc_key_ret_alt: pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 addq $8,%rsp .cfi_adjust_cfa_offset -8 ret .cfi_endproc .size aes_hw_set_encrypt_key_alt,.-aes_hw_set_encrypt_key_alt .section .rodata .align 64 .Lbswap_mask: .byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 .Lincrement32: .long 6,6,6,0 .Lincrement64: .long 1,0,0,0 .Lincrement1: .byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 .Lkey_rotate: .long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d .Lkey_rotate192: .long 0x04070605,0x04070605,0x04070605,0x04070605 .Lkey_rcon1: .long 1,1,1,1 .Lkey_rcon1b: .long 0x1b,0x1b,0x1b,0x1b .byte 65,69,83,32,102,111,114,32,73,110,116,101,108,32,65,69,83,45,78,73,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 64 .text #endif
fatiimajamiil/rustpad-custom
11,047
.cargo/registry/src/index.crates.io-6f17d22bba15001f/ring-0.17.14/pregenerated/vpaes-x86_64-macosx.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__) .text .p2align 4 _vpaes_encrypt_core: movq %rdx,%r9 movq $16,%r11 movl 240(%rdx),%eax movdqa %xmm9,%xmm1 movdqa L$k_ipt(%rip),%xmm2 pandn %xmm0,%xmm1 movdqu (%r9),%xmm5 psrld $4,%xmm1 pand %xmm9,%xmm0 .byte 102,15,56,0,208 movdqa L$k_ipt+16(%rip),%xmm0 .byte 102,15,56,0,193 pxor %xmm5,%xmm2 addq $16,%r9 pxor %xmm2,%xmm0 leaq L$k_mc_backward(%rip),%r10 jmp L$enc_entry .p2align 4 L$enc_loop: movdqa %xmm13,%xmm4 movdqa %xmm12,%xmm0 .byte 102,15,56,0,226 .byte 102,15,56,0,195 pxor %xmm5,%xmm4 movdqa %xmm15,%xmm5 pxor %xmm4,%xmm0 movdqa -64(%r11,%r10,1),%xmm1 .byte 102,15,56,0,234 movdqa (%r11,%r10,1),%xmm4 movdqa %xmm14,%xmm2 .byte 102,15,56,0,211 movdqa %xmm0,%xmm3 pxor %xmm5,%xmm2 .byte 102,15,56,0,193 addq $16,%r9 pxor %xmm2,%xmm0 .byte 102,15,56,0,220 addq $16,%r11 pxor %xmm0,%xmm3 .byte 102,15,56,0,193 andq $0x30,%r11 subq $1,%rax pxor %xmm3,%xmm0 L$enc_entry: movdqa %xmm9,%xmm1 movdqa %xmm11,%xmm5 pandn %xmm0,%xmm1 psrld $4,%xmm1 pand %xmm9,%xmm0 .byte 102,15,56,0,232 movdqa %xmm10,%xmm3 pxor %xmm1,%xmm0 .byte 102,15,56,0,217 movdqa %xmm10,%xmm4 pxor %xmm5,%xmm3 .byte 102,15,56,0,224 movdqa %xmm10,%xmm2 pxor %xmm5,%xmm4 .byte 102,15,56,0,211 movdqa %xmm10,%xmm3 pxor %xmm0,%xmm2 .byte 102,15,56,0,220 movdqu (%r9),%xmm5 pxor %xmm1,%xmm3 jnz L$enc_loop movdqa -96(%r10),%xmm4 movdqa -80(%r10),%xmm0 .byte 102,15,56,0,226 pxor %xmm5,%xmm4 .byte 102,15,56,0,195 movdqa 64(%r11,%r10,1),%xmm1 pxor %xmm4,%xmm0 .byte 102,15,56,0,193 ret .p2align 4 _vpaes_encrypt_core_2x: movq %rdx,%r9 movq $16,%r11 movl 240(%rdx),%eax movdqa %xmm9,%xmm1 movdqa %xmm9,%xmm7 movdqa L$k_ipt(%rip),%xmm2 movdqa %xmm2,%xmm8 pandn %xmm0,%xmm1 pandn %xmm6,%xmm7 movdqu (%r9),%xmm5 psrld $4,%xmm1 psrld $4,%xmm7 pand %xmm9,%xmm0 pand %xmm9,%xmm6 .byte 102,15,56,0,208 .byte 102,68,15,56,0,198 movdqa L$k_ipt+16(%rip),%xmm0 movdqa %xmm0,%xmm6 .byte 102,15,56,0,193 .byte 102,15,56,0,247 pxor %xmm5,%xmm2 pxor %xmm5,%xmm8 addq $16,%r9 pxor %xmm2,%xmm0 pxor %xmm8,%xmm6 leaq L$k_mc_backward(%rip),%r10 jmp L$enc2x_entry .p2align 4 L$enc2x_loop: movdqa L$k_sb1(%rip),%xmm4 movdqa L$k_sb1+16(%rip),%xmm0 movdqa %xmm4,%xmm12 movdqa %xmm0,%xmm6 .byte 102,15,56,0,226 .byte 102,69,15,56,0,224 .byte 102,15,56,0,195 .byte 102,65,15,56,0,243 pxor %xmm5,%xmm4 pxor %xmm5,%xmm12 movdqa L$k_sb2(%rip),%xmm5 movdqa %xmm5,%xmm13 pxor %xmm4,%xmm0 pxor %xmm12,%xmm6 movdqa -64(%r11,%r10,1),%xmm1 .byte 102,15,56,0,234 .byte 102,69,15,56,0,232 movdqa (%r11,%r10,1),%xmm4 movdqa L$k_sb2+16(%rip),%xmm2 movdqa %xmm2,%xmm8 .byte 102,15,56,0,211 .byte 102,69,15,56,0,195 movdqa %xmm0,%xmm3 movdqa %xmm6,%xmm11 pxor %xmm5,%xmm2 pxor %xmm13,%xmm8 .byte 102,15,56,0,193 .byte 102,15,56,0,241 addq $16,%r9 pxor %xmm2,%xmm0 pxor %xmm8,%xmm6 .byte 102,15,56,0,220 .byte 102,68,15,56,0,220 addq $16,%r11 pxor %xmm0,%xmm3 pxor %xmm6,%xmm11 .byte 102,15,56,0,193 .byte 102,15,56,0,241 andq $0x30,%r11 subq $1,%rax pxor %xmm3,%xmm0 pxor %xmm11,%xmm6 L$enc2x_entry: movdqa %xmm9,%xmm1 movdqa %xmm9,%xmm7 movdqa L$k_inv+16(%rip),%xmm5 movdqa %xmm5,%xmm13 pandn %xmm0,%xmm1 pandn %xmm6,%xmm7 psrld $4,%xmm1 psrld $4,%xmm7 pand %xmm9,%xmm0 pand %xmm9,%xmm6 .byte 102,15,56,0,232 .byte 102,68,15,56,0,238 movdqa %xmm10,%xmm3 movdqa %xmm10,%xmm11 pxor %xmm1,%xmm0 pxor %xmm7,%xmm6 .byte 102,15,56,0,217 .byte 102,68,15,56,0,223 movdqa %xmm10,%xmm4 movdqa %xmm10,%xmm12 pxor 
%xmm5,%xmm3 pxor %xmm13,%xmm11 .byte 102,15,56,0,224 .byte 102,68,15,56,0,230 movdqa %xmm10,%xmm2 movdqa %xmm10,%xmm8 pxor %xmm5,%xmm4 pxor %xmm13,%xmm12 .byte 102,15,56,0,211 .byte 102,69,15,56,0,195 movdqa %xmm10,%xmm3 movdqa %xmm10,%xmm11 pxor %xmm0,%xmm2 pxor %xmm6,%xmm8 .byte 102,15,56,0,220 .byte 102,69,15,56,0,220 movdqu (%r9),%xmm5 pxor %xmm1,%xmm3 pxor %xmm7,%xmm11 jnz L$enc2x_loop movdqa -96(%r10),%xmm4 movdqa -80(%r10),%xmm0 movdqa %xmm4,%xmm12 movdqa %xmm0,%xmm6 .byte 102,15,56,0,226 .byte 102,69,15,56,0,224 pxor %xmm5,%xmm4 pxor %xmm5,%xmm12 .byte 102,15,56,0,195 .byte 102,65,15,56,0,243 movdqa 64(%r11,%r10,1),%xmm1 pxor %xmm4,%xmm0 pxor %xmm12,%xmm6 .byte 102,15,56,0,193 .byte 102,15,56,0,241 ret .p2align 4 _vpaes_schedule_core: call _vpaes_preheat movdqa L$k_rcon(%rip),%xmm8 movdqu (%rdi),%xmm0 movdqa %xmm0,%xmm3 leaq L$k_ipt(%rip),%r11 call _vpaes_schedule_transform movdqa %xmm0,%xmm7 leaq L$k_sr(%rip),%r10 movdqu %xmm0,(%rdx) L$schedule_go: cmpl $192,%esi ja L$schedule_256 L$schedule_128: movl $10,%esi L$oop_schedule_128: call _vpaes_schedule_round decq %rsi jz L$schedule_mangle_last call _vpaes_schedule_mangle jmp L$oop_schedule_128 .p2align 4 L$schedule_256: movdqu 16(%rdi),%xmm0 call _vpaes_schedule_transform movl $7,%esi L$oop_schedule_256: call _vpaes_schedule_mangle movdqa %xmm0,%xmm6 call _vpaes_schedule_round decq %rsi jz L$schedule_mangle_last call _vpaes_schedule_mangle pshufd $0xFF,%xmm0,%xmm0 movdqa %xmm7,%xmm5 movdqa %xmm6,%xmm7 call _vpaes_schedule_low_round movdqa %xmm5,%xmm7 jmp L$oop_schedule_256 .p2align 4 L$schedule_mangle_last: leaq L$k_deskew(%rip),%r11 movdqa (%r8,%r10,1),%xmm1 .byte 102,15,56,0,193 leaq L$k_opt(%rip),%r11 addq $32,%rdx L$schedule_mangle_last_dec: addq $-16,%rdx pxor L$k_s63(%rip),%xmm0 call _vpaes_schedule_transform movdqu %xmm0,(%rdx) pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 pxor %xmm7,%xmm7 ret .p2align 4 _vpaes_schedule_round: pxor %xmm1,%xmm1 .byte 102,65,15,58,15,200,15 .byte 102,69,15,58,15,192,15 pxor %xmm1,%xmm7 pshufd $0xFF,%xmm0,%xmm0 .byte 102,15,58,15,192,1 _vpaes_schedule_low_round: movdqa %xmm7,%xmm1 pslldq $4,%xmm7 pxor %xmm1,%xmm7 movdqa %xmm7,%xmm1 pslldq $8,%xmm7 pxor %xmm1,%xmm7 pxor L$k_s63(%rip),%xmm7 movdqa %xmm9,%xmm1 pandn %xmm0,%xmm1 psrld $4,%xmm1 pand %xmm9,%xmm0 movdqa %xmm11,%xmm2 .byte 102,15,56,0,208 pxor %xmm1,%xmm0 movdqa %xmm10,%xmm3 .byte 102,15,56,0,217 pxor %xmm2,%xmm3 movdqa %xmm10,%xmm4 .byte 102,15,56,0,224 pxor %xmm2,%xmm4 movdqa %xmm10,%xmm2 .byte 102,15,56,0,211 pxor %xmm0,%xmm2 movdqa %xmm10,%xmm3 .byte 102,15,56,0,220 pxor %xmm1,%xmm3 movdqa %xmm13,%xmm4 .byte 102,15,56,0,226 movdqa %xmm12,%xmm0 .byte 102,15,56,0,195 pxor %xmm4,%xmm0 pxor %xmm7,%xmm0 movdqa %xmm0,%xmm7 ret .p2align 4 _vpaes_schedule_transform: movdqa %xmm9,%xmm1 pandn %xmm0,%xmm1 psrld $4,%xmm1 pand %xmm9,%xmm0 movdqa (%r11),%xmm2 .byte 102,15,56,0,208 movdqa 16(%r11),%xmm0 .byte 102,15,56,0,193 pxor %xmm2,%xmm0 ret .p2align 4 _vpaes_schedule_mangle: movdqa %xmm0,%xmm4 movdqa L$k_mc_forward(%rip),%xmm5 addq $16,%rdx pxor L$k_s63(%rip),%xmm4 .byte 102,15,56,0,229 movdqa %xmm4,%xmm3 .byte 102,15,56,0,229 pxor %xmm4,%xmm3 .byte 102,15,56,0,229 pxor %xmm4,%xmm3 L$schedule_mangle_both: movdqa (%r8,%r10,1),%xmm1 .byte 102,15,56,0,217 addq $-16,%r8 andq $0x30,%r8 movdqu %xmm3,(%rdx) ret .globl _vpaes_set_encrypt_key .private_extern _vpaes_set_encrypt_key .p2align 4 _vpaes_set_encrypt_key: _CET_ENDBR #ifdef BORINGSSL_DISPATCH_TEST movb 
$1,_BORINGSSL_function_hit+5(%rip) #endif movl %esi,%eax shrl $5,%eax addl $5,%eax movl %eax,240(%rdx) movl $0,%ecx movl $0x30,%r8d call _vpaes_schedule_core xorl %eax,%eax ret .globl _vpaes_ctr32_encrypt_blocks .private_extern _vpaes_ctr32_encrypt_blocks .p2align 4 _vpaes_ctr32_encrypt_blocks: _CET_ENDBR xchgq %rcx,%rdx testq %rcx,%rcx jz L$ctr32_abort movdqu (%r8),%xmm0 movdqa L$ctr_add_one(%rip),%xmm8 subq %rdi,%rsi call _vpaes_preheat movdqa %xmm0,%xmm6 pshufb L$rev_ctr(%rip),%xmm6 testq $1,%rcx jz L$ctr32_prep_loop movdqu (%rdi),%xmm7 call _vpaes_encrypt_core pxor %xmm7,%xmm0 paddd %xmm8,%xmm6 movdqu %xmm0,(%rsi,%rdi,1) subq $1,%rcx leaq 16(%rdi),%rdi jz L$ctr32_done L$ctr32_prep_loop: movdqa %xmm6,%xmm14 movdqa %xmm6,%xmm15 paddd %xmm8,%xmm15 L$ctr32_loop: movdqa L$rev_ctr(%rip),%xmm1 movdqa %xmm14,%xmm0 movdqa %xmm15,%xmm6 .byte 102,15,56,0,193 .byte 102,15,56,0,241 call _vpaes_encrypt_core_2x movdqu (%rdi),%xmm1 movdqu 16(%rdi),%xmm2 movdqa L$ctr_add_two(%rip),%xmm3 pxor %xmm1,%xmm0 pxor %xmm2,%xmm6 paddd %xmm3,%xmm14 paddd %xmm3,%xmm15 movdqu %xmm0,(%rsi,%rdi,1) movdqu %xmm6,16(%rsi,%rdi,1) subq $2,%rcx leaq 32(%rdi),%rdi jnz L$ctr32_loop L$ctr32_done: L$ctr32_abort: ret .p2align 4 _vpaes_preheat: leaq L$k_s0F(%rip),%r10 movdqa -32(%r10),%xmm10 movdqa -16(%r10),%xmm11 movdqa 0(%r10),%xmm9 movdqa 48(%r10),%xmm13 movdqa 64(%r10),%xmm12 movdqa 80(%r10),%xmm15 movdqa 96(%r10),%xmm14 ret .section __DATA,__const .p2align 6 _vpaes_consts: L$k_inv: .quad 0x0E05060F0D080180, 0x040703090A0B0C02 .quad 0x01040A060F0B0780, 0x030D0E0C02050809 L$k_s0F: .quad 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F L$k_ipt: .quad 0xC2B2E8985A2A7000, 0xCABAE09052227808 .quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81 L$k_sb1: .quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544 .quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF L$k_sb2: .quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD .quad 0x69EB88400AE12900, 0xC2A163C8AB82234A L$k_sbo: .quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878 .quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA L$k_mc_forward: .quad 0x0407060500030201, 0x0C0F0E0D080B0A09 .quad 0x080B0A0904070605, 0x000302010C0F0E0D .quad 0x0C0F0E0D080B0A09, 0x0407060500030201 .quad 0x000302010C0F0E0D, 0x080B0A0904070605 L$k_mc_backward: .quad 0x0605040702010003, 0x0E0D0C0F0A09080B .quad 0x020100030E0D0C0F, 0x0A09080B06050407 .quad 0x0E0D0C0F0A09080B, 0x0605040702010003 .quad 0x0A09080B06050407, 0x020100030E0D0C0F L$k_sr: .quad 0x0706050403020100, 0x0F0E0D0C0B0A0908 .quad 0x030E09040F0A0500, 0x0B06010C07020D08 .quad 0x0F060D040B020900, 0x070E050C030A0108 .quad 0x0B0E0104070A0D00, 0x0306090C0F020508 L$k_rcon: .quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81 L$k_s63: .quad 0x5B5B5B5B5B5B5B5B, 0x5B5B5B5B5B5B5B5B L$k_opt: .quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808 .quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0 L$k_deskew: .quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A .quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77 L$rev_ctr: .quad 0x0706050403020100, 0x0c0d0e0f0b0a0908 L$ctr_add_one: .quad 0x0000000000000000, 0x0000000100000000 L$ctr_add_two: .quad 0x0000000000000000, 0x0000000200000000 .byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,120,56,54,95,54,52,47,83,83,83,69,51,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0 .p2align 6 .text #endif
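_vpaes_ctr32_encrypt_blocks above byte-swaps only the final 32-bit word of the IV (L$rev_ctr) so the counter can be bumped with a plain paddd of L$ctr_add_one / L$ctr_add_two, and it processes two blocks per iteration through _vpaes_encrypt_core_2x. A serial sketch of the same ctr32 convention in C (the names ctr32_encrypt_blocks and block128_f are illustrative, not ring's API; any AES block function can be plugged in):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Placeholder type for a 128-bit block encryption routine. */
typedef void (*block128_f)(const uint8_t in[16], uint8_t out[16], const void *key);

/* CTR mode where only the last 4 bytes of the IV form a big-endian counter. */
void ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out, size_t blocks,
                          const void *key, const uint8_t ivec[16],
                          block128_f block)
{
    uint8_t ctr[16], ks[16];
    memcpy(ctr, ivec, 16);
    while (blocks--) {
        block(ctr, ks, key);                  /* keystream = E_k(counter) */
        for (int i = 0; i < 16; i++)
            out[i] = in[i] ^ ks[i];
        in += 16;
        out += 16;
        for (int i = 15; i >= 12; i--)        /* increment 32-bit BE counter */
            if (++ctr[i] != 0)
                break;
    }
}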
fatiimajamiil/rustpad-custom
78,605
.cargo/registry/src/index.crates.io-6f17d22bba15001f/ring-0.17.14/pregenerated/p256-x86_64-asm-elf.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__) .text .section .rodata .align 64 .Lpoly: .quad 0xffffffffffffffff, 0x00000000ffffffff, 0x0000000000000000, 0xffffffff00000001 .LOne: .long 1,1,1,1,1,1,1,1 .LTwo: .long 2,2,2,2,2,2,2,2 .LThree: .long 3,3,3,3,3,3,3,3 .LONE_mont: .quad 0x0000000000000001, 0xffffffff00000000, 0xffffffffffffffff, 0x00000000fffffffe .Lord: .quad 0xf3b9cac2fc632551, 0xbce6faada7179e84, 0xffffffffffffffff, 0xffffffff00000000 .LordK: .quad 0xccd1c8aaee00bc4f .text .globl ecp_nistz256_neg .hidden ecp_nistz256_neg .type ecp_nistz256_neg,@function .align 32 ecp_nistz256_neg: .cfi_startproc _CET_ENDBR pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-16 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-24 .Lneg_body: xorq %r8,%r8 xorq %r9,%r9 xorq %r10,%r10 xorq %r11,%r11 xorq %r13,%r13 subq 0(%rsi),%r8 sbbq 8(%rsi),%r9 sbbq 16(%rsi),%r10 movq %r8,%rax sbbq 24(%rsi),%r11 leaq .Lpoly(%rip),%rsi movq %r9,%rdx sbbq $0,%r13 addq 0(%rsi),%r8 movq %r10,%rcx adcq 8(%rsi),%r9 adcq 16(%rsi),%r10 movq %r11,%r12 adcq 24(%rsi),%r11 testq %r13,%r13 cmovzq %rax,%r8 cmovzq %rdx,%r9 movq %r8,0(%rdi) cmovzq %rcx,%r10 movq %r9,8(%rdi) cmovzq %r12,%r11 movq %r10,16(%rdi) movq %r11,24(%rdi) movq 0(%rsp),%r13 .cfi_restore %r13 movq 8(%rsp),%r12 .cfi_restore %r12 leaq 16(%rsp),%rsp .cfi_adjust_cfa_offset -16 .Lneg_epilogue: ret .cfi_endproc .size ecp_nistz256_neg,.-ecp_nistz256_neg .globl ecp_nistz256_ord_mul_mont_nohw .hidden ecp_nistz256_ord_mul_mont_nohw .type ecp_nistz256_ord_mul_mont_nohw,@function .align 32 ecp_nistz256_ord_mul_mont_nohw: .cfi_startproc _CET_ENDBR pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 .Lord_mul_body: movq 0(%rdx),%rax movq %rdx,%rbx leaq .Lord(%rip),%r14 movq .LordK(%rip),%r15 movq %rax,%rcx mulq 0(%rsi) movq %rax,%r8 movq %rcx,%rax movq %rdx,%r9 mulq 8(%rsi) addq %rax,%r9 movq %rcx,%rax adcq $0,%rdx movq %rdx,%r10 mulq 16(%rsi) addq %rax,%r10 movq %rcx,%rax adcq $0,%rdx movq %r8,%r13 imulq %r15,%r8 movq %rdx,%r11 mulq 24(%rsi) addq %rax,%r11 movq %r8,%rax adcq $0,%rdx movq %rdx,%r12 mulq 0(%r14) movq %r8,%rbp addq %rax,%r13 movq %r8,%rax adcq $0,%rdx movq %rdx,%rcx subq %r8,%r10 sbbq $0,%r8 mulq 8(%r14) addq %rcx,%r9 adcq $0,%rdx addq %rax,%r9 movq %rbp,%rax adcq %rdx,%r10 movq %rbp,%rdx adcq $0,%r8 shlq $32,%rax shrq $32,%rdx subq %rax,%r11 movq 8(%rbx),%rax sbbq %rdx,%rbp addq %r8,%r11 adcq %rbp,%r12 adcq $0,%r13 movq %rax,%rcx mulq 0(%rsi) addq %rax,%r9 movq %rcx,%rax adcq $0,%rdx movq %rdx,%rbp mulq 8(%rsi) addq %rbp,%r10 adcq $0,%rdx addq %rax,%r10 movq %rcx,%rax adcq $0,%rdx movq %rdx,%rbp mulq 16(%rsi) addq %rbp,%r11 adcq $0,%rdx addq %rax,%r11 movq %rcx,%rax adcq $0,%rdx movq %r9,%rcx imulq %r15,%r9 movq %rdx,%rbp mulq 24(%rsi) addq %rbp,%r12 adcq $0,%rdx xorq %r8,%r8 addq %rax,%r12 movq %r9,%rax adcq %rdx,%r13 adcq $0,%r8 mulq 0(%r14) movq %r9,%rbp addq %rax,%rcx movq %r9,%rax adcq %rdx,%rcx subq %r9,%r11 sbbq $0,%r9 mulq 8(%r14) addq %rcx,%r10 adcq $0,%rdx addq %rax,%r10 movq %rbp,%rax adcq %rdx,%r11 movq %rbp,%rdx adcq $0,%r9 shlq $32,%rax shrq $32,%rdx subq %rax,%r12 movq 
16(%rbx),%rax sbbq %rdx,%rbp addq %r9,%r12 adcq %rbp,%r13 adcq $0,%r8 movq %rax,%rcx mulq 0(%rsi) addq %rax,%r10 movq %rcx,%rax adcq $0,%rdx movq %rdx,%rbp mulq 8(%rsi) addq %rbp,%r11 adcq $0,%rdx addq %rax,%r11 movq %rcx,%rax adcq $0,%rdx movq %rdx,%rbp mulq 16(%rsi) addq %rbp,%r12 adcq $0,%rdx addq %rax,%r12 movq %rcx,%rax adcq $0,%rdx movq %r10,%rcx imulq %r15,%r10 movq %rdx,%rbp mulq 24(%rsi) addq %rbp,%r13 adcq $0,%rdx xorq %r9,%r9 addq %rax,%r13 movq %r10,%rax adcq %rdx,%r8 adcq $0,%r9 mulq 0(%r14) movq %r10,%rbp addq %rax,%rcx movq %r10,%rax adcq %rdx,%rcx subq %r10,%r12 sbbq $0,%r10 mulq 8(%r14) addq %rcx,%r11 adcq $0,%rdx addq %rax,%r11 movq %rbp,%rax adcq %rdx,%r12 movq %rbp,%rdx adcq $0,%r10 shlq $32,%rax shrq $32,%rdx subq %rax,%r13 movq 24(%rbx),%rax sbbq %rdx,%rbp addq %r10,%r13 adcq %rbp,%r8 adcq $0,%r9 movq %rax,%rcx mulq 0(%rsi) addq %rax,%r11 movq %rcx,%rax adcq $0,%rdx movq %rdx,%rbp mulq 8(%rsi) addq %rbp,%r12 adcq $0,%rdx addq %rax,%r12 movq %rcx,%rax adcq $0,%rdx movq %rdx,%rbp mulq 16(%rsi) addq %rbp,%r13 adcq $0,%rdx addq %rax,%r13 movq %rcx,%rax adcq $0,%rdx movq %r11,%rcx imulq %r15,%r11 movq %rdx,%rbp mulq 24(%rsi) addq %rbp,%r8 adcq $0,%rdx xorq %r10,%r10 addq %rax,%r8 movq %r11,%rax adcq %rdx,%r9 adcq $0,%r10 mulq 0(%r14) movq %r11,%rbp addq %rax,%rcx movq %r11,%rax adcq %rdx,%rcx subq %r11,%r13 sbbq $0,%r11 mulq 8(%r14) addq %rcx,%r12 adcq $0,%rdx addq %rax,%r12 movq %rbp,%rax adcq %rdx,%r13 movq %rbp,%rdx adcq $0,%r11 shlq $32,%rax shrq $32,%rdx subq %rax,%r8 sbbq %rdx,%rbp addq %r11,%r8 adcq %rbp,%r9 adcq $0,%r10 movq %r12,%rsi subq 0(%r14),%r12 movq %r13,%r11 sbbq 8(%r14),%r13 movq %r8,%rcx sbbq 16(%r14),%r8 movq %r9,%rbp sbbq 24(%r14),%r9 sbbq $0,%r10 cmovcq %rsi,%r12 cmovcq %r11,%r13 cmovcq %rcx,%r8 cmovcq %rbp,%r9 movq %r12,0(%rdi) movq %r13,8(%rdi) movq %r8,16(%rdi) movq %r9,24(%rdi) movq 0(%rsp),%r15 .cfi_restore %r15 movq 8(%rsp),%r14 .cfi_restore %r14 movq 16(%rsp),%r13 .cfi_restore %r13 movq 24(%rsp),%r12 .cfi_restore %r12 movq 32(%rsp),%rbx .cfi_restore %rbx movq 40(%rsp),%rbp .cfi_restore %rbp leaq 48(%rsp),%rsp .cfi_adjust_cfa_offset -48 .Lord_mul_epilogue: ret .cfi_endproc .size ecp_nistz256_ord_mul_mont_nohw,.-ecp_nistz256_ord_mul_mont_nohw .globl ecp_nistz256_ord_sqr_mont_nohw .hidden ecp_nistz256_ord_sqr_mont_nohw .type ecp_nistz256_ord_sqr_mont_nohw,@function .align 32 ecp_nistz256_ord_sqr_mont_nohw: .cfi_startproc _CET_ENDBR pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 .Lord_sqr_body: movq 0(%rsi),%r8 movq 8(%rsi),%rax movq 16(%rsi),%r14 movq 24(%rsi),%r15 leaq .Lord(%rip),%rsi movq %rdx,%rbx jmp .Loop_ord_sqr .align 32 .Loop_ord_sqr: movq %rax,%rbp mulq %r8 movq %rax,%r9 .byte 102,72,15,110,205 movq %r14,%rax movq %rdx,%r10 mulq %r8 addq %rax,%r10 movq %r15,%rax .byte 102,73,15,110,214 adcq $0,%rdx movq %rdx,%r11 mulq %r8 addq %rax,%r11 movq %r15,%rax .byte 102,73,15,110,223 adcq $0,%rdx movq %rdx,%r12 mulq %r14 movq %rax,%r13 movq %r14,%rax movq %rdx,%r14 mulq %rbp addq %rax,%r11 movq %r15,%rax adcq $0,%rdx movq %rdx,%r15 mulq %rbp addq %rax,%r12 adcq $0,%rdx addq %r15,%r12 adcq %rdx,%r13 adcq $0,%r14 xorq %r15,%r15 movq %r8,%rax addq %r9,%r9 adcq %r10,%r10 adcq %r11,%r11 adcq %r12,%r12 adcq %r13,%r13 adcq %r14,%r14 adcq $0,%r15 mulq %rax movq 
%rax,%r8 .byte 102,72,15,126,200 movq %rdx,%rbp mulq %rax addq %rbp,%r9 adcq %rax,%r10 .byte 102,72,15,126,208 adcq $0,%rdx movq %rdx,%rbp mulq %rax addq %rbp,%r11 adcq %rax,%r12 .byte 102,72,15,126,216 adcq $0,%rdx movq %rdx,%rbp movq %r8,%rcx imulq 32(%rsi),%r8 mulq %rax addq %rbp,%r13 adcq %rax,%r14 movq 0(%rsi),%rax adcq %rdx,%r15 mulq %r8 movq %r8,%rbp addq %rax,%rcx movq 8(%rsi),%rax adcq %rdx,%rcx subq %r8,%r10 sbbq $0,%rbp mulq %r8 addq %rcx,%r9 adcq $0,%rdx addq %rax,%r9 movq %r8,%rax adcq %rdx,%r10 movq %r8,%rdx adcq $0,%rbp movq %r9,%rcx imulq 32(%rsi),%r9 shlq $32,%rax shrq $32,%rdx subq %rax,%r11 movq 0(%rsi),%rax sbbq %rdx,%r8 addq %rbp,%r11 adcq $0,%r8 mulq %r9 movq %r9,%rbp addq %rax,%rcx movq 8(%rsi),%rax adcq %rdx,%rcx subq %r9,%r11 sbbq $0,%rbp mulq %r9 addq %rcx,%r10 adcq $0,%rdx addq %rax,%r10 movq %r9,%rax adcq %rdx,%r11 movq %r9,%rdx adcq $0,%rbp movq %r10,%rcx imulq 32(%rsi),%r10 shlq $32,%rax shrq $32,%rdx subq %rax,%r8 movq 0(%rsi),%rax sbbq %rdx,%r9 addq %rbp,%r8 adcq $0,%r9 mulq %r10 movq %r10,%rbp addq %rax,%rcx movq 8(%rsi),%rax adcq %rdx,%rcx subq %r10,%r8 sbbq $0,%rbp mulq %r10 addq %rcx,%r11 adcq $0,%rdx addq %rax,%r11 movq %r10,%rax adcq %rdx,%r8 movq %r10,%rdx adcq $0,%rbp movq %r11,%rcx imulq 32(%rsi),%r11 shlq $32,%rax shrq $32,%rdx subq %rax,%r9 movq 0(%rsi),%rax sbbq %rdx,%r10 addq %rbp,%r9 adcq $0,%r10 mulq %r11 movq %r11,%rbp addq %rax,%rcx movq 8(%rsi),%rax adcq %rdx,%rcx subq %r11,%r9 sbbq $0,%rbp mulq %r11 addq %rcx,%r8 adcq $0,%rdx addq %rax,%r8 movq %r11,%rax adcq %rdx,%r9 movq %r11,%rdx adcq $0,%rbp shlq $32,%rax shrq $32,%rdx subq %rax,%r10 sbbq %rdx,%r11 addq %rbp,%r10 adcq $0,%r11 xorq %rdx,%rdx addq %r12,%r8 adcq %r13,%r9 movq %r8,%r12 adcq %r14,%r10 adcq %r15,%r11 movq %r9,%rax adcq $0,%rdx subq 0(%rsi),%r8 movq %r10,%r14 sbbq 8(%rsi),%r9 sbbq 16(%rsi),%r10 movq %r11,%r15 sbbq 24(%rsi),%r11 sbbq $0,%rdx cmovcq %r12,%r8 cmovncq %r9,%rax cmovncq %r10,%r14 cmovncq %r11,%r15 decq %rbx jnz .Loop_ord_sqr movq %r8,0(%rdi) movq %rax,8(%rdi) pxor %xmm1,%xmm1 movq %r14,16(%rdi) pxor %xmm2,%xmm2 movq %r15,24(%rdi) pxor %xmm3,%xmm3 movq 0(%rsp),%r15 .cfi_restore %r15 movq 8(%rsp),%r14 .cfi_restore %r14 movq 16(%rsp),%r13 .cfi_restore %r13 movq 24(%rsp),%r12 .cfi_restore %r12 movq 32(%rsp),%rbx .cfi_restore %rbx movq 40(%rsp),%rbp .cfi_restore %rbp leaq 48(%rsp),%rsp .cfi_adjust_cfa_offset -48 .Lord_sqr_epilogue: ret .cfi_endproc .size ecp_nistz256_ord_sqr_mont_nohw,.-ecp_nistz256_ord_sqr_mont_nohw .globl ecp_nistz256_ord_mul_mont_adx .hidden ecp_nistz256_ord_mul_mont_adx .type ecp_nistz256_ord_mul_mont_adx,@function .align 32 ecp_nistz256_ord_mul_mont_adx: .cfi_startproc .Lecp_nistz256_ord_mul_mont_adx: _CET_ENDBR pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 .Lord_mulx_body: movq %rdx,%rbx movq 0(%rdx),%rdx movq 0(%rsi),%r9 movq 8(%rsi),%r10 movq 16(%rsi),%r11 movq 24(%rsi),%r12 leaq -128(%rsi),%rsi leaq .Lord-128(%rip),%r14 movq .LordK(%rip),%r15 mulxq %r9,%r8,%r9 mulxq %r10,%rcx,%r10 mulxq %r11,%rbp,%r11 addq %rcx,%r9 mulxq %r12,%rcx,%r12 movq %r8,%rdx mulxq %r15,%rdx,%rax adcq %rbp,%r10 adcq %rcx,%r11 adcq $0,%r12 xorq %r13,%r13 mulxq 0+128(%r14),%rcx,%rbp adcxq %rcx,%r8 adoxq %rbp,%r9 mulxq 8+128(%r14),%rcx,%rbp adcxq %rcx,%r9 adoxq %rbp,%r10 mulxq 
16+128(%r14),%rcx,%rbp adcxq %rcx,%r10 adoxq %rbp,%r11 mulxq 24+128(%r14),%rcx,%rbp movq 8(%rbx),%rdx adcxq %rcx,%r11 adoxq %rbp,%r12 adcxq %r8,%r12 adoxq %r8,%r13 adcq $0,%r13 mulxq 0+128(%rsi),%rcx,%rbp adcxq %rcx,%r9 adoxq %rbp,%r10 mulxq 8+128(%rsi),%rcx,%rbp adcxq %rcx,%r10 adoxq %rbp,%r11 mulxq 16+128(%rsi),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq 24+128(%rsi),%rcx,%rbp movq %r9,%rdx mulxq %r15,%rdx,%rax adcxq %rcx,%r12 adoxq %rbp,%r13 adcxq %r8,%r13 adoxq %r8,%r8 adcq $0,%r8 mulxq 0+128(%r14),%rcx,%rbp adcxq %rcx,%r9 adoxq %rbp,%r10 mulxq 8+128(%r14),%rcx,%rbp adcxq %rcx,%r10 adoxq %rbp,%r11 mulxq 16+128(%r14),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq 24+128(%r14),%rcx,%rbp movq 16(%rbx),%rdx adcxq %rcx,%r12 adoxq %rbp,%r13 adcxq %r9,%r13 adoxq %r9,%r8 adcq $0,%r8 mulxq 0+128(%rsi),%rcx,%rbp adcxq %rcx,%r10 adoxq %rbp,%r11 mulxq 8+128(%rsi),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq 16+128(%rsi),%rcx,%rbp adcxq %rcx,%r12 adoxq %rbp,%r13 mulxq 24+128(%rsi),%rcx,%rbp movq %r10,%rdx mulxq %r15,%rdx,%rax adcxq %rcx,%r13 adoxq %rbp,%r8 adcxq %r9,%r8 adoxq %r9,%r9 adcq $0,%r9 mulxq 0+128(%r14),%rcx,%rbp adcxq %rcx,%r10 adoxq %rbp,%r11 mulxq 8+128(%r14),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq 16+128(%r14),%rcx,%rbp adcxq %rcx,%r12 adoxq %rbp,%r13 mulxq 24+128(%r14),%rcx,%rbp movq 24(%rbx),%rdx adcxq %rcx,%r13 adoxq %rbp,%r8 adcxq %r10,%r8 adoxq %r10,%r9 adcq $0,%r9 mulxq 0+128(%rsi),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq 8+128(%rsi),%rcx,%rbp adcxq %rcx,%r12 adoxq %rbp,%r13 mulxq 16+128(%rsi),%rcx,%rbp adcxq %rcx,%r13 adoxq %rbp,%r8 mulxq 24+128(%rsi),%rcx,%rbp movq %r11,%rdx mulxq %r15,%rdx,%rax adcxq %rcx,%r8 adoxq %rbp,%r9 adcxq %r10,%r9 adoxq %r10,%r10 adcq $0,%r10 mulxq 0+128(%r14),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq 8+128(%r14),%rcx,%rbp adcxq %rcx,%r12 adoxq %rbp,%r13 mulxq 16+128(%r14),%rcx,%rbp adcxq %rcx,%r13 adoxq %rbp,%r8 mulxq 24+128(%r14),%rcx,%rbp leaq 128(%r14),%r14 movq %r12,%rbx adcxq %rcx,%r8 adoxq %rbp,%r9 movq %r13,%rdx adcxq %r11,%r9 adoxq %r11,%r10 adcq $0,%r10 movq %r8,%rcx subq 0(%r14),%r12 sbbq 8(%r14),%r13 sbbq 16(%r14),%r8 movq %r9,%rbp sbbq 24(%r14),%r9 sbbq $0,%r10 cmovcq %rbx,%r12 cmovcq %rdx,%r13 cmovcq %rcx,%r8 cmovcq %rbp,%r9 movq %r12,0(%rdi) movq %r13,8(%rdi) movq %r8,16(%rdi) movq %r9,24(%rdi) movq 0(%rsp),%r15 .cfi_restore %r15 movq 8(%rsp),%r14 .cfi_restore %r14 movq 16(%rsp),%r13 .cfi_restore %r13 movq 24(%rsp),%r12 .cfi_restore %r12 movq 32(%rsp),%rbx .cfi_restore %rbx movq 40(%rsp),%rbp .cfi_restore %rbp leaq 48(%rsp),%rsp .cfi_adjust_cfa_offset -48 .Lord_mulx_epilogue: ret .cfi_endproc .size ecp_nistz256_ord_mul_mont_adx,.-ecp_nistz256_ord_mul_mont_adx .globl ecp_nistz256_ord_sqr_mont_adx .hidden ecp_nistz256_ord_sqr_mont_adx .type ecp_nistz256_ord_sqr_mont_adx,@function .align 32 ecp_nistz256_ord_sqr_mont_adx: .cfi_startproc _CET_ENDBR .Lecp_nistz256_ord_sqr_mont_adx: pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 .Lord_sqrx_body: movq %rdx,%rbx movq 0(%rsi),%rdx movq 8(%rsi),%r14 movq 16(%rsi),%r15 movq 24(%rsi),%r8 leaq .Lord(%rip),%rsi jmp .Loop_ord_sqrx .align 32 .Loop_ord_sqrx: mulxq %r14,%r9,%r10 mulxq %r15,%rcx,%r11 movq %rdx,%rax .byte 102,73,15,110,206 mulxq %r8,%rbp,%r12 movq %r14,%rdx addq %rcx,%r10 
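// .Loop_ord_sqrx: the mulx/adcx/adox chains form the cross products a_i*a_j,
// double them, add the squared diagonal terms, and then apply four word-wise
// Montgomery reduction steps against .Lord (the P-256 group order), one full
// squaring per iteration of the repetition count held in %rbx.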
.byte 102,73,15,110,215 adcq %rbp,%r11 adcq $0,%r12 xorq %r13,%r13 mulxq %r15,%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq %r8,%rcx,%rbp movq %r15,%rdx adcxq %rcx,%r12 adoxq %rbp,%r13 adcq $0,%r13 mulxq %r8,%rcx,%r14 movq %rax,%rdx .byte 102,73,15,110,216 xorq %r15,%r15 adcxq %r9,%r9 adoxq %rcx,%r13 adcxq %r10,%r10 adoxq %r15,%r14 mulxq %rdx,%r8,%rbp .byte 102,72,15,126,202 adcxq %r11,%r11 adoxq %rbp,%r9 adcxq %r12,%r12 mulxq %rdx,%rcx,%rax .byte 102,72,15,126,210 adcxq %r13,%r13 adoxq %rcx,%r10 adcxq %r14,%r14 mulxq %rdx,%rcx,%rbp .byte 0x67 .byte 102,72,15,126,218 adoxq %rax,%r11 adcxq %r15,%r15 adoxq %rcx,%r12 adoxq %rbp,%r13 mulxq %rdx,%rcx,%rax adoxq %rcx,%r14 adoxq %rax,%r15 movq %r8,%rdx mulxq 32(%rsi),%rdx,%rcx xorq %rax,%rax mulxq 0(%rsi),%rcx,%rbp adcxq %rcx,%r8 adoxq %rbp,%r9 mulxq 8(%rsi),%rcx,%rbp adcxq %rcx,%r9 adoxq %rbp,%r10 mulxq 16(%rsi),%rcx,%rbp adcxq %rcx,%r10 adoxq %rbp,%r11 mulxq 24(%rsi),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r8 adcxq %rax,%r8 movq %r9,%rdx mulxq 32(%rsi),%rdx,%rcx mulxq 0(%rsi),%rcx,%rbp adoxq %rcx,%r9 adcxq %rbp,%r10 mulxq 8(%rsi),%rcx,%rbp adoxq %rcx,%r10 adcxq %rbp,%r11 mulxq 16(%rsi),%rcx,%rbp adoxq %rcx,%r11 adcxq %rbp,%r8 mulxq 24(%rsi),%rcx,%rbp adoxq %rcx,%r8 adcxq %rbp,%r9 adoxq %rax,%r9 movq %r10,%rdx mulxq 32(%rsi),%rdx,%rcx mulxq 0(%rsi),%rcx,%rbp adcxq %rcx,%r10 adoxq %rbp,%r11 mulxq 8(%rsi),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r8 mulxq 16(%rsi),%rcx,%rbp adcxq %rcx,%r8 adoxq %rbp,%r9 mulxq 24(%rsi),%rcx,%rbp adcxq %rcx,%r9 adoxq %rbp,%r10 adcxq %rax,%r10 movq %r11,%rdx mulxq 32(%rsi),%rdx,%rcx mulxq 0(%rsi),%rcx,%rbp adoxq %rcx,%r11 adcxq %rbp,%r8 mulxq 8(%rsi),%rcx,%rbp adoxq %rcx,%r8 adcxq %rbp,%r9 mulxq 16(%rsi),%rcx,%rbp adoxq %rcx,%r9 adcxq %rbp,%r10 mulxq 24(%rsi),%rcx,%rbp adoxq %rcx,%r10 adcxq %rbp,%r11 adoxq %rax,%r11 addq %r8,%r12 adcq %r13,%r9 movq %r12,%rdx adcq %r14,%r10 adcq %r15,%r11 movq %r9,%r14 adcq $0,%rax subq 0(%rsi),%r12 movq %r10,%r15 sbbq 8(%rsi),%r9 sbbq 16(%rsi),%r10 movq %r11,%r8 sbbq 24(%rsi),%r11 sbbq $0,%rax cmovncq %r12,%rdx cmovncq %r9,%r14 cmovncq %r10,%r15 cmovncq %r11,%r8 decq %rbx jnz .Loop_ord_sqrx movq %rdx,0(%rdi) movq %r14,8(%rdi) pxor %xmm1,%xmm1 movq %r15,16(%rdi) pxor %xmm2,%xmm2 movq %r8,24(%rdi) pxor %xmm3,%xmm3 movq 0(%rsp),%r15 .cfi_restore %r15 movq 8(%rsp),%r14 .cfi_restore %r14 movq 16(%rsp),%r13 .cfi_restore %r13 movq 24(%rsp),%r12 .cfi_restore %r12 movq 32(%rsp),%rbx .cfi_restore %rbx movq 40(%rsp),%rbp .cfi_restore %rbp leaq 48(%rsp),%rsp .cfi_adjust_cfa_offset -48 .Lord_sqrx_epilogue: ret .cfi_endproc .size ecp_nistz256_ord_sqr_mont_adx,.-ecp_nistz256_ord_sqr_mont_adx .globl ecp_nistz256_mul_mont_nohw .hidden ecp_nistz256_mul_mont_nohw .type ecp_nistz256_mul_mont_nohw,@function .align 32 ecp_nistz256_mul_mont_nohw: .cfi_startproc _CET_ENDBR pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 .Lmul_body: movq %rdx,%rbx movq 0(%rdx),%rax movq 0(%rsi),%r9 movq 8(%rsi),%r10 movq 16(%rsi),%r11 movq 24(%rsi),%r12 call __ecp_nistz256_mul_montq movq 0(%rsp),%r15 .cfi_restore %r15 movq 8(%rsp),%r14 .cfi_restore %r14 movq 16(%rsp),%r13 .cfi_restore %r13 movq 24(%rsp),%r12 .cfi_restore %r12 movq 32(%rsp),%rbx .cfi_restore %rbx movq 40(%rsp),%rbp .cfi_restore %rbp leaq 48(%rsp),%rsp .cfi_adjust_cfa_offset 
-48 .Lmul_epilogue: ret .cfi_endproc .size ecp_nistz256_mul_mont_nohw,.-ecp_nistz256_mul_mont_nohw .type __ecp_nistz256_mul_montq,@function .align 32 __ecp_nistz256_mul_montq: .cfi_startproc movq %rax,%rbp mulq %r9 movq .Lpoly+8(%rip),%r14 movq %rax,%r8 movq %rbp,%rax movq %rdx,%r9 mulq %r10 movq .Lpoly+24(%rip),%r15 addq %rax,%r9 movq %rbp,%rax adcq $0,%rdx movq %rdx,%r10 mulq %r11 addq %rax,%r10 movq %rbp,%rax adcq $0,%rdx movq %rdx,%r11 mulq %r12 addq %rax,%r11 movq %r8,%rax adcq $0,%rdx xorq %r13,%r13 movq %rdx,%r12 movq %r8,%rbp shlq $32,%r8 mulq %r15 shrq $32,%rbp addq %r8,%r9 adcq %rbp,%r10 adcq %rax,%r11 movq 8(%rbx),%rax adcq %rdx,%r12 adcq $0,%r13 xorq %r8,%r8 movq %rax,%rbp mulq 0(%rsi) addq %rax,%r9 movq %rbp,%rax adcq $0,%rdx movq %rdx,%rcx mulq 8(%rsi) addq %rcx,%r10 adcq $0,%rdx addq %rax,%r10 movq %rbp,%rax adcq $0,%rdx movq %rdx,%rcx mulq 16(%rsi) addq %rcx,%r11 adcq $0,%rdx addq %rax,%r11 movq %rbp,%rax adcq $0,%rdx movq %rdx,%rcx mulq 24(%rsi) addq %rcx,%r12 adcq $0,%rdx addq %rax,%r12 movq %r9,%rax adcq %rdx,%r13 adcq $0,%r8 movq %r9,%rbp shlq $32,%r9 mulq %r15 shrq $32,%rbp addq %r9,%r10 adcq %rbp,%r11 adcq %rax,%r12 movq 16(%rbx),%rax adcq %rdx,%r13 adcq $0,%r8 xorq %r9,%r9 movq %rax,%rbp mulq 0(%rsi) addq %rax,%r10 movq %rbp,%rax adcq $0,%rdx movq %rdx,%rcx mulq 8(%rsi) addq %rcx,%r11 adcq $0,%rdx addq %rax,%r11 movq %rbp,%rax adcq $0,%rdx movq %rdx,%rcx mulq 16(%rsi) addq %rcx,%r12 adcq $0,%rdx addq %rax,%r12 movq %rbp,%rax adcq $0,%rdx movq %rdx,%rcx mulq 24(%rsi) addq %rcx,%r13 adcq $0,%rdx addq %rax,%r13 movq %r10,%rax adcq %rdx,%r8 adcq $0,%r9 movq %r10,%rbp shlq $32,%r10 mulq %r15 shrq $32,%rbp addq %r10,%r11 adcq %rbp,%r12 adcq %rax,%r13 movq 24(%rbx),%rax adcq %rdx,%r8 adcq $0,%r9 xorq %r10,%r10 movq %rax,%rbp mulq 0(%rsi) addq %rax,%r11 movq %rbp,%rax adcq $0,%rdx movq %rdx,%rcx mulq 8(%rsi) addq %rcx,%r12 adcq $0,%rdx addq %rax,%r12 movq %rbp,%rax adcq $0,%rdx movq %rdx,%rcx mulq 16(%rsi) addq %rcx,%r13 adcq $0,%rdx addq %rax,%r13 movq %rbp,%rax adcq $0,%rdx movq %rdx,%rcx mulq 24(%rsi) addq %rcx,%r8 adcq $0,%rdx addq %rax,%r8 movq %r11,%rax adcq %rdx,%r9 adcq $0,%r10 movq %r11,%rbp shlq $32,%r11 mulq %r15 shrq $32,%rbp addq %r11,%r12 adcq %rbp,%r13 movq %r12,%rcx adcq %rax,%r8 adcq %rdx,%r9 movq %r13,%rbp adcq $0,%r10 subq $-1,%r12 movq %r8,%rbx sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%rdx sbbq %r15,%r9 sbbq $0,%r10 cmovcq %rcx,%r12 cmovcq %rbp,%r13 movq %r12,0(%rdi) cmovcq %rbx,%r8 movq %r13,8(%rdi) cmovcq %rdx,%r9 movq %r8,16(%rdi) movq %r9,24(%rdi) ret .cfi_endproc .size __ecp_nistz256_mul_montq,.-__ecp_nistz256_mul_montq .globl ecp_nistz256_sqr_mont_nohw .hidden ecp_nistz256_sqr_mont_nohw .type ecp_nistz256_sqr_mont_nohw,@function .align 32 ecp_nistz256_sqr_mont_nohw: .cfi_startproc _CET_ENDBR pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 .Lsqr_body: movq 0(%rsi),%rax movq 8(%rsi),%r14 movq 16(%rsi),%r15 movq 24(%rsi),%r8 call __ecp_nistz256_sqr_montq movq 0(%rsp),%r15 .cfi_restore %r15 movq 8(%rsp),%r14 .cfi_restore %r14 movq 16(%rsp),%r13 .cfi_restore %r13 movq 24(%rsp),%r12 .cfi_restore %r12 movq 32(%rsp),%rbx .cfi_restore %rbx movq 40(%rsp),%rbp .cfi_restore %rbp leaq 48(%rsp),%rsp .cfi_adjust_cfa_offset -48 .Lsqr_epilogue: ret .cfi_endproc .size 
ecp_nistz256_sqr_mont_nohw,.-ecp_nistz256_sqr_mont_nohw .type __ecp_nistz256_sqr_montq,@function .align 32 __ecp_nistz256_sqr_montq: .cfi_startproc movq %rax,%r13 mulq %r14 movq %rax,%r9 movq %r15,%rax movq %rdx,%r10 mulq %r13 addq %rax,%r10 movq %r8,%rax adcq $0,%rdx movq %rdx,%r11 mulq %r13 addq %rax,%r11 movq %r15,%rax adcq $0,%rdx movq %rdx,%r12 mulq %r14 addq %rax,%r11 movq %r8,%rax adcq $0,%rdx movq %rdx,%rbp mulq %r14 addq %rax,%r12 movq %r8,%rax adcq $0,%rdx addq %rbp,%r12 movq %rdx,%r13 adcq $0,%r13 mulq %r15 xorq %r15,%r15 addq %rax,%r13 movq 0(%rsi),%rax movq %rdx,%r14 adcq $0,%r14 addq %r9,%r9 adcq %r10,%r10 adcq %r11,%r11 adcq %r12,%r12 adcq %r13,%r13 adcq %r14,%r14 adcq $0,%r15 mulq %rax movq %rax,%r8 movq 8(%rsi),%rax movq %rdx,%rcx mulq %rax addq %rcx,%r9 adcq %rax,%r10 movq 16(%rsi),%rax adcq $0,%rdx movq %rdx,%rcx mulq %rax addq %rcx,%r11 adcq %rax,%r12 movq 24(%rsi),%rax adcq $0,%rdx movq %rdx,%rcx mulq %rax addq %rcx,%r13 adcq %rax,%r14 movq %r8,%rax adcq %rdx,%r15 movq .Lpoly+8(%rip),%rsi movq .Lpoly+24(%rip),%rbp movq %r8,%rcx shlq $32,%r8 mulq %rbp shrq $32,%rcx addq %r8,%r9 adcq %rcx,%r10 adcq %rax,%r11 movq %r9,%rax adcq $0,%rdx movq %r9,%rcx shlq $32,%r9 movq %rdx,%r8 mulq %rbp shrq $32,%rcx addq %r9,%r10 adcq %rcx,%r11 adcq %rax,%r8 movq %r10,%rax adcq $0,%rdx movq %r10,%rcx shlq $32,%r10 movq %rdx,%r9 mulq %rbp shrq $32,%rcx addq %r10,%r11 adcq %rcx,%r8 adcq %rax,%r9 movq %r11,%rax adcq $0,%rdx movq %r11,%rcx shlq $32,%r11 movq %rdx,%r10 mulq %rbp shrq $32,%rcx addq %r11,%r8 adcq %rcx,%r9 adcq %rax,%r10 adcq $0,%rdx xorq %r11,%r11 addq %r8,%r12 adcq %r9,%r13 movq %r12,%r8 adcq %r10,%r14 adcq %rdx,%r15 movq %r13,%r9 adcq $0,%r11 subq $-1,%r12 movq %r14,%r10 sbbq %rsi,%r13 sbbq $0,%r14 movq %r15,%rcx sbbq %rbp,%r15 sbbq $0,%r11 cmovcq %r8,%r12 cmovcq %r9,%r13 movq %r12,0(%rdi) cmovcq %r10,%r14 movq %r13,8(%rdi) cmovcq %rcx,%r15 movq %r14,16(%rdi) movq %r15,24(%rdi) ret .cfi_endproc .size __ecp_nistz256_sqr_montq,.-__ecp_nistz256_sqr_montq .globl ecp_nistz256_mul_mont_adx .hidden ecp_nistz256_mul_mont_adx .type ecp_nistz256_mul_mont_adx,@function .align 32 ecp_nistz256_mul_mont_adx: .cfi_startproc _CET_ENDBR pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 .Lmulx_body: movq %rdx,%rbx movq 0(%rdx),%rdx movq 0(%rsi),%r9 movq 8(%rsi),%r10 movq 16(%rsi),%r11 movq 24(%rsi),%r12 leaq -128(%rsi),%rsi call __ecp_nistz256_mul_montx movq 0(%rsp),%r15 .cfi_restore %r15 movq 8(%rsp),%r14 .cfi_restore %r14 movq 16(%rsp),%r13 .cfi_restore %r13 movq 24(%rsp),%r12 .cfi_restore %r12 movq 32(%rsp),%rbx .cfi_restore %rbx movq 40(%rsp),%rbp .cfi_restore %rbp leaq 48(%rsp),%rsp .cfi_adjust_cfa_offset -48 .Lmulx_epilogue: ret .cfi_endproc .size ecp_nistz256_mul_mont_adx,.-ecp_nistz256_mul_mont_adx .type __ecp_nistz256_mul_montx,@function .align 32 __ecp_nistz256_mul_montx: .cfi_startproc mulxq %r9,%r8,%r9 mulxq %r10,%rcx,%r10 movq $32,%r14 xorq %r13,%r13 mulxq %r11,%rbp,%r11 movq .Lpoly+24(%rip),%r15 adcq %rcx,%r9 mulxq %r12,%rcx,%r12 movq %r8,%rdx adcq %rbp,%r10 shlxq %r14,%r8,%rbp adcq %rcx,%r11 shrxq %r14,%r8,%rcx adcq $0,%r12 addq %rbp,%r9 adcq %rcx,%r10 mulxq %r15,%rcx,%rbp movq 8(%rbx),%rdx adcq %rcx,%r11 adcq %rbp,%r12 adcq $0,%r13 xorq %r8,%r8 mulxq 0+128(%rsi),%rcx,%rbp adcxq %rcx,%r9 adoxq 
%rbp,%r10 mulxq 8+128(%rsi),%rcx,%rbp adcxq %rcx,%r10 adoxq %rbp,%r11 mulxq 16+128(%rsi),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq 24+128(%rsi),%rcx,%rbp movq %r9,%rdx adcxq %rcx,%r12 shlxq %r14,%r9,%rcx adoxq %rbp,%r13 shrxq %r14,%r9,%rbp adcxq %r8,%r13 adoxq %r8,%r8 adcq $0,%r8 addq %rcx,%r10 adcq %rbp,%r11 mulxq %r15,%rcx,%rbp movq 16(%rbx),%rdx adcq %rcx,%r12 adcq %rbp,%r13 adcq $0,%r8 xorq %r9,%r9 mulxq 0+128(%rsi),%rcx,%rbp adcxq %rcx,%r10 adoxq %rbp,%r11 mulxq 8+128(%rsi),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq 16+128(%rsi),%rcx,%rbp adcxq %rcx,%r12 adoxq %rbp,%r13 mulxq 24+128(%rsi),%rcx,%rbp movq %r10,%rdx adcxq %rcx,%r13 shlxq %r14,%r10,%rcx adoxq %rbp,%r8 shrxq %r14,%r10,%rbp adcxq %r9,%r8 adoxq %r9,%r9 adcq $0,%r9 addq %rcx,%r11 adcq %rbp,%r12 mulxq %r15,%rcx,%rbp movq 24(%rbx),%rdx adcq %rcx,%r13 adcq %rbp,%r8 adcq $0,%r9 xorq %r10,%r10 mulxq 0+128(%rsi),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq 8+128(%rsi),%rcx,%rbp adcxq %rcx,%r12 adoxq %rbp,%r13 mulxq 16+128(%rsi),%rcx,%rbp adcxq %rcx,%r13 adoxq %rbp,%r8 mulxq 24+128(%rsi),%rcx,%rbp movq %r11,%rdx adcxq %rcx,%r8 shlxq %r14,%r11,%rcx adoxq %rbp,%r9 shrxq %r14,%r11,%rbp adcxq %r10,%r9 adoxq %r10,%r10 adcq $0,%r10 addq %rcx,%r12 adcq %rbp,%r13 mulxq %r15,%rcx,%rbp movq %r12,%rbx movq .Lpoly+8(%rip),%r14 adcq %rcx,%r8 movq %r13,%rdx adcq %rbp,%r9 adcq $0,%r10 xorl %eax,%eax movq %r8,%rcx sbbq $-1,%r12 sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%rbp sbbq %r15,%r9 sbbq $0,%r10 cmovcq %rbx,%r12 cmovcq %rdx,%r13 movq %r12,0(%rdi) cmovcq %rcx,%r8 movq %r13,8(%rdi) cmovcq %rbp,%r9 movq %r8,16(%rdi) movq %r9,24(%rdi) ret .cfi_endproc .size __ecp_nistz256_mul_montx,.-__ecp_nistz256_mul_montx .globl ecp_nistz256_sqr_mont_adx .hidden ecp_nistz256_sqr_mont_adx .type ecp_nistz256_sqr_mont_adx,@function .align 32 ecp_nistz256_sqr_mont_adx: .cfi_startproc _CET_ENDBR pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 .Lsqrx_body: movq 0(%rsi),%rdx movq 8(%rsi),%r14 movq 16(%rsi),%r15 movq 24(%rsi),%r8 leaq -128(%rsi),%rsi call __ecp_nistz256_sqr_montx movq 0(%rsp),%r15 .cfi_restore %r15 movq 8(%rsp),%r14 .cfi_restore %r14 movq 16(%rsp),%r13 .cfi_restore %r13 movq 24(%rsp),%r12 .cfi_restore %r12 movq 32(%rsp),%rbx .cfi_restore %rbx movq 40(%rsp),%rbp .cfi_restore %rbp leaq 48(%rsp),%rsp .cfi_adjust_cfa_offset -48 .Lsqrx_epilogue: ret .cfi_endproc .size ecp_nistz256_sqr_mont_adx,.-ecp_nistz256_sqr_mont_adx .type __ecp_nistz256_sqr_montx,@function .align 32 __ecp_nistz256_sqr_montx: .cfi_startproc mulxq %r14,%r9,%r10 mulxq %r15,%rcx,%r11 xorl %eax,%eax adcq %rcx,%r10 mulxq %r8,%rbp,%r12 movq %r14,%rdx adcq %rbp,%r11 adcq $0,%r12 xorq %r13,%r13 mulxq %r15,%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq %r8,%rcx,%rbp movq %r15,%rdx adcxq %rcx,%r12 adoxq %rbp,%r13 adcq $0,%r13 mulxq %r8,%rcx,%r14 movq 0+128(%rsi),%rdx xorq %r15,%r15 adcxq %r9,%r9 adoxq %rcx,%r13 adcxq %r10,%r10 adoxq %r15,%r14 mulxq %rdx,%r8,%rbp movq 8+128(%rsi),%rdx adcxq %r11,%r11 adoxq %rbp,%r9 adcxq %r12,%r12 mulxq %rdx,%rcx,%rax movq 16+128(%rsi),%rdx adcxq %r13,%r13 adoxq %rcx,%r10 adcxq %r14,%r14 .byte 0x67 mulxq %rdx,%rcx,%rbp movq 24+128(%rsi),%rdx adoxq %rax,%r11 adcxq %r15,%r15 adoxq %rcx,%r12 movq $32,%rsi adoxq %rbp,%r13 .byte 0x67,0x67 mulxq %rdx,%rcx,%rax movq 
.Lpoly+24(%rip),%rdx adoxq %rcx,%r14 shlxq %rsi,%r8,%rcx adoxq %rax,%r15 shrxq %rsi,%r8,%rax movq %rdx,%rbp addq %rcx,%r9 adcq %rax,%r10 mulxq %r8,%rcx,%r8 adcq %rcx,%r11 shlxq %rsi,%r9,%rcx adcq $0,%r8 shrxq %rsi,%r9,%rax addq %rcx,%r10 adcq %rax,%r11 mulxq %r9,%rcx,%r9 adcq %rcx,%r8 shlxq %rsi,%r10,%rcx adcq $0,%r9 shrxq %rsi,%r10,%rax addq %rcx,%r11 adcq %rax,%r8 mulxq %r10,%rcx,%r10 adcq %rcx,%r9 shlxq %rsi,%r11,%rcx adcq $0,%r10 shrxq %rsi,%r11,%rax addq %rcx,%r8 adcq %rax,%r9 mulxq %r11,%rcx,%r11 adcq %rcx,%r10 adcq $0,%r11 xorq %rdx,%rdx addq %r8,%r12 movq .Lpoly+8(%rip),%rsi adcq %r9,%r13 movq %r12,%r8 adcq %r10,%r14 adcq %r11,%r15 movq %r13,%r9 adcq $0,%rdx subq $-1,%r12 movq %r14,%r10 sbbq %rsi,%r13 sbbq $0,%r14 movq %r15,%r11 sbbq %rbp,%r15 sbbq $0,%rdx cmovcq %r8,%r12 cmovcq %r9,%r13 movq %r12,0(%rdi) cmovcq %r10,%r14 movq %r13,8(%rdi) cmovcq %r11,%r15 movq %r14,16(%rdi) movq %r15,24(%rdi) ret .cfi_endproc .size __ecp_nistz256_sqr_montx,.-__ecp_nistz256_sqr_montx .globl ecp_nistz256_select_w5_nohw .hidden ecp_nistz256_select_w5_nohw .type ecp_nistz256_select_w5_nohw,@function .align 32 ecp_nistz256_select_w5_nohw: .cfi_startproc _CET_ENDBR movdqa .LOne(%rip),%xmm0 movd %edx,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 pxor %xmm7,%xmm7 movdqa %xmm0,%xmm8 pshufd $0,%xmm1,%xmm1 movq $16,%rax .Lselect_loop_sse_w5: movdqa %xmm8,%xmm15 paddd %xmm0,%xmm8 pcmpeqd %xmm1,%xmm15 movdqa 0(%rsi),%xmm9 movdqa 16(%rsi),%xmm10 movdqa 32(%rsi),%xmm11 movdqa 48(%rsi),%xmm12 movdqa 64(%rsi),%xmm13 movdqa 80(%rsi),%xmm14 leaq 96(%rsi),%rsi pand %xmm15,%xmm9 pand %xmm15,%xmm10 por %xmm9,%xmm2 pand %xmm15,%xmm11 por %xmm10,%xmm3 pand %xmm15,%xmm12 por %xmm11,%xmm4 pand %xmm15,%xmm13 por %xmm12,%xmm5 pand %xmm15,%xmm14 por %xmm13,%xmm6 por %xmm14,%xmm7 decq %rax jnz .Lselect_loop_sse_w5 movdqu %xmm2,0(%rdi) movdqu %xmm3,16(%rdi) movdqu %xmm4,32(%rdi) movdqu %xmm5,48(%rdi) movdqu %xmm6,64(%rdi) movdqu %xmm7,80(%rdi) ret .cfi_endproc .LSEH_end_ecp_nistz256_select_w5_nohw: .size ecp_nistz256_select_w5_nohw,.-ecp_nistz256_select_w5_nohw .globl ecp_nistz256_select_w7_nohw .hidden ecp_nistz256_select_w7_nohw .type ecp_nistz256_select_w7_nohw,@function .align 32 ecp_nistz256_select_w7_nohw: .cfi_startproc _CET_ENDBR movdqa .LOne(%rip),%xmm8 movd %edx,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 movdqa %xmm8,%xmm0 pshufd $0,%xmm1,%xmm1 movq $64,%rax .Lselect_loop_sse_w7: movdqa %xmm8,%xmm15 paddd %xmm0,%xmm8 movdqa 0(%rsi),%xmm9 movdqa 16(%rsi),%xmm10 pcmpeqd %xmm1,%xmm15 movdqa 32(%rsi),%xmm11 movdqa 48(%rsi),%xmm12 leaq 64(%rsi),%rsi pand %xmm15,%xmm9 pand %xmm15,%xmm10 por %xmm9,%xmm2 pand %xmm15,%xmm11 por %xmm10,%xmm3 pand %xmm15,%xmm12 por %xmm11,%xmm4 prefetcht0 255(%rsi) por %xmm12,%xmm5 decq %rax jnz .Lselect_loop_sse_w7 movdqu %xmm2,0(%rdi) movdqu %xmm3,16(%rdi) movdqu %xmm4,32(%rdi) movdqu %xmm5,48(%rdi) ret .cfi_endproc .LSEH_end_ecp_nistz256_select_w7_nohw: .size ecp_nistz256_select_w7_nohw,.-ecp_nistz256_select_w7_nohw .globl ecp_nistz256_select_w5_avx2 .hidden ecp_nistz256_select_w5_avx2 .type ecp_nistz256_select_w5_avx2,@function .align 32 ecp_nistz256_select_w5_avx2: .cfi_startproc _CET_ENDBR vzeroupper vmovdqa .LTwo(%rip),%ymm0 vpxor %ymm2,%ymm2,%ymm2 vpxor %ymm3,%ymm3,%ymm3 vpxor %ymm4,%ymm4,%ymm4 vmovdqa .LOne(%rip),%ymm5 vmovdqa .LTwo(%rip),%ymm10 vmovd %edx,%xmm1 vpermd %ymm1,%ymm2,%ymm1 movq $8,%rax .Lselect_loop_avx2_w5: vmovdqa 0(%rsi),%ymm6 vmovdqa 32(%rsi),%ymm7 vmovdqa 64(%rsi),%ymm8 vmovdqa 96(%rsi),%ymm11 
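// Constant-time selection: both 96-byte table entries loaded in this
// iteration are compared against the broadcast index in %ymm1 (vpcmpeqd on
// the running counters %ymm5 and %ymm10), then masked with vpand and folded
// into the accumulators with vpxor, so every entry is read regardless of
// which index was requested.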
vmovdqa 128(%rsi),%ymm12 vmovdqa 160(%rsi),%ymm13 vpcmpeqd %ymm1,%ymm5,%ymm9 vpcmpeqd %ymm1,%ymm10,%ymm14 vpaddd %ymm0,%ymm5,%ymm5 vpaddd %ymm0,%ymm10,%ymm10 leaq 192(%rsi),%rsi vpand %ymm9,%ymm6,%ymm6 vpand %ymm9,%ymm7,%ymm7 vpand %ymm9,%ymm8,%ymm8 vpand %ymm14,%ymm11,%ymm11 vpand %ymm14,%ymm12,%ymm12 vpand %ymm14,%ymm13,%ymm13 vpxor %ymm6,%ymm2,%ymm2 vpxor %ymm7,%ymm3,%ymm3 vpxor %ymm8,%ymm4,%ymm4 vpxor %ymm11,%ymm2,%ymm2 vpxor %ymm12,%ymm3,%ymm3 vpxor %ymm13,%ymm4,%ymm4 decq %rax jnz .Lselect_loop_avx2_w5 vmovdqu %ymm2,0(%rdi) vmovdqu %ymm3,32(%rdi) vmovdqu %ymm4,64(%rdi) vzeroupper ret .cfi_endproc .LSEH_end_ecp_nistz256_select_w5_avx2: .size ecp_nistz256_select_w5_avx2,.-ecp_nistz256_select_w5_avx2 .globl ecp_nistz256_select_w7_avx2 .hidden ecp_nistz256_select_w7_avx2 .type ecp_nistz256_select_w7_avx2,@function .align 32 ecp_nistz256_select_w7_avx2: .cfi_startproc _CET_ENDBR vzeroupper vmovdqa .LThree(%rip),%ymm0 vpxor %ymm2,%ymm2,%ymm2 vpxor %ymm3,%ymm3,%ymm3 vmovdqa .LOne(%rip),%ymm4 vmovdqa .LTwo(%rip),%ymm8 vmovdqa .LThree(%rip),%ymm12 vmovd %edx,%xmm1 vpermd %ymm1,%ymm2,%ymm1 movq $21,%rax .Lselect_loop_avx2_w7: vmovdqa 0(%rsi),%ymm5 vmovdqa 32(%rsi),%ymm6 vmovdqa 64(%rsi),%ymm9 vmovdqa 96(%rsi),%ymm10 vmovdqa 128(%rsi),%ymm13 vmovdqa 160(%rsi),%ymm14 vpcmpeqd %ymm1,%ymm4,%ymm7 vpcmpeqd %ymm1,%ymm8,%ymm11 vpcmpeqd %ymm1,%ymm12,%ymm15 vpaddd %ymm0,%ymm4,%ymm4 vpaddd %ymm0,%ymm8,%ymm8 vpaddd %ymm0,%ymm12,%ymm12 leaq 192(%rsi),%rsi vpand %ymm7,%ymm5,%ymm5 vpand %ymm7,%ymm6,%ymm6 vpand %ymm11,%ymm9,%ymm9 vpand %ymm11,%ymm10,%ymm10 vpand %ymm15,%ymm13,%ymm13 vpand %ymm15,%ymm14,%ymm14 vpxor %ymm5,%ymm2,%ymm2 vpxor %ymm6,%ymm3,%ymm3 vpxor %ymm9,%ymm2,%ymm2 vpxor %ymm10,%ymm3,%ymm3 vpxor %ymm13,%ymm2,%ymm2 vpxor %ymm14,%ymm3,%ymm3 decq %rax jnz .Lselect_loop_avx2_w7 vmovdqa 0(%rsi),%ymm5 vmovdqa 32(%rsi),%ymm6 vpcmpeqd %ymm1,%ymm4,%ymm7 vpand %ymm7,%ymm5,%ymm5 vpand %ymm7,%ymm6,%ymm6 vpxor %ymm5,%ymm2,%ymm2 vpxor %ymm6,%ymm3,%ymm3 vmovdqu %ymm2,0(%rdi) vmovdqu %ymm3,32(%rdi) vzeroupper ret .cfi_endproc .LSEH_end_ecp_nistz256_select_w7_avx2: .size ecp_nistz256_select_w7_avx2,.-ecp_nistz256_select_w7_avx2 .type __ecp_nistz256_add_toq,@function .align 32 __ecp_nistz256_add_toq: .cfi_startproc xorq %r11,%r11 addq 0(%rbx),%r12 adcq 8(%rbx),%r13 movq %r12,%rax adcq 16(%rbx),%r8 adcq 24(%rbx),%r9 movq %r13,%rbp adcq $0,%r11 subq $-1,%r12 movq %r8,%rcx sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%r10 sbbq %r15,%r9 sbbq $0,%r11 cmovcq %rax,%r12 cmovcq %rbp,%r13 movq %r12,0(%rdi) cmovcq %rcx,%r8 movq %r13,8(%rdi) cmovcq %r10,%r9 movq %r8,16(%rdi) movq %r9,24(%rdi) ret .cfi_endproc .size __ecp_nistz256_add_toq,.-__ecp_nistz256_add_toq .type __ecp_nistz256_sub_fromq,@function .align 32 __ecp_nistz256_sub_fromq: .cfi_startproc subq 0(%rbx),%r12 sbbq 8(%rbx),%r13 movq %r12,%rax sbbq 16(%rbx),%r8 sbbq 24(%rbx),%r9 movq %r13,%rbp sbbq %r11,%r11 addq $-1,%r12 movq %r8,%rcx adcq %r14,%r13 adcq $0,%r8 movq %r9,%r10 adcq %r15,%r9 testq %r11,%r11 cmovzq %rax,%r12 cmovzq %rbp,%r13 movq %r12,0(%rdi) cmovzq %rcx,%r8 movq %r13,8(%rdi) cmovzq %r10,%r9 movq %r8,16(%rdi) movq %r9,24(%rdi) ret .cfi_endproc .size __ecp_nistz256_sub_fromq,.-__ecp_nistz256_sub_fromq .type __ecp_nistz256_subq,@function .align 32 __ecp_nistz256_subq: .cfi_startproc subq %r12,%rax sbbq %r13,%rbp movq %rax,%r12 sbbq %r8,%rcx sbbq %r9,%r10 movq %rbp,%r13 sbbq %r11,%r11 addq $-1,%rax movq %rcx,%r8 adcq %r14,%rbp adcq $0,%rcx movq %r10,%r9 adcq %r15,%r10 testq %r11,%r11 cmovnzq %rax,%r12 cmovnzq %rbp,%r13 cmovnzq %rcx,%r8 cmovnzq %r10,%r9 ret 
.cfi_endproc .size __ecp_nistz256_subq,.-__ecp_nistz256_subq .type __ecp_nistz256_mul_by_2q,@function .align 32 __ecp_nistz256_mul_by_2q: .cfi_startproc xorq %r11,%r11 addq %r12,%r12 adcq %r13,%r13 movq %r12,%rax adcq %r8,%r8 adcq %r9,%r9 movq %r13,%rbp adcq $0,%r11 subq $-1,%r12 movq %r8,%rcx sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%r10 sbbq %r15,%r9 sbbq $0,%r11 cmovcq %rax,%r12 cmovcq %rbp,%r13 movq %r12,0(%rdi) cmovcq %rcx,%r8 movq %r13,8(%rdi) cmovcq %r10,%r9 movq %r8,16(%rdi) movq %r9,24(%rdi) ret .cfi_endproc .size __ecp_nistz256_mul_by_2q,.-__ecp_nistz256_mul_by_2q .globl ecp_nistz256_point_double_nohw .hidden ecp_nistz256_point_double_nohw .type ecp_nistz256_point_double_nohw,@function .align 32 ecp_nistz256_point_double_nohw: .cfi_startproc _CET_ENDBR pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 subq $160+8,%rsp .cfi_adjust_cfa_offset 32*5+8 .Lpoint_doubleq_body: .Lpoint_double_shortcutq: movdqu 0(%rsi),%xmm0 movq %rsi,%rbx movdqu 16(%rsi),%xmm1 movq 32+0(%rsi),%r12 movq 32+8(%rsi),%r13 movq 32+16(%rsi),%r8 movq 32+24(%rsi),%r9 movq .Lpoly+8(%rip),%r14 movq .Lpoly+24(%rip),%r15 movdqa %xmm0,96(%rsp) movdqa %xmm1,96+16(%rsp) leaq 32(%rdi),%r10 leaq 64(%rdi),%r11 .byte 102,72,15,110,199 .byte 102,73,15,110,202 .byte 102,73,15,110,211 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_by_2q movq 64+0(%rsi),%rax movq 64+8(%rsi),%r14 movq 64+16(%rsi),%r15 movq 64+24(%rsi),%r8 leaq 64-0(%rsi),%rsi leaq 64(%rsp),%rdi call __ecp_nistz256_sqr_montq movq 0+0(%rsp),%rax movq 8+0(%rsp),%r14 leaq 0+0(%rsp),%rsi movq 16+0(%rsp),%r15 movq 24+0(%rsp),%r8 leaq 0(%rsp),%rdi call __ecp_nistz256_sqr_montq movq 32(%rbx),%rax movq 64+0(%rbx),%r9 movq 64+8(%rbx),%r10 movq 64+16(%rbx),%r11 movq 64+24(%rbx),%r12 leaq 64-0(%rbx),%rsi leaq 32(%rbx),%rbx .byte 102,72,15,126,215 call __ecp_nistz256_mul_montq call __ecp_nistz256_mul_by_2q movq 96+0(%rsp),%r12 movq 96+8(%rsp),%r13 leaq 64(%rsp),%rbx movq 96+16(%rsp),%r8 movq 96+24(%rsp),%r9 leaq 32(%rsp),%rdi call __ecp_nistz256_add_toq movq 96+0(%rsp),%r12 movq 96+8(%rsp),%r13 leaq 64(%rsp),%rbx movq 96+16(%rsp),%r8 movq 96+24(%rsp),%r9 leaq 64(%rsp),%rdi call __ecp_nistz256_sub_fromq movq 0+0(%rsp),%rax movq 8+0(%rsp),%r14 leaq 0+0(%rsp),%rsi movq 16+0(%rsp),%r15 movq 24+0(%rsp),%r8 .byte 102,72,15,126,207 call __ecp_nistz256_sqr_montq xorq %r9,%r9 movq %r12,%rax addq $-1,%r12 movq %r13,%r10 adcq %rsi,%r13 movq %r14,%rcx adcq $0,%r14 movq %r15,%r8 adcq %rbp,%r15 adcq $0,%r9 xorq %rsi,%rsi testq $1,%rax cmovzq %rax,%r12 cmovzq %r10,%r13 cmovzq %rcx,%r14 cmovzq %r8,%r15 cmovzq %rsi,%r9 movq %r13,%rax shrq $1,%r12 shlq $63,%rax movq %r14,%r10 shrq $1,%r13 orq %rax,%r12 shlq $63,%r10 movq %r15,%rcx shrq $1,%r14 orq %r10,%r13 shlq $63,%rcx movq %r12,0(%rdi) shrq $1,%r15 movq %r13,8(%rdi) shlq $63,%r9 orq %rcx,%r14 orq %r9,%r15 movq %r14,16(%rdi) movq %r15,24(%rdi) movq 64(%rsp),%rax leaq 64(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq 0+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 32(%rsp),%rdi call __ecp_nistz256_mul_montq leaq 128(%rsp),%rdi call __ecp_nistz256_mul_by_2q leaq 32(%rsp),%rbx leaq 32(%rsp),%rdi call __ecp_nistz256_add_toq movq 96(%rsp),%rax leaq 96(%rsp),%rbx movq 0+0(%rsp),%r9 movq 8+0(%rsp),%r10 leaq 0+0(%rsp),%rsi movq 16+0(%rsp),%r11 
movq 24+0(%rsp),%r12 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_montq leaq 128(%rsp),%rdi call __ecp_nistz256_mul_by_2q movq 0+32(%rsp),%rax movq 8+32(%rsp),%r14 leaq 0+32(%rsp),%rsi movq 16+32(%rsp),%r15 movq 24+32(%rsp),%r8 .byte 102,72,15,126,199 call __ecp_nistz256_sqr_montq leaq 128(%rsp),%rbx movq %r14,%r8 movq %r15,%r9 movq %rsi,%r14 movq %rbp,%r15 call __ecp_nistz256_sub_fromq movq 0+0(%rsp),%rax movq 0+8(%rsp),%rbp movq 0+16(%rsp),%rcx movq 0+24(%rsp),%r10 leaq 0(%rsp),%rdi call __ecp_nistz256_subq movq 32(%rsp),%rax leaq 32(%rsp),%rbx movq %r12,%r14 xorl %ecx,%ecx movq %r12,0+0(%rsp) movq %r13,%r10 movq %r13,0+8(%rsp) cmovzq %r8,%r11 movq %r8,0+16(%rsp) leaq 0-0(%rsp),%rsi cmovzq %r9,%r12 movq %r9,0+24(%rsp) movq %r14,%r9 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_montq .byte 102,72,15,126,203 .byte 102,72,15,126,207 call __ecp_nistz256_sub_fromq leaq 160+56(%rsp),%rsi .cfi_def_cfa %rsi,8 movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbx .cfi_restore %rbx movq -8(%rsi),%rbp .cfi_restore %rbp leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Lpoint_doubleq_epilogue: ret .cfi_endproc .size ecp_nistz256_point_double_nohw,.-ecp_nistz256_point_double_nohw .globl ecp_nistz256_point_add_nohw .hidden ecp_nistz256_point_add_nohw .type ecp_nistz256_point_add_nohw,@function .align 32 ecp_nistz256_point_add_nohw: .cfi_startproc _CET_ENDBR pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 subq $576+8,%rsp .cfi_adjust_cfa_offset 32*18+8 .Lpoint_addq_body: movdqu 0(%rsi),%xmm0 movdqu 16(%rsi),%xmm1 movdqu 32(%rsi),%xmm2 movdqu 48(%rsi),%xmm3 movdqu 64(%rsi),%xmm4 movdqu 80(%rsi),%xmm5 movq %rsi,%rbx movq %rdx,%rsi movdqa %xmm0,384(%rsp) movdqa %xmm1,384+16(%rsp) movdqa %xmm2,416(%rsp) movdqa %xmm3,416+16(%rsp) movdqa %xmm4,448(%rsp) movdqa %xmm5,448+16(%rsp) por %xmm4,%xmm5 movdqu 0(%rsi),%xmm0 pshufd $0xb1,%xmm5,%xmm3 movdqu 16(%rsi),%xmm1 movdqu 32(%rsi),%xmm2 por %xmm3,%xmm5 movdqu 48(%rsi),%xmm3 movq 64+0(%rsi),%rax movq 64+8(%rsi),%r14 movq 64+16(%rsi),%r15 movq 64+24(%rsi),%r8 movdqa %xmm0,480(%rsp) pshufd $0x1e,%xmm5,%xmm4 movdqa %xmm1,480+16(%rsp) movdqu 64(%rsi),%xmm0 movdqu 80(%rsi),%xmm1 movdqa %xmm2,512(%rsp) movdqa %xmm3,512+16(%rsp) por %xmm4,%xmm5 pxor %xmm4,%xmm4 por %xmm0,%xmm1 .byte 102,72,15,110,199 leaq 64-0(%rsi),%rsi movq %rax,544+0(%rsp) movq %r14,544+8(%rsp) movq %r15,544+16(%rsp) movq %r8,544+24(%rsp) leaq 96(%rsp),%rdi call __ecp_nistz256_sqr_montq pcmpeqd %xmm4,%xmm5 pshufd $0xb1,%xmm1,%xmm4 por %xmm1,%xmm4 pshufd $0,%xmm5,%xmm5 pshufd $0x1e,%xmm4,%xmm3 por %xmm3,%xmm4 pxor %xmm3,%xmm3 pcmpeqd %xmm3,%xmm4 pshufd $0,%xmm4,%xmm4 movq 64+0(%rbx),%rax movq 64+8(%rbx),%r14 movq 64+16(%rbx),%r15 movq 64+24(%rbx),%r8 .byte 102,72,15,110,203 leaq 64-0(%rbx),%rsi leaq 32(%rsp),%rdi call __ecp_nistz256_sqr_montq movq 544(%rsp),%rax leaq 544(%rsp),%rbx movq 0+96(%rsp),%r9 movq 8+96(%rsp),%r10 leaq 0+96(%rsp),%rsi movq 16+96(%rsp),%r11 movq 24+96(%rsp),%r12 leaq 224(%rsp),%rdi call __ecp_nistz256_mul_montq movq 448(%rsp),%rax leaq 448(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq 0+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 256(%rsp),%rdi call 
__ecp_nistz256_mul_montq movq 416(%rsp),%rax leaq 416(%rsp),%rbx movq 0+224(%rsp),%r9 movq 8+224(%rsp),%r10 leaq 0+224(%rsp),%rsi movq 16+224(%rsp),%r11 movq 24+224(%rsp),%r12 leaq 224(%rsp),%rdi call __ecp_nistz256_mul_montq movq 512(%rsp),%rax leaq 512(%rsp),%rbx movq 0+256(%rsp),%r9 movq 8+256(%rsp),%r10 leaq 0+256(%rsp),%rsi movq 16+256(%rsp),%r11 movq 24+256(%rsp),%r12 leaq 256(%rsp),%rdi call __ecp_nistz256_mul_montq leaq 224(%rsp),%rbx leaq 64(%rsp),%rdi call __ecp_nistz256_sub_fromq orq %r13,%r12 movdqa %xmm4,%xmm2 orq %r8,%r12 orq %r9,%r12 por %xmm5,%xmm2 .byte 102,73,15,110,220 movq 384(%rsp),%rax leaq 384(%rsp),%rbx movq 0+96(%rsp),%r9 movq 8+96(%rsp),%r10 leaq 0+96(%rsp),%rsi movq 16+96(%rsp),%r11 movq 24+96(%rsp),%r12 leaq 160(%rsp),%rdi call __ecp_nistz256_mul_montq movq 480(%rsp),%rax leaq 480(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq 0+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 192(%rsp),%rdi call __ecp_nistz256_mul_montq leaq 160(%rsp),%rbx leaq 0(%rsp),%rdi call __ecp_nistz256_sub_fromq orq %r13,%r12 orq %r8,%r12 orq %r9,%r12 .byte 102,73,15,126,208 .byte 102,73,15,126,217 orq %r8,%r12 .byte 0x3e jnz .Ladd_proceedq testq %r9,%r9 jz .Ladd_doubleq .byte 102,72,15,126,199 pxor %xmm0,%xmm0 movdqu %xmm0,0(%rdi) movdqu %xmm0,16(%rdi) movdqu %xmm0,32(%rdi) movdqu %xmm0,48(%rdi) movdqu %xmm0,64(%rdi) movdqu %xmm0,80(%rdi) jmp .Ladd_doneq .align 32 .Ladd_doubleq: .byte 102,72,15,126,206 .byte 102,72,15,126,199 addq $416,%rsp .cfi_adjust_cfa_offset -416 jmp .Lpoint_double_shortcutq .cfi_adjust_cfa_offset 416 .align 32 .Ladd_proceedq: movq 0+64(%rsp),%rax movq 8+64(%rsp),%r14 leaq 0+64(%rsp),%rsi movq 16+64(%rsp),%r15 movq 24+64(%rsp),%r8 leaq 96(%rsp),%rdi call __ecp_nistz256_sqr_montq movq 448(%rsp),%rax leaq 448(%rsp),%rbx movq 0+0(%rsp),%r9 movq 8+0(%rsp),%r10 leaq 0+0(%rsp),%rsi movq 16+0(%rsp),%r11 movq 24+0(%rsp),%r12 leaq 352(%rsp),%rdi call __ecp_nistz256_mul_montq movq 0+0(%rsp),%rax movq 8+0(%rsp),%r14 leaq 0+0(%rsp),%rsi movq 16+0(%rsp),%r15 movq 24+0(%rsp),%r8 leaq 32(%rsp),%rdi call __ecp_nistz256_sqr_montq movq 544(%rsp),%rax leaq 544(%rsp),%rbx movq 0+352(%rsp),%r9 movq 8+352(%rsp),%r10 leaq 0+352(%rsp),%rsi movq 16+352(%rsp),%r11 movq 24+352(%rsp),%r12 leaq 352(%rsp),%rdi call __ecp_nistz256_mul_montq movq 0(%rsp),%rax leaq 0(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq 0+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 128(%rsp),%rdi call __ecp_nistz256_mul_montq movq 160(%rsp),%rax leaq 160(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq 0+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 192(%rsp),%rdi call __ecp_nistz256_mul_montq xorq %r11,%r11 addq %r12,%r12 leaq 96(%rsp),%rsi adcq %r13,%r13 movq %r12,%rax adcq %r8,%r8 adcq %r9,%r9 movq %r13,%rbp adcq $0,%r11 subq $-1,%r12 movq %r8,%rcx sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%r10 sbbq %r15,%r9 sbbq $0,%r11 cmovcq %rax,%r12 movq 0(%rsi),%rax cmovcq %rbp,%r13 movq 8(%rsi),%rbp cmovcq %rcx,%r8 movq 16(%rsi),%rcx cmovcq %r10,%r9 movq 24(%rsi),%r10 call __ecp_nistz256_subq leaq 128(%rsp),%rbx leaq 288(%rsp),%rdi call __ecp_nistz256_sub_fromq movq 192+0(%rsp),%rax movq 192+8(%rsp),%rbp movq 192+16(%rsp),%rcx movq 192+24(%rsp),%r10 leaq 320(%rsp),%rdi call __ecp_nistz256_subq movq %r12,0(%rdi) movq %r13,8(%rdi) movq %r8,16(%rdi) movq %r9,24(%rdi) movq 128(%rsp),%rax leaq 128(%rsp),%rbx movq 0+224(%rsp),%r9 movq 8+224(%rsp),%r10 leaq 0+224(%rsp),%rsi movq 16+224(%rsp),%r11 movq 24+224(%rsp),%r12 leaq 256(%rsp),%rdi call 
__ecp_nistz256_mul_montq movq 320(%rsp),%rax leaq 320(%rsp),%rbx movq 0+64(%rsp),%r9 movq 8+64(%rsp),%r10 leaq 0+64(%rsp),%rsi movq 16+64(%rsp),%r11 movq 24+64(%rsp),%r12 leaq 320(%rsp),%rdi call __ecp_nistz256_mul_montq leaq 256(%rsp),%rbx leaq 320(%rsp),%rdi call __ecp_nistz256_sub_fromq .byte 102,72,15,126,199 movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 352(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 352+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 544(%rsp),%xmm2 pand 544+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 448(%rsp),%xmm2 pand 448+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,64(%rdi) movdqu %xmm3,80(%rdi) movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 288(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 288+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 480(%rsp),%xmm2 pand 480+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 384(%rsp),%xmm2 pand 384+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,0(%rdi) movdqu %xmm3,16(%rdi) movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 320(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 320+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 512(%rsp),%xmm2 pand 512+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 416(%rsp),%xmm2 pand 416+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,32(%rdi) movdqu %xmm3,48(%rdi) .Ladd_doneq: leaq 576+56(%rsp),%rsi .cfi_def_cfa %rsi,8 movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbx .cfi_restore %rbx movq -8(%rsi),%rbp .cfi_restore %rbp leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Lpoint_addq_epilogue: ret .cfi_endproc .size ecp_nistz256_point_add_nohw,.-ecp_nistz256_point_add_nohw .globl ecp_nistz256_point_add_affine_nohw .hidden ecp_nistz256_point_add_affine_nohw .type ecp_nistz256_point_add_affine_nohw,@function .align 32 ecp_nistz256_point_add_affine_nohw: .cfi_startproc _CET_ENDBR pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 subq $480+8,%rsp .cfi_adjust_cfa_offset 32*15+8 .Ladd_affineq_body: movdqu 0(%rsi),%xmm0 movq %rdx,%rbx movdqu 16(%rsi),%xmm1 movdqu 32(%rsi),%xmm2 movdqu 48(%rsi),%xmm3 movdqu 64(%rsi),%xmm4 movdqu 80(%rsi),%xmm5 movq 64+0(%rsi),%rax movq 64+8(%rsi),%r14 movq 64+16(%rsi),%r15 movq 64+24(%rsi),%r8 movdqa %xmm0,320(%rsp) movdqa %xmm1,320+16(%rsp) movdqa %xmm2,352(%rsp) movdqa %xmm3,352+16(%rsp) movdqa %xmm4,384(%rsp) movdqa %xmm5,384+16(%rsp) por %xmm4,%xmm5 movdqu 0(%rbx),%xmm0 pshufd $0xb1,%xmm5,%xmm3 movdqu 16(%rbx),%xmm1 movdqu 32(%rbx),%xmm2 por %xmm3,%xmm5 movdqu 48(%rbx),%xmm3 movdqa %xmm0,416(%rsp) pshufd $0x1e,%xmm5,%xmm4 movdqa %xmm1,416+16(%rsp) por %xmm0,%xmm1 .byte 102,72,15,110,199 movdqa %xmm2,448(%rsp) movdqa %xmm3,448+16(%rsp) por %xmm2,%xmm3 por %xmm4,%xmm5 pxor %xmm4,%xmm4 por %xmm1,%xmm3 leaq 64-0(%rsi),%rsi leaq 32(%rsp),%rdi call __ecp_nistz256_sqr_montq pcmpeqd %xmm4,%xmm5 pshufd $0xb1,%xmm3,%xmm4 movq 0(%rbx),%rax movq 
%r12,%r9 por %xmm3,%xmm4 pshufd $0,%xmm5,%xmm5 pshufd $0x1e,%xmm4,%xmm3 movq %r13,%r10 por %xmm3,%xmm4 pxor %xmm3,%xmm3 movq %r14,%r11 pcmpeqd %xmm3,%xmm4 pshufd $0,%xmm4,%xmm4 leaq 32-0(%rsp),%rsi movq %r15,%r12 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_montq leaq 320(%rsp),%rbx leaq 64(%rsp),%rdi call __ecp_nistz256_sub_fromq movq 384(%rsp),%rax leaq 384(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq 0+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 32(%rsp),%rdi call __ecp_nistz256_mul_montq movq 384(%rsp),%rax leaq 384(%rsp),%rbx movq 0+64(%rsp),%r9 movq 8+64(%rsp),%r10 leaq 0+64(%rsp),%rsi movq 16+64(%rsp),%r11 movq 24+64(%rsp),%r12 leaq 288(%rsp),%rdi call __ecp_nistz256_mul_montq movq 448(%rsp),%rax leaq 448(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq 0+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 32(%rsp),%rdi call __ecp_nistz256_mul_montq leaq 352(%rsp),%rbx leaq 96(%rsp),%rdi call __ecp_nistz256_sub_fromq movq 0+64(%rsp),%rax movq 8+64(%rsp),%r14 leaq 0+64(%rsp),%rsi movq 16+64(%rsp),%r15 movq 24+64(%rsp),%r8 leaq 128(%rsp),%rdi call __ecp_nistz256_sqr_montq movq 0+96(%rsp),%rax movq 8+96(%rsp),%r14 leaq 0+96(%rsp),%rsi movq 16+96(%rsp),%r15 movq 24+96(%rsp),%r8 leaq 192(%rsp),%rdi call __ecp_nistz256_sqr_montq movq 128(%rsp),%rax leaq 128(%rsp),%rbx movq 0+64(%rsp),%r9 movq 8+64(%rsp),%r10 leaq 0+64(%rsp),%rsi movq 16+64(%rsp),%r11 movq 24+64(%rsp),%r12 leaq 160(%rsp),%rdi call __ecp_nistz256_mul_montq movq 320(%rsp),%rax leaq 320(%rsp),%rbx movq 0+128(%rsp),%r9 movq 8+128(%rsp),%r10 leaq 0+128(%rsp),%rsi movq 16+128(%rsp),%r11 movq 24+128(%rsp),%r12 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_montq xorq %r11,%r11 addq %r12,%r12 leaq 192(%rsp),%rsi adcq %r13,%r13 movq %r12,%rax adcq %r8,%r8 adcq %r9,%r9 movq %r13,%rbp adcq $0,%r11 subq $-1,%r12 movq %r8,%rcx sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%r10 sbbq %r15,%r9 sbbq $0,%r11 cmovcq %rax,%r12 movq 0(%rsi),%rax cmovcq %rbp,%r13 movq 8(%rsi),%rbp cmovcq %rcx,%r8 movq 16(%rsi),%rcx cmovcq %r10,%r9 movq 24(%rsi),%r10 call __ecp_nistz256_subq leaq 160(%rsp),%rbx leaq 224(%rsp),%rdi call __ecp_nistz256_sub_fromq movq 0+0(%rsp),%rax movq 0+8(%rsp),%rbp movq 0+16(%rsp),%rcx movq 0+24(%rsp),%r10 leaq 64(%rsp),%rdi call __ecp_nistz256_subq movq %r12,0(%rdi) movq %r13,8(%rdi) movq %r8,16(%rdi) movq %r9,24(%rdi) movq 352(%rsp),%rax leaq 352(%rsp),%rbx movq 0+160(%rsp),%r9 movq 8+160(%rsp),%r10 leaq 0+160(%rsp),%rsi movq 16+160(%rsp),%r11 movq 24+160(%rsp),%r12 leaq 32(%rsp),%rdi call __ecp_nistz256_mul_montq movq 96(%rsp),%rax leaq 96(%rsp),%rbx movq 0+64(%rsp),%r9 movq 8+64(%rsp),%r10 leaq 0+64(%rsp),%rsi movq 16+64(%rsp),%r11 movq 24+64(%rsp),%r12 leaq 64(%rsp),%rdi call __ecp_nistz256_mul_montq leaq 32(%rsp),%rbx leaq 256(%rsp),%rdi call __ecp_nistz256_sub_fromq .byte 102,72,15,126,199 movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 288(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 288+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand .LONE_mont(%rip),%xmm2 pand .LONE_mont+16(%rip),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 384(%rsp),%xmm2 pand 384+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,64(%rdi) movdqu %xmm3,80(%rdi) movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 224(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 224+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 416(%rsp),%xmm2 pand 416+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa 
%xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 320(%rsp),%xmm2 pand 320+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,0(%rdi) movdqu %xmm3,16(%rdi) movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 256(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 256+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 448(%rsp),%xmm2 pand 448+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 352(%rsp),%xmm2 pand 352+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,32(%rdi) movdqu %xmm3,48(%rdi) leaq 480+56(%rsp),%rsi .cfi_def_cfa %rsi,8 movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbx .cfi_restore %rbx movq -8(%rsi),%rbp .cfi_restore %rbp leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Ladd_affineq_epilogue: ret .cfi_endproc .size ecp_nistz256_point_add_affine_nohw,.-ecp_nistz256_point_add_affine_nohw .type __ecp_nistz256_add_tox,@function .align 32 __ecp_nistz256_add_tox: .cfi_startproc xorq %r11,%r11 adcq 0(%rbx),%r12 adcq 8(%rbx),%r13 movq %r12,%rax adcq 16(%rbx),%r8 adcq 24(%rbx),%r9 movq %r13,%rbp adcq $0,%r11 xorq %r10,%r10 sbbq $-1,%r12 movq %r8,%rcx sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%r10 sbbq %r15,%r9 sbbq $0,%r11 cmovcq %rax,%r12 cmovcq %rbp,%r13 movq %r12,0(%rdi) cmovcq %rcx,%r8 movq %r13,8(%rdi) cmovcq %r10,%r9 movq %r8,16(%rdi) movq %r9,24(%rdi) ret .cfi_endproc .size __ecp_nistz256_add_tox,.-__ecp_nistz256_add_tox .type __ecp_nistz256_sub_fromx,@function .align 32 __ecp_nistz256_sub_fromx: .cfi_startproc xorq %r11,%r11 sbbq 0(%rbx),%r12 sbbq 8(%rbx),%r13 movq %r12,%rax sbbq 16(%rbx),%r8 sbbq 24(%rbx),%r9 movq %r13,%rbp sbbq $0,%r11 xorq %r10,%r10 adcq $-1,%r12 movq %r8,%rcx adcq %r14,%r13 adcq $0,%r8 movq %r9,%r10 adcq %r15,%r9 btq $0,%r11 cmovncq %rax,%r12 cmovncq %rbp,%r13 movq %r12,0(%rdi) cmovncq %rcx,%r8 movq %r13,8(%rdi) cmovncq %r10,%r9 movq %r8,16(%rdi) movq %r9,24(%rdi) ret .cfi_endproc .size __ecp_nistz256_sub_fromx,.-__ecp_nistz256_sub_fromx .type __ecp_nistz256_subx,@function .align 32 __ecp_nistz256_subx: .cfi_startproc xorq %r11,%r11 sbbq %r12,%rax sbbq %r13,%rbp movq %rax,%r12 sbbq %r8,%rcx sbbq %r9,%r10 movq %rbp,%r13 sbbq $0,%r11 xorq %r9,%r9 adcq $-1,%rax movq %rcx,%r8 adcq %r14,%rbp adcq $0,%rcx movq %r10,%r9 adcq %r15,%r10 btq $0,%r11 cmovcq %rax,%r12 cmovcq %rbp,%r13 cmovcq %rcx,%r8 cmovcq %r10,%r9 ret .cfi_endproc .size __ecp_nistz256_subx,.-__ecp_nistz256_subx .type __ecp_nistz256_mul_by_2x,@function .align 32 __ecp_nistz256_mul_by_2x: .cfi_startproc xorq %r11,%r11 adcq %r12,%r12 adcq %r13,%r13 movq %r12,%rax adcq %r8,%r8 adcq %r9,%r9 movq %r13,%rbp adcq $0,%r11 xorq %r10,%r10 sbbq $-1,%r12 movq %r8,%rcx sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%r10 sbbq %r15,%r9 sbbq $0,%r11 cmovcq %rax,%r12 cmovcq %rbp,%r13 movq %r12,0(%rdi) cmovcq %rcx,%r8 movq %r13,8(%rdi) cmovcq %r10,%r9 movq %r8,16(%rdi) movq %r9,24(%rdi) ret .cfi_endproc .size __ecp_nistz256_mul_by_2x,.-__ecp_nistz256_mul_by_2x .globl ecp_nistz256_point_double_adx .hidden ecp_nistz256_point_double_adx .type ecp_nistz256_point_double_adx,@function .align 32 ecp_nistz256_point_double_adx: .cfi_startproc _CET_ENDBR pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset 
%r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 subq $160+8,%rsp .cfi_adjust_cfa_offset 32*5+8 .Lpoint_doublex_body: .Lpoint_double_shortcutx: movdqu 0(%rsi),%xmm0 movq %rsi,%rbx movdqu 16(%rsi),%xmm1 movq 32+0(%rsi),%r12 movq 32+8(%rsi),%r13 movq 32+16(%rsi),%r8 movq 32+24(%rsi),%r9 movq .Lpoly+8(%rip),%r14 movq .Lpoly+24(%rip),%r15 movdqa %xmm0,96(%rsp) movdqa %xmm1,96+16(%rsp) leaq 32(%rdi),%r10 leaq 64(%rdi),%r11 .byte 102,72,15,110,199 .byte 102,73,15,110,202 .byte 102,73,15,110,211 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_by_2x movq 64+0(%rsi),%rdx movq 64+8(%rsi),%r14 movq 64+16(%rsi),%r15 movq 64+24(%rsi),%r8 leaq 64-128(%rsi),%rsi leaq 64(%rsp),%rdi call __ecp_nistz256_sqr_montx movq 0+0(%rsp),%rdx movq 8+0(%rsp),%r14 leaq -128+0(%rsp),%rsi movq 16+0(%rsp),%r15 movq 24+0(%rsp),%r8 leaq 0(%rsp),%rdi call __ecp_nistz256_sqr_montx movq 32(%rbx),%rdx movq 64+0(%rbx),%r9 movq 64+8(%rbx),%r10 movq 64+16(%rbx),%r11 movq 64+24(%rbx),%r12 leaq 64-128(%rbx),%rsi leaq 32(%rbx),%rbx .byte 102,72,15,126,215 call __ecp_nistz256_mul_montx call __ecp_nistz256_mul_by_2x movq 96+0(%rsp),%r12 movq 96+8(%rsp),%r13 leaq 64(%rsp),%rbx movq 96+16(%rsp),%r8 movq 96+24(%rsp),%r9 leaq 32(%rsp),%rdi call __ecp_nistz256_add_tox movq 96+0(%rsp),%r12 movq 96+8(%rsp),%r13 leaq 64(%rsp),%rbx movq 96+16(%rsp),%r8 movq 96+24(%rsp),%r9 leaq 64(%rsp),%rdi call __ecp_nistz256_sub_fromx movq 0+0(%rsp),%rdx movq 8+0(%rsp),%r14 leaq -128+0(%rsp),%rsi movq 16+0(%rsp),%r15 movq 24+0(%rsp),%r8 .byte 102,72,15,126,207 call __ecp_nistz256_sqr_montx xorq %r9,%r9 movq %r12,%rax addq $-1,%r12 movq %r13,%r10 adcq %rsi,%r13 movq %r14,%rcx adcq $0,%r14 movq %r15,%r8 adcq %rbp,%r15 adcq $0,%r9 xorq %rsi,%rsi testq $1,%rax cmovzq %rax,%r12 cmovzq %r10,%r13 cmovzq %rcx,%r14 cmovzq %r8,%r15 cmovzq %rsi,%r9 movq %r13,%rax shrq $1,%r12 shlq $63,%rax movq %r14,%r10 shrq $1,%r13 orq %rax,%r12 shlq $63,%r10 movq %r15,%rcx shrq $1,%r14 orq %r10,%r13 shlq $63,%rcx movq %r12,0(%rdi) shrq $1,%r15 movq %r13,8(%rdi) shlq $63,%r9 orq %rcx,%r14 orq %r9,%r15 movq %r14,16(%rdi) movq %r15,24(%rdi) movq 64(%rsp),%rdx leaq 64(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq -128+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 32(%rsp),%rdi call __ecp_nistz256_mul_montx leaq 128(%rsp),%rdi call __ecp_nistz256_mul_by_2x leaq 32(%rsp),%rbx leaq 32(%rsp),%rdi call __ecp_nistz256_add_tox movq 96(%rsp),%rdx leaq 96(%rsp),%rbx movq 0+0(%rsp),%r9 movq 8+0(%rsp),%r10 leaq -128+0(%rsp),%rsi movq 16+0(%rsp),%r11 movq 24+0(%rsp),%r12 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_montx leaq 128(%rsp),%rdi call __ecp_nistz256_mul_by_2x movq 0+32(%rsp),%rdx movq 8+32(%rsp),%r14 leaq -128+32(%rsp),%rsi movq 16+32(%rsp),%r15 movq 24+32(%rsp),%r8 .byte 102,72,15,126,199 call __ecp_nistz256_sqr_montx leaq 128(%rsp),%rbx movq %r14,%r8 movq %r15,%r9 movq %rsi,%r14 movq %rbp,%r15 call __ecp_nistz256_sub_fromx movq 0+0(%rsp),%rax movq 0+8(%rsp),%rbp movq 0+16(%rsp),%rcx movq 0+24(%rsp),%r10 leaq 0(%rsp),%rdi call __ecp_nistz256_subx movq 32(%rsp),%rdx leaq 32(%rsp),%rbx movq %r12,%r14 xorl %ecx,%ecx movq %r12,0+0(%rsp) movq %r13,%r10 movq %r13,0+8(%rsp) cmovzq %r8,%r11 movq %r8,0+16(%rsp) leaq 0-128(%rsp),%rsi cmovzq %r9,%r12 movq %r9,0+24(%rsp) movq %r14,%r9 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_montx .byte 102,72,15,126,203 .byte 102,72,15,126,207 call __ecp_nistz256_sub_fromx leaq 160+56(%rsp),%rsi .cfi_def_cfa %rsi,8 movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 
movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbx .cfi_restore %rbx movq -8(%rsi),%rbp .cfi_restore %rbp leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Lpoint_doublex_epilogue: ret .cfi_endproc .size ecp_nistz256_point_double_adx,.-ecp_nistz256_point_double_adx .globl ecp_nistz256_point_add_adx .hidden ecp_nistz256_point_add_adx .type ecp_nistz256_point_add_adx,@function .align 32 ecp_nistz256_point_add_adx: .cfi_startproc _CET_ENDBR pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 subq $576+8,%rsp .cfi_adjust_cfa_offset 32*18+8 .Lpoint_addx_body: movdqu 0(%rsi),%xmm0 movdqu 16(%rsi),%xmm1 movdqu 32(%rsi),%xmm2 movdqu 48(%rsi),%xmm3 movdqu 64(%rsi),%xmm4 movdqu 80(%rsi),%xmm5 movq %rsi,%rbx movq %rdx,%rsi movdqa %xmm0,384(%rsp) movdqa %xmm1,384+16(%rsp) movdqa %xmm2,416(%rsp) movdqa %xmm3,416+16(%rsp) movdqa %xmm4,448(%rsp) movdqa %xmm5,448+16(%rsp) por %xmm4,%xmm5 movdqu 0(%rsi),%xmm0 pshufd $0xb1,%xmm5,%xmm3 movdqu 16(%rsi),%xmm1 movdqu 32(%rsi),%xmm2 por %xmm3,%xmm5 movdqu 48(%rsi),%xmm3 movq 64+0(%rsi),%rdx movq 64+8(%rsi),%r14 movq 64+16(%rsi),%r15 movq 64+24(%rsi),%r8 movdqa %xmm0,480(%rsp) pshufd $0x1e,%xmm5,%xmm4 movdqa %xmm1,480+16(%rsp) movdqu 64(%rsi),%xmm0 movdqu 80(%rsi),%xmm1 movdqa %xmm2,512(%rsp) movdqa %xmm3,512+16(%rsp) por %xmm4,%xmm5 pxor %xmm4,%xmm4 por %xmm0,%xmm1 .byte 102,72,15,110,199 leaq 64-128(%rsi),%rsi movq %rdx,544+0(%rsp) movq %r14,544+8(%rsp) movq %r15,544+16(%rsp) movq %r8,544+24(%rsp) leaq 96(%rsp),%rdi call __ecp_nistz256_sqr_montx pcmpeqd %xmm4,%xmm5 pshufd $0xb1,%xmm1,%xmm4 por %xmm1,%xmm4 pshufd $0,%xmm5,%xmm5 pshufd $0x1e,%xmm4,%xmm3 por %xmm3,%xmm4 pxor %xmm3,%xmm3 pcmpeqd %xmm3,%xmm4 pshufd $0,%xmm4,%xmm4 movq 64+0(%rbx),%rdx movq 64+8(%rbx),%r14 movq 64+16(%rbx),%r15 movq 64+24(%rbx),%r8 .byte 102,72,15,110,203 leaq 64-128(%rbx),%rsi leaq 32(%rsp),%rdi call __ecp_nistz256_sqr_montx movq 544(%rsp),%rdx leaq 544(%rsp),%rbx movq 0+96(%rsp),%r9 movq 8+96(%rsp),%r10 leaq -128+96(%rsp),%rsi movq 16+96(%rsp),%r11 movq 24+96(%rsp),%r12 leaq 224(%rsp),%rdi call __ecp_nistz256_mul_montx movq 448(%rsp),%rdx leaq 448(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq -128+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 256(%rsp),%rdi call __ecp_nistz256_mul_montx movq 416(%rsp),%rdx leaq 416(%rsp),%rbx movq 0+224(%rsp),%r9 movq 8+224(%rsp),%r10 leaq -128+224(%rsp),%rsi movq 16+224(%rsp),%r11 movq 24+224(%rsp),%r12 leaq 224(%rsp),%rdi call __ecp_nistz256_mul_montx movq 512(%rsp),%rdx leaq 512(%rsp),%rbx movq 0+256(%rsp),%r9 movq 8+256(%rsp),%r10 leaq -128+256(%rsp),%rsi movq 16+256(%rsp),%r11 movq 24+256(%rsp),%r12 leaq 256(%rsp),%rdi call __ecp_nistz256_mul_montx leaq 224(%rsp),%rbx leaq 64(%rsp),%rdi call __ecp_nistz256_sub_fromx orq %r13,%r12 movdqa %xmm4,%xmm2 orq %r8,%r12 orq %r9,%r12 por %xmm5,%xmm2 .byte 102,73,15,110,220 movq 384(%rsp),%rdx leaq 384(%rsp),%rbx movq 0+96(%rsp),%r9 movq 8+96(%rsp),%r10 leaq -128+96(%rsp),%rsi movq 16+96(%rsp),%r11 movq 24+96(%rsp),%r12 leaq 160(%rsp),%rdi call __ecp_nistz256_mul_montx movq 480(%rsp),%rdx leaq 480(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq -128+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 192(%rsp),%rdi call __ecp_nistz256_mul_montx leaq 160(%rsp),%rbx leaq 
0(%rsp),%rdi call __ecp_nistz256_sub_fromx orq %r13,%r12 orq %r8,%r12 orq %r9,%r12 .byte 102,73,15,126,208 .byte 102,73,15,126,217 orq %r8,%r12 .byte 0x3e jnz .Ladd_proceedx testq %r9,%r9 jz .Ladd_doublex .byte 102,72,15,126,199 pxor %xmm0,%xmm0 movdqu %xmm0,0(%rdi) movdqu %xmm0,16(%rdi) movdqu %xmm0,32(%rdi) movdqu %xmm0,48(%rdi) movdqu %xmm0,64(%rdi) movdqu %xmm0,80(%rdi) jmp .Ladd_donex .align 32 .Ladd_doublex: .byte 102,72,15,126,206 .byte 102,72,15,126,199 addq $416,%rsp .cfi_adjust_cfa_offset -416 jmp .Lpoint_double_shortcutx .cfi_adjust_cfa_offset 416 .align 32 .Ladd_proceedx: movq 0+64(%rsp),%rdx movq 8+64(%rsp),%r14 leaq -128+64(%rsp),%rsi movq 16+64(%rsp),%r15 movq 24+64(%rsp),%r8 leaq 96(%rsp),%rdi call __ecp_nistz256_sqr_montx movq 448(%rsp),%rdx leaq 448(%rsp),%rbx movq 0+0(%rsp),%r9 movq 8+0(%rsp),%r10 leaq -128+0(%rsp),%rsi movq 16+0(%rsp),%r11 movq 24+0(%rsp),%r12 leaq 352(%rsp),%rdi call __ecp_nistz256_mul_montx movq 0+0(%rsp),%rdx movq 8+0(%rsp),%r14 leaq -128+0(%rsp),%rsi movq 16+0(%rsp),%r15 movq 24+0(%rsp),%r8 leaq 32(%rsp),%rdi call __ecp_nistz256_sqr_montx movq 544(%rsp),%rdx leaq 544(%rsp),%rbx movq 0+352(%rsp),%r9 movq 8+352(%rsp),%r10 leaq -128+352(%rsp),%rsi movq 16+352(%rsp),%r11 movq 24+352(%rsp),%r12 leaq 352(%rsp),%rdi call __ecp_nistz256_mul_montx movq 0(%rsp),%rdx leaq 0(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq -128+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 128(%rsp),%rdi call __ecp_nistz256_mul_montx movq 160(%rsp),%rdx leaq 160(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq -128+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 192(%rsp),%rdi call __ecp_nistz256_mul_montx xorq %r11,%r11 addq %r12,%r12 leaq 96(%rsp),%rsi adcq %r13,%r13 movq %r12,%rax adcq %r8,%r8 adcq %r9,%r9 movq %r13,%rbp adcq $0,%r11 subq $-1,%r12 movq %r8,%rcx sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%r10 sbbq %r15,%r9 sbbq $0,%r11 cmovcq %rax,%r12 movq 0(%rsi),%rax cmovcq %rbp,%r13 movq 8(%rsi),%rbp cmovcq %rcx,%r8 movq 16(%rsi),%rcx cmovcq %r10,%r9 movq 24(%rsi),%r10 call __ecp_nistz256_subx leaq 128(%rsp),%rbx leaq 288(%rsp),%rdi call __ecp_nistz256_sub_fromx movq 192+0(%rsp),%rax movq 192+8(%rsp),%rbp movq 192+16(%rsp),%rcx movq 192+24(%rsp),%r10 leaq 320(%rsp),%rdi call __ecp_nistz256_subx movq %r12,0(%rdi) movq %r13,8(%rdi) movq %r8,16(%rdi) movq %r9,24(%rdi) movq 128(%rsp),%rdx leaq 128(%rsp),%rbx movq 0+224(%rsp),%r9 movq 8+224(%rsp),%r10 leaq -128+224(%rsp),%rsi movq 16+224(%rsp),%r11 movq 24+224(%rsp),%r12 leaq 256(%rsp),%rdi call __ecp_nistz256_mul_montx movq 320(%rsp),%rdx leaq 320(%rsp),%rbx movq 0+64(%rsp),%r9 movq 8+64(%rsp),%r10 leaq -128+64(%rsp),%rsi movq 16+64(%rsp),%r11 movq 24+64(%rsp),%r12 leaq 320(%rsp),%rdi call __ecp_nistz256_mul_montx leaq 256(%rsp),%rbx leaq 320(%rsp),%rdi call __ecp_nistz256_sub_fromx .byte 102,72,15,126,199 movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 352(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 352+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 544(%rsp),%xmm2 pand 544+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 448(%rsp),%xmm2 pand 448+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,64(%rdi) movdqu %xmm3,80(%rdi) movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 288(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 288+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 480(%rsp),%xmm2 pand 480+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 
movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 384(%rsp),%xmm2 pand 384+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,0(%rdi) movdqu %xmm3,16(%rdi) movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 320(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 320+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 512(%rsp),%xmm2 pand 512+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 416(%rsp),%xmm2 pand 416+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,32(%rdi) movdqu %xmm3,48(%rdi) .Ladd_donex: leaq 576+56(%rsp),%rsi .cfi_def_cfa %rsi,8 movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbx .cfi_restore %rbx movq -8(%rsi),%rbp .cfi_restore %rbp leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Lpoint_addx_epilogue: ret .cfi_endproc .size ecp_nistz256_point_add_adx,.-ecp_nistz256_point_add_adx .globl ecp_nistz256_point_add_affine_adx .hidden ecp_nistz256_point_add_affine_adx .type ecp_nistz256_point_add_affine_adx,@function .align 32 ecp_nistz256_point_add_affine_adx: .cfi_startproc _CET_ENDBR pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 subq $480+8,%rsp .cfi_adjust_cfa_offset 32*15+8 .Ladd_affinex_body: movdqu 0(%rsi),%xmm0 movq %rdx,%rbx movdqu 16(%rsi),%xmm1 movdqu 32(%rsi),%xmm2 movdqu 48(%rsi),%xmm3 movdqu 64(%rsi),%xmm4 movdqu 80(%rsi),%xmm5 movq 64+0(%rsi),%rdx movq 64+8(%rsi),%r14 movq 64+16(%rsi),%r15 movq 64+24(%rsi),%r8 movdqa %xmm0,320(%rsp) movdqa %xmm1,320+16(%rsp) movdqa %xmm2,352(%rsp) movdqa %xmm3,352+16(%rsp) movdqa %xmm4,384(%rsp) movdqa %xmm5,384+16(%rsp) por %xmm4,%xmm5 movdqu 0(%rbx),%xmm0 pshufd $0xb1,%xmm5,%xmm3 movdqu 16(%rbx),%xmm1 movdqu 32(%rbx),%xmm2 por %xmm3,%xmm5 movdqu 48(%rbx),%xmm3 movdqa %xmm0,416(%rsp) pshufd $0x1e,%xmm5,%xmm4 movdqa %xmm1,416+16(%rsp) por %xmm0,%xmm1 .byte 102,72,15,110,199 movdqa %xmm2,448(%rsp) movdqa %xmm3,448+16(%rsp) por %xmm2,%xmm3 por %xmm4,%xmm5 pxor %xmm4,%xmm4 por %xmm1,%xmm3 leaq 64-128(%rsi),%rsi leaq 32(%rsp),%rdi call __ecp_nistz256_sqr_montx pcmpeqd %xmm4,%xmm5 pshufd $0xb1,%xmm3,%xmm4 movq 0(%rbx),%rdx movq %r12,%r9 por %xmm3,%xmm4 pshufd $0,%xmm5,%xmm5 pshufd $0x1e,%xmm4,%xmm3 movq %r13,%r10 por %xmm3,%xmm4 pxor %xmm3,%xmm3 movq %r14,%r11 pcmpeqd %xmm3,%xmm4 pshufd $0,%xmm4,%xmm4 leaq 32-128(%rsp),%rsi movq %r15,%r12 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_montx leaq 320(%rsp),%rbx leaq 64(%rsp),%rdi call __ecp_nistz256_sub_fromx movq 384(%rsp),%rdx leaq 384(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq -128+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 32(%rsp),%rdi call __ecp_nistz256_mul_montx movq 384(%rsp),%rdx leaq 384(%rsp),%rbx movq 0+64(%rsp),%r9 movq 8+64(%rsp),%r10 leaq -128+64(%rsp),%rsi movq 16+64(%rsp),%r11 movq 24+64(%rsp),%r12 leaq 288(%rsp),%rdi call __ecp_nistz256_mul_montx movq 448(%rsp),%rdx leaq 448(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq -128+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 32(%rsp),%rdi call __ecp_nistz256_mul_montx leaq 352(%rsp),%rbx leaq 96(%rsp),%rdi call __ecp_nistz256_sub_fromx movq 
0+64(%rsp),%rdx movq 8+64(%rsp),%r14 leaq -128+64(%rsp),%rsi movq 16+64(%rsp),%r15 movq 24+64(%rsp),%r8 leaq 128(%rsp),%rdi call __ecp_nistz256_sqr_montx movq 0+96(%rsp),%rdx movq 8+96(%rsp),%r14 leaq -128+96(%rsp),%rsi movq 16+96(%rsp),%r15 movq 24+96(%rsp),%r8 leaq 192(%rsp),%rdi call __ecp_nistz256_sqr_montx movq 128(%rsp),%rdx leaq 128(%rsp),%rbx movq 0+64(%rsp),%r9 movq 8+64(%rsp),%r10 leaq -128+64(%rsp),%rsi movq 16+64(%rsp),%r11 movq 24+64(%rsp),%r12 leaq 160(%rsp),%rdi call __ecp_nistz256_mul_montx movq 320(%rsp),%rdx leaq 320(%rsp),%rbx movq 0+128(%rsp),%r9 movq 8+128(%rsp),%r10 leaq -128+128(%rsp),%rsi movq 16+128(%rsp),%r11 movq 24+128(%rsp),%r12 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_montx xorq %r11,%r11 addq %r12,%r12 leaq 192(%rsp),%rsi adcq %r13,%r13 movq %r12,%rax adcq %r8,%r8 adcq %r9,%r9 movq %r13,%rbp adcq $0,%r11 subq $-1,%r12 movq %r8,%rcx sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%r10 sbbq %r15,%r9 sbbq $0,%r11 cmovcq %rax,%r12 movq 0(%rsi),%rax cmovcq %rbp,%r13 movq 8(%rsi),%rbp cmovcq %rcx,%r8 movq 16(%rsi),%rcx cmovcq %r10,%r9 movq 24(%rsi),%r10 call __ecp_nistz256_subx leaq 160(%rsp),%rbx leaq 224(%rsp),%rdi call __ecp_nistz256_sub_fromx movq 0+0(%rsp),%rax movq 0+8(%rsp),%rbp movq 0+16(%rsp),%rcx movq 0+24(%rsp),%r10 leaq 64(%rsp),%rdi call __ecp_nistz256_subx movq %r12,0(%rdi) movq %r13,8(%rdi) movq %r8,16(%rdi) movq %r9,24(%rdi) movq 352(%rsp),%rdx leaq 352(%rsp),%rbx movq 0+160(%rsp),%r9 movq 8+160(%rsp),%r10 leaq -128+160(%rsp),%rsi movq 16+160(%rsp),%r11 movq 24+160(%rsp),%r12 leaq 32(%rsp),%rdi call __ecp_nistz256_mul_montx movq 96(%rsp),%rdx leaq 96(%rsp),%rbx movq 0+64(%rsp),%r9 movq 8+64(%rsp),%r10 leaq -128+64(%rsp),%rsi movq 16+64(%rsp),%r11 movq 24+64(%rsp),%r12 leaq 64(%rsp),%rdi call __ecp_nistz256_mul_montx leaq 32(%rsp),%rbx leaq 256(%rsp),%rdi call __ecp_nistz256_sub_fromx .byte 102,72,15,126,199 movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 288(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 288+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand .LONE_mont(%rip),%xmm2 pand .LONE_mont+16(%rip),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 384(%rsp),%xmm2 pand 384+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,64(%rdi) movdqu %xmm3,80(%rdi) movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 224(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 224+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 416(%rsp),%xmm2 pand 416+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 320(%rsp),%xmm2 pand 320+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,0(%rdi) movdqu %xmm3,16(%rdi) movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 256(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 256+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 448(%rsp),%xmm2 pand 448+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 352(%rsp),%xmm2 pand 352+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,32(%rdi) movdqu %xmm3,48(%rdi) leaq 480+56(%rsp),%rsi .cfi_def_cfa %rsi,8 movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbx .cfi_restore %rbx movq -8(%rsi),%rbp .cfi_restore %rbp leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Ladd_affinex_epilogue: ret .cfi_endproc .size 
ecp_nistz256_point_add_affine_adx,.-ecp_nistz256_point_add_affine_adx
#endif
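The pregenerated ecp_nistz256 point-add and point-add-affine routines above repeatedly build all-ones/all-zeros masks with pcmpeqd/pshufd and then blend candidate results with pand/pandn/por, so the choice between "computed sum", "copy of the first input" and "copy of the second input" never takes a branch. Below is a minimal Rust sketch of that masked-select idea; the name ct_select and the four-limb element shape are assumptions made purely for illustration and are not part of the generated assembly or of ring's API.

// Branch-free select between two 4-limb values. `mask` is assumed to be
// either all-ones or all-zeros, which is how the assembly prepares its
// xmm4/xmm5 "input is the point at infinity" masks before the
// pand/pandn/por blends.
fn ct_select(mask: u64, if_set: &[u64; 4], if_clear: &[u64; 4]) -> [u64; 4] {
    let mut out = [0u64; 4];
    for i in 0..4 {
        out[i] = (if_set[i] & mask) | (if_clear[i] & !mask);
    }
    out
}

Applying such a blend twice, once per infinity mask, is what lets the assembly finish with a single straight-line store sequence that covers all three cases.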
fatiimajamiil/rustpad-custom
30,650
.cargo/registry/src/index.crates.io-6f17d22bba15001f/ring-0.17.14/pregenerated/armv8-mont-win64.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) .text .globl bn_mul_mont_nohw .def bn_mul_mont_nohw .type 32 .endef .align 5 bn_mul_mont_nohw: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-64]! add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] ldr x9,[x2],#8 // bp[0] sub x22,sp,x5,lsl#3 ldp x7,x8,[x1],#16 // ap[0..1] lsl x5,x5,#3 ldr x4,[x4] // *n0 and x22,x22,#-16 // ABI says so ldp x13,x14,[x3],#16 // np[0..1] mul x6,x7,x9 // ap[0]*bp[0] sub x21,x5,#16 // j=num-2 umulh x7,x7,x9 mul x10,x8,x9 // ap[1]*bp[0] umulh x11,x8,x9 mul x15,x6,x4 // "tp[0]"*n0 mov sp,x22 // alloca // (*) mul x12,x13,x15 // np[0]*m1 umulh x13,x13,x15 mul x16,x14,x15 // np[1]*m1 // (*) adds x12,x12,x6 // discarded // (*) As for removal of first multiplication and addition // instructions. The outcome of first addition is // guaranteed to be zero, which leaves two computationally // significant outcomes: it either carries or not. Then // question is when does it carry? Is there alternative // way to deduce it? If you follow operations, you can // observe that condition for carry is quite simple: // x6 being non-zero. So that carry can be calculated // by adding -1 to x6. That's what next instruction does. subs xzr,x6,#1 // (*) umulh x17,x14,x15 adc x13,x13,xzr cbz x21,L1st_skip L1st: ldr x8,[x1],#8 adds x6,x10,x7 sub x21,x21,#8 // j-- adc x7,x11,xzr ldr x14,[x3],#8 adds x12,x16,x13 mul x10,x8,x9 // ap[j]*bp[0] adc x13,x17,xzr umulh x11,x8,x9 adds x12,x12,x6 mul x16,x14,x15 // np[j]*m1 adc x13,x13,xzr umulh x17,x14,x15 str x12,[x22],#8 // tp[j-1] cbnz x21,L1st L1st_skip: adds x6,x10,x7 sub x1,x1,x5 // rewind x1 adc x7,x11,xzr adds x12,x16,x13 sub x3,x3,x5 // rewind x3 adc x13,x17,xzr adds x12,x12,x6 sub x20,x5,#8 // i=num-1 adcs x13,x13,x7 adc x19,xzr,xzr // upmost overflow bit stp x12,x13,[x22] Louter: ldr x9,[x2],#8 // bp[i] ldp x7,x8,[x1],#16 ldr x23,[sp] // tp[0] add x22,sp,#8 mul x6,x7,x9 // ap[0]*bp[i] sub x21,x5,#16 // j=num-2 umulh x7,x7,x9 ldp x13,x14,[x3],#16 mul x10,x8,x9 // ap[1]*bp[i] adds x6,x6,x23 umulh x11,x8,x9 adc x7,x7,xzr mul x15,x6,x4 sub x20,x20,#8 // i-- // (*) mul x12,x13,x15 // np[0]*m1 umulh x13,x13,x15 mul x16,x14,x15 // np[1]*m1 // (*) adds x12,x12,x6 subs xzr,x6,#1 // (*) umulh x17,x14,x15 cbz x21,Linner_skip Linner: ldr x8,[x1],#8 adc x13,x13,xzr ldr x23,[x22],#8 // tp[j] adds x6,x10,x7 sub x21,x21,#8 // j-- adc x7,x11,xzr adds x12,x16,x13 ldr x14,[x3],#8 adc x13,x17,xzr mul x10,x8,x9 // ap[j]*bp[i] adds x6,x6,x23 umulh x11,x8,x9 adc x7,x7,xzr mul x16,x14,x15 // np[j]*m1 adds x12,x12,x6 umulh x17,x14,x15 str x12,[x22,#-16] // tp[j-1] cbnz x21,Linner Linner_skip: ldr x23,[x22],#8 // tp[j] adc x13,x13,xzr adds x6,x10,x7 sub x1,x1,x5 // rewind x1 adc x7,x11,xzr adds x12,x16,x13 sub x3,x3,x5 // rewind x3 adcs x13,x17,x19 adc x19,xzr,xzr adds x6,x6,x23 adc x7,x7,xzr adds x12,x12,x6 adcs x13,x13,x7 adc x19,x19,xzr // upmost overflow bit stp x12,x13,[x22,#-16] cbnz x20,Louter // Final step. We see if result is larger than modulus, and // if it is, subtract the modulus. But comparison implies // subtraction. So we subtract modulus, see if it borrowed, // and conditionally copy original value. 
ldr x23,[sp] // tp[0] add x22,sp,#8 ldr x14,[x3],#8 // np[0] subs x21,x5,#8 // j=num-1 and clear borrow mov x1,x0 Lsub: sbcs x8,x23,x14 // tp[j]-np[j] ldr x23,[x22],#8 sub x21,x21,#8 // j-- ldr x14,[x3],#8 str x8,[x1],#8 // rp[j]=tp[j]-np[j] cbnz x21,Lsub sbcs x8,x23,x14 sbcs x19,x19,xzr // did it borrow? str x8,[x1],#8 // rp[num-1] ldr x23,[sp] // tp[0] add x22,sp,#8 ldr x8,[x0],#8 // rp[0] sub x5,x5,#8 // num-- nop Lcond_copy: sub x5,x5,#8 // num-- csel x14,x23,x8,lo // did it borrow? ldr x23,[x22],#8 ldr x8,[x0],#8 str xzr,[x22,#-16] // wipe tp str x14,[x0,#-16] cbnz x5,Lcond_copy csel x14,x23,x8,lo str xzr,[x22,#-8] // wipe tp str x14,[x0,#-8] ldp x19,x20,[x29,#16] mov sp,x29 ldp x21,x22,[x29,#32] mov x0,#1 ldp x23,x24,[x29,#48] ldr x29,[sp],#64 AARCH64_VALIDATE_LINK_REGISTER ret .globl bn_sqr8x_mont .def bn_sqr8x_mont .type 32 .endef .align 5 bn_sqr8x_mont: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-128]! add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] stp x0,x3,[sp,#96] // offload rp and np ldp x6,x7,[x1,#8*0] ldp x8,x9,[x1,#8*2] ldp x10,x11,[x1,#8*4] ldp x12,x13,[x1,#8*6] sub x2,sp,x5,lsl#4 lsl x5,x5,#3 ldr x4,[x4] // *n0 mov sp,x2 // alloca sub x27,x5,#8*8 b Lsqr8x_zero_start Lsqr8x_zero: sub x27,x27,#8*8 stp xzr,xzr,[x2,#8*0] stp xzr,xzr,[x2,#8*2] stp xzr,xzr,[x2,#8*4] stp xzr,xzr,[x2,#8*6] Lsqr8x_zero_start: stp xzr,xzr,[x2,#8*8] stp xzr,xzr,[x2,#8*10] stp xzr,xzr,[x2,#8*12] stp xzr,xzr,[x2,#8*14] add x2,x2,#8*16 cbnz x27,Lsqr8x_zero add x3,x1,x5 add x1,x1,#8*8 mov x19,xzr mov x20,xzr mov x21,xzr mov x22,xzr mov x23,xzr mov x24,xzr mov x25,xzr mov x26,xzr mov x2,sp str x4,[x29,#112] // offload n0 // Multiply everything but a[i]*a[i] .align 4 Lsqr8x_outer_loop: // a[1]a[0] (i) // a[2]a[0] // a[3]a[0] // a[4]a[0] // a[5]a[0] // a[6]a[0] // a[7]a[0] // a[2]a[1] (ii) // a[3]a[1] // a[4]a[1] // a[5]a[1] // a[6]a[1] // a[7]a[1] // a[3]a[2] (iii) // a[4]a[2] // a[5]a[2] // a[6]a[2] // a[7]a[2] // a[4]a[3] (iv) // a[5]a[3] // a[6]a[3] // a[7]a[3] // a[5]a[4] (v) // a[6]a[4] // a[7]a[4] // a[6]a[5] (vi) // a[7]a[5] // a[7]a[6] (vii) mul x14,x7,x6 // lo(a[1..7]*a[0]) (i) mul x15,x8,x6 mul x16,x9,x6 mul x17,x10,x6 adds x20,x20,x14 // t[1]+lo(a[1]*a[0]) mul x14,x11,x6 adcs x21,x21,x15 mul x15,x12,x6 adcs x22,x22,x16 mul x16,x13,x6 adcs x23,x23,x17 umulh x17,x7,x6 // hi(a[1..7]*a[0]) adcs x24,x24,x14 umulh x14,x8,x6 adcs x25,x25,x15 umulh x15,x9,x6 adcs x26,x26,x16 umulh x16,x10,x6 stp x19,x20,[x2],#8*2 // t[0..1] adc x19,xzr,xzr // t[8] adds x21,x21,x17 // t[2]+lo(a[1]*a[0]) umulh x17,x11,x6 adcs x22,x22,x14 umulh x14,x12,x6 adcs x23,x23,x15 umulh x15,x13,x6 adcs x24,x24,x16 mul x16,x8,x7 // lo(a[2..7]*a[1]) (ii) adcs x25,x25,x17 mul x17,x9,x7 adcs x26,x26,x14 mul x14,x10,x7 adc x19,x19,x15 mul x15,x11,x7 adds x22,x22,x16 mul x16,x12,x7 adcs x23,x23,x17 mul x17,x13,x7 adcs x24,x24,x14 umulh x14,x8,x7 // hi(a[2..7]*a[1]) adcs x25,x25,x15 umulh x15,x9,x7 adcs x26,x26,x16 umulh x16,x10,x7 adcs x19,x19,x17 umulh x17,x11,x7 stp x21,x22,[x2],#8*2 // t[2..3] adc x20,xzr,xzr // t[9] adds x23,x23,x14 umulh x14,x12,x7 adcs x24,x24,x15 umulh x15,x13,x7 adcs x25,x25,x16 mul x16,x9,x8 // lo(a[3..7]*a[2]) (iii) adcs x26,x26,x17 mul x17,x10,x8 adcs x19,x19,x14 mul x14,x11,x8 adc x20,x20,x15 mul x15,x12,x8 adds x24,x24,x16 mul x16,x13,x8 adcs x25,x25,x17 umulh x17,x9,x8 // hi(a[3..7]*a[2]) adcs x26,x26,x14 umulh x14,x10,x8 adcs x19,x19,x15 umulh x15,x11,x8 adcs x20,x20,x16 umulh x16,x12,x8 stp x23,x24,[x2],#8*2 // t[4..5] adc 
x21,xzr,xzr // t[10] adds x25,x25,x17 umulh x17,x13,x8 adcs x26,x26,x14 mul x14,x10,x9 // lo(a[4..7]*a[3]) (iv) adcs x19,x19,x15 mul x15,x11,x9 adcs x20,x20,x16 mul x16,x12,x9 adc x21,x21,x17 mul x17,x13,x9 adds x26,x26,x14 umulh x14,x10,x9 // hi(a[4..7]*a[3]) adcs x19,x19,x15 umulh x15,x11,x9 adcs x20,x20,x16 umulh x16,x12,x9 adcs x21,x21,x17 umulh x17,x13,x9 stp x25,x26,[x2],#8*2 // t[6..7] adc x22,xzr,xzr // t[11] adds x19,x19,x14 mul x14,x11,x10 // lo(a[5..7]*a[4]) (v) adcs x20,x20,x15 mul x15,x12,x10 adcs x21,x21,x16 mul x16,x13,x10 adc x22,x22,x17 umulh x17,x11,x10 // hi(a[5..7]*a[4]) adds x20,x20,x14 umulh x14,x12,x10 adcs x21,x21,x15 umulh x15,x13,x10 adcs x22,x22,x16 mul x16,x12,x11 // lo(a[6..7]*a[5]) (vi) adc x23,xzr,xzr // t[12] adds x21,x21,x17 mul x17,x13,x11 adcs x22,x22,x14 umulh x14,x12,x11 // hi(a[6..7]*a[5]) adc x23,x23,x15 umulh x15,x13,x11 adds x22,x22,x16 mul x16,x13,x12 // lo(a[7]*a[6]) (vii) adcs x23,x23,x17 umulh x17,x13,x12 // hi(a[7]*a[6]) adc x24,xzr,xzr // t[13] adds x23,x23,x14 sub x27,x3,x1 // done yet? adc x24,x24,x15 adds x24,x24,x16 sub x14,x3,x5 // rewinded ap adc x25,xzr,xzr // t[14] add x25,x25,x17 cbz x27,Lsqr8x_outer_break mov x4,x6 ldp x6,x7,[x2,#8*0] ldp x8,x9,[x2,#8*2] ldp x10,x11,[x2,#8*4] ldp x12,x13,[x2,#8*6] adds x19,x19,x6 adcs x20,x20,x7 ldp x6,x7,[x1,#8*0] adcs x21,x21,x8 adcs x22,x22,x9 ldp x8,x9,[x1,#8*2] adcs x23,x23,x10 adcs x24,x24,x11 ldp x10,x11,[x1,#8*4] adcs x25,x25,x12 mov x0,x1 adcs x26,xzr,x13 ldp x12,x13,[x1,#8*6] add x1,x1,#8*8 //adc x28,xzr,xzr // moved below mov x27,#-8*8 // a[8]a[0] // a[9]a[0] // a[a]a[0] // a[b]a[0] // a[c]a[0] // a[d]a[0] // a[e]a[0] // a[f]a[0] // a[8]a[1] // a[f]a[1]........................ // a[8]a[2] // a[f]a[2]........................ // a[8]a[3] // a[f]a[3]........................ // a[8]a[4] // a[f]a[4]........................ // a[8]a[5] // a[f]a[5]........................ // a[8]a[6] // a[f]a[6]........................ // a[8]a[7] // a[f]a[7]........................ Lsqr8x_mul: mul x14,x6,x4 adc x28,xzr,xzr // carry bit, modulo-scheduled mul x15,x7,x4 add x27,x27,#8 mul x16,x8,x4 mul x17,x9,x4 adds x19,x19,x14 mul x14,x10,x4 adcs x20,x20,x15 mul x15,x11,x4 adcs x21,x21,x16 mul x16,x12,x4 adcs x22,x22,x17 mul x17,x13,x4 adcs x23,x23,x14 umulh x14,x6,x4 adcs x24,x24,x15 umulh x15,x7,x4 adcs x25,x25,x16 umulh x16,x8,x4 adcs x26,x26,x17 umulh x17,x9,x4 adc x28,x28,xzr str x19,[x2],#8 adds x19,x20,x14 umulh x14,x10,x4 adcs x20,x21,x15 umulh x15,x11,x4 adcs x21,x22,x16 umulh x16,x12,x4 adcs x22,x23,x17 umulh x17,x13,x4 ldr x4,[x0,x27] adcs x23,x24,x14 adcs x24,x25,x15 adcs x25,x26,x16 adcs x26,x28,x17 //adc x28,xzr,xzr // moved above cbnz x27,Lsqr8x_mul // note that carry flag is guaranteed // to be zero at this point cmp x1,x3 // done yet? b.eq Lsqr8x_break ldp x6,x7,[x2,#8*0] ldp x8,x9,[x2,#8*2] ldp x10,x11,[x2,#8*4] ldp x12,x13,[x2,#8*6] adds x19,x19,x6 ldr x4,[x0,#-8*8] adcs x20,x20,x7 ldp x6,x7,[x1,#8*0] adcs x21,x21,x8 adcs x22,x22,x9 ldp x8,x9,[x1,#8*2] adcs x23,x23,x10 adcs x24,x24,x11 ldp x10,x11,[x1,#8*4] adcs x25,x25,x12 mov x27,#-8*8 adcs x26,x26,x13 ldp x12,x13,[x1,#8*6] add x1,x1,#8*8 //adc x28,xzr,xzr // moved above b Lsqr8x_mul .align 4 Lsqr8x_break: ldp x6,x7,[x0,#8*0] add x1,x0,#8*8 ldp x8,x9,[x0,#8*2] sub x14,x3,x1 // is it last iteration? 
ldp x10,x11,[x0,#8*4] sub x15,x2,x14 ldp x12,x13,[x0,#8*6] cbz x14,Lsqr8x_outer_loop stp x19,x20,[x2,#8*0] ldp x19,x20,[x15,#8*0] stp x21,x22,[x2,#8*2] ldp x21,x22,[x15,#8*2] stp x23,x24,[x2,#8*4] ldp x23,x24,[x15,#8*4] stp x25,x26,[x2,#8*6] mov x2,x15 ldp x25,x26,[x15,#8*6] b Lsqr8x_outer_loop .align 4 Lsqr8x_outer_break: // Now multiply above result by 2 and add a[n-1]*a[n-1]|...|a[0]*a[0] ldp x7,x9,[x14,#8*0] // recall that x14 is &a[0] ldp x15,x16,[sp,#8*1] ldp x11,x13,[x14,#8*2] add x1,x14,#8*4 ldp x17,x14,[sp,#8*3] stp x19,x20,[x2,#8*0] mul x19,x7,x7 stp x21,x22,[x2,#8*2] umulh x7,x7,x7 stp x23,x24,[x2,#8*4] mul x8,x9,x9 stp x25,x26,[x2,#8*6] mov x2,sp umulh x9,x9,x9 adds x20,x7,x15,lsl#1 extr x15,x16,x15,#63 sub x27,x5,#8*4 Lsqr4x_shift_n_add: adcs x21,x8,x15 extr x16,x17,x16,#63 sub x27,x27,#8*4 adcs x22,x9,x16 ldp x15,x16,[x2,#8*5] mul x10,x11,x11 ldp x7,x9,[x1],#8*2 umulh x11,x11,x11 mul x12,x13,x13 umulh x13,x13,x13 extr x17,x14,x17,#63 stp x19,x20,[x2,#8*0] adcs x23,x10,x17 extr x14,x15,x14,#63 stp x21,x22,[x2,#8*2] adcs x24,x11,x14 ldp x17,x14,[x2,#8*7] extr x15,x16,x15,#63 adcs x25,x12,x15 extr x16,x17,x16,#63 adcs x26,x13,x16 ldp x15,x16,[x2,#8*9] mul x6,x7,x7 ldp x11,x13,[x1],#8*2 umulh x7,x7,x7 mul x8,x9,x9 umulh x9,x9,x9 stp x23,x24,[x2,#8*4] extr x17,x14,x17,#63 stp x25,x26,[x2,#8*6] add x2,x2,#8*8 adcs x19,x6,x17 extr x14,x15,x14,#63 adcs x20,x7,x14 ldp x17,x14,[x2,#8*3] extr x15,x16,x15,#63 cbnz x27,Lsqr4x_shift_n_add ldp x1,x4,[x29,#104] // pull np and n0 adcs x21,x8,x15 extr x16,x17,x16,#63 adcs x22,x9,x16 ldp x15,x16,[x2,#8*5] mul x10,x11,x11 umulh x11,x11,x11 stp x19,x20,[x2,#8*0] mul x12,x13,x13 umulh x13,x13,x13 stp x21,x22,[x2,#8*2] extr x17,x14,x17,#63 adcs x23,x10,x17 extr x14,x15,x14,#63 ldp x19,x20,[sp,#8*0] adcs x24,x11,x14 extr x15,x16,x15,#63 ldp x6,x7,[x1,#8*0] adcs x25,x12,x15 extr x16,xzr,x16,#63 ldp x8,x9,[x1,#8*2] adc x26,x13,x16 ldp x10,x11,[x1,#8*4] // Reduce by 512 bits per iteration mul x28,x4,x19 // t[0]*n0 ldp x12,x13,[x1,#8*6] add x3,x1,x5 ldp x21,x22,[sp,#8*2] stp x23,x24,[x2,#8*4] ldp x23,x24,[sp,#8*4] stp x25,x26,[x2,#8*6] ldp x25,x26,[sp,#8*6] add x1,x1,#8*8 mov x30,xzr // initial top-most carry mov x2,sp mov x27,#8 Lsqr8x_reduction: // (*) mul x14,x6,x28 // lo(n[0-7])*lo(t[0]*n0) mul x15,x7,x28 sub x27,x27,#1 mul x16,x8,x28 str x28,[x2],#8 // put aside t[0]*n0 for tail processing mul x17,x9,x28 // (*) adds xzr,x19,x14 subs xzr,x19,#1 // (*) mul x14,x10,x28 adcs x19,x20,x15 mul x15,x11,x28 adcs x20,x21,x16 mul x16,x12,x28 adcs x21,x22,x17 mul x17,x13,x28 adcs x22,x23,x14 umulh x14,x6,x28 // hi(n[0-7])*lo(t[0]*n0) adcs x23,x24,x15 umulh x15,x7,x28 adcs x24,x25,x16 umulh x16,x8,x28 adcs x25,x26,x17 umulh x17,x9,x28 adc x26,xzr,xzr adds x19,x19,x14 umulh x14,x10,x28 adcs x20,x20,x15 umulh x15,x11,x28 adcs x21,x21,x16 umulh x16,x12,x28 adcs x22,x22,x17 umulh x17,x13,x28 mul x28,x4,x19 // next t[0]*n0 adcs x23,x23,x14 adcs x24,x24,x15 adcs x25,x25,x16 adc x26,x26,x17 cbnz x27,Lsqr8x_reduction ldp x14,x15,[x2,#8*0] ldp x16,x17,[x2,#8*2] mov x0,x2 sub x27,x3,x1 // done yet? 
adds x19,x19,x14 adcs x20,x20,x15 ldp x14,x15,[x2,#8*4] adcs x21,x21,x16 adcs x22,x22,x17 ldp x16,x17,[x2,#8*6] adcs x23,x23,x14 adcs x24,x24,x15 adcs x25,x25,x16 adcs x26,x26,x17 //adc x28,xzr,xzr // moved below cbz x27,Lsqr8x8_post_condition ldr x4,[x2,#-8*8] ldp x6,x7,[x1,#8*0] ldp x8,x9,[x1,#8*2] ldp x10,x11,[x1,#8*4] mov x27,#-8*8 ldp x12,x13,[x1,#8*6] add x1,x1,#8*8 Lsqr8x_tail: mul x14,x6,x4 adc x28,xzr,xzr // carry bit, modulo-scheduled mul x15,x7,x4 add x27,x27,#8 mul x16,x8,x4 mul x17,x9,x4 adds x19,x19,x14 mul x14,x10,x4 adcs x20,x20,x15 mul x15,x11,x4 adcs x21,x21,x16 mul x16,x12,x4 adcs x22,x22,x17 mul x17,x13,x4 adcs x23,x23,x14 umulh x14,x6,x4 adcs x24,x24,x15 umulh x15,x7,x4 adcs x25,x25,x16 umulh x16,x8,x4 adcs x26,x26,x17 umulh x17,x9,x4 adc x28,x28,xzr str x19,[x2],#8 adds x19,x20,x14 umulh x14,x10,x4 adcs x20,x21,x15 umulh x15,x11,x4 adcs x21,x22,x16 umulh x16,x12,x4 adcs x22,x23,x17 umulh x17,x13,x4 ldr x4,[x0,x27] adcs x23,x24,x14 adcs x24,x25,x15 adcs x25,x26,x16 adcs x26,x28,x17 //adc x28,xzr,xzr // moved above cbnz x27,Lsqr8x_tail // note that carry flag is guaranteed // to be zero at this point ldp x6,x7,[x2,#8*0] sub x27,x3,x1 // done yet? sub x16,x3,x5 // rewinded np ldp x8,x9,[x2,#8*2] ldp x10,x11,[x2,#8*4] ldp x12,x13,[x2,#8*6] cbz x27,Lsqr8x_tail_break ldr x4,[x0,#-8*8] adds x19,x19,x6 adcs x20,x20,x7 ldp x6,x7,[x1,#8*0] adcs x21,x21,x8 adcs x22,x22,x9 ldp x8,x9,[x1,#8*2] adcs x23,x23,x10 adcs x24,x24,x11 ldp x10,x11,[x1,#8*4] adcs x25,x25,x12 mov x27,#-8*8 adcs x26,x26,x13 ldp x12,x13,[x1,#8*6] add x1,x1,#8*8 //adc x28,xzr,xzr // moved above b Lsqr8x_tail .align 4 Lsqr8x_tail_break: ldr x4,[x29,#112] // pull n0 add x27,x2,#8*8 // end of current t[num] window subs xzr,x30,#1 // "move" top-most carry to carry bit adcs x14,x19,x6 adcs x15,x20,x7 ldp x19,x20,[x0,#8*0] adcs x21,x21,x8 ldp x6,x7,[x16,#8*0] // recall that x16 is &n[0] adcs x22,x22,x9 ldp x8,x9,[x16,#8*2] adcs x23,x23,x10 adcs x24,x24,x11 ldp x10,x11,[x16,#8*4] adcs x25,x25,x12 adcs x26,x26,x13 ldp x12,x13,[x16,#8*6] add x1,x16,#8*8 adc x30,xzr,xzr // top-most carry mul x28,x4,x19 stp x14,x15,[x2,#8*0] stp x21,x22,[x2,#8*2] ldp x21,x22,[x0,#8*2] stp x23,x24,[x2,#8*4] ldp x23,x24,[x0,#8*4] cmp x27,x29 // did we hit the bottom? stp x25,x26,[x2,#8*6] mov x2,x0 // slide the window ldp x25,x26,[x0,#8*6] mov x27,#8 b.ne Lsqr8x_reduction // Final step. We see if result is larger than modulus, and // if it is, subtract the modulus. But comparison implies // subtraction. So we subtract modulus, see if it borrowed, // and conditionally copy original value. ldr x0,[x29,#96] // pull rp add x2,x2,#8*8 subs x14,x19,x6 sbcs x15,x20,x7 sub x27,x5,#8*8 mov x3,x0 // x0 copy Lsqr8x_sub: sbcs x16,x21,x8 ldp x6,x7,[x1,#8*0] sbcs x17,x22,x9 stp x14,x15,[x0,#8*0] sbcs x14,x23,x10 ldp x8,x9,[x1,#8*2] sbcs x15,x24,x11 stp x16,x17,[x0,#8*2] sbcs x16,x25,x12 ldp x10,x11,[x1,#8*4] sbcs x17,x26,x13 ldp x12,x13,[x1,#8*6] add x1,x1,#8*8 ldp x19,x20,[x2,#8*0] sub x27,x27,#8*8 ldp x21,x22,[x2,#8*2] ldp x23,x24,[x2,#8*4] ldp x25,x26,[x2,#8*6] add x2,x2,#8*8 stp x14,x15,[x0,#8*4] sbcs x14,x19,x6 stp x16,x17,[x0,#8*6] add x0,x0,#8*8 sbcs x15,x20,x7 cbnz x27,Lsqr8x_sub sbcs x16,x21,x8 mov x2,sp add x1,sp,x5 ldp x6,x7,[x3,#8*0] sbcs x17,x22,x9 stp x14,x15,[x0,#8*0] sbcs x14,x23,x10 ldp x8,x9,[x3,#8*2] sbcs x15,x24,x11 stp x16,x17,[x0,#8*2] sbcs x16,x25,x12 ldp x19,x20,[x1,#8*0] sbcs x17,x26,x13 ldp x21,x22,[x1,#8*2] sbcs xzr,x30,xzr // did it borrow? 
ldr x30,[x29,#8] // pull return address stp x14,x15,[x0,#8*4] stp x16,x17,[x0,#8*6] sub x27,x5,#8*4 Lsqr4x_cond_copy: sub x27,x27,#8*4 csel x14,x19,x6,lo stp xzr,xzr,[x2,#8*0] csel x15,x20,x7,lo ldp x6,x7,[x3,#8*4] ldp x19,x20,[x1,#8*4] csel x16,x21,x8,lo stp xzr,xzr,[x2,#8*2] add x2,x2,#8*4 csel x17,x22,x9,lo ldp x8,x9,[x3,#8*6] ldp x21,x22,[x1,#8*6] add x1,x1,#8*4 stp x14,x15,[x3,#8*0] stp x16,x17,[x3,#8*2] add x3,x3,#8*4 stp xzr,xzr,[x1,#8*0] stp xzr,xzr,[x1,#8*2] cbnz x27,Lsqr4x_cond_copy csel x14,x19,x6,lo stp xzr,xzr,[x2,#8*0] csel x15,x20,x7,lo stp xzr,xzr,[x2,#8*2] csel x16,x21,x8,lo csel x17,x22,x9,lo stp x14,x15,[x3,#8*0] stp x16,x17,[x3,#8*2] b Lsqr8x_done .align 4 Lsqr8x8_post_condition: adc x28,xzr,xzr ldr x30,[x29,#8] // pull return address // x19-7,x28 hold result, x6-7 hold modulus subs x6,x19,x6 ldr x1,[x29,#96] // pull rp sbcs x7,x20,x7 stp xzr,xzr,[sp,#8*0] sbcs x8,x21,x8 stp xzr,xzr,[sp,#8*2] sbcs x9,x22,x9 stp xzr,xzr,[sp,#8*4] sbcs x10,x23,x10 stp xzr,xzr,[sp,#8*6] sbcs x11,x24,x11 stp xzr,xzr,[sp,#8*8] sbcs x12,x25,x12 stp xzr,xzr,[sp,#8*10] sbcs x13,x26,x13 stp xzr,xzr,[sp,#8*12] sbcs x28,x28,xzr // did it borrow? stp xzr,xzr,[sp,#8*14] // x6-7 hold result-modulus csel x6,x19,x6,lo csel x7,x20,x7,lo csel x8,x21,x8,lo csel x9,x22,x9,lo stp x6,x7,[x1,#8*0] csel x10,x23,x10,lo csel x11,x24,x11,lo stp x8,x9,[x1,#8*2] csel x12,x25,x12,lo csel x13,x26,x13,lo stp x10,x11,[x1,#8*4] stp x12,x13,[x1,#8*6] Lsqr8x_done: ldp x19,x20,[x29,#16] mov sp,x29 ldp x21,x22,[x29,#32] mov x0,#1 ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldr x29,[sp],#128 // x30 is popped earlier AARCH64_VALIDATE_LINK_REGISTER ret .globl bn_mul4x_mont .def bn_mul4x_mont .type 32 .endef .align 5 bn_mul4x_mont: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-128]! 
add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] sub x26,sp,x5,lsl#3 lsl x5,x5,#3 ldr x4,[x4] // *n0 sub sp,x26,#8*4 // alloca add x10,x2,x5 add x27,x1,x5 stp x0,x10,[x29,#96] // offload rp and &b[num] ldr x24,[x2,#8*0] // b[0] ldp x6,x7,[x1,#8*0] // a[0..3] ldp x8,x9,[x1,#8*2] add x1,x1,#8*4 mov x19,xzr mov x20,xzr mov x21,xzr mov x22,xzr ldp x14,x15,[x3,#8*0] // n[0..3] ldp x16,x17,[x3,#8*2] adds x3,x3,#8*4 // clear carry bit mov x0,xzr mov x28,#0 mov x26,sp Loop_mul4x_1st_reduction: mul x10,x6,x24 // lo(a[0..3]*b[0]) adc x0,x0,xzr // modulo-scheduled mul x11,x7,x24 add x28,x28,#8 mul x12,x8,x24 and x28,x28,#31 mul x13,x9,x24 adds x19,x19,x10 umulh x10,x6,x24 // hi(a[0..3]*b[0]) adcs x20,x20,x11 mul x25,x19,x4 // t[0]*n0 adcs x21,x21,x12 umulh x11,x7,x24 adcs x22,x22,x13 umulh x12,x8,x24 adc x23,xzr,xzr umulh x13,x9,x24 ldr x24,[x2,x28] // next b[i] (or b[0]) adds x20,x20,x10 // (*) mul x10,x14,x25 // lo(n[0..3]*t[0]*n0) str x25,[x26],#8 // put aside t[0]*n0 for tail processing adcs x21,x21,x11 mul x11,x15,x25 adcs x22,x22,x12 mul x12,x16,x25 adc x23,x23,x13 // can't overflow mul x13,x17,x25 // (*) adds xzr,x19,x10 subs xzr,x19,#1 // (*) umulh x10,x14,x25 // hi(n[0..3]*t[0]*n0) adcs x19,x20,x11 umulh x11,x15,x25 adcs x20,x21,x12 umulh x12,x16,x25 adcs x21,x22,x13 umulh x13,x17,x25 adcs x22,x23,x0 adc x0,xzr,xzr adds x19,x19,x10 sub x10,x27,x1 adcs x20,x20,x11 adcs x21,x21,x12 adcs x22,x22,x13 //adc x0,x0,xzr cbnz x28,Loop_mul4x_1st_reduction cbz x10,Lmul4x4_post_condition ldp x6,x7,[x1,#8*0] // a[4..7] ldp x8,x9,[x1,#8*2] add x1,x1,#8*4 ldr x25,[sp] // a[0]*n0 ldp x14,x15,[x3,#8*0] // n[4..7] ldp x16,x17,[x3,#8*2] add x3,x3,#8*4 Loop_mul4x_1st_tail: mul x10,x6,x24 // lo(a[4..7]*b[i]) adc x0,x0,xzr // modulo-scheduled mul x11,x7,x24 add x28,x28,#8 mul x12,x8,x24 and x28,x28,#31 mul x13,x9,x24 adds x19,x19,x10 umulh x10,x6,x24 // hi(a[4..7]*b[i]) adcs x20,x20,x11 umulh x11,x7,x24 adcs x21,x21,x12 umulh x12,x8,x24 adcs x22,x22,x13 umulh x13,x9,x24 adc x23,xzr,xzr ldr x24,[x2,x28] // next b[i] (or b[0]) adds x20,x20,x10 mul x10,x14,x25 // lo(n[4..7]*a[0]*n0) adcs x21,x21,x11 mul x11,x15,x25 adcs x22,x22,x12 mul x12,x16,x25 adc x23,x23,x13 // can't overflow mul x13,x17,x25 adds x19,x19,x10 umulh x10,x14,x25 // hi(n[4..7]*a[0]*n0) adcs x20,x20,x11 umulh x11,x15,x25 adcs x21,x21,x12 umulh x12,x16,x25 adcs x22,x22,x13 adcs x23,x23,x0 umulh x13,x17,x25 adc x0,xzr,xzr ldr x25,[sp,x28] // next t[0]*n0 str x19,[x26],#8 // result!!! adds x19,x20,x10 sub x10,x27,x1 // done yet? adcs x20,x21,x11 adcs x21,x22,x12 adcs x22,x23,x13 //adc x0,x0,xzr cbnz x28,Loop_mul4x_1st_tail sub x11,x27,x5 // rewinded x1 cbz x10,Lmul4x_proceed ldp x6,x7,[x1,#8*0] ldp x8,x9,[x1,#8*2] add x1,x1,#8*4 ldp x14,x15,[x3,#8*0] ldp x16,x17,[x3,#8*2] add x3,x3,#8*4 b Loop_mul4x_1st_tail .align 5 Lmul4x_proceed: ldr x24,[x2,#8*4]! // *++b adc x30,x0,xzr ldp x6,x7,[x11,#8*0] // a[0..3] sub x3,x3,x5 // rewind np ldp x8,x9,[x11,#8*2] add x1,x11,#8*4 stp x19,x20,[x26,#8*0] // result!!! ldp x19,x20,[sp,#8*4] // t[0..3] stp x21,x22,[x26,#8*2] // result!!! 
ldp x21,x22,[sp,#8*6] ldp x14,x15,[x3,#8*0] // n[0..3] mov x26,sp ldp x16,x17,[x3,#8*2] adds x3,x3,#8*4 // clear carry bit mov x0,xzr .align 4 Loop_mul4x_reduction: mul x10,x6,x24 // lo(a[0..3]*b[4]) adc x0,x0,xzr // modulo-scheduled mul x11,x7,x24 add x28,x28,#8 mul x12,x8,x24 and x28,x28,#31 mul x13,x9,x24 adds x19,x19,x10 umulh x10,x6,x24 // hi(a[0..3]*b[4]) adcs x20,x20,x11 mul x25,x19,x4 // t[0]*n0 adcs x21,x21,x12 umulh x11,x7,x24 adcs x22,x22,x13 umulh x12,x8,x24 adc x23,xzr,xzr umulh x13,x9,x24 ldr x24,[x2,x28] // next b[i] adds x20,x20,x10 // (*) mul x10,x14,x25 str x25,[x26],#8 // put aside t[0]*n0 for tail processing adcs x21,x21,x11 mul x11,x15,x25 // lo(n[0..3]*t[0]*n0 adcs x22,x22,x12 mul x12,x16,x25 adc x23,x23,x13 // can't overflow mul x13,x17,x25 // (*) adds xzr,x19,x10 subs xzr,x19,#1 // (*) umulh x10,x14,x25 // hi(n[0..3]*t[0]*n0 adcs x19,x20,x11 umulh x11,x15,x25 adcs x20,x21,x12 umulh x12,x16,x25 adcs x21,x22,x13 umulh x13,x17,x25 adcs x22,x23,x0 adc x0,xzr,xzr adds x19,x19,x10 adcs x20,x20,x11 adcs x21,x21,x12 adcs x22,x22,x13 //adc x0,x0,xzr cbnz x28,Loop_mul4x_reduction adc x0,x0,xzr ldp x10,x11,[x26,#8*4] // t[4..7] ldp x12,x13,[x26,#8*6] ldp x6,x7,[x1,#8*0] // a[4..7] ldp x8,x9,[x1,#8*2] add x1,x1,#8*4 adds x19,x19,x10 adcs x20,x20,x11 adcs x21,x21,x12 adcs x22,x22,x13 //adc x0,x0,xzr ldr x25,[sp] // t[0]*n0 ldp x14,x15,[x3,#8*0] // n[4..7] ldp x16,x17,[x3,#8*2] add x3,x3,#8*4 .align 4 Loop_mul4x_tail: mul x10,x6,x24 // lo(a[4..7]*b[4]) adc x0,x0,xzr // modulo-scheduled mul x11,x7,x24 add x28,x28,#8 mul x12,x8,x24 and x28,x28,#31 mul x13,x9,x24 adds x19,x19,x10 umulh x10,x6,x24 // hi(a[4..7]*b[4]) adcs x20,x20,x11 umulh x11,x7,x24 adcs x21,x21,x12 umulh x12,x8,x24 adcs x22,x22,x13 umulh x13,x9,x24 adc x23,xzr,xzr ldr x24,[x2,x28] // next b[i] adds x20,x20,x10 mul x10,x14,x25 // lo(n[4..7]*t[0]*n0) adcs x21,x21,x11 mul x11,x15,x25 adcs x22,x22,x12 mul x12,x16,x25 adc x23,x23,x13 // can't overflow mul x13,x17,x25 adds x19,x19,x10 umulh x10,x14,x25 // hi(n[4..7]*t[0]*n0) adcs x20,x20,x11 umulh x11,x15,x25 adcs x21,x21,x12 umulh x12,x16,x25 adcs x22,x22,x13 umulh x13,x17,x25 adcs x23,x23,x0 ldr x25,[sp,x28] // next a[0]*n0 adc x0,xzr,xzr str x19,[x26],#8 // result!!! adds x19,x20,x10 sub x10,x27,x1 // done yet? adcs x20,x21,x11 adcs x21,x22,x12 adcs x22,x23,x13 //adc x0,x0,xzr cbnz x28,Loop_mul4x_tail sub x11,x3,x5 // rewinded np? adc x0,x0,xzr cbz x10,Loop_mul4x_break ldp x10,x11,[x26,#8*4] ldp x12,x13,[x26,#8*6] ldp x6,x7,[x1,#8*0] ldp x8,x9,[x1,#8*2] add x1,x1,#8*4 adds x19,x19,x10 adcs x20,x20,x11 adcs x21,x21,x12 adcs x22,x22,x13 //adc x0,x0,xzr ldp x14,x15,[x3,#8*0] ldp x16,x17,[x3,#8*2] add x3,x3,#8*4 b Loop_mul4x_tail .align 4 Loop_mul4x_break: ldp x12,x13,[x29,#96] // pull rp and &b[num] adds x19,x19,x30 add x2,x2,#8*4 // bp++ adcs x20,x20,xzr sub x1,x1,x5 // rewind ap adcs x21,x21,xzr stp x19,x20,[x26,#8*0] // result!!! adcs x22,x22,xzr ldp x19,x20,[sp,#8*4] // t[0..3] adc x30,x0,xzr stp x21,x22,[x26,#8*2] // result!!! cmp x2,x13 // done yet? ldp x21,x22,[sp,#8*6] ldp x14,x15,[x11,#8*0] // n[0..3] ldp x16,x17,[x11,#8*2] add x3,x11,#8*4 b.eq Lmul4x_post ldr x24,[x2] ldp x6,x7,[x1,#8*0] // a[0..3] ldp x8,x9,[x1,#8*2] adds x1,x1,#8*4 // clear carry bit mov x0,xzr mov x26,sp b Loop_mul4x_reduction .align 4 Lmul4x_post: // Final step. We see if result is larger than modulus, and // if it is, subtract the modulus. But comparison implies // subtraction. So we subtract modulus, see if it borrowed, // and conditionally copy original value. 
mov x0,x12 mov x27,x12 // x0 copy subs x10,x19,x14 add x26,sp,#8*8 sbcs x11,x20,x15 sub x28,x5,#8*4 Lmul4x_sub: sbcs x12,x21,x16 ldp x14,x15,[x3,#8*0] sub x28,x28,#8*4 ldp x19,x20,[x26,#8*0] sbcs x13,x22,x17 ldp x16,x17,[x3,#8*2] add x3,x3,#8*4 ldp x21,x22,[x26,#8*2] add x26,x26,#8*4 stp x10,x11,[x0,#8*0] sbcs x10,x19,x14 stp x12,x13,[x0,#8*2] add x0,x0,#8*4 sbcs x11,x20,x15 cbnz x28,Lmul4x_sub sbcs x12,x21,x16 mov x26,sp add x1,sp,#8*4 ldp x6,x7,[x27,#8*0] sbcs x13,x22,x17 stp x10,x11,[x0,#8*0] ldp x8,x9,[x27,#8*2] stp x12,x13,[x0,#8*2] ldp x19,x20,[x1,#8*0] ldp x21,x22,[x1,#8*2] sbcs xzr,x30,xzr // did it borrow? ldr x30,[x29,#8] // pull return address sub x28,x5,#8*4 Lmul4x_cond_copy: sub x28,x28,#8*4 csel x10,x19,x6,lo stp xzr,xzr,[x26,#8*0] csel x11,x20,x7,lo ldp x6,x7,[x27,#8*4] ldp x19,x20,[x1,#8*4] csel x12,x21,x8,lo stp xzr,xzr,[x26,#8*2] add x26,x26,#8*4 csel x13,x22,x9,lo ldp x8,x9,[x27,#8*6] ldp x21,x22,[x1,#8*6] add x1,x1,#8*4 stp x10,x11,[x27,#8*0] stp x12,x13,[x27,#8*2] add x27,x27,#8*4 cbnz x28,Lmul4x_cond_copy csel x10,x19,x6,lo stp xzr,xzr,[x26,#8*0] csel x11,x20,x7,lo stp xzr,xzr,[x26,#8*2] csel x12,x21,x8,lo stp xzr,xzr,[x26,#8*3] csel x13,x22,x9,lo stp xzr,xzr,[x26,#8*4] stp x10,x11,[x27,#8*0] stp x12,x13,[x27,#8*2] b Lmul4x_done .align 4 Lmul4x4_post_condition: adc x0,x0,xzr ldr x1,[x29,#96] // pull rp // x19-3,x0 hold result, x14-7 hold modulus subs x6,x19,x14 ldr x30,[x29,#8] // pull return address sbcs x7,x20,x15 stp xzr,xzr,[sp,#8*0] sbcs x8,x21,x16 stp xzr,xzr,[sp,#8*2] sbcs x9,x22,x17 stp xzr,xzr,[sp,#8*4] sbcs xzr,x0,xzr // did it borrow? stp xzr,xzr,[sp,#8*6] // x6-3 hold result-modulus csel x6,x19,x6,lo csel x7,x20,x7,lo csel x8,x21,x8,lo csel x9,x22,x9,lo stp x6,x7,[x1,#8*0] stp x8,x9,[x1,#8*2] Lmul4x_done: ldp x19,x20,[x29,#16] mov sp,x29 ldp x21,x22,[x29,#32] mov x0,#1 ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldr x29,[sp],#128 // x30 is popped earlier AARCH64_VALIDATE_LINK_REGISTER ret .byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 4 #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
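Each Montgomery routine in the file above ends with the step its comments call the final step: subtract the modulus once, check whether the subtraction borrowed, and conditionally copy the original value back (the Lsub/Lcond_copy, Lsqr8x_sub/Lsqr4x_cond_copy and Lmul4x_sub/Lmul4x_cond_copy sequences). The Rust sketch below shows only that conditional reduction, under illustrative assumptions: little-endian u64 limbs plus a separate top carry word, and hypothetical names (cond_subtract_modulus, t, n, top_carry) that do not correspond to anything in ring's code.

// Illustrative sketch, not ring's implementation: compute t - n with
// borrow propagation, fold the borrow into the extra top carry word
// (the assembly's "sbcs x19,x19,xzr" / "sbcs xzr,x30,xzr" steps), then
// keep the difference only when no final borrow occurred, mirroring
// the sbcs + csel pattern used above.
fn cond_subtract_modulus(t: &mut [u64], n: &[u64], top_carry: u64) {
    assert_eq!(t.len(), n.len());
    let mut diff = vec![0u64; t.len()];
    let mut borrow = 0u64;
    for i in 0..t.len() {
        let (d1, b1) = t[i].overflowing_sub(n[i]);
        let (d2, b2) = d1.overflowing_sub(borrow);
        diff[i] = d2;
        borrow = (b1 as u64) | (b2 as u64); // at most one of b1, b2 can be set
    }
    // A borrow out of (top_carry - borrow) means t was smaller than n.
    let (_, final_borrow) = top_carry.overflowing_sub(borrow);
    // All-ones when the subtracted value should be kept, all-zeros otherwise.
    let keep = (final_borrow as u64).wrapping_sub(1);
    for i in 0..t.len() {
        t[i] = (diff[i] & keep) | (t[i] & !keep);
    }
}

As in the assembly, the selection is done with a mask rather than a branch, so the memory and computation pattern does not reveal whether the reduction was actually needed.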
fatiimajamiil/rustpad-custom
73,987
.cargo/registry/src/index.crates.io-6f17d22bba15001f/ring-0.17.14/pregenerated/chacha20_poly1305_armv8-win64.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) .section .rodata .align 7 Lchacha20_consts: .byte 'e','x','p','a','n','d',' ','3','2','-','b','y','t','e',' ','k' Linc: .long 1,2,3,4 Lrol8: .byte 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14 Lclamp: .quad 0x0FFFFFFC0FFFFFFF, 0x0FFFFFFC0FFFFFFC .text .def Lpoly_hash_ad_internal .type 32 .endef .align 6 Lpoly_hash_ad_internal: .cfi_startproc cbnz x4, Lpoly_hash_intro ret Lpoly_hash_intro: cmp x4, #16 b.lt Lpoly_hash_ad_tail ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most sub x4, x4, #16 b Lpoly_hash_ad_internal Lpoly_hash_ad_tail: cbz x4, Lpoly_hash_ad_ret eor v20.16b, v20.16b, v20.16b // Use T0 to load the AAD sub x4, x4, #1 Lpoly_hash_tail_16_compose: ext v20.16b, v20.16b, v20.16b, #15 ldrb w11, [x3, x4] mov v20.b[0], w11 subs x4, x4, #1 b.ge Lpoly_hash_tail_16_compose mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most Lpoly_hash_ad_ret: ret .cfi_endproc ///////////////////////////////// // // void chacha20_poly1305_seal(uint8_t *pt, uint8_t *ct, size_t len_in, uint8_t *ad, size_t len_ad, union open_data *seal_data); // .globl chacha20_poly1305_seal .def chacha20_poly1305_seal .type 32 .endef .align 6 chacha20_poly1305_seal: AARCH64_SIGN_LINK_REGISTER .cfi_startproc stp x29, x30, [sp, #-80]! .cfi_def_cfa_offset 80 .cfi_offset w30, -72 .cfi_offset w29, -80 mov x29, sp // We probably could do .cfi_def_cfa w29, 80 at this point, but since // we don't actually use the frame pointer like that, it's probably not // worth bothering. 
stp d8, d9, [sp, #16] stp d10, d11, [sp, #32] stp d12, d13, [sp, #48] stp d14, d15, [sp, #64] .cfi_offset b15, -8 .cfi_offset b14, -16 .cfi_offset b13, -24 .cfi_offset b12, -32 .cfi_offset b11, -40 .cfi_offset b10, -48 .cfi_offset b9, -56 .cfi_offset b8, -64 adrp x11, Lchacha20_consts add x11, x11, :lo12:Lchacha20_consts ld1 {v24.16b - v27.16b}, [x11] // Load the CONSTS, INC, ROL8 and CLAMP values ld1 {v28.16b - v30.16b}, [x5] mov x15, #1 // Prepare the Poly1305 state mov x8, #0 mov x9, #0 mov x10, #0 ldr x12, [x5, #56] // The total cipher text length includes extra_in_len add x12, x12, x2 mov v31.d[0], x4 // Store the input and aad lengths mov v31.d[1], x12 cmp x2, #128 b.le Lseal_128 // Optimization for smaller buffers // Initially we prepare 5 ChaCha20 blocks. Four to encrypt up to 4 blocks (256 bytes) of plaintext, // and one for the Poly1305 R and S keys. The first four blocks (A0-A3..D0-D3) are computed vertically, // the fifth block (A4-D4) horizontally. ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11] mov v4.16b, v24.16b ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16 mov v9.16b, v28.16b ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16 mov v14.16b, v29.16b ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5] add v15.4s, v15.4s, v25.4s mov v19.16b, v30.16b sub x5, x5, #32 mov x6, #10 .align 5 Lseal_init_rounds: add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s add v3.4s, v3.4s, v8.4s add v4.4s, v4.4s, v9.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b eor v18.16b, v18.16b, v3.16b eor v19.16b, v19.16b, v4.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h rev32 v18.8h, v18.8h rev32 v19.8h, v19.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s add v13.4s, v13.4s, v18.4s add v14.4s, v14.4s, v19.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b eor v8.16b, v8.16b, v13.16b eor v9.16b, v9.16b, v14.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 ushr v7.4s, v8.4s, #20 sli v7.4s, v8.4s, #12 ushr v8.4s, v9.4s, #20 sli v8.4s, v9.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s add v3.4s, v3.4s, v7.4s add v4.4s, v4.4s, v8.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b eor v18.16b, v18.16b, v3.16b eor v19.16b, v19.16b, v4.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b tbl v18.16b, {v18.16b}, v26.16b tbl v19.16b, {v19.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s add v13.4s, v13.4s, v18.4s add v14.4s, v14.4s, v19.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b eor v7.16b, v7.16b, v13.16b eor v8.16b, v8.16b, v14.16b ushr v9.4s, v8.4s, #25 sli v9.4s, v8.4s, #7 ushr v8.4s, v7.4s, #25 sli v8.4s, v7.4s, #7 ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v9.16b, v9.16b, v9.16b, #4 ext v14.16b, v14.16b, v14.16b, #8 ext v19.16b, v19.16b, v19.16b, #12 add v0.4s, v0.4s, v6.4s add v1.4s, v1.4s, v7.4s add v2.4s, v2.4s, v8.4s add v3.4s, v3.4s, v5.4s add v4.4s, v4.4s, v9.4s eor v18.16b, v18.16b, v0.16b eor v15.16b, v15.16b, v1.16b eor v16.16b, v16.16b, v2.16b eor v17.16b, v17.16b, v3.16b eor v19.16b, v19.16b, v4.16b rev32 v18.8h, v18.8h rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h 
rev32 v19.8h, v19.8h add v12.4s, v12.4s, v18.4s add v13.4s, v13.4s, v15.4s add v10.4s, v10.4s, v16.4s add v11.4s, v11.4s, v17.4s add v14.4s, v14.4s, v19.4s eor v6.16b, v6.16b, v12.16b eor v7.16b, v7.16b, v13.16b eor v8.16b, v8.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v9.16b, v9.16b, v14.16b ushr v20.4s, v6.4s, #20 sli v20.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 ushr v7.4s, v8.4s, #20 sli v7.4s, v8.4s, #12 ushr v8.4s, v5.4s, #20 sli v8.4s, v5.4s, #12 ushr v5.4s, v9.4s, #20 sli v5.4s, v9.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s add v3.4s, v3.4s, v8.4s add v4.4s, v4.4s, v5.4s eor v18.16b, v18.16b, v0.16b eor v15.16b, v15.16b, v1.16b eor v16.16b, v16.16b, v2.16b eor v17.16b, v17.16b, v3.16b eor v19.16b, v19.16b, v4.16b tbl v18.16b, {v18.16b}, v26.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b tbl v19.16b, {v19.16b}, v26.16b add v12.4s, v12.4s, v18.4s add v13.4s, v13.4s, v15.4s add v10.4s, v10.4s, v16.4s add v11.4s, v11.4s, v17.4s add v14.4s, v14.4s, v19.4s eor v20.16b, v20.16b, v12.16b eor v6.16b, v6.16b, v13.16b eor v7.16b, v7.16b, v10.16b eor v8.16b, v8.16b, v11.16b eor v5.16b, v5.16b, v14.16b ushr v9.4s, v5.4s, #25 sli v9.4s, v5.4s, #7 ushr v5.4s, v8.4s, #25 sli v5.4s, v8.4s, #7 ushr v8.4s, v7.4s, #25 sli v8.4s, v7.4s, #7 ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v20.4s, #25 sli v6.4s, v20.4s, #7 ext v9.16b, v9.16b, v9.16b, #12 ext v14.16b, v14.16b, v14.16b, #8 ext v19.16b, v19.16b, v19.16b, #4 subs x6, x6, #1 b.hi Lseal_init_rounds add v15.4s, v15.4s, v25.4s mov x11, #4 dup v20.4s, w11 add v25.4s, v25.4s, v20.4s zip1 v20.4s, v0.4s, v1.4s zip2 v21.4s, v0.4s, v1.4s zip1 v22.4s, v2.4s, v3.4s zip2 v23.4s, v2.4s, v3.4s zip1 v0.2d, v20.2d, v22.2d zip2 v1.2d, v20.2d, v22.2d zip1 v2.2d, v21.2d, v23.2d zip2 v3.2d, v21.2d, v23.2d zip1 v20.4s, v5.4s, v6.4s zip2 v21.4s, v5.4s, v6.4s zip1 v22.4s, v7.4s, v8.4s zip2 v23.4s, v7.4s, v8.4s zip1 v5.2d, v20.2d, v22.2d zip2 v6.2d, v20.2d, v22.2d zip1 v7.2d, v21.2d, v23.2d zip2 v8.2d, v21.2d, v23.2d zip1 v20.4s, v10.4s, v11.4s zip2 v21.4s, v10.4s, v11.4s zip1 v22.4s, v12.4s, v13.4s zip2 v23.4s, v12.4s, v13.4s zip1 v10.2d, v20.2d, v22.2d zip2 v11.2d, v20.2d, v22.2d zip1 v12.2d, v21.2d, v23.2d zip2 v13.2d, v21.2d, v23.2d zip1 v20.4s, v15.4s, v16.4s zip2 v21.4s, v15.4s, v16.4s zip1 v22.4s, v17.4s, v18.4s zip2 v23.4s, v17.4s, v18.4s zip1 v15.2d, v20.2d, v22.2d zip2 v16.2d, v20.2d, v22.2d zip1 v17.2d, v21.2d, v23.2d zip2 v18.2d, v21.2d, v23.2d add v4.4s, v4.4s, v24.4s add v9.4s, v9.4s, v28.4s and v4.16b, v4.16b, v27.16b add v0.4s, v0.4s, v24.4s add v5.4s, v5.4s, v28.4s add v10.4s, v10.4s, v29.4s add v15.4s, v15.4s, v30.4s add v1.4s, v1.4s, v24.4s add v6.4s, v6.4s, v28.4s add v11.4s, v11.4s, v29.4s add v16.4s, v16.4s, v30.4s add v2.4s, v2.4s, v24.4s add v7.4s, v7.4s, v28.4s add v12.4s, v12.4s, v29.4s add v17.4s, v17.4s, v30.4s add v3.4s, v3.4s, v24.4s add v8.4s, v8.4s, v28.4s add v13.4s, v13.4s, v29.4s add v18.4s, v18.4s, v30.4s mov x16, v4.d[0] // Move the R key to GPRs mov x17, v4.d[1] mov v27.16b, v9.16b // Store the S key bl Lpoly_hash_ad_internal mov x3, x0 cmp x2, #256 b.le Lseal_tail ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v0.16b eor v21.16b, v21.16b, v5.16b eor v22.16b, v22.16b, v10.16b eor v23.16b, v23.16b, v15.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v1.16b eor v21.16b, v21.16b, v6.16b eor v22.16b, v22.16b, v11.16b eor v23.16b, v23.16b, v16.16b st1 {v20.16b 
- v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v2.16b eor v21.16b, v21.16b, v7.16b eor v22.16b, v22.16b, v12.16b eor v23.16b, v23.16b, v17.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v3.16b eor v21.16b, v21.16b, v8.16b eor v22.16b, v22.16b, v13.16b eor v23.16b, v23.16b, v18.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #256 mov x6, #4 // In the first run of the loop we need to hash 256 bytes, therefore we hash one block for the first 4 rounds mov x7, #6 // and two blocks for the remaining 6, for a total of (1 * 4 + 2 * 6) * 16 = 256 Lseal_main_loop: adrp x11, Lchacha20_consts add x11, x11, :lo12:Lchacha20_consts ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11] mov v4.16b, v24.16b ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16 mov v9.16b, v28.16b ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16 mov v14.16b, v29.16b ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5] add v15.4s, v15.4s, v25.4s mov v19.16b, v30.16b eor v20.16b, v20.16b, v20.16b //zero not v21.16b, v20.16b // -1 sub v21.4s, v25.4s, v21.4s // Add +1 ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter) add v19.4s, v19.4s, v20.4s sub x5, x5, #32 .align 5 Lseal_main_loop_rounds: add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s add v3.4s, v3.4s, v8.4s add v4.4s, v4.4s, v9.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b eor v18.16b, v18.16b, v3.16b eor v19.16b, v19.16b, v4.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h rev32 v18.8h, v18.8h rev32 v19.8h, v19.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s add v13.4s, v13.4s, v18.4s add v14.4s, v14.4s, v19.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b eor v8.16b, v8.16b, v13.16b eor v9.16b, v9.16b, v14.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 ushr v7.4s, v8.4s, #20 sli v7.4s, v8.4s, #12 ushr v8.4s, v9.4s, #20 sli v8.4s, v9.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s add v3.4s, v3.4s, v7.4s add v4.4s, v4.4s, v8.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b eor v18.16b, v18.16b, v3.16b eor v19.16b, v19.16b, v4.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b tbl v18.16b, {v18.16b}, v26.16b tbl v19.16b, {v19.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s add v13.4s, v13.4s, v18.4s add v14.4s, v14.4s, v19.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b eor v7.16b, v7.16b, v13.16b eor v8.16b, v8.16b, v14.16b ushr v9.4s, v8.4s, #25 sli v9.4s, v8.4s, #7 ushr v8.4s, v7.4s, #25 sli v8.4s, v7.4s, #7 ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v9.16b, v9.16b, v9.16b, #4 ext v14.16b, v14.16b, v14.16b, #8 ext v19.16b, v19.16b, v19.16b, #12 ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, 
x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most add v0.4s, v0.4s, v6.4s add v1.4s, v1.4s, v7.4s add v2.4s, v2.4s, v8.4s add v3.4s, v3.4s, v5.4s add v4.4s, v4.4s, v9.4s eor v18.16b, v18.16b, v0.16b eor v15.16b, v15.16b, v1.16b eor v16.16b, v16.16b, v2.16b eor v17.16b, v17.16b, v3.16b eor v19.16b, v19.16b, v4.16b rev32 v18.8h, v18.8h rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h rev32 v19.8h, v19.8h add v12.4s, v12.4s, v18.4s add v13.4s, v13.4s, v15.4s add v10.4s, v10.4s, v16.4s add v11.4s, v11.4s, v17.4s add v14.4s, v14.4s, v19.4s eor v6.16b, v6.16b, v12.16b eor v7.16b, v7.16b, v13.16b eor v8.16b, v8.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v9.16b, v9.16b, v14.16b ushr v20.4s, v6.4s, #20 sli v20.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 ushr v7.4s, v8.4s, #20 sli v7.4s, v8.4s, #12 ushr v8.4s, v5.4s, #20 sli v8.4s, v5.4s, #12 ushr v5.4s, v9.4s, #20 sli v5.4s, v9.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s add v3.4s, v3.4s, v8.4s add v4.4s, v4.4s, v5.4s eor v18.16b, v18.16b, v0.16b eor v15.16b, v15.16b, v1.16b eor v16.16b, v16.16b, v2.16b eor v17.16b, v17.16b, v3.16b eor v19.16b, v19.16b, v4.16b tbl v18.16b, {v18.16b}, v26.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b tbl v19.16b, {v19.16b}, v26.16b add v12.4s, v12.4s, v18.4s add v13.4s, v13.4s, v15.4s add v10.4s, v10.4s, v16.4s add v11.4s, v11.4s, v17.4s add v14.4s, v14.4s, v19.4s eor v20.16b, v20.16b, v12.16b eor v6.16b, v6.16b, v13.16b eor v7.16b, v7.16b, v10.16b eor v8.16b, v8.16b, v11.16b eor v5.16b, v5.16b, v14.16b ushr v9.4s, v5.4s, #25 sli v9.4s, v5.4s, #7 ushr v5.4s, v8.4s, #25 sli v5.4s, v8.4s, #7 ushr v8.4s, v7.4s, #25 sli v8.4s, v7.4s, #7 ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v20.4s, #25 sli v6.4s, v20.4s, #7 ext v9.16b, v9.16b, v9.16b, #12 ext v14.16b, v14.16b, v14.16b, #8 ext v19.16b, v19.16b, v19.16b, #4 subs x6, x6, #1 b.ge Lseal_main_loop_rounds ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most subs x7, x7, #1 b.gt Lseal_main_loop_rounds eor v20.16b, v20.16b, v20.16b //zero not v21.16b, v20.16b // -1 sub v21.4s, v25.4s, v21.4s // Add +1 ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter) add v19.4s, v19.4s, v20.4s add v15.4s, v15.4s, v25.4s mov x11, #5 dup v20.4s, w11 add v25.4s, v25.4s, v20.4s zip1 v20.4s, v0.4s, v1.4s zip2 v21.4s, v0.4s, v1.4s zip1 v22.4s, v2.4s, v3.4s zip2 v23.4s, v2.4s, v3.4s zip1 v0.2d, v20.2d, v22.2d zip2 v1.2d, v20.2d, 
v22.2d zip1 v2.2d, v21.2d, v23.2d zip2 v3.2d, v21.2d, v23.2d zip1 v20.4s, v5.4s, v6.4s zip2 v21.4s, v5.4s, v6.4s zip1 v22.4s, v7.4s, v8.4s zip2 v23.4s, v7.4s, v8.4s zip1 v5.2d, v20.2d, v22.2d zip2 v6.2d, v20.2d, v22.2d zip1 v7.2d, v21.2d, v23.2d zip2 v8.2d, v21.2d, v23.2d zip1 v20.4s, v10.4s, v11.4s zip2 v21.4s, v10.4s, v11.4s zip1 v22.4s, v12.4s, v13.4s zip2 v23.4s, v12.4s, v13.4s zip1 v10.2d, v20.2d, v22.2d zip2 v11.2d, v20.2d, v22.2d zip1 v12.2d, v21.2d, v23.2d zip2 v13.2d, v21.2d, v23.2d zip1 v20.4s, v15.4s, v16.4s zip2 v21.4s, v15.4s, v16.4s zip1 v22.4s, v17.4s, v18.4s zip2 v23.4s, v17.4s, v18.4s zip1 v15.2d, v20.2d, v22.2d zip2 v16.2d, v20.2d, v22.2d zip1 v17.2d, v21.2d, v23.2d zip2 v18.2d, v21.2d, v23.2d add v0.4s, v0.4s, v24.4s add v5.4s, v5.4s, v28.4s add v10.4s, v10.4s, v29.4s add v15.4s, v15.4s, v30.4s add v1.4s, v1.4s, v24.4s add v6.4s, v6.4s, v28.4s add v11.4s, v11.4s, v29.4s add v16.4s, v16.4s, v30.4s add v2.4s, v2.4s, v24.4s add v7.4s, v7.4s, v28.4s add v12.4s, v12.4s, v29.4s add v17.4s, v17.4s, v30.4s add v3.4s, v3.4s, v24.4s add v8.4s, v8.4s, v28.4s add v13.4s, v13.4s, v29.4s add v18.4s, v18.4s, v30.4s add v4.4s, v4.4s, v24.4s add v9.4s, v9.4s, v28.4s add v14.4s, v14.4s, v29.4s add v19.4s, v19.4s, v30.4s cmp x2, #320 b.le Lseal_tail ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v0.16b eor v21.16b, v21.16b, v5.16b eor v22.16b, v22.16b, v10.16b eor v23.16b, v23.16b, v15.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v1.16b eor v21.16b, v21.16b, v6.16b eor v22.16b, v22.16b, v11.16b eor v23.16b, v23.16b, v16.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v2.16b eor v21.16b, v21.16b, v7.16b eor v22.16b, v22.16b, v12.16b eor v23.16b, v23.16b, v17.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v3.16b eor v21.16b, v21.16b, v8.16b eor v22.16b, v22.16b, v13.16b eor v23.16b, v23.16b, v18.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v4.16b eor v21.16b, v21.16b, v9.16b eor v22.16b, v22.16b, v14.16b eor v23.16b, v23.16b, v19.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #320 mov x6, #0 mov x7, #10 // For the remainder of the loop we always hash and encrypt 320 bytes per iteration b Lseal_main_loop Lseal_tail: // This part of the function handles the storage and authentication of the last [0,320) bytes // We assume A0-A4 ... D0-D4 hold at least inl (320 max) bytes of the stream data. 
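// Each pass below consumes one 64-byte block of the buffered keystream: full blocks are
// XORed with the plaintext and fed to Poly1305 16 bytes at a time, then the state is
// shifted down so the next block rotates into the A0/B0/C0/D0 slots. A final partial
// block is zero-padded for the hash and, if the caller supplied extra_in, those bytes
// are folded into the same padded block before hashing.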
cmp x2, #64 b.lt Lseal_tail_64 // Store and authenticate 64B blocks per iteration ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v0.16b eor v21.16b, v21.16b, v5.16b eor v22.16b, v22.16b, v10.16b eor v23.16b, v23.16b, v15.16b mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most mov x11, v21.d[0] mov x12, v21.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most mov x11, v22.d[0] mov x12, v22.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most mov x11, v23.d[0] mov x12, v23.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #64 // Shift the state left by 64 bytes for the next iteration of the loop mov v0.16b, v1.16b 
mov v5.16b, v6.16b mov v10.16b, v11.16b mov v15.16b, v16.16b mov v1.16b, v2.16b mov v6.16b, v7.16b mov v11.16b, v12.16b mov v16.16b, v17.16b mov v2.16b, v3.16b mov v7.16b, v8.16b mov v12.16b, v13.16b mov v17.16b, v18.16b mov v3.16b, v4.16b mov v8.16b, v9.16b mov v13.16b, v14.16b mov v18.16b, v19.16b b Lseal_tail Lseal_tail_64: ldp x3, x4, [x5, #48] // extra_in_len and extra_in_ptr // Here we handle the last [0,64) bytes of plaintext cmp x2, #16 b.lt Lseal_tail_16 // Each iteration encrypt and authenticate a 16B block ld1 {v20.16b}, [x1], #16 eor v20.16b, v20.16b, v0.16b mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most st1 {v20.16b}, [x0], #16 sub x2, x2, #16 // Shift the state left by 16 bytes for the next iteration of the loop mov v0.16b, v5.16b mov v5.16b, v10.16b mov v10.16b, v15.16b b Lseal_tail_64 Lseal_tail_16: // Here we handle the last [0,16) bytes of ciphertext that require a padded block cbz x2, Lseal_hash_extra eor v20.16b, v20.16b, v20.16b // Use T0 to load the plaintext/extra in eor v21.16b, v21.16b, v21.16b // Use T1 to generate an AND mask that will only mask the ciphertext bytes not v22.16b, v20.16b mov x6, x2 add x1, x1, x2 cbz x4, Lseal_tail_16_compose // No extra data to pad with, zero padding mov x7, #16 // We need to load some extra_in first for padding sub x7, x7, x2 cmp x4, x7 csel x7, x4, x7, lt // Load the minimum of extra_in_len and the amount needed to fill the register mov x12, x7 add x3, x3, x7 sub x4, x4, x7 Lseal_tail16_compose_extra_in: ext v20.16b, v20.16b, v20.16b, #15 ldrb w11, [x3, #-1]! mov v20.b[0], w11 subs x7, x7, #1 b.gt Lseal_tail16_compose_extra_in add x3, x3, x12 Lseal_tail_16_compose: ext v20.16b, v20.16b, v20.16b, #15 ldrb w11, [x1, #-1]! 
mov v20.b[0], w11 ext v21.16b, v22.16b, v21.16b, #15 subs x2, x2, #1 b.gt Lseal_tail_16_compose and v0.16b, v0.16b, v21.16b eor v20.16b, v20.16b, v0.16b mov v21.16b, v20.16b Lseal_tail_16_store: umov w11, v20.b[0] strb w11, [x0], #1 ext v20.16b, v20.16b, v20.16b, #1 subs x6, x6, #1 b.gt Lseal_tail_16_store // Hash in the final ct block concatenated with extra_in mov x11, v21.d[0] mov x12, v21.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most Lseal_hash_extra: cbz x4, Lseal_finalize Lseal_hash_extra_loop: cmp x4, #16 b.lt Lseal_hash_extra_tail ld1 {v20.16b}, [x3], #16 mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most sub x4, x4, #16 b Lseal_hash_extra_loop Lseal_hash_extra_tail: cbz x4, Lseal_finalize eor v20.16b, v20.16b, v20.16b // Use T0 to load the remaining extra ciphertext add x3, x3, x4 Lseal_hash_extra_load: ext v20.16b, v20.16b, v20.16b, #15 ldrb w11, [x3, #-1]! 
mov v20.b[0], w11 subs x4, x4, #1 b.gt Lseal_hash_extra_load // Hash in the final padded extra_in blcok mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most Lseal_finalize: mov x11, v31.d[0] mov x12, v31.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most // Final reduction step sub x12, xzr, x15 orr x13, xzr, #3 subs x11, x8, #-5 sbcs x12, x9, x12 sbcs x13, x10, x13 csel x8, x11, x8, cs csel x9, x12, x9, cs csel x10, x13, x10, cs mov x11, v27.d[0] mov x12, v27.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 stp x8, x9, [x5] ldp d8, d9, [sp, #16] ldp d10, d11, [sp, #32] ldp d12, d13, [sp, #48] ldp d14, d15, [sp, #64] .cfi_restore b15 .cfi_restore b14 .cfi_restore b13 .cfi_restore b12 .cfi_restore b11 .cfi_restore b10 .cfi_restore b9 .cfi_restore b8 ldp x29, x30, [sp], 80 .cfi_restore w29 .cfi_restore w30 .cfi_def_cfa_offset 0 AARCH64_VALIDATE_LINK_REGISTER ret Lseal_128: // On some architectures preparing 5 blocks for small buffers is wasteful eor v25.16b, v25.16b, v25.16b mov x11, #1 mov v25.s[0], w11 mov v0.16b, v24.16b mov v1.16b, v24.16b mov v2.16b, v24.16b mov v5.16b, v28.16b mov v6.16b, v28.16b mov v7.16b, v28.16b mov v10.16b, v29.16b mov v11.16b, v29.16b mov v12.16b, v29.16b mov v17.16b, v30.16b add v15.4s, v17.4s, v25.4s add v16.4s, v15.4s, v25.4s mov x6, #10 Lseal_128_rounds: add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b add 
v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #4 ext v6.16b, v6.16b, v6.16b, #4 ext v7.16b, v7.16b, v7.16b, #4 ext v10.16b, v10.16b, v10.16b, #8 ext v11.16b, v11.16b, v11.16b, #8 ext v12.16b, v12.16b, v12.16b, #8 ext v15.16b, v15.16b, v15.16b, #12 ext v16.16b, v16.16b, v16.16b, #12 ext v17.16b, v17.16b, v17.16b, #12 add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #12 ext v6.16b, v6.16b, v6.16b, #12 ext v7.16b, v7.16b, v7.16b, #12 ext v10.16b, v10.16b, v10.16b, #8 ext v11.16b, v11.16b, v11.16b, #8 ext v12.16b, v12.16b, v12.16b, #8 ext v15.16b, v15.16b, v15.16b, #4 ext v16.16b, v16.16b, v16.16b, #4 ext v17.16b, v17.16b, v17.16b, #4 subs x6, x6, #1 b.hi Lseal_128_rounds add v0.4s, v0.4s, v24.4s add v1.4s, v1.4s, v24.4s add v2.4s, v2.4s, v24.4s add v5.4s, v5.4s, v28.4s add v6.4s, v6.4s, v28.4s add v7.4s, v7.4s, v28.4s // Only the first 32 bytes of the third block (counter = 0) are needed, // so skip updating v12 and v17. add v10.4s, v10.4s, v29.4s add v11.4s, v11.4s, v29.4s add v30.4s, v30.4s, v25.4s add v15.4s, v15.4s, v30.4s add v30.4s, v30.4s, v25.4s add v16.4s, v16.4s, v30.4s and v2.16b, v2.16b, v27.16b mov x16, v2.d[0] // Move the R key to GPRs mov x17, v2.d[1] mov v27.16b, v7.16b // Store the S key bl Lpoly_hash_ad_internal b Lseal_tail .cfi_endproc ///////////////////////////////// // // void chacha20_poly1305_open(uint8_t *pt, uint8_t *ct, size_t len_in, uint8_t *ad, size_t len_ad, union open_data *aead_data); // .globl chacha20_poly1305_open .def chacha20_poly1305_open .type 32 .endef .align 6 chacha20_poly1305_open: AARCH64_SIGN_LINK_REGISTER .cfi_startproc stp x29, x30, [sp, #-80]! .cfi_def_cfa_offset 80 .cfi_offset w30, -72 .cfi_offset w29, -80 mov x29, sp // We probably could do .cfi_def_cfa w29, 80 at this point, but since // we don't actually use the frame pointer like that, it's probably not // worth bothering. 
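// The open path mirrors seal: a single ChaCha20 block supplies the Poly1305 R and S keys,
// the AAD and then the ciphertext are hashed while up to 320 bytes are decrypted per
// main-loop iteration, and the computed tag is written back through x5 for the caller to
// verify against the received one.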
stp d8, d9, [sp, #16] stp d10, d11, [sp, #32] stp d12, d13, [sp, #48] stp d14, d15, [sp, #64] .cfi_offset b15, -8 .cfi_offset b14, -16 .cfi_offset b13, -24 .cfi_offset b12, -32 .cfi_offset b11, -40 .cfi_offset b10, -48 .cfi_offset b9, -56 .cfi_offset b8, -64 adrp x11, Lchacha20_consts add x11, x11, :lo12:Lchacha20_consts ld1 {v24.16b - v27.16b}, [x11] // Load the CONSTS, INC, ROL8 and CLAMP values ld1 {v28.16b - v30.16b}, [x5] mov x15, #1 // Prepare the Poly1305 state mov x8, #0 mov x9, #0 mov x10, #0 mov v31.d[0], x4 // Store the input and aad lengths mov v31.d[1], x2 cmp x2, #128 b.le Lopen_128 // Optimization for smaller buffers // Initially we prepare a single ChaCha20 block for the Poly1305 R and S keys mov v0.16b, v24.16b mov v5.16b, v28.16b mov v10.16b, v29.16b mov v15.16b, v30.16b mov x6, #10 .align 5 Lopen_init_rounds: add v0.4s, v0.4s, v5.4s eor v15.16b, v15.16b, v0.16b rev32 v15.8h, v15.8h add v10.4s, v10.4s, v15.4s eor v5.16b, v5.16b, v10.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 add v0.4s, v0.4s, v20.4s eor v15.16b, v15.16b, v0.16b tbl v15.16b, {v15.16b}, v26.16b add v10.4s, v10.4s, v15.4s eor v20.16b, v20.16b, v10.16b ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #4 ext v10.16b, v10.16b, v10.16b, #8 ext v15.16b, v15.16b, v15.16b, #12 add v0.4s, v0.4s, v5.4s eor v15.16b, v15.16b, v0.16b rev32 v15.8h, v15.8h add v10.4s, v10.4s, v15.4s eor v5.16b, v5.16b, v10.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 add v0.4s, v0.4s, v20.4s eor v15.16b, v15.16b, v0.16b tbl v15.16b, {v15.16b}, v26.16b add v10.4s, v10.4s, v15.4s eor v20.16b, v20.16b, v10.16b ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #12 ext v10.16b, v10.16b, v10.16b, #8 ext v15.16b, v15.16b, v15.16b, #4 subs x6, x6, #1 b.hi Lopen_init_rounds add v0.4s, v0.4s, v24.4s add v5.4s, v5.4s, v28.4s and v0.16b, v0.16b, v27.16b mov x16, v0.d[0] // Move the R key to GPRs mov x17, v0.d[1] mov v27.16b, v5.16b // Store the S key bl Lpoly_hash_ad_internal Lopen_ad_done: mov x3, x1 // Each iteration of the loop hash 320 bytes, and prepare stream for 320 bytes Lopen_main_loop: cmp x2, #192 b.lt Lopen_tail adrp x11, Lchacha20_consts add x11, x11, :lo12:Lchacha20_consts ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11] mov v4.16b, v24.16b ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16 mov v9.16b, v28.16b ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16 mov v14.16b, v29.16b ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5] sub x5, x5, #32 add v15.4s, v15.4s, v25.4s mov v19.16b, v30.16b eor v20.16b, v20.16b, v20.16b //zero not v21.16b, v20.16b // -1 sub v21.4s, v25.4s, v21.4s // Add +1 ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter) add v19.4s, v19.4s, v20.4s lsr x4, x2, #4 // How many whole blocks we have to hash, will always be at least 12 sub x4, x4, #10 mov x7, #10 subs x6, x7, x4 subs x6, x7, x4 // itr1 can be negative if we have more than 320 bytes to hash csel x7, x7, x4, le // if itr1 is zero or less, itr2 should be 10 to indicate all 10 rounds are full cbz x7, Lopen_main_loop_rounds_short .align 5 Lopen_main_loop_rounds: ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, 
xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most Lopen_main_loop_rounds_short: add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s add v3.4s, v3.4s, v8.4s add v4.4s, v4.4s, v9.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b eor v18.16b, v18.16b, v3.16b eor v19.16b, v19.16b, v4.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h rev32 v18.8h, v18.8h rev32 v19.8h, v19.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s add v13.4s, v13.4s, v18.4s add v14.4s, v14.4s, v19.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b eor v8.16b, v8.16b, v13.16b eor v9.16b, v9.16b, v14.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 ushr v7.4s, v8.4s, #20 sli v7.4s, v8.4s, #12 ushr v8.4s, v9.4s, #20 sli v8.4s, v9.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s add v3.4s, v3.4s, v7.4s add v4.4s, v4.4s, v8.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b eor v18.16b, v18.16b, v3.16b eor v19.16b, v19.16b, v4.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b tbl v18.16b, {v18.16b}, v26.16b tbl v19.16b, {v19.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s add v13.4s, v13.4s, v18.4s add v14.4s, v14.4s, v19.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b eor v7.16b, v7.16b, v13.16b eor v8.16b, v8.16b, v14.16b ushr v9.4s, v8.4s, #25 sli v9.4s, v8.4s, #7 ushr v8.4s, v7.4s, #25 sli v8.4s, v7.4s, #7 ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v9.16b, v9.16b, v9.16b, #4 ext v14.16b, v14.16b, v14.16b, #8 ext v19.16b, v19.16b, v19.16b, #12 ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most add v0.4s, v0.4s, v6.4s add v1.4s, v1.4s, v7.4s add v2.4s, v2.4s, v8.4s add v3.4s, v3.4s, v5.4s add v4.4s, v4.4s, v9.4s eor v18.16b, v18.16b, v0.16b eor v15.16b, v15.16b, v1.16b eor v16.16b, v16.16b, v2.16b eor v17.16b, v17.16b, v3.16b eor v19.16b, v19.16b, v4.16b rev32 v18.8h, v18.8h rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h rev32 v19.8h, v19.8h add v12.4s, v12.4s, v18.4s add v13.4s, v13.4s, v15.4s add v10.4s, v10.4s, v16.4s add v11.4s, v11.4s, v17.4s add v14.4s, v14.4s, v19.4s eor v6.16b, v6.16b, 
v12.16b eor v7.16b, v7.16b, v13.16b eor v8.16b, v8.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v9.16b, v9.16b, v14.16b ushr v20.4s, v6.4s, #20 sli v20.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 ushr v7.4s, v8.4s, #20 sli v7.4s, v8.4s, #12 ushr v8.4s, v5.4s, #20 sli v8.4s, v5.4s, #12 ushr v5.4s, v9.4s, #20 sli v5.4s, v9.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s add v3.4s, v3.4s, v8.4s add v4.4s, v4.4s, v5.4s eor v18.16b, v18.16b, v0.16b eor v15.16b, v15.16b, v1.16b eor v16.16b, v16.16b, v2.16b eor v17.16b, v17.16b, v3.16b eor v19.16b, v19.16b, v4.16b tbl v18.16b, {v18.16b}, v26.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b tbl v19.16b, {v19.16b}, v26.16b add v12.4s, v12.4s, v18.4s add v13.4s, v13.4s, v15.4s add v10.4s, v10.4s, v16.4s add v11.4s, v11.4s, v17.4s add v14.4s, v14.4s, v19.4s eor v20.16b, v20.16b, v12.16b eor v6.16b, v6.16b, v13.16b eor v7.16b, v7.16b, v10.16b eor v8.16b, v8.16b, v11.16b eor v5.16b, v5.16b, v14.16b ushr v9.4s, v5.4s, #25 sli v9.4s, v5.4s, #7 ushr v5.4s, v8.4s, #25 sli v5.4s, v8.4s, #7 ushr v8.4s, v7.4s, #25 sli v8.4s, v7.4s, #7 ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v20.4s, #25 sli v6.4s, v20.4s, #7 ext v9.16b, v9.16b, v9.16b, #12 ext v14.16b, v14.16b, v14.16b, #8 ext v19.16b, v19.16b, v19.16b, #4 subs x7, x7, #1 b.gt Lopen_main_loop_rounds subs x6, x6, #1 b.ge Lopen_main_loop_rounds_short eor v20.16b, v20.16b, v20.16b //zero not v21.16b, v20.16b // -1 sub v21.4s, v25.4s, v21.4s // Add +1 ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter) add v19.4s, v19.4s, v20.4s add v15.4s, v15.4s, v25.4s mov x11, #5 dup v20.4s, w11 add v25.4s, v25.4s, v20.4s zip1 v20.4s, v0.4s, v1.4s zip2 v21.4s, v0.4s, v1.4s zip1 v22.4s, v2.4s, v3.4s zip2 v23.4s, v2.4s, v3.4s zip1 v0.2d, v20.2d, v22.2d zip2 v1.2d, v20.2d, v22.2d zip1 v2.2d, v21.2d, v23.2d zip2 v3.2d, v21.2d, v23.2d zip1 v20.4s, v5.4s, v6.4s zip2 v21.4s, v5.4s, v6.4s zip1 v22.4s, v7.4s, v8.4s zip2 v23.4s, v7.4s, v8.4s zip1 v5.2d, v20.2d, v22.2d zip2 v6.2d, v20.2d, v22.2d zip1 v7.2d, v21.2d, v23.2d zip2 v8.2d, v21.2d, v23.2d zip1 v20.4s, v10.4s, v11.4s zip2 v21.4s, v10.4s, v11.4s zip1 v22.4s, v12.4s, v13.4s zip2 v23.4s, v12.4s, v13.4s zip1 v10.2d, v20.2d, v22.2d zip2 v11.2d, v20.2d, v22.2d zip1 v12.2d, v21.2d, v23.2d zip2 v13.2d, v21.2d, v23.2d zip1 v20.4s, v15.4s, v16.4s zip2 v21.4s, v15.4s, v16.4s zip1 v22.4s, v17.4s, v18.4s zip2 v23.4s, v17.4s, v18.4s zip1 v15.2d, v20.2d, v22.2d zip2 v16.2d, v20.2d, v22.2d zip1 v17.2d, v21.2d, v23.2d zip2 v18.2d, v21.2d, v23.2d add v0.4s, v0.4s, v24.4s add v5.4s, v5.4s, v28.4s add v10.4s, v10.4s, v29.4s add v15.4s, v15.4s, v30.4s add v1.4s, v1.4s, v24.4s add v6.4s, v6.4s, v28.4s add v11.4s, v11.4s, v29.4s add v16.4s, v16.4s, v30.4s add v2.4s, v2.4s, v24.4s add v7.4s, v7.4s, v28.4s add v12.4s, v12.4s, v29.4s add v17.4s, v17.4s, v30.4s add v3.4s, v3.4s, v24.4s add v8.4s, v8.4s, v28.4s add v13.4s, v13.4s, v29.4s add v18.4s, v18.4s, v30.4s add v4.4s, v4.4s, v24.4s add v9.4s, v9.4s, v28.4s add v14.4s, v14.4s, v29.4s add v19.4s, v19.4s, v30.4s // We can always safely store 192 bytes ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v0.16b eor v21.16b, v21.16b, v5.16b eor v22.16b, v22.16b, v10.16b eor v23.16b, v23.16b, v15.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v1.16b eor v21.16b, v21.16b, v6.16b eor v22.16b, v22.16b, v11.16b eor v23.16b, v23.16b, v16.16b st1 {v20.16b - v23.16b}, [x0], #64 
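// 128 of the guaranteed 192 bytes have been written; one more full block follows before
// the remaining length decides whether the fourth and fifth keystream blocks are needed.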
ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v2.16b eor v21.16b, v21.16b, v7.16b eor v22.16b, v22.16b, v12.16b eor v23.16b, v23.16b, v17.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #192 mov v0.16b, v3.16b mov v5.16b, v8.16b mov v10.16b, v13.16b mov v15.16b, v18.16b cmp x2, #64 b.lt Lopen_tail_64_store ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v3.16b eor v21.16b, v21.16b, v8.16b eor v22.16b, v22.16b, v13.16b eor v23.16b, v23.16b, v18.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #64 mov v0.16b, v4.16b mov v5.16b, v9.16b mov v10.16b, v14.16b mov v15.16b, v19.16b cmp x2, #64 b.lt Lopen_tail_64_store ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v4.16b eor v21.16b, v21.16b, v9.16b eor v22.16b, v22.16b, v14.16b eor v23.16b, v23.16b, v19.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #64 b Lopen_main_loop Lopen_tail: cbz x2, Lopen_finalize lsr x4, x2, #4 // How many whole blocks we have to hash cmp x2, #64 b.le Lopen_tail_64 cmp x2, #128 b.le Lopen_tail_128 Lopen_tail_192: // We need three more blocks mov v0.16b, v24.16b mov v1.16b, v24.16b mov v2.16b, v24.16b mov v5.16b, v28.16b mov v6.16b, v28.16b mov v7.16b, v28.16b mov v10.16b, v29.16b mov v11.16b, v29.16b mov v12.16b, v29.16b mov v15.16b, v30.16b mov v16.16b, v30.16b mov v17.16b, v30.16b eor v23.16b, v23.16b, v23.16b eor v21.16b, v21.16b, v21.16b ins v23.s[0], v25.s[0] ins v21.d[0], x15 add v22.4s, v23.4s, v21.4s add v21.4s, v22.4s, v21.4s add v15.4s, v15.4s, v21.4s add v16.4s, v16.4s, v23.4s add v17.4s, v17.4s, v22.4s mov x7, #10 subs x6, x7, x4 // itr1 can be negative if we have more than 160 bytes to hash csel x7, x7, x4, le // if itr1 is zero or less, itr2 should be 10 to indicate all 10 rounds are hashing sub x4, x4, x7 cbz x7, Lopen_tail_192_rounds_no_hash Lopen_tail_192_rounds: ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most Lopen_tail_192_rounds_no_hash: add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor 
v6.16b, v6.16b, v12.16b ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #4 ext v6.16b, v6.16b, v6.16b, #4 ext v7.16b, v7.16b, v7.16b, #4 ext v10.16b, v10.16b, v10.16b, #8 ext v11.16b, v11.16b, v11.16b, #8 ext v12.16b, v12.16b, v12.16b, #8 ext v15.16b, v15.16b, v15.16b, #12 ext v16.16b, v16.16b, v16.16b, #12 ext v17.16b, v17.16b, v17.16b, #12 add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #12 ext v6.16b, v6.16b, v6.16b, #12 ext v7.16b, v7.16b, v7.16b, #12 ext v10.16b, v10.16b, v10.16b, #8 ext v11.16b, v11.16b, v11.16b, #8 ext v12.16b, v12.16b, v12.16b, #8 ext v15.16b, v15.16b, v15.16b, #4 ext v16.16b, v16.16b, v16.16b, #4 ext v17.16b, v17.16b, v17.16b, #4 subs x7, x7, #1 b.gt Lopen_tail_192_rounds subs x6, x6, #1 b.ge Lopen_tail_192_rounds_no_hash // We hashed 160 bytes at most, may still have 32 bytes left Lopen_tail_192_hash: cbz x4, Lopen_tail_192_hash_done ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most sub x4, x4, #1 b Lopen_tail_192_hash Lopen_tail_192_hash_done: add v0.4s, v0.4s, v24.4s add v1.4s, v1.4s, v24.4s add v2.4s, v2.4s, v24.4s add v5.4s, v5.4s, v28.4s add v6.4s, v6.4s, v28.4s add v7.4s, v7.4s, v28.4s add v10.4s, v10.4s, v29.4s add v11.4s, v11.4s, v29.4s add v12.4s, v12.4s, v29.4s add v15.4s, v15.4s, v30.4s add v16.4s, v16.4s, v30.4s add v17.4s, v17.4s, v30.4s add v15.4s, v15.4s, v21.4s add v16.4s, v16.4s, v23.4s add v17.4s, v17.4s, v22.4s ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v1.16b eor v21.16b, v21.16b, v6.16b eor v22.16b, v22.16b, v11.16b eor v23.16b, v23.16b, v16.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v2.16b eor v21.16b, 
v21.16b, v7.16b eor v22.16b, v22.16b, v12.16b eor v23.16b, v23.16b, v17.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #128 b Lopen_tail_64_store Lopen_tail_128: // We need two more blocks mov v0.16b, v24.16b mov v1.16b, v24.16b mov v5.16b, v28.16b mov v6.16b, v28.16b mov v10.16b, v29.16b mov v11.16b, v29.16b mov v15.16b, v30.16b mov v16.16b, v30.16b eor v23.16b, v23.16b, v23.16b eor v22.16b, v22.16b, v22.16b ins v23.s[0], v25.s[0] ins v22.d[0], x15 add v22.4s, v22.4s, v23.4s add v15.4s, v15.4s, v22.4s add v16.4s, v16.4s, v23.4s mov x6, #10 sub x6, x6, x4 Lopen_tail_128_rounds: add v0.4s, v0.4s, v5.4s eor v15.16b, v15.16b, v0.16b rev32 v15.8h, v15.8h add v10.4s, v10.4s, v15.4s eor v5.16b, v5.16b, v10.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 add v0.4s, v0.4s, v20.4s eor v15.16b, v15.16b, v0.16b tbl v15.16b, {v15.16b}, v26.16b add v10.4s, v10.4s, v15.4s eor v20.16b, v20.16b, v10.16b ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #4 ext v10.16b, v10.16b, v10.16b, #8 ext v15.16b, v15.16b, v15.16b, #12 add v1.4s, v1.4s, v6.4s eor v16.16b, v16.16b, v1.16b rev32 v16.8h, v16.8h add v11.4s, v11.4s, v16.4s eor v6.16b, v6.16b, v11.16b ushr v20.4s, v6.4s, #20 sli v20.4s, v6.4s, #12 add v1.4s, v1.4s, v20.4s eor v16.16b, v16.16b, v1.16b tbl v16.16b, {v16.16b}, v26.16b add v11.4s, v11.4s, v16.4s eor v20.16b, v20.16b, v11.16b ushr v6.4s, v20.4s, #25 sli v6.4s, v20.4s, #7 ext v6.16b, v6.16b, v6.16b, #4 ext v11.16b, v11.16b, v11.16b, #8 ext v16.16b, v16.16b, v16.16b, #12 add v0.4s, v0.4s, v5.4s eor v15.16b, v15.16b, v0.16b rev32 v15.8h, v15.8h add v10.4s, v10.4s, v15.4s eor v5.16b, v5.16b, v10.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 add v0.4s, v0.4s, v20.4s eor v15.16b, v15.16b, v0.16b tbl v15.16b, {v15.16b}, v26.16b add v10.4s, v10.4s, v15.4s eor v20.16b, v20.16b, v10.16b ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #12 ext v10.16b, v10.16b, v10.16b, #8 ext v15.16b, v15.16b, v15.16b, #4 add v1.4s, v1.4s, v6.4s eor v16.16b, v16.16b, v1.16b rev32 v16.8h, v16.8h add v11.4s, v11.4s, v16.4s eor v6.16b, v6.16b, v11.16b ushr v20.4s, v6.4s, #20 sli v20.4s, v6.4s, #12 add v1.4s, v1.4s, v20.4s eor v16.16b, v16.16b, v1.16b tbl v16.16b, {v16.16b}, v26.16b add v11.4s, v11.4s, v16.4s eor v20.16b, v20.16b, v11.16b ushr v6.4s, v20.4s, #25 sli v6.4s, v20.4s, #7 ext v6.16b, v6.16b, v6.16b, #12 ext v11.16b, v11.16b, v11.16b, #8 ext v16.16b, v16.16b, v16.16b, #4 subs x6, x6, #1 b.gt Lopen_tail_128_rounds cbz x4, Lopen_tail_128_rounds_done subs x4, x4, #1 ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most b Lopen_tail_128_rounds Lopen_tail_128_rounds_done: add v0.4s, v0.4s, v24.4s add v1.4s, v1.4s, v24.4s add v5.4s, v5.4s, v28.4s add v6.4s, v6.4s, v28.4s add v10.4s, v10.4s, v29.4s add v11.4s, v11.4s, v29.4s add v15.4s, v15.4s, 
v30.4s add v16.4s, v16.4s, v30.4s add v15.4s, v15.4s, v22.4s add v16.4s, v16.4s, v23.4s ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v1.16b eor v21.16b, v21.16b, v6.16b eor v22.16b, v22.16b, v11.16b eor v23.16b, v23.16b, v16.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #64 b Lopen_tail_64_store Lopen_tail_64: // We just need a single block mov v0.16b, v24.16b mov v5.16b, v28.16b mov v10.16b, v29.16b mov v15.16b, v30.16b eor v23.16b, v23.16b, v23.16b ins v23.s[0], v25.s[0] add v15.4s, v15.4s, v23.4s mov x6, #10 sub x6, x6, x4 Lopen_tail_64_rounds: add v0.4s, v0.4s, v5.4s eor v15.16b, v15.16b, v0.16b rev32 v15.8h, v15.8h add v10.4s, v10.4s, v15.4s eor v5.16b, v5.16b, v10.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 add v0.4s, v0.4s, v20.4s eor v15.16b, v15.16b, v0.16b tbl v15.16b, {v15.16b}, v26.16b add v10.4s, v10.4s, v15.4s eor v20.16b, v20.16b, v10.16b ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #4 ext v10.16b, v10.16b, v10.16b, #8 ext v15.16b, v15.16b, v15.16b, #12 add v0.4s, v0.4s, v5.4s eor v15.16b, v15.16b, v0.16b rev32 v15.8h, v15.8h add v10.4s, v10.4s, v15.4s eor v5.16b, v5.16b, v10.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 add v0.4s, v0.4s, v20.4s eor v15.16b, v15.16b, v0.16b tbl v15.16b, {v15.16b}, v26.16b add v10.4s, v10.4s, v15.4s eor v20.16b, v20.16b, v10.16b ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #12 ext v10.16b, v10.16b, v10.16b, #8 ext v15.16b, v15.16b, v15.16b, #4 subs x6, x6, #1 b.gt Lopen_tail_64_rounds cbz x4, Lopen_tail_64_rounds_done subs x4, x4, #1 ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most b Lopen_tail_64_rounds Lopen_tail_64_rounds_done: add v0.4s, v0.4s, v24.4s add v5.4s, v5.4s, v28.4s add v10.4s, v10.4s, v29.4s add v15.4s, v15.4s, v30.4s add v15.4s, v15.4s, v23.4s Lopen_tail_64_store: cmp x2, #16 b.lt Lopen_tail_16 ld1 {v20.16b}, [x1], #16 eor v20.16b, v20.16b, v0.16b st1 {v20.16b}, [x0], #16 mov v0.16b, v5.16b mov v5.16b, v10.16b mov v10.16b, v15.16b sub x2, x2, #16 b Lopen_tail_64_store Lopen_tail_16: // Here we handle the last [0,16) bytes that require a padded block cbz x2, Lopen_finalize eor v20.16b, v20.16b, v20.16b // Use T0 to load the ciphertext eor v21.16b, v21.16b, v21.16b // Use T1 to generate an AND mask not v22.16b, v20.16b add x7, x1, x2 mov x6, x2 Lopen_tail_16_compose: ext v20.16b, v20.16b, v20.16b, #15 ldrb w11, [x7, #-1]! 
mov v20.b[0], w11 ext v21.16b, v22.16b, v21.16b, #15 subs x2, x2, #1 b.gt Lopen_tail_16_compose and v20.16b, v20.16b, v21.16b // Hash in the final padded block mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most eor v20.16b, v20.16b, v0.16b Lopen_tail_16_store: umov w11, v20.b[0] strb w11, [x0], #1 ext v20.16b, v20.16b, v20.16b, #1 subs x6, x6, #1 b.gt Lopen_tail_16_store Lopen_finalize: mov x11, v31.d[0] mov x12, v31.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most // Final reduction step sub x12, xzr, x15 orr x13, xzr, #3 subs x11, x8, #-5 sbcs x12, x9, x12 sbcs x13, x10, x13 csel x8, x11, x8, cs csel x9, x12, x9, cs csel x10, x13, x10, cs mov x11, v27.d[0] mov x12, v27.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 stp x8, x9, [x5] ldp d8, d9, [sp, #16] ldp d10, d11, [sp, #32] ldp d12, d13, [sp, #48] ldp d14, d15, [sp, #64] .cfi_restore b15 .cfi_restore b14 .cfi_restore b13 .cfi_restore b12 .cfi_restore b11 .cfi_restore b10 .cfi_restore b9 .cfi_restore b8 ldp x29, x30, [sp], 80 .cfi_restore w29 .cfi_restore w30 .cfi_def_cfa_offset 0 AARCH64_VALIDATE_LINK_REGISTER ret Lopen_128: // On some architectures preparing 5 blocks for small buffers is wasteful eor v25.16b, v25.16b, v25.16b mov x11, #1 mov v25.s[0], w11 mov v0.16b, v24.16b mov v1.16b, v24.16b mov v2.16b, v24.16b mov v5.16b, v28.16b mov v6.16b, v28.16b mov v7.16b, v28.16b mov v10.16b, v29.16b mov v11.16b, v29.16b mov v12.16b, v29.16b mov v17.16b, v30.16b add v15.4s, v17.4s, v25.4s add v16.4s, v15.4s, v25.4s mov x6, #10 Lopen_128_rounds: add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, 
v5.4s add v2.4s, v2.4s, v6.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #4 ext v6.16b, v6.16b, v6.16b, #4 ext v7.16b, v7.16b, v7.16b, #4 ext v10.16b, v10.16b, v10.16b, #8 ext v11.16b, v11.16b, v11.16b, #8 ext v12.16b, v12.16b, v12.16b, #8 ext v15.16b, v15.16b, v15.16b, #12 ext v16.16b, v16.16b, v16.16b, #12 ext v17.16b, v17.16b, v17.16b, #12 add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #12 ext v6.16b, v6.16b, v6.16b, #12 ext v7.16b, v7.16b, v7.16b, #12 ext v10.16b, v10.16b, v10.16b, #8 ext v11.16b, v11.16b, v11.16b, #8 ext v12.16b, v12.16b, v12.16b, #8 ext v15.16b, v15.16b, v15.16b, #4 ext v16.16b, v16.16b, v16.16b, #4 ext v17.16b, v17.16b, v17.16b, #4 subs x6, x6, #1 b.hi Lopen_128_rounds add v0.4s, v0.4s, v24.4s add v1.4s, v1.4s, v24.4s add v2.4s, v2.4s, v24.4s add v5.4s, v5.4s, v28.4s add v6.4s, v6.4s, v28.4s add v7.4s, v7.4s, v28.4s add v10.4s, v10.4s, v29.4s add v11.4s, v11.4s, v29.4s add v30.4s, v30.4s, v25.4s add v15.4s, v15.4s, v30.4s add v30.4s, v30.4s, v25.4s add v16.4s, v16.4s, v30.4s and v2.16b, v2.16b, v27.16b mov x16, v2.d[0] // Move the R key to GPRs mov x17, v2.d[1] mov v27.16b, v7.16b // Store the S key bl Lpoly_hash_ad_internal Lopen_128_store: cmp x2, #64 b.lt Lopen_128_store_64 ld1 {v20.16b - v23.16b}, [x1], #64 mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 
has the value of 4 at most mov x11, v21.d[0] mov x12, v21.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most mov x11, v22.d[0] mov x12, v22.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most mov x11, v23.d[0] mov x12, v23.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most eor v20.16b, v20.16b, v0.16b eor v21.16b, v21.16b, v5.16b eor v22.16b, v22.16b, v10.16b eor v23.16b, v23.16b, v15.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #64 mov v0.16b, v1.16b mov v5.16b, v6.16b mov v10.16b, v11.16b mov v15.16b, v16.16b Lopen_128_store_64: lsr x4, x2, #4 mov x3, x1 Lopen_128_hash_64: cbz x4, Lopen_tail_64_store ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most sub x4, x4, #1 b 
Lopen_128_hash_64
.cfi_endproc
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
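The Poly1305 sections of the file above repeat one pattern: absorb a 16-byte block into the 130-bit accumulator held in x8/x9/x10, multiply by the clamped key half in x16/x17, and fold everything above 2^130 back in multiplied by 5, exactly as the "[t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]" and "acc2 is 2 bits at most" comments describe. The sketch below is a rough functional model of that single step, written with u128 intermediates instead of the mul/umulh/adds/adcs ladders; the name poly1305_block, the limb layout, and the hibit parameter are illustrative and are not ring's actual API.

// Illustrative sketch of one Poly1305 block step (not ring's implementation).
// Assumes r = (r0, r1) is already clamped as Poly1305 requires, so the
// schoolbook products below cannot overflow u128.
// acc = three 64-bit limbs of the accumulator (acc[2] stays only a few bits wide),
// hibit = 1 for full 16-byte blocks, 0 for the padded final block.
fn poly1305_block(acc: &mut [u64; 3], block: &[u8; 16], r: [u64; 2], hibit: u64) {
    // acc += block || hibit  (the adds/adcs/adc triple in the assembly)
    let b0 = u64::from_le_bytes(block[0..8].try_into().unwrap()) as u128;
    let b1 = u64::from_le_bytes(block[8..16].try_into().unwrap()) as u128;
    let mut c = acc[0] as u128 + b0;
    acc[0] = c as u64;
    c = acc[1] as u128 + b1 + (c >> 64);
    acc[1] = c as u64;
    acc[2] = acc[2] + hibit + (c >> 64) as u64;

    // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]  (the mul/umulh ladder)
    let (a0, a1, a2) = (acc[0] as u128, acc[1] as u128, acc[2] as u128);
    let (r0, r1) = (r[0] as u128, r[1] as u128);
    let d0 = a0 * r0;
    let d1 = a1 * r0 + a0 * r1 + (d0 >> 64);
    let d2 = a2 * r0 + a1 * r1 + (d1 >> 64);
    let d3 = a2 * r1 + (d2 >> 64);
    let (t0, t1, t2, t3) = (d0 as u64, d1 as u64, d2 as u64, d3 as u64);

    // Reduce mod 2^130 - 5: keep the low 130 bits, add 5 * (bits above 130).
    let g = ((t3 as u128) << 62) | ((t2 as u128) >> 2); // t >> 130
    let g5 = g * 5;
    c = t0 as u128 + (g5 as u64) as u128;
    acc[0] = c as u64;
    c = t1 as u128 + (g5 >> 64) + (c >> 64);
    acc[1] = c as u64;
    acc[2] = (t2 & 3) + (c >> 64) as u64; // stays small, matching the "value of 4 at most" comment
}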
fatiimajamiil/rustpad-custom
82,176
.cargo/registry/src/index.crates.io-6f17d22bba15001f/ring-0.17.14/pregenerated/aesv8-gcm-armv8-win64.S
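The content that follows is ring's pregenerated aes_gcm_enc_kernel / aes_gcm_dec_kernel for AArch64 Windows targets: four AES-CTR blocks are kept in flight per iteration while the GHASH state is folded in (the interleaved "AES block n - round m" and "GHASH block 4k+... - low/mid/high" comments track the two streams). On the CTR side, the only per-block state change is the 32-bit big-endian counter in the last word of the counter block, which the rev / add / rev / orr ... lsl #32 sequences maintain in GPRs (the "rev_ctr32" and "CTR block n" comments). A minimal sketch of that counter bookkeeping is below; the name ctr32_block is illustrative, not ring's API.

// Illustrative only: form the n-th GCM counter block by bumping the
// big-endian 32-bit counter held in bytes 12..16 of the counter block,
// which is what the kernel's rev/add/rev/orr sequences compute in registers.
fn ctr32_block(j0: &[u8; 16], n: u32) -> [u8; 16] {
    let mut block = *j0;
    let ctr = u32::from_be_bytes(block[12..16].try_into().unwrap()).wrapping_add(n);
    block[12..16].copy_from_slice(&ctr.to_be_bytes());
    block
}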
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) #if __ARM_MAX_ARCH__ >= 8 .arch armv8-a+crypto .text .globl aes_gcm_enc_kernel .def aes_gcm_enc_kernel .type 32 .endef .align 4 aes_gcm_enc_kernel: AARCH64_SIGN_LINK_REGISTER stp x29, x30, [sp, #-128]! mov x29, sp stp x19, x20, [sp, #16] mov x16, x4 mov x8, x5 stp x21, x22, [sp, #32] stp x23, x24, [sp, #48] stp d8, d9, [sp, #64] stp d10, d11, [sp, #80] stp d12, d13, [sp, #96] stp d14, d15, [sp, #112] ldr w17, [x8, #240] add x19, x8, x17, lsl #4 // borrow input_l1 for last key ldp x13, x14, [x19] // load round N keys ldr q31, [x19, #-16] // load round N-1 keys add x4, x0, x1, lsr #3 // end_input_ptr lsr x5, x1, #3 // byte_len mov x15, x5 ldp x10, x11, [x16] // ctr96_b64, ctr96_t32 ld1 { v0.16b}, [x16] // special case vector load initial counter so we can start first AES block as quickly as possible sub x5, x5, #1 // byte_len - 1 ldr q18, [x8, #0] // load rk0 and x5, x5, #0xffffffffffffffc0 // number of bytes to be processed in main loop (at least 1 byte must be handled by tail) ldr q25, [x8, #112] // load rk7 add x5, x5, x0 lsr x12, x11, #32 fmov d2, x10 // CTR block 2 orr w11, w11, w11 rev w12, w12 // rev_ctr32 fmov d1, x10 // CTR block 1 aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 0 - round 0 add w12, w12, #1 // increment rev_ctr32 rev w9, w12 // CTR block 1 fmov d3, x10 // CTR block 3 orr x9, x11, x9, lsl #32 // CTR block 1 add w12, w12, #1 // CTR block 1 ldr q19, [x8, #16] // load rk1 fmov v1.d[1], x9 // CTR block 1 rev w9, w12 // CTR block 2 add w12, w12, #1 // CTR block 2 orr x9, x11, x9, lsl #32 // CTR block 2 ldr q20, [x8, #32] // load rk2 fmov v2.d[1], x9 // CTR block 2 rev w9, w12 // CTR block 3 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 0 - round 1 orr x9, x11, x9, lsl #32 // CTR block 3 fmov v3.d[1], x9 // CTR block 3 aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 1 - round 0 ldr q21, [x8, #48] // load rk3 aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 0 - round 2 ldr q24, [x8, #96] // load rk6 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 2 - round 0 ldr q23, [x8, #80] // load rk5 aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 1 - round 1 ldr q14, [x6, #48] // load h3l | h3h ext v14.16b, v14.16b, v14.16b, #8 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 3 - round 0 aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 2 - round 1 ldr q22, [x8, #64] // load rk4 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 1 - round 2 ldr q13, [x6, #32] // load h2l | h2h ext v13.16b, v13.16b, v13.16b, #8 aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 3 - round 1 ldr q30, [x8, #192] // load rk12 aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 2 - round 2 ldr q15, [x6, #80] // load h4l | h4h ext v15.16b, v15.16b, v15.16b, #8 aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 1 - round 3 ldr q29, [x8, #176] // load rk11 aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 3 - round 2 ldr q26, [x8, #128] // load rk8 aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 2 - round 3 add w12, w12, #1 // CTR block 3 aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 0 - round 3 aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 3 - round 3 ld1 { v11.16b}, [x3] ext v11.16b, v11.16b, v11.16b, #8 rev64 v11.16b, v11.16b aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 2 - round 4 
aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 0 - round 4 aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 1 - round 4 aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 3 - round 4 cmp x17, #12 // setup flags for AES-128/192/256 check aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 0 - round 5 aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 1 - round 5 aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 3 - round 5 aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 2 - round 5 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 1 - round 6 trn2 v17.2d, v14.2d, v15.2d // h4l | h3l aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 3 - round 6 ldr q27, [x8, #144] // load rk9 aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 0 - round 6 ldr q12, [x6] // load h1l | h1h ext v12.16b, v12.16b, v12.16b, #8 aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 2 - round 6 ldr q28, [x8, #160] // load rk10 aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 1 - round 7 trn1 v9.2d, v14.2d, v15.2d // h4h | h3h aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 0 - round 7 aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 2 - round 7 aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 3 - round 7 trn2 v16.2d, v12.2d, v13.2d // h2l | h1l aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 1 - round 8 aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 2 - round 8 aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 3 - round 8 aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 0 - round 8 b.lt Lenc_finish_first_blocks // branch if AES-128 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 1 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 2 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 3 - round 9 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 0 - round 9 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 1 - round 10 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 2 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 3 - round 10 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 0 - round 10 b.eq Lenc_finish_first_blocks // branch if AES-192 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 1 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 2 - round 11 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 0 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 3 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 1 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 2 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 0 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 3 - round 12 Lenc_finish_first_blocks: cmp x0, x5 // check if we have <= 4 blocks eor v17.16b, v17.16b, v9.16b // h4k | h3k aese v2.16b, v31.16b // AES block 2 - round N-1 trn1 v8.2d, v12.2d, v13.2d // h2h | h1h aese v1.16b, v31.16b // AES block 1 - round N-1 aese v0.16b, v31.16b // AES block 0 - round N-1 aese v3.16b, v31.16b // AES block 3 - round N-1 eor v16.16b, v16.16b, v8.16b // h2k | h1k b.ge Lenc_tail // handle tail ldp x19, x20, [x0, #16] // AES block 1 - load plaintext rev w9, w12 // CTR block 4 ldp x6, x7, [x0, #0] // AES block 0 - load plaintext ldp x23, x24, [x0, #48] // AES block 3 - load plaintext ldp x21, x22, [x0, #32] // AES block 2 - load plaintext add x0, x0, #64 // AES input_ptr update eor x19, x19, x13 // AES block 1 - round N low eor x20, x20, x14 // AES block 1 - 
round N high fmov d5, x19 // AES block 1 - mov low eor x6, x6, x13 // AES block 0 - round N low eor x7, x7, x14 // AES block 0 - round N high eor x24, x24, x14 // AES block 3 - round N high fmov d4, x6 // AES block 0 - mov low cmp x0, x5 // check if we have <= 8 blocks fmov v4.d[1], x7 // AES block 0 - mov high eor x23, x23, x13 // AES block 3 - round N low eor x21, x21, x13 // AES block 2 - round N low fmov v5.d[1], x20 // AES block 1 - mov high fmov d6, x21 // AES block 2 - mov low add w12, w12, #1 // CTR block 4 orr x9, x11, x9, lsl #32 // CTR block 4 fmov d7, x23 // AES block 3 - mov low eor x22, x22, x14 // AES block 2 - round N high fmov v6.d[1], x22 // AES block 2 - mov high eor v4.16b, v4.16b, v0.16b // AES block 0 - result fmov d0, x10 // CTR block 4 fmov v0.d[1], x9 // CTR block 4 rev w9, w12 // CTR block 5 add w12, w12, #1 // CTR block 5 eor v5.16b, v5.16b, v1.16b // AES block 1 - result fmov d1, x10 // CTR block 5 orr x9, x11, x9, lsl #32 // CTR block 5 fmov v1.d[1], x9 // CTR block 5 rev w9, w12 // CTR block 6 st1 { v4.16b}, [x2], #16 // AES block 0 - store result fmov v7.d[1], x24 // AES block 3 - mov high orr x9, x11, x9, lsl #32 // CTR block 6 eor v6.16b, v6.16b, v2.16b // AES block 2 - result st1 { v5.16b}, [x2], #16 // AES block 1 - store result add w12, w12, #1 // CTR block 6 fmov d2, x10 // CTR block 6 fmov v2.d[1], x9 // CTR block 6 st1 { v6.16b}, [x2], #16 // AES block 2 - store result rev w9, w12 // CTR block 7 orr x9, x11, x9, lsl #32 // CTR block 7 eor v7.16b, v7.16b, v3.16b // AES block 3 - result st1 { v7.16b}, [x2], #16 // AES block 3 - store result b.ge Lenc_prepretail // do prepretail Lenc_main_loop: // main loop start aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 0 rev64 v4.16b, v4.16b // GHASH block 4k (only t0 is free) aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 0 fmov d3, x10 // CTR block 4k+3 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 0 ext v11.16b, v11.16b, v11.16b, #8 // PRE 0 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 1 fmov v3.d[1], x9 // CTR block 4k+3 aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 1 ldp x23, x24, [x0, #48] // AES block 4k+7 - load plaintext aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 1 ldp x21, x22, [x0, #32] // AES block 4k+6 - load plaintext aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 2 eor v4.16b, v4.16b, v11.16b // PRE 1 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 2 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 0 eor x23, x23, x13 // AES block 4k+7 - round N low aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 3 mov d10, v17.d[1] // GHASH block 4k - mid pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high eor x22, x22, x14 // AES block 4k+6 - round N high mov d8, v4.d[1] // GHASH block 4k - mid aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 1 rev64 v5.16b, v5.16b // GHASH block 4k+1 (t0 and t1 free) aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 4 pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 2 aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 5 rev64 v7.16b, v7.16b // GHASH block 4k+3 (t0, t1, t2 and t3 free) pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid rev64 
v6.16b, v6.16b // GHASH block 4k+2 (t0, t1, and t2 free) pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high mov d4, v5.d[1] // GHASH block 4k+1 - mid aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 3 aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 2 eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 3 aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 4 mov d8, v6.d[1] // GHASH block 4k+2 - mid aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 3 eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 4 aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 6 eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 4 pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 7 aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 5 ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 5 aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 8 aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 5 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 6 eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 7 pmull v6.1q, v7.1d, v12.1d // GHASH block 4k+3 - low eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 6 ldp x19, x20, [x0, #16] // AES block 4k+5 - load plaintext aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 8 mov d4, v7.d[1] // GHASH block 4k+3 - mid aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 6 eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high eor v4.8b, v4.8b, v7.8b // GHASH block 4k+3 - mid aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 7 eor x19, x19, x13 // AES block 4k+5 - round N low aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 8 eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 7 eor x21, x21, x13 // AES block 4k+6 - round N low aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 8 movi v8.8b, #0xc2 pmull v4.1q, v4.1d, v16.1d // GHASH block 4k+3 - mid eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high cmp x17, #12 // setup flags for AES-128/192/256 check fmov d5, x19 // AES block 4k+5 - mov low ldp x6, x7, [x0, #0] // AES block 4k+4 - load plaintext b.lt Lenc_main_loop_continue // branch if AES-128 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 9 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 9 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 10 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 10 aese v2.16b, v28.16b 
aesmc v2.16b, v2.16b // AES block 4k+6 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 10 b.eq Lenc_main_loop_continue // branch if AES-192 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 11 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 12 Lenc_main_loop_continue: shl d8, d8, #56 // mod_constant eor v11.16b, v11.16b, v6.16b // GHASH block 4k+3 - low eor v10.16b, v10.16b, v4.16b // GHASH block 4k+3 - mid add w12, w12, #1 // CTR block 4k+3 eor v4.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up add x0, x0, #64 // AES input_ptr update pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid rev w9, w12 // CTR block 4k+8 ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor x6, x6, x13 // AES block 4k+4 - round N low eor v10.16b, v10.16b, v4.16b // MODULO - karatsuba tidy up eor x7, x7, x14 // AES block 4k+4 - round N high fmov d4, x6 // AES block 4k+4 - mov low orr x9, x11, x9, lsl #32 // CTR block 4k+8 eor v7.16b, v9.16b, v7.16b // MODULO - fold into mid eor x20, x20, x14 // AES block 4k+5 - round N high eor x24, x24, x14 // AES block 4k+7 - round N high add w12, w12, #1 // CTR block 4k+8 aese v0.16b, v31.16b // AES block 4k+4 - round N-1 fmov v4.d[1], x7 // AES block 4k+4 - mov high eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid fmov d7, x23 // AES block 4k+7 - mov low aese v1.16b, v31.16b // AES block 4k+5 - round N-1 fmov v5.d[1], x20 // AES block 4k+5 - mov high fmov d6, x21 // AES block 4k+6 - mov low cmp x0, x5 // LOOP CONTROL fmov v6.d[1], x22 // AES block 4k+6 - mov high pmull v9.1q, v10.1d, v8.1d // MODULO - mid 64b align with low eor v4.16b, v4.16b, v0.16b // AES block 4k+4 - result fmov d0, x10 // CTR block 4k+8 fmov v0.d[1], x9 // CTR block 4k+8 rev w9, w12 // CTR block 4k+9 add w12, w12, #1 // CTR block 4k+9 eor v5.16b, v5.16b, v1.16b // AES block 4k+5 - result fmov d1, x10 // CTR block 4k+9 orr x9, x11, x9, lsl #32 // CTR block 4k+9 fmov v1.d[1], x9 // CTR block 4k+9 aese v2.16b, v31.16b // AES block 4k+6 - round N-1 rev w9, w12 // CTR block 4k+10 st1 { v4.16b}, [x2], #16 // AES block 4k+4 - store result orr x9, x11, x9, lsl #32 // CTR block 4k+10 eor v11.16b, v11.16b, v9.16b // MODULO - fold into low fmov v7.d[1], x24 // AES block 4k+7 - mov high ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment st1 { v5.16b}, [x2], #16 // AES block 4k+5 - store result add w12, w12, #1 // CTR block 4k+10 aese v3.16b, v31.16b // AES block 4k+7 - round N-1 eor v6.16b, v6.16b, v2.16b // AES block 4k+6 - result fmov d2, x10 // CTR block 4k+10 st1 { v6.16b}, [x2], #16 // AES block 4k+6 - store result fmov v2.d[1], x9 // CTR block 4k+10 rev w9, w12 // CTR block 4k+11 eor v11.16b, v11.16b, v10.16b // MODULO - fold into low orr x9, x11, x9, lsl #32 // CTR block 4k+11 eor v7.16b, v7.16b, v3.16b // AES block 4k+7 - result st1 { v7.16b}, [x2], #16 // AES block 4k+7 - store result b.lt Lenc_main_loop Lenc_prepretail: // PREPRETAIL aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 0 rev64 v6.16b, v6.16b // GHASH block 4k+2 (t0, t1, and t2 free) aese v2.16b, 
v18.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 0 fmov d3, x10 // CTR block 4k+3 aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 0 rev64 v4.16b, v4.16b // GHASH block 4k (only t0 is free) fmov v3.d[1], x9 // CTR block 4k+3 ext v11.16b, v11.16b, v11.16b, #8 // PRE 0 aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 1 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 1 eor v4.16b, v4.16b, v11.16b // PRE 1 rev64 v5.16b, v5.16b // GHASH block 4k+1 (t0 and t1 free) aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 2 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 0 mov d10, v17.d[1] // GHASH block 4k - mid aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 1 pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low mov d8, v4.d[1] // GHASH block 4k - mid pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 3 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 2 eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 2 aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 1 aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 3 pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 2 eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high mov d4, v5.d[1] // GHASH block 4k+1 - mid aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 3 eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 3 eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid mov d8, v6.d[1] // GHASH block 4k+2 - mid aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 4 rev64 v7.16b, v7.16b // GHASH block 4k+3 (t0, t1, t2 and t3 free) aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 4 pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid add w12, w12, #1 // CTR block 4k+3 pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 5 aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 4 eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 5 eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high mov d4, v7.d[1] // GHASH block 4k+3 - mid aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 4 pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid eor v4.8b, v4.8b, v7.8b // GHASH block 4k+3 - mid pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 5 pmull v4.1q, v4.1d, v16.1d // GHASH block 4k+3 - mid eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 5 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 6 aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 6 aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 6 movi v8.8b, 
#0xc2 aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 6 aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 7 eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 7 aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 7 shl d8, d8, #56 // mod_constant aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 8 eor v10.16b, v10.16b, v4.16b // GHASH block 4k+3 - mid pmull v6.1q, v7.1d, v12.1d // GHASH block 4k+3 - low aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 8 cmp x17, #12 // setup flags for AES-128/192/256 check aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 8 eor v11.16b, v11.16b, v6.16b // GHASH block 4k+3 - low aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 7 eor v10.16b, v10.16b, v9.16b // karatsuba tidy up aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 8 pmull v4.1q, v9.1d, v8.1d ext v9.16b, v9.16b, v9.16b, #8 eor v10.16b, v10.16b, v11.16b b.lt Lenc_finish_prepretail // branch if AES-128 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 9 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 9 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 10 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 10 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 10 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 10 b.eq Lenc_finish_prepretail // branch if AES-192 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 11 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 12 Lenc_finish_prepretail: eor v10.16b, v10.16b, v4.16b eor v10.16b, v10.16b, v9.16b pmull v4.1q, v10.1d, v8.1d ext v10.16b, v10.16b, v10.16b, #8 aese v1.16b, v31.16b // AES block 4k+5 - round N-1 eor v11.16b, v11.16b, v4.16b aese v3.16b, v31.16b // AES block 4k+7 - round N-1 aese v0.16b, v31.16b // AES block 4k+4 - round N-1 aese v2.16b, v31.16b // AES block 4k+6 - round N-1 eor v11.16b, v11.16b, v10.16b Lenc_tail: // TAIL ext v8.16b, v11.16b, v11.16b, #8 // prepare final partial tag sub x5, x4, x0 // main_end_input_ptr is number of bytes left to process ldp x6, x7, [x0], #16 // AES block 4k+4 - load plaintext eor x6, x6, x13 // AES block 4k+4 - round N low eor x7, x7, x14 // AES block 4k+4 - round N high cmp x5, #48 fmov d4, x6 // AES block 4k+4 - mov low fmov v4.d[1], x7 // AES block 4k+4 - mov high eor v5.16b, v4.16b, v0.16b // AES block 4k+4 - result b.gt Lenc_blocks_more_than_3 cmp x5, #32 mov v3.16b, v2.16b movi v11.8b, #0 movi v9.8b, #0 sub w12, w12, #1 mov v2.16b, v1.16b movi v10.8b, #0 b.gt Lenc_blocks_more_than_2 mov v3.16b, v1.16b sub w12, w12, #1 cmp x5, #16 b.gt Lenc_blocks_more_than_1 sub w12, w12, #1 b Lenc_blocks_less_than_1 Lenc_blocks_more_than_3: // blocks left > 3 st1 { v5.16b}, [x2], #16 // AES final-3 block - 
store result ldp x6, x7, [x0], #16 // AES final-2 block - load input low & high rev64 v4.16b, v5.16b // GHASH final-3 block eor x6, x6, x13 // AES final-2 block - round N low eor v4.16b, v4.16b, v8.16b // feed in partial tag eor x7, x7, x14 // AES final-2 block - round N high mov d22, v4.d[1] // GHASH final-3 block - mid fmov d5, x6 // AES final-2 block - mov low fmov v5.d[1], x7 // AES final-2 block - mov high eor v22.8b, v22.8b, v4.8b // GHASH final-3 block - mid movi v8.8b, #0 // suppress further partial tag feed in mov d10, v17.d[1] // GHASH final-3 block - mid pmull v11.1q, v4.1d, v15.1d // GHASH final-3 block - low pmull2 v9.1q, v4.2d, v15.2d // GHASH final-3 block - high pmull v10.1q, v22.1d, v10.1d // GHASH final-3 block - mid eor v5.16b, v5.16b, v1.16b // AES final-2 block - result Lenc_blocks_more_than_2: // blocks left > 2 st1 { v5.16b}, [x2], #16 // AES final-2 block - store result ldp x6, x7, [x0], #16 // AES final-1 block - load input low & high rev64 v4.16b, v5.16b // GHASH final-2 block eor x6, x6, x13 // AES final-1 block - round N low eor v4.16b, v4.16b, v8.16b // feed in partial tag fmov d5, x6 // AES final-1 block - mov low eor x7, x7, x14 // AES final-1 block - round N high fmov v5.d[1], x7 // AES final-1 block - mov high movi v8.8b, #0 // suppress further partial tag feed in pmull2 v20.1q, v4.2d, v14.2d // GHASH final-2 block - high mov d22, v4.d[1] // GHASH final-2 block - mid pmull v21.1q, v4.1d, v14.1d // GHASH final-2 block - low eor v22.8b, v22.8b, v4.8b // GHASH final-2 block - mid eor v5.16b, v5.16b, v2.16b // AES final-1 block - result eor v9.16b, v9.16b, v20.16b // GHASH final-2 block - high pmull v22.1q, v22.1d, v17.1d // GHASH final-2 block - mid eor v11.16b, v11.16b, v21.16b // GHASH final-2 block - low eor v10.16b, v10.16b, v22.16b // GHASH final-2 block - mid Lenc_blocks_more_than_1: // blocks left > 1 st1 { v5.16b}, [x2], #16 // AES final-1 block - store result rev64 v4.16b, v5.16b // GHASH final-1 block ldp x6, x7, [x0], #16 // AES final block - load input low & high eor v4.16b, v4.16b, v8.16b // feed in partial tag movi v8.8b, #0 // suppress further partial tag feed in eor x6, x6, x13 // AES final block - round N low mov d22, v4.d[1] // GHASH final-1 block - mid pmull2 v20.1q, v4.2d, v13.2d // GHASH final-1 block - high eor x7, x7, x14 // AES final block - round N high eor v22.8b, v22.8b, v4.8b // GHASH final-1 block - mid eor v9.16b, v9.16b, v20.16b // GHASH final-1 block - high ins v22.d[1], v22.d[0] // GHASH final-1 block - mid fmov d5, x6 // AES final block - mov low fmov v5.d[1], x7 // AES final block - mov high pmull2 v22.1q, v22.2d, v16.2d // GHASH final-1 block - mid pmull v21.1q, v4.1d, v13.1d // GHASH final-1 block - low eor v5.16b, v5.16b, v3.16b // AES final block - result eor v10.16b, v10.16b, v22.16b // GHASH final-1 block - mid eor v11.16b, v11.16b, v21.16b // GHASH final-1 block - low Lenc_blocks_less_than_1: // blocks left <= 1 and x1, x1, #127 // bit_length %= 128 mvn x13, xzr // rkN_l = 0xffffffffffffffff sub x1, x1, #128 // bit_length -= 128 neg x1, x1 // bit_length = 128 - #bits in input (in range [1,128]) ld1 { v18.16b}, [x2] // load existing bytes where the possibly partial last block is to be stored mvn x14, xzr // rkN_h = 0xffffffffffffffff and x1, x1, #127 // bit_length %= 128 lsr x14, x14, x1 // rkN_h is mask for top 64b of last block cmp x1, #64 csel x6, x13, x14, lt csel x7, x14, xzr, lt fmov d0, x6 // ctr0b is mask for last block fmov v0.d[1], x7 and v5.16b, v5.16b, v0.16b // possibly partial last block has zeroes in 
highest bits rev64 v4.16b, v5.16b // GHASH final block eor v4.16b, v4.16b, v8.16b // feed in partial tag bif v5.16b, v18.16b, v0.16b // insert existing bytes in top end of result before storing pmull2 v20.1q, v4.2d, v12.2d // GHASH final block - high mov d8, v4.d[1] // GHASH final block - mid rev w9, w12 pmull v21.1q, v4.1d, v12.1d // GHASH final block - low eor v9.16b, v9.16b, v20.16b // GHASH final block - high eor v8.8b, v8.8b, v4.8b // GHASH final block - mid pmull v8.1q, v8.1d, v16.1d // GHASH final block - mid eor v11.16b, v11.16b, v21.16b // GHASH final block - low eor v10.16b, v10.16b, v8.16b // GHASH final block - mid movi v8.8b, #0xc2 eor v4.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up shl d8, d8, #56 // mod_constant eor v10.16b, v10.16b, v4.16b // MODULO - karatsuba tidy up pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid pmull v9.1q, v10.1d, v8.1d // MODULO - mid 64b align with low ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment str w9, [x16, #12] // store the updated counter st1 { v5.16b}, [x2] // store all 16B eor v11.16b, v11.16b, v9.16b // MODULO - fold into low eor v11.16b, v11.16b, v10.16b // MODULO - fold into low ext v11.16b, v11.16b, v11.16b, #8 rev64 v11.16b, v11.16b mov x0, x15 st1 { v11.16b }, [x3] ldp x19, x20, [sp, #16] ldp x21, x22, [sp, #32] ldp x23, x24, [sp, #48] ldp d8, d9, [sp, #64] ldp d10, d11, [sp, #80] ldp d12, d13, [sp, #96] ldp d14, d15, [sp, #112] ldp x29, x30, [sp], #128 AARCH64_VALIDATE_LINK_REGISTER ret .globl aes_gcm_dec_kernel .def aes_gcm_dec_kernel .type 32 .endef .align 4 aes_gcm_dec_kernel: AARCH64_SIGN_LINK_REGISTER stp x29, x30, [sp, #-128]! 
mov x29, sp stp x19, x20, [sp, #16] mov x16, x4 mov x8, x5 stp x21, x22, [sp, #32] stp x23, x24, [sp, #48] stp d8, d9, [sp, #64] stp d10, d11, [sp, #80] stp d12, d13, [sp, #96] stp d14, d15, [sp, #112] ldr w17, [x8, #240] add x19, x8, x17, lsl #4 // borrow input_l1 for last key ldp x13, x14, [x19] // load round N keys ldr q31, [x19, #-16] // load round N-1 keys lsr x5, x1, #3 // byte_len mov x15, x5 ldp x10, x11, [x16] // ctr96_b64, ctr96_t32 ldr q26, [x8, #128] // load rk8 sub x5, x5, #1 // byte_len - 1 ldr q25, [x8, #112] // load rk7 and x5, x5, #0xffffffffffffffc0 // number of bytes to be processed in main loop (at least 1 byte must be handled by tail) add x4, x0, x1, lsr #3 // end_input_ptr ldr q24, [x8, #96] // load rk6 lsr x12, x11, #32 ldr q23, [x8, #80] // load rk5 orr w11, w11, w11 ldr q21, [x8, #48] // load rk3 add x5, x5, x0 rev w12, w12 // rev_ctr32 add w12, w12, #1 // increment rev_ctr32 fmov d3, x10 // CTR block 3 rev w9, w12 // CTR block 1 add w12, w12, #1 // CTR block 1 fmov d1, x10 // CTR block 1 orr x9, x11, x9, lsl #32 // CTR block 1 ld1 { v0.16b}, [x16] // special case vector load initial counter so we can start first AES block as quickly as possible fmov v1.d[1], x9 // CTR block 1 rev w9, w12 // CTR block 2 add w12, w12, #1 // CTR block 2 fmov d2, x10 // CTR block 2 orr x9, x11, x9, lsl #32 // CTR block 2 fmov v2.d[1], x9 // CTR block 2 rev w9, w12 // CTR block 3 orr x9, x11, x9, lsl #32 // CTR block 3 ldr q18, [x8, #0] // load rk0 fmov v3.d[1], x9 // CTR block 3 add w12, w12, #1 // CTR block 3 ldr q22, [x8, #64] // load rk4 ldr q19, [x8, #16] // load rk1 aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 0 - round 0 ldr q14, [x6, #48] // load h3l | h3h ext v14.16b, v14.16b, v14.16b, #8 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 3 - round 0 ldr q15, [x6, #80] // load h4l | h4h ext v15.16b, v15.16b, v15.16b, #8 aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 1 - round 0 ldr q13, [x6, #32] // load h2l | h2h ext v13.16b, v13.16b, v13.16b, #8 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 2 - round 0 ldr q20, [x8, #32] // load rk2 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 0 - round 1 aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 1 - round 1 ld1 { v11.16b}, [x3] ext v11.16b, v11.16b, v11.16b, #8 rev64 v11.16b, v11.16b aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 2 - round 1 ldr q27, [x8, #144] // load rk9 aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 3 - round 1 ldr q30, [x8, #192] // load rk12 aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 0 - round 2 ldr q12, [x6] // load h1l | h1h ext v12.16b, v12.16b, v12.16b, #8 aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 2 - round 2 ldr q28, [x8, #160] // load rk10 aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 3 - round 2 aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 0 - round 3 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 1 - round 2 aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 3 - round 3 aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 0 - round 4 aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 2 - round 3 aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 1 - round 3 aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 3 - round 4 aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 2 - round 4 aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 1 - round 4 aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 3 - round 5 aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 0 - 
round 5 aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 1 - round 5 aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 2 - round 5 aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 0 - round 6 aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 3 - round 6 cmp x17, #12 // setup flags for AES-128/192/256 check aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 1 - round 6 aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 2 - round 6 aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 0 - round 7 aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 1 - round 7 aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 3 - round 7 aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 0 - round 8 aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 2 - round 7 aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 3 - round 8 aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 1 - round 8 ldr q29, [x8, #176] // load rk11 aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 2 - round 8 b.lt Ldec_finish_first_blocks // branch if AES-128 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 0 - round 9 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 1 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 3 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 2 - round 9 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 0 - round 10 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 1 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 3 - round 10 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 2 - round 10 b.eq Ldec_finish_first_blocks // branch if AES-192 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 0 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 3 - round 11 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 1 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 2 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 1 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 0 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 2 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 3 - round 12 Ldec_finish_first_blocks: cmp x0, x5 // check if we have <= 4 blocks trn1 v9.2d, v14.2d, v15.2d // h4h | h3h trn2 v17.2d, v14.2d, v15.2d // h4l | h3l trn1 v8.2d, v12.2d, v13.2d // h2h | h1h trn2 v16.2d, v12.2d, v13.2d // h2l | h1l eor v17.16b, v17.16b, v9.16b // h4k | h3k aese v1.16b, v31.16b // AES block 1 - round N-1 aese v2.16b, v31.16b // AES block 2 - round N-1 eor v16.16b, v16.16b, v8.16b // h2k | h1k aese v3.16b, v31.16b // AES block 3 - round N-1 aese v0.16b, v31.16b // AES block 0 - round N-1 b.ge Ldec_tail // handle tail ldr q4, [x0, #0] // AES block 0 - load ciphertext ldr q5, [x0, #16] // AES block 1 - load ciphertext rev w9, w12 // CTR block 4 eor v0.16b, v4.16b, v0.16b // AES block 0 - result eor v1.16b, v5.16b, v1.16b // AES block 1 - result rev64 v5.16b, v5.16b // GHASH block 1 ldr q7, [x0, #48] // AES block 3 - load ciphertext mov x7, v0.d[1] // AES block 0 - mov high mov x6, v0.d[0] // AES block 0 - mov low rev64 v4.16b, v4.16b // GHASH block 0 add w12, w12, #1 // CTR block 4 fmov d0, x10 // CTR block 4 orr x9, x11, x9, lsl #32 // CTR block 4 fmov v0.d[1], x9 // CTR block 4 rev w9, w12 // CTR block 5 add w12, w12, #1 // CTR block 5 mov x19, v1.d[0] // AES block 1 - mov low orr x9, x11, x9, lsl #32 // CTR block 5 mov x20, v1.d[1] // AES block 1 - mov high eor x7, x7, x14 // AES block 0 - round N 
high eor x6, x6, x13 // AES block 0 - round N low stp x6, x7, [x2], #16 // AES block 0 - store result fmov d1, x10 // CTR block 5 ldr q6, [x0, #32] // AES block 2 - load ciphertext add x0, x0, #64 // AES input_ptr update fmov v1.d[1], x9 // CTR block 5 rev w9, w12 // CTR block 6 add w12, w12, #1 // CTR block 6 eor x19, x19, x13 // AES block 1 - round N low orr x9, x11, x9, lsl #32 // CTR block 6 eor x20, x20, x14 // AES block 1 - round N high stp x19, x20, [x2], #16 // AES block 1 - store result eor v2.16b, v6.16b, v2.16b // AES block 2 - result cmp x0, x5 // check if we have <= 8 blocks b.ge Ldec_prepretail // do prepretail Ldec_main_loop: // main loop start mov x21, v2.d[0] // AES block 4k+2 - mov low ext v11.16b, v11.16b, v11.16b, #8 // PRE 0 eor v3.16b, v7.16b, v3.16b // AES block 4k+3 - result aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 0 mov x22, v2.d[1] // AES block 4k+2 - mov high aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 0 fmov d2, x10 // CTR block 4k+6 fmov v2.d[1], x9 // CTR block 4k+6 eor v4.16b, v4.16b, v11.16b // PRE 1 rev w9, w12 // CTR block 4k+7 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 1 mov x24, v3.d[1] // AES block 4k+3 - mov high aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 1 mov x23, v3.d[0] // AES block 4k+3 - mov low pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high mov d8, v4.d[1] // GHASH block 4k - mid fmov d3, x10 // CTR block 4k+7 aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 2 orr x9, x11, x9, lsl #32 // CTR block 4k+7 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 0 fmov v3.d[1], x9 // CTR block 4k+7 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 2 eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 3 eor x22, x22, x14 // AES block 4k+2 - round N high aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 1 mov d10, v17.d[1] // GHASH block 4k - mid aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 3 rev64 v6.16b, v6.16b // GHASH block 4k+2 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 0 eor x21, x21, x13 // AES block 4k+2 - round N low aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 2 stp x21, x22, [x2], #16 // AES block 4k+2 - store result pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 3 rev64 v7.16b, v7.16b // GHASH block 4k+3 pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid eor x23, x23, x13 // AES block 4k+3 - round N low pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low eor x24, x24, x14 // AES block 4k+3 - round N high eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 4 aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 1 mov d4, v5.d[1] // GHASH block 4k+1 - mid aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 4 eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 5 add w12, w12, #1 // CTR block 4k+7 aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 2 mov d8, v6.d[1] // GHASH block 4k+2 - mid aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 4 eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid pmull v5.1q, v6.1d, 
v13.1d // GHASH block 4k+2 - low aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 3 eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 5 aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 5 eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid rev w9, w12 // CTR block 4k+8 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 6 ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 6 add w12, w12, #1 // CTR block 4k+8 aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 4 aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 7 eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 7 pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high mov d6, v7.d[1] // GHASH block 4k+3 - mid aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 5 pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 8 eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 6 pmull v4.1q, v7.1d, v12.1d // GHASH block 4k+3 - low orr x9, x11, x9, lsl #32 // CTR block 4k+8 eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high cmp x17, #12 // setup flags for AES-128/192/256 check eor v6.8b, v6.8b, v7.8b // GHASH block 4k+3 - mid aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 8 aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 6 eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high pmull v6.1q, v6.1d, v16.1d // GHASH block 4k+3 - mid movi v8.8b, #0xc2 aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 7 eor v11.16b, v11.16b, v4.16b // GHASH block 4k+3 - low aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 7 shl d8, d8, #56 // mod_constant aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 8 eor v10.16b, v10.16b, v6.16b // GHASH block 4k+3 - mid aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 8 b.lt Ldec_main_loop_continue // branch if AES-128 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 9 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 9 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 10 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 10 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 10 b.eq Ldec_main_loop_continue // branch if AES-192 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 11 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 11 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 12 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 4k+7 - 
round 12 Ldec_main_loop_continue: pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up ldr q4, [x0, #0] // AES block 4k+4 - load ciphertext aese v0.16b, v31.16b // AES block 4k+4 - round N-1 ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up ldr q5, [x0, #16] // AES block 4k+5 - load ciphertext eor v0.16b, v4.16b, v0.16b // AES block 4k+4 - result stp x23, x24, [x2], #16 // AES block 4k+3 - store result eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid ldr q7, [x0, #48] // AES block 4k+7 - load ciphertext ldr q6, [x0, #32] // AES block 4k+6 - load ciphertext mov x7, v0.d[1] // AES block 4k+4 - mov high eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid aese v1.16b, v31.16b // AES block 4k+5 - round N-1 add x0, x0, #64 // AES input_ptr update mov x6, v0.d[0] // AES block 4k+4 - mov low fmov d0, x10 // CTR block 4k+8 fmov v0.d[1], x9 // CTR block 4k+8 pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low eor v1.16b, v5.16b, v1.16b // AES block 4k+5 - result rev w9, w12 // CTR block 4k+9 aese v2.16b, v31.16b // AES block 4k+6 - round N-1 orr x9, x11, x9, lsl #32 // CTR block 4k+9 cmp x0, x5 // LOOP CONTROL add w12, w12, #1 // CTR block 4k+9 eor x6, x6, x13 // AES block 4k+4 - round N low eor x7, x7, x14 // AES block 4k+4 - round N high mov x20, v1.d[1] // AES block 4k+5 - mov high eor v2.16b, v6.16b, v2.16b // AES block 4k+6 - result eor v11.16b, v11.16b, v8.16b // MODULO - fold into low mov x19, v1.d[0] // AES block 4k+5 - mov low fmov d1, x10 // CTR block 4k+9 ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment fmov v1.d[1], x9 // CTR block 4k+9 rev w9, w12 // CTR block 4k+10 add w12, w12, #1 // CTR block 4k+10 aese v3.16b, v31.16b // AES block 4k+7 - round N-1 orr x9, x11, x9, lsl #32 // CTR block 4k+10 rev64 v5.16b, v5.16b // GHASH block 4k+5 eor x20, x20, x14 // AES block 4k+5 - round N high stp x6, x7, [x2], #16 // AES block 4k+4 - store result eor x19, x19, x13 // AES block 4k+5 - round N low stp x19, x20, [x2], #16 // AES block 4k+5 - store result rev64 v4.16b, v4.16b // GHASH block 4k+4 eor v11.16b, v11.16b, v10.16b // MODULO - fold into low b.lt Ldec_main_loop Ldec_prepretail: // PREPRETAIL ext v11.16b, v11.16b, v11.16b, #8 // PRE 0 mov x21, v2.d[0] // AES block 4k+2 - mov low eor v3.16b, v7.16b, v3.16b // AES block 4k+3 - result aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 0 mov x22, v2.d[1] // AES block 4k+2 - mov high aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 0 fmov d2, x10 // CTR block 4k+6 fmov v2.d[1], x9 // CTR block 4k+6 rev w9, w12 // CTR block 4k+7 eor v4.16b, v4.16b, v11.16b // PRE 1 rev64 v6.16b, v6.16b // GHASH block 4k+2 orr x9, x11, x9, lsl #32 // CTR block 4k+7 mov x23, v3.d[0] // AES block 4k+3 - mov low aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 1 mov x24, v3.d[1] // AES block 4k+3 - mov high pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low mov d8, v4.d[1] // GHASH block 4k - mid fmov d3, x10 // CTR block 4k+7 pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high fmov v3.d[1], x9 // CTR block 4k+7 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 0 mov d10, v17.d[1] // GHASH block 4k - mid aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 1 eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES 
block 4k+6 - round 1 rev64 v7.16b, v7.16b // GHASH block 4k+3 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 0 pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 1 mov d4, v5.d[1] // GHASH block 4k+1 - mid aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 2 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 2 eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 2 aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 3 mov d8, v6.d[1] // GHASH block 4k+2 - mid aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 2 eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 4 aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 3 eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 5 eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 4 pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 5 ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 3 aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 3 eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high pmull v4.1q, v7.1d, v12.1d // GHASH block 4k+3 - low aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 4 mov d6, v7.d[1] // GHASH block 4k+3 - mid aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 4 pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 5 eor v6.8b, v6.8b, v7.8b // GHASH block 4k+3 - mid aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 5 aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 6 eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 6 aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 6 movi v8.8b, #0xc2 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 6 eor v11.16b, v11.16b, v4.16b // GHASH block 4k+3 - low pmull v6.1q, v6.1d, v16.1d // GHASH block 4k+3 - mid aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 7 cmp x17, #12 // setup flags for AES-128/192/256 check eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 7 aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 7 eor v10.16b, v10.16b, v6.16b // GHASH block 4k+3 - mid aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 8 aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 7 eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 8 aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 8 shl d8, d8, #56 // mod_constant aese 
v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 8 b.lt Ldec_finish_prepretail // branch if AES-128 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 9 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 9 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 10 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 10 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 10 b.eq Ldec_finish_prepretail // branch if AES-192 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 11 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 11 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 11 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 12 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 12 Ldec_finish_prepretail: eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid eor x22, x22, x14 // AES block 4k+2 - round N high eor x23, x23, x13 // AES block 4k+3 - round N low eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid add w12, w12, #1 // CTR block 4k+7 eor x21, x21, x13 // AES block 4k+2 - round N low pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low eor x24, x24, x14 // AES block 4k+3 - round N high stp x21, x22, [x2], #16 // AES block 4k+2 - store result ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment stp x23, x24, [x2], #16 // AES block 4k+3 - store result eor v11.16b, v11.16b, v8.16b // MODULO - fold into low aese v1.16b, v31.16b // AES block 4k+5 - round N-1 aese v0.16b, v31.16b // AES block 4k+4 - round N-1 aese v3.16b, v31.16b // AES block 4k+7 - round N-1 aese v2.16b, v31.16b // AES block 4k+6 - round N-1 eor v11.16b, v11.16b, v10.16b // MODULO - fold into low Ldec_tail: // TAIL sub x5, x4, x0 // main_end_input_ptr is number of bytes left to process ld1 { v5.16b}, [x0], #16 // AES block 4k+4 - load ciphertext eor v0.16b, v5.16b, v0.16b // AES block 4k+4 - result mov x6, v0.d[0] // AES block 4k+4 - mov low mov x7, v0.d[1] // AES block 4k+4 - mov high ext v8.16b, v11.16b, v11.16b, #8 // prepare final partial tag cmp x5, #48 eor x6, x6, x13 // AES block 4k+4 - round N low eor x7, x7, x14 // AES block 4k+4 - round N high b.gt Ldec_blocks_more_than_3 sub w12, w12, #1 mov v3.16b, v2.16b movi v10.8b, #0 movi v11.8b, #0 cmp x5, #32 movi v9.8b, #0 mov v2.16b, v1.16b b.gt Ldec_blocks_more_than_2 sub w12, w12, #1 mov v3.16b, v1.16b cmp x5, #16 b.gt Ldec_blocks_more_than_1 sub w12, w12, #1 b Ldec_blocks_less_than_1 Ldec_blocks_more_than_3: // blocks left > 3 rev64 v4.16b, v5.16b // GHASH final-3 block ld1 { v5.16b}, [x0], #16 // AES final-2 block - load ciphertext stp x6, x7, [x2], #16 // AES final-3 block - store result mov d10, v17.d[1] // GHASH final-3 block - mid eor v4.16b, v4.16b, v8.16b // feed in partial tag eor v0.16b, v5.16b, v1.16b // AES final-2 block - result mov d22, v4.d[1] // GHASH 
final-3 block - mid mov x6, v0.d[0] // AES final-2 block - mov low mov x7, v0.d[1] // AES final-2 block - mov high eor v22.8b, v22.8b, v4.8b // GHASH final-3 block - mid movi v8.8b, #0 // suppress further partial tag feed in pmull2 v9.1q, v4.2d, v15.2d // GHASH final-3 block - high pmull v10.1q, v22.1d, v10.1d // GHASH final-3 block - mid eor x6, x6, x13 // AES final-2 block - round N low pmull v11.1q, v4.1d, v15.1d // GHASH final-3 block - low eor x7, x7, x14 // AES final-2 block - round N high Ldec_blocks_more_than_2: // blocks left > 2 rev64 v4.16b, v5.16b // GHASH final-2 block ld1 { v5.16b}, [x0], #16 // AES final-1 block - load ciphertext eor v4.16b, v4.16b, v8.16b // feed in partial tag stp x6, x7, [x2], #16 // AES final-2 block - store result eor v0.16b, v5.16b, v2.16b // AES final-1 block - result mov d22, v4.d[1] // GHASH final-2 block - mid pmull v21.1q, v4.1d, v14.1d // GHASH final-2 block - low pmull2 v20.1q, v4.2d, v14.2d // GHASH final-2 block - high eor v22.8b, v22.8b, v4.8b // GHASH final-2 block - mid mov x6, v0.d[0] // AES final-1 block - mov low mov x7, v0.d[1] // AES final-1 block - mov high eor v11.16b, v11.16b, v21.16b // GHASH final-2 block - low movi v8.8b, #0 // suppress further partial tag feed in pmull v22.1q, v22.1d, v17.1d // GHASH final-2 block - mid eor v9.16b, v9.16b, v20.16b // GHASH final-2 block - high eor x6, x6, x13 // AES final-1 block - round N low eor v10.16b, v10.16b, v22.16b // GHASH final-2 block - mid eor x7, x7, x14 // AES final-1 block - round N high Ldec_blocks_more_than_1: // blocks left > 1 stp x6, x7, [x2], #16 // AES final-1 block - store result rev64 v4.16b, v5.16b // GHASH final-1 block ld1 { v5.16b}, [x0], #16 // AES final block - load ciphertext eor v4.16b, v4.16b, v8.16b // feed in partial tag movi v8.8b, #0 // suppress further partial tag feed in mov d22, v4.d[1] // GHASH final-1 block - mid eor v0.16b, v5.16b, v3.16b // AES final block - result pmull2 v20.1q, v4.2d, v13.2d // GHASH final-1 block - high eor v22.8b, v22.8b, v4.8b // GHASH final-1 block - mid pmull v21.1q, v4.1d, v13.1d // GHASH final-1 block - low mov x6, v0.d[0] // AES final block - mov low ins v22.d[1], v22.d[0] // GHASH final-1 block - mid mov x7, v0.d[1] // AES final block - mov high pmull2 v22.1q, v22.2d, v16.2d // GHASH final-1 block - mid eor x6, x6, x13 // AES final block - round N low eor v11.16b, v11.16b, v21.16b // GHASH final-1 block - low eor v9.16b, v9.16b, v20.16b // GHASH final-1 block - high eor v10.16b, v10.16b, v22.16b // GHASH final-1 block - mid eor x7, x7, x14 // AES final block - round N high Ldec_blocks_less_than_1: // blocks left <= 1 and x1, x1, #127 // bit_length %= 128 mvn x14, xzr // rkN_h = 0xffffffffffffffff sub x1, x1, #128 // bit_length -= 128 mvn x13, xzr // rkN_l = 0xffffffffffffffff ldp x4, x5, [x2] // load existing bytes we need to not overwrite neg x1, x1 // bit_length = 128 - #bits in input (in range [1,128]) and x1, x1, #127 // bit_length %= 128 lsr x14, x14, x1 // rkN_h is mask for top 64b of last block cmp x1, #64 csel x9, x13, x14, lt csel x10, x14, xzr, lt fmov d0, x9 // ctr0b is mask for last block and x6, x6, x9 mov v0.d[1], x10 bic x4, x4, x9 // mask out low existing bytes rev w9, w12 bic x5, x5, x10 // mask out high existing bytes orr x6, x6, x4 and x7, x7, x10 orr x7, x7, x5 and v5.16b, v5.16b, v0.16b // possibly partial last block has zeroes in highest bits rev64 v4.16b, v5.16b // GHASH final block eor v4.16b, v4.16b, v8.16b // feed in partial tag pmull v21.1q, v4.1d, v12.1d // GHASH final block - low mov d8, v4.d[1] 
// GHASH final block - mid eor v8.8b, v8.8b, v4.8b // GHASH final block - mid pmull2 v20.1q, v4.2d, v12.2d // GHASH final block - high pmull v8.1q, v8.1d, v16.1d // GHASH final block - mid eor v9.16b, v9.16b, v20.16b // GHASH final block - high eor v11.16b, v11.16b, v21.16b // GHASH final block - low eor v10.16b, v10.16b, v8.16b // GHASH final block - mid movi v8.8b, #0xc2 eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up shl d8, d8, #56 // mod_constant eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment eor v11.16b, v11.16b, v8.16b // MODULO - fold into low stp x6, x7, [x2] str w9, [x16, #12] // store the updated counter eor v11.16b, v11.16b, v10.16b // MODULO - fold into low ext v11.16b, v11.16b, v11.16b, #8 rev64 v11.16b, v11.16b mov x0, x15 st1 { v11.16b }, [x3] ldp x19, x20, [sp, #16] ldp x21, x22, [sp, #32] ldp x23, x24, [sp, #48] ldp d8, d9, [sp, #64] ldp d10, d11, [sp, #80] ldp d12, d13, [sp, #96] ldp d14, d15, [sp, #112] ldp x29, x30, [sp], #128 AARCH64_VALIDATE_LINK_REGISTER ret #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
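The "GHASH final block" and "MODULO" comments in the decrypt tail above describe the scalar end of the AES-GCM path: each remaining ciphertext block is folded into the running authentication tag and multiplied by the hash key in GF(2^128), with the PMULL low/mid/high partial products reduced via the mod_constant (movi #0xc2 / shl #56). As a reading aid, here is a minimal reference sketch of that field multiplication and tag update in Python, following the NIST SP 800-38D bit-reflected convention. It is illustrative only: the names gf128_mul and ghash_update are not taken from this file, and the SIMD code above computes the same result with Karatsuba-style partial products and a single deferred reduction.

# Reference sketch (assumed names, not from the assembly above): GHASH tag update
# in GF(2^128) per NIST SP 800-38D. Blocks are 128-bit integers read big-endian
# from the byte stream, e.g. int.from_bytes(block, "big").

R = 0xE1000000000000000000000000000000  # x^128 + x^7 + x^2 + x + 1 in the reflected layout

def gf128_mul(x: int, y: int) -> int:
    """Carry-less multiply of x and y, reduced modulo the GCM polynomial."""
    z, v = 0, x
    for i in range(128):
        if (y >> (127 - i)) & 1:                 # consume bits of y from the most significant end
            z ^= v
        v = (v >> 1) ^ R if v & 1 else v >> 1    # multiply v by x (right shift in this bit order), folding the dropped bit via R
    return z

def ghash_update(tag: int, block: int, h: int) -> int:
    """One GHASH step: XOR a block into the tag, then multiply by the hash key."""
    return gf128_mul(tag ^ block, h)

The four-blocks-at-a-time structure of the assembly multiplies the final-3 .. final blocks by what appear to be precomputed powers of the hash key (the v15/v14/v13/v12 registers) and sums the partial products in the v11/v10/v9 accumulators, so the reduction constant is applied once per group of blocks rather than once per block.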
fatiimajamiil/rustpad-custom
60,192
.cargo/registry/src/index.crates.io-6f17d22bba15001f/ring-0.17.14/pregenerated/sha256-armv4-linux32.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__) @ Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved. @ @ Licensed under the Apache License, Version 2.0 (the "License"); @ you may not use this file except in compliance with the License. @ You may obtain a copy of the License at @ @ https://www.apache.org/licenses/LICENSE-2.0 @ @ Unless required by applicable law or agreed to in writing, software @ distributed under the License is distributed on an "AS IS" BASIS, @ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @ See the License for the specific language governing permissions and @ limitations under the License. @ ==================================================================== @ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL @ project. @ ==================================================================== @ SHA256 block procedure for ARMv4. May 2007. @ Performance is ~2x better than gcc 3.4 generated code and in "abso- @ lute" terms is ~2250 cycles per 64-byte block or ~35 cycles per @ byte [on single-issue Xscale PXA250 core]. @ July 2010. @ @ Rescheduling for dual-issue pipeline resulted in 22% improvement on @ Cortex A8 core and ~20 cycles per processed byte. @ February 2011. @ @ Profiler-assisted and platform-specific optimization resulted in 16% @ improvement on Cortex A8 core and ~15.4 cycles per processed byte. @ September 2013. @ @ Add NEON implementation. On Cortex A8 it was measured to process one @ byte in 12.5 cycles or 23% faster than integer-only code. Snapdragon @ S4 does it in 12.5 cycles too, but it's 50% faster than integer-only @ code (meaning that latter performs sub-optimally, nothing was done @ about it). @ May 2014. @ @ Add ARMv8 code path performing at 2.0 cpb on Apple A7. #ifdef __KERNEL__ # define __ARM_ARCH __LINUX_ARM_ARCH__ # define __ARM_MAX_ARCH__ 7 #endif @ Silence ARMv8 deprecated IT instruction warnings. This file is used by both @ ARMv7 and ARMv8 processors. It does have ARMv8-only code, but those @ instructions are manually-encoded. (See unsha256.) 
.arch armv7-a .text #if defined(__thumb2__) .syntax unified .thumb #else .code 32 #endif .type K256,%object .align 5 K256: .word 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .word 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .word 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .word 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .word 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .word 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .word 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .word 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .word 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .word 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .word 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .word 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .word 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .word 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .word 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .word 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .size K256,.-K256 .word 0 @ terminator .align 5 .globl sha256_block_data_order_nohw .hidden sha256_block_data_order_nohw .type sha256_block_data_order_nohw,%function sha256_block_data_order_nohw: add r2,r1,r2,lsl#6 @ len to point at the end of inp stmdb sp!,{r0,r1,r2,r4-r11,lr} ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11} adr r14,K256 sub sp,sp,#16*4 @ alloca(X[16]) .Loop: # if __ARM_ARCH>=7 ldr r2,[r1],#4 # else ldrb r2,[r1,#3] # endif eor r3,r5,r6 @ magic eor r12,r12,r12 #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 0 # if 0==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r8,r8,ror#5 add r4,r4,r12 @ h+=Maj(a,b,c) from the past eor r0,r0,r8,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 0 add r4,r4,r12 @ h+=Maj(a,b,c) from the past ldrb r12,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r12,lsl#8 ldrb r12,[r1],#4 orr r2,r2,r0,lsl#16 # if 0==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r8,r8,ror#5 orr r2,r2,r12,lsl#24 eor r0,r0,r8,ror#19 @ Sigma1(e) #endif ldr r12,[r14],#4 @ *K256++ add r11,r11,r2 @ h+=X[i] str r2,[sp,#0*4] eor r2,r9,r10 add r11,r11,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r8 add r11,r11,r12 @ h+=K256[i] eor r2,r2,r10 @ Ch(e,f,g) eor r0,r4,r4,ror#11 add r11,r11,r2 @ h+=Ch(e,f,g) #if 0==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 0<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r4,r5 @ a^b, b^c in next round #else ldr r2,[sp,#2*4] @ from future BODY_16_xx eor r12,r4,r5 @ a^b, b^c in next round ldr r1,[sp,#15*4] @ from future BODY_16_xx #endif eor r0,r0,r4,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r7,r7,r11 @ d+=h eor r3,r3,r5 @ Maj(a,b,c) add r11,r11,r0,ror#2 @ h+=Sigma0(a) @ add r11,r11,r3 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 1 # if 1==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r7,r7,ror#5 add r11,r11,r3 @ h+=Maj(a,b,c) from the past eor r0,r0,r7,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 1 add r11,r11,r3 @ h+=Maj(a,b,c) from the past ldrb r3,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r3,lsl#8 ldrb r3,[r1],#4 orr r2,r2,r0,lsl#16 # if 1==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r7,r7,ror#5 orr r2,r2,r3,lsl#24 eor r0,r0,r7,ror#19 @ Sigma1(e) #endif ldr r3,[r14],#4 @ *K256++ add r10,r10,r2 @ h+=X[i] str r2,[sp,#1*4] eor r2,r8,r9 add r10,r10,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r7 add r10,r10,r3 @ h+=K256[i] eor r2,r2,r9 @ Ch(e,f,g) eor r0,r11,r11,ror#11 add r10,r10,r2 @ h+=Ch(e,f,g) #if 1==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 1<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r11,r4 @ a^b, b^c in next round #else ldr r2,[sp,#3*4] @ from future BODY_16_xx eor r3,r11,r4 @ a^b, b^c in next round ldr r1,[sp,#0*4] @ from future BODY_16_xx #endif eor r0,r0,r11,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r6,r6,r10 @ d+=h eor r12,r12,r4 @ Maj(a,b,c) add r10,r10,r0,ror#2 @ h+=Sigma0(a) @ add r10,r10,r12 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 2 # if 2==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r6,r6,ror#5 add r10,r10,r12 @ h+=Maj(a,b,c) from the past eor r0,r0,r6,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 2 add r10,r10,r12 @ h+=Maj(a,b,c) from the past ldrb r12,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r12,lsl#8 ldrb r12,[r1],#4 orr r2,r2,r0,lsl#16 # if 2==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r6,r6,ror#5 orr r2,r2,r12,lsl#24 eor r0,r0,r6,ror#19 @ Sigma1(e) #endif ldr r12,[r14],#4 @ *K256++ add r9,r9,r2 @ h+=X[i] str r2,[sp,#2*4] eor r2,r7,r8 add r9,r9,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r6 add r9,r9,r12 @ h+=K256[i] eor r2,r2,r8 @ Ch(e,f,g) eor r0,r10,r10,ror#11 add r9,r9,r2 @ h+=Ch(e,f,g) #if 2==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 2<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r10,r11 @ a^b, b^c in next round #else ldr r2,[sp,#4*4] @ from future BODY_16_xx eor r12,r10,r11 @ a^b, b^c in next round ldr r1,[sp,#1*4] @ from future BODY_16_xx #endif eor r0,r0,r10,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r5,r5,r9 @ d+=h eor r3,r3,r11 @ Maj(a,b,c) add r9,r9,r0,ror#2 @ h+=Sigma0(a) @ add r9,r9,r3 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 3 # if 3==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r5,r5,ror#5 add r9,r9,r3 @ h+=Maj(a,b,c) from the past eor r0,r0,r5,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 3 add r9,r9,r3 @ h+=Maj(a,b,c) from the past ldrb r3,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r3,lsl#8 ldrb r3,[r1],#4 orr r2,r2,r0,lsl#16 # if 3==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r5,r5,ror#5 orr r2,r2,r3,lsl#24 eor r0,r0,r5,ror#19 @ Sigma1(e) #endif ldr r3,[r14],#4 @ *K256++ add r8,r8,r2 @ h+=X[i] str r2,[sp,#3*4] eor r2,r6,r7 add r8,r8,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r5 add r8,r8,r3 @ h+=K256[i] eor r2,r2,r7 @ Ch(e,f,g) eor r0,r9,r9,ror#11 add r8,r8,r2 @ h+=Ch(e,f,g) #if 3==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 3<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r9,r10 @ a^b, b^c in next round #else ldr r2,[sp,#5*4] @ from future BODY_16_xx eor r3,r9,r10 @ a^b, b^c in next round ldr r1,[sp,#2*4] @ from future BODY_16_xx #endif eor r0,r0,r9,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r4,r4,r8 @ d+=h eor r12,r12,r10 @ Maj(a,b,c) add r8,r8,r0,ror#2 @ h+=Sigma0(a) @ add r8,r8,r12 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 4 # if 4==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r4,r4,ror#5 add r8,r8,r12 @ h+=Maj(a,b,c) from the past eor r0,r0,r4,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 4 add r8,r8,r12 @ h+=Maj(a,b,c) from the past ldrb r12,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r12,lsl#8 ldrb r12,[r1],#4 orr r2,r2,r0,lsl#16 # if 4==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r4,r4,ror#5 orr r2,r2,r12,lsl#24 eor r0,r0,r4,ror#19 @ Sigma1(e) #endif ldr r12,[r14],#4 @ *K256++ add r7,r7,r2 @ h+=X[i] str r2,[sp,#4*4] eor r2,r5,r6 add r7,r7,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r4 add r7,r7,r12 @ h+=K256[i] eor r2,r2,r6 @ Ch(e,f,g) eor r0,r8,r8,ror#11 add r7,r7,r2 @ h+=Ch(e,f,g) #if 4==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 4<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r8,r9 @ a^b, b^c in next round #else ldr r2,[sp,#6*4] @ from future BODY_16_xx eor r12,r8,r9 @ a^b, b^c in next round ldr r1,[sp,#3*4] @ from future BODY_16_xx #endif eor r0,r0,r8,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r11,r11,r7 @ d+=h eor r3,r3,r9 @ Maj(a,b,c) add r7,r7,r0,ror#2 @ h+=Sigma0(a) @ add r7,r7,r3 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 5 # if 5==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r11,r11,ror#5 add r7,r7,r3 @ h+=Maj(a,b,c) from the past eor r0,r0,r11,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 5 add r7,r7,r3 @ h+=Maj(a,b,c) from the past ldrb r3,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r3,lsl#8 ldrb r3,[r1],#4 orr r2,r2,r0,lsl#16 # if 5==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r11,r11,ror#5 orr r2,r2,r3,lsl#24 eor r0,r0,r11,ror#19 @ Sigma1(e) #endif ldr r3,[r14],#4 @ *K256++ add r6,r6,r2 @ h+=X[i] str r2,[sp,#5*4] eor r2,r4,r5 add r6,r6,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r11 add r6,r6,r3 @ h+=K256[i] eor r2,r2,r5 @ Ch(e,f,g) eor r0,r7,r7,ror#11 add r6,r6,r2 @ h+=Ch(e,f,g) #if 5==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 5<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r7,r8 @ a^b, b^c in next round #else ldr r2,[sp,#7*4] @ from future BODY_16_xx eor r3,r7,r8 @ a^b, b^c in next round ldr r1,[sp,#4*4] @ from future BODY_16_xx #endif eor r0,r0,r7,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r10,r10,r6 @ d+=h eor r12,r12,r8 @ Maj(a,b,c) add r6,r6,r0,ror#2 @ h+=Sigma0(a) @ add r6,r6,r12 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 6 # if 6==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r10,r10,ror#5 add r6,r6,r12 @ h+=Maj(a,b,c) from the past eor r0,r0,r10,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 6 add r6,r6,r12 @ h+=Maj(a,b,c) from the past ldrb r12,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r12,lsl#8 ldrb r12,[r1],#4 orr r2,r2,r0,lsl#16 # if 6==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r10,r10,ror#5 orr r2,r2,r12,lsl#24 eor r0,r0,r10,ror#19 @ Sigma1(e) #endif ldr r12,[r14],#4 @ *K256++ add r5,r5,r2 @ h+=X[i] str r2,[sp,#6*4] eor r2,r11,r4 add r5,r5,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r10 add r5,r5,r12 @ h+=K256[i] eor r2,r2,r4 @ Ch(e,f,g) eor r0,r6,r6,ror#11 add r5,r5,r2 @ h+=Ch(e,f,g) #if 6==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 6<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r6,r7 @ a^b, b^c in next round #else ldr r2,[sp,#8*4] @ from future BODY_16_xx eor r12,r6,r7 @ a^b, b^c in next round ldr r1,[sp,#5*4] @ from future BODY_16_xx #endif eor r0,r0,r6,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r9,r9,r5 @ d+=h eor r3,r3,r7 @ Maj(a,b,c) add r5,r5,r0,ror#2 @ h+=Sigma0(a) @ add r5,r5,r3 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 7 # if 7==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r9,r9,ror#5 add r5,r5,r3 @ h+=Maj(a,b,c) from the past eor r0,r0,r9,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 7 add r5,r5,r3 @ h+=Maj(a,b,c) from the past ldrb r3,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r3,lsl#8 ldrb r3,[r1],#4 orr r2,r2,r0,lsl#16 # if 7==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r9,r9,ror#5 orr r2,r2,r3,lsl#24 eor r0,r0,r9,ror#19 @ Sigma1(e) #endif ldr r3,[r14],#4 @ *K256++ add r4,r4,r2 @ h+=X[i] str r2,[sp,#7*4] eor r2,r10,r11 add r4,r4,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r9 add r4,r4,r3 @ h+=K256[i] eor r2,r2,r11 @ Ch(e,f,g) eor r0,r5,r5,ror#11 add r4,r4,r2 @ h+=Ch(e,f,g) #if 7==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 7<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r5,r6 @ a^b, b^c in next round #else ldr r2,[sp,#9*4] @ from future BODY_16_xx eor r3,r5,r6 @ a^b, b^c in next round ldr r1,[sp,#6*4] @ from future BODY_16_xx #endif eor r0,r0,r5,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r8,r8,r4 @ d+=h eor r12,r12,r6 @ Maj(a,b,c) add r4,r4,r0,ror#2 @ h+=Sigma0(a) @ add r4,r4,r12 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 8 # if 8==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r8,r8,ror#5 add r4,r4,r12 @ h+=Maj(a,b,c) from the past eor r0,r0,r8,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 8 add r4,r4,r12 @ h+=Maj(a,b,c) from the past ldrb r12,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r12,lsl#8 ldrb r12,[r1],#4 orr r2,r2,r0,lsl#16 # if 8==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r8,r8,ror#5 orr r2,r2,r12,lsl#24 eor r0,r0,r8,ror#19 @ Sigma1(e) #endif ldr r12,[r14],#4 @ *K256++ add r11,r11,r2 @ h+=X[i] str r2,[sp,#8*4] eor r2,r9,r10 add r11,r11,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r8 add r11,r11,r12 @ h+=K256[i] eor r2,r2,r10 @ Ch(e,f,g) eor r0,r4,r4,ror#11 add r11,r11,r2 @ h+=Ch(e,f,g) #if 8==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 8<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r4,r5 @ a^b, b^c in next round #else ldr r2,[sp,#10*4] @ from future BODY_16_xx eor r12,r4,r5 @ a^b, b^c in next round ldr r1,[sp,#7*4] @ from future BODY_16_xx #endif eor r0,r0,r4,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r7,r7,r11 @ d+=h eor r3,r3,r5 @ Maj(a,b,c) add r11,r11,r0,ror#2 @ h+=Sigma0(a) @ add r11,r11,r3 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 9 # if 9==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r7,r7,ror#5 add r11,r11,r3 @ h+=Maj(a,b,c) from the past eor r0,r0,r7,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 9 add r11,r11,r3 @ h+=Maj(a,b,c) from the past ldrb r3,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r3,lsl#8 ldrb r3,[r1],#4 orr r2,r2,r0,lsl#16 # if 9==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r7,r7,ror#5 orr r2,r2,r3,lsl#24 eor r0,r0,r7,ror#19 @ Sigma1(e) #endif ldr r3,[r14],#4 @ *K256++ add r10,r10,r2 @ h+=X[i] str r2,[sp,#9*4] eor r2,r8,r9 add r10,r10,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r7 add r10,r10,r3 @ h+=K256[i] eor r2,r2,r9 @ Ch(e,f,g) eor r0,r11,r11,ror#11 add r10,r10,r2 @ h+=Ch(e,f,g) #if 9==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 9<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r11,r4 @ a^b, b^c in next round #else ldr r2,[sp,#11*4] @ from future BODY_16_xx eor r3,r11,r4 @ a^b, b^c in next round ldr r1,[sp,#8*4] @ from future BODY_16_xx #endif eor r0,r0,r11,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r6,r6,r10 @ d+=h eor r12,r12,r4 @ Maj(a,b,c) add r10,r10,r0,ror#2 @ h+=Sigma0(a) @ add r10,r10,r12 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 10 # if 10==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r6,r6,ror#5 add r10,r10,r12 @ h+=Maj(a,b,c) from the past eor r0,r0,r6,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 10 add r10,r10,r12 @ h+=Maj(a,b,c) from the past ldrb r12,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r12,lsl#8 ldrb r12,[r1],#4 orr r2,r2,r0,lsl#16 # if 10==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r6,r6,ror#5 orr r2,r2,r12,lsl#24 eor r0,r0,r6,ror#19 @ Sigma1(e) #endif ldr r12,[r14],#4 @ *K256++ add r9,r9,r2 @ h+=X[i] str r2,[sp,#10*4] eor r2,r7,r8 add r9,r9,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r6 add r9,r9,r12 @ h+=K256[i] eor r2,r2,r8 @ Ch(e,f,g) eor r0,r10,r10,ror#11 add r9,r9,r2 @ h+=Ch(e,f,g) #if 10==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 10<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r10,r11 @ a^b, b^c in next round #else ldr r2,[sp,#12*4] @ from future BODY_16_xx eor r12,r10,r11 @ a^b, b^c in next round ldr r1,[sp,#9*4] @ from future BODY_16_xx #endif eor r0,r0,r10,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r5,r5,r9 @ d+=h eor r3,r3,r11 @ Maj(a,b,c) add r9,r9,r0,ror#2 @ h+=Sigma0(a) @ add r9,r9,r3 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 11 # if 11==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r5,r5,ror#5 add r9,r9,r3 @ h+=Maj(a,b,c) from the past eor r0,r0,r5,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 11 add r9,r9,r3 @ h+=Maj(a,b,c) from the past ldrb r3,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r3,lsl#8 ldrb r3,[r1],#4 orr r2,r2,r0,lsl#16 # if 11==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r5,r5,ror#5 orr r2,r2,r3,lsl#24 eor r0,r0,r5,ror#19 @ Sigma1(e) #endif ldr r3,[r14],#4 @ *K256++ add r8,r8,r2 @ h+=X[i] str r2,[sp,#11*4] eor r2,r6,r7 add r8,r8,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r5 add r8,r8,r3 @ h+=K256[i] eor r2,r2,r7 @ Ch(e,f,g) eor r0,r9,r9,ror#11 add r8,r8,r2 @ h+=Ch(e,f,g) #if 11==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 11<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r9,r10 @ a^b, b^c in next round #else ldr r2,[sp,#13*4] @ from future BODY_16_xx eor r3,r9,r10 @ a^b, b^c in next round ldr r1,[sp,#10*4] @ from future BODY_16_xx #endif eor r0,r0,r9,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r4,r4,r8 @ d+=h eor r12,r12,r10 @ Maj(a,b,c) add r8,r8,r0,ror#2 @ h+=Sigma0(a) @ add r8,r8,r12 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 12 # if 12==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r4,r4,ror#5 add r8,r8,r12 @ h+=Maj(a,b,c) from the past eor r0,r0,r4,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 12 add r8,r8,r12 @ h+=Maj(a,b,c) from the past ldrb r12,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r12,lsl#8 ldrb r12,[r1],#4 orr r2,r2,r0,lsl#16 # if 12==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r4,r4,ror#5 orr r2,r2,r12,lsl#24 eor r0,r0,r4,ror#19 @ Sigma1(e) #endif ldr r12,[r14],#4 @ *K256++ add r7,r7,r2 @ h+=X[i] str r2,[sp,#12*4] eor r2,r5,r6 add r7,r7,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r4 add r7,r7,r12 @ h+=K256[i] eor r2,r2,r6 @ Ch(e,f,g) eor r0,r8,r8,ror#11 add r7,r7,r2 @ h+=Ch(e,f,g) #if 12==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 12<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r8,r9 @ a^b, b^c in next round #else ldr r2,[sp,#14*4] @ from future BODY_16_xx eor r12,r8,r9 @ a^b, b^c in next round ldr r1,[sp,#11*4] @ from future BODY_16_xx #endif eor r0,r0,r8,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r11,r11,r7 @ d+=h eor r3,r3,r9 @ Maj(a,b,c) add r7,r7,r0,ror#2 @ h+=Sigma0(a) @ add r7,r7,r3 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 13 # if 13==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r11,r11,ror#5 add r7,r7,r3 @ h+=Maj(a,b,c) from the past eor r0,r0,r11,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 13 add r7,r7,r3 @ h+=Maj(a,b,c) from the past ldrb r3,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r3,lsl#8 ldrb r3,[r1],#4 orr r2,r2,r0,lsl#16 # if 13==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r11,r11,ror#5 orr r2,r2,r3,lsl#24 eor r0,r0,r11,ror#19 @ Sigma1(e) #endif ldr r3,[r14],#4 @ *K256++ add r6,r6,r2 @ h+=X[i] str r2,[sp,#13*4] eor r2,r4,r5 add r6,r6,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r11 add r6,r6,r3 @ h+=K256[i] eor r2,r2,r5 @ Ch(e,f,g) eor r0,r7,r7,ror#11 add r6,r6,r2 @ h+=Ch(e,f,g) #if 13==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 13<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r7,r8 @ a^b, b^c in next round #else ldr r2,[sp,#15*4] @ from future BODY_16_xx eor r3,r7,r8 @ a^b, b^c in next round ldr r1,[sp,#12*4] @ from future BODY_16_xx #endif eor r0,r0,r7,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r10,r10,r6 @ d+=h eor r12,r12,r8 @ Maj(a,b,c) add r6,r6,r0,ror#2 @ h+=Sigma0(a) @ add r6,r6,r12 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 14 # if 14==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r10,r10,ror#5 add r6,r6,r12 @ h+=Maj(a,b,c) from the past eor r0,r0,r10,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 14 add r6,r6,r12 @ h+=Maj(a,b,c) from the past ldrb r12,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r12,lsl#8 ldrb r12,[r1],#4 orr r2,r2,r0,lsl#16 # if 14==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r10,r10,ror#5 orr r2,r2,r12,lsl#24 eor r0,r0,r10,ror#19 @ Sigma1(e) #endif ldr r12,[r14],#4 @ *K256++ add r5,r5,r2 @ h+=X[i] str r2,[sp,#14*4] eor r2,r11,r4 add r5,r5,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r10 add r5,r5,r12 @ h+=K256[i] eor r2,r2,r4 @ Ch(e,f,g) eor r0,r6,r6,ror#11 add r5,r5,r2 @ h+=Ch(e,f,g) #if 14==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 14<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r6,r7 @ a^b, b^c in next round #else ldr r2,[sp,#0*4] @ from future BODY_16_xx eor r12,r6,r7 @ a^b, b^c in next round ldr r1,[sp,#13*4] @ from future BODY_16_xx #endif eor r0,r0,r6,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r9,r9,r5 @ d+=h eor r3,r3,r7 @ Maj(a,b,c) add r5,r5,r0,ror#2 @ h+=Sigma0(a) @ add r5,r5,r3 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 15 # if 15==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r9,r9,ror#5 add r5,r5,r3 @ h+=Maj(a,b,c) from the past eor r0,r0,r9,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 15 add r5,r5,r3 @ h+=Maj(a,b,c) from the past ldrb r3,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r3,lsl#8 ldrb r3,[r1],#4 orr r2,r2,r0,lsl#16 # if 15==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r9,r9,ror#5 orr r2,r2,r3,lsl#24 eor r0,r0,r9,ror#19 @ Sigma1(e) #endif ldr r3,[r14],#4 @ *K256++ add r4,r4,r2 @ h+=X[i] str r2,[sp,#15*4] eor r2,r10,r11 add r4,r4,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r9 add r4,r4,r3 @ h+=K256[i] eor r2,r2,r11 @ Ch(e,f,g) eor r0,r5,r5,ror#11 add r4,r4,r2 @ h+=Ch(e,f,g) #if 15==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 15<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r5,r6 @ a^b, b^c in next round #else ldr r2,[sp,#1*4] @ from future BODY_16_xx eor r3,r5,r6 @ a^b, b^c in next round ldr r1,[sp,#14*4] @ from future BODY_16_xx #endif eor r0,r0,r5,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r8,r8,r4 @ d+=h eor r12,r12,r6 @ Maj(a,b,c) add r4,r4,r0,ror#2 @ h+=Sigma0(a) @ add r4,r4,r12 @ h+=Maj(a,b,c) .Lrounds_16_xx: @ ldr r2,[sp,#1*4] @ 16 @ ldr r1,[sp,#14*4] mov r0,r2,ror#7 add r4,r4,r12 @ h+=Maj(a,b,c) from the past mov r12,r1,ror#17 eor r0,r0,r2,ror#18 eor r12,r12,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#0*4] eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#9*4] add r12,r12,r0 eor r0,r8,r8,ror#5 @ from BODY_00_15 add r2,r2,r12 eor r0,r0,r8,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r12,[r14],#4 @ *K256++ add r11,r11,r2 @ h+=X[i] str r2,[sp,#0*4] eor r2,r9,r10 add r11,r11,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r8 add r11,r11,r12 @ h+=K256[i] eor r2,r2,r10 @ Ch(e,f,g) eor r0,r4,r4,ror#11 add r11,r11,r2 @ h+=Ch(e,f,g) #if 16==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 16<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r4,r5 @ a^b, b^c in next round #else ldr r2,[sp,#2*4] @ from future BODY_16_xx eor r12,r4,r5 @ a^b, b^c in next round ldr r1,[sp,#15*4] @ from future BODY_16_xx #endif eor r0,r0,r4,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r7,r7,r11 @ d+=h eor r3,r3,r5 @ Maj(a,b,c) add r11,r11,r0,ror#2 @ h+=Sigma0(a) @ add r11,r11,r3 @ h+=Maj(a,b,c) @ ldr r2,[sp,#2*4] @ 17 @ ldr r1,[sp,#15*4] mov r0,r2,ror#7 add r11,r11,r3 @ h+=Maj(a,b,c) from the past mov r3,r1,ror#17 eor r0,r0,r2,ror#18 eor r3,r3,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#1*4] eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#10*4] add r3,r3,r0 eor r0,r7,r7,ror#5 @ from BODY_00_15 add r2,r2,r3 eor r0,r0,r7,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r3,[r14],#4 @ *K256++ add r10,r10,r2 @ h+=X[i] str r2,[sp,#1*4] eor r2,r8,r9 add r10,r10,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r7 add r10,r10,r3 @ h+=K256[i] eor r2,r2,r9 @ Ch(e,f,g) eor r0,r11,r11,ror#11 add r10,r10,r2 @ h+=Ch(e,f,g) #if 17==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? #endif #if 17<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r11,r4 @ a^b, b^c in next round #else ldr r2,[sp,#3*4] @ from future BODY_16_xx eor r3,r11,r4 @ a^b, b^c in next round ldr r1,[sp,#0*4] @ from future BODY_16_xx #endif eor r0,r0,r11,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r6,r6,r10 @ d+=h eor r12,r12,r4 @ Maj(a,b,c) add r10,r10,r0,ror#2 @ h+=Sigma0(a) @ add r10,r10,r12 @ h+=Maj(a,b,c) @ ldr r2,[sp,#3*4] @ 18 @ ldr r1,[sp,#0*4] mov r0,r2,ror#7 add r10,r10,r12 @ h+=Maj(a,b,c) from the past mov r12,r1,ror#17 eor r0,r0,r2,ror#18 eor r12,r12,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#2*4] eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#11*4] add r12,r12,r0 eor r0,r6,r6,ror#5 @ from BODY_00_15 add r2,r2,r12 eor r0,r0,r6,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r12,[r14],#4 @ *K256++ add r9,r9,r2 @ h+=X[i] str r2,[sp,#2*4] eor r2,r7,r8 add r9,r9,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r6 add r9,r9,r12 @ h+=K256[i] eor r2,r2,r8 @ Ch(e,f,g) eor r0,r10,r10,ror#11 add r9,r9,r2 @ h+=Ch(e,f,g) #if 18==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? 
#endif #if 18<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r10,r11 @ a^b, b^c in next round #else ldr r2,[sp,#4*4] @ from future BODY_16_xx eor r12,r10,r11 @ a^b, b^c in next round ldr r1,[sp,#1*4] @ from future BODY_16_xx #endif eor r0,r0,r10,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r5,r5,r9 @ d+=h eor r3,r3,r11 @ Maj(a,b,c) add r9,r9,r0,ror#2 @ h+=Sigma0(a) @ add r9,r9,r3 @ h+=Maj(a,b,c) @ ldr r2,[sp,#4*4] @ 19 @ ldr r1,[sp,#1*4] mov r0,r2,ror#7 add r9,r9,r3 @ h+=Maj(a,b,c) from the past mov r3,r1,ror#17 eor r0,r0,r2,ror#18 eor r3,r3,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#3*4] eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#12*4] add r3,r3,r0 eor r0,r5,r5,ror#5 @ from BODY_00_15 add r2,r2,r3 eor r0,r0,r5,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r3,[r14],#4 @ *K256++ add r8,r8,r2 @ h+=X[i] str r2,[sp,#3*4] eor r2,r6,r7 add r8,r8,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r5 add r8,r8,r3 @ h+=K256[i] eor r2,r2,r7 @ Ch(e,f,g) eor r0,r9,r9,ror#11 add r8,r8,r2 @ h+=Ch(e,f,g) #if 19==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? #endif #if 19<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r9,r10 @ a^b, b^c in next round #else ldr r2,[sp,#5*4] @ from future BODY_16_xx eor r3,r9,r10 @ a^b, b^c in next round ldr r1,[sp,#2*4] @ from future BODY_16_xx #endif eor r0,r0,r9,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r4,r4,r8 @ d+=h eor r12,r12,r10 @ Maj(a,b,c) add r8,r8,r0,ror#2 @ h+=Sigma0(a) @ add r8,r8,r12 @ h+=Maj(a,b,c) @ ldr r2,[sp,#5*4] @ 20 @ ldr r1,[sp,#2*4] mov r0,r2,ror#7 add r8,r8,r12 @ h+=Maj(a,b,c) from the past mov r12,r1,ror#17 eor r0,r0,r2,ror#18 eor r12,r12,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#4*4] eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#13*4] add r12,r12,r0 eor r0,r4,r4,ror#5 @ from BODY_00_15 add r2,r2,r12 eor r0,r0,r4,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r12,[r14],#4 @ *K256++ add r7,r7,r2 @ h+=X[i] str r2,[sp,#4*4] eor r2,r5,r6 add r7,r7,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r4 add r7,r7,r12 @ h+=K256[i] eor r2,r2,r6 @ Ch(e,f,g) eor r0,r8,r8,ror#11 add r7,r7,r2 @ h+=Ch(e,f,g) #if 20==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 20<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r8,r9 @ a^b, b^c in next round #else ldr r2,[sp,#6*4] @ from future BODY_16_xx eor r12,r8,r9 @ a^b, b^c in next round ldr r1,[sp,#3*4] @ from future BODY_16_xx #endif eor r0,r0,r8,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r11,r11,r7 @ d+=h eor r3,r3,r9 @ Maj(a,b,c) add r7,r7,r0,ror#2 @ h+=Sigma0(a) @ add r7,r7,r3 @ h+=Maj(a,b,c) @ ldr r2,[sp,#6*4] @ 21 @ ldr r1,[sp,#3*4] mov r0,r2,ror#7 add r7,r7,r3 @ h+=Maj(a,b,c) from the past mov r3,r1,ror#17 eor r0,r0,r2,ror#18 eor r3,r3,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#5*4] eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#14*4] add r3,r3,r0 eor r0,r11,r11,ror#5 @ from BODY_00_15 add r2,r2,r3 eor r0,r0,r11,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r3,[r14],#4 @ *K256++ add r6,r6,r2 @ h+=X[i] str r2,[sp,#5*4] eor r2,r4,r5 add r6,r6,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r11 add r6,r6,r3 @ h+=K256[i] eor r2,r2,r5 @ Ch(e,f,g) eor r0,r7,r7,ror#11 add r6,r6,r2 @ h+=Ch(e,f,g) #if 21==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 21<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r7,r8 @ a^b, b^c in next round #else ldr r2,[sp,#7*4] @ from future BODY_16_xx eor r3,r7,r8 @ a^b, b^c in next round ldr r1,[sp,#4*4] @ from future BODY_16_xx #endif eor r0,r0,r7,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r10,r10,r6 @ d+=h eor r12,r12,r8 @ Maj(a,b,c) add r6,r6,r0,ror#2 @ h+=Sigma0(a) @ add r6,r6,r12 @ h+=Maj(a,b,c) @ ldr r2,[sp,#7*4] @ 22 @ ldr r1,[sp,#4*4] mov r0,r2,ror#7 add r6,r6,r12 @ h+=Maj(a,b,c) from the past mov r12,r1,ror#17 eor r0,r0,r2,ror#18 eor r12,r12,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#6*4] eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#15*4] add r12,r12,r0 eor r0,r10,r10,ror#5 @ from BODY_00_15 add r2,r2,r12 eor r0,r0,r10,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r12,[r14],#4 @ *K256++ add r5,r5,r2 @ h+=X[i] str r2,[sp,#6*4] eor r2,r11,r4 add r5,r5,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r10 add r5,r5,r12 @ h+=K256[i] eor r2,r2,r4 @ Ch(e,f,g) eor r0,r6,r6,ror#11 add r5,r5,r2 @ h+=Ch(e,f,g) #if 22==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 22<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r6,r7 @ a^b, b^c in next round #else ldr r2,[sp,#8*4] @ from future BODY_16_xx eor r12,r6,r7 @ a^b, b^c in next round ldr r1,[sp,#5*4] @ from future BODY_16_xx #endif eor r0,r0,r6,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r9,r9,r5 @ d+=h eor r3,r3,r7 @ Maj(a,b,c) add r5,r5,r0,ror#2 @ h+=Sigma0(a) @ add r5,r5,r3 @ h+=Maj(a,b,c) @ ldr r2,[sp,#8*4] @ 23 @ ldr r1,[sp,#5*4] mov r0,r2,ror#7 add r5,r5,r3 @ h+=Maj(a,b,c) from the past mov r3,r1,ror#17 eor r0,r0,r2,ror#18 eor r3,r3,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#7*4] eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#0*4] add r3,r3,r0 eor r0,r9,r9,ror#5 @ from BODY_00_15 add r2,r2,r3 eor r0,r0,r9,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r3,[r14],#4 @ *K256++ add r4,r4,r2 @ h+=X[i] str r2,[sp,#7*4] eor r2,r10,r11 add r4,r4,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r9 add r4,r4,r3 @ h+=K256[i] eor r2,r2,r11 @ Ch(e,f,g) eor r0,r5,r5,ror#11 add r4,r4,r2 @ h+=Ch(e,f,g) #if 23==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? #endif #if 23<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r5,r6 @ a^b, b^c in next round #else ldr r2,[sp,#9*4] @ from future BODY_16_xx eor r3,r5,r6 @ a^b, b^c in next round ldr r1,[sp,#6*4] @ from future BODY_16_xx #endif eor r0,r0,r5,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r8,r8,r4 @ d+=h eor r12,r12,r6 @ Maj(a,b,c) add r4,r4,r0,ror#2 @ h+=Sigma0(a) @ add r4,r4,r12 @ h+=Maj(a,b,c) @ ldr r2,[sp,#9*4] @ 24 @ ldr r1,[sp,#6*4] mov r0,r2,ror#7 add r4,r4,r12 @ h+=Maj(a,b,c) from the past mov r12,r1,ror#17 eor r0,r0,r2,ror#18 eor r12,r12,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#8*4] eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#1*4] add r12,r12,r0 eor r0,r8,r8,ror#5 @ from BODY_00_15 add r2,r2,r12 eor r0,r0,r8,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r12,[r14],#4 @ *K256++ add r11,r11,r2 @ h+=X[i] str r2,[sp,#8*4] eor r2,r9,r10 add r11,r11,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r8 add r11,r11,r12 @ h+=K256[i] eor r2,r2,r10 @ Ch(e,f,g) eor r0,r4,r4,ror#11 add r11,r11,r2 @ h+=Ch(e,f,g) #if 24==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? 
#endif #if 24<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r4,r5 @ a^b, b^c in next round #else ldr r2,[sp,#10*4] @ from future BODY_16_xx eor r12,r4,r5 @ a^b, b^c in next round ldr r1,[sp,#7*4] @ from future BODY_16_xx #endif eor r0,r0,r4,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r7,r7,r11 @ d+=h eor r3,r3,r5 @ Maj(a,b,c) add r11,r11,r0,ror#2 @ h+=Sigma0(a) @ add r11,r11,r3 @ h+=Maj(a,b,c) @ ldr r2,[sp,#10*4] @ 25 @ ldr r1,[sp,#7*4] mov r0,r2,ror#7 add r11,r11,r3 @ h+=Maj(a,b,c) from the past mov r3,r1,ror#17 eor r0,r0,r2,ror#18 eor r3,r3,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#9*4] eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#2*4] add r3,r3,r0 eor r0,r7,r7,ror#5 @ from BODY_00_15 add r2,r2,r3 eor r0,r0,r7,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r3,[r14],#4 @ *K256++ add r10,r10,r2 @ h+=X[i] str r2,[sp,#9*4] eor r2,r8,r9 add r10,r10,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r7 add r10,r10,r3 @ h+=K256[i] eor r2,r2,r9 @ Ch(e,f,g) eor r0,r11,r11,ror#11 add r10,r10,r2 @ h+=Ch(e,f,g) #if 25==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? #endif #if 25<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r11,r4 @ a^b, b^c in next round #else ldr r2,[sp,#11*4] @ from future BODY_16_xx eor r3,r11,r4 @ a^b, b^c in next round ldr r1,[sp,#8*4] @ from future BODY_16_xx #endif eor r0,r0,r11,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r6,r6,r10 @ d+=h eor r12,r12,r4 @ Maj(a,b,c) add r10,r10,r0,ror#2 @ h+=Sigma0(a) @ add r10,r10,r12 @ h+=Maj(a,b,c) @ ldr r2,[sp,#11*4] @ 26 @ ldr r1,[sp,#8*4] mov r0,r2,ror#7 add r10,r10,r12 @ h+=Maj(a,b,c) from the past mov r12,r1,ror#17 eor r0,r0,r2,ror#18 eor r12,r12,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#10*4] eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#3*4] add r12,r12,r0 eor r0,r6,r6,ror#5 @ from BODY_00_15 add r2,r2,r12 eor r0,r0,r6,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r12,[r14],#4 @ *K256++ add r9,r9,r2 @ h+=X[i] str r2,[sp,#10*4] eor r2,r7,r8 add r9,r9,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r6 add r9,r9,r12 @ h+=K256[i] eor r2,r2,r8 @ Ch(e,f,g) eor r0,r10,r10,ror#11 add r9,r9,r2 @ h+=Ch(e,f,g) #if 26==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 26<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r10,r11 @ a^b, b^c in next round #else ldr r2,[sp,#12*4] @ from future BODY_16_xx eor r12,r10,r11 @ a^b, b^c in next round ldr r1,[sp,#9*4] @ from future BODY_16_xx #endif eor r0,r0,r10,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r5,r5,r9 @ d+=h eor r3,r3,r11 @ Maj(a,b,c) add r9,r9,r0,ror#2 @ h+=Sigma0(a) @ add r9,r9,r3 @ h+=Maj(a,b,c) @ ldr r2,[sp,#12*4] @ 27 @ ldr r1,[sp,#9*4] mov r0,r2,ror#7 add r9,r9,r3 @ h+=Maj(a,b,c) from the past mov r3,r1,ror#17 eor r0,r0,r2,ror#18 eor r3,r3,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#11*4] eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#4*4] add r3,r3,r0 eor r0,r5,r5,ror#5 @ from BODY_00_15 add r2,r2,r3 eor r0,r0,r5,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r3,[r14],#4 @ *K256++ add r8,r8,r2 @ h+=X[i] str r2,[sp,#11*4] eor r2,r6,r7 add r8,r8,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r5 add r8,r8,r3 @ h+=K256[i] eor r2,r2,r7 @ Ch(e,f,g) eor r0,r9,r9,ror#11 add r8,r8,r2 @ h+=Ch(e,f,g) #if 27==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 27<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r9,r10 @ a^b, b^c in next round #else ldr r2,[sp,#13*4] @ from future BODY_16_xx eor r3,r9,r10 @ a^b, b^c in next round ldr r1,[sp,#10*4] @ from future BODY_16_xx #endif eor r0,r0,r9,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r4,r4,r8 @ d+=h eor r12,r12,r10 @ Maj(a,b,c) add r8,r8,r0,ror#2 @ h+=Sigma0(a) @ add r8,r8,r12 @ h+=Maj(a,b,c) @ ldr r2,[sp,#13*4] @ 28 @ ldr r1,[sp,#10*4] mov r0,r2,ror#7 add r8,r8,r12 @ h+=Maj(a,b,c) from the past mov r12,r1,ror#17 eor r0,r0,r2,ror#18 eor r12,r12,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#12*4] eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#5*4] add r12,r12,r0 eor r0,r4,r4,ror#5 @ from BODY_00_15 add r2,r2,r12 eor r0,r0,r4,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r12,[r14],#4 @ *K256++ add r7,r7,r2 @ h+=X[i] str r2,[sp,#12*4] eor r2,r5,r6 add r7,r7,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r4 add r7,r7,r12 @ h+=K256[i] eor r2,r2,r6 @ Ch(e,f,g) eor r0,r8,r8,ror#11 add r7,r7,r2 @ h+=Ch(e,f,g) #if 28==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 28<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r8,r9 @ a^b, b^c in next round #else ldr r2,[sp,#14*4] @ from future BODY_16_xx eor r12,r8,r9 @ a^b, b^c in next round ldr r1,[sp,#11*4] @ from future BODY_16_xx #endif eor r0,r0,r8,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r11,r11,r7 @ d+=h eor r3,r3,r9 @ Maj(a,b,c) add r7,r7,r0,ror#2 @ h+=Sigma0(a) @ add r7,r7,r3 @ h+=Maj(a,b,c) @ ldr r2,[sp,#14*4] @ 29 @ ldr r1,[sp,#11*4] mov r0,r2,ror#7 add r7,r7,r3 @ h+=Maj(a,b,c) from the past mov r3,r1,ror#17 eor r0,r0,r2,ror#18 eor r3,r3,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#13*4] eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#6*4] add r3,r3,r0 eor r0,r11,r11,ror#5 @ from BODY_00_15 add r2,r2,r3 eor r0,r0,r11,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r3,[r14],#4 @ *K256++ add r6,r6,r2 @ h+=X[i] str r2,[sp,#13*4] eor r2,r4,r5 add r6,r6,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r11 add r6,r6,r3 @ h+=K256[i] eor r2,r2,r5 @ Ch(e,f,g) eor r0,r7,r7,ror#11 add r6,r6,r2 @ h+=Ch(e,f,g) #if 29==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? #endif #if 29<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r7,r8 @ a^b, b^c in next round #else ldr r2,[sp,#15*4] @ from future BODY_16_xx eor r3,r7,r8 @ a^b, b^c in next round ldr r1,[sp,#12*4] @ from future BODY_16_xx #endif eor r0,r0,r7,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r10,r10,r6 @ d+=h eor r12,r12,r8 @ Maj(a,b,c) add r6,r6,r0,ror#2 @ h+=Sigma0(a) @ add r6,r6,r12 @ h+=Maj(a,b,c) @ ldr r2,[sp,#15*4] @ 30 @ ldr r1,[sp,#12*4] mov r0,r2,ror#7 add r6,r6,r12 @ h+=Maj(a,b,c) from the past mov r12,r1,ror#17 eor r0,r0,r2,ror#18 eor r12,r12,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#14*4] eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#7*4] add r12,r12,r0 eor r0,r10,r10,ror#5 @ from BODY_00_15 add r2,r2,r12 eor r0,r0,r10,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r12,[r14],#4 @ *K256++ add r5,r5,r2 @ h+=X[i] str r2,[sp,#14*4] eor r2,r11,r4 add r5,r5,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r10 add r5,r5,r12 @ h+=K256[i] eor r2,r2,r4 @ Ch(e,f,g) eor r0,r6,r6,ror#11 add r5,r5,r2 @ h+=Ch(e,f,g) #if 30==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? 
#endif #if 30<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r6,r7 @ a^b, b^c in next round #else ldr r2,[sp,#0*4] @ from future BODY_16_xx eor r12,r6,r7 @ a^b, b^c in next round ldr r1,[sp,#13*4] @ from future BODY_16_xx #endif eor r0,r0,r6,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r9,r9,r5 @ d+=h eor r3,r3,r7 @ Maj(a,b,c) add r5,r5,r0,ror#2 @ h+=Sigma0(a) @ add r5,r5,r3 @ h+=Maj(a,b,c) @ ldr r2,[sp,#0*4] @ 31 @ ldr r1,[sp,#13*4] mov r0,r2,ror#7 add r5,r5,r3 @ h+=Maj(a,b,c) from the past mov r3,r1,ror#17 eor r0,r0,r2,ror#18 eor r3,r3,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#15*4] eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#8*4] add r3,r3,r0 eor r0,r9,r9,ror#5 @ from BODY_00_15 add r2,r2,r3 eor r0,r0,r9,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r3,[r14],#4 @ *K256++ add r4,r4,r2 @ h+=X[i] str r2,[sp,#15*4] eor r2,r10,r11 add r4,r4,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r9 add r4,r4,r3 @ h+=K256[i] eor r2,r2,r11 @ Ch(e,f,g) eor r0,r5,r5,ror#11 add r4,r4,r2 @ h+=Ch(e,f,g) #if 31==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? #endif #if 31<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r5,r6 @ a^b, b^c in next round #else ldr r2,[sp,#1*4] @ from future BODY_16_xx eor r3,r5,r6 @ a^b, b^c in next round ldr r1,[sp,#14*4] @ from future BODY_16_xx #endif eor r0,r0,r5,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r8,r8,r4 @ d+=h eor r12,r12,r6 @ Maj(a,b,c) add r4,r4,r0,ror#2 @ h+=Sigma0(a) @ add r4,r4,r12 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 ite eq @ Thumb2 thing, sanity check in ARM #endif ldreq r3,[sp,#16*4] @ pull ctx bne .Lrounds_16_xx add r4,r4,r12 @ h+=Maj(a,b,c) from the past ldr r0,[r3,#0] ldr r2,[r3,#4] ldr r12,[r3,#8] add r4,r4,r0 ldr r0,[r3,#12] add r5,r5,r2 ldr r2,[r3,#16] add r6,r6,r12 ldr r12,[r3,#20] add r7,r7,r0 ldr r0,[r3,#24] add r8,r8,r2 ldr r2,[r3,#28] add r9,r9,r12 ldr r1,[sp,#17*4] @ pull inp ldr r12,[sp,#18*4] @ pull inp+len add r10,r10,r0 add r11,r11,r2 stmia r3,{r4,r5,r6,r7,r8,r9,r10,r11} cmp r1,r12 sub r14,r14,#256 @ rewind Ktbl bne .Loop add sp,sp,#19*4 @ destroy frame #if __ARM_ARCH>=5 ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc} #else ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr} tst lr,#1 moveq pc,lr @ be binary compatible with V4, yet .word 0xe12fff1e @ interoperable with Thumb ISA:-) #endif .size sha256_block_data_order_nohw,.-sha256_block_data_order_nohw #if __ARM_MAX_ARCH__>=7 .arch armv7-a .fpu neon .LK256_shortcut_neon: @ PC is 8 bytes ahead in Arm mode and 4 bytes ahead in Thumb mode. #if defined(__thumb2__) .word K256-(.LK256_add_neon+4) #else .word K256-(.LK256_add_neon+8) #endif .globl sha256_block_data_order_neon .hidden sha256_block_data_order_neon .type sha256_block_data_order_neon,%function .align 5 .skip 16 sha256_block_data_order_neon: stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} sub r11,sp,#16*4+16 @ K256 is just at the boundary of being easily referenced by an ADR from @ this function. In Arm mode, when building with __ARM_ARCH=6, it does @ not fit. By moving code around, we could make it fit, but this is too @ fragile. For simplicity, just load the offset from @ .LK256_shortcut_neon. @ @ TODO(davidben): adrl would avoid a load, but clang-assembler does not @ support it. We might be able to emulate it with a macro, but Android's @ did not work when I tried it. 
@ https://android.googlesource.com/platform/ndk/+/refs/heads/main/docs/ClangMigration.md#arm ldr r14,.LK256_shortcut_neon .LK256_add_neon: add r14,pc,r14 bic r11,r11,#15 @ align for 128-bit stores mov r12,sp mov sp,r11 @ alloca add r2,r1,r2,lsl#6 @ len to point at the end of inp vld1.8 {q0},[r1]! vld1.8 {q1},[r1]! vld1.8 {q2},[r1]! vld1.8 {q3},[r1]! vld1.32 {q8},[r14,:128]! vld1.32 {q9},[r14,:128]! vld1.32 {q10},[r14,:128]! vld1.32 {q11},[r14,:128]! vrev32.8 q0,q0 @ yes, even on str r0,[sp,#64] vrev32.8 q1,q1 @ big-endian str r1,[sp,#68] mov r1,sp vrev32.8 q2,q2 str r2,[sp,#72] vrev32.8 q3,q3 str r12,[sp,#76] @ save original sp vadd.i32 q8,q8,q0 vadd.i32 q9,q9,q1 vst1.32 {q8},[r1,:128]! vadd.i32 q10,q10,q2 vst1.32 {q9},[r1,:128]! vadd.i32 q11,q11,q3 vst1.32 {q10},[r1,:128]! vst1.32 {q11},[r1,:128]! ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11} sub r1,r1,#64 ldr r2,[sp,#0] eor r12,r12,r12 eor r3,r5,r6 b .L_00_48 .align 4 .L_00_48: vext.8 q8,q0,q1,#4 add r11,r11,r2 eor r2,r9,r10 eor r0,r8,r8,ror#5 vext.8 q9,q2,q3,#4 add r4,r4,r12 and r2,r2,r8 eor r12,r0,r8,ror#19 vshr.u32 q10,q8,#7 eor r0,r4,r4,ror#11 eor r2,r2,r10 vadd.i32 q0,q0,q9 add r11,r11,r12,ror#6 eor r12,r4,r5 vshr.u32 q9,q8,#3 eor r0,r0,r4,ror#20 add r11,r11,r2 vsli.32 q10,q8,#25 ldr r2,[sp,#4] and r3,r3,r12 vshr.u32 q11,q8,#18 add r7,r7,r11 add r11,r11,r0,ror#2 eor r3,r3,r5 veor q9,q9,q10 add r10,r10,r2 vsli.32 q11,q8,#14 eor r2,r8,r9 eor r0,r7,r7,ror#5 vshr.u32 d24,d7,#17 add r11,r11,r3 and r2,r2,r7 veor q9,q9,q11 eor r3,r0,r7,ror#19 eor r0,r11,r11,ror#11 vsli.32 d24,d7,#15 eor r2,r2,r9 add r10,r10,r3,ror#6 vshr.u32 d25,d7,#10 eor r3,r11,r4 eor r0,r0,r11,ror#20 vadd.i32 q0,q0,q9 add r10,r10,r2 ldr r2,[sp,#8] veor d25,d25,d24 and r12,r12,r3 add r6,r6,r10 vshr.u32 d24,d7,#19 add r10,r10,r0,ror#2 eor r12,r12,r4 vsli.32 d24,d7,#13 add r9,r9,r2 eor r2,r7,r8 veor d25,d25,d24 eor r0,r6,r6,ror#5 add r10,r10,r12 vadd.i32 d0,d0,d25 and r2,r2,r6 eor r12,r0,r6,ror#19 vshr.u32 d24,d0,#17 eor r0,r10,r10,ror#11 eor r2,r2,r8 vsli.32 d24,d0,#15 add r9,r9,r12,ror#6 eor r12,r10,r11 vshr.u32 d25,d0,#10 eor r0,r0,r10,ror#20 add r9,r9,r2 veor d25,d25,d24 ldr r2,[sp,#12] and r3,r3,r12 vshr.u32 d24,d0,#19 add r5,r5,r9 add r9,r9,r0,ror#2 eor r3,r3,r11 vld1.32 {q8},[r14,:128]! add r8,r8,r2 vsli.32 d24,d0,#13 eor r2,r6,r7 eor r0,r5,r5,ror#5 veor d25,d25,d24 add r9,r9,r3 and r2,r2,r5 vadd.i32 d1,d1,d25 eor r3,r0,r5,ror#19 eor r0,r9,r9,ror#11 vadd.i32 q8,q8,q0 eor r2,r2,r7 add r8,r8,r3,ror#6 eor r3,r9,r10 eor r0,r0,r9,ror#20 add r8,r8,r2 ldr r2,[sp,#16] and r12,r12,r3 add r4,r4,r8 vst1.32 {q8},[r1,:128]! 
add r8,r8,r0,ror#2 eor r12,r12,r10 vext.8 q8,q1,q2,#4 add r7,r7,r2 eor r2,r5,r6 eor r0,r4,r4,ror#5 vext.8 q9,q3,q0,#4 add r8,r8,r12 and r2,r2,r4 eor r12,r0,r4,ror#19 vshr.u32 q10,q8,#7 eor r0,r8,r8,ror#11 eor r2,r2,r6 vadd.i32 q1,q1,q9 add r7,r7,r12,ror#6 eor r12,r8,r9 vshr.u32 q9,q8,#3 eor r0,r0,r8,ror#20 add r7,r7,r2 vsli.32 q10,q8,#25 ldr r2,[sp,#20] and r3,r3,r12 vshr.u32 q11,q8,#18 add r11,r11,r7 add r7,r7,r0,ror#2 eor r3,r3,r9 veor q9,q9,q10 add r6,r6,r2 vsli.32 q11,q8,#14 eor r2,r4,r5 eor r0,r11,r11,ror#5 vshr.u32 d24,d1,#17 add r7,r7,r3 and r2,r2,r11 veor q9,q9,q11 eor r3,r0,r11,ror#19 eor r0,r7,r7,ror#11 vsli.32 d24,d1,#15 eor r2,r2,r5 add r6,r6,r3,ror#6 vshr.u32 d25,d1,#10 eor r3,r7,r8 eor r0,r0,r7,ror#20 vadd.i32 q1,q1,q9 add r6,r6,r2 ldr r2,[sp,#24] veor d25,d25,d24 and r12,r12,r3 add r10,r10,r6 vshr.u32 d24,d1,#19 add r6,r6,r0,ror#2 eor r12,r12,r8 vsli.32 d24,d1,#13 add r5,r5,r2 eor r2,r11,r4 veor d25,d25,d24 eor r0,r10,r10,ror#5 add r6,r6,r12 vadd.i32 d2,d2,d25 and r2,r2,r10 eor r12,r0,r10,ror#19 vshr.u32 d24,d2,#17 eor r0,r6,r6,ror#11 eor r2,r2,r4 vsli.32 d24,d2,#15 add r5,r5,r12,ror#6 eor r12,r6,r7 vshr.u32 d25,d2,#10 eor r0,r0,r6,ror#20 add r5,r5,r2 veor d25,d25,d24 ldr r2,[sp,#28] and r3,r3,r12 vshr.u32 d24,d2,#19 add r9,r9,r5 add r5,r5,r0,ror#2 eor r3,r3,r7 vld1.32 {q8},[r14,:128]! add r4,r4,r2 vsli.32 d24,d2,#13 eor r2,r10,r11 eor r0,r9,r9,ror#5 veor d25,d25,d24 add r5,r5,r3 and r2,r2,r9 vadd.i32 d3,d3,d25 eor r3,r0,r9,ror#19 eor r0,r5,r5,ror#11 vadd.i32 q8,q8,q1 eor r2,r2,r11 add r4,r4,r3,ror#6 eor r3,r5,r6 eor r0,r0,r5,ror#20 add r4,r4,r2 ldr r2,[sp,#32] and r12,r12,r3 add r8,r8,r4 vst1.32 {q8},[r1,:128]! add r4,r4,r0,ror#2 eor r12,r12,r6 vext.8 q8,q2,q3,#4 add r11,r11,r2 eor r2,r9,r10 eor r0,r8,r8,ror#5 vext.8 q9,q0,q1,#4 add r4,r4,r12 and r2,r2,r8 eor r12,r0,r8,ror#19 vshr.u32 q10,q8,#7 eor r0,r4,r4,ror#11 eor r2,r2,r10 vadd.i32 q2,q2,q9 add r11,r11,r12,ror#6 eor r12,r4,r5 vshr.u32 q9,q8,#3 eor r0,r0,r4,ror#20 add r11,r11,r2 vsli.32 q10,q8,#25 ldr r2,[sp,#36] and r3,r3,r12 vshr.u32 q11,q8,#18 add r7,r7,r11 add r11,r11,r0,ror#2 eor r3,r3,r5 veor q9,q9,q10 add r10,r10,r2 vsli.32 q11,q8,#14 eor r2,r8,r9 eor r0,r7,r7,ror#5 vshr.u32 d24,d3,#17 add r11,r11,r3 and r2,r2,r7 veor q9,q9,q11 eor r3,r0,r7,ror#19 eor r0,r11,r11,ror#11 vsli.32 d24,d3,#15 eor r2,r2,r9 add r10,r10,r3,ror#6 vshr.u32 d25,d3,#10 eor r3,r11,r4 eor r0,r0,r11,ror#20 vadd.i32 q2,q2,q9 add r10,r10,r2 ldr r2,[sp,#40] veor d25,d25,d24 and r12,r12,r3 add r6,r6,r10 vshr.u32 d24,d3,#19 add r10,r10,r0,ror#2 eor r12,r12,r4 vsli.32 d24,d3,#13 add r9,r9,r2 eor r2,r7,r8 veor d25,d25,d24 eor r0,r6,r6,ror#5 add r10,r10,r12 vadd.i32 d4,d4,d25 and r2,r2,r6 eor r12,r0,r6,ror#19 vshr.u32 d24,d4,#17 eor r0,r10,r10,ror#11 eor r2,r2,r8 vsli.32 d24,d4,#15 add r9,r9,r12,ror#6 eor r12,r10,r11 vshr.u32 d25,d4,#10 eor r0,r0,r10,ror#20 add r9,r9,r2 veor d25,d25,d24 ldr r2,[sp,#44] and r3,r3,r12 vshr.u32 d24,d4,#19 add r5,r5,r9 add r9,r9,r0,ror#2 eor r3,r3,r11 vld1.32 {q8},[r14,:128]! add r8,r8,r2 vsli.32 d24,d4,#13 eor r2,r6,r7 eor r0,r5,r5,ror#5 veor d25,d25,d24 add r9,r9,r3 and r2,r2,r5 vadd.i32 d5,d5,d25 eor r3,r0,r5,ror#19 eor r0,r9,r9,ror#11 vadd.i32 q8,q8,q2 eor r2,r2,r7 add r8,r8,r3,ror#6 eor r3,r9,r10 eor r0,r0,r9,ror#20 add r8,r8,r2 ldr r2,[sp,#48] and r12,r12,r3 add r4,r4,r8 vst1.32 {q8},[r1,:128]! 
add r8,r8,r0,ror#2 eor r12,r12,r10 vext.8 q8,q3,q0,#4 add r7,r7,r2 eor r2,r5,r6 eor r0,r4,r4,ror#5 vext.8 q9,q1,q2,#4 add r8,r8,r12 and r2,r2,r4 eor r12,r0,r4,ror#19 vshr.u32 q10,q8,#7 eor r0,r8,r8,ror#11 eor r2,r2,r6 vadd.i32 q3,q3,q9 add r7,r7,r12,ror#6 eor r12,r8,r9 vshr.u32 q9,q8,#3 eor r0,r0,r8,ror#20 add r7,r7,r2 vsli.32 q10,q8,#25 ldr r2,[sp,#52] and r3,r3,r12 vshr.u32 q11,q8,#18 add r11,r11,r7 add r7,r7,r0,ror#2 eor r3,r3,r9 veor q9,q9,q10 add r6,r6,r2 vsli.32 q11,q8,#14 eor r2,r4,r5 eor r0,r11,r11,ror#5 vshr.u32 d24,d5,#17 add r7,r7,r3 and r2,r2,r11 veor q9,q9,q11 eor r3,r0,r11,ror#19 eor r0,r7,r7,ror#11 vsli.32 d24,d5,#15 eor r2,r2,r5 add r6,r6,r3,ror#6 vshr.u32 d25,d5,#10 eor r3,r7,r8 eor r0,r0,r7,ror#20 vadd.i32 q3,q3,q9 add r6,r6,r2 ldr r2,[sp,#56] veor d25,d25,d24 and r12,r12,r3 add r10,r10,r6 vshr.u32 d24,d5,#19 add r6,r6,r0,ror#2 eor r12,r12,r8 vsli.32 d24,d5,#13 add r5,r5,r2 eor r2,r11,r4 veor d25,d25,d24 eor r0,r10,r10,ror#5 add r6,r6,r12 vadd.i32 d6,d6,d25 and r2,r2,r10 eor r12,r0,r10,ror#19 vshr.u32 d24,d6,#17 eor r0,r6,r6,ror#11 eor r2,r2,r4 vsli.32 d24,d6,#15 add r5,r5,r12,ror#6 eor r12,r6,r7 vshr.u32 d25,d6,#10 eor r0,r0,r6,ror#20 add r5,r5,r2 veor d25,d25,d24 ldr r2,[sp,#60] and r3,r3,r12 vshr.u32 d24,d6,#19 add r9,r9,r5 add r5,r5,r0,ror#2 eor r3,r3,r7 vld1.32 {q8},[r14,:128]! add r4,r4,r2 vsli.32 d24,d6,#13 eor r2,r10,r11 eor r0,r9,r9,ror#5 veor d25,d25,d24 add r5,r5,r3 and r2,r2,r9 vadd.i32 d7,d7,d25 eor r3,r0,r9,ror#19 eor r0,r5,r5,ror#11 vadd.i32 q8,q8,q3 eor r2,r2,r11 add r4,r4,r3,ror#6 eor r3,r5,r6 eor r0,r0,r5,ror#20 add r4,r4,r2 ldr r2,[r14] and r12,r12,r3 add r8,r8,r4 vst1.32 {q8},[r1,:128]! add r4,r4,r0,ror#2 eor r12,r12,r6 teq r2,#0 @ check for K256 terminator ldr r2,[sp,#0] sub r1,r1,#64 bne .L_00_48 ldr r1,[sp,#68] ldr r0,[sp,#72] sub r14,r14,#256 @ rewind r14 teq r1,r0 it eq subeq r1,r1,#64 @ avoid SEGV vld1.8 {q0},[r1]! @ load next input block vld1.8 {q1},[r1]! vld1.8 {q2},[r1]! vld1.8 {q3},[r1]! it ne strne r1,[sp,#68] mov r1,sp add r11,r11,r2 eor r2,r9,r10 eor r0,r8,r8,ror#5 add r4,r4,r12 vld1.32 {q8},[r14,:128]! and r2,r2,r8 eor r12,r0,r8,ror#19 eor r0,r4,r4,ror#11 eor r2,r2,r10 vrev32.8 q0,q0 add r11,r11,r12,ror#6 eor r12,r4,r5 eor r0,r0,r4,ror#20 add r11,r11,r2 vadd.i32 q8,q8,q0 ldr r2,[sp,#4] and r3,r3,r12 add r7,r7,r11 add r11,r11,r0,ror#2 eor r3,r3,r5 add r10,r10,r2 eor r2,r8,r9 eor r0,r7,r7,ror#5 add r11,r11,r3 and r2,r2,r7 eor r3,r0,r7,ror#19 eor r0,r11,r11,ror#11 eor r2,r2,r9 add r10,r10,r3,ror#6 eor r3,r11,r4 eor r0,r0,r11,ror#20 add r10,r10,r2 ldr r2,[sp,#8] and r12,r12,r3 add r6,r6,r10 add r10,r10,r0,ror#2 eor r12,r12,r4 add r9,r9,r2 eor r2,r7,r8 eor r0,r6,r6,ror#5 add r10,r10,r12 and r2,r2,r6 eor r12,r0,r6,ror#19 eor r0,r10,r10,ror#11 eor r2,r2,r8 add r9,r9,r12,ror#6 eor r12,r10,r11 eor r0,r0,r10,ror#20 add r9,r9,r2 ldr r2,[sp,#12] and r3,r3,r12 add r5,r5,r9 add r9,r9,r0,ror#2 eor r3,r3,r11 add r8,r8,r2 eor r2,r6,r7 eor r0,r5,r5,ror#5 add r9,r9,r3 and r2,r2,r5 eor r3,r0,r5,ror#19 eor r0,r9,r9,ror#11 eor r2,r2,r7 add r8,r8,r3,ror#6 eor r3,r9,r10 eor r0,r0,r9,ror#20 add r8,r8,r2 ldr r2,[sp,#16] and r12,r12,r3 add r4,r4,r8 add r8,r8,r0,ror#2 eor r12,r12,r10 vst1.32 {q8},[r1,:128]! add r7,r7,r2 eor r2,r5,r6 eor r0,r4,r4,ror#5 add r8,r8,r12 vld1.32 {q8},[r14,:128]! 
and r2,r2,r4
eor r12,r0,r4,ror#19
eor r0,r8,r8,ror#11
eor r2,r2,r6
vrev32.8 q1,q1
add r7,r7,r12,ror#6
eor r12,r8,r9
eor r0,r0,r8,ror#20
add r7,r7,r2
vadd.i32 q8,q8,q1
ldr r2,[sp,#20]
and r3,r3,r12
add r11,r11,r7
add r7,r7,r0,ror#2
eor r3,r3,r9
add r6,r6,r2
eor r2,r4,r5
eor r0,r11,r11,ror#5
add r7,r7,r3
and r2,r2,r11
eor r3,r0,r11,ror#19
eor r0,r7,r7,ror#11
eor r2,r2,r5
add r6,r6,r3,ror#6
eor r3,r7,r8
eor r0,r0,r7,ror#20
add r6,r6,r2
ldr r2,[sp,#24]
and r12,r12,r3
add r10,r10,r6
add r6,r6,r0,ror#2
eor r12,r12,r8
add r5,r5,r2
eor r2,r11,r4
eor r0,r10,r10,ror#5
add r6,r6,r12
and r2,r2,r10
eor r12,r0,r10,ror#19
eor r0,r6,r6,ror#11
eor r2,r2,r4
add r5,r5,r12,ror#6
eor r12,r6,r7
eor r0,r0,r6,ror#20
add r5,r5,r2
ldr r2,[sp,#28]
and r3,r3,r12
add r9,r9,r5
add r5,r5,r0,ror#2
eor r3,r3,r7
add r4,r4,r2
eor r2,r10,r11
eor r0,r9,r9,ror#5
add r5,r5,r3
and r2,r2,r9
eor r3,r0,r9,ror#19
eor r0,r5,r5,ror#11
eor r2,r2,r11
add r4,r4,r3,ror#6
eor r3,r5,r6
eor r0,r0,r5,ror#20
add r4,r4,r2
ldr r2,[sp,#32]
and r12,r12,r3
add r8,r8,r4
add r4,r4,r0,ror#2
eor r12,r12,r6
vst1.32 {q8},[r1,:128]!
add r11,r11,r2
eor r2,r9,r10
eor r0,r8,r8,ror#5
add r4,r4,r12
vld1.32 {q8},[r14,:128]!
and r2,r2,r8
eor r12,r0,r8,ror#19
eor r0,r4,r4,ror#11
eor r2,r2,r10
vrev32.8 q2,q2
add r11,r11,r12,ror#6
eor r12,r4,r5
eor r0,r0,r4,ror#20
add r11,r11,r2
vadd.i32 q8,q8,q2
ldr r2,[sp,#36]
and r3,r3,r12
add r7,r7,r11
add r11,r11,r0,ror#2
eor r3,r3,r5
add r10,r10,r2
eor r2,r8,r9
eor r0,r7,r7,ror#5
add r11,r11,r3
and r2,r2,r7
eor r3,r0,r7,ror#19
eor r0,r11,r11,ror#11
eor r2,r2,r9
add r10,r10,r3,ror#6
eor r3,r11,r4
eor r0,r0,r11,ror#20
add r10,r10,r2
ldr r2,[sp,#40]
and r12,r12,r3
add r6,r6,r10
add r10,r10,r0,ror#2
eor r12,r12,r4
add r9,r9,r2
eor r2,r7,r8
eor r0,r6,r6,ror#5
add r10,r10,r12
and r2,r2,r6
eor r12,r0,r6,ror#19
eor r0,r10,r10,ror#11
eor r2,r2,r8
add r9,r9,r12,ror#6
eor r12,r10,r11
eor r0,r0,r10,ror#20
add r9,r9,r2
ldr r2,[sp,#44]
and r3,r3,r12
add r5,r5,r9
add r9,r9,r0,ror#2
eor r3,r3,r11
add r8,r8,r2
eor r2,r6,r7
eor r0,r5,r5,ror#5
add r9,r9,r3
and r2,r2,r5
eor r3,r0,r5,ror#19
eor r0,r9,r9,ror#11
eor r2,r2,r7
add r8,r8,r3,ror#6
eor r3,r9,r10
eor r0,r0,r9,ror#20
add r8,r8,r2
ldr r2,[sp,#48]
and r12,r12,r3
add r4,r4,r8
add r8,r8,r0,ror#2
eor r12,r12,r10
vst1.32 {q8},[r1,:128]!
add r7,r7,r2
eor r2,r5,r6
eor r0,r4,r4,ror#5
add r8,r8,r12
vld1.32 {q8},[r14,:128]!
and r2,r2,r4
eor r12,r0,r4,ror#19
eor r0,r8,r8,ror#11
eor r2,r2,r6
vrev32.8 q3,q3
add r7,r7,r12,ror#6
eor r12,r8,r9
eor r0,r0,r8,ror#20
add r7,r7,r2
vadd.i32 q8,q8,q3
ldr r2,[sp,#52]
and r3,r3,r12
add r11,r11,r7
add r7,r7,r0,ror#2
eor r3,r3,r9
add r6,r6,r2
eor r2,r4,r5
eor r0,r11,r11,ror#5
add r7,r7,r3
and r2,r2,r11
eor r3,r0,r11,ror#19
eor r0,r7,r7,ror#11
eor r2,r2,r5
add r6,r6,r3,ror#6
eor r3,r7,r8
eor r0,r0,r7,ror#20
add r6,r6,r2
ldr r2,[sp,#56]
and r12,r12,r3
add r10,r10,r6
add r6,r6,r0,ror#2
eor r12,r12,r8
add r5,r5,r2
eor r2,r11,r4
eor r0,r10,r10,ror#5
add r6,r6,r12
and r2,r2,r10
eor r12,r0,r10,ror#19
eor r0,r6,r6,ror#11
eor r2,r2,r4
add r5,r5,r12,ror#6
eor r12,r6,r7
eor r0,r0,r6,ror#20
add r5,r5,r2
ldr r2,[sp,#60]
and r3,r3,r12
add r9,r9,r5
add r5,r5,r0,ror#2
eor r3,r3,r7
add r4,r4,r2
eor r2,r10,r11
eor r0,r9,r9,ror#5
add r5,r5,r3
and r2,r2,r9
eor r3,r0,r9,ror#19
eor r0,r5,r5,ror#11
eor r2,r2,r11
add r4,r4,r3,ror#6
eor r3,r5,r6
eor r0,r0,r5,ror#20
add r4,r4,r2
ldr r2,[sp,#64]
and r12,r12,r3
add r8,r8,r4
add r4,r4,r0,ror#2
eor r12,r12,r6
vst1.32 {q8},[r1,:128]!
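@ All 64 rounds of this block are done. r2 (loaded from [sp,#64] above) points
@ at the hash context, so the working variables a..h kept in r4..r11 are folded
@ back into H0..H7 below, after which the code either loops to .L_00_48 for the
@ next block or restores the original sp and returns.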
ldr r0,[r2,#0]
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
ldr r12,[r2,#4]
ldr r3,[r2,#8]
ldr r1,[r2,#12]
add r4,r4,r0 @ accumulate
ldr r0,[r2,#16]
add r5,r5,r12
ldr r12,[r2,#20]
add r6,r6,r3
ldr r3,[r2,#24]
add r7,r7,r1
ldr r1,[r2,#28]
add r8,r8,r0
str r4,[r2],#4
add r9,r9,r12
str r5,[r2],#4
add r10,r10,r3
str r6,[r2],#4
add r11,r11,r1
str r7,[r2],#4
stmia r2,{r8,r9,r10,r11}
ittte ne
movne r1,sp
ldrne r2,[sp,#0]
eorne r12,r12,r12
ldreq sp,[sp,#76] @ restore original sp
itt ne
eorne r3,r5,r6
bne .L_00_48
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc}
.size sha256_block_data_order_neon,.-sha256_block_data_order_neon
#endif
@ ASCII credits string: "SHA256 block transform for ARMv4/NEON/ARMv8, CRYPTOGAMS by <appro@openssl.org>"
.byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,47,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__)
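@ Reference (informal sketch of what the listing above computes; register names
@ and scheduling details are specific to this CRYPTOGAMS output): each scalar
@ round performs the standard SHA-256 compression step
@   T1 = h + Sigma1(e) + Ch(e,f,g) + K[i] + W[i]
@   T2 = Sigma0(a) + Maj(a,b,c)
@   h=g; g=f; f=e; e=d+T1; d=c; c=b; b=a; a=T1+T2
@ with Sigma1(x)=ror(x,6)^ror(x,11)^ror(x,25) (the ror#5/ror#19 then ror#6
@ pattern), Sigma0(x)=ror(x,2)^ror(x,13)^ror(x,22), Ch(x,y,z)=(x&y)^(~x&z) and
@ Maj(x,y,z)=(x&y)^(x&z)^(y&z), the Maj term being added one round late
@ ("from the past"). The NEON lanes compute the message schedule
@   W[i] = sigma1(W[i-2]) + W[i-7] + sigma0(W[i-15]) + W[i-16]
@ with sigma0(x)=ror(x,7)^ror(x,18)^(x>>3) and sigma1(x)=ror(x,17)^ror(x,19)^(x>>10),
@ four words at a time, and store W[i]+K[i] on the stack for the scalar rounds.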