x86 stringlengths 122 9.39M | arm stringlengths 122 9.33M | file stringlengths 19 200 | source stringclasses 2
values |
|---|---|---|---|
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function bhnd_nvram_bcm_filter_unsetvar
_bhnd_nvram_bcm_filter_unsetvar: ## @bhnd_nvram_bcm_filter_unsetvar
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
xorl %eax, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.no_dead_strip _bhnd_nvram_bcm_filter_unsetvar
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function bhnd_nvram_bcm_filter_unsetvar
_bhnd_nvram_bcm_filter_unsetvar: ; @bhnd_nvram_bcm_filter_unsetvar
.cfi_startproc
; %bb.0:
mov w0, #0
ret
.cfi_endproc
; -- End function
.no_dead_strip _bhnd_nvram_bcm_filter_unsetvar
.subsections_via_symbols
| AnghaBench/freebsd/sys/dev/bhnd/nvram/extr_bhnd_nvram_data_bcm.c_bhnd_nvram_bcm_filter_unsetvar.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _uart_init ## -- Begin function uart_init
.p2align 4, 0x90
_uart_init: ## @uart_init
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
movq _GPFSEL1@GOTPCREL(%rip), %rbx
movl (%rbx), %edi
callq _get32
## kill: def $eax killed $eax def $rax
andl $-258049, %eax ## imm = 0xFFFC0FFF
leal 73728(%rax), %esi
movl (%rbx), %edi
callq _put32
movq _GPPUD@GOTPCREL(%rip), %rax
movl (%rax), %edi
xorl %esi, %esi
callq _put32
movl $150, %edi
callq _delay
movq _GPPUDCLK0@GOTPCREL(%rip), %rbx
movl (%rbx), %edi
movl $49152, %esi ## imm = 0xC000
callq _put32
movl $150, %edi
callq _delay
movl (%rbx), %edi
xorl %esi, %esi
callq _put32
movq _AUX_ENABLES@GOTPCREL(%rip), %rax
movl (%rax), %edi
movl $1, %esi
callq _put32
movq _AUX_MU_CNTL_REG@GOTPCREL(%rip), %rbx
movl (%rbx), %edi
xorl %esi, %esi
callq _put32
movq _AUX_MU_IER_REG@GOTPCREL(%rip), %rax
movl (%rax), %edi
xorl %esi, %esi
callq _put32
movq _AUX_MU_LCR_REG@GOTPCREL(%rip), %rax
movl (%rax), %edi
movl $3, %esi
callq _put32
movq _AUX_MU_MCR_REG@GOTPCREL(%rip), %rax
movl (%rax), %edi
xorl %esi, %esi
callq _put32
movq _AUX_MU_BAUD_REG@GOTPCREL(%rip), %rax
movl (%rax), %edi
movl $270, %esi ## imm = 0x10E
callq _put32
movl (%rbx), %edi
movl $3, %esi
addq $8, %rsp
popq %rbx
popq %rbp
jmp _put32 ## TAILCALL
.cfi_endproc
## -- End function
.comm _GPFSEL1,4,2 ## @GPFSEL1
.comm _GPPUD,4,2 ## @GPPUD
.comm _GPPUDCLK0,4,2 ## @GPPUDCLK0
.comm _AUX_ENABLES,4,2 ## @AUX_ENABLES
.comm _AUX_MU_CNTL_REG,4,2 ## @AUX_MU_CNTL_REG
.comm _AUX_MU_IER_REG,4,2 ## @AUX_MU_IER_REG
.comm _AUX_MU_LCR_REG,4,2 ## @AUX_MU_LCR_REG
.comm _AUX_MU_MCR_REG,4,2 ## @AUX_MU_MCR_REG
.comm _AUX_MU_BAUD_REG,4,2 ## @AUX_MU_BAUD_REG
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _uart_init ; -- Begin function uart_init
.p2align 2
_uart_init: ; @uart_init
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
Lloh0:
adrp x19, _GPFSEL1@GOTPAGE
Lloh1:
ldr x19, [x19, _GPFSEL1@GOTPAGEOFF]
ldr w0, [x19]
bl _get32
mov x1, x0
mov w8, #18
bfi w1, w8, #12, #6
ldr w0, [x19]
bl _put32
Lloh2:
adrp x8, _GPPUD@GOTPAGE
Lloh3:
ldr x8, [x8, _GPPUD@GOTPAGEOFF]
Lloh4:
ldr w0, [x8]
mov w1, #0
bl _put32
mov w0, #150
bl _delay
Lloh5:
adrp x19, _GPPUDCLK0@GOTPAGE
Lloh6:
ldr x19, [x19, _GPPUDCLK0@GOTPAGEOFF]
ldr w0, [x19]
mov w1, #49152
bl _put32
mov w0, #150
bl _delay
ldr w0, [x19]
mov w1, #0
bl _put32
Lloh7:
adrp x8, _AUX_ENABLES@GOTPAGE
Lloh8:
ldr x8, [x8, _AUX_ENABLES@GOTPAGEOFF]
Lloh9:
ldr w0, [x8]
mov w1, #1
bl _put32
Lloh10:
adrp x19, _AUX_MU_CNTL_REG@GOTPAGE
Lloh11:
ldr x19, [x19, _AUX_MU_CNTL_REG@GOTPAGEOFF]
ldr w0, [x19]
mov w1, #0
bl _put32
Lloh12:
adrp x8, _AUX_MU_IER_REG@GOTPAGE
Lloh13:
ldr x8, [x8, _AUX_MU_IER_REG@GOTPAGEOFF]
Lloh14:
ldr w0, [x8]
mov w1, #0
bl _put32
Lloh15:
adrp x8, _AUX_MU_LCR_REG@GOTPAGE
Lloh16:
ldr x8, [x8, _AUX_MU_LCR_REG@GOTPAGEOFF]
Lloh17:
ldr w0, [x8]
mov w1, #3
bl _put32
Lloh18:
adrp x8, _AUX_MU_MCR_REG@GOTPAGE
Lloh19:
ldr x8, [x8, _AUX_MU_MCR_REG@GOTPAGEOFF]
Lloh20:
ldr w0, [x8]
mov w1, #0
bl _put32
Lloh21:
adrp x8, _AUX_MU_BAUD_REG@GOTPAGE
Lloh22:
ldr x8, [x8, _AUX_MU_BAUD_REG@GOTPAGEOFF]
Lloh23:
ldr w0, [x8]
mov w1, #270
bl _put32
ldr w0, [x19]
mov w1, #3
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
b _put32
.loh AdrpLdrGotLdr Lloh21, Lloh22, Lloh23
.loh AdrpLdrGotLdr Lloh18, Lloh19, Lloh20
.loh AdrpLdrGotLdr Lloh15, Lloh16, Lloh17
.loh AdrpLdrGotLdr Lloh12, Lloh13, Lloh14
.loh AdrpLdrGot Lloh10, Lloh11
.loh AdrpLdrGotLdr Lloh7, Lloh8, Lloh9
.loh AdrpLdrGot Lloh5, Lloh6
.loh AdrpLdrGotLdr Lloh2, Lloh3, Lloh4
.loh AdrpLdrGot Lloh0, Lloh1
.cfi_endproc
; -- End function
.comm _GPFSEL1,4,2 ; @GPFSEL1
.comm _GPPUD,4,2 ; @GPPUD
.comm _GPPUDCLK0,4,2 ; @GPPUDCLK0
.comm _AUX_ENABLES,4,2 ; @AUX_ENABLES
.comm _AUX_MU_CNTL_REG,4,2 ; @AUX_MU_CNTL_REG
.comm _AUX_MU_IER_REG,4,2 ; @AUX_MU_IER_REG
.comm _AUX_MU_LCR_REG,4,2 ; @AUX_MU_LCR_REG
.comm _AUX_MU_MCR_REG,4,2 ; @AUX_MU_MCR_REG
.comm _AUX_MU_BAUD_REG,4,2 ; @AUX_MU_BAUD_REG
.subsections_via_symbols
| AnghaBench/raspberry-pi-os/exercises/lesson02/1/avenito/src/extr_mini_uart.c_uart_init.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function ep_pass_free
_ep_pass_free: ## @ep_pass_free
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
movq %rdi, %rbx
movl 8(%rdi), %edi
callq _bfree
movl 4(%rbx), %edi
callq _da_free
movl (%rbx), %edi
addq $8, %rsp
popq %rbx
popq %rbp
jmp _da_free ## TAILCALL
.cfi_endproc
## -- End function
.no_dead_strip _ep_pass_free
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function ep_pass_free
_ep_pass_free: ; @ep_pass_free
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x0
ldr w0, [x0, #8]
bl _bfree
ldr w0, [x19, #4]
bl _da_free
ldr w0, [x19]
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
b _da_free
.cfi_endproc
; -- End function
.no_dead_strip _ep_pass_free
.subsections_via_symbols
| AnghaBench/obs-studio/libobs/graphics/extr_effect-parser.h_ep_pass_free.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function qlcnic_sysfs_validate_mem
_qlcnic_sysfs_validate_mem: ## @qlcnic_sysfs_validate_mem
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq _QLCNIC_DIAG_ENABLED@GOTPCREL(%rip), %rax
movl (%rax), %eax
testl %eax, (%rdi)
je LBB0_3
## %bb.1:
cmpq $8, %rdx
jne LBB0_3
## %bb.2:
xorl %eax, %eax
andl $7, %esi
jne LBB0_3
## %bb.4:
popq %rbp
retq
LBB0_3:
movq _EIO@GOTPCREL(%rip), %rcx
xorl %eax, %eax
subl (%rcx), %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _QLCNIC_DIAG_ENABLED,4,2 ## @QLCNIC_DIAG_ENABLED
.comm _EIO,4,2 ## @EIO
.no_dead_strip _qlcnic_sysfs_validate_mem
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function qlcnic_sysfs_validate_mem
_qlcnic_sysfs_validate_mem: ; @qlcnic_sysfs_validate_mem
.cfi_startproc
; %bb.0:
ldr w8, [x0]
Lloh0:
adrp x9, _QLCNIC_DIAG_ENABLED@GOTPAGE
Lloh1:
ldr x9, [x9, _QLCNIC_DIAG_ENABLED@GOTPAGEOFF]
Lloh2:
ldr w9, [x9]
tst w9, w8
b.eq LBB0_4
; %bb.1:
cmp x2, #8
b.ne LBB0_4
; %bb.2:
and w8, w1, #0x7
cbnz w8, LBB0_4
; %bb.3:
mov w0, #0
ret
LBB0_4:
Lloh3:
adrp x8, _EIO@GOTPAGE
Lloh4:
ldr x8, [x8, _EIO@GOTPAGEOFF]
Lloh5:
ldr w8, [x8]
neg w0, w8
ret
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.cfi_endproc
; -- End function
.comm _QLCNIC_DIAG_ENABLED,4,2 ; @QLCNIC_DIAG_ENABLED
.comm _EIO,4,2 ; @EIO
.no_dead_strip _qlcnic_sysfs_validate_mem
.subsections_via_symbols
| AnghaBench/fastsocket/kernel/drivers/net/qlcnic/extr_qlcnic_sysfs.c_qlcnic_sysfs_validate_mem.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function vidioc_s_input
_vidioc_s_input: ## @vidioc_s_input
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq _EINVAL@GOTPCREL(%rip), %rcx
xorl %eax, %eax
subl (%rcx), %eax
testl %edx, %edx
cmovel %edx, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _EINVAL,4,2 ## @EINVAL
.no_dead_strip _vidioc_s_input
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function vidioc_s_input
_vidioc_s_input: ; @vidioc_s_input
.cfi_startproc
; %bb.0:
Lloh0:
adrp x8, _EINVAL@GOTPAGE
Lloh1:
ldr x8, [x8, _EINVAL@GOTPAGEOFF]
Lloh2:
ldr w8, [x8]
cmp w2, #0
csneg w0, wzr, w8, eq
ret
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _EINVAL,4,2 ; @EINVAL
.no_dead_strip _vidioc_s_input
.subsections_via_symbols
| AnghaBench/fastsocket/kernel/drivers/media/radio/extr_radio-sf16fmi.c_vidioc_s_input.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl ___VERIFIER_assert ## -- Begin function __VERIFIER_assert
.p2align 4, 0x90
___VERIFIER_assert: ## @__VERIFIER_assert
.cfi_startproc
## %bb.0:
testl %edi, %edi
je LBB0_2
## %bb.1:
retq
LBB0_2:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
xorl %eax, %eax
callq ___VERIFIER_error
.cfi_endproc
## -- End function
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
subq $1024, %rsp ## imm = 0x400
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -8(%rbp)
xorl %eax, %eax
testb %al, %al
jne LBB1_5
## %bb.1:
leaq -1000(%rbp), %rax
movl $2, %ecx
.p2align 4, 0x90
LBB1_2: ## =>This Inner Loop Header: Depth=1
cmpq $26, %rcx
je LBB1_6
## %bb.3: ## in Loop: Header=BB1_2 Depth=1
movl -516(%rbp,%rcx,4), %edx
cmpl (%rax), %edx
jne LBB1_5
## %bb.4: ## in Loop: Header=BB1_2 Depth=1
movl -512(%rbp,%rcx,4), %edx
addq $2, %rcx
leaq 40(%rax), %rsi
cmpl 20(%rax), %edx
movq %rsi, %rax
je LBB1_2
LBB1_5:
xorl %eax, %eax
callq ___VERIFIER_error
LBB1_6:
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
cmpq -8(%rbp), %rax
jne LBB1_8
## %bb.7:
xorl %eax, %eax
addq $1024, %rsp ## imm = 0x400
popq %rbp
retq
LBB1_8:
callq ___stack_chk_fail
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl ___VERIFIER_assert ; -- Begin function __VERIFIER_assert
.p2align 2
___VERIFIER_assert: ; @__VERIFIER_assert
.cfi_startproc
; %bb.0:
cbz w0, LBB0_2
; %bb.1:
ret
LBB0_2:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
bl ___VERIFIER_error
.cfi_endproc
; -- End function
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
stp x28, x27, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w27, -24
.cfi_offset w28, -32
sub sp, sp, #1008
Lloh0:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh1:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh2:
ldr x8, [x8]
stur x8, [x29, #-24]
cbnz wzr, LBB1_4
; %bb.1:
mov x8, sp
add x8, x8, #24
mov w9, #4
add x10, sp, #500
LBB1_2: ; =>This Inner Loop Header: Depth=1
cmp x9, #100
b.eq LBB1_5
; %bb.3: ; in Loop: Header=BB1_2 Depth=1
ldr w11, [x10, x9]
ldr w12, [x8], #20
add x9, x9, #4
cmp w11, w12
b.eq LBB1_2
LBB1_4:
bl ___VERIFIER_error
LBB1_5:
ldur x8, [x29, #-24]
Lloh3:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh4:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh5:
ldr x9, [x9]
cmp x9, x8
b.ne LBB1_7
; %bb.6:
mov w0, #0
add sp, sp, #1008
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x28, x27, [sp], #32 ; 16-byte Folded Reload
ret
LBB1_7:
bl ___stack_chk_fail
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.cfi_endproc
; -- End function
.subsections_via_symbols
| the_stack_data/167331827.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function feed_eq_set
_feed_eq_set: ## @feed_eq_set
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %esi, %ebx
leal -128(%rbx), %eax
cmpl $5, %eax
ja LBB0_21
## %bb.1:
movl %edx, %r15d
movq 8(%rdi), %r14
leaq LJTI0_0(%rip), %rcx
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
LBB0_9:
cmpl $101, %r15d
jae LBB0_21
## %bb.10:
movl %r15d, %edi
callq _FEEDEQ_L2GAIN
cmpl $128, %ebx
jne LBB0_12
## %bb.11:
movq %rax, 24(%r14)
xorl %eax, %eax
jmp LBB0_22
LBB0_16:
movq _FEEDEQ_BYPASS@GOTPCREL(%rip), %rax
cmpl %r15d, (%rax)
je LBB0_19
## %bb.17:
movq _FEEDEQ_ENABLE@GOTPCREL(%rip), %rax
cmpl %r15d, (%rax)
je LBB0_19
## %bb.18:
movq _FEEDEQ_DISABLE@GOTPCREL(%rip), %rax
cmpl %r15d, (%rax)
jne LBB0_21
LBB0_19:
movl %r15d, 8(%r14)
jmp LBB0_20
LBB0_5:
movl %r15d, %edi
callq _feeder_eq_validrate
testl %eax, %eax
je LBB0_21
## %bb.6:
movslq %r15d, %rax
movq %rax, 32(%r14)
movl 8(%r14), %eax
movq _FEEDEQ_UNKNOWN@GOTPCREL(%rip), %rcx
cmpl (%rcx), %eax
jne LBB0_8
## %bb.7:
movq _FEEDEQ_ENABLE@GOTPCREL(%rip), %rax
movl (%rax), %eax
movl %eax, 8(%r14)
LBB0_8:
movq %r14, %rdi
addq $8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
jmp _feed_eq_setup ## TAILCALL
LBB0_13:
movq _FEEDEQ_PREAMP_MIN@GOTPCREL(%rip), %rax
cmpl %r15d, (%rax)
jg LBB0_21
## %bb.14:
movq _FEEDEQ_PREAMP_MAX@GOTPCREL(%rip), %rax
cmpl %r15d, (%rax)
jl LBB0_21
## %bb.15:
movl %r15d, %edi
callq _FEEDEQ_PREAMP2IDX
movl %eax, 12(%r14)
xorl %eax, %eax
jmp LBB0_22
LBB0_2:
movq _SND_CHN_MIN@GOTPCREL(%rip), %rax
cmpl %r15d, (%rax)
jg LBB0_21
## %bb.3:
movq _SND_CHN_MAX@GOTPCREL(%rip), %rax
cmpl %r15d, (%rax)
jl LBB0_21
## %bb.4:
movl %r15d, (%r14)
movq (%rdi), %rax
movl (%rax), %edi
callq _AFMT_BPS
imull %r15d, %eax
movl %eax, 4(%r14)
LBB0_20:
movq %r14, %rdi
callq _feed_eq_reset
xorl %eax, %eax
jmp LBB0_22
LBB0_21:
movq _EINVAL@GOTPCREL(%rip), %rax
movl (%rax), %eax
LBB0_22:
addq $8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
LBB0_12:
movq %rax, 16(%r14)
xorl %eax, %eax
jmp LBB0_22
.cfi_endproc
.p2align 2, 0x90
.data_region jt32
.set L0_0_set_9, LBB0_9-LJTI0_0
.set L0_0_set_16, LBB0_16-LJTI0_0
.set L0_0_set_5, LBB0_5-LJTI0_0
.set L0_0_set_13, LBB0_13-LJTI0_0
.set L0_0_set_2, LBB0_2-LJTI0_0
LJTI0_0:
.long L0_0_set_9
.long L0_0_set_16
.long L0_0_set_5
.long L0_0_set_13
.long L0_0_set_2
.long L0_0_set_9
.end_data_region
## -- End function
.comm _SND_CHN_MIN,4,2 ## @SND_CHN_MIN
.comm _SND_CHN_MAX,4,2 ## @SND_CHN_MAX
.comm _EINVAL,4,2 ## @EINVAL
.comm _FEEDEQ_UNKNOWN,4,2 ## @FEEDEQ_UNKNOWN
.comm _FEEDEQ_ENABLE,4,2 ## @FEEDEQ_ENABLE
.comm _FEEDEQ_PREAMP_MIN,4,2 ## @FEEDEQ_PREAMP_MIN
.comm _FEEDEQ_PREAMP_MAX,4,2 ## @FEEDEQ_PREAMP_MAX
.comm _FEEDEQ_BYPASS,4,2 ## @FEEDEQ_BYPASS
.comm _FEEDEQ_DISABLE,4,2 ## @FEEDEQ_DISABLE
.no_dead_strip _feed_eq_set
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function feed_eq_set
_feed_eq_set: ; @feed_eq_set
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
sub w8, w1, #128
cmp w8, #5
b.hi LBB0_14
; %bb.1:
mov x20, x2
ldr x19, [x0, #8]
Lloh0:
adrp x9, lJTI0_0@PAGE
Lloh1:
add x9, x9, lJTI0_0@PAGEOFF
adr x10, LBB0_2
ldrb w11, [x9, x8]
add x10, x10, x11, lsl #2
br x10
LBB0_2:
cmp w20, #101
b.hs LBB0_14
; %bb.3:
mov x21, x1
mov x0, x20
bl _FEEDEQ_L2GAIN
mov x8, x0
cmp w21, #128
b.ne LBB0_15
; %bb.4:
mov w0, #0
str x8, [x19, #24]
b LBB0_18
LBB0_5:
Lloh2:
adrp x8, _FEEDEQ_BYPASS@GOTPAGE
Lloh3:
ldr x8, [x8, _FEEDEQ_BYPASS@GOTPAGEOFF]
Lloh4:
ldr w8, [x8]
Lloh5:
adrp x9, _FEEDEQ_ENABLE@GOTPAGE
Lloh6:
ldr x9, [x9, _FEEDEQ_ENABLE@GOTPAGEOFF]
Lloh7:
ldr w9, [x9]
Lloh8:
adrp x10, _FEEDEQ_DISABLE@GOTPAGE
Lloh9:
ldr x10, [x10, _FEEDEQ_DISABLE@GOTPAGEOFF]
Lloh10:
ldr w10, [x10]
cmp w8, w20
ccmp w9, w20, #4, ne
ccmp w10, w20, #4, ne
b.ne LBB0_14
; %bb.6:
str w20, [x19, #8]
b LBB0_17
LBB0_7:
mov x0, x20
bl _feeder_eq_validrate
cbz w0, LBB0_14
; %bb.8:
sxtw x8, w20
str x8, [x19, #32]
ldr w8, [x19, #8]
Lloh11:
adrp x9, _FEEDEQ_UNKNOWN@GOTPAGE
Lloh12:
ldr x9, [x9, _FEEDEQ_UNKNOWN@GOTPAGEOFF]
Lloh13:
ldr w9, [x9]
cmp w8, w9
b.ne LBB0_10
; %bb.9:
Lloh14:
adrp x8, _FEEDEQ_ENABLE@GOTPAGE
Lloh15:
ldr x8, [x8, _FEEDEQ_ENABLE@GOTPAGEOFF]
Lloh16:
ldr w8, [x8]
str w8, [x19, #8]
LBB0_10:
mov x0, x19
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
b _feed_eq_setup
LBB0_11:
Lloh17:
adrp x8, _FEEDEQ_PREAMP_MIN@GOTPAGE
Lloh18:
ldr x8, [x8, _FEEDEQ_PREAMP_MIN@GOTPAGEOFF]
Lloh19:
ldr w8, [x8]
Lloh20:
adrp x9, _FEEDEQ_PREAMP_MAX@GOTPAGE
Lloh21:
ldr x9, [x9, _FEEDEQ_PREAMP_MAX@GOTPAGEOFF]
Lloh22:
ldr w9, [x9]
cmp w8, w20
ccmp w9, w20, #8, le
b.lt LBB0_14
; %bb.12:
mov x0, x20
bl _FEEDEQ_PREAMP2IDX
mov x8, x0
mov w0, #0
str w8, [x19, #12]
b LBB0_18
LBB0_13:
Lloh23:
adrp x8, _SND_CHN_MIN@GOTPAGE
Lloh24:
ldr x8, [x8, _SND_CHN_MIN@GOTPAGEOFF]
Lloh25:
ldr w8, [x8]
Lloh26:
adrp x9, _SND_CHN_MAX@GOTPAGE
Lloh27:
ldr x9, [x9, _SND_CHN_MAX@GOTPAGEOFF]
Lloh28:
ldr w9, [x9]
cmp w8, w20
ccmp w9, w20, #8, le
b.ge LBB0_16
LBB0_14:
Lloh29:
adrp x8, _EINVAL@GOTPAGE
Lloh30:
ldr x8, [x8, _EINVAL@GOTPAGEOFF]
Lloh31:
ldr w0, [x8]
b LBB0_18
LBB0_15:
mov w0, #0
str x8, [x19, #16]
b LBB0_18
LBB0_16:
str w20, [x19]
ldr x8, [x0]
ldr w0, [x8]
bl _AFMT_BPS
mul w8, w0, w20
str w8, [x19, #4]
LBB0_17:
mov x0, x19
bl _feed_eq_reset
mov w0, #0
LBB0_18:
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
.loh AdrpAdd Lloh0, Lloh1
.loh AdrpLdrGotLdr Lloh8, Lloh9, Lloh10
.loh AdrpLdrGotLdr Lloh5, Lloh6, Lloh7
.loh AdrpLdrGotLdr Lloh2, Lloh3, Lloh4
.loh AdrpLdrGotLdr Lloh11, Lloh12, Lloh13
.loh AdrpLdrGotLdr Lloh14, Lloh15, Lloh16
.loh AdrpLdrGotLdr Lloh20, Lloh21, Lloh22
.loh AdrpLdrGotLdr Lloh17, Lloh18, Lloh19
.loh AdrpLdrGotLdr Lloh26, Lloh27, Lloh28
.loh AdrpLdrGotLdr Lloh23, Lloh24, Lloh25
.loh AdrpLdrGotLdr Lloh29, Lloh30, Lloh31
.cfi_endproc
.section __TEXT,__const
lJTI0_0:
.byte (LBB0_2-LBB0_2)>>2
.byte (LBB0_5-LBB0_2)>>2
.byte (LBB0_7-LBB0_2)>>2
.byte (LBB0_11-LBB0_2)>>2
.byte (LBB0_13-LBB0_2)>>2
.byte (LBB0_2-LBB0_2)>>2
; -- End function
.comm _SND_CHN_MIN,4,2 ; @SND_CHN_MIN
.comm _SND_CHN_MAX,4,2 ; @SND_CHN_MAX
.comm _EINVAL,4,2 ; @EINVAL
.comm _FEEDEQ_UNKNOWN,4,2 ; @FEEDEQ_UNKNOWN
.comm _FEEDEQ_ENABLE,4,2 ; @FEEDEQ_ENABLE
.comm _FEEDEQ_PREAMP_MIN,4,2 ; @FEEDEQ_PREAMP_MIN
.comm _FEEDEQ_PREAMP_MAX,4,2 ; @FEEDEQ_PREAMP_MAX
.comm _FEEDEQ_BYPASS,4,2 ; @FEEDEQ_BYPASS
.comm _FEEDEQ_DISABLE,4,2 ; @FEEDEQ_DISABLE
.no_dead_strip _feed_eq_set
.subsections_via_symbols
| AnghaBench/freebsd/sys/dev/sound/pcm/extr_feeder_eq.c_feed_eq_set.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function pager_wait_on_lock
_pager_wait_on_lock: ## @pager_wait_on_lock
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %esi, %r14d
movq %rdi, %r15
movl (%rdi), %eax
movl $1, %edi
cmpl %esi, %eax
jge LBB0_4
## %bb.1:
movq _NO_LOCK@GOTPCREL(%rip), %rcx
cmpl (%rcx), %eax
jne LBB0_3
## %bb.2:
movq _SHARED_LOCK@GOTPCREL(%rip), %rcx
cmpl %r14d, (%rcx)
je LBB0_4
LBB0_3:
movq _RESERVED_LOCK@GOTPCREL(%rip), %rcx
xorl (%rcx), %eax
movq _EXCLUSIVE_LOCK@GOTPCREL(%rip), %rcx
movl (%rcx), %ecx
xorl %r14d, %ecx
xorl %edi, %edi
orl %eax, %ecx
sete %dil
LBB0_4:
callq _assert
movq _SQLITE_BUSY@GOTPCREL(%rip), %r12
.p2align 4, 0x90
LBB0_5: ## =>This Inner Loop Header: Depth=1
movq %r15, %rdi
movl %r14d, %esi
callq _pagerLockDb
movl %eax, %ebx
cmpl (%r12), %eax
jne LBB0_7
## %bb.6: ## in Loop: Header=BB0_5 Depth=1
movl 4(%r15), %edi
callq *8(%r15)
testq %rax, %rax
jne LBB0_5
LBB0_7:
movl %ebx, %eax
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _NO_LOCK,4,2 ## @NO_LOCK
.comm _SHARED_LOCK,4,2 ## @SHARED_LOCK
.comm _RESERVED_LOCK,4,2 ## @RESERVED_LOCK
.comm _EXCLUSIVE_LOCK,4,2 ## @EXCLUSIVE_LOCK
.comm _SQLITE_BUSY,4,2 ## @SQLITE_BUSY
.no_dead_strip _pager_wait_on_lock
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function pager_wait_on_lock
_pager_wait_on_lock: ; @pager_wait_on_lock
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x19, x1
mov x20, x0
ldr w8, [x0]
cmp w8, w1
b.ge LBB0_3
; %bb.1:
Lloh0:
adrp x9, _NO_LOCK@GOTPAGE
Lloh1:
ldr x9, [x9, _NO_LOCK@GOTPAGEOFF]
Lloh2:
ldr w9, [x9]
Lloh3:
adrp x10, _SHARED_LOCK@GOTPAGE
Lloh4:
ldr x10, [x10, _SHARED_LOCK@GOTPAGEOFF]
Lloh5:
ldr w10, [x10]
cmp w8, w9
ccmp w10, w19, #0, eq
b.eq LBB0_3
; %bb.2:
Lloh6:
adrp x9, _RESERVED_LOCK@GOTPAGE
Lloh7:
ldr x9, [x9, _RESERVED_LOCK@GOTPAGEOFF]
Lloh8:
ldr w9, [x9]
cmp w8, w9
Lloh9:
adrp x8, _EXCLUSIVE_LOCK@GOTPAGE
Lloh10:
ldr x8, [x8, _EXCLUSIVE_LOCK@GOTPAGEOFF]
Lloh11:
ldr w8, [x8]
ccmp w8, w19, #0, eq
cset w0, eq
b LBB0_4
LBB0_3:
mov w0, #1
LBB0_4:
bl _assert
Lloh12:
adrp x22, _SQLITE_BUSY@GOTPAGE
Lloh13:
ldr x22, [x22, _SQLITE_BUSY@GOTPAGEOFF]
LBB0_5: ; =>This Inner Loop Header: Depth=1
mov x0, x20
mov x1, x19
bl _pagerLockDb
mov x21, x0
ldr w8, [x22]
cmp w0, w8
b.ne LBB0_7
; %bb.6: ; in Loop: Header=BB0_5 Depth=1
ldr x8, [x20, #8]
ldr w0, [x20, #4]
blr x8
cbnz x0, LBB0_5
LBB0_7:
mov x0, x21
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGotLdr Lloh9, Lloh10, Lloh11
.loh AdrpLdrGotLdr Lloh6, Lloh7, Lloh8
.loh AdrpLdrGot Lloh12, Lloh13
.cfi_endproc
; -- End function
.comm _NO_LOCK,4,2 ; @NO_LOCK
.comm _SHARED_LOCK,4,2 ; @SHARED_LOCK
.comm _RESERVED_LOCK,4,2 ; @RESERVED_LOCK
.comm _EXCLUSIVE_LOCK,4,2 ; @EXCLUSIVE_LOCK
.comm _SQLITE_BUSY,4,2 ; @SQLITE_BUSY
.no_dead_strip _pager_wait_on_lock
.subsections_via_symbols
| AnghaBench/nodemcu-firmware/app/sqlite3/extr_sqlite3.c_pager_wait_on_lock.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function ibmvfc_interrupt
_ibmvfc_interrupt: ## @ibmvfc_interrupt
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
movq %rsi, %rbx
movq (%rsi), %rax
movl (%rax), %edi
callq _spin_lock_irqsave
movl 12(%rbx), %edi
callq _to_vio_dev
movl %eax, %edi
callq _vio_disable_interrupts
leaq 8(%rbx), %rdi
callq _tasklet_schedule
movq (%rbx), %rax
movl (%rax), %edi
callq _spin_unlock_irqrestore
movq _IRQ_HANDLED@GOTPCREL(%rip), %rax
movl (%rax), %eax
addq $8, %rsp
popq %rbx
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _IRQ_HANDLED,4,2 ## @IRQ_HANDLED
.no_dead_strip _ibmvfc_interrupt
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function ibmvfc_interrupt
_ibmvfc_interrupt: ; @ibmvfc_interrupt
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x1
ldr x8, [x1]
ldr w0, [x8]
bl _spin_lock_irqsave
ldr w0, [x19, #12]
bl _to_vio_dev
bl _vio_disable_interrupts
add x0, x19, #8
bl _tasklet_schedule
ldr x8, [x19]
ldr w0, [x8]
bl _spin_unlock_irqrestore
Lloh0:
adrp x8, _IRQ_HANDLED@GOTPAGE
Lloh1:
ldr x8, [x8, _IRQ_HANDLED@GOTPAGEOFF]
Lloh2:
ldr w0, [x8]
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _IRQ_HANDLED,4,2 ; @IRQ_HANDLED
.no_dead_strip _ibmvfc_interrupt
.subsections_via_symbols
| AnghaBench/fastsocket/kernel/drivers/scsi/ibmvscsi/extr_ibmvfc.c_ibmvfc_interrupt.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _ct_set_invtxc ## -- Begin function ct_set_invtxc
.p2align 4, 0x90
_ct_set_invtxc: ## @ct_set_invtxc
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
movq %rdi, %rbx
cmpq $0, 8(%rdi)
jne LBB0_1
## %bb.2:
movq _BCR2_INVTXC0@GOTPCREL(%rip), %rax
jmp LBB0_3
LBB0_1:
movq _BCR2_INVTXC1@GOTPCREL(%rip), %rax
LBB0_3:
movl (%rax), %eax
testl %esi, %esi
je LBB0_5
## %bb.4:
movq (%rbx), %rcx
orl %eax, (%rcx)
jmp LBB0_6
LBB0_5:
notl %eax
movq (%rbx), %rcx
andl %eax, (%rcx)
LBB0_6:
movl 4(%rcx), %edi
callq _BCR2
movq (%rbx), %rcx
movl (%rcx), %esi
movl %eax, %edi
addq $8, %rsp
popq %rbx
popq %rbp
jmp _outb ## TAILCALL
.cfi_endproc
## -- End function
.comm _BCR2_INVTXC1,4,2 ## @BCR2_INVTXC1
.comm _BCR2_INVTXC0,4,2 ## @BCR2_INVTXC0
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _ct_set_invtxc ; -- Begin function ct_set_invtxc
.p2align 2
_ct_set_invtxc: ; @ct_set_invtxc
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x0
Lloh0:
adrp x8, _BCR2_INVTXC1@GOTPAGE
Lloh1:
ldr x8, [x8, _BCR2_INVTXC1@GOTPAGEOFF]
ldr x9, [x0, #8]
Lloh2:
adrp x10, _BCR2_INVTXC0@GOTPAGE
Lloh3:
ldr x10, [x10, _BCR2_INVTXC0@GOTPAGEOFF]
cmp x9, #0
csel x8, x10, x8, eq
ldr w9, [x8]
cbz w1, LBB0_2
; %bb.1:
ldr x8, [x19]
ldr w10, [x8]
orr w9, w10, w9
b LBB0_3
LBB0_2:
ldr x8, [x19]
ldr w10, [x8]
bic w9, w10, w9
LBB0_3:
str w9, [x8]
ldr w0, [x8, #4]
bl _BCR2
ldr x8, [x19]
ldr w1, [x8]
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
b _outb
.loh AdrpLdrGot Lloh2, Lloh3
.loh AdrpLdrGot Lloh0, Lloh1
.cfi_endproc
; -- End function
.comm _BCR2_INVTXC1,4,2 ; @BCR2_INVTXC1
.comm _BCR2_INVTXC0,4,2 ; @BCR2_INVTXC0
.subsections_via_symbols
| AnghaBench/freebsd/sys/dev/ctau/extr_ctddk.c_ct_set_invtxc.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function stop_watchdog
_stop_watchdog: ## @stop_watchdog
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
subq $16, %rsp
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq _wd_hrtimer@GOTPCREL(%rip), %rdi
callq _this_cpu_ptr
movq %rax, %r14
xorl %eax, %eax
callq _smp_processor_id
movl %eax, %ebx
movq _wd_cpus_enabled@GOTPCREL(%rip), %rsi
movl %eax, %edi
callq _cpumask_test_cpu
testl %eax, %eax
je LBB0_2
## %bb.1:
movq %r14, %rdi
callq _hrtimer_cancel
leaq -24(%rbp), %r14
movq %r14, %rdi
callq _wd_smp_lock
movq _wd_cpus_enabled@GOTPCREL(%rip), %rsi
movl %ebx, %edi
callq _cpumask_clear_cpu
movq %r14, %rdi
callq _wd_smp_unlock
xorl %eax, %eax
callq _get_tb
movl %ebx, %edi
movl %eax, %esi
callq _wd_smp_clear_cpu_pending
LBB0_2:
addq $16, %rsp
popq %rbx
popq %r14
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _wd_hrtimer,4,2 ## @wd_hrtimer
.comm _wd_cpus_enabled,4,2 ## @wd_cpus_enabled
.no_dead_strip _stop_watchdog
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function stop_watchdog
_stop_watchdog: ; @stop_watchdog
.cfi_startproc
; %bb.0:
sub sp, sp, #48
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
Lloh0:
adrp x0, _wd_hrtimer@GOTPAGE
Lloh1:
ldr x0, [x0, _wd_hrtimer@GOTPAGEOFF]
bl _this_cpu_ptr
mov x20, x0
bl _smp_processor_id
mov x19, x0
Lloh2:
adrp x1, _wd_cpus_enabled@GOTPAGE
Lloh3:
ldr x1, [x1, _wd_cpus_enabled@GOTPAGEOFF]
bl _cpumask_test_cpu
cbz w0, LBB0_2
; %bb.1:
mov x0, x20
bl _hrtimer_cancel
add x0, sp, #8
bl _wd_smp_lock
Lloh4:
adrp x1, _wd_cpus_enabled@GOTPAGE
Lloh5:
ldr x1, [x1, _wd_cpus_enabled@GOTPAGEOFF]
mov x0, x19
bl _cpumask_clear_cpu
add x0, sp, #8
bl _wd_smp_unlock
bl _get_tb
mov x1, x0
mov x0, x19
bl _wd_smp_clear_cpu_pending
LBB0_2:
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #48
ret
.loh AdrpLdrGot Lloh2, Lloh3
.loh AdrpLdrGot Lloh0, Lloh1
.loh AdrpLdrGot Lloh4, Lloh5
.cfi_endproc
; -- End function
.comm _wd_hrtimer,4,2 ; @wd_hrtimer
.comm _wd_cpus_enabled,4,2 ; @wd_cpus_enabled
.no_dead_strip _stop_watchdog
.subsections_via_symbols
| AnghaBench/linux/arch/powerpc/kernel/extr_watchdog.c_stop_watchdog.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _txg_wait_synced_sig ## -- Begin function txg_wait_synced_sig
.p2align 4, 0x90
_txg_wait_synced_sig: ## @txg_wait_synced_sig
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq _B_TRUE@GOTPCREL(%rip), %rax
movl (%rax), %edx
popq %rbp
jmp _txg_wait_synced_impl ## TAILCALL
.cfi_endproc
## -- End function
.comm _B_TRUE,4,2 ## @B_TRUE
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _txg_wait_synced_sig ; -- Begin function txg_wait_synced_sig
.p2align 2
_txg_wait_synced_sig: ; @txg_wait_synced_sig
.cfi_startproc
; %bb.0:
Lloh0:
adrp x8, _B_TRUE@GOTPAGE
Lloh1:
ldr x8, [x8, _B_TRUE@GOTPAGEOFF]
Lloh2:
ldr w2, [x8]
b _txg_wait_synced_impl
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _B_TRUE,4,2 ; @B_TRUE
.subsections_via_symbols
| AnghaBench/zfs/module/zfs/extr_txg.c_txg_wait_synced_sig.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function uniphier_watchdog_start
_uniphier_watchdog_start: ## @uniphier_watchdog_start
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq %rdi, %rbx
callq _watchdog_get_drvdata
movq %rax, %r14
movl (%rbx), %edi
callq _roundup_pow_of_two
movl (%r14), %edi
movl %eax, %esi
popq %rbx
popq %r14
popq %rbp
jmp ___uniphier_watchdog_start ## TAILCALL
.cfi_endproc
## -- End function
.no_dead_strip _uniphier_watchdog_start
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function uniphier_watchdog_start
_uniphier_watchdog_start: ; @uniphier_watchdog_start
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x0
bl _watchdog_get_drvdata
mov x20, x0
ldr w0, [x19]
bl _roundup_pow_of_two
mov x1, x0
ldr w0, [x20]
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
b ___uniphier_watchdog_start
.cfi_endproc
; -- End function
.no_dead_strip _uniphier_watchdog_start
.subsections_via_symbols
| AnghaBench/linux/drivers/watchdog/extr_uniphier_wdt.c_uniphier_watchdog_start.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _btrfs_start_transaction_fallback_global_rsv ## -- Begin function btrfs_start_transaction_fallback_global_rsv
.p2align 4, 0x90
_btrfs_start_transaction_fallback_global_rsv: ## @btrfs_start_transaction_fallback_global_rsv
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %edx, %r15d
movl %esi, %r12d
movq %rdi, %rbx
movq (%rdi), %r14
movq _TRANS_START@GOTPCREL(%rip), %rax
movl (%rax), %edx
movq _BTRFS_RESERVE_FLUSH_ALL@GOTPCREL(%rip), %rax
movl (%rax), %ecx
xorl %r8d, %r8d
callq _start_transaction
movq %rax, %r13
movq %rax, %rdi
callq _IS_ERR
testq %rax, %rax
je LBB0_5
## %bb.1:
movq %r13, %rdi
callq _PTR_ERR
movq _ENOSPC@GOTPCREL(%rip), %rcx
addl (%rcx), %eax
jne LBB0_5
## %bb.2:
movq %rbx, %rdi
xorl %esi, %esi
callq _btrfs_start_transaction
movq %rax, %r13
movq %rax, %rdi
callq _IS_ERR
testq %rax, %rax
je LBB0_3
LBB0_5:
movq %r13, %rax
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB0_3:
movq %r14, %rdi
movl %r12d, %esi
callq _btrfs_calc_insert_metadata_size
movl %eax, %r12d
movq %r14, %rdi
movq %r14, %rsi
movl %eax, %edx
movl %r15d, %ecx
callq _btrfs_cond_migrate_bytes
testl %eax, %eax
je LBB0_4
## %bb.6:
movl %eax, %ebx
movq %r13, %rdi
callq _btrfs_end_transaction
movl %ebx, %edi
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp _ERR_PTR ## TAILCALL
LBB0_4:
movq %r14, 8(%r13)
movl %r12d, 4(%r13)
movl (%r13), %edx
leaq L_.str(%rip), %rsi
movq %r14, %rdi
movl %r12d, %ecx
movl $1, %r8d
callq _trace_btrfs_space_reservation
jmp LBB0_5
.cfi_endproc
## -- End function
.comm _TRANS_START,4,2 ## @TRANS_START
.comm _BTRFS_RESERVE_FLUSH_ALL,4,2 ## @BTRFS_RESERVE_FLUSH_ALL
.comm _ENOSPC,4,2 ## @ENOSPC
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "transaction"
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _btrfs_start_transaction_fallback_global_rsv ; -- Begin function btrfs_start_transaction_fallback_global_rsv
.p2align 2
_btrfs_start_transaction_fallback_global_rsv: ; @btrfs_start_transaction_fallback_global_rsv
.cfi_startproc
; %bb.0:
stp x24, x23, [sp, #-64]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 64
stp x22, x21, [sp, #16] ; 16-byte Folded Spill
stp x20, x19, [sp, #32] ; 16-byte Folded Spill
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
mov x21, x2
mov x22, x1
mov x23, x0
ldr x19, [x0]
Lloh0:
adrp x8, _TRANS_START@GOTPAGE
Lloh1:
ldr x8, [x8, _TRANS_START@GOTPAGEOFF]
Lloh2:
ldr w2, [x8]
Lloh3:
adrp x8, _BTRFS_RESERVE_FLUSH_ALL@GOTPAGE
Lloh4:
ldr x8, [x8, _BTRFS_RESERVE_FLUSH_ALL@GOTPAGEOFF]
Lloh5:
ldr w3, [x8]
mov w4, #0
bl _start_transaction
mov x20, x0
bl _IS_ERR
cbz x0, LBB0_3
; %bb.1:
mov x0, x20
bl _PTR_ERR
Lloh6:
adrp x8, _ENOSPC@GOTPAGE
Lloh7:
ldr x8, [x8, _ENOSPC@GOTPAGEOFF]
Lloh8:
ldr w8, [x8]
cmn w0, w8
b.ne LBB0_3
; %bb.2:
mov x0, x23
mov w1, #0
bl _btrfs_start_transaction
mov x20, x0
bl _IS_ERR
cbz x0, LBB0_4
LBB0_3:
mov x0, x20
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
ldp x24, x23, [sp], #64 ; 16-byte Folded Reload
ret
LBB0_4:
mov x0, x19
mov x1, x22
bl _btrfs_calc_insert_metadata_size
mov x22, x0
mov x0, x19
mov x1, x19
mov x2, x22
mov x3, x21
bl _btrfs_cond_migrate_bytes
cbz w0, LBB0_6
; %bb.5:
mov x21, x0
mov x0, x20
bl _btrfs_end_transaction
mov x0, x21
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
ldp x24, x23, [sp], #64 ; 16-byte Folded Reload
b _ERR_PTR
LBB0_6:
str x19, [x20, #8]
str w22, [x20, #4]
ldr w2, [x20]
Lloh9:
adrp x1, l_.str@PAGE
Lloh10:
add x1, x1, l_.str@PAGEOFF
mov x0, x19
mov x3, x22
mov w4, #1
bl _trace_btrfs_space_reservation
b LBB0_3
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGotLdr Lloh6, Lloh7, Lloh8
.loh AdrpAdd Lloh9, Lloh10
.cfi_endproc
; -- End function
.comm _TRANS_START,4,2 ; @TRANS_START
.comm _BTRFS_RESERVE_FLUSH_ALL,4,2 ; @BTRFS_RESERVE_FLUSH_ALL
.comm _ENOSPC,4,2 ; @ENOSPC
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "transaction"
.subsections_via_symbols
| AnghaBench/linux/fs/btrfs/extr_transaction.c_btrfs_start_transaction_fallback_global_rsv.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function print_obj_settings_list
_print_obj_settings_list: ## @print_obj_settings_list
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $24, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rsi, %rdi
callq _VAL
movq %rax, %r12
leaq L_.str(%rip), %rsi
xorl %edi, %edi
callq _talloc_strdup
movq %rax, -48(%rbp)
testq %r12, %r12
je LBB0_18
## %bb.1:
cmpq $0, (%r12)
je LBB0_17
## %bb.2:
xorl %r14d, %r14d
movq %r12, %r13
movq %r12, -56(%rbp) ## 8-byte Spill
jmp LBB0_3
.p2align 4, 0x90
LBB0_16: ## in Loop: Header=BB0_3 Depth=1
incq %r14
movq %r14, %rax
shlq $5, %rax
movq -56(%rbp), %r12 ## 8-byte Reload
leaq (%r12,%rax), %r13
cmpq $0, (%r12,%rax)
je LBB0_17
LBB0_3: ## =>This Loop Header: Depth=1
## Child Loop BB0_15 Depth 2
testq %r14, %r14
je LBB0_5
## %bb.4: ## in Loop: Header=BB0_3 Depth=1
movq -48(%rbp), %rdi
leaq L_.str.1(%rip), %rsi
callq _talloc_strdup_append
movq %rax, -48(%rbp)
LBB0_5: ## in Loop: Header=BB0_3 Depth=1
movq %r14, %rbx
shlq $5, %rbx
movq 24(%r12,%rbx), %rdx
testq %rdx, %rdx
je LBB0_8
## %bb.6: ## in Loop: Header=BB0_3 Depth=1
cmpq $0, (%rdx)
je LBB0_8
## %bb.7: ## in Loop: Header=BB0_3 Depth=1
movq -48(%rbp), %rdi
leaq L_.str.2(%rip), %rsi
callq _talloc_asprintf_append
movq %rax, -48(%rbp)
LBB0_8: ## in Loop: Header=BB0_3 Depth=1
cmpl $0, 16(%r12,%rbx)
movq -48(%rbp), %rdi
jne LBB0_10
## %bb.9: ## in Loop: Header=BB0_3 Depth=1
leaq L_.str.3(%rip), %rsi
callq _talloc_strdup_append
movq %rax, %rdi
movq %rax, -48(%rbp)
LBB0_10: ## in Loop: Header=BB0_3 Depth=1
movq (%r13), %rsi
callq _talloc_strdup_append
movq %rax, -48(%rbp)
movq 8(%r12,%rbx), %rcx
testq %rcx, %rcx
leaq -48(%rbp), %r15
je LBB0_16
## %bb.11: ## in Loop: Header=BB0_3 Depth=1
cmpq $0, (%rcx)
je LBB0_16
## %bb.12: ## in Loop: Header=BB0_3 Depth=1
movq -56(%rbp), %rcx ## 8-byte Reload
leaq (%rcx,%rbx), %r13
addq $8, %r13
movq %rax, %rdi
leaq L_.str.4(%rip), %rsi
callq _talloc_strdup_append
movq %rax, -48(%rbp)
movq (%r13), %rax
movq (%rax), %rsi
testq %rsi, %rsi
je LBB0_16
## %bb.13: ## in Loop: Header=BB0_3 Depth=1
movq %r15, %rdi
callq _append_param
movq -48(%rbp), %rdi
leaq L_.str.4(%rip), %rsi
callq _talloc_strdup_append
movq %rax, -48(%rbp)
movq (%r13), %rax
movq 8(%rax), %rsi
movq %r15, %rdi
callq _append_param
movq (%r13), %rax
cmpq $0, 16(%rax)
je LBB0_16
## %bb.14: ## in Loop: Header=BB0_3 Depth=1
movl $2, %ebx
movl $4, %r12d
.p2align 4, 0x90
LBB0_15: ## Parent Loop BB0_3 Depth=1
## => This Inner Loop Header: Depth=2
movq -48(%rbp), %rdi
leaq L_.str.5(%rip), %rsi
callq _talloc_strdup_append
movq %rax, -48(%rbp)
movq (%r13), %rax
movq (%rax,%rbx,8), %rsi
shlq $3, %rbx
movq %r15, %rdi
callq _append_param
movq -48(%rbp), %rdi
leaq L_.str.4(%rip), %rsi
callq _talloc_strdup_append
movq %rax, -48(%rbp)
movq (%r13), %rax
orq $8, %rbx
movq (%rax,%rbx), %rsi
movq %r15, %rdi
callq _append_param
movq (%r13), %rax
movl %r12d, %ebx
andl $-2, %ebx
addq $2, %r12
cmpq $0, (%rax,%rbx,8)
jne LBB0_15
jmp LBB0_16
LBB0_17:
movq -48(%rbp), %rax
LBB0_18:
addq $24, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.space 1
L_.str.1: ## @.str.1
.asciz ","
L_.str.2: ## @.str.2
.asciz "@%s:"
L_.str.3: ## @.str.3
.asciz "!"
L_.str.4: ## @.str.4
.asciz "="
L_.str.5: ## @.str.5
.asciz ":"
.no_dead_strip _print_obj_settings_list
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function print_obj_settings_list
_print_obj_settings_list: ; @print_obj_settings_list
.cfi_startproc
; %bb.0:
sub sp, sp, #112
.cfi_def_cfa_offset 112
stp x28, x27, [sp, #16] ; 16-byte Folded Spill
stp x26, x25, [sp, #32] ; 16-byte Folded Spill
stp x24, x23, [sp, #48] ; 16-byte Folded Spill
stp x22, x21, [sp, #64] ; 16-byte Folded Spill
stp x20, x19, [sp, #80] ; 16-byte Folded Spill
stp x29, x30, [sp, #96] ; 16-byte Folded Spill
add x29, sp, #96
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
mov x0, x1
bl _VAL
mov x19, x0
Lloh0:
adrp x1, l_.str@PAGE
Lloh1:
add x1, x1, l_.str@PAGEOFF
mov x0, #0
bl _talloc_strdup
str x0, [sp, #8]
cbz x19, LBB0_18
; %bb.1:
ldr x8, [x19]
cbz x8, LBB0_17
; %bb.2:
mov x25, #0
Lloh2:
adrp x20, l_.str.3@PAGE
Lloh3:
add x20, x20, l_.str.3@PAGEOFF
Lloh4:
adrp x21, l_.str.4@PAGE
Lloh5:
add x21, x21, l_.str.4@PAGEOFF
Lloh6:
adrp x22, l_.str.5@PAGE
Lloh7:
add x22, x22, l_.str.5@PAGEOFF
Lloh8:
adrp x23, l_.str.2@PAGE
Lloh9:
add x23, x23, l_.str.2@PAGEOFF
mov x26, x19
Lloh10:
adrp x24, l_.str.1@PAGE
Lloh11:
add x24, x24, l_.str.1@PAGEOFF
b LBB0_4
LBB0_3: ; in Loop: Header=BB0_4 Depth=1
add x25, x25, #1
add x26, x19, x25, lsl #5
ldr x8, [x26]
cbz x8, LBB0_17
LBB0_4: ; =>This Loop Header: Depth=1
; Child Loop BB0_16 Depth 2
cbz x25, LBB0_6
; %bb.5: ; in Loop: Header=BB0_4 Depth=1
ldr x0, [sp, #8]
mov x1, x24
bl _talloc_strdup_append
str x0, [sp, #8]
LBB0_6: ; in Loop: Header=BB0_4 Depth=1
add x27, x19, x25, lsl #5
ldr x2, [x27, #24]
cbz x2, LBB0_9
; %bb.7: ; in Loop: Header=BB0_4 Depth=1
ldr x8, [x2]
cbz x8, LBB0_9
; %bb.8: ; in Loop: Header=BB0_4 Depth=1
ldr x0, [sp, #8]
mov x1, x23
bl _talloc_asprintf_append
str x0, [sp, #8]
LBB0_9: ; in Loop: Header=BB0_4 Depth=1
ldr w8, [x27, #16]
ldr x0, [sp, #8]
cbnz w8, LBB0_11
; %bb.10: ; in Loop: Header=BB0_4 Depth=1
mov x1, x20
bl _talloc_strdup_append
str x0, [sp, #8]
LBB0_11: ; in Loop: Header=BB0_4 Depth=1
ldr x1, [x26]
bl _talloc_strdup_append
str x0, [sp, #8]
add x26, x19, x25, lsl #5
ldr x8, [x26, #8]!
cbz x8, LBB0_3
; %bb.12: ; in Loop: Header=BB0_4 Depth=1
ldr x8, [x8]
cbz x8, LBB0_3
; %bb.13: ; in Loop: Header=BB0_4 Depth=1
mov x1, x21
bl _talloc_strdup_append
str x0, [sp, #8]
ldr x8, [x26]
ldr x1, [x8]
cbz x1, LBB0_3
; %bb.14: ; in Loop: Header=BB0_4 Depth=1
add x0, sp, #8
bl _append_param
ldr x0, [sp, #8]
mov x1, x21
bl _talloc_strdup_append
str x0, [sp, #8]
ldr x8, [x26]
ldr x1, [x8, #8]
add x0, sp, #8
bl _append_param
ldr x8, [x26]
ldr x8, [x8, #16]
cbz x8, LBB0_3
; %bb.15: ; in Loop: Header=BB0_4 Depth=1
mov w27, #4
mov w28, #2
LBB0_16: ; Parent Loop BB0_4 Depth=1
; => This Inner Loop Header: Depth=2
ldr x0, [sp, #8]
mov x1, x22
bl _talloc_strdup_append
str x0, [sp, #8]
ldr x8, [x26]
lsl x28, x28, #3
ldr x1, [x8, x28]
add x0, sp, #8
bl _append_param
ldr x0, [sp, #8]
mov x1, x21
bl _talloc_strdup_append
str x0, [sp, #8]
ldr x8, [x26]
orr x9, x28, #0x8
ldr x1, [x8, x9]
add x0, sp, #8
bl _append_param
ldr x8, [x26]
and x28, x27, #0xfffffffe
ldr x8, [x8, x28, lsl #3]
add x27, x27, #2
cbnz x8, LBB0_16
b LBB0_3
LBB0_17:
ldr x0, [sp, #8]
LBB0_18:
ldp x29, x30, [sp, #96] ; 16-byte Folded Reload
ldp x20, x19, [sp, #80] ; 16-byte Folded Reload
ldp x22, x21, [sp, #64] ; 16-byte Folded Reload
ldp x24, x23, [sp, #48] ; 16-byte Folded Reload
ldp x26, x25, [sp, #32] ; 16-byte Folded Reload
ldp x28, x27, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #112
ret
.loh AdrpAdd Lloh0, Lloh1
.loh AdrpAdd Lloh10, Lloh11
.loh AdrpAdd Lloh8, Lloh9
.loh AdrpAdd Lloh6, Lloh7
.loh AdrpAdd Lloh4, Lloh5
.loh AdrpAdd Lloh2, Lloh3
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.space 1
l_.str.1: ; @.str.1
.asciz ","
l_.str.2: ; @.str.2
.asciz "@%s:"
l_.str.3: ; @.str.3
.asciz "!"
l_.str.4: ; @.str.4
.asciz "="
l_.str.5: ; @.str.5
.asciz ":"
.no_dead_strip _print_obj_settings_list
.subsections_via_symbols
| AnghaBench/mpv/options/extr_m_option.c_print_obj_settings_list.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function Progress
_Progress: ## @Progress
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
testq %rdi, %rdi
je LBB0_3
## %bb.1:
callq *(%rdi)
movq _SZ_OK@GOTPCREL(%rip), %rcx
cmpq (%rcx), %rax
jne LBB0_2
LBB0_3:
movq _SZ_OK@GOTPCREL(%rip), %rax
jmp LBB0_4
LBB0_2:
movq _SZ_ERROR_PROGRESS@GOTPCREL(%rip), %rax
LBB0_4:
movq (%rax), %rax
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _SZ_OK,8,3 ## @SZ_OK
.comm _SZ_ERROR_PROGRESS,8,3 ## @SZ_ERROR_PROGRESS
.no_dead_strip _Progress
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function Progress
_Progress: ; @Progress
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
cbz x0, LBB0_2
; %bb.1:
ldr x8, [x0]
blr x8
Lloh0:
adrp x8, _SZ_OK@GOTPAGE
Lloh1:
ldr x8, [x8, _SZ_OK@GOTPAGEOFF]
Lloh2:
ldr x8, [x8]
cmp x0, x8
b.ne LBB0_3
LBB0_2:
Lloh3:
adrp x8, _SZ_OK@GOTPAGE
Lloh4:
ldr x8, [x8, _SZ_OK@GOTPAGEOFF]
b LBB0_4
LBB0_3:
Lloh5:
adrp x8, _SZ_ERROR_PROGRESS@GOTPAGE
Lloh6:
ldr x8, [x8, _SZ_ERROR_PROGRESS@GOTPAGEOFF]
LBB0_4:
ldr x0, [x8]
ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
ret
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGot Lloh3, Lloh4
.loh AdrpLdrGot Lloh5, Lloh6
.cfi_endproc
; -- End function
.comm _SZ_OK,8,3 ; @SZ_OK
.comm _SZ_ERROR_PROGRESS,8,3 ; @SZ_ERROR_PROGRESS
.no_dead_strip _Progress
.subsections_via_symbols
| AnghaBench/sumatrapdf/ext/lzma/C/extr_Lzma2Enc.c_Progress.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _MuOfficeDoc_providePassword ## -- Begin function MuOfficeDoc_providePassword
.p2align 4, 0x90
_MuOfficeDoc_providePassword: ## @MuOfficeDoc_providePassword
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
cmpl $0, 4(%rdi)
je LBB0_2
## %bb.1:
movq _MuError_PasswordPending@GOTPCREL(%rip), %rax
jmp LBB0_3
LBB0_2:
movq %rdi, %rbx
testq %rsi, %rsi
leaq L_.str(%rip), %r14
cmovneq %rsi, %r14
movq %r14, %rdi
callq _strlen
leaq 1(%rax), %rdi
callq _Pal_Mem_malloc
movl %eax, 4(%rbx)
movl %eax, %edi
movq %r14, %rsi
callq _strcpy
movq %rbx, %rdi
callq _mu_trigger_semaphore
movq _MuError_OK@GOTPCREL(%rip), %rax
LBB0_3:
movl (%rax), %eax
popq %rbx
popq %r14
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _MuError_PasswordPending,4,2 ## @MuError_PasswordPending
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.space 1
.comm _MuError_OK,4,2 ## @MuError_OK
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _MuOfficeDoc_providePassword ; -- Begin function MuOfficeDoc_providePassword
.p2align 2
_MuOfficeDoc_providePassword: ; @MuOfficeDoc_providePassword
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
ldr w8, [x0, #4]
cbz w8, LBB0_2
; %bb.1:
Lloh0:
adrp x8, _MuError_PasswordPending@GOTPAGE
Lloh1:
ldr x8, [x8, _MuError_PasswordPending@GOTPAGEOFF]
b LBB0_3
LBB0_2:
mov x19, x0
Lloh2:
adrp x8, l_.str@PAGE
Lloh3:
add x8, x8, l_.str@PAGEOFF
cmp x1, #0
csel x20, x8, x1, eq
mov x0, x20
bl _strlen
add x0, x0, #1
bl _Pal_Mem_malloc
str w0, [x19, #4]
mov x1, x20
bl _strcpy
mov x0, x19
bl _mu_trigger_semaphore
Lloh4:
adrp x8, _MuError_OK@GOTPAGE
Lloh5:
ldr x8, [x8, _MuError_OK@GOTPAGEOFF]
LBB0_3:
ldr w0, [x8]
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.loh AdrpLdrGot Lloh0, Lloh1
.loh AdrpLdrGot Lloh4, Lloh5
.loh AdrpAdd Lloh2, Lloh3
.cfi_endproc
; -- End function
.comm _MuError_PasswordPending,4,2 ; @MuError_PasswordPending
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.space 1
.comm _MuError_OK,4,2 ; @MuError_OK
.subsections_via_symbols
| AnghaBench/sumatrapdf/mupdf/source/helpers/mu-office-lib/extr_mu-office-lib.c_MuOfficeDoc_providePassword.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _ds1603_disable ## -- Begin function ds1603_disable
.p2align 4, 0x90
_ds1603_disable: ## @ds1603_disable
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq _TRIMMER_DISABLE_RTC@GOTPCREL(%rip), %rax
movl (%rax), %edi
popq %rbp
jmp _ds1603_set_trimmer ## TAILCALL
.cfi_endproc
## -- End function
.comm _TRIMMER_DISABLE_RTC,4,2 ## @TRIMMER_DISABLE_RTC
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _ds1603_disable ; -- Begin function ds1603_disable
.p2align 2
_ds1603_disable: ; @ds1603_disable
.cfi_startproc
; %bb.0:
Lloh0:
adrp x8, _TRIMMER_DISABLE_RTC@GOTPAGE
Lloh1:
ldr x8, [x8, _TRIMMER_DISABLE_RTC@GOTPAGEOFF]
Lloh2:
ldr w0, [x8]
b _ds1603_set_trimmer
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _TRIMMER_DISABLE_RTC,4,2 ; @TRIMMER_DISABLE_RTC
.subsections_via_symbols
| AnghaBench/linux/arch/mips/lasat/extr_ds1603.c_ds1603_disable.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function io_uring_poll
_io_uring_poll: ## @io_uring_poll
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rsi, %rdx
movq (%rdi), %r15
leaq 24(%r15), %rsi
callq _poll_wait
xorl %r14d, %r14d
xorl %eax, %eax
callq _smp_rmb
movq 16(%r15), %rax
movl 12(%rax), %edi
callq _READ_ONCE
subq (%r15), %rax
movq 16(%r15), %rcx
movq _EPOLLOUT@GOTPCREL(%rip), %rdx
movq _EPOLLWRNORM@GOTPCREL(%rip), %rsi
movl (%rsi), %ebx
orl (%rdx), %ebx
cmpq (%rcx), %rax
cmovel %r14d, %ebx
movl 8(%rcx), %edi
callq _READ_ONCE
movq _EPOLLIN@GOTPCREL(%rip), %rdx
movq _EPOLLRDNORM@GOTPCREL(%rip), %rcx
movl (%rcx), %ecx
orl (%rdx), %ecx
cmpq 8(%r15), %rax
cmovel %r14d, %ecx
orl %ebx, %ecx
movl %ecx, %eax
addq $8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _EPOLLOUT,4,2 ## @EPOLLOUT
.comm _EPOLLWRNORM,4,2 ## @EPOLLWRNORM
.comm _EPOLLIN,4,2 ## @EPOLLIN
.comm _EPOLLRDNORM,4,2 ## @EPOLLRDNORM
.no_dead_strip _io_uring_poll
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function io_uring_poll
_io_uring_poll: ; @io_uring_poll
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x2, x1
ldr x19, [x0]
add x1, x19, #24
bl _poll_wait
bl _smp_rmb
ldr x8, [x19, #16]
ldr w0, [x8, #12]
bl _READ_ONCE
ldr x8, [x19]
ldr x9, [x19, #16]
ldr x10, [x9]
Lloh0:
adrp x11, _EPOLLOUT@GOTPAGE
Lloh1:
ldr x11, [x11, _EPOLLOUT@GOTPAGEOFF]
Lloh2:
ldr w11, [x11]
Lloh3:
adrp x12, _EPOLLWRNORM@GOTPAGE
Lloh4:
ldr x12, [x12, _EPOLLWRNORM@GOTPAGEOFF]
Lloh5:
ldr w12, [x12]
orr w11, w12, w11
sub x8, x0, x8
cmp x8, x10
csel w20, wzr, w11, eq
ldr w0, [x9, #8]
bl _READ_ONCE
ldr x8, [x19, #8]
Lloh6:
adrp x9, _EPOLLIN@GOTPAGE
Lloh7:
ldr x9, [x9, _EPOLLIN@GOTPAGEOFF]
Lloh8:
ldr w9, [x9]
Lloh9:
adrp x10, _EPOLLRDNORM@GOTPAGE
Lloh10:
ldr x10, [x10, _EPOLLRDNORM@GOTPAGEOFF]
Lloh11:
ldr w10, [x10]
orr w9, w10, w9
cmp x0, x8
csel w8, wzr, w9, eq
orr w0, w8, w20
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.loh AdrpLdrGotLdr Lloh9, Lloh10, Lloh11
.loh AdrpLdrGotLdr Lloh6, Lloh7, Lloh8
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _EPOLLOUT,4,2 ; @EPOLLOUT
.comm _EPOLLWRNORM,4,2 ; @EPOLLWRNORM
.comm _EPOLLIN,4,2 ; @EPOLLIN
.comm _EPOLLRDNORM,4,2 ; @EPOLLRDNORM
.no_dead_strip _io_uring_poll
.subsections_via_symbols
| AnghaBench/linux/fs/extr_io_uring.c_io_uring_poll.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _bayesianNetPrintSquereMatrix ## -- Begin function bayesianNetPrintSquereMatrix
.p2align 4, 0x90
_bayesianNetPrintSquereMatrix: ## @bayesianNetPrintSquereMatrix
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
cvtsi2sd %esi, %xmm0
movq %rdi, %r12
sqrtsd %xmm0, %xmm0
cvttsd2si %xmm0, %r13d
leaq L_.str(%rip), %rdi
callq _puts
leaq L_.str.1(%rip), %rdi
xorl %eax, %eax
callq _printf
testl %r13d, %r13d
jle LBB0_9
## %bb.1:
movl %r13d, %ebx
negl %ebx
movl $65, %r15d
leaq L_.str.2(%rip), %r14
.p2align 4, 0x90
LBB0_2: ## =>This Inner Loop Header: Depth=1
movq %r14, %rdi
movl %r15d, %esi
xorl %eax, %eax
callq _printf
leal (%rbx,%r15), %eax
incl %eax
movl %r15d, %ecx
incl %ecx
movl %ecx, %r15d
cmpl $65, %eax
jne LBB0_2
## %bb.3:
movl $10, %edi
callq _putchar
testl %r13d, %r13d
jle LBB0_8
## %bb.4:
movl %r13d, %ebx
leaq (,%rbx,4), %rax
movq %rax, -48(%rbp) ## 8-byte Spill
leaq L_.str.4(%rip), %r15
xorl %r14d, %r14d
.p2align 4, 0x90
LBB0_5: ## =>This Loop Header: Depth=1
## Child Loop BB0_6 Depth 2
leal 65(%r14), %esi
leaq L_.str.2(%rip), %rdi
xorl %eax, %eax
callq _printf
xorl %r13d, %r13d
.p2align 4, 0x90
LBB0_6: ## Parent Loop BB0_5 Depth=1
## => This Inner Loop Header: Depth=2
movl (%r12,%r13,4), %esi
movq %r15, %rdi
xorl %eax, %eax
callq _printf
incq %r13
cmpq %r13, %rbx
jne LBB0_6
## %bb.7: ## in Loop: Header=BB0_5 Depth=1
movl $10, %edi
callq _putchar
incq %r14
addq -48(%rbp), %r12 ## 8-byte Folded Reload
cmpq %rbx, %r14
jne LBB0_5
jmp LBB0_8
LBB0_9:
movl $10, %edi
callq _putchar
LBB0_8:
leaq L_.str(%rip), %rdi
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp _puts ## TAILCALL
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "--------------"
L_.str.1: ## @.str.1
.asciz " "
L_.str.2: ## @.str.2
.asciz "%c "
L_.str.4: ## @.str.4
.asciz "%d "
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _bayesianNetPrintSquereMatrix ; -- Begin function bayesianNetPrintSquereMatrix
.p2align 2
_bayesianNetPrintSquereMatrix: ; @bayesianNetPrintSquereMatrix
.cfi_startproc
; %bb.0:
sub sp, sp, #96
.cfi_def_cfa_offset 96
stp x26, x25, [sp, #16] ; 16-byte Folded Spill
stp x24, x23, [sp, #32] ; 16-byte Folded Spill
stp x22, x21, [sp, #48] ; 16-byte Folded Spill
stp x20, x19, [sp, #64] ; 16-byte Folded Spill
stp x29, x30, [sp, #80] ; 16-byte Folded Spill
add x29, sp, #80
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
mov x19, x0
scvtf d0, w1
fsqrt d0, d0
fcvtzs w22, d0
Lloh0:
adrp x0, l_.str@PAGE
Lloh1:
add x0, x0, l_.str@PAGEOFF
bl _puts
Lloh2:
adrp x0, l_.str.1@PAGE
Lloh3:
add x0, x0, l_.str.1@PAGEOFF
bl _printf
cmp w22, #1
b.lt LBB0_8
; %bb.1:
mov w21, #0
Lloh4:
adrp x20, l_.str.2@PAGE
Lloh5:
add x20, x20, l_.str.2@PAGEOFF
LBB0_2: ; =>This Inner Loop Header: Depth=1
add w8, w21, #65
str x8, [sp]
mov x0, x20
bl _printf
add w21, w21, #1
cmp w22, w21
b.ne LBB0_2
; %bb.3:
mov w0, #10
bl _putchar
cmp w22, #1
b.lt LBB0_9
; %bb.4:
mov x23, #0
lsl x24, x22, #2
Lloh6:
adrp x20, l_.str.2@PAGE
Lloh7:
add x20, x20, l_.str.2@PAGEOFF
Lloh8:
adrp x21, l_.str.4@PAGE
Lloh9:
add x21, x21, l_.str.4@PAGEOFF
LBB0_5: ; =>This Loop Header: Depth=1
; Child Loop BB0_6 Depth 2
add w8, w23, #65
str x8, [sp]
mov x0, x20
bl _printf
mov x25, x22
mov x26, x19
LBB0_6: ; Parent Loop BB0_5 Depth=1
; => This Inner Loop Header: Depth=2
ldr w8, [x26], #4
str x8, [sp]
mov x0, x21
bl _printf
subs x25, x25, #1
b.ne LBB0_6
; %bb.7: ; in Loop: Header=BB0_5 Depth=1
mov w0, #10
bl _putchar
add x23, x23, #1
add x19, x19, x24
cmp x23, x22
b.ne LBB0_5
b LBB0_9
LBB0_8:
mov w0, #10
bl _putchar
LBB0_9:
Lloh10:
adrp x0, l_.str@PAGE
Lloh11:
add x0, x0, l_.str@PAGEOFF
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
ldp x26, x25, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #96
b _puts
.loh AdrpAdd Lloh2, Lloh3
.loh AdrpAdd Lloh0, Lloh1
.loh AdrpAdd Lloh4, Lloh5
.loh AdrpAdd Lloh8, Lloh9
.loh AdrpAdd Lloh6, Lloh7
.loh AdrpAdd Lloh10, Lloh11
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "--------------"
l_.str.1: ; @.str.1
.asciz " "
l_.str.2: ; @.str.2
.asciz "%c "
l_.str.4: ; @.str.4
.asciz "%d "
.subsections_via_symbols
| the_stack_data/130872.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function gfs2_statfs_change_out
_gfs2_statfs_change_out: ## @gfs2_statfs_change_out
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq %rsi, %r14
movq %rdi, %rbx
movl 8(%rdi), %edi
callq _cpu_to_be64
movq %rax, 16(%r14)
movl 4(%rbx), %edi
callq _cpu_to_be64
movq %rax, 8(%r14)
movl (%rbx), %edi
callq _cpu_to_be64
movq %rax, (%r14)
popq %rbx
popq %r14
popq %rbp
retq
.cfi_endproc
## -- End function
.no_dead_strip _gfs2_statfs_change_out
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function gfs2_statfs_change_out
_gfs2_statfs_change_out: ; @gfs2_statfs_change_out
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x1
mov x20, x0
ldr w0, [x0, #8]
bl _cpu_to_be64
str x0, [x19, #16]
ldr w0, [x20, #4]
bl _cpu_to_be64
str x0, [x19, #8]
ldr w0, [x20]
bl _cpu_to_be64
str x0, [x19]
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
.no_dead_strip _gfs2_statfs_change_out
.subsections_via_symbols
| AnghaBench/fastsocket/kernel/fs/gfs2/extr_super.c_gfs2_statfs_change_out.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _CreateAccessMethod ## -- Begin function CreateAccessMethod
.p2align 4, 0x90
_CreateAccessMethod: ## @CreateAccessMethod
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $88, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rsi, %r13
movq %rdi, -96(%rbp) ## 8-byte Spill
movq %rsp, -88(%rbp) ## 8-byte Spill
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -48(%rbp)
movq _Natts_pg_am@GOTPCREL(%rip), %rbx
movl (%rbx), %eax
movq %rax, -56(%rbp) ## 8-byte Spill
leaq (,%rax,4), %rax
callq ____chkstk_darwin
addq $15, %rax
andq $-16, %rax
subq %rax, %rsp
movq %rsp, -72(%rbp) ## 8-byte Spill
movl (%rbx), %r12d
leaq (,%r12,4), %rax
callq ____chkstk_darwin
addq $15, %rax
andq $-16, %rax
subq %rax, %rsp
movq %rsp, -80(%rbp) ## 8-byte Spill
movq _AccessMethodRelationId@GOTPCREL(%rip), %rax
movl (%rax), %edi
movq _RowExclusiveLock@GOTPCREL(%rip), %rax
movl (%rax), %esi
callq _table_open
movl %eax, -60(%rbp) ## 4-byte Spill
xorl %eax, %eax
callq _superuser
testl %eax, %eax
jne LBB0_2
## %bb.1:
movq _ERROR@GOTPCREL(%rip), %rax
movl (%rax), %ebx
movq _ERRCODE_INSUFFICIENT_PRIVILEGE@GOTPCREL(%rip), %rax
movl (%rax), %edi
callq _errcode
movl 4(%r13), %esi
leaq L_.str(%rip), %rdi
callq _errmsg
leaq L_.str.1(%rip), %rdi
callq _errhint
movl %ebx, %edi
movl %eax, %esi
callq _ereport
LBB0_2:
movq _AMNAME@GOTPCREL(%rip), %rax
movl (%rax), %ebx
movq _Anum_pg_am_oid@GOTPCREL(%rip), %rax
movl (%rax), %r15d
movl 4(%r13), %edi
callq _CStringGetDatum
movl %ebx, %edi
movl %r15d, %esi
movl %eax, %edx
callq _GetSysCacheOid1
movq %rax, %rdi
callq _OidIsValid
testq %rax, %rax
je LBB0_4
## %bb.3:
movq _ERROR@GOTPCREL(%rip), %rax
movl (%rax), %ebx
movq _ERRCODE_DUPLICATE_OBJECT@GOTPCREL(%rip), %rax
movl (%rax), %edi
callq _errcode
movl 4(%r13), %esi
leaq L_.str.2(%rip), %rdi
callq _errmsg
movl %ebx, %edi
movl %eax, %esi
callq _ereport
LBB0_4:
movl (%r13), %esi
movl 8(%r13), %edi
callq _lookup_am_handler_func
movq %rax, %r14
shll $2, %r12d
movq -80(%rbp), %rbx ## 8-byte Reload
movq %rbx, %rdi
xorl %esi, %esi
movl %r12d, %edx
callq _memset
movq -56(%rbp), %rdx ## 8-byte Reload
shll $2, %edx
movq %rbx, %r12
movq -72(%rbp), %rdi ## 8-byte Reload
xorl %esi, %esi
## kill: def $edx killed $edx killed $rdx
callq _memset
movq _AmOidIndexId@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq _Anum_pg_am_oid@GOTPCREL(%rip), %rbx
movl (%rbx), %edx
movl -60(%rbp), %r15d ## 4-byte Reload
movl %r15d, %edi
callq _GetNewOidWithIndex
movq %rax, -56(%rbp) ## 8-byte Spill
movq %rax, %rdi
callq _ObjectIdGetDatum
movslq (%rbx), %rcx
movl %eax, -4(%r12,%rcx,4)
movq _namein@GOTPCREL(%rip), %rax
movl (%rax), %ebx
movl 4(%r13), %edi
callq _CStringGetDatum
movl %ebx, %edi
movl %eax, %esi
callq _DirectFunctionCall1
movq _Anum_pg_am_amname@GOTPCREL(%rip), %rcx
movslq (%rcx), %rcx
movl %eax, -4(%r12,%rcx,4)
movq %r14, %rdi
callq _ObjectIdGetDatum
movq _Anum_pg_am_amhandler@GOTPCREL(%rip), %rcx
movslq (%rcx), %rcx
movl %eax, -4(%r12,%rcx,4)
movl (%r13), %edi
callq _CharGetDatum
movq _Anum_pg_am_amtype@GOTPCREL(%rip), %rcx
movslq (%rcx), %rcx
movl %eax, -4(%r12,%rcx,4)
movl %r15d, %edi
callq _RelationGetDescr
movl %eax, %edi
movq %r12, %rsi
movq -72(%rbp), %rdx ## 8-byte Reload
callq _heap_form_tuple
movl %eax, %ebx
movl %r15d, %edi
movl %eax, %esi
callq _CatalogTupleInsert
movl %ebx, %edi
callq _heap_freetuple
movq _AccessMethodRelationId@GOTPCREL(%rip), %rax
movl (%rax), %eax
movq -96(%rbp), %rbx ## 8-byte Reload
movl %eax, 16(%rbx)
movq -56(%rbp), %rax ## 8-byte Reload
movq %rax, 8(%rbx)
movq $0, (%rbx)
movq _ProcedureRelationId@GOTPCREL(%rip), %rax
movl (%rax), %eax
movl %eax, -104(%rbp)
movq %r14, -112(%rbp)
movq $0, -120(%rbp)
movq _DEPENDENCY_NORMAL@GOTPCREL(%rip), %rax
movl (%rax), %edx
leaq -120(%rbp), %rsi
movq %rbx, %rdi
callq _recordDependencyOn
movq %rbx, %rdi
xorl %esi, %esi
callq _recordDependencyOnCurrentExtension
movq _RowExclusiveLock@GOTPCREL(%rip), %rax
movl (%rax), %esi
movl %r15d, %edi
callq _table_close
movq -88(%rbp), %rsp ## 8-byte Reload
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
cmpq -48(%rbp), %rax
jne LBB0_6
## %bb.5:
movq %rbx, %rax
leaq -40(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB0_6:
callq ___stack_chk_fail
.cfi_endproc
## -- End function
.comm _Natts_pg_am,4,2 ## @Natts_pg_am
.comm _AccessMethodRelationId,4,2 ## @AccessMethodRelationId
.comm _RowExclusiveLock,4,2 ## @RowExclusiveLock
.comm _ERROR,4,2 ## @ERROR
.comm _ERRCODE_INSUFFICIENT_PRIVILEGE,4,2 ## @ERRCODE_INSUFFICIENT_PRIVILEGE
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "permission denied to create access method \"%s\""
L_.str.1: ## @.str.1
.asciz "Must be superuser to create an access method."
.comm _AMNAME,4,2 ## @AMNAME
.comm _Anum_pg_am_oid,4,2 ## @Anum_pg_am_oid
.comm _ERRCODE_DUPLICATE_OBJECT,4,2 ## @ERRCODE_DUPLICATE_OBJECT
L_.str.2: ## @.str.2
.asciz "access method \"%s\" already exists"
.comm _AmOidIndexId,4,2 ## @AmOidIndexId
.comm _namein,4,2 ## @namein
.comm _Anum_pg_am_amname,4,2 ## @Anum_pg_am_amname
.comm _Anum_pg_am_amhandler,4,2 ## @Anum_pg_am_amhandler
.comm _Anum_pg_am_amtype,4,2 ## @Anum_pg_am_amtype
.comm _ProcedureRelationId,4,2 ## @ProcedureRelationId
.comm _DEPENDENCY_NORMAL,4,2 ## @DEPENDENCY_NORMAL
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _CreateAccessMethod ; -- Begin function CreateAccessMethod
.p2align 2
_CreateAccessMethod: ; @CreateAccessMethod
.cfi_startproc
; %bb.0:
stp x28, x27, [sp, #-96]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 96
stp x26, x25, [sp, #16] ; 16-byte Folded Spill
stp x24, x23, [sp, #32] ; 16-byte Folded Spill
stp x22, x21, [sp, #48] ; 16-byte Folded Spill
stp x20, x19, [sp, #64] ; 16-byte Folded Spill
stp x29, x30, [sp, #80] ; 16-byte Folded Spill
add x29, sp, #80
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
sub sp, sp, #48
mov x22, x0
mov x19, x8
mov x8, sp
stur x8, [x29, #-120] ; 8-byte Folded Spill
Lloh0:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh1:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh2:
ldr x8, [x8]
stur x8, [x29, #-88]
Lloh3:
adrp x12, _Natts_pg_am@GOTPAGE
Lloh4:
ldr x12, [x12, _Natts_pg_am@GOTPAGEOFF]
ldr w26, [x12]
lsl x8, x26, #2
mov x9, x8
Lloh5:
adrp x16, ___chkstk_darwin@GOTPAGE
Lloh6:
ldr x16, [x16, ___chkstk_darwin@GOTPAGEOFF]
blr x16
mov x9, sp
add x8, x8, #15
and x8, x8, #0x7fffffff0
sub x21, x9, x8
mov sp, x21
ldr w28, [x12]
lsl x8, x28, #2
mov x9, x8
Lloh7:
adrp x16, ___chkstk_darwin@GOTPAGE
Lloh8:
ldr x16, [x16, ___chkstk_darwin@GOTPAGEOFF]
blr x16
mov x9, sp
add x8, x8, #15
and x8, x8, #0x7fffffff0
sub x23, x9, x8
mov sp, x23
Lloh9:
adrp x8, _AccessMethodRelationId@GOTPAGE
Lloh10:
ldr x8, [x8, _AccessMethodRelationId@GOTPAGEOFF]
Lloh11:
ldr w0, [x8]
Lloh12:
adrp x8, _RowExclusiveLock@GOTPAGE
Lloh13:
ldr x8, [x8, _RowExclusiveLock@GOTPAGEOFF]
Lloh14:
ldr w1, [x8]
bl _table_open
mov x20, x0
bl _superuser
Lloh15:
adrp x8, _ERROR@GOTPAGE
Lloh16:
ldr x8, [x8, _ERROR@GOTPAGEOFF]
cbnz w0, LBB0_2
; %bb.1:
ldr w24, [x8]
Lloh17:
adrp x8, _ERRCODE_INSUFFICIENT_PRIVILEGE@GOTPAGE
Lloh18:
ldr x8, [x8, _ERRCODE_INSUFFICIENT_PRIVILEGE@GOTPAGEOFF]
Lloh19:
ldr w0, [x8]
bl _errcode
ldr w1, [x22, #4]
Lloh20:
adrp x0, l_.str@PAGE
Lloh21:
add x0, x0, l_.str@PAGEOFF
bl _errmsg
Lloh22:
adrp x0, l_.str.1@PAGE
Lloh23:
add x0, x0, l_.str.1@PAGEOFF
bl _errhint
mov x1, x0
mov x0, x24
bl _ereport
LBB0_2:
Lloh24:
adrp x8, _AMNAME@GOTPAGE
Lloh25:
ldr x8, [x8, _AMNAME@GOTPAGEOFF]
Lloh26:
ldr w24, [x8]
Lloh27:
adrp x27, _Anum_pg_am_oid@GOTPAGE
Lloh28:
ldr x27, [x27, _Anum_pg_am_oid@GOTPAGEOFF]
ldr w25, [x27]
ldr w0, [x22, #4]
bl _CStringGetDatum
mov x2, x0
mov x0, x24
mov x1, x25
bl _GetSysCacheOid1
bl _OidIsValid
cbz x0, LBB0_4
; %bb.3:
Lloh29:
adrp x8, _ERROR@GOTPAGE
Lloh30:
ldr x8, [x8, _ERROR@GOTPAGEOFF]
Lloh31:
ldr w24, [x8]
Lloh32:
adrp x8, _ERRCODE_DUPLICATE_OBJECT@GOTPAGE
Lloh33:
ldr x8, [x8, _ERRCODE_DUPLICATE_OBJECT@GOTPAGEOFF]
Lloh34:
ldr w0, [x8]
bl _errcode
ldr w1, [x22, #4]
Lloh35:
adrp x0, l_.str.2@PAGE
Lloh36:
add x0, x0, l_.str.2@PAGEOFF
bl _errmsg
mov x1, x0
mov x0, x24
bl _ereport
LBB0_4:
ldr w0, [x22, #8]
ldr w1, [x22]
bl _lookup_am_handler_func
mov x24, x0
lsl w2, w28, #2
mov x0, x23
mov w1, #0
bl _memset
lsl w2, w26, #2
mov x0, x21
mov w1, #0
bl _memset
Lloh37:
adrp x8, _AmOidIndexId@GOTPAGE
Lloh38:
ldr x8, [x8, _AmOidIndexId@GOTPAGEOFF]
Lloh39:
ldr w1, [x8]
ldr w2, [x27]
mov x0, x20
bl _GetNewOidWithIndex
mov x25, x0
bl _ObjectIdGetDatum
ldrsw x8, [x27]
add x8, x23, x8, lsl #2
stur w0, [x8, #-4]
Lloh40:
adrp x8, _namein@GOTPAGE
Lloh41:
ldr x8, [x8, _namein@GOTPAGEOFF]
Lloh42:
ldr w26, [x8]
ldr w0, [x22, #4]
bl _CStringGetDatum
mov x1, x0
mov x0, x26
bl _DirectFunctionCall1
Lloh43:
adrp x8, _Anum_pg_am_amname@GOTPAGE
Lloh44:
ldr x8, [x8, _Anum_pg_am_amname@GOTPAGEOFF]
Lloh45:
ldrsw x8, [x8]
add x8, x23, x8, lsl #2
stur w0, [x8, #-4]
mov x0, x24
bl _ObjectIdGetDatum
Lloh46:
adrp x8, _Anum_pg_am_amhandler@GOTPAGE
Lloh47:
ldr x8, [x8, _Anum_pg_am_amhandler@GOTPAGEOFF]
Lloh48:
ldrsw x8, [x8]
add x8, x23, x8, lsl #2
stur w0, [x8, #-4]
ldr w0, [x22]
bl _CharGetDatum
Lloh49:
adrp x8, _Anum_pg_am_amtype@GOTPAGE
Lloh50:
ldr x8, [x8, _Anum_pg_am_amtype@GOTPAGEOFF]
Lloh51:
ldrsw x8, [x8]
add x8, x23, x8, lsl #2
stur w0, [x8, #-4]
mov x0, x20
bl _RelationGetDescr
mov x1, x23
mov x2, x21
bl _heap_form_tuple
mov x21, x0
mov x0, x20
mov x1, x21
bl _CatalogTupleInsert
mov x0, x21
bl _heap_freetuple
Lloh52:
adrp x8, _AccessMethodRelationId@GOTPAGE
Lloh53:
ldr x8, [x8, _AccessMethodRelationId@GOTPAGEOFF]
Lloh54:
ldr w8, [x8]
str w8, [x19, #16]
stp xzr, x25, [x19]
Lloh55:
adrp x8, _ProcedureRelationId@GOTPAGE
Lloh56:
ldr x8, [x8, _ProcedureRelationId@GOTPAGEOFF]
Lloh57:
ldr w8, [x8]
stur w8, [x29, #-96]
stp xzr, x24, [x29, #-112]
Lloh58:
adrp x8, _DEPENDENCY_NORMAL@GOTPAGE
Lloh59:
ldr x8, [x8, _DEPENDENCY_NORMAL@GOTPAGEOFF]
Lloh60:
ldr w2, [x8]
sub x1, x29, #112
mov x0, x19
bl _recordDependencyOn
mov x0, x19
mov w1, #0
bl _recordDependencyOnCurrentExtension
Lloh61:
adrp x8, _RowExclusiveLock@GOTPAGE
Lloh62:
ldr x8, [x8, _RowExclusiveLock@GOTPAGEOFF]
Lloh63:
ldr w1, [x8]
mov x0, x20
bl _table_close
ldur x8, [x29, #-120] ; 8-byte Folded Reload
mov sp, x8
ldur x8, [x29, #-88]
Lloh64:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh65:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh66:
ldr x9, [x9]
cmp x9, x8
b.ne LBB0_6
; %bb.5:
sub sp, x29, #80
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
ldp x26, x25, [sp, #16] ; 16-byte Folded Reload
ldp x28, x27, [sp], #96 ; 16-byte Folded Reload
ret
LBB0_6:
bl ___stack_chk_fail
.loh AdrpLdrGot Lloh15, Lloh16
.loh AdrpLdrGotLdr Lloh12, Lloh13, Lloh14
.loh AdrpLdrGotLdr Lloh9, Lloh10, Lloh11
.loh AdrpLdrGot Lloh7, Lloh8
.loh AdrpLdrGot Lloh5, Lloh6
.loh AdrpLdrGot Lloh3, Lloh4
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpAdd Lloh22, Lloh23
.loh AdrpAdd Lloh20, Lloh21
.loh AdrpLdrGotLdr Lloh17, Lloh18, Lloh19
.loh AdrpLdrGot Lloh27, Lloh28
.loh AdrpLdrGotLdr Lloh24, Lloh25, Lloh26
.loh AdrpAdd Lloh35, Lloh36
.loh AdrpLdrGotLdr Lloh32, Lloh33, Lloh34
.loh AdrpLdrGotLdr Lloh29, Lloh30, Lloh31
.loh AdrpLdrGotLdr Lloh64, Lloh65, Lloh66
.loh AdrpLdrGotLdr Lloh61, Lloh62, Lloh63
.loh AdrpLdrGotLdr Lloh58, Lloh59, Lloh60
.loh AdrpLdrGotLdr Lloh55, Lloh56, Lloh57
.loh AdrpLdrGotLdr Lloh52, Lloh53, Lloh54
.loh AdrpLdrGotLdr Lloh49, Lloh50, Lloh51
.loh AdrpLdrGotLdr Lloh46, Lloh47, Lloh48
.loh AdrpLdrGotLdr Lloh43, Lloh44, Lloh45
.loh AdrpLdrGotLdr Lloh40, Lloh41, Lloh42
.loh AdrpLdrGotLdr Lloh37, Lloh38, Lloh39
.cfi_endproc
; -- End function
.comm _Natts_pg_am,4,2 ; @Natts_pg_am
.comm _AccessMethodRelationId,4,2 ; @AccessMethodRelationId
.comm _RowExclusiveLock,4,2 ; @RowExclusiveLock
.comm _ERROR,4,2 ; @ERROR
.comm _ERRCODE_INSUFFICIENT_PRIVILEGE,4,2 ; @ERRCODE_INSUFFICIENT_PRIVILEGE
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "permission denied to create access method \"%s\""
l_.str.1: ; @.str.1
.asciz "Must be superuser to create an access method."
.comm _AMNAME,4,2 ; @AMNAME
.comm _Anum_pg_am_oid,4,2 ; @Anum_pg_am_oid
.comm _ERRCODE_DUPLICATE_OBJECT,4,2 ; @ERRCODE_DUPLICATE_OBJECT
l_.str.2: ; @.str.2
.asciz "access method \"%s\" already exists"
.comm _AmOidIndexId,4,2 ; @AmOidIndexId
.comm _namein,4,2 ; @namein
.comm _Anum_pg_am_amname,4,2 ; @Anum_pg_am_amname
.comm _Anum_pg_am_amhandler,4,2 ; @Anum_pg_am_amhandler
.comm _Anum_pg_am_amtype,4,2 ; @Anum_pg_am_amtype
.comm _ProcedureRelationId,4,2 ; @ProcedureRelationId
.comm _DEPENDENCY_NORMAL,4,2 ; @DEPENDENCY_NORMAL
.subsections_via_symbols
| AnghaBench/postgres/src/backend/commands/extr_amcmds.c_CreateAccessMethod.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _svn_fs_fs__read_format_file ## -- Begin function svn_fs_fs__read_format_file
.p2align 4, 0x90
_svn_fs_fs__read_format_file: ## @svn_fs_fs__read_format_file
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
subq $16, %rsp
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq %rsi, %r14
movq (%rdi), %rbx
callq _path_format
leaq -28(%rbp), %rdi
leaq -24(%rbp), %rsi
leaq -20(%rbp), %rdx
movl %eax, %ecx
movq %r14, %r8
callq _read_format
movl %eax, %edi
callq _SVN_ERR
movl -28(%rbp), %eax
movl %eax, (%rbx)
movl -24(%rbp), %eax
movl %eax, 4(%rbx)
movl -20(%rbp), %eax
movl %eax, 8(%rbx)
movq _SVN_NO_ERROR@GOTPCREL(%rip), %rax
movq (%rax), %rax
addq $16, %rsp
popq %rbx
popq %r14
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _SVN_NO_ERROR,8,3 ## @SVN_NO_ERROR
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _svn_fs_fs__read_format_file ; -- Begin function svn_fs_fs__read_format_file
.p2align 2
_svn_fs_fs__read_format_file: ; @svn_fs_fs__read_format_file
.cfi_startproc
; %bb.0:
sub sp, sp, #48
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x1
ldr x20, [x0]
bl _path_format
mov x3, x0
add x0, sp, #12
add x1, sp, #8
add x2, sp, #4
mov x4, x19
bl _read_format
bl _SVN_ERR
ldr w8, [sp, #12]
str w8, [x20]
ldp w9, w8, [sp, #4]
stp w8, w9, [x20, #4]
Lloh0:
adrp x8, _SVN_NO_ERROR@GOTPAGE
Lloh1:
ldr x8, [x8, _SVN_NO_ERROR@GOTPAGEOFF]
Lloh2:
ldr x0, [x8]
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #48
ret
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _SVN_NO_ERROR,8,3 ; @SVN_NO_ERROR
.subsections_via_symbols
| AnghaBench/freebsd/contrib/subversion/subversion/libsvn_fs_fs/extr_fs_fs.c_svn_fs_fs__read_format_file.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _stb0899_read_reg ## -- Begin function stb0899_read_reg
.p2align 4, 0x90
_stb0899_read_reg: ## @stb0899_read_reg
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %esi, %ebx
movq %rdi, %r14
callq __stb0899_read_reg
movl %eax, %r15d
movl %ebx, %eax
andl $-1025, %eax ## imm = 0xFBFF
cmpl $62207, %eax ## imm = 0xF2FF
je LBB0_3
## %bb.1:
movl %ebx, %eax
orl $1024, %eax ## imm = 0x400
andl $65280, %eax ## imm = 0xFF00
cmpl $62976, %eax ## imm = 0xF600
jne LBB0_3
## %bb.2:
orl $255, %ebx
movq %r14, %rdi
movl %ebx, %esi
callq __stb0899_read_reg
LBB0_3:
movl %r15d, %eax
addq $8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _stb0899_read_reg ; -- Begin function stb0899_read_reg
.p2align 2
_stb0899_read_reg: ; @stb0899_read_reg
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x21, x1
mov x19, x0
bl __stb0899_read_reg
mov x20, x0
and w8, w21, #0xfffffbff
mov w9, #62207
cmp w8, w9
b.eq LBB0_3
; %bb.1:
and w8, w21, #0xff00
orr w8, w8, #0x400
mov w9, #62976
cmp w8, w9
b.ne LBB0_3
; %bb.2:
orr w1, w21, #0xff
mov x0, x19
bl __stb0899_read_reg
LBB0_3:
mov x0, x20
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
.subsections_via_symbols
| AnghaBench/linux/drivers/media/dvb-frontends/extr_stb0899_drv.c_stb0899_read_reg.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function tcp_pcap_take_cluster_reference
_tcp_pcap_take_cluster_reference: ## @tcp_pcap_take_cluster_reference
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq _tcp_pcap_clusters_referenced_cur@GOTPCREL(%rip), %rdi
movl $1, %esi
callq _atomic_fetchadd_int
movq _tcp_pcap_clusters_referenced_max@GOTPCREL(%rip), %rcx
cmpq (%rcx), %rax
jge LBB0_2
## %bb.1:
movq _TRUE@GOTPCREL(%rip), %rax
jmp LBB0_3
LBB0_2:
movq _tcp_pcap_clusters_referenced_cur@GOTPCREL(%rip), %rdi
movl $-1, %esi
callq _atomic_add_int
movq _FALSE@GOTPCREL(%rip), %rax
LBB0_3:
movl (%rax), %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _tcp_pcap_clusters_referenced_cur,4,2 ## @tcp_pcap_clusters_referenced_cur
.comm _tcp_pcap_clusters_referenced_max,8,3 ## @tcp_pcap_clusters_referenced_max
.comm _FALSE,4,2 ## @FALSE
.comm _TRUE,4,2 ## @TRUE
.no_dead_strip _tcp_pcap_take_cluster_reference
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function tcp_pcap_take_cluster_reference
_tcp_pcap_take_cluster_reference: ; @tcp_pcap_take_cluster_reference
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh0:
adrp x0, _tcp_pcap_clusters_referenced_cur@GOTPAGE
Lloh1:
ldr x0, [x0, _tcp_pcap_clusters_referenced_cur@GOTPAGEOFF]
mov w1, #1
bl _atomic_fetchadd_int
Lloh2:
adrp x8, _tcp_pcap_clusters_referenced_max@GOTPAGE
Lloh3:
ldr x8, [x8, _tcp_pcap_clusters_referenced_max@GOTPAGEOFF]
Lloh4:
ldr x8, [x8]
cmp x0, x8
b.ge LBB0_2
; %bb.1:
Lloh5:
adrp x8, _TRUE@GOTPAGE
Lloh6:
ldr x8, [x8, _TRUE@GOTPAGEOFF]
b LBB0_3
LBB0_2:
Lloh7:
adrp x0, _tcp_pcap_clusters_referenced_cur@GOTPAGE
Lloh8:
ldr x0, [x0, _tcp_pcap_clusters_referenced_cur@GOTPAGEOFF]
mov w1, #-1
bl _atomic_add_int
Lloh9:
adrp x8, _FALSE@GOTPAGE
Lloh10:
ldr x8, [x8, _FALSE@GOTPAGEOFF]
LBB0_3:
ldr w0, [x8]
ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
ret
.loh AdrpLdrGotLdr Lloh2, Lloh3, Lloh4
.loh AdrpLdrGot Lloh0, Lloh1
.loh AdrpLdrGot Lloh5, Lloh6
.loh AdrpLdrGot Lloh9, Lloh10
.loh AdrpLdrGot Lloh7, Lloh8
.cfi_endproc
; -- End function
.comm _tcp_pcap_clusters_referenced_cur,4,2 ; @tcp_pcap_clusters_referenced_cur
.comm _tcp_pcap_clusters_referenced_max,8,3 ; @tcp_pcap_clusters_referenced_max
.comm _FALSE,4,2 ; @FALSE
.comm _TRUE,4,2 ; @TRUE
.no_dead_strip _tcp_pcap_take_cluster_reference
.subsections_via_symbols
| AnghaBench/freebsd/sys/netinet/extr_tcp_pcap.c_tcp_pcap_take_cluster_reference.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.subsections_via_symbols
| the_stack_data/34511462.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function clk_pll_recalc_rate
_clk_pll_recalc_rate: ## @clk_pll_recalc_rate
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
movq %rsi, %rbx
callq _to_hb_clk
movl (%rax), %edi
callq _readl
movq %rax, %rdx
movq _HB_PLL_EXT_BYPASS@GOTPCREL(%rip), %rax
testq %rdx, (%rax)
jne LBB0_4
## %bb.1:
movq _HB_PLL_DIVF_MASK@GOTPCREL(%rip), %rax
movq (%rax), %rax
andq %rdx, %rax
movq _HB_PLL_DIVF_SHIFT@GOTPCREL(%rip), %rcx
movb (%rcx), %cl
shrq %cl, %rax
movq _HB_PLL_DIVQ_MASK@GOTPCREL(%rip), %rcx
andq (%rcx), %rdx
movq _HB_PLL_DIVQ_SHIFT@GOTPCREL(%rip), %rcx
movb (%rcx), %cl
shrq %cl, %rdx
incq %rax
movl $1, %esi
movl %edx, %ecx
shll %cl, %esi
imulq %rbx, %rax
movslq %esi, %rcx
movq %rax, %rdx
orq %rcx, %rdx
shrq $32, %rdx
je LBB0_2
## %bb.3:
xorl %edx, %edx
divq %rcx
movq %rax, %rbx
jmp LBB0_4
LBB0_2:
## kill: def $eax killed $eax killed $rax
xorl %edx, %edx
divl %esi
movl %eax, %ebx
LBB0_4:
movq %rbx, %rax
addq $8, %rsp
popq %rbx
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _HB_PLL_EXT_BYPASS,8,3 ## @HB_PLL_EXT_BYPASS
.comm _HB_PLL_DIVF_MASK,8,3 ## @HB_PLL_DIVF_MASK
.comm _HB_PLL_DIVF_SHIFT,8,3 ## @HB_PLL_DIVF_SHIFT
.comm _HB_PLL_DIVQ_MASK,8,3 ## @HB_PLL_DIVQ_MASK
.comm _HB_PLL_DIVQ_SHIFT,8,3 ## @HB_PLL_DIVQ_SHIFT
.no_dead_strip _clk_pll_recalc_rate
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function clk_pll_recalc_rate
_clk_pll_recalc_rate: ; @clk_pll_recalc_rate
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x1
bl _to_hb_clk
ldr w0, [x0]
bl _readl
Lloh0:
adrp x8, _HB_PLL_EXT_BYPASS@GOTPAGE
Lloh1:
ldr x8, [x8, _HB_PLL_EXT_BYPASS@GOTPAGEOFF]
Lloh2:
ldr x8, [x8]
tst x8, x0
b.ne LBB0_2
; %bb.1:
Lloh3:
adrp x8, _HB_PLL_DIVF_MASK@GOTPAGE
Lloh4:
ldr x8, [x8, _HB_PLL_DIVF_MASK@GOTPAGEOFF]
Lloh5:
ldr x8, [x8]
and x8, x8, x0
Lloh6:
adrp x9, _HB_PLL_DIVF_SHIFT@GOTPAGE
Lloh7:
ldr x9, [x9, _HB_PLL_DIVF_SHIFT@GOTPAGEOFF]
Lloh8:
ldr x9, [x9]
Lloh9:
adrp x10, _HB_PLL_DIVQ_MASK@GOTPAGE
Lloh10:
ldr x10, [x10, _HB_PLL_DIVQ_MASK@GOTPAGEOFF]
lsr x8, x8, x9
Lloh11:
ldr x9, [x10]
and x9, x9, x0
Lloh12:
adrp x10, _HB_PLL_DIVQ_SHIFT@GOTPAGE
Lloh13:
ldr x10, [x10, _HB_PLL_DIVQ_SHIFT@GOTPAGEOFF]
Lloh14:
ldr x10, [x10]
lsr x9, x9, x10
madd x8, x19, x8, x19
mov w10, #1
lsl w9, w10, w9
sxtw x9, w9
udiv x19, x8, x9
LBB0_2:
mov x0, x19
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGotLdr Lloh12, Lloh13, Lloh14
.loh AdrpLdrGotLdr Lloh9, Lloh10, Lloh11
.loh AdrpLdrGotLdr Lloh6, Lloh7, Lloh8
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.cfi_endproc
; -- End function
.comm _HB_PLL_EXT_BYPASS,8,3 ; @HB_PLL_EXT_BYPASS
.comm _HB_PLL_DIVF_MASK,8,3 ; @HB_PLL_DIVF_MASK
.comm _HB_PLL_DIVF_SHIFT,8,3 ; @HB_PLL_DIVF_SHIFT
.comm _HB_PLL_DIVQ_MASK,8,3 ; @HB_PLL_DIVQ_MASK
.comm _HB_PLL_DIVQ_SHIFT,8,3 ; @HB_PLL_DIVQ_SHIFT
.no_dead_strip _clk_pll_recalc_rate
.subsections_via_symbols
| AnghaBench/linux/drivers/clk/extr_clk-highbank.c_clk_pll_recalc_rate.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _grub_file_read ## -- Begin function grub_file_read
.p2align 4, 0x90
_grub_file_read: ## @grub_file_read
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
movq 8(%rdi), %rax
subq (%rdi), %rax
jge LBB0_2
## %bb.1:
movq _GRUB_ERR_OUT_OF_RANGE@GOTPCREL(%rip), %rax
movl (%rax), %edi
leaq L_.str(%rip), %rsi
callq _grub_error
movl $-1, %eax
jmp LBB0_9
LBB0_2:
movq %rdi, %rbx
testl %edx, %edx
je LBB0_4
## %bb.3:
movslq %edx, %rcx
cmpq %rcx, %rax
jge LBB0_5
LBB0_4:
movl %eax, %edx
LBB0_5:
movl %edx, %ecx
shrl $31, %ecx
## kill: def $cl killed $cl killed $ecx
sarl %cl, %edx
testl %edx, %edx
je LBB0_6
## %bb.7:
movq 16(%rbx), %rax
movq %rbx, %rdi
callq *(%rax)
testl %eax, %eax
jle LBB0_9
## %bb.8:
movl %eax, %ecx
addq %rcx, (%rbx)
jmp LBB0_9
LBB0_6:
xorl %eax, %eax
LBB0_9:
addq $8, %rsp
popq %rbx
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _GRUB_ERR_OUT_OF_RANGE,4,2 ## @GRUB_ERR_OUT_OF_RANGE
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "attempt to read past the end of file"
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _grub_file_read ; -- Begin function grub_file_read
.p2align 2
_grub_file_read: ; @grub_file_read
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
ldp x8, x9, [x0]
subs x8, x9, x8
b.ge LBB0_2
; %bb.1:
Lloh0:
adrp x8, _GRUB_ERR_OUT_OF_RANGE@GOTPAGE
Lloh1:
ldr x8, [x8, _GRUB_ERR_OUT_OF_RANGE@GOTPAGEOFF]
Lloh2:
ldr w0, [x8]
Lloh3:
adrp x1, l_.str@PAGE
Lloh4:
add x1, x1, l_.str@PAGEOFF
bl _grub_error
mov w0, #-1
b LBB0_9
LBB0_2:
mov x19, x0
cbz w2, LBB0_4
; %bb.3:
cmp x8, w2, sxtw
b.ge LBB0_5
LBB0_4:
mov x2, x8
LBB0_5:
lsr w8, w2, #31
asr w2, w2, w8
cbz w2, LBB0_8
; %bb.6:
ldr x8, [x19, #16]
ldr x8, [x8]
mov x0, x19
blr x8
cmp w0, #1
b.lt LBB0_9
; %bb.7:
ldr x8, [x19]
add x8, x8, w0, uxtw
str x8, [x19]
b LBB0_9
LBB0_8:
mov w0, #0
LBB0_9:
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.loh AdrpAdd Lloh3, Lloh4
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _GRUB_ERR_OUT_OF_RANGE,4,2 ; @GRUB_ERR_OUT_OF_RANGE
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "attempt to read past the end of file"
.subsections_via_symbols
| AnghaBench/radare2/shlr/grub/kern/extr_file.c_grub_file_read.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function es1371_quirk_lookup
_es1371_quirk_lookup: ## @es1371_quirk_lookup
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq _PCI_ANY_ID@GOTPCREL(%rip), %rax
movzwl (%rax), %r9d
movzwl (%rsi), %ecx
xorl %eax, %eax
cmpw %r9w, %cx
jne LBB0_1
LBB0_7:
popq %rbp
retq
LBB0_1:
movq 8(%rdi), %r8
movzwl (%r8), %edx
addq $24, %rsi
jmp LBB0_2
.p2align 4, 0x90
LBB0_6: ## in Loop: Header=BB0_2 Depth=1
movzwl (%rsi), %ecx
addq $24, %rsi
cmpw %r9w, %cx
je LBB0_7
LBB0_2: ## =>This Inner Loop Header: Depth=1
cmpw %cx, %dx
jne LBB0_6
## %bb.3: ## in Loop: Header=BB0_2 Depth=1
movq 8(%r8), %rcx
cmpq -16(%rsi), %rcx
jne LBB0_6
## %bb.4: ## in Loop: Header=BB0_2 Depth=1
movq (%rdi), %rcx
cmpq -8(%rsi), %rcx
jne LBB0_6
## %bb.5:
movl $1, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _PCI_ANY_ID,8,3 ## @PCI_ANY_ID
.no_dead_strip _es1371_quirk_lookup
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function es1371_quirk_lookup
_es1371_quirk_lookup: ; @es1371_quirk_lookup
.cfi_startproc
; %bb.0:
Lloh0:
adrp x8, _PCI_ANY_ID@GOTPAGE
Lloh1:
ldr x8, [x8, _PCI_ANY_ID@GOTPAGEOFF]
ldrh w8, [x8]
ldrh w12, [x1]
cmp w12, w8
b.ne LBB0_2
LBB0_1:
mov w0, #0
ret
LBB0_2:
ldr x9, [x0, #8]
ldrh w10, [x9]
add x11, x1, #24
b LBB0_4
LBB0_3: ; in Loop: Header=BB0_4 Depth=1
ldrh w12, [x11], #24
cmp w12, w8
b.eq LBB0_1
LBB0_4: ; =>This Inner Loop Header: Depth=1
cmp w10, w12
b.ne LBB0_3
; %bb.5: ; in Loop: Header=BB0_4 Depth=1
ldr x12, [x9, #8]
ldur x13, [x11, #-16]
cmp x12, x13
b.ne LBB0_3
; %bb.6: ; in Loop: Header=BB0_4 Depth=1
ldr x12, [x0]
ldur x13, [x11, #-8]
cmp x12, x13
b.ne LBB0_3
; %bb.7:
mov w0, #1
ret
.loh AdrpLdrGot Lloh0, Lloh1
.cfi_endproc
; -- End function
.comm _PCI_ANY_ID,8,3 ; @PCI_ANY_ID
.no_dead_strip _es1371_quirk_lookup
.subsections_via_symbols
| AnghaBench/linux/sound/pci/extr_ens1370.c_es1371_quirk_lookup.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _thread_set_voucher_name ## -- Begin function thread_set_voucher_name
.p2align 4, 0x90
_thread_set_voucher_name: ## @thread_set_voucher_name
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $40, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdi, %r14
xorl %eax, %eax
callq _current_thread
movq %rax, %rbx
movq _IPC_VOUCHER_NULL@GOTPCREL(%rip), %r12
movq (%r12), %r15
movq $0, -48(%rbp)
movq $0, -64(%rbp)
movq _MACH_PORT_DEAD@GOTPCREL(%rip), %rax
cmpq %r14, (%rax)
jne LBB0_2
## %bb.1:
movq _KERN_INVALID_RIGHT@GOTPCREL(%rip), %rbx
jmp LBB0_7
LBB0_2:
movq %r14, %rdi
callq _MACH_PORT_VALID
testq %rax, %rax
je LBB0_5
## %bb.3:
movq %r14, %rdi
callq _convert_port_name_to_voucher
movq %rax, %r15
cmpq %rax, (%r12)
jne LBB0_5
## %bb.4:
movq _KERN_INVALID_ARGUMENT@GOTPCREL(%rip), %rbx
jmp LBB0_7
LBB0_5:
leaq -48(%rbp), %rsi
leaq -64(%rbp), %rdx
movq %r15, %rdi
callq _bank_get_bank_ledger_and_thread_group
movq %rbx, %rdi
callq _thread_mtx_lock
movq (%rbx), %rax
movq %rax, -56(%rbp) ## 8-byte Spill
movq %r14, 8(%rbx)
movq %r15, (%rbx)
movq %rbx, %rdi
callq _thread_mtx_unlock
movq -48(%rbp), %rsi
movq %rbx, %rdi
callq _bank_swap_thread_bank_ledger
movq _KDEBUG_TRACE@GOTPCREL(%rip), %rax
movl (%rax), %r13d
movq _DBG_MACH_IPC@GOTPCREL(%rip), %rax
movl (%rax), %edi
movq _MACH_THREAD_SET_VOUCHER@GOTPCREL(%rip), %rax
movl (%rax), %esi
callq _MACHDBG_CODE
movl %eax, %r12d
movq _DBG_FUNC_NONE@GOTPCREL(%rip), %rax
orl (%rax), %r12d
movq %rbx, %rdi
callq _thread_tid
movq %rax, %rbx
movq %r15, %rdi
callq _VM_KERNEL_ADDRPERM
movl $0, (%rsp)
movl %r13d, %edi
movl %r12d, %esi
movq %rbx, %rdx
movq %r14, %rcx
movl %eax, %r8d
movl $1, %r9d
callq _KERNEL_DEBUG_CONSTANT_IST
movq -56(%rbp), %rdi ## 8-byte Reload
movq _KERN_SUCCESS@GOTPCREL(%rip), %rbx
movq _IPC_VOUCHER_NULL@GOTPCREL(%rip), %rax
cmpq %rdi, (%rax)
je LBB0_7
## %bb.6:
callq _ipc_voucher_release
LBB0_7:
movl (%rbx), %eax
addq $40, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _IPC_VOUCHER_NULL,8,3 ## @IPC_VOUCHER_NULL
.comm _MACH_PORT_DEAD,8,3 ## @MACH_PORT_DEAD
.comm _KERN_INVALID_RIGHT,4,2 ## @KERN_INVALID_RIGHT
.comm _KERN_INVALID_ARGUMENT,4,2 ## @KERN_INVALID_ARGUMENT
.comm _KDEBUG_TRACE,4,2 ## @KDEBUG_TRACE
.comm _DBG_MACH_IPC,4,2 ## @DBG_MACH_IPC
.comm _MACH_THREAD_SET_VOUCHER,4,2 ## @MACH_THREAD_SET_VOUCHER
.comm _DBG_FUNC_NONE,4,2 ## @DBG_FUNC_NONE
.comm _KERN_SUCCESS,4,2 ## @KERN_SUCCESS
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _thread_set_voucher_name ; -- Begin function thread_set_voucher_name
.p2align 2
_thread_set_voucher_name: ; @thread_set_voucher_name
.cfi_startproc
; %bb.0:
sub sp, sp, #96
.cfi_def_cfa_offset 96
stp x26, x25, [sp, #16] ; 16-byte Folded Spill
stp x24, x23, [sp, #32] ; 16-byte Folded Spill
stp x22, x21, [sp, #48] ; 16-byte Folded Spill
stp x20, x19, [sp, #64] ; 16-byte Folded Spill
stp x29, x30, [sp, #80] ; 16-byte Folded Spill
add x29, sp, #80
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
mov x19, x0
bl _current_thread
Lloh0:
adrp x25, _IPC_VOUCHER_NULL@GOTPAGE
Lloh1:
ldr x25, [x25, _IPC_VOUCHER_NULL@GOTPAGEOFF]
ldr x21, [x25]
stp xzr, xzr, [sp]
Lloh2:
adrp x8, _MACH_PORT_DEAD@GOTPAGE
Lloh3:
ldr x8, [x8, _MACH_PORT_DEAD@GOTPAGEOFF]
Lloh4:
ldr x8, [x8]
cmp x8, x19
b.ne LBB0_2
; %bb.1:
Lloh5:
adrp x8, _KERN_INVALID_RIGHT@GOTPAGE
Lloh6:
ldr x8, [x8, _KERN_INVALID_RIGHT@GOTPAGEOFF]
b LBB0_8
LBB0_2:
mov x20, x0
mov x0, x19
bl _MACH_PORT_VALID
cbz x0, LBB0_5
; %bb.3:
mov x0, x19
bl _convert_port_name_to_voucher
mov x21, x0
ldr x8, [x25]
cmp x8, x0
b.ne LBB0_5
; %bb.4:
Lloh7:
adrp x8, _KERN_INVALID_ARGUMENT@GOTPAGE
Lloh8:
ldr x8, [x8, _KERN_INVALID_ARGUMENT@GOTPAGEOFF]
b LBB0_8
LBB0_5:
add x1, sp, #8
mov x2, sp
mov x0, x21
bl _bank_get_bank_ledger_and_thread_group
mov x0, x20
bl _thread_mtx_lock
ldr x22, [x20]
stp x21, x19, [x20]
mov x0, x20
bl _thread_mtx_unlock
ldr x1, [sp, #8]
mov x0, x20
bl _bank_swap_thread_bank_ledger
Lloh9:
adrp x8, _KDEBUG_TRACE@GOTPAGE
Lloh10:
ldr x8, [x8, _KDEBUG_TRACE@GOTPAGEOFF]
Lloh11:
ldr w23, [x8]
Lloh12:
adrp x8, _DBG_MACH_IPC@GOTPAGE
Lloh13:
ldr x8, [x8, _DBG_MACH_IPC@GOTPAGEOFF]
Lloh14:
ldr w0, [x8]
Lloh15:
adrp x8, _MACH_THREAD_SET_VOUCHER@GOTPAGE
Lloh16:
ldr x8, [x8, _MACH_THREAD_SET_VOUCHER@GOTPAGEOFF]
Lloh17:
ldr w1, [x8]
bl _MACHDBG_CODE
Lloh18:
adrp x8, _DBG_FUNC_NONE@GOTPAGE
Lloh19:
ldr x8, [x8, _DBG_FUNC_NONE@GOTPAGEOFF]
Lloh20:
ldr w8, [x8]
orr w24, w8, w0
mov x0, x20
bl _thread_tid
mov x20, x0
mov x0, x21
bl _VM_KERNEL_ADDRPERM
mov x4, x0
mov x0, x23
mov x1, x24
mov x2, x20
mov x3, x19
mov w5, #1
mov w6, #0
bl _KERNEL_DEBUG_CONSTANT_IST
ldr x8, [x25]
cmp x8, x22
b.eq LBB0_7
; %bb.6:
mov x0, x22
bl _ipc_voucher_release
LBB0_7:
Lloh21:
adrp x8, _KERN_SUCCESS@GOTPAGE
Lloh22:
ldr x8, [x8, _KERN_SUCCESS@GOTPAGEOFF]
LBB0_8:
ldr w0, [x8]
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
ldp x26, x25, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #96
ret
.loh AdrpLdrGotLdr Lloh2, Lloh3, Lloh4
.loh AdrpLdrGot Lloh0, Lloh1
.loh AdrpLdrGot Lloh5, Lloh6
.loh AdrpLdrGot Lloh7, Lloh8
.loh AdrpLdrGotLdr Lloh18, Lloh19, Lloh20
.loh AdrpLdrGotLdr Lloh15, Lloh16, Lloh17
.loh AdrpLdrGotLdr Lloh12, Lloh13, Lloh14
.loh AdrpLdrGotLdr Lloh9, Lloh10, Lloh11
.loh AdrpLdrGot Lloh21, Lloh22
.cfi_endproc
; -- End function
.comm _IPC_VOUCHER_NULL,8,3 ; @IPC_VOUCHER_NULL
.comm _MACH_PORT_DEAD,8,3 ; @MACH_PORT_DEAD
.comm _KERN_INVALID_RIGHT,4,2 ; @KERN_INVALID_RIGHT
.comm _KERN_INVALID_ARGUMENT,4,2 ; @KERN_INVALID_ARGUMENT
.comm _KDEBUG_TRACE,4,2 ; @KDEBUG_TRACE
.comm _DBG_MACH_IPC,4,2 ; @DBG_MACH_IPC
.comm _MACH_THREAD_SET_VOUCHER,4,2 ; @MACH_THREAD_SET_VOUCHER
.comm _DBG_FUNC_NONE,4,2 ; @DBG_FUNC_NONE
.comm _KERN_SUCCESS,4,2 ; @KERN_SUCCESS
.subsections_via_symbols
| AnghaBench/darwin-xnu/osfmk/kern/extr_thread.c_thread_set_voucher_name.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function add_1mod
_add_1mod: ## @add_1mod
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
movl %edi, %ebx
movl %esi, %edi
callq _is_ereg
xorl %ecx, %ecx
testq %rax, %rax
setne %cl
orl %ebx, %ecx
movl %ecx, %eax
addq $8, %rsp
popq %rbx
popq %rbp
retq
.cfi_endproc
## -- End function
.no_dead_strip _add_1mod
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function add_1mod
_add_1mod: ; @add_1mod
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x0
mov x0, x1
bl _is_ereg
cmp x0, #0
cset w8, ne
orr w0, w8, w19
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
.no_dead_strip _add_1mod
.subsections_via_symbols
| AnghaBench/linux/arch/x86/net/extr_bpf_jit_comp.c_add_1mod.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function Opcode_ult_s_Slot_inst_encode
_Opcode_ult_s_Slot_inst_encode: ## @Opcode_ult_s_Slot_inst_encode
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movl $5963776, (%rdi) ## imm = 0x5B0000
popq %rbp
retq
.cfi_endproc
## -- End function
.no_dead_strip _Opcode_ult_s_Slot_inst_encode
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function Opcode_ult_s_Slot_inst_encode
_Opcode_ult_s_Slot_inst_encode: ; @Opcode_ult_s_Slot_inst_encode
.cfi_startproc
; %bb.0:
mov w8, #5963776
str w8, [x0]
ret
.cfi_endproc
; -- End function
.no_dead_strip _Opcode_ult_s_Slot_inst_encode
.subsections_via_symbols
| AnghaBench/radare2/libr/asm/arch/xtensa/gnu/extr_xtensa-modules.c_Opcode_ult_s_Slot_inst_encode.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function w9968cf_sensor_update_settings
_w9968cf_sensor_update_settings: ## @w9968cf_sensor_update_settings
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
movq %rdi, %rbx
movq _OVCAMCHIP_CID_AUTOBRIGHT@GOTPCREL(%rip), %rax
movl (%rax), %esi
movl 20(%rdi), %edx
callq _w9968cf_sensor_set_control
testl %eax, %eax
jne LBB0_5
## %bb.1:
movq _OVCAMCHIP_CID_AUTOEXP@GOTPCREL(%rip), %rax
movl (%rax), %esi
movl 16(%rbx), %edx
movq %rbx, %rdi
callq _w9968cf_sensor_set_control
testl %eax, %eax
jne LBB0_5
## %bb.2:
movq _OVCAMCHIP_CID_BANDFILT@GOTPCREL(%rip), %rax
movl (%rax), %esi
movl 12(%rbx), %edx
movq %rbx, %rdi
callq _w9968cf_sensor_set_control
testl %eax, %eax
jne LBB0_5
## %bb.3:
movq _OVCAMCHIP_CID_FREQ@GOTPCREL(%rip), %rax
movl (%rax), %esi
movl 8(%rbx), %edx
movq %rbx, %rdi
callq _w9968cf_sensor_set_control
testl %eax, %eax
jne LBB0_5
## %bb.4:
movq _OVCAMCHIP_CID_BACKLIGHT@GOTPCREL(%rip), %rax
movl (%rax), %esi
movl 4(%rbx), %edx
movq %rbx, %rdi
callq _w9968cf_sensor_set_control
testl %eax, %eax
je LBB0_6
LBB0_5:
addq $8, %rsp
popq %rbx
popq %rbp
retq
LBB0_6:
movq _OVCAMCHIP_CID_MIRROR@GOTPCREL(%rip), %rax
movl (%rax), %esi
movl (%rbx), %edx
movq %rbx, %rdi
addq $8, %rsp
popq %rbx
popq %rbp
jmp _w9968cf_sensor_set_control ## TAILCALL
.cfi_endproc
## -- End function
.comm _OVCAMCHIP_CID_AUTOBRIGHT,4,2 ## @OVCAMCHIP_CID_AUTOBRIGHT
.comm _OVCAMCHIP_CID_AUTOEXP,4,2 ## @OVCAMCHIP_CID_AUTOEXP
.comm _OVCAMCHIP_CID_BANDFILT,4,2 ## @OVCAMCHIP_CID_BANDFILT
.comm _OVCAMCHIP_CID_FREQ,4,2 ## @OVCAMCHIP_CID_FREQ
.comm _OVCAMCHIP_CID_BACKLIGHT,4,2 ## @OVCAMCHIP_CID_BACKLIGHT
.comm _OVCAMCHIP_CID_MIRROR,4,2 ## @OVCAMCHIP_CID_MIRROR
.no_dead_strip _w9968cf_sensor_update_settings
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function w9968cf_sensor_update_settings
_w9968cf_sensor_update_settings: ; @w9968cf_sensor_update_settings
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x0
Lloh0:
adrp x8, _OVCAMCHIP_CID_AUTOBRIGHT@GOTPAGE
Lloh1:
ldr x8, [x8, _OVCAMCHIP_CID_AUTOBRIGHT@GOTPAGEOFF]
Lloh2:
ldr w1, [x8]
ldr w2, [x0, #20]
bl _w9968cf_sensor_set_control
cbnz w0, LBB0_5
; %bb.1:
Lloh3:
adrp x8, _OVCAMCHIP_CID_AUTOEXP@GOTPAGE
Lloh4:
ldr x8, [x8, _OVCAMCHIP_CID_AUTOEXP@GOTPAGEOFF]
Lloh5:
ldr w1, [x8]
ldr w2, [x19, #16]
mov x0, x19
bl _w9968cf_sensor_set_control
cbnz w0, LBB0_5
; %bb.2:
Lloh6:
adrp x8, _OVCAMCHIP_CID_BANDFILT@GOTPAGE
Lloh7:
ldr x8, [x8, _OVCAMCHIP_CID_BANDFILT@GOTPAGEOFF]
Lloh8:
ldr w1, [x8]
ldr w2, [x19, #12]
mov x0, x19
bl _w9968cf_sensor_set_control
cbnz w0, LBB0_5
; %bb.3:
Lloh9:
adrp x8, _OVCAMCHIP_CID_FREQ@GOTPAGE
Lloh10:
ldr x8, [x8, _OVCAMCHIP_CID_FREQ@GOTPAGEOFF]
Lloh11:
ldr w1, [x8]
ldr w2, [x19, #8]
mov x0, x19
bl _w9968cf_sensor_set_control
cbnz w0, LBB0_5
; %bb.4:
Lloh12:
adrp x8, _OVCAMCHIP_CID_BACKLIGHT@GOTPAGE
Lloh13:
ldr x8, [x8, _OVCAMCHIP_CID_BACKLIGHT@GOTPAGEOFF]
Lloh14:
ldr w1, [x8]
ldr w2, [x19, #4]
mov x0, x19
bl _w9968cf_sensor_set_control
cbz w0, LBB0_6
LBB0_5:
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
LBB0_6:
Lloh15:
adrp x8, _OVCAMCHIP_CID_MIRROR@GOTPAGE
Lloh16:
ldr x8, [x8, _OVCAMCHIP_CID_MIRROR@GOTPAGEOFF]
Lloh17:
ldr w1, [x8]
ldr w2, [x19]
mov x0, x19
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
b _w9968cf_sensor_set_control
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGotLdr Lloh6, Lloh7, Lloh8
.loh AdrpLdrGotLdr Lloh9, Lloh10, Lloh11
.loh AdrpLdrGotLdr Lloh12, Lloh13, Lloh14
.loh AdrpLdrGotLdr Lloh15, Lloh16, Lloh17
.cfi_endproc
; -- End function
.comm _OVCAMCHIP_CID_AUTOBRIGHT,4,2 ; @OVCAMCHIP_CID_AUTOBRIGHT
.comm _OVCAMCHIP_CID_AUTOEXP,4,2 ; @OVCAMCHIP_CID_AUTOEXP
.comm _OVCAMCHIP_CID_BANDFILT,4,2 ; @OVCAMCHIP_CID_BANDFILT
.comm _OVCAMCHIP_CID_FREQ,4,2 ; @OVCAMCHIP_CID_FREQ
.comm _OVCAMCHIP_CID_BACKLIGHT,4,2 ; @OVCAMCHIP_CID_BACKLIGHT
.comm _OVCAMCHIP_CID_MIRROR,4,2 ; @OVCAMCHIP_CID_MIRROR
.no_dead_strip _w9968cf_sensor_update_settings
.subsections_via_symbols
| AnghaBench/fastsocket/kernel/drivers/media/video/extr_w9968cf.c_w9968cf_sensor_update_settings.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function falcon_mdio_write
_falcon_mdio_write: ## @falcon_mdio_write
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $40, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %r8d, %r15d
movl %ecx, %r12d
movl %edx, %r13d
movl %esi, %r14d
callq _netdev_priv
movq %rax, %rdi
movq 8(%rax), %rbx
movq _hw@GOTPCREL(%rip), %rax
movl (%rax), %esi
movl (%rdi), %edx
leaq L_.str(%rip), %rcx
movl %r14d, -48(%rbp) ## 4-byte Spill
movl %r14d, %r8d
movq %r12, %r14
movq %rdi, %r12
movl %r13d, -52(%rbp) ## 4-byte Spill
movl %r13d, %r9d
movq %r15, -64(%rbp) ## 8-byte Spill
pushq %r15
pushq %r14
callq _netif_vdbg
addq $16, %rsp
movq %rbx, -72(%rbp) ## 8-byte Spill
movq %rbx, %rdi
callq _mutex_lock
movq %r12, %rdi
callq _falcon_gmii_wait
movl %eax, %r15d
testl %eax, %eax
jne LBB0_3
## %bb.1:
movq _FRF_AB_MD_PHY_ADR@GOTPCREL(%rip), %rax
movl (%rax), %esi
movl %r14d, %edx
callq _EF4_POPULATE_OWORD_1
movq _FR_AB_MD_PHY_ADR@GOTPCREL(%rip), %rax
movl (%rax), %edx
leaq -44(%rbp), %rbx
movq %r12, %rdi
movq %rbx, %rsi
callq _ef4_writeo
movl -44(%rbp), %edi
movq _FRF_AB_MD_PRT_ADR@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq _FRF_AB_MD_DEV_ADR@GOTPCREL(%rip), %rax
movl (%rax), %ecx
movl -48(%rbp), %edx ## 4-byte Reload
movl -52(%rbp), %r8d ## 4-byte Reload
callq _EF4_POPULATE_OWORD_2
movq _FR_AB_MD_ID@GOTPCREL(%rip), %rax
movl (%rax), %edx
movq %r12, %rdi
movq %rbx, %rsi
callq _ef4_writeo
movl -44(%rbp), %edi
movq _FRF_AB_MD_TXD@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq -64(%rbp), %rdx ## 8-byte Reload
## kill: def $edx killed $edx killed $rdx
callq _EF4_POPULATE_OWORD_1
movq _FR_AB_MD_TXD@GOTPCREL(%rip), %rax
movl (%rax), %edx
movq %r12, %rdi
movq %rbx, %rsi
callq _ef4_writeo
movl -44(%rbp), %edi
movq _FRF_AB_MD_WRC@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq _FRF_AB_MD_GC@GOTPCREL(%rip), %rax
movl (%rax), %ecx
xorl %r15d, %r15d
movl $1, %edx
xorl %r8d, %r8d
callq _EF4_POPULATE_OWORD_2
movq _FR_AB_MD_CS@GOTPCREL(%rip), %r13
movl (%r13), %edx
movq %r12, %rdi
movq %rbx, %rsi
callq _ef4_writeo
movq %r12, %rdi
callq _falcon_gmii_wait
testl %eax, %eax
je LBB0_3
## %bb.2:
movl %eax, %r14d
movl -44(%rbp), %edi
movq _FRF_AB_MD_WRC@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq _FRF_AB_MD_GC@GOTPCREL(%rip), %rax
movl (%rax), %ecx
xorl %edx, %edx
movl $1, %r8d
callq _EF4_POPULATE_OWORD_2
movl (%r13), %edx
leaq -44(%rbp), %rsi
movq %r12, %rdi
callq _ef4_writeo
movl $10, %edi
callq _udelay
movl %r14d, %r15d
LBB0_3:
movq -72(%rbp), %rdi ## 8-byte Reload
callq _mutex_unlock
movl %r15d, %eax
addq $40, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _hw,4,2 ## @hw
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "writing MDIO %d register %d.%d with 0x%04x\n"
.comm _FRF_AB_MD_PHY_ADR,4,2 ## @FRF_AB_MD_PHY_ADR
.comm _FR_AB_MD_PHY_ADR,4,2 ## @FR_AB_MD_PHY_ADR
.comm _FRF_AB_MD_PRT_ADR,4,2 ## @FRF_AB_MD_PRT_ADR
.comm _FRF_AB_MD_DEV_ADR,4,2 ## @FRF_AB_MD_DEV_ADR
.comm _FR_AB_MD_ID,4,2 ## @FR_AB_MD_ID
.comm _FRF_AB_MD_TXD,4,2 ## @FRF_AB_MD_TXD
.comm _FR_AB_MD_TXD,4,2 ## @FR_AB_MD_TXD
.comm _FRF_AB_MD_WRC,4,2 ## @FRF_AB_MD_WRC
.comm _FRF_AB_MD_GC,4,2 ## @FRF_AB_MD_GC
.comm _FR_AB_MD_CS,4,2 ## @FR_AB_MD_CS
.no_dead_strip _falcon_mdio_write
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function falcon_mdio_write
_falcon_mdio_write: ; @falcon_mdio_write
.cfi_startproc
; %bb.0:
sub sp, sp, #96
.cfi_def_cfa_offset 96
stp x26, x25, [sp, #16] ; 16-byte Folded Spill
stp x24, x23, [sp, #32] ; 16-byte Folded Spill
stp x22, x21, [sp, #48] ; 16-byte Folded Spill
stp x20, x19, [sp, #64] ; 16-byte Folded Spill
stp x29, x30, [sp, #80] ; 16-byte Folded Spill
add x29, sp, #80
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
mov x21, x4
mov x24, x3
mov x22, x2
mov x23, x1
bl _netdev_priv
mov x20, x0
ldr x19, [x0, #8]
Lloh0:
adrp x8, _hw@GOTPAGE
Lloh1:
ldr x8, [x8, _hw@GOTPAGEOFF]
Lloh2:
ldr w1, [x8]
ldr w2, [x0]
Lloh3:
adrp x3, l_.str@PAGE
Lloh4:
add x3, x3, l_.str@PAGEOFF
mov x4, x23
mov x5, x22
mov x6, x24
mov x7, x21
bl _netif_vdbg
mov x0, x19
bl _mutex_lock
mov x0, x20
bl _falcon_gmii_wait
mov x25, x0
cbnz w0, LBB0_3
; %bb.1:
Lloh5:
adrp x8, _FRF_AB_MD_PHY_ADR@GOTPAGE
Lloh6:
ldr x8, [x8, _FRF_AB_MD_PHY_ADR@GOTPAGEOFF]
Lloh7:
ldr w1, [x8]
mov x2, x24
bl _EF4_POPULATE_OWORD_1
Lloh8:
adrp x8, _FR_AB_MD_PHY_ADR@GOTPAGE
Lloh9:
ldr x8, [x8, _FR_AB_MD_PHY_ADR@GOTPAGEOFF]
Lloh10:
ldr w2, [x8]
add x1, sp, #12
mov x0, x20
bl _ef4_writeo
ldr w0, [sp, #12]
Lloh11:
adrp x8, _FRF_AB_MD_PRT_ADR@GOTPAGE
Lloh12:
ldr x8, [x8, _FRF_AB_MD_PRT_ADR@GOTPAGEOFF]
Lloh13:
ldr w1, [x8]
Lloh14:
adrp x8, _FRF_AB_MD_DEV_ADR@GOTPAGE
Lloh15:
ldr x8, [x8, _FRF_AB_MD_DEV_ADR@GOTPAGEOFF]
Lloh16:
ldr w3, [x8]
mov x2, x23
mov x4, x22
bl _EF4_POPULATE_OWORD_2
Lloh17:
adrp x8, _FR_AB_MD_ID@GOTPAGE
Lloh18:
ldr x8, [x8, _FR_AB_MD_ID@GOTPAGEOFF]
Lloh19:
ldr w2, [x8]
add x1, sp, #12
mov x0, x20
bl _ef4_writeo
ldr w0, [sp, #12]
Lloh20:
adrp x8, _FRF_AB_MD_TXD@GOTPAGE
Lloh21:
ldr x8, [x8, _FRF_AB_MD_TXD@GOTPAGEOFF]
Lloh22:
ldr w1, [x8]
mov x2, x21
bl _EF4_POPULATE_OWORD_1
Lloh23:
adrp x8, _FR_AB_MD_TXD@GOTPAGE
Lloh24:
ldr x8, [x8, _FR_AB_MD_TXD@GOTPAGEOFF]
Lloh25:
ldr w2, [x8]
add x1, sp, #12
mov x0, x20
bl _ef4_writeo
ldr w0, [sp, #12]
Lloh26:
adrp x21, _FRF_AB_MD_WRC@GOTPAGE
Lloh27:
ldr x21, [x21, _FRF_AB_MD_WRC@GOTPAGEOFF]
ldr w1, [x21]
Lloh28:
adrp x22, _FRF_AB_MD_GC@GOTPAGE
Lloh29:
ldr x22, [x22, _FRF_AB_MD_GC@GOTPAGEOFF]
ldr w3, [x22]
mov w2, #1
mov w4, #0
bl _EF4_POPULATE_OWORD_2
Lloh30:
adrp x23, _FR_AB_MD_CS@GOTPAGE
Lloh31:
ldr x23, [x23, _FR_AB_MD_CS@GOTPAGEOFF]
ldr w2, [x23]
add x1, sp, #12
mov x0, x20
bl _ef4_writeo
mov x0, x20
bl _falcon_gmii_wait
mov x25, x0
cbz w0, LBB0_3
; %bb.2:
ldr w0, [sp, #12]
ldr w1, [x21]
ldr w3, [x22]
mov w2, #0
mov w4, #1
bl _EF4_POPULATE_OWORD_2
ldr w2, [x23]
add x1, sp, #12
mov x0, x20
bl _ef4_writeo
mov w0, #10
bl _udelay
LBB0_3:
mov x0, x19
bl _mutex_unlock
mov x0, x25
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
ldp x26, x25, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #96
ret
.loh AdrpAdd Lloh3, Lloh4
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGot Lloh30, Lloh31
.loh AdrpLdrGot Lloh28, Lloh29
.loh AdrpLdrGot Lloh26, Lloh27
.loh AdrpLdrGotLdr Lloh23, Lloh24, Lloh25
.loh AdrpLdrGotLdr Lloh20, Lloh21, Lloh22
.loh AdrpLdrGotLdr Lloh17, Lloh18, Lloh19
.loh AdrpLdrGotLdr Lloh14, Lloh15, Lloh16
.loh AdrpLdrGotLdr Lloh11, Lloh12, Lloh13
.loh AdrpLdrGotLdr Lloh8, Lloh9, Lloh10
.loh AdrpLdrGotLdr Lloh5, Lloh6, Lloh7
.cfi_endproc
; -- End function
.comm _hw,4,2 ; @hw
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "writing MDIO %d register %d.%d with 0x%04x\n"
.comm _FRF_AB_MD_PHY_ADR,4,2 ; @FRF_AB_MD_PHY_ADR
.comm _FR_AB_MD_PHY_ADR,4,2 ; @FR_AB_MD_PHY_ADR
.comm _FRF_AB_MD_PRT_ADR,4,2 ; @FRF_AB_MD_PRT_ADR
.comm _FRF_AB_MD_DEV_ADR,4,2 ; @FRF_AB_MD_DEV_ADR
.comm _FR_AB_MD_ID,4,2 ; @FR_AB_MD_ID
.comm _FRF_AB_MD_TXD,4,2 ; @FRF_AB_MD_TXD
.comm _FR_AB_MD_TXD,4,2 ; @FR_AB_MD_TXD
.comm _FRF_AB_MD_WRC,4,2 ; @FRF_AB_MD_WRC
.comm _FRF_AB_MD_GC,4,2 ; @FRF_AB_MD_GC
.comm _FR_AB_MD_CS,4,2 ; @FR_AB_MD_CS
.no_dead_strip _falcon_mdio_write
.subsections_via_symbols
| AnghaBench/linux/drivers/net/ethernet/sfc/falcon/extr_falcon.c_falcon_mdio_write.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
xorl %eax, %eax
callq _uart0_config
leaq L_.str(%rip), %rdi
callq _puts
.p2align 4, 0x90
LBB0_1: ## =>This Inner Loop Header: Depth=1
xorl %eax, %eax
callq _getchar
movzbl %al, %edi
callq _putchar
xorl %eax, %eax
callq _getchar
cmpl $13, %eax
jne LBB0_3
## %bb.2: ## in Loop: Header=BB0_1 Depth=1
movl $10, %edi
callq _putchar
LBB0_3: ## in Loop: Header=BB0_1 Depth=1
xorl %eax, %eax
callq _getchar
cmpl $10, %eax
jne LBB0_1
## %bb.4: ## in Loop: Header=BB0_1 Depth=1
movl $13, %edi
callq _putchar
jmp LBB0_1
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "Hello!!!\n\r"
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
bl _uart0_config
Lloh0:
adrp x0, l_.str@PAGE
Lloh1:
add x0, x0, l_.str@PAGEOFF
bl _puts
LBB0_1: ; =>This Inner Loop Header: Depth=1
bl _getchar
and w0, w0, #0xff
bl _putchar
bl _getchar
cmp w0, #13
b.ne LBB0_3
; %bb.2: ; in Loop: Header=BB0_1 Depth=1
mov w0, #10
bl _putchar
LBB0_3: ; in Loop: Header=BB0_1 Depth=1
bl _getchar
cmp w0, #10
b.ne LBB0_1
; %bb.4: ; in Loop: Header=BB0_1 Depth=1
mov w0, #13
bl _putchar
b LBB0_1
.loh AdrpAdd Lloh0, Lloh1
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "Hello!!!\n\r"
.subsections_via_symbols
| the_stack_data/18886643.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function gfs2_xattr_set
_gfs2_xattr_set: ## @gfs2_xattr_set
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $24, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %r9, -64(%rbp) ## 8-byte Spill
movq %r8, -56(%rbp) ## 8-byte Spill
movq %rcx, %r12
movq %rdx, %rbx
movq %rdi, %r13
movq %rdx, %rdi
callq _GFS2_I
movq %rax, %r15
movq %rax, %rdi
callq _gfs2_rsqa_alloc
movl %eax, %r14d
testl %eax, %eax
je LBB0_1
LBB0_8:
movl %r14d, %eax
addq $24, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB0_1:
movq (%r15), %rdi
callq _gfs2_glock_is_locked_by_me
movq (%r15), %rdi
testl %eax, %eax
je LBB0_2
## %bb.3:
movq (%rdi), %rax
movq _LM_ST_EXCLUSIVE@GOTPCREL(%rip), %rcx
xorl %edi, %edi
cmpq (%rcx), %rax
setne %dil
callq _WARN_ON_ONCE
testq %rax, %rax
je LBB0_5
## %bb.4:
movq _EIO@GOTPCREL(%rip), %rax
xorl %r14d, %r14d
subl (%rax), %r14d
jmp LBB0_8
LBB0_2:
movq _LM_ST_EXCLUSIVE@GOTPCREL(%rip), %rax
movq (%rax), %rsi
leaq -48(%rbp), %rcx
xorl %edx, %edx
callq _gfs2_glock_nq_init
movl %eax, %r14d
testl %eax, %eax
jne LBB0_8
jmp LBB0_6
LBB0_5:
leaq -48(%rbp), %rdi
callq _gfs2_holder_mark_uninitialized
LBB0_6:
movl 16(%rbp), %r8d
movl (%r13), %r9d
movq %rbx, %rdi
movq %r12, %rsi
movq -56(%rbp), %rdx ## 8-byte Reload
movq -64(%rbp), %rcx ## 8-byte Reload
callq ___gfs2_xattr_set
movl %eax, %r14d
leaq -48(%rbp), %rdi
callq _gfs2_holder_initialized
testq %rax, %rax
je LBB0_8
## %bb.7:
leaq -48(%rbp), %rdi
callq _gfs2_glock_dq_uninit
jmp LBB0_8
.cfi_endproc
## -- End function
.comm _LM_ST_EXCLUSIVE,8,3 ## @LM_ST_EXCLUSIVE
.comm _EIO,4,2 ## @EIO
.no_dead_strip _gfs2_xattr_set
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function gfs2_xattr_set
_gfs2_xattr_set: ; @gfs2_xattr_set
.cfi_startproc
; %bb.0:
sub sp, sp, #96
.cfi_def_cfa_offset 96
stp x26, x25, [sp, #16] ; 16-byte Folded Spill
stp x24, x23, [sp, #32] ; 16-byte Folded Spill
stp x22, x21, [sp, #48] ; 16-byte Folded Spill
stp x20, x19, [sp, #64] ; 16-byte Folded Spill
stp x29, x30, [sp, #80] ; 16-byte Folded Spill
add x29, sp, #80
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
mov x19, x6
mov x20, x5
mov x21, x4
mov x22, x3
mov x23, x2
mov x24, x0
mov x0, x2
bl _GFS2_I
mov x26, x0
bl _gfs2_rsqa_alloc
mov x25, x0
cbz w0, LBB0_2
LBB0_1:
mov x0, x25
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
ldp x26, x25, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #96
ret
LBB0_2:
ldr x0, [x26]
bl _gfs2_glock_is_locked_by_me
mov x8, x0
ldr x0, [x26]
cbz w8, LBB0_5
; %bb.3:
ldr x8, [x0]
Lloh0:
adrp x9, _LM_ST_EXCLUSIVE@GOTPAGE
Lloh1:
ldr x9, [x9, _LM_ST_EXCLUSIVE@GOTPAGEOFF]
Lloh2:
ldr x9, [x9]
cmp x8, x9
cset w0, ne
bl _WARN_ON_ONCE
cbz x0, LBB0_6
; %bb.4:
Lloh3:
adrp x8, _EIO@GOTPAGE
Lloh4:
ldr x8, [x8, _EIO@GOTPAGEOFF]
Lloh5:
ldr w8, [x8]
neg w25, w8
b LBB0_1
LBB0_5:
Lloh6:
adrp x8, _LM_ST_EXCLUSIVE@GOTPAGE
Lloh7:
ldr x8, [x8, _LM_ST_EXCLUSIVE@GOTPAGEOFF]
Lloh8:
ldr x1, [x8]
add x3, sp, #8
mov w2, #0
bl _gfs2_glock_nq_init
mov x25, x0
cbnz w0, LBB0_1
b LBB0_7
LBB0_6:
add x0, sp, #8
bl _gfs2_holder_mark_uninitialized
LBB0_7:
ldr w5, [x24]
mov x0, x23
mov x1, x22
mov x2, x21
mov x3, x20
mov x4, x19
bl ___gfs2_xattr_set
mov x25, x0
add x0, sp, #8
bl _gfs2_holder_initialized
cbz x0, LBB0_1
; %bb.8:
add x0, sp, #8
bl _gfs2_glock_dq_uninit
b LBB0_1
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGotLdr Lloh6, Lloh7, Lloh8
.cfi_endproc
; -- End function
.comm _LM_ST_EXCLUSIVE,8,3 ; @LM_ST_EXCLUSIVE
.comm _EIO,4,2 ; @EIO
.no_dead_strip _gfs2_xattr_set
.subsections_via_symbols
| AnghaBench/linux/fs/gfs2/extr_xattr.c_gfs2_xattr_set.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _megaInit ## -- Begin function megaInit
.p2align 4, 0x90
_megaInit: ## @megaInit
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _RandomFunc ## -- Begin function RandomFunc
.p2align 4, 0x90
_RandomFunc: ## @RandomFunc
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movl $326104726, %eax ## imm = 0x136FF696
addl (%rdi), %eax
movl %eax, %ecx
shrb $4, %cl
andb $1, %cl
shll %cl, %eax
imull $709125895, %eax, %eax ## imm = 0x2A446707
addl $1040438877, %eax ## imm = 0x3E03D65D
movl %eax, (%rsi)
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
cmpl $2, %edi
jne LBB2_4
## %bb.1:
movq 8(%rsi), %rdi
xorl %esi, %esi
movl $10, %edx
callq _strtoul
addl $326104726, %eax ## imm = 0x136FF696
movl %eax, %ecx
shrb $4, %cl
andb $1, %cl
shll %cl, %eax
imull $709125895, %eax, %ebx ## imm = 0x2A446707
addl $1040438877, %ebx ## imm = 0x3E03D65D
cmpl $716058374, %ebx ## imm = 0x2AAE2F06
jne LBB2_3
## %bb.2:
leaq L_str(%rip), %rdi
callq _puts
LBB2_3:
leaq L_.str.2(%rip), %rdi
movl %ebx, %esi
xorl %eax, %eax
callq _printf
xorl %eax, %eax
addq $8, %rsp
popq %rbx
popq %rbp
retq
LBB2_4:
leaq L_.str(%rip), %rdi
movl $1, %esi
xorl %eax, %eax
callq _printf
movl $-1, %edi
callq _exit
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "Call this program with %i arguments\n"
L_.str.2: ## @.str.2
.asciz "%u\n"
L_str: ## @str
.asciz "You win!"
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _megaInit ; -- Begin function megaInit
.p2align 2
_megaInit: ; @megaInit
.cfi_startproc
; %bb.0:
ret
.cfi_endproc
; -- End function
.globl _RandomFunc ; -- Begin function RandomFunc
.p2align 2
_RandomFunc: ; @RandomFunc
.cfi_startproc
; %bb.0:
ldr w8, [x0]
mov w9, #63126
movk w9, #4975, lsl #16
add w8, w8, w9
ubfx w9, w8, #4, #1
lsl w8, w8, w9
mov w9, #26375
movk w9, #10820, lsl #16
mov w10, #54877
movk w10, #15875, lsl #16
madd w8, w8, w9, w10
str w8, [x1]
ret
.cfi_endproc
; -- End function
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
sub sp, sp, #48
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
cmp w0, #2
b.ne LBB2_4
; %bb.1:
ldr x0, [x1, #8]
mov x1, #0
mov w2, #10
bl _strtoul
mov w8, #63126
movk w8, #4975, lsl #16
add w8, w0, w8
ubfx w9, w8, #4, #1
lsl w8, w8, w9
mov w9, #26375
movk w9, #10820, lsl #16
mov w10, #54877
movk w10, #15875, lsl #16
madd w19, w8, w9, w10
mov w8, #12038
movk w8, #10926, lsl #16
cmp w19, w8
b.ne LBB2_3
; %bb.2:
Lloh0:
adrp x0, l_str@PAGE
Lloh1:
add x0, x0, l_str@PAGEOFF
bl _puts
LBB2_3:
str x19, [sp]
Lloh2:
adrp x0, l_.str.2@PAGE
Lloh3:
add x0, x0, l_.str.2@PAGEOFF
bl _printf
mov w0, #0
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #48
ret
LBB2_4:
mov w8, #1
str x8, [sp]
Lloh4:
adrp x0, l_.str@PAGE
Lloh5:
add x0, x0, l_.str@PAGEOFF
bl _printf
mov w0, #-1
bl _exit
.loh AdrpAdd Lloh0, Lloh1
.loh AdrpAdd Lloh2, Lloh3
.loh AdrpAdd Lloh4, Lloh5
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "Call this program with %i arguments\n"
l_.str.2: ; @.str.2
.asciz "%u\n"
l_str: ; @str
.asciz "You win!"
.subsections_via_symbols
| the_stack_data/156392868.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function hw_ep_enable
_hw_ep_enable: ## @hw_ep_enable
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %edx, %r15d
movl %edi, %r14d
testl %esi, %esi
je LBB0_2
## %bb.1:
movq _ENDPTCTRL_TXT@GOTPCREL(%rip), %rax
movl (%rax), %ebx
movl %ebx, %edi
callq _ffs_nr
movl %eax, %ecx
shll %cl, %r15d
movq _ENDPTCTRL_TXS@GOTPCREL(%rip), %rax
movq _ENDPTCTRL_TXR@GOTPCREL(%rip), %rcx
movq _ENDPTCTRL_TXE@GOTPCREL(%rip), %rdx
jmp LBB0_3
LBB0_2:
movq _ENDPTCTRL_RXT@GOTPCREL(%rip), %rax
movl (%rax), %ebx
movl %ebx, %edi
callq _ffs_nr
movl %eax, %ecx
shll %cl, %r15d
movq _ENDPTCTRL_RXS@GOTPCREL(%rip), %rax
movq _ENDPTCTRL_RXR@GOTPCREL(%rip), %rcx
movq _ENDPTCTRL_RXE@GOTPCREL(%rip), %rdx
LBB0_3:
movl (%rdx), %edx
orl (%rcx), %edx
orl %edx, %ebx
orl (%rax), %ebx
orl %r15d, %edx
movq _CAP_ENDPTCTRL@GOTPCREL(%rip), %rax
movslq %r14d, %rdi
shlq $2, %rdi
addq (%rax), %rdi
movl %ebx, %esi
callq _hw_cwrite
xorl %eax, %eax
addq $8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _ENDPTCTRL_TXT,4,2 ## @ENDPTCTRL_TXT
.comm _ENDPTCTRL_TXS,4,2 ## @ENDPTCTRL_TXS
.comm _ENDPTCTRL_TXR,4,2 ## @ENDPTCTRL_TXR
.comm _ENDPTCTRL_TXE,4,2 ## @ENDPTCTRL_TXE
.comm _ENDPTCTRL_RXT,4,2 ## @ENDPTCTRL_RXT
.comm _ENDPTCTRL_RXS,4,2 ## @ENDPTCTRL_RXS
.comm _ENDPTCTRL_RXR,4,2 ## @ENDPTCTRL_RXR
.comm _ENDPTCTRL_RXE,4,2 ## @ENDPTCTRL_RXE
.comm _CAP_ENDPTCTRL,8,3 ## @CAP_ENDPTCTRL
.no_dead_strip _hw_ep_enable
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function hw_ep_enable
_hw_ep_enable: ; @hw_ep_enable
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x20, x2
mov x19, x0
cbz w1, LBB0_2
; %bb.1:
Lloh0:
adrp x8, _ENDPTCTRL_TXT@GOTPAGE
Lloh1:
ldr x8, [x8, _ENDPTCTRL_TXT@GOTPAGEOFF]
Lloh2:
ldr w21, [x8]
mov x0, x21
bl _ffs_nr
Lloh3:
adrp x8, _ENDPTCTRL_TXS@GOTPAGE
Lloh4:
ldr x8, [x8, _ENDPTCTRL_TXS@GOTPAGEOFF]
lsl w9, w20, w0
Lloh5:
ldr w8, [x8]
Lloh6:
adrp x10, _ENDPTCTRL_TXR@GOTPAGE
Lloh7:
ldr x10, [x10, _ENDPTCTRL_TXR@GOTPAGEOFF]
Lloh8:
adrp x11, _ENDPTCTRL_TXE@GOTPAGE
Lloh9:
ldr x11, [x11, _ENDPTCTRL_TXE@GOTPAGEOFF]
b LBB0_3
LBB0_2:
Lloh10:
adrp x8, _ENDPTCTRL_RXT@GOTPAGE
Lloh11:
ldr x8, [x8, _ENDPTCTRL_RXT@GOTPAGEOFF]
Lloh12:
ldr w21, [x8]
mov x0, x21
bl _ffs_nr
Lloh13:
adrp x8, _ENDPTCTRL_RXS@GOTPAGE
Lloh14:
ldr x8, [x8, _ENDPTCTRL_RXS@GOTPAGEOFF]
lsl w9, w20, w0
Lloh15:
ldr w8, [x8]
Lloh16:
adrp x10, _ENDPTCTRL_RXR@GOTPAGE
Lloh17:
ldr x10, [x10, _ENDPTCTRL_RXR@GOTPAGEOFF]
Lloh18:
adrp x11, _ENDPTCTRL_RXE@GOTPAGE
Lloh19:
ldr x11, [x11, _ENDPTCTRL_RXE@GOTPAGEOFF]
LBB0_3:
ldr w10, [x10]
ldr w11, [x11]
orr w10, w11, w10
orr w11, w10, w21
orr w1, w11, w8
orr w2, w10, w9
Lloh20:
adrp x8, _CAP_ENDPTCTRL@GOTPAGE
Lloh21:
ldr x8, [x8, _CAP_ENDPTCTRL@GOTPAGEOFF]
Lloh22:
ldr x8, [x8]
add x0, x8, w19, sxtw #2
bl _hw_cwrite
mov w0, #0
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
.loh AdrpLdrGot Lloh8, Lloh9
.loh AdrpLdrGot Lloh6, Lloh7
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGot Lloh18, Lloh19
.loh AdrpLdrGot Lloh16, Lloh17
.loh AdrpLdrGotLdr Lloh13, Lloh14, Lloh15
.loh AdrpLdrGotLdr Lloh10, Lloh11, Lloh12
.loh AdrpLdrGotLdr Lloh20, Lloh21, Lloh22
.cfi_endproc
; -- End function
.comm _ENDPTCTRL_TXT,4,2 ; @ENDPTCTRL_TXT
.comm _ENDPTCTRL_TXS,4,2 ; @ENDPTCTRL_TXS
.comm _ENDPTCTRL_TXR,4,2 ; @ENDPTCTRL_TXR
.comm _ENDPTCTRL_TXE,4,2 ; @ENDPTCTRL_TXE
.comm _ENDPTCTRL_RXT,4,2 ; @ENDPTCTRL_RXT
.comm _ENDPTCTRL_RXS,4,2 ; @ENDPTCTRL_RXS
.comm _ENDPTCTRL_RXR,4,2 ; @ENDPTCTRL_RXR
.comm _ENDPTCTRL_RXE,4,2 ; @ENDPTCTRL_RXE
.comm _CAP_ENDPTCTRL,8,3 ; @CAP_ENDPTCTRL
.no_dead_strip _hw_ep_enable
.subsections_via_symbols
| AnghaBench/fastsocket/kernel/drivers/usb/gadget/extr_ci13xxx_udc.c_hw_ep_enable.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _git_offmap_size ## -- Begin function git_offmap_size
.p2align 4, 0x90
_git_offmap_size: ## @git_offmap_size
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
popq %rbp
jmp _kh_size ## TAILCALL
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _git_offmap_size ; -- Begin function git_offmap_size
.p2align 2
_git_offmap_size: ; @git_offmap_size
.cfi_startproc
; %bb.0:
b _kh_size
.cfi_endproc
; -- End function
.subsections_via_symbols
| AnghaBench/libgit2/src/extr_offmap.c_git_offmap_size.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
leaq L_.str(%rip), %rdi
xorl %eax, %eax
callq _printf
xorl %eax, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "Hello World!"
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh0:
adrp x0, l_.str@PAGE
Lloh1:
add x0, x0, l_.str@PAGEOFF
bl _printf
mov w0, #0
ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
ret
.loh AdrpAdd Lloh0, Lloh1
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "Hello World!"
.subsections_via_symbols
| the_stack_data/154826760.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function mipsnet_xmit
_mipsnet_xmit: ## @mipsnet_xmit
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq %rsi, %rbx
movq %rdi, %r14
movq %rsi, %rdi
callq _netif_stop_queue
movq %rbx, %rdi
movq %r14, %rsi
callq _mipsnet_put_todevice
movq _NETDEV_TX_OK@GOTPCREL(%rip), %rax
movl (%rax), %eax
popq %rbx
popq %r14
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _NETDEV_TX_OK,4,2 ## @NETDEV_TX_OK
.no_dead_strip _mipsnet_xmit
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function mipsnet_xmit
_mipsnet_xmit: ; @mipsnet_xmit
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x1
mov x20, x0
mov x0, x1
bl _netif_stop_queue
mov x0, x19
mov x1, x20
bl _mipsnet_put_todevice
Lloh0:
adrp x8, _NETDEV_TX_OK@GOTPAGE
Lloh1:
ldr x8, [x8, _NETDEV_TX_OK@GOTPAGEOFF]
Lloh2:
ldr w0, [x8]
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _NETDEV_TX_OK,4,2 ; @NETDEV_TX_OK
.no_dead_strip _mipsnet_xmit
.subsections_via_symbols
| AnghaBench/fastsocket/kernel/drivers/net/extr_mipsnet.c_mipsnet_xmit.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $136, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -48(%rbp)
movl $0, -136(%rbp)
movq $0, -144(%rbp)
movq _EXIT_FAILURE@GOTPCREL(%rip), %rax
movl (%rax), %r14d
movl $0, -148(%rbp)
leaq -164(%rbp), %rbx
movq %rbx, %rdi
callq _CPU_ZERO
xorl %r15d, %r15d
xorl %edi, %edi
movq %rbx, %rsi
callq _CPU_SET
xorl %eax, %eax
callq _pthread_self
movl %eax, %edi
movl $4, %esi
movq %rbx, %rdx
callq _pthread_setaffinity_np
xorl %eax, %eax
callq _setup_cgroup_environment
testq %rax, %rax
je LBB0_6
## %bb.1:
movl $-1, %ebx
jmp LBB0_2
LBB0_6:
leaq L_.str.1(%rip), %rdi
callq _create_and_get_cgroup
movl %eax, %ebx
testl %eax, %eax
js LBB0_9
## %bb.7:
leaq L_.str.1(%rip), %rdi
callq _join_cgroup
testq %rax, %rax
je LBB0_10
LBB0_9:
xorl %r15d, %r15d
LBB0_2:
movq _BPF_CGROUP_SOCK_OPS@GOTPCREL(%rip), %rax
movl (%rax), %esi
movl %ebx, %edi
callq _bpf_prog_detach
movl %ebx, %edi
callq _close
xorl %eax, %eax
callq _cleanup_cgroup_environment
movq %r15, %rdi
callq _IS_ERR_OR_NULL
testl %eax, %eax
jne LBB0_4
## %bb.3:
movq %r15, %rdi
callq _perf_buffer__free
LBB0_4:
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
cmpq -48(%rbp), %rax
jne LBB0_25
## %bb.5:
movl %r14d, %eax
addq $136, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB0_10:
movq _BPF_PROG_TYPE_SOCK_OPS@GOTPCREL(%rip), %rax
movl (%rax), %esi
leaq L_.str(%rip), %rdi
leaq -160(%rbp), %rdx
leaq -152(%rbp), %rcx
callq _bpf_prog_load
testq %rax, %rax
je LBB0_13
## %bb.11:
leaq L_.str.2(%rip), %rdi
leaq L_.str(%rip), %rsi
LBB0_12:
xorl %r15d, %r15d
xorl %eax, %eax
callq _printf
jmp LBB0_2
LBB0_13:
movl -152(%rbp), %edi
movq _BPF_CGROUP_SOCK_OPS@GOTPCREL(%rip), %rax
movl (%rax), %edx
movl %ebx, %esi
xorl %ecx, %ecx
callq _bpf_prog_attach
testl %eax, %eax
je LBB0_15
## %bb.14:
movq _errno@GOTPCREL(%rip), %rax
movl (%rax), %edi
callq _strerror
leaq L_.str.3(%rip), %rdi
xorl %r15d, %r15d
movl %r14d, %esi
movq %rax, %rdx
xorl %eax, %eax
callq _printf
jmp LBB0_2
LBB0_15:
movq -160(%rbp), %rdi
leaq L_.str.4(%rip), %rsi
callq _bpf_object__find_map_by_name
testq %rax, %rax
je LBB0_20
## %bb.16:
movq %rax, %r15
movq -160(%rbp), %rdi
leaq L_.str.6(%rip), %rsi
callq _bpf_object__find_map_by_name
testq %rax, %rax
je LBB0_21
## %bb.17:
movq %rax, %r12
movq _dummyfn@GOTPCREL(%rip), %rax
movl (%rax), %eax
movl %eax, -136(%rbp)
movq %r15, %rdi
callq _bpf_map__fd
leaq -136(%rbp), %rdx
movl %eax, %edi
movl $8, %esi
callq _perf_buffer__new
movq %rax, %r15
movq %rax, %rdi
callq _IS_ERR
testq %rax, %rax
jne LBB0_2
## %bb.18:
movq _poller_thread@GOTPCREL(%rip), %rax
movl (%rax), %edx
movq _tid@GOTPCREL(%rip), %rdi
xorl %esi, %esi
movq %r15, %rcx
callq _pthread_create
movq _TESTPORT@GOTPCREL(%rip), %rax
movl (%rax), %edx
leaq L_.str.7(%rip), %rsi
leaq -128(%rbp), %r13
movq %r13, %rdi
callq _sprintf
movq %r13, %rdi
callq _system
movq _TESTPORT@GOTPCREL(%rip), %rax
movl (%rax), %edx
leaq L_.str.8(%rip), %rsi
movq %r13, %rdi
callq _sprintf
movq %r13, %rdi
callq _system
movq _TESTPORT@GOTPCREL(%rip), %rax
movl (%rax), %edx
leaq L_.str.9(%rip), %rsi
movq %r13, %rdi
callq _sprintf
movq %r13, %rdi
callq _system
movq %r12, %rdi
callq _bpf_map__fd
leaq -148(%rbp), %rsi
leaq -144(%rbp), %rdx
movl %eax, %edi
callq _bpf_map_lookup_elem
testl %eax, %eax
je LBB0_22
## %bb.19:
leaq L_.str.10(%rip), %rdi
movl %eax, %esi
xorl %eax, %eax
callq _printf
jmp LBB0_2
LBB0_20:
leaq L_.str.5(%rip), %rdi
leaq L_.str.4(%rip), %rsi
jmp LBB0_12
LBB0_21:
leaq L_.str.5(%rip), %rdi
leaq L_.str.6(%rip), %rsi
xorl %eax, %eax
callq _printf
movl $-1, %r14d
jmp LBB0_4
LBB0_22:
movl $10, %edi
callq _sleep
leaq -144(%rbp), %rdi
callq _verify_result
testq %rax, %rax
je LBB0_24
## %bb.23:
movl -144(%rbp), %esi
movq _rx_callbacks@GOTPCREL(%rip), %rax
movl (%rax), %edx
leaq L_.str.11(%rip), %rdi
xorl %eax, %eax
callq _printf
jmp LBB0_2
LBB0_24:
leaq L_str(%rip), %rdi
callq _puts
xorl %r14d, %r14d
jmp LBB0_2
LBB0_25:
callq ___stack_chk_fail
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "test_tcpnotify_kern.o"
L_.str.1: ## @.str.1
.asciz "/foo"
.comm _EXIT_FAILURE,4,2 ## @EXIT_FAILURE
.comm _BPF_PROG_TYPE_SOCK_OPS,4,2 ## @BPF_PROG_TYPE_SOCK_OPS
L_.str.2: ## @.str.2
.asciz "FAILED: load_bpf_file failed for: %s\n"
.comm _BPF_CGROUP_SOCK_OPS,4,2 ## @BPF_CGROUP_SOCK_OPS
L_.str.3: ## @.str.3
.asciz "FAILED: bpf_prog_attach: %d (%s)\n"
.comm _errno,4,2 ## @errno
L_.str.4: ## @.str.4
.asciz "perf_event_map"
L_.str.5: ## @.str.5
.asciz "FAIL:map '%s' not found\n"
L_.str.6: ## @.str.6
.asciz "global_map"
.comm _dummyfn,4,2 ## @dummyfn
.comm _tid,4,2 ## @tid
.comm _poller_thread,4,2 ## @poller_thread
L_.str.7: ## @.str.7
.asciz "iptables -A INPUT -p tcp --dport %d -j DROP"
.comm _TESTPORT,4,2 ## @TESTPORT
L_.str.8: ## @.str.8
.asciz "nc 127.0.0.1 %d < /etc/passwd > /dev/null 2>&1 "
L_.str.9: ## @.str.9
.asciz "iptables -D INPUT -p tcp --dport %d -j DROP"
L_.str.10: ## @.str.10
.asciz "FAILED: bpf_map_lookup_elem returns %d\n"
L_.str.11: ## @.str.11
.asciz "FAILED: Wrong stats Expected %d calls, got %d\n"
.comm _rx_callbacks,4,2 ## @rx_callbacks
L_str: ## @str
.asciz "PASSED!"
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
sub sp, sp, #208
.cfi_def_cfa_offset 208
stp x24, x23, [sp, #144] ; 16-byte Folded Spill
stp x22, x21, [sp, #160] ; 16-byte Folded Spill
stp x20, x19, [sp, #176] ; 16-byte Folded Spill
stp x29, x30, [sp, #192] ; 16-byte Folded Spill
add x29, sp, #192
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
Lloh0:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh1:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh2:
ldr x8, [x8]
stur x8, [x29, #-56]
str wzr, [sp, #48]
str xzr, [sp, #40]
Lloh3:
adrp x8, _EXIT_FAILURE@GOTPAGE
Lloh4:
ldr x8, [x8, _EXIT_FAILURE@GOTPAGEOFF]
Lloh5:
ldr w19, [x8]
str wzr, [sp, #16]
add x0, sp, #20
bl _CPU_ZERO
add x1, sp, #20
mov w0, #0
bl _CPU_SET
bl _pthread_self
add x2, sp, #20
mov w1, #4
bl _pthread_setaffinity_np
bl _setup_cgroup_environment
Lloh6:
adrp x24, _BPF_CGROUP_SOCK_OPS@GOTPAGE
Lloh7:
ldr x24, [x24, _BPF_CGROUP_SOCK_OPS@GOTPAGEOFF]
cbz x0, LBB0_2
; %bb.1:
mov x21, #0
mov w20, #-1
b LBB0_10
LBB0_2:
Lloh8:
adrp x0, l_.str.1@PAGE
Lloh9:
add x0, x0, l_.str.1@PAGEOFF
bl _create_and_get_cgroup
mov x20, x0
tbnz w0, #31, LBB0_4
; %bb.3:
Lloh10:
adrp x0, l_.str.1@PAGE
Lloh11:
add x0, x0, l_.str.1@PAGEOFF
bl _join_cgroup
cbz x0, LBB0_5
LBB0_4:
mov x21, #0
b LBB0_10
LBB0_5:
Lloh12:
adrp x8, _BPF_PROG_TYPE_SOCK_OPS@GOTPAGE
Lloh13:
ldr x8, [x8, _BPF_PROG_TYPE_SOCK_OPS@GOTPAGEOFF]
Lloh14:
ldr w1, [x8]
Lloh15:
adrp x21, l_.str@PAGE
Lloh16:
add x21, x21, l_.str@PAGEOFF
add x2, sp, #24
add x3, sp, #36
mov x0, x21
bl _bpf_prog_load
cbz x0, LBB0_7
; %bb.6:
str x21, [sp]
Lloh17:
adrp x0, l_.str.2@PAGE
Lloh18:
add x0, x0, l_.str.2@PAGEOFF
b LBB0_9
LBB0_7:
ldr w0, [sp, #36]
ldr w2, [x24]
mov x1, x20
mov w3, #0
bl _bpf_prog_attach
cbz w0, LBB0_14
; %bb.8:
Lloh19:
adrp x8, _errno@GOTPAGE
Lloh20:
ldr x8, [x8, _errno@GOTPAGEOFF]
Lloh21:
ldr w0, [x8]
bl _strerror
stp x19, x0, [sp]
Lloh22:
adrp x0, l_.str.3@PAGE
Lloh23:
add x0, x0, l_.str.3@PAGEOFF
LBB0_9:
bl _printf
mov x21, #0
LBB0_10:
ldr w1, [x24]
mov x0, x20
bl _bpf_prog_detach
mov x0, x20
bl _close
bl _cleanup_cgroup_environment
mov x0, x21
bl _IS_ERR_OR_NULL
cbnz w0, LBB0_12
; %bb.11:
mov x0, x21
bl _perf_buffer__free
LBB0_12:
ldur x8, [x29, #-56]
Lloh24:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh25:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh26:
ldr x9, [x9]
cmp x9, x8
b.ne LBB0_24
; %bb.13:
mov x0, x19
ldp x29, x30, [sp, #192] ; 16-byte Folded Reload
ldp x20, x19, [sp, #176] ; 16-byte Folded Reload
ldp x22, x21, [sp, #160] ; 16-byte Folded Reload
ldp x24, x23, [sp, #144] ; 16-byte Folded Reload
add sp, sp, #208
ret
LBB0_14:
ldr x0, [sp, #24]
Lloh27:
adrp x22, l_.str.4@PAGE
Lloh28:
add x22, x22, l_.str.4@PAGEOFF
mov x1, x22
bl _bpf_object__find_map_by_name
mov x21, x0
cbz x0, LBB0_19
; %bb.15:
ldr x0, [sp, #24]
Lloh29:
adrp x23, l_.str.6@PAGE
Lloh30:
add x23, x23, l_.str.6@PAGEOFF
mov x1, x23
bl _bpf_object__find_map_by_name
cbz x0, LBB0_20
; %bb.16:
mov x22, x0
Lloh31:
adrp x8, _dummyfn@GOTPAGE
Lloh32:
ldr x8, [x8, _dummyfn@GOTPAGEOFF]
Lloh33:
ldr w8, [x8]
str w8, [sp, #48]
mov x0, x21
bl _bpf_map__fd
add x2, sp, #48
mov w1, #8
bl _perf_buffer__new
mov x21, x0
bl _IS_ERR
cbnz x0, LBB0_10
; %bb.17:
Lloh34:
adrp x8, _poller_thread@GOTPAGE
Lloh35:
ldr x8, [x8, _poller_thread@GOTPAGEOFF]
Lloh36:
ldr w2, [x8]
Lloh37:
adrp x0, _tid@GOTPAGE
Lloh38:
ldr x0, [x0, _tid@GOTPAGEOFF]
mov x1, #0
mov x3, x21
bl _pthread_create
Lloh39:
adrp x23, _TESTPORT@GOTPAGE
Lloh40:
ldr x23, [x23, _TESTPORT@GOTPAGEOFF]
ldr w2, [x23]
Lloh41:
adrp x1, l_.str.7@PAGE
Lloh42:
add x1, x1, l_.str.7@PAGEOFF
add x0, sp, #56
bl _sprintf
add x0, sp, #56
bl _system
ldr w2, [x23]
Lloh43:
adrp x1, l_.str.8@PAGE
Lloh44:
add x1, x1, l_.str.8@PAGEOFF
add x0, sp, #56
bl _sprintf
add x0, sp, #56
bl _system
ldr w2, [x23]
Lloh45:
adrp x1, l_.str.9@PAGE
Lloh46:
add x1, x1, l_.str.9@PAGEOFF
add x0, sp, #56
bl _sprintf
add x0, sp, #56
bl _system
mov x0, x22
bl _bpf_map__fd
add x1, sp, #16
add x2, sp, #40
bl _bpf_map_lookup_elem
; kill: def $w0 killed $w0 def $x0
cbz w0, LBB0_21
; %bb.18:
str x0, [sp]
Lloh47:
adrp x0, l_.str.10@PAGE
Lloh48:
add x0, x0, l_.str.10@PAGEOFF
bl _printf
b LBB0_10
LBB0_19:
str x22, [sp]
Lloh49:
adrp x0, l_.str.5@PAGE
Lloh50:
add x0, x0, l_.str.5@PAGEOFF
bl _printf
b LBB0_10
LBB0_20:
str x23, [sp]
Lloh51:
adrp x0, l_.str.5@PAGE
Lloh52:
add x0, x0, l_.str.5@PAGEOFF
bl _printf
mov w19, #-1
b LBB0_12
LBB0_21:
mov w0, #10
bl _sleep
add x0, sp, #40
bl _verify_result
cbz x0, LBB0_23
; %bb.22:
Lloh53:
adrp x8, _rx_callbacks@GOTPAGE
Lloh54:
ldr x8, [x8, _rx_callbacks@GOTPAGEOFF]
ldr w9, [sp, #40]
Lloh55:
ldr w8, [x8]
stp x9, x8, [sp]
Lloh56:
adrp x0, l_.str.11@PAGE
Lloh57:
add x0, x0, l_.str.11@PAGEOFF
bl _printf
b LBB0_10
LBB0_23:
Lloh58:
adrp x0, l_str@PAGE
Lloh59:
add x0, x0, l_str@PAGEOFF
bl _puts
mov w19, #0
b LBB0_10
LBB0_24:
bl ___stack_chk_fail
.loh AdrpLdrGot Lloh6, Lloh7
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpAdd Lloh8, Lloh9
.loh AdrpAdd Lloh10, Lloh11
.loh AdrpAdd Lloh15, Lloh16
.loh AdrpLdrGotLdr Lloh12, Lloh13, Lloh14
.loh AdrpAdd Lloh17, Lloh18
.loh AdrpAdd Lloh22, Lloh23
.loh AdrpLdrGotLdr Lloh19, Lloh20, Lloh21
.loh AdrpLdrGotLdr Lloh24, Lloh25, Lloh26
.loh AdrpAdd Lloh27, Lloh28
.loh AdrpAdd Lloh29, Lloh30
.loh AdrpLdrGotLdr Lloh31, Lloh32, Lloh33
.loh AdrpAdd Lloh45, Lloh46
.loh AdrpAdd Lloh43, Lloh44
.loh AdrpAdd Lloh41, Lloh42
.loh AdrpLdrGot Lloh39, Lloh40
.loh AdrpLdrGot Lloh37, Lloh38
.loh AdrpLdrGotLdr Lloh34, Lloh35, Lloh36
.loh AdrpAdd Lloh47, Lloh48
.loh AdrpAdd Lloh49, Lloh50
.loh AdrpAdd Lloh51, Lloh52
.loh AdrpAdd Lloh56, Lloh57
.loh AdrpLdrGotLdr Lloh53, Lloh54, Lloh55
.loh AdrpAdd Lloh58, Lloh59
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "test_tcpnotify_kern.o"
l_.str.1: ; @.str.1
.asciz "/foo"
.comm _EXIT_FAILURE,4,2 ; @EXIT_FAILURE
.comm _BPF_PROG_TYPE_SOCK_OPS,4,2 ; @BPF_PROG_TYPE_SOCK_OPS
l_.str.2: ; @.str.2
.asciz "FAILED: load_bpf_file failed for: %s\n"
.comm _BPF_CGROUP_SOCK_OPS,4,2 ; @BPF_CGROUP_SOCK_OPS
l_.str.3: ; @.str.3
.asciz "FAILED: bpf_prog_attach: %d (%s)\n"
.comm _errno,4,2 ; @errno
l_.str.4: ; @.str.4
.asciz "perf_event_map"
l_.str.5: ; @.str.5
.asciz "FAIL:map '%s' not found\n"
l_.str.6: ; @.str.6
.asciz "global_map"
.comm _dummyfn,4,2 ; @dummyfn
.comm _tid,4,2 ; @tid
.comm _poller_thread,4,2 ; @poller_thread
l_.str.7: ; @.str.7
.asciz "iptables -A INPUT -p tcp --dport %d -j DROP"
.comm _TESTPORT,4,2 ; @TESTPORT
l_.str.8: ; @.str.8
.asciz "nc 127.0.0.1 %d < /etc/passwd > /dev/null 2>&1 "
l_.str.9: ; @.str.9
.asciz "iptables -D INPUT -p tcp --dport %d -j DROP"
l_.str.10: ; @.str.10
.asciz "FAILED: bpf_map_lookup_elem returns %d\n"
l_.str.11: ; @.str.11
.asciz "FAILED: Wrong stats Expected %d calls, got %d\n"
.comm _rx_callbacks,4,2 ; @rx_callbacks
l_str: ; @str
.asciz "PASSED!"
.subsections_via_symbols
| AnghaBench/linux/tools/testing/selftests/bpf/extr_test_tcpnotify_user.c_main.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function t3_ddp_cleanup
_t3_ddp_cleanup: ## @t3_ddp_cleanup
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
movq (%rdi), %rbx
callq _cxgbi_ddp_cleanup
testq %rax, %rax
je LBB0_2
## %bb.1:
leaq L_.str(%rip), %rdi
movq %rbx, %rsi
callq _pr_info
movq $0, (%rbx)
LBB0_2:
addq $8, %rsp
popq %rbx
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "t3dev 0x%p, ulp_iscsi no more user.\n"
.no_dead_strip _t3_ddp_cleanup
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function t3_ddp_cleanup
_t3_ddp_cleanup: ; @t3_ddp_cleanup
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
ldr x19, [x0]
bl _cxgbi_ddp_cleanup
cbz x0, LBB0_2
; %bb.1:
Lloh0:
adrp x0, l_.str@PAGE
Lloh1:
add x0, x0, l_.str@PAGEOFF
mov x1, x19
bl _pr_info
str xzr, [x19]
LBB0_2:
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.loh AdrpAdd Lloh0, Lloh1
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "t3dev 0x%p, ulp_iscsi no more user.\n"
.no_dead_strip _t3_ddp_cleanup
.subsections_via_symbols
| AnghaBench/fastsocket/kernel/drivers/scsi/cxgbi/cxgb3i/extr_cxgb3i.c_t3_ddp_cleanup.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
subq $32, %rsp
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -24(%rbp)
cmpl $1, %edi
jg LBB0_1
## %bb.2:
leaq -48(%rbp), %rbx
movl $19, %edx
xorl %edi, %edi
movq %rbx, %rsi
callq _read
movb $0, -48(%rbp,%rax)
jmp LBB0_3
LBB0_1:
movq 8(%rsi), %rbx
LBB0_3:
leaq L_.str(%rip), %rsi
movq %rbx, %rdi
callq _strcmp
testl %eax, %eax
je LBB0_4
## %bb.5:
leaq L_.str.2(%rip), %rsi
movq %rbx, %rdi
callq _strcmp
testl %eax, %eax
je LBB0_6
## %bb.7:
leaq L_.str.4(%rip), %rsi
movq %rbx, %rdi
callq _strcmp
testl %eax, %eax
je LBB0_8
## %bb.9:
cmpl $-1414673666, (%rbx) ## imm = 0xABADCAFE
jne LBB0_11
## %bb.10:
leaq L_str.15(%rip), %rdi
jmp LBB0_18
LBB0_4:
leaq L_str.17(%rip), %rdi
jmp LBB0_18
LBB0_6:
leaq L_str.16(%rip), %rdi
LBB0_18:
callq _puts
LBB0_19:
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
cmpq -24(%rbp), %rax
jne LBB0_21
## %bb.20:
xorl %eax, %eax
addq $32, %rsp
popq %rbx
popq %r14
popq %rbp
retq
LBB0_8:
movl $16, %edi
callq _malloc
movq %rax, %r14
movl $1414743380, (%rax) ## imm = 0x54534554
movb $0, 4(%rax)
movl $16, %edx
movq %rax, %rdi
movq %rbx, %rsi
callq ___strcat_chk
leaq L_.str.6(%rip), %rdi
movq %r14, %rsi
xorl %eax, %eax
callq _printf
jmp LBB0_19
LBB0_11:
movabsq $5927113470108716876, %rax ## imm = 0x5241564C41434F4C
cmpq %rax, (%rbx)
je LBB0_12
## %bb.13:
cmpl $6513249, (%rbx) ## imm = 0x636261
je LBB0_14
## %bb.15:
movq _global_cmpval(%rip), %rax
xorq (%rbx), %rax
movq _global_cmpval+7(%rip), %rcx
xorq 7(%rbx), %rcx
orq %rax, %rcx
je LBB0_16
## %bb.17:
leaq L_str(%rip), %rdi
jmp LBB0_18
LBB0_12:
leaq L_str.14(%rip), %rdi
jmp LBB0_18
LBB0_14:
leaq L_str.13(%rip), %rdi
jmp LBB0_18
LBB0_16:
leaq L_str.12(%rip), %rdi
jmp LBB0_18
LBB0_21:
callq ___stack_chk_fail
.cfi_endproc
## -- End function
.section __DATA,__data
.globl _global_cmpval ## @global_cmpval
_global_cmpval:
.asciz "GLOBALVARIABLE"
.section __TEXT,__cstring,cstring_literals
L___const.main.cmpval: ## @__const.main.cmpval
.asciz "LOCALVARIABLE"
L___const.main.shortval: ## @__const.main.shortval
.asciz "abc"
L_.str: ## @.str
.asciz "LIBTOKENCAP"
L_.str.2: ## @.str.2
.asciz "BUGMENOT"
L_.str.4: ## @.str.4
.asciz "BUFFEROVERFLOW"
L_.str.5: ## @.str.5
.asciz "TEST"
L_.str.6: ## @.str.6
.asciz "This will only crash with libdislocator: %s\n"
L_str: ## @str
.asciz "I do not know your string"
L_str.12: ## @str.12
.asciz "global var memcmp works!"
L_str.13: ## @str.13
.asciz "short local var memcmp works!"
L_str.14: ## @str.14
.asciz "local var memcmp works!"
L_str.15: ## @str.15
.asciz "GG you eat cmp tokens for breakfast!"
L_str.16: ## @str.16
.asciz "your string was bugmenot"
L_str.17: ## @str.17
.asciz "your string was libtokencap"
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
sub sp, sp, #80
.cfi_def_cfa_offset 80
stp x20, x19, [sp, #48] ; 16-byte Folded Spill
stp x29, x30, [sp, #64] ; 16-byte Folded Spill
add x29, sp, #64
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
Lloh0:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh1:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh2:
ldr x8, [x8]
stur x8, [x29, #-24]
cmp w0, #1
b.gt LBB0_2
; %bb.1:
add x19, sp, #20
add x1, sp, #20
mov w0, #0
mov w2, #19
bl _read
strb wzr, [x19, x0]
b LBB0_3
LBB0_2:
ldr x19, [x1, #8]
LBB0_3:
Lloh3:
adrp x1, l_.str@PAGE
Lloh4:
add x1, x1, l_.str@PAGEOFF
mov x0, x19
bl _strcmp
cbz w0, LBB0_8
; %bb.4:
Lloh5:
adrp x1, l_.str.2@PAGE
Lloh6:
add x1, x1, l_.str.2@PAGEOFF
mov x0, x19
bl _strcmp
cbz w0, LBB0_9
; %bb.5:
Lloh7:
adrp x1, l_.str.4@PAGE
Lloh8:
add x1, x1, l_.str.4@PAGEOFF
mov x0, x19
bl _strcmp
cbz w0, LBB0_13
; %bb.6:
ldr w8, [x19]
mov w9, #51966
movk w9, #43949, lsl #16
cmp w8, w9
b.ne LBB0_14
; %bb.7:
Lloh9:
adrp x0, l_str.15@PAGE
Lloh10:
add x0, x0, l_str.15@PAGEOFF
b LBB0_10
LBB0_8:
Lloh11:
adrp x0, l_str.17@PAGE
Lloh12:
add x0, x0, l_str.17@PAGEOFF
b LBB0_10
LBB0_9:
Lloh13:
adrp x0, l_str.16@PAGE
Lloh14:
add x0, x0, l_str.16@PAGEOFF
LBB0_10:
bl _puts
LBB0_11:
ldur x8, [x29, #-24]
Lloh15:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh16:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh17:
ldr x9, [x9]
cmp x9, x8
b.ne LBB0_21
; %bb.12:
mov w0, #0
ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
add sp, sp, #80
ret
LBB0_13:
mov w0, #16
bl _malloc
mov x20, x0
mov w8, #17748
movk w8, #21587, lsl #16
str w8, [x0]
strb wzr, [x0, #4]
mov x1, x19
mov w2, #16
bl ___strcat_chk
str x20, [sp]
Lloh18:
adrp x0, l_.str.6@PAGE
Lloh19:
add x0, x0, l_.str.6@PAGEOFF
bl _printf
b LBB0_11
LBB0_14:
ldr x8, [x19]
mov x9, #20300
movk x9, #16707, lsl #16
movk x9, #22092, lsl #32
movk x9, #21057, lsl #48
cmp x8, x9
b.eq LBB0_18
; %bb.15:
ldr w8, [x19]
sub w8, w8, #1590, lsl #12 ; =6512640
cmp w8, #609
b.eq LBB0_19
; %bb.16:
Lloh20:
adrp x8, _global_cmpval@PAGE
Lloh21:
add x8, x8, _global_cmpval@PAGEOFF
ldr x9, [x8]
ldr x10, [x19]
eor x9, x9, x10
ldur x8, [x8, #7]
ldur x10, [x19, #7]
eor x8, x8, x10
orr x8, x9, x8
cbz x8, LBB0_20
; %bb.17:
Lloh22:
adrp x0, l_str@PAGE
Lloh23:
add x0, x0, l_str@PAGEOFF
b LBB0_10
LBB0_18:
Lloh24:
adrp x0, l_str.14@PAGE
Lloh25:
add x0, x0, l_str.14@PAGEOFF
b LBB0_10
LBB0_19:
Lloh26:
adrp x0, l_str.13@PAGE
Lloh27:
add x0, x0, l_str.13@PAGEOFF
b LBB0_10
LBB0_20:
Lloh28:
adrp x0, l_str.12@PAGE
Lloh29:
add x0, x0, l_str.12@PAGEOFF
b LBB0_10
LBB0_21:
bl ___stack_chk_fail
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpAdd Lloh3, Lloh4
.loh AdrpAdd Lloh5, Lloh6
.loh AdrpAdd Lloh7, Lloh8
.loh AdrpAdd Lloh9, Lloh10
.loh AdrpAdd Lloh11, Lloh12
.loh AdrpAdd Lloh13, Lloh14
.loh AdrpLdrGotLdr Lloh15, Lloh16, Lloh17
.loh AdrpAdd Lloh18, Lloh19
.loh AdrpAdd Lloh20, Lloh21
.loh AdrpAdd Lloh22, Lloh23
.loh AdrpAdd Lloh24, Lloh25
.loh AdrpAdd Lloh26, Lloh27
.loh AdrpAdd Lloh28, Lloh29
.cfi_endproc
; -- End function
.section __DATA,__data
.globl _global_cmpval ; @global_cmpval
_global_cmpval:
.asciz "GLOBALVARIABLE"
.section __TEXT,__cstring,cstring_literals
l___const.main.cmpval: ; @__const.main.cmpval
.asciz "LOCALVARIABLE"
l___const.main.shortval: ; @__const.main.shortval
.asciz "abc"
l_.str: ; @.str
.asciz "LIBTOKENCAP"
l_.str.2: ; @.str.2
.asciz "BUGMENOT"
l_.str.4: ; @.str.4
.asciz "BUFFEROVERFLOW"
l_.str.5: ; @.str.5
.asciz "TEST"
l_.str.6: ; @.str.6
.asciz "This will only crash with libdislocator: %s\n"
l_str: ; @str
.asciz "I do not know your string"
l_str.12: ; @str.12
.asciz "global var memcmp works!"
l_str.13: ; @str.13
.asciz "short local var memcmp works!"
l_str.14: ; @str.14
.asciz "local var memcmp works!"
l_str.15: ; @str.15
.asciz "GG you eat cmp tokens for breakfast!"
l_str.16: ; @str.16
.asciz "your string was bugmenot"
l_str.17: ; @str.17
.asciz "your string was libtokencap"
.subsections_via_symbols
| the_stack_data/215769248.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _sqlite3_nextchar_init ## -- Begin function sqlite3_nextchar_init
.p2align 4, 0x90
_sqlite3_nextchar_init: ## @sqlite3_nextchar_init
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdi, %rbx
movq %rdx, %rdi
callq _SQLITE_EXTENSION_INIT2
movq _SQLITE_UTF8@GOTPCREL(%rip), %r14
movl (%r14), %ecx
movq _nextCharFunc@GOTPCREL(%rip), %r15
movl (%r15), %r9d
leaq L_.str(%rip), %rsi
movq %rbx, %rdi
movl $3, %edx
xorl %r8d, %r8d
pushq $0
pushq $0
callq _sqlite3_create_function
addq $16, %rsp
movq _SQLITE_OK@GOTPCREL(%rip), %r12
movl (%r12), %ecx
cmpl %ecx, %eax
jne LBB0_2
## %bb.1:
movl (%r14), %ecx
movl (%r15), %r9d
leaq L_.str(%rip), %rsi
movq %rbx, %rdi
movl $4, %edx
xorl %r8d, %r8d
pushq $0
pushq $0
callq _sqlite3_create_function
addq $16, %rsp
movl (%r12), %ecx
LBB0_2:
cmpl %ecx, %eax
jne LBB0_4
## %bb.3:
movl (%r14), %ecx
movl (%r15), %r9d
leaq L_.str(%rip), %rsi
movq %rbx, %rdi
movl $5, %edx
xorl %r8d, %r8d
pushq $0
pushq $0
callq _sqlite3_create_function
addq $16, %rsp
LBB0_4:
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _SQLITE_OK,4,2 ## @SQLITE_OK
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "next_char"
.comm _SQLITE_UTF8,4,2 ## @SQLITE_UTF8
.comm _nextCharFunc,4,2 ## @nextCharFunc
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _sqlite3_nextchar_init ; -- Begin function sqlite3_nextchar_init
.p2align 2
_sqlite3_nextchar_init: ; @sqlite3_nextchar_init
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x19, x0
mov x0, x2
bl _SQLITE_EXTENSION_INIT2
Lloh0:
adrp x20, _SQLITE_UTF8@GOTPAGE
Lloh1:
ldr x20, [x20, _SQLITE_UTF8@GOTPAGEOFF]
ldr w3, [x20]
Lloh2:
adrp x21, _nextCharFunc@GOTPAGE
Lloh3:
ldr x21, [x21, _nextCharFunc@GOTPAGEOFF]
ldr w5, [x21]
Lloh4:
adrp x1, l_.str@PAGE
Lloh5:
add x1, x1, l_.str@PAGEOFF
mov x0, x19
mov w2, #3
mov w4, #0
mov w6, #0
mov w7, #0
bl _sqlite3_create_function
Lloh6:
adrp x22, _SQLITE_OK@GOTPAGE
Lloh7:
ldr x22, [x22, _SQLITE_OK@GOTPAGEOFF]
ldr w8, [x22]
cmp w0, w8
b.ne LBB0_2
; %bb.1:
ldr w3, [x20]
ldr w5, [x21]
Lloh8:
adrp x1, l_.str@PAGE
Lloh9:
add x1, x1, l_.str@PAGEOFF
mov x0, x19
mov w2, #4
mov w4, #0
mov w6, #0
mov w7, #0
bl _sqlite3_create_function
ldr w8, [x22]
LBB0_2:
cmp w0, w8
b.ne LBB0_4
; %bb.3:
ldr w3, [x20]
ldr w5, [x21]
Lloh10:
adrp x1, l_.str@PAGE
Lloh11:
add x1, x1, l_.str@PAGEOFF
mov x0, x19
mov w2, #5
mov w4, #0
mov w6, #0
mov w7, #0
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
b _sqlite3_create_function
LBB0_4:
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
.loh AdrpLdrGot Lloh6, Lloh7
.loh AdrpAdd Lloh4, Lloh5
.loh AdrpLdrGot Lloh2, Lloh3
.loh AdrpLdrGot Lloh0, Lloh1
.loh AdrpAdd Lloh8, Lloh9
.loh AdrpAdd Lloh10, Lloh11
.cfi_endproc
; -- End function
.comm _SQLITE_OK,4,2 ; @SQLITE_OK
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "next_char"
.comm _SQLITE_UTF8,4,2 ; @SQLITE_UTF8
.comm _nextCharFunc,4,2 ; @nextCharFunc
.subsections_via_symbols
| AnghaBench/sqlcipher/ext/misc/extr_nextchar.c_sqlite3_nextchar_init.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function store_restart_layout
_store_restart_layout: ## @store_restart_layout
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
subq $24, %rsp
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq _LC_NUMERIC@GOTPCREL(%rip), %rbx
movl (%rbx), %edi
leaq L_.str(%rip), %rsi
callq _setlocale
xorl %edi, %edi
callq _yajl_gen_alloc
movq _croot@GOTPCREL(%rip), %rcx
movl (%rcx), %esi
movl %eax, %edi
movl $1, %edx
callq _dump_node
movl (%rbx), %edi
leaq L_.str.1(%rip), %rsi
callq _setlocale
movq _get_buf@GOTPCREL(%rip), %rax
movslq (%rax), %rdi
leaq -40(%rbp), %rsi
leaq -32(%rbp), %rdx
xorl %eax, %eax
callq _y
movq _config@GOTPCREL(%rip), %rax
movq (%rax), %rdi
testq %rdi, %rdi
je LBB0_2
## %bb.1:
callq _resolve_tilde
movq %rax, %r14
jmp LBB0_3
LBB0_2:
leaq L_.str.2(%rip), %rdi
callq _get_process_filename
movq %rax, %r14
testq %rax, %rax
je LBB0_13
LBB0_3:
movq %r14, %rdi
callq _sstrdup
movq %rax, %r15
movq %rax, %rdi
callq _dirname
movq %rax, %rbx
leaq L_.str.3(%rip), %rdi
movq %rax, %rsi
xorl %eax, %eax
callq _DLOG
movq _DEFAULT_DIR_MODE@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq %rbx, %rdi
callq _mkdirp
testq %rax, %rax
je LBB0_5
## %bb.4:
leaq L_.str.4(%rip), %rdi
movq %rbx, %rsi
xorl %eax, %eax
callq _ELOG
LBB0_5:
movq %r15, %rdi
callq _free
movq _O_WRONLY@GOTPCREL(%rip), %rax
movq _O_CREAT@GOTPCREL(%rip), %rcx
movl (%rcx), %esi
orl (%rax), %esi
movq _O_TRUNC@GOTPCREL(%rip), %rax
orl (%rax), %esi
movq _S_IRUSR@GOTPCREL(%rip), %rax
movq _S_IWUSR@GOTPCREL(%rip), %rcx
movl (%rcx), %edx
orl (%rax), %edx
movq %r14, %rdi
callq _open
cmpl $-1, %eax
je LBB0_12
## %bb.6:
movl %eax, %ebx
movq -40(%rbp), %rsi
movq -32(%rbp), %rdx
movl %eax, %edi
callq _writeall
cmpl $-1, %eax
je LBB0_10
## %bb.7:
movl %ebx, %edi
callq _close
movq -32(%rbp), %rsi
testq %rsi, %rsi
je LBB0_9
## %bb.8:
movq -40(%rbp), %rdx
leaq L_.str.7(%rip), %rdi
## kill: def $esi killed $esi killed $rsi
xorl %eax, %eax
callq _DLOG
LBB0_9:
movq _free@GOTPCREL(%rip), %rdi
xorl %eax, %eax
callq _y
jmp LBB0_14
LBB0_10:
movq _errno@GOTPCREL(%rip), %rax
movl (%rax), %edi
callq _strerror
leaq L_.str.6(%rip), %rdi
movq %r14, %rsi
movl %eax, %edx
xorl %eax, %eax
callq _ELOG
movq %r14, %rdi
callq _free
movl %ebx, %edi
callq _close
LBB0_13:
xorl %r14d, %r14d
LBB0_14:
movq %r14, %rax
addq $24, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
LBB0_12:
leaq L_.str.5(%rip), %rdi
callq _perror
movq %r14, %rdi
callq _free
jmp LBB0_13
.cfi_endproc
## -- End function
.comm _LC_NUMERIC,4,2 ## @LC_NUMERIC
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "C"
.comm _croot,4,2 ## @croot
L_.str.1: ## @.str.1
.space 1
.comm _get_buf,4,2 ## @get_buf
.comm _config,8,3 ## @config
L_.str.2: ## @.str.2
.asciz "restart-state"
L_.str.3: ## @.str.3
.asciz "Creating \"%s\" for storing the restart layout\n"
.comm _DEFAULT_DIR_MODE,4,2 ## @DEFAULT_DIR_MODE
L_.str.4: ## @.str.4
.asciz "Could not create \"%s\" for storing the restart layout, layout will be lost.\n"
.comm _O_WRONLY,4,2 ## @O_WRONLY
.comm _O_CREAT,4,2 ## @O_CREAT
.comm _O_TRUNC,4,2 ## @O_TRUNC
.comm _S_IRUSR,4,2 ## @S_IRUSR
.comm _S_IWUSR,4,2 ## @S_IWUSR
L_.str.5: ## @.str.5
.asciz "open()"
L_.str.6: ## @.str.6
.asciz "Could not write restart layout to \"%s\", layout will be lost: %s\n"
.comm _errno,4,2 ## @errno
L_.str.7: ## @.str.7
.asciz "layout: %.*s\n"
.no_dead_strip _store_restart_layout
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function store_restart_layout
_store_restart_layout: ; @store_restart_layout
.cfi_startproc
; %bb.0:
sub sp, sp, #80
.cfi_def_cfa_offset 80
stp x22, x21, [sp, #32] ; 16-byte Folded Spill
stp x20, x19, [sp, #48] ; 16-byte Folded Spill
stp x29, x30, [sp, #64] ; 16-byte Folded Spill
add x29, sp, #64
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
Lloh0:
adrp x19, _LC_NUMERIC@GOTPAGE
Lloh1:
ldr x19, [x19, _LC_NUMERIC@GOTPAGEOFF]
ldr w0, [x19]
Lloh2:
adrp x1, l_.str@PAGE
Lloh3:
add x1, x1, l_.str@PAGEOFF
bl _setlocale
mov x0, #0
bl _yajl_gen_alloc
Lloh4:
adrp x8, _croot@GOTPAGE
Lloh5:
ldr x8, [x8, _croot@GOTPAGEOFF]
Lloh6:
ldr w1, [x8]
mov w2, #1
bl _dump_node
ldr w0, [x19]
Lloh7:
adrp x1, l_.str.1@PAGE
Lloh8:
add x1, x1, l_.str.1@PAGEOFF
bl _setlocale
Lloh9:
adrp x8, _get_buf@GOTPAGE
Lloh10:
ldr x8, [x8, _get_buf@GOTPAGEOFF]
Lloh11:
ldrsw x0, [x8]
add x8, sp, #16
add x9, sp, #24
stp x9, x8, [sp]
bl _y
Lloh12:
adrp x8, _config@GOTPAGE
Lloh13:
ldr x8, [x8, _config@GOTPAGEOFF]
Lloh14:
ldr x0, [x8]
cbz x0, LBB0_2
; %bb.1:
bl _resolve_tilde
mov x19, x0
b LBB0_3
LBB0_2:
Lloh15:
adrp x0, l_.str.2@PAGE
Lloh16:
add x0, x0, l_.str.2@PAGEOFF
bl _get_process_filename
mov x19, x0
cbz x0, LBB0_12
LBB0_3:
mov x0, x19
bl _sstrdup
mov x20, x0
bl _dirname
mov x21, x0
str x0, [sp]
Lloh17:
adrp x0, l_.str.3@PAGE
Lloh18:
add x0, x0, l_.str.3@PAGEOFF
bl _DLOG
Lloh19:
adrp x8, _DEFAULT_DIR_MODE@GOTPAGE
Lloh20:
ldr x8, [x8, _DEFAULT_DIR_MODE@GOTPAGEOFF]
Lloh21:
ldr w1, [x8]
mov x0, x21
bl _mkdirp
cbz x0, LBB0_5
; %bb.4:
Lloh22:
adrp x0, l_.str.4@PAGE
Lloh23:
add x0, x0, l_.str.4@PAGEOFF
mov x1, x21
bl _ELOG
LBB0_5:
mov x0, x20
bl _free
Lloh24:
adrp x8, _O_WRONLY@GOTPAGE
Lloh25:
ldr x8, [x8, _O_WRONLY@GOTPAGEOFF]
Lloh26:
ldr w8, [x8]
Lloh27:
adrp x9, _O_CREAT@GOTPAGE
Lloh28:
ldr x9, [x9, _O_CREAT@GOTPAGEOFF]
Lloh29:
ldr w9, [x9]
orr w8, w9, w8
Lloh30:
adrp x9, _O_TRUNC@GOTPAGE
Lloh31:
ldr x9, [x9, _O_TRUNC@GOTPAGEOFF]
Lloh32:
ldr w9, [x9]
Lloh33:
adrp x10, _S_IRUSR@GOTPAGE
Lloh34:
ldr x10, [x10, _S_IRUSR@GOTPAGEOFF]
orr w1, w8, w9
Lloh35:
ldr w8, [x10]
Lloh36:
adrp x9, _S_IWUSR@GOTPAGE
Lloh37:
ldr x9, [x9, _S_IWUSR@GOTPAGEOFF]
Lloh38:
ldr w9, [x9]
orr w2, w9, w8
mov x0, x19
bl _open
cmn w0, #1
b.eq LBB0_13
; %bb.6:
mov x20, x0
ldp x2, x1, [sp, #16]
bl _writeall
cmn w0, #1
b.eq LBB0_10
; %bb.7:
mov x0, x20
bl _close
ldr x8, [sp, #16]
cbz x8, LBB0_9
; %bb.8:
ldr x9, [sp, #24]
stp x8, x9, [sp]
Lloh39:
adrp x0, l_.str.7@PAGE
Lloh40:
add x0, x0, l_.str.7@PAGEOFF
bl _DLOG
LBB0_9:
Lloh41:
adrp x0, _free@GOTPAGE
Lloh42:
ldr x0, [x0, _free@GOTPAGEOFF]
bl _y
b LBB0_12
LBB0_10:
Lloh43:
adrp x8, _errno@GOTPAGE
Lloh44:
ldr x8, [x8, _errno@GOTPAGEOFF]
Lloh45:
ldr w0, [x8]
bl _strerror
; kill: def $w0 killed $w0 def $x0
str x0, [sp]
Lloh46:
adrp x0, l_.str.6@PAGE
Lloh47:
add x0, x0, l_.str.6@PAGEOFF
mov x1, x19
bl _ELOG
mov x0, x19
bl _free
mov x0, x20
bl _close
LBB0_11:
mov x19, #0
LBB0_12:
mov x0, x19
ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
ldp x22, x21, [sp, #32] ; 16-byte Folded Reload
add sp, sp, #80
ret
LBB0_13:
Lloh48:
adrp x0, l_.str.5@PAGE
Lloh49:
add x0, x0, l_.str.5@PAGEOFF
bl _perror
mov x0, x19
bl _free
b LBB0_11
.loh AdrpLdrGotLdr Lloh12, Lloh13, Lloh14
.loh AdrpLdrGotLdr Lloh9, Lloh10, Lloh11
.loh AdrpAdd Lloh7, Lloh8
.loh AdrpLdrGotLdr Lloh4, Lloh5, Lloh6
.loh AdrpAdd Lloh2, Lloh3
.loh AdrpLdrGot Lloh0, Lloh1
.loh AdrpAdd Lloh15, Lloh16
.loh AdrpLdrGotLdr Lloh19, Lloh20, Lloh21
.loh AdrpAdd Lloh17, Lloh18
.loh AdrpAdd Lloh22, Lloh23
.loh AdrpLdrGotLdr Lloh36, Lloh37, Lloh38
.loh AdrpLdrGotLdr Lloh33, Lloh34, Lloh35
.loh AdrpLdrGotLdr Lloh30, Lloh31, Lloh32
.loh AdrpLdrGotLdr Lloh27, Lloh28, Lloh29
.loh AdrpLdrGotLdr Lloh24, Lloh25, Lloh26
.loh AdrpAdd Lloh39, Lloh40
.loh AdrpLdrGot Lloh41, Lloh42
.loh AdrpAdd Lloh46, Lloh47
.loh AdrpLdrGotLdr Lloh43, Lloh44, Lloh45
.loh AdrpAdd Lloh48, Lloh49
.cfi_endproc
; -- End function
.comm _LC_NUMERIC,4,2 ; @LC_NUMERIC
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "C"
.comm _croot,4,2 ; @croot
l_.str.1: ; @.str.1
.space 1
.comm _get_buf,4,2 ; @get_buf
.comm _config,8,3 ; @config
l_.str.2: ; @.str.2
.asciz "restart-state"
l_.str.3: ; @.str.3
.asciz "Creating \"%s\" for storing the restart layout\n"
.comm _DEFAULT_DIR_MODE,4,2 ; @DEFAULT_DIR_MODE
l_.str.4: ; @.str.4
.asciz "Could not create \"%s\" for storing the restart layout, layout will be lost.\n"
.comm _O_WRONLY,4,2 ; @O_WRONLY
.comm _O_CREAT,4,2 ; @O_CREAT
.comm _O_TRUNC,4,2 ; @O_TRUNC
.comm _S_IRUSR,4,2 ; @S_IRUSR
.comm _S_IWUSR,4,2 ; @S_IWUSR
l_.str.5: ; @.str.5
.asciz "open()"
l_.str.6: ; @.str.6
.asciz "Could not write restart layout to \"%s\", layout will be lost: %s\n"
.comm _errno,4,2 ; @errno
l_.str.7: ; @.str.7
.asciz "layout: %.*s\n"
.no_dead_strip _store_restart_layout
.subsections_via_symbols
| AnghaBench/i3/src/extr_util.c_store_restart_layout.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function ioapic_enable_source
_ioapic_enable_source: ## @ioapic_enable_source
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdi, %r14
movq (%rdi), %r15
movq _icu_lock@GOTPCREL(%rip), %rdi
callq _mtx_lock_spin
cmpq $0, 8(%r14)
je LBB0_2
## %bb.1:
movq _IOART_INTMASK@GOTPCREL(%rip), %rax
movl (%rax), %ebx
notl %ebx
andl (%r14), %ebx
movl (%r15), %r15d
movl 16(%r14), %edi
callq _IOAPIC_REDTBL_LO
movl %r15d, %edi
movl %eax, %esi
movl %ebx, %edx
callq _ioapic_write
movq $0, 8(%r14)
LBB0_2:
movq _icu_lock@GOTPCREL(%rip), %rdi
addq $8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
jmp _mtx_unlock_spin ## TAILCALL
.cfi_endproc
## -- End function
.comm _icu_lock,4,2 ## @icu_lock
.comm _IOART_INTMASK,4,2 ## @IOART_INTMASK
.no_dead_strip _ioapic_enable_source
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function ioapic_enable_source
_ioapic_enable_source: ; @ioapic_enable_source
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x19, x0
ldr x21, [x0]
Lloh0:
adrp x0, _icu_lock@GOTPAGE
Lloh1:
ldr x0, [x0, _icu_lock@GOTPAGEOFF]
bl _mtx_lock_spin
ldr x8, [x19, #8]
cbz x8, LBB0_2
; %bb.1:
ldr w8, [x19]
Lloh2:
adrp x9, _IOART_INTMASK@GOTPAGE
Lloh3:
ldr x9, [x9, _IOART_INTMASK@GOTPAGEOFF]
Lloh4:
ldr w9, [x9]
bic w20, w8, w9
ldr w21, [x21]
ldr w0, [x19, #16]
bl _IOAPIC_REDTBL_LO
mov x1, x0
mov x0, x21
mov x2, x20
bl _ioapic_write
str xzr, [x19, #8]
LBB0_2:
Lloh5:
adrp x0, _icu_lock@GOTPAGE
Lloh6:
ldr x0, [x0, _icu_lock@GOTPAGEOFF]
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
b _mtx_unlock_spin
.loh AdrpLdrGot Lloh0, Lloh1
.loh AdrpLdrGotLdr Lloh2, Lloh3, Lloh4
.loh AdrpLdrGot Lloh5, Lloh6
.cfi_endproc
; -- End function
.comm _icu_lock,4,2 ; @icu_lock
.comm _IOART_INTMASK,4,2 ; @IOART_INTMASK
.no_dead_strip _ioapic_enable_source
.subsections_via_symbols
| AnghaBench/freebsd/sys/x86/x86/extr_io_apic.c_ioapic_enable_source.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _AcpiNsGetType ## -- Begin function AcpiNsGetType
.p2align 4, 0x90
_AcpiNsGetType: ## @AcpiNsGetType
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
movq %rdi, %rbx
movq _NsGetType@GOTPCREL(%rip), %rax
movl (%rax), %edi
callq _ACPI_FUNCTION_TRACE
testq %rbx, %rbx
jne LBB0_2
## %bb.1:
leaq L_.str(%rip), %rdi
## kill: def $edi killed $edi killed $rdi
callq _ACPI_WARNING
movq _ACPI_TYPE_ANY@GOTPCREL(%rip), %rax
movl (%rax), %edi
callq _return_UINT8
LBB0_2:
movl (%rbx), %edi
addq $8, %rsp
popq %rbx
popq %rbp
jmp _return_UINT8 ## TAILCALL
.cfi_endproc
## -- End function
.comm _NsGetType,4,2 ## @NsGetType
.comm _AE_INFO,4,2 ## @AE_INFO
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "Null Node parameter"
.comm _ACPI_TYPE_ANY,4,2 ## @ACPI_TYPE_ANY
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _AcpiNsGetType ; -- Begin function AcpiNsGetType
.p2align 2
_AcpiNsGetType: ; @AcpiNsGetType
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x0
Lloh0:
adrp x8, _NsGetType@GOTPAGE
Lloh1:
ldr x8, [x8, _NsGetType@GOTPAGEOFF]
Lloh2:
ldr w0, [x8]
bl _ACPI_FUNCTION_TRACE
cbnz x19, LBB0_2
; %bb.1:
Lloh3:
adrp x0, l_.str@PAGE
Lloh4:
add x0, x0, l_.str@PAGEOFF
; kill: def $w0 killed $w0 killed $x0
bl _ACPI_WARNING
Lloh5:
adrp x8, _ACPI_TYPE_ANY@GOTPAGE
Lloh6:
ldr x8, [x8, _ACPI_TYPE_ANY@GOTPAGEOFF]
Lloh7:
ldr w0, [x8]
bl _return_UINT8
LBB0_2:
ldr w0, [x19]
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
b _return_UINT8
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGotLdr Lloh5, Lloh6, Lloh7
.loh AdrpAdd Lloh3, Lloh4
.cfi_endproc
; -- End function
.comm _NsGetType,4,2 ; @NsGetType
.comm _AE_INFO,4,2 ; @AE_INFO
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "Null Node parameter"
.comm _ACPI_TYPE_ANY,4,2 ; @ACPI_TYPE_ANY
.subsections_via_symbols
| AnghaBench/reactos/drivers/bus/acpi/acpica/namespace/extr_nsutils.c_AcpiNsGetType.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function ocfs2_complete_lock_res_refresh
_ocfs2_complete_lock_res_refresh: ## @ocfs2_complete_lock_res_refresh
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %esi, %r15d
movq %rdi, %rbx
xorl %eax, %eax
callq _mlog_entry_void
leaq 4(%rbx), %r14
movq %r14, %rdi
callq _spin_lock_irqsave
movq _OCFS2_LOCK_REFRESHING@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq %rbx, %rdi
callq _lockres_clear_flags
testl %r15d, %r15d
jne LBB0_2
## %bb.1:
movq _OCFS2_LOCK_NEEDS_REFRESH@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq %rbx, %rdi
callq _lockres_clear_flags
LBB0_2:
movq %r14, %rdi
callq _spin_unlock_irqrestore
movq %rbx, %rdi
callq _wake_up
xorl %eax, %eax
addq $8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
jmp _mlog_exit_void ## TAILCALL
.cfi_endproc
## -- End function
.comm _OCFS2_LOCK_REFRESHING,4,2 ## @OCFS2_LOCK_REFRESHING
.comm _OCFS2_LOCK_NEEDS_REFRESH,4,2 ## @OCFS2_LOCK_NEEDS_REFRESH
.no_dead_strip _ocfs2_complete_lock_res_refresh
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function ocfs2_complete_lock_res_refresh
_ocfs2_complete_lock_res_refresh: ; @ocfs2_complete_lock_res_refresh
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x21, x1
mov x19, x0
bl _mlog_entry_void
add x20, x19, #4
mov x0, x20
bl _spin_lock_irqsave
Lloh0:
adrp x8, _OCFS2_LOCK_REFRESHING@GOTPAGE
Lloh1:
ldr x8, [x8, _OCFS2_LOCK_REFRESHING@GOTPAGEOFF]
Lloh2:
ldr w1, [x8]
mov x0, x19
bl _lockres_clear_flags
cbnz w21, LBB0_2
; %bb.1:
Lloh3:
adrp x8, _OCFS2_LOCK_NEEDS_REFRESH@GOTPAGE
Lloh4:
ldr x8, [x8, _OCFS2_LOCK_NEEDS_REFRESH@GOTPAGEOFF]
Lloh5:
ldr w1, [x8]
mov x0, x19
bl _lockres_clear_flags
LBB0_2:
mov x0, x20
bl _spin_unlock_irqrestore
mov x0, x19
bl _wake_up
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
b _mlog_exit_void
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.cfi_endproc
; -- End function
.comm _OCFS2_LOCK_REFRESHING,4,2 ; @OCFS2_LOCK_REFRESHING
.comm _OCFS2_LOCK_NEEDS_REFRESH,4,2 ; @OCFS2_LOCK_NEEDS_REFRESH
.no_dead_strip _ocfs2_complete_lock_res_refresh
.subsections_via_symbols
| AnghaBench/fastsocket/kernel/fs/ocfs2/extr_dlmglue.c_ocfs2_complete_lock_res_refresh.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movl %edi, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
ret
.cfi_endproc
; -- End function
.subsections_via_symbols
| the_stack_data/51700284.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _cipher_generic_cipher ## -- Begin function cipher_generic_cipher
.p2align 4, 0x90
_cipher_generic_cipher: ## @cipher_generic_cipher
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
cmpq %r9, %rcx
jae LBB0_3
## %bb.1:
movq _ERR_LIB_PROV@GOTPCREL(%rip), %rax
movl (%rax), %edi
movq _PROV_R_OUTPUT_BUFFER_TOO_SMALL@GOTPCREL(%rip), %rax
jmp LBB0_2
LBB0_3:
movq %r9, %rbx
movq %rdx, %r14
movq (%rdi), %rax
movq %r8, %rdx
movq %r9, %rcx
callq *(%rax)
testl %eax, %eax
je LBB0_4
## %bb.5:
movq %rbx, (%r14)
movl $1, %eax
jmp LBB0_6
LBB0_4:
movq _ERR_LIB_PROV@GOTPCREL(%rip), %rax
movl (%rax), %edi
movq _PROV_R_CIPHER_OPERATION_FAILED@GOTPCREL(%rip), %rax
LBB0_2:
movl (%rax), %esi
callq _ERR_raise
xorl %eax, %eax
LBB0_6:
popq %rbx
popq %r14
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _ERR_LIB_PROV,4,2 ## @ERR_LIB_PROV
.comm _PROV_R_OUTPUT_BUFFER_TOO_SMALL,4,2 ## @PROV_R_OUTPUT_BUFFER_TOO_SMALL
.comm _PROV_R_CIPHER_OPERATION_FAILED,4,2 ## @PROV_R_CIPHER_OPERATION_FAILED
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _cipher_generic_cipher ; -- Begin function cipher_generic_cipher
.p2align 2
_cipher_generic_cipher: ; @cipher_generic_cipher
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
cmp x3, x5
b.hs LBB0_2
; %bb.1:
Lloh0:
adrp x8, _ERR_LIB_PROV@GOTPAGE
Lloh1:
ldr x8, [x8, _ERR_LIB_PROV@GOTPAGEOFF]
Lloh2:
ldr w0, [x8]
Lloh3:
adrp x8, _PROV_R_OUTPUT_BUFFER_TOO_SMALL@GOTPAGE
Lloh4:
ldr x8, [x8, _PROV_R_OUTPUT_BUFFER_TOO_SMALL@GOTPAGEOFF]
b LBB0_5
LBB0_2:
mov x19, x5
mov x20, x2
ldr x8, [x0]
ldr x8, [x8]
mov x2, x4
mov x3, x5
blr x8
cbz w0, LBB0_4
; %bb.3:
str x19, [x20]
mov w0, #1
b LBB0_6
LBB0_4:
Lloh5:
adrp x8, _ERR_LIB_PROV@GOTPAGE
Lloh6:
ldr x8, [x8, _ERR_LIB_PROV@GOTPAGEOFF]
Lloh7:
ldr w0, [x8]
Lloh8:
adrp x8, _PROV_R_CIPHER_OPERATION_FAILED@GOTPAGE
Lloh9:
ldr x8, [x8, _PROV_R_CIPHER_OPERATION_FAILED@GOTPAGEOFF]
LBB0_5:
ldr w1, [x8]
bl _ERR_raise
mov w0, #0
LBB0_6:
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.loh AdrpLdrGot Lloh3, Lloh4
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGot Lloh8, Lloh9
.loh AdrpLdrGotLdr Lloh5, Lloh6, Lloh7
.cfi_endproc
; -- End function
.comm _ERR_LIB_PROV,4,2 ; @ERR_LIB_PROV
.comm _PROV_R_OUTPUT_BUFFER_TOO_SMALL,4,2 ; @PROV_R_OUTPUT_BUFFER_TOO_SMALL
.comm _PROV_R_CIPHER_OPERATION_FAILED,4,2 ; @PROV_R_CIPHER_OPERATION_FAILED
.subsections_via_symbols
| AnghaBench/openssl/providers/common/ciphers/extr_cipher_common.c_cipher_generic_cipher.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
xorl %eax, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
mov w0, #0
ret
.cfi_endproc
; -- End function
.subsections_via_symbols
| the_stack_data/144765.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _G_SetMovedir ## -- Begin function G_SetMovedir
.p2align 4, 0x90
_G_SetMovedir: ## @G_SetMovedir
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %ecx, %r15d
movq %rdx, %r12
movl %esi, %r14d
movq %rdi, %rbx
movabsq $-4294967296, %rdx ## imm = 0xFFFFFFFF00000000
xorl %ecx, %ecx
callq _VectorCompare
testq %rax, %rax
je LBB0_3
## %bb.1:
xorl %edi, %edi
movl $1, %esi
jmp LBB0_2
LBB0_3:
movabsq $-8589934592, %rdx ## imm = 0xFFFFFFFE00000000
movq %rbx, %rdi
movl %r14d, %esi
xorl %ecx, %ecx
callq _VectorCompare
testq %rax, %rax
je LBB0_5
## %bb.4:
xorl %edi, %edi
movl $-1, %esi
LBB0_2:
movq %r12, %rdx
movl %r15d, %ecx
callq _VectorCopy
LBB0_6:
movq %rbx, %rdi
movl %r14d, %esi
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
jmp _VectorClear ## TAILCALL
LBB0_5:
movq %rbx, %rdi
movl %r14d, %esi
movq %r12, %rdx
movl %r15d, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq _AngleVectors
jmp LBB0_6
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _G_SetMovedir ; -- Begin function G_SetMovedir
.p2align 2
_G_SetMovedir: ; @G_SetMovedir
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x22, x3
mov x21, x2
mov x19, x0
and x20, x1, #0xffffffff
mov x1, x20
mov x2, #-4294967296
mov x3, #0
bl _VectorCompare
cbz x0, LBB0_2
; %bb.1:
and x3, x22, #0xffffffff
mov x0, #0
mov w1, #1
b LBB0_4
LBB0_2:
mov x0, x19
mov x1, x20
mov x2, #-8589934592
mov x3, #0
bl _VectorCompare
and x3, x22, #0xffffffff
cbz x0, LBB0_6
; %bb.3:
mov x0, #0
mov w1, #-1
LBB0_4:
mov x2, x21
bl _VectorCopy
LBB0_5:
mov x0, x19
mov x1, x20
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
b _VectorClear
LBB0_6:
mov x0, x19
mov x1, x20
mov x2, x21
mov x4, #0
mov x5, #0
bl _AngleVectors
b LBB0_5
.cfi_endproc
; -- End function
.subsections_via_symbols
| AnghaBench/Quake-III-Arena/code/game/extr_g_utils.c_G_SetMovedir.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _vnode_pager_dirtied ## -- Begin function vnode_pager_dirtied
.p2align 4, 0x90
_vnode_pager_dirtied: ## @vnode_pager_dirtied
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
testq %rdi, %rdi
je LBB0_2
## %bb.1:
movq (%rdi), %rax
cmpq _vnode_pager_ops@GOTPCREL(%rip), %rax
je LBB0_3
LBB0_2:
popq %rbx
popq %r14
popq %rbp
retq
LBB0_3:
movl %edx, %r14d
movl %esi, %ebx
callq _vnode_pager_lookup
movl (%rax), %edi
movl %ebx, %esi
movl %r14d, %edx
popq %rbx
popq %r14
popq %rbp
jmp _vnode_pager_was_dirtied ## TAILCALL
.cfi_endproc
## -- End function
.comm _vnode_pager_ops,4,2 ## @vnode_pager_ops
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _vnode_pager_dirtied ; -- Begin function vnode_pager_dirtied
.p2align 2
_vnode_pager_dirtied: ; @vnode_pager_dirtied
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
cbz x0, LBB0_2
; %bb.1:
ldr x8, [x0]
Lloh0:
adrp x9, _vnode_pager_ops@GOTPAGE
Lloh1:
ldr x9, [x9, _vnode_pager_ops@GOTPAGEOFF]
cmp x8, x9
b.eq LBB0_3
LBB0_2:
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
LBB0_3:
mov x19, x2
mov x20, x1
bl _vnode_pager_lookup
ldr w0, [x0]
mov x1, x20
mov x2, x19
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
b _vnode_pager_was_dirtied
.loh AdrpLdrGot Lloh0, Lloh1
.cfi_endproc
; -- End function
.comm _vnode_pager_ops,4,2 ; @vnode_pager_ops
.subsections_via_symbols
| AnghaBench/darwin-xnu/osfmk/vm/extr_bsd_vm.c_vnode_pager_dirtied.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _svn_fs_x__read_content ## -- Begin function svn_fs_x__read_content
.p2align 4, 0x90
_svn_fs_x__read_content: ## @svn_fs_x__read_content
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdx, %r14
movq %rsi, %r15
movq $0, (%rdi)
movq _SVN_FS_X__RECOVERABLE_RETRY_COUNT@GOTPCREL(%rip), %r12
movl (%r12), %eax
testl %eax, %eax
jle LBB0_6
## %bb.1:
movq %rdi, %r13
movl $1, %ebx
.p2align 4, 0x90
LBB0_2: ## =>This Inner Loop Header: Depth=1
xorl %ecx, %ecx
cmpl %eax, %ebx
setl %cl
movq %r13, %rdi
xorl %esi, %esi
movq %r15, %rdx
movq %r14, %r8
callq _svn_fs_x__try_stringbuf_from_file
movl %eax, %edi
callq _SVN_ERR
movq (%r13), %rcx
testq %rcx, %rcx
jne LBB0_4
## %bb.3: ## in Loop: Header=BB0_2 Depth=1
leal 1(%rbx), %edx
movl (%r12), %eax
cmpl %eax, %ebx
movl %edx, %ebx
jl LBB0_2
LBB0_4:
testq %rcx, %rcx
je LBB0_6
## %bb.5:
movq _SVN_NO_ERROR@GOTPCREL(%rip), %rax
movq (%rax), %rax
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB0_6:
movq _SVN_ERR_FS_CORRUPT@GOTPCREL(%rip), %rax
movl (%rax), %r12d
leaq L_.str(%rip), %rdi
callq __
movl %eax, %ebx
movq %r15, %rdi
movq %r14, %rsi
callq _svn_dirent_local_style
movl %r12d, %edi
xorl %esi, %esi
movl %ebx, %edx
movl %eax, %ecx
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp _svn_error_createf ## TAILCALL
.cfi_endproc
## -- End function
.comm _SVN_FS_X__RECOVERABLE_RETRY_COUNT,4,2 ## @SVN_FS_X__RECOVERABLE_RETRY_COUNT
.comm _SVN_ERR_FS_CORRUPT,4,2 ## @SVN_ERR_FS_CORRUPT
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "Can't read '%s'"
.comm _SVN_NO_ERROR,8,3 ## @SVN_NO_ERROR
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _svn_fs_x__read_content ; -- Begin function svn_fs_x__read_content
.p2align 2
_svn_fs_x__read_content: ; @svn_fs_x__read_content
.cfi_startproc
; %bb.0:
stp x24, x23, [sp, #-64]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 64
stp x22, x21, [sp, #16] ; 16-byte Folded Spill
stp x20, x19, [sp, #32] ; 16-byte Folded Spill
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
mov x19, x2
mov x20, x1
str xzr, [x0]
Lloh0:
adrp x22, _SVN_FS_X__RECOVERABLE_RETRY_COUNT@GOTPAGE
Lloh1:
ldr x22, [x22, _SVN_FS_X__RECOVERABLE_RETRY_COUNT@GOTPAGEOFF]
ldr w8, [x22]
cmp w8, #1
b.lt LBB0_5
; %bb.1:
mov x21, x0
mov w23, #1
LBB0_2: ; =>This Inner Loop Header: Depth=1
cmp w23, w8
cset w3, lt
mov x0, x21
mov x1, #0
mov x2, x20
mov x4, x19
bl _svn_fs_x__try_stringbuf_from_file
bl _SVN_ERR
ldr x9, [x21]
ldr w8, [x22]
cmp x9, #0
ccmp w23, w8, #0, eq
add w23, w23, #1
b.lt LBB0_2
; %bb.3:
cbz x9, LBB0_5
; %bb.4:
Lloh2:
adrp x8, _SVN_NO_ERROR@GOTPAGE
Lloh3:
ldr x8, [x8, _SVN_NO_ERROR@GOTPAGEOFF]
Lloh4:
ldr x0, [x8]
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
ldp x24, x23, [sp], #64 ; 16-byte Folded Reload
ret
LBB0_5:
Lloh5:
adrp x8, _SVN_ERR_FS_CORRUPT@GOTPAGE
Lloh6:
ldr x8, [x8, _SVN_ERR_FS_CORRUPT@GOTPAGEOFF]
Lloh7:
ldr w21, [x8]
Lloh8:
adrp x0, l_.str@PAGE
Lloh9:
add x0, x0, l_.str@PAGEOFF
bl __
mov x22, x0
mov x0, x20
mov x1, x19
bl _svn_dirent_local_style
mov x3, x0
mov x0, x21
mov x1, #0
mov x2, x22
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
ldp x24, x23, [sp], #64 ; 16-byte Folded Reload
b _svn_error_createf
.loh AdrpLdrGot Lloh0, Lloh1
.loh AdrpLdrGotLdr Lloh2, Lloh3, Lloh4
.loh AdrpAdd Lloh8, Lloh9
.loh AdrpLdrGotLdr Lloh5, Lloh6, Lloh7
.cfi_endproc
; -- End function
.comm _SVN_FS_X__RECOVERABLE_RETRY_COUNT,4,2 ; @SVN_FS_X__RECOVERABLE_RETRY_COUNT
.comm _SVN_ERR_FS_CORRUPT,4,2 ; @SVN_ERR_FS_CORRUPT
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "Can't read '%s'"
.comm _SVN_NO_ERROR,8,3 ; @SVN_NO_ERROR
.subsections_via_symbols
| AnghaBench/freebsd/contrib/subversion/subversion/libsvn_fs_x/extr_util.c_svn_fs_x__read_content.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function lmp91000_read
_lmp91000_read: ## @lmp91000_read
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdx, %r14
movl %esi, %r15d
movq %rdi, %r13
movl 24(%rdi), %edi
movq _LMP91000_REG_MODECN@GOTPCREL(%rip), %rbx
movl (%rbx), %esi
leaq -44(%rbp), %rdx
callq _regmap_read
testl %eax, %eax
jne LBB0_1
## %bb.3:
movl 24(%r13), %edi
movl (%rbx), %esi
movl %r15d, %edx
callq _regmap_write
testl %eax, %eax
je LBB0_4
LBB0_1:
movq _EINVAL@GOTPCREL(%rip), %rcx
LBB0_2:
xorl %eax, %eax
subl (%rcx), %eax
LBB0_10:
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB0_4:
cmpl %r15d, -44(%rbp)
je LBB0_7
## %bb.5:
movq _LMP91000_REG_MODECN_TEMP@GOTPCREL(%rip), %rax
cmpl %r15d, (%rax)
jne LBB0_7
## %bb.6:
movl $3000, %edi ## imm = 0xBB8
movl $4000, %esi ## imm = 0xFA0
callq _usleep_range
LBB0_7:
movq _LMP91000_REG_MODECN_3LEAD@GOTPCREL(%rip), %rax
xorl %ecx, %ecx
cmpl %r15d, (%rax)
setne %cl
movl %ecx, (%r13)
movl 20(%r13), %edi
callq _iio_trigger_poll_chained
leaq 16(%r13), %r15
movq _HZ@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq %r15, %rdi
callq _wait_for_completion_timeout
movl %eax, %r12d
movq %r15, %rdi
callq _reinit_completion
testl %r12d, %r12d
je LBB0_8
## %bb.9:
movq 8(%r13), %rax
movslq (%r13), %rcx
movl (%rax,%rcx,4), %eax
movl %eax, (%r14)
xorl %eax, %eax
jmp LBB0_10
LBB0_8:
movq _ETIMEDOUT@GOTPCREL(%rip), %rcx
jmp LBB0_2
.cfi_endproc
## -- End function
.comm _LMP91000_REG_MODECN,4,2 ## @LMP91000_REG_MODECN
.comm _EINVAL,4,2 ## @EINVAL
.comm _LMP91000_REG_MODECN_TEMP,4,2 ## @LMP91000_REG_MODECN_TEMP
.comm _LMP91000_REG_MODECN_3LEAD,4,2 ## @LMP91000_REG_MODECN_3LEAD
.comm _HZ,4,2 ## @HZ
.comm _ETIMEDOUT,4,2 ## @ETIMEDOUT
.no_dead_strip _lmp91000_read
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function lmp91000_read
_lmp91000_read: ; @lmp91000_read
.cfi_startproc
; %bb.0:
sub sp, sp, #64
.cfi_def_cfa_offset 64
stp x22, x21, [sp, #16] ; 16-byte Folded Spill
stp x20, x19, [sp, #32] ; 16-byte Folded Spill
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x19, x2
mov x21, x1
mov x20, x0
ldr w0, [x0, #24]
Lloh0:
adrp x22, _LMP91000_REG_MODECN@GOTPAGE
Lloh1:
ldr x22, [x22, _LMP91000_REG_MODECN@GOTPAGEOFF]
ldr w1, [x22]
add x2, sp, #12
bl _regmap_read
cbnz w0, LBB0_2
; %bb.1:
ldr w0, [x20, #24]
ldr w1, [x22]
mov x2, x21
bl _regmap_write
cbz w0, LBB0_5
LBB0_2:
Lloh2:
adrp x8, _EINVAL@GOTPAGE
Lloh3:
ldr x8, [x8, _EINVAL@GOTPAGEOFF]
LBB0_3:
ldr w8, [x8]
neg w0, w8
LBB0_4:
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #64
ret
LBB0_5:
ldr w8, [sp, #12]
Lloh4:
adrp x9, _LMP91000_REG_MODECN_TEMP@GOTPAGE
Lloh5:
ldr x9, [x9, _LMP91000_REG_MODECN_TEMP@GOTPAGEOFF]
Lloh6:
ldr w9, [x9]
cmp w8, w21
ccmp w9, w21, #0, ne
b.ne LBB0_7
; %bb.6:
mov w0, #3000
mov w1, #4000
bl _usleep_range
LBB0_7:
Lloh7:
adrp x8, _LMP91000_REG_MODECN_3LEAD@GOTPAGE
Lloh8:
ldr x8, [x8, _LMP91000_REG_MODECN_3LEAD@GOTPAGEOFF]
Lloh9:
ldr w8, [x8]
cmp w8, w21
cset w8, ne
str w8, [x20]
ldr w0, [x20, #20]
bl _iio_trigger_poll_chained
add x21, x20, #16
Lloh10:
adrp x8, _HZ@GOTPAGE
Lloh11:
ldr x8, [x8, _HZ@GOTPAGEOFF]
Lloh12:
ldr w1, [x8]
mov x0, x21
bl _wait_for_completion_timeout
mov x22, x0
mov x0, x21
bl _reinit_completion
cbz w22, LBB0_9
; %bb.8:
mov w0, #0
ldr x8, [x20, #8]
ldrsw x9, [x20]
ldr w8, [x8, x9, lsl #2]
str w8, [x19]
b LBB0_4
LBB0_9:
Lloh13:
adrp x8, _ETIMEDOUT@GOTPAGE
Lloh14:
ldr x8, [x8, _ETIMEDOUT@GOTPAGEOFF]
b LBB0_3
.loh AdrpLdrGot Lloh0, Lloh1
.loh AdrpLdrGot Lloh2, Lloh3
.loh AdrpLdrGotLdr Lloh4, Lloh5, Lloh6
.loh AdrpLdrGotLdr Lloh10, Lloh11, Lloh12
.loh AdrpLdrGotLdr Lloh7, Lloh8, Lloh9
.loh AdrpLdrGot Lloh13, Lloh14
.cfi_endproc
; -- End function
.comm _LMP91000_REG_MODECN,4,2 ; @LMP91000_REG_MODECN
.comm _EINVAL,4,2 ; @EINVAL
.comm _LMP91000_REG_MODECN_TEMP,4,2 ; @LMP91000_REG_MODECN_TEMP
.comm _LMP91000_REG_MODECN_3LEAD,4,2 ; @LMP91000_REG_MODECN_3LEAD
.comm _HZ,4,2 ; @HZ
.comm _ETIMEDOUT,4,2 ; @ETIMEDOUT
.no_dead_strip _lmp91000_read
.subsections_via_symbols
| AnghaBench/linux/drivers/iio/potentiostat/extr_lmp91000.c_lmp91000_read.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function ipoib_ndo_uninit
_ipoib_ndo_uninit: ## @ipoib_ndo_uninit
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdi, %r14
callq _ipoib_priv
movq %rax, %rbx
xorl %eax, %eax
callq _ASSERT_RTNL
leaq 24(%rbx), %rdi
callq _list_empty
xorl %edi, %edi
testl %eax, %eax
sete %dil
callq _WARN_ON
movq (%rbx), %rdi
testq %rdi, %rdi
je LBB0_2
## %bb.1:
callq _ipoib_priv
leaq 16(%rax), %r15
movq %r15, %rdi
callq _down_write
movq %rbx, %rdi
addq $20, %rdi
callq _list_del
movq %r15, %rdi
callq _up_write
LBB0_2:
movq %r14, %rdi
callq _ipoib_neigh_hash_uninit
movq %r14, %rdi
callq _ipoib_ib_dev_cleanup
movq 8(%rbx), %rdi
testq %rdi, %rdi
je LBB0_4
## %bb.3:
callq _flush_workqueue
movq 8(%rbx), %rdi
callq _destroy_workqueue
movq $0, 8(%rbx)
LBB0_4:
movq (%rbx), %rdi
addq $8, %rsp
testq %rdi, %rdi
je LBB0_5
## %bb.6:
popq %rbx
popq %r14
popq %r15
popq %rbp
jmp _dev_put ## TAILCALL
LBB0_5:
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.no_dead_strip _ipoib_ndo_uninit
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function ipoib_ndo_uninit
_ipoib_ndo_uninit: ; @ipoib_ndo_uninit
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x20, x0
bl _ipoib_priv
mov x19, x0
bl _ASSERT_RTNL
add x0, x19, #24
bl _list_empty
cmp w0, #0
cset w0, eq
bl _WARN_ON
ldr x0, [x19]
cbz x0, LBB0_2
; %bb.1:
bl _ipoib_priv
add x21, x0, #16
mov x0, x21
bl _down_write
add x0, x19, #20
bl _list_del
mov x0, x21
bl _up_write
LBB0_2:
mov x0, x20
bl _ipoib_neigh_hash_uninit
mov x0, x20
bl _ipoib_ib_dev_cleanup
ldr x0, [x19, #8]
cbz x0, LBB0_4
; %bb.3:
bl _flush_workqueue
ldr x0, [x19, #8]
bl _destroy_workqueue
str xzr, [x19, #8]
LBB0_4:
ldr x0, [x19]
cbz x0, LBB0_6
; %bb.5:
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
b _dev_put
LBB0_6:
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
.no_dead_strip _ipoib_ndo_uninit
.subsections_via_symbols
| AnghaBench/linux/drivers/infiniband/ulp/ipoib/extr_ipoib_main.c_ipoib_ndo_uninit.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function qlcnic_get_act_pci_func
_qlcnic_get_act_pci_func: ## @qlcnic_get_act_pci_func
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq _QLCNIC_ESWITCH_ENABLED@GOTPCREL(%rip), %rax
movl (%rax), %eax
movq 8(%rdi), %rcx
testl %eax, (%rdi)
je LBB0_1
## %bb.6:
movq 8(%rcx), %rcx
movq _QLCNIC_MGMT_FUNC@GOTPCREL(%rip), %rdx
xorl %eax, %eax
cmpq (%rdx), %rcx
je LBB0_10
## %bb.7:
movq %rdi, %rbx
movq _QLCNIC_MAX_PCI_FUNC@GOTPCREL(%rip), %rax
movl (%rax), %edi
movq _GFP_KERNEL@GOTPCREL(%rip), %rax
movl (%rax), %edx
movl $4, %esi
callq _kcalloc
testq %rax, %rax
je LBB0_8
## %bb.9:
movq %rax, %r14
movq %rbx, %rdi
movq %rax, %rsi
callq _qlcnic_get_pci_info
movl %eax, %ebx
movq %r14, %rdi
callq _kfree
movl %ebx, %eax
jmp LBB0_10
LBB0_1:
movl (%rcx), %edx
xorl %eax, %eax
cmpl $128, %edx
je LBB0_5
## %bb.2:
cmpl $129, %edx
jne LBB0_10
## %bb.3:
movq _QLCNIC_NIU_MAX_GBE_PORTS@GOTPCREL(%rip), %rdx
jmp LBB0_4
LBB0_8:
movq _ENOMEM@GOTPCREL(%rip), %rcx
xorl %eax, %eax
subl (%rcx), %eax
jmp LBB0_10
LBB0_5:
movq _QLCNIC_NIU_MAX_XG_PORTS@GOTPCREL(%rip), %rdx
LBB0_4:
movl (%rdx), %edx
movl %edx, 16(%rcx)
LBB0_10:
popq %rbx
popq %r14
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _QLCNIC_ESWITCH_ENABLED,4,2 ## @QLCNIC_ESWITCH_ENABLED
.comm _QLCNIC_NIU_MAX_GBE_PORTS,4,2 ## @QLCNIC_NIU_MAX_GBE_PORTS
.comm _QLCNIC_NIU_MAX_XG_PORTS,4,2 ## @QLCNIC_NIU_MAX_XG_PORTS
.comm _QLCNIC_MGMT_FUNC,8,3 ## @QLCNIC_MGMT_FUNC
.comm _QLCNIC_MAX_PCI_FUNC,4,2 ## @QLCNIC_MAX_PCI_FUNC
.comm _GFP_KERNEL,4,2 ## @GFP_KERNEL
.comm _ENOMEM,4,2 ## @ENOMEM
.no_dead_strip _qlcnic_get_act_pci_func
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function qlcnic_get_act_pci_func
_qlcnic_get_act_pci_func: ; @qlcnic_get_act_pci_func
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
ldr w9, [x0]
Lloh0:
adrp x8, _QLCNIC_ESWITCH_ENABLED@GOTPAGE
Lloh1:
ldr x8, [x8, _QLCNIC_ESWITCH_ENABLED@GOTPAGEOFF]
Lloh2:
ldr w10, [x8]
ldr x8, [x0, #8]
tst w10, w9
b.eq LBB0_4
; %bb.1:
ldr x8, [x8, #8]
Lloh3:
adrp x9, _QLCNIC_MGMT_FUNC@GOTPAGE
Lloh4:
ldr x9, [x9, _QLCNIC_MGMT_FUNC@GOTPAGEOFF]
Lloh5:
ldr x9, [x9]
cmp x8, x9
b.eq LBB0_7
; %bb.2:
mov x19, x0
Lloh6:
adrp x8, _QLCNIC_MAX_PCI_FUNC@GOTPAGE
Lloh7:
ldr x8, [x8, _QLCNIC_MAX_PCI_FUNC@GOTPAGEOFF]
Lloh8:
ldr w0, [x8]
Lloh9:
adrp x8, _GFP_KERNEL@GOTPAGE
Lloh10:
ldr x8, [x8, _GFP_KERNEL@GOTPAGEOFF]
Lloh11:
ldr w2, [x8]
mov w1, #4
bl _kcalloc
cbz x0, LBB0_8
; %bb.3:
mov x20, x0
mov x0, x19
mov x1, x20
bl _qlcnic_get_pci_info
mov x19, x0
mov x0, x20
bl _kfree
mov x0, x19
b LBB0_11
LBB0_4:
ldr w9, [x8]
cmp w9, #128
b.eq LBB0_9
; %bb.5:
cmp w9, #129
b.ne LBB0_7
; %bb.6:
mov w0, #0
Lloh12:
adrp x9, _QLCNIC_NIU_MAX_GBE_PORTS@GOTPAGE
Lloh13:
ldr x9, [x9, _QLCNIC_NIU_MAX_GBE_PORTS@GOTPAGEOFF]
b LBB0_10
LBB0_7:
mov w0, #0
b LBB0_11
LBB0_8:
Lloh14:
adrp x8, _ENOMEM@GOTPAGE
Lloh15:
ldr x8, [x8, _ENOMEM@GOTPAGEOFF]
Lloh16:
ldr w8, [x8]
neg w0, w8
b LBB0_11
LBB0_9:
mov w0, #0
Lloh17:
adrp x9, _QLCNIC_NIU_MAX_XG_PORTS@GOTPAGE
Lloh18:
ldr x9, [x9, _QLCNIC_NIU_MAX_XG_PORTS@GOTPAGEOFF]
LBB0_10:
ldr w9, [x9]
str w9, [x8, #16]
LBB0_11:
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGotLdr Lloh9, Lloh10, Lloh11
.loh AdrpLdrGotLdr Lloh6, Lloh7, Lloh8
.loh AdrpLdrGot Lloh12, Lloh13
.loh AdrpLdrGotLdr Lloh14, Lloh15, Lloh16
.loh AdrpLdrGot Lloh17, Lloh18
.cfi_endproc
; -- End function
.comm _QLCNIC_ESWITCH_ENABLED,4,2 ; @QLCNIC_ESWITCH_ENABLED
.comm _QLCNIC_NIU_MAX_GBE_PORTS,4,2 ; @QLCNIC_NIU_MAX_GBE_PORTS
.comm _QLCNIC_NIU_MAX_XG_PORTS,4,2 ; @QLCNIC_NIU_MAX_XG_PORTS
.comm _QLCNIC_MGMT_FUNC,8,3 ; @QLCNIC_MGMT_FUNC
.comm _QLCNIC_MAX_PCI_FUNC,4,2 ; @QLCNIC_MAX_PCI_FUNC
.comm _GFP_KERNEL,4,2 ; @GFP_KERNEL
.comm _ENOMEM,4,2 ; @ENOMEM
.no_dead_strip _qlcnic_get_act_pci_func
.subsections_via_symbols
| AnghaBench/fastsocket/kernel/drivers/net/qlcnic/extr_qlcnic_main.c_qlcnic_get_act_pci_func.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _iicbus_release_bus ## -- Begin function iicbus_release_bus
.p2align 4, 0x90
_iicbus_release_bus: ## @iicbus_release_bus
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rsi, %r15
movq %rdi, %r14
callq _device_get_softc
movq %rax, %rbx
movq %rax, %rdi
callq _IICBUS_LOCK
cmpq %r15, (%rbx)
jne LBB0_1
## %bb.2:
decq 8(%rbx)
jne LBB0_4
## %bb.3:
movq %rbx, %rdi
callq _IICBUS_UNLOCK
movq %r14, %rdi
callq _device_get_parent
movq _IIC_RELEASE_BUS@GOTPCREL(%rip), %rcx
movl (%rcx), %esi
movl %eax, %edi
xorl %edx, %edx
callq _IICBUS_CALLBACK
movq %rbx, %rdi
callq _IICBUS_LOCK
movq $0, (%rbx)
movq %rbx, %rdi
callq _wakeup_one
movl 16(%rbx), %edi
callq _device_unbusy
LBB0_4:
movq %rbx, %rdi
callq _IICBUS_UNLOCK
xorl %eax, %eax
jmp LBB0_5
LBB0_1:
movq %rbx, %rdi
callq _IICBUS_UNLOCK
movq _IIC_EBUSBSY@GOTPCREL(%rip), %rax
movl (%rax), %eax
LBB0_5:
addq $8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _IIC_EBUSBSY,4,2 ## @IIC_EBUSBSY
.comm _IIC_RELEASE_BUS,4,2 ## @IIC_RELEASE_BUS
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _iicbus_release_bus ; -- Begin function iicbus_release_bus
.p2align 2
_iicbus_release_bus: ; @iicbus_release_bus
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x21, x1
mov x20, x0
bl _device_get_softc
mov x19, x0
bl _IICBUS_LOCK
ldr x8, [x19]
cmp x8, x21
b.ne LBB0_4
; %bb.1:
ldr x8, [x19, #8]
subs x8, x8, #1
str x8, [x19, #8]
b.ne LBB0_3
; %bb.2:
mov x0, x19
bl _IICBUS_UNLOCK
mov x0, x20
bl _device_get_parent
Lloh0:
adrp x8, _IIC_RELEASE_BUS@GOTPAGE
Lloh1:
ldr x8, [x8, _IIC_RELEASE_BUS@GOTPAGEOFF]
Lloh2:
ldr w1, [x8]
mov x2, #0
bl _IICBUS_CALLBACK
mov x0, x19
bl _IICBUS_LOCK
str xzr, [x19]
mov x0, x19
bl _wakeup_one
ldr w0, [x19, #16]
bl _device_unbusy
LBB0_3:
mov x0, x19
bl _IICBUS_UNLOCK
mov w0, #0
b LBB0_5
LBB0_4:
mov x0, x19
bl _IICBUS_UNLOCK
Lloh3:
adrp x8, _IIC_EBUSBSY@GOTPAGE
Lloh4:
ldr x8, [x8, _IIC_EBUSBSY@GOTPAGEOFF]
Lloh5:
ldr w0, [x8]
LBB0_5:
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.cfi_endproc
; -- End function
.comm _IIC_EBUSBSY,4,2 ; @IIC_EBUSBSY
.comm _IIC_RELEASE_BUS,4,2 ; @IIC_RELEASE_BUS
.subsections_via_symbols
| AnghaBench/freebsd/sys/dev/iicbus/extr_iiconf.c_iicbus_release_bus.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function mxl5007t_tuner_init
_mxl5007t_tuner_init: ## @mxl5007t_tuner_init
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
movq %rdi, %rbx
callq _mxl5007t_calc_init_regs
movq %rbx, %rdi
movq %rax, %rsi
callq _mxl5007t_write_regs
movl %eax, %ebx
movl %eax, %edi
callq _mxl_fail
testq %rax, %rax
jne LBB0_2
## %bb.1:
movl $1, %edi
callq _mdelay
LBB0_2:
movl %ebx, %eax
addq $8, %rsp
popq %rbx
popq %rbp
retq
.cfi_endproc
## -- End function
.no_dead_strip _mxl5007t_tuner_init
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function mxl5007t_tuner_init
_mxl5007t_tuner_init: ; @mxl5007t_tuner_init
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x0
bl _mxl5007t_calc_init_regs
mov x1, x0
mov x0, x19
bl _mxl5007t_write_regs
mov x19, x0
bl _mxl_fail
cbnz x0, LBB0_2
; %bb.1:
mov w0, #1
bl _mdelay
LBB0_2:
mov x0, x19
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
.no_dead_strip _mxl5007t_tuner_init
.subsections_via_symbols
| AnghaBench/linux/drivers/media/tuners/extr_mxl5007t.c_mxl5007t_tuner_init.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function zap_pmd_range
_zap_pmd_range: ## @zap_pmd_range
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $24, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %r9, -56(%rbp) ## 8-byte Spill
movq %r8, %r13
movq %rcx, %r14
movq %rsi, %r15
movq %rdi, -48(%rbp) ## 8-byte Spill
movq %rdx, %rdi
movq %rcx, %rsi
callq _pmd_offset
movq %rax, %rbx
jmp LBB0_1
.p2align 4, 0x90
LBB0_6: ## in Loop: Header=BB0_1 Depth=1
movq -48(%rbp), %rdi ## 8-byte Reload
movq %r15, %rsi
movq %rbx, %rdx
movq %r14, %rcx
callq _zap_huge_pmd
testq %rax, %rax
je LBB0_7
LBB0_9: ## in Loop: Header=BB0_1 Depth=1
xorl %eax, %eax
callq _cond_resched
addq $4, %rbx
movq %r12, %r14
cmpq %r13, %r12
je LBB0_10
LBB0_1: ## =>This Inner Loop Header: Depth=1
movq %r14, %rdi
movq %r13, %rsi
callq _pmd_addr_end
movq %rax, %r12
movl (%rbx), %edi
callq _is_swap_pmd
testq %rax, %rax
jne LBB0_4
## %bb.2: ## in Loop: Header=BB0_1 Depth=1
movl (%rbx), %edi
callq _pmd_trans_huge
testq %rax, %rax
jne LBB0_4
## %bb.3: ## in Loop: Header=BB0_1 Depth=1
movl (%rbx), %edi
callq _pmd_devmap
testq %rax, %rax
je LBB0_7
.p2align 4, 0x90
LBB0_4: ## in Loop: Header=BB0_1 Depth=1
movq %r12, %rax
subq %r14, %rax
movq _HPAGE_PMD_SIZE@GOTPCREL(%rip), %rcx
cmpq (%rcx), %rax
je LBB0_6
## %bb.5: ## in Loop: Header=BB0_1 Depth=1
movq %r15, %rdi
movq %rbx, %rsi
movq %r14, %rdx
xorl %ecx, %ecx
xorl %r8d, %r8d
callq ___split_huge_pmd
LBB0_7: ## in Loop: Header=BB0_1 Depth=1
movq %rbx, %rdi
callq _pmd_none_or_trans_huge_or_clear_bad
testq %rax, %rax
jne LBB0_9
## %bb.8: ## in Loop: Header=BB0_1 Depth=1
movq -48(%rbp), %rdi ## 8-byte Reload
movq %r15, %rsi
movq %rbx, %rdx
movq %r14, %rcx
movq %r12, %r8
movq -56(%rbp), %r9 ## 8-byte Reload
callq _zap_pte_range
movq %rax, %r12
jmp LBB0_9
LBB0_10:
movq %r13, %rax
addq $24, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _HPAGE_PMD_SIZE,8,3 ## @HPAGE_PMD_SIZE
.no_dead_strip _zap_pmd_range
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function zap_pmd_range
_zap_pmd_range: ; @zap_pmd_range
.cfi_startproc
; %bb.0:
stp x26, x25, [sp, #-80]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 80
stp x24, x23, [sp, #16] ; 16-byte Folded Spill
stp x22, x21, [sp, #32] ; 16-byte Folded Spill
stp x20, x19, [sp, #48] ; 16-byte Folded Spill
stp x29, x30, [sp, #64] ; 16-byte Folded Spill
add x29, sp, #64
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
mov x20, x5
mov x19, x4
mov x24, x3
mov x21, x1
mov x22, x0
mov x0, x2
mov x1, x3
bl _pmd_offset
mov x23, x0
Lloh0:
adrp x26, _HPAGE_PMD_SIZE@GOTPAGE
Lloh1:
ldr x26, [x26, _HPAGE_PMD_SIZE@GOTPAGEOFF]
b LBB0_3
LBB0_1: ; in Loop: Header=BB0_3 Depth=1
mov x0, x22
mov x1, x21
mov x2, x23
mov x3, x24
bl _zap_huge_pmd
cbz x0, LBB0_8
LBB0_2: ; in Loop: Header=BB0_3 Depth=1
bl _cond_resched
add x23, x23, #4
mov x24, x25
cmp x25, x19
b.eq LBB0_10
LBB0_3: ; =>This Inner Loop Header: Depth=1
mov x0, x24
mov x1, x19
bl _pmd_addr_end
mov x25, x0
ldr w0, [x23]
bl _is_swap_pmd
cbnz x0, LBB0_6
; %bb.4: ; in Loop: Header=BB0_3 Depth=1
ldr w0, [x23]
bl _pmd_trans_huge
cbnz x0, LBB0_6
; %bb.5: ; in Loop: Header=BB0_3 Depth=1
ldr w0, [x23]
bl _pmd_devmap
cbz x0, LBB0_8
LBB0_6: ; in Loop: Header=BB0_3 Depth=1
ldr x8, [x26]
sub x9, x25, x24
cmp x9, x8
b.eq LBB0_1
; %bb.7: ; in Loop: Header=BB0_3 Depth=1
mov x0, x21
mov x1, x23
mov x2, x24
mov w3, #0
mov x4, #0
bl ___split_huge_pmd
LBB0_8: ; in Loop: Header=BB0_3 Depth=1
mov x0, x23
bl _pmd_none_or_trans_huge_or_clear_bad
cbnz x0, LBB0_2
; %bb.9: ; in Loop: Header=BB0_3 Depth=1
mov x0, x22
mov x1, x21
mov x2, x23
mov x3, x24
mov x4, x25
mov x5, x20
bl _zap_pte_range
mov x25, x0
b LBB0_2
LBB0_10:
mov x0, x19
ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
ldp x22, x21, [sp, #32] ; 16-byte Folded Reload
ldp x24, x23, [sp, #16] ; 16-byte Folded Reload
ldp x26, x25, [sp], #80 ; 16-byte Folded Reload
ret
.loh AdrpLdrGot Lloh0, Lloh1
.cfi_endproc
; -- End function
.comm _HPAGE_PMD_SIZE,8,3 ; @HPAGE_PMD_SIZE
.no_dead_strip _zap_pmd_range
.subsections_via_symbols
| AnghaBench/linux/mm/extr_memory.c_zap_pmd_range.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function metric_id_get
_metric_id_get: ## @metric_id_get
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
movq %rdx, %rbx
leaq -16(%rbp), %rsi
callq _mesh_get_default_parameters
testl %eax, %eax
jne LBB0_2
## %bb.1:
movl -16(%rbp), %ecx
leaq L_.str(%rip), %rdx
movq %rbx, %rdi
movl $5, %esi
callq _snprintf
LBB0_2:
addq $8, %rsp
popq %rbx
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "%d\n"
.no_dead_strip _metric_id_get
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function metric_id_get
_metric_id_get: ; @metric_id_get
.cfi_startproc
; %bb.0:
sub sp, sp, #48
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x2
add x1, sp, #8
bl _mesh_get_default_parameters
cbnz w0, LBB0_2
; %bb.1:
ldr w3, [sp, #8]
Lloh0:
adrp x2, l_.str@PAGE
Lloh1:
add x2, x2, l_.str@PAGEOFF
mov x0, x19
mov w1, #5
bl _snprintf
LBB0_2:
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #48
ret
.loh AdrpAdd Lloh0, Lloh1
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "%d\n"
.no_dead_strip _metric_id_get
.subsections_via_symbols
| AnghaBench/linux/drivers/net/wireless/marvell/libertas/extr_mesh.c_metric_id_get.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _fiat_p384_mul ## -- Begin function fiat_p384_mul
.p2align 4, 0x90
_fiat_p384_mul: ## @fiat_p384_mul
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $960, %rsp ## imm = 0x3C0
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdx, -1128(%rbp) ## 8-byte Spill
movq %rdi, %r13
movq (%rdi), %rbx
movq %rbx, -432(%rbp) ## 8-byte Spill
movq %rdi, -896(%rbp) ## 8-byte Spill
movq %rsi, -368(%rbp) ## 8-byte Spill
movq (%rsi), %r8
movq 4(%rsi), %rdi
movq 8(%rsi), %rax
movq 12(%rsi), %rcx
movq %rcx, -552(%rbp) ## 8-byte Spill
movq %rcx, %rdx
imulq %rbx, %rdx
movq %rdx, %r12
movq %rax, %rcx
imulq %rbx, %rcx
movq %rax, %rsi
movq %rax, -208(%rbp) ## 8-byte Spill
mulq %rbx
movq %rdx, %r9
movq %rdi, %rax
movq %rdi, -280(%rbp) ## 8-byte Spill
imulq %rbx, %rdi
mulq %rbx
movq %rdx, %r10
movq %r8, %rax
mulq %rbx
movq %rdx, %r11
movq %r8, %r15
imulq %rbx, %r15
addq %rdi, %r11
adcq %rcx, %r10
cmpq %rcx, %r10
adcq $0, %r9
movl $4294967294, %eax ## imm = 0xFFFFFFFE
incq %rax
movq %rax, -800(%rbp) ## 8-byte Spill
movq %r15, %rcx
imulq %rax, %rcx
movq %rcx, %rbx
movq %rcx, -624(%rbp) ## 8-byte Spill
movq %r15, %r14
shlq $32, %r14
movl $4294967295, %ecx ## imm = 0xFFFFFFFF
movq %r15, %rax
mulq %rcx
movq %rdx, -520(%rbp) ## 8-byte Spill
xorl %eax, %eax
subq %r15, %r14
setb %al
leaq (%r11,%rax), %rcx
xorl %edi, %edi
addq %rdx, %rcx
setb %dil
addq %rax, %r11
adcq %r10, %rdi
setb -272(%rbp) ## 1-byte Folded Spill
movq %r12, %r10
leaq (%r9,%r12), %r12
adcq %rbx, %r12
movq %r12, -48(%rbp) ## 8-byte Spill
movq 4(%r13), %rbx
movq %rbx, -352(%rbp) ## 8-byte Spill
imulq %rbx, %rsi
movq %rsi, -104(%rbp) ## 8-byte Spill
movq -280(%rbp), %rax ## 8-byte Reload
movq %rax, %r11
imulq %rbx, %r11
mulq %rbx
movq %rdx, %r13
movq %r8, -728(%rbp) ## 8-byte Spill
movq %r8, %rax
mulq %rbx
imulq %rbx, %r8
addq %r11, %rdx
adcq %rsi, %r13
movq %rcx, %rax
addq %r8, %rax
movq %rdx, %r11
adcq %rdi, %r11
addq %rcx, %r8
adcq %rdi, %rdx
setb -64(%rbp) ## 1-byte Folded Spill
movq %r13, %rax
adcq %r12, %rax
movq %rax, -632(%rbp) ## 8-byte Spill
movq -552(%rbp), %rbx ## 8-byte Reload
movq %rbx, %rax
movq -432(%rbp), %rcx ## 8-byte Reload
mulq %rcx
movq %rdx, %rdi
movq %r9, %rax
addq %r10, %rax
movq %rdx, %rax
adcq $0, %rax
setb %r12b
movq -368(%rbp), %rax ## 8-byte Reload
movq 16(%rax), %rax
movq %rax, -112(%rbp) ## 8-byte Spill
movq %rax, %rsi
imulq %rcx, %rsi
mulq %rcx
movq %rdx, -264(%rbp) ## 8-byte Spill
addq %r10, %r9
adcq %rdi, %rsi
movq %rsi, -376(%rbp) ## 8-byte Spill
movzbl %r12b, %eax
adcq %rdx, %rax
movq %rax, -440(%rbp) ## 8-byte Spill
movq %r15, %rax
movl $4294967294, %ecx ## imm = 0xFFFFFFFE
mulq %rcx
subq %r15, %r14
addq -520(%rbp), %r14 ## 8-byte Folded Reload
movq %r14, -136(%rbp) ## 8-byte Spill
movq -624(%rbp), %rax ## 8-byte Reload
adcq %rax, %rdx
movq %rdx, -256(%rbp) ## 8-byte Spill
addb $255, -272(%rbp) ## 1-byte Folded Spill
adcq %rax, %r9
setb -56(%rbp) ## 1-byte Folded Spill
adcq %rsi, %r14
movq %r14, -360(%rbp) ## 8-byte Spill
movq %rbx, %r15
movq %rbx, %r12
movq -352(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %r15
movq -208(%rbp), %r9 ## 8-byte Reload
movq %r9, %rax
mulq %rcx
movq %rdx, %rdi
cmpq -104(%rbp), %r13 ## 8-byte Folded Reload
adcq $0, %rdi
addb $255, -64(%rbp) ## 1-byte Folded Spill
adcq -48(%rbp), %r13 ## 8-byte Folded Reload
setb -64(%rbp) ## 1-byte Folded Spill
leaq (%rdi,%r15), %rax
adcq %r14, %rax
movq %rax, %r10
movq %rax, -192(%rbp) ## 8-byte Spill
movq %r8, %rax
imulq -800(%rbp), %rax ## 8-byte Folded Reload
movq %rax, %rsi
movq %rax, -392(%rbp) ## 8-byte Spill
movq %r8, %rcx
shlq $32, %rcx
movq %r8, %rax
movl $4294967295, %edx ## imm = 0xFFFFFFFF
mulq %rdx
movq %rdx, -200(%rbp) ## 8-byte Spill
xorl %eax, %eax
subq %r8, %rcx
movq %rcx, -272(%rbp) ## 8-byte Spill
setb %al
leaq (%r11,%rax), %r13
xorl %ebx, %ebx
addq %rdx, %r13
setb %bl
addq %rax, %r11
adcq -632(%rbp), %rbx ## 8-byte Folded Reload
setb -424(%rbp) ## 1-byte Folded Spill
movq %rsi, %rax
adcq %r10, %rax
movq %rax, %rcx
movq %rax, -176(%rbp) ## 8-byte Spill
movq -896(%rbp), %rax ## 8-byte Reload
movq 8(%rax), %rsi
movq %rsi, -240(%rbp) ## 8-byte Spill
imulq %rsi, %r9
movq %r9, -184(%rbp) ## 8-byte Spill
movq -280(%rbp), %rax ## 8-byte Reload
movq %rax, %r11
imulq %rsi, %r11
mulq %rsi
movq %rdx, %r10
movq -728(%rbp), %r14 ## 8-byte Reload
movq %r14, %rax
mulq %rsi
imulq %rsi, %r14
addq %r11, %rdx
adcq %r9, %r10
movq %r13, %rax
addq %r14, %rax
movq %rdx, %r11
adcq %rbx, %r11
addq %r13, %r14
adcq %rbx, %rdx
setb -248(%rbp) ## 1-byte Folded Spill
movq %r10, %rax
adcq %rcx, %rax
movq %rax, -48(%rbp) ## 8-byte Spill
movq %r12, %rax
movq -352(%rbp), %rbx ## 8-byte Reload
mulq %rbx
movq %rdx, %r13
movq %rdi, %rax
addq %r15, %rax
movq %rdx, %rax
adcq $0, %rax
setb %r9b
movq -272(%rbp), %r12 ## 8-byte Reload
subq %r8, %r12
movq -112(%rbp), %rsi ## 8-byte Reload
movq %rsi, %rax
mulq %rbx
movq %rdx, %rcx
movq %rdx, -416(%rbp) ## 8-byte Spill
movq %r8, %rax
movl $4294967294, %edx ## imm = 0xFFFFFFFE
mulq %rdx
imulq %rbx, %rsi
addq -200(%rbp), %r12 ## 8-byte Folded Reload
movq %r12, %r8
movq %r12, -272(%rbp) ## 8-byte Spill
movq -392(%rbp), %r12 ## 8-byte Reload
adcq %r12, %rdx
movq %rdx, -104(%rbp) ## 8-byte Spill
addq %r15, %rdi
adcq %r13, %rsi
movq %rsi, %rbx
movq %rsi, -456(%rbp) ## 8-byte Spill
movzbl %r9b, %eax
adcq %rcx, %rax
movq %rax, -128(%rbp) ## 8-byte Spill
movq -368(%rbp), %rax ## 8-byte Reload
movq 20(%rax), %rax
movq %rax, -632(%rbp) ## 8-byte Spill
movq %rax, %rcx
movq -432(%rbp), %rdx ## 8-byte Reload
imulq %rdx, %rcx
mulq %rdx
movq %rdx, -96(%rbp) ## 8-byte Spill
xorl %eax, %eax
movq -440(%rbp), %rsi ## 8-byte Reload
cmpq -264(%rbp), %rsi ## 8-byte Folded Reload
setb %al
addq %rsi, %rcx
movq %rcx, -320(%rbp) ## 8-byte Spill
adcq %rdx, %rax
movq %rax, -88(%rbp) ## 8-byte Spill
addb $255, -56(%rbp) ## 1-byte Folded Spill
movq -376(%rbp), %rax ## 8-byte Reload
adcq %rax, -136(%rbp) ## 8-byte Folded Spill
setb -328(%rbp) ## 1-byte Folded Spill
movq -256(%rbp), %rax ## 8-byte Reload
adcq %rcx, %rax
movq %rax, -512(%rbp) ## 8-byte Spill
addb $255, -64(%rbp) ## 1-byte Folded Spill
adcq -360(%rbp), %rdi ## 8-byte Folded Reload
setb -408(%rbp) ## 1-byte Folded Spill
adcq %rax, %rbx
movq %rbx, -344(%rbp) ## 8-byte Spill
addb $255, -424(%rbp) ## 1-byte Folded Spill
adcq %r12, -192(%rbp) ## 8-byte Folded Spill
setb -544(%rbp) ## 1-byte Folded Spill
movq %r8, %rax
adcq %rbx, %rax
movq %rax, %rcx
movq %rax, -168(%rbp) ## 8-byte Spill
movq -552(%rbp), %r13 ## 8-byte Reload
movq %r13, %rax
movq -240(%rbp), %rdx ## 8-byte Reload
imulq %rdx, %rax
movq %rax, %rsi
movq %rax, -224(%rbp) ## 8-byte Spill
movq -208(%rbp), %r15 ## 8-byte Reload
movq %r15, %rax
mulq %rdx
cmpq -184(%rbp), %r10 ## 8-byte Folded Reload
adcq $0, %rdx
movq %rdx, -440(%rbp) ## 8-byte Spill
addb $255, -248(%rbp) ## 1-byte Folded Spill
adcq -176(%rbp), %r10 ## 8-byte Folded Reload
setb -536(%rbp) ## 1-byte Folded Spill
leaq (%rdx,%rsi), %rax
adcq %rcx, %rax
movq %rax, %rsi
movq %rax, -616(%rbp) ## 8-byte Spill
movq %r14, %rax
imulq -800(%rbp), %rax ## 8-byte Folded Reload
movq %rax, %rcx
movq %rax, -336(%rbp) ## 8-byte Spill
movq %r14, %rdi
shlq $32, %rdi
movq %r14, %rax
movl $4294967295, %edx ## imm = 0xFFFFFFFF
mulq %rdx
movq %rdx, -360(%rbp) ## 8-byte Spill
xorl %eax, %eax
subq %r14, %rdi
movq %rdi, -136(%rbp) ## 8-byte Spill
setb %al
leaq (%r11,%rax), %r9
xorl %edi, %edi
addq %rdx, %r9
setb %dil
addq %rax, %r11
adcq -48(%rbp), %rdi ## 8-byte Folded Reload
setb -80(%rbp) ## 1-byte Folded Spill
movq %rcx, %rax
adcq %rsi, %rax
movq %rax, %r10
movq %rax, -120(%rbp) ## 8-byte Spill
movq -896(%rbp), %r8 ## 8-byte Reload
movq 12(%r8), %rbx
movq %rbx, -400(%rbp) ## 8-byte Spill
imulq %rbx, %r15
movq %r15, %r12
movq %r15, -504(%rbp) ## 8-byte Spill
movq -280(%rbp), %rax ## 8-byte Reload
movq %rax, %r11
imulq %rbx, %r11
mulq %rbx
movq %rdx, %rsi
movq -728(%rbp), %r15 ## 8-byte Reload
movq %r15, %rax
mulq %rbx
imulq %rbx, %r15
addq %r11, %rdx
adcq %r12, %rsi
movq %rsi, -72(%rbp) ## 8-byte Spill
movq %r9, %rax
addq %r15, %rax
movq %rdx, %rcx
adcq %rdi, %rcx
addq %r9, %r15
adcq %rdi, %rdx
setb -528(%rbp) ## 1-byte Folded Spill
movq %rsi, %rdi
adcq %r10, %rdi
movq %r15, %rsi
shlq $32, %rsi
movq %r15, %rax
movl $4294967295, %edx ## imm = 0xFFFFFFFF
mulq %rdx
movq %rdx, -424(%rbp) ## 8-byte Spill
xorl %eax, %eax
subq %r15, %rsi
movq %rsi, -376(%rbp) ## 8-byte Spill
setb %al
leaq (%rcx,%rax), %rbx
xorl %r11d, %r11d
addq %rdx, %rbx
movq %rbx, -48(%rbp) ## 8-byte Spill
setb %r11b
addq %rax, %rcx
adcq %rdi, %r11
setb -64(%rbp) ## 1-byte Folded Spill
movq %r13, %rax
movq -240(%rbp), %rcx ## 8-byte Reload
mulq %rcx
movq %rdx, %r13
movq -440(%rbp), %rax ## 8-byte Reload
movq -224(%rbp), %r12 ## 8-byte Reload
addq %r12, %rax
movq %rdx, %rax
adcq $0, %rax
setb -304(%rbp) ## 1-byte Folded Spill
movq -112(%rbp), %rax ## 8-byte Reload
movq %rax, %rdx
imulq %rcx, %rdx
movq %rdx, -56(%rbp) ## 8-byte Spill
movq %rcx, %rdx
movq -136(%rbp), %rsi ## 8-byte Reload
subq %r14, %rsi
movq 16(%r8), %rbx
movq %rbx, -232(%rbp) ## 8-byte Spill
movq -208(%rbp), %rdi ## 8-byte Reload
imulq %rbx, %rdi
movq %rdi, -248(%rbp) ## 8-byte Spill
movq -280(%rbp), %rcx ## 8-byte Reload
movq %rcx, %r9
imulq %rbx, %r9
mulq %rdx
movq %rdx, %r10
movq %rdx, -312(%rbp) ## 8-byte Spill
movq %r14, %rax
movl $4294967294, %edx ## imm = 0xFFFFFFFE
mulq %rdx
movq %rdx, %r8
movq %rcx, %rax
mulq %rbx
movq %rdx, %r14
movq -728(%rbp), %rcx ## 8-byte Reload
movq %rcx, %rax
mulq %rbx
imulq %rbx, %rcx
movq %rcx, -264(%rbp) ## 8-byte Spill
addq %r9, %rdx
movq %rdx, -384(%rbp) ## 8-byte Spill
adcq %rdi, %r14
movq %r14, -192(%rbp) ## 8-byte Spill
movq -48(%rbp), %rax ## 8-byte Reload
addq %rcx, %rax
movq %rdx, %rax
adcq %r11, %rax
movq %rax, -176(%rbp) ## 8-byte Spill
addq -360(%rbp), %rsi ## 8-byte Folded Reload
movq %rsi, -136(%rbp) ## 8-byte Spill
movq -336(%rbp), %r9 ## 8-byte Reload
adcq %r9, %r8
movq %r8, -184(%rbp) ## 8-byte Spill
movq -440(%rbp), %r8 ## 8-byte Reload
addq %r12, %r8
movq -56(%rbp), %rcx ## 8-byte Reload
adcq %r13, %rcx
movq %rcx, -56(%rbp) ## 8-byte Spill
movzbl -304(%rbp), %eax ## 1-byte Folded Reload
adcq %r10, %rax
movq %rax, -224(%rbp) ## 8-byte Spill
movq -368(%rbp), %rax ## 8-byte Reload
movq 24(%rax), %rax
movq %rax, -440(%rbp) ## 8-byte Spill
movq %rax, %rdi
movq -432(%rbp), %rdx ## 8-byte Reload
imulq %rdx, %rdi
mulq %rdx
movq %rdx, -296(%rbp) ## 8-byte Spill
xorl %r14d, %r14d
movq -88(%rbp), %rax ## 8-byte Reload
cmpq -96(%rbp), %rax ## 8-byte Folded Reload
setb %r14b
addq %rax, %rdi
movq %rdi, -216(%rbp) ## 8-byte Spill
adcq %rdx, %r14
movq -624(%rbp), %rbx ## 8-byte Reload
movq -256(%rbp), %rdx ## 8-byte Reload
cmpq %rbx, %rdx
adcq -520(%rbp), %rbx ## 8-byte Folded Reload
addb $255, -328(%rbp) ## 1-byte Folded Spill
adcq %rdx, -320(%rbp) ## 8-byte Folded Spill
setb -152(%rbp) ## 1-byte Folded Spill
movq %rbx, %rax
adcq %rdi, %rax
movq %rax, %rsi
movq %rax, -304(%rbp) ## 8-byte Spill
movq -632(%rbp), %rax ## 8-byte Reload
movq %rax, %r12
movq -352(%rbp), %rdx ## 8-byte Reload
imulq %rdx, %r12
mulq %rdx
movq %rdx, -448(%rbp) ## 8-byte Spill
xorl %edi, %edi
movq -128(%rbp), %rax ## 8-byte Reload
cmpq -416(%rbp), %rax ## 8-byte Folded Reload
setb %dil
addq %rax, %r12
adcq %rdx, %rdi
movq %rdi, -288(%rbp) ## 8-byte Spill
addb $255, -408(%rbp) ## 1-byte Folded Spill
movq -512(%rbp), %rax ## 8-byte Reload
adcq -456(%rbp), %rax ## 8-byte Folded Reload
setb -160(%rbp) ## 1-byte Folded Spill
movq %r12, %rdx
adcq %rsi, %rdx
movq %rdx, -256(%rbp) ## 8-byte Spill
addb $255, -544(%rbp) ## 1-byte Folded Spill
movq -344(%rbp), %rax ## 8-byte Reload
adcq -272(%rbp), %rax ## 8-byte Folded Reload
setb -512(%rbp) ## 1-byte Folded Spill
movq -104(%rbp), %rax ## 8-byte Reload
adcq %rdx, %rax
movq %rax, -408(%rbp) ## 8-byte Spill
addb $255, -536(%rbp) ## 1-byte Folded Spill
adcq -168(%rbp), %r8 ## 8-byte Folded Reload
setb -168(%rbp) ## 1-byte Folded Spill
adcq %rax, %rcx
movq %rcx, -88(%rbp) ## 8-byte Spill
addb $255, -80(%rbp) ## 1-byte Folded Spill
adcq %r9, -616(%rbp) ## 8-byte Folded Spill
setb -96(%rbp) ## 1-byte Folded Spill
movq -136(%rbp), %rax ## 8-byte Reload
adcq %rcx, %rax
movq %rax, %rsi
movq %rax, -144(%rbp) ## 8-byte Spill
movq -208(%rbp), %rax ## 8-byte Reload
movq -400(%rbp), %rdi ## 8-byte Reload
mulq %rdi
movq %rdx, %r10
movq -552(%rbp), %rax ## 8-byte Reload
movq %rax, %r13
imulq %rdi, %r13
movq -72(%rbp), %rcx ## 8-byte Reload
cmpq -504(%rbp), %rcx ## 8-byte Folded Reload
adcq $0, %r10
addb $255, -528(%rbp) ## 1-byte Folded Spill
adcq -120(%rbp), %rcx ## 8-byte Folded Reload
setb -80(%rbp) ## 1-byte Folded Spill
leaq (%r10,%r13), %rcx
adcq %rsi, %rcx
movq %rcx, -72(%rbp) ## 8-byte Spill
movq %r15, %r8
imulq -800(%rbp), %r8 ## 8-byte Folded Reload
movb -64(%rbp), %dl ## 1-byte Reload
addb $255, %dl
movq %r8, %rdx
adcq %rcx, %rdx
movq %rdx, -344(%rbp) ## 8-byte Spill
movq -48(%rbp), %rcx ## 8-byte Reload
addq %rcx, -264(%rbp) ## 8-byte Folded Spill
adcq %r11, -384(%rbp) ## 8-byte Folded Spill
setb -544(%rbp) ## 1-byte Folded Spill
movq -192(%rbp), %rcx ## 8-byte Reload
adcq %rdx, %rcx
movq %rcx, -128(%rbp) ## 8-byte Spill
mulq %rdi
movq %rdi, %rcx
movq %rdx, %r9
movq %r10, %rax
addq %r13, %rax
movq %rdx, %rax
adcq $0, %rax
setb -272(%rbp) ## 1-byte Folded Spill
movq -376(%rbp), %r11 ## 8-byte Reload
subq %r15, %r11
movq -112(%rbp), %rdi ## 8-byte Reload
movq %rdi, %rax
mulq %rcx
movq %rdx, %rsi
movq %rdx, -456(%rbp) ## 8-byte Spill
movq %r15, %rax
movl $4294967294, %edx ## imm = 0xFFFFFFFE
mulq %rdx
imulq %rcx, %rdi
addq -424(%rbp), %r11 ## 8-byte Folded Reload
movq %r11, -376(%rbp) ## 8-byte Spill
adcq %r8, %rdx
movq %rdx, -416(%rbp) ## 8-byte Spill
movq %r8, -536(%rbp) ## 8-byte Spill
addq %r13, %r10
adcq %r9, %rdi
movq %rdi, %r9
movq %rdi, -320(%rbp) ## 8-byte Spill
movzbl -272(%rbp), %eax ## 1-byte Folded Reload
adcq %rsi, %rax
movq %rax, -328(%rbp) ## 8-byte Spill
movq -368(%rbp), %rax ## 8-byte Reload
movq 28(%rax), %rax
movq %rax, -272(%rbp) ## 8-byte Spill
movq %rax, %rcx
movq -432(%rbp), %rdx ## 8-byte Reload
imulq %rdx, %rcx
mulq %rdx
movq %rdx, -784(%rbp) ## 8-byte Spill
xorl %eax, %eax
cmpq -296(%rbp), %r14 ## 8-byte Folded Reload
setb %al
addq %r14, %rcx
movq %rcx, -792(%rbp) ## 8-byte Spill
adcq %rdx, %rax
movq %rax, -488(%rbp) ## 8-byte Spill
movq -624(%rbp), %rax ## 8-byte Reload
cmpq %rax, %rbx
adcq -520(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -608(%rbp) ## 8-byte Spill
addb $255, -152(%rbp) ## 1-byte Folded Spill
adcq -216(%rbp), %rbx ## 8-byte Folded Reload
setb -736(%rbp) ## 1-byte Folded Spill
adcq %rcx, %rax
movq %rax, %r14
movq %rax, -296(%rbp) ## 8-byte Spill
movq -440(%rbp), %rax ## 8-byte Reload
movq %rax, %rdi
movq -352(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rdi
mulq %rcx
movq %rdx, %r15
movq %rdx, -472(%rbp) ## 8-byte Spill
xorl %eax, %eax
movq -288(%rbp), %r13 ## 8-byte Reload
cmpq -448(%rbp), %r13 ## 8-byte Folded Reload
setb %al
movq %rax, %rbx
movq -632(%rbp), %rax ## 8-byte Reload
movq %rax, %rcx
movq -240(%rbp), %rdx ## 8-byte Reload
imulq %rdx, %rcx
mulq %rdx
movq %rdx, -480(%rbp) ## 8-byte Spill
xorl %eax, %eax
movq -224(%rbp), %rsi ## 8-byte Reload
cmpq -312(%rbp), %rsi ## 8-byte Folded Reload
setb %al
addq %rsi, %rcx
movq %rcx, -120(%rbp) ## 8-byte Spill
adcq %rdx, %rax
movq %rax, -600(%rbp) ## 8-byte Spill
addq %r13, %rdi
movq %rdi, -528(%rbp) ## 8-byte Spill
adcq %r15, %rbx
movq %rbx, -464(%rbp) ## 8-byte Spill
addb $255, -160(%rbp) ## 1-byte Folded Spill
adcq -304(%rbp), %r12 ## 8-byte Folded Reload
setb -304(%rbp) ## 1-byte Folded Spill
movq %rdi, %rax
adcq %r14, %rax
movq %rax, %rsi
movq %rax, -448(%rbp) ## 8-byte Spill
movq -392(%rbp), %rax ## 8-byte Reload
movq -104(%rbp), %rdx ## 8-byte Reload
cmpq %rax, %rdx
movq %rax, %rdi
adcq -200(%rbp), %rdi ## 8-byte Folded Reload
movq %rdi, -288(%rbp) ## 8-byte Spill
addb $255, -512(%rbp) ## 1-byte Folded Spill
adcq %rdx, -256(%rbp) ## 8-byte Folded Spill
setb -160(%rbp) ## 1-byte Folded Spill
movq %rdi, %rdx
adcq %rsi, %rdx
movq %rdx, -152(%rbp) ## 8-byte Spill
addb $255, -168(%rbp) ## 1-byte Folded Spill
movq -56(%rbp), %rax ## 8-byte Reload
adcq %rax, -408(%rbp) ## 8-byte Folded Spill
setb -216(%rbp) ## 1-byte Folded Spill
movq %rcx, %rax
adcq %rdx, %rax
movq %rax, -616(%rbp) ## 8-byte Spill
addb $255, -96(%rbp) ## 1-byte Folded Spill
movq -88(%rbp), %rcx ## 8-byte Reload
adcq -136(%rbp), %rcx ## 8-byte Folded Reload
setb -96(%rbp) ## 1-byte Folded Spill
movq -184(%rbp), %rcx ## 8-byte Reload
adcq %rax, %rcx
movq %rcx, -504(%rbp) ## 8-byte Spill
addb $255, -80(%rbp) ## 1-byte Folded Spill
adcq -144(%rbp), %r10 ## 8-byte Folded Reload
setb -312(%rbp) ## 1-byte Folded Spill
adcq %rcx, %r9
movq %r9, -80(%rbp) ## 8-byte Spill
addb $255, -64(%rbp) ## 1-byte Folded Spill
adcq %r8, -72(%rbp) ## 8-byte Folded Spill
setb -224(%rbp) ## 1-byte Folded Spill
adcq %r9, %r11
movq %r11, -384(%rbp) ## 8-byte Spill
movq -552(%rbp), %r14 ## 8-byte Reload
movq %r14, %rdx
movq -232(%rbp), %r13 ## 8-byte Reload
imulq %r13, %rdx
movq %rdx, %rbx
movq %rdx, -136(%rbp) ## 8-byte Spill
movq -208(%rbp), %r10 ## 8-byte Reload
movq %r10, %rax
mulq %r13
movq -192(%rbp), %rax ## 8-byte Reload
cmpq -248(%rbp), %rax ## 8-byte Folded Reload
adcq $0, %rdx
movq %rdx, -48(%rbp) ## 8-byte Spill
addb $255, -544(%rbp) ## 1-byte Folded Spill
adcq %rax, -344(%rbp) ## 8-byte Folded Spill
setb -144(%rbp) ## 1-byte Folded Spill
leaq (%rdx,%rbx), %rax
adcq %r11, %rax
movq %rax, %r9
movq %rax, -712(%rbp) ## 8-byte Spill
movq -264(%rbp), %rbx ## 8-byte Reload
movq %rbx, %r11
imulq -800(%rbp), %r11 ## 8-byte Folded Reload
movq %r11, -248(%rbp) ## 8-byte Spill
movq %rbx, %rsi
shlq $32, %rsi
movq %rbx, %rax
movl $4294967295, %edx ## imm = 0xFFFFFFFF
mulq %rdx
movq %rdx, -256(%rbp) ## 8-byte Spill
xorl %eax, %eax
subq %rbx, %rsi
movq %rsi, -56(%rbp) ## 8-byte Spill
setb %al
movq -176(%rbp), %rsi ## 8-byte Reload
leaq (%rsi,%rax), %r8
xorl %r15d, %r15d
addq %rdx, %r8
setb %r15b
addq %rax, %rsi
adcq -128(%rbp), %r15 ## 8-byte Folded Reload
setb -720(%rbp) ## 1-byte Folded Spill
adcq %r9, %r11
movq %r11, -592(%rbp) ## 8-byte Spill
movq -896(%rbp), %r12 ## 8-byte Reload
movq 20(%r12), %rsi
movq %rsi, -512(%rbp) ## 8-byte Spill
imulq %rsi, %r10
movq %r10, -704(%rbp) ## 8-byte Spill
movq -280(%rbp), %rax ## 8-byte Reload
movq %rax, %rdi
imulq %rsi, %rdi
mulq %rsi
movq %rdx, %r9
movq -728(%rbp), %rcx ## 8-byte Reload
movq %rcx, %rax
mulq %rsi
imulq %rsi, %rcx
addq %rdi, %rdx
adcq %r10, %r9
movq %r9, -680(%rbp) ## 8-byte Spill
movq %r8, %rax
addq %rcx, %rax
movq %rdx, %rdi
adcq %r15, %rdi
addq %r8, %rcx
movq %rcx, -688(%rbp) ## 8-byte Spill
adcq %r15, %rdx
setb -696(%rbp) ## 1-byte Folded Spill
adcq %r11, %r9
movq %rcx, %rbx
shlq $32, %rbx
movq %rcx, %rax
movl $4294967295, %edx ## imm = 0xFFFFFFFF
mulq %rdx
movq %rdx, -104(%rbp) ## 8-byte Spill
xorl %eax, %eax
subq %rcx, %rbx
movq %rbx, -192(%rbp) ## 8-byte Spill
setb %al
leaq (%rdi,%rax), %rbx
xorl %r15d, %r15d
addq %rdx, %rbx
movq %rbx, -408(%rbp) ## 8-byte Spill
setb %r15b
addq %rax, %rdi
adcq %r9, %r15
movq %r15, -760(%rbp) ## 8-byte Spill
setb -176(%rbp) ## 1-byte Folded Spill
movq %r14, %rax
movq %r13, %r10
mulq %r13
movq %rdx, -72(%rbp) ## 8-byte Spill
movq -48(%rbp), %r9 ## 8-byte Reload
movq %r9, %rax
movq -136(%rbp), %r13 ## 8-byte Reload
addq %r13, %rax
movq %rdx, %rax
adcq $0, %rax
setb -584(%rbp) ## 1-byte Folded Spill
movq -112(%rbp), %rax ## 8-byte Reload
movq %rax, %rcx
imulq %r10, %rcx
movq %rcx, -64(%rbp) ## 8-byte Spill
movq %r10, %rbx
movq -56(%rbp), %rcx ## 8-byte Reload
movq -264(%rbp), %r8 ## 8-byte Reload
subq %r8, %rcx
movq 24(%r12), %rdi
movq %rdi, -128(%rbp) ## 8-byte Spill
movq -208(%rbp), %rdx ## 8-byte Reload
imulq %rdi, %rdx
movq %rdx, %r10
movq %rdx, -344(%rbp) ## 8-byte Spill
movq -280(%rbp), %rsi ## 8-byte Reload
movq %rsi, %r11
imulq %rdi, %r11
mulq %rbx
movq %rdx, %r14
movq %rdx, -496(%rbp) ## 8-byte Spill
movq %r8, %rax
movl $4294967294, %edx ## imm = 0xFFFFFFFE
mulq %rdx
movq %rdx, %r12
movq %rsi, %rax
mulq %rdi
movq %rdx, %rsi
movq -728(%rbp), %rbx ## 8-byte Reload
movq %rbx, %rax
mulq %rdi
imulq %rdi, %rbx
movq %rbx, -168(%rbp) ## 8-byte Spill
addq %r11, %rdx
movq %rdx, -768(%rbp) ## 8-byte Spill
adcq %r10, %rsi
movq %rsi, -264(%rbp) ## 8-byte Spill
movq -408(%rbp), %rax ## 8-byte Reload
addq %rbx, %rax
movq %rdx, %rax
adcq %r15, %rax
movq %rax, -544(%rbp) ## 8-byte Spill
addq -256(%rbp), %rcx ## 8-byte Folded Reload
movq %rcx, -56(%rbp) ## 8-byte Spill
movq -248(%rbp), %rax ## 8-byte Reload
adcq %rax, %r12
movq %r12, -88(%rbp) ## 8-byte Spill
addq %r13, %r9
movq %r9, -48(%rbp) ## 8-byte Spill
movq -64(%rbp), %rax ## 8-byte Reload
adcq -72(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -64(%rbp) ## 8-byte Spill
movzbl -584(%rbp), %eax ## 1-byte Folded Reload
adcq %r14, %rax
movq %rax, -72(%rbp) ## 8-byte Spill
movq -368(%rbp), %rax ## 8-byte Reload
movq 32(%rax), %rax
movq %rax, -136(%rbp) ## 8-byte Spill
movq %rax, %rsi
movq -432(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rsi
mulq %rcx
movq %rdx, -648(%rbp) ## 8-byte Spill
xorl %ecx, %ecx
movq -488(%rbp), %rax ## 8-byte Reload
cmpq -784(%rbp), %rax ## 8-byte Folded Reload
setb %cl
addq %rax, %rsi
movq %rsi, -656(%rbp) ## 8-byte Spill
adcq %rdx, %rcx
movq %rcx, -568(%rbp) ## 8-byte Spill
movq -624(%rbp), %r13 ## 8-byte Reload
movq -608(%rbp), %rcx ## 8-byte Reload
cmpq %r13, %rcx
adcq -520(%rbp), %r13 ## 8-byte Folded Reload
movq %r13, -880(%rbp) ## 8-byte Spill
addb $255, -736(%rbp) ## 1-byte Folded Spill
adcq -792(%rbp), %rcx ## 8-byte Folded Reload
setb -872(%rbp) ## 1-byte Folded Spill
adcq %rsi, %r13
movq %r13, -784(%rbp) ## 8-byte Spill
movq -272(%rbp), %rax ## 8-byte Reload
movq %rax, %r10
movq -352(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %r10
mulq %rcx
movq %rdx, %r15
movq %rdx, -744(%rbp) ## 8-byte Spill
xorl %r9d, %r9d
movq -464(%rbp), %rbx ## 8-byte Reload
cmpq -472(%rbp), %rbx ## 8-byte Folded Reload
setb %r9b
movq -440(%rbp), %rax ## 8-byte Reload
movq %rax, %rdx
movq -240(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rdx
movq %rdx, %r14
mulq %rcx
movq %rdx, %r11
movq %rdx, -752(%rbp) ## 8-byte Spill
xorl %esi, %esi
movq -600(%rbp), %r8 ## 8-byte Reload
cmpq -480(%rbp), %r8 ## 8-byte Folded Reload
setb %sil
movq -632(%rbp), %rax ## 8-byte Reload
movq %rax, %r12
movq -400(%rbp), %rdx ## 8-byte Reload
imulq %rdx, %r12
mulq %rdx
movq %rdx, -608(%rbp) ## 8-byte Spill
xorl %edi, %edi
movq -328(%rbp), %rax ## 8-byte Reload
cmpq -456(%rbp), %rax ## 8-byte Folded Reload
setb %dil
addq %rax, %r12
movq %r12, -576(%rbp) ## 8-byte Spill
adcq %rdx, %rdi
movq %rdi, -848(%rbp) ## 8-byte Spill
addq %r8, %r14
movq %r14, -480(%rbp) ## 8-byte Spill
adcq %r11, %rsi
movq %rsi, -840(%rbp) ## 8-byte Spill
addq %rbx, %r10
movq %r10, -792(%rbp) ## 8-byte Spill
adcq %r15, %r9
movq %r9, -832(%rbp) ## 8-byte Spill
addb $255, -304(%rbp) ## 1-byte Folded Spill
movq -296(%rbp), %rax ## 8-byte Reload
adcq %rax, -528(%rbp) ## 8-byte Folded Spill
setb -472(%rbp) ## 1-byte Folded Spill
adcq %r13, %r10
movq %r10, -736(%rbp) ## 8-byte Spill
movq -392(%rbp), %rax ## 8-byte Reload
movq -288(%rbp), %rcx ## 8-byte Reload
cmpq %rax, %rcx
movq %rax, %rsi
adcq -200(%rbp), %rsi ## 8-byte Folded Reload
movq %rsi, -488(%rbp) ## 8-byte Spill
addb $255, -160(%rbp) ## 1-byte Folded Spill
adcq -448(%rbp), %rcx ## 8-byte Folded Reload
setb -464(%rbp) ## 1-byte Folded Spill
movq %rsi, %rcx
adcq %r10, %rcx
movq %rcx, -448(%rbp) ## 8-byte Spill
addb $255, -216(%rbp) ## 1-byte Folded Spill
movq -152(%rbp), %rax ## 8-byte Reload
adcq -120(%rbp), %rax ## 8-byte Folded Reload
setb -664(%rbp) ## 1-byte Folded Spill
movq %r14, %rax
adcq %rcx, %rax
movq %rax, %rcx
movq %rax, -288(%rbp) ## 8-byte Spill
movq -336(%rbp), %rax ## 8-byte Reload
movq -184(%rbp), %rdx ## 8-byte Reload
cmpq %rax, %rdx
movq %rax, %rsi
adcq -360(%rbp), %rsi ## 8-byte Folded Reload
movq %rsi, -584(%rbp) ## 8-byte Spill
addb $255, -96(%rbp) ## 1-byte Folded Spill
adcq %rdx, -616(%rbp) ## 8-byte Folded Spill
setb -640(%rbp) ## 1-byte Folded Spill
movq %rsi, %rdx
adcq %rcx, %rdx
movq %rdx, -672(%rbp) ## 8-byte Spill
addb $255, -312(%rbp) ## 1-byte Folded Spill
movq -504(%rbp), %rax ## 8-byte Reload
adcq -320(%rbp), %rax ## 8-byte Folded Reload
setb -560(%rbp) ## 1-byte Folded Spill
adcq %rdx, %r12
movq %r12, -320(%rbp) ## 8-byte Spill
addb $255, -224(%rbp) ## 1-byte Folded Spill
movq -80(%rbp), %rax ## 8-byte Reload
adcq -376(%rbp), %rax ## 8-byte Folded Reload
setb -80(%rbp) ## 1-byte Folded Spill
movq -416(%rbp), %rax ## 8-byte Reload
adcq %r12, %rax
movq %rax, -96(%rbp) ## 8-byte Spill
addb $255, -144(%rbp) ## 1-byte Folded Spill
movq -48(%rbp), %rcx ## 8-byte Reload
adcq -384(%rbp), %rcx ## 8-byte Folded Reload
setb -616(%rbp) ## 1-byte Folded Spill
movq -64(%rbp), %r12 ## 8-byte Reload
adcq %rax, %r12
movq %r12, -144(%rbp) ## 8-byte Spill
addb $255, -720(%rbp) ## 1-byte Folded Spill
movq -248(%rbp), %rax ## 8-byte Reload
adcq %rax, -712(%rbp) ## 8-byte Folded Spill
setb -504(%rbp) ## 1-byte Folded Spill
movq -56(%rbp), %rax ## 8-byte Reload
adcq %r12, %rax
movq %rax, %rdi
movq %rax, -856(%rbp) ## 8-byte Spill
movq -208(%rbp), %rax ## 8-byte Reload
movq -512(%rbp), %rsi ## 8-byte Reload
mulq %rsi
movq -552(%rbp), %rax ## 8-byte Reload
movq %rax, %r13
imulq %rsi, %r13
movq %rsi, %r8
movq -680(%rbp), %rcx ## 8-byte Reload
cmpq -704(%rbp), %rcx ## 8-byte Folded Reload
adcq $0, %rdx
addb $255, -696(%rbp) ## 1-byte Folded Spill
adcq -592(%rbp), %rcx ## 8-byte Folded Reload
setb -776(%rbp) ## 1-byte Folded Spill
leaq (%rdx,%r13), %rcx
movq %rdx, %rbx
adcq %rdi, %rcx
movq %rcx, -384(%rbp) ## 8-byte Spill
movq -688(%rbp), %rdi ## 8-byte Reload
movq %rdi, %r10
imulq -800(%rbp), %r10 ## 8-byte Folded Reload
movb -176(%rbp), %dl ## 1-byte Reload
addb $255, %dl
movq %r10, %rdx
movq %r10, -456(%rbp) ## 8-byte Spill
adcq %rcx, %rdx
movq %rdx, -224(%rbp) ## 8-byte Spill
movq -408(%rbp), %rcx ## 8-byte Reload
addq %rcx, -168(%rbp) ## 8-byte Folded Spill
movq -760(%rbp), %rcx ## 8-byte Reload
adcq %rcx, -768(%rbp) ## 8-byte Folded Spill
setb -48(%rbp) ## 1-byte Folded Spill
movq -264(%rbp), %rcx ## 8-byte Reload
adcq %rdx, %rcx
movq %rcx, -408(%rbp) ## 8-byte Spill
mulq %rsi
movq %rdx, %r9
movq %rbx, %rax
addq %r13, %rax
movq %rdx, %rax
adcq $0, %rax
setb %r11b
movq -192(%rbp), %rsi ## 8-byte Reload
subq %rdi, %rsi
movq %rdi, %rcx
movq -112(%rbp), %r15 ## 8-byte Reload
movq %r15, %rax
mulq %r8
movq %r8, %rdi
movq %rdx, %r8
movq %rdx, -216(%rbp) ## 8-byte Spill
movq %rcx, %rax
movl $4294967294, %ecx ## imm = 0xFFFFFFFE
mulq %rcx
imulq %rdi, %r15
addq -104(%rbp), %rsi ## 8-byte Folded Reload
movq %rsi, -192(%rbp) ## 8-byte Spill
adcq %r10, %rdx
movq %rdx, -328(%rbp) ## 8-byte Spill
addq %r13, %rbx
movq %rbx, -824(%rbp) ## 8-byte Spill
adcq %r9, %r15
movq %r15, -312(%rbp) ## 8-byte Spill
movzbl %r11b, %eax
adcq %r8, %rax
movq %rax, -152(%rbp) ## 8-byte Spill
movq -368(%rbp), %rax ## 8-byte Reload
movq 36(%rax), %rax
movq %rax, -376(%rbp) ## 8-byte Spill
movq %rax, %rsi
movq -432(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rsi
mulq %rcx
movq %rdx, -760(%rbp) ## 8-byte Spill
xorl %eax, %eax
movq -568(%rbp), %rcx ## 8-byte Reload
cmpq -648(%rbp), %rcx ## 8-byte Folded Reload
setb %al
addq %rcx, %rsi
movq %rsi, -768(%rbp) ## 8-byte Spill
adcq %rdx, %rax
movq %rax, -888(%rbp) ## 8-byte Spill
movq -624(%rbp), %rax ## 8-byte Reload
movq -880(%rbp), %rcx ## 8-byte Reload
cmpq %rax, %rcx
adcq -520(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -864(%rbp) ## 8-byte Spill
addb $255, -872(%rbp) ## 1-byte Folded Spill
adcq -656(%rbp), %rcx ## 8-byte Folded Reload
setb -296(%rbp) ## 1-byte Folded Spill
adcq %rsi, %rax
movq %rax, -528(%rbp) ## 8-byte Spill
movq -136(%rbp), %rax ## 8-byte Reload
movq %rax, %r11
movq -352(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %r11
mulq %rcx
movq %rdx, -120(%rbp) ## 8-byte Spill
xorl %eax, %eax
movq -832(%rbp), %r15 ## 8-byte Reload
cmpq -744(%rbp), %r15 ## 8-byte Folded Reload
setb %al
movq %rax, %r12
movq -272(%rbp), %rax ## 8-byte Reload
movq %rax, %rdx
movq -240(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rdx
movq %rdx, -184(%rbp) ## 8-byte Spill
mulq %rcx
movq %rdx, %r14
movq %rdx, -808(%rbp) ## 8-byte Spill
xorl %eax, %eax
movq -840(%rbp), %r10 ## 8-byte Reload
cmpq -752(%rbp), %r10 ## 8-byte Folded Reload
setb %al
movq %rax, %r13
movq -440(%rbp), %rax ## 8-byte Reload
movq %rax, %rdx
movq -400(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rdx
movq %rdx, %rbx
mulq %rcx
movq %rdx, %r9
movq %rdx, -752(%rbp) ## 8-byte Spill
xorl %esi, %esi
movq -848(%rbp), %r8 ## 8-byte Reload
cmpq -608(%rbp), %r8 ## 8-byte Folded Reload
setb %sil
movq -632(%rbp), %rax ## 8-byte Reload
movq %rax, %rdx
movq -232(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rdx
movq %rdx, %rdi
mulq %rcx
movq %rdx, -880(%rbp) ## 8-byte Spill
xorl %eax, %eax
movq -72(%rbp), %rcx ## 8-byte Reload
cmpq -496(%rbp), %rcx ## 8-byte Folded Reload
setb %al
addq %rcx, %rdi
movq %rdi, -600(%rbp) ## 8-byte Spill
adcq %rdx, %rax
movq %rax, -872(%rbp) ## 8-byte Spill
addq %r8, %rbx
movq %rbx, -720(%rbp) ## 8-byte Spill
adcq %r9, %rsi
movq %rsi, -744(%rbp) ## 8-byte Spill
movq -184(%rbp), %rsi ## 8-byte Reload
addq %r10, %rsi
movq %rsi, -184(%rbp) ## 8-byte Spill
adcq %r14, %r13
movq %r13, -304(%rbp) ## 8-byte Spill
addq %r15, %r11
movq %r11, -608(%rbp) ## 8-byte Spill
adcq -120(%rbp), %r12 ## 8-byte Folded Reload
movq %r12, -160(%rbp) ## 8-byte Spill
addb $255, -472(%rbp) ## 1-byte Folded Spill
movq -784(%rbp), %rax ## 8-byte Reload
adcq %rax, -792(%rbp) ## 8-byte Folded Spill
setb -712(%rbp) ## 1-byte Folded Spill
movq %r11, %rax
adcq -528(%rbp), %rax ## 8-byte Folded Reload
movq %rax, %r8
movq %rax, -656(%rbp) ## 8-byte Spill
movq -392(%rbp), %rax ## 8-byte Reload
movq -488(%rbp), %rcx ## 8-byte Reload
cmpq %rax, %rcx
adcq -200(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -568(%rbp) ## 8-byte Spill
addb $255, -464(%rbp) ## 1-byte Folded Spill
adcq -736(%rbp), %rcx ## 8-byte Folded Reload
setb -472(%rbp) ## 1-byte Folded Spill
movq %rax, %rcx
adcq %r8, %rcx
movq %rcx, -680(%rbp) ## 8-byte Spill
addb $255, -664(%rbp) ## 1-byte Folded Spill
movq -480(%rbp), %rax ## 8-byte Reload
adcq %rax, -448(%rbp) ## 8-byte Folded Spill
setb -464(%rbp) ## 1-byte Folded Spill
movq %rsi, %rax
adcq %rcx, %rax
movq %rax, %rcx
movq %rax, -664(%rbp) ## 8-byte Spill
movq -336(%rbp), %rax ## 8-byte Reload
movq -584(%rbp), %rdx ## 8-byte Reload
cmpq %rax, %rdx
adcq -360(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -648(%rbp) ## 8-byte Spill
addb $255, -640(%rbp) ## 1-byte Folded Spill
adcq -288(%rbp), %rdx ## 8-byte Folded Reload
setb -704(%rbp) ## 1-byte Folded Spill
movq %rax, %rsi
adcq %rcx, %rsi
movq %rsi, -584(%rbp) ## 8-byte Spill
addb $255, -560(%rbp) ## 1-byte Folded Spill
movq -576(%rbp), %rax ## 8-byte Reload
adcq %rax, -672(%rbp) ## 8-byte Folded Spill
setb -696(%rbp) ## 1-byte Folded Spill
movq %rbx, %rax
adcq %rsi, %rax
movq %rax, %rdx
movq %rax, -576(%rbp) ## 8-byte Spill
movq -536(%rbp), %rax ## 8-byte Reload
movq -416(%rbp), %rcx ## 8-byte Reload
cmpq %rax, %rcx
movq %rax, %rsi
adcq -424(%rbp), %rsi ## 8-byte Folded Reload
movq %rsi, -640(%rbp) ## 8-byte Spill
addb $255, -80(%rbp) ## 1-byte Folded Spill
adcq %rcx, -320(%rbp) ## 8-byte Folded Spill
setb -592(%rbp) ## 1-byte Folded Spill
movq %rsi, %rcx
adcq %rdx, %rcx
movq %rcx, -672(%rbp) ## 8-byte Spill
addb $255, -616(%rbp) ## 1-byte Folded Spill
movq -64(%rbp), %rax ## 8-byte Reload
adcq %rax, -96(%rbp) ## 8-byte Folded Spill
setb -688(%rbp) ## 1-byte Folded Spill
movq %rdi, %rax
adcq %rcx, %rax
movq %rax, -488(%rbp) ## 8-byte Spill
addb $255, -504(%rbp) ## 1-byte Folded Spill
movq -144(%rbp), %rcx ## 8-byte Reload
adcq -56(%rbp), %rcx ## 8-byte Folded Reload
setb -144(%rbp) ## 1-byte Folded Spill
movq -88(%rbp), %rcx ## 8-byte Reload
adcq %rax, %rcx
movq %rcx, -480(%rbp) ## 8-byte Spill
addb $255, -776(%rbp) ## 1-byte Folded Spill
movq -824(%rbp), %rax ## 8-byte Reload
adcq -856(%rbp), %rax ## 8-byte Folded Reload
setb -496(%rbp) ## 1-byte Folded Spill
movq -312(%rbp), %r9 ## 8-byte Reload
adcq %rcx, %r9
movq %r9, -784(%rbp) ## 8-byte Spill
addb $255, -176(%rbp) ## 1-byte Folded Spill
movq -384(%rbp), %rax ## 8-byte Reload
adcq -456(%rbp), %rax ## 8-byte Folded Reload
setb -736(%rbp) ## 1-byte Folded Spill
movq -192(%rbp), %rcx ## 8-byte Reload
adcq %r9, %rcx
movq %rcx, %rsi
movq %rcx, -776(%rbp) ## 8-byte Spill
movq -552(%rbp), %r15 ## 8-byte Reload
movq %r15, %rbx
movq -128(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rbx
movq %rbx, -56(%rbp) ## 8-byte Spill
movq -208(%rbp), %r10 ## 8-byte Reload
movq %r10, %rax
mulq %rcx
movq -264(%rbp), %rax ## 8-byte Reload
cmpq -344(%rbp), %rax ## 8-byte Folded Reload
adcq $0, %rdx
movq %rdx, -344(%rbp) ## 8-byte Spill
addb $255, -48(%rbp) ## 1-byte Folded Spill
adcq %rax, -224(%rbp) ## 8-byte Folded Spill
setb -560(%rbp) ## 1-byte Folded Spill
leaq (%rdx,%rbx), %rax
adcq %rsi, %rax
movq %rax, %rbx
movq %rax, -848(%rbp) ## 8-byte Spill
movq -168(%rbp), %r13 ## 8-byte Reload
movq %r13, %r11
imulq -800(%rbp), %r11 ## 8-byte Folded Reload
movq %r11, -416(%rbp) ## 8-byte Spill
movq %r13, %rcx
shlq $32, %rcx
movq %r13, %rax
movl $4294967295, %edx ## imm = 0xFFFFFFFF
mulq %rdx
movq %rdx, -64(%rbp) ## 8-byte Spill
xorl %eax, %eax
subq %r13, %rcx
movq %rcx, -48(%rbp) ## 8-byte Spill
setb %al
movq -544(%rbp), %rcx ## 8-byte Reload
leaq (%rcx,%rax), %r9
xorl %r8d, %r8d
addq %rdx, %r9
setb %r8b
addq %rax, %rcx
adcq -408(%rbp), %r8 ## 8-byte Folded Reload
setb -856(%rbp) ## 1-byte Folded Spill
adcq %rbx, %r11
movq %r11, -824(%rbp) ## 8-byte Spill
movq -896(%rbp), %r14 ## 8-byte Reload
movq 28(%r14), %rbx
movq %rbx, -544(%rbp) ## 8-byte Spill
imulq %rbx, %r10
movq %r10, -840(%rbp) ## 8-byte Spill
movq -280(%rbp), %rax ## 8-byte Reload
movq %rax, %rdi
imulq %rbx, %rdi
mulq %rbx
movq %rdx, %r12
movq -728(%rbp), %rcx ## 8-byte Reload
movq %rcx, %rax
mulq %rbx
imulq %rbx, %rcx
addq %rdi, %rdx
adcq %r10, %r12
movq %r12, -816(%rbp) ## 8-byte Spill
movq %r9, %rax
movq %rcx, %rbx
addq %rcx, %rax
movq %rdx, %rdi
adcq %r8, %rdi
addq %r9, %rbx
movq %rbx, -320(%rbp) ## 8-byte Spill
adcq %r8, %rdx
setb -832(%rbp) ## 1-byte Folded Spill
adcq %r11, %r12
movq %rbx, %rsi
shlq $32, %rsi
movq %rbx, %rax
movl $4294967295, %edx ## imm = 0xFFFFFFFF
mulq %rdx
movq %rdx, -264(%rbp) ## 8-byte Spill
xorl %eax, %eax
subq %rbx, %rsi
movq %rsi, -96(%rbp) ## 8-byte Spill
setb %al
leaq (%rdi,%rax), %rbx
xorl %esi, %esi
addq %rdx, %rbx
movq %rbx, -72(%rbp) ## 8-byte Spill
setb %sil
addq %rax, %rdi
adcq %r12, %rsi
movq %rsi, %r12
movq %rsi, -904(%rbp) ## 8-byte Spill
setb -224(%rbp) ## 1-byte Folded Spill
movq %r15, %rax
movq -128(%rbp), %rcx ## 8-byte Reload
mulq %rcx
movq %rdx, -176(%rbp) ## 8-byte Spill
movq -344(%rbp), %r8 ## 8-byte Reload
movq %r8, %rax
addq -56(%rbp), %rax ## 8-byte Folded Reload
movq %rdx, %rax
adcq $0, %rax
setb -792(%rbp) ## 1-byte Folded Spill
movq -112(%rbp), %rax ## 8-byte Reload
movq %rax, %r9
imulq %rcx, %r9
movq %rcx, %rbx
movq -48(%rbp), %rsi ## 8-byte Reload
subq %r13, %rsi
movq 32(%r14), %rcx
movq %rcx, -408(%rbp) ## 8-byte Spill
movq -208(%rbp), %rdx ## 8-byte Reload
imulq %rcx, %rdx
movq %rdx, %r11
movq %rdx, -288(%rbp) ## 8-byte Spill
movq -280(%rbp), %r15 ## 8-byte Reload
movq %r15, %rdi
imulq %rcx, %rdi
mulq %rbx
movq %rdx, %r10
movq %rdx, -384(%rbp) ## 8-byte Spill
movq %r13, %rax
movl $4294967294, %edx ## imm = 0xFFFFFFFE
mulq %rdx
movq %rdx, %r14
movq %r15, %rax
mulq %rcx
movq %rdx, %rbx
movq -728(%rbp), %r13 ## 8-byte Reload
movq %r13, %rax
mulq %rcx
imulq %rcx, %r13
movq %r13, -168(%rbp) ## 8-byte Spill
addq %rdi, %rdx
movq %rdx, -976(%rbp) ## 8-byte Spill
adcq %r11, %rbx
movq %rbx, -616(%rbp) ## 8-byte Spill
movq -72(%rbp), %rax ## 8-byte Reload
addq %r13, %rax
movq %rdx, %rax
adcq %r12, %rax
movq %rax, -448(%rbp) ## 8-byte Spill
addq -64(%rbp), %rsi ## 8-byte Folded Reload
movq %rsi, -48(%rbp) ## 8-byte Spill
movq -416(%rbp), %rax ## 8-byte Reload
adcq %rax, %r14
movq %r14, -504(%rbp) ## 8-byte Spill
addq -56(%rbp), %r8 ## 8-byte Folded Reload
movq %r8, -344(%rbp) ## 8-byte Spill
adcq -176(%rbp), %r9 ## 8-byte Folded Reload
movq %r9, -80(%rbp) ## 8-byte Spill
movzbl -792(%rbp), %eax ## 1-byte Folded Reload
adcq %r10, %rax
movq %rax, -792(%rbp) ## 8-byte Spill
movq -368(%rbp), %rax ## 8-byte Reload
movq 40(%rax), %rax
movq %rax, -56(%rbp) ## 8-byte Spill
movq %rax, %rcx
movq -432(%rbp), %rdx ## 8-byte Reload
imulq %rdx, %rcx
mulq %rdx
movq %rdx, -984(%rbp) ## 8-byte Spill
xorl %esi, %esi
movq -888(%rbp), %rax ## 8-byte Reload
cmpq -760(%rbp), %rax ## 8-byte Folded Reload
setb %sil
addq %rax, %rcx
movq %rcx, %rdi
movq %rcx, -944(%rbp) ## 8-byte Spill
adcq %rdx, %rsi
movq %rsi, -928(%rbp) ## 8-byte Spill
movq -624(%rbp), %rax ## 8-byte Reload
movq -864(%rbp), %rcx ## 8-byte Reload
cmpq %rax, %rcx
adcq -520(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -920(%rbp) ## 8-byte Spill
addb $255, -296(%rbp) ## 1-byte Folded Spill
adcq -768(%rbp), %rcx ## 8-byte Folded Reload
setb -936(%rbp) ## 1-byte Folded Spill
adcq %rdi, %rax
movq %rax, -1080(%rbp) ## 8-byte Spill
movq -376(%rbp), %rax ## 8-byte Reload
movq %rax, %r13
movq -352(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %r13
mulq %rcx
movq %rdx, -1072(%rbp) ## 8-byte Spill
xorl %ecx, %ecx
movq -120(%rbp), %rax ## 8-byte Reload
cmpq %rax, -160(%rbp) ## 8-byte Folded Reload
setb %cl
movq %rcx, -952(%rbp) ## 8-byte Spill
movq -136(%rbp), %rax ## 8-byte Reload
movq %rax, %rcx
movq -240(%rbp), %rdx ## 8-byte Reload
imulq %rdx, %rcx
movq %rcx, -176(%rbp) ## 8-byte Spill
mulq %rdx
movq %rdx, -296(%rbp) ## 8-byte Spill
xorl %r14d, %r14d
movq -304(%rbp), %rcx ## 8-byte Reload
cmpq -808(%rbp), %rcx ## 8-byte Folded Reload
setb %r14b
movq -272(%rbp), %rax ## 8-byte Reload
movq %rax, %rcx
movq -400(%rbp), %rdx ## 8-byte Reload
imulq %rdx, %rcx
movq %rcx, %r11
mulq %rdx
movq %rdx, %r15
movq %rdx, -960(%rbp) ## 8-byte Spill
xorl %r9d, %r9d
movq -744(%rbp), %r12 ## 8-byte Reload
cmpq -752(%rbp), %r12 ## 8-byte Folded Reload
setb %r9b
movq -440(%rbp), %rax ## 8-byte Reload
movq %rax, %r8
movq -232(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %r8
mulq %rcx
movq %rdx, %rbx
movq %rdx, -992(%rbp) ## 8-byte Spill
xorl %ecx, %ecx
movq -872(%rbp), %r10 ## 8-byte Reload
cmpq -880(%rbp), %r10 ## 8-byte Folded Reload
setb %cl
movq -632(%rbp), %rax ## 8-byte Reload
movq %rax, %rdx
movq -512(%rbp), %rdi ## 8-byte Reload
imulq %rdi, %rdx
movq %rdx, %rsi
mulq %rdi
movq %rdx, -768(%rbp) ## 8-byte Spill
xorl %edi, %edi
movq -152(%rbp), %rax ## 8-byte Reload
cmpq -216(%rbp), %rax ## 8-byte Folded Reload
setb %dil
addq %rax, %rsi
movq %rsi, -880(%rbp) ## 8-byte Spill
adcq %rdx, %rdi
movq %rdi, -864(%rbp) ## 8-byte Spill
addq %r10, %r8
movq %r8, -888(%rbp) ## 8-byte Spill
adcq %rbx, %rcx
movq %rcx, -912(%rbp) ## 8-byte Spill
movq %r11, %rbx
addq %r12, %rbx
movq %rbx, -760(%rbp) ## 8-byte Spill
adcq %r15, %r9
movq %r9, -120(%rbp) ## 8-byte Spill
movq -176(%rbp), %r9 ## 8-byte Reload
addq -304(%rbp), %r9 ## 8-byte Folded Reload
movq %r9, -176(%rbp) ## 8-byte Spill
adcq -296(%rbp), %r14 ## 8-byte Folded Reload
movq %r14, -152(%rbp) ## 8-byte Spill
movq %r13, %r15
addq -160(%rbp), %r15 ## 8-byte Folded Reload
movq -1072(%rbp), %r11 ## 8-byte Reload
movq -952(%rbp), %r10 ## 8-byte Reload
adcq %r11, %r10
addb $255, -712(%rbp) ## 1-byte Folded Spill
movq -528(%rbp), %rax ## 8-byte Reload
adcq %rax, -608(%rbp) ## 8-byte Folded Spill
setb -160(%rbp) ## 1-byte Folded Spill
movq %r15, %rax
movq -1080(%rbp), %r14 ## 8-byte Reload
adcq %r14, %rax
movq %rax, -1032(%rbp) ## 8-byte Spill
movq -392(%rbp), %r13 ## 8-byte Reload
movq -568(%rbp), %rdi ## 8-byte Reload
cmpq %r13, %rdi
adcq -200(%rbp), %r13 ## 8-byte Folded Reload
addb $255, -472(%rbp) ## 1-byte Folded Spill
adcq -656(%rbp), %rdi ## 8-byte Folded Reload
setb -216(%rbp) ## 1-byte Folded Spill
movq %r13, %rdx
adcq %rax, %rdx
movq %rdx, -872(%rbp) ## 8-byte Spill
addb $255, -464(%rbp) ## 1-byte Folded Spill
movq -680(%rbp), %rax ## 8-byte Reload
adcq -184(%rbp), %rax ## 8-byte Folded Reload
setb -464(%rbp) ## 1-byte Folded Spill
movq %r9, %rax
adcq %rdx, %rax
movq %rax, %rdx
movq %rax, -952(%rbp) ## 8-byte Spill
movq -336(%rbp), %rax ## 8-byte Reload
movq -648(%rbp), %rcx ## 8-byte Reload
cmpq %rax, %rcx
adcq -360(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -1008(%rbp) ## 8-byte Spill
addb $255, -704(%rbp) ## 1-byte Folded Spill
adcq -664(%rbp), %rcx ## 8-byte Folded Reload
setb -704(%rbp) ## 1-byte Folded Spill
movq %rax, %rcx
adcq %rdx, %rcx
movq %rcx, -752(%rbp) ## 8-byte Spill
addb $255, -696(%rbp) ## 1-byte Folded Spill
movq -584(%rbp), %rax ## 8-byte Reload
adcq -720(%rbp), %rax ## 8-byte Folded Reload
setb -696(%rbp) ## 1-byte Folded Spill
movq %rbx, %rax
adcq %rcx, %rax
movq %rax, %rdi
movq %rax, -1000(%rbp) ## 8-byte Spill
movq -536(%rbp), %rax ## 8-byte Reload
movq -640(%rbp), %rcx ## 8-byte Reload
cmpq %rax, %rcx
adcq -424(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -1048(%rbp) ## 8-byte Spill
addb $255, -592(%rbp) ## 1-byte Folded Spill
adcq -576(%rbp), %rcx ## 8-byte Folded Reload
setb -584(%rbp) ## 1-byte Folded Spill
movq %rax, %rcx
adcq %rdi, %rcx
movq %rcx, -744(%rbp) ## 8-byte Spill
addb $255, -688(%rbp) ## 1-byte Folded Spill
movq -672(%rbp), %rax ## 8-byte Reload
adcq -600(%rbp), %rax ## 8-byte Folded Reload
setb -688(%rbp) ## 1-byte Folded Spill
adcq %rcx, %r8
movq %r8, -968(%rbp) ## 8-byte Spill
movq -248(%rbp), %rax ## 8-byte Reload
movq -88(%rbp), %rdx ## 8-byte Reload
cmpq %rax, %rdx
adcq -256(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -1040(%rbp) ## 8-byte Spill
addb $255, -144(%rbp) ## 1-byte Folded Spill
adcq %rdx, -488(%rbp) ## 8-byte Folded Spill
setb -672(%rbp) ## 1-byte Folded Spill
movq %rax, %rdx
adcq %r8, %rdx
movq %rdx, -1016(%rbp) ## 8-byte Spill
addb $255, -496(%rbp) ## 1-byte Folded Spill
movq -480(%rbp), %rax ## 8-byte Reload
adcq -312(%rbp), %rax ## 8-byte Folded Reload
setb -568(%rbp) ## 1-byte Folded Spill
movq %rsi, %rcx
adcq %rdx, %rcx
movq %rcx, -720(%rbp) ## 8-byte Spill
addb $255, -736(%rbp) ## 1-byte Folded Spill
movq -784(%rbp), %rax ## 8-byte Reload
adcq -192(%rbp), %rax ## 8-byte Folded Reload
setb -488(%rbp) ## 1-byte Folded Spill
movq -328(%rbp), %rax ## 8-byte Reload
adcq %rcx, %rax
movq %rax, -712(%rbp) ## 8-byte Spill
addb $255, -560(%rbp) ## 1-byte Folded Spill
movq -344(%rbp), %rcx ## 8-byte Reload
adcq -776(%rbp), %rcx ## 8-byte Folded Reload
setb -608(%rbp) ## 1-byte Folded Spill
movq -80(%rbp), %rcx ## 8-byte Reload
adcq %rax, %rcx
movq %rcx, -472(%rbp) ## 8-byte Spill
addb $255, -856(%rbp) ## 1-byte Folded Spill
movq -416(%rbp), %rax ## 8-byte Reload
adcq %rax, -848(%rbp) ## 8-byte Folded Spill
setb -600(%rbp) ## 1-byte Folded Spill
movq -48(%rbp), %rax ## 8-byte Reload
adcq %rcx, %rax
movq %rax, %rsi
movq %rax, -1024(%rbp) ## 8-byte Spill
movq -208(%rbp), %rax ## 8-byte Reload
movq -544(%rbp), %rdi ## 8-byte Reload
mulq %rdi
movq %rdx, %r9
movq -552(%rbp), %rax ## 8-byte Reload
movq %rax, %r12
imulq %rdi, %r12
movq -816(%rbp), %rcx ## 8-byte Reload
cmpq -840(%rbp), %rcx ## 8-byte Folded Reload
adcq $0, %r9
addb $255, -832(%rbp) ## 1-byte Folded Spill
adcq -824(%rbp), %rcx ## 8-byte Folded Reload
setb -824(%rbp) ## 1-byte Folded Spill
leaq (%r9,%r12), %rbx
adcq %rsi, %rbx
movq %rbx, -832(%rbp) ## 8-byte Spill
movq -320(%rbp), %rcx ## 8-byte Reload
imulq -800(%rbp), %rcx ## 8-byte Folded Reload
movq %rcx, -192(%rbp) ## 8-byte Spill
movb -224(%rbp), %dl ## 1-byte Reload
addb $255, %dl
movq %rcx, %rdx
adcq %rbx, %rdx
movq %rdx, -784(%rbp) ## 8-byte Spill
movq -72(%rbp), %rcx ## 8-byte Reload
addq %rcx, -168(%rbp) ## 8-byte Folded Spill
movq -904(%rbp), %rcx ## 8-byte Reload
adcq %rcx, -976(%rbp) ## 8-byte Folded Spill
setb -184(%rbp) ## 1-byte Folded Spill
movq -616(%rbp), %rcx ## 8-byte Reload
adcq %rdx, %rcx
movq %rcx, -344(%rbp) ## 8-byte Spill
mulq %rdi
movq %rdx, %r8
movq %r9, %rax
addq %r12, %rax
movq %rdx, %rax
adcq $0, %rax
setb %al
movzbl %al, %esi
movq -112(%rbp), %rcx ## 8-byte Reload
movq %rcx, %rax
mulq %rdi
movq %rdx, -480(%rbp) ## 8-byte Spill
imulq %rdi, %rcx
addq %r12, %r9
movq %r9, -816(%rbp) ## 8-byte Spill
adcq %r8, %rcx
movq %rcx, -304(%rbp) ## 8-byte Spill
movq -368(%rbp), %rax ## 8-byte Reload
movq 44(%rax), %rax
movq %rax, -368(%rbp) ## 8-byte Spill
adcq %rdx, %rsi
movq %rsi, -736(%rbp) ## 8-byte Spill
movq %rax, %rcx
movq -432(%rbp), %rdx ## 8-byte Reload
imulq %rdx, %rcx
mulq %rdx
xorl %eax, %eax
movq -928(%rbp), %rsi ## 8-byte Reload
cmpq -984(%rbp), %rsi ## 8-byte Folded Reload
setb %al
addq %rsi, %rcx
movq %rcx, -144(%rbp) ## 8-byte Spill
adcq %rdx, %rax
movq %rax, -528(%rbp) ## 8-byte Spill
movq -624(%rbp), %rax ## 8-byte Reload
movq -920(%rbp), %rdx ## 8-byte Reload
cmpq %rax, %rdx
adcq -520(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -496(%rbp) ## 8-byte Spill
addb $255, -936(%rbp) ## 1-byte Folded Spill
adcq -944(%rbp), %rdx ## 8-byte Folded Reload
setb -944(%rbp) ## 1-byte Folded Spill
adcq %rcx, %rax
movq %rax, %rsi
movq %rax, -640(%rbp) ## 8-byte Spill
movq -56(%rbp), %rax ## 8-byte Reload
movq %rax, %rdi
movq -352(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rdi
mulq %rcx
movq %rdx, -648(%rbp) ## 8-byte Spill
xorl %eax, %eax
cmpq %r11, %r10
setb %al
addq %r10, %rdi
movq %rdi, -664(%rbp) ## 8-byte Spill
adcq %rdx, %rax
movq %rax, -808(%rbp) ## 8-byte Spill
addb $255, -160(%rbp) ## 1-byte Folded Spill
adcq %r14, %r15
setb -928(%rbp) ## 1-byte Folded Spill
movq %rdi, %rax
adcq %rsi, %rax
movq %rax, %rcx
movq %rax, -936(%rbp) ## 8-byte Spill
movq -392(%rbp), %rax ## 8-byte Reload
cmpq %rax, %r13
adcq -200(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -984(%rbp) ## 8-byte Spill
addb $255, -216(%rbp) ## 1-byte Folded Spill
adcq -1032(%rbp), %r13 ## 8-byte Folded Reload
setb -920(%rbp) ## 1-byte Folded Spill
adcq %rcx, %rax
movq %rax, -160(%rbp) ## 8-byte Spill
movq -376(%rbp), %rax ## 8-byte Reload
movq %rax, %r13
movq -240(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %r13
mulq %rcx
movq %rdx, -216(%rbp) ## 8-byte Spill
xorl %eax, %eax
movq -152(%rbp), %rcx ## 8-byte Reload
cmpq -296(%rbp), %rcx ## 8-byte Folded Reload
setb %al
movq %rax, -312(%rbp) ## 8-byte Spill
movq -136(%rbp), %rax ## 8-byte Reload
movq %rax, %rdx
movq -400(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rdx
movq %rdx, -88(%rbp) ## 8-byte Spill
mulq %rcx
movq %rdx, -296(%rbp) ## 8-byte Spill
xorl %eax, %eax
movq -960(%rbp), %rcx ## 8-byte Reload
cmpq %rcx, -120(%rbp) ## 8-byte Folded Reload
setb %al
movq %rax, %r14
movq -272(%rbp), %rax ## 8-byte Reload
movq %rax, %rdx
movq -232(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rdx
movq %rdx, %r12
mulq %rcx
movq %rdx, %r11
movq %rdx, -1032(%rbp) ## 8-byte Spill
xorl %eax, %eax
movq -912(%rbp), %r10 ## 8-byte Reload
cmpq -992(%rbp), %r10 ## 8-byte Folded Reload
setb %al
movq %rax, %r15
movq -440(%rbp), %rax ## 8-byte Reload
movq %rax, %rdx
movq -512(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rdx
movq %rdx, %rbx
mulq %rcx
movq %rdx, %r9
movq %rdx, -992(%rbp) ## 8-byte Spill
xorl %eax, %eax
movq -864(%rbp), %r8 ## 8-byte Reload
cmpq -768(%rbp), %r8 ## 8-byte Folded Reload
setb %al
movq %rax, %rdi
movq -632(%rbp), %rax ## 8-byte Reload
movq %rax, %rdx
movq -128(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rdx
movq %rdx, %rsi
mulq %rcx
movq %rdx, -680(%rbp) ## 8-byte Spill
xorl %eax, %eax
movq -792(%rbp), %rcx ## 8-byte Reload
cmpq -384(%rbp), %rcx ## 8-byte Folded Reload
setb %al
addq %rcx, %rsi
movq %rsi, -592(%rbp) ## 8-byte Spill
adcq %rdx, %rax
movq %rax, -576(%rbp) ## 8-byte Spill
addq %r8, %rbx
movq %rbx, -768(%rbp) ## 8-byte Spill
adcq %r9, %rdi
movq %rdi, -904(%rbp) ## 8-byte Spill
addq %r10, %r12
movq %r12, -656(%rbp) ## 8-byte Spill
adcq %r11, %r15
movq %r15, -72(%rbp) ## 8-byte Spill
movq -88(%rbp), %r10 ## 8-byte Reload
addq -120(%rbp), %r10 ## 8-byte Folded Reload
movq %r10, -88(%rbp) ## 8-byte Spill
adcq -296(%rbp), %r14 ## 8-byte Folded Reload
movq %r14, -120(%rbp) ## 8-byte Spill
addq -152(%rbp), %r13 ## 8-byte Folded Reload
movq %r13, -912(%rbp) ## 8-byte Spill
movq -312(%rbp), %rax ## 8-byte Reload
adcq -216(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -312(%rbp) ## 8-byte Spill
addb $255, -464(%rbp) ## 1-byte Folded Spill
movq -872(%rbp), %rax ## 8-byte Reload
adcq -176(%rbp), %rax ## 8-byte Folded Reload
setb -960(%rbp) ## 1-byte Folded Spill
adcq -160(%rbp), %r13 ## 8-byte Folded Reload
movq %r13, -976(%rbp) ## 8-byte Spill
movq -336(%rbp), %rax ## 8-byte Reload
movq -1008(%rbp), %rdi ## 8-byte Reload
cmpq %rax, %rdi
movq %rax, %rcx
adcq -360(%rbp), %rcx ## 8-byte Folded Reload
movq %rcx, -1064(%rbp) ## 8-byte Spill
addb $255, -704(%rbp) ## 1-byte Folded Spill
adcq -952(%rbp), %rdi ## 8-byte Folded Reload
setb -1056(%rbp) ## 1-byte Folded Spill
adcq %r13, %rcx
movq %rcx, -864(%rbp) ## 8-byte Spill
addb $255, -696(%rbp) ## 1-byte Folded Spill
movq -752(%rbp), %rax ## 8-byte Reload
adcq -760(%rbp), %rax ## 8-byte Folded Reload
setb -560(%rbp) ## 1-byte Folded Spill
movq %r10, %rax
adcq %rcx, %rax
movq %rax, %rcx
movq %rax, -752(%rbp) ## 8-byte Spill
movq -536(%rbp), %rax ## 8-byte Reload
movq -1048(%rbp), %rdx ## 8-byte Reload
cmpq %rax, %rdx
adcq -424(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -1008(%rbp) ## 8-byte Spill
addb $255, -584(%rbp) ## 1-byte Folded Spill
adcq -1000(%rbp), %rdx ## 8-byte Folded Reload
setb -776(%rbp) ## 1-byte Folded Spill
movq %rax, %rdi
adcq %rcx, %rdi
movq %rdi, -856(%rbp) ## 8-byte Spill
addb $255, -688(%rbp) ## 1-byte Folded Spill
movq -744(%rbp), %rax ## 8-byte Reload
adcq -888(%rbp), %rax ## 8-byte Folded Reload
setb -760(%rbp) ## 1-byte Folded Spill
movq %r12, %rax
adcq %rdi, %rax
movq %rax, %rcx
movq %rax, -744(%rbp) ## 8-byte Spill
movq -248(%rbp), %rax ## 8-byte Reload
movq -1040(%rbp), %rdx ## 8-byte Reload
cmpq %rax, %rdx
adcq -256(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -952(%rbp) ## 8-byte Spill
addb $255, -672(%rbp) ## 1-byte Folded Spill
adcq -968(%rbp), %rdx ## 8-byte Folded Reload
setb -888(%rbp) ## 1-byte Folded Spill
movq %rax, %rdi
adcq %rcx, %rdi
movq %rdi, -848(%rbp) ## 8-byte Spill
addb $255, -568(%rbp) ## 1-byte Folded Spill
movq -1016(%rbp), %rax ## 8-byte Reload
adcq -880(%rbp), %rax ## 8-byte Folded Reload
setb -880(%rbp) ## 1-byte Folded Spill
movq %rbx, %rax
adcq %rdi, %rax
movq %rax, %rdx
movq %rax, -840(%rbp) ## 8-byte Spill
movq -456(%rbp), %rax ## 8-byte Reload
movq -328(%rbp), %rcx ## 8-byte Reload
cmpq %rax, %rcx
adcq -104(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -1016(%rbp) ## 8-byte Spill
addb $255, -488(%rbp) ## 1-byte Folded Spill
adcq %rcx, -720(%rbp) ## 8-byte Folded Spill
setb -872(%rbp) ## 1-byte Folded Spill
movq %rax, %rcx
adcq %rdx, %rcx
movq %rcx, -672(%rbp) ## 8-byte Spill
addb $255, -608(%rbp) ## 1-byte Folded Spill
movq -712(%rbp), %rax ## 8-byte Reload
adcq -80(%rbp), %rax ## 8-byte Folded Reload
setb -688(%rbp) ## 1-byte Folded Spill
movq %rsi, %rax
adcq %rcx, %rax
movq %rax, -696(%rbp) ## 8-byte Spill
addb $255, -600(%rbp) ## 1-byte Folded Spill
movq -472(%rbp), %rcx ## 8-byte Reload
adcq -48(%rbp), %rcx ## 8-byte Folded Reload
setb -704(%rbp) ## 1-byte Folded Spill
movq -504(%rbp), %rcx ## 8-byte Reload
adcq %rax, %rcx
movq %rcx, -464(%rbp) ## 8-byte Spill
addb $255, -824(%rbp) ## 1-byte Folded Spill
movq -816(%rbp), %rax ## 8-byte Reload
adcq -1024(%rbp), %rax ## 8-byte Folded Reload
setb -472(%rbp) ## 1-byte Folded Spill
movq -304(%rbp), %rax ## 8-byte Reload
adcq %rcx, %rax
movq %rax, %rdx
movq %rax, -600(%rbp) ## 8-byte Spill
movq -96(%rbp), %rcx ## 8-byte Reload
subq -320(%rbp), %rcx ## 8-byte Folded Reload
movq %rcx, -96(%rbp) ## 8-byte Spill
addb $255, -224(%rbp) ## 1-byte Folded Spill
movq -832(%rbp), %rax ## 8-byte Reload
adcq -192(%rbp), %rax ## 8-byte Folded Reload
setb -608(%rbp) ## 1-byte Folded Spill
movq -264(%rbp), %rax ## 8-byte Reload
leaq (%rcx,%rax), %rax
adcq %rdx, %rax
movq %rax, %rdi
movq %rax, -712(%rbp) ## 8-byte Spill
movq -552(%rbp), %r15 ## 8-byte Reload
movq %r15, %rax
movq -408(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rax
movq %rax, %rbx
movq %rax, -152(%rbp) ## 8-byte Spill
movq -208(%rbp), %rsi ## 8-byte Reload
movq %rsi, %rax
mulq %rcx
movq %rcx, %r14
movq -616(%rbp), %rax ## 8-byte Reload
cmpq -288(%rbp), %rax ## 8-byte Folded Reload
adcq $0, %rdx
movq %rdx, -80(%rbp) ## 8-byte Spill
addb $255, -184(%rbp) ## 1-byte Folded Spill
adcq %rax, -784(%rbp) ## 8-byte Folded Spill
setb -720(%rbp) ## 1-byte Folded Spill
leaq (%rdx,%rbx), %rax
adcq %rdi, %rax
movq %rax, %r8
movq %rax, -584(%rbp) ## 8-byte Spill
movq -168(%rbp), %rbx ## 8-byte Reload
movq %rbx, %rax
imulq -800(%rbp), %rax ## 8-byte Folded Reload
movq %rax, %rcx
movq %rax, -184(%rbp) ## 8-byte Spill
movq %rbx, %rdi
shlq $32, %rdi
movq %rbx, %rax
movl $4294967295, %r12d ## imm = 0xFFFFFFFF
mulq %r12
movq %rdx, -48(%rbp) ## 8-byte Spill
xorl %eax, %eax
subq %rbx, %rdi
movq %rdi, -224(%rbp) ## 8-byte Spill
setb %al
movq -448(%rbp), %rdi ## 8-byte Reload
leaq (%rdi,%rax), %r9
xorl %r13d, %r13d
addq %rdx, %r9
setb %r13b
addq %rax, %rdi
adcq -344(%rbp), %r13 ## 8-byte Folded Reload
movq %rcx, %rax
setb -448(%rbp) ## 1-byte Folded Spill
adcq %r8, %rax
movq %rax, %r10
movq %rax, -824(%rbp) ## 8-byte Spill
movq -896(%rbp), %rax ## 8-byte Reload
movq 36(%rax), %rbx
movq %rbx, -344(%rbp) ## 8-byte Spill
imulq %rbx, %rsi
movq %rsi, %r11
movq %rsi, -568(%rbp) ## 8-byte Spill
movq -280(%rbp), %rax ## 8-byte Reload
movq %rax, %rsi
imulq %rbx, %rsi
mulq %rbx
movq %rdx, %r8
movq -728(%rbp), %rcx ## 8-byte Reload
movq %rcx, %rax
mulq %rbx
imulq %rbx, %rcx
addq %rsi, %rdx
adcq %r11, %r8
movq %r8, -1000(%rbp) ## 8-byte Spill
movq %r9, %rax
addq %rcx, %rax
movq %rdx, %rsi
adcq %r13, %rsi
addq %r9, %rcx
movq %rcx, -832(%rbp) ## 8-byte Spill
adcq %r13, %rdx
setb -816(%rbp) ## 1-byte Folded Spill
adcq %r10, %r8
movq %rcx, %rdi
shlq $32, %rdi
movq %rcx, %rax
mulq %r12
movq %rdx, -432(%rbp) ## 8-byte Spill
xorl %eax, %eax
subq %rcx, %rdi
movq %rdi, -328(%rbp) ## 8-byte Spill
setb %al
leaq (%rsi,%rax), %rdi
xorl %r12d, %r12d
addq %rdx, %rdi
movq %rdi, -384(%rbp) ## 8-byte Spill
setb %r12b
addq %rax, %rsi
adcq %r8, %r12
movq %r12, -1120(%rbp) ## 8-byte Spill
setb -968(%rbp) ## 1-byte Folded Spill
movq %r15, %rax
movq %r14, %r15
mulq %r14
movq %rdx, %r14
movq -80(%rbp), %r9 ## 8-byte Reload
movq %r9, %rax
addq -152(%rbp), %rax ## 8-byte Folded Reload
movq %rdx, %rax
adcq $0, %rax
setb %cl
movq -112(%rbp), %rbx ## 8-byte Reload
movq %rbx, %rax
mulq %r15
movq %rdx, %r11
movq %rdx, -792(%rbp) ## 8-byte Spill
movq -168(%rbp), %r10 ## 8-byte Reload
movq %r10, %rax
movl $4294967294, %edx ## imm = 0xFFFFFFFE
mulq %rdx
movq %rdx, %r13
movzbl %cl, %r8d
movq -896(%rbp), %rax ## 8-byte Reload
movq 40(%rax), %rcx
movq %rcx, -176(%rbp) ## 8-byte Spill
movq -728(%rbp), %rsi ## 8-byte Reload
movq %rsi, %rax
mulq %rcx
movq %rdx, -1112(%rbp) ## 8-byte Spill
movq -224(%rbp), %rdi ## 8-byte Reload
subq %r10, %rdi
imulq %r15, %rbx
movq %rbx, %r15
movq -280(%rbp), %rbx ## 8-byte Reload
imulq %rcx, %rbx
movq %rbx, -1096(%rbp) ## 8-byte Spill
imulq %rcx, %rsi
movq %rsi, -168(%rbp) ## 8-byte Spill
movq -384(%rbp), %rax ## 8-byte Reload
addq %rsi, %rax
leaq (%rbx,%rdx), %rax
adcq %r12, %rax
movq %rax, -784(%rbp) ## 8-byte Spill
addq -48(%rbp), %rdi ## 8-byte Folded Reload
movq %rdi, -224(%rbp) ## 8-byte Spill
movq -184(%rbp), %rax ## 8-byte Reload
adcq %rax, %r13
movq %r13, -616(%rbp) ## 8-byte Spill
addq -152(%rbp), %r9 ## 8-byte Folded Reload
movq %r9, -80(%rbp) ## 8-byte Spill
adcq %r14, %r15
movq %r15, -152(%rbp) ## 8-byte Spill
adcq %r11, %r8
movq %r8, -488(%rbp) ## 8-byte Spill
movq -496(%rbp), %rcx ## 8-byte Reload
cmpq -624(%rbp), %rcx ## 8-byte Folded Reload
movq -520(%rbp), %rax ## 8-byte Reload
adcq $0, %rax
movq %rax, -520(%rbp) ## 8-byte Spill
addb $255, -944(%rbp) ## 1-byte Folded Spill
adcq -144(%rbp), %rcx ## 8-byte Folded Reload
movq %rax, %rcx
setb -944(%rbp) ## 1-byte Folded Spill
adcq -528(%rbp), %rcx ## 8-byte Folded Reload
movq %rcx, %rbx
movq %rcx, -1024(%rbp) ## 8-byte Spill
movq -368(%rbp), %rcx ## 8-byte Reload
movq %rcx, %rax
movq -352(%rbp), %rdi ## 8-byte Reload
mulq %rdi
movq %rcx, %rsi
imulq %rdi, %rsi
xorl %eax, %eax
movq -808(%rbp), %rcx ## 8-byte Reload
cmpq -648(%rbp), %rcx ## 8-byte Folded Reload
setb %al
addq %rcx, %rsi
movq %rsi, -1040(%rbp) ## 8-byte Spill
adcq %rdx, %rax
movq %rax, -1080(%rbp) ## 8-byte Spill
addb $255, -928(%rbp) ## 1-byte Folded Spill
movq -640(%rbp), %rax ## 8-byte Reload
adcq %rax, -664(%rbp) ## 8-byte Folded Spill
movq %rsi, %rcx
setb -1048(%rbp) ## 1-byte Folded Spill
adcq %rbx, %rcx
movq %rcx, -648(%rbp) ## 8-byte Spill
movq -392(%rbp), %rax ## 8-byte Reload
movq -984(%rbp), %rdx ## 8-byte Reload
cmpq %rax, %rdx
adcq -200(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -808(%rbp) ## 8-byte Spill
addb $255, -920(%rbp) ## 1-byte Folded Spill
adcq -936(%rbp), %rdx ## 8-byte Folded Reload
setb -640(%rbp) ## 1-byte Folded Spill
adcq %rcx, %rax
movq %rax, %rdi
movq %rax, -928(%rbp) ## 8-byte Spill
movq -56(%rbp), %rcx ## 8-byte Reload
movq %rcx, %rax
movq -240(%rbp), %rsi ## 8-byte Reload
mulq %rsi
movq %rdx, -1072(%rbp) ## 8-byte Spill
imulq %rsi, %rcx
xorl %esi, %esi
movq -312(%rbp), %rax ## 8-byte Reload
cmpq -216(%rbp), %rax ## 8-byte Folded Reload
setb %sil
addq %rax, %rcx
movq %rcx, -936(%rbp) ## 8-byte Spill
adcq %rdx, %rsi
movq %rsi, -1104(%rbp) ## 8-byte Spill
addb $255, -960(%rbp) ## 1-byte Folded Spill
movq -912(%rbp), %rax ## 8-byte Reload
adcq -160(%rbp), %rax ## 8-byte Folded Reload
setb -912(%rbp) ## 1-byte Folded Spill
adcq %rdi, %rcx
movq %rcx, -920(%rbp) ## 8-byte Spill
movq -336(%rbp), %rax ## 8-byte Reload
movq -1064(%rbp), %rdx ## 8-byte Reload
cmpq %rax, %rdx
adcq -360(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -960(%rbp) ## 8-byte Spill
addb $255, -1056(%rbp) ## 1-byte Folded Spill
adcq -976(%rbp), %rdx ## 8-byte Folded Reload
setb -976(%rbp) ## 1-byte Folded Spill
adcq %rcx, %rax
movq %rax, -144(%rbp) ## 8-byte Spill
movq -376(%rbp), %rcx ## 8-byte Reload
movq %rcx, %rax
movq -400(%rbp), %rsi ## 8-byte Reload
mulq %rsi
movq %rdx, -496(%rbp) ## 8-byte Spill
imulq %rsi, %rcx
movq %rcx, -312(%rbp) ## 8-byte Spill
xorl %eax, %eax
movq -296(%rbp), %rcx ## 8-byte Reload
cmpq %rcx, -120(%rbp) ## 8-byte Folded Reload
setb %al
movq %rax, %r12
movq -136(%rbp), %rcx ## 8-byte Reload
movq %rcx, %rax
movq -232(%rbp), %rsi ## 8-byte Reload
mulq %rsi
movq %rdx, -288(%rbp) ## 8-byte Spill
imulq %rsi, %rcx
movq %rcx, -352(%rbp) ## 8-byte Spill
xorl %eax, %eax
movq -1032(%rbp), %rcx ## 8-byte Reload
cmpq %rcx, -72(%rbp) ## 8-byte Folded Reload
setb %al
movq %rax, %r13
movq -272(%rbp), %r8 ## 8-byte Reload
movq %r8, %rax
movq -512(%rbp), %rcx ## 8-byte Reload
mulq %rcx
movq %rdx, %r11
movq %rdx, -1032(%rbp) ## 8-byte Spill
imulq %rcx, %r8
xorl %eax, %eax
movq -904(%rbp), %r15 ## 8-byte Reload
cmpq -992(%rbp), %r15 ## 8-byte Folded Reload
setb %al
movq %rax, %rdi
movq -440(%rbp), %r10 ## 8-byte Reload
movq %r10, %rax
movq -128(%rbp), %rcx ## 8-byte Reload
mulq %rcx
movq %rdx, %rsi
movq %rdx, -992(%rbp) ## 8-byte Spill
imulq %rcx, %r10
xorl %eax, %eax
movq -576(%rbp), %r14 ## 8-byte Reload
cmpq -680(%rbp), %r14 ## 8-byte Folded Reload
setb %al
movq %rax, %rbx
movq -632(%rbp), %r9 ## 8-byte Reload
movq %r9, %rax
movq -544(%rbp), %rcx ## 8-byte Reload
mulq %rcx
movq %rdx, -680(%rbp) ## 8-byte Spill
imulq %rcx, %r9
xorl %ecx, %ecx
movq -736(%rbp), %rax ## 8-byte Reload
cmpq -480(%rbp), %rax ## 8-byte Folded Reload
setb %cl
addq %rax, %r9
adcq %rdx, %rcx
movq %rcx, -664(%rbp) ## 8-byte Spill
addq %r14, %r10
adcq %rsi, %rbx
movq %rbx, -984(%rbp) ## 8-byte Spill
addq %r15, %r8
adcq %r11, %rdi
movq %rdi, -904(%rbp) ## 8-byte Spill
movq -352(%rbp), %rdx ## 8-byte Reload
addq -72(%rbp), %rdx ## 8-byte Folded Reload
movq %rdx, -352(%rbp) ## 8-byte Spill
adcq -288(%rbp), %r13 ## 8-byte Folded Reload
movq %r13, -736(%rbp) ## 8-byte Spill
movq -312(%rbp), %rax ## 8-byte Reload
addq -120(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -312(%rbp) ## 8-byte Spill
adcq -496(%rbp), %r12 ## 8-byte Folded Reload
movq %r12, -1064(%rbp) ## 8-byte Spill
addb $255, -560(%rbp) ## 1-byte Folded Spill
movq -864(%rbp), %rcx ## 8-byte Reload
adcq -88(%rbp), %rcx ## 8-byte Folded Reload
movq %rax, %rsi
setb -88(%rbp) ## 1-byte Folded Spill
adcq -144(%rbp), %rsi ## 8-byte Folded Reload
movq %rsi, -1056(%rbp) ## 8-byte Spill
movq -536(%rbp), %rax ## 8-byte Reload
movq -1008(%rbp), %rcx ## 8-byte Reload
cmpq %rax, %rcx
adcq -424(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -1088(%rbp) ## 8-byte Spill
addb $255, -776(%rbp) ## 1-byte Folded Spill
adcq -752(%rbp), %rcx ## 8-byte Folded Reload
movq %rax, %rcx
setb -864(%rbp) ## 1-byte Folded Spill
adcq %rsi, %rcx
movq %rcx, -576(%rbp) ## 8-byte Spill
addb $255, -760(%rbp) ## 1-byte Folded Spill
movq -856(%rbp), %rax ## 8-byte Reload
adcq -656(%rbp), %rax ## 8-byte Folded Reload
movq %rdx, %rax
setb -560(%rbp) ## 1-byte Folded Spill
adcq %rcx, %rax
movq %rax, %rdx
movq %rax, -656(%rbp) ## 8-byte Spill
movq -248(%rbp), %rax ## 8-byte Reload
movq -952(%rbp), %rcx ## 8-byte Reload
cmpq %rax, %rcx
adcq -256(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -760(%rbp) ## 8-byte Spill
addb $255, -888(%rbp) ## 1-byte Folded Spill
adcq -744(%rbp), %rcx ## 8-byte Folded Reload
movq %rax, %rcx
setb -776(%rbp) ## 1-byte Folded Spill
adcq %rdx, %rcx
addb $255, -880(%rbp) ## 1-byte Folded Spill
movq -848(%rbp), %rax ## 8-byte Reload
adcq -768(%rbp), %rax ## 8-byte Folded Reload
movq %r8, %rdi
setb %dl
adcq %rcx, %rdi
movq -456(%rbp), %rax ## 8-byte Reload
movq -1016(%rbp), %rbx ## 8-byte Reload
cmpq %rax, %rbx
movq %rax, %rsi
adcq -104(%rbp), %rsi ## 8-byte Folded Reload
addb $255, -872(%rbp) ## 1-byte Folded Spill
adcq -840(%rbp), %rbx ## 8-byte Folded Reload
movq %rsi, %rax
movq %rsi, %rbx
movq %rsi, -768(%rbp) ## 8-byte Spill
setb %sil
adcq %rdi, %rax
addb $255, %dl
adcq %r8, %rcx
setb -120(%rbp) ## 1-byte Folded Spill
addb $255, -688(%rbp) ## 1-byte Folded Spill
movq -672(%rbp), %rcx ## 8-byte Reload
adcq -592(%rbp), %rcx ## 8-byte Folded Reload
movq %r10, %rdx
setb %r8b
adcq %rax, %rdx
addb $255, %sil
adcq %rbx, %rdi
setb -72(%rbp) ## 1-byte Folded Spill
movq -416(%rbp), %rcx ## 8-byte Reload
movq -504(%rbp), %rsi ## 8-byte Reload
cmpq %rcx, %rsi
movq %rcx, %rdi
adcq -64(%rbp), %rdi ## 8-byte Folded Reload
addb $255, -704(%rbp) ## 1-byte Folded Spill
adcq %rsi, -696(%rbp) ## 8-byte Folded Spill
movq %rdi, %rcx
movq %rdi, %rbx
movq %rdi, -592(%rbp) ## 8-byte Spill
setb %sil
adcq %rdx, %rcx
addb $255, %r8b
adcq %r10, %rax
setb -504(%rbp) ## 1-byte Folded Spill
addb $255, -472(%rbp) ## 1-byte Folded Spill
movq -464(%rbp), %rax ## 8-byte Reload
adcq -304(%rbp), %rax ## 8-byte Folded Reload
movq %r9, %rdi
setb %r8b
adcq %rcx, %rdi
addb $255, %sil
adcq %rbx, %rdx
setb -304(%rbp) ## 1-byte Folded Spill
movq -320(%rbp), %rax ## 8-byte Reload
movl $4294967294, %r10d ## imm = 0xFFFFFFFE
mulq %r10
movq -96(%rbp), %rax ## 8-byte Reload
addq -264(%rbp), %rax ## 8-byte Folded Reload
adcq -192(%rbp), %rdx ## 8-byte Folded Reload
addb $255, -608(%rbp) ## 1-byte Folded Spill
adcq -600(%rbp), %rax ## 8-byte Folded Reload
movq %rdx, %rax
movq %rdx, %r11
movq %rdx, -472(%rbp) ## 8-byte Spill
setb %dl
adcq %rdi, %rax
addb $255, %r8b
adcq %r9, %rcx
setb -160(%rbp) ## 1-byte Folded Spill
addb $255, -720(%rbp) ## 1-byte Folded Spill
movq -80(%rbp), %rcx ## 8-byte Reload
adcq -712(%rbp), %rcx ## 8-byte Folded Reload
movq -152(%rbp), %r9 ## 8-byte Reload
movq %r9, %rbx
setb %sil
adcq %rax, %rbx
addb $255, %dl
adcq %r11, %rdi
setb -80(%rbp) ## 1-byte Folded Spill
addb $255, -448(%rbp) ## 1-byte Folded Spill
movq -184(%rbp), %rcx ## 8-byte Reload
adcq %rcx, -584(%rbp) ## 8-byte Folded Spill
movq -224(%rbp), %r12 ## 8-byte Reload
movq %r12, %rcx
setb %r8b
adcq %rbx, %rcx
addb $255, %sil
adcq %r9, %rax
setb -216(%rbp) ## 1-byte Folded Spill
movq -208(%rbp), %r15 ## 8-byte Reload
movq %r15, %rax
movq -344(%rbp), %rdi ## 8-byte Reload
mulq %rdi
movq %rdx, %rsi
movq -552(%rbp), %r13 ## 8-byte Reload
movq %r13, %r9
imulq %rdi, %r9
movq %rdi, %r11
movq -1000(%rbp), %rax ## 8-byte Reload
cmpq -568(%rbp), %rax ## 8-byte Folded Reload
adcq $0, %rsi
addb $255, -816(%rbp) ## 1-byte Folded Spill
adcq -824(%rbp), %rax ## 8-byte Folded Reload
leaq (%rsi,%r9), %r14
movq %r14, %rdx
setb %al
adcq %rcx, %rdx
addb $255, %r8b
adcq %r12, %rbx
movq -832(%rbp), %r12 ## 8-byte Reload
movq %r12, %rdi
setb -296(%rbp) ## 1-byte Folded Spill
imulq -800(%rbp), %rdi ## 8-byte Folded Reload
movb -968(%rbp), %r8b ## 1-byte Reload
movl %r8d, %ebx
addb $255, %bl
movq %rdi, %rbx
adcq %rdx, %rbx
movq %rbx, -752(%rbp) ## 8-byte Spill
addb $255, %al
adcq %r14, %rcx
setb -448(%rbp) ## 1-byte Folded Spill
addb $255, %r8b
adcq %rdi, %rdx
movq %rdi, -624(%rbp) ## 8-byte Spill
setb -152(%rbp) ## 1-byte Folded Spill
movq -280(%rbp), %rax ## 8-byte Reload
movq -176(%rbp), %rcx ## 8-byte Reload
mulq %rcx
imulq %rcx, %r15
movq %r15, -464(%rbp) ## 8-byte Spill
movq -1096(%rbp), %rax ## 8-byte Reload
addq -1112(%rbp), %rax ## 8-byte Folded Reload
adcq %r15, %rdx
movq %rdx, -824(%rbp) ## 8-byte Spill
movq -384(%rbp), %rcx ## 8-byte Reload
addq %rcx, -168(%rbp) ## 8-byte Folded Spill
adcq -1120(%rbp), %rax ## 8-byte Folded Reload
movq %rdx, %rax
setb -744(%rbp) ## 1-byte Folded Spill
adcq %rbx, %rax
movq %rax, -600(%rbp) ## 8-byte Spill
movq %r13, %rax
mulq %r11
movq %rdx, %r13
movq %rsi, %rax
addq %r9, %rax
movq %rdx, %rax
adcq $0, %rax
setb %r15b
movq -112(%rbp), %rcx ## 8-byte Reload
movq %rcx, %rax
mulq %r11
movq %rdx, %rbx
movq %rdx, -480(%rbp) ## 8-byte Spill
movq %r12, %rax
mulq %r10
movq -328(%rbp), %rax ## 8-byte Reload
subq %r12, %rax
imulq %r11, %rcx
addq -432(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -328(%rbp) ## 8-byte Spill
adcq %rdi, %rdx
movq %rdx, -224(%rbp) ## 8-byte Spill
addq %r9, %rsi
adcq %r13, %rcx
movq %rcx, -384(%rbp) ## 8-byte Spill
movzbl %r15b, %eax
adcq %rbx, %rax
movq %rax, -608(%rbp) ## 8-byte Spill
addb $255, -944(%rbp) ## 1-byte Folded Spill
movq -520(%rbp), %rax ## 8-byte Reload
adcq -528(%rbp), %rax ## 8-byte Folded Reload
movq -1080(%rbp), %rax ## 8-byte Reload
movq %rax, %rdx
adcq $0, %rdx
addb $255, -1048(%rbp) ## 1-byte Folded Spill
movq -1024(%rbp), %rcx ## 8-byte Reload
adcq %rcx, -1040(%rbp) ## 8-byte Folded Spill
adcq $0, %rdx
xorl %ecx, %ecx
cmpq %rax, %rdx
movq %rdx, %r8
movq %rdx, -968(%rbp) ## 8-byte Spill
setb %cl
movq %rcx, %rbx
movq -368(%rbp), %rcx ## 8-byte Reload
movq %rcx, %rax
movq -240(%rbp), %rsi ## 8-byte Reload
mulq %rsi
movq %rcx, %rdi
imulq %rsi, %rdi
xorl %esi, %esi
movq -1104(%rbp), %rcx ## 8-byte Reload
cmpq -1072(%rbp), %rcx ## 8-byte Folded Reload
setb %sil
movq %rcx, %rax
addq %rdi, %rax
leaq (%rsi,%rdx), %rax
adcq %rdx, %rsi
movq %rsi, -888(%rbp) ## 8-byte Spill
addq %rcx, %rdi
movq %rdi, -944(%rbp) ## 8-byte Spill
adcq %rax, %rbx
movq %rbx, -96(%rbp) ## 8-byte Spill
movq -808(%rbp), %rcx ## 8-byte Reload
cmpq -392(%rbp), %rcx ## 8-byte Folded Reload
movq -200(%rbp), %rax ## 8-byte Reload
adcq $0, %rax
movq %rax, -200(%rbp) ## 8-byte Spill
addb $255, -640(%rbp) ## 1-byte Folded Spill
adcq -648(%rbp), %rcx ## 8-byte Folded Reload
movq %rax, %rcx
setb -1048(%rbp) ## 1-byte Folded Spill
adcq %r8, %rcx
movq %rcx, -1040(%rbp) ## 8-byte Spill
addb $255, -912(%rbp) ## 1-byte Folded Spill
movq -928(%rbp), %rax ## 8-byte Reload
adcq %rax, -936(%rbp) ## 8-byte Folded Spill
movq %rdi, %rax
setb -936(%rbp) ## 1-byte Folded Spill
adcq %rcx, %rax
movq %rax, %rdx
movq %rax, -856(%rbp) ## 8-byte Spill
movq -336(%rbp), %rax ## 8-byte Reload
movq -960(%rbp), %rcx ## 8-byte Reload
cmpq %rax, %rcx
adcq -360(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -840(%rbp) ## 8-byte Spill
addb $255, -976(%rbp) ## 1-byte Folded Spill
adcq -920(%rbp), %rcx ## 8-byte Folded Reload
setb -848(%rbp) ## 1-byte Folded Spill
adcq %rdx, %rax
movq %rax, %rsi
movq %rax, -808(%rbp) ## 8-byte Spill
movq -56(%rbp), %rax ## 8-byte Reload
movq %rax, %rdi
movq -400(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rdi
mulq %rcx
movq %rdx, -920(%rbp) ## 8-byte Spill
xorl %ecx, %ecx
movq -1064(%rbp), %rax ## 8-byte Reload
cmpq -496(%rbp), %rax ## 8-byte Folded Reload
setb %cl
addq %rax, %rdi
movq %rdi, -832(%rbp) ## 8-byte Spill
adcq %rdx, %rcx
movq %rcx, -912(%rbp) ## 8-byte Spill
addb $255, -88(%rbp) ## 1-byte Folded Spill
movq -312(%rbp), %rax ## 8-byte Reload
adcq -144(%rbp), %rax ## 8-byte Folded Reload
movq %rdi, %rax
setb -1016(%rbp) ## 1-byte Folded Spill
adcq %rsi, %rax
movq %rax, %rdx
movq %rax, -952(%rbp) ## 8-byte Spill
movq -536(%rbp), %rax ## 8-byte Reload
movq -1088(%rbp), %rcx ## 8-byte Reload
cmpq %rax, %rcx
adcq -424(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -1000(%rbp) ## 8-byte Spill
addb $255, -864(%rbp) ## 1-byte Folded Spill
adcq -1056(%rbp), %rcx ## 8-byte Folded Reload
setb -1008(%rbp) ## 1-byte Folded Spill
adcq %rdx, %rax
movq %rax, -1024(%rbp) ## 8-byte Spill
movq -376(%rbp), %rax ## 8-byte Reload
movq %rax, %r15
movq -232(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %r15
mulq %rcx
movq %rdx, -144(%rbp) ## 8-byte Spill
xorl %ecx, %ecx
movq -736(%rbp), %rax ## 8-byte Reload
cmpq -288(%rbp), %rax ## 8-byte Folded Reload
setb %cl
movq %rcx, -88(%rbp) ## 8-byte Spill
movq -136(%rbp), %rax ## 8-byte Reload
movq %rax, %rdx
movq -512(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rdx
movq %rdx, -320(%rbp) ## 8-byte Spill
mulq %rcx
movq %rdx, -288(%rbp) ## 8-byte Spill
xorl %ebx, %ebx
movq -904(%rbp), %r12 ## 8-byte Reload
cmpq -1032(%rbp), %r12 ## 8-byte Folded Reload
setb %bl
movq -272(%rbp), %rax ## 8-byte Reload
movq %rax, %rdx
movq -128(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rdx
movq %rdx, -392(%rbp) ## 8-byte Spill
mulq %rcx
movq %rdx, %r14
movq %rdx, -928(%rbp) ## 8-byte Spill
xorl %r10d, %r10d
movq -984(%rbp), %r13 ## 8-byte Reload
cmpq -992(%rbp), %r13 ## 8-byte Folded Reload
setb %r10b
movq -440(%rbp), %rax ## 8-byte Reload
movq %rax, %r8
movq -544(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %r8
mulq %rcx
movq %rdx, %r11
movq %rdx, -864(%rbp) ## 8-byte Spill
xorl %edi, %edi
movq -664(%rbp), %r9 ## 8-byte Reload
cmpq -680(%rbp), %r9 ## 8-byte Folded Reload
setb %dil
movq -632(%rbp), %rax ## 8-byte Reload
movq %rax, %rdx
movq -408(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rdx
movq %rdx, %rsi
mulq %rcx
movq %rdx, -880(%rbp) ## 8-byte Spill
xorl %ecx, %ecx
movq -488(%rbp), %rax ## 8-byte Reload
cmpq -792(%rbp), %rax ## 8-byte Folded Reload
setb %cl
addq %rax, %rsi
movq %rsi, -696(%rbp) ## 8-byte Spill
adcq %rdx, %rcx
movq %rcx, -872(%rbp) ## 8-byte Spill
addq %r9, %r8
movq %r8, -680(%rbp) ## 8-byte Spill
adcq %r11, %rdi
movq %rdi, -816(%rbp) ## 8-byte Spill
movq -392(%rbp), %r9 ## 8-byte Reload
addq %r13, %r9
movq %r9, -392(%rbp) ## 8-byte Spill
adcq %r14, %r10
movq %r10, -976(%rbp) ## 8-byte Spill
movq -320(%rbp), %rdi ## 8-byte Reload
addq %r12, %rdi
movq %rdi, -320(%rbp) ## 8-byte Spill
adcq -288(%rbp), %rbx ## 8-byte Folded Reload
movq %rbx, -496(%rbp) ## 8-byte Spill
movq %r15, %r13
addq -736(%rbp), %r13 ## 8-byte Folded Reload
movq %r13, -904(%rbp) ## 8-byte Spill
movq -88(%rbp), %rax ## 8-byte Reload
adcq -144(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -88(%rbp) ## 8-byte Spill
addb $255, -560(%rbp) ## 1-byte Folded Spill
movq -576(%rbp), %rax ## 8-byte Reload
adcq -352(%rbp), %rax ## 8-byte Folded Reload
setb -960(%rbp) ## 1-byte Folded Spill
movq -1024(%rbp), %r12 ## 8-byte Reload
adcq %r12, %r13
movq -248(%rbp), %r14 ## 8-byte Reload
movq -760(%rbp), %rcx ## 8-byte Reload
cmpq %r14, %rcx
adcq -256(%rbp), %r14 ## 8-byte Folded Reload
addb $255, -776(%rbp) ## 1-byte Folded Spill
adcq -656(%rbp), %rcx ## 8-byte Folded Reload
movq %r14, %rdx
setb -760(%rbp) ## 1-byte Folded Spill
adcq %r13, %rdx
movq %rdx, -560(%rbp) ## 8-byte Spill
movb -120(%rbp), %al ## 1-byte Reload
addb $255, %al
movq %rdi, %rcx
adcq %rdx, %rcx
movq %rcx, -568(%rbp) ## 8-byte Spill
movq -456(%rbp), %rax ## 8-byte Reload
cmpq %rax, -768(%rbp) ## 8-byte Folded Reload
movq %rax, %rdx
adcq -104(%rbp), %rdx ## 8-byte Folded Reload
movq %rdx, -776(%rbp) ## 8-byte Spill
movb -72(%rbp), %al ## 1-byte Reload
addb $255, %al
adcq %rcx, %rdx
movq %rdx, -576(%rbp) ## 8-byte Spill
movb -504(%rbp), %al ## 1-byte Reload
addb $255, %al
movq %r9, %rcx
adcq %rdx, %rcx
movq %rcx, -664(%rbp) ## 8-byte Spill
movq -416(%rbp), %rax ## 8-byte Reload
cmpq %rax, -592(%rbp) ## 8-byte Folded Reload
movq %rax, %rdx
adcq -64(%rbp), %rdx ## 8-byte Folded Reload
movq %rdx, -656(%rbp) ## 8-byte Spill
movb -304(%rbp), %al ## 1-byte Reload
addb $255, %al
adcq %rcx, %rdx
movq %rdx, -584(%rbp) ## 8-byte Spill
movb -160(%rbp), %al ## 1-byte Reload
addb $255, %al
adcq %rdx, %r8
movq %r8, -688(%rbp) ## 8-byte Spill
movq -192(%rbp), %rax ## 8-byte Reload
cmpq %rax, -472(%rbp) ## 8-byte Folded Reload
movq %rax, %rdx
adcq -264(%rbp), %rdx ## 8-byte Folded Reload
movq %rdx, -672(%rbp) ## 8-byte Spill
movb -80(%rbp), %al ## 1-byte Reload
addb $255, %al
adcq %r8, %rdx
movq %rdx, -592(%rbp) ## 8-byte Spill
movb -216(%rbp), %al ## 1-byte Reload
addb $255, %al
movq %rsi, %rcx
adcq %rdx, %rcx
movq %rcx, -472(%rbp) ## 8-byte Spill
movb -296(%rbp), %al ## 1-byte Reload
addb $255, %al
movq -616(%rbp), %rdx ## 8-byte Reload
adcq %rcx, %rdx
movq %rdx, -712(%rbp) ## 8-byte Spill
movb -448(%rbp), %al ## 1-byte Reload
addb $255, %al
movq -384(%rbp), %rcx ## 8-byte Reload
adcq %rdx, %rcx
movq %rcx, -720(%rbp) ## 8-byte Spill
movb -152(%rbp), %al ## 1-byte Reload
addb $255, %al
movq -328(%rbp), %rax ## 8-byte Reload
adcq %rcx, %rax
movq %rax, %rcx
movq %rax, -704(%rbp) ## 8-byte Spill
movq -208(%rbp), %rax ## 8-byte Reload
movq -176(%rbp), %r11 ## 8-byte Reload
mulq %r11
movq -552(%rbp), %r9 ## 8-byte Reload
movq %r9, %r8
imulq %r11, %r8
movq -824(%rbp), %rax ## 8-byte Reload
cmpq -464(%rbp), %rax ## 8-byte Folded Reload
adcq $0, %rdx
addb $255, -744(%rbp) ## 1-byte Folded Spill
adcq -752(%rbp), %rax ## 8-byte Folded Reload
leaq (%rdx,%r8), %rax
movq %rdx, %r15
setb -464(%rbp) ## 1-byte Folded Spill
adcq %rcx, %rax
movq %rax, %rbx
movq %rax, -648(%rbp) ## 8-byte Spill
movq -168(%rbp), %rsi ## 8-byte Reload
movq %rsi, %r10
imulq -800(%rbp), %r10 ## 8-byte Folded Reload
movq %r10, -352(%rbp) ## 8-byte Spill
movq %rsi, %rax
movl $4294967295, %edx ## imm = 0xFFFFFFFF
mulq %rdx
movq %rdx, -520(%rbp) ## 8-byte Spill
movq %rsi, %rcx
shlq $32, %rcx
xorl %eax, %eax
subq %rsi, %rcx
movq %rcx, -240(%rbp) ## 8-byte Spill
setb %al
movq -784(%rbp), %rcx ## 8-byte Reload
leaq (%rcx,%rax), %rsi
xorl %edi, %edi
addq %rdx, %rsi
movq %rsi, -792(%rbp) ## 8-byte Spill
setb %dil
addq %rax, %rcx
adcq -600(%rbp), %rdi ## 8-byte Folded Reload
movq %rdi, -784(%rbp) ## 8-byte Spill
setb -640(%rbp) ## 1-byte Folded Spill
adcq %rbx, %r10
movq %r10, -736(%rbp) ## 8-byte Spill
movq %r9, %rax
mulq %r11
movq %rdx, %rdi
movq %r15, %rax
addq %r8, %rax
movq %rdx, %rax
adcq $0, %rax
setb %cl
movq -112(%rbp), %rsi ## 8-byte Reload
movq %rsi, %rax
mulq %r11
movq %rdx, -488(%rbp) ## 8-byte Spill
imulq %r11, %rsi
addq %r8, %r15
movq %r15, -768(%rbp) ## 8-byte Spill
adcq %rdi, %rsi
movq %rsi, -528(%rbp) ## 8-byte Spill
movzbl %cl, %eax
adcq %rdx, %rax
movq %rax, -600(%rbp) ## 8-byte Spill
addb $255, -1048(%rbp) ## 1-byte Folded Spill
movq -968(%rbp), %rax ## 8-byte Reload
adcq %rax, -200(%rbp) ## 8-byte Folded Spill
movq -96(%rbp), %rax ## 8-byte Reload
adcq $0, %rax
addb $255, -936(%rbp) ## 1-byte Folded Spill
movq -1040(%rbp), %rcx ## 8-byte Reload
adcq -944(%rbp), %rcx ## 8-byte Folded Reload
adcq $0, %rax
xorl %ecx, %ecx
cmpq -888(%rbp), %rax ## 8-byte Folded Reload
movq %rax, %r8
movq %rax, -96(%rbp) ## 8-byte Spill
setb %cl
movq %rcx, %rbx
movq -368(%rbp), %rcx ## 8-byte Reload
movq %rcx, %rax
movq -400(%rbp), %rsi ## 8-byte Reload
mulq %rsi
movq %rcx, %rdi
imulq %rsi, %rdi
xorl %esi, %esi
movq -912(%rbp), %rcx ## 8-byte Reload
cmpq -920(%rbp), %rcx ## 8-byte Folded Reload
setb %sil
movq %rcx, %rax
addq %rdi, %rax
leaq (%rsi,%rdx), %rax
adcq %rdx, %rsi
movq %rsi, -200(%rbp) ## 8-byte Spill
addq %rcx, %rdi
movq %rdi, -944(%rbp) ## 8-byte Spill
adcq %rax, %rbx
movq %rbx, -312(%rbp) ## 8-byte Spill
movq -840(%rbp), %rcx ## 8-byte Reload
cmpq -336(%rbp), %rcx ## 8-byte Folded Reload
movq -360(%rbp), %rax ## 8-byte Reload
adcq $0, %rax
movq %rax, -360(%rbp) ## 8-byte Spill
addb $255, -848(%rbp) ## 1-byte Folded Spill
adcq -856(%rbp), %rcx ## 8-byte Folded Reload
movq %rax, %rcx
setb -936(%rbp) ## 1-byte Folded Spill
adcq %r8, %rcx
movq %rcx, -992(%rbp) ## 8-byte Spill
addb $255, -1016(%rbp) ## 1-byte Folded Spill
movq -808(%rbp), %rax ## 8-byte Reload
adcq %rax, -832(%rbp) ## 8-byte Folded Spill
movq %rdi, %rax
setb -984(%rbp) ## 1-byte Folded Spill
adcq %rcx, %rax
movq %rax, %rdx
movq %rax, -856(%rbp) ## 8-byte Spill
movq -536(%rbp), %rax ## 8-byte Reload
movq -1000(%rbp), %rcx ## 8-byte Reload
cmpq %rax, %rcx
adcq -424(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -752(%rbp) ## 8-byte Spill
addb $255, -1008(%rbp) ## 1-byte Folded Spill
adcq -952(%rbp), %rcx ## 8-byte Folded Reload
setb -848(%rbp) ## 1-byte Folded Spill
adcq %rdx, %rax
movq %rax, %rcx
movq %rax, -808(%rbp) ## 8-byte Spill
movq -56(%rbp), %rax ## 8-byte Reload
movq %rax, %rdi
movq -232(%rbp), %rdx ## 8-byte Reload
imulq %rdx, %rdi
mulq %rdx
movq %rdx, -920(%rbp) ## 8-byte Spill
xorl %esi, %esi
movq -88(%rbp), %rax ## 8-byte Reload
cmpq -144(%rbp), %rax ## 8-byte Folded Reload
setb %sil
addq %rax, %rdi
movq %rdi, -840(%rbp) ## 8-byte Spill
adcq %rdx, %rsi
movq %rsi, -912(%rbp) ## 8-byte Spill
addb $255, -960(%rbp) ## 1-byte Folded Spill
adcq %r12, -904(%rbp) ## 8-byte Folded Spill
movq %rdi, %rax
setb -1016(%rbp) ## 1-byte Folded Spill
adcq %rcx, %rax
movq %rax, %rdx
movq %rax, -952(%rbp) ## 8-byte Spill
movq -248(%rbp), %rax ## 8-byte Reload
cmpq %rax, %r14
adcq -256(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -1000(%rbp) ## 8-byte Spill
addb $255, -760(%rbp) ## 1-byte Folded Spill
adcq %r13, %r14
setb -1008(%rbp) ## 1-byte Folded Spill
adcq %rdx, %rax
movq %rax, -904(%rbp) ## 8-byte Spill
movq -376(%rbp), %rax ## 8-byte Reload
movq %rax, %r15
movq -512(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %r15
mulq %rcx
movq %rdx, -968(%rbp) ## 8-byte Spill
xorl %eax, %eax
movq -496(%rbp), %rcx ## 8-byte Reload
cmpq -288(%rbp), %rcx ## 8-byte Folded Reload
setb %al
movq %rax, -744(%rbp) ## 8-byte Spill
movq -136(%rbp), %rax ## 8-byte Reload
movq %rax, %rcx
movq -128(%rbp), %rdx ## 8-byte Reload
imulq %rdx, %rcx
movq %rcx, -400(%rbp) ## 8-byte Spill
mulq %rdx
movq %rdx, -88(%rbp) ## 8-byte Spill
xorl %r11d, %r11d
movq -976(%rbp), %r13 ## 8-byte Reload
cmpq -928(%rbp), %r13 ## 8-byte Folded Reload
setb %r11b
movq -272(%rbp), %rax ## 8-byte Reload
movq %rax, %rcx
movq -544(%rbp), %rdx ## 8-byte Reload
imulq %rdx, %rcx
movq %rcx, -336(%rbp) ## 8-byte Spill
mulq %rdx
movq %rdx, %r14
movq %rdx, -928(%rbp) ## 8-byte Spill
xorl %r9d, %r9d
movq -816(%rbp), %rbx ## 8-byte Reload
cmpq -864(%rbp), %rbx ## 8-byte Folded Reload
setb %r9b
movq -440(%rbp), %rax ## 8-byte Reload
movq %rax, %r12
movq -408(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %r12
mulq %rcx
movq %rdx, %r10
movq %rdx, -832(%rbp) ## 8-byte Spill
xorl %ecx, %ecx
movq -872(%rbp), %rdi ## 8-byte Reload
cmpq -880(%rbp), %rdi ## 8-byte Folded Reload
setb %cl
movq -632(%rbp), %rax ## 8-byte Reload
movq %rax, %r8
movq -344(%rbp), %rsi ## 8-byte Reload
imulq %rsi, %r8
mulq %rsi
movq %rdx, -888(%rbp) ## 8-byte Spill
xorl %esi, %esi
movq -608(%rbp), %rax ## 8-byte Reload
cmpq -480(%rbp), %rax ## 8-byte Folded Reload
setb %sil
addq %rax, %r8
movq %r8, -480(%rbp) ## 8-byte Spill
adcq %rdx, %rsi
movq %rsi, -880(%rbp) ## 8-byte Spill
addq %rdi, %r12
movq %r12, -760(%rbp) ## 8-byte Spill
adcq %r10, %rcx
movq %rcx, -824(%rbp) ## 8-byte Spill
movq -336(%rbp), %rsi ## 8-byte Reload
addq %rbx, %rsi
movq %rsi, -336(%rbp) ## 8-byte Spill
adcq %r14, %r9
movq %r9, -288(%rbp) ## 8-byte Spill
movq -400(%rbp), %rdx ## 8-byte Reload
addq %r13, %rdx
movq %rdx, -400(%rbp) ## 8-byte Spill
adcq -88(%rbp), %r11 ## 8-byte Folded Reload
movq %r11, -144(%rbp) ## 8-byte Spill
addq -496(%rbp), %r15 ## 8-byte Folded Reload
movq -968(%rbp), %rbx ## 8-byte Reload
movq -744(%rbp), %r11 ## 8-byte Reload
adcq %rbx, %r11
addb $255, -120(%rbp) ## 1-byte Folded Spill
movq -560(%rbp), %rax ## 8-byte Reload
adcq -320(%rbp), %rax ## 8-byte Folded Reload
movq %r15, %r13
setb -320(%rbp) ## 1-byte Folded Spill
movq -904(%rbp), %r14 ## 8-byte Reload
adcq %r14, %r13
movq -456(%rbp), %r9 ## 8-byte Reload
movq -776(%rbp), %rcx ## 8-byte Reload
cmpq %r9, %rcx
adcq -104(%rbp), %r9 ## 8-byte Folded Reload
addb $255, -72(%rbp) ## 1-byte Folded Spill
adcq -568(%rbp), %rcx ## 8-byte Folded Reload
movq %r9, %rcx
setb -816(%rbp) ## 1-byte Folded Spill
adcq %r13, %rcx
movq %rcx, -568(%rbp) ## 8-byte Spill
addb $255, -504(%rbp) ## 1-byte Folded Spill
movq -576(%rbp), %rax ## 8-byte Reload
adcq -392(%rbp), %rax ## 8-byte Folded Reload
movq %rdx, %rax
setb -560(%rbp) ## 1-byte Folded Spill
adcq %rcx, %rax
movq %rax, %rdx
movq %rax, -576(%rbp) ## 8-byte Spill
movq -416(%rbp), %rax ## 8-byte Reload
movq -656(%rbp), %rcx ## 8-byte Reload
cmpq %rax, %rcx
adcq -64(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -744(%rbp) ## 8-byte Spill
addb $255, -304(%rbp) ## 1-byte Folded Spill
adcq -664(%rbp), %rcx ## 8-byte Folded Reload
movq %rax, %rcx
setb -656(%rbp) ## 1-byte Folded Spill
adcq %rdx, %rcx
movq %rcx, -664(%rbp) ## 8-byte Spill
addb $255, -160(%rbp) ## 1-byte Folded Spill
movq -584(%rbp), %rax ## 8-byte Reload
adcq -680(%rbp), %rax ## 8-byte Folded Reload
movq %rsi, %rax
setb -776(%rbp) ## 1-byte Folded Spill
adcq %rcx, %rax
movq %rax, %rdx
movq %rax, -584(%rbp) ## 8-byte Spill
movq -192(%rbp), %rax ## 8-byte Reload
movq -672(%rbp), %rcx ## 8-byte Reload
cmpq %rax, %rcx
adcq -264(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -864(%rbp) ## 8-byte Spill
addb $255, -80(%rbp) ## 1-byte Folded Spill
adcq -688(%rbp), %rcx ## 8-byte Folded Reload
movq %rax, %rcx
setb -872(%rbp) ## 1-byte Folded Spill
adcq %rdx, %rcx
movq %rcx, -688(%rbp) ## 8-byte Spill
addb $255, -216(%rbp) ## 1-byte Folded Spill
movq -592(%rbp), %rax ## 8-byte Reload
adcq -696(%rbp), %rax ## 8-byte Folded Reload
setb -680(%rbp) ## 1-byte Folded Spill
adcq %rcx, %r12
movq %r12, -80(%rbp) ## 8-byte Spill
movq -184(%rbp), %rax ## 8-byte Reload
movq -616(%rbp), %rdx ## 8-byte Reload
cmpq %rax, %rdx
adcq -48(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -592(%rbp) ## 8-byte Spill
addb $255, -296(%rbp) ## 1-byte Folded Spill
adcq %rdx, -472(%rbp) ## 8-byte Folded Spill
movq %rax, %rdx
setb -608(%rbp) ## 1-byte Folded Spill
adcq %r12, %rdx
movq %rdx, -392(%rbp) ## 8-byte Spill
addb $255, -448(%rbp) ## 1-byte Folded Spill
movq -712(%rbp), %rax ## 8-byte Reload
adcq -384(%rbp), %rax ## 8-byte Folded Reload
setb -496(%rbp) ## 1-byte Folded Spill
adcq %rdx, %r8
movq %r8, -160(%rbp) ## 8-byte Spill
addb $255, -152(%rbp) ## 1-byte Folded Spill
movq -720(%rbp), %rax ## 8-byte Reload
adcq -328(%rbp), %rax ## 8-byte Folded Reload
movq -224(%rbp), %rax ## 8-byte Reload
setb -328(%rbp) ## 1-byte Folded Spill
adcq %r8, %rax
movq %rax, -504(%rbp) ## 8-byte Spill
addb $255, -464(%rbp) ## 1-byte Folded Spill
movq -768(%rbp), %rcx ## 8-byte Reload
adcq -704(%rbp), %rcx ## 8-byte Folded Reload
movq -528(%rbp), %rcx ## 8-byte Reload
setb -120(%rbp) ## 1-byte Folded Spill
adcq %rax, %rcx
movq %rcx, -304(%rbp) ## 8-byte Spill
movq -240(%rbp), %rdx ## 8-byte Reload
subq -168(%rbp), %rdx ## 8-byte Folded Reload
movq %rdx, -240(%rbp) ## 8-byte Spill
addb $255, -640(%rbp) ## 1-byte Folded Spill
movq -648(%rbp), %rax ## 8-byte Reload
adcq -352(%rbp), %rax ## 8-byte Folded Reload
movq -520(%rbp), %rax ## 8-byte Reload
leaq (%rdx,%rax), %rax
setb -72(%rbp) ## 1-byte Folded Spill
adcq %rcx, %rax
movq %rax, -616(%rbp) ## 8-byte Spill
addb $255, -936(%rbp) ## 1-byte Folded Spill
movq -96(%rbp), %rax ## 8-byte Reload
adcq %rax, -360(%rbp) ## 8-byte Folded Spill
movq -312(%rbp), %rax ## 8-byte Reload
adcq $0, %rax
addb $255, -984(%rbp) ## 1-byte Folded Spill
movq -992(%rbp), %rcx ## 8-byte Reload
adcq -944(%rbp), %rcx ## 8-byte Folded Reload
adcq $0, %rax
xorl %ecx, %ecx
cmpq -200(%rbp), %rax ## 8-byte Folded Reload
movq %rax, %r8
movq %rax, -312(%rbp) ## 8-byte Spill
setb %cl
movq %rcx, %rsi
movq -368(%rbp), %r10 ## 8-byte Reload
movq %r10, %rax
movq -232(%rbp), %rcx ## 8-byte Reload
mulq %rcx
imulq %rcx, %r10
xorl %ecx, %ecx
movq -912(%rbp), %rdi ## 8-byte Reload
cmpq -920(%rbp), %rdi ## 8-byte Folded Reload
setb %cl
movq %rdi, %rax
addq %r10, %rax
leaq (%rcx,%rdx), %rax
adcq %rdx, %rcx
movq %rcx, -360(%rbp) ## 8-byte Spill
addq %rdi, %r10
movq %r10, -640(%rbp) ## 8-byte Spill
adcq %rax, %rsi
movq %rsi, -200(%rbp) ## 8-byte Spill
movq -752(%rbp), %rcx ## 8-byte Reload
cmpq -536(%rbp), %rcx ## 8-byte Folded Reload
movq -424(%rbp), %rax ## 8-byte Reload
adcq $0, %rax
movq %rax, -424(%rbp) ## 8-byte Spill
addb $255, -848(%rbp) ## 1-byte Folded Spill
adcq -856(%rbp), %rcx ## 8-byte Folded Reload
setb -648(%rbp) ## 1-byte Folded Spill
movq %rax, %rcx
adcq %r8, %rcx
movq %rcx, -856(%rbp) ## 8-byte Spill
addb $255, -1016(%rbp) ## 1-byte Folded Spill
movq -808(%rbp), %rax ## 8-byte Reload
adcq %rax, -840(%rbp) ## 8-byte Folded Spill
setb -768(%rbp) ## 1-byte Folded Spill
adcq %rcx, %r10
movq %r10, -152(%rbp) ## 8-byte Spill
movq -248(%rbp), %rax ## 8-byte Reload
movq -1000(%rbp), %rdx ## 8-byte Reload
cmpq %rax, %rdx
adcq -256(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -384(%rbp) ## 8-byte Spill
addb $255, -1008(%rbp) ## 1-byte Folded Spill
adcq -952(%rbp), %rdx ## 8-byte Folded Reload
setb -216(%rbp) ## 1-byte Folded Spill
adcq %r10, %rax
movq %rax, %rsi
movq %rax, -704(%rbp) ## 8-byte Spill
movq -56(%rbp), %rax ## 8-byte Reload
movq %rax, %rdi
movq -512(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rdi
mulq %rcx
movq %rdx, -752(%rbp) ## 8-byte Spill
xorl %ecx, %ecx
cmpq %rbx, %r11
setb %cl
addq %r11, %rdi
movq %rdi, -720(%rbp) ## 8-byte Spill
adcq %rdx, %rcx
movq %rcx, -840(%rbp) ## 8-byte Spill
addb $255, -320(%rbp) ## 1-byte Folded Spill
adcq %r14, %r15
setb -472(%rbp) ## 1-byte Folded Spill
movq %rdi, %rax
adcq %rsi, %rax
movq %rax, %rcx
movq %rax, -696(%rbp) ## 8-byte Spill
movq -456(%rbp), %rax ## 8-byte Reload
cmpq %rax, %r9
adcq -104(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -672(%rbp) ## 8-byte Spill
addb $255, -816(%rbp) ## 1-byte Folded Spill
adcq %r13, %r9
setb -464(%rbp) ## 1-byte Folded Spill
adcq %rcx, %rax
movq %rax, -816(%rbp) ## 8-byte Spill
movq -376(%rbp), %rax ## 8-byte Reload
movq %rax, %rdx
movq -128(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rdx
movq %rdx, -232(%rbp) ## 8-byte Spill
mulq %rcx
movq %rdx, -808(%rbp) ## 8-byte Spill
xorl %eax, %eax
movq -144(%rbp), %rcx ## 8-byte Reload
cmpq -88(%rbp), %rcx ## 8-byte Folded Reload
setb %al
movq %rax, -88(%rbp) ## 8-byte Spill
movq -136(%rbp), %rax ## 8-byte Reload
movq %rax, %rbx
movq -544(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rbx
mulq %rcx
movq %rdx, -96(%rbp) ## 8-byte Spill
xorl %r10d, %r10d
movq -928(%rbp), %rcx ## 8-byte Reload
cmpq %rcx, -288(%rbp) ## 8-byte Folded Reload
setb %r10b
movq -272(%rbp), %rax ## 8-byte Reload
movq %rax, %r14
movq -408(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %r14
mulq %rcx
movq %rdx, %r12
movq %rdx, -848(%rbp) ## 8-byte Spill
xorl %edi, %edi
movq -824(%rbp), %r15 ## 8-byte Reload
cmpq -832(%rbp), %r15 ## 8-byte Folded Reload
setb %dil
movq -440(%rbp), %rax ## 8-byte Reload
movq %rax, %r13
movq -344(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %r13
mulq %rcx
movq %rdx, %r9
movq %rdx, -712(%rbp) ## 8-byte Spill
xorl %esi, %esi
movq -880(%rbp), %r8 ## 8-byte Reload
cmpq -888(%rbp), %r8 ## 8-byte Folded Reload
setb %sil
movq -632(%rbp), %rax ## 8-byte Reload
movq %rax, %rdx
movq -176(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rdx
movq %rdx, %r11
mulq %rcx
movq %rdx, -296(%rbp) ## 8-byte Spill
xorl %ecx, %ecx
movq -600(%rbp), %rax ## 8-byte Reload
cmpq -488(%rbp), %rax ## 8-byte Folded Reload
setb %cl
addq %rax, %r11
movq %r11, -320(%rbp) ## 8-byte Spill
adcq %rdx, %rcx
movq %rcx, -448(%rbp) ## 8-byte Spill
addq %r8, %r13
adcq %r9, %rsi
movq %rsi, -488(%rbp) ## 8-byte Spill
addq %r15, %r14
adcq %r12, %rdi
movq %rdi, -600(%rbp) ## 8-byte Spill
addq -288(%rbp), %rbx ## 8-byte Folded Reload
adcq -96(%rbp), %r10 ## 8-byte Folded Reload
movq %r10, -888(%rbp) ## 8-byte Spill
movq -232(%rbp), %rcx ## 8-byte Reload
addq -144(%rbp), %rcx ## 8-byte Folded Reload
movq %rcx, -232(%rbp) ## 8-byte Spill
movq -808(%rbp), %r9 ## 8-byte Reload
adcq %r9, -88(%rbp) ## 8-byte Folded Spill
addb $255, -560(%rbp) ## 1-byte Folded Spill
movq -568(%rbp), %rax ## 8-byte Reload
adcq -400(%rbp), %rax ## 8-byte Folded Reload
setb -568(%rbp) ## 1-byte Folded Spill
movq %rcx, %rax
movq -816(%rbp), %r10 ## 8-byte Reload
adcq %r10, %rax
movq %rax, -560(%rbp) ## 8-byte Spill
movq -416(%rbp), %r11 ## 8-byte Reload
movq -744(%rbp), %rcx ## 8-byte Reload
cmpq %r11, %rcx
adcq -64(%rbp), %r11 ## 8-byte Folded Reload
addb $255, -656(%rbp) ## 1-byte Folded Spill
adcq -576(%rbp), %rcx ## 8-byte Folded Reload
setb -576(%rbp) ## 1-byte Folded Spill
movq %r11, %rdx
adcq %rax, %rdx
addb $255, -776(%rbp) ## 1-byte Folded Spill
movq -664(%rbp), %rax ## 8-byte Reload
adcq -336(%rbp), %rax ## 8-byte Folded Reload
setb %r8b
movq %rbx, %rcx
adcq %rdx, %rcx
movq -192(%rbp), %rax ## 8-byte Reload
movq -864(%rbp), %rdi ## 8-byte Reload
cmpq %rax, %rdi
movq %rax, %rsi
adcq -264(%rbp), %rsi ## 8-byte Folded Reload
addb $255, -872(%rbp) ## 1-byte Folded Spill
adcq -584(%rbp), %rdi ## 8-byte Folded Reload
setb %r12b
movq %rsi, %rax
movq %rsi, -288(%rbp) ## 8-byte Spill
adcq %rcx, %rax
addb $255, %r8b
adcq %rbx, %rdx
setb -336(%rbp) ## 1-byte Folded Spill
addb $255, -680(%rbp) ## 1-byte Folded Spill
movq -688(%rbp), %rdx ## 8-byte Reload
adcq -760(%rbp), %rdx ## 8-byte Folded Reload
setb %r8b
movq %r14, %rdx
adcq %rax, %rdx
addb $255, %r12b
adcq %rsi, %rcx
setb -400(%rbp) ## 1-byte Folded Spill
movq -184(%rbp), %rcx ## 8-byte Reload
movq -592(%rbp), %rsi ## 8-byte Reload
cmpq %rcx, %rsi
movq %rcx, %rdi
adcq -48(%rbp), %rdi ## 8-byte Folded Reload
addb $255, -608(%rbp) ## 1-byte Folded Spill
adcq -80(%rbp), %rsi ## 8-byte Folded Reload
setb %cl
movq %rdi, %rbx
movq %rdi, %rsi
movq %rdi, -144(%rbp) ## 8-byte Spill
adcq %rdx, %rbx
addb $255, %r8b
adcq %r14, %rax
setb -80(%rbp) ## 1-byte Folded Spill
addb $255, -496(%rbp) ## 1-byte Folded Spill
movq -392(%rbp), %rax ## 8-byte Reload
adcq -480(%rbp), %rax ## 8-byte Folded Reload
setb %al
movq %r13, %rdi
adcq %rbx, %rdi
addb $255, %cl
adcq %rsi, %rdx
setb -392(%rbp) ## 1-byte Folded Spill
movq -624(%rbp), %rcx ## 8-byte Reload
movq -224(%rbp), %rdx ## 8-byte Reload
cmpq %rcx, %rdx
movq %rcx, %rsi
adcq -432(%rbp), %rsi ## 8-byte Folded Reload
addb $255, -328(%rbp) ## 1-byte Folded Spill
adcq %rdx, -160(%rbp) ## 8-byte Folded Spill
setb %cl
movq %rsi, %rdx
movq %rsi, -496(%rbp) ## 8-byte Spill
adcq %rdi, %rdx
movq %rdx, %r8
movq %rdx, -480(%rbp) ## 8-byte Spill
addb $255, %al
adcq %r13, %rbx
setb -328(%rbp) ## 1-byte Folded Spill
addb $255, %cl
adcq %rsi, %rdi
setb -536(%rbp) ## 1-byte Folded Spill
movq -168(%rbp), %rax ## 8-byte Reload
movl $4294967294, %ecx ## imm = 0xFFFFFFFE
mulq %rcx
movq -240(%rbp), %rcx ## 8-byte Reload
addq -520(%rbp), %rcx ## 8-byte Folded Reload
adcq -352(%rbp), %rdx ## 8-byte Folded Reload
movq %rdx, %rsi
movq %rdx, -160(%rbp) ## 8-byte Spill
addb $255, -120(%rbp) ## 1-byte Folded Spill
movq -504(%rbp), %rax ## 8-byte Reload
adcq -528(%rbp), %rax ## 8-byte Folded Reload
setb -120(%rbp) ## 1-byte Folded Spill
movq -320(%rbp), %rdx ## 8-byte Reload
adcq %r8, %rdx
movq %rdx, -528(%rbp) ## 8-byte Spill
addb $255, -72(%rbp) ## 1-byte Folded Spill
adcq -304(%rbp), %rcx ## 8-byte Folded Reload
setb -224(%rbp) ## 1-byte Folded Spill
movq %rsi, %rax
adcq %rdx, %rax
movq %rax, -240(%rbp) ## 8-byte Spill
addb $255, -648(%rbp) ## 1-byte Folded Spill
movq -312(%rbp), %rax ## 8-byte Reload
adcq %rax, -424(%rbp) ## 8-byte Folded Spill
movq -200(%rbp), %rax ## 8-byte Reload
adcq $0, %rax
addb $255, -768(%rbp) ## 1-byte Folded Spill
movq -856(%rbp), %rcx ## 8-byte Reload
adcq -640(%rbp), %rcx ## 8-byte Folded Reload
adcq $0, %rax
xorl %ecx, %ecx
cmpq -360(%rbp), %rax ## 8-byte Folded Reload
movq %rax, %r14
movq %rax, -200(%rbp) ## 8-byte Spill
setb %cl
movq %rcx, %rbx
movq -368(%rbp), %r8 ## 8-byte Reload
movq %r8, %rax
movq -512(%rbp), %rsi ## 8-byte Reload
mulq %rsi
imulq %rsi, %r8
xorl %ecx, %ecx
movq -840(%rbp), %rdi ## 8-byte Reload
cmpq -752(%rbp), %rdi ## 8-byte Folded Reload
setb %cl
movq %rdi, %rax
addq %r8, %rax
leaq (%rcx,%rdx), %rax
adcq %rdx, %rcx
movq %rcx, -504(%rbp) ## 8-byte Spill
addq %rdi, %r8
movq %r8, -72(%rbp) ## 8-byte Spill
adcq %rax, %rbx
movq %rbx, -360(%rbp) ## 8-byte Spill
movq -384(%rbp), %rcx ## 8-byte Reload
cmpq -248(%rbp), %rcx ## 8-byte Folded Reload
movq -256(%rbp), %rax ## 8-byte Reload
adcq $0, %rax
movq %rax, -256(%rbp) ## 8-byte Spill
addb $255, -216(%rbp) ## 1-byte Folded Spill
adcq -152(%rbp), %rcx ## 8-byte Folded Reload
setb -512(%rbp) ## 1-byte Folded Spill
movq %rax, %rcx
adcq %r14, %rcx
movq %rcx, -216(%rbp) ## 8-byte Spill
addb $255, -472(%rbp) ## 1-byte Folded Spill
movq -704(%rbp), %rax ## 8-byte Reload
adcq %rax, -720(%rbp) ## 8-byte Folded Spill
setb -304(%rbp) ## 1-byte Folded Spill
adcq %rcx, %r8
movq %r8, -664(%rbp) ## 8-byte Spill
movq -456(%rbp), %rax ## 8-byte Reload
movq -672(%rbp), %rcx ## 8-byte Reload
cmpq %rax, %rcx
adcq -104(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -648(%rbp) ## 8-byte Spill
addb $255, -464(%rbp) ## 1-byte Folded Spill
adcq -696(%rbp), %rcx ## 8-byte Folded Reload
setb -608(%rbp) ## 1-byte Folded Spill
adcq %r8, %rax
movq %rax, %rsi
movq %rax, -592(%rbp) ## 8-byte Spill
movq -56(%rbp), %rax ## 8-byte Reload
movq %rax, %r13
movq -128(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %r13
mulq %rcx
movq %rdx, -656(%rbp) ## 8-byte Spill
xorl %ecx, %ecx
movq -88(%rbp), %rax ## 8-byte Reload
cmpq %r9, %rax
setb %cl
addq %rax, %r13
adcq %rdx, %rcx
movq %rcx, -168(%rbp) ## 8-byte Spill
addb $255, -568(%rbp) ## 1-byte Folded Spill
adcq %r10, -232(%rbp) ## 8-byte Folded Spill
setb -232(%rbp) ## 1-byte Folded Spill
movq %r13, %rax
adcq %rsi, %rax
movq %rax, %rcx
movq %rax, -584(%rbp) ## 8-byte Spill
movq -416(%rbp), %rax ## 8-byte Reload
cmpq %rax, %r11
adcq -64(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -672(%rbp) ## 8-byte Spill
addb $255, -576(%rbp) ## 1-byte Folded Spill
adcq -560(%rbp), %r11 ## 8-byte Folded Reload
setb -680(%rbp) ## 1-byte Folded Spill
adcq %rcx, %rax
movq %rax, -248(%rbp) ## 8-byte Spill
movq -376(%rbp), %rax ## 8-byte Reload
movq %rax, %r9
movq -544(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %r9
mulq %rcx
movq %rdx, -560(%rbp) ## 8-byte Spill
xorl %eax, %eax
movq -888(%rbp), %r15 ## 8-byte Reload
cmpq -96(%rbp), %r15 ## 8-byte Folded Reload
setb %al
movq %rax, -424(%rbp) ## 8-byte Spill
movq -136(%rbp), %rax ## 8-byte Reload
movq %rax, %r12
movq -408(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %r12
mulq %rcx
movq %rdx, %r14
xorl %eax, %eax
movq -600(%rbp), %r10 ## 8-byte Reload
cmpq -848(%rbp), %r10 ## 8-byte Folded Reload
setb %al
movq %rax, -96(%rbp) ## 8-byte Spill
movq -272(%rbp), %rax ## 8-byte Reload
movq %rax, %rsi
movq -344(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rsi
mulq %rcx
movq %rdx, %r8
movq %rdx, -568(%rbp) ## 8-byte Spill
xorl %ebx, %ebx
movq -488(%rbp), %rdi ## 8-byte Reload
cmpq -712(%rbp), %rdi ## 8-byte Folded Reload
setb %bl
movq -440(%rbp), %rax ## 8-byte Reload
movq %rax, %r11
movq -176(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %r11
mulq %rcx
movq %rdx, -576(%rbp) ## 8-byte Spill
xorl %ecx, %ecx
movq -448(%rbp), %rax ## 8-byte Reload
cmpq -296(%rbp), %rax ## 8-byte Folded Reload
setb %cl
addq %rax, %r11
adcq %rdx, %rcx
movq %rcx, -296(%rbp) ## 8-byte Spill
addq %rdi, %rsi
movq %rsi, -488(%rbp) ## 8-byte Spill
adcq %r8, %rbx
movq %rbx, -640(%rbp) ## 8-byte Spill
addq %r10, %r12
movq %r12, -712(%rbp) ## 8-byte Spill
movq -96(%rbp), %r10 ## 8-byte Reload
adcq %r14, %r10
addq %r15, %r9
movq %r9, -696(%rbp) ## 8-byte Spill
movq -424(%rbp), %rbx ## 8-byte Reload
movq -560(%rbp), %r8 ## 8-byte Reload
adcq %r8, %rbx
movq %rbx, -424(%rbp) ## 8-byte Spill
movb -336(%rbp), %al ## 1-byte Reload
addb $255, %al
adcq -248(%rbp), %r9 ## 8-byte Folded Reload
movq %r9, -720(%rbp) ## 8-byte Spill
movq -192(%rbp), %rax ## 8-byte Reload
cmpq %rax, -288(%rbp) ## 8-byte Folded Reload
movq %rax, %rdx
adcq -264(%rbp), %rdx ## 8-byte Folded Reload
movq %rdx, -688(%rbp) ## 8-byte Spill
movb -400(%rbp), %al ## 1-byte Reload
addb $255, %al
adcq %r9, %rdx
movq %rdx, -472(%rbp) ## 8-byte Spill
movb -80(%rbp), %al ## 1-byte Reload
addb $255, %al
adcq %rdx, %r12
movq %r12, -384(%rbp) ## 8-byte Spill
movq -184(%rbp), %rax ## 8-byte Reload
cmpq %rax, -144(%rbp) ## 8-byte Folded Reload
movq %rax, %rdx
adcq -48(%rbp), %rdx ## 8-byte Folded Reload
movq %rdx, -704(%rbp) ## 8-byte Spill
movb -392(%rbp), %al ## 1-byte Reload
addb $255, %al
adcq %r12, %rdx
movq %rdx, -600(%rbp) ## 8-byte Spill
movb -328(%rbp), %al ## 1-byte Reload
addb $255, %al
adcq %rdx, %rsi
movq %rsi, -448(%rbp) ## 8-byte Spill
movq -624(%rbp), %rax ## 8-byte Reload
cmpq %rax, -496(%rbp) ## 8-byte Folded Reload
movq %rax, %rdx
adcq -432(%rbp), %rdx ## 8-byte Folded Reload
movq %rdx, -464(%rbp) ## 8-byte Spill
movb -536(%rbp), %al ## 1-byte Reload
addb $255, %al
movq %rdx, %rax
adcq %rsi, %rax
addb $255, -120(%rbp) ## 1-byte Folded Spill
movq -480(%rbp), %rcx ## 8-byte Reload
adcq -320(%rbp), %rcx ## 8-byte Folded Reload
setb %dl
movq %r11, %rcx
adcq %rax, %rcx
movq %rcx, -88(%rbp) ## 8-byte Spill
addb $255, %dl
adcq %r11, %rax
setb -152(%rbp) ## 1-byte Folded Spill
movq -352(%rbp), %rax ## 8-byte Reload
movq -160(%rbp), %rsi ## 8-byte Reload
cmpq %rax, %rsi
movq %rax, %rdx
adcq -520(%rbp), %rdx ## 8-byte Folded Reload
movq %rdx, -312(%rbp) ## 8-byte Spill
addb $255, -224(%rbp) ## 1-byte Folded Spill
adcq %rsi, -528(%rbp) ## 8-byte Folded Spill
setb -96(%rbp) ## 1-byte Folded Spill
movq %rcx, %rax
adcq %rdx, %rax
setb -320(%rbp) ## 1-byte Folded Spill
addb $255, -512(%rbp) ## 1-byte Folded Spill
movq -200(%rbp), %rax ## 8-byte Reload
adcq %rax, -256(%rbp) ## 8-byte Folded Spill
movq -360(%rbp), %rax ## 8-byte Reload
adcq $0, %rax
addb $255, -304(%rbp) ## 1-byte Folded Spill
movq -216(%rbp), %rcx ## 8-byte Reload
adcq -72(%rbp), %rcx ## 8-byte Folded Reload
adcq $0, %rax
movq %rax, %rcx
movq %rax, -360(%rbp) ## 8-byte Spill
movq -648(%rbp), %rdx ## 8-byte Reload
cmpq -456(%rbp), %rdx ## 8-byte Folded Reload
movq -104(%rbp), %rax ## 8-byte Reload
adcq $0, %rax
movq %rax, -104(%rbp) ## 8-byte Spill
addb $255, -608(%rbp) ## 1-byte Folded Spill
adcq -664(%rbp), %rdx ## 8-byte Folded Reload
setb -528(%rbp) ## 1-byte Folded Spill
movq %rax, %rdx
adcq %rcx, %rdx
movq %rdx, %rsi
movq %rdx, -160(%rbp) ## 8-byte Spill
movq -368(%rbp), %rax ## 8-byte Reload
movq %rax, %rcx
movq -128(%rbp), %rdx ## 8-byte Reload
imulq %rdx, %rcx
movq %rcx, %rdi
movq %rcx, -456(%rbp) ## 8-byte Spill
mulq %rdx
movq %rdx, -480(%rbp) ## 8-byte Spill
xorl %eax, %eax
movq -168(%rbp), %rcx ## 8-byte Reload
cmpq -656(%rbp), %rcx ## 8-byte Folded Reload
setb %al
movq %rax, -128(%rbp) ## 8-byte Spill
addq %rdi, %rcx
adcq %rdx, %rax
movq %rax, -224(%rbp) ## 8-byte Spill
addb $255, -232(%rbp) ## 1-byte Folded Spill
adcq -592(%rbp), %r13 ## 8-byte Folded Reload
setb -304(%rbp) ## 1-byte Folded Spill
adcq %rsi, %rcx
movq %rcx, -496(%rbp) ## 8-byte Spill
movq -416(%rbp), %rax ## 8-byte Reload
movq -672(%rbp), %rdx ## 8-byte Reload
cmpq %rax, %rdx
adcq -64(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -592(%rbp) ## 8-byte Spill
addb $255, -680(%rbp) ## 1-byte Folded Spill
adcq -584(%rbp), %rdx ## 8-byte Folded Reload
setb -216(%rbp) ## 1-byte Folded Spill
adcq %rcx, %rax
movq %rax, -200(%rbp) ## 8-byte Spill
movq -56(%rbp), %rax ## 8-byte Reload
movq %rax, %r13
movq -544(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %r13
mulq %rcx
movq %rdx, -232(%rbp) ## 8-byte Spill
xorl %r9d, %r9d
cmpq %r8, %rbx
setb %r9b
movq -376(%rbp), %rax ## 8-byte Reload
movq %rax, %rdx
movq -408(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rdx
movq %rdx, %r11
mulq %rcx
movq %rdx, %r12
movq %rdx, -672(%rbp) ## 8-byte Spill
xorl %eax, %eax
movq %r10, %r15
cmpq %r14, %r10
setb %al
movq %rax, %rbx
movq -136(%rbp), %rax ## 8-byte Reload
movq %rax, %rdx
movq -344(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rdx
movq %rdx, %rsi
mulq %rcx
movq %rdx, %r14
movq %rdx, -288(%rbp) ## 8-byte Spill
xorl %eax, %eax
movq -640(%rbp), %r8 ## 8-byte Reload
cmpq -568(%rbp), %r8 ## 8-byte Folded Reload
setb %al
movq %rax, %rdi
movq -272(%rbp), %rax ## 8-byte Reload
movq %rax, %r10
movq -176(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %r10
mulq %rcx
movq %rdx, -120(%rbp) ## 8-byte Spill
xorl %eax, %eax
movq -296(%rbp), %rcx ## 8-byte Reload
cmpq -576(%rbp), %rcx ## 8-byte Folded Reload
setb %al
addq %rcx, %r10
adcq %rdx, %rax
movq %rax, -72(%rbp) ## 8-byte Spill
addq %r8, %rsi
movq %rsi, -608(%rbp) ## 8-byte Spill
adcq %r14, %rdi
movq %rdi, -144(%rbp) ## 8-byte Spill
addq %r15, %r11
movq %r11, -296(%rbp) ## 8-byte Spill
adcq %r12, %rbx
movq %rbx, -664(%rbp) ## 8-byte Spill
addq -424(%rbp), %r13 ## 8-byte Folded Reload
movq %r13, -648(%rbp) ## 8-byte Spill
adcq -232(%rbp), %r9 ## 8-byte Folded Reload
movq %r9, -512(%rbp) ## 8-byte Spill
addb $255, -336(%rbp) ## 1-byte Folded Spill
movq -696(%rbp), %rax ## 8-byte Reload
adcq -248(%rbp), %rax ## 8-byte Folded Reload
setb -640(%rbp) ## 1-byte Folded Spill
movq %r13, %rax
adcq -200(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -776(%rbp) ## 8-byte Spill
movq -192(%rbp), %r13 ## 8-byte Reload
movq -688(%rbp), %rcx ## 8-byte Reload
cmpq %r13, %rcx
adcq -264(%rbp), %r13 ## 8-byte Folded Reload
addb $255, -400(%rbp) ## 1-byte Folded Spill
adcq -720(%rbp), %rcx ## 8-byte Folded Reload
setb -560(%rbp) ## 1-byte Folded Spill
movq %r13, %rcx
adcq %rax, %rcx
movq %rcx, -720(%rbp) ## 8-byte Spill
addb $255, -80(%rbp) ## 1-byte Folded Spill
movq -472(%rbp), %rax ## 8-byte Reload
adcq -712(%rbp), %rax ## 8-byte Folded Reload
setb -424(%rbp) ## 1-byte Folded Spill
movq %r11, %rax
adcq %rcx, %rax
movq %rax, %rcx
movq %rax, -472(%rbp) ## 8-byte Spill
movq -184(%rbp), %rax ## 8-byte Reload
movq -704(%rbp), %rdx ## 8-byte Reload
cmpq %rax, %rdx
adcq -48(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -696(%rbp) ## 8-byte Spill
addb $255, -392(%rbp) ## 1-byte Folded Spill
adcq -384(%rbp), %rdx ## 8-byte Folded Reload
setb -384(%rbp) ## 1-byte Folded Spill
movq %rax, %rdi
adcq %rcx, %rdi
movq %rdi, -712(%rbp) ## 8-byte Spill
addb $255, -328(%rbp) ## 1-byte Folded Spill
movq -600(%rbp), %rax ## 8-byte Reload
adcq -488(%rbp), %rax ## 8-byte Folded Reload
setb -600(%rbp) ## 1-byte Folded Spill
movq %rsi, %rax
adcq %rdi, %rax
movq %rax, %rcx
movq %rax, -704(%rbp) ## 8-byte Spill
movq -624(%rbp), %rax ## 8-byte Reload
movq -464(%rbp), %rdx ## 8-byte Reload
cmpq %rax, %rdx
movq %rax, %rbx
adcq -432(%rbp), %rbx ## 8-byte Folded Reload
movq %rbx, -688(%rbp) ## 8-byte Spill
addb $255, -536(%rbp) ## 1-byte Folded Spill
adcq -448(%rbp), %rdx ## 8-byte Folded Reload
movq -896(%rbp), %rax ## 8-byte Reload
movq 44(%rax), %rsi
setb -464(%rbp) ## 1-byte Folded Spill
adcq %rcx, %rbx
movq -280(%rbp), %r8 ## 8-byte Reload
movq %r8, %rcx
imulq %rsi, %rcx
movq -728(%rbp), %rax ## 8-byte Reload
movq %rax, %r14
imulq %rsi, %r14
mulq %rsi
movq %rdx, %r9
movq -792(%rbp), %r15 ## 8-byte Reload
movq %r15, %rax
addq %r14, %rax
leaq (%rcx,%rdx), %rax
movq -784(%rbp), %r11 ## 8-byte Reload
adcq %r11, %rax
movq %rax, -728(%rbp) ## 8-byte Spill
movb -152(%rbp), %dl ## 1-byte Reload
movl %edx, %eax
addb $255, %al
movq %r10, %rax
adcq %rbx, %rax
movq %rax, -448(%rbp) ## 8-byte Spill
xorl %r12d, %r12d
movq -360(%rbp), %rax ## 8-byte Reload
cmpq -504(%rbp), %rax ## 8-byte Folded Reload
setb %r12b
movq -128(%rbp), %rax ## 8-byte Reload
addq -480(%rbp), %rax ## 8-byte Folded Reload
movq -168(%rbp), %rdi ## 8-byte Reload
addq %rdi, -456(%rbp) ## 8-byte Folded Spill
adcq %rax, %r12
addb $255, %dl
adcq %r10, %rbx
setb -128(%rbp) ## 1-byte Folded Spill
movq %r8, %rax
mulq %rsi
movq %rdx, %rdi
movq -208(%rbp), %r8 ## 8-byte Reload
movq %r8, %rbx
imulq %rsi, %rbx
addq %r9, %rcx
adcq %rbx, %rdi
addq %r15, %r14
movq %r14, -336(%rbp) ## 8-byte Spill
adcq %r11, %rcx
setb %r11b
movq %rdi, %rax
movq -736(%rbp), %r14 ## 8-byte Reload
adcq %r14, %rax
movq %rax, -584(%rbp) ## 8-byte Spill
movq -112(%rbp), %rax ## 8-byte Reload
imulq %rsi, %rax
movq %rax, %r9
movq -552(%rbp), %rax ## 8-byte Reload
movq %rax, %r10
mulq %rsi
movq %rdx, %rcx
movq %rdx, -568(%rbp) ## 8-byte Spill
movq %r8, %rax
mulq %rsi
movq %rsi, -256(%rbp) ## 8-byte Spill
imulq %rsi, %r10
movq %r10, -656(%rbp) ## 8-byte Spill
cmpq %rbx, %rdi
movq %rdx, %rax
adcq $0, %rax
movq %rax, -576(%rbp) ## 8-byte Spill
addq %r10, %rax
movq %rax, %rdx
movq %rax, -552(%rbp) ## 8-byte Spill
adcq %rcx, %r9
movq %r9, -488(%rbp) ## 8-byte Spill
setb -680(%rbp) ## 1-byte Folded Spill
addb $255, %r11b
adcq %r14, %rdi
setb -208(%rbp) ## 1-byte Folded Spill
movq -616(%rbp), %r14 ## 8-byte Reload
movq %r14, %rax
adcq %rdx, %rax
setb -480(%rbp) ## 1-byte Folded Spill
movq %r9, %rax
adcq -240(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -152(%rbp) ## 8-byte Spill
addb $255, -528(%rbp) ## 1-byte Folded Spill
movq -360(%rbp), %rax ## 8-byte Reload
adcq %rax, -104(%rbp) ## 8-byte Folded Spill
adcq $0, %r12
addb $255, -304(%rbp) ## 1-byte Folded Spill
movq -160(%rbp), %rax ## 8-byte Reload
adcq %rax, -456(%rbp) ## 8-byte Folded Spill
adcq $0, %r12
movq %r12, -248(%rbp) ## 8-byte Spill
movq -592(%rbp), %rcx ## 8-byte Reload
cmpq -416(%rbp), %rcx ## 8-byte Folded Reload
movq -64(%rbp), %rax ## 8-byte Reload
adcq $0, %rax
movq %rax, -64(%rbp) ## 8-byte Spill
addb $255, -216(%rbp) ## 1-byte Folded Spill
adcq -496(%rbp), %rcx ## 8-byte Folded Reload
setb -400(%rbp) ## 1-byte Folded Spill
movq %rax, %rcx
adcq %r12, %rcx
movq %rcx, -168(%rbp) ## 8-byte Spill
movq -368(%rbp), %rax ## 8-byte Reload
movq %rax, %rdx
movq -544(%rbp), %rdi ## 8-byte Reload
imulq %rdi, %rdx
movq %rdx, %rsi
movq %rdx, -416(%rbp) ## 8-byte Spill
mulq %rdi
movq %rdx, -592(%rbp) ## 8-byte Spill
xorl %edi, %edi
movq -512(%rbp), %rax ## 8-byte Reload
cmpq -232(%rbp), %rax ## 8-byte Folded Reload
setb %dil
movq %rdi, -104(%rbp) ## 8-byte Spill
movq %rax, %rbx
addq %rsi, %rbx
movq %rdi, %rax
adcq %rdx, %rax
movq %rax, -544(%rbp) ## 8-byte Spill
addb $255, -640(%rbp) ## 1-byte Folded Spill
movq -648(%rbp), %rax ## 8-byte Reload
adcq -200(%rbp), %rax ## 8-byte Folded Reload
setb -896(%rbp) ## 1-byte Folded Spill
adcq %rcx, %rbx
movq %rbx, -160(%rbp) ## 8-byte Spill
movq -192(%rbp), %rax ## 8-byte Reload
cmpq %rax, %r13
adcq -264(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -216(%rbp) ## 8-byte Spill
addb $255, -560(%rbp) ## 1-byte Folded Spill
adcq -776(%rbp), %r13 ## 8-byte Folded Reload
setb -80(%rbp) ## 1-byte Folded Spill
adcq %rbx, %rax
movq %rax, %r15
movq %rax, -496(%rbp) ## 8-byte Spill
movq -56(%rbp), %rax ## 8-byte Reload
movq %rax, %rbx
movq -408(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rbx
mulq %rcx
movq %rdx, %r12
movq %rdx, -792(%rbp) ## 8-byte Spill
xorl %eax, %eax
movq -664(%rbp), %r9 ## 8-byte Reload
cmpq -672(%rbp), %r9 ## 8-byte Folded Reload
setb %al
movq %rax, %rsi
movq -376(%rbp), %rax ## 8-byte Reload
movq %rax, %rdx
movq -344(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rdx
movq %rdx, %r11
mulq %rcx
movq %rdx, %r13
movq %rdx, -536(%rbp) ## 8-byte Spill
xorl %eax, %eax
movq -144(%rbp), %r8 ## 8-byte Reload
cmpq -288(%rbp), %r8 ## 8-byte Folded Reload
setb %al
movq %rax, %rcx
movq -136(%rbp), %rax ## 8-byte Reload
movq %rax, %r10
movq -176(%rbp), %rdx ## 8-byte Reload
imulq %rdx, %r10
mulq %rdx
movq %rdx, -232(%rbp) ## 8-byte Spill
xorl %eax, %eax
movq -72(%rbp), %rdi ## 8-byte Reload
cmpq -120(%rbp), %rdi ## 8-byte Folded Reload
setb %al
addq %rdi, %r10
adcq %rdx, %rax
movq %rax, -392(%rbp) ## 8-byte Spill
movq %r11, %rdi
addq %r8, %rdi
movq %rdi, -328(%rbp) ## 8-byte Spill
adcq %r13, %rcx
movq %rcx, -120(%rbp) ## 8-byte Spill
addq %r9, %rbx
movq %rbx, -288(%rbp) ## 8-byte Spill
adcq %r12, %rsi
movq %rsi, -456(%rbp) ## 8-byte Spill
addb $255, -424(%rbp) ## 1-byte Folded Spill
movq -720(%rbp), %rax ## 8-byte Reload
adcq -296(%rbp), %rax ## 8-byte Folded Reload
setb -144(%rbp) ## 1-byte Folded Spill
movq %rbx, %rax
adcq %r15, %rax
movq %rax, %rdx
movq %rax, -736(%rbp) ## 8-byte Spill
movq -184(%rbp), %rax ## 8-byte Reload
movq -696(%rbp), %rcx ## 8-byte Reload
cmpq %rax, %rcx
adcq -48(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -784(%rbp) ## 8-byte Spill
addb $255, -384(%rbp) ## 1-byte Folded Spill
adcq -472(%rbp), %rcx ## 8-byte Folded Reload
setb -384(%rbp) ## 1-byte Folded Spill
movq %rax, %rcx
adcq %rdx, %rcx
movq %rcx, -72(%rbp) ## 8-byte Spill
addb $255, -600(%rbp) ## 1-byte Folded Spill
movq -712(%rbp), %rax ## 8-byte Reload
adcq -608(%rbp), %rax ## 8-byte Folded Reload
setb -504(%rbp) ## 1-byte Folded Spill
movq %rdi, %rax
adcq %rcx, %rax
movq %rax, %rcx
movq %rax, -304(%rbp) ## 8-byte Spill
movq -624(%rbp), %rax ## 8-byte Reload
movq -688(%rbp), %rdx ## 8-byte Reload
cmpq %rax, %rdx
movq %rax, %rsi
adcq -432(%rbp), %rsi ## 8-byte Folded Reload
movq %rsi, -296(%rbp) ## 8-byte Spill
addb $255, -464(%rbp) ## 1-byte Folded Spill
adcq -704(%rbp), %rdx ## 8-byte Folded Reload
setb -528(%rbp) ## 1-byte Folded Spill
movq %rsi, %r12
adcq %rcx, %r12
movb -128(%rbp), %al ## 1-byte Reload
addb $255, %al
movq %r12, %rax
adcq %r10, %rax
setb -200(%rbp) ## 1-byte Folded Spill
addb $255, -208(%rbp) ## 1-byte Folded Spill
movq -552(%rbp), %rbx ## 8-byte Reload
adcq %r14, %rbx
movq %rbx, -552(%rbp) ## 8-byte Spill
movq -336(%rbp), %rcx ## 8-byte Reload
movq %rcx, %rax
imulq -800(%rbp), %rax ## 8-byte Folded Reload
movq %rax, %rsi
movq %rax, -280(%rbp) ## 8-byte Spill
movq %rcx, %r9
shlq $32, %r9
movq %rcx, %rax
movl $4294967295, %edx ## imm = 0xFFFFFFFF
mulq %rdx
movq %rdx, -208(%rbp) ## 8-byte Spill
xorl %eax, %eax
subq %rcx, %r9
setb %al
movq -728(%rbp), %rdi ## 8-byte Reload
leaq (%rdi,%rax), %rcx
addq %rdx, %rcx
movq %rcx, -360(%rbp) ## 8-byte Spill
movl $0, %edx
setb %dl
addq %rax, %rdi
adcq -584(%rbp), %rdx ## 8-byte Folded Reload
movq %rdx, -424(%rbp) ## 8-byte Spill
setb -616(%rbp) ## 1-byte Folded Spill
movq %rsi, %rax
adcq %rbx, %rax
movq %rax, -728(%rbp) ## 8-byte Spill
movq -656(%rbp), %rax ## 8-byte Reload
addq %rax, -576(%rbp) ## 8-byte Folded Spill
adcq $0, -568(%rbp) ## 8-byte Folded Spill
setb %bl
addb $255, -96(%rbp) ## 1-byte Folded Spill
movq -88(%rbp), %r8 ## 8-byte Reload
movq -312(%rbp), %rdi ## 8-byte Reload
adcq %rdi, %r8
movq -112(%rbp), %rax ## 8-byte Reload
movq -256(%rbp), %r14 ## 8-byte Reload
mulq %r14
movq %rdx, -112(%rbp) ## 8-byte Spill
addb $255, -680(%rbp) ## 1-byte Folded Spill
movzbl %bl, %r13d
adcq %rdx, %r13
movq -352(%rbp), %r11 ## 8-byte Reload
cmpq %r11, %rdi
movq %r11, %rax
movq -520(%rbp), %rdi ## 8-byte Reload
adcq %rdi, %rax
movb -320(%rbp), %bl ## 1-byte Reload
movl %ebx, %edx
addb $255, %dl
movq %rax, %rcx
movq -448(%rbp), %r15 ## 8-byte Reload
adcq %r15, %rcx
movq %rcx, -96(%rbp) ## 8-byte Spill
xorl %esi, %esi
movq -248(%rbp), %rdx ## 8-byte Reload
cmpq -224(%rbp), %rdx ## 8-byte Folded Reload
setb %sil
movq -104(%rbp), %rcx ## 8-byte Reload
addq -592(%rbp), %rcx ## 8-byte Folded Reload
movq -512(%rbp), %rdx ## 8-byte Reload
addq %rdx, -416(%rbp) ## 8-byte Folded Spill
adcq %rcx, %rsi
movq %rsi, -104(%rbp) ## 8-byte Spill
addb $255, -128(%rbp) ## 1-byte Folded Spill
adcq %r10, %r12
movq %r11, %rcx
cmpq %r11, %rax
adcq %rdi, %rcx
addb $255, %bl
adcq %r15, %rax
setb %r10b
movq %rcx, %rax
movq %rcx, -312(%rbp) ## 8-byte Spill
adcq %r12, %rax
movq %rax, -128(%rbp) ## 8-byte Spill
movq -632(%rbp), %rax ## 8-byte Reload
movq %rax, %rbx
movq %r14, %rsi
imulq %r14, %rbx
addb $255, -480(%rbp) ## 1-byte Folded Spill
movq -488(%rbp), %rdi ## 8-byte Reload
adcq -240(%rbp), %rdi ## 8-byte Folded Reload
setb %r11b
leaq (%r13,%rbx), %rdx
adcq %r8, %rdx
movq %rdx, -240(%rbp) ## 8-byte Spill
movq %r8, %r14
addb $255, %r10b
adcq %rcx, %r12
setb -320(%rbp) ## 1-byte Folded Spill
setb -88(%rbp) ## 1-byte Folded Spill
mulq %rsi
movq %rdx, %r8
xorl %edi, %edi
cmpq -112(%rbp), %r13 ## 8-byte Folded Reload
setb %dil
addq %r13, %rbx
adcq %rdx, %rdi
addb $255, %r11b
adcq %r14, %rbx
setb %r11b
movq -336(%rbp), %r14 ## 8-byte Reload
subq %r14, %r9
addb $255, -616(%rbp) ## 1-byte Folded Spill
movq -280(%rbp), %rcx ## 8-byte Reload
adcq %rcx, -552(%rbp) ## 8-byte Folded Spill
setb %r12b
movq -208(%rbp), %r15 ## 8-byte Reload
leaq (%r9,%r15), %rax
movq -152(%rbp), %r10 ## 8-byte Reload
adcq %r10, %rax
movq %rax, -552(%rbp) ## 8-byte Spill
movq -440(%rbp), %rax ## 8-byte Reload
movq %rax, %rsi
movq -256(%rbp), %rbx ## 8-byte Reload
imulq %rbx, %rsi
mulq %rbx
movq %rdx, -224(%rbp) ## 8-byte Spill
xorl %r13d, %r13d
cmpq %r8, %rdi
setb %r13b
addq %rdi, %rsi
adcq %rdx, %r13
movl %r11d, %eax
addb $255, %al
movq %rsi, %rax
movq -96(%rbp), %rdx ## 8-byte Reload
adcq %rdx, %rax
movq %rax, %rdi
movq %rax, -448(%rbp) ## 8-byte Spill
movq -272(%rbp), %rax ## 8-byte Reload
imulq %rbx, %rax
movq %rax, -112(%rbp) ## 8-byte Spill
addb $255, %r11b
adcq %rdx, %rsi
setb -488(%rbp) ## 1-byte Folded Spill
leaq (%r13,%rax), %rax
adcq -128(%rbp), %rax ## 8-byte Folded Reload
setb -512(%rbp) ## 1-byte Folded Spill
setb -96(%rbp) ## 1-byte Folded Spill
movq %r14, %rax
movl $4294967294, %edx ## imm = 0xFFFFFFFE
mulq %rdx
addq %r15, %r9
adcq %rcx, %rdx
cmpq %rcx, %rdx
movq %rcx, %r14
adcq %r15, %r14
addb $255, %r12b
adcq %r10, %r9
setb -440(%rbp) ## 1-byte Folded Spill
movq -240(%rbp), %rax ## 8-byte Reload
adcq %rdx, %rax
movq %rdx, %r12
setb -616(%rbp) ## 1-byte Folded Spill
movq %r14, %rax
adcq %rdi, %rax
movq %rax, -632(%rbp) ## 8-byte Spill
addb $255, -400(%rbp) ## 1-byte Folded Spill
movq -248(%rbp), %rax ## 8-byte Reload
adcq %rax, -64(%rbp) ## 8-byte Folded Spill
movq -104(%rbp), %r9 ## 8-byte Reload
adcq $0, %r9
addb $255, -896(%rbp) ## 1-byte Folded Spill
movq -168(%rbp), %rax ## 8-byte Reload
adcq %rax, -416(%rbp) ## 8-byte Folded Spill
adcq $0, %r9
movq %r9, -104(%rbp) ## 8-byte Spill
movq -216(%rbp), %rax ## 8-byte Reload
cmpq -192(%rbp), %rax ## 8-byte Folded Reload
movq -264(%rbp), %rcx ## 8-byte Reload
adcq $0, %rcx
movq %rcx, -264(%rbp) ## 8-byte Spill
addb $255, -80(%rbp) ## 1-byte Folded Spill
adcq -160(%rbp), %rax ## 8-byte Folded Reload
setb -416(%rbp) ## 1-byte Folded Spill
movq %rcx, %rax
adcq %r9, %rax
movq %rax, %rsi
movq %rax, -160(%rbp) ## 8-byte Spill
movq -368(%rbp), %rax ## 8-byte Reload
movq %rax, %r15
movq -408(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %r15
mulq %rcx
movq %rdx, -480(%rbp) ## 8-byte Spill
xorl %ecx, %ecx
movq -456(%rbp), %rax ## 8-byte Reload
cmpq -792(%rbp), %rax ## 8-byte Folded Reload
setb %cl
movq %rax, %rdi
addq %r15, %rdi
movq %rcx, %rax
adcq %rdx, %rax
movq %rax, -168(%rbp) ## 8-byte Spill
addb $255, -144(%rbp) ## 1-byte Folded Spill
movq -496(%rbp), %rax ## 8-byte Reload
adcq %rax, -288(%rbp) ## 8-byte Folded Spill
setb -80(%rbp) ## 1-byte Folded Spill
adcq %rsi, %rdi
movq %rdi, -336(%rbp) ## 8-byte Spill
movq -184(%rbp), %rax ## 8-byte Reload
movq -784(%rbp), %rdx ## 8-byte Reload
cmpq %rax, %rdx
adcq -48(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -400(%rbp) ## 8-byte Spill
addb $255, -384(%rbp) ## 1-byte Folded Spill
adcq -736(%rbp), %rdx ## 8-byte Folded Reload
setb -192(%rbp) ## 1-byte Folded Spill
adcq %rdi, %rax
movq %rax, %r8
movq %rax, -216(%rbp) ## 8-byte Spill
movq -56(%rbp), %rax ## 8-byte Reload
movq %rax, %r9
movq -344(%rbp), %rdx ## 8-byte Reload
imulq %rdx, %r9
mulq %rdx
movq %rdx, %rdi
movq %rdx, -152(%rbp) ## 8-byte Spill
xorl %r11d, %r11d
movq -120(%rbp), %r10 ## 8-byte Reload
cmpq -536(%rbp), %r10 ## 8-byte Folded Reload
setb %r11b
movq -376(%rbp), %rax ## 8-byte Reload
movq %rax, %rdx
movq -176(%rbp), %rsi ## 8-byte Reload
imulq %rsi, %rdx
movq %rdx, %rbx
mulq %rsi
movq %rdx, -64(%rbp) ## 8-byte Spill
xorl %esi, %esi
movq -392(%rbp), %rax ## 8-byte Reload
cmpq -232(%rbp), %rax ## 8-byte Folded Reload
setb %sil
addq %rax, %rbx
movq %rbx, -232(%rbp) ## 8-byte Spill
adcq %rdx, %rsi
movq %rsi, -408(%rbp) ## 8-byte Spill
addq %r10, %r9
movq %r9, -536(%rbp) ## 8-byte Spill
adcq %rdi, %r11
addb $255, -504(%rbp) ## 1-byte Folded Spill
movq -72(%rbp), %rax ## 8-byte Reload
adcq -328(%rbp), %rax ## 8-byte Folded Reload
setb -328(%rbp) ## 1-byte Folded Spill
movq %r9, %rax
adcq %r8, %rax
movq %rax, %rdi
movq %rax, -120(%rbp) ## 8-byte Spill
movq -624(%rbp), %rax ## 8-byte Reload
movq -296(%rbp), %rdx ## 8-byte Reload
cmpq %rax, %rdx
adcq -432(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -72(%rbp) ## 8-byte Spill
addb $255, -528(%rbp) ## 1-byte Folded Spill
adcq -304(%rbp), %rdx ## 8-byte Folded Reload
setb -504(%rbp) ## 1-byte Folded Spill
movq %rax, %rsi
adcq %rdi, %rsi
movq %rsi, -392(%rbp) ## 8-byte Spill
movb -200(%rbp), %al ## 1-byte Reload
addb $255, %al
movq %rbx, %r9
adcq %rsi, %r9
movq -352(%rbp), %rdi ## 8-byte Reload
cmpq %rdi, -312(%rbp) ## 8-byte Folded Reload
adcq -520(%rbp), %rdi ## 8-byte Folded Reload
addb $255, -88(%rbp) ## 1-byte Folded Spill
movq %r9, %rax
adcq %rdi, %rax
setb -248(%rbp) ## 1-byte Folded Spill
setb -896(%rbp) ## 1-byte Folded Spill
addb $255, -440(%rbp) ## 1-byte Folded Spill
adcq -240(%rbp), %r12 ## 8-byte Folded Reload
movq %r12, -440(%rbp) ## 8-byte Spill
movq -272(%rbp), %rax ## 8-byte Reload
movq -256(%rbp), %rbx ## 8-byte Reload
mulq %rbx
movq %rdx, %r8
xorl %esi, %esi
cmpq -224(%rbp), %r13 ## 8-byte Folded Reload
setb %sil
movq -112(%rbp), %rax ## 8-byte Reload
addq %r13, %rax
adcq %rdx, %rsi
addb $255, -488(%rbp) ## 1-byte Folded Spill
adcq -128(%rbp), %rax ## 8-byte Folded Reload
movq %rax, %rdx
movq %rax, -112(%rbp) ## 8-byte Spill
xorl %r12d, %r12d
movq -104(%rbp), %r10 ## 8-byte Reload
cmpq -544(%rbp), %r10 ## 8-byte Folded Reload
setb %r12b
addq -480(%rbp), %rcx ## 8-byte Folded Reload
addq -456(%rbp), %r15 ## 8-byte Folded Reload
adcq %rcx, %r12
addb $255, -320(%rbp) ## 1-byte Folded Spill
adcq %rdi, %r9
movq -280(%rbp), %rax ## 8-byte Reload
cmpq %rax, %r14
adcq -208(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -240(%rbp) ## 8-byte Spill
addb $255, -616(%rbp) ## 1-byte Folded Spill
adcq -448(%rbp), %r14 ## 8-byte Folded Reload
setb -128(%rbp) ## 1-byte Folded Spill
adcq %rdx, %rax
movq %rax, -272(%rbp) ## 8-byte Spill
movq -136(%rbp), %rax ## 8-byte Reload
movq %rax, %r14
imulq %rbx, %r14
mulq %rbx
movq %rdx, -456(%rbp) ## 8-byte Spill
xorl %r13d, %r13d
cmpq %r8, %rsi
setb %r13b
addq %rsi, %r14
adcq %rdx, %r13
addb $255, -96(%rbp) ## 1-byte Folded Spill
movq %r14, %rax
adcq %r9, %rax
movq %rax, -136(%rbp) ## 8-byte Spill
movq %r9, %rbx
addb $255, -416(%rbp) ## 1-byte Folded Spill
adcq %r10, -264(%rbp) ## 8-byte Folded Spill
adcq $0, %r12
addb $255, -80(%rbp) ## 1-byte Folded Spill
adcq -160(%rbp), %r15 ## 8-byte Folded Reload
adcq $0, %r12
xorl %eax, %eax
cmpq -168(%rbp), %r12 ## 8-byte Folded Reload
setb %al
movq %rax, %rsi
movq -368(%rbp), %r10 ## 8-byte Reload
movq %r10, %rax
movq -344(%rbp), %rcx ## 8-byte Reload
mulq %rcx
imulq %rcx, %r10
xorl %ecx, %ecx
cmpq -152(%rbp), %r11 ## 8-byte Folded Reload
setb %cl
movq %r11, %rax
addq %r10, %rax
leaq (%rcx,%rdx), %rax
adcq %rdx, %rcx
movq %rcx, -416(%rbp) ## 8-byte Spill
addq %r11, %r10
adcq %rax, %rsi
movq %rsi, -544(%rbp) ## 8-byte Spill
movq -400(%rbp), %rcx ## 8-byte Reload
cmpq -184(%rbp), %rcx ## 8-byte Folded Reload
movq -48(%rbp), %rax ## 8-byte Reload
adcq $0, %rax
movq %rax, -48(%rbp) ## 8-byte Spill
addb $255, -192(%rbp) ## 1-byte Folded Spill
adcq -336(%rbp), %rcx ## 8-byte Folded Reload
setb -184(%rbp) ## 1-byte Folded Spill
movq %rax, %rcx
adcq %r12, %rcx
movq %rcx, -336(%rbp) ## 8-byte Spill
addb $255, -328(%rbp) ## 1-byte Folded Spill
movq -216(%rbp), %rax ## 8-byte Reload
adcq %rax, -536(%rbp) ## 8-byte Folded Spill
setb -344(%rbp) ## 1-byte Folded Spill
movq %r10, %rax
adcq %rcx, %rax
movq %rax, %rdx
movq %rax, -168(%rbp) ## 8-byte Spill
movq -624(%rbp), %r15 ## 8-byte Reload
movq -72(%rbp), %rcx ## 8-byte Reload
cmpq %r15, %rcx
adcq -432(%rbp), %r15 ## 8-byte Folded Reload
addb $255, -504(%rbp) ## 1-byte Folded Spill
adcq -120(%rbp), %rcx ## 8-byte Folded Reload
setb -400(%rbp) ## 1-byte Folded Spill
movq %r15, %rax
adcq %rdx, %rax
movq %rax, %rcx
movq %rax, -536(%rbp) ## 8-byte Spill
movq -56(%rbp), %rax ## 8-byte Reload
movq %rax, %r9
movq -176(%rbp), %rdx ## 8-byte Reload
imulq %rdx, %r9
mulq %rdx
movq %rdx, -328(%rbp) ## 8-byte Spill
xorl %esi, %esi
movq -408(%rbp), %rax ## 8-byte Reload
cmpq -64(%rbp), %rax ## 8-byte Folded Reload
setb %sil
addq %rax, %r9
adcq %rdx, %rsi
movq %rsi, -192(%rbp) ## 8-byte Spill
addb $255, -200(%rbp) ## 1-byte Folded Spill
movq -392(%rbp), %rax ## 8-byte Reload
adcq -232(%rbp), %rax ## 8-byte Folded Reload
setb -408(%rbp) ## 1-byte Folded Spill
movq %r9, %rdx
adcq %rcx, %rdx
movq -352(%rbp), %rax ## 8-byte Reload
cmpq %rax, %rdi
adcq -520(%rbp), %rax ## 8-byte Folded Reload
addb $255, -896(%rbp) ## 1-byte Folded Spill
movq %rax, %rcx
movq %rax, -232(%rbp) ## 8-byte Spill
adcq %rdx, %rcx
movq -376(%rbp), %r8 ## 8-byte Reload
movq %r8, %rsi
movq -256(%rbp), %rdi ## 8-byte Reload
imulq %rdi, %rsi
addb $255, -512(%rbp) ## 1-byte Folded Spill
adcq %rbx, %r14
setb %r11b
leaq (%r13,%rsi), %rbx
adcq %rcx, %rbx
movq %rbx, -64(%rbp) ## 8-byte Spill
addb $255, -248(%rbp) ## 1-byte Folded Spill
adcq %rax, %rdx
setb -104(%rbp) ## 1-byte Folded Spill
movq %r8, %rax
mulq %rdi
movq %rdx, -248(%rbp) ## 8-byte Spill
xorl %edi, %edi
cmpq -456(%rbp), %r13 ## 8-byte Folded Reload
setb %dil
addq %r13, %rsi
adcq %rdx, %rdi
addb $255, %r11b
adcq %rcx, %rsi
setb -200(%rbp) ## 1-byte Folded Spill
setb -376(%rbp) ## 1-byte Folded Spill
movq -280(%rbp), %r11 ## 8-byte Reload
movq -240(%rbp), %rax ## 8-byte Reload
cmpq %r11, %rax
movq %r11, %rbx
movq -208(%rbp), %rdx ## 8-byte Reload
adcq %rdx, %rbx
addb $255, -128(%rbp) ## 1-byte Folded Spill
adcq -112(%rbp), %rax ## 8-byte Folded Reload
setb %cl
movq %rbx, %rax
movq -136(%rbp), %r14 ## 8-byte Reload
adcq %r14, %rax
movq %rax, -264(%rbp) ## 8-byte Spill
cmpq %r11, %rbx
adcq %rdx, %r11
addb $255, %cl
adcq %r14, %rbx
setb -112(%rbp) ## 1-byte Folded Spill
movq %r11, %rax
adcq -64(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -136(%rbp) ## 8-byte Spill
addb $255, -184(%rbp) ## 1-byte Folded Spill
adcq %r12, -48(%rbp) ## 8-byte Folded Spill
movq -544(%rbp), %r13 ## 8-byte Reload
adcq $0, %r13
addb $255, -344(%rbp) ## 1-byte Folded Spill
adcq %r10, -336(%rbp) ## 8-byte Folded Spill
adcq $0, %r13
cmpq -624(%rbp), %r15 ## 8-byte Folded Reload
movq -432(%rbp), %rax ## 8-byte Reload
adcq $0, %rax
movq %rax, -432(%rbp) ## 8-byte Spill
addb $255, -400(%rbp) ## 1-byte Folded Spill
adcq -168(%rbp), %r15 ## 8-byte Folded Reload
setb -48(%rbp) ## 1-byte Folded Spill
movq %rax, %r15
adcq %r13, %r15
movq -368(%rbp), %rax ## 8-byte Reload
movq %rax, %r8
movq -176(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %r8
mulq %rcx
movq %rdx, -240(%rbp) ## 8-byte Spill
xorl %esi, %esi
movq -192(%rbp), %r10 ## 8-byte Reload
cmpq -328(%rbp), %r10 ## 8-byte Folded Reload
setb %sil
addq %r8, %r10
movq %rsi, %rax
adcq %rdx, %rax
movq %rax, -176(%rbp) ## 8-byte Spill
addb $255, -408(%rbp) ## 1-byte Folded Spill
adcq -536(%rbp), %r9 ## 8-byte Folded Reload
setb -128(%rbp) ## 1-byte Folded Spill
adcq %r15, %r10
movq -352(%rbp), %r14 ## 8-byte Reload
cmpq %r14, -232(%rbp) ## 8-byte Folded Reload
adcq -520(%rbp), %r14 ## 8-byte Folded Reload
movb -104(%rbp), %al ## 1-byte Reload
addb $255, %al
movq %r14, %rbx
adcq %r10, %rbx
movq %rbx, -184(%rbp) ## 8-byte Spill
movq -56(%rbp), %rax ## 8-byte Reload
movq %rax, %r9
movq -256(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %r9
mulq %rcx
movq %rdx, -56(%rbp) ## 8-byte Spill
xorl %r12d, %r12d
cmpq -248(%rbp), %rdi ## 8-byte Folded Reload
setb %r12b
addq %rdi, %r9
adcq %rdx, %r12
addb $255, -376(%rbp) ## 1-byte Folded Spill
movq %r9, %rax
adcq %rbx, %rax
movq %rax, %rdx
xorl %edi, %edi
cmpq -416(%rbp), %r13 ## 8-byte Folded Reload
setb %dil
addq -240(%rbp), %rsi ## 8-byte Folded Reload
addq -192(%rbp), %r8 ## 8-byte Folded Reload
adcq %rsi, %rdi
movq -280(%rbp), %rbx ## 8-byte Reload
cmpq %rbx, %r11
adcq -208(%rbp), %rbx ## 8-byte Folded Reload
addb $255, -112(%rbp) ## 1-byte Folded Spill
adcq -64(%rbp), %r11 ## 8-byte Folded Reload
setb -112(%rbp) ## 1-byte Folded Spill
movq %rbx, %rax
adcq %rdx, %rax
movq %rdx, %r11
movq %rax, -376(%rbp) ## 8-byte Spill
addb $255, -48(%rbp) ## 1-byte Folded Spill
adcq %r13, -432(%rbp) ## 8-byte Folded Spill
adcq $0, %rdi
addb $255, -128(%rbp) ## 1-byte Folded Spill
adcq %r15, %r8
adcq $0, %rdi
xorl %r13d, %r13d
cmpq -176(%rbp), %rdi ## 8-byte Folded Reload
setb %r13b
movq -368(%rbp), %rax ## 8-byte Reload
movq %rax, %rsi
mulq %rcx
imulq %rcx, %rsi
xorl %r15d, %r15d
cmpq -56(%rbp), %r12 ## 8-byte Folded Reload
setb %r15b
movq %r12, %rcx
addq %rsi, %rcx
leaq (%r15,%rdx), %rcx
adcq %rdx, %r15
addq %r12, %rsi
adcq %rcx, %r13
cmpq -352(%rbp), %r14 ## 8-byte Folded Reload
movq -520(%rbp), %rcx ## 8-byte Reload
adcq $0, %rcx
addb $255, -104(%rbp) ## 1-byte Folded Spill
adcq %r10, %r14
setb %r8b
movq %rcx, %rax
movq %rcx, %r10
adcq %rdi, %rax
addb $255, -200(%rbp) ## 1-byte Folded Spill
adcq -184(%rbp), %r9 ## 8-byte Folded Reload
setb %r9b
movq %rsi, %rdx
adcq %rax, %rdx
movq -280(%rbp), %r14 ## 8-byte Reload
cmpq %r14, %rbx
movq -208(%rbp), %rcx ## 8-byte Reload
adcq %rcx, %r14
addb $255, -112(%rbp) ## 1-byte Folded Spill
adcq %r11, %rbx
setb %r11b
movq %r14, %rbx
adcq %rdx, %rbx
movq %rbx, -56(%rbp) ## 8-byte Spill
addb $255, %r8b
adcq %rdi, %r10
adcq $0, %r13
addb $255, %r9b
adcq %rsi, %rax
adcq $0, %r13
xorl %r12d, %r12d
cmpq %r15, %r13
setb %r12b
addb $255, %r11b
adcq %r14, %rdx
setb -432(%rbp) ## 1-byte Folded Spill
movq %r13, %rax
adcq $0, %rax
movq %rax, -112(%rbp) ## 8-byte Spill
movq %r12, %rax
adcq $0, %rax
movq %rax, -64(%rbp) ## 8-byte Spill
cmpq -280(%rbp), %r14 ## 8-byte Folded Reload
adcq $0, %rcx
movq %rcx, -208(%rbp) ## 8-byte Spill
movl $4294967295, %eax ## imm = 0xFFFFFFFF
cmpq %rax, -360(%rbp) ## 8-byte Folded Reload
movq -424(%rbp), %rcx ## 8-byte Reload
movq %rcx, %rdx
sbbq $0, %rdx
movq %rdx, -280(%rbp) ## 8-byte Spill
cmpq %rdx, %rcx
movq -728(%rbp), %rcx ## 8-byte Reload
movq %rcx, %rsi
sbbq $0, %rsi
movq %rsi, -104(%rbp) ## 8-byte Spill
xorl %edi, %edi
movq -552(%rbp), %rdx ## 8-byte Reload
cmpq %rax, %rdx
movl $4294967295, %ebx ## imm = 0xFFFFFFFF
movl $0, %eax
sbbq %rax, %rax
cmpq %rsi, %rcx
movabsq $-4294967295, %r15 ## imm = 0xFFFFFFFF00000001
leaq (%rdx,%r15), %rcx
movq %rcx, %rdx
sbbq $0, %rdx
movq %rdx, -368(%rbp) ## 8-byte Spill
cmpq %rdx, %rcx
sbbq $0, %rax
movq -440(%rbp), %rdx ## 8-byte Reload
movq %rdx, %rcx
shrq %rcx
cmpq $2147483647, %rcx ## imm = 0x7FFFFFFF
leaq (%rdx,%r15), %rcx
leaq 1(%rax,%rcx), %rcx
movq %rcx, -256(%rbp) ## 8-byte Spill
movl $0, %esi
sbbq %rsi, %rsi
leaq (%rdx,%r15), %rax
incq %rax
cmpq %rcx, %rax
sbbq $0, %rsi
movq -632(%rbp), %rax ## 8-byte Reload
cmpq %rbx, %rax
movl $0, %r11d
sbbq %r11, %r11
addq %r15, %rax
addq %rax, %rsi
movq %rsi, -48(%rbp) ## 8-byte Spill
cmpq %rsi, %rax
sbbq $0, %r11
movq -272(%rbp), %rax ## 8-byte Reload
movl $4294967295, %ecx ## imm = 0xFFFFFFFF
cmpq %rcx, %rax
movl $0, %ebx
sbbq %rbx, %rbx
addq %r15, %rax
addq %rax, %r11
cmpq %r11, %rax
sbbq $0, %rbx
movq -264(%rbp), %rax ## 8-byte Reload
cmpq %rcx, %rax
movl $0, %r10d
sbbq %r10, %r10
addq %r15, %rax
addq %rax, %rbx
cmpq %rbx, %rax
sbbq $0, %r10
movq -136(%rbp), %rax ## 8-byte Reload
cmpq %rcx, %rax
movl $0, %r9d
sbbq %r9, %r9
addq %r15, %rax
addq %rax, %r10
cmpq %r10, %rax
sbbq $0, %r9
movq -376(%rbp), %rax ## 8-byte Reload
cmpq %rcx, %rax
movl $4294967295, %edx ## imm = 0xFFFFFFFF
movl $0, %esi
sbbq %rsi, %rsi
leaq (%rax,%r15), %r8
addq %r8, %r9
cmpq %r9, %r8
sbbq $0, %rsi
movq -56(%rbp), %rax ## 8-byte Reload
cmpq %rdx, %rax
movl $0, %r14d
sbbq %r14, %r14
addq %r15, %rax
addq %rax, %rsi
cmpq %rsi, %rax
sbbq $0, %r14
movq -112(%rbp), %rcx ## 8-byte Reload
movq -208(%rbp), %r8 ## 8-byte Reload
leaq (%rcx,%r8), %rax
cmpq %rdx, %rax
leaq (%rax,%r15), %rdx
movl $0, %eax
sbbq %rax, %rax
addq %rdx, %r14
cmpq %r14, %rdx
sbbq $0, %rax
addq %r8, %rcx
movq %rcx, -112(%rbp) ## 8-byte Spill
adcq -64(%rbp), %rax ## 8-byte Folded Reload
addb $255, -432(%rbp) ## 1-byte Folded Spill
adcq %r13, %r8
adcq $0, %r12
cmpq %rax, %r12
movq -360(%rbp), %rax ## 8-byte Reload
leaq (%rax,%r15), %r15
sbbq %rdi, %rdi
movq -800(%rbp), %rcx ## 8-byte Reload
xorq %rdi, %rcx
andq %rdi, %rax
andq %rcx, %r15
orq %rax, %r15
movq -424(%rbp), %rax ## 8-byte Reload
andq %rdi, %rax
movq -280(%rbp), %rdx ## 8-byte Reload
andq %rcx, %rdx
orq %rax, %rdx
movq %rdx, -280(%rbp) ## 8-byte Spill
movq -728(%rbp), %r12 ## 8-byte Reload
andq %rdi, %r12
movq -104(%rbp), %rdx ## 8-byte Reload
andq %rcx, %rdx
orq %r12, %rdx
movq -552(%rbp), %r13 ## 8-byte Reload
andq %rdi, %r13
movq -368(%rbp), %r12 ## 8-byte Reload
andq %rcx, %r12
orq %r13, %r12
movq -440(%rbp), %rax ## 8-byte Reload
andq %rdi, %rax
movq -256(%rbp), %r13 ## 8-byte Reload
andq %rcx, %r13
orq %rax, %r13
movq -632(%rbp), %rax ## 8-byte Reload
andq %rdi, %rax
movq -48(%rbp), %r8 ## 8-byte Reload
andq %rcx, %r8
orq %rax, %r8
movq -272(%rbp), %rax ## 8-byte Reload
andq %rdi, %rax
andq %rcx, %r11
orq %rax, %r11
movq -264(%rbp), %rax ## 8-byte Reload
andq %rdi, %rax
andq %rcx, %rbx
orq %rax, %rbx
movq -136(%rbp), %rax ## 8-byte Reload
andq %rdi, %rax
andq %rcx, %r10
orq %rax, %r10
movq -376(%rbp), %rax ## 8-byte Reload
andq %rdi, %rax
andq %rcx, %r9
orq %rax, %r9
movq -56(%rbp), %rax ## 8-byte Reload
andq %rdi, %rax
andq %rcx, %rsi
orq %rax, %rsi
andq -112(%rbp), %rdi ## 8-byte Folded Reload
andq %r14, %rcx
orq %rdi, %rcx
movq %rcx, %rdi
movq -1128(%rbp), %rcx ## 8-byte Reload
movq %r15, (%rcx)
movq -280(%rbp), %rax ## 8-byte Reload
movq %rax, 4(%rcx)
movq %rdx, 8(%rcx)
movq %r12, 12(%rcx)
movq %r13, 16(%rcx)
movq %r8, 20(%rcx)
movq %r11, 24(%rcx)
movq %rbx, 28(%rcx)
movq %r10, 32(%rcx)
movq %r9, 36(%rcx)
movq %rsi, 40(%rcx)
movq %rdi, 44(%rcx)
addq $960, %rsp ## imm = 0x3C0
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _fiat_p384_square ## -- Begin function fiat_p384_square
.p2align 4, 0x90
_fiat_p384_square: ## @fiat_p384_square
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $848, %rsp ## imm = 0x350
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdi, -224(%rbp) ## 8-byte Spill
movq (%rdi), %rcx
movq %rcx, -240(%rbp) ## 8-byte Spill
movq 4(%rdi), %r11
movq 8(%rdi), %rbx
movq 12(%rdi), %rax
movq %rax, -440(%rbp) ## 8-byte Spill
imulq %rcx, %rax
movq %rax, -176(%rbp) ## 8-byte Spill
movq %rbx, %rax
movq %rbx, %rdi
imulq %rcx, %rdi
movq %rbx, %r13
movq %rbx, -112(%rbp) ## 8-byte Spill
mulq %rcx
movq %rdx, %r9
movq %rdx, -200(%rbp) ## 8-byte Spill
movq %r11, %r15
imulq %rcx, %r15
movq %r11, %rax
mulq %rcx
movq %rdx, %r8
movq %rdx, -80(%rbp) ## 8-byte Spill
movq %rcx, %r14
movq %rcx, %rax
mulq %rcx
movq %rdx, %rbx
imulq %rcx, %r14
addq %r15, %rbx
movq %rdi, -392(%rbp) ## 8-byte Spill
movq %rdi, %rcx
adcq %r8, %rcx
cmpq %rdi, %rcx
adcq $0, %r9
movl $4294967294, %eax ## imm = 0xFFFFFFFE
incq %rax
movq %rax, -728(%rbp) ## 8-byte Spill
movq %r14, %r10
imulq %rax, %r10
movq %r10, -208(%rbp) ## 8-byte Spill
movq %r14, %rdi
shlq $32, %rdi
movl $4294967295, %edx ## imm = 0xFFFFFFFF
movq %r14, %rax
mulq %rdx
movq %rdx, -128(%rbp) ## 8-byte Spill
xorl %eax, %eax
subq %r14, %rdi
movq %rdi, -104(%rbp) ## 8-byte Spill
setb %al
leaq (%rbx,%rax), %r8
xorl %r12d, %r12d
addq %rdx, %r8
setb %r12b
addq %rax, %rbx
adcq %rcx, %r12
setb -72(%rbp) ## 1-byte Folded Spill
movq -176(%rbp), %rbx ## 8-byte Reload
leaq (%r9,%rbx), %rdi
adcq %r10, %rdi
movq %rdi, -336(%rbp) ## 8-byte Spill
movq %r13, %r10
movq %r11, -120(%rbp) ## 8-byte Spill
imulq %r11, %r10
movq %r11, %rax
mulq %r11
movq %rdx, %r13
imulq %r11, %r11
addq -80(%rbp), %r11 ## 8-byte Folded Reload
adcq %r10, %r13
movq %r8, %rdx
addq %r15, %rdx
movq %r11, %rcx
adcq %r12, %rcx
movq %rcx, -184(%rbp) ## 8-byte Spill
addq %r15, %r8
adcq %r12, %r11
setb -384(%rbp) ## 1-byte Folded Spill
movq %r13, %rax
adcq %rdi, %rax
movq %rax, -192(%rbp) ## 8-byte Spill
movq -440(%rbp), %r15 ## 8-byte Reload
movq %r15, %rax
movq -240(%rbp), %rdi ## 8-byte Reload
mulq %rdi
movq %r9, %rax
addq %rbx, %rax
movq %rdx, %rax
movq %rdx, %r11
movq %rdx, -56(%rbp) ## 8-byte Spill
adcq $0, %rax
setb %r12b
movq -224(%rbp), %rax ## 8-byte Reload
movq 16(%rax), %rax
movq %rax, -136(%rbp) ## 8-byte Spill
movq %rax, %rcx
imulq %rdi, %rcx
movq %rcx, -432(%rbp) ## 8-byte Spill
mulq %rdi
movq %rdx, -248(%rbp) ## 8-byte Spill
addq %rbx, %r9
adcq %r11, %rcx
movq %rcx, %rdi
movq %rcx, -64(%rbp) ## 8-byte Spill
movzbl %r12b, %eax
adcq %rdx, %rax
movq %rax, -80(%rbp) ## 8-byte Spill
movq %r14, %rax
movl $4294967294, %ecx ## imm = 0xFFFFFFFE
mulq %rcx
movq -104(%rbp), %rcx ## 8-byte Reload
subq %r14, %rcx
addq -128(%rbp), %rcx ## 8-byte Folded Reload
movq %rcx, -104(%rbp) ## 8-byte Spill
movq -208(%rbp), %rax ## 8-byte Reload
adcq %rax, %rdx
movq %rdx, -376(%rbp) ## 8-byte Spill
addb $255, -72(%rbp) ## 1-byte Folded Spill
adcq %rax, %r9
setb -328(%rbp) ## 1-byte Folded Spill
movq %rcx, %rax
adcq %rdi, %rax
movq %rax, %rdi
movq %rax, -168(%rbp) ## 8-byte Spill
movq %r15, %r9
movq -120(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %r9
movq -112(%rbp), %r11 ## 8-byte Reload
movq %r11, %rax
mulq %rcx
movq %rdx, %r12
movq %rdx, -72(%rbp) ## 8-byte Spill
cmpq %r10, %r13
adcq $0, %r12
addb $255, -384(%rbp) ## 1-byte Folded Spill
adcq -336(%rbp), %r13 ## 8-byte Folded Reload
setb -320(%rbp) ## 1-byte Folded Spill
leaq (%r12,%r9), %rax
adcq %rdi, %rax
movq %rax, %r13
movq %rax, -88(%rbp) ## 8-byte Spill
movq %r8, %r15
imulq -728(%rbp), %r15 ## 8-byte Folded Reload
movq %r15, -216(%rbp) ## 8-byte Spill
movq %r8, %rcx
shlq $32, %rcx
movq %r8, %rax
movl $4294967295, %edx ## imm = 0xFFFFFFFF
mulq %rdx
movq %rdx, -336(%rbp) ## 8-byte Spill
xorl %eax, %eax
subq %r8, %rcx
setb %al
movq -184(%rbp), %rdi ## 8-byte Reload
leaq (%rdi,%rax), %r14
xorl %ebx, %ebx
addq %rdx, %r14
setb %bl
addq %rax, %rdi
adcq -192(%rbp), %rbx ## 8-byte Folded Reload
setb -280(%rbp) ## 1-byte Folded Spill
adcq %r13, %r15
movq %r15, -416(%rbp) ## 8-byte Spill
imulq %r11, %r11
movq %r11, -152(%rbp) ## 8-byte Spill
addq -200(%rbp), %r10 ## 8-byte Folded Reload
movq -72(%rbp), %rdi ## 8-byte Reload
adcq %r11, %rdi
movq %rdi, -72(%rbp) ## 8-byte Spill
movq %r14, %rax
movq -392(%rbp), %rdx ## 8-byte Reload
addq %rdx, %rax
movq %r10, %rax
adcq %rbx, %rax
movq %rax, -552(%rbp) ## 8-byte Spill
addq %rdx, %r14
adcq %rbx, %r10
setb -504(%rbp) ## 1-byte Folded Spill
movq %rdi, %rax
adcq %r15, %rax
movq %rax, -296(%rbp) ## 8-byte Spill
movq -440(%rbp), %rax ## 8-byte Reload
movq -120(%rbp), %r10 ## 8-byte Reload
mulq %r10
movq %rdx, %r13
movq %r12, %rax
addq %r9, %rax
movq %rdx, %rax
movq %rdx, -384(%rbp) ## 8-byte Spill
adcq $0, %rax
setb %r15b
movq %rcx, %rdi
subq %r8, %rdi
movq -136(%rbp), %rcx ## 8-byte Reload
movq %rcx, %rax
mulq %r10
movq %rdx, %rbx
movq %rdx, -48(%rbp) ## 8-byte Spill
movq %r8, %rax
movl $4294967294, %edx ## imm = 0xFFFFFFFE
mulq %rdx
imulq %r10, %rcx
movq %rcx, -392(%rbp) ## 8-byte Spill
addq -336(%rbp), %rdi ## 8-byte Folded Reload
movq %rdi, %r8
movq %rdi, -160(%rbp) ## 8-byte Spill
movq -216(%rbp), %r11 ## 8-byte Reload
adcq %r11, %rdx
movq %rdx, -288(%rbp) ## 8-byte Spill
addq %r9, %r12
movq %rcx, %r10
adcq %r13, %r10
movq %r10, -496(%rbp) ## 8-byte Spill
movzbl %r15b, %eax
adcq %rbx, %rax
movq %rax, -184(%rbp) ## 8-byte Spill
movq -224(%rbp), %rax ## 8-byte Reload
movq 20(%rax), %rax
movq %rax, -200(%rbp) ## 8-byte Spill
movq %rax, %rdx
movq -240(%rbp), %rdi ## 8-byte Reload
imulq %rdi, %rdx
movq %rdx, %rbx
movq %rdx, -144(%rbp) ## 8-byte Spill
mulq %rdi
movq %rdx, -424(%rbp) ## 8-byte Spill
xorl %edi, %edi
movq -80(%rbp), %rax ## 8-byte Reload
cmpq -248(%rbp), %rax ## 8-byte Folded Reload
setb %dil
addq %rbx, %rax
movq %rax, -80(%rbp) ## 8-byte Spill
adcq %rdx, %rdi
movq %rdi, -192(%rbp) ## 8-byte Spill
addb $255, -328(%rbp) ## 1-byte Folded Spill
movq -104(%rbp), %rcx ## 8-byte Reload
adcq -64(%rbp), %rcx ## 8-byte Folded Reload
setb -96(%rbp) ## 1-byte Folded Spill
movq -376(%rbp), %rdx ## 8-byte Reload
adcq %rax, %rdx
movq %rdx, -264(%rbp) ## 8-byte Spill
addb $255, -320(%rbp) ## 1-byte Folded Spill
adcq -168(%rbp), %r12 ## 8-byte Folded Reload
setb -272(%rbp) ## 1-byte Folded Spill
adcq %rdx, %r10
movq %r10, -360(%rbp) ## 8-byte Spill
addb $255, -280(%rbp) ## 1-byte Folded Spill
adcq %r11, -88(%rbp) ## 8-byte Folded Spill
setb -368(%rbp) ## 1-byte Folded Spill
movq %r8, %rcx
adcq %r10, %rcx
movq %rcx, %rdi
movq %rcx, -648(%rbp) ## 8-byte Spill
movq -440(%rbp), %r12 ## 8-byte Reload
movq %r12, %rcx
movq -112(%rbp), %rax ## 8-byte Reload
imulq %rax, %rcx
mulq %rax
movq %rdx, %r13
movq -72(%rbp), %rax ## 8-byte Reload
cmpq -152(%rbp), %rax ## 8-byte Folded Reload
adcq $0, %r13
addb $255, -504(%rbp) ## 1-byte Folded Spill
adcq -416(%rbp), %rax ## 8-byte Folded Reload
setb -576(%rbp) ## 1-byte Folded Spill
leaq (%r13,%rcx), %rax
movq %rcx, %r10
adcq %rdi, %rax
movq %rax, %r11
movq %rax, -640(%rbp) ## 8-byte Spill
movq %r14, %rax
imulq -728(%rbp), %rax ## 8-byte Folded Reload
movq %rax, %r15
movq %rax, -328(%rbp) ## 8-byte Spill
movq %r14, %rcx
shlq $32, %rcx
movq %r14, %rax
movl $4294967295, %r8d ## imm = 0xFFFFFFFF
mulq %r8
movq %rdx, -64(%rbp) ## 8-byte Spill
xorl %eax, %eax
subq %r14, %rcx
movq %rcx, -72(%rbp) ## 8-byte Spill
setb %al
movq -552(%rbp), %rcx ## 8-byte Reload
leaq (%rcx,%rax), %rdi
xorl %ebx, %ebx
addq %rdx, %rdi
setb %bl
addq %rax, %rcx
adcq -296(%rbp), %rbx ## 8-byte Folded Reload
setb -592(%rbp) ## 1-byte Folded Spill
movq %r15, %rax
adcq %r11, %rax
movq %rax, %r15
movq %rax, -536(%rbp) ## 8-byte Spill
addq -56(%rbp), %r9 ## 8-byte Folded Reload
movq -384(%rbp), %rcx ## 8-byte Reload
adcq %r10, %rcx
movq %rcx, -384(%rbp) ## 8-byte Spill
movq %rdi, %rax
movq -176(%rbp), %rdx ## 8-byte Reload
addq %rdx, %rax
movq %r9, %r11
adcq %rbx, %r11
addq %rdx, %rdi
movq %rdi, -296(%rbp) ## 8-byte Spill
adcq %rbx, %r9
setb -632(%rbp) ## 1-byte Folded Spill
movq %rcx, %rbx
adcq %r15, %rbx
movq %rdi, %rcx
shlq $32, %rcx
movq %rdi, %rax
mulq %r8
movq %rdx, -104(%rbp) ## 8-byte Spill
xorl %eax, %eax
subq %rdi, %rcx
movq %rcx, -320(%rbp) ## 8-byte Spill
setb %al
leaq (%r11,%rax), %r8
xorl %r9d, %r9d
addq %rdx, %r8
movq %r8, -56(%rbp) ## 8-byte Spill
setb %r9b
addq %rax, %r11
adcq %rbx, %r9
setb -552(%rbp) ## 1-byte Folded Spill
movq %r12, %rax
movq -112(%rbp), %rcx ## 8-byte Reload
mulq %rcx
movq %rdx, %r12
movq %r13, %rax
addq %r10, %rax
movq %r10, %r11
movq %rdx, %rax
adcq $0, %rax
setb -168(%rbp) ## 1-byte Folded Spill
movq -72(%rbp), %rdi ## 8-byte Reload
subq %r14, %rdi
movq -136(%rbp), %rbx ## 8-byte Reload
movq %rbx, %rax
mulq %rcx
movq %rcx, %r10
movq %rdx, %r15
movq %rdx, -176(%rbp) ## 8-byte Spill
movq %r14, %rax
movl $4294967294, %ecx ## imm = 0xFFFFFFFE
mulq %rcx
imulq %r10, %rbx
movq -392(%rbp), %rcx ## 8-byte Reload
addq -248(%rbp), %rcx ## 8-byte Folded Reload
movq %rcx, -392(%rbp) ## 8-byte Spill
movq %rbx, %rax
movq %rbx, -232(%rbp) ## 8-byte Spill
adcq -48(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -88(%rbp) ## 8-byte Spill
addq -432(%rbp), %r8 ## 8-byte Folded Reload
movq %rcx, %rax
adcq %r9, %rax
movq %rax, -280(%rbp) ## 8-byte Spill
addq -64(%rbp), %rdi ## 8-byte Folded Reload
movq %rdi, -72(%rbp) ## 8-byte Spill
adcq -328(%rbp), %rdx ## 8-byte Folded Reload
movq %rdx, -416(%rbp) ## 8-byte Spill
addq %r11, %r13
movq %rbx, %r8
adcq %r12, %r8
movq %r8, -488(%rbp) ## 8-byte Spill
movzbl -168(%rbp), %eax ## 1-byte Folded Reload
adcq %r15, %rax
movq %rax, -152(%rbp) ## 8-byte Spill
movq -224(%rbp), %rax ## 8-byte Reload
movq 24(%rax), %rax
movq %rax, -248(%rbp) ## 8-byte Spill
movq %rax, %rdx
movq -240(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rdx
movq %rdx, %rdi
movq %rdx, -504(%rbp) ## 8-byte Spill
mulq %rcx
movq %rdx, -168(%rbp) ## 8-byte Spill
xorl %r14d, %r14d
movq -192(%rbp), %r10 ## 8-byte Reload
cmpq -424(%rbp), %r10 ## 8-byte Folded Reload
setb %r14b
addq %rdi, %r10
movq %r10, %rdi
movq %r10, -192(%rbp) ## 8-byte Spill
adcq %rdx, %r14
movq -208(%rbp), %r10 ## 8-byte Reload
movq -376(%rbp), %rcx ## 8-byte Reload
cmpq %r10, %rcx
adcq -128(%rbp), %r10 ## 8-byte Folded Reload
addb $255, -96(%rbp) ## 1-byte Folded Spill
adcq %rcx, -80(%rbp) ## 8-byte Folded Spill
setb -528(%rbp) ## 1-byte Folded Spill
movq %r10, %rax
adcq %rdi, %rax
movq %rax, %rbx
movq %rax, -312(%rbp) ## 8-byte Spill
movq -200(%rbp), %rax ## 8-byte Reload
movq %rax, %rdx
movq -120(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rdx
movq %rdx, %rdi
movq %rdx, -544(%rbp) ## 8-byte Spill
mulq %rcx
movq %rdx, -80(%rbp) ## 8-byte Spill
xorl %eax, %eax
movq -184(%rbp), %rcx ## 8-byte Reload
cmpq -48(%rbp), %rcx ## 8-byte Folded Reload
setb %al
addq %rdi, %rcx
movq %rcx, -184(%rbp) ## 8-byte Spill
adcq %rdx, %rax
movq %rax, -96(%rbp) ## 8-byte Spill
addb $255, -272(%rbp) ## 1-byte Folded Spill
movq -264(%rbp), %rax ## 8-byte Reload
adcq -496(%rbp), %rax ## 8-byte Folded Reload
setb -264(%rbp) ## 1-byte Folded Spill
adcq %rbx, %rcx
movq %rcx, -720(%rbp) ## 8-byte Spill
addb $255, -368(%rbp) ## 1-byte Folded Spill
movq -360(%rbp), %rax ## 8-byte Reload
adcq -160(%rbp), %rax ## 8-byte Folded Reload
setb -400(%rbp) ## 1-byte Folded Spill
movq -288(%rbp), %rax ## 8-byte Reload
adcq %rcx, %rax
movq %rax, -408(%rbp) ## 8-byte Spill
addb $255, -576(%rbp) ## 1-byte Folded Spill
adcq -648(%rbp), %r13 ## 8-byte Folded Reload
setb -648(%rbp) ## 1-byte Folded Spill
movq %r8, %r15
adcq %rax, %r15
addb $255, -592(%rbp) ## 1-byte Folded Spill
movq -640(%rbp), %rax ## 8-byte Reload
adcq -328(%rbp), %rax ## 8-byte Folded Reload
setb -640(%rbp) ## 1-byte Folded Spill
movq -72(%rbp), %rax ## 8-byte Reload
adcq %r15, %rax
movq %rax, %rdx
movq %rax, -256(%rbp) ## 8-byte Spill
movq -440(%rbp), %rcx ## 8-byte Reload
movq %rcx, %rdi
imulq %rcx, %rdi
movq -384(%rbp), %rax ## 8-byte Reload
cmpq %r11, %rax
adcq $0, %r12
addb $255, -632(%rbp) ## 1-byte Folded Spill
adcq -536(%rbp), %rax ## 8-byte Folded Reload
setb -632(%rbp) ## 1-byte Folded Spill
leaq (%r12,%rdi), %r13
adcq %rdx, %r13
movq -296(%rbp), %rdx ## 8-byte Reload
imulq -728(%rbp), %rdx ## 8-byte Folded Reload
movq %rdx, -376(%rbp) ## 8-byte Spill
movb -552(%rbp), %al ## 1-byte Reload
addb $255, %al
adcq %r13, %rdx
movq %rdx, -576(%rbp) ## 8-byte Spill
movq -56(%rbp), %rax ## 8-byte Reload
addq -432(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -56(%rbp) ## 8-byte Spill
adcq %r9, -392(%rbp) ## 8-byte Folded Spill
setb -432(%rbp) ## 1-byte Folded Spill
movq -88(%rbp), %rax ## 8-byte Reload
adcq %rdx, %rax
movq %rax, -384(%rbp) ## 8-byte Spill
movq %rcx, %rax
mulq %rcx
movq %r12, %rax
addq %rdi, %rax
movq %rdx, %rax
movq %rdx, %rbx
adcq $0, %rax
setb %r9b
movq -136(%rbp), %rax ## 8-byte Reload
movq %rax, %rdx
imulq %rcx, %rdx
movq %rdx, %r8
movq %rdx, -464(%rbp) ## 8-byte Spill
mulq %rcx
movq %rdx, -704(%rbp) ## 8-byte Spill
addq %rdi, %r12
adcq %r8, %rbx
movq %rbx, -368(%rbp) ## 8-byte Spill
movzbl %r9b, %eax
adcq %rdx, %rax
movq %rax, -48(%rbp) ## 8-byte Spill
movq -224(%rbp), %rax ## 8-byte Reload
movq 28(%rax), %rax
movq %rax, -392(%rbp) ## 8-byte Spill
movq %rax, %rcx
movq -240(%rbp), %rdx ## 8-byte Reload
imulq %rdx, %rcx
movq %rcx, -800(%rbp) ## 8-byte Spill
mulq %rdx
movq %rdx, -360(%rbp) ## 8-byte Spill
xorl %eax, %eax
cmpq -168(%rbp), %r14 ## 8-byte Folded Reload
setb %al
addq %rcx, %r14
movq %r14, -584(%rbp) ## 8-byte Spill
adcq %rdx, %rax
movq %rax, -160(%rbp) ## 8-byte Spill
movq -208(%rbp), %rax ## 8-byte Reload
cmpq %rax, %r10
adcq -128(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -624(%rbp) ## 8-byte Spill
addb $255, -528(%rbp) ## 1-byte Folded Spill
adcq -192(%rbp), %r10 ## 8-byte Folded Reload
setb -592(%rbp) ## 1-byte Folded Spill
adcq %r14, %rax
movq %rax, %r14
movq %rax, -448(%rbp) ## 8-byte Spill
movq -248(%rbp), %rax ## 8-byte Reload
movq %rax, %rcx
movq -120(%rbp), %rdx ## 8-byte Reload
imulq %rdx, %rcx
movq %rcx, %r8
movq %rcx, -496(%rbp) ## 8-byte Spill
mulq %rdx
movq %rdx, %r10
movq %rdx, -192(%rbp) ## 8-byte Spill
xorl %ebx, %ebx
movq -96(%rbp), %r11 ## 8-byte Reload
cmpq -80(%rbp), %r11 ## 8-byte Folded Reload
setb %bl
movq -200(%rbp), %rax ## 8-byte Reload
movq %rax, %rdx
movq -112(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rdx
movq %rdx, %rdi
mulq %rcx
movq %rdx, -272(%rbp) ## 8-byte Spill
xorl %ecx, %ecx
movq -152(%rbp), %rax ## 8-byte Reload
cmpq -176(%rbp), %rax ## 8-byte Folded Reload
setb %cl
addq %rdi, %rax
movq %rdi, -520(%rbp) ## 8-byte Spill
movq %rax, %r9
movq %rax, -152(%rbp) ## 8-byte Spill
adcq %rdx, %rcx
movq %rcx, -616(%rbp) ## 8-byte Spill
addq %r8, %r11
movq %r11, -96(%rbp) ## 8-byte Spill
adcq %r10, %rbx
movq %rbx, -712(%rbp) ## 8-byte Spill
addb $255, -264(%rbp) ## 1-byte Folded Spill
movq -184(%rbp), %rcx ## 8-byte Reload
adcq -312(%rbp), %rcx ## 8-byte Folded Reload
setb -264(%rbp) ## 1-byte Folded Spill
adcq %r14, %r11
movq %r11, -864(%rbp) ## 8-byte Spill
movq -216(%rbp), %rax ## 8-byte Reload
movq -288(%rbp), %rcx ## 8-byte Reload
cmpq %rax, %rcx
adcq -336(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -792(%rbp) ## 8-byte Spill
addb $255, -400(%rbp) ## 1-byte Folded Spill
adcq %rcx, -720(%rbp) ## 8-byte Folded Spill
setb -456(%rbp) ## 1-byte Folded Spill
movq %rax, %rcx
adcq %r11, %rcx
movq %rcx, -512(%rbp) ## 8-byte Spill
addb $255, -648(%rbp) ## 1-byte Folded Spill
movq -408(%rbp), %rax ## 8-byte Reload
adcq -488(%rbp), %rax ## 8-byte Folded Reload
setb -648(%rbp) ## 1-byte Folded Spill
movq %r9, %rax
adcq %rcx, %rax
movq %rax, -480(%rbp) ## 8-byte Spill
addb $255, -640(%rbp) ## 1-byte Folded Spill
adcq -72(%rbp), %r15 ## 8-byte Folded Reload
setb -536(%rbp) ## 1-byte Folded Spill
movq -416(%rbp), %rcx ## 8-byte Reload
adcq %rax, %rcx
movq %rcx, -528(%rbp) ## 8-byte Spill
addb $255, -632(%rbp) ## 1-byte Folded Spill
adcq -256(%rbp), %r12 ## 8-byte Folded Reload
setb -312(%rbp) ## 1-byte Folded Spill
movq -368(%rbp), %rax ## 8-byte Reload
adcq %rcx, %rax
movq %rax, %rdx
movq %rax, -408(%rbp) ## 8-byte Spill
movq -320(%rbp), %rcx ## 8-byte Reload
subq -296(%rbp), %rcx ## 8-byte Folded Reload
movq %rcx, -320(%rbp) ## 8-byte Spill
addb $255, -552(%rbp) ## 1-byte Folded Spill
adcq -376(%rbp), %r13 ## 8-byte Folded Reload
setb -488(%rbp) ## 1-byte Folded Spill
movq -104(%rbp), %rax ## 8-byte Reload
leaq (%rcx,%rax), %rax
adcq %rdx, %rax
movq %rax, %rdx
movq %rax, -640(%rbp) ## 8-byte Spill
movq -88(%rbp), %rcx ## 8-byte Reload
cmpq -232(%rbp), %rcx ## 8-byte Folded Reload
movq -176(%rbp), %rax ## 8-byte Reload
adcq $0, %rax
addb $255, -432(%rbp) ## 1-byte Folded Spill
adcq %rcx, -576(%rbp) ## 8-byte Folded Spill
setb -232(%rbp) ## 1-byte Folded Spill
movq -464(%rbp), %r12 ## 8-byte Reload
leaq (%rax,%r12), %rcx
movq %rax, %r8
adcq %rdx, %rcx
movq %rcx, %r14
movq %rcx, -344(%rbp) ## 8-byte Spill
movq -56(%rbp), %rbx ## 8-byte Reload
movq %rbx, %rax
imulq -728(%rbp), %rax ## 8-byte Folded Reload
movq %rax, %r11
movq %rbx, %rcx
shlq $32, %rcx
movq %rbx, %rax
movl $4294967295, %r15d ## imm = 0xFFFFFFFF
mulq %r15
movq %rdx, -184(%rbp) ## 8-byte Spill
xorl %eax, %eax
subq %rbx, %rcx
movq %rcx, %r10
setb %al
movq -280(%rbp), %rbx ## 8-byte Reload
leaq (%rbx,%rax), %r9
xorl %ecx, %ecx
addq %rdx, %r9
setb %cl
addq %rax, %rbx
adcq -384(%rbp), %rcx ## 8-byte Folded Reload
setb -256(%rbp) ## 1-byte Folded Spill
movq %r11, %rax
movq %r11, %r13
movq %r11, -432(%rbp) ## 8-byte Spill
adcq %r14, %rax
movq %rax, %r14
movq %rax, -304(%rbp) ## 8-byte Spill
movq -544(%rbp), %rbx ## 8-byte Reload
addq -424(%rbp), %rbx ## 8-byte Folded Reload
movq -80(%rbp), %rdx ## 8-byte Reload
adcq %rdi, %rdx
movq %rdx, -80(%rbp) ## 8-byte Spill
movq %r9, %rax
movq -144(%rbp), %rdi ## 8-byte Reload
addq %rdi, %rax
movq %rbx, %r11
adcq %rcx, %r11
addq %rdi, %r9
adcq %rcx, %rbx
setb -352(%rbp) ## 1-byte Folded Spill
movq %rdx, %rcx
adcq %r14, %rcx
movq %r9, %rdi
shlq $32, %rdi
movq %r9, %rax
movq %r9, -472(%rbp) ## 8-byte Spill
mulq %r15
movq %rdx, -384(%rbp) ## 8-byte Spill
xorl %eax, %eax
subq %r9, %rdi
movq %rdi, -424(%rbp) ## 8-byte Spill
setb %al
leaq (%r11,%rax), %rdi
addq %rdx, %rdi
movq %rdi, %r15
movq %rdi, -576(%rbp) ## 8-byte Spill
movl $0, %edx
setb %dl
addq %rax, %r11
adcq %rcx, %rdx
movq %rdx, %r9
movq %rdx, -808(%rbp) ## 8-byte Spill
setb -856(%rbp) ## 1-byte Folded Spill
movq %r8, %rbx
movq %r8, %rax
addq %r12, %rax
movq -704(%rbp), %r8 ## 8-byte Reload
movq %r8, %rax
adcq $0, %rax
setb %r11b
movq -136(%rbp), %rax ## 8-byte Reload
movq %rax, %rcx
imulq %rax, %rcx
movq -56(%rbp), %rdi ## 8-byte Reload
subq %rdi, %r10
mulq %rax
movq %rdx, %r14
movq %rdx, -400(%rbp) ## 8-byte Spill
movq %rdi, %rax
movl $4294967294, %edx ## imm = 0xFFFFFFFE
mulq %rdx
movq %r15, %rax
addq -504(%rbp), %rax ## 8-byte Folded Reload
movq -168(%rbp), %rax ## 8-byte Reload
movq -496(%rbp), %rdi ## 8-byte Reload
leaq (%rdi,%rax), %rax
adcq %r9, %rax
movq %rax, -784(%rbp) ## 8-byte Spill
addq -184(%rbp), %r10 ## 8-byte Folded Reload
movq %r10, -568(%rbp) ## 8-byte Spill
adcq %r13, %rdx
movq %rdx, -544(%rbp) ## 8-byte Spill
addq %r12, %rbx
movq %rbx, -176(%rbp) ## 8-byte Spill
adcq %r8, %rcx
movq %rcx, -560(%rbp) ## 8-byte Spill
movq %r8, %r15
movzbl %r11b, %eax
adcq %r14, %rax
movq %rax, -720(%rbp) ## 8-byte Spill
movq -224(%rbp), %rax ## 8-byte Reload
movq 32(%rax), %rax
movq %rax, -72(%rbp) ## 8-byte Spill
movq %rax, %rcx
movq -240(%rbp), %rdx ## 8-byte Reload
imulq %rdx, %rcx
movq %rcx, -816(%rbp) ## 8-byte Spill
mulq %rdx
movq %rdx, -632(%rbp) ## 8-byte Spill
xorl %edi, %edi
movq -160(%rbp), %rax ## 8-byte Reload
cmpq -360(%rbp), %rax ## 8-byte Folded Reload
setb %dil
addq %rcx, %rax
movq %rax, %rcx
movq %rax, -160(%rbp) ## 8-byte Spill
adcq %rdx, %rdi
movq %rdi, -88(%rbp) ## 8-byte Spill
movq -208(%rbp), %rax ## 8-byte Reload
movq -624(%rbp), %rdx ## 8-byte Reload
cmpq %rax, %rdx
movq %rax, %r9
adcq -128(%rbp), %r9 ## 8-byte Folded Reload
movq %r9, -464(%rbp) ## 8-byte Spill
addb $255, -592(%rbp) ## 1-byte Folded Spill
adcq -584(%rbp), %rdx ## 8-byte Folded Reload
setb -56(%rbp) ## 1-byte Folded Spill
adcq %rcx, %r9
movq -392(%rbp), %rax ## 8-byte Reload
movq %rax, %rdx
movq -120(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rdx
movq %rdx, -584(%rbp) ## 8-byte Spill
mulq %rcx
movq %rdx, %r12
movq %rdx, -552(%rbp) ## 8-byte Spill
xorl %r13d, %r13d
movq -712(%rbp), %r10 ## 8-byte Reload
cmpq -192(%rbp), %r10 ## 8-byte Folded Reload
setb %r13b
movq -248(%rbp), %rax ## 8-byte Reload
movq %rax, %r11
movq -112(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %r11
movq %r11, -592(%rbp) ## 8-byte Spill
mulq %rcx
movq %rdx, %r8
movq %rdx, -288(%rbp) ## 8-byte Spill
xorl %edi, %edi
movq -616(%rbp), %r14 ## 8-byte Reload
cmpq -272(%rbp), %r14 ## 8-byte Folded Reload
setb %dil
movq -200(%rbp), %rax ## 8-byte Reload
movq %rax, %rbx
movq -440(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rbx
movq %rbx, -696(%rbp) ## 8-byte Spill
mulq %rcx
movq %rdx, -624(%rbp) ## 8-byte Spill
xorl %eax, %eax
movq -48(%rbp), %rcx ## 8-byte Reload
cmpq %r15, %rcx
setb %al
addq %rbx, %rcx
movq %rcx, -48(%rbp) ## 8-byte Spill
adcq %rdx, %rax
movq %rax, -144(%rbp) ## 8-byte Spill
addq %r11, %r14
adcq %r8, %rdi
movq %rdi, -280(%rbp) ## 8-byte Spill
addq -584(%rbp), %r10 ## 8-byte Folded Reload
adcq %r12, %r13
addb $255, -264(%rbp) ## 1-byte Folded Spill
movq -448(%rbp), %rcx ## 8-byte Reload
adcq %rcx, -96(%rbp) ## 8-byte Folded Spill
setb %bl
movq %r10, %rdi
adcq %r9, %rdi
movq -216(%rbp), %rax ## 8-byte Reload
movq -792(%rbp), %rcx ## 8-byte Reload
cmpq %rax, %rcx
adcq -336(%rbp), %rax ## 8-byte Folded Reload
addb $255, -456(%rbp) ## 1-byte Folded Spill
adcq -864(%rbp), %rcx ## 8-byte Folded Reload
setb %r8b
movq %rax, %rcx
movq %rax, -96(%rbp) ## 8-byte Spill
adcq %rdi, %rcx
addb $255, %bl
adcq %r9, %r10
setb -264(%rbp) ## 1-byte Folded Spill
addb $255, -648(%rbp) ## 1-byte Folded Spill
movq -512(%rbp), %rdx ## 8-byte Reload
adcq -152(%rbp), %rdx ## 8-byte Folded Reload
setb %r11b
movq %r14, %rdx
adcq %rcx, %rdx
addb $255, %r8b
adcq %rax, %rdi
setb -648(%rbp) ## 1-byte Folded Spill
movq -328(%rbp), %rax ## 8-byte Reload
movq -416(%rbp), %rbx ## 8-byte Reload
cmpq %rax, %rbx
adcq -64(%rbp), %rax ## 8-byte Folded Reload
addb $255, -536(%rbp) ## 1-byte Folded Spill
adcq %rbx, -480(%rbp) ## 8-byte Folded Spill
setb %dil
movq %rax, %rbx
movq %rax, %r8
movq %rax, -456(%rbp) ## 8-byte Spill
adcq %rdx, %rbx
addb $255, %r11b
adcq %r14, %rcx
setb -536(%rbp) ## 1-byte Folded Spill
addb $255, -312(%rbp) ## 1-byte Folded Spill
movq -528(%rbp), %rax ## 8-byte Reload
adcq -368(%rbp), %rax ## 8-byte Folded Reload
setb %r9b
movq -48(%rbp), %r10 ## 8-byte Reload
movq %r10, %rcx
adcq %rbx, %rcx
addb $255, %dil
adcq %r8, %rdx
setb -312(%rbp) ## 1-byte Folded Spill
movq -296(%rbp), %rax ## 8-byte Reload
movl $4294967294, %edx ## imm = 0xFFFFFFFE
mulq %rdx
movq -320(%rbp), %rax ## 8-byte Reload
addq -104(%rbp), %rax ## 8-byte Folded Reload
adcq -376(%rbp), %rdx ## 8-byte Folded Reload
addb $255, -488(%rbp) ## 1-byte Folded Spill
adcq -408(%rbp), %rax ## 8-byte Folded Reload
setb %r8b
movq %rdx, %rdi
movq %rdx, -296(%rbp) ## 8-byte Spill
adcq %rcx, %rdi
addb $255, %r9b
adcq %r10, %rbx
setb -480(%rbp) ## 1-byte Folded Spill
addb $255, -232(%rbp) ## 1-byte Folded Spill
movq -176(%rbp), %rax ## 8-byte Reload
adcq -640(%rbp), %rax ## 8-byte Folded Reload
setb %bl
movq -560(%rbp), %r10 ## 8-byte Reload
movq %r10, %rax
adcq %rdi, %rax
addb $255, %r8b
adcq %rdx, %rcx
setb -512(%rbp) ## 1-byte Folded Spill
addb $255, -256(%rbp) ## 1-byte Folded Spill
movq -344(%rbp), %rcx ## 8-byte Reload
adcq -432(%rbp), %rcx ## 8-byte Folded Reload
setb %r9b
movq -568(%rbp), %r8 ## 8-byte Reload
movq %r8, %rcx
adcq %rax, %rcx
addb $255, %bl
adcq %r10, %rdi
setb -256(%rbp) ## 1-byte Folded Spill
movq -80(%rbp), %rdx ## 8-byte Reload
cmpq -520(%rbp), %rdx ## 8-byte Folded Reload
movq -272(%rbp), %r14 ## 8-byte Reload
adcq $0, %r14
addb $255, -352(%rbp) ## 1-byte Folded Spill
adcq -304(%rbp), %rdx ## 8-byte Folded Reload
setb %r11b
movq -696(%rbp), %r15 ## 8-byte Reload
leaq (%r14,%r15), %r12
movq %r12, %rbx
adcq %rcx, %rbx
addb $255, %r9b
adcq %r8, %rax
setb -352(%rbp) ## 1-byte Folded Spill
movq -472(%rbp), %r8 ## 8-byte Reload
movq %r8, %rdi
imulq -728(%rbp), %rdi ## 8-byte Folded Reload
movb -856(%rbp), %dl ## 1-byte Reload
movl %edx, %eax
addb $255, %al
movq %rdi, %rax
adcq %rbx, %rax
movq %rax, %r9
movq %rax, -368(%rbp) ## 8-byte Spill
addb $255, %r11b
adcq %r12, %rcx
setb -344(%rbp) ## 1-byte Folded Spill
addb $255, %dl
adcq %rdi, %rbx
movq %rdi, %r10
movq %rdi, -176(%rbp) ## 8-byte Spill
setb -304(%rbp) ## 1-byte Folded Spill
movq -496(%rbp), %rdx ## 8-byte Reload
addq -168(%rbp), %rdx ## 8-byte Folded Reload
movq -192(%rbp), %rcx ## 8-byte Reload
adcq -592(%rbp), %rcx ## 8-byte Folded Reload
movq %rcx, -192(%rbp) ## 8-byte Spill
movq -504(%rbp), %rax ## 8-byte Reload
addq %rax, -576(%rbp) ## 8-byte Folded Spill
adcq -808(%rbp), %rdx ## 8-byte Folded Reload
setb -168(%rbp) ## 1-byte Folded Spill
movq %rcx, %rax
adcq %r9, %rax
movq %rax, -616(%rbp) ## 8-byte Spill
movq %r14, %rax
addq %r15, %rax
movq %r15, %r12
movq -624(%rbp), %r11 ## 8-byte Reload
movq %r11, %rax
adcq $0, %rax
setb %r9b
movq -424(%rbp), %rdi ## 8-byte Reload
subq %r8, %rdi
movq -200(%rbp), %rcx ## 8-byte Reload
movq %rcx, %rax
movq -136(%rbp), %rbx ## 8-byte Reload
mulq %rbx
movq %rdx, %r15
movq %rdx, -504(%rbp) ## 8-byte Spill
movq %r8, %rax
movl $4294967294, %edx ## imm = 0xFFFFFFFE
mulq %rdx
imulq %rbx, %rcx
addq -384(%rbp), %rdi ## 8-byte Folded Reload
movq %rdi, -424(%rbp) ## 8-byte Spill
adcq %r10, %rdx
movq %rdx, -528(%rbp) ## 8-byte Spill
addq %r12, %r14
movq %rcx, %rax
movq %rcx, %r14
adcq %r11, %rax
movq %r11, %r12
movq %rax, -472(%rbp) ## 8-byte Spill
movzbl %r9b, %eax
adcq %r15, %rax
movq %rax, -416(%rbp) ## 8-byte Spill
movq -224(%rbp), %rax ## 8-byte Reload
movq 36(%rax), %rax
movq %rax, -80(%rbp) ## 8-byte Spill
movq %rax, %rcx
movq -240(%rbp), %rdx ## 8-byte Reload
imulq %rdx, %rcx
movq %rcx, -808(%rbp) ## 8-byte Spill
mulq %rdx
movq %rdx, -488(%rbp) ## 8-byte Spill
xorl %ebx, %ebx
movq -88(%rbp), %rax ## 8-byte Reload
cmpq -632(%rbp), %rax ## 8-byte Folded Reload
setb %bl
addq %rcx, %rax
movq %rax, %rdi
movq %rax, -88(%rbp) ## 8-byte Spill
adcq %rdx, %rbx
movq %rbx, -232(%rbp) ## 8-byte Spill
movq -208(%rbp), %r10 ## 8-byte Reload
movq -464(%rbp), %rcx ## 8-byte Reload
cmpq %r10, %rcx
adcq -128(%rbp), %r10 ## 8-byte Folded Reload
movq %r10, -496(%rbp) ## 8-byte Spill
addb $255, -56(%rbp) ## 1-byte Folded Spill
adcq -160(%rbp), %rcx ## 8-byte Folded Reload
setb -160(%rbp) ## 1-byte Folded Spill
movq %r10, %rax
adcq %rdi, %rax
movq %rax, -448(%rbp) ## 8-byte Spill
movq -72(%rbp), %rax ## 8-byte Reload
movq %rax, %rcx
movq -120(%rbp), %rdx ## 8-byte Reload
imulq %rdx, %rcx
movq %rcx, %r9
movq %rcx, -640(%rbp) ## 8-byte Spill
mulq %rdx
movq %rdx, %r11
movq %rdx, -152(%rbp) ## 8-byte Spill
xorl %eax, %eax
movq %r13, %r8
cmpq -552(%rbp), %r13 ## 8-byte Folded Reload
setb %al
movq %rax, -48(%rbp) ## 8-byte Spill
movq -392(%rbp), %rbx ## 8-byte Reload
movq %rbx, %rax
movq -112(%rbp), %rdi ## 8-byte Reload
mulq %rdi
movq %rdx, %rcx
movq %rdx, -320(%rbp) ## 8-byte Spill
imulq %rdi, %rbx
movq %rbx, -408(%rbp) ## 8-byte Spill
xorl %eax, %eax
movq -280(%rbp), %r15 ## 8-byte Reload
cmpq -288(%rbp), %r15 ## 8-byte Folded Reload
setb %al
movq %rax, -56(%rbp) ## 8-byte Spill
movq -248(%rbp), %rax ## 8-byte Reload
movq %rax, %r13
movq -440(%rbp), %rdx ## 8-byte Reload
imulq %rdx, %r13
mulq %rdx
movq %rdx, %r10
xorl %edx, %edx
movq -144(%rbp), %rax ## 8-byte Reload
cmpq %r12, %rax
setb %dl
xorl %edi, %edi
movq -720(%rbp), %r12 ## 8-byte Reload
cmpq -400(%rbp), %r12 ## 8-byte Folded Reload
setb %dil
addq %r12, %r14
movq %r14, -608(%rbp) ## 8-byte Spill
adcq -504(%rbp), %rdi ## 8-byte Folded Reload
movq %rdi, -824(%rbp) ## 8-byte Spill
addq %r13, %rax
movq %rax, %r12
movq %rax, -144(%rbp) ## 8-byte Spill
adcq %r10, %rdx
movq %rdx, -272(%rbp) ## 8-byte Spill
movq %r15, %rdx
addq %rbx, %rdx
movq %rdx, -280(%rbp) ## 8-byte Spill
movq -56(%rbp), %r15 ## 8-byte Reload
adcq %rcx, %r15
movq %r15, -56(%rbp) ## 8-byte Spill
movq %r8, %rcx
addq %r9, %rcx
movq %rcx, -600(%rbp) ## 8-byte Spill
movq -48(%rbp), %r8 ## 8-byte Reload
adcq %r11, %r8
movq %r8, -48(%rbp) ## 8-byte Spill
movb -264(%rbp), %al ## 1-byte Reload
addb $255, %al
movq %rcx, %rdi
adcq -448(%rbp), %rdi ## 8-byte Folded Reload
movq %rdi, -664(%rbp) ## 8-byte Spill
movq -216(%rbp), %rax ## 8-byte Reload
cmpq %rax, -96(%rbp) ## 8-byte Folded Reload
movq %rax, %rbx
adcq -336(%rbp), %rbx ## 8-byte Folded Reload
movq %rbx, -832(%rbp) ## 8-byte Spill
movb -648(%rbp), %al ## 1-byte Reload
addb $255, %al
adcq %rdi, %rbx
movq %rbx, -672(%rbp) ## 8-byte Spill
movb -536(%rbp), %al ## 1-byte Reload
addb $255, %al
movq %rdx, %rcx
adcq %rbx, %rcx
movq %rcx, -680(%rbp) ## 8-byte Spill
movq -328(%rbp), %rax ## 8-byte Reload
cmpq %rax, -456(%rbp) ## 8-byte Folded Reload
movq %rax, %rbx
adcq -64(%rbp), %rbx ## 8-byte Folded Reload
movq %rbx, -840(%rbp) ## 8-byte Spill
movb -312(%rbp), %al ## 1-byte Reload
addb $255, %al
adcq %rcx, %rbx
movq %rbx, -688(%rbp) ## 8-byte Spill
movb -480(%rbp), %al ## 1-byte Reload
addb $255, %al
movq %r12, %rcx
adcq %rbx, %rcx
movq %rcx, -752(%rbp) ## 8-byte Spill
movq -376(%rbp), %rax ## 8-byte Reload
cmpq %rax, -296(%rbp) ## 8-byte Folded Reload
movq %rax, %rbx
adcq -104(%rbp), %rbx ## 8-byte Folded Reload
movq %rbx, -848(%rbp) ## 8-byte Spill
movb -512(%rbp), %al ## 1-byte Reload
addb $255, %al
adcq %rcx, %rbx
movq %rbx, -760(%rbp) ## 8-byte Spill
movb -256(%rbp), %al ## 1-byte Reload
addb $255, %al
adcq %rbx, %r14
movq %r14, -696(%rbp) ## 8-byte Spill
movb -352(%rbp), %al ## 1-byte Reload
addb $255, %al
movq -544(%rbp), %rdx ## 8-byte Reload
adcq %r14, %rdx
movq %rdx, -704(%rbp) ## 8-byte Spill
movb -344(%rbp), %al ## 1-byte Reload
addb $255, %al
movq -472(%rbp), %rcx ## 8-byte Reload
adcq %rdx, %rcx
movq %rcx, -864(%rbp) ## 8-byte Spill
movb -304(%rbp), %al ## 1-byte Reload
addb $255, %al
movq -424(%rbp), %rax ## 8-byte Reload
adcq %rcx, %rax
movq %rax, %rdx
movq %rax, -560(%rbp) ## 8-byte Spill
movq -192(%rbp), %rax ## 8-byte Reload
cmpq -592(%rbp), %rax ## 8-byte Folded Reload
movq -288(%rbp), %rcx ## 8-byte Reload
adcq $0, %rcx
addb $255, -168(%rbp) ## 1-byte Folded Spill
adcq -368(%rbp), %rax ## 8-byte Folded Reload
setb -792(%rbp) ## 1-byte Folded Spill
leaq (%rcx,%r13), %rax
adcq %rdx, %rax
movq %rax, -712(%rbp) ## 8-byte Spill
movq %rcx, %rax
movq %rcx, %rbx
addq %r13, %rax
movq %r10, %rax
adcq $0, %rax
setb %r9b
movq -248(%rbp), %rax ## 8-byte Reload
movq %rax, %rdi
movq -136(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rdi
movq %rdi, -368(%rbp) ## 8-byte Spill
mulq %rcx
movq %rdx, -464(%rbp) ## 8-byte Spill
addq %r13, %rbx
movq %rbx, -288(%rbp) ## 8-byte Spill
movq %rdi, %rax
adcq %r10, %rax
movq %rax, -520(%rbp) ## 8-byte Spill
movzbl %r9b, %eax
adcq %rdx, %rax
movq %rax, -568(%rbp) ## 8-byte Spill
movq -224(%rbp), %rax ## 8-byte Reload
movq 40(%rax), %rax
movq %rax, -192(%rbp) ## 8-byte Spill
movq %rax, %rcx
movq -240(%rbp), %rdx ## 8-byte Reload
imulq %rdx, %rcx
movq %rcx, -856(%rbp) ## 8-byte Spill
mulq %rdx
movq %rdx, -592(%rbp) ## 8-byte Spill
xorl %eax, %eax
movq -232(%rbp), %r14 ## 8-byte Reload
cmpq -488(%rbp), %r14 ## 8-byte Folded Reload
setb %al
addq %rcx, %r14
movq %r14, -232(%rbp) ## 8-byte Spill
adcq %rdx, %rax
movq %rax, -96(%rbp) ## 8-byte Spill
movq -208(%rbp), %rax ## 8-byte Reload
movq -496(%rbp), %rcx ## 8-byte Reload
cmpq %rax, %rcx
adcq -128(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -736(%rbp) ## 8-byte Spill
addb $255, -160(%rbp) ## 1-byte Folded Spill
adcq -88(%rbp), %rcx ## 8-byte Folded Reload
setb -744(%rbp) ## 1-byte Folded Spill
adcq %r14, %rax
movq %rax, -888(%rbp) ## 8-byte Spill
movq -80(%rbp), %rdi ## 8-byte Reload
movq %rdi, %rax
movq -120(%rbp), %rcx ## 8-byte Reload
mulq %rcx
movq %rdx, -168(%rbp) ## 8-byte Spill
imulq %rcx, %rdi
movq %rdi, -496(%rbp) ## 8-byte Spill
xorl %eax, %eax
cmpq -152(%rbp), %r8 ## 8-byte Folded Reload
setb %al
movq %rax, -88(%rbp) ## 8-byte Spill
movq -72(%rbp), %rax ## 8-byte Reload
movq %rax, %rdx
movq -112(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rdx
movq %rdx, -720(%rbp) ## 8-byte Spill
mulq %rcx
movq %rdx, %r13
movq %rdx, -296(%rbp) ## 8-byte Spill
xorl %eax, %eax
cmpq -320(%rbp), %r15 ## 8-byte Folded Reload
setb %al
movq %rax, %r11
movq -392(%rbp), %rcx ## 8-byte Reload
movq %rcx, %rax
movq -440(%rbp), %rdi ## 8-byte Reload
mulq %rdi
movq %rdx, %r8
movq %rdx, -400(%rbp) ## 8-byte Spill
imulq %rdi, %rcx
movq %rcx, %r15
movq %rcx, -624(%rbp) ## 8-byte Spill
xorl %r12d, %r12d
movq -272(%rbp), %r14 ## 8-byte Reload
cmpq %r10, %r14
setb %r12b
xorl %edi, %edi
movq -504(%rbp), %r9 ## 8-byte Reload
movq -824(%rbp), %r10 ## 8-byte Reload
cmpq %r9, %r10
setb %dil
movq -200(%rbp), %rax ## 8-byte Reload
movq %rax, %rcx
imulq %rax, %rcx
movq %rcx, %rbx
mulq %rax
movq %rdx, -776(%rbp) ## 8-byte Spill
xorl %eax, %eax
movq -416(%rbp), %rcx ## 8-byte Reload
cmpq %r9, %rcx
setb %al
addq %rcx, %rbx
movq %rbx, -456(%rbp) ## 8-byte Spill
adcq %rdx, %rax
movq %rax, -768(%rbp) ## 8-byte Spill
movq -368(%rbp), %rbx ## 8-byte Reload
addq %r10, %rbx
movq %rbx, -368(%rbp) ## 8-byte Spill
adcq -464(%rbp), %rdi ## 8-byte Folded Reload
movq %rdi, -416(%rbp) ## 8-byte Spill
movq %r14, %rcx
addq %r15, %rcx
movq %rcx, -272(%rbp) ## 8-byte Spill
adcq %r8, %r12
movq %r12, -160(%rbp) ## 8-byte Spill
movq -56(%rbp), %r14 ## 8-byte Reload
addq -720(%rbp), %r14 ## 8-byte Folded Reload
movq %r14, -56(%rbp) ## 8-byte Spill
adcq %r13, %r11
movq %r11, -504(%rbp) ## 8-byte Spill
movq -48(%rbp), %r13 ## 8-byte Reload
addq -496(%rbp), %r13 ## 8-byte Folded Reload
movq %r13, -48(%rbp) ## 8-byte Spill
movq -168(%rbp), %r12 ## 8-byte Reload
movq -88(%rbp), %r9 ## 8-byte Reload
adcq %r12, %r9
addb $255, -264(%rbp) ## 1-byte Folded Spill
movq -600(%rbp), %rax ## 8-byte Reload
adcq -448(%rbp), %rax ## 8-byte Folded Reload
setb -448(%rbp) ## 1-byte Folded Spill
movq -888(%rbp), %r10 ## 8-byte Reload
adcq %r10, %r13
movq -216(%rbp), %r8 ## 8-byte Reload
movq -832(%rbp), %rdi ## 8-byte Reload
cmpq %r8, %rdi
adcq -336(%rbp), %r8 ## 8-byte Folded Reload
addb $255, -648(%rbp) ## 1-byte Folded Spill
adcq -664(%rbp), %rdi ## 8-byte Folded Reload
setb -600(%rbp) ## 1-byte Folded Spill
movq %r8, %r15
adcq %r13, %r15
addb $255, -536(%rbp) ## 1-byte Folded Spill
movq -672(%rbp), %rax ## 8-byte Reload
adcq -280(%rbp), %rax ## 8-byte Folded Reload
setb -264(%rbp) ## 1-byte Folded Spill
adcq %r15, %r14
movq -328(%rbp), %r11 ## 8-byte Reload
movq -840(%rbp), %rdx ## 8-byte Reload
cmpq %r11, %rdx
adcq -64(%rbp), %r11 ## 8-byte Folded Reload
addb $255, -312(%rbp) ## 1-byte Folded Spill
adcq -680(%rbp), %rdx ## 8-byte Folded Reload
setb -312(%rbp) ## 1-byte Folded Spill
movq %r11, %rdx
adcq %r14, %rdx
movq %rdx, -896(%rbp) ## 8-byte Spill
addb $255, -480(%rbp) ## 1-byte Folded Spill
movq -688(%rbp), %rax ## 8-byte Reload
adcq -144(%rbp), %rax ## 8-byte Folded Reload
setb -672(%rbp) ## 1-byte Folded Spill
movq %rcx, %rax
adcq %rdx, %rax
movq %rax, %rcx
movq %rax, -872(%rbp) ## 8-byte Spill
movq -376(%rbp), %rax ## 8-byte Reload
movq -848(%rbp), %rdx ## 8-byte Reload
cmpq %rax, %rdx
adcq -104(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -976(%rbp) ## 8-byte Spill
addb $255, -512(%rbp) ## 1-byte Folded Spill
adcq -752(%rbp), %rdx ## 8-byte Folded Reload
setb -920(%rbp) ## 1-byte Folded Spill
movq %rax, %rdi
adcq %rcx, %rdi
movq %rdi, -936(%rbp) ## 8-byte Spill
addb $255, -256(%rbp) ## 1-byte Folded Spill
movq -760(%rbp), %rax ## 8-byte Reload
adcq -608(%rbp), %rax ## 8-byte Folded Reload
setb -912(%rbp) ## 1-byte Folded Spill
adcq %rdi, %rbx
movq %rbx, -984(%rbp) ## 8-byte Spill
movq -432(%rbp), %rax ## 8-byte Reload
movq -544(%rbp), %rcx ## 8-byte Reload
cmpq %rax, %rcx
adcq -184(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -968(%rbp) ## 8-byte Spill
addb $255, -352(%rbp) ## 1-byte Folded Spill
adcq %rcx, -696(%rbp) ## 8-byte Folded Spill
setb -904(%rbp) ## 1-byte Folded Spill
movq %rax, %rcx
adcq %rbx, %rcx
movq %rcx, -928(%rbp) ## 8-byte Spill
addb $255, -344(%rbp) ## 1-byte Folded Spill
movq -704(%rbp), %rax ## 8-byte Reload
adcq -472(%rbp), %rax ## 8-byte Folded Reload
setb -880(%rbp) ## 1-byte Folded Spill
movq -456(%rbp), %rax ## 8-byte Reload
adcq %rcx, %rax
movq %rax, -664(%rbp) ## 8-byte Spill
addb $255, -304(%rbp) ## 1-byte Folded Spill
movq -864(%rbp), %rcx ## 8-byte Reload
adcq -424(%rbp), %rcx ## 8-byte Folded Reload
setb -848(%rbp) ## 1-byte Folded Spill
movq -528(%rbp), %rdx ## 8-byte Reload
adcq %rax, %rdx
movq %rdx, -824(%rbp) ## 8-byte Spill
addb $255, -792(%rbp) ## 1-byte Folded Spill
movq -288(%rbp), %rax ## 8-byte Reload
adcq -560(%rbp), %rax ## 8-byte Folded Reload
movq -224(%rbp), %rax ## 8-byte Reload
movq 44(%rax), %rax
movq %rax, -224(%rbp) ## 8-byte Spill
setb -832(%rbp) ## 1-byte Folded Spill
movq -520(%rbp), %rcx ## 8-byte Reload
adcq %rdx, %rcx
movq %rcx, -512(%rbp) ## 8-byte Spill
movq %rax, %rdi
movq -240(%rbp), %rdx ## 8-byte Reload
imulq %rdx, %rdi
movq %rdi, -648(%rbp) ## 8-byte Spill
mulq %rdx
movq %rdx, -864(%rbp) ## 8-byte Spill
xorl %eax, %eax
movq -96(%rbp), %rcx ## 8-byte Reload
cmpq -592(%rbp), %rcx ## 8-byte Folded Reload
setb %al
addq %rdi, %rcx
movq %rcx, -96(%rbp) ## 8-byte Spill
adcq %rdx, %rax
movq %rax, -480(%rbp) ## 8-byte Spill
movq -208(%rbp), %rax ## 8-byte Reload
movq -736(%rbp), %rdx ## 8-byte Reload
cmpq %rax, %rdx
adcq -128(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -344(%rbp) ## 8-byte Spill
addb $255, -744(%rbp) ## 1-byte Folded Spill
adcq -232(%rbp), %rdx ## 8-byte Folded Reload
setb -424(%rbp) ## 1-byte Folded Spill
adcq %rcx, %rax
movq %rax, %rbx
movq %rax, -680(%rbp) ## 8-byte Spill
movq -192(%rbp), %rax ## 8-byte Reload
movq %rax, %rdx
movq -120(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rdx
movq %rdx, %rdi
movq %rdx, -792(%rbp) ## 8-byte Spill
mulq %rcx
movq %rdx, -536(%rbp) ## 8-byte Spill
xorl %ecx, %ecx
movq %r9, %rax
cmpq %r12, %r9
setb %cl
addq %rdi, %rax
movq %rax, -88(%rbp) ## 8-byte Spill
adcq %rdx, %rcx
movq %rcx, -256(%rbp) ## 8-byte Spill
addb $255, -448(%rbp) ## 1-byte Folded Spill
adcq %r10, -48(%rbp) ## 8-byte Folded Spill
setb -472(%rbp) ## 1-byte Folded Spill
movq %rax, %rcx
adcq %rbx, %rcx
movq %rcx, -744(%rbp) ## 8-byte Spill
movq -216(%rbp), %rax ## 8-byte Reload
cmpq %rax, %r8
adcq -336(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -736(%rbp) ## 8-byte Spill
addb $255, -600(%rbp) ## 1-byte Folded Spill
adcq %r13, %r8
setb -688(%rbp) ## 1-byte Folded Spill
adcq %rcx, %rax
movq %rax, %r8
movq %rax, -608(%rbp) ## 8-byte Spill
movq -80(%rbp), %rax ## 8-byte Reload
movq %rax, %rcx
movq -112(%rbp), %rdx ## 8-byte Reload
imulq %rdx, %rcx
movq %rcx, %rbx
movq %rcx, -704(%rbp) ## 8-byte Spill
mulq %rdx
movq %rdx, -288(%rbp) ## 8-byte Spill
xorl %eax, %eax
movq -504(%rbp), %rcx ## 8-byte Reload
cmpq -296(%rbp), %rcx ## 8-byte Folded Reload
setb %al
addq %rbx, %rcx
movq %rcx, -504(%rbp) ## 8-byte Spill
adcq %rdx, %rax
movq %rax, -280(%rbp) ## 8-byte Spill
addb $255, -264(%rbp) ## 1-byte Folded Spill
adcq -56(%rbp), %r15 ## 8-byte Folded Reload
setb -560(%rbp) ## 1-byte Folded Spill
movq %rcx, %rax
adcq %r8, %rax
movq %rax, %rcx
movq %rax, -760(%rbp) ## 8-byte Spill
movq -328(%rbp), %rax ## 8-byte Reload
cmpq %rax, %r11
adcq -64(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -752(%rbp) ## 8-byte Spill
addb $255, -312(%rbp) ## 1-byte Folded Spill
adcq %r14, %r11
setb -696(%rbp) ## 1-byte Folded Spill
adcq %rcx, %rax
movq %rax, -448(%rbp) ## 8-byte Spill
movq -72(%rbp), %rax ## 8-byte Reload
movq %rax, %rdx
movq -440(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rdx
movq %rdx, -352(%rbp) ## 8-byte Spill
mulq %rcx
movq %rdx, %r13
movq %rdx, -304(%rbp) ## 8-byte Spill
xorl %r10d, %r10d
movq -160(%rbp), %rcx ## 8-byte Reload
cmpq -400(%rbp), %rcx ## 8-byte Folded Reload
setb %r10b
movq -392(%rbp), %rdi ## 8-byte Reload
movq %rdi, %rax
movq -136(%rbp), %rcx ## 8-byte Reload
mulq %rcx
movq %rdx, %r11
movq %rdx, -232(%rbp) ## 8-byte Spill
imulq %rcx, %rdi
movq %rdi, %r14
movq %rdi, -312(%rbp) ## 8-byte Spill
xorl %eax, %eax
movq -416(%rbp), %rdi ## 8-byte Reload
movq -464(%rbp), %r12 ## 8-byte Reload
cmpq %r12, %rdi
setb %al
movq %rax, %r8
movq -248(%rbp), %rax ## 8-byte Reload
movq %rax, %rbx
movq -200(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rbx
mulq %rcx
movq %rdx, %r9
xorl %eax, %eax
movq -768(%rbp), %r15 ## 8-byte Reload
cmpq -776(%rbp), %r15 ## 8-byte Folded Reload
setb %al
xorl %ecx, %ecx
movq -568(%rbp), %rdx ## 8-byte Reload
cmpq %r12, %rdx
setb %cl
addq %rbx, %rdx
movq %rdx, %r12
movq %r9, -600(%rbp) ## 8-byte Spill
adcq %r9, %rcx
movq %rcx, -464(%rbp) ## 8-byte Spill
addq %r15, %rbx
movq %rbx, -840(%rbp) ## 8-byte Spill
adcq %r9, %rax
movq %rax, -264(%rbp) ## 8-byte Spill
addq %r14, %rdi
movq %rdi, -416(%rbp) ## 8-byte Spill
adcq %r11, %r8
movq %r8, -544(%rbp) ## 8-byte Spill
movq -160(%rbp), %rax ## 8-byte Reload
addq -352(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -160(%rbp) ## 8-byte Spill
adcq %r13, %r10
movq %r10, -144(%rbp) ## 8-byte Spill
addb $255, -672(%rbp) ## 1-byte Folded Spill
movq -896(%rbp), %rcx ## 8-byte Reload
adcq -272(%rbp), %rcx ## 8-byte Folded Reload
setb -272(%rbp) ## 1-byte Folded Spill
movq %rax, %rcx
adcq -448(%rbp), %rcx ## 8-byte Folded Reload
movq %rcx, -672(%rbp) ## 8-byte Spill
movq -376(%rbp), %rax ## 8-byte Reload
movq -976(%rbp), %rdx ## 8-byte Reload
cmpq %rax, %rdx
adcq -104(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -888(%rbp) ## 8-byte Spill
addb $255, -920(%rbp) ## 1-byte Folded Spill
adcq -872(%rbp), %rdx ## 8-byte Folded Reload
setb -568(%rbp) ## 1-byte Folded Spill
movq %rax, %rdx
adcq %rcx, %rdx
movq %rdx, -896(%rbp) ## 8-byte Spill
addb $255, -912(%rbp) ## 1-byte Folded Spill
movq -936(%rbp), %rax ## 8-byte Reload
adcq -368(%rbp), %rax ## 8-byte Folded Reload
setb -912(%rbp) ## 1-byte Folded Spill
movq %rdi, %rax
adcq %rdx, %rax
movq %rax, %rcx
movq %rax, -952(%rbp) ## 8-byte Spill
movq -432(%rbp), %rax ## 8-byte Reload
movq -968(%rbp), %rdx ## 8-byte Reload
cmpq %rax, %rdx
adcq -184(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -1000(%rbp) ## 8-byte Spill
addb $255, -904(%rbp) ## 1-byte Folded Spill
adcq -984(%rbp), %rdx ## 8-byte Folded Reload
setb -904(%rbp) ## 1-byte Folded Spill
movq %rax, %rdi
adcq %rcx, %rdi
movq %rdi, -872(%rbp) ## 8-byte Spill
addb $255, -880(%rbp) ## 1-byte Folded Spill
movq -928(%rbp), %rax ## 8-byte Reload
adcq -456(%rbp), %rax ## 8-byte Folded Reload
setb -880(%rbp) ## 1-byte Folded Spill
movq %rbx, %rax
adcq %rdi, %rax
movq %rax, %rdi
movq %rax, -960(%rbp) ## 8-byte Spill
movq -176(%rbp), %rax ## 8-byte Reload
movq -528(%rbp), %rcx ## 8-byte Reload
cmpq %rax, %rcx
adcq -384(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -944(%rbp) ## 8-byte Spill
addb $255, -848(%rbp) ## 1-byte Folded Spill
adcq %rcx, -664(%rbp) ## 8-byte Folded Spill
setb -928(%rbp) ## 1-byte Folded Spill
adcq %rdi, %rax
addb $255, -832(%rbp) ## 1-byte Folded Spill
movq -824(%rbp), %rcx ## 8-byte Reload
adcq -520(%rbp), %rcx ## 8-byte Folded Reload
setb %cl
movq %r12, %rdi
adcq %rax, %rdi
movq %rdi, %r8
movq %rdi, -1016(%rbp) ## 8-byte Spill
addb $255, %cl
adcq %r12, %rax
setb -652(%rbp) ## 1-byte Folded Spill
movq -576(%rbp), %r15 ## 8-byte Reload
movq %r15, %r12
imulq -728(%rbp), %r12 ## 8-byte Folded Reload
movq %r15, %rdi
shlq $32, %rdi
movq %r15, %rax
movl $4294967295, %ecx ## imm = 0xFFFFFFFF
mulq %rcx
movq %rdx, %r13
xorl %eax, %eax
subq %r15, %rdi
movq %rdi, -48(%rbp) ## 8-byte Spill
setb %al
movq -784(%rbp), %rdi ## 8-byte Reload
leaq (%rdi,%rax), %rbx
xorl %ecx, %ecx
addq %rdx, %rbx
movq %rbx, %r14
movq %rdx, -240(%rbp) ## 8-byte Spill
setb %cl
addq %rax, %rdi
adcq -616(%rbp), %rcx ## 8-byte Folded Reload
setb %r10b
movq %r12, %r9
movq -712(%rbp), %r11 ## 8-byte Reload
adcq %r11, %r9
movq %r9, -784(%rbp) ## 8-byte Spill
movq -584(%rbp), %rbx ## 8-byte Reload
addq -360(%rbp), %rbx ## 8-byte Folded Reload
movq -552(%rbp), %rdx ## 8-byte Reload
adcq -408(%rbp), %rdx ## 8-byte Folded Reload
movq %rdx, -552(%rbp) ## 8-byte Spill
movq %r14, %rax
movq -800(%rbp), %rdi ## 8-byte Reload
addq %rdi, %rax
movq %rbx, %rax
adcq %rcx, %rax
movq %rax, -768(%rbp) ## 8-byte Spill
addq %rdi, %r14
movq %r14, -520(%rbp) ## 8-byte Spill
adcq %rcx, %rbx
setb -800(%rbp) ## 1-byte Folded Spill
movq %rdx, %rax
adcq %r9, %rax
movq %rax, -776(%rbp) ## 8-byte Spill
movq %r15, %rax
movl $4294967294, %ecx ## imm = 0xFFFFFFFE
mulq %rcx
movq -48(%rbp), %r9 ## 8-byte Reload
subq %r15, %r9
addq %r13, %r9
movq %r9, -48(%rbp) ## 8-byte Spill
movq %r12, -56(%rbp) ## 8-byte Spill
adcq %r12, %rdx
movq %rdx, -1008(%rbp) ## 8-byte Spill
addb $255, %r10b
adcq %r12, %r11
setb -651(%rbp) ## 1-byte Folded Spill
movq -512(%rbp), %rax ## 8-byte Reload
adcq %r9, %rax
setb -653(%rbp) ## 1-byte Folded Spill
movq %rdx, %rax
adcq %r8, %rax
movq %rax, -848(%rbp) ## 8-byte Spill
movq -344(%rbp), %rcx ## 8-byte Reload
cmpq -208(%rbp), %rcx ## 8-byte Folded Reload
movq -128(%rbp), %rax ## 8-byte Reload
adcq $0, %rax
movq %rax, -128(%rbp) ## 8-byte Spill
addb $255, -424(%rbp) ## 1-byte Folded Spill
adcq -96(%rbp), %rcx ## 8-byte Folded Reload
setb -616(%rbp) ## 1-byte Folded Spill
movq %rax, %rcx
adcq -480(%rbp), %rcx ## 8-byte Folded Reload
movq %rcx, %rdx
movq %rcx, -824(%rbp) ## 8-byte Spill
movq -224(%rbp), %rax ## 8-byte Reload
imulq -120(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -424(%rbp) ## 8-byte Spill
addb $255, -472(%rbp) ## 1-byte Folded Spill
movq -680(%rbp), %rcx ## 8-byte Reload
adcq %rcx, -88(%rbp) ## 8-byte Folded Spill
setb -832(%rbp) ## 1-byte Folded Spill
movq -256(%rbp), %rcx ## 8-byte Reload
leaq (%rcx,%rax), %rax
adcq %rdx, %rax
movq %rax, %rcx
movq %rax, -984(%rbp) ## 8-byte Spill
movq -216(%rbp), %rax ## 8-byte Reload
movq -736(%rbp), %rdx ## 8-byte Reload
cmpq %rax, %rdx
adcq -336(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -968(%rbp) ## 8-byte Spill
addb $255, -688(%rbp) ## 1-byte Folded Spill
adcq -744(%rbp), %rdx ## 8-byte Folded Reload
setb -920(%rbp) ## 1-byte Folded Spill
adcq %rcx, %rax
movq %rax, -472(%rbp) ## 8-byte Spill
movq -192(%rbp), %rax ## 8-byte Reload
movq %rax, %rcx
movq -112(%rbp), %rdx ## 8-byte Reload
imulq %rdx, %rcx
movq %rcx, -584(%rbp) ## 8-byte Spill
mulq %rdx
movq %rdx, -368(%rbp) ## 8-byte Spill
xorl %eax, %eax
movq -280(%rbp), %rcx ## 8-byte Reload
cmpq -288(%rbp), %rcx ## 8-byte Folded Reload
setb %al
movq %rax, -96(%rbp) ## 8-byte Spill
movq -80(%rbp), %rax ## 8-byte Reload
movq %rax, %rdx
movq -440(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rdx
movq %rdx, -208(%rbp) ## 8-byte Spill
mulq %rcx
movq %rdx, -456(%rbp) ## 8-byte Spill
xorl %r11d, %r11d
movq -144(%rbp), %rcx ## 8-byte Reload
cmpq -304(%rbp), %rcx ## 8-byte Folded Reload
setb %r11b
movq -72(%rbp), %rax ## 8-byte Reload
movq %rax, %r12
movq -136(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %r12
movq %r12, -528(%rbp) ## 8-byte Spill
mulq %rcx
movq %rdx, -576(%rbp) ## 8-byte Spill
xorl %edi, %edi
movq -544(%rbp), %r14 ## 8-byte Reload
cmpq -232(%rbp), %r14 ## 8-byte Folded Reload
setb %dil
movq -392(%rbp), %rax ## 8-byte Reload
movq %rax, %r9
movq -200(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %r9
movq %r9, -688(%rbp) ## 8-byte Spill
mulq %rcx
movq %rdx, %r8
movq %rdx, -976(%rbp) ## 8-byte Spill
xorl %ebx, %ebx
movq -264(%rbp), %r10 ## 8-byte Reload
movq -600(%rbp), %r13 ## 8-byte Reload
cmpq %r13, %r10
setb %bl
movq -248(%rbp), %rax ## 8-byte Reload
movq %rax, %r15
imulq %rax, %r15
mulq %rax
movq %rdx, -664(%rbp) ## 8-byte Spill
xorl %eax, %eax
movq -464(%rbp), %rcx ## 8-byte Reload
cmpq %r13, %rcx
setb %al
addq %rcx, %r15
adcq %rdx, %rax
movq %rax, -600(%rbp) ## 8-byte Spill
addq %r9, %r10
movq %r10, -264(%rbp) ## 8-byte Spill
adcq %r8, %rbx
movq %rbx, -464(%rbp) ## 8-byte Spill
addq %r12, %r14
movq %r14, -544(%rbp) ## 8-byte Spill
adcq -576(%rbp), %rdi ## 8-byte Folded Reload
movq %rdi, -344(%rbp) ## 8-byte Spill
movq -144(%rbp), %rdx ## 8-byte Reload
movq -208(%rbp), %r13 ## 8-byte Reload
addq %r13, %rdx
movq %rdx, -144(%rbp) ## 8-byte Spill
adcq -456(%rbp), %r11 ## 8-byte Folded Reload
movq %r11, -88(%rbp) ## 8-byte Spill
movq -280(%rbp), %rax ## 8-byte Reload
addq -584(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -280(%rbp) ## 8-byte Spill
movq -368(%rbp), %rcx ## 8-byte Reload
adcq %rcx, -96(%rbp) ## 8-byte Folded Spill
addb $255, -560(%rbp) ## 1-byte Folded Spill
movq -608(%rbp), %rcx ## 8-byte Reload
adcq %rcx, -504(%rbp) ## 8-byte Folded Spill
setb -650(%rbp) ## 1-byte Folded Spill
movq %rax, %rcx
adcq -472(%rbp), %rcx ## 8-byte Folded Reload
movq %rcx, %rax
movq %rcx, -992(%rbp) ## 8-byte Spill
movq -328(%rbp), %r8 ## 8-byte Reload
movq -752(%rbp), %rdi ## 8-byte Reload
cmpq %r8, %rdi
adcq -64(%rbp), %r8 ## 8-byte Folded Reload
addb $255, -696(%rbp) ## 1-byte Folded Spill
adcq -760(%rbp), %rdi ## 8-byte Folded Reload
setb -649(%rbp) ## 1-byte Folded Spill
movq %r8, %rcx
adcq %rax, %rcx
movq %rcx, -680(%rbp) ## 8-byte Spill
addb $255, -272(%rbp) ## 1-byte Folded Spill
movq -448(%rbp), %rax ## 8-byte Reload
adcq %rax, -160(%rbp) ## 8-byte Folded Spill
setb -712(%rbp) ## 1-byte Folded Spill
movq %rdx, %rax
adcq %rcx, %rax
movq %rax, %rdx
movq %rax, -744(%rbp) ## 8-byte Spill
movq -376(%rbp), %rax ## 8-byte Reload
movq -888(%rbp), %rcx ## 8-byte Reload
cmpq %rax, %rcx
adcq -104(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -736(%rbp) ## 8-byte Spill
addb $255, -568(%rbp) ## 1-byte Folded Spill
adcq -672(%rbp), %rcx ## 8-byte Folded Reload
setb -568(%rbp) ## 1-byte Folded Spill
movq %rax, %rcx
adcq %rdx, %rcx
movq %rcx, -672(%rbp) ## 8-byte Spill
addb $255, -912(%rbp) ## 1-byte Folded Spill
movq -896(%rbp), %rax ## 8-byte Reload
adcq -416(%rbp), %rax ## 8-byte Folded Reload
setb -560(%rbp) ## 1-byte Folded Spill
adcq %rcx, %r14
movq %r14, -888(%rbp) ## 8-byte Spill
movq -432(%rbp), %rax ## 8-byte Reload
movq -1000(%rbp), %rcx ## 8-byte Reload
cmpq %rax, %rcx
adcq -184(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -936(%rbp) ## 8-byte Spill
addb $255, -904(%rbp) ## 1-byte Folded Spill
adcq -952(%rbp), %rcx ## 8-byte Folded Reload
setb -608(%rbp) ## 1-byte Folded Spill
movq %rax, %rdi
adcq %r14, %rdi
movq %rdi, -896(%rbp) ## 8-byte Spill
addb $255, -880(%rbp) ## 1-byte Folded Spill
movq -872(%rbp), %rax ## 8-byte Reload
adcq -840(%rbp), %rax ## 8-byte Folded Reload
setb -912(%rbp) ## 1-byte Folded Spill
adcq %rdi, %r10
movq %r10, -880(%rbp) ## 8-byte Spill
movq -176(%rbp), %rax ## 8-byte Reload
movq -944(%rbp), %rdi ## 8-byte Reload
cmpq %rax, %rdi
movq %rax, %rbx
adcq -384(%rbp), %rbx ## 8-byte Folded Reload
movq %rbx, -872(%rbp) ## 8-byte Spill
addb $255, -928(%rbp) ## 1-byte Folded Spill
adcq -960(%rbp), %rdi ## 8-byte Folded Reload
setb -904(%rbp) ## 1-byte Folded Spill
adcq %r10, %rbx
movb -652(%rbp), %cl ## 1-byte Reload
movl %ecx, %eax
addb $255, %al
movq %r15, %rax
adcq %rbx, %rax
movq %rax, %r12
movq %rax, -960(%rbp) ## 8-byte Spill
movq -224(%rbp), %rax ## 8-byte Reload
mulq -120(%rbp) ## 8-byte Folded Reload
movq %rdx, -416(%rbp) ## 8-byte Spill
xorl %r11d, %r11d
movq -256(%rbp), %rdi ## 8-byte Reload
cmpq -536(%rbp), %rdi ## 8-byte Folded Reload
setb %r11b
addq -424(%rbp), %rdi ## 8-byte Folded Reload
adcq %rdx, %r11
movq %r11, -944(%rbp) ## 8-byte Spill
addb $255, %cl
adcq %r15, %rbx
setb -928(%rbp) ## 1-byte Folded Spill
movq -80(%rbp), %r14 ## 8-byte Reload
movq %r14, %rax
movq -136(%rbp), %r10 ## 8-byte Reload
mulq %r10
movq %rdx, -272(%rbp) ## 8-byte Spill
imulq %r10, %r14
movq %r14, -360(%rbp) ## 8-byte Spill
movq -488(%rbp), %rax ## 8-byte Reload
addq %rax, -496(%rbp) ## 8-byte Folded Spill
movq -168(%rbp), %rax ## 8-byte Reload
movq -704(%rbp), %rcx ## 8-byte Reload
adcq %rcx, %rax
movq %rax, -168(%rbp) ## 8-byte Spill
cmpq %rcx, %rax
movq -288(%rbp), %rcx ## 8-byte Reload
adcq $0, %rcx
movq %rcx, %rax
addq %r13, %rax
movq -456(%rbp), %rbx ## 8-byte Reload
movq %rbx, %rax
adcq $0, %rax
setb %al
movzbl %al, %eax
addq %r13, %rcx
movq %rcx, -288(%rbp) ## 8-byte Spill
movq %r14, %rcx
adcq %rbx, %rcx
movq %rbx, %r14
movq %rcx, -840(%rbp) ## 8-byte Spill
adcq %rdx, %rax
movq %rax, -160(%rbp) ## 8-byte Spill
addb $255, -651(%rbp) ## 1-byte Folded Spill
movq -48(%rbp), %r9 ## 8-byte Reload
adcq -512(%rbp), %r9 ## 8-byte Folded Reload
movq %r9, -48(%rbp) ## 8-byte Spill
movq -632(%rbp), %rax ## 8-byte Reload
addq %rax, -640(%rbp) ## 8-byte Folded Spill
movq -152(%rbp), %rax ## 8-byte Reload
movq -720(%rbp), %rcx ## 8-byte Reload
adcq %rcx, %rax
movq %rax, -152(%rbp) ## 8-byte Spill
cmpq %rcx, %rax
movq -296(%rbp), %rcx ## 8-byte Reload
adcq $0, %rcx
movq %rcx, %rax
movq -352(%rbp), %rbx ## 8-byte Reload
addq %rbx, %rax
movq -304(%rbp), %rdx ## 8-byte Reload
movq %rdx, %rax
adcq $0, %rax
setb %al
addq %rbx, %rcx
movq %rcx, -296(%rbp) ## 8-byte Spill
adcq %rdx, -528(%rbp) ## 8-byte Folded Spill
movzbl %al, %eax
movq -576(%rbp), %r10 ## 8-byte Reload
adcq %r10, %rax
movq %rax, -208(%rbp) ## 8-byte Spill
movq -56(%rbp), %rax ## 8-byte Reload
movq -1008(%rbp), %rcx ## 8-byte Reload
cmpq %rax, %rcx
adcq -240(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -952(%rbp) ## 8-byte Spill
addb $255, -653(%rbp) ## 1-byte Folded Spill
adcq -1016(%rbp), %rcx ## 8-byte Folded Reload
setb -304(%rbp) ## 1-byte Folded Spill
adcq %r12, %rax
movq %rax, -120(%rbp) ## 8-byte Spill
movq -552(%rbp), %rax ## 8-byte Reload
cmpq -408(%rbp), %rax ## 8-byte Folded Reload
movq -320(%rbp), %rcx ## 8-byte Reload
adcq $0, %rcx
addb $255, -800(%rbp) ## 1-byte Folded Spill
adcq -784(%rbp), %rax ## 8-byte Folded Reload
setb -488(%rbp) ## 1-byte Folded Spill
movq -624(%rbp), %rdx ## 8-byte Reload
leaq (%rcx,%rdx), %rax
adcq %r9, %rax
movq %rax, -632(%rbp) ## 8-byte Spill
movq %rcx, %rax
addq %rdx, %rax
movq %rdx, %rbx
movq -400(%rbp), %rdx ## 8-byte Reload
movq %rdx, %rax
adcq $0, %rax
setb %al
addq %rbx, %rcx
movq %rcx, -320(%rbp) ## 8-byte Spill
adcq %rdx, -312(%rbp) ## 8-byte Folded Spill
movzbl %al, %r13d
adcq -232(%rbp), %r13 ## 8-byte Folded Reload
addb $255, -616(%rbp) ## 1-byte Folded Spill
movq -128(%rbp), %rax ## 8-byte Reload
adcq -480(%rbp), %rax ## 8-byte Folded Reload
adcq $0, %r11
addb $255, -832(%rbp) ## 1-byte Folded Spill
adcq -824(%rbp), %rdi ## 8-byte Folded Reload
adcq $0, %r11
movq %r11, -408(%rbp) ## 8-byte Spill
movq -968(%rbp), %rdx ## 8-byte Reload
cmpq -216(%rbp), %rdx ## 8-byte Folded Reload
movq -336(%rbp), %rax ## 8-byte Reload
adcq $0, %rax
movq %rax, -336(%rbp) ## 8-byte Spill
addb $255, -920(%rbp) ## 1-byte Folded Spill
adcq -984(%rbp), %rdx ## 8-byte Folded Reload
setb -616(%rbp) ## 1-byte Folded Spill
movq %rax, %rdx
adcq %r11, %rdx
movq %rdx, %rbx
movq %rdx, -752(%rbp) ## 8-byte Spill
movq -224(%rbp), %rax ## 8-byte Reload
movq %rax, %rcx
movq -112(%rbp), %rdx ## 8-byte Reload
imulq %rdx, %rcx
movq %rcx, -504(%rbp) ## 8-byte Spill
mulq %rdx
movq %rdx, -552(%rbp) ## 8-byte Spill
xorl %edi, %edi
movq -96(%rbp), %r15 ## 8-byte Reload
cmpq -368(%rbp), %r15 ## 8-byte Folded Reload
setb %dil
movq %rdi, -128(%rbp) ## 8-byte Spill
movq %r15, %rax
addq %rcx, %rax
movq %rdi, %rcx
adcq %rdx, %rcx
movq %rcx, -696(%rbp) ## 8-byte Spill
addb $255, -650(%rbp) ## 1-byte Folded Spill
movq -280(%rbp), %rcx ## 8-byte Reload
adcq -472(%rbp), %rcx ## 8-byte Folded Reload
setb -760(%rbp) ## 1-byte Folded Spill
adcq %rbx, %rax
movq %rax, %rcx
movq %rax, -800(%rbp) ## 8-byte Spill
movq -328(%rbp), %rax ## 8-byte Reload
cmpq %rax, %r8
adcq -64(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -784(%rbp) ## 8-byte Spill
addb $255, -649(%rbp) ## 1-byte Folded Spill
adcq -992(%rbp), %r8 ## 8-byte Folded Reload
setb -448(%rbp) ## 1-byte Folded Spill
adcq %rcx, %rax
movq %rax, -480(%rbp) ## 8-byte Spill
movq -192(%rbp), %rax ## 8-byte Reload
movq %rax, %rdx
movq -440(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rdx
movq %rdx, -256(%rbp) ## 8-byte Spill
mulq %rcx
movq %rdx, -352(%rbp) ## 8-byte Spill
xorl %r8d, %r8d
cmpq %r14, -88(%rbp) ## 8-byte Folded Reload
setb %r8b
xorl %eax, %eax
cmpq %r10, -344(%rbp) ## 8-byte Folded Reload
setb %al
movq %rax, %r15
movq -72(%rbp), %rax ## 8-byte Reload
movq %rax, %rdx
movq -200(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rdx
movq %rdx, %rdi
mulq %rcx
movq %rdx, -400(%rbp) ## 8-byte Spill
xorl %r10d, %r10d
movq -976(%rbp), %rbx ## 8-byte Reload
cmpq %rbx, -464(%rbp) ## 8-byte Folded Reload
setb %r10b
movq -392(%rbp), %rax ## 8-byte Reload
movq %rax, %r9
movq -248(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %r9
mulq %rcx
xorl %eax, %eax
movq -600(%rbp), %r12 ## 8-byte Reload
cmpq -664(%rbp), %r12 ## 8-byte Folded Reload
setb %al
movq %rax, %r14
xorl %eax, %eax
cmpq -232(%rbp), %r13 ## 8-byte Folded Reload
setb %al
addq -688(%rbp), %r13 ## 8-byte Folded Reload
movq %r13, -216(%rbp) ## 8-byte Spill
movq %rbx, %rcx
movq %rax, %rbx
adcq %rcx, %rbx
xorl %eax, %eax
cmpq %rcx, %rbx
movq %rbx, %rcx
setb %al
movq %rax, %r11
xorl %eax, %eax
movq -208(%rbp), %rbx ## 8-byte Reload
cmpq -576(%rbp), %rbx ## 8-byte Folded Reload
setb %al
addq %rdi, %rbx
movq %rbx, -208(%rbp) ## 8-byte Spill
movq -400(%rbp), %r13 ## 8-byte Reload
adcq %r13, %rax
movq %rax, -280(%rbp) ## 8-byte Spill
addq %r9, %rcx
movq %rcx, -112(%rbp) ## 8-byte Spill
movq %rdx, -456(%rbp) ## 8-byte Spill
adcq %rdx, %r11
movq %r11, -472(%rbp) ## 8-byte Spill
addq %r12, %r9
adcq %rdx, %r14
movq %r14, -624(%rbp) ## 8-byte Spill
addq -464(%rbp), %rdi ## 8-byte Folded Reload
movq %rdi, -464(%rbp) ## 8-byte Spill
adcq %r13, %r10
movq %r10, -720(%rbp) ## 8-byte Spill
movq -360(%rbp), %rdx ## 8-byte Reload
addq -344(%rbp), %rdx ## 8-byte Folded Reload
movq %rdx, -360(%rbp) ## 8-byte Spill
adcq -272(%rbp), %r15 ## 8-byte Folded Reload
movq %r15, -688(%rbp) ## 8-byte Spill
movq -88(%rbp), %rax ## 8-byte Reload
addq -256(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -88(%rbp) ## 8-byte Spill
adcq -352(%rbp), %r8 ## 8-byte Folded Reload
movq %r8, -232(%rbp) ## 8-byte Spill
addb $255, -712(%rbp) ## 1-byte Folded Spill
movq -680(%rbp), %rcx ## 8-byte Reload
adcq -144(%rbp), %rcx ## 8-byte Folded Reload
setb -600(%rbp) ## 1-byte Folded Spill
movq %rax, %rcx
adcq -480(%rbp), %rcx ## 8-byte Folded Reload
movq %rcx, %rbx
movq %rcx, -824(%rbp) ## 8-byte Spill
movq -376(%rbp), %rax ## 8-byte Reload
movq -736(%rbp), %rcx ## 8-byte Reload
cmpq %rax, %rcx
adcq -104(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -920(%rbp) ## 8-byte Spill
addb $255, -568(%rbp) ## 1-byte Folded Spill
adcq -744(%rbp), %rcx ## 8-byte Folded Reload
setb -832(%rbp) ## 1-byte Folded Spill
movq %rax, %rcx
adcq %rbx, %rcx
movq %rcx, -736(%rbp) ## 8-byte Spill
addb $255, -560(%rbp) ## 1-byte Folded Spill
movq -672(%rbp), %rax ## 8-byte Reload
adcq -544(%rbp), %rax ## 8-byte Folded Reload
setb -680(%rbp) ## 1-byte Folded Spill
movq %rdx, %rax
adcq %rcx, %rax
movq %rax, %rdx
movq %rax, -744(%rbp) ## 8-byte Spill
movq -432(%rbp), %rax ## 8-byte Reload
movq -936(%rbp), %rcx ## 8-byte Reload
cmpq %rax, %rcx
adcq -184(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -664(%rbp) ## 8-byte Spill
addb $255, -608(%rbp) ## 1-byte Folded Spill
adcq -888(%rbp), %rcx ## 8-byte Folded Reload
setb -672(%rbp) ## 1-byte Folded Spill
movq %rax, %rcx
adcq %rdx, %rcx
movq %rcx, -568(%rbp) ## 8-byte Spill
addb $255, -912(%rbp) ## 1-byte Folded Spill
movq -896(%rbp), %rax ## 8-byte Reload
adcq -264(%rbp), %rax ## 8-byte Folded Reload
setb -712(%rbp) ## 1-byte Folded Spill
movq %rdi, %rax
adcq %rcx, %rax
movq %rax, %rcx
movq %rax, -560(%rbp) ## 8-byte Spill
movq -176(%rbp), %rax ## 8-byte Reload
movq -872(%rbp), %rdx ## 8-byte Reload
cmpq %rax, %rdx
adcq -384(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -608(%rbp) ## 8-byte Spill
addb $255, -904(%rbp) ## 1-byte Folded Spill
adcq -880(%rbp), %rdx ## 8-byte Folded Reload
setb -704(%rbp) ## 1-byte Folded Spill
adcq %rcx, %rax
movb -928(%rbp), %bl ## 1-byte Reload
movl %ebx, %edx
addb $255, %dl
movq %r9, %r12
adcq %rax, %r12
xorl %ecx, %ecx
movq -944(%rbp), %rdx ## 8-byte Reload
cmpq %rdx, -408(%rbp) ## 8-byte Folded Reload
setb %cl
movq -128(%rbp), %rdx ## 8-byte Reload
addq -552(%rbp), %rdx ## 8-byte Folded Reload
movq -504(%rbp), %rdi ## 8-byte Reload
addq %rdi, -96(%rbp) ## 8-byte Folded Spill
adcq %rdx, %rcx
movq %rcx, -144(%rbp) ## 8-byte Spill
movq -56(%rbp), %rcx ## 8-byte Reload
movq -952(%rbp), %rdx ## 8-byte Reload
cmpq %rcx, %rdx
adcq -240(%rbp), %rcx ## 8-byte Folded Reload
movq %rcx, -512(%rbp) ## 8-byte Spill
addb $255, -304(%rbp) ## 1-byte Folded Spill
adcq -960(%rbp), %rdx ## 8-byte Folded Reload
setb -304(%rbp) ## 1-byte Folded Spill
adcq %r12, %rcx
movq %rcx, -544(%rbp) ## 8-byte Spill
addb $255, %bl
adcq %r9, %rax
setb -576(%rbp) ## 1-byte Folded Spill
addb $255, -488(%rbp) ## 1-byte Folded Spill
movq -320(%rbp), %rax ## 8-byte Reload
adcq -48(%rbp), %rax ## 8-byte Folded Reload
setb %r8b
movq -312(%rbp), %rdi ## 8-byte Reload
movq %rdi, %rax
movq -848(%rbp), %r10 ## 8-byte Reload
adcq %r10, %rax
movq %rax, -264(%rbp) ## 8-byte Spill
movq -520(%rbp), %rcx ## 8-byte Reload
movq %rcx, %r13
imulq -728(%rbp), %r13 ## 8-byte Folded Reload
movq %rcx, %r9
shlq $32, %r9
movq %rcx, %rax
movl $4294967295, %edx ## imm = 0xFFFFFFFF
mulq %rdx
movq %rdx, -128(%rbp) ## 8-byte Spill
xorl %eax, %eax
subq %rcx, %r9
setb %al
movq -768(%rbp), %rcx ## 8-byte Reload
leaq (%rcx,%rax), %r14
xorl %ebx, %ebx
addq %rdx, %r14
setb %bl
addq %rax, %rcx
adcq -776(%rbp), %rbx ## 8-byte Folded Reload
setb -344(%rbp) ## 1-byte Folded Spill
movq %r13, %r15
movq -632(%rbp), %r11 ## 8-byte Reload
adcq %r11, %r15
addb $255, %r8b
adcq %r10, %rdi
setb %al
movq -216(%rbp), %rcx ## 8-byte Reload
movq %rcx, %rdx
movq -120(%rbp), %r10 ## 8-byte Reload
adcq %r10, %rdx
movq %rdx, -312(%rbp) ## 8-byte Spill
movq %r14, %rdi
movq -816(%rbp), %rdx ## 8-byte Reload
addq %rdx, %rdi
movq -640(%rbp), %rdi ## 8-byte Reload
movq %rdi, %r8
adcq %rbx, %r8
addq %rdx, %r14
adcq %rdi, %rbx
setb %bl
movq -152(%rbp), %rdi ## 8-byte Reload
movq %rdi, %rdx
adcq %r15, %rdx
movq %rdx, -816(%rbp) ## 8-byte Spill
addb $255, %al
adcq %r10, %rcx
setb -488(%rbp) ## 1-byte Folded Spill
movq -112(%rbp), %rax ## 8-byte Reload
adcq -544(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -320(%rbp) ## 8-byte Spill
addb $255, -304(%rbp) ## 1-byte Folded Spill
adcq -512(%rbp), %r12 ## 8-byte Folded Reload
setb -640(%rbp) ## 1-byte Folded Spill
movq -520(%rbp), %rcx ## 8-byte Reload
movq %rcx, %rax
movl $4294967294, %edx ## imm = 0xFFFFFFFE
mulq %rdx
movq %rdx, %r12
subq %rcx, %r9
addq -128(%rbp), %r9 ## 8-byte Folded Reload
movq %r13, -48(%rbp) ## 8-byte Spill
adcq %r13, %r12
addb $255, -344(%rbp) ## 1-byte Folded Spill
adcq %r13, %r11
setb %r13b
movq %r9, %rax
movq -264(%rbp), %r11 ## 8-byte Reload
adcq %r11, %rax
movq %rax, -344(%rbp) ## 8-byte Spill
addb $255, %bl
adcq %rdi, %r15
setb -304(%rbp) ## 1-byte Folded Spill
movq -296(%rbp), %r15 ## 8-byte Reload
adcq %rax, %r15
movq %r14, %rdi
movq %r14, -776(%rbp) ## 8-byte Spill
movq %r14, %rax
imulq -728(%rbp), %rax ## 8-byte Folded Reload
movq %rax, %rbx
movq %rax, -216(%rbp) ## 8-byte Spill
shlq $32, %r14
movq %rdi, %rax
movl $4294967295, %edx ## imm = 0xFFFFFFFF
mulq %rdx
movq %rdx, %rcx
movq %rdx, -120(%rbp) ## 8-byte Spill
xorl %eax, %eax
subq %rdi, %r14
setb %al
leaq (%r8,%rax), %rdx
xorl %edi, %edi
addq %rcx, %rdx
setb %dil
addq %rax, %r8
adcq -816(%rbp), %rdi ## 8-byte Folded Reload
setb -816(%rbp) ## 1-byte Folded Spill
movq %rbx, %r8
adcq %r15, %r8
addb $255, %r13b
adcq %r11, %r9
setb %r11b
movq %r12, %r10
movq -312(%rbp), %r9 ## 8-byte Reload
adcq %r9, %r10
movq %rdx, %rcx
movq -808(%rbp), %rax ## 8-byte Reload
addq %rax, %rcx
movq -496(%rbp), %rbx ## 8-byte Reload
movq %rbx, %rcx
adcq %rdi, %rcx
movq %rcx, -264(%rbp) ## 8-byte Spill
addq %rax, %rdx
movq %rdx, -152(%rbp) ## 8-byte Spill
adcq %rbx, %rdi
movq -168(%rbp), %rax ## 8-byte Reload
setb -808(%rbp) ## 1-byte Folded Spill
adcq %r8, %rax
movq %rax, -496(%rbp) ## 8-byte Spill
movq -48(%rbp), %rax ## 8-byte Reload
cmpq %rax, %r12
movq %rax, %r13
adcq -128(%rbp), %r13 ## 8-byte Folded Reload
movq %r13, -520(%rbp) ## 8-byte Spill
addb $255, %r11b
adcq %r9, %r12
setb -312(%rbp) ## 1-byte Folded Spill
adcq -320(%rbp), %r13 ## 8-byte Folded Reload
addb $255, -488(%rbp) ## 1-byte Folded Spill
movq -544(%rbp), %rax ## 8-byte Reload
adcq -112(%rbp), %rax ## 8-byte Folded Reload
setb -632(%rbp) ## 1-byte Folded Spill
addb $255, -304(%rbp) ## 1-byte Folded Spill
movq -344(%rbp), %rax ## 8-byte Reload
adcq -296(%rbp), %rax ## 8-byte Folded Reload
movq -528(%rbp), %rax ## 8-byte Reload
movq %rax, %r11
setb %dl
adcq %r10, %r11
movq -776(%rbp), %rbx ## 8-byte Reload
subq %rbx, %r14
addb $255, -816(%rbp) ## 1-byte Folded Spill
movq -216(%rbp), %r12 ## 8-byte Reload
adcq %r12, %r15
movq -120(%rbp), %rdi ## 8-byte Reload
leaq (%r14,%rdi), %rcx
setb -112(%rbp) ## 1-byte Folded Spill
adcq %r11, %rcx
addb $255, -808(%rbp) ## 1-byte Folded Spill
adcq -168(%rbp), %r8 ## 8-byte Folded Reload
movq -288(%rbp), %r9 ## 8-byte Reload
setb %r8b
adcq %rcx, %r9
addb $255, %dl
adcq %rax, %r10
movq -208(%rbp), %r15 ## 8-byte Reload
setb %r10b
adcq %r13, %r15
addb $255, -312(%rbp) ## 1-byte Folded Spill
movq -320(%rbp), %rax ## 8-byte Reload
adcq -520(%rbp), %rax ## 8-byte Folded Reload
setb -488(%rbp) ## 1-byte Folded Spill
movq %rbx, %rax
movl $4294967294, %edx ## imm = 0xFFFFFFFE
mulq %rdx
addq %rdi, %r14
adcq %r12, %rdx
addb $255, -112(%rbp) ## 1-byte Folded Spill
adcq %r11, %r14
movq %rdx, %rax
movq %rdx, %r14
movq %rdx, -848(%rbp) ## 8-byte Spill
setb %r11b
adcq %r15, %rax
addb $255, %r8b
adcq -288(%rbp), %rcx ## 8-byte Folded Reload
movq -840(%rbp), %rdx ## 8-byte Reload
movq %rdx, %rdi
setb %cl
adcq %rax, %rdi
movq %rdi, -168(%rbp) ## 8-byte Spill
addb $255, %r10b
adcq -208(%rbp), %r13 ## 8-byte Folded Reload
setb -312(%rbp) ## 1-byte Folded Spill
addb $255, %cl
adcq %rdx, %rax
movq -152(%rbp), %rbx ## 8-byte Reload
movq %rbx, %rax
setb -344(%rbp) ## 1-byte Folded Spill
setb -768(%rbp) ## 1-byte Folded Spill
imulq -728(%rbp), %rax ## 8-byte Folded Reload
movq %rax, %rdi
movq %rbx, %r10
shlq $32, %r10
movq %rbx, %rax
movl $4294967295, %ecx ## imm = 0xFFFFFFFF
mulq %rcx
xorl %ecx, %ecx
subq %rbx, %r10
movq %rbx, %r8
setb %cl
movq -264(%rbp), %rbx ## 8-byte Reload
leaq (%rbx,%rcx), %r12
xorl %eax, %eax
addq %rdx, %r12
setb %al
addq %rcx, %rbx
adcq -496(%rbp), %rax ## 8-byte Folded Reload
movq %rdi, %r13
setb %cl
adcq %r9, %r13
addb $255, %r11b
adcq %r14, %r15
setb -528(%rbp) ## 1-byte Folded Spill
subq %r8, %r10
addb $255, %cl
adcq %rdi, %r9
movq %rdi, %r8
movq %rdi, -296(%rbp) ## 8-byte Spill
movq %rdx, %r15
movq %rdx, -112(%rbp) ## 8-byte Spill
leaq (%r10,%rdx), %rcx
setb %bl
adcq -168(%rbp), %rcx ## 8-byte Folded Reload
setb -816(%rbp) ## 1-byte Folded Spill
setb -840(%rbp) ## 1-byte Folded Spill
movq -792(%rbp), %rdx ## 8-byte Reload
addq -592(%rbp), %rdx ## 8-byte Folded Reload
movq -536(%rbp), %rdi ## 8-byte Reload
movq -584(%rbp), %r14 ## 8-byte Reload
adcq %r14, %rdi
movq %r12, %rcx
movq -856(%rbp), %r9 ## 8-byte Reload
addq %r9, %rcx
movq %rdx, %rcx
adcq %rax, %rcx
addq %r9, %r12
adcq %rax, %rdx
movq %rdi, %r9
setb %r11b
adcq %r13, %r9
movq -152(%rbp), %rax ## 8-byte Reload
movl $4294967294, %edx ## imm = 0xFFFFFFFE
mulq %rdx
addq %r15, %r10
adcq %r8, %rdx
movq %rdx, -592(%rbp) ## 8-byte Spill
addb $255, %bl
adcq -168(%rbp), %r10 ## 8-byte Folded Reload
cmpq %r14, %rdi
movq -368(%rbp), %r8 ## 8-byte Reload
adcq $0, %r8
addb $255, %r11b
adcq %r13, %rdi
movq -256(%rbp), %r11 ## 8-byte Reload
leaq (%r8,%r11), %rax
movq %rax, %rdi
setb %dl
adcq %r10, %rdi
movq %rdi, %r14
movq %rdi, -808(%rbp) ## 8-byte Spill
addb $255, %dl
adcq %rax, %r10
setb -536(%rbp) ## 1-byte Folded Spill
movq %r12, -792(%rbp) ## 8-byte Spill
movq %r12, %rax
movl $4294967295, %edx ## imm = 0xFFFFFFFF
mulq %rdx
movq %rdx, -208(%rbp) ## 8-byte Spill
movq %r12, %rax
imulq -728(%rbp), %rax ## 8-byte Folded Reload
movq %rax, %rbx
movq %rax, -168(%rbp) ## 8-byte Spill
movq %r12, %rdi
shlq $32, %rdi
xorl %eax, %eax
subq %r12, %rdi
movq %rdi, -288(%rbp) ## 8-byte Spill
setb %al
leaq (%rcx,%rax), %rdi
addq %rdx, %rdi
movq %rdi, -320(%rbp) ## 8-byte Spill
movl $0, %edx
setb %dl
addq %rax, %rcx
adcq %r9, %rdx
movq %rdx, -544(%rbp) ## 8-byte Spill
movq %rbx, %rax
setb -856(%rbp) ## 1-byte Folded Spill
adcq %r14, %rax
movq %rax, -368(%rbp) ## 8-byte Spill
movq %r8, %rax
addq %r11, %rax
movq -352(%rbp), %rbx ## 8-byte Reload
movq %rbx, %rax
adcq $0, %rax
setb %r9b
movq -192(%rbp), %rcx ## 8-byte Reload
movq %rcx, %rax
movq -136(%rbp), %rdi ## 8-byte Reload
mulq %rdi
imulq %rdi, %rcx
addq %r11, %r8
movq %rcx, %rax
movq %rcx, %r15
adcq %rbx, %rax
movq %rax, -584(%rbp) ## 8-byte Spill
movzbl %r9b, %eax
adcq %rdx, %rax
movq %rax, -776(%rbp) ## 8-byte Spill
movq %rdx, %r11
movq %rdx, -304(%rbp) ## 8-byte Spill
addb $255, -616(%rbp) ## 1-byte Folded Spill
movq -408(%rbp), %rax ## 8-byte Reload
adcq %rax, -336(%rbp) ## 8-byte Folded Spill
movq -144(%rbp), %rax ## 8-byte Reload
adcq $0, %rax
addb $255, -760(%rbp) ## 1-byte Folded Spill
movq -752(%rbp), %rcx ## 8-byte Reload
adcq %rcx, -96(%rbp) ## 8-byte Folded Spill
adcq $0, %rax
xorl %ecx, %ecx
cmpq -696(%rbp), %rax ## 8-byte Folded Reload
movq %rax, %r14
movq %rax, -144(%rbp) ## 8-byte Spill
setb %cl
movq %rcx, %r10
movq -224(%rbp), %rcx ## 8-byte Reload
movq %rcx, %rax
movq -440(%rbp), %r8 ## 8-byte Reload
mulq %r8
movq %rcx, %r9
imulq %r8, %r9
movq %r9, -264(%rbp) ## 8-byte Spill
xorl %edi, %edi
movq -232(%rbp), %rcx ## 8-byte Reload
cmpq %rbx, %rcx
setb %dil
movq %rcx, %rax
addq %r9, %rax
movq %rdx, -496(%rbp) ## 8-byte Spill
leaq (%rdi,%rdx), %rax
adcq %rdx, %rdi
movq %rdi, -440(%rbp) ## 8-byte Spill
addq %r9, %rcx
movq %rcx, %rdx
movq %rcx, -232(%rbp) ## 8-byte Spill
adcq %rax, %r10
movq %r10, -96(%rbp) ## 8-byte Spill
movq -784(%rbp), %rcx ## 8-byte Reload
cmpq -328(%rbp), %rcx ## 8-byte Folded Reload
movq -64(%rbp), %rax ## 8-byte Reload
adcq $0, %rax
movq %rax, -64(%rbp) ## 8-byte Spill
addb $255, -448(%rbp) ## 1-byte Folded Spill
adcq -800(%rbp), %rcx ## 8-byte Folded Reload
movq %rax, %rcx
setb -888(%rbp) ## 1-byte Folded Spill
adcq %r14, %rcx
movq %rcx, -880(%rbp) ## 8-byte Spill
addb $255, -600(%rbp) ## 1-byte Folded Spill
movq -88(%rbp), %rax ## 8-byte Reload
adcq -480(%rbp), %rax ## 8-byte Folded Reload
movq %rdx, %rax
setb -936(%rbp) ## 1-byte Folded Spill
adcq %rcx, %rax
movq %rax, %rdx
movq %rax, -408(%rbp) ## 8-byte Spill
movq -376(%rbp), %rax ## 8-byte Reload
movq -920(%rbp), %rcx ## 8-byte Reload
cmpq %rax, %rcx
adcq -104(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -760(%rbp) ## 8-byte Spill
addb $255, -832(%rbp) ## 1-byte Folded Spill
adcq -824(%rbp), %rcx ## 8-byte Folded Reload
setb -696(%rbp) ## 1-byte Folded Spill
adcq %rdx, %rax
movq %rax, %rcx
movq %rax, -824(%rbp) ## 8-byte Spill
xorl %edx, %edx
movq -688(%rbp), %rax ## 8-byte Reload
cmpq -272(%rbp), %rax ## 8-byte Folded Reload
setb %dl
addq %rax, %r15
movq %r15, -832(%rbp) ## 8-byte Spill
adcq %r11, %rdx
movq %rdx, -88(%rbp) ## 8-byte Spill
addb $255, -680(%rbp) ## 1-byte Folded Spill
movq -736(%rbp), %rax ## 8-byte Reload
adcq -360(%rbp), %rax ## 8-byte Folded Reload
movq %r15, %rax
setb -680(%rbp) ## 1-byte Folded Spill
adcq %rcx, %rax
movq %rax, %rdx
movq %rax, -360(%rbp) ## 8-byte Spill
movq -432(%rbp), %rax ## 8-byte Reload
movq -664(%rbp), %rcx ## 8-byte Reload
cmpq %rax, %rcx
adcq -184(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -912(%rbp) ## 8-byte Spill
addb $255, -672(%rbp) ## 1-byte Folded Spill
adcq -744(%rbp), %rcx ## 8-byte Folded Reload
setb -920(%rbp) ## 1-byte Folded Spill
adcq %rdx, %rax
movq %rax, -872(%rbp) ## 8-byte Spill
movq -80(%rbp), %r8 ## 8-byte Reload
movq %r8, %rax
movq -200(%rbp), %rcx ## 8-byte Reload
mulq %rcx
movq %rdx, -352(%rbp) ## 8-byte Spill
imulq %rcx, %r8
xorl %eax, %eax
movq -400(%rbp), %r11 ## 8-byte Reload
cmpq %r11, -720(%rbp) ## 8-byte Folded Reload
setb %al
movq %rax, -152(%rbp) ## 8-byte Spill
movq -72(%rbp), %r14 ## 8-byte Reload
movq %r14, %rax
movq -248(%rbp), %rcx ## 8-byte Reload
mulq %rcx
movq %rdx, -480(%rbp) ## 8-byte Spill
imulq %rcx, %r14
xorl %r9d, %r9d
movq -624(%rbp), %r13 ## 8-byte Reload
movq -456(%rbp), %rcx ## 8-byte Reload
cmpq %rcx, %r13
setb %r9b
movq -392(%rbp), %r12 ## 8-byte Reload
movq %r12, %rax
mulq %r12
movq %rdx, -904(%rbp) ## 8-byte Spill
imulq %r12, %r12
xorl %r15d, %r15d
movq -472(%rbp), %r10 ## 8-byte Reload
cmpq %rcx, %r10
setb %r15b
xorl %ebx, %ebx
movq -280(%rbp), %rcx ## 8-byte Reload
cmpq %r11, %rcx
setb %bl
xorl %edi, %edi
movq -160(%rbp), %rax ## 8-byte Reload
cmpq -272(%rbp), %rax ## 8-byte Folded Reload
setb %dil
addq %r8, %rax
movq %rax, -160(%rbp) ## 8-byte Spill
movq -352(%rbp), %rax ## 8-byte Reload
adcq %rax, %rdi
movq %rdi, -336(%rbp) ## 8-byte Spill
addq %r14, %rcx
movq %rcx, -280(%rbp) ## 8-byte Spill
movq -480(%rbp), %rdi ## 8-byte Reload
adcq %rdi, %rbx
movq %rbx, -328(%rbp) ## 8-byte Spill
addq %r10, %r12
movq %r12, -752(%rbp) ## 8-byte Spill
adcq %rdx, %r15
movq %r15, -896(%rbp) ## 8-byte Spill
addq %r13, %r14
movq %r14, %rbx
movq %r14, -688(%rbp) ## 8-byte Spill
adcq %rdi, %r9
movq %r9, -256(%rbp) ## 8-byte Spill
addq -720(%rbp), %r8 ## 8-byte Folded Reload
movq -152(%rbp), %r11 ## 8-byte Reload
adcq %rax, %r11
addb $255, -712(%rbp) ## 1-byte Folded Spill
movq -568(%rbp), %rax ## 8-byte Reload
adcq -464(%rbp), %rax ## 8-byte Folded Reload
movq %r8, %r15
setb -400(%rbp) ## 1-byte Folded Spill
movq -872(%rbp), %r13 ## 8-byte Reload
adcq %r13, %r15
movq -176(%rbp), %r14 ## 8-byte Reload
movq -608(%rbp), %rdx ## 8-byte Reload
cmpq %r14, %rdx
adcq -384(%rbp), %r14 ## 8-byte Folded Reload
addb $255, -704(%rbp) ## 1-byte Folded Spill
adcq -560(%rbp), %rdx ## 8-byte Folded Reload
movq %r14, %rdi
setb -608(%rbp) ## 1-byte Folded Spill
adcq %r15, %rdi
movq %rdi, -664(%rbp) ## 8-byte Spill
movb -576(%rbp), %al ## 1-byte Reload
addb $255, %al
movq %rbx, %rdx
adcq %rdi, %rdx
movq %rdx, -624(%rbp) ## 8-byte Spill
movq -56(%rbp), %rax ## 8-byte Reload
cmpq %rax, -512(%rbp) ## 8-byte Folded Reload
movq %rax, %rbx
adcq -240(%rbp), %rbx ## 8-byte Folded Reload
movq %rbx, -600(%rbp) ## 8-byte Spill
movb -640(%rbp), %al ## 1-byte Reload
addb $255, %al
adcq %rdx, %rbx
movq %rbx, -704(%rbp) ## 8-byte Spill
movb -632(%rbp), %al ## 1-byte Reload
addb $255, %al
adcq %rbx, %r12
movq %r12, -616(%rbp) ## 8-byte Spill
movq -48(%rbp), %rax ## 8-byte Reload
cmpq %rax, -520(%rbp) ## 8-byte Folded Reload
movq %rax, %rbx
adcq -128(%rbp), %rbx ## 8-byte Folded Reload
movq %rbx, -568(%rbp) ## 8-byte Spill
movb -488(%rbp), %al ## 1-byte Reload
addb $255, %al
adcq %r12, %rbx
movq %rbx, -456(%rbp) ## 8-byte Spill
movb -312(%rbp), %al ## 1-byte Reload
addb $255, %al
movq %rcx, %rdx
adcq %rbx, %rdx
movq %rdx, -448(%rbp) ## 8-byte Spill
movq -216(%rbp), %rax ## 8-byte Reload
cmpq %rax, -848(%rbp) ## 8-byte Folded Reload
movq %rax, %rcx
adcq -120(%rbp), %rcx ## 8-byte Folded Reload
movq %rcx, -712(%rbp) ## 8-byte Spill
movb -528(%rbp), %al ## 1-byte Reload
addb $255, %al
adcq %rdx, %rcx
movq %rcx, -560(%rbp) ## 8-byte Spill
addb $255, -768(%rbp) ## 1-byte Folded Spill
movq -160(%rbp), %rax ## 8-byte Reload
adcq %rcx, %rax
movq %rax, -784(%rbp) ## 8-byte Spill
addb $255, -840(%rbp) ## 1-byte Folded Spill
movq -592(%rbp), %rcx ## 8-byte Reload
adcq %rax, %rcx
movq %rcx, -800(%rbp) ## 8-byte Spill
movb -536(%rbp), %al ## 1-byte Reload
addb $255, %al
movq -584(%rbp), %rax ## 8-byte Reload
adcq %rcx, %rax
movq %rax, -272(%rbp) ## 8-byte Spill
addb $255, -888(%rbp) ## 1-byte Folded Spill
movq -144(%rbp), %rax ## 8-byte Reload
adcq %rax, -64(%rbp) ## 8-byte Folded Spill
movq -96(%rbp), %rax ## 8-byte Reload
adcq $0, %rax
addb $255, -936(%rbp) ## 1-byte Folded Spill
movq -880(%rbp), %rcx ## 8-byte Reload
adcq -232(%rbp), %rcx ## 8-byte Folded Reload
adcq $0, %rax
xorl %ecx, %ecx
cmpq -440(%rbp), %rax ## 8-byte Folded Reload
movq %rax, %r10
movq %rax, -96(%rbp) ## 8-byte Spill
setb %cl
movq %rcx, %r9
movq -224(%rbp), %rbx ## 8-byte Reload
movq %rbx, %rax
movq -136(%rbp), %rcx ## 8-byte Reload
mulq %rcx
movq %rdx, -720(%rbp) ## 8-byte Spill
imulq %rcx, %rbx
movq %rbx, -440(%rbp) ## 8-byte Spill
xorl %edi, %edi
movq -88(%rbp), %rcx ## 8-byte Reload
cmpq -304(%rbp), %rcx ## 8-byte Folded Reload
setb %dil
movq %rcx, %rax
addq %rbx, %rax
leaq (%rdi,%rdx), %rax
adcq %rdx, %rdi
movq %rdi, -520(%rbp) ## 8-byte Spill
addq %rbx, %rcx
movq %rcx, -88(%rbp) ## 8-byte Spill
adcq %rax, %r9
movq %r9, -64(%rbp) ## 8-byte Spill
movq -760(%rbp), %rdx ## 8-byte Reload
cmpq -376(%rbp), %rdx ## 8-byte Folded Reload
movq -104(%rbp), %rax ## 8-byte Reload
adcq $0, %rax
movq %rax, -104(%rbp) ## 8-byte Spill
addb $255, -696(%rbp) ## 1-byte Folded Spill
adcq -408(%rbp), %rdx ## 8-byte Folded Reload
setb -672(%rbp) ## 1-byte Folded Spill
movq %rax, %rdx
adcq %r10, %rdx
movq %rdx, -736(%rbp) ## 8-byte Spill
addb $255, -680(%rbp) ## 1-byte Folded Spill
movq -824(%rbp), %rax ## 8-byte Reload
adcq %rax, -832(%rbp) ## 8-byte Folded Spill
setb -744(%rbp) ## 1-byte Folded Spill
movq %rcx, %rax
adcq %rdx, %rax
movq %rax, %rcx
movq %rax, -472(%rbp) ## 8-byte Spill
movq -432(%rbp), %rax ## 8-byte Reload
movq -912(%rbp), %rdx ## 8-byte Reload
cmpq %rax, %rdx
adcq -184(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -464(%rbp) ## 8-byte Spill
addb $255, -920(%rbp) ## 1-byte Folded Spill
adcq -360(%rbp), %rdx ## 8-byte Folded Reload
setb -512(%rbp) ## 1-byte Folded Spill
adcq %rcx, %rax
movq %rax, %rdi
movq %rax, -768(%rbp) ## 8-byte Spill
movq -192(%rbp), %rax ## 8-byte Reload
movq %rax, %rdx
movq -200(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rdx
movq %rdx, %rbx
mulq %rcx
xorl %ecx, %ecx
movq %r11, %rax
movq -352(%rbp), %r12 ## 8-byte Reload
cmpq %r12, %r11
setb %cl
addq %rbx, %rax
movq %rax, -152(%rbp) ## 8-byte Spill
adcq %rdx, %rcx
movq %rcx, -376(%rbp) ## 8-byte Spill
movq %rdx, %r11
movq %rdx, -408(%rbp) ## 8-byte Spill
addb $255, -400(%rbp) ## 1-byte Folded Spill
adcq %r13, %r8
setb -696(%rbp) ## 1-byte Folded Spill
adcq %rdi, %rax
movq %rax, %rcx
movq %rax, -760(%rbp) ## 8-byte Spill
movq -176(%rbp), %rax ## 8-byte Reload
cmpq %rax, %r14
adcq -384(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -680(%rbp) ## 8-byte Spill
addb $255, -608(%rbp) ## 1-byte Folded Spill
adcq %r15, %r14
setb -608(%rbp) ## 1-byte Folded Spill
adcq %rcx, %rax
movq %rax, -360(%rbp) ## 8-byte Spill
movq -80(%rbp), %rax ## 8-byte Reload
movq %rax, %r9
movq -248(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %r9
mulq %rcx
movq %rdx, %r14
xorl %eax, %eax
movq -480(%rbp), %r10 ## 8-byte Reload
cmpq %r10, -256(%rbp) ## 8-byte Folded Reload
setb %al
movq %rax, -144(%rbp) ## 8-byte Spill
movq -72(%rbp), %rdi ## 8-byte Reload
movq %rdi, %rax
movq -392(%rbp), %rcx ## 8-byte Reload
mulq %rcx
movq %rdx, -400(%rbp) ## 8-byte Spill
movq %rdi, %r15
imulq %rcx, %r15
xorl %r13d, %r13d
movq -896(%rbp), %r8 ## 8-byte Reload
cmpq -904(%rbp), %r8 ## 8-byte Folded Reload
setb %r13b
xorl %eax, %eax
movq -328(%rbp), %rcx ## 8-byte Reload
cmpq %r10, %rcx
setb %al
movq %rax, %r10
xorl %edx, %edx
movq -336(%rbp), %rax ## 8-byte Reload
cmpq %r12, %rax
setb %dl
movq %rdx, %r12
xorl %edi, %edi
movq -776(%rbp), %rdx ## 8-byte Reload
cmpq -304(%rbp), %rdx ## 8-byte Folded Reload
setb %dil
addq %rdx, %rbx
movq %rbx, -232(%rbp) ## 8-byte Spill
adcq %r11, %rdi
movq %rdi, -480(%rbp) ## 8-byte Spill
addq %r9, %rax
movq %rax, %rdx
movq %rax, -336(%rbp) ## 8-byte Spill
adcq %r14, %r12
movq %r12, -136(%rbp) ## 8-byte Spill
addq %r15, %rcx
movq %rcx, -328(%rbp) ## 8-byte Spill
movq -400(%rbp), %rax ## 8-byte Reload
adcq %rax, %r10
movq %r10, -776(%rbp) ## 8-byte Spill
addq %r8, %r15
movq %r15, %rbx
movq %r15, -352(%rbp) ## 8-byte Spill
adcq %rax, %r13
movq %r13, -840(%rbp) ## 8-byte Spill
movq %r9, %r13
addq -256(%rbp), %r13 ## 8-byte Folded Reload
movq -144(%rbp), %r11 ## 8-byte Reload
adcq %r14, %r11
movq %r14, %r8
addb $255, -576(%rbp) ## 1-byte Folded Spill
movq -664(%rbp), %rax ## 8-byte Reload
adcq -688(%rbp), %rax ## 8-byte Folded Reload
setb -664(%rbp) ## 1-byte Folded Spill
movq %r13, %r14
adcq -360(%rbp), %r14 ## 8-byte Folded Reload
movq -56(%rbp), %r15 ## 8-byte Reload
movq -600(%rbp), %rdi ## 8-byte Reload
cmpq %r15, %rdi
adcq -240(%rbp), %r15 ## 8-byte Folded Reload
addb $255, -640(%rbp) ## 1-byte Folded Spill
adcq -624(%rbp), %rdi ## 8-byte Folded Reload
setb -600(%rbp) ## 1-byte Folded Spill
movq %r15, %rdi
adcq %r14, %rdi
movq %rdi, -624(%rbp) ## 8-byte Spill
addb $255, -632(%rbp) ## 1-byte Folded Spill
movq -704(%rbp), %rax ## 8-byte Reload
adcq -752(%rbp), %rax ## 8-byte Folded Reload
setb -256(%rbp) ## 1-byte Folded Spill
movq %rbx, %rax
adcq %rdi, %rax
movq %rax, %rdi
movq %rax, -704(%rbp) ## 8-byte Spill
movq -48(%rbp), %rax ## 8-byte Reload
movq -568(%rbp), %rbx ## 8-byte Reload
cmpq %rax, %rbx
adcq -128(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -752(%rbp) ## 8-byte Spill
addb $255, -488(%rbp) ## 1-byte Folded Spill
adcq -616(%rbp), %rbx ## 8-byte Folded Reload
setb -304(%rbp) ## 1-byte Folded Spill
movq %rax, %rbx
adcq %rdi, %rbx
movq %rbx, -616(%rbp) ## 8-byte Spill
addb $255, -312(%rbp) ## 1-byte Folded Spill
movq -456(%rbp), %rax ## 8-byte Reload
adcq -280(%rbp), %rax ## 8-byte Folded Reload
setb -456(%rbp) ## 1-byte Folded Spill
movq %rcx, %rax
adcq %rbx, %rax
movq %rax, %rcx
movq %rax, -568(%rbp) ## 8-byte Spill
movq -216(%rbp), %rax ## 8-byte Reload
movq -712(%rbp), %rbx ## 8-byte Reload
cmpq %rax, %rbx
movq %rax, %rdi
adcq -120(%rbp), %rdi ## 8-byte Folded Reload
movq %rdi, -688(%rbp) ## 8-byte Spill
addb $255, -528(%rbp) ## 1-byte Folded Spill
adcq -448(%rbp), %rbx ## 8-byte Folded Reload
setb -448(%rbp) ## 1-byte Folded Spill
movq %rdi, %rbx
adcq %rcx, %rbx
movq %rbx, -712(%rbp) ## 8-byte Spill
addb $255, -344(%rbp) ## 1-byte Folded Spill
movq -560(%rbp), %rax ## 8-byte Reload
adcq -160(%rbp), %rax ## 8-byte Folded Reload
setb -344(%rbp) ## 1-byte Folded Spill
movq %rdx, %rax
adcq %rbx, %rax
movq %rax, %rdx
movq %rax, -560(%rbp) ## 8-byte Spill
movq -296(%rbp), %rax ## 8-byte Reload
movq -592(%rbp), %rcx ## 8-byte Reload
cmpq %rax, %rcx
adcq -112(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -848(%rbp) ## 8-byte Spill
addb $255, -816(%rbp) ## 1-byte Folded Spill
adcq %rcx, -784(%rbp) ## 8-byte Folded Spill
setb -816(%rbp) ## 1-byte Folded Spill
movq %rax, %rcx
adcq %rdx, %rcx
movq %rcx, -784(%rbp) ## 8-byte Spill
addb $255, -536(%rbp) ## 1-byte Folded Spill
movq -800(%rbp), %rax ## 8-byte Reload
adcq -584(%rbp), %rax ## 8-byte Folded Reload
setb -800(%rbp) ## 1-byte Folded Spill
movq -232(%rbp), %rax ## 8-byte Reload
adcq %rcx, %rax
movq %rax, %r9
movq %rax, -536(%rbp) ## 8-byte Spill
movq -792(%rbp), %rbx ## 8-byte Reload
movq %rbx, %rax
movl $4294967294, %ecx ## imm = 0xFFFFFFFE
mulq %rcx
movq -288(%rbp), %rax ## 8-byte Reload
subq %rbx, %rax
addq -208(%rbp), %rax ## 8-byte Folded Reload
movq %rax, %r10
movq %rax, -288(%rbp) ## 8-byte Spill
movq -168(%rbp), %rbx ## 8-byte Reload
adcq %rbx, %rdx
movq %rdx, %rdi
movq %rdx, -312(%rbp) ## 8-byte Spill
movq -424(%rbp), %rcx ## 8-byte Reload
addq -864(%rbp), %rcx ## 8-byte Folded Reload
movq %rcx, -424(%rbp) ## 8-byte Spill
movq -416(%rbp), %rdx ## 8-byte Reload
adcq -504(%rbp), %rdx ## 8-byte Folded Reload
movq %rdx, -416(%rbp) ## 8-byte Spill
movq -320(%rbp), %rax ## 8-byte Reload
addq -648(%rbp), %rax ## 8-byte Folded Reload
movq -544(%rbp), %rax ## 8-byte Reload
adcq %rcx, %rax
movq %rdx, %rax
setb -488(%rbp) ## 1-byte Folded Spill
adcq -368(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -592(%rbp) ## 8-byte Spill
addb $255, -856(%rbp) ## 1-byte Folded Spill
adcq %rbx, -808(%rbp) ## 8-byte Folded Spill
movq -272(%rbp), %rax ## 8-byte Reload
setb -528(%rbp) ## 1-byte Folded Spill
adcq %r10, %rax
movq %rdi, %rax
setb -584(%rbp) ## 1-byte Folded Spill
adcq %r9, %rax
movq %rax, -640(%rbp) ## 8-byte Spill
addb $255, -672(%rbp) ## 1-byte Folded Spill
movq -96(%rbp), %rax ## 8-byte Reload
adcq %rax, -104(%rbp) ## 8-byte Folded Spill
movq -64(%rbp), %rax ## 8-byte Reload
adcq $0, %rax
addb $255, -744(%rbp) ## 1-byte Folded Spill
movq -736(%rbp), %rcx ## 8-byte Reload
adcq -88(%rbp), %rcx ## 8-byte Folded Reload
adcq $0, %rax
xorl %ecx, %ecx
cmpq -520(%rbp), %rax ## 8-byte Folded Reload
movq %rax, %r10
movq %rax, -64(%rbp) ## 8-byte Spill
setb %cl
movq %rcx, %r9
movq -224(%rbp), %rcx ## 8-byte Reload
movq %rcx, %rax
movq -200(%rbp), %rdi ## 8-byte Reload
mulq %rdi
imulq %rdi, %rcx
movq %rcx, -632(%rbp) ## 8-byte Spill
xorl %ebx, %ebx
movq -376(%rbp), %rdi ## 8-byte Reload
cmpq -408(%rbp), %rdi ## 8-byte Folded Reload
setb %bl
movq %rdi, %rax
addq %rcx, %rax
movq %rdx, -576(%rbp) ## 8-byte Spill
leaq (%rbx,%rdx), %rax
adcq %rdx, %rbx
movq %rbx, -200(%rbp) ## 8-byte Spill
addq %rcx, %rdi
movq %rdi, -376(%rbp) ## 8-byte Spill
adcq %rax, %r9
movq %r9, -160(%rbp) ## 8-byte Spill
movq -464(%rbp), %rcx ## 8-byte Reload
cmpq -432(%rbp), %rcx ## 8-byte Folded Reload
movq -184(%rbp), %rax ## 8-byte Reload
adcq $0, %rax
movq %rax, -184(%rbp) ## 8-byte Spill
addb $255, -512(%rbp) ## 1-byte Folded Spill
adcq -472(%rbp), %rcx ## 8-byte Folded Reload
setb -88(%rbp) ## 1-byte Folded Spill
movq %rax, %rcx
adcq %r10, %rcx
movq %rcx, -736(%rbp) ## 8-byte Spill
addb $255, -696(%rbp) ## 1-byte Folded Spill
movq -768(%rbp), %rax ## 8-byte Reload
adcq %rax, -152(%rbp) ## 8-byte Folded Spill
setb -744(%rbp) ## 1-byte Folded Spill
movq %rdi, %rax
adcq %rcx, %rax
movq %rax, %rdx
movq %rax, -512(%rbp) ## 8-byte Spill
movq -176(%rbp), %rax ## 8-byte Reload
movq -680(%rbp), %rcx ## 8-byte Reload
cmpq %rax, %rcx
adcq -384(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -472(%rbp) ## 8-byte Spill
addb $255, -608(%rbp) ## 1-byte Folded Spill
adcq -760(%rbp), %rcx ## 8-byte Folded Reload
setb -520(%rbp) ## 1-byte Folded Spill
adcq %rdx, %rax
movq %rax, %rbx
movq %rax, -856(%rbp) ## 8-byte Spill
movq -192(%rbp), %rax ## 8-byte Reload
movq %rax, %rdx
movq -248(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rdx
movq %rdx, %rdi
mulq %rcx
movq %rdx, %r12
xorl %ecx, %ecx
movq %r11, %rax
cmpq %r8, %r11
setb %cl
addq %rdi, %rax
movq %rax, -144(%rbp) ## 8-byte Spill
adcq %rdx, %rcx
movq %rcx, -432(%rbp) ## 8-byte Spill
movq %rdx, -280(%rbp) ## 8-byte Spill
addb $255, -664(%rbp) ## 1-byte Folded Spill
adcq -360(%rbp), %r13 ## 8-byte Folded Reload
setb -464(%rbp) ## 1-byte Folded Spill
adcq %rbx, %rax
movq %rax, %rcx
movq %rax, -608(%rbp) ## 8-byte Spill
movq -56(%rbp), %rax ## 8-byte Reload
cmpq %rax, %r15
adcq -240(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -768(%rbp) ## 8-byte Spill
addb $255, -600(%rbp) ## 1-byte Folded Spill
adcq %r14, %r15
setb -864(%rbp) ## 1-byte Folded Spill
adcq %rcx, %rax
movq %rax, -664(%rbp) ## 8-byte Spill
movq -80(%rbp), %rax ## 8-byte Reload
movq %rax, %r13
movq -392(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %r13
mulq %rcx
movq %rdx, -600(%rbp) ## 8-byte Spill
xorl %r9d, %r9d
movq -840(%rbp), %r15 ## 8-byte Reload
movq -400(%rbp), %rcx ## 8-byte Reload
cmpq %rcx, %r15
setb %r9b
movq -72(%rbp), %rax ## 8-byte Reload
movq %rax, %rdx
imulq %rax, %rdx
movq %rdx, -96(%rbp) ## 8-byte Spill
mulq %rax
movq %rdx, %r10
movq %rdx, -680(%rbp) ## 8-byte Spill
xorl %r14d, %r14d
movq -776(%rbp), %r11 ## 8-byte Reload
cmpq %rcx, %r11
setb %r14b
xorl %ecx, %ecx
movq -136(%rbp), %rax ## 8-byte Reload
cmpq %r8, %rax
setb %cl
movq %rcx, %rdx
xorl %ebx, %ebx
movq -480(%rbp), %rcx ## 8-byte Reload
cmpq -408(%rbp), %rcx ## 8-byte Folded Reload
setb %bl
addq %rcx, %rdi
movq %rdi, -400(%rbp) ## 8-byte Spill
adcq %r12, %rbx
movq %rbx, -360(%rbp) ## 8-byte Spill
addq %r13, %rax
movq %rax, %r8
movq %rax, -136(%rbp) ## 8-byte Spill
movq -600(%rbp), %r12 ## 8-byte Reload
adcq %r12, %rdx
movq %rdx, -104(%rbp) ## 8-byte Spill
movq -96(%rbp), %rdx ## 8-byte Reload
addq %r11, %rdx
movq %rdx, -96(%rbp) ## 8-byte Spill
adcq %r10, %r14
movq %r14, -672(%rbp) ## 8-byte Spill
addq %r15, %r13
adcq %r12, %r9
movq %r9, -152(%rbp) ## 8-byte Spill
movq %r12, %r9
addb $255, -256(%rbp) ## 1-byte Folded Spill
movq -624(%rbp), %rax ## 8-byte Reload
adcq -352(%rbp), %rax ## 8-byte Folded Reload
setb -256(%rbp) ## 1-byte Folded Spill
movq %r13, %rax
movq -664(%rbp), %r11 ## 8-byte Reload
adcq %r11, %rax
movq %rax, -624(%rbp) ## 8-byte Spill
movq -48(%rbp), %r14 ## 8-byte Reload
movq -752(%rbp), %rcx ## 8-byte Reload
cmpq %r14, %rcx
adcq -128(%rbp), %r14 ## 8-byte Folded Reload
addb $255, -304(%rbp) ## 1-byte Folded Spill
adcq -704(%rbp), %rcx ## 8-byte Folded Reload
setb -352(%rbp) ## 1-byte Folded Spill
movq %r14, %rcx
adcq %rax, %rcx
movq %rcx, -792(%rbp) ## 8-byte Spill
addb $255, -456(%rbp) ## 1-byte Folded Spill
movq -616(%rbp), %rax ## 8-byte Reload
adcq -328(%rbp), %rax ## 8-byte Folded Reload
setb -408(%rbp) ## 1-byte Folded Spill
movq %rdx, %rax
adcq %rcx, %rax
movq %rax, -696(%rbp) ## 8-byte Spill
movq -216(%rbp), %r12 ## 8-byte Reload
movq -688(%rbp), %rcx ## 8-byte Reload
cmpq %r12, %rcx
adcq -120(%rbp), %r12 ## 8-byte Folded Reload
addb $255, -448(%rbp) ## 1-byte Folded Spill
adcq -568(%rbp), %rcx ## 8-byte Folded Reload
setb -456(%rbp) ## 1-byte Folded Spill
movq %r12, %rcx
adcq %rax, %rcx
movq %rcx, -568(%rbp) ## 8-byte Spill
addb $255, -344(%rbp) ## 1-byte Folded Spill
movq -712(%rbp), %rax ## 8-byte Reload
adcq -336(%rbp), %rax ## 8-byte Folded Reload
setb -448(%rbp) ## 1-byte Folded Spill
movq %r8, %rax
adcq %rcx, %rax
movq %rax, %rdx
movq %rax, -776(%rbp) ## 8-byte Spill
movq -296(%rbp), %rax ## 8-byte Reload
movq -848(%rbp), %rcx ## 8-byte Reload
cmpq %rax, %rcx
adcq -112(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -760(%rbp) ## 8-byte Spill
addb $255, -816(%rbp) ## 1-byte Folded Spill
adcq -560(%rbp), %rcx ## 8-byte Folded Reload
setb -808(%rbp) ## 1-byte Folded Spill
movq %rax, %rcx
adcq %rdx, %rcx
movq %rcx, -560(%rbp) ## 8-byte Spill
addb $255, -800(%rbp) ## 1-byte Folded Spill
movq -784(%rbp), %rax ## 8-byte Reload
adcq -232(%rbp), %rax ## 8-byte Folded Reload
setb -616(%rbp) ## 1-byte Folded Spill
movq %rdi, %rax
adcq %rcx, %rax
movq %rax, -328(%rbp) ## 8-byte Spill
addb $255, -88(%rbp) ## 1-byte Folded Spill
movq -64(%rbp), %rax ## 8-byte Reload
adcq %rax, -184(%rbp) ## 8-byte Folded Spill
movq -160(%rbp), %r10 ## 8-byte Reload
adcq $0, %r10
addb $255, -744(%rbp) ## 1-byte Folded Spill
movq -736(%rbp), %rcx ## 8-byte Reload
adcq -376(%rbp), %rcx ## 8-byte Folded Reload
adcq $0, %r10
xorl %ecx, %ecx
cmpq -200(%rbp), %r10 ## 8-byte Folded Reload
movq %r10, -160(%rbp) ## 8-byte Spill
setb %cl
movq %rcx, %r8
movq -224(%rbp), %rcx ## 8-byte Reload
movq %rcx, %rax
movq -248(%rbp), %rdi ## 8-byte Reload
mulq %rdi
movq %rdx, -88(%rbp) ## 8-byte Spill
imulq %rdi, %rcx
movq %rcx, -232(%rbp) ## 8-byte Spill
xorl %edi, %edi
movq -432(%rbp), %rbx ## 8-byte Reload
cmpq -280(%rbp), %rbx ## 8-byte Folded Reload
setb %dil
movq %rbx, %rax
addq %rcx, %rax
leaq (%rdi,%rdx), %rax
adcq %rdx, %rdi
movq %rdi, -248(%rbp) ## 8-byte Spill
addq %rcx, %rbx
movq %rbx, -432(%rbp) ## 8-byte Spill
adcq %rax, %r8
movq %r8, -200(%rbp) ## 8-byte Spill
movq -472(%rbp), %rcx ## 8-byte Reload
cmpq -176(%rbp), %rcx ## 8-byte Folded Reload
movq -384(%rbp), %rax ## 8-byte Reload
adcq $0, %rax
movq %rax, -384(%rbp) ## 8-byte Spill
addb $255, -520(%rbp) ## 1-byte Folded Spill
adcq -512(%rbp), %rcx ## 8-byte Folded Reload
setb -184(%rbp) ## 1-byte Folded Spill
movq %rax, %rcx
adcq %r10, %rcx
movq %rcx, -480(%rbp) ## 8-byte Spill
addb $255, -464(%rbp) ## 1-byte Folded Spill
movq -856(%rbp), %rax ## 8-byte Reload
adcq %rax, -144(%rbp) ## 8-byte Folded Spill
setb -64(%rbp) ## 1-byte Folded Spill
movq %rbx, %rax
adcq %rcx, %rax
movq %rax, %rcx
movq %rax, -344(%rbp) ## 8-byte Spill
movq -56(%rbp), %rax ## 8-byte Reload
movq -768(%rbp), %rdx ## 8-byte Reload
cmpq %rax, %rdx
adcq -240(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -704(%rbp) ## 8-byte Spill
addb $255, -864(%rbp) ## 1-byte Folded Spill
adcq -608(%rbp), %rdx ## 8-byte Folded Reload
setb -144(%rbp) ## 1-byte Folded Spill
adcq %rcx, %rax
movq %rax, %rdi
movq %rax, -304(%rbp) ## 8-byte Spill
movq -192(%rbp), %rax ## 8-byte Reload
movq %rax, %r15
movq -392(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %r15
mulq %rcx
movq %rdx, %r8
xorl %ecx, %ecx
movq -152(%rbp), %rax ## 8-byte Reload
cmpq %r9, %rax
setb %cl
addq %r15, %rax
movq %rax, -152(%rbp) ## 8-byte Spill
adcq %rdx, %rcx
movq %rcx, -336(%rbp) ## 8-byte Spill
addb $255, -256(%rbp) ## 1-byte Folded Spill
adcq %r11, %r13
setb -256(%rbp) ## 1-byte Folded Spill
movq %rax, %rdx
adcq %rdi, %rdx
movq %rdx, -472(%rbp) ## 8-byte Spill
movq -48(%rbp), %rax ## 8-byte Reload
cmpq %rax, %r14
adcq -128(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -712(%rbp) ## 8-byte Spill
addb $255, -352(%rbp) ## 1-byte Folded Spill
adcq -624(%rbp), %r14 ## 8-byte Folded Reload
setb -352(%rbp) ## 1-byte Folded Spill
adcq %rdx, %rax
movq %rax, %r13
movq %rax, -520(%rbp) ## 8-byte Spill
movq -80(%rbp), %rax ## 8-byte Reload
movq %rax, %rbx
movq -72(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %rbx
mulq %rcx
xorl %eax, %eax
movq -672(%rbp), %r14 ## 8-byte Reload
cmpq -680(%rbp), %r14 ## 8-byte Folded Reload
setb %al
movq %rax, %r10
xorl %edi, %edi
movq -104(%rbp), %rax ## 8-byte Reload
cmpq %r9, %rax
setb %dil
xorl %ecx, %ecx
movq -360(%rbp), %r11 ## 8-byte Reload
cmpq -280(%rbp), %r11 ## 8-byte Folded Reload
setb %cl
addq %r11, %r15
adcq %r8, %rcx
movq %rcx, -624(%rbp) ## 8-byte Spill
movq %r8, %r11
movq %r8, -688(%rbp) ## 8-byte Spill
addq %rbx, %rax
movq %rax, %r8
movq %rax, -104(%rbp) ## 8-byte Spill
movq %rdx, -752(%rbp) ## 8-byte Spill
adcq %rdx, %rdi
movq %rdi, -608(%rbp) ## 8-byte Spill
addq %r14, %rbx
movq %rbx, -376(%rbp) ## 8-byte Spill
adcq %rdx, %r10
movq %r10, -768(%rbp) ## 8-byte Spill
addb $255, -408(%rbp) ## 1-byte Folded Spill
movq -792(%rbp), %rax ## 8-byte Reload
adcq -96(%rbp), %rax ## 8-byte Folded Reload
setb -176(%rbp) ## 1-byte Folded Spill
movq %rbx, %rax
adcq %r13, %rax
movq %rax, %rdx
movq %rax, -512(%rbp) ## 8-byte Spill
movq -216(%rbp), %rax ## 8-byte Reload
cmpq %rax, %r12
adcq -120(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -792(%rbp) ## 8-byte Spill
addb $255, -456(%rbp) ## 1-byte Folded Spill
adcq -696(%rbp), %r12 ## 8-byte Folded Reload
setb -408(%rbp) ## 1-byte Folded Spill
movq %rax, %rcx
adcq %rdx, %rcx
movq %rcx, -856(%rbp) ## 8-byte Spill
addb $255, -448(%rbp) ## 1-byte Folded Spill
movq -568(%rbp), %rax ## 8-byte Reload
adcq -136(%rbp), %rax ## 8-byte Folded Reload
setb -816(%rbp) ## 1-byte Folded Spill
movq %r8, %rax
adcq %rcx, %rax
movq %rax, %rcx
movq %rax, -800(%rbp) ## 8-byte Spill
movq -296(%rbp), %rax ## 8-byte Reload
movq -760(%rbp), %rdx ## 8-byte Reload
cmpq %rax, %rdx
adcq -112(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -784(%rbp) ## 8-byte Spill
addb $255, -808(%rbp) ## 1-byte Folded Spill
adcq -776(%rbp), %rdx ## 8-byte Folded Reload
setb -808(%rbp) ## 1-byte Folded Spill
adcq %rcx, %rax
addb $255, -616(%rbp) ## 1-byte Folded Spill
movq -560(%rbp), %rcx ## 8-byte Reload
adcq -400(%rbp), %rcx ## 8-byte Folded Reload
setb %dl
movq %r15, %rdi
adcq %rax, %rdi
movq %rdi, %r14
movq %rdi, -568(%rbp) ## 8-byte Spill
addb $255, %dl
adcq %r15, %rax
setb -136(%rbp) ## 1-byte Folded Spill
setb -616(%rbp) ## 1-byte Folded Spill
movq -320(%rbp), %rax ## 8-byte Reload
addq -648(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -320(%rbp) ## 8-byte Spill
movq -424(%rbp), %rax ## 8-byte Reload
adcq -544(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -424(%rbp) ## 8-byte Spill
addb $255, -528(%rbp) ## 1-byte Folded Spill
movq -288(%rbp), %rcx ## 8-byte Reload
adcq -272(%rbp), %rcx ## 8-byte Folded Reload
movq %rcx, -288(%rbp) ## 8-byte Spill
movq -416(%rbp), %rdx ## 8-byte Reload
cmpq -504(%rbp), %rdx ## 8-byte Folded Reload
movq -552(%rbp), %rax ## 8-byte Reload
adcq $0, %rax
addb $255, -488(%rbp) ## 1-byte Folded Spill
adcq -368(%rbp), %rdx ## 8-byte Folded Reload
setb -456(%rbp) ## 1-byte Folded Spill
movq -264(%rbp), %rbx ## 8-byte Reload
leaq (%rax,%rbx), %rdx
movq %rax, %r9
movq %rax, -552(%rbp) ## 8-byte Spill
adcq %rcx, %rdx
movq %rdx, -448(%rbp) ## 8-byte Spill
movq -168(%rbp), %rcx ## 8-byte Reload
movq -312(%rbp), %rdi ## 8-byte Reload
cmpq %rcx, %rdi
movq %rcx, %rax
movq %rcx, %r10
movq -208(%rbp), %rdx ## 8-byte Reload
adcq %rdx, %rax
addb $255, -584(%rbp) ## 1-byte Folded Spill
adcq -536(%rbp), %rdi ## 8-byte Folded Reload
setb %cl
movq %rax, %rdi
movq -328(%rbp), %r8 ## 8-byte Reload
adcq %r8, %rdi
movq %rdi, -528(%rbp) ## 8-byte Spill
addq %rbx, %r9
movq %r9, -864(%rbp) ## 8-byte Spill
movq -440(%rbp), %rbx ## 8-byte Reload
adcq -496(%rbp), %rbx ## 8-byte Folded Reload
movq %rbx, -440(%rbp) ## 8-byte Spill
setb -464(%rbp) ## 1-byte Folded Spill
cmpq %r10, %rax
adcq %rdx, %r10
movq %r10, -560(%rbp) ## 8-byte Spill
addb $255, %cl
adcq %r8, %rax
setb -328(%rbp) ## 1-byte Folded Spill
movq %r10, %rax
adcq %r14, %rax
movq %rax, -400(%rbp) ## 8-byte Spill
addb $255, -184(%rbp) ## 1-byte Folded Spill
movq -160(%rbp), %rax ## 8-byte Reload
adcq %rax, -384(%rbp) ## 8-byte Folded Spill
movq -200(%rbp), %rax ## 8-byte Reload
adcq $0, %rax
addb $255, -64(%rbp) ## 1-byte Folded Spill
movq -480(%rbp), %rcx ## 8-byte Reload
adcq -432(%rbp), %rcx ## 8-byte Folded Reload
adcq $0, %rax
movq %rax, %rcx
movq %rax, -200(%rbp) ## 8-byte Spill
movq -704(%rbp), %rdx ## 8-byte Reload
cmpq -56(%rbp), %rdx ## 8-byte Folded Reload
movq -240(%rbp), %rax ## 8-byte Reload
adcq $0, %rax
movq %rax, -240(%rbp) ## 8-byte Spill
addb $255, -144(%rbp) ## 1-byte Folded Spill
adcq -344(%rbp), %rdx ## 8-byte Folded Reload
setb -144(%rbp) ## 1-byte Folded Spill
movq %rax, %rdx
adcq %rcx, %rdx
movq %rdx, %rdi
movq %rdx, -272(%rbp) ## 8-byte Spill
movq -224(%rbp), %rax ## 8-byte Reload
movq %rax, %rcx
movq -392(%rbp), %rdx ## 8-byte Reload
imulq %rdx, %rcx
movq %rcx, -64(%rbp) ## 8-byte Spill
mulq %rdx
movq %rdx, -56(%rbp) ## 8-byte Spill
xorl %r8d, %r8d
movq -336(%rbp), %rax ## 8-byte Reload
cmpq %r11, %rax
setb %r8b
movq %rax, %rbx
addq %rcx, %rbx
movq %r8, %rax
adcq %rdx, %rax
movq %rax, -96(%rbp) ## 8-byte Spill
addb $255, -256(%rbp) ## 1-byte Folded Spill
movq -304(%rbp), %rax ## 8-byte Reload
adcq %rax, -152(%rbp) ## 8-byte Folded Spill
setb -416(%rbp) ## 1-byte Folded Spill
adcq %rdi, %rbx
movq %rbx, -536(%rbp) ## 8-byte Spill
movq -48(%rbp), %r13 ## 8-byte Reload
movq -712(%rbp), %rcx ## 8-byte Reload
cmpq %r13, %rcx
adcq -128(%rbp), %r13 ## 8-byte Folded Reload
movq %r13, -584(%rbp) ## 8-byte Spill
addb $255, -352(%rbp) ## 1-byte Folded Spill
adcq -472(%rbp), %rcx ## 8-byte Folded Reload
setb -360(%rbp) ## 1-byte Folded Spill
adcq %rbx, %r13
movq %r13, -256(%rbp) ## 8-byte Spill
movq -192(%rbp), %rcx ## 8-byte Reload
movq %rcx, %rax
movq -72(%rbp), %rdi ## 8-byte Reload
mulq %rdi
movq %rdx, %r11
imulq %rdi, %rcx
movq %rcx, %rbx
xorl %r9d, %r9d
movq -768(%rbp), %r14 ## 8-byte Reload
movq -752(%rbp), %rdi ## 8-byte Reload
cmpq %rdi, %r14
setb %r9b
movq -80(%rbp), %rax ## 8-byte Reload
movq %rax, %r15
imulq %rax, %r15
mulq %rax
movq %rdx, -280(%rbp) ## 8-byte Spill
xorl %eax, %eax
movq -608(%rbp), %r12 ## 8-byte Reload
cmpq %rdi, %r12
setb %al
movq %rax, %rcx
xorl %eax, %eax
movq -624(%rbp), %r10 ## 8-byte Reload
cmpq -688(%rbp), %r10 ## 8-byte Folded Reload
setb %al
addq %rbx, %r10
movq %r11, -304(%rbp) ## 8-byte Spill
adcq %r11, %rax
movq %rax, -184(%rbp) ## 8-byte Spill
addq %r12, %r15
movq %r15, -368(%rbp) ## 8-byte Spill
adcq %rdx, %rcx
movq %rcx, -504(%rbp) ## 8-byte Spill
addq %r14, %rbx
movq %rbx, -480(%rbp) ## 8-byte Spill
adcq %r11, %r9
movq %r9, -344(%rbp) ## 8-byte Spill
addb $255, -176(%rbp) ## 1-byte Folded Spill
movq -520(%rbp), %rax ## 8-byte Reload
adcq %rax, -376(%rbp) ## 8-byte Folded Spill
setb -520(%rbp) ## 1-byte Folded Spill
movq %rbx, %rax
adcq %r13, %rax
movq %rax, %rdx
movq %rax, -352(%rbp) ## 8-byte Spill
movq -216(%rbp), %rax ## 8-byte Reload
movq -792(%rbp), %rcx ## 8-byte Reload
cmpq %rax, %rcx
adcq -120(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -472(%rbp) ## 8-byte Spill
addb $255, -408(%rbp) ## 1-byte Folded Spill
adcq -512(%rbp), %rcx ## 8-byte Folded Reload
setb -512(%rbp) ## 1-byte Folded Spill
movq %rax, %rcx
adcq %rdx, %rcx
movq %rcx, -488(%rbp) ## 8-byte Spill
addb $255, -816(%rbp) ## 1-byte Folded Spill
movq -856(%rbp), %rax ## 8-byte Reload
adcq -104(%rbp), %rax ## 8-byte Folded Reload
setb -544(%rbp) ## 1-byte Folded Spill
adcq %rcx, %r15
movq %r15, -408(%rbp) ## 8-byte Spill
movq -296(%rbp), %rax ## 8-byte Reload
movq -784(%rbp), %rdx ## 8-byte Reload
cmpq %rax, %rdx
adcq -112(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -312(%rbp) ## 8-byte Spill
addb $255, -808(%rbp) ## 1-byte Folded Spill
adcq -800(%rbp), %rdx ## 8-byte Folded Reload
setb -648(%rbp) ## 1-byte Folded Spill
adcq %r15, %rax
addb $255, -616(%rbp) ## 1-byte Folded Spill
movq %r10, %r11
adcq %rax, %r11
movq -200(%rbp), %rcx ## 8-byte Reload
cmpq -248(%rbp), %rcx ## 8-byte Folded Reload
movl $0, %ecx
setb %cl
addq -56(%rbp), %r8 ## 8-byte Folded Reload
movq -64(%rbp), %rdx ## 8-byte Reload
addq %rdx, -336(%rbp) ## 8-byte Folded Spill
adcq %r8, %rcx
movq %rcx, -248(%rbp) ## 8-byte Spill
addb $255, -136(%rbp) ## 1-byte Folded Spill
adcq %r10, %rax
setb -176(%rbp) ## 1-byte Folded Spill
movq -168(%rbp), %rax ## 8-byte Reload
movq -560(%rbp), %rdx ## 8-byte Reload
cmpq %rax, %rdx
adcq -208(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -152(%rbp) ## 8-byte Spill
addb $255, -328(%rbp) ## 1-byte Folded Spill
adcq -568(%rbp), %rdx ## 8-byte Folded Reload
setb %r10b
adcq %r11, %rax
movq %rax, -160(%rbp) ## 8-byte Spill
movq -320(%rbp), %rcx ## 8-byte Reload
movq %rcx, %r13
imulq -728(%rbp), %r13 ## 8-byte Folded Reload
movq %rcx, %r9
shlq $32, %r9
movq %rcx, %rax
movl $4294967295, %edx ## imm = 0xFFFFFFFF
mulq %rdx
movq %rdx, -104(%rbp) ## 8-byte Spill
xorl %ebx, %ebx
subq %rcx, %r9
movq %rcx, %rax
setb %bl
movq -424(%rbp), %rdi ## 8-byte Reload
leaq (%rdi,%rbx), %rcx
addq %rdx, %rcx
movq %rcx, -384(%rbp) ## 8-byte Spill
movl $0, %ecx
setb %cl
addq %rbx, %rdi
adcq -592(%rbp), %rcx ## 8-byte Folded Reload
movq %rcx, -376(%rbp) ## 8-byte Spill
setb %r14b
movq %r13, %rcx
movq %r13, %rbx
movq %r13, -136(%rbp) ## 8-byte Spill
movq -448(%rbp), %r12 ## 8-byte Reload
adcq %r12, %rcx
movq %rcx, -328(%rbp) ## 8-byte Spill
addb $255, -456(%rbp) ## 1-byte Folded Spill
movq -864(%rbp), %rcx ## 8-byte Reload
adcq -288(%rbp), %rcx ## 8-byte Folded Reload
setb %r15b
movq -440(%rbp), %rcx ## 8-byte Reload
movq %rcx, %r13
movq -640(%rbp), %r8 ## 8-byte Reload
adcq %r8, %r13
addb $255, %r10b
adcq -152(%rbp), %r11 ## 8-byte Folded Reload
setb -288(%rbp) ## 1-byte Folded Spill
setb -592(%rbp) ## 1-byte Folded Spill
movq -264(%rbp), %rdx ## 8-byte Reload
addq %rdx, -552(%rbp) ## 8-byte Folded Spill
adcq $0, -496(%rbp) ## 8-byte Folded Spill
setb %dl
addb $255, -464(%rbp) ## 1-byte Folded Spill
movzbl %dl, %edx
movq -720(%rbp), %r11 ## 8-byte Reload
adcq %r11, %rdx
addb $255, %r15b
adcq %r8, %rcx
setb %r10b
movq -632(%rbp), %rdi ## 8-byte Reload
leaq (%rdx,%rdi), %rcx
movq -528(%rbp), %r8 ## 8-byte Reload
adcq %r8, %rcx
movq %rcx, %r15
subq %rax, %r9
addb $255, %r14b
adcq %rbx, %r12
setb %r14b
movq -104(%rbp), %rcx ## 8-byte Reload
leaq (%r9,%rcx), %rcx
adcq %r13, %rcx
movq %rcx, -440(%rbp) ## 8-byte Spill
xorl %ecx, %ecx
cmpq %r11, %rdx
setb %cl
addq %rdi, %rdx
movq -576(%rbp), %rdi ## 8-byte Reload
adcq %rdi, %rcx
addb $255, %r10b
adcq %r8, %rdx
setb %r8b
movq -232(%rbp), %rdx ## 8-byte Reload
leaq (%rcx,%rdx), %rbx
movq -400(%rbp), %r10 ## 8-byte Reload
adcq %r10, %rbx
movq %rbx, -264(%rbp) ## 8-byte Spill
xorl %r12d, %r12d
cmpq %rdi, %rcx
setb %r12b
addq %rdx, %rcx
adcq -88(%rbp), %r12 ## 8-byte Folded Reload
addb $255, %r8b
adcq %r10, %rcx
setb -232(%rbp) ## 1-byte Folded Spill
movq -64(%rbp), %rcx ## 8-byte Reload
leaq (%r12,%rcx), %rcx
adcq -160(%rbp), %rcx ## 8-byte Folded Reload
setb -424(%rbp) ## 1-byte Folded Spill
setb -552(%rbp) ## 1-byte Folded Spill
movl $4294967294, %edx ## imm = 0xFFFFFFFE
mulq %rdx
movq -104(%rbp), %rax ## 8-byte Reload
addq %rax, %r9
movq -136(%rbp), %rcx ## 8-byte Reload
adcq %rcx, %rdx
cmpq %rcx, %rdx
movq %rdx, -392(%rbp) ## 8-byte Spill
adcq %rax, %rcx
movq %rcx, -576(%rbp) ## 8-byte Spill
addb $255, %r14b
adcq %r13, %r9
setb -720(%rbp) ## 1-byte Folded Spill
movq %r15, %rax
adcq %rdx, %rax
setb -496(%rbp) ## 1-byte Folded Spill
movq %rcx, %rax
adcq %rbx, %rax
movq %rax, -432(%rbp) ## 8-byte Spill
addb $255, -144(%rbp) ## 1-byte Folded Spill
movq -200(%rbp), %rax ## 8-byte Reload
adcq %rax, -240(%rbp) ## 8-byte Folded Spill
movq -248(%rbp), %rax ## 8-byte Reload
adcq $0, %rax
addb $255, -416(%rbp) ## 1-byte Folded Spill
movq -272(%rbp), %rdx ## 8-byte Reload
adcq %rdx, -336(%rbp) ## 8-byte Folded Spill
adcq $0, %rax
movq %rax, %rcx
movq %rax, -248(%rbp) ## 8-byte Spill
movq -584(%rbp), %rax ## 8-byte Reload
cmpq -48(%rbp), %rax ## 8-byte Folded Reload
movq -128(%rbp), %rdx ## 8-byte Reload
adcq $0, %rdx
movq %rdx, -128(%rbp) ## 8-byte Spill
addb $255, -360(%rbp) ## 1-byte Folded Spill
adcq -536(%rbp), %rax ## 8-byte Folded Reload
setb -144(%rbp) ## 1-byte Folded Spill
movq %rdx, %rax
adcq %rcx, %rax
movq %rax, %rdi
movq %rax, -272(%rbp) ## 8-byte Spill
movq -224(%rbp), %rax ## 8-byte Reload
movq %rax, %rbx
movq -72(%rbp), %rdx ## 8-byte Reload
imulq %rdx, %rbx
movq %rbx, -528(%rbp) ## 8-byte Spill
mulq %rdx
movq %rdx, -200(%rbp) ## 8-byte Spill
xorl %r9d, %r9d
movq -344(%rbp), %r13 ## 8-byte Reload
movq -304(%rbp), %r11 ## 8-byte Reload
cmpq %r11, %r13
setb %r9b
movq %r13, %rcx
addq %rbx, %rcx
movq %r9, %rax
adcq %rdx, %rax
movq %rax, -240(%rbp) ## 8-byte Spill
addb $255, -520(%rbp) ## 1-byte Folded Spill
movq -256(%rbp), %rax ## 8-byte Reload
adcq %rax, -480(%rbp) ## 8-byte Folded Spill
setb -416(%rbp) ## 1-byte Folded Spill
adcq %rdi, %rcx
movq %rcx, -640(%rbp) ## 8-byte Spill
movq -216(%rbp), %r8 ## 8-byte Reload
movq -472(%rbp), %rdx ## 8-byte Reload
cmpq %r8, %rdx
adcq -120(%rbp), %r8 ## 8-byte Folded Reload
movq %r8, -632(%rbp) ## 8-byte Spill
addb $255, -512(%rbp) ## 1-byte Folded Spill
adcq -352(%rbp), %rdx ## 8-byte Folded Reload
setb -360(%rbp) ## 1-byte Folded Spill
adcq %rcx, %r8
movq %r8, -536(%rbp) ## 8-byte Spill
movq -192(%rbp), %rax ## 8-byte Reload
movq %rax, %r14
movq -80(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %r14
mulq %rcx
xorl %eax, %eax
movq -504(%rbp), %rcx ## 8-byte Reload
cmpq -280(%rbp), %rcx ## 8-byte Folded Reload
setb %al
movq %rax, %rdi
xorl %ebx, %ebx
movq -184(%rbp), %rax ## 8-byte Reload
cmpq %r11, %rax
setb %bl
addq %r14, %rax
movq %rax, %r10
movq %rax, -184(%rbp) ## 8-byte Spill
movq %rdx, -400(%rbp) ## 8-byte Spill
adcq %rdx, %rbx
movq %rbx, -320(%rbp) ## 8-byte Spill
addq %rcx, %r14
adcq %rdx, %rdi
movq %rdi, -72(%rbp) ## 8-byte Spill
addb $255, -544(%rbp) ## 1-byte Folded Spill
movq -488(%rbp), %rax ## 8-byte Reload
adcq -368(%rbp), %rax ## 8-byte Folded Reload
setb -368(%rbp) ## 1-byte Folded Spill
movq %r14, %rax
adcq %r8, %rax
movq %rax, %rcx
movq %rax, -488(%rbp) ## 8-byte Spill
movq -296(%rbp), %rax ## 8-byte Reload
movq -312(%rbp), %rdx ## 8-byte Reload
cmpq %rax, %rdx
adcq -112(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -584(%rbp) ## 8-byte Spill
addb $255, -648(%rbp) ## 1-byte Folded Spill
adcq -408(%rbp), %rdx ## 8-byte Folded Reload
setb -544(%rbp) ## 1-byte Folded Spill
movq %rax, %rdx
adcq %rcx, %rdx
movq %rdx, -280(%rbp) ## 8-byte Spill
movb -176(%rbp), %al ## 1-byte Reload
addb $255, %al
movq %r10, %rdi
adcq %rdx, %rdi
movq -168(%rbp), %rax ## 8-byte Reload
cmpq %rax, -152(%rbp) ## 8-byte Folded Reload
movq %rax, %rdx
adcq -208(%rbp), %rdx ## 8-byte Folded Reload
addb $255, -592(%rbp) ## 1-byte Folded Spill
movq %rdi, %rax
adcq %rdx, %rax
movq %rdx, -648(%rbp) ## 8-byte Spill
setb -48(%rbp) ## 1-byte Folded Spill
setb -504(%rbp) ## 1-byte Folded Spill
addb $255, -720(%rbp) ## 1-byte Folded Spill
adcq %r15, -392(%rbp) ## 8-byte Folded Spill
xorl %r8d, %r8d
cmpq -88(%rbp), %r12 ## 8-byte Folded Reload
setb %r8b
addq -64(%rbp), %r12 ## 8-byte Folded Reload
movq -56(%rbp), %r10 ## 8-byte Reload
adcq %r10, %r8
addb $255, -232(%rbp) ## 1-byte Folded Spill
adcq -160(%rbp), %r12 ## 8-byte Folded Reload
movq %r12, -160(%rbp) ## 8-byte Spill
xorl %r15d, %r15d
movq -248(%rbp), %rax ## 8-byte Reload
cmpq -96(%rbp), %rax ## 8-byte Folded Reload
setb %r15b
movq -200(%rbp), %rbx ## 8-byte Reload
addq %rbx, %r9
movq -528(%rbp), %r11 ## 8-byte Reload
addq %r11, %r13
adcq %r9, %r15
addb $255, -288(%rbp) ## 1-byte Folded Spill
adcq %rdx, %rdi
movq %rdi, -232(%rbp) ## 8-byte Spill
movq -136(%rbp), %rcx ## 8-byte Reload
movq -576(%rbp), %rax ## 8-byte Reload
cmpq %rcx, %rax
movq %rcx, %rdx
adcq -104(%rbp), %rdx ## 8-byte Folded Reload
movq %rdx, -152(%rbp) ## 8-byte Spill
addb $255, -496(%rbp) ## 1-byte Folded Spill
adcq -264(%rbp), %rax ## 8-byte Folded Reload
setb -288(%rbp) ## 1-byte Folded Spill
adcq %r12, %rdx
movq %rdx, -336(%rbp) ## 8-byte Spill
xorl %r9d, %r9d
cmpq %r10, %r8
setb %r9b
addq %r11, %r8
adcq %rbx, %r9
addb $255, -552(%rbp) ## 1-byte Folded Spill
movq %r8, %rax
adcq %rdi, %rax
movq %rax, -56(%rbp) ## 8-byte Spill
addb $255, -144(%rbp) ## 1-byte Folded Spill
movq -248(%rbp), %rax ## 8-byte Reload
adcq %rax, -128(%rbp) ## 8-byte Folded Spill
adcq $0, %r15
addb $255, -416(%rbp) ## 1-byte Folded Spill
adcq -272(%rbp), %r13 ## 8-byte Folded Reload
adcq $0, %r15
movq -632(%rbp), %rcx ## 8-byte Reload
cmpq -216(%rbp), %rcx ## 8-byte Folded Reload
movq -120(%rbp), %rbx ## 8-byte Reload
adcq $0, %rbx
movq %rbx, -120(%rbp) ## 8-byte Spill
addb $255, -360(%rbp) ## 1-byte Folded Spill
adcq -640(%rbp), %rcx ## 8-byte Folded Reload
setb -216(%rbp) ## 1-byte Folded Spill
adcq %r15, %rbx
movq %rbx, -96(%rbp) ## 8-byte Spill
movq -224(%rbp), %rax ## 8-byte Reload
movq %rax, %r13
movq -80(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %r13
mulq %rcx
movq %rdx, %r12
xorl %edi, %edi
movq -72(%rbp), %rax ## 8-byte Reload
movq -400(%rbp), %r10 ## 8-byte Reload
cmpq %r10, %rax
setb %dil
movq %rax, %rcx
addq %r13, %rcx
movq %rdi, %rax
adcq %rdx, %rax
movq %rax, -128(%rbp) ## 8-byte Spill
addb $255, -368(%rbp) ## 1-byte Folded Spill
adcq -536(%rbp), %r14 ## 8-byte Folded Reload
setb -552(%rbp) ## 1-byte Folded Spill
adcq %rbx, %rcx
movq %rcx, %rdx
movq %rcx, -416(%rbp) ## 8-byte Spill
movq -296(%rbp), %rax ## 8-byte Reload
movq -584(%rbp), %rcx ## 8-byte Reload
cmpq %rax, %rcx
adcq -112(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -272(%rbp) ## 8-byte Spill
addb $255, -544(%rbp) ## 1-byte Folded Spill
adcq -488(%rbp), %rcx ## 8-byte Folded Reload
setb -88(%rbp) ## 1-byte Folded Spill
adcq %rdx, %rax
movq %rax, %r11
movq %rax, -144(%rbp) ## 8-byte Spill
movq -192(%rbp), %rax ## 8-byte Reload
movq %rax, %rbx
imulq %rax, %rbx
mulq %rax
movq %rdx, -496(%rbp) ## 8-byte Spill
xorl %ecx, %ecx
movq -320(%rbp), %rax ## 8-byte Reload
cmpq %r10, %rax
setb %cl
addq %rax, %rbx
movq %rbx, -320(%rbp) ## 8-byte Spill
adcq %rdx, %rcx
movq %rcx, -64(%rbp) ## 8-byte Spill
addb $255, -176(%rbp) ## 1-byte Folded Spill
movq -280(%rbp), %rax ## 8-byte Reload
adcq -184(%rbp), %rax ## 8-byte Folded Reload
setb -176(%rbp) ## 1-byte Folded Spill
movq %rbx, %rdx
adcq %r11, %rdx
movq -168(%rbp), %rax ## 8-byte Reload
cmpq %rax, -648(%rbp) ## 8-byte Folded Reload
movq %rax, %rcx
adcq -208(%rbp), %rcx ## 8-byte Folded Reload
addb $255, -504(%rbp) ## 1-byte Folded Spill
movq %rcx, %rax
movq %rcx, %r11
adcq %rdx, %rax
xorl %r10d, %r10d
cmpq -240(%rbp), %r15 ## 8-byte Folded Reload
setb %r10b
movq %r12, -360(%rbp) ## 8-byte Spill
addq %r12, %rdi
addq %r13, -72(%rbp) ## 8-byte Folded Spill
adcq %rdi, %r10
addb $255, -424(%rbp) ## 1-byte Folded Spill
adcq -232(%rbp), %r8 ## 8-byte Folded Reload
setb %cl
leaq (%r9,%r13), %rdi
adcq %rax, %rdi
movq %rdi, -424(%rbp) ## 8-byte Spill
addb $255, -48(%rbp) ## 1-byte Folded Spill
adcq %r11, %rdx
movq %r11, %r8
setb -80(%rbp) ## 1-byte Folded Spill
xorl %r14d, %r14d
cmpq -200(%rbp), %r9 ## 8-byte Folded Reload
setb %r14b
addq %r9, %r13
adcq %r12, %r14
addb $255, %cl
adcq %rax, %r13
setb -184(%rbp) ## 1-byte Folded Spill
setb %r13b
movq -136(%rbp), %r11 ## 8-byte Reload
movq -152(%rbp), %rax ## 8-byte Reload
cmpq %r11, %rax
movq %r11, %rbx
movq -104(%rbp), %rdx ## 8-byte Reload
adcq %rdx, %rbx
addb $255, -288(%rbp) ## 1-byte Folded Spill
adcq -160(%rbp), %rax ## 8-byte Folded Reload
setb %cl
movq %rbx, %rax
movq -56(%rbp), %r9 ## 8-byte Reload
adcq %r9, %rax
movq %rax, -200(%rbp) ## 8-byte Spill
cmpq %r11, %rbx
adcq %rdx, %r11
addb $255, %cl
adcq %r9, %rbx
setb -240(%rbp) ## 1-byte Folded Spill
movq %r11, %rax
adcq %rdi, %rax
movq %rax, -248(%rbp) ## 8-byte Spill
addb $255, -216(%rbp) ## 1-byte Folded Spill
adcq %r15, -120(%rbp) ## 8-byte Folded Spill
adcq $0, %r10
addb $255, -552(%rbp) ## 1-byte Folded Spill
movq -96(%rbp), %rax ## 8-byte Reload
adcq %rax, -72(%rbp) ## 8-byte Folded Spill
adcq $0, %r10
movq -272(%rbp), %rcx ## 8-byte Reload
cmpq -296(%rbp), %rcx ## 8-byte Folded Reload
movq -112(%rbp), %rbx ## 8-byte Reload
adcq $0, %rbx
movq %rbx, -112(%rbp) ## 8-byte Spill
addb $255, -88(%rbp) ## 1-byte Folded Spill
adcq -416(%rbp), %rcx ## 8-byte Folded Reload
setb -216(%rbp) ## 1-byte Folded Spill
adcq %r10, %rbx
movq -224(%rbp), %rax ## 8-byte Reload
movq %rax, %r12
movq -192(%rbp), %rcx ## 8-byte Reload
imulq %rcx, %r12
mulq %rcx
xorl %edi, %edi
movq -64(%rbp), %r9 ## 8-byte Reload
cmpq -496(%rbp), %r9 ## 8-byte Folded Reload
setb %dil
addq %r12, %r9
movq %rdi, %rax
adcq %rdx, %rax
movq %rax, -296(%rbp) ## 8-byte Spill
addb $255, -176(%rbp) ## 1-byte Folded Spill
movq -144(%rbp), %rcx ## 8-byte Reload
adcq %rcx, -320(%rbp) ## 8-byte Folded Spill
setb -56(%rbp) ## 1-byte Folded Spill
adcq %rbx, %r9
movq -168(%rbp), %r15 ## 8-byte Reload
cmpq %r15, %r8
adcq -208(%rbp), %r15 ## 8-byte Folded Reload
movb -80(%rbp), %cl ## 1-byte Reload
addb $255, %cl
movq %r15, %rcx
adcq %r9, %rcx
movq %rcx, -120(%rbp) ## 8-byte Spill
xorl %r8d, %r8d
movq %r14, %rax
cmpq -360(%rbp), %r14 ## 8-byte Folded Reload
setb %r8b
addq %r12, %rax
movq %rax, -192(%rbp) ## 8-byte Spill
adcq %rdx, %r8
movq %rdx, -48(%rbp) ## 8-byte Spill
addb $255, %r13b
adcq %rcx, %rax
movq %rax, -176(%rbp) ## 8-byte Spill
xorl %ecx, %ecx
cmpq -128(%rbp), %r10 ## 8-byte Folded Reload
setb %cl
addq %rdx, %rdi
addq -64(%rbp), %r12 ## 8-byte Folded Reload
adcq %rdi, %rcx
movq -136(%rbp), %r13 ## 8-byte Reload
cmpq %r13, %r11
movq %r13, %r14
adcq -104(%rbp), %r14 ## 8-byte Folded Reload
addb $255, -240(%rbp) ## 1-byte Folded Spill
adcq -424(%rbp), %r11 ## 8-byte Folded Reload
setb -64(%rbp) ## 1-byte Folded Spill
movq %r14, %rdi
adcq %rax, %rdi
movq %rdi, -72(%rbp) ## 8-byte Spill
addb $255, -216(%rbp) ## 1-byte Folded Spill
adcq %r10, -112(%rbp) ## 8-byte Folded Spill
adcq $0, %rcx
addb $255, -56(%rbp) ## 1-byte Folded Spill
adcq %rbx, %r12
adcq $0, %rcx
xorl %r11d, %r11d
cmpq -296(%rbp), %rcx ## 8-byte Folded Reload
setb %r11b
movq -224(%rbp), %rdi ## 8-byte Reload
movq %rdi, %rax
mulq %rdi
imulq %rdi, %rdi
xorl %r10d, %r10d
cmpq -48(%rbp), %r8 ## 8-byte Folded Reload
setb %r10b
movq %r8, %rbx
addq %rdi, %rbx
leaq (%r10,%rdx), %rbx
adcq %rdx, %r10
addq %r8, %rdi
adcq %rbx, %r11
cmpq -168(%rbp), %r15 ## 8-byte Folded Reload
movq -208(%rbp), %rdx ## 8-byte Reload
adcq $0, %rdx
addb $255, -80(%rbp) ## 1-byte Folded Spill
adcq %r9, %r15
setb %r8b
movq %rdx, %rax
movq %rdx, %r9
adcq %rcx, %rax
addb $255, -184(%rbp) ## 1-byte Folded Spill
movq -120(%rbp), %rdx ## 8-byte Reload
adcq %rdx, -192(%rbp) ## 8-byte Folded Spill
setb %r15b
movq %rdi, %rbx
adcq %rax, %rbx
cmpq %r13, %r14
movq -104(%rbp), %r12 ## 8-byte Reload
adcq %r12, %r13
addb $255, -64(%rbp) ## 1-byte Folded Spill
adcq -176(%rbp), %r14 ## 8-byte Folded Reload
setb %r14b
movq %r13, %rdx
adcq %rbx, %rdx
movq %rdx, -120(%rbp) ## 8-byte Spill
addb $255, %r8b
adcq %rcx, %r9
adcq $0, %r11
addb $255, %r15b
adcq %rdi, %rax
adcq $0, %r11
xorl %eax, %eax
cmpq %r10, %r11
movq %r11, -184(%rbp) ## 8-byte Spill
setb %al
movq %rax, %rcx
movq %rax, -192(%rbp) ## 8-byte Spill
addb $255, %r14b
adcq %r13, %rbx
setb -128(%rbp) ## 1-byte Folded Spill
adcq $0, %r11
movq %r11, %r14
adcq $0, %rcx
movq %rcx, -216(%rbp) ## 8-byte Spill
cmpq -136(%rbp), %r13 ## 8-byte Folded Reload
adcq $0, %r12
movq %r12, -104(%rbp) ## 8-byte Spill
movl $4294967295, %eax ## imm = 0xFFFFFFFF
cmpq %rax, -384(%rbp) ## 8-byte Folded Reload
movq -376(%rbp), %rcx ## 8-byte Reload
movq %rcx, %rdx
sbbq $0, %rdx
movq %rdx, -224(%rbp) ## 8-byte Spill
cmpq %rdx, %rcx
movq -328(%rbp), %rbx ## 8-byte Reload
movq %rbx, %rcx
sbbq $0, %rcx
movq %rcx, -136(%rbp) ## 8-byte Spill
xorl %edx, %edx
movq -440(%rbp), %rdi ## 8-byte Reload
cmpq %rax, %rdi
movl $4294967295, %r12d ## imm = 0xFFFFFFFF
movl $0, %eax
sbbq %rax, %rax
cmpq %rcx, %rbx
movabsq $-4294967295, %r11 ## imm = 0xFFFFFFFF00000001
leaq (%rdi,%r11), %rcx
movq %rcx, %rdi
sbbq $0, %rdi
movq %rdi, -80(%rbp) ## 8-byte Spill
cmpq %rdi, %rcx
sbbq $0, %rax
movq -392(%rbp), %rbx ## 8-byte Reload
movq %rbx, %rcx
shrq %rcx
cmpq $2147483647, %rcx ## imm = 0x7FFFFFFF
leaq (%rbx,%r11), %rcx
leaq 1(%rax,%rcx), %rcx
movq %rcx, -64(%rbp) ## 8-byte Spill
movl $0, %edi
sbbq %rdi, %rdi
leaq (%rbx,%r11), %rax
incq %rax
cmpq %rcx, %rax
sbbq $0, %rdi
movq -432(%rbp), %rcx ## 8-byte Reload
cmpq %r12, %rcx
movl $0, %eax
sbbq %rax, %rax
leaq (%rcx,%r11), %rbx
addq %rbx, %rdi
movq %rdi, -112(%rbp) ## 8-byte Spill
cmpq %rdi, %rbx
sbbq $0, %rax
movq -336(%rbp), %rcx ## 8-byte Reload
cmpq %r12, %rcx
movl $0, %edi
sbbq %rdi, %rdi
leaq (%rcx,%r11), %r8
addq %r8, %rax
movq %rax, -240(%rbp) ## 8-byte Spill
cmpq %rax, %r8
sbbq $0, %rdi
movq -200(%rbp), %rax ## 8-byte Reload
cmpq %r12, %rax
movl $0, %ebx
sbbq %rbx, %rbx
leaq (%rax,%r11), %r8
addq %r8, %rdi
movq %rdi, -208(%rbp) ## 8-byte Spill
cmpq %rdi, %r8
sbbq $0, %rbx
movq -248(%rbp), %rax ## 8-byte Reload
cmpq %r12, %rax
movl $0, %r10d
sbbq %r10, %r10
leaq (%rax,%r11), %r8
addq %r8, %rbx
cmpq %rbx, %r8
sbbq $0, %r10
movq -72(%rbp), %rax ## 8-byte Reload
cmpq %r12, %rax
movl $0, %r8d
sbbq %r8, %r8
leaq (%rax,%r11), %r15
addq %r15, %r10
cmpq %r10, %r15
sbbq $0, %r8
movq -120(%rbp), %r13 ## 8-byte Reload
cmpq %r12, %r13
movl $0, %r15d
sbbq %r15, %r15
leaq (%r11,%r13), %r9
addq %r9, %r8
cmpq %r8, %r9
sbbq $0, %r15
movq -104(%rbp), %rax ## 8-byte Reload
leaq (%r14,%rax), %r9
cmpq %r12, %r9
leaq (%r9,%r11), %r9
movl $0, %r12d
sbbq %r12, %r12
addq %r9, %r15
cmpq %r15, %r9
sbbq $0, %r12
movq %rax, %r9
addq %rax, %r14
movq %r14, -176(%rbp) ## 8-byte Spill
adcq -216(%rbp), %r12 ## 8-byte Folded Reload
addb $255, -128(%rbp) ## 1-byte Folded Spill
adcq -184(%rbp), %r9 ## 8-byte Folded Reload
movq -192(%rbp), %rdi ## 8-byte Reload
adcq $0, %rdi
cmpq %r12, %rdi
movq -384(%rbp), %r14 ## 8-byte Reload
leaq (%r14,%r11), %r9
sbbq %rdx, %rdx
movq -728(%rbp), %r11 ## 8-byte Reload
xorq %rdx, %r11
andq %rdx, %r14
andq %r11, %r9
orq %r14, %r9
movq -376(%rbp), %rdi ## 8-byte Reload
andq %rdx, %rdi
movq -224(%rbp), %rax ## 8-byte Reload
andq %r11, %rax
orq %rdi, %rax
movq %rax, -224(%rbp) ## 8-byte Spill
movq -328(%rbp), %rdi ## 8-byte Reload
andq %rdx, %rdi
movq -136(%rbp), %rax ## 8-byte Reload
andq %r11, %rax
orq %rdi, %rax
movq %rax, -136(%rbp) ## 8-byte Spill
movq -440(%rbp), %rdi ## 8-byte Reload
andq %rdx, %rdi
movq -80(%rbp), %rax ## 8-byte Reload
andq %r11, %rax
orq %rdi, %rax
movq %rax, -80(%rbp) ## 8-byte Spill
movq -392(%rbp), %rdi ## 8-byte Reload
andq %rdx, %rdi
movq -64(%rbp), %r12 ## 8-byte Reload
andq %r11, %r12
orq %rdi, %r12
movq -432(%rbp), %rax ## 8-byte Reload
andq %rdx, %rax
movq -112(%rbp), %r14 ## 8-byte Reload
andq %r11, %r14
orq %rax, %r14
andq %rdx, %rcx
movq -240(%rbp), %rdi ## 8-byte Reload
andq %r11, %rdi
orq %rcx, %rdi
movq -200(%rbp), %rax ## 8-byte Reload
andq %rdx, %rax
movq -208(%rbp), %rcx ## 8-byte Reload
andq %r11, %rcx
orq %rax, %rcx
movq -248(%rbp), %rax ## 8-byte Reload
andq %rdx, %rax
andq %r11, %rbx
orq %rax, %rbx
movq -72(%rbp), %rax ## 8-byte Reload
andq %rdx, %rax
andq %r11, %r10
orq %rax, %r10
andq %rdx, %r13
andq %r11, %r8
orq %r13, %r8
andq -176(%rbp), %rdx ## 8-byte Folded Reload
andq %r15, %r11
orq %rdx, %r11
movq %r9, (%rsi)
movq -224(%rbp), %rax ## 8-byte Reload
movq %rax, 4(%rsi)
movq -136(%rbp), %rax ## 8-byte Reload
movq %rax, 8(%rsi)
movq -80(%rbp), %rax ## 8-byte Reload
movq %rax, 12(%rsi)
movq %r12, 16(%rsi)
movq %r14, 20(%rsi)
movq %rdi, 24(%rsi)
movq %rcx, 28(%rsi)
movq %rbx, 32(%rsi)
movq %r10, 36(%rsi)
movq %r8, 40(%rsi)
movq %r11, 44(%rsi)
addq $848, %rsp ## imm = 0x350
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
##-----------------------------------------------------------------------
## _fiat_p384_add — modular addition for the fiat-crypto P-384 backend.
## C-equivalent: fiat_p384_add(arg1, arg2, out1)  (compiler-generated)
## ABI:   System V AMD64
## In:    rdi = first operand (a), rsi = second operand (b)
## Out:   rdx = output pointer; 12 result words written at offsets 0..44
## Clobbers: rax, rbx, rcx, r8-r15, flags (callee-saved regs saved below)
## NOTE(review): limbs are loaded/stored as 64-bit qwords at 4-byte-spaced
## offsets (0,4,8,...,44), so adjacent slots overlap; this mirrors the
## layout the compiler emitted from the original C — confirm against the
## source struct before changing anything here.
## Structure: (1) limb-wise a+b with an explicit carry chain where each
## cmp/setb pair materializes the carry of the previous add; (2) trial
## subtraction of the modulus with borrow propagation; (3) constant-time
## masked select between the raw and reduced sums.
##-----------------------------------------------------------------------
.globl _fiat_p384_add ## -- Begin function fiat_p384_add
.p2align 4, 0x90
_fiat_p384_add: ## @fiat_p384_add
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
## Save every callee-saved GPR used by the carry chain below.
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
## --- Phase 1: limb-wise addition a[i] + b[i] with explicit carries. ---
movq 4(%rdi), %r8
movq (%rsi), %rcx
movq 4(%rsi), %r10
movq (%rdi), %rbx
movq %rcx, %rax
addq %rbx, %rax
movq %r10, %r9
adcq %r8, %r9
addq %rbx, %rcx
movq %rcx, %r11
movq %rcx, -120(%rbp) ## 8-byte Spill
adcq %r8, %r10
movq 8(%rdi), %rbx
movq %rbx, %rax
adcq $0, %rax
## cmp/setb pattern: capture the carry-out of the previous addition
## into a fresh register before it is clobbered by the next add.
xorl %ecx, %ecx
cmpq %rbx, %rax
setb %cl
addq 8(%rsi), %rax
movq %rax, %r15
movq 12(%rdi), %rax
adcq %rax, %rcx
xorl %ebx, %ebx
cmpq %rax, %rcx
setb %bl
addq 12(%rsi), %rcx
movq %rcx, %r10
movq 16(%rdi), %rax
adcq %rax, %rbx
xorl %ecx, %ecx
cmpq %rax, %rbx
setb %cl
addq 16(%rsi), %rbx
movq %rbx, %r13
movq 20(%rdi), %rax
adcq %rax, %rcx
xorl %ebx, %ebx
cmpq %rax, %rcx
setb %bl
addq 20(%rsi), %rcx
movq %rcx, %r14
movq 24(%rdi), %rax
adcq %rax, %rbx
xorl %ecx, %ecx
cmpq %rax, %rbx
setb %cl
addq 24(%rsi), %rbx
movq %rbx, -104(%rbp) ## 8-byte Spill
movq 28(%rdi), %rax
adcq %rax, %rcx
xorl %ebx, %ebx
cmpq %rax, %rcx
setb %bl
addq 28(%rsi), %rcx
movq %rcx, -96(%rbp) ## 8-byte Spill
movq 32(%rdi), %rax
adcq %rax, %rbx
xorl %ecx, %ecx
cmpq %rax, %rbx
setb %cl
addq 32(%rsi), %rbx
movq %rbx, -88(%rbp) ## 8-byte Spill
movq 36(%rdi), %rax
adcq %rax, %rcx
xorl %r12d, %r12d
cmpq %rax, %rcx
setb %r12b
addq 36(%rsi), %rcx
movq %rcx, -80(%rbp) ## 8-byte Spill
movq 40(%rdi), %rax
adcq %rax, %r12
xorl %ecx, %ecx
cmpq %rax, %r12
setb %cl
addq 40(%rsi), %r12
movq 44(%rdi), %rax
adcq %rax, %rcx
xorl %edi, %edi
cmpq %rax, %rcx
setb %dil
addq 44(%rsi), %rcx
movq %rcx, -72(%rbp) ## 8-byte Spill
adcq $0, %rdi
movq %rdi, -112(%rbp) ## 8-byte Spill
## --- Phase 2: trial subtraction of the modulus with borrow chain. ---
## 0xFFFFFFFF and 0xFFFFFFFF00000001 below are modulus-derived limb
## constants; presumably limbs of the P-384 prime — TODO confirm against
## the fiat-crypto source.
movl $4294967295, %r8d ## imm = 0xFFFFFFFF
cmpq %r8, %r11
movq %r9, -152(%rbp) ## 8-byte Spill
movq %r9, %rax
sbbq $0, %rax
movq %rax, -56(%rbp) ## 8-byte Spill
cmpq %rax, %r9
movq %r15, -168(%rbp) ## 8-byte Spill
movq %r15, %rax
sbbq $0, %rax
movq %rax, -64(%rbp) ## 8-byte Spill
cmpq %rax, %r15
movabsq $-4294967295, %rbx ## imm = 0xFFFFFFFF00000001
movq %r10, -160(%rbp) ## 8-byte Spill
leaq (%r10,%rbx), %rax
movq %rax, %rcx
sbbq $0, %rcx
movq %rcx, -48(%rbp) ## 8-byte Spill
xorl %edi, %edi
cmpq %r8, %r10
movl $0, %esi
sbbq %rsi, %rsi
cmpq %rcx, %rax
sbbq $0, %rsi
movq %r13, -136(%rbp) ## 8-byte Spill
leaq (%rbx,%r13), %rax
leaq (%rsi,%rax), %rcx
incq %rcx
movq %rcx, -144(%rbp) ## 8-byte Spill
movq %r13, %rax
shrq %rax
cmpq $2147483647, %rax ## imm = 0x7FFFFFFF
movl $0, %r15d
sbbq %r15, %r15
leaq (%rbx,%r13), %rax
incq %rax
cmpq %rcx, %rax
sbbq $0, %r15
movq %r14, -128(%rbp) ## 8-byte Spill
leaq (%r14,%rbx), %rax
movq %rbx, %rcx
addq %rax, %r15
cmpq %r8, %r14
movl $0, %ebx
sbbq %rbx, %rbx
cmpq %r15, %rax
sbbq $0, %rbx
movq -104(%rbp), %r8 ## 8-byte Reload
movq %rcx, %r14
leaq (%r8,%rcx), %rax
addq %rax, %rbx
movl $4294967295, %r11d ## imm = 0xFFFFFFFF
cmpq %r11, %r8
movl $0, %r10d
sbbq %r10, %r10
cmpq %rbx, %rax
sbbq $0, %r10
movq -96(%rbp), %rcx ## 8-byte Reload
leaq (%rcx,%r14), %rax
addq %rax, %r10
cmpq %r11, %rcx
movl $0, %r9d
sbbq %r9, %r9
cmpq %r10, %rax
sbbq $0, %r9
movq -88(%rbp), %rcx ## 8-byte Reload
leaq (%rcx,%r14), %rax
addq %rax, %r9
cmpq %r11, %rcx
movl $0, %r8d
sbbq %r8, %r8
cmpq %r9, %rax
sbbq $0, %r8
movq -80(%rbp), %rcx ## 8-byte Reload
leaq (%rcx,%r14), %rax
addq %rax, %r8
cmpq %r11, %rcx
movl $0, %esi
sbbq %rsi, %rsi
cmpq %r8, %rax
sbbq $0, %rsi
leaq (%r12,%r14), %rax
addq %rax, %rsi
cmpq %r11, %r12
movl $0, %r11d
sbbq %r11, %r11
cmpq %rsi, %rax
sbbq $0, %r11
movq -72(%rbp), %rax ## 8-byte Reload
addq %rax, %r14
addq %r14, %r11
xorl %ecx, %ecx
movl $4294967295, %r13d ## imm = 0xFFFFFFFF
cmpq %r13, %rax
setb %cl
cmpq %r11, %r14
movq -112(%rbp), %r14 ## 8-byte Reload
movq %r14, %rax
sbbq %rcx, %rax
cmpq %rax, %r14
## rdi becomes the final borrow flag; from it the two select masks are
## derived (rdi = keep-raw mask, rax = keep-reduced mask).
sbbq %rdi, %rdi
movq -120(%rbp), %rcx ## 8-byte Reload
movabsq $-4294967295, %rax ## imm = 0xFFFFFFFF00000001
leaq (%rcx,%rax), %r14
movl $4294967294, %eax ## imm = 0xFFFFFFFE
orq $1, %rax
xorq %rdi, %rax
## --- Phase 3: constant-time masked select, limb by limb. ---
## result[i] = (raw[i] & rdi) | (reduced[i] & rax)
andq %rdi, %rcx
andq %rax, %r14
orq %rcx, %r14
movq -152(%rbp), %r13 ## 8-byte Reload
andq %rdi, %r13
movq -56(%rbp), %rcx ## 8-byte Reload
andq %rax, %rcx
orq %r13, %rcx
movq %rcx, -56(%rbp) ## 8-byte Spill
movq -168(%rbp), %r13 ## 8-byte Reload
andq %rdi, %r13
movq -64(%rbp), %rcx ## 8-byte Reload
andq %rax, %rcx
orq %r13, %rcx
movq %rcx, -64(%rbp) ## 8-byte Spill
movq -160(%rbp), %r13 ## 8-byte Reload
andq %rdi, %r13
movq -48(%rbp), %rcx ## 8-byte Reload
andq %rax, %rcx
orq %r13, %rcx
movq %rcx, -48(%rbp) ## 8-byte Spill
movq -136(%rbp), %rcx ## 8-byte Reload
andq %rdi, %rcx
movq -144(%rbp), %r13 ## 8-byte Reload
andq %rax, %r13
orq %rcx, %r13
movq -128(%rbp), %rcx ## 8-byte Reload
andq %rdi, %rcx
andq %rax, %r15
orq %rcx, %r15
movq -104(%rbp), %rcx ## 8-byte Reload
andq %rdi, %rcx
andq %rax, %rbx
orq %rcx, %rbx
movq -96(%rbp), %rcx ## 8-byte Reload
andq %rdi, %rcx
andq %rax, %r10
orq %rcx, %r10
movq -88(%rbp), %rcx ## 8-byte Reload
andq %rdi, %rcx
andq %rax, %r9
orq %rcx, %r9
movq -80(%rbp), %rcx ## 8-byte Reload
andq %rdi, %rcx
andq %rax, %r8
orq %rcx, %r8
andq %rdi, %r12
andq %rax, %rsi
orq %r12, %rsi
andq -72(%rbp), %rdi ## 8-byte Folded Reload
andq %r11, %rax
orq %rdi, %rax
## Store the 12 selected result words into the output buffer (rdx).
movq %r14, (%rdx)
movq -56(%rbp), %rcx ## 8-byte Reload
movq %rcx, 4(%rdx)
movq -64(%rbp), %rcx ## 8-byte Reload
movq %rcx, 8(%rdx)
movq -48(%rbp), %rcx ## 8-byte Reload
movq %rcx, 12(%rdx)
movq %r13, 16(%rdx)
movq %r15, 20(%rdx)
movq %rbx, 24(%rdx)
movq %r10, 28(%rdx)
movq %r9, 32(%rdx)
movq %r8, 36(%rdx)
movq %rsi, 40(%rdx)
movq %rax, 44(%rdx)
## Restore callee-saved registers and return.
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
##-----------------------------------------------------------------------
## _fiat_p384_sub — modular subtraction for the fiat-crypto P-384 backend.
## C-equivalent: fiat_p384_sub(arg1, arg2, out1)  (compiler-generated)
## ABI:   System V AMD64
## In:    rdi = minuend (a), rsi = subtrahend (b)
## Out:   rdx = output pointer; 12 result words written at offsets 0..44
## Clobbers: rax, rbx, rcx, r8-r15, flags (callee-saved regs saved below)
## NOTE(review): limbs are accessed as 64-bit qwords at 4-byte-spaced
## offsets (0,4,...,44); adjacent slots overlap — matches the layout the
## compiler emitted, confirm before modifying.
## Structure: (1) limb-wise a-b with an explicit borrow chain (each
## setb/cmp pair carries the borrow forward); (2) if the subtraction
## underflowed, a correction value (cmove-selected constant, presumably a
## modulus limb — TODO confirm) is added back across all limbs.
##-----------------------------------------------------------------------
.globl _fiat_p384_sub ## -- Begin function fiat_p384_sub
.p2align 4, 0x90
_fiat_p384_sub: ## @fiat_p384_sub
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
## Save every callee-saved GPR used by the borrow chain below.
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
## --- Phase 1: limb-wise subtraction a[i] - b[i] with explicit borrows. ---
movq (%rdi), %rax
movq 4(%rdi), %rbx
movq 8(%rdi), %r11
movq 12(%rdi), %r9
xorl %r10d, %r10d
subq 4(%rsi), %rbx
setb %r10b
subq (%rsi), %rax
movq %rax, -64(%rbp) ## 8-byte Spill
movq %rbx, %rcx
sbbq $0, %rcx
movq %rcx, -56(%rbp) ## 8-byte Spill
xorl %r8d, %r8d
subq 12(%rsi), %r9
setb %r8b
subq 8(%rsi), %r11
movq %r9, %rax
sbbq $0, %rax
movq %rax, %r14
movq %rax, -96(%rbp) ## 8-byte Spill
movq 16(%rdi), %rax
cmpq %rcx, %rbx
movq %r11, %rbx
sbbq %r10, %rbx
movq %rbx, -48(%rbp) ## 8-byte Spill
xorl %ecx, %ecx
cmpq %rbx, %r11
setb %cl
subq %rcx, %r14
movq %r14, -88(%rbp) ## 8-byte Spill
negq %rcx
## Each subq mem / setb pair records the borrow out of one limb; the
## following cmp/sbbq folds it into the next limb's difference.
xorl %r15d, %r15d
subq 16(%rsi), %rax
setb %r15b
cmpq %r14, %r9
movq %rax, %r10
sbbq %r8, %r10
movq 20(%rdi), %rbx
xorl %r8d, %r8d
subq 20(%rsi), %rbx
setb %r8b
cmpq %r10, %rax
movq %rbx, %r11
sbbq %r15, %r11
movq 24(%rdi), %rax
xorl %r9d, %r9d
subq 24(%rsi), %rax
setb %r9b
cmpq %r11, %rbx
movq %rax, %r15
sbbq %r8, %r15
movq 28(%rdi), %rbx
xorl %r8d, %r8d
subq 28(%rsi), %rbx
setb %r8b
cmpq %r15, %rax
movq %rbx, %r12
sbbq %r9, %r12
movq 32(%rdi), %rax
xorl %r9d, %r9d
subq 32(%rsi), %rax
setb %r9b
cmpq %r12, %rbx
movq %rax, %r13
sbbq %r8, %r13
movq 36(%rdi), %rbx
xorl %r14d, %r14d
subq 36(%rsi), %rbx
setb %r14b
cmpq %r13, %rax
movq %rbx, %rax
sbbq %r9, %rax
movq %rax, -80(%rbp) ## 8-byte Spill
movq 40(%rdi), %r9
xorl %r8d, %r8d
subq 40(%rsi), %r9
setb %r8b
cmpq %rax, %rbx
movq %r9, %rax
sbbq %r14, %rax
movq %rax, -72(%rbp) ## 8-byte Spill
movq 44(%rdi), %rdi
xorl %r14d, %r14d
subq 44(%rsi), %rdi
setb %r14b
cmpq %rax, %r9
movq %rdi, %r9
sbbq %r8, %r9
## --- Phase 2: conditional add-back on underflow. ---
## If the overall subtraction borrowed, rdi/rsi are set to the modulus
## correction constants (0xFFFFFFFF / 0xFFFFFFFE via cmove), else zero;
## they are then added through every limb with a fresh carry chain.
xorl %r8d, %r8d
cmpq %r9, %rdi
movl $0, %ebx
sbbq %rbx, %rbx
cmpq %rbx, %r14
movl $4294967294, %esi ## imm = 0xFFFFFFFE
leaq 1(%rsi), %rdi
cmoveq %r8, %rdi
addq %rdi, -64(%rbp) ## 8-byte Folded Spill
adcq $0, -56(%rbp) ## 8-byte Folded Spill
adcq $0, -48(%rbp) ## 8-byte Folded Spill
setb %al
adcq -96(%rbp), %rcx ## 8-byte Folded Reload
cmpq %rbx, %r14
cmoveq %r8, %rsi
addb $255, %al
adcq %rdi, -88(%rbp) ## 8-byte Folded Spill
movq %r10, %r14
adcq $0, %r14
addq %rdi, %rcx
xorl %r8d, %r8d
cmpq %r10, %r14
setb %r8b
addq %rsi, %r14
adcq %r11, %r8
xorl %r10d, %r10d
cmpq %r11, %r8
setb %r10b
addq %rdi, %r8
adcq %r15, %r10
xorl %r11d, %r11d
cmpq %r15, %r10
setb %r11b
addq %rdi, %r10
adcq %r12, %r11
xorl %r15d, %r15d
cmpq %r12, %r11
setb %r15b
addq %rdi, %r11
adcq %r13, %r15
xorl %r12d, %r12d
cmpq %r13, %r15
setb %r12b
addq %rdi, %r15
movq -80(%rbp), %rsi ## 8-byte Reload
adcq %rsi, %r12
xorl %ebx, %ebx
cmpq %rsi, %r12
setb %bl
addq %rdi, %r12
movq -72(%rbp), %rax ## 8-byte Reload
adcq %rax, %rbx
xorl %esi, %esi
cmpq %rax, %rbx
setb %sil
addq %rdi, %r9
addq %rdi, %rbx
adcq %r9, %rsi
## Store the 12 result words into the output buffer (rdx).
movq -64(%rbp), %rdi ## 8-byte Reload
movq %rdi, (%rdx)
movq -56(%rbp), %rdi ## 8-byte Reload
movq %rdi, 4(%rdx)
movq -48(%rbp), %rdi ## 8-byte Reload
movq %rdi, 8(%rdx)
movq %rcx, 12(%rdx)
movq %r14, 16(%rdx)
movq %r8, 20(%rdx)
movq %r10, 24(%rdx)
movq %r11, 28(%rdx)
movq %r15, 32(%rdx)
movq %r12, 36(%rdx)
movq %rbx, 40(%rdx)
movq %rsi, 44(%rdx)
## Restore callee-saved registers and return.
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
##-----------------------------------------------------------------------
## _fiat_p384_opp — modular negation for the fiat-crypto P-384 backend.
## C-equivalent: fiat_p384_opp(arg1, out1)  (compiler-generated)
## ABI:   System V AMD64
## In:    rdi = input operand (a)
## Out:   rsi = output pointer; 12 result words written at offsets 0..44
## Clobbers: rax, rbx, rcx, rdx, r8-r15, flags (callee-saved regs saved)
## NOTE(review): limbs are accessed as 64-bit qwords at 4-byte-spaced
## offsets (0,4,...,44); adjacent slots overlap — matches the layout the
## compiler emitted, confirm before modifying.
## Structure: (1) limb-wise negation 0 - a[i] with an explicit borrow
## chain (negq + sbbq); (2) a correction constant selected by cmove
## (0xFFFFFFFF / 0xFFFFFFFE, presumably modulus limbs — TODO confirm)
## is conditionally added back through all limbs.
##-----------------------------------------------------------------------
.globl _fiat_p384_opp ## -- Begin function fiat_p384_opp
.p2align 4, 0x90
_fiat_p384_opp: ## @fiat_p384_opp
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
## Save every callee-saved GPR used by the borrow chain below.
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
## --- Phase 1: limb-wise negation 0 - a[i] with explicit borrows. ---
movq (%rdi), %r8
movq %r8, -56(%rbp) ## 8-byte Spill
movq 4(%rdi), %rax
movq 44(%rdi), %rbx
movq %rbx, -104(%rbp) ## 8-byte Spill
xorl %ecx, %ecx
negq %rbx
movq %rbx, -112(%rbp) ## 8-byte Spill
setb %cl
movq %rcx, %r11
movq %rcx, -80(%rbp) ## 8-byte Spill
movq %r8, %rcx
negq %rcx
movq %rcx, -64(%rbp) ## 8-byte Spill
## negq sets CF when the operand is nonzero; sbbq %rdx,%rdx turns that
## carry into an all-ones/all-zero borrow value for the next limb.
movl $0, %edx
sbbq %rdx, %rdx
subq %rax, %rdx
movq %rdx, -48(%rbp) ## 8-byte Spill
negq %rax
movl $0, %ebx
sbbq %rbx, %rbx
movq 8(%rdi), %rcx
cmpq %rdx, %rax
sbbq %rcx, %rbx
negq %rcx
movl $0, %eax
sbbq %rax, %rax
cmpq %rcx, %rbx
movl $0, %ecx
seta %cl
movq 12(%rdi), %r8
subq %r8, %rax
movq %rax, -96(%rbp) ## 8-byte Spill
subq %rcx, %rax
movq %rax, -88(%rbp) ## 8-byte Spill
negq %rcx
movq %rcx, -72(%rbp) ## 8-byte Spill
negq %r8
movl $0, %r10d
sbbq %r10, %r10
cmpq %rax, %r8
movq 16(%rdi), %rax
sbbq %rax, %r10
negq %rax
movl $0, %r14d
sbbq %r14, %r14
cmpq %r10, %rax
movq 20(%rdi), %rax
sbbq %rax, %r14
negq %rax
movl $0, %r8d
sbbq %r8, %r8
cmpq %r14, %rax
movq 24(%rdi), %rax
sbbq %rax, %r8
negq %rax
movl $0, %r9d
sbbq %r9, %r9
cmpq %r8, %rax
movq 28(%rdi), %rax
sbbq %rax, %r9
negq %rax
movl $0, %r13d
sbbq %r13, %r13
cmpq %r9, %rax
movq 32(%rdi), %rax
sbbq %rax, %r13
negq %rax
movl $0, %r15d
sbbq %r15, %r15
cmpq %r13, %rax
movq 36(%rdi), %rax
sbbq %rax, %r15
negq %rax
movl $0, %r12d
sbbq %r12, %r12
cmpq %r15, %rax
movq 40(%rdi), %rax
sbbq %rax, %r12
negq %rax
movl $0, %edi
sbbq %rdi, %rdi
cmpq %r12, %rax
sbbq -104(%rbp), %rdi ## 8-byte Folded Reload
## --- Phase 2: conditional add-back of the correction constants. ---
## r11/rax hold 0xFFFFFFFF / 0xFFFFFFFE or zero depending on the final
## borrow (selected via cmove), then are added through every limb.
cmpq %rdi, -112(%rbp) ## 8-byte Folded Reload
movl $0, %ecx
sbbq %rcx, %rcx
cmpq %rcx, %r11
movl $4294967294, %eax ## imm = 0xFFFFFFFE
leaq 1(%rax), %r11
movl $0, %edx
cmoveq %rdx, %r11
movq %r11, %rdx
subq -56(%rbp), %rdx ## 8-byte Folded Reload
movq %rdx, -56(%rbp) ## 8-byte Spill
cmpq -64(%rbp), %rdx ## 8-byte Folded Reload
movl $0, %edx
setb %dl
addq -48(%rbp), %rdx ## 8-byte Folded Reload
movq %rdx, -64(%rbp) ## 8-byte Spill
adcq $0, %rbx
movq %rbx, -48(%rbp) ## 8-byte Spill
setb %bl
movq -72(%rbp), %rdx ## 8-byte Reload
adcq -96(%rbp), %rdx ## 8-byte Folded Reload
cmpq %rcx, -80(%rbp) ## 8-byte Folded Reload
movl $0, %ecx
cmoveq %rcx, %rax
addb $255, %bl
adcq %r11, -88(%rbp) ## 8-byte Folded Spill
movq %r10, %rbx
adcq $0, %rbx
addq %r11, %rdx
movq %rdx, -72(%rbp) ## 8-byte Spill
xorl %edx, %edx
cmpq %r10, %rbx
setb %dl
addq %rax, %rbx
adcq %r14, %rdx
xorl %r10d, %r10d
cmpq %r14, %rdx
setb %r10b
addq %r11, %rdx
adcq %r8, %r10
xorl %r14d, %r14d
cmpq %r8, %r10
setb %r14b
addq %r11, %r10
adcq %r9, %r14
xorl %r8d, %r8d
cmpq %r9, %r14
setb %r8b
addq %r11, %r14
adcq %r13, %r8
xorl %eax, %eax
cmpq %r13, %r8
setb %al
addq %r11, %r8
adcq %r15, %rax
xorl %ecx, %ecx
cmpq %r15, %rax
setb %cl
addq %r11, %rax
adcq %r12, %rcx
xorl %r9d, %r9d
cmpq %r12, %rcx
setb %r9b
addq %r11, %rdi
addq %r11, %rcx
adcq %rdi, %r9
## Store the 12 result words into the output buffer (rsi).
movq -56(%rbp), %rdi ## 8-byte Reload
movq %rdi, (%rsi)
movq -64(%rbp), %rdi ## 8-byte Reload
movq %rdi, 4(%rsi)
movq -48(%rbp), %rdi ## 8-byte Reload
movq %rdi, 8(%rsi)
movq -72(%rbp), %rdi ## 8-byte Reload
movq %rdi, 12(%rsi)
movq %rbx, 16(%rsi)
movq %rdx, 20(%rsi)
movq %r10, 24(%rsi)
movq %r14, 28(%rsi)
movq %r8, 32(%rsi)
movq %rax, 36(%rsi)
movq %rcx, 40(%rsi)
movq %r9, 44(%rsi)
## Restore callee-saved registers and return.
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _fiat_p384_from_montgomery ## -- Begin function fiat_p384_from_montgomery
.p2align 4, 0x90
_fiat_p384_from_montgomery: ## @fiat_p384_from_montgomery
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $160, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq (%rdi), %r8
movq %rdi, -224(%rbp) ## 8-byte Spill
movl $4294967295, %ecx ## imm = 0xFFFFFFFF
movq %r8, %rax
mulq %rcx
movl $4294967295, %r11d ## imm = 0xFFFFFFFF
movq %r8, %r9
shlq $32, %r9
subq %r8, %r9
movq %rdx, %r15
movq %rdx, %rbx
adcq $0, %r15
movq 4(%rdi), %r10
leaq (%r15,%r10), %rcx
movq %rcx, %rax
mulq %r11
movq %rdx, %r14
movq %rcx, %rdi
shlq $32, %rdi
xorl %r13d, %r13d
subq %rcx, %rdi
setb %r13b
addq %r10, %r15
adcq %rdx, %r13
movq %rdx, -144(%rbp) ## 8-byte Spill
movl $4294967294, %ecx ## imm = 0xFFFFFFFE
leaq 1(%rcx), %rax
subq %r8, %r9
movq %r8, %r12
imulq %rax, %r12
movq %rax, %r11
movq %rax, -280(%rbp) ## 8-byte Spill
movq %r8, %rax
mulq %rcx
movq %rdx, %rcx
addq %rbx, %r9
movq %rbx, %r8
adcq %r12, %rcx
xorl %r10d, %r10d
cmpq %r12, %rcx
setb %r10b
subq %r15, %rdi
movq %r15, %rbx
movq %r15, %rax
movl $4294967294, %edx ## imm = 0xFFFFFFFE
mulq %rdx
imulq %r11, %rbx
addq %r14, %rdi
adcq %rbx, %rdx
movq %rbx, %rax
addq %r9, %rax
movq %rdi, %rax
adcq %rcx, %rax
movq %rax, -120(%rbp) ## 8-byte Spill
addq %rbx, %r9
adcq %rcx, %rdi
movq %r8, %rdi
movq %r8, -48(%rbp) ## 8-byte Spill
movq %r12, %r8
leaq (%r12,%rdi), %rax
movq %rax, %r14
adcq %r10, %r14
addq %r10, %rax
cmpq %r12, %rax
movq %r12, %r15
adcq %rdi, %r15
cmpq %r12, %r15
movq %r12, %r11
movq %r12, -104(%rbp) ## 8-byte Spill
adcq %rdi, %r11
movq %r11, -248(%rbp) ## 8-byte Spill
movq %rbx, -256(%rbp) ## 8-byte Spill
cmpq %rbx, %rdx
movq %rbx, %r10
movq -144(%rbp), %rcx ## 8-byte Reload
adcq %rcx, %r10
cmpq %rbx, %r10
adcq %rcx, %rbx
movq %rbx, -64(%rbp) ## 8-byte Spill
xorl %r12d, %r12d
cmpq %rax, %r14
setb %r12b
addq %rdx, %r14
adcq %r15, %r12
cmpq %r15, %r12
movl $0, %eax
setb %al
addq %r10, %r12
adcq %r11, %rax
movq %rax, -72(%rbp) ## 8-byte Spill
movq -224(%rbp), %rdx ## 8-byte Reload
addq 8(%rdx), %r13
movq %r8, %rdi
adcq $0, %rdi
adcq $0, %r9
movq -120(%rbp), %r8 ## 8-byte Reload
adcq $0, %r8
adcq $0, %r14
adcq $0, %r12
leaq (%rax,%rbx), %rax
adcq $0, %rax
movq %rax, -80(%rbp) ## 8-byte Spill
setb -160(%rbp) ## 1-byte Folded Spill
movq %r13, %rax
imulq -280(%rbp), %rax ## 8-byte Folded Reload
movq %rax, %rbx
movq %r13, %rax
movl $4294967295, %ecx ## imm = 0xFFFFFFFF
mulq %rcx
movq %rdx, %rcx
movq %r13, %r10
shlq $32, %r10
movq %r13, %rax
movl $4294967294, %edx ## imm = 0xFFFFFFFE
mulq %rdx
xorl %r15d, %r15d
subq %r13, %r10
setb %r15b
subq %r13, %r10
movq %rcx, %rax
movq %rcx, -168(%rbp) ## 8-byte Spill
addq %rcx, %r10
movq %rbx, -264(%rbp) ## 8-byte Spill
adcq %rbx, %rdx
movq %rdx, -216(%rbp) ## 8-byte Spill
xorl %ecx, %ecx
addq %rdi, %r15
setb %cl
addq %rax, %r15
adcq %r9, %rcx
xorl %r11d, %r11d
cmpq %r9, %rcx
setb %r11b
xorl %r13d, %r13d
addq %r8, %r11
setb %r13b
addq %rbx, %r11
adcq %r14, %r13
xorl %r9d, %r9d
cmpq %r14, %r13
setb %r9b
addq %r10, %r13
adcq %r12, %r9
movq -224(%rbp), %rax ## 8-byte Reload
addq 12(%rax), %r15
adcq $0, %rcx
adcq $0, %r11
adcq $0, %r13
setb -112(%rbp) ## 1-byte Folded Spill
movq %r15, %rax
imulq -280(%rbp), %rax ## 8-byte Folded Reload
movq %rax, %r8
movq %r15, %r10
shlq $32, %r10
movq %r15, %rax
movl $4294967295, %edx ## imm = 0xFFFFFFFF
mulq %rdx
movq %rdx, -176(%rbp) ## 8-byte Spill
xorl %eax, %eax
subq %r15, %r10
setb %al
leaq (%rcx,%rax), %rdi
xorl %ebx, %ebx
addq %rdx, %rdi
movq %rdi, -136(%rbp) ## 8-byte Spill
setb %bl
addq %rax, %rcx
adcq %r11, %rbx
movq %rbx, -240(%rbp) ## 8-byte Spill
setb %bl
movq %r8, %rax
movq %r8, -120(%rbp) ## 8-byte Spill
adcq %r13, %rax
movq %rax, -88(%rbp) ## 8-byte Spill
xorl %r14d, %r14d
cmpq %r12, %r9
setb %r14b
addq -216(%rbp), %r9 ## 8-byte Folded Reload
adcq -80(%rbp), %r14 ## 8-byte Folded Reload
addb $255, -112(%rbp) ## 1-byte Folded Spill
adcq $0, %r9
setb -96(%rbp) ## 1-byte Folded Spill
movq -104(%rbp), %rdx ## 8-byte Reload
movq -248(%rbp), %rcx ## 8-byte Reload
cmpq %rdx, %rcx
movq %rdx, %rax
movq %rdx, %r12
movq -48(%rbp), %rdi ## 8-byte Reload
adcq %rdi, %rax
xorl %edx, %edx
movq -72(%rbp), %r11 ## 8-byte Reload
cmpq %rcx, %r11
setb %dl
movq -64(%rbp), %rcx ## 8-byte Reload
addq %rcx, %r11
adcq %rax, %rdx
subq %r15, %r10
addb $255, %bl
adcq %r8, %r13
setb -128(%rbp) ## 1-byte Folded Spill
movq -176(%rbp), %rbx ## 8-byte Reload
leaq (%r10,%rbx), %rbx
adcq %r9, %rbx
movq %rbx, -72(%rbp) ## 8-byte Spill
cmpq %r12, %rax
movq %r12, %rbx
adcq %rdi, %rbx
movq %rbx, %r8
movq %rbx, -248(%rbp) ## 8-byte Spill
movq -256(%rbp), %rdi ## 8-byte Reload
cmpq %rdi, %rcx
movq %rdi, %r13
movq -144(%rbp), %rcx ## 8-byte Reload
adcq %rcx, %r13
cmpq %rdi, %r13
movq %rdi, %rbx
adcq %rcx, %rbx
movq %rbx, -200(%rbp) ## 8-byte Spill
xorl %r12d, %r12d
cmpq %rax, %rdx
setb %r12b
addq %rdx, %r13
adcq %r8, %r12
addb $255, -160(%rbp) ## 1-byte Folded Spill
adcq $0, %r13
leaq (%r12,%rbx), %rax
adcq $0, %rax
movq %rax, %rcx
movq %rax, -232(%rbp) ## 8-byte Spill
setb -112(%rbp) ## 1-byte Folded Spill
movq -264(%rbp), %r11 ## 8-byte Reload
cmpq %r11, -216(%rbp) ## 8-byte Folded Reload
movq -168(%rbp), %rdi ## 8-byte Reload
adcq %rdi, %r11
xorl %r8d, %r8d
cmpq -80(%rbp), %r14 ## 8-byte Folded Reload
setb %r8b
addq %r11, %r14
adcq %r13, %r8
addb $255, -96(%rbp) ## 1-byte Folded Spill
adcq $0, %r14
setb -80(%rbp) ## 1-byte Folded Spill
movq %r15, %rax
movl $4294967294, %edx ## imm = 0xFFFFFFFE
mulq %rdx
movq %rdx, %rbx
movq -176(%rbp), %rax ## 8-byte Reload
addq %rax, %r10
adcq -120(%rbp), %rbx ## 8-byte Folded Reload
addb $255, -128(%rbp) ## 1-byte Folded Spill
adcq %r9, %r10
setb -192(%rbp) ## 1-byte Folded Spill
movq %rbx, %rax
adcq %r14, %rax
movq %rax, -64(%rbp) ## 8-byte Spill
movq -264(%rbp), %rax ## 8-byte Reload
cmpq %rax, %r11
movq %rax, %rdx
adcq %rdi, %rdx
movq %rdx, -96(%rbp) ## 8-byte Spill
cmpq %r13, %r8
movl $0, %eax
setb %al
addq %rdx, %r8
adcq %rcx, %rax
movq %rax, -128(%rbp) ## 8-byte Spill
movq -224(%rbp), %rax ## 8-byte Reload
movq -136(%rbp), %rcx ## 8-byte Reload
addq 16(%rax), %rcx
movq %rcx, -136(%rbp) ## 8-byte Spill
movq -240(%rbp), %r11 ## 8-byte Reload
adcq $0, %r11
movq -88(%rbp), %r15 ## 8-byte Reload
adcq $0, %r15
movq -72(%rbp), %r13 ## 8-byte Reload
adcq $0, %r13
setb -72(%rbp) ## 1-byte Folded Spill
addb $255, -80(%rbp) ## 1-byte Folded Spill
adcq $0, %r8
setb -208(%rbp) ## 1-byte Folded Spill
movq %rcx, %rax
imulq -280(%rbp), %rax ## 8-byte Folded Reload
movq %rax, %r9
movq %rcx, %r10
shlq $32, %r10
movq %rcx, %rax
movl $4294967295, %edx ## imm = 0xFFFFFFFF
mulq %rdx
movq %rdx, -216(%rbp) ## 8-byte Spill
xorl %eax, %eax
subq %rcx, %r10
setb %al
leaq (%r11,%rax), %rcx
xorl %edi, %edi
addq %rdx, %rcx
movq %rcx, -160(%rbp) ## 8-byte Spill
setb %dil
addq %rax, %r11
adcq %r15, %rdi
movq %rdi, -240(%rbp) ## 8-byte Spill
setb %r15b
movq %r9, %rax
movq %r9, %r11
movq %r9, -80(%rbp) ## 8-byte Spill
adcq %r13, %rax
movq %rax, -88(%rbp) ## 8-byte Spill
movq -120(%rbp), %rax ## 8-byte Reload
cmpq %rax, %rbx
adcq -176(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -152(%rbp) ## 8-byte Spill
addb $255, -192(%rbp) ## 1-byte Folded Spill
adcq %r14, %rbx
setb -192(%rbp) ## 1-byte Folded Spill
adcq %r8, %rax
movq %rax, %rdi
movq -104(%rbp), %rdx ## 8-byte Reload
movq -248(%rbp), %rcx ## 8-byte Reload
cmpq %rdx, %rcx
movq -48(%rbp), %r9 ## 8-byte Reload
adcq %r9, %rdx
xorl %eax, %eax
cmpq %rcx, %r12
setb %al
movq -200(%rbp), %rcx ## 8-byte Reload
addq %rcx, %r12
adcq %rdx, %rax
addb $255, -72(%rbp) ## 1-byte Folded Spill
movq -64(%rbp), %rbx ## 8-byte Reload
adcq $0, %rbx
movq %rbx, -64(%rbp) ## 8-byte Spill
adcq $0, %rdi
movq %rdi, -72(%rbp) ## 8-byte Spill
setb -184(%rbp) ## 1-byte Folded Spill
subq -136(%rbp), %r10 ## 8-byte Folded Reload
addb $255, %r15b
adcq %r11, %r13
setb %r14b
movq -216(%rbp), %r13 ## 8-byte Reload
leaq (%r10,%r13), %rdi
adcq %rbx, %rdi
movq %rdi, -248(%rbp) ## 8-byte Spill
movq -256(%rbp), %rbx ## 8-byte Reload
cmpq %rbx, %rcx
movq %rbx, %r15
movq -144(%rbp), %rdi ## 8-byte Reload
adcq %rdi, %r15
cmpq %rbx, %r15
adcq %rdi, %rbx
movq %rbx, -200(%rbp) ## 8-byte Spill
xorl %edi, %edi
cmpq %rdx, %rax
setb %dil
cmpq -104(%rbp), %rdx ## 8-byte Folded Reload
adcq %rbx, %r9
addq %rax, %r15
movq %rdi, %rax
adcq %r9, %rax
movq %rax, -272(%rbp) ## 8-byte Spill
addb $255, -112(%rbp) ## 1-byte Folded Spill
adcq $0, %r15
adcq $0, %rax
movq %rax, %rcx
setb -48(%rbp) ## 1-byte Folded Spill
movq -264(%rbp), %r12 ## 8-byte Reload
cmpq %r12, -96(%rbp) ## 8-byte Folded Reload
adcq -168(%rbp), %r12 ## 8-byte Folded Reload
xorl %r9d, %r9d
movq -128(%rbp), %r11 ## 8-byte Reload
cmpq -232(%rbp), %r11 ## 8-byte Folded Reload
setb %r9b
addq %r12, %r11
adcq %r15, %r9
addb $255, -208(%rbp) ## 1-byte Folded Spill
adcq $0, %r11
setb -112(%rbp) ## 1-byte Folded Spill
movq -136(%rbp), %rax ## 8-byte Reload
movl $4294967294, %edx ## imm = 0xFFFFFFFE
mulq %rdx
addq %r13, %r10
adcq -80(%rbp), %rdx ## 8-byte Folded Reload
addb $255, %r14b
adcq -64(%rbp), %r10 ## 8-byte Folded Reload
setb -136(%rbp) ## 1-byte Folded Spill
movq %rdx, %rax
adcq -72(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -104(%rbp) ## 8-byte Spill
movq -120(%rbp), %r14 ## 8-byte Reload
movq -152(%rbp), %rdi ## 8-byte Reload
cmpq %r14, %rdi
movq -176(%rbp), %r13 ## 8-byte Reload
adcq %r13, %r14
addb $255, -192(%rbp) ## 1-byte Folded Spill
adcq %r8, %rdi
setb -64(%rbp) ## 1-byte Folded Spill
movq %r14, %rax
adcq %r11, %rax
addb $255, -184(%rbp) ## 1-byte Folded Spill
adcq $0, %rax
movq %rax, %rbx
movq %rax, -152(%rbp) ## 8-byte Spill
setb -96(%rbp) ## 1-byte Folded Spill
movq -264(%rbp), %r8 ## 8-byte Reload
cmpq %r8, %r12
movq %r8, %rdi
adcq -168(%rbp), %rdi ## 8-byte Folded Reload
xorl %eax, %eax
cmpq %r15, %r9
setb %al
addq %rdi, %r9
movq %rcx, %r15
adcq %rcx, %rax
addb $255, -112(%rbp) ## 1-byte Folded Spill
adcq $0, %r9
setb %r12b
movq -80(%rbp), %r10 ## 8-byte Reload
cmpq %r10, %rdx
adcq -216(%rbp), %r10 ## 8-byte Folded Reload
addb $255, -136(%rbp) ## 1-byte Folded Spill
adcq -72(%rbp), %rdx ## 8-byte Folded Reload
setb -192(%rbp) ## 1-byte Folded Spill
movq %r10, %rcx
adcq %rbx, %rcx
movq %rcx, -72(%rbp) ## 8-byte Spill
movq -120(%rbp), %rcx ## 8-byte Reload
cmpq %rcx, %r14
adcq %r13, %rcx
movq %rcx, -184(%rbp) ## 8-byte Spill
addb $255, -64(%rbp) ## 1-byte Folded Spill
adcq %r11, %r14
setb -232(%rbp) ## 1-byte Folded Spill
adcq %r9, %rcx
addb $255, -96(%rbp) ## 1-byte Folded Spill
adcq $0, %rcx
movq %rcx, -288(%rbp) ## 8-byte Spill
setb -208(%rbp) ## 1-byte Folded Spill
xorl %edx, %edx
movq -200(%rbp), %rbx ## 8-byte Reload
cmpq %rbx, -272(%rbp) ## 8-byte Folded Reload
setb %dl
movq %r8, %rcx
cmpq %r8, %rdi
movq %r8, %r14
movq -168(%rbp), %rdi ## 8-byte Reload
adcq %rdi, %r14
cmpq %r8, %r14
adcq %rdi, %rcx
movq %rcx, -128(%rbp) ## 8-byte Spill
cmpq -256(%rbp), %rbx ## 8-byte Folded Reload
movq -144(%rbp), %rdi ## 8-byte Reload
adcq %rcx, %rdi
addb $255, -48(%rbp) ## 1-byte Folded Spill
adcq %rdi, %rdx
xorl %ecx, %ecx
cmpq %r15, %rax
setb %cl
addq %rax, %r14
adcq %rdx, %rcx
movq %rcx, -256(%rbp) ## 8-byte Spill
movq -224(%rbp), %rax ## 8-byte Reload
movq -160(%rbp), %rcx ## 8-byte Reload
addq 20(%rax), %rcx
movq %rcx, -160(%rbp) ## 8-byte Spill
movq -240(%rbp), %rdi ## 8-byte Reload
adcq $0, %rdi
movq -88(%rbp), %r15 ## 8-byte Reload
adcq $0, %r15
movq -248(%rbp), %r8 ## 8-byte Reload
adcq $0, %r8
movq %r8, -248(%rbp) ## 8-byte Spill
setb -88(%rbp) ## 1-byte Folded Spill
addb $255, %r12b
adcq $0, %r14
setb -48(%rbp) ## 1-byte Folded Spill
movq %rcx, %r12
imulq -280(%rbp), %r12 ## 8-byte Folded Reload
movq %r12, -136(%rbp) ## 8-byte Spill
movq %rcx, %r11
shlq $32, %r11
movq %rcx, %rax
movl $4294967295, %edx ## imm = 0xFFFFFFFF
mulq %rdx
movq %rdx, -144(%rbp) ## 8-byte Spill
xorl %eax, %eax
subq %rcx, %r11
setb %al
leaq (%rdi,%rax), %rcx
xorl %ebx, %ebx
addq %rdx, %rcx
movq %rcx, -64(%rbp) ## 8-byte Spill
setb %bl
addq %rax, %rdi
adcq %r15, %rbx
movq %rbx, -96(%rbp) ## 8-byte Spill
setb -272(%rbp) ## 1-byte Folded Spill
adcq %r8, %r12
movq %r12, -112(%rbp) ## 8-byte Spill
movq -80(%rbp), %r13 ## 8-byte Reload
cmpq %r13, %r10
movq %r13, %rax
movq -216(%rbp), %rdi ## 8-byte Reload
adcq %rdi, %rax
movq %rdi, %r8
addb $255, -192(%rbp) ## 1-byte Folded Spill
adcq -152(%rbp), %r10 ## 8-byte Folded Reload
setb %r15b
movq %rax, %rcx
movq -288(%rbp), %r12 ## 8-byte Reload
adcq %r12, %rcx
movq %rcx, -240(%rbp) ## 8-byte Spill
addb $255, -88(%rbp) ## 1-byte Folded Spill
movq -104(%rbp), %rdi ## 8-byte Reload
adcq $0, %rdi
movq %rdi, -104(%rbp) ## 8-byte Spill
setb -88(%rbp) ## 1-byte Folded Spill
movq -120(%rbp), %r10 ## 8-byte Reload
movq -184(%rbp), %rcx ## 8-byte Reload
cmpq %r10, %rcx
adcq -176(%rbp), %r10 ## 8-byte Folded Reload
addb $255, -232(%rbp) ## 1-byte Folded Spill
adcq %r9, %rcx
setb -232(%rbp) ## 1-byte Folded Spill
movq %r10, %rbx
adcq %r14, %rbx
addb $255, -208(%rbp) ## 1-byte Folded Spill
adcq $0, %rbx
setb -152(%rbp) ## 1-byte Folded Spill
cmpq %r13, %rax
adcq %r8, %r13
addb $255, %r15b
adcq %r12, %rax
setb %al
movq %r13, %r12
movq %r13, -200(%rbp) ## 8-byte Spill
adcq %rbx, %r12
addb $255, -48(%rbp) ## 1-byte Folded Spill
movq -256(%rbp), %r15 ## 8-byte Reload
adcq $0, %r15
setb -208(%rbp) ## 1-byte Folded Spill
addb $255, %al
adcq %r13, %rbx
setb -48(%rbp) ## 1-byte Folded Spill
movq -160(%rbp), %rcx ## 8-byte Reload
movq %rcx, %rax
movl $4294967294, %edx ## imm = 0xFFFFFFFE
mulq %rdx
subq %rcx, %r11
addq -144(%rbp), %r11 ## 8-byte Folded Reload
movq -136(%rbp), %rax ## 8-byte Reload
adcq %rax, %rdx
movq %rdx, -184(%rbp) ## 8-byte Spill
addb $255, -272(%rbp) ## 1-byte Folded Spill
adcq %rax, -248(%rbp) ## 8-byte Folded Spill
setb -160(%rbp) ## 1-byte Folded Spill
movq %r11, %rax
adcq %rdi, %rax
movq %rax, %rcx
movq -120(%rbp), %rdx ## 8-byte Reload
cmpq %rdx, %r10
movq -176(%rbp), %r9 ## 8-byte Reload
adcq %r9, %rdx
addb $255, -232(%rbp) ## 1-byte Folded Spill
adcq %r14, %r10
setb %r8b
movq %rdx, %rax
adcq %r15, %rax
movq %rax, %r10
movq -224(%rbp), %rbx ## 8-byte Reload
movq -64(%rbp), %r13 ## 8-byte Reload
addq 24(%rbx), %r13
adcq $0, -96(%rbp) ## 8-byte Folded Spill
adcq $0, -112(%rbp) ## 8-byte Folded Spill
adcq $0, %rcx
movq %rcx, -288(%rbp) ## 8-byte Spill
setb -272(%rbp) ## 1-byte Folded Spill
addb $255, -88(%rbp) ## 1-byte Folded Spill
movq -72(%rbp), %rax ## 8-byte Reload
adcq $0, %rax
adcq $0, -240(%rbp) ## 8-byte Folded Spill
adcq $0, %r12
movq %r12, -88(%rbp) ## 8-byte Spill
setb -192(%rbp) ## 1-byte Folded Spill
xorl %edi, %edi
movq -128(%rbp), %rbx ## 8-byte Reload
cmpq %rbx, -256(%rbp) ## 8-byte Folded Reload
setb %dil
movq -120(%rbp), %rcx ## 8-byte Reload
cmpq %rcx, %rdx
adcq %r9, %rcx
movq %rcx, -232(%rbp) ## 8-byte Spill
cmpq -264(%rbp), %rbx ## 8-byte Folded Reload
movq -168(%rbp), %rbx ## 8-byte Reload
adcq %rcx, %rbx
addb $255, -208(%rbp) ## 1-byte Folded Spill
movq %rdi, %rcx
adcq %rbx, %rcx
addb $255, %r8b
adcq %r15, %rdx
adcq $0, %rcx
movq %rcx, -208(%rbp) ## 8-byte Spill
addb $255, -152(%rbp) ## 1-byte Folded Spill
adcq $0, %r10
movq %r10, -304(%rbp) ## 8-byte Spill
adcq $0, %rcx
movq %rcx, -72(%rbp) ## 8-byte Spill
setb -128(%rbp) ## 1-byte Folded Spill
addb $255, -160(%rbp) ## 1-byte Folded Spill
adcq -104(%rbp), %r11 ## 8-byte Folded Reload
setb %r8b
movq -184(%rbp), %r14 ## 8-byte Reload
movq %r14, %rcx
adcq %rax, %rcx
movq %rcx, %rdi
movq %rax, %r9
movq %r13, %rbx
movq %r13, -64(%rbp) ## 8-byte Spill
imulq -280(%rbp), %r13 ## 8-byte Folded Reload
movq %rbx, %r15
shlq $32, %r15
movq %rbx, %rax
movl $4294967295, %ecx ## imm = 0xFFFFFFFF
mulq %rcx
movq %rdx, -256(%rbp) ## 8-byte Spill
xorl %eax, %eax
subq %rbx, %r15
setb %al
movq -96(%rbp), %rcx ## 8-byte Reload
leaq (%rcx,%rax), %rbx
xorl %r11d, %r11d
addq %rdx, %rbx
movq %rbx, -160(%rbp) ## 8-byte Spill
setb %r11b
addq %rax, %rcx
adcq -112(%rbp), %r11 ## 8-byte Folded Reload
setb -264(%rbp) ## 1-byte Folded Spill
movq %r13, %rax
movq -288(%rbp), %r12 ## 8-byte Reload
adcq %r12, %rax
movq %rax, -168(%rbp) ## 8-byte Spill
movq -136(%rbp), %rcx ## 8-byte Reload
movq %r14, %rdx
cmpq %rcx, %r14
movq %rcx, %rax
movq %rcx, %r14
movq -144(%rbp), %rcx ## 8-byte Reload
adcq %rcx, %rax
addb $255, %r8b
adcq %rdx, %r9
setb %dl
movq %rax, %r8
movq -240(%rbp), %rbx ## 8-byte Reload
adcq %rbx, %r8
addb $255, -272(%rbp) ## 1-byte Folded Spill
adcq $0, %rdi
movq %rdi, -112(%rbp) ## 8-byte Spill
setb -96(%rbp) ## 1-byte Folded Spill
cmpq %r14, %rax
movq %r14, %rdi
adcq %rcx, %rdi
addb $255, %dl
adcq %rbx, %rax
setb -240(%rbp) ## 1-byte Folded Spill
movq %rdi, %rax
adcq -88(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -104(%rbp) ## 8-byte Spill
movq -80(%rbp), %r9 ## 8-byte Reload
cmpq %r9, -200(%rbp) ## 8-byte Folded Reload
movq -216(%rbp), %rbx ## 8-byte Reload
adcq %rbx, %r9
movb -48(%rbp), %al ## 1-byte Reload
addb $255, %al
movq %r9, %rax
adcq %r10, %rax
addb $255, -192(%rbp) ## 1-byte Folded Spill
adcq $0, %rax
movq %rax, %r10
movq %rax, -152(%rbp) ## 8-byte Spill
setb -200(%rbp) ## 1-byte Folded Spill
movq -64(%rbp), %rcx ## 8-byte Reload
movq %rcx, %rax
movl $4294967294, %edx ## imm = 0xFFFFFFFE
mulq %rdx
subq %rcx, %r15
addq -256(%rbp), %r15 ## 8-byte Folded Reload
movq %r13, -248(%rbp) ## 8-byte Spill
adcq %r13, %rdx
movq %rdx, -184(%rbp) ## 8-byte Spill
addb $255, -264(%rbp) ## 1-byte Folded Spill
adcq %r13, %r12
setb -288(%rbp) ## 1-byte Folded Spill
movq %r15, %r12
adcq -112(%rbp), %r12 ## 8-byte Folded Reload
cmpq %r14, %rdi
adcq -144(%rbp), %r14 ## 8-byte Folded Reload
movq %r14, -272(%rbp) ## 8-byte Spill
addb $255, -240(%rbp) ## 1-byte Folded Spill
adcq -88(%rbp), %rdi ## 8-byte Folded Reload
setb -192(%rbp) ## 1-byte Folded Spill
adcq %r10, %r14
movq -80(%rbp), %rax ## 8-byte Reload
cmpq %rax, %r9
adcq %rbx, %rax
movq %rax, -64(%rbp) ## 8-byte Spill
addb $255, -48(%rbp) ## 1-byte Folded Spill
adcq -304(%rbp), %r9 ## 8-byte Folded Reload
setb -240(%rbp) ## 1-byte Folded Spill
adcq -72(%rbp), %rax ## 8-byte Folded Reload
movq %rax, %rdx
movq -224(%rbp), %rax ## 8-byte Reload
movq -160(%rbp), %rcx ## 8-byte Reload
addq 28(%rax), %rcx
adcq $0, %r11
movq -168(%rbp), %r13 ## 8-byte Reload
adcq $0, %r13
adcq $0, %r12
setb -48(%rbp) ## 1-byte Folded Spill
addb $255, -200(%rbp) ## 1-byte Folded Spill
adcq $0, %rdx
movq %rdx, -88(%rbp) ## 8-byte Spill
setb -200(%rbp) ## 1-byte Folded Spill
movq %rcx, %rax
movq %rcx, %rdx
imulq -280(%rbp), %rdx ## 8-byte Folded Reload
movq %rdx, %rdi
movq %rcx, %r10
shlq $32, %r10
movq %rcx, -160(%rbp) ## 8-byte Spill
movl $4294967295, %edx ## imm = 0xFFFFFFFF
mulq %rdx
movq %rdx, -264(%rbp) ## 8-byte Spill
xorl %eax, %eax
subq %rcx, %r10
setb %al
leaq (%r11,%rax), %rcx
xorl %r9d, %r9d
addq %rdx, %rcx
movq %rcx, -304(%rbp) ## 8-byte Spill
setb %r9b
addq %rax, %r11
adcq %r13, %r9
setb %r11b
movq %rdi, %rax
movq %rdi, %r13
adcq %r12, %rax
movq %rax, -320(%rbp) ## 8-byte Spill
addb $255, -96(%rbp) ## 1-byte Folded Spill
adcq $0, %r8
movq -104(%rbp), %rbx ## 8-byte Reload
adcq $0, %rbx
movq %rbx, -104(%rbp) ## 8-byte Spill
adcq $0, %r14
movq %r14, -96(%rbp) ## 8-byte Spill
setb -296(%rbp) ## 1-byte Folded Spill
addb $255, -288(%rbp) ## 1-byte Folded Spill
adcq -112(%rbp), %r15 ## 8-byte Folded Reload
setb %al
movq -184(%rbp), %rcx ## 8-byte Reload
movq %rcx, %rdx
adcq %r8, %rdx
addb $255, -48(%rbp) ## 1-byte Folded Spill
adcq $0, %rdx
movq %rdx, %r15
movq %rdx, -288(%rbp) ## 8-byte Spill
setb -112(%rbp) ## 1-byte Folded Spill
movq -248(%rbp), %rdx ## 8-byte Reload
cmpq %rdx, %rcx
movq -256(%rbp), %rdi ## 8-byte Reload
adcq %rdi, %rdx
addb $255, %al
adcq %rcx, %r8
setb %r8b
movq %rdx, %rax
adcq %rbx, %rax
movq %rax, -48(%rbp) ## 8-byte Spill
subq -160(%rbp), %r10 ## 8-byte Folded Reload
addb $255, %r11b
movq %r13, -168(%rbp) ## 8-byte Spill
adcq %r13, %r12
setb -184(%rbp) ## 1-byte Folded Spill
movq -264(%rbp), %r14 ## 8-byte Reload
leaq (%r10,%r14), %rax
adcq %r15, %rax
movq %rax, %r15
xorl %ebx, %ebx
movq -232(%rbp), %rax ## 8-byte Reload
cmpq %rax, -208(%rbp) ## 8-byte Folded Reload
setb %bl
movq -80(%rbp), %rcx ## 8-byte Reload
cmpq %rcx, -64(%rbp) ## 8-byte Folded Reload
adcq -216(%rbp), %rcx ## 8-byte Folded Reload
movq %rcx, -208(%rbp) ## 8-byte Spill
cmpq -120(%rbp), %rax ## 8-byte Folded Reload
movq -176(%rbp), %rax ## 8-byte Reload
adcq %rcx, %rax
addb $255, -128(%rbp) ## 1-byte Folded Spill
adcq %rax, %rbx
movq -248(%rbp), %r11 ## 8-byte Reload
cmpq %r11, %rdx
adcq %rdi, %r11
addb $255, %r8b
adcq -104(%rbp), %rdx ## 8-byte Folded Reload
setb -312(%rbp) ## 1-byte Folded Spill
movq %r11, %rax
adcq -96(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -232(%rbp) ## 8-byte Spill
movq -136(%rbp), %r12 ## 8-byte Reload
movq -272(%rbp), %rcx ## 8-byte Reload
cmpq %r12, %rcx
adcq -144(%rbp), %r12 ## 8-byte Folded Reload
addb $255, -192(%rbp) ## 1-byte Folded Spill
adcq -152(%rbp), %rcx ## 8-byte Folded Reload
setb -128(%rbp) ## 1-byte Folded Spill
movq %r12, %rax
adcq -88(%rbp), %rax ## 8-byte Folded Reload
addb $255, -296(%rbp) ## 1-byte Folded Spill
adcq $0, %rax
movq %rax, -328(%rbp) ## 8-byte Spill
setb -192(%rbp) ## 1-byte Folded Spill
movq -160(%rbp), %rax ## 8-byte Reload
movl $4294967294, %ecx ## imm = 0xFFFFFFFE
mulq %rcx
addq %r14, %r10
adcq %r13, %rdx
movq %rdx, -176(%rbp) ## 8-byte Spill
movq -224(%rbp), %rax ## 8-byte Reload
movq -304(%rbp), %r14 ## 8-byte Reload
addq 32(%rax), %r14
adcq $0, %r9
movq -320(%rbp), %r8 ## 8-byte Reload
adcq $0, %r8
adcq $0, %r15
movq %r15, -104(%rbp) ## 8-byte Spill
setb -272(%rbp) ## 1-byte Folded Spill
addb $255, -112(%rbp) ## 1-byte Folded Spill
movq -48(%rbp), %rdi ## 8-byte Reload
adcq $0, %rdi
movq %rdi, -48(%rbp) ## 8-byte Spill
setb -152(%rbp) ## 1-byte Folded Spill
addb $255, -240(%rbp) ## 1-byte Folded Spill
movq -64(%rbp), %rax ## 8-byte Reload
adcq -72(%rbp), %rax ## 8-byte Folded Reload
movq %rbx, %rax
adcq $0, %rax
movq %rax, -296(%rbp) ## 8-byte Spill
addb $255, -200(%rbp) ## 1-byte Folded Spill
adcq $0, %rax
movq %rax, -240(%rbp) ## 8-byte Spill
setb -200(%rbp) ## 1-byte Folded Spill
movq %r14, %rcx
imulq -280(%rbp), %rcx ## 8-byte Folded Reload
movq %rcx, -120(%rbp) ## 8-byte Spill
movq %r14, %r13
shlq $32, %r13
movq %r14, %rax
movl $4294967295, %edx ## imm = 0xFFFFFFFF
mulq %rdx
movq %rdx, -160(%rbp) ## 8-byte Spill
xorl %eax, %eax
subq %r14, %r13
setb %al
leaq (%r9,%rax), %rbx
xorl %r15d, %r15d
addq %rdx, %rbx
movq %rbx, -64(%rbp) ## 8-byte Spill
setb %r15b
addq %rax, %r9
adcq %r8, %r15
setb -304(%rbp) ## 1-byte Folded Spill
adcq -104(%rbp), %rcx ## 8-byte Folded Reload
movq %rcx, -112(%rbp) ## 8-byte Spill
addb $255, -184(%rbp) ## 1-byte Folded Spill
adcq -288(%rbp), %r10 ## 8-byte Folded Reload
setb -49(%rbp) ## 1-byte Folded Spill
movq -176(%rbp), %r9 ## 8-byte Reload
adcq %rdi, %r9
movq -248(%rbp), %r10 ## 8-byte Reload
cmpq %r10, %r11
movq %r10, %rax
movq -256(%rbp), %rbx ## 8-byte Reload
adcq %rbx, %rax
addb $255, -312(%rbp) ## 1-byte Folded Spill
adcq -96(%rbp), %r11 ## 8-byte Folded Reload
setb %r11b
movq %rax, %rdx
movq -328(%rbp), %r8 ## 8-byte Reload
adcq %r8, %rdx
addb $255, -272(%rbp) ## 1-byte Folded Spill
adcq $0, %r9
movq %r9, -320(%rbp) ## 8-byte Spill
setb -184(%rbp) ## 1-byte Folded Spill
movq -136(%rbp), %rcx ## 8-byte Reload
cmpq %rcx, %r12
movq %rcx, %rdi
adcq -144(%rbp), %rdi ## 8-byte Folded Reload
movq %rdi, -96(%rbp) ## 8-byte Spill
addb $255, -128(%rbp) ## 1-byte Folded Spill
adcq -88(%rbp), %r12 ## 8-byte Folded Reload
setb -272(%rbp) ## 1-byte Folded Spill
adcq -240(%rbp), %rdi ## 8-byte Folded Reload
addb $255, -192(%rbp) ## 1-byte Folded Spill
adcq $0, %rdi
setb -192(%rbp) ## 1-byte Folded Spill
cmpq %r10, %rax
adcq %rbx, %r10
movq %r10, -88(%rbp) ## 8-byte Spill
addb $255, %r11b
adcq %r8, %rax
setb %r8b
adcq %rdi, %r10
movq %r10, -72(%rbp) ## 8-byte Spill
addb $255, -152(%rbp) ## 1-byte Folded Spill
movq -232(%rbp), %rbx ## 8-byte Reload
adcq $0, %rbx
adcq $0, %rdx
movq %rdx, -128(%rbp) ## 8-byte Spill
setb -312(%rbp) ## 1-byte Folded Spill
movq %r14, %rax
movl $4294967294, %ecx ## imm = 0xFFFFFFFE
mulq %rcx
subq %r14, %r13
addq -160(%rbp), %r13 ## 8-byte Folded Reload
movq %r13, -288(%rbp) ## 8-byte Spill
movq -120(%rbp), %rax ## 8-byte Reload
adcq %rax, %rdx
movq %rdx, -152(%rbp) ## 8-byte Spill
addb $255, -304(%rbp) ## 1-byte Folded Spill
adcq %rax, -104(%rbp) ## 8-byte Folded Spill
setb -304(%rbp) ## 1-byte Folded Spill
movq %r13, %rax
adcq %r9, %rax
movq %rax, %r10
movq -168(%rbp), %rax ## 8-byte Reload
movq -176(%rbp), %rdx ## 8-byte Reload
cmpq %rax, %rdx
movq -264(%rbp), %r12 ## 8-byte Reload
adcq %r12, %rax
addb $255, -49(%rbp) ## 1-byte Folded Spill
adcq %rdx, -48(%rbp) ## 8-byte Folded Spill
setb %r9b
movq %rax, %rcx
adcq %rbx, %rcx
movq %rcx, -48(%rbp) ## 8-byte Spill
movq %rbx, %r14
xorl %r13d, %r13d
movq -208(%rbp), %rcx ## 8-byte Reload
cmpq %rcx, -296(%rbp) ## 8-byte Folded Reload
setb %r13b
movq -136(%rbp), %r11 ## 8-byte Reload
cmpq %r11, -96(%rbp) ## 8-byte Folded Reload
adcq -144(%rbp), %r11 ## 8-byte Folded Reload
cmpq -80(%rbp), %rcx ## 8-byte Folded Reload
movq -216(%rbp), %rcx ## 8-byte Reload
adcq %r11, %rcx
addb $255, -200(%rbp) ## 1-byte Folded Spill
adcq %rcx, %r13
movq -224(%rbp), %rcx ## 8-byte Reload
movq -64(%rbp), %rbx ## 8-byte Reload
addq 36(%rcx), %rbx
adcq $0, %r15
adcq $0, -112(%rbp) ## 8-byte Folded Spill
adcq $0, %r10
movq %r10, -232(%rbp) ## 8-byte Spill
setb -208(%rbp) ## 1-byte Folded Spill
addb $255, %r8b
adcq -88(%rbp), %rdi ## 8-byte Folded Reload
setb -328(%rbp) ## 1-byte Folded Spill
movq -168(%rbp), %rcx ## 8-byte Reload
cmpq %rcx, %rax
adcq %r12, %rcx
addb $255, %r9b
adcq %r14, %rax
setb %r10b
movq %rcx, %rax
adcq -128(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -80(%rbp) ## 8-byte Spill
addb $255, -312(%rbp) ## 1-byte Folded Spill
movq -72(%rbp), %r8 ## 8-byte Reload
adcq $0, %r8
setb -49(%rbp) ## 1-byte Folded Spill
movq %rbx, -64(%rbp) ## 8-byte Spill
movq %rbx, %rax
imulq -280(%rbp), %rax ## 8-byte Folded Reload
movq %rax, %r9
movq %rax, -216(%rbp) ## 8-byte Spill
movq %rbx, %r14
shlq $32, %r14
movq %rbx, %rax
movl $4294967295, %edx ## imm = 0xFFFFFFFF
mulq %rdx
movq %rdx, -176(%rbp) ## 8-byte Spill
xorl %eax, %eax
subq %rbx, %r14
setb %al
leaq (%r15,%rax), %rbx
xorl %edi, %edi
addq %rdx, %rbx
movq %rbx, -104(%rbp) ## 8-byte Spill
setb %dil
addq %rax, %r15
adcq -112(%rbp), %rdi ## 8-byte Folded Reload
movq %rdi, -112(%rbp) ## 8-byte Spill
setb -296(%rbp) ## 1-byte Folded Spill
movq %r9, %rax
adcq -232(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -200(%rbp) ## 8-byte Spill
movq -168(%rbp), %r9 ## 8-byte Reload
cmpq %r9, %rcx
adcq %r12, %r9
addb $255, %r10b
adcq -128(%rbp), %rcx ## 8-byte Folded Reload
setb -128(%rbp) ## 1-byte Folded Spill
movq %r9, %rax
movq %r8, -72(%rbp) ## 8-byte Spill
adcq %r8, %rax
addb $255, -184(%rbp) ## 1-byte Folded Spill
movq -48(%rbp), %rbx ## 8-byte Reload
adcq $0, %rbx
movq %rbx, -48(%rbp) ## 8-byte Spill
adcq $0, -80(%rbp) ## 8-byte Folded Spill
adcq $0, %rax
movq %rax, -312(%rbp) ## 8-byte Spill
setb -184(%rbp) ## 1-byte Folded Spill
addb $255, -272(%rbp) ## 1-byte Folded Spill
movq -96(%rbp), %rax ## 8-byte Reload
adcq -240(%rbp), %rax ## 8-byte Folded Reload
adcq $0, %r13
xorl %edi, %edi
cmpq %r11, %r13
setb %dil
movq -248(%rbp), %rcx ## 8-byte Reload
cmpq %rcx, -88(%rbp) ## 8-byte Folded Reload
movq %rcx, %r10
movq -256(%rbp), %rax ## 8-byte Reload
adcq %rax, %r10
cmpq %rcx, %r10
adcq %rax, %rcx
movq %rcx, -96(%rbp) ## 8-byte Spill
cmpq -136(%rbp), %r11 ## 8-byte Folded Reload
movq -144(%rbp), %rax ## 8-byte Reload
adcq %rcx, %rax
addb $255, -192(%rbp) ## 1-byte Folded Spill
adcq $0, %r13
adcq %rax, %rdi
movb -328(%rbp), %dl ## 1-byte Reload
movl %edx, %eax
addb $255, %al
movq %r10, %r12
adcq %r13, %r12
addb $255, -49(%rbp) ## 1-byte Folded Spill
adcq $0, %r12
movq %r12, -136(%rbp) ## 8-byte Spill
setb %r15b
addb $255, -304(%rbp) ## 1-byte Folded Spill
movq -320(%rbp), %rax ## 8-byte Reload
adcq %rax, -288(%rbp) ## 8-byte Folded Spill
setb -192(%rbp) ## 1-byte Folded Spill
movq -152(%rbp), %rax ## 8-byte Reload
movq %rax, %r8
adcq %rbx, %r8
addb $255, -208(%rbp) ## 1-byte Folded Spill
adcq $0, %r8
setb -88(%rbp) ## 1-byte Folded Spill
movq -168(%rbp), %r11 ## 8-byte Reload
cmpq %r11, %r9
adcq -264(%rbp), %r11 ## 8-byte Folded Reload
addb $255, -128(%rbp) ## 1-byte Folded Spill
adcq -72(%rbp), %r9 ## 8-byte Folded Reload
setb -144(%rbp) ## 1-byte Folded Spill
movq %r11, %rcx
adcq %r12, %rcx
movq %rcx, %r12
addb $255, %dl
adcq %r10, %r13
adcq $0, %rdi
movq %rdi, -208(%rbp) ## 8-byte Spill
addb $255, %r15b
movq %rdi, %rdx
adcq $0, %rdx
movq %rdx, -240(%rbp) ## 8-byte Spill
setb -128(%rbp) ## 1-byte Folded Spill
movq -120(%rbp), %r13 ## 8-byte Reload
cmpq %r13, %rax
movq %r13, %rdx
movq -160(%rbp), %rdi ## 8-byte Reload
adcq %rdi, %rdx
addb $255, -192(%rbp) ## 1-byte Folded Spill
adcq %rax, -48(%rbp) ## 8-byte Folded Spill
setb %cl
movq %rdx, %rax
movq -80(%rbp), %r9 ## 8-byte Reload
adcq %r9, %rax
movq %rax, -48(%rbp) ## 8-byte Spill
movq -64(%rbp), %rax ## 8-byte Reload
subq %rax, %r14
addb $255, -296(%rbp) ## 1-byte Folded Spill
movq -232(%rbp), %rbx ## 8-byte Reload
adcq -216(%rbp), %rbx ## 8-byte Folded Reload
setb %r15b
movq -176(%rbp), %r10 ## 8-byte Reload
leaq (%r14,%r10), %rbx
adcq %r8, %rbx
movq %rbx, -152(%rbp) ## 8-byte Spill
cmpq %r13, %rdx
adcq %rdi, %r13
movq %rdi, %rbx
addb $255, %cl
adcq %r9, %rdx
setb %dil
movq %r13, %rcx
movq -312(%rbp), %r9 ## 8-byte Reload
adcq %r9, %rcx
movq %rcx, -232(%rbp) ## 8-byte Spill
addb $255, -184(%rbp) ## 1-byte Folded Spill
adcq $0, %r12
movq %r12, -296(%rbp) ## 8-byte Spill
setb -80(%rbp) ## 1-byte Folded Spill
addb $255, -88(%rbp) ## 1-byte Folded Spill
movq -48(%rbp), %rcx ## 8-byte Reload
adcq $0, %rcx
movq %rcx, -48(%rbp) ## 8-byte Spill
setb -272(%rbp) ## 1-byte Folded Spill
movl $4294967294, %edx ## imm = 0xFFFFFFFE
mulq %rdx
movq %rdx, %r12
addq %r10, %r14
adcq -216(%rbp), %r12 ## 8-byte Folded Reload
addb $255, %r15b
adcq %r8, %r14
setb -64(%rbp) ## 1-byte Folded Spill
movq %r12, %rax
adcq %rcx, %rax
movq %rax, %r8
movq -120(%rbp), %r15 ## 8-byte Reload
cmpq %r15, %r13
adcq %rbx, %r15
addb $255, %dil
adcq %r9, %r13
setb -184(%rbp) ## 1-byte Folded Spill
movq %r15, %rax
movq -296(%rbp), %r10 ## 8-byte Reload
adcq %r10, %rax
movq %rax, -72(%rbp) ## 8-byte Spill
movq -168(%rbp), %rax ## 8-byte Reload
cmpq %rax, %r11
adcq -264(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -288(%rbp) ## 8-byte Spill
addb $255, -144(%rbp) ## 1-byte Folded Spill
adcq -136(%rbp), %r11 ## 8-byte Folded Reload
setb -192(%rbp) ## 1-byte Folded Spill
adcq -240(%rbp), %rax ## 8-byte Folded Reload
movq %rax, %rdx
movq -224(%rbp), %rax ## 8-byte Reload
movq -104(%rbp), %rdi ## 8-byte Reload
addq 40(%rax), %rdi
movq -112(%rbp), %rbx ## 8-byte Reload
adcq $0, %rbx
movq -200(%rbp), %r9 ## 8-byte Reload
adcq $0, %r9
movq -152(%rbp), %r13 ## 8-byte Reload
adcq $0, %r13
adcq $0, %r8
movq %r8, -112(%rbp) ## 8-byte Spill
setb -320(%rbp) ## 1-byte Folded Spill
addb $255, -80(%rbp) ## 1-byte Folded Spill
adcq $0, %rdx
movq %rdx, -88(%rbp) ## 8-byte Spill
setb -200(%rbp) ## 1-byte Folded Spill
movq %rdi, %rax
imulq -280(%rbp), %rax ## 8-byte Folded Reload
movq %rax, %r14
movq %rdi, %r11
shlq $32, %r11
movq %rdi, %rax
movl $4294967295, %edx ## imm = 0xFFFFFFFF
mulq %rdx
xorl %eax, %eax
subq %rdi, %r11
movq %rdi, %r8
movq %rdi, -104(%rbp) ## 8-byte Spill
setb %al
leaq (%rbx,%rax), %rdi
xorl %ecx, %ecx
addq %rdx, %rdi
movq %rdi, -136(%rbp) ## 8-byte Spill
movq %rdx, %rdi
movq %rdx, -144(%rbp) ## 8-byte Spill
setb %cl
addq %rax, %rbx
adcq %r9, %rcx
movq %rcx, -304(%rbp) ## 8-byte Spill
setb %dl
movq %r14, %rcx
movq %r14, -80(%rbp) ## 8-byte Spill
movq %r14, %rax
movq %r13, %rbx
adcq %r13, %rax
movq %rax, -152(%rbp) ## 8-byte Spill
addb $255, -184(%rbp) ## 1-byte Folded Spill
adcq %r15, %r10
setb %r9b
addb $255, -272(%rbp) ## 1-byte Folded Spill
movq -232(%rbp), %r10 ## 8-byte Reload
adcq $0, %r10
movq -72(%rbp), %r14 ## 8-byte Reload
adcq $0, %r14
movq %r14, -72(%rbp) ## 8-byte Spill
setb -312(%rbp) ## 1-byte Folded Spill
subq %r8, %r11
addb $255, %dl
adcq %rcx, %rbx
setb -296(%rbp) ## 1-byte Folded Spill
leaq (%r11,%rdi), %rax
adcq -112(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -184(%rbp) ## 8-byte Spill
movq -216(%rbp), %r13 ## 8-byte Reload
cmpq %r13, %r12
movq %r13, %rdx
movq -176(%rbp), %rax ## 8-byte Reload
adcq %rax, %rdx
addb $255, -64(%rbp) ## 1-byte Folded Spill
adcq -48(%rbp), %r12 ## 8-byte Folded Reload
setb %bl
movq %rdx, %rcx
adcq %r10, %rcx
movq %r10, %rdi
addb $255, -320(%rbp) ## 1-byte Folded Spill
adcq $0, %rcx
movq %rcx, %r10
movq %rcx, -272(%rbp) ## 8-byte Spill
setb -232(%rbp) ## 1-byte Folded Spill
movq -120(%rbp), %rcx ## 8-byte Reload
cmpq %rcx, %r15
movq %rcx, %r15
adcq -160(%rbp), %r15 ## 8-byte Folded Reload
movl %r9d, %ecx
movl %r9d, %r8d
addb $255, %cl
movq %r15, %rcx
adcq -88(%rbp), %rcx ## 8-byte Folded Reload
movq %rcx, -48(%rbp) ## 8-byte Spill
cmpq %r13, %rdx
movq %r13, %rcx
adcq %rax, %rcx
movq %rax, %r9
addb $255, %bl
adcq %rdi, %rdx
setb -320(%rbp) ## 1-byte Folded Spill
movq %rcx, %rax
adcq %r14, %rax
movq %rax, -64(%rbp) ## 8-byte Spill
xorl %r12d, %r12d
movq -96(%rbp), %rax ## 8-byte Reload
cmpq %rax, -208(%rbp) ## 8-byte Folded Reload
setb %r12b
movq -168(%rbp), %rbx ## 8-byte Reload
movq -288(%rbp), %r14 ## 8-byte Reload
cmpq %rbx, %r14
adcq -264(%rbp), %rbx ## 8-byte Folded Reload
cmpq -248(%rbp), %rax ## 8-byte Folded Reload
movq -256(%rbp), %rax ## 8-byte Reload
adcq %rbx, %rax
addb $255, -128(%rbp) ## 1-byte Folded Spill
adcq %rax, %r12
addb $255, -312(%rbp) ## 1-byte Folded Spill
movq -48(%rbp), %rdi ## 8-byte Reload
adcq $0, %rdi
movq %rdi, -48(%rbp) ## 8-byte Spill
setb -256(%rbp) ## 1-byte Folded Spill
movq -104(%rbp), %rax ## 8-byte Reload
movl $4294967294, %edx ## imm = 0xFFFFFFFE
mulq %rdx
addq -144(%rbp), %r11 ## 8-byte Folded Reload
adcq -80(%rbp), %rdx ## 8-byte Folded Reload
movq %rdx, -96(%rbp) ## 8-byte Spill
addb $255, -296(%rbp) ## 1-byte Folded Spill
adcq -112(%rbp), %r11 ## 8-byte Folded Reload
setb -248(%rbp) ## 1-byte Folded Spill
movq %rdx, %rax
adcq %r10, %rax
movq %rax, %rdx
cmpq %r13, %rcx
movq %r13, %r10
adcq %r9, %r10
addb $255, -320(%rbp) ## 1-byte Folded Spill
adcq -72(%rbp), %rcx ## 8-byte Folded Reload
setb -208(%rbp) ## 1-byte Folded Spill
movq %r10, %rax
adcq %rdi, %rax
movq %rax, -128(%rbp) ## 8-byte Spill
addb $255, -192(%rbp) ## 1-byte Folded Spill
adcq -240(%rbp), %r14 ## 8-byte Folded Reload
adcq $0, %r12
xorl %r9d, %r9d
cmpq %rbx, %r12
setb %r9b
movq -120(%rbp), %rcx ## 8-byte Reload
cmpq %rcx, %r15
movq %rcx, %r14
movq -160(%rbp), %rax ## 8-byte Reload
adcq %rax, %r14
cmpq %rcx, %r14
adcq %rax, %rcx
movq %rcx, -112(%rbp) ## 8-byte Spill
cmpq -168(%rbp), %rbx ## 8-byte Folded Reload
movq -264(%rbp), %rax ## 8-byte Reload
adcq %rcx, %rax
addb $255, -200(%rbp) ## 1-byte Folded Spill
adcq $0, %r12
adcq %rax, %r9
addb $255, %r8b
adcq -88(%rbp), %r15 ## 8-byte Folded Reload
setb -88(%rbp) ## 1-byte Folded Spill
movq %r14, %rax
adcq %r12, %rax
movq %rax, %rcx
movq -224(%rbp), %rax ## 8-byte Reload
movq -136(%rbp), %rbx ## 8-byte Reload
addq 44(%rax), %rbx
movq %rbx, -136(%rbp) ## 8-byte Spill
movq -304(%rbp), %rdi ## 8-byte Reload
adcq $0, %rdi
movq -152(%rbp), %r15 ## 8-byte Reload
adcq $0, %r15
movq -184(%rbp), %r8 ## 8-byte Reload
adcq $0, %r8
adcq $0, %rdx
movq %rdx, -200(%rbp) ## 8-byte Spill
setb -72(%rbp) ## 1-byte Folded Spill
addb $255, -256(%rbp) ## 1-byte Folded Spill
adcq $0, %rcx
movq %rcx, -104(%rbp) ## 8-byte Spill
setb -152(%rbp) ## 1-byte Folded Spill
movq %rbx, %rax
imulq -280(%rbp), %rax ## 8-byte Folded Reload
movq %rax, %r11
movq %rax, -168(%rbp) ## 8-byte Spill
movq %rbx, %rcx
shlq $32, %rcx
movq %rbx, %rax
movl $4294967295, %edx ## imm = 0xFFFFFFFF
mulq %rdx
movq %rdx, -224(%rbp) ## 8-byte Spill
xorl %eax, %eax
subq %rbx, %rcx
setb %al
leaq (%rdi,%rax), %rbx
addq %rdx, %rbx
movq %rbx, -240(%rbp) ## 8-byte Spill
movl $0, %edx
setb %dl
addq %rax, %rdi
adcq %r15, %rdx
movq %rdx, -256(%rbp) ## 8-byte Spill
setb -184(%rbp) ## 1-byte Folded Spill
movq %r11, %rax
adcq %r8, %rax
movq %rax, -264(%rbp) ## 8-byte Spill
movq -216(%rbp), %r13 ## 8-byte Reload
cmpq %r13, %r10
adcq -176(%rbp), %r13 ## 8-byte Folded Reload
addb $255, -208(%rbp) ## 1-byte Folded Spill
adcq -48(%rbp), %r10 ## 8-byte Folded Reload
setb -208(%rbp) ## 1-byte Folded Spill
movq %r13, %rax
adcq -104(%rbp), %rax ## 8-byte Folded Reload
addb $255, -232(%rbp) ## 1-byte Folded Spill
movq -64(%rbp), %rdi ## 8-byte Reload
adcq $0, %rdi
movq %rdi, -64(%rbp) ## 8-byte Spill
movq -128(%rbp), %r10 ## 8-byte Reload
adcq $0, %r10
adcq $0, %rax
movq %rax, -192(%rbp) ## 8-byte Spill
setb -128(%rbp) ## 1-byte Folded Spill
addb $255, -88(%rbp) ## 1-byte Folded Spill
adcq %r14, %r12
adcq $0, %r9
movq %r9, -232(%rbp) ## 8-byte Spill
addb $255, -152(%rbp) ## 1-byte Folded Spill
adcq $0, %r9
movq %r9, -48(%rbp) ## 8-byte Spill
setb -88(%rbp) ## 1-byte Folded Spill
movq -80(%rbp), %r9 ## 8-byte Reload
movq -96(%rbp), %rbx ## 8-byte Reload
cmpq %r9, %rbx
movq %r9, %rdx
movq -144(%rbp), %rax ## 8-byte Reload
adcq %rax, %rdx
addb $255, -248(%rbp) ## 1-byte Folded Spill
adcq -272(%rbp), %rbx ## 8-byte Folded Reload
setb %r15b
movq %rdx, %r12
adcq %rdi, %r12
movq -136(%rbp), %r11 ## 8-byte Reload
subq %r11, %rcx
addb $255, -184(%rbp) ## 1-byte Folded Spill
movq -168(%rbp), %r14 ## 8-byte Reload
adcq %r14, %r8
setb -96(%rbp) ## 1-byte Folded Spill
movq -224(%rbp), %rbx ## 8-byte Reload
leaq (%rcx,%rbx), %rdi
movq -200(%rbp), %r8 ## 8-byte Reload
adcq %r8, %rdi
movq %rdi, -248(%rbp) ## 8-byte Spill
cmpq %r9, %rdx
adcq %rax, %r9
addb $255, %r15b
adcq -64(%rbp), %rdx ## 8-byte Folded Reload
setb -152(%rbp) ## 1-byte Folded Spill
movq %r9, %r15
adcq %r10, %r15
movq %r10, %rdi
addb $255, -72(%rbp) ## 1-byte Folded Spill
adcq $0, %r12
adcq $0, %r15
setb -64(%rbp) ## 1-byte Folded Spill
movq %r11, %rax
movl $4294967294, %edx ## imm = 0xFFFFFFFE
mulq %rdx
addq %rbx, %rcx
adcq %r14, %rdx
addb $255, -96(%rbp) ## 1-byte Folded Spill
adcq %r8, %rcx
setb %r10b
movq %rdx, %rax
adcq %r12, %rax
movq %rax, -136(%rbp) ## 8-byte Spill
movq -80(%rbp), %rax ## 8-byte Reload
cmpq %rax, %r9
movq -144(%rbp), %r11 ## 8-byte Reload
adcq %r11, %rax
addb $255, -152(%rbp) ## 1-byte Folded Spill
adcq %rdi, %r9
setb -152(%rbp) ## 1-byte Folded Spill
movq %rax, %r8
movq -192(%rbp), %r9 ## 8-byte Reload
adcq %r9, %r8
movq -216(%rbp), %rcx ## 8-byte Reload
cmpq %rcx, %r13
adcq -176(%rbp), %rcx ## 8-byte Folded Reload
addb $255, -208(%rbp) ## 1-byte Folded Spill
adcq -104(%rbp), %r13 ## 8-byte Folded Reload
setb -208(%rbp) ## 1-byte Folded Spill
movq %rcx, %rdi
adcq -48(%rbp), %rdi ## 8-byte Folded Reload
addb $255, -128(%rbp) ## 1-byte Folded Spill
adcq $0, %rdi
movq %rdi, -72(%rbp) ## 8-byte Spill
setb -96(%rbp) ## 1-byte Folded Spill
cmpq %r14, %rdx
movq -224(%rbp), %rbx ## 8-byte Reload
adcq %rbx, %r14
addb $255, %r10b
adcq %r12, %rdx
setb %dil
movq %r14, %rdx
adcq %r15, %rdx
movq %rdx, -104(%rbp) ## 8-byte Spill
addb $255, -64(%rbp) ## 1-byte Folded Spill
adcq $0, %r8
setb -200(%rbp) ## 1-byte Folded Spill
movq -80(%rbp), %r13 ## 8-byte Reload
cmpq %r13, %rax
adcq %r11, %r13
addb $255, -152(%rbp) ## 1-byte Folded Spill
adcq %r9, %rax
setb -128(%rbp) ## 1-byte Folded Spill
movq %r13, %r10
adcq -72(%rbp), %r10 ## 8-byte Folded Reload
movq -168(%rbp), %r11 ## 8-byte Reload
cmpq %r11, %r14
movq %r11, %rax
adcq %rbx, %rax
movq %rbx, %r12
addb $255, %dil
adcq %r15, %r14
setb %r9b
movq %rax, %rdi
adcq %r8, %rdi
movq %rdi, -64(%rbp) ## 8-byte Spill
xorl %edi, %edi
movq -112(%rbp), %rbx ## 8-byte Reload
cmpq %rbx, -232(%rbp) ## 8-byte Folded Reload
setb %dil
movq -216(%rbp), %rdx ## 8-byte Reload
cmpq %rdx, %rcx
movq %rdx, %r15
adcq -176(%rbp), %r15 ## 8-byte Folded Reload
cmpq -120(%rbp), %rbx ## 8-byte Folded Reload
movq -160(%rbp), %rbx ## 8-byte Reload
adcq %r15, %rbx
addb $255, -88(%rbp) ## 1-byte Folded Spill
adcq %rbx, %rdi
addb $255, -200(%rbp) ## 1-byte Folded Spill
adcq $0, %r10
setb %bl
cmpq %r11, %rax
movq %r11, %r14
adcq %r12, %r14
addb $255, %r9b
adcq %r8, %rax
setb -120(%rbp) ## 1-byte Folded Spill
movq %r14, %rax
adcq %r10, %rax
movq %rax, -160(%rbp) ## 8-byte Spill
addb $255, -208(%rbp) ## 1-byte Folded Spill
adcq -48(%rbp), %rcx ## 8-byte Folded Reload
adcq $0, %rdi
xorl %r9d, %r9d
cmpq %r15, %rdi
setb %r9b
movq -80(%rbp), %r12 ## 8-byte Reload
cmpq %r12, %r13
movq %r12, %r8
movq -144(%rbp), %rax ## 8-byte Reload
adcq %rax, %r8
cmpq %r12, %r8
adcq %rax, %r12
cmpq %rdx, %r15
movq -176(%rbp), %rax ## 8-byte Reload
adcq %r12, %rax
addb $255, -96(%rbp) ## 1-byte Folded Spill
adcq $0, %rdi
adcq %rax, %r9
addb $255, -128(%rbp) ## 1-byte Folded Spill
adcq -72(%rbp), %r13 ## 8-byte Folded Reload
setb %r13b
movq %r8, %rdx
adcq %rdi, %rdx
addb $255, %bl
adcq $0, %rdx
setb %cl
movb %cl, -72(%rbp) ## 1-byte Spill
cmpq %r11, %r14
movq %r11, %rbx
movq -224(%rbp), %r15 ## 8-byte Reload
adcq %r15, %rbx
addb $255, -120(%rbp) ## 1-byte Folded Spill
adcq %r10, %r14
setb %r11b
movq %rbx, %rax
adcq %rdx, %rax
movq %rax, -216(%rbp) ## 8-byte Spill
addb $255, %r13b
adcq %r8, %rdi
adcq $0, %r9
xorl %r14d, %r14d
addb $255, %cl
movq %r9, %rcx
adcq $0, %rcx
setb %r10b
addb $255, %r11b
adcq %rbx, %rdx
setb %r8b
movq %rcx, %rax
adcq $0, %rax
movq %rax, -48(%rbp) ## 8-byte Spill
xorl %r11d, %r11d
cmpq %r12, %r9
setb %r11b
movq -168(%rbp), %rax ## 8-byte Reload
cmpq %rax, %rbx
movq %rax, %r13
adcq %r15, %r13
cmpq %rax, %r13
movq %rax, %rdx
movq %rax, %rbx
adcq %r15, %rdx
cmpq -80(%rbp), %r12 ## 8-byte Folded Reload
movq -144(%rbp), %r15 ## 8-byte Reload
adcq %rdx, %r15
addb $255, %r10b
movq %r15, %rdi
adcq %r11, %rdi
movl %r8d, %eax
addb $255, %al
adcq %r13, %rcx
adcq $0, %rdi
movq %rdi, -176(%rbp) ## 8-byte Spill
movb -72(%rbp), %al ## 1-byte Reload
movb %al, %r14b
addb $255, %r8b
adcq %r9, %r14
adcq %r11, %r15
xorl %eax, %eax
cmpq %rbx, %rdx
setb %al
cmpq %rdx, %rdi
adcq -224(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -80(%rbp) ## 8-byte Spill
movl $4294967295, %edx ## imm = 0xFFFFFFFF
movq -240(%rbp), %r11 ## 8-byte Reload
cmpq %rdx, %r11
movq -256(%rbp), %rax ## 8-byte Reload
movq %rax, %rcx
sbbq $0, %rcx
movq %rcx, -120(%rbp) ## 8-byte Spill
cmpq %rcx, %rax
movq -264(%rbp), %rax ## 8-byte Reload
movq %rax, %rcx
sbbq $0, %rcx
movq %rcx, -224(%rbp) ## 8-byte Spill
cmpq %rcx, %rax
movabsq $-4294967295, %rax ## imm = 0xFFFFFFFF00000001
movq -248(%rbp), %rdi ## 8-byte Reload
leaq (%rdi,%rax), %rbx
movq %rbx, %rcx
sbbq $0, %rcx
movq %rcx, -144(%rbp) ## 8-byte Spill
cmpq %rdx, %rdi
movl $0, %edx
sbbq %rdx, %rdx
cmpq %rcx, %rbx
sbbq $0, %rdx
movq -136(%rbp), %rdi ## 8-byte Reload
leaq (%rdi,%rax), %rcx
addq %rcx, %rdx
incq %rdx
movq %rdx, -168(%rbp) ## 8-byte Spill
movq %rdi, %rcx
shrq %rcx
cmpq $2147483647, %rcx ## imm = 0x7FFFFFFF
movl $0, %r12d
sbbq %r12, %r12
leaq (%rdi,%rax), %rcx
incq %rcx
cmpq %rdx, %rcx
sbbq $0, %r12
movq -104(%rbp), %rdi ## 8-byte Reload
leaq (%rdi,%rax), %rcx
addq %rcx, %r12
movl $4294967295, %edx ## imm = 0xFFFFFFFF
cmpq %rdx, %rdi
movl $0, %r14d
sbbq %r14, %r14
cmpq %r12, %rcx
sbbq $0, %r14
movq -64(%rbp), %rdi ## 8-byte Reload
leaq (%rdi,%rax), %rcx
addq %rcx, %r14
cmpq %rdx, %rdi
movl $0, %r10d
sbbq %r10, %r10
cmpq %r14, %rcx
sbbq $0, %r10
movq -160(%rbp), %rdi ## 8-byte Reload
leaq (%rdi,%rax), %rcx
addq %rcx, %r10
cmpq %rdx, %rdi
movl $0, %r9d
sbbq %r9, %r9
cmpq %r10, %rcx
sbbq $0, %r9
movq -216(%rbp), %rdi ## 8-byte Reload
leaq (%rdi,%rax), %rcx
addq %rcx, %r9
cmpq %rdx, %rdi
movl $4294967295, %edx ## imm = 0xFFFFFFFF
movl $0, %r8d
sbbq %r8, %r8
cmpq %r9, %rcx
sbbq $0, %r8
addq -48(%rbp), %r13 ## 8-byte Folded Reload
movq %r13, -48(%rbp) ## 8-byte Spill
adcq %rax, %r15
leaq (%rax,%r13), %rbx
addq %rbx, %r8
cmpq %rdx, %r13
movl $0, %edx
sbbq %rdx, %rdx
cmpq %r8, %rbx
sbbq $0, %rdx
addq %r15, %rdx
leaq (%r11,%rax), %rdi
movq %r11, %r13
movq -80(%rbp), %rcx ## 8-byte Reload
addq %rcx, %rax
movl $4294967295, %ebx ## imm = 0xFFFFFFFF
cmpq %rbx, -176(%rbp) ## 8-byte Folded Reload
movl $4294967295, %ebx ## imm = 0xFFFFFFFF
movl $0, %r11d
sbbq %r11, %r11
cmpq %rdx, %r15
sbbq $0, %r11
addq %rax, %r11
cmpq %rbx, %rcx
movl $0, %r15d
sbbq %r15, %r15
xorl %ecx, %ecx
cmpq %r11, %rax
setb %cl
xorl %eax, %eax
cmpq %rcx, %r15
setne %al
negq %rax
movq -280(%rbp), %rcx ## 8-byte Reload
xorq %rax, %rcx
andq %rax, %r13
andq %rcx, %rdi
orq %r13, %rdi
movq %rdi, -280(%rbp) ## 8-byte Spill
movq -256(%rbp), %r15 ## 8-byte Reload
andq %rax, %r15
movq -120(%rbp), %rbx ## 8-byte Reload
andq %rcx, %rbx
orq %r15, %rbx
movq %rbx, -120(%rbp) ## 8-byte Spill
movq -264(%rbp), %rbx ## 8-byte Reload
andq %rax, %rbx
movq -224(%rbp), %rdi ## 8-byte Reload
andq %rcx, %rdi
orq %rbx, %rdi
movq %rdi, -224(%rbp) ## 8-byte Spill
movq -248(%rbp), %rbx ## 8-byte Reload
andq %rax, %rbx
movq -144(%rbp), %rdi ## 8-byte Reload
andq %rcx, %rdi
orq %rbx, %rdi
movq -136(%rbp), %rbx ## 8-byte Reload
andq %rax, %rbx
movq -168(%rbp), %r15 ## 8-byte Reload
andq %rcx, %r15
orq %rbx, %r15
movq -104(%rbp), %rbx ## 8-byte Reload
andq %rax, %rbx
andq %rcx, %r12
orq %rbx, %r12
movq -64(%rbp), %rbx ## 8-byte Reload
andq %rax, %rbx
andq %rcx, %r14
orq %rbx, %r14
movq -160(%rbp), %rbx ## 8-byte Reload
andq %rax, %rbx
andq %rcx, %r10
orq %rbx, %r10
movq -216(%rbp), %rbx ## 8-byte Reload
andq %rax, %rbx
andq %rcx, %r9
orq %rbx, %r9
movq -48(%rbp), %rbx ## 8-byte Reload
andq %rax, %rbx
andq %rcx, %r8
orq %rbx, %r8
movq -176(%rbp), %rbx ## 8-byte Reload
andq %rax, %rbx
andq %rcx, %rdx
orq %rbx, %rdx
andq -80(%rbp), %rax ## 8-byte Folded Reload
andq %r11, %rcx
orq %rax, %rcx
movq -280(%rbp), %rax ## 8-byte Reload
movq %rax, (%rsi)
movq -120(%rbp), %rax ## 8-byte Reload
movq %rax, 4(%rsi)
movq -224(%rbp), %rax ## 8-byte Reload
movq %rax, 8(%rsi)
movq %rdi, 12(%rsi)
movq %r15, 16(%rsi)
movq %r12, 20(%rsi)
movq %r14, 24(%rsi)
movq %r10, 28(%rsi)
movq %r9, 32(%rsi)
movq %r8, 36(%rsi)
movq %rdx, 40(%rsi)
movq %rcx, 44(%rsi)
addq $160, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _fiat_p384_to_montgomery ## -- Begin function fiat_p384_to_montgomery
.p2align 4, 0x90
_fiat_p384_to_montgomery: ## @fiat_p384_to_montgomery
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $400, %rsp ## imm = 0x190
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdi, %r11
movq (%rdi), %rbx
movq %rdi, -232(%rbp) ## 8-byte Spill
movq %rbx, %rax
shrq $63, %rax
movq %rax, %r15
movq %rax, -112(%rbp) ## 8-byte Spill
movq %rbx, %r10
shlq $32, %r10
xorl %r9d, %r9d
subq %rbx, %r10
setb %r9b
subq %rbx, %r10
movl $4294967294, %ecx ## imm = 0xFFFFFFFE
movq %rbx, %rax
mulq %rcx
movq %rdx, %r14
incq %rcx
movq %rcx, -384(%rbp) ## 8-byte Spill
movq %rbx, %r8
movl $4294967295, %edx ## imm = 0xFFFFFFFF
movq %rbx, %rax
mulq %rdx
movq %rdx, -224(%rbp) ## 8-byte Spill
imulq %rcx, %r8
movq %r8, -304(%rbp) ## 8-byte Spill
movq %r10, %rdi
addq %rdx, %rdi
movq %r8, %rax
adcq %r14, %rax
movq %rax, -88(%rbp) ## 8-byte Spill
cmpq %r8, %rax
movq %r8, %rax
adcq %rdx, %rax
movq %rax, -120(%rbp) ## 8-byte Spill
cmpq %r8, %rax
adcq %rdx, %r8
movq %r8, -144(%rbp) ## 8-byte Spill
orq %r10, %r9
xorl %r8d, %r8d
addq %rdx, %r9
setb %r8b
movl $4294967294, %edx ## imm = 0xFFFFFFFE
leaq 3(%rdx), %r13
movq %rbx, -200(%rbp) ## 8-byte Spill
imulq %rbx, %r13
leaq (%rbx,%rbx), %r12
cmpq %r12, %r13
movq %r15, %rcx
adcq $0, %rcx
movq 4(%r11), %rbx
movq %rbx, %rax
shlq $32, %rax
subq %rbx, %rax
subq %rbx, %rax
movq %rax, %r15
movq %rbx, %rax
mulq %rdx
movq %rdx, -288(%rbp) ## 8-byte Spill
addq %rbx, %r9
movq %rbx, -320(%rbp) ## 8-byte Spill
movq %r15, %rax
movq %r15, -216(%rbp) ## 8-byte Spill
leaq (%r15,%r14), %r15
adcq %r8, %r15
xorl %r8d, %r8d
cmpq %rax, %r15
setb %r8b
xorl %r11d, %r11d
addq %r13, %r8
setb %r11b
addq %rdi, %rcx
movq -88(%rbp), %rdi ## 8-byte Reload
adcq %r10, %rdi
movq %rdi, -88(%rbp) ## 8-byte Spill
adcq %r14, -120(%rbp) ## 8-byte Folded Spill
adcq -144(%rbp), %r12 ## 8-byte Folded Reload
movq %r12, -208(%rbp) ## 8-byte Spill
setb -184(%rbp) ## 1-byte Folded Spill
addq %rdx, %r8
adcq %rcx, %r11
xorl %edx, %edx
cmpq %rcx, %r11
setb %dl
leaq (%rbx,%rbx), %rax
movq %rax, -192(%rbp) ## 8-byte Spill
addq %rax, %r11
adcq %rdi, %rdx
movq %rdx, -368(%rbp) ## 8-byte Spill
movq %r9, %r14
imulq -384(%rbp), %r14 ## 8-byte Folded Reload
movq %r14, -64(%rbp) ## 8-byte Spill
movq %r9, %r12
shlq $32, %r12
movq %r9, %rax
movl $4294967295, %ecx ## imm = 0xFFFFFFFF
mulq %rcx
movq %rdx, -136(%rbp) ## 8-byte Spill
xorl %eax, %eax
subq %r9, %r12
setb %al
leaq (%r15,%rax), %r13
xorl %ecx, %ecx
addq %rdx, %r13
setb %cl
addq %rax, %r15
adcq %r8, %rcx
setb -104(%rbp) ## 1-byte Folded Spill
adcq %r11, %r14
movq -232(%rbp), %r15 ## 8-byte Reload
movq 8(%r15), %rbx
movq %rbx, -128(%rbp) ## 8-byte Spill
movq %rbx, %rdi
shlq $32, %rdi
subq %rbx, %rdi
movq %rbx, %rax
movl $4294967294, %edx ## imm = 0xFFFFFFFE
mulq %rdx
movq %rdx, -96(%rbp) ## 8-byte Spill
subq %rbx, %rdi
movq %rdi, -160(%rbp) ## 8-byte Spill
movq %r13, %rax
addq %rbx, %rax
movq %rdi, %r8
adcq %rcx, %r8
addq %rbx, %r13
adcq %rdi, %rcx
setb -72(%rbp) ## 1-byte Folded Spill
movq %rdx, %r10
adcq %r14, %r10
movq %r13, %rdi
shlq $32, %rdi
movq %r13, %rax
movl $4294967295, %ecx ## imm = 0xFFFFFFFF
mulq %rcx
movq %rdx, %rcx
movq %rdx, -264(%rbp) ## 8-byte Spill
xorl %eax, %eax
subq %r13, %rdi
movq %rdi, -152(%rbp) ## 8-byte Spill
setb %al
leaq (%r8,%rax), %rdx
xorl %edi, %edi
addq %rcx, %rdx
setb %dil
addq %rax, %r8
adcq %r10, %rdi
setb %r8b
movq 12(%r15), %rbx
movq %rbx, -168(%rbp) ## 8-byte Spill
movq %rbx, %rcx
shlq $32, %rcx
subq %rbx, %rcx
subq %rbx, %rcx
movq %rcx, -80(%rbp) ## 8-byte Spill
movq %rdx, %rax
addq %rbx, %rax
movq %rcx, %rax
adcq %rdi, %rax
movq %rax, -296(%rbp) ## 8-byte Spill
addq %rbx, %rdx
movq %rdx, -336(%rbp) ## 8-byte Spill
adcq %rcx, %rdi
setb %r15b
subq %r9, %r12
movq %r9, %rax
movl $4294967294, %ecx ## imm = 0xFFFFFFFE
mulq %rcx
movq -320(%rbp), %rcx ## 8-byte Reload
shrq $63, %rcx
addq -136(%rbp), %r12 ## 8-byte Folded Reload
movq -64(%rbp), %rax ## 8-byte Reload
adcq %rax, %rdx
movq %rdx, -56(%rbp) ## 8-byte Spill
addb $255, -104(%rbp) ## 1-byte Folded Spill
adcq %rax, %r11
setb -248(%rbp) ## 1-byte Folded Spill
movq -368(%rbp), %rdi ## 8-byte Reload
leaq (%rdi,%rcx), %r11
movq %rcx, -176(%rbp) ## 8-byte Spill
adcq %r12, %r11
addb $255, -72(%rbp) ## 1-byte Folded Spill
adcq -96(%rbp), %r14 ## 8-byte Folded Reload
setb -328(%rbp) ## 1-byte Folded Spill
movq -128(%rbp), %r14 ## 8-byte Reload
leaq (%r14,%r14), %rax
movq %rax, -72(%rbp) ## 8-byte Spill
adcq %r11, %rax
movq %r13, %rbx
imulq -384(%rbp), %rbx ## 8-byte Folded Reload
movl %r8d, %edx
addb $255, %dl
movq %rbx, %r9
movq %rbx, -104(%rbp) ## 8-byte Spill
adcq %rax, %r9
xorl %r10d, %r10d
movq %rdi, %rdx
cmpq -88(%rbp), %rdi ## 8-byte Folded Reload
setb %r10b
addq %rcx, %rdx
movq %rdx, %rcx
movq -120(%rbp), %rdi ## 8-byte Reload
adcq %rdi, %r10
addb $255, %r8b
adcq %rbx, %rax
setb -88(%rbp) ## 1-byte Folded Spill
setb -280(%rbp) ## 1-byte Folded Spill
movq -168(%rbp), %rax ## 8-byte Reload
movl $4294967294, %edx ## imm = 0xFFFFFFFE
mulq %rdx
movq %rdx, -368(%rbp) ## 8-byte Spill
movl %r15d, %eax
addb $255, %al
movq %r9, %rax
adcq %rdx, %rax
setb -360(%rbp) ## 1-byte Folded Spill
setb -344(%rbp) ## 1-byte Folded Spill
addb $255, %r15b
adcq %rdx, %r9
movq %r9, -256(%rbp) ## 8-byte Spill
xorl %r15d, %r15d
cmpq %rdi, %r10
setb %r15b
addq -216(%rbp), %r10 ## 8-byte Folded Reload
adcq -208(%rbp), %r15 ## 8-byte Folded Reload
addb $255, -248(%rbp) ## 1-byte Folded Spill
adcq %r12, %rcx
setb -240(%rbp) ## 1-byte Folded Spill
movq -56(%rbp), %r9 ## 8-byte Reload
adcq %r10, %r9
shrq $63, %r14
movq %r14, -120(%rbp) ## 8-byte Spill
addb $255, -328(%rbp) ## 1-byte Folded Spill
adcq -72(%rbp), %r11 ## 8-byte Folded Reload
setb -312(%rbp) ## 1-byte Folded Spill
adcq %r9, %r14
addb $255, -184(%rbp) ## 1-byte Folded Spill
movq -112(%rbp), %rcx ## 8-byte Reload
adcq -200(%rbp), %rcx ## 8-byte Folded Reload
setb %r8b
movq -304(%rbp), %rdx ## 8-byte Reload
cmpq %rdx, -144(%rbp) ## 8-byte Folded Reload
movq %rdx, %r12
movq -224(%rbp), %rax ## 8-byte Reload
adcq %rax, %r12
cmpq %rdx, %r12
movq %rdx, %rbx
adcq %rax, %rbx
cmpq %rdx, %rbx
movq %rdx, %rdi
adcq %rax, %rdi
movq %rdi, %r11
movq %rdi, -328(%rbp) ## 8-byte Spill
movq -152(%rbp), %rdi ## 8-byte Reload
subq %r13, %rdi
movq %r13, %rax
movl $4294967294, %edx ## imm = 0xFFFFFFFE
mulq %rdx
addq %rcx, %r12
movzbl %r8b, %eax
adcq %rbx, %rax
movq %rax, -248(%rbp) ## 8-byte Spill
movq %r11, %rax
adcq $0, %rax
movq %rax, -200(%rbp) ## 8-byte Spill
setb -216(%rbp) ## 1-byte Folded Spill
movq %rdi, %r8
addq -264(%rbp), %r8 ## 8-byte Folded Reload
movq %r8, -152(%rbp) ## 8-byte Spill
adcq -104(%rbp), %rdx ## 8-byte Folded Reload
movq %rdx, -112(%rbp) ## 8-byte Spill
addb $255, -280(%rbp) ## 1-byte Folded Spill
adcq %r14, %r8
addb $255, -344(%rbp) ## 1-byte Folded Spill
movq -168(%rbp), %rax ## 8-byte Reload
leaq (%rax,%rax), %rcx
movq %rcx, %rdx
movq %rcx, -184(%rbp) ## 8-byte Spill
adcq %r8, %rdx
movq %rdx, -144(%rbp) ## 8-byte Spill
xorl %r11d, %r11d
cmpq -208(%rbp), %r15 ## 8-byte Folded Reload
setb %r11b
addq -288(%rbp), %r15 ## 8-byte Folded Reload
adcq %r12, %r11
movq -64(%rbp), %r13 ## 8-byte Reload
movq -56(%rbp), %rdi ## 8-byte Reload
cmpq %r13, %rdi
adcq -136(%rbp), %r13 ## 8-byte Folded Reload
addb $255, -240(%rbp) ## 1-byte Folded Spill
adcq %rdi, %r10
setb -344(%rbp) ## 1-byte Folded Spill
movq %r13, %rbx
adcq %r15, %rbx
addb $255, -312(%rbp) ## 1-byte Folded Spill
adcq -120(%rbp), %r9 ## 8-byte Folded Reload
setb -240(%rbp) ## 1-byte Folded Spill
movq -160(%rbp), %r9 ## 8-byte Reload
adcq %rbx, %r9
addb $255, -88(%rbp) ## 1-byte Folded Spill
adcq %r14, -152(%rbp) ## 8-byte Folded Spill
setb -56(%rbp) ## 1-byte Folded Spill
movq -112(%rbp), %r10 ## 8-byte Reload
adcq %r9, %r10
shrq $63, %rax
movq %rax, -88(%rbp) ## 8-byte Spill
addb $255, -360(%rbp) ## 1-byte Folded Spill
adcq %rcx, %r8
setb -280(%rbp) ## 1-byte Folded Spill
adcq %r10, %rax
movq %rax, -288(%rbp) ## 8-byte Spill
xorl %r8d, %r8d
cmpq %r12, %r11
setb %r8b
addq -192(%rbp), %r11 ## 8-byte Folded Reload
movq -248(%rbp), %rax ## 8-byte Reload
adcq %rax, %r8
xorl %edx, %edx
cmpq %rax, %r8
setb %dl
movq -320(%rbp), %rdi ## 8-byte Reload
movq -176(%rbp), %rcx ## 8-byte Reload
leaq (%rcx,%rdi), %rax
addq %rax, %r8
adcq -200(%rbp), %rdx ## 8-byte Folded Reload
movq %rdx, -208(%rbp) ## 8-byte Spill
addq %rdi, %rcx
movq %rdx, %rax
adcq $0, %rax
movq %rax, -360(%rbp) ## 8-byte Spill
setb -152(%rbp) ## 1-byte Folded Spill
movq -64(%rbp), %rdx ## 8-byte Reload
cmpq %rdx, %r13
movq %rdx, %rax
movq %rdx, %rdi
adcq -136(%rbp), %rax ## 8-byte Folded Reload
addb $255, -344(%rbp) ## 1-byte Folded Spill
adcq %r15, %r13
setb %r12b
movq %rax, %r15
adcq %r11, %r15
addb $255, -240(%rbp) ## 1-byte Folded Spill
adcq -160(%rbp), %rbx ## 8-byte Folded Reload
setb %r14b
movq -96(%rbp), %rdx ## 8-byte Reload
adcq %r15, %rdx
movq %rdx, -160(%rbp) ## 8-byte Spill
movq -104(%rbp), %r13 ## 8-byte Reload
movq -112(%rbp), %rcx ## 8-byte Reload
cmpq %r13, %rcx
adcq -264(%rbp), %r13 ## 8-byte Folded Reload
addb $255, -56(%rbp) ## 1-byte Folded Spill
adcq %rcx, %r9
setb -320(%rbp) ## 1-byte Folded Spill
movq %r13, %rbx
adcq %rdx, %rbx
addb $255, -280(%rbp) ## 1-byte Folded Spill
adcq -88(%rbp), %r10 ## 8-byte Folded Reload
setb -112(%rbp) ## 1-byte Folded Spill
movq -80(%rbp), %rcx ## 8-byte Reload
adcq %rbx, %rcx
movq %rcx, -192(%rbp) ## 8-byte Spill
movq %rdi, %rcx
cmpq %rdi, %rax
movq -136(%rbp), %r10 ## 8-byte Reload
adcq %r10, %rdi
addb $255, %r12b
adcq %r11, %rax
setb %r11b
movq %rdi, %rdx
adcq %r8, %rdx
addb $255, %r14b
adcq -96(%rbp), %r15 ## 8-byte Folded Reload
setb %r14b
movq -72(%rbp), %r12 ## 8-byte Reload
movq %r12, %rax
adcq %rdx, %rax
movq %rax, -280(%rbp) ## 8-byte Spill
movq -304(%rbp), %rax ## 8-byte Reload
cmpq %rax, -328(%rbp) ## 8-byte Folded Reload
adcq -224(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -328(%rbp) ## 8-byte Spill
addb $255, -216(%rbp) ## 1-byte Folded Spill
adcq $0, %rax
movq %rax, -56(%rbp) ## 8-byte Spill
setb -176(%rbp) ## 1-byte Folded Spill
cmpq %rcx, %rdi
movq %r10, %r15
adcq %r10, %rcx
addb $255, %r11b
adcq %r8, %rdi
setb %r11b
movq %rcx, %rax
movq -360(%rbp), %r9 ## 8-byte Reload
adcq %r9, %rax
addb $255, %r14b
adcq %r12, %rdx
setb %r10b
movq -128(%rbp), %rdi ## 8-byte Reload
movq -120(%rbp), %rdx ## 8-byte Reload
leaq (%rdx,%rdi), %r8
movq %r8, %rdx
adcq %rax, %rdx
movq %rdx, -72(%rbp) ## 8-byte Spill
addb $255, -112(%rbp) ## 1-byte Folded Spill
adcq -80(%rbp), %rbx ## 8-byte Folded Reload
setb -216(%rbp) ## 1-byte Folded Spill
xorl %edx, %edx
movq -208(%rbp), %rdi ## 8-byte Reload
cmpq -200(%rbp), %rdi ## 8-byte Folded Reload
setb %dl
addb $255, -152(%rbp) ## 1-byte Folded Spill
adcq -56(%rbp), %rdx ## 8-byte Folded Reload
movq %rdx, %rbx
movq %rdx, -344(%rbp) ## 8-byte Spill
movq -64(%rbp), %rdx ## 8-byte Reload
cmpq %rdx, %rcx
adcq %r15, %rdx
movq %rdx, -248(%rbp) ## 8-byte Spill
addb $255, %r11b
adcq %r9, %rcx
setb -360(%rbp) ## 1-byte Folded Spill
movq %rdx, %rcx
adcq %rbx, %rcx
addb $255, %r10b
adcq %r8, %rax
adcq $0, %rcx
movq %rcx, -80(%rbp) ## 8-byte Spill
setb -112(%rbp) ## 1-byte Folded Spill
movq -104(%rbp), %rax ## 8-byte Reload
cmpq %rax, %r13
movq %rax, %rcx
movq %rax, %r10
movq -264(%rbp), %rbx ## 8-byte Reload
adcq %rbx, %rcx
addb $255, -320(%rbp) ## 1-byte Folded Spill
adcq -160(%rbp), %r13 ## 8-byte Folded Reload
setb %r8b
movq %rcx, %rax
movq -280(%rbp), %r9 ## 8-byte Reload
adcq %r9, %rax
movq %rax, -240(%rbp) ## 8-byte Spill
movq -336(%rbp), %rdi ## 8-byte Reload
movq %rdi, %r11
imulq -384(%rbp), %r11 ## 8-byte Folded Reload
movq %r11, -96(%rbp) ## 8-byte Spill
movq %rdi, %r13
shlq $32, %r13
movq %rdi, %rax
movl $4294967295, %edx ## imm = 0xFFFFFFFF
mulq %rdx
movq %rdx, -152(%rbp) ## 8-byte Spill
xorl %eax, %eax
subq %rdi, %r13
setb %al
movq -296(%rbp), %rdi ## 8-byte Reload
leaq (%rdi,%rax), %r12
xorl %r15d, %r15d
addq %rdx, %r12
setb %r15b
addq %rax, %rdi
adcq -256(%rbp), %r15 ## 8-byte Folded Reload
setb -256(%rbp) ## 1-byte Folded Spill
adcq -144(%rbp), %r11 ## 8-byte Folded Reload
cmpq %r10, %rcx
movq %r10, %rax
adcq %rbx, %rax
addb $255, %r8b
adcq %r9, %rcx
setb %r10b
movq %rax, %r14
movq %rax, %r9
movq %rax, -272(%rbp) ## 8-byte Spill
movq -72(%rbp), %r8 ## 8-byte Reload
adcq %r8, %r14
movq -232(%rbp), %rax ## 8-byte Reload
movq 16(%rax), %rdi
movq %rdi, %rbx
shlq $32, %rbx
subq %rdi, %rbx
movq %rdi, %rax
movl $4294967294, %ecx ## imm = 0xFFFFFFFE
mulq %rcx
movq %rdx, -208(%rbp) ## 8-byte Spill
subq %rdi, %rbx
movq %r12, %rax
addq %rdi, %rax
movq %rbx, %rax
movq %rbx, -200(%rbp) ## 8-byte Spill
adcq %r15, %rax
movq %rax, -280(%rbp) ## 8-byte Spill
addq %rdi, %r12
movq %r12, -160(%rbp) ## 8-byte Spill
movq %rdi, %r12
movq %rdi, -320(%rbp) ## 8-byte Spill
adcq %rbx, %r15
setb %al
movq %rdx, %rcx
adcq %r11, %rcx
movq %rcx, -296(%rbp) ## 8-byte Spill
addb $255, %r10b
adcq %r9, %r8
setb -312(%rbp) ## 1-byte Folded Spill
movb -216(%rbp), %r8b ## 1-byte Reload
movl %r8d, %edx
addb $255, %dl
movq -368(%rbp), %rdx ## 8-byte Reload
movq %rdx, %rcx
movq -240(%rbp), %rbx ## 8-byte Reload
adcq %rbx, %rcx
movq %rcx, -72(%rbp) ## 8-byte Spill
subq -336(%rbp), %r13 ## 8-byte Folded Reload
addb $255, -256(%rbp) ## 1-byte Folded Spill
movq -96(%rbp), %rcx ## 8-byte Reload
adcq %rcx, -144(%rbp) ## 8-byte Folded Spill
setb -256(%rbp) ## 1-byte Folded Spill
movq -152(%rbp), %r15 ## 8-byte Reload
leaq (%r13,%r15), %rdi
movq -288(%rbp), %r10 ## 8-byte Reload
adcq %r10, %rdi
addb $255, %r8b
adcq %rdx, %rbx
setb %dl
movq -184(%rbp), %r9 ## 8-byte Reload
movq %r9, %rcx
adcq %r14, %rcx
movq %rcx, -240(%rbp) ## 8-byte Spill
addb $255, %al
adcq -208(%rbp), %r11 ## 8-byte Folded Reload
setb %r11b
leaq (%r12,%r12), %rbx
movq %rbx, %rax
movq %rbx, -368(%rbp) ## 8-byte Spill
adcq %rdi, %rax
movq %rax, -144(%rbp) ## 8-byte Spill
addb $255, %dl
adcq %r9, %r14
setb -184(%rbp) ## 1-byte Folded Spill
movq -336(%rbp), %rax ## 8-byte Reload
movl $4294967294, %ecx ## imm = 0xFFFFFFFE
mulq %rcx
addq %r15, %r13
movq -96(%rbp), %r12 ## 8-byte Reload
adcq %r12, %rdx
addb $255, -256(%rbp) ## 1-byte Folded Spill
adcq %r10, %r13
setb %r9b
movq %rdx, %rax
movq -192(%rbp), %rcx ## 8-byte Reload
adcq %rcx, %rax
movq %rax, -256(%rbp) ## 8-byte Spill
addb $255, %r11b
adcq %rbx, %rdi
setb -216(%rbp) ## 1-byte Folded Spill
movq -64(%rbp), %r10 ## 8-byte Reload
movq -248(%rbp), %r14 ## 8-byte Reload
cmpq %r10, %r14
adcq -136(%rbp), %r10 ## 8-byte Folded Reload
movq -328(%rbp), %rax ## 8-byte Reload
cmpq -304(%rbp), %rax ## 8-byte Folded Reload
movq -224(%rbp), %rax ## 8-byte Reload
adcq $0, %rax
addb $255, -176(%rbp) ## 1-byte Folded Spill
adcq %r10, %rax
movq %rax, %r8
movq -120(%rbp), %rax ## 8-byte Reload
addq -128(%rbp), %rax ## 8-byte Folded Reload
movq -80(%rbp), %rbx ## 8-byte Reload
adcq $0, %rbx
movq %rbx, -80(%rbp) ## 8-byte Spill
setb -224(%rbp) ## 1-byte Folded Spill
cmpq %r12, %rdx
movq %r12, %r11
adcq %r15, %r11
addb $255, %r9b
adcq %rcx, %rdx
setb -128(%rbp) ## 1-byte Folded Spill
movq %r11, %rax
adcq -72(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -176(%rbp) ## 8-byte Spill
movq -104(%rbp), %rdi ## 8-byte Reload
cmpq %rdi, -272(%rbp) ## 8-byte Folded Reload
movq -264(%rbp), %rax ## 8-byte Reload
adcq %rax, %rdi
movb -312(%rbp), %r9b ## 1-byte Reload
movl %r9d, %edx
addb $255, %dl
movq %rdi, %r15
adcq %rbx, %r15
movb -184(%rbp), %r13b ## 1-byte Reload
movl %r13d, %edx
addb $255, %dl
movq -168(%rbp), %rdx ## 8-byte Reload
movq -88(%rbp), %rcx ## 8-byte Reload
leaq (%rcx,%rdx), %r12
movq %r12, %rcx
adcq %r15, %rcx
movq %rcx, -120(%rbp) ## 8-byte Spill
xorl %edx, %edx
movq -344(%rbp), %rcx ## 8-byte Reload
cmpq -56(%rbp), %rcx ## 8-byte Folded Reload
setb %dl
addb $255, -360(%rbp) ## 1-byte Folded Spill
adcq %rcx, %r14
adcq %r8, %rdx
xorl %r8d, %r8d
cmpq %r10, %rdx
setb %r8b
movq -104(%rbp), %r14 ## 8-byte Reload
cmpq %r14, %rdi
movq %r14, %rbx
adcq %rax, %rbx
cmpq %r14, %rbx
movq %rbx, -192(%rbp) ## 8-byte Spill
adcq %rax, %r14
movq %r14, -328(%rbp) ## 8-byte Spill
cmpq -64(%rbp), %r10 ## 8-byte Folded Reload
movl $0, %ecx
movb -224(%rbp), %al ## 1-byte Reload
movb %al, %cl
movq -136(%rbp), %rax ## 8-byte Reload
adcq %r14, %rax
addb $255, -112(%rbp) ## 1-byte Folded Spill
adcq %rdx, %rcx
movq %rcx, -112(%rbp) ## 8-byte Spill
adcq %rax, %r8
movq %r8, -304(%rbp) ## 8-byte Spill
addb $255, %r9b
adcq -80(%rbp), %rdi ## 8-byte Folded Reload
setb -64(%rbp) ## 1-byte Folded Spill
movq %rbx, %rax
adcq %rcx, %rax
addb $255, %r13b
adcq %r12, %r15
adcq $0, %rax
movq %rax, -336(%rbp) ## 8-byte Spill
setb -80(%rbp) ## 1-byte Folded Spill
movq -96(%rbp), %r14 ## 8-byte Reload
cmpq %r14, %r11
movq %r14, %rbx
movq -152(%rbp), %r13 ## 8-byte Reload
adcq %r13, %rbx
addb $255, -128(%rbp) ## 1-byte Folded Spill
adcq -72(%rbp), %r11 ## 8-byte Folded Reload
setb %r9b
movq %rbx, %rax
movq -240(%rbp), %r10 ## 8-byte Reload
adcq %r10, %rax
movq %rax, -56(%rbp) ## 8-byte Spill
movq -160(%rbp), %rdi ## 8-byte Reload
movq %rdi, %r15
imulq -384(%rbp), %r15 ## 8-byte Folded Reload
movq %r15, -136(%rbp) ## 8-byte Spill
movq %rdi, %r11
shlq $32, %r11
movq %rdi, %rax
movl $4294967295, %ecx ## imm = 0xFFFFFFFF
mulq %rcx
movq %rdx, -224(%rbp) ## 8-byte Spill
xorl %eax, %eax
subq %rdi, %r11
setb %al
movq -280(%rbp), %rcx ## 8-byte Reload
leaq (%rcx,%rax), %r8
xorl %edi, %edi
addq %rdx, %r8
setb %dil
addq %rax, %rcx
adcq -296(%rbp), %rdi ## 8-byte Folded Reload
setb -72(%rbp) ## 1-byte Folded Spill
adcq -144(%rbp), %r15 ## 8-byte Folded Reload
cmpq %r14, %rbx
adcq %r13, %r14
addb $255, %r9b
adcq %r10, %rbx
setb %r10b
movq %r14, %rax
movq -120(%rbp), %r9 ## 8-byte Reload
adcq %r9, %rax
movq %rax, -248(%rbp) ## 8-byte Spill
movq -232(%rbp), %rax ## 8-byte Reload
movq 20(%rax), %rbx
movq %rbx, -128(%rbp) ## 8-byte Spill
movq %rbx, %rcx
shlq $32, %rcx
subq %rbx, %rcx
movq %rbx, %rax
movl $4294967294, %edx ## imm = 0xFFFFFFFE
mulq %rdx
movq %rdx, -288(%rbp) ## 8-byte Spill
subq %rbx, %rcx
movq %rcx, -360(%rbp) ## 8-byte Spill
movq %r8, %rax
addq %rbx, %rax
movq %rcx, %r12
adcq %rdi, %r12
addq %rbx, %r8
adcq %rcx, %rdi
setb -272(%rbp) ## 1-byte Folded Spill
adcq %r15, %rdx
movq %rdx, -344(%rbp) ## 8-byte Spill
movq -88(%rbp), %rdx ## 8-byte Reload
addq -168(%rbp), %rdx ## 8-byte Folded Reload
movq -336(%rbp), %rcx ## 8-byte Reload
adcq $0, %rcx
movq %rcx, -336(%rbp) ## 8-byte Spill
setb %dl
movq -96(%rbp), %rax ## 8-byte Reload
cmpq %rax, %r14
movq %rax, %rbx
adcq %r13, %rbx
addb $255, %r10b
adcq %r9, %r14
setb -88(%rbp) ## 1-byte Folded Spill
movq %rbx, %rdi
movq %rbx, %r9
adcq %rcx, %rdi
movq %rdi, -120(%rbp) ## 8-byte Spill
addb $255, -64(%rbp) ## 1-byte Folded Spill
movq -112(%rbp), %rax ## 8-byte Reload
adcq -192(%rbp), %rax ## 8-byte Folded Reload
movq -304(%rbp), %rdi ## 8-byte Reload
adcq $0, %rdi
movq %rdi, -304(%rbp) ## 8-byte Spill
addb $255, -80(%rbp) ## 1-byte Folded Spill
movzbl %dl, %edx
adcq %rdi, %rdx
movq %rdx, -296(%rbp) ## 8-byte Spill
setb -280(%rbp) ## 1-byte Folded Spill
movq -320(%rbp), %rdi ## 8-byte Reload
shrq $63, %rdi
movb -216(%rbp), %cl ## 1-byte Reload
movl %ecx, %edx
addb $255, %dl
movq %rdi, %rax
movq %rdi, -192(%rbp) ## 8-byte Spill
movq -256(%rbp), %rdx ## 8-byte Reload
adcq %rdx, %rax
movq %rax, -312(%rbp) ## 8-byte Spill
subq -160(%rbp), %r11 ## 8-byte Folded Reload
addb $255, -72(%rbp) ## 1-byte Folded Spill
movq -136(%rbp), %rbx ## 8-byte Reload
adcq %rbx, -144(%rbp) ## 8-byte Folded Spill
setb -240(%rbp) ## 1-byte Folded Spill
movq -224(%rbp), %rbx ## 8-byte Reload
leaq (%r11,%rbx), %rbx
adcq %rax, %rbx
movq %rbx, %r14
movq %rbx, -392(%rbp) ## 8-byte Spill
addb $255, %cl
adcq %rdi, %rdx
setb %dl
movq -200(%rbp), %rbx ## 8-byte Reload
movq %rbx, %rax
movq -176(%rbp), %rcx ## 8-byte Reload
adcq %rcx, %rax
movq %rax, -408(%rbp) ## 8-byte Spill
addb $255, -272(%rbp) ## 1-byte Folded Spill
adcq -288(%rbp), %r15 ## 8-byte Folded Reload
setb -41(%rbp) ## 1-byte Folded Spill
movq -128(%rbp), %rax ## 8-byte Reload
leaq (%rax,%rax), %r10
movq %r10, -80(%rbp) ## 8-byte Spill
adcq %r14, %r10
movq %r10, %r13
movq %r10, -456(%rbp) ## 8-byte Spill
addb $255, %dl
adcq %rbx, %rcx
setb %r15b
movq -208(%rbp), %r10 ## 8-byte Reload
movq %r10, %rax
movq -56(%rbp), %rcx ## 8-byte Reload
adcq %rcx, %rax
movq %rax, -72(%rbp) ## 8-byte Spill
movq %r8, -184(%rbp) ## 8-byte Spill
movq %r8, %rax
imulq -384(%rbp), %rax ## 8-byte Folded Reload
movq %rax, %rbx
movq %rax, -168(%rbp) ## 8-byte Spill
movq %r8, %rdi
shlq $32, %rdi
movq %r8, %rax
movl $4294967295, %edx ## imm = 0xFFFFFFFF
mulq %rdx
movq %rdx, -64(%rbp) ## 8-byte Spill
xorl %eax, %eax
subq %r8, %rdi
movq %rdi, -112(%rbp) ## 8-byte Spill
setb %al
leaq (%r12,%rax), %r14
xorl %edi, %edi
addq %rdx, %r14
setb %dil
addq %rax, %r12
adcq -344(%rbp), %rdi ## 8-byte Folded Reload
setb -200(%rbp) ## 1-byte Folded Spill
movq %rbx, %r8
adcq %r13, %r8
addb $255, %r15b
adcq %r10, %rcx
setb %r12b
movq -368(%rbp), %r10 ## 8-byte Reload
movq %r10, %rax
movq -248(%rbp), %rbx ## 8-byte Reload
adcq %rbx, %rax
movq %rax, -216(%rbp) ## 8-byte Spill
movq -232(%rbp), %rax ## 8-byte Reload
movq 24(%rax), %r13
movq %r13, -144(%rbp) ## 8-byte Spill
movq %r13, %rcx
shlq $32, %rcx
subq %r13, %rcx
movq %r13, %rax
movl $4294967294, %edx ## imm = 0xFFFFFFFE
mulq %rdx
movq %rdx, -344(%rbp) ## 8-byte Spill
subq %r13, %rcx
movq %r14, %rax
addq %r13, %rax
movq %rcx, -376(%rbp) ## 8-byte Spill
movq %rcx, %r15
adcq %rdi, %r15
addq %r13, %r14
adcq %rcx, %rdi
setb -176(%rbp) ## 1-byte Folded Spill
adcq %r8, %rdx
movq %rdx, -432(%rbp) ## 8-byte Spill
movq -96(%rbp), %rax ## 8-byte Reload
cmpq %rax, %r9
movq %rax, %rcx
adcq -152(%rbp), %rcx ## 8-byte Folded Reload
movq %rcx, -256(%rbp) ## 8-byte Spill
addb $255, -88(%rbp) ## 1-byte Folded Spill
adcq -336(%rbp), %r9 ## 8-byte Folded Reload
setb -512(%rbp) ## 1-byte Folded Spill
movq %rcx, %rax
adcq -296(%rbp), %rax ## 8-byte Folded Reload
movq %rax, %rcx
addb $255, %r12b
adcq %r10, %rbx
setb -272(%rbp) ## 1-byte Folded Spill
movq -320(%rbp), %rax ## 8-byte Reload
movq -192(%rbp), %rdx ## 8-byte Reload
leaq (%rdx,%rax), %rdx
movq %rdx, -400(%rbp) ## 8-byte Spill
movq -120(%rbp), %rax ## 8-byte Reload
adcq %rdx, %rax
adcq $0, %rcx
movq %rcx, -208(%rbp) ## 8-byte Spill
setb -464(%rbp) ## 1-byte Folded Spill
movq -160(%rbp), %rax ## 8-byte Reload
movl $4294967294, %ecx ## imm = 0xFFFFFFFE
mulq %rcx
movq -224(%rbp), %r13 ## 8-byte Reload
addq %r13, %r11
movq -136(%rbp), %r9 ## 8-byte Reload
adcq %r9, %rdx
addb $255, -240(%rbp) ## 1-byte Folded Spill
adcq -312(%rbp), %r11 ## 8-byte Folded Reload
setb %r10b
movq %rdx, %rax
movq -408(%rbp), %r11 ## 8-byte Reload
adcq %r11, %rax
movq %rax, -312(%rbp) ## 8-byte Spill
movq -128(%rbp), %rdi ## 8-byte Reload
shrq $63, %rdi
movq %rdi, -56(%rbp) ## 8-byte Spill
addb $255, -41(%rbp) ## 1-byte Folded Spill
movq -392(%rbp), %rcx ## 8-byte Reload
adcq -80(%rbp), %rcx ## 8-byte Folded Reload
setb -248(%rbp) ## 1-byte Folded Spill
adcq %rax, %rdi
movq %rdi, %rcx
movq %rdi, -448(%rbp) ## 8-byte Spill
movq -112(%rbp), %rax ## 8-byte Reload
subq -184(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -112(%rbp) ## 8-byte Spill
addb $255, -200(%rbp) ## 1-byte Folded Spill
movq -456(%rbp), %rdi ## 8-byte Reload
adcq -168(%rbp), %rdi ## 8-byte Folded Reload
setb -456(%rbp) ## 1-byte Folded Spill
movq -64(%rbp), %rdi ## 8-byte Reload
leaq (%rax,%rdi), %rax
adcq %rcx, %rax
movq %rax, -504(%rbp) ## 8-byte Spill
addb $255, -176(%rbp) ## 1-byte Folded Spill
adcq -344(%rbp), %r8 ## 8-byte Folded Reload
setb -392(%rbp) ## 1-byte Folded Spill
movq -144(%rbp), %rdi ## 8-byte Reload
leaq (%rdi,%rdi), %r12
movq %r12, -176(%rbp) ## 8-byte Spill
adcq %rax, %r12
movq %r12, -480(%rbp) ## 8-byte Spill
movq %r9, %r8
cmpq %r9, %rdx
movq %r9, %rbx
adcq %r13, %rbx
addb $255, %r10b
adcq %r11, %rdx
setb %r9b
movq %rbx, %rax
movq -72(%rbp), %r10 ## 8-byte Reload
adcq %r10, %rax
movq %rax, -472(%rbp) ## 8-byte Spill
movq %r14, -440(%rbp) ## 8-byte Spill
movq %r14, %r11
imulq -384(%rbp), %r11 ## 8-byte Folded Reload
movq %r11, -88(%rbp) ## 8-byte Spill
movq %r14, %rdi
shlq $32, %rdi
movq %r14, %rax
movl $4294967295, %ecx ## imm = 0xFFFFFFFF
mulq %rcx
movq %rdx, -336(%rbp) ## 8-byte Spill
xorl %eax, %eax
subq %r14, %rdi
movq %rdi, -200(%rbp) ## 8-byte Spill
setb %al
leaq (%r15,%rax), %rcx
xorl %edi, %edi
addq %rdx, %rcx
movq %rcx, %r14
setb %dil
addq %rax, %r15
adcq -432(%rbp), %rdi ## 8-byte Folded Reload
setb -416(%rbp) ## 1-byte Folded Spill
adcq %r12, %r11
cmpq %r8, %rbx
movq %r8, %rcx
adcq %r13, %rcx
addb $255, %r9b
adcq %r10, %rbx
setb %r9b
movq %rcx, %rax
movq -216(%rbp), %r10 ## 8-byte Reload
adcq %r10, %rax
movq %rax, -160(%rbp) ## 8-byte Spill
movq -232(%rbp), %rax ## 8-byte Reload
movq 28(%rax), %r15
movq %r15, -72(%rbp) ## 8-byte Spill
movq %r15, %rbx
shlq $32, %rbx
subq %r15, %rbx
movq %r15, %rax
movl $4294967294, %r12d ## imm = 0xFFFFFFFE
mulq %r12
movq %rdx, -368(%rbp) ## 8-byte Spill
subq %r15, %rbx
movq %r14, %rax
addq %r15, %rax
movq %rbx, -408(%rbp) ## 8-byte Spill
movq %rbx, %rax
adcq %rdi, %rax
movq %rax, -488(%rbp) ## 8-byte Spill
addq %r15, %r14
movq %r14, -240(%rbp) ## 8-byte Spill
adcq %rbx, %rdi
setb -536(%rbp) ## 1-byte Folded Spill
movq %rdx, %rax
adcq %r11, %rax
movq %rax, -424(%rbp) ## 8-byte Spill
addb $255, -272(%rbp) ## 1-byte Folded Spill
movq -120(%rbp), %rax ## 8-byte Reload
adcq -400(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -120(%rbp) ## 8-byte Spill
cmpq %r8, %rcx
adcq %r13, %r8
movq %r8, -544(%rbp) ## 8-byte Spill
addb $255, %r9b
adcq %r10, %rcx
setb -272(%rbp) ## 1-byte Folded Spill
adcq %rax, %r8
movq %r8, -216(%rbp) ## 8-byte Spill
movq -192(%rbp), %rax ## 8-byte Reload
addq -320(%rbp), %rax ## 8-byte Folded Reload
adcq $0, -208(%rbp) ## 8-byte Folded Spill
setb -41(%rbp) ## 1-byte Folded Spill
movq -184(%rbp), %rax ## 8-byte Reload
mulq %r12
movl $4294967294, %r12d ## imm = 0xFFFFFFFE
movq %rdx, %r15
movq -112(%rbp), %rbx ## 8-byte Reload
addq -64(%rbp), %rbx ## 8-byte Folded Reload
movq -168(%rbp), %r13 ## 8-byte Reload
adcq %r13, %r15
xorl %eax, %eax
movq -328(%rbp), %rdx ## 8-byte Reload
cmpq %rdx, -304(%rbp) ## 8-byte Folded Reload
setb %al
movq %rax, %rdi
movq -96(%rbp), %rax ## 8-byte Reload
cmpq %rax, -256(%rbp) ## 8-byte Folded Reload
movq %rax, %rcx
adcq -152(%rbp), %rcx ## 8-byte Folded Reload
movq %rcx, -400(%rbp) ## 8-byte Spill
cmpq -104(%rbp), %rdx ## 8-byte Folded Reload
movq -264(%rbp), %rax ## 8-byte Reload
adcq %rcx, %rax
addb $255, -280(%rbp) ## 1-byte Folded Spill
adcq %rax, %rdi
movq %rdi, -328(%rbp) ## 8-byte Spill
addb $255, -248(%rbp) ## 1-byte Folded Spill
movq -312(%rbp), %rax ## 8-byte Reload
adcq -56(%rbp), %rax ## 8-byte Folded Reload
setb %r10b
movq -360(%rbp), %r8 ## 8-byte Reload
movq %r8, %rcx
movq -472(%rbp), %r9 ## 8-byte Reload
adcq %r9, %rcx
addb $255, -456(%rbp) ## 1-byte Folded Spill
adcq -448(%rbp), %rbx ## 8-byte Folded Reload
setb %r14b
movq %r15, %rdx
adcq %rcx, %rdx
movq %rdx, -456(%rbp) ## 8-byte Spill
movq -144(%rbp), %rax ## 8-byte Reload
shrq $63, %rax
movq %rax, -192(%rbp) ## 8-byte Spill
addb $255, -392(%rbp) ## 1-byte Folded Spill
movq -504(%rbp), %rdi ## 8-byte Reload
adcq -176(%rbp), %rdi ## 8-byte Folded Reload
setb -392(%rbp) ## 1-byte Folded Spill
adcq %rdx, %rax
movq %rax, %rdi
movq %rax, -496(%rbp) ## 8-byte Spill
movq -440(%rbp), %rbx ## 8-byte Reload
movq %rbx, %rax
mulq %r12
movq -200(%rbp), %r12 ## 8-byte Reload
subq %rbx, %r12
addq -336(%rbp), %r12 ## 8-byte Folded Reload
movq %r12, -200(%rbp) ## 8-byte Spill
movq -88(%rbp), %rax ## 8-byte Reload
adcq %rax, %rdx
movq %rdx, -432(%rbp) ## 8-byte Spill
addb $255, -416(%rbp) ## 1-byte Folded Spill
adcq %rax, -480(%rbp) ## 8-byte Folded Spill
setb -440(%rbp) ## 1-byte Folded Spill
movq %r12, %rdx
adcq %rdi, %rdx
movq %rdx, -448(%rbp) ## 8-byte Spill
addb $255, -536(%rbp) ## 1-byte Folded Spill
adcq -368(%rbp), %r11 ## 8-byte Folded Reload
setb -504(%rbp) ## 1-byte Folded Spill
movq -72(%rbp), %rax ## 8-byte Reload
leaq (%rax,%rax), %rax
movq %rax, -112(%rbp) ## 8-byte Spill
adcq %rdx, %rax
movq %rax, -104(%rbp) ## 8-byte Spill
addb $255, %r10b
adcq %r8, %r9
setb %r8b
movq -288(%rbp), %r9 ## 8-byte Reload
movq %r9, %r10
adcq -160(%rbp), %r10 ## 8-byte Folded Reload
cmpq %r13, %r15
movq -64(%rbp), %r12 ## 8-byte Reload
adcq %r12, %r13
addb $255, %r14b
adcq %r15, %rcx
setb %r11b
movq %r13, %rax
adcq %r10, %rax
movq %rax, -472(%rbp) ## 8-byte Spill
movq -240(%rbp), %rbx ## 8-byte Reload
movq %rbx, %rax
imulq -384(%rbp), %rax ## 8-byte Folded Reload
movq %rax, %r14
movq %rax, -304(%rbp) ## 8-byte Spill
movq %rbx, %rdi
shlq $32, %rdi
movq %rbx, %rax
movl $4294967295, %ecx ## imm = 0xFFFFFFFF
mulq %rcx
movq %rdx, -320(%rbp) ## 8-byte Spill
xorl %eax, %eax
subq %rbx, %rdi
movq %rdi, -184(%rbp) ## 8-byte Spill
setb %al
movq -488(%rbp), %rcx ## 8-byte Reload
leaq (%rcx,%rax), %r15
xorl %edi, %edi
addq %rdx, %r15
setb %dil
addq %rax, %rcx
adcq -424(%rbp), %rdi ## 8-byte Folded Reload
setb -416(%rbp) ## 1-byte Folded Spill
movq %r14, %rax
adcq -104(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -248(%rbp) ## 8-byte Spill
addb $255, %r8b
adcq %r9, -160(%rbp) ## 8-byte Folded Spill
setb -264(%rbp) ## 1-byte Folded Spill
movq -80(%rbp), %r8 ## 8-byte Reload
adcq -216(%rbp), %r8 ## 8-byte Folded Reload
movq -168(%rbp), %r9 ## 8-byte Reload
cmpq %r9, %r13
adcq %r12, %r9
addb $255, %r11b
adcq %r10, %r13
setb %r13b
movq %r9, %rax
adcq %r8, %rax
movq %rax, -488(%rbp) ## 8-byte Spill
movq -232(%rbp), %rax ## 8-byte Reload
movq 32(%rax), %rcx
movq %rcx, -160(%rbp) ## 8-byte Spill
movq %rcx, %rbx
shlq $32, %rbx
subq %rcx, %rbx
movq %rcx, %rax
movl $4294967294, %edx ## imm = 0xFFFFFFFE
mulq %rdx
movq %rdx, %r14
movq %rdx, -280(%rbp) ## 8-byte Spill
subq %rcx, %rbx
movq %rbx, -360(%rbp) ## 8-byte Spill
movq %r15, %rax
addq %rcx, %rax
movq %rbx, %r11
adcq %rdi, %r11
addq %rcx, %r15
adcq %rbx, %rdi
setb -520(%rbp) ## 1-byte Folded Spill
adcq -248(%rbp), %r14 ## 8-byte Folded Reload
movq -136(%rbp), %rbx ## 8-byte Reload
movq -544(%rbp), %rcx ## 8-byte Reload
cmpq %rbx, %rcx
movq -224(%rbp), %r10 ## 8-byte Reload
adcq %r10, %rbx
addb $255, -272(%rbp) ## 1-byte Folded Spill
adcq -120(%rbp), %rcx ## 8-byte Folded Reload
setb -424(%rbp) ## 1-byte Folded Spill
movq %rbx, %r12
adcq -208(%rbp), %r12 ## 8-byte Folded Reload
addb $255, -264(%rbp) ## 1-byte Folded Spill
movq -216(%rbp), %rax ## 8-byte Reload
adcq -80(%rbp), %rax ## 8-byte Folded Reload
setb -216(%rbp) ## 1-byte Folded Spill
movq -128(%rbp), %rax ## 8-byte Reload
movq -56(%rbp), %rcx ## 8-byte Reload
leaq (%rcx,%rax), %rdi
movq %rdi, -560(%rbp) ## 8-byte Spill
adcq %r12, %rdi
movq -168(%rbp), %rax ## 8-byte Reload
cmpq %rax, %r9
movq %rax, %rcx
adcq -64(%rbp), %rcx ## 8-byte Folded Reload
addb $255, %r13b
adcq %r8, %r9
setb %r8b
movq %rcx, %rax
movq %rcx, %r9
movq %rcx, -528(%rbp) ## 8-byte Spill
adcq %rdi, %rax
movq %rax, -536(%rbp) ## 8-byte Spill
movq %r15, -312(%rbp) ## 8-byte Spill
movq %r15, %rcx
shlq $32, %rcx
movq %r15, %rax
movl $4294967295, %edx ## imm = 0xFFFFFFFF
mulq %rdx
movq %rdx, -264(%rbp) ## 8-byte Spill
xorl %eax, %eax
subq %r15, %rcx
movq %rcx, -288(%rbp) ## 8-byte Spill
setb %al
leaq (%r11,%rax), %rcx
addq %rdx, %rcx
movq %rcx, -120(%rbp) ## 8-byte Spill
movl $0, %r15d
setb %r15b
addq %rax, %r11
adcq %r14, %r15
movq %r15, -552(%rbp) ## 8-byte Spill
setb -272(%rbp) ## 1-byte Folded Spill
addb $255, %r8b
adcq %r9, %rdi
setb -80(%rbp) ## 1-byte Folded Spill
addb $255, -512(%rbp) ## 1-byte Folded Spill
movq -256(%rbp), %rax ## 8-byte Reload
adcq -296(%rbp), %rax ## 8-byte Folded Reload
movq -328(%rbp), %rax ## 8-byte Reload
adcq $0, %rax
movq %rax, -328(%rbp) ## 8-byte Spill
xorl %ecx, %ecx
movb -41(%rbp), %dl ## 1-byte Reload
movb %dl, %cl
addb $255, -464(%rbp) ## 1-byte Folded Spill
adcq %rax, %rcx
movq %rcx, %rdx
movq %rcx, -544(%rbp) ## 8-byte Spill
setb -480(%rbp) ## 1-byte Folded Spill
movq -136(%rbp), %rax ## 8-byte Reload
cmpq %rax, %rbx
adcq %r10, %rax
movq %rax, -568(%rbp) ## 8-byte Spill
addb $255, -424(%rbp) ## 1-byte Folded Spill
adcq -208(%rbp), %rbx ## 8-byte Folded Reload
setb -464(%rbp) ## 1-byte Folded Spill
movq %rax, %rcx
adcq %rdx, %rcx
addb $255, -216(%rbp) ## 1-byte Folded Spill
adcq -560(%rbp), %r12 ## 8-byte Folded Reload
adcq $0, %rcx
setb -512(%rbp) ## 1-byte Folded Spill
movq -56(%rbp), %rax ## 8-byte Reload
addq -128(%rbp), %rax ## 8-byte Folded Reload
adcq $0, %rcx
movq %rcx, -208(%rbp) ## 8-byte Spill
setb -41(%rbp) ## 1-byte Folded Spill
addb $255, -392(%rbp) ## 1-byte Folded Spill
movq -456(%rbp), %rax ## 8-byte Reload
adcq -192(%rbp), %rax ## 8-byte Folded Reload
setb %r10b
movq -376(%rbp), %r9 ## 8-byte Reload
movq %r9, %r14
movq -472(%rbp), %r13 ## 8-byte Reload
adcq %r13, %r14
addb $255, -440(%rbp) ## 1-byte Folded Spill
movq -496(%rbp), %rax ## 8-byte Reload
adcq %rax, -200(%rbp) ## 8-byte Folded Spill
setb -56(%rbp) ## 1-byte Folded Spill
movq -432(%rbp), %r11 ## 8-byte Reload
movq %r11, %rax
adcq %r14, %rax
movq %rax, %rdx
movq %rax, -456(%rbp) ## 8-byte Spill
movq -72(%rbp), %rax ## 8-byte Reload
shrq $63, %rax
movq %rax, -128(%rbp) ## 8-byte Spill
addb $255, -504(%rbp) ## 1-byte Folded Spill
movq -448(%rbp), %rcx ## 8-byte Reload
adcq -112(%rbp), %rcx ## 8-byte Folded Reload
setb -392(%rbp) ## 1-byte Folded Spill
adcq %rdx, %rax
movq %rax, %r8
movq %rax, -424(%rbp) ## 8-byte Spill
movq -184(%rbp), %rbx ## 8-byte Reload
movq -240(%rbp), %rdx ## 8-byte Reload
subq %rdx, %rbx
movq -232(%rbp), %rax ## 8-byte Reload
movq 36(%rax), %rcx
movq %rcx, -216(%rbp) ## 8-byte Spill
movq %rcx, %rdi
shlq $32, %rdi
subq %rcx, %rdi
movq %rdx, %rax
movl $4294967294, %edx ## imm = 0xFFFFFFFE
mulq %rdx
subq %rcx, %rdi
movq %rdi, -256(%rbp) ## 8-byte Spill
movq -120(%rbp), %rax ## 8-byte Reload
addq %rcx, %rax
movq %rdi, %rax
adcq %r15, %rax
movq %rax, -240(%rbp) ## 8-byte Spill
movq %rbx, %rcx
addq -320(%rbp), %rcx ## 8-byte Folded Reload
movq %rcx, -184(%rbp) ## 8-byte Spill
movq -304(%rbp), %rax ## 8-byte Reload
adcq %rax, %rdx
movq %rdx, -504(%rbp) ## 8-byte Spill
addb $255, -416(%rbp) ## 1-byte Folded Spill
adcq %rax, -104(%rbp) ## 8-byte Folded Spill
setb -496(%rbp) ## 1-byte Folded Spill
adcq %r8, %rcx
movq %rcx, -440(%rbp) ## 8-byte Spill
addb $255, -520(%rbp) ## 1-byte Folded Spill
movq -248(%rbp), %rax ## 8-byte Reload
adcq -280(%rbp), %rax ## 8-byte Folded Reload
setb -448(%rbp) ## 1-byte Folded Spill
movq -160(%rbp), %rax ## 8-byte Reload
leaq (%rax,%rax), %r8
movq %r8, -200(%rbp) ## 8-byte Spill
adcq %rcx, %r8
movq -312(%rbp), %r12 ## 8-byte Reload
imulq -384(%rbp), %r12 ## 8-byte Folded Reload
movq %r12, -104(%rbp) ## 8-byte Spill
movb -272(%rbp), %al ## 1-byte Reload
addb $255, %al
adcq %r8, %r12
addb $255, %r10b
adcq %r9, %r13
setb %r10b
movq -344(%rbp), %r9 ## 8-byte Reload
movq %r9, %rdi
movq -488(%rbp), %r15 ## 8-byte Reload
adcq %r15, %rdi
movq -88(%rbp), %rcx ## 8-byte Reload
movq %r11, %rdx
cmpq %rcx, %r11
movq -336(%rbp), %r11 ## 8-byte Reload
adcq %r11, %rcx
addb $255, -56(%rbp) ## 1-byte Folded Spill
adcq %rdx, %r14
setb %r13b
movq %rcx, %rax
adcq %rdi, %rax
movq %rax, -520(%rbp) ## 8-byte Spill
movq -216(%rbp), %rbx ## 8-byte Reload
movq %rbx, %rax
movl $4294967294, %edx ## imm = 0xFFFFFFFE
mulq %rdx
movq %rdx, -296(%rbp) ## 8-byte Spill
addq %rbx, -120(%rbp) ## 8-byte Folded Spill
movq -552(%rbp), %rax ## 8-byte Reload
adcq -256(%rbp), %rax ## 8-byte Folded Reload
setb -376(%rbp) ## 1-byte Folded Spill
movq %rdx, %rax
adcq %r12, %rax
movq %rax, -248(%rbp) ## 8-byte Spill
addb $255, %r10b
adcq %r9, %r15
setb %r10b
movq -176(%rbp), %r9 ## 8-byte Reload
movq %r9, %rdx
movq -536(%rbp), %r14 ## 8-byte Reload
adcq %r14, %rdx
movq -88(%rbp), %rbx ## 8-byte Reload
cmpq %rbx, %rcx
adcq %r11, %rbx
addb $255, %r13b
adcq %rdi, %rcx
setb %r13b
movq %rbx, %rax
adcq %rdx, %rax
movq %rax, -56(%rbp) ## 8-byte Spill
addb $255, -272(%rbp) ## 1-byte Folded Spill
adcq -104(%rbp), %r8 ## 8-byte Folded Reload
setb -272(%rbp) ## 1-byte Folded Spill
movq -168(%rbp), %r15 ## 8-byte Reload
cmpq %r15, -528(%rbp) ## 8-byte Folded Reload
adcq -64(%rbp), %r15 ## 8-byte Folded Reload
movb -80(%rbp), %al ## 1-byte Reload
addb $255, %al
movq %r15, %rcx
adcq -208(%rbp), %rcx ## 8-byte Folded Reload
movq %rcx, -528(%rbp) ## 8-byte Spill
addb $255, %r10b
adcq %r9, %r14
setb -432(%rbp) ## 1-byte Folded Spill
movq -144(%rbp), %rax ## 8-byte Reload
movq -192(%rbp), %rdi ## 8-byte Reload
leaq (%rdi,%rax), %rax
movq %rax, -416(%rbp) ## 8-byte Spill
adcq %rcx, %rax
movq -88(%rbp), %r14 ## 8-byte Reload
cmpq %r14, %rbx
adcq %r11, %r14
addb $255, %r13b
adcq %rdx, %rbx
setb %r8b
movq %r14, %rcx
movq %r14, -472(%rbp) ## 8-byte Spill
adcq %rax, %rcx
movq %rcx, %r9
movq %rcx, -176(%rbp) ## 8-byte Spill
addb $255, -376(%rbp) ## 1-byte Folded Spill
adcq -296(%rbp), %r12 ## 8-byte Folded Reload
setb -376(%rbp) ## 1-byte Folded Spill
xorl %ebx, %ebx
movq -400(%rbp), %rdi ## 8-byte Reload
cmpq %rdi, -328(%rbp) ## 8-byte Folded Reload
setb %bl
movq -136(%rbp), %r11 ## 8-byte Reload
movq -568(%rbp), %r10 ## 8-byte Reload
cmpq %r11, %r10
adcq -224(%rbp), %r11 ## 8-byte Folded Reload
cmpq -96(%rbp), %rdi ## 8-byte Folded Reload
movq -152(%rbp), %rcx ## 8-byte Reload
adcq %r11, %rcx
addb $255, -480(%rbp) ## 1-byte Folded Spill
adcq %rcx, %rbx
addb $255, -392(%rbp) ## 1-byte Folded Spill
movq -456(%rbp), %rcx ## 8-byte Reload
adcq -128(%rbp), %rcx ## 8-byte Folded Reload
setb %cl
movq -408(%rbp), %rdx ## 8-byte Reload
movq %rdx, %rdi
movq -520(%rbp), %r13 ## 8-byte Reload
adcq %r13, %rdi
movq %rdi, -152(%rbp) ## 8-byte Spill
xorl %r12d, %r12d
addb $255, %r8b
adcq %r14, %rax
setb -96(%rbp) ## 1-byte Folded Spill
addb $255, %cl
adcq %rdx, %r13
setb -344(%rbp) ## 1-byte Folded Spill
movq -56(%rbp), %rax ## 8-byte Reload
adcq -368(%rbp), %rax ## 8-byte Folded Reload
setb -488(%rbp) ## 1-byte Folded Spill
movq %r9, %rax
adcq -112(%rbp), %rax ## 8-byte Folded Reload
setb -408(%rbp) ## 1-byte Folded Spill
addb $255, -464(%rbp) ## 1-byte Folded Spill
adcq -544(%rbp), %r10 ## 8-byte Folded Reload
adcq $0, %rbx
xorl %r8d, %r8d
cmpq %r11, %rbx
setb %r8b
movq -168(%rbp), %rcx ## 8-byte Reload
cmpq %rcx, %r15
movq %rcx, %r10
movq -64(%rbp), %rax ## 8-byte Reload
adcq %rax, %r10
cmpq %rcx, %r10
adcq %rax, %rcx
movq %rcx, -464(%rbp) ## 8-byte Spill
cmpq -136(%rbp), %r11 ## 8-byte Folded Reload
movb -41(%rbp), %al ## 1-byte Reload
movb %al, %r12b
movq -224(%rbp), %rax ## 8-byte Reload
adcq %rcx, %rax
addb $255, -512(%rbp) ## 1-byte Folded Spill
adcq %rbx, %r12
adcq %rax, %r8
addb $255, -80(%rbp) ## 1-byte Folded Spill
adcq -208(%rbp), %r15 ## 8-byte Folded Reload
setb %r15b
movq %r10, %r9
adcq %r12, %r9
addb $255, -432(%rbp) ## 1-byte Folded Spill
movq -528(%rbp), %rax ## 8-byte Reload
adcq -416(%rbp), %rax ## 8-byte Folded Reload
adcq $0, %r9
setb %r11b
movq -120(%rbp), %rdi ## 8-byte Reload
movq %rdi, %rbx
shlq $32, %rbx
movq %rdi, %rax
movl $4294967295, %ecx ## imm = 0xFFFFFFFF
mulq %rcx
movq %rdx, -136(%rbp) ## 8-byte Spill
xorl %eax, %eax
subq %rdi, %rbx
movq %rbx, -80(%rbp) ## 8-byte Spill
setb %al
movq -240(%rbp), %rcx ## 8-byte Reload
leaq (%rcx,%rax), %rbx
xorl %edi, %edi
addq %rdx, %rbx
setb %dil
addq %rax, %rcx
adcq -248(%rbp), %rdi ## 8-byte Folded Reload
setb -224(%rbp) ## 1-byte Folded Spill
movq -232(%rbp), %rax ## 8-byte Reload
movq 40(%rax), %rcx
movq %rcx, -208(%rbp) ## 8-byte Spill
movq %rcx, %rdx
shlq $32, %rdx
subq %rcx, %rdx
subq %rcx, %rdx
movq %rdx, -328(%rbp) ## 8-byte Spill
movq %rbx, %rax
addq %rcx, %rax
movq %rdx, %rax
adcq %rdi, %rax
movq %rax, -416(%rbp) ## 8-byte Spill
addq %rcx, %rbx
movq %rbx, -240(%rbp) ## 8-byte Spill
adcq %rdx, %rdi
setb -400(%rbp) ## 1-byte Folded Spill
movq -192(%rbp), %rax ## 8-byte Reload
addq -144(%rbp), %rax ## 8-byte Folded Reload
adcq $0, %r9
setb %r14b
movq -88(%rbp), %rax ## 8-byte Reload
cmpq %rax, -472(%rbp) ## 8-byte Folded Reload
movq %rax, %rdx
movq %rax, %rdi
movq -336(%rbp), %rax ## 8-byte Reload
adcq %rax, %rdx
movb -96(%rbp), %bl ## 1-byte Reload
movl %ebx, %ecx
addb $255, %cl
movq %rdx, %rcx
adcq %r9, %rcx
movq %rcx, %r13
movq %rcx, -144(%rbp) ## 8-byte Spill
addb $255, %r15b
adcq %r10, %r12
adcq $0, %r8
movq %r8, -544(%rbp) ## 8-byte Spill
addb $255, %r11b
movzbl %r14b, %ecx
adcq %r8, %rcx
movq %rcx, -456(%rbp) ## 8-byte Spill
setb -432(%rbp) ## 1-byte Folded Spill
cmpq %rdi, %rdx
adcq %rax, %rdi
movq %rdi, -248(%rbp) ## 8-byte Spill
addb $255, %bl
adcq %r9, %rdx
setb -392(%rbp) ## 1-byte Folded Spill
adcq %rcx, %rdi
movb -408(%rbp), %al ## 1-byte Reload
addb $255, %al
movq -72(%rbp), %rcx ## 8-byte Reload
movq -128(%rbp), %rdx ## 8-byte Reload
leaq (%rdx,%rcx), %rax
adcq %r13, %rax
adcq $0, %rdi
setb -512(%rbp) ## 1-byte Folded Spill
addq %rcx, %rdx
movq %rdx, -128(%rbp) ## 8-byte Spill
adcq $0, %rdi
movq %rdi, -72(%rbp) ## 8-byte Spill
setb -41(%rbp) ## 1-byte Folded Spill
addb $255, -496(%rbp) ## 1-byte Folded Spill
movq -424(%rbp), %rax ## 8-byte Reload
adcq %rax, -184(%rbp) ## 8-byte Folded Spill
setb -424(%rbp) ## 1-byte Folded Spill
movq -504(%rbp), %r14 ## 8-byte Reload
movq %r14, %rcx
adcq -152(%rbp), %rcx ## 8-byte Folded Reload
movq -160(%rbp), %r9 ## 8-byte Reload
shrq $63, %r9
movq %r9, -192(%rbp) ## 8-byte Spill
addb $255, -448(%rbp) ## 1-byte Folded Spill
movq -440(%rbp), %rax ## 8-byte Reload
adcq -200(%rbp), %rax ## 8-byte Folded Reload
setb -496(%rbp) ## 1-byte Folded Spill
adcq %rcx, %r9
movq -312(%rbp), %rdi ## 8-byte Reload
movq %rdi, %rax
movl $4294967294, %r12d ## imm = 0xFFFFFFFE
mulq %r12
movq -288(%rbp), %r11 ## 8-byte Reload
subq %rdi, %r11
addq -264(%rbp), %r11 ## 8-byte Folded Reload
movq %r11, -288(%rbp) ## 8-byte Spill
adcq -104(%rbp), %rdx ## 8-byte Folded Reload
movq %rdx, -312(%rbp) ## 8-byte Spill
movb -272(%rbp), %al ## 1-byte Reload
addb $255, %al
adcq %r9, %r11
movb -376(%rbp), %al ## 1-byte Reload
addb $255, %al
movq -216(%rbp), %r13 ## 8-byte Reload
leaq (%r13,%r13), %rbx
movq %rbx, -184(%rbp) ## 8-byte Spill
adcq %r11, %rbx
movq -120(%rbp), %r15 ## 8-byte Reload
movq %r15, %r8
imulq -384(%rbp), %r8 ## 8-byte Folded Reload
movq %r8, -96(%rbp) ## 8-byte Spill
movb -224(%rbp), %al ## 1-byte Reload
addb $255, %al
adcq %rbx, %r8
addb $255, -344(%rbp) ## 1-byte Folded Spill
movq -56(%rbp), %rdi ## 8-byte Reload
adcq -368(%rbp), %rdi ## 8-byte Folded Reload
movq %rdi, -56(%rbp) ## 8-byte Spill
movq -208(%rbp), %r10 ## 8-byte Reload
movq %r10, %rax
mulq %r12
movb -400(%rbp), %al ## 1-byte Reload
addb $255, %al
movq %rdx, %rax
movq %rdx, -344(%rbp) ## 8-byte Spill
adcq %r8, %rax
movq %rax, -448(%rbp) ## 8-byte Spill
addb $255, -488(%rbp) ## 1-byte Folded Spill
movq -176(%rbp), %rax ## 8-byte Reload
adcq -112(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -176(%rbp) ## 8-byte Spill
movq -304(%rbp), %r12 ## 8-byte Reload
cmpq %r12, %r14
adcq -320(%rbp), %r12 ## 8-byte Folded Reload
addb $255, -424(%rbp) ## 1-byte Folded Spill
adcq %r14, -152(%rbp) ## 8-byte Folded Spill
setb -440(%rbp) ## 1-byte Folded Spill
movq %r12, %rax
adcq %rdi, %rax
movq %rax, %rdi
movq %rax, -520(%rbp) ## 8-byte Spill
addb $255, -408(%rbp) ## 1-byte Folded Spill
movq -144(%rbp), %rax ## 8-byte Reload
adcq %rax, -128(%rbp) ## 8-byte Folded Spill
addb $255, -496(%rbp) ## 1-byte Folded Spill
adcq -192(%rbp), %rcx ## 8-byte Folded Reload
setb -528(%rbp) ## 1-byte Folded Spill
movq -360(%rbp), %rax ## 8-byte Reload
adcq %rdi, %rax
movq %rax, -472(%rbp) ## 8-byte Spill
addb $255, -272(%rbp) ## 1-byte Folded Spill
adcq %r9, -288(%rbp) ## 8-byte Folded Spill
setb -480(%rbp) ## 1-byte Folded Spill
movq -312(%rbp), %rcx ## 8-byte Reload
adcq %rax, %rcx
movq %rcx, -568(%rbp) ## 8-byte Spill
shrq $63, %r13
movq %r13, -288(%rbp) ## 8-byte Spill
addb $255, -376(%rbp) ## 1-byte Folded Spill
adcq -184(%rbp), %r11 ## 8-byte Folded Reload
setb -536(%rbp) ## 1-byte Folded Spill
adcq %rcx, %r13
movq %r13, -552(%rbp) ## 8-byte Spill
movq -80(%rbp), %rcx ## 8-byte Reload
subq %r15, %rcx
movq %rcx, -80(%rbp) ## 8-byte Spill
addb $255, -224(%rbp) ## 1-byte Folded Spill
adcq -96(%rbp), %rbx ## 8-byte Folded Reload
setb -560(%rbp) ## 1-byte Folded Spill
movq -136(%rbp), %rax ## 8-byte Reload
leaq (%rcx,%rax), %rax
adcq %r13, %rax
movq %rax, -504(%rbp) ## 8-byte Spill
addb $255, -400(%rbp) ## 1-byte Folded Spill
adcq %rdx, %r8
setb -408(%rbp) ## 1-byte Folded Spill
leaq (%r10,%r10), %r9
movq %r9, -368(%rbp) ## 8-byte Spill
adcq %rax, %r9
movq -240(%rbp), %rbx ## 8-byte Reload
movq %rbx, %r15
imulq -384(%rbp), %r15 ## 8-byte Folded Reload
movq %r15, -152(%rbp) ## 8-byte Spill
movq %rbx, %rdi
shlq $32, %rdi
movq %rbx, %rax
movl $4294967295, %ecx ## imm = 0xFFFFFFFF
mulq %rcx
movq %rdx, -224(%rbp) ## 8-byte Spill
xorl %eax, %eax
subq %rbx, %rdi
movq %rdi, -400(%rbp) ## 8-byte Spill
setb %al
movq -416(%rbp), %rbx ## 8-byte Reload
leaq (%rbx,%rax), %r14
xorl %ecx, %ecx
addq %rdx, %r14
movq %r14, -144(%rbp) ## 8-byte Spill
setb %cl
addq %rax, %rbx
adcq -448(%rbp), %rcx ## 8-byte Folded Reload
movq %rcx, %r8
movq %rcx, -112(%rbp) ## 8-byte Spill
setb -376(%rbp) ## 1-byte Folded Spill
adcq %r9, %r15
movq -304(%rbp), %r13 ## 8-byte Reload
cmpq %r13, %r12
movq %r13, %rdi
movq -320(%rbp), %rbx ## 8-byte Reload
adcq %rbx, %rdi
addb $255, -440(%rbp) ## 1-byte Folded Spill
adcq -56(%rbp), %r12 ## 8-byte Folded Reload
movq -232(%rbp), %rdx ## 8-byte Reload
movq 44(%rdx), %rcx
setb %r12b
movq %rdi, %r10
movq -176(%rbp), %rdx ## 8-byte Reload
adcq %rdx, %r10
cmpq %r13, %rdi
movq %r13, %r11
adcq %rbx, %r11
cmpq %r13, %r11
adcq %rbx, %r13
movq %r13, -272(%rbp) ## 8-byte Spill
movq %rcx, %rax
shlq $32, %rax
subq %rcx, %rax
subq %rcx, %rax
movq %rax, -56(%rbp) ## 8-byte Spill
addq %rcx, %r14
movq %rcx, -232(%rbp) ## 8-byte Spill
movq %r8, %rbx
adcq %rax, %rbx
setb %bl
addb $255, %r12b
adcq %rdx, %rdi
setb -345(%rbp) ## 1-byte Folded Spill
movq -128(%rbp), %rax ## 8-byte Reload
adcq %r11, %rax
setb -416(%rbp) ## 1-byte Folded Spill
movq -72(%rbp), %rax ## 8-byte Reload
adcq %r13, %rax
setb -488(%rbp) ## 1-byte Folded Spill
movq -120(%rbp), %rax ## 8-byte Reload
movl $4294967294, %edi ## imm = 0xFFFFFFFE
mulq %rdi
movq -80(%rbp), %rax ## 8-byte Reload
addq -136(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -80(%rbp) ## 8-byte Spill
adcq -96(%rbp), %rdx ## 8-byte Folded Reload
movq %rdx, -176(%rbp) ## 8-byte Spill
addb $255, -376(%rbp) ## 1-byte Folded Spill
adcq -152(%rbp), %r9 ## 8-byte Folded Reload
setb -448(%rbp) ## 1-byte Folded Spill
setb -496(%rbp) ## 1-byte Folded Spill
movq %rcx, %rax
mulq %rdi
movq %rdx, -120(%rbp) ## 8-byte Spill
movl %ebx, %eax
addb $255, %al
movq %r15, %rax
adcq %rdx, %rax
setb -440(%rbp) ## 1-byte Folded Spill
setb -424(%rbp) ## 1-byte Folded Spill
addb $255, %bl
adcq %rdx, %r15
movq %r15, -376(%rbp) ## 8-byte Spill
addb $255, -528(%rbp) ## 1-byte Folded Spill
movq -520(%rbp), %rax ## 8-byte Reload
adcq -360(%rbp), %rax ## 8-byte Folded Reload
movq -280(%rbp), %rdi ## 8-byte Reload
movq %rdi, %r13
setb %r14b
adcq %r10, %r13
movq -104(%rbp), %r15 ## 8-byte Reload
movq -312(%rbp), %rcx ## 8-byte Reload
cmpq %r15, %rcx
movq %r15, %rbx
movq -264(%rbp), %r12 ## 8-byte Reload
adcq %r12, %rbx
addb $255, -480(%rbp) ## 1-byte Folded Spill
adcq %rcx, -472(%rbp) ## 8-byte Folded Spill
movq %rbx, %rcx
setb %r9b
adcq %r13, %rcx
addb $255, -536(%rbp) ## 1-byte Folded Spill
movq -568(%rbp), %rax ## 8-byte Reload
adcq -288(%rbp), %rax ## 8-byte Folded Reload
movq -256(%rbp), %r8 ## 8-byte Reload
setb -480(%rbp) ## 1-byte Folded Spill
adcq %rcx, %r8
addb $255, -560(%rbp) ## 1-byte Folded Spill
movq -552(%rbp), %rax ## 8-byte Reload
adcq %rax, -80(%rbp) ## 8-byte Folded Spill
movq -176(%rbp), %rax ## 8-byte Reload
setb -312(%rbp) ## 1-byte Folded Spill
adcq %r8, %rax
movq %rax, -80(%rbp) ## 8-byte Spill
addb $255, -345(%rbp) ## 1-byte Folded Spill
adcq -128(%rbp), %r11 ## 8-byte Folded Reload
addb $255, %r14b
adcq %rdi, %r10
setb -280(%rbp) ## 1-byte Folded Spill
movq -200(%rbp), %r10 ## 8-byte Reload
adcq %r11, %r10
xorl %r14d, %r14d
movq -464(%rbp), %rdi ## 8-byte Reload
cmpq %rdi, -544(%rbp) ## 8-byte Folded Reload
setb %r14b
movq -88(%rbp), %rax ## 8-byte Reload
cmpq %rax, -248(%rbp) ## 8-byte Folded Reload
adcq -336(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -360(%rbp) ## 8-byte Spill
cmpq -168(%rbp), %rdi ## 8-byte Folded Reload
movq -64(%rbp), %rdi ## 8-byte Reload
adcq %rax, %rdi
addb $255, -432(%rbp) ## 1-byte Folded Spill
adcq %rdi, %r14
cmpq %r15, %rbx
adcq %r12, %r15
addb $255, %r9b
adcq %r13, %rbx
setb -64(%rbp) ## 1-byte Folded Spill
movq %r15, %rax
adcq %r10, %rax
movq %rax, -128(%rbp) ## 8-byte Spill
xorl %r12d, %r12d
addb $255, -480(%rbp) ## 1-byte Folded Spill
adcq -256(%rbp), %rcx ## 8-byte Folded Reload
setb -168(%rbp) ## 1-byte Folded Spill
movq -296(%rbp), %r9 ## 8-byte Reload
adcq %rax, %r9
addb $255, -416(%rbp) ## 1-byte Folded Spill
movq -72(%rbp), %rbx ## 8-byte Reload
adcq -272(%rbp), %rbx ## 8-byte Folded Reload
movq %rbx, -72(%rbp) ## 8-byte Spill
movq -96(%rbp), %rdi ## 8-byte Reload
movq -176(%rbp), %rax ## 8-byte Reload
cmpq %rdi, %rax
adcq -136(%rbp), %rdi ## 8-byte Folded Reload
addb $255, -312(%rbp) ## 1-byte Folded Spill
adcq %rax, %r8
setb %r8b
movq %rdi, %rax
movq %rdi, -432(%rbp) ## 8-byte Spill
adcq %r9, %rax
movq %rax, -256(%rbp) ## 8-byte Spill
addb $255, -280(%rbp) ## 1-byte Folded Spill
adcq -200(%rbp), %r11 ## 8-byte Folded Reload
setb -176(%rbp) ## 1-byte Folded Spill
movq -160(%rbp), %rcx ## 8-byte Reload
movq -192(%rbp), %rdx ## 8-byte Reload
leaq (%rdx,%rcx), %r13
movq %r13, -280(%rbp) ## 8-byte Spill
adcq %rbx, %r13
movq -104(%rbp), %rcx ## 8-byte Reload
cmpq %rcx, %r15
adcq -264(%rbp), %rcx ## 8-byte Folded Reload
addb $255, -64(%rbp) ## 1-byte Folded Spill
adcq %r10, %r15
setb %bl
movq %r13, %rdx
adcq %rcx, %rdx
setb -64(%rbp) ## 1-byte Folded Spill
setb -200(%rbp) ## 1-byte Folded Spill
addb $255, %r8b
adcq %rdi, %r9
setb %r15b
movb %r15b, -464(%rbp) ## 1-byte Spill
movq -232(%rbp), %rdx ## 8-byte Reload
addq %rdx, -144(%rbp) ## 8-byte Folded Spill
movq -112(%rbp), %rdx ## 8-byte Reload
adcq -56(%rbp), %rdx ## 8-byte Folded Reload
movq %rdx, -112(%rbp) ## 8-byte Spill
addb $255, %bl
adcq %rcx, %r13
addb $255, -392(%rbp) ## 1-byte Folded Spill
movq -456(%rbp), %rdx ## 8-byte Reload
adcq %rdx, -248(%rbp) ## 8-byte Folded Spill
adcq $0, %r14
xorl %r10d, %r10d
movq -360(%rbp), %rax ## 8-byte Reload
cmpq %rax, %r14
setb %r10b
movq -304(%rbp), %rdi ## 8-byte Reload
cmpq %rdi, -272(%rbp) ## 8-byte Folded Reload
movq %rdi, %r11
movq -320(%rbp), %rdx ## 8-byte Reload
adcq %rdx, %r11
cmpq %rdi, %r11
adcq %rdx, %rdi
movq %rdi, -312(%rbp) ## 8-byte Spill
cmpq -88(%rbp), %rax ## 8-byte Folded Reload
movb -41(%rbp), %dl ## 1-byte Reload
movb %dl, %r12b
movq -336(%rbp), %rdx ## 8-byte Reload
adcq %rdi, %rdx
addb $255, -512(%rbp) ## 1-byte Folded Spill
adcq %r14, %r12
adcq %rdx, %r10
movb -488(%rbp), %bl ## 1-byte Reload
movl %ebx, %edx
addb $255, %dl
movq %r11, %rdi
adcq %r12, %rdi
addb $255, -176(%rbp) ## 1-byte Folded Spill
movq -72(%rbp), %rax ## 8-byte Reload
adcq -280(%rbp), %rax ## 8-byte Folded Reload
adcq $0, %rdi
setb %r8b
addb $255, -168(%rbp) ## 1-byte Folded Spill
movq -128(%rbp), %rax ## 8-byte Reload
adcq -296(%rbp), %rax ## 8-byte Folded Reload
setb -168(%rbp) ## 1-byte Folded Spill
movq -184(%rbp), %rax ## 8-byte Reload
adcq %r13, %rax
movq %rax, -296(%rbp) ## 8-byte Spill
movq -96(%rbp), %r9 ## 8-byte Reload
cmpq %r9, -432(%rbp) ## 8-byte Folded Reload
adcq -136(%rbp), %r9 ## 8-byte Folded Reload
addb $255, %r15b
movq %rax, %rdx
adcq %r9, %rdx
setb -128(%rbp) ## 1-byte Folded Spill
movq -192(%rbp), %rdx ## 8-byte Reload
addq -160(%rbp), %rdx ## 8-byte Folded Reload
adcq $0, %rdi
setb %r14b
movq -104(%rbp), %rax ## 8-byte Reload
cmpq %rax, %rcx
movq %rax, %rcx
movq -264(%rbp), %rdx ## 8-byte Reload
adcq %rdx, %rcx
addb $255, -200(%rbp) ## 1-byte Folded Spill
movq %rcx, %r15
adcq %rdi, %r15
addb $255, %bl
adcq %r11, %r12
adcq $0, %r10
movq %r10, -512(%rbp) ## 8-byte Spill
addb $255, %r8b
movzbl %r14b, %eax
adcq %r10, %rax
movq %rax, -248(%rbp) ## 8-byte Spill
setb -272(%rbp) ## 1-byte Folded Spill
movq -104(%rbp), %r14 ## 8-byte Reload
cmpq %r14, %rcx
adcq %rdx, %r14
movq %r14, -160(%rbp) ## 8-byte Spill
addb $255, -64(%rbp) ## 1-byte Folded Spill
adcq %rdi, %rcx
setb -280(%rbp) ## 1-byte Folded Spill
adcq %rax, %r14
addb $255, -168(%rbp) ## 1-byte Folded Spill
adcq -184(%rbp), %r13 ## 8-byte Folded Reload
setb %r13b
movq -216(%rbp), %rcx ## 8-byte Reload
movq -288(%rbp), %r11 ## 8-byte Reload
leaq (%r11,%rcx), %rax
adcq %r15, %rax
adcq $0, %r14
setb -176(%rbp) ## 1-byte Folded Spill
addq %rcx, %r11
adcq $0, %r14
setb -360(%rbp) ## 1-byte Folded Spill
movq -208(%rbp), %rdi ## 8-byte Reload
shrq $63, %rdi
movq %rdi, -88(%rbp) ## 8-byte Spill
addb $255, -408(%rbp) ## 1-byte Folded Spill
movq -504(%rbp), %rax ## 8-byte Reload
adcq -368(%rbp), %rax ## 8-byte Folded Reload
setb %r12b
adcq -80(%rbp), %rdi ## 8-byte Folded Reload
movq -240(%rbp), %rbx ## 8-byte Reload
movq %rbx, %rax
movl $4294967294, %ecx ## imm = 0xFFFFFFFE
mulq %rcx
movq -400(%rbp), %r8 ## 8-byte Reload
subq %rbx, %r8
addq -224(%rbp), %r8 ## 8-byte Folded Reload
adcq -152(%rbp), %rdx ## 8-byte Folded Reload
movq %rdx, -184(%rbp) ## 8-byte Spill
addb $255, -496(%rbp) ## 1-byte Folded Spill
movq %r8, %rax
adcq %rdi, %rax
addb $255, -464(%rbp) ## 1-byte Folded Spill
adcq %r9, -296(%rbp) ## 8-byte Folded Spill
addb $255, -424(%rbp) ## 1-byte Folded Spill
movq -232(%rbp), %rbx ## 8-byte Reload
leaq (%rbx,%rbx), %r10
movq %r10, %rdx
movq %r10, -240(%rbp) ## 8-byte Spill
adcq %rax, %rdx
movq %rdx, -192(%rbp) ## 8-byte Spill
addb $255, %r13b
adcq %r15, %r11
movq -96(%rbp), %rcx ## 8-byte Reload
cmpq %rcx, %r9
movq -136(%rbp), %r13 ## 8-byte Reload
adcq %r13, %rcx
movb -128(%rbp), %r9b ## 1-byte Reload
movl %r9d, %edx
addb $255, %dl
movq %rcx, %rdx
adcq %r11, %rdx
movq %rdx, -288(%rbp) ## 8-byte Spill
addb $255, %r12b
movq -88(%rbp), %rdx ## 8-byte Reload
adcq %rdx, -80(%rbp) ## 8-byte Folded Spill
setb -392(%rbp) ## 1-byte Folded Spill
movq -328(%rbp), %r15 ## 8-byte Reload
adcq -256(%rbp), %r15 ## 8-byte Folded Reload
addb $255, -448(%rbp) ## 1-byte Folded Spill
adcq %rdi, %r8
setb -464(%rbp) ## 1-byte Folded Spill
movq -184(%rbp), %r12 ## 8-byte Reload
adcq %r15, %r12
shrq $63, %rbx
movq %rbx, -168(%rbp) ## 8-byte Spill
addb $255, -440(%rbp) ## 1-byte Folded Spill
adcq %r10, %rax
setb -41(%rbp) ## 1-byte Folded Spill
movq %rbx, %rax
adcq %r12, %rax
movq %rax, -408(%rbp) ## 8-byte Spill
movq -96(%rbp), %rax ## 8-byte Reload
cmpq %rax, %rcx
adcq %r13, %rax
movq %rax, -200(%rbp) ## 8-byte Spill
addb $255, %r9b
adcq %r11, %rcx
setb -400(%rbp) ## 1-byte Folded Spill
adcq %r14, %rax
movq %rax, -72(%rbp) ## 8-byte Spill
movq -144(%rbp), %rdi ## 8-byte Reload
movq %rdi, %rax
imulq -384(%rbp), %rax ## 8-byte Folded Reload
movq %rax, %rbx
movq %rax, -64(%rbp) ## 8-byte Spill
movq %rdi, %r10
shlq $32, %r10
movq %rdi, %rax
movl $4294967295, %ecx ## imm = 0xFFFFFFFF
mulq %rcx
movq %rdx, -128(%rbp) ## 8-byte Spill
xorl %eax, %eax
subq %rdi, %r10
setb %al
movq -112(%rbp), %rdi ## 8-byte Reload
leaq (%rdi,%rax), %rcx
addq %rdx, %rcx
movq %rcx, -216(%rbp) ## 8-byte Spill
movl $0, %ecx
setb %cl
addq %rax, %rdi
adcq -376(%rbp), %rcx ## 8-byte Folded Reload
movq %rcx, -80(%rbp) ## 8-byte Spill
setb -376(%rbp) ## 1-byte Folded Spill
movq %rbx, %rax
adcq -192(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -336(%rbp) ## 8-byte Spill
addb $255, -392(%rbp) ## 1-byte Folded Spill
movq -256(%rbp), %rax ## 8-byte Reload
adcq -328(%rbp), %rax ## 8-byte Folded Reload
setb %r13b
movq -344(%rbp), %rax ## 8-byte Reload
movq -296(%rbp), %r8 ## 8-byte Reload
adcq %r8, %rax
movq -152(%rbp), %rdi ## 8-byte Reload
movq -184(%rbp), %rcx ## 8-byte Reload
cmpq %rdi, %rcx
adcq -224(%rbp), %rdi ## 8-byte Folded Reload
addb $255, -464(%rbp) ## 1-byte Folded Spill
adcq %rcx, %r15
setb %r11b
movq %rdi, %rdx
adcq %rax, %rdx
addb $255, -41(%rbp) ## 1-byte Folded Spill
adcq -168(%rbp), %r12 ## 8-byte Folded Reload
setb %r12b
movq -56(%rbp), %rcx ## 8-byte Reload
adcq %rdx, %rcx
movq %rcx, -184(%rbp) ## 8-byte Spill
xorl %r15d, %r15d
movq -312(%rbp), %rbx ## 8-byte Reload
cmpq %rbx, -512(%rbp) ## 8-byte Folded Reload
setb %r15b
movq -104(%rbp), %r9 ## 8-byte Reload
cmpq %r9, -160(%rbp) ## 8-byte Folded Reload
adcq -264(%rbp), %r9 ## 8-byte Folded Reload
movq %r9, -256(%rbp) ## 8-byte Spill
cmpq -304(%rbp), %rbx ## 8-byte Folded Reload
movq -320(%rbp), %rbx ## 8-byte Reload
adcq %r9, %rbx
addb $255, -272(%rbp) ## 1-byte Folded Spill
adcq %rbx, %r15
addb $255, -400(%rbp) ## 1-byte Folded Spill
adcq -200(%rbp), %r14 ## 8-byte Folded Reload
setb -112(%rbp) ## 1-byte Folded Spill
addb $255, %r13b
adcq -344(%rbp), %r8 ## 8-byte Folded Reload
setb %r14b
movq -368(%rbp), %r9 ## 8-byte Reload
movq %r9, %r8
adcq -288(%rbp), %r8 ## 8-byte Folded Reload
movq -152(%rbp), %rbx ## 8-byte Reload
cmpq %rbx, %rdi
movq -224(%rbp), %r13 ## 8-byte Reload
adcq %r13, %rbx
addb $255, %r11b
adcq %rax, %rdi
setb -304(%rbp) ## 1-byte Folded Spill
movq %rbx, %rcx
adcq %r8, %rcx
addb $255, %r12b
adcq -56(%rbp), %rdx ## 8-byte Folded Reload
setb -296(%rbp) ## 1-byte Folded Spill
movq -120(%rbp), %rax ## 8-byte Reload
adcq %rcx, %rax
movq %rax, -56(%rbp) ## 8-byte Spill
movq -144(%rbp), %rdi ## 8-byte Reload
movq %rdi, %rax
movl $4294967294, %edx ## imm = 0xFFFFFFFE
mulq %rdx
subq %rdi, %r10
addq -128(%rbp), %r10 ## 8-byte Folded Reload
movq -64(%rbp), %rax ## 8-byte Reload
adcq %rax, %rdx
addb $255, -376(%rbp) ## 1-byte Folded Spill
adcq %rax, -192(%rbp) ## 8-byte Folded Spill
setb %r11b
movq %r10, %rax
movq -408(%rbp), %r12 ## 8-byte Reload
adcq %r12, %rax
movq %rax, -320(%rbp) ## 8-byte Spill
addb $255, %r14b
adcq %r9, -288(%rbp) ## 8-byte Folded Spill
setb -192(%rbp) ## 1-byte Folded Spill
movq -208(%rbp), %rax ## 8-byte Reload
movq -88(%rbp), %rdi ## 8-byte Reload
leaq (%rdi,%rax), %rdi
movq %rdi, -288(%rbp) ## 8-byte Spill
adcq -72(%rbp), %rdi ## 8-byte Folded Reload
movq -152(%rbp), %r9 ## 8-byte Reload
cmpq %r9, %rbx
adcq %r13, %r9
addb $255, -304(%rbp) ## 1-byte Folded Spill
adcq %r8, %rbx
setb %r8b
movq %r9, %rax
adcq %rdi, %rax
addb $255, -296(%rbp) ## 1-byte Folded Spill
adcq -120(%rbp), %rcx ## 8-byte Folded Reload
setb %cl
movq -240(%rbp), %r14 ## 8-byte Reload
movq %r14, %rbx
adcq %rax, %rbx
movq %rbx, -144(%rbp) ## 8-byte Spill
addb $255, %r11b
adcq %r12, %r10
setb %r11b
movq %rdx, %rbx
movq -184(%rbp), %r12 ## 8-byte Reload
adcq %r12, %rbx
movq %rbx, -120(%rbp) ## 8-byte Spill
xorl %r10d, %r10d
addb $255, %cl
adcq %r14, %rax
setb -368(%rbp) ## 1-byte Folded Spill
setb -328(%rbp) ## 1-byte Folded Spill
addb $255, %r8b
adcq %r9, %rdi
setb -296(%rbp) ## 1-byte Folded Spill
movq -64(%rbp), %r8 ## 8-byte Reload
cmpq %r8, %rdx
movq -128(%rbp), %rax ## 8-byte Reload
adcq %rax, %r8
addb $255, %r11b
adcq %r12, %rdx
setb %r13b
movq %r8, %rcx
adcq -56(%rbp), %rcx ## 8-byte Folded Reload
movq %rcx, -304(%rbp) ## 8-byte Spill
addb $255, -280(%rbp) ## 1-byte Folded Spill
movq -248(%rbp), %rcx ## 8-byte Reload
adcq %rcx, -160(%rbp) ## 8-byte Folded Spill
adcq $0, %r15
xorl %r14d, %r14d
movq -256(%rbp), %rdx ## 8-byte Reload
cmpq %rdx, %r15
setb %r14b
movq -96(%rbp), %rcx ## 8-byte Reload
cmpq %rcx, -200(%rbp) ## 8-byte Folded Reload
movq %rcx, %r12
movq -136(%rbp), %rdi ## 8-byte Reload
adcq %rdi, %r12
cmpq %rcx, %r12
movq %rcx, %rbx
adcq %rdi, %rbx
cmpq -104(%rbp), %rdx ## 8-byte Folded Reload
movb -360(%rbp), %cl ## 1-byte Reload
movb %cl, %r10b
movq -264(%rbp), %rcx ## 8-byte Reload
adcq %rbx, %rcx
movq %rbx, %rdi
addb $255, -176(%rbp) ## 1-byte Folded Spill
adcq %r15, %r10
adcq %rcx, %r14
movb -112(%rbp), %cl ## 1-byte Reload
addb $255, %cl
movq %r12, %rcx
adcq %r10, %rcx
addb $255, -192(%rbp) ## 1-byte Folded Spill
movq -288(%rbp), %rdx ## 8-byte Reload
adcq %rdx, -72(%rbp) ## 8-byte Folded Spill
adcq $0, %rcx
setb -160(%rbp) ## 1-byte Folded Spill
movq -88(%rbp), %rdx ## 8-byte Reload
addq -208(%rbp), %rdx ## 8-byte Folded Reload
adcq $0, %rcx
setb -72(%rbp) ## 1-byte Folded Spill
movq -64(%rbp), %r11 ## 8-byte Reload
cmpq %r11, %r8
adcq %rax, %r11
addb $255, %r13b
adcq -56(%rbp), %r8 ## 8-byte Folded Reload
setb -264(%rbp) ## 1-byte Folded Spill
movq %r11, %rax
adcq -144(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -88(%rbp) ## 8-byte Spill
movq -152(%rbp), %r13 ## 8-byte Reload
cmpq %r13, %r9
movq %r13, %rbx
movq -224(%rbp), %r15 ## 8-byte Reload
adcq %r15, %rbx
movb -296(%rbp), %al ## 1-byte Reload
addb $255, %al
movq %rbx, %r8
adcq %rcx, %r8
xorl %r9d, %r9d
addb $255, -328(%rbp) ## 1-byte Folded Spill
movq -232(%rbp), %rax ## 8-byte Reload
movq -168(%rbp), %rdx ## 8-byte Reload
leaq (%rdx,%rax), %rax
movq %rax, -56(%rbp) ## 8-byte Spill
adcq %r8, %rax
movq %rax, -208(%rbp) ## 8-byte Spill
addb $255, -112(%rbp) ## 1-byte Folded Spill
adcq %r12, %r10
adcq $0, %r14
xorl %r10d, %r10d
cmpq %rdi, %r14
setb %r10b
cmpq %r13, %rbx
movq %r13, %r12
adcq %r15, %r12
cmpq %r13, %r12
adcq %r15, %r13
movq %r13, -104(%rbp) ## 8-byte Spill
cmpq -96(%rbp), %rdi ## 8-byte Folded Reload
movb -72(%rbp), %al ## 1-byte Reload
movb %al, %r9b
movq -136(%rbp), %rdx ## 8-byte Reload
adcq %r13, %rdx
addb $255, -160(%rbp) ## 1-byte Folded Spill
adcq %r14, %r9
adcq %rdx, %r10
addb $255, -296(%rbp) ## 1-byte Folded Spill
adcq %rcx, %rbx
setb %r14b
movq %r12, %rdi
adcq %r9, %rdi
addb $255, -368(%rbp) ## 1-byte Folded Spill
adcq -56(%rbp), %r8 ## 8-byte Folded Reload
adcq $0, %rdi
setb %r15b
movq -64(%rbp), %r8 ## 8-byte Reload
cmpq %r8, %r11
movq %r8, %r13
movq -128(%rbp), %rdx ## 8-byte Reload
adcq %rdx, %r13
addb $255, -264(%rbp) ## 1-byte Folded Spill
adcq -144(%rbp), %r11 ## 8-byte Folded Reload
setb %r11b
movq %r13, %rcx
movq -208(%rbp), %rax ## 8-byte Reload
adcq %rax, %rcx
movq %rcx, -264(%rbp) ## 8-byte Spill
movq -168(%rbp), %rcx ## 8-byte Reload
addq -232(%rbp), %rcx ## 8-byte Folded Reload
adcq $0, %rdi
setb %cl
movl %r15d, %ebx
addb $255, %bl
movzbl %cl, %ecx
movq %rcx, %rbx
adcq $0, %rbx
movq %rbx, -232(%rbp) ## 8-byte Spill
cmpq %r8, %r13
movq %r8, %rbx
adcq %rdx, %rbx
addb $255, %r11b
adcq %rax, %r13
setb %al
movq %rbx, %r13
adcq %rdi, %r13
addb $255, %r14b
adcq %r12, %r9
adcq $0, %r10
addb $255, %r15b
adcq %r10, %rcx
setb %r15b
addb $255, %al
adcq %rbx, %rdi
setb %r11b
movq %rcx, %rax
adcq $0, %rax
movq %rax, -160(%rbp) ## 8-byte Spill
xorl %r14d, %r14d
movq -104(%rbp), %rdi ## 8-byte Reload
cmpq %rdi, %r10
setb %r14b
movq %r8, %rax
cmpq %r8, %rbx
movq %r8, %rbx
movq %rdx, %r9
adcq %rdx, %rbx
cmpq %r8, %rbx
movq %rbx, %r8
movq %rbx, -136(%rbp) ## 8-byte Spill
movq %rax, %rdx
adcq %r9, %rdx
cmpq -152(%rbp), %rdi ## 8-byte Folded Reload
movq -224(%rbp), %rbx ## 8-byte Reload
adcq %rdx, %rbx
addb $255, %r15b
movq %rbx, %rdi
movq %rbx, %r15
adcq %r14, %rdi
movl %r11d, %ebx
addb $255, %bl
adcq %r8, %rcx
adcq $0, %rdi
movq %rdi, -168(%rbp) ## 8-byte Spill
addb $255, %r11b
adcq -232(%rbp), %r10 ## 8-byte Folded Reload
adcq %r14, %r15
xorl %ecx, %ecx
cmpq %rax, %rdx
setb %cl
cmpq %rdx, %rdi
adcq %r9, %rcx
movq %rcx, -64(%rbp) ## 8-byte Spill
movl $4294967295, %edx ## imm = 0xFFFFFFFF
movq -216(%rbp), %r11 ## 8-byte Reload
cmpq %rdx, %r11
movq -80(%rbp), %rax ## 8-byte Reload
movq %rax, %rcx
sbbq $0, %rcx
movq %rcx, -104(%rbp) ## 8-byte Spill
cmpq %rcx, %rax
movq -336(%rbp), %rax ## 8-byte Reload
movq %rax, %rcx
sbbq $0, %rcx
movq %rcx, -232(%rbp) ## 8-byte Spill
cmpq %rcx, %rax
movabsq $-4294967295, %rax ## imm = 0xFFFFFFFF00000001
movq -320(%rbp), %rdi ## 8-byte Reload
leaq (%rdi,%rax), %rbx
movq %rbx, %rcx
sbbq $0, %rcx
movq %rcx, -96(%rbp) ## 8-byte Spill
cmpq %rdx, %rdi
movl $0, %edx
sbbq %rdx, %rdx
cmpq %rcx, %rbx
sbbq $0, %rdx
movq -120(%rbp), %rdi ## 8-byte Reload
leaq (%rdi,%rax), %rcx
addq %rcx, %rdx
incq %rdx
movq %rdx, -152(%rbp) ## 8-byte Spill
movq %rdi, %rcx
shrq %rcx
cmpq $2147483647, %rcx ## imm = 0x7FFFFFFF
movl $0, %r12d
sbbq %r12, %r12
leaq (%rdi,%rax), %rcx
incq %rcx
cmpq %rdx, %rcx
sbbq $0, %r12
movq -304(%rbp), %rdi ## 8-byte Reload
leaq (%rdi,%rax), %rcx
addq %rcx, %r12
movl $4294967295, %edx ## imm = 0xFFFFFFFF
cmpq %rdx, %rdi
movl $0, %r14d
sbbq %r14, %r14
cmpq %r12, %rcx
sbbq $0, %r14
movq -88(%rbp), %rdi ## 8-byte Reload
leaq (%rdi,%rax), %rcx
addq %rcx, %r14
cmpq %rdx, %rdi
movl $0, %r10d
sbbq %r10, %r10
cmpq %r14, %rcx
sbbq $0, %r10
movq -264(%rbp), %rdi ## 8-byte Reload
leaq (%rdi,%rax), %rcx
addq %rcx, %r10
cmpq %rdx, %rdi
movl $0, %r9d
sbbq %r9, %r9
cmpq %r10, %rcx
sbbq $0, %r9
movq %r13, -144(%rbp) ## 8-byte Spill
leaq (%rax,%r13), %rcx
addq %rcx, %r9
cmpq %rdx, %r13
movl $4294967295, %edi ## imm = 0xFFFFFFFF
movl $0, %r8d
sbbq %r8, %r8
cmpq %r9, %rcx
sbbq $0, %r8
movq -136(%rbp), %rcx ## 8-byte Reload
addq -160(%rbp), %rcx ## 8-byte Folded Reload
movq %rcx, -136(%rbp) ## 8-byte Spill
adcq %rax, %r15
leaq (%rcx,%rax), %rbx
addq %rbx, %r8
cmpq %rdi, %rcx
movl $0, %edx
sbbq %rdx, %rdx
cmpq %r8, %rbx
sbbq $0, %rdx
addq %r15, %rdx
leaq (%r11,%rax), %r13
movq %r11, %rbx
movq -64(%rbp), %rcx ## 8-byte Reload
addq %rcx, %rax
cmpq %rdi, -168(%rbp) ## 8-byte Folded Reload
movl $0, %r11d
sbbq %r11, %r11
cmpq %rdx, %r15
sbbq $0, %r11
addq %rax, %r11
cmpq %rdi, %rcx
movl $0, %r15d
sbbq %r15, %r15
xorl %ecx, %ecx
cmpq %r11, %rax
setb %cl
xorl %eax, %eax
cmpq %rcx, %r15
setne %al
negq %rax
movq -384(%rbp), %rcx ## 8-byte Reload
xorq %rax, %rcx
andq %rax, %rbx
andq %rcx, %r13
orq %rbx, %r13
movq -80(%rbp), %r15 ## 8-byte Reload
andq %rax, %r15
movq -104(%rbp), %rbx ## 8-byte Reload
andq %rcx, %rbx
orq %r15, %rbx
movq %rbx, -104(%rbp) ## 8-byte Spill
movq -336(%rbp), %rbx ## 8-byte Reload
andq %rax, %rbx
movq -232(%rbp), %rdi ## 8-byte Reload
andq %rcx, %rdi
orq %rbx, %rdi
movq %rdi, -232(%rbp) ## 8-byte Spill
movq -320(%rbp), %rbx ## 8-byte Reload
andq %rax, %rbx
movq -96(%rbp), %rdi ## 8-byte Reload
andq %rcx, %rdi
orq %rbx, %rdi
movq %rdi, -96(%rbp) ## 8-byte Spill
movq -120(%rbp), %rbx ## 8-byte Reload
andq %rax, %rbx
movq -152(%rbp), %r15 ## 8-byte Reload
andq %rcx, %r15
orq %rbx, %r15
movq -304(%rbp), %rbx ## 8-byte Reload
andq %rax, %rbx
andq %rcx, %r12
orq %rbx, %r12
movq -88(%rbp), %rbx ## 8-byte Reload
andq %rax, %rbx
andq %rcx, %r14
orq %rbx, %r14
movq -264(%rbp), %rbx ## 8-byte Reload
andq %rax, %rbx
andq %rcx, %r10
orq %rbx, %r10
movq -144(%rbp), %rbx ## 8-byte Reload
andq %rax, %rbx
andq %rcx, %r9
orq %rbx, %r9
movq -136(%rbp), %rdi ## 8-byte Reload
andq %rax, %rdi
andq %rcx, %r8
orq %rdi, %r8
movq -168(%rbp), %rbx ## 8-byte Reload
andq %rax, %rbx
andq %rcx, %rdx
orq %rbx, %rdx
andq -64(%rbp), %rax ## 8-byte Folded Reload
andq %r11, %rcx
orq %rax, %rcx
movq %r13, (%rsi)
movq -104(%rbp), %rax ## 8-byte Reload
movq %rax, 4(%rsi)
movq -232(%rbp), %rax ## 8-byte Reload
movq %rax, 8(%rsi)
movq -96(%rbp), %rax ## 8-byte Reload
movq %rax, 12(%rsi)
movq %r15, 16(%rsi)
movq %r12, 20(%rsi)
movq %r14, 24(%rsi)
movq %r10, 28(%rsi)
movq %r9, 32(%rsi)
movq %r8, 36(%rsi)
movq %rdx, 40(%rsi)
movq %rcx, 44(%rsi)
addq $400, %rsp ## imm = 0x190
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _fiat_p384_nonzero ## -- Begin function fiat_p384_nonzero
.p2align 4, 0x90
_fiat_p384_nonzero: ## @fiat_p384_nonzero
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq 4(%rdi), %rax
orq (%rdi), %rax
orq 8(%rdi), %rax
orq 12(%rdi), %rax
orq 16(%rdi), %rax
orq 20(%rdi), %rax
orq 24(%rdi), %rax
orq 28(%rdi), %rax
orq 32(%rdi), %rax
orq 36(%rdi), %rax
orq 40(%rdi), %rax
orq 44(%rdi), %rax
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _fiat_p384_selectznz ## -- Begin function fiat_p384_selectznz
.p2align 4, 0x90
_fiat_p384_selectznz: ## @fiat_p384_selectznz
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rcx, %r10
movq %rdx, %r9
xorl %r8d, %r8d
negq %rdi
sbbq %r8, %r8
movl $4294967295, %eax ## imm = 0xFFFFFFFF
xorq %r8, %rax
movq (%rdx), %rdi
andq %r8, %rdi
movq (%rsi), %rcx
andq %rax, %rcx
orq %rdi, %rcx
movq %rcx, -56(%rbp) ## 8-byte Spill
movq 4(%rdx), %rdi
andq %r8, %rdi
movq 4(%rsi), %rcx
andq %rax, %rcx
orq %rdi, %rcx
movq %rcx, -48(%rbp) ## 8-byte Spill
movq 8(%rdx), %rdi
andq %r8, %rdi
movq 8(%rsi), %r11
andq %rax, %r11
orq %rdi, %r11
movq 12(%rdx), %rdi
andq %r8, %rdi
movq 12(%rsi), %r14
andq %rax, %r14
orq %rdi, %r14
movq 16(%rdx), %rdi
andq %r8, %rdi
movq 16(%rsi), %r15
andq %rax, %r15
orq %rdi, %r15
movq 20(%rdx), %rdi
andq %r8, %rdi
movq 20(%rsi), %r12
andq %rax, %r12
orq %rdi, %r12
movq 24(%rdx), %rdi
andq %r8, %rdi
movq 24(%rsi), %r13
andq %rax, %r13
orq %rdi, %r13
movq 28(%rdx), %rbx
andq %r8, %rbx
movq 28(%rsi), %rdi
andq %rax, %rdi
orq %rbx, %rdi
movq 32(%rdx), %rcx
andq %r8, %rcx
movq 32(%rsi), %rbx
andq %rax, %rbx
orq %rcx, %rbx
movq 36(%rdx), %rcx
andq %r8, %rcx
movq 36(%rsi), %rdx
andq %rax, %rdx
orq %rcx, %rdx
movq 40(%r9), %rcx
andq %r8, %rcx
andq 44(%r9), %r8
movq 40(%rsi), %r9
andq %rax, %r9
orq %rcx, %r9
andq 44(%rsi), %rax
orq %r8, %rax
movq -56(%rbp), %rcx ## 8-byte Reload
movq %rcx, (%r10)
movq -48(%rbp), %rcx ## 8-byte Reload
movq %rcx, 4(%r10)
movq %r11, 8(%r10)
movq %r14, 12(%r10)
movq %r15, 16(%r10)
movq %r12, 20(%r10)
movq %r13, 24(%r10)
movq %rdi, 28(%r10)
movq %rbx, 32(%r10)
movq %rdx, 36(%r10)
movq %r9, 40(%r10)
movq %rax, 44(%r10)
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _fiat_p384_to_bytes ## -- Begin function fiat_p384_to_bytes
.p2align 4, 0x90
_fiat_p384_to_bytes: ## @fiat_p384_to_bytes
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq (%rdi), %rax
movq 4(%rdi), %rcx
movq 8(%rdi), %rdx
movq 12(%rdi), %rbx
movq 16(%rdi), %r13
movq 20(%rdi), %r12
movq 24(%rdi), %r15
movq 28(%rdi), %r14
movq 32(%rdi), %r11
movq 36(%rdi), %r10
movq 40(%rdi), %r9
movq 44(%rdi), %r8
movb %al, (%rsi)
movb %ah, 1(%rsi)
movq %rax, %rdi
shrq $16, %rax
movb %al, 2(%rsi)
shrq $24, %rdi
movb %dil, 3(%rsi)
movq %rcx, %rax
movb %cl, 4(%rsi)
movb %ch, 5(%rsi)
shrq $16, %rcx
movb %cl, 6(%rsi)
shrq $24, %rax
movb %al, 7(%rsi)
movq %rdx, %rax
movb %dl, 8(%rsi)
movb %dh, 9(%rsi)
shrq $16, %rdx
movb %dl, 10(%rsi)
shrq $24, %rax
movb %al, 11(%rsi)
movq %rbx, %rax
movb %bl, 12(%rsi)
movb %bh, 13(%rsi)
shrq $16, %rbx
movb %bl, 14(%rsi)
shrq $24, %rax
movb %al, 15(%rsi)
movq %r13, %rcx
movq %r13, %rax
movb %cl, 16(%rsi)
movb %ch, 17(%rsi)
shrq $16, %rcx
movb %cl, 18(%rsi)
shrq $24, %rax
movb %al, 19(%rsi)
movq %r12, %rcx
movq %r12, %rax
movb %cl, 20(%rsi)
movb %ch, 21(%rsi)
shrq $16, %rcx
movb %cl, 22(%rsi)
shrq $24, %rax
movb %al, 23(%rsi)
movq %r15, %rcx
movq %r15, %rax
movb %cl, 24(%rsi)
movb %ch, 25(%rsi)
shrq $16, %rcx
movb %cl, 26(%rsi)
shrq $24, %rax
movb %al, 27(%rsi)
movq %r14, %rcx
movq %r14, %rax
movb %cl, 28(%rsi)
movb %ch, 29(%rsi)
shrq $16, %rcx
movb %cl, 30(%rsi)
shrq $24, %rax
movb %al, 31(%rsi)
movq %r11, %rcx
movq %r11, %rax
movb %cl, 32(%rsi)
movb %ch, 33(%rsi)
shrq $16, %rcx
movb %cl, 34(%rsi)
shrq $24, %rax
movb %al, 35(%rsi)
movq %r10, %rcx
movq %r10, %rax
movb %cl, 36(%rsi)
movb %ch, 37(%rsi)
shrq $16, %rcx
movb %cl, 38(%rsi)
shrq $24, %rax
movb %al, 39(%rsi)
movq %r9, %rcx
movq %r9, %rax
movb %cl, 40(%rsi)
movb %ch, 41(%rsi)
shrq $16, %rcx
movb %cl, 42(%rsi)
shrq $24, %rax
movb %al, 43(%rsi)
movq %r8, %rcx
movq %r8, %rax
movb %cl, 44(%rsi)
movb %ch, 45(%rsi)
shrq $16, %rcx
movb %cl, 46(%rsi)
shrq $24, %rax
movb %al, 47(%rsi)
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _fiat_p384_from_bytes ## -- Begin function fiat_p384_from_bytes
.p2align 4, 0x90
_fiat_p384_from_bytes: ## @fiat_p384_from_bytes
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl (%rdi), %r14d
movl 4(%rdi), %r11d
movl 44(%rdi), %r8d
movl 40(%rdi), %r9d
movl 36(%rdi), %r10d
movl 32(%rdi), %r15d
movl 28(%rdi), %r12d
movl 24(%rdi), %ebx
movl 20(%rdi), %edx
movl 16(%rdi), %ecx
movl 12(%rdi), %eax
movl 8(%rdi), %edi
movq %r14, (%rsi)
movq %r11, 4(%rsi)
movq %rdi, 8(%rsi)
movq %rax, 12(%rsi)
movq %rcx, 16(%rsi)
movq %rdx, 20(%rsi)
movq %rbx, 24(%rsi)
movq %r12, 28(%rsi)
movq %r15, 32(%rsi)
movq %r10, 36(%rsi)
movq %r9, 40(%rsi)
movq %r8, 44(%rsi)
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _fiat_p384_mul ; -- Begin function fiat_p384_mul
.p2align 2
_fiat_p384_mul: ; @fiat_p384_mul
.cfi_startproc
; %bb.0:
stp x28, x27, [sp, #-96]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 96
stp x26, x25, [sp, #16] ; 16-byte Folded Spill
stp x24, x23, [sp, #32] ; 16-byte Folded Spill
stp x22, x21, [sp, #48] ; 16-byte Folded Spill
stp x20, x19, [sp, #64] ; 16-byte Folded Spill
stp x29, x30, [sp, #80] ; 16-byte Folded Spill
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
sub sp, sp, #1152
.cfi_def_cfa_offset 1248
str x2, [sp, #712] ; 8-byte Folded Spill
ldr x13, [x0]
mov x2, x0
str x1, [sp, #936] ; 8-byte Folded Spill
ldp x22, x10, [x1]
ldur x7, [x1, #4]
ldur x23, [x1, #12]
ldr x8, [x1, #16]
str x8, [sp, #888] ; 8-byte Folded Spill
str x13, [sp, #1024] ; 8-byte Folded Spill
mul x17, x8, x13
str x17, [sp, #1128] ; 8-byte Folded Spill
mul x0, x23, x13
str x0, [sp, #1144] ; 8-byte Folded Spill
umulh x11, x23, x13
str x11, [sp, #1104] ; 8-byte Folded Spill
mul x8, x10, x13
umulh x12, x10, x13
mul x14, x7, x13
umulh x15, x7, x13
mul x9, x22, x13
umulh x16, x22, x13
adds x14, x14, x16
adcs x15, x8, x15
cmp x15, x8
cinc x8, x12, lo
str x8, [sp, #1008] ; 8-byte Folded Spill
adds x24, x8, x0
adcs x8, x17, x11
str x8, [sp, #1056] ; 8-byte Folded Spill
lsl x16, x9, #32
sub x20, x16, x9
mov w8, #-1
umulh x6, x9, x8
mov w8, #-1
cmp x9, x16
cset w16, hi
adds x17, x14, x16
adds x0, x17, x6
str x6, [sp, #856] ; 8-byte Folded Spill
cset w17, hs
cmn x14, x16
ldur x5, [x2, #4]
mov x13, x2
str x2, [sp, #984] ; 8-byte Folded Spill
adcs x16, x15, x17
mrs x11, NZCV
str x11, [sp, #1048] ; 8-byte Folded Spill
adcs x1, x20, x24
mul x14, x10, x5
mul x15, x7, x5
umulh x2, x7, x5
mul x3, x22, x5
umulh x4, x22, x5
mov x27, x5
adds x4, x15, x4
adcs x17, x14, x2
cmn x0, x3
adcs x2, x4, x16
adds x5, x0, x3
lsl x0, x5, #32
sub x12, x0, x5
str x12, [sp, #928] ; 8-byte Folded Spill
adcs xzr, x16, x4
mrs x4, NZCV
adcs x16, x17, x1
cmp x5, x0
umulh x8, x5, x8
str x8, [sp, #800] ; 8-byte Folded Spill
cset w0, hi
adds x3, x2, x0
adds x19, x3, x8
cset w3, hs
cmn x2, x0
adcs x21, x16, x3
mrs x8, NZCV
str x8, [sp, #976] ; 8-byte Folded Spill
mrs x25, NZCV
cmp x17, x14
str x10, [sp, #1088] ; 8-byte Folded Spill
umulh x14, x10, x27
cinc x11, x14, lo
str x11, [sp, #1136] ; 8-byte Folded Spill
ldr x13, [x13, #8]
str x13, [sp, #1072] ; 8-byte Folded Spill
mul x14, x7, x13
umulh x16, x22, x13
adds x14, x14, x16
mov w8, #-2
mul x0, x9, x8
umulh x26, x9, x8
mul x28, x10, x13
umulh x9, x7, x13
adcs x10, x28, x9
mul x30, x22, x13
cmn x19, x30
str x27, [sp, #1064] ; 8-byte Folded Spill
ldr x16, [sp, #888] ; 8-byte Folded Reload
mul x8, x16, x27
str x8, [sp, #1120] ; 8-byte Folded Spill
mul x9, x23, x27
str x9, [sp, #1112] ; 8-byte Folded Spill
umulh x13, x23, x27
str x13, [sp, #1080] ; 8-byte Folded Spill
adcs x27, x14, x21
adds x15, x11, x9
adcs x13, x8, x13
str x13, [sp, #992] ; 8-byte Folded Spill
adds x9, x0, x6
str x20, [sp, #1016] ; 8-byte Folded Spill
adcs x11, x20, x26
str x11, [sp, #864] ; 8-byte Folded Spill
ldr x8, [sp, #1048] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x24, x20
mrs x3, NZCV
ldr x8, [sp, #1056] ; 8-byte Folded Reload
adcs x6, x9, x8
msr NZCV, x4
adcs xzr, x1, x17
mrs x17, NZCV
str x17, [sp, #1048] ; 8-byte Folded Spill
adcs x4, x15, x6
msr NZCV, x25
adcs x25, x12, x4
adds x26, x19, x30
lsl x19, x26, #32
sub x2, x19, x26
str x2, [sp, #904] ; 8-byte Folded Spill
adcs xzr, x21, x14
mrs x14, NZCV
adcs x17, x10, x25
ldr x0, [sp, #1144] ; 8-byte Folded Reload
ldr x12, [sp, #1008] ; 8-byte Folded Reload
cmn x12, x0
ldr x0, [sp, #1128] ; 8-byte Folded Reload
ldr x1, [sp, #1104] ; 8-byte Folded Reload
adcs xzr, x1, x0
ldr x24, [sp, #1024] ; 8-byte Folded Reload
umulh x1, x16, x24
adcs x20, x1, xzr
cmp x20, x1
ldr x0, [sp, #936] ; 8-byte Folded Reload
ldur x30, [x0, #20]
mul x1, x30, x24
cset w21, lo
adds x0, x20, x1
str x0, [sp, #944] ; 8-byte Folded Spill
umulh x1, x30, x24
str x1, [sp, #896] ; 8-byte Folded Spill
adcs x21, x21, x1
msr NZCV, x3
adcs xzr, x8, x9
mrs x8, NZCV
str x8, [sp, #912] ; 8-byte Folded Spill
adcs x8, x11, x0
str x8, [sp, #1056] ; 8-byte Folded Spill
ldr x9, [sp, #1048] ; 8-byte Folded Reload
msr NZCV, x9
adcs xzr, x6, x15
mrs x9, NZCV
str x9, [sp, #1048] ; 8-byte Folded Spill
adcs x11, x13, x8
str x11, [sp, #792] ; 8-byte Folded Spill
cmp x10, x28
mov w9, #-2
mul x8, x5, x9
umulh x9, x5, x9
ldr x13, [sp, #1072] ; 8-byte Folded Reload
ldr x3, [sp, #1088] ; 8-byte Folded Reload
umulh x12, x3, x13
cinc x15, x12, lo
str x15, [sp, #1008] ; 8-byte Folded Spill
mul x12, x16, x13
str x12, [sp, #1144] ; 8-byte Folded Spill
mov x0, x16
str x23, [sp, #1096] ; 8-byte Folded Spill
mul x16, x23, x13
str x16, [sp, #1000] ; 8-byte Folded Spill
umulh x1, x23, x13
str x1, [sp, #960] ; 8-byte Folded Spill
adds x13, x15, x16
str x13, [sp, #840] ; 8-byte Folded Spill
adcs x12, x12, x1
str x12, [sp, #1128] ; 8-byte Folded Spill
ldr x12, [sp, #800] ; 8-byte Folded Reload
adds x12, x8, x12
str x12, [sp, #832] ; 8-byte Folded Spill
ldr x8, [sp, #928] ; 8-byte Folded Reload
adcs x20, x8, x9
str x20, [sp, #760] ; 8-byte Folded Spill
ldr x9, [sp, #976] ; 8-byte Folded Reload
msr NZCV, x9
adcs xzr, x4, x8
mrs x8, NZCV
str x8, [sp, #880] ; 8-byte Folded Spill
adcs x9, x12, x11
str x9, [sp, #816] ; 8-byte Folded Spill
msr NZCV, x14
adcs xzr, x25, x10
mrs x8, NZCV
str x8, [sp, #872] ; 8-byte Folded Spill
adcs x11, x13, x9
str x11, [sp, #976] ; 8-byte Folded Spill
cmp x26, x19
mov w8, #-1
umulh x12, x26, x8
str x12, [sp, #808] ; 8-byte Folded Spill
cset w9, hi
adds x10, x27, x9
adds x13, x10, x12
cset w10, hs
cmn x27, x9
adcs x9, x17, x10
mrs x10, NZCV
str x10, [sp, #952] ; 8-byte Folded Spill
ldr x6, [sp, #984] ; 8-byte Folded Reload
ldur x19, [x6, #12]
adcs x16, x2, x11
str x16, [sp, #776] ; 8-byte Folded Spill
mov x14, x7
str x7, [sp, #1040] ; 8-byte Folded Spill
mul x10, x7, x19
str x22, [sp, #1032] ; 8-byte Folded Spill
umulh x11, x22, x19
adds x12, x10, x11
mul x15, x3, x19
mov x7, x3
umulh x10, x14, x19
adcs x28, x15, x10
mul x11, x22, x19
cmn x13, x11
adcs x10, x12, x9
adds x1, x13, x11
lsl x14, x1, #32
sub x3, x14, x1
str x3, [sp, #920] ; 8-byte Folded Spill
adcs xzr, x9, x12
mrs x25, NZCV
adcs x11, x28, x16
cmp x1, x14
umulh x8, x1, x8
str x8, [sp, #824] ; 8-byte Folded Spill
cset w12, hi
adds x14, x10, x12
adds x13, x14, x8
cset w9, hs
cmn x10, x12
adcs x22, x11, x9
mrs x8, NZCV
str x8, [sp, #848] ; 8-byte Folded Spill
mrs x2, NZCV
ldr x9, [sp, #1136] ; 8-byte Folded Reload
ldr x10, [sp, #1112] ; 8-byte Folded Reload
cmn x9, x10
ldr x9, [sp, #1120] ; 8-byte Folded Reload
ldr x10, [sp, #1080] ; 8-byte Folded Reload
adcs xzr, x10, x9
ldr x17, [sp, #1064] ; 8-byte Folded Reload
umulh x10, x0, x17
adcs x12, x10, xzr
ldr x8, [sp, #896] ; 8-byte Folded Reload
cmp x21, x8
ldr x16, [sp, #936] ; 8-byte Folded Reload
ldr x8, [x16, #24]
str x8, [sp, #1112] ; 8-byte Folded Spill
mul x11, x8, x24
cset w14, lo
adds x27, x21, x11
umulh x23, x8, x24
adcs x5, x14, x23
ldr x9, [sp, #1016] ; 8-byte Folded Reload
ldr x4, [sp, #864] ; 8-byte Folded Reload
cmp x4, x9
ldr x11, [sp, #856] ; 8-byte Folded Reload
add x8, x9, x11
str x8, [sp, #896] ; 8-byte Folded Spill
cinc x11, x8, lo
ldr x8, [sp, #912] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #944] ; 8-byte Folded Reload
adcs xzr, x8, x4
mrs x8, NZCV
str x8, [sp, #944] ; 8-byte Folded Spill
adcs x21, x11, x27
str x21, [sp, #864] ; 8-byte Folded Spill
cmp x12, x10
mov x9, x17
str x30, [sp, #1104] ; 8-byte Folded Spill
mul x10, x30, x17
cset w17, lo
adds x8, x12, x10
str x8, [sp, #912] ; 8-byte Folded Spill
umulh x4, x30, x9
adcs x17, x17, x4
ldr x9, [sp, #1048] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #1056] ; 8-byte Folded Reload
ldr x10, [sp, #992] ; 8-byte Folded Reload
adcs xzr, x9, x10
mrs x9, NZCV
str x9, [sp, #784] ; 8-byte Folded Spill
adcs x10, x8, x21
str x10, [sp, #752] ; 8-byte Folded Spill
ldr x8, [sp, #880] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #832] ; 8-byte Folded Reload
ldr x9, [sp, #792] ; 8-byte Folded Reload
adcs xzr, x9, x8
mrs x8, NZCV
str x8, [sp, #744] ; 8-byte Folded Spill
adcs x10, x20, x10
str x10, [sp, #792] ; 8-byte Folded Spill
ldr x8, [sp, #872] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #840] ; 8-byte Folded Reload
ldr x9, [sp, #816] ; 8-byte Folded Reload
adcs xzr, x9, x8
mrs x8, NZCV
str x8, [sp, #840] ; 8-byte Folded Spill
ldr x9, [sp, #1128] ; 8-byte Folded Reload
adcs x8, x9, x10
str x8, [sp, #880] ; 8-byte Folded Spill
cmp x28, x15
mov x24, x7
umulh x10, x7, x19
cinc x9, x10, lo
str x9, [sp, #992] ; 8-byte Folded Spill
ldr x21, [x6, #16]
str x21, [sp, #1048] ; 8-byte Folded Spill
ldr x7, [sp, #1040] ; 8-byte Folded Reload
mul x10, x7, x21
ldr x14, [sp, #1032] ; 8-byte Folded Reload
umulh x15, x14, x21
adds x6, x10, x15
mov w12, #-2
mul x10, x26, x12
umulh x20, x26, x12
mul x24, x24, x21
umulh x15, x7, x21
adcs x15, x24, x15
mul x26, x14, x21
cmn x13, x26
str x19, [sp, #968] ; 8-byte Folded Spill
mul x14, x0, x19
str x14, [sp, #1120] ; 8-byte Folded Spill
ldr x12, [sp, #1096] ; 8-byte Folded Reload
mul x7, x12, x19
str x7, [sp, #1080] ; 8-byte Folded Spill
umulh x30, x12, x19
str x30, [sp, #1056] ; 8-byte Folded Spill
adcs x12, x6, x22
str x12, [sp, #832] ; 8-byte Folded Spill
adds x21, x9, x7
adcs x12, x14, x30
str x12, [sp, #1136] ; 8-byte Folded Spill
ldr x12, [sp, #808] ; 8-byte Folded Reload
adds x30, x10, x12
ldr x10, [sp, #904] ; 8-byte Folded Reload
adcs x20, x10, x20
str x20, [sp, #736] ; 8-byte Folded Spill
ldr x9, [sp, #952] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #976] ; 8-byte Folded Reload
adcs xzr, x9, x10
mrs x9, NZCV
str x9, [sp, #952] ; 8-byte Folded Spill
adcs x12, x30, x8
msr NZCV, x25
ldr x8, [sp, #776] ; 8-byte Folded Reload
adcs xzr, x8, x28
mrs x8, NZCV
str x8, [sp, #816] ; 8-byte Folded Spill
adcs x28, x21, x12
msr NZCV, x2
adcs x19, x3, x28
adds x25, x13, x26
lsl x14, x25, #32
sub x2, x14, x25
str x2, [sp, #976] ; 8-byte Folded Spill
adcs xzr, x22, x6
mrs x6, NZCV
adcs x7, x15, x19
ldr x8, [sp, #1008] ; 8-byte Folded Reload
ldr x9, [sp, #1000] ; 8-byte Folded Reload
cmn x8, x9
ldr x8, [sp, #1144] ; 8-byte Folded Reload
ldr x9, [sp, #960] ; 8-byte Folded Reload
adcs xzr, x9, x8
ldr x8, [sp, #1072] ; 8-byte Folded Reload
umulh x9, x0, x8
adcs x10, x9, xzr
cmp x5, x23
ldur x16, [x16, #28]
str x16, [sp, #1144] ; 8-byte Folded Spill
ldr x13, [sp, #1024] ; 8-byte Folded Reload
mul x23, x16, x13
cset w26, lo
adds x3, x5, x23
str x3, [sp, #728] ; 8-byte Folded Spill
umulh x13, x16, x13
str x13, [sp, #584] ; 8-byte Folded Spill
adcs x13, x26, x13
str x13, [sp, #632] ; 8-byte Folded Spill
ldr x13, [sp, #1016] ; 8-byte Folded Reload
cmp x11, x13
ldr x23, [sp, #896] ; 8-byte Folded Reload
cinc x26, x23, lo
ldr x13, [sp, #944] ; 8-byte Folded Reload
msr NZCV, x13
adcs xzr, x27, x11
mrs x11, NZCV
str x11, [sp, #624] ; 8-byte Folded Spill
adcs x11, x26, x3
str x11, [sp, #944] ; 8-byte Folded Spill
cmp x17, x4
cset w13, lo
cmp x10, x9
ldr x4, [sp, #1104] ; 8-byte Folded Reload
mul x9, x4, x8
cset w5, lo
adds x3, x10, x9
str x3, [sp, #872] ; 8-byte Folded Spill
umulh x8, x4, x8
str x8, [sp, #592] ; 8-byte Folded Spill
adcs x8, x5, x8
str x8, [sp, #680] ; 8-byte Folded Spill
ldr x9, [sp, #1064] ; 8-byte Folded Reload
ldr x10, [sp, #1112] ; 8-byte Folded Reload
mul x8, x10, x9
adds x8, x17, x8
str x8, [sp, #776] ; 8-byte Folded Spill
umulh x4, x10, x9
adcs x9, x13, x4
str x9, [sp, #672] ; 8-byte Folded Spill
ldr x9, [sp, #784] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #912] ; 8-byte Folded Reload
ldr x10, [sp, #864] ; 8-byte Folded Reload
adcs xzr, x10, x9
mrs x9, NZCV
str x9, [sp, #664] ; 8-byte Folded Spill
adcs x11, x8, x11
str x11, [sp, #768] ; 8-byte Folded Spill
ldr x5, [sp, #928] ; 8-byte Folded Reload
ldr x10, [sp, #760] ; 8-byte Folded Reload
cmp x10, x5
ldr x9, [sp, #800] ; 8-byte Folded Reload
add x9, x5, x9
str x9, [sp, #864] ; 8-byte Folded Spill
cinc x8, x9, lo
str x8, [sp, #784] ; 8-byte Folded Spill
ldr x9, [sp, #744] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #752] ; 8-byte Folded Reload
adcs xzr, x9, x10
mrs x9, NZCV
str x9, [sp, #656] ; 8-byte Folded Spill
adcs x9, x8, x11
str x9, [sp, #744] ; 8-byte Folded Spill
ldr x8, [sp, #840] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #1128] ; 8-byte Folded Reload
ldr x10, [sp, #792] ; 8-byte Folded Reload
adcs xzr, x10, x8
mrs x8, NZCV
str x8, [sp, #648] ; 8-byte Folded Spill
adcs x8, x3, x9
str x8, [sp, #760] ; 8-byte Folded Spill
ldr x9, [sp, #952] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #880] ; 8-byte Folded Reload
adcs xzr, x9, x30
mrs x9, NZCV
str x9, [sp, #720] ; 8-byte Folded Spill
adcs x9, x20, x8
str x9, [sp, #752] ; 8-byte Folded Spill
ldr x8, [sp, #816] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x12, x21
mrs x8, NZCV
str x8, [sp, #704] ; 8-byte Folded Spill
ldr x8, [sp, #1136] ; 8-byte Folded Reload
adcs x11, x8, x9
str x11, [sp, #576] ; 8-byte Folded Spill
cmp x15, x24
mov w9, #-2
mul x8, x1, x9
umulh x9, x1, x9
ldr x10, [sp, #1048] ; 8-byte Folded Reload
ldr x1, [sp, #1088] ; 8-byte Folded Reload
umulh x12, x1, x10
cinc x16, x12, lo
str x16, [sp, #912] ; 8-byte Folded Spill
mul x13, x0, x10
str x13, [sp, #952] ; 8-byte Folded Spill
mov x30, x0
ldr x12, [sp, #1096] ; 8-byte Folded Reload
mul x17, x12, x10
str x17, [sp, #880] ; 8-byte Folded Spill
umulh x10, x12, x10
str x10, [sp, #840] ; 8-byte Folded Spill
adds x16, x16, x17
str x16, [sp, #696] ; 8-byte Folded Spill
adcs x10, x13, x10
str x10, [sp, #1128] ; 8-byte Folded Spill
ldr x12, [sp, #824] ; 8-byte Folded Reload
adds x10, x8, x12
str x10, [sp, #640] ; 8-byte Folded Spill
ldr x8, [sp, #920] ; 8-byte Folded Reload
adcs x9, x8, x9
str x9, [sp, #960] ; 8-byte Folded Spill
ldr x9, [sp, #848] ; 8-byte Folded Reload
msr NZCV, x9
adcs xzr, x28, x8
mrs x8, NZCV
str x8, [sp, #608] ; 8-byte Folded Spill
adcs x8, x10, x11
str x8, [sp, #616] ; 8-byte Folded Spill
msr NZCV, x6
adcs xzr, x19, x15
mrs x9, NZCV
str x9, [sp, #600] ; 8-byte Folded Spill
adcs x11, x16, x8
str x11, [sp, #792] ; 8-byte Folded Spill
cmp x25, x14
mov w16, #-1
umulh x9, x25, x16
str x9, [sp, #816] ; 8-byte Folded Spill
cset w8, hi
ldr x13, [sp, #832] ; 8-byte Folded Reload
adds x10, x13, x8
adds x10, x10, x9
cset w12, hs
cmn x13, x8
adcs x13, x7, x12
mrs x8, NZCV
str x8, [sp, #688] ; 8-byte Folded Spill
ldr x8, [sp, #984] ; 8-byte Folded Reload
ldur x24, [x8, #20]
adcs x11, x2, x11
str x11, [sp, #528] ; 8-byte Folded Spill
ldr x14, [sp, #1040] ; 8-byte Folded Reload
mul x8, x14, x24
ldr x0, [sp, #1032] ; 8-byte Folded Reload
umulh x12, x0, x24
adds x17, x8, x12
mul x9, x1, x24
umulh x8, x14, x24
adcs x27, x9, x8
mul x8, x0, x24
cmn x10, x8
adcs x15, x17, x13
adds x19, x10, x8
lsl x0, x19, #32
sub x6, x0, x19
str x6, [sp, #1008] ; 8-byte Folded Spill
adcs xzr, x13, x17
mrs x21, NZCV
adcs x10, x27, x11
cmp x19, x0
umulh x13, x19, x16
str x13, [sp, #848] ; 8-byte Folded Spill
cset w12, hi
adds x8, x15, x12
adds x17, x8, x13
cset w13, hs
cmn x15, x12
adcs x8, x10, x13
str x8, [sp, #456] ; 8-byte Folded Spill
mrs x8, NZCV
str x8, [sp, #832] ; 8-byte Folded Spill
mrs x22, NZCV
ldr x8, [sp, #1080] ; 8-byte Folded Reload
ldr x10, [sp, #992] ; 8-byte Folded Reload
cmn x10, x8
ldr x8, [sp, #1120] ; 8-byte Folded Reload
ldr x10, [sp, #1056] ; 8-byte Folded Reload
adcs xzr, x10, x8
mov x16, x30
ldr x7, [sp, #968] ; 8-byte Folded Reload
umulh x12, x30, x7
adcs x13, x12, xzr
ldr x11, [sp, #632] ; 8-byte Folded Reload
ldr x8, [sp, #584] ; 8-byte Folded Reload
cmp x11, x8
ldr x3, [sp, #936] ; 8-byte Folded Reload
ldr x8, [x3, #32]
str x8, [sp, #1120] ; 8-byte Folded Spill
ldr x14, [sp, #1024] ; 8-byte Folded Reload
mul x10, x8, x14
cset w15, lo
adds x10, x11, x10
str x10, [sp, #568] ; 8-byte Folded Spill
umulh x8, x8, x14
str x8, [sp, #584] ; 8-byte Folded Spill
adcs x8, x15, x8
str x8, [sp, #632] ; 8-byte Folded Spill
ldr x1, [sp, #1016] ; 8-byte Folded Reload
cmp x26, x1
cinc x0, x23, lo
ldr x8, [sp, #624] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #728] ; 8-byte Folded Reload
adcs xzr, x8, x26
mrs x8, NZCV
str x8, [sp, #536] ; 8-byte Folded Spill
adcs x26, x0, x10
str x26, [sp, #560] ; 8-byte Folded Spill
ldr x23, [sp, #672] ; 8-byte Folded Reload
cmp x23, x4
cset w11, lo
ldr x2, [sp, #680] ; 8-byte Folded Reload
ldr x8, [sp, #592] ; 8-byte Folded Reload
cmp x2, x8
cset w14, lo
cmp x13, x12
ldr x8, [sp, #1104] ; 8-byte Folded Reload
mul x12, x8, x7
cset w15, lo
adds x13, x13, x12
str x13, [sp, #624] ; 8-byte Folded Spill
umulh x10, x8, x7
str x10, [sp, #592] ; 8-byte Folded Spill
adcs x8, x15, x10
str x8, [sp, #544] ; 8-byte Folded Spill
ldr x8, [sp, #1112] ; 8-byte Folded Reload
ldr x10, [sp, #1072] ; 8-byte Folded Reload
mul x12, x8, x10
adds x15, x2, x12
str x15, [sp, #552] ; 8-byte Folded Spill
umulh x30, x8, x10
adcs x8, x14, x30
str x8, [sp, #680] ; 8-byte Folded Spill
ldr x10, [sp, #1144] ; 8-byte Folded Reload
ldr x8, [sp, #1064] ; 8-byte Folded Reload
mul x12, x10, x8
adds x12, x23, x12
str x12, [sp, #520] ; 8-byte Folded Spill
umulh x28, x10, x8
adcs x8, x11, x28
str x8, [sp, #672] ; 8-byte Folded Spill
ldr x8, [sp, #664] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #944] ; 8-byte Folded Reload
ldr x10, [sp, #776] ; 8-byte Folded Reload
adcs xzr, x8, x10
mrs x8, NZCV
str x8, [sp, #664] ; 8-byte Folded Spill
adcs x11, x12, x26
ldr x8, [sp, #784] ; 8-byte Folded Reload
cmp x8, x5
ldr x5, [sp, #864] ; 8-byte Folded Reload
cinc x4, x5, lo
ldr x10, [sp, #656] ; 8-byte Folded Reload
msr NZCV, x10
ldr x10, [sp, #768] ; 8-byte Folded Reload
adcs xzr, x10, x8
mrs x8, NZCV
stp x8, x11, [sp, #504] ; 16-byte Folded Spill
adcs x11, x4, x11
str x11, [sp, #656] ; 8-byte Folded Spill
ldr x8, [sp, #648] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #872] ; 8-byte Folded Reload
ldr x10, [sp, #744] ; 8-byte Folded Reload
adcs xzr, x10, x8
mrs x8, NZCV
str x8, [sp, #496] ; 8-byte Folded Spill
adcs x12, x15, x11
str x12, [sp, #648] ; 8-byte Folded Spill
ldr x8, [sp, #904] ; 8-byte Folded Reload
ldr x11, [sp, #736] ; 8-byte Folded Reload
cmp x11, x8
ldr x10, [sp, #808] ; 8-byte Folded Reload
add x10, x8, x10
str x10, [sp, #784] ; 8-byte Folded Spill
cinc x2, x10, lo
ldr x8, [sp, #720] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #760] ; 8-byte Folded Reload
adcs xzr, x8, x11
mrs x8, NZCV
str x8, [sp, #488] ; 8-byte Folded Spill
adcs x11, x2, x12
str x11, [sp, #744] ; 8-byte Folded Spill
ldr x8, [sp, #704] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #1136] ; 8-byte Folded Reload
ldr x10, [sp, #752] ; 8-byte Folded Reload
adcs xzr, x10, x8
mrs x8, NZCV
str x8, [sp, #480] ; 8-byte Folded Spill
adcs x11, x13, x11
str x11, [sp, #752] ; 8-byte Folded Spill
ldr x8, [sp, #608] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #640] ; 8-byte Folded Reload
ldr x10, [sp, #576] ; 8-byte Folded Reload
adcs xzr, x10, x8
mrs x8, NZCV
str x8, [sp, #576] ; 8-byte Folded Spill
ldr x8, [sp, #960] ; 8-byte Folded Reload
adcs x11, x8, x11
str x11, [sp, #736] ; 8-byte Folded Spill
ldr x8, [sp, #600] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #696] ; 8-byte Folded Reload
ldr x10, [sp, #616] ; 8-byte Folded Reload
adcs xzr, x10, x8
mrs x8, NZCV
str x8, [sp, #472] ; 8-byte Folded Spill
ldr x8, [sp, #1128] ; 8-byte Folded Reload
adcs x12, x8, x11
str x12, [sp, #616] ; 8-byte Folded Spill
cmp x27, x9
ldr x8, [sp, #1088] ; 8-byte Folded Reload
mov x26, x24
umulh x9, x8, x24
cinc x10, x9, lo
str x10, [sp, #872] ; 8-byte Folded Spill
ldr x24, [sp, #984] ; 8-byte Folded Reload
ldr x15, [x24, #24]
str x15, [sp, #1080] ; 8-byte Folded Spill
ldr x9, [sp, #1040] ; 8-byte Folded Reload
mul x11, x9, x15
ldr x23, [sp, #1032] ; 8-byte Folded Reload
umulh x13, x23, x15
adds x11, x11, x13
mov w14, #-2
mul x13, x25, x14
umulh x25, x25, x14
mul x8, x8, x15
str x8, [sp, #464] ; 8-byte Folded Spill
umulh x14, x9, x15
adcs x20, x8, x14
mul x8, x23, x15
cmn x17, x8
str x26, [sp, #1000] ; 8-byte Folded Spill
mul x7, x16, x26
str x7, [sp, #768] ; 8-byte Folded Spill
mov x15, x16
ldr x14, [sp, #1096] ; 8-byte Folded Reload
mul x16, x14, x26
str x16, [sp, #760] ; 8-byte Folded Spill
umulh x26, x14, x26
str x26, [sp, #728] ; 8-byte Folded Spill
ldr x23, [sp, #456] ; 8-byte Folded Reload
adcs x9, x11, x23
str x9, [sp, #640] ; 8-byte Folded Spill
adds x16, x10, x16
str x16, [sp, #776] ; 8-byte Folded Spill
adcs x9, x7, x26
str x9, [sp, #992] ; 8-byte Folded Spill
ldr x9, [sp, #816] ; 8-byte Folded Reload
adds x14, x13, x9
ldr x9, [sp, #976] ; 8-byte Folded Reload
adcs x13, x9, x25
str x13, [sp, #944] ; 8-byte Folded Spill
ldr x10, [sp, #688] ; 8-byte Folded Reload
msr NZCV, x10
ldr x10, [sp, #792] ; 8-byte Folded Reload
adcs xzr, x10, x9
mrs x9, NZCV
str x9, [sp, #792] ; 8-byte Folded Spill
adcs x13, x14, x12
msr NZCV, x21
ldr x9, [sp, #528] ; 8-byte Folded Reload
adcs xzr, x9, x27
mrs x9, NZCV
str x9, [sp, #432] ; 8-byte Folded Spill
adcs x26, x16, x13
msr NZCV, x22
adcs x9, x6, x26
str x9, [sp, #400] ; 8-byte Folded Spill
adds x25, x17, x8
lsl x16, x25, #32
sub x7, x16, x25
str x7, [sp, #1056] ; 8-byte Folded Spill
adcs xzr, x23, x11
mrs x21, NZCV
adcs x23, x20, x9
ldr x8, [sp, #912] ; 8-byte Folded Reload
ldr x11, [sp, #880] ; 8-byte Folded Reload
cmn x8, x11
ldr x8, [sp, #952] ; 8-byte Folded Reload
ldr x11, [sp, #840] ; 8-byte Folded Reload
adcs xzr, x11, x8
ldr x11, [sp, #1048] ; 8-byte Folded Reload
umulh x6, x15, x11
adcs x22, x6, xzr
ldr x9, [sp, #632] ; 8-byte Folded Reload
ldr x8, [sp, #584] ; 8-byte Folded Reload
cmp x9, x8
ldur x27, [x3, #36]
str x27, [sp, #1136] ; 8-byte Folded Spill
ldr x3, [sp, #1024] ; 8-byte Folded Reload
mul x17, x27, x3
cset w8, lo
adds x9, x9, x17
str x9, [sp, #632] ; 8-byte Folded Spill
umulh x17, x27, x3
str x17, [sp, #424] ; 8-byte Folded Spill
adcs x10, x8, x17
cmp x0, x1
ldr x3, [sp, #896] ; 8-byte Folded Reload
cinc x27, x3, lo
ldr x8, [sp, #536] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #568] ; 8-byte Folded Reload
adcs xzr, x8, x0
mrs x8, NZCV
stp x8, x10, [sp, #448] ; 16-byte Folded Spill
adcs x8, x27, x9
str x8, [sp, #696] ; 8-byte Folded Spill
ldr x9, [sp, #672] ; 8-byte Folded Reload
cmp x9, x28
cset w10, lo
ldr x28, [sp, #680] ; 8-byte Folded Reload
cmp x28, x30
cset w12, lo
ldr x15, [sp, #592] ; 8-byte Folded Reload
ldr x30, [sp, #544] ; 8-byte Folded Reload
cmp x30, x15
cset w15, lo
cmp x22, x6
ldr x1, [sp, #1104] ; 8-byte Folded Reload
mul x17, x1, x11
cset w0, lo
adds x22, x22, x17
str x22, [sp, #704] ; 8-byte Folded Spill
umulh x11, x1, x11
str x11, [sp, #440] ; 8-byte Folded Spill
adcs x11, x0, x11
str x11, [sp, #608] ; 8-byte Folded Spill
ldr x0, [sp, #1112] ; 8-byte Folded Reload
ldr x1, [sp, #968] ; 8-byte Folded Reload
mul x17, x0, x1
adds x11, x30, x17
str x11, [sp, #720] ; 8-byte Folded Spill
umulh x17, x0, x1
str x17, [sp, #392] ; 8-byte Folded Spill
adcs x15, x15, x17
str x15, [sp, #544] ; 8-byte Folded Spill
ldr x17, [sp, #1144] ; 8-byte Folded Reload
ldr x0, [sp, #1072] ; 8-byte Folded Reload
mul x15, x17, x0
adds x1, x28, x15
str x1, [sp, #688] ; 8-byte Folded Spill
umulh x15, x17, x0
str x15, [sp, #384] ; 8-byte Folded Spill
adcs x12, x12, x15
str x12, [sp, #536] ; 8-byte Folded Spill
ldr x15, [sp, #1120] ; 8-byte Folded Reload
ldr x17, [sp, #1064] ; 8-byte Folded Reload
mul x12, x15, x17
adds x12, x9, x12
str x12, [sp, #672] ; 8-byte Folded Spill
umulh x0, x15, x17
adcs x9, x10, x0
str x9, [sp, #528] ; 8-byte Folded Spill
ldr x9, [sp, #664] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #560] ; 8-byte Folded Reload
ldr x10, [sp, #520] ; 8-byte Folded Reload
adcs xzr, x9, x10
mrs x9, NZCV
str x9, [sp, #520] ; 8-byte Folded Spill
adcs x9, x12, x8
str x9, [sp, #664] ; 8-byte Folded Spill
ldr x8, [sp, #928] ; 8-byte Folded Reload
cmp x4, x8
cinc x12, x5, lo
str x12, [sp, #368] ; 8-byte Folded Spill
ldr x8, [sp, #504] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #512] ; 8-byte Folded Reload
adcs xzr, x8, x4
mrs x8, NZCV
str x8, [sp, #512] ; 8-byte Folded Spill
adcs x8, x12, x9
str x8, [sp, #600] ; 8-byte Folded Spill
ldr x9, [sp, #496] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #552] ; 8-byte Folded Reload
ldr x10, [sp, #656] ; 8-byte Folded Reload
adcs xzr, x10, x9
mrs x9, NZCV
str x9, [sp, #504] ; 8-byte Folded Spill
adcs x9, x1, x8
str x9, [sp, #656] ; 8-byte Folded Spill
ldr x8, [sp, #904] ; 8-byte Folded Reload
cmp x2, x8
ldr x8, [sp, #784] ; 8-byte Folded Reload
cinc x30, x8, lo
ldr x8, [sp, #488] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #648] ; 8-byte Folded Reload
adcs xzr, x8, x2
mrs x8, NZCV
str x8, [sp, #496] ; 8-byte Folded Spill
adcs x8, x30, x9
str x8, [sp, #592] ; 8-byte Folded Spill
ldr x9, [sp, #480] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #624] ; 8-byte Folded Reload
ldr x10, [sp, #744] ; 8-byte Folded Reload
adcs xzr, x10, x9
mrs x9, NZCV
str x9, [sp, #488] ; 8-byte Folded Spill
adcs x9, x11, x8
str x9, [sp, #648] ; 8-byte Folded Spill
ldr x8, [sp, #920] ; 8-byte Folded Reload
ldr x12, [sp, #960] ; 8-byte Folded Reload
cmp x12, x8
ldr x11, [sp, #824] ; 8-byte Folded Reload
add x11, x8, x11
str x11, [sp, #880] ; 8-byte Folded Spill
cinc x17, x11, lo
ldr x8, [sp, #576] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #752] ; 8-byte Folded Reload
adcs xzr, x8, x12
mrs x8, NZCV
str x8, [sp, #480] ; 8-byte Folded Spill
adcs x9, x17, x9
str x9, [sp, #584] ; 8-byte Folded Spill
ldr x8, [sp, #472] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #1128] ; 8-byte Folded Reload
ldr x10, [sp, #736] ; 8-byte Folded Reload
adcs xzr, x10, x8
mrs x8, NZCV
str x8, [sp, #472] ; 8-byte Folded Spill
adcs x9, x22, x9
str x9, [sp, #624] ; 8-byte Folded Spill
ldr x8, [sp, #792] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #616] ; 8-byte Folded Reload
adcs xzr, x8, x14
mrs x8, NZCV
str x8, [sp, #576] ; 8-byte Folded Spill
ldr x8, [sp, #944] ; 8-byte Folded Reload
adcs x9, x8, x9
str x9, [sp, #616] ; 8-byte Folded Spill
ldr x8, [sp, #432] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #776] ; 8-byte Folded Reload
adcs xzr, x13, x8
mrs x8, NZCV
str x8, [sp, #568] ; 8-byte Folded Spill
ldr x8, [sp, #992] ; 8-byte Folded Reload
adcs x9, x8, x9
str x9, [sp, #432] ; 8-byte Folded Spill
ldr x8, [sp, #464] ; 8-byte Folded Reload
cmp x20, x8
mov w11, #-2
mul x8, x19, x11
umulh x11, x19, x11
ldr x13, [sp, #1080] ; 8-byte Folded Reload
ldr x2, [sp, #1088] ; 8-byte Folded Reload
umulh x12, x2, x13
cinc x15, x12, lo
str x15, [sp, #752] ; 8-byte Folded Spill
ldr x1, [sp, #888] ; 8-byte Folded Reload
mul x14, x1, x13
str x14, [sp, #912] ; 8-byte Folded Spill
ldr x12, [sp, #1096] ; 8-byte Folded Reload
mul x4, x12, x13
str x4, [sp, #744] ; 8-byte Folded Spill
umulh x13, x12, x13
str x13, [sp, #736] ; 8-byte Folded Spill
adds x15, x15, x4
str x15, [sp, #560] ; 8-byte Folded Spill
adcs x12, x14, x13
str x12, [sp, #792] ; 8-byte Folded Spill
ldr x12, [sp, #848] ; 8-byte Folded Reload
adds x12, x8, x12
str x12, [sp, #464] ; 8-byte Folded Spill
ldr x8, [sp, #1008] ; 8-byte Folded Reload
adcs x11, x8, x11
str x11, [sp, #776] ; 8-byte Folded Spill
ldr x11, [sp, #832] ; 8-byte Folded Reload
msr NZCV, x11
adcs xzr, x26, x8
mrs x8, NZCV
str x8, [sp, #416] ; 8-byte Folded Spill
adcs x8, x12, x9
msr NZCV, x21
ldr x9, [sp, #400] ; 8-byte Folded Reload
adcs xzr, x9, x20
mrs x9, NZCV
stp x9, x8, [sp, #400] ; 16-byte Folded Spill
adcs x13, x15, x8
str x13, [sp, #680] ; 8-byte Folded Spill
cmp x25, x16
mov w14, #-1
umulh x11, x25, x14
str x11, [sp, #840] ; 8-byte Folded Spill
cset w8, hi
ldr x10, [sp, #640] ; 8-byte Folded Reload
adds x9, x10, x8
adds x9, x9, x11
cset w11, hs
cmn x10, x8
adcs x4, x23, x11
mrs x8, NZCV
str x8, [sp, #552] ; 8-byte Folded Spill
ldur x26, [x24, #28]
adcs x10, x7, x13
str x10, [sp, #184] ; 8-byte Folded Spill
ldr x16, [sp, #1040] ; 8-byte Folded Reload
mul x8, x16, x26
ldr x15, [sp, #1032] ; 8-byte Folded Reload
umulh x11, x15, x26
adds x13, x8, x11
mul x7, x2, x26
umulh x8, x16, x26
adcs x21, x7, x8
mul x8, x15, x26
cmn x9, x8
adcs x12, x13, x4
adds x6, x9, x8
lsl x15, x6, #32
sub x8, x15, x6
str x8, [sp, #952] ; 8-byte Folded Spill
adcs xzr, x4, x13
mrs x22, NZCV
adcs x8, x21, x10
cmp x6, x15
umulh x13, x6, x14
str x13, [sp, #832] ; 8-byte Folded Spill
cset w10, hi
adds x11, x12, x10
adds x13, x11, x13
cset w11, hs
cmn x12, x10
adcs x8, x8, x11
str x8, [sp, #224] ; 8-byte Folded Spill
mrs x8, NZCV
str x8, [sp, #640] ; 8-byte Folded Spill
mrs x23, NZCV
ldr x8, [sp, #872] ; 8-byte Folded Reload
ldr x9, [sp, #760] ; 8-byte Folded Reload
cmn x8, x9
ldr x8, [sp, #768] ; 8-byte Folded Reload
ldr x9, [sp, #728] ; 8-byte Folded Reload
adcs xzr, x9, x8
ldr x9, [sp, #1000] ; 8-byte Folded Reload
umulh x8, x1, x9
adcs x10, x8, xzr
ldr x15, [sp, #456] ; 8-byte Folded Reload
ldr x11, [sp, #424] ; 8-byte Folded Reload
cmp x15, x11
ldr x11, [sp, #936] ; 8-byte Folded Reload
ldr x28, [x11, #40]
ldr x14, [sp, #1024] ; 8-byte Folded Reload
mul x11, x28, x14
cset w12, lo
adds x15, x15, x11
str x15, [sp, #256] ; 8-byte Folded Spill
umulh x4, x28, x14
adcs x11, x12, x4
str x11, [sp, #424] ; 8-byte Folded Spill
ldr x11, [sp, #1016] ; 8-byte Folded Reload
cmp x27, x11
cinc x5, x3, lo
ldr x11, [sp, #448] ; 8-byte Folded Reload
msr NZCV, x11
ldr x11, [sp, #632] ; 8-byte Folded Reload
adcs xzr, x11, x27
mrs x11, NZCV
str x11, [sp, #168] ; 8-byte Folded Spill
adcs x2, x5, x15
str x2, [sp, #448] ; 8-byte Folded Spill
ldr x27, [sp, #528] ; 8-byte Folded Reload
cmp x27, x0
cset w11, lo
ldr x24, [sp, #536] ; 8-byte Folded Reload
ldp x12, x14, [sp, #384] ; 16-byte Folded Reload
cmp x24, x12
cset w12, lo
ldr x20, [sp, #544] ; 8-byte Folded Reload
cmp x20, x14
cset w14, lo
ldr x19, [sp, #608] ; 8-byte Folded Reload
ldr x15, [sp, #440] ; 8-byte Folded Reload
cmp x19, x15
cset w15, lo
cmp x10, x8
ldr x0, [sp, #1104] ; 8-byte Folded Reload
mul x8, x0, x9
cset w16, lo
adds x3, x10, x8
str x3, [sp, #632] ; 8-byte Folded Spill
umulh x8, x0, x9
str x8, [sp, #376] ; 8-byte Folded Spill
adcs x8, x16, x8
str x8, [sp, #440] ; 8-byte Folded Spill
ldr x9, [sp, #1112] ; 8-byte Folded Reload
ldr x10, [sp, #1048] ; 8-byte Folded Reload
mul x8, x9, x10
adds x19, x19, x8
str x19, [sp, #608] ; 8-byte Folded Spill
umulh x8, x9, x10
str x8, [sp, #360] ; 8-byte Folded Spill
adcs x8, x15, x8
str x8, [sp, #392] ; 8-byte Folded Spill
ldr x9, [sp, #1144] ; 8-byte Folded Reload
ldr x10, [sp, #968] ; 8-byte Folded Reload
mul x8, x9, x10
adds x20, x20, x8
str x20, [sp, #456] ; 8-byte Folded Spill
umulh x8, x9, x10
str x8, [sp, #320] ; 8-byte Folded Spill
adcs x8, x14, x8
str x8, [sp, #384] ; 8-byte Folded Spill
ldr x10, [sp, #1120] ; 8-byte Folded Reload
ldr x9, [sp, #1072] ; 8-byte Folded Reload
mul x8, x10, x9
adds x0, x24, x8
str x0, [sp, #232] ; 8-byte Folded Spill
umulh x8, x10, x9
str x8, [sp, #216] ; 8-byte Folded Spill
adcs x8, x12, x8
str x8, [sp, #160] ; 8-byte Folded Spill
ldr x9, [sp, #1064] ; 8-byte Folded Reload
ldr x10, [sp, #1136] ; 8-byte Folded Reload
mul x8, x10, x9
adds x12, x27, x8
str x12, [sp, #176] ; 8-byte Folded Spill
umulh x8, x10, x9
str x8, [sp, #544] ; 8-byte Folded Spill
adcs x8, x11, x8
str x8, [sp, #528] ; 8-byte Folded Spill
ldr x8, [sp, #520] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #696] ; 8-byte Folded Reload
ldr x9, [sp, #672] ; 8-byte Folded Reload
adcs xzr, x8, x9
mrs x8, NZCV
str x8, [sp, #144] ; 8-byte Folded Spill
adcs x10, x12, x2
str x10, [sp, #520] ; 8-byte Folded Spill
ldr x15, [sp, #928] ; 8-byte Folded Reload
ldr x9, [sp, #368] ; 8-byte Folded Reload
cmp x9, x15
ldr x16, [sp, #864] ; 8-byte Folded Reload
cinc x14, x16, lo
ldr x8, [sp, #512] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #664] ; 8-byte Folded Reload
adcs xzr, x8, x9
mrs x8, NZCV
str x8, [sp, #512] ; 8-byte Folded Spill
adcs x10, x14, x10
str x10, [sp, #696] ; 8-byte Folded Spill
ldr x8, [sp, #504] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #688] ; 8-byte Folded Reload
ldr x9, [sp, #600] ; 8-byte Folded Reload
adcs xzr, x9, x8
mrs x8, NZCV
str x8, [sp, #600] ; 8-byte Folded Spill
adcs x9, x0, x10
str x9, [sp, #688] ; 8-byte Folded Spill
ldr x24, [sp, #904] ; 8-byte Folded Reload
cmp x30, x24
ldr x2, [sp, #784] ; 8-byte Folded Reload
cinc x0, x2, lo
ldr x8, [sp, #496] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #656] ; 8-byte Folded Reload
adcs xzr, x8, x30
mrs x8, NZCV
str x8, [sp, #496] ; 8-byte Folded Spill
adcs x10, x0, x9
str x10, [sp, #336] ; 8-byte Folded Spill
ldr x8, [sp, #488] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #720] ; 8-byte Folded Reload
ldr x9, [sp, #592] ; 8-byte Folded Reload
adcs xzr, x9, x8
mrs x8, NZCV
str x8, [sp, #304] ; 8-byte Folded Spill
adcs x9, x20, x10
str x9, [sp, #352] ; 8-byte Folded Spill
ldr x8, [sp, #920] ; 8-byte Folded Reload
cmp x17, x8
ldr x8, [sp, #880] ; 8-byte Folded Reload
cinc x10, x8, lo
str x10, [sp, #328] ; 8-byte Folded Spill
ldr x8, [sp, #480] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #648] ; 8-byte Folded Reload
adcs xzr, x8, x17
mrs x8, NZCV
str x8, [sp, #296] ; 8-byte Folded Spill
adcs x10, x10, x9
str x10, [sp, #312] ; 8-byte Folded Spill
ldr x8, [sp, #472] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #704] ; 8-byte Folded Reload
ldr x9, [sp, #584] ; 8-byte Folded Reload
adcs xzr, x9, x8
mrs x8, NZCV
str x8, [sp, #240] ; 8-byte Folded Spill
adcs x9, x19, x10
str x9, [sp, #344] ; 8-byte Folded Spill
ldr x8, [sp, #976] ; 8-byte Folded Reload
ldr x12, [sp, #944] ; 8-byte Folded Reload
cmp x12, x8
ldr x10, [sp, #816] ; 8-byte Folded Reload
add x8, x8, x10
str x8, [sp, #872] ; 8-byte Folded Spill
cinc x20, x8, lo
ldr x8, [sp, #576] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #624] ; 8-byte Folded Reload
adcs xzr, x8, x12
mrs x8, NZCV
str x8, [sp, #208] ; 8-byte Folded Spill
adcs x10, x20, x9
str x10, [sp, #288] ; 8-byte Folded Spill
ldr x8, [sp, #568] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #992] ; 8-byte Folded Reload
ldr x9, [sp, #616] ; 8-byte Folded Reload
adcs xzr, x9, x8
mrs x8, NZCV
str x8, [sp, #200] ; 8-byte Folded Spill
adcs x10, x3, x10
str x10, [sp, #280] ; 8-byte Folded Spill
ldr x8, [sp, #416] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #464] ; 8-byte Folded Reload
ldr x9, [sp, #432] ; 8-byte Folded Reload
adcs xzr, x9, x8
mrs x8, NZCV
str x8, [sp, #264] ; 8-byte Folded Spill
ldr x8, [sp, #776] ; 8-byte Folded Reload
adcs x10, x8, x10
str x10, [sp, #272] ; 8-byte Folded Spill
ldp x8, x9, [sp, #400] ; 16-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #560] ; 8-byte Folded Reload
adcs xzr, x9, x8
mrs x8, NZCV
str x8, [sp, #248] ; 8-byte Folded Spill
ldr x8, [sp, #792] ; 8-byte Folded Reload
adcs x11, x8, x10
str x11, [sp, #408] ; 8-byte Folded Spill
cmp x21, x7
ldr x9, [sp, #1088] ; 8-byte Folded Reload
mov x30, x26
umulh x12, x9, x26
cinc x10, x12, lo
str x10, [sp, #728] ; 8-byte Folded Spill
ldr x8, [sp, #984] ; 8-byte Folded Reload
ldr x8, [x8, #32]
str x8, [sp, #992] ; 8-byte Folded Spill
ldr x26, [sp, #1040] ; 8-byte Folded Reload
mul x12, x26, x8
ldr x19, [sp, #1032] ; 8-byte Folded Reload
umulh x17, x19, x8
adds x17, x12, x17
mov w12, #-2
mul x3, x25, x12
umulh x7, x25, x12
mul x12, x9, x8
umulh x25, x26, x8
adcs x26, x12, x25
mul x27, x19, x8
cmn x13, x27
str x30, [sp, #960] ; 8-byte Folded Spill
mul x9, x1, x30
str x9, [sp, #720] ; 8-byte Folded Spill
ldr x8, [sp, #1096] ; 8-byte Folded Reload
mul x19, x8, x30
str x19, [sp, #672] ; 8-byte Folded Spill
umulh x25, x8, x30
str x25, [sp, #664] ; 8-byte Folded Spill
ldr x30, [sp, #224] ; 8-byte Folded Reload
adcs x8, x17, x30
str x8, [sp, #480] ; 8-byte Folded Spill
adds x19, x10, x19
str x19, [sp, #192] ; 8-byte Folded Spill
adcs x8, x9, x25
str x8, [sp, #768] ; 8-byte Folded Spill
ldr x25, [sp, #840] ; 8-byte Folded Reload
adds x10, x3, x25
str x10, [sp, #504] ; 8-byte Folded Spill
ldr x8, [sp, #1056] ; 8-byte Folded Reload
adcs x9, x8, x7
str x9, [sp, #760] ; 8-byte Folded Spill
ldr x9, [sp, #552] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #680] ; 8-byte Folded Reload
adcs xzr, x9, x8
mrs x8, NZCV
str x8, [sp, #152] ; 8-byte Folded Spill
adcs x8, x10, x11
str x8, [sp, #472] ; 8-byte Folded Spill
msr NZCV, x22
ldr x9, [sp, #184] ; 8-byte Folded Reload
adcs xzr, x9, x21
mrs x9, NZCV
str x9, [sp, #136] ; 8-byte Folded Spill
adcs x8, x19, x8
str x8, [sp, #624] ; 8-byte Folded Spill
msr NZCV, x23
ldr x25, [sp, #952] ; 8-byte Folded Reload
adcs x8, x25, x8
str x8, [sp, #184] ; 8-byte Folded Spill
adds x23, x13, x27
lsl x22, x23, #32
sub x9, x22, x23
str x9, [sp, #944] ; 8-byte Folded Spill
adcs xzr, x30, x17
mrs x19, NZCV
adcs x3, x26, x8
ldr x8, [sp, #752] ; 8-byte Folded Reload
ldr x9, [sp, #744] ; 8-byte Folded Reload
cmn x8, x9
ldr x8, [sp, #912] ; 8-byte Folded Reload
ldr x9, [sp, #736] ; 8-byte Folded Reload
adcs xzr, x9, x8
mov x9, x1
ldr x21, [sp, #1080] ; 8-byte Folded Reload
umulh x7, x1, x21
adcs x27, x7, xzr
ldr x10, [sp, #424] ; 8-byte Folded Reload
cmp x10, x4
ldr x8, [sp, #936] ; 8-byte Folded Reload
ldur x8, [x8, #44]
str x8, [sp, #912] ; 8-byte Folded Spill
ldr x13, [sp, #1024] ; 8-byte Folded Reload
mul x17, x8, x13
cset w4, lo
adds x10, x10, x17
str x10, [sp, #576] ; 8-byte Folded Spill
umulh x17, x8, x13
adcs x8, x4, x17
str x8, [sp, #736] ; 8-byte Folded Spill
ldr x8, [sp, #1016] ; 8-byte Folded Reload
cmp x5, x8
ldr x8, [sp, #896] ; 8-byte Folded Reload
cinc x4, x8, lo
ldr x8, [sp, #168] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #256] ; 8-byte Folded Reload
adcs xzr, x8, x5
mrs x8, NZCV
str x8, [sp, #536] ; 8-byte Folded Spill
adcs x13, x4, x10
str x13, [sp, #560] ; 8-byte Folded Spill
ldr x8, [sp, #544] ; 8-byte Folded Reload
ldr x11, [sp, #528] ; 8-byte Folded Reload
cmp x11, x8
str x28, [sp, #1128] ; 8-byte Folded Spill
ldr x10, [sp, #1064] ; 8-byte Folded Reload
mul x17, x28, x10
cset w1, lo
adds x17, x11, x17
str x17, [sp, #552] ; 8-byte Folded Spill
umulh x10, x28, x10
str x10, [sp, #656] ; 8-byte Folded Spill
adcs x8, x1, x10
str x8, [sp, #616] ; 8-byte Folded Spill
ldr x8, [sp, #144] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #448] ; 8-byte Folded Reload
ldr x10, [sp, #176] ; 8-byte Folded Reload
adcs xzr, x8, x10
mrs x8, NZCV
str x8, [sp, #464] ; 8-byte Folded Spill
adcs x8, x17, x13
str x8, [sp, #544] ; 8-byte Folded Spill
cmp x14, x15
cinc x5, x16, lo
ldr x10, [sp, #512] ; 8-byte Folded Reload
msr NZCV, x10
ldr x10, [sp, #520] ; 8-byte Folded Reload
adcs xzr, x10, x14
mrs x10, NZCV
str x10, [sp, #448] ; 8-byte Folded Spill
adcs x13, x5, x8
str x13, [sp, #568] ; 8-byte Folded Spill
ldr x8, [sp, #216] ; 8-byte Folded Reload
ldr x15, [sp, #160] ; 8-byte Folded Reload
cmp x15, x8
ldr x10, [sp, #1072] ; 8-byte Folded Reload
ldr x11, [sp, #1136] ; 8-byte Folded Reload
mul x8, x11, x10
cset w14, lo
adds x8, x15, x8
str x8, [sp, #520] ; 8-byte Folded Spill
umulh x10, x11, x10
str x10, [sp, #368] ; 8-byte Folded Spill
adcs x10, x14, x10
str x10, [sp, #432] ; 8-byte Folded Spill
ldr x10, [sp, #600] ; 8-byte Folded Reload
msr NZCV, x10
ldr x10, [sp, #696] ; 8-byte Folded Reload
ldr x11, [sp, #232] ; 8-byte Folded Reload
adcs xzr, x10, x11
mrs x10, NZCV
str x10, [sp, #424] ; 8-byte Folded Spill
adcs x10, x8, x13
str x10, [sp, #512] ; 8-byte Folded Spill
cmp x0, x24
cinc x8, x2, lo
str x8, [sp, #648] ; 8-byte Folded Spill
ldr x11, [sp, #496] ; 8-byte Folded Reload
msr NZCV, x11
ldr x11, [sp, #688] ; 8-byte Folded Reload
adcs xzr, x11, x0
mrs x11, NZCV
str x11, [sp, #416] ; 8-byte Folded Spill
adcs x10, x8, x10
str x10, [sp, #496] ; 8-byte Folded Spill
ldp x30, x28, [sp, #384] ; 16-byte Folded Reload
ldr x8, [sp, #320] ; 8-byte Folded Reload
cmp x30, x8
cset w14, lo
ldr x8, [sp, #360] ; 8-byte Folded Reload
cmp x28, x8
cset w8, lo
ldr x24, [sp, #440] ; 8-byte Folded Reload
ldr x11, [sp, #376] ; 8-byte Folded Reload
cmp x24, x11
cset w15, lo
cmp x27, x7
cset w16, lo
cmp x26, x12
mov w11, #-2
mul x12, x6, x11
umulh x11, x6, x11
ldr x17, [sp, #992] ; 8-byte Folded Reload
ldr x13, [sp, #1088] ; 8-byte Folded Reload
umulh x0, x13, x17
cinc x1, x0, lo
str x1, [sp, #696] ; 8-byte Folded Spill
mul x0, x9, x17
str x0, [sp, #704] ; 8-byte Folded Spill
mov x7, x9
ldr x9, [sp, #1096] ; 8-byte Folded Reload
mul x2, x9, x17
str x2, [sp, #688] ; 8-byte Folded Spill
umulh x9, x9, x17
str x9, [sp, #680] ; 8-byte Folded Spill
adds x1, x1, x2
str x1, [sp, #600] ; 8-byte Folded Spill
adcs x9, x0, x9
str x9, [sp, #752] ; 8-byte Folded Spill
ldr x0, [sp, #832] ; 8-byte Folded Reload
adds x2, x12, x0
str x2, [sp, #592] ; 8-byte Folded Spill
adcs x9, x25, x11
str x9, [sp, #744] ; 8-byte Folded Spill
ldr x11, [sp, #1104] ; 8-byte Folded Reload
mov x12, x21
mul x9, x11, x21
adds x21, x27, x9
str x21, [sp, #584] ; 8-byte Folded Spill
umulh x9, x11, x12
str x9, [sp, #232] ; 8-byte Folded Spill
adcs x9, x16, x9
str x9, [sp, #224] ; 8-byte Folded Spill
ldr x11, [sp, #1000] ; 8-byte Folded Reload
ldr x12, [sp, #1112] ; 8-byte Folded Reload
mul x9, x12, x11
adds x16, x24, x9
str x16, [sp, #488] ; 8-byte Folded Spill
umulh x9, x12, x11
str x9, [sp, #528] ; 8-byte Folded Spill
adcs x9, x15, x9
str x9, [sp, #256] ; 8-byte Folded Spill
ldr x12, [sp, #1048] ; 8-byte Folded Reload
ldr x11, [sp, #1144] ; 8-byte Folded Reload
mul x9, x11, x12
adds x15, x28, x9
str x15, [sp, #400] ; 8-byte Folded Spill
umulh x0, x11, x12
adcs x8, x8, x0
str x8, [sp, #216] ; 8-byte Folded Spill
ldr x9, [sp, #968] ; 8-byte Folded Reload
ldr x11, [sp, #1120] ; 8-byte Folded Reload
mul x8, x11, x9
adds x12, x30, x8
str x12, [sp, #384] ; 8-byte Folded Spill
umulh x8, x11, x9
adcs x9, x14, x8
str x9, [sp, #144] ; 8-byte Folded Spill
ldr x9, [sp, #304] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #456] ; 8-byte Folded Reload
ldr x11, [sp, #336] ; 8-byte Folded Reload
adcs xzr, x11, x9
mrs x9, NZCV
str x9, [sp, #336] ; 8-byte Folded Spill
adcs x10, x12, x10
str x10, [sp, #360] ; 8-byte Folded Spill
ldr x30, [sp, #920] ; 8-byte Folded Reload
ldr x12, [sp, #328] ; 8-byte Folded Reload
cmp x12, x30
ldr x9, [sp, #880] ; 8-byte Folded Reload
cinc x11, x9, lo
str x11, [sp, #112] ; 8-byte Folded Spill
ldr x9, [sp, #296] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #352] ; 8-byte Folded Reload
adcs xzr, x9, x12
mrs x9, NZCV
adcs x10, x11, x10
stp x10, x9, [sp, #320] ; 16-byte Folded Spill
ldr x9, [sp, #240] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #608] ; 8-byte Folded Reload
ldr x11, [sp, #312] ; 8-byte Folded Reload
adcs xzr, x11, x9
mrs x9, NZCV
str x9, [sp, #312] ; 8-byte Folded Spill
adcs x10, x15, x10
str x10, [sp, #352] ; 8-byte Folded Spill
ldr x9, [sp, #976] ; 8-byte Folded Reload
cmp x20, x9
ldr x9, [sp, #872] ; 8-byte Folded Reload
cinc x9, x9, lo
str x9, [sp, #440] ; 8-byte Folded Spill
ldr x11, [sp, #208] ; 8-byte Folded Reload
msr NZCV, x11
ldr x11, [sp, #344] ; 8-byte Folded Reload
adcs xzr, x11, x20
mrs x11, NZCV
adcs x10, x9, x10
stp x11, x10, [sp, #296] ; 16-byte Folded Spill
ldr x9, [sp, #200] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #632] ; 8-byte Folded Reload
ldr x11, [sp, #288] ; 8-byte Folded Reload
adcs xzr, x11, x9
mrs x9, NZCV
str x9, [sp, #288] ; 8-byte Folded Spill
adcs x12, x16, x10
str x12, [sp, #344] ; 8-byte Folded Spill
ldr x9, [sp, #1008] ; 8-byte Folded Reload
ldr x11, [sp, #776] ; 8-byte Folded Reload
cmp x11, x9
ldr x10, [sp, #848] ; 8-byte Folded Reload
add x10, x9, x10
str x10, [sp, #896] ; 8-byte Folded Spill
cinc x10, x10, lo
str x10, [sp, #104] ; 8-byte Folded Spill
ldr x9, [sp, #264] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #280] ; 8-byte Folded Reload
adcs xzr, x9, x11
mrs x9, NZCV
str x9, [sp, #168] ; 8-byte Folded Spill
adcs x10, x10, x12
str x10, [sp, #392] ; 8-byte Folded Spill
ldr x9, [sp, #248] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #792] ; 8-byte Folded Reload
ldr x11, [sp, #272] ; 8-byte Folded Reload
adcs xzr, x11, x9
mrs x9, NZCV
str x9, [sp, #160] ; 8-byte Folded Spill
adcs x10, x21, x10
str x10, [sp, #280] ; 8-byte Folded Spill
ldr x9, [sp, #152] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #504] ; 8-byte Folded Reload
ldr x11, [sp, #408] ; 8-byte Folded Reload
adcs xzr, x11, x9
mrs x9, NZCV
str x9, [sp, #208] ; 8-byte Folded Spill
ldr x9, [sp, #760] ; 8-byte Folded Reload
adcs x10, x9, x10
str x10, [sp, #272] ; 8-byte Folded Spill
ldr x9, [sp, #136] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #472] ; 8-byte Folded Reload
ldr x11, [sp, #192] ; 8-byte Folded Reload
adcs xzr, x9, x11
mrs x9, NZCV
str x9, [sp, #200] ; 8-byte Folded Spill
ldr x9, [sp, #768] ; 8-byte Folded Reload
adcs x10, x9, x10
ldr x9, [sp, #640] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #624] ; 8-byte Folded Reload
adcs xzr, x9, x25
mrs x9, NZCV
str x9, [sp, #192] ; 8-byte Folded Spill
adcs x9, x2, x10
stp x9, x10, [sp, #240] ; 16-byte Folded Spill
msr NZCV, x19
ldr x10, [sp, #184] ; 8-byte Folded Reload
adcs xzr, x10, x26
mrs x10, NZCV
str x10, [sp, #184] ; 8-byte Folded Spill
adcs x9, x1, x9
cmp x23, x22
mov w10, #-1
umulh x27, x23, x10
cset w11, hi
ldr x10, [sp, #480] ; 8-byte Folded Reload
adds x15, x10, x11
adds x2, x15, x27
str x27, [sp, #776] ; 8-byte Folded Spill
cset w15, hs
cmn x10, x11
adcs x11, x3, x15
mrs x15, NZCV
ldr x25, [sp, #944] ; 8-byte Folded Reload
adcs x17, x25, x9
str x17, [sp, #176] ; 8-byte Folded Spill
msr NZCV, x15
adcs xzr, x9, x25
mrs x9, NZCV
str x9, [sp, #152] ; 8-byte Folded Spill
str x9, [sp, #120] ; 8-byte Folded Spill
ldr x9, [sp, #984] ; 8-byte Folded Reload
ldur x14, [x9, #36]
str x14, [sp, #936] ; 8-byte Folded Spill
ldr x12, [sp, #1040] ; 8-byte Folded Reload
mul x9, x12, x14
ldr x10, [sp, #1032] ; 8-byte Folded Reload
umulh x15, x10, x14
adds x15, x9, x15
mul x13, x13, x14
str x13, [sp, #128] ; 8-byte Folded Spill
umulh x9, x12, x14
adcs x28, x13, x9
mul x3, x10, x14
cmn x2, x3
adcs x9, x15, x11
str x9, [sp, #264] ; 8-byte Folded Spill
adds x9, x2, x3
str x9, [sp, #792] ; 8-byte Folded Spill
lsl x21, x9, #32
sub x26, x21, x9
str x26, [sp, #1024] ; 8-byte Folded Spill
adcs xzr, x11, x15
mrs x9, NZCV
str x9, [sp, #96] ; 8-byte Folded Spill
adcs x9, x28, x17
str x9, [sp, #88] ; 8-byte Folded Spill
ldr x9, [sp, #728] ; 8-byte Folded Reload
ldr x11, [sp, #672] ; 8-byte Folded Reload
cmn x9, x11
ldr x9, [sp, #720] ; 8-byte Folded Reload
ldr x11, [sp, #664] ; 8-byte Folded Reload
adcs xzr, x11, x9
ldr x12, [sp, #960] ; 8-byte Folded Reload
umulh x2, x7, x12
adcs x3, x2, xzr
ldr x11, [sp, #1016] ; 8-byte Folded Reload
cmp x4, x11
ldr x11, [sp, #856] ; 8-byte Folded Reload
cinc x9, x11, lo
str x9, [sp, #504] ; 8-byte Folded Spill
ldr x10, [sp, #536] ; 8-byte Folded Reload
msr NZCV, x10
ldr x10, [sp, #576] ; 8-byte Folded Reload
adcs xzr, x10, x4
mrs x10, NZCV
ldr x6, [sp, #912] ; 8-byte Folded Reload
ldr x24, [sp, #1064] ; 8-byte Folded Reload
mul x19, x6, x24
ldr x4, [sp, #736] ; 8-byte Folded Reload
adcs x9, x9, x4
stp x10, x9, [sp, #472] ; 16-byte Folded Spill
ldr x22, [sp, #616] ; 8-byte Folded Reload
adds x4, x22, x19
ldr x10, [sp, #464] ; 8-byte Folded Reload
msr NZCV, x10
ldr x10, [sp, #560] ; 8-byte Folded Reload
ldr x11, [sp, #552] ; 8-byte Folded Reload
adcs xzr, x10, x11
mrs x10, NZCV
str x10, [sp, #408] ; 8-byte Folded Spill
adcs x10, x4, x9
ldr x4, [sp, #928] ; 8-byte Folded Reload
cmp x5, x4
ldr x9, [sp, #864] ; 8-byte Folded Reload
cinc x9, x9, lo
str x9, [sp, #640] ; 8-byte Folded Spill
ldr x11, [sp, #448] ; 8-byte Folded Reload
msr NZCV, x11
ldr x11, [sp, #544] ; 8-byte Folded Reload
adcs xzr, x11, x5
mrs x11, NZCV
str x11, [sp, #376] ; 8-byte Folded Spill
adcs x1, x9, x10
stp x1, x10, [sp, #456] ; 16-byte Folded Spill
ldr x17, [sp, #432] ; 8-byte Folded Reload
ldr x9, [sp, #368] ; 8-byte Folded Reload
cmp x17, x9
cset w4, lo
ldr x16, [sp, #144] ; 8-byte Folded Reload
cmp x16, x8
cset w15, lo
ldp x14, x8, [sp, #216] ; 16-byte Folded Reload
cmp x14, x0
cset w5, lo
ldr x9, [sp, #528] ; 8-byte Folded Reload
ldr x11, [sp, #256] ; 8-byte Folded Reload
cmp x11, x9
cset w0, lo
ldr x9, [sp, #232] ; 8-byte Folded Reload
cmp x8, x9
cset w7, lo
cmp x3, x2
mov w20, #-2
mul x2, x23, x20
umulh x13, x23, x20
cset w20, lo
adds x2, x2, x27
adcs x13, x25, x13
str x13, [sp, #720] ; 8-byte Folded Spill
ldr x10, [sp, #1104] ; 8-byte Folded Reload
mul x13, x10, x12
adds x25, x3, x13
str x25, [sp, #624] ; 8-byte Folded Spill
umulh x9, x10, x12
str x9, [sp, #448] ; 8-byte Folded Spill
adcs x9, x20, x9
str x9, [sp, #560] ; 8-byte Folded Spill
ldr x9, [sp, #1080] ; 8-byte Folded Reload
ldr x12, [sp, #1112] ; 8-byte Folded Reload
mul x13, x12, x9
adds x10, x8, x13
str x10, [sp, #664] ; 8-byte Folded Spill
umulh x9, x12, x9
str x9, [sp, #368] ; 8-byte Folded Spill
adcs x9, x7, x9
str x9, [sp, #552] ; 8-byte Folded Spill
ldr x9, [sp, #1000] ; 8-byte Folded Reload
ldr x12, [sp, #1144] ; 8-byte Folded Reload
mul x13, x12, x9
adds x20, x11, x13
str x20, [sp, #632] ; 8-byte Folded Spill
umulh x9, x12, x9
str x9, [sp, #256] ; 8-byte Folded Spill
adcs x9, x0, x9
str x9, [sp, #544] ; 8-byte Folded Spill
ldr x12, [sp, #1048] ; 8-byte Folded Reload
ldr x0, [sp, #1120] ; 8-byte Folded Reload
mul x13, x0, x12
adds x9, x14, x13
str x9, [sp, #608] ; 8-byte Folded Spill
umulh x12, x0, x12
str x12, [sp, #232] ; 8-byte Folded Spill
adcs x12, x5, x12
str x12, [sp, #536] ; 8-byte Folded Spill
ldr x0, [sp, #1136] ; 8-byte Folded Reload
ldr x12, [sp, #968] ; 8-byte Folded Reload
mul x13, x0, x12
adds x23, x16, x13
str x23, [sp, #576] ; 8-byte Folded Spill
umulh x12, x0, x12
str x12, [sp, #224] ; 8-byte Folded Spill
adcs x8, x15, x12
str x8, [sp, #528] ; 8-byte Folded Spill
ldr x13, [sp, #1128] ; 8-byte Folded Reload
ldr x12, [sp, #1072] ; 8-byte Folded Reload
mul x8, x13, x12
adds x5, x17, x8
umulh x8, x13, x12
str x8, [sp, #672] ; 8-byte Folded Spill
adcs x27, x4, x8
ldp x11, x8, [sp, #416] ; 16-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #568] ; 8-byte Folded Reload
ldr x12, [sp, #520] ; 8-byte Folded Reload
adcs xzr, x8, x12
mrs x8, NZCV
str x8, [sp, #144] ; 8-byte Folded Spill
adcs x13, x5, x1
str x13, [sp, #216] ; 8-byte Folded Spill
ldr x7, [sp, #904] ; 8-byte Folded Reload
ldr x8, [sp, #648] ; 8-byte Folded Reload
cmp x8, x7
ldr x4, [sp, #784] ; 8-byte Folded Reload
cinc x3, x4, lo
msr NZCV, x11
ldr x12, [sp, #512] ; 8-byte Folded Reload
adcs xzr, x12, x8
mrs x8, NZCV
str x8, [sp, #136] ; 8-byte Folded Spill
adcs x11, x3, x13
str x11, [sp, #424] ; 8-byte Folded Spill
ldr x8, [sp, #336] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #496] ; 8-byte Folded Reload
ldr x12, [sp, #384] ; 8-byte Folded Reload
adcs xzr, x8, x12
mrs x8, NZCV
str x8, [sp, #336] ; 8-byte Folded Spill
adcs x11, x23, x11
str x11, [sp, #520] ; 8-byte Folded Spill
ldr x13, [sp, #112] ; 8-byte Folded Reload
cmp x13, x30
ldr x8, [sp, #880] ; 8-byte Folded Reload
cinc x8, x8, lo
str x8, [sp, #568] ; 8-byte Folded Spill
ldr x12, [sp, #328] ; 8-byte Folded Reload
msr NZCV, x12
ldr x12, [sp, #360] ; 8-byte Folded Reload
adcs xzr, x12, x13
mrs x12, NZCV
str x12, [sp, #328] ; 8-byte Folded Spill
adcs x8, x8, x11
str x8, [sp, #416] ; 8-byte Folded Spill
ldp x11, x12, [sp, #312] ; 16-byte Folded Reload
msr NZCV, x11
ldr x11, [sp, #400] ; 8-byte Folded Reload
adcs xzr, x12, x11
mrs x11, NZCV
str x11, [sp, #320] ; 8-byte Folded Spill
adcs x11, x9, x8
str x11, [sp, #512] ; 8-byte Folded Spill
ldr x8, [sp, #976] ; 8-byte Folded Reload
ldr x9, [sp, #440] ; 8-byte Folded Reload
cmp x9, x8
ldr x8, [sp, #872] ; 8-byte Folded Reload
cinc x8, x8, lo
str x8, [sp, #648] ; 8-byte Folded Spill
ldr x12, [sp, #296] ; 8-byte Folded Reload
msr NZCV, x12
ldr x12, [sp, #352] ; 8-byte Folded Reload
adcs xzr, x12, x9
mrs x9, NZCV
str x9, [sp, #312] ; 8-byte Folded Spill
adcs x9, x8, x11
str x9, [sp, #400] ; 8-byte Folded Spill
ldr x8, [sp, #288] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #488] ; 8-byte Folded Reload
ldr x11, [sp, #304] ; 8-byte Folded Reload
adcs xzr, x11, x8
mrs x8, NZCV
str x8, [sp, #304] ; 8-byte Folded Spill
adcs x9, x20, x9
str x9, [sp, #496] ; 8-byte Folded Spill
ldr x8, [sp, #1008] ; 8-byte Folded Reload
ldr x11, [sp, #104] ; 8-byte Folded Reload
cmp x11, x8
ldr x8, [sp, #896] ; 8-byte Folded Reload
cinc x12, x8, lo
str x12, [sp, #112] ; 8-byte Folded Spill
ldr x8, [sp, #168] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #344] ; 8-byte Folded Reload
adcs xzr, x8, x11
mrs x8, NZCV
str x8, [sp, #296] ; 8-byte Folded Spill
adcs x9, x12, x9
str x9, [sp, #344] ; 8-byte Folded Spill
ldr x8, [sp, #160] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #584] ; 8-byte Folded Reload
ldr x11, [sp, #392] ; 8-byte Folded Reload
adcs xzr, x11, x8
mrs x8, NZCV
str x8, [sp, #288] ; 8-byte Folded Spill
adcs x9, x10, x9
str x9, [sp, #584] ; 8-byte Folded Spill
ldr x8, [sp, #1056] ; 8-byte Folded Reload
ldr x13, [sp, #760] ; 8-byte Folded Reload
cmp x13, x8
ldr x12, [sp, #840] ; 8-byte Folded Reload
add x8, x8, x12
str x8, [sp, #856] ; 8-byte Folded Spill
cinc x8, x8, lo
ldr x10, [sp, #208] ; 8-byte Folded Reload
msr NZCV, x10
ldr x10, [sp, #280] ; 8-byte Folded Reload
adcs xzr, x10, x13
mrs x10, NZCV
str x10, [sp, #280] ; 8-byte Folded Spill
adcs x9, x8, x9
stp x9, x8, [sp, #384] ; 16-byte Folded Spill
ldr x8, [sp, #200] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #768] ; 8-byte Folded Reload
ldr x10, [sp, #272] ; 8-byte Folded Reload
adcs xzr, x10, x8
mrs x8, NZCV
str x8, [sp, #272] ; 8-byte Folded Spill
adcs x9, x25, x9
str x9, [sp, #488] ; 8-byte Folded Spill
ldr x8, [sp, #192] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #592] ; 8-byte Folded Reload
ldr x10, [sp, #248] ; 8-byte Folded Reload
adcs xzr, x10, x8
mrs x8, NZCV
str x8, [sp, #360] ; 8-byte Folded Spill
ldr x8, [sp, #744] ; 8-byte Folded Reload
adcs x9, x8, x9
str x9, [sp, #592] ; 8-byte Folded Spill
ldr x8, [sp, #184] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #600] ; 8-byte Folded Reload
ldr x10, [sp, #240] ; 8-byte Folded Reload
adcs xzr, x10, x8
mrs x8, NZCV
str x8, [sp, #352] ; 8-byte Folded Spill
ldr x8, [sp, #752] ; 8-byte Folded Reload
adcs x8, x8, x9
ldr x9, [sp, #120] ; 8-byte Folded Reload
msr NZCV, x9
adcs x10, x2, x8
str x10, [sp, #600] ; 8-byte Folded Spill
ldr x9, [sp, #656] ; 8-byte Folded Reload
cmp x22, x9
cset w12, lo
adds x13, x22, x19
umulh x11, x6, x24
adcs x1, x12, x11
str x1, [sp, #80] ; 8-byte Folded Spill
ldr x9, [sp, #152] ; 8-byte Folded Reload
msr NZCV, x9
adcs xzr, x8, x2
mrs x8, NZCV
str x8, [sp, #616] ; 8-byte Folded Spill
str x8, [sp, #248] ; 8-byte Folded Spill
ldr x8, [sp, #128] ; 8-byte Folded Reload
cmp x28, x8
ldr x9, [sp, #936] ; 8-byte Folded Reload
ldr x16, [sp, #1088] ; 8-byte Folded Reload
umulh x8, x16, x9
cinc x8, x8, lo
str x8, [sp, #768] ; 8-byte Folded Spill
ldr x11, [sp, #1096] ; 8-byte Folded Reload
mul x11, x11, x9
str x11, [sp, #760] ; 8-byte Folded Spill
adds x8, x8, x11
ldr x9, [sp, #96] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #176] ; 8-byte Folded Reload
adcs xzr, x9, x28
mrs x9, NZCV
str x9, [sp, #440] ; 8-byte Folded Spill
adcs x0, x8, x10
str x0, [sp, #656] ; 8-byte Folded Spill
ldr x14, [sp, #792] ; 8-byte Folded Reload
cmp x14, x21
mov w8, #-1
umulh x11, x14, x8
str x11, [sp, #864] ; 8-byte Folded Spill
cset w8, hi
ldr x10, [sp, #264] ; 8-byte Folded Reload
adds x9, x10, x8
adds x11, x9, x11
cset w9, hs
cmn x10, x8
ldr x8, [sp, #88] ; 8-byte Folded Reload
adcs x8, x8, x9
mrs x9, NZCV
str x9, [sp, #264] ; 8-byte Folded Spill
ldr x9, [sp, #984] ; 8-byte Folded Reload
ldr x14, [x9, #40]
str x14, [sp, #1064] ; 8-byte Folded Spill
ldr x10, [sp, #1040] ; 8-byte Folded Reload
mul x9, x10, x14
ldr x15, [sp, #1032] ; 8-byte Folded Reload
umulh x12, x15, x14
adcs x0, x26, x0
adds x9, x9, x12
mul x16, x16, x14
str x16, [sp, #240] ; 8-byte Folded Spill
umulh x12, x10, x14
adcs x16, x16, x12
stp x0, x16, [sp, #200] ; 16-byte Folded Spill
mul x12, x15, x14
cmn x11, x12
adcs x10, x9, x8
str x10, [sp, #432] ; 8-byte Folded Spill
adds x10, x11, x12
str x10, [sp, #728] ; 8-byte Folded Spill
lsl x26, x10, #32
sub x10, x26, x10
str x10, [sp, #1016] ; 8-byte Folded Spill
adcs xzr, x8, x9
mrs x8, NZCV
str x8, [sp, #16] ; 8-byte Folded Spill
adcs x8, x16, x0
str x8, [sp, #24] ; 8-byte Folded Spill
ldr x8, [sp, #696] ; 8-byte Folded Reload
ldr x9, [sp, #688] ; 8-byte Folded Reload
cmn x8, x9
ldr x8, [sp, #704] ; 8-byte Folded Reload
ldr x9, [sp, #680] ; 8-byte Folded Reload
adcs xzr, x9, x8
ldr x2, [sp, #888] ; 8-byte Folded Reload
ldr x11, [sp, #992] ; 8-byte Folded Reload
umulh x12, x2, x11
adcs x14, x12, xzr
ldr x8, [sp, #472] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #736] ; 8-byte Folded Reload
ldr x9, [sp, #504] ; 8-byte Folded Reload
adcs xzr, x8, x9
adcs x8, x1, xzr
ldr x9, [sp, #408] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #480] ; 8-byte Folded Reload
adcs xzr, x9, x13
adcs x23, x8, xzr
ldr x8, [sp, #928] ; 8-byte Folded Reload
ldr x9, [sp, #640] ; 8-byte Folded Reload
cmp x9, x8
ldr x8, [sp, #800] ; 8-byte Folded Reload
cinc x10, x8, lo
str x10, [sp, #504] ; 8-byte Folded Spill
ldr x8, [sp, #376] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #464] ; 8-byte Folded Reload
adcs xzr, x8, x9
mrs x8, NZCV
str x8, [sp, #48] ; 8-byte Folded Spill
mov x1, x6
ldr x8, [sp, #1072] ; 8-byte Folded Reload
mul x0, x6, x8
adcs x10, x10, x23
str x10, [sp, #640] ; 8-byte Folded Spill
mov x20, x27
adds x8, x27, x0
ldr x9, [sp, #144] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #456] ; 8-byte Folded Reload
adcs xzr, x9, x5
mrs x9, NZCV
str x9, [sp, #40] ; 8-byte Folded Spill
adcs x9, x8, x10
str x9, [sp, #480] ; 8-byte Folded Spill
cmp x3, x7
cinc x22, x4, lo
ldr x8, [sp, #136] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #216] ; 8-byte Folded Reload
adcs xzr, x8, x3
mrs x8, NZCV
str x8, [sp, #32] ; 8-byte Folded Spill
adcs x25, x22, x9
str x25, [sp, #216] ; 8-byte Folded Spill
ldr x13, [sp, #528] ; 8-byte Folded Reload
ldr x8, [sp, #224] ; 8-byte Folded Reload
cmp x13, x8
cset w5, lo
ldr x10, [sp, #536] ; 8-byte Folded Reload
ldr x8, [sp, #232] ; 8-byte Folded Reload
cmp x10, x8
cset w4, lo
ldr x21, [sp, #544] ; 8-byte Folded Reload
ldr x8, [sp, #256] ; 8-byte Folded Reload
cmp x21, x8
cset w19, lo
ldr x7, [sp, #552] ; 8-byte Folded Reload
ldr x8, [sp, #368] ; 8-byte Folded Reload
cmp x7, x8
cset w15, lo
ldr x3, [sp, #560] ; 8-byte Folded Reload
ldr x8, [sp, #448] ; 8-byte Folded Reload
cmp x3, x8
cset w16, lo
cmp x14, x12
ldr x9, [sp, #1104] ; 8-byte Folded Reload
mul x12, x9, x11
cset w17, lo
adds x6, x14, x12
str x6, [sp, #256] ; 8-byte Folded Spill
umulh x9, x9, x11
str x9, [sp, #408] ; 8-byte Folded Spill
adcs x8, x17, x9
str x8, [sp, #368] ; 8-byte Folded Spill
ldr x8, [sp, #960] ; 8-byte Folded Reload
ldr x9, [sp, #1112] ; 8-byte Folded Reload
mul x12, x9, x8
adds x17, x3, x12
umulh x30, x9, x8
adcs x8, x16, x30
str x8, [sp, #472] ; 8-byte Folded Spill
ldr x8, [sp, #1080] ; 8-byte Folded Reload
ldr x9, [sp, #1144] ; 8-byte Folded Reload
mul x14, x9, x8
adds x11, x7, x14
str x11, [sp, #456] ; 8-byte Folded Spill
umulh x28, x9, x8
adcs x8, x15, x28
str x8, [sp, #128] ; 8-byte Folded Spill
ldr x8, [sp, #1000] ; 8-byte Folded Reload
ldr x9, [sp, #1120] ; 8-byte Folded Reload
mul x14, x9, x8
adds x12, x21, x14
umulh x24, x9, x8
adcs x19, x19, x24
ldr x8, [sp, #1048] ; 8-byte Folded Reload
ldr x9, [sp, #1136] ; 8-byte Folded Reload
mul x14, x9, x8
adds x10, x10, x14
stp x10, x12, [sp, #224] ; 16-byte Folded Spill
umulh x7, x9, x8
adcs x8, x4, x7
str x8, [sp, #192] ; 8-byte Folded Spill
ldr x8, [sp, #1128] ; 8-byte Folded Reload
ldr x9, [sp, #968] ; 8-byte Folded Reload
mul x21, x8, x9
adds x13, x13, x21
str x13, [sp, #184] ; 8-byte Folded Spill
umulh x9, x8, x9
str x9, [sp, #784] ; 8-byte Folded Spill
adcs x8, x5, x9
str x8, [sp, #56] ; 8-byte Folded Spill
ldr x8, [sp, #336] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #576] ; 8-byte Folded Reload
ldr x9, [sp, #424] ; 8-byte Folded Reload
adcs xzr, x9, x8
mrs x8, NZCV
str x8, [sp, #120] ; 8-byte Folded Spill
adcs x13, x13, x25
str x13, [sp, #336] ; 8-byte Folded Spill
ldr x8, [sp, #920] ; 8-byte Folded Reload
ldr x9, [sp, #568] ; 8-byte Folded Reload
cmp x9, x8
ldr x8, [sp, #880] ; 8-byte Folded Reload
cinc x21, x8, lo
ldr x8, [sp, #328] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #520] ; 8-byte Folded Reload
adcs xzr, x8, x9
mrs x8, NZCV
str x8, [sp, #104] ; 8-byte Folded Spill
adcs x13, x21, x13
str x13, [sp, #160] ; 8-byte Folded Spill
ldr x8, [sp, #320] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #608] ; 8-byte Folded Reload
ldr x9, [sp, #416] ; 8-byte Folded Reload
adcs xzr, x9, x8
mrs x8, NZCV
str x8, [sp, #608] ; 8-byte Folded Spill
adcs x10, x10, x13
str x10, [sp, #176] ; 8-byte Folded Spill
ldr x8, [sp, #976] ; 8-byte Folded Reload
ldr x9, [sp, #648] ; 8-byte Folded Reload
cmp x9, x8
ldr x8, [sp, #872] ; 8-byte Folded Reload
cinc x5, x8, lo
ldr x8, [sp, #312] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #512] ; 8-byte Folded Reload
adcs xzr, x8, x9
mrs x8, NZCV
str x8, [sp, #96] ; 8-byte Folded Spill
adcs x10, x5, x10
str x10, [sp, #152] ; 8-byte Folded Spill
ldr x8, [sp, #304] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #632] ; 8-byte Folded Reload
ldr x9, [sp, #400] ; 8-byte Folded Reload
adcs xzr, x9, x8
mrs x8, NZCV
str x8, [sp, #632] ; 8-byte Folded Spill
adcs x10, x12, x10
str x10, [sp, #648] ; 8-byte Folded Spill
ldr x9, [sp, #112] ; 8-byte Folded Reload
ldr x8, [sp, #1008] ; 8-byte Folded Reload
cmp x9, x8
ldr x8, [sp, #896] ; 8-byte Folded Reload
cinc x25, x8, lo
ldr x8, [sp, #296] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #496] ; 8-byte Folded Reload
adcs xzr, x8, x9
mrs x8, NZCV
str x8, [sp, #112] ; 8-byte Folded Spill
adcs x10, x25, x10
str x10, [sp, #144] ; 8-byte Folded Spill
ldr x8, [sp, #288] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #664] ; 8-byte Folded Reload
ldr x9, [sp, #344] ; 8-byte Folded Reload
adcs xzr, x9, x8
mrs x8, NZCV
str x8, [sp, #344] ; 8-byte Folded Spill
adcs x10, x11, x10
str x10, [sp, #168] ; 8-byte Folded Spill
ldr x8, [sp, #1056] ; 8-byte Folded Reload
ldr x9, [sp, #392] ; 8-byte Folded Reload
cmp x9, x8
ldr x8, [sp, #856] ; 8-byte Folded Reload
cinc x27, x8, lo
ldr x8, [sp, #280] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #584] ; 8-byte Folded Reload
adcs xzr, x8, x9
mrs x8, NZCV
str x8, [sp, #88] ; 8-byte Folded Spill
adcs x14, x27, x10
ldr x8, [sp, #272] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #624] ; 8-byte Folded Reload
ldr x9, [sp, #384] ; 8-byte Folded Reload
adcs xzr, x9, x8
mrs x16, NZCV
adcs x10, x17, x14
str x10, [sp, #136] ; 8-byte Folded Spill
ldr x8, [sp, #952] ; 8-byte Folded Reload
ldr x11, [sp, #744] ; 8-byte Folded Reload
cmp x11, x8
ldr x9, [sp, #832] ; 8-byte Folded Reload
add x9, x8, x9
str x9, [sp, #800] ; 8-byte Folded Spill
cinc x3, x9, lo
ldr x8, [sp, #360] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #488] ; 8-byte Folded Reload
adcs xzr, x8, x11
mrs x8, NZCV
str x8, [sp, #72] ; 8-byte Folded Spill
adcs x10, x3, x10
str x10, [sp, #744] ; 8-byte Folded Spill
ldr x8, [sp, #352] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #752] ; 8-byte Folded Reload
ldr x9, [sp, #592] ; 8-byte Folded Reload
adcs xzr, x9, x8
mrs x8, NZCV
str x8, [sp, #64] ; 8-byte Folded Spill
adcs x9, x6, x10
ldr x8, [sp, #248] ; 8-byte Folded Reload
msr NZCV, x8
ldr x12, [sp, #720] ; 8-byte Folded Reload
adcs x4, x12, x9
str x4, [sp, #568] ; 8-byte Folded Spill
ldr x8, [sp, #672] ; 8-byte Folded Reload
cmp x20, x8
ldr x8, [sp, #1072] ; 8-byte Folded Reload
umulh x8, x1, x8
cset w10, lo
cinc x11, x8, lo
cmn x20, x0
adcs x8, x10, x8
str x8, [sp, #696] ; 8-byte Folded Spill
ldr x8, [sp, #80] ; 8-byte Folded Reload
cmp x23, x8
cset w8, lo
adds x15, x20, x0
adcs x13, x8, x11
ldr x8, [sp, #616] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x9, x12
mrs x8, NZCV
str x8, [sp, #736] ; 8-byte Folded Spill
str x8, [sp, #248] ; 8-byte Folded Spill
ldr x9, [sp, #768] ; 8-byte Folded Reload
ldr x10, [sp, #760] ; 8-byte Folded Reload
adds x9, x9, x10
ldr x8, [sp, #936] ; 8-byte Folded Reload
mul x11, x2, x8
str x11, [sp, #552] ; 8-byte Folded Spill
ldr x10, [sp, #1096] ; 8-byte Folded Reload
umulh x8, x10, x8
str x8, [sp, #544] ; 8-byte Folded Spill
adcs x8, x11, x8
str x8, [sp, #560] ; 8-byte Folded Spill
ldr x11, [sp, #440] ; 8-byte Folded Reload
msr NZCV, x11
ldr x11, [sp, #600] ; 8-byte Folded Reload
adcs xzr, x11, x9
mrs x9, NZCV
str x9, [sp, #704] ; 8-byte Folded Spill
mov w9, #-2
ldr x11, [sp, #792] ; 8-byte Folded Reload
mul x11, x11, x9
str x11, [sp, #512] ; 8-byte Folded Spill
adcs x12, x8, x4
str x12, [sp, #536] ; 8-byte Folded Spill
ldr x9, [sp, #864] ; 8-byte Folded Reload
adds x9, x11, x9
ldr x8, [sp, #264] ; 8-byte Folded Reload
msr NZCV, x8
ldr x11, [sp, #1024] ; 8-byte Folded Reload
ldr x8, [sp, #656] ; 8-byte Folded Reload
adcs xzr, x8, x11
mrs x8, NZCV
str x8, [sp, #688] ; 8-byte Folded Spill
adcs x11, x9, x12
str x11, [sp, #528] ; 8-byte Folded Spill
ldr x12, [sp, #208] ; 8-byte Folded Reload
ldr x8, [sp, #240] ; 8-byte Folded Reload
cmp x12, x8
ldr x4, [sp, #1088] ; 8-byte Folded Reload
ldr x8, [sp, #1064] ; 8-byte Folded Reload
umulh x9, x4, x8
cinc x9, x9, lo
str x9, [sp, #424] ; 8-byte Folded Spill
mul x8, x10, x8
str x8, [sp, #448] ; 8-byte Folded Spill
adds x9, x9, x8
ldr x8, [sp, #16] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #200] ; 8-byte Folded Reload
adcs xzr, x8, x12
mrs x8, NZCV
str x8, [sp, #672] ; 8-byte Folded Spill
adcs x8, x9, x11
str x8, [sp, #520] ; 8-byte Folded Spill
ldr x10, [sp, #728] ; 8-byte Folded Reload
cmp x10, x26
mov w9, #-1
umulh x11, x10, x9
str x11, [sp, #928] ; 8-byte Folded Spill
cset w9, hi
ldr x12, [sp, #432] ; 8-byte Folded Reload
adds x10, x12, x9
adds x1, x10, x11
cset w10, hs
cmn x12, x9
ldr x9, [sp, #24] ; 8-byte Folded Reload
adcs x20, x9, x10
mrs x9, NZCV
str x9, [sp, #680] ; 8-byte Folded Spill
ldr x9, [sp, #984] ; 8-byte Folded Reload
ldur x12, [x9, #44]
str x12, [sp, #1072] ; 8-byte Folded Spill
ldr x0, [sp, #1040] ; 8-byte Folded Reload
mul x9, x0, x12
ldr x6, [sp, #1032] ; 8-byte Folded Reload
umulh x10, x6, x12
ldr x11, [sp, #1016] ; 8-byte Folded Reload
adcs x11, x11, x8
adds x9, x9, x10
stp x9, x20, [sp, #304] ; 16-byte Folded Spill
umulh x10, x0, x12
mul x0, x6, x12
mul x8, x4, x12
str x8, [sp, #464] ; 8-byte Folded Spill
adcs x8, x8, x10
cmn x1, x0
adcs xzr, x20, x9
mrs x9, NZCV
str x8, [sp, #376] ; 8-byte Folded Spill
adcs x4, x8, x11
str x4, [sp, #752] ; 8-byte Folded Spill
msr NZCV, x16
adcs xzr, x14, x17
mrs x12, NZCV
str x12, [sp, #984] ; 8-byte Folded Spill
msr NZCV, x9
adcs xzr, x11, x8
mrs x8, NZCV
str x8, [sp, #664] ; 8-byte Folded Spill
str x8, [sp, #656] ; 8-byte Folded Spill
ldp x9, x8, [sp, #40] ; 16-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #504] ; 8-byte Folded Reload
adcs xzr, x23, x8
adcs x8, x13, xzr
msr NZCV, x9
ldr x9, [sp, #640] ; 8-byte Folded Reload
adcs xzr, x9, x15
adcs x9, x8, xzr
ldr x8, [sp, #904] ; 8-byte Folded Reload
cmp x22, x8
ldr x8, [sp, #808] ; 8-byte Folded Reload
cinc x8, x8, lo
str x8, [sp, #504] ; 8-byte Folded Spill
ldr x10, [sp, #32] ; 8-byte Folded Reload
msr NZCV, x10
ldp x6, x10, [sp, #472] ; 16-byte Folded Reload
adcs xzr, x10, x22
mrs x11, NZCV
str x11, [sp, #808] ; 8-byte Folded Spill
adcs x4, x8, x9
stp x4, x9, [sp, #280] ; 16-byte Folded Spill
ldr x26, [sp, #56] ; 8-byte Folded Reload
ldr x8, [sp, #784] ; 8-byte Folded Reload
cmp x26, x8
cset w8, lo
ldr x22, [sp, #192] ; 8-byte Folded Reload
cmp x22, x7
cset w9, lo
cmp x19, x24
cset w11, lo
ldr x7, [sp, #128] ; 8-byte Folded Reload
cmp x7, x28
cset w13, lo
cmp x6, x30
cset w15, lo
ldr x10, [sp, #408] ; 8-byte Folded Reload
ldr x2, [sp, #368] ; 8-byte Folded Reload
cmp x2, x10
ldr x10, [sp, #992] ; 8-byte Folded Reload
ldr x14, [sp, #1112] ; 8-byte Folded Reload
mul x16, x14, x10
cset w17, lo
adds x2, x2, x16
str x2, [sp, #584] ; 8-byte Folded Spill
umulh x10, x14, x10
str x10, [sp, #488] ; 8-byte Folded Spill
adcs x10, x17, x10
str x10, [sp, #496] ; 8-byte Folded Spill
ldr x10, [sp, #960] ; 8-byte Folded Reload
ldr x14, [sp, #1144] ; 8-byte Folded Reload
mul x16, x14, x10
adds x20, x6, x16
umulh x10, x14, x10
str x10, [sp, #416] ; 8-byte Folded Spill
adcs x10, x15, x10
str x10, [sp, #440] ; 8-byte Folded Spill
ldr x10, [sp, #1080] ; 8-byte Folded Reload
ldr x16, [sp, #1120] ; 8-byte Folded Reload
mul x15, x16, x10
adds x14, x7, x15
str x14, [sp, #472] ; 8-byte Folded Spill
umulh x10, x16, x10
str x10, [sp, #360] ; 8-byte Folded Spill
adcs x10, x13, x10
str x10, [sp, #384] ; 8-byte Folded Spill
ldr x10, [sp, #1000] ; 8-byte Folded Reload
ldr x15, [sp, #1136] ; 8-byte Folded Reload
mul x13, x15, x10
adds x17, x19, x13
umulh x10, x15, x10
stp x10, x17, [sp, #392] ; 16-byte Folded Spill
adcs x10, x11, x10
str x10, [sp, #328] ; 8-byte Folded Spill
ldr x13, [sp, #1048] ; 8-byte Folded Reload
ldr x10, [sp, #1128] ; 8-byte Folded Reload
mul x11, x10, x13
adds x15, x22, x11
str x15, [sp, #264] ; 8-byte Folded Spill
ldr x11, [sp, #912] ; 8-byte Folded Reload
ldr x16, [sp, #968] ; 8-byte Folded Reload
mul x6, x11, x16
str x6, [sp, #208] ; 8-byte Folded Spill
umulh x7, x11, x16
umulh x10, x10, x13
str x10, [sp, #576] ; 8-byte Folded Spill
adcs x9, x9, x10
str x9, [sp, #320] ; 8-byte Folded Spill
adds x9, x26, x6
adcs x8, x8, x7
str x8, [sp, #904] ; 8-byte Folded Spill
ldr x8, [sp, #120] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #216] ; 8-byte Folded Reload
ldr x10, [sp, #184] ; 8-byte Folded Reload
adcs xzr, x8, x10
mrs x8, NZCV
str x8, [sp, #968] ; 8-byte Folded Spill
adcs x10, x9, x4
str x10, [sp, #240] ; 8-byte Folded Spill
ldr x28, [sp, #920] ; 8-byte Folded Reload
cmp x21, x28
ldr x8, [sp, #880] ; 8-byte Folded Reload
cinc x24, x8, lo
ldr x8, [sp, #104] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #336] ; 8-byte Folded Reload
adcs xzr, x8, x21
mrs x9, NZCV
str x9, [sp, #592] ; 8-byte Folded Spill
adcs x8, x24, x10
str x8, [sp, #272] ; 8-byte Folded Spill
ldr x9, [sp, #608] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #224] ; 8-byte Folded Reload
ldr x10, [sp, #160] ; 8-byte Folded Reload
adcs xzr, x10, x9
mrs x10, NZCV
str x10, [sp, #608] ; 8-byte Folded Spill
adcs x8, x15, x8
str x8, [sp, #296] ; 8-byte Folded Spill
ldr x22, [sp, #976] ; 8-byte Folded Reload
cmp x5, x22
ldr x9, [sp, #872] ; 8-byte Folded Reload
cinc x30, x9, lo
ldr x9, [sp, #96] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #176] ; 8-byte Folded Reload
adcs xzr, x9, x5
mrs x11, NZCV
str x11, [sp, #600] ; 8-byte Folded Spill
adcs x8, x30, x8
str x8, [sp, #368] ; 8-byte Folded Spill
ldr x9, [sp, #632] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #232] ; 8-byte Folded Reload
ldr x10, [sp, #152] ; 8-byte Folded Reload
adcs xzr, x10, x9
mrs x11, NZCV
str x11, [sp, #640] ; 8-byte Folded Spill
adcs x9, x17, x8
str x9, [sp, #352] ; 8-byte Folded Spill
ldr x8, [sp, #1008] ; 8-byte Folded Reload
cmp x25, x8
ldr x8, [sp, #896] ; 8-byte Folded Reload
cinc x10, x8, lo
str x10, [sp, #232] ; 8-byte Folded Spill
ldr x8, [sp, #112] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #648] ; 8-byte Folded Reload
adcs xzr, x8, x25
mrs x13, NZCV
str x13, [sp, #632] ; 8-byte Folded Spill
adcs x8, x10, x9
ldr x9, [sp, #344] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #456] ; 8-byte Folded Reload
ldr x10, [sp, #144] ; 8-byte Folded Reload
adcs xzr, x10, x9
mrs x11, NZCV
str x11, [sp, #624] ; 8-byte Folded Spill
adcs x9, x14, x8
stp x8, x9, [sp, #336] ; 16-byte Folded Spill
ldr x8, [sp, #1056] ; 8-byte Folded Reload
cmp x27, x8
ldr x8, [sp, #856] ; 8-byte Folded Reload
cinc x8, x8, lo
str x8, [sp, #408] ; 8-byte Folded Spill
ldr x10, [sp, #88] ; 8-byte Folded Reload
msr NZCV, x10
ldr x10, [sp, #168] ; 8-byte Folded Reload
adcs xzr, x10, x27
mrs x13, NZCV
str x13, [sp, #616] ; 8-byte Folded Spill
adcs x17, x8, x9
msr NZCV, x12
adcs x8, x20, x17
str x8, [sp, #456] ; 8-byte Folded Spill
ldr x11, [sp, #952] ; 8-byte Folded Reload
cmp x3, x11
ldr x11, [sp, #800] ; 8-byte Folded Reload
cinc x9, x11, lo
str x9, [sp, #432] ; 8-byte Folded Spill
ldr x10, [sp, #72] ; 8-byte Folded Reload
msr NZCV, x10
ldr x10, [sp, #136] ; 8-byte Folded Reload
adcs xzr, x10, x3
mrs x11, NZCV
str x11, [sp, #880] ; 8-byte Folded Spill
adcs x8, x9, x8
str x8, [sp, #480] ; 8-byte Folded Spill
ldr x9, [sp, #64] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #744] ; 8-byte Folded Reload
ldr x10, [sp, #256] ; 8-byte Folded Reload
adcs xzr, x9, x10
mrs x12, NZCV
str x12, [sp, #648] ; 8-byte Folded Spill
adcs x2, x2, x8
ldr x11, [sp, #944] ; 8-byte Folded Reload
ldr x8, [sp, #720] ; 8-byte Folded Reload
cmp x8, x11
ldr x12, [sp, #776] ; 8-byte Folded Reload
add x11, x11, x12
str x11, [sp, #1040] ; 8-byte Folded Spill
cinc x27, x11, lo
ldr x8, [sp, #248] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x2, x27
mrs x11, NZCV
str x11, [sp, #744] ; 8-byte Folded Spill
str x11, [sp, #720] ; 8-byte Folded Spill
adds x8, x1, x0
lsl x9, x8, #32
stp x9, x8, [sp, #248] ; 16-byte Folded Spill
sub x10, x9, x8
str x10, [sp, #1032] ; 8-byte Folded Spill
ldp x9, x8, [sp, #304] ; 16-byte Folded Reload
adcs x8, x9, x8
ldr x23, [sp, #424] ; 8-byte Folded Reload
ldr x19, [sp, #448] ; 8-byte Folded Reload
adds x11, x23, x19
ldr x0, [sp, #888] ; 8-byte Folded Reload
ldr x21, [sp, #1064] ; 8-byte Folded Reload
mul x4, x0, x21
ldr x12, [sp, #1096] ; 8-byte Folded Reload
umulh x14, x12, x21
adcs x9, x4, x14
stp x9, x8, [sp, #304] ; 16-byte Folded Spill
ldr x10, [sp, #864] ; 8-byte Folded Reload
ldr x13, [sp, #512] ; 8-byte Folded Reload
adds x13, x13, x10
mov w10, #-2
ldr x15, [sp, #792] ; 8-byte Folded Reload
umulh x10, x15, x10
ldr x15, [sp, #1024] ; 8-byte Folded Reload
adcs x8, x15, x10
str x8, [sp, #512] ; 8-byte Folded Spill
ldr x10, [sp, #784] ; 8-byte Folded Reload
cmp x26, x10
cinc x10, x7, lo
ldr x15, [sp, #696] ; 8-byte Folded Reload
ldr x6, [sp, #288] ; 8-byte Folded Reload
cmp x6, x15
cset w15, lo
ldr x1, [sp, #208] ; 8-byte Folded Reload
adds x16, x26, x1
adcs x15, x15, x10
ldr x10, [sp, #984] ; 8-byte Folded Reload
msr NZCV, x10
adcs xzr, x17, x20
mrs x10, NZCV
str x10, [sp, #216] ; 8-byte Folded Spill
mrs x26, NZCV
ldr x10, [sp, #768] ; 8-byte Folded Reload
ldr x17, [sp, #760] ; 8-byte Folded Reload
cmn x10, x17
ldr x10, [sp, #552] ; 8-byte Folded Reload
ldr x17, [sp, #544] ; 8-byte Folded Reload
adcs xzr, x17, x10
ldr x3, [sp, #936] ; 8-byte Folded Reload
umulh x17, x0, x3
mov x20, x0
adcs x0, x17, xzr
ldr x10, [sp, #736] ; 8-byte Folded Reload
msr NZCV, x10
adcs x25, x27, x2
str x25, [sp, #224] ; 8-byte Folded Spill
ldr x5, [sp, #1104] ; 8-byte Folded Reload
mul x1, x5, x3
adds x2, x0, x1
ldr x10, [sp, #704] ; 8-byte Folded Reload
msr NZCV, x10
ldr x10, [sp, #568] ; 8-byte Folded Reload
ldr x7, [sp, #560] ; 8-byte Folded Reload
adcs xzr, x10, x7
mrs x10, NZCV
str x10, [sp, #568] ; 8-byte Folded Spill
adcs x2, x2, x25
str x2, [sp, #768] ; 8-byte Folded Spill
ldr x10, [sp, #688] ; 8-byte Folded Reload
msr NZCV, x10
ldr x10, [sp, #536] ; 8-byte Folded Reload
adcs xzr, x10, x13
mrs x10, NZCV
str x10, [sp, #688] ; 8-byte Folded Spill
adcs x8, x8, x2
str x8, [sp, #704] ; 8-byte Folded Spill
ldr x10, [sp, #672] ; 8-byte Folded Reload
msr NZCV, x10
ldr x10, [sp, #528] ; 8-byte Folded Reload
adcs xzr, x10, x11
mrs x10, NZCV
str x10, [sp, #672] ; 8-byte Folded Spill
adcs x9, x9, x8
str x9, [sp, #696] ; 8-byte Folded Spill
ldr x10, [sp, #464] ; 8-byte Folded Reload
ldr x11, [sp, #376] ; 8-byte Folded Reload
cmp x11, x10
mov w10, #-2
ldr x13, [sp, #728] ; 8-byte Folded Reload
mul x11, x13, x10
umulh x13, x13, x10
ldr x10, [sp, #1072] ; 8-byte Folded Reload
mul x8, x12, x10
str x8, [sp, #792] ; 8-byte Folded Spill
umulh x7, x12, x10
str x7, [sp, #760] ; 8-byte Folded Spill
ldr x2, [sp, #1088] ; 8-byte Folded Reload
umulh x2, x2, x10
cinc x2, x2, lo
str x2, [sp, #728] ; 8-byte Folded Spill
mul x10, x20, x10
str x10, [sp, #736] ; 8-byte Folded Spill
adds x2, x2, x8
adcs x8, x10, x7
str x8, [sp, #784] ; 8-byte Folded Spill
ldr x10, [sp, #928] ; 8-byte Folded Reload
adds x12, x11, x10
str x12, [sp, #528] ; 8-byte Folded Spill
ldr x10, [sp, #1016] ; 8-byte Folded Reload
adcs x8, x10, x13
str x8, [sp, #984] ; 8-byte Folded Spill
ldr x11, [sp, #680] ; 8-byte Folded Reload
msr NZCV, x11
ldr x8, [sp, #520] ; 8-byte Folded Reload
adcs xzr, x8, x10
mrs x8, NZCV
str x8, [sp, #520] ; 8-byte Folded Spill
adcs x11, x12, x9
ldr x10, [sp, #656] ; 8-byte Folded Reload
msr NZCV, x10
adcs x10, x2, x11
str x10, [sp, #1088] ; 8-byte Folded Spill
cmp x0, x17
cset w17, lo
adds x8, x0, x1
str x8, [sp, #1096] ; 8-byte Folded Spill
umulh x0, x5, x3
mov x13, x5
adcs x17, x17, x0
ldr x10, [sp, #664] ; 8-byte Folded Reload
msr NZCV, x10
adcs xzr, x11, x2
mrs x8, NZCV
str x8, [sp, #664] ; 8-byte Folded Spill
str x8, [sp, #376] ; 8-byte Folded Spill
cmn x23, x19
adcs xzr, x14, x4
umulh x11, x20, x21
adcs x1, x11, xzr
ldr x10, [sp, #808] ; 8-byte Folded Reload
msr NZCV, x10
ldr x8, [sp, #504] ; 8-byte Folded Reload
adcs xzr, x6, x8
adcs x12, x15, xzr
ldr x10, [sp, #968] ; 8-byte Folded Reload
msr NZCV, x10
ldr x8, [sp, #280] ; 8-byte Folded Reload
adcs xzr, x8, x16
adcs x7, x12, xzr
cmp x24, x28
ldr x10, [sp, #824] ; 8-byte Folded Reload
cinc x9, x10, lo
str x9, [sp, #464] ; 8-byte Folded Spill
ldr x10, [sp, #592] ; 8-byte Folded Reload
msr NZCV, x10
ldr x8, [sp, #240] ; 8-byte Folded Reload
adcs xzr, x8, x24
mrs x8, NZCV
str x8, [sp, #288] ; 8-byte Folded Spill
ldr x28, [sp, #912] ; 8-byte Folded Reload
ldr x19, [sp, #1048] ; 8-byte Folded Reload
mul x23, x28, x19
adcs x12, x9, x7
str x12, [sp, #592] ; 8-byte Folded Spill
ldr x20, [sp, #320] ; 8-byte Folded Reload
adds x8, x20, x23
ldr x10, [sp, #608] ; 8-byte Folded Reload
msr NZCV, x10
ldp x10, x9, [sp, #264] ; 16-byte Folded Reload
adcs xzr, x9, x10
mrs x9, NZCV
str x9, [sp, #280] ; 8-byte Folded Spill
adcs x9, x8, x12
str x9, [sp, #824] ; 8-byte Folded Spill
cmp x30, x22
ldr x8, [sp, #872] ; 8-byte Folded Reload
cinc x6, x8, lo
ldr x8, [sp, #600] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #296] ; 8-byte Folded Reload
adcs xzr, x8, x30
mrs x8, NZCV
str x8, [sp, #872] ; 8-byte Folded Spill
adcs x22, x6, x9
str x22, [sp, #680] ; 8-byte Folded Spill
ldr x5, [sp, #328] ; 8-byte Folded Reload
ldp x4, x8, [sp, #384] ; 16-byte Folded Reload
cmp x5, x8
cset w8, lo
ldr x9, [sp, #360] ; 8-byte Folded Reload
cmp x4, x9
cset w9, lo
ldr x14, [sp, #440] ; 8-byte Folded Reload
ldr x10, [sp, #416] ; 8-byte Folded Reload
cmp x14, x10
cset w15, lo
ldp x10, x30, [sp, #488] ; 16-byte Folded Reload
cmp x30, x10
cset w16, lo
cmp x17, x0
cset w0, lo
cmp x1, x11
mul x11, x13, x21
cset w2, lo
adds x11, x1, x11
str x11, [sp, #968] ; 8-byte Folded Spill
umulh x10, x13, x21
str x10, [sp, #544] ; 8-byte Folded Spill
adcs x10, x2, x10
str x10, [sp, #560] ; 8-byte Folded Spill
ldr x10, [sp, #1112] ; 8-byte Folded Reload
mul x11, x10, x3
adds x11, x17, x11
str x11, [sp, #920] ; 8-byte Folded Spill
umulh x10, x10, x3
str x10, [sp, #536] ; 8-byte Folded Spill
adcs x10, x0, x10
str x10, [sp, #552] ; 8-byte Folded Spill
ldr x10, [sp, #1144] ; 8-byte Folded Reload
ldr x12, [sp, #992] ; 8-byte Folded Reload
mul x11, x10, x12
adds x1, x30, x11
str x1, [sp, #808] ; 8-byte Folded Spill
umulh x11, x10, x12
adcs x10, x16, x11
stp x11, x10, [sp, #496] ; 16-byte Folded Spill
ldr x10, [sp, #1120] ; 8-byte Folded Reload
ldr x12, [sp, #960] ; 8-byte Folded Reload
mul x11, x10, x12
adds x11, x14, x11
umulh x10, x10, x12
str x10, [sp, #600] ; 8-byte Folded Spill
adcs x10, x15, x10
str x10, [sp, #656] ; 8-byte Folded Spill
ldr x10, [sp, #1136] ; 8-byte Folded Reload
ldr x12, [sp, #1080] ; 8-byte Folded Reload
mul x15, x10, x12
adds x14, x4, x15
str x14, [sp, #448] ; 8-byte Folded Spill
umulh x25, x10, x12
adcs x9, x9, x25
str x9, [sp, #392] ; 8-byte Folded Spill
ldr x10, [sp, #1128] ; 8-byte Folded Reload
ldr x12, [sp, #1000] ; 8-byte Folded Reload
mul x9, x10, x12
adds x17, x5, x9
umulh x9, x10, x12
str x9, [sp, #608] ; 8-byte Folded Spill
adcs x13, x8, x9
ldr x8, [sp, #640] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #400] ; 8-byte Folded Reload
ldr x9, [sp, #368] ; 8-byte Folded Reload
adcs xzr, x9, x8
mrs x8, NZCV
str x8, [sp, #328] ; 8-byte Folded Spill
adcs x0, x17, x22
ldr x4, [sp, #1008] ; 8-byte Folded Reload
ldr x9, [sp, #232] ; 8-byte Folded Reload
cmp x9, x4
ldr x8, [sp, #896] ; 8-byte Folded Reload
cinc x10, x8, lo
ldr x8, [sp, #632] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #352] ; 8-byte Folded Reload
adcs xzr, x8, x9
mrs x8, NZCV
str x8, [sp, #352] ; 8-byte Folded Spill
adcs x22, x10, x0
ldr x8, [sp, #624] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #472] ; 8-byte Folded Reload
ldr x9, [sp, #336] ; 8-byte Folded Reload
adcs xzr, x9, x8
mrs x8, NZCV
str x8, [sp, #368] ; 8-byte Folded Spill
adcs x24, x14, x22
ldr x9, [sp, #408] ; 8-byte Folded Reload
ldr x8, [sp, #1056] ; 8-byte Folded Reload
cmp x9, x8
ldr x5, [sp, #856] ; 8-byte Folded Reload
cinc x15, x5, lo
ldr x8, [sp, #616] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #344] ; 8-byte Folded Reload
adcs xzr, x8, x9
mrs x8, NZCV
str x8, [sp, #360] ; 8-byte Folded Spill
adcs x12, x15, x24
msr NZCV, x26
adcs x9, x11, x12
str x9, [sp, #440] ; 8-byte Folded Spill
ldr x8, [sp, #576] ; 8-byte Folded Reload
cmp x20, x8
umulh x14, x28, x19
cset w2, lo
cinc x3, x14, lo
cmn x20, x23
adcs x8, x2, x14
str x8, [sp, #384] ; 8-byte Folded Spill
ldr x8, [sp, #904] ; 8-byte Folded Reload
cmp x7, x8
cset w14, lo
adds x21, x20, x23
adcs x19, x14, x3
ldr x8, [sp, #216] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x12, x11
mrs x8, NZCV
str x8, [sp, #904] ; 8-byte Folded Spill
mrs x16, NZCV
ldr x12, [sp, #952] ; 8-byte Folded Reload
ldr x26, [sp, #432] ; 8-byte Folded Reload
cmp x26, x12
ldr x14, [sp, #800] ; 8-byte Folded Reload
cinc x11, x14, lo
ldr x8, [sp, #880] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #456] ; 8-byte Folded Reload
adcs xzr, x8, x26
mrs x8, NZCV
str x8, [sp, #576] ; 8-byte Folded Spill
adcs x26, x11, x9
ldr x8, [sp, #648] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #584] ; 8-byte Folded Reload
ldr x9, [sp, #480] ; 8-byte Folded Reload
adcs xzr, x9, x8
mrs x8, NZCV
str x8, [sp, #584] ; 8-byte Folded Spill
adcs x30, x1, x26
ldr x20, [sp, #944] ; 8-byte Folded Reload
cmp x27, x20
ldr x27, [sp, #1040] ; 8-byte Folded Reload
cinc x2, x27, lo
ldr x8, [sp, #720] ; 8-byte Folded Reload
msr NZCV, x8
adcs x9, x2, x30
str x9, [sp, #472] ; 8-byte Folded Spill
ldr x8, [sp, #568] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #224] ; 8-byte Folded Reload
ldr x1, [sp, #1096] ; 8-byte Folded Reload
adcs xzr, x8, x1
mrs x8, NZCV
str x8, [sp, #416] ; 8-byte Folded Spill
ldr x8, [sp, #920] ; 8-byte Folded Reload
adcs x1, x8, x9
str x1, [sp, #640] ; 8-byte Folded Spill
ldr x8, [sp, #1024] ; 8-byte Folded Reload
ldr x28, [sp, #512] ; 8-byte Folded Reload
cmp x28, x8
ldr x9, [sp, #864] ; 8-byte Folded Reload
add x8, x8, x9
str x8, [sp, #1096] ; 8-byte Folded Spill
cinc x3, x8, lo
ldr x8, [sp, #688] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #768] ; 8-byte Folded Reload
adcs xzr, x8, x28
mrs x8, NZCV
str x8, [sp, #408] ; 8-byte Folded Spill
adcs x1, x3, x1
str x1, [sp, #456] ; 8-byte Folded Spill
ldr x8, [sp, #672] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #304] ; 8-byte Folded Reload
ldr x9, [sp, #704] ; 8-byte Folded Reload
adcs xzr, x9, x8
mrs x8, NZCV
str x8, [sp, #400] ; 8-byte Folded Spill
ldr x8, [sp, #968] ; 8-byte Folded Reload
adcs x1, x8, x1
str x1, [sp, #488] ; 8-byte Folded Spill
ldr x8, [sp, #520] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #696] ; 8-byte Folded Reload
ldr x9, [sp, #528] ; 8-byte Folded Reload
adcs xzr, x8, x9
mrs x8, NZCV
str x8, [sp, #432] ; 8-byte Folded Spill
ldr x8, [sp, #984] ; 8-byte Folded Reload
adcs x8, x8, x1
ldr x9, [sp, #376] ; 8-byte Folded Reload
msr NZCV, x9
ldr x1, [sp, #784] ; 8-byte Folded Reload
adcs xzr, x8, x1
mrs x9, NZCV
str x9, [sp, #424] ; 8-byte Folded Spill
ldr x28, [sp, #664] ; 8-byte Folded Reload
msr NZCV, x28
adcs x8, x1, x8
str x8, [sp, #696] ; 8-byte Folded Spill
msr NZCV, x16
mrs x23, NZCV
msr NZCV, x9
mrs x28, NZCV
ldp x16, x9, [sp, #280] ; 16-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #464] ; 8-byte Folded Reload
adcs xzr, x7, x9
adcs x9, x19, xzr
msr NZCV, x16
ldr x16, [sp, #592] ; 8-byte Folded Reload
adcs xzr, x16, x21
adcs x16, x9, xzr
ldr x9, [sp, #976] ; 8-byte Folded Reload
cmp x6, x9
ldr x9, [sp, #816] ; 8-byte Folded Reload
cinc x7, x9, lo
str x7, [sp, #664] ; 8-byte Folded Spill
ldr x9, [sp, #872] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #824] ; 8-byte Folded Reload
adcs xzr, x9, x6
mrs x9, NZCV
str x9, [sp, #872] ; 8-byte Folded Spill
ldr x1, [sp, #912] ; 8-byte Folded Reload
ldr x21, [sp, #1000] ; 8-byte Folded Reload
mul x9, x1, x21
adcs x6, x7, x16
str x6, [sp, #632] ; 8-byte Folded Spill
mov x19, x16
str x16, [sp, #520] ; 8-byte Folded Spill
adds x16, x13, x9
ldr x8, [sp, #328] ; 8-byte Folded Reload
msr NZCV, x8
ldr x7, [sp, #680] ; 8-byte Folded Reload
adcs xzr, x7, x17
mrs x17, NZCV
str x17, [sp, #824] ; 8-byte Folded Spill
adcs x16, x16, x6
str x16, [sp, #624] ; 8-byte Folded Spill
cmp x10, x4
ldr x8, [sp, #896] ; 8-byte Folded Reload
cinc x17, x8, lo
str x17, [sp, #616] ; 8-byte Folded Spill
ldr x8, [sp, #352] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x0, x10
mrs x10, NZCV
str x10, [sp, #816] ; 8-byte Folded Spill
adcs x6, x17, x16
str x6, [sp, #592] ; 8-byte Folded Spill
ldr x8, [sp, #392] ; 8-byte Folded Reload
cmp x8, x25
cset w10, lo
ldr x7, [sp, #656] ; 8-byte Folded Reload
ldr x16, [sp, #600] ; 8-byte Folded Reload
cmp x7, x16
ldr x0, [sp, #960] ; 8-byte Folded Reload
ldr x4, [sp, #1136] ; 8-byte Folded Reload
mul x16, x4, x0
cset w17, lo
adds x16, x7, x16
umulh x0, x4, x0
str x0, [sp, #672] ; 8-byte Folded Spill
adcs x17, x17, x0
str x17, [sp, #688] ; 8-byte Folded Spill
ldr x0, [sp, #1080] ; 8-byte Folded Reload
ldr x4, [sp, #1128] ; 8-byte Folded Reload
mul x17, x4, x0
adds x7, x8, x17
str x7, [sp, #600] ; 8-byte Folded Spill
umulh x17, x4, x0
str x17, [sp, #896] ; 8-byte Folded Spill
adcs x10, x10, x17
str x10, [sp, #1048] ; 8-byte Folded Spill
ldr x8, [sp, #368] ; 8-byte Folded Reload
msr NZCV, x8
ldr x10, [sp, #448] ; 8-byte Folded Reload
adcs xzr, x22, x10
mrs x10, NZCV
str x10, [sp, #784] ; 8-byte Folded Spill
adcs x10, x7, x6
str x10, [sp, #568] ; 8-byte Folded Spill
ldr x17, [sp, #1056] ; 8-byte Folded Reload
cmp x15, x17
cinc x6, x5, lo
ldr x8, [sp, #360] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x24, x15
mrs x15, NZCV
str x15, [sp, #768] ; 8-byte Folded Spill
adcs x15, x6, x10
ldr x8, [sp, #904] ; 8-byte Folded Reload
msr NZCV, x8
adcs x7, x16, x15
str x7, [sp, #680] ; 8-byte Folded Spill
ldr x10, [sp, #608] ; 8-byte Folded Reload
cmp x13, x10
umulh x17, x1, x21
mov x24, x1
cset w0, lo
cinc x1, x17, lo
cmn x13, x9
adcs x10, x0, x17
str x10, [sp, #904] ; 8-byte Folded Spill
ldr x8, [sp, #384] ; 8-byte Folded Reload
cmp x19, x8
cset w17, lo
adds x9, x13, x9
str x9, [sp, #720] ; 8-byte Folded Spill
adcs x9, x17, x1
str x9, [sp, #704] ; 8-byte Folded Spill
msr NZCV, x23
adcs xzr, x15, x16
mrs x8, NZCV
str x8, [sp, #976] ; 8-byte Folded Spill
str x8, [sp, #880] ; 8-byte Folded Spill
ldr x8, [sp, #792] ; 8-byte Folded Reload
ldr x9, [sp, #728] ; 8-byte Folded Reload
cmn x9, x8
ldr x8, [sp, #760] ; 8-byte Folded Reload
ldr x9, [sp, #736] ; 8-byte Folded Reload
adcs xzr, x8, x9
ldr x16, [sp, #1072] ; 8-byte Folded Reload
ldr x8, [sp, #888] ; 8-byte Folded Reload
umulh x8, x8, x16
adcs x9, x8, xzr
cmp x11, x12
cinc x12, x14, lo
str x12, [sp, #608] ; 8-byte Folded Spill
ldr x10, [sp, #576] ; 8-byte Folded Reload
msr NZCV, x10
ldr x10, [sp, #440] ; 8-byte Folded Reload
adcs xzr, x10, x11
mrs x10, NZCV
str x10, [sp, #888] ; 8-byte Folded Spill
ldr x25, [sp, #992] ; 8-byte Folded Reload
ldr x22, [sp, #1120] ; 8-byte Folded Reload
mul x11, x22, x25
adcs x12, x12, x7
str x12, [sp, #648] ; 8-byte Folded Spill
ldr x17, [sp, #504] ; 8-byte Folded Reload
adds x13, x17, x11
ldr x10, [sp, #584] ; 8-byte Folded Reload
msr NZCV, x10
ldr x10, [sp, #808] ; 8-byte Folded Reload
adcs xzr, x26, x10
mrs x10, NZCV
str x10, [sp, #808] ; 8-byte Folded Spill
adcs x10, x13, x12
str x10, [sp, #656] ; 8-byte Folded Spill
cmp x2, x20
cinc x20, x27, lo
ldr x12, [sp, #744] ; 8-byte Folded Reload
msr NZCV, x12
adcs xzr, x30, x2
mrs x12, NZCV
str x12, [sp, #792] ; 8-byte Folded Spill
adcs x14, x20, x10
str x14, [sp, #584] ; 8-byte Folded Spill
ldr x2, [sp, #552] ; 8-byte Folded Reload
ldr x10, [sp, #536] ; 8-byte Folded Reload
cmp x2, x10
cset w12, lo
ldr x0, [sp, #560] ; 8-byte Folded Reload
ldr x10, [sp, #544] ; 8-byte Folded Reload
cmp x0, x10
cset w13, lo
cmp x9, x8
ldr x10, [sp, #1104] ; 8-byte Folded Reload
mul x8, x10, x16
cset w15, lo
adds x8, x9, x8
umulh x9, x10, x16
str x9, [sp, #512] ; 8-byte Folded Spill
adcs x9, x15, x9
str x9, [sp, #480] ; 8-byte Folded Spill
ldr x10, [sp, #1064] ; 8-byte Folded Reload
ldr x21, [sp, #1112] ; 8-byte Folded Reload
mul x9, x21, x10
adds x15, x0, x9
str x15, [sp, #576] ; 8-byte Folded Spill
umulh x26, x21, x10
adcs x19, x13, x26
ldr x10, [sp, #936] ; 8-byte Folded Reload
ldr x13, [sp, #1144] ; 8-byte Folded Reload
mul x9, x13, x10
adds x9, x2, x9
str x9, [sp, #560] ; 8-byte Folded Spill
umulh x2, x13, x10
adcs x5, x12, x2
ldr x10, [sp, #416] ; 8-byte Folded Reload
msr NZCV, x10
ldr x10, [sp, #920] ; 8-byte Folded Reload
ldr x12, [sp, #472] ; 8-byte Folded Reload
adcs xzr, x12, x10
mrs x10, NZCV
str x10, [sp, #920] ; 8-byte Folded Spill
adcs x10, x9, x14
str x10, [sp, #552] ; 8-byte Folded Spill
ldr x9, [sp, #1024] ; 8-byte Folded Reload
cmp x3, x9
ldr x4, [sp, #1096] ; 8-byte Folded Reload
cinc x9, x4, lo
str x9, [sp, #536] ; 8-byte Folded Spill
ldr x12, [sp, #408] ; 8-byte Folded Reload
msr NZCV, x12
ldr x12, [sp, #640] ; 8-byte Folded Reload
adcs xzr, x12, x3
mrs x12, NZCV
str x12, [sp, #760] ; 8-byte Folded Spill
adcs x10, x9, x10
str x10, [sp, #544] ; 8-byte Folded Spill
ldr x9, [sp, #400] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #968] ; 8-byte Folded Reload
ldr x12, [sp, #456] ; 8-byte Folded Reload
adcs xzr, x12, x9
mrs x9, NZCV
str x9, [sp, #744] ; 8-byte Folded Spill
adcs x9, x15, x10
ldr x10, [sp, #1016] ; 8-byte Folded Reload
ldr x13, [sp, #984] ; 8-byte Folded Reload
cmp x13, x10
ldr x12, [sp, #928] ; 8-byte Folded Reload
add x10, x10, x12
str x10, [sp, #1104] ; 8-byte Folded Spill
cinc x10, x10, lo
str x10, [sp, #528] ; 8-byte Folded Spill
ldr x12, [sp, #432] ; 8-byte Folded Reload
msr NZCV, x12
ldp x12, x15, [sp, #488] ; 16-byte Folded Reload
adcs xzr, x12, x13
mrs x12, NZCV
adcs x14, x10, x9
ldr x13, [sp, #424] ; 8-byte Folded Reload
msr NZCV, x13
adcs x3, x8, x14
str x3, [sp, #464] ; 8-byte Folded Spill
cmp x17, x15
cset w15, lo
adds x11, x17, x11
str x11, [sp, #640] ; 8-byte Folded Spill
umulh x0, x22, x25
adcs x16, x15, x0
msr NZCV, x28
adcs xzr, x14, x8
mrs x8, NZCV
str x8, [sp, #736] ; 8-byte Folded Spill
mrs x15, NZCV
msr NZCV, x12
adcs xzr, x9, x10
mrs x8, NZCV
str x8, [sp, #968] ; 8-byte Folded Spill
str x8, [sp, #728] ; 8-byte Folded Spill
ldr x11, [sp, #256] ; 8-byte Folded Reload
mov w8, #-1
umulh x9, x11, x8
mov w12, #-2
mul x8, x11, x12
adds x17, x8, x9
mov x13, x9
ldr x9, [sp, #248] ; 8-byte Folded Reload
cmp x11, x9
cset w10, hi
ldr x14, [sp, #312] ; 8-byte Folded Reload
adds x9, x14, x10
mov x23, #-4294967295
str x13, [sp, #1000] ; 8-byte Folded Spill
adds x9, x9, x13
str x9, [sp, #504] ; 8-byte Folded Spill
add x9, x9, x23
str x9, [sp, #488] ; 8-byte Folded Spill
cset w9, hs
cmn x14, x10
ldr x10, [sp, #752] ; 8-byte Folded Reload
adcs x9, x10, x9
str x9, [sp, #752] ; 8-byte Folded Spill
mrs x9, NZCV
str x9, [sp, #472] ; 8-byte Folded Spill
ldr x9, [sp, #1088] ; 8-byte Folded Reload
ldr x10, [sp, #1032] ; 8-byte Folded Reload
adcs xzr, x9, x10
mrs x14, NZCV
ldr x9, [sp, #696] ; 8-byte Folded Reload
adcs x17, x17, x9
str x17, [sp, #496] ; 8-byte Folded Spill
str x15, [sp, #440] ; 8-byte Folded Spill
adds x15, x8, x13
umulh x17, x11, x12
adcs x8, x10, x17
msr NZCV, x14
adcs xzr, x9, x15
mrs x9, NZCV
stp x9, x8, [sp, #448] ; 16-byte Folded Spill
adcs x8, x8, x3
str x8, [sp, #696] ; 8-byte Folded Spill
ldr x8, [sp, #872] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #664] ; 8-byte Folded Reload
ldr x9, [sp, #520] ; 8-byte Folded Reload
adcs xzr, x9, x8
ldr x8, [sp, #704] ; 8-byte Folded Reload
adcs x14, x8, xzr
ldr x8, [sp, #824] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #720] ; 8-byte Folded Reload
ldr x9, [sp, #632] ; 8-byte Folded Reload
adcs xzr, x9, x8
adcs x23, x14, xzr
ldr x9, [sp, #616] ; 8-byte Folded Reload
ldr x8, [sp, #1008] ; 8-byte Folded Reload
cmp x9, x8
ldr x8, [sp, #848] ; 8-byte Folded Reload
cinc x10, x8, lo
str x10, [sp, #704] ; 8-byte Folded Spill
ldr x8, [sp, #816] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #624] ; 8-byte Folded Reload
adcs xzr, x8, x9
mrs x8, NZCV
str x8, [sp, #632] ; 8-byte Folded Spill
ldr x8, [sp, #1080] ; 8-byte Folded Reload
mul x17, x24, x8
adcs x30, x10, x23
ldr x8, [sp, #1048] ; 8-byte Folded Reload
adds x15, x8, x17
ldr x8, [sp, #784] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #600] ; 8-byte Folded Reload
ldr x9, [sp, #592] ; 8-byte Folded Reload
adcs xzr, x9, x8
mrs x8, NZCV
str x8, [sp, #624] ; 8-byte Folded Spill
adcs x9, x15, x30
str x9, [sp, #720] ; 8-byte Folded Spill
ldr x8, [sp, #1056] ; 8-byte Folded Reload
cmp x6, x8
ldr x8, [sp, #856] ; 8-byte Folded Reload
cinc x7, x8, lo
ldr x8, [sp, #768] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #568] ; 8-byte Folded Reload
adcs xzr, x8, x6
mrs x8, NZCV
str x8, [sp, #664] ; 8-byte Folded Spill
adcs x1, x7, x9
str x1, [sp, #768] ; 8-byte Folded Spill
ldr x12, [sp, #688] ; 8-byte Folded Reload
ldr x8, [sp, #672] ; 8-byte Folded Reload
cmp x12, x8
cset w9, lo
mov x6, x16
cmp x16, x0
cset w8, lo
cmp x5, x2
cset w24, lo
cmp x19, x26
cset w15, lo
ldr x11, [sp, #480] ; 8-byte Folded Reload
ldr x10, [sp, #512] ; 8-byte Folded Reload
cmp x11, x10
ldr x10, [sp, #1072] ; 8-byte Folded Reload
mul x16, x21, x10
cset w3, lo
adds x11, x11, x16
str x11, [sp, #984] ; 8-byte Folded Spill
umulh x10, x21, x10
str x10, [sp, #872] ; 8-byte Folded Spill
adcs x10, x3, x10
str x10, [sp, #1008] ; 8-byte Folded Spill
ldr x10, [sp, #1144] ; 8-byte Folded Reload
ldr x11, [sp, #1064] ; 8-byte Folded Reload
mul x16, x10, x11
adds x26, x19, x16
str x26, [sp, #848] ; 8-byte Folded Spill
umulh x10, x10, x11
str x10, [sp, #856] ; 8-byte Folded Spill
adcs x10, x15, x10
str x10, [sp, #1112] ; 8-byte Folded Spill
mov x10, x22
ldr x11, [sp, #936] ; 8-byte Folded Reload
mul x22, x22, x11
adds x3, x5, x22
str x3, [sp, #824] ; 8-byte Folded Spill
umulh x10, x10, x11
str x10, [sp, #784] ; 8-byte Folded Spill
adcs x10, x24, x10
str x10, [sp, #816] ; 8-byte Folded Spill
ldr x10, [sp, #1136] ; 8-byte Folded Reload
mul x5, x10, x25
adds x15, x6, x5
str x15, [sp, #672] ; 8-byte Folded Spill
umulh x22, x10, x25
adcs x27, x8, x22
ldr x8, [sp, #1128] ; 8-byte Folded Reload
ldr x10, [sp, #960] ; 8-byte Folded Reload
mul x28, x8, x10
adds x28, x12, x28
umulh x16, x8, x10
mov x19, x10
adcs x13, x9, x16
ldr x8, [sp, #880] ; 8-byte Folded Reload
msr NZCV, x8
adcs x5, x28, x1
ldr x14, [sp, #952] ; 8-byte Folded Reload
ldr x8, [sp, #608] ; 8-byte Folded Reload
cmp x8, x14
ldr x24, [sp, #800] ; 8-byte Folded Reload
cinc x0, x24, lo
ldr x9, [sp, #888] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #680] ; 8-byte Folded Reload
adcs xzr, x9, x8
mrs x8, NZCV
str x8, [sp, #616] ; 8-byte Folded Spill
adcs x6, x0, x5
ldr x8, [sp, #808] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #648] ; 8-byte Folded Reload
ldr x9, [sp, #640] ; 8-byte Folded Reload
adcs xzr, x8, x9
mrs x8, NZCV
str x8, [sp, #648] ; 8-byte Folded Spill
adcs x21, x15, x6
ldr x15, [sp, #944] ; 8-byte Folded Reload
cmp x20, x15
ldr x1, [sp, #1040] ; 8-byte Folded Reload
cinc x2, x1, lo
ldr x8, [sp, #792] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #656] ; 8-byte Folded Reload
adcs xzr, x8, x20
mrs x8, NZCV
str x8, [sp, #640] ; 8-byte Folded Spill
adcs x10, x2, x21
str x10, [sp, #808] ; 8-byte Folded Spill
ldr x8, [sp, #920] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #584] ; 8-byte Folded Reload
ldr x9, [sp, #560] ; 8-byte Folded Reload
adcs xzr, x8, x9
mrs x8, NZCV
str x8, [sp, #680] ; 8-byte Folded Spill
adcs x25, x3, x10
ldr x9, [sp, #536] ; 8-byte Folded Reload
ldr x8, [sp, #1024] ; 8-byte Folded Reload
cmp x9, x8
cinc x3, x4, lo
ldr x8, [sp, #760] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #552] ; 8-byte Folded Reload
adcs xzr, x8, x9
mrs x8, NZCV
str x8, [sp, #656] ; 8-byte Folded Spill
adcs x10, x3, x25
str x10, [sp, #888] ; 8-byte Folded Spill
ldr x8, [sp, #744] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #576] ; 8-byte Folded Reload
ldr x9, [sp, #544] ; 8-byte Folded Reload
adcs xzr, x9, x8
mrs x8, NZCV
str x8, [sp, #688] ; 8-byte Folded Spill
adcs x10, x26, x10
str x10, [sp, #760] ; 8-byte Folded Spill
ldr x8, [sp, #1016] ; 8-byte Folded Reload
ldr x9, [sp, #528] ; 8-byte Folded Reload
cmp x9, x8
ldr x8, [sp, #1104] ; 8-byte Folded Reload
cinc x4, x8, lo
ldr x8, [sp, #728] ; 8-byte Folded Reload
msr NZCV, x8
adcs x10, x4, x10
str x10, [sp, #880] ; 8-byte Folded Spill
ldr x26, [sp, #1048] ; 8-byte Folded Reload
ldr x8, [sp, #896] ; 8-byte Folded Reload
cmp x26, x8
ldr x8, [sp, #912] ; 8-byte Folded Reload
ldr x9, [sp, #1080] ; 8-byte Folded Reload
umulh x9, x8, x9
cset w11, lo
cinc x12, x9, lo
cmn x26, x17
adcs x20, x11, x9
ldr x9, [sp, #904] ; 8-byte Folded Reload
cmp x23, x9
cset w9, lo
adds x17, x26, x17
adcs x9, x9, x12
ldr x11, [sp, #736] ; 8-byte Folded Reload
msr NZCV, x11
ldr x11, [sp, #984] ; 8-byte Folded Reload
adcs x11, x11, x10
str x11, [sp, #1080] ; 8-byte Folded Spill
ldr x10, [sp, #632] ; 8-byte Folded Reload
msr NZCV, x10
ldr x11, [sp, #704] ; 8-byte Folded Reload
adcs xzr, x23, x11
adcs x9, x9, xzr
ldr x10, [sp, #624] ; 8-byte Folded Reload
msr NZCV, x10
adcs xzr, x30, x17
adcs x30, x9, xzr
cmp x30, x20
cset w10, lo
cmp x13, x16
mul x9, x8, x19
umulh x16, x8, x19
cset w17, lo
cinc x23, x16, lo
cmn x13, x9
adcs x8, x17, x16
str x8, [sp, #960] ; 8-byte Folded Spill
adds x9, x13, x9
str x9, [sp, #792] ; 8-byte Folded Spill
adcs x8, x10, x23
str x8, [sp, #744] ; 8-byte Folded Spill
ldr x8, [sp, #1056] ; 8-byte Folded Reload
cmp x7, x8
ldr x8, [sp, #840] ; 8-byte Folded Reload
cinc x10, x8, lo
str x10, [sp, #736] ; 8-byte Folded Spill
ldr x8, [sp, #664] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #720] ; 8-byte Folded Reload
adcs xzr, x8, x7
mrs x8, NZCV
str x8, [sp, #728] ; 8-byte Folded Spill
adcs x19, x10, x30
ldr x8, [sp, #976] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #768] ; 8-byte Folded Reload
adcs xzr, x8, x28
mrs x8, NZCV
str x8, [sp, #720] ; 8-byte Folded Spill
adcs x26, x9, x19
cmp x0, x14
cinc x16, x24, lo
ldr x8, [sp, #616] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x5, x0
mrs x8, NZCV
str x8, [sp, #704] ; 8-byte Folded Spill
adcs x12, x16, x26
str x12, [sp, #768] ; 8-byte Folded Spill
cmp x27, x22
ldr x8, [sp, #992] ; 8-byte Folded Reload
ldr x10, [sp, #1128] ; 8-byte Folded Reload
mul x9, x10, x8
cset w17, lo
adds x11, x27, x9
str x11, [sp, #800] ; 8-byte Folded Spill
umulh x9, x10, x8
str x9, [sp, #976] ; 8-byte Folded Spill
adcs x28, x17, x9
ldr x8, [sp, #648] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #672] ; 8-byte Folded Reload
adcs xzr, x6, x8
mrs x8, NZCV
str x8, [sp, #672] ; 8-byte Folded Spill
adcs x20, x11, x12
cmp x2, x15
cinc x17, x1, lo
ldr x8, [sp, #640] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x21, x2
mrs x8, NZCV
str x8, [sp, #664] ; 8-byte Folded Spill
adcs x11, x17, x20
str x11, [sp, #904] ; 8-byte Folded Spill
ldr x12, [sp, #816] ; 8-byte Folded Reload
ldr x8, [sp, #784] ; 8-byte Folded Reload
cmp x12, x8
ldr x9, [sp, #936] ; 8-byte Folded Reload
ldr x10, [sp, #1136] ; 8-byte Folded Reload
mul x23, x10, x9
cset w8, lo
adds x5, x12, x23
umulh x10, x10, x9
str x10, [sp, #920] ; 8-byte Folded Spill
mov x21, x9
adcs x23, x8, x10
ldr x8, [sp, #680] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #824] ; 8-byte Folded Reload
ldr x9, [sp, #808] ; 8-byte Folded Reload
adcs xzr, x9, x8
mrs x8, NZCV
str x8, [sp, #824] ; 8-byte Folded Spill
adcs x8, x5, x11
ldr x11, [sp, #1024] ; 8-byte Folded Reload
cmp x3, x11
ldr x24, [sp, #1096] ; 8-byte Folded Reload
cinc x12, x24, lo
ldr x9, [sp, #656] ; 8-byte Folded Reload
msr NZCV, x9
adcs xzr, x25, x3
mrs x9, NZCV
str x9, [sp, #784] ; 8-byte Folded Spill
adcs x14, x12, x8
str x14, [sp, #840] ; 8-byte Folded Spill
mov x3, x8
ldr x13, [sp, #1112] ; 8-byte Folded Reload
ldr x8, [sp, #856] ; 8-byte Folded Reload
cmp x13, x8
ldr x9, [sp, #1120] ; 8-byte Folded Reload
ldr x10, [sp, #1064] ; 8-byte Folded Reload
mul x6, x9, x10
cset w8, lo
adds x13, x13, x6
str x13, [sp, #896] ; 8-byte Folded Spill
umulh x6, x9, x10
adcs x1, x8, x6
ldr x8, [sp, #688] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #848] ; 8-byte Folded Reload
ldr x9, [sp, #888] ; 8-byte Folded Reload
adcs xzr, x9, x8
mrs x8, NZCV
str x8, [sp, #688] ; 8-byte Folded Spill
adcs x10, x13, x14
ldr x14, [sp, #1016] ; 8-byte Folded Reload
cmp x4, x14
ldr x13, [sp, #1104] ; 8-byte Folded Reload
cinc x22, x13, lo
ldr x8, [sp, #968] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #760] ; 8-byte Folded Reload
adcs xzr, x8, x4
mrs x15, NZCV
ldr x8, [sp, #1072] ; 8-byte Folded Reload
ldr x9, [sp, #1144] ; 8-byte Folded Reload
mul x0, x9, x8
str x0, [sp, #888] ; 8-byte Folded Spill
adcs x9, x22, x10
ldr x8, [sp, #1008] ; 8-byte Folded Reload
adds x8, x8, x0
ldr x0, [sp, #440] ; 8-byte Folded Reload
msr NZCV, x0
ldr x0, [sp, #984] ; 8-byte Folded Reload
ldr x2, [sp, #880] ; 8-byte Folded Reload
adcs xzr, x2, x0
mrs x27, NZCV
adcs x0, x8, x9
str x0, [sp, #1056] ; 8-byte Folded Spill
msr NZCV, x15
adcs xzr, x10, x22
mrs x10, NZCV
str x10, [sp, #968] ; 8-byte Folded Spill
str x10, [sp, #760] ; 8-byte Folded Spill
msr NZCV, x27
adcs xzr, x9, x8
mrs x8, NZCV
str x8, [sp, #984] ; 8-byte Folded Spill
str x8, [sp, #856] ; 8-byte Folded Spill
ldr x7, [sp, #1032] ; 8-byte Folded Reload
ldr x9, [sp, #456] ; 8-byte Folded Reload
cmp x9, x7
ldr x8, [sp, #1000] ; 8-byte Folded Reload
add x8, x7, x8
str x8, [sp, #1048] ; 8-byte Folded Spill
cinc x0, x8, lo
ldr x8, [sp, #448] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #464] ; 8-byte Folded Reload
adcs xzr, x8, x9
mrs x8, NZCV
str x8, [sp, #848] ; 8-byte Folded Spill
ldr x8, [sp, #1080] ; 8-byte Folded Reload
adcs x8, x0, x8
str x8, [sp, #1112] ; 8-byte Folded Spill
ldr x8, [sp, #728] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #736] ; 8-byte Folded Reload
adcs xzr, x30, x8
ldr x8, [sp, #744] ; 8-byte Folded Reload
adcs x25, x8, xzr
ldr x8, [sp, #720] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #792] ; 8-byte Folded Reload
adcs xzr, x19, x8
adcs x19, x25, xzr
ldr x8, [sp, #952] ; 8-byte Folded Reload
cmp x16, x8
ldr x8, [sp, #832] ; 8-byte Folded Reload
cinc x8, x8, lo
str x8, [sp, #952] ; 8-byte Folded Spill
ldr x9, [sp, #704] ; 8-byte Folded Reload
msr NZCV, x9
adcs xzr, x26, x16
mrs x9, NZCV
str x9, [sp, #832] ; 8-byte Folded Spill
ldr x16, [sp, #912] ; 8-byte Folded Reload
ldr x2, [sp, #992] ; 8-byte Folded Reload
mul x26, x16, x2
adcs x10, x8, x19
str x10, [sp, #880] ; 8-byte Folded Spill
adds x30, x28, x26
ldr x8, [sp, #672] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #800] ; 8-byte Folded Reload
ldr x9, [sp, #768] ; 8-byte Folded Reload
adcs xzr, x9, x8
mrs x8, NZCV
str x8, [sp, #816] ; 8-byte Folded Spill
adcs x27, x30, x10
ldr x8, [sp, #944] ; 8-byte Folded Reload
cmp x17, x8
ldr x8, [sp, #1040] ; 8-byte Folded Reload
cinc x30, x8, lo
ldr x8, [sp, #664] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x20, x17
mrs x8, NZCV
str x8, [sp, #808] ; 8-byte Folded Spill
ldr x17, [sp, #1128] ; 8-byte Folded Reload
mov x20, x21
mul x21, x17, x21
adcs x25, x30, x27
adds x8, x23, x21
ldr x9, [sp, #824] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #904] ; 8-byte Folded Reload
adcs xzr, x9, x5
mrs x9, NZCV
str x9, [sp, #800] ; 8-byte Folded Spill
adcs x8, x8, x25
str x8, [sp, #1040] ; 8-byte Folded Spill
cmp x12, x11
cinc x15, x24, lo
ldr x9, [sp, #784] ; 8-byte Folded Reload
msr NZCV, x9
adcs xzr, x3, x12
mrs x9, NZCV
str x9, [sp, #792] ; 8-byte Folded Spill
adcs x11, x15, x8
str x11, [sp, #904] ; 8-byte Folded Spill
cmp x1, x6
ldr x9, [sp, #1064] ; 8-byte Folded Reload
ldr x10, [sp, #1136] ; 8-byte Folded Reload
mul x6, x10, x9
cset w8, lo
adds x12, x1, x6
str x12, [sp, #824] ; 8-byte Folded Spill
umulh x6, x10, x9
adcs x4, x8, x6
ldr x8, [sp, #688] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #896] ; 8-byte Folded Reload
ldr x9, [sp, #840] ; 8-byte Folded Reload
adcs xzr, x9, x8
mrs x8, NZCV
str x8, [sp, #896] ; 8-byte Folded Spill
adcs x1, x12, x11
cmp x22, x14
cinc x22, x13, lo
ldr x8, [sp, #760] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x1, x22
mrs x8, NZCV
str x8, [sp, #840] ; 8-byte Folded Spill
mrs x13, NZCV
ldr x8, [sp, #472] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #1088] ; 8-byte Folded Reload
adcs x8, x7, x8
str x8, [sp, #1088] ; 8-byte Folded Spill
ldr x8, [sp, #920] ; 8-byte Folded Reload
cmp x23, x8
cset w3, lo
adds x21, x23, x21
umulh x23, x17, x20
adcs x3, x3, x23
ldr x9, [sp, #1008] ; 8-byte Folded Reload
ldr x8, [sp, #872] ; 8-byte Folded Reload
cmp x9, x8
cset w8, lo
ldr x10, [sp, #888] ; 8-byte Folded Reload
cmn x9, x10
ldr x12, [sp, #1072] ; 8-byte Folded Reload
ldr x9, [sp, #1144] ; 8-byte Folded Reload
umulh x24, x9, x12
adcs x9, x8, x24
ldr x8, [sp, #976] ; 8-byte Folded Reload
cmp x28, x8
umulh x8, x16, x2
cset w5, lo
cinc x10, x8, lo
cmn x28, x26
adcs x5, x5, x8
ldr x8, [sp, #960] ; 8-byte Folded Reload
cmp x19, x8
cset w8, lo
adds x2, x28, x26
adcs x11, x8, x10
ldr x8, [sp, #968] ; 8-byte Folded Reload
msr NZCV, x8
adcs x1, x22, x1
ldr x8, [sp, #856] ; 8-byte Folded Reload
msr NZCV, x8
mrs x26, NZCV
msr NZCV, x13
mrs x8, NZCV
str x8, [sp, #968] ; 8-byte Folded Spill
cmp x0, x7
ldr x8, [sp, #1048] ; 8-byte Folded Reload
cinc x13, x8, lo
ldr x8, [sp, #848] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #1080] ; 8-byte Folded Reload
adcs xzr, x8, x0
mrs x8, NZCV
str x8, [sp, #1008] ; 8-byte Folded Spill
ldr x8, [sp, #1056] ; 8-byte Folded Reload
adcs x8, x13, x8
str x8, [sp, #1144] ; 8-byte Folded Spill
cmp x9, x24
ldr x10, [sp, #1120] ; 8-byte Folded Reload
mul x24, x10, x12
cset w8, lo
adds x14, x9, x24
umulh x24, x10, x12
adcs x10, x8, x24
ldr x8, [sp, #984] ; 8-byte Folded Reload
msr NZCV, x8
adcs x8, x14, x1
str x8, [sp, #1120] ; 8-byte Folded Spill
ldr x8, [sp, #832] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #952] ; 8-byte Folded Reload
adcs xzr, x19, x8
adcs x11, x11, xzr
ldr x8, [sp, #816] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #880] ; 8-byte Folded Reload
adcs xzr, x8, x2
adcs x11, x11, xzr
ldr x8, [sp, #944] ; 8-byte Folded Reload
cmp x30, x8
ldr x8, [sp, #776] ; 8-byte Folded Reload
cinc x0, x8, lo
ldr x8, [sp, #808] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x27, x30
mrs x8, NZCV
str x8, [sp, #984] ; 8-byte Folded Spill
mov x8, x16
mov x16, x20
mul x20, x8, x20
adcs x27, x0, x11
adds x28, x3, x20
ldr x9, [sp, #800] ; 8-byte Folded Reload
msr NZCV, x9
adcs xzr, x25, x21
mrs x9, NZCV
str x9, [sp, #976] ; 8-byte Folded Spill
adcs x21, x28, x27
ldr x19, [sp, #1024] ; 8-byte Folded Reload
cmp x15, x19
ldr x9, [sp, #1096] ; 8-byte Folded Reload
cinc x28, x9, lo
ldr x9, [sp, #792] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #1040] ; 8-byte Folded Reload
adcs xzr, x9, x15
mrs x9, NZCV
str x9, [sp, #992] ; 8-byte Folded Spill
adcs x25, x28, x21
cmp x4, x6
ldr x9, [sp, #1128] ; 8-byte Folded Reload
ldr x15, [sp, #1064] ; 8-byte Folded Reload
mul x6, x9, x15
cset w30, lo
adds x17, x4, x6
umulh x2, x9, x15
str x2, [sp, #1080] ; 8-byte Folded Spill
adcs x30, x30, x2
ldr x9, [sp, #896] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #904] ; 8-byte Folded Reload
ldr x15, [sp, #824] ; 8-byte Folded Reload
adcs xzr, x9, x15
mrs x9, NZCV
str x9, [sp, #960] ; 8-byte Folded Spill
adcs x2, x17, x25
ldr x9, [sp, #1016] ; 8-byte Folded Reload
cmp x22, x9
ldr x6, [sp, #1104] ; 8-byte Folded Reload
cinc x4, x6, lo
ldr x9, [sp, #840] ; 8-byte Folded Reload
msr NZCV, x9
adcs x22, x4, x2
cmp x3, x23
umulh x23, x8, x16
mov x15, x8
cset w8, lo
cinc x9, x23, lo
cmn x3, x20
adcs x8, x8, x23
str x8, [sp, #1040] ; 8-byte Folded Spill
cmp x11, x5
cset w5, lo
adds x20, x3, x20
adcs x9, x5, x9
ldr x16, [sp, #1136] ; 8-byte Folded Reload
mul x3, x16, x12
adds x5, x10, x3
msr NZCV, x26
adcs xzr, x1, x14
mrs x14, NZCV
adcs x1, x5, x22
ldr x8, [sp, #968] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x2, x4
mrs x2, NZCV
str x2, [sp, #1096] ; 8-byte Folded Spill
mrs x23, NZCV
cmp x10, x24
cset w2, lo
adds x10, x10, x3
umulh x24, x16, x12
adcs x26, x2, x24
msr NZCV, x14
adcs xzr, x22, x10
mrs x10, NZCV
mrs x14, NZCV
cmp x13, x7
ldr x16, [sp, #1048] ; 8-byte Folded Reload
cinc x3, x16, lo
ldr x8, [sp, #1008] ; 8-byte Folded Reload
msr NZCV, x8
ldr x2, [sp, #1056] ; 8-byte Folded Reload
adcs xzr, x2, x13
mrs x13, NZCV
ldr x8, [sp, #1120] ; 8-byte Folded Reload
adcs x2, x3, x8
str x2, [sp, #1136] ; 8-byte Folded Spill
msr NZCV, x14
mrs x5, NZCV
cmp x3, x7
cinc x22, x16, lo
mov x2, x16
msr NZCV, x13
adcs xzr, x8, x3
mrs x13, NZCV
adcs x14, x22, x1
str x14, [sp, #1120] ; 8-byte Folded Spill
ldr x8, [sp, #984] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x11, x0
adcs x9, x9, xzr
ldr x8, [sp, #976] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x27, x20
adcs x9, x9, xzr
cmp x28, x19
ldr x11, [sp, #864] ; 8-byte Folded Reload
cinc x11, x11, lo
ldr x8, [sp, #992] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x21, x28
mrs x27, NZCV
ldr x3, [sp, #1064] ; 8-byte Folded Reload
mul x16, x15, x3
adcs x0, x11, x9
adds x19, x30, x16
ldr x8, [sp, #960] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x25, x17
mrs x14, NZCV
adcs x17, x19, x0
ldr x28, [sp, #1016] ; 8-byte Folded Reload
cmp x4, x28
cinc x4, x6, lo
msr NZCV, x23
adcs x19, x4, x17
cmp x26, x24
ldr x6, [sp, #1128] ; 8-byte Folded Reload
mul x20, x6, x12
cset w21, lo
adds x20, x26, x20
umulh x23, x6, x12
adcs x21, x21, x23
msr NZCV, x10
adcs x10, x20, x19
ldr x6, [sp, #1080] ; 8-byte Folded Reload
cmp x30, x6
umulh x6, x15, x3
cset w24, lo
cinc x25, x6, lo
cmn x30, x16
adcs x6, x24, x6
ldr x8, [sp, #1040] ; 8-byte Folded Reload
cmp x9, x8
cset w8, lo
adds x24, x30, x16
adcs x8, x8, x25
cmp x22, x7
cinc x25, x2, lo
msr NZCV, x13
adcs xzr, x1, x22
mrs x13, NZCV
adcs x16, x25, x10
msr NZCV, x27
adcs xzr, x9, x11
adcs x8, x8, xzr
msr NZCV, x14
adcs xzr, x0, x24
adcs x8, x8, xzr
cmp x8, x6
mul x9, x15, x12
umulh x11, x15, x12
cset w12, lo
cmp x21, x23
cset w15, lo
cinc x0, x11, lo
cmn x21, x9
adcs x11, x15, x11
adds x9, x21, x9
adcs x12, x12, x0
cmp x4, x28
ldr x15, [sp, #928] ; 8-byte Folded Reload
cinc x15, x15, lo
ldr x14, [sp, #1096] ; 8-byte Folded Reload
msr NZCV, x14
adcs xzr, x17, x4
mrs x17, NZCV
adcs x0, x15, x8
msr NZCV, x5
adcs xzr, x19, x20
mrs x14, NZCV
adcs x1, x9, x0
cmp x25, x7
cinc x4, x2, lo
msr NZCV, x13
adcs xzr, x10, x25
mrs x10, NZCV
adcs x13, x4, x1
msr NZCV, x17
adcs xzr, x8, x15
adcs x8, x12, xzr
msr NZCV, x14
adcs xzr, x0, x9
adcs x8, x8, xzr
cmp x8, x11
cset w9, lo
msr NZCV, x10
adcs xzr, x1, x4
mrs x10, NZCV
adcs x11, x8, xzr
adcs x12, x9, xzr
cmp x4, x7
ldr x14, [sp, #1000] ; 8-byte Folded Reload
cinc x14, x14, lo
adds x15, x11, x14
mov w25, #-1
ldp x6, x26, [sp, #496] ; 16-byte Folded Reload
cmp x26, x25
cset w17, lo
ldr x0, [sp, #752] ; 8-byte Folded Reload
sub x17, x0, x17
cmp x0, x17
mov x27, x0
cset w0, lo
ldr x1, [sp, #1088] ; 8-byte Folded Reload
sub x0, x1, x0
cmp x1, x0
cset w1, lo
cmp x6, x25
csetm x4, lo
mov x24, #-4294967295
add x5, x6, x24
mov x28, x6
sub x1, x5, x1
cmp x5, x1
cset w5, lo
sub x4, x4, x5
mov x5, #-65534
movk x5, #0, lsl #16
ldr x7, [sp, #696] ; 8-byte Folded Reload
mov w6, #-2
cmp x7, x6
csetm x6, lo
add x5, x7, x5
mov x30, x7
add x4, x4, x5
cmp x5, x4
cset w5, lo
sub x5, x6, x5
ldr x2, [sp, #1112] ; 8-byte Folded Reload
cmp x2, x25
csetm x6, lo
add x7, x2, x24
add x5, x5, x7
cmp x7, x5
cset w7, lo
sub x6, x6, x7
ldr x2, [sp, #1144] ; 8-byte Folded Reload
cmp x2, x25
add x7, x2, x24
add x6, x6, x7
csetm x19, lo
cmp x7, x6
cset w7, lo
sub x7, x19, x7
ldr x3, [sp, #1136] ; 8-byte Folded Reload
cmp x3, x25
add x19, x3, x24
add x7, x7, x19
csetm x20, lo
cmp x19, x7
cset w19, lo
sub x19, x20, x19
ldr x2, [sp, #1120] ; 8-byte Folded Reload
cmp x2, x25
add x20, x2, x24
add x19, x19, x20
csetm x21, lo
cmp x20, x19
cset w20, lo
sub x20, x21, x20
cmp x16, x25
add x21, x16, x24
add x20, x20, x21
csetm x22, lo
cmp x21, x20
cset w21, lo
sub x21, x22, x21
cmp x13, x25
add x22, x13, x24
add x21, x21, x22
csetm x23, lo
cmp x22, x21
add x22, x15, x24
cset w24, lo
cmp x15, x25
mov w25, #-1
sub x15, x23, x24
add x15, x15, x22
csetm x23, lo
cmp x22, x15
cset w22, lo
sub x22, x23, x22
adds x11, x11, x14
adcs x12, x22, x12
msr NZCV, x10
adcs xzr, x8, x14
adcs x8, x9, xzr
mov x9, #-4294967296
cmp x8, x12
csel x8, x25, x9, hs
csetm x9, lo
and x10, x26, x9
ldr x12, [sp, #488] ; 8-byte Folded Reload
and x12, x8, x12
orr x10, x12, x10
and x12, x27, x9
and x14, x8, x17
orr x12, x14, x12
ldr x14, [sp, #1088] ; 8-byte Folded Reload
and x14, x14, x9
and x17, x8, x0
orr x14, x17, x14
and x17, x28, x9
and x0, x8, x1
orr x17, x0, x17
and x0, x30, x9
and x1, x8, x4
orr x0, x1, x0
ldr x1, [sp, #712] ; 8-byte Folded Reload
str x10, [x1]
stur x12, [x1, #4]
ldr x10, [sp, #1112] ; 8-byte Folded Reload
and x10, x10, x9
and x12, x8, x5
orr x10, x12, x10
str x14, [x1, #8]
stur x17, [x1, #12]
ldr x12, [sp, #1144] ; 8-byte Folded Reload
and x12, x12, x9
and x14, x8, x6
orr x12, x14, x12
and x14, x3, x9
and x17, x8, x7
orr x14, x17, x14
str x0, [x1, #16]
stur x10, [x1, #20]
and x10, x2, x9
and x17, x8, x19
orr x10, x17, x10
and x16, x16, x9
and x17, x8, x20
orr x16, x17, x16
and x13, x13, x9
and x17, x8, x21
orr x13, x17, x13
and x9, x11, x9
str x12, [x1, #24]
stur x14, [x1, #28]
and x8, x8, x15
str x10, [x1, #32]
stur x16, [x1, #36]
orr x8, x8, x9
str x13, [x1, #40]
stur x8, [x1, #44]
add sp, sp, #1152
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
ldp x26, x25, [sp, #16] ; 16-byte Folded Reload
ldp x28, x27, [sp], #96 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
.globl _fiat_p384_square ; -- Begin function fiat_p384_square
.p2align 2
_fiat_p384_square: ; @fiat_p384_square
.cfi_startproc
; %bb.0:
stp x28, x27, [sp, #-96]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 96
stp x26, x25, [sp, #16] ; 16-byte Folded Spill
stp x24, x23, [sp, #32] ; 16-byte Folded Spill
stp x22, x21, [sp, #48] ; 16-byte Folded Spill
stp x20, x19, [sp, #64] ; 16-byte Folded Spill
stp x29, x30, [sp, #80] ; 16-byte Folded Spill
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
sub sp, sp, #1104
.cfi_def_cfa_offset 1200
str x1, [sp, #648] ; 8-byte Folded Spill
str x0, [sp, #1040] ; 8-byte Folded Spill
ldp x9, x16, [x0]
str x9, [sp, #976] ; 8-byte Folded Spill
ldur x4, [x0, #4]
ldur x10, [x0, #12]
ldr x8, [x0, #16]
mul x15, x8, x9
str x15, [sp, #1088] ; 8-byte Folded Spill
mov x24, x8
mul x25, x10, x9
umulh x8, x10, x9
str x8, [sp, #1080] ; 8-byte Folded Spill
mov x22, x10
mul x0, x16, x9
umulh x3, x16, x9
mul x11, x4, x9
umulh x12, x4, x9
mul x10, x9, x9
umulh x13, x9, x9
adds x13, x11, x13
adcs x14, x0, x12
cmp x14, x0
cinc x27, x3, lo
adds x9, x27, x25
adcs x1, x15, x8
str x1, [sp, #920] ; 8-byte Folded Spill
lsl x15, x10, #32
sub x6, x15, x10
mov w8, #-1
umulh x8, x10, x8
cmp x10, x15
cset w15, hi
adds x17, x13, x15
adds x17, x17, x8
mov x21, x8
str x8, [sp, #832] ; 8-byte Folded Spill
cset w2, hs
cmn x13, x15
adcs x13, x14, x2
mrs x7, NZCV
adcs x19, x6, x9
mul x20, x16, x4
mul x14, x4, x4
umulh x15, x4, x4
mov x2, x4
adds x12, x14, x12
adcs x23, x20, x15
cmn x17, x11
adcs x26, x12, x13
adds x4, x17, x11
lsl x28, x4, #32
sub x5, x28, x4
str x5, [sp, #960] ; 8-byte Folded Spill
adcs xzr, x13, x12
mrs x12, NZCV
mov w8, #-2
mul x17, x10, x8
umulh x10, x10, x8
adcs x30, x23, x19
cmp x23, x20
str x2, [sp, #1016] ; 8-byte Folded Spill
mul x11, x24, x2
str x11, [sp, #1032] ; 8-byte Folded Spill
mov x14, x24
str x22, [sp, #1056] ; 8-byte Folded Spill
mul x13, x22, x2
str x13, [sp, #936] ; 8-byte Folded Spill
umulh x15, x22, x2
str x15, [sp, #968] ; 8-byte Folded Spill
umulh x8, x16, x2
cinc x2, x8, lo
str x2, [sp, #1096] ; 8-byte Folded Spill
adds x2, x2, x13
str x2, [sp, #1064] ; 8-byte Folded Spill
adcs x13, x11, x15
str x13, [sp, #1072] ; 8-byte Folded Spill
adds x21, x17, x21
str x6, [sp, #952] ; 8-byte Folded Spill
adcs x24, x6, x10
str x24, [sp, #904] ; 8-byte Folded Spill
msr NZCV, x7
adcs xzr, x9, x6
mrs x9, NZCV
str x9, [sp, #1000] ; 8-byte Folded Spill
adcs x22, x21, x1
msr NZCV, x12
adcs xzr, x19, x23
mrs x9, NZCV
str x9, [sp, #912] ; 8-byte Folded Spill
adcs x13, x2, x22
str x13, [sp, #1024] ; 8-byte Folded Spill
cmp x4, x28
mov w9, #-1
umulh x11, x4, x9
str x11, [sp, #848] ; 8-byte Folded Spill
cset w10, hi
adds x12, x26, x10
adds x12, x12, x11
cset w7, hs
cmn x26, x10
adcs x23, x30, x7
mrs x10, NZCV
str x10, [sp, #928] ; 8-byte Folded Spill
adcs x6, x5, x13
adds x10, x20, x3
mul x26, x16, x16
adcs x20, x26, x8
cmn x12, x0
adcs x3, x10, x23
adds x0, x12, x0
lsl x8, x0, #32
sub x17, x8, x0
str x17, [sp, #944] ; 8-byte Folded Spill
adcs xzr, x23, x10
mrs x1, NZCV
adcs x12, x20, x6
cmp x0, x8
umulh x9, x0, x9
str x9, [sp, #856] ; 8-byte Folded Spill
cset w8, hi
adds x23, x3, x8
adds x9, x23, x9
cset w11, hs
cmn x3, x8
adcs x5, x12, x11
mrs x8, NZCV
str x8, [sp, #864] ; 8-byte Folded Spill
mrs x13, NZCV
mov x7, x25
cmn x27, x25
ldr x8, [sp, #1088] ; 8-byte Folded Reload
ldr x2, [sp, #1080] ; 8-byte Folded Reload
adcs xzr, x2, x8
ldr x15, [sp, #976] ; 8-byte Folded Reload
umulh x8, x14, x15
str x8, [sp, #896] ; 8-byte Folded Spill
adcs x11, x8, xzr
cmp x11, x8
ldr x10, [sp, #1040] ; 8-byte Folded Reload
ldur x12, [x10, #20]
str x12, [sp, #1048] ; 8-byte Folded Spill
mul x8, x12, x15
str x8, [sp, #992] ; 8-byte Folded Spill
cset w27, lo
adds x30, x11, x8
umulh x15, x12, x15
adcs x19, x27, x15
str x15, [sp, #880] ; 8-byte Folded Spill
ldr x8, [sp, #1000] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #920] ; 8-byte Folded Reload
adcs xzr, x8, x21
mrs x8, NZCV
str x8, [sp, #872] ; 8-byte Folded Spill
adcs x27, x24, x30
ldr x8, [sp, #912] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #1064] ; 8-byte Folded Reload
adcs xzr, x22, x8
mrs x8, NZCV
str x8, [sp, #920] ; 8-byte Folded Spill
ldr x8, [sp, #1072] ; 8-byte Folded Reload
adcs x25, x8, x27
cmp x20, x26
mov w8, #-2
mul x22, x4, x8
umulh x8, x4, x8
str x16, [sp, #984] ; 8-byte Folded Spill
umulh x4, x16, x16
cinc x4, x4, lo
str x4, [sp, #1064] ; 8-byte Folded Spill
ldr x26, [sp, #936] ; 8-byte Folded Reload
adds x21, x26, x2
ldr x23, [sp, #1056] ; 8-byte Folded Reload
mul x3, x23, x16
str x3, [sp, #656] ; 8-byte Folded Spill
ldr x12, [sp, #968] ; 8-byte Folded Reload
adcs x28, x3, x12
cmn x9, x7
mul x11, x14, x16
str x11, [sp, #624] ; 8-byte Folded Spill
mov x2, x14
umulh x14, x23, x16
str x14, [sp, #816] ; 8-byte Folded Spill
adcs x16, x21, x5
str x16, [sp, #888] ; 8-byte Folded Spill
adds x23, x4, x3
adcs x16, x11, x14
str x16, [sp, #1000] ; 8-byte Folded Spill
ldr x11, [sp, #848] ; 8-byte Folded Reload
adds x3, x22, x11
ldr x11, [sp, #960] ; 8-byte Folded Reload
adcs x22, x11, x8
str x22, [sp, #664] ; 8-byte Folded Spill
ldr x8, [sp, #928] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #1024] ; 8-byte Folded Reload
adcs xzr, x8, x11
mrs x8, NZCV
str x8, [sp, #912] ; 8-byte Folded Spill
adcs x14, x3, x25
msr NZCV, x1
adcs xzr, x6, x20
mrs x8, NZCV
str x8, [sp, #800] ; 8-byte Folded Spill
adcs x20, x23, x14
msr NZCV, x13
adcs x13, x17, x20
adds x24, x9, x7
lsl x6, x24, #32
sub x4, x6, x24
str x4, [sp, #1024] ; 8-byte Folded Spill
adcs xzr, x5, x21
mrs x17, NZCV
adcs x21, x28, x13
ldr x8, [sp, #1096] ; 8-byte Folded Reload
cmn x8, x26
ldr x16, [sp, #1032] ; 8-byte Folded Reload
adcs xzr, x12, x16
mov x7, x2
ldr x9, [sp, #1016] ; 8-byte Folded Reload
umulh x2, x2, x9
adcs x11, x2, xzr
cmp x19, x15
ldr x5, [x10, #24]
str x5, [sp, #1080] ; 8-byte Folded Spill
ldr x15, [sp, #976] ; 8-byte Folded Reload
mul x10, x5, x15
str x10, [sp, #808] ; 8-byte Folded Spill
cset w12, lo
adds x8, x19, x10
str x8, [sp, #752] ; 8-byte Folded Spill
umulh x10, x5, x15
str x10, [sp, #1096] ; 8-byte Folded Spill
adcs x10, x12, x10
str x10, [sp, #720] ; 8-byte Folded Spill
ldr x10, [sp, #952] ; 8-byte Folded Reload
ldr x1, [sp, #904] ; 8-byte Folded Reload
cmp x1, x10
ldr x12, [sp, #832] ; 8-byte Folded Reload
add x10, x10, x12
str x10, [sp, #928] ; 8-byte Folded Spill
cinc x26, x10, lo
ldr x10, [sp, #872] ; 8-byte Folded Reload
msr NZCV, x10
adcs xzr, x30, x1
mrs x10, NZCV
str x10, [sp, #704] ; 8-byte Folded Spill
adcs x1, x26, x8
str x1, [sp, #824] ; 8-byte Folded Spill
cmp x11, x2
mov x10, x9
ldr x8, [sp, #1048] ; 8-byte Folded Reload
mul x9, x8, x9
str x9, [sp, #840] ; 8-byte Folded Spill
cset w15, lo
adds x9, x11, x9
str x9, [sp, #776] ; 8-byte Folded Spill
umulh x30, x8, x10
adcs x8, x15, x30
str x8, [sp, #768] ; 8-byte Folded Spill
ldr x8, [sp, #920] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #1072] ; 8-byte Folded Reload
adcs xzr, x27, x8
mrs x8, NZCV
str x8, [sp, #712] ; 8-byte Folded Spill
adcs x8, x9, x1
str x8, [sp, #792] ; 8-byte Folded Spill
ldr x9, [sp, #912] ; 8-byte Folded Reload
msr NZCV, x9
adcs xzr, x25, x3
mrs x9, NZCV
str x9, [sp, #744] ; 8-byte Folded Spill
adcs x9, x22, x8
str x9, [sp, #784] ; 8-byte Folded Spill
ldr x8, [sp, #800] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x14, x23
mrs x8, NZCV
str x8, [sp, #736] ; 8-byte Folded Spill
mov w8, #-2
mul x11, x0, x8
umulh x14, x0, x8
ldr x8, [sp, #1000] ; 8-byte Folded Reload
adcs x0, x8, x9
str x0, [sp, #696] ; 8-byte Folded Spill
ldr x19, [sp, #656] ; 8-byte Folded Reload
cmp x28, x19
ldr x9, [sp, #1056] ; 8-byte Folded Reload
mul x8, x7, x9
str x8, [sp, #904] ; 8-byte Folded Spill
mov x22, x7
mul x10, x9, x9
str x10, [sp, #920] ; 8-byte Folded Spill
umulh x12, x9, x9
str x12, [sp, #912] ; 8-byte Folded Spill
ldr x1, [sp, #816] ; 8-byte Folded Reload
cinc x15, x1, lo
str x15, [sp, #872] ; 8-byte Folded Spill
adds x9, x15, x10
str x9, [sp, #728] ; 8-byte Folded Spill
adcs x10, x8, x12
str x10, [sp, #968] ; 8-byte Folded Spill
ldr x8, [sp, #856] ; 8-byte Folded Reload
adds x11, x11, x8
str x11, [sp, #640] ; 8-byte Folded Spill
ldr x8, [sp, #944] ; 8-byte Folded Reload
adcs x10, x8, x14
str x10, [sp, #936] ; 8-byte Folded Spill
ldr x10, [sp, #864] ; 8-byte Folded Reload
msr NZCV, x10
adcs xzr, x20, x8
mrs x8, NZCV
str x8, [sp, #680] ; 8-byte Folded Spill
adcs x8, x11, x0
msr NZCV, x17
adcs xzr, x13, x28
mrs x10, NZCV
str x10, [sp, #672] ; 8-byte Folded Spill
adcs x9, x9, x8
str x9, [sp, #800] ; 8-byte Folded Spill
cmp x24, x6
mov w12, #-1
umulh x13, x24, x12
str x13, [sp, #864] ; 8-byte Folded Spill
cset w10, hi
ldr x14, [sp, #888] ; 8-byte Folded Reload
adds x11, x14, x10
adds x11, x11, x13
cset w13, hs
cmn x14, x10
adcs x13, x21, x13
mrs x10, NZCV
str x10, [sp, #688] ; 8-byte Folded Spill
adcs x14, x4, x9
str x14, [sp, #592] ; 8-byte Folded Spill
ldr x9, [sp, #896] ; 8-byte Folded Reload
adds x15, x16, x9
ldr x16, [sp, #624] ; 8-byte Folded Reload
adcs x27, x16, x2
ldr x9, [sp, #1088] ; 8-byte Folded Reload
cmn x11, x9
adcs x10, x15, x13
adds x5, x11, x9
lsl x2, x5, #32
sub x28, x2, x5
str x28, [sp, #1032] ; 8-byte Folded Spill
adcs xzr, x13, x15
mrs x3, NZCV
adcs x9, x27, x14
cmp x5, x2
umulh x11, x5, x12
str x11, [sp, #888] ; 8-byte Folded Spill
cset w13, hi
adds x14, x10, x13
adds x14, x14, x11
cset w11, hs
cmn x10, x13
adcs x23, x9, x11
mrs x9, NZCV
str x9, [sp, #760] ; 8-byte Folded Spill
mrs x0, NZCV
ldr x9, [sp, #1064] ; 8-byte Folded Reload
cmn x9, x19
adcs xzr, x1, x16
mov x20, x16
ldr x12, [sp, #984] ; 8-byte Folded Reload
umulh x10, x7, x12
adcs x9, x10, xzr
ldr x11, [sp, #1096] ; 8-byte Folded Reload
ldr x16, [sp, #720] ; 8-byte Folded Reload
cmp x16, x11
ldr x17, [sp, #1040] ; 8-byte Folded Reload
ldur x13, [x17, #28]
str x13, [sp, #1072] ; 8-byte Folded Spill
ldr x1, [sp, #976] ; 8-byte Folded Reload
mul x15, x13, x1
str x15, [sp, #816] ; 8-byte Folded Spill
cset w11, lo
adds x16, x16, x15
str x16, [sp, #656] ; 8-byte Folded Spill
umulh x15, x13, x1
str x15, [sp, #1064] ; 8-byte Folded Spill
adcs x11, x11, x15
str x11, [sp, #720] ; 8-byte Folded Spill
ldr x19, [sp, #952] ; 8-byte Folded Reload
cmp x26, x19
ldr x1, [sp, #928] ; 8-byte Folded Reload
cinc x2, x1, lo
ldr x11, [sp, #704] ; 8-byte Folded Reload
msr NZCV, x11
ldr x11, [sp, #752] ; 8-byte Folded Reload
adcs xzr, x11, x26
mrs x11, NZCV
str x11, [sp, #600] ; 8-byte Folded Spill
adcs x7, x2, x16
str x7, [sp, #568] ; 8-byte Folded Spill
ldr x21, [sp, #768] ; 8-byte Folded Reload
cmp x21, x30
cset w15, lo
cmp x9, x10
ldr x11, [sp, #1048] ; 8-byte Folded Reload
mul x26, x11, x12
cset w16, lo
adds x4, x9, x26
str x4, [sp, #632] ; 8-byte Folded Spill
umulh x6, x11, x12
adcs x9, x16, x6
str x9, [sp, #616] ; 8-byte Folded Spill
ldr x12, [sp, #1080] ; 8-byte Folded Reload
ldr x9, [sp, #1016] ; 8-byte Folded Reload
mul x11, x12, x9
str x11, [sp, #752] ; 8-byte Folded Spill
adds x13, x21, x11
str x13, [sp, #584] ; 8-byte Folded Spill
umulh x11, x12, x9
str x11, [sp, #608] ; 8-byte Folded Spill
adcs x16, x15, x11
ldr x9, [sp, #712] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #824] ; 8-byte Folded Reload
ldr x11, [sp, #776] ; 8-byte Folded Reload
adcs xzr, x9, x11
mrs x9, NZCV
str x9, [sp, #560] ; 8-byte Folded Spill
adcs x12, x13, x7
str x12, [sp, #704] ; 8-byte Folded Spill
ldr x9, [sp, #960] ; 8-byte Folded Reload
ldr x11, [sp, #664] ; 8-byte Folded Reload
cmp x11, x9
ldr x7, [sp, #848] ; 8-byte Folded Reload
add x13, x9, x7
str x13, [sp, #896] ; 8-byte Folded Spill
cinc x13, x13, lo
ldr x9, [sp, #744] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #792] ; 8-byte Folded Reload
adcs xzr, x9, x11
mrs x9, NZCV
str x9, [sp, #792] ; 8-byte Folded Spill
adcs x12, x13, x12
str x12, [sp, #576] ; 8-byte Folded Spill
ldr x9, [sp, #736] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #784] ; 8-byte Folded Reload
ldr x11, [sp, #1000] ; 8-byte Folded Reload
adcs xzr, x9, x11
mrs x9, NZCV
str x9, [sp, #472] ; 8-byte Folded Spill
adcs x12, x4, x12
str x12, [sp, #520] ; 8-byte Folded Spill
ldr x9, [sp, #680] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #696] ; 8-byte Folded Reload
ldr x11, [sp, #640] ; 8-byte Folded Reload
adcs xzr, x9, x11
mrs x9, NZCV
str x9, [sp, #496] ; 8-byte Folded Spill
ldr x9, [sp, #936] ; 8-byte Folded Reload
adcs x11, x9, x12
str x11, [sp, #512] ; 8-byte Folded Spill
ldr x9, [sp, #672] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #728] ; 8-byte Folded Reload
adcs xzr, x8, x9
mrs x8, NZCV
str x8, [sp, #480] ; 8-byte Folded Spill
ldr x8, [sp, #968] ; 8-byte Folded Reload
adcs x11, x8, x11
str x11, [sp, #488] ; 8-byte Folded Spill
cmp x27, x20
cinc x9, x10, lo
str x9, [sp, #784] ; 8-byte Folded Spill
ldr x8, [sp, #880] ; 8-byte Folded Reload
ldr x10, [sp, #840] ; 8-byte Folded Reload
adds x15, x10, x8
mov w8, #-2
mul x4, x24, x8
umulh x24, x24, x8
adcs x21, x26, x30
ldr x30, [sp, #992] ; 8-byte Folded Reload
cmn x14, x30
str x22, [sp, #1008] ; 8-byte Folded Spill
ldr x20, [sp, #1056] ; 8-byte Folded Reload
umulh x25, x22, x20
mul x22, x22, x22
str x22, [sp, #776] ; 8-byte Folded Spill
adcs x12, x15, x23
str x12, [sp, #664] ; 8-byte Folded Spill
ldr x12, [sp, #904] ; 8-byte Folded Reload
adds x10, x9, x12
adcs x7, x22, x25
str x7, [sp, #1000] ; 8-byte Folded Spill
ldr x7, [sp, #864] ; 8-byte Folded Reload
adds x9, x4, x7
str x9, [sp, #696] ; 8-byte Folded Spill
ldr x4, [sp, #1024] ; 8-byte Folded Reload
adcs x8, x4, x24
str x8, [sp, #880] ; 8-byte Folded Spill
ldr x8, [sp, #688] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #800] ; 8-byte Folded Reload
adcs xzr, x8, x4
mrs x8, NZCV
str x8, [sp, #464] ; 8-byte Folded Spill
adcs x24, x9, x11
msr NZCV, x3
ldr x8, [sp, #592] ; 8-byte Folded Reload
adcs xzr, x8, x27
mrs x8, NZCV
str x8, [sp, #456] ; 8-byte Folded Spill
adcs x8, x10, x24
str x8, [sp, #736] ; 8-byte Folded Spill
msr NZCV, x0
adcs x28, x28, x8
adds x22, x14, x30
lsl x0, x22, #32
sub x30, x0, x22
str x30, [sp, #992] ; 8-byte Folded Spill
adcs xzr, x23, x15
mrs x4, NZCV
adcs x9, x21, x28
str x9, [sp, #448] ; 8-byte Folded Spill
ldr x9, [sp, #920] ; 8-byte Folded Reload
ldr x11, [sp, #872] ; 8-byte Folded Reload
cmn x11, x9
ldr x9, [sp, #912] ; 8-byte Folded Reload
adcs xzr, x9, x12
mov x23, x25
adcs x14, x25, xzr
ldr x9, [sp, #1064] ; 8-byte Folded Reload
ldr x8, [sp, #720] ; 8-byte Folded Reload
cmp x8, x9
ldr x9, [x17, #32]
str x9, [sp, #1088] ; 8-byte Folded Spill
ldr x25, [sp, #976] ; 8-byte Folded Reload
mul x17, x9, x25
str x17, [sp, #592] ; 8-byte Folded Spill
cset w3, lo
adds x8, x8, x17
str x8, [sp, #728] ; 8-byte Folded Spill
umulh x17, x9, x25
str x17, [sp, #744] ; 8-byte Folded Spill
adcs x9, x3, x17
str x9, [sp, #680] ; 8-byte Folded Spill
cmp x2, x19
cinc x3, x1, lo
ldr x9, [sp, #600] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #656] ; 8-byte Folded Reload
adcs xzr, x9, x2
mrs x9, NZCV
str x9, [sp, #712] ; 8-byte Folded Spill
adcs x17, x3, x8
str x17, [sp, #672] ; 8-byte Folded Spill
ldr x7, [sp, #608] ; 8-byte Folded Reload
cmp x16, x7
ldr x8, [sp, #1072] ; 8-byte Folded Reload
ldr x12, [sp, #1016] ; 8-byte Folded Reload
mul x9, x8, x12
str x9, [sp, #768] ; 8-byte Folded Spill
cset w11, lo
adds x15, x16, x9
str x15, [sp, #624] ; 8-byte Folded Spill
umulh x9, x8, x12
str x9, [sp, #552] ; 8-byte Folded Spill
adcs x8, x11, x9
str x8, [sp, #600] ; 8-byte Folded Spill
ldr x8, [sp, #560] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #584] ; 8-byte Folded Reload
ldr x9, [sp, #568] ; 8-byte Folded Reload
adcs xzr, x9, x8
mrs x8, NZCV
str x8, [sp, #560] ; 8-byte Folded Spill
adcs x11, x15, x17
str x11, [sp, #584] ; 8-byte Folded Spill
ldr x27, [sp, #960] ; 8-byte Folded Reload
cmp x13, x27
ldr x9, [sp, #896] ; 8-byte Folded Reload
cinc x25, x9, lo
ldr x8, [sp, #792] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #704] ; 8-byte Folded Reload
adcs xzr, x8, x13
mrs x8, NZCV
str x8, [sp, #536] ; 8-byte Folded Spill
adcs x8, x25, x11
str x8, [sp, #544] ; 8-byte Folded Spill
ldr x2, [sp, #616] ; 8-byte Folded Reload
cmp x2, x6
cset w9, lo
cmp x14, x23
mov x19, x23
cset w11, lo
cmp x21, x26
mov w16, #-2
mul x15, x5, x16
umulh x16, x5, x16
cinc x17, x6, lo
str x17, [sp, #800] ; 8-byte Folded Spill
ldr x23, [sp, #1048] ; 8-byte Folded Reload
mul x1, x23, x20
umulh x5, x23, x20
ldr x26, [sp, #1008] ; 8-byte Folded Reload
mul x13, x23, x26
str x13, [sp, #840] ; 8-byte Folded Spill
str x1, [sp, #792] ; 8-byte Folded Spill
adds x17, x17, x1
str x5, [sp, #920] ; 8-byte Folded Spill
adcs x12, x13, x5
str x12, [sp, #704] ; 8-byte Folded Spill
ldr x13, [sp, #888] ; 8-byte Folded Reload
adds x23, x15, x13
str x23, [sp, #640] ; 8-byte Folded Spill
ldr x15, [sp, #1032] ; 8-byte Folded Reload
adcs x13, x15, x16
str x13, [sp, #872] ; 8-byte Folded Spill
adds x20, x14, x1
str x20, [sp, #528] ; 8-byte Folded Spill
adcs x11, x11, x5
str x11, [sp, #688] ; 8-byte Folded Spill
ldr x13, [sp, #1080] ; 8-byte Folded Reload
ldr x16, [sp, #984] ; 8-byte Folded Reload
mul x12, x13, x16
adds x14, x2, x12
str x14, [sp, #504] ; 8-byte Folded Spill
mov x1, x12
str x12, [sp, #568] ; 8-byte Folded Spill
umulh x11, x13, x16
adcs x13, x9, x11
str x11, [sp, #352] ; 8-byte Folded Spill
ldr x9, [sp, #472] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #632] ; 8-byte Folded Reload
ldr x12, [sp, #576] ; 8-byte Folded Reload
adcs xzr, x12, x9
mrs x9, NZCV
adcs x12, x14, x8
str x12, [sp, #720] ; 8-byte Folded Spill
ldr x8, [sp, #944] ; 8-byte Folded Reload
ldr x16, [sp, #936] ; 8-byte Folded Reload
cmp x16, x8
ldr x14, [sp, #856] ; 8-byte Folded Reload
add x14, x8, x14
str x14, [sp, #912] ; 8-byte Folded Spill
cinc x6, x14, lo
ldr x8, [sp, #496] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #520] ; 8-byte Folded Reload
adcs xzr, x8, x16
mrs x8, NZCV
stp x8, x9, [sp, #432] ; 16-byte Folded Spill
adcs x9, x6, x12
str x9, [sp, #632] ; 8-byte Folded Spill
ldp x8, x12, [sp, #480] ; 16-byte Folded Reload
msr NZCV, x8
ldr x16, [sp, #968] ; 8-byte Folded Reload
ldr x8, [sp, #512] ; 8-byte Folded Reload
adcs xzr, x8, x16
mrs x8, NZCV
str x8, [sp, #424] ; 8-byte Folded Spill
adcs x8, x20, x9
str x8, [sp, #496] ; 8-byte Folded Spill
ldr x9, [sp, #464] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #696] ; 8-byte Folded Reload
adcs xzr, x12, x9
mrs x9, NZCV
str x9, [sp, #416] ; 8-byte Folded Spill
ldr x16, [sp, #880] ; 8-byte Folded Reload
adcs x8, x16, x8
ldr x9, [sp, #456] ; 8-byte Folded Reload
msr NZCV, x9
adcs xzr, x24, x10
mrs x9, NZCV
str x9, [sp, #408] ; 8-byte Folded Spill
ldr x16, [sp, #1000] ; 8-byte Folded Reload
adcs x10, x16, x8
stp x10, x8, [sp, #480] ; 16-byte Folded Spill
ldr x9, [sp, #760] ; 8-byte Folded Reload
msr NZCV, x9
ldr x8, [sp, #736] ; 8-byte Folded Reload
adcs xzr, x8, x15
mrs x8, NZCV
str x8, [sp, #392] ; 8-byte Folded Spill
adcs x8, x23, x10
msr NZCV, x4
adcs xzr, x28, x21
mrs x10, NZCV
adcs x14, x17, x8
str x14, [sp, #520] ; 8-byte Folded Spill
msr NZCV, x10
adcs xzr, x8, x17
mrs x8, NZCV
str x8, [sp, #472] ; 8-byte Folded Spill
str x8, [sp, #360] ; 8-byte Folded Spill
str x22, [sp, #824] ; 8-byte Folded Spill
cmp x22, x0
mov w8, #-1
umulh x9, x22, x8
str x9, [sp, #936] ; 8-byte Folded Spill
cset w8, hi
ldr x12, [sp, #664] ; 8-byte Folded Reload
adds x10, x12, x8
adds x4, x10, x9
cset w10, hs
cmn x12, x8
ldr x8, [sp, #448] ; 8-byte Folded Reload
adcs x8, x8, x10
mrs x9, NZCV
adcs x14, x30, x14
stp x9, x14, [sp, #368] ; 16-byte Folded Spill
ldr x10, [sp, #1096] ; 8-byte Folded Reload
ldr x9, [sp, #752] ; 8-byte Folded Reload
adds x17, x9, x10
adcs x7, x1, x7
ldr x12, [sp, #808] ; 8-byte Folded Reload
cmn x4, x12
adcs x9, x17, x8
str x9, [sp, #400] ; 8-byte Folded Spill
adds x21, x4, x12
lsl x20, x21, #32
sub x2, x20, x21
str x2, [sp, #968] ; 8-byte Folded Spill
adcs xzr, x8, x17
mrs x23, NZCV
adcs x28, x7, x14
ldr x8, [sp, #784] ; 8-byte Folded Reload
ldr x9, [sp, #904] ; 8-byte Folded Reload
cmn x8, x9
ldr x8, [sp, #776] ; 8-byte Folded Reload
adcs xzr, x19, x8
ldr x8, [sp, #1040] ; 8-byte Folded Reload
ldur x0, [x8, #36]
ldr x16, [sp, #976] ; 8-byte Folded Reload
mul x22, x0, x16
str x0, [sp, #1096] ; 8-byte Folded Spill
umulh x17, x26, x26
adcs x4, x17, xzr
ldr x15, [sp, #680] ; 8-byte Folded Reload
adds x19, x15, x22
mov x30, x22
str x22, [sp, #616] ; 8-byte Folded Spill
ldr x5, [sp, #952] ; 8-byte Folded Reload
cmp x3, x5
ldr x22, [sp, #928] ; 8-byte Folded Reload
cinc x9, x22, lo
str x9, [sp, #384] ; 8-byte Folded Spill
ldr x8, [sp, #712] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #728] ; 8-byte Folded Reload
adcs xzr, x8, x3
mrs x8, NZCV
str x8, [sp, #464] ; 8-byte Folded Spill
adcs x9, x9, x19
str x9, [sp, #696] ; 8-byte Folded Spill
ldr x5, [sp, #552] ; 8-byte Folded Reload
ldr x12, [sp, #600] ; 8-byte Folded Reload
cmp x12, x5
cset w3, lo
cmp x13, x11
cset w19, lo
ldr x8, [sp, #920] ; 8-byte Folded Reload
ldr x11, [sp, #688] ; 8-byte Folded Reload
cmp x11, x8
cset w24, lo
cmp x4, x17
cset w17, lo
ldr x8, [sp, #840] ; 8-byte Folded Reload
adds x14, x4, x8
str x14, [sp, #736] ; 8-byte Folded Spill
ldr x8, [sp, #1048] ; 8-byte Folded Reload
umulh x4, x8, x26
adcs x8, x17, x4
str x8, [sp, #760] ; 8-byte Folded Spill
ldr x17, [sp, #1080] ; 8-byte Folded Reload
ldr x8, [sp, #1056] ; 8-byte Folded Reload
mul x26, x17, x8
adds x10, x11, x26
str x10, [sp, #728] ; 8-byte Folded Spill
str x26, [sp, #784] ; 8-byte Folded Spill
umulh x8, x17, x8
str x8, [sp, #712] ; 8-byte Folded Spill
adcs x8, x24, x8
str x8, [sp, #688] ; 8-byte Folded Spill
ldr x11, [sp, #984] ; 8-byte Folded Reload
ldr x17, [sp, #1072] ; 8-byte Folded Reload
mul x1, x17, x11
adds x24, x13, x1
str x24, [sp, #656] ; 8-byte Folded Spill
str x1, [sp, #752] ; 8-byte Folded Spill
umulh x8, x17, x11
adcs x11, x19, x8
str x11, [sp, #608] ; 8-byte Folded Spill
mov x19, x8
ldr x11, [sp, #1016] ; 8-byte Folded Reload
ldr x13, [sp, #1088] ; 8-byte Folded Reload
mul x8, x13, x11
str x8, [sp, #344] ; 8-byte Folded Spill
adds x17, x12, x8
str x17, [sp, #576] ; 8-byte Folded Spill
umulh x13, x13, x11
adcs x11, x3, x13
str x11, [sp, #600] ; 8-byte Folded Spill
str x13, [sp, #216] ; 8-byte Folded Spill
ldr x8, [sp, #560] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #672] ; 8-byte Folded Reload
ldr x11, [sp, #624] ; 8-byte Folded Reload
adcs xzr, x8, x11
mrs x8, NZCV
str x8, [sp, #456] ; 8-byte Folded Spill
adcs x8, x17, x9
str x8, [sp, #560] ; 8-byte Folded Spill
cmp x25, x27
ldr x3, [sp, #896] ; 8-byte Folded Reload
cinc x27, x3, lo
ldr x9, [sp, #536] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #584] ; 8-byte Folded Reload
adcs xzr, x9, x25
mrs x9, NZCV
str x9, [sp, #448] ; 8-byte Folded Spill
adcs x8, x27, x8
str x8, [sp, #512] ; 8-byte Folded Spill
ldr x9, [sp, #440] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #544] ; 8-byte Folded Reload
ldr x11, [sp, #504] ; 8-byte Folded Reload
adcs xzr, x9, x11
mrs x9, NZCV
str x9, [sp, #440] ; 8-byte Folded Spill
adcs x9, x24, x8
str x9, [sp, #544] ; 8-byte Folded Spill
ldr x24, [sp, #944] ; 8-byte Folded Reload
cmp x6, x24
ldr x11, [sp, #912] ; 8-byte Folded Reload
cinc x8, x11, lo
str x8, [sp, #672] ; 8-byte Folded Spill
ldr x11, [sp, #432] ; 8-byte Folded Reload
msr NZCV, x11
ldr x11, [sp, #720] ; 8-byte Folded Reload
adcs xzr, x11, x6
mrs x11, NZCV
str x11, [sp, #432] ; 8-byte Folded Spill
adcs x8, x8, x9
str x8, [sp, #504] ; 8-byte Folded Spill
ldr x9, [sp, #424] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #632] ; 8-byte Folded Reload
ldr x11, [sp, #528] ; 8-byte Folded Reload
adcs xzr, x9, x11
mrs x9, NZCV
str x9, [sp, #424] ; 8-byte Folded Spill
adcs x10, x10, x8
str x10, [sp, #536] ; 8-byte Folded Spill
ldr x8, [sp, #1024] ; 8-byte Folded Reload
ldr x9, [sp, #880] ; 8-byte Folded Reload
cmp x9, x8
ldr x11, [sp, #864] ; 8-byte Folded Reload
add x8, x8, x11
str x8, [sp, #904] ; 8-byte Folded Spill
cinc x8, x8, lo
str x8, [sp, #720] ; 8-byte Folded Spill
ldr x11, [sp, #416] ; 8-byte Folded Reload
msr NZCV, x11
ldr x11, [sp, #496] ; 8-byte Folded Reload
adcs xzr, x11, x9
mrs x9, NZCV
str x9, [sp, #416] ; 8-byte Folded Spill
adcs x9, x8, x10
str x9, [sp, #496] ; 8-byte Folded Spill
ldr x8, [sp, #408] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #1000] ; 8-byte Folded Reload
ldr x10, [sp, #488] ; 8-byte Folded Reload
adcs xzr, x10, x8
mrs x8, NZCV
str x8, [sp, #408] ; 8-byte Folded Spill
adcs x8, x14, x9
str x8, [sp, #528] ; 8-byte Folded Spill
ldr x9, [sp, #392] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #640] ; 8-byte Folded Reload
ldr x10, [sp, #480] ; 8-byte Folded Reload
adcs xzr, x10, x9
mrs x9, NZCV
str x9, [sp, #488] ; 8-byte Folded Spill
ldr x9, [sp, #872] ; 8-byte Folded Reload
adcs x9, x9, x8
ldr x8, [sp, #360] ; 8-byte Folded Reload
msr NZCV, x8
ldr x6, [sp, #704] ; 8-byte Folded Reload
adcs x10, x6, x9
str x10, [sp, #632] ; 8-byte Folded Spill
ldr x11, [sp, #744] ; 8-byte Folded Reload
cmp x15, x11
cset w11, lo
adds x30, x15, x30
mov x17, x16
umulh x14, x0, x16
str x14, [sp, #808] ; 8-byte Folded Spill
adcs x12, x11, x14
ldr x8, [sp, #472] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x9, x6
mrs x8, NZCV
str x8, [sp, #624] ; 8-byte Folded Spill
str x8, [sp, #392] ; 8-byte Folded Spill
ldr x9, [sp, #824] ; 8-byte Folded Reload
mov w8, #-2
mul x8, x9, x8
str x8, [sp, #472] ; 8-byte Folded Spill
ldr x9, [sp, #936] ; 8-byte Folded Reload
adds x9, x8, x9
ldr x8, [sp, #368] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #992] ; 8-byte Folded Reload
ldr x11, [sp, #520] ; 8-byte Folded Reload
adcs xzr, x11, x8
mrs x8, NZCV
str x8, [sp, #520] ; 8-byte Folded Spill
adcs x8, x9, x10
str x8, [sp, #584] ; 8-byte Folded Spill
ldr x9, [sp, #568] ; 8-byte Folded Reload
cmp x7, x9
ldr x9, [sp, #352] ; 8-byte Folded Reload
cinc x9, x9, lo
str x9, [sp, #776] ; 8-byte Folded Spill
adds x9, x9, x26
msr NZCV, x23
ldr x10, [sp, #376] ; 8-byte Folded Reload
adcs xzr, x10, x7
mrs x10, NZCV
str x10, [sp, #480] ; 8-byte Folded Spill
adcs x8, x9, x8
str x8, [sp, #568] ; 8-byte Folded Spill
str x21, [sp, #664] ; 8-byte Folded Spill
cmp x21, x20
mov w9, #-1
umulh x11, x21, x9
str x11, [sp, #880] ; 8-byte Folded Spill
cset w9, hi
ldr x14, [sp, #400] ; 8-byte Folded Reload
adds x10, x14, x9
adds x10, x10, x11
cset w11, hs
cmn x14, x9
adcs x14, x28, x11
mrs x9, NZCV
str x9, [sp, #400] ; 8-byte Folded Spill
adcs x16, x2, x8
ldr x8, [sp, #1064] ; 8-byte Folded Reload
ldr x9, [sp, #768] ; 8-byte Folded Reload
adds x15, x9, x8
adcs x9, x1, x5
stp x9, x16, [sp, #368] ; 16-byte Folded Spill
ldr x8, [sp, #816] ; 8-byte Folded Reload
cmn x10, x8
adcs x11, x15, x14
str x11, [sp, #552] ; 8-byte Folded Spill
adds x8, x10, x8
str x8, [sp, #680] ; 8-byte Folded Spill
lsl x7, x8, #32
sub x8, x7, x8
str x8, [sp, #1000] ; 8-byte Folded Spill
adcs xzr, x14, x15
mrs x8, NZCV
str x8, [sp, #352] ; 8-byte Folded Spill
adcs x8, x9, x16
str x8, [sp, #360] ; 8-byte Folded Spill
ldr x8, [sp, #800] ; 8-byte Folded Reload
ldr x9, [sp, #792] ; 8-byte Folded Reload
cmn x8, x9
ldr x8, [sp, #920] ; 8-byte Folded Reload
ldr x9, [sp, #840] ; 8-byte Folded Reload
adcs xzr, x8, x9
ldr x8, [sp, #1040] ; 8-byte Folded Reload
ldr x9, [x8, #40]
mov x25, x17
mul x23, x9, x17
mov x28, x9
str x9, [sp, #1064] ; 8-byte Folded Spill
adcs x9, x4, xzr
mov x21, x12
adds x11, x12, x23
str x23, [sp, #640] ; 8-byte Folded Spill
ldr x2, [sp, #952] ; 8-byte Folded Reload
ldr x10, [sp, #384] ; 8-byte Folded Reload
cmp x10, x2
cinc x17, x22, lo
ldr x8, [sp, #464] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x30, x10
mrs x8, NZCV
str x8, [sp, #336] ; 8-byte Folded Spill
adcs x20, x17, x11
str x20, [sp, #312] ; 8-byte Folded Spill
ldr x6, [sp, #600] ; 8-byte Folded Reload
cmp x6, x13
cset w10, lo
ldr x5, [sp, #608] ; 8-byte Folded Reload
cmp x5, x19
cset w15, lo
ldr x16, [sp, #712] ; 8-byte Folded Reload
ldr x1, [sp, #688] ; 8-byte Folded Reload
cmp x1, x16
cset w14, lo
ldr x0, [sp, #760] ; 8-byte Folded Reload
cmp x0, x4
cset w13, lo
cmp x9, x4
ldr x8, [sp, #1048] ; 8-byte Folded Reload
mul x11, x8, x8
cset w12, lo
adds x11, x9, x11
umulh x30, x8, x8
adcs x8, x12, x30
str x8, [sp, #792] ; 8-byte Folded Spill
ldr x8, [sp, #1080] ; 8-byte Folded Reload
ldr x9, [sp, #1008] ; 8-byte Folded Reload
mul x12, x8, x9
adds x4, x0, x12
str x4, [sp, #296] ; 8-byte Folded Spill
mov x0, x12
str x12, [sp, #464] ; 8-byte Folded Spill
umulh x12, x8, x9
str x12, [sp, #840] ; 8-byte Folded Spill
adcs x8, x13, x12
str x8, [sp, #800] ; 8-byte Folded Spill
ldr x8, [sp, #1056] ; 8-byte Folded Reload
ldr x9, [sp, #1072] ; 8-byte Folded Reload
mul x12, x9, x8
str x12, [sp, #704] ; 8-byte Folded Spill
adds x13, x1, x12
umulh x8, x9, x8
str x8, [sp, #688] ; 8-byte Folded Spill
adcs x8, x14, x8
str x8, [sp, #816] ; 8-byte Folded Spill
ldr x8, [sp, #984] ; 8-byte Folded Reload
ldr x12, [sp, #1088] ; 8-byte Folded Reload
mul x9, x12, x8
str x9, [sp, #768] ; 8-byte Folded Spill
adds x14, x5, x9
stp x14, x13, [sp, #264] ; 16-byte Folded Spill
umulh x9, x12, x8
str x9, [sp, #760] ; 8-byte Folded Spill
adcs x8, x15, x9
str x8, [sp, #328] ; 8-byte Folded Spill
ldr x12, [sp, #1016] ; 8-byte Folded Reload
ldr x8, [sp, #1096] ; 8-byte Folded Reload
mul x9, x8, x12
str x9, [sp, #608] ; 8-byte Folded Spill
adds x15, x6, x9
str x15, [sp, #224] ; 8-byte Folded Spill
umulh x6, x8, x12
adcs x8, x10, x6
str x8, [sp, #320] ; 8-byte Folded Spill
str x6, [sp, #600] ; 8-byte Folded Spill
ldr x8, [sp, #456] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #696] ; 8-byte Folded Reload
ldr x9, [sp, #576] ; 8-byte Folded Reload
adcs xzr, x8, x9
mrs x8, NZCV
str x8, [sp, #456] ; 8-byte Folded Spill
adcs x9, x15, x20
str x9, [sp, #288] ; 8-byte Folded Spill
ldr x8, [sp, #960] ; 8-byte Folded Reload
cmp x27, x8
cinc x20, x3, lo
ldr x8, [sp, #448] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #560] ; 8-byte Folded Reload
adcs xzr, x8, x27
mrs x8, NZCV
str x8, [sp, #448] ; 8-byte Folded Spill
adcs x10, x20, x9
str x10, [sp, #304] ; 8-byte Folded Spill
ldr x8, [sp, #440] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #656] ; 8-byte Folded Reload
ldr x9, [sp, #512] ; 8-byte Folded Reload
adcs xzr, x9, x8
mrs x8, NZCV
str x8, [sp, #176] ; 8-byte Folded Spill
adcs x10, x14, x10
str x10, [sp, #512] ; 8-byte Folded Spill
mov x26, x24
ldr x8, [sp, #672] ; 8-byte Folded Reload
cmp x8, x24
ldr x24, [sp, #912] ; 8-byte Folded Reload
cinc x22, x24, lo
ldr x9, [sp, #432] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #544] ; 8-byte Folded Reload
adcs xzr, x9, x8
mrs x8, NZCV
str x8, [sp, #168] ; 8-byte Folded Spill
adcs x10, x22, x10
str x10, [sp, #544] ; 8-byte Folded Spill
ldr x8, [sp, #424] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #728] ; 8-byte Folded Reload
ldr x9, [sp, #504] ; 8-byte Folded Reload
adcs xzr, x9, x8
mrs x8, NZCV
str x8, [sp, #504] ; 8-byte Folded Spill
adcs x10, x13, x10
str x10, [sp, #208] ; 8-byte Folded Spill
ldr x8, [sp, #1024] ; 8-byte Folded Reload
ldr x9, [sp, #720] ; 8-byte Folded Reload
cmp x9, x8
ldr x8, [sp, #904] ; 8-byte Folded Reload
cinc x27, x8, lo
ldr x8, [sp, #416] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #536] ; 8-byte Folded Reload
adcs xzr, x8, x9
mrs x8, NZCV
str x8, [sp, #160] ; 8-byte Folded Spill
adcs x10, x27, x10
str x10, [sp, #248] ; 8-byte Folded Spill
ldr x8, [sp, #408] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #736] ; 8-byte Folded Reload
ldr x9, [sp, #496] ; 8-byte Folded Reload
adcs xzr, x9, x8
mrs x8, NZCV
str x8, [sp, #152] ; 8-byte Folded Spill
adcs x12, x4, x10
str x12, [sp, #200] ; 8-byte Folded Spill
ldr x8, [sp, #1032] ; 8-byte Folded Reload
ldr x10, [sp, #872] ; 8-byte Folded Reload
cmp x10, x8
ldr x9, [sp, #888] ; 8-byte Folded Reload
add x9, x8, x9
str x9, [sp, #920] ; 8-byte Folded Spill
cinc x5, x9, lo
ldr x8, [sp, #488] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #528] ; 8-byte Folded Reload
adcs xzr, x8, x10
mrs x8, NZCV
str x8, [sp, #528] ; 8-byte Folded Spill
adcs x8, x5, x12
ldr x9, [sp, #392] ; 8-byte Folded Reload
msr NZCV, x9
adcs xzr, x8, x11
mrs x9, NZCV
str x9, [sp, #184] ; 8-byte Folded Spill
str x9, [sp, #144] ; 8-byte Folded Spill
ldr x9, [sp, #784] ; 8-byte Folded Reload
ldr x10, [sp, #776] ; 8-byte Folded Reload
adds x10, x10, x9
adcs x1, x0, x16
str x1, [sp, #432] ; 8-byte Folded Spill
ldr x9, [sp, #936] ; 8-byte Folded Reload
ldr x12, [sp, #472] ; 8-byte Folded Reload
adds x9, x12, x9
mov w14, #-2
ldr x12, [sp, #824] ; 8-byte Folded Reload
umulh x12, x12, x14
ldr x13, [sp, #992] ; 8-byte Folded Reload
adcs x0, x13, x12
str x0, [sp, #408] ; 8-byte Folded Spill
ldr x12, [sp, #808] ; 8-byte Folded Reload
cmp x21, x12
cset w12, lo
adds x13, x21, x23
umulh x15, x28, x25
mov x16, x25
str x15, [sp, #824] ; 8-byte Folded Spill
adcs x3, x12, x15
ldr x12, [sp, #624] ; 8-byte Folded Reload
msr NZCV, x12
adcs x11, x11, x8
str x11, [sp, #440] ; 8-byte Folded Spill
ldr x8, [sp, #520] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #632] ; 8-byte Folded Reload
adcs xzr, x8, x9
mrs x8, NZCV
str x8, [sp, #576] ; 8-byte Folded Spill
adcs x8, x0, x11
ldr x9, [sp, #480] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #584] ; 8-byte Folded Reload
adcs xzr, x9, x10
mrs x9, NZCV
str x9, [sp, #584] ; 8-byte Folded Spill
ldr x9, [sp, #664] ; 8-byte Folded Reload
mul x28, x9, x14
adcs x9, x1, x8
stp x9, x8, [sp, #416] ; 16-byte Folded Spill
ldr x1, [sp, #880] ; 8-byte Folded Reload
adds x8, x28, x1
ldr x10, [sp, #400] ; 8-byte Folded Reload
msr NZCV, x10
ldr x23, [sp, #968] ; 8-byte Folded Reload
ldr x10, [sp, #568] ; 8-byte Folded Reload
adcs xzr, x10, x23
mrs x10, NZCV
str x10, [sp, #568] ; 8-byte Folded Spill
adcs x10, x8, x9
str x10, [sp, #400] ; 8-byte Folded Spill
ldr x8, [sp, #752] ; 8-byte Folded Reload
ldr x9, [sp, #368] ; 8-byte Folded Reload
cmp x9, x8
cinc x8, x19, lo
str x8, [sp, #728] ; 8-byte Folded Spill
ldr x19, [sp, #704] ; 8-byte Folded Reload
adds x8, x8, x19
ldr x11, [sp, #352] ; 8-byte Folded Reload
msr NZCV, x11
ldr x11, [sp, #376] ; 8-byte Folded Reload
adcs xzr, x11, x9
mrs x9, NZCV
str x9, [sp, #560] ; 8-byte Folded Spill
adcs x11, x8, x10
str x11, [sp, #384] ; 8-byte Folded Spill
ldr x9, [sp, #680] ; 8-byte Folded Reload
cmp x9, x7
mov w8, #-1
umulh x10, x9, x8
str x10, [sp, #872] ; 8-byte Folded Spill
cset w8, hi
ldr x12, [sp, #552] ; 8-byte Folded Reload
adds x9, x12, x8
adds x25, x9, x10
cset w9, hs
cmn x12, x8
ldr x8, [sp, #360] ; 8-byte Folded Reload
adcs x8, x8, x9
str x8, [sp, #192] ; 8-byte Folded Spill
mrs x8, NZCV
str x8, [sp, #552] ; 8-byte Folded Spill
ldr x8, [sp, #1040] ; 8-byte Folded Reload
ldur x8, [x8, #44]
str x8, [sp, #1040] ; 8-byte Folded Spill
mul x4, x8, x16
ldr x8, [sp, #1000] ; 8-byte Folded Reload
adcs x8, x8, x11
str x8, [sp, #736] ; 8-byte Folded Spill
adds x8, x3, x4
str x4, [sp, #632] ; 8-byte Folded Spill
cmp x17, x2
ldr x9, [sp, #928] ; 8-byte Folded Reload
cinc x10, x9, lo
str x10, [sp, #352] ; 8-byte Folded Spill
ldp x14, x9, [sp, #328] ; 16-byte Folded Reload
msr NZCV, x9
adcs xzr, x13, x17
mrs x9, NZCV
str x9, [sp, #536] ; 8-byte Folded Spill
adcs x16, x10, x8
str x16, [sp, #240] ; 8-byte Folded Spill
ldr x0, [sp, #320] ; 8-byte Folded Reload
cmp x0, x6
cset w15, lo
ldr x9, [sp, #760] ; 8-byte Folded Reload
cmp x14, x9
cset w9, lo
ldr x6, [sp, #688] ; 8-byte Folded Reload
ldr x7, [sp, #816] ; 8-byte Folded Reload
cmp x7, x6
cset w10, lo
ldr x11, [sp, #840] ; 8-byte Folded Reload
ldr x21, [sp, #800] ; 8-byte Folded Reload
cmp x21, x11
cset w11, lo
ldr x17, [sp, #792] ; 8-byte Folded Reload
cmp x17, x30
ldr x2, [sp, #1080] ; 8-byte Folded Reload
ldr x12, [sp, #1048] ; 8-byte Folded Reload
mul x8, x2, x12
str x8, [sp, #720] ; 8-byte Folded Spill
cset w13, lo
adds x17, x17, x8
umulh x8, x2, x12
str x8, [sp, #696] ; 8-byte Folded Spill
adcs x8, x13, x8
str x8, [sp, #376] ; 8-byte Folded Spill
ldr x8, [sp, #1008] ; 8-byte Folded Reload
ldr x2, [sp, #1072] ; 8-byte Folded Reload
mul x13, x2, x8
adds x12, x21, x13
mov x21, x13
str x13, [sp, #392] ; 8-byte Folded Spill
umulh x8, x2, x8
str x8, [sp, #672] ; 8-byte Folded Spill
adcs x8, x11, x8
stp x12, x8, [sp, #360] ; 16-byte Folded Spill
ldr x13, [sp, #1056] ; 8-byte Folded Reload
ldr x2, [sp, #1088] ; 8-byte Folded Reload
mul x11, x2, x13
str x11, [sp, #792] ; 8-byte Folded Spill
adds x8, x7, x11
umulh x11, x2, x13
str x11, [sp, #656] ; 8-byte Folded Spill
adcs x10, x10, x11
str x10, [sp, #280] ; 8-byte Folded Spill
ldr x11, [sp, #984] ; 8-byte Folded Reload
ldr x13, [sp, #1096] ; 8-byte Folded Reload
mul x10, x13, x11
str x10, [sp, #752] ; 8-byte Folded Spill
adds x2, x14, x10
stp x2, x8, [sp, #328] ; 16-byte Folded Spill
umulh x10, x13, x11
str x10, [sp, #800] ; 8-byte Folded Spill
adcs x9, x9, x10
str x9, [sp, #256] ; 8-byte Folded Spill
ldr x10, [sp, #1016] ; 8-byte Folded Reload
ldr x13, [sp, #1064] ; 8-byte Folded Reload
mul x9, x13, x10
str x9, [sp, #624] ; 8-byte Folded Spill
adds x11, x0, x9
umulh x9, x13, x10
str x9, [sp, #816] ; 8-byte Folded Spill
adcs x7, x15, x9
str x7, [sp, #40] ; 8-byte Folded Spill
ldr x9, [sp, #456] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #312] ; 8-byte Folded Reload
ldr x10, [sp, #224] ; 8-byte Folded Reload
adcs xzr, x9, x10
mrs x9, NZCV
str x9, [sp, #456] ; 8-byte Folded Spill
adcs x10, x11, x16
stp x10, x11, [sp, #224] ; 16-byte Folded Spill
ldr x9, [sp, #960] ; 8-byte Folded Reload
cmp x20, x9
ldr x11, [sp, #896] ; 8-byte Folded Reload
cinc x13, x11, lo
ldr x11, [sp, #448] ; 8-byte Folded Reload
msr NZCV, x11
ldr x11, [sp, #288] ; 8-byte Folded Reload
adcs xzr, x11, x20
mrs x11, NZCV
str x11, [sp, #448] ; 8-byte Folded Spill
adcs x11, x13, x10
ldr x10, [sp, #176] ; 8-byte Folded Reload
msr NZCV, x10
ldr x10, [sp, #304] ; 8-byte Folded Reload
ldr x14, [sp, #264] ; 8-byte Folded Reload
adcs xzr, x10, x14
mrs x10, NZCV
str x10, [sp, #520] ; 8-byte Folded Spill
adcs x10, x2, x11
stp x11, x10, [sp, #312] ; 16-byte Folded Spill
cmp x22, x26
cinc x14, x24, lo
str x14, [sp, #176] ; 8-byte Folded Spill
ldr x11, [sp, #168] ; 8-byte Folded Reload
msr NZCV, x11
ldr x11, [sp, #512] ; 8-byte Folded Reload
adcs xzr, x11, x22
mrs x11, NZCV
str x11, [sp, #512] ; 8-byte Folded Spill
adcs x11, x14, x10
str x11, [sp, #288] ; 8-byte Folded Spill
ldr x10, [sp, #504] ; 8-byte Folded Reload
msr NZCV, x10
ldr x10, [sp, #544] ; 8-byte Folded Reload
ldr x14, [sp, #272] ; 8-byte Folded Reload
adcs xzr, x10, x14
mrs x10, NZCV
str x10, [sp, #504] ; 8-byte Folded Spill
adcs x10, x8, x11
str x10, [sp, #304] ; 8-byte Folded Spill
ldr x8, [sp, #1024] ; 8-byte Folded Reload
cmp x27, x8
ldr x8, [sp, #904] ; 8-byte Folded Reload
cinc x8, x8, lo
ldr x11, [sp, #160] ; 8-byte Folded Reload
msr NZCV, x11
ldr x11, [sp, #208] ; 8-byte Folded Reload
adcs xzr, x11, x27
mrs x11, NZCV
str x11, [sp, #496] ; 8-byte Folded Spill
adcs x10, x8, x10
stp x8, x10, [sp, #264] ; 16-byte Folded Spill
ldr x8, [sp, #152] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #296] ; 8-byte Folded Reload
ldr x11, [sp, #248] ; 8-byte Folded Reload
adcs xzr, x11, x8
mrs x8, NZCV
str x8, [sp, #488] ; 8-byte Folded Spill
adcs x10, x12, x10
str x10, [sp, #296] ; 8-byte Folded Spill
ldr x8, [sp, #1032] ; 8-byte Folded Reload
cmp x5, x8
ldr x8, [sp, #920] ; 8-byte Folded Reload
cinc x11, x8, lo
str x11, [sp, #248] ; 8-byte Folded Spill
ldr x8, [sp, #528] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #200] ; 8-byte Folded Reload
adcs xzr, x8, x5
mrs x8, NZCV
str x8, [sp, #480] ; 8-byte Folded Spill
adcs x8, x11, x10
ldr x10, [sp, #144] ; 8-byte Folded Reload
msr NZCV, x10
adcs x2, x17, x8
ldr x10, [sp, #824] ; 8-byte Folded Reload
cmp x3, x10
cset w10, lo
adds x5, x3, x4
ldr x12, [sp, #1040] ; 8-byte Folded Reload
ldr x11, [sp, #976] ; 8-byte Folded Reload
umulh x11, x12, x11
str x11, [sp, #528] ; 8-byte Folded Spill
adcs x14, x10, x11
stp x2, x14, [sp, #144] ; 16-byte Folded Spill
ldp x10, x16, [sp, #184] ; 16-byte Folded Reload
msr NZCV, x10
adcs xzr, x8, x17
mrs x8, NZCV
str x8, [sp, #544] ; 8-byte Folded Spill
str x8, [sp, #472] ; 8-byte Folded Spill
ldr x8, [sp, #744] ; 8-byte Folded Reload
ldr x10, [sp, #344] ; 8-byte Folded Reload
adds x8, x10, x8
ldr x10, [sp, #768] ; 8-byte Folded Reload
ldr x11, [sp, #216] ; 8-byte Folded Reload
adcs x17, x10, x11
ldr x11, [sp, #592] ; 8-byte Folded Reload
cmn x25, x11
adcs x10, x8, x16
str x10, [sp, #344] ; 8-byte Folded Spill
ldr x10, [sp, #728] ; 8-byte Folded Reload
adds x4, x10, x19
adcs x15, x21, x6
str x15, [sp, #160] ; 8-byte Folded Spill
adds x19, x28, x1
ldr x10, [sp, #664] ; 8-byte Folded Reload
mov w0, #-2
umulh x10, x10, x0
adcs x0, x23, x10
str x0, [sp, #200] ; 8-byte Folded Spill
adds x6, x25, x11
lsl x10, x6, #32
stp x10, x17, [sp, #208] ; 16-byte Folded Spill
sub x11, x10, x6
str x11, [sp, #976] ; 8-byte Folded Spill
adcs xzr, x16, x8
mrs x8, NZCV
str x8, [sp, #136] ; 8-byte Folded Spill
ldr x8, [sp, #736] ; 8-byte Folded Reload
adcs x8, x17, x8
str x8, [sp, #56] ; 8-byte Folded Spill
ldr x8, [sp, #784] ; 8-byte Folded Reload
ldr x10, [sp, #776] ; 8-byte Folded Reload
cmn x10, x8
ldr x8, [sp, #712] ; 8-byte Folded Reload
ldr x10, [sp, #464] ; 8-byte Folded Reload
adcs xzr, x8, x10
ldr x8, [sp, #840] ; 8-byte Folded Reload
adcs x1, x8, xzr
ldr x8, [sp, #992] ; 8-byte Folded Reload
ldr x11, [sp, #408] ; 8-byte Folded Reload
cmp x11, x8
ldr x10, [sp, #936] ; 8-byte Folded Reload
add x8, x8, x10
str x8, [sp, #928] ; 8-byte Folded Spill
cinc x3, x8, lo
ldr x8, [sp, #576] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #440] ; 8-byte Folded Reload
adcs xzr, x8, x11
mrs x8, NZCV
str x8, [sp, #32] ; 8-byte Folded Spill
adcs x27, x3, x2
ldr x8, [sp, #720] ; 8-byte Folded Reload
adds x11, x1, x8
ldr x8, [sp, #584] ; 8-byte Folded Reload
msr NZCV, x8
ldp x10, x8, [sp, #424] ; 16-byte Folded Reload
adcs xzr, x10, x8
mrs x8, NZCV
str x8, [sp, #48] ; 8-byte Folded Spill
adcs x28, x11, x27
ldr x8, [sp, #568] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #416] ; 8-byte Folded Reload
adcs xzr, x8, x19
mrs x8, NZCV
str x8, [sp, #408] ; 8-byte Folded Spill
adcs x10, x0, x28
str x10, [sp, #120] ; 8-byte Folded Spill
ldr x8, [sp, #560] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #400] ; 8-byte Folded Reload
adcs xzr, x8, x4
mrs x8, NZCV
str x8, [sp, #400] ; 8-byte Folded Spill
mov w8, #-2
ldr x20, [sp, #680] ; 8-byte Folded Reload
mul x2, x20, x8
adcs x30, x15, x10
ldr x0, [sp, #872] ; 8-byte Folded Reload
adds x11, x2, x0
ldr x8, [sp, #552] ; 8-byte Folded Reload
msr NZCV, x8
ldr x15, [sp, #1000] ; 8-byte Folded Reload
ldr x8, [sp, #384] ; 8-byte Folded Reload
adcs xzr, x8, x15
mrs x8, NZCV
str x8, [sp, #64] ; 8-byte Folded Spill
adcs x8, x11, x30
str x8, [sp, #184] ; 8-byte Folded Spill
ldr x10, [sp, #352] ; 8-byte Folded Reload
ldr x8, [sp, #952] ; 8-byte Folded Reload
cmp x10, x8
ldr x8, [sp, #832] ; 8-byte Folded Reload
cinc x11, x8, lo
str x11, [sp, #560] ; 8-byte Folded Spill
ldr x8, [sp, #536] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x5, x10
mrs x8, NZCV
str x8, [sp, #104] ; 8-byte Folded Spill
ldr x8, [sp, #1016] ; 8-byte Folded Reload
mul x24, x12, x8
adcs x12, x11, x14
str x12, [sp, #352] ; 8-byte Folded Spill
adds x11, x7, x24
str x24, [sp, #464] ; 8-byte Folded Spill
ldr x8, [sp, #456] ; 8-byte Folded Reload
msr NZCV, x8
ldp x10, x8, [sp, #232] ; 16-byte Folded Reload
adcs xzr, x8, x10
mrs x8, NZCV
str x8, [sp, #96] ; 8-byte Folded Spill
adcs x10, x11, x12
str x10, [sp, #424] ; 8-byte Folded Spill
cmp x13, x9
ldr x8, [sp, #896] ; 8-byte Folded Reload
cinc x26, x8, lo
ldr x8, [sp, #448] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #224] ; 8-byte Folded Reload
adcs xzr, x8, x13
mrs x8, NZCV
adcs x7, x26, x10
stp x7, x8, [sp, #440] ; 16-byte Folded Spill
ldr x8, [sp, #800] ; 8-byte Folded Reload
ldr x22, [sp, #256] ; 8-byte Folded Reload
cmp x22, x8
cset w9, lo
ldr x19, [sp, #656] ; 8-byte Folded Reload
ldr x5, [sp, #280] ; 8-byte Folded Reload
cmp x5, x19
cset w11, lo
ldr x21, [sp, #672] ; 8-byte Folded Reload
ldp x4, x16, [sp, #368] ; 16-byte Folded Reload
cmp x4, x21
cset w12, lo
ldr x13, [sp, #696] ; 8-byte Folded Reload
cmp x16, x13
ldr x8, [sp, #1072] ; 8-byte Folded Reload
ldr x10, [sp, #1048] ; 8-byte Folded Reload
mul x23, x8, x10
str x23, [sp, #712] ; 8-byte Folded Spill
cset w14, lo
adds x16, x16, x23
umulh x8, x8, x10
str x8, [sp, #192] ; 8-byte Folded Spill
adcs x8, x14, x8
str x8, [sp, #240] ; 8-byte Folded Spill
ldr x8, [sp, #1088] ; 8-byte Folded Reload
ldr x10, [sp, #1008] ; 8-byte Folded Reload
mul x14, x8, x10
str x14, [sp, #776] ; 8-byte Folded Spill
adds x17, x4, x14
str x17, [sp, #128] ; 8-byte Folded Spill
umulh x14, x8, x10
str x14, [sp, #664] ; 8-byte Folded Spill
adcs x8, x12, x14
str x8, [sp, #112] ; 8-byte Folded Spill
ldr x8, [sp, #1096] ; 8-byte Folded Reload
ldr x10, [sp, #1056] ; 8-byte Folded Reload
mul x12, x8, x10
str x12, [sp, #584] ; 8-byte Folded Spill
adds x4, x5, x12
str x4, [sp, #232] ; 8-byte Folded Spill
umulh x12, x8, x10
str x12, [sp, #784] ; 8-byte Folded Spill
adcs x8, x11, x12
str x8, [sp, #280] ; 8-byte Folded Spill
ldr x8, [sp, #1064] ; 8-byte Folded Reload
ldr x10, [sp, #984] ; 8-byte Folded Reload
mul x11, x8, x10
str x11, [sp, #592] ; 8-byte Folded Spill
adds x23, x22, x11
umulh x11, x8, x10
str x11, [sp, #832] ; 8-byte Folded Spill
adcs x8, x9, x11
str x8, [sp, #168] ; 8-byte Folded Spill
ldr x8, [sp, #520] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #328] ; 8-byte Folded Reload
ldr x9, [sp, #312] ; 8-byte Folded Reload
adcs xzr, x9, x8
mrs x8, NZCV
str x8, [sp, #376] ; 8-byte Folded Spill
adcs x25, x23, x7
ldr x14, [sp, #944] ; 8-byte Folded Reload
ldr x7, [sp, #176] ; 8-byte Folded Reload
cmp x7, x14
ldr x8, [sp, #912] ; 8-byte Folded Reload
cinc x12, x8, lo
ldr x8, [sp, #512] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #320] ; 8-byte Folded Reload
adcs xzr, x8, x7
mrs x8, NZCV
str x8, [sp, #16] ; 8-byte Folded Spill
adcs x10, x12, x25
str x10, [sp, #312] ; 8-byte Folded Spill
ldr x8, [sp, #504] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #336] ; 8-byte Folded Reload
ldr x9, [sp, #288] ; 8-byte Folded Reload
adcs xzr, x9, x8
mrs x8, NZCV
str x8, [sp, #176] ; 8-byte Folded Spill
adcs x10, x4, x10
ldr x8, [sp, #1024] ; 8-byte Folded Reload
ldr x9, [sp, #264] ; 8-byte Folded Reload
cmp x9, x8
ldr x22, [sp, #904] ; 8-byte Folded Reload
cinc x4, x22, lo
ldr x8, [sp, #496] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #304] ; 8-byte Folded Reload
adcs xzr, x8, x9
mrs x8, NZCV
stp x10, x8, [sp, #256] ; 16-byte Folded Spill
adcs x10, x4, x10
str x10, [sp, #304] ; 8-byte Folded Spill
ldr x8, [sp, #488] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #360] ; 8-byte Folded Reload
ldr x9, [sp, #272] ; 8-byte Folded Reload
adcs xzr, x9, x8
mrs x8, NZCV
str x8, [sp, #88] ; 8-byte Folded Spill
adcs x10, x17, x10
str x10, [sp, #488] ; 8-byte Folded Spill
ldr x9, [sp, #248] ; 8-byte Folded Reload
ldr x8, [sp, #1032] ; 8-byte Folded Reload
cmp x9, x8
ldr x7, [sp, #920] ; 8-byte Folded Reload
cinc x5, x7, lo
ldr x8, [sp, #480] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #296] ; 8-byte Folded Reload
adcs xzr, x8, x9
mrs x8, NZCV
str x8, [sp, #80] ; 8-byte Folded Spill
adcs x11, x5, x10
ldr x8, [sp, #472] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x11, x16
mrs x8, NZCV
str x8, [sp, #568] ; 8-byte Folded Spill
str x8, [sp, #72] ; 8-byte Folded Spill
str x6, [sp, #744] ; 8-byte Folded Spill
ldr x8, [sp, #208] ; 8-byte Folded Reload
cmp x6, x8
mov w8, #-1
umulh x9, x6, x8
str x9, [sp, #896] ; 8-byte Folded Spill
cset w10, hi
ldr x8, [sp, #344] ; 8-byte Folded Reload
adds x6, x8, x10
adds x9, x6, x9
str x9, [sp, #24] ; 8-byte Folded Spill
cset w9, hs
cmn x8, x10
ldr x8, [sp, #56] ; 8-byte Folded Reload
adcs x6, x8, x9
mrs x8, NZCV
str x8, [sp, #576] ; 8-byte Folded Spill
str x8, [sp, #56] ; 8-byte Folded Spill
adds x8, x2, x0
mov w9, #-2
umulh x9, x20, x9
adcs x0, x15, x9
str x0, [sp, #680] ; 8-byte Folded Spill
ldr x9, [sp, #840] ; 8-byte Folded Reload
cmp x1, x9
cset w9, lo
ldr x15, [sp, #720] ; 8-byte Folded Reload
adds x15, x1, x15
adcs x20, x9, x13
ldr x9, [sp, #816] ; 8-byte Folded Reload
ldr x13, [sp, #40] ; 8-byte Folded Reload
cmp x13, x9
cset w9, lo
adds x13, x13, x24
ldr x2, [sp, #1040] ; 8-byte Folded Reload
ldr x10, [sp, #1016] ; 8-byte Folded Reload
umulh x17, x2, x10
str x17, [sp, #456] ; 8-byte Folded Spill
adcs x17, x9, x17
str x17, [sp, #248] ; 8-byte Folded Spill
ldr x9, [sp, #544] ; 8-byte Folded Reload
msr NZCV, x9
adcs x11, x16, x11
stp x20, x11, [sp, #336] ; 16-byte Folded Spill
ldr x9, [sp, #728] ; 8-byte Folded Reload
ldr x10, [sp, #704] ; 8-byte Folded Reload
cmn x9, x10
ldr x9, [sp, #688] ; 8-byte Folded Reload
ldr x10, [sp, #392] ; 8-byte Folded Reload
adcs xzr, x9, x10
adcs x16, x21, xzr
str x16, [sp, #432] ; 8-byte Folded Spill
ldr x9, [sp, #992] ; 8-byte Folded Reload
cmp x3, x9
ldr x9, [sp, #928] ; 8-byte Folded Reload
cinc x10, x9, lo
str x10, [sp, #328] ; 8-byte Folded Spill
ldr x9, [sp, #32] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #144] ; 8-byte Folded Reload
adcs xzr, x9, x3
mrs x9, NZCV
str x9, [sp, #296] ; 8-byte Folded Spill
ldr x9, [sp, #1080] ; 8-byte Folded Reload
mul x9, x9, x9
str x9, [sp, #224] ; 8-byte Folded Spill
adcs x10, x10, x11
str x10, [sp, #536] ; 8-byte Folded Spill
adds x9, x20, x9
ldr x11, [sp, #48] ; 8-byte Folded Reload
msr NZCV, x11
adcs xzr, x27, x15
mrs x11, NZCV
str x11, [sp, #384] ; 8-byte Folded Spill
adcs x11, x9, x10
str x11, [sp, #552] ; 8-byte Folded Spill
ldr x9, [sp, #968] ; 8-byte Folded Reload
ldr x15, [sp, #200] ; 8-byte Folded Reload
cmp x15, x9
ldr x10, [sp, #880] ; 8-byte Folded Reload
add x9, x9, x10
str x9, [sp, #952] ; 8-byte Folded Spill
cinc x9, x9, lo
ldr x10, [sp, #408] ; 8-byte Folded Reload
msr NZCV, x10
adcs xzr, x28, x15
mrs x10, NZCV
str x10, [sp, #368] ; 8-byte Folded Spill
adcs x10, x9, x11
stp x10, x9, [sp, #408] ; 16-byte Folded Spill
ldr x9, [sp, #712] ; 8-byte Folded Reload
adds x9, x16, x9
ldr x11, [sp, #400] ; 8-byte Folded Reload
msr NZCV, x11
ldr x11, [sp, #160] ; 8-byte Folded Reload
ldr x15, [sp, #120] ; 8-byte Folded Reload
adcs xzr, x15, x11
mrs x11, NZCV
str x11, [sp, #360] ; 8-byte Folded Spill
adcs x9, x9, x10
str x9, [sp, #520] ; 8-byte Folded Spill
ldr x10, [sp, #64] ; 8-byte Folded Reload
msr NZCV, x10
adcs xzr, x30, x8
mrs x8, NZCV
str x8, [sp, #400] ; 8-byte Folded Spill
adcs x11, x0, x9
str x11, [sp, #512] ; 8-byte Folded Spill
ldr x8, [sp, #768] ; 8-byte Folded Reload
ldr x10, [sp, #216] ; 8-byte Folded Reload
cmp x10, x8
ldr x8, [sp, #760] ; 8-byte Folded Reload
cinc x9, x8, lo
str x9, [sp, #200] ; 8-byte Folded Spill
ldr x8, [sp, #792] ; 8-byte Folded Reload
adds x9, x9, x8
ldr x8, [sp, #776] ; 8-byte Folded Reload
adcs x15, x8, x19
str x15, [sp, #392] ; 8-byte Folded Spill
ldr x20, [sp, #808] ; 8-byte Folded Reload
ldr x1, [sp, #608] ; 8-byte Folded Reload
adds x8, x1, x20
ldr x19, [sp, #616] ; 8-byte Folded Reload
ldr x16, [sp, #24] ; 8-byte Folded Reload
cmn x16, x19
adcs x8, x8, x6
str x8, [sp, #760] ; 8-byte Folded Spill
ldr x8, [sp, #136] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #736] ; 8-byte Folded Reload
adcs xzr, x8, x10
mrs x0, NZCV
ldr x21, [sp, #184] ; 8-byte Folded Reload
adcs xzr, x21, x9
mrs x8, NZCV
str x8, [sp, #320] ; 8-byte Folded Spill
adcs x8, x15, x11
str x8, [sp, #840] ; 8-byte Folded Spill
ldr x8, [sp, #104] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #152] ; 8-byte Folded Reload
ldr x10, [sp, #560] ; 8-byte Folded Reload
adcs xzr, x8, x10
adcs x8, x17, xzr
ldr x10, [sp, #96] ; 8-byte Folded Reload
msr NZCV, x10
ldr x10, [sp, #352] ; 8-byte Folded Reload
adcs xzr, x10, x13
adcs x10, x8, xzr
str x10, [sp, #208] ; 8-byte Folded Spill
ldr x8, [sp, #960] ; 8-byte Folded Reload
cmp x26, x8
ldr x8, [sp, #848] ; 8-byte Folded Reload
cinc x8, x8, lo
str x8, [sp, #560] ; 8-byte Folded Spill
ldr x11, [sp, #448] ; 8-byte Folded Reload
msr NZCV, x11
ldr x11, [sp, #424] ; 8-byte Folded Reload
adcs xzr, x11, x26
mrs x11, NZCV
str x11, [sp, #480] ; 8-byte Folded Spill
ldr x30, [sp, #984] ; 8-byte Folded Reload
mul x3, x2, x30
mov x26, x2
adcs x11, x8, x10
str x11, [sp, #504] ; 8-byte Folded Spill
ldr x27, [sp, #168] ; 8-byte Folded Reload
adds x8, x27, x3
ldr x10, [sp, #376] ; 8-byte Folded Reload
msr NZCV, x10
ldr x10, [sp, #440] ; 8-byte Folded Reload
adcs xzr, x10, x23
mrs x10, NZCV
str x10, [sp, #376] ; 8-byte Folded Spill
adcs x10, x8, x11
str x10, [sp, #472] ; 8-byte Folded Spill
cmp x12, x14
ldr x8, [sp, #912] ; 8-byte Folded Reload
cinc x8, x8, lo
str x8, [sp, #440] ; 8-byte Folded Spill
ldr x11, [sp, #16] ; 8-byte Folded Reload
msr NZCV, x11
adcs xzr, x25, x12
mrs x11, NZCV
str x11, [sp, #352] ; 8-byte Folded Spill
adcs x2, x8, x10
str x2, [sp, #424] ; 8-byte Folded Spill
ldr x8, [sp, #784] ; 8-byte Folded Reload
ldr x25, [sp, #280] ; 8-byte Folded Reload
cmp x25, x8
cset w10, lo
ldr x8, [sp, #664] ; 8-byte Folded Reload
ldr x14, [sp, #112] ; 8-byte Folded Reload
cmp x14, x8
cset w15, lo
ldr x23, [sp, #192] ; 8-byte Folded Reload
ldr x11, [sp, #240] ; 8-byte Folded Reload
cmp x11, x23
ldr x8, [sp, #1048] ; 8-byte Folded Reload
ldr x12, [sp, #1088] ; 8-byte Folded Reload
mul x24, x12, x8
cset w17, lo
adds x11, x11, x24
umulh x8, x12, x8
str x8, [sp, #216] ; 8-byte Folded Spill
adcs x8, x17, x8
str x8, [sp, #704] ; 8-byte Folded Spill
ldr x8, [sp, #1096] ; 8-byte Folded Reload
ldr x12, [sp, #1008] ; 8-byte Folded Reload
mul x13, x8, x12
str x13, [sp, #736] ; 8-byte Folded Spill
adds x14, x14, x13
str x14, [sp, #544] ; 8-byte Folded Spill
umulh x13, x8, x12
str x13, [sp, #728] ; 8-byte Folded Spill
adcs x8, x15, x13
str x8, [sp, #720] ; 8-byte Folded Spill
ldr x8, [sp, #1064] ; 8-byte Folded Reload
ldr x13, [sp, #1056] ; 8-byte Folded Reload
mul x12, x8, x13
str x12, [sp, #960] ; 8-byte Folded Spill
adds x15, x25, x12
umulh x12, x8, x13
str x12, [sp, #848] ; 8-byte Folded Spill
adcs x8, x10, x12
str x8, [sp, #912] ; 8-byte Folded Spill
ldr x8, [sp, #176] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #312] ; 8-byte Folded Reload
ldr x10, [sp, #232] ; 8-byte Folded Reload
adcs xzr, x8, x10
mrs x8, NZCV
str x8, [sp, #240] ; 8-byte Folded Spill
adcs x10, x15, x2
stp x10, x15, [sp, #280] ; 16-byte Folded Spill
ldr x8, [sp, #1024] ; 8-byte Folded Reload
cmp x4, x8
cinc x8, x22, lo
str x8, [sp, #496] ; 8-byte Folded Spill
ldr x12, [sp, #264] ; 8-byte Folded Reload
msr NZCV, x12
ldr x12, [sp, #256] ; 8-byte Folded Reload
adcs xzr, x12, x4
mrs x12, NZCV
str x12, [sp, #232] ; 8-byte Folded Spill
adcs x8, x8, x10
ldr x10, [sp, #88] ; 8-byte Folded Reload
msr NZCV, x10
ldr x10, [sp, #304] ; 8-byte Folded Reload
ldr x12, [sp, #128] ; 8-byte Folded Reload
adcs xzr, x10, x12
mrs x10, NZCV
stp x10, x8, [sp, #264] ; 16-byte Folded Spill
adcs x10, x14, x8
ldr x8, [sp, #1032] ; 8-byte Folded Reload
cmp x5, x8
cinc x8, x7, lo
stp x8, x10, [sp, #304] ; 16-byte Folded Spill
ldr x12, [sp, #80] ; 8-byte Folded Reload
msr NZCV, x12
ldr x12, [sp, #488] ; 8-byte Folded Reload
adcs xzr, x12, x5
mrs x12, NZCV
str x12, [sp, #256] ; 8-byte Folded Spill
adcs x4, x8, x10
ldr x8, [sp, #72] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x4, x11
mrs x8, NZCV
str x8, [sp, #688] ; 8-byte Folded Spill
str x8, [sp, #488] ; 8-byte Folded Spill
adds x10, x1, x20
ldr x8, [sp, #600] ; 8-byte Folded Reload
ldr x12, [sp, #752] ; 8-byte Folded Reload
adcs x28, x12, x8
msr NZCV, x0
adcs x8, x9, x21
str x8, [sp, #808] ; 8-byte Folded Spill
ldr x9, [sp, #56] ; 8-byte Folded Reload
msr NZCV, x9
ldr x25, [sp, #976] ; 8-byte Folded Reload
adcs x12, x25, x8
str x12, [sp, #600] ; 8-byte Folded Spill
adds x8, x16, x19
str x8, [sp, #768] ; 8-byte Folded Spill
lsl x9, x8, #32
str x9, [sp, #608] ; 8-byte Folded Spill
sub x9, x9, x8
str x9, [sp, #1016] ; 8-byte Folded Spill
adcs xzr, x6, x10
mrs x13, NZCV
adcs x8, x28, x12
str x8, [sp, #616] ; 8-byte Folded Spill
ldr x8, [sp, #672] ; 8-byte Folded Reload
ldr x9, [sp, #432] ; 8-byte Folded Reload
cmp x9, x8
cset w10, lo
ldr x8, [sp, #712] ; 8-byte Folded Reload
adds x2, x9, x8
umulh x14, x26, x30
mov x17, x23
adcs x10, x10, x23
ldr x8, [sp, #832] ; 8-byte Folded Reload
cmp x27, x8
cset w0, lo
str x14, [sp, #432] ; 8-byte Folded Spill
cinc x1, x14, lo
str x3, [sp, #448] ; 8-byte Folded Spill
cmn x27, x3
adcs x8, x0, x14
str x8, [sp, #672] ; 8-byte Folded Spill
ldr x8, [sp, #208] ; 8-byte Folded Reload
ldr x9, [sp, #248] ; 8-byte Folded Reload
cmp x8, x9
cset w0, lo
adds x6, x27, x3
adcs x12, x0, x1
ldr x9, [sp, #568] ; 8-byte Folded Reload
msr NZCV, x9
adcs x11, x11, x4
str x11, [sp, #568] ; 8-byte Folded Spill
ldr x9, [sp, #696] ; 8-byte Folded Reload
ldp x1, x14, [sp, #328] ; 16-byte Folded Reload
cmp x14, x9
cset w16, lo
ldr x9, [sp, #224] ; 8-byte Folded Reload
adds x0, x14, x9
ldr x15, [sp, #1080] ; 8-byte Folded Reload
umulh x3, x15, x15
adcs x16, x16, x3
ldr x14, [sp, #792] ; 8-byte Folded Reload
ldr x9, [sp, #200] ; 8-byte Folded Reload
cmn x9, x14
ldr x14, [sp, #776] ; 8-byte Folded Reload
ldr x9, [sp, #656] ; 8-byte Folded Reload
adcs xzr, x9, x14
ldr x14, [sp, #664] ; 8-byte Folded Reload
adcs x5, x14, xzr
ldr x19, [sp, #992] ; 8-byte Folded Reload
cmp x1, x19
ldr x20, [sp, #928] ; 8-byte Folded Reload
cinc x23, x20, lo
ldr x9, [sp, #296] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #344] ; 8-byte Folded Reload
adcs xzr, x9, x1
mrs x9, NZCV
adcs x7, x23, x11
stp x9, x7, [sp, #328] ; 16-byte Folded Spill
cmp x16, x3
cset w3, lo
cmp x10, x17
cset w21, lo
cmp x5, x14
cset w22, lo
adds x24, x5, x24
ldr x9, [sp, #216] ; 8-byte Folded Reload
adcs x1, x22, x9
ldr x14, [sp, #1072] ; 8-byte Folded Reload
mul x11, x14, x15
adds x17, x10, x11
str x17, [sp, #344] ; 8-byte Folded Spill
umulh x4, x14, x15
adcs x21, x21, x4
adds x11, x16, x11
str x11, [sp, #776] ; 8-byte Folded Spill
adcs x22, x3, x4
ldr x10, [sp, #384] ; 8-byte Folded Reload
msr NZCV, x10
ldr x10, [sp, #536] ; 8-byte Folded Reload
adcs xzr, x10, x0
mrs x10, NZCV
str x10, [sp, #296] ; 8-byte Folded Spill
adcs x14, x11, x7
str x14, [sp, #384] ; 8-byte Folded Spill
ldr x3, [sp, #968] ; 8-byte Folded Reload
ldr x11, [sp, #416] ; 8-byte Folded Reload
cmp x11, x3
ldr x5, [sp, #952] ; 8-byte Folded Reload
cinc x7, x5, lo
ldr x10, [sp, #368] ; 8-byte Folded Reload
msr NZCV, x10
ldr x10, [sp, #552] ; 8-byte Folded Reload
adcs xzr, x10, x11
mrs x10, NZCV
str x10, [sp, #368] ; 8-byte Folded Spill
adcs x11, x7, x14
str x11, [sp, #712] ; 8-byte Folded Spill
ldr x10, [sp, #360] ; 8-byte Folded Reload
msr NZCV, x10
ldr x10, [sp, #408] ; 8-byte Folded Reload
adcs xzr, x10, x2
mrs x10, NZCV
str x10, [sp, #192] ; 8-byte Folded Spill
adcs x14, x17, x11
str x14, [sp, #536] ; 8-byte Folded Spill
ldr x10, [sp, #1000] ; 8-byte Folded Reload
ldr x15, [sp, #680] ; 8-byte Folded Reload
cmp x15, x10
ldr x11, [sp, #872] ; 8-byte Folded Reload
add x10, x10, x11
str x10, [sp, #984] ; 8-byte Folded Spill
cinc x0, x10, lo
ldp x11, x10, [sp, #392] ; 16-byte Folded Reload
msr NZCV, x10
ldr x10, [sp, #520] ; 8-byte Folded Reload
adcs xzr, x10, x15
mrs x10, NZCV
str x10, [sp, #184] ; 8-byte Folded Spill
adcs x16, x0, x14
ldr x10, [sp, #320] ; 8-byte Folded Reload
msr NZCV, x10
ldr x10, [sp, #512] ; 8-byte Folded Reload
adcs xzr, x10, x11
mrs x2, NZCV
adcs x10, x24, x16
str x10, [sp, #512] ; 8-byte Folded Spill
msr NZCV, x2
adcs xzr, x16, x24
mrs x10, NZCV
str x10, [sp, #408] ; 8-byte Folded Spill
str x10, [sp, #392] ; 8-byte Folded Spill
ldr x10, [sp, #744] ; 8-byte Folded Reload
mov w11, #-2
mul x27, x10, x11
ldr x10, [sp, #896] ; 8-byte Folded Reload
adds x16, x27, x10
ldr x10, [sp, #576] ; 8-byte Folded Reload
msr NZCV, x10
ldr x10, [sp, #808] ; 8-byte Folded Reload
adcs xzr, x10, x25
mrs x10, NZCV
str x10, [sp, #400] ; 8-byte Folded Spill
ldr x10, [sp, #840] ; 8-byte Folded Reload
adcs x11, x16, x10
str x11, [sp, #416] ; 8-byte Folded Spill
ldr x10, [sp, #752] ; 8-byte Folded Reload
cmp x28, x10
ldr x10, [sp, #800] ; 8-byte Folded Reload
cinc x10, x10, lo
str x10, [sp, #680] ; 8-byte Folded Spill
ldr x30, [sp, #584] ; 8-byte Folded Reload
adds x24, x10, x30
msr NZCV, x13
ldr x10, [sp, #600] ; 8-byte Folded Reload
adcs xzr, x10, x28
mrs x10, NZCV
str x10, [sp, #248] ; 8-byte Folded Spill
adcs x10, x24, x11
str x10, [sp, #800] ; 8-byte Folded Spill
ldr x10, [sp, #480] ; 8-byte Folded Reload
msr NZCV, x10
ldr x10, [sp, #560] ; 8-byte Folded Reload
adcs xzr, x8, x10
adcs x12, x12, xzr
ldr x8, [sp, #376] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #504] ; 8-byte Folded Reload
adcs xzr, x8, x6
adcs x11, x12, xzr
str x11, [sp, #664] ; 8-byte Folded Spill
ldr x10, [sp, #440] ; 8-byte Folded Reload
ldr x8, [sp, #944] ; 8-byte Folded Reload
cmp x10, x8
ldr x8, [sp, #856] ; 8-byte Folded Reload
cinc x12, x8, lo
str x12, [sp, #224] ; 8-byte Folded Spill
ldr x8, [sp, #352] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #472] ; 8-byte Folded Reload
adcs xzr, x8, x10
mrs x8, NZCV
str x8, [sp, #200] ; 8-byte Folded Spill
ldr x8, [sp, #1056] ; 8-byte Folded Reload
mul x10, x26, x8
str x10, [sp, #856] ; 8-byte Folded Spill
adcs x11, x12, x11
str x11, [sp, #208] ; 8-byte Folded Spill
ldr x8, [sp, #912] ; 8-byte Folded Reload
adds x12, x8, x10
ldr x8, [sp, #240] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #424] ; 8-byte Folded Reload
ldr x10, [sp, #288] ; 8-byte Folded Reload
adcs xzr, x8, x10
mrs x8, NZCV
str x8, [sp, #424] ; 8-byte Folded Spill
adcs x12, x12, x11
ldr x8, [sp, #1024] ; 8-byte Folded Reload
ldr x10, [sp, #496] ; 8-byte Folded Reload
cmp x10, x8
ldr x8, [sp, #904] ; 8-byte Folded Reload
cinc x25, x8, lo
ldr x8, [sp, #232] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #280] ; 8-byte Folded Reload
adcs xzr, x8, x10
mrs x8, NZCV
str x8, [sp, #240] ; 8-byte Folded Spill
ldr x11, [sp, #1008] ; 8-byte Folded Reload
ldr x14, [sp, #1064] ; 8-byte Folded Reload
mul x17, x14, x11
adcs x10, x25, x12
stp x10, x12, [sp, #472] ; 16-byte Folded Spill
ldr x26, [sp, #720] ; 8-byte Folded Reload
adds x13, x26, x17
str x17, [sp, #904] ; 8-byte Folded Spill
ldp x8, x12, [sp, #264] ; 16-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #544] ; 8-byte Folded Reload
adcs xzr, x12, x8
mrs x8, NZCV
str x8, [sp, #232] ; 8-byte Folded Spill
adcs x10, x13, x10
str x10, [sp, #496] ; 8-byte Folded Spill
ldr x12, [sp, #304] ; 8-byte Folded Reload
ldr x8, [sp, #1032] ; 8-byte Folded Reload
cmp x12, x8
ldr x8, [sp, #920] ; 8-byte Folded Reload
cinc x28, x8, lo
ldr x8, [sp, #256] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #312] ; 8-byte Folded Reload
adcs xzr, x8, x12
mrs x8, NZCV
str x8, [sp, #256] ; 8-byte Folded Spill
adcs x12, x28, x10
ldr x13, [sp, #704] ; 8-byte Folded Reload
cmp x13, x9
cset w16, lo
cmp x22, x4
cset w15, lo
cmp x21, x4
cset w24, lo
cmp x1, x9
ldr x8, [sp, #1088] ; 8-byte Folded Reload
ldr x9, [sp, #1080] ; 8-byte Folded Reload
mul x10, x8, x9
cset w4, lo
adds x6, x1, x10
umulh x2, x8, x9
adcs x8, x4, x2
str x8, [sp, #600] ; 8-byte Folded Spill
ldr x8, [sp, #1072] ; 8-byte Folded Reload
mul x4, x8, x8
adds x9, x21, x4
str x9, [sp, #552] ; 8-byte Folded Spill
umulh x8, x8, x8
str x8, [sp, #696] ; 8-byte Folded Spill
adcs x8, x24, x8
str x8, [sp, #576] ; 8-byte Folded Spill
adds x22, x22, x10
str x22, [sp, #656] ; 8-byte Folded Spill
adcs x8, x15, x2
str x8, [sp, #560] ; 8-byte Folded Spill
ldr x8, [sp, #1096] ; 8-byte Folded Reload
ldr x15, [sp, #1048] ; 8-byte Folded Reload
mul x10, x8, x15
str x10, [sp, #752] ; 8-byte Folded Spill
adds x1, x13, x10
umulh x4, x8, x15
adcs x8, x16, x4
str x8, [sp, #520] ; 8-byte Folded Spill
ldr x8, [sp, #488] ; 8-byte Folded Reload
msr NZCV, x8
adcs x10, x1, x12
str x10, [sp, #704] ; 8-byte Folded Spill
ldr x24, [sp, #728] ; 8-byte Folded Reload
cmp x26, x24
cset w16, lo
adds x8, x26, x17
str x8, [sp, #720] ; 8-byte Folded Spill
umulh x21, x14, x11
adcs x8, x16, x21
str x8, [sp, #808] ; 8-byte Folded Spill
ldr x8, [sp, #688] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x12, x1
mrs x8, NZCV
str x8, [sp, #792] ; 8-byte Folded Spill
str x8, [sp, #504] ; 8-byte Folded Spill
cmp x23, x19
cinc x8, x20, lo
str x8, [sp, #360] ; 8-byte Folded Spill
ldr x11, [sp, #328] ; 8-byte Folded Reload
msr NZCV, x11
ldr x11, [sp, #568] ; 8-byte Folded Reload
adcs xzr, x11, x23
mrs x11, NZCV
str x11, [sp, #320] ; 8-byte Folded Spill
adcs x10, x8, x10
str x10, [sp, #568] ; 8-byte Folded Spill
ldr x8, [sp, #296] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #776] ; 8-byte Folded Reload
ldr x11, [sp, #336] ; 8-byte Folded Reload
adcs xzr, x11, x8
mrs x8, NZCV
str x8, [sp, #312] ; 8-byte Folded Spill
adcs x10, x22, x10
str x10, [sp, #328] ; 8-byte Folded Spill
cmp x7, x3
cinc x8, x5, lo
str x8, [sp, #544] ; 8-byte Folded Spill
ldr x11, [sp, #368] ; 8-byte Folded Reload
msr NZCV, x11
ldr x11, [sp, #384] ; 8-byte Folded Reload
adcs xzr, x11, x7
mrs x11, NZCV
str x11, [sp, #304] ; 8-byte Folded Spill
adcs x10, x8, x10
str x10, [sp, #488] ; 8-byte Folded Spill
ldr x8, [sp, #192] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #712] ; 8-byte Folded Reload
ldr x11, [sp, #344] ; 8-byte Folded Reload
adcs xzr, x8, x11
mrs x8, NZCV
str x8, [sp, #296] ; 8-byte Folded Spill
adcs x9, x9, x10
str x9, [sp, #352] ; 8-byte Folded Spill
ldr x8, [sp, #1000] ; 8-byte Folded Reload
cmp x0, x8
ldr x8, [sp, #984] ; 8-byte Folded Reload
cinc x19, x8, lo
ldr x8, [sp, #184] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #536] ; 8-byte Folded Reload
adcs xzr, x8, x0
mrs x8, NZCV
str x8, [sp, #288] ; 8-byte Folded Spill
adcs x14, x19, x9
ldr x8, [sp, #392] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x14, x6
mrs x8, NZCV
str x8, [sp, #392] ; 8-byte Folded Spill
str x8, [sp, #280] ; 8-byte Folded Spill
mov x26, x30
ldr x23, [sp, #680] ; 8-byte Folded Reload
adds x17, x23, x30
ldr x10, [sp, #784] ; 8-byte Folded Reload
ldr x15, [sp, #736] ; 8-byte Folded Reload
adcs x16, x15, x10
ldr x8, [sp, #896] ; 8-byte Folded Reload
adds x3, x27, x8
ldr x30, [sp, #1040] ; 8-byte Folded Reload
ldr x9, [sp, #1056] ; 8-byte Folded Reload
umulh x0, x30, x9
stp x0, x16, [sp, #368] ; 16-byte Folded Spill
ldr x9, [sp, #744] ; 8-byte Folded Reload
mov w11, #-2
umulh x5, x9, x11
ldr x9, [sp, #976] ; 8-byte Folded Reload
adcs x7, x9, x5
ldr x1, [sp, #848] ; 8-byte Folded Reload
ldr x12, [sp, #912] ; 8-byte Folded Reload
cmp x12, x1
cset w5, lo
cinc x20, x0, lo
ldr x11, [sp, #856] ; 8-byte Folded Reload
cmn x12, x11
adcs x13, x5, x0
str x13, [sp, #264] ; 8-byte Folded Spill
ldr x0, [sp, #672] ; 8-byte Folded Reload
ldr x13, [sp, #664] ; 8-byte Folded Reload
cmp x13, x0
cset w5, lo
adds x22, x12, x11
adcs x20, x5, x20
ldr x11, [sp, #408] ; 8-byte Folded Reload
msr NZCV, x11
adcs x5, x6, x14
str x5, [sp, #336] ; 8-byte Folded Spill
ldr x11, [sp, #400] ; 8-byte Folded Reload
msr NZCV, x11
ldr x11, [sp, #840] ; 8-byte Folded Reload
adcs xzr, x11, x3
mrs x11, NZCV
ldr x6, [sp, #512] ; 8-byte Folded Reload
adcs x0, x7, x6
str x0, [sp, #384] ; 8-byte Folded Spill
ldr x12, [sp, #768] ; 8-byte Folded Reload
ldr x14, [sp, #608] ; 8-byte Folded Reload
cmp x12, x14
mov w14, #-1
umulh x14, x12, x14
str x14, [sp, #944] ; 8-byte Folded Spill
cset w3, hi
ldr x12, [sp, #760] ; 8-byte Folded Reload
adds x27, x12, x3
adds x14, x27, x14
cset w27, hs
cmn x12, x3
ldr x12, [sp, #616] ; 8-byte Folded Reload
adcs x12, x12, x27
stp x14, x12, [sp, #400] ; 16-byte Folded Spill
mrs x12, NZCV
str x12, [sp, #272] ; 8-byte Folded Spill
ldr x12, [sp, #1016] ; 8-byte Folded Reload
ldr x14, [sp, #800] ; 8-byte Folded Reload
adcs x12, x12, x14
str x12, [sp, #440] ; 8-byte Folded Spill
cmp x7, x9
add x8, x9, x8
str x8, [sp, #1056] ; 8-byte Folded Spill
cinc x27, x8, lo
msr NZCV, x11
adcs xzr, x6, x7
mrs x8, NZCV
str x8, [sp, #216] ; 8-byte Folded Spill
adcs x8, x27, x5
str x8, [sp, #840] ; 8-byte Folded Spill
ldr x8, [sp, #248] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #416] ; 8-byte Folded Reload
adcs xzr, x8, x17
mrs x8, NZCV
str x8, [sp, #248] ; 8-byte Folded Spill
adcs x8, x16, x0
str x8, [sp, #416] ; 8-byte Folded Spill
cmn x23, x26
adcs xzr, x10, x15
adcs x11, x24, xzr
ldr x8, [sp, #200] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #224] ; 8-byte Folded Reload
adcs xzr, x13, x8
adcs x0, x20, xzr
ldr x8, [sp, #424] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #208] ; 8-byte Folded Reload
adcs xzr, x8, x22
adcs x9, x0, xzr
str x9, [sp, #912] ; 8-byte Folded Spill
ldr x8, [sp, #1024] ; 8-byte Folded Reload
cmp x25, x8
ldr x8, [sp, #864] ; 8-byte Folded Reload
cinc x8, x8, lo
str x8, [sp, #712] ; 8-byte Folded Spill
ldr x12, [sp, #240] ; 8-byte Folded Reload
msr NZCV, x12
ldr x12, [sp, #480] ; 8-byte Folded Reload
adcs xzr, x12, x25
mrs x12, NZCV
str x12, [sp, #776] ; 8-byte Folded Spill
ldr x12, [sp, #1008] ; 8-byte Folded Reload
mul x12, x30, x12
str x12, [sp, #344] ; 8-byte Folded Spill
mov x7, x30
adcs x14, x8, x9
str x14, [sp, #688] ; 8-byte Folded Spill
ldr x8, [sp, #808] ; 8-byte Folded Reload
adds x12, x8, x12
ldr x8, [sp, #232] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #472] ; 8-byte Folded Reload
ldr x9, [sp, #720] ; 8-byte Folded Reload
adcs xzr, x8, x9
mrs x8, NZCV
str x8, [sp, #760] ; 8-byte Folded Spill
adcs x8, x12, x14
str x8, [sp, #744] ; 8-byte Folded Spill
ldr x9, [sp, #1032] ; 8-byte Folded Reload
cmp x28, x9
ldr x9, [sp, #920] ; 8-byte Folded Reload
cinc x12, x9, lo
str x12, [sp, #664] ; 8-byte Folded Spill
ldr x9, [sp, #256] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #496] ; 8-byte Folded Reload
adcs xzr, x9, x28
mrs x9, NZCV
str x9, [sp, #784] ; 8-byte Folded Spill
adcs x14, x12, x8
str x14, [sp, #616] ; 8-byte Folded Spill
ldr x5, [sp, #520] ; 8-byte Folded Reload
cmp x5, x4
cset w6, lo
ldr x16, [sp, #560] ; 8-byte Folded Reload
cmp x16, x2
cset w3, lo
ldr x8, [sp, #696] ; 8-byte Folded Reload
ldr x12, [sp, #576] ; 8-byte Folded Reload
cmp x12, x8
cset w20, lo
ldr x15, [sp, #600] ; 8-byte Folded Reload
cmp x15, x2
cset w17, lo
cmp x11, x24
cset w13, lo
ldr x8, [sp, #752] ; 8-byte Folded Reload
adds x22, x11, x8
adcs x2, x13, x4
cmp x2, x4
cset w4, lo
ldr x8, [sp, #624] ; 8-byte Folded Reload
ldr x9, [sp, #824] ; 8-byte Folded Reload
adds x13, x8, x9
ldr x8, [sp, #816] ; 8-byte Folded Reload
ldr x9, [sp, #592] ; 8-byte Folded Reload
adcs x11, x9, x8
cmp x11, x9
ldr x8, [sp, #832] ; 8-byte Folded Reload
cinc x9, x8, lo
str x9, [sp, #920] ; 8-byte Folded Spill
ldr x8, [sp, #960] ; 8-byte Folded Reload
cmn x9, x8
ldr x8, [sp, #904] ; 8-byte Folded Reload
adcs xzr, x1, x8
adcs x0, x21, xzr
cmp x0, x21
ldr x8, [sp, #1064] ; 8-byte Folded Reload
ldr x30, [sp, #1048] ; 8-byte Folded Reload
mul x24, x8, x30
cset w25, lo
adds x9, x0, x24
str x9, [sp, #424] ; 8-byte Folded Spill
umulh x9, x8, x30
adcs x8, x25, x9
str x8, [sp, #752] ; 8-byte Folded Spill
str x9, [sp, #480] ; 8-byte Folded Spill
ldr x10, [sp, #1080] ; 8-byte Folded Reload
ldr x8, [sp, #1096] ; 8-byte Folded Reload
mul x25, x8, x10
adds x2, x2, x25
umulh x23, x8, x10
str x23, [sp, #536] ; 8-byte Folded Spill
adcs x8, x4, x23
str x8, [sp, #720] ; 8-byte Folded Spill
ldr x8, [sp, #1088] ; 8-byte Folded Reload
ldr x10, [sp, #1072] ; 8-byte Folded Reload
mul x26, x8, x10
adds x28, x15, x26
umulh x8, x8, x10
str x8, [sp, #608] ; 8-byte Folded Spill
adcs x10, x17, x8
str x10, [sp, #696] ; 8-byte Folded Spill
adds x10, x12, x26
str x10, [sp, #672] ; 8-byte Folded Spill
adcs x17, x20, x8
str x17, [sp, #680] ; 8-byte Folded Spill
adds x8, x16, x25
str x8, [sp, #576] ; 8-byte Folded Spill
adcs x17, x3, x23
str x17, [sp, #600] ; 8-byte Folded Spill
adds x17, x5, x24
adcs x9, x6, x9
str x9, [sp, #472] ; 8-byte Folded Spill
ldr x9, [sp, #504] ; 8-byte Folded Reload
msr NZCV, x9
adcs x12, x17, x14
stp x12, x17, [sp, #504] ; 16-byte Folded Spill
ldr x26, [sp, #992] ; 8-byte Folded Reload
ldr x15, [sp, #360] ; 8-byte Folded Reload
cmp x15, x26
ldr x9, [sp, #928] ; 8-byte Folded Reload
cinc x14, x9, lo
str x14, [sp, #496] ; 8-byte Folded Spill
ldr x9, [sp, #320] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #704] ; 8-byte Folded Reload
adcs xzr, x9, x15
mrs x9, NZCV
str x9, [sp, #704] ; 8-byte Folded Spill
adcs x9, x14, x12
str x9, [sp, #560] ; 8-byte Folded Spill
ldr x12, [sp, #312] ; 8-byte Folded Reload
msr NZCV, x12
ldr x12, [sp, #656] ; 8-byte Folded Reload
ldr x14, [sp, #568] ; 8-byte Folded Reload
adcs xzr, x14, x12
mrs x12, NZCV
str x12, [sp, #736] ; 8-byte Folded Spill
adcs x12, x8, x9
str x12, [sp, #568] ; 8-byte Folded Spill
ldr x25, [sp, #968] ; 8-byte Folded Reload
ldr x14, [sp, #544] ; 8-byte Folded Reload
cmp x14, x25
ldr x8, [sp, #952] ; 8-byte Folded Reload
cinc x9, x8, lo
str x9, [sp, #520] ; 8-byte Folded Spill
ldr x8, [sp, #304] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #328] ; 8-byte Folded Reload
adcs xzr, x8, x14
mrs x8, NZCV
str x8, [sp, #728] ; 8-byte Folded Spill
adcs x9, x9, x12
str x9, [sp, #656] ; 8-byte Folded Spill
ldr x8, [sp, #296] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #552] ; 8-byte Folded Reload
ldr x12, [sp, #488] ; 8-byte Folded Reload
adcs xzr, x12, x8
mrs x8, NZCV
str x8, [sp, #824] ; 8-byte Folded Spill
adcs x8, x10, x9
str x8, [sp, #624] ; 8-byte Folded Spill
ldr x9, [sp, #1000] ; 8-byte Folded Reload
cmp x19, x9
ldr x9, [sp, #984] ; 8-byte Folded Reload
cinc x10, x9, lo
str x10, [sp, #592] ; 8-byte Folded Spill
ldr x9, [sp, #288] ; 8-byte Folded Reload
msr NZCV, x9
ldp x23, x9, [sp, #344] ; 16-byte Folded Reload
adcs xzr, x9, x19
mrs x9, NZCV
str x9, [sp, #816] ; 8-byte Folded Spill
adcs x12, x10, x8
ldr x8, [sp, #280] ; 8-byte Folded Reload
msr NZCV, x8
adcs x15, x28, x12
ldr x8, [sp, #808] ; 8-byte Folded Reload
cmp x8, x21
ldr x9, [sp, #1008] ; 8-byte Folded Reload
umulh x24, x7, x9
cset w10, lo
cinc x17, x24, lo
cmn x8, x23
mov x9, x8
adcs x8, x10, x24
str x8, [sp, #584] ; 8-byte Folded Spill
ldr x8, [sp, #912] ; 8-byte Folded Reload
ldr x10, [sp, #264] ; 8-byte Folded Reload
cmp x8, x10
cset w10, lo
adds x8, x9, x23
str x8, [sp, #552] ; 8-byte Folded Spill
adcs x8, x10, x17
str x8, [sp, #544] ; 8-byte Folded Spill
ldr x8, [sp, #976] ; 8-byte Folded Reload
cmp x27, x8
ldr x8, [sp, #1056] ; 8-byte Folded Reload
cinc x8, x8, lo
ldr x9, [sp, #216] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #336] ; 8-byte Folded Reload
adcs xzr, x9, x27
mrs x10, NZCV
adcs x17, x8, x15
mov x6, x8
str x8, [sp, #488] ; 8-byte Folded Spill
ldr x8, [sp, #392] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x12, x28
mrs x9, NZCV
str x9, [sp, #864] ; 8-byte Folded Spill
mrs x8, NZCV
str x8, [sp, #808] ; 8-byte Folded Spill
ldr x8, [sp, #248] ; 8-byte Folded Reload
msr NZCV, x8
ldp x9, x8, [sp, #376] ; 16-byte Folded Reload
adcs xzr, x8, x9
mrs x12, NZCV
mov w8, #-2
ldr x0, [sp, #768] ; 8-byte Folded Reload
mul x14, x0, x8
ldr x5, [sp, #840] ; 8-byte Folded Reload
adcs x16, x22, x5
ldr x19, [sp, #944] ; 8-byte Folded Reload
adds x1, x14, x19
ldr x9, [sp, #272] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #1016] ; 8-byte Folded Reload
ldr x3, [sp, #800] ; 8-byte Folded Reload
adcs xzr, x3, x9
mrs x3, NZCV
ldr x4, [sp, #416] ; 8-byte Folded Reload
adcs x1, x1, x4
msr NZCV, x12
adcs xzr, x5, x22
mrs x5, NZCV
adcs x12, x2, x17
msr NZCV, x10
adcs xzr, x15, x6
mrs x10, NZCV
str x10, [sp, #840] ; 8-byte Folded Spill
str x10, [sp, #800] ; 8-byte Folded Spill
adds x10, x14, x19
mov x20, x19
umulh x14, x0, x8
adcs x14, x9, x14
msr NZCV, x3
adcs xzr, x4, x10
mrs x3, NZCV
adcs x15, x14, x16
ldr x10, [sp, #640] ; 8-byte Folded Reload
ldp x0, x8, [sp, #400] ; 16-byte Folded Reload
cmn x0, x10
adcs x6, x13, x8
adds x10, x0, x10
lsl x7, x10, #32
sub x27, x7, x10
adcs xzr, x8, x13
mrs x13, NZCV
ldr x0, [sp, #440] ; 8-byte Folded Reload
adcs x19, x11, x0
cmp x14, x9
add x8, x9, x20
str x8, [sp, #1008] ; 8-byte Folded Spill
cinc x8, x8, lo
msr NZCV, x3
adcs xzr, x16, x14
mrs x4, NZCV
adcs x16, x8, x12
mov x28, x8
str x8, [sp, #408] ; 8-byte Folded Spill
msr NZCV, x5
adcs xzr, x17, x2
mrs x8, NZCV
str x8, [sp, #832] ; 8-byte Folded Spill
str x8, [sp, #768] ; 8-byte Folded Spill
ldr x8, [sp, #960] ; 8-byte Folded Reload
ldr x9, [sp, #920] ; 8-byte Folded Reload
adds x8, x9, x8
ldr x9, [sp, #904] ; 8-byte Folded Reload
ldr x14, [sp, #848] ; 8-byte Folded Reload
adcs x17, x9, x14
msr NZCV, x13
adcs xzr, x0, x11
mrs x11, NZCV
adcs x13, x8, x1
cmp x10, x7
mov w20, #-1
umulh x9, x10, x20
cset w2, hi
adds x3, x6, x2
adds x3, x3, x9
mov x21, x9
str x9, [sp, #920] ; 8-byte Folded Spill
cset w5, hs
cmn x6, x2
adcs x2, x19, x5
mrs x5, NZCV
adcs x6, x27, x13
msr NZCV, x11
adcs xzr, x1, x8
mrs x8, NZCV
adcs x1, x17, x15
ldr x9, [sp, #528] ; 8-byte Folded Reload
ldr x11, [sp, #464] ; 8-byte Folded Reload
adds x11, x11, x9
ldp x14, x9, [sp, #448] ; 16-byte Folded Reload
adcs x7, x14, x9
ldr x9, [sp, #632] ; 8-byte Folded Reload
cmn x3, x9
adcs x0, x11, x2
adds x19, x3, x9
lsl x3, x19, #32
sub x22, x3, x19
str x22, [sp, #904] ; 8-byte Folded Spill
adcs xzr, x2, x11
mrs x2, NZCV
adcs x9, x7, x6
str x9, [sp, #416] ; 8-byte Folded Spill
msr NZCV, x8
adcs xzr, x15, x17
mrs x8, NZCV
ldr x17, [sp, #424] ; 8-byte Folded Reload
adcs x15, x17, x16
msr NZCV, x4
adcs xzr, x12, x28
mrs x9, NZCV
str x9, [sp, #848] ; 8-byte Folded Spill
str x9, [sp, #456] ; 8-byte Folded Spill
mov w9, #-2
mul x11, x10, x9
umulh x10, x10, x9
adds x12, x11, x21
str x27, [sp, #1024] ; 8-byte Folded Spill
adcs x11, x27, x10
msr NZCV, x5
adcs xzr, x13, x27
mrs x10, NZCV
adcs x13, x12, x1
cmp x7, x14
ldr x9, [sp, #432] ; 8-byte Folded Reload
cinc x4, x9, lo
ldr x9, [sp, #856] ; 8-byte Folded Reload
adds x14, x4, x9
msr NZCV, x2
adcs xzr, x6, x7
mrs x2, NZCV
adcs x5, x14, x13
str x5, [sp, #360] ; 8-byte Folded Spill
msr NZCV, x10
adcs xzr, x1, x12
mrs x10, NZCV
mov x1, x11
str x11, [sp, #400] ; 8-byte Folded Spill
adcs x12, x11, x15
msr NZCV, x8
adcs xzr, x16, x17
mrs x8, NZCV
str x8, [sp, #640] ; 8-byte Folded Spill
stp x19, x8, [sp, #440] ; 16-byte Folded Spill
adds x8, x4, x9
ldr x11, [sp, #368] ; 8-byte Folded Reload
adcs x14, x23, x11
msr NZCV, x2
adcs xzr, x13, x8
mrs x8, NZCV
adcs x13, x14, x12
str x13, [sp, #464] ; 8-byte Folded Spill
msr NZCV, x10
adcs xzr, x15, x1
mrs x10, NZCV
str x10, [sp, #632] ; 8-byte Folded Spill
msr NZCV, x8
adcs xzr, x12, x14
mrs x8, NZCV
str x8, [sp, #528] ; 8-byte Folded Spill
stp x8, x10, [sp, #424] ; 16-byte Folded Spill
cmp x19, x3
umulh x8, x19, x20
str x8, [sp, #960] ; 8-byte Folded Spill
cset w13, hi
adds x10, x0, x13
mov x14, #-4294967295
adds x10, x10, x8
str x10, [sp, #392] ; 8-byte Folded Spill
add x8, x10, x14
str x8, [sp, #376] ; 8-byte Folded Spill
cset w8, hs
cmn x0, x13
ldr x10, [sp, #416] ; 8-byte Folded Reload
adcs x8, x10, x8
str x8, [sp, #416] ; 8-byte Folded Spill
mrs x8, NZCV
str x8, [sp, #352] ; 8-byte Folded Spill
adcs x8, x22, x5
str x8, [sp, #384] ; 8-byte Folded Spill
cmn x4, x9
adcs xzr, x11, x23
adcs x28, x24, xzr
ldr x8, [sp, #776] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #712] ; 8-byte Folded Reload
ldr x9, [sp, #912] ; 8-byte Folded Reload
adcs xzr, x9, x8
ldr x8, [sp, #544] ; 8-byte Folded Reload
adcs x8, x8, xzr
ldr x9, [sp, #760] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #688] ; 8-byte Folded Reload
ldr x10, [sp, #552] ; 8-byte Folded Reload
adcs xzr, x9, x10
adcs x17, x8, xzr
ldr x8, [sp, #584] ; 8-byte Folded Reload
cmp x17, x8
ldr x8, [sp, #1040] ; 8-byte Folded Reload
mul x13, x8, x30
umulh x0, x8, x30
cset w14, lo
ldp x8, x4, [sp, #472] ; 16-byte Folded Reload
cmp x8, x4
cset w15, lo
cinc x16, x0, lo
cmn x8, x13
adcs x9, x15, x0
str x9, [sp, #552] ; 8-byte Folded Spill
adds x30, x8, x13
adcs x8, x14, x16
str x8, [sp, #472] ; 8-byte Folded Spill
ldr x9, [sp, #664] ; 8-byte Folded Reload
ldr x8, [sp, #1032] ; 8-byte Folded Reload
cmp x9, x8
ldr x8, [sp, #888] ; 8-byte Folded Reload
cinc x19, x8, lo
ldr x8, [sp, #784] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #744] ; 8-byte Folded Reload
adcs xzr, x8, x9
mrs x8, NZCV
str x8, [sp, #784] ; 8-byte Folded Spill
adcs x3, x19, x17
ldr x8, [sp, #792] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #616] ; 8-byte Folded Reload
ldr x9, [sp, #512] ; 8-byte Folded Reload
adcs xzr, x8, x9
mrs x8, NZCV
str x8, [sp, #888] ; 8-byte Folded Spill
adcs x10, x30, x3
str x10, [sp, #664] ; 8-byte Folded Spill
ldr x9, [sp, #496] ; 8-byte Folded Reload
cmp x9, x26
ldr x8, [sp, #928] ; 8-byte Folded Reload
cinc x5, x8, lo
ldr x8, [sp, #704] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #504] ; 8-byte Folded Reload
adcs xzr, x8, x9
mrs x8, NZCV
str x8, [sp, #512] ; 8-byte Folded Spill
adcs x11, x5, x10
str x11, [sp, #776] ; 8-byte Folded Spill
ldr x10, [sp, #600] ; 8-byte Folded Reload
ldr x14, [sp, #536] ; 8-byte Folded Reload
cmp x10, x14
ldr x16, [sp, #1080] ; 8-byte Folded Reload
ldr x9, [sp, #1064] ; 8-byte Folded Reload
mul x20, x9, x16
cset w21, lo
adds x10, x10, x20
str x10, [sp, #544] ; 8-byte Folded Spill
umulh x7, x9, x16
adcs x23, x21, x7
ldr x8, [sp, #736] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #576] ; 8-byte Folded Reload
ldr x9, [sp, #560] ; 8-byte Folded Reload
adcs xzr, x9, x8
mrs x8, NZCV
str x8, [sp, #560] ; 8-byte Folded Spill
adcs x26, x10, x11
ldr x8, [sp, #520] ; 8-byte Folded Reload
cmp x8, x25
ldr x9, [sp, #952] ; 8-byte Folded Reload
cinc x12, x9, lo
ldr x9, [sp, #728] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #568] ; 8-byte Folded Reload
adcs xzr, x9, x8
mrs x8, NZCV
str x8, [sp, #728] ; 8-byte Folded Spill
adcs x6, x12, x26
str x6, [sp, #712] ; 8-byte Folded Spill
ldr x15, [sp, #680] ; 8-byte Folded Reload
ldr x8, [sp, #608] ; 8-byte Folded Reload
cmp x15, x8
cset w25, lo
ldr x2, [sp, #696] ; 8-byte Folded Reload
cmp x2, x8
cset w10, lo
ldr x1, [sp, #720] ; 8-byte Folded Reload
cmp x1, x14
cset w14, lo
ldr x9, [sp, #752] ; 8-byte Folded Reload
cmp x9, x4
cset w8, lo
cmp x28, x24
cset w21, lo
adds x24, x28, x13
str x24, [sp, #928] ; 8-byte Folded Spill
adcs x13, x21, x0
str x13, [sp, #856] ; 8-byte Folded Spill
adds x28, x9, x20
str x28, [sp, #912] ; 8-byte Folded Spill
adcs x8, x8, x7
str x8, [sp, #792] ; 8-byte Folded Spill
ldr x8, [sp, #1096] ; 8-byte Folded Reload
ldr x22, [sp, #1072] ; 8-byte Folded Reload
mul x20, x8, x22
adds x4, x1, x20
str x4, [sp, #760] ; 8-byte Folded Spill
umulh x11, x8, x22
adcs x8, x14, x11
str x8, [sp, #616] ; 8-byte Folded Spill
ldr x8, [sp, #1088] ; 8-byte Folded Reload
mul x27, x8, x8
adds x2, x2, x27
str x2, [sp, #696] ; 8-byte Folded Spill
umulh x27, x8, x8
adcs x8, x10, x27
str x8, [sp, #600] ; 8-byte Folded Spill
adds x1, x15, x20
adcs x13, x25, x11
ldr x8, [sp, #824] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #672] ; 8-byte Folded Reload
ldr x10, [sp, #656] ; 8-byte Folded Reload
adcs xzr, x10, x8
mrs x8, NZCV
str x8, [sp, #824] ; 8-byte Folded Spill
adcs x20, x1, x6
ldr x25, [sp, #1000] ; 8-byte Folded Reload
ldr x8, [sp, #592] ; 8-byte Folded Reload
cmp x8, x25
ldr x10, [sp, #984] ; 8-byte Folded Reload
cinc x15, x10, lo
ldr x14, [sp, #816] ; 8-byte Folded Reload
msr NZCV, x14
ldr x14, [sp, #624] ; 8-byte Folded Reload
adcs xzr, x14, x8
mrs x8, NZCV
str x8, [sp, #816] ; 8-byte Folded Spill
adcs x14, x15, x20
str x14, [sp, #704] ; 8-byte Folded Spill
ldr x8, [sp, #808] ; 8-byte Folded Reload
msr NZCV, x8
adcs x9, x2, x14
str x9, [sp, #576] ; 8-byte Folded Spill
ldr x8, [sp, #976] ; 8-byte Folded Reload
ldr x14, [sp, #488] ; 8-byte Folded Reload
cmp x14, x8
ldr x8, [sp, #1056] ; 8-byte Folded Reload
cinc x14, x8, lo
ldr x8, [sp, #800] ; 8-byte Folded Reload
msr NZCV, x8
adcs x9, x14, x9
str x9, [sp, #568] ; 8-byte Folded Spill
ldr x8, [sp, #768] ; 8-byte Folded Reload
msr NZCV, x8
adcs x9, x4, x9
str x9, [sp, #584] ; 8-byte Folded Spill
ldr x6, [sp, #1016] ; 8-byte Folded Reload
ldr x8, [sp, #408] ; 8-byte Folded Reload
cmp x8, x6
ldr x4, [sp, #1008] ; 8-byte Folded Reload
cinc x21, x4, lo
ldr x8, [sp, #456] ; 8-byte Folded Reload
msr NZCV, x8
adcs x2, x21, x9
str x2, [sp, #608] ; 8-byte Folded Spill
ldr x8, [sp, #448] ; 8-byte Folded Reload
msr NZCV, x8
adcs x28, x28, x2
str x28, [sp, #688] ; 8-byte Folded Spill
ldr x8, [sp, #1024] ; 8-byte Folded Reload
ldr x2, [sp, #400] ; 8-byte Folded Reload
cmp x2, x8
ldr x2, [sp, #920] ; 8-byte Folded Reload
add x8, x8, x2
str x8, [sp, #1032] ; 8-byte Folded Spill
cinc x2, x8, lo
ldr x8, [sp, #432] ; 8-byte Folded Reload
msr NZCV, x8
adcs x28, x2, x28
str x28, [sp, #624] ; 8-byte Folded Spill
ldr x8, [sp, #424] ; 8-byte Folded Reload
msr NZCV, x8
adcs x8, x24, x28
str x8, [sp, #1048] ; 8-byte Folded Spill
ldr x8, [sp, #784] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x17, x19
ldr x8, [sp, #472] ; 8-byte Folded Reload
adcs x17, x8, xzr
ldr x8, [sp, #888] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x3, x30
adcs x8, x17, xzr
ldr x9, [sp, #552] ; 8-byte Folded Reload
cmp x8, x9
mov x9, x8
str x8, [sp, #592] ; 8-byte Folded Spill
ldr x8, [sp, #1040] ; 8-byte Folded Reload
mul x3, x8, x16
umulh x16, x8, x16
cset w17, lo
cmp x23, x7
cset w30, lo
cinc x8, x16, lo
cmn x23, x3
adcs x19, x30, x16
str x19, [sp, #784] ; 8-byte Folded Spill
mov x30, x16
str x16, [sp, #888] ; 8-byte Folded Spill
adds x16, x23, x3
str x16, [sp, #744] ; 8-byte Folded Spill
adcs x8, x17, x8
str x8, [sp, #736] ; 8-byte Folded Spill
ldr x8, [sp, #992] ; 8-byte Folded Reload
cmp x5, x8
ldr x8, [sp, #936] ; 8-byte Folded Reload
cinc x23, x8, lo
ldr x8, [sp, #512] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #664] ; 8-byte Folded Reload
adcs xzr, x8, x5
mrs x8, NZCV
str x8, [sp, #664] ; 8-byte Folded Spill
adcs x17, x23, x9
ldr x8, [sp, #560] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #776] ; 8-byte Folded Reload
ldr x9, [sp, #544] ; 8-byte Folded Reload
adcs xzr, x8, x9
mrs x8, NZCV
str x8, [sp, #656] ; 8-byte Folded Spill
adcs x8, x16, x17
str x8, [sp, #776] ; 8-byte Folded Spill
ldr x9, [sp, #968] ; 8-byte Folded Reload
cmp x12, x9
ldr x9, [sp, #952] ; 8-byte Folded Reload
cinc x24, x9, lo
ldr x9, [sp, #728] ; 8-byte Folded Reload
msr NZCV, x9
adcs xzr, x26, x12
mrs x12, NZCV
str x12, [sp, #728] ; 8-byte Folded Spill
adcs x16, x24, x8
str x16, [sp, #768] ; 8-byte Folded Spill
cmp x13, x11
ldr x12, [sp, #1064] ; 8-byte Folded Reload
mul x8, x12, x22
cset w5, lo
adds x13, x13, x8
str x13, [sp, #720] ; 8-byte Folded Spill
umulh x19, x12, x22
adcs x12, x5, x19
ldr x9, [sp, #824] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #712] ; 8-byte Folded Reload
adcs xzr, x9, x1
mrs x1, NZCV
str x1, [sp, #680] ; 8-byte Folded Spill
adcs x13, x13, x16
str x13, [sp, #712] ; 8-byte Folded Spill
cmp x15, x25
cinc x26, x10, lo
ldr x9, [sp, #816] ; 8-byte Folded Reload
msr NZCV, x9
adcs xzr, x20, x15
mrs x10, NZCV
str x10, [sp, #672] ; 8-byte Folded Spill
adcs x5, x26, x13
str x5, [sp, #752] ; 8-byte Folded Spill
ldr x22, [sp, #600] ; 8-byte Folded Reload
cmp x22, x27
cset w10, lo
ldr x20, [sp, #616] ; 8-byte Folded Reload
cmp x20, x11
cset w9, lo
ldr x13, [sp, #792] ; 8-byte Folded Reload
cmp x13, x7
cset w1, lo
ldr x11, [sp, #856] ; 8-byte Folded Reload
cmp x11, x0
cset w16, lo
adds x7, x11, x3
adcs x11, x16, x30
str x11, [sp, #816] ; 8-byte Folded Spill
adds x16, x13, x8
str x16, [sp, #856] ; 8-byte Folded Spill
adcs x8, x1, x19
str x8, [sp, #808] ; 8-byte Folded Spill
ldr x8, [sp, #1096] ; 8-byte Folded Reload
ldr x11, [sp, #1088] ; 8-byte Folded Reload
mul x0, x8, x11
adds x1, x20, x0
str x1, [sp, #824] ; 8-byte Folded Spill
umulh x13, x8, x11
adcs x8, x9, x13
str x8, [sp, #792] ; 8-byte Folded Spill
adds x9, x22, x0
str x9, [sp, #800] ; 8-byte Folded Spill
adcs x10, x10, x13
ldr x8, [sp, #864] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #704] ; 8-byte Folded Reload
ldr x11, [sp, #696] ; 8-byte Folded Reload
adcs xzr, x8, x11
mrs x8, NZCV
str x8, [sp, #600] ; 8-byte Folded Spill
adcs x9, x9, x5
str x9, [sp, #616] ; 8-byte Folded Spill
ldr x5, [sp, #976] ; 8-byte Folded Reload
cmp x14, x5
ldr x11, [sp, #1056] ; 8-byte Folded Reload
cinc x27, x11, lo
ldr x8, [sp, #840] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #576] ; 8-byte Folded Reload
adcs xzr, x8, x14
mrs x8, NZCV
str x8, [sp, #576] ; 8-byte Folded Spill
adcs x9, x27, x9
str x9, [sp, #840] ; 8-byte Folded Spill
ldr x8, [sp, #832] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #760] ; 8-byte Folded Reload
ldr x14, [sp, #568] ; 8-byte Folded Reload
adcs xzr, x14, x8
mrs x8, NZCV
str x8, [sp, #760] ; 8-byte Folded Spill
adcs x9, x1, x9
str x9, [sp, #864] ; 8-byte Folded Spill
cmp x21, x6
cinc x22, x4, lo
ldr x8, [sp, #848] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #584] ; 8-byte Folded Reload
adcs xzr, x8, x21
mrs x8, NZCV
str x8, [sp, #704] ; 8-byte Folded Spill
adcs x9, x22, x9
str x9, [sp, #832] ; 8-byte Folded Spill
ldr x8, [sp, #640] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #912] ; 8-byte Folded Reload
ldr x14, [sp, #608] ; 8-byte Folded Reload
adcs xzr, x14, x8
mrs x8, NZCV
str x8, [sp, #696] ; 8-byte Folded Spill
adcs x9, x16, x9
str x9, [sp, #848] ; 8-byte Folded Spill
ldr x8, [sp, #1024] ; 8-byte Folded Reload
cmp x2, x8
ldr x8, [sp, #1032] ; 8-byte Folded Reload
cinc x28, x8, lo
ldr x8, [sp, #632] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #688] ; 8-byte Folded Reload
adcs xzr, x8, x2
mrs x8, NZCV
str x8, [sp, #688] ; 8-byte Folded Spill
adcs x14, x28, x9
ldr x8, [sp, #528] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #928] ; 8-byte Folded Reload
ldr x9, [sp, #624] ; 8-byte Folded Reload
adcs xzr, x9, x8
mrs x8, NZCV
adcs x9, x7, x14
str x9, [sp, #1080] ; 8-byte Folded Spill
msr NZCV, x8
adcs xzr, x14, x7
mrs x8, NZCV
str x8, [sp, #640] ; 8-byte Folded Spill
mrs x14, NZCV
mov w9, #-2
ldr x3, [sp, #440] ; 8-byte Folded Reload
mul x15, x3, x9
ldr x16, [sp, #960] ; 8-byte Folded Reload
adds x7, x15, x16
ldr x8, [sp, #352] ; 8-byte Folded Reload
msr NZCV, x8
ldr x0, [sp, #904] ; 8-byte Folded Reload
ldr x8, [sp, #360] ; 8-byte Folded Reload
adcs xzr, x8, x0
mrs x8, NZCV
ldr x2, [sp, #464] ; 8-byte Folded Reload
adcs x4, x7, x2
str x4, [sp, #992] ; 8-byte Folded Spill
str x14, [sp, #912] ; 8-byte Folded Spill
adds x14, x15, x16
umulh x15, x3, x9
adcs x9, x0, x15
str x9, [sp, #936] ; 8-byte Folded Spill
msr NZCV, x8
adcs xzr, x2, x14
mrs x8, NZCV
str x8, [sp, #928] ; 8-byte Folded Spill
ldr x8, [sp, #1048] ; 8-byte Folded Reload
adcs x8, x9, x8
str x8, [sp, #952] ; 8-byte Folded Spill
ldr x8, [sp, #664] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #592] ; 8-byte Folded Reload
adcs xzr, x8, x23
ldr x8, [sp, #736] ; 8-byte Folded Reload
adcs x8, x8, xzr
ldr x9, [sp, #656] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #744] ; 8-byte Folded Reload
adcs xzr, x17, x9
adcs x16, x8, xzr
ldr x8, [sp, #784] ; 8-byte Folded Reload
cmp x16, x8
ldr x9, [sp, #1040] ; 8-byte Folded Reload
ldr x8, [sp, #1072] ; 8-byte Folded Reload
mul x14, x9, x8
umulh x20, x9, x8
cset w15, lo
cmp x12, x19
cset w17, lo
cinc x23, x20, lo
cmn x12, x14
adcs x8, x17, x20
str x8, [sp, #744] ; 8-byte Folded Spill
adds x12, x12, x14
str x12, [sp, #784] ; 8-byte Folded Spill
adcs x8, x15, x23
str x8, [sp, #736] ; 8-byte Folded Spill
ldr x8, [sp, #968] ; 8-byte Folded Reload
cmp x24, x8
ldr x8, [sp, #880] ; 8-byte Folded Reload
cinc x23, x8, lo
ldr x8, [sp, #728] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #776] ; 8-byte Folded Reload
adcs xzr, x8, x24
mrs x24, NZCV
adcs x6, x23, x16
ldr x8, [sp, #680] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #768] ; 8-byte Folded Reload
ldr x9, [sp, #720] ; 8-byte Folded Reload
adcs xzr, x8, x9
mrs x8, NZCV
str x8, [sp, #728] ; 8-byte Folded Spill
adcs x9, x12, x6
str x9, [sp, #880] ; 8-byte Folded Spill
cmp x26, x25
ldr x8, [sp, #984] ; 8-byte Folded Reload
cinc x1, x8, lo
ldr x8, [sp, #672] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #712] ; 8-byte Folded Reload
adcs xzr, x8, x26
mrs x8, NZCV
str x8, [sp, #768] ; 8-byte Folded Spill
adcs x0, x1, x9
str x0, [sp, #776] ; 8-byte Folded Spill
cmp x10, x13
ldr x12, [sp, #1064] ; 8-byte Folded Reload
ldr x2, [sp, #1088] ; 8-byte Folded Reload
mul x8, x12, x2
cset w9, lo
adds x26, x10, x8
umulh x12, x12, x2
adcs x9, x9, x12
ldr x10, [sp, #600] ; 8-byte Folded Reload
msr NZCV, x10
ldr x10, [sp, #800] ; 8-byte Folded Reload
ldr x15, [sp, #752] ; 8-byte Folded Reload
adcs xzr, x15, x10
mrs x10, NZCV
str x10, [sp, #800] ; 8-byte Folded Spill
adcs x30, x26, x0
cmp x27, x5
cinc x4, x11, lo
ldr x10, [sp, #576] ; 8-byte Folded Reload
msr NZCV, x10
ldr x10, [sp, #616] ; 8-byte Folded Reload
adcs xzr, x10, x27
mrs x10, NZCV
str x10, [sp, #752] ; 8-byte Folded Spill
adcs x27, x4, x30
ldr x17, [sp, #792] ; 8-byte Folded Reload
cmp x17, x13
cset w13, lo
ldr x15, [sp, #808] ; 8-byte Folded Reload
cmp x15, x19
cset w19, lo
ldr x10, [sp, #888] ; 8-byte Folded Reload
ldr x11, [sp, #816] ; 8-byte Folded Reload
cmp x11, x10
cset w21, lo
adds x5, x11, x14
str x5, [sp, #816] ; 8-byte Folded Spill
adcs x10, x21, x20
str x10, [sp, #968] ; 8-byte Folded Spill
adds x0, x15, x8
str x0, [sp, #808] ; 8-byte Folded Spill
adcs x8, x19, x12
str x8, [sp, #888] ; 8-byte Folded Spill
ldr x10, [sp, #1096] ; 8-byte Folded Reload
mul x8, x10, x10
adds x7, x17, x8
umulh x14, x10, x10
adcs x19, x13, x14
ldr x8, [sp, #760] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #824] ; 8-byte Folded Reload
ldr x10, [sp, #840] ; 8-byte Folded Reload
adcs xzr, x10, x8
mrs x8, NZCV
str x8, [sp, #792] ; 8-byte Folded Spill
adcs x3, x7, x27
ldr x13, [sp, #1016] ; 8-byte Folded Reload
cmp x22, x13
ldr x10, [sp, #1008] ; 8-byte Folded Reload
cinc x11, x10, lo
ldr x8, [sp, #704] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #864] ; 8-byte Folded Reload
adcs xzr, x8, x22
mrs x8, NZCV
str x8, [sp, #760] ; 8-byte Folded Spill
adcs x21, x11, x3
ldr x8, [sp, #696] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #856] ; 8-byte Folded Reload
ldr x15, [sp, #832] ; 8-byte Folded Reload
adcs xzr, x15, x8
mrs x8, NZCV
str x8, [sp, #840] ; 8-byte Folded Spill
adcs x22, x0, x21
ldr x0, [sp, #1024] ; 8-byte Folded Reload
cmp x28, x0
ldr x17, [sp, #1032] ; 8-byte Folded Reload
cinc x15, x17, lo
ldr x8, [sp, #688] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #848] ; 8-byte Folded Reload
adcs xzr, x8, x28
mrs x8, NZCV
str x8, [sp, #832] ; 8-byte Folded Spill
adcs x25, x15, x22
ldr x8, [sp, #640] ; 8-byte Folded Reload
msr NZCV, x8
adcs x8, x5, x25
str x8, [sp, #1072] ; 8-byte Folded Spill
msr NZCV, x24
adcs xzr, x16, x23
ldr x8, [sp, #736] ; 8-byte Folded Reload
adcs x16, x8, xzr
ldr x8, [sp, #728] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #784] ; 8-byte Folded Reload
adcs xzr, x6, x8
adcs x6, x16, xzr
ldr x8, [sp, #744] ; 8-byte Folded Reload
cmp x6, x8
ldr x8, [sp, #1040] ; 8-byte Folded Reload
mul x23, x8, x2
umulh x5, x8, x2
cset w24, lo
cmp x9, x12
cset w16, lo
cinc x8, x5, lo
cmn x9, x23
adcs x16, x16, x5
str x16, [sp, #984] ; 8-byte Folded Spill
adds x9, x9, x23
str x9, [sp, #864] ; 8-byte Folded Spill
adcs x8, x24, x8
str x8, [sp, #856] ; 8-byte Folded Spill
ldr x8, [sp, #1000] ; 8-byte Folded Reload
cmp x1, x8
ldr x8, [sp, #872] ; 8-byte Folded Reload
cinc x16, x8, lo
str x16, [sp, #1000] ; 8-byte Folded Spill
ldr x8, [sp, #768] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #880] ; 8-byte Folded Reload
adcs xzr, x8, x1
mrs x8, NZCV
str x8, [sp, #848] ; 8-byte Folded Spill
adcs x28, x16, x6
ldr x8, [sp, #800] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #776] ; 8-byte Folded Reload
adcs xzr, x8, x26
mrs x8, NZCV
str x8, [sp, #824] ; 8-byte Folded Spill
adcs x26, x9, x28
ldr x2, [sp, #976] ; 8-byte Folded Reload
cmp x4, x2
ldr x8, [sp, #1056] ; 8-byte Folded Reload
cinc x16, x8, lo
ldr x8, [sp, #752] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x30, x4
mrs x8, NZCV
str x8, [sp, #800] ; 8-byte Folded Spill
adcs x30, x16, x26
cmp x19, x14
ldr x8, [sp, #1064] ; 8-byte Folded Reload
ldr x1, [sp, #1096] ; 8-byte Folded Reload
mul x14, x8, x1
cset w9, lo
adds x24, x19, x14
umulh x4, x8, x1
adcs x19, x9, x4
ldr x8, [sp, #792] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x27, x7
mrs x8, NZCV
str x8, [sp, #880] ; 8-byte Folded Spill
adcs x27, x24, x30
cmp x11, x13
cinc x13, x10, lo
ldr x8, [sp, #760] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x3, x11
mrs x8, NZCV
str x8, [sp, #792] ; 8-byte Folded Spill
adcs x9, x13, x27
ldr x8, [sp, #888] ; 8-byte Folded Reload
cmp x8, x12
cset w12, lo
adds x11, x8, x14
str x11, [sp, #872] ; 8-byte Folded Spill
adcs x10, x12, x4
ldr x8, [sp, #840] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #808] ; 8-byte Folded Reload
adcs xzr, x21, x8
mrs x8, NZCV
str x8, [sp, #840] ; 8-byte Folded Spill
adcs x8, x11, x9
str x8, [sp, #888] ; 8-byte Folded Spill
mov x21, x9
cmp x15, x0
cinc x12, x17, lo
ldr x9, [sp, #832] ; 8-byte Folded Reload
msr NZCV, x9
adcs xzr, x22, x15
mrs x9, NZCV
str x9, [sp, #832] ; 8-byte Folded Spill
adcs x11, x12, x8
ldr x8, [sp, #968] ; 8-byte Folded Reload
cmp x8, x20
cset w1, lo
adds x3, x8, x23
adcs x23, x1, x5
ldr x8, [sp, #912] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #816] ; 8-byte Folded Reload
adcs xzr, x25, x8
mrs x1, NZCV
adcs x8, x3, x11
str x8, [sp, #1056] ; 8-byte Folded Spill
msr NZCV, x1
adcs xzr, x11, x3
mrs x15, NZCV
mrs x3, NZCV
ldr x7, [sp, #904] ; 8-byte Folded Reload
ldr x11, [sp, #936] ; 8-byte Folded Reload
cmp x11, x7
ldr x8, [sp, #960] ; 8-byte Folded Reload
add x1, x7, x8
cinc x8, x1, lo
ldr x9, [sp, #928] ; 8-byte Folded Reload
msr NZCV, x9
ldr x9, [sp, #1048] ; 8-byte Folded Reload
adcs xzr, x9, x11
mrs x9, NZCV
ldr x14, [sp, #1080] ; 8-byte Folded Reload
adcs x11, x8, x14
str x11, [sp, #1088] ; 8-byte Folded Spill
msr NZCV, x3
mrs x11, NZCV
str x11, [sp, #912] ; 8-byte Folded Spill
cmp x8, x7
cinc x3, x1, lo
msr NZCV, x9
adcs xzr, x14, x8
mrs x8, NZCV
str x8, [sp, #968] ; 8-byte Folded Spill
ldr x8, [sp, #1072] ; 8-byte Folded Reload
adcs x8, x3, x8
str x8, [sp, #1080] ; 8-byte Folded Spill
ldr x8, [sp, #848] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #1000] ; 8-byte Folded Reload
adcs xzr, x6, x8
ldr x8, [sp, #856] ; 8-byte Folded Reload
adcs x6, x8, xzr
ldr x8, [sp, #824] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #864] ; 8-byte Folded Reload
adcs xzr, x28, x8
adcs x6, x6, xzr
cmp x16, x2
ldr x8, [sp, #896] ; 8-byte Folded Reload
cinc x8, x8, lo
str x8, [sp, #1048] ; 8-byte Folded Spill
ldr x9, [sp, #800] ; 8-byte Folded Reload
msr NZCV, x9
adcs xzr, x26, x16
mrs x9, NZCV
str x9, [sp, #976] ; 8-byte Folded Spill
ldr x9, [sp, #1040] ; 8-byte Folded Reload
ldr x11, [sp, #1096] ; 8-byte Folded Reload
mul x16, x9, x11
adcs x28, x8, x6
adds x8, x19, x16
ldr x14, [sp, #880] ; 8-byte Folded Reload
msr NZCV, x14
adcs xzr, x30, x24
mrs x14, NZCV
str x14, [sp, #936] ; 8-byte Folded Spill
adcs x22, x8, x28
ldr x24, [sp, #1016] ; 8-byte Folded Reload
cmp x13, x24
ldr x8, [sp, #1008] ; 8-byte Folded Reload
cinc x20, x8, lo
ldr x8, [sp, #792] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x27, x13
mrs x8, NZCV
str x8, [sp, #928] ; 8-byte Folded Spill
adcs x26, x20, x22
cmp x10, x4
cset w13, lo
cmp x23, x5
umulh x5, x9, x11
cset w27, lo
adds x23, x23, x16
adcs x8, x27, x5
str x8, [sp, #1008] ; 8-byte Folded Spill
ldr x14, [sp, #1064] ; 8-byte Folded Reload
mul x2, x14, x14
adds x11, x10, x2
umulh x2, x14, x14
adcs x13, x13, x2
ldr x8, [sp, #840] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #872] ; 8-byte Folded Reload
adcs xzr, x21, x8
mrs x8, NZCV
str x8, [sp, #896] ; 8-byte Folded Spill
adcs x0, x11, x26
ldr x25, [sp, #1024] ; 8-byte Folded Reload
cmp x12, x25
mov x30, x17
cinc x10, x17, lo
ldr x8, [sp, #832] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #888] ; 8-byte Folded Reload
adcs xzr, x8, x12
mrs x12, NZCV
adcs x21, x10, x0
msr NZCV, x15
adcs x27, x23, x21
cmp x19, x4
cset w15, lo
cinc x9, x5, lo
cmn x19, x16
adcs x15, x15, x5
ldr x8, [sp, #984] ; 8-byte Folded Reload
cmp x6, x8
cset w8, lo
adds x17, x19, x16
adcs x8, x8, x9
ldr x9, [sp, #912] ; 8-byte Folded Reload
msr NZCV, x9
adcs xzr, x21, x23
mrs x9, NZCV
mrs x16, NZCV
msr NZCV, x12
adcs xzr, x0, x10
mrs x0, NZCV
mrs x21, NZCV
cmp x3, x7
cinc x23, x1, lo
ldr x12, [sp, #968] ; 8-byte Folded Reload
msr NZCV, x12
ldr x12, [sp, #1072] ; 8-byte Folded Reload
adcs xzr, x12, x3
mrs x3, NZCV
ldr x4, [sp, #1056] ; 8-byte Folded Reload
adcs x12, x23, x4
str x12, [sp, #1096] ; 8-byte Folded Spill
msr NZCV, x16
mrs x12, NZCV
str x12, [sp, #1000] ; 8-byte Folded Spill
cmp x23, x7
cinc x16, x1, lo
msr NZCV, x3
adcs xzr, x4, x23
mrs x12, NZCV
adcs x3, x16, x27
str x3, [sp, #1072] ; 8-byte Folded Spill
ldr x3, [sp, #976] ; 8-byte Folded Reload
msr NZCV, x3
ldr x3, [sp, #1048] ; 8-byte Folded Reload
adcs xzr, x6, x3
adcs x8, x8, xzr
ldr x3, [sp, #936] ; 8-byte Folded Reload
msr NZCV, x3
adcs xzr, x28, x17
adcs x8, x8, xzr
cmp x20, x24
ldr x17, [sp, #944] ; 8-byte Folded Reload
cinc x6, x17, lo
ldr x17, [sp, #928] ; 8-byte Folded Reload
msr NZCV, x17
adcs xzr, x22, x20
mrs x4, NZCV
ldr x3, [sp, #1040] ; 8-byte Folded Reload
mul x17, x3, x14
adcs x19, x6, x8
adds x20, x13, x17
ldr x22, [sp, #896] ; 8-byte Folded Reload
msr NZCV, x22
adcs xzr, x26, x11
mrs x11, NZCV
adcs x20, x20, x19
cmp x10, x25
cinc x10, x30, lo
msr NZCV, x21
adcs x21, x10, x20
ldr x23, [sp, #1008] ; 8-byte Folded Reload
cmp x23, x5
umulh x5, x3, x14
cset w22, lo
adds x23, x23, x17
adcs x22, x22, x5
msr NZCV, x9
adcs x9, x23, x21
cmp x13, x2
cset w2, lo
cinc x24, x5, lo
cmn x13, x17
adcs x2, x2, x5
cmp x8, x15
cset w15, lo
adds x13, x13, x17
adcs x15, x15, x24
cmp x16, x7
cinc x24, x1, lo
msr NZCV, x12
adcs xzr, x27, x16
mrs x16, NZCV
adcs x17, x24, x9
msr NZCV, x4
adcs xzr, x8, x6
adcs x8, x15, xzr
msr NZCV, x11
adcs xzr, x19, x13
adcs x8, x8, xzr
cmp x8, x2
cset w11, lo
cmp x22, x5
mul x13, x3, x3
umulh x14, x3, x3
cset w15, lo
cinc x2, x14, lo
cmn x22, x13
adcs x14, x15, x14
adds x13, x22, x13
adcs x11, x11, x2
cmp x10, x25
ldr x15, [sp, #920] ; 8-byte Folded Reload
cinc x15, x15, lo
msr NZCV, x0
adcs xzr, x20, x10
mrs x10, NZCV
adcs x0, x15, x8
ldr x12, [sp, #1000] ; 8-byte Folded Reload
msr NZCV, x12
adcs xzr, x21, x23
mrs x2, NZCV
adcs x4, x13, x0
cmp x24, x7
cinc x5, x1, lo
msr NZCV, x16
adcs xzr, x9, x24
mrs x9, NZCV
adcs x1, x5, x4
msr NZCV, x10
adcs xzr, x8, x15
adcs x8, x11, xzr
msr NZCV, x2
adcs xzr, x0, x13
adcs x8, x8, xzr
cmp x8, x14
cset w10, lo
msr NZCV, x9
adcs xzr, x4, x5
mrs x9, NZCV
adcs x11, x8, xzr
adcs x13, x10, xzr
cmp x5, x7
ldr x12, [sp, #960] ; 8-byte Folded Reload
cinc x14, x12, lo
adds x15, x11, x14
mov w25, #-1
ldp x2, x12, [sp, #384] ; 16-byte Folded Reload
cmp x12, x25
cset w16, lo
ldr x0, [sp, #416] ; 8-byte Folded Reload
sub x16, x0, x16
cmp x0, x16
mov x26, x0
cset w0, lo
sub x0, x2, x0
cmp x2, x0
mov x27, x2
cset w2, lo
ldr x6, [sp, #992] ; 8-byte Folded Reload
cmp x6, x25
csetm x4, lo
mov x24, #-4294967295
add x5, x6, x24
mov x28, x6
sub x2, x5, x2
cmp x5, x2
cset w5, lo
sub x4, x4, x5
mov x5, #-65534
movk x5, #0, lsl #16
ldr x7, [sp, #952] ; 8-byte Folded Reload
mov w6, #-2
cmp x7, x6
csetm x6, lo
add x5, x7, x5
mov x30, x7
add x4, x4, x5
cmp x5, x4
cset w5, lo
sub x5, x6, x5
ldr x3, [sp, #1088] ; 8-byte Folded Reload
cmp x3, x25
csetm x6, lo
add x7, x3, x24
add x5, x5, x7
cmp x7, x5
cset w7, lo
sub x6, x6, x7
ldr x3, [sp, #1080] ; 8-byte Folded Reload
cmp x3, x25
add x7, x3, x24
add x6, x6, x7
csetm x19, lo
cmp x7, x6
cset w7, lo
sub x7, x19, x7
ldr x3, [sp, #1096] ; 8-byte Folded Reload
cmp x3, x25
add x19, x3, x24
add x7, x7, x19
csetm x20, lo
cmp x19, x7
cset w19, lo
sub x19, x20, x19
ldr x3, [sp, #1072] ; 8-byte Folded Reload
cmp x3, x25
add x20, x3, x24
add x19, x19, x20
csetm x21, lo
cmp x20, x19
cset w20, lo
sub x20, x21, x20
cmp x17, x25
add x21, x17, x24
add x20, x20, x21
csetm x22, lo
cmp x21, x20
cset w21, lo
sub x21, x22, x21
cmp x1, x25
add x22, x1, x24
add x21, x21, x22
csetm x23, lo
cmp x22, x21
add x22, x15, x24
cset w24, lo
cmp x15, x25
mov w25, #-1
sub x15, x23, x24
add x15, x15, x22
csetm x23, lo
cmp x22, x15
cset w22, lo
sub x22, x23, x22
adds x11, x11, x14
adcs x13, x22, x13
msr NZCV, x9
adcs xzr, x8, x14
adcs x8, x10, xzr
mov x9, #-4294967296
cmp x8, x13
csel x8, x25, x9, hs
csetm x9, lo
and x10, x12, x9
ldr x12, [sp, #376] ; 8-byte Folded Reload
and x13, x8, x12
orr x10, x13, x10
and x13, x26, x9
and x14, x8, x16
orr x13, x14, x13
and x14, x27, x9
and x16, x8, x0
orr x14, x16, x14
and x16, x28, x9
and x0, x8, x2
orr x16, x0, x16
and x0, x30, x9
and x2, x8, x4
orr x0, x2, x0
ldr x2, [sp, #648] ; 8-byte Folded Reload
str x10, [x2]
stur x13, [x2, #4]
ldr x10, [sp, #1088] ; 8-byte Folded Reload
and x10, x10, x9
and x13, x8, x5
orr x10, x13, x10
str x14, [x2, #8]
stur x16, [x2, #12]
ldr x12, [sp, #1080] ; 8-byte Folded Reload
and x13, x12, x9
and x14, x8, x6
orr x13, x14, x13
ldr x12, [sp, #1096] ; 8-byte Folded Reload
and x12, x12, x9
and x14, x8, x7
orr x12, x14, x12
str x0, [x2, #16]
stur x10, [x2, #20]
and x10, x3, x9
and x14, x8, x19
orr x10, x14, x10
and x14, x17, x9
and x16, x8, x20
orr x14, x16, x14
and x16, x1, x9
and x17, x8, x21
orr x16, x17, x16
and x9, x11, x9
str x13, [x2, #24]
stur x12, [x2, #28]
and x8, x8, x15
str x10, [x2, #32]
stur x14, [x2, #36]
orr x8, x8, x9
str x16, [x2, #40]
stur x8, [x2, #44]
add sp, sp, #1104
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
ldp x26, x25, [sp, #16] ; 16-byte Folded Reload
ldp x28, x27, [sp], #96 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
.globl _fiat_p384_add ; -- Begin function fiat_p384_add
.p2align 2
_fiat_p384_add: ; @fiat_p384_add
.cfi_startproc
; %bb.0:
stp x28, x27, [sp, #-96]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 96
stp x26, x25, [sp, #16] ; 16-byte Folded Spill
stp x24, x23, [sp, #32] ; 16-byte Folded Spill
stp x22, x21, [sp, #48] ; 16-byte Folded Spill
stp x20, x19, [sp, #64] ; 16-byte Folded Spill
stp x29, x30, [sp, #80] ; 16-byte Folded Spill
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
ldp x14, x26, [x0]
ldur x15, [x0, #4]
ldp x16, x28, [x1]
ldur x17, [x1, #4]
cmn x16, x14
adcs x8, x17, x15
ldur x24, [x0, #12]
ldp x6, x19, [x0, #16]
ldur x7, [x0, #20]
ldur x4, [x0, #28]
ldp x3, x10, [x0, #32]
ldur x12, [x0, #36]
ldur x9, [x0, #44]
ldur x27, [x1, #12]
ldp x23, x21, [x1, #16]
ldur x22, [x1, #20]
ldur x20, [x1, #28]
ldp x5, x13, [x1, #32]
ldur x0, [x1, #36]
ldur x11, [x1, #44]
mov x25, #-4294967295
adds x14, x16, x14
add x16, x14, x25
adcs xzr, x15, x17
adcs x15, x26, xzr
cmp x15, x26
cset w17, lo
adds x15, x15, x28
adcs x17, x24, x17
cmp x17, x24
cset w1, lo
adds x17, x17, x27
add x24, x17, x25
adcs x1, x6, x1
cmp x1, x6
cset w26, lo
mov x27, #-65534
movk x27, #0, lsl #16
adds x6, x1, x23
add x23, x6, x27
adcs x1, x7, x26
cmp x1, x7
cset w26, lo
adds x7, x1, x22
add x22, x7, x25
adcs x1, x19, x26
cmp x1, x19
cset w19, lo
adds x1, x1, x21
add x21, x1, x25
adcs x19, x4, x19
cmp x19, x4
cset w26, lo
adds x4, x19, x20
add x19, x4, x25
adcs x20, x3, x26
cmp x20, x3
cset w26, lo
adds x3, x20, x5
add x5, x3, x25
adcs x20, x12, x26
cmp x20, x12
cset w26, lo
adds x12, x20, x0
add x0, x12, x25
adcs x20, x10, x26
cmp x20, x10
cset w26, lo
adds x10, x20, x13
add x13, x10, x25
adcs x20, x9, x26
cmp x20, x9
cset w26, lo
adds x9, x20, x11
add x11, x9, x25
adcs x20, x26, xzr
mov w25, #-1
cmp x14, x25
cset w26, lo
sub x26, x8, x26
cmp x8, x26
cset w27, lo
sub x27, x15, x27
cmp x15, x27
cset w28, lo
cmp x17, x25
csetm x30, lo
sub x28, x24, x28
cmp x24, x28
cset w24, lo
sub x24, x30, x24
add x24, x24, x23
mov w30, #-2
cmp x6, x30
csetm x30, lo
cmp x23, x24
cset w23, lo
sub x23, x30, x23
add x23, x23, x22
cmp x7, x25
csetm x30, lo
cmp x22, x23
cset w22, lo
sub x22, x30, x22
add x22, x22, x21
cmp x1, x25
csetm x30, lo
cmp x21, x22
cset w21, lo
sub x21, x30, x21
add x21, x21, x19
cmp x4, x25
csetm x30, lo
cmp x19, x21
cset w19, lo
sub x19, x30, x19
add x19, x19, x5
cmp x3, x25
csetm x30, lo
cmp x5, x19
cset w5, lo
sub x5, x30, x5
add x5, x5, x0
cmp x12, x25
csetm x30, lo
cmp x0, x5
cset w0, lo
sub x0, x30, x0
add x0, x0, x13
cmp x10, x25
csetm x30, lo
cmp x13, x0
cset w13, lo
sub x13, x30, x13
add x13, x13, x11
cmp x9, x25
cset w30, lo
cmp x11, x13
cset w11, lo
sub x30, x20, x30
sub x11, x30, x11
mov x30, #-4294967296
cmp x20, x11
csel x11, x25, x30, hs
csetm x20, lo
and x14, x14, x20
and x16, x11, x16
orr x14, x16, x14
and x8, x8, x20
and x16, x11, x26
orr x8, x16, x8
and x15, x15, x20
and x16, x11, x27
orr x15, x16, x15
and x16, x17, x20
and x17, x11, x28
orr x16, x17, x16
and x17, x6, x20
and x6, x11, x24
orr x17, x6, x17
and x6, x7, x20
str x14, [x2]
stur x8, [x2, #4]
and x8, x11, x23
orr x8, x8, x6
and x14, x1, x20
and x1, x11, x22
str x15, [x2, #8]
stur x16, [x2, #12]
orr x14, x1, x14
and x15, x4, x20
and x16, x11, x21
orr x15, x16, x15
str x17, [x2, #16]
stur x8, [x2, #20]
and x8, x3, x20
and x16, x11, x19
orr x8, x16, x8
and x12, x12, x20
and x16, x11, x5
orr x12, x16, x12
and x10, x10, x20
and x16, x11, x0
orr x10, x16, x10
and x9, x9, x20
str x14, [x2, #24]
stur x15, [x2, #28]
and x11, x11, x13
str x8, [x2, #32]
stur x12, [x2, #36]
orr x8, x11, x9
str x10, [x2, #40]
stur x8, [x2, #44]
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
ldp x26, x25, [sp, #16] ; 16-byte Folded Reload
ldp x28, x27, [sp], #96 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
.globl _fiat_p384_sub ; -- Begin function fiat_p384_sub
.p2align 2
_fiat_p384_sub: ; @fiat_p384_sub
.cfi_startproc
; %bb.0:
stp x26, x25, [sp, #-64]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 64
stp x24, x23, [sp, #16] ; 16-byte Folded Spill
stp x22, x21, [sp, #32] ; 16-byte Folded Spill
stp x20, x19, [sp, #48] ; 16-byte Folded Spill
.cfi_offset w19, -8
.cfi_offset w20, -16
.cfi_offset w21, -24
.cfi_offset w22, -32
.cfi_offset w23, -40
.cfi_offset w24, -48
.cfi_offset w25, -56
.cfi_offset w26, -64
ldp x8, x12, [x0]
ldur x10, [x0, #4]
ldur x13, [x0, #12]
ldp x20, x5, [x0, #16]
ldur x21, [x0, #20]
ldur x6, [x0, #28]
ldp x4, x11, [x0, #32]
ldur x16, [x0, #36]
ldur x9, [x0, #44]
ldp x14, x0, [x1]
ldur x17, [x1, #4]
ldur x22, [x1, #12]
ldp x23, x25, [x1, #16]
ldur x24, [x1, #20]
ldur x26, [x1, #28]
ldp x19, x3, [x1, #32]
ldur x7, [x1, #36]
ldur x15, [x1, #44]
subs x8, x8, x14
cset w14, lo
subs x17, x10, x17
sub x10, x17, x14
cset w14, lo
cmp x17, x10
cset w17, lo
subs x0, x12, x0
sub x12, x0, x14
cset w1, lo
sub x12, x12, x17
cmp x0, x12
cset w0, lo
csetm x14, lo
subs x22, x13, x22
sub x17, x22, x1
cset w1, lo
sub x13, x17, x0
cmp x22, x13
cset w0, lo
subs x20, x20, x23
sub x1, x20, x1
cset w22, lo
sub x0, x1, x0
cmp x20, x0
cset w1, lo
subs x20, x21, x24
sub x21, x20, x22
cset w22, lo
sub x1, x21, x1
cmp x20, x1
cset w20, lo
subs x21, x5, x25
sub x5, x21, x22
cset w22, lo
sub x5, x5, x20
cmp x21, x5
cset w20, lo
subs x21, x6, x26
sub x6, x21, x22
cset w22, lo
sub x6, x6, x20
cmp x21, x6
cset w20, lo
subs x19, x4, x19
sub x4, x19, x22
cset w21, lo
sub x4, x4, x20
cmp x19, x4
cset w19, lo
subs x7, x16, x7
sub x16, x7, x21
cset w20, lo
sub x16, x16, x19
cmp x7, x16
cset w7, lo
subs x3, x11, x3
sub x11, x3, x20
cset w19, lo
sub x11, x11, x7
cmp x3, x11
cset w3, lo
subs x15, x9, x15
sub x9, x15, x19
cset w7, lo
sub x9, x9, x3
cmp x15, x9
csetm x15, lo
mov w3, #-1
cmp x7, x15
csel x3, xzr, x3, eq
adds x8, x3, x8
adcs x10, x10, xzr
adcs x12, x12, xzr
mrs x19, NZCV
adcs x14, x17, x14
mov w17, #-2
cmp x7, x15
csel x15, xzr, x17, eq
add x14, x14, x3
msr NZCV, x19
adcs xzr, x13, x3
adcs x13, x0, xzr
cmp x13, x0
cset w17, lo
adds x13, x13, x15
adcs x15, x1, x17
cmp x15, x1
cset w17, lo
adds x15, x15, x3
adcs x17, x5, x17
cmp x17, x5
cset w0, lo
adds x17, x17, x3
adcs x0, x6, x0
cmp x0, x6
cset w1, lo
adds x0, x0, x3
adcs x1, x4, x1
cmp x1, x4
cset w4, lo
adds x1, x1, x3
adcs x4, x16, x4
cmp x4, x16
cset w16, lo
str x8, [x2]
stur x10, [x2, #4]
adds x8, x4, x3
str x12, [x2, #8]
stur x14, [x2, #12]
adcs x10, x11, x16
str x13, [x2, #16]
stur x15, [x2, #20]
cmp x10, x11
cset w11, lo
adds x10, x10, x3
str x17, [x2, #24]
stur x0, [x2, #28]
add x9, x3, x9
str x1, [x2, #32]
stur x8, [x2, #36]
adcs x8, x9, x11
str x10, [x2, #40]
stur x8, [x2, #44]
ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
ldp x22, x21, [sp, #32] ; 16-byte Folded Reload
ldp x24, x23, [sp, #16] ; 16-byte Folded Reload
ldp x26, x25, [sp], #64 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
.globl _fiat_p384_opp ; -- Begin function fiat_p384_opp
.p2align 2
_fiat_p384_opp: ; @fiat_p384_opp
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
.cfi_offset w19, -8
.cfi_offset w20, -16
ldp x8, x11, [x0]
ldur x10, [x0, #4]
ldur x16, [x0, #12]
ldp x2, x5, [x0, #16]
ldur x3, [x0, #20]
ldur x6, [x0, #28]
ldp x7, x17, [x0, #32]
ldur x4, [x0, #36]
ldur x12, [x0, #44]
neg x9, x8
neg x13, x10
cmp x8, #0
csetm x14, ne
cmp x10, #0
csetm x15, ne
sub x10, x14, x10
cmp x10, x13
cset w13, hi
sub x14, x15, x11
sub x13, x14, x13
cmp x11, #0
csetm x15, ne
neg x11, x11
cmp x13, x11
cset w11, hi
csetm x14, hi
sub x15, x15, x16
sub x11, x15, x11
cmp x16, #0
csetm x0, ne
neg x16, x16
cmp x11, x16
cset w16, hi
sub x0, x0, x2
sub x16, x0, x16
cmp x2, #0
csetm x0, ne
neg x2, x2
cmp x16, x2
cset w2, hi
sub x0, x0, x3
sub x2, x0, x2
cmp x3, #0
csetm x0, ne
neg x3, x3
cmp x2, x3
cset w3, hi
sub x0, x0, x5
sub x3, x0, x3
cmp x5, #0
csetm x0, ne
neg x5, x5
cmp x3, x5
cset w5, hi
sub x0, x0, x6
sub x5, x0, x5
cmp x6, #0
csetm x0, ne
neg x6, x6
cmp x5, x6
cset w6, hi
sub x0, x0, x7
sub x6, x0, x6
cmp x7, #0
csetm x0, ne
neg x7, x7
cmp x6, x7
cset w7, hi
sub x0, x0, x4
sub x7, x0, x7
cmp x4, #0
csetm x0, ne
neg x4, x4
cmp x7, x4
cset w4, hi
sub x0, x0, x17
sub x0, x0, x4
cmp x17, #0
csetm x4, ne
neg x17, x17
cmp x0, x17
cset w17, hi
sub x4, x4, x12
sub x17, x4, x17
cmp x12, #0
cset w4, ne
neg x12, x12
cmp x17, x12
csetm x19, hi
mov w12, #-1
cmp x4, x19
csel x12, xzr, x12, eq
sub x8, x12, x8
cmp x8, x9
cset w9, lo
adds x9, x10, x9
adcs x10, x13, xzr
mrs x13, NZCV
adcs x14, x15, x14
mov w15, #-2
cmp x4, x19
csel x15, xzr, x15, eq
add x14, x14, x12
msr NZCV, x13
adcs xzr, x11, x12
adcs x11, x16, xzr
cmp x11, x16
cset w13, lo
adds x11, x11, x15
adcs x13, x2, x13
cmp x13, x2
cset w15, lo
adds x13, x13, x12
adcs x15, x3, x15
cmp x15, x3
cset w16, lo
adds x15, x15, x12
adcs x16, x5, x16
cmp x16, x5
cset w2, lo
adds x16, x16, x12
adcs x2, x6, x2
cmp x2, x6
cset w3, lo
adds x2, x2, x12
adcs x3, x7, x3
cmp x3, x7
cset w4, lo
str x8, [x1]
stur x9, [x1, #4]
adds x8, x3, x12
str x10, [x1, #8]
stur x14, [x1, #12]
adcs x9, x0, x4
str x11, [x1, #16]
stur x13, [x1, #20]
cmp x9, x0
cset w10, lo
adds x9, x9, x12
str x15, [x1, #24]
stur x16, [x1, #28]
add x11, x12, x17
str x2, [x1, #32]
stur x8, [x1, #36]
adcs x8, x11, x10
str x9, [x1, #40]
stur x8, [x1, #44]
ldp x20, x19, [sp], #16 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
.globl _fiat_p384_from_montgomery ; -- Begin function fiat_p384_from_montgomery
.p2align 2
_fiat_p384_from_montgomery: ; @fiat_p384_from_montgomery
.cfi_startproc
; %bb.0:
sub sp, sp, #336
.cfi_def_cfa_offset 336
stp x28, x27, [sp, #240] ; 16-byte Folded Spill
stp x26, x25, [sp, #256] ; 16-byte Folded Spill
stp x24, x23, [sp, #272] ; 16-byte Folded Spill
stp x22, x21, [sp, #288] ; 16-byte Folded Spill
stp x20, x19, [sp, #304] ; 16-byte Folded Spill
stp x29, x30, [sp, #320] ; 16-byte Folded Spill
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
str x1, [sp, #192] ; 8-byte Folded Spill
ldr x9, [x0]
ldur x10, [x0, #4]
mov x3, x0
mov w8, #-1
umulh x13, x9, x8
mov w8, #-1
lsl x11, x9, #32
cmp x9, x11
cinc x12, x13, hi
mov x0, x13
adds x13, x12, x10
lsl x14, x13, #32
cmp x13, x14
cset w13, hi
mov w15, #-2
umulh x17, x9, x15
mul x16, x9, x15
adds x10, x12, x10
sub x1, x14, x10
umulh x8, x10, x8
adcs x13, x8, x13
stp x8, x0, [sp, #200] ; 16-byte Folded Spill
adds x12, x16, x0
sub x16, x11, x9
adcs x9, x16, x17
cmp x9, x16
cset w11, lo
add x28, x16, x0
umulh x17, x10, x15
mul x10, x10, x15
cinc x0, x28, lo
adds x2, x10, x8
adcs x17, x1, x17
cmn x1, x12
adcs x4, x2, x9
adds x5, x1, x12
str x3, [sp, #232] ; 8-byte Folded Spill
ldr x6, [x3, #8]
ldur x10, [x3, #12]
adcs xzr, x9, x2
adcs x11, x28, x11
cmp x0, x16
cinc x2, x28, lo
cmp x2, x16
cinc x12, x28, lo
cmp x17, x1
add x3, x1, x8
cinc x7, x3, lo
cmp x7, x1
cinc x9, x3, lo
cmp x11, x0
cset w0, lo
adds x19, x11, x17
adcs x11, x2, x0
cmp x11, x2
cset w17, lo
adds x0, x11, x7
adcs x11, x12, x17
adds x2, x11, x9
adds x6, x13, x6
lsl x7, x6, #32
sub x17, x7, x6
adcs x20, x16, xzr
adcs x21, x5, xzr
adcs x22, x4, xzr
adcs x19, x19, xzr
adcs x23, x0, xzr
adcs x5, x2, xzr
mrs x13, NZCV
mov w8, #-1
umulh x14, x6, x8
mov w15, #-2
umulh x0, x6, x15
mul x4, x6, x15
mov w15, #-2
adds x24, x4, x14
adcs x4, x17, x0
cmp x6, x7
cset w0, hi
adds x0, x20, x0
cset w6, hs
adds x0, x0, x14
mov x2, x14
str x14, [sp, #168] ; 8-byte Folded Spill
adcs x6, x21, x6
cmp x6, x21
cset w7, lo
adds x7, x22, x7
cset w20, hs
adds x7, x7, x17
adcs x20, x19, x20
cmp x20, x19
cset w19, lo
adds x20, x20, x24
adcs x21, x23, x19
adds x19, x0, x10
lsl x0, x19, #32
sub x10, x0, x19
adcs x6, x6, xzr
adcs x7, x7, xzr
adcs x20, x20, xzr
mrs x22, NZCV
umulh x8, x19, x8
cmp x19, x0
cset w24, hi
adds x0, x6, x24
adds x0, x0, x8
cset w25, hs
cmn x6, x24
adcs x6, x7, x25
mrs x24, NZCV
adcs x14, x10, x20
str x14, [sp, #184] ; 8-byte Folded Spill
cmp x21, x23
cset w23, lo
adds x21, x21, x4
adcs x23, x5, x23
msr NZCV, x22
adcs x25, x21, xzr
mrs x26, NZCV
cmp x12, x16
cinc x22, x28, lo
cmp x11, x12
cset w12, lo
cmn x11, x9
adcs x12, x22, x12
mul x27, x19, x15
adds x11, x27, x8
msr NZCV, x24
adcs xzr, x20, x10
mrs x24, NZCV
adcs x14, x11, x25
cmp x22, x16
cinc x15, x28, lo
cmp x9, x1
cinc x9, x3, lo
cmp x9, x1
cinc x21, x3, lo
cmp x12, x22
cset w20, lo
adds x9, x12, x9
adcs x20, x15, x20
adds x12, x20, x21
msr NZCV, x13
adcs x9, x9, xzr
adcs x22, x12, xzr
mrs x11, NZCV
str x11, [sp, #144] ; 8-byte Folded Spill
mov x7, x17
cmp x4, x17
add x4, x17, x2
cinc x13, x4, lo
cmp x23, x5
cset w5, lo
adds x23, x23, x13
adcs x5, x9, x5
msr NZCV, x26
adcs x26, x23, xzr
mrs x30, NZCV
adds x23, x27, x8
mov x11, x8
str x8, [sp, #176] ; 8-byte Folded Spill
mov w8, #-2
umulh x19, x19, x8
adcs x27, x10, x19
msr NZCV, x24
adcs xzr, x25, x23
mrs x19, NZCV
adcs x2, x27, x26
cmp x13, x17
cinc x24, x4, lo
cmp x5, x9
cset w9, lo
adds x5, x5, x24
adcs x25, x22, x9
ldr x8, [sp, #232] ; 8-byte Folded Reload
ldr x9, [x8, #16]
adds x23, x0, x9
lsl x9, x23, #32
sub x17, x9, x23
adcs x6, x6, xzr
ldr x8, [sp, #184] ; 8-byte Folded Reload
adcs x8, x8, xzr
adcs x14, x14, xzr
mrs x13, NZCV
msr NZCV, x30
adcs x0, x5, xzr
mrs x30, NZCV
cmp x23, x9
mov w9, #-1
umulh x12, x23, x9
cset w9, hi
adds x5, x6, x9
adds x5, x5, x12
str x5, [sp, #128] ; 8-byte Folded Spill
cset w5, hs
cmn x6, x9
adcs x8, x8, x5
str x8, [sp, #160] ; 8-byte Folded Spill
mrs x9, NZCV
adcs x8, x17, x14
str x8, [sp, #152] ; 8-byte Folded Spill
mov x5, x10
cmp x27, x10
add x6, x10, x11
cinc x8, x6, lo
msr NZCV, x19
adcs xzr, x26, x27
mrs x19, NZCV
adcs x26, x8, x0
cmp x15, x16
cinc x27, x28, lo
cmp x20, x15
cset w11, lo
cmn x20, x21
adcs x20, x27, x11
msr NZCV, x13
adcs x2, x2, xzr
adcs x13, x26, xzr
mrs x10, NZCV
mov w28, #-2
mul x26, x23, x28
adds x11, x26, x12
msr NZCV, x9
adcs xzr, x14, x17
mrs x9, NZCV
adcs x11, x11, x2
str x11, [sp, #120] ; 8-byte Folded Spill
cmp x21, x1
cinc x14, x3, lo
cmp x14, x1
cinc x11, x3, lo
cmp x20, x27
cset w3, lo
cmp x27, x16
ldr x15, [sp, #208] ; 8-byte Folded Reload
cinc x16, x15, lo
adds x14, x20, x14
add x16, x16, x11
adcs x3, x16, x3
ldr x15, [sp, #144] ; 8-byte Folded Reload
msr NZCV, x15
adcs x20, x14, xzr
adcs x21, x3, xzr
mrs x14, NZCV
mov x15, x7
cmp x24, x7
cinc x24, x4, lo
cmp x25, x22
cset w16, lo
adds x22, x25, x24
adcs x25, x20, x16
msr NZCV, x30
adcs x7, x22, xzr
mrs x22, NZCV
adds x16, x26, x12
str x12, [sp, #184] ; 8-byte Folded Spill
umulh x23, x23, x28
adcs x23, x17, x23
msr NZCV, x9
adcs xzr, x2, x16
mrs x2, NZCV
adcs x9, x23, x13
str x9, [sp, #112] ; 8-byte Folded Spill
str x5, [sp, #224] ; 8-byte Folded Spill
cmp x8, x5
mov x28, x6
cinc x26, x6, lo
msr NZCV, x19
adcs xzr, x0, x8
mrs x19, NZCV
adcs x8, x26, x7
msr NZCV, x10
adcs x16, x8, xzr
mrs x10, NZCV
cmp x24, x15
cinc x27, x4, lo
cmp x25, x20
cset w8, lo
adds x0, x25, x27
adcs x20, x21, x8
msr NZCV, x22
adcs x0, x0, xzr
mrs x22, NZCV
cmp x23, x17
add x12, x17, x12
cinc x8, x12, lo
msr NZCV, x2
adcs xzr, x13, x23
mrs x2, NZCV
adcs x9, x8, x16
stp x15, x9, [sp, #136] ; 16-byte Folded Spill
cmp x26, x5
cinc x24, x6, lo
msr NZCV, x19
adcs xzr, x7, x26
mrs x9, NZCV
str x9, [sp, #104] ; 8-byte Folded Spill
adcs x19, x24, x0
msr NZCV, x10
adcs x25, x19, xzr
mrs x10, NZCV
cmp x3, x11
cset w19, lo
cmp x27, x15
cinc x23, x4, lo
cmp x23, x15
cinc x3, x4, lo
cmp x11, x1
ldr x9, [sp, #200] ; 8-byte Folded Reload
cinc x11, x9, lo
add x11, x11, x3
msr NZCV, x14
adcs x11, x11, x19
cmp x20, x21
cset w14, lo
adds x15, x20, x23
adcs x4, x11, x14
ldr x13, [sp, #232] ; 8-byte Folded Reload
ldur x11, [x13, #20]
ldr x9, [sp, #128] ; 8-byte Folded Reload
adds x11, x9, x11
lsl x19, x11, #32
sub x14, x19, x11
ldr x9, [sp, #160] ; 8-byte Folded Reload
adcs x20, x9, xzr
ldr x9, [sp, #152] ; 8-byte Folded Reload
adcs x6, x9, xzr
ldr x9, [sp, #120] ; 8-byte Folded Reload
adcs x27, x9, xzr
mrs x5, NZCV
msr NZCV, x22
adcs x30, x15, xzr
mrs x21, NZCV
cmp x11, x19
mov w9, #-1
umulh x1, x11, x9
cset w15, hi
adds x19, x20, x15
adds x9, x19, x1
mov x7, x1
cset w19, hs
cmn x20, x15
adcs x1, x6, x19
mrs x22, NZCV
adcs x15, x14, x27
str x15, [sp, #120] ; 8-byte Folded Spill
stp x12, x17, [sp, #208] ; 16-byte Folded Spill
cmp x8, x17
cinc x26, x12, lo
msr NZCV, x2
adcs xzr, x16, x8
mrs x16, NZCV
adcs x19, x26, x25
msr NZCV, x5
ldr x8, [sp, #112] ; 8-byte Folded Reload
adcs x20, x8, xzr
mrs x23, NZCV
ldr x15, [sp, #224] ; 8-byte Folded Reload
cmp x24, x15
cinc x5, x28, lo
ldr x8, [sp, #104] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x0, x24
mrs x0, NZCV
adcs x2, x5, x30
msr NZCV, x10
adcs x2, x2, xzr
mrs x8, NZCV
str x8, [sp, #96] ; 8-byte Folded Spill
cmp x26, x17
cinc x6, x12, lo
msr NZCV, x16
adcs xzr, x25, x26
mrs x16, NZCV
adcs x25, x6, x2
msr NZCV, x21
adcs x21, x4, xzr
mrs x24, NZCV
msr NZCV, x16
adcs xzr, x2, x6
mrs x8, NZCV
str x8, [sp, #128] ; 8-byte Folded Spill
str x8, [sp, #104] ; 8-byte Folded Spill
mov w8, #-2
umulh x16, x11, x8
mul x11, x11, x8
adds x26, x11, x7
mov x2, x7
str x7, [sp, #160] ; 8-byte Folded Spill
adcs x12, x14, x16
msr NZCV, x22
adcs xzr, x27, x14
mrs x27, NZCV
adcs x22, x26, x20
mov x17, x15
cmp x5, x15
cinc x8, x28, lo
msr NZCV, x0
adcs xzr, x30, x5
mrs x5, NZCV
ldr x16, [x13, #24]
adcs x30, x8, x21
adds x0, x9, x16
lsl x9, x0, #32
sub x7, x9, x0
adcs x1, x1, xzr
ldr x10, [sp, #120] ; 8-byte Folded Reload
adcs x11, x10, xzr
adcs x10, x22, xzr
mrs x13, NZCV
msr NZCV, x23
ldr x16, [sp, #144] ; 8-byte Folded Reload
adcs x23, x16, xzr
adcs x19, x19, xzr
adcs x22, x25, xzr
mrs x15, NZCV
cmp x4, x3
cset w4, lo
cmp x8, x17
cinc x25, x28, lo
ldr x16, [sp, #136] ; 8-byte Folded Reload
cmp x3, x16
ldr x16, [sp, #168] ; 8-byte Folded Reload
cinc x17, x16, lo
add x17, x17, x25
msr NZCV, x24
adcs x17, x17, x4
msr NZCV, x5
adcs xzr, x21, x8
adcs x8, x17, xzr
ldr x16, [sp, #96] ; 8-byte Folded Reload
msr NZCV, x16
adcs x17, x30, xzr
adcs x16, x8, xzr
stp x8, x16, [sp, #112] ; 16-byte Folded Spill
mrs x8, NZCV
str x8, [sp, #136] ; 8-byte Folded Spill
msr NZCV, x27
adcs xzr, x20, x26
mrs x3, NZCV
adcs x5, x12, x23
cmp x0, x9
mov w8, #-1
umulh x8, x0, x8
cset w9, hi
adds x20, x1, x9
adds x24, x20, x8
mov x26, x8
cset w20, hs
cmn x1, x9
adcs x8, x11, x20
str x8, [sp, #88] ; 8-byte Folded Spill
mrs x11, NZCV
adcs x27, x7, x10
cmp x12, x14
add x21, x14, x2
cinc x20, x21, lo
msr NZCV, x3
adcs xzr, x23, x12
mrs x12, NZCV
adcs x23, x20, x19
msr NZCV, x13
adcs x4, x5, xzr
mrs x5, NZCV
cmp x20, x14
mov x1, x14
str x14, [sp, #200] ; 8-byte Folded Spill
cinc x3, x21, lo
msr NZCV, x12
adcs xzr, x19, x20
mrs x12, NZCV
adcs x19, x3, x22
ldp x8, x9, [sp, #208] ; 16-byte Folded Reload
cmp x6, x9
cinc x20, x8, lo
ldr x13, [sp, #104] ; 8-byte Folded Reload
msr NZCV, x13
adcs x2, x20, x17
msr NZCV, x15
adcs x28, x2, xzr
mrs x2, NZCV
mov w13, #-2
umulh x15, x0, x13
mul x0, x0, x13
adds x13, x0, x26
mov x14, x26
str x26, [sp, #168] ; 8-byte Folded Spill
adcs x26, x7, x15
msr NZCV, x11
adcs xzr, x10, x7
mrs x11, NZCV
adcs x0, x13, x4
cmp x3, x1
cinc x15, x21, lo
msr NZCV, x12
adcs xzr, x22, x3
mrs x10, NZCV
str x10, [sp, #96] ; 8-byte Folded Spill
adcs x22, x15, x28
cmp x20, x9
mov x1, x9
cinc x6, x8, lo
ldr x8, [sp, #128] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x17, x20
mrs x8, NZCV
str x8, [sp, #144] ; 8-byte Folded Spill
ldr x20, [sp, #232] ; 8-byte Folded Reload
ldur x12, [x20, #28]
adcs x17, x6, x16
adds x30, x24, x12
lsl x12, x30, #32
sub x3, x12, x30
ldr x8, [sp, #88] ; 8-byte Folded Reload
adcs x24, x8, xzr
adcs x8, x27, xzr
adcs x9, x0, xzr
mrs x10, NZCV
msr NZCV, x2
adcs x27, x17, xzr
str x27, [sp, #16] ; 8-byte Folded Spill
mrs x16, NZCV
str x16, [sp, #104] ; 8-byte Folded Spill
cmp x30, x12
mov w12, #-1
umulh x0, x30, x12
cset w12, hi
adds x17, x24, x12
adds x16, x17, x0
str x16, [sp, #40] ; 8-byte Folded Spill
cset w17, hs
cmn x24, x12
adcs x8, x8, x17
str x8, [sp, #88] ; 8-byte Folded Spill
mrs x24, NZCV
adcs x8, x3, x9
str x8, [sp, #80] ; 8-byte Folded Spill
msr NZCV, x5
adcs x8, x23, xzr
adcs x23, x19, xzr
adcs x16, x22, xzr
mrs x22, NZCV
msr NZCV, x11
adcs xzr, x4, x13
mrs x13, NZCV
adcs x11, x26, x8
msr NZCV, x10
adcs x5, x11, xzr
mrs x10, NZCV
str x10, [sp, #64] ; 8-byte Folded Spill
cmp x26, x7
add x2, x7, x14
cinc x10, x2, lo
msr NZCV, x13
adcs xzr, x8, x26
mrs x8, NZCV
mov w17, #-2
mul x13, x30, x17
adcs x11, x10, x23
str x11, [sp, #32] ; 8-byte Folded Spill
adds x19, x13, x0
mov x14, x0
msr NZCV, x24
adcs xzr, x9, x3
mrs x9, NZCV
str x9, [sp, #48] ; 8-byte Folded Spill
adcs x12, x19, x5
ldr x9, [sp, #112] ; 8-byte Folded Reload
cmp x9, x25
cset w4, lo
cmp x6, x1
ldr x9, [sp, #208] ; 8-byte Folded Reload
cinc x11, x9, lo
str x11, [sp, #112] ; 8-byte Folded Spill
ldr x9, [sp, #224] ; 8-byte Folded Reload
cmp x25, x9
ldr x9, [sp, #176] ; 8-byte Folded Reload
cinc x26, x9, lo
add x26, x26, x11
ldr x9, [sp, #136] ; 8-byte Folded Reload
msr NZCV, x9
adcs x4, x26, x4
cmp x10, x7
cinc x26, x2, lo
msr NZCV, x8
adcs xzr, x23, x10
mrs x8, NZCV
str x8, [sp, #8] ; 8-byte Folded Spill
adcs x8, x26, x16
str x8, [sp, #72] ; 8-byte Folded Spill
mov x0, x16
ldr x16, [sp, #200] ; 8-byte Folded Reload
cmp x15, x16
cinc x23, x21, lo
ldr x8, [sp, #96] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x28, x15
mrs x8, NZCV
str x8, [sp, #96] ; 8-byte Folded Spill
adcs x15, x23, x27
msr NZCV, x22
adcs x22, x15, xzr
mrs x8, NZCV
str x8, [sp, #24] ; 8-byte Folded Spill
adds x1, x13, x14
mov x24, x14
str x14, [sp, #128] ; 8-byte Folded Spill
umulh x25, x30, x17
adcs x25, x3, x25
ldr x27, [x20, #32]
ldr x8, [sp, #40] ; 8-byte Folded Reload
adds x28, x8, x27
lsl x30, x28, #32
sub x11, x30, x28
ldp x9, x8, [sp, #80] ; 16-byte Folded Reload
adcs x8, x8, xzr
adcs x9, x9, xzr
adcs x12, x12, xzr
mrs x10, NZCV
ldr x13, [sp, #64] ; 8-byte Folded Reload
msr NZCV, x13
ldr x13, [sp, #32] ; 8-byte Folded Reload
adcs x20, x13, xzr
mrs x14, NZCV
ldr x13, [sp, #144] ; 8-byte Folded Reload
msr NZCV, x13
ldr x13, [sp, #120] ; 8-byte Folded Reload
adcs xzr, x13, x6
adcs x13, x4, xzr
ldr x15, [sp, #104] ; 8-byte Folded Reload
msr NZCV, x15
adcs x27, x13, xzr
str x27, [sp, #88] ; 8-byte Folded Spill
mrs x15, NZCV
str x15, [sp, #64] ; 8-byte Folded Spill
cmp x28, x30
mov w15, #-1
umulh x15, x28, x15
cset w4, hi
adds x6, x8, x4
adds x30, x6, x15
str x15, [sp, #224] ; 8-byte Folded Spill
cset w6, hs
cmn x8, x4
adcs x8, x9, x6
str x8, [sp, #176] ; 8-byte Folded Spill
mrs x17, NZCV
adcs x8, x11, x12
str x8, [sp, #136] ; 8-byte Folded Spill
ldr x8, [sp, #48] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x5, x1
mrs x19, NZCV
adcs x8, x25, x20
cmp x26, x7
cinc x6, x2, lo
ldr x9, [sp, #8] ; 8-byte Folded Reload
msr NZCV, x9
adcs xzr, x0, x26
mrs x0, NZCV
adcs x5, x6, x22
msr NZCV, x10
adcs x9, x8, xzr
str x9, [sp, #80] ; 8-byte Folded Spill
mrs x8, NZCV
str x8, [sp, #144] ; 8-byte Folded Spill
mov x10, x16
cmp x23, x16
cinc x26, x21, lo
ldr x8, [sp, #96] ; 8-byte Folded Reload
msr NZCV, x8
ldp x8, x16, [sp, #16] ; 16-byte Folded Reload
adcs xzr, x8, x23
mrs x8, NZCV
str x8, [sp, #104] ; 8-byte Folded Spill
adcs x8, x26, x27
msr NZCV, x16
adcs x4, x8, xzr
mrs x8, NZCV
str x8, [sp, #120] ; 8-byte Folded Spill
cmp x6, x7
cinc x1, x2, lo
msr NZCV, x0
adcs xzr, x22, x6
mrs x22, NZCV
adcs x8, x1, x4
str x8, [sp, #48] ; 8-byte Folded Spill
msr NZCV, x14
ldr x8, [sp, #72] ; 8-byte Folded Reload
adcs x14, x8, xzr
adcs x27, x5, xzr
mrs x8, NZCV
str x8, [sp, #72] ; 8-byte Folded Spill
mov w8, #-2
umulh x0, x28, x8
mul x6, x28, x8
adds x28, x6, x15
str x11, [sp, #208] ; 8-byte Folded Spill
adcs x6, x11, x0
msr NZCV, x17
adcs xzr, x12, x11
mrs x8, NZCV
str x8, [sp, #96] ; 8-byte Folded Spill
adcs x0, x28, x9
cmp x25, x3
add x23, x3, x24
cinc x9, x23, lo
msr NZCV, x19
adcs xzr, x20, x25
mrs x11, NZCV
adcs x20, x9, x14
ldr x15, [sp, #112] ; 8-byte Folded Reload
cmp x13, x15
cset w12, lo
cmp x26, x10
cinc x25, x21, lo
ldr x8, [sp, #216] ; 8-byte Folded Reload
cmp x15, x8
ldr x8, [sp, #184] ; 8-byte Folded Reload
cinc x16, x8, lo
add x16, x16, x25
ldr x8, [sp, #64] ; 8-byte Folded Reload
msr NZCV, x8
adcs x5, x16, x12
ldr x8, [sp, #232] ; 8-byte Folded Reload
ldur x12, [x8, #36]
adds x19, x30, x12
lsl x12, x19, #32
sub x13, x12, x19
ldr x8, [sp, #176] ; 8-byte Folded Reload
adcs x30, x8, xzr
ldr x8, [sp, #136] ; 8-byte Folded Reload
adcs x10, x8, xzr
adcs x0, x0, xzr
mrs x24, NZCV
msr NZCV, x22
adcs xzr, x4, x1
mrs x21, NZCV
mrs x4, NZCV
cmp x9, x3
cinc x8, x23, lo
msr NZCV, x11
adcs xzr, x14, x9
mrs x9, NZCV
adcs x11, x8, x27
ldr x14, [sp, #72] ; 8-byte Folded Reload
msr NZCV, x14
ldr x14, [sp, #48] ; 8-byte Folded Reload
adcs x17, x14, xzr
mrs x22, NZCV
cmp x19, x12
mov w12, #-1
umulh x15, x19, x12
str x15, [sp, #216] ; 8-byte Folded Spill
cset w12, hi
adds x14, x30, x12
adds x14, x14, x15
str x14, [sp, #136] ; 8-byte Folded Spill
cset w14, hs
cmn x30, x12
adcs x10, x10, x14
str x10, [sp, #184] ; 8-byte Folded Spill
mrs x30, NZCV
adcs x10, x13, x0
str x10, [sp, #176] ; 8-byte Folded Spill
mov x12, x13
cmp x8, x3
cinc x14, x23, lo
msr NZCV, x9
adcs xzr, x27, x8
mrs x10, NZCV
adcs x8, x14, x17
ldr x9, [sp, #144] ; 8-byte Folded Reload
msr NZCV, x9
adcs x20, x20, xzr
adcs x9, x11, xzr
adcs x15, x8, xzr
mrs x8, NZCV
str x8, [sp, #64] ; 8-byte Folded Spill
ldr x8, [sp, #104] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #88] ; 8-byte Folded Reload
adcs xzr, x8, x26
adcs x8, x5, xzr
cmp x8, x25
cset w11, lo
cmp x1, x7
cinc x13, x2, lo
cmp x13, x7
cinc x1, x2, lo
stp x1, x7, [sp, #144] ; 16-byte Folded Spill
ldr x16, [sp, #200] ; 8-byte Folded Reload
cmp x25, x16
ldr x2, [sp, #160] ; 8-byte Folded Reload
cinc x2, x2, lo
ldr x16, [sp, #120] ; 8-byte Folded Reload
msr NZCV, x16
adcs x7, x8, xzr
add x8, x2, x1
adcs x11, x8, x11
msr NZCV, x4
adcs x8, x13, x7
msr NZCV, x22
adcs x16, x8, xzr
mrs x2, NZCV
ldr x8, [sp, #96] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #80] ; 8-byte Folded Reload
adcs xzr, x8, x28
mrs x4, NZCV
adcs x22, x6, x20
msr NZCV, x24
adcs x22, x22, xzr
mrs x24, NZCV
cmp x14, x3
mov x8, x3
str x3, [sp, #56] ; 8-byte Folded Spill
cinc x26, x23, lo
msr NZCV, x10
adcs xzr, x17, x14
mrs x27, NZCV
adcs x10, x26, x16
msr NZCV, x21
adcs xzr, x7, x13
adcs x11, x11, xzr
str x11, [sp, #112] ; 8-byte Folded Spill
msr NZCV, x2
adcs x5, x11, xzr
str x5, [sp, #72] ; 8-byte Folded Spill
mrs x11, NZCV
str x11, [sp, #120] ; 8-byte Folded Spill
ldp x14, x21, [sp, #208] ; 16-byte Folded Reload
cmp x6, x14
ldr x11, [sp, #224] ; 8-byte Folded Reload
add x3, x14, x11
cinc x17, x3, lo
msr NZCV, x4
adcs xzr, x20, x6
mrs x2, NZCV
mov w11, #-2
mul x4, x19, x11
adcs x6, x17, x9
adds x7, x4, x21
msr NZCV, x30
mov x25, x12
adcs xzr, x0, x12
mrs x12, NZCV
adcs x20, x7, x22
cmp x17, x14
cinc x0, x3, lo
msr NZCV, x2
adcs xzr, x9, x17
mrs x9, NZCV
adcs x2, x0, x15
ldr x13, [sp, #64] ; 8-byte Folded Reload
msr NZCV, x13
adcs x1, x10, xzr
mrs x30, NZCV
msr NZCV, x24
adcs x13, x6, xzr
mrs x28, NZCV
adds x4, x4, x21
umulh x6, x19, x11
adcs x24, x25, x6
mov x7, x25
msr NZCV, x12
adcs xzr, x22, x4
mrs x10, NZCV
str x10, [sp, #200] ; 8-byte Folded Spill
adcs x4, x24, x13
cmp x0, x14
cinc x25, x3, lo
msr NZCV, x9
adcs xzr, x15, x0
mrs x15, NZCV
adcs x10, x25, x1
cmp x26, x8
cinc x6, x23, lo
msr NZCV, x27
adcs xzr, x16, x26
mrs x8, NZCV
str x8, [sp, #88] ; 8-byte Folded Spill
ldr x27, [sp, #232] ; 8-byte Folded Reload
ldr x8, [x27, #40]
adcs x19, x6, x5
ldr x9, [sp, #136] ; 8-byte Folded Reload
adds x0, x9, x8
lsl x8, x0, #32
sub x17, x8, x0
ldr x9, [sp, #184] ; 8-byte Folded Reload
adcs x22, x9, xzr
ldr x9, [sp, #176] ; 8-byte Folded Reload
adcs x9, x9, xzr
adcs x20, x20, xzr
adcs x4, x4, xzr
mrs x12, NZCV
msr NZCV, x30
adcs x30, x19, xzr
mrs x11, NZCV
str x11, [sp, #64] ; 8-byte Folded Spill
cmp x0, x8
mov w8, #-1
umulh x5, x0, x8
cset w8, hi
adds x19, x22, x8
adds x11, x19, x5
str x11, [sp, #80] ; 8-byte Folded Spill
cset w19, hs
cmn x22, x8
adcs x11, x9, x19
mrs x8, NZCV
adcs x9, x17, x20
stp x9, x11, [sp, #96] ; 16-byte Folded Spill
msr NZCV, x15
adcs xzr, x1, x25
mrs x26, NZCV
mrs x15, NZCV
msr NZCV, x28
adcs x1, x2, xzr
adcs x28, x10, xzr
mrs x19, NZCV
mov w22, #-2
mul x11, x0, x22
adds x2, x11, x5
mov x9, x5
str x5, [sp, #176] ; 8-byte Folded Spill
msr NZCV, x8
adcs xzr, x20, x17
str x17, [sp, #136] ; 8-byte Folded Spill
mrs x20, NZCV
adcs x2, x2, x4
cmp x24, x7
add x21, x7, x21
mov x5, x7
cinc x8, x21, lo
ldr x10, [sp, #200] ; 8-byte Folded Reload
msr NZCV, x10
adcs xzr, x13, x24
mrs x13, NZCV
adcs x16, x8, x1
msr NZCV, x12
adcs x7, x16, xzr
str x7, [sp, #160] ; 8-byte Folded Spill
mrs x12, NZCV
str x12, [sp, #200] ; 8-byte Folded Spill
cmp x25, x14
cinc x16, x3, lo
msr NZCV, x15
adcs x12, x16, x30
cmp x8, x5
mov x25, x5
cinc x15, x21, lo
mov x24, x21
msr NZCV, x13
adcs xzr, x1, x8
mrs x8, NZCV
adcs x13, x15, x28
str x13, [sp, #184] ; 8-byte Folded Spill
ldp x21, x5, [sp, #144] ; 16-byte Folded Reload
ldr x10, [sp, #112] ; 8-byte Folded Reload
cmp x10, x21
cset w13, lo
ldr x10, [sp, #56] ; 8-byte Folded Reload
cmp x6, x10
cinc x1, x23, lo
cmp x21, x5
ldr x5, [sp, #168] ; 8-byte Folded Reload
cinc x5, x5, lo
add x5, x5, x1
ldr x21, [sp, #120] ; 8-byte Folded Reload
msr NZCV, x21
adcs x13, x5, x13
msr NZCV, x19
adcs x21, x12, xzr
mrs x5, NZCV
adds x9, x11, x9
umulh x12, x0, x22
adcs x12, x17, x12
msr NZCV, x20
adcs xzr, x4, x9
mrs x9, NZCV
str x9, [sp, #168] ; 8-byte Folded Spill
adcs x9, x12, x7
cmp x15, x25
cinc x23, x24, lo
msr NZCV, x8
adcs xzr, x28, x15
mrs x20, NZCV
adcs x11, x23, x21
ldr x8, [sp, #88] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #72] ; 8-byte Folded Reload
adcs xzr, x8, x6
adcs x8, x13, xzr
cmp x8, x1
cset w13, lo
cmp x16, x14
cinc x7, x3, lo
cmp x7, x14
cinc x14, x3, lo
stp x14, x11, [sp, #144] ; 16-byte Folded Spill
cmp x1, x10
ldr x11, [sp, #128] ; 8-byte Folded Reload
cinc x11, x11, lo
ldr x10, [sp, #64] ; 8-byte Folded Reload
msr NZCV, x10
adcs x4, x8, xzr
add x8, x11, x14
adcs x6, x8, x13
msr NZCV, x26
adcs xzr, x30, x16
mrs x3, NZCV
ldur x8, [x27, #44]
adcs x11, x7, x4
ldr x10, [sp, #80] ; 8-byte Folded Reload
adds x0, x10, x8
lsl x8, x0, #32
sub x17, x8, x0
ldr x10, [sp, #104] ; 8-byte Folded Reload
adcs x1, x10, xzr
ldr x10, [sp, #96] ; 8-byte Folded Reload
adcs x30, x10, xzr
adcs x19, x2, xzr
adcs x10, x9, xzr
mrs x9, NZCV
str x9, [sp, #232] ; 8-byte Folded Spill
msr NZCV, x5
adcs x9, x11, xzr
mrs x11, NZCV
cmp x0, x8
mov w8, #-1
umulh x16, x0, x8
cset w13, hi
adds x8, x1, x13
mov x14, #-4294967295
adds x8, x8, x16
str x8, [sp, #112] ; 8-byte Folded Spill
add x8, x8, x14
stp x8, x10, [sp, #120] ; 16-byte Folded Spill
cset w15, hs
cmn x1, x13
adcs x10, x30, x15
mrs x15, NZCV
adcs x8, x17, x19
stp x8, x10, [sp, #96] ; 16-byte Folded Spill
mov x28, x25
cmp x23, x25
mov x13, x24
cinc x2, x24, lo
msr NZCV, x20
adcs xzr, x21, x23
mrs x14, NZCV
adcs x5, x2, x9
ldr x8, [sp, #200] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #184] ; 8-byte Folded Reload
adcs x20, x8, xzr
ldr x8, [sp, #152] ; 8-byte Folded Reload
adcs x21, x8, xzr
adcs x5, x5, xzr
mrs x22, NZCV
msr NZCV, x3
adcs xzr, x4, x7
adcs x7, x6, xzr
msr NZCV, x11
adcs x23, x7, xzr
mrs x25, NZCV
ldr x11, [sp, #136] ; 8-byte Folded Reload
cmp x12, x11
ldp x8, x3, [sp, #168] ; 16-byte Folded Reload
add x26, x11, x3
cinc x4, x26, lo
msr NZCV, x8
ldr x8, [sp, #160] ; 8-byte Folded Reload
adcs xzr, x8, x12
mrs x12, NZCV
mov w10, #-2
mul x24, x0, x10
adcs x27, x4, x20
mov x1, x16
adds x30, x24, x16
msr NZCV, x15
adcs xzr, x19, x17
mrs x15, NZCV
ldr x19, [sp, #128] ; 8-byte Folded Reload
adcs x6, x30, x19
cmp x4, x11
cinc x30, x26, lo
msr NZCV, x12
adcs xzr, x20, x4
mrs x12, NZCV
adcs x4, x30, x21
ldr x8, [sp, #232] ; 8-byte Folded Reload
msr NZCV, x8
adcs x20, x27, xzr
adcs x27, x4, xzr
mrs x8, NZCV
adds x4, x24, x16
umulh x0, x0, x10
adcs x24, x17, x0
msr NZCV, x15
adcs xzr, x19, x4
mrs x15, NZCV
adcs x0, x24, x20
cmp x30, x11
cinc x19, x26, lo
msr NZCV, x12
adcs xzr, x21, x30
mrs x16, NZCV
adcs x21, x19, x5
cmp x2, x28
mov x12, x28
cinc x30, x13, lo
mov x28, x13
msr NZCV, x14
adcs xzr, x9, x2
mrs x2, NZCV
adcs x10, x30, x23
msr NZCV, x22
adcs x22, x10, xzr
mrs x13, NZCV
cmp x24, x17
add x4, x17, x1
cinc x10, x4, lo
msr NZCV, x15
adcs xzr, x20, x24
mrs x15, NZCV
adcs x20, x10, x27
msr NZCV, x8
adcs x8, x21, xzr
mrs x21, NZCV
cmp x19, x11
cinc x24, x26, lo
msr NZCV, x16
adcs xzr, x5, x19
mrs x16, NZCV
adcs x5, x24, x22
cmp x10, x17
cinc x19, x4, lo
msr NZCV, x15
adcs xzr, x27, x10
mrs x10, NZCV
adcs x27, x19, x8
ldr x9, [sp, #144] ; 8-byte Folded Reload
cmp x7, x9
cset w15, lo
cmp x30, x12
cinc x7, x28, lo
ldr x28, [sp, #208] ; 8-byte Folded Reload
cmp x9, x28
ldr x28, [sp, #224] ; 8-byte Folded Reload
cinc x28, x28, lo
add x28, x28, x7
msr NZCV, x25
adcs x15, x28, x15
msr NZCV, x21
adcs x5, x5, xzr
mrs x21, NZCV
cmp x19, x17
cinc x25, x4, lo
msr NZCV, x10
adcs xzr, x8, x19
mrs x8, NZCV
adcs x10, x25, x5
msr NZCV, x2
adcs xzr, x23, x30
adcs x15, x15, xzr
cmp x15, x7
cset w2, lo
cmp x24, x11
cinc x19, x26, lo
cmp x19, x11
cinc x23, x26, lo
cmp x7, x12
ldr x14, [sp, #216] ; 8-byte Folded Reload
cinc x7, x14, lo
msr NZCV, x13
adcs x9, x15, xzr
add x15, x7, x23
adcs x15, x15, x2
msr NZCV, x16
adcs xzr, x22, x24
mrs x2, NZCV
adcs x12, x19, x9
msr NZCV, x21
adcs x7, x12, xzr
mrs x21, NZCV
cmp x25, x17
cinc x22, x4, lo
msr NZCV, x8
adcs xzr, x5, x25
mrs x8, NZCV
adcs x12, x22, x7
msr NZCV, x2
adcs xzr, x9, x19
adcs x9, x15, xzr
msr NZCV, x21
cset w15, hs
adcs x2, x9, xzr
mrs x5, NZCV
msr NZCV, x8
adcs xzr, x7, x22
mrs x8, NZCV
mrs x7, NZCV
adcs x19, x2, xzr
cmp x9, x23
cset w21, lo
cmp x22, x17
cinc x22, x4, lo
cmp x22, x17
cinc x4, x4, lo
cmp x23, x11
cinc x14, x3, lo
add x23, x14, x4
msr NZCV, x5
adcs x14, x23, x21
msr NZCV, x7
adcs xzr, x2, x22
adcs x14, x14, xzr
msr NZCV, x8
adcs xzr, x9, x15
adcs x8, x23, x21
cmp x4, x17
cinc x9, x1, lo
cmp x14, x4
cinc x15, x9, lo
mov w25, #-1
ldp x11, x13, [sp, #104] ; 16-byte Folded Reload
cmp x13, x25
cset w9, lo
sub x9, x11, x9
cmp x11, x9
mov x1, x11
cset w16, lo
ldr x11, [sp, #96] ; 8-byte Folded Reload
sub x16, x11, x16
cmp x11, x16
mov x3, x11
cset w2, lo
mov x11, #-4294967295
add x4, x6, x11
sub x2, x4, x2
mov x5, #-65534
movk x5, #0, lsl #16
add x5, x0, x5
cmp x6, x25
mov x28, x6
csetm x6, lo
cmp x4, x2
cset w4, lo
sub x4, x6, x4
add x4, x4, x5
add x6, x20, x11
mov w17, #-2
cmp x0, x17
csetm x7, lo
cmp x5, x4
cset w5, lo
sub x5, x7, x5
add x5, x5, x6
add x7, x27, x11
cmp x20, x25
csetm x21, lo
cmp x6, x5
cset w6, lo
sub x6, x21, x6
add x6, x6, x7
add x21, x10, x11
cmp x27, x25
csetm x23, lo
cmp x7, x6
cset w7, lo
sub x7, x23, x7
add x7, x7, x21
add x23, x12, x11
cmp x10, x25
csetm x24, lo
cmp x21, x7
cset w21, lo
sub x21, x24, x21
add x21, x21, x23
cmp x12, x25
csetm x24, lo
cmp x23, x21
cset w23, lo
adds x19, x19, x22
add x22, x19, x11
sub x23, x24, x23
add x23, x23, x22
adcs x8, x8, x11
mov x11, #-4294967295
cmp x19, x25
csetm x24, lo
cmp x22, x23
cset w22, lo
sub x22, x24, x22
add x22, x22, x8
add x11, x15, x11
cmp x14, x25
csetm x24, lo
cmp x8, x22
cset w8, lo
sub x8, x24, x8
add x8, x8, x11
cmp x15, x25
mov w26, #-1
csetm x24, lo
cmp x11, x8
cset w11, lo
mov x25, #-4294967296
cmp x24, x11
csel x11, x26, x25, eq
csetm x24, ne
and x17, x13, x24
ldr x13, [sp, #120] ; 8-byte Folded Reload
and x25, x11, x13
orr x17, x25, x17
and x13, x1, x24
and x9, x11, x9
orr x9, x9, x13
and x13, x3, x24
and x16, x11, x16
orr x13, x16, x13
and x16, x28, x24
and x1, x11, x2
orr x16, x1, x16
and x0, x0, x24
and x1, x11, x4
orr x0, x1, x0
ldr x1, [sp, #192] ; 8-byte Folded Reload
str x17, [x1]
stur x9, [x1, #4]
and x9, x20, x24
and x17, x11, x5
orr x9, x17, x9
str x13, [x1, #8]
stur x16, [x1, #12]
and x13, x27, x24
and x16, x11, x6
orr x13, x16, x13
and x10, x10, x24
and x16, x11, x7
orr x10, x16, x10
str x0, [x1, #16]
stur x9, [x1, #20]
and x9, x12, x24
and x12, x11, x21
orr x9, x12, x9
and x12, x19, x24
and x16, x11, x23
orr x12, x16, x12
and x14, x14, x24
and x16, x11, x22
orr x14, x16, x14
and x15, x15, x24
str x13, [x1, #24]
stur x10, [x1, #28]
and x8, x11, x8
str x9, [x1, #32]
stur x12, [x1, #36]
orr x8, x8, x15
str x14, [x1, #40]
stur x8, [x1, #44]
ldp x29, x30, [sp, #320] ; 16-byte Folded Reload
ldp x20, x19, [sp, #304] ; 16-byte Folded Reload
ldp x22, x21, [sp, #288] ; 16-byte Folded Reload
ldp x24, x23, [sp, #272] ; 16-byte Folded Reload
ldp x26, x25, [sp, #256] ; 16-byte Folded Reload
ldp x28, x27, [sp, #240] ; 16-byte Folded Reload
add sp, sp, #336
ret
.cfi_endproc
; -- End function
.globl _fiat_p384_to_montgomery ; -- Begin function fiat_p384_to_montgomery
.p2align 2
_fiat_p384_to_montgomery: ; @fiat_p384_to_montgomery
.cfi_startproc
; %bb.0:
stp x28, x27, [sp, #-96]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 96
stp x26, x25, [sp, #16] ; 16-byte Folded Spill
stp x24, x23, [sp, #32] ; 16-byte Folded Spill
stp x22, x21, [sp, #48] ; 16-byte Folded Spill
stp x20, x19, [sp, #64] ; 16-byte Folded Spill
stp x29, x30, [sp, #80] ; 16-byte Folded Spill
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
sub sp, sp, #512
.cfi_def_cfa_offset 608
str x1, [sp, #328] ; 8-byte Folded Spill
ldr x6, [x0]
ldur x3, [x0, #4]
mov x1, x0
lsl x8, x6, #1
lsr x0, x6, #63
mov w5, #-2
mul x9, x6, x5
umulh x10, x6, x5
lsl x11, x6, #32
mov w12, #-1
umulh x16, x6, x12
stp x16, x0, [sp, #432] ; 16-byte Folded Spill
adds x12, x9, x16
sub x15, x11, x6
str x15, [sp, #496] ; 8-byte Folded Spill
adcs x13, x15, x10
cmp x13, x15
add x17, x15, x16
str x17, [sp, #464] ; 8-byte Folded Spill
cinc x14, x17, lo
cmp x14, x15
cinc x17, x17, lo
str x17, [sp, #408] ; 8-byte Folded Spill
cmp x6, x11
cset w15, hi
orr x15, x9, x15
adds x15, x15, x16
cset w16, hs
add x11, x11, x6
cmp x11, x8
cinc x0, x0, lo
mul x20, x3, x5
add x2, x20, x10
adds x25, x15, x3
lsl x15, x25, #32
sub x28, x15, x25
adcs x16, x2, x16
cmp x16, x20
cset w2, lo
adds x11, x11, x2
cset w2, hs
adds x12, x0, x12
adcs x0, x13, x9
adcs x27, x14, x10
adcs x19, x17, x8
mrs x8, NZCV
str x8, [sp, #400] ; 8-byte Folded Spill
lsl x13, x3, #1
str x13, [sp, #448] ; 8-byte Folded Spill
umulh x8, x3, x5
mov x21, x3
str x3, [sp, #304] ; 8-byte Folded Spill
str x8, [sp, #416] ; 8-byte Folded Spill
adds x8, x11, x8
adcs x9, x12, x2
cmp x9, x12
cset w10, lo
adds x3, x9, x13
mov w13, #-1
umulh x9, x25, x13
adcs x12, x0, x10
cmp x25, x15
cset w10, hi
adds x11, x16, x10
adds x11, x11, x9
mov x7, x9
cset w14, hs
cmn x16, x10
str x1, [sp, #504] ; 8-byte Folded Spill
ldr x10, [x1, #8]
adcs x8, x8, x14
mrs x24, NZCV
mul x15, x10, x5
adcs x9, x28, x3
cmn x11, x10
adcs x14, x15, x8
str x15, [sp, #392] ; 8-byte Folded Spill
adds x16, x11, x10
mov x17, x10
lsl x11, x16, #32
sub x26, x11, x16
adcs xzr, x8, x15
mrs x8, NZCV
stp x16, x8, [sp, #368] ; 16-byte Folded Spill
umulh x10, x10, x5
adcs x8, x10, x9
mov x23, x10
str x10, [sp, #312] ; 8-byte Folded Spill
cmp x16, x11
umulh x10, x16, x13
str x10, [sp, #472] ; 8-byte Folded Spill
cset w11, hi
adds x15, x14, x11
adds x15, x15, x10
cset w16, hs
cmn x14, x11
adcs x8, x8, x16
mrs x10, NZCV
str x10, [sp, #384] ; 8-byte Folded Spill
mrs x11, NZCV
ldur x10, [x1, #12]
cmn x15, x10
mul x14, x10, x5
adcs x13, x14, x8
mov x16, x14
str x14, [sp, #344] ; 8-byte Folded Spill
str x13, [sp, #424] ; 8-byte Folded Spill
adds x14, x15, x10
mov x13, x10
str x14, [sp, #280] ; 8-byte Folded Spill
lsl x10, x14, #32
str x10, [sp, #320] ; 8-byte Folded Spill
sub x10, x10, x14
str x10, [sp, #488] ; 8-byte Folded Spill
adcs xzr, x8, x16
mrs x16, NZCV
mrs x2, NZCV
umulh x8, x25, x5
mul x15, x25, x5
lsr x22, x21, #63
adds x21, x12, x22
mov x30, x7
str x7, [sp, #360] ; 8-byte Folded Spill
adds x25, x15, x7
adcs x14, x28, x8
msr NZCV, x24
adcs xzr, x3, x28
mrs x1, NZCV
adcs x21, x25, x21
ldr x8, [sp, #376] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x9, x23
mrs x10, NZCV
str x17, [sp, #456] ; 8-byte Folded Spill
lsl x23, x17, #1
adcs x15, x23, x21
msr NZCV, x11
adcs x11, x26, x15
cmp x12, x0
cset w0, lo
adds x12, x12, x22
adcs x0, x27, x0
ldr x8, [sp, #384] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x15, x26
mov x4, x26
str x26, [sp, #480] ; 8-byte Folded Spill
mrs x3, NZCV
mrs x9, NZCV
umulh x15, x13, x5
stp x9, x15, [sp, #376] ; 16-byte Folded Spill
msr NZCV, x2
adcs xzr, x11, x15
mrs x2, NZCV
mrs x8, NZCV
msr NZCV, x16
adcs x11, x15, x11
stp x11, x8, [sp, #288] ; 16-byte Folded Spill
cmp x0, x27
cset w16, lo
adds x27, x0, x20
mov x11, x19
adcs x0, x19, x16
msr NZCV, x1
adcs xzr, x12, x25
mrs x16, NZCV
adcs x1, x14, x27
msr NZCV, x10
adcs xzr, x21, x23
mrs x5, NZCV
lsr x26, x17, #63
adcs x25, x26, x1
ldr x8, [sp, #400] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #440] ; 8-byte Folded Reload
adcs x6, x8, x6
cset w20, hs
ldr x10, [sp, #496] ; 8-byte Folded Reload
ldr x8, [sp, #408] ; 8-byte Folded Reload
cmp x8, x10
ldr x9, [sp, #464] ; 8-byte Folded Reload
cinc x21, x9, lo
cmp x21, x10
cinc x8, x9, lo
cmp x8, x10
cinc x24, x9, lo
adds x12, x6, x21
adcs x17, x8, x20
mov w8, #-2
ldr x9, [sp, #368] ; 8-byte Folded Reload
umulh x6, x9, x8
mul x20, x9, x8
adcs x19, x24, xzr
str x19, [sp, #368] ; 8-byte Folded Spill
mrs x8, NZCV
str x8, [sp, #400] ; 8-byte Folded Spill
ldr x10, [sp, #472] ; 8-byte Folded Reload
adds x8, x20, x10
adcs x15, x4, x6
msr NZCV, x3
adcs x6, x8, x25
lsl x7, x13, #1
mov x9, x13
str x13, [sp, #352] ; 8-byte Folded Spill
msr NZCV, x2
adcs x13, x7, x6
stp x7, x13, [sp, #240] ; 16-byte Folded Spill
cmp x0, x11
cset w2, lo
ldr x13, [sp, #416] ; 8-byte Folded Reload
adds x13, x0, x13
adcs x2, x12, x2
cmp x14, x28
add x21, x28, x30
cinc x3, x21, lo
msr NZCV, x16
adcs xzr, x27, x14
mrs x0, NZCV
adcs x16, x3, x13
msr NZCV, x5
mov x4, x26
str x26, [sp, #336] ; 8-byte Folded Spill
adcs xzr, x1, x26
mrs x1, NZCV
ldr x30, [sp, #392] ; 8-byte Folded Reload
adcs x27, x30, x16
ldr x11, [sp, #376] ; 8-byte Folded Reload
msr NZCV, x11
adcs xzr, x25, x8
mrs x20, NZCV
adcs x5, x15, x27
ldp x8, x14, [sp, #296] ; 16-byte Folded Reload
msr NZCV, x8
adcs xzr, x6, x7
mrs x25, NZCV
lsr x8, x9, #63
adcs x9, x8, x5
str x9, [sp, #416] ; 8-byte Folded Spill
mov x9, x8
str x8, [sp, #440] ; 8-byte Folded Spill
adds x7, x22, x14
cmp x2, x12
cset w11, lo
ldr x8, [sp, #448] ; 8-byte Folded Reload
adds x2, x2, x8
adcs x11, x17, x11
cmp x11, x17
cset w12, lo
adds x11, x11, x7
adcs x12, x19, x12
cmn x22, x14
adcs x14, x12, xzr
mrs x6, NZCV
cmp x3, x28
cinc x7, x21, lo
msr NZCV, x0
adcs xzr, x13, x3
mrs x17, NZCV
adcs x13, x7, x2
msr NZCV, x1
adcs xzr, x16, x30
mrs x1, NZCV
ldr x22, [sp, #312] ; 8-byte Folded Reload
adcs x3, x22, x13
ldr x26, [sp, #480] ; 8-byte Folded Reload
cmp x15, x26
add x16, x26, x10
mov x30, x26
cinc x8, x16, lo
msr NZCV, x20
adcs xzr, x27, x15
mrs x26, NZCV
adcs x15, x8, x3
msr NZCV, x25
adcs xzr, x5, x9
mrs x25, NZCV
ldr x0, [sp, #344] ; 8-byte Folded Reload
adcs x9, x0, x15
str x9, [sp, #408] ; 8-byte Folded Spill
cmp x7, x28
cinc x27, x21, lo
msr NZCV, x17
adcs xzr, x2, x7
mrs x10, NZCV
adcs x2, x27, x11
msr NZCV, x1
adcs xzr, x13, x22
mrs x13, NZCV
adcs x1, x23, x2
ldr x9, [sp, #496] ; 8-byte Folded Reload
cmp x24, x9
ldr x9, [sp, #464] ; 8-byte Folded Reload
cinc x9, x9, lo
str x9, [sp, #464] ; 8-byte Folded Spill
ldr x17, [sp, #400] ; 8-byte Folded Reload
msr NZCV, x17
adcs x5, x9, xzr
str x5, [sp, #376] ; 8-byte Folded Spill
mrs x9, NZCV
str x9, [sp, #304] ; 8-byte Folded Spill
cmp x27, x28
cinc x19, x21, lo
msr NZCV, x10
adcs xzr, x11, x27
mrs x10, NZCV
adcs x11, x19, x14
ldr x9, [sp, #456] ; 8-byte Folded Reload
adds x27, x4, x9
msr NZCV, x13
adcs xzr, x2, x23
mrs x17, NZCV
adcs x13, x27, x11
msr NZCV, x25
adcs xzr, x15, x0
mrs x9, NZCV
stp x9, x9, [sp, #256] ; 16-byte Folded Spill
ldr x9, [sp, #368] ; 8-byte Folded Reload
cmp x12, x9
cset w12, lo
msr NZCV, x6
adcs x9, x5, x12
str x9, [sp, #344] ; 8-byte Folded Spill
cmp x19, x28
cinc x12, x21, lo
msr NZCV, x10
adcs xzr, x14, x19
mrs x10, NZCV
str x10, [sp, #296] ; 8-byte Folded Spill
adcs x10, x12, x9
mov x2, x12
msr NZCV, x17
adcs xzr, x11, x27
adcs x9, x10, xzr
str x9, [sp, #272] ; 8-byte Folded Spill
mrs x9, NZCV
str x9, [sp, #312] ; 8-byte Folded Spill
cmp x8, x30
cinc x11, x16, lo
msr NZCV, x26
adcs xzr, x3, x8
mrs x8, NZCV
adcs x12, x11, x1
ldr x7, [sp, #280] ; 8-byte Folded Reload
ldr x9, [sp, #320] ; 8-byte Folded Reload
cmp x7, x9
mov w9, #-1
umulh x10, x7, x9
cset w9, hi
ldr x15, [sp, #424] ; 8-byte Folded Reload
adds x14, x15, x9
adds x17, x14, x10
mov x0, x10
cset w14, hs
cmn x15, x9
ldr x9, [sp, #288] ; 8-byte Folded Reload
adcs x14, x9, x14
mrs x15, NZCV
ldr x23, [sp, #488] ; 8-byte Folded Reload
ldr x10, [sp, #248] ; 8-byte Folded Reload
adcs x5, x23, x10
cmp x11, x30
cinc x22, x16, lo
msr NZCV, x8
adcs xzr, x1, x11
mrs x1, NZCV
ldr x8, [sp, #504] ; 8-byte Folded Reload
ldr x8, [x8, #16]
adcs x4, x22, x13
cmn x17, x8
mov w9, #-2
mul x11, x8, x9
adcs x9, x11, x14
str x9, [sp, #288] ; 8-byte Folded Spill
adds x24, x17, x8
lsl x17, x24, #32
sub x30, x17, x24
adcs xzr, x14, x11
mrs x3, NZCV
mov w14, #-2
mov x26, x8
str x8, [sp, #368] ; 8-byte Folded Spill
umulh x9, x8, x14
adcs x8, x9, x5
str x8, [sp, #232] ; 8-byte Folded Spill
stp x9, x11, [sp, #392] ; 16-byte Folded Spill
msr NZCV, x1
adcs xzr, x13, x22
mrs x6, NZCV
mrs x1, NZCV
mul x25, x7, x14
mov w19, #-2
ldr x8, [sp, #256] ; 8-byte Folded Reload
msr NZCV, x8
ldr x13, [sp, #384] ; 8-byte Folded Reload
adcs x27, x13, x12
adds x14, x25, x0
msr NZCV, x15
mov x11, x23
adcs xzr, x10, x23
mrs x15, NZCV
ldr x8, [sp, #416] ; 8-byte Folded Reload
adcs x23, x14, x8
ldr x10, [sp, #264] ; 8-byte Folded Reload
msr NZCV, x10
adcs xzr, x12, x13
mrs x12, NZCV
ldr x13, [sp, #240] ; 8-byte Folded Reload
adcs x14, x13, x4
msr NZCV, x3
adcs xzr, x5, x9
mrs x3, NZCV
lsl x10, x26, #1
str x10, [sp, #384] ; 8-byte Folded Spill
adcs x9, x10, x23
str x9, [sp, #248] ; 8-byte Folded Spill
msr NZCV, x12
adcs xzr, x4, x13
mrs x13, NZCV
mrs x12, NZCV
adds x20, x25, x0
mov x9, x0
str x0, [sp, #320] ; 8-byte Folded Spill
umulh x25, x7, x19
adcs x25, x11, x25
msr NZCV, x15
adcs xzr, x8, x20
mrs x15, NZCV
ldr x4, [sp, #408] ; 8-byte Folded Reload
adcs x8, x25, x4
str x8, [sp, #416] ; 8-byte Folded Spill
msr NZCV, x3
adcs xzr, x23, x10
mrs x8, NZCV
stp x8, x8, [sp, #256] ; 16-byte Folded Spill
cmp x2, x28
cinc x8, x21, lo
ldr x10, [sp, #496] ; 8-byte Folded Reload
ldr x0, [sp, #464] ; 8-byte Folded Reload
cmp x0, x10
ldr x10, [sp, #432] ; 8-byte Folded Reload
cinc x0, x10, lo
ldr x10, [sp, #304] ; 8-byte Folded Reload
msr NZCV, x10
adcs x3, x8, x0
ldr x10, [sp, #456] ; 8-byte Folded Reload
ldr x0, [sp, #336] ; 8-byte Folded Reload
cmn x0, x10
ldr x10, [sp, #272] ; 8-byte Folded Reload
adcs x7, x10, xzr
mrs x10, NZCV
cmp x25, x11
add x0, x11, x9
mov x5, x11
cinc x9, x0, lo
mov x26, x0
msr NZCV, x15
adcs xzr, x4, x25
mrs x25, NZCV
adcs x11, x9, x27
str x11, [sp, #496] ; 8-byte Folded Spill
ldr x21, [sp, #480] ; 8-byte Folded Reload
cmp x22, x21
cinc x22, x16, lo
msr NZCV, x1
adcs x1, x22, x7
msr NZCV, x10
cset w11, hs
ldr x20, [sp, #352] ; 8-byte Folded Reload
ldr x10, [sp, #440] ; 8-byte Folded Reload
adds x15, x10, x20
msr NZCV, x12
adcs x19, x15, x1
ldr x10, [sp, #376] ; 8-byte Folded Reload
ldr x12, [sp, #344] ; 8-byte Folded Reload
cmp x12, x10
cset w10, lo
ldr x4, [sp, #296] ; 8-byte Folded Reload
msr NZCV, x4
adcs xzr, x12, x2
adcs x10, x3, x10
cmp x10, x8
cset w3, lo
cmp x22, x21
cinc x4, x16, lo
cmp x4, x21
cinc x16, x16, lo
str x16, [sp, #344] ; 8-byte Folded Spill
cmp x8, x28
ldr x8, [sp, #360] ; 8-byte Folded Reload
cinc x8, x8, lo
ldr x12, [sp, #312] ; 8-byte Folded Reload
msr NZCV, x12
adcs x12, x10, x11
add x8, x8, x16
adcs x8, x8, x3
str x8, [sp, #456] ; 8-byte Folded Spill
msr NZCV, x6
adcs xzr, x7, x22
mrs x11, NZCV
adcs x8, x4, x12
msr NZCV, x13
adcs xzr, x1, x15
adcs x23, x8, xzr
mrs x21, NZCV
cmp x9, x5
cinc x8, x0, lo
msr NZCV, x25
adcs xzr, x27, x9
mrs x9, NZCV
adcs x28, x8, x14
mov x3, x24
cmp x24, x17
mov w10, #-1
umulh x2, x24, x10
str x24, [sp, #208] ; 8-byte Folded Spill
cset w16, hi
ldr x13, [sp, #288] ; 8-byte Folded Reload
adds x17, x13, x16
adds x17, x17, x2
str x2, [sp, #408] ; 8-byte Folded Spill
cset w1, hs
cmn x13, x16
ldr x10, [sp, #232] ; 8-byte Folded Reload
adcs x1, x10, x1
mrs x22, NZCV
str x30, [sp, #448] ; 8-byte Folded Spill
ldr x0, [sp, #248] ; 8-byte Folded Reload
adcs x13, x30, x0
cmp x8, x5
mov x16, x5
cinc x25, x26, lo
msr NZCV, x9
adcs xzr, x14, x8
mrs x27, NZCV
ldr x7, [sp, #504] ; 8-byte Folded Reload
ldur x14, [x7, #20]
adcs x8, x25, x19
stp x26, x8, [sp, #424] ; 16-byte Folded Spill
cmn x17, x14
mov w8, #-2
mul x9, x14, x8
adcs x10, x9, x1
str x9, [sp, #304] ; 8-byte Folded Spill
adds x5, x17, x14
lsl x17, x5, #32
sub x15, x17, x5
str x15, [sp, #464] ; 8-byte Folded Spill
adcs xzr, x1, x9
mrs x9, NZCV
umulh x8, x14, x8
adcs x24, x8, x13
mov x6, x8
str x8, [sp, #296] ; 8-byte Folded Spill
ldr x8, [sp, #440] ; 8-byte Folded Reload
cmn x8, x20
adcs x23, x23, xzr
cset w8, hs
cmp x25, x16
cinc x1, x26, lo
msr NZCV, x27
adcs xzr, x19, x25
mrs x16, NZCV
str x16, [sp, #288] ; 8-byte Folded Spill
adcs x16, x1, x23
str x16, [sp, #440] ; 8-byte Folded Spill
msr NZCV, x11
adcs xzr, x12, x4
ldr x11, [sp, #456] ; 8-byte Folded Reload
adcs x11, x11, xzr
str x11, [sp, #280] ; 8-byte Folded Spill
msr NZCV, x21
adcs x4, x11, x8
str x4, [sp, #360] ; 8-byte Folded Spill
mrs x8, NZCV
str x8, [sp, #336] ; 8-byte Folded Spill
ldr x11, [sp, #368] ; 8-byte Folded Reload
lsr x12, x11, #63
mov w8, #-2
mul x21, x3, x8
mov w19, #-2
ldr x8, [sp, #256] ; 8-byte Folded Reload
msr NZCV, x8
ldr x16, [sp, #416] ; 8-byte Folded Reload
adcs x27, x12, x16
adds x8, x21, x2
msr NZCV, x22
adcs xzr, x0, x30
mrs x0, NZCV
str x0, [sp, #272] ; 8-byte Folded Spill
adcs x22, x8, x27
ldr x8, [sp, #264] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x16, x12
mov x3, x12
str x12, [sp, #216] ; 8-byte Folded Spill
mrs x8, NZCV
ldr x12, [sp, #400] ; 8-byte Folded Reload
ldr x16, [sp, #496] ; 8-byte Folded Reload
adcs x0, x12, x16
str x0, [sp, #312] ; 8-byte Folded Spill
msr NZCV, x9
adcs xzr, x13, x6
mrs x9, NZCV
str x9, [sp, #352] ; 8-byte Folded Spill
lsl x9, x14, #1
mov x25, x14
stp x9, x14, [sp, #224] ; 16-byte Folded Spill
adcs x6, x9, x22
mov x14, x9
msr NZCV, x8
adcs xzr, x16, x12
mrs x8, NZCV
ldr x0, [sp, #392] ; 8-byte Folded Reload
adcs x9, x0, x28
str x9, [sp, #496] ; 8-byte Folded Spill
cmp x5, x17
mov w9, #-1
umulh x30, x5, x9
cset w12, hi
adds x13, x10, x12
adds x13, x13, x30
str x30, [sp, #416] ; 8-byte Folded Spill
cset w17, hs
cmn x10, x12
adcs x10, x24, x17
mrs x9, NZCV
str x9, [sp, #400] ; 8-byte Folded Spill
adcs x16, x15, x6
msr NZCV, x8
adcs xzr, x28, x0
mrs x28, NZCV
mov x26, x7
ldr x8, [x7, #24]
ldr x12, [sp, #384] ; 8-byte Folded Reload
ldr x7, [sp, #432] ; 8-byte Folded Reload
adcs x9, x12, x7
str x9, [sp, #200] ; 8-byte Folded Spill
cmn x13, x8
mul x9, x8, x19
adcs x0, x9, x10
str x9, [sp, #264] ; 8-byte Folded Spill
adds x17, x13, x8
lsl x20, x17, #32
sub x15, x20, x17
str x15, [sp, #456] ; 8-byte Folded Spill
adcs xzr, x10, x9
mrs x13, NZCV
mov w9, #-2
umulh x24, x8, x9
mov x19, x8
str x8, [sp, #392] ; 8-byte Folded Spill
adcs x2, x24, x16
ldr x8, [sp, #488] ; 8-byte Folded Reload
cmp x1, x8
ldr x8, [sp, #424] ; 8-byte Folded Reload
cinc x8, x8, lo
str x8, [sp, #376] ; 8-byte Folded Spill
ldr x10, [sp, #288] ; 8-byte Folded Reload
msr NZCV, x10
adcs xzr, x23, x1
mrs x10, NZCV
stp x10, x24, [sp, #248] ; 16-byte Folded Spill
adcs x10, x8, x4
adds x23, x3, x11
msr NZCV, x28
adcs xzr, x7, x12
mrs x8, NZCV
str x8, [sp, #184] ; 8-byte Folded Spill
ldr x8, [sp, #440] ; 8-byte Folded Reload
adcs xzr, x8, x23
adcs x8, x10, xzr
str x8, [sp, #192] ; 8-byte Folded Spill
mrs x8, NZCV
str x8, [sp, #240] ; 8-byte Folded Spill
ldr x7, [sp, #408] ; 8-byte Folded Reload
adds x10, x21, x7
ldr x8, [sp, #208] ; 8-byte Folded Reload
umulh x1, x8, x9
mov w4, #-2
ldr x8, [sp, #448] ; 8-byte Folded Reload
adcs x21, x8, x1
ldr x9, [sp, #272] ; 8-byte Folded Reload
msr NZCV, x9
adcs xzr, x27, x10
mrs x9, NZCV
ldr x3, [sp, #312] ; 8-byte Folded Reload
adcs x12, x21, x3
str x12, [sp, #272] ; 8-byte Folded Spill
ldr x10, [sp, #352] ; 8-byte Folded Reload
msr NZCV, x10
adcs xzr, x22, x14
mrs x10, NZCV
str x10, [sp, #168] ; 8-byte Folded Spill
lsr x11, x25, #63
str x11, [sp, #128] ; 8-byte Folded Spill
mul x10, x5, x4
adcs x4, x11, x12
adds x14, x10, x30
ldr x11, [sp, #400] ; 8-byte Folded Reload
msr NZCV, x11
ldr x25, [sp, #464] ; 8-byte Folded Reload
adcs xzr, x6, x25
mrs x11, NZCV
str x11, [sp, #160] ; 8-byte Folded Spill
adcs x28, x14, x4
msr NZCV, x13
adcs xzr, x16, x24
mrs x11, NZCV
str x11, [sp, #152] ; 8-byte Folded Spill
lsl x11, x19, #1
str x11, [sp, #384] ; 8-byte Folded Spill
adcs x24, x11, x28
cmp x21, x8
add x13, x8, x7
mov x14, x8
cinc x12, x13, lo
msr NZCV, x9
adcs xzr, x3, x21
mrs x8, NZCV
ldr x9, [sp, #496] ; 8-byte Folded Reload
adcs x7, x12, x9
cmp x17, x20
mov w11, #-1
umulh x16, x17, x11
cset w11, hi
adds x6, x0, x11
adds x20, x6, x16
str x16, [sp, #432] ; 8-byte Folded Spill
cset w6, hs
cmn x0, x11
adcs x0, x2, x6
mrs x11, NZCV
str x11, [sp, #144] ; 8-byte Folded Spill
adcs x3, x15, x24
cmp x12, x14
mov x19, x14
cinc x22, x13, lo
str x13, [sp, #352] ; 8-byte Folded Spill
msr NZCV, x8
adcs xzr, x9, x12
mrs x14, NZCV
mov x21, x26
ldur x8, [x26, #28]
ldr x26, [sp, #200] ; 8-byte Folded Reload
adcs x6, x22, x26
cmn x20, x8
mov w11, #-2
mul x9, x8, x11
adcs x2, x9, x0
str x9, [sp, #312] ; 8-byte Folded Spill
adds x1, x20, x8
lsl x20, x1, #32
sub x27, x20, x1
str x27, [sp, #496] ; 8-byte Folded Spill
adcs xzr, x0, x9
mrs x0, NZCV
umulh x9, x8, x11
mov w30, #-2
mov x11, x8
str x8, [sp, #400] ; 8-byte Folded Spill
adcs x12, x9, x3
str x9, [sp, #288] ; 8-byte Folded Spill
ldr x8, [sp, #184] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #440] ; 8-byte Folded Reload
adcs x8, x23, x8
str x8, [sp, #208] ; 8-byte Folded Spill
cmp x22, x19
cinc x13, x13, lo
msr NZCV, x14
adcs xzr, x26, x22
mrs x14, NZCV
str x14, [sp, #176] ; 8-byte Folded Spill
adcs x23, x13, x8
ldr x8, [sp, #216] ; 8-byte Folded Reload
ldp x14, x22, [sp, #368] ; 16-byte Folded Reload
cmn x8, x14
ldr x8, [sp, #192] ; 8-byte Folded Reload
adcs x8, x8, xzr
str x8, [sp, #200] ; 8-byte Folded Spill
mrs x8, NZCV
str x8, [sp, #192] ; 8-byte Folded Spill
ldr x8, [sp, #416] ; 8-byte Folded Reload
adds x10, x10, x8
umulh x14, x5, x30
mov x26, x25
adcs x14, x25, x14
ldr x25, [sp, #344] ; 8-byte Folded Reload
ldr x15, [sp, #280] ; 8-byte Folded Reload
cmp x15, x25
cset w5, lo
ldp x19, x15, [sp, #480] ; 16-byte Folded Reload
cmp x22, x15
ldr x15, [sp, #424] ; 8-byte Folded Reload
cinc x22, x15, lo
str x22, [sp, #280] ; 8-byte Folded Spill
cmp x25, x19
ldr x19, [sp, #472] ; 8-byte Folded Reload
cinc x19, x19, lo
add x19, x19, x22
ldr x15, [sp, #336] ; 8-byte Folded Reload
msr NZCV, x15
adcs x15, x19, x5
str x15, [sp, #184] ; 8-byte Folded Spill
ldr x15, [sp, #168] ; 8-byte Folded Reload
msr NZCV, x15
ldr x25, [sp, #128] ; 8-byte Folded Reload
ldr x15, [sp, #272] ; 8-byte Folded Reload
adcs xzr, x15, x25
mrs x5, NZCV
ldr x22, [sp, #304] ; 8-byte Folded Reload
adcs x19, x22, x7
ldr x15, [sp, #160] ; 8-byte Folded Reload
msr NZCV, x15
adcs xzr, x4, x10
mrs x10, NZCV
adcs x30, x14, x19
str x30, [sp, #120] ; 8-byte Folded Spill
ldr x15, [sp, #152] ; 8-byte Folded Reload
msr NZCV, x15
ldr x4, [sp, #384] ; 8-byte Folded Reload
adcs xzr, x28, x4
mrs x15, NZCV
str x15, [sp, #88] ; 8-byte Folded Spill
mov w4, #-2
umulh x15, x17, x4
mul x17, x17, x4
ldr x4, [sp, #392] ; 8-byte Folded Reload
lsr x4, x4, #63
str x4, [sp, #344] ; 8-byte Folded Spill
adcs x4, x4, x30
str x4, [sp, #160] ; 8-byte Folded Spill
adds x17, x17, x16
str x17, [sp, #472] ; 8-byte Folded Spill
ldr x16, [sp, #456] ; 8-byte Folded Reload
adcs x28, x16, x15
ldr x15, [sp, #144] ; 8-byte Folded Reload
msr NZCV, x15
adcs xzr, x24, x16
mrs x15, NZCV
str x15, [sp, #48] ; 8-byte Folded Spill
adcs x15, x17, x4
str x15, [sp, #216] ; 8-byte Folded Spill
msr NZCV, x0
adcs xzr, x3, x9
mrs x9, NZCV
str x9, [sp, #40] ; 8-byte Folded Spill
lsl x9, x11, #1
str x9, [sp, #336] ; 8-byte Folded Spill
adcs x17, x9, x15
str x17, [sp, #56] ; 8-byte Folded Spill
msr NZCV, x5
adcs xzr, x7, x22
mrs x11, NZCV
ldr x9, [sp, #296] ; 8-byte Folded Reload
adcs x16, x9, x6
cmp x14, x26
add x22, x26, x8
cinc x3, x22, lo
msr NZCV, x10
adcs xzr, x19, x14
mrs x14, NZCV
adcs x10, x3, x16
str x10, [sp, #72] ; 8-byte Folded Spill
cmp x1, x20
mov w10, #-1
umulh x30, x1, x10
cset w10, hi
adds x4, x2, x10
adds x4, x4, x30
str x30, [sp, #440] ; 8-byte Folded Spill
cset w19, hs
cmn x2, x10
adcs x19, x12, x19
mrs x10, NZCV
str x10, [sp, #32] ; 8-byte Folded Spill
adcs x27, x27, x17
msr NZCV, x11
adcs xzr, x6, x9
mrs x15, NZCV
ldr x7, [sp, #224] ; 8-byte Folded Reload
adcs x0, x7, x23
cmp x3, x26
mov x2, x26
cinc x11, x22, lo
msr NZCV, x14
adcs xzr, x16, x3
mrs x24, NZCV
ldr x8, [x21, #32]
mov x5, x21
adcs x9, x11, x0
str x9, [sp, #80] ; 8-byte Folded Spill
cmn x4, x8
mov w9, #-2
mul x12, x8, x9
adcs x14, x12, x19
adds x10, x4, x8
lsl x16, x10, #32
sub x3, x16, x10
str x3, [sp, #480] ; 8-byte Folded Spill
adcs xzr, x19, x12
mrs x3, NZCV
umulh x9, x8, x9
mov x4, x8
str x8, [sp, #368] ; 8-byte Folded Spill
adcs x19, x9, x27
mov x17, x9
stp x9, x12, [sp, #296] ; 16-byte Folded Spill
ldr x21, [sp, #448] ; 8-byte Folded Reload
cmp x13, x21
ldr x6, [sp, #352] ; 8-byte Folded Reload
cinc x20, x6, lo
ldr x8, [sp, #176] ; 8-byte Folded Reload
msr NZCV, x8
ldp x12, x8, [sp, #200] ; 16-byte Folded Reload
adcs xzr, x8, x13
mrs x13, NZCV
adcs x9, x20, x12
mov x26, x25
ldr x25, [sp, #232] ; 8-byte Folded Reload
adds x8, x26, x25
msr NZCV, x15
adcs xzr, x23, x7
mrs x15, NZCV
adcs x7, x8, x9
cmp x11, x2
cinc x23, x22, lo
msr NZCV, x24
adcs xzr, x0, x11
mrs x11, NZCV
adcs x0, x23, x7
str x0, [sp, #152] ; 8-byte Folded Spill
str x10, [sp, #272] ; 8-byte Folded Spill
cmp x10, x16
mov w16, #-1
umulh x10, x10, x16
str x10, [sp, #424] ; 8-byte Folded Spill
cset w16, hi
adds x2, x14, x16
adds x10, x2, x10
cset w2, hs
cmn x14, x16
adcs x24, x19, x2
mrs x14, NZCV
str x14, [sp, #104] ; 8-byte Folded Spill
mrs x19, NZCV
ldr x14, [sp, #192] ; 8-byte Folded Reload
msr NZCV, x14
cset w14, hs
msr NZCV, x11
adcs xzr, x7, x23
mrs x11, NZCV
str x11, [sp, #192] ; 8-byte Folded Spill
str x11, [sp, #96] ; 8-byte Folded Spill
ldr x11, [sp, #248] ; 8-byte Folded Reload
msr NZCV, x11
ldr x11, [sp, #360] ; 8-byte Folded Reload
ldr x16, [sp, #376] ; 8-byte Folded Reload
adcs xzr, x11, x16
ldr x11, [sp, #184] ; 8-byte Folded Reload
adcs x16, x11, xzr
ldr x11, [sp, #240] ; 8-byte Folded Reload
msr NZCV, x11
adcs x11, x16, x14
str x11, [sp, #184] ; 8-byte Folded Spill
mrs x14, NZCV
stp x16, x14, [sp, #136] ; 16-byte Folded Spill
cmp x20, x21
cinc x14, x6, lo
str x14, [sp, #64] ; 8-byte Folded Spill
msr NZCV, x13
adcs xzr, x12, x20
mrs x12, NZCV
adcs x11, x14, x11
msr NZCV, x15
adcs xzr, x9, x8
adcs x8, x11, xzr
mrs x9, NZCV
stp x12, x9, [sp, #168] ; 16-byte Folded Spill
cmn x26, x25
adcs x8, x8, xzr
str x8, [sp, #128] ; 8-byte Folded Spill
mrs x8, NZCV
str x8, [sp, #112] ; 8-byte Folded Spill
ldr x8, [sp, #88] ; 8-byte Folded Reload
msr NZCV, x8
ldp x26, x16, [sp, #336] ; 16-byte Folded Reload
ldr x8, [sp, #120] ; 8-byte Folded Reload
adcs xzr, x8, x16
mrs x8, NZCV
ldr x0, [sp, #264] ; 8-byte Folded Reload
ldr x6, [sp, #72] ; 8-byte Folded Reload
adcs x9, x0, x6
ldr x11, [sp, #48] ; 8-byte Folded Reload
msr NZCV, x11
ldr x11, [sp, #472] ; 8-byte Folded Reload
ldr x12, [sp, #160] ; 8-byte Folded Reload
adcs xzr, x12, x11
mrs x25, NZCV
adcs x7, x28, x9
ldr x11, [sp, #40] ; 8-byte Folded Reload
msr NZCV, x11
ldr x11, [sp, #216] ; 8-byte Folded Reload
adcs xzr, x11, x26
mrs x11, NZCV
str x11, [sp, #88] ; 8-byte Folded Spill
mov w14, #-2
umulh x11, x1, x14
mul x12, x1, x14
ldur x13, [x5, #36]
ldr x15, [sp, #400] ; 8-byte Folded Reload
lsr x15, x15, #63
str x15, [sp, #120] ; 8-byte Folded Spill
adcs x15, x15, x7
cmn x10, x13
mul x14, x13, x14
adcs x2, x14, x24
str x2, [sp, #160] ; 8-byte Folded Spill
mov x20, x14
stp x15, x14, [sp, #232] ; 16-byte Folded Spill
adds x14, x12, x30
ldr x12, [sp, #496] ; 8-byte Folded Reload
adcs x11, x12, x11
str x11, [sp, #248] ; 8-byte Folded Spill
ldr x11, [sp, #32] ; 8-byte Folded Reload
msr NZCV, x11
ldr x11, [sp, #56] ; 8-byte Folded Reload
adcs xzr, x11, x12
mrs x11, NZCV
str x11, [sp, #208] ; 8-byte Folded Spill
adcs x12, x14, x15
stp x12, x14, [sp, #216] ; 16-byte Folded Spill
msr NZCV, x3
adcs xzr, x27, x17
mrs x11, NZCV
str x11, [sp, #200] ; 8-byte Folded Spill
lsl x11, x4, #1
str x11, [sp, #376] ; 8-byte Folded Spill
adcs x17, x11, x12
msr NZCV, x19
ldr x19, [sp, #480] ; 8-byte Folded Reload
adcs x14, x19, x17
msr NZCV, x8
adcs xzr, x6, x0
mrs x15, NZCV
ldr x5, [sp, #256] ; 8-byte Folded Reload
ldr x2, [sp, #80] ; 8-byte Folded Reload
adcs x3, x5, x2
ldr x30, [sp, #456] ; 8-byte Folded Reload
cmp x28, x30
ldr x11, [sp, #432] ; 8-byte Folded Reload
add x27, x30, x11
cinc x0, x27, lo
msr NZCV, x25
adcs xzr, x9, x28
mrs x8, NZCV
adcs x12, x0, x3
adds x6, x10, x13
str x13, [sp, #360] ; 8-byte Folded Spill
lsl x21, x6, #32
sub x9, x21, x6
str x9, [sp, #472] ; 8-byte Folded Spill
adcs xzr, x24, x20
mrs x10, NZCV
mov w9, #-2
umulh x9, x13, x9
adcs x25, x9, x14
mov x24, x9
str x9, [sp, #264] ; 8-byte Folded Spill
msr NZCV, x15
adcs xzr, x2, x5
mrs x13, NZCV
ldr x9, [sp, #384] ; 8-byte Folded Reload
ldr x28, [sp, #152] ; 8-byte Folded Reload
adcs x15, x9, x28
cmp x0, x30
cinc x2, x27, lo
msr NZCV, x8
adcs xzr, x3, x0
mrs x3, NZCV
adcs x1, x2, x15
str x1, [sp, #80] ; 8-byte Folded Spill
ldr x8, [sp, #104] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x17, x19
mrs x8, NZCV
str x8, [sp, #104] ; 8-byte Folded Spill
str x8, [sp, #48] ; 8-byte Folded Spill
ldr x19, [sp, #464] ; 8-byte Folded Reload
cmp x23, x19
cinc x20, x22, lo
ldr x8, [sp, #96] ; 8-byte Folded Reload
msr NZCV, x8
ldr x23, [sp, #128] ; 8-byte Folded Reload
adcs x17, x20, x23
ldr x5, [sp, #392] ; 8-byte Folded Reload
adds x11, x16, x5
msr NZCV, x13
adcs xzr, x28, x9
mrs x4, NZCV
adcs x13, x11, x17
cmp x2, x30
cinc x0, x27, lo
msr NZCV, x3
adcs xzr, x15, x2
mrs x28, NZCV
adcs x3, x0, x13
str x3, [sp, #152] ; 8-byte Folded Spill
msr NZCV, x10
adcs xzr, x14, x24
mrs x8, NZCV
str x8, [sp, #96] ; 8-byte Folded Spill
str x8, [sp, #32] ; 8-byte Folded Spill
ldr x15, [sp, #280] ; 8-byte Folded Reload
ldr x9, [sp, #136] ; 8-byte Folded Reload
cmp x9, x15
cset w10, lo
ldr x8, [sp, #448] ; 8-byte Folded Reload
ldr x16, [sp, #64] ; 8-byte Folded Reload
cmp x16, x8
ldr x9, [sp, #352] ; 8-byte Folded Reload
cinc x14, x9, lo
ldr x9, [sp, #488] ; 8-byte Folded Reload
cmp x15, x9
ldp x15, x9, [sp, #312] ; 16-byte Folded Reload
cinc x2, x9, lo
add x2, x2, x14
ldr x9, [sp, #144] ; 8-byte Folded Reload
msr NZCV, x9
adcs x10, x2, x10
ldr x9, [sp, #88] ; 8-byte Folded Reload
msr NZCV, x9
ldr x24, [sp, #120] ; 8-byte Folded Reload
adcs xzr, x7, x24
mrs x7, NZCV
adcs x9, x15, x12
str x9, [sp, #144] ; 8-byte Folded Spill
ldr x9, [sp, #112] ; 8-byte Folded Reload
msr NZCV, x9
cset w9, hs
msr NZCV, x28
adcs xzr, x13, x0
mrs x13, NZCV
mrs x2, NZCV
msr NZCV, x7
adcs xzr, x12, x15
mrs x12, NZCV
str x12, [sp, #352] ; 8-byte Folded Spill
ldr x28, [sp, #288] ; 8-byte Folded Reload
adcs xzr, x1, x28
mrs x12, NZCV
str x12, [sp, #16] ; 8-byte Folded Spill
adcs xzr, x3, x26
mrs x12, NZCV
str x12, [sp, #24] ; 8-byte Folded Spill
mrs x7, NZCV
ldp x12, x15, [sp, #168] ; 16-byte Folded Reload
msr NZCV, x12
ldr x12, [sp, #184] ; 8-byte Folded Reload
adcs xzr, x12, x16
adcs x10, x10, xzr
cmp x10, x14
cset w1, lo
cmp x20, x19
cinc x12, x22, lo
cmp x12, x19
cinc x16, x22, lo
str x16, [sp, #168] ; 8-byte Folded Spill
cmp x14, x8
ldr x14, [sp, #408] ; 8-byte Folded Reload
cinc x14, x14, lo
msr NZCV, x15
adcs x10, x10, x9
add x9, x14, x16
adcs x15, x9, x1
ldr x9, [sp, #192] ; 8-byte Folded Reload
msr NZCV, x9
adcs xzr, x23, x20
mrs x20, NZCV
adcs x9, x12, x10
msr NZCV, x4
adcs xzr, x17, x11
adcs x11, x9, xzr
mrs x19, NZCV
str x6, [sp, #56] ; 8-byte Folded Spill
cmp x6, x21
mov w8, #-1
umulh x8, x6, x8
str x8, [sp, #448] ; 8-byte Folded Spill
cset w9, hi
ldr x14, [sp, #160] ; 8-byte Folded Reload
adds x17, x14, x9
adds x17, x17, x8
cset w1, hs
cmn x14, x9
adcs x4, x25, x1
mrs x8, NZCV
str x8, [sp, #184] ; 8-byte Folded Spill
mrs x3, NZCV
ldr x8, [sp, #504] ; 8-byte Folded Reload
ldr x6, [x8, #40]
cmn x17, x6
mov w9, #-2
mul x9, x6, x9
adcs x14, x9, x4
str x14, [sp, #408] ; 8-byte Folded Spill
str x9, [sp, #320] ; 8-byte Folded Spill
adds x1, x17, x6
lsl x17, x1, #32
sub x8, x17, x1
str x8, [sp, #384] ; 8-byte Folded Spill
adcs xzr, x4, x9
mrs x8, NZCV
str x8, [sp, #176] ; 8-byte Folded Spill
mrs x14, NZCV
ldr x8, [sp, #344] ; 8-byte Folded Reload
cmn x8, x5
adcs x11, x11, xzr
cset w8, hs
cmp x0, x30
str x27, [sp, #72] ; 8-byte Folded Spill
cinc x0, x27, lo
msr NZCV, x2
adcs x4, x0, x11
msr NZCV, x20
adcs xzr, x10, x12
adcs x9, x15, xzr
str x9, [sp, #112] ; 8-byte Folded Spill
msr NZCV, x19
adcs x8, x9, x8
str x8, [sp, #312] ; 8-byte Folded Spill
mrs x9, NZCV
str x9, [sp, #88] ; 8-byte Folded Spill
cmp x0, x30
cinc x9, x27, lo
str x9, [sp, #136] ; 8-byte Folded Spill
msr NZCV, x13
adcs xzr, x11, x0
mrs x10, NZCV
str x10, [sp, #192] ; 8-byte Folded Spill
adcs x8, x9, x8
ldr x9, [sp, #400] ; 8-byte Folded Reload
adds x10, x24, x9
msr NZCV, x7
adcs xzr, x4, x10
adcs x8, x8, xzr
mrs x10, NZCV
str x10, [sp, #280] ; 8-byte Folded Spill
adds x20, x24, x9
adcs x8, x8, xzr
str x8, [sp, #40] ; 8-byte Folded Spill
mrs x8, NZCV
str x8, [sp, #64] ; 8-byte Folded Spill
ldr x8, [sp, #208] ; 8-byte Folded Reload
msr NZCV, x8
ldp x9, x8, [sp, #224] ; 16-byte Folded Reload
adcs xzr, x8, x9
mrs x16, NZCV
ldr x22, [sp, #248] ; 8-byte Folded Reload
ldr x15, [sp, #144] ; 8-byte Folded Reload
adcs x19, x22, x15
ldr x8, [sp, #200] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #376] ; 8-byte Folded Reload
ldr x9, [sp, #216] ; 8-byte Folded Reload
adcs xzr, x9, x8
mrs x23, NZCV
mov w8, #-2
ldr x9, [sp, #272] ; 8-byte Folded Reload
umulh x10, x9, x8
mul x7, x9, x8
mov w5, #-2
ldp x12, x8, [sp, #360] ; 16-byte Folded Reload
lsr x13, x8, #63
adcs x26, x13, x19
str x13, [sp, #120] ; 8-byte Folded Spill
ldr x0, [sp, #424] ; 8-byte Folded Reload
adds x9, x7, x0
ldp x24, x8, [sp, #472] ; 16-byte Folded Reload
adcs x2, x8, x10
ldr x8, [sp, #48] ; 8-byte Folded Reload
msr NZCV, x8
adcs x10, x9, x26
lsl x8, x12, #1
ldr x11, [sp, #32] ; 8-byte Folded Reload
msr NZCV, x11
adcs x11, x8, x10
mov x27, x8
str x8, [sp, #232] ; 8-byte Folded Spill
msr NZCV, x3
adcs x3, x24, x11
ldr x8, [sp, #352] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #80] ; 8-byte Folded Reload
adcs x28, x28, x8
str x6, [sp, #488] ; 8-byte Folded Spill
umulh x8, x6, x5
mov w25, #-2
msr NZCV, x14
adcs x21, x8, x3
mov x14, x8
str x8, [sp, #352] ; 8-byte Folded Spill
ldr x8, [sp, #16] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #152] ; 8-byte Folded Reload
ldr x5, [sp, #336] ; 8-byte Folded Reload
adcs x5, x5, x8
ldr x30, [sp, #496] ; 8-byte Folded Reload
cmp x22, x30
mov x7, x22
ldr x8, [sp, #440] ; 8-byte Folded Reload
add x22, x30, x8
cinc x8, x22, lo
msr NZCV, x16
adcs xzr, x15, x7
mrs x15, NZCV
adcs x7, x8, x28
ldr x16, [sp, #24] ; 8-byte Folded Reload
msr NZCV, x16
adcs x16, x20, x4
str x16, [sp, #48] ; 8-byte Folded Spill
msr NZCV, x23
adcs xzr, x19, x13
mrs x13, NZCV
str x13, [sp, #288] ; 8-byte Folded Spill
ldr x20, [sp, #304] ; 8-byte Folded Reload
adcs x23, x20, x7
ldr x13, [sp, #104] ; 8-byte Folded Reload
msr NZCV, x13
adcs xzr, x26, x9
mrs x9, NZCV
str x9, [sp, #200] ; 8-byte Folded Spill
adcs x26, x2, x23
ldr x9, [sp, #96] ; 8-byte Folded Reload
msr NZCV, x9
adcs xzr, x10, x27
mrs x27, NZCV
lsr x9, x12, #63
ldr x4, [sp, #56] ; 8-byte Folded Reload
mul x12, x4, x25
adcs x19, x9, x26
mov x25, x9
str x9, [sp, #160] ; 8-byte Folded Spill
ldr x9, [sp, #448] ; 8-byte Folded Reload
adds x10, x12, x9
ldr x13, [sp, #184] ; 8-byte Folded Reload
msr NZCV, x13
adcs xzr, x11, x24
mrs x11, NZCV
str x11, [sp, #184] ; 8-byte Folded Spill
adcs x11, x10, x19
str x11, [sp, #272] ; 8-byte Folded Spill
ldr x10, [sp, #176] ; 8-byte Folded Reload
msr NZCV, x10
adcs xzr, x3, x14
mrs x10, NZCV
stp x10, x1, [sp, #248] ; 16-byte Folded Spill
lsl x10, x6, #1
str x10, [sp, #400] ; 8-byte Folded Spill
adcs x11, x10, x11
cmp x1, x17
mov w10, #-1
umulh x10, x1, x10
str x10, [sp, #392] ; 8-byte Folded Spill
cset w17, hi
ldr x13, [sp, #408] ; 8-byte Folded Reload
adds x3, x13, x17
adds x14, x3, x10
str x14, [sp, #224] ; 8-byte Folded Spill
cset w3, hs
cmn x13, x17
adcs x21, x21, x3
str x21, [sp, #144] ; 8-byte Folded Spill
mrs x16, NZCV
ldr x10, [sp, #384] ; 8-byte Folded Reload
adcs x17, x10, x11
cmp x8, x30
str x22, [sp, #80] ; 8-byte Folded Spill
cinc x1, x22, lo
msr NZCV, x15
adcs xzr, x28, x8
mrs x15, NZCV
ldr x8, [sp, #504] ; 8-byte Folded Reload
ldur x13, [x8, #44]
adcs x28, x1, x5
cmp x1, x30
cinc x8, x22, lo
cmp x8, x30
cinc x6, x22, lo
cmn x14, x13
mov w14, #-2
mul x3, x13, x14
str x3, [sp, #504] ; 8-byte Folded Spill
mov x14, x13
str x13, [sp, #408] ; 8-byte Folded Spill
adcs xzr, x21, x3
mrs x21, NZCV
mrs x13, NZCV
msr NZCV, x15
adcs xzr, x5, x1
mrs x1, NZCV
ldp x22, x15, [sp, #40] ; 16-byte Folded Reload
adcs xzr, x15, x8
mrs x3, NZCV
str x3, [sp, #104] ; 8-byte Folded Spill
adcs xzr, x22, x6
mov x30, x6
mrs x3, NZCV
str x3, [sp, #176] ; 8-byte Folded Spill
str x3, [sp, #152] ; 8-byte Folded Spill
adds x12, x12, x9
mov x6, x9
mov w9, #-2
umulh x5, x4, x9
adcs x5, x24, x5
msr NZCV, x16
adcs xzr, x11, x10
mrs x10, NZCV
umulh x9, x14, x9
str x9, [sp, #344] ; 8-byte Folded Spill
msr NZCV, x13
adcs xzr, x17, x9
mrs x11, NZCV
stp x11, x10, [sp, #208] ; 16-byte Folded Spill
str x11, [sp, #128] ; 8-byte Folded Spill
msr NZCV, x21
adcs x9, x9, x17
str x9, [sp, #336] ; 8-byte Folded Spill
ldr x9, [sp, #288] ; 8-byte Folded Reload
msr NZCV, x9
adcs xzr, x7, x20
mrs x11, NZCV
ldr x9, [sp, #296] ; 8-byte Folded Reload
adcs x16, x9, x28
ldr x21, [sp, #480] ; 8-byte Folded Reload
cmp x2, x21
add x3, x21, x0
cinc x17, x3, lo
ldr x13, [sp, #200] ; 8-byte Folded Reload
msr NZCV, x13
adcs xzr, x23, x2
mrs x14, NZCV
adcs x2, x17, x16
msr NZCV, x27
adcs xzr, x26, x25
mrs x4, NZCV
ldr x27, [sp, #240] ; 8-byte Folded Reload
adcs x7, x27, x2
ldr x13, [sp, #184] ; 8-byte Folded Reload
msr NZCV, x13
adcs xzr, x19, x12
mrs x12, NZCV
adcs x13, x5, x7
str x13, [sp, #200] ; 8-byte Folded Spill
str x10, [sp, #184] ; 8-byte Folded Spill
msr NZCV, x1
adcs x8, x8, x15
msr NZCV, x11
adcs xzr, x28, x9
mrs x9, NZCV
ldr x15, [sp, #376] ; 8-byte Folded Reload
adcs x11, x15, x8
ldr x0, [sp, #168] ; 8-byte Folded Reload
ldr x10, [sp, #112] ; 8-byte Folded Reload
cmp x10, x0
cset w10, lo
ldr x25, [sp, #456] ; 8-byte Folded Reload
ldr x20, [sp, #136] ; 8-byte Folded Reload
cmp x20, x25
ldr x13, [sp, #72] ; 8-byte Folded Reload
cinc x23, x13, lo
ldr x13, [sp, #464] ; 8-byte Folded Reload
cmp x0, x13
ldr x13, [sp, #416] ; 8-byte Folded Reload
cinc x13, x13, lo
add x13, x13, x23
ldr x0, [sp, #88] ; 8-byte Folded Reload
msr NZCV, x0
adcs x10, x13, x10
str x10, [sp, #112] ; 8-byte Folded Spill
cmp x17, x21
cinc x0, x3, lo
msr NZCV, x14
adcs xzr, x16, x17
mrs x16, NZCV
adcs x17, x0, x11
ldr x10, [sp, #64] ; 8-byte Folded Reload
msr NZCV, x10
cset w10, hs
str x10, [sp, #296] ; 8-byte Folded Spill
msr NZCV, x4
adcs xzr, x2, x27
mrs x10, NZCV
str x10, [sp, #416] ; 8-byte Folded Spill
ldr x19, [sp, #264] ; 8-byte Folded Reload
adcs x4, x19, x17
ldr x10, [sp, #104] ; 8-byte Folded Reload
msr NZCV, x10
adcs x14, x30, x22
mov x22, x30
cmp x5, x24
add x10, x24, x6
str x10, [sp, #464] ; 8-byte Folded Spill
cinc x10, x10, lo
msr NZCV, x12
adcs xzr, x7, x5
mrs x5, NZCV
adcs x12, x10, x4
str x12, [sp, #168] ; 8-byte Folded Spill
ldr x30, [sp, #368] ; 8-byte Folded Reload
ldr x27, [sp, #120] ; 8-byte Folded Reload
adds x13, x27, x30
msr NZCV, x9
adcs xzr, x8, x15
mrs x8, NZCV
adcs x12, x13, x14
cmp x0, x21
cinc x9, x3, lo
msr NZCV, x16
adcs xzr, x11, x0
mrs x1, NZCV
adcs xzr, x12, x9
mrs x11, NZCV
mrs x2, NZCV
msr NZCV, x5
adcs xzr, x4, x10
mrs x15, NZCV
str x15, [sp, #304] ; 8-byte Folded Spill
mrs x16, NZCV
ldr x5, [sp, #408] ; 8-byte Folded Reload
ldr x15, [sp, #224] ; 8-byte Folded Reload
adds x0, x15, x5
lsl x4, x0, #32
sub x15, x4, x0
str x15, [sp, #224] ; 8-byte Folded Spill
ldp x7, x15, [sp, #496] ; 16-byte Folded Reload
ldr x6, [sp, #144] ; 8-byte Folded Reload
adcs x26, x15, x6
msr NZCV, x1
adcs x12, x9, x12
ldr x15, [sp, #128] ; 8-byte Folded Reload
msr NZCV, x15
mrs x28, NZCV
msr NZCV, x2
mrs x1, NZCV
ldr x15, [sp, #192] ; 8-byte Folded Reload
msr NZCV, x15
ldr x15, [sp, #312] ; 8-byte Folded Reload
adcs xzr, x15, x20
ldr x15, [sp, #112] ; 8-byte Folded Reload
adcs x2, x15, xzr
cmp x2, x23
cset w6, lo
cmp x22, x7
ldr x20, [sp, #80] ; 8-byte Folded Reload
cinc x15, x20, lo
cmp x15, x7
cinc x20, x20, lo
str x20, [sp, #192] ; 8-byte Folded Spill
cmp x23, x25
ldr x7, [sp, #432] ; 8-byte Folded Reload
cinc x7, x7, lo
ldr x22, [sp, #280] ; 8-byte Folded Reload
msr NZCV, x22
ldr x22, [sp, #296] ; 8-byte Folded Reload
adcs x2, x2, x22
add x7, x7, x20
adcs x6, x7, x6
ldr x7, [sp, #152] ; 8-byte Folded Reload
msr NZCV, x7
adcs x7, x15, x2
msr NZCV, x8
adcs xzr, x14, x13
adcs x8, x7, xzr
mrs x13, NZCV
ldr x14, [sp, #416] ; 8-byte Folded Reload
msr NZCV, x14
adcs xzr, x17, x19
mrs x14, NZCV
ldr x23, [sp, #232] ; 8-byte Folded Reload
adcs x17, x23, x12
ldp x20, x24, [sp, #464] ; 16-byte Folded Reload
cmp x10, x24
cinc x7, x20, lo
msr NZCV, x16
adcs xzr, x17, x7
mrs x19, NZCV
mrs x16, NZCV
cmn x27, x30
adcs x8, x8, xzr
cset w10, hs
cmp x9, x21
cinc x9, x3, lo
msr NZCV, x11
adcs x22, x9, x8
ldr x11, [sp, #176] ; 8-byte Folded Reload
msr NZCV, x11
adcs xzr, x2, x15
adcs x11, x6, xzr
stp x3, x11, [sp, #288] ; 16-byte Folded Spill
msr NZCV, x13
adcs x10, x11, x10
mrs x13, NZCV
str x13, [sp, #416] ; 8-byte Folded Spill
cmp x9, x21
cinc x11, x3, lo
str x11, [sp, #280] ; 8-byte Folded Spill
msr NZCV, x1
adcs xzr, x8, x9
mrs x8, NZCV
stp x10, x8, [sp, #368] ; 16-byte Folded Spill
adcs x8, x11, x10
ldr x10, [sp, #360] ; 8-byte Folded Reload
ldr x11, [sp, #160] ; 8-byte Folded Reload
adds x9, x11, x10
msr NZCV, x14
adcs xzr, x12, x23
mrs x12, NZCV
adcs xzr, x22, x9
adcs x8, x8, xzr
mrs x9, NZCV
str x9, [sp, #456] ; 8-byte Folded Spill
adds x9, x11, x10
adcs x3, x8, xzr
mrs x8, NZCV
str x8, [sp, #432] ; 8-byte Folded Spill
ldr x8, [sp, #248] ; 8-byte Folded Reload
msr NZCV, x8
ldp x23, x8, [sp, #392] ; 16-byte Folded Reload
ldr x10, [sp, #272] ; 8-byte Folded Reload
adcs xzr, x10, x8
mrs x11, NZCV
ldr x8, [sp, #256] ; 8-byte Folded Reload
mov w13, #-2
umulh x10, x8, x13
mul x13, x8, x13
ldr x14, [sp, #488] ; 8-byte Folded Reload
lsr x8, x14, #63
ldr x27, [sp, #200] ; 8-byte Folded Reload
adcs x14, x8, x27
str x8, [sp, #272] ; 8-byte Folded Spill
adds x13, x13, x23
ldr x2, [sp, #384] ; 8-byte Folded Reload
adcs x1, x2, x10
ldr x10, [sp, #216] ; 8-byte Folded Reload
msr NZCV, x10
adcs x15, x13, x14
ldr x10, [sp, #304] ; 8-byte Folded Reload
msr NZCV, x10
adcs x10, x7, x17
str x10, [sp, #240] ; 8-byte Folded Spill
mov x10, x5
lsl x17, x5, #1
ldr x5, [sp, #208] ; 8-byte Folded Reload
msr NZCV, x5
adcs x30, x17, x15
mov x6, x17
str x17, [sp, #304] ; 8-byte Folded Spill
msr NZCV, x12
adcs x9, x9, x22
cmp x7, x24
cinc x17, x20, lo
msr NZCV, x16
adcs x12, x17, x9
str x12, [sp, #248] ; 8-byte Folded Spill
msr NZCV, x11
adcs xzr, x27, x8
mrs x8, NZCV
str x8, [sp, #312] ; 8-byte Folded Spill
ldr x7, [sp, #320] ; 8-byte Folded Reload
ldr x16, [sp, #168] ; 8-byte Folded Reload
adcs x22, x7, x16
ldr x8, [sp, #184] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x14, x13
mrs x8, NZCV
str x8, [sp, #360] ; 8-byte Folded Spill
adcs x13, x1, x22
msr NZCV, x28
adcs xzr, x15, x6
mrs x25, NZCV
lsr x8, x10, #63
adcs x10, x8, x13
mov x12, x8
str x8, [sp, #184] ; 8-byte Folded Spill
cmp x17, x24
cinc x27, x20, lo
msr NZCV, x19
adcs xzr, x9, x17
mrs x9, NZCV
str x9, [sp, #232] ; 8-byte Folded Spill
adcs x9, x27, x3
stp x9, x10, [sp, #256] ; 16-byte Folded Spill
mov x20, x3
cmp x0, x4
mov w8, #-1
umulh x6, x0, x8
mov x15, x0
cset w9, hi
adds x8, x26, x9
mov x11, #-4294967295
adds x8, x8, x6
add x11, x8, x11
stp x8, x11, [sp, #208] ; 16-byte Folded Spill
cset w11, hs
cmn x26, x9
ldr x8, [sp, #336] ; 8-byte Folded Reload
adcs x8, x8, x11
str x8, [sp, #336] ; 8-byte Folded Spill
mrs x11, NZCV
ldr x5, [sp, #224] ; 8-byte Folded Reload
adcs x8, x5, x30
str x8, [sp, #200] ; 8-byte Folded Spill
ldr x8, [sp, #312] ; 8-byte Folded Reload
msr NZCV, x8
adcs xzr, x16, x7
mrs x3, NZCV
ldp x9, x8, [sp, #352] ; 16-byte Folded Reload
ldr x24, [sp, #240] ; 8-byte Folded Reload
adcs x7, x9, x24
cmp x1, x2
add x17, x2, x23
mov x16, x2
cinc x19, x17, lo
msr NZCV, x8
adcs xzr, x22, x1
mrs x1, NZCV
adcs x22, x19, x7
msr NZCV, x25
adcs xzr, x13, x12
mrs x2, NZCV
ldr x10, [sp, #504] ; 8-byte Folded Reload
adcs x25, x10, x22
ldr x8, [sp, #296] ; 8-byte Folded Reload
ldr x12, [sp, #192] ; 8-byte Folded Reload
cmp x8, x12
cset w0, lo
ldp x23, x8, [sp, #280] ; 16-byte Folded Reload
cmp x23, x21
cinc x26, x8, lo
ldr x8, [sp, #496] ; 8-byte Folded Reload
cmp x12, x8
ldr x8, [sp, #440] ; 8-byte Folded Reload
cinc x4, x8, lo
add x4, x4, x26
ldr x8, [sp, #416] ; 8-byte Folded Reload
msr NZCV, x8
adcs x4, x4, x0
ldr x8, [sp, #232] ; 8-byte Folded Reload
msr NZCV, x8
mov x12, x27
adcs xzr, x20, x27
mrs x8, NZCV
str x8, [sp, #440] ; 8-byte Folded Spill
mrs x27, NZCV
msr NZCV, x3
adcs xzr, x24, x9
mrs x3, NZCV
ldr x9, [sp, #400] ; 8-byte Folded Reload
ldr x13, [sp, #248] ; 8-byte Folded Reload
adcs x21, x9, x13
cmp x19, x16
cinc x28, x17, lo
msr NZCV, x1
adcs xzr, x7, x19
mrs x1, NZCV
adcs x7, x28, x21
msr NZCV, x2
adcs xzr, x22, x10
mrs x2, NZCV
mov w8, #-2
umulh x19, x15, x8
mul x20, x15, x8
ldr x10, [sp, #344] ; 8-byte Folded Reload
adcs x24, x10, x7
adds x20, x20, x6
adcs x19, x5, x19
msr NZCV, x11
adcs xzr, x30, x5
mov x15, x5
mrs x30, NZCV
ldp x14, x22, [sp, #264] ; 16-byte Folded Reload
adcs x8, x20, x14
str x8, [sp, #504] ; 8-byte Folded Spill
ldr x5, [sp, #488] ; 8-byte Folded Reload
adds x0, x22, x5
msr NZCV, x3
adcs xzr, x13, x9
mrs x3, NZCV
ldr x13, [sp, #256] ; 8-byte Folded Reload
adcs x8, x0, x13
cmp x28, x16
cinc x11, x17, lo
msr NZCV, x1
adcs xzr, x21, x28
mrs x1, NZCV
adcs x21, x11, x8
msr NZCV, x2
adcs xzr, x7, x10
mrs x7, NZCV
ldr x10, [sp, #304] ; 8-byte Folded Reload
adcs x28, x10, x21
msr NZCV, x30
adcs xzr, x14, x20
mrs x20, NZCV
adcs x9, x19, x25
str x9, [sp, #496] ; 8-byte Folded Spill
ldr x9, [sp, #432] ; 8-byte Folded Reload
msr NZCV, x9
cset w30, hs
msr NZCV, x7
adcs xzr, x21, x10
mrs x7, NZCV
mrs x21, NZCV
msr NZCV, x1
adcs xzr, x8, x11
mrs x8, NZCV
str x8, [sp, #432] ; 8-byte Folded Spill
mrs x2, NZCV
cmp x19, x15
add x14, x15, x6
cinc x10, x14, lo
msr NZCV, x20
adcs xzr, x25, x19
mrs x20, NZCV
adcs x19, x10, x24
ldr x8, [sp, #376] ; 8-byte Folded Reload
msr NZCV, x8
ldr x8, [sp, #368] ; 8-byte Folded Reload
adcs xzr, x8, x23
adcs x4, x4, xzr
cmp x4, x26
cset w25, lo
ldp x8, x1, [sp, #464] ; 16-byte Folded Reload
cmp x12, x1
cinc x12, x8, lo
cmp x12, x1
cinc x9, x8, lo
ldr x8, [sp, #480] ; 8-byte Folded Reload
cmp x26, x8
ldr x26, [sp, #424] ; 8-byte Folded Reload
cinc x26, x26, lo
ldr x23, [sp, #456] ; 8-byte Folded Reload
msr NZCV, x23
adcs x4, x4, x30
add x26, x26, x9
adcs x25, x26, x25
msr NZCV, x27
adcs x26, x12, x4
msr NZCV, x3
adcs xzr, x13, x0
adcs x13, x26, xzr
mrs x3, NZCV
cmn x22, x5
adcs x13, x13, xzr
mrs x26, NZCV
cmp x10, x15
cinc x27, x14, lo
msr NZCV, x20
adcs xzr, x24, x10
mrs x20, NZCV
adcs x8, x27, x28
cmp x11, x16
cinc x11, x17, lo
msr NZCV, x2
adcs x22, x11, x13
msr NZCV, x26
cset w24, hs
ldr x5, [sp, #408] ; 8-byte Folded Reload
ldr x10, [sp, #184] ; 8-byte Folded Reload
adds x26, x10, x5
msr NZCV, x7
adcs x7, x26, x22
ldr x0, [sp, #440] ; 8-byte Folded Reload
msr NZCV, x0
adcs xzr, x4, x12
adcs x12, x25, xzr
cmp x12, x9
cset w0, lo
cmp x11, x16
cinc x4, x17, lo
cmp x4, x16
cinc x25, x17, lo
cmp x9, x1
ldr x9, [sp, #448] ; 8-byte Folded Reload
cinc x9, x9, lo
msr NZCV, x3
adcs x12, x12, x24
add x9, x9, x25
adcs x9, x9, x0
ldr x17, [sp, #432] ; 8-byte Folded Reload
msr NZCV, x17
adcs xzr, x13, x11
mrs x11, NZCV
adcs x13, x4, x12
msr NZCV, x21
adcs xzr, x22, x26
adcs x13, x13, xzr
mrs x0, NZCV
mrs x1, NZCV
cmp x27, x15
cinc x3, x14, lo
msr NZCV, x20
adcs xzr, x28, x27
mrs x20, NZCV
adcs x17, x3, x7
cmn x10, x5
adcs x13, x13, xzr
cset w21, hs
msr NZCV, x1
adcs x1, x21, xzr
cmp x3, x15
cinc x22, x14, lo
msr NZCV, x20
adcs xzr, x7, x3
mrs x3, NZCV
adcs x26, x22, x13
msr NZCV, x11
adcs xzr, x12, x4
adcs x9, x9, xzr
msr NZCV, x0
adcs x11, x9, x21
mrs x12, NZCV
msr NZCV, x3
adcs xzr, x13, x22
mrs x13, NZCV
mrs x0, NZCV
adcs x3, x11, xzr
cmp x9, x25
cset w4, lo
cmp x22, x15
cinc x7, x14, lo
cmp x7, x15
cinc x10, x14, lo
cmp x25, x16
ldr x14, [sp, #392] ; 8-byte Folded Reload
cinc x20, x14, lo
add x20, x20, x10
msr NZCV, x12
adcs x12, x20, x4
msr NZCV, x0
adcs xzr, x11, x7
adcs x0, x12, xzr
msr NZCV, x13
adcs xzr, x1, x9
adcs x9, x20, x4
cmp x10, x15
cinc x11, x6, lo
cmp x0, x10
cinc x15, x11, lo
mov w24, #-1
ldp x12, x16, [sp, #200] ; 16-byte Folded Reload
cmp x16, x24
cset w10, lo
ldr x11, [sp, #336] ; 8-byte Folded Reload
sub x10, x11, x10
cmp x11, x10
mov x2, x11
cset w11, lo
sub x11, x12, x11
cmp x12, x11
mov x27, x12
cset w12, lo
mov x14, #-4294967295
ldp x5, x4, [sp, #496] ; 16-byte Folded Reload
add x13, x4, x14
sub x12, x13, x12
mov x1, #-65534
movk x1, #0, lsl #16
add x1, x5, x1
cmp x4, x24
mov x28, x4
csetm x4, lo
cmp x13, x12
cset w13, lo
sub x13, x4, x13
add x13, x13, x1
add x4, x19, x14
mov w6, #-2
cmp x5, x6
mov x30, x5
csetm x5, lo
cmp x1, x13
cset w1, lo
sub x1, x5, x1
add x1, x1, x4
add x5, x8, x14
cmp x19, x24
csetm x20, lo
cmp x4, x1
cset w4, lo
sub x4, x20, x4
add x4, x4, x5
add x20, x17, x14
cmp x8, x24
csetm x21, lo
cmp x5, x4
cset w5, lo
sub x5, x21, x5
add x5, x5, x20
add x21, x26, x14
cmp x17, x24
csetm x22, lo
cmp x20, x5
cset w20, lo
sub x20, x22, x20
add x20, x20, x21
cmp x26, x24
csetm x22, lo
cmp x21, x20
cset w21, lo
adds x3, x3, x7
add x7, x3, x14
sub x21, x22, x21
add x21, x21, x7
adcs x9, x9, x14
mov x14, #-4294967295
cmp x3, x24
csetm x22, lo
cmp x7, x21
cset w7, lo
sub x7, x22, x7
add x7, x7, x9
add x6, x15, x14
cmp x0, x24
csetm x22, lo
cmp x9, x7
cset w9, lo
sub x9, x22, x9
add x9, x9, x6
cmp x15, x24
mov w25, #-1
csetm x22, lo
cmp x6, x9
cset w6, lo
mov x24, #-4294967296
cmp x22, x6
csel x6, x25, x24, eq
csetm x22, ne
and x24, x16, x22
ldr x14, [sp, #216] ; 8-byte Folded Reload
and x25, x6, x14
orr x24, x25, x24
and x23, x2, x22
and x10, x6, x10
orr x10, x10, x23
and x23, x27, x22
and x11, x6, x11
orr x11, x11, x23
and x14, x28, x22
and x12, x6, x12
orr x12, x12, x14
and x14, x30, x22
and x13, x6, x13
orr x13, x13, x14
ldr x2, [sp, #328] ; 8-byte Folded Reload
str x24, [x2]
stur x10, [x2, #4]
and x10, x19, x22
and x14, x6, x1
orr x10, x14, x10
str x11, [x2, #8]
stur x12, [x2, #12]
and x8, x8, x22
and x11, x6, x4
orr x8, x11, x8
and x11, x17, x22
and x12, x6, x5
orr x11, x12, x11
str x13, [x2, #16]
stur x10, [x2, #20]
and x10, x26, x22
and x12, x6, x20
orr x10, x12, x10
and x12, x3, x22
and x13, x6, x21
orr x12, x13, x12
and x13, x0, x22
and x14, x6, x7
orr x13, x14, x13
and x14, x15, x22
str x8, [x2, #24]
stur x11, [x2, #28]
and x8, x6, x9
str x10, [x2, #32]
stur x12, [x2, #36]
orr x8, x8, x14
str x13, [x2, #40]
stur x8, [x2, #44]
add sp, sp, #512
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
ldp x26, x25, [sp, #16] ; 16-byte Folded Reload
ldp x28, x27, [sp], #96 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
.globl _fiat_p384_nonzero ; -- Begin function fiat_p384_nonzero
.p2align 2
_fiat_p384_nonzero: ; @fiat_p384_nonzero
.cfi_startproc
; %bb.0:
ldp x8, x10, [x0]
ldur x9, [x0, #4]
ldur x11, [x0, #12]
ldp x12, x14, [x0, #16]
ldur x13, [x0, #20]
ldur x15, [x0, #28]
ldp x16, x1, [x0, #32]
ldur x17, [x0, #36]
ldur x0, [x0, #44]
orr x8, x9, x8
orr x8, x8, x10
orr x8, x8, x11
orr x8, x8, x12
orr x8, x8, x13
orr x8, x8, x14
orr x8, x8, x15
orr x8, x8, x16
orr x8, x8, x17
orr x8, x8, x1
orr x0, x8, x0
ret
.cfi_endproc
; -- End function
	.globl	_fiat_p384_selectznz            ; -- Begin function fiat_p384_selectznz
	.p2align	2
;-----------------------------------------------------------------------
; fiat_p384_selectznz(flag /*x0*/, a /*x1*/, b /*x2*/, out /*x3*/)
; Branchless multi-limb select: the 48-byte value at [x3] is filled
; from a ([x1]) when the flag in x0 is zero, and from b ([x2]) when it
; is non-zero.
; NOTE(review): the select is done with csetm/csel masks plus
; overlapping 8-byte loads and stores at 4-byte stride — the pattern
; clang emits for arrays of 32-bit limbs. Presumably constant-time
; (no data-dependent branches); confirm against the fiat-crypto
; generated C before relying on that property.
; AAPCS64: x19-x24 are callee-saved and spilled below.
;-----------------------------------------------------------------------
_fiat_p384_selectznz:                   ; @fiat_p384_selectznz
	.cfi_startproc
; %bb.0:
	stp	x24, x23, [sp, #-48]!           ; 16-byte Folded Spill
	.cfi_def_cfa_offset 48
	stp	x22, x21, [sp, #16]             ; 16-byte Folded Spill
	stp	x20, x19, [sp, #32]             ; 16-byte Folded Spill
	.cfi_offset w19, -8
	.cfi_offset w20, -16
	.cfi_offset w21, -24
	.cfi_offset w22, -32
	.cfi_offset w23, -40
	.cfi_offset w24, -48
	; Load all twelve overlapping 8-byte words of operand a ([x1]).
	ldp	x8, x10, [x1]
	ldur	x9, [x1, #4]
	ldur	x11, [x1, #12]
	ldp	x12, x14, [x1, #16]
	ldur	x13, [x1, #20]
	ldur	x15, [x1, #28]
	ldp	x16, x4, [x1, #32]
	ldur	x17, [x1, #36]
	ldur	x1, [x1, #44]                   ; x1 no longer needed as a base after this
	; Begin loading operand b ([x2]) and build the two select masks.
	ldp	x5, x7, [x2]
	ldur	x6, [x2, #4]
	ldur	x19, [x2, #12]
	mov	w20, #-1                        ; 0x00000000ffffffff
	mov	x21, #-4294967296               ; 0xffffffff00000000
	ldp	x22, x23, [x2, #16]
	cmp	x0, #0
	csel	x0, x20, x21, eq                ; flag==0 -> keep a's low words; else high
	ldur	x20, [x2, #20]                  ; ldur does not touch flags used above
	csetm	x21, ne                         ; all-ones iff flag != 0; masks b's words
	; Merge each word pair: out_i = (b_i & x21) | (a_i & x0).
	and	x5, x5, x21
	and	x8, x8, x0
	orr	x8, x5, x8
	ldur	x5, [x2, #28]
	and	x6, x6, x21
	and	x9, x9, x0
	orr	x9, x6, x9
	and	x6, x7, x21
	ldur	x7, [x2, #36]
	and	x10, x10, x0
	orr	x10, x6, x10
	ldp	x24, x6, [x2, #32]
	and	x19, x19, x21
	and	x11, x11, x0
	ldur	x2, [x2, #44]                   ; last word of b; x2 dead as a base now
	orr	x11, x19, x11
	and	x19, x22, x21
	and	x12, x12, x0
	orr	x12, x19, x12
	and	x19, x20, x21
	and	x13, x13, x0
	orr	x13, x19, x13
	and	x19, x23, x21
	and	x14, x14, x0
	orr	x14, x19, x14
	and	x5, x5, x21
	and	x15, x15, x0
	orr	x15, x5, x15
	and	x5, x24, x21
	and	x16, x16, x0
	orr	x16, x5, x16
	and	x5, x7, x21
	and	x17, x17, x0
	orr	x17, x5, x17
	and	x5, x6, x21
	and	x4, x4, x0
	orr	x4, x5, x4
	and	x2, x2, x21
	and	x0, x1, x0
	orr	x0, x2, x0
	; Store the selected words back at 4-byte stride; each later store
	; overwrites the high half of the previous one (compiler pattern).
	str	x8, [x3]
	stur	x9, [x3, #4]
	str	x10, [x3, #8]
	stur	x11, [x3, #12]
	str	x12, [x3, #16]
	stur	x13, [x3, #20]
	str	x14, [x3, #24]
	stur	x15, [x3, #28]
	str	x16, [x3, #32]
	stur	x17, [x3, #36]
	str	x4, [x3, #40]
	stur	x0, [x3, #44]
	; Restore callee-saved registers and return.
	ldp	x20, x19, [sp, #32]             ; 16-byte Folded Reload
	ldp	x22, x21, [sp, #16]             ; 16-byte Folded Reload
	ldp	x24, x23, [sp], #48             ; 16-byte Folded Reload
	ret
	.cfi_endproc
                                        ; -- End function
.globl _fiat_p384_to_bytes ; -- Begin function fiat_p384_to_bytes
.p2align 2
_fiat_p384_to_bytes: ; @fiat_p384_to_bytes
.cfi_startproc
; %bb.0:
; In:  x0 = source operand, x1 = destination byte buffer.
; Serializes twelve words to dst as 48 little-endian bytes.
; All loads complete before the first store and every strb targets a
; distinct byte, so the per-word store order below is interchangeable.
ldp x3, x17, [x0]
ldur x2, [x0, #4]
ldur x16, [x0, #12]
ldp x15, x13, [x0, #16]
ldur x14, [x0, #20]
ldur x12, [x0, #28]
ldp x11, x9, [x0, #32]
ldur x10, [x0, #36]
ldur x8, [x0, #44]
; x0 (the source pointer) is dead after the loads above; reuse it as
; the single shift-scratch register for every word.
strb w3, [x1]
lsr x0, x3, #8
strb w0, [x1, #1]
lsr x0, x3, #16
strb w0, [x1, #2]
lsr x0, x3, #24
strb w0, [x1, #3]
strb w2, [x1, #4]
lsr x0, x2, #8
strb w0, [x1, #5]
lsr x0, x2, #16
strb w0, [x1, #6]
lsr x0, x2, #24
strb w0, [x1, #7]
strb w17, [x1, #8]
lsr x0, x17, #8
strb w0, [x1, #9]
lsr x0, x17, #16
strb w0, [x1, #10]
lsr x0, x17, #24
strb w0, [x1, #11]
strb w16, [x1, #12]
lsr x0, x16, #8
strb w0, [x1, #13]
lsr x0, x16, #16
strb w0, [x1, #14]
lsr x0, x16, #24
strb w0, [x1, #15]
strb w15, [x1, #16]
lsr x0, x15, #8
strb w0, [x1, #17]
lsr x0, x15, #16
strb w0, [x1, #18]
lsr x0, x15, #24
strb w0, [x1, #19]
strb w14, [x1, #20]
lsr x0, x14, #8
strb w0, [x1, #21]
lsr x0, x14, #16
strb w0, [x1, #22]
lsr x0, x14, #24
strb w0, [x1, #23]
strb w13, [x1, #24]
lsr x0, x13, #8
strb w0, [x1, #25]
lsr x0, x13, #16
strb w0, [x1, #26]
lsr x0, x13, #24
strb w0, [x1, #27]
strb w12, [x1, #28]
lsr x0, x12, #8
strb w0, [x1, #29]
lsr x0, x12, #16
strb w0, [x1, #30]
lsr x0, x12, #24
strb w0, [x1, #31]
strb w11, [x1, #32]
lsr x0, x11, #8
strb w0, [x1, #33]
lsr x0, x11, #16
strb w0, [x1, #34]
lsr x0, x11, #24
strb w0, [x1, #35]
strb w10, [x1, #36]
lsr x0, x10, #8
strb w0, [x1, #37]
lsr x0, x10, #16
strb w0, [x1, #38]
lsr x0, x10, #24
strb w0, [x1, #39]
strb w9, [x1, #40]
lsr x0, x9, #8
strb w0, [x1, #41]
lsr x0, x9, #16
strb w0, [x1, #42]
lsr x0, x9, #24
strb w0, [x1, #43]
strb w8, [x1, #44]
lsr x0, x8, #8
strb w0, [x1, #45]
lsr x0, x8, #16
strb w0, [x1, #46]
lsr x0, x8, #24
strb w0, [x1, #47]
ret
.cfi_endproc
; -- End function
.globl _fiat_p384_from_bytes ; -- Begin function fiat_p384_from_bytes
.p2align 2
_fiat_p384_from_bytes: ; @fiat_p384_from_bytes
.cfi_startproc
; %bb.0:
; In: x0 = source, x1 = destination.
; Loads twelve 32-bit words from src and writes them as zero-extended
; 64-bit stores at 4-byte offsets (str/stur pairs).
; NOTE(review): the stores OVERLAP -- each later store clobbers the
; upper half of the previous one, leaving one 32-bit word per 4-byte
; slot; the final stur also writes zeros at [x1, #48..51].  The store
; order below must not be changed.
ldp w9, w8, [x0, #40]
ldp w11, w10, [x0, #32]
ldp w13, w12, [x0, #24]
ldp w15, w14, [x0, #16]
ldp w17, w16, [x0, #8]
ldp w2, w0, [x0]
str x2, [x1]
stur x0, [x1, #4]
str x17, [x1, #8]
stur x16, [x1, #12]
str x15, [x1, #16]
stur x14, [x1, #20]
str x13, [x1, #24]
stur x12, [x1, #28]
str x11, [x1, #32]
stur x10, [x1, #36]
str x9, [x1, #40]
stur x8, [x1, #44]
ret
.cfi_endproc
; -- End function
.subsections_via_symbols
| the_stack_data/140766090.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _encode_lavc_set_audio_pts ## -- Begin function encode_lavc_set_audio_pts
.p2align 4, 0x90
_encode_lavc_set_audio_pts: ## @encode_lavc_set_audio_pts
.cfi_startproc
## %bb.0:
## encode_lavc_set_audio_pts(ctx in %rdi, double pts in %xmm0):
##   if (!ctx) return;
##   pthread_mutex_lock(ctx + 8);      // mutex presumably at offset 8
##   *(double *)(ctx + 0) = pts;
##   *(uint64_t *)(ctx + 16) = 0;
##   pthread_mutex_unlock(ctx + 8);    // done as a tail call below
testq %rdi, %rdi
je LBB0_1 ## NULL context: nothing to do
## %bb.2:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
subq $16, %rsp
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq %rdi, %rbx ## rbx = ctx (survives the lock call)
leaq 8(%rdi), %r14 ## r14 = &ctx->mutex (offset 8)
movq %r14, %rdi
movsd %xmm0, -24(%rbp) ## 8-byte Spill
## pts spilled: xmm regs are caller-saved across the call below
callq _pthread_mutex_lock
movsd -24(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, (%rbx) ## store pts at ctx + 0
movq $0, 16(%rbx) ## clear 64-bit field at ctx + 16
movq %r14, %rdi
addq $16, %rsp
popq %rbx
popq %r14
popq %rbp
jmp _pthread_mutex_unlock ## TAILCALL
LBB0_1:
retq
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _encode_lavc_set_audio_pts ; -- Begin function encode_lavc_set_audio_pts
.p2align 2
_encode_lavc_set_audio_pts: ; @encode_lavc_set_audio_pts
.cfi_startproc
; %bb.0:
; encode_lavc_set_audio_pts(ctx in x0, double pts in d0):
;   if (!ctx) return;
;   pthread_mutex_lock(ctx + 8);      // mutex presumably at offset 8
;   *(double *)(ctx + 0) = pts;  *(uint64_t *)(ctx + 16) = 0;
;   pthread_mutex_unlock(ctx + 8);    // tail call (b, not bl)
cbz x0, LBB0_2
; %bb.1:
stp d9, d8, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset b8, -40
.cfi_offset b9, -48
fmov d8, d0 ; keep pts in callee-saved d8 across the lock call
mov x19, x0 ; x19 = ctx
add x20, x0, #8 ; x20 = &ctx->mutex
mov x0, x20
bl _pthread_mutex_lock
str d8, [x19] ; store pts at ctx + 0
str xzr, [x19, #16] ; clear 64-bit field at ctx + 16
mov x0, x20
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp d9, d8, [sp], #48 ; 16-byte Folded Reload
b _pthread_mutex_unlock
LBB0_2:
; Early-return path: restore CFI state to "nothing saved" for the
; unwinder, since the prologue above was never executed here.
.cfi_def_cfa wsp, 0
.cfi_same_value w30
.cfi_same_value w29
.cfi_same_value w19
.cfi_same_value w20
.cfi_same_value b8
.cfi_same_value b9
ret
.cfi_endproc
; -- End function
.subsections_via_symbols
| AnghaBench/mpv/common/extr_encode_lavc.c_encode_lavc_set_audio_pts.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.section __TEXT,__literal16,16byte_literals
.p2align 4 ## -- Begin function main
## Constant pool for the vectorized loop in _main below.  The loop
## appears to sum all i in [0, 1000) divisible by 3 or 5 and print the
## total ("Soma" is Portuguese for "sum") -- TODO confirm against the
## C source.  Divisibility is tested with the multiply-by-modular-
## inverse trick: n % d == 0  iff  (n * inv_d) mod 2^16 <= (2^16-1)/d.
LCPI0_0: ## 16-bit lane indices 0..3
.short 0 ## 0x0
.short 1 ## 0x1
.short 2 ## 0x2
.short 3 ## 0x3
.space 2
.space 2
.space 2
.space 2
LCPI0_1: ## 32-bit lane indices 0..3
.long 0 ## 0x0
.long 1 ## 0x1
.long 2 ## 0x2
.long 3 ## 0x3
LCPI0_2: ## +4: derives lane indices i+4..i+7 from LCPI0_1
.long 4 ## 0x4
.long 4 ## 0x4
.long 4 ## 0x4
.long 4 ## 0x4
LCPI0_3: ## 43691 = 0xAAAB = inverse of 3 mod 2^16
.short 43691 ## 0xaaab
.short 43691 ## 0xaaab
.short 43691 ## 0xaaab
.short 43691 ## 0xaaab
.space 2
.space 2
.space 2
.space 2
LCPI0_4: ## 21845 = 0x5555 = (2^16-1)/3, threshold for "divisible by 3"
.short 21845 ## 0x5555
.short 21845 ## 0x5555
.short 21845 ## 0x5555
.short 21845 ## 0x5555
.space 2
.space 2
.space 2
.space 2
LCPI0_5: ## 43692 = (4*43691) mod 2^16: shifts the div-3 test to lane i+4
.short 43692 ## 0xaaac
.short 43692 ## 0xaaac
.short 43692 ## 0xaaac
.short 43692 ## 0xaaac
.short 0 ## 0x0
.short 0 ## 0x0
.short 0 ## 0x0
.short 0 ## 0x0
LCPI0_6: ## 52429 = 0xCCCD = inverse of 5 mod 2^16
.short 52429 ## 0xcccd
.short 52429 ## 0xcccd
.short 52429 ## 0xcccd
.short 52429 ## 0xcccd
.space 2
.space 2
.space 2
.space 2
LCPI0_7: ## 13107 = 0x3333 = (2^16-1)/5, threshold for "divisible by 5"
.short 13107 ## 0x3333
.short 13107 ## 0x3333
.short 13107 ## 0x3333
.short 13107 ## 0x3333
.space 2
.space 2
.space 2
.space 2
LCPI0_8: ## 13108 = (4*52429) mod 2^16: shifts the div-5 test to lane i+4
.short 13108 ## 0x3334
.short 13108 ## 0x3334
.short 13108 ## 0x3334
.short 13108 ## 0x3334
.short 0 ## 0x0
.short 0 ## 0x0
.short 0 ## 0x0
.short 0 ## 0x0
LCPI0_9: ## loop stride: 8 indices per iteration (32-bit lanes)
.long 8 ## 0x8
.long 8 ## 0x8
.long 8 ## 0x8
.long 8 ## 0x8
LCPI0_10: ## loop stride: 8 (16-bit lanes)
.short 8 ## 0x8
.short 8 ## 0x8
.short 8 ## 0x8
.short 8 ## 0x8
.space 2
.space 2
.space 2
.space 2
.section __TEXT,__text,regular,pure_instructions
.globl _main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
## Register roles: xmm7 = 16-bit indices i..i+3, xmm2 = 32-bit indices
## i..i+3, xmm0 / xmm6 = per-lane accumulators for lanes i..i+3 and
## i+4..i+7, eax = remaining iterations (1000, minus 8 per pass).
movdqa LCPI0_0(%rip), %xmm7 ## xmm7 = <0,1,2,3,u,u,u,u>
pxor %xmm0, %xmm0
movdqa LCPI0_1(%rip), %xmm2 ## xmm2 = [0,1,2,3]
movl $1000, %eax ## imm = 0x3E8
movdqa LCPI0_3(%rip), %xmm9 ## xmm9 = <43691,43691,43691,43691,u,u,u,u>
movdqa LCPI0_4(%rip), %xmm15 ## xmm15 = <21845,21845,21845,21845,u,u,u,u>
movdqa LCPI0_5(%rip), %xmm10 ## xmm10 = [43692,43692,43692,43692,0,0,0,0]
movdqa LCPI0_6(%rip), %xmm11 ## xmm11 = <52429,52429,52429,52429,u,u,u,u>
movdqa LCPI0_7(%rip), %xmm3 ## xmm3 = <13107,13107,13107,13107,u,u,u,u>
movdqa LCPI0_8(%rip), %xmm12 ## xmm12 = [13108,13108,13108,13108,0,0,0,0]
movdqa LCPI0_9(%rip), %xmm13 ## xmm13 = [8,8,8,8]
movdqa LCPI0_10(%rip), %xmm14 ## xmm14 = <8,8,8,8,u,u,u,u>
pxor %xmm6, %xmm6
.p2align 4, 0x90
LBB0_1: ## =>This Inner Loop Header: Depth=1
## Divisibility mask: t = i * inv_d; pminuw+pcmpeqw yields all-ones
## in lanes where t <= threshold, i.e. where d divides i.
movdqa %xmm7, %xmm1
pmullw %xmm9, %xmm1
movdqa %xmm1, %xmm4
pminuw %xmm15, %xmm4
pcmpeqw %xmm1, %xmm4 ## xmm4 = div-3 mask for lanes i..i+3
paddw %xmm10, %xmm1
movdqa %xmm1, %xmm5
pminuw %xmm15, %xmm5
pcmpeqw %xmm1, %xmm5 ## xmm5 = div-3 mask for lanes i+4..i+7
movdqa %xmm7, %xmm1
pmullw %xmm11, %xmm1
movdqa %xmm1, %xmm8
pminuw %xmm3, %xmm8
pcmpeqw %xmm1, %xmm8 ## xmm8 = div-5 mask for lanes i..i+3
por %xmm4, %xmm8 ## div-3 OR div-5, lanes i..i+3
paddw %xmm12, %xmm1
movdqa %xmm1, %xmm4
pminuw %xmm3, %xmm4
pcmpeqw %xmm1, %xmm4 ## xmm4 = div-5 mask for lanes i+4..i+7
por %xmm5, %xmm4 ## div-3 OR div-5, lanes i+4..i+7
movdqa %xmm2, %xmm1
paddd LCPI0_2(%rip), %xmm1 ## xmm1 = indices i+4..i+7
pmovsxwd %xmm4, %xmm4
pand %xmm1, %xmm4 ## keep indices where the mask is set
paddd %xmm4, %xmm6
pmovsxwd %xmm8, %xmm1
pand %xmm2, %xmm1
paddd %xmm1, %xmm0
paddd %xmm13, %xmm2 ## advance indices by 8
paddw %xmm14, %xmm7
addl $-8, %eax
jne LBB0_1
## %bb.2:
## Horizontal reduction of the two accumulators into esi, then
## printf("Soma: %d\n", sum).
paddd %xmm0, %xmm6
pshufd $238, %xmm6, %xmm0 ## xmm0 = xmm6[2,3,2,3]
paddd %xmm6, %xmm0
pshufd $85, %xmm0, %xmm1 ## xmm1 = xmm0[1,1,1,1]
paddd %xmm0, %xmm1
movd %xmm1, %esi
leaq L_.str(%rip), %rdi
xorl %eax, %eax ## variadic SysV call: zero vector-arg count
callq _printf
xorl %eax, %eax ## return 0
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "Soma: %d\n"
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
; Appears to sum all i in [0, 1000) divisible by 3 or 5 and print the
; total ("Soma" is Portuguese for "sum") -- TODO confirm against the
; C source.  Divisibility uses the multiply-by-modular-inverse trick:
;   3 | n  iff  n * 0xAAAAAAAB (mod 2^32) < 0x55555556
;   5 | n  iff  n * 0xCCCCCCCD (mod 2^32) < 0x33333334
; Loop is unrolled x4: w12 = i (multiple of 4); w10/w11/w9/w8 are the
; four partial sums for lanes i, i+1, i+2, i+3.
mov w12, #0
mov w10, #0
mov w11, #0
mov w9, #0
mov w8, #0
mov w13, #43691 ; w13 = 0xAAAAAAAB (inverse of 3 mod 2^32)
movk w13, #43690, lsl #16
mov w14, #52429 ; w14 = 0xCCCCCCCD (inverse of 5 mod 2^32)
movk w14, #52428, lsl #16
mov w15, #13108 ; w15 = 0x33333334 (div-5 threshold)
movk w15, #13107, lsl #16
mov w16, #21846 ; w16 = 0x55555556 (div-3 threshold)
movk w16, #21845, lsl #16
LBB0_1: ; =>This Inner Loop Header: Depth=1
and w17, w12, #0xffff
orr w0, w17, #0x1 ; i+1 (i is a multiple of 4, so orr == add)
orr w1, w17, #0x2 ; i+2
orr w2, w17, #0x3 ; i+3
; Lane i: divisible by 5 OR by 3?  ccmp folds the two compares; the
; final condition "lo" is true when either product is below threshold.
mul w3, w17, w13
mul w17, w17, w14
cmp w17, w15
ccmp w3, w16, #0, hs
mul w17, w0, w13
mul w0, w0, w14
csel w3, w12, wzr, lo ; add i if 3|i or 5|i, else 0
cmp w0, w15
ccmp w17, w16, #0, hs
mul w17, w1, w13
mul w0, w1, w14
csinc w1, wzr, w12, hs ; lane i+1: i+1 if divisible, else 0
cmp w0, w15
ccmp w17, w16, #0, hs
mul w17, w2, w13
mul w0, w2, w14
add w2, w12, #2
csel w2, w2, wzr, lo ; lane i+2
cmp w0, w15
ccmp w17, w16, #0, hs
add w17, w12, #3
csel w17, w17, wzr, lo ; lane i+3
add w10, w3, w10
add w11, w1, w11
add w9, w2, w9
add w8, w17, w8
add w12, w12, #4
cmp w12, #1000
b.ne LBB0_1
; %bb.2:
; Combine the four partial sums and printf("Soma: %d\n", sum).
sub sp, sp, #32
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
add w10, w11, w10
add w9, w9, w10
add w8, w8, w9
str x8, [sp] ; variadic arg goes on the stack (Apple arm64 ABI)
Lloh0:
adrp x0, l_.str@PAGE
Lloh1:
add x0, x0, l_.str@PAGEOFF
bl _printf
mov w0, #0 ; return 0
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #32
ret
.loh AdrpAdd Lloh0, Lloh1
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "Soma: %d\n"
.subsections_via_symbols
| the_stack_data/994895.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _proc_pid ## -- Begin function proc_pid
.p2align 4, 0x90
_proc_pid: ## @proc_pid
.cfi_startproc
## %bb.0:
## int proc_pid(p): return p ? *(int *)p : -1;
## Leaf function: no frame needed, guard-first layout.
movl $-1, %eax ## default result for a NULL proc
testq %rdi, %rdi
je LBB0_1
## %bb.2:
movl (%rdi), %eax ## load the 32-bit pid field at offset 0
LBB0_1:
retq
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _proc_pid ; -- Begin function proc_pid
.p2align 2
_proc_pid: ; @proc_pid
.cfi_startproc
; %bb.0:
; int proc_pid(p): return p ? *(int *)p : -1;
cbnz x0, LBB0_2
; %bb.1:
mov w0, #-1 ; NULL proc -> -1
ret
LBB0_2:
ldr w0, [x0] ; load the 32-bit pid field at offset 0
ret
.cfi_endproc
; -- End function
.subsections_via_symbols
| AnghaBench/darwin-xnu/bsd/kern/extr_kern_proc.c_proc_pid.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function max9860_hw_params
_max9860_hw_params: ## @max9860_hw_params
.cfi_startproc
## %bb.0:
## ALSA hw_params callback for the MAX9860 codec.  Computes the IFC1A/
## IFC1B/SYSCLK/AUDIOCLK register values from the requested params and
## writes them through regmap, logging via dev_dbg/dev_err.  Returns 0
## on success, a regmap error code, or -EINVAL for unsupported
## format/clock combinations.
## Observed register roles (field names are inferences -- TODO confirm
## against the C source):
##   r13 = params (arg2); (rdx)[0] = component, spilled to -56(%rbp)
##   after LBB0_4/7; r14 then r15 = drvdata from
##   snd_soc_component_get_drvdata:
##     +0 dai fmt flags, +4 pclk rate, +8 extra SYSCLK bits, +12 regmap.
##   r12 accumulates IFC1B bits, ebx accumulates IFC1A bits.
##   -44(%rbp) = "master/PLL" boolean flag.
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $24, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rsi, %r13
movq (%rdx), %r15
movq %r15, %rdi
callq _snd_soc_component_get_drvdata
movq %rax, %r14
movl (%r15), %r12d
movq %r13, %rdi
callq _params_rate
movl %eax, %ebx
movq %r13, %rdi
callq _params_channels
leaq L_.str(%rip), %rsi
movl $0, -44(%rbp) ## 4-byte Folded Spill
movl %r12d, %edi
movl %ebx, %edx
movl %eax, %ecx
xorl %eax, %eax
callq _dev_dbg
movq %r13, %rdi
callq _params_channels
movl $0, %r12d
cmpl $2, %eax
jne LBB0_2
## %bb.1:
## Stereo: set the MAX9860_ST bit in IFC1B.
movq _MAX9860_ST@GOTPCREL(%rip), %rax
movl (%rax), %r12d
LBB0_2:
## Dispatch on the DAIFMT master field (literal tags 136/137 here).
movq _SND_SOC_DAIFMT_MASTER_MASK@GOTPCREL(%rip), %rax
movl (%rax), %eax
andl (%r14), %eax
cmpl $136, %eax
je LBB0_7
## %bb.3:
cmpl $137, %eax
jne LBB0_25
## %bb.4:
movq %r15, -56(%rbp) ## 8-byte Spill
movq %r14, %r15
movq _MAX9860_MASTER@GOTPCREL(%rip), %rax
movl (%rax), %r14d
testl %r14d, %r14d
je LBB0_8
## %bb.5:
## Master mode: pick BSEL from bits-per-frame (width * channels).
movq %r13, %rdi
callq _params_width
movl %eax, %ebx
movq %r13, %rdi
callq _params_channels
imull %ebx, %eax
cmpl $49, %eax
jl LBB0_9
## %bb.6:
movq _MAX9860_BSEL_64X@GOTPCREL(%rip), %rax
jmp LBB0_10
LBB0_7:
movq %r15, -56(%rbp) ## 8-byte Spill
movq %r14, %r15
xorl %r14d, %r14d
jmp LBB0_11
LBB0_8:
movl $0, -44(%rbp) ## 4-byte Folded Spill
xorl %r14d, %r14d
jmp LBB0_11
LBB0_9:
movq _MAX9860_BSEL_48X@GOTPCREL(%rip), %rax
LBB0_10:
orl (%rax), %r12d
movb $1, %al
movl %eax, -44(%rbp) ## 4-byte Spill
LBB0_11:
## Dispatch on the DAIFMT format field via jump table LJTI0_0
## (tags 130..135 after the -130 bias).
movq _SND_SOC_DAIFMT_FORMAT_MASK@GOTPCREL(%rip), %rsi
movl (%rsi), %eax
andl (%r15), %eax
addl $-130, %eax
cmpl $5, %eax
ja LBB0_25
## %bb.12:
leaq LJTI0_0(%rip), %rcx
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
LBB0_13:
movq _MAX9860_WCI@GOTPCREL(%rip), %rax
movl (%rax), %ebx
jmp LBB0_20
LBB0_14:
movq _MAX9860_DDLY@GOTPCREL(%rip), %rax
movl (%rax), %ebx
movq _MAX9860_ADLY@GOTPCREL(%rip), %rax
orl (%rax), %r12d
jmp LBB0_20
LBB0_15:
## DSP_B-style format: only 16-bit samples are supported.
movq %r13, %rdi
callq _params_width
cmpl $16, %eax
jne LBB0_26
## %bb.16:
movq _MAX9860_WCI@GOTPCREL(%rip), %rax
movq _MAX9860_HIZ@GOTPCREL(%rip), %rcx
movl (%rcx), %ebx
orl (%rax), %ebx
movq _MAX9860_TDM@GOTPCREL(%rip), %rax
orl (%rax), %ebx
jmp LBB0_19
LBB0_17:
## DSP_A-style format: only 16-bit samples are supported.
movq %r13, %rdi
callq _params_width
cmpl $16, %eax
jne LBB0_27
## %bb.18:
movq _MAX9860_DDLY@GOTPCREL(%rip), %rax
movq _MAX9860_WCI@GOTPCREL(%rip), %rcx
movl (%rcx), %ebx
orl (%rax), %ebx
movq _MAX9860_HIZ@GOTPCREL(%rip), %rax
orl (%rax), %ebx
movq _MAX9860_TDM@GOTPCREL(%rip), %rax
orl (%rax), %ebx
movq _MAX9860_ADLY@GOTPCREL(%rip), %rax
orl (%rax), %r12d
LBB0_19:
movq _SND_SOC_DAIFMT_FORMAT_MASK@GOTPCREL(%rip), %rsi
LBB0_20:
## Dispatch on the DAIFMT clock-inversion field via LJTI0_1
## (tags 128..132 after the -128 bias).
movl (%r15), %eax
movq _SND_SOC_DAIFMT_INV_MASK@GOTPCREL(%rip), %rcx
movl (%rcx), %ecx
andl %eax, %ecx
addl $-128, %ecx
cmpl $4, %ecx
ja LBB0_25
## %bb.21:
orl %r14d, %ebx
leaq LJTI0_1(%rip), %rdx
movslq (%rdx,%rcx,4), %rcx
addq %rdx, %rcx
jmpq *%rcx
LBB0_22:
andl (%rsi), %eax
andl $-2, %eax
cmpl $134, %eax
je LBB0_25
## %bb.23:
movq _MAX9860_WCI@GOTPCREL(%rip), %rax
xorl (%rax), %ebx
jmp LBB0_31
LBB0_24:
andl (%rsi), %eax
andl $-2, %eax
cmpl $134, %eax
jne LBB0_29
LBB0_25:
## Common error exit: return -EINVAL.
movq _EINVAL@GOTPCREL(%rip), %rax
xorl %ebx, %ebx
subl (%rax), %ebx
jmp LBB0_59
LBB0_26:
movq -56(%rbp), %rax ## 8-byte Reload
movl (%rax), %edi
leaq L_.str.2(%rip), %rsi
jmp LBB0_28
LBB0_27:
movq -56(%rbp), %rax ## 8-byte Reload
movl (%rax), %edi
leaq L_.str.1(%rip), %rsi
LBB0_28:
xorl %ebx, %ebx
xorl %eax, %eax
callq _dev_err
movq _EINVAL@GOTPCREL(%rip), %rax
subl (%rax), %ebx
jmp LBB0_59
LBB0_29:
movq _MAX9860_WCI@GOTPCREL(%rip), %rax
xorl (%rax), %ebx
LBB0_30:
movq _MAX9860_DBCI@GOTPCREL(%rip), %rax
xorl (%rax), %ebx
movq _MAX9860_ABCI@GOTPCREL(%rip), %rax
xorl (%rax), %r12d
LBB0_31:
## Write IFC1A (ebx), then IFC1B (r12d), via regmap_write.
movq -56(%rbp), %r14 ## 8-byte Reload
movl (%r14), %edi
leaq L_.str.3(%rip), %rsi
movl %ebx, %edx
xorl %eax, %eax
callq _dev_dbg
movl 12(%r15), %edi
movq _MAX9860_IFC1A@GOTPCREL(%rip), %rax
movl (%rax), %esi
movslq %ebx, %rdx
callq _regmap_write
movl (%r14), %edi
testl %eax, %eax
je LBB0_33
## %bb.32:
movl %eax, %ebx
leaq L_.str.4(%rip), %rsi
jmp LBB0_58
LBB0_33:
leaq L_.str.5(%rip), %rsi
movl %r12d, %edx
xorl %eax, %eax
callq _dev_dbg
movl 12(%r15), %edi
movq _MAX9860_IFC1B@GOTPCREL(%rip), %rax
movl (%rax), %esi
movslq %r12d, %rdx
callq _regmap_write
testl %eax, %eax
je LBB0_35
## %bb.34:
movl %eax, %ebx
movq -56(%rbp), %rax ## 8-byte Reload
movl (%rax), %edi
leaq L_.str.6(%rip), %rsi
jmp LBB0_58
LBB0_35:
## Clock selection: 8 kHz always, 16 kHz only in master mode; map the
## pclk rate (12 / 13 / 19.2 MHz) to a MAX9860_FREQ_* code in ebx.
movq %r13, %rdi
callq _params_rate
cmpl $8000, %eax ## imm = 0x1F40
jne LBB0_37
## %bb.36:
xorl %ebx, %ebx
cmpb $0, -44(%rbp) ## 1-byte Folded Reload
movl $0, -44(%rbp) ## 4-byte Folded Spill
jne LBB0_38
jmp LBB0_48
LBB0_37:
movq %r13, %rdi
callq _params_rate
cmpl $16000, %eax ## imm = 0x3E80
sete %al
xorl %ebx, %ebx
testb %al, -44(%rbp) ## 1-byte Folded Reload
je LBB0_48
LBB0_38:
movl 4(%r15), %eax
xorl %ebx, %ebx
movb $1, %cl
movl %ecx, -44(%rbp) ## 4-byte Spill
cmpl $12000000, %eax ## imm = 0xB71B00
je LBB0_42
## %bb.39:
cmpl $19200000, %eax ## imm = 0x124F800
je LBB0_43
## %bb.40:
cmpl $13000000, %eax ## imm = 0xC65D40
jne LBB0_48
## %bb.41:
movq _MAX9860_FREQ_13MHZ@GOTPCREL(%rip), %rax
jmp LBB0_44
LBB0_42:
movq _MAX9860_FREQ_12MHZ@GOTPCREL(%rip), %rax
jmp LBB0_44
LBB0_43:
movq _MAX9860_FREQ_19_2MHZ@GOTPCREL(%rip), %rax
LBB0_44:
movl (%rax), %r14d
testl %r14d, %r14d
je LBB0_48
## %bb.45:
movq %r13, %rdi
callq _params_rate
cmpl $16000, %eax ## imm = 0x3E80
jne LBB0_47
## %bb.46:
movq _MAX9860_16KHZ@GOTPCREL(%rip), %rax
orl (%rax), %r14d
LBB0_47:
movl %r14d, %ebx
LBB0_48:
## N divider: DIV_ROUND_CLOSEST_ULL(rate * 3 * 2^21, pclk).
movq %r13, %rdi
callq _params_rate
cltq
shlq $21, %rax
leaq (%rax,%rax,2), %rdi
movl 4(%r15), %esi
callq _DIV_ROUND_CLOSEST_ULL
movq %rax, %r12
testl %ebx, %ebx
jne LBB0_52
## %bb.49:
movq %r13, %rdi
callq _params_rate
xorl %ebx, %ebx
cmpl $24001, %eax ## imm = 0x5DC1
jl LBB0_51
## %bb.50:
## Rates above 24 kHz set the 16KHZ bit.
movq _MAX9860_16KHZ@GOTPCREL(%rip), %rax
movl (%rax), %ebx
LBB0_51:
movl -44(%rbp), %eax ## 4-byte Reload
## kill: def $al killed $al killed $eax
xorb $1, %al
movzbl %al, %eax
orq %rax, %r12
LBB0_52:
## Write SYSCLK, then the split N value (AUDIOCLKHIGH / AUDIOCLKLOW).
orl 8(%r15), %ebx
movslq %ebx, %rbx
movq -56(%rbp), %r14 ## 8-byte Reload
movl (%r14), %edi
leaq L_.str.7(%rip), %rsi
movl %ebx, %edx
xorl %eax, %eax
callq _dev_dbg
movl 12(%r15), %edi
movq _MAX9860_SYSCLK@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq %rbx, %rdx
callq _regmap_write
movl (%r14), %edi
testl %eax, %eax
je LBB0_54
## %bb.53:
movl %eax, %ebx
leaq L_.str.8(%rip), %rsi
jmp LBB0_58
LBB0_54:
leaq L_.str.9(%rip), %rsi
movq %r12, %rdx
xorl %eax, %eax
callq _dev_dbg
movl 12(%r15), %edi
movq _MAX9860_AUDIOCLKHIGH@GOTPCREL(%rip), %r14
movl (%r14), %esi
movq %r12, %rdx
shrq $8, %rdx ## NHI = N >> 8
callq _regmap_write
testl %eax, %eax
je LBB0_56
## %bb.55:
movl %eax, %ebx
movq -56(%rbp), %rax ## 8-byte Reload
movl (%rax), %edi
leaq L_.str.10(%rip), %rsi
jmp LBB0_58
LBB0_56:
movl 12(%r15), %edi
movq _MAX9860_AUDIOCLKLOW@GOTPCREL(%rip), %rax
movl (%rax), %esi
movzbl %r12b, %edx ## NLO = N & 0xff
callq _regmap_write
testl %eax, %eax
je LBB0_60
## %bb.57:
movl %eax, %ebx
movq -56(%rbp), %rax ## 8-byte Reload
movl (%rax), %edi
leaq L_.str.11(%rip), %rsi
LBB0_58:
## Shared error-report tail: dev_err(dev, fmt, err); return err.
movl %ebx, %edx
xorl %eax, %eax
callq _dev_err
LBB0_59:
## Common return path: result in ebx.
movl %ebx, %eax
addq $24, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB0_60:
## Non-master path: enable the PLL via regmap_update_bits.
xorl %ebx, %ebx
cmpb $0, -44(%rbp) ## 1-byte Folded Reload
jne LBB0_59
## %bb.61:
movq -56(%rbp), %rax ## 8-byte Reload
movl (%rax), %edi
leaq L_.str.12(%rip), %rsi
xorl %ebx, %ebx
xorl %eax, %eax
callq _dev_dbg
movl 12(%r15), %edi
movl (%r14), %esi
movq _MAX9860_PLL@GOTPCREL(%rip), %rax
movl (%rax), %ecx
movl %ecx, %edx
callq _regmap_update_bits
testl %eax, %eax
je LBB0_59
## %bb.62:
movl %eax, %r14d
movq -56(%rbp), %rax ## 8-byte Reload
movl (%rax), %edi
leaq L_.str.13(%rip), %rsi
movl %r14d, %edx
xorl %eax, %eax
callq _dev_err
movl %r14d, %ebx
jmp LBB0_59
.cfi_endproc
.p2align 2, 0x90
.data_region jt32
.set L0_0_set_13, LBB0_13-LJTI0_0
.set L0_0_set_25, LBB0_25-LJTI0_0
.set L0_0_set_14, LBB0_14-LJTI0_0
.set L0_0_set_15, LBB0_15-LJTI0_0
.set L0_0_set_17, LBB0_17-LJTI0_0
LJTI0_0: ## format-field jump table (see LBB0_11)
.long L0_0_set_13
.long L0_0_set_25
.long L0_0_set_25
.long L0_0_set_14
.long L0_0_set_15
.long L0_0_set_17
.set L0_1_set_31, LBB0_31-LJTI0_1
.set L0_1_set_22, LBB0_22-LJTI0_1
.set L0_1_set_25, LBB0_25-LJTI0_1
.set L0_1_set_30, LBB0_30-LJTI0_1
.set L0_1_set_24, LBB0_24-LJTI0_1
LJTI0_1: ## inversion-field jump table (see LBB0_20)
.long L0_1_set_31
.long L0_1_set_22
.long L0_1_set_25
.long L0_1_set_30
.long L0_1_set_24
.end_data_region
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "hw_params %u Hz, %u channels\n"
.comm _MAX9860_ST,4,2 ## @MAX9860_ST
.comm _SND_SOC_DAIFMT_MASTER_MASK,4,2 ## @SND_SOC_DAIFMT_MASTER_MASK
.comm _MAX9860_MASTER,4,2 ## @MAX9860_MASTER
.comm _EINVAL,4,2 ## @EINVAL
.comm _MAX9860_BSEL_64X,4,2 ## @MAX9860_BSEL_64X
.comm _MAX9860_BSEL_48X,4,2 ## @MAX9860_BSEL_48X
.comm _SND_SOC_DAIFMT_FORMAT_MASK,4,2 ## @SND_SOC_DAIFMT_FORMAT_MASK
.comm _MAX9860_DDLY,4,2 ## @MAX9860_DDLY
.comm _MAX9860_ADLY,4,2 ## @MAX9860_ADLY
.comm _MAX9860_WCI,4,2 ## @MAX9860_WCI
L_.str.1: ## @.str.1
.asciz "DSP_A works for 16 bits per sample only.\n"
.comm _MAX9860_HIZ,4,2 ## @MAX9860_HIZ
.comm _MAX9860_TDM,4,2 ## @MAX9860_TDM
L_.str.2: ## @.str.2
.asciz "DSP_B works for 16 bits per sample only.\n"
.comm _SND_SOC_DAIFMT_INV_MASK,4,2 ## @SND_SOC_DAIFMT_INV_MASK
.comm _MAX9860_DBCI,4,2 ## @MAX9860_DBCI
.comm _MAX9860_ABCI,4,2 ## @MAX9860_ABCI
L_.str.3: ## @.str.3
.asciz "IFC1A %02x\n"
.comm _MAX9860_IFC1A,4,2 ## @MAX9860_IFC1A
L_.str.4: ## @.str.4
.asciz "Failed to set IFC1A: %d\n"
L_.str.5: ## @.str.5
.asciz "IFC1B %02x\n"
.comm _MAX9860_IFC1B,4,2 ## @MAX9860_IFC1B
L_.str.6: ## @.str.6
.asciz "Failed to set IFC1B: %d\n"
.comm _MAX9860_FREQ_12MHZ,4,2 ## @MAX9860_FREQ_12MHZ
.comm _MAX9860_FREQ_13MHZ,4,2 ## @MAX9860_FREQ_13MHZ
.comm _MAX9860_FREQ_19_2MHZ,4,2 ## @MAX9860_FREQ_19_2MHZ
.comm _MAX9860_16KHZ,4,2 ## @MAX9860_16KHZ
L_.str.7: ## @.str.7
.asciz "SYSCLK %02x\n"
.comm _MAX9860_SYSCLK,4,2 ## @MAX9860_SYSCLK
L_.str.8: ## @.str.8
.asciz "Failed to set SYSCLK: %d\n"
L_.str.9: ## @.str.9
.asciz "N %lu\n"
.comm _MAX9860_AUDIOCLKHIGH,4,2 ## @MAX9860_AUDIOCLKHIGH
L_.str.10: ## @.str.10
.asciz "Failed to set NHI: %d\n"
.comm _MAX9860_AUDIOCLKLOW,4,2 ## @MAX9860_AUDIOCLKLOW
L_.str.11: ## @.str.11
.asciz "Failed to set NLO: %d\n"
L_.str.12: ## @.str.12
.asciz "Enable PLL\n"
.comm _MAX9860_PLL,4,2 ## @MAX9860_PLL
L_.str.13: ## @.str.13
.asciz "Failed to enable PLL: %d\n"
.no_dead_strip _max9860_hw_params
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function max9860_hw_params
_max9860_hw_params: ; @max9860_hw_params
.cfi_startproc
; %bb.0:
; ALSA hw_params callback for the MAX9860 codec (AArch64 build of the
; same function as the x86 column).  Computes IFC1A/IFC1B/SYSCLK/
; AUDIOCLK register values and writes them through regmap, logging via
; dev_dbg/dev_err.  Returns 0, a regmap error, or -EINVAL.
; Observed register roles (field names are inferences -- TODO confirm
; against the C source):
;   x21 = params (arg1), x19 = component (*arg2), x20 = drvdata:
;     [x20]     dai format flags
;     [x20,#4]  pclk rate (12 / 13 / 19.2 MHz cases below)
;     [x20,#8]  extra SYSCLK bits
;     [x20,#12] regmap handle (first arg of every regmap_write)
;   w25 accumulates IFC1A bits, w24 IFC1B bits, w23 = master/PLL flag,
;   w22 = result.
sub sp, sp, #96
.cfi_def_cfa_offset 96
stp x26, x25, [sp, #16] ; 16-byte Folded Spill
stp x24, x23, [sp, #32] ; 16-byte Folded Spill
stp x22, x21, [sp, #48] ; 16-byte Folded Spill
stp x20, x19, [sp, #64] ; 16-byte Folded Spill
stp x29, x30, [sp, #80] ; 16-byte Folded Spill
add x29, sp, #80
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
mov x21, x1
ldr x19, [x2]
mov x0, x19
bl _snd_soc_component_get_drvdata
mov x20, x0
ldr w22, [x19]
mov x0, x21
bl _params_rate
mov x23, x0
mov x0, x21
bl _params_channels
; kill: def $w0 killed $w0 def $x0
stp x23, x0, [sp] ; variadic args for dev_dbg go on the stack
Lloh0:
adrp x1, l_.str@PAGE
Lloh1:
add x1, x1, l_.str@PAGEOFF
mov x0, x22
bl _dev_dbg
mov x0, x21
bl _params_channels
Lloh2:
adrp x8, _MAX9860_ST@GOTPAGE
Lloh3:
ldr x8, [x8, _MAX9860_ST@GOTPAGEOFF]
Lloh4:
ldr w8, [x8]
cmp w0, #2
csel w24, w8, wzr, eq ; stereo -> MAX9860_ST bit
ldr w8, [x20]
Lloh5:
adrp x9, _SND_SOC_DAIFMT_MASTER_MASK@GOTPAGE
Lloh6:
ldr x9, [x9, _SND_SOC_DAIFMT_MASTER_MASK@GOTPAGEOFF]
Lloh7:
ldr w9, [x9]
and w8, w9, w8
cmp w8, #136 ; DAIFMT master-field tags 136/137
b.eq LBB0_5
; %bb.1:
cmp w8, #137
b.ne LBB0_33
; %bb.2:
Lloh8:
adrp x8, _MAX9860_MASTER@GOTPAGE
Lloh9:
ldr x8, [x8, _MAX9860_MASTER@GOTPAGEOFF]
Lloh10:
ldr w25, [x8]
cbz w25, LBB0_6
; %bb.3:
; Master mode: pick BSEL from bits-per-frame (width * channels).
mov x0, x21
bl _params_width
mov x22, x0
mov x0, x21
bl _params_channels
mul w8, w0, w22
cmp w8, #49
b.lt LBB0_7
; %bb.4:
Lloh11:
adrp x8, _MAX9860_BSEL_64X@GOTPAGE
Lloh12:
ldr x8, [x8, _MAX9860_BSEL_64X@GOTPAGEOFF]
b LBB0_8
LBB0_5:
mov w23, #0
mov w25, #0
b LBB0_9
LBB0_6:
mov w23, #0
b LBB0_9
LBB0_7:
Lloh13:
adrp x8, _MAX9860_BSEL_48X@GOTPAGE
Lloh14:
ldr x8, [x8, _MAX9860_BSEL_48X@GOTPAGEOFF]
LBB0_8:
ldr w8, [x8]
orr w24, w8, w24
mov w23, #1
LBB0_9:
; Dispatch on the DAIFMT format field (tags 130..135) through the
; byte jump table lJTI0_0 (entries are (target-LBB0_11)/4; the table
; itself lives in __TEXT,__const, past the end of this chunk).
ldr w8, [x20]
Lloh15:
adrp x22, _SND_SOC_DAIFMT_FORMAT_MASK@GOTPAGE
Lloh16:
ldr x22, [x22, _SND_SOC_DAIFMT_FORMAT_MASK@GOTPAGEOFF]
ldr w9, [x22]
and w8, w9, w8
sub w8, w8, #130
cmp w8, #5
b.hi LBB0_33
; %bb.10:
Lloh17:
adrp x9, lJTI0_0@PAGE
Lloh18:
add x9, x9, lJTI0_0@PAGEOFF
adr x10, LBB0_11
ldrb w11, [x9, x8]
add x10, x10, x11, lsl #2
br x10
LBB0_11:
Lloh19:
adrp x8, _MAX9860_WCI@GOTPAGE
Lloh20:
ldr x8, [x8, _MAX9860_WCI@GOTPAGEOFF]
Lloh21:
ldr w8, [x8]
b LBB0_18
LBB0_12:
Lloh22:
adrp x8, _MAX9860_DDLY@GOTPAGE
Lloh23:
ldr x8, [x8, _MAX9860_DDLY@GOTPAGEOFF]
Lloh24:
ldr w8, [x8]
b LBB0_17
LBB0_13:
; DSP_B-style format: only 16-bit samples are supported.
mov x0, x21
bl _params_width
cmp w0, #16
b.ne LBB0_30
; %bb.14:
Lloh25:
adrp x8, _MAX9860_WCI@GOTPAGE
Lloh26:
ldr x8, [x8, _MAX9860_WCI@GOTPAGEOFF]
Lloh27:
ldr w8, [x8]
Lloh28:
adrp x9, _MAX9860_HIZ@GOTPAGE
Lloh29:
ldr x9, [x9, _MAX9860_HIZ@GOTPAGEOFF]
Lloh30:
ldr w9, [x9]
orr w8, w9, w8
Lloh31:
adrp x9, _MAX9860_TDM@GOTPAGE
Lloh32:
ldr x9, [x9, _MAX9860_TDM@GOTPAGEOFF]
Lloh33:
ldr w9, [x9]
orr w8, w8, w9
b LBB0_18
LBB0_15:
; DSP_A-style format: only 16-bit samples are supported.
mov x0, x21
bl _params_width
cmp w0, #16
b.ne LBB0_31
; %bb.16:
Lloh34:
adrp x8, _MAX9860_DDLY@GOTPAGE
Lloh35:
ldr x8, [x8, _MAX9860_DDLY@GOTPAGEOFF]
Lloh36:
ldr w8, [x8]
Lloh37:
adrp x9, _MAX9860_WCI@GOTPAGE
Lloh38:
ldr x9, [x9, _MAX9860_WCI@GOTPAGEOFF]
Lloh39:
ldr w9, [x9]
orr w8, w9, w8
Lloh40:
adrp x9, _MAX9860_HIZ@GOTPAGE
Lloh41:
ldr x9, [x9, _MAX9860_HIZ@GOTPAGEOFF]
Lloh42:
ldr w9, [x9]
orr w8, w8, w9
Lloh43:
adrp x9, _MAX9860_TDM@GOTPAGE
Lloh44:
ldr x9, [x9, _MAX9860_TDM@GOTPAGEOFF]
Lloh45:
ldr w9, [x9]
orr w8, w8, w9
LBB0_17:
Lloh46:
adrp x9, _MAX9860_ADLY@GOTPAGE
Lloh47:
ldr x9, [x9, _MAX9860_ADLY@GOTPAGEOFF]
Lloh48:
ldr w9, [x9]
orr w24, w9, w24
LBB0_18:
; Dispatch on the DAIFMT inversion field (tags 128..132) via lJTI0_1.
ldr w9, [x20]
Lloh49:
adrp x10, _SND_SOC_DAIFMT_INV_MASK@GOTPAGE
Lloh50:
ldr x10, [x10, _SND_SOC_DAIFMT_INV_MASK@GOTPAGEOFF]
Lloh51:
ldr w10, [x10]
and w10, w10, w9
sub w10, w10, #128
cmp w10, #4
b.hi LBB0_33
; %bb.19:
orr w25, w8, w25
Lloh52:
adrp x8, lJTI0_1@PAGE
Lloh53:
add x8, x8, lJTI0_1@PAGEOFF
adr x11, LBB0_20
ldrb w12, [x8, x10]
add x11, x11, x12, lsl #2
br x11
LBB0_20:
ldr w8, [x22]
and w8, w9, w8
and w8, w8, #0xfffffffe
cmp w8, #134
b.eq LBB0_33
; %bb.21:
Lloh54:
adrp x8, _MAX9860_WCI@GOTPAGE
Lloh55:
ldr x8, [x8, _MAX9860_WCI@GOTPAGEOFF]
Lloh56:
ldr w8, [x8]
eor w25, w8, w25
b LBB0_25
LBB0_22:
ldr w8, [x22]
and w8, w9, w8
and w8, w8, #0xfffffffe
cmp w8, #134
b.eq LBB0_33
; %bb.23:
Lloh57:
adrp x8, _MAX9860_WCI@GOTPAGE
Lloh58:
ldr x8, [x8, _MAX9860_WCI@GOTPAGEOFF]
Lloh59:
ldr w8, [x8]
eor w25, w8, w25
LBB0_24:
Lloh60:
adrp x8, _MAX9860_DBCI@GOTPAGE
Lloh61:
ldr x8, [x8, _MAX9860_DBCI@GOTPAGEOFF]
Lloh62:
ldr w8, [x8]
eor w25, w8, w25
Lloh63:
adrp x8, _MAX9860_ABCI@GOTPAGE
Lloh64:
ldr x8, [x8, _MAX9860_ABCI@GOTPAGEOFF]
Lloh65:
ldr w8, [x8]
eor w24, w8, w24
LBB0_25:
; Write IFC1A (w25), then IFC1B (w24), via regmap_write.
ldr w0, [x19]
str x25, [sp]
Lloh66:
adrp x1, l_.str.3@PAGE
Lloh67:
add x1, x1, l_.str.3@PAGEOFF
bl _dev_dbg
ldr w0, [x20, #12]
Lloh68:
adrp x8, _MAX9860_IFC1A@GOTPAGE
Lloh69:
ldr x8, [x8, _MAX9860_IFC1A@GOTPAGEOFF]
Lloh70:
ldr w1, [x8]
sxtw x2, w25
bl _regmap_write
mov x22, x0
ldr w0, [x19]
cbz w22, LBB0_27
; %bb.26:
str x22, [sp]
Lloh71:
adrp x1, l_.str.4@PAGE
Lloh72:
add x1, x1, l_.str.4@PAGEOFF
bl _dev_err
b LBB0_34
LBB0_27:
str x24, [sp]
Lloh73:
adrp x1, l_.str.5@PAGE
Lloh74:
add x1, x1, l_.str.5@PAGEOFF
bl _dev_dbg
ldr w0, [x20, #12]
Lloh75:
adrp x8, _MAX9860_IFC1B@GOTPAGE
Lloh76:
ldr x8, [x8, _MAX9860_IFC1B@GOTPAGEOFF]
Lloh77:
ldr w1, [x8]
sxtw x2, w24
bl _regmap_write
cbz w0, LBB0_35
; %bb.28:
mov x22, x0
ldr w0, [x19]
str x22, [sp]
Lloh78:
adrp x1, l_.str.6@PAGE
Lloh79:
add x1, x1, l_.str.6@PAGEOFF
LBB0_29:
; Shared error-report tail: dev_err(dev, fmt, err); return err (w22).
bl _dev_err
; kill: def $w22 killed $w22 killed $x22 def $x22
b LBB0_34
LBB0_30:
ldr w0, [x19]
Lloh80:
adrp x1, l_.str.2@PAGE
Lloh81:
add x1, x1, l_.str.2@PAGEOFF
b LBB0_32
LBB0_31:
ldr w0, [x19]
Lloh82:
adrp x1, l_.str.1@PAGE
Lloh83:
add x1, x1, l_.str.1@PAGEOFF
LBB0_32:
bl _dev_err
LBB0_33:
; Common error exit: return -EINVAL.
Lloh84:
adrp x8, _EINVAL@GOTPAGE
Lloh85:
ldr x8, [x8, _EINVAL@GOTPAGEOFF]
Lloh86:
ldr w8, [x8]
neg w22, w8
LBB0_34:
; Common epilogue: result in w22.
mov x0, x22
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
ldp x26, x25, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #96
ret
LBB0_35:
; Clock selection: 8 kHz always, 16 kHz only in master mode; map the
; pclk rate (12 / 13 / 19.2 MHz) to a MAX9860_FREQ_* code in w24.
mov x0, x21
bl _params_rate
mov w8, #8000
cmp w0, w8
b.ne LBB0_38
; %bb.36:
cbnz w23, LBB0_39
LBB0_37:
mov w24, #0
b LBB0_50
LBB0_38:
mov x0, x21
bl _params_rate
mov w8, #16000
cmp w0, w8
cset w8, eq
and w8, w23, w8
tbz w8, #0, LBB0_37
LBB0_39:
ldr w8, [x20, #4]
sub w9, w8, #2929, lsl #12 ; =11997184
cmp w9, #2816 ; pclk == 12000000?
b.eq LBB0_43
; %bb.40:
mov w9, #63488 ; w9 = 19200000
movk w9, #292, lsl #16
cmp w8, w9
b.eq LBB0_44
; %bb.41:
mov w9, #23872 ; w9 = 13000000
movk w9, #198, lsl #16
cmp w8, w9
b.ne LBB0_48
; %bb.42:
Lloh87:
adrp x8, _MAX9860_FREQ_13MHZ@GOTPAGE
Lloh88:
ldr x8, [x8, _MAX9860_FREQ_13MHZ@GOTPAGEOFF]
b LBB0_45
LBB0_43:
Lloh89:
adrp x8, _MAX9860_FREQ_12MHZ@GOTPAGE
Lloh90:
ldr x8, [x8, _MAX9860_FREQ_12MHZ@GOTPAGEOFF]
b LBB0_45
LBB0_44:
Lloh91:
adrp x8, _MAX9860_FREQ_19_2MHZ@GOTPAGE
Lloh92:
ldr x8, [x8, _MAX9860_FREQ_19_2MHZ@GOTPAGEOFF]
LBB0_45:
ldr w24, [x8]
cbz w24, LBB0_49
; %bb.46:
mov x0, x21
bl _params_rate
mov w8, #16000
cmp w0, w8
b.ne LBB0_49
; %bb.47:
Lloh93:
adrp x8, _MAX9860_16KHZ@GOTPAGE
Lloh94:
ldr x8, [x8, _MAX9860_16KHZ@GOTPAGEOFF]
Lloh95:
ldr w8, [x8]
orr w24, w8, w24
b LBB0_49
LBB0_48:
mov w24, #0
LBB0_49:
mov w23, #1
LBB0_50:
; N divider: DIV_ROUND_CLOSEST_ULL(rate * 3 * 2^21, pclk).
mov x0, x21
bl _params_rate
mov w8, #6291456 ; = 3 << 21
smull x0, w0, w8
ldr w1, [x20, #4]
bl _DIV_ROUND_CLOSEST_ULL
mov x22, x0
cbnz w24, LBB0_52
; %bb.51:
mov x0, x21
bl _params_rate
Lloh96:
adrp x8, _MAX9860_16KHZ@GOTPAGE
Lloh97:
ldr x8, [x8, _MAX9860_16KHZ@GOTPAGEOFF]
Lloh98:
ldr w8, [x8]
mov w9, #24000 ; rates above 24 kHz set the 16KHZ bit
cmp w0, w9
csel w24, w8, wzr, gt
eor w8, w23, #0x1
orr x22, x22, x8
LBB0_52:
; Write SYSCLK, then the split N value (AUDIOCLKHIGH / AUDIOCLKLOW).
ldr w8, [x20, #8]
orr w8, w8, w24
sxtw x21, w8
ldr w0, [x19]
str x21, [sp]
Lloh99:
adrp x1, l_.str.7@PAGE
Lloh100:
add x1, x1, l_.str.7@PAGEOFF
bl _dev_dbg
ldr w0, [x20, #12]
Lloh101:
adrp x8, _MAX9860_SYSCLK@GOTPAGE
Lloh102:
ldr x8, [x8, _MAX9860_SYSCLK@GOTPAGEOFF]
Lloh103:
ldr w1, [x8]
mov x2, x21
bl _regmap_write
mov x21, x0
ldr w0, [x19]
cbz w21, LBB0_54
; %bb.53:
str x21, [sp]
Lloh104:
adrp x1, l_.str.8@PAGE
Lloh105:
add x1, x1, l_.str.8@PAGEOFF
b LBB0_56
LBB0_54:
str x22, [sp]
Lloh106:
adrp x1, l_.str.9@PAGE
Lloh107:
add x1, x1, l_.str.9@PAGEOFF
bl _dev_dbg
ldr w0, [x20, #12]
Lloh108:
adrp x24, _MAX9860_AUDIOCLKHIGH@GOTPAGE
Lloh109:
ldr x24, [x24, _MAX9860_AUDIOCLKHIGH@GOTPAGEOFF]
ldr w1, [x24]
lsr x2, x22, #8 ; NHI = N >> 8
bl _regmap_write
cbz w0, LBB0_57
; %bb.55:
mov x21, x0
ldr w0, [x19]
str x21, [sp]
Lloh110:
adrp x1, l_.str.10@PAGE
Lloh111:
add x1, x1, l_.str.10@PAGEOFF
LBB0_56:
bl _dev_err
mov x22, x21
b LBB0_34
LBB0_57:
ldr w0, [x20, #12]
Lloh112:
adrp x8, _MAX9860_AUDIOCLKLOW@GOTPAGE
Lloh113:
ldr x8, [x8, _MAX9860_AUDIOCLKLOW@GOTPAGEOFF]
Lloh114:
ldr w1, [x8]
and x2, x22, #0xff ; NLO = N & 0xff
bl _regmap_write
cbz w0, LBB0_59
; %bb.58:
mov x22, x0
ldr w0, [x19]
str x22, [sp]
Lloh115:
adrp x1, l_.str.11@PAGE
Lloh116:
add x1, x1, l_.str.11@PAGEOFF
b LBB0_29
LBB0_59:
; Non-master path: enable the PLL via regmap_update_bits.
tbz w23, #0, LBB0_61
; %bb.60:
mov w22, #0
b LBB0_34
LBB0_61:
ldr w0, [x19]
Lloh117:
adrp x1, l_.str.12@PAGE
Lloh118:
add x1, x1, l_.str.12@PAGEOFF
bl _dev_dbg
ldr w0, [x20, #12]
ldr w1, [x24]
Lloh119:
adrp x8, _MAX9860_PLL@GOTPAGE
Lloh120:
ldr x8, [x8, _MAX9860_PLL@GOTPAGEOFF]
Lloh121:
ldr w2, [x8]
mov x3, x2
bl _regmap_update_bits
mov x22, x0
cbz w0, LBB0_34
; %bb.62:
ldr w0, [x19]
str x22, [sp]
Lloh122:
adrp x1, l_.str.13@PAGE
Lloh123:
add x1, x1, l_.str.13@PAGEOFF
b LBB0_29
.loh AdrpLdrGotLdr Lloh5, Lloh6, Lloh7
.loh AdrpLdrGotLdr Lloh2, Lloh3, Lloh4
.loh AdrpAdd Lloh0, Lloh1
.loh AdrpLdrGotLdr Lloh8, Lloh9, Lloh10
.loh AdrpLdrGot Lloh11, Lloh12
.loh AdrpLdrGot Lloh13, Lloh14
.loh AdrpLdrGot Lloh15, Lloh16
.loh AdrpAdd Lloh17, Lloh18
.loh AdrpLdrGotLdr Lloh19, Lloh20, Lloh21
.loh AdrpLdrGotLdr Lloh22, Lloh23, Lloh24
.loh AdrpLdrGotLdr Lloh31, Lloh32, Lloh33
.loh AdrpLdrGotLdr Lloh28, Lloh29, Lloh30
.loh AdrpLdrGotLdr Lloh25, Lloh26, Lloh27
.loh AdrpLdrGotLdr Lloh43, Lloh44, Lloh45
.loh AdrpLdrGotLdr Lloh40, Lloh41, Lloh42
.loh AdrpLdrGotLdr Lloh37, Lloh38, Lloh39
.loh AdrpLdrGotLdr Lloh34, Lloh35, Lloh36
.loh AdrpLdrGotLdr Lloh46, Lloh47, Lloh48
.loh AdrpLdrGotLdr Lloh49, Lloh50, Lloh51
.loh AdrpAdd Lloh52, Lloh53
.loh AdrpLdrGotLdr Lloh54, Lloh55, Lloh56
.loh AdrpLdrGotLdr Lloh57, Lloh58, Lloh59
.loh AdrpLdrGotLdr Lloh63, Lloh64, Lloh65
.loh AdrpLdrGotLdr Lloh60, Lloh61, Lloh62
.loh AdrpLdrGotLdr Lloh68, Lloh69, Lloh70
.loh AdrpAdd Lloh66, Lloh67
.loh AdrpAdd Lloh71, Lloh72
.loh AdrpLdrGotLdr Lloh75, Lloh76, Lloh77
.loh AdrpAdd Lloh73, Lloh74
.loh AdrpAdd Lloh78, Lloh79
.loh AdrpAdd Lloh80, Lloh81
.loh AdrpAdd Lloh82, Lloh83
.loh AdrpLdrGotLdr Lloh84, Lloh85, Lloh86
.loh AdrpLdrGot Lloh87, Lloh88
.loh AdrpLdrGot Lloh89, Lloh90
.loh AdrpLdrGot Lloh91, Lloh92
.loh AdrpLdrGotLdr Lloh93, Lloh94, Lloh95
.loh AdrpLdrGotLdr Lloh96, Lloh97, Lloh98
.loh AdrpLdrGotLdr Lloh101, Lloh102, Lloh103
.loh AdrpAdd Lloh99, Lloh100
.loh AdrpAdd Lloh104, Lloh105
.loh AdrpLdrGot Lloh108, Lloh109
.loh AdrpAdd Lloh106, Lloh107
.loh AdrpAdd Lloh110, Lloh111
.loh AdrpLdrGotLdr Lloh112, Lloh113, Lloh114
.loh AdrpAdd Lloh115, Lloh116
.loh AdrpLdrGotLdr Lloh119, Lloh120, Lloh121
.loh AdrpAdd Lloh117, Lloh118
.loh AdrpAdd Lloh122, Lloh123
.cfi_endproc
.section __TEXT,__const
lJTI0_0:
.byte (LBB0_11-LBB0_11)>>2
.byte (LBB0_33-LBB0_11)>>2
.byte (LBB0_33-LBB0_11)>>2
.byte (LBB0_12-LBB0_11)>>2
.byte (LBB0_13-LBB0_11)>>2
.byte (LBB0_15-LBB0_11)>>2
lJTI0_1:
.byte (LBB0_25-LBB0_20)>>2
.byte (LBB0_20-LBB0_20)>>2
.byte (LBB0_33-LBB0_20)>>2
.byte (LBB0_24-LBB0_20)>>2
.byte (LBB0_22-LBB0_20)>>2
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "hw_params %u Hz, %u channels\n"
.comm _MAX9860_ST,4,2 ; @MAX9860_ST
.comm _SND_SOC_DAIFMT_MASTER_MASK,4,2 ; @SND_SOC_DAIFMT_MASTER_MASK
.comm _MAX9860_MASTER,4,2 ; @MAX9860_MASTER
.comm _EINVAL,4,2 ; @EINVAL
.comm _MAX9860_BSEL_64X,4,2 ; @MAX9860_BSEL_64X
.comm _MAX9860_BSEL_48X,4,2 ; @MAX9860_BSEL_48X
.comm _SND_SOC_DAIFMT_FORMAT_MASK,4,2 ; @SND_SOC_DAIFMT_FORMAT_MASK
.comm _MAX9860_DDLY,4,2 ; @MAX9860_DDLY
.comm _MAX9860_ADLY,4,2 ; @MAX9860_ADLY
.comm _MAX9860_WCI,4,2 ; @MAX9860_WCI
l_.str.1: ; @.str.1
.asciz "DSP_A works for 16 bits per sample only.\n"
.comm _MAX9860_HIZ,4,2 ; @MAX9860_HIZ
.comm _MAX9860_TDM,4,2 ; @MAX9860_TDM
l_.str.2: ; @.str.2
.asciz "DSP_B works for 16 bits per sample only.\n"
.comm _SND_SOC_DAIFMT_INV_MASK,4,2 ; @SND_SOC_DAIFMT_INV_MASK
.comm _MAX9860_DBCI,4,2 ; @MAX9860_DBCI
.comm _MAX9860_ABCI,4,2 ; @MAX9860_ABCI
l_.str.3: ; @.str.3
.asciz "IFC1A %02x\n"
.comm _MAX9860_IFC1A,4,2 ; @MAX9860_IFC1A
l_.str.4: ; @.str.4
.asciz "Failed to set IFC1A: %d\n"
l_.str.5: ; @.str.5
.asciz "IFC1B %02x\n"
.comm _MAX9860_IFC1B,4,2 ; @MAX9860_IFC1B
l_.str.6: ; @.str.6
.asciz "Failed to set IFC1B: %d\n"
.comm _MAX9860_FREQ_12MHZ,4,2 ; @MAX9860_FREQ_12MHZ
.comm _MAX9860_FREQ_13MHZ,4,2 ; @MAX9860_FREQ_13MHZ
.comm _MAX9860_FREQ_19_2MHZ,4,2 ; @MAX9860_FREQ_19_2MHZ
.comm _MAX9860_16KHZ,4,2 ; @MAX9860_16KHZ
l_.str.7: ; @.str.7
.asciz "SYSCLK %02x\n"
.comm _MAX9860_SYSCLK,4,2 ; @MAX9860_SYSCLK
l_.str.8: ; @.str.8
.asciz "Failed to set SYSCLK: %d\n"
l_.str.9: ; @.str.9
.asciz "N %lu\n"
.comm _MAX9860_AUDIOCLKHIGH,4,2 ; @MAX9860_AUDIOCLKHIGH
l_.str.10: ; @.str.10
.asciz "Failed to set NHI: %d\n"
.comm _MAX9860_AUDIOCLKLOW,4,2 ; @MAX9860_AUDIOCLKLOW
l_.str.11: ; @.str.11
.asciz "Failed to set NLO: %d\n"
l_.str.12: ; @.str.12
.asciz "Enable PLL\n"
.comm _MAX9860_PLL,4,2 ; @MAX9860_PLL
l_.str.13: ; @.str.13
.asciz "Failed to enable PLL: %d\n"
.no_dead_strip _max9860_hw_params
.subsections_via_symbols
| AnghaBench/linux/sound/soc/codecs/extr_max9860.c_max9860_hw_params.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function fotg210_set_cxstall
_fotg210_set_cxstall: ## @fotg210_set_cxstall
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq %rdi, %rbx
movq _FOTG210_DCFESR@GOTPCREL(%rip), %r14
movq (%r14), %rdi
addq (%rbx), %rdi
callq _ioread32
movq _DCFESR_CX_STL@GOTPCREL(%rip), %rcx
orl (%rcx), %eax
movq (%r14), %rsi
addq (%rbx), %rsi
movl %eax, %edi
popq %rbx
popq %r14
popq %rbp
jmp _iowrite32 ## TAILCALL
.cfi_endproc
## -- End function
.comm _FOTG210_DCFESR,8,3 ## @FOTG210_DCFESR
.comm _DCFESR_CX_STL,4,2 ## @DCFESR_CX_STL
.no_dead_strip _fotg210_set_cxstall
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function fotg210_set_cxstall
_fotg210_set_cxstall: ; @fotg210_set_cxstall
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x0
ldr x8, [x0]
Lloh0:
adrp x20, _FOTG210_DCFESR@GOTPAGE
Lloh1:
ldr x20, [x20, _FOTG210_DCFESR@GOTPAGEOFF]
ldr x9, [x20]
add x0, x9, x8
bl _ioread32
Lloh2:
adrp x8, _DCFESR_CX_STL@GOTPAGE
Lloh3:
ldr x8, [x8, _DCFESR_CX_STL@GOTPAGEOFF]
Lloh4:
ldr w8, [x8]
orr w0, w8, w0
ldr x8, [x19]
ldr x9, [x20]
add x1, x9, x8
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
b _iowrite32
.loh AdrpLdrGotLdr Lloh2, Lloh3, Lloh4
.loh AdrpLdrGot Lloh0, Lloh1
.cfi_endproc
; -- End function
.comm _FOTG210_DCFESR,8,3 ; @FOTG210_DCFESR
.comm _DCFESR_CX_STL,4,2 ; @DCFESR_CX_STL
.no_dead_strip _fotg210_set_cxstall
.subsections_via_symbols
| AnghaBench/linux/drivers/usb/gadget/udc/extr_fotg210-udc.c_fotg210_set_cxstall.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _reach_error ## -- Begin function reach_error
.p2align 4, 0x90
_reach_error: ## @reach_error
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
popq %rbp
retq
.cfi_endproc
## -- End function
.globl ___VERIFIER_assert ## -- Begin function __VERIFIER_assert
.p2align 4, 0x90
___VERIFIER_assert: ## @__VERIFIER_assert
.cfi_startproc
## %bb.0:
testl %edi, %edi
je LBB1_2
## %bb.1:
retq
LBB1_2:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
callq _abort
.cfi_endproc
## -- End function
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
movl $9000008, %eax ## imm = 0x895448
callq ____chkstk_darwin
subq %rax, %rsp
popq %rax
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -32(%rbp)
leaq -9000032(%rbp), %r15
xorl %r14d, %r14d
.p2align 4, 0x90
LBB2_1: ## =>This Loop Header: Depth=1
## Child Loop BB2_2 Depth 2
xorl %ebx, %ebx
.p2align 4, 0x90
LBB2_2: ## Parent Loop BB2_1 Depth=1
## => This Inner Loop Header: Depth=2
xorl %eax, %eax
callq ___VERIFIER_nondet_int
movl %eax, (%r15,%rbx,4)
incq %rbx
cmpq $1500, %rbx ## imm = 0x5DC
jne LBB2_2
## %bb.3: ## in Loop: Header=BB2_1 Depth=1
incq %r14
addq $6000, %r15 ## imm = 0x1770
cmpq $1000, %r14 ## imm = 0x3E8
jne LBB2_1
## %bb.4:
leaq -9000032(%rbp), %rcx
movl -9000032(%rbp), %eax
xorl %edx, %edx
jmp LBB2_5
.p2align 4, 0x90
LBB2_8: ## in Loop: Header=BB2_5 Depth=1
pmaxsd %xmm1, %xmm0
pshufd $238, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,2,3]
pmaxsd %xmm0, %xmm1
pshufd $85, %xmm1, %xmm0 ## xmm0 = xmm1[1,1,1,1]
pmaxsd %xmm1, %xmm0
movd %xmm0, %eax
movl -8994048(%rbp,%rsi,4), %edi
cmpl %eax, %edi
cmovgl %edi, %eax
movl -8994044(%rbp,%rsi,4), %edi
cmpl %eax, %edi
cmovgl %edi, %eax
movl -8994040(%rbp,%rsi,4), %edi
cmpl %eax, %edi
cmovgl %edi, %eax
movl -8994036(%rbp,%rsi,4), %esi
cmpl %eax, %esi
cmovgl %esi, %eax
incq %rdx
addq $6000, %rcx ## imm = 0x1770
cmpq $1500, %rdx ## imm = 0x5DC
je LBB2_9
LBB2_5: ## =>This Loop Header: Depth=1
## Child Loop BB2_6 Depth 2
imulq $1500, %rdx, %rsi ## imm = 0x5DC
movd %eax, %xmm0
pshufd $0, %xmm0, %xmm0 ## xmm0 = xmm0[0,0,0,0]
movl $28, %eax
movdqa %xmm0, %xmm1
.p2align 4, 0x90
LBB2_6: ## Parent Loop BB2_5 Depth=1
## => This Inner Loop Header: Depth=2
pmaxsd -112(%rcx,%rax,4), %xmm0
pmaxsd -96(%rcx,%rax,4), %xmm1
pmaxsd -80(%rcx,%rax,4), %xmm0
pmaxsd -64(%rcx,%rax,4), %xmm1
pmaxsd -48(%rcx,%rax,4), %xmm0
pmaxsd -32(%rcx,%rax,4), %xmm1
cmpq $1500, %rax ## imm = 0x5DC
je LBB2_8
## %bb.7: ## in Loop: Header=BB2_6 Depth=2
pmaxsd -16(%rcx,%rax,4), %xmm0
pmaxsd (%rcx,%rax,4), %xmm1
addq $32, %rax
jmp LBB2_6
LBB2_9:
leaq -9000032(%rbp), %rcx
xorl %edx, %edx
.p2align 4, 0x90
LBB2_10: ## =>This Loop Header: Depth=1
## Child Loop BB2_17 Depth 2
movl $5, %esi
.p2align 4, 0x90
LBB2_17: ## Parent Loop BB2_10 Depth=1
## => This Inner Loop Header: Depth=2
cmpl %eax, -20(%rcx,%rsi,4)
jg LBB2_18
## %bb.11: ## in Loop: Header=BB2_17 Depth=2
cmpl %eax, -16(%rcx,%rsi,4)
jg LBB2_18
## %bb.12: ## in Loop: Header=BB2_17 Depth=2
cmpl %eax, -12(%rcx,%rsi,4)
jg LBB2_18
## %bb.13: ## in Loop: Header=BB2_17 Depth=2
cmpl %eax, -8(%rcx,%rsi,4)
jg LBB2_18
## %bb.14: ## in Loop: Header=BB2_17 Depth=2
cmpl %eax, -4(%rcx,%rsi,4)
jg LBB2_18
## %bb.15: ## in Loop: Header=BB2_17 Depth=2
cmpl %eax, (%rcx,%rsi,4)
jg LBB2_18
## %bb.16: ## in Loop: Header=BB2_17 Depth=2
addq $6, %rsi
cmpq $1505, %rsi ## imm = 0x5E1
jne LBB2_17
## %bb.19: ## in Loop: Header=BB2_10 Depth=1
incq %rdx
addq $6000, %rcx ## imm = 0x1770
cmpq $1500, %rdx ## imm = 0x5DC
jne LBB2_10
## %bb.20:
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
cmpq -32(%rbp), %rax
jne LBB2_22
## %bb.21:
xorl %eax, %eax
addq $9000008, %rsp ## imm = 0x895448
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
LBB2_18:
callq _abort
LBB2_22:
callq ___stack_chk_fail
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _reach_error ; -- Begin function reach_error
.p2align 2
_reach_error: ; @reach_error
.cfi_startproc
; %bb.0:
ret
.cfi_endproc
; -- End function
.globl ___VERIFIER_assert ; -- Begin function __VERIFIER_assert
.p2align 2
___VERIFIER_assert: ; @__VERIFIER_assert
.cfi_startproc
; %bb.0:
cbz w0, LBB1_2
; %bb.1:
ret
LBB1_2:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
bl _abort
.cfi_endproc
; -- End function
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
stp x28, x27, [sp, #-64]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 64
stp x22, x21, [sp, #16] ; 16-byte Folded Spill
stp x20, x19, [sp, #32] ; 16-byte Folded Spill
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w27, -56
.cfi_offset w28, -64
mov w9, #21584
movk w9, #137, lsl #16
Lloh0:
adrp x16, ___chkstk_darwin@GOTPAGE
Lloh1:
ldr x16, [x16, ___chkstk_darwin@GOTPAGEOFF]
blr x16
sub sp, sp, #2197, lsl #12 ; =8998912
sub sp, sp, #1104
mov x19, #0
Lloh2:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh3:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh4:
ldr x8, [x8]
stur x8, [x29, #-56]
add x20, sp, #8
mov w21, #6000
LBB2_1: ; =>This Loop Header: Depth=1
; Child Loop BB2_2 Depth 2
mov x22, #0
LBB2_2: ; Parent Loop BB2_1 Depth=1
; => This Inner Loop Header: Depth=2
bl ___VERIFIER_nondet_int
str w0, [x20, x22, lsl #2]
add x22, x22, #1
cmp x22, #1500
b.ne LBB2_2
; %bb.3: ; in Loop: Header=BB2_1 Depth=1
add x19, x19, #1
add x20, x20, x21
cmp x19, #1000
b.ne LBB2_1
; %bb.4:
mov x8, #0
ldr w13, [sp, #8]
add x9, sp, #8
add x10, x9, #32
mov w11, #6000
LBB2_5: ; =>This Loop Header: Depth=1
; Child Loop BB2_6 Depth 2
madd x12, x8, x11, x9
dup.4s v0, w13
mov x13, #-5952
mov.16b v1, v0
mov.16b v2, v0
mov.16b v3, v0
LBB2_6: ; Parent Loop BB2_5 Depth=1
; => This Inner Loop Header: Depth=2
add x14, x10, x13
ldr q4, [x14, #5920]
ldr q5, [x14, #5936]
ldr q6, [x14, #5952]
ldr q7, [x14, #5968]
smax.4s v0, v4, v0
smax.4s v1, v5, v1
smax.4s v2, v6, v2
smax.4s v3, v7, v3
adds x13, x13, #64
b.ne LBB2_6
; %bb.7: ; in Loop: Header=BB2_5 Depth=1
smax.4s v0, v0, v1
smax.4s v0, v0, v2
smax.4s v0, v0, v3
smaxv.4s s0, v0
fmov w13, s0
ldr w14, [x12, #5952]
cmp w14, w13
csel w13, w14, w13, gt
ldr w14, [x12, #5956]
cmp w14, w13
csel w13, w14, w13, gt
ldr w14, [x12, #5960]
cmp w14, w13
csel w13, w14, w13, gt
ldr w14, [x12, #5964]
cmp w14, w13
csel w13, w14, w13, gt
ldr w14, [x12, #5968]
cmp w14, w13
csel w13, w14, w13, gt
ldr w14, [x12, #5972]
cmp w14, w13
csel w13, w14, w13, gt
ldr w14, [x12, #5976]
cmp w14, w13
csel w13, w14, w13, gt
ldr w14, [x12, #5980]
cmp w14, w13
csel w13, w14, w13, gt
ldr w14, [x12, #5984]
cmp w14, w13
csel w13, w14, w13, gt
ldr w14, [x12, #5988]
cmp w14, w13
csel w13, w14, w13, gt
ldr w14, [x12, #5992]
cmp w14, w13
csel w13, w14, w13, gt
ldr w12, [x12, #5996]
cmp w12, w13
csel w13, w12, w13, gt
add x8, x8, #1
add x10, x10, x11
cmp x8, #1500
b.ne LBB2_5
; %bb.8:
mov x8, #0
add x9, sp, #8
mov w10, #6000
LBB2_9: ; =>This Loop Header: Depth=1
; Child Loop BB2_10 Depth 2
mov x11, #0
LBB2_10: ; Parent Loop BB2_9 Depth=1
; => This Inner Loop Header: Depth=2
ldr w12, [x9, x11, lsl #2]
cmp w12, w13
b.gt LBB2_15
; %bb.11: ; in Loop: Header=BB2_10 Depth=2
add x11, x11, #1
cmp x11, #1500
b.ne LBB2_10
; %bb.12: ; in Loop: Header=BB2_9 Depth=1
add x8, x8, #1
add x9, x9, x10
cmp x8, #1500
b.ne LBB2_9
; %bb.13:
ldur x8, [x29, #-56]
Lloh5:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh6:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh7:
ldr x9, [x9]
cmp x9, x8
b.ne LBB2_16
; %bb.14:
mov w0, #0
add sp, sp, #2197, lsl #12 ; =8998912
add sp, sp, #1104
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
ldp x28, x27, [sp], #64 ; 16-byte Folded Reload
ret
LBB2_15:
bl _abort
LBB2_16:
bl ___stack_chk_fail
.loh AdrpLdrGotLdr Lloh2, Lloh3, Lloh4
.loh AdrpLdrGot Lloh0, Lloh1
.loh AdrpLdrGotLdr Lloh5, Lloh6, Lloh7
.cfi_endproc
; -- End function
.subsections_via_symbols
| the_stack_data/1186381.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function tcq_stat_sc
_tcq_stat_sc: ## @tcq_stat_sc
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rsi, %r15
movq (%rdi), %r14
movq %r14, %rdi
callq _IFCQ_LOCK_ASSERT_HELD
movq (%r15), %rax
movq _MBUF_SC_UNSPEC@GOTPCREL(%rip), %rcx
movl $1, %edi
cmpq (%rcx), %rax
je LBB0_2
## %bb.1:
movq %rax, %rdi
callq _MBUF_VALID_SC
xorl %edi, %edi
testq %rax, %rax
setne %dil
LBB0_2:
callq _VERIFY
movq (%r15), %rdi
callq _MBUF_SCIDX
movq %rax, %rbx
movq _IFCQ_SC_MAX@GOTPCREL(%rip), %rax
xorl %edi, %edi
cmpq (%rax), %rbx
setb %dil
callq _VERIFY
movq (%r14), %rax
movq (%rax,%rbx,8), %rbx
movq %rbx, %rdi
callq _qlen
movl %eax, 12(%r15)
movq %rbx, %rdi
callq _qsize
movl %eax, 8(%r15)
xorl %eax, %eax
addq $8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _MBUF_SC_UNSPEC,8,3 ## @MBUF_SC_UNSPEC
.comm _IFCQ_SC_MAX,8,3 ## @IFCQ_SC_MAX
.no_dead_strip _tcq_stat_sc
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function tcq_stat_sc
_tcq_stat_sc: ; @tcq_stat_sc
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x19, x1
ldr x20, [x0]
mov x0, x20
bl _IFCQ_LOCK_ASSERT_HELD
ldr x0, [x19]
Lloh0:
adrp x8, _MBUF_SC_UNSPEC@GOTPAGE
Lloh1:
ldr x8, [x8, _MBUF_SC_UNSPEC@GOTPAGEOFF]
Lloh2:
ldr x8, [x8]
cmp x0, x8
b.ne LBB0_2
; %bb.1:
mov w0, #1
b LBB0_3
LBB0_2:
bl _MBUF_VALID_SC
cmp x0, #0
cset w0, ne
LBB0_3:
bl _VERIFY
ldr x0, [x19]
bl _MBUF_SCIDX
mov x21, x0
Lloh3:
adrp x8, _IFCQ_SC_MAX@GOTPAGE
Lloh4:
ldr x8, [x8, _IFCQ_SC_MAX@GOTPAGEOFF]
Lloh5:
ldr x8, [x8]
cmp x0, x8
cset w0, lo
bl _VERIFY
ldr x8, [x20]
ldr x20, [x8, x21, lsl #3]
mov x0, x20
bl _qlen
str w0, [x19, #12]
mov x0, x20
bl _qsize
str w0, [x19, #8]
mov w0, #0
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.cfi_endproc
; -- End function
.comm _MBUF_SC_UNSPEC,8,3 ; @MBUF_SC_UNSPEC
.comm _IFCQ_SC_MAX,8,3 ; @IFCQ_SC_MAX
.no_dead_strip _tcq_stat_sc
.subsections_via_symbols
| AnghaBench/darwin-xnu/bsd/net/pktsched/extr_pktsched_tcq.c_tcq_stat_sc.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
leaq L_.str.2(%rip), %rdi
movl $649, %esi ## imm = 0x289
movl $384, %edx ## imm = 0x180
xorl %eax, %eax
callq _open
cmpl $-1, %eax
jne LBB0_1
## %bb.5:
leaq L_.str.3(%rip), %rdi
callq _perror
movl $1, %edi
callq _exit
LBB0_1:
movl %eax, %ebx
callq _getchar
leaq L_.str(%rip), %rsi
movl $5, %edx
movl %ebx, %edi
callq _write
cmpq $-1, %rax
jne LBB0_2
## %bb.6:
leaq L_.str.3(%rip), %rdi
callq _perror
movl $2, %edi
callq _exit
LBB0_2:
leaq L_.str.1(%rip), %rsi
movl $5, %edx
movl %ebx, %edi
callq _write
cmpq $-1, %rax
jne LBB0_3
## %bb.7:
leaq L_.str.3(%rip), %rdi
callq _perror
movl $3, %edi
callq _exit
LBB0_3:
movl %ebx, %edi
callq _close
cmpl $-1, %eax
jne LBB0_4
## %bb.8:
leaq L_.str.3(%rip), %rdi
callq _perror
movl $4, %edi
callq _exit
LBB0_4:
xorl %edi, %edi
callq _exit
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "AAAAA"
L_.str.1: ## @.str.1
.asciz "BBBBB"
L_.str.2: ## @.str.2
.asciz "f1.txt"
L_.str.3: ## @.str.3
.asciz "file"
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
sub sp, sp, #48
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov w8, #384
str x8, [sp]
Lloh0:
adrp x0, l_.str.2@PAGE
Lloh1:
add x0, x0, l_.str.2@PAGEOFF
mov w1, #649
bl _open
cmn w0, #1
b.ne LBB0_2
; %bb.1:
Lloh2:
adrp x0, l_.str.3@PAGE
Lloh3:
add x0, x0, l_.str.3@PAGEOFF
bl _perror
mov w0, #1
bl _exit
LBB0_2:
mov x19, x0
bl _getchar
Lloh4:
adrp x1, l_.str@PAGE
Lloh5:
add x1, x1, l_.str@PAGEOFF
mov x0, x19
mov w2, #5
bl _write
cmn x0, #1
b.ne LBB0_4
; %bb.3:
Lloh6:
adrp x0, l_.str.3@PAGE
Lloh7:
add x0, x0, l_.str.3@PAGEOFF
bl _perror
mov w0, #2
bl _exit
LBB0_4:
Lloh8:
adrp x1, l_.str.1@PAGE
Lloh9:
add x1, x1, l_.str.1@PAGEOFF
mov x0, x19
mov w2, #5
bl _write
cmn x0, #1
b.ne LBB0_6
; %bb.5:
Lloh10:
adrp x0, l_.str.3@PAGE
Lloh11:
add x0, x0, l_.str.3@PAGEOFF
bl _perror
mov w0, #3
bl _exit
LBB0_6:
mov x0, x19
bl _close
cmn w0, #1
b.ne LBB0_8
; %bb.7:
Lloh12:
adrp x0, l_.str.3@PAGE
Lloh13:
add x0, x0, l_.str.3@PAGEOFF
bl _perror
mov w0, #4
bl _exit
LBB0_8:
mov w0, #0
bl _exit
.loh AdrpAdd Lloh0, Lloh1
.loh AdrpAdd Lloh2, Lloh3
.loh AdrpAdd Lloh4, Lloh5
.loh AdrpAdd Lloh6, Lloh7
.loh AdrpAdd Lloh8, Lloh9
.loh AdrpAdd Lloh10, Lloh11
.loh AdrpAdd Lloh12, Lloh13
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "AAAAA"
l_.str.1: ; @.str.1
.asciz "BBBBB"
l_.str.2: ; @.str.2
.asciz "f1.txt"
l_.str.3: ; @.str.3
.asciz "file"
.subsections_via_symbols
| the_stack_data/67326390.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _Uart16550Init ## -- Begin function Uart16550Init
.p2align 4, 0x90
_Uart16550Init: ## @Uart16550Init
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %ecx, %r14d
movl %edx, %r15d
movl %esi, %r12d
movl %edi, %ebx
movq _OFS_INTR_ENABLE@GOTPCREL(%rip), %rax
movl (%rax), %edi
xorl %esi, %esi
callq _UART16550_WRITE
movq _OFS_LINE_CONTROL@GOTPCREL(%rip), %r13
movl (%r13), %edi
movl $128, %esi
callq _UART16550_WRITE
movq _MAX_BAUD@GOTPCREL(%rip), %rax
movl (%rax), %eax
cltd
idivl %ebx
movl %eax, %ebx
movq _OFS_DIVISOR_LSB@GOTPCREL(%rip), %rax
movl (%rax), %edi
movzbl %bl, %esi
callq _UART16550_WRITE
movq _OFS_DIVISOR_MSB@GOTPCREL(%rip), %rax
movl (%rax), %edi
movzbl %bh, %esi
callq _UART16550_WRITE
movl (%r13), %edi
xorl %esi, %esi
callq _UART16550_WRITE
movq _OFS_DATA_FORMAT@GOTPCREL(%rip), %rax
movl (%rax), %edi
orl %r15d, %r12d
orl %r14d, %r12d
movl %r12d, %esi
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp _UART16550_WRITE ## TAILCALL
.cfi_endproc
## -- End function
.comm _OFS_INTR_ENABLE,4,2 ## @OFS_INTR_ENABLE
.comm _OFS_LINE_CONTROL,4,2 ## @OFS_LINE_CONTROL
.comm _MAX_BAUD,4,2 ## @MAX_BAUD
.comm _OFS_DIVISOR_LSB,4,2 ## @OFS_DIVISOR_LSB
.comm _OFS_DIVISOR_MSB,4,2 ## @OFS_DIVISOR_MSB
.comm _OFS_DATA_FORMAT,4,2 ## @OFS_DATA_FORMAT
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _Uart16550Init ; -- Begin function Uart16550Init
.p2align 2
_Uart16550Init: ; @Uart16550Init
.cfi_startproc
; %bb.0:
stp x24, x23, [sp, #-64]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 64
stp x22, x21, [sp, #16] ; 16-byte Folded Spill
stp x20, x19, [sp, #32] ; 16-byte Folded Spill
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
mov x19, x3
mov x20, x2
mov x21, x1
mov x22, x0
Lloh0:
adrp x8, _OFS_INTR_ENABLE@GOTPAGE
Lloh1:
ldr x8, [x8, _OFS_INTR_ENABLE@GOTPAGEOFF]
Lloh2:
ldr w0, [x8]
mov w1, #0
bl _UART16550_WRITE
Lloh3:
adrp x23, _OFS_LINE_CONTROL@GOTPAGE
Lloh4:
ldr x23, [x23, _OFS_LINE_CONTROL@GOTPAGEOFF]
ldr w0, [x23]
mov w1, #128
bl _UART16550_WRITE
Lloh5:
adrp x8, _MAX_BAUD@GOTPAGE
Lloh6:
ldr x8, [x8, _MAX_BAUD@GOTPAGEOFF]
Lloh7:
ldr w8, [x8]
sdiv w22, w8, w22
Lloh8:
adrp x8, _OFS_DIVISOR_LSB@GOTPAGE
Lloh9:
ldr x8, [x8, _OFS_DIVISOR_LSB@GOTPAGEOFF]
Lloh10:
ldr w0, [x8]
and w1, w22, #0xff
bl _UART16550_WRITE
Lloh11:
adrp x8, _OFS_DIVISOR_MSB@GOTPAGE
Lloh12:
ldr x8, [x8, _OFS_DIVISOR_MSB@GOTPAGEOFF]
Lloh13:
ldr w0, [x8]
ubfx w1, w22, #8, #8
bl _UART16550_WRITE
ldr w0, [x23]
mov w1, #0
bl _UART16550_WRITE
Lloh14:
adrp x8, _OFS_DATA_FORMAT@GOTPAGE
Lloh15:
ldr x8, [x8, _OFS_DATA_FORMAT@GOTPAGEOFF]
Lloh16:
ldr w0, [x8]
orr w8, w20, w21
orr w1, w8, w19
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
ldp x24, x23, [sp], #64 ; 16-byte Folded Reload
b _UART16550_WRITE
.loh AdrpLdrGotLdr Lloh14, Lloh15, Lloh16
.loh AdrpLdrGotLdr Lloh11, Lloh12, Lloh13
.loh AdrpLdrGotLdr Lloh8, Lloh9, Lloh10
.loh AdrpLdrGotLdr Lloh5, Lloh6, Lloh7
.loh AdrpLdrGot Lloh3, Lloh4
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _OFS_INTR_ENABLE,4,2 ; @OFS_INTR_ENABLE
.comm _OFS_LINE_CONTROL,4,2 ; @OFS_LINE_CONTROL
.comm _MAX_BAUD,4,2 ; @MAX_BAUD
.comm _OFS_DIVISOR_LSB,4,2 ; @OFS_DIVISOR_LSB
.comm _OFS_DIVISOR_MSB,4,2 ; @OFS_DIVISOR_MSB
.comm _OFS_DATA_FORMAT,4,2 ; @OFS_DATA_FORMAT
.subsections_via_symbols
| AnghaBench/openwrt/target/linux/generic/image/lzma-loader/src/extr_uart16550.c_Uart16550Init.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function rcar_i2c_dma_unmap
_rcar_i2c_dma_unmap: ## @rcar_i2c_dma_unmap
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdi, %rbx
movq (%rdi), %rax
movq _DMA_FROM_DEVICE@GOTPCREL(%rip), %r13
xorl %ecx, %ecx
cmpq (%r13), %rax
sete %cl
movq 24(%rdi,%rcx,8), %rax
movq (%rax), %rax
movl (%rax), %r14d
leaq 20(%rdi), %r15
movq %r15, %rdi
callq _sg_dma_address
movl %eax, %r12d
movq %r15, %rdi
callq _sg_dma_len
movq (%rbx), %rcx
movl %r14d, %edi
movl %r12d, %esi
movl %eax, %edx
callq _dma_unmap_single
movq 8(%rbx), %rax
movq _I2C_RCAR_GEN3@GOTPCREL(%rip), %rcx
cmpq (%rcx), %rax
jne LBB0_3
## %bb.1:
movq (%rbx), %rax
cmpq (%r13), %rax
jne LBB0_3
## %bb.2:
movq _ID_P_NO_RXDMA@GOTPCREL(%rip), %rax
movl (%rax), %eax
orl %eax, 16(%rbx)
LBB0_3:
movq _DMA_NONE@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, (%rbx)
movq _ICDMAER@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq %rbx, %rdi
xorl %edx, %edx
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp _rcar_i2c_write ## TAILCALL
.cfi_endproc
## -- End function
.comm _DMA_FROM_DEVICE,8,3 ## @DMA_FROM_DEVICE
.comm _I2C_RCAR_GEN3,8,3 ## @I2C_RCAR_GEN3
.comm _ID_P_NO_RXDMA,4,2 ## @ID_P_NO_RXDMA
.comm _DMA_NONE,8,3 ## @DMA_NONE
.comm _ICDMAER,4,2 ## @ICDMAER
.no_dead_strip _rcar_i2c_dma_unmap
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function rcar_i2c_dma_unmap
_rcar_i2c_dma_unmap: ; @rcar_i2c_dma_unmap
.cfi_startproc
; %bb.0:
stp x24, x23, [sp, #-64]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 64
stp x22, x21, [sp, #16] ; 16-byte Folded Spill
stp x20, x19, [sp, #32] ; 16-byte Folded Spill
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
mov x19, x0
ldr x8, [x0]
Lloh0:
adrp x23, _DMA_FROM_DEVICE@GOTPAGE
Lloh1:
ldr x23, [x23, _DMA_FROM_DEVICE@GOTPAGEOFF]
ldr x9, [x23]
mov w10, #24
mov w11, #32
cmp x8, x9
csel x8, x11, x10, eq
ldr x8, [x0, x8]
ldr x8, [x8]
ldr w20, [x8]
add x21, x0, #20
mov x0, x21
bl _sg_dma_address
mov x22, x0
mov x0, x21
bl _sg_dma_len
mov x2, x0
ldr x3, [x19]
mov x0, x20
mov x1, x22
bl _dma_unmap_single
ldr x8, [x19, #8]
Lloh2:
adrp x9, _I2C_RCAR_GEN3@GOTPAGE
Lloh3:
ldr x9, [x9, _I2C_RCAR_GEN3@GOTPAGEOFF]
Lloh4:
ldr x9, [x9]
cmp x8, x9
b.ne LBB0_3
; %bb.1:
ldr x8, [x19]
ldr x9, [x23]
cmp x8, x9
b.ne LBB0_3
; %bb.2:
Lloh5:
adrp x8, _ID_P_NO_RXDMA@GOTPAGE
Lloh6:
ldr x8, [x8, _ID_P_NO_RXDMA@GOTPAGEOFF]
Lloh7:
ldr w8, [x8]
ldr w9, [x19, #16]
orr w8, w9, w8
str w8, [x19, #16]
LBB0_3:
Lloh8:
adrp x8, _DMA_NONE@GOTPAGE
Lloh9:
ldr x8, [x8, _DMA_NONE@GOTPAGEOFF]
Lloh10:
ldr x8, [x8]
str x8, [x19]
Lloh11:
adrp x8, _ICDMAER@GOTPAGE
Lloh12:
ldr x8, [x8, _ICDMAER@GOTPAGEOFF]
Lloh13:
ldr w1, [x8]
mov x0, x19
mov w2, #0
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
ldp x24, x23, [sp], #64 ; 16-byte Folded Reload
b _rcar_i2c_write
.loh AdrpLdrGotLdr Lloh2, Lloh3, Lloh4
.loh AdrpLdrGot Lloh0, Lloh1
.loh AdrpLdrGotLdr Lloh5, Lloh6, Lloh7
.loh AdrpLdrGotLdr Lloh11, Lloh12, Lloh13
.loh AdrpLdrGotLdr Lloh8, Lloh9, Lloh10
.cfi_endproc
; -- End function
.comm _DMA_FROM_DEVICE,8,3 ; @DMA_FROM_DEVICE
.comm _I2C_RCAR_GEN3,8,3 ; @I2C_RCAR_GEN3
.comm _ID_P_NO_RXDMA,4,2 ; @ID_P_NO_RXDMA
.comm _DMA_NONE,8,3 ; @DMA_NONE
.comm _ICDMAER,4,2 ; @ICDMAER
.no_dead_strip _rcar_i2c_dma_unmap
.subsections_via_symbols
| AnghaBench/linux/drivers/i2c/busses/extr_i2c-rcar.c_rcar_i2c_dma_unmap.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _ar9300_gpio_get_mask ## -- Begin function ar9300_gpio_get_mask
.p2align 4, 0x90
_ar9300_gpio_get_mask: ## @ar9300_gpio_get_mask
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq _AR9382_MAX_GPIO_INPUT_PIN_NUM@GOTPCREL(%rip), %rax
movb (%rax), %cl
incb %cl
movl $-1, %r14d
movl $-1, %ebx
shll %cl, %ebx
callq _AH_PRIVATE
movq (%rax), %rax
movq _AR9300_DEVID_AR9380_PCIE@GOTPCREL(%rip), %rcx
cmpq (%rcx), %rax
jne LBB0_2
## %bb.1:
movq _AR9382_MAX_GPIO_PIN_NUM@GOTPCREL(%rip), %rax
movb (%rax), %cl
shll %cl, %r14d
movq _AR9382_GPIO_PIN_8_RESERVED@GOTPCREL(%rip), %rax
movb (%rax), %al
btsl %eax, %r14d
movl %r14d, %ebx
LBB0_2:
notl %ebx
movl %ebx, %eax
popq %rbx
popq %r14
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _AR9382_MAX_GPIO_INPUT_PIN_NUM,4,2 ## @AR9382_MAX_GPIO_INPUT_PIN_NUM
.comm _AR9300_DEVID_AR9380_PCIE,8,3 ## @AR9300_DEVID_AR9380_PCIE
.comm _AR9382_MAX_GPIO_PIN_NUM,4,2 ## @AR9382_MAX_GPIO_PIN_NUM
.comm _AR9382_GPIO_PIN_8_RESERVED,4,2 ## @AR9382_GPIO_PIN_8_RESERVED
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _ar9300_gpio_get_mask ; -- Begin function ar9300_gpio_get_mask
.p2align 2
_ar9300_gpio_get_mask: ; @ar9300_gpio_get_mask
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
Lloh0:
adrp x8, _AR9382_MAX_GPIO_INPUT_PIN_NUM@GOTPAGE
Lloh1:
ldr x8, [x8, _AR9382_MAX_GPIO_INPUT_PIN_NUM@GOTPAGEOFF]
Lloh2:
ldr w20, [x8]
mov w19, #-1
bl _AH_PRIVATE
ldr x8, [x0]
Lloh3:
adrp x9, _AR9300_DEVID_AR9380_PCIE@GOTPAGE
Lloh4:
ldr x9, [x9, _AR9300_DEVID_AR9380_PCIE@GOTPAGEOFF]
Lloh5:
ldr x9, [x9]
cmp x8, x9
b.ne LBB0_2
; %bb.1:
Lloh6:
adrp x8, _AR9382_MAX_GPIO_PIN_NUM@GOTPAGE
Lloh7:
ldr x8, [x8, _AR9382_MAX_GPIO_PIN_NUM@GOTPAGEOFF]
Lloh8:
ldr w8, [x8]
lsl w8, w19, w8
Lloh9:
adrp x9, _AR9382_GPIO_PIN_8_RESERVED@GOTPAGE
Lloh10:
ldr x9, [x9, _AR9382_GPIO_PIN_8_RESERVED@GOTPAGEOFF]
Lloh11:
ldr w9, [x9]
mov w10, #1
lsl w9, w10, w9
orr w8, w9, w8
b LBB0_3
LBB0_2:
add w8, w20, #1
lsl w8, w19, w8
LBB0_3:
mvn w0, w8
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGotLdr Lloh9, Lloh10, Lloh11
.loh AdrpLdrGotLdr Lloh6, Lloh7, Lloh8
.cfi_endproc
; -- End function
.comm _AR9382_MAX_GPIO_INPUT_PIN_NUM,4,2 ; @AR9382_MAX_GPIO_INPUT_PIN_NUM
.comm _AR9300_DEVID_AR9380_PCIE,8,3 ; @AR9300_DEVID_AR9380_PCIE
.comm _AR9382_MAX_GPIO_PIN_NUM,4,2 ; @AR9382_MAX_GPIO_PIN_NUM
.comm _AR9382_GPIO_PIN_8_RESERVED,4,2 ; @AR9382_GPIO_PIN_8_RESERVED
.subsections_via_symbols
| AnghaBench/freebsd/sys/contrib/dev/ath/ath_hal/ar9300/extr_ar9300_gpio.c_ar9300_gpio_get_mask.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.subsections_via_symbols
| the_stack_data/82822.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _nfsd_cross_mnt ## -- Begin function nfsd_cross_mnt
.p2align 4, 0x90
_nfsd_cross_mnt: ## @nfsd_cross_mnt
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $40, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdx, %r15
movq %rdi, %r14
movq (%rdx), %rbx
movq %rsi, -80(%rbp) ## 8-byte Spill
movq (%rsi), %r13
movq %r13, %rdi
callq _dget
movq %rax, -64(%rbp)
movl 4(%rbx), %edi
callq _mntget
movl %eax, -56(%rbp)
leaq -64(%rbp), %rdi
xorl %esi, %esi
callq ___follow_down
movl %eax, %r12d
testl %eax, %eax
js LBB0_10
## %bb.1:
movq %r15, -72(%rbp) ## 8-byte Spill
movq %rbx, -48(%rbp) ## 8-byte Spill
leaq -64(%rbp), %rsi
movq %r14, %rdi
callq _rqst_exp_get_by_name
movq %rax, %r15
movq %rax, %rdi
callq _IS_ERR
testq %rax, %rax
je LBB0_5
## %bb.2:
movq %r15, %rdi
callq _PTR_ERR
movl %eax, %r12d
movq _ENOENT@GOTPCREL(%rip), %rax
movl (%rax), %eax
addl %r12d, %eax
jne LBB0_4
## %bb.3:
movq _NFSEXP_V4ROOT@GOTPCREL(%rip), %rax
movl (%rax), %eax
movq -48(%rbp), %rcx ## 8-byte Reload
andl (%rcx), %eax
cmovel %eax, %r12d
LBB0_4:
leaq -64(%rbp), %rdi
callq _path_put
jmp LBB0_10
LBB0_5:
movq %r14, %rdi
callq _nfsd_v4client
testq %rax, %rax
jne LBB0_8
## %bb.6:
movq _NFSEXP_CROSSMOUNT@GOTPCREL(%rip), %rax
movl (%rax), %eax
movq -48(%rbp), %rcx ## 8-byte Reload
testl %eax, (%rcx)
jne LBB0_8
## %bb.7:
movq %r15, %rdi
callq _EX_NOHIDE
testq %rax, %rax
je LBB0_9
LBB0_8:
movq -64(%rbp), %rax
movq -80(%rbp), %rcx ## 8-byte Reload
movq %rax, (%rcx)
movq %r13, -64(%rbp)
movq -72(%rbp), %rax ## 8-byte Reload
movq %r15, (%rax)
movq -48(%rbp), %r15 ## 8-byte Reload
LBB0_9:
leaq -64(%rbp), %rdi
callq _path_put
movq %r15, %rdi
callq _exp_put
LBB0_10:
movl %r12d, %eax
addq $40, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _ENOENT,4,2 ## @ENOENT
.comm _NFSEXP_V4ROOT,4,2 ## @NFSEXP_V4ROOT
.comm _NFSEXP_CROSSMOUNT,4,2 ## @NFSEXP_CROSSMOUNT
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _nfsd_cross_mnt ; -- Begin function nfsd_cross_mnt
.p2align 2
_nfsd_cross_mnt: ; @nfsd_cross_mnt
.cfi_startproc
; %bb.0:
sub sp, sp, #96
.cfi_def_cfa_offset 96
stp x26, x25, [sp, #16] ; 16-byte Folded Spill
stp x24, x23, [sp, #32] ; 16-byte Folded Spill
stp x22, x21, [sp, #48] ; 16-byte Folded Spill
stp x20, x19, [sp, #64] ; 16-byte Folded Spill
stp x29, x30, [sp, #80] ; 16-byte Folded Spill
add x29, sp, #80
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
mov x21, x2
mov x22, x1
mov x24, x0
ldr x19, [x2]
ldr x23, [x1]
mov x0, x23
bl _dget
str x0, [sp]
ldr w0, [x19, #4]
bl _mntget
str w0, [sp, #8]
mov x0, sp
mov w1, #0
bl ___follow_down
mov x20, x0
tbnz w0, #31, LBB0_10
; %bb.1:
mov x1, sp
mov x0, x24
bl _rqst_exp_get_by_name
mov x25, x0
bl _IS_ERR
cbz x0, LBB0_5
; %bb.2:
mov x0, x25
bl _PTR_ERR
mov x20, x0
Lloh0:
adrp x8, _ENOENT@GOTPAGE
Lloh1:
ldr x8, [x8, _ENOENT@GOTPAGEOFF]
Lloh2:
ldr w8, [x8]
cmn w0, w8
b.ne LBB0_4
; %bb.3:
ldr w8, [x19]
Lloh3:
adrp x9, _NFSEXP_V4ROOT@GOTPAGE
Lloh4:
ldr x9, [x9, _NFSEXP_V4ROOT@GOTPAGEOFF]
Lloh5:
ldr w9, [x9]
tst w9, w8
csel w20, wzr, w20, eq
LBB0_4:
mov x0, sp
bl _path_put
b LBB0_10
LBB0_5:
mov x0, x24
bl _nfsd_v4client
cbnz x0, LBB0_8
; %bb.6:
ldr w8, [x19]
Lloh6:
adrp x9, _NFSEXP_CROSSMOUNT@GOTPAGE
Lloh7:
ldr x9, [x9, _NFSEXP_CROSSMOUNT@GOTPAGEOFF]
Lloh8:
ldr w9, [x9]
tst w9, w8
b.ne LBB0_8
; %bb.7:
mov x0, x25
bl _EX_NOHIDE
cbz x0, LBB0_11
LBB0_8:
ldr x8, [sp]
str x8, [x22]
str x23, [sp]
str x25, [x21]
LBB0_9:
mov x0, sp
bl _path_put
mov x0, x19
bl _exp_put
LBB0_10:
mov x0, x20
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
ldp x26, x25, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #96
ret
LBB0_11:
mov x19, x25
b LBB0_9
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGotLdr Lloh6, Lloh7, Lloh8
.cfi_endproc
; -- End function
.comm _ENOENT,4,2 ; @ENOENT
.comm _NFSEXP_V4ROOT,4,2 ; @NFSEXP_V4ROOT
.comm _NFSEXP_CROSSMOUNT,4,2 ; @NFSEXP_CROSSMOUNT
.subsections_via_symbols
| AnghaBench/fastsocket/kernel/fs/nfsd/extr_vfs.c_nfsd_cross_mnt.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function pm3393_get_speed_duplex_fc
_pm3393_get_speed_duplex_fc: ## @pm3393_get_speed_duplex_fc
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
testq %rsi, %rsi
je LBB0_2
## %bb.1:
movq _SPEED_10000@GOTPCREL(%rip), %rax
movl (%rax), %eax
movl %eax, (%rsi)
LBB0_2:
testq %rdx, %rdx
je LBB0_4
## %bb.3:
movq _DUPLEX_FULL@GOTPCREL(%rip), %rax
movl (%rax), %eax
movl %eax, (%rdx)
LBB0_4:
testq %rcx, %rcx
je LBB0_6
## %bb.5:
movq (%rdi), %rax
movl (%rax), %eax
movl %eax, (%rcx)
LBB0_6:
xorl %eax, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _SPEED_10000,4,2 ## @SPEED_10000
.comm _DUPLEX_FULL,4,2 ## @DUPLEX_FULL
.no_dead_strip _pm3393_get_speed_duplex_fc
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function pm3393_get_speed_duplex_fc
_pm3393_get_speed_duplex_fc: ; @pm3393_get_speed_duplex_fc
.cfi_startproc
; %bb.0:
cbz x1, LBB0_2
; %bb.1:
Lloh0:
adrp x8, _SPEED_10000@GOTPAGE
Lloh1:
ldr x8, [x8, _SPEED_10000@GOTPAGEOFF]
Lloh2:
ldr w8, [x8]
str w8, [x1]
LBB0_2:
cbz x2, LBB0_4
; %bb.3:
Lloh3:
adrp x8, _DUPLEX_FULL@GOTPAGE
Lloh4:
ldr x8, [x8, _DUPLEX_FULL@GOTPAGEOFF]
Lloh5:
ldr w8, [x8]
str w8, [x2]
LBB0_4:
cbz x3, LBB0_6
; %bb.5:
ldr x8, [x0]
ldr w8, [x8]
str w8, [x3]
LBB0_6:
mov w0, #0
ret
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.cfi_endproc
; -- End function
.comm _SPEED_10000,4,2 ; @SPEED_10000
.comm _DUPLEX_FULL,4,2 ; @DUPLEX_FULL
.no_dead_strip _pm3393_get_speed_duplex_fc
.subsections_via_symbols
| AnghaBench/linux/drivers/net/ethernet/chelsio/cxgb/extr_pm3393.c_pm3393_get_speed_duplex_fc.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function panda_usb_disconnect
_panda_usb_disconnect: ## @panda_usb_disconnect
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdi, %rbx
callq _usb_get_intfdata
movq %rax, %r15
xorl %r12d, %r12d
movq %rbx, %rdi
xorl %esi, %esi
callq _usb_set_intfdata
leaq L_.str(%rip), %r14
movq _PANDA_NUM_CAN_INTERFACES@GOTPCREL(%rip), %r13
.p2align 4, 0x90
LBB0_1: ## =>This Inner Loop Header: Depth=1
movq (%r15), %rax
movq (%rax,%r12,8), %rbx
testq %rbx, %rbx
je LBB0_3
## %bb.2: ## in Loop: Header=BB0_1 Depth=1
movl (%rbx), %edi
movq %r14, %rsi
callq _netdev_info
movl (%rbx), %edi
callq _unregister_candev
movl (%rbx), %edi
callq _free_candev
incq %r12
movslq (%r13), %rax
cmpq %rax, %r12
jl LBB0_1
LBB0_3:
movq %rbx, %rdi
callq _panda_urb_unlink
movq %r15, %rdi
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp _kfree ## TAILCALL
.cfi_endproc
## -- End function
.comm _PANDA_NUM_CAN_INTERFACES,4,2 ## @PANDA_NUM_CAN_INTERFACES
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "device disconnected\n"
.no_dead_strip _panda_usb_disconnect
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function panda_usb_disconnect
_panda_usb_disconnect: ; @panda_usb_disconnect
.cfi_startproc
; %bb.0:
stp x24, x23, [sp, #-64]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 64
stp x22, x21, [sp, #16] ; 16-byte Folded Spill
stp x20, x19, [sp, #32] ; 16-byte Folded Spill
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
mov x20, x0
bl _usb_get_intfdata
mov x19, x0
mov x0, x20
mov x1, #0
bl _usb_set_intfdata
mov x22, #0
Lloh0:
adrp x20, l_.str@PAGE
Lloh1:
add x20, x20, l_.str@PAGEOFF
Lloh2:
adrp x23, _PANDA_NUM_CAN_INTERFACES@GOTPAGE
Lloh3:
ldr x23, [x23, _PANDA_NUM_CAN_INTERFACES@GOTPAGEOFF]
LBB0_1: ; =>This Inner Loop Header: Depth=1
ldr x8, [x19]
ldr x21, [x8, x22, lsl #3]
cbz x21, LBB0_3
; %bb.2: ; in Loop: Header=BB0_1 Depth=1
ldr w0, [x21]
mov x1, x20
bl _netdev_info
ldr w0, [x21]
bl _unregister_candev
ldr w0, [x21]
bl _free_candev
add x22, x22, #1
ldrsw x8, [x23]
cmp x22, x8
b.lt LBB0_1
LBB0_3:
mov x0, x21
bl _panda_urb_unlink
mov x0, x19
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
ldp x24, x23, [sp], #64 ; 16-byte Folded Reload
b _kfree
.loh AdrpLdrGot Lloh2, Lloh3
.loh AdrpAdd Lloh0, Lloh1
.cfi_endproc
; -- End function
.comm _PANDA_NUM_CAN_INTERFACES,4,2 ; @PANDA_NUM_CAN_INTERFACES
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "device disconnected\n"
.no_dead_strip _panda_usb_disconnect
.subsections_via_symbols
| AnghaBench/openpilot/panda/drivers/linux/extr_panda.c_panda_usb_disconnect.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function pool_free
_pool_free: ## @pool_free
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq %rdi, %rbx
movq %rsi, %rdi
callq _virt_to_page
movq %rax, %r14
movq %rbx, %rdi
callq _pagevec_space
testq %rax, %rax
je LBB0_2
## %bb.1:
movq %rbx, %rdi
movq %r14, %rsi
popq %rbx
popq %r14
popq %rbp
jmp _pagevec_add ## TAILCALL
LBB0_2:
movq %r14, %rdi
popq %rbx
popq %r14
popq %rbp
jmp ___free_page ## TAILCALL
.cfi_endproc
## -- End function
.no_dead_strip _pool_free
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function pool_free
_pool_free: ; @pool_free
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x0
mov x0, x1
bl _virt_to_page
mov x20, x0
mov x0, x19
bl _pagevec_space
cbz x0, LBB0_2
; %bb.1:
mov x0, x19
mov x1, x20
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
b _pagevec_add
LBB0_2:
mov x0, x20
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
b ___free_page
.cfi_endproc
; -- End function
.no_dead_strip _pool_free
.subsections_via_symbols
| AnghaBench/linux/drivers/gpu/drm/i915/extr_i915_gpu_error.c_pool_free.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function get_3_3_div
_get_3_3_div: ## @get_3_3_div
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movl %edi, %ecx
shrl $3, %ecx
andl $7, %ecx
incq %rcx
andl $7, %edi
leaq 1(%rdi), %rax
imulq %rcx, %rax
popq %rbp
retq
.cfi_endproc
## -- End function
.no_dead_strip _get_3_3_div
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function get_3_3_div
_get_3_3_div: ; @get_3_3_div
.cfi_startproc
; %bb.0:
ubfx x8, x0, #3, #3
and x9, x0, #0x7
add x9, x9, #1
madd x0, x9, x8, x9
ret
.cfi_endproc
; -- End function
.no_dead_strip _get_3_3_div
.subsections_via_symbols
| AnghaBench/fastsocket/kernel/arch/arm/mach-mx3/extr_clock-imx35.c_get_3_3_div.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function zynq_slcr_unlock
_zynq_slcr_unlock: ## @zynq_slcr_unlock
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq _SLCR_UNLOCK_MAGIC@GOTPCREL(%rip), %rax
movl (%rax), %edi
movq _SLCR_UNLOCK_OFFSET@GOTPCREL(%rip), %rax
movl (%rax), %esi
callq _zynq_slcr_write
xorl %eax, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _SLCR_UNLOCK_MAGIC,4,2 ## @SLCR_UNLOCK_MAGIC
.comm _SLCR_UNLOCK_OFFSET,4,2 ## @SLCR_UNLOCK_OFFSET
.no_dead_strip _zynq_slcr_unlock
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function zynq_slcr_unlock
_zynq_slcr_unlock: ; @zynq_slcr_unlock
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh0:
adrp x8, _SLCR_UNLOCK_MAGIC@GOTPAGE
Lloh1:
ldr x8, [x8, _SLCR_UNLOCK_MAGIC@GOTPAGEOFF]
Lloh2:
ldr w0, [x8]
Lloh3:
adrp x8, _SLCR_UNLOCK_OFFSET@GOTPAGE
Lloh4:
ldr x8, [x8, _SLCR_UNLOCK_OFFSET@GOTPAGEOFF]
Lloh5:
ldr w1, [x8]
bl _zynq_slcr_write
mov w0, #0
ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
ret
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _SLCR_UNLOCK_MAGIC,4,2 ; @SLCR_UNLOCK_MAGIC
.comm _SLCR_UNLOCK_OFFSET,4,2 ; @SLCR_UNLOCK_OFFSET
.no_dead_strip _zynq_slcr_unlock
.subsections_via_symbols
| AnghaBench/linux/arch/arm/mach-zynq/extr_slcr.c_zynq_slcr_unlock.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _feclearexcept ## -- Begin function feclearexcept
.p2align 4, 0x90
_feclearexcept: ## @feclearexcept
.cfi_startproc
## %bb.0:
movl %edi, %esi
andl $63, %esi
je LBB0_2
## %bb.1:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
subq $16, %rsp
movw $0, -2(%rbp)
leaq -2(%rbp), %rdi
callq _fesetexceptflag
addq $16, %rsp
popq %rbp
LBB0_2:
xorl %eax, %eax
retq
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _feclearexcept ; -- Begin function feclearexcept
.p2align 2
_feclearexcept: ; @feclearexcept
.cfi_startproc
; %bb.0:
mov w8, #159
ands w1, w0, w8
b.eq LBB0_2
; %bb.1:
sub sp, sp, #32
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
sturh wzr, [x29, #-2]
sub x0, x29, #2
bl _fesetexceptflag
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #32
LBB0_2:
.cfi_def_cfa wsp, 0
.cfi_same_value w30
.cfi_same_value w29
mov w0, #0
ret
.cfi_endproc
; -- End function
.subsections_via_symbols
| the_stack_data/1257989.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function ack_a_interrupt
_ack_a_interrupt: ## @ack_a_interrupt
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq _AI_SC_TC_St@GOTPCREL(%rip), %rax
movzwl (%rax), %ecx
andw %si, %cx
je LBB0_2
## %bb.1:
movq _AI_SC_TC_Interrupt_Ack@GOTPCREL(%rip), %rax
movzwl (%rax), %ecx
LBB0_2:
movq _AI_START1_St@GOTPCREL(%rip), %rax
movzwl (%rax), %eax
andw %si, %ax
je LBB0_4
## %bb.3:
movq _AI_START1_Interrupt_Ack@GOTPCREL(%rip), %rax
movzwl (%rax), %eax
LBB0_4:
orl %ecx, %eax
movq _AI_START_St@GOTPCREL(%rip), %rcx
movzwl (%rcx), %ecx
andw %si, %cx
je LBB0_6
## %bb.5:
movq _AI_START_Interrupt_Ack@GOTPCREL(%rip), %rcx
movzwl (%rcx), %ecx
LBB0_6:
orl %ecx, %eax
movq _AI_STOP_St@GOTPCREL(%rip), %rcx
andw (%rcx), %si
je LBB0_8
## %bb.7:
movq _AI_STOP_Interrupt_Ack@GOTPCREL(%rip), %rcx
movzwl (%rcx), %esi
LBB0_8:
orw %si, %ax
je LBB0_9
## %bb.10:
movq _devpriv@GOTPCREL(%rip), %rcx
movq (%rcx), %rcx
movq (%rcx), %rcx
movq _Interrupt_A_Ack_Register@GOTPCREL(%rip), %rdx
movl (%rdx), %edx
movzwl %ax, %esi
popq %rbp
jmpq *%rcx ## TAILCALL
LBB0_9:
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _AI_SC_TC_St,2,1 ## @AI_SC_TC_St
.comm _AI_SC_TC_Interrupt_Ack,2,1 ## @AI_SC_TC_Interrupt_Ack
.comm _AI_START1_St,2,1 ## @AI_START1_St
.comm _AI_START1_Interrupt_Ack,2,1 ## @AI_START1_Interrupt_Ack
.comm _AI_START_St,2,1 ## @AI_START_St
.comm _AI_START_Interrupt_Ack,2,1 ## @AI_START_Interrupt_Ack
.comm _AI_STOP_St,2,1 ## @AI_STOP_St
.comm _AI_STOP_Interrupt_Ack,2,1 ## @AI_STOP_Interrupt_Ack
.comm _devpriv,8,3 ## @devpriv
.comm _Interrupt_A_Ack_Register,4,2 ## @Interrupt_A_Ack_Register
.no_dead_strip _ack_a_interrupt
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function ack_a_interrupt
_ack_a_interrupt: ; @ack_a_interrupt
.cfi_startproc
; %bb.0:
Lloh0:
adrp x8, _AI_SC_TC_St@GOTPAGE
Lloh1:
ldr x8, [x8, _AI_SC_TC_St@GOTPAGEOFF]
ldrh w8, [x8]
Lloh2:
adrp x9, _AI_SC_TC_Interrupt_Ack@GOTPAGE
Lloh3:
ldr x9, [x9, _AI_SC_TC_Interrupt_Ack@GOTPAGEOFF]
ldrh w9, [x9]
tst w8, w1
csel w8, wzr, w9, eq
Lloh4:
adrp x9, _AI_START1_St@GOTPAGE
Lloh5:
ldr x9, [x9, _AI_START1_St@GOTPAGEOFF]
Lloh6:
adrp x10, _AI_START1_Interrupt_Ack@GOTPAGE
Lloh7:
ldr x10, [x10, _AI_START1_Interrupt_Ack@GOTPAGEOFF]
ldrh w9, [x9]
ldrh w10, [x10]
tst w9, w1
csel w9, wzr, w10, eq
Lloh8:
adrp x10, _AI_START_St@GOTPAGE
Lloh9:
ldr x10, [x10, _AI_START_St@GOTPAGEOFF]
orr w8, w9, w8
ldrh w9, [x10]
Lloh10:
adrp x10, _AI_START_Interrupt_Ack@GOTPAGE
Lloh11:
ldr x10, [x10, _AI_START_Interrupt_Ack@GOTPAGEOFF]
ldrh w10, [x10]
tst w9, w1
csel w9, wzr, w10, eq
orr w8, w8, w9
Lloh12:
adrp x9, _AI_STOP_St@GOTPAGE
Lloh13:
ldr x9, [x9, _AI_STOP_St@GOTPAGEOFF]
Lloh14:
adrp x10, _AI_STOP_Interrupt_Ack@GOTPAGE
Lloh15:
ldr x10, [x10, _AI_STOP_Interrupt_Ack@GOTPAGEOFF]
ldrh w9, [x9]
ldrh w10, [x10]
tst w9, w1
csel w9, wzr, w10, eq
orr w8, w8, w9
tst w8, #0xffff
b.eq LBB0_2
; %bb.1:
Lloh16:
adrp x9, _devpriv@GOTPAGE
Lloh17:
ldr x9, [x9, _devpriv@GOTPAGEOFF]
Lloh18:
ldr x9, [x9]
Lloh19:
adrp x10, _Interrupt_A_Ack_Register@GOTPAGE
Lloh20:
ldr x10, [x10, _Interrupt_A_Ack_Register@GOTPAGEOFF]
ldr x3, [x9]
Lloh21:
ldr w2, [x10]
and w1, w8, #0xffff
br x3
LBB0_2:
ret
.loh AdrpLdrGot Lloh14, Lloh15
.loh AdrpLdrGot Lloh12, Lloh13
.loh AdrpLdrGot Lloh10, Lloh11
.loh AdrpLdrGot Lloh8, Lloh9
.loh AdrpLdrGot Lloh6, Lloh7
.loh AdrpLdrGot Lloh4, Lloh5
.loh AdrpLdrGot Lloh2, Lloh3
.loh AdrpLdrGot Lloh0, Lloh1
.loh AdrpLdrGotLdr Lloh19, Lloh20, Lloh21
.loh AdrpLdrGotLdr Lloh16, Lloh17, Lloh18
.cfi_endproc
; -- End function
.comm _AI_SC_TC_St,2,1 ; @AI_SC_TC_St
.comm _AI_SC_TC_Interrupt_Ack,2,1 ; @AI_SC_TC_Interrupt_Ack
.comm _AI_START1_St,2,1 ; @AI_START1_St
.comm _AI_START1_Interrupt_Ack,2,1 ; @AI_START1_Interrupt_Ack
.comm _AI_START_St,2,1 ; @AI_START_St
.comm _AI_START_Interrupt_Ack,2,1 ; @AI_START_Interrupt_Ack
.comm _AI_STOP_St,2,1 ; @AI_STOP_St
.comm _AI_STOP_Interrupt_Ack,2,1 ; @AI_STOP_Interrupt_Ack
.comm _devpriv,8,3 ; @devpriv
.comm _Interrupt_A_Ack_Register,4,2 ; @Interrupt_A_Ack_Register
.no_dead_strip _ack_a_interrupt
.subsections_via_symbols
| AnghaBench/fastsocket/kernel/drivers/staging/comedi/drivers/extr_ni_mio_common.c_ack_a_interrupt.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function vidioc_g_input
_vidioc_g_input: ## @vidioc_g_input
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movl $0, (%rdx)
xorl %eax, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.no_dead_strip _vidioc_g_input
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function vidioc_g_input
_vidioc_g_input: ; @vidioc_g_input
.cfi_startproc
; %bb.0:
str wzr, [x2]
mov w0, #0
ret
.cfi_endproc
; -- End function
.no_dead_strip _vidioc_g_input
.subsections_via_symbols
| AnghaBench/fastsocket/kernel/drivers/media/video/gspca/extr_gspca.c_vidioc_g_input.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function mlxsw_sp_pfc_delay_get
_mlxsw_sp_pfc_delay_get: ## @mlxsw_sp_pfc_delay_get
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %esi, %r14d
movq %rdi, %r15
movq _BITS_PER_BYTE@GOTPCREL(%rip), %rax
movl (%rax), %esi
movl %edx, %edi
callq _DIV_ROUND_UP
movq %r15, %rdi
movl %eax, %esi
callq _mlxsw_sp_bytes_cells
movl %eax, %ebx
movq _MLXSW_SP_CELL_FACTOR@GOTPCREL(%rip), %rax
imull (%rax), %ebx
movq %r15, %rdi
movl %r14d, %esi
callq _mlxsw_sp_bytes_cells
addl %ebx, %eax
addq $8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _BITS_PER_BYTE,4,2 ## @BITS_PER_BYTE
.comm _MLXSW_SP_CELL_FACTOR,4,2 ## @MLXSW_SP_CELL_FACTOR
.no_dead_strip _mlxsw_sp_pfc_delay_get
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function mlxsw_sp_pfc_delay_get
_mlxsw_sp_pfc_delay_get: ; @mlxsw_sp_pfc_delay_get
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x19, x1
mov x20, x0
Lloh0:
adrp x8, _BITS_PER_BYTE@GOTPAGE
Lloh1:
ldr x8, [x8, _BITS_PER_BYTE@GOTPAGEOFF]
Lloh2:
ldr w1, [x8]
mov x0, x2
bl _DIV_ROUND_UP
mov x1, x0
mov x0, x20
bl _mlxsw_sp_bytes_cells
mov x21, x0
Lloh3:
adrp x8, _MLXSW_SP_CELL_FACTOR@GOTPAGE
Lloh4:
ldr x8, [x8, _MLXSW_SP_CELL_FACTOR@GOTPAGEOFF]
Lloh5:
ldr w22, [x8]
mov x0, x20
mov x1, x19
bl _mlxsw_sp_bytes_cells
madd w0, w22, w21, w0
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _BITS_PER_BYTE,4,2 ; @BITS_PER_BYTE
.comm _MLXSW_SP_CELL_FACTOR,4,2 ; @MLXSW_SP_CELL_FACTOR
.no_dead_strip _mlxsw_sp_pfc_delay_get
.subsections_via_symbols
| AnghaBench/linux/drivers/net/ethernet/mellanox/mlxsw/extr_spectrum.c_mlxsw_sp_pfc_delay_get.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _test ## -- Begin function test
.p2align 4, 0x90
_test: ## @test
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
leaq -8(%rbp), %rax
negq %rax
addq %rbp, %rax
addq $-4, %rax
shrq $2, %rax
## kill: def $eax killed $eax killed $rax
popq %rbp
retq
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _test ; -- Begin function test
.p2align 2
_test: ; @test
.cfi_startproc
; %bb.0:
sub sp, sp, #16
.cfi_def_cfa_offset 16
add x8, sp, #12
add x9, sp, #8
sub x8, x9, x8
lsr x0, x8, #2
; kill: def $w0 killed $w0 killed $x0
add sp, sp, #16
ret
.cfi_endproc
; -- End function
.subsections_via_symbols
| the_stack_data/7286.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function mlxsw_reg_spvm_pack
_mlxsw_reg_spvm_pack: ## @mlxsw_reg_spvm_pack
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %r9d, -48(%rbp) ## 4-byte Spill
movl %r8d, %r12d
movl %ecx, %r14d
movl %edx, %r15d
movl %esi, -44(%rbp) ## 4-byte Spill
movq %rdi, %rbx
subl %edx, %r14d
leal 1(%r14), %r13d
movq _spvm@GOTPCREL(%rip), %rax
movl (%rax), %edi
movq %rbx, %rsi
callq _MLXSW_REG_ZERO
movq %rbx, %rdi
movl -44(%rbp), %esi ## 4-byte Reload
callq _mlxsw_reg_spvm_local_port_set
movq %rbx, %rdi
movl %r13d, %esi
callq _mlxsw_reg_spvm_num_rec_set
testl %r14d, %r14d
js LBB0_3
## %bb.1:
xorl %r14d, %r14d
.p2align 4, 0x90
LBB0_2: ## =>This Inner Loop Header: Depth=1
movq %rbx, %rdi
movl %r14d, %esi
movl %r12d, %edx
callq _mlxsw_reg_spvm_rec_i_set
movq %rbx, %rdi
movl %r14d, %esi
movl %r12d, %edx
callq _mlxsw_reg_spvm_rec_e_set
movq %rbx, %rdi
movl %r14d, %esi
movl -48(%rbp), %edx ## 4-byte Reload
callq _mlxsw_reg_spvm_rec_u_set
leal (%r15,%r14), %edx
movq %rbx, %rdi
movl %r14d, %esi
callq _mlxsw_reg_spvm_rec_vid_set
incl %r14d
cmpl %r14d, %r13d
jne LBB0_2
LBB0_3:
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _spvm,4,2 ## @spvm
.no_dead_strip _mlxsw_reg_spvm_pack
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function mlxsw_reg_spvm_pack
_mlxsw_reg_spvm_pack: ; @mlxsw_reg_spvm_pack
.cfi_startproc
; %bb.0:
stp x26, x25, [sp, #-80]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 80
stp x24, x23, [sp, #16] ; 16-byte Folded Spill
stp x22, x21, [sp, #32] ; 16-byte Folded Spill
stp x20, x19, [sp, #48] ; 16-byte Folded Spill
stp x29, x30, [sp, #64] ; 16-byte Folded Spill
add x29, sp, #64
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
mov x19, x5
mov x20, x4
mov x21, x2
mov x24, x1
mov x22, x0
sub w25, w3, w2
add w23, w25, #1
Lloh0:
adrp x8, _spvm@GOTPAGE
Lloh1:
ldr x8, [x8, _spvm@GOTPAGEOFF]
Lloh2:
ldr w0, [x8]
mov x1, x22
bl _MLXSW_REG_ZERO
mov x0, x22
mov x1, x24
bl _mlxsw_reg_spvm_local_port_set
mov x0, x22
mov x1, x23
bl _mlxsw_reg_spvm_num_rec_set
tbnz w25, #31, LBB0_3
; %bb.1:
mov w24, #0
LBB0_2: ; =>This Inner Loop Header: Depth=1
mov x0, x22
mov x1, x24
mov x2, x20
bl _mlxsw_reg_spvm_rec_i_set
mov x0, x22
mov x1, x24
mov x2, x20
bl _mlxsw_reg_spvm_rec_e_set
mov x0, x22
mov x1, x24
mov x2, x19
bl _mlxsw_reg_spvm_rec_u_set
add w2, w21, w24
mov x0, x22
mov x1, x24
bl _mlxsw_reg_spvm_rec_vid_set
add w24, w24, #1
cmp w23, w24
b.ne LBB0_2
LBB0_3:
ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
ldp x22, x21, [sp, #32] ; 16-byte Folded Reload
ldp x24, x23, [sp, #16] ; 16-byte Folded Reload
ldp x26, x25, [sp], #80 ; 16-byte Folded Reload
ret
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _spvm,4,2 ; @spvm
.no_dead_strip _mlxsw_reg_spvm_pack
.subsections_via_symbols
| AnghaBench/linux/drivers/net/ethernet/mellanox/mlxsw/extr_reg.h_mlxsw_reg_spvm_pack.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _trap_FS_Read ## -- Begin function trap_FS_Read
.p2align 4, 0x90
_trap_FS_Read: ## @trap_FS_Read
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movl %edx, %ecx
movl %esi, %edx
movq %rdi, %rsi
movq _UI_FS_READ@GOTPCREL(%rip), %rax
movl (%rax), %edi
popq %rbp
jmp _syscall ## TAILCALL
.cfi_endproc
## -- End function
.comm _UI_FS_READ,4,2 ## @UI_FS_READ
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _trap_FS_Read ; -- Begin function trap_FS_Read
.p2align 2
_trap_FS_Read: ; @trap_FS_Read
.cfi_startproc
; %bb.0:
mov x3, x2
mov x2, x1
mov x1, x0
Lloh0:
adrp x8, _UI_FS_READ@GOTPAGE
Lloh1:
ldr x8, [x8, _UI_FS_READ@GOTPAGEOFF]
Lloh2:
ldr w0, [x8]
b _syscall
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _UI_FS_READ,4,2 ; @UI_FS_READ
.subsections_via_symbols
| AnghaBench/lab/engine/code/ui/extr_ui_syscalls.c_trap_FS_Read.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function smallblk_changed_cb
_smallblk_changed_cb: ## @smallblk_changed_cb
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq %rsi, %rbx
movq %rdi, %r14
movq _SPA_OLD_MAXBLOCKSIZE@GOTPCREL(%rip), %rax
xorl %edi, %edi
cmpq %rsi, (%rax)
setge %dil
callq _ASSERT
movq %rbx, %rdi
callq _ISP2
movl %eax, %edi
callq _ASSERT
movq %rbx, (%r14)
popq %rbx
popq %r14
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _SPA_OLD_MAXBLOCKSIZE,8,3 ## @SPA_OLD_MAXBLOCKSIZE
.no_dead_strip _smallblk_changed_cb
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function smallblk_changed_cb
_smallblk_changed_cb: ; @smallblk_changed_cb
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x1
mov x20, x0
Lloh0:
adrp x8, _SPA_OLD_MAXBLOCKSIZE@GOTPAGE
Lloh1:
ldr x8, [x8, _SPA_OLD_MAXBLOCKSIZE@GOTPAGEOFF]
Lloh2:
ldr x8, [x8]
cmp x8, x1
cset w0, ge
bl _ASSERT
mov x0, x19
bl _ISP2
bl _ASSERT
str x19, [x20]
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _SPA_OLD_MAXBLOCKSIZE,8,3 ; @SPA_OLD_MAXBLOCKSIZE
.no_dead_strip _smallblk_changed_cb
.subsections_via_symbols
| AnghaBench/zfs/module/zfs/extr_dmu_objset.c_smallblk_changed_cb.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _jrand48 ## -- Begin function jrand48
.p2align 4, 0x90
_jrand48: ## @jrand48
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movl (%rdi), %eax
movzwl 4(%rdi), %ecx
shlq $32, %rcx
orq %rax, %rcx
movabsq $25214903917, %rax ## imm = 0x5DEECE66D
imulq %rcx, %rax
addq $11, %rax
movw %ax, (%rdi)
movq %rax, %rcx
shrq $16, %rcx
movw %cx, 2(%rdi)
shrq $32, %rax
movw %ax, 4(%rdi)
movslq %ecx, %rax
popq %rbp
retq
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _jrand48 ; -- Begin function jrand48
.p2align 2
_jrand48: ; @jrand48
.cfi_startproc
; %bb.0:
ldr w8, [x0]
ldrh w9, [x0, #4]
bfi x8, x9, #32, #16
mov x9, #58989
movk x9, #57068, lsl #16
movk x9, #5, lsl #32
mul x8, x8, x9
add x8, x8, #11
strh w8, [x0]
lsr x9, x8, #16
strh w9, [x0, #2]
lsr x9, x8, #32
strh w9, [x0, #4]
sbfx x0, x8, #16, #32
ret
.cfi_endproc
; -- End function
.subsections_via_symbols
| the_stack_data/290489.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function i2c_pxa_probe_pdata
_i2c_pxa_probe_pdata: ## @i2c_pxa_probe_pdata
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdx, %r15
movq %rsi, %r14
movq %rdi, %rbx
callq _dev_get_platdata
movq %rax, %r12
movq %rbx, %rdi
callq _platform_get_device_id
movl (%rax), %eax
movl %eax, (%r15)
testq %r12, %r12
je LBB0_2
## %bb.1:
movl (%r12), %eax
testl %eax, %eax
movl $14, %ecx
cmovnel %eax, %ecx
movl %ecx, (%r14)
movups 4(%r12), %xmm0
movups %xmm0, 4(%r14)
LBB0_2:
xorl %eax, %eax
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.no_dead_strip _i2c_pxa_probe_pdata
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function i2c_pxa_probe_pdata
_i2c_pxa_probe_pdata: ; @i2c_pxa_probe_pdata
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x21, x2
mov x19, x1
mov x22, x0
bl _dev_get_platdata
mov x20, x0
mov x0, x22
bl _platform_get_device_id
ldr w8, [x0]
str w8, [x21]
cbz x20, LBB0_2
; %bb.1:
ldr w8, [x20]
mov w9, #14
cmp w8, #0
csel w8, w9, w8, eq
str w8, [x19]
ldur q0, [x20, #4]
stur q0, [x19, #4]
LBB0_2:
mov w0, #0
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
.no_dead_strip _i2c_pxa_probe_pdata
.subsections_via_symbols
| AnghaBench/linux/drivers/i2c/busses/extr_i2c-pxa.c_i2c_pxa_probe_pdata.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function iop_shutdown
_iop_shutdown: ## @iop_shutdown
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
xorl %eax, %eax
callq _read_tmr0
movq _IOP_TMR_EN@GOTPCREL(%rip), %rcx
movl (%rcx), %edi
notl %edi
andl %eax, %edi
callq _write_tmr0
xorl %eax, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _IOP_TMR_EN,4,2 ## @IOP_TMR_EN
.no_dead_strip _iop_shutdown
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function iop_shutdown
_iop_shutdown: ; @iop_shutdown
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
bl _read_tmr0
Lloh0:
adrp x8, _IOP_TMR_EN@GOTPAGE
Lloh1:
ldr x8, [x8, _IOP_TMR_EN@GOTPAGEOFF]
Lloh2:
ldr w8, [x8]
bic w0, w0, w8
bl _write_tmr0
mov w0, #0
ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
ret
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _IOP_TMR_EN,4,2 ; @IOP_TMR_EN
.no_dead_strip _iop_shutdown
.subsections_via_symbols
| AnghaBench/linux/arch/arm/mach-iop32x/extr_time.c_iop_shutdown.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function cik_sdma_ring_emit_wreg
_cik_sdma_ring_emit_wreg: ## @cik_sdma_ring_emit_wreg
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %edx, %r14d
movl %esi, %r15d
movq %rdi, %rbx
movq _SDMA_OPCODE_SRBM_WRITE@GOTPCREL(%rip), %rax
movl (%rax), %edi
xorl %esi, %esi
movl $61440, %edx ## imm = 0xF000
callq _SDMA_PACKET
movq %rbx, %rdi
movl %eax, %esi
callq _amdgpu_ring_write
movq %rbx, %rdi
movl %r15d, %esi
callq _amdgpu_ring_write
movq %rbx, %rdi
movl %r14d, %esi
addq $8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
jmp _amdgpu_ring_write ## TAILCALL
.cfi_endproc
## -- End function
.comm _SDMA_OPCODE_SRBM_WRITE,4,2 ## @SDMA_OPCODE_SRBM_WRITE
.no_dead_strip _cik_sdma_ring_emit_wreg
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function cik_sdma_ring_emit_wreg
_cik_sdma_ring_emit_wreg: ; @cik_sdma_ring_emit_wreg
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x19, x2
mov x20, x1
mov x21, x0
Lloh0:
adrp x8, _SDMA_OPCODE_SRBM_WRITE@GOTPAGE
Lloh1:
ldr x8, [x8, _SDMA_OPCODE_SRBM_WRITE@GOTPAGEOFF]
Lloh2:
ldr w0, [x8]
mov w1, #0
mov w2, #61440
bl _SDMA_PACKET
mov x1, x0
mov x0, x21
bl _amdgpu_ring_write
mov x0, x21
mov x1, x20
bl _amdgpu_ring_write
mov x0, x21
mov x1, x19
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
b _amdgpu_ring_write
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _SDMA_OPCODE_SRBM_WRITE,4,2 ; @SDMA_OPCODE_SRBM_WRITE
.no_dead_strip _cik_sdma_ring_emit_wreg
.subsections_via_symbols
| AnghaBench/linux/drivers/gpu/drm/amd/amdgpu/extr_cik_sdma.c_cik_sdma_ring_emit_wreg.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function intel_sdvo_dpms
_intel_sdvo_dpms: ## @intel_sdvo_dpms
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %esi, %ebx
movq %rdi, %r15
callq _intel_attached_sdvo
movq %rax, %r14
movq _DRM_MODE_DPMS_ON@GOTPCREL(%rip), %rax
cmpl %ebx, (%rax)
je LBB0_2
## %bb.1:
movq _DRM_MODE_DPMS_OFF@GOTPCREL(%rip), %rcx
movl (%rcx), %ebx
LBB0_2:
cmpl (%r15), %ebx
je LBB0_5
## %bb.3:
movl %ebx, (%r15)
movq 16(%r14), %r12
testq %r12, %r12
je LBB0_4
## %bb.6:
cmpl (%rax), %ebx
jne LBB0_7
## %bb.8:
movl $1, 8(%r14)
movq %r12, %rdi
callq _intel_crtc_update_dpms
movl (%r14), %esi
movq %r14, %rdi
callq _intel_sdvo_set_active_outputs
jmp LBB0_9
LBB0_4:
movl $0, 8(%r14)
LBB0_5:
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
LBB0_7:
movq %r14, %rdi
xorl %esi, %esi
callq _intel_sdvo_set_active_outputs
movl $0, 8(%r14)
movq %r12, %rdi
callq _intel_crtc_update_dpms
LBB0_9:
movl 4(%r15), %edi
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
jmp _intel_modeset_check_state ## TAILCALL
.cfi_endproc
## -- End function
.comm _DRM_MODE_DPMS_ON,4,2 ## @DRM_MODE_DPMS_ON
.comm _DRM_MODE_DPMS_OFF,4,2 ## @DRM_MODE_DPMS_OFF
.no_dead_strip _intel_sdvo_dpms
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function intel_sdvo_dpms
_intel_sdvo_dpms: ; @intel_sdvo_dpms
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x21, x1
mov x19, x0
bl _intel_attached_sdvo
Lloh0:
adrp x8, _DRM_MODE_DPMS_ON@GOTPAGE
Lloh1:
ldr x8, [x8, _DRM_MODE_DPMS_ON@GOTPAGEOFF]
Lloh2:
adrp x9, _DRM_MODE_DPMS_OFF@GOTPAGE
Lloh3:
ldr x9, [x9, _DRM_MODE_DPMS_OFF@GOTPAGEOFF]
ldr w10, [x8]
Lloh4:
ldr w9, [x9]
cmp w10, w21
csel w9, w21, w9, eq
ldr w10, [x19]
cmp w9, w10
b.eq LBB0_5
; %bb.1:
mov x20, x0
str w9, [x19]
ldr x21, [x0, #16]
cbz x21, LBB0_4
; %bb.2:
ldr w8, [x8]
cmp w9, w8
b.ne LBB0_6
; %bb.3:
mov w8, #1
str w8, [x20, #8]
mov x0, x21
bl _intel_crtc_update_dpms
ldr w1, [x20]
mov x0, x20
bl _intel_sdvo_set_active_outputs
b LBB0_7
LBB0_4:
str wzr, [x20, #8]
LBB0_5:
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
LBB0_6:
mov x0, x20
mov w1, #0
bl _intel_sdvo_set_active_outputs
str wzr, [x20, #8]
mov x0, x21
bl _intel_crtc_update_dpms
LBB0_7:
ldr w0, [x19, #4]
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
b _intel_modeset_check_state
.loh AdrpLdrGotLdr Lloh2, Lloh3, Lloh4
.loh AdrpLdrGot Lloh0, Lloh1
.cfi_endproc
; -- End function
.comm _DRM_MODE_DPMS_ON,4,2 ; @DRM_MODE_DPMS_ON
.comm _DRM_MODE_DPMS_OFF,4,2 ; @DRM_MODE_DPMS_OFF
.no_dead_strip _intel_sdvo_dpms
.subsections_via_symbols
| AnghaBench/fastsocket/kernel/drivers/gpu/drm/i915/extr_intel_sdvo.c_intel_sdvo_dpms.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function XLogCheckpointNeeded
_XLogCheckpointNeeded: ## @XLogCheckpointNeeded
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq _RedoRecPtr@GOTPCREL(%rip), %rax
movl (%rax), %edi
movq _wal_segment_size@GOTPCREL(%rip), %rax
movl (%rax), %edx
callq _XLByteToSeg
movl $1, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _RedoRecPtr,4,2 ## @RedoRecPtr
.comm _wal_segment_size,4,2 ## @wal_segment_size
.comm _CheckPointSegments,4,2 ## @CheckPointSegments
.no_dead_strip _XLogCheckpointNeeded
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function XLogCheckpointNeeded
_XLogCheckpointNeeded: ; @XLogCheckpointNeeded
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh0:
adrp x8, _RedoRecPtr@GOTPAGE
Lloh1:
ldr x8, [x8, _RedoRecPtr@GOTPAGEOFF]
Lloh2:
ldr w0, [x8]
Lloh3:
adrp x8, _wal_segment_size@GOTPAGE
Lloh4:
ldr x8, [x8, _wal_segment_size@GOTPAGEOFF]
Lloh5:
ldr w2, [x8]
bl _XLByteToSeg
mov w0, #1
ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
ret
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _RedoRecPtr,4,2 ; @RedoRecPtr
.comm _wal_segment_size,4,2 ; @wal_segment_size
.comm _CheckPointSegments,4,2 ; @CheckPointSegments
.no_dead_strip _XLogCheckpointNeeded
.subsections_via_symbols
| AnghaBench/postgres/src/backend/access/transam/extr_xlog.c_XLogCheckpointNeeded.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _GIFDisposeFrame ## -- Begin function GIFDisposeFrame
.p2align 4, 0x90
_GIFDisposeFrame: ## @GIFDisposeFrame
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
xorl %eax, %eax
popq %rbp
jmp _ErrorGIFNotAvailable ## TAILCALL
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _GIFDisposeFrame ; -- Begin function GIFDisposeFrame
.p2align 2
_GIFDisposeFrame: ; @GIFDisposeFrame
.cfi_startproc
; %bb.0:
b _ErrorGIFNotAvailable
.cfi_endproc
; -- End function
.subsections_via_symbols
| AnghaBench/sumatrapdf/ext/libwebp/examples/extr_gifdec.c_GIFDisposeFrame.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
subq $16, %rsp
leaq L_.str(%rip), %rdi
leaq -4(%rbp), %rsi
xorl %eax, %eax
callq _scanf
movl -4(%rbp), %eax
decl %eax
cmpl $11, %eax
ja LBB0_1
## %bb.2:
cltq
leaq l_reltable.main(%rip), %rcx
movslq (%rcx,%rax,4), %rdi
addq %rcx, %rdi
jmp LBB0_3
LBB0_1:
leaq L_.str.4(%rip), %rdi
LBB0_3:
xorl %eax, %eax
callq _printf
xorl %eax, %eax
addq $16, %rsp
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "%d"
L_.str.1: ## @.str.1
.asciz "winter"
L_.str.2: ## @.str.2
.asciz "spring"
L_.str.3: ## @.str.3
.asciz "summer"
L_.str.4: ## @.str.4
.asciz "fall"
.section __TEXT,__const
.p2align 2 ## @reltable.main
l_reltable.main:
.long L_.str.1-l_reltable.main
.long L_.str.1-l_reltable.main
.long L_.str.2-l_reltable.main
.long L_.str.2-l_reltable.main
.long L_.str.2-l_reltable.main
.long L_.str.3-l_reltable.main
.long L_.str.3-l_reltable.main
.long L_.str.3-l_reltable.main
.long L_.str.4-l_reltable.main
.long L_.str.4-l_reltable.main
.long L_.str.4-l_reltable.main
.long L_.str.1-l_reltable.main
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
sub sp, sp, #32
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
sub x8, x29, #4
str x8, [sp]
Lloh0:
adrp x0, l_.str@PAGE
Lloh1:
add x0, x0, l_.str@PAGEOFF
bl _scanf
ldur w8, [x29, #-4]
sub w8, w8, #1
cmp w8, #11
b.hi LBB0_2
; %bb.1:
Lloh2:
adrp x9, l_switch.table.main@PAGE
Lloh3:
add x9, x9, l_switch.table.main@PAGEOFF
ldr x0, [x9, w8, sxtw #3]
b LBB0_3
LBB0_2:
Lloh4:
adrp x0, l_.str.4@PAGE
Lloh5:
add x0, x0, l_.str.4@PAGEOFF
LBB0_3:
bl _printf
mov w0, #0
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #32
ret
.loh AdrpAdd Lloh0, Lloh1
.loh AdrpAdd Lloh2, Lloh3
.loh AdrpAdd Lloh4, Lloh5
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "%d"
l_.str.1: ; @.str.1
.asciz "winter"
l_.str.2: ; @.str.2
.asciz "spring"
l_.str.3: ; @.str.3
.asciz "summer"
l_.str.4: ; @.str.4
.asciz "fall"
.section __DATA,__const
.p2align 3 ; @switch.table.main
l_switch.table.main:
.quad l_.str.1
.quad l_.str.1
.quad l_.str.2
.quad l_.str.2
.quad l_.str.2
.quad l_.str.3
.quad l_.str.3
.quad l_.str.3
.quad l_.str.4
.quad l_.str.4
.quad l_.str.4
.quad l_.str.1
.subsections_via_symbols
| the_stack_data/451187.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
xorl %eax, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
mov w0, #0
ret
.cfi_endproc
; -- End function
.subsections_via_symbols
| the_stack_data/225144481.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
leaq L_str(%rip), %rdi
callq _puts
leaq L_.str.1(%rip), %rdi
xorl %eax, %eax
callq _printf
xorl %eax, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str.1: ## @.str.1
.asciz "Welcome to C programming."
L_str: ## @str
.asciz "Hello, World!"
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh0:
adrp x0, l_str@PAGE
Lloh1:
add x0, x0, l_str@PAGEOFF
bl _puts
Lloh2:
adrp x0, l_.str.1@PAGE
Lloh3:
add x0, x0, l_.str.1@PAGEOFF
bl _printf
mov w0, #0
ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
ret
.loh AdrpAdd Lloh2, Lloh3
.loh AdrpAdd Lloh0, Lloh1
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str.1: ; @.str.1
.asciz "Welcome to C programming."
l_str: ; @str
.asciz "Hello, World!"
.subsections_via_symbols
| the_stack_data/150140700.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function hvcs_convert
_hvcs_convert: ## @hvcs_convert
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
addq $-128, %rdi
cmpq $10, %rdi
ja LBB0_5
## %bb.1:
xorl %eax, %eax
leaq LJTI0_0(%rip), %rcx
movslq (%rcx,%rdi,4), %rdx
addq %rcx, %rdx
jmpq *%rdx
LBB0_4:
movq _EBUSY@GOTPCREL(%rip), %rcx
LBB0_6:
xorl %eax, %eax
subl (%rcx), %eax
LBB0_7:
popq %rbp
retq
LBB0_3:
movq _EIO@GOTPCREL(%rip), %rcx
jmp LBB0_6
LBB0_5:
movq _EPERM@GOTPCREL(%rip), %rcx
jmp LBB0_6
LBB0_2:
movq _EINVAL@GOTPCREL(%rip), %rcx
jmp LBB0_6
.cfi_endproc
.p2align 2, 0x90
.data_region jt32
.set L0_0_set_7, LBB0_7-LJTI0_0
.set L0_0_set_2, LBB0_2-LJTI0_0
.set L0_0_set_4, LBB0_4-LJTI0_0
.set L0_0_set_3, LBB0_3-LJTI0_0
.set L0_0_set_5, LBB0_5-LJTI0_0
LJTI0_0:
.long L0_0_set_7
.long L0_0_set_2
.long L0_0_set_4
.long L0_0_set_4
.long L0_0_set_4
.long L0_0_set_4
.long L0_0_set_4
.long L0_0_set_4
.long L0_0_set_3
.long L0_0_set_5
.long L0_0_set_4
.end_data_region
## -- End function
.comm _EINVAL,4,2 ## @EINVAL
.comm _EIO,4,2 ## @EIO
.comm _EBUSY,4,2 ## @EBUSY
.comm _EPERM,4,2 ## @EPERM
.no_dead_strip _hvcs_convert
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function hvcs_convert
_hvcs_convert: ; @hvcs_convert
.cfi_startproc
; %bb.0:
sub x8, x0, #128
cmp x8, #10
b.hi LBB0_6
; %bb.1:
mov w0, #0
Lloh0:
adrp x9, lJTI0_0@PAGE
Lloh1:
add x9, x9, lJTI0_0@PAGEOFF
adr x10, LBB0_2
ldrb w11, [x9, x8]
add x10, x10, x11, lsl #2
br x10
LBB0_2:
Lloh2:
adrp x8, _EBUSY@GOTPAGE
Lloh3:
ldr x8, [x8, _EBUSY@GOTPAGEOFF]
LBB0_3:
ldr w8, [x8]
neg w0, w8
LBB0_4:
ret
LBB0_5:
Lloh4:
adrp x8, _EIO@GOTPAGE
Lloh5:
ldr x8, [x8, _EIO@GOTPAGEOFF]
b LBB0_3
LBB0_6:
Lloh6:
adrp x8, _EPERM@GOTPAGE
Lloh7:
ldr x8, [x8, _EPERM@GOTPAGEOFF]
b LBB0_3
LBB0_7:
Lloh8:
adrp x8, _EINVAL@GOTPAGE
Lloh9:
ldr x8, [x8, _EINVAL@GOTPAGEOFF]
b LBB0_3
.loh AdrpAdd Lloh0, Lloh1
.loh AdrpLdrGot Lloh2, Lloh3
.loh AdrpLdrGot Lloh4, Lloh5
.loh AdrpLdrGot Lloh6, Lloh7
.loh AdrpLdrGot Lloh8, Lloh9
.cfi_endproc
.section __TEXT,__const
lJTI0_0:
.byte (LBB0_4-LBB0_2)>>2
.byte (LBB0_7-LBB0_2)>>2
.byte (LBB0_2-LBB0_2)>>2
.byte (LBB0_2-LBB0_2)>>2
.byte (LBB0_2-LBB0_2)>>2
.byte (LBB0_2-LBB0_2)>>2
.byte (LBB0_2-LBB0_2)>>2
.byte (LBB0_2-LBB0_2)>>2
.byte (LBB0_5-LBB0_2)>>2
.byte (LBB0_6-LBB0_2)>>2
.byte (LBB0_2-LBB0_2)>>2
; -- End function
.comm _EINVAL,4,2 ; @EINVAL
.comm _EIO,4,2 ; @EIO
.comm _EBUSY,4,2 ; @EBUSY
.comm _EPERM,4,2 ; @EPERM
.no_dead_strip _hvcs_convert
.subsections_via_symbols
| AnghaBench/linux/arch/powerpc/platforms/pseries/extr_hvcserver.c_hvcs_convert.c | anghabench |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.