x86 stringlengths 122 9.39M | arm stringlengths 122 9.33M | file stringlengths 19 200 | source stringclasses 2 values |
|---|---|---|---|
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function opal_reverttper
_opal_reverttper: ## @opal_reverttper
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
subq $72, %rsp
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %edx, %r14d
movq %rdi, %r15
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -32(%rbp)
movq _start_SIDASP_opal_session@GOTPCREL(%rip), %rax
movl (%rax), %eax
movl %eax, -64(%rbp)
movq %rsi, -56(%rbp)
movq _revert_tper@GOTPCREL(%rip), %rax
movl (%rax), %eax
movl %eax, -48(%rbp)
movq $0, -40(%rbp)
movq _start_PSID_opal_session@GOTPCREL(%rip), %rcx
movl (%rcx), %ecx
movl %ecx, -96(%rbp)
movq %rsi, -88(%rbp)
movl %eax, -80(%rbp)
movq $0, -72(%rbp)
callq _mutex_lock
movq %r15, %rdi
callq _setup_opal_dev
testl %r14d, %r14d
leaq -64(%rbp), %rax
leaq -96(%rbp), %rbx
cmoveq %rax, %rbx
movq %rbx, %rdi
callq _ARRAY_SIZE
movq %r15, %rdi
movq %rbx, %rsi
movl %eax, %edx
callq _execute_steps
movl %eax, %r14d
movq %r15, %rdi
callq _mutex_unlock
testl %r14d, %r14d
jne LBB0_2
## %bb.1:
movq %r15, %rdi
callq _clean_opal_dev
LBB0_2:
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
cmpq -32(%rbp), %rax
jne LBB0_4
## %bb.3:
movl %r14d, %eax
addq $72, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
LBB0_4:
callq ___stack_chk_fail
.cfi_endproc
## -- End function
.comm _start_SIDASP_opal_session,4,2 ## @start_SIDASP_opal_session
.comm _revert_tper,4,2 ## @revert_tper
.comm _start_PSID_opal_session,4,2 ## @start_PSID_opal_session
.no_dead_strip _opal_reverttper
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function opal_reverttper
_opal_reverttper: ; @opal_reverttper
.cfi_startproc
; %bb.0:
sub sp, sp, #112
.cfi_def_cfa_offset 112
stp x20, x19, [sp, #80] ; 16-byte Folded Spill
stp x29, x30, [sp, #96] ; 16-byte Folded Spill
add x29, sp, #96
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x20, x2
mov x19, x0
Lloh0:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh1:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh2:
ldr x8, [x8]
stur x8, [x29, #-24]
Lloh3:
adrp x8, _start_SIDASP_opal_session@GOTPAGE
Lloh4:
ldr x8, [x8, _start_SIDASP_opal_session@GOTPAGEOFF]
Lloh5:
ldr w8, [x8]
str w8, [sp, #40]
str x1, [sp, #48]
Lloh6:
adrp x8, _revert_tper@GOTPAGE
Lloh7:
ldr x8, [x8, _revert_tper@GOTPAGEOFF]
Lloh8:
ldr w8, [x8]
str w8, [sp, #56]
Lloh9:
adrp x9, _start_PSID_opal_session@GOTPAGE
Lloh10:
ldr x9, [x9, _start_PSID_opal_session@GOTPAGEOFF]
str xzr, [sp, #64]
Lloh11:
ldr w9, [x9]
str w9, [sp, #8]
str x1, [sp, #16]
str w8, [sp, #24]
str xzr, [sp, #32]
bl _mutex_lock
mov x0, x19
bl _setup_opal_dev
cmp w20, #0
add x8, sp, #8
add x9, sp, #40
csel x20, x9, x8, eq
mov x0, x20
bl _ARRAY_SIZE
mov x2, x0
mov x0, x19
mov x1, x20
bl _execute_steps
mov x20, x0
mov x0, x19
bl _mutex_unlock
cbnz w20, LBB0_2
; %bb.1:
mov x0, x19
bl _clean_opal_dev
LBB0_2:
ldur x8, [x29, #-24]
Lloh12:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh13:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh14:
ldr x9, [x9]
cmp x9, x8
b.ne LBB0_4
; %bb.3:
mov x0, x20
ldp x29, x30, [sp, #96] ; 16-byte Folded Reload
ldp x20, x19, [sp, #80] ; 16-byte Folded Reload
add sp, sp, #112
ret
LBB0_4:
bl ___stack_chk_fail
.loh AdrpLdrGotLdr Lloh9, Lloh10, Lloh11
.loh AdrpLdrGotLdr Lloh6, Lloh7, Lloh8
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGotLdr Lloh12, Lloh13, Lloh14
.cfi_endproc
; -- End function
.comm _start_SIDASP_opal_session,4,2 ; @start_SIDASP_opal_session
.comm _revert_tper,4,2 ; @revert_tper
.comm _start_PSID_opal_session,4,2 ; @start_PSID_opal_session
.no_dead_strip _opal_reverttper
.subsections_via_symbols
| AnghaBench/linux/block/extr_sed-opal.c_opal_reverttper.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _netxen_init_firmware ## -- Begin function netxen_init_firmware
.p2align 4, 0x90
_netxen_init_firmware: ## @netxen_init_firmware
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq %rdi, %rbx
callq _netxen_receive_peg_ready
movl %eax, %r14d
testl %eax, %eax
jne LBB0_3
## %bb.1:
movq _CRB_NIC_CAPABILITIES_HOST@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq _INTR_SCHEME_PERPORT@GOTPCREL(%rip), %rax
movl (%rax), %edx
movq %rbx, %rdi
callq _NXWR32
movq _CRB_MPORT_MODE@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq _MPORT_MULTI_FUNCTION_MODE@GOTPCREL(%rip), %rax
movl (%rax), %edx
movq %rbx, %rdi
callq _NXWR32
movq _CRB_CMDPEG_STATE@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq _PHAN_INITIALIZE_ACK@GOTPCREL(%rip), %rax
movl (%rax), %edx
movq %rbx, %rdi
callq _NXWR32
movl (%rbx), %edi
callq _NX_IS_REVISION_P2
testq %rax, %rax
je LBB0_3
## %bb.2:
movq _CRB_NIC_MSI_MODE_HOST@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq _MSI_MODE_MULTIFUNC@GOTPCREL(%rip), %rax
movl (%rax), %edx
movq %rbx, %rdi
callq _NXWR32
LBB0_3:
movl %r14d, %eax
popq %rbx
popq %r14
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _CRB_NIC_CAPABILITIES_HOST,4,2 ## @CRB_NIC_CAPABILITIES_HOST
.comm _INTR_SCHEME_PERPORT,4,2 ## @INTR_SCHEME_PERPORT
.comm _CRB_MPORT_MODE,4,2 ## @CRB_MPORT_MODE
.comm _MPORT_MULTI_FUNCTION_MODE,4,2 ## @MPORT_MULTI_FUNCTION_MODE
.comm _CRB_CMDPEG_STATE,4,2 ## @CRB_CMDPEG_STATE
.comm _PHAN_INITIALIZE_ACK,4,2 ## @PHAN_INITIALIZE_ACK
.comm _CRB_NIC_MSI_MODE_HOST,4,2 ## @CRB_NIC_MSI_MODE_HOST
.comm _MSI_MODE_MULTIFUNC,4,2 ## @MSI_MODE_MULTIFUNC
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _netxen_init_firmware ; -- Begin function netxen_init_firmware
.p2align 2
_netxen_init_firmware: ; @netxen_init_firmware
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x20, x0
bl _netxen_receive_peg_ready
mov x19, x0
cbnz w0, LBB0_3
; %bb.1:
Lloh0:
adrp x8, _CRB_NIC_CAPABILITIES_HOST@GOTPAGE
Lloh1:
ldr x8, [x8, _CRB_NIC_CAPABILITIES_HOST@GOTPAGEOFF]
Lloh2:
ldr w1, [x8]
Lloh3:
adrp x8, _INTR_SCHEME_PERPORT@GOTPAGE
Lloh4:
ldr x8, [x8, _INTR_SCHEME_PERPORT@GOTPAGEOFF]
Lloh5:
ldr w2, [x8]
mov x0, x20
bl _NXWR32
Lloh6:
adrp x8, _CRB_MPORT_MODE@GOTPAGE
Lloh7:
ldr x8, [x8, _CRB_MPORT_MODE@GOTPAGEOFF]
Lloh8:
ldr w1, [x8]
Lloh9:
adrp x8, _MPORT_MULTI_FUNCTION_MODE@GOTPAGE
Lloh10:
ldr x8, [x8, _MPORT_MULTI_FUNCTION_MODE@GOTPAGEOFF]
Lloh11:
ldr w2, [x8]
mov x0, x20
bl _NXWR32
Lloh12:
adrp x8, _CRB_CMDPEG_STATE@GOTPAGE
Lloh13:
ldr x8, [x8, _CRB_CMDPEG_STATE@GOTPAGEOFF]
Lloh14:
ldr w1, [x8]
Lloh15:
adrp x8, _PHAN_INITIALIZE_ACK@GOTPAGE
Lloh16:
ldr x8, [x8, _PHAN_INITIALIZE_ACK@GOTPAGEOFF]
Lloh17:
ldr w2, [x8]
mov x0, x20
bl _NXWR32
ldr w0, [x20]
bl _NX_IS_REVISION_P2
cbz x0, LBB0_3
; %bb.2:
Lloh18:
adrp x8, _CRB_NIC_MSI_MODE_HOST@GOTPAGE
Lloh19:
ldr x8, [x8, _CRB_NIC_MSI_MODE_HOST@GOTPAGEOFF]
Lloh20:
ldr w1, [x8]
Lloh21:
adrp x8, _MSI_MODE_MULTIFUNC@GOTPAGE
Lloh22:
ldr x8, [x8, _MSI_MODE_MULTIFUNC@GOTPAGEOFF]
Lloh23:
ldr w2, [x8]
mov x0, x20
bl _NXWR32
LBB0_3:
mov x0, x19
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.loh AdrpLdrGotLdr Lloh15, Lloh16, Lloh17
.loh AdrpLdrGotLdr Lloh12, Lloh13, Lloh14
.loh AdrpLdrGotLdr Lloh9, Lloh10, Lloh11
.loh AdrpLdrGotLdr Lloh6, Lloh7, Lloh8
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGotLdr Lloh21, Lloh22, Lloh23
.loh AdrpLdrGotLdr Lloh18, Lloh19, Lloh20
.cfi_endproc
; -- End function
.comm _CRB_NIC_CAPABILITIES_HOST,4,2 ; @CRB_NIC_CAPABILITIES_HOST
.comm _INTR_SCHEME_PERPORT,4,2 ; @INTR_SCHEME_PERPORT
.comm _CRB_MPORT_MODE,4,2 ; @CRB_MPORT_MODE
.comm _MPORT_MULTI_FUNCTION_MODE,4,2 ; @MPORT_MULTI_FUNCTION_MODE
.comm _CRB_CMDPEG_STATE,4,2 ; @CRB_CMDPEG_STATE
.comm _PHAN_INITIALIZE_ACK,4,2 ; @PHAN_INITIALIZE_ACK
.comm _CRB_NIC_MSI_MODE_HOST,4,2 ; @CRB_NIC_MSI_MODE_HOST
.comm _MSI_MODE_MULTIFUNC,4,2 ; @MSI_MODE_MULTIFUNC
.subsections_via_symbols
| AnghaBench/fastsocket/kernel/drivers/net/netxen/extr_netxen_nic_init.c_netxen_init_firmware.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $72, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl $7, %eax
movq %rax, -48(%rbp) ## 8-byte Spill
movl $21, %r15d
movl $189826, %ecx ## imm = 0x2E582
movl $4, %r14d
xorl %eax, %eax
.p2align 4, 0x90
LBB0_1: ## =>This Loop Header: Depth=1
## Child Loop BB0_2 Depth 2
movq %rax, -104(%rbp) ## 8-byte Spill
movq -48(%rbp), %rdx ## 8-byte Reload
movl %edx, %eax
shrl $9, %eax
movl %eax, -80(%rbp) ## 4-byte Spill
movl %edx, %eax
shll $9, %eax
movl %eax, -76(%rbp) ## 4-byte Spill
movl %edx, %eax
shrl $17, %eax
movl %eax, -72(%rbp) ## 4-byte Spill
movl %edx, %eax
shll $17, %eax
movl %eax, -68(%rbp) ## 4-byte Spill
movl $-27115, %r12d ## imm = 0x9615
movl %r14d, -60(%rbp) ## 4-byte Spill
movl %r14d, -52(%rbp) ## 4-byte Spill
movl %r15d, -64(%rbp) ## 4-byte Spill
movl %r15d, -56(%rbp) ## 4-byte Spill
movl %ecx, -84(%rbp) ## 4-byte Spill
.p2align 4, 0x90
LBB0_2: ## Parent Loop BB0_1 Depth=1
## => This Inner Loop Header: Depth=2
leal 27118(%r12), %ebx
movq -48(%rbp), %r14 ## 8-byte Reload
movl %r14d, %edi
movq %r14, -48(%rbp) ## 8-byte Spill
callq _av_int2i
movl %eax, %r13d
movl %ebx, %edi
movl %ebx, -92(%rbp) ## 4-byte Spill
callq _av_int2i
movl %eax, %r15d
movl %r13d, %edi
callq _av_i2int
xorl %edi, %edi
cmpl %r14d, %eax
sete %dil
callq _av_assert0
movl %r15d, %edi
callq _av_i2int
xorl %edi, %edi
cmpl %eax, %ebx
sete %dil
callq _av_assert0
movl %r13d, %edi
movl %r15d, %esi
callq _av_add_i
movl %eax, %edi
callq _av_i2int
movq -104(%rbp), %rcx ## 8-byte Reload
addl %r12d, %ecx
addl $27125, %ecx ## imm = 0x69F5
xorl %edi, %edi
cmpl %eax, %ecx
sete %dil
callq _av_assert0
movl %r13d, %edi
movl %r15d, %esi
movl %r15d, -88(%rbp) ## 4-byte Spill
callq _av_sub_i
movl %eax, %edi
callq _av_i2int
xorl %edi, %edi
movl -52(%rbp), %r12d ## 4-byte Reload
cmpl %eax, %r12d
sete %dil
callq _av_assert0
movl %r13d, %edi
movl %r15d, %esi
callq _av_mul_i
movl %eax, %edi
callq _av_i2int
xorl %edi, %edi
movl -56(%rbp), %r14d ## 4-byte Reload
cmpl %eax, %r14d
sete %dil
callq _av_assert0
movl %r13d, %edi
movl $9, %esi
callq _av_shr_i
movl %eax, %edi
callq _av_i2int
xorl %edi, %edi
cmpl -80(%rbp), %eax ## 4-byte Folded Reload
sete %dil
callq _av_assert0
movl %r13d, %edi
movl $-9, %esi
callq _av_shr_i
movl %eax, %edi
callq _av_i2int
xorl %edi, %edi
cmpl -76(%rbp), %eax ## 4-byte Folded Reload
sete %dil
callq _av_assert0
movl %r13d, %edi
movl $17, %esi
callq _av_shr_i
movl %eax, %edi
callq _av_i2int
xorl %edi, %edi
cmpl -72(%rbp), %eax ## 4-byte Folded Reload
sete %dil
callq _av_assert0
movl %r13d, %edi
movl $-17, %esi
callq _av_shr_i
movl %eax, %edi
callq _av_i2int
xorl %edi, %edi
cmpl -68(%rbp), %eax ## 4-byte Folded Reload
sete %dil
callq _av_assert0
movl %r13d, %edi
callq _av_log2_i
movq %rax, %rbx
movq -48(%rbp), %r15 ## 8-byte Reload
movl %r15d, %edi
callq _av_log2
xorl %edi, %edi
cmpq %rax, %rbx
sete %dil
callq _av_assert0
movl %r13d, %edi
movl -88(%rbp), %esi ## 4-byte Reload
callq _av_div_i
movl %eax, %edi
callq _av_i2int
movl %eax, %ecx
movl %r15d, %eax
xorl %edx, %edx
movl -92(%rbp), %ebx ## 4-byte Reload
divl %ebx
xorl %edi, %edi
cmpl %eax, %ecx
sete %dil
callq _av_assert0
movl -84(%rbp), %ecx ## 4-byte Reload
addl %ecx, %r14d
movl %r14d, -56(%rbp) ## 4-byte Spill
addl $-27118, %r12d ## imm = 0x9612
movl %r12d, -52(%rbp) ## 4-byte Spill
movl %ebx, %eax
movl %ebx, %r12d
cmpl $16750098, %ebx ## imm = 0xFF9612
jb LBB0_2
## %bb.3: ## in Loop: Header=BB0_1 Depth=1
movq -48(%rbp), %rdx ## 8-byte Reload
leal 13215(%rdx), %eax
movl -64(%rbp), %r15d ## 4-byte Reload
addl $39645, %r15d ## imm = 0x9ADD
addl $358364370, %ecx ## imm = 0x155C34D2
movl -60(%rbp), %r14d ## 4-byte Reload
addl $13215, %r14d ## imm = 0x339F
movq -104(%rbp), %rsi ## 8-byte Reload
addl $13215, %esi ## imm = 0x339F
cmpl $16764001, %edx ## imm = 0xFFCC61
## kill: def $eax killed $eax def $rax
movq %rax, -48(%rbp) ## 8-byte Spill
movq %rsi, %rax
jb LBB0_1
## %bb.4:
xorl %eax, %eax
addq $72, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
sub sp, sp, #128
.cfi_def_cfa_offset 128
stp x28, x27, [sp, #32] ; 16-byte Folded Spill
stp x26, x25, [sp, #48] ; 16-byte Folded Spill
stp x24, x23, [sp, #64] ; 16-byte Folded Spill
stp x22, x21, [sp, #80] ; 16-byte Folded Spill
stp x20, x19, [sp, #96] ; 16-byte Folded Spill
stp x29, x30, [sp, #112] ; 16-byte Folded Spill
add x29, sp, #112
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
mov w19, #7
mov w24, #21
mov w25, #58754
movk w25, #2, lsl #16
mov w27, #4
mov w26, #10
LBB0_1: ; =>This Loop Header: Depth=1
; Child Loop BB0_2 Depth 2
mov w28, #0
lsr w9, w19, #9
lsl w8, w19, #9
stp w8, w9, [sp, #24] ; 8-byte Folded Spill
lsr w9, w19, #17
stp w27, w24, [sp, #8] ; 8-byte Folded Spill
lsl w8, w19, #17
stp w8, w9, [sp, #16] ; 8-byte Folded Spill
LBB0_2: ; Parent Loop BB0_1 Depth=1
; => This Inner Loop Header: Depth=2
add w20, w28, #3
mov x0, x19
bl _av_int2i
mov x21, x0
mov x0, x20
bl _av_int2i
mov x22, x0
mov x0, x21
bl _av_i2int
cmp w0, w19
cset w0, eq
bl _av_assert0
mov x0, x22
bl _av_i2int
cmp w20, w0
cset w0, eq
bl _av_assert0
mov x0, x21
mov x1, x22
bl _av_add_i
bl _av_i2int
add w8, w26, w28
cmp w8, w0
cset w0, eq
bl _av_assert0
mov x0, x21
mov x1, x22
bl _av_sub_i
bl _av_i2int
cmp w27, w0
cset w0, eq
bl _av_assert0
mov x0, x21
mov x1, x22
bl _av_mul_i
bl _av_i2int
cmp w24, w0
cset w0, eq
bl _av_assert0
mov x0, x21
mov w1, #9
bl _av_shr_i
bl _av_i2int
ldr w8, [sp, #28] ; 4-byte Folded Reload
cmp w0, w8
cset w0, eq
bl _av_assert0
mov x0, x21
mov w1, #-9
bl _av_shr_i
bl _av_i2int
ldr w8, [sp, #24] ; 4-byte Folded Reload
cmp w0, w8
cset w0, eq
bl _av_assert0
mov x0, x21
mov w1, #17
bl _av_shr_i
bl _av_i2int
ldr w8, [sp, #20] ; 4-byte Folded Reload
cmp w0, w8
cset w0, eq
bl _av_assert0
mov x0, x21
mov w1, #-17
bl _av_shr_i
bl _av_i2int
ldr w8, [sp, #16] ; 4-byte Folded Reload
cmp w0, w8
cset w0, eq
bl _av_assert0
mov x0, x21
bl _av_log2_i
mov x23, x0
mov x0, x19
bl _av_log2
cmp x23, x0
cset w0, eq
bl _av_assert0
mov x0, x21
mov x1, x22
bl _av_div_i
bl _av_i2int
udiv w8, w19, w20
cmp w0, w8
cset w0, eq
bl _av_assert0
mov w9, #-27118
mov w8, #27118
add w28, w28, w8
add w24, w24, w25
add w27, w27, w9
mov w8, #38418
movk w8, #255, lsl #16
cmp w20, w8
b.lo LBB0_2
; %bb.3: ; in Loop: Header=BB0_1 Depth=1
mov w9, #13215
add w8, w19, w9
ldp w27, w24, [sp, #8] ; 8-byte Folded Reload
mov w10, #39645
add w24, w24, w10
mov w10, #13522
movk w10, #5468, lsl #16
add w25, w25, w10
add w27, w27, w9
add w26, w26, w9
mov w9, #52321
movk w9, #255, lsl #16
cmp w19, w9
mov x19, x8
b.lo LBB0_1
; %bb.4:
mov w0, #0
ldp x29, x30, [sp, #112] ; 16-byte Folded Reload
ldp x20, x19, [sp, #96] ; 16-byte Folded Reload
ldp x22, x21, [sp, #80] ; 16-byte Folded Reload
ldp x24, x23, [sp, #64] ; 16-byte Folded Reload
ldp x26, x25, [sp, #48] ; 16-byte Folded Reload
ldp x28, x27, [sp, #32] ; 16-byte Folded Reload
add sp, sp, #128
ret
.cfi_endproc
; -- End function
.subsections_via_symbols
| AnghaBench/FFmpeg/libavutil/tests/extr_integer.c_main.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function sbp2scsi_slave_alloc
_sbp2scsi_slave_alloc: ## @sbp2scsi_slave_alloc
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
cmpq $0, (%rdi)
jne LBB0_3
## %bb.1:
movq %rdi, %rbx
movq 40(%rdi), %rax
movq (%rax), %rax
movq (%rax), %r14
movq 8(%rdi), %rax
movq 16(%r14), %rcx
cmpq (%rcx), %rax
jne LBB0_3
## %bb.2:
cmpq $0, 16(%rbx)
je LBB0_4
LBB0_3:
movq _ENODEV@GOTPCREL(%rip), %rcx
xorl %eax, %eax
subl (%rcx), %eax
LBB0_6:
popq %rbx
popq %r14
popq %rbp
retq
LBB0_4:
movq %rbx, 8(%r14)
movl $1, 24(%rbx)
movl 32(%rbx), %edi
movl $3, %esi
callq _blk_queue_update_dma_alignment
movq _SBP2_WORKAROUND_INQUIRY_36@GOTPCREL(%rip), %rax
movl (%rax), %ecx
xorl %eax, %eax
testl %ecx, (%r14)
je LBB0_6
## %bb.5:
movl $36, 28(%rbx)
jmp LBB0_6
.cfi_endproc
## -- End function
.comm _ENODEV,4,2 ## @ENODEV
.comm _SBP2_WORKAROUND_INQUIRY_36,4,2 ## @SBP2_WORKAROUND_INQUIRY_36
.no_dead_strip _sbp2scsi_slave_alloc
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function sbp2scsi_slave_alloc
_sbp2scsi_slave_alloc: ; @sbp2scsi_slave_alloc
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
ldr x8, [x0]
cbnz x8, LBB0_3
; %bb.1:
mov x19, x0
ldr x8, [x0, #40]
ldr x8, [x8]
ldr x20, [x8]
ldr x8, [x0, #8]
ldr x9, [x20, #16]
ldr x9, [x9]
cmp x8, x9
b.ne LBB0_3
; %bb.2:
ldr x8, [x19, #16]
cbz x8, LBB0_5
LBB0_3:
Lloh0:
adrp x8, _ENODEV@GOTPAGE
Lloh1:
ldr x8, [x8, _ENODEV@GOTPAGEOFF]
Lloh2:
ldr w8, [x8]
neg w0, w8
LBB0_4:
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
LBB0_5:
str x19, [x20, #8]
mov w8, #1
str w8, [x19, #24]
ldr w0, [x19, #32]
mov w1, #3
bl _blk_queue_update_dma_alignment
ldr w8, [x20]
Lloh3:
adrp x9, _SBP2_WORKAROUND_INQUIRY_36@GOTPAGE
Lloh4:
ldr x9, [x9, _SBP2_WORKAROUND_INQUIRY_36@GOTPAGEOFF]
Lloh5:
ldr w9, [x9]
tst w9, w8
b.eq LBB0_7
; %bb.6:
mov w0, #0
mov w8, #36
str w8, [x19, #28]
b LBB0_4
LBB0_7:
mov w0, #0
b LBB0_4
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.cfi_endproc
; -- End function
.comm _ENODEV,4,2 ; @ENODEV
.comm _SBP2_WORKAROUND_INQUIRY_36,4,2 ; @SBP2_WORKAROUND_INQUIRY_36
.no_dead_strip _sbp2scsi_slave_alloc
.subsections_via_symbols
| AnghaBench/fastsocket/kernel/drivers/ieee1394/extr_sbp2.c_sbp2scsi_slave_alloc.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
xorl %eax, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
mov w0, #0
ret
.cfi_endproc
; -- End function
.subsections_via_symbols
| the_stack_data/23574405.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function POP3Transport_CallbackRecvPASSResp
_POP3Transport_CallbackRecvPASSResp: ## @POP3Transport_CallbackRecvPASSResp
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
movq %rdi, %rbx
leaq L_.str(%rip), %rdi
callq _TRACE
movq _POP3Transport_CallbackProcessPASSResp@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq %rbx, %rdi
addq $8, %rsp
popq %rbx
popq %rbp
jmp _InternetTransport_ReadLine ## TAILCALL
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "\n"
.comm _POP3Transport_CallbackProcessPASSResp,4,2 ## @POP3Transport_CallbackProcessPASSResp
.no_dead_strip _POP3Transport_CallbackRecvPASSResp
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function POP3Transport_CallbackRecvPASSResp
_POP3Transport_CallbackRecvPASSResp: ; @POP3Transport_CallbackRecvPASSResp
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x0
Lloh0:
adrp x0, l_.str@PAGE
Lloh1:
add x0, x0, l_.str@PAGEOFF
bl _TRACE
Lloh2:
adrp x8, _POP3Transport_CallbackProcessPASSResp@GOTPAGE
Lloh3:
ldr x8, [x8, _POP3Transport_CallbackProcessPASSResp@GOTPAGEOFF]
Lloh4:
ldr w1, [x8]
mov x0, x19
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
b _InternetTransport_ReadLine
.loh AdrpLdrGotLdr Lloh2, Lloh3, Lloh4
.loh AdrpAdd Lloh0, Lloh1
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "\n"
.comm _POP3Transport_CallbackProcessPASSResp,4,2 ; @POP3Transport_CallbackProcessPASSResp
.no_dead_strip _POP3Transport_CallbackRecvPASSResp
.subsections_via_symbols
| AnghaBench/reactos/dll/win32/inetcomm/extr_pop3transport.c_POP3Transport_CallbackRecvPASSResp.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _spa_l2cache_add ## -- Begin function spa_l2cache_add
.p2align 4, 0x90
_spa_l2cache_add: ## @spa_l2cache_add
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq %rdi, %rbx
movq _spa_l2cache_lock@GOTPCREL(%rip), %r14
movq %r14, %rdi
callq _mutex_enter
xorl %edi, %edi
cmpq $0, (%rbx)
sete %dil
callq _ASSERT
movq _spa_l2cache_avl@GOTPCREL(%rip), %rsi
movq %rbx, %rdi
callq _spa_aux_add
movq _B_TRUE@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, (%rbx)
movq %r14, %rdi
popq %rbx
popq %r14
popq %rbp
jmp _mutex_exit ## TAILCALL
.cfi_endproc
## -- End function
.comm _spa_l2cache_lock,4,2 ## @spa_l2cache_lock
.comm _spa_l2cache_avl,4,2 ## @spa_l2cache_avl
.comm _B_TRUE,8,3 ## @B_TRUE
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _spa_l2cache_add ; -- Begin function spa_l2cache_add
.p2align 2
_spa_l2cache_add: ; @spa_l2cache_add
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x0
Lloh0:
adrp x20, _spa_l2cache_lock@GOTPAGE
Lloh1:
ldr x20, [x20, _spa_l2cache_lock@GOTPAGEOFF]
mov x0, x20
bl _mutex_enter
ldr x8, [x19]
cmp x8, #0
cset w0, eq
bl _ASSERT
Lloh2:
adrp x1, _spa_l2cache_avl@GOTPAGE
Lloh3:
ldr x1, [x1, _spa_l2cache_avl@GOTPAGEOFF]
mov x0, x19
bl _spa_aux_add
Lloh4:
adrp x8, _B_TRUE@GOTPAGE
Lloh5:
ldr x8, [x8, _B_TRUE@GOTPAGEOFF]
Lloh6:
ldr x8, [x8]
str x8, [x19]
mov x0, x20
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
b _mutex_exit
.loh AdrpLdrGotLdr Lloh4, Lloh5, Lloh6
.loh AdrpLdrGot Lloh2, Lloh3
.loh AdrpLdrGot Lloh0, Lloh1
.cfi_endproc
; -- End function
.comm _spa_l2cache_lock,4,2 ; @spa_l2cache_lock
.comm _spa_l2cache_avl,4,2 ; @spa_l2cache_avl
.comm _B_TRUE,8,3 ; @B_TRUE
.subsections_via_symbols
| AnghaBench/freebsd/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/extr_spa_misc.c_spa_l2cache_add.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function create_custom_action_table
_create_custom_action_table: ## @create_custom_action_table
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
leaq L_.str(%rip), %rsi
callq _run_query
movq %rax, %rbx
movq _ERROR_SUCCESS@GOTPCREL(%rip), %rax
xorl %edi, %edi
cmpq (%rax), %rbx
sete %dil
leaq L_.str.1(%rip), %rsi
movq %rbx, %rdx
callq _ok
movq %rbx, %rax
addq $8, %rsp
popq %rbx
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "CREATE TABLE `CustomAction` (`Action` CHAR(72) NOT NULL, `Type` SHORT NOT NULL, `Source` CHAR(75), `Target` CHAR(255) PRIMARY KEY `Action`)"
.comm _ERROR_SUCCESS,8,3 ## @ERROR_SUCCESS
L_.str.1: ## @.str.1
.asciz "Failed to create CustomAction table: %u\n"
.no_dead_strip _create_custom_action_table
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function create_custom_action_table
_create_custom_action_table: ; @create_custom_action_table
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
Lloh0:
adrp x1, l_.str@PAGE
Lloh1:
add x1, x1, l_.str@PAGEOFF
bl _run_query
mov x19, x0
Lloh2:
adrp x8, _ERROR_SUCCESS@GOTPAGE
Lloh3:
ldr x8, [x8, _ERROR_SUCCESS@GOTPAGEOFF]
Lloh4:
ldr x8, [x8]
cmp x0, x8
cset w0, eq
Lloh5:
adrp x1, l_.str.1@PAGE
Lloh6:
add x1, x1, l_.str.1@PAGEOFF
mov x2, x19
bl _ok
mov x0, x19
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.loh AdrpAdd Lloh5, Lloh6
.loh AdrpLdrGotLdr Lloh2, Lloh3, Lloh4
.loh AdrpAdd Lloh0, Lloh1
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "CREATE TABLE `CustomAction` (`Action` CHAR(72) NOT NULL, `Type` SHORT NOT NULL, `Source` CHAR(75), `Target` CHAR(255) PRIMARY KEY `Action`)"
.comm _ERROR_SUCCESS,8,3 ; @ERROR_SUCCESS
l_.str.1: ; @.str.1
.asciz "Failed to create CustomAction table: %u\n"
.no_dead_strip _create_custom_action_table
.subsections_via_symbols
| AnghaBench/reactos/modules/rostests/winetests/msi/extr_package.c_create_custom_action_table.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function be_async_grp5_evt_process
_be_async_grp5_evt_process: ## @be_async_grp5_evt_process
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq _ASYNC_TRAILER_EVENT_TYPE_SHIFT@GOTPCREL(%rip), %rax
movb (%rax), %cl
sarl %cl, %esi
movq _ASYNC_TRAILER_EVENT_TYPE_MASK@GOTPCREL(%rip), %rax
andl (%rax), %esi
cmpl $128, %esi
je LBB0_4
## %bb.1:
cmpl $129, %esi
je LBB0_5
## %bb.2:
cmpl $130, %esi
jne LBB0_6
## %bb.3:
movq %rdx, %rsi
popq %rbp
jmp _be_async_grp5_cos_priority_process ## TAILCALL
LBB0_4:
movq %rdx, %rsi
popq %rbp
jmp _be_async_grp5_qos_speed_process ## TAILCALL
LBB0_5:
movq %rdx, %rsi
popq %rbp
jmp _be_async_grp5_pvid_state_process ## TAILCALL
LBB0_6:
movq (%rdi), %rdi
leaq L_.str(%rip), %rsi
popq %rbp
jmp _dev_warn ## TAILCALL
.cfi_endproc
## -- End function
.comm _ASYNC_TRAILER_EVENT_TYPE_SHIFT,4,2 ## @ASYNC_TRAILER_EVENT_TYPE_SHIFT
.comm _ASYNC_TRAILER_EVENT_TYPE_MASK,4,2 ## @ASYNC_TRAILER_EVENT_TYPE_MASK
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "Unknown grp5 event!\n"
.no_dead_strip _be_async_grp5_evt_process
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function be_async_grp5_evt_process
_be_async_grp5_evt_process: ; @be_async_grp5_evt_process
.cfi_startproc
; %bb.0:
Lloh0:
adrp x8, _ASYNC_TRAILER_EVENT_TYPE_SHIFT@GOTPAGE
Lloh1:
ldr x8, [x8, _ASYNC_TRAILER_EVENT_TYPE_SHIFT@GOTPAGEOFF]
Lloh2:
ldr w8, [x8]
asr w8, w1, w8
Lloh3:
adrp x9, _ASYNC_TRAILER_EVENT_TYPE_MASK@GOTPAGE
Lloh4:
ldr x9, [x9, _ASYNC_TRAILER_EVENT_TYPE_MASK@GOTPAGEOFF]
Lloh5:
ldr w9, [x9]
and w8, w8, w9
cmp w8, #128
b.eq LBB0_4
; %bb.1:
cmp w8, #129
b.eq LBB0_5
; %bb.2:
cmp w8, #130
b.ne LBB0_6
; %bb.3:
mov x1, x2
b _be_async_grp5_cos_priority_process
LBB0_4:
mov x1, x2
b _be_async_grp5_qos_speed_process
LBB0_5:
mov x1, x2
b _be_async_grp5_pvid_state_process
LBB0_6:
ldr x0, [x0]
Lloh6:
adrp x1, l_.str@PAGE
Lloh7:
add x1, x1, l_.str@PAGEOFF
b _dev_warn
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpAdd Lloh6, Lloh7
.cfi_endproc
; -- End function
.comm _ASYNC_TRAILER_EVENT_TYPE_SHIFT,4,2 ; @ASYNC_TRAILER_EVENT_TYPE_SHIFT
.comm _ASYNC_TRAILER_EVENT_TYPE_MASK,4,2 ; @ASYNC_TRAILER_EVENT_TYPE_MASK
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "Unknown grp5 event!\n"
.no_dead_strip _be_async_grp5_evt_process
.subsections_via_symbols
| AnghaBench/fastsocket/kernel/drivers/net/benet/extr_be_cmds.c_be_async_grp5_evt_process.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function get_rec_volume_linear
_get_rec_volume_linear: ## @get_rec_volume_linear
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movl (%rdi), %eax
addl $-128, %eax
cmpl $4, %eax
ja LBB0_2
## %bb.1:
movslq %eax, %rbx
leaq l_switch.table.get_rec_volume_linear(%rip), %r14
callq _get_rec_volume
imull $100, %eax, %eax
cltd
idivl (%r14,%rbx,4)
cmpl $100, %eax
movl $100, %ecx
cmovgel %ecx, %eax
jmp LBB0_3
LBB0_2:
movl $-1, %eax
LBB0_3:
popq %rbx
popq %r14
popq %rbp
retq
.cfi_endproc
## -- End function
.no_dead_strip _get_rec_volume_linear
.section __TEXT,__const
.p2align 2 ## @switch.table.get_rec_volume_linear
l_switch.table.get_rec_volume_linear:
.long 256 ## 0x100
.long 1216 ## 0x4c0
.long 1088 ## 0x440
.long 1024 ## 0x400
.long 384 ## 0x180
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function get_rec_volume_linear
_get_rec_volume_linear: ; @get_rec_volume_linear
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
ldr w8, [x0]
sub w8, w8, #128
cmp w8, #4
b.hi LBB0_2
; %bb.1:
Lloh0:
adrp x9, l_switch.table.get_rec_volume_linear@PAGE
Lloh1:
add x9, x9, l_switch.table.get_rec_volume_linear@PAGEOFF
ldr w19, [x9, w8, sxtw #2]
bl _get_rec_volume
mov w8, #100
mul w9, w0, w8
sdiv w9, w9, w19
cmp w9, #100
csel w0, w9, w8, lt
b LBB0_3
LBB0_2:
mov w0, #-1
LBB0_3:
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.loh AdrpAdd Lloh0, Lloh1
.cfi_endproc
; -- End function
.no_dead_strip _get_rec_volume_linear
.section __TEXT,__const
.p2align 2 ; @switch.table.get_rec_volume_linear
l_switch.table.get_rec_volume_linear:
.long 256 ; 0x100
.long 1216 ; 0x4c0
.long 1088 ; 0x440
.long 1024 ; 0x400
.long 384 ; 0x180
.subsections_via_symbols
| AnghaBench/fastsocket/kernel/drivers/telephony/extr_ixj.c_get_rec_volume_linear.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _spa_feature_create_zap_objects ## -- Begin function spa_feature_create_zap_objects
.p2align 4, 0x90
_spa_feature_create_zap_objects: ## @spa_feature_create_zap_objects
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rsi, %r14
movq %rdi, %rbx
callq _spa_get_dsl
movl %eax, %edi
callq _dsl_pool_sync_context
movl $1, %edi
testq %rax, %rax
jne LBB0_3
## %bb.1:
xorl %edi, %edi
cmpl $0, 32(%rbx)
jne LBB0_3
## %bb.2:
movq (%r14), %rax
movq _TXG_INITIAL@GOTPCREL(%rip), %rcx
xorl %edi, %edi
cmpq (%rcx), %rax
sete %dil
LBB0_3:
callq _ASSERT
movl (%rbx), %edi
movq _DMU_OTN_ZAP_METADATA@GOTPCREL(%rip), %r15
movl (%r15), %esi
movq _DMU_POOL_DIRECTORY_OBJECT@GOTPCREL(%rip), %r12
movl (%r12), %edx
movq _DMU_POOL_FEATURES_FOR_READ@GOTPCREL(%rip), %rax
movl (%rax), %ecx
movq %r14, %r8
callq _zap_create_link
movq %rax, 24(%rbx)
movl (%rbx), %edi
movl (%r15), %esi
movl (%r12), %edx
movq _DMU_POOL_FEATURES_FOR_WRITE@GOTPCREL(%rip), %rax
movl (%rax), %ecx
movq %r14, %r8
callq _zap_create_link
movq %rax, 16(%rbx)
movl (%rbx), %edi
movl (%r15), %esi
movl (%r12), %edx
movq _DMU_POOL_FEATURE_DESCRIPTIONS@GOTPCREL(%rip), %rax
movl (%rax), %ecx
movq %r14, %r8
callq _zap_create_link
movq %rax, 8(%rbx)
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _TXG_INITIAL,8,3 ## @TXG_INITIAL
.comm _DMU_OTN_ZAP_METADATA,4,2 ## @DMU_OTN_ZAP_METADATA
.comm _DMU_POOL_DIRECTORY_OBJECT,4,2 ## @DMU_POOL_DIRECTORY_OBJECT
.comm _DMU_POOL_FEATURES_FOR_READ,4,2 ## @DMU_POOL_FEATURES_FOR_READ
.comm _DMU_POOL_FEATURES_FOR_WRITE,4,2 ## @DMU_POOL_FEATURES_FOR_WRITE
.comm _DMU_POOL_FEATURE_DESCRIPTIONS,4,2 ## @DMU_POOL_FEATURE_DESCRIPTIONS
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _spa_feature_create_zap_objects ; -- Begin function spa_feature_create_zap_objects
.p2align 2
_spa_feature_create_zap_objects: ; @spa_feature_create_zap_objects
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x20, x1
mov x19, x0
bl _spa_get_dsl
bl _dsl_pool_sync_context
cbz x0, LBB0_2
; %bb.1:
mov w0, #1
b LBB0_5
LBB0_2:
ldr w8, [x19, #32]
cbz w8, LBB0_4
; %bb.3:
mov w0, #0
b LBB0_5
LBB0_4:
ldr x8, [x20]
Lloh0:
adrp x9, _TXG_INITIAL@GOTPAGE
Lloh1:
ldr x9, [x9, _TXG_INITIAL@GOTPAGEOFF]
Lloh2:
ldr x9, [x9]
cmp x8, x9
cset w0, eq
LBB0_5:
bl _ASSERT
ldr w0, [x19]
Lloh3:
adrp x21, _DMU_OTN_ZAP_METADATA@GOTPAGE
Lloh4:
ldr x21, [x21, _DMU_OTN_ZAP_METADATA@GOTPAGEOFF]
ldr w1, [x21]
Lloh5:
adrp x22, _DMU_POOL_DIRECTORY_OBJECT@GOTPAGE
Lloh6:
ldr x22, [x22, _DMU_POOL_DIRECTORY_OBJECT@GOTPAGEOFF]
ldr w2, [x22]
Lloh7:
adrp x8, _DMU_POOL_FEATURES_FOR_READ@GOTPAGE
Lloh8:
ldr x8, [x8, _DMU_POOL_FEATURES_FOR_READ@GOTPAGEOFF]
Lloh9:
ldr w3, [x8]
mov x4, x20
bl _zap_create_link
str x0, [x19, #24]
ldr w0, [x19]
ldr w1, [x21]
ldr w2, [x22]
Lloh10:
adrp x8, _DMU_POOL_FEATURES_FOR_WRITE@GOTPAGE
Lloh11:
ldr x8, [x8, _DMU_POOL_FEATURES_FOR_WRITE@GOTPAGEOFF]
Lloh12:
ldr w3, [x8]
mov x4, x20
bl _zap_create_link
str x0, [x19, #16]
ldr w0, [x19]
ldr w1, [x21]
ldr w2, [x22]
Lloh13:
adrp x8, _DMU_POOL_FEATURE_DESCRIPTIONS@GOTPAGE
Lloh14:
ldr x8, [x8, _DMU_POOL_FEATURE_DESCRIPTIONS@GOTPAGEOFF]
Lloh15:
ldr w3, [x8]
mov x4, x20
bl _zap_create_link
str x0, [x19, #8]
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGotLdr Lloh13, Lloh14, Lloh15
.loh AdrpLdrGotLdr Lloh10, Lloh11, Lloh12
.loh AdrpLdrGotLdr Lloh7, Lloh8, Lloh9
.loh AdrpLdrGot Lloh5, Lloh6
.loh AdrpLdrGot Lloh3, Lloh4
.cfi_endproc
; -- End function
.comm _TXG_INITIAL,8,3 ; @TXG_INITIAL
.comm _DMU_OTN_ZAP_METADATA,4,2 ; @DMU_OTN_ZAP_METADATA
.comm _DMU_POOL_DIRECTORY_OBJECT,4,2 ; @DMU_POOL_DIRECTORY_OBJECT
.comm _DMU_POOL_FEATURES_FOR_READ,4,2 ; @DMU_POOL_FEATURES_FOR_READ
.comm _DMU_POOL_FEATURES_FOR_WRITE,4,2 ; @DMU_POOL_FEATURES_FOR_WRITE
.comm _DMU_POOL_FEATURE_DESCRIPTIONS,4,2 ; @DMU_POOL_FEATURE_DESCRIPTIONS
.subsections_via_symbols
| AnghaBench/freebsd/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/extr_zfeature.c_spa_feature_create_zap_objects.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function bnx2x_write_dmae_phys_len
_bnx2x_write_dmae_phys_len: ## @bnx2x_write_dmae_phys_len
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $24, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %ecx, %ebx
## kill: def $edx killed $edx def $rdx
movq %rdx, -56(%rbp) ## 8-byte Spill
movq %rsi, %r14
movq %rdi, -48(%rbp) ## 8-byte Spill
callq _DMAE_LEN32_WR_MAX
xorl %r15d, %r15d
cmpl %ebx, %eax
movq %r14, -64(%rbp) ## 8-byte Spill
jge LBB0_3
## %bb.1:
movl %eax, %r13d
leal (,%r13,4), %eax
movslq %eax, %r12
xorl %r15d, %r15d
.p2align 4, 0x90
LBB0_2: ## =>This Inner Loop Header: Depth=1
movq -56(%rbp), %rax ## 8-byte Reload
leal (%rax,%r15), %edx
movq -48(%rbp), %rdi ## 8-byte Reload
movq %r14, %rsi
movl %r13d, %ecx
callq _bnx2x_write_dmae
subl %r13d, %ebx
addl %r12d, %r15d
addq %r12, %r14
cmpl %r13d, %ebx
jg LBB0_2
LBB0_3:
movslq %r15d, %rdx
movq -64(%rbp), %rsi ## 8-byte Reload
addq %rdx, %rsi
addl -56(%rbp), %edx ## 4-byte Folded Reload
movq -48(%rbp), %rdi ## 8-byte Reload
## kill: def $edx killed $edx killed $rdx
movl %ebx, %ecx
addq $24, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp _bnx2x_write_dmae ## TAILCALL
.cfi_endproc
## -- End function
.no_dead_strip _bnx2x_write_dmae_phys_len
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function bnx2x_write_dmae_phys_len
_bnx2x_write_dmae_phys_len: ; @bnx2x_write_dmae_phys_len
.cfi_startproc
; %bb.0:
stp x26, x25, [sp, #-80]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 80
stp x24, x23, [sp, #16] ; 16-byte Folded Spill
stp x22, x21, [sp, #32] ; 16-byte Folded Spill
stp x20, x19, [sp, #48] ; 16-byte Folded Spill
stp x29, x30, [sp, #64] ; 16-byte Folded Spill
add x29, sp, #64
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
mov x19, x3
mov x20, x2
mov x22, x1
mov x21, x0
bl _DMAE_LEN32_WR_MAX
cmp w0, w19
b.ge LBB0_3
; %bb.1:
mov x23, x0
mov w25, #0
lsl w8, w0, #2
sxtw x26, w8
mov x24, x22
LBB0_2: ; =>This Inner Loop Header: Depth=1
add w2, w20, w25
mov x0, x21
mov x1, x24
mov x3, x23
bl _bnx2x_write_dmae
add w25, w25, w26
add x24, x24, x26
sub w19, w19, w23
cmp w19, w23
b.gt LBB0_2
b LBB0_4
LBB0_3:
mov w25, #0
LBB0_4:
add x1, x22, w25, sxtw
add w2, w25, w20
mov x0, x21
mov x3, x19
ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
ldp x22, x21, [sp, #32] ; 16-byte Folded Reload
ldp x24, x23, [sp, #16] ; 16-byte Folded Reload
ldp x26, x25, [sp], #80 ; 16-byte Folded Reload
b _bnx2x_write_dmae
.cfi_endproc
; -- End function
.no_dead_strip _bnx2x_write_dmae_phys_len
.subsections_via_symbols
| AnghaBench/linux/drivers/net/ethernet/broadcom/bnx2x/extr_bnx2x_main.c_bnx2x_write_dmae_phys_len.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.subsections_via_symbols
| the_stack_data/108246.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _ahash_register_instance ## -- Begin function ahash_register_instance
.p2align 4, 0x90
_ahash_register_instance: ## @ahash_register_instance
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq %rsi, %rbx
movq %rdi, %r14
movq %rsi, %rdi
callq _ahash_prepare_alg
testl %eax, %eax
je LBB0_2
## %bb.1:
popq %rbx
popq %r14
popq %rbp
retq
LBB0_2:
movq %rbx, %rdi
callq _ahash_crypto_instance
movq %r14, %rdi
movl %eax, %esi
popq %rbx
popq %r14
popq %rbp
jmp _crypto_register_instance ## TAILCALL
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _ahash_register_instance ; -- Begin function ahash_register_instance
.p2align 2
_ahash_register_instance: ; @ahash_register_instance
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x20, x1
mov x19, x0
mov x0, x1
bl _ahash_prepare_alg
cbz w0, LBB0_2
; %bb.1:
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
LBB0_2:
mov x0, x20
bl _ahash_crypto_instance
mov x1, x0
mov x0, x19
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
b _crypto_register_instance
.cfi_endproc
; -- End function
.subsections_via_symbols
| AnghaBench/linux/crypto/extr_ahash.c_ahash_register_instance.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function ssip_pn_xmit
_ssip_pn_xmit: ## @ssip_pn_xmit
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rsi, %r14
movq %rdi, %r13
movl 20(%rsi), %edi
callq _to_hsi_client
movq %rax, %r15
movq %rax, %rdi
callq _hsi_client_drvdata
movq %rax, %r12
movl 16(%r13), %ebx
movq _ETH_P_PHONET@GOTPCREL(%rip), %rax
movl (%rax), %edi
callq _htons
cmpl %eax, %ebx
jne LBB0_9
## %bb.1:
movl (%r13), %eax
movq _SSIP_MIN_PN_HDR@GOTPCREL(%rip), %rcx
cmpl (%rcx), %eax
jl LBB0_9
## %bb.2:
andl $3, %eax
je LBB0_4
## %bb.3:
movl $4, %esi
subl %eax, %esi
movq %r13, %rdi
callq _skb_pad
testq %rax, %rax
jne LBB0_10
LBB0_4:
movq %r13, %rdi
xorl %esi, %esi
callq _skb_cow_head
testq %rax, %rax
je LBB0_5
LBB0_9:
movq %r13, %rdi
callq _dev_kfree_skb
LBB0_10:
incl 12(%r14)
LBB0_19:
xorl %eax, %eax
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB0_5:
movq 8(%r13), %rax
movl 8(%rax), %edi
callq _htons
movq 8(%r13), %rcx
movl %eax, 8(%rcx)
movq _GFP_ATOMIC@GOTPCREL(%rip), %rax
movl (%rax), %edx
movq %r12, %rdi
movq %r13, %rsi
callq _ssip_alloc_data
testq %rax, %rax
je LBB0_6
## %bb.7:
movq %rax, %rbx
movq _ssip_tx_data_complete@GOTPCREL(%rip), %rax
movl (%rax), %eax
movl %eax, 4(%rbx)
leaq 24(%r12), %rdi
movq %rdi, -48(%rbp) ## 8-byte Spill
callq _spin_lock_bh
movq (%r12), %rax
movq _ACTIVE@GOTPCREL(%rip), %rcx
xorl %edi, %edi
cmpq (%rcx), %rax
setne %dil
callq _unlikely
testq %rax, %rax
je LBB0_11
## %bb.8:
movq -48(%rbp), %rdi ## 8-byte Reload
callq _spin_unlock_bh
leaq L_.str.1(%rip), %rsi
movq %r15, %rdi
xorl %eax, %eax
callq _dev_dbg
movq %rbx, %rdi
callq _hsi_free_msg
jmp LBB0_9
LBB0_6:
leaq L_.str(%rip), %rsi
movq %r15, %rdi
xorl %eax, %eax
callq _dev_dbg
jmp LBB0_9
LBB0_11:
leaq 32(%r12), %rsi
movq %rbx, %rdi
callq _list_add_tail
movq 8(%r12), %rax
leaq 1(%rax), %rdx
movq %rdx, 8(%r12)
cmpq %rax, (%r14)
jg LBB0_13
## %bb.12:
leaq L_.str.2(%rip), %rsi
movq %r15, %rdi
callq _dev_info
movq %r14, %rdi
callq _netif_stop_queue
LBB0_13:
movq 16(%r12), %rax
movq _SEND_IDLE@GOTPCREL(%rip), %rcx
cmpq (%rcx), %rax
movq -48(%rbp), %rbx ## 8-byte Reload
jne LBB0_15
## %bb.14:
movq _WAIT4READY@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq %r12, %rdi
callq _ssip_set_txstate
movq %rbx, %rdi
callq _spin_unlock_bh
movq 8(%r12), %rdx
leaq L_.str.3(%rip), %rsi
movq %r15, %rdi
xorl %eax, %eax
callq _dev_dbg
movq %r15, %rdi
callq _hsi_start_tx
jmp LBB0_18
LBB0_15:
movq _SEND_READY@GOTPCREL(%rip), %rcx
cmpq (%rcx), %rax
jne LBB0_17
## %bb.16:
movq 8(%r12), %rdx
leaq L_.str.4(%rip), %rsi
movq %r15, %rdi
xorl %eax, %eax
callq _dev_dbg
movq %rbx, %rdi
callq _spin_unlock_bh
addq $28, %r12
movq %r12, %rdi
callq _schedule_work
jmp LBB0_18
LBB0_17:
movq %rbx, %rdi
callq _spin_unlock_bh
LBB0_18:
incl 16(%r14)
movl (%r13), %eax
addl %eax, 8(%r14)
jmp LBB0_19
.cfi_endproc
## -- End function
.comm _ETH_P_PHONET,4,2 ## @ETH_P_PHONET
.comm _SSIP_MIN_PN_HDR,4,2 ## @SSIP_MIN_PN_HDR
.comm _GFP_ATOMIC,4,2 ## @GFP_ATOMIC
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "Dropping tx data: No memory\n"
.comm _ssip_tx_data_complete,4,2 ## @ssip_tx_data_complete
.comm _ACTIVE,8,3 ## @ACTIVE
L_.str.1: ## @.str.1
.asciz "Dropping tx data: CMT is OFFLINE\n"
L_.str.2: ## @.str.2
.asciz "TX queue full %d\n"
.comm _SEND_IDLE,8,3 ## @SEND_IDLE
.comm _WAIT4READY,4,2 ## @WAIT4READY
L_.str.3: ## @.str.3
.asciz "Start TX qlen %d\n"
.comm _SEND_READY,8,3 ## @SEND_READY
L_.str.4: ## @.str.4
.asciz "Start TX on SEND READY qlen %d\n"
.no_dead_strip _ssip_pn_xmit
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function ssip_pn_xmit
_ssip_pn_xmit: ; @ssip_pn_xmit
.cfi_startproc
; %bb.0:
sub sp, sp, #80
.cfi_def_cfa_offset 80
stp x24, x23, [sp, #16] ; 16-byte Folded Spill
stp x22, x21, [sp, #32] ; 16-byte Folded Spill
stp x20, x19, [sp, #48] ; 16-byte Folded Spill
stp x29, x30, [sp, #64] ; 16-byte Folded Spill
add x29, sp, #64
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
mov x19, x1
mov x20, x0
ldr w0, [x1, #20]
bl _to_hsi_client
mov x21, x0
bl _hsi_client_drvdata
mov x22, x0
ldr w23, [x20, #16]
Lloh0:
adrp x8, _ETH_P_PHONET@GOTPAGE
Lloh1:
ldr x8, [x8, _ETH_P_PHONET@GOTPAGEOFF]
Lloh2:
ldr w0, [x8]
bl _htons
cmp w23, w0
b.ne LBB0_5
; %bb.1:
ldr w8, [x20]
Lloh3:
adrp x9, _SSIP_MIN_PN_HDR@GOTPAGE
Lloh4:
ldr x9, [x9, _SSIP_MIN_PN_HDR@GOTPAGEOFF]
Lloh5:
ldr w9, [x9]
cmp w8, w9
b.lt LBB0_5
; %bb.2:
ands w8, w8, #0x3
b.eq LBB0_4
; %bb.3:
mov w9, #4
sub w1, w9, w8
mov x0, x20
bl _skb_pad
cbnz x0, LBB0_6
LBB0_4:
mov x0, x20
mov w1, #0
bl _skb_cow_head
cbz x0, LBB0_8
LBB0_5:
mov x0, x20
bl _dev_kfree_skb
LBB0_6:
ldr w8, [x19, #12]
add w8, w8, #1
str w8, [x19, #12]
LBB0_7:
mov w0, #0
ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
ldp x22, x21, [sp, #32] ; 16-byte Folded Reload
ldp x24, x23, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #80
ret
LBB0_8:
ldr x8, [x20, #8]
ldr w0, [x8, #8]
bl _htons
ldr x8, [x20, #8]
str w0, [x8, #8]
Lloh6:
adrp x8, _GFP_ATOMIC@GOTPAGE
Lloh7:
ldr x8, [x8, _GFP_ATOMIC@GOTPAGEOFF]
Lloh8:
ldr w2, [x8]
mov x0, x22
mov x1, x20
bl _ssip_alloc_data
cbz x0, LBB0_11
; %bb.9:
mov x24, x0
Lloh9:
adrp x8, _ssip_tx_data_complete@GOTPAGE
Lloh10:
ldr x8, [x8, _ssip_tx_data_complete@GOTPAGEOFF]
Lloh11:
ldr w8, [x8]
str w8, [x0, #4]
add x23, x22, #24
mov x0, x23
bl _spin_lock_bh
ldr x8, [x22]
Lloh12:
adrp x9, _ACTIVE@GOTPAGE
Lloh13:
ldr x9, [x9, _ACTIVE@GOTPAGEOFF]
Lloh14:
ldr x9, [x9]
cmp x8, x9
cset w0, ne
bl _unlikely
cbz x0, LBB0_12
; %bb.10:
mov x0, x23
bl _spin_unlock_bh
Lloh15:
adrp x1, l_.str.1@PAGE
Lloh16:
add x1, x1, l_.str.1@PAGEOFF
mov x0, x21
bl _dev_dbg
mov x0, x24
bl _hsi_free_msg
b LBB0_5
LBB0_11:
Lloh17:
adrp x1, l_.str@PAGE
Lloh18:
add x1, x1, l_.str@PAGEOFF
mov x0, x21
bl _dev_dbg
b LBB0_5
LBB0_12:
add x1, x22, #32
mov x0, x24
bl _list_add_tail
ldr x8, [x22, #8]
add x2, x8, #1
str x2, [x22, #8]
ldr x9, [x19]
cmp x9, x8
b.gt LBB0_14
; %bb.13:
Lloh19:
adrp x1, l_.str.2@PAGE
Lloh20:
add x1, x1, l_.str.2@PAGEOFF
mov x0, x21
bl _dev_info
mov x0, x19
bl _netif_stop_queue
LBB0_14:
ldr x8, [x22, #16]
Lloh21:
adrp x9, _SEND_IDLE@GOTPAGE
Lloh22:
ldr x9, [x9, _SEND_IDLE@GOTPAGEOFF]
Lloh23:
ldr x9, [x9]
cmp x8, x9
b.ne LBB0_16
; %bb.15:
Lloh24:
adrp x8, _WAIT4READY@GOTPAGE
Lloh25:
ldr x8, [x8, _WAIT4READY@GOTPAGEOFF]
Lloh26:
ldr w1, [x8]
mov x0, x22
bl _ssip_set_txstate
mov x0, x23
bl _spin_unlock_bh
ldr x8, [x22, #8]
str x8, [sp]
Lloh27:
adrp x1, l_.str.3@PAGE
Lloh28:
add x1, x1, l_.str.3@PAGEOFF
mov x0, x21
bl _dev_dbg
mov x0, x21
bl _hsi_start_tx
b LBB0_19
LBB0_16:
Lloh29:
adrp x9, _SEND_READY@GOTPAGE
Lloh30:
ldr x9, [x9, _SEND_READY@GOTPAGEOFF]
Lloh31:
ldr x9, [x9]
cmp x8, x9
b.ne LBB0_18
; %bb.17:
ldr x8, [x22, #8]
str x8, [sp]
Lloh32:
adrp x1, l_.str.4@PAGE
Lloh33:
add x1, x1, l_.str.4@PAGEOFF
mov x0, x21
bl _dev_dbg
mov x0, x23
bl _spin_unlock_bh
add x0, x22, #28
bl _schedule_work
b LBB0_19
LBB0_18:
mov x0, x23
bl _spin_unlock_bh
LBB0_19:
ldr w8, [x19, #16]
add w8, w8, #1
str w8, [x19, #16]
ldr w8, [x20]
ldr w9, [x19, #8]
add w8, w9, w8
str w8, [x19, #8]
b LBB0_7
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGotLdr Lloh6, Lloh7, Lloh8
.loh AdrpLdrGotLdr Lloh12, Lloh13, Lloh14
.loh AdrpLdrGotLdr Lloh9, Lloh10, Lloh11
.loh AdrpAdd Lloh15, Lloh16
.loh AdrpAdd Lloh17, Lloh18
.loh AdrpAdd Lloh19, Lloh20
.loh AdrpLdrGotLdr Lloh21, Lloh22, Lloh23
.loh AdrpAdd Lloh27, Lloh28
.loh AdrpLdrGotLdr Lloh24, Lloh25, Lloh26
.loh AdrpLdrGotLdr Lloh29, Lloh30, Lloh31
.loh AdrpAdd Lloh32, Lloh33
.cfi_endproc
; -- End function
.comm _ETH_P_PHONET,4,2 ; @ETH_P_PHONET
.comm _SSIP_MIN_PN_HDR,4,2 ; @SSIP_MIN_PN_HDR
.comm _GFP_ATOMIC,4,2 ; @GFP_ATOMIC
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "Dropping tx data: No memory\n"
.comm _ssip_tx_data_complete,4,2 ; @ssip_tx_data_complete
.comm _ACTIVE,8,3 ; @ACTIVE
l_.str.1: ; @.str.1
.asciz "Dropping tx data: CMT is OFFLINE\n"
l_.str.2: ; @.str.2
.asciz "TX queue full %d\n"
.comm _SEND_IDLE,8,3 ; @SEND_IDLE
.comm _WAIT4READY,4,2 ; @WAIT4READY
l_.str.3: ; @.str.3
.asciz "Start TX qlen %d\n"
.comm _SEND_READY,8,3 ; @SEND_READY
l_.str.4: ; @.str.4
.asciz "Start TX on SEND READY qlen %d\n"
.no_dead_strip _ssip_pn_xmit
.subsections_via_symbols
| AnghaBench/linux/drivers/hsi/clients/extr_ssi_protocol.c_ssip_pn_xmit.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _LoadMessageA ## -- Begin function LoadMessageA
.p2align 4, 0x90
_LoadMessageA: ## @LoadMessageA
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %edi, %ebx
movl $1, %edi
movl $1024, %esi ## imm = 0x400
callq _calloc
movq %rax, %r14
movq _hDllInstance@GOTPCREL(%rip), %rax
movl (%rax), %r15d
xorl %eax, %eax
callq _GetLocalizedMessageOffset
addl %eax, %ebx
movl %r15d, %edi
movl %ebx, %esi
movq %r14, %rdx
movl $1024, %ecx ## imm = 0x400
callq _LoadStringA
movq %r14, %rax
addq $8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _hDllInstance,4,2 ## @hDllInstance
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _LoadMessageA ; -- Begin function LoadMessageA
.p2align 2
_LoadMessageA: ; @LoadMessageA
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x19, x0
mov w0, #1
mov w1, #1024
bl _calloc
mov x20, x0
Lloh0:
adrp x8, _hDllInstance@GOTPAGE
Lloh1:
ldr x8, [x8, _hDllInstance@GOTPAGEOFF]
Lloh2:
ldr w21, [x8]
bl _GetLocalizedMessageOffset
add w1, w0, w19
mov x0, x21
mov x2, x20
mov w3, #1024
bl _LoadStringA
mov x0, x20
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _hDllInstance,4,2 ; @hDllInstance
.subsections_via_symbols
| AnghaBench/SoftEtherVPN/src/vpnweb/extr_vpnwebdlg_inner.h_LoadMessageA.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function dispc_read_reg
_dispc_read_reg: ## @dispc_read_reg
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq _dispc@GOTPCREL(%rip), %rax
addq (%rax), %rdi
popq %rbp
jmp ___raw_readl ## TAILCALL
.cfi_endproc
## -- End function
.comm _dispc,8,3 ## @dispc
.no_dead_strip _dispc_read_reg
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function dispc_read_reg
_dispc_read_reg: ; @dispc_read_reg
.cfi_startproc
; %bb.0:
Lloh0:
adrp x8, _dispc@GOTPAGE
Lloh1:
ldr x8, [x8, _dispc@GOTPAGEOFF]
Lloh2:
ldr x8, [x8]
add x0, x8, x0
b ___raw_readl
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _dispc,8,3 ; @dispc
.no_dead_strip _dispc_read_reg
.subsections_via_symbols
| AnghaBench/linux/drivers/video/fbdev/omap2/omapfb/dss/extr_dispc.c_dispc_read_reg.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _mch_can_restore_title ## -- Begin function mch_can_restore_title
.p2align 4, 0x90
_mch_can_restore_title: ## @mch_can_restore_title
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq _wb_window@GOTPCREL(%rip), %rcx
xorl %eax, %eax
cmpq $0, (%rcx)
setne %al
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _wb_window,8,3 ## @wb_window
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _mch_can_restore_title ; -- Begin function mch_can_restore_title
.p2align 2
_mch_can_restore_title: ; @mch_can_restore_title
.cfi_startproc
; %bb.0:
Lloh0:
adrp x8, _wb_window@GOTPAGE
Lloh1:
ldr x8, [x8, _wb_window@GOTPAGEOFF]
Lloh2:
ldr x8, [x8]
cmp x8, #0
cset w0, ne
ret
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _wb_window,8,3 ; @wb_window
.subsections_via_symbols
| AnghaBench/macvim/src/extr_os_amiga.c_mch_can_restore_title.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function dmabuf_exp_ops_map_dma_buf
_dmabuf_exp_ops_map_dma_buf: ## @dmabuf_exp_ops_map_dma_buf
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %esi, %r15d
movq %rdi, %r12
movq 16(%rdi), %r13
movq 8(%rdi), %rax
movq (%rax), %r14
movl (%r14), %esi
movl (%rdi), %edx
leaq L_.str(%rip), %rdi
xorl %ebx, %ebx
xorl %eax, %eax
callq _pr_debug
movq _DMA_NONE@GOTPCREL(%rip), %rax
movl (%rax), %eax
cmpl %r15d, %eax
je LBB0_2
## %bb.1:
testq %r13, %r13
je LBB0_2
## %bb.4:
movl (%r13), %ecx
cmpl %r15d, %ecx
jne LBB0_6
## %bb.5:
movq 8(%r13), %r14
jmp LBB0_14
LBB0_2:
movq _EINVAL@GOTPCREL(%rip), %rax
subl (%rax), %ebx
movl %ebx, %edi
LBB0_3:
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp _ERR_PTR ## TAILCALL
LBB0_6:
cmpl %eax, %ecx
jne LBB0_7
## %bb.8:
movl (%r14), %esi
movl 4(%r14), %edi
callq _dmabuf_pages_to_sgt
movq %rax, %r14
movq %rax, %rdi
callq _IS_ERR
testq %rax, %rax
jne LBB0_12
## %bb.9:
movl (%r12), %edi
movl (%r14), %edx
movl 4(%r14), %esi
movq _DMA_ATTR_SKIP_CPU_SYNC@GOTPCREL(%rip), %rax
movl (%rax), %r8d
movl %r15d, %ecx
callq _dma_map_sg_attrs
testl %eax, %eax
je LBB0_10
## %bb.11:
movq %r14, 8(%r13)
movl %r15d, (%r13)
jmp LBB0_12
LBB0_7:
movq _EBUSY@GOTPCREL(%rip), %rax
xorl %edi, %edi
subl (%rax), %edi
jmp LBB0_3
LBB0_10:
movq %r14, %rdi
callq _sg_free_table
movq %r14, %rdi
callq _kfree
movq _ENOMEM@GOTPCREL(%rip), %rax
xorl %edi, %edi
subl (%rax), %edi
callq _ERR_PTR
movq %rax, %r14
LBB0_12:
movq %r14, %rdi
callq _IS_ERR
testq %rax, %rax
je LBB0_14
## %bb.13:
movl (%r12), %esi
leaq L_.str.1(%rip), %rdi
xorl %eax, %eax
callq _pr_debug
LBB0_14:
movq %r14, %rax
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "Mapping %d pages for dev %p\n"
.comm _DMA_NONE,4,2 ## @DMA_NONE
.comm _EINVAL,4,2 ## @EINVAL
.comm _EBUSY,4,2 ## @EBUSY
.comm _DMA_ATTR_SKIP_CPU_SYNC,4,2 ## @DMA_ATTR_SKIP_CPU_SYNC
.comm _ENOMEM,4,2 ## @ENOMEM
L_.str.1: ## @.str.1
.asciz "Failed to map sg table for dev %p\n"
.no_dead_strip _dmabuf_exp_ops_map_dma_buf
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function dmabuf_exp_ops_map_dma_buf
_dmabuf_exp_ops_map_dma_buf: ; @dmabuf_exp_ops_map_dma_buf
.cfi_startproc
; %bb.0:
sub sp, sp, #64
.cfi_def_cfa_offset 64
stp x22, x21, [sp, #16] ; 16-byte Folded Spill
stp x20, x19, [sp, #32] ; 16-byte Folded Spill
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x20, x1
mov x19, x0
ldp x8, x22, [x0, #8]
ldr x21, [x8]
ldr w1, [x21]
ldr w8, [x0]
str x8, [sp]
Lloh0:
adrp x0, l_.str@PAGE
Lloh1:
add x0, x0, l_.str@PAGEOFF
bl _pr_debug
Lloh2:
adrp x8, _DMA_NONE@GOTPAGE
Lloh3:
ldr x8, [x8, _DMA_NONE@GOTPAGEOFF]
Lloh4:
ldr w8, [x8]
cmp w8, w20
ccmp x22, #0, #4, ne
b.ne LBB0_3
; %bb.1:
Lloh5:
adrp x8, _EINVAL@GOTPAGE
Lloh6:
ldr x8, [x8, _EINVAL@GOTPAGEOFF]
LBB0_2:
ldr w8, [x8]
neg w0, w8
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #64
b _ERR_PTR
LBB0_3:
ldr w9, [x22]
cmp w9, w20
b.ne LBB0_5
; %bb.4:
ldr x21, [x22, #8]
b LBB0_13
LBB0_5:
cmp w9, w8
b.ne LBB0_9
; %bb.6:
ldp w1, w0, [x21]
bl _dmabuf_pages_to_sgt
mov x21, x0
bl _IS_ERR
cbnz x0, LBB0_11
; %bb.7:
ldr w0, [x19]
ldp w2, w1, [x21]
Lloh7:
adrp x8, _DMA_ATTR_SKIP_CPU_SYNC@GOTPAGE
Lloh8:
ldr x8, [x8, _DMA_ATTR_SKIP_CPU_SYNC@GOTPAGEOFF]
Lloh9:
ldr w4, [x8]
mov x3, x20
bl _dma_map_sg_attrs
cbz w0, LBB0_10
; %bb.8:
str x21, [x22, #8]
str w20, [x22]
b LBB0_11
LBB0_9:
Lloh10:
adrp x8, _EBUSY@GOTPAGE
Lloh11:
ldr x8, [x8, _EBUSY@GOTPAGEOFF]
b LBB0_2
LBB0_10:
mov x0, x21
bl _sg_free_table
mov x0, x21
bl _kfree
Lloh12:
adrp x8, _ENOMEM@GOTPAGE
Lloh13:
ldr x8, [x8, _ENOMEM@GOTPAGEOFF]
Lloh14:
ldr w8, [x8]
neg w0, w8
bl _ERR_PTR
mov x21, x0
LBB0_11:
mov x0, x21
bl _IS_ERR
cbz x0, LBB0_13
; %bb.12:
ldr w1, [x19]
Lloh15:
adrp x0, l_.str.1@PAGE
Lloh16:
add x0, x0, l_.str.1@PAGEOFF
bl _pr_debug
LBB0_13:
mov x0, x21
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #64
ret
.loh AdrpLdrGotLdr Lloh2, Lloh3, Lloh4
.loh AdrpAdd Lloh0, Lloh1
.loh AdrpLdrGot Lloh5, Lloh6
.loh AdrpLdrGotLdr Lloh7, Lloh8, Lloh9
.loh AdrpLdrGot Lloh10, Lloh11
.loh AdrpLdrGotLdr Lloh12, Lloh13, Lloh14
.loh AdrpAdd Lloh15, Lloh16
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "Mapping %d pages for dev %p\n"
.comm _DMA_NONE,4,2 ; @DMA_NONE
.comm _EINVAL,4,2 ; @EINVAL
.comm _EBUSY,4,2 ; @EBUSY
.comm _DMA_ATTR_SKIP_CPU_SYNC,4,2 ; @DMA_ATTR_SKIP_CPU_SYNC
.comm _ENOMEM,4,2 ; @ENOMEM
l_.str.1: ; @.str.1
.asciz "Failed to map sg table for dev %p\n"
.no_dead_strip _dmabuf_exp_ops_map_dma_buf
.subsections_via_symbols
| AnghaBench/linux/drivers/xen/extr_gntdev-dmabuf.c_dmabuf_exp_ops_map_dma_buf.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function nv_rx_process
_nv_rx_process: ## @nv_rx_process
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $40, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %esi, -52(%rbp) ## 4-byte Spill
movq %rdi, -48(%rbp) ## 8-byte Spill
callq _netdev_priv
movq %rax, %rbx
movq 40(%rax), %rax
cmpq 72(%rbx), %rax
je LBB0_1
## %bb.2:
leaq 56(%rbx), %rcx
movq %rcx, -72(%rbp) ## 8-byte Spill
movq %rbx, %rcx
addq $52, %rcx
movq %rcx, -64(%rbp) ## 8-byte Spill
xorl %r12d, %r12d
jmp LBB0_3
.p2align 4, 0x90
LBB0_28: ## in Loop: Header=BB0_3 Depth=1
incl %r12d
movq 40(%rbx), %rax
cmpq 72(%rbx), %rax
je LBB0_29
LBB0_3: ## =>This Inner Loop Header: Depth=1
movl (%rax), %edi
callq _le32_to_cpu
movl %eax, %r13d
movq _NV_RX_AVAIL@GOTPCREL(%rip), %rax
testl %r13d, (%rax)
jne LBB0_29
## %bb.4: ## in Loop: Header=BB0_3 Depth=1
cmpl -52(%rbp), %r12d ## 4-byte Folded Reload
jge LBB0_29
## %bb.5: ## in Loop: Header=BB0_3 Depth=1
movq 16(%rbx), %rax
movq 64(%rbx), %rdi
movl 8(%rax), %edx
movl 12(%rax), %esi
movq _DMA_FROM_DEVICE@GOTPCREL(%rip), %rax
movl (%rax), %ecx
callq _dma_unmap_single
movq 16(%rbx), %rax
movq (%rax), %r14
movq $0, (%rax)
movq (%rbx), %rax
movq _DESC_VER_1@GOTPCREL(%rip), %rcx
cmpq (%rcx), %rax
jne LBB0_14
## %bb.6: ## in Loop: Header=BB0_3 Depth=1
movq _NV_RX_DESCRIPTORVALID@GOTPCREL(%rip), %rax
movl (%rax), %edi
andl %r13d, %edi
callq _likely
testq %rax, %rax
je LBB0_10
## %bb.7: ## in Loop: Header=BB0_3 Depth=1
movq _LEN_MASK_V1@GOTPCREL(%rip), %rax
movl (%rax), %r15d
andl %r13d, %r15d
movq _NV_RX_ERROR@GOTPCREL(%rip), %rax
movl (%rax), %edi
andl %r13d, %edi
callq _unlikely
testq %rax, %rax
je LBB0_23
## %bb.8: ## in Loop: Header=BB0_3 Depth=1
movq _NV_RX_ERROR_MASK@GOTPCREL(%rip), %rax
movl (%rax), %eax
andl %r13d, %eax
movq _NV_RX_ERROR4@GOTPCREL(%rip), %rcx
cmpl (%rcx), %eax
jne LBB0_11
## %bb.9: ## in Loop: Header=BB0_3 Depth=1
movl 8(%r14), %esi
movq -48(%rbp), %rdi ## 8-byte Reload
movl %r15d, %edx
callq _nv_getlen
movl %eax, %r15d
testl %eax, %eax
jns LBB0_23
jmp LBB0_10
.p2align 4, 0x90
LBB0_14: ## in Loop: Header=BB0_3 Depth=1
movq _NV_RX2_DESCRIPTORVALID@GOTPCREL(%rip), %rax
movl (%rax), %edi
andl %r13d, %edi
callq _likely
testq %rax, %rax
je LBB0_10
## %bb.15: ## in Loop: Header=BB0_3 Depth=1
movq _LEN_MASK_V2@GOTPCREL(%rip), %rax
movl (%rax), %r15d
andl %r13d, %r15d
movq _NV_RX2_ERROR@GOTPCREL(%rip), %rax
movl (%rax), %edi
andl %r13d, %edi
callq _unlikely
testq %rax, %rax
je LBB0_20
## %bb.16: ## in Loop: Header=BB0_3 Depth=1
movq _NV_RX2_ERROR_MASK@GOTPCREL(%rip), %rax
movl (%rax), %eax
andl %r13d, %eax
movq _NV_RX2_ERROR4@GOTPCREL(%rip), %rcx
cmpl (%rcx), %eax
jne LBB0_18
## %bb.17: ## in Loop: Header=BB0_3 Depth=1
movl 8(%r14), %esi
movq -48(%rbp), %rdi ## 8-byte Reload
movl %r15d, %edx
callq _nv_getlen
movl %eax, %r15d
testl %eax, %eax
jns LBB0_20
jmp LBB0_10
LBB0_11: ## in Loop: Header=BB0_3 Depth=1
movq _NV_RX_FRAMINGERR@GOTPCREL(%rip), %rcx
cmpl (%rcx), %eax
jne LBB0_13
## %bb.12: ## in Loop: Header=BB0_3 Depth=1
movq _NV_RX_SUBTRACT1@GOTPCREL(%rip), %rax
andl (%rax), %r13d
cmpl $1, %r13d
adcl $-1, %r15d
jmp LBB0_23
LBB0_18: ## in Loop: Header=BB0_3 Depth=1
movq _NV_RX2_FRAMINGERR@GOTPCREL(%rip), %rcx
cmpl (%rcx), %eax
jne LBB0_10
## %bb.19: ## in Loop: Header=BB0_3 Depth=1
movq _NV_RX2_SUBTRACT1@GOTPCREL(%rip), %rax
movl (%rax), %eax
andl %r13d, %eax
cmpl $1, %eax
adcl $-1, %r15d
LBB0_20: ## in Loop: Header=BB0_3 Depth=1
movq _NV_RX2_CHECKSUMMASK@GOTPCREL(%rip), %rax
andl (%rax), %r13d
movq _NV_RX2_CHECKSUM_IP_TCP@GOTPCREL(%rip), %rax
cmpl (%rax), %r13d
je LBB0_22
## %bb.21: ## in Loop: Header=BB0_3 Depth=1
movq _NV_RX2_CHECKSUM_IP_UDP@GOTPCREL(%rip), %rax
cmpl (%rax), %r13d
jne LBB0_23
LBB0_22: ## in Loop: Header=BB0_3 Depth=1
movq _CHECKSUM_UNNECESSARY@GOTPCREL(%rip), %rax
movl (%rax), %eax
movl %eax, 4(%r14)
LBB0_23: ## in Loop: Header=BB0_3 Depth=1
movq %r14, %rdi
movl %r15d, %esi
callq _skb_put
movq %r14, %rdi
movq -48(%rbp), %rsi ## 8-byte Reload
callq _eth_type_trans
movl %eax, (%r14)
movq -72(%rbp), %rdi ## 8-byte Reload
movq %r14, %rsi
callq _napi_gro_receive
movq -64(%rbp), %r14 ## 8-byte Reload
movq %r14, %rdi
callq _u64_stats_update_begin
movq _stat_rx_packets@GOTPCREL(%rip), %rax
movl (%rax), %edi
callq _nv_txrx_stats_inc
movq _stat_rx_bytes@GOTPCREL(%rip), %rax
movl (%rax), %edi
movl %r15d, %esi
callq _nv_txrx_stats_add
movq %r14, %rdi
callq _u64_stats_update_end
jmp LBB0_24
LBB0_13: ## in Loop: Header=BB0_3 Depth=1
movl %r13d, %edi
movq %rbx, %rsi
callq _rx_missing_handler
.p2align 4, 0x90
LBB0_10: ## in Loop: Header=BB0_3 Depth=1
movq %r14, %rdi
callq _dev_kfree_skb
LBB0_24: ## in Loop: Header=BB0_3 Depth=1
movq 40(%rbx), %rax
leaq 4(%rax), %rcx
movq %rcx, 40(%rbx)
movslq 48(%rbx), %rcx
xorl %edi, %edi
cmpq %rcx, %rax
sete %dil
callq _unlikely
testq %rax, %rax
je LBB0_26
## %bb.25: ## in Loop: Header=BB0_3 Depth=1
movq 32(%rbx), %rax
movq %rax, 40(%rbx)
LBB0_26: ## in Loop: Header=BB0_3 Depth=1
movq 16(%rbx), %rax
leaq 16(%rax), %rcx
movq %rcx, 16(%rbx)
movslq 24(%rbx), %rcx
xorl %edi, %edi
cmpq %rcx, %rax
sete %dil
callq _unlikely
testq %rax, %rax
je LBB0_28
## %bb.27: ## in Loop: Header=BB0_3 Depth=1
movq 8(%rbx), %rax
movq %rax, 16(%rbx)
jmp LBB0_28
LBB0_1:
xorl %r12d, %r12d
LBB0_29:
movl %r12d, %eax
addq $40, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _NV_RX_AVAIL,4,2 ## @NV_RX_AVAIL
.comm _DMA_FROM_DEVICE,4,2 ## @DMA_FROM_DEVICE
.comm _DESC_VER_1,8,3 ## @DESC_VER_1
.comm _NV_RX_DESCRIPTORVALID,4,2 ## @NV_RX_DESCRIPTORVALID
.comm _LEN_MASK_V1,4,2 ## @LEN_MASK_V1
.comm _NV_RX_ERROR,4,2 ## @NV_RX_ERROR
.comm _NV_RX_ERROR_MASK,4,2 ## @NV_RX_ERROR_MASK
.comm _NV_RX_ERROR4,4,2 ## @NV_RX_ERROR4
.comm _NV_RX_FRAMINGERR,4,2 ## @NV_RX_FRAMINGERR
.comm _NV_RX_SUBTRACT1,4,2 ## @NV_RX_SUBTRACT1
.comm _NV_RX2_DESCRIPTORVALID,4,2 ## @NV_RX2_DESCRIPTORVALID
.comm _LEN_MASK_V2,4,2 ## @LEN_MASK_V2
.comm _NV_RX2_ERROR,4,2 ## @NV_RX2_ERROR
.comm _NV_RX2_ERROR_MASK,4,2 ## @NV_RX2_ERROR_MASK
.comm _NV_RX2_ERROR4,4,2 ## @NV_RX2_ERROR4
.comm _NV_RX2_FRAMINGERR,4,2 ## @NV_RX2_FRAMINGERR
.comm _NV_RX2_SUBTRACT1,4,2 ## @NV_RX2_SUBTRACT1
.comm _NV_RX2_CHECKSUMMASK,4,2 ## @NV_RX2_CHECKSUMMASK
.comm _NV_RX2_CHECKSUM_IP_TCP,4,2 ## @NV_RX2_CHECKSUM_IP_TCP
.comm _NV_RX2_CHECKSUM_IP_UDP,4,2 ## @NV_RX2_CHECKSUM_IP_UDP
.comm _CHECKSUM_UNNECESSARY,4,2 ## @CHECKSUM_UNNECESSARY
.comm _stat_rx_packets,4,2 ## @stat_rx_packets
.comm _stat_rx_bytes,4,2 ## @stat_rx_bytes
.no_dead_strip _nv_rx_process
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function nv_rx_process
; ---------------------------------------------------------------------------
; nv_rx_process — AArch64 (Apple/AAPCS64), clang-generated from the
; AnghaBench extraction of drivers/net/ethernet/nvidia/forcedeth.c.
; Compiler output: do not reorder instructions by hand.
;
; In:   x0 = dev (net_device *), x1 = rx work budget (kept in w19)
; Out:  w0 = number of descriptors processed (accumulated in w21)
;
; Register roles across the loop:
;   x20 = dev               x22 = np (result of netdev_priv)
;   x28 = &NV_RX_AVAIL      x23 = &DESC_VER_1       (hoisted GOT loads)
;   w21 = rx_work counter   w27 = descriptor flags  x25 = skb
;   w26 = packet length     x24 = np+52 (stats sync) [sp,#8] = np+56 (napi)
;
; NOTE(review): the struct offsets (#8/#16/#24/#32/#40/#48/#64/#72) reflect
; the AnghaBench stub layout of the private struct, not the real forcedeth
; layout — confirm against the extracted C before reusing offsets.
; ---------------------------------------------------------------------------
_nv_rx_process: ; @nv_rx_process
.cfi_startproc
; %bb.0:
; Prologue: 112-byte frame, spill all callee-saved pairs used below.
sub sp, sp, #112
.cfi_def_cfa_offset 112
stp x28, x27, [sp, #16] ; 16-byte Folded Spill
stp x26, x25, [sp, #32] ; 16-byte Folded Spill
stp x24, x23, [sp, #48] ; 16-byte Folded Spill
stp x22, x21, [sp, #64] ; 16-byte Folded Spill
stp x20, x19, [sp, #80] ; 16-byte Folded Spill
stp x29, x30, [sp, #96] ; 16-byte Folded Spill
add x29, sp, #96
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
mov x19, x1
mov x20, x0
bl _netdev_priv
; Early exit when the ring is already empty: np+40 (get_rx ptr)
; equal to np+72 (put_rx ptr) means nothing to process.
ldr x8, [x0, #40]
ldr x9, [x0, #72]
cmp x8, x9
b.eq LBB0_27
; %bb.1:
; Loop setup: hoist GOT addresses and invariant struct addresses
; out of the loop (x28, x23, [sp,#8], x24).
mov x22, x0
mov w21, #0
Lloh0:
adrp x28, _NV_RX_AVAIL@GOTPAGE
Lloh1:
ldr x28, [x28, _NV_RX_AVAIL@GOTPAGEOFF]
Lloh2:
adrp x23, _DESC_VER_1@GOTPAGE
Lloh3:
ldr x23, [x23, _DESC_VER_1@GOTPAGEOFF]
add x9, x0, #56
str x9, [sp, #8] ; 8-byte Folded Spill
add x24, x0, #52
b LBB0_3
LBB0_2: ; in Loop: Header=BB0_3 Depth=1
; Loop latch: one more packet done; stop when get_rx catches put_rx.
add w21, w21, #1
ldr x8, [x22, #40]
ldr x9, [x22, #72]
cmp x8, x9
b.eq LBB0_28
LBB0_3: ; =>This Inner Loop Header: Depth=1
; flags = le32_to_cpu(*get_rx); leave loop if NV_RX_AVAIL still set
; (descriptor not yet handed back by hardware) or budget exhausted.
ldr w0, [x8]
bl _le32_to_cpu
ldr w8, [x28]
and w8, w8, w0
cmp w8, #0
ccmp w21, w19, #0, eq
b.ge LBB0_28
; %bb.4: ; in Loop: Header=BB0_3 Depth=1
; Unmap the DMA buffer for this descriptor and detach its skb (x25),
; clearing the ring slot.
mov x27, x0
ldr x0, [x22, #64]
ldr x8, [x22, #16]
ldp w2, w1, [x8, #8]
Lloh4:
adrp x8, _DMA_FROM_DEVICE@GOTPAGE
Lloh5:
ldr x8, [x8, _DMA_FROM_DEVICE@GOTPAGEOFF]
Lloh6:
ldr w3, [x8]
bl _dma_unmap_single
ldr x8, [x22, #16]
ldr x25, [x8]
str xzr, [x8]
; Descriptor-format dispatch: DESC_VER_1 vs. the v2 layout.
ldr x8, [x22]
ldr x9, [x23]
cmp x8, x9
b.ne LBB0_9
; %bb.5: ; in Loop: Header=BB0_3 Depth=1
; --- v1 descriptors: drop the frame unless DESCRIPTORVALID is set.
Lloh7:
adrp x8, _NV_RX_DESCRIPTORVALID@GOTPAGE
Lloh8:
ldr x8, [x8, _NV_RX_DESCRIPTORVALID@GOTPAGEOFF]
Lloh9:
ldr w8, [x8]
and w0, w8, w27
bl _likely
cbz x0, LBB0_22
; %bb.6: ; in Loop: Header=BB0_3 Depth=1
; len = flags & LEN_MASK_V1; check the v1 error bit.
Lloh10:
adrp x8, _LEN_MASK_V1@GOTPAGE
Lloh11:
ldr x8, [x8, _LEN_MASK_V1@GOTPAGEOFF]
Lloh12:
ldr w8, [x8]
and w26, w8, w27
Lloh13:
adrp x8, _NV_RX_ERROR@GOTPAGE
Lloh14:
ldr x8, [x8, _NV_RX_ERROR@GOTPAGEOFF]
Lloh15:
ldr w8, [x8]
and w0, w8, w27
bl _unlikely
cbz x0, LBB0_20
; %bb.7: ; in Loop: Header=BB0_3 Depth=1
; Error path: ERROR4 means the length must be recovered via nv_getlen.
Lloh16:
adrp x8, _NV_RX_ERROR_MASK@GOTPAGE
Lloh17:
ldr x8, [x8, _NV_RX_ERROR_MASK@GOTPAGEOFF]
Lloh18:
ldr w8, [x8]
and w8, w8, w27
Lloh19:
adrp x9, _NV_RX_ERROR4@GOTPAGE
Lloh20:
ldr x9, [x9, _NV_RX_ERROR4@GOTPAGEOFF]
Lloh21:
ldr w9, [x9]
cmp w8, w9
b.ne LBB0_13
; %bb.8: ; in Loop: Header=BB0_3 Depth=1
; len = nv_getlen(dev, skb->data, len); negative result => drop.
ldr w1, [x25, #8]
mov x0, x20
mov x2, x26
bl _nv_getlen
mov x26, x0
tbz w0, #31, LBB0_20
b LBB0_22
LBB0_9: ; in Loop: Header=BB0_3 Depth=1
; --- v2 descriptors: same structure as the v1 path with NV_RX2_* flags.
Lloh22:
adrp x8, _NV_RX2_DESCRIPTORVALID@GOTPAGE
Lloh23:
ldr x8, [x8, _NV_RX2_DESCRIPTORVALID@GOTPAGEOFF]
Lloh24:
ldr w8, [x8]
and w0, w8, w27
bl _likely
cbz x0, LBB0_22
; %bb.10: ; in Loop: Header=BB0_3 Depth=1
Lloh25:
adrp x8, _LEN_MASK_V2@GOTPAGE
Lloh26:
ldr x8, [x8, _LEN_MASK_V2@GOTPAGEOFF]
Lloh27:
ldr w8, [x8]
and w26, w8, w27
Lloh28:
adrp x8, _NV_RX2_ERROR@GOTPAGE
Lloh29:
ldr x8, [x8, _NV_RX2_ERROR@GOTPAGEOFF]
Lloh30:
ldr w8, [x8]
and w0, w8, w27
bl _unlikely
cbz x0, LBB0_17
; %bb.11: ; in Loop: Header=BB0_3 Depth=1
Lloh31:
adrp x8, _NV_RX2_ERROR_MASK@GOTPAGE
Lloh32:
ldr x8, [x8, _NV_RX2_ERROR_MASK@GOTPAGEOFF]
Lloh33:
ldr w8, [x8]
and w8, w8, w27
Lloh34:
adrp x9, _NV_RX2_ERROR4@GOTPAGE
Lloh35:
ldr x9, [x9, _NV_RX2_ERROR4@GOTPAGEOFF]
Lloh36:
ldr w9, [x9]
cmp w8, w9
b.ne LBB0_15
; %bb.12: ; in Loop: Header=BB0_3 Depth=1
ldr w1, [x25, #8]
mov x0, x20
mov x2, x26
bl _nv_getlen
mov x26, x0
tbz w0, #31, LBB0_17
b LBB0_22
LBB0_13: ; in Loop: Header=BB0_3 Depth=1
; v1 framing error: subtract one byte from len when SUBTRACT1 is set.
Lloh37:
adrp x9, _NV_RX_FRAMINGERR@GOTPAGE
Lloh38:
ldr x9, [x9, _NV_RX_FRAMINGERR@GOTPAGEOFF]
Lloh39:
ldr w9, [x9]
cmp w8, w9
b.ne LBB0_21
; %bb.14: ; in Loop: Header=BB0_3 Depth=1
Lloh40:
adrp x8, _NV_RX_SUBTRACT1@GOTPAGE
Lloh41:
ldr x8, [x8, _NV_RX_SUBTRACT1@GOTPAGEOFF]
Lloh42:
ldr w8, [x8]
tst w8, w27
cset w8, ne
sub w26, w26, w8
b LBB0_20
LBB0_15: ; in Loop: Header=BB0_3 Depth=1
; v2 framing error: same SUBTRACT1 adjustment; other errors => drop.
Lloh43:
adrp x9, _NV_RX2_FRAMINGERR@GOTPAGE
Lloh44:
ldr x9, [x9, _NV_RX2_FRAMINGERR@GOTPAGEOFF]
Lloh45:
ldr w9, [x9]
cmp w8, w9
b.ne LBB0_22
; %bb.16: ; in Loop: Header=BB0_3 Depth=1
Lloh46:
adrp x8, _NV_RX2_SUBTRACT1@GOTPAGE
Lloh47:
ldr x8, [x8, _NV_RX2_SUBTRACT1@GOTPAGEOFF]
Lloh48:
ldr w8, [x8]
tst w8, w27
cset w8, ne
sub w26, w26, w8
LBB0_17: ; in Loop: Header=BB0_3 Depth=1
; v2 only: mark hardware-verified TCP/UDP checksums on the skb.
Lloh49:
adrp x8, _NV_RX2_CHECKSUMMASK@GOTPAGE
Lloh50:
ldr x8, [x8, _NV_RX2_CHECKSUMMASK@GOTPAGEOFF]
Lloh51:
ldr w8, [x8]
and w8, w8, w27
Lloh52:
adrp x9, _NV_RX2_CHECKSUM_IP_TCP@GOTPAGE
Lloh53:
ldr x9, [x9, _NV_RX2_CHECKSUM_IP_TCP@GOTPAGEOFF]
Lloh54:
ldr w9, [x9]
cmp w8, w9
b.eq LBB0_19
; %bb.18: ; in Loop: Header=BB0_3 Depth=1
Lloh55:
adrp x9, _NV_RX2_CHECKSUM_IP_UDP@GOTPAGE
Lloh56:
ldr x9, [x9, _NV_RX2_CHECKSUM_IP_UDP@GOTPAGEOFF]
Lloh57:
ldr w9, [x9]
cmp w8, w9
b.ne LBB0_20
LBB0_19: ; in Loop: Header=BB0_3 Depth=1
Lloh58:
adrp x8, _CHECKSUM_UNNECESSARY@GOTPAGE
Lloh59:
ldr x8, [x8, _CHECKSUM_UNNECESSARY@GOTPAGEOFF]
Lloh60:
ldr w8, [x8]
str w8, [x25, #4]
LBB0_20: ; in Loop: Header=BB0_3 Depth=1
; Deliver: skb_put(skb, len); skb->protocol = eth_type_trans(skb, dev);
; napi_gro_receive(&np->napi, skb); then bump rx packet/byte stats
; inside a u64_stats update section.
mov x0, x25
mov x1, x26
bl _skb_put
mov x0, x25
mov x1, x20
bl _eth_type_trans
str w0, [x25]
ldr x0, [sp, #8] ; 8-byte Folded Reload
mov x1, x25
bl _napi_gro_receive
mov x0, x24
bl _u64_stats_update_begin
Lloh61:
adrp x8, _stat_rx_packets@GOTPAGE
Lloh62:
ldr x8, [x8, _stat_rx_packets@GOTPAGEOFF]
Lloh63:
ldr w0, [x8]
bl _nv_txrx_stats_inc
Lloh64:
adrp x8, _stat_rx_bytes@GOTPAGE
Lloh65:
ldr x8, [x8, _stat_rx_bytes@GOTPAGEOFF]
Lloh66:
ldr w0, [x8]
mov x1, x26
bl _nv_txrx_stats_add
mov x0, x24
bl _u64_stats_update_end
b LBB0_23
LBB0_21: ; in Loop: Header=BB0_3 Depth=1
; v1 non-framing error: account the miss, then drop the skb below.
mov x0, x27
mov x1, x22
bl _rx_missing_handler
LBB0_22: ; in Loop: Header=BB0_3 Depth=1
; Drop path shared by all invalid/error descriptors.
mov x0, x25
bl _dev_kfree_skb
LBB0_23: ; in Loop: Header=BB0_3 Depth=1
; Advance get_rx (4-byte descriptor stride) and wrap to ring start
; when the old pointer hit the ring end sentinel.
ldr x8, [x22, #40]
add x9, x8, #4
str x9, [x22, #40]
ldrsw x9, [x22, #48]
cmp x8, x9
cset w0, eq
bl _unlikely
cbz x0, LBB0_25
; %bb.24: ; in Loop: Header=BB0_3 Depth=1
ldr x8, [x22, #32]
str x8, [x22, #40]
LBB0_25: ; in Loop: Header=BB0_3 Depth=1
; Advance get_rx_ctx (16-byte stride) with the same wrap check.
ldr x8, [x22, #16]
add x9, x8, #16
str x9, [x22, #16]
ldrsw x9, [x22, #24]
cmp x8, x9
cset w0, eq
bl _unlikely
cbz x0, LBB0_2
; %bb.26: ; in Loop: Header=BB0_3 Depth=1
ldr x8, [x22, #8]
str x8, [x22, #16]
b LBB0_2
LBB0_27:
; Empty-ring early exit: processed 0 packets.
mov w21, #0
LBB0_28:
; Return processed count; restore callee-saved pairs.
mov x0, x21
ldp x29, x30, [sp, #96] ; 16-byte Folded Reload
ldp x20, x19, [sp, #80] ; 16-byte Folded Reload
ldp x22, x21, [sp, #64] ; 16-byte Folded Reload
ldp x24, x23, [sp, #48] ; 16-byte Folded Reload
ldp x26, x25, [sp, #32] ; 16-byte Folded Reload
ldp x28, x27, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #112
ret
; Linker-optimization hints pairing each adrp with its GOT load.
.loh AdrpLdrGot Lloh2, Lloh3
.loh AdrpLdrGot Lloh0, Lloh1
.loh AdrpLdrGotLdr Lloh4, Lloh5, Lloh6
.loh AdrpLdrGotLdr Lloh7, Lloh8, Lloh9
.loh AdrpLdrGotLdr Lloh13, Lloh14, Lloh15
.loh AdrpLdrGotLdr Lloh10, Lloh11, Lloh12
.loh AdrpLdrGotLdr Lloh19, Lloh20, Lloh21
.loh AdrpLdrGotLdr Lloh16, Lloh17, Lloh18
.loh AdrpLdrGotLdr Lloh22, Lloh23, Lloh24
.loh AdrpLdrGotLdr Lloh28, Lloh29, Lloh30
.loh AdrpLdrGotLdr Lloh25, Lloh26, Lloh27
.loh AdrpLdrGotLdr Lloh34, Lloh35, Lloh36
.loh AdrpLdrGotLdr Lloh31, Lloh32, Lloh33
.loh AdrpLdrGotLdr Lloh37, Lloh38, Lloh39
.loh AdrpLdrGotLdr Lloh40, Lloh41, Lloh42
.loh AdrpLdrGotLdr Lloh43, Lloh44, Lloh45
.loh AdrpLdrGotLdr Lloh46, Lloh47, Lloh48
.loh AdrpLdrGotLdr Lloh52, Lloh53, Lloh54
.loh AdrpLdrGotLdr Lloh49, Lloh50, Lloh51
.loh AdrpLdrGotLdr Lloh55, Lloh56, Lloh57
.loh AdrpLdrGotLdr Lloh58, Lloh59, Lloh60
.loh AdrpLdrGotLdr Lloh64, Lloh65, Lloh66
.loh AdrpLdrGotLdr Lloh61, Lloh62, Lloh63
.cfi_endproc
; -- End function
.comm _NV_RX_AVAIL,4,2 ; @NV_RX_AVAIL
.comm _DMA_FROM_DEVICE,4,2 ; @DMA_FROM_DEVICE
.comm _DESC_VER_1,8,3 ; @DESC_VER_1
.comm _NV_RX_DESCRIPTORVALID,4,2 ; @NV_RX_DESCRIPTORVALID
.comm _LEN_MASK_V1,4,2 ; @LEN_MASK_V1
.comm _NV_RX_ERROR,4,2 ; @NV_RX_ERROR
.comm _NV_RX_ERROR_MASK,4,2 ; @NV_RX_ERROR_MASK
.comm _NV_RX_ERROR4,4,2 ; @NV_RX_ERROR4
.comm _NV_RX_FRAMINGERR,4,2 ; @NV_RX_FRAMINGERR
.comm _NV_RX_SUBTRACT1,4,2 ; @NV_RX_SUBTRACT1
.comm _NV_RX2_DESCRIPTORVALID,4,2 ; @NV_RX2_DESCRIPTORVALID
.comm _LEN_MASK_V2,4,2 ; @LEN_MASK_V2
.comm _NV_RX2_ERROR,4,2 ; @NV_RX2_ERROR
.comm _NV_RX2_ERROR_MASK,4,2 ; @NV_RX2_ERROR_MASK
.comm _NV_RX2_ERROR4,4,2 ; @NV_RX2_ERROR4
.comm _NV_RX2_FRAMINGERR,4,2 ; @NV_RX2_FRAMINGERR
.comm _NV_RX2_SUBTRACT1,4,2 ; @NV_RX2_SUBTRACT1
.comm _NV_RX2_CHECKSUMMASK,4,2 ; @NV_RX2_CHECKSUMMASK
.comm _NV_RX2_CHECKSUM_IP_TCP,4,2 ; @NV_RX2_CHECKSUM_IP_TCP
.comm _NV_RX2_CHECKSUM_IP_UDP,4,2 ; @NV_RX2_CHECKSUM_IP_UDP
.comm _CHECKSUM_UNNECESSARY,4,2 ; @CHECKSUM_UNNECESSARY
.comm _stat_rx_packets,4,2 ; @stat_rx_packets
.comm _stat_rx_bytes,4,2 ; @stat_rx_bytes
.no_dead_strip _nv_rx_process
.subsections_via_symbols
| AnghaBench/linux/drivers/net/ethernet/nvidia/extr_forcedeth.c_nv_rx_process.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function i915_buddy_block_size
## ---------------------------------------------------------------------------
## i915_buddy_block_size — x86-64 SysV, clang output (AnghaBench extraction
## of drivers/gpu/drm/i915/i915_buddy.h). Compiler-generated; keep as-is.
## In:   rdi = mm (first 32-bit field read as chunk size)
##       rsi = block (passed through to i915_buddy_block_order)
## Out:  eax = *(u32 *)mm << i915_buddy_block_order(block)
## Note: "pushq %rax" exists only to keep rsp 16-byte aligned at the call.
## ---------------------------------------------------------------------------
_i915_buddy_block_size: ## @i915_buddy_block_size
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
movl (%rdi), %ebx ## ebx = mm->chunk_size (survives the call in rbx)
movq %rsi, %rdi
callq _i915_buddy_block_order
movl %eax, %ecx ## shift count must be in cl for shll
shll %cl, %ebx ## size = chunk_size << order
movl %ebx, %eax
addq $8, %rsp
popq %rbx
popq %rbp
retq
.cfi_endproc
## -- End function
.no_dead_strip _i915_buddy_block_size
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function i915_buddy_block_size
; ---------------------------------------------------------------------------
; i915_buddy_block_size — AArch64 (AAPCS64) twin of the x86 version above.
; In:   x0 = mm (first 32-bit field read), x1 = block
; Out:  w0 = *(u32 *)mm << i915_buddy_block_order(block)
; x19 holds the loaded chunk size across the call (callee-saved).
; ---------------------------------------------------------------------------
_i915_buddy_block_size: ; @i915_buddy_block_size
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
ldr w19, [x0] ; w19 = mm->chunk_size
mov x0, x1
bl _i915_buddy_block_order
lsl w0, w19, w0 ; result = chunk_size << order
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
.no_dead_strip _i915_buddy_block_size
.subsections_via_symbols
| AnghaBench/linux/drivers/gpu/drm/i915/extr_i915_buddy.h_i915_buddy_block_size.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _recording_driver_update_streaming_url ## -- Begin function recording_driver_update_streaming_url
.p2align 4, 0x90
## ---------------------------------------------------------------------------
## recording_driver_update_streaming_url — x86-64 SysV, clang output
## (AnghaBench extraction of RetroArch retroarch.c).
## Dispatches on the streaming-mode tag at configuration_settings[0]:
##   128 -> "%s%s" with the YouTube RTMP prefix (L_.str)   [field at +16]
##   129 -> "%s%s" with the Twitch RTMP prefix  (L_.str.1) [field at +12]
##   130 -> "udp://%s:%u" with host "127.0.0.1" and port at +4
## All paths tail-call snprintf(dst=+8, size=4, fmt, ...); returns
## immediately when settings is NULL, tag is unknown, or the key/url
## field is empty (per string_is_empty).
## ---------------------------------------------------------------------------
_recording_driver_update_streaming_url: ## @recording_driver_update_streaming_url
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax ## alignment padding for the calls below
.cfi_offset %rbx, -24
movq _configuration_settings@GOTPCREL(%rip), %rax
movq (%rax), %rbx ## rbx = settings; bail out if NULL
testq %rbx, %rbx
je LBB0_10
## %bb.1:
movl (%rbx), %eax ## eax = streaming-mode tag
cmpl $128, %eax
je LBB0_7
## %bb.2:
cmpl $130, %eax
je LBB0_9
## %bb.3:
cmpl $129, %eax
jne LBB0_10
## %bb.4: ## tag 129 (Twitch): skip when the stream key at +12 is empty
movl 12(%rbx), %edi
callq _string_is_empty
testl %eax, %eax
jne LBB0_10
## %bb.5:
movl 8(%rbx), %edi ## destination buffer
movl 12(%rbx), %r8d ## stream key appended to the prefix
leaq L_.str.2(%rip), %rdx ## "%s%s"
leaq L_.str.1(%rip), %rcx ## Twitch RTMP prefix
jmp LBB0_6
LBB0_7: ## tag 128 (YouTube): skip when the stream key at +16 is empty
movl 16(%rbx), %edi
callq _string_is_empty
testl %eax, %eax
je LBB0_8
LBB0_10: ## no-op return (NULL settings / unknown tag / empty key)
addq $8, %rsp
popq %rbx
popq %rbp
retq
LBB0_9: ## tag 130 (local UDP): "udp://127.0.0.1:<port at +4>"
movl 4(%rbx), %r8d
movl 8(%rbx), %edi
leaq L_.str.3(%rip), %rdx ## "udp://%s:%u"
leaq L_.str.4(%rip), %rcx ## "127.0.0.1"
jmp LBB0_6
LBB0_8:
movl 8(%rbx), %edi ## destination buffer
movl 16(%rbx), %r8d ## stream key appended to the prefix
leaq L_.str.2(%rip), %rdx ## "%s%s"
leaq L_.str(%rip), %rcx ## YouTube RTMP prefix
LBB0_6: ## common tail: snprintf(dst, 4, fmt, arg1, arg2)
movl $4, %esi
addq $8, %rsp
popq %rbx
popq %rbp
jmp _snprintf ## TAILCALL
.cfi_endproc
## -- End function
.comm _configuration_settings,8,3 ## @configuration_settings
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "rtmp://a.rtmp.youtube.com/live2/"
L_.str.1: ## @.str.1
.asciz "rtmp://live.twitch.tv/app/"
L_.str.2: ## @.str.2
.asciz "%s%s"
L_.str.3: ## @.str.3
.asciz "udp://%s:%u"
L_.str.4: ## @.str.4
.asciz "127.0.0.1"
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _recording_driver_update_streaming_url ; -- Begin function recording_driver_update_streaming_url
.p2align 2
; ---------------------------------------------------------------------------
; recording_driver_update_streaming_url — AArch64 twin of the x86 version.
; Dispatch on the tag at configuration_settings[0]:
;   128 -> "%s%s" + YouTube prefix (key at +16)
;   129 -> "%s%s" + Twitch prefix  (key at +12)
;   130 -> "udp://%s:%u" with "127.0.0.1" and the port at +4
; All paths tail-call snprintf(dst, 4, fmt, a, b); early return when
; settings is NULL, the tag is unknown, or the key is empty.
; ---------------------------------------------------------------------------
_recording_driver_update_streaming_url: ; @recording_driver_update_streaming_url
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
Lloh0:
adrp x8, _configuration_settings@GOTPAGE
Lloh1:
ldr x8, [x8, _configuration_settings@GOTPAGEOFF]
Lloh2:
ldr x19, [x8] ; x19 = settings; bail if NULL
cbz x19, LBB0_7
; %bb.1:
ldr w8, [x19] ; w8 = streaming-mode tag
cmp w8, #128
b.eq LBB0_6
; %bb.2:
cmp w8, #130
b.eq LBB0_8
; %bb.3:
cmp w8, #129
b.ne LBB0_7
; %bb.4: ; tag 129 (Twitch): skip when key at +12 is empty
ldr w0, [x19, #12]
bl _string_is_empty
cbnz w0, LBB0_7
; %bb.5:
Lloh3:
adrp x2, l_.str.2@PAGE
Lloh4:
add x2, x2, l_.str.2@PAGEOFF ; "%s%s"
Lloh5:
adrp x3, l_.str.1@PAGE
Lloh6:
add x3, x3, l_.str.1@PAGEOFF ; Twitch RTMP prefix
ldp w0, w4, [x19, #8] ; w0 = dst buffer, w4 = key (+12)
b LBB0_10
LBB0_6: ; tag 128 (YouTube): skip when key at +16 is empty
ldr w0, [x19, #16]
bl _string_is_empty
cbz w0, LBB0_9
LBB0_7: ; no-op return
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
LBB0_8: ; tag 130 (local UDP)
Lloh7:
adrp x2, l_.str.3@PAGE
Lloh8:
add x2, x2, l_.str.3@PAGEOFF ; "udp://%s:%u"
Lloh9:
adrp x3, l_.str.4@PAGE
Lloh10:
add x3, x3, l_.str.4@PAGEOFF ; "127.0.0.1"
ldp w4, w0, [x19, #4] ; w4 = port (+4), w0 = dst buffer (+8)
b LBB0_10
LBB0_9:
ldr w0, [x19, #8] ; dst buffer
Lloh11:
adrp x2, l_.str.2@PAGE
Lloh12:
add x2, x2, l_.str.2@PAGEOFF ; "%s%s"
Lloh13:
adrp x3, l_.str@PAGE
Lloh14:
add x3, x3, l_.str@PAGEOFF ; YouTube RTMP prefix
ldr w4, [x19, #16] ; stream key
LBB0_10: ; common tail: snprintf(dst, 4, fmt, a, b)
mov w1, #4
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
b _snprintf
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpAdd Lloh5, Lloh6
.loh AdrpAdd Lloh3, Lloh4
.loh AdrpAdd Lloh9, Lloh10
.loh AdrpAdd Lloh7, Lloh8
.loh AdrpAdd Lloh13, Lloh14
.loh AdrpAdd Lloh11, Lloh12
.cfi_endproc
; -- End function
.comm _configuration_settings,8,3 ; @configuration_settings
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "rtmp://a.rtmp.youtube.com/live2/"
l_.str.1: ; @.str.1
.asciz "rtmp://live.twitch.tv/app/"
l_.str.2: ; @.str.2
.asciz "%s%s"
l_.str.3: ; @.str.3
.asciz "udp://%s:%u"
l_.str.4: ; @.str.4
.asciz "127.0.0.1"
.subsections_via_symbols
| AnghaBench/RetroArch/griffin/extr_..retroarch.c_recording_driver_update_streaming_url.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _tracker_write_status_to_file ## -- Begin function tracker_write_status_to_file
.p2align 4, 0x90
## ---------------------------------------------------------------------------
## tracker_write_status_to_file — x86-64 SysV, clang output (AnghaBench
## extraction of fastdfs tracker_status.c). Stack-protected; allocates a
## runtime-sized path buffer (VLA of MAX_PATH_SIZE bytes via __chkstk_darwin)
## plus a fixed 264-byte content buffer at -288(%rbp).
## Steps:
##   1. snprintf(path, MAX_PATH_SIZE, "%s/data/%s", g_fdfs_base_path,
##      TRACKER_STATUS_FILENAME)
##   2. len = sprintf(buf, "%s=%d\n%s=%d\n", up-time item/value,
##      last-check-time item/value)
##   3. writeToFile(path, buf, len)
## Returns whatever writeToFile leaves in rax; aborts via
## __stack_chk_fail on canary mismatch.
## ---------------------------------------------------------------------------
_tracker_write_status_to_file: ## @tracker_write_status_to_file
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
subq $272, %rsp ## imm = 0x110
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -24(%rbp) ## stack canary
movq _MAX_PATH_SIZE@GOTPCREL(%rip), %rax
movl (%rax), %ebx
## Probe and carve MAX_PATH_SIZE bytes (16-byte rounded) off the stack;
## r14 = path buffer (VLA base).
movq %rbx, %rax
callq ____chkstk_darwin
addq $15, %rax
andq $-16, %rax
subq %rax, %rsp
movq %rsp, %r14
## snprintf(path, MAX_PATH_SIZE, "%s/data/%s", base_path, filename)
movq _g_fdfs_base_path@GOTPCREL(%rip), %rax
movq (%rax), %rcx
movq _TRACKER_STATUS_FILENAME@GOTPCREL(%rip), %rax
movq (%rax), %r8
leaq L_.str(%rip), %rdx
movq %r14, %rdi
movl %ebx, %esi
callq _snprintf
## sprintf(buf, "%s=%d\n%s=%d\n", UP_TIME item, g_up_time,
##         LAST_CHECK_TIME item, g_current_time)
movq _TRACKER_STATUS_ITEM_UP_TIME@GOTPCREL(%rip), %rax
movq (%rax), %rdx
movq _g_up_time@GOTPCREL(%rip), %rax
movl (%rax), %ecx
movq _TRACKER_STATUS_ITEM_LAST_CHECK_TIME@GOTPCREL(%rip), %rax
movq (%rax), %r8
movq _g_current_time@GOTPCREL(%rip), %rax
movl (%rax), %r9d
leaq L_.str.1(%rip), %rsi
leaq -288(%rbp), %rbx
movq %rbx, %rdi
callq _sprintf
## writeToFile(path, buf, sprintf-result-length)
movq %r14, %rdi
movq %rbx, %rsi
movl %eax, %edx
callq _writeToFile
## Canary check before unwinding the variable-size frame.
movq ___stack_chk_guard@GOTPCREL(%rip), %rcx
movq (%rcx), %rcx
cmpq -24(%rbp), %rcx
jne LBB0_2
## %bb.1:
leaq -16(%rbp), %rsp ## discard VLA + locals in one step
popq %rbx
popq %r14
popq %rbp
retq
LBB0_2:
callq ___stack_chk_fail
.cfi_endproc
## -- End function
.comm _MAX_PATH_SIZE,4,2 ## @MAX_PATH_SIZE
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "%s/data/%s"
.comm _g_fdfs_base_path,8,3 ## @g_fdfs_base_path
.comm _TRACKER_STATUS_FILENAME,8,3 ## @TRACKER_STATUS_FILENAME
L_.str.1: ## @.str.1
.asciz "%s=%d\n%s=%d\n"
.comm _TRACKER_STATUS_ITEM_UP_TIME,8,3 ## @TRACKER_STATUS_ITEM_UP_TIME
.comm _g_up_time,8,3 ## @g_up_time
.comm _TRACKER_STATUS_ITEM_LAST_CHECK_TIME,8,3 ## @TRACKER_STATUS_ITEM_LAST_CHECK_TIME
.comm _g_current_time,8,3 ## @g_current_time
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _tracker_write_status_to_file ; -- Begin function tracker_write_status_to_file
.p2align 2
; ---------------------------------------------------------------------------
; tracker_write_status_to_file — AArch64 twin of the x86 version above.
; Stack-protected; carves a MAX_PATH_SIZE-byte VLA (x20 = path buffer)
; below a fixed 272-byte region (content buffer at x19+8):
;   1. snprintf(path, MAX_PATH_SIZE, "%s/data/%s", base_path, filename)
;   2. len = sprintf(buf, "%s=%d\n%s=%d\n", up-time pair, last-check pair)
;   3. writeToFile(path, buf, len)
; Canary mismatch branches to __stack_chk_fail.
; ---------------------------------------------------------------------------
_tracker_write_status_to_file: ; @tracker_write_status_to_file
.cfi_startproc
; %bb.0:
stp x28, x27, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w27, -40
.cfi_offset w28, -48
sub sp, sp, #272 ; fixed buffer area; x19 marks its base
mov x19, sp
Lloh0:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh1:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh2:
ldr x8, [x8]
stur x8, [x29, #-40] ; store stack canary
Lloh3:
adrp x8, _MAX_PATH_SIZE@GOTPAGE
Lloh4:
ldr x8, [x8, _MAX_PATH_SIZE@GOTPAGEOFF]
Lloh5:
ldr w1, [x8]
; Probe and allocate the VLA (16-byte rounded); x20 = path buffer.
mov x9, x1
Lloh6:
adrp x16, ___chkstk_darwin@GOTPAGE
Lloh7:
ldr x16, [x16, ___chkstk_darwin@GOTPAGEOFF]
blr x16
mov x8, sp
add x9, x1, #15
and x9, x9, #0x1fffffff0
sub x20, x8, x9
mov sp, x20
; snprintf(path, MAX_PATH_SIZE, "%s/data/%s", base_path, filename)
Lloh8:
adrp x8, _g_fdfs_base_path@GOTPAGE
Lloh9:
ldr x8, [x8, _g_fdfs_base_path@GOTPAGEOFF]
Lloh10:
ldr x3, [x8]
Lloh11:
adrp x8, _TRACKER_STATUS_FILENAME@GOTPAGE
Lloh12:
ldr x8, [x8, _TRACKER_STATUS_FILENAME@GOTPAGEOFF]
Lloh13:
ldr x4, [x8]
Lloh14:
adrp x2, l_.str@PAGE
Lloh15:
add x2, x2, l_.str@PAGEOFF
mov x0, x20
; kill: def $w1 killed $w1 killed $x1
bl _snprintf
; sprintf(buf, "%s=%d\n%s=%d\n", UP_TIME item, g_up_time,
;         LAST_CHECK_TIME item, g_current_time)
Lloh16:
adrp x8, _TRACKER_STATUS_ITEM_UP_TIME@GOTPAGE
Lloh17:
ldr x8, [x8, _TRACKER_STATUS_ITEM_UP_TIME@GOTPAGEOFF]
Lloh18:
ldr x2, [x8]
Lloh19:
adrp x8, _g_up_time@GOTPAGE
Lloh20:
ldr x8, [x8, _g_up_time@GOTPAGEOFF]
Lloh21:
ldr w3, [x8]
Lloh22:
adrp x8, _TRACKER_STATUS_ITEM_LAST_CHECK_TIME@GOTPAGE
Lloh23:
ldr x8, [x8, _TRACKER_STATUS_ITEM_LAST_CHECK_TIME@GOTPAGEOFF]
Lloh24:
ldr x4, [x8]
Lloh25:
adrp x8, _g_current_time@GOTPAGE
Lloh26:
ldr x8, [x8, _g_current_time@GOTPAGEOFF]
Lloh27:
ldr w5, [x8]
Lloh28:
adrp x1, l_.str.1@PAGE
Lloh29:
add x1, x1, l_.str.1@PAGEOFF
add x0, x19, #8
bl _sprintf
; writeToFile(path, buf, sprintf-result-length)
mov x2, x0
add x1, x19, #8
mov x0, x20
bl _writeToFile
; Canary check before tearing down the variable-size frame.
ldur x8, [x29, #-40]
Lloh30:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh31:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh32:
ldr x9, [x9]
cmp x9, x8
b.ne LBB0_2
; %bb.1:
sub sp, x29, #32 ; discard VLA + fixed area in one step
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x28, x27, [sp], #48 ; 16-byte Folded Reload
ret
LBB0_2:
bl ___stack_chk_fail
.loh AdrpLdrGotLdr Lloh30, Lloh31, Lloh32
.loh AdrpAdd Lloh28, Lloh29
.loh AdrpLdrGotLdr Lloh25, Lloh26, Lloh27
.loh AdrpLdrGotLdr Lloh22, Lloh23, Lloh24
.loh AdrpLdrGotLdr Lloh19, Lloh20, Lloh21
.loh AdrpLdrGotLdr Lloh16, Lloh17, Lloh18
.loh AdrpAdd Lloh14, Lloh15
.loh AdrpLdrGotLdr Lloh11, Lloh12, Lloh13
.loh AdrpLdrGotLdr Lloh8, Lloh9, Lloh10
.loh AdrpLdrGot Lloh6, Lloh7
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _MAX_PATH_SIZE,4,2 ; @MAX_PATH_SIZE
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "%s/data/%s"
.comm _g_fdfs_base_path,8,3 ; @g_fdfs_base_path
.comm _TRACKER_STATUS_FILENAME,8,3 ; @TRACKER_STATUS_FILENAME
l_.str.1: ; @.str.1
.asciz "%s=%d\n%s=%d\n"
.comm _TRACKER_STATUS_ITEM_UP_TIME,8,3 ; @TRACKER_STATUS_ITEM_UP_TIME
.comm _g_up_time,8,3 ; @g_up_time
.comm _TRACKER_STATUS_ITEM_LAST_CHECK_TIME,8,3 ; @TRACKER_STATUS_ITEM_LAST_CHECK_TIME
.comm _g_current_time,8,3 ; @g_current_time
.subsections_via_symbols
| AnghaBench/fastdfs/tracker/extr_tracker_status.c_tracker_write_status_to_file.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function fsl_diu_pan_display
## ---------------------------------------------------------------------------
## fsl_diu_pan_display — x86-64 SysV, clang output (AnghaBench extraction
## of drivers/video/fbdev/fsl-diu-fb.c).
## In:   rdi = var (pan request: offsets at +0/+8, vmode flags at +16)
##       rsi = info (current offsets at +0/+8, limits at +16..+40,
##                   vmode field at +48)
## Out:  eax = 0 on success / no-op, -EINVAL when either requested
##       offset plus its extent exceeds the stored limit.
## Behavior visible here:
##   - if both offsets already match, return 0 without touching state;
##   - range-check xoffset and yoffset against the limit fields;
##   - commit the new offsets, then set or clear FB_VMODE_YWRAP in the
##     info vmode word according to the request's flag, and call
##     fsl_diu_set_aoi(info).
## ---------------------------------------------------------------------------
_fsl_diu_pan_display: ## @fsl_diu_pan_display
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq (%rdi), %rcx ## rcx = requested first offset
cmpq %rcx, (%rsi)
jne LBB0_2
## %bb.1: ## both offsets unchanged -> nothing to do, return 0
movq 8(%rsi), %rdx
xorl %eax, %eax
cmpq 8(%rdi), %rdx
je LBB0_9
LBB0_2: ## bounds check: offset + extent must not exceed the limit
movq 16(%rsi), %rax
addq %rcx, %rax
cmpq 24(%rsi), %rax
jg LBB0_4
## %bb.3: ## same check for the second axis
movq 8(%rdi), %rax
movq 32(%rsi), %rdx
addq %rax, %rdx
cmpq 40(%rsi), %rdx
jle LBB0_5
LBB0_4: ## out of range -> return -EINVAL
movq _EINVAL@GOTPCREL(%rip), %rcx
xorl %eax, %eax
subl (%rcx), %eax
popq %rbp
retq
LBB0_5: ## commit the new offsets into info
movq %rcx, (%rsi)
movq %rax, 8(%rsi)
## Propagate the YWRAP bit from the request into info's vmode (+48).
movq _FB_VMODE_YWRAP@GOTPCREL(%rip), %rax
movl (%rax), %eax
testl %eax, 16(%rdi)
je LBB0_7
## %bb.6: ## request has YWRAP -> set the bit
orl %eax, 48(%rsi)
jmp LBB0_8
LBB0_7: ## request lacks YWRAP -> clear the bit
notl %eax
andl %eax, 48(%rsi)
LBB0_8:
movq %rsi, %rdi
callq _fsl_diu_set_aoi
xorl %eax, %eax
LBB0_9:
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _EINVAL,4,2 ## @EINVAL
.comm _FB_VMODE_YWRAP,4,2 ## @FB_VMODE_YWRAP
.no_dead_strip _fsl_diu_pan_display
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function fsl_diu_pan_display
; ---------------------------------------------------------------------------
; fsl_diu_pan_display — AArch64 twin of the x86 version above.
; In:   x0 = var (offsets at +0/+8, vmode flags at +16)
;       x1 = info (offsets at +0/+8, limits at +16..+40, vmode at +48)
; Out:  w0 = 0, or -EINVAL when a requested offset plus its extent
;       exceeds the stored limit.
; Same flow: no-op when offsets already match; range-check both axes;
; commit the offsets, set/clear FB_VMODE_YWRAP in info+48, then call
; fsl_diu_set_aoi(info).
; ---------------------------------------------------------------------------
_fsl_diu_pan_display: ; @fsl_diu_pan_display
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
ldr x9, [x1] ; current first offset
ldr x8, [x0] ; requested first offset
cmp x9, x8
b.ne LBB0_2
; %bb.1: ; both offsets unchanged -> return 0
ldr x9, [x1, #8]
ldr x10, [x0, #8]
cmp x9, x10
b.eq LBB0_9
LBB0_2: ; bounds check: offset + extent vs. limit, first axis
ldp x9, x10, [x1, #16]
add x9, x9, x8
cmp x9, x10
b.gt LBB0_4
; %bb.3: ; second axis
ldr x9, [x0, #8]
ldp x10, x11, [x1, #32]
add x10, x10, x9
cmp x10, x11
b.le LBB0_5
LBB0_4: ; out of range -> return -EINVAL
Lloh0:
adrp x8, _EINVAL@GOTPAGE
Lloh1:
ldr x8, [x8, _EINVAL@GOTPAGEOFF]
Lloh2:
ldr w8, [x8]
neg w0, w8
ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
ret
LBB0_5: ; commit both offsets into info in one store pair
stp x8, x9, [x1]
; Propagate YWRAP from var+16 into info+48.
ldr w9, [x0, #16]
Lloh3:
adrp x8, _FB_VMODE_YWRAP@GOTPAGE
Lloh4:
ldr x8, [x8, _FB_VMODE_YWRAP@GOTPAGEOFF]
Lloh5:
ldr w8, [x8]
tst w8, w9
b.eq LBB0_7
; %bb.6: ; request has YWRAP -> set the bit
ldr w9, [x1, #48]
orr w8, w9, w8
b LBB0_8
LBB0_7: ; request lacks YWRAP -> clear the bit
ldr w9, [x1, #48]
bic w8, w9, w8
LBB0_8:
str w8, [x1, #48]
mov x0, x1
bl _fsl_diu_set_aoi
LBB0_9:
mov w0, #0
ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
ret
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.cfi_endproc
; -- End function
.comm _EINVAL,4,2 ; @EINVAL
.comm _FB_VMODE_YWRAP,4,2 ; @FB_VMODE_YWRAP
.no_dead_strip _fsl_diu_pan_display
.subsections_via_symbols
| AnghaBench/linux/drivers/video/fbdev/extr_fsl-diu-fb.c_fsl_diu_pan_display.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function nv50_graph_construct_gene_vfetch
_nv50_graph_construct_gene_vfetch: ## @nv50_graph_construct_gene_vfetch
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $24, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdi, %r15
movq (%rdi), %r14
movl (%r14), %edi
callq _IS_NVA3F
xorl %r12d, %r12d
testq %rax, %rax
setne %r12b
shll $4, %r12d
addl $16, %r12d
cmpl $160, (%r14)
jl LBB0_2
## %bb.1:
movq %r15, %rdi
movl $1, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
movl $1, %edx
callq _xf_emit
LBB0_2:
movq %r15, %rdi
movl $1, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
movl $15, %edx
callq _xf_emit
movl %r12d, %ebx
shrl $3, %ebx
leal -1(%rbx), %esi
movq %r15, %rdi
movl %esi, -44(%rbp) ## 4-byte Spill
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movq %rbx, -56(%rbp) ## 8-byte Spill
movl %ebx, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
movl $32, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
xorl %edx, %edx
callq _xf_emit
movl (%r14), %edi
callq _IS_NVA3F
movl $11, %esi
testq %rax, %rax
jne LBB0_4
## %bb.3:
xorl %esi, %esi
cmpl $160, (%r14)
setge %sil
orl $8, %esi
LBB0_4:
xorl %r13d, %r13d
movq %r15, %rdi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
movl $26, %edx
callq _xf_emit
movq %r15, %rdi
movl $12, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
movl $4, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
movl $4, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
movl $4, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
movl $8, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
xorl %edx, %edx
callq _xf_emit
xorl %edx, %edx
cmpl $80, (%r14)
setne %dl
shll $10, %edx
orl $1023, %edx ## imm = 0x3FF
movq %r15, %rdi
movl $1, %esi
callq _xf_emit
cmpl $168, (%r14)
jne LBB0_6
## %bb.5:
movq %r15, %rdi
movl $1, %esi
movl $7680, %edx ## imm = 0x1E00
callq _xf_emit
LBB0_6:
movq %r15, %rdi
movl $12, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
movl $15, %edx
callq _xf_emit
movq %r15, %rdi
movl -44(%rbp), %esi ## 4-byte Reload
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
xorl %edx, %edx
callq _xf_emit
movl (%r14), %eax
addl $-81, %eax
cmpl $79, %eax
adcl $1, %r13d
movq %r15, %rdi
movl %r13d, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
xorl %edx, %edx
callq _xf_emit
movl (%r14), %edi
callq _IS_NVA3F
xorl %ecx, %ecx
xorl %edx, %edx
testq %rax, %rax
sete %cl
setne %dl
leal 8(,%rdx,8), %esi
movl $2, %r13d
subl %ecx, %r13d
movq %r15, %rdi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl %r13d, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $2, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl %r12d, %esi
xorl %edx, %edx
callq _xf_emit
cmpl $160, (%r14)
jl LBB0_8
## %bb.7:
movq %r15, %rdi
movl $1, %esi
xorl %edx, %edx
callq _xf_emit
LBB0_8:
movq %r15, %rdi
movl %r12d, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl %r12d, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl %r12d, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $2, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl %r12d, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $3, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl %r12d, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $3, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl %r12d, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $3, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl %r12d, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $3, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl %r12d, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $3, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl %r12d, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $3, %esi
xorl %edx, %edx
callq _xf_emit
movl (%r14), %edi
callq _IS_NVA3F
testq %rax, %rax
je LBB0_10
## %bb.9:
movq %r15, %rdi
movl %r12d, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $3, %esi
xorl %edx, %edx
callq _xf_emit
LBB0_10:
movl (%r14), %edi
callq _IS_NVA3F
xorl %ecx, %ecx
testq %rax, %rax
sete %cl
leal (%rcx,%rcx,2), %esi
addl $2, %esi
movq %r15, %rdi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
xorl %edx, %edx
callq _xf_emit
movl (%r14), %edi
cmpl $159, %edi
jg LBB0_12
## %bb.11:
movq %r15, %rdi
movl $65, %esi
xorl %edx, %edx
callq _xf_emit
movl $17, %esi
jmp LBB0_13
LBB0_12:
callq _IS_NVA3F
xorl %ecx, %ecx
testq %rax, %rax
setne %cl
leal 80(,%rcx,8), %esi
LBB0_13:
movq %r15, %rdi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
movl $15, %edx
callq _xf_emit
movq %r15, %rdi
movl -44(%rbp), %ebx ## 4-byte Reload
movl %ebx, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
movl $1, %edx
callq _xf_emit
shll $2, %r12d
movq %r15, %rdi
movl %r12d, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $4, %esi
xorl %edx, %edx
callq _xf_emit
movl (%r14), %edi
callq _IS_NVA3F
testq %rax, %rax
movl $22, %eax
movl $29, %esi
cmovel %eax, %esi
movq %r15, %rdi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
movl $15, %edx
callq _xf_emit
movq %r15, %rdi
movl %ebx, %esi
xorl %edx, %edx
callq _xf_emit
movl (%r14), %edi
movl $8, %esi
cmpl $160, %edi
jl LBB0_15
## %bb.14:
callq _IS_NVA3F
xorl %ecx, %ecx
testq %rax, %rax
setne %cl
leal (%rcx,%rcx,4), %esi
addl $7, %esi
LBB0_15:
movq %r15, %rdi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $10, %esi
xorl %edx, %edx
callq _xf_emit
movl (%r14), %edi
xorl %eax, %eax
cmpl $160, %edi
sete %al
leal 3(,%rax,8), %ebx
.p2align 4, 0x90
LBB0_16: ## =>This Inner Loop Header: Depth=1
callq _IS_NVA3F
testq %rax, %rax
je LBB0_18
## %bb.17: ## in Loop: Header=BB0_16 Depth=1
movq %r15, %rdi
movl $32, %esi
xorl %edx, %edx
callq _xf_emit
LBB0_18: ## in Loop: Header=BB0_16 Depth=1
movq %r15, %rdi
movl $512, %esi ## imm = 0x200
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $4, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $4, %esi
xorl %edx, %edx
callq _xf_emit
subl $1, %ebx
jb LBB0_20
## %bb.19: ## in Loop: Header=BB0_16 Depth=1
movl (%r14), %edi
jmp LBB0_16
LBB0_20:
movq %r15, %rdi
movl $1, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
movl $15, %edx
callq _xf_emit
movq %r15, %rdi
movl -44(%rbp), %esi ## 4-byte Reload
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movq -56(%rbp), %rsi ## 8-byte Reload
## kill: def $esi killed $esi killed $rsi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
xorl %edx, %edx
callq _xf_emit
movq %r15, %rdi
movl $1, %esi
xorl %edx, %edx
callq _xf_emit
movl (%r14), %edi
callq _IS_NVA3F
xorl %ecx, %ecx
testq %rax, %rax
setne %cl
leal (%rcx,%rcx), %esi
addl $5, %esi
movq %r15, %rdi
xorl %edx, %edx
addq $24, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp _xf_emit ## TAILCALL
.cfi_endproc
## -- End function
.no_dead_strip _nv50_graph_construct_gene_vfetch
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function nv50_graph_construct_gene_vfetch
_nv50_graph_construct_gene_vfetch: ; @nv50_graph_construct_gene_vfetch
.cfi_startproc
; %bb.0:
stp x26, x25, [sp, #-80]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 80
stp x24, x23, [sp, #16] ; 16-byte Folded Spill
stp x22, x21, [sp, #32] ; 16-byte Folded Spill
stp x20, x19, [sp, #48] ; 16-byte Folded Spill
stp x29, x30, [sp, #64] ; 16-byte Folded Spill
add x29, sp, #64
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
mov x19, x0
ldr x24, [x0]
ldr w0, [x24]
bl _IS_NVA3F
cmp x0, #0
mov w8, #32
mov w9, #16
csel w22, w9, w8, eq
ldr w8, [x24]
cmp w8, #160
b.lt LBB0_2
; %bb.1:
mov x0, x19
mov w1, #1
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #1
bl _xf_emit
LBB0_2:
mov x0, x19
mov w1, #1
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #15
bl _xf_emit
lsr w20, w22, #3
sub w21, w20, #1
mov x0, x19
mov x1, x21
mov w2, #0
bl _xf_emit
mov x0, x19
mov x1, x20
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #32
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #0
bl _xf_emit
ldr w0, [x24]
bl _IS_NVA3F
cbz x0, LBB0_4
; %bb.3:
mov w1, #11
b LBB0_5
LBB0_4:
ldr w8, [x24]
cmp w8, #159
mov w8, #8
cinc w1, w8, gt
LBB0_5:
mov x0, x19
mov w2, #0
bl _xf_emit
mov w23, #1
mov x0, x19
mov w1, #1
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #26
bl _xf_emit
mov x0, x19
mov w1, #12
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #4
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #4
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #4
bl _xf_emit
mov w25, #8
mov x0, x19
mov w1, #1
mov w2, #8
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #0
bl _xf_emit
ldr w8, [x24]
mov w9, #2047
mov w10, #1023
cmp w8, #80
csel w2, w10, w9, eq
mov x0, x19
mov w1, #1
bl _xf_emit
ldr w8, [x24]
cmp w8, #168
b.ne LBB0_7
; %bb.6:
mov x0, x19
mov w1, #1
mov w2, #7680
bl _xf_emit
LBB0_7:
mov x0, x19
mov w1, #12
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #15
bl _xf_emit
mov x0, x19
mov x1, x21
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #0
bl _xf_emit
ldr w8, [x24]
sub w8, w8, #81
cmp w8, #79
cinc w1, w23, lo
mov x0, x19
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #0
bl _xf_emit
ldr w0, [x24]
bl _IS_NVA3F
cmp x0, #0
mov w8, #16
csel w1, w25, w8, eq
cinc w23, w23, ne
mov x0, x19
mov w2, #0
bl _xf_emit
mov x0, x19
mov x1, x23
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #2
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #0
bl _xf_emit
mov x0, x19
mov x1, x22
mov w2, #0
bl _xf_emit
ldr w8, [x24]
cmp w8, #160
b.lt LBB0_9
; %bb.8:
mov x0, x19
mov w1, #1
mov w2, #0
bl _xf_emit
LBB0_9:
mov x0, x19
mov x1, x22
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #0
bl _xf_emit
mov x0, x19
mov x1, x22
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #0
bl _xf_emit
mov x0, x19
mov x1, x22
mov w2, #0
bl _xf_emit
mov w23, #2
mov x0, x19
mov w1, #2
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #0
bl _xf_emit
mov x0, x19
mov x1, x22
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #3
mov w2, #0
bl _xf_emit
mov x0, x19
mov x1, x22
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #3
mov w2, #0
bl _xf_emit
mov x0, x19
mov x1, x22
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #3
mov w2, #0
bl _xf_emit
mov x0, x19
mov x1, x22
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #3
mov w2, #0
bl _xf_emit
mov x0, x19
mov x1, x22
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #3
mov w2, #0
bl _xf_emit
mov x0, x19
mov x1, x22
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #3
mov w2, #0
bl _xf_emit
ldr w0, [x24]
bl _IS_NVA3F
cbz x0, LBB0_11
; %bb.10:
mov x0, x19
mov x1, x22
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #3
mov w2, #0
bl _xf_emit
LBB0_11:
ldr w0, [x24]
bl _IS_NVA3F
cmp x0, #0
mov w8, #5
csel w1, w8, w23, eq
mov x0, x19
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #0
bl _xf_emit
ldr w0, [x24]
cmp w0, #159
b.gt LBB0_13
; %bb.12:
mov x0, x19
mov w1, #65
mov w2, #0
bl _xf_emit
mov w1, #17
b LBB0_14
LBB0_13:
bl _IS_NVA3F
cmp x0, #0
mov w8, #88
mov w9, #80
csel w1, w9, w8, eq
LBB0_14:
mov x0, x19
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #15
bl _xf_emit
mov x0, x19
mov x1, x21
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #1
bl _xf_emit
lsl w1, w22, #2
mov x0, x19
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #4
mov w2, #0
bl _xf_emit
ldr w0, [x24]
bl _IS_NVA3F
cmp x0, #0
mov w8, #29
mov w9, #22
csel w1, w9, w8, eq
mov x0, x19
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #15
bl _xf_emit
mov x0, x19
mov x1, x21
mov w2, #0
bl _xf_emit
ldr w0, [x24]
cmp w0, #160
b.ge LBB0_16
; %bb.15:
mov w1, #8
b LBB0_17
LBB0_16:
bl _IS_NVA3F
cmp x0, #0
mov w8, #12
mov w9, #7
csel w1, w9, w8, eq
LBB0_17:
mov x0, x19
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #10
mov w2, #0
bl _xf_emit
ldr w0, [x24]
mov w8, #3
mov w9, #11
cmp w0, #160
csel w22, w9, w8, eq
bl _IS_NVA3F
cbz x0, LBB0_19
LBB0_18:
mov x0, x19
mov w1, #32
mov w2, #0
bl _xf_emit
LBB0_19: ; =>This Inner Loop Header: Depth=1
mov x0, x19
mov w1, #512
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #4
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #4
mov w2, #0
bl _xf_emit
cbz w22, LBB0_21
; %bb.20: ; in Loop: Header=BB0_19 Depth=1
ldr w0, [x24]
sub w22, w22, #1
bl _IS_NVA3F
cbnz x0, LBB0_18
b LBB0_19
LBB0_21:
mov x0, x19
mov w1, #1
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #15
bl _xf_emit
mov x0, x19
mov x1, x21
mov w2, #0
bl _xf_emit
mov x0, x19
mov x1, x20
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #0
bl _xf_emit
mov x0, x19
mov w1, #1
mov w2, #0
bl _xf_emit
ldr w0, [x24]
bl _IS_NVA3F
cmp x0, #0
mov w8, #7
mov w9, #5
csel w1, w9, w8, eq
mov x0, x19
mov w2, #0
ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
ldp x22, x21, [sp, #32] ; 16-byte Folded Reload
ldp x24, x23, [sp, #16] ; 16-byte Folded Reload
ldp x26, x25, [sp], #80 ; 16-byte Folded Reload
b _xf_emit
.cfi_endproc
; -- End function
.no_dead_strip _nv50_graph_construct_gene_vfetch
.subsections_via_symbols
| AnghaBench/fastsocket/kernel/drivers/gpu/drm/nouveau/core/engine/graph/extr_ctxnv50.c_nv50_graph_construct_gene_vfetch.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
subq $16, %rsp
leaq L_str(%rip), %rdi
callq _puts
leaq L_.str.1(%rip), %rdi
leaq -4(%rbp), %rsi
xorl %eax, %eax
callq _scanf
movslq -4(%rbp), %rax
imulq $1431655766, %rax, %rcx ## imm = 0x55555556
movq %rcx, %rdx
shrq $63, %rdx
shrq $32, %rcx
addl %edx, %ecx
leal (%rcx,%rcx,2), %ecx
movl %eax, %edx
subl %ecx, %edx
imulq $1717986919, %rax, %rcx ## imm = 0x66666667
movq %rcx, %rsi
shrq $63, %rsi
sarq $33, %rcx
addl %esi, %ecx
leal (%rcx,%rcx,4), %ecx
subl %ecx, %eax
leaq L_str.6(%rip), %rcx
leaq L_str.7(%rip), %rsi
cmoveq %rcx, %rsi
testl %edx, %edx
cmoveq %rcx, %rsi
orl %edx, %eax
leaq L_str.5(%rip), %rdi
cmovneq %rsi, %rdi
callq _puts
xorl %eax, %eax
addq $16, %rsp
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _data1 ## -- Begin function data1
.p2align 4, 0x90
_data1: ## @data1
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
leaq L_str.5(%rip), %rdi
popq %rbp
jmp _puts ## TAILCALL
.cfi_endproc
## -- End function
.globl _data2 ## -- Begin function data2
.p2align 4, 0x90
_data2: ## @data2
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
leaq L_str.6(%rip), %rdi
popq %rbp
jmp _puts ## TAILCALL
.cfi_endproc
## -- End function
.globl _data3 ## -- Begin function data3
.p2align 4, 0x90
_data3: ## @data3
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
leaq L_str.7(%rip), %rdi
popq %rbp
jmp _puts ## TAILCALL
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str.1: ## @.str.1
.asciz "%d"
L_str: ## @str
.asciz "Please enter a number below."
L_str.5: ## @str.5
.asciz "The number is divisible by both 3 and 5."
L_str.6: ## @str.6
.asciz "The number is divisible either 3 or 5."
L_str.7: ## @str.7
.asciz "It's not divisible by 3 or 5."
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
sub sp, sp, #32
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh0:
adrp x0, l_str@PAGE
Lloh1:
add x0, x0, l_str@PAGEOFF
bl _puts
sub x8, x29, #4
str x8, [sp]
Lloh2:
adrp x0, l_.str.1@PAGE
Lloh3:
add x0, x0, l_.str.1@PAGEOFF
bl _scanf
ldursw x8, [x29, #-4]
mov w9, #21846
movk w9, #21845, lsl #16
mul x9, x8, x9
lsr x10, x9, #63
lsr x9, x9, #32
add w9, w9, w10
add w9, w9, w9, lsl #1
sub w9, w8, w9
mov w10, #26215
movk w10, #26214, lsl #16
mul x10, x8, x10
lsr x11, x10, #63
asr x10, x10, #33
add w10, w10, w11
add w10, w10, w10, lsl #2
sub w8, w8, w10
orr w10, w9, w8
cmp w8, #0
Lloh4:
adrp x8, l_str.7@PAGE
Lloh5:
add x8, x8, l_str.7@PAGEOFF
Lloh6:
adrp x11, l_str.6@PAGE
Lloh7:
add x11, x11, l_str.6@PAGEOFF
ccmp w9, #0, #4, ne
csel x8, x11, x8, eq
Lloh8:
adrp x9, l_str.5@PAGE
Lloh9:
add x9, x9, l_str.5@PAGEOFF
cmp w10, #0
csel x0, x9, x8, eq
bl _puts
mov w0, #0
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #32
ret
.loh AdrpAdd Lloh8, Lloh9
.loh AdrpAdd Lloh6, Lloh7
.loh AdrpAdd Lloh4, Lloh5
.loh AdrpAdd Lloh2, Lloh3
.loh AdrpAdd Lloh0, Lloh1
.cfi_endproc
; -- End function
.globl _data1 ; -- Begin function data1
.p2align 2
_data1: ; @data1
.cfi_startproc
; %bb.0:
Lloh10:
adrp x0, l_str.5@PAGE
Lloh11:
add x0, x0, l_str.5@PAGEOFF
b _puts
.loh AdrpAdd Lloh10, Lloh11
.cfi_endproc
; -- End function
.globl _data2 ; -- Begin function data2
.p2align 2
_data2: ; @data2
.cfi_startproc
; %bb.0:
Lloh12:
adrp x0, l_str.6@PAGE
Lloh13:
add x0, x0, l_str.6@PAGEOFF
b _puts
.loh AdrpAdd Lloh12, Lloh13
.cfi_endproc
; -- End function
.globl _data3 ; -- Begin function data3
.p2align 2
_data3: ; @data3
.cfi_startproc
; %bb.0:
Lloh14:
adrp x0, l_str.7@PAGE
Lloh15:
add x0, x0, l_str.7@PAGEOFF
b _puts
.loh AdrpAdd Lloh14, Lloh15
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str.1: ; @.str.1
.asciz "%d"
l_str: ; @str
.asciz "Please enter a number below."
l_str.5: ; @str.5
.asciz "The number is divisible by both 3 and 5."
l_str.6: ; @str.6
.asciz "The number is divisible either 3 or 5."
l_str.7: ; @str.7
.asciz "It's not divisible by 3 or 5."
.subsections_via_symbols
| the_stack_data/23996.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function vlc_osd_slider_type_from_string
_vlc_osd_slider_type_from_string: ## @vlc_osd_slider_type_from_string
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq %rdi, %r14
leaq L_.str(%rip), %rsi
callq _strcmp
xorl %ebx, %ebx
testl %eax, %eax
je LBB0_1
## %bb.3:
leaq L_.str.1(%rip), %rsi
movq %r14, %rdi
callq _strcmp
testl %eax, %eax
jne LBB0_2
## %bb.4:
movl $1, %ebx
LBB0_1:
shlq $4, %rbx
leaq _vlc_osd_slider_type_from_string.pp_types(%rip), %rax
movl (%rbx,%rax), %ebx
LBB0_2:
movl %ebx, %eax
popq %rbx
popq %r14
popq %rbp
retq
.cfi_endproc
## -- End function
.section __DATA,__const
.p2align 4 ## @vlc_osd_slider_type_from_string.pp_types
_vlc_osd_slider_type_from_string.pp_types:
.long 129 ## 0x81
.space 4
.quad L_.str
.long 128 ## 0x80
.space 4
.quad L_.str.1
.space 16
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "horizontal"
L_.str.1: ## @.str.1
.asciz "vertical"
.no_dead_strip _vlc_osd_slider_type_from_string
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function vlc_osd_slider_type_from_string
_vlc_osd_slider_type_from_string: ; @vlc_osd_slider_type_from_string
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x0
Lloh0:
adrp x1, l_.str@PAGE
Lloh1:
add x1, x1, l_.str@PAGEOFF
bl _strcmp
cbz w0, LBB0_3
; %bb.1:
Lloh2:
adrp x1, l_.str.1@PAGE
Lloh3:
add x1, x1, l_.str.1@PAGEOFF
mov x0, x19
bl _strcmp
cbz w0, LBB0_4
; %bb.2:
mov w0, #0
b LBB0_6
LBB0_3:
mov x8, #0
b LBB0_5
LBB0_4:
mov w8, #1
LBB0_5:
lsl x8, x8, #4
Lloh4:
adrp x9, _vlc_osd_slider_type_from_string.pp_types@PAGE
Lloh5:
add x9, x9, _vlc_osd_slider_type_from_string.pp_types@PAGEOFF
ldr w0, [x9, x8]
LBB0_6:
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.loh AdrpAdd Lloh0, Lloh1
.loh AdrpAdd Lloh2, Lloh3
.loh AdrpAdd Lloh4, Lloh5
.cfi_endproc
; -- End function
.section __DATA,__const
.p2align 3 ; @vlc_osd_slider_type_from_string.pp_types
_vlc_osd_slider_type_from_string.pp_types:
.long 129 ; 0x81
.space 4
.quad l_.str
.long 128 ; 0x80
.space 4
.quad l_.str.1
.space 16
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "horizontal"
l_.str.1: ; @.str.1
.asciz "vertical"
.no_dead_strip _vlc_osd_slider_type_from_string
.subsections_via_symbols
| AnghaBench/vlc/modules/lua/libs/extr_osd.c_vlc_osd_slider_type_from_string.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _rds_info_iter_unmap ## -- Begin function rds_info_iter_unmap
.p2align 4, 0x90
_rds_info_iter_unmap: ## @rds_info_iter_unmap
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
movq %rdi, %rbx
movq (%rdi), %rdi
testq %rdi, %rdi
je LBB0_2
## %bb.1:
callq _kunmap_atomic
movq $0, (%rbx)
LBB0_2:
addq $8, %rsp
popq %rbx
popq %rbp
retq
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _rds_info_iter_unmap ; -- Begin function rds_info_iter_unmap
.p2align 2
_rds_info_iter_unmap: ; @rds_info_iter_unmap
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x0
ldr x0, [x0]
cbz x0, LBB0_2
; %bb.1:
bl _kunmap_atomic
str xzr, [x19]
LBB0_2:
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
.subsections_via_symbols
| AnghaBench/linux/net/rds/extr_info.c_rds_info_iter_unmap.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function rvu_enable_sriov
_rvu_enable_sriov: ## @rvu_enable_sriov
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdi, %r15
movq 8(%rdi), %r14
callq _rvu_afvf_msix_vectors_num_ok
testl %eax, %eax
je LBB0_1
## %bb.2:
xorl %eax, %eax
callq _lbk_get_num_chans
movl %eax, %ebx
testl %eax, %eax
js LBB0_3
## %bb.4:
movq %r14, %rdi
callq _pci_sriov_get_totalvfs
cmpl %ebx, %eax
cmovll %eax, %ebx
testb $1, %bl
je LBB0_6
## %bb.5:
leal -1(%rbx), %r12d
leaq L_.str.1(%rip), %rsi
movq %r14, %rdi
movl %r12d, %edx
movl %ebx, %ecx
xorl %eax, %eax
callq _dev_warn
movl %r12d, %ebx
LBB0_6:
testl %ebx, %ebx
je LBB0_7
## %bb.8:
movl %ebx, (%r15)
leaq 4(%r15), %r12
movq _TYPE_AFVF@GOTPCREL(%rip), %rax
movl (%rax), %edx
movq _rvu_afvf_mbox_handler@GOTPCREL(%rip), %rax
movl (%rax), %r8d
movq _rvu_afvf_mbox_up_handler@GOTPCREL(%rip), %rax
movl (%rax), %r9d
movq %r15, %rdi
movq %r12, %rsi
movl %ebx, %ecx
callq _rvu_mbox_init
movl %eax, %r13d
testl %eax, %eax
jne LBB0_11
## %bb.9:
movq %r15, %rdi
callq _rvu_enable_afvf_intr
xorl %r13d, %r13d
xorl %eax, %eax
callq _mb
movq %r14, %rdi
movl %ebx, %esi
callq _pci_enable_sriov
testl %eax, %eax
je LBB0_11
## %bb.10:
movl %eax, %ebx
movq %r15, %rdi
callq _rvu_disable_afvf_intr
movq %r12, %rdi
callq _rvu_mbox_destroy
movl %ebx, %r13d
jmp LBB0_11
LBB0_1:
leaq L_.str(%rip), %rsi
xorl %r13d, %r13d
movq %r14, %rdi
xorl %eax, %eax
callq _dev_warn
jmp LBB0_11
LBB0_3:
movl %ebx, %r13d
jmp LBB0_11
LBB0_7:
xorl %r13d, %r13d
LBB0_11:
movl %r13d, %eax
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "Skipping SRIOV enablement since not enough IRQs are available\n"
L_.str.1: ## @.str.1
.asciz "Number of VFs should be even. Enabling %d out of %d.\n"
.comm _TYPE_AFVF,4,2 ## @TYPE_AFVF
.comm _rvu_afvf_mbox_handler,4,2 ## @rvu_afvf_mbox_handler
.comm _rvu_afvf_mbox_up_handler,4,2 ## @rvu_afvf_mbox_up_handler
.no_dead_strip _rvu_enable_sriov
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function rvu_enable_sriov
_rvu_enable_sriov: ; @rvu_enable_sriov
.cfi_startproc
; %bb.0:
sub sp, sp, #80
.cfi_def_cfa_offset 80
stp x24, x23, [sp, #16] ; 16-byte Folded Spill
stp x22, x21, [sp, #32] ; 16-byte Folded Spill
stp x20, x19, [sp, #48] ; 16-byte Folded Spill
stp x29, x30, [sp, #64] ; 16-byte Folded Spill
add x29, sp, #64
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
mov x19, x0
ldr x20, [x0, #8]
bl _rvu_afvf_msix_vectors_num_ok
cbz w0, LBB0_8
; %bb.1:
bl _lbk_get_num_chans
mov x23, x0
tbnz w0, #31, LBB0_10
; %bb.2:
mov x0, x20
bl _pci_sriov_get_totalvfs
cmp w0, w23
csel w21, w0, w23, lt
tbz w21, #0, LBB0_4
; %bb.3:
sub w22, w21, #1
stp x22, x21, [sp]
Lloh0:
adrp x1, l_.str.1@PAGE
Lloh1:
add x1, x1, l_.str.1@PAGEOFF
mov x0, x20
bl _dev_warn
mov x21, x22
LBB0_4:
cbz w21, LBB0_9
; %bb.5:
mov x22, x19
str w21, [x22], #4
Lloh2:
adrp x8, _TYPE_AFVF@GOTPAGE
Lloh3:
ldr x8, [x8, _TYPE_AFVF@GOTPAGEOFF]
Lloh4:
ldr w2, [x8]
Lloh5:
adrp x8, _rvu_afvf_mbox_handler@GOTPAGE
Lloh6:
ldr x8, [x8, _rvu_afvf_mbox_handler@GOTPAGEOFF]
Lloh7:
ldr w4, [x8]
Lloh8:
adrp x8, _rvu_afvf_mbox_up_handler@GOTPAGE
Lloh9:
ldr x8, [x8, _rvu_afvf_mbox_up_handler@GOTPAGEOFF]
Lloh10:
ldr w5, [x8]
mov x0, x19
mov x1, x22
mov x3, x21
bl _rvu_mbox_init
mov x23, x0
cbnz w0, LBB0_10
; %bb.6:
mov x0, x19
bl _rvu_enable_afvf_intr
bl _mb
mov x0, x20
mov x1, x21
bl _pci_enable_sriov
mov x23, x0
cbz w0, LBB0_10
; %bb.7:
mov x0, x19
bl _rvu_disable_afvf_intr
mov x0, x22
bl _rvu_mbox_destroy
b LBB0_10
LBB0_8:
Lloh11:
adrp x1, l_.str@PAGE
Lloh12:
add x1, x1, l_.str@PAGEOFF
mov x0, x20
bl _dev_warn
LBB0_9:
mov w23, #0
LBB0_10:
mov x0, x23
ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
ldp x22, x21, [sp, #32] ; 16-byte Folded Reload
ldp x24, x23, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #80
ret
.loh AdrpAdd Lloh0, Lloh1
.loh AdrpLdrGotLdr Lloh8, Lloh9, Lloh10
.loh AdrpLdrGotLdr Lloh5, Lloh6, Lloh7
.loh AdrpLdrGotLdr Lloh2, Lloh3, Lloh4
.loh AdrpAdd Lloh11, Lloh12
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "Skipping SRIOV enablement since not enough IRQs are available\n"
l_.str.1: ; @.str.1
.asciz "Number of VFs should be even. Enabling %d out of %d.\n"
.comm _TYPE_AFVF,4,2 ; @TYPE_AFVF
.comm _rvu_afvf_mbox_handler,4,2 ; @rvu_afvf_mbox_handler
.comm _rvu_afvf_mbox_up_handler,4,2 ; @rvu_afvf_mbox_up_handler
.no_dead_strip _rvu_enable_sriov
.subsections_via_symbols
| AnghaBench/linux/drivers/net/ethernet/marvell/octeontx2/af/extr_rvu.c_rvu_enable_sriov.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
movq ___stdoutp@GOTPCREL(%rip), %rax
movq (%rax), %rbx
movq _shellcode(%rip), %rdi
callq _strlen
leaq L_.str.1(%rip), %rsi
movq %rbx, %rdi
movq %rax, %rdx
xorl %eax, %eax
callq _fprintf
xorl %eax, %eax
callq *_shellcode(%rip)
xorl %eax, %eax
addq $8, %rsp
popq %rbx
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "H1\322RH\270/bin//shPH\211\347RWH\211\346H\215B;\017\005"
.section __DATA,__data
.globl _shellcode ## @shellcode
.p2align 3
_shellcode:
.quad L_.str
.section __TEXT,__cstring,cstring_literals
L_.str.1: ## @.str.1
.asciz "Length: %d\n"
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
sub sp, sp, #48
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
Lloh0:
adrp x8, ___stdoutp@GOTPAGE
Lloh1:
ldr x8, [x8, ___stdoutp@GOTPAGEOFF]
Lloh2:
ldr x19, [x8]
adrp x20, _shellcode@PAGE
ldr x0, [x20, _shellcode@PAGEOFF]
bl _strlen
str x0, [sp]
Lloh3:
adrp x1, l_.str.1@PAGE
Lloh4:
add x1, x1, l_.str.1@PAGEOFF
mov x0, x19
bl _fprintf
ldr x8, [x20, _shellcode@PAGEOFF]
blr x8
mov w0, #0
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #48
ret
.loh AdrpAdd Lloh3, Lloh4
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "H1\322RH\270/bin//shPH\211\347RWH\211\346H\215B;\017\005"
.section __DATA,__data
.globl _shellcode ; @shellcode
.p2align 3
_shellcode:
.quad l_.str
.section __TEXT,__cstring,cstring_literals
l_.str.1: ; @.str.1
.asciz "Length: %d\n"
.subsections_via_symbols
| the_stack_data/423031.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function MONTHCAL_GoToMonth
_MONTHCAL_GoToMonth: ## @MONTHCAL_GoToMonth
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %esi, %r14d
movq %rdi, %r13
movl (%rdi), %r15d
testl %r15d, %r15d
jne LBB0_2
## %bb.1:
movq %r13, %rdi
callq _MONTHCAL_GetCalCount
movl %eax, %r15d
LBB0_2:
movq _DIRECTION_BACKWARD@GOTPCREL(%rip), %rbx
cmpl %r14d, (%rbx)
leaq L_.str.1(%rip), %rax
leaq L_.str.2(%rip), %rsi
cmoveq %rax, %rsi
leaq L_.str(%rip), %rdi
callq _TRACE
movq 8(%r13), %r12
cmpl %r14d, (%rbx)
jne LBB0_4
## %bb.3:
movl (%r12), %eax
movl %eax, -44(%rbp)
movl %r15d, %esi
negl %esi
jmp LBB0_5
LBB0_4:
movq %r13, %rdi
callq _MONTHCAL_GetCalCount
cltq
movl -4(%r12,%rax,4), %eax
movl %eax, -44(%rbp)
movl %r15d, %esi
LBB0_5:
leaq -44(%rbp), %r12
movq %r12, %rdi
callq _MONTHCAL_GetMonth
movq _FALSE@GOTPCREL(%rip), %rax
movl (%rax), %edx
movq %r13, %rdi
movq %r12, %rsi
callq _MONTHCAL_IsDateInValidRange
testl %eax, %eax
je LBB0_8
## %bb.6:
movq _MCS_NOSELCHANGEONNAV@GOTPCREL(%rip), %rax
movl %r15d, %esi
negl %esi
cmpl %r14d, (%rbx)
movl (%rax), %ebx
cmovnel %r15d, %esi
movl 4(%r13), %r14d
movl %ebx, %edx
andl %r14d, %edx
movq %r13, %rdi
callq _MONTHCAL_Scroll
movq %r13, %rdi
callq _MONTHCAL_NotifyDayState
andl %r14d, %ebx
jne LBB0_8
## %bb.7:
movq %r13, %rdi
callq _MONTHCAL_NotifySelectionChange
LBB0_8:
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "%s\n"
.comm _DIRECTION_BACKWARD,4,2 ## @DIRECTION_BACKWARD
L_.str.1: ## @.str.1
.asciz "back"
L_.str.2: ## @.str.2
.asciz "fwd"
.comm _FALSE,4,2 ## @FALSE
.comm _MCS_NOSELCHANGEONNAV,4,2 ## @MCS_NOSELCHANGEONNAV
.no_dead_strip _MONTHCAL_GoToMonth
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function MONTHCAL_GoToMonth
_MONTHCAL_GoToMonth: ; @MONTHCAL_GoToMonth
.cfi_startproc
; %bb.0:
sub sp, sp, #80
.cfi_def_cfa_offset 80
stp x24, x23, [sp, #16] ; 16-byte Folded Spill
stp x22, x21, [sp, #32] ; 16-byte Folded Spill
stp x20, x19, [sp, #48] ; 16-byte Folded Spill
stp x29, x30, [sp, #64] ; 16-byte Folded Spill
add x29, sp, #64
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
mov x20, x1
mov x19, x0
ldr w21, [x0]
cbnz w21, LBB0_2
; %bb.1:
mov x0, x19
bl _MONTHCAL_GetCalCount
mov x21, x0
LBB0_2:
Lloh0:
adrp x23, _DIRECTION_BACKWARD@GOTPAGE
Lloh1:
ldr x23, [x23, _DIRECTION_BACKWARD@GOTPAGEOFF]
ldr w8, [x23]
Lloh2:
adrp x9, l_.str.2@PAGE
Lloh3:
add x9, x9, l_.str.2@PAGEOFF
Lloh4:
adrp x10, l_.str.1@PAGE
Lloh5:
add x10, x10, l_.str.1@PAGEOFF
cmp w8, w20
csel x1, x10, x9, eq
Lloh6:
adrp x0, l_.str@PAGE
Lloh7:
add x0, x0, l_.str@PAGEOFF
bl _TRACE
ldr w8, [x23]
ldr x22, [x19, #8]
cmp w8, w20
b.ne LBB0_4
; %bb.3:
ldr w8, [x22]
str w8, [sp, #12]
neg w1, w21
b LBB0_5
LBB0_4:
mov x0, x19
bl _MONTHCAL_GetCalCount
add x8, x22, w0, sxtw #2
ldur w8, [x8, #-4]
str w8, [sp, #12]
mov x1, x21
LBB0_5:
add x0, sp, #12
bl _MONTHCAL_GetMonth
Lloh8:
adrp x8, _FALSE@GOTPAGE
Lloh9:
ldr x8, [x8, _FALSE@GOTPAGEOFF]
Lloh10:
ldr w2, [x8]
add x1, sp, #12
mov x0, x19
bl _MONTHCAL_IsDateInValidRange
cbz w0, LBB0_8
; %bb.6:
Lloh11:
adrp x8, _MCS_NOSELCHANGEONNAV@GOTPAGE
Lloh12:
ldr x8, [x8, _MCS_NOSELCHANGEONNAV@GOTPAGEOFF]
ldr w9, [x19, #4]
Lloh13:
ldr w8, [x8]
and w22, w8, w9
ldr w8, [x23]
cmp w8, w20
cneg w1, w21, eq
mov x0, x19
mov x2, x22
bl _MONTHCAL_Scroll
mov x0, x19
bl _MONTHCAL_NotifyDayState
cbnz w22, LBB0_8
; %bb.7:
mov x0, x19
bl _MONTHCAL_NotifySelectionChange
LBB0_8:
ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
ldp x22, x21, [sp, #32] ; 16-byte Folded Reload
ldp x24, x23, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #80
ret
.loh AdrpAdd Lloh6, Lloh7
.loh AdrpAdd Lloh4, Lloh5
.loh AdrpAdd Lloh2, Lloh3
.loh AdrpLdrGot Lloh0, Lloh1
.loh AdrpLdrGotLdr Lloh8, Lloh9, Lloh10
.loh AdrpLdrGotLdr Lloh11, Lloh12, Lloh13
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "%s\n"
.comm _DIRECTION_BACKWARD,4,2 ; @DIRECTION_BACKWARD
l_.str.1: ; @.str.1
.asciz "back"
l_.str.2: ; @.str.2
.asciz "fwd"
.comm _FALSE,4,2 ; @FALSE
.comm _MCS_NOSELCHANGEONNAV,4,2 ; @MCS_NOSELCHANGEONNAV
.no_dead_strip _MONTHCAL_GoToMonth
.subsections_via_symbols
| AnghaBench/reactos/dll/win32/comctl32/extr_monthcal.c_MONTHCAL_GoToMonth.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function mpt_terminate_raid_thread
_mpt_terminate_raid_thread: ## @mpt_terminate_raid_thread
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
cmpq $0, 8(%rdi)
je LBB0_1
## %bb.2:
movq %rdi, %rbx
leaq 8(%rdi), %r14
movl $1, (%rdi)
addq $16, %rdi
callq _wakeup
movq _PUSER@GOTPCREL(%rip), %rax
movl (%rax), %edx
leaq L_.str(%rip), %rcx
movq %rbx, %rdi
movq %r14, %rsi
xorl %r8d, %r8d
popq %rbx
popq %r14
popq %rbp
jmp _mpt_sleep ## TAILCALL
LBB0_1:
popq %rbx
popq %r14
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _PUSER,4,2 ## @PUSER
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "thtrm"
.no_dead_strip _mpt_terminate_raid_thread
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function mpt_terminate_raid_thread
_mpt_terminate_raid_thread: ; @mpt_terminate_raid_thread
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x20, x0
ldr x8, [x20, #8]!
cbz x8, LBB0_2
; %bb.1:
mov x19, x0
mov w8, #1
str w8, [x0], #16
bl _wakeup
Lloh0:
adrp x8, _PUSER@GOTPAGE
Lloh1:
ldr x8, [x8, _PUSER@GOTPAGEOFF]
Lloh2:
adrp x3, l_.str@PAGE
Lloh3:
add x3, x3, l_.str@PAGEOFF
Lloh4:
ldr w2, [x8]
mov x0, x19
mov x1, x20
mov w4, #0
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
b _mpt_sleep
LBB0_2:
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.loh AdrpAdd Lloh2, Lloh3
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh4
.cfi_endproc
; -- End function
.comm _PUSER,4,2 ; @PUSER
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "thtrm"
.no_dead_strip _mpt_terminate_raid_thread
.subsections_via_symbols
| AnghaBench/freebsd/sys/dev/mpt/extr_mpt_raid.c_mpt_terminate_raid_thread.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function dma_get_buffer_addr
_dma_get_buffer_addr: ## @dma_get_buffer_addr
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq (%rdi), %rax
movq (%rax), %rax
popq %rbp
retq
.cfi_endproc
## -- End function
.no_dead_strip _dma_get_buffer_addr
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function dma_get_buffer_addr
_dma_get_buffer_addr: ; @dma_get_buffer_addr
.cfi_startproc
; %bb.0:
ldr x8, [x0]
ldr x0, [x8]
ret
.cfi_endproc
; -- End function
.no_dead_strip _dma_get_buffer_addr
.subsections_via_symbols
| AnghaBench/linux/sound/pci/hda/extr_patch_ca0132.c_dma_get_buffer_addr.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.section __TEXT,__literal4,4byte_literals
.p2align 2 ## -- Begin function decode
LCPI0_0:
.long 0x42000000 ## float 32
.section __TEXT,__literal8,8byte_literals
.p2align 3
LCPI0_1:
.quad 0x3fbd791c5f88877e ## double 0.1151292546497
LCPI0_2:
.quad 0x3e80000000000000 ## double 1.1920928955078125E-7
LCPI0_3:
.quad 0x3e94000000000000 ## double 2.9802322387695313E-7
.section __TEXT,__text,regular,pure_instructions
.p2align 4, 0x90
_decode: ## @decode
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $56, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %esi, %r12d
movss %xmm0, -84(%rbp) ## 4-byte Spill
movq %rdi, %r14
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -48(%rbp)
movq (%rdi), %r15
movq 8(%rdi), %r13
leaq 280(%r15), %rdi
leaq 112(%r13), %rax
movq %rax, -96(%rbp) ## 8-byte Spill
leaq 300(%r15), %rsi
movl $144, %edx
callq _memmove
movq 16(%r14), %rax
movss 148(%r13), %xmm1 ## xmm1 = mem[0],zero,zero,zero
mulss (%rax), %xmm1
movss LCPI0_0(%rip), %xmm0 ## xmm0 = mem[0],zero,zero,zero
subss %xmm1, %xmm0
movss 144(%r13), %xmm1 ## xmm1 = mem[0],zero,zero,zero
mulss 4(%rax), %xmm1
subss %xmm1, %xmm0
movss 140(%r13), %xmm1 ## xmm1 = mem[0],zero,zero,zero
mulss 8(%rax), %xmm1
subss %xmm1, %xmm0
movss 136(%r13), %xmm1 ## xmm1 = mem[0],zero,zero,zero
mulss 12(%rax), %xmm1
subss %xmm1, %xmm0
movss 132(%r13), %xmm1 ## xmm1 = mem[0],zero,zero,zero
mulss 16(%rax), %xmm1
subss %xmm1, %xmm0
movss 128(%r13), %xmm1 ## xmm1 = mem[0],zero,zero,zero
mulss 20(%rax), %xmm1
subss %xmm1, %xmm0
movss 124(%r13), %xmm1 ## xmm1 = mem[0],zero,zero,zero
mulss 24(%rax), %xmm1
subss %xmm1, %xmm0
movss 120(%r13), %xmm1 ## xmm1 = mem[0],zero,zero,zero
mulss 28(%rax), %xmm1
subss %xmm1, %xmm0
leaq 116(%r13), %rbx
movss 112(%r13), %xmm1 ## xmm1 = mem[0],zero,zero,zero
movss 116(%r13), %xmm2 ## xmm2 = mem[0],zero,zero,zero
mulss 32(%rax), %xmm2
subss %xmm2, %xmm0
mulss 36(%rax), %xmm1
subss %xmm1, %xmm0
xorl %edi, %edi
movl $60, %esi
callq _av_clipf
cvtss2sd %xmm0, %xmm0
mulsd LCPI0_1(%rip), %xmm0
cvtsd2ss %xmm0, %xmm0
callq _exp
mulss -84(%rbp), %xmm0 ## 4-byte Folded Reload
cvtss2sd %xmm0, %xmm0
mulsd LCPI0_2(%rip), %xmm0
movq _codetable@GOTPCREL(%rip), %rax
movq (%rax), %rax
movslq %r12d, %rcx
movq (%rax,%rcx,8), %rax
movupd (%rax), %xmm1
movupd 16(%rax), %xmm2
movddup %xmm0, %xmm3 ## xmm3 = xmm0[0,0]
mulpd %xmm3, %xmm1
mulpd %xmm3, %xmm2
cvtpd2ps %xmm2, %xmm2
cvtpd2ps %xmm1, %xmm1
unpcklpd %xmm2, %xmm1 ## xmm1 = xmm1[0],xmm2[0]
movapd %xmm1, -80(%rbp)
mulsd 32(%rax), %xmm0
cvtsd2ss %xmm0, %xmm0
movss %xmm0, -64(%rbp)
addq $424, %r15 ## imm = 0x1A8
leaq -80(%rbp), %r12
movq %r12, %rdi
movq %r12, %rsi
movl $5, %edx
callq _avpriv_scalarproduct_float_c
movsd LCPI0_3(%rip), %xmm1 ## xmm1 = mem[0],zero
callq _FFMAX
movss %xmm0, -84(%rbp) ## 4-byte Spill
movq -96(%rbp), %rdi ## 8-byte Reload
movq %rbx, %rsi
movl $36, %edx
callq _memmove
cvttss2si -84(%rbp), %edi ## 4-byte Folded Reload
callq _log10
movl %eax, %ebx
movl $3355443, %edi ## imm = 0x333333
callq _log10
## kill: def $eax killed $eax def $rax
addl %ebx, %eax
leal (%rax,%rax,4), %eax
addl %eax, %eax
addl $-32, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, 148(%r13)
movl 24(%r14), %esi
movq %r15, %rdi
movq %r12, %rdx
movl $5, %ecx
movl $36, %r8d
callq _ff_celp_lp_synthesis_filterf
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
cmpq -48(%rbp), %rax
jne LBB0_2
## %bb.1:
addq $56, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB0_2:
callq ___stack_chk_fail
.cfi_endproc
## -- End function
.comm _codetable,8,3 ## @codetable
.no_dead_strip _decode
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function decode
_decode: ; @decode
.cfi_startproc
; %bb.0:
sub sp, sp, #112
.cfi_def_cfa_offset 112
stp d9, d8, [sp, #32] ; 16-byte Folded Spill
stp x24, x23, [sp, #48] ; 16-byte Folded Spill
stp x22, x21, [sp, #64] ; 16-byte Folded Spill
stp x20, x19, [sp, #80] ; 16-byte Folded Spill
stp x29, x30, [sp, #96] ; 16-byte Folded Spill
add x29, sp, #96
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset b8, -72
.cfi_offset b9, -80
mov x21, x1
fmov s8, s0
mov x19, x0
Lloh0:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh1:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh2:
ldr x8, [x8]
str x8, [sp, #24]
ldp x23, x20, [x0]
add x0, x23, #280
add x1, x23, #300
mov w2, #144
bl _memmove
ldr x8, [x19, #16]
ldr s0, [x20, #112]!
movi.2s v1, #66, lsl #24
ldp s3, s2, [x20, #32]
ldp s4, s5, [x8]
fmsub s1, s2, s4, s1
fmsub s1, s3, s5, s1
ldp s3, s2, [x20, #24]
ldp s4, s5, [x8, #8]
fmsub s1, s2, s4, s1
fmsub s1, s3, s5, s1
ldp s3, s2, [x20, #16]
ldp s4, s5, [x8, #16]
fmsub s1, s2, s4, s1
fmsub s1, s3, s5, s1
ldp s3, s2, [x20, #8]
ldp s4, s5, [x8, #24]
fmsub s1, s2, s4, s1
fmsub s1, s3, s5, s1
mov x22, x20
ldr s2, [x22, #4]!
ldp s3, s4, [x8, #32]
fmsub s1, s2, s3, s1
fmsub s0, s0, s4, s1
mov w0, #0
mov w1, #60
bl _av_clipf
fcvt d0, s0
mov x8, #34686
movk x8, #24456, lsl #16
movk x8, #31004, lsl #32
movk x8, #16317, lsl #48
fmov d1, x8
fmul d0, d0, d1
fcvt s0, d0
bl _exp
fmul s0, s0, s8
fcvt d0, s0
mov x8, #4503599627370496000
fmov d1, x8
fmul d0, d0, d1
Lloh3:
adrp x8, _codetable@GOTPAGE
Lloh4:
ldr x8, [x8, _codetable@GOTPAGEOFF]
Lloh5:
ldr x8, [x8]
ldr x8, [x8, w21, sxtw #3]
ldp q1, q2, [x8]
fmul.2d v2, v2, v0[0]
fmul.2d v1, v1, v0[0]
fcvtn v1.2s, v1.2d
fcvtn2 v1.4s, v2.2d
str q1, [sp]
ldr d1, [x8, #32]
fmul d0, d0, d1
fcvt s0, d0
str s0, [sp, #16]
add x21, x23, #424
mov x0, sp
mov x1, sp
mov w2, #5
bl _avpriv_scalarproduct_float_c
mov x8, #4509229126904709120
fmov d1, x8
bl _FFMAX
fmov s8, s0
mov x0, x20
mov x1, x22
mov w2, #36
bl _memmove
fcvtzs w0, s8
bl _log10
mov x22, x0
mov w0, #13107
movk w0, #51, lsl #16
bl _log10
add w8, w0, w22
mov w9, #10
mul w8, w8, w9
sub w8, w8, #32
scvtf s0, w8
str s0, [x20, #36]
ldr w1, [x19, #24]
mov x2, sp
mov x0, x21
mov w3, #5
mov w4, #36
bl _ff_celp_lp_synthesis_filterf
ldr x8, [sp, #24]
Lloh6:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh7:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh8:
ldr x9, [x9]
cmp x9, x8
b.ne LBB0_2
; %bb.1:
ldp x29, x30, [sp, #96] ; 16-byte Folded Reload
ldp x20, x19, [sp, #80] ; 16-byte Folded Reload
ldp x22, x21, [sp, #64] ; 16-byte Folded Reload
ldp x24, x23, [sp, #48] ; 16-byte Folded Reload
ldp d9, d8, [sp, #32] ; 16-byte Folded Reload
add sp, sp, #112
ret
LBB0_2:
bl ___stack_chk_fail
.loh AdrpLdrGotLdr Lloh6, Lloh7, Lloh8
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _codetable,8,3 ; @codetable
.no_dead_strip _decode
.subsections_via_symbols
| AnghaBench/FFmpeg/libavcodec/extr_ra288.c_decode.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
subq $16, %rsp
leaq L_.str(%rip), %rdi
leaq -4(%rbp), %rsi
xorl %eax, %eax
callq _scanf
xorl %eax, %eax
addq $16, %rsp
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "%d"
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
sub sp, sp, #32
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
sub x8, x29, #4
str x8, [sp]
Lloh0:
adrp x0, l_.str@PAGE
Lloh1:
add x0, x0, l_.str@PAGEOFF
bl _scanf
mov w0, #0
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #32
ret
.loh AdrpAdd Lloh0, Lloh1
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "%d"
.subsections_via_symbols
| the_stack_data/81864.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
subq $40, %rsp
.cfi_offset %rbx, -24
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -16(%rbp)
leaq L_.str(%rip), %rbx
leaq -48(%rbp), %rsi
movq %rbx, %rdi
xorl %eax, %eax
callq _scanf
leaq -44(%rbp), %rsi
movq %rbx, %rdi
xorl %eax, %eax
callq _scanf
leaq -40(%rbp), %rsi
movq %rbx, %rdi
xorl %eax, %eax
callq _scanf
leaq -36(%rbp), %rsi
movq %rbx, %rdi
xorl %eax, %eax
callq _scanf
leaq -32(%rbp), %rsi
movq %rbx, %rdi
xorl %eax, %eax
callq _scanf
movl -48(%rbp), %eax
movl -36(%rbp), %ecx
movl -32(%rbp), %edx
cmpl %edx, %ecx
cmovll %ecx, %edx
cmpl $2000, %edx ## imm = 0x7D0
movl $2000, %ecx ## imm = 0x7D0
cmovgel %ecx, %edx
movl -44(%rbp), %esi
cmpl %esi, %eax
cmovll %eax, %esi
movl -40(%rbp), %eax
cmpl %eax, %esi
cmovgel %eax, %esi
cmpl $2000, %esi ## imm = 0x7D0
cmovgel %ecx, %esi
addl %edx, %esi
addl $-50, %esi
movq %rbx, %rdi
xorl %eax, %eax
callq _printf
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
cmpq -16(%rbp), %rax
jne LBB0_2
## %bb.1:
xorl %eax, %eax
addq $40, %rsp
popq %rbx
popq %rbp
retq
LBB0_2:
callq ___stack_chk_fail
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "%d"
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
sub sp, sp, #80
.cfi_def_cfa_offset 80
stp x20, x19, [sp, #48] ; 16-byte Folded Spill
stp x29, x30, [sp, #64] ; 16-byte Folded Spill
add x29, sp, #64
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
Lloh0:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh1:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh2:
ldr x8, [x8]
stur x8, [x29, #-24]
add x20, sp, #20
str x20, [sp]
Lloh3:
adrp x19, l_.str@PAGE
Lloh4:
add x19, x19, l_.str@PAGEOFF
mov x0, x19
bl _scanf
add x8, x20, #4
str x8, [sp]
mov x0, x19
bl _scanf
add x8, x20, #8
str x8, [sp]
mov x0, x19
bl _scanf
add x8, x20, #12
str x8, [sp]
mov x0, x19
bl _scanf
add x8, x20, #16
str x8, [sp]
mov x0, x19
bl _scanf
ldp w8, w9, [sp, #20]
ldp w10, w11, [sp, #28]
ldr w12, [sp, #36]
cmp w11, w12
csel w11, w11, w12, lt
mov w12, #2000
cmp w11, #2000
csel w11, w11, w12, lt
cmp w8, w9
csel w8, w8, w9, lt
cmp w8, w10
csel w8, w8, w10, lt
cmp w8, #2000
csel w8, w8, w12, lt
add w8, w8, w11
sub w8, w8, #50
str x8, [sp]
mov x0, x19
bl _printf
ldur x8, [x29, #-24]
Lloh5:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh6:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh7:
ldr x9, [x9]
cmp x9, x8
b.ne LBB0_2
; %bb.1:
mov w0, #0
ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
add sp, sp, #80
ret
LBB0_2:
bl ___stack_chk_fail
.loh AdrpLdrGotLdr Lloh5, Lloh6, Lloh7
.loh AdrpAdd Lloh3, Lloh4
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "%d"
.subsections_via_symbols
| the_stack_data/6530.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _snd_soc_dai_remove ## -- Begin function snd_soc_dai_remove
.p2align 4, 0x90
_snd_soc_dai_remove: ## @snd_soc_dai_remove
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq (%rdi), %rax
movq (%rax), %rax
testq %rax, %rax
je LBB0_1
## %bb.2:
popq %rbp
jmpq *%rax ## TAILCALL
LBB0_1:
xorl %eax, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _snd_soc_dai_remove ; -- Begin function snd_soc_dai_remove
.p2align 2
_snd_soc_dai_remove: ; @snd_soc_dai_remove
.cfi_startproc
; %bb.0:
ldr x8, [x0]
ldr x1, [x8]
cbz x1, LBB0_2
; %bb.1:
br x1
LBB0_2:
mov w0, #0
ret
.cfi_endproc
; -- End function
.subsections_via_symbols
| AnghaBench/linux/sound/soc/extr_soc-dai.c_snd_soc_dai_remove.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function a_bus_drop_store
_a_bus_drop_store: ## @a_bus_drop_store
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rcx, %r14
movq %rdx, %r12
callq _dev_get_drvdata
movq %rax, %r15
movl $-1, %eax
cmpq $2, %r14
ja LBB0_6
## %bb.1:
movq %r15, %rbx
addq $4, %rbx
movq %rbx, %rdi
callq _mutex_lock
movb (%r12), %al
cmpb $49, %al
je LBB0_4
## %bb.2:
cmpb $48, %al
jne LBB0_5
## %bb.3:
movl $0, (%r15)
jmp LBB0_5
LBB0_4:
movl $1, (%r15)
movq $0, 8(%r15)
LBB0_5:
movq %r15, %rdi
callq _ci_otg_queue_work
movq %rbx, %rdi
callq _mutex_unlock
movl %r14d, %eax
LBB0_6:
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.no_dead_strip _a_bus_drop_store
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function a_bus_drop_store
_a_bus_drop_store: ; @a_bus_drop_store
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x19, x3
mov x22, x2
bl _dev_get_drvdata
cmp x19, #2
b.ls LBB0_2
; %bb.1:
mov w19, #-1
b LBB0_7
LBB0_2:
mov x20, x0
add x21, x0, #4
mov x0, x21
bl _mutex_lock
ldrb w8, [x22]
cmp w8, #49
b.eq LBB0_5
; %bb.3:
cmp w8, #48
b.ne LBB0_6
; %bb.4:
str wzr, [x20]
b LBB0_6
LBB0_5:
mov w8, #1
str w8, [x20]
str xzr, [x20, #8]
LBB0_6:
mov x0, x20
bl _ci_otg_queue_work
mov x0, x21
bl _mutex_unlock
LBB0_7:
mov x0, x19
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
.no_dead_strip _a_bus_drop_store
.subsections_via_symbols
| AnghaBench/linux/drivers/usb/chipidea/extr_otg_fsm.c_a_bus_drop_store.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function batadv_show_gw_bwidth
_batadv_show_gw_bwidth: ## @batadv_show_gw_bwidth
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdx, %r14
movq %rsi, %r15
callq _batadv_kobj_to_batpriv
movq %rax, %rbx
movq %r15, %rdi
callq _batadv_sysfs_deprecated
leaq 4(%rbx), %rdi
callq _atomic_read
movl %eax, %r15d
movq %rbx, %rdi
callq _atomic_read
movslq %r15d, %rcx
imulq $1717986919, %rcx, %rdx ## imm = 0x66666667
movq %rdx, %rsi
shrq $63, %rsi
sarq $34, %rdx
addl %esi, %edx
leal (%rdx,%rdx), %esi
leal (%rsi,%rsi,4), %esi
subl %esi, %ecx
movslq %eax, %r9
imulq $1717986919, %r9, %r8 ## imm = 0x66666667
movq %r8, %rax
shrq $63, %rax
sarq $34, %r8
addl %eax, %r8d
leal (%r8,%r8), %eax
leal (%rax,%rax,4), %eax
subl %eax, %r9d
leaq L_.str(%rip), %rsi
movq %r14, %rdi
## kill: def $edx killed $edx killed $rdx
## kill: def $ecx killed $ecx killed $rcx
## kill: def $r8d killed $r8d killed $r8
## kill: def $r9d killed $r9d killed $r9
addq $8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
jmp _sprintf ## TAILCALL
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "%u.%u/%u.%u MBit\n"
.no_dead_strip _batadv_show_gw_bwidth
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function batadv_show_gw_bwidth
_batadv_show_gw_bwidth: ; @batadv_show_gw_bwidth
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x19, x2
mov x20, x1
bl _batadv_kobj_to_batpriv
mov x21, x0
mov x0, x20
bl _batadv_sysfs_deprecated
add x0, x21, #4
bl _atomic_read
mov x20, x0
mov x0, x21
bl _atomic_read
mov w8, #26215
movk w8, #26214, lsl #16
smull x9, w20, w8
lsr x10, x9, #63
asr x9, x9, #34
add w2, w9, w10
mov w9, #10
msub w3, w2, w9, w20
smull x8, w0, w8
lsr x10, x8, #63
asr x8, x8, #34
add w4, w8, w10
msub w5, w4, w9, w0
Lloh0:
adrp x1, l_.str@PAGE
Lloh1:
add x1, x1, l_.str@PAGEOFF
mov x0, x19
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
b _sprintf
.loh AdrpAdd Lloh0, Lloh1
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "%u.%u/%u.%u MBit\n"
.no_dead_strip _batadv_show_gw_bwidth
.subsections_via_symbols
| AnghaBench/linux/net/batman-adv/extr_sysfs.c_batadv_show_gw_bwidth.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function fts5WriteInit
_fts5WriteInit: ## @fts5WriteInit
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %edx, %r12d
movq %rsi, %rbx
movq %rdi, %r14
movq 16(%rdi), %rax
movq _FTS5_DATA_PADDING@GOTPCREL(%rip), %rcx
movl (%rcx), %r15d
addl (%rax), %r15d
movq %rsi, %rdi
xorl %esi, %esi
movl $56, %edx
callq _memset
movl %r12d, (%rbx)
movq %r14, %rdi
movq %rbx, %rsi
movl $1, %edx
callq _fts5WriteDlidxGrow
movl $1, 16(%rbx)
movabsq $4294967297, %rax ## imm = 0x100000001
movq %rax, 4(%rbx)
leaq 24(%rbx), %r12
xorl %edi, %edi
cmpq $0, 24(%rbx)
sete %dil
callq _assert
leaq 40(%rbx), %r13
xorl %edi, %edi
cmpq $0, 40(%rbx)
sete %dil
callq _assert
movq %r14, %rdi
movq %r13, %rsi
movl %r15d, %edx
callq _sqlite3Fts5BufferSize
movq %r14, %rdi
movq %r12, %rsi
movl %r15d, %edx
callq _sqlite3Fts5BufferSize
leaq 8(%r14), %r15
cmpq $0, 8(%r14)
jne LBB0_2
## %bb.1:
movq 16(%r14), %rax
movl 4(%rax), %edx
movl 8(%rax), %esi
leaq L_.str(%rip), %rdi
callq _sqlite3_mprintf
movq %r14, %rdi
movq %r15, %rsi
movl %eax, %edx
callq _fts5IndexPrepareStmt
LBB0_2:
movq (%r14), %rax
movq _SQLITE_OK@GOTPCREL(%rip), %rcx
cmpq (%rcx), %rax
jne LBB0_3
## %bb.4:
movq 32(%rbx), %rdi
xorl %esi, %esi
movl $4, %edx
callq _memset
movq $4, 24(%rbx)
movq (%r15), %rdi
movl (%rbx), %edx
movl $1, %esi
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp _sqlite3_bind_int ## TAILCALL
LBB0_3:
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _FTS5_DATA_PADDING,4,2 ## @FTS5_DATA_PADDING
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "INSERT INTO '%q'.'%q_idx'(segid,term,pgno) VALUES(?,?,?)"
.comm _SQLITE_OK,8,3 ## @SQLITE_OK
.no_dead_strip _fts5WriteInit
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function fts5WriteInit
_fts5WriteInit: ; @fts5WriteInit
.cfi_startproc
; %bb.0:
stp x24, x23, [sp, #-64]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 64
stp x22, x21, [sp, #16] ; 16-byte Folded Spill
stp x20, x19, [sp, #32] ; 16-byte Folded Spill
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
mov x21, x2
mov x19, x1
mov x20, x0
ldr x8, [x0, #16]
ldr w8, [x8]
Lloh0:
adrp x9, _FTS5_DATA_PADDING@GOTPAGE
Lloh1:
ldr x9, [x9, _FTS5_DATA_PADDING@GOTPAGEOFF]
Lloh2:
ldr w9, [x9]
add w22, w9, w8
mov x0, x1
mov w1, #0
mov w2, #56
bl _memset
str w21, [x19]
mov w21, #1
mov x0, x20
mov x1, x19
mov w2, #1
bl _fts5WriteDlidxGrow
str w21, [x19, #16]
movi.2s v0, #1
stur d0, [x19, #4]
mov x21, x19
ldr x8, [x21, #24]!
cmp x8, #0
cset w0, eq
bl _assert
mov x23, x19
ldr x8, [x23, #40]!
cmp x8, #0
cset w0, eq
bl _assert
mov x0, x20
mov x1, x23
mov x2, x22
bl _sqlite3Fts5BufferSize
mov x0, x20
mov x1, x21
mov x2, x22
bl _sqlite3Fts5BufferSize
mov x21, x20
ldr x8, [x21, #8]!
cbnz x8, LBB0_2
; %bb.1:
ldr x8, [x20, #16]
ldp w2, w1, [x8, #4]
Lloh3:
adrp x0, l_.str@PAGE
Lloh4:
add x0, x0, l_.str@PAGEOFF
bl _sqlite3_mprintf
mov x2, x0
mov x0, x20
mov x1, x21
bl _fts5IndexPrepareStmt
LBB0_2:
ldr x8, [x20]
Lloh5:
adrp x9, _SQLITE_OK@GOTPAGE
Lloh6:
ldr x9, [x9, _SQLITE_OK@GOTPAGEOFF]
Lloh7:
ldr x9, [x9]
cmp x8, x9
b.ne LBB0_4
; %bb.3:
ldr x0, [x19, #32]
mov w20, #4
mov w1, #0
mov w2, #4
bl _memset
str x20, [x19, #24]
ldr x0, [x21]
ldr w2, [x19]
mov w1, #1
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
ldp x24, x23, [sp], #64 ; 16-byte Folded Reload
b _sqlite3_bind_int
LBB0_4:
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
ldp x24, x23, [sp], #64 ; 16-byte Folded Reload
ret
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpAdd Lloh3, Lloh4
.loh AdrpLdrGotLdr Lloh5, Lloh6, Lloh7
.cfi_endproc
; -- End function
.comm _FTS5_DATA_PADDING,4,2 ; @FTS5_DATA_PADDING
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "INSERT INTO '%q'.'%q_idx'(segid,term,pgno) VALUES(?,?,?)"
.comm _SQLITE_OK,8,3 ; @SQLITE_OK
.no_dead_strip _fts5WriteInit
.subsections_via_symbols
| AnghaBench/sqlcipher/ext/fts5/extr_fts5_index.c_fts5WriteInit.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _strpbrk ## -- Begin function strpbrk
.p2align 4, 0x90
_strpbrk: ## @strpbrk
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq %rdi, %rax
LBB0_1: ## =>This Loop Header: Depth=1
## Child Loop BB0_2 Depth 2
movq %rsi, %rcx
.p2align 4, 0x90
LBB0_2: ## Parent Loop BB0_1 Depth=1
## => This Inner Loop Header: Depth=2
movzbl (%rcx), %edx
testb %dl, %dl
je LBB0_4
## %bb.3: ## in Loop: Header=BB0_2 Depth=2
incq %rcx
cmpb %dl, (%rax)
jne LBB0_2
jmp LBB0_6
.p2align 4, 0x90
LBB0_4: ## in Loop: Header=BB0_1 Depth=1
cmpb $0, (%rax)
leaq 1(%rax), %rax
jne LBB0_1
## %bb.5:
xorl %eax, %eax
LBB0_6:
popq %rbp
retq
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _strpbrk ; -- Begin function strpbrk
.p2align 2
_strpbrk: ; @strpbrk
.cfi_startproc
; %bb.0:
LBB0_1: ; =>This Loop Header: Depth=1
; Child Loop BB0_2 Depth 2
mov x8, x1
LBB0_2: ; Parent Loop BB0_1 Depth=1
; => This Inner Loop Header: Depth=2
ldrb w9, [x8]
cbz w9, LBB0_4
; %bb.3: ; in Loop: Header=BB0_2 Depth=2
ldrb w10, [x0]
add x8, x8, #1
cmp w10, w9
b.ne LBB0_2
b LBB0_6
LBB0_4: ; in Loop: Header=BB0_1 Depth=1
ldrb w8, [x0], #1
cbnz w8, LBB0_1
; %bb.5:
mov x0, #0
LBB0_6:
ret
.cfi_endproc
; -- End function
.subsections_via_symbols
| the_stack_data/211080841.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _numa_cpu_node ## -- Begin function numa_cpu_node
.p2align 4, 0x90
_numa_cpu_node: ## @numa_cpu_node
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movl %edi, %esi
movq _x86_cpu_to_apicid@GOTPCREL(%rip), %rax
movl (%rax), %edi
callq _early_per_cpu
movq _BAD_APICID@GOTPCREL(%rip), %rcx
cmpl (%rcx), %eax
jne LBB0_1
## %bb.2:
movq _NUMA_NO_NODE@GOTPCREL(%rip), %rax
jmp LBB0_3
LBB0_1:
movq ___apicid_to_node@GOTPCREL(%rip), %rcx
cltq
shlq $2, %rax
addq (%rcx), %rax
LBB0_3:
movl (%rax), %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _x86_cpu_to_apicid,4,2 ## @x86_cpu_to_apicid
.comm _BAD_APICID,4,2 ## @BAD_APICID
.comm ___apicid_to_node,8,3 ## @__apicid_to_node
.comm _NUMA_NO_NODE,4,2 ## @NUMA_NO_NODE
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _numa_cpu_node ; -- Begin function numa_cpu_node
.p2align 2
_numa_cpu_node: ; @numa_cpu_node
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
mov x1, x0
Lloh0:
adrp x8, _x86_cpu_to_apicid@GOTPAGE
Lloh1:
ldr x8, [x8, _x86_cpu_to_apicid@GOTPAGEOFF]
Lloh2:
ldr w0, [x8]
bl _early_per_cpu
Lloh3:
adrp x8, _BAD_APICID@GOTPAGE
Lloh4:
ldr x8, [x8, _BAD_APICID@GOTPAGEOFF]
Lloh5:
ldr w8, [x8]
Lloh6:
adrp x9, ___apicid_to_node@GOTPAGE
Lloh7:
ldr x9, [x9, ___apicid_to_node@GOTPAGEOFF]
Lloh8:
ldr x9, [x9]
add x9, x9, w0, sxtw #2
Lloh9:
adrp x10, _NUMA_NO_NODE@GOTPAGE
Lloh10:
ldr x10, [x10, _NUMA_NO_NODE@GOTPAGEOFF]
cmp w0, w8
csel x8, x10, x9, eq
ldr w0, [x8]
ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
ret
.loh AdrpLdrGot Lloh9, Lloh10
.loh AdrpLdrGotLdr Lloh6, Lloh7, Lloh8
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _x86_cpu_to_apicid,4,2 ; @x86_cpu_to_apicid
.comm _BAD_APICID,4,2 ; @BAD_APICID
.comm ___apicid_to_node,8,3 ; @__apicid_to_node
.comm _NUMA_NO_NODE,4,2 ; @NUMA_NO_NODE
.subsections_via_symbols
| AnghaBench/linux/arch/x86/mm/extr_numa.c_numa_cpu_node.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _RM_CommandFilterArgsCount ## -- Begin function RM_CommandFilterArgsCount
.p2align 4, 0x90
_RM_CommandFilterArgsCount: ## @RM_CommandFilterArgsCount
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movl (%rdi), %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _RM_CommandFilterArgsCount ; -- Begin function RM_CommandFilterArgsCount
.p2align 2
_RM_CommandFilterArgsCount: ; @RM_CommandFilterArgsCount
.cfi_startproc
; %bb.0:
ldr w0, [x0]
ret
.cfi_endproc
; -- End function
.subsections_via_symbols
| AnghaBench/redis/src/extr_module.c_RM_CommandFilterArgsCount.c | anghabench |
## AnghaBench row: x86-64 | AArch64 | source path | dataset tag.
## tblcalc_number (mandoc out.c): scans a string for a decimal point
## (compared against *(char *)arg2) with backslash-escape handling, then
## accumulates per-character widths via the callback at *(void **)arg0
## (second callback arg loaded from arg0+8). Control flow is compiler
## generated — do not hand-edit; regenerate from the C source instead.
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function tblcalc_number
_tblcalc_number: ## @tblcalc_number
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $40, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdi, %r13
movq %rcx, -72(%rbp) ## 8-byte Spill
movq (%rcx), %rdi
testq %rdi, %rdi ## bail out early if *(arg3) is NULL
je LBB0_15
## %bb.1:
cmpb $0, (%rdi) ## ... or if the string is empty
je LBB0_15
## %bb.2:
movq %rdx, %rbx
movq %rsi, %r14
movl 8(%r13), %esi
callq *(%r13) ## indirect call through arg0's first slot
testq %r14, %r14
je LBB0_28
## %bb.3:
movq %r14, -64(%rbp) ## 8-byte Spill
movq %rax, -56(%rbp) ## 8-byte Spill
movq -72(%rbp), %rax ## 8-byte Reload
movq (%rax), %r15
xorl %r14d, %r14d
xorl %r12d, %r12d
jmp LBB0_5
.p2align 4, 0x90
LBB0_4: ## in Loop: Header=BB0_5 Depth=1
movzbl (%r15), %edi
callq _isdigit
testq %rax, %rax
cmovneq %r15, %r14
incq %r15
## Scanning loop: r15 walks the string, r14 = last digit seen,
## r12 = last decimal-point candidate.
LBB0_5: ## =>This Inner Loop Header: Depth=1
movzbl (%r15), %eax
cmpb $92, %al ## '\\' — possible escape sequence
je LBB0_7
## %bb.6: ## in Loop: Header=BB0_5 Depth=1
testb %al, %al
jne LBB0_8
jmp LBB0_16
.p2align 4, 0x90
LBB0_7: ## in Loop: Header=BB0_5 Depth=1
cmpb $38, 1(%r15) ## "\\&" terminates the scan
je LBB0_19
LBB0_8: ## in Loop: Header=BB0_5 Depth=1
cmpb (%rbx), %al ## compare with the decimal-point char
jne LBB0_4
## %bb.9: ## in Loop: Header=BB0_5 Depth=1
movzbl 1(%r15), %edi
callq _isdigit
testq %rax, %rax
je LBB0_11
## %bb.10: ## in Loop: Header=BB0_5 Depth=1
movq %r15, %r12
incq %r15
jmp LBB0_5
LBB0_11: ## in Loop: Header=BB0_5 Depth=1
movq -72(%rbp), %rax ## 8-byte Reload
cmpq (%rax), %r15
jbe LBB0_4
## %bb.12: ## in Loop: Header=BB0_5 Depth=1
movzbl -1(%r15), %edi
callq _isdigit
testq %rax, %rax
je LBB0_4
## %bb.13: ## in Loop: Header=BB0_5 Depth=1
movq %r15, %r12
incq %r15
jmp LBB0_5
LBB0_15:
xorl %eax, %eax ## NULL/empty input -> return 0
LBB0_28: ## common epilogue / return
addq $40, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB0_16:
testq %r14, %r14
jne LBB0_20
## %bb.17:
movq -56(%rbp), %rax ## 8-byte Reload
movq -64(%rbp), %rcx ## 8-byte Reload
cmpq %rax, (%rcx)
jae LBB0_28
## %bb.18:
movq %rax, (%rcx) ## no digits: just record the max width
jmp LBB0_28
LBB0_19:
movq %r15, %r12
movq %r15, %r14
LBB0_20:
incq %r14
testq %r12, %r12
cmovneq %r12, %r14 ## prefer the decimal-point position if one was found
movb $0, -41(%rbp)
movq -72(%rbp), %rax ## 8-byte Reload
movq (%rax), %r12
cmpq %r14, %r12
jae LBB0_25
## %bb.21:
subq %r12, %r14
xorl %r15d, %r15d
xorl %ebx, %ebx
.p2align 4, 0x90
## Width-accumulation loop: sum per-character callback results into rbx.
LBB0_22: ## =>This Inner Loop Header: Depth=1
movzbl (%r12,%r15), %eax
movb %al, -42(%rbp)
movl 8(%r13), %esi
leaq -42(%rbp), %rdi
callq *(%r13)
addq %rax, %rbx
incq %r15
cmpq %r15, %r14
jne LBB0_22
## %bb.23:
movq -64(%rbp), %rsi ## 8-byte Reload
movq 8(%rsi), %rdx
movq %rbx, %rcx
subq %rdx, %rcx
jbe LBB0_26
## %bb.24:
addq 16(%rsi), %rcx
movq %rcx, 16(%rsi)
movq %rbx, 8(%rsi)
movq -56(%rbp), %rax ## 8-byte Reload
cmpq %rcx, %rax
ja LBB0_27
jmp LBB0_28
LBB0_25:
movq -64(%rbp), %rsi ## 8-byte Reload
movq 8(%rsi), %rdx
xorl %ebx, %ebx
LBB0_26:
movq -56(%rbp), %rax ## 8-byte Reload
subq %rbx, %rax
addq %rdx, %rax
movq 16(%rsi), %rcx
cmpq %rcx, %rax
jbe LBB0_28
LBB0_27:
movq %rax, 16(%rsi) ## grow recorded maximum at offset 16
jmp LBB0_28
.cfi_endproc
## -- End function
.no_dead_strip _tblcalc_number
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function tblcalc_number
_tblcalc_number: ; @tblcalc_number
.cfi_startproc
; %bb.0:
sub sp, sp, #96
.cfi_def_cfa_offset 96
stp x26, x25, [sp, #16] ; 16-byte Folded Spill
stp x24, x23, [sp, #32] ; 16-byte Folded Spill
stp x22, x21, [sp, #48] ; 16-byte Folded Spill
stp x20, x19, [sp, #64] ; 16-byte Folded Spill
stp x29, x30, [sp, #80] ; 16-byte Folded Spill
add x29, sp, #80
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
mov x21, x0
ldr x0, [x3]
cbz x0, LBB0_14 ; bail out if *(arg3) is NULL
; %bb.1:
ldrb w8, [x0]
cbz w8, LBB0_14 ; ... or if the string is empty
; %bb.2:
mov x22, x3
mov x23, x2
mov x20, x1
ldr x8, [x21]
ldr w1, [x21, #8]
blr x8 ; indirect call through arg0's first slot
mov x19, x0
cbz x20, LBB0_15
; %bb.3:
mov x24, #0
mov x25, #0
ldr x26, [x22]
b LBB0_5
LBB0_4: ; in Loop: Header=BB0_5 Depth=1
ldrb w0, [x26]
bl _isdigit
cmp x0, #0
csel x24, x24, x26, eq
add x26, x26, #1
; Scanning loop: x26 walks the string, x24 = last digit seen,
; x25 = last decimal-point candidate.
LBB0_5: ; =>This Inner Loop Header: Depth=1
ldrb w8, [x26]
cmp w8, #92 ; '\\' — possible escape sequence
b.eq LBB0_7
; %bb.6: ; in Loop: Header=BB0_5 Depth=1
cbnz w8, LBB0_8
b LBB0_16
LBB0_7: ; in Loop: Header=BB0_5 Depth=1
ldrb w9, [x26, #1]
cmp w9, #38 ; "\\&" terminates the scan
b.eq LBB0_19
LBB0_8: ; in Loop: Header=BB0_5 Depth=1
ldrb w9, [x23]
cmp w8, w9 ; compare with the decimal-point char
b.ne LBB0_4
; %bb.9: ; in Loop: Header=BB0_5 Depth=1
ldrb w0, [x26, #1]
bl _isdigit
cbz x0, LBB0_11
; %bb.10: ; in Loop: Header=BB0_5 Depth=1
mov x25, x26
add x26, x26, #1
b LBB0_5
LBB0_11: ; in Loop: Header=BB0_5 Depth=1
ldr x8, [x22]
cmp x26, x8
b.ls LBB0_4
; %bb.12: ; in Loop: Header=BB0_5 Depth=1
ldurb w0, [x26, #-1]
bl _isdigit
cbz x0, LBB0_4
; %bb.13: ; in Loop: Header=BB0_5 Depth=1
mov x25, x26
add x26, x26, #1
b LBB0_5
LBB0_14:
mov x19, #0 ; NULL/empty input -> return 0
LBB0_15: ; common epilogue / return
mov x0, x19
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
ldp x26, x25, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #96
ret
LBB0_16:
cbnz x24, LBB0_20
; %bb.17:
ldr x8, [x20]
cmp x8, x19
b.hs LBB0_15
; %bb.18:
str x19, [x20] ; no digits: just record the max width
b LBB0_15
LBB0_19:
mov x25, x26
mov x24, x26
LBB0_20:
cmp x25, #0
csinc x8, x25, x24, ne ; prefer the decimal-point position if found
strb wzr, [sp, #15]
ldr x23, [x22]
cmp x23, x8
b.hs LBB0_25
; %bb.21:
mov x22, #0
sub x24, x8, x23
; Width-accumulation loop: sum per-character callback results into x22.
LBB0_22: ; =>This Inner Loop Header: Depth=1
ldrb w8, [x23], #1
strb w8, [sp, #14]
ldr x8, [x21]
ldr w1, [x21, #8]
add x0, sp, #14
blr x8
add x22, x0, x22
subs x24, x24, #1
b.ne LBB0_22
; %bb.23:
ldr x8, [x20, #8]
subs x9, x22, x8
b.ls LBB0_26
; %bb.24:
ldr x8, [x20, #16]
add x8, x9, x8
stp x22, x8, [x20, #8]
cmp x19, x8
b.hi LBB0_27
b LBB0_15
LBB0_25:
mov x22, #0
ldr x8, [x20, #8]
LBB0_26:
sub x9, x19, x22
add x19, x9, x8
ldr x8, [x20, #16]
cmp x19, x8
b.ls LBB0_15
LBB0_27:
str x19, [x20, #16] ; grow recorded maximum at offset 16
b LBB0_15
.cfi_endproc
; -- End function
.no_dead_strip _tblcalc_number
.subsections_via_symbols
| AnghaBench/freebsd/contrib/mandoc/extr_out.c_tblcalc_number.c | anghabench |
## AnghaBench row: x86-64 | AArch64 | source path | dataset tag.
## dummy_timer_set_mode: intentionally empty stub — no-op body, returns
## immediately (the C original has an empty function body).
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function dummy_timer_set_mode
_dummy_timer_set_mode: ## @dummy_timer_set_mode
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
popq %rbp
retq
.cfi_endproc
## -- End function
.no_dead_strip _dummy_timer_set_mode
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function dummy_timer_set_mode
_dummy_timer_set_mode: ; @dummy_timer_set_mode
.cfi_startproc
; %bb.0:
ret
.cfi_endproc
; -- End function
.no_dead_strip _dummy_timer_set_mode
.subsections_via_symbols
| AnghaBench/fastsocket/kernel/arch/sh/kernel/extr_localtimer.c_dummy_timer_set_mode.c | anghabench |
## AnghaBench row: x86-64 | AArch64 | source path | dataset tag.
## winbond_set_piomode (pata_legacy): reads a config byte via
## winbond_readcfg, computes PIO timing values (ata_timing_compute +
## clamp_val), then writes two config registers via winbond_writecfg.
## Compiler generated — regenerate from the C source rather than editing.
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function winbond_set_piomode
_winbond_set_piomode: ## @winbond_set_piomode
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $24, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rsi, %r14
movq 8(%rdi), %rax
movq (%rax), %r15
movl (%r15), %edi
movl $129, %esi ## config register index 0x81
callq _winbond_readcfg
movl %eax, -44(%rbp) ## 4-byte Spill
testb $64, %al ## bit 6 selects the 20000 vs 30303 clock constant
movl 16(%r14), %esi
movl $30303, %eax ## imm = 0x765F
movl $20000, %ecx ## imm = 0x4E20
cmovel %eax, %ecx
leaq -56(%rbp), %rdx
movq %r14, %rdi
movl $1000, %r8d ## imm = 0x3E8
callq _ata_timing_compute
movl -48(%rbp), %edi
movl $3, %esi
movl $17, %edx
callq _clamp_val ## clamp active time to [3, 17]
movl %eax, %ebx
movl -52(%rbp), %edi
movl $1, %esi
movl $15, %edx
callq _clamp_val ## clamp recovery time to [1, 15]
movl %eax, %r12d
incl %r12d
andl $15, %r12d
shll $4, %ebx
addl $240, %ebx
movzbl %bl, %r13d
leal (%r12,%r13), %esi ## pack active/recovery nibbles into one byte
movl (%r15), %edi
movl -44(%rbp), %edx ## 4-byte Reload
callq _winbond_writecfg
movq 8(%r14), %rax
movq _ATA_DEV_ATA@GOTPCREL(%rip), %rcx
xorl %ebx, %ebx
cmpq (%rcx), %rax
setne %bl
shll $3, %ebx
movq %r14, %rdi
callq _ata_pio_need_iordy
xorl %ecx, %ecx
testl %eax, %eax
sete %cl
leal (%rbx,%rcx,2), %ebx
movl -56(%rbp), %edi
xorl %esi, %esi
movl $3, %edx
callq _clamp_val ## clamp setup time to [0, 3]
## kill: def $eax killed $eax def $rax
shll $6, %eax
leal (%rax,%rbx), %edx
addl $53, %edx
movl (%r15), %edi
leal (%r12,%r13), %esi
incl %esi ## second config register follows the first
callq _winbond_writecfg
addq $24, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _ATA_DEV_ATA,8,3 ## @ATA_DEV_ATA
.no_dead_strip _winbond_set_piomode
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function winbond_set_piomode
_winbond_set_piomode: ; @winbond_set_piomode
.cfi_startproc
; %bb.0:
sub sp, sp, #64
.cfi_def_cfa_offset 64
stp x22, x21, [sp, #16] ; 16-byte Folded Spill
stp x20, x19, [sp, #32] ; 16-byte Folded Spill
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x19, x1
ldr x8, [x0, #8]
ldr x22, [x8]
ldr w0, [x22]
mov w1, #129 ; config register index 0x81
bl _winbond_readcfg
mov x20, x0
ldr w1, [x19, #16]
tst w0, #0x40 ; bit 6 selects the 20000 vs 30303 clock constant
mov w8, #20000
mov w9, #30303
csel w3, w9, w8, eq
mov x2, sp
mov x0, x19
mov w4, #1000
bl _ata_timing_compute
ldr w0, [sp, #8]
mov w1, #3
mov w2, #17
bl _clamp_val ; clamp active time to [3, 17]
mov x21, x0
ldr w0, [sp, #4]
mov w1, #1
mov w2, #15
bl _clamp_val ; clamp recovery time to [1, 15]
add w8, w0, #1
lsl w9, w21, #4
add w9, w9, #240
and w21, w9, #0xf0
bfxil w21, w8, #0, #4 ; pack active/recovery nibbles into one byte
ldr w0, [x22]
mov x1, x21
mov x2, x20
bl _winbond_writecfg
Lloh0:
adrp x8, _ATA_DEV_ATA@GOTPAGE
Lloh1:
ldr x8, [x8, _ATA_DEV_ATA@GOTPAGEOFF]
ldr x9, [x19, #8]
Lloh2:
ldr x8, [x8]
cmp x9, x8
mov w8, #61
mov w9, #53
csel w20, w9, w8, eq
mov x0, x19
bl _ata_pio_need_iordy
orr w8, w20, #0x2
cmp w0, #0
csel w19, w8, w20, eq
ldr w0, [sp]
mov w1, #0
mov w2, #3
bl _clamp_val ; clamp setup time to [0, 3]
bfi w19, w0, #6, #26
ldr w0, [x22]
add w1, w21, #1 ; second config register follows the first
mov x2, x19
bl _winbond_writecfg
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #64
ret
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _ATA_DEV_ATA,8,3 ; @ATA_DEV_ATA
.no_dead_strip _winbond_set_piomode
.subsections_via_symbols
| AnghaBench/linux/drivers/ata/extr_pata_legacy.c_winbond_set_piomode.c | anghabench |
## AnghaBench row: x86-64 | AArch64 | source path | dataset tag.
## cppc_cpufreq_perf_to_khz: converts a perf value to kHz. Uses a
## function-local static cache (cppc_cpufreq_perf_to_khz.max_khz),
## lazily initialized via cppc_get_dmi_max_khz, then computes
## (mul * perf) / div with unsigned arithmetic.
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function cppc_cpufreq_perf_to_khz
_cppc_cpufreq_perf_to_khz: ## @cppc_cpufreq_perf_to_khz
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movl %esi, %r14d
movq %rdi, %rbx
movl (%rdi), %edx
testl %edx, %edx ## both fields at offsets 0 and 4 must be nonzero
je LBB0_4
## %bb.1:
movl 4(%rbx), %eax
testl %eax, %eax
je LBB0_4
## %bb.2:
movl 8(%rbx), %ecx
cmpl %r14d, %ecx ## unsigned compare against the perf argument
jbe LBB0_7
## %bb.3:
subl %edx, %eax
subl 12(%rbx), %ecx
jmp LBB0_7
LBB0_4:
movl _cppc_cpufreq_perf_to_khz.max_khz(%rip), %eax
testl %eax, %eax ## lazy-init the static cache on first use
jne LBB0_6
## %bb.5:
xorl %eax, %eax
callq _cppc_get_dmi_max_khz
movl %eax, _cppc_cpufreq_perf_to_khz.max_khz(%rip)
LBB0_6:
movl 16(%rbx), %ecx
LBB0_7:
imull %r14d, %eax
xorl %edx, %edx ## zero rdx before unsigned divide
divl %ecx
popq %rbx
popq %r14
popq %rbp
retq
.cfi_endproc
## -- End function
.zerofill __DATA,__bss,_cppc_cpufreq_perf_to_khz.max_khz,4,2 ## @cppc_cpufreq_perf_to_khz.max_khz
.no_dead_strip _cppc_cpufreq_perf_to_khz
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function cppc_cpufreq_perf_to_khz
_cppc_cpufreq_perf_to_khz: ; @cppc_cpufreq_perf_to_khz
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x19, x1
mov x20, x0
ldr w9, [x0]
cbz w9, LBB0_4 ; both fields at offsets 0 and 4 must be nonzero
; %bb.1:
ldr w0, [x20, #4]
cbz w0, LBB0_4
; %bb.2:
ldr w8, [x20, #8]
cmp w8, w19 ; unsigned compare against the perf argument
b.ls LBB0_7
; %bb.3:
sub w0, w0, w9
ldr w9, [x20, #12]
sub w8, w8, w9
b LBB0_7
LBB0_4:
adrp x21, _cppc_cpufreq_perf_to_khz.max_khz@PAGE
ldr w0, [x21, _cppc_cpufreq_perf_to_khz.max_khz@PAGEOFF]
cbnz w0, LBB0_6 ; lazy-init the static cache on first use
; %bb.5:
bl _cppc_get_dmi_max_khz
str w0, [x21, _cppc_cpufreq_perf_to_khz.max_khz@PAGEOFF]
LBB0_6:
ldr w8, [x20, #16]
LBB0_7:
mul w9, w0, w19
udiv w0, w9, w8 ; unsigned divide
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
.zerofill __DATA,__bss,_cppc_cpufreq_perf_to_khz.max_khz,4,2 ; @cppc_cpufreq_perf_to_khz.max_khz
.no_dead_strip _cppc_cpufreq_perf_to_khz
.subsections_via_symbols
| AnghaBench/linux/drivers/cpufreq/extr_cppc_cpufreq.c_cppc_cpufreq_perf_to_khz.c | anghabench |
## x86-64 half of an AnghaBench row (zlib example.c). The row's AArch64
## half and metadata lie beyond this function.
## test_compress: compress() the "hello" string, uncompress() it back,
## strcmp() against the original; any failure prints to stderr and exit(1).
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _test_compress ## -- Begin function test_compress
.p2align 4, 0x90
_test_compress: ## @test_compress
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
subq $16, %rsp
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq %rdx, %rbx
movq %rdi, %r14
movq %rsi, -24(%rbp)
movq %rcx, -32(%rbp)
leaq _hello(%rip), %rdx
leaq -24(%rbp), %rsi
movl $14, %ecx ## sizeof("hello, hello!") + NUL
callq _compress
testl %eax, %eax
jne LBB0_1
## %bb.3:
movabsq $28542640608928103, %rax ## imm = 0x65676162726167 ("garbage")
movq %rax, (%rbx)
movq -24(%rbp), %rcx
leaq -32(%rbp), %rsi
movq %rbx, %rdi
movq %r14, %rdx
callq _uncompress
testl %eax, %eax
jne LBB0_4
## %bb.5:
leaq _hello(%rip), %rsi
movq %rbx, %rdi
callq _strcmp ## verify round-trip matches the original
testl %eax, %eax
jne LBB0_7
## %bb.6:
leaq L_.str.5(%rip), %rdi
movq %rbx, %rsi
xorl %eax, %eax
callq _printf
addq $16, %rsp
popq %rbx
popq %r14
popq %rbp
retq
LBB0_1: ## compress() failed
movq ___stderrp@GOTPCREL(%rip), %rcx
movq (%rcx), %rdi
leaq L_.str(%rip), %rsi
leaq L_.str.1(%rip), %rdx
jmp LBB0_2
LBB0_4: ## uncompress() failed
movq ___stderrp@GOTPCREL(%rip), %rcx
movq (%rcx), %rdi
leaq L_.str(%rip), %rsi
leaq L_.str.3(%rip), %rdx
LBB0_2:
movl %eax, %ecx
xorl %eax, %eax
callq _fprintf
movl $1, %edi
callq _exit
LBB0_7: ## round-trip data mismatch
movq ___stderrp@GOTPCREL(%rip), %rax
movq (%rax), %rcx
leaq L_.str.4(%rip), %rdi
movl $15, %esi
movl $1, %edx
callq _fwrite
movl $1, %edi
callq _exit
.cfi_endproc
## -- End function
## test_gzio (zlib example.c): exercises the gz* file API — gzopen/gzputc/
## gzputs/gzprintf/gzseek/gzclose on write, then gzopen/gzread/gzseek/
## gztell/gzgetc/gzungetc/gzgets on read-back, validating each result.
## Any mismatch reports via stderr and exit(1).
.globl _test_gzio ## -- Begin function test_gzio
.p2align 4, 0x90
_test_gzio: ## @test_gzio
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $16, %rsp
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdx, %r14
movq %rsi, %r15
movq %rdi, %r12
leaq L_.str.6(%rip), %rsi
callq _gzopen ## open for writing
testq %rax, %rax
je LBB1_1
## %bb.4:
movq %rax, %rbx
movq %rax, %rdi
movl $104, %esi ## 'h'
callq _gzputc
leaq L_.str.8(%rip), %rsi
movq %rbx, %rdi
callq _gzputs
cmpl $4, %eax
jne LBB1_5
## %bb.8:
leaq L_.str.10(%rip), %rsi
leaq _dictionary(%rip), %rdx
movq %rbx, %rdi
xorl %eax, %eax ## variadic call: no vector args
callq _gzprintf
cmpl $8, %eax
jne LBB1_9
## %bb.10:
movl $1, %esi
movq %rbx, %rdi
movl $1, %edx
callq _gzseek ## skip one byte (writes a zero byte)
movq %rbx, %rdi
callq _gzclose
leaq L_.str.13(%rip), %rsi
movq %r12, %rdi
callq _gzopen ## reopen for reading
testq %rax, %rax
je LBB1_1
## %bb.11:
movq %rax, %rbx
movabsq $28542640608928103, %rax ## imm = 0x65676162726167 ("garbage")
movq %rax, (%r15)
movq %rbx, %rdi
movq %r15, %rsi
movl %r14d, %edx
callq _gzread
cmpl $14, %eax
jne LBB1_12
## %bb.13:
leaq _hello(%rip), %rsi
movq %r15, %rdi
callq _strcmp
testl %eax, %eax
jne LBB1_14
## %bb.15:
leaq L_.str.16(%rip), %rdi
movq %r15, %rsi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movq $-8, %rsi
movl $1, %edx
callq _gzseek ## seek back 8 from current position
movq %rax, %r12
cmpq $6, %rax
jne LBB1_29
## %bb.16:
movq %rbx, %rdi
callq _gztell
cmpq $6, %rax
jne LBB1_29
## %bb.17:
movl (%rbx), %eax ## inlined gzgetc fast path on the stream struct
testl %eax, %eax
je LBB1_19
## %bb.18:
decl %eax
movl %eax, (%rbx)
incq 16(%rbx)
movq 8(%rbx), %rax
leaq 1(%rax), %rcx
movq %rcx, 8(%rbx)
movzbl (%rax), %eax
cmpl $32, %eax ## expect ' '
jne LBB1_21
LBB1_22:
movl $32, %edi
movq %rbx, %rsi
callq _gzungetc ## push the space back
cmpl $32, %eax
jne LBB1_23
## %bb.24:
movq %rbx, %rdi
movq %r15, %rsi
movl %r14d, %edx
callq _gzgets
movq %r15, %rdi
callq _strlen
cmpq $7, %rax
jne LBB1_25
## %bb.26:
leaq _hello+6(%rip), %rsi
movq %r15, %rdi
callq _strcmp
testl %eax, %eax
jne LBB1_27
## %bb.28:
leaq L_.str.22(%rip), %rdi
movq %r15, %rsi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
addq $16, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
jmp _gzclose ## TAILCALL
LBB1_19:
movq %rbx, %rdi
callq _gzgetc ## slow path when buffer empty
cmpl $32, %eax
je LBB1_22
LBB1_21:
movq ___stderrp@GOTPCREL(%rip), %rax
movq (%rax), %rcx
leaq L_.str.18(%rip), %rdi
jmp LBB1_2
LBB1_1: ## gzopen failed
movq ___stderrp@GOTPCREL(%rip), %rax
movq (%rax), %rcx
leaq L_.str.7(%rip), %rdi
LBB1_2:
movl $13, %esi
jmp LBB1_3
LBB1_29: ## gzseek/gztell mismatch
movq ___stderrp@GOTPCREL(%rip), %rax
movq (%rax), %r14
movq %rbx, %rdi
callq _gztell
leaq L_.str.17(%rip), %rsi
movq %r14, %rdi
movq %r12, %rdx
movq %rax, %rcx
xorl %eax, %eax
callq _fprintf
movl $1, %edi
callq _exit
LBB1_5: ## gzputs failed
movq ___stderrp@GOTPCREL(%rip), %rax
movq (%rax), %r14
leaq -36(%rbp), %rsi
movq %rbx, %rdi
callq _gzerror
leaq L_.str.9(%rip), %rsi
jmp LBB1_6
LBB1_9: ## gzprintf failed
movq ___stderrp@GOTPCREL(%rip), %rax
movq (%rax), %r14
leaq -36(%rbp), %rsi
movq %rbx, %rdi
callq _gzerror
leaq L_.str.12(%rip), %rsi
jmp LBB1_6
LBB1_12: ## gzread failed
movq ___stderrp@GOTPCREL(%rip), %rax
movq (%rax), %r14
leaq -36(%rbp), %rsi
movq %rbx, %rdi
callq _gzerror
leaq L_.str.14(%rip), %rsi
jmp LBB1_6
LBB1_14: ## read-back mismatch
movq ___stderrp@GOTPCREL(%rip), %rax
movq (%rax), %rdi
leaq L_.str.15(%rip), %rsi
movq %r15, %rdx
jmp LBB1_7
LBB1_23: ## gzungetc failed
movq ___stderrp@GOTPCREL(%rip), %rax
movq (%rax), %rcx
leaq L_.str.19(%rip), %rdi
movl $15, %esi
jmp LBB1_3
LBB1_25: ## gzgets failed
movq ___stderrp@GOTPCREL(%rip), %rax
movq (%rax), %r14
leaq -36(%rbp), %rsi
movq %rbx, %rdi
callq _gzerror
leaq L_.str.20(%rip), %rsi
LBB1_6:
movq %r14, %rdi
movq %rax, %rdx
LBB1_7:
xorl %eax, %eax
callq _fprintf
movl $1, %edi
callq _exit
LBB1_27: ## gzgets content mismatch
movq ___stderrp@GOTPCREL(%rip), %rax
movq (%rax), %rcx
leaq L_.str.21(%rip), %rdi
movl $24, %esi
LBB1_3:
movl $1, %edx
callq _fwrite
movl $1, %edi
callq _exit
.cfi_endproc
## -- End function
## test_deflate (zlib example.c): deflateInit_ at default level (-1),
## streams "hello" one byte at a time (avail_in = avail_out = 1), finishes
## with Z_FINISH (4), then deflateEnd. Errors report to stderr and exit(1).
.globl _test_deflate ## -- Begin function test_deflate
.p2align 4, 0x90
_test_deflate: ## @test_deflate
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
subq $112, %rsp
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq %rsi, %rbx
movq %rdi, %r14
xorps %xmm0, %xmm0
movups %xmm0, -64(%rbp) ## zero the zalloc/zfree/opaque fields
movq $0, -48(%rbp)
leaq L_.str.23(%rip), %rdx
leaq -128(%rbp), %rdi
movl $-1, %esi ## Z_DEFAULT_COMPRESSION
movl $112, %ecx ## sizeof(z_stream)
callq _deflateInit_
testl %eax, %eax
jne LBB2_1
## %bb.3:
leaq _hello(%rip), %rax
movq %rax, -128(%rbp)
movq %r14, -104(%rbp)
leaq -128(%rbp), %r14
.p2align 4, 0x90
LBB2_4: ## =>This Inner Loop Header: Depth=1
cmpq $14, -112(%rbp) ## all input consumed?
je LBB2_8
## %bb.5: ## in Loop: Header=BB2_4 Depth=1
cmpq %rbx, -88(%rbp) ## output buffer full?
jae LBB2_8
## %bb.6: ## in Loop: Header=BB2_4 Depth=1
movl $1, -96(%rbp) ## avail_out = 1: force byte-at-a-time
movl $1, -120(%rbp) ## avail_in = 1
movq %r14, %rdi
xorl %esi, %esi
callq _deflate
testl %eax, %eax
je LBB2_4
jmp LBB2_7
.p2align 4, 0x90
LBB2_8: ## =>This Inner Loop Header: Depth=1
movl $1, -96(%rbp)
leaq -128(%rbp), %rdi
movl $4, %esi ## Z_FINISH
callq _deflate
testl %eax, %eax
je LBB2_8
## %bb.9:
cmpl $1, %eax ## expect Z_STREAM_END
jne LBB2_7
## %bb.10:
leaq -128(%rbp), %rdi
callq _deflateEnd
testl %eax, %eax
jne LBB2_11
## %bb.12:
addq $112, %rsp
popq %rbx
popq %r14
popq %rbp
retq
LBB2_7: ## deflate failed
movq ___stderrp@GOTPCREL(%rip), %rcx
movq (%rcx), %rdi
leaq L_.str(%rip), %rsi
leaq L_.str.25(%rip), %rdx
LBB2_2:
movl %eax, %ecx
xorl %eax, %eax
callq _fprintf
movl $1, %edi
callq _exit
LBB2_1: ## deflateInit_ failed
movq ___stderrp@GOTPCREL(%rip), %rcx
movq (%rcx), %rdi
leaq L_.str(%rip), %rsi
leaq L_.str.24(%rip), %rdx
jmp LBB2_2
LBB2_11: ## deflateEnd failed
movq ___stderrp@GOTPCREL(%rip), %rcx
movq (%rcx), %rdi
leaq L_.str(%rip), %rsi
leaq L_.str.26(%rip), %rdx
jmp LBB2_2
.cfi_endproc
## -- End function
## test_inflate (zlib example.c): inflateInit_, inflates byte-at-a-time
## (avail_in = avail_out = 1) until Z_STREAM_END, inflateEnd, then
## strcmp against "hello". Errors report to stderr and exit(1).
.globl _test_inflate ## -- Begin function test_inflate
.p2align 4, 0x90
_test_inflate: ## @test_inflate
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $112, %rsp
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rcx, %r15
movq %rdx, %r14
movq %rsi, %rbx
movabsq $28542640608928103, %rax ## imm = 0x65676162726167 ("garbage")
movq %rax, (%rdx)
xorps %xmm0, %xmm0
movups %xmm0, -80(%rbp) ## zero the zalloc/zfree/opaque fields
movq $0, -64(%rbp)
movq %rdi, -144(%rbp)
movl $0, -136(%rbp)
movq %rdx, -120(%rbp)
leaq L_.str.23(%rip), %rsi
leaq -144(%rbp), %rdi
movl $112, %edx ## sizeof(z_stream)
callq _inflateInit_
testl %eax, %eax
jne LBB3_7
## %bb.1:
leaq -144(%rbp), %r12
.p2align 4, 0x90
LBB3_2: ## =>This Inner Loop Header: Depth=1
cmpq %r15, -104(%rbp) ## output space exhausted?
jae LBB3_9
## %bb.3: ## in Loop: Header=BB3_2 Depth=1
cmpq %rbx, -128(%rbp) ## input exhausted?
jae LBB3_9
## %bb.4: ## in Loop: Header=BB3_2 Depth=1
movl $1, -112(%rbp) ## avail_out = 1
movl $1, -136(%rbp) ## avail_in = 1
movq %r12, %rdi
xorl %esi, %esi
callq _inflate
testl %eax, %eax
je LBB3_2
## %bb.5:
cmpl $1, %eax ## Z_STREAM_END is the expected terminator
jne LBB3_6
LBB3_9:
leaq -144(%rbp), %rdi
callq _inflateEnd
testl %eax, %eax
jne LBB3_10
## %bb.11:
leaq _hello(%rip), %rsi
movq %r14, %rdi
callq _strcmp ## verify decompressed bytes
testl %eax, %eax
jne LBB3_13
## %bb.12:
leaq L_.str.31(%rip), %rdi
movq %r14, %rsi
xorl %eax, %eax
callq _printf
addq $112, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
LBB3_6: ## inflate failed
movq ___stderrp@GOTPCREL(%rip), %rcx
movq (%rcx), %rdi
leaq L_.str(%rip), %rsi
leaq L_.str.28(%rip), %rdx
LBB3_8:
movl %eax, %ecx
xorl %eax, %eax
callq _fprintf
movl $1, %edi
callq _exit
LBB3_7: ## inflateInit_ failed
movq ___stderrp@GOTPCREL(%rip), %rcx
movq (%rcx), %rdi
leaq L_.str(%rip), %rsi
leaq L_.str.27(%rip), %rdx
jmp LBB3_8
LBB3_10: ## inflateEnd failed
movq ___stderrp@GOTPCREL(%rip), %rcx
movq (%rcx), %rdi
leaq L_.str(%rip), %rsi
leaq L_.str.29(%rip), %rdx
jmp LBB3_8
LBB3_13: ## data mismatch
movq ___stderrp@GOTPCREL(%rip), %rax
movq (%rax), %rcx
leaq L_.str.30(%rip), %rdi
movl $12, %esi
movl $1, %edx
callq _fwrite
movl $1, %edi
callq _exit
.cfi_endproc
## -- End function
## test_large_deflate (zlib example.c): deflateInit_ at Z_BEST_SPEED (1),
## compresses a large buffer, switches parameters mid-stream twice with
## deflateParams (to store-only, then to level 9/filtered), feeds the data
## again, then Z_FINISH + deflateEnd. Errors report to stderr and exit(1).
.globl _test_large_deflate ## -- Begin function test_large_deflate
.p2align 4, 0x90
_test_large_deflate: ## @test_large_deflate
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $120, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rcx, %r14
movq %rdx, %r15
movq %rsi, %rbx
movq %rdi, %r12
xorps %xmm0, %xmm0
movups %xmm0, -88(%rbp) ## zero the zalloc/zfree/opaque fields
movq $0, -72(%rbp)
leaq L_.str.23(%rip), %rdx
leaq -152(%rbp), %rdi
movl $1, %esi ## Z_BEST_SPEED
movl $112, %ecx ## sizeof(z_stream)
callq _deflateInit_
testl %eax, %eax
jne LBB4_1
## %bb.3:
movq %r12, -128(%rbp)
movl %ebx, -120(%rbp)
movq %r15, -152(%rbp)
movl %r14d, -144(%rbp)
leaq -152(%rbp), %rdi
xorl %esi, %esi
callq _deflate
testl %eax, %eax
jne LBB4_4
## %bb.5:
cmpl $0, -144(%rbp) ## all input must have been consumed
jne LBB4_6
## %bb.8:
leaq -152(%rbp), %r13
movq %r13, %rdi
xorl %esi, %esi
xorl %edx, %edx
callq _deflateParams ## switch to no-compression mid-stream
movq %r12, -152(%rbp)
shrl %ebx
movl %ebx, -144(%rbp) ## feed half the buffer this time
movq %r13, %rdi
xorl %esi, %esi
callq _deflate
testl %eax, %eax
jne LBB4_4
## %bb.9:
leaq -152(%rbp), %rbx
movq %rbx, %rdi
movl $9, %esi ## Z_BEST_COMPRESSION
movl $1, %edx
callq _deflateParams ## switch back to max compression
movq %r15, -152(%rbp)
movl %r14d, -144(%rbp)
movq %rbx, %rdi
xorl %esi, %esi
callq _deflate
testl %eax, %eax
jne LBB4_4
## %bb.10:
leaq -152(%rbp), %rdi
movl $4, %esi ## Z_FINISH
callq _deflate
cmpl $1, %eax ## expect Z_STREAM_END
jne LBB4_11
## %bb.12:
leaq -152(%rbp), %rdi
callq _deflateEnd
testl %eax, %eax
jne LBB4_13
## %bb.14:
addq $120, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB4_4: ## deflate failed
movq ___stderrp@GOTPCREL(%rip), %rcx
movq (%rcx), %rdi
leaq L_.str(%rip), %rsi
leaq L_.str.25(%rip), %rdx
jmp LBB4_2
LBB4_1: ## deflateInit_ failed
movq ___stderrp@GOTPCREL(%rip), %rcx
movq (%rcx), %rdi
leaq L_.str(%rip), %rsi
leaq L_.str.24(%rip), %rdx
jmp LBB4_2
LBB4_6: ## deflate did not consume all input
movq ___stderrp@GOTPCREL(%rip), %rax
movq (%rax), %rcx
leaq L_.str.32(%rip), %rdi
movl $19, %esi
jmp LBB4_7
LBB4_11: ## stream did not end cleanly
movq ___stderrp@GOTPCREL(%rip), %rax
movq (%rax), %rcx
leaq L_.str.33(%rip), %rdi
movl $35, %esi
LBB4_7:
movl $1, %edx
callq _fwrite
movl $1, %edi
callq _exit
LBB4_13: ## deflateEnd failed
movq ___stderrp@GOTPCREL(%rip), %rcx
movq (%rcx), %rdi
leaq L_.str(%rip), %rsi
leaq L_.str.26(%rip), %rdx
LBB4_2:
movl %eax, %ecx
xorl %eax, %eax
callq _fprintf
movl $1, %edi
callq _exit
.cfi_endproc
## -- End function
## test_large_inflate (zlib example.c): inflateInit_, repeatedly inflates
## (recycling the same output buffer) until Z_STREAM_END, inflateEnd, then
## checks total_out == 2*uncomprLen + comprLen/2 as produced by
## test_large_deflate. Errors report to stderr and exit(1).
.globl _test_large_inflate ## -- Begin function test_large_inflate
.p2align 4, 0x90
_test_large_inflate: ## @test_large_inflate
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $112, %rsp
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rcx, %r15
movq %rdx, %rbx
movq %rsi, %r14
movabsq $28542640608928103, %rax ## imm = 0x65676162726167 ("garbage")
movq %rax, (%rdx)
xorps %xmm0, %xmm0
movups %xmm0, -80(%rbp) ## zero the zalloc/zfree/opaque fields
movq $0, -64(%rbp)
movq %rdi, -144(%rbp)
movl %r14d, -136(%rbp)
leaq L_.str.23(%rip), %rsi
leaq -144(%rbp), %rdi
movl $112, %edx ## sizeof(z_stream)
callq _inflateInit_
testl %eax, %eax
jne LBB5_5
## %bb.1:
leaq -144(%rbp), %r12
.p2align 4, 0x90
LBB5_2: ## =>This Inner Loop Header: Depth=1
movq %rbx, -120(%rbp) ## reset next_out: discard previous output
movl %r15d, -112(%rbp)
movq %r12, %rdi
xorl %esi, %esi
callq _inflate
testl %eax, %eax
je LBB5_2
## %bb.3:
cmpl $1, %eax ## expect Z_STREAM_END
jne LBB5_4
## %bb.7:
leaq -144(%rbp), %rdi
callq _inflateEnd
testl %eax, %eax
jne LBB5_8
## %bb.9:
movq -104(%rbp), %rdx ## total_out
shrq %r14
leaq (%r14,%r15,2), %rax ## expected: comprLen/2 + 2*uncomprLen
cmpq %rax, %rdx
jne LBB5_11
## %bb.10:
leaq L_str(%rip), %rdi
callq _puts
addq $112, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
LBB5_4: ## inflate failed
movq ___stderrp@GOTPCREL(%rip), %rcx
movq (%rcx), %rdi
leaq L_.str(%rip), %rsi
leaq L_.str.34(%rip), %rdx
LBB5_6:
movl %eax, %ecx
xorl %eax, %eax
callq _fprintf
movl $1, %edi
callq _exit
LBB5_5: ## inflateInit_ failed
movq ___stderrp@GOTPCREL(%rip), %rcx
movq (%rcx), %rdi
leaq L_.str(%rip), %rsi
leaq L_.str.27(%rip), %rdx
jmp LBB5_6
LBB5_8: ## inflateEnd failed
movq ___stderrp@GOTPCREL(%rip), %rcx
movq (%rcx), %rdi
leaq L_.str(%rip), %rsi
leaq L_.str.29(%rip), %rdx
jmp LBB5_6
LBB5_11: ## total_out mismatch
movq ___stderrp@GOTPCREL(%rip), %rax
movq (%rax), %rdi
leaq L_.str.35(%rip), %rsi
xorl %eax, %eax
callq _fprintf
movl $1, %edi
callq _exit
.cfi_endproc
## -- End function
## test_flush (zlib example.c): deflates the first 3 bytes with
## Z_FULL_FLUSH (3), deliberately corrupts byte 3 of the output (to be
## repaired by test_sync), finishes with Z_FINISH, and writes the produced
## length (total_out + 1) back through the in/out length pointer.
.globl _test_flush ## -- Begin function test_flush
.p2align 4, 0x90
_test_flush: ## @test_flush
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
subq $112, %rsp
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq %rsi, %r14
movq %rdi, %rbx
xorps %xmm0, %xmm0
movups %xmm0, -64(%rbp) ## zero the zalloc/zfree/opaque fields
movq $0, -48(%rbp)
leaq L_.str.23(%rip), %rdx
leaq -128(%rbp), %rdi
movl $-1, %esi ## Z_DEFAULT_COMPRESSION
movl $112, %ecx ## sizeof(z_stream)
callq _deflateInit_
testl %eax, %eax
jne LBB6_1
## %bb.3:
leaq _hello(%rip), %rax
movq %rax, -128(%rbp)
movq %rbx, -104(%rbp)
movl $3, -120(%rbp) ## avail_in = 3
movl (%r14), %eax
movl %eax, -96(%rbp)
leaq -128(%rbp), %rdi
movl $3, %esi ## Z_FULL_FLUSH
callq _deflate
testl %eax, %eax
jne LBB6_4
## %bb.5:
incb 3(%rbx) ## corrupt one byte for the later inflateSync test
movl $11, -120(%rbp) ## feed the remaining input
leaq -128(%rbp), %rdi
movl $4, %esi ## Z_FINISH
callq _deflate
cmpl $2, %eax ## Z_OK or Z_STREAM_END both acceptable
jae LBB6_4
## %bb.6:
leaq -128(%rbp), %rdi
callq _deflateEnd
testl %eax, %eax
jne LBB6_7
## %bb.8:
movq -88(%rbp), %rax ## return produced length via *pointer arg
movq %rax, (%r14)
addq $112, %rsp
popq %rbx
popq %r14
popq %rbp
retq
LBB6_4: ## deflate failed
movq ___stderrp@GOTPCREL(%rip), %rcx
movq (%rcx), %rdi
leaq L_.str(%rip), %rsi
leaq L_.str.25(%rip), %rdx
jmp LBB6_2
LBB6_1: ## deflateInit_ failed
movq ___stderrp@GOTPCREL(%rip), %rcx
movq (%rcx), %rdi
leaq L_.str(%rip), %rsi
leaq L_.str.24(%rip), %rdx
jmp LBB6_2
LBB6_7: ## deflateEnd failed
movq ___stderrp@GOTPCREL(%rip), %rcx
movq (%rcx), %rdi
leaq L_.str(%rip), %rsi
leaq L_.str.26(%rip), %rdx
LBB6_2:
movl %eax, %ecx
xorl %eax, %eax
callq _fprintf
movl $1, %edi
callq _exit
.cfi_endproc
## -- End function
## test_sync (zlib example.c): inflates the first 2 bytes of the corrupted
## stream from test_flush, calls inflateSync to skip the damaged section,
## then expects the final inflate to return Z_DATA_ERROR (-3) because the
## corrupted compressed data fails its checksum.
.globl _test_sync ## -- Begin function test_sync
.p2align 4, 0x90
_test_sync: ## @test_sync
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
subq $120, %rsp
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rcx, %r15
movq %rdx, %r14
movq %rsi, %rbx
movabsq $28542640608928103, %rax ## imm = 0x65676162726167 ("garbage")
movq %rax, (%rdx)
xorps %xmm0, %xmm0
movups %xmm0, -72(%rbp) ## zero the zalloc/zfree/opaque fields
movq $0, -56(%rbp)
movq %rdi, -136(%rbp)
movl $2, -128(%rbp) ## just the zlib header first
leaq L_.str.23(%rip), %rsi
leaq -136(%rbp), %rdi
movl $112, %edx ## sizeof(z_stream)
callq _inflateInit_
testl %eax, %eax
jne LBB7_1
## %bb.3:
movq %r14, -112(%rbp)
movl %r15d, -104(%rbp)
leaq -136(%rbp), %r15
movq %r15, %rdi
xorl %esi, %esi
callq _inflate
addl $-2, %ebx
movl %ebx, -128(%rbp) ## remaining input after the header
movq %r15, %rdi
callq _inflateSync ## resync past the corrupted byte
testl %eax, %eax
jne LBB7_4
## %bb.5:
leaq -136(%rbp), %rdi
movl $4, %esi ## Z_FINISH
callq _inflate
cmpl $-3, %eax ## Z_DATA_ERROR expected (bad checksum)
jne LBB7_9
## %bb.6:
leaq -136(%rbp), %rdi
callq _inflateEnd
testl %eax, %eax
jne LBB7_7
## %bb.8:
leaq L_.str.39(%rip), %rdi
movq %r14, %rsi
xorl %eax, %eax
callq _printf
addq $120, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
LBB7_1: ## inflateInit_ failed
movq ___stderrp@GOTPCREL(%rip), %rcx
movq (%rcx), %rdi
leaq L_.str(%rip), %rsi
leaq L_.str.27(%rip), %rdx
jmp LBB7_2
LBB7_4: ## inflateSync failed
movq ___stderrp@GOTPCREL(%rip), %rcx
movq (%rcx), %rdi
leaq L_.str(%rip), %rsi
leaq L_.str.37(%rip), %rdx
jmp LBB7_2
LBB7_9: ## inflate did not report the data error
movq ___stderrp@GOTPCREL(%rip), %rax
movq (%rax), %rcx
leaq L_.str.38(%rip), %rdi
movl $33, %esi
movl $1, %edx
callq _fwrite
movl $1, %edi
callq _exit
LBB7_7: ## inflateEnd failed
movq ___stderrp@GOTPCREL(%rip), %rcx
movq (%rcx), %rdi
leaq L_.str(%rip), %rsi
leaq L_.str.29(%rip), %rdx
LBB7_2:
movl %eax, %ecx
xorl %eax, %eax
callq _fprintf
movl $1, %edi
callq _exit
.cfi_endproc
## -- End function
## test_dict_deflate (zlib example.c): deflateInit_ at level 9, installs a
## preset dictionary via deflateSetDictionary, saves the stream's adler
## checksum into the global dictId for test_dict_inflate, then compresses
## "hello" in a single Z_FINISH call. Errors report to stderr and exit(1).
.globl _test_dict_deflate ## -- Begin function test_dict_deflate
.p2align 4, 0x90
_test_dict_deflate: ## @test_dict_deflate
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
subq $112, %rsp
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq %rsi, %r14
movq %rdi, %rbx
xorps %xmm0, %xmm0
movups %xmm0, -64(%rbp) ## zero the zalloc/zfree/opaque fields
movq $0, -48(%rbp)
leaq L_.str.23(%rip), %rdx
leaq -128(%rbp), %rdi
movl $9, %esi ## Z_BEST_COMPRESSION
movl $112, %ecx ## sizeof(z_stream)
callq _deflateInit_
testl %eax, %eax
jne LBB8_1
## %bb.3:
leaq _dictionary(%rip), %rsi
leaq -128(%rbp), %rdi
movl $6, %edx ## sizeof(dictionary)
callq _deflateSetDictionary
testl %eax, %eax
jne LBB8_4
## %bb.5:
movq -32(%rbp), %rax ## stream adler = dictionary id
movq _dictId@GOTPCREL(%rip), %rcx
movq %rax, (%rcx)
movq %rbx, -104(%rbp)
movl %r14d, -96(%rbp)
leaq _hello(%rip), %rax
movq %rax, -128(%rbp)
movl $14, -120(%rbp) ## avail_in = sizeof("hello, hello!")
leaq -128(%rbp), %rdi
movl $4, %esi ## Z_FINISH
callq _deflate
cmpl $1, %eax ## expect Z_STREAM_END
jne LBB8_9
## %bb.6:
leaq -128(%rbp), %rdi
callq _deflateEnd
testl %eax, %eax
jne LBB8_7
## %bb.8:
addq $112, %rsp
popq %rbx
popq %r14
popq %rbp
retq
LBB8_1: ## deflateInit_ failed
movq ___stderrp@GOTPCREL(%rip), %rcx
movq (%rcx), %rdi
leaq L_.str(%rip), %rsi
leaq L_.str.24(%rip), %rdx
jmp LBB8_2
LBB8_4: ## deflateSetDictionary failed
movq ___stderrp@GOTPCREL(%rip), %rcx
movq (%rcx), %rdi
leaq L_.str(%rip), %rsi
leaq L_.str.40(%rip), %rdx
jmp LBB8_2
LBB8_9: ## stream did not end cleanly
movq ___stderrp@GOTPCREL(%rip), %rax
movq (%rax), %rcx
leaq L_.str.33(%rip), %rdi
movl $35, %esi
movl $1, %edx
callq _fwrite
movl $1, %edi
callq _exit
LBB8_7: ## deflateEnd failed
movq ___stderrp@GOTPCREL(%rip), %rcx
movq (%rcx), %rdi
leaq L_.str(%rip), %rsi
leaq L_.str.26(%rip), %rdx
LBB8_2:
movl %eax, %ecx
xorl %eax, %eax
callq _fprintf
movl $1, %edi
callq _exit
.cfi_endproc
## -- End function
## test_dict_inflate (zlib example.c): inflateInit_, loops on inflate();
## when inflate returns Z_NEED_DICT (2) it verifies stream.adler against
## the global dictId and supplies the dictionary via inflateSetDictionary,
## continuing until Z_STREAM_END; finally strcmp's the output against
## "hello". Errors report to stderr and exit(1).
.globl _test_dict_inflate ## -- Begin function test_dict_inflate
.p2align 4, 0x90
_test_dict_inflate: ## @test_dict_inflate
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $112, %rsp
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rcx, %rbx
movq %rdx, %r14
movabsq $28542640608928103, %rax ## imm = 0x65676162726167 ("garbage")
movq %rax, (%rdx)
xorps %xmm0, %xmm0
movups %xmm0, -80(%rbp) ## zero the zalloc/zfree/opaque fields
movq $0, -64(%rbp)
movq %rdi, -144(%rbp)
movl %esi, -136(%rbp)
leaq L_.str.23(%rip), %rsi
leaq -144(%rbp), %rdi
movl $112, %edx ## sizeof(z_stream)
callq _inflateInit_
testl %eax, %eax
jne LBB9_1
## %bb.3:
movq %r14, -120(%rbp)
movl %ebx, -112(%rbp)
leaq -144(%rbp), %rbx
movq _dictId@GOTPCREL(%rip), %r12
leaq _dictionary(%rip), %r15
jmp LBB9_4
.p2align 4, 0x90
LBB9_5: ## in Loop: Header=BB9_4 Depth=1
cmpl $1, %eax ## Z_STREAM_END -> done
je LBB9_6
## %bb.12: ## in Loop: Header=BB9_4 Depth=1
testl %eax, %eax
jne LBB9_13
LBB9_4: ## =>This Inner Loop Header: Depth=1
movq %rbx, %rdi
xorl %esi, %esi
callq _inflate
cmpl $2, %eax ## Z_NEED_DICT?
jne LBB9_5
## %bb.8: ## in Loop: Header=BB9_4 Depth=1
movq -48(%rbp), %rax ## stream adler must match saved dictId
cmpq (%r12), %rax
jne LBB9_9
## %bb.11: ## in Loop: Header=BB9_4 Depth=1
movq %rbx, %rdi
movq %r15, %rsi
movl $6, %edx ## sizeof(dictionary)
callq _inflateSetDictionary
testl %eax, %eax
je LBB9_4
LBB9_13: ## inflate failed
movq ___stderrp@GOTPCREL(%rip), %rcx
movq (%rcx), %rdi
leaq L_.str(%rip), %rsi
leaq L_.str.42(%rip), %rdx
LBB9_2:
movl %eax, %ecx
xorl %eax, %eax
callq _fprintf
movl $1, %edi
callq _exit
LBB9_6:
leaq -144(%rbp), %rdi
callq _inflateEnd
testl %eax, %eax
jne LBB9_7
## %bb.14:
leaq _hello(%rip), %rsi
movq %r14, %rdi
callq _strcmp ## verify decompressed bytes
testl %eax, %eax
jne LBB9_15
## %bb.16:
leaq L_.str.44(%rip), %rdi
movq %r14, %rsi
xorl %eax, %eax
callq _printf
addq $112, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
LBB9_9: ## dictionary id mismatch
movq ___stderrp@GOTPCREL(%rip), %rax
movq (%rax), %rcx
leaq L_.str.41(%rip), %rdi
movl $21, %esi
LBB9_10:
movl $1, %edx
callq _fwrite
movl $1, %edi
callq _exit
LBB9_1: ## inflateInit_ failed
movq ___stderrp@GOTPCREL(%rip), %rcx
movq (%rcx), %rdi
leaq L_.str(%rip), %rsi
leaq L_.str.27(%rip), %rdx
jmp LBB9_2
LBB9_7: ## inflateEnd failed
movq ___stderrp@GOTPCREL(%rip), %rcx
movq (%rcx), %rdi
leaq L_.str(%rip), %rsi
leaq L_.str.29(%rip), %rdx
jmp LBB9_2
LBB9_15: ## data mismatch
movq ___stderrp@GOTPCREL(%rip), %rax
movq (%rax), %rcx
leaq L_.str.43(%rip), %rdi
movl $22, %esi
jmp LBB9_10
.cfi_endproc
## -- End function
## -----------------------------------------------------------------------
## main(argc, argv) — driver for the zlib example test suite.
## 1. Verifies the runtime zlib major version starts with '1' (else exit 1)
##    and warns on stderr if it differs from compile-time "1.2.11".
## 2. Prints version + compile flags.
## 3. Allocates two 40000-byte zeroed buffers (compr, uncompr) via calloc;
##    exits "out of memory" if either is NULL.
## 4. Runs test_compress/gzio/deflate/inflate/large_*/flush/sync/dict_*,
##    passing argv[1] (or "foo.gz") as the gzio filename.
## ABI: SysV AMD64.  Register roles: r12d = argc, r15 = argv,
## rbx = compr, r14 = uncompr; -40(%rbp) holds uncomprLen for test_flush.
## NOTE(review): clang-generated code; comments added only, code unchanged.
## -----------------------------------------------------------------------
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $16, %rsp
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rsi, %r15
movl %edi, %r12d
movq $40000, -40(%rbp) ## imm = 0x9C40
callq _zlibVersion
## 49 == '1': first character of the version string must match.
cmpb $49, (%rax)
jne LBB10_9
## %bb.1:
callq _zlibVersion
leaq L_.str.23(%rip), %rsi
movq %rax, %rdi
callq _strcmp
testl %eax, %eax
je LBB10_3
## %bb.2:
## fwrite("warning: different zlib version\n", 32, 1, stderr) — non-fatal.
movq ___stderrp@GOTPCREL(%rip), %rax
movq (%rax), %rcx
leaq L_.str.46(%rip), %rdi
movl $32, %esi
movl $1, %edx
callq _fwrite
LBB10_3:
## printf("zlib version %s = 0x%04x, ...", "1.2.11", 0x12B0, flags)
callq _zlibCompileFlags
leaq L_.str.47(%rip), %rdi
leaq L_.str.23(%rip), %rsi
movl $4784, %edx ## imm = 0x12B0
movq %rax, %rcx
xorl %eax, %eax
callq _printf
## compr = calloc(40000, 1); uncompr = calloc(40000, 1)
movl $40000, %edi ## imm = 0x9C40
movl $1, %esi
callq _calloc
movq %rax, %rbx
movl $40000, %edi ## imm = 0x9C40
movl $1, %esi
callq _calloc
testq %rbx, %rbx
je LBB10_10
## %bb.4:
movq %rax, %r14
testq %rax, %rax
je LBB10_10
## %bb.5:
movl $40000, %esi ## imm = 0x9C40
movl $40000, %ecx ## imm = 0x9C40
movq %rbx, %rdi
movq %r14, %rdx
callq _test_compress
## gzio filename: argv[1] if argc >= 2, else "foo.gz".
cmpl $2, %r12d
jl LBB10_6
## %bb.7:
movq 8(%r15), %rdi
jmp LBB10_8
LBB10_6:
leaq L_.str.49(%rip), %rdi
LBB10_8:
movl $40000, %edx ## imm = 0x9C40
movq %r14, %rsi
callq _test_gzio
movl $40000, %esi ## imm = 0x9C40
movq %rbx, %rdi
callq _test_deflate
movl $40000, %esi ## imm = 0x9C40
movl $40000, %ecx ## imm = 0x9C40
movq %rbx, %rdi
movq %r14, %rdx
callq _test_inflate
movl $40000, %esi ## imm = 0x9C40
movl $40000, %ecx ## imm = 0x9C40
movq %rbx, %rdi
movq %r14, %rdx
callq _test_large_deflate
movl $40000, %esi ## imm = 0x9C40
movl $40000, %ecx ## imm = 0x9C40
movq %rbx, %rdi
movq %r14, %rdx
callq _test_large_inflate
## test_flush takes &comprLen and may update it in place.
leaq -40(%rbp), %rsi
movq %rbx, %rdi
callq _test_flush
movq -40(%rbp), %rsi
movl $40000, %ecx ## imm = 0x9C40
movq %rbx, %rdi
movq %r14, %rdx
callq _test_sync
movl $40000, %esi ## imm = 0x9C40
movq %rbx, %rdi
callq _test_dict_deflate
movl $40000, %esi ## imm = 0x9C40
movl $40000, %ecx ## imm = 0x9C40
movq %rbx, %rdi
movq %r14, %rdx
callq _test_dict_inflate
movq %rbx, %rdi
callq _free
movq %r14, %rdi
callq _free
## All tests passed: return 0.
xorl %eax, %eax
addq $16, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
LBB10_9:
## fwrite("incompatible zlib version\n", 26, 1, stderr); exit(1)
movq ___stderrp@GOTPCREL(%rip), %rax
movq (%rax), %rcx
leaq L_.str.45(%rip), %rdi
movl $26, %esi
movl $1, %edx
callq _fwrite
movl $1, %edi
callq _exit
LBB10_10:
## puts("out of memory"); exit(1)
leaq L_str.50(%rip), %rdi
callq _puts
movl $1, %edi
callq _exit
.cfi_endproc
## -- End function
## -----------------------------------------------------------------------
## Constant data for the zlib example harness.
## __const:   _hello (the test payload) and _dictionary (preset dict).
## __cstring: printf/fprintf format strings and error messages referenced
##            by the test functions above; L_.str.23 doubles as the
##            compile-time ZLIB_VERSION string passed to *Init_ calls.
## _dictId:   8-byte BSS slot (.comm) holding the adler32 of the dictionary,
##            written by test_dict_deflate and checked by test_dict_inflate.
## NOTE(review): comments added only; data unchanged.
## -----------------------------------------------------------------------
.section __TEXT,__const
.globl _hello ## @hello
_hello:
.asciz "hello, hello!"
.globl _dictionary ## @dictionary
_dictionary:
.asciz "hello"
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "%s error: %d\n"
L_.str.1: ## @.str.1
.asciz "compress"
L_.str.3: ## @.str.3
.asciz "uncompress"
L_.str.4: ## @.str.4
.asciz "bad uncompress\n"
L_.str.5: ## @.str.5
.asciz "uncompress(): %s\n"
L_.str.6: ## @.str.6
.asciz "wb"
L_.str.7: ## @.str.7
.asciz "gzopen error\n"
L_.str.8: ## @.str.8
.asciz "ello"
L_.str.9: ## @.str.9
.asciz "gzputs err: %s\n"
L_.str.10: ## @.str.10
.asciz ", %s!"
L_.str.12: ## @.str.12
.asciz "gzprintf err: %s\n"
L_.str.13: ## @.str.13
.asciz "rb"
L_.str.14: ## @.str.14
.asciz "gzread err: %s\n"
L_.str.15: ## @.str.15
.asciz "bad gzread: %s\n"
L_.str.16: ## @.str.16
.asciz "gzread(): %s\n"
L_.str.17: ## @.str.17
.asciz "gzseek error, pos=%ld, gztell=%ld\n"
L_.str.18: ## @.str.18
.asciz "gzgetc error\n"
L_.str.19: ## @.str.19
.asciz "gzungetc error\n"
L_.str.20: ## @.str.20
.asciz "gzgets err after gzseek: %s\n"
L_.str.21: ## @.str.21
.asciz "bad gzgets after gzseek\n"
L_.str.22: ## @.str.22
.asciz "gzgets() after gzseek: %s\n"
L_.str.23: ## @.str.23
.asciz "1.2.11"
L_.str.24: ## @.str.24
.asciz "deflateInit"
L_.str.25: ## @.str.25
.asciz "deflate"
L_.str.26: ## @.str.26
.asciz "deflateEnd"
L_.str.27: ## @.str.27
.asciz "inflateInit"
L_.str.28: ## @.str.28
.asciz "inflate"
L_.str.29: ## @.str.29
.asciz "inflateEnd"
L_.str.30: ## @.str.30
.asciz "bad inflate\n"
L_.str.31: ## @.str.31
.asciz "inflate(): %s\n"
L_.str.32: ## @.str.32
.asciz "deflate not greedy\n"
L_.str.33: ## @.str.33
.asciz "deflate should report Z_STREAM_END\n"
L_.str.34: ## @.str.34
.asciz "large inflate"
L_.str.35: ## @.str.35
.asciz "bad large inflate: %ld\n"
L_.str.37: ## @.str.37
.asciz "inflateSync"
L_.str.38: ## @.str.38
.asciz "inflate should report DATA_ERROR\n"
L_.str.39: ## @.str.39
.asciz "after inflateSync(): hel%s\n"
L_.str.40: ## @.str.40
.asciz "deflateSetDictionary"
.comm _dictId,8,3 ## @dictId
L_.str.41: ## @.str.41
.asciz "unexpected dictionary"
L_.str.42: ## @.str.42
.asciz "inflate with dict"
L_.str.43: ## @.str.43
.asciz "bad inflate with dict\n"
L_.str.44: ## @.str.44
.asciz "inflate with dictionary: %s\n"
L_.str.45: ## @.str.45
.asciz "incompatible zlib version\n"
L_.str.46: ## @.str.46
.asciz "warning: different zlib version\n"
L_.str.47: ## @.str.47
.asciz "zlib version %s = 0x%04x, compile flags = 0x%lx\n"
L_.str.49: ## @.str.49
.asciz "foo.gz"
L_str: ## @str
.asciz "large_inflate(): OK"
L_str.50: ## @str.50
.asciz "out of memory"
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
; -----------------------------------------------------------------------
; test_compress(compr, comprLen, uncompr, uncomprLen)  [zlib example]
; compress()es _hello (14 bytes incl. NUL) into compr, prefills uncompr
; with "garbage", uncompress()es back, strcmp's against _hello and prints
; the result.  Any failure writes to stderr and exit(1)'s.
; ABI: AAPCS64 (Darwin).  In: x0=compr, x1=comprLen, x2=uncompr,
; x3=uncomprLen.  x19 = uncompr, x20 = compr; [sp,#24] = comprLen (in/out
; destLen for compress), [sp,#16] = uncomprLen (destLen for uncompress).
; NOTE(review): clang-generated code; comments added only, code unchanged.
; -----------------------------------------------------------------------
.globl _test_compress ; -- Begin function test_compress
.p2align 2
_test_compress: ; @test_compress
.cfi_startproc
; %bb.0:
sub sp, sp, #64
.cfi_def_cfa_offset 64
stp x20, x19, [sp, #32] ; 16-byte Folded Spill
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x2
mov x20, x0
stp x3, x1, [sp, #16]
Lloh0:
adrp x2, _hello@PAGE
Lloh1:
add x2, x2, _hello@PAGEOFF
; compress(compr, &comprLen, hello, 14)
add x1, sp, #24
mov w3, #14
bl _compress
; kill: def $w0 killed $w0 def $x0
cbnz w0, LBB0_4
; %bb.1:
; strcpy(uncompr, "garbage") — the movk sequence builds "garbage\0".
mov x8, #24935
movk x8, #25202, lsl #16
movk x8, #26465, lsl #32
movk x8, #101, lsl #48
str x8, [x19]
; uncompress(uncompr, &uncomprLen, compr, comprLen)
ldr x3, [sp, #24]
add x1, sp, #16
mov x0, x19
mov x2, x20
bl _uncompress
; kill: def $w0 killed $w0 def $x0
cbnz w0, LBB0_5
; %bb.2:
Lloh2:
adrp x1, _hello@PAGE
Lloh3:
add x1, x1, _hello@PAGEOFF
mov x0, x19
bl _strcmp
cbnz w0, LBB0_7
; %bb.3:
; printf("uncompress(): %s\n", uncompr) — success return.
str x19, [sp]
Lloh4:
adrp x0, l_.str.5@PAGE
Lloh5:
add x0, x0, l_.str.5@PAGEOFF
bl _printf
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
add sp, sp, #64
ret
LBB0_4:
; compress() failed: fprintf(stderr, "%s error: %d\n", "compress", err).
Lloh6:
adrp x8, ___stderrp@GOTPAGE
Lloh7:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh8:
ldr x8, [x8]
Lloh9:
adrp x9, l_.str.1@PAGE
Lloh10:
add x9, x9, l_.str.1@PAGEOFF
b LBB0_6
LBB0_5:
; uncompress() failed.
Lloh11:
adrp x8, ___stderrp@GOTPAGE
Lloh12:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh13:
ldr x8, [x8]
Lloh14:
adrp x9, l_.str.3@PAGE
Lloh15:
add x9, x9, l_.str.3@PAGEOFF
LBB0_6:
; Shared error epilogue: fprintf(stderr, fmt, name, err); exit(1).
stp x9, x0, [sp]
Lloh16:
adrp x1, l_.str@PAGE
Lloh17:
add x1, x1, l_.str@PAGEOFF
mov x0, x8
bl _fprintf
mov w0, #1
bl _exit
LBB0_7:
; Output mismatch: fwrite("bad uncompress\n", 15, 1, stderr); exit(1).
Lloh18:
adrp x8, ___stderrp@GOTPAGE
Lloh19:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh20:
ldr x3, [x8]
Lloh21:
adrp x0, l_.str.4@PAGE
Lloh22:
add x0, x0, l_.str.4@PAGEOFF
mov w1, #15
mov w2, #1
bl _fwrite
mov w0, #1
bl _exit
.loh AdrpAdd Lloh0, Lloh1
.loh AdrpAdd Lloh2, Lloh3
.loh AdrpAdd Lloh4, Lloh5
.loh AdrpAdd Lloh9, Lloh10
.loh AdrpLdrGotLdr Lloh6, Lloh7, Lloh8
.loh AdrpAdd Lloh14, Lloh15
.loh AdrpLdrGotLdr Lloh11, Lloh12, Lloh13
.loh AdrpAdd Lloh16, Lloh17
.loh AdrpAdd Lloh21, Lloh22
.cfi_endproc
; -- End function
; -----------------------------------------------------------------------
; test_gzio(fname, uncompr, uncomprLen)  [zlib example]
; Exercises the gz* file API: gzopen "wb" + gzputc/gzputs/gzprintf to
; write "hello, hello!", gzseek/gzclose, then gzopen "rb" and verify via
; gzread/gzseek/gztell/gzgetc/gzungetc/gzgets.  Every failure prints to
; stderr and exit(1)'s.
; ABI: AAPCS64 (Darwin).  In: x0=fname, x1=uncompr, x2=uncomprLen.
; Register roles: x22=fname (later gzseek result), x19=uncompr,
; x20=uncomprLen, x21=gzFile handle.
; NOTE(review): the inline field accesses at LBB1_8/%bb.9 are clang's
; expansion of the gzgetc() macro reading the gzFile struct directly —
; presumably (have, next, pos) fields; confirm against zlib's gzguts.h.
; Clang-generated code; comments added only, code unchanged.
; -----------------------------------------------------------------------
.globl _test_gzio ; -- Begin function test_gzio
.p2align 2
_test_gzio: ; @test_gzio
.cfi_startproc
; %bb.0:
sub sp, sp, #80
.cfi_def_cfa_offset 80
stp x22, x21, [sp, #32] ; 16-byte Folded Spill
stp x20, x19, [sp, #48] ; 16-byte Folded Spill
stp x29, x30, [sp, #64] ; 16-byte Folded Spill
add x29, sp, #64
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x20, x2
mov x19, x1
mov x22, x0
; file = gzopen(fname, "wb")
Lloh23:
adrp x1, l_.str.6@PAGE
Lloh24:
add x1, x1, l_.str.6@PAGEOFF
bl _gzopen
cbz x0, LBB1_16
; %bb.1:
mov x21, x0
; gzputc(file, 'h') (104 == 'h'), then gzputs(file, "ello") must return 4.
mov w1, #104
bl _gzputc
Lloh25:
adrp x1, l_.str.8@PAGE
Lloh26:
add x1, x1, l_.str.8@PAGEOFF
mov x0, x21
bl _gzputs
cmp w0, #4
b.ne LBB1_19
; %bb.2:
; gzprintf(file, ", %s!", "hello") must return 8.
Lloh27:
adrp x8, _dictionary@PAGE
Lloh28:
add x8, x8, _dictionary@PAGEOFF
str x8, [sp]
Lloh29:
adrp x1, l_.str.10@PAGE
Lloh30:
add x1, x1, l_.str.10@PAGEOFF
mov x0, x21
bl _gzprintf
cmp w0, #8
b.ne LBB1_20
; %bb.3:
; gzseek(file, 1, SEEK_CUR) writes a zero byte, then gzclose.
mov x0, x21
mov w1, #1
mov w2, #1
bl _gzseek
mov x0, x21
bl _gzclose
; Reopen for reading: file = gzopen(fname, "rb").
Lloh31:
adrp x1, l_.str.13@PAGE
Lloh32:
add x1, x1, l_.str.13@PAGEOFF
mov x0, x22
bl _gzopen
cbz x0, LBB1_16
; %bb.4:
mov x21, x0
; strcpy(uncompr, "garbage"), then gzread must return 14 bytes.
mov x8, #24935
movk x8, #25202, lsl #16
movk x8, #26465, lsl #32
movk x8, #101, lsl #48
str x8, [x19]
mov x1, x19
mov x2, x20
bl _gzread
cmp w0, #14
b.ne LBB1_21
; %bb.5:
Lloh33:
adrp x1, _hello@PAGE
Lloh34:
add x1, x1, _hello@PAGEOFF
mov x0, x19
bl _strcmp
cbnz w0, LBB1_22
; %bb.6:
; printf("gzread(): %s\n", uncompr), then pos = gzseek(file, -8, SEEK_CUR)
; must land at offset 6, and gztell must agree.
str x19, [sp]
Lloh35:
adrp x0, l_.str.16@PAGE
Lloh36:
add x0, x0, l_.str.16@PAGEOFF
bl _printf
mov x0, x21
mov x1, #-8
mov w2, #1
bl _gzseek
mov x22, x0
cmp x0, #6
b.ne LBB1_18
; %bb.7:
mov x0, x21
bl _gztell
cmp x0, #6
b.ne LBB1_18
; %bb.8:
; Inlined gzgetc(): if the buffered-byte count at [x21] is zero, call
; the real gzgetc; otherwise consume one byte from the struct's buffer.
ldr w8, [x21]
cbz w8, LBB1_14
; %bb.9:
sub w8, w8, #1
str w8, [x21]
ldp x9, x8, [x21, #8]
add x8, x8, #1
add x10, x9, #1
stp x10, x8, [x21, #8]
ldrb w0, [x9]
; The character read must be ' ' (32).
cmp w0, #32
b.ne LBB1_15
LBB1_10:
; gzungetc(' ', file) must give back 32.
mov x1, x21
bl _gzungetc
cmp w0, #32
b.ne LBB1_23
; %bb.11:
; gzgets(file, uncompr, uncomprLen); strlen must be 7 (" hello!").
mov x0, x21
mov x1, x19
mov x2, x20
bl _gzgets
mov x0, x19
bl _strlen
cmp x0, #7
b.ne LBB1_24
; %bb.12:
; Compare with _hello+6 (the tail ", hello!" minus the comma).
Lloh37:
adrp x1, _hello@PAGE+6
Lloh38:
add x1, x1, _hello@PAGEOFF+6
mov x0, x19
bl _strcmp
cbnz w0, LBB1_27
; %bb.13:
; Success: printf("gzgets() after gzseek: %s\n", uncompr); tail-call
; gzclose(file) via b (frame already torn down).
str x19, [sp]
Lloh39:
adrp x0, l_.str.22@PAGE
Lloh40:
add x0, x0, l_.str.22@PAGEOFF
bl _printf
mov x0, x21
ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
ldp x22, x21, [sp, #32] ; 16-byte Folded Reload
add sp, sp, #80
b _gzclose
LBB1_14:
; Slow path of the inlined gzgetc macro.
mov x0, x21
bl _gzgetc
cmp w0, #32
b.eq LBB1_10
LBB1_15:
; fwrite("gzgetc error\n", 13, 1, stderr); exit(1)
Lloh41:
adrp x8, ___stderrp@GOTPAGE
Lloh42:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh43:
ldr x3, [x8]
Lloh44:
adrp x0, l_.str.18@PAGE
Lloh45:
add x0, x0, l_.str.18@PAGEOFF
b LBB1_17
LBB1_16:
; gzopen failed (either open): fwrite("gzopen error\n", 13, 1, stderr).
Lloh46:
adrp x8, ___stderrp@GOTPAGE
Lloh47:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh48:
ldr x3, [x8]
Lloh49:
adrp x0, l_.str.7@PAGE
Lloh50:
add x0, x0, l_.str.7@PAGEOFF
LBB1_17:
mov w1, #13
b LBB1_28
LBB1_18:
; gzseek/gztell mismatch: fprintf(stderr, "gzseek error, pos=%ld,
; gztell=%ld\n", pos, gztell(file)); exit(1).
Lloh51:
adrp x8, ___stderrp@GOTPAGE
Lloh52:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh53:
ldr x19, [x8]
mov x0, x21
bl _gztell
stp x22, x0, [sp]
Lloh54:
adrp x1, l_.str.17@PAGE
Lloh55:
add x1, x1, l_.str.17@PAGEOFF
b LBB1_25
LBB1_19:
; gzputs failed: report gzerror(file, &errnum) message.
Lloh56:
adrp x8, ___stderrp@GOTPAGE
Lloh57:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh58:
ldr x19, [x8]
add x1, sp, #28
mov x0, x21
bl _gzerror
str x0, [sp]
Lloh59:
adrp x1, l_.str.9@PAGE
Lloh60:
add x1, x1, l_.str.9@PAGEOFF
b LBB1_25
LBB1_20:
; gzprintf failed.
Lloh61:
adrp x8, ___stderrp@GOTPAGE
Lloh62:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh63:
ldr x19, [x8]
add x1, sp, #28
mov x0, x21
bl _gzerror
str x0, [sp]
Lloh64:
adrp x1, l_.str.12@PAGE
Lloh65:
add x1, x1, l_.str.12@PAGEOFF
b LBB1_25
LBB1_21:
; gzread failed.
Lloh66:
adrp x8, ___stderrp@GOTPAGE
Lloh67:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh68:
ldr x19, [x8]
add x1, sp, #28
mov x0, x21
bl _gzerror
str x0, [sp]
Lloh69:
adrp x1, l_.str.14@PAGE
Lloh70:
add x1, x1, l_.str.14@PAGEOFF
b LBB1_25
LBB1_22:
; gzread content mismatch: fprintf(stderr, "bad gzread: %s\n", uncompr).
Lloh71:
adrp x8, ___stderrp@GOTPAGE
Lloh72:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh73:
ldr x0, [x8]
str x19, [sp]
Lloh74:
adrp x1, l_.str.15@PAGE
Lloh75:
add x1, x1, l_.str.15@PAGEOFF
b LBB1_26
LBB1_23:
; fwrite("gzungetc error\n", 15, 1, stderr); exit(1)
Lloh76:
adrp x8, ___stderrp@GOTPAGE
Lloh77:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh78:
ldr x3, [x8]
Lloh79:
adrp x0, l_.str.19@PAGE
Lloh80:
add x0, x0, l_.str.19@PAGEOFF
mov w1, #15
b LBB1_28
LBB1_24:
; gzgets failed.
Lloh81:
adrp x8, ___stderrp@GOTPAGE
Lloh82:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh83:
ldr x19, [x8]
add x1, sp, #28
mov x0, x21
bl _gzerror
str x0, [sp]
Lloh84:
adrp x1, l_.str.20@PAGE
Lloh85:
add x1, x1, l_.str.20@PAGEOFF
LBB1_25:
mov x0, x19
LBB1_26:
; Shared fprintf(stderr, ...) + exit(1) tail.
bl _fprintf
mov w0, #1
bl _exit
LBB1_27:
; fwrite("bad gzgets after gzseek\n", 24, 1, stderr); exit(1)
Lloh86:
adrp x8, ___stderrp@GOTPAGE
Lloh87:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh88:
ldr x3, [x8]
Lloh89:
adrp x0, l_.str.21@PAGE
Lloh90:
add x0, x0, l_.str.21@PAGEOFF
mov w1, #24
LBB1_28:
; Shared fwrite(msg, len, 1, stderr) + exit(1) tail.
mov w2, #1
bl _fwrite
mov w0, #1
bl _exit
.loh AdrpAdd Lloh23, Lloh24
.loh AdrpAdd Lloh25, Lloh26
.loh AdrpAdd Lloh29, Lloh30
.loh AdrpAdd Lloh27, Lloh28
.loh AdrpAdd Lloh31, Lloh32
.loh AdrpAdd Lloh33, Lloh34
.loh AdrpAdd Lloh35, Lloh36
.loh AdrpAdd Lloh37, Lloh38
.loh AdrpAdd Lloh39, Lloh40
.loh AdrpAdd Lloh44, Lloh45
.loh AdrpLdrGotLdr Lloh41, Lloh42, Lloh43
.loh AdrpAdd Lloh49, Lloh50
.loh AdrpLdrGotLdr Lloh46, Lloh47, Lloh48
.loh AdrpAdd Lloh54, Lloh55
.loh AdrpLdrGotLdr Lloh51, Lloh52, Lloh53
.loh AdrpAdd Lloh59, Lloh60
.loh AdrpLdrGotLdr Lloh56, Lloh57, Lloh58
.loh AdrpAdd Lloh64, Lloh65
.loh AdrpLdrGotLdr Lloh61, Lloh62, Lloh63
.loh AdrpAdd Lloh69, Lloh70
.loh AdrpLdrGotLdr Lloh66, Lloh67, Lloh68
.loh AdrpAdd Lloh74, Lloh75
.loh AdrpLdrGotLdr Lloh71, Lloh72, Lloh73
.loh AdrpAdd Lloh79, Lloh80
.loh AdrpLdrGotLdr Lloh76, Lloh77, Lloh78
.loh AdrpAdd Lloh84, Lloh85
.loh AdrpLdrGotLdr Lloh81, Lloh82, Lloh83
.loh AdrpAdd Lloh89, Lloh90
.loh AdrpLdrGotLdr Lloh86, Lloh87, Lloh88
.cfi_endproc
; -- End function
; -----------------------------------------------------------------------
; test_deflate(compr, comprLen)  [zlib example]
; Deflates _hello byte-by-byte (avail_in = avail_out = 1 per iteration)
; with Z_NO_FLUSH until 14 input bytes are consumed, then drives
; deflate(Z_FINISH) until it returns 1 (Z_STREAM_END in zlib).
; Errors report via fprintf(stderr, "%s error: %d\n", ...) and exit(1).
; ABI: AAPCS64 (Darwin).  In: x0=compr, x1=comprLen.  The z_stream lives
; at [sp,#16]; [sp,#32]=total_in, [sp,#56]=total_out, [sp,#24]=avail_in,
; [sp,#48]=avail_out.
; NOTE(review): deflateInit_ is called with level -1 (Z_DEFAULT_COMPRESSION).
; Clang-generated code; comments added only, code unchanged.
; -----------------------------------------------------------------------
.globl _test_deflate ; -- Begin function test_deflate
.p2align 2
_test_deflate: ; @test_deflate
.cfi_startproc
; %bb.0:
sub sp, sp, #160
.cfi_def_cfa_offset 160
stp x20, x19, [sp, #128] ; 16-byte Folded Spill
stp x29, x30, [sp, #144] ; 16-byte Folded Spill
add x29, sp, #144
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x1
mov x20, x0
; Zero zalloc/zfree/opaque before deflateInit_.
stp xzr, xzr, [sp, #80]
str xzr, [sp, #96]
Lloh91:
adrp x2, l_.str.23@PAGE
Lloh92:
add x2, x2, l_.str.23@PAGEOFF
add x0, sp, #16
mov w1, #-1
mov w3, #112
bl _deflateInit_
; kill: def $w0 killed $w0 def $x0
cbnz w0, LBB2_11
; %bb.1:
; next_in = hello, next_out = compr; feed one byte at a time.
Lloh93:
adrp x8, _hello@PAGE
Lloh94:
add x8, x8, _hello@PAGEOFF
str x8, [sp, #16]
str x20, [sp, #40]
mov w20, #1
LBB2_2: ; =>This Inner Loop Header: Depth=1
; Loop while total_in < 14 and total_out < comprLen.
ldr x8, [sp, #32]
ldr x9, [sp, #56]
cmp x8, #14
ccmp x9, x19, #2, ne
b.hs LBB2_4
; %bb.3: ; in Loop: Header=BB2_2 Depth=1
; avail_out = avail_in = 1; deflate(&strm, Z_NO_FLUSH)
str w20, [sp, #48]
str w20, [sp, #24]
add x0, sp, #16
mov w1, #0
bl _deflate
; kill: def $w0 killed $w0 def $x0
cbz w0, LBB2_2
b LBB2_9
LBB2_4:
mov w19, #1
LBB2_5: ; =>This Inner Loop Header: Depth=1
; Finish loop: avail_out = 1; deflate(&strm, Z_FINISH) until != Z_OK.
str w19, [sp, #48]
add x0, sp, #16
mov w1, #4
bl _deflate
; kill: def $w0 killed $w0 def $x0
cbz w0, LBB2_5
; %bb.6:
; Must have ended with 1 (Z_STREAM_END).
cmp w0, #1
b.ne LBB2_9
; %bb.7:
add x0, sp, #16
bl _deflateEnd
; kill: def $w0 killed $w0 def $x0
cbnz w0, LBB2_12
; %bb.8:
ldp x29, x30, [sp, #144] ; 16-byte Folded Reload
ldp x20, x19, [sp, #128] ; 16-byte Folded Reload
add sp, sp, #160
ret
LBB2_9:
; deflate() failed.
Lloh95:
adrp x8, ___stderrp@GOTPAGE
Lloh96:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh97:
ldr x8, [x8]
Lloh98:
adrp x9, l_.str.25@PAGE
Lloh99:
add x9, x9, l_.str.25@PAGEOFF
LBB2_10:
; Shared: fprintf(stderr, "%s error: %d\n", name, err); exit(1).
stp x9, x0, [sp]
Lloh100:
adrp x1, l_.str@PAGE
Lloh101:
add x1, x1, l_.str@PAGEOFF
mov x0, x8
bl _fprintf
mov w0, #1
bl _exit
LBB2_11:
; deflateInit failed.
Lloh102:
adrp x8, ___stderrp@GOTPAGE
Lloh103:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh104:
ldr x8, [x8]
Lloh105:
adrp x9, l_.str.24@PAGE
Lloh106:
add x9, x9, l_.str.24@PAGEOFF
b LBB2_10
LBB2_12:
; deflateEnd failed.
Lloh107:
adrp x8, ___stderrp@GOTPAGE
Lloh108:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh109:
ldr x8, [x8]
Lloh110:
adrp x9, l_.str.26@PAGE
Lloh111:
add x9, x9, l_.str.26@PAGEOFF
b LBB2_10
.loh AdrpAdd Lloh91, Lloh92
.loh AdrpAdd Lloh93, Lloh94
.loh AdrpAdd Lloh98, Lloh99
.loh AdrpLdrGotLdr Lloh95, Lloh96, Lloh97
.loh AdrpAdd Lloh100, Lloh101
.loh AdrpAdd Lloh105, Lloh106
.loh AdrpLdrGotLdr Lloh102, Lloh103, Lloh104
.loh AdrpAdd Lloh110, Lloh111
.loh AdrpLdrGotLdr Lloh107, Lloh108, Lloh109
.cfi_endproc
; -- End function
; -----------------------------------------------------------------------
; test_inflate(compr, comprLen, uncompr, uncomprLen)  [zlib example]
; Inflates the buffer produced by test_deflate one byte at a time
; (avail_in = avail_out = 1) until 1 (Z_STREAM_END) is returned, then
; checks the output against _hello and prints it.  Errors exit(1).
; ABI: AAPCS64 (Darwin).  In: x0=compr, x1=comprLen, x2=uncompr,
; x3=uncomprLen.  x21=comprLen, x19=uncompr, x20=uncomprLen; z_stream at
; [sp,#16]; [sp,#32]=total_in, [sp,#56]=total_out.
; NOTE(review): clang-generated code; comments added only, code unchanged.
; -----------------------------------------------------------------------
.globl _test_inflate ; -- Begin function test_inflate
.p2align 2
_test_inflate: ; @test_inflate
.cfi_startproc
; %bb.0:
sub sp, sp, #176
.cfi_def_cfa_offset 176
stp x22, x21, [sp, #128] ; 16-byte Folded Spill
stp x20, x19, [sp, #144] ; 16-byte Folded Spill
stp x29, x30, [sp, #160] ; 16-byte Folded Spill
add x29, sp, #160
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x20, x3
mov x19, x2
mov x21, x1
; strcpy(uncompr, "garbage") — movk sequence builds "garbage\0".
mov x8, #24935
movk x8, #25202, lsl #16
movk x8, #26465, lsl #32
movk x8, #101, lsl #48
str x8, [x2]
; Zero zalloc/zfree/opaque; next_in = compr, avail_in = 0,
; next_out = uncompr.
stp xzr, xzr, [sp, #88]
str x0, [sp, #16]
str wzr, [sp, #24]
str xzr, [sp, #80]
str x2, [sp, #40]
Lloh112:
adrp x1, l_.str.23@PAGE
Lloh113:
add x1, x1, l_.str.23@PAGEOFF
add x0, sp, #16
mov w2, #112
bl _inflateInit_
; kill: def $w0 killed $w0 def $x0
cbnz w0, LBB3_10
; %bb.1:
mov w22, #1
LBB3_2: ; =>This Inner Loop Header: Depth=1
; Loop while total_out < uncomprLen and total_in < comprLen.
ldr x8, [sp, #56]
ldr x9, [sp, #32]
cmp x8, x20
ccmp x9, x21, #2, lo
b.hs LBB3_5
; %bb.3: ; in Loop: Header=BB3_2 Depth=1
; avail_out = avail_in = 1; inflate(&strm, Z_NO_FLUSH)
str w22, [sp, #48]
str w22, [sp, #24]
add x0, sp, #16
mov w1, #0
bl _inflate
; kill: def $w0 killed $w0 def $x0
cbz w0, LBB3_2
; %bb.4:
; Anything other than 1 (Z_STREAM_END) is an error.
cmp w0, #1
b.ne LBB3_8
LBB3_5:
add x0, sp, #16
bl _inflateEnd
; kill: def $w0 killed $w0 def $x0
cbnz w0, LBB3_11
; %bb.6:
Lloh114:
adrp x1, _hello@PAGE
Lloh115:
add x1, x1, _hello@PAGEOFF
mov x0, x19
bl _strcmp
cbnz w0, LBB3_12
; %bb.7:
; printf("inflate(): %s\n", uncompr) — success return.
str x19, [sp]
Lloh116:
adrp x0, l_.str.31@PAGE
Lloh117:
add x0, x0, l_.str.31@PAGEOFF
bl _printf
ldp x29, x30, [sp, #160] ; 16-byte Folded Reload
ldp x20, x19, [sp, #144] ; 16-byte Folded Reload
ldp x22, x21, [sp, #128] ; 16-byte Folded Reload
add sp, sp, #176
ret
LBB3_8:
; inflate() failed.
Lloh118:
adrp x8, ___stderrp@GOTPAGE
Lloh119:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh120:
ldr x8, [x8]
Lloh121:
adrp x9, l_.str.28@PAGE
Lloh122:
add x9, x9, l_.str.28@PAGEOFF
LBB3_9:
; Shared: fprintf(stderr, "%s error: %d\n", name, err); exit(1).
stp x9, x0, [sp]
Lloh123:
adrp x1, l_.str@PAGE
Lloh124:
add x1, x1, l_.str@PAGEOFF
mov x0, x8
bl _fprintf
mov w0, #1
bl _exit
LBB3_10:
; inflateInit failed.
Lloh125:
adrp x8, ___stderrp@GOTPAGE
Lloh126:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh127:
ldr x8, [x8]
Lloh128:
adrp x9, l_.str.27@PAGE
Lloh129:
add x9, x9, l_.str.27@PAGEOFF
b LBB3_9
LBB3_11:
; inflateEnd failed.
Lloh130:
adrp x8, ___stderrp@GOTPAGE
Lloh131:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh132:
ldr x8, [x8]
Lloh133:
adrp x9, l_.str.29@PAGE
Lloh134:
add x9, x9, l_.str.29@PAGEOFF
b LBB3_9
LBB3_12:
; Output mismatch: fwrite("bad inflate\n", 12, 1, stderr); exit(1).
Lloh135:
adrp x8, ___stderrp@GOTPAGE
Lloh136:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh137:
ldr x3, [x8]
Lloh138:
adrp x0, l_.str.30@PAGE
Lloh139:
add x0, x0, l_.str.30@PAGEOFF
mov w1, #12
mov w2, #1
bl _fwrite
mov w0, #1
bl _exit
.loh AdrpAdd Lloh112, Lloh113
.loh AdrpAdd Lloh114, Lloh115
.loh AdrpAdd Lloh116, Lloh117
.loh AdrpAdd Lloh121, Lloh122
.loh AdrpLdrGotLdr Lloh118, Lloh119, Lloh120
.loh AdrpAdd Lloh123, Lloh124
.loh AdrpAdd Lloh128, Lloh129
.loh AdrpLdrGotLdr Lloh125, Lloh126, Lloh127
.loh AdrpAdd Lloh133, Lloh134
.loh AdrpLdrGotLdr Lloh130, Lloh131, Lloh132
.loh AdrpAdd Lloh138, Lloh139
.loh AdrpLdrGotLdr Lloh135, Lloh136, Lloh137
.cfi_endproc
; -- End function
; -----------------------------------------------------------------------
; test_large_deflate(compr, comprLen, uncompr, uncomprLen)  [zlib example]
; Deflates a large buffer while switching parameters mid-stream:
; starts at level 1 (Z_BEST_SPEED), compresses uncompr (expects all input
; consumed — "deflate not greedy" otherwise), switches to level 0 with
; deflateParams and feeds compr itself (uncomprLen/2 bytes of already-
; compressed, hence incompressible, data), switches to level 9 with
; filtered strategy, feeds uncompr again, then Z_FINISH must return 1
; (Z_STREAM_END).  Errors exit(1).
; ABI: AAPCS64 (Darwin).  In: x0=compr, x1=comprLen, x2=uncompr,
; x3=uncomprLen.  x22=compr, w21=comprLen, x20=uncompr, w19=uncomprLen;
; z_stream at [sp,#16].
; NOTE(review): clang-generated code; comments added only, code unchanged.
; -----------------------------------------------------------------------
.globl _test_large_deflate ; -- Begin function test_large_deflate
.p2align 2
_test_large_deflate: ; @test_large_deflate
.cfi_startproc
; %bb.0:
sub sp, sp, #176
.cfi_def_cfa_offset 176
stp x22, x21, [sp, #128] ; 16-byte Folded Spill
stp x20, x19, [sp, #144] ; 16-byte Folded Spill
stp x29, x30, [sp, #160] ; 16-byte Folded Spill
add x29, sp, #160
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x19, x3
mov x20, x2
mov x21, x1
mov x22, x0
; Zero zalloc/zfree/opaque; deflateInit_(&strm, 1, "1.2.11", 112).
stp xzr, xzr, [sp, #80]
str xzr, [sp, #96]
Lloh140:
adrp x2, l_.str.23@PAGE
Lloh141:
add x2, x2, l_.str.23@PAGEOFF
add x0, sp, #16
mov w1, #1
mov w3, #112
bl _deflateInit_
; kill: def $w0 killed $w0 def $x0
cbnz w0, LBB4_9
; %bb.1:
; next_out = compr, avail_out = comprLen; next_in = uncompr,
; avail_in = uncomprLen; deflate(&strm, Z_NO_FLUSH).
str x22, [sp, #40]
str w21, [sp, #48]
str x20, [sp, #16]
str w19, [sp, #24]
add x0, sp, #16
mov w1, #0
bl _deflate
; kill: def $w0 killed $w0 def $x0
cbnz w0, LBB4_8
; %bb.2:
; All input must have been consumed (avail_in == 0).
ldr w8, [sp, #24]
cbnz w8, LBB4_10
; %bb.3:
; deflateParams(&strm, 0, Z_DEFAULT_STRATEGY); feed compr itself,
; avail_in = comprLen/2.
add x0, sp, #16
mov w1, #0
mov w2, #0
bl _deflateParams
str x22, [sp, #16]
lsr w8, w21, #1
str w8, [sp, #24]
add x0, sp, #16
mov w1, #0
bl _deflate
; kill: def $w0 killed $w0 def $x0
cbnz w0, LBB4_8
; %bb.4:
; deflateParams(&strm, 9, 1 /* Z_FILTERED */); feed uncompr again.
add x0, sp, #16
mov w1, #9
mov w2, #1
bl _deflateParams
str x20, [sp, #16]
str w19, [sp, #24]
add x0, sp, #16
mov w1, #0
bl _deflate
; kill: def $w0 killed $w0 def $x0
cbnz w0, LBB4_8
; %bb.5:
; deflate(&strm, Z_FINISH) must return 1 (Z_STREAM_END) in one call.
add x0, sp, #16
mov w1, #4
bl _deflate
cmp w0, #1
b.ne LBB4_11
; %bb.6:
add x0, sp, #16
bl _deflateEnd
; kill: def $w0 killed $w0 def $x0
cbnz w0, LBB4_13
; %bb.7:
ldp x29, x30, [sp, #160] ; 16-byte Folded Reload
ldp x20, x19, [sp, #144] ; 16-byte Folded Reload
ldp x22, x21, [sp, #128] ; 16-byte Folded Reload
add sp, sp, #176
ret
LBB4_8:
; deflate() failed.
Lloh142:
adrp x8, ___stderrp@GOTPAGE
Lloh143:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh144:
ldr x8, [x8]
Lloh145:
adrp x9, l_.str.25@PAGE
Lloh146:
add x9, x9, l_.str.25@PAGEOFF
b LBB4_14
LBB4_9:
; deflateInit failed.
Lloh147:
adrp x8, ___stderrp@GOTPAGE
Lloh148:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh149:
ldr x8, [x8]
Lloh150:
adrp x9, l_.str.24@PAGE
Lloh151:
add x9, x9, l_.str.24@PAGEOFF
b LBB4_14
LBB4_10:
; fwrite("deflate not greedy\n", 19, 1, stderr); exit(1)
Lloh152:
adrp x8, ___stderrp@GOTPAGE
Lloh153:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh154:
ldr x3, [x8]
Lloh155:
adrp x0, l_.str.32@PAGE
Lloh156:
add x0, x0, l_.str.32@PAGEOFF
mov w1, #19
b LBB4_12
LBB4_11:
; fwrite("deflate should report Z_STREAM_END\n", 35, 1, stderr); exit(1)
Lloh157:
adrp x8, ___stderrp@GOTPAGE
Lloh158:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh159:
ldr x3, [x8]
Lloh160:
adrp x0, l_.str.33@PAGE
Lloh161:
add x0, x0, l_.str.33@PAGEOFF
mov w1, #35
LBB4_12:
mov w2, #1
bl _fwrite
mov w0, #1
bl _exit
LBB4_13:
; deflateEnd failed.
Lloh162:
adrp x8, ___stderrp@GOTPAGE
Lloh163:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh164:
ldr x8, [x8]
Lloh165:
adrp x9, l_.str.26@PAGE
Lloh166:
add x9, x9, l_.str.26@PAGEOFF
LBB4_14:
; Shared: fprintf(stderr, "%s error: %d\n", name, err); exit(1).
stp x9, x0, [sp]
Lloh167:
adrp x1, l_.str@PAGE
Lloh168:
add x1, x1, l_.str@PAGEOFF
mov x0, x8
bl _fprintf
mov w0, #1
bl _exit
.loh AdrpAdd Lloh140, Lloh141
.loh AdrpAdd Lloh145, Lloh146
.loh AdrpLdrGotLdr Lloh142, Lloh143, Lloh144
.loh AdrpAdd Lloh150, Lloh151
.loh AdrpLdrGotLdr Lloh147, Lloh148, Lloh149
.loh AdrpAdd Lloh155, Lloh156
.loh AdrpLdrGotLdr Lloh152, Lloh153, Lloh154
.loh AdrpAdd Lloh160, Lloh161
.loh AdrpLdrGotLdr Lloh157, Lloh158, Lloh159
.loh AdrpAdd Lloh165, Lloh166
.loh AdrpLdrGotLdr Lloh162, Lloh163, Lloh164
.loh AdrpAdd Lloh167, Lloh168
.cfi_endproc
; -- End function
; -----------------------------------------------------------------------
; test_large_inflate(compr, comprLen, uncompr, uncomprLen)  [zlib example]
; Inflates the multi-phase stream from test_large_deflate, repeatedly
; resetting next_out/avail_out to reuse uncompr as a discard buffer,
; until 1 (Z_STREAM_END).  Then checks
; total_out == 2*uncomprLen + comprLen/2 and prints "large_inflate(): OK".
; Errors exit(1).
; ABI: AAPCS64 (Darwin).  In: x0=compr, x1=comprLen, x2=uncompr,
; x3=uncomprLen.  x21=uncompr, w20=comprLen, x19=uncomprLen; z_stream at
; [sp,#16]; [sp,#56]=total_out.
; NOTE(review): clang-generated code; comments added only, code unchanged.
; -----------------------------------------------------------------------
.globl _test_large_inflate ; -- Begin function test_large_inflate
.p2align 2
_test_large_inflate: ; @test_large_inflate
.cfi_startproc
; %bb.0:
sub sp, sp, #176
.cfi_def_cfa_offset 176
stp x22, x21, [sp, #128] ; 16-byte Folded Spill
stp x20, x19, [sp, #144] ; 16-byte Folded Spill
stp x29, x30, [sp, #160] ; 16-byte Folded Spill
add x29, sp, #160
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x19, x3
mov x21, x2
mov x20, x1
; strcpy(uncompr, "garbage") — movk sequence builds "garbage\0".
mov x8, #24935
movk x8, #25202, lsl #16
movk x8, #26465, lsl #32
movk x8, #101, lsl #48
str x8, [x2]
; Zero zalloc/zfree/opaque; next_in = compr, avail_in = comprLen.
stp xzr, xzr, [sp, #88]
str xzr, [sp, #80]
str x0, [sp, #16]
str w20, [sp, #24]
Lloh169:
adrp x1, l_.str.23@PAGE
Lloh170:
add x1, x1, l_.str.23@PAGEOFF
add x0, sp, #16
mov w2, #112
bl _inflateInit_
; kill: def $w0 killed $w0 def $x0
cbnz w0, LBB5_9
LBB5_1: ; =>This Inner Loop Header: Depth=1
; Discard loop: reset next_out = uncompr, avail_out = uncomprLen each
; pass; inflate(&strm, Z_NO_FLUSH) until it stops returning 0 (Z_OK).
str x21, [sp, #40]
str w19, [sp, #48]
add x0, sp, #16
mov w1, #0
bl _inflate
; kill: def $w0 killed $w0 def $x0
cbz w0, LBB5_1
; %bb.2:
cmp w0, #1
b.ne LBB5_6
; %bb.3:
add x0, sp, #16
bl _inflateEnd
; kill: def $w0 killed $w0 def $x0
cbnz w0, LBB5_10
; %bb.4:
; total_out must equal 2*uncomprLen + comprLen/2 (the three inputs fed
; by test_large_deflate).
ldr x8, [sp, #56]
lsl x9, x19, #1
add x9, x9, x20, lsr #1
cmp x8, x9
b.ne LBB5_11
; %bb.5:
; puts("large_inflate(): OK") — success return.
Lloh171:
adrp x0, l_str@PAGE
Lloh172:
add x0, x0, l_str@PAGEOFF
bl _puts
ldp x29, x30, [sp, #160] ; 16-byte Folded Reload
ldp x20, x19, [sp, #144] ; 16-byte Folded Reload
ldp x22, x21, [sp, #128] ; 16-byte Folded Reload
add sp, sp, #176
ret
LBB5_6:
; inflate() failed ("large inflate").
Lloh173:
adrp x8, ___stderrp@GOTPAGE
Lloh174:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh175:
ldr x8, [x8]
Lloh176:
adrp x9, l_.str.34@PAGE
Lloh177:
add x9, x9, l_.str.34@PAGEOFF
LBB5_7:
; Shared: fprintf(stderr, "%s error: %d\n", name, err); exit(1).
stp x9, x0, [sp]
Lloh178:
adrp x1, l_.str@PAGE
Lloh179:
add x1, x1, l_.str@PAGEOFF
mov x0, x8
LBB5_8:
bl _fprintf
mov w0, #1
bl _exit
LBB5_9:
; inflateInit failed.
Lloh180:
adrp x8, ___stderrp@GOTPAGE
Lloh181:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh182:
ldr x8, [x8]
Lloh183:
adrp x9, l_.str.27@PAGE
Lloh184:
add x9, x9, l_.str.27@PAGEOFF
b LBB5_7
LBB5_10:
; inflateEnd failed.
Lloh185:
adrp x8, ___stderrp@GOTPAGE
Lloh186:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh187:
ldr x8, [x8]
Lloh188:
adrp x9, l_.str.29@PAGE
Lloh189:
add x9, x9, l_.str.29@PAGEOFF
b LBB5_7
LBB5_11:
; fprintf(stderr, "bad large inflate: %ld\n", total_out); exit(1)
Lloh190:
adrp x9, ___stderrp@GOTPAGE
Lloh191:
ldr x9, [x9, ___stderrp@GOTPAGEOFF]
Lloh192:
ldr x0, [x9]
str x8, [sp]
Lloh193:
adrp x1, l_.str.35@PAGE
Lloh194:
add x1, x1, l_.str.35@PAGEOFF
b LBB5_8
.loh AdrpAdd Lloh169, Lloh170
.loh AdrpAdd Lloh171, Lloh172
.loh AdrpAdd Lloh176, Lloh177
.loh AdrpLdrGotLdr Lloh173, Lloh174, Lloh175
.loh AdrpAdd Lloh178, Lloh179
.loh AdrpAdd Lloh183, Lloh184
.loh AdrpLdrGotLdr Lloh180, Lloh181, Lloh182
.loh AdrpAdd Lloh188, Lloh189
.loh AdrpLdrGotLdr Lloh185, Lloh186, Lloh187
.loh AdrpAdd Lloh193, Lloh194
.loh AdrpLdrGotLdr Lloh190, Lloh191, Lloh192
.cfi_endproc
; -- End function
; -----------------------------------------------------------------------
; test_flush(compr, &comprLen)  [zlib example]
; Deflates the first 3 bytes of _hello with Z_FULL_FLUSH (3), corrupts
; byte compr[3] (part of the flushed header/CRC area) by incrementing it,
; then finishes the remaining 11 bytes with Z_FINISH — a return >= 2
; (i.e. any zlib error) is fatal.  Writes the produced length back
; through *comprLen for test_sync.  Errors exit(1).
; ABI: AAPCS64 (Darwin).  In: x0=compr, x1=&comprLen.  x20=compr,
; x19=&comprLen; z_stream at [sp,#16]; [sp,#56]=total_out.
; NOTE(review): clang-generated code; comments added only, code unchanged.
; -----------------------------------------------------------------------
.globl _test_flush ; -- Begin function test_flush
.p2align 2
_test_flush: ; @test_flush
.cfi_startproc
; %bb.0:
sub sp, sp, #160
.cfi_def_cfa_offset 160
stp x20, x19, [sp, #128] ; 16-byte Folded Spill
stp x29, x30, [sp, #144] ; 16-byte Folded Spill
add x29, sp, #144
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x1
mov x20, x0
; Zero zalloc/zfree/opaque; deflateInit_(&strm, -1, "1.2.11", 112).
stp xzr, xzr, [sp, #80]
str xzr, [sp, #96]
Lloh195:
adrp x2, l_.str.23@PAGE
Lloh196:
add x2, x2, l_.str.23@PAGEOFF
add x0, sp, #16
mov w1, #-1
mov w3, #112
bl _deflateInit_
; kill: def $w0 killed $w0 def $x0
cbnz w0, LBB6_6
; %bb.1:
; next_in = hello, next_out = compr, avail_in = 3,
; avail_out = (uInt)*comprLen; deflate(&strm, Z_FULL_FLUSH == 3).
Lloh197:
adrp x8, _hello@PAGE
Lloh198:
add x8, x8, _hello@PAGEOFF
str x8, [sp, #16]
str x20, [sp, #40]
mov w8, #3
str w8, [sp, #24]
ldr x8, [x19]
str w8, [sp, #48]
add x0, sp, #16
mov w1, #3
bl _deflate
; kill: def $w0 killed $w0 def $x0
cbnz w0, LBB6_5
; %bb.2:
; compr[3]++ — deliberately corrupt the flushed block so test_sync has
; something to resynchronize past; then feed the remaining 11 bytes.
ldrb w8, [x20, #3]
add w8, w8, #1
strb w8, [x20, #3]
mov w8, #11
str w8, [sp, #24]
add x0, sp, #16
mov w1, #4
bl _deflate
; kill: def $w0 killed $w0 def $x0
; Z_OK (0) and Z_STREAM_END (1) both acceptable here; >= 2 is an error.
cmp w0, #2
b.hs LBB6_5
; %bb.3:
add x0, sp, #16
bl _deflateEnd
; kill: def $w0 killed $w0 def $x0
cbnz w0, LBB6_7
; %bb.4:
; *comprLen = strm.total_out
ldr x8, [sp, #56]
str x8, [x19]
ldp x29, x30, [sp, #144] ; 16-byte Folded Reload
ldp x20, x19, [sp, #128] ; 16-byte Folded Reload
add sp, sp, #160
ret
LBB6_5:
; deflate() failed.
Lloh199:
adrp x8, ___stderrp@GOTPAGE
Lloh200:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh201:
ldr x8, [x8]
Lloh202:
adrp x9, l_.str.25@PAGE
Lloh203:
add x9, x9, l_.str.25@PAGEOFF
b LBB6_8
LBB6_6:
; deflateInit failed.
Lloh204:
adrp x8, ___stderrp@GOTPAGE
Lloh205:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh206:
ldr x8, [x8]
Lloh207:
adrp x9, l_.str.24@PAGE
Lloh208:
add x9, x9, l_.str.24@PAGEOFF
b LBB6_8
LBB6_7:
; deflateEnd failed.
Lloh209:
adrp x8, ___stderrp@GOTPAGE
Lloh210:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh211:
ldr x8, [x8]
Lloh212:
adrp x9, l_.str.26@PAGE
Lloh213:
add x9, x9, l_.str.26@PAGEOFF
LBB6_8:
; Shared: fprintf(stderr, "%s error: %d\n", name, err); exit(1).
stp x9, x0, [sp]
Lloh214:
adrp x1, l_.str@PAGE
Lloh215:
add x1, x1, l_.str@PAGEOFF
mov x0, x8
bl _fprintf
mov w0, #1
bl _exit
.loh AdrpAdd Lloh195, Lloh196
.loh AdrpAdd Lloh197, Lloh198
.loh AdrpAdd Lloh202, Lloh203
.loh AdrpLdrGotLdr Lloh199, Lloh200, Lloh201
.loh AdrpAdd Lloh207, Lloh208
.loh AdrpLdrGotLdr Lloh204, Lloh205, Lloh206
.loh AdrpAdd Lloh212, Lloh213
.loh AdrpLdrGotLdr Lloh209, Lloh210, Lloh211
.loh AdrpAdd Lloh214, Lloh215
.cfi_endproc
; -- End function
; -----------------------------------------------------------------------
; test_sync(compr, comprLen, uncompr, uncomprLen)  [zlib example]
; Recovers from the corruption introduced by test_flush: inflates just
; the first 2 bytes (the stream header), then gives inflate the rest
; (comprLen - 2) and calls inflateSync() to skip the damaged block.
; The following inflate(Z_FINISH) must return -3 (Z_DATA_ERROR in zlib,
; incorrect CRC after sync); then prints the partially recovered output.
; Errors exit(1).
; ABI: AAPCS64 (Darwin).  In: x0=compr, x1=comprLen, x2=uncompr,
; x3=uncomprLen.  x19=uncompr, w20=comprLen, w21=uncomprLen; z_stream at
; [sp,#16].
; NOTE(review): clang-generated code; comments added only, code unchanged.
; -----------------------------------------------------------------------
.globl _test_sync ; -- Begin function test_sync
.p2align 2
_test_sync: ; @test_sync
.cfi_startproc
; %bb.0:
sub sp, sp, #176
.cfi_def_cfa_offset 176
stp x22, x21, [sp, #128] ; 16-byte Folded Spill
stp x20, x19, [sp, #144] ; 16-byte Folded Spill
stp x29, x30, [sp, #160] ; 16-byte Folded Spill
add x29, sp, #160
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x21, x3
mov x19, x2
mov x20, x1
; strcpy(uncompr, "garbage") — movk sequence builds "garbage\0".
mov x8, #24935
movk x8, #25202, lsl #16
movk x8, #26465, lsl #32
movk x8, #101, lsl #48
str x8, [x2]
; Zero zalloc/zfree/opaque; next_in = compr, avail_in = 2 (just the
; zlib header).
stp xzr, xzr, [sp, #88]
str xzr, [sp, #80]
str x0, [sp, #16]
mov w8, #2
str w8, [sp, #24]
Lloh216:
adrp x1, l_.str.23@PAGE
Lloh217:
add x1, x1, l_.str.23@PAGEOFF
add x0, sp, #16
mov w2, #112
bl _inflateInit_
; kill: def $w0 killed $w0 def $x0
cbnz w0, LBB7_5
; %bb.1:
; next_out = uncompr, avail_out = uncomprLen; inflate header bytes,
; then avail_in = comprLen - 2 and inflateSync(&strm).
str x19, [sp, #40]
str w21, [sp, #48]
add x0, sp, #16
mov w1, #0
bl _inflate
sub w8, w20, #2
str w8, [sp, #24]
add x0, sp, #16
bl _inflateSync
; kill: def $w0 killed $w0 def $x0
cbnz w0, LBB7_6
; %bb.2:
; inflate(&strm, Z_FINISH) must return -3 (Z_DATA_ERROR) — cmn w0,#3
; compares w0 against -3.
add x0, sp, #16
mov w1, #4
bl _inflate
cmn w0, #3
b.ne LBB7_7
; %bb.3:
add x0, sp, #16
bl _inflateEnd
; kill: def $w0 killed $w0 def $x0
cbnz w0, LBB7_8
; %bb.4:
; printf("after inflateSync(): hel%s\n", uncompr) — success return.
str x19, [sp]
Lloh218:
adrp x0, l_.str.39@PAGE
Lloh219:
add x0, x0, l_.str.39@PAGEOFF
bl _printf
ldp x29, x30, [sp, #160] ; 16-byte Folded Reload
ldp x20, x19, [sp, #144] ; 16-byte Folded Reload
ldp x22, x21, [sp, #128] ; 16-byte Folded Reload
add sp, sp, #176
ret
LBB7_5:
; inflateInit failed.
Lloh220:
adrp x8, ___stderrp@GOTPAGE
Lloh221:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh222:
ldr x8, [x8]
Lloh223:
adrp x9, l_.str.27@PAGE
Lloh224:
add x9, x9, l_.str.27@PAGEOFF
b LBB7_9
LBB7_6:
; inflateSync failed.
Lloh225:
adrp x8, ___stderrp@GOTPAGE
Lloh226:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh227:
ldr x8, [x8]
Lloh228:
adrp x9, l_.str.37@PAGE
Lloh229:
add x9, x9, l_.str.37@PAGEOFF
b LBB7_9
LBB7_7:
; fwrite("inflate should report DATA_ERROR\n", 33, 1, stderr); exit(1)
Lloh230:
adrp x8, ___stderrp@GOTPAGE
Lloh231:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh232:
ldr x3, [x8]
Lloh233:
adrp x0, l_.str.38@PAGE
Lloh234:
add x0, x0, l_.str.38@PAGEOFF
mov w1, #33
mov w2, #1
bl _fwrite
mov w0, #1
bl _exit
LBB7_8:
; inflateEnd failed.
Lloh235:
adrp x8, ___stderrp@GOTPAGE
Lloh236:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh237:
ldr x8, [x8]
Lloh238:
adrp x9, l_.str.29@PAGE
Lloh239:
add x9, x9, l_.str.29@PAGEOFF
LBB7_9:
; Shared: fprintf(stderr, "%s error: %d\n", name, err); exit(1).
stp x9, x0, [sp]
Lloh240:
adrp x1, l_.str@PAGE
Lloh241:
add x1, x1, l_.str@PAGEOFF
mov x0, x8
bl _fprintf
mov w0, #1
bl _exit
.loh AdrpAdd Lloh216, Lloh217
.loh AdrpAdd Lloh218, Lloh219
.loh AdrpAdd Lloh223, Lloh224
.loh AdrpLdrGotLdr Lloh220, Lloh221, Lloh222
.loh AdrpAdd Lloh228, Lloh229
.loh AdrpLdrGotLdr Lloh225, Lloh226, Lloh227
.loh AdrpAdd Lloh233, Lloh234
.loh AdrpLdrGotLdr Lloh230, Lloh231, Lloh232
.loh AdrpAdd Lloh238, Lloh239
.loh AdrpLdrGotLdr Lloh235, Lloh236, Lloh237
.loh AdrpAdd Lloh240, Lloh241
.cfi_endproc
; -- End function
; -----------------------------------------------------------------------------
; _test_dict_deflate(x0 = compr buffer, w1 = compr buffer length)
; AArch64 Darwin, AAPCS64; compiler-generated (zlib example test).
; Initializes an on-stack z_stream at sp+16, installs the 6-byte global
; `_dictionary` via deflateSetDictionary, stores the stream's dictionary
; Adler value (loaded from the stream at sp+112) into the global `_dictId`,
; then deflates the global `_hello` string (14 bytes) with flush mode 4
; (presumably Z_FINISH — TODO confirm against zlib headers).
; On any zlib failure: prints a diagnostic to stderr and exit(1).
; Callee-saved x19 = compr length (w19), x20 = compr pointer.
; -----------------------------------------------------------------------------
.globl _test_dict_deflate ; -- Begin function test_dict_deflate
.p2align 2
_test_dict_deflate: ; @test_dict_deflate
.cfi_startproc
; %bb.0:
sub sp, sp, #160
.cfi_def_cfa_offset 160
stp x20, x19, [sp, #128] ; 16-byte Folded Spill
stp x29, x30, [sp, #144] ; 16-byte Folded Spill
add x29, sp, #144
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x1
mov x20, x0
; Zero three 8-byte fields of the z_stream at sp+80..sp+103
; (presumably zalloc/zfree/opaque — TODO confirm struct layout).
stp xzr, xzr, [sp, #80]
str xzr, [sp, #96]
Lloh242:
adrp x2, l_.str.23@PAGE
Lloh243:
add x2, x2, l_.str.23@PAGEOFF
; deflateInit_(&strm /*sp+16*/, 9 /*compression level*/, "1.2.11", 112)
add x0, sp, #16
mov w1, #9
mov w3, #112
bl _deflateInit_
; kill: def $w0 killed $w0 def $x0
cbnz w0, LBB8_5 ; nonzero return -> "deflateInit" error path
; %bb.1:
Lloh244:
adrp x1, _dictionary@PAGE
Lloh245:
add x1, x1, _dictionary@PAGEOFF
; deflateSetDictionary(&strm, _dictionary, 6)
add x0, sp, #16
mov w2, #6
bl _deflateSetDictionary
; kill: def $w0 killed $w0 def $x0
cbnz w0, LBB8_6 ; nonzero -> "deflateSetDictionary" error path
; %bb.2:
; dictId = strm.adler (field at offset 96 within the sp+16 stream)
ldr x8, [sp, #112]
Lloh246:
adrp x9, _dictId@GOTPAGE
Lloh247:
ldr x9, [x9, _dictId@GOTPAGEOFF]
Lloh248:
str x8, [x9]
; strm.next_out = compr (sp+40), strm.avail_out = comprLen (sp+48)
str x20, [sp, #40]
str w19, [sp, #48]
; strm.next_in = hello (sp+16), strm.avail_in = 14
Lloh249:
adrp x8, _hello@PAGE
Lloh250:
add x8, x8, _hello@PAGEOFF
str x8, [sp, #16]
mov w8, #14
str w8, [sp, #24]
; deflate(&strm, 4) — must return 1 (stream end), else error path
add x0, sp, #16
mov w1, #4
bl _deflate
cmp w0, #1
b.ne LBB8_7
; %bb.3:
add x0, sp, #16
bl _deflateEnd
; kill: def $w0 killed $w0 def $x0
cbnz w0, LBB8_8 ; nonzero -> "deflateEnd" error path
; %bb.4: success — restore and return
ldp x29, x30, [sp, #144] ; 16-byte Folded Reload
ldp x20, x19, [sp, #128] ; 16-byte Folded Reload
add sp, sp, #160
ret
; --- Error paths: each loads stderr and an operation-name string, then
; --- falls into LBB8_9 which does fprintf(stderr, "%s error: %d\n", name, rc)
LBB8_5: ; deflateInit failed
Lloh251:
adrp x8, ___stderrp@GOTPAGE
Lloh252:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh253:
ldr x8, [x8]
Lloh254:
adrp x9, l_.str.24@PAGE
Lloh255:
add x9, x9, l_.str.24@PAGEOFF
b LBB8_9
LBB8_6: ; deflateSetDictionary failed
Lloh256:
adrp x8, ___stderrp@GOTPAGE
Lloh257:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh258:
ldr x8, [x8]
Lloh259:
adrp x9, l_.str.40@PAGE
Lloh260:
add x9, x9, l_.str.40@PAGEOFF
b LBB8_9
LBB8_7: ; deflate did not report Z_STREAM_END — fwrite fixed message, exit(1)
Lloh261:
adrp x8, ___stderrp@GOTPAGE
Lloh262:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh263:
ldr x3, [x8]
Lloh264:
adrp x0, l_.str.33@PAGE
Lloh265:
add x0, x0, l_.str.33@PAGEOFF
mov w1, #35 ; message length in bytes
mov w2, #1
bl _fwrite
mov w0, #1
bl _exit
LBB8_8: ; deflateEnd failed
Lloh266:
adrp x8, ___stderrp@GOTPAGE
Lloh267:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh268:
ldr x8, [x8]
Lloh269:
adrp x9, l_.str.26@PAGE
Lloh270:
add x9, x9, l_.str.26@PAGEOFF
LBB8_9:
; fprintf(stderr, "%s error: %d\n", opname /*x9*/, rc /*w0*/); exit(1)
stp x9, x0, [sp]
Lloh271:
adrp x1, l_.str@PAGE
Lloh272:
add x1, x1, l_.str@PAGEOFF
mov x0, x8
bl _fprintf
mov w0, #1
bl _exit
.loh AdrpAdd Lloh242, Lloh243
.loh AdrpAdd Lloh244, Lloh245
.loh AdrpAdd Lloh249, Lloh250
.loh AdrpLdrGotStr Lloh246, Lloh247, Lloh248
.loh AdrpAdd Lloh254, Lloh255
.loh AdrpLdrGotLdr Lloh251, Lloh252, Lloh253
.loh AdrpAdd Lloh259, Lloh260
.loh AdrpLdrGotLdr Lloh256, Lloh257, Lloh258
.loh AdrpAdd Lloh264, Lloh265
.loh AdrpLdrGotLdr Lloh261, Lloh262, Lloh263
.loh AdrpAdd Lloh269, Lloh270
.loh AdrpLdrGotLdr Lloh266, Lloh267, Lloh268
.loh AdrpAdd Lloh271, Lloh272
.cfi_endproc
; -- End function
; -----------------------------------------------------------------------------
; _test_dict_inflate(x0 = compr, w1 = comprLen, x2 = uncompr, x3 = uncomprLen)
; AArch64 Darwin, AAPCS64; compiler-generated (zlib example test).
; Pre-fills the output buffer with the bytes of "garbage" (built via movk),
; then inflates `compr` in a loop. When inflate returns 2 (presumably
; Z_NEED_DICT — TODO confirm), it verifies the stream's dictionary id against
; the global `_dictId` and supplies `_dictionary` via inflateSetDictionary.
; On stream end it calls inflateEnd and strcmp's the output against `_hello`;
; success prints "inflate with dictionary: %s\n". All failures print to
; stderr and exit(1).
; Callee-saved: x19 = uncompr, x20 = dictionary ptr (after entry uses x20 for
; uncomprLen), x21 = &dictId.
; -----------------------------------------------------------------------------
.globl _test_dict_inflate ; -- Begin function test_dict_inflate
.p2align 2
_test_dict_inflate: ; @test_dict_inflate
.cfi_startproc
; %bb.0:
sub sp, sp, #176
.cfi_def_cfa_offset 176
stp x22, x21, [sp, #128] ; 16-byte Folded Spill
stp x20, x19, [sp, #144] ; 16-byte Folded Spill
stp x29, x30, [sp, #160] ; 16-byte Folded Spill
add x29, sp, #160
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x20, x3
mov x19, x2
; Materialize the 8 bytes of the string "garbage\0" as one 64-bit constant
; and store it at the start of the output buffer.
mov x8, #24935
movk x8, #25202, lsl #16
movk x8, #26465, lsl #32
movk x8, #101, lsl #48
str x8, [x2]
; Zero z_stream allocator fields (sp+80..sp+103).
stp xzr, xzr, [sp, #88]
str xzr, [sp, #80]
; strm.next_in = compr, strm.avail_in = comprLen
str x0, [sp, #16]
str w1, [sp, #24]
Lloh273:
adrp x1, l_.str.23@PAGE
Lloh274:
add x1, x1, l_.str.23@PAGEOFF
; inflateInit_(&strm /*sp+16*/, "1.2.11", 112)
add x0, sp, #16
mov w2, #112
bl _inflateInit_
; kill: def $w0 killed $w0 def $x0
cbnz w0, LBB9_14 ; nonzero -> "inflateInit" error path
; %bb.1:
; strm.next_out = uncompr, strm.avail_out = uncomprLen
str x19, [sp, #40]
str w20, [sp, #48]
; Hoist loop invariants: x20 = &dictionary, x21 = &dictId.
Lloh275:
adrp x20, _dictionary@PAGE
Lloh276:
add x20, x20, _dictionary@PAGEOFF
Lloh277:
adrp x21, _dictId@GOTPAGE
Lloh278:
ldr x21, [x21, _dictId@GOTPAGEOFF]
b LBB9_4
LBB9_2: ; in Loop: Header=BB9_4 Depth=1
cmp w0, #1 ; Z_STREAM_END? -> finish up
b.eq LBB9_9
; %bb.3: ; in Loop: Header=BB9_4 Depth=1
cbnz w0, LBB9_7 ; any other nonzero rc -> "inflate with dict" error
LBB9_4: ; =>This Inner Loop Header: Depth=1
; inflate(&strm, 0 /*presumably Z_NO_FLUSH*/)
add x0, sp, #16
mov w1, #0
bl _inflate
; kill: def $w0 killed $w0 def $x0
cmp w0, #2 ; 2 = dictionary needed (presumably Z_NEED_DICT)
b.ne LBB9_2
; %bb.5: ; in Loop: Header=BB9_4 Depth=1
; Verify strm.adler (sp+112) matches the saved dictId.
ldr x8, [sp, #112]
ldr x9, [x21]
cmp x8, x9
b.ne LBB9_12 ; mismatch -> "unexpected dictionary"
; %bb.6: ; in Loop: Header=BB9_4 Depth=1
; inflateSetDictionary(&strm, dictionary, 6); retry loop on success
add x0, sp, #16
mov x1, x20
mov w2, #6
bl _inflateSetDictionary
; kill: def $w0 killed $w0 def $x0
cbz w0, LBB9_4
LBB9_7: ; inflate (with dict) failed
Lloh279:
adrp x8, ___stderrp@GOTPAGE
Lloh280:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh281:
ldr x8, [x8]
Lloh282:
adrp x9, l_.str.42@PAGE
Lloh283:
add x9, x9, l_.str.42@PAGEOFF
LBB9_8:
; Shared error tail: fprintf(stderr, "%s error: %d\n", name, rc); exit(1)
stp x9, x0, [sp]
Lloh284:
adrp x1, l_.str@PAGE
Lloh285:
add x1, x1, l_.str@PAGEOFF
mov x0, x8
bl _fprintf
mov w0, #1
bl _exit
LBB9_9: ; stream end reached
add x0, sp, #16
bl _inflateEnd
; kill: def $w0 killed $w0 def $x0
cbnz w0, LBB9_15 ; nonzero -> "inflateEnd" error path
; %bb.10:
; Compare decompressed output with the original "hello, hello!".
Lloh286:
adrp x1, _hello@PAGE
Lloh287:
add x1, x1, _hello@PAGEOFF
mov x0, x19
bl _strcmp
cbnz w0, LBB9_16 ; mismatch -> "bad inflate with dict"
; %bb.11: success — printf("inflate with dictionary: %s\n", uncompr)
str x19, [sp]
Lloh288:
adrp x0, l_.str.44@PAGE
Lloh289:
add x0, x0, l_.str.44@PAGEOFF
bl _printf
ldp x29, x30, [sp, #160] ; 16-byte Folded Reload
ldp x20, x19, [sp, #144] ; 16-byte Folded Reload
ldp x22, x21, [sp, #128] ; 16-byte Folded Reload
add sp, sp, #176
ret
LBB9_12: ; dictionary id mismatch — fixed message, exit(1)
Lloh290:
adrp x8, ___stderrp@GOTPAGE
Lloh291:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh292:
ldr x3, [x8]
Lloh293:
adrp x0, l_.str.41@PAGE
Lloh294:
add x0, x0, l_.str.41@PAGEOFF
mov w1, #21 ; message length in bytes
LBB9_13:
; Shared fwrite(msg, len, 1, stderr); exit(1) tail
mov w2, #1
bl _fwrite
mov w0, #1
bl _exit
LBB9_14: ; inflateInit failed
Lloh295:
adrp x8, ___stderrp@GOTPAGE
Lloh296:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh297:
ldr x8, [x8]
Lloh298:
adrp x9, l_.str.27@PAGE
Lloh299:
add x9, x9, l_.str.27@PAGEOFF
b LBB9_8
LBB9_15: ; inflateEnd failed
Lloh300:
adrp x8, ___stderrp@GOTPAGE
Lloh301:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh302:
ldr x8, [x8]
Lloh303:
adrp x9, l_.str.29@PAGE
Lloh304:
add x9, x9, l_.str.29@PAGEOFF
b LBB9_8
LBB9_16: ; output mismatch — "bad inflate with dict\n"
Lloh305:
adrp x8, ___stderrp@GOTPAGE
Lloh306:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh307:
ldr x3, [x8]
Lloh308:
adrp x0, l_.str.43@PAGE
Lloh309:
add x0, x0, l_.str.43@PAGEOFF
mov w1, #22 ; message length in bytes
b LBB9_13
.loh AdrpAdd Lloh273, Lloh274
.loh AdrpLdrGot Lloh277, Lloh278
.loh AdrpAdd Lloh275, Lloh276
.loh AdrpAdd Lloh282, Lloh283
.loh AdrpLdrGotLdr Lloh279, Lloh280, Lloh281
.loh AdrpAdd Lloh284, Lloh285
.loh AdrpAdd Lloh286, Lloh287
.loh AdrpAdd Lloh288, Lloh289
.loh AdrpAdd Lloh293, Lloh294
.loh AdrpLdrGotLdr Lloh290, Lloh291, Lloh292
.loh AdrpAdd Lloh298, Lloh299
.loh AdrpLdrGotLdr Lloh295, Lloh296, Lloh297
.loh AdrpAdd Lloh303, Lloh304
.loh AdrpLdrGotLdr Lloh300, Lloh301, Lloh302
.loh AdrpAdd Lloh308, Lloh309
.loh AdrpLdrGotLdr Lloh305, Lloh306, Lloh307
.cfi_endproc
; -- End function
; -----------------------------------------------------------------------------
; _main(w0 = argc /*x22*/, x1 = argv /*x21*/)
; AArch64 Darwin; compiler-generated driver for the zlib example tests.
; Checks zlibVersion()[0] == '1' (ASCII 49) and warns on an exact-version
; mismatch with "1.2.11"; prints version/compile flags; calloc's two
; 40000-byte buffers; then runs test_compress, test_gzio (argv[1] or
; "foo.gz"), test_deflate, test_inflate, test_large_deflate,
; test_large_inflate, test_flush, test_sync, test_dict_deflate,
; test_dict_inflate; frees both buffers and returns 0.
; Exits(1) on incompatible version or calloc failure ("out of memory").
; Callee-saved: x19 = compr buffer (reused from version-string ptr),
; x20 = uncompr buffer, x21 = argv, x22 = argc.
; -----------------------------------------------------------------------------
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
sub sp, sp, #80
.cfi_def_cfa_offset 80
stp x22, x21, [sp, #32] ; 16-byte Folded Spill
stp x20, x19, [sp, #48] ; 16-byte Folded Spill
stp x29, x30, [sp, #64] ; 16-byte Folded Spill
add x29, sp, #64
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x21, x1
mov x22, x0
; Local uncomprLen-style variable at sp+24, initialized to 40000.
mov w8, #40000
str x8, [sp, #24]
bl _zlibVersion
; Require major version '1' (ASCII 49) as first character.
ldrb w8, [x0]
cmp w8, #49
b.ne LBB10_9
; %bb.1:
; Warn (but continue) if the full version differs from "1.2.11".
bl _zlibVersion
Lloh310:
adrp x19, l_.str.23@PAGE
Lloh311:
add x19, x19, l_.str.23@PAGEOFF
mov x1, x19
bl _strcmp
cbz w0, LBB10_3
; %bb.2:
Lloh312:
adrp x8, ___stderrp@GOTPAGE
Lloh313:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh314:
ldr x3, [x8]
Lloh315:
adrp x0, l_.str.46@PAGE
Lloh316:
add x0, x0, l_.str.46@PAGEOFF
mov w1, #32 ; warning message length
mov w2, #1
bl _fwrite
LBB10_3:
; printf("zlib version %s = 0x%04x, compile flags = 0x%lx\n",
;        "1.2.11", 0x12b0 /*4784*/, zlibCompileFlags())
bl _zlibCompileFlags
mov w8, #4784
stp x8, x0, [sp, #8]
str x19, [sp]
Lloh317:
adrp x0, l_.str.47@PAGE
Lloh318:
add x0, x0, l_.str.47@PAGEOFF
bl _printf
; compr = calloc(40000, 1); uncompr = calloc(40000, 1)
mov w0, #40000
mov w1, #1
bl _calloc
mov x19, x0
mov w0, #40000
mov w1, #1
bl _calloc
cbz x19, LBB10_10 ; either allocation failing -> "out of memory"
; %bb.4:
mov x20, x0
cbz x0, LBB10_10
; %bb.5:
mov x0, x19
mov w1, #40000
mov x2, x20
mov w3, #40000
bl _test_compress
; gz test filename: argv[1] if argc >= 2, else "foo.gz"
cmp w22, #2
b.lt LBB10_7
; %bb.6:
ldr x0, [x21, #8]
b LBB10_8
LBB10_7:
Lloh319:
adrp x0, l_.str.49@PAGE
Lloh320:
add x0, x0, l_.str.49@PAGEOFF
LBB10_8:
mov x1, x20
mov w2, #40000
bl _test_gzio
mov x0, x19
mov w1, #40000
bl _test_deflate
mov x0, x19
mov w1, #40000
mov x2, x20
mov w3, #40000
bl _test_inflate
mov x0, x19
mov w1, #40000
mov x2, x20
mov w3, #40000
bl _test_large_deflate
mov x0, x19
mov w1, #40000
mov x2, x20
mov w3, #40000
bl _test_large_inflate
; test_flush takes &comprLen (sp+24) and may update it for test_sync.
add x1, sp, #24
mov x0, x19
bl _test_flush
ldr x1, [sp, #24]
mov x0, x19
mov x2, x20
mov w3, #40000
bl _test_sync
mov x0, x19
mov w1, #40000
bl _test_dict_deflate
mov x0, x19
mov w1, #40000
mov x2, x20
mov w3, #40000
bl _test_dict_inflate
mov x0, x19
bl _free
mov x0, x20
bl _free
mov w0, #0
ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
ldp x22, x21, [sp, #32] ; 16-byte Folded Reload
add sp, sp, #80
ret
LBB10_9: ; major version mismatch — "incompatible zlib version\n", exit(1)
Lloh321:
adrp x8, ___stderrp@GOTPAGE
Lloh322:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh323:
ldr x3, [x8]
Lloh324:
adrp x0, l_.str.45@PAGE
Lloh325:
add x0, x0, l_.str.45@PAGEOFF
mov w1, #26 ; message length in bytes
mov w2, #1
bl _fwrite
mov w0, #1
bl _exit
LBB10_10: ; calloc failure — puts("out of memory"); exit(1)
Lloh326:
adrp x0, l_str.50@PAGE
Lloh327:
add x0, x0, l_str.50@PAGEOFF
bl _puts
mov w0, #1
bl _exit
.loh AdrpAdd Lloh310, Lloh311
.loh AdrpAdd Lloh315, Lloh316
.loh AdrpLdrGotLdr Lloh312, Lloh313, Lloh314
.loh AdrpAdd Lloh317, Lloh318
.loh AdrpAdd Lloh319, Lloh320
.loh AdrpAdd Lloh324, Lloh325
.loh AdrpLdrGotLdr Lloh321, Lloh322, Lloh323
.loh AdrpAdd Lloh326, Lloh327
.cfi_endproc
; -- End function
; -----------------------------------------------------------------------------
; Read-only data for the zlib example tests above: the plaintext `_hello`,
; the compression dictionary `_dictionary`, the per-call-site message strings
; referenced by the error paths (l_.str.N), and the 8-byte common `_dictId`
; shared between test_dict_deflate (writer) and test_dict_inflate (reader).
; -----------------------------------------------------------------------------
.section __TEXT,__const
.globl _hello ; @hello
_hello:
.asciz "hello, hello!"
.globl _dictionary ; @dictionary
_dictionary:
.asciz "hello"
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str — shared fprintf format for all "<op> error: <rc>" paths
.asciz "%s error: %d\n"
l_.str.1: ; @.str.1
.asciz "compress"
l_.str.3: ; @.str.3
.asciz "uncompress"
l_.str.4: ; @.str.4
.asciz "bad uncompress\n"
l_.str.5: ; @.str.5
.asciz "uncompress(): %s\n"
l_.str.6: ; @.str.6
.asciz "wb"
l_.str.7: ; @.str.7
.asciz "gzopen error\n"
l_.str.8: ; @.str.8
.asciz "ello"
l_.str.9: ; @.str.9
.asciz "gzputs err: %s\n"
l_.str.10: ; @.str.10
.asciz ", %s!"
l_.str.12: ; @.str.12
.asciz "gzprintf err: %s\n"
l_.str.13: ; @.str.13
.asciz "rb"
l_.str.14: ; @.str.14
.asciz "gzread err: %s\n"
l_.str.15: ; @.str.15
.asciz "bad gzread: %s\n"
l_.str.16: ; @.str.16
.asciz "gzread(): %s\n"
l_.str.17: ; @.str.17
.asciz "gzseek error, pos=%ld, gztell=%ld\n"
l_.str.18: ; @.str.18
.asciz "gzgetc error\n"
l_.str.19: ; @.str.19
.asciz "gzungetc error\n"
l_.str.20: ; @.str.20
.asciz "gzgets err after gzseek: %s\n"
l_.str.21: ; @.str.21
.asciz "bad gzgets after gzseek\n"
l_.str.22: ; @.str.22
.asciz "gzgets() after gzseek: %s\n"
l_.str.23: ; @.str.23 — expected zlib version string
.asciz "1.2.11"
l_.str.24: ; @.str.24
.asciz "deflateInit"
l_.str.25: ; @.str.25
.asciz "deflate"
l_.str.26: ; @.str.26
.asciz "deflateEnd"
l_.str.27: ; @.str.27
.asciz "inflateInit"
l_.str.28: ; @.str.28
.asciz "inflate"
l_.str.29: ; @.str.29
.asciz "inflateEnd"
l_.str.30: ; @.str.30
.asciz "bad inflate\n"
l_.str.31: ; @.str.31
.asciz "inflate(): %s\n"
l_.str.32: ; @.str.32
.asciz "deflate not greedy\n"
l_.str.33: ; @.str.33
.asciz "deflate should report Z_STREAM_END\n"
l_.str.34: ; @.str.34
.asciz "large inflate"
l_.str.35: ; @.str.35
.asciz "bad large inflate: %ld\n"
l_.str.37: ; @.str.37
.asciz "inflateSync"
l_.str.38: ; @.str.38
.asciz "inflate should report DATA_ERROR\n"
l_.str.39: ; @.str.39
.asciz "after inflateSync(): hel%s\n"
l_.str.40: ; @.str.40
.asciz "deflateSetDictionary"
; 8 bytes, 8-byte aligned (2^3): dictionary Adler id handed from the
; deflate test to the inflate test.
.comm _dictId,8,3 ; @dictId
l_.str.41: ; @.str.41
.asciz "unexpected dictionary"
l_.str.42: ; @.str.42
.asciz "inflate with dict"
l_.str.43: ; @.str.43
.asciz "bad inflate with dict\n"
l_.str.44: ; @.str.44
.asciz "inflate with dictionary: %s\n"
l_.str.45: ; @.str.45
.asciz "incompatible zlib version\n"
l_.str.46: ; @.str.46
.asciz "warning: different zlib version\n"
l_.str.47: ; @.str.47
.asciz "zlib version %s = 0x%04x, compile flags = 0x%lx\n"
l_.str.49: ; @.str.49 — default gz test filename when argc < 2
.asciz "foo.gz"
l_str: ; @str
.asciz "large_inflate(): OK"
l_str.50: ; @str.50
.asciz "out of memory"
.subsections_via_symbols
| the_stack_data/122016113.c | stack |
## ---------------------------------------------------------------------------
## _a800_rc_query(rdi = d /*device struct ptr*/) — x86-64 SysV, Darwin.
## Compiler-generated from an AnghaBench-style Linux driver stub: kmalloc's a
## 5-byte buffer, issues a vendor control read (usb_control_msg) of 4 bytes,
## and if exactly 5 bytes transfer, dispatches on buf[0]:
##   1 -> rc_keydown(d->field0, RC_PROTO_NEC, RC_SCANCODE_NEC(buf[1], buf[3]), 0)
##   2 -> rc_repeat(d->field0)
## Returns 0 on success, -ENOMEM on allocation failure, -ENODEV on short
## transfer. Register roles: r15 = d, r14 = buffer, r12 = return value,
## rbx = usb device handle (d+4).
## ---------------------------------------------------------------------------
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function a800_rc_query
_a800_rc_query: ## @a800_rc_query
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdi, %r15
## key = kmalloc(5, GFP_KERNEL)
movq _GFP_KERNEL@GOTPCREL(%rip), %rax
movl (%rax), %esi
movl $5, %edi
callq _kmalloc
testq %rax, %rax
je LBB0_1 ## allocation failed -> -ENOMEM
## %bb.2:
movq %rax, %r14
movl 4(%r15), %ebx ## ebx = d->udev (field at offset 4)
xorl %r12d, %r12d ## default return value 0
## pipe = usb_rcvctrlpipe(udev, 0)
movl %ebx, %edi
xorl %esi, %esi
callq _usb_rcvctrlpipe
## request type = USB_TYPE_VENDOR | USB_DIR_IN
movq _USB_TYPE_VENDOR@GOTPCREL(%rip), %rdx
movq _USB_DIR_IN@GOTPCREL(%rip), %rcx
movl (%rcx), %ecx
orl (%rdx), %ecx
## usb_control_msg(udev, pipe, 4, reqtype, 0, 0, key, 5, 2000)
## — 3 stack args (8-byte sub keeps rsp 16-aligned across the 3 pushes)
subq $8, %rsp
movl %ebx, %edi
movl %eax, %esi
movl $4, %edx
xorl %r8d, %r8d
xorl %r9d, %r9d
pushq $2000 ## imm = 0x7D0 (timeout ms, presumably)
pushq $5
pushq %r14
callq _usb_control_msg
addq $32, %rsp
cmpl $5, %eax ## must transfer exactly 5 bytes
jne LBB0_3 ## short transfer -> -ENODEV
## %bb.4:
movl (%r14), %eax ## dispatch on key[0]
cmpl $2, %eax
je LBB0_7 ## 2 -> repeat event
## %bb.5:
cmpl $1, %eax
jne LBB0_8 ## anything else -> just free and return 0
## %bb.6: key event
movl (%r15), %r15d
movq _RC_PROTO_NEC@GOTPCREL(%rip), %rax
movl (%rax), %ebx
## scancode = RC_SCANCODE_NEC(key[1], key[3]) (4-byte element offsets)
movl 4(%r14), %edi
movl 12(%r14), %esi
callq _RC_SCANCODE_NEC
xorl %r12d, %r12d
movl %r15d, %edi
movl %ebx, %esi
movl %eax, %edx
xorl %ecx, %ecx
callq _rc_keydown
jmp LBB0_8
LBB0_1: ## kmalloc failed: return -ENOMEM (skip kfree)
movq _ENOMEM@GOTPCREL(%rip), %rax
xorl %r12d, %r12d
subl (%rax), %r12d
jmp LBB0_9
LBB0_3: ## short control transfer: return -ENODEV (still kfree the buffer)
movq _ENODEV@GOTPCREL(%rip), %rax
subl (%rax), %r12d
jmp LBB0_8
LBB0_7: ## repeat event
movl (%r15), %edi
callq _rc_repeat
LBB0_8:
movq %r14, %rdi
callq _kfree
LBB0_9:
movl %r12d, %eax
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _GFP_KERNEL,4,2 ## @GFP_KERNEL
.comm _ENOMEM,4,2 ## @ENOMEM
.comm _USB_TYPE_VENDOR,4,2 ## @USB_TYPE_VENDOR
.comm _USB_DIR_IN,4,2 ## @USB_DIR_IN
.comm _ENODEV,4,2 ## @ENODEV
.comm _RC_PROTO_NEC,4,2 ## @RC_PROTO_NEC
.no_dead_strip _a800_rc_query
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
; -----------------------------------------------------------------------------
; _a800_rc_query(x0 = d) — AArch64 translation of the x86 version above.
; Same logic: kmalloc(5, GFP_KERNEL); usb_control_msg vendor IN read; on a
; 5-byte transfer dispatch buf[0]: 1 -> rc_keydown(NEC scancode), 2 ->
; rc_repeat; return 0 / -ENOMEM / -ENODEV.
; Register roles: x19 = buffer, x20 = d (then return value), x21 = udev
; (then return value holder in LBB0_8/9 region), w7/stack carry the trailing
; usb_control_msg args (len=5, timeout=2000).
; -----------------------------------------------------------------------------
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function a800_rc_query
_a800_rc_query: ; @a800_rc_query
.cfi_startproc
; %bb.0:
sub sp, sp, #64
.cfi_def_cfa_offset 64
stp x22, x21, [sp, #16] ; 16-byte Folded Spill
stp x20, x19, [sp, #32] ; 16-byte Folded Spill
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x20, x0
; key = kmalloc(5, GFP_KERNEL)
Lloh0:
adrp x8, _GFP_KERNEL@GOTPAGE
Lloh1:
ldr x8, [x8, _GFP_KERNEL@GOTPAGEOFF]
Lloh2:
ldr w1, [x8]
mov w0, #5
bl _kmalloc
cbz x0, LBB0_5 ; allocation failed -> -ENOMEM
; %bb.1:
mov x19, x0
ldr w21, [x20, #4] ; udev field at offset 4
; pipe = usb_rcvctrlpipe(udev, 0)
mov x0, x21
mov w1, #0
bl _usb_rcvctrlpipe
mov x1, x0
; request type = USB_TYPE_VENDOR | USB_DIR_IN
Lloh3:
adrp x8, _USB_TYPE_VENDOR@GOTPAGE
Lloh4:
ldr x8, [x8, _USB_TYPE_VENDOR@GOTPAGEOFF]
Lloh5:
ldr w8, [x8]
Lloh6:
adrp x9, _USB_DIR_IN@GOTPAGE
Lloh7:
ldr x9, [x9, _USB_DIR_IN@GOTPAGEOFF]
Lloh8:
ldr w9, [x9]
orr w3, w9, w8
; usb_control_msg(udev, pipe, 4, reqtype, 0, 0, key, 5, 2000)
; — the 9th arg (timeout=2000) goes on the stack at [sp]
mov w8, #2000
str w8, [sp]
mov x0, x21
mov w2, #4
mov w4, #0
mov w5, #0
mov x6, x19
mov w7, #5
bl _usb_control_msg
cmp w0, #5 ; must transfer exactly 5 bytes
b.ne LBB0_6 ; short -> -ENODEV
; %bb.2:
ldr w8, [x19] ; dispatch on key[0]
cmp w8, #2
b.eq LBB0_7 ; 2 -> repeat event
; %bb.3:
cmp w8, #1
b.ne LBB0_8 ; anything else -> free and return 0
; %bb.4: key event
ldr w20, [x20]
Lloh9:
adrp x8, _RC_PROTO_NEC@GOTPAGE
Lloh10:
ldr x8, [x8, _RC_PROTO_NEC@GOTPAGEOFF]
Lloh11:
ldr w21, [x8]
; scancode = RC_SCANCODE_NEC(key[1], key[3]) (4-byte element offsets)
ldr w0, [x19, #4]
ldr w1, [x19, #12]
bl _RC_SCANCODE_NEC
mov x2, x0
mov x0, x20
mov x1, x21
mov w3, #0
bl _rc_keydown
b LBB0_8
LBB0_5: ; kmalloc failed: return -ENOMEM (skip kfree)
Lloh12:
adrp x8, _ENOMEM@GOTPAGE
Lloh13:
ldr x8, [x8, _ENOMEM@GOTPAGEOFF]
Lloh14:
ldr w8, [x8]
neg w20, w8
b LBB0_10
LBB0_6: ; short transfer: return -ENODEV (still kfree the buffer)
Lloh15:
adrp x8, _ENODEV@GOTPAGE
Lloh16:
ldr x8, [x8, _ENODEV@GOTPAGEOFF]
Lloh17:
ldr w8, [x8]
neg w20, w8
b LBB0_9
LBB0_7: ; repeat event
ldr w0, [x20]
bl _rc_repeat
LBB0_8:
mov w20, #0 ; success return value
LBB0_9:
mov x0, x19
bl _kfree
LBB0_10:
mov x0, x20
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #64
ret
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGotLdr Lloh6, Lloh7, Lloh8
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGotLdr Lloh9, Lloh10, Lloh11
.loh AdrpLdrGotLdr Lloh12, Lloh13, Lloh14
.loh AdrpLdrGotLdr Lloh15, Lloh16, Lloh17
.cfi_endproc
; -- End function
.comm _GFP_KERNEL,4,2 ; @GFP_KERNEL
.comm _ENOMEM,4,2 ; @ENOMEM
.comm _USB_TYPE_VENDOR,4,2 ; @USB_TYPE_VENDOR
.comm _USB_DIR_IN,4,2 ; @USB_DIR_IN
.comm _ENODEV,4,2 ; @ENODEV
.comm _RC_PROTO_NEC,4,2 ; @RC_PROTO_NEC
.no_dead_strip _a800_rc_query
.subsections_via_symbols
| AnghaBench/linux/drivers/media/usb/dvb-usb/extr_a800.c_a800_rc_query.c | anghabench |
## ---------------------------------------------------------------------------
## _da9034_touch_open(rdi = input device) — x86-64 SysV, Darwin.
## Compiler-generated touchscreen-open stub: fetches driver data via
## input_get_drvdata (rbx = touch struct), registers a notifier for
## DA9034_EVENT_PEN_DOWN | DA9034_EVENT_TSI_READY, enables the ADC LDO,
## writes TSI_CTRL1=27 and TSI_CTRL2=0, sets state=STATE_IDLE, and calls
## detect_pen_down(touch, 1). Returns 0 on success, -EBUSY if notifier
## registration fails, or the first failing da903x_* return code otherwise.
## Touch struct layout used here: +0 state, +4 da9034_dev, +8 notifier.
## ---------------------------------------------------------------------------
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function da9034_touch_open
_da9034_touch_open: ## @da9034_touch_open
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax ## 8-byte pad keeps rsp 16-aligned for the calls below
.cfi_offset %rbx, -24
callq _input_get_drvdata
movq %rax, %rbx
## da903x_register_notifier(touch->da9034_dev, &touch->notifier,
##                          PEN_DOWN | TSI_READY)
movl 4(%rax), %edi
leaq 8(%rax), %rsi
movq _DA9034_EVENT_PEN_DOWN@GOTPCREL(%rip), %rax
movq _DA9034_EVENT_TSI_READY@GOTPCREL(%rip), %rcx
movl (%rcx), %edx
orl (%rax), %edx
callq _da903x_register_notifier
testl %eax, %eax
je LBB0_2
## %bb.1: registration failed -> return -EBUSY
movq _EBUSY@GOTPCREL(%rip), %rcx
xorl %eax, %eax
subl (%rcx), %eax
jmp LBB0_6
LBB0_2:
## da903x_set_bits(dev, DA9034_MANUAL_CTRL, DA9034_LDO_ADC_EN)
movl 4(%rbx), %edi
movq _DA9034_MANUAL_CTRL@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq _DA9034_LDO_ADC_EN@GOTPCREL(%rip), %rax
movl (%rax), %edx
callq _da903x_set_bits
testl %eax, %eax
jne LBB0_6 ## propagate nonzero error code
## %bb.3:
## da903x_write(dev, DA9034_TSI_CTRL1, 27)
movl 4(%rbx), %edi
movq _DA9034_TSI_CTRL1@GOTPCREL(%rip), %rax
movl (%rax), %esi
movl $27, %edx
callq _da903x_write
testl %eax, %eax
jne LBB0_6
## %bb.4:
## da903x_write(dev, DA9034_TSI_CTRL2, 0)
movl 4(%rbx), %edi
movq _DA9034_TSI_CTRL2@GOTPCREL(%rip), %rax
movl (%rax), %esi
xorl %edx, %edx
callq _da903x_write
testl %eax, %eax
jne LBB0_6
## %bb.5: touch->state = STATE_IDLE; detect_pen_down(touch, 1); return 0
movq _STATE_IDLE@GOTPCREL(%rip), %rax
movl (%rax), %eax
movl %eax, (%rbx)
movq %rbx, %rdi
movl $1, %esi
callq _detect_pen_down
xorl %eax, %eax
LBB0_6:
addq $8, %rsp
popq %rbx
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _DA9034_EVENT_PEN_DOWN,4,2 ## @DA9034_EVENT_PEN_DOWN
.comm _DA9034_EVENT_TSI_READY,4,2 ## @DA9034_EVENT_TSI_READY
.comm _EBUSY,4,2 ## @EBUSY
.comm _DA9034_MANUAL_CTRL,4,2 ## @DA9034_MANUAL_CTRL
.comm _DA9034_LDO_ADC_EN,4,2 ## @DA9034_LDO_ADC_EN
.comm _DA9034_TSI_CTRL1,4,2 ## @DA9034_TSI_CTRL1
.comm _DA9034_TSI_CTRL2,4,2 ## @DA9034_TSI_CTRL2
.comm _STATE_IDLE,4,2 ## @STATE_IDLE
.no_dead_strip _da9034_touch_open
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function da9034_touch_open
_da9034_touch_open: ; @da9034_touch_open
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
bl _input_get_drvdata
mov x19, x0
ldr w0, [x0, #4]
Lloh0:
adrp x8, _DA9034_EVENT_PEN_DOWN@GOTPAGE
Lloh1:
ldr x8, [x8, _DA9034_EVENT_PEN_DOWN@GOTPAGEOFF]
add x1, x19, #8
Lloh2:
ldr w8, [x8]
Lloh3:
adrp x9, _DA9034_EVENT_TSI_READY@GOTPAGE
Lloh4:
ldr x9, [x9, _DA9034_EVENT_TSI_READY@GOTPAGEOFF]
Lloh5:
ldr w9, [x9]
orr w2, w9, w8
bl _da903x_register_notifier
cbz w0, LBB0_2
; %bb.1:
Lloh6:
adrp x8, _EBUSY@GOTPAGE
Lloh7:
ldr x8, [x8, _EBUSY@GOTPAGEOFF]
Lloh8:
ldr w8, [x8]
neg w0, w8
b LBB0_6
LBB0_2:
ldr w0, [x19, #4]
Lloh9:
adrp x8, _DA9034_MANUAL_CTRL@GOTPAGE
Lloh10:
ldr x8, [x8, _DA9034_MANUAL_CTRL@GOTPAGEOFF]
Lloh11:
ldr w1, [x8]
Lloh12:
adrp x8, _DA9034_LDO_ADC_EN@GOTPAGE
Lloh13:
ldr x8, [x8, _DA9034_LDO_ADC_EN@GOTPAGEOFF]
Lloh14:
ldr w2, [x8]
bl _da903x_set_bits
cbnz w0, LBB0_6
; %bb.3:
ldr w0, [x19, #4]
Lloh15:
adrp x8, _DA9034_TSI_CTRL1@GOTPAGE
Lloh16:
ldr x8, [x8, _DA9034_TSI_CTRL1@GOTPAGEOFF]
Lloh17:
ldr w1, [x8]
mov w2, #27
bl _da903x_write
cbnz w0, LBB0_6
; %bb.4:
ldr w0, [x19, #4]
Lloh18:
adrp x8, _DA9034_TSI_CTRL2@GOTPAGE
Lloh19:
ldr x8, [x8, _DA9034_TSI_CTRL2@GOTPAGEOFF]
Lloh20:
ldr w1, [x8]
mov w2, #0
bl _da903x_write
cbnz w0, LBB0_6
; %bb.5:
Lloh21:
adrp x8, _STATE_IDLE@GOTPAGE
Lloh22:
ldr x8, [x8, _STATE_IDLE@GOTPAGEOFF]
Lloh23:
ldr w8, [x8]
str w8, [x19]
mov x0, x19
mov w1, #1
bl _detect_pen_down
mov w0, #0
LBB0_6:
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGotLdr Lloh6, Lloh7, Lloh8
.loh AdrpLdrGotLdr Lloh12, Lloh13, Lloh14
.loh AdrpLdrGotLdr Lloh9, Lloh10, Lloh11
.loh AdrpLdrGotLdr Lloh15, Lloh16, Lloh17
.loh AdrpLdrGotLdr Lloh18, Lloh19, Lloh20
.loh AdrpLdrGotLdr Lloh21, Lloh22, Lloh23
.cfi_endproc
; -- End function
.comm _DA9034_EVENT_PEN_DOWN,4,2 ; @DA9034_EVENT_PEN_DOWN
.comm _DA9034_EVENT_TSI_READY,4,2 ; @DA9034_EVENT_TSI_READY
.comm _EBUSY,4,2 ; @EBUSY
.comm _DA9034_MANUAL_CTRL,4,2 ; @DA9034_MANUAL_CTRL
.comm _DA9034_LDO_ADC_EN,4,2 ; @DA9034_LDO_ADC_EN
.comm _DA9034_TSI_CTRL1,4,2 ; @DA9034_TSI_CTRL1
.comm _DA9034_TSI_CTRL2,4,2 ; @DA9034_TSI_CTRL2
.comm _STATE_IDLE,4,2 ; @STATE_IDLE
.no_dead_strip _da9034_touch_open
.subsections_via_symbols
| AnghaBench/linux/drivers/input/touchscreen/extr_da9034-ts.c_da9034_touch_open.c | anghabench |
## ---------------------------------------------------------------------------
## _emac_adjust_link(rdi = netdev) — x86-64 SysV, Darwin.
## Compiler-generated phylib link-change callback: adpt = netdev_priv(netdev)
## (r15); phydev = *netdev (r14). If *phydev is nonzero (link up):
## emac_mac_start(adpt); emac_sgmii_link_change(adpt, 1). Otherwise:
## emac_sgmii_link_change(adpt, 0); emac_mac_stop(adpt). Tail-calls
## phy_print_status(phydev) in both cases.
## ---------------------------------------------------------------------------
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function emac_adjust_link
_emac_adjust_link: ## @emac_adjust_link
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax ## pad: keeps rsp 16-aligned for the calls below
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdi, %rbx
callq _netdev_priv
movq %rax, %r15 ## r15 = adapter private data
movq (%rbx), %r14 ## r14 = phydev (first field of netdev)
cmpq $0, (%r14) ## phydev->link set?
je LBB0_2
## %bb.1: link up — start MAC, then notify SGMII with link=1
movq %r15, %rdi
callq _emac_mac_start
movq %r15, %rdi
movl $1, %esi
callq _emac_sgmii_link_change
jmp LBB0_3
LBB0_2: ## link down — notify SGMII with link=0, then stop MAC
movq %r15, %rdi
xorl %esi, %esi
callq _emac_sgmii_link_change
movq %r15, %rdi
callq _emac_mac_stop
LBB0_3:
## Tail-call phy_print_status(phydev): frame torn down first so its
## ret returns directly to our caller.
movq %r14, %rdi
addq $8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
jmp _phy_print_status ## TAILCALL
.cfi_endproc
## -- End function
.no_dead_strip _emac_adjust_link
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function emac_adjust_link
_emac_adjust_link: ; @emac_adjust_link
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x0
bl _netdev_priv
mov x20, x0
ldr x19, [x19]
ldr x8, [x19]
cbz x8, LBB0_2
; %bb.1:
mov x0, x20
bl _emac_mac_start
mov x0, x20
mov w1, #1
bl _emac_sgmii_link_change
b LBB0_3
LBB0_2:
mov x0, x20
mov w1, #0
bl _emac_sgmii_link_change
mov x0, x20
bl _emac_mac_stop
LBB0_3:
mov x0, x19
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
b _phy_print_status
.cfi_endproc
; -- End function
.no_dead_strip _emac_adjust_link
.subsections_via_symbols
| AnghaBench/linux/drivers/net/ethernet/qualcomm/emac/extr_emac-mac.c_emac_adjust_link.c | anghabench |
## ---------------------------------------------------------------------------
## _main — x86-64 SysV, Darwin; compiler-generated character-counting loop.
## Reads stdin via getchar() until EOF (-1), tracking:
##   r15 = total characters, r14 = word count (incremented on a transition
##   from "in whitespace" to "in word", tracked by r13 as a 0/1 flag),
##   rbx = newline count (dl is 1 when the char is '\n').
## The 64-bit mask r12 = 0x100000600 has bits 9 (tab), 10 (newline) and 32
## (space) set; `bt` against it classifies chars <= 32 as whitespace.
## Prints "%d %d %d\n" with (newlines, words, chars) and returns 0.
## NOTE(review): printf receives esi=rbx (newlines), edx=r14 (words),
## ecx=r15 (chars) — argument meaning inferred from the counters; confirm
## against the original C if the ordering matters.
## ---------------------------------------------------------------------------
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax ## pad: keeps rsp 16-aligned across calls
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
callq _getchar
cmpl $-1, %eax ## empty input: skip loop entirely
je LBB0_1
## %bb.2: initialize counters; r12 = whitespace bitmask
xorl %r13d, %r13d
movabsq $4294968832, %r12 ## imm = 0x100000600
xorl %r15d, %r15d
xorl %r14d, %r14d
xorl %ebx, %ebx
jmp LBB0_3
.p2align 4, 0x90
LBB0_6: ## non-whitespace char: count a word only on 0->1 flag transition
## in Loop: Header=BB0_3 Depth=1
cmpl $1, %r13d
adcl $0, %r14d ## r14 += (r13 < 1), i.e. +1 if we were between words
movl $1, %r13d
LBB0_7: ## in Loop: Header=BB0_3 Depth=1
movb %dl, %cl ## dl = (char == '\n') from the compare below
addl %ecx, %ebx ## newline count += 0/1
callq _getchar
incl %r15d ## total characters
cmpl $-1, %eax
je LBB0_8 ## EOF -> print results
LBB0_3: ## =>This Inner Loop Header: Depth=1
xorl %ecx, %ecx
cmpl $10, %eax ## record whether this char is '\n'
sete %dl
cmpl $32, %eax
ja LBB0_6 ## > 32 is always non-whitespace
## %bb.4: ## in Loop: Header=BB0_3 Depth=1
movl %eax, %eax
btq %rax, %r12 ## test char's bit in the space/tab/newline mask
jae LBB0_6 ## bit clear -> non-whitespace
## %bb.5: whitespace — clear the in-word flag
## in Loop: Header=BB0_3 Depth=1
xorl %r13d, %r13d
jmp LBB0_7
LBB0_1: ## immediate EOF: all counters zero
xorl %ebx, %ebx
xorl %r14d, %r14d
xorl %r15d, %r15d
LBB0_8:
## printf("%d %d %d\n", newlines, words, chars); al=0: no vector args
leaq L_.str(%rip), %rdi
movl %ebx, %esi
movl %r14d, %edx
movl %r15d, %ecx
xorl %eax, %eax
callq _printf
xorl %eax, %eax
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "%d %d %d\n"
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
sub sp, sp, #96
.cfi_def_cfa_offset 96
stp x24, x23, [sp, #32] ; 16-byte Folded Spill
stp x22, x21, [sp, #48] ; 16-byte Folded Spill
stp x20, x19, [sp, #64] ; 16-byte Folded Spill
stp x29, x30, [sp, #80] ; 16-byte Folded Spill
add x29, sp, #80
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
bl _getchar
cmn w0, #1
b.eq LBB0_7
; %bb.1:
mov w24, #0
mov w21, #0
mov w20, #0
mov w19, #0
mov w22, #1
mov x23, #1536
movk x23, #1, lsl #32
b LBB0_4
LBB0_2: ; in Loop: Header=BB0_4 Depth=1
cmp w24, #0
cinc w20, w20, eq
mov w24, #1
LBB0_3: ; in Loop: Header=BB0_4 Depth=1
bl _getchar
add w21, w21, #1
cmn w0, #1
b.eq LBB0_8
LBB0_4: ; =>This Inner Loop Header: Depth=1
cmp w0, #10
cinc w19, w19, eq
cmp w0, #32
b.hi LBB0_2
; %bb.5: ; in Loop: Header=BB0_4 Depth=1
mov w8, w0
lsl x8, x22, x8
tst x8, x23
b.eq LBB0_2
; %bb.6: ; in Loop: Header=BB0_4 Depth=1
mov w24, #0
b LBB0_3
LBB0_7:
mov w19, #0
mov w20, #0
mov w21, #0
LBB0_8:
stp x20, x21, [sp, #8]
str x19, [sp]
Lloh0:
adrp x0, l_.str@PAGE
Lloh1:
add x0, x0, l_.str@PAGEOFF
bl _printf
mov w0, #0
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
add sp, sp, #96
ret
.loh AdrpAdd Lloh0, Lloh1
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "%d %d %d\n"
.subsections_via_symbols
| the_stack_data/67325594.c | stack |
## ---------------------------------------------------------------------------
## _kvmppc_mmu_hpte_cache_free(rdi = pte) — x86-64 SysV, Darwin.
## Thin wrapper: tail-calls kmem_cache_free(hpte_cache, pte), where
## hpte_cache is a 4-byte global loaded via the GOT.
## ---------------------------------------------------------------------------
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _kvmppc_mmu_hpte_cache_free ## -- Begin function kvmppc_mmu_hpte_cache_free
.p2align 4, 0x90
_kvmppc_mmu_hpte_cache_free: ## @kvmppc_mmu_hpte_cache_free
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq %rdi, %rsi ## arg2 = pte
movq _hpte_cache@GOTPCREL(%rip), %rax
movl (%rax), %edi ## arg1 = hpte_cache
popq %rbp
jmp _kmem_cache_free ## TAILCALL
.cfi_endproc
## -- End function
.comm _hpte_cache,4,2 ## @hpte_cache
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _kvmppc_mmu_hpte_cache_free ; -- Begin function kvmppc_mmu_hpte_cache_free
.p2align 2
_kvmppc_mmu_hpte_cache_free: ; @kvmppc_mmu_hpte_cache_free
.cfi_startproc
; %bb.0:
mov x1, x0
Lloh0:
adrp x8, _hpte_cache@GOTPAGE
Lloh1:
ldr x8, [x8, _hpte_cache@GOTPAGEOFF]
Lloh2:
ldr w0, [x8]
b _kmem_cache_free
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _hpte_cache,4,2 ; @hpte_cache
.subsections_via_symbols
| AnghaBench/linux/arch/powerpc/kvm/extr_book3s_mmu_hpte.c_kvmppc_mmu_hpte_cache_free.c | anghabench |
## ---------------------------------------------------------------------------
## _ar8327_atu_flush_port(rdi = priv, esi = port) — x86-64 SysV, Darwin.
## Waits for the ATU-busy bit to clear (ar8216_wait_bit on AR8327_REG_ATU_FUNC
## / AR8327_ATU_FUNC_BUSY, target value 0); on success writes
## (port << AR8327_ATU_PORT_NUM_S) | AR8327_ATU_FUNC_OP_FLUSH_PORT |
## AR8327_ATU_FUNC_BUSY back to the same register via ar8xxx_write.
## Returns the wait-bit result (r15d): 0 on success, nonzero if the wait
## failed (write skipped).
## Register roles: r14 = priv, ebx = port then composed value,
## r12 = &REG_ATU_FUNC, r13 = &FUNC_BUSY.
## ---------------------------------------------------------------------------
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function ar8327_atu_flush_port
_ar8327_atu_flush_port: ## @ar8327_atu_flush_port
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax ## pad: keeps rsp 16-aligned across calls
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %esi, %ebx
movq %rdi, %r14
## ret = ar8216_wait_bit(priv, AR8327_REG_ATU_FUNC, AR8327_ATU_FUNC_BUSY, 0)
movq _AR8327_REG_ATU_FUNC@GOTPCREL(%rip), %r12
movl (%r12), %esi
movq _AR8327_ATU_FUNC_BUSY@GOTPCREL(%rip), %r13
movl (%r13), %edx
xorl %ecx, %ecx
callq _ar8216_wait_bit
movl %eax, %r15d
testl %eax, %eax
jne LBB0_2 ## timed out / failed: skip the write
## %bb.1:
## val = (port << PORT_NUM_S) | OP_FLUSH_PORT | FUNC_BUSY
movq _AR8327_ATU_PORT_NUM_S@GOTPCREL(%rip), %rax
movb (%rax), %cl ## shift count must be in cl for shll
shll %cl, %ebx
movq _AR8327_ATU_FUNC_OP_FLUSH_PORT@GOTPCREL(%rip), %rax
orl (%rax), %ebx
orl (%r13), %ebx
## ar8xxx_write(priv, AR8327_REG_ATU_FUNC, val)
movl (%r12), %esi
movq %r14, %rdi
movl %ebx, %edx
callq _ar8xxx_write
LBB0_2:
movl %r15d, %eax ## return the wait-bit result
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _AR8327_REG_ATU_FUNC,4,2 ## @AR8327_REG_ATU_FUNC
.comm _AR8327_ATU_FUNC_BUSY,4,2 ## @AR8327_ATU_FUNC_BUSY
.comm _AR8327_ATU_PORT_NUM_S,4,2 ## @AR8327_ATU_PORT_NUM_S
.comm _AR8327_ATU_FUNC_OP_FLUSH_PORT,4,2 ## @AR8327_ATU_FUNC_OP_FLUSH_PORT
.no_dead_strip _ar8327_atu_flush_port
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function ar8327_atu_flush_port
_ar8327_atu_flush_port: ; @ar8327_atu_flush_port
.cfi_startproc
; %bb.0:
stp x24, x23, [sp, #-64]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 64
stp x22, x21, [sp, #16] ; 16-byte Folded Spill
stp x20, x19, [sp, #32] ; 16-byte Folded Spill
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
mov x20, x1
mov x19, x0
Lloh0:
adrp x22, _AR8327_REG_ATU_FUNC@GOTPAGE
Lloh1:
ldr x22, [x22, _AR8327_REG_ATU_FUNC@GOTPAGEOFF]
ldr w1, [x22]
Lloh2:
adrp x23, _AR8327_ATU_FUNC_BUSY@GOTPAGE
Lloh3:
ldr x23, [x23, _AR8327_ATU_FUNC_BUSY@GOTPAGEOFF]
ldr w2, [x23]
mov w3, #0
bl _ar8216_wait_bit
mov x21, x0
cbnz w0, LBB0_2
; %bb.1:
Lloh4:
adrp x8, _AR8327_ATU_PORT_NUM_S@GOTPAGE
Lloh5:
ldr x8, [x8, _AR8327_ATU_PORT_NUM_S@GOTPAGEOFF]
Lloh6:
ldr w8, [x8]
Lloh7:
adrp x9, _AR8327_ATU_FUNC_OP_FLUSH_PORT@GOTPAGE
Lloh8:
ldr x9, [x9, _AR8327_ATU_FUNC_OP_FLUSH_PORT@GOTPAGEOFF]
lsl w8, w20, w8
Lloh9:
ldr w9, [x9]
orr w8, w8, w9
ldr w9, [x23]
orr w2, w8, w9
ldr w1, [x22]
mov x0, x19
bl _ar8xxx_write
LBB0_2:
mov x0, x21
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
ldp x24, x23, [sp], #64 ; 16-byte Folded Reload
ret
.loh AdrpLdrGot Lloh2, Lloh3
.loh AdrpLdrGot Lloh0, Lloh1
.loh AdrpLdrGotLdr Lloh7, Lloh8, Lloh9
.loh AdrpLdrGotLdr Lloh4, Lloh5, Lloh6
.cfi_endproc
; -- End function
.comm _AR8327_REG_ATU_FUNC,4,2 ; @AR8327_REG_ATU_FUNC
.comm _AR8327_ATU_FUNC_BUSY,4,2 ; @AR8327_ATU_FUNC_BUSY
.comm _AR8327_ATU_PORT_NUM_S,4,2 ; @AR8327_ATU_PORT_NUM_S
.comm _AR8327_ATU_FUNC_OP_FLUSH_PORT,4,2 ; @AR8327_ATU_FUNC_OP_FLUSH_PORT
.no_dead_strip _ar8327_atu_flush_port
.subsections_via_symbols
| AnghaBench/openwrt/target/linux/generic/files/drivers/net/phy/extr_ar8327.c_ar8327_atu_flush_port.c | anghabench |
## ---------------------------------------------------------------------------
## _usage() — x86-64 SysV, Darwin. Noreturn helper: prints the dbtest usage
## string to stderr via fprintf and exit(1)s. (No epilogue is emitted; exit
## never returns.) NOTE(review): no `xorl %eax,%eax` before the variadic
## fprintf — harmless here since the format takes no conversion arguments.
## ---------------------------------------------------------------------------
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _usage ## -- Begin function usage
.p2align 4, 0x90
_usage: ## @usage
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
## fprintf(stderr, usage_string)
movq _stderr@GOTPCREL(%rip), %rax
movl (%rax), %edi
leaq L_.str(%rip), %rsi
callq _fprintf
movl $1, %edi
callq _exit
.cfi_endproc
## -- End function
.comm _stderr,4,2 ## @stderr
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "usage: dbtest [-l] [-f file] [-i info] [-o file] type script\n"
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _usage ; -- Begin function usage
.p2align 2
_usage: ; @usage
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh0:
adrp x8, _stderr@GOTPAGE
Lloh1:
ldr x8, [x8, _stderr@GOTPAGEOFF]
Lloh2:
ldr w0, [x8]
Lloh3:
adrp x1, l_.str@PAGE
Lloh4:
add x1, x1, l_.str@PAGEOFF
bl _fprintf
mov w0, #1
bl _exit
.loh AdrpAdd Lloh3, Lloh4
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _stderr,4,2 ; @stderr
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "usage: dbtest [-l] [-f file] [-i info] [-o file] type script\n"
.subsections_via_symbols
| AnghaBench/freebsd/lib/libc/db/test/extr_dbtest.c_usage.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _status_get_hashes_msec_all ## -- Begin function status_get_hashes_msec_all
.p2align 4, 0x90
_status_get_hashes_msec_all: ## @status_get_hashes_msec_all
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq (%rdi), %r15
cmpl $0, (%r15)
jle LBB0_1
## %bb.3:
movq %rdi, %r14
xorps %xmm0, %xmm0
xorl %ebx, %ebx
.p2align 4, 0x90
LBB0_4: ## =>This Inner Loop Header: Depth=1
movsd %xmm0, -32(%rbp) ## 8-byte Spill
movq %r14, %rdi
movl %ebx, %esi
callq _status_get_hashes_msec_dev
xorps %xmm0, %xmm0
cvtsi2sd %rax, %xmm0
movsd -32(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
addsd %xmm0, %xmm1
movsd %xmm1, -32(%rbp) ## 8-byte Spill
movsd -32(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
incl %ebx
cmpl (%r15), %ebx
jl LBB0_4
jmp LBB0_2
LBB0_1:
xorps %xmm0, %xmm0
LBB0_2:
addq $8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _status_get_hashes_msec_all ; -- Begin function status_get_hashes_msec_all
.p2align 2
_status_get_hashes_msec_all: ; @status_get_hashes_msec_all
.cfi_startproc
; %bb.0:
stp d9, d8, [sp, #-64]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 64
stp x22, x21, [sp, #16] ; 16-byte Folded Spill
stp x20, x19, [sp, #32] ; 16-byte Folded Spill
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset b8, -56
.cfi_offset b9, -64
ldr x21, [x0]
ldr w8, [x21]
cmp w8, #1
b.lt LBB0_3
; %bb.1:
mov x19, x0
mov w20, #0
movi d8, #0000000000000000
LBB0_2: ; =>This Inner Loop Header: Depth=1
mov x0, x19
mov x1, x20
bl _status_get_hashes_msec_dev
scvtf d0, x0
fadd d8, d8, d0
add w20, w20, #1
ldr w8, [x21]
cmp w20, w8
b.lt LBB0_2
b LBB0_4
LBB0_3:
movi d8, #0000000000000000
LBB0_4:
fmov d0, d8
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
ldp d9, d8, [sp], #64 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
.subsections_via_symbols
| AnghaBench/hashcat/src/extr_status.c_status_get_hashes_msec_all.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function refill_rspq
_refill_rspq: ## @refill_rspq
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %edx, %r14d
movq %rsi, %rbx
movq %rdi, %r15
xorl %eax, %eax
callq _rmb
movq _A_SG_RSPQ_CREDIT_RETURN@GOTPCREL(%rip), %rax
movl (%rax), %r12d
movl (%rbx), %edi
callq _V_RSPQ
movl %eax, %ebx
movl %r14d, %edi
callq _V_CREDITS
orl %eax, %ebx
movq %r15, %rdi
movl %r12d, %esi
movl %ebx, %edx
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
jmp _t3_write_reg ## TAILCALL
.cfi_endproc
## -- End function
.comm _A_SG_RSPQ_CREDIT_RETURN,4,2 ## @A_SG_RSPQ_CREDIT_RETURN
.no_dead_strip _refill_rspq
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function refill_rspq
_refill_rspq: ; @refill_rspq
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x19, x2
mov x20, x1
mov x21, x0
bl _rmb
Lloh0:
adrp x8, _A_SG_RSPQ_CREDIT_RETURN@GOTPAGE
Lloh1:
ldr x8, [x8, _A_SG_RSPQ_CREDIT_RETURN@GOTPAGEOFF]
Lloh2:
ldr w22, [x8]
ldr w0, [x20]
bl _V_RSPQ
mov x20, x0
mov x0, x19
bl _V_CREDITS
orr w2, w0, w20
mov x0, x21
mov x1, x22
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
b _t3_write_reg
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _A_SG_RSPQ_CREDIT_RETURN,4,2 ; @A_SG_RSPQ_CREDIT_RETURN
.no_dead_strip _refill_rspq
.subsections_via_symbols
| AnghaBench/linux/drivers/net/ethernet/chelsio/cxgb3/extr_sge.c_refill_rspq.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _ns_nprint ## -- Begin function ns_nprint
.p2align 4, 0x90
_ns_nprint: ## @ns_nprint
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $24, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdx, %r12
movq %rsi, %rbx
movq %rdi, %r15
callq _labellen
cmpq $-1, %rax
je LBB0_18
## %bb.1:
movq %rax, %r14
movq (%rbx), %rdi
movl $1, %esi
callq _ND_TTEST2
testl %eax, %eax
je LBB0_18
## %bb.2:
movq (%rbx), %r13
movq _INDIR_MASK@GOTPCREL(%rip), %rcx
movq %r13, %rax
notq %rax
andq (%rcx), %rax
movq %rbx, %rcx
leaq 8(%rbx,%r14,8), %rbx
cmoveq %rax, %rbx
testq %r13, %r13
je LBB0_20
## %bb.3:
movq %r12, %rax
leaq 8(%rcx), %r12
movq %rax, -64(%rbp) ## 8-byte Spill
subq %rax, %rcx
sarq $3, %rcx
movq %rcx, -56(%rbp) ## 8-byte Spill
movb $1, %al
movl %eax, -44(%rbp) ## 4-byte Spill
.p2align 4, 0x90
LBB0_4: ## =>This Inner Loop Header: Depth=1
movq (%r15), %rcx
cmpq %rcx, %r12
jae LBB0_22
## %bb.5: ## in Loop: Header=BB0_4 Depth=1
movq _INDIR_MASK@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, %rdx
andq %r13, %rdx
cmpq %rax, %rdx
je LBB0_13
## %bb.6: ## in Loop: Header=BB0_4 Depth=1
movq _EDNS0_MASK@GOTPCREL(%rip), %rsi
cmpq (%rsi), %rdx
jne LBB0_9
## %bb.7: ## in Loop: Header=BB0_4 Depth=1
notq %rax
andq %rax, %r13
cmpl $128, %r13d
jne LBB0_25
## %bb.8: ## in Loop: Header=BB0_4 Depth=1
movq %r15, %rdi
movq %r12, %rsi
callq _blabel_print
testq %rax, %rax
jne LBB0_10
jmp LBB0_18
.p2align 4, 0x90
LBB0_9: ## in Loop: Header=BB0_4 Depth=1
movq %r15, %rdi
movq %r12, %rsi
movq %r14, %rdx
callq _fn_printn
testq %rax, %rax
jne LBB0_18
LBB0_10: ## in Loop: Header=BB0_4 Depth=1
leaq (%r12,%r14,8), %r12
leaq L_.str.2(%rip), %rdi
callq _ND_PRINT
movq %r15, %rdi
movq %r12, %rsi
callq _labellen
cmpq $-1, %rax
je LBB0_18
## %bb.11: ## in Loop: Header=BB0_4 Depth=1
movq %rax, %r14
movq (%r12), %rdi
movl $1, %esi
callq _ND_TTEST2
testl %eax, %eax
je LBB0_18
## %bb.12: ## in Loop: Header=BB0_4 Depth=1
movq (%r12), %r13
addq $8, %r12
movq %r14, %rax
incq %rax
testb $1, -44(%rbp) ## 1-byte Folded Reload
movl $0, %ecx
cmoveq %rcx, %rax
leaq (%rbx,%rax,8), %rbx
testq %r13, %r13
jne LBB0_4
jmp LBB0_22
LBB0_13: ## in Loop: Header=BB0_4 Depth=1
leaq 8(%r12), %rax
testb $1, -44(%rbp) ## 1-byte Folded Reload
cmovneq %rax, %rbx
movq (%r12), %rdi
movl $1, %esi
callq _ND_TTEST2
testl %eax, %eax
je LBB0_18
## %bb.14: ## in Loop: Header=BB0_4 Depth=1
shll $8, %r13d
orl (%r12), %r13d
andl $16383, %r13d ## imm = 0x3FFF
cmpq -56(%rbp), %r13 ## 8-byte Folded Reload
jge LBB0_29
## %bb.15: ## in Loop: Header=BB0_4 Depth=1
movq -64(%rbp), %rax ## 8-byte Reload
leaq (%rax,%r13,8), %r12
movq %r15, %rdi
movq %r12, %rsi
callq _labellen
cmpq $-1, %rax
je LBB0_18
## %bb.16: ## in Loop: Header=BB0_4 Depth=1
movq %rax, %r14
movq (%r12), %rdi
movl $1, %esi
callq _ND_TTEST2
testl %eax, %eax
je LBB0_18
## %bb.17: ## in Loop: Header=BB0_4 Depth=1
movq %r13, -56(%rbp) ## 8-byte Spill
movq (%r12), %rax
addq $8, %r12
movl $0, -44(%rbp) ## 4-byte Folded Spill
movq %rax, %r13
testq %rax, %rax
jne LBB0_4
jmp LBB0_22
LBB0_18:
xorl %ebx, %ebx
LBB0_22:
movq %rbx, %rax
addq $24, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB0_20:
leaq L_.str.2(%rip), %rdi
LBB0_21:
callq _ND_PRINT
jmp LBB0_22
LBB0_25:
movslq %r13d, %rdi
xorl %ebx, %ebx
jmp LBB0_21
LBB0_29:
xorl %ebx, %ebx
leaq L_.str(%rip), %rdi
jmp LBB0_21
.cfi_endproc
## -- End function
.comm _INDIR_MASK,8,3 ## @INDIR_MASK
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "<BAD PTR>"
.comm _EDNS0_MASK,8,3 ## @EDNS0_MASK
L_.str.2: ## @.str.2
.asciz "."
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _ns_nprint ; -- Begin function ns_nprint
.p2align 2
_ns_nprint: ; @ns_nprint
.cfi_startproc
; %bb.0:
sub sp, sp, #112
.cfi_def_cfa_offset 112
stp x28, x27, [sp, #16] ; 16-byte Folded Spill
stp x26, x25, [sp, #32] ; 16-byte Folded Spill
stp x24, x23, [sp, #48] ; 16-byte Folded Spill
stp x22, x21, [sp, #64] ; 16-byte Folded Spill
stp x20, x19, [sp, #80] ; 16-byte Folded Spill
stp x29, x30, [sp, #96] ; 16-byte Folded Spill
add x29, sp, #96
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
mov x20, x2
mov x22, x1
mov x21, x0
bl _labellen
cmn x0, #1
b.eq LBB0_18
; %bb.1:
mov x25, x0
ldr x0, [x22]
mov w1, #1
bl _ND_TTEST2
cbz w0, LBB0_18
; %bb.2:
mov x23, x22
ldr x24, [x23], #8
Lloh0:
adrp x26, _INDIR_MASK@GOTPAGE
Lloh1:
ldr x26, [x26, _INDIR_MASK@GOTPAGEOFF]
ldr x8, [x26]
add x9, x23, x25, lsl #3
bics xzr, x8, x24
csel x19, xzr, x9, eq
cbz x24, LBB0_20
; %bb.3:
sub x8, x22, x20
asr x8, x8, #3
str x8, [sp, #8] ; 8-byte Folded Spill
mov w28, #1
Lloh2:
adrp x27, _EDNS0_MASK@GOTPAGE
Lloh3:
ldr x27, [x27, _EDNS0_MASK@GOTPAGEOFF]
Lloh4:
adrp x22, l_.str.2@PAGE
Lloh5:
add x22, x22, l_.str.2@PAGEOFF
LBB0_4: ; =>This Inner Loop Header: Depth=1
ldr x3, [x21]
cmp x23, x3
b.hs LBB0_19
; %bb.5: ; in Loop: Header=BB0_4 Depth=1
ldr x8, [x26]
and x9, x8, x24
cmp x9, x8
b.eq LBB0_13
; %bb.6: ; in Loop: Header=BB0_4 Depth=1
ldr x10, [x27]
cmp x9, x10
b.ne LBB0_9
; %bb.7: ; in Loop: Header=BB0_4 Depth=1
bic x8, x24, x8
cmp w8, #128
b.ne LBB0_22
; %bb.8: ; in Loop: Header=BB0_4 Depth=1
mov x0, x21
mov x1, x23
bl _blabel_print
cbnz x0, LBB0_10
b LBB0_18
LBB0_9: ; in Loop: Header=BB0_4 Depth=1
mov x0, x21
mov x1, x23
mov x2, x25
bl _fn_printn
cbnz x0, LBB0_18
LBB0_10: ; in Loop: Header=BB0_4 Depth=1
add x23, x23, x25, lsl #3
mov x0, x22
bl _ND_PRINT
mov x0, x21
mov x1, x23
bl _labellen
cmn x0, #1
b.eq LBB0_18
; %bb.11: ; in Loop: Header=BB0_4 Depth=1
mov x25, x0
ldr x0, [x23]
mov w1, #1
bl _ND_TTEST2
cbz w0, LBB0_18
; %bb.12: ; in Loop: Header=BB0_4 Depth=1
ldr x24, [x23], #8
tst w28, #0x1
csinc x8, xzr, x25, eq
add x19, x19, x8, lsl #3
cbnz x24, LBB0_4
b LBB0_19
LBB0_13: ; in Loop: Header=BB0_4 Depth=1
tst w28, #0x1
mov x8, x23
ldr x0, [x8], #8
csel x19, x8, x19, ne
mov w1, #1
bl _ND_TTEST2
cbz w0, LBB0_18
; %bb.14: ; in Loop: Header=BB0_4 Depth=1
ldr w8, [x23]
orr w8, w8, w24, lsl #8
and x22, x8, #0x3fff
ldr x8, [sp, #8] ; 8-byte Folded Reload
cmp x22, x8
b.ge LBB0_23
; %bb.15: ; in Loop: Header=BB0_4 Depth=1
add x23, x20, x22, lsl #3
mov x0, x21
mov x1, x23
bl _labellen
cmn x0, #1
b.eq LBB0_18
; %bb.16: ; in Loop: Header=BB0_4 Depth=1
mov x25, x0
ldr x0, [x23]
mov w1, #1
bl _ND_TTEST2
cbz w0, LBB0_18
; %bb.17: ; in Loop: Header=BB0_4 Depth=1
mov w28, #0
ldr x24, [x23], #8
str x22, [sp, #8] ; 8-byte Folded Spill
Lloh6:
adrp x22, l_.str.2@PAGE
Lloh7:
add x22, x22, l_.str.2@PAGEOFF
cbnz x24, LBB0_4
b LBB0_19
LBB0_18:
mov x19, #0
LBB0_19:
mov x0, x19
ldp x29, x30, [sp, #96] ; 16-byte Folded Reload
ldp x20, x19, [sp, #80] ; 16-byte Folded Reload
ldp x22, x21, [sp, #64] ; 16-byte Folded Reload
ldp x24, x23, [sp, #48] ; 16-byte Folded Reload
ldp x26, x25, [sp, #32] ; 16-byte Folded Reload
ldp x28, x27, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #112
ret
LBB0_20:
Lloh8:
adrp x0, l_.str.2@PAGE
Lloh9:
add x0, x0, l_.str.2@PAGEOFF
LBB0_21:
bl _ND_PRINT
b LBB0_19
LBB0_22:
mov x19, #0
sxtw x0, w8
b LBB0_21
LBB0_23:
mov x19, #0
Lloh10:
adrp x0, l_.str@PAGE
Lloh11:
add x0, x0, l_.str@PAGEOFF
b LBB0_21
.loh AdrpLdrGot Lloh0, Lloh1
.loh AdrpAdd Lloh4, Lloh5
.loh AdrpLdrGot Lloh2, Lloh3
.loh AdrpAdd Lloh6, Lloh7
.loh AdrpAdd Lloh8, Lloh9
.loh AdrpAdd Lloh10, Lloh11
.cfi_endproc
; -- End function
.comm _INDIR_MASK,8,3 ; @INDIR_MASK
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "<BAD PTR>"
.comm _EDNS0_MASK,8,3 ; @EDNS0_MASK
l_.str.2: ; @.str.2
.asciz "."
.subsections_via_symbols
| AnghaBench/freebsd/contrib/tcpdump/extr_print-domain.c_ns_nprint.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function mt7623_trgmii_write
_mt7623_trgmii_write: ## @mt7623_trgmii_write
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %edx, %r15d
movq %rdi, %r14
movl 4(%rdi), %ebx
movl %esi, %edi
callq _TRGMII_BASE
movl %ebx, %edi
movl %eax, %esi
movl %r15d, %edx
callq _regmap_write
movl %eax, %ebx
testl %eax, %eax
jns LBB0_2
## %bb.1:
movl (%r14), %edi
leaq L_.str(%rip), %rsi
callq _dev_err
LBB0_2:
movl %ebx, %eax
addq $8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "failed to priv write register\n"
.no_dead_strip _mt7623_trgmii_write
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function mt7623_trgmii_write
_mt7623_trgmii_write: ; @mt7623_trgmii_write
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x20, x2
mov x19, x0
ldr w21, [x0, #4]
mov x0, x1
bl _TRGMII_BASE
mov x1, x0
mov x0, x21
mov x2, x20
bl _regmap_write
mov x20, x0
tbz w0, #31, LBB0_2
; %bb.1:
ldr w0, [x19]
Lloh0:
adrp x1, l_.str@PAGE
Lloh1:
add x1, x1, l_.str@PAGEOFF
bl _dev_err
LBB0_2:
mov x0, x20
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
.loh AdrpAdd Lloh0, Lloh1
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "failed to priv write register\n"
.no_dead_strip _mt7623_trgmii_write
.subsections_via_symbols
| AnghaBench/linux/drivers/net/dsa/extr_mt7530.c_mt7623_trgmii_write.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq _count@GOTPCREL(%rip), %r14
movl $0, (%r14)
movq _nl@GOTPCREL(%rip), %r15
movl $0, (%r15)
movq _c@GOTPCREL(%rip), %rbx
jmp LBB0_1
.p2align 4, 0x90
LBB0_3: ## in Loop: Header=BB0_1 Depth=1
xorl %ecx, %ecx
LBB0_4: ## in Loop: Header=BB0_1 Depth=1
movl %ecx, (%r15)
incl (%r14)
LBB0_1: ## =>This Inner Loop Header: Depth=1
callq _getchar
movl %eax, (%rbx)
movl $1, %ecx
cmpl $10, %eax
je LBB0_4
## %bb.2: ## in Loop: Header=BB0_1 Depth=1
cmpl $-1, %eax
jne LBB0_3
## %bb.5:
cmpl $0, (%r15)
jne LBB0_7
## %bb.6:
movl $10, %edi
callq _putchar
LBB0_7:
movl (%r14), %esi
leaq L_.str.1(%rip), %rdi
xorl %eax, %eax
callq _printf
xorl %eax, %eax
addq $8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _count,4,2 ## @count
.comm _nl,4,2 ## @nl
.comm _c,4,2 ## @c
.section __TEXT,__cstring,cstring_literals
L_.str.1: ## @.str.1
.asciz "%d\n"
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
sub sp, sp, #64
.cfi_def_cfa_offset 64
stp x22, x21, [sp, #16] ; 16-byte Folded Spill
stp x20, x19, [sp, #32] ; 16-byte Folded Spill
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
Lloh0:
adrp x19, _count@GOTPAGE
Lloh1:
ldr x19, [x19, _count@GOTPAGEOFF]
str wzr, [x19]
Lloh2:
adrp x20, _nl@GOTPAGE
Lloh3:
ldr x20, [x20, _nl@GOTPAGEOFF]
str wzr, [x20]
Lloh4:
adrp x21, _c@GOTPAGE
Lloh5:
ldr x21, [x21, _c@GOTPAGEOFF]
b LBB0_3
LBB0_1: ; in Loop: Header=BB0_3 Depth=1
mov w8, #1
LBB0_2: ; in Loop: Header=BB0_3 Depth=1
str w8, [x20]
ldr w8, [x19]
add w8, w8, #1
str w8, [x19]
LBB0_3: ; =>This Inner Loop Header: Depth=1
bl _getchar
str w0, [x21]
cmp w0, #10
b.eq LBB0_1
; %bb.4: ; in Loop: Header=BB0_3 Depth=1
cmn w0, #1
b.eq LBB0_6
; %bb.5: ; in Loop: Header=BB0_3 Depth=1
mov w8, #0
b LBB0_2
LBB0_6:
ldr w8, [x20]
cbnz w8, LBB0_8
; %bb.7:
mov w0, #10
bl _putchar
LBB0_8:
ldr w8, [x19]
str x8, [sp]
Lloh6:
adrp x0, l_.str.1@PAGE
Lloh7:
add x0, x0, l_.str.1@PAGEOFF
bl _printf
mov w0, #0
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #64
ret
.loh AdrpLdrGot Lloh4, Lloh5
.loh AdrpLdrGot Lloh2, Lloh3
.loh AdrpLdrGot Lloh0, Lloh1
.loh AdrpAdd Lloh6, Lloh7
.cfi_endproc
; -- End function
.comm _count,4,2 ; @count
.comm _nl,4,2 ; @nl
.comm _c,4,2 ; @c
.section __TEXT,__cstring,cstring_literals
l_.str.1: ; @.str.1
.asciz "%d\n"
.subsections_via_symbols
| the_stack_data/873001.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
subq $48, %rsp
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -8(%rbp)
leaq L_.str.1(%rip), %rax
movq %rax, -48(%rbp)
leaq L_.str.2(%rip), %rax
movq %rax, -40(%rbp)
leaq L_.str.3(%rip), %rax
movq %rax, -32(%rbp)
leaq L_.str.4(%rip), %rax
movq %rax, -24(%rbp)
movq $0, -16(%rbp)
leaq L_.str(%rip), %rdi
leaq -48(%rbp), %rsi
callq _execvp
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
cmpq -8(%rbp), %rax
jne LBB0_2
## %bb.1:
xorl %eax, %eax
addq $48, %rsp
popq %rbp
retq
LBB0_2:
callq ___stack_chk_fail
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "/bin/ls"
L_.str.1: ## @.str.1
.asciz "ls"
L_.str.2: ## @.str.2
.asciz "-l"
L_.str.3: ## @.str.3
.asciz "-R"
L_.str.4: ## @.str.4
.asciz "../"
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
sub sp, sp, #64
.cfi_def_cfa_offset 64
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh0:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh1:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh2:
ldr x8, [x8]
Lloh3:
adrp x9, l_.str.1@PAGE
Lloh4:
add x9, x9, l_.str.1@PAGEOFF
stur x8, [x29, #-8]
Lloh5:
adrp x8, l_.str.2@PAGE
Lloh6:
add x8, x8, l_.str.2@PAGEOFF
Lloh7:
adrp x10, l_.str.3@PAGE
Lloh8:
add x10, x10, l_.str.3@PAGEOFF
stp x9, x8, [sp]
Lloh9:
adrp x8, l_.str.4@PAGE
Lloh10:
add x8, x8, l_.str.4@PAGEOFF
stp x10, x8, [sp, #16]
str xzr, [sp, #32]
Lloh11:
adrp x0, l_.str@PAGE
Lloh12:
add x0, x0, l_.str@PAGEOFF
mov x1, sp
bl _execvp
ldur x8, [x29, #-8]
Lloh13:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh14:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh15:
ldr x9, [x9]
cmp x9, x8
b.ne LBB0_2
; %bb.1:
mov w0, #0
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
add sp, sp, #64
ret
LBB0_2:
bl ___stack_chk_fail
.loh AdrpLdrGotLdr Lloh13, Lloh14, Lloh15
.loh AdrpAdd Lloh11, Lloh12
.loh AdrpAdd Lloh9, Lloh10
.loh AdrpAdd Lloh7, Lloh8
.loh AdrpAdd Lloh5, Lloh6
.loh AdrpAdd Lloh3, Lloh4
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "/bin/ls"
l_.str.1: ; @.str.1
.asciz "ls"
l_.str.2: ; @.str.2
.asciz "-l"
l_.str.3: ; @.str.3
.asciz "-R"
l_.str.4: ; @.str.4
.asciz "../"
.subsections_via_symbols
| the_stack_data/273203.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _reset_struct ## -- Begin function reset_struct
.p2align 4, 0x90
_reset_struct: ## @reset_struct
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
xorps %xmm0, %xmm0
movups %xmm0, 16(%rdi)
movups %xmm0, (%rdi)
popq %rbp
retq
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _reset_struct ; -- Begin function reset_struct
.p2align 2
_reset_struct: ; @reset_struct
.cfi_startproc
; %bb.0:
movi.2d v0, #0000000000000000
stp q0, q0, [x0]
ret
.cfi_endproc
; -- End function
.subsections_via_symbols
| AnghaBench/goaccess/src/extr_parser.c_reset_struct.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _initializeArray ## -- Begin function initializeArray
.p2align 4, 0x90
_initializeArray: ## @initializeArray
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq %rsi, %r14
movq %rdi, %rbx
imulq $204, %rsi, %rdi
callq _malloc
movq %rax, (%rbx)
movq $0, 8(%rbx)
movq %r14, 16(%rbx)
popq %rbx
popq %r14
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _insertList ## -- Begin function insertList
.p2align 4, 0x90
_insertList: ## @insertList
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq %rdi, %rbx
leaq 16(%rbp), %r14
movq 8(%rdi), %rcx
cmpq 16(%rdi), %rcx
jne LBB1_1
## %bb.2:
incq %rcx
movq %rcx, 16(%rbx)
movq (%rbx), %rdi
imulq $204, %rcx, %rsi
callq _realloc
movq %rax, (%rbx)
movq 8(%rbx), %rcx
jmp LBB1_3
LBB1_1:
movq (%rbx), %rax
LBB1_3:
leaq 1(%rcx), %rdx
movq %rdx, 8(%rbx)
imulq $204, %rcx, %rdi
addq %rax, %rdi
movl $204, %edx
movq %r14, %rsi
callq _memcpy
popq %rbx
popq %r14
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _freeList ## -- Begin function freeList
.p2align 4, 0x90
_freeList: ## @freeList
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
movq %rdi, %rbx
movq (%rdi), %rdi
callq _free
xorps %xmm0, %xmm0
movups %xmm0, (%rbx)
movq $0, 16(%rbx)
addq $8, %rsp
popq %rbx
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__literal4,4byte_literals
.p2align 2 ## -- Begin function addProductToList
LCPI3_0:
.long 0x00000000 ## float 0
.section __TEXT,__text,regular,pure_instructions
.globl _addProductToList
.p2align 4, 0x90
_addProductToList: ## @addProductToList
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $664, %rsp ## imm = 0x298
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdi, -704(%rbp) ## 8-byte Spill
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -48(%rbp)
movq ___stdinp@GOTPCREL(%rip), %r14
leaq -576(%rbp), %rbx
leaq L_.str.1(%rip), %r12
leaq -692(%rbp), %r13
leaq L_str.17(%rip), %r15
jmp LBB3_1
.p2align 4, 0x90
LBB3_15: ## in Loop: Header=BB3_1 Depth=1
movq %r15, %rdi
callq _puts
LBB3_1: ## =>This Inner Loop Header: Depth=1
leaq L_.str(%rip), %rdi
xorl %eax, %eax
callq _printf
movq (%r14), %rdx
movq %rbx, %rdi
movl $100, %esi
callq _fgets
movq %rbx, %rdi
movq %r12, %rsi
movq %r13, %rdx
xorl %eax, %eax
callq _sscanf
cmpl $1, %eax
je LBB3_15
## %bb.2: ## in Loop: Header=BB3_1 Depth=1
movq %rbx, %rdi
leaq L_.str.3(%rip), %rsi
leaq -688(%rbp), %rdx
xorl %eax, %eax
callq _sscanf
cmpl $1, %eax
jne LBB3_15
## %bb.3:
leaq -464(%rbp), %rdi
leaq -688(%rbp), %rsi
movl $100, %edx
callq ___strcpy_chk
leaq -576(%rbp), %rbx
leaq L_.str.1(%rip), %r15
leaq -692(%rbp), %r12
leaq L_str.17(%rip), %r13
jmp LBB3_4
.p2align 4, 0x90
LBB3_16: ## in Loop: Header=BB3_4 Depth=1
movq %r13, %rdi
callq _puts
LBB3_4: ## =>This Inner Loop Header: Depth=1
leaq L_.str.4(%rip), %rdi
xorl %eax, %eax
callq _printf
movq (%r14), %rdx
movq %rbx, %rdi
movl $100, %esi
callq _fgets
movq %rbx, %rdi
movq %r15, %rsi
movq %r12, %rdx
xorl %eax, %eax
callq _sscanf
cmpl $1, %eax
jne LBB3_16
## %bb.5: ## in Loop: Header=BB3_4 Depth=1
movss -692(%rbp), %xmm0 ## xmm0 = mem[0],zero,zero,zero
ucomiss LCPI3_0(%rip), %xmm0
jbe LBB3_16
## %bb.6:
movss %xmm0, -364(%rbp)
leaq -576(%rbp), %rbx
leaq L_.str.1(%rip), %r12
leaq -692(%rbp), %r13
leaq L_str.17(%rip), %r15
jmp LBB3_7
.p2align 4, 0x90
LBB3_14: ## in Loop: Header=BB3_7 Depth=1
movq %r15, %rdi
callq _puts
LBB3_7: ## =>This Inner Loop Header: Depth=1
leaq L_.str.5(%rip), %rdi
xorl %eax, %eax
callq _printf
movq (%r14), %rdx
movq %rbx, %rdi
movl $100, %esi
callq _fgets
movq %rbx, %rdi
movq %r12, %rsi
movq %r13, %rdx
xorl %eax, %eax
callq _sscanf
cmpl $1, %eax
je LBB3_14
## %bb.8: ## in Loop: Header=BB3_7 Depth=1
movq %rbx, %rdi
leaq L_.str.3(%rip), %rsi
leaq -688(%rbp), %rdx
xorl %eax, %eax
callq _sscanf
cmpl $1, %eax
jne LBB3_14
## %bb.9:
leaq -360(%rbp), %rdi
leaq -688(%rbp), %rsi
movl $100, %edx
callq ___strcpy_chk
leaq -256(%rbp), %rdi
leaq -464(%rbp), %rsi
movl $204, %edx
callq _memcpy
movq -704(%rbp), %rbx ## 8-byte Reload
movq 8(%rbx), %rcx
cmpq 16(%rbx), %rcx
jne LBB3_10
## %bb.11:
incq %rcx
movq %rcx, 16(%rbx)
movq (%rbx), %rdi
imulq $204, %rcx, %rsi
callq _realloc
movq %rax, (%rbx)
movq 8(%rbx), %rcx
jmp LBB3_12
LBB3_10:
movq (%rbx), %rax
LBB3_12:
leaq 1(%rcx), %rdx
movq %rdx, 8(%rbx)
imulq $204, %rcx, %rdi
addq %rax, %rdi
leaq -256(%rbp), %rsi
movl $204, %edx
callq _memcpy
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
cmpq -48(%rbp), %rax
jne LBB3_17
## %bb.13:
addq $664, %rsp ## imm = 0x298
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB3_17:
callq ___stack_chk_fail
.cfi_endproc
## -- End function
.globl _printList ## -- Begin function printList
.p2align 4, 0x90
_printList: ## @printList
.cfi_startproc
## %bb.0:
testl %esi, %esi
jle LBB4_6
## %bb.1:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdi, %r14
movl %esi, %r12d
xorl %ebx, %ebx
movq %r12, %r13
movq %rdi, %r15
.p2align 4, 0x90
LBB4_2: ## =>This Inner Loop Header: Depth=1
movq %r15, %rdi
callq _strlen
movslq %ebx, %rbx
cmpq %rbx, %rax
cmoval %eax, %ebx
addq $204, %r15
decq %r13
jne LBB4_2
## %bb.3:
addl $4, %ebx
leaq L_.str.6(%rip), %rdi
xorl %eax, %eax
callq _printf
addq $104, %r14
leaq L_.str.7(%rip), %r15
.p2align 4, 0x90
LBB4_4: ## =>This Inner Loop Header: Depth=1
leaq -104(%r14), %rdx
movss -4(%r14), %xmm0 ## xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movq %r15, %rdi
movl %ebx, %esi
movq %r14, %rcx
movb $1, %al
callq _printf
addq $204, %r14
decq %r12
jne LBB4_4
## %bb.5:
movl $10, %edi
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp _putchar ## TAILCALL
LBB4_6:
leaq L_str.18(%rip), %rdi
jmp _puts ## TAILCALL
.cfi_endproc
## -- End function
.section __TEXT,__literal16,16byte_literals
.p2align 4 ## -- Begin function main
LCPI5_0:
.byte 0 ## 0x0
.byte 0 ## 0x0
.byte 0 ## 0x0
.byte 0 ## 0x0
.byte 0 ## 0x0
.byte 0 ## 0x0
.byte 0 ## 0x0
.byte 0 ## 0x0
.byte 1 ## 0x1
.byte 0 ## 0x0
.byte 0 ## 0x0
.byte 0 ## 0x0
.byte 0 ## 0x0
.byte 0 ## 0x0
.byte 0 ## 0x0
.byte 0 ## 0x0
.section __TEXT,__text,regular,pure_instructions
.globl _main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $152, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -48(%rbp)
movl $204, %edi
callq _malloc
movq %rax, -192(%rbp)
movaps LCPI5_0(%rip), %xmm0 ## xmm0 = [0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0]
movups %xmm0, -184(%rbp)
leaq L_.str.10(%rip), %r14
movq ___stdinp@GOTPCREL(%rip), %r13
leaq -160(%rbp), %rbx
leaq L_.str.11(%rip), %r15
leaq -164(%rbp), %r12
jmp LBB5_1
.p2align 4, 0x90
LBB5_7: ## in Loop: Header=BB5_1 Depth=1
movq -192(%rbp), %rdi
movl -184(%rbp), %esi
callq _printList
LBB5_1: ## =>This Inner Loop Header: Depth=1
xorps %xmm0, %xmm0
movaps %xmm0, -80(%rbp)
movaps %xmm0, -96(%rbp)
movaps %xmm0, -112(%rbp)
movaps %xmm0, -128(%rbp)
movaps %xmm0, -144(%rbp)
movaps %xmm0, -160(%rbp)
movl $0, -64(%rbp)
movq %r14, %rdi
xorl %eax, %eax
callq _printf
movq (%r13), %rdx
movq %rbx, %rdi
movl $100, %esi
callq _fgets
movq %rbx, %rdi
movq %r15, %rsi
movq %r12, %rdx
xorl %eax, %eax
callq _sscanf
testl %eax, %eax
je LBB5_5
## %bb.2: ## in Loop: Header=BB5_1 Depth=1
movl -164(%rbp), %eax
cmpl $1, %eax
je LBB5_6
## %bb.3: ## in Loop: Header=BB5_1 Depth=1
cmpl $2, %eax
je LBB5_7
## %bb.4: ## in Loop: Header=BB5_1 Depth=1
cmpl $3, %eax
je LBB5_8
LBB5_5: ## in Loop: Header=BB5_1 Depth=1
leaq L_str.20(%rip), %rdi
callq _puts
jmp LBB5_1
.p2align 4, 0x90
LBB5_6: ## in Loop: Header=BB5_1 Depth=1
leaq -192(%rbp), %rdi
callq _addProductToList
jmp LBB5_1
LBB5_8:
movq -192(%rbp), %rdi
callq _free
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
cmpq -48(%rbp), %rax
jne LBB5_10
## %bb.9:
xorl %eax, %eax
addq $152, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB5_10:
callq ___stack_chk_fail
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "Enter product name: "
L_.str.1: ## @.str.1
.asciz "%f"
L_.str.3: ## @.str.3
.asciz "\n%[^\n]%*c"
L_.str.4: ## @.str.4
.asciz "Enter amount: "
L_.str.5: ## @.str.5
.asciz "Enter unit: "
L_.str.6: ## @.str.6
.asciz "\nList:"
L_.str.7: ## @.str.7
.asciz "\n%-*s%4g %s"
L_.str.10: ## @.str.10
.asciz "\nWelcome to your shopping list.\n1 - Add product to list.\n2 - Print shopping list.\n3 - Quit.\nEnter command: "
L_.str.11: ## @.str.11
.asciz "%d"
L_str.17: ## @str.17
.asciz "Input error."
L_str.18: ## @str.18
.asciz "\nYour list is empty."
L_str.20: ## @str.20
.asciz "Input error. "
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _initializeArray ; -- Begin function initializeArray
.p2align 2
_initializeArray: ; @initializeArray
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x1
mov x20, x0
mov w8, #204
mul x0, x1, x8
bl _malloc
stp x0, xzr, [x20]
str x19, [x20, #16]
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
.globl _insertList ; -- Begin function insertList
.p2align 2
_insertList: ; @insertList
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x1
mov x20, x0
ldp x8, x9, [x0, #8]
cmp x8, x9
b.ne LBB1_2
; %bb.1:
add x8, x8, #1
str x8, [x20, #16]
ldr x0, [x20]
mov w9, #204
mul x1, x8, x9
bl _realloc
str x0, [x20]
ldr x8, [x20, #8]
b LBB1_3
LBB1_2:
ldr x0, [x20]
LBB1_3:
add x9, x8, #1
str x9, [x20, #8]
mov w9, #204
madd x8, x8, x9, x0
ldr q0, [x19]
str q0, [x8]
ldp q0, q1, [x19, #16]
ldp q2, q3, [x19, #48]
stp q2, q3, [x8, #48]
stp q0, q1, [x8, #16]
ldp q0, q1, [x19, #80]
ldp q2, q3, [x19, #112]
stp q2, q3, [x8, #112]
stp q0, q1, [x8, #80]
ldp q0, q1, [x19, #144]
ldr q2, [x19, #176]
ldur q3, [x19, #188]
stur q3, [x8, #188]
stp q1, q2, [x8, #160]
str q0, [x8, #144]
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
.globl _freeList ; -- Begin function freeList
.p2align 2
_freeList: ; @freeList
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x0
ldr x0, [x0]
bl _free
stp xzr, xzr, [x19]
str xzr, [x19, #16]
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
.globl _addProductToList ; -- Begin function addProductToList
.p2align 2
_addProductToList: ; @addProductToList
.cfi_startproc
; %bb.0:
stp x28, x27, [sp, #-96]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 96
stp x26, x25, [sp, #16] ; 16-byte Folded Spill
stp x24, x23, [sp, #32] ; 16-byte Folded Spill
stp x22, x21, [sp, #48] ; 16-byte Folded Spill
stp x20, x19, [sp, #64] ; 16-byte Folded Spill
stp x29, x30, [sp, #80] ; 16-byte Folded Spill
add x29, sp, #80
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
sub sp, sp, #656
mov x19, x0
add x25, sp, #424
add x24, sp, #16
Lloh0:
adrp x20, l_.str@PAGE
Lloh1:
add x20, x20, l_.str@PAGEOFF
Lloh2:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh3:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh4:
ldr x8, [x8]
Lloh5:
adrp x26, ___stdinp@GOTPAGE
Lloh6:
ldr x26, [x26, ___stdinp@GOTPAGEOFF]
stur x8, [x29, #-104]
Lloh7:
adrp x21, l_.str.1@PAGE
Lloh8:
add x21, x21, l_.str.1@PAGEOFF
add x27, sp, #12
Lloh9:
adrp x22, l_str.17@PAGE
Lloh10:
add x22, x22, l_str.17@PAGEOFF
add x28, sp, #224
Lloh11:
adrp x23, l_.str.3@PAGE
Lloh12:
add x23, x23, l_.str.3@PAGEOFF
b LBB3_2
LBB3_1: ; in Loop: Header=BB3_2 Depth=1
mov x0, x22
bl _puts
LBB3_2: ; =>This Inner Loop Header: Depth=1
mov x0, x20
bl _printf
ldr x2, [x26]
add x0, sp, #324
mov w1, #100
bl _fgets
str x27, [sp]
add x0, sp, #324
mov x1, x21
bl _sscanf
cmp w0, #1
b.eq LBB3_1
; %bb.3: ; in Loop: Header=BB3_2 Depth=1
str x28, [sp]
add x0, sp, #324
mov x1, x23
bl _sscanf
cmp w0, #1
b.ne LBB3_1
; %bb.4:
add x0, sp, #424
add x1, sp, #224
mov w2, #100
bl ___strcpy_chk
Lloh13:
adrp x20, l_.str.4@PAGE
Lloh14:
add x20, x20, l_.str.4@PAGEOFF
add x23, sp, #12
Lloh15:
adrp x21, l_.str.1@PAGE
Lloh16:
add x21, x21, l_.str.1@PAGEOFF
Lloh17:
adrp x22, l_str.17@PAGE
Lloh18:
add x22, x22, l_str.17@PAGEOFF
b LBB3_6
LBB3_5: ; in Loop: Header=BB3_6 Depth=1
mov x0, x22
bl _puts
LBB3_6: ; =>This Inner Loop Header: Depth=1
mov x0, x20
bl _printf
ldr x2, [x26]
add x0, sp, #324
mov w1, #100
bl _fgets
str x23, [sp]
add x0, sp, #324
mov x1, x21
bl _sscanf
cmp w0, #1
b.ne LBB3_5
; %bb.7: ; in Loop: Header=BB3_6 Depth=1
ldr s0, [sp, #12]
fcmp s0, #0.0
b.le LBB3_5
; %bb.8:
Lloh19:
adrp x20, l_.str.5@PAGE
Lloh20:
add x20, x20, l_.str.5@PAGEOFF
str s0, [sp, #524]
Lloh21:
adrp x21, l_.str.1@PAGE
Lloh22:
add x21, x21, l_.str.1@PAGEOFF
add x27, sp, #12
Lloh23:
adrp x22, l_str.17@PAGE
Lloh24:
add x22, x22, l_str.17@PAGEOFF
add x28, sp, #224
Lloh25:
adrp x23, l_.str.3@PAGE
Lloh26:
add x23, x23, l_.str.3@PAGEOFF
b LBB3_10
LBB3_9: ; in Loop: Header=BB3_10 Depth=1
mov x0, x22
bl _puts
LBB3_10: ; =>This Inner Loop Header: Depth=1
mov x0, x20
bl _printf
ldr x2, [x26]
add x0, sp, #324
mov w1, #100
bl _fgets
str x27, [sp]
add x0, sp, #324
mov x1, x21
bl _sscanf
cmp w0, #1
b.eq LBB3_9
; %bb.11: ; in Loop: Header=BB3_10 Depth=1
str x28, [sp]
add x0, sp, #324
mov x1, x23
bl _sscanf
cmp w0, #1
b.ne LBB3_9
; %bb.12:
add x8, sp, #424
add x0, x8, #104
add x1, sp, #224
mov w2, #100
bl ___strcpy_chk
ldp q0, q1, [x25, #160]
stp q0, q1, [x24, #160]
ldur q0, [x25, #188]
stur q0, [x24, #188]
ldp q0, q1, [x25, #96]
stp q0, q1, [sp, #112]
ldp q1, q0, [x25, #128]
stp q1, q0, [x24, #128]
ldp q0, q1, [x25, #32]
stp q0, q1, [sp, #48]
ldp q1, q0, [x25, #64]
stp q1, q0, [sp, #80]
ldp q1, q0, [x25]
stp q1, q0, [sp, #16]
ldp x8, x9, [x19, #8]
cmp x8, x9
b.ne LBB3_14
; %bb.13:
add x8, x8, #1
str x8, [x19, #16]
ldr x0, [x19]
mov w9, #204
mul x1, x8, x9
bl _realloc
str x0, [x19]
ldr x8, [x19, #8]
b LBB3_15
LBB3_14:
ldr x0, [x19]
LBB3_15:
add x9, x8, #1
str x9, [x19, #8]
mov w9, #204
madd x8, x8, x9, x0
ldp q0, q1, [x24, #160]
stp q0, q1, [x8, #160]
ldur q0, [x24, #188]
stur q0, [x8, #188]
ldp q0, q1, [sp, #112]
stp q0, q1, [x8, #96]
ldp q1, q0, [x24, #128]
stp q1, q0, [x8, #128]
ldp q0, q1, [sp, #48]
stp q0, q1, [x8, #32]
ldp q1, q0, [sp, #80]
stp q1, q0, [x8, #64]
ldp q1, q0, [sp, #16]
stp q1, q0, [x8]
Lloh27:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh28:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh29:
ldr x8, [x8]
ldur x9, [x29, #-104]
cmp x8, x9
b.ne LBB3_17
; %bb.16:
add sp, sp, #656
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
ldp x26, x25, [sp, #16] ; 16-byte Folded Reload
ldp x28, x27, [sp], #96 ; 16-byte Folded Reload
ret
LBB3_17:
bl ___stack_chk_fail
.loh AdrpAdd Lloh11, Lloh12
.loh AdrpAdd Lloh9, Lloh10
.loh AdrpAdd Lloh7, Lloh8
.loh AdrpLdrGot Lloh5, Lloh6
.loh AdrpLdrGotLdr Lloh2, Lloh3, Lloh4
.loh AdrpAdd Lloh0, Lloh1
.loh AdrpAdd Lloh17, Lloh18
.loh AdrpAdd Lloh15, Lloh16
.loh AdrpAdd Lloh13, Lloh14
.loh AdrpAdd Lloh25, Lloh26
.loh AdrpAdd Lloh23, Lloh24
.loh AdrpAdd Lloh21, Lloh22
.loh AdrpAdd Lloh19, Lloh20
.loh AdrpLdrGotLdr Lloh27, Lloh28, Lloh29
.cfi_endproc
; -- End function
.globl _printList ; -- Begin function printList
.p2align 2
_printList: ; @printList
.cfi_startproc
; %bb.0:
cmp w1, #1
b.lt LBB4_6
; %bb.1:
sub sp, sp, #96
.cfi_def_cfa_offset 96
stp x24, x23, [sp, #32] ; 16-byte Folded Spill
stp x22, x21, [sp, #48] ; 16-byte Folded Spill
stp x20, x19, [sp, #64] ; 16-byte Folded Spill
stp x29, x30, [sp, #80] ; 16-byte Folded Spill
add x29, sp, #80
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
mov x19, x0
mov w22, #0
mov w21, w1
mov x23, x21
mov x20, x0
LBB4_2: ; =>This Inner Loop Header: Depth=1
mov x0, x20
bl _strlen
cmp x0, w22, sxtw
csel w22, w0, w22, hi
add x20, x20, #204
subs x23, x23, #1
b.ne LBB4_2
; %bb.3:
add w20, w22, #4
Lloh30:
adrp x0, l_.str.6@PAGE
Lloh31:
add x0, x0, l_.str.6@PAGEOFF
bl _printf
add x22, x19, #104
Lloh32:
adrp x19, l_.str.7@PAGE
Lloh33:
add x19, x19, l_.str.7@PAGEOFF
LBB4_4: ; =>This Inner Loop Header: Depth=1
sub x8, x22, #104
ldur s0, [x22, #-4]
fcvt d0, s0
str x22, [sp, #24]
str d0, [sp, #16]
stp x20, x8, [sp]
mov x0, x19
bl _printf
add x22, x22, #204
subs x21, x21, #1
b.ne LBB4_4
; %bb.5:
mov w0, #10
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
add sp, sp, #96
b _putchar
LBB4_6:
.cfi_def_cfa wsp, 0
.cfi_same_value w30
.cfi_same_value w29
.cfi_same_value w19
.cfi_same_value w20
.cfi_same_value w21
.cfi_same_value w22
.cfi_same_value w23
.cfi_same_value w24
Lloh34:
adrp x0, l_str.18@PAGE
Lloh35:
add x0, x0, l_str.18@PAGEOFF
b _puts
.loh AdrpAdd Lloh32, Lloh33
.loh AdrpAdd Lloh30, Lloh31
.loh AdrpAdd Lloh34, Lloh35
.cfi_endproc
; -- End function
.section __TEXT,__literal16,16byte_literals
.p2align 4 ; -- Begin function main
lCPI5_0:
.quad 0 ; 0x0
.quad 1 ; 0x1
.section __TEXT,__text,regular,pure_instructions
.globl _main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
sub sp, sp, #224
.cfi_def_cfa_offset 224
stp x24, x23, [sp, #160] ; 16-byte Folded Spill
stp x22, x21, [sp, #176] ; 16-byte Folded Spill
stp x20, x19, [sp, #192] ; 16-byte Folded Spill
stp x29, x30, [sp, #208] ; 16-byte Folded Spill
add x29, sp, #208
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
Lloh36:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh37:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh38:
ldr x8, [x8]
stur x8, [x29, #-56]
mov w0, #204
bl _malloc
str x0, [sp, #24]
Lloh39:
adrp x8, lCPI5_0@PAGE
Lloh40:
ldr q0, [x8, lCPI5_0@PAGEOFF]
stur q0, [sp, #32]
Lloh41:
adrp x19, l_.str.10@PAGE
Lloh42:
add x19, x19, l_.str.10@PAGEOFF
Lloh43:
adrp x22, ___stdinp@GOTPAGE
Lloh44:
ldr x22, [x22, ___stdinp@GOTPAGEOFF]
add x23, sp, #20
Lloh45:
adrp x20, l_.str.11@PAGE
Lloh46:
add x20, x20, l_.str.11@PAGEOFF
Lloh47:
adrp x21, l_str.20@PAGE
Lloh48:
add x21, x21, l_str.20@PAGEOFF
b LBB5_2
LBB5_1: ; in Loop: Header=BB5_2 Depth=1
ldr x0, [sp, #24]
ldr w1, [sp, #32]
bl _printList
LBB5_2: ; =>This Inner Loop Header: Depth=1
str wzr, [sp, #144]
movi.2d v0, #0000000000000000
stp q0, q0, [sp, #112]
stp q0, q0, [sp, #80]
stp q0, q0, [sp, #48]
mov x0, x19
bl _printf
ldr x2, [x22]
add x0, sp, #48
mov w1, #100
bl _fgets
str x23, [sp]
add x0, sp, #48
mov x1, x20
bl _sscanf
cbz w0, LBB5_6
; %bb.3: ; in Loop: Header=BB5_2 Depth=1
ldr w8, [sp, #20]
cmp w8, #1
b.eq LBB5_7
; %bb.4: ; in Loop: Header=BB5_2 Depth=1
cmp w8, #2
b.eq LBB5_1
; %bb.5: ; in Loop: Header=BB5_2 Depth=1
cmp w8, #3
b.eq LBB5_8
LBB5_6: ; in Loop: Header=BB5_2 Depth=1
mov x0, x21
bl _puts
b LBB5_2
LBB5_7: ; in Loop: Header=BB5_2 Depth=1
add x0, sp, #24
bl _addProductToList
b LBB5_2
LBB5_8:
ldr x0, [sp, #24]
bl _free
ldur x8, [x29, #-56]
Lloh49:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh50:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh51:
ldr x9, [x9]
cmp x9, x8
b.ne LBB5_10
; %bb.9:
mov w0, #0
ldp x29, x30, [sp, #208] ; 16-byte Folded Reload
ldp x20, x19, [sp, #192] ; 16-byte Folded Reload
ldp x22, x21, [sp, #176] ; 16-byte Folded Reload
ldp x24, x23, [sp, #160] ; 16-byte Folded Reload
add sp, sp, #224
ret
LBB5_10:
bl ___stack_chk_fail
.loh AdrpAdd Lloh47, Lloh48
.loh AdrpAdd Lloh45, Lloh46
.loh AdrpLdrGot Lloh43, Lloh44
.loh AdrpAdd Lloh41, Lloh42
.loh AdrpLdr Lloh39, Lloh40
.loh AdrpLdrGotLdr Lloh36, Lloh37, Lloh38
.loh AdrpLdrGotLdr Lloh49, Lloh50, Lloh51
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "Enter product name: "
l_.str.1: ; @.str.1
.asciz "%f"
l_.str.3: ; @.str.3
.asciz "\n%[^\n]%*c"
l_.str.4: ; @.str.4
.asciz "Enter amount: "
l_.str.5: ; @.str.5
.asciz "Enter unit: "
l_.str.6: ; @.str.6
.asciz "\nList:"
l_.str.7: ; @.str.7
.asciz "\n%-*s%4g %s"
l_.str.10: ; @.str.10
.asciz "\nWelcome to your shopping list.\n1 - Add product to list.\n2 - Print shopping list.\n3 - Quit.\nEnter command: "
l_.str.11: ; @.str.11
.asciz "%d"
l_str.17: ; @str.17
.asciz "Input error."
l_str.18: ; @str.18
.asciz "\nYour list is empty."
l_str.20: ; @str.20
.asciz "Input error. "
.subsections_via_symbols
| the_stack_data/77305.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _get_wchan ## -- Begin function get_wchan
.p2align 4, 0x90
_get_wchan: ## @get_wchan
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
xorl %eax, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _get_wchan ; -- Begin function get_wchan
.p2align 2
_get_wchan: ; @get_wchan
.cfi_startproc
; %bb.0:
mov x0, #0
ret
.cfi_endproc
; -- End function
.subsections_via_symbols
| AnghaBench/linux/arch/openrisc/kernel/extr_process.c_get_wchan.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _vmul ## -- Begin function vmul
.p2align 4, 0x90
_vmul: ## @vmul
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
testl %ecx, %ecx
jle LBB0_16
## %bb.1:
movl %ecx, %r8d
cmpl $8, %ecx
jae LBB0_3
## %bb.2:
xorl %ecx, %ecx
LBB0_12:
movq %rcx, %r9
notq %r9
addq %r8, %r9
movq %r8, %r10
andq $3, %r10
je LBB0_14
.p2align 4, 0x90
LBB0_13: ## =>This Inner Loop Header: Depth=1
movl (%rsi,%rcx,4), %eax
imull (%rdi,%rcx,4), %eax
movl %eax, (%rdx,%rcx,4)
incq %rcx
decq %r10
jne LBB0_13
LBB0_14:
cmpq $3, %r9
jb LBB0_16
.p2align 4, 0x90
LBB0_15: ## =>This Inner Loop Header: Depth=1
movl (%rsi,%rcx,4), %eax
imull (%rdi,%rcx,4), %eax
movl %eax, (%rdx,%rcx,4)
movl 4(%rsi,%rcx,4), %eax
imull 4(%rdi,%rcx,4), %eax
movl %eax, 4(%rdx,%rcx,4)
movl 8(%rsi,%rcx,4), %eax
imull 8(%rdi,%rcx,4), %eax
movl %eax, 8(%rdx,%rcx,4)
movl 12(%rsi,%rcx,4), %eax
imull 12(%rdi,%rcx,4), %eax
movl %eax, 12(%rdx,%rcx,4)
addq $4, %rcx
cmpq %rcx, %r8
jne LBB0_15
jmp LBB0_16
LBB0_3:
movq %rdx, %rax
subq %rdi, %rax
xorl %ecx, %ecx
cmpq $32, %rax
jb LBB0_12
## %bb.4:
movq %rdx, %rax
subq %rsi, %rax
cmpq $32, %rax
jb LBB0_12
## %bb.5:
movl %r8d, %ecx
andl $-8, %ecx
leaq -8(%rcx), %rax
movq %rax, %r9
shrq $3, %r9
incq %r9
testq %rax, %rax
je LBB0_6
## %bb.7:
movq %r9, %r10
andq $-2, %r10
xorl %eax, %eax
.p2align 4, 0x90
LBB0_8: ## =>This Inner Loop Header: Depth=1
movdqu (%rdi,%rax,4), %xmm0
movdqu 16(%rdi,%rax,4), %xmm1
movdqu (%rsi,%rax,4), %xmm2
pmulld %xmm0, %xmm2
movdqu 16(%rsi,%rax,4), %xmm0
pmulld %xmm1, %xmm0
movdqu %xmm2, (%rdx,%rax,4)
movdqu %xmm0, 16(%rdx,%rax,4)
movdqu 32(%rdi,%rax,4), %xmm0
movdqu 48(%rdi,%rax,4), %xmm1
movdqu 32(%rsi,%rax,4), %xmm2
pmulld %xmm0, %xmm2
movdqu 48(%rsi,%rax,4), %xmm0
pmulld %xmm1, %xmm0
movdqu %xmm2, 32(%rdx,%rax,4)
movdqu %xmm0, 48(%rdx,%rax,4)
addq $16, %rax
addq $-2, %r10
jne LBB0_8
## %bb.9:
testb $1, %r9b
je LBB0_11
LBB0_10:
movdqu (%rdi,%rax,4), %xmm0
movdqu 16(%rdi,%rax,4), %xmm1
movdqu (%rsi,%rax,4), %xmm2
pmulld %xmm0, %xmm2
movdqu 16(%rsi,%rax,4), %xmm0
pmulld %xmm1, %xmm0
movdqu %xmm2, (%rdx,%rax,4)
movdqu %xmm0, 16(%rdx,%rax,4)
LBB0_11:
cmpq %r8, %rcx
jne LBB0_12
LBB0_16:
popq %rbp
retq
LBB0_6:
xorl %eax, %eax
testb $1, %r9b
jne LBB0_10
jmp LBB0_11
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _vmul ; -- Begin function vmul
.p2align 2
_vmul: ; @vmul
.cfi_startproc
; %bb.0:
cmp w3, #1
b.lt LBB0_10
; %bb.1:
mov w8, w3
cmp w3, #16
b.hs LBB0_3
; %bb.2:
mov x9, #0
b LBB0_8
LBB0_3:
mov x9, #0
sub x10, x2, x0
cmp x10, #64
b.lo LBB0_8
; %bb.4:
sub x10, x2, x1
cmp x10, #64
b.lo LBB0_8
; %bb.5:
and x9, x8, #0xfffffff0
add x10, x2, #32
add x11, x0, #32
add x12, x1, #32
mov x13, x9
LBB0_6: ; =>This Inner Loop Header: Depth=1
ldp q0, q1, [x11, #-32]
ldp q2, q3, [x11], #64
ldp q4, q5, [x12, #-32]
ldp q6, q7, [x12], #64
mul.4s v0, v4, v0
mul.4s v1, v5, v1
mul.4s v2, v6, v2
mul.4s v3, v7, v3
stp q0, q1, [x10, #-32]
stp q2, q3, [x10], #64
subs x13, x13, #16
b.ne LBB0_6
; %bb.7:
cmp x9, x8
b.eq LBB0_10
LBB0_8:
lsl x12, x9, #2
add x10, x2, x12
add x11, x1, x12
add x12, x0, x12
sub x8, x8, x9
LBB0_9: ; =>This Inner Loop Header: Depth=1
ldr w9, [x12], #4
ldr w13, [x11], #4
mul w9, w13, w9
str w9, [x10], #4
subs x8, x8, #1
b.ne LBB0_9
LBB0_10:
ret
.cfi_endproc
; -- End function
.subsections_via_symbols
| the_stack_data/91301.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _fdc_release_resources ## -- Begin function fdc_release_resources
.p2align 4, 0x90
_fdc_release_resources: ## @fdc_release_resources
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdi, %r12
movl 56(%rdi), %r14d
movq 48(%rdi), %rdx
testq %rdx, %rdx
je LBB0_2
## %bb.1:
movq 32(%r12), %rsi
movl %r14d, %edi
callq _bus_teardown_intr
LBB0_2:
movq $0, 48(%r12)
movq 32(%r12), %rcx
testq %rcx, %rcx
je LBB0_4
## %bb.3:
movq _SYS_RES_IRQ@GOTPCREL(%rip), %rax
movl (%rax), %esi
movl 40(%r12), %edx
movl %r14d, %edi
callq _bus_release_resource
LBB0_4:
movq $0, 32(%r12)
movq _FDC_MAXREG@GOTPCREL(%rip), %r15
movl (%r15), %eax
testl %eax, %eax
jle LBB0_10
## %bb.5:
xorl %ebx, %ebx
movq _SYS_RES_IOPORT@GOTPCREL(%rip), %r13
xorl %edx, %edx
jmp LBB0_6
.p2align 4, 0x90
LBB0_9: ## in Loop: Header=BB0_6 Depth=1
incq %rbx
movslq %eax, %rcx
cmpq %rcx, %rbx
jge LBB0_10
LBB0_6: ## =>This Inner Loop Header: Depth=1
movq 16(%r12), %rcx
movq (%rcx,%rbx,8), %rcx
testq %rcx, %rcx
je LBB0_9
## %bb.7: ## in Loop: Header=BB0_6 Depth=1
cmpq %rdx, %rcx
je LBB0_9
## %bb.8: ## in Loop: Header=BB0_6 Depth=1
movl (%r13), %esi
movq 24(%r12), %rax
movl (%rax,%rbx,4), %edx
movl %r14d, %edi
callq _bus_release_resource
movq 16(%r12), %rax
movq (%rax,%rbx,8), %rdx
movq $0, (%rax,%rbx,8)
movl (%r15), %eax
jmp LBB0_9
LBB0_10:
movq (%r12), %rcx
testq %rcx, %rcx
je LBB0_12
## %bb.11:
movq _SYS_RES_DRQ@GOTPCREL(%rip), %rax
movl (%rax), %esi
movl 8(%r12), %edx
movl %r14d, %edi
callq _bus_release_resource
LBB0_12:
movq $0, (%r12)
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _SYS_RES_IRQ,4,2 ## @SYS_RES_IRQ
.comm _FDC_MAXREG,4,2 ## @FDC_MAXREG
.comm _SYS_RES_IOPORT,4,2 ## @SYS_RES_IOPORT
.comm _SYS_RES_DRQ,4,2 ## @SYS_RES_DRQ
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _fdc_release_resources ; -- Begin function fdc_release_resources
.p2align 2
_fdc_release_resources: ; @fdc_release_resources
.cfi_startproc
; %bb.0:
stp x24, x23, [sp, #-64]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 64
stp x22, x21, [sp, #16] ; 16-byte Folded Spill
stp x20, x19, [sp, #32] ; 16-byte Folded Spill
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
mov x19, x0
ldr w20, [x0, #56]
ldr x2, [x0, #48]
cbz x2, LBB0_2
; %bb.1:
ldr x1, [x19, #32]
mov x0, x20
bl _bus_teardown_intr
LBB0_2:
str xzr, [x19, #48]
ldr x3, [x19, #32]
cbz x3, LBB0_4
; %bb.3:
Lloh0:
adrp x8, _SYS_RES_IRQ@GOTPAGE
Lloh1:
ldr x8, [x8, _SYS_RES_IRQ@GOTPAGEOFF]
Lloh2:
ldr w1, [x8]
ldr w2, [x19, #40]
mov x0, x20
bl _bus_release_resource
LBB0_4:
str xzr, [x19, #32]
Lloh3:
adrp x21, _FDC_MAXREG@GOTPAGE
Lloh4:
ldr x21, [x21, _FDC_MAXREG@GOTPAGEOFF]
ldr w8, [x21]
cmp w8, #1
b.lt LBB0_9
; %bb.5:
mov x22, #0
mov x9, #0
Lloh5:
adrp x23, _SYS_RES_IOPORT@GOTPAGE
Lloh6:
ldr x23, [x23, _SYS_RES_IOPORT@GOTPAGEOFF]
b LBB0_7
LBB0_6: ; in Loop: Header=BB0_7 Depth=1
add x22, x22, #1
cmp x22, w8, sxtw
b.ge LBB0_9
LBB0_7: ; =>This Inner Loop Header: Depth=1
ldr x10, [x19, #16]
ldr x3, [x10, x22, lsl #3]
cmp x3, #0
ccmp x3, x9, #4, ne
b.eq LBB0_6
; %bb.8: ; in Loop: Header=BB0_7 Depth=1
ldr w1, [x23]
ldr x8, [x19, #24]
ldr w2, [x8, x22, lsl #2]
mov x0, x20
bl _bus_release_resource
ldr x8, [x19, #16]
lsl x10, x22, #3
ldr x9, [x8, x10]
str xzr, [x8, x10]
ldr w8, [x21]
b LBB0_6
LBB0_9:
ldr x3, [x19]
cbz x3, LBB0_11
; %bb.10:
Lloh7:
adrp x8, _SYS_RES_DRQ@GOTPAGE
Lloh8:
ldr x8, [x8, _SYS_RES_DRQ@GOTPAGEOFF]
Lloh9:
ldr w1, [x8]
ldr w2, [x19, #8]
mov x0, x20
bl _bus_release_resource
LBB0_11:
str xzr, [x19]
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
ldp x24, x23, [sp], #64 ; 16-byte Folded Reload
ret
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGot Lloh3, Lloh4
.loh AdrpLdrGot Lloh5, Lloh6
.loh AdrpLdrGotLdr Lloh7, Lloh8, Lloh9
.cfi_endproc
; -- End function
.comm _SYS_RES_IRQ,4,2 ; @SYS_RES_IRQ
.comm _FDC_MAXREG,4,2 ; @FDC_MAXREG
.comm _SYS_RES_IOPORT,4,2 ; @SYS_RES_IOPORT
.comm _SYS_RES_DRQ,4,2 ; @SYS_RES_DRQ
.subsections_via_symbols
| AnghaBench/freebsd/sys/dev/fdc/extr_fdc.c_fdc_release_resources.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function nfsd4_encode_locku
_nfsd4_encode_locku: ## @nfsd4_encode_locku
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq %rdx, %rsi
popq %rbp
jmp _nfsd4_encode_stateid ## TAILCALL
.cfi_endproc
## -- End function
.no_dead_strip _nfsd4_encode_locku
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function nfsd4_encode_locku
_nfsd4_encode_locku: ; @nfsd4_encode_locku
.cfi_startproc
; %bb.0:
mov x1, x2
b _nfsd4_encode_stateid
.cfi_endproc
; -- End function
.no_dead_strip _nfsd4_encode_locku
.subsections_via_symbols
| AnghaBench/linux/fs/nfsd/extr_nfs4xdr.c_nfsd4_encode_locku.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _pasemi_read_dma_reg ## -- Begin function pasemi_read_dma_reg
.p2align 4, 0x90
_pasemi_read_dma_reg: ## @pasemi_read_dma_reg
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq _dma_regs@GOTPCREL(%rip), %rax
movl %edi, %edi
addq (%rax), %rdi
popq %rbp
jmp _in_le32 ## TAILCALL
.cfi_endproc
## -- End function
.comm _dma_regs,8,3 ## @dma_regs
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _pasemi_read_dma_reg ; -- Begin function pasemi_read_dma_reg
.p2align 2
_pasemi_read_dma_reg: ; @pasemi_read_dma_reg
.cfi_startproc
; %bb.0:
Lloh0:
adrp x8, _dma_regs@GOTPAGE
Lloh1:
ldr x8, [x8, _dma_regs@GOTPAGEOFF]
Lloh2:
ldr x8, [x8]
add x0, x8, w0, uxtw
b _in_le32
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _dma_regs,8,3 ; @dma_regs
.subsections_via_symbols
| AnghaBench/fastsocket/kernel/arch/powerpc/platforms/pasemi/extr_dma_lib.c_pasemi_read_dma_reg.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _compute_nfs_args ## -- Begin function compute_nfs_args
.p2align 4, 0x90
_compute_nfs_args: ## @compute_nfs_args
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
popq %rbp
jmp _compute_nfs23_args ## TAILCALL
.cfi_endproc
## -- End function
.comm _NFS_VERSION4,8,3 ## @NFS_VERSION4
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _compute_nfs_args ; -- Begin function compute_nfs_args
.p2align 2
_compute_nfs_args: ; @compute_nfs_args
.cfi_startproc
; %bb.0:
ldp x8, x9, [sp]
stp x8, x9, [sp]
b _compute_nfs23_args
.cfi_endproc
; -- End function
.comm _NFS_VERSION4,8,3 ; @NFS_VERSION4
.subsections_via_symbols
| AnghaBench/freebsd/contrib/amd/libamu/extr_mount_fs.c_compute_nfs_args.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function bhndb_io_resource_get_window
_bhndb_io_resource_get_window: ## @bhndb_io_resource_get_window
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $56, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %r9, -80(%rbp) ## 8-byte Spill
movq %r8, %r12
movq %rcx, %r13
movq %rdx, -56(%rbp) ## 8-byte Spill
movq %rsi, %r14
movq %rdi, %rbx
movq _MA_OWNED@GOTPCREL(%rip), %rax
movl (%rax), %esi
callq _BHNDB_LOCK_ASSERT
movq (%rbx), %rbx
movl $0, (%r13)
movl $0, (%r12)
movq %rbx, %rdi
callq _bhndb_dw_next_free
movq %rax, %r15
testq %rax, %rax
jne LBB0_1
## %bb.2:
movq %r13, -72(%rbp) ## 8-byte Spill
movq %r12, -64(%rbp) ## 8-byte Spill
movq %r14, -48(%rbp) ## 8-byte Spill
cmpq $0, (%rbx)
je LBB0_8
## %bb.3:
movq -48(%rbp), %rax ## 8-byte Reload
movq -56(%rbp), %rcx ## 8-byte Reload
addq %rcx, %rax
movq %rax, -88(%rbp) ## 8-byte Spill
xorl %r12d, %r12d
xorl %r14d, %r14d
jmp LBB0_5
.p2align 4, 0x90
LBB0_4: ## in Loop: Header=BB0_5 Depth=1
incq %r14
addq $16, %r12
cmpq (%rbx), %r14
jae LBB0_8
LBB0_5: ## =>This Inner Loop Header: Depth=1
movq 8(%rbx), %r15
movq 8(%r15,%r12), %r13
movq (%r13), %rax
xorl %edi, %edi
movq _BHNDB_REGWIN_T_DYN@GOTPCREL(%rip), %rcx
cmpq (%rcx), %rax
sete %dil
leaq L_.str(%rip), %rsi
callq _KASSERT
movq (%r15,%r12), %rax
cmpq -48(%rbp), %rax ## 8-byte Folded Reload
jg LBB0_4
## %bb.6: ## in Loop: Header=BB0_5 Depth=1
addq 8(%r13), %rax
cmpq %rax, -88(%rbp) ## 8-byte Folded Reload
jg LBB0_4
## %bb.7:
movq -72(%rbp), %rax ## 8-byte Reload
movl $1, (%rax)
addq %r12, %r15
jmp LBB0_1
LBB0_8:
movq %rbx, %rdi
movq -48(%rbp), %r14 ## 8-byte Reload
movq %r14, %rsi
movq -56(%rbp), %r12 ## 8-byte Reload
movq %r12, %rdx
callq _bhndb_find_resource_region
testq %rax, %rax
je LBB0_13
## %bb.9:
movq _BHNDB_ALLOC_FULFILL_ON_OVERCOMMIT@GOTPCREL(%rip), %rcx
movl (%rcx), %ecx
testl %ecx, (%rax)
je LBB0_13
## %bb.10:
movq %rbx, %rdi
movq -80(%rbp), %rsi ## 8-byte Reload
callq _bhndb_dw_steal
testq %rax, %rax
je LBB0_14
## %bb.11:
movq %rax, %r15
movq -64(%rbp), %rax ## 8-byte Reload
movl $1, (%rax)
jmp LBB0_1
LBB0_13:
xorl %r15d, %r15d
jmp LBB0_1
LBB0_14:
leaq (%r12,%r14), %rdx
decq %rdx
leaq L_.str.1(%rip), %rdi
movq %r14, %rsi
callq _panic
## implicit-def: $r15
LBB0_1:
movq %r15, %rax
addq $56, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _MA_OWNED,4,2 ## @MA_OWNED
.comm _BHNDB_REGWIN_T_DYN,8,3 ## @BHNDB_REGWIN_T_DYN
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "invalid register window type"
.comm _BHNDB_ALLOC_FULFILL_ON_OVERCOMMIT,4,2 ## @BHNDB_ALLOC_FULFILL_ON_OVERCOMMIT
L_.str.1: ## @.str.1
.asciz "register windows exhausted attempting to map 0x%llx-0x%llx\n"
.no_dead_strip _bhndb_io_resource_get_window
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function bhndb_io_resource_get_window
_bhndb_io_resource_get_window: ; @bhndb_io_resource_get_window
.cfi_startproc
; %bb.0:
sub sp, sp, #128
.cfi_def_cfa_offset 128
stp x28, x27, [sp, #32] ; 16-byte Folded Spill
stp x26, x25, [sp, #48] ; 16-byte Folded Spill
stp x24, x23, [sp, #64] ; 16-byte Folded Spill
stp x22, x21, [sp, #80] ; 16-byte Folded Spill
stp x20, x19, [sp, #96] ; 16-byte Folded Spill
stp x29, x30, [sp, #112] ; 16-byte Folded Spill
add x29, sp, #112
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
mov x22, x5
mov x20, x4
mov x24, x3
mov x21, x2
mov x19, x1
mov x23, x0
Lloh0:
adrp x8, _MA_OWNED@GOTPAGE
Lloh1:
ldr x8, [x8, _MA_OWNED@GOTPAGEOFF]
Lloh2:
ldr w1, [x8]
bl _BHNDB_LOCK_ASSERT
ldr x23, [x23]
str wzr, [x24]
str wzr, [x20]
mov x0, x23
bl _bhndb_dw_next_free
cbz x0, LBB0_2
LBB0_1:
ldp x29, x30, [sp, #112] ; 16-byte Folded Reload
ldp x20, x19, [sp, #96] ; 16-byte Folded Reload
ldp x22, x21, [sp, #80] ; 16-byte Folded Reload
ldp x24, x23, [sp, #64] ; 16-byte Folded Reload
ldp x26, x25, [sp, #48] ; 16-byte Folded Reload
ldp x28, x27, [sp, #32] ; 16-byte Folded Reload
add sp, sp, #128
ret
LBB0_2:
stp x24, x22, [sp, #8] ; 16-byte Folded Spill
str x21, [sp, #24] ; 8-byte Folded Spill
str x20, [sp] ; 8-byte Folded Spill
ldr x8, [x23]
cbz x8, LBB0_8
; %bb.3:
mov x26, #0
mov x27, #0
ldr x8, [sp, #24] ; 8-byte Folded Reload
add x28, x8, x19
Lloh3:
adrp x22, _BHNDB_REGWIN_T_DYN@GOTPAGE
Lloh4:
ldr x22, [x22, _BHNDB_REGWIN_T_DYN@GOTPAGEOFF]
Lloh5:
adrp x25, l_.str@PAGE
Lloh6:
add x25, x25, l_.str@PAGEOFF
b LBB0_5
LBB0_4: ; in Loop: Header=BB0_5 Depth=1
add x27, x27, #1
ldr x8, [x23]
add x26, x26, #16
cmp x27, x8
b.hs LBB0_8
LBB0_5: ; =>This Inner Loop Header: Depth=1
ldr x21, [x23, #8]
add x24, x21, x26
ldr x20, [x24, #8]
ldr x8, [x20]
ldr x9, [x22]
cmp x8, x9
cset w0, eq
mov x1, x25
bl _KASSERT
ldr x8, [x24]
cmp x8, x19
b.gt LBB0_4
; %bb.6: ; in Loop: Header=BB0_5 Depth=1
ldr x9, [x20, #8]
add x8, x9, x8
cmp x28, x8
b.gt LBB0_4
; %bb.7:
mov w8, #1
ldr x9, [sp, #8] ; 8-byte Folded Reload
str w8, [x9]
add x0, x21, x26
b LBB0_1
LBB0_8:
mov x0, x23
mov x1, x19
ldr x20, [sp, #24] ; 8-byte Folded Reload
mov x2, x20
bl _bhndb_find_resource_region
cbz x0, LBB0_1
; %bb.9:
ldr w8, [x0]
Lloh7:
adrp x9, _BHNDB_ALLOC_FULFILL_ON_OVERCOMMIT@GOTPAGE
Lloh8:
ldr x9, [x9, _BHNDB_ALLOC_FULFILL_ON_OVERCOMMIT@GOTPAGEOFF]
Lloh9:
ldr w9, [x9]
tst w9, w8
b.eq LBB0_12
; %bb.10:
mov x0, x23
ldr x1, [sp, #16] ; 8-byte Folded Reload
bl _bhndb_dw_steal
cbz x0, LBB0_13
; %bb.11:
mov w8, #1
ldr x9, [sp] ; 8-byte Folded Reload
str w8, [x9]
b LBB0_1
LBB0_12:
mov x0, #0
b LBB0_1
LBB0_13:
add x8, x20, x19
sub x2, x8, #1
Lloh10:
adrp x0, l_.str.1@PAGE
Lloh11:
add x0, x0, l_.str.1@PAGEOFF
mov x1, x19
bl _panic
; implicit-def: $x0
b LBB0_1
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpAdd Lloh5, Lloh6
.loh AdrpLdrGot Lloh3, Lloh4
.loh AdrpLdrGotLdr Lloh7, Lloh8, Lloh9
.loh AdrpAdd Lloh10, Lloh11
.cfi_endproc
; -- End function
.comm _MA_OWNED,4,2 ; @MA_OWNED
.comm _BHNDB_REGWIN_T_DYN,8,3 ; @BHNDB_REGWIN_T_DYN
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "invalid register window type"
.comm _BHNDB_ALLOC_FULFILL_ON_OVERCOMMIT,4,2 ; @BHNDB_ALLOC_FULFILL_ON_OVERCOMMIT
l_.str.1: ; @.str.1
.asciz "register windows exhausted attempting to map 0x%llx-0x%llx\n"
.no_dead_strip _bhndb_io_resource_get_window
.subsections_via_symbols
| AnghaBench/freebsd/sys/dev/bhnd/bhndb/extr_bhndb.c_bhndb_io_resource_get_window.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _typec_altmode_unregister_driver ## -- Begin function typec_altmode_unregister_driver
.p2align 4, 0x90
_typec_altmode_unregister_driver: ## @typec_altmode_unregister_driver
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
popq %rbp
jmp _driver_unregister ## TAILCALL
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _typec_altmode_unregister_driver ; -- Begin function typec_altmode_unregister_driver
.p2align 2
_typec_altmode_unregister_driver: ; @typec_altmode_unregister_driver
.cfi_startproc
; %bb.0:
b _driver_unregister
.cfi_endproc
; -- End function
.subsections_via_symbols
| AnghaBench/linux/drivers/usb/typec/extr_bus.c_typec_altmode_unregister_driver.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function r8153_set_rx_early_timeout
_r8153_set_rx_early_timeout: ## @r8153_set_rx_early_timeout
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq %rdi, %r14
movl (%rdi), %ecx
movl 4(%rdi), %eax
leal 7(%rcx), %ebx
testl %ecx, %ecx
cmovnsl %ecx, %ebx
leal -130(%rax), %ecx
cmpl $4, %ecx
jae LBB0_2
## %bb.1:
movq _USB_RX_EARLY_TIMEOUT@GOTPCREL(%rip), %rax
jmp LBB0_4
LBB0_2:
addl $-128, %eax
cmpl $1, %eax
ja LBB0_5
## %bb.3:
movq _MCU_TYPE_USB@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq _USB_RX_EARLY_TIMEOUT@GOTPCREL(%rip), %rax
movl (%rax), %edx
movq %r14, %rdi
movl $16, %ecx
callq _ocp_write_word
movq _USB_RX_EXTRA_AGGR_TMR@GOTPCREL(%rip), %rax
LBB0_4:
sarl $3, %ebx
movq _MCU_TYPE_USB@GOTPCREL(%rip), %rcx
movl (%rcx), %esi
movl (%rax), %edx
movq %r14, %rdi
movl %ebx, %ecx
popq %rbx
popq %r14
popq %rbp
jmp _ocp_write_word ## TAILCALL
LBB0_5:
popq %rbx
popq %r14
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _MCU_TYPE_USB,4,2 ## @MCU_TYPE_USB
.comm _USB_RX_EARLY_TIMEOUT,4,2 ## @USB_RX_EARLY_TIMEOUT
.comm _USB_RX_EXTRA_AGGR_TMR,4,2 ## @USB_RX_EXTRA_AGGR_TMR
.no_dead_strip _r8153_set_rx_early_timeout
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function r8153_set_rx_early_timeout
_r8153_set_rx_early_timeout: ; @r8153_set_rx_early_timeout
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x19, x0
ldp w8, w9, [x0]
add w10, w8, #7
cmp w8, #0
csel w20, w10, w8, lt
Lloh0:
adrp x8, _USB_RX_EARLY_TIMEOUT@GOTPAGE
Lloh1:
ldr x8, [x8, _USB_RX_EARLY_TIMEOUT@GOTPAGEOFF]
sub w10, w9, #130
Lloh2:
adrp x21, _MCU_TYPE_USB@GOTPAGE
Lloh3:
ldr x21, [x21, _MCU_TYPE_USB@GOTPAGEOFF]
cmp w10, #4
b.lo LBB0_3
; %bb.1:
sub w9, w9, #128
cmp w9, #1
b.hi LBB0_4
; %bb.2:
ldr w1, [x21]
ldr w2, [x8]
mov x0, x19
mov w3, #16
bl _ocp_write_word
Lloh4:
adrp x8, _USB_RX_EXTRA_AGGR_TMR@GOTPAGE
Lloh5:
ldr x8, [x8, _USB_RX_EXTRA_AGGR_TMR@GOTPAGEOFF]
LBB0_3:
asr w3, w20, #3
ldr w1, [x21]
ldr w2, [x8]
mov x0, x19
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
b _ocp_write_word
LBB0_4:
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
.loh AdrpLdrGot Lloh2, Lloh3
.loh AdrpLdrGot Lloh0, Lloh1
.loh AdrpLdrGot Lloh4, Lloh5
.cfi_endproc
; -- End function
.comm _MCU_TYPE_USB,4,2 ; @MCU_TYPE_USB
.comm _USB_RX_EARLY_TIMEOUT,4,2 ; @USB_RX_EARLY_TIMEOUT
.comm _USB_RX_EXTRA_AGGR_TMR,4,2 ; @USB_RX_EXTRA_AGGR_TMR
.no_dead_strip _r8153_set_rx_early_timeout
.subsections_via_symbols
| AnghaBench/linux/drivers/net/usb/extr_r8152.c_r8153_set_rx_early_timeout.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function ocrdma_get_mbx_errno
_ocrdma_get_mbx_errno: ## @ocrdma_get_mbx_errno
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq _OCRDMA_MBX_RSP_STATUS_MASK@GOTPCREL(%rip), %rax
movl (%rax), %eax
andl %edi, %eax
movq _OCRDMA_MBX_RSP_STATUS_SHIFT@GOTPCREL(%rip), %rcx
movb (%rcx), %cl
sarl %cl, %eax
addl $-128, %eax
cmpl $35, %eax
ja LBB0_7
## %bb.1:
movq _EAGAIN@GOTPCREL(%rip), %rcx
leaq LJTI0_0(%rip), %rdx
movslq (%rdx,%rax,4), %rax
addq %rdx, %rax
jmpq *%rax
LBB0_2:
movq _EINVAL@GOTPCREL(%rip), %rcx
jmp LBB0_8
LBB0_4:
movq _ENOBUFS@GOTPCREL(%rip), %rcx
jmp LBB0_8
LBB0_3:
movq _EBUSY@GOTPCREL(%rip), %rcx
jmp LBB0_8
LBB0_5:
movq _OCRDMA_MBX_RSP_ASTATUS_MASK@GOTPCREL(%rip), %rax
andl (%rax), %edi
movq _OCRDMA_MBX_RSP_ASTATUS_SHIFT@GOTPCREL(%rip), %rax
movb (%rax), %cl
sarl %cl, %edi
cmpl $164, %edi
jne LBB0_7
## %bb.6:
movq _EAGAIN@GOTPCREL(%rip), %rcx
jmp LBB0_8
LBB0_7:
movq _EFAULT@GOTPCREL(%rip), %rcx
LBB0_8:
xorl %eax, %eax
subl (%rcx), %eax
popq %rbp
retq
.cfi_endproc
.p2align 2, 0x90
.data_region jt32
.set L0_0_set_2, LBB0_2-LJTI0_0
.set L0_0_set_4, LBB0_4-LJTI0_0
.set L0_0_set_3, LBB0_3-LJTI0_0
.set L0_0_set_8, LBB0_8-LJTI0_0
.set L0_0_set_5, LBB0_5-LJTI0_0
LJTI0_0:
.long L0_0_set_2
.long L0_0_set_4
.long L0_0_set_2
.long L0_0_set_4
.long L0_0_set_4
.long L0_0_set_4
.long L0_0_set_4
.long L0_0_set_4
.long L0_0_set_4
.long L0_0_set_3
.long L0_0_set_2
.long L0_0_set_2
.long L0_0_set_3
.long L0_0_set_4
.long L0_0_set_8
.long L0_0_set_3
.long L0_0_set_3
.long L0_0_set_2
.long L0_0_set_8
.long L0_0_set_4
.long L0_0_set_2
.long L0_0_set_2
.long L0_0_set_2
.long L0_0_set_2
.long L0_0_set_2
.long L0_0_set_2
.long L0_0_set_2
.long L0_0_set_2
.long L0_0_set_2
.long L0_0_set_2
.long L0_0_set_2
.long L0_0_set_2
.long L0_0_set_2
.long L0_0_set_2
.long L0_0_set_5
.long L0_0_set_2
.end_data_region
## -- End function
.comm _OCRDMA_MBX_RSP_STATUS_MASK,4,2 ## @OCRDMA_MBX_RSP_STATUS_MASK
.comm _OCRDMA_MBX_RSP_STATUS_SHIFT,4,2 ## @OCRDMA_MBX_RSP_STATUS_SHIFT
.comm _OCRDMA_MBX_RSP_ASTATUS_MASK,4,2 ## @OCRDMA_MBX_RSP_ASTATUS_MASK
.comm _OCRDMA_MBX_RSP_ASTATUS_SHIFT,4,2 ## @OCRDMA_MBX_RSP_ASTATUS_SHIFT
.comm _EAGAIN,4,2 ## @EAGAIN
.comm _EINVAL,4,2 ## @EINVAL
.comm _EBUSY,4,2 ## @EBUSY
.comm _ENOBUFS,4,2 ## @ENOBUFS
.comm _EFAULT,4,2 ## @EFAULT
.no_dead_strip _ocrdma_get_mbx_errno
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function ocrdma_get_mbx_errno
_ocrdma_get_mbx_errno: ; @ocrdma_get_mbx_errno
.cfi_startproc
; %bb.0:
Lloh0:
adrp x8, _OCRDMA_MBX_RSP_STATUS_MASK@GOTPAGE
Lloh1:
ldr x8, [x8, _OCRDMA_MBX_RSP_STATUS_MASK@GOTPAGEOFF]
Lloh2:
ldr w8, [x8]
and w8, w8, w0
Lloh3:
adrp x9, _OCRDMA_MBX_RSP_STATUS_SHIFT@GOTPAGE
Lloh4:
ldr x9, [x9, _OCRDMA_MBX_RSP_STATUS_SHIFT@GOTPAGEOFF]
Lloh5:
ldr w9, [x9]
asr w8, w8, w9
sub w9, w8, #128
cmp w9, #35
b.hi LBB0_5
; %bb.1:
Lloh6:
adrp x8, _EAGAIN@GOTPAGE
Lloh7:
ldr x8, [x8, _EAGAIN@GOTPAGEOFF]
Lloh8:
adrp x10, lJTI0_0@PAGE
Lloh9:
add x10, x10, lJTI0_0@PAGEOFF
adr x11, LBB0_2
ldrb w12, [x10, x9]
add x11, x11, x12, lsl #2
br x11
LBB0_2:
Lloh10:
adrp x8, _EINVAL@GOTPAGE
Lloh11:
ldr x8, [x8, _EINVAL@GOTPAGEOFF]
b LBB0_7
LBB0_3:
Lloh12:
adrp x8, _ENOBUFS@GOTPAGE
Lloh13:
ldr x8, [x8, _ENOBUFS@GOTPAGEOFF]
b LBB0_7
LBB0_4:
Lloh14:
adrp x8, _EBUSY@GOTPAGE
Lloh15:
ldr x8, [x8, _EBUSY@GOTPAGEOFF]
b LBB0_7
LBB0_5:
Lloh16:
adrp x8, _EFAULT@GOTPAGE
Lloh17:
ldr x8, [x8, _EFAULT@GOTPAGEOFF]
b LBB0_7
LBB0_6:
Lloh18:
adrp x9, _OCRDMA_MBX_RSP_ASTATUS_MASK@GOTPAGE
Lloh19:
ldr x9, [x9, _OCRDMA_MBX_RSP_ASTATUS_MASK@GOTPAGEOFF]
Lloh20:
ldr w9, [x9]
and w9, w9, w0
Lloh21:
adrp x10, _OCRDMA_MBX_RSP_ASTATUS_SHIFT@GOTPAGE
Lloh22:
ldr x10, [x10, _OCRDMA_MBX_RSP_ASTATUS_SHIFT@GOTPAGEOFF]
Lloh23:
ldr w10, [x10]
asr w9, w9, w10
Lloh24:
adrp x10, _EFAULT@GOTPAGE
Lloh25:
ldr x10, [x10, _EFAULT@GOTPAGEOFF]
cmp w9, #164
csel x8, x8, x10, eq
LBB0_7:
ldr w8, [x8]
neg w0, w8
ret
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpAdd Lloh8, Lloh9
.loh AdrpLdrGot Lloh6, Lloh7
.loh AdrpLdrGot Lloh10, Lloh11
.loh AdrpLdrGot Lloh12, Lloh13
.loh AdrpLdrGot Lloh14, Lloh15
.loh AdrpLdrGot Lloh16, Lloh17
.loh AdrpLdrGot Lloh24, Lloh25
.loh AdrpLdrGotLdr Lloh21, Lloh22, Lloh23
.loh AdrpLdrGotLdr Lloh18, Lloh19, Lloh20
.cfi_endproc
.section __TEXT,__const
lJTI0_0:
.byte (LBB0_2-LBB0_2)>>2
.byte (LBB0_3-LBB0_2)>>2
.byte (LBB0_2-LBB0_2)>>2
.byte (LBB0_3-LBB0_2)>>2
.byte (LBB0_3-LBB0_2)>>2
.byte (LBB0_3-LBB0_2)>>2
.byte (LBB0_3-LBB0_2)>>2
.byte (LBB0_3-LBB0_2)>>2
.byte (LBB0_3-LBB0_2)>>2
.byte (LBB0_4-LBB0_2)>>2
.byte (LBB0_2-LBB0_2)>>2
.byte (LBB0_2-LBB0_2)>>2
.byte (LBB0_4-LBB0_2)>>2
.byte (LBB0_3-LBB0_2)>>2
.byte (LBB0_7-LBB0_2)>>2
.byte (LBB0_4-LBB0_2)>>2
.byte (LBB0_4-LBB0_2)>>2
.byte (LBB0_2-LBB0_2)>>2
.byte (LBB0_7-LBB0_2)>>2
.byte (LBB0_3-LBB0_2)>>2
.byte (LBB0_2-LBB0_2)>>2
.byte (LBB0_2-LBB0_2)>>2
.byte (LBB0_2-LBB0_2)>>2
.byte (LBB0_2-LBB0_2)>>2
.byte (LBB0_2-LBB0_2)>>2
.byte (LBB0_2-LBB0_2)>>2
.byte (LBB0_2-LBB0_2)>>2
.byte (LBB0_2-LBB0_2)>>2
.byte (LBB0_2-LBB0_2)>>2
.byte (LBB0_2-LBB0_2)>>2
.byte (LBB0_2-LBB0_2)>>2
.byte (LBB0_2-LBB0_2)>>2
.byte (LBB0_2-LBB0_2)>>2
.byte (LBB0_2-LBB0_2)>>2
.byte (LBB0_6-LBB0_2)>>2
.byte (LBB0_2-LBB0_2)>>2
; -- End function
.comm _OCRDMA_MBX_RSP_STATUS_MASK,4,2 ; @OCRDMA_MBX_RSP_STATUS_MASK
.comm _OCRDMA_MBX_RSP_STATUS_SHIFT,4,2 ; @OCRDMA_MBX_RSP_STATUS_SHIFT
.comm _OCRDMA_MBX_RSP_ASTATUS_MASK,4,2 ; @OCRDMA_MBX_RSP_ASTATUS_MASK
.comm _OCRDMA_MBX_RSP_ASTATUS_SHIFT,4,2 ; @OCRDMA_MBX_RSP_ASTATUS_SHIFT
.comm _EAGAIN,4,2 ; @EAGAIN
.comm _EINVAL,4,2 ; @EINVAL
.comm _EBUSY,4,2 ; @EBUSY
.comm _ENOBUFS,4,2 ; @ENOBUFS
.comm _EFAULT,4,2 ; @EFAULT
.no_dead_strip _ocrdma_get_mbx_errno
.subsections_via_symbols
| AnghaBench/linux/drivers/infiniband/hw/ocrdma/extr_ocrdma_hw.c_ocrdma_get_mbx_errno.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _foo_jac_sq ## -- Begin function foo_jac_sq
.p2align 4, 0x90
_foo_jac_sq: ## @foo_jac_sq
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
mulsd %xmm0, %xmm0
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _foo_jac_fmin ## -- Begin function foo_jac_fmin
.p2align 4, 0x90
_foo_jac_fmin: ## @foo_jac_fmin
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movapd %xmm0, %xmm2
cmpunordsd %xmm0, %xmm2
movapd %xmm2, %xmm3
andpd %xmm1, %xmm3
minsd %xmm0, %xmm1
andnpd %xmm1, %xmm2
orpd %xmm3, %xmm2
movapd %xmm2, %xmm0
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__literal8,8byte_literals
.p2align 3 ## -- Begin function F
LCPI2_0:
.quad 0x3f83a8bec97cab4a ## double 0.0095991997946690728
LCPI2_1:
.quad 0x3fe0000000000000 ## double 0.5
LCPI2_2:
.quad 0xbfb184b6093c8354 ## double -0.068431260352111167
LCPI2_3:
.quad 0x40236dbd2283a883 ## double 9.7143336091724048
LCPI2_4:
.quad 0x4057d0f6e6dab318 ## double 95.265069688390554
LCPI2_5:
.quad 0x3fa5de574798101d ## double 0.042711951722623299
LCPI2_6:
.quad 0xbfc56b67771a6f72 ## double -0.16734021487797751
LCPI2_7:
.quad 0x3fb060eb5e7398d7 ## double 0.063978872832737607
LCPI2_8:
.quad 0xbfb060eb5e7398d7 ## double -0.063978872832737607
LCPI2_9:
.quad 0x401eb0746e6afdde ## double 7.6723191502382786
LCPI2_10:
.quad 0xc052cf580a7d5b90 ## double -75.239748594684215
LCPI2_11:
.quad 0x3fa8cf9a0171bde3 ## double 0.048458874393755534
LCPI2_12:
.quad 0xbfc7da66307d0934 ## double -0.18635251395796504
LCPI2_13:
.quad 0xbf727b65c5a72bc1 ## double -0.0045122123214679797
LCPI2_14:
.quad 0x3fd966eda631cb7a ## double 0.39690724592144699
LCPI2_15:
.quad 0xbfd966eda631cb7a ## double -0.39690724592144699
LCPI2_16:
.quad 0x40087719fcc5171b ## double 3.0581550357482121
LCPI2_17:
.quad 0xc03dfd816c2bef76 ## double -29.990256081320204
LCPI2_18:
.quad 0x3f82f2698a004954 ## double 0.0092514271964644693
LCPI2_19:
.quad 0x3fb51dc75e661920 ## double 0.082485638186061028
LCPI2_20:
.quad 0x3f8adde2745359a7 ## double 0.013118523764586621
LCPI2_21:
.quad 0x3c90000000000000 ## double 5.5511151231257827E-17
LCPI2_22:
.quad 0x3fb89f89713dc053 ## double 0.096184339663296911
LCPI2_23:
.quad 0x3f9d8c3e87e3b396 ## double 0.028855301898989071
LCPI2_24:
.quad 0xbfdb7801f38da86b ## double -0.42919968399531311
LCPI2_25:
.quad 0xbfa8047849d8f494 ## double -0.046909102453789903
LCPI2_26:
.quad 0x3fa4a8aaddbab8a8 ## double 0.040349330488753055
LCPI2_27:
.quad 0xbfa4a8aaddbab8a8 ## double -0.040349330488753055
LCPI2_28:
.quad 0x3ff07f43c1bfc3a0 ## double 1.0310704773257626
LCPI2_29:
.quad 0x402439028349a518 ## double 10.11134729646669
LCPI2_30:
.quad 0x3f70624dd2f1a9fc ## double 0.0040000000000000001
LCPI2_31:
.quad 0xbfa1eb851eb851ec ## double -0.035000000000000003
LCPI2_32:
.quad 0x3c80000000000000 ## double 2.7755575615628914E-17
LCPI2_33:
.quad 0x3c70000000000000 ## double 1.3877787807814457E-17
LCPI2_34:
.quad 0x3ee4f8b588e368f1 ## double 1.0000000000000001E-5
LCPI2_35:
.quad 0x3ff8000000000000 ## double 1.5
LCPI2_36:
.quad 0xc072c00000000000 ## double -300
LCPI2_37:
.quad 0x3fd5555555555555 ## double 0.33333333333333331
LCPI2_38:
.quad 0x4049000000000000 ## double 50
LCPI2_39:
.quad 0x410587fb5dabb204 ## double 176383.42073763919
LCPI2_40:
.quad 0xc008000000000000 ## double -3
LCPI2_41:
.quad 0x3ff0000000000000 ## double 1
LCPI2_42:
.quad 0x3fc999999999999a ## double 0.20000000000000001
LCPI2_43:
.quad 0x3fe999999999999a ## double 0.80000000000000004
LCPI2_44:
.quad 0xbc70000000000000 ## double -1.3877787807814457E-17
LCPI2_45:
.quad 0xbf4b020c9c63afe9 ## double -8.24218900360216E-4
LCPI2_46:
.quad 0x3f292c73a5bdb29c ## double 1.9205962859860238E-4
LCPI2_47:
.quad 0xbf525b145384fce8 ## double -0.0011203478334918474
LCPI2_48:
.quad 0x3f4630437bcf9f9e ## double 6.7714018916563748E-4
LCPI2_49:
.quad 0xbf2976f43e97efde ## double -1.9427997112575387E-4
LCPI2_50:
.quad 0xbf425b145384fce8 ## double -5.6017391674592371E-4
LCPI2_51:
.quad 0x3f1976f43e97efde ## double 9.7139985562876935E-5
LCPI2_52:
.quad 0x3fc6035caa5b8ec0 ## double 0.17197759931797485
LCPI2_53:
.quad 0x3f5f8486f7594818 ## double 0.0019236867932659382
LCPI2_54:
.quad 0x3f51050133a5f8da ## double 0.0010387908683636067
LCPI2_55:
.quad 0xbf9eb851eb851eb8 ## double -0.029999999999999999
LCPI2_56:
.quad 0x3fa999999999999a ## double 0.050000000000000003
LCPI2_57:
.quad 0xbf9999999999999a ## double -0.025000000000000001
LCPI2_58:
.quad 0x3f425b145384fce8 ## double 5.6017391674592371E-4
LCPI2_59:
.quad 0x3f59b08a494c0d79 ## double 0.0015679693653403977
LCPI2_60:
.quad 0x3fa10a0c2a85de2d ## double 0.03327978152350073
LCPI2_61:
.quad 0x3f77a3653982f612 ## double 0.0057710603797978145
LCPI2_62:
.quad 0x3f913c79cf44d36d ## double 0.016832259441076958
LCPI2_63:
.quad 0x3fc6de755b39d616 ## double 0.17866389231100815
LCPI2_64:
.quad 0xbf2347248250412c ## double -1.4707871119457852E-4
LCPI2_65:
.quad 0xbffc0893fd832bcd ## double -1.7520942595317479
LCPI2_66:
.quad 0x4103ef1a878c497b ## double 163299.31618554503
LCPI2_67:
.quad 0x4008000000000000 ## double 3
LCPI2_68:
.quad 0x410232799687291b ## double 149071.19849998583
LCPI2_69:
.quad 0xc02439028349a518 ## double -10.11134729646669
LCPI2_70:
.quad 0x3fe9e2929391772a ## double 0.80890778371733529
LCPI2_71:
.quad 0x403dfd816c2bef76 ## double 29.990256081320204
LCPI2_72:
.quad 0x4052cf580a7d5b90 ## double 75.239748594684215
LCPI2_73:
.quad 0xbc80000000000000 ## double -2.7755575615628914E-17
LCPI2_74:
.quad 0x3f525b145384fce8 ## double 0.0011203478334918474
LCPI2_75:
.quad 0x3f2976f43e97efde ## double 1.9427997112575387E-4
LCPI2_76:
.quad 0xbf1976f43e97efde ## double -9.7139985562876935E-5
LCPI2_77:
.quad 0xbfb8f3b341a8bf66 ## double -0.097468570261069226
LCPI2_78:
.quad 0x3fb431c983f2d665 ## double 0.078884691919336072
LCPI2_79:
.quad 0xbf9d9ef12e009beb ## double -0.028926628525507352
LCPI2_80:
.quad 0x3fd3bf4b740067f2 ## double 0.30855070427207842
LCPI2_81:
.quad 0x403c3d82dbf5bb0f ## double 28.240278003208967
LCPI2_82:
.quad 0x40714f14923e1140 ## double 276.94252228016921
LCPI2_83:
.quad 0xbf913c79cf44d36d ## double -0.016832259441076958
LCPI2_84:
.quad 0x3fc144c43f671202 ## double 0.13491108985739692
.section __TEXT,__literal16,16byte_literals
.p2align 4
LCPI2_85:
.quad 0x8000000000000000 ## double -0
.quad 0x8000000000000000 ## double -0
.section __TEXT,__text,regular,pure_instructions
.globl _F
.p2align 4, 0x90
_F: ## @F
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
subq $1120, %rsp ## imm = 0x460
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq %rsi, %r14
movq (%rdi), %rbx
testq %rbx, %rbx
je LBB2_1
## %bb.2:
movsd (%rbx), %xmm0 ## xmm0 = mem[0],zero
movsd 176(%rbx), %xmm1 ## xmm1 = mem[0],zero
movsd LCPI2_0(%rip), %xmm2 ## xmm2 = mem[0],zero
movsd %xmm1, -512(%rbp) ## 8-byte Spill
mulsd %xmm1, %xmm2
movsd %xmm2, -120(%rbp) ## 8-byte Spill
jmp LBB2_3
LBB2_1:
xorpd %xmm0, %xmm0
movsd %xmm0, -120(%rbp) ## 8-byte Spill
xorpd %xmm0, %xmm0
movsd %xmm0, -512(%rbp) ## 8-byte Spill
xorpd %xmm0, %xmm0
LBB2_3:
mulsd LCPI2_1(%rip), %xmm0
callq ___sincos_stret
movapd %xmm1, %xmm4
mulsd %xmm1, %xmm1
mulsd %xmm0, %xmm4
mulsd %xmm0, %xmm0
subsd %xmm0, %xmm1
movsd LCPI2_2(%rip), %xmm3 ## xmm3 = mem[0],zero
movsd %xmm1, -440(%rbp) ## 8-byte Spill
mulsd %xmm1, %xmm3
testq %rbx, %rbx
movsd %xmm3, -1104(%rbp) ## 8-byte Spill
je LBB2_4
## %bb.5:
movsd 192(%rbx), %xmm0 ## xmm0 = mem[0],zero
movapd %xmm3, %xmm1
movsd %xmm0, -848(%rbp) ## 8-byte Spill
mulsd %xmm0, %xmm1
addsd %xmm4, %xmm4
movsd %xmm4, -136(%rbp) ## 8-byte Spill
movsd LCPI2_2(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm4, %xmm2
movsd 48(%rbx), %xmm0 ## xmm0 = mem[0],zero
movsd 184(%rbx), %xmm4 ## xmm4 = mem[0],zero
movsd %xmm2, -1008(%rbp) ## 8-byte Spill
movsd %xmm4, -840(%rbp) ## 8-byte Spill
mulsd %xmm4, %xmm2
subsd %xmm2, %xmm1
movsd -120(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
addsd %xmm1, %xmm2
mulsd LCPI2_3(%rip), %xmm2
movsd LCPI2_4(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm3, %xmm1
addsd %xmm2, %xmm1
movsd %xmm1, -832(%rbp) ## 8-byte Spill
movsd 200(%rbx), %xmm1 ## xmm1 = mem[0],zero
movsd %xmm1, -160(%rbp) ## 8-byte Spill
jmp LBB2_6
LBB2_4:
xorpd %xmm5, %xmm5
movapd %xmm3, %xmm0
mulsd %xmm5, %xmm0
addsd %xmm4, %xmm4
movsd %xmm4, -136(%rbp) ## 8-byte Spill
movsd LCPI2_2(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm4, %xmm1
movsd %xmm1, -1008(%rbp) ## 8-byte Spill
xorpd %xmm2, %xmm2
movsd %xmm2, -160(%rbp) ## 8-byte Spill
mulsd %xmm5, %xmm1
subsd %xmm1, %xmm0
movsd -120(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
addsd %xmm0, %xmm1
mulsd LCPI2_3(%rip), %xmm1
movsd LCPI2_4(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm3, %xmm0
addsd %xmm1, %xmm0
movsd %xmm0, -832(%rbp) ## 8-byte Spill
xorpd %xmm0, %xmm0
movsd %xmm0, -848(%rbp) ## 8-byte Spill
xorpd %xmm0, %xmm0
movsd %xmm0, -840(%rbp) ## 8-byte Spill
xorpd %xmm0, %xmm0
LBB2_6:
callq ___sincos_stret
movsd -440(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm0, %xmm4
movsd -136(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
movapd %xmm3, %xmm5
mulsd %xmm1, %xmm5
xorpd %xmm6, %xmm6
xorpd %xmm2, %xmm2
movsd %xmm2, -120(%rbp) ## 8-byte Spill
xorpd %xmm2, %xmm2
testq %rbx, %rbx
je LBB2_8
## %bb.7:
movsd 8(%rbx), %xmm2 ## xmm2 = mem[0],zero
movsd %xmm2, -120(%rbp) ## 8-byte Spill
movsd 24(%rbx), %xmm2 ## xmm2 = mem[0],zero
LBB2_8:
movsd %xmm2, -504(%rbp) ## 8-byte Spill
addsd %xmm5, %xmm4
movsd %xmm4, -232(%rbp) ## 8-byte Spill
mulsd -440(%rbp), %xmm1 ## 8-byte Folded Reload
mulsd %xmm3, %xmm0
subsd %xmm0, %xmm1
movsd %xmm1, -296(%rbp) ## 8-byte Spill
xorpd %xmm0, %xmm0
testq %rbx, %rbx
je LBB2_10
## %bb.9:
movsd 80(%rbx), %xmm0 ## xmm0 = mem[0],zero
movsd 216(%rbx), %xmm6 ## xmm6 = mem[0],zero
LBB2_10:
movsd %xmm6, -88(%rbp) ## 8-byte Spill
callq ___sincos_stret
movsd -296(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
movapd %xmm5, %xmm3
mulsd %xmm0, %xmm3
movsd -232(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
movapd %xmm4, %xmm7
mulsd %xmm1, %xmm7
xorpd %xmm2, %xmm2
xorpd %xmm6, %xmm6
testq %rbx, %rbx
je LBB2_12
## %bb.11:
movsd 56(%rbx), %xmm6 ## xmm6 = mem[0],zero
LBB2_12:
addsd %xmm7, %xmm3
movsd %xmm3, -608(%rbp) ## 8-byte Spill
mulsd %xmm5, %xmm1
mulsd %xmm4, %xmm0
subsd %xmm0, %xmm1
movsd %xmm1, -288(%rbp) ## 8-byte Spill
xorpd %xmm0, %xmm0
testq %rbx, %rbx
movsd -136(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
je LBB2_14
## %bb.13:
movsd 112(%rbx), %xmm0 ## xmm0 = mem[0],zero
movsd 232(%rbx), %xmm2 ## xmm2 = mem[0],zero
LBB2_14:
movsd %xmm2, -328(%rbp) ## 8-byte Spill
movsd LCPI2_7(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm3, %xmm1
movsd %xmm1, -712(%rbp) ## 8-byte Spill
addsd -120(%rbp), %xmm6 ## 8-byte Folded Reload
movsd %xmm6, -168(%rbp) ## 8-byte Spill
movsd LCPI2_13(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm5, %xmm1
movsd %xmm1, -24(%rbp) ## 8-byte Spill
movsd LCPI2_14(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm4, %xmm1
movsd %xmm1, -344(%rbp) ## 8-byte Spill
callq ___sincos_stret
movsd -288(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
movapd %xmm3, %xmm15
mulsd %xmm1, %xmm15
movsd -608(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
movapd %xmm9, %xmm2
mulsd %xmm0, %xmm2
subsd %xmm2, %xmm15
mulsd %xmm3, %xmm0
mulsd %xmm9, %xmm1
addsd %xmm0, %xmm1
movsd LCPI2_21(%rip), %xmm5 ## xmm5 = mem[0],zero
xorpd %xmm4, %xmm4
xorpd %xmm8, %xmm8
testq %rbx, %rbx
je LBB2_16
## %bb.15:
movsd 88(%rbx), %xmm8 ## xmm8 = mem[0],zero
LBB2_16:
movsd -712(%rbp), %xmm11 ## 8-byte Reload
## xmm11 = mem[0],zero
addsd -1104(%rbp), %xmm11 ## 8-byte Folded Reload
movsd -344(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
addsd -24(%rbp), %xmm12 ## 8-byte Folded Reload
movapd %xmm1, %xmm3
mulsd %xmm5, %xmm3
movsd -168(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
addsd %xmm7, %xmm8
movsd LCPI2_25(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm15, %xmm2
movsd LCPI2_26(%rip), %xmm13 ## xmm13 = mem[0],zero
mulsd %xmm1, %xmm13
testq %rbx, %rbx
je LBB2_18
## %bb.17:
movsd 120(%rbx), %xmm4 ## xmm4 = mem[0],zero
LBB2_18:
movsd LCPI2_8(%rip), %xmm14 ## xmm14 = mem[0],zero
mulsd -440(%rbp), %xmm14 ## 8-byte Folded Reload
movsd LCPI2_13(%rip), %xmm10 ## xmm10 = mem[0],zero
mulsd -232(%rbp), %xmm10 ## 8-byte Folded Reload
movsd LCPI2_15(%rip), %xmm6 ## xmm6 = mem[0],zero
mulsd -296(%rbp), %xmm6 ## 8-byte Folded Reload
movapd %xmm11, %xmm0
mulsd -120(%rbp), %xmm0 ## 8-byte Folded Reload
movsd %xmm0, -376(%rbp) ## 8-byte Spill
movapd %xmm12, %xmm0
mulsd %xmm7, %xmm0
movsd %xmm0, -384(%rbp) ## 8-byte Spill
movsd LCPI2_24(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm9, %xmm0
movapd %xmm15, %xmm9
mulsd %xmm5, %xmm9
addsd %xmm2, %xmm13
movapd %xmm15, %xmm2
movsd %xmm3, -32(%rbp) ## 8-byte Spill
subsd %xmm3, %xmm2
movsd %xmm2, -80(%rbp) ## 8-byte Spill
addsd %xmm8, %xmm4
movsd %xmm4, -24(%rbp) ## 8-byte Spill
movsd LCPI2_25(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm1, %xmm5
movsd LCPI2_27(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm15, %xmm2
xorpd %xmm7, %xmm7
xorpd %xmm4, %xmm4
testq %rbx, %rbx
je LBB2_20
## %bb.19:
movsd 40(%rbx), %xmm4 ## xmm4 = mem[0],zero
LBB2_20:
movsd %xmm11, -712(%rbp) ## 8-byte Spill
movsd %xmm12, -344(%rbp) ## 8-byte Spill
addsd -1008(%rbp), %xmm14 ## 8-byte Folded Reload
addsd %xmm6, %xmm10
movsd LCPI2_24(%rip), %xmm6 ## xmm6 = mem[0],zero
mulsd -288(%rbp), %xmm6 ## 8-byte Folded Reload
addsd %xmm2, %xmm5
movapd %xmm1, %xmm3
addsd %xmm9, %xmm3
subsd %xmm1, %xmm9
movsd -32(%rbp), %xmm11 ## 8-byte Reload
## xmm11 = mem[0],zero
addsd %xmm15, %xmm11
addsd -376(%rbp), %xmm4 ## 8-byte Folded Reload
movsd -384(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
movsd %xmm4, -104(%rbp) ## 8-byte Spill
addsd %xmm4, %xmm2
movsd %xmm0, -784(%rbp) ## 8-byte Spill
movsd %xmm8, -400(%rbp) ## 8-byte Spill
mulsd %xmm8, %xmm0
movsd %xmm2, -216(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm2
movsd %xmm2, -368(%rbp) ## 8-byte Spill
movsd %xmm13, -224(%rbp) ## 8-byte Spill
mulsd -24(%rbp), %xmm13 ## 8-byte Folded Reload
movsd %xmm13, -464(%rbp) ## 8-byte Spill
movsd LCPI2_30(%rip), %xmm0 ## xmm0 = mem[0],zero
movsd -80(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm2, %xmm0
movapd %xmm2, %xmm4
mulsd %xmm0, %xmm4
testq %rbx, %rbx
je LBB2_22
## %bb.21:
movsd 32(%rbx), %xmm7 ## xmm7 = mem[0],zero
LBB2_22:
movsd %xmm14, -1040(%rbp) ## 8-byte Spill
addsd %xmm14, %xmm7
movsd %xmm10, -408(%rbp) ## 8-byte Spill
movsd %xmm7, -144(%rbp) ## 8-byte Spill
addsd %xmm7, %xmm10
movsd %xmm6, -128(%rbp) ## 8-byte Spill
addsd %xmm6, %xmm10
movsd %xmm5, -496(%rbp) ## 8-byte Spill
addsd %xmm5, %xmm10
movsd LCPI2_30(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm3, %xmm5
addsd %xmm10, %xmm5
addsd LCPI2_31(%rip), %xmm5
movapd %xmm5, %xmm2
mulsd LCPI2_1(%rip), %xmm2
movapd %xmm5, %xmm7
movapd %xmm5, %xmm8
movsd %xmm5, -208(%rbp) ## 8-byte Spill
subsd %xmm2, %xmm7
movsd %xmm10, -56(%rbp) ## 8-byte Spill
subsd %xmm10, %xmm7
movsd %xmm3, -392(%rbp) ## 8-byte Spill
movapd %xmm3, %xmm2
mulsd %xmm7, %xmm2
addsd %xmm2, %xmm4
movsd -80(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
movsd %xmm4, -312(%rbp) ## 8-byte Spill
mulsd %xmm4, %xmm2
movapd %xmm9, %xmm3
mulsd %xmm0, %xmm3
movsd %xmm11, -32(%rbp) ## 8-byte Spill
movapd %xmm11, %xmm4
mulsd %xmm7, %xmm4
addsd %xmm3, %xmm4
movsd %xmm9, -600(%rbp) ## 8-byte Spill
movapd %xmm9, %xmm3
movsd %xmm4, -304(%rbp) ## 8-byte Spill
mulsd %xmm4, %xmm3
addsd %xmm2, %xmm3
movsd LCPI2_32(%rip), %xmm5 ## xmm5 = mem[0],zero
movapd %xmm15, %xmm2
mulsd %xmm5, %xmm2
movsd LCPI2_33(%rip), %xmm4 ## xmm4 = mem[0],zero
movapd %xmm1, %xmm6
mulsd %xmm4, %xmm6
addsd %xmm2, %xmm6
mulsd %xmm6, %xmm0
mulsd %xmm5, %xmm1
mulsd %xmm4, %xmm15
subsd %xmm15, %xmm1
movsd %xmm1, -552(%rbp) ## 8-byte Spill
mulsd %xmm1, %xmm7
addsd %xmm0, %xmm7
movsd %xmm6, -896(%rbp) ## 8-byte Spill
movsd %xmm7, -272(%rbp) ## 8-byte Spill
mulsd %xmm7, %xmm6
addsd %xmm3, %xmm6
movsd %xmm6, -592(%rbp) ## 8-byte Spill
movapd %xmm8, %xmm0
mulsd %xmm8, %xmm0
addsd LCPI2_34(%rip), %xmm0
sqrtsd %xmm0, %xmm0
movsd LCPI2_35(%rip), %xmm1 ## xmm1 = mem[0],zero
callq _pow
movsd %xmm0, -760(%rbp) ## 8-byte Spill
movsd -464(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
addsd -368(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -24(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -592(%rbp), %xmm1 ## 8-byte Folded Reload
movsd %xmm0, -96(%rbp) ## 8-byte Spill
addsd %xmm0, %xmm1
movsd %xmm1, -456(%rbp) ## 8-byte Spill
movsd -208(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd LCPI2_36(%rip), %xmm0
callq _tanh
movsd %xmm0, -992(%rbp) ## 8-byte Spill
movsd LCPI2_37(%rip), %xmm0 ## xmm0 = mem[0],zero
subsd -456(%rbp), %xmm0 ## 8-byte Folded Reload
mulsd LCPI2_38(%rip), %xmm0
callq _tanh
movsd %xmm0, -648(%rbp) ## 8-byte Spill
xorpd %xmm0, %xmm0
movsd %xmm0, -424(%rbp) ## 8-byte Spill
xorpd %xmm0, %xmm0
testq %rbx, %rbx
je LBB2_24
## %bb.23:
movsd 144(%rbx), %xmm0 ## xmm0 = mem[0],zero
LBB2_24:
callq ___sincos_stret
movapd %xmm1, %xmm4
movsd LCPI2_32(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm4
movsd LCPI2_44(%rip), %xmm5 ## xmm5 = mem[0],zero
movapd %xmm0, %xmm3
mulsd %xmm5, %xmm3
addsd %xmm4, %xmm3
movsd %xmm3, -208(%rbp) ## 8-byte Spill
mulsd %xmm1, %xmm5
movapd %xmm0, %xmm3
mulsd %xmm2, %xmm3
subsd %xmm3, %xmm5
movsd %xmm5, -176(%rbp) ## 8-byte Spill
movsd -80(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
movapd %xmm3, %xmm4
mulsd %xmm1, %xmm4
movsd -600(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
movapd %xmm5, %xmm2
mulsd %xmm0, %xmm2
addsd %xmm4, %xmm2
movsd %xmm2, -544(%rbp) ## 8-byte Spill
movapd %xmm5, %xmm4
mulsd %xmm1, %xmm4
movapd %xmm3, %xmm2
mulsd %xmm0, %xmm2
subsd %xmm2, %xmm4
movsd %xmm4, -72(%rbp) ## 8-byte Spill
movsd -32(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
movapd %xmm4, %xmm3
mulsd %xmm1, %xmm3
movsd -392(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
movapd %xmm6, %xmm2
mulsd %xmm0, %xmm2
subsd %xmm2, %xmm3
movsd %xmm3, -736(%rbp) ## 8-byte Spill
mulsd %xmm6, %xmm1
mulsd %xmm4, %xmm0
addsd %xmm1, %xmm0
movsd %xmm0, -40(%rbp) ## 8-byte Spill
xorpd %xmm2, %xmm2
testq %rbx, %rbx
je LBB2_26
## %bb.25:
movsd 152(%rbx), %xmm2 ## xmm2 = mem[0],zero
movsd 248(%rbx), %xmm0 ## xmm0 = mem[0],zero
movsd %xmm0, -424(%rbp) ## 8-byte Spill
LBB2_26:
movsd LCPI2_52(%rip), %xmm8 ## xmm8 = mem[0],zero
movsd -80(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm8, %xmm7
movsd LCPI2_53(%rip), %xmm3 ## xmm3 = mem[0],zero
movapd %xmm5, %xmm0
mulsd %xmm3, %xmm0
subsd %xmm0, %xmm7
movsd LCPI2_54(%rip), %xmm1 ## xmm1 = mem[0],zero
movsd -896(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
movapd %xmm5, %xmm0
mulsd %xmm1, %xmm0
subsd %xmm0, %xmm7
mulsd %xmm8, %xmm6
movapd %xmm4, %xmm0
mulsd %xmm3, %xmm0
subsd %xmm0, %xmm6
movsd -552(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
movapd %xmm3, %xmm0
mulsd %xmm1, %xmm0
subsd %xmm0, %xmm6
movsd %xmm7, -776(%rbp) ## 8-byte Spill
movsd -24(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm0, %xmm7
movsd %xmm7, -584(%rbp) ## 8-byte Spill
movapd %xmm5, %xmm1
mulsd %xmm2, %xmm1
movsd %xmm1, -336(%rbp) ## 8-byte Spill
movsd %xmm2, -560(%rbp) ## 8-byte Spill
addsd %xmm2, %xmm0
movsd %xmm0, -432(%rbp) ## 8-byte Spill
movsd %xmm6, -488(%rbp) ## 8-byte Spill
movsd -56(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
addsd %xmm6, %xmm2
movsd %xmm2, -56(%rbp) ## 8-byte Spill
movsd LCPI2_55(%rip), %xmm4 ## xmm4 = mem[0],zero
addsd %xmm2, %xmm4
movapd %xmm4, %xmm0
movsd LCPI2_1(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
movapd %xmm4, %xmm6
movsd %xmm4, -48(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm6
subsd %xmm2, %xmm6
movapd %xmm6, %xmm1
mulsd -40(%rbp), %xmm1 ## 8-byte Folded Reload
movsd %xmm1, -192(%rbp) ## 8-byte Spill
movsd -544(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm1, %xmm0
mulsd %xmm6, %xmm3
movsd %xmm3, -696(%rbp) ## 8-byte Spill
mulsd -736(%rbp), %xmm6 ## 8-byte Folded Reload
movsd %xmm6, -200(%rbp) ## 8-byte Spill
movsd -72(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm6, %xmm1
addsd %xmm0, %xmm1
mulsd %xmm3, %xmm5
addsd %xmm1, %xmm5
movsd %xmm5, -888(%rbp) ## 8-byte Spill
movapd %xmm4, %xmm0
mulsd %xmm4, %xmm0
movsd LCPI2_34(%rip), %xmm1 ## xmm1 = mem[0],zero
addsd %xmm1, %xmm0
sqrtsd %xmm0, %xmm0
movsd LCPI2_35(%rip), %xmm1 ## xmm1 = mem[0],zero
callq _pow
movsd %xmm0, -912(%rbp) ## 8-byte Spill
movsd -96(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
addsd -584(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm0, %xmm2
movsd %xmm0, -96(%rbp) ## 8-byte Spill
movsd -888(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd -432(%rbp), %xmm3 ## 8-byte Folded Reload
movsd -208(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -192(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -176(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -200(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm0, %xmm1
addsd -696(%rbp), %xmm1 ## 8-byte Folded Reload
movsd %xmm1, -1096(%rbp) ## 8-byte Spill
movapd %xmm1, %xmm0
mulsd -336(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm3
addsd %xmm2, %xmm3
movsd %xmm3, -248(%rbp) ## 8-byte Spill
movsd -48(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd LCPI2_36(%rip), %xmm0
callq _tanh
movsd %xmm0, -752(%rbp) ## 8-byte Spill
movsd LCPI2_37(%rip), %xmm0 ## xmm0 = mem[0],zero
subsd -248(%rbp), %xmm0 ## 8-byte Folded Reload
mulsd LCPI2_38(%rip), %xmm0
callq _tanh
movsd %xmm0, -640(%rbp) ## 8-byte Spill
movsd LCPI2_56(%rip), %xmm3 ## xmm3 = mem[0],zero
movsd -544(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
movapd %xmm2, %xmm0
mulsd %xmm3, %xmm0
movapd %xmm2, %xmm1
movapd %xmm2, %xmm8
mulsd %xmm0, %xmm1
movsd -40(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
movapd %xmm5, %xmm6
mulsd %xmm3, %xmm6
movsd -56(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
addsd %xmm4, %xmm6
addsd LCPI2_57(%rip), %xmm6
movapd %xmm6, %xmm2
mulsd LCPI2_1(%rip), %xmm2
movapd %xmm6, %xmm7
movapd %xmm6, %xmm9
movsd %xmm6, -48(%rbp) ## 8-byte Spill
subsd %xmm2, %xmm7
subsd %xmm4, %xmm7
movapd %xmm5, %xmm2
mulsd %xmm7, %xmm2
addsd %xmm1, %xmm2
movsd -208(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
movapd %xmm4, %xmm1
mulsd %xmm3, %xmm1
mulsd %xmm1, %xmm4
addsd %xmm2, %xmm4
movsd %xmm4, -184(%rbp) ## 8-byte Spill
movapd %xmm8, %xmm2
mulsd %xmm4, %xmm2
movsd -72(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
movapd %xmm5, %xmm3
mulsd %xmm0, %xmm3
movsd -736(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm7, %xmm4
addsd %xmm3, %xmm4
movsd -176(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd %xmm1, %xmm6
addsd %xmm4, %xmm6
movsd %xmm6, -240(%rbp) ## 8-byte Spill
movapd %xmm5, %xmm3
mulsd %xmm6, %xmm3
addsd %xmm2, %xmm3
movsd -896(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm2, %xmm0
mulsd -552(%rbp), %xmm7 ## 8-byte Folded Reload
addsd %xmm0, %xmm7
addsd %xmm1, %xmm7
movsd %xmm7, -688(%rbp) ## 8-byte Spill
mulsd %xmm7, %xmm2
addsd %xmm3, %xmm2
movsd %xmm2, -880(%rbp) ## 8-byte Spill
movapd %xmm9, %xmm0
mulsd %xmm9, %xmm0
addsd LCPI2_34(%rip), %xmm0
sqrtsd %xmm0, %xmm0
movsd LCPI2_35(%rip), %xmm1 ## xmm1 = mem[0],zero
callq _pow
movsd %xmm0, -744(%rbp) ## 8-byte Spill
movsd -880(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd -432(%rbp), %xmm2 ## 8-byte Folded Reload
movsd -208(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -184(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -176(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -240(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm0, %xmm1
addsd -688(%rbp), %xmm1 ## 8-byte Folded Reload
movsd %xmm1, -1088(%rbp) ## 8-byte Spill
movapd %xmm1, %xmm0
mulsd -336(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm2
addsd -96(%rbp), %xmm2 ## 8-byte Folded Reload
movsd %xmm2, -576(%rbp) ## 8-byte Spill
movsd -48(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd LCPI2_36(%rip), %xmm0
callq _tanh
movsd %xmm0, -984(%rbp) ## 8-byte Spill
movsd LCPI2_37(%rip), %xmm0 ## xmm0 = mem[0],zero
subsd -576(%rbp), %xmm0 ## 8-byte Folded Reload
mulsd LCPI2_38(%rip), %xmm0
callq _tanh
movsd %xmm0, -632(%rbp) ## 8-byte Spill
xorpd %xmm0, %xmm0
movsd %xmm0, -96(%rbp) ## 8-byte Spill
xorpd %xmm1, %xmm1
xorpd %xmm0, %xmm0
testq %rbx, %rbx
je LBB2_28
## %bb.27:
movsd 64(%rbx), %xmm0 ## xmm0 = mem[0],zero
movsd 208(%rbx), %xmm1 ## xmm1 = mem[0],zero
LBB2_28:
movsd %xmm1, -616(%rbp) ## 8-byte Spill
callq ___sincos_stret
movsd -440(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
movapd %xmm2, %xmm4
mulsd %xmm0, %xmm4
movsd -136(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
movapd %xmm3, %xmm5
mulsd %xmm1, %xmm5
addsd %xmm4, %xmm5
movsd %xmm5, -680(%rbp) ## 8-byte Spill
mulsd %xmm2, %xmm1
mulsd %xmm3, %xmm0
subsd %xmm0, %xmm1
movsd %xmm1, -416(%rbp) ## 8-byte Spill
xorpd %xmm0, %xmm0
testq %rbx, %rbx
je LBB2_30
## %bb.29:
movsd 96(%rbx), %xmm0 ## xmm0 = mem[0],zero
movsd 224(%rbx), %xmm1 ## xmm1 = mem[0],zero
movsd %xmm1, -96(%rbp) ## 8-byte Spill
LBB2_30:
callq ___sincos_stret
movsd -416(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
movapd %xmm5, %xmm4
mulsd %xmm0, %xmm4
movsd -680(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
movapd %xmm6, %xmm2
mulsd %xmm1, %xmm2
xorpd %xmm8, %xmm8
xorpd %xmm7, %xmm7
testq %rbx, %rbx
je LBB2_32
## %bb.31:
movsd 72(%rbx), %xmm7 ## xmm7 = mem[0],zero
LBB2_32:
movsd LCPI2_13(%rip), %xmm9 ## xmm9 = mem[0],zero
mulsd %xmm5, %xmm9
movsd LCPI2_14(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm6, %xmm3
addsd %xmm2, %xmm4
movsd %xmm4, -664(%rbp) ## 8-byte Spill
mulsd %xmm5, %xmm1
mulsd %xmm6, %xmm0
subsd %xmm0, %xmm1
movsd %xmm1, -672(%rbp) ## 8-byte Spill
xorpd %xmm0, %xmm0
testq %rbx, %rbx
je LBB2_34
## %bb.33:
movsd 128(%rbx), %xmm0 ## xmm0 = mem[0],zero
movsd 240(%rbx), %xmm8 ## xmm8 = mem[0],zero
LBB2_34:
movsd %xmm8, -320(%rbp) ## 8-byte Spill
movsd LCPI2_13(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm6, %xmm2
movsd %xmm2, -704(%rbp) ## 8-byte Spill
movsd LCPI2_15(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm5, %xmm1
movsd %xmm1, -56(%rbp) ## 8-byte Spill
addsd -120(%rbp), %xmm7 ## 8-byte Folded Reload
movsd %xmm7, -656(%rbp) ## 8-byte Spill
addsd %xmm9, %xmm3
movsd %xmm3, -1032(%rbp) ## 8-byte Spill
callq ___sincos_stret
movsd -672(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
movapd %xmm3, %xmm5
mulsd %xmm1, %xmm5
movsd -664(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
movapd %xmm4, %xmm2
mulsd %xmm0, %xmm2
subsd %xmm2, %xmm5
mulsd %xmm3, %xmm0
mulsd %xmm4, %xmm1
addsd %xmm0, %xmm1
movapd %xmm1, %xmm13
movsd LCPI2_21(%rip), %xmm6 ## xmm6 = mem[0],zero
mulsd %xmm6, %xmm13
mulsd %xmm5, %xmm6
xorpd %xmm10, %xmm10
xorpd %xmm11, %xmm11
testq %rbx, %rbx
je LBB2_36
## %bb.35:
movsd 104(%rbx), %xmm11 ## xmm11 = mem[0],zero
LBB2_36:
movsd -704(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
addsd -56(%rbp), %xmm9 ## 8-byte Folded Reload
movsd -1032(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
movsd -656(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm0, %xmm2
movsd %xmm2, -816(%rbp) ## 8-byte Spill
movsd LCPI2_24(%rip), %xmm12 ## xmm12 = mem[0],zero
mulsd %xmm4, %xmm12
movapd %xmm5, %xmm14
subsd %xmm13, %xmm14
movapd %xmm6, %xmm15
subsd %xmm1, %xmm15
addsd %xmm0, %xmm11
movsd LCPI2_25(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm5, %xmm0
movsd LCPI2_26(%rip), %xmm8 ## xmm8 = mem[0],zero
mulsd %xmm1, %xmm8
addsd %xmm0, %xmm8
movsd LCPI2_24(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm3, %xmm7
testq %rbx, %rbx
movsd -144(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
je LBB2_38
## %bb.37:
movsd 136(%rbx), %xmm10 ## xmm10 = mem[0],zero
LBB2_38:
movsd LCPI2_25(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm1, %xmm0
movsd LCPI2_27(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm5, %xmm3
addsd %xmm0, %xmm3
addsd %xmm1, %xmm6
addsd %xmm5, %xmm13
movsd -104(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
addsd -816(%rbp), %xmm2 ## 8-byte Folded Reload
movsd %xmm12, -1080(%rbp) ## 8-byte Spill
movapd %xmm12, %xmm0
mulsd %xmm11, %xmm0
movsd %xmm2, -104(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm2
movsd %xmm2, -808(%rbp) ## 8-byte Spill
movsd LCPI2_30(%rip), %xmm0 ## xmm0 = mem[0],zero
movapd %xmm14, %xmm12
mulsd %xmm0, %xmm12
movapd %xmm14, %xmm2
mulsd %xmm12, %xmm2
movsd %xmm9, -704(%rbp) ## 8-byte Spill
addsd %xmm9, %xmm4
movsd %xmm7, -1024(%rbp) ## 8-byte Spill
addsd %xmm7, %xmm4
movsd %xmm3, -1016(%rbp) ## 8-byte Spill
addsd %xmm3, %xmm4
mulsd %xmm6, %xmm0
addsd %xmm4, %xmm0
addsd LCPI2_31(%rip), %xmm0
movapd %xmm0, %xmm3
mulsd LCPI2_1(%rip), %xmm3
movapd %xmm0, %xmm9
movsd %xmm0, -48(%rbp) ## 8-byte Spill
subsd %xmm3, %xmm0
movsd %xmm4, -144(%rbp) ## 8-byte Spill
subsd %xmm4, %xmm0
movsd %xmm6, -280(%rbp) ## 8-byte Spill
mulsd %xmm0, %xmm6
addsd %xmm2, %xmm6
movsd %xmm14, -480(%rbp) ## 8-byte Spill
movapd %xmm14, %xmm2
movsd %xmm6, -952(%rbp) ## 8-byte Spill
mulsd %xmm6, %xmm2
movapd %xmm15, %xmm3
mulsd %xmm12, %xmm3
movsd %xmm13, -152(%rbp) ## 8-byte Spill
mulsd %xmm0, %xmm13
addsd %xmm3, %xmm13
movsd %xmm15, -472(%rbp) ## 8-byte Spill
movapd %xmm15, %xmm3
movsd %xmm13, -960(%rbp) ## 8-byte Spill
mulsd %xmm13, %xmm3
addsd %xmm2, %xmm3
movsd LCPI2_73(%rip), %xmm2 ## xmm2 = mem[0],zero
movapd %xmm5, %xmm6
mulsd %xmm2, %xmm6
movapd %xmm1, %xmm4
movsd LCPI2_33(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm7, %xmm4
subsd %xmm4, %xmm6
mulsd %xmm2, %xmm1
movsd %xmm11, -872(%rbp) ## 8-byte Spill
addsd %xmm11, %xmm10
movsd %xmm10, -56(%rbp) ## 8-byte Spill
mulsd %xmm7, %xmm5
addsd %xmm1, %xmm5
movsd %xmm8, -1072(%rbp) ## 8-byte Spill
mulsd %xmm10, %xmm8
movsd %xmm8, -800(%rbp) ## 8-byte Spill
mulsd %xmm6, %xmm12
movsd %xmm5, -536(%rbp) ## 8-byte Spill
mulsd %xmm5, %xmm0
addsd %xmm12, %xmm0
movsd %xmm6, -528(%rbp) ## 8-byte Spill
movsd %xmm0, -264(%rbp) ## 8-byte Spill
mulsd %xmm0, %xmm6
addsd %xmm3, %xmm6
movsd %xmm6, -824(%rbp) ## 8-byte Spill
movapd %xmm9, %xmm0
mulsd %xmm9, %xmm0
addsd LCPI2_34(%rip), %xmm0
sqrtsd %xmm0, %xmm0
movsd LCPI2_35(%rip), %xmm1 ## xmm1 = mem[0],zero
callq _pow
movsd %xmm0, -968(%rbp) ## 8-byte Spill
movsd -808(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
addsd -800(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -824(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -56(%rbp), %xmm1 ## 8-byte Folded Reload
movsd %xmm0, -360(%rbp) ## 8-byte Spill
addsd %xmm0, %xmm1
movsd %xmm1, -792(%rbp) ## 8-byte Spill
movsd -48(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd LCPI2_36(%rip), %xmm0
callq _tanh
movsd %xmm0, -976(%rbp) ## 8-byte Spill
movsd LCPI2_37(%rip), %xmm0 ## xmm0 = mem[0],zero
subsd -792(%rbp), %xmm0 ## 8-byte Folded Reload
mulsd LCPI2_38(%rip), %xmm0
callq _tanh
movsd %xmm0, -624(%rbp) ## 8-byte Spill
xorpd %xmm0, %xmm0
movsd %xmm0, -520(%rbp) ## 8-byte Spill
xorpd %xmm0, %xmm0
testq %rbx, %rbx
je LBB2_40
## %bb.39:
movsd 160(%rbx), %xmm0 ## xmm0 = mem[0],zero
LBB2_40:
callq ___sincos_stret
movapd %xmm0, %xmm11
xorpd %xmm4, %xmm4
testq %rbx, %rbx
je LBB2_42
## %bb.41:
movsd 168(%rbx), %xmm4 ## xmm4 = mem[0],zero
movsd 256(%rbx), %xmm0 ## xmm0 = mem[0],zero
movsd %xmm0, -520(%rbp) ## 8-byte Spill
LBB2_42:
movq (%r14), %rbx
testq %rbx, %rbx
movsd -1040(%rbp), %xmm15 ## 8-byte Reload
## xmm15 = mem[0],zero
movsd -168(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
movsd -408(%rbp), %xmm13 ## 8-byte Reload
## xmm13 = mem[0],zero
movsd -400(%rbp), %xmm14 ## 8-byte Reload
## xmm14 = mem[0],zero
movsd -24(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
je LBB2_44
## %bb.43:
movsd %xmm4, -1064(%rbp) ## 8-byte Spill
movapd %xmm15, %xmm5
mulsd -120(%rbp), %xmm5 ## 8-byte Folded Reload
movsd %xmm1, -768(%rbp) ## 8-byte Spill
movsd -504(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
subsd %xmm5, %xmm8
movsd -40(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movsd -192(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm1, %xmm0
movapd %xmm1, %xmm7
movsd -736(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
movsd -200(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm12, %xmm3
addsd %xmm0, %xmm3
movapd %xmm13, %xmm1
mulsd %xmm6, %xmm1
movapd %xmm8, %xmm10
subsd %xmm1, %xmm10
movsd -552(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
movsd -696(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm9, %xmm1
addsd %xmm3, %xmm1
movapd %xmm1, %xmm5
movsd %xmm1, -696(%rbp) ## 8-byte Spill
movsd -184(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm7, %xmm1
movsd -240(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm12, %xmm3
addsd %xmm1, %xmm3
movsd -128(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm14, %xmm1
movsd %xmm11, -48(%rbp) ## 8-byte Spill
movapd %xmm10, %xmm11
subsd %xmm1, %xmm11
movsd -688(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm9, %xmm1
addsd %xmm3, %xmm1
movapd %xmm1, %xmm6
movsd -496(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm2, %xmm1
movapd %xmm11, %xmm3
subsd %xmm1, %xmm3
movapd %xmm3, %xmm4
movapd %xmm9, %xmm1
mulsd -560(%rbp), %xmm1 ## 8-byte Folded Reload
movsd -1096(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm1, %xmm3
movapd %xmm1, %xmm7
movapd %xmm5, %xmm1
movapd %xmm2, %xmm0
movsd -432(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm2, %xmm1
subsd %xmm1, %xmm3
movsd %xmm3, -200(%rbp) ## 8-byte Spill
movsd -312(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -392(%rbp), %xmm1 ## 8-byte Folded Reload
movsd -304(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd -32(%rbp), %xmm3 ## 8-byte Folded Reload
addsd %xmm1, %xmm3
movsd -488(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm0, %xmm1
movapd %xmm4, %xmm0
movsd %xmm4, -448(%rbp) ## 8-byte Spill
subsd %xmm1, %xmm0
movsd %xmm0, -312(%rbp) ## 8-byte Spill
movsd -336(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm0, %xmm5
movsd -888(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm7, %xmm1
subsd %xmm1, %xmm5
movsd %xmm5, -1048(%rbp) ## 8-byte Spill
movsd -1088(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm7, %xmm5
movsd %xmm7, -864(%rbp) ## 8-byte Spill
movsd %xmm6, -688(%rbp) ## 8-byte Spill
movapd %xmm6, %xmm1
mulsd %xmm2, %xmm1
subsd %xmm1, %xmm5
movsd %xmm5, -240(%rbp) ## 8-byte Spill
movsd -272(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm9, %xmm1
addsd %xmm3, %xmm1
movsd %xmm1, -272(%rbp) ## 8-byte Spill
mulsd %xmm0, %xmm6
movsd -880(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm7, %xmm1
subsd %xmm1, %xmm6
movsd %xmm6, -1056(%rbp) ## 8-byte Spill
movsd -704(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -656(%rbp), %xmm1 ## 8-byte Folded Reload
movapd %xmm8, %xmm6
subsd %xmm1, %xmm6
movsd -1024(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -872(%rbp), %xmm1 ## 8-byte Folded Reload
movapd %xmm6, %xmm7
subsd %xmm1, %xmm7
movsd -1016(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -56(%rbp), %xmm1 ## 8-byte Folded Reload
movapd %xmm7, %xmm2
subsd %xmm1, %xmm2
movapd %xmm2, %xmm0
movsd %xmm2, -1000(%rbp) ## 8-byte Spill
movsd -512(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movapd %xmm1, %xmm2
movapd %xmm1, %xmm3
mulsd -712(%rbp), %xmm2 ## 8-byte Folded Reload
movapd %xmm4, %xmm1
subsd %xmm11, %xmm1
movsd %xmm1, -112(%rbp) ## 8-byte Spill
subsd %xmm10, %xmm11
movsd %xmm11, -256(%rbp) ## 8-byte Spill
subsd %xmm8, %xmm10
movsd %xmm10, -568(%rbp) ## 8-byte Spill
subsd %xmm7, %xmm0
movsd %xmm0, -856(%rbp) ## 8-byte Spill
subsd %xmm6, %xmm7
movsd %xmm7, -304(%rbp) ## 8-byte Spill
subsd %xmm8, %xmm6
movsd %xmm6, -192(%rbp) ## 8-byte Spill
subsd -504(%rbp), %xmm8 ## 8-byte Folded Reload
addsd -848(%rbp), %xmm2 ## 8-byte Folded Reload
movsd -120(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm1, %xmm8
addsd %xmm2, %xmm8
movsd %xmm8, -184(%rbp) ## 8-byte Spill
movapd %xmm3, %xmm0
movapd %xmm3, %xmm11
movsd %xmm3, -512(%rbp) ## 8-byte Spill
mulsd %xmm15, %xmm0
movsd -840(%rbp), %xmm14 ## 8-byte Reload
## xmm14 = mem[0],zero
subsd %xmm0, %xmm14
movsd -376(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm1, %xmm0
subsd %xmm0, %xmm14
movsd %xmm14, -64(%rbp) ## 8-byte Spill
movsd LCPI2_45(%rip), %xmm9 ## xmm9 = mem[0],zero
movsd -208(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm2, %xmm9
movsd LCPI2_46(%rip), %xmm4 ## xmm4 = mem[0],zero
movsd -176(%rbp), %xmm13 ## 8-byte Reload
## xmm13 = mem[0],zero
movapd %xmm13, %xmm0
mulsd %xmm4, %xmm0
subsd %xmm0, %xmm9
movapd %xmm2, %xmm1
movapd %xmm2, %xmm10
mulsd %xmm4, %xmm1
movsd LCPI2_48(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm13, %xmm3
subsd %xmm1, %xmm3
movsd LCPI2_45(%rip), %xmm2 ## xmm2 = mem[0],zero
movsd -40(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm0, %xmm2
movapd %xmm12, %xmm15
movapd %xmm12, %xmm1
mulsd %xmm4, %xmm1
subsd %xmm1, %xmm2
movapd %xmm0, %xmm1
movapd %xmm0, %xmm8
mulsd %xmm4, %xmm1
movsd LCPI2_48(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd %xmm12, %xmm4
subsd %xmm1, %xmm4
movsd -200(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movsd -312(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
addsd %xmm6, %xmm1
movsd %xmm1, -200(%rbp) ## 8-byte Spill
mulsd %xmm1, %xmm1
movsd -168(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
movsd -1048(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm0, %xmm0
addsd %xmm1, %xmm0
movsd %xmm0, -352(%rbp) ## 8-byte Spill
movsd LCPI2_47(%rip), %xmm5 ## xmm5 = mem[0],zero
movsd -552(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm0, %xmm5
addsd %xmm2, %xmm5
movsd LCPI2_49(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm0, %xmm7
addsd %xmm4, %xmm7
movsd -240(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
addsd %xmm6, %xmm2
movsd %xmm2, -240(%rbp) ## 8-byte Spill
mulsd %xmm2, %xmm2
movsd -1056(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm0, %xmm0
addsd %xmm2, %xmm0
movsd %xmm0, -936(%rbp) ## 8-byte Spill
movsd -160(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
addsd %xmm11, %xmm0
movapd %xmm0, %xmm2
mulsd -344(%rbp), %xmm2 ## 8-byte Folded Reload
addsd -184(%rbp), %xmm2 ## 8-byte Folded Reload
movsd -568(%rbp), %xmm11 ## 8-byte Reload
## xmm11 = mem[0],zero
mulsd %xmm12, %xmm11
addsd %xmm2, %xmm11
movsd %xmm11, -568(%rbp) ## 8-byte Spill
movapd %xmm0, %xmm2
movapd %xmm0, %xmm4
movsd %xmm0, -160(%rbp) ## 8-byte Spill
mulsd -408(%rbp), %xmm2 ## 8-byte Folded Reload
subsd %xmm2, %xmm14
movsd -384(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm12, %xmm0
subsd %xmm0, %xmm14
movsd %xmm14, -944(%rbp) ## 8-byte Spill
movsd -24(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd -272(%rbp), %xmm2 ## 8-byte Folded Reload
movsd -448(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
subsd %xmm0, %xmm6
movsd %xmm6, -312(%rbp) ## 8-byte Spill
subsd %xmm2, %xmm0
movsd %xmm0, -448(%rbp) ## 8-byte Spill
movsd -368(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
subsd -216(%rbp), %xmm0 ## 8-byte Folded Reload
addsd LCPI2_47(%rip), %xmm9
addsd LCPI2_49(%rip), %xmm3
movapd %xmm10, %xmm2
mulsd %xmm9, %xmm2
movapd %xmm13, %xmm1
mulsd %xmm3, %xmm1
addsd %xmm2, %xmm1
movsd %xmm1, -376(%rbp) ## 8-byte Spill
movapd %xmm8, %xmm10
movapd %xmm8, %xmm2
mulsd %xmm5, %xmm2
movapd %xmm15, %xmm1
mulsd %xmm7, %xmm1
addsd %xmm2, %xmm1
movsd %xmm1, -384(%rbp) ## 8-byte Spill
movsd -88(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
addsd %xmm4, %xmm6
movsd %xmm6, -88(%rbp) ## 8-byte Spill
movapd %xmm6, %xmm2
mulsd -784(%rbp), %xmm2 ## 8-byte Folded Reload
movapd %xmm11, %xmm4
subsd %xmm2, %xmm4
movsd -400(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movsd -256(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm1, %xmm2
addsd %xmm4, %xmm2
movsd %xmm2, -256(%rbp) ## 8-byte Spill
movapd %xmm6, %xmm2
mulsd -128(%rbp), %xmm2 ## 8-byte Folded Reload
subsd %xmm2, %xmm14
mulsd %xmm1, %xmm0
subsd %xmm0, %xmm14
movsd %xmm14, -216(%rbp) ## 8-byte Spill
movsd LCPI2_60(%rip), %xmm0 ## xmm0 = mem[0],zero
movsd -544(%rbp), %xmm14 ## 8-byte Reload
## xmm14 = mem[0],zero
movapd %xmm14, %xmm2
mulsd %xmm0, %xmm2
movsd LCPI2_61(%rip), %xmm4 ## xmm4 = mem[0],zero
movsd -72(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
movapd %xmm8, %xmm1
mulsd %xmm4, %xmm1
addsd %xmm2, %xmm1
mulsd %xmm0, %xmm10
movapd %xmm15, %xmm0
mulsd %xmm4, %xmm0
movapd %xmm4, %xmm12
addsd %xmm10, %xmm0
movsd -248(%rbp), %xmm11 ## 8-byte Reload
## xmm11 = mem[0],zero
mulsd LCPI2_67(%rip), %xmm11
movsd -912(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd LCPI2_66(%rip), %xmm4
movsd LCPI2_41(%rip), %xmm2 ## xmm2 = mem[0],zero
subsd %xmm11, %xmm2
mulsd %xmm4, %xmm2
movsd -208(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
movapd %xmm10, %xmm4
mulsd LCPI2_60(%rip), %xmm4
movapd %xmm13, %xmm6
mulsd %xmm12, %xmm6
addsd %xmm4, %xmm6
movsd %xmm6, -368(%rbp) ## 8-byte Spill
movsd LCPI2_62(%rip), %xmm6 ## xmm6 = mem[0],zero
movsd -896(%rbp), %xmm11 ## 8-byte Reload
## xmm11 = mem[0],zero
movapd %xmm11, %xmm4
mulsd %xmm6, %xmm4
addsd %xmm1, %xmm4
movsd %xmm4, -248(%rbp) ## 8-byte Spill
movsd -552(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
movapd %xmm12, %xmm1
mulsd %xmm6, %xmm1
addsd %xmm0, %xmm1
movsd %xmm1, -912(%rbp) ## 8-byte Spill
mulsd %xmm14, %xmm5
mulsd %xmm8, %xmm7
addsd %xmm5, %xmm7
mulsd LCPI2_58(%rip), %xmm13
movapd %xmm10, %xmm0
mulsd LCPI2_51(%rip), %xmm0
subsd %xmm13, %xmm0
addsd %xmm7, %xmm0
movsd %xmm0, -208(%rbp) ## 8-byte Spill
movsd LCPI2_1(%rip), %xmm0 ## xmm0 = mem[0],zero
movsd -752(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm0, %xmm5
addsd %xmm0, %xmm5
mulsd %xmm2, %xmm5
movsd -576(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd LCPI2_67(%rip), %xmm0
movsd LCPI2_41(%rip), %xmm10 ## xmm10 = mem[0],zero
movapd %xmm10, %xmm1
subsd %xmm0, %xmm1
movsd -744(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd LCPI2_68(%rip), %xmm0
mulsd %xmm0, %xmm1
movsd -328(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
addsd -88(%rbp), %xmm2 ## 8-byte Folded Reload
movsd %xmm2, -328(%rbp) ## 8-byte Spill
movapd %xmm2, %xmm0
mulsd -224(%rbp), %xmm0 ## 8-byte Folded Reload
addsd -256(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -112(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
movsd -24(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm7, %xmm4
addsd %xmm0, %xmm4
movsd %xmm4, -112(%rbp) ## 8-byte Spill
movapd %xmm2, %xmm0
mulsd -496(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -216(%rbp), %xmm13 ## 8-byte Reload
## xmm13 = mem[0],zero
subsd %xmm0, %xmm13
movsd -464(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm7, %xmm0
movapd %xmm7, %xmm6
subsd %xmm0, %xmm13
movsd -456(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd LCPI2_40(%rip), %xmm7
movsd -760(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd LCPI2_39(%rip), %xmm0
addsd %xmm10, %xmm7
mulsd %xmm0, %xmm7
movapd %xmm14, %xmm0
mulsd %xmm9, %xmm0
movapd %xmm8, %xmm4
mulsd %xmm3, %xmm4
addsd %xmm0, %xmm4
movsd LCPI2_50(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm15, %xmm0
movsd LCPI2_51(%rip), %xmm2 ## xmm2 = mem[0],zero
movsd -40(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
mulsd %xmm10, %xmm2
addsd %xmm0, %xmm2
subsd %xmm2, %xmm4
movsd %xmm4, -744(%rbp) ## 8-byte Spill
movapd %xmm12, %xmm2
movapd %xmm12, %xmm0
mulsd %xmm6, %xmm0
movapd %xmm6, %xmm15
movapd %xmm11, %xmm4
movapd %xmm11, %xmm6
movsd -424(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
mulsd %xmm12, %xmm6
movsd %xmm12, -424(%rbp) ## 8-byte Spill
movsd -560(%rbp), %xmm11 ## 8-byte Reload
## xmm11 = mem[0],zero
mulsd %xmm11, %xmm0
subsd %xmm0, %xmm6
movsd %xmm6, -752(%rbp) ## 8-byte Spill
mulsd %xmm10, %xmm9
mulsd -736(%rbp), %xmm3 ## 8-byte Folded Reload
addsd %xmm9, %xmm3
mulsd LCPI2_58(%rip), %xmm8
mulsd LCPI2_51(%rip), %xmm14
subsd %xmm8, %xmm14
addsd %xmm3, %xmm14
movsd %xmm14, -544(%rbp) ## 8-byte Spill
mulsd %xmm15, %xmm4
mulsd %xmm11, %xmm4
movapd %xmm2, %xmm0
mulsd %xmm12, %xmm0
addsd %xmm0, %xmm4
movsd %xmm4, -760(%rbp) ## 8-byte Spill
movsd -384(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
addsd -376(%rbp), %xmm0 ## 8-byte Folded Reload
movsd LCPI2_64(%rip), %xmm8 ## xmm8 = mem[0],zero
subsd %xmm0, %xmm8
movsd -640(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd LCPI2_1(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd %xmm4, %xmm0
addsd %xmm4, %xmm0
mulsd %xmm5, %xmm0
movsd %xmm0, -640(%rbp) ## 8-byte Spill
movsd LCPI2_34(%rip), %xmm15 ## xmm15 = mem[0],zero
movsd -352(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
addsd %xmm15, %xmm2
sqrtsd %xmm2, %xmm2
movsd %xmm2, -560(%rbp) ## 8-byte Spill
movsd LCPI2_42(%rip), %xmm3 ## xmm3 = mem[0],zero
movapd %xmm2, %xmm0
divsd %xmm3, %xmm0
movapd %xmm3, %xmm10
movsd LCPI2_41(%rip), %xmm6 ## xmm6 = mem[0],zero
minsd %xmm6, %xmm0
movsd LCPI2_43(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm0
mulsd %xmm4, %xmm2
addsd %xmm0, %xmm2
movsd %xmm2, -464(%rbp) ## 8-byte Spill
movsd -984(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm4, %xmm0
addsd %xmm4, %xmm0
mulsd %xmm1, %xmm0
movsd -368(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
addsd LCPI2_62(%rip), %xmm12
movsd -992(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm4, %xmm5
addsd %xmm4, %xmm5
mulsd %xmm7, %xmm5
movsd -632(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm4, %xmm1
addsd %xmm4, %xmm1
mulsd %xmm0, %xmm1
movsd %xmm1, -632(%rbp) ## 8-byte Spill
movsd -936(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
addsd %xmm15, %xmm0
xorps %xmm1, %xmm1
sqrtsd %xmm0, %xmm1
movsd %xmm1, -576(%rbp) ## 8-byte Spill
movapd %xmm1, %xmm0
divsd %xmm10, %xmm0
minsd %xmm6, %xmm0
mulsd %xmm3, %xmm0
mulsd %xmm4, %xmm1
movapd %xmm4, %xmm6
addsd %xmm0, %xmm1
movsd %xmm1, -456(%rbp) ## 8-byte Spill
movapd %xmm12, %xmm14
movsd %xmm12, -368(%rbp) ## 8-byte Spill
movsd -864(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm4, %xmm14
movsd -912(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
movapd %xmm2, %xmm1
movsd -432(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm0, %xmm1
subsd %xmm1, %xmm14
movapd %xmm2, %xmm1
movsd -336(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm3, %xmm1
movsd -248(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
movapd %xmm10, %xmm2
mulsd %xmm4, %xmm2
subsd %xmm2, %xmm1
movapd %xmm10, %xmm9
mulsd %xmm0, %xmm9
movapd %xmm12, %xmm2
mulsd %xmm3, %xmm2
subsd %xmm2, %xmm9
movsd -952(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -280(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -960(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
mulsd -152(%rbp), %xmm12 ## 8-byte Folded Reload
addsd %xmm0, %xmm12
movsd -80(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd LCPI2_22(%rip), %xmm2
movsd -600(%rbp), %xmm11 ## 8-byte Reload
## xmm11 = mem[0],zero
mulsd LCPI2_23(%rip), %xmm11
movsd -392(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd LCPI2_22(%rip), %xmm3
addsd %xmm2, %xmm11
movsd -32(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd LCPI2_23(%rip), %xmm0
addsd %xmm3, %xmm0
movsd %xmm0, -32(%rbp) ## 8-byte Spill
movsd -648(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm6, %xmm0
addsd %xmm6, %xmm0
mulsd %xmm5, %xmm0
movsd %xmm0, -648(%rbp) ## 8-byte Spill
movsd -448(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm2, %xmm2
addsd %xmm15, %xmm2
sqrtsd %xmm2, %xmm15
movsd %xmm15, -80(%rbp) ## 8-byte Spill
movapd %xmm15, %xmm2
divsd LCPI2_42(%rip), %xmm2
minsd LCPI2_41(%rip), %xmm2
mulsd LCPI2_43(%rip), %xmm2
mulsd %xmm6, %xmm15
addsd %xmm2, %xmm15
movsd -328(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
movapd %xmm7, %xmm2
mulsd -776(%rbp), %xmm2 ## 8-byte Folded Reload
addsd -112(%rbp), %xmm2 ## 8-byte Folded Reload
movsd -312(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
movsd -24(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm4, %xmm3
addsd %xmm2, %xmm3
movsd %xmm3, -312(%rbp) ## 8-byte Spill
movapd %xmm7, %xmm2
mulsd -488(%rbp), %xmm2 ## 8-byte Folded Reload
movapd %xmm13, %xmm0
subsd %xmm2, %xmm0
movsd -584(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm4, %xmm2
subsd %xmm2, %xmm0
movsd %xmm0, -600(%rbp) ## 8-byte Spill
movsd LCPI2_59(%rip), %xmm6 ## xmm6 = mem[0],zero
movsd -384(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
addsd %xmm6, %xmm5
movsd %xmm5, -384(%rbp) ## 8-byte Spill
movsd -208(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movapd %xmm0, %xmm2
movsd -336(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm3, %xmm2
movsd -864(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm4, %xmm5
addsd %xmm2, %xmm5
movapd %xmm8, %xmm2
addsd %xmm6, %xmm2
movsd %xmm2, -984(%rbp) ## 8-byte Spill
mulsd %xmm3, %xmm2
movapd %xmm0, %xmm6
mulsd %xmm4, %xmm6
addsd %xmm2, %xmm6
movsd -424(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
addsd %xmm7, %xmm0
movsd %xmm0, -424(%rbp) ## 8-byte Spill
movsd -368(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
movapd %xmm7, %xmm2
movsd -752(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm3, %xmm2
mulsd %xmm0, %xmm10
subsd %xmm10, %xmm2
movsd -912(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm0, %xmm4
movsd -760(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
mulsd %xmm8, %xmm7
subsd %xmm7, %xmm4
movsd -264(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -536(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm12, %xmm0
movsd %xmm0, -264(%rbp) ## 8-byte Spill
movsd -744(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
movapd %xmm10, %xmm7
mulsd %xmm3, %xmm7
movsd -544(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movapd %xmm0, %xmm3
mulsd %xmm8, %xmm3
addsd %xmm7, %xmm3
movsd -432(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm7, %xmm0
addsd %xmm5, %xmm0
movsd %xmm0, -584(%rbp) ## 8-byte Spill
movapd %xmm10, %xmm0
mulsd %xmm7, %xmm0
addsd %xmm6, %xmm0
movapd %xmm0, %xmm12
movsd %xmm0, -992(%rbp) ## 8-byte Spill
mulsd %xmm7, %xmm14
movsd -336(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
movapd %xmm6, %xmm5
mulsd %xmm1, %xmm5
subsd %xmm5, %xmm14
movsd -864(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
mulsd %xmm8, %xmm1
mulsd %xmm7, %xmm9
subsd %xmm9, %xmm1
movsd -376(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
addsd LCPI2_59(%rip), %xmm5
movsd %xmm5, -376(%rbp) ## 8-byte Spill
mulsd -424(%rbp), %xmm5 ## 8-byte Folded Reload
addsd %xmm3, %xmm5
movsd -312(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
movapd %xmm3, %xmm9
mulsd -248(%rbp), %xmm9 ## 8-byte Folded Reload
movsd -600(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd -912(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm0, %xmm7
subsd %xmm7, %xmm9
addsd %xmm5, %xmm9
movapd %xmm3, %xmm5
subsd %xmm2, %xmm5
movapd %xmm0, %xmm2
subsd %xmm4, %xmm2
mulsd -648(%rbp), %xmm15 ## 8-byte Folded Reload
mulsd -448(%rbp), %xmm15 ## 8-byte Folded Reload
movapd %xmm6, %xmm10
mulsd -584(%rbp), %xmm10 ## 8-byte Folded Reload
movapd %xmm8, %xmm4
mulsd %xmm12, %xmm4
subsd %xmm4, %xmm10
movsd -640(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
movsd -464(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm8, %xmm0
movsd %xmm0, -464(%rbp) ## 8-byte Spill
movsd -200(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd %xmm0, %xmm6
divsd -560(%rbp), %xmm6 ## 8-byte Folded Reload
movsd %xmm6, -200(%rbp) ## 8-byte Spill
movsd -888(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm8, %xmm4
movsd -696(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm6, %xmm7
addsd %xmm4, %xmm7
movsd LCPI2_63(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm5
mulsd %xmm0, %xmm14
addsd %xmm5, %xmm14
mulsd %xmm0, %xmm2
mulsd %xmm0, %xmm1
addsd %xmm2, %xmm1
movsd -32(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movapd %xmm0, %xmm2
movsd -24(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm3, %xmm2
movapd %xmm11, %xmm4
mulsd %xmm3, %xmm4
mulsd %xmm3, %xmm2
mulsd %xmm3, %xmm4
movsd -264(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd -56(%rbp), %xmm5 ## 8-byte Folded Reload
movsd -1000(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
subsd %xmm5, %xmm3
movsd %xmm3, -40(%rbp) ## 8-byte Spill
movsd -328(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
movapd %xmm6, %xmm5
mulsd %xmm0, %xmm5
mulsd %xmm13, %xmm0
subsd %xmm5, %xmm13
mulsd %xmm11, %xmm6
movsd -112(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
addsd %xmm3, %xmm6
mulsd %xmm11, %xmm3
subsd %xmm0, %xmm3
movsd %xmm3, -112(%rbp) ## 8-byte Spill
divsd -80(%rbp), %xmm15 ## 8-byte Folded Reload
movsd -248(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
mulsd LCPI2_65(%rip), %xmm12
addsd %xmm7, %xmm12
movsd -632(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd -456(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm0, %xmm5
movsd %xmm5, -456(%rbp) ## 8-byte Spill
movsd -240(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm5, %xmm3
divsd -576(%rbp), %xmm3 ## 8-byte Folded Reload
movsd %xmm3, -240(%rbp) ## 8-byte Spill
movsd -880(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm0, %xmm5
movsd -688(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm3, %xmm7
addsd %xmm5, %xmm7
addsd %xmm12, %xmm7
addsd LCPI2_65(%rip), %xmm8
addsd %xmm0, %xmm8
subsd %xmm8, %xmm14
movsd -200(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
addsd %xmm3, %xmm5
addsd %xmm1, %xmm5
movsd LCPI2_28(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm6
mulsd %xmm0, %xmm2
subsd %xmm2, %xmm6
mulsd %xmm0, %xmm13
mulsd %xmm0, %xmm4
subsd %xmm4, %xmm13
movsd LCPI2_20(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd -328(%rbp), %xmm1 ## 8-byte Folded Reload
addsd -112(%rbp), %xmm1 ## 8-byte Folded Reload
movsd -592(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
movsd -648(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm3, %xmm2
movapd %xmm15, -912(%rbp) ## 16-byte Spill
movsd -272(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm15, %xmm0
addsd %xmm2, %xmm0
movsd LCPI2_63(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm9
mulsd %xmm2, %xmm10
addsd %xmm9, %xmm10
subsd %xmm7, %xmm10
movsd %xmm10, -248(%rbp) ## 8-byte Spill
movsd LCPI2_69(%rip), %xmm2 ## xmm2 = mem[0],zero
addsd %xmm3, %xmm2
subsd %xmm2, %xmm6
mulsd LCPI2_29(%rip), %xmm11
subsd %xmm11, %xmm0
movapd %xmm0, %xmm2
movsd -776(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
mulsd %xmm14, %xmm8
movsd -488(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm5, %xmm0
subsd %xmm0, %xmm8
mulsd LCPI2_28(%rip), %xmm1
subsd %xmm2, %xmm1
addsd %xmm14, %xmm6
addsd %xmm15, %xmm13
addsd %xmm5, %xmm13
movsd -608(%rbp), %xmm11 ## 8-byte Reload
## xmm11 = mem[0],zero
mulsd LCPI2_12(%rip), %xmm11
movsd -88(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
movapd %xmm9, %xmm0
mulsd %xmm11, %xmm0
movsd -568(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
movapd %xmm12, %xmm15
subsd %xmm0, %xmm15
movsd -616(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
addsd -512(%rbp), %xmm5 ## 8-byte Folded Reload
movapd %xmm5, %xmm0
mulsd -1032(%rbp), %xmm0 ## 8-byte Folded Reload
addsd -184(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -192(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
movsd -656(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm3, %xmm2
addsd %xmm0, %xmm2
movsd %xmm2, -192(%rbp) ## 8-byte Spill
movsd -288(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd LCPI2_12(%rip), %xmm7
movapd %xmm5, %xmm0
movapd %xmm5, %xmm14
movsd %xmm5, -616(%rbp) ## 8-byte Spill
mulsd -704(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -64(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
subsd %xmm0, %xmm5
movsd -792(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd LCPI2_40(%rip), %xmm2
movsd -816(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm3, %xmm0
subsd %xmm0, %xmm5
movsd %xmm5, -24(%rbp) ## 8-byte Spill
movsd -968(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd LCPI2_39(%rip), %xmm0
movsd LCPI2_41(%rip), %xmm3 ## xmm3 = mem[0],zero
addsd %xmm3, %xmm2
mulsd %xmm0, %xmm2
movapd %xmm9, %xmm0
mulsd %xmm7, %xmm0
movapd %xmm12, %xmm5
mulsd %xmm11, %xmm5
movapd %xmm7, %xmm3
movsd -944(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
mulsd %xmm12, %xmm7
addsd %xmm5, %xmm7
addsd %xmm10, %xmm8
addsd %xmm1, %xmm8
movsd -224(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm6, %xmm4
movsd -496(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm13, %xmm1
subsd %xmm1, %xmm4
addsd %xmm8, %xmm4
movsd %xmm4, -224(%rbp) ## 8-byte Spill
movsd -400(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm5, %xmm3
mulsd %xmm5, %xmm3
movsd LCPI2_16(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd %xmm4, %xmm15
mulsd %xmm4, %xmm3
subsd %xmm3, %xmm15
subsd %xmm0, %xmm12
movapd %xmm12, %xmm1
movapd %xmm11, %xmm10
mulsd %xmm5, %xmm10
mulsd %xmm5, %xmm10
movsd -808(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
subsd -104(%rbp), %xmm3 ## 8-byte Folded Reload
movsd -976(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
movsd LCPI2_1(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm5
addsd %xmm0, %xmm5
mulsd %xmm2, %xmm5
mulsd LCPI2_11(%rip), %xmm9
subsd %xmm7, %xmm9
movsd %xmm9, -88(%rbp) ## 8-byte Spill
movsd LCPI2_19(%rip), %xmm0 ## xmm0 = mem[0],zero
movsd -256(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm0, %xmm7
addsd LCPI2_70(%rip), %xmm7
addsd %xmm6, %xmm7
movsd -216(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm0, %xmm2
addsd %xmm13, %xmm2
movapd %xmm2, %xmm9
movsd %xmm2, -216(%rbp) ## 8-byte Spill
mulsd %xmm4, %xmm1
mulsd %xmm4, %xmm10
addsd %xmm1, %xmm10
movsd -232(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
mulsd LCPI2_6(%rip), %xmm12
movsd -160(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
movapd %xmm4, %xmm0
mulsd %xmm12, %xmm0
movsd -184(%rbp), %xmm13 ## 8-byte Reload
## xmm13 = mem[0],zero
movapd %xmm13, %xmm8
subsd %xmm0, %xmm8
movsd -96(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
addsd %xmm14, %xmm6
movsd %xmm6, -96(%rbp) ## 8-byte Spill
movapd %xmm6, %xmm0
mulsd -1080(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -192(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
subsd %xmm0, %xmm1
movsd -304(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd -872(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm2, %xmm0
addsd %xmm1, %xmm0
movsd %xmm0, -304(%rbp) ## 8-byte Spill
movapd %xmm6, %xmm0
mulsd -1024(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -24(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
subsd %xmm0, %xmm1
mulsd %xmm2, %xmm3
subsd %xmm3, %xmm1
movsd %xmm1, -288(%rbp) ## 8-byte Spill
movsd -624(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd LCPI2_1(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm0
addsd %xmm3, %xmm0
mulsd %xmm5, %xmm0
movsd %xmm0, -624(%rbp) ## 8-byte Spill
movsd -40(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm0, %xmm0
addsd LCPI2_34(%rip), %xmm0
xorps %xmm2, %xmm2
sqrtsd %xmm0, %xmm2
movsd %xmm2, -32(%rbp) ## 8-byte Spill
movsd -296(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd LCPI2_6(%rip), %xmm0
movapd %xmm2, %xmm1
divsd LCPI2_42(%rip), %xmm1
minsd LCPI2_41(%rip), %xmm1
mulsd LCPI2_43(%rip), %xmm1
mulsd %xmm3, %xmm2
addsd %xmm1, %xmm2
movsd %xmm2, -296(%rbp) ## 8-byte Spill
movsd -328(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd LCPI2_18(%rip), %xmm2
movapd %xmm4, %xmm1
mulsd %xmm0, %xmm1
movapd %xmm12, %xmm3
mulsd %xmm13, %xmm3
movapd %xmm0, %xmm6
movsd -64(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm4, %xmm0
mulsd LCPI2_19(%rip), %xmm2
addsd %xmm3, %xmm0
addsd -224(%rbp), %xmm2 ## 8-byte Folded Reload
movapd %xmm2, %xmm13
movsd %xmm2, -328(%rbp) ## 8-byte Spill
movsd -784(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm7, %xmm3
movsd -128(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm9, %xmm2
addsd %xmm3, %xmm2
movsd %xmm2, -128(%rbp) ## 8-byte Spill
movsd -168(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm2, %xmm6
mulsd %xmm2, %xmm6
movsd LCPI2_9(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm8
mulsd %xmm3, %xmm6
subsd %xmm6, %xmm8
movapd %xmm4, %xmm3
subsd %xmm1, %xmm3
movapd %xmm12, %xmm9
mulsd %xmm2, %xmm9
mulsd %xmm2, %xmm9
movsd -768(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
movapd %xmm5, %xmm1
movsd LCPI2_32(%rip), %xmm14 ## xmm14 = mem[0],zero
mulsd %xmm14, %xmm1
movsd -48(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
movapd %xmm4, %xmm2
movsd LCPI2_33(%rip), %xmm6 ## xmm6 = mem[0],zero
mulsd %xmm6, %xmm2
subsd %xmm1, %xmm2
movsd %xmm2, -224(%rbp) ## 8-byte Spill
mulsd %xmm5, %xmm6
mulsd %xmm4, %xmm14
addsd %xmm6, %xmm14
movsd %xmm14, -80(%rbp) ## 8-byte Spill
movsd -160(%rbp), %xmm14 ## 8-byte Reload
## xmm14 = mem[0],zero
mulsd LCPI2_5(%rip), %xmm14
movsd -88(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd LCPI2_16(%rip), %xmm1
subsd %xmm0, %xmm14
mulsd LCPI2_17(%rip), %xmm11
addsd %xmm1, %xmm11
addsd LCPI2_71(%rip), %xmm15
addsd %xmm7, %xmm15
addsd -216(%rbp), %xmm10 ## 8-byte Folded Reload
movsd LCPI2_9(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm3
mulsd %xmm0, %xmm9
addsd %xmm3, %xmm9
movsd LCPI2_22(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd -480(%rbp), %xmm1 ## 8-byte Folded Reload
movsd LCPI2_23(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd -472(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
movsd %xmm0, -168(%rbp) ## 8-byte Spill
movsd -320(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
addsd -96(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm0, %xmm1
movapd %xmm0, %xmm2
movsd %xmm0, -320(%rbp) ## 8-byte Spill
mulsd -1072(%rbp), %xmm1 ## 8-byte Folded Reload
addsd -304(%rbp), %xmm1 ## 8-byte Folded Reload
movsd -56(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
movsd -856(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm4, %xmm0
addsd %xmm1, %xmm0
movsd %xmm0, -856(%rbp) ## 8-byte Spill
movsd LCPI2_22(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd -280(%rbp), %xmm1 ## 8-byte Folded Reload
movsd LCPI2_23(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd -152(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
movsd %xmm0, -400(%rbp) ## 8-byte Spill
movapd %xmm2, %xmm1
mulsd -1016(%rbp), %xmm1 ## 8-byte Folded Reload
movsd -288(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
subsd %xmm1, %xmm2
movsd -800(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm4, %xmm0
subsd %xmm0, %xmm2
movsd %xmm2, -176(%rbp) ## 8-byte Spill
subsd -128(%rbp), %xmm13 ## 8-byte Folded Reload
addsd %xmm11, %xmm13
movsd %xmm13, -216(%rbp) ## 8-byte Spill
movsd -344(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm15, %xmm1
movsd -408(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm10, %xmm0
subsd %xmm0, %xmm1
movsd %xmm1, -344(%rbp) ## 8-byte Spill
movsd LCPI2_6(%rip), %xmm1 ## xmm1 = mem[0],zero
movsd -680(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm1, %xmm0
movapd %xmm0, %xmm6
movsd %xmm0, -680(%rbp) ## 8-byte Spill
movsd -416(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm1, %xmm0
movapd %xmm0, %xmm3
movsd %xmm0, -416(%rbp) ## 8-byte Spill
movsd -624(%rbp), %xmm13 ## 8-byte Reload
## xmm13 = mem[0],zero
movsd -296(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm13, %xmm5
mulsd -40(%rbp), %xmm5 ## 8-byte Folded Reload
movsd LCPI2_45(%rip), %xmm2 ## xmm2 = mem[0],zero
movsd -224(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm4, %xmm2
movsd -80(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
movapd %xmm7, %xmm1
movsd LCPI2_46(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm1
subsd %xmm1, %xmm2
movsd %xmm2, -408(%rbp) ## 8-byte Spill
movapd %xmm4, %xmm1
mulsd %xmm0, %xmm1
movsd LCPI2_48(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm7, %xmm0
subsd %xmm1, %xmm0
movsd %xmm0, -112(%rbp) ## 8-byte Spill
mulsd LCPI2_9(%rip), %xmm14
mulsd LCPI2_10(%rip), %xmm12
addsd %xmm14, %xmm12
movapd %xmm8, %xmm11
addsd LCPI2_72(%rip), %xmm11
addsd %xmm15, %xmm11
movsd %xmm11, -816(%rbp) ## 8-byte Spill
movapd %xmm9, %xmm14
addsd %xmm10, %xmm14
movsd %xmm14, -808(%rbp) ## 8-byte Spill
movsd -184(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm6, %xmm1
movsd -64(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
mulsd %xmm3, %xmm8
addsd %xmm1, %xmm8
movsd -664(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd LCPI2_12(%rip), %xmm1
movsd %xmm1, -664(%rbp) ## 8-byte Spill
movsd -672(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd LCPI2_12(%rip), %xmm3
movsd %xmm3, -672(%rbp) ## 8-byte Spill
mulsd -192(%rbp), %xmm1 ## 8-byte Folded Reload
mulsd -24(%rbp), %xmm3 ## 8-byte Folded Reload
addsd %xmm1, %xmm3
movsd -168(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
movapd %xmm9, %xmm7
mulsd -856(%rbp), %xmm7 ## 8-byte Folded Reload
movsd -400(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd -176(%rbp), %xmm4 ## 8-byte Folded Reload
subsd %xmm4, %xmm7
movapd %xmm5, %xmm15
divsd -32(%rbp), %xmm15 ## 8-byte Folded Reload
movsd %xmm15, -296(%rbp) ## 8-byte Spill
movsd -480(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
movapd %xmm2, %xmm0
movsd -768(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm5, %xmm0
movsd -472(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
movapd %xmm10, %xmm1
movsd -48(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm4, %xmm1
addsd %xmm0, %xmm1
movsd %xmm1, -32(%rbp) ## 8-byte Spill
mulsd %xmm5, %xmm10
movapd %xmm2, %xmm0
mulsd %xmm4, %xmm0
subsd %xmm0, %xmm10
movsd %xmm10, -128(%rbp) ## 8-byte Spill
movsd -152(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
movapd %xmm2, %xmm6
mulsd %xmm5, %xmm6
movsd -280(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movapd %xmm1, %xmm0
mulsd %xmm4, %xmm0
subsd %xmm0, %xmm6
movsd %xmm6, -232(%rbp) ## 8-byte Spill
mulsd %xmm1, %xmm5
mulsd %xmm2, %xmm4
addsd %xmm5, %xmm4
movsd %xmm4, -48(%rbp) ## 8-byte Spill
movsd -344(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
addsd -216(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm12, %xmm0
movapd %xmm0, %xmm5
movsd %xmm0, -344(%rbp) ## 8-byte Spill
movsd -712(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm11, %xmm0
movsd -1040(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm14, %xmm4
subsd %xmm4, %xmm0
movsd LCPI2_5(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd -616(%rbp), %xmm4 ## 8-byte Folded Reload
subsd %xmm8, %xmm4
movsd LCPI2_11(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd -96(%rbp), %xmm2 ## 8-byte Folded Reload
subsd %xmm3, %xmm2
movsd LCPI2_20(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd -320(%rbp), %xmm3 ## 8-byte Folded Reload
addsd %xmm7, %xmm3
movapd %xmm3, %xmm14
movsd -824(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm13, %xmm1
movsd -264(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm15, %xmm3
addsd %xmm1, %xmm3
movsd -408(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
addsd LCPI2_74(%rip), %xmm6
movsd %xmm6, -408(%rbp) ## 8-byte Spill
movsd LCPI2_69(%rip), %xmm11 ## xmm11 = mem[0],zero
mulsd %xmm9, %xmm11
addsd %xmm3, %xmm11
movsd -112(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
addsd LCPI2_75(%rip), %xmm1
movsd %xmm1, -112(%rbp) ## 8-byte Spill
movsd -32(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm6, %xmm3
mulsd %xmm1, %xmm10
addsd %xmm3, %xmm10
movsd LCPI2_58(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd -232(%rbp), %xmm3 ## 8-byte Folded Reload
movsd LCPI2_76(%rip), %xmm6 ## xmm6 = mem[0],zero
movsd -48(%rbp), %xmm13 ## 8-byte Reload
## xmm13 = mem[0],zero
mulsd %xmm13, %xmm6
addsd %xmm3, %xmm6
subsd %xmm6, %xmm10
movsd %xmm10, -264(%rbp) ## 8-byte Spill
addsd %xmm5, %xmm0
movsd -832(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
addsd %xmm0, %xmm3
movsd %xmm3, -832(%rbp) ## 8-byte Spill
mulsd LCPI2_9(%rip), %xmm4
movsd LCPI2_10(%rip), %xmm0 ## xmm0 = mem[0],zero
movsd -680(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
mulsd %xmm9, %xmm0
mulsd LCPI2_16(%rip), %xmm2
addsd %xmm4, %xmm0
movsd %xmm0, -792(%rbp) ## 8-byte Spill
movsd LCPI2_17(%rip), %xmm0 ## xmm0 = mem[0],zero
movsd -664(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
mulsd %xmm10, %xmm0
addsd %xmm2, %xmm0
movsd %xmm0, -784(%rbp) ## 8-byte Spill
movapd %xmm14, %xmm12
mulsd LCPI2_28(%rip), %xmm12
subsd %xmm11, %xmm12
movsd %xmm12, -968(%rbp) ## 8-byte Spill
movsd LCPI2_77(%rip), %xmm4 ## xmm4 = mem[0],zero
movsd -136(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
movapd %xmm3, %xmm0
mulsd %xmm4, %xmm0
movsd LCPI2_78(%rip), %xmm1 ## xmm1 = mem[0],zero
movsd -440(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
movapd %xmm7, %xmm2
mulsd %xmm1, %xmm2
addsd %xmm0, %xmm2
movsd -512(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
movapd %xmm8, %xmm0
mulsd %xmm2, %xmm0
movapd %xmm2, %xmm12
movsd %xmm2, -448(%rbp) ## 8-byte Spill
movsd -840(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
subsd %xmm0, %xmm2
mulsd %xmm7, %xmm4
mulsd %xmm3, %xmm1
subsd %xmm1, %xmm4
movapd %xmm4, %xmm0
movapd %xmm4, %xmm14
movsd %xmm4, -976(%rbp) ## 8-byte Spill
movsd -120(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm4, %xmm0
mulsd %xmm4, %xmm0
subsd %xmm0, %xmm2
movsd %xmm2, -568(%rbp) ## 8-byte Spill
movsd LCPI2_79(%rip), %xmm11 ## xmm11 = mem[0],zero
movapd %xmm3, %xmm1
mulsd %xmm11, %xmm1
movsd LCPI2_80(%rip), %xmm0 ## xmm0 = mem[0],zero
movapd %xmm7, %xmm5
mulsd %xmm0, %xmm5
addsd %xmm1, %xmm5
movapd %xmm8, %xmm1
mulsd %xmm5, %xmm1
movapd %xmm5, %xmm6
movsd %xmm5, -952(%rbp) ## 8-byte Spill
movapd %xmm2, %xmm5
subsd %xmm1, %xmm5
mulsd %xmm11, %xmm7
mulsd %xmm0, %xmm3
subsd %xmm3, %xmm7
movsd LCPI2_81(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm5
movapd %xmm7, %xmm0
movsd %xmm7, -440(%rbp) ## 8-byte Spill
movapd %xmm4, %xmm1
mulsd %xmm4, %xmm0
mulsd %xmm4, %xmm0
mulsd %xmm2, %xmm0
movapd %xmm2, %xmm11
subsd %xmm0, %xmm5
movsd %xmm5, -768(%rbp) ## 8-byte Spill
movapd %xmm12, %xmm0
mulsd %xmm4, %xmm0
movsd -504(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
movapd %xmm2, %xmm4
subsd %xmm0, %xmm4
subsd %xmm2, %xmm4
movapd %xmm8, %xmm0
mulsd %xmm14, %xmm0
addsd -848(%rbp), %xmm0 ## 8-byte Folded Reload
mulsd %xmm1, %xmm4
addsd %xmm0, %xmm4
movsd %xmm4, -960(%rbp) ## 8-byte Spill
mulsd %xmm7, %xmm8
addsd %xmm4, %xmm8
mulsd %xmm11, %xmm8
mulsd %xmm1, %xmm6
mulsd %xmm1, %xmm6
mulsd %xmm11, %xmm6
subsd %xmm6, %xmm8
movsd %xmm8, -392(%rbp) ## 8-byte Spill
movsd -616(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -416(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -64(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
subsd %xmm0, %xmm1
movsd LCPI2_9(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm1
movsd -656(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm0, %xmm9
mulsd %xmm0, %xmm9
mulsd %xmm2, %xmm9
addsd %xmm1, %xmm9
movsd %xmm9, -944(%rbp) ## 8-byte Spill
movsd -96(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -672(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -24(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
subsd %xmm0, %xmm1
movsd LCPI2_16(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm1
movsd -872(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm0, %xmm10
mulsd %xmm0, %xmm10
mulsd %xmm2, %xmm10
addsd %xmm1, %xmm10
movsd %xmm10, -936(%rbp) ## 8-byte Spill
movsd -320(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
movapd %xmm10, %xmm0
mulsd -400(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -176(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
movapd %xmm3, %xmm4
subsd %xmm0, %xmm4
movsd LCPI2_28(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm4
movsd -168(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd -56(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
mulsd %xmm1, %xmm0
movapd %xmm1, %xmm8
mulsd %xmm2, %xmm0
subsd %xmm0, %xmm4
movsd %xmm4, -256(%rbp) ## 8-byte Spill
movsd LCPI2_52(%rip), %xmm1 ## xmm1 = mem[0],zero
movsd -280(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm1, %xmm2
movsd LCPI2_53(%rip), %xmm9 ## xmm9 = mem[0],zero
movsd -152(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm9, %xmm0
subsd %xmm0, %xmm2
movsd -536(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
movapd %xmm12, %xmm4
movsd LCPI2_54(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm5, %xmm4
addsd %xmm2, %xmm4
movsd %xmm4, -608(%rbp) ## 8-byte Spill
movapd %xmm10, %xmm0
mulsd %xmm4, %xmm0
subsd %xmm0, %xmm3
movsd -480(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm1, %xmm0
movsd -472(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm9, %xmm1
subsd %xmm1, %xmm0
movsd -528(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm4, %xmm5
addsd %xmm0, %xmm5
movsd %xmm5, -488(%rbp) ## 8-byte Spill
mulsd %xmm8, %xmm5
movsd %xmm5, -64(%rbp) ## 8-byte Spill
movapd %xmm8, %xmm0
mulsd %xmm5, %xmm0
subsd %xmm0, %xmm3
movapd %xmm3, %xmm15
movsd %xmm3, -176(%rbp) ## 8-byte Spill
movapd %xmm13, %xmm0
movsd LCPI2_60(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm0
movsd -232(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movsd LCPI2_61(%rip), %xmm6 ## xmm6 = mem[0],zero
mulsd %xmm6, %xmm1
addsd %xmm0, %xmm1
movsd LCPI2_83(%rip), %xmm11 ## xmm11 = mem[0],zero
movapd %xmm12, %xmm9
mulsd %xmm11, %xmm9
addsd %xmm1, %xmm9
movsd -224(%rbp), %xmm14 ## 8-byte Reload
## xmm14 = mem[0],zero
movapd %xmm14, %xmm0
mulsd %xmm3, %xmm0
movapd %xmm3, %xmm7
movsd -80(%rbp), %xmm13 ## 8-byte Reload
## xmm13 = mem[0],zero
movapd %xmm13, %xmm3
mulsd %xmm6, %xmm3
addsd %xmm0, %xmm3
movapd %xmm12, %xmm0
movsd -520(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm2, %xmm0
movsd %xmm2, -520(%rbp) ## 8-byte Spill
movapd %xmm4, %xmm6
mulsd %xmm8, %xmm6
movsd -1064(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
mulsd %xmm8, %xmm6
addsd %xmm0, %xmm6
movsd %xmm6, -280(%rbp) ## 8-byte Spill
addsd %xmm2, %xmm10
movsd %xmm10, -592(%rbp) ## 8-byte Spill
movapd %xmm9, %xmm0
movsd %xmm9, -472(%rbp) ## 8-byte Spill
mulsd %xmm10, %xmm0
addsd %xmm11, %xmm3
movapd %xmm3, %xmm1
movapd %xmm3, %xmm10
movsd %xmm3, -480(%rbp) ## 8-byte Spill
mulsd %xmm6, %xmm1
subsd %xmm1, %xmm0
movapd %xmm15, %xmm1
subsd %xmm0, %xmm1
mulsd -32(%rbp), %xmm7 ## 8-byte Folded Reload
movsd LCPI2_61(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd -128(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm7, %xmm0
mulsd %xmm4, %xmm11
addsd %xmm0, %xmm11
movsd %xmm11, -1112(%rbp) ## 8-byte Spill
movapd %xmm12, %xmm15
movapd %xmm12, %xmm5
mulsd %xmm8, %xmm5
movapd %xmm4, %xmm3
mulsd %xmm8, %xmm3
movsd %xmm3, -88(%rbp) ## 8-byte Spill
mulsd %xmm3, %xmm9
movapd %xmm11, %xmm0
mulsd %xmm5, %xmm0
subsd %xmm0, %xmm9
movsd %xmm9, -1120(%rbp) ## 8-byte Spill
movsd -56(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
movapd %xmm6, %xmm4
addsd %xmm8, %xmm4
mulsd %xmm4, %xmm11
mulsd %xmm3, %xmm10
subsd %xmm10, %xmm11
movapd %xmm5, %xmm2
movapd %xmm5, %xmm8
movsd %xmm5, -40(%rbp) ## 8-byte Spill
mulsd %xmm9, %xmm2
mulsd %xmm4, %xmm11
movsd %xmm4, -24(%rbp) ## 8-byte Spill
subsd %xmm11, %xmm2
movsd LCPI2_63(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm1
mulsd %xmm0, %xmm2
addsd %xmm1, %xmm2
movsd %xmm2, -920(%rbp) ## 8-byte Spill
movsd -608(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
movapd %xmm12, %xmm0
mulsd %xmm6, %xmm0
movsd -1000(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
subsd %xmm0, %xmm1
movapd %xmm1, %xmm9
movsd %xmm1, -160(%rbp) ## 8-byte Spill
movsd -144(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
addsd %xmm12, %xmm1
movsd %xmm1, -144(%rbp) ## 8-byte Spill
movsd LCPI2_55(%rip), %xmm3 ## xmm3 = mem[0],zero
addsd %xmm1, %xmm3
movapd %xmm3, %xmm0
mulsd LCPI2_1(%rip), %xmm0
movapd %xmm3, %xmm2
movapd %xmm3, %xmm10
movsd %xmm3, -152(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm2
subsd %xmm1, %xmm2
movapd %xmm2, %xmm1
movsd -48(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm5, %xmm1
movapd %xmm14, %xmm0
mulsd %xmm1, %xmm0
movapd %xmm1, %xmm11
movsd %xmm1, -72(%rbp) ## 8-byte Spill
movapd %xmm15, %xmm7
mulsd %xmm2, %xmm7
movsd -232(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm3, %xmm2
movapd %xmm13, %xmm1
mulsd %xmm2, %xmm1
movapd %xmm2, %xmm12
movsd %xmm2, -104(%rbp) ## 8-byte Spill
addsd %xmm0, %xmm1
movapd %xmm5, %xmm0
mulsd %xmm11, %xmm0
movapd %xmm3, %xmm2
mulsd %xmm12, %xmm2
addsd %xmm0, %xmm2
movapd %xmm15, %xmm0
movsd %xmm7, -136(%rbp) ## 8-byte Spill
mulsd %xmm7, %xmm0
addsd %xmm2, %xmm0
movsd %xmm0, -272(%rbp) ## 8-byte Spill
addsd %xmm7, %xmm1
movsd %xmm1, -504(%rbp) ## 8-byte Spill
mulsd %xmm8, %xmm1
mulsd %xmm4, %xmm0
subsd %xmm0, %xmm1
movsd LCPI2_18(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd -320(%rbp), %xmm0 ## 8-byte Folded Reload
mulsd LCPI2_19(%rip), %xmm0
movsd %xmm0, -1128(%rbp) ## 8-byte Spill
movsd -392(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
addsd LCPI2_82(%rip), %xmm0
movsd %xmm0, -392(%rbp) ## 8-byte Spill
movsd -288(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd LCPI2_19(%rip), %xmm0
movsd %xmm0, -288(%rbp) ## 8-byte Spill
movsd -256(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
addsd -296(%rbp), %xmm0 ## 8-byte Folded Reload
movsd %xmm0, -256(%rbp) ## 8-byte Spill
addsd %xmm9, %xmm1
movapd %xmm1, -736(%rbp) ## 16-byte Spill
movapd %xmm10, %xmm0
mulsd %xmm10, %xmm0
addsd LCPI2_34(%rip), %xmm0
sqrtsd %xmm0, %xmm0
movsd LCPI2_35(%rip), %xmm1 ## xmm1 = mem[0],zero
callq _pow
mulsd LCPI2_66(%rip), %xmm0
movsd -64(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
addsd -360(%rbp), %xmm2 ## 8-byte Folded Reload
movsd %xmm2, -64(%rbp) ## 8-byte Spill
movsd -72(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd -32(%rbp), %xmm3 ## 8-byte Folded Reload
movsd -104(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -128(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm3, %xmm1
movapd %xmm1, %xmm3
movsd -136(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -528(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm3, %xmm1
movsd %xmm1, -136(%rbp) ## 8-byte Spill
movapd %xmm1, %xmm3
mulsd -24(%rbp), %xmm3 ## 8-byte Folded Reload
movsd -504(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -88(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm3
addsd %xmm2, %xmm3
movsd %xmm3, -104(%rbp) ## 8-byte Spill
movapd %xmm3, %xmm1
mulsd LCPI2_67(%rip), %xmm1
movsd LCPI2_41(%rip), %xmm2 ## xmm2 = mem[0],zero
subsd %xmm1, %xmm2
mulsd %xmm0, %xmm2
movsd %xmm2, -72(%rbp) ## 8-byte Spill
movsd LCPI2_36(%rip), %xmm0 ## xmm0 = mem[0],zero
movsd -152(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm0, %xmm1
movapd %xmm1, %xmm0
callq _tanh
movapd %xmm0, %xmm1
movsd LCPI2_1(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm1
addsd %xmm0, %xmm1
mulsd -72(%rbp), %xmm1 ## 8-byte Folded Reload
movsd %xmm1, -72(%rbp) ## 8-byte Spill
movsd LCPI2_37(%rip), %xmm0 ## xmm0 = mem[0],zero
subsd -104(%rbp), %xmm0 ## 8-byte Folded Reload
movsd LCPI2_38(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
callq _tanh
movsd LCPI2_1(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
addsd %xmm1, %xmm0
movapd %xmm1, %xmm4
mulsd -72(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm0, %xmm3
movsd %xmm0, -72(%rbp) ## 8-byte Spill
movapd -736(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm0
mulsd %xmm7, %xmm0
movsd -272(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd -88(%rbp), %xmm2 ## 8-byte Folded Reload
movsd -136(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movsd -40(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
mulsd %xmm9, %xmm1
subsd %xmm1, %xmm2
movsd %xmm2, -824(%rbp) ## 8-byte Spill
movapd %xmm2, %xmm1
mulsd %xmm2, %xmm1
addsd %xmm0, %xmm1
movsd LCPI2_34(%rip), %xmm8 ## xmm8 = mem[0],zero
addsd %xmm8, %xmm1
xorps %xmm2, %xmm2
sqrtsd %xmm1, %xmm2
movsd %xmm2, -800(%rbp) ## 8-byte Spill
movapd %xmm2, %xmm0
divsd LCPI2_42(%rip), %xmm0
minsd LCPI2_41(%rip), %xmm0
mulsd LCPI2_43(%rip), %xmm0
movapd %xmm2, %xmm1
mulsd %xmm4, %xmm1
movapd %xmm4, %xmm5
addsd %xmm0, %xmm1
mulsd %xmm3, %xmm1
movsd %xmm1, -776(%rbp) ## 8-byte Spill
movapd %xmm7, %xmm0
mulsd %xmm1, %xmm0
divsd %xmm2, %xmm0
movapd %xmm0, -736(%rbp) ## 16-byte Spill
movsd -32(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movapd %xmm1, %xmm0
movsd LCPI2_56(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd %xmm4, %xmm0
mulsd %xmm0, %xmm1
movsd -48(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
movapd %xmm3, %xmm6
mulsd %xmm4, %xmm6
movsd -144(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
addsd %xmm7, %xmm6
addsd LCPI2_57(%rip), %xmm6
movapd %xmm6, %xmm2
mulsd %xmm5, %xmm2
movapd %xmm6, %xmm5
movapd %xmm6, %xmm10
movsd %xmm6, -360(%rbp) ## 8-byte Spill
subsd %xmm2, %xmm5
subsd %xmm7, %xmm5
movapd %xmm3, %xmm2
movapd %xmm3, %xmm11
mulsd %xmm5, %xmm2
addsd %xmm1, %xmm2
movsd -224(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm1, %xmm4
movapd %xmm1, %xmm3
mulsd %xmm4, %xmm3
addsd %xmm2, %xmm3
mulsd %xmm3, %xmm1
movapd %xmm3, %xmm12
movsd %xmm3, -928(%rbp) ## 8-byte Spill
movsd -128(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm0, %xmm2
movsd -232(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
movapd %xmm6, %xmm3
mulsd %xmm5, %xmm3
addsd %xmm2, %xmm3
movsd -80(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
movapd %xmm7, %xmm2
mulsd %xmm4, %xmm2
addsd %xmm3, %xmm2
movapd %xmm7, %xmm3
mulsd %xmm2, %xmm3
movapd %xmm2, %xmm7
movsd %xmm2, -352(%rbp) ## 8-byte Spill
addsd %xmm1, %xmm3
mulsd -528(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -536(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm2, %xmm5
addsd %xmm0, %xmm5
addsd %xmm4, %xmm5
movsd %xmm5, -144(%rbp) ## 8-byte Spill
addsd %xmm5, %xmm3
movsd %xmm3, -496(%rbp) ## 8-byte Spill
mulsd %xmm9, %xmm3
movapd %xmm11, %xmm0
mulsd %xmm12, %xmm0
movapd %xmm6, %xmm1
mulsd %xmm7, %xmm1
addsd %xmm0, %xmm1
movapd %xmm2, %xmm0
mulsd %xmm5, %xmm0
addsd %xmm1, %xmm0
movsd %xmm0, -152(%rbp) ## 8-byte Spill
mulsd -24(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm3
addsd -160(%rbp), %xmm3 ## 8-byte Folded Reload
movsd %xmm3, -104(%rbp) ## 8-byte Spill
movapd %xmm10, %xmm0
mulsd %xmm10, %xmm0
addsd %xmm8, %xmm0
sqrtsd %xmm0, %xmm0
movsd LCPI2_35(%rip), %xmm1 ## xmm1 = mem[0],zero
callq _pow
mulsd LCPI2_68(%rip), %xmm0
movsd -928(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -32(%rbp), %xmm1 ## 8-byte Folded Reload
movsd -352(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd -128(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm1, %xmm2
movsd -144(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -528(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm2, %xmm1
movsd %xmm1, -144(%rbp) ## 8-byte Spill
movapd %xmm1, %xmm2
mulsd -24(%rbp), %xmm2 ## 8-byte Folded Reload
movsd -496(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -88(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm2
addsd -64(%rbp), %xmm2 ## 8-byte Folded Reload
movsd %xmm2, -64(%rbp) ## 8-byte Spill
movsd LCPI2_67(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm2, %xmm1
movsd LCPI2_41(%rip), %xmm2 ## xmm2 = mem[0],zero
subsd %xmm1, %xmm2
mulsd %xmm0, %xmm2
movsd %xmm2, -352(%rbp) ## 8-byte Spill
movsd -360(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd LCPI2_36(%rip), %xmm0
callq _tanh
movapd %xmm0, %xmm1
movsd LCPI2_1(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm1
addsd %xmm0, %xmm1
mulsd -352(%rbp), %xmm1 ## 8-byte Folded Reload
movsd %xmm1, -360(%rbp) ## 8-byte Spill
movsd LCPI2_37(%rip), %xmm0 ## xmm0 = mem[0],zero
subsd -64(%rbp), %xmm0 ## 8-byte Folded Reload
mulsd LCPI2_38(%rip), %xmm0
callq _tanh
movsd LCPI2_1(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm0
addsd %xmm3, %xmm0
mulsd -360(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm0, %xmm4
movsd -152(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
movsd -88(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
mulsd %xmm9, %xmm2
movsd -144(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movsd -40(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
mulsd %xmm8, %xmm1
subsd %xmm1, %xmm2
movsd %xmm2, -352(%rbp) ## 8-byte Spill
movsd -104(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
movapd %xmm5, %xmm1
mulsd %xmm5, %xmm1
mulsd %xmm2, %xmm2
addsd %xmm1, %xmm2
addsd LCPI2_34(%rip), %xmm2
sqrtsd %xmm2, %xmm2
movsd %xmm2, -928(%rbp) ## 8-byte Spill
movapd %xmm2, %xmm1
divsd LCPI2_42(%rip), %xmm1
minsd LCPI2_41(%rip), %xmm1
mulsd LCPI2_43(%rip), %xmm1
movapd %xmm3, %xmm0
mulsd %xmm2, %xmm0
addsd %xmm1, %xmm0
mulsd %xmm4, %xmm0
movsd %xmm0, -360(%rbp) ## 8-byte Spill
movsd %xmm4, -64(%rbp) ## 8-byte Spill
movapd %xmm5, %xmm1
mulsd %xmm0, %xmm1
divsd %xmm2, %xmm1
movsd %xmm1, -104(%rbp) ## 8-byte Spill
movapd -736(%rbp), %xmm0 ## 16-byte Reload
addsd %xmm1, %xmm0
addsd -920(%rbp), %xmm0 ## 8-byte Folded Reload
movsd %xmm0, -920(%rbp) ## 8-byte Spill
movsd -680(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -616(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -184(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
subsd %xmm0, %xmm2
movsd -416(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd -656(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
mulsd %xmm1, %xmm0
movsd LCPI2_9(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm2
mulsd %xmm1, %xmm0
subsd %xmm0, %xmm2
movsd %xmm2, -184(%rbp) ## 8-byte Spill
movsd -664(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -96(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -192(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
subsd %xmm0, %xmm2
movsd -672(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd -872(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
mulsd %xmm1, %xmm0
movsd LCPI2_16(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm2
mulsd %xmm1, %xmm0
subsd %xmm0, %xmm2
movsd %xmm2, -192(%rbp) ## 8-byte Spill
movsd -304(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd LCPI2_19(%rip), %xmm0
movsd %xmm0, -304(%rbp) ## 8-byte Spill
movsd -168(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
movsd -320(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm5, %xmm6
movsd -856(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
addsd %xmm1, %xmm6
movsd LCPI2_28(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm6
movsd -400(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd -56(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm3, %xmm0
mulsd %xmm3, %xmm0
mulsd %xmm2, %xmm0
subsd %xmm0, %xmm6
movsd LCPI2_69(%rip), %xmm2 ## xmm2 = mem[0],zero
addsd -624(%rbp), %xmm2 ## 8-byte Folded Reload
subsd %xmm2, %xmm6
movsd %xmm6, -168(%rbp) ## 8-byte Spill
movapd %xmm5, %xmm0
mulsd -488(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
movapd %xmm0, %xmm1
movsd -160(%rbp), %xmm11 ## 8-byte Reload
## xmm11 = mem[0],zero
subsd -1000(%rbp), %xmm11 ## 8-byte Folded Reload
mulsd %xmm3, %xmm11
addsd %xmm0, %xmm11
mulsd -536(%rbp), %xmm3 ## 8-byte Folded Reload
mulsd -1064(%rbp), %xmm3 ## 8-byte Folded Reload
movsd -520(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -528(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm3, %xmm0
movsd -480(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
movapd %xmm7, %xmm2
mulsd %xmm0, %xmm2
movapd %xmm0, %xmm12
movsd %xmm0, -520(%rbp) ## 8-byte Spill
movsd -1112(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
movapd %xmm6, %xmm3
movsd -592(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm5, %xmm3
subsd %xmm3, %xmm2
movapd %xmm11, %xmm3
movsd %xmm11, -160(%rbp) ## 8-byte Spill
subsd %xmm2, %xmm3
mulsd %xmm8, %xmm7
movsd -472(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
movsd -24(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm0, %xmm2
subsd %xmm2, %xmm7
mulsd %xmm0, %xmm7
movsd -1120(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm9, %xmm0
subsd %xmm0, %xmm7
movsd LCPI2_63(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm3
mulsd %xmm0, %xmm7
addsd %xmm3, %xmm7
movsd -72(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
movsd LCPI2_65(%rip), %xmm9 ## xmm9 = mem[0],zero
addsd %xmm9, %xmm2
addsd %xmm4, %xmm2
subsd %xmm2, %xmm7
movsd -48(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
movapd %xmm9, %xmm2
movsd -408(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm0, %xmm2
movsd -232(%rbp), %xmm13 ## 8-byte Reload
## xmm13 = mem[0],zero
movapd %xmm13, %xmm4
movsd -112(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
mulsd %xmm8, %xmm4
addsd %xmm2, %xmm4
movsd -128(%rbp), %xmm14 ## 8-byte Reload
## xmm14 = mem[0],zero
movapd %xmm14, %xmm1
movsd LCPI2_58(%rip), %xmm2 ## xmm2 = mem[0],zero
movapd %xmm14, %xmm3
mulsd %xmm2, %xmm3
movsd -32(%rbp), %xmm15 ## 8-byte Reload
## xmm15 = mem[0],zero
movapd %xmm15, %xmm2
movsd LCPI2_51(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm2
subsd %xmm2, %xmm3
addsd %xmm4, %xmm3
movapd %xmm3, %xmm1
movsd -264(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm12, %xmm2
movapd %xmm3, %xmm4
movapd %xmm3, %xmm12
movsd %xmm3, -320(%rbp) ## 8-byte Spill
mulsd -280(%rbp), %xmm4 ## 8-byte Folded Reload
addsd %xmm2, %xmm4
movsd -224(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
mulsd %xmm10, %xmm0
movapd %xmm8, %xmm1
movsd -80(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm3, %xmm1
addsd %xmm0, %xmm1
movsd %xmm1, -112(%rbp) ## 8-byte Spill
movapd %xmm1, %xmm0
movsd LCPI2_59(%rip), %xmm1 ## xmm1 = mem[0],zero
movapd %xmm0, %xmm2
addsd %xmm1, %xmm2
movsd %xmm2, -56(%rbp) ## 8-byte Spill
mulsd %xmm5, %xmm2
addsd %xmm4, %xmm2
mulsd %xmm6, %xmm11
movapd %xmm6, %xmm8
movsd -472(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -176(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm11
addsd %xmm2, %xmm11
movsd LCPI2_45(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm9, %xmm5
movapd %xmm13, %xmm2
movsd LCPI2_46(%rip), %xmm6 ## xmm6 = mem[0],zero
mulsd %xmm6, %xmm2
subsd %xmm2, %xmm5
movsd LCPI2_74(%rip), %xmm2 ## xmm2 = mem[0],zero
movsd -536(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm0, %xmm2
addsd %xmm5, %xmm2
mulsd %xmm9, %xmm6
movapd %xmm6, %xmm5
movsd LCPI2_48(%rip), %xmm6 ## xmm6 = mem[0],zero
mulsd %xmm13, %xmm6
subsd %xmm5, %xmm6
movsd LCPI2_75(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm0, %xmm5
addsd %xmm6, %xmm5
mulsd %xmm2, %xmm15
mulsd %xmm5, %xmm14
addsd %xmm15, %xmm14
mulsd LCPI2_58(%rip), %xmm3
mulsd LCPI2_51(%rip), %xmm10
subsd %xmm10, %xmm3
addsd %xmm14, %xmm3
mulsd %xmm9, %xmm2
mulsd %xmm13, %xmm5
addsd %xmm2, %xmm5
movapd %xmm3, %xmm2
movsd %xmm3, -80(%rbp) ## 8-byte Spill
movsd -88(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm0, %xmm2
movapd %xmm5, %xmm6
movsd LCPI2_59(%rip), %xmm9 ## xmm9 = mem[0],zero
addsd %xmm9, %xmm6
movsd %xmm6, -32(%rbp) ## 8-byte Spill
movsd -40(%rbp), %xmm15 ## 8-byte Reload
## xmm15 = mem[0],zero
mulsd %xmm15, %xmm6
addsd %xmm2, %xmm6
movsd -24(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm2, %xmm12
addsd %xmm6, %xmm12
movsd %xmm12, -48(%rbp) ## 8-byte Spill
addsd -112(%rbp), %xmm5 ## 8-byte Folded Reload
movsd LCPI2_64(%rip), %xmm1 ## xmm1 = mem[0],zero
subsd %xmm5, %xmm1
addsd %xmm9, %xmm1
movsd %xmm1, -416(%rbp) ## 8-byte Spill
movapd %xmm1, %xmm5
mulsd %xmm0, %xmm5
movapd %xmm15, %xmm1
mulsd %xmm15, %xmm3
addsd %xmm5, %xmm3
movsd -264(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm2, %xmm5
addsd %xmm3, %xmm5
movapd %xmm0, %xmm3
mulsd %xmm12, %xmm3
movapd %xmm15, %xmm2
mulsd %xmm5, %xmm2
subsd %xmm2, %xmm3
movsd LCPI2_63(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm11
mulsd %xmm0, %xmm3
addsd %xmm11, %xmm3
movsd -136(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd -72(%rbp), %xmm2 ## 8-byte Folded Reload
movsd -272(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd -736(%rbp), %xmm4 ## 16-byte Folded Reload
addsd %xmm2, %xmm4
mulsd LCPI2_65(%rip), %xmm8
addsd %xmm8, %xmm4
movsd -144(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd -64(%rbp), %xmm2 ## 8-byte Folded Reload
movsd -152(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -104(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm2, %xmm1
addsd %xmm4, %xmm1
subsd %xmm1, %xmm3
movsd %xmm3, -96(%rbp) ## 8-byte Spill
movsd -168(%rbp), %xmm11 ## 8-byte Reload
## xmm11 = mem[0],zero
addsd %xmm7, %xmm11
mulsd -488(%rbp), %xmm7 ## 8-byte Folded Reload
movsd -256(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd -920(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
addsd %xmm1, %xmm0
mulsd -608(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm7
movsd -304(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
addsd LCPI2_70(%rip), %xmm1
addsd %xmm3, %xmm7
addsd -968(%rbp), %xmm7 ## 8-byte Folded Reload
addsd %xmm11, %xmm1
mulsd -1072(%rbp), %xmm11 ## 8-byte Folded Reload
movsd -288(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
addsd %xmm0, %xmm3
mulsd -1016(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm11
addsd %xmm7, %xmm11
addsd -1128(%rbp), %xmm11 ## 8-byte Folded Reload
movsd -192(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
addsd LCPI2_71(%rip), %xmm0
addsd %xmm1, %xmm0
mulsd -1080(%rbp), %xmm1 ## 8-byte Folded Reload
movapd %xmm1, %xmm2
movapd %xmm3, %xmm1
movsd -936(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
addsd %xmm1, %xmm3
mulsd -1024(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm2, %xmm1
movapd %xmm1, %xmm2
movapd %xmm11, %xmm9
subsd %xmm1, %xmm9
addsd -784(%rbp), %xmm9 ## 8-byte Folded Reload
movsd -184(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
addsd LCPI2_72(%rip), %xmm6
addsd %xmm0, %xmm6
mulsd -1032(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -944(%rbp), %xmm13 ## 8-byte Reload
## xmm13 = mem[0],zero
movapd %xmm3, %xmm1
addsd %xmm3, %xmm13
mulsd -704(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm0
addsd %xmm9, %xmm0
addsd -792(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -712(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm6, %xmm3
movsd -1040(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm13, %xmm2
subsd %xmm2, %xmm3
addsd %xmm0, %xmm3
addsd -832(%rbp), %xmm3 ## 8-byte Folded Reload
movsd -952(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -568(%rbp), %xmm1 ## 8-byte Folded Reload
movsd -440(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
movsd -960(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm4, %xmm7
subsd %xmm1, %xmm7
movsd LCPI2_84(%rip), %xmm2 ## xmm2 = mem[0],zero
movsd -512(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm1, %xmm2
addsd %xmm2, %xmm7
mulsd LCPI2_81(%rip), %xmm7
mulsd LCPI2_82(%rip), %xmm4
addsd %xmm7, %xmm4
movapd %xmm4, %xmm7
movsd -976(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
movsd -392(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
mulsd %xmm10, %xmm2
movsd -768(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
movsd -448(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm12, %xmm4
subsd %xmm4, %xmm2
addsd %xmm7, %xmm2
addsd %xmm3, %xmm2
movsd %xmm2, (%rbx)
movapd %xmm1, %xmm2
movsd -1008(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm4, %xmm2
movsd -840(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
subsd %xmm2, %xmm3
movsd LCPI2_3(%rip), %xmm8 ## xmm8 = mem[0],zero
mulsd %xmm8, %xmm3
movsd -1104(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm7, %xmm1
movsd -120(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm2, %xmm7
mulsd %xmm2, %xmm7
mulsd %xmm8, %xmm7
subsd %xmm7, %xmm3
addsd -808(%rbp), %xmm3 ## 8-byte Folded Reload
addsd %xmm13, %xmm3
addsd %xmm12, %xmm3
movsd %xmm3, 8(%rbx)
movapd %xmm1, %xmm3
addsd -848(%rbp), %xmm3 ## 8-byte Folded Reload
mulsd %xmm2, %xmm4
mulsd %xmm2, %xmm4
mulsd %xmm8, %xmm3
mulsd %xmm8, %xmm4
subsd %xmm4, %xmm3
addsd LCPI2_4(%rip), %xmm3
addsd -816(%rbp), %xmm3 ## 8-byte Folded Reload
addsd %xmm6, %xmm3
addsd %xmm10, %xmm3
movsd %xmm3, 16(%rbx)
movsd -344(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movsd %xmm1, 24(%rbx)
movsd %xmm0, 32(%rbx)
movsd -216(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 40(%rbx)
movsd %xmm9, 48(%rbx)
movsd -328(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 56(%rbx)
movsd %xmm11, 64(%rbx)
movsd -752(%rbp), %xmm14 ## 8-byte Reload
## xmm14 = mem[0],zero
movsd -984(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm14, %xmm0
movsd -208(%rbp), %xmm11 ## 8-byte Reload
## xmm11 = mem[0],zero
movapd %xmm11, %xmm1
movsd -760(%rbp), %xmm15 ## 8-byte Reload
## xmm15 = mem[0],zero
mulsd %xmm15, %xmm1
addsd %xmm0, %xmm1
movsd -744(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movapd %xmm0, %xmm2
movsd -424(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
mulsd %xmm12, %xmm2
addsd %xmm1, %xmm2
movsd -368(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
movsd -312(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm9, %xmm1
subsd %xmm1, %xmm2
mulsd -336(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -544(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
movapd %xmm10, %xmm1
movsd -864(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm4, %xmm1
addsd %xmm0, %xmm1
movsd -376(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
movsd -432(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm7, %xmm3
addsd %xmm1, %xmm3
movapd %xmm4, %xmm1
mulsd %xmm3, %xmm1
movapd %xmm3, %xmm4
movsd -584(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm7, %xmm0
subsd %xmm0, %xmm1
movsd LCPI2_63(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm2
mulsd %xmm0, %xmm1
addsd %xmm2, %xmm1
movapd %xmm1, %xmm13
movsd -464(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd -1048(%rbp), %xmm3 ## 8-byte Folded Reload
divsd -560(%rbp), %xmm3 ## 8-byte Folded Reload
movsd -696(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm3, %xmm1
movsd -1096(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
movapd %xmm8, %xmm2
mulsd -640(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm1, %xmm2
movapd %xmm9, %xmm1
mulsd LCPI2_65(%rip), %xmm1
addsd %xmm1, %xmm2
movsd -456(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -1056(%rbp), %xmm0 ## 8-byte Folded Reload
divsd -576(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -688(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd %xmm0, %xmm6
movsd -1088(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
movapd %xmm7, %xmm1
mulsd -632(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm6, %xmm1
addsd %xmm2, %xmm1
addsd %xmm13, %xmm1
mulsd -896(%rbp), %xmm1 ## 8-byte Folded Reload
mulsd %xmm11, %xmm14
movsd -384(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm15, %xmm2
addsd %xmm14, %xmm2
mulsd %xmm10, %xmm12
addsd %xmm2, %xmm12
movsd -600(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm9, %xmm2
addsd %xmm12, %xmm2
movsd -992(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd -432(%rbp), %xmm6 ## 8-byte Folded Reload
mulsd -336(%rbp), %xmm4 ## 8-byte Folded Reload
subsd %xmm4, %xmm6
movsd LCPI2_63(%rip), %xmm13 ## xmm13 = mem[0],zero
mulsd %xmm13, %xmm2
mulsd %xmm13, %xmm6
movapd %xmm13, %xmm9
addsd %xmm2, %xmm6
movapd %xmm6, %xmm4
mulsd -888(%rbp), %xmm3 ## 8-byte Folded Reload
mulsd -200(%rbp), %xmm8 ## 8-byte Folded Reload
subsd %xmm8, %xmm3
mulsd -880(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm7, %xmm2
mulsd -240(%rbp), %xmm2 ## 8-byte Folded Reload
subsd %xmm2, %xmm0
addsd %xmm3, %xmm0
movapd %xmm6, %xmm2
subsd %xmm0, %xmm2
mulsd -552(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm1, %xmm2
addsd -248(%rbp), %xmm2 ## 8-byte Folded Reload
movsd %xmm2, 72(%rbx)
movsd -520(%rbp), %xmm11 ## 8-byte Reload
## xmm11 = mem[0],zero
movsd -416(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm11, %xmm0
movsd -80(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
movapd %xmm12, %xmm1
mulsd -280(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm0, %xmm1
movsd -264(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movapd %xmm0, %xmm2
movsd -592(%rbp), %xmm13 ## 8-byte Reload
## xmm13 = mem[0],zero
mulsd %xmm13, %xmm2
addsd %xmm1, %xmm2
movsd -480(%rbp), %xmm14 ## 8-byte Reload
## xmm14 = mem[0],zero
movsd -160(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm14, %xmm1
subsd %xmm1, %xmm2
movsd -88(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
mulsd %xmm10, %xmm0
movsd -320(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
movapd %xmm8, %xmm1
movsd -40(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm4, %xmm1
addsd %xmm0, %xmm1
movsd -24(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
movsd -56(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm6, %xmm7
addsd %xmm1, %xmm7
movapd %xmm4, %xmm0
mulsd %xmm7, %xmm0
movsd -48(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm6, %xmm1
subsd %xmm1, %xmm0
movapd %xmm9, %xmm1
mulsd %xmm9, %xmm2
mulsd %xmm9, %xmm0
movapd %xmm9, %xmm3
addsd %xmm2, %xmm0
movsd %xmm0, -40(%rbp) ## 8-byte Spill
movsd -776(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd -824(%rbp), %xmm6 ## 8-byte Folded Reload
divsd -800(%rbp), %xmm6 ## 8-byte Folded Reload
movsd -272(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm6, %xmm0
movsd -504(%rbp), %xmm15 ## 8-byte Reload
## xmm15 = mem[0],zero
movapd %xmm15, %xmm2
mulsd -72(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm0, %xmm2
movsd LCPI2_65(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm14, %xmm0
addsd %xmm0, %xmm2
movsd -360(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd -352(%rbp), %xmm4 ## 8-byte Folded Reload
divsd -928(%rbp), %xmm4 ## 8-byte Folded Reload
movsd -152(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm4, %xmm0
movsd -496(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
movapd %xmm9, %xmm1
mulsd -64(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm0, %xmm1
addsd %xmm2, %xmm1
addsd -40(%rbp), %xmm1 ## 8-byte Folded Reload
mulsd -528(%rbp), %xmm1 ## 8-byte Folded Reload
mulsd %xmm11, %xmm12
movsd -32(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd -280(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm12, %xmm2
mulsd %xmm13, %xmm8
addsd %xmm2, %xmm8
movapd %xmm14, %xmm0
mulsd -176(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm8, %xmm0
mulsd -24(%rbp), %xmm5 ## 8-byte Folded Reload
mulsd %xmm10, %xmm7
subsd %xmm7, %xmm5
mulsd %xmm3, %xmm0
mulsd %xmm3, %xmm5
addsd %xmm0, %xmm5
mulsd -136(%rbp), %xmm6 ## 8-byte Folded Reload
movapd -736(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm2, %xmm15
subsd %xmm15, %xmm6
mulsd -144(%rbp), %xmm4 ## 8-byte Folded Reload
movsd -104(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm3, %xmm9
subsd %xmm9, %xmm4
addsd %xmm6, %xmm4
subsd %xmm4, %xmm5
mulsd -536(%rbp), %xmm5 ## 8-byte Folded Reload
addsd %xmm1, %xmm5
addsd -96(%rbp), %xmm5 ## 8-byte Folded Reload
movsd %xmm5, 80(%rbx)
movapd %xmm2, %xmm0
addsd -296(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm3, %xmm0
movapd LCPI2_85(%rip), %xmm1 ## xmm1 = [-0.0E+0,-0.0E+0]
xorpd %xmm1, %xmm0
movlpd %xmm0, 88(%rbx)
movsd -72(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
addsd -624(%rbp), %xmm0 ## 8-byte Folded Reload
addsd -64(%rbp), %xmm0 ## 8-byte Folded Reload
movsd %xmm0, 96(%rbx)
movapd -912(%rbp), %xmm0 ## 16-byte Reload
addsd -200(%rbp), %xmm0 ## 8-byte Folded Reload
addsd -240(%rbp), %xmm0 ## 8-byte Folded Reload
xorpd %xmm1, %xmm0
movlpd %xmm0, 104(%rbx)
movsd -648(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
addsd -640(%rbp), %xmm0 ## 8-byte Folded Reload
addsd -632(%rbp), %xmm0 ## 8-byte Folded Reload
movsd %xmm0, 112(%rbx)
LBB2_44:
xorl %eax, %eax
addq $1120, %rsp ## imm = 0x460
popq %rbx
popq %r14
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _F_alloc_mem ## -- Begin function F_alloc_mem
.p2align 4, 0x90
_F_alloc_mem: ## @F_alloc_mem
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
xorl %eax, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _F_init_mem ## -- Begin function F_init_mem
.p2align 4, 0x90
_F_init_mem: ## @F_init_mem
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
xorl %eax, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _F_free_mem ## -- Begin function F_free_mem
.p2align 4, 0x90
_F_free_mem: ## @F_free_mem
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _F_checkout ## -- Begin function F_checkout
.p2align 4, 0x90
_F_checkout: ## @F_checkout
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
xorl %eax, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _F_release ## -- Begin function F_release
.p2align 4, 0x90
_F_release: ## @F_release
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _F_incref ## -- Begin function F_incref
.p2align 4, 0x90
_F_incref: ## @F_incref
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _F_decref ## -- Begin function F_decref
.p2align 4, 0x90
_F_decref: ## @F_decref
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _F_n_in ## -- Begin function F_n_in
.p2align 4, 0x90
_F_n_in: ## @F_n_in
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movl $1, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _F_n_out ## -- Begin function F_n_out
.p2align 4, 0x90
_F_n_out: ## @F_n_out
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movl $1, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _F_default_in ## -- Begin function F_default_in
.p2align 4, 0x90
_F_default_in: ## @F_default_in
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
xorps %xmm0, %xmm0
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _F_name_in ## -- Begin function F_name_in
.p2align 4, 0x90
_F_name_in: ## @F_name_in
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
xorl %eax, %eax
testq %rdi, %rdi
leaq L_.str(%rip), %rcx
cmoveq %rcx, %rax
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _F_name_out ## -- Begin function F_name_out
.p2align 4, 0x90
_F_name_out: ## @F_name_out
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
xorl %eax, %eax
testq %rdi, %rdi
leaq L_.str.1(%rip), %rcx
cmoveq %rcx, %rax
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _F_sparsity_in ## -- Begin function F_sparsity_in
.p2align 4, 0x90
_F_sparsity_in: ## @F_sparsity_in
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
xorl %eax, %eax
testq %rdi, %rdi
leaq _foo_jac_s0(%rip), %rcx
cmoveq %rcx, %rax
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _F_sparsity_out ## -- Begin function F_sparsity_out
.p2align 4, 0x90
_F_sparsity_out: ## @F_sparsity_out
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
xorl %eax, %eax
testq %rdi, %rdi
leaq _foo_jac_s1(%rip), %rcx
cmoveq %rcx, %rax
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _F_work ## -- Begin function F_work
.p2align 4, 0x90
_F_work: ## @F_work
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
testq %rdi, %rdi
je LBB17_2
## %bb.1:
movq $1, (%rdi)
LBB17_2:
testq %rsi, %rsi
je LBB17_4
## %bb.3:
movq $1, (%rsi)
LBB17_4:
testq %rdx, %rdx
je LBB17_6
## %bb.5:
movq $0, (%rdx)
LBB17_6:
testq %rcx, %rcx
je LBB17_8
## %bb.7:
movq $0, (%rcx)
LBB17_8:
xorl %eax, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
	.globl	_jac_F                  ## -- Begin function jac_F
	.p2align	4, 0x90
_jac_F:                                 ## @jac_F
	.cfi_startproc
## %bb.0:
                                        ## Thin wrapper: forward all incoming register args unchanged
                                        ## to _foo_jac_f1, then return 0 regardless of its result.
	pushq	%rbp                    ## frame setup also restores 16-byte stack alignment
	.cfi_def_cfa_offset 16          ## (rsp%16 == 8 at entry; push makes it 0 for the call)
	.cfi_offset %rbp, -16
	movq	%rsp, %rbp
	.cfi_def_cfa_register %rbp
	callq	_foo_jac_f1             ## arg registers rdi/rsi/rdx/rcx/r8/r9 pass through untouched
	xorl	%eax, %eax              ## always report success (callee's return value is discarded)
	popq	%rbp
	retq
	.cfi_endproc
                                        ## -- End function
.section __TEXT,__literal8,8byte_literals
.p2align 3 ## -- Begin function foo_jac_f1
LCPI19_0:
.quad 0x3ff0000000000000 ## double 1
LCPI19_1:
.quad 0x3fe0000000000000 ## double 0.5
LCPI19_2:
.quad 0xbfb8f3b341a8bf66 ## double -0.097468570261069226
LCPI19_3:
.quad 0xbfb431c983f2d665 ## double -0.078884691919336072
LCPI19_4:
.quad 0x3fb431c983f2d665 ## double 0.078884691919336072
LCPI19_5:
.quad 0xbf9d9ef12e009beb ## double -0.028926628525507352
LCPI19_6:
.quad 0x3fd3bf4b740067f2 ## double 0.30855070427207842
LCPI19_7:
.quad 0x403c3d82dbf5bb0f ## double 28.240278003208967
LCPI19_8:
.quad 0xc03c3d82dbf5bb0f ## double -28.240278003208967
LCPI19_9:
.quad 0x40714f14923e1140 ## double 276.94252228016921
LCPI19_10:
.quad 0xc0555cda1369ac84 ## double -85.450810292431981
LCPI19_11:
.quad 0xbc80000000000000 ## double -2.7755575615628914E-17
LCPI19_13:
.quad 0x3c90000000000000 ## double 5.5511151231257827E-17
LCPI19_14:
.quad 0x3fa999999999999a ## double 0.050000000000000003
LCPI19_15:
.quad 0x3c70000000000000 ## double 1.3877787807814457E-17
LCPI19_16:
.quad 0xbfb184b6093c8354 ## double -0.068431260352111167
LCPI19_17:
.quad 0xbfb060eb5e7398d7 ## double -0.063978872832737607
LCPI19_18:
.quad 0xbf727b65c5a72bc1 ## double -0.0045122123214679797
LCPI19_19:
.quad 0xbfd966eda631cb7a ## double -0.39690724592144699
LCPI19_20:
.quad 0xbfdb7801f38da86b ## double -0.42919968399531311
LCPI19_21:
.quad 0xbfa8047849d8f494 ## double -0.046909102453789903
LCPI19_22:
.quad 0xbfa4a8aaddbab8a8 ## double -0.040349330488753055
LCPI19_23:
.quad 0x3fc6035caa5b8ec0 ## double 0.17197759931797485
LCPI19_24:
.quad 0xbf5f8486f7594818 ## double -0.0019236867932659382
LCPI19_25:
.quad 0x3f51050133a5f8da ## double 0.0010387908683636067
LCPI19_27:
.quad 0xbf9999999999999a ## double -0.025000000000000001
LCPI19_28:
.quad 0xbfe0000000000000 ## double -0.5
LCPI19_29:
.quad 0x3fb060eb5e7398d7 ## double 0.063978872832737607
LCPI19_30:
.quad 0x3fd966eda631cb7a ## double 0.39690724592144699
LCPI19_31:
.quad 0x3fa4a8aaddbab8a8 ## double 0.040349330488753055
LCPI19_32:
.quad 0x3ee4f8b588e368f1 ## double 1.0000000000000001E-5
LCPI19_33:
.quad 0x3ff8000000000000 ## double 1.5
LCPI19_34:
.quad 0xc072c00000000000 ## double -300
LCPI19_35:
.quad 0xbf9eb851eb851eb8 ## double -0.029999999999999999
LCPI19_36:
.quad 0x3fd5555555555555 ## double 0.33333333333333331
LCPI19_37:
.quad 0x4049000000000000 ## double 50
LCPI19_41:
.quad 0x410232799687291b ## double 149071.19849998583
LCPI19_42:
.quad 0x4103ef1a878c497b ## double 163299.31618554503
LCPI19_43:
.quad 0x4008000000000000 ## double 3
LCPI19_44:
.quad 0x3fe999999999999a ## double 0.80000000000000004
LCPI19_45:
.quad 0x4014000000000000 ## double 5
LCPI19_46:
.quad 0xbf9e3312d458c923 ## double -0.029491705141660007
LCPI19_47:
.quad 0x3fa10a0c2a85de2d ## double 0.03327978152350073
LCPI19_48:
.quad 0x3f77a3653982f612 ## double 0.0057710603797978145
LCPI19_49:
.quad 0xbf913c79cf44d36d ## double -0.016832259441076958
LCPI19_50:
.quad 0x3fc6de755b39d616 ## double 0.17866389231100815
LCPI19_54:
.quad 0xbf4b020c9c63afe9 ## double -8.24218900360216E-4
LCPI19_55:
.quad 0x3f292c73a5bdb29c ## double 1.9205962859860238E-4
LCPI19_56:
.quad 0x3f525b145384fce8 ## double 0.0011203478334918474
LCPI19_57:
.quad 0x3f4630437bcf9f9e ## double 6.7714018916563748E-4
LCPI19_58:
.quad 0x3f2976f43e97efde ## double 1.9427997112575387E-4
LCPI19_59:
.quad 0xbf2347248250412c ## double -1.4707871119457852E-4
LCPI19_60:
.quad 0x3f59b08a494c0d79 ## double 0.0015679693653403977
LCPI19_61:
.quad 0xbfc6de755b39d616 ## double -0.17866389231100815
LCPI19_62:
.quad 0x3f425b145384fce8 ## double 5.6017391674592371E-4
LCPI19_63:
.quad 0x3f1976f43e97efde ## double 9.7139985562876935E-5
LCPI19_64:
.quad 0x3f70624dd2f1a9fc ## double 0.0040000000000000001
LCPI19_65:
.quad 0xbfa1eb851eb851ec ## double -0.035000000000000003
LCPI19_67:
.quad 0xbffc0893fd832bcd ## double -1.7520942595317479
LCPI19_69:
.quad 0x410587fb5dabb204 ## double 176383.42073763919
LCPI19_70:
.quad 0x3fc999999999999a ## double 0.20000000000000001
LCPI19_71:
.quad 0xc014000000000000 ## double -5
LCPI19_72:
.quad 0x3fb89f89713dc053 ## double 0.096184339663296911
LCPI19_73:
.quad 0x3f9d8c3e87e3b396 ## double 0.028855301898989071
LCPI19_74:
.quad 0x3ff07f43c1bfc3a0 ## double 1.0310704773257626
LCPI19_75:
.quad 0xbff07f43c1bfc3a0 ## double -1.0310704773257626
LCPI19_76:
.quad 0xc02439028349a518 ## double -10.11134729646669
LCPI19_77:
.quad 0x3f84b54ebd7eb5cf ## double 0.01011144176285486
LCPI19_78:
.quad 0x3faddab7eac9dfb5 ## double 0.058309314165796357
LCPI19_79:
.quad 0x3fd2ac4b364e66b0 ## double 0.29176597884507327
LCPI19_80:
.quad 0x3fef1f28052d55d1 ## double 0.97255326281691101
LCPI19_81:
.quad 0xbfc7da66307d0934 ## double -0.18635251395796504
LCPI19_82:
.quad 0x40087719fcc5171b ## double 3.0581550357482121
LCPI19_83:
.quad 0xc0087719fcc5171b ## double -3.0581550357482121
LCPI19_84:
.quad 0x403dfd816c2bef76 ## double 29.990256081320204
LCPI19_85:
.quad 0x3fb51dc75e661920 ## double 0.082485638186061028
LCPI19_86:
.quad 0x3fe9e2929391772a ## double 0.80890778371733529
LCPI19_87:
.quad 0x40165ae3ccee793c ## double 5.5887596149971692
LCPI19_88:
.quad 0xbfc56b67771a6f72 ## double -0.16734021487797751
LCPI19_89:
.quad 0x401eb0746e6afdde ## double 7.6723191502382786
LCPI19_90:
.quad 0xc01eb0746e6afdde ## double -7.6723191502382786
LCPI19_91:
.quad 0x4052cf580a7d5b90 ## double 75.239748594684215
LCPI19_92:
.quad 0x40292e67cd56a4ca ## double 12.590635697199463
LCPI19_96:
.quad 0xbf51050133a5f8da ## double -0.0010387908683636067
LCPI19_98:
.quad 0xbc70000000000000 ## double -1.3877787807814457E-17
LCPI19_103:
.quad 0x3f913c79cf44d36d ## double 0.016832259441076958
LCPI19_104:
.quad 0xbf525b145384fce8 ## double -0.0011203478334918474
LCPI19_105:
.quad 0xbf2976f43e97efde ## double -1.9427997112575387E-4
LCPI19_106:
.quad 0x3f9e3312d458c923 ## double 0.029491705141660007
LCPI19_107:
.quad 0xbf425b145384fce8 ## double -5.6017391674592371E-4
LCPI19_108:
.quad 0x3c80000000000000 ## double 2.7755575615628914E-17
LCPI19_109:
.quad 0xbf1976f43e97efde ## double -9.7139985562876935E-5
LCPI19_110:
.quad 0x4072c00000000000 ## double 300
LCPI19_111:
.quad 0x3f727b65c5a72bc1 ## double 0.0045122123214679797
LCPI19_112:
.quad 0x3f5f8486f7594818 ## double 0.0019236867932659382
LCPI19_113:
.quad 0xbf292c73a5bdb29c ## double -1.9205962859860238E-4
LCPI19_114:
.quad 0x3fa8047849d8f494 ## double 0.046909102453789903
LCPI19_115:
.quad 0x3fdb7801f38da86b ## double 0.42919968399531311
LCPI19_116:
.quad 0x3fc7da66307d0934 ## double 0.18635251395796504
LCPI19_117:
.quad 0x3fc56b67771a6f72 ## double 0.16734021487797751
LCPI19_118:
.quad 0x3fb8f3b341a8bf66 ## double 0.097468570261069226
LCPI19_119:
.quad 0x3fb184b6093c8354 ## double 0.068431260352111167
LCPI19_120:
.quad 0x40236dbd2283a883 ## double 9.7143336091724048
LCPI19_121:
.quad 0xc02005a38e8f4163 ## double -8.0110134649154983
LCPI19_122:
.quad 0xc01a139140f10a82 ## double -6.5191087863082675
LCPI19_123:
.quad 0xbff4aceee30d5ff5 ## double -1.2922200078228319
LCPI19_124:
.quad 0xbff35b126d26fe63 ## double -1.2097343696367708
LCPI19_125:
.quad 0x401166c8b725e38b ## double 4.3503750435710442
LCPI19_126:
.quad 0xc0280b9e92c870b4 ## double -12.022694193809322
LCPI19_127:
.quad 0xc01166c8b725e38b ## double -4.3503750435710442
LCPI19_128:
.quad 0xc0236dbd2283a883 ## double -9.7143336091724048
LCPI19_129:
.quad 0x3ff35b126d26fe63 ## double 1.2097343696367708
LCPI19_130:
.quad 0x3ff4aceee30d5ff5 ## double 1.2922200078228319
LCPI19_131:
.quad 0x40280b9e92c870b4 ## double 12.022694193809322
LCPI19_132:
.quad 0xc10232799687291b ## double -149071.19849998583
LCPI19_133:
.quad 0xc103ef1a878c497b ## double -163299.31618554503
LCPI19_134:
.quad 0xc10587fb5dabb204 ## double -176383.42073763919
LCPI19_147:
.quad 0x3f4b020c9c63afe9 ## double 8.24218900360216E-4
LCPI19_148:
.quad 0x3f8bb39570497333 ## double 0.013526122559761688
LCPI19_149:
.quad 0x3f49016dfb343853 ## double 7.6310987643225315E-4
LCPI19_150:
.quad 0x3fc2f80babf6ab47 ## double 0.14819475075395358
LCPI19_151:
.quad 0x3fd4f908449a2100 ## double 0.32769972514553558
LCPI19_152:
.quad 0x400e7abad74710a2 ## double 3.8099266832887944
LCPI19_153:
.quad 0x3fb7df3886b6b2da ## double 0.093249829186514615
## 16-byte literal pool: two-lane packed-double constants loaded with SSE
## instructions (e.g. mulpd/movaps LCPI19_nn(%rip) in the code below).
## A ".space 8" entry emits eight zero bytes for a lane whose value is
## never read by the consuming code -- the compiler's own operand comments
## render such a lane as "u", e.g. "<u,2.7755575615628914E-17>" at the
## LCPI19_26 load site.  The bit patterns must stay exact; only comments
## may be edited here.
.section __TEXT,__literal16,16byte_literals
.p2align 4 ## 16-byte alignment: required for aligned 128-bit SSE loads
LCPI19_12:
.quad 0x3c70000000000000 ## double 1.3877787807814457E-17
.quad 0x3c90000000000000 ## double 5.5511151231257827E-17
LCPI19_26:
.space 8 ## low lane unused ("u") by the consuming load
.quad 0x3c80000000000000 ## double 2.7755575615628914E-17
LCPI19_38:
.quad 0x3ee4f8b588e368f1 ## double 1.0000000000000001E-5
.quad 0x3ee4f8b588e368f1 ## double 1.0000000000000001E-5
LCPI19_39:
.quad 0x3fc999999999999a ## double 0.20000000000000001
.quad 0x3fc999999999999a ## double 0.20000000000000001
LCPI19_40:
.quad 0x3ff0000000000000 ## double 1
.quad 0x3ff0000000000000 ## double 1
LCPI19_51:
.quad 0xbf913c79cf44d36d ## double -0.016832259441076958
.quad 0xbf913c79cf44d36d ## double -0.016832259441076958
LCPI19_52:
.quad 0x3f77a3653982f612 ## double 0.0057710603797978145
.quad 0x3fa10a0c2a85de2d ## double 0.03327978152350073
LCPI19_53:
.quad 0x3fa10a0c2a85de2d ## double 0.03327978152350073
.quad 0x3f77a3653982f612 ## double 0.0057710603797978145
LCPI19_66:
.quad 0x3f70624dd2f1a9fc ## double 0.0040000000000000001
.space 8
LCPI19_68:
.quad 0xbfc6de755b39d616 ## double -0.17866389231100815
.quad 0x3fc6de755b39d616 ## double 0.17866389231100815
LCPI19_93:
.quad 0x3c80000000000000 ## double 2.7755575615628914E-17
.quad 0x3c90000000000000 ## double 5.5511151231257827E-17
LCPI19_94:
.quad 0x3c80000000000000 ## double 2.7755575615628914E-17
.quad 0x3c80000000000000 ## double 2.7755575615628914E-17
LCPI19_95:
.quad 0xbc70000000000000 ## double -1.3877787807814457E-17
.quad 0xbc70000000000000 ## double -1.3877787807814457E-17
LCPI19_97:
.quad 0xbf4b020c9c63afe9 ## double -8.24218900360216E-4
.space 8
LCPI19_99:
.quad 0x3f4630437bcf9f9e ## double 6.7714018916563748E-4
.space 8
LCPI19_100:
.quad 0xbf2976f43e97efde ## double -1.9427997112575387E-4
.space 8
LCPI19_101:
.quad 0x3f292c73a5bdb29c ## double 1.9205962859860238E-4
.space 8
LCPI19_102:
.quad 0xbf525b145384fce8 ## double -0.0011203478334918474
.space 8
LCPI19_135:
.quad 0x8000000000000000 ## double -0
.quad 0x8000000000000000 ## double -0
LCPI19_136:
.quad 0xbf1976f43e97efde ## double -9.7139985562876935E-5
.space 8
LCPI19_137:
.space 8
.quad 0x3f77a3653982f612 ## double 0.0057710603797978145
LCPI19_138:
.quad 0xbf292c73a5bdb29c ## double -1.9205962859860238E-4
.space 8
LCPI19_139:
.space 8
.quad 0x3fa10a0c2a85de2d ## double 0.03327978152350073
LCPI19_140:
.quad 0xbf292c73a5bdb29c ## double -1.9205962859860238E-4
.quad 0x3fa999999999999a ## double 0.050000000000000003
LCPI19_141:
.quad 0x3c70000000000000 ## double 1.3877787807814457E-17
.quad 0x3c70000000000000 ## double 1.3877787807814457E-17
LCPI19_142:
.quad 0x3f77a3653982f612 ## double 0.0057710603797978145
.quad 0x3f77a3653982f612 ## double 0.0057710603797978145
LCPI19_143:
.quad 0x3fa10a0c2a85de2d ## double 0.03327978152350073
.quad 0x3fa10a0c2a85de2d ## double 0.03327978152350073
LCPI19_144:
.quad 0x3fa999999999999a ## double 0.050000000000000003
.quad 0x3fa999999999999a ## double 0.050000000000000003
LCPI19_145:
.quad 0x3f292c73a5bdb29c ## double 1.9205962859860238E-4
.quad 0x3fa999999999999a ## double 0.050000000000000003
LCPI19_146:
.quad 0x3f4b020c9c63afe9 ## double 8.24218900360216E-4
.space 8
.section __TEXT,__text,regular,pure_instructions
.p2align 4, 0x90
_foo_jac_f1: ## @foo_jac_f1
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
movl $17816, %eax ## imm = 0x4598
callq ____chkstk_darwin
subq %rax, %rsp
popq %rax
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rsi, %r14
movq (%rdi), %r15
xorps %xmm0, %xmm0
movsd %xmm0, -128(%rbp) ## 8-byte Spill
testq %r15, %r15
je LBB19_1
## %bb.2:
movsd (%r15), %xmm1 ## xmm1 = mem[0],zero
movsd 176(%r15), %xmm0 ## xmm0 = mem[0],zero
movsd %xmm0, -280(%rbp) ## 8-byte Spill
mulsd LCPI19_1(%rip), %xmm1
movsd %xmm1, -112(%rbp) ## 8-byte Spill
movapd %xmm1, %xmm0
callq _cos
movsd -112(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movsd %xmm0, -1536(%rbp) ## 8-byte Spill
movsd 192(%r15), %xmm0 ## xmm0 = mem[0],zero
movsd %xmm0, -4032(%rbp) ## 8-byte Spill
jmp LBB19_3
LBB19_1:
movsd LCPI19_0(%rip), %xmm0 ## xmm0 = mem[0],zero
movsd %xmm0, -1536(%rbp) ## 8-byte Spill
xorps %xmm0, %xmm0
movsd %xmm0, -4032(%rbp) ## 8-byte Spill
xorpd %xmm1, %xmm1
xorps %xmm0, %xmm0
movsd %xmm0, -280(%rbp) ## 8-byte Spill
LBB19_3:
movapd %xmm1, %xmm0
callq _sin
movsd -1536(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
movapd %xmm2, %xmm1
mulsd %xmm2, %xmm1
movapd %xmm1, -6128(%rbp) ## 16-byte Spill
movapd %xmm0, %xmm3
mulsd %xmm0, %xmm3
movapd %xmm2, %xmm1
mulsd %xmm0, %xmm1
movapd %xmm1, -6112(%rbp) ## 16-byte Spill
xorpd %xmm1, %xmm1
movsd %xmm1, -776(%rbp) ## 8-byte Spill
testq %r15, %r15
je LBB19_5
## %bb.4:
movsd 8(%r15), %xmm1 ## xmm1 = mem[0],zero
movsd %xmm1, -128(%rbp) ## 8-byte Spill
movsd 24(%r15), %xmm1 ## xmm1 = mem[0],zero
movsd %xmm1, -776(%rbp) ## 8-byte Spill
LBB19_5:
movsd %xmm0, -2272(%rbp) ## 8-byte Spill
movapd -6128(%rbp), %xmm1 ## 16-byte Reload
subsd %xmm3, %xmm1
movapd %xmm1, -6128(%rbp) ## 16-byte Spill
movapd -6112(%rbp), %xmm0 ## 16-byte Reload
addsd %xmm0, %xmm0
movapd %xmm0, -6112(%rbp) ## 16-byte Spill
testq %r15, %r15
je LBB19_6
## %bb.7:
movsd 64(%r15), %xmm0 ## xmm0 = mem[0],zero
movsd 96(%r15), %xmm1 ## xmm1 = mem[0],zero
movsd %xmm1, -112(%rbp) ## 8-byte Spill
movsd %xmm0, -64(%rbp) ## 8-byte Spill
callq _sin
movaps %xmm0, -2784(%rbp) ## 16-byte Spill
movsd -112(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
callq _cos
movaps %xmm0, -1216(%rbp) ## 16-byte Spill
movsd 128(%r15), %xmm0 ## xmm0 = mem[0],zero
jmp LBB19_8
LBB19_6:
movsd LCPI19_0(%rip), %xmm0 ## xmm0 = mem[0],zero
movaps %xmm0, -1216(%rbp) ## 16-byte Spill
xorps %xmm0, %xmm0
movsd %xmm0, -112(%rbp) ## 8-byte Spill
xorps %xmm0, %xmm0
movsd %xmm0, -64(%rbp) ## 8-byte Spill
xorps %xmm0, %xmm0
movaps %xmm0, -2784(%rbp) ## 16-byte Spill
xorps %xmm0, %xmm0
LBB19_8:
callq ___sincos_stret
movaps %xmm0, -1328(%rbp) ## 16-byte Spill
movapd %xmm1, -1312(%rbp) ## 16-byte Spill
movsd -64(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
callq _cos
movaps %xmm0, -2880(%rbp) ## 16-byte Spill
movsd -112(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
callq _sin
movapd -6128(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm3
movapd -2880(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm7, %xmm3
movapd -6112(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm1
movapd -2784(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm6, %xmm1
subsd %xmm1, %xmm3
movapd -1216(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm1
mulsd %xmm3, %xmm1
mulsd %xmm6, %xmm2
mulsd %xmm7, %xmm4
addsd %xmm2, %xmm4
movapd %xmm0, %xmm2
mulsd %xmm4, %xmm2
subsd %xmm2, %xmm1
movapd %xmm0, -1936(%rbp) ## 16-byte Spill
movapd %xmm3, -10368(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm0
movapd %xmm5, %xmm3
movapd %xmm4, -10352(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm3
addsd %xmm0, %xmm3
movapd -1328(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm2
movapd -1312(%rbp), %xmm5 ## 16-byte Reload
unpcklpd %xmm5, %xmm2 ## xmm2 = xmm2[0],xmm5[0]
movapd %xmm1, -12032(%rbp) ## 16-byte Spill
movddup %xmm1, %xmm0 ## xmm0 = xmm1[0,0]
movapd %xmm0, -16640(%rbp) ## 16-byte Spill
mulpd %xmm0, %xmm2
movapd %xmm5, %xmm0
unpcklpd %xmm4, %xmm0 ## xmm0 = xmm0[0],xmm4[0]
movapd %xmm3, -12016(%rbp) ## 16-byte Spill
movddup %xmm3, %xmm1 ## xmm1 = xmm3[0,0]
movapd %xmm1, -16624(%rbp) ## 16-byte Spill
mulpd %xmm1, %xmm0
movapd %xmm2, %xmm3
subpd %xmm0, %xmm2
movapd %xmm2, %xmm1
unpckhpd %xmm2, %xmm1 ## xmm1 = xmm1[1],xmm2[1]
movapd %xmm1, -48(%rbp) ## 16-byte Spill
mulsd LCPI19_11(%rip), %xmm1
addpd %xmm0, %xmm3
movddup %xmm3, %xmm0 ## xmm0 = xmm3[0,0]
mulpd LCPI19_12(%rip), %xmm0
movapd %xmm2, -1104(%rbp) ## 16-byte Spill
blendpd $2, %xmm2, %xmm1 ## xmm1 = xmm1[0],xmm2[1]
movapd %xmm0, -64(%rbp) ## 16-byte Spill
subpd %xmm0, %xmm1
movapd %xmm1, -112(%rbp) ## 16-byte Spill
testq %r15, %r15
xorpd %xmm1, %xmm1
je LBB19_10
## %bb.9:
movsd 160(%r15), %xmm1 ## xmm1 = mem[0],zero
LBB19_10:
movapd -1104(%rbp), %xmm0 ## 16-byte Reload
blendpd $1, %xmm3, %xmm0 ## xmm0 = xmm3[0],xmm0[1]
movapd %xmm0, -1104(%rbp) ## 16-byte Spill
movapd %xmm1, %xmm0
movapd %xmm3, -272(%rbp) ## 16-byte Spill
callq ___sincos_stret
movapd -272(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm1, %xmm3
movapd -112(%rbp), %xmm10 ## 16-byte Reload
movapd %xmm10, %xmm1
unpckhpd %xmm10, %xmm1 ## xmm1 = xmm1[1],xmm10[1]
movapd %xmm1, -2560(%rbp) ## 16-byte Spill
movapd %xmm3, -512(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm1
movsd LCPI19_13(%rip), %xmm5 ## xmm5 = mem[0],zero
movapd -48(%rbp), %xmm12 ## 16-byte Reload
mulsd %xmm12, %xmm5
movapd %xmm5, %xmm3
subsd %xmm11, %xmm3
movapd %xmm3, -3264(%rbp) ## 16-byte Spill
movapd %xmm0, -448(%rbp) ## 16-byte Spill
mulsd %xmm0, %xmm3
addsd %xmm1, %xmm3
movsd LCPI19_14(%rip), %xmm0 ## xmm0 = mem[0],zero
movapd %xmm3, -720(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm0
movapd %xmm10, %xmm1
movapd %xmm0, -1440(%rbp) ## 16-byte Spill
mulsd %xmm0, %xmm1
movapd %xmm1, %xmm9
movsd LCPI19_11(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm11, %xmm1
movsd LCPI19_15(%rip), %xmm13 ## xmm13 = mem[0],zero
mulsd %xmm12, %xmm13
addsd %xmm1, %xmm13
testq %r15, %r15
je LBB19_11
## %bb.12:
movsd 32(%r15), %xmm0 ## xmm0 = mem[0],zero
jmp LBB19_13
LBB19_11:
xorpd %xmm0, %xmm0
LBB19_13:
movsd -128(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
movsd LCPI19_16(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd -6112(%rbp), %xmm1 ## 16-byte Folded Reload
movsd %xmm1, -5304(%rbp) ## 8-byte Spill
movsd LCPI19_17(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd -6128(%rbp), %xmm4 ## 16-byte Folded Reload
addsd %xmm1, %xmm4
movapd %xmm4, -2320(%rbp) ## 16-byte Spill
addsd %xmm4, %xmm0
movsd LCPI19_18(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd -10352(%rbp), %xmm1 ## 16-byte Folded Reload
movsd LCPI19_19(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd -10368(%rbp), %xmm4 ## 16-byte Folded Reload
addsd %xmm1, %xmm4
movsd %xmm4, -368(%rbp) ## 8-byte Spill
movapd %xmm4, %xmm2
movsd %xmm0, -1824(%rbp) ## 8-byte Spill
addsd %xmm0, %xmm2
movsd LCPI19_20(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd -12032(%rbp), %xmm1 ## 16-byte Folded Reload
movsd %xmm1, -464(%rbp) ## 8-byte Spill
addsd %xmm1, %xmm2
movsd LCPI19_21(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm11, %xmm1
movsd LCPI19_22(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd %xmm12, %xmm4
addsd %xmm1, %xmm4
movsd %xmm4, -152(%rbp) ## 8-byte Spill
addsd %xmm4, %xmm2
movapd -64(%rbp), %xmm0 ## 16-byte Reload
blendpd $1, %xmm5, %xmm0 ## xmm0 = xmm5[0],xmm0[1]
movapd -1104(%rbp), %xmm4 ## 16-byte Reload
addpd %xmm0, %xmm4
movsd LCPI19_23(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm4, %xmm0
movapd %xmm4, %xmm5
unpckhpd %xmm4, %xmm5 ## xmm5 = xmm5[1],xmm4[1]
movsd LCPI19_24(%rip), %xmm1 ## xmm1 = mem[0],zero
movapd %xmm5, -2048(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm1
addsd %xmm0, %xmm1
movsd LCPI19_25(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm13, %xmm0
addsd %xmm1, %xmm0
movsd %xmm0, -288(%rbp) ## 8-byte Spill
movapd %xmm0, %xmm3
movsd %xmm2, -96(%rbp) ## 8-byte Spill
addsd %xmm2, %xmm3
movaps LCPI19_26(%rip), %xmm5 ## xmm5 = <u,2.7755575615628914E-17>
movhlps %xmm4, %xmm5 ## xmm5 = xmm4[1],xmm5[1]
movapd -448(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm2
movapd -512(%rbp), %xmm6 ## 16-byte Reload
unpcklpd %xmm6, %xmm2 ## xmm2 = xmm2[0],xmm6[0]
movaps %xmm5, %xmm0
movapd %xmm2, -400(%rbp) ## 16-byte Spill
mulpd %xmm2, %xmm0
movaps %xmm4, -1104(%rbp) ## 16-byte Spill
movhpd LCPI19_15(%rip), %xmm4 ## xmm4 = xmm4[0],mem[0]
unpcklpd %xmm1, %xmm6 ## xmm6 = xmm6[0],xmm1[0]
movapd %xmm4, -208(%rbp) ## 16-byte Spill
movapd %xmm4, %xmm1
movapd %xmm6, -256(%rbp) ## 16-byte Spill
mulpd %xmm6, %xmm1
movapd %xmm1, %xmm2
addpd %xmm0, %xmm2
movsd LCPI19_14(%rip), %xmm6 ## xmm6 = mem[0],zero
movapd %xmm2, %xmm4
mulsd %xmm6, %xmm4
addsd %xmm3, %xmm4
addsd LCPI19_27(%rip), %xmm4
subpd %xmm0, %xmm1
movsd LCPI19_28(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm4, %xmm7
movapd %xmm4, -2896(%rbp) ## 16-byte Spill
addsd %xmm4, %xmm7
movsd %xmm3, -496(%rbp) ## 8-byte Spill
subsd %xmm3, %xmm7
movapd %xmm13, %xmm0
movapd %xmm7, -1184(%rbp) ## 16-byte Spill
mulsd %xmm7, %xmm0
addsd %xmm0, %xmm9
movapd %xmm1, %xmm0
movapd %xmm2, -1264(%rbp) ## 16-byte Spill
blendpd $1, %xmm2, %xmm0 ## xmm0 = xmm2[0],xmm0[1]
movapd %xmm0, -64(%rbp) ## 16-byte Spill
movapd %xmm1, -1600(%rbp) ## 16-byte Spill
unpckhpd %xmm1, %xmm1 ## xmm1 = xmm1[1,1]
movapd %xmm1, -1504(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm1
movapd %xmm1, -14240(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm9
movapd %xmm9, -1856(%rbp) ## 16-byte Spill
testq %r15, %r15
movapd %xmm13, -528(%rbp) ## 16-byte Spill
movaps %xmm5, -912(%rbp) ## 16-byte Spill
je LBB19_14
## %bb.15:
movsd 72(%r15), %xmm4 ## xmm4 = mem[0],zero
addsd %xmm8, %xmm4
movsd 104(%r15), %xmm3 ## xmm3 = mem[0],zero
addsd %xmm4, %xmm3
movsd 136(%r15), %xmm6 ## xmm6 = mem[0],zero
addsd %xmm3, %xmm6
movsd 40(%r15), %xmm2 ## xmm2 = mem[0],zero
movsd 168(%r15), %xmm1 ## xmm1 = mem[0],zero
movapd %xmm6, %xmm0
movapd %xmm1, -688(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm0
movapd %xmm0, -592(%rbp) ## 16-byte Spill
jmp LBB19_16
LBB19_14:
xorpd %xmm1, %xmm1
movapd %xmm8, %xmm6
xorpd %xmm0, %xmm0
movapd %xmm0, -688(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm6
movapd %xmm6, %xmm3
movapd %xmm6, %xmm4
movapd %xmm6, -592(%rbp) ## 16-byte Spill
xorpd %xmm2, %xmm2
LBB19_16:
movsd %xmm4, -704(%rbp) ## 8-byte Spill
movapd %xmm6, -192(%rbp) ## 16-byte Spill
movsd %xmm3, -1200(%rbp) ## 8-byte Spill
movsd LCPI19_16(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd -6128(%rbp), %xmm0 ## 16-byte Folded Reload
movsd LCPI19_29(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd -6112(%rbp), %xmm1 ## 16-byte Folded Reload
movsd %xmm0, -5296(%rbp) ## 8-byte Spill
addsd %xmm0, %xmm1
movapd %xmm1, -3200(%rbp) ## 16-byte Spill
mulsd %xmm8, %xmm1
movapd %xmm1, -4160(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm2
movsd LCPI19_18(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd -10368(%rbp), %xmm0 ## 16-byte Folded Reload
movsd LCPI19_30(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd -10352(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movsd %xmm1, -344(%rbp) ## 8-byte Spill
mulsd %xmm4, %xmm1
movsd %xmm1, -3056(%rbp) ## 8-byte Spill
movapd %xmm2, -1952(%rbp) ## 16-byte Spill
addsd %xmm2, %xmm1
movsd LCPI19_20(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd -12016(%rbp), %xmm0 ## 16-byte Folded Reload
movsd %xmm0, -352(%rbp) ## 8-byte Spill
mulsd %xmm3, %xmm0
movsd %xmm1, -3328(%rbp) ## 8-byte Spill
movapd %xmm1, %xmm3
subsd %xmm0, %xmm3
movsd LCPI19_21(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm12, %xmm0
movsd LCPI19_31(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm11, %xmm1
addsd %xmm0, %xmm1
movsd %xmm1, -224(%rbp) ## 8-byte Spill
mulsd %xmm6, %xmm1
movsd %xmm1, -3048(%rbp) ## 8-byte Spill
movapd %xmm1, %xmm2
movsd %xmm3, -2728(%rbp) ## 8-byte Spill
addsd %xmm3, %xmm2
movsd LCPI19_23(%rip), %xmm0 ## xmm0 = mem[0],zero
movapd -2560(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm3, %xmm0
movsd LCPI19_24(%rip), %xmm1 ## xmm1 = mem[0],zero
movapd -3264(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm4, %xmm1
addsd %xmm0, %xmm1
movsd LCPI19_25(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm10, %xmm0
addsd %xmm1, %xmm0
movsd %xmm0, -408(%rbp) ## 8-byte Spill
mulsd %xmm6, %xmm0
movsd %xmm0, -3040(%rbp) ## 8-byte Spill
movsd %xmm2, -144(%rbp) ## 8-byte Spill
addsd %xmm2, %xmm0
movsd %xmm0, -48(%rbp) ## 8-byte Spill
mulsd -512(%rbp), %xmm4 ## 16-byte Folded Reload
movapd %xmm3, %xmm0
mulsd -448(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm4
movapd %xmm4, -608(%rbp) ## 16-byte Spill
movapd %xmm10, %xmm0
mulsd -1856(%rbp), %xmm0 ## 16-byte Folded Reload
movsd %xmm0, -272(%rbp) ## 8-byte Spill
movapd -2896(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm0, %xmm0
addsd LCPI19_32(%rip), %xmm0
sqrtsd %xmm0, %xmm0
movsd LCPI19_33(%rip), %xmm1 ## xmm1 = mem[0],zero
movsd %xmm0, -4832(%rbp) ## 8-byte Spill
movsd %xmm8, -128(%rbp) ## 8-byte Spill
callq _pow
movsd %xmm0, -1408(%rbp) ## 8-byte Spill
movapd -2896(%rbp), %xmm0 ## 16-byte Reload
mulsd LCPI19_34(%rip), %xmm0
callq _tanh
movsd %xmm0, -4112(%rbp) ## 8-byte Spill
movapd -2320(%rbp), %xmm0 ## 16-byte Reload
mulsd -128(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -776(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
subsd %xmm0, %xmm1
movsd -368(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -704(%rbp), %xmm0 ## 8-byte Folded Reload
movsd %xmm1, -72(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm1
movsd -464(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -1200(%rbp), %xmm0 ## 8-byte Folded Reload
movsd %xmm1, -2224(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm1
movsd -152(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movapd -192(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm2, %xmm0
movsd %xmm1, -176(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm1
movsd -288(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm2, %xmm0
movsd %xmm1, -760(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm1
movapd %xmm1, -5264(%rbp) ## 16-byte Spill
movapd -528(%rbp), %xmm0 ## 16-byte Reload
mulsd -1856(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm0, -640(%rbp) ## 16-byte Spill
movsd LCPI19_35(%rip), %xmm3 ## xmm3 = mem[0],zero
movsd -496(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
addsd %xmm1, %xmm3
movapd %xmm3, %xmm0
mulsd LCPI19_1(%rip), %xmm0
movapd %xmm3, %xmm2
movsd %xmm3, -1400(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm2
subsd %xmm1, %xmm2
movsd %xmm2, -1872(%rbp) ## 8-byte Spill
movapd -1264(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm2, %xmm0
movapd %xmm0, -2128(%rbp) ## 16-byte Spill
movapd -720(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm0, %xmm1
movapd %xmm1, -496(%rbp) ## 16-byte Spill
movapd %xmm3, %xmm0
mulsd %xmm3, %xmm0
addsd LCPI19_32(%rip), %xmm0
sqrtsd %xmm0, %xmm0
movsd %xmm0, -4848(%rbp) ## 8-byte Spill
movsd LCPI19_33(%rip), %xmm1 ## xmm1 = mem[0],zero
callq _pow
movsd %xmm0, -1152(%rbp) ## 8-byte Spill
movsd -1400(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd LCPI19_34(%rip), %xmm0
callq _tanh
movsd %xmm0, -4096(%rbp) ## 8-byte Spill
movapd -208(%rbp), %xmm0 ## 16-byte Reload
mulpd -400(%rbp), %xmm0 ## 16-byte Folded Reload
movapd -912(%rbp), %xmm4 ## 16-byte Reload
mulpd -256(%rbp), %xmm4 ## 16-byte Folded Reload
movapd %xmm4, %xmm3
subpd %xmm0, %xmm3
movapd %xmm3, -1248(%rbp) ## 16-byte Spill
addpd %xmm0, %xmm4
movapd %xmm4, %xmm0
blendpd $1, %xmm3, %xmm0 ## xmm0 = xmm3[0],xmm0[1]
movapd %xmm0, %xmm5
movapd %xmm0, -400(%rbp) ## 16-byte Spill
movapd -720(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -608(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
movddup -1440(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = mem[0,0]
mulpd %xmm0, %xmm1
movapd -1264(%rbp), %xmm2 ## 16-byte Reload
unpcklpd %xmm3, %xmm2 ## xmm2 = xmm2[0],xmm3[0]
movddup -1184(%rbp), %xmm3 ## 16-byte Folded Reload
## xmm3 = mem[0,0]
mulpd %xmm2, %xmm3
addpd %xmm1, %xmm3
movapd -1600(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm4, -912(%rbp) ## 16-byte Spill
unpckhpd %xmm4, %xmm1 ## xmm1 = xmm1[1],xmm4[1]
movddup -14240(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = mem[0,0]
movapd %xmm2, -6368(%rbp) ## 16-byte Spill
mulpd %xmm2, %xmm1
addpd %xmm3, %xmm1
mulpd %xmm1, %xmm0
movapd %xmm0, %xmm3
unpckhpd %xmm0, %xmm3 ## xmm3 = xmm3[1],xmm0[1]
addsd %xmm0, %xmm3
addsd -272(%rbp), %xmm3 ## 8-byte Folded Reload
movapd %xmm3, -1472(%rbp) ## 16-byte Spill
mulsd -592(%rbp), %xmm3 ## 16-byte Folded Reload
movapd -112(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -528(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
movddup -688(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = mem[0,0]
movapd %xmm0, -16704(%rbp) ## 16-byte Spill
mulpd %xmm0, %xmm2
movapd %xmm2, %xmm4
movapd %xmm2, -272(%rbp) ## 16-byte Spill
movddup %xmm1, %xmm2 ## xmm2 = xmm1[0,0]
movapd -64(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm2, -16688(%rbp) ## 16-byte Spill
mulpd %xmm2, %xmm0
movapd %xmm1, -1600(%rbp) ## 16-byte Spill
movapd %xmm1, %xmm2
unpckhpd %xmm1, %xmm2 ## xmm2 = xmm2[1],xmm1[1]
movapd %xmm5, %xmm1
movapd %xmm2, -1920(%rbp) ## 16-byte Spill
mulpd %xmm2, %xmm1
addpd %xmm0, %xmm1
movapd -640(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -1856(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
addpd %xmm1, %xmm0
movapd %xmm0, -640(%rbp) ## 16-byte Spill
unpckhpd %xmm0, %xmm0 ## xmm0 = xmm0[1,1]
movapd %xmm0, -1840(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm0
subsd %xmm0, %xmm3
addsd -48(%rbp), %xmm3 ## 8-byte Folded Reload
movsd LCPI19_36(%rip), %xmm0 ## xmm0 = mem[0],zero
movapd %xmm3, -208(%rbp) ## 16-byte Spill
subsd %xmm3, %xmm0
mulsd LCPI19_37(%rip), %xmm0
callq _tanh
movsd %xmm0, -456(%rbp) ## 8-byte Spill
movapd -640(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm3
movapd -272(%rbp), %xmm1 ## 16-byte Reload
mulpd %xmm1, %xmm3
movapd -1472(%rbp), %xmm0 ## 16-byte Reload
unpcklpd %xmm2, %xmm0 ## xmm0 = xmm0[0],xmm2[0]
movapd %xmm1, %xmm2
movapd %xmm1, %xmm4
movapd -592(%rbp), %xmm6 ## 16-byte Reload
shufpd $1, %xmm6, %xmm2 ## xmm2 = xmm2[1],xmm6[0]
movapd %xmm2, -256(%rbp) ## 16-byte Spill
mulpd %xmm2, %xmm0
subpd %xmm0, %xmm3
movddup -5264(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = mem[0,0]
movapd %xmm1, -2944(%rbp) ## 16-byte Spill
movapd %xmm3, %xmm0
addpd %xmm1, %xmm0
movapd %xmm0, -3888(%rbp) ## 16-byte Spill
movapd -528(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm2
movsd -1872(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm0, %xmm2
movapd -1248(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm0, %xmm1
movapd -608(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm1, %xmm0
addsd -496(%rbp), %xmm0 ## 16-byte Folded Reload
movapd -112(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm2, %xmm7
addsd %xmm0, %xmm7
movapd %xmm7, -1456(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm7
movddup -2128(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = mem[0,0]
movapd %xmm0, -11024(%rbp) ## 16-byte Spill
movapd -64(%rbp), %xmm6 ## 16-byte Reload
mulpd %xmm0, %xmm6
movapd %xmm1, -2432(%rbp) ## 16-byte Spill
movddup %xmm1, %xmm0 ## xmm0 = xmm1[0,0]
movapd %xmm0, -11040(%rbp) ## 16-byte Spill
movapd -400(%rbp), %xmm1 ## 16-byte Reload
mulpd %xmm0, %xmm1
addpd %xmm6, %xmm1
movapd %xmm5, %xmm0
mulsd %xmm2, %xmm0
movapd %xmm2, -2400(%rbp) ## 16-byte Spill
unpcklpd %xmm2, %xmm0 ## xmm0 = xmm0[0],xmm2[0]
addpd %xmm1, %xmm0
movapd %xmm0, -1232(%rbp) ## 16-byte Spill
unpckhpd %xmm0, %xmm0 ## xmm0 = xmm0[1,1]
movapd %xmm0, -2624(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm0
subsd %xmm0, %xmm7
movapd %xmm3, -6688(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm3
movapd %xmm3, -400(%rbp) ## 16-byte Spill
addsd -48(%rbp), %xmm7 ## 8-byte Folded Reload
movsd LCPI19_36(%rip), %xmm0 ## xmm0 = mem[0],zero
movapd %xmm7, -64(%rbp) ## 16-byte Spill
subsd %xmm7, %xmm0
mulsd LCPI19_37(%rip), %xmm0
callq _tanh
movapd -1232(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm2
mulpd -272(%rbp), %xmm2 ## 16-byte Folded Reload
movapd -1456(%rbp), %xmm1 ## 16-byte Reload
unpcklpd %xmm3, %xmm1 ## xmm1 = xmm1[0],xmm3[0]
mulpd -256(%rbp), %xmm1 ## 16-byte Folded Reload
subpd %xmm1, %xmm2
movapd -2944(%rbp), %xmm3 ## 16-byte Reload
addpd %xmm2, %xmm3
movapd %xmm2, -5120(%rbp) ## 16-byte Spill
movapd %xmm2, %xmm1
mulsd %xmm2, %xmm1
unpcklpd -400(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = xmm1[0],mem[0]
movapd %xmm3, -2944(%rbp) ## 16-byte Spill
movapd %xmm3, %xmm2
unpckhpd -3888(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = xmm2[1],mem[1]
mulpd %xmm2, %xmm2
addpd %xmm1, %xmm2
addpd LCPI19_38(%rip), %xmm2
sqrtpd %xmm2, %xmm13
movapd %xmm13, -3168(%rbp) ## 16-byte Spill
movapd %xmm13, %xmm5
divpd LCPI19_39(%rip), %xmm5
movapd %xmm5, %xmm2
cmplepd LCPI19_40(%rip), %xmm2
movapd %xmm0, %xmm12
movmskpd %xmm2, %eax
movsd LCPI19_0(%rip), %xmm4 ## xmm4 = mem[0],zero
movsd %xmm4, -4784(%rbp) ## 8-byte Spill
testb $2, %al
jne LBB19_18
## %bb.17:
xorpd %xmm1, %xmm1
movsd %xmm1, -4784(%rbp) ## 8-byte Spill
LBB19_18:
movapd %xmm4, %xmm0
testb $1, %al
jne LBB19_20
## %bb.19:
xorpd %xmm0, %xmm0
LBB19_20:
movsd %xmm0, -4144(%rbp) ## 8-byte Spill
movsd -1408(%rbp), %xmm15 ## 8-byte Reload
## xmm15 = mem[0],zero
mulsd LCPI19_41(%rip), %xmm15
movsd -4112(%rbp), %xmm11 ## 8-byte Reload
## xmm11 = mem[0],zero
movsd LCPI19_1(%rip), %xmm8 ## xmm8 = mem[0],zero
mulsd %xmm8, %xmm11
addsd %xmm8, %xmm11
movapd %xmm8, %xmm10
movapd -3200(%rbp), %xmm14 ## 16-byte Reload
addsd -344(%rbp), %xmm14 ## 8-byte Folded Reload
movapd %xmm14, -9536(%rbp) ## 16-byte Spill
subsd -352(%rbp), %xmm14 ## 8-byte Folded Reload
movapd %xmm14, -9552(%rbp) ## 16-byte Spill
addsd -224(%rbp), %xmm14 ## 8-byte Folded Reload
movapd %xmm14, -6976(%rbp) ## 16-byte Spill
addsd -408(%rbp), %xmm14 ## 8-byte Folded Reload
movapd %xmm14, -4256(%rbp) ## 16-byte Spill
movsd -456(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
movapd %xmm9, %xmm2
mulsd %xmm9, %xmm2
movapd %xmm4, %xmm0
subsd %xmm2, %xmm0
movsd LCPI19_43(%rip), %xmm2 ## xmm2 = mem[0],zero
movapd -208(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm2, %xmm7
movapd %xmm2, %xmm6
movapd %xmm4, %xmm3
subsd %xmm7, %xmm4
movapd %xmm12, %xmm14
movapd %xmm12, %xmm2
mulsd %xmm12, %xmm2
movapd %xmm3, %xmm7
subsd %xmm2, %xmm7
movsd %xmm7, -3760(%rbp) ## 8-byte Spill
movapd -64(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm6, %xmm2
movapd %xmm3, %xmm6
subsd %xmm2, %xmm6
movsd %xmm6, -1712(%rbp) ## 8-byte Spill
movapd %xmm5, %xmm2
unpckhpd %xmm5, %xmm2 ## xmm2 = xmm2[1],xmm5[1]
minsd %xmm3, %xmm2
mulsd LCPI19_44(%rip), %xmm2
movapd -3168(%rbp), %xmm8 ## 16-byte Reload
unpckhpd %xmm8, %xmm8 ## xmm8 = xmm8[1,1]
movapd %xmm8, %xmm13
movapd %xmm10, %xmm3
mulsd %xmm10, %xmm13
addsd %xmm2, %xmm13
movapd -2320(%rbp), %xmm2 ## 16-byte Reload
addsd -368(%rbp), %xmm2 ## 8-byte Folded Reload
movapd %xmm2, -6896(%rbp) ## 16-byte Spill
addsd -464(%rbp), %xmm2 ## 8-byte Folded Reload
movapd %xmm2, -208(%rbp) ## 16-byte Spill
movapd %xmm2, %xmm10
addsd -152(%rbp), %xmm10 ## 8-byte Folded Reload
movapd %xmm10, -6960(%rbp) ## 16-byte Spill
addsd -288(%rbp), %xmm10 ## 8-byte Folded Reload
movapd %xmm10, -6160(%rbp) ## 16-byte Spill
movapd %xmm15, %xmm2
movsd %xmm4, -5104(%rbp) ## 8-byte Spill
mulsd %xmm4, %xmm2
movapd %xmm5, %xmm12
movapd %xmm11, %xmm5
movsd %xmm2, -5072(%rbp) ## 8-byte Spill
mulsd %xmm2, %xmm5
movapd -3888(%rbp), %xmm6 ## 16-byte Reload
unpckhpd %xmm6, %xmm6 ## xmm6 = xmm6[1,1]
movapd %xmm10, %xmm7
addsd -640(%rbp), %xmm7 ## 16-byte Folded Reload
movapd %xmm3, %xmm10
mulsd %xmm3, %xmm9
addsd %xmm3, %xmm9
movapd %xmm7, %xmm2
divsd %xmm8, %xmm2
movapd %xmm6, %xmm1
mulsd %xmm2, %xmm1
movapd %xmm1, %xmm3
mulsd %xmm13, %xmm3
addsd -4256(%rbp), %xmm3 ## 16-byte Folded Reload
addsd -1472(%rbp), %xmm3 ## 16-byte Folded Reload
movapd %xmm5, %xmm4
mulsd %xmm3, %xmm4
mulsd %xmm10, %xmm4
movsd %xmm0, -5760(%rbp) ## 8-byte Spill
mulsd %xmm0, %xmm4
movsd LCPI19_37(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm4
mulsd %xmm9, %xmm3
movsd %xmm11, -1704(%rbp) ## 8-byte Spill
movapd %xmm3, -4752(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm11
movsd %xmm15, -1408(%rbp) ## 8-byte Spill
movsd %xmm11, -2672(%rbp) ## 8-byte Spill
mulsd %xmm11, %xmm15
mulsd LCPI19_43(%rip), %xmm15
addsd %xmm4, %xmm15
movsd %xmm9, -456(%rbp) ## 8-byte Spill
movapd %xmm9, %xmm11
movsd %xmm5, -5728(%rbp) ## 8-byte Spill
mulsd %xmm5, %xmm11
movapd %xmm11, %xmm0
movapd %xmm13, -6624(%rbp) ## 16-byte Spill
mulsd %xmm13, %xmm0
movapd %xmm6, %xmm9
mulsd %xmm0, %xmm9
divsd %xmm8, %xmm9
movapd %xmm9, %xmm4
divsd %xmm8, %xmm4
movapd %xmm4, -5856(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm7
mulsd %xmm11, %xmm1
movapd %xmm1, %xmm4
mulsd %xmm10, %xmm4
subsd %xmm4, %xmm7
mulsd LCPI19_44(%rip), %xmm1
mulsd -4784(%rbp), %xmm1 ## 8-byte Folded Reload
mulsd LCPI19_45(%rip), %xmm1
subsd %xmm1, %xmm7
movapd -272(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm5
unpckhpd %xmm1, %xmm5 ## xmm5 = xmm5[1],xmm1[1]
movapd -6688(%rbp), %xmm3 ## 16-byte Reload
addsd %xmm3, %xmm3
movapd -592(%rbp), %xmm10 ## 16-byte Reload
movapd %xmm10, %xmm13
movapd %xmm10, -592(%rbp) ## 16-byte Spill
mulsd %xmm15, %xmm13
movapd %xmm8, -6608(%rbp) ## 16-byte Spill
addsd %xmm8, %xmm8
movapd %xmm8, -5840(%rbp) ## 16-byte Spill
divsd %xmm8, %xmm7
movapd %xmm3, -4208(%rbp) ## 16-byte Spill
movapd %xmm3, %xmm4
mulsd %xmm7, %xmm4
movapd %xmm5, %xmm3
movapd %xmm5, -400(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm3
movapd %xmm4, -2864(%rbp) ## 16-byte Spill
subsd %xmm3, %xmm13
movapd %xmm6, -3888(%rbp) ## 16-byte Spill
addsd %xmm6, %xmm6
movapd %xmm6, -5248(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm7
movsd %xmm0, -5776(%rbp) ## 8-byte Spill
mulsd %xmm0, %xmm2
subsd %xmm2, %xmm7
movapd %xmm1, %xmm0
mulsd %xmm4, %xmm0
mulsd %xmm7, %xmm10
subsd %xmm10, %xmm0
movapd %xmm5, %xmm3
movapd %xmm7, -8960(%rbp) ## 16-byte Spill
mulsd %xmm7, %xmm3
movapd %xmm1, %xmm2
mulsd %xmm15, %xmm2
subsd %xmm2, %xmm3
movapd -528(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm2
movapd %xmm0, -880(%rbp) ## 16-byte Spill
mulsd %xmm0, %xmm2
movapd %xmm1, %xmm0
movapd %xmm9, -5792(%rbp) ## 16-byte Spill
mulsd %xmm9, %xmm0
movapd %xmm0, -7904(%rbp) ## 16-byte Spill
subsd %xmm0, %xmm2
movapd %xmm3, -14208(%rbp) ## 16-byte Spill
addsd %xmm3, %xmm2
movapd -112(%rbp), %xmm10 ## 16-byte Reload
movapd %xmm10, %xmm6
mulsd %xmm13, %xmm6
addsd %xmm2, %xmm6
movapd -1856(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm0
mulsd %xmm11, %xmm0
movapd %xmm10, %xmm2
movsd %xmm11, -3152(%rbp) ## 8-byte Spill
mulsd %xmm11, %xmm2
movapd %xmm13, -9408(%rbp) ## 16-byte Spill
mulsd %xmm13, %xmm3
movapd %xmm0, -8848(%rbp) ## 16-byte Spill
subsd %xmm0, %xmm3
movsd %xmm2, -2688(%rbp) ## 8-byte Spill
subsd %xmm2, %xmm6
movapd -1440(%rbp), %xmm8 ## 16-byte Reload
movsd %xmm6, -5328(%rbp) ## 8-byte Spill
mulsd %xmm6, %xmm8
addsd %xmm3, %xmm8
minsd LCPI19_0(%rip), %xmm12
movsd -1152(%rbp), %xmm11 ## 8-byte Reload
## xmm11 = mem[0],zero
mulsd LCPI19_42(%rip), %xmm11
movsd LCPI19_44(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm12
movapd -3168(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm7
movsd LCPI19_1(%rip), %xmm6 ## xmm6 = mem[0],zero
mulsd %xmm6, %xmm7
addsd %xmm12, %xmm7
movsd -4096(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm6, %xmm5
addsd %xmm6, %xmm5
movapd %xmm11, %xmm1
mulsd -1712(%rbp), %xmm1 ## 8-byte Folded Reload
movapd %xmm5, %xmm9
movsd %xmm1, -5088(%rbp) ## 8-byte Spill
mulsd %xmm1, %xmm9
movapd -2944(%rbp), %xmm13 ## 16-byte Reload
unpckhpd %xmm13, %xmm13 ## xmm13 = xmm13[1,1]
movapd -6160(%rbp), %xmm3 ## 16-byte Reload
addsd -1232(%rbp), %xmm3 ## 16-byte Folded Reload
mulsd %xmm6, %xmm14
addsd %xmm6, %xmm14
movapd %xmm3, %xmm0
divsd %xmm4, %xmm0
movapd %xmm13, %xmm2
movapd %xmm0, -4688(%rbp) ## 16-byte Spill
mulsd %xmm0, %xmm2
movapd %xmm2, %xmm0
mulsd %xmm7, %xmm0
addsd -4256(%rbp), %xmm0 ## 16-byte Folded Reload
addsd -1456(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm9, %xmm1
mulsd %xmm0, %xmm1
mulsd %xmm6, %xmm1
mulsd -3760(%rbp), %xmm1 ## 8-byte Folded Reload
mulsd LCPI19_37(%rip), %xmm1
mulsd %xmm14, %xmm0
movsd %xmm5, -4816(%rbp) ## 8-byte Spill
movapd %xmm0, -336(%rbp) ## 16-byte Spill
mulsd %xmm0, %xmm5
movsd %xmm11, -1152(%rbp) ## 8-byte Spill
movsd %xmm5, -5024(%rbp) ## 8-byte Spill
mulsd %xmm5, %xmm11
mulsd LCPI19_43(%rip), %xmm11
addsd %xmm1, %xmm11
movsd %xmm14, -1384(%rbp) ## 8-byte Spill
movsd %xmm9, -4768(%rbp) ## 8-byte Spill
mulsd %xmm9, %xmm14
movapd %xmm14, %xmm1
movsd %xmm14, -536(%rbp) ## 8-byte Spill
movapd %xmm7, -6592(%rbp) ## 16-byte Spill
mulsd %xmm7, %xmm1
movapd %xmm13, -2944(%rbp) ## 16-byte Spill
movapd %xmm13, %xmm7
movsd %xmm1, -1696(%rbp) ## 8-byte Spill
mulsd %xmm1, %xmm7
divsd %xmm4, %xmm7
movapd %xmm7, -7920(%rbp) ## 16-byte Spill
movapd %xmm7, %xmm1
divsd %xmm4, %xmm1
movapd %xmm4, %xmm5
movapd %xmm1, -5824(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm3
mulsd %xmm14, %xmm2
movapd %xmm2, %xmm1
mulsd %xmm6, %xmm1
subsd %xmm1, %xmm3
mulsd LCPI19_44(%rip), %xmm2
mulsd -4144(%rbp), %xmm2 ## 8-byte Folded Reload
mulsd LCPI19_45(%rip), %xmm2
subsd %xmm2, %xmm3
movapd -5120(%rbp), %xmm0 ## 16-byte Reload
addsd %xmm0, %xmm0
movapd -592(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm2
mulsd %xmm11, %xmm2
movapd %xmm5, %xmm1
addsd %xmm5, %xmm1
movapd %xmm1, -5808(%rbp) ## 16-byte Spill
divsd %xmm1, %xmm3
movapd %xmm0, -4288(%rbp) ## 16-byte Spill
movapd %xmm0, %xmm14
movapd %xmm3, -6720(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm14
movapd -400(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm0
mulsd %xmm14, %xmm0
subsd %xmm0, %xmm2
movapd -2400(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm2, -800(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm0
addsd %xmm8, %xmm0
movapd %xmm0, -64(%rbp) ## 16-byte Spill
movapd -912(%rbp), %xmm12 ## 16-byte Reload
unpckhpd %xmm12, %xmm12 ## xmm12 = xmm12[1,1]
movsd LCPI19_47(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd -1504(%rbp), %xmm1 ## 16-byte Folded Reload
movapd -1248(%rbp), %xmm2 ## 16-byte Reload
unpcklpd -720(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = xmm2[0],mem[0]
mulpd LCPI19_52(%rip), %xmm2
movsd LCPI19_48(%rip), %xmm13 ## xmm13 = mem[0],zero
mulsd %xmm12, %xmm13
movapd -1264(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm3
movapd %xmm0, %xmm7
unpcklpd -608(%rbp), %xmm3 ## 16-byte Folded Reload
## xmm3 = xmm3[0],mem[0]
mulpd LCPI19_53(%rip), %xmm3
addsd %xmm1, %xmm13
addpd %xmm2, %xmm3
movapd -528(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm2
unpcklpd %xmm10, %xmm2 ## xmm2 = xmm2[0],xmm10[0]
movapd LCPI19_51(%rip), %xmm1 ## xmm1 = [-1.6832259441076958E-2,-1.6832259441076958E-2]
movapd %xmm2, -11392(%rbp) ## 16-byte Spill
mulpd %xmm2, %xmm1
addpd %xmm1, %xmm3
movapd %xmm3, %xmm2
movapd -272(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm5, %xmm2
movapd %xmm3, %xmm1
unpckhpd %xmm3, %xmm1 ## xmm1 = xmm1[1],xmm3[1]
movapd %xmm1, -2640(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm1
subsd %xmm1, %xmm2
addsd LCPI19_49(%rip), %xmm13
movsd LCPI19_50(%rip), %xmm0 ## xmm0 = mem[0],zero
movapd -6160(%rbp), %xmm10 ## 16-byte Reload
mulsd %xmm0, %xmm10
movapd %xmm0, %xmm8
movapd %xmm10, %xmm0
mulsd %xmm4, %xmm0
movsd %xmm13, -1552(%rbp) ## 8-byte Spill
movapd %xmm0, -12048(%rbp) ## 16-byte Spill
mulsd %xmm0, %xmm13
movapd -4256(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm8, %xmm0
movapd %xmm0, %xmm4
movapd %xmm2, -2256(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm4
addsd %xmm13, %xmm4
movapd %xmm10, -6160(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm10
movapd %xmm0, -4256(%rbp) ## 16-byte Spill
movapd %xmm0, %xmm2
mulsd %xmm5, %xmm2
addsd %xmm10, %xmm2
movapd %xmm3, -2816(%rbp) ## 16-byte Spill
movapd %xmm3, %xmm1
movapd %xmm2, -3344(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm1
addsd %xmm4, %xmm1
movapd -640(%rbp), %xmm2 ## 16-byte Reload
mulsd -2864(%rbp), %xmm2 ## 16-byte Folded Reload
subsd %xmm1, %xmm2
movapd -1840(%rbp), %xmm1 ## 16-byte Reload
movsd %xmm15, -9872(%rbp) ## 8-byte Spill
mulsd %xmm15, %xmm1
subsd %xmm1, %xmm2
movapd -1232(%rbp), %xmm10 ## 16-byte Reload
movapd %xmm14, -2848(%rbp) ## 16-byte Spill
mulsd %xmm14, %xmm10
addsd %xmm2, %xmm10
movapd -2624(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm11, -9456(%rbp) ## 16-byte Spill
mulsd %xmm11, %xmm1
subsd %xmm1, %xmm10
movsd LCPI19_54(%rip), %xmm8 ## xmm8 = mem[0],zero
movapd %xmm7, %xmm1
mulsd %xmm8, %xmm1
movsd LCPI19_55(%rip), %xmm6 ## xmm6 = mem[0],zero
movapd -1248(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm2
mulsd %xmm6, %xmm2
subsd %xmm2, %xmm1
movsd LCPI19_56(%rip), %xmm2 ## xmm2 = mem[0],zero
movapd %xmm9, %xmm15
mulsd %xmm2, %xmm15
addsd %xmm1, %xmm15
movapd %xmm7, %xmm1
mulsd %xmm6, %xmm1
movsd LCPI19_57(%rip), %xmm11 ## xmm11 = mem[0],zero
movapd %xmm0, %xmm3
mulsd %xmm11, %xmm3
subsd %xmm1, %xmm3
movsd LCPI19_58(%rip), %xmm1 ## xmm1 = mem[0],zero
movapd %xmm9, %xmm14
mulsd %xmm1, %xmm14
addsd %xmm3, %xmm14
movapd %xmm7, %xmm3
mulsd %xmm15, %xmm3
mulsd %xmm14, %xmm0
addsd %xmm3, %xmm0
movapd -1504(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm4
mulsd %xmm8, %xmm4
movapd %xmm12, %xmm3
mulsd %xmm6, %xmm3
subsd %xmm3, %xmm4
addsd %xmm2, %xmm4
movapd %xmm5, %xmm3
movapd %xmm5, %xmm2
mulsd %xmm6, %xmm2
movapd %xmm12, %xmm6
mulsd %xmm11, %xmm6
subsd %xmm2, %xmm6
addsd %xmm1, %xmm6
movapd %xmm5, %xmm1
mulsd %xmm4, %xmm1
movapd %xmm12, %xmm2
mulsd %xmm6, %xmm2
addsd %xmm1, %xmm2
movapd %xmm0, %xmm1
movapd %xmm2, -6752(%rbp) ## 16-byte Spill
addsd %xmm2, %xmm1
movsd LCPI19_59(%rip), %xmm8 ## xmm8 = mem[0],zero
subsd %xmm1, %xmm8
movapd -720(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm1
movapd %xmm15, -7936(%rbp) ## 16-byte Spill
mulsd %xmm15, %xmm1
movapd -608(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm2
movapd %xmm14, -7952(%rbp) ## 16-byte Spill
mulsd %xmm14, %xmm2
addsd %xmm1, %xmm2
movsd LCPI19_62(%rip), %xmm14 ## xmm14 = mem[0],zero
movapd %xmm12, -912(%rbp) ## 16-byte Spill
movapd %xmm12, %xmm5
mulsd %xmm14, %xmm5
movsd LCPI19_63(%rip), %xmm13 ## xmm13 = mem[0],zero
movapd %xmm3, %xmm1
mulsd %xmm13, %xmm1
subsd %xmm1, %xmm5
addsd %xmm2, %xmm5
movapd %xmm5, %xmm1
movapd -272(%rbp), %xmm15 ## 16-byte Reload
mulsd %xmm15, %xmm1
addsd LCPI19_60(%rip), %xmm0
movapd %xmm0, -8160(%rbp) ## 16-byte Spill
movapd %xmm0, %xmm2
movapd -400(%rbp), %xmm11 ## 16-byte Reload
mulsd %xmm11, %xmm2
addsd %xmm1, %xmm2
movapd -1264(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm4, -6880(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm1
movapd -1248(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm6, -6864(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm3
addsd %xmm1, %xmm3
mulsd %xmm14, %xmm7
mulsd %xmm13, %xmm9
movapd -64(%rbp), %xmm13 ## 16-byte Reload
subsd %xmm9, %xmm7
addsd %xmm3, %xmm7
movapd %xmm7, -6816(%rbp) ## 16-byte Spill
mulsd -592(%rbp), %xmm7 ## 16-byte Folded Reload
addsd %xmm2, %xmm7
movsd -280(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
movapd %xmm2, %xmm1
mulsd -3200(%rbp), %xmm1 ## 16-byte Folded Reload
addsd -4032(%rbp), %xmm1 ## 8-byte Folded Reload
movsd -72(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
movapd %xmm12, %xmm0
subsd -776(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -128(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
movsd %xmm0, -3536(%rbp) ## 8-byte Spill
mulsd %xmm0, %xmm6
addsd %xmm1, %xmm6
movapd -2400(%rbp), %xmm0 ## 16-byte Reload
mulsd -536(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm0, -8816(%rbp) ## 16-byte Spill
subsd %xmm0, %xmm13
addsd LCPI19_46(%rip), %xmm13
addsd LCPI19_60(%rip), %xmm8
movsd LCPI19_61(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm11, %xmm0
movsd %xmm8, -4432(%rbp) ## 8-byte Spill
movsd %xmm0, -2720(%rbp) ## 8-byte Spill
mulsd %xmm0, %xmm8
movsd %xmm8, -8832(%rbp) ## 8-byte Spill
addsd %xmm8, %xmm10
movapd %xmm7, -14352(%rbp) ## 16-byte Spill
movsd LCPI19_50(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm7
movapd %xmm7, -7888(%rbp) ## 16-byte Spill
addsd %xmm7, %xmm10
mulsd %xmm0, %xmm15
movapd %xmm5, -6640(%rbp) ## 16-byte Spill
movapd %xmm5, %xmm0
mulsd %xmm15, %xmm0
movapd %xmm0, -9424(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm10
movapd -688(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm10, -16672(%rbp) ## 16-byte Spill
mulsd %xmm10, %xmm7
testq %r15, %r15
je LBB19_21
## %bb.22:
movsd 208(%r15), %xmm4 ## xmm4 = mem[0],zero
addsd %xmm2, %xmm4
movsd -344(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm4, %xmm1
addsd %xmm6, %xmm1
movsd -176(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
movapd %xmm8, %xmm3
movsd -2224(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
subsd %xmm0, %xmm3
subsd %xmm12, %xmm0
movsd -704(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm0, %xmm2
addsd %xmm1, %xmm2
movsd 224(%r15), %xmm5 ## xmm5 = mem[0],zero
movsd %xmm4, -1280(%rbp) ## 8-byte Spill
addsd %xmm4, %xmm5
movsd -352(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm5, %xmm1
movsd %xmm2, -1760(%rbp) ## 8-byte Spill
subsd %xmm1, %xmm2
movsd -1200(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movsd %xmm3, -2216(%rbp) ## 8-byte Spill
mulsd %xmm3, %xmm1
addsd %xmm2, %xmm1
movsd 240(%r15), %xmm4 ## xmm4 = mem[0],zero
jmp LBB19_23
LBB19_21:
xorpd %xmm4, %xmm4
movapd %xmm2, %xmm5
addsd %xmm4, %xmm5
movapd %xmm5, %xmm1
mulsd -344(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm6, %xmm1
movsd -176(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
movapd %xmm8, %xmm3
movsd -2224(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
subsd %xmm0, %xmm3
subsd %xmm12, %xmm0
movsd -704(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm0, %xmm2
addsd %xmm1, %xmm2
movapd %xmm5, %xmm1
mulsd -352(%rbp), %xmm1 ## 8-byte Folded Reload
movsd %xmm2, -1760(%rbp) ## 8-byte Spill
subsd %xmm1, %xmm2
movsd -1200(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movsd %xmm3, -2216(%rbp) ## 8-byte Spill
mulsd %xmm3, %xmm1
addsd %xmm2, %xmm1
movsd %xmm5, -1280(%rbp) ## 8-byte Spill
LBB19_23:
movsd %xmm6, -4640(%rbp) ## 8-byte Spill
movsd %xmm0, -2224(%rbp) ## 8-byte Spill
movapd %xmm15, -5184(%rbp) ## 16-byte Spill
addsd %xmm13, %xmm7
movapd %xmm7, -176(%rbp) ## 16-byte Spill
movsd %xmm5, -2800(%rbp) ## 8-byte Spill
addsd %xmm5, %xmm4
movsd -224(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm4, %xmm0
movsd %xmm1, -1112(%rbp) ## 8-byte Spill
addsd %xmm1, %xmm0
movsd -760(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
movapd %xmm5, %xmm1
subsd %xmm8, %xmm1
movapd -192(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm2
movsd %xmm1, -3544(%rbp) ## 8-byte Spill
mulsd %xmm1, %xmm2
addsd %xmm0, %xmm2
movsd -408(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm4, -480(%rbp) ## 8-byte Spill
mulsd %xmm4, %xmm0
movapd %xmm2, -4128(%rbp) ## 16-byte Spill
addsd %xmm2, %xmm0
movapd -5264(%rbp), %xmm2 ## 16-byte Reload
subsd %xmm5, %xmm2
movapd %xmm3, %xmm1
movapd %xmm2, -5264(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm1
addsd %xmm0, %xmm1
movapd %xmm1, -12064(%rbp) ## 16-byte Spill
xorpd %xmm0, %xmm0
movsd %xmm0, -64(%rbp) ## 8-byte Spill
xorpd %xmm0, %xmm0
testq %r15, %r15
je LBB19_25
## %bb.24:
movsd 256(%r15), %xmm0 ## xmm0 = mem[0],zero
LBB19_25:
movsd %xmm0, -2336(%rbp) ## 8-byte Spill
movapd -1104(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm5
movapd %xmm0, %xmm4
mulsd LCPI19_64(%rip), %xmm5
movsd -96(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
addsd %xmm3, %xmm5
addsd LCPI19_65(%rip), %xmm5
movapd %xmm5, %xmm0
movsd LCPI19_1(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
movapd %xmm5, %xmm2
movsd %xmm5, -2496(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm2
subsd %xmm3, %xmm2
movapd %xmm2, -2352(%rbp) ## 16-byte Spill
movapd -112(%rbp), %xmm0 ## 16-byte Reload
unpckhpd %xmm4, %xmm0 ## xmm0 = xmm0[1],xmm4[1]
movapd LCPI19_66(%rip), %xmm1 ## xmm1 = <4.0000000000000001E-3,u>
unpcklpd %xmm2, %xmm1 ## xmm1 = xmm1[0],xmm2[0]
mulpd %xmm0, %xmm1
movapd %xmm1, -2576(%rbp) ## 16-byte Spill
movapd -3264(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm0
mulsd %xmm1, %xmm0
unpckhpd %xmm1, %xmm1 ## xmm1 = xmm1[1,1]
addsd %xmm0, %xmm1
movapd %xmm1, -2912(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm3
movapd %xmm3, -48(%rbp) ## 16-byte Spill
movapd %xmm5, %xmm0
mulsd %xmm5, %xmm0
movsd LCPI19_32(%rip), %xmm1 ## xmm1 = mem[0],zero
addsd %xmm1, %xmm0
sqrtsd %xmm0, %xmm0
movsd LCPI19_33(%rip), %xmm1 ## xmm1 = mem[0],zero
movsd %xmm0, -648(%rbp) ## 8-byte Spill
callq _pow
movsd %xmm0, -3856(%rbp) ## 8-byte Spill
movsd LCPI19_34(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd -2496(%rbp), %xmm0 ## 8-byte Folded Reload
callq _tanh
movsd %xmm0, -1352(%rbp) ## 8-byte Spill
movddup -2576(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = mem[0,0]
movapd -112(%rbp), %xmm2 ## 16-byte Reload
mulpd %xmm2, %xmm0
movapd -528(%rbp), %xmm1 ## 16-byte Reload
unpcklpd -1104(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = xmm1[0],mem[0]
movapd %xmm1, -96(%rbp) ## 16-byte Spill
movddup -2352(%rbp), %xmm3 ## 16-byte Folded Reload
## xmm3 = mem[0,0]
mulpd %xmm1, %xmm3
addpd %xmm0, %xmm3
movapd %xmm3, %xmm1
movapd %xmm3, -2544(%rbp) ## 16-byte Spill
unpckhpd %xmm3, %xmm1 ## xmm1 = xmm1[1],xmm3[1]
movapd %xmm1, -3248(%rbp) ## 16-byte Spill
movapd -2560(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm1, %xmm0
addsd -48(%rbp), %xmm0 ## 16-byte Folded Reload
mulsd %xmm3, %xmm2
addsd %xmm0, %xmm2
movapd %xmm2, -3840(%rbp) ## 16-byte Spill
mulsd -192(%rbp), %xmm2 ## 16-byte Folded Reload
addsd -144(%rbp), %xmm2 ## 8-byte Folded Reload
movapd %xmm2, -144(%rbp) ## 16-byte Spill
movsd LCPI19_36(%rip), %xmm0 ## xmm0 = mem[0],zero
subsd %xmm2, %xmm0
movsd LCPI19_37(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
callq _tanh
movsd %xmm0, -1168(%rbp) ## 8-byte Spill
movapd -112(%rbp), %xmm1 ## 16-byte Reload
movsd -2336(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm2, %xmm1
movapd -192(%rbp), %xmm8 ## 16-byte Reload
movddup %xmm8, %xmm3 ## xmm3 = xmm8[0,0]
mulpd -11392(%rbp), %xmm3 ## 16-byte Folded Reload
movapd %xmm3, -14224(%rbp) ## 16-byte Spill
movapd -688(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm0
movapd %xmm9, -688(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm0
subsd %xmm0, %xmm1
movapd %xmm1, -5984(%rbp) ## 16-byte Spill
movsd -1552(%rbp), %xmm11 ## 8-byte Reload
## xmm11 = mem[0],zero
movapd %xmm11, %xmm0
mulsd %xmm1, %xmm0
movsd -480(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movapd %xmm1, %xmm7
movapd %xmm1, %xmm12
addsd %xmm2, %xmm7
movapd -2640(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm7, %xmm1
subsd %xmm1, %xmm0
movapd -12064(%rbp), %xmm10 ## 16-byte Reload
movapd %xmm10, %xmm1
subsd %xmm0, %xmm1
movsd LCPI19_50(%rip), %xmm13 ## xmm13 = mem[0],zero
mulsd %xmm13, %xmm1
movapd %xmm11, %xmm4
movapd -400(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm0, %xmm4
movapd %xmm0, %xmm14
movapd -2816(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm0
movapd -592(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm5, %xmm0
subsd %xmm0, %xmm4
movapd %xmm5, %xmm0
movapd %xmm5, %xmm3
movsd %xmm4, -4480(%rbp) ## 8-byte Spill
mulsd %xmm4, %xmm5
movapd -272(%rbp), %xmm0 ## 16-byte Reload
mulsd -2256(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm5
mulsd %xmm13, %xmm5
addsd %xmm1, %xmm5
movsd LCPI19_67(%rip), %xmm0 ## xmm0 = mem[0],zero
addsd -536(%rbp), %xmm0 ## 8-byte Folded Reload
addsd -3152(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm5
movapd %xmm5, -5008(%rbp) ## 16-byte Spill
movsd -9872(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
addsd -9456(%rbp), %xmm4 ## 16-byte Folded Reload
movapd %xmm4, -9264(%rbp) ## 16-byte Spill
mulpd LCPI19_68(%rip), %xmm2
movapd %xmm2, %xmm0
movapd -6160(%rbp), %xmm15 ## 16-byte Reload
subsd %xmm15, %xmm0
movapd %xmm0, -10608(%rbp) ## 16-byte Spill
mulsd %xmm8, %xmm0
subsd %xmm0, %xmm4
movapd %xmm8, %xmm1
movsd %xmm4, -13464(%rbp) ## 8-byte Spill
mulsd %xmm4, %xmm1
addsd %xmm5, %xmm1
movapd %xmm2, -8192(%rbp) ## 16-byte Spill
unpckhpd %xmm2, %xmm2 ## xmm2 = xmm2[1,1]
movapd -4256(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm0
movapd %xmm2, -9520(%rbp) ## 16-byte Spill
addsd %xmm2, %xmm0
movapd %xmm0, -10624(%rbp) ## 16-byte Spill
mulsd %xmm12, %xmm0
addsd %xmm1, %xmm0
movapd %xmm0, -48(%rbp) ## 16-byte Spill
movsd LCPI19_25(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm0, %xmm1
movapd -176(%rbp), %xmm6 ## 16-byte Reload
addsd %xmm1, %xmm6
movapd %xmm3, %xmm1
mulsd -12048(%rbp), %xmm1 ## 16-byte Folded Reload
movapd %xmm14, %xmm2
mulsd -3344(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm1, %xmm2
movapd %xmm4, %xmm0
movsd %xmm7, -784(%rbp) ## 8-byte Spill
mulsd %xmm7, %xmm0
addsd %xmm2, %xmm0
movapd %xmm10, %xmm3
mulsd %xmm13, %xmm3
movapd %xmm3, -7872(%rbp) ## 16-byte Spill
addsd %xmm3, %xmm0
movapd %xmm0, -10416(%rbp) ## 16-byte Spill
movsd LCPI19_49(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm0, %xmm1
addsd %xmm6, %xmm1
movapd -6816(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm13, %xmm3
movapd %xmm15, %xmm2
mulsd %xmm11, %xmm2
movapd %xmm3, -6064(%rbp) ## 16-byte Spill
subsd %xmm2, %xmm3
movapd %xmm3, -11376(%rbp) ## 16-byte Spill
movapd %xmm3, %xmm2
mulsd %xmm9, %xmm2
movapd %xmm2, -17840(%rbp) ## 16-byte Spill
mulsd %xmm8, %xmm2
addsd %xmm1, %xmm2
movapd -720(%rbp), %xmm1 ## 16-byte Reload
mulsd -6880(%rbp), %xmm1 ## 16-byte Folded Reload
movapd -608(%rbp), %xmm15 ## 16-byte Reload
mulsd -6864(%rbp), %xmm15 ## 16-byte Folded Reload
addsd %xmm1, %xmm15
movapd -1248(%rbp), %xmm1 ## 16-byte Reload
movsd LCPI19_62(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm1
movapd -1264(%rbp), %xmm3 ## 16-byte Reload
movsd LCPI19_63(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm7, %xmm3
subsd %xmm3, %xmm1
subsd %xmm1, %xmm15
movapd %xmm15, -6800(%rbp) ## 16-byte Spill
mulsd %xmm13, %xmm15
movapd %xmm4, %xmm1
mulsd %xmm11, %xmm1
movapd %xmm15, -6048(%rbp) ## 16-byte Spill
subsd %xmm1, %xmm15
movapd %xmm15, -11360(%rbp) ## 16-byte Spill
mulsd -2336(%rbp), %xmm15 ## 8-byte Folded Reload
addsd %xmm2, %xmm15
movsd -1168(%rbp), %xmm11 ## 8-byte Reload
## xmm11 = mem[0],zero
movapd %xmm11, %xmm2
mulsd %xmm11, %xmm2
movsd LCPI19_0(%rip), %xmm3 ## xmm3 = mem[0],zero
movapd %xmm3, %xmm10
subsd %xmm2, %xmm10
movapd -2048(%rbp), %xmm2 ## 16-byte Reload
mulsd -2912(%rbp), %xmm2 ## 16-byte Folded Reload
movapd -96(%rbp), %xmm1 ## 16-byte Reload
mulpd -2544(%rbp), %xmm1 ## 16-byte Folded Reload
movapd %xmm1, %xmm4
unpckhpd %xmm1, %xmm4 ## xmm4 = xmm4[1],xmm1[1]
addsd %xmm2, %xmm4
addsd %xmm1, %xmm4
movapd %xmm4, %xmm2
mulsd %xmm8, %xmm2
movsd -760(%rbp), %xmm14 ## 8-byte Reload
## xmm14 = mem[0],zero
subsd %xmm2, %xmm14
movsd LCPI19_43(%rip), %xmm1 ## xmm1 = mem[0],zero
movapd -144(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm1, %xmm0
movapd %xmm14, %xmm2
mulsd %xmm14, %xmm2
addsd LCPI19_32(%rip), %xmm2
sqrtsd %xmm2, %xmm9
movapd %xmm9, %xmm8
divsd LCPI19_70(%rip), %xmm8
movapd %xmm3, %xmm1
subsd %xmm0, %xmm3
movapd %xmm8, %xmm2
minsd %xmm1, %xmm2
movsd LCPI19_44(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm7, %xmm2
movapd %xmm9, %xmm1
movsd LCPI19_1(%rip), %xmm13 ## xmm13 = mem[0],zero
mulsd %xmm13, %xmm1
addsd %xmm2, %xmm1
movsd -3856(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
mulsd LCPI19_69(%rip), %xmm12
movsd -1352(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd %xmm13, %xmm6
addsd %xmm13, %xmm6
movapd %xmm12, %xmm2
movapd %xmm3, -4080(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm2
movsd %xmm2, -1368(%rbp) ## 8-byte Spill
mulsd %xmm6, %xmm2
movapd -6960(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm4, -5232(%rbp) ## 16-byte Spill
addsd %xmm4, %xmm3
movapd %xmm3, %xmm7
divsd %xmm9, %xmm7
movapd %xmm14, %xmm5
mulsd %xmm7, %xmm5
movapd %xmm5, %xmm4
mulsd %xmm1, %xmm4
addsd -6976(%rbp), %xmm4 ## 16-byte Folded Reload
addsd -3840(%rbp), %xmm4 ## 16-byte Folded Reload
movapd %xmm4, %xmm0
mulsd %xmm2, %xmm0
mulsd %xmm13, %xmm0
movapd %xmm10, -3312(%rbp) ## 16-byte Spill
mulsd %xmm10, %xmm0
mulsd LCPI19_37(%rip), %xmm0
mulsd %xmm13, %xmm11
addsd %xmm13, %xmm11
mulsd %xmm11, %xmm4
movsd %xmm6, -1656(%rbp) ## 8-byte Spill
movsd %xmm4, -1040(%rbp) ## 8-byte Spill
mulsd %xmm4, %xmm6
movsd %xmm12, -3856(%rbp) ## 8-byte Spill
movapd %xmm12, %xmm10
movsd %xmm6, -4016(%rbp) ## 8-byte Spill
mulsd %xmm6, %xmm10
mulsd LCPI19_43(%rip), %xmm10
addsd %xmm0, %xmm10
movapd -192(%rbp), %xmm13 ## 16-byte Reload
movapd %xmm13, %xmm12
mulsd %xmm10, %xmm12
movapd -2544(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm12, %xmm4
movapd %xmm12, -816(%rbp) ## 16-byte Spill
addsd %xmm15, %xmm4
movsd %xmm11, -1168(%rbp) ## 8-byte Spill
movsd %xmm2, -4656(%rbp) ## 8-byte Spill
mulsd %xmm2, %xmm11
movsd %xmm1, -2968(%rbp) ## 8-byte Spill
movapd %xmm1, %xmm0
mulsd %xmm11, %xmm0
movapd %xmm14, %xmm2
mulsd %xmm0, %xmm2
divsd %xmm9, %xmm2
movapd %xmm2, %xmm1
movapd %xmm2, %xmm6
movsd %xmm2, -1072(%rbp) ## 8-byte Spill
divsd %xmm9, %xmm1
movsd %xmm1, -6672(%rbp) ## 8-byte Spill
mulsd %xmm1, %xmm3
mulsd %xmm11, %xmm5
movapd %xmm5, %xmm1
mulsd LCPI19_1(%rip), %xmm1
subsd %xmm1, %xmm3
movsd LCPI19_0(%rip), %xmm1 ## xmm1 = mem[0],zero
cmplesd %xmm1, %xmm8
movapd %xmm1, -848(%rbp) ## 16-byte Spill
andpd %xmm1, %xmm8
mulsd LCPI19_44(%rip), %xmm5
movapd %xmm8, -3584(%rbp) ## 16-byte Spill
mulsd %xmm8, %xmm5
mulsd LCPI19_71(%rip), %xmm5
addsd %xmm3, %xmm5
movsd %xmm14, -760(%rbp) ## 8-byte Spill
movapd %xmm14, %xmm1
addsd %xmm14, %xmm1
movsd %xmm9, -2200(%rbp) ## 8-byte Spill
addsd %xmm9, %xmm9
movsd %xmm9, -6656(%rbp) ## 8-byte Spill
divsd %xmm9, %xmm5
movsd %xmm1, -2712(%rbp) ## 8-byte Spill
mulsd %xmm1, %xmm5
movsd %xmm0, -4560(%rbp) ## 8-byte Spill
mulsd %xmm0, %xmm7
subsd %xmm7, %xmm5
movapd -112(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm3
mulsd %xmm12, %xmm3
movapd %xmm13, %xmm0
movsd %xmm5, -9856(%rbp) ## 8-byte Spill
mulsd %xmm5, %xmm0
movapd %xmm0, -3648(%rbp) ## 16-byte Spill
movapd -528(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm1
mulsd %xmm0, %xmm1
movapd %xmm8, %xmm0
mulsd %xmm6, %xmm0
movapd %xmm0, -7584(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm1
subsd %xmm1, %xmm3
movapd -2544(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm11, %xmm0
movapd %xmm0, -8800(%rbp) ## 16-byte Spill
subsd %xmm0, %xmm4
movapd %xmm2, %xmm0
mulsd %xmm11, %xmm0
movsd %xmm0, -7568(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm3
movsd %xmm3, -768(%rbp) ## 8-byte Spill
movapd -2576(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm3, %xmm0
addsd %xmm4, %xmm0
movapd %xmm0, %xmm9
movapd %xmm0, -144(%rbp) ## 16-byte Spill
movsd LCPI19_72(%rip), %xmm0 ## xmm0 = mem[0],zero
movapd -2560(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm0, %xmm2
movsd LCPI19_73(%rip), %xmm1 ## xmm1 = mem[0],zero
movapd -3264(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm1, %xmm4
addsd %xmm2, %xmm4
movapd -1104(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm0, %xmm2
movapd -2048(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm1, %xmm3
addsd %xmm2, %xmm3
movapd %xmm4, %xmm2
movsd -480(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd %xmm6, %xmm2
addsd -4128(%rbp), %xmm2 ## 16-byte Folded Reload
movsd LCPI19_74(%rip), %xmm12 ## xmm12 = mem[0],zero
mulsd %xmm12, %xmm2
movapd %xmm3, %xmm1
mulsd %xmm13, %xmm1
movapd %xmm13, %xmm5
movsd %xmm1, -4472(%rbp) ## 8-byte Spill
mulsd %xmm1, %xmm5
movsd LCPI19_75(%rip), %xmm14 ## xmm14 = mem[0],zero
mulsd %xmm14, %xmm5
addsd %xmm2, %xmm5
movsd LCPI19_76(%rip), %xmm2 ## xmm2 = mem[0],zero
addsd %xmm11, %xmm2
subsd %xmm2, %xmm5
movapd %xmm5, %xmm0
movsd %xmm10, -13456(%rbp) ## 8-byte Spill
movapd -9264(%rbp), %xmm5 ## 16-byte Reload
addsd %xmm10, %xmm5
movapd %xmm5, -9264(%rbp) ## 16-byte Spill
movapd -6960(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm12, %xmm1
movapd %xmm1, -6960(%rbp) ## 16-byte Spill
movapd -10608(%rbp), %xmm7 ## 16-byte Reload
subsd %xmm1, %xmm7
movsd %xmm3, -1720(%rbp) ## 8-byte Spill
movapd %xmm3, %xmm2
mulsd %xmm14, %xmm2
movsd %xmm2, -3568(%rbp) ## 8-byte Spill
addsd %xmm2, %xmm7
movapd %xmm7, -10576(%rbp) ## 16-byte Spill
movapd %xmm7, %xmm2
mulsd %xmm13, %xmm2
movapd %xmm5, %xmm1
subsd %xmm2, %xmm1
addsd -5008(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm0, -4992(%rbp) ## 16-byte Spill
movapd %xmm1, -17824(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm13
addsd %xmm0, %xmm13
movapd -6976(%rbp), %xmm10 ## 16-byte Reload
mulsd %xmm12, %xmm10
movapd %xmm10, -6976(%rbp) ## 16-byte Spill
addsd -10624(%rbp), %xmm10 ## 16-byte Folded Reload
movapd %xmm4, -4896(%rbp) ## 16-byte Spill
mulsd %xmm12, %xmm4
movapd %xmm4, -10592(%rbp) ## 16-byte Spill
addsd %xmm4, %xmm10
movapd %xmm10, -8176(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm10
addsd %xmm13, %xmm10
movsd LCPI19_11(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd %xmm9, %xmm4
movsd LCPI19_21(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm10, %xmm0
addsd %xmm4, %xmm0
movsd %xmm0, -96(%rbp) ## 8-byte Spill
movapd -1248(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm4
mulsd -880(%rbp), %xmm4 ## 16-byte Folded Reload
movapd -5792(%rbp), %xmm15 ## 16-byte Reload
mulsd %xmm15, %xmm0
movsd %xmm0, -5472(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm4
movapd -912(%rbp), %xmm5 ## 16-byte Reload
movapd -14208(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm7, %xmm5
addsd %xmm4, %xmm5
movapd -608(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm2
movapd -9408(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm1, %xmm2
addsd %xmm5, %xmm2
movapd -1920(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm4
mulsd %xmm1, %xmm4
movsd -3152(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm5, %xmm3
movsd %xmm3, -4424(%rbp) ## 8-byte Spill
subsd %xmm3, %xmm4
mulsd %xmm5, %xmm0
movapd %xmm0, -5456(%rbp) ## 16-byte Spill
subsd %xmm0, %xmm2
movapd -1440(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm2, %xmm5
addsd %xmm4, %xmm5
movapd -2432(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm4
mulsd -800(%rbp), %xmm4 ## 16-byte Folded Reload
addsd %xmm5, %xmm4
mulsd -536(%rbp), %xmm3 ## 8-byte Folded Reload
movapd %xmm3, -8784(%rbp) ## 16-byte Spill
subsd %xmm3, %xmm4
addsd LCPI19_77(%rip), %xmm4
movapd -400(%rbp), %xmm5 ## 16-byte Reload
movsd -2720(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd %xmm6, %xmm5
movapd -272(%rbp), %xmm12 ## 16-byte Reload
movapd -5184(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm3, %xmm12
addsd %xmm5, %xmm12
movapd -7952(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm12, %xmm0
movapd %xmm0, -8768(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm4
movsd LCPI19_48(%rip), %xmm9 ## xmm9 = mem[0],zero
mulsd -10416(%rbp), %xmm9 ## 16-byte Folded Reload
addsd %xmm4, %xmm9
movapd %xmm8, %xmm4
mulsd -2336(%rbp), %xmm4 ## 8-byte Folded Reload
movapd -14224(%rbp), %xmm5 ## 16-byte Reload
unpckhpd %xmm5, %xmm5 ## xmm5 = xmm5[1,1]
movapd -688(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm5, -16656(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm0
addsd %xmm4, %xmm0
movsd LCPI19_24(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd -48(%rbp), %xmm5 ## 16-byte Folded Reload
movapd -592(%rbp), %xmm13 ## 16-byte Reload
movapd %xmm13, %xmm4
mulsd %xmm3, %xmm4
movapd %xmm0, -5968(%rbp) ## 16-byte Spill
movsd LCPI19_50(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm0
movapd %xmm0, -14192(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm4
movapd %xmm4, %xmm0
mulsd LCPI19_62(%rip), %xmm0
movapd %xmm0, -8752(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm9
mulsd %xmm6, %xmm13
movapd -5984(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm3, %xmm6
movapd %xmm6, -14176(%rbp) ## 16-byte Spill
addsd %xmm6, %xmm13
movapd -6864(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm13, %xmm3
movapd %xmm3, -5920(%rbp) ## 16-byte Spill
addsd %xmm3, %xmm9
movapd -512(%rbp), %xmm8 ## 16-byte Reload
mulsd %xmm9, %xmm8
addsd %xmm5, %xmm8
movapd -1264(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm5
mulsd -880(%rbp), %xmm5 ## 16-byte Folded Reload
mulsd %xmm15, %xmm0
movsd %xmm0, -6240(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm5
movapd -1504(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm7, %xmm6
addsd %xmm5, %xmm6
movapd -1600(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm5
mulsd %xmm1, %xmm5
movapd -720(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm0, %xmm1
addsd %xmm6, %xmm1
movapd -112(%rbp), %xmm6 ## 16-byte Reload
mulsd -5328(%rbp), %xmm6 ## 8-byte Folded Reload
movapd -608(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm2, -12000(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm7
addsd %xmm6, %xmm7
movapd %xmm0, %xmm6
movsd -3152(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm2, %xmm0
movapd %xmm0, -8608(%rbp) ## 16-byte Spill
subsd %xmm0, %xmm1
mulsd %xmm1, %xmm6
addsd %xmm7, %xmm6
mulsd %xmm2, %xmm3
movsd %xmm3, -8704(%rbp) ## 8-byte Spill
subsd %xmm3, %xmm5
movapd -1440(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm1, -9408(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm7
mulsd LCPI19_14(%rip), %xmm6
addsd %xmm5, %xmm7
addsd %xmm7, %xmm6
movapd -2128(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm5
mulsd -800(%rbp), %xmm5 ## 16-byte Folded Reload
addsd %xmm6, %xmm5
mulsd -536(%rbp), %xmm3 ## 8-byte Folded Reload
movapd %xmm3, -8736(%rbp) ## 16-byte Spill
subsd %xmm3, %xmm5
addsd LCPI19_78(%rip), %xmm5
movapd -7936(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm12, -9120(%rbp) ## 16-byte Spill
mulsd %xmm12, %xmm0
movapd %xmm0, -6256(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm5
movapd -10416(%rbp), %xmm2 ## 16-byte Reload
mulsd LCPI19_47(%rip), %xmm2
addsd %xmm5, %xmm2
movapd %xmm4, -14368(%rbp) ## 16-byte Spill
movsd LCPI19_63(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm4
movapd %xmm4, -8720(%rbp) ## 16-byte Spill
subsd %xmm4, %xmm2
movapd -6880(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm13, -8032(%rbp) ## 16-byte Spill
mulsd %xmm13, %xmm0
movapd %xmm0, -5904(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm2
movapd -448(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm2, %xmm5
addsd %xmm8, %xmm5
movapd -2912(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm4
movapd -816(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm7, %xmm4
addsd %xmm5, %xmm4
movapd -3264(%rbp), %xmm12 ## 16-byte Reload
movapd %xmm12, %xmm15
mulsd %xmm7, %xmm15
movapd -2048(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm5
movapd -3648(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm3, %xmm5
mulsd -1072(%rbp), %xmm0 ## 8-byte Folded Reload
movsd %xmm0, -7552(%rbp) ## 8-byte Spill
addsd %xmm0, %xmm5
subsd %xmm5, %xmm15
movapd %xmm1, %xmm0
mulsd %xmm11, %xmm0
movapd %xmm0, -8688(%rbp) ## 16-byte Spill
subsd %xmm0, %xmm4
movapd %xmm12, %xmm0
mulsd %xmm11, %xmm0
movapd %xmm0, -8640(%rbp) ## 16-byte Spill
subsd %xmm0, %xmm15
movapd -2576(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm5
mulsd %xmm15, %xmm5
addsd %xmm4, %xmm5
movapd -6960(%rbp), %xmm0 ## 16-byte Reload
movapd -192(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm4, %xmm0
movapd %xmm0, -17808(%rbp) ## 16-byte Spill
mulsd %xmm0, %xmm4
movapd -6976(%rbp), %xmm8 ## 16-byte Reload
mulsd -480(%rbp), %xmm8 ## 8-byte Folded Reload
addsd %xmm4, %xmm8
addsd LCPI19_79(%rip), %xmm5
movapd -4128(%rbp), %xmm0 ## 16-byte Reload
mulsd LCPI19_74(%rip), %xmm0
movapd %xmm0, -4128(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm8
movapd %xmm8, %xmm4
mulsd LCPI19_73(%rip), %xmm4
addsd %xmm5, %xmm4
movapd -144(%rbp), %xmm0 ## 16-byte Reload
mulsd LCPI19_15(%rip), %xmm0
mulsd LCPI19_22(%rip), %xmm10
addsd %xmm0, %xmm10
addsd %xmm4, %xmm10
movsd LCPI19_13(%rip), %xmm13 ## xmm13 = mem[0],zero
mulsd %xmm13, %xmm4
movapd -48(%rbp), %xmm0 ## 16-byte Reload
mulsd LCPI19_23(%rip), %xmm0
addsd -96(%rbp), %xmm4 ## 8-byte Folded Reload
movapd -448(%rbp), %xmm1 ## 16-byte Reload
movsd %xmm9, -12504(%rbp) ## 8-byte Spill
mulsd %xmm9, %xmm1
subsd %xmm1, %xmm0
movapd -512(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm2, -10416(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm1
addsd %xmm0, %xmm1
movapd -3248(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm0
mulsd %xmm7, %xmm0
addsd %xmm1, %xmm0
movapd -2560(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm5, %xmm7
movapd -1104(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm1
mulsd %xmm3, %xmm1
movapd %xmm9, %xmm3
mulsd -1072(%rbp), %xmm3 ## 8-byte Folded Reload
movsd %xmm3, -4416(%rbp) ## 8-byte Spill
addsd %xmm3, %xmm1
subsd %xmm1, %xmm7
movapd %xmm2, %xmm1
mulsd %xmm11, %xmm1
movapd %xmm1, -8672(%rbp) ## 16-byte Spill
subsd %xmm1, %xmm0
movapd %xmm5, %xmm1
movsd %xmm11, -1640(%rbp) ## 8-byte Spill
mulsd %xmm11, %xmm1
movsd %xmm1, -4408(%rbp) ## 8-byte Spill
subsd %xmm1, %xmm7
movapd %xmm6, %xmm1
mulsd %xmm7, %xmm1
addsd %xmm0, %xmm1
movapd -112(%rbp), %xmm0 ## 16-byte Reload
mulsd -768(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm12, %xmm3
movapd %xmm15, -5040(%rbp) ## 16-byte Spill
mulsd %xmm15, %xmm3
addsd %xmm0, %xmm3
movapd %xmm5, %xmm0
movapd %xmm7, -816(%rbp) ## 16-byte Spill
mulsd %xmm7, %xmm0
addsd %xmm3, %xmm0
mulsd LCPI19_64(%rip), %xmm0
addsd %xmm1, %xmm0
addsd LCPI19_80(%rip), %xmm0
mulsd LCPI19_72(%rip), %xmm8
addsd %xmm0, %xmm8
addsd %xmm8, %xmm4
movapd %xmm4, -11984(%rbp) ## 16-byte Spill
mulsd %xmm13, %xmm8
addsd %xmm10, %xmm8
movsd LCPI19_81(%rip), %xmm2 ## xmm2 = mem[0],zero
movapd -12016(%rbp), %xmm13 ## 16-byte Reload
mulsd %xmm2, %xmm13
movapd %xmm13, %xmm0
movsd -2800(%rbp), %xmm11 ## 8-byte Reload
## xmm11 = mem[0],zero
mulsd %xmm11, %xmm0
movsd -1760(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
subsd %xmm0, %xmm1
movsd LCPI19_82(%rip), %xmm14 ## xmm14 = mem[0],zero
mulsd %xmm14, %xmm1
movapd -12032(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm2, %xmm0
movapd %xmm0, %xmm2
movsd -1200(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm5, %xmm2
movapd %xmm5, %xmm4
movapd %xmm2, -8944(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm4
movsd LCPI19_83(%rip), %xmm9 ## xmm9 = mem[0],zero
mulsd %xmm9, %xmm4
addsd %xmm1, %xmm4
movsd LCPI19_85(%rip), %xmm12 ## xmm12 = mem[0],zero
movapd -208(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm12, %xmm1
movapd -10576(%rbp), %xmm15 ## 16-byte Reload
subsd %xmm1, %xmm15
movapd -6896(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm14, %xmm1
movapd %xmm15, %xmm2
subsd %xmm1, %xmm2
movapd %xmm0, -6144(%rbp) ## 16-byte Spill
mulsd %xmm9, %xmm0
movapd %xmm0, -9440(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm2
movapd %xmm2, -10560(%rbp) ## 16-byte Spill
movapd %xmm2, %xmm0
movsd -704(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
mulsd %xmm10, %xmm0
movapd -9264(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm2
subsd %xmm0, %xmm2
movapd %xmm2, -14336(%rbp) ## 16-byte Spill
movapd %xmm15, -12352(%rbp) ## 16-byte Spill
movapd %xmm15, %xmm0
mulsd %xmm5, %xmm0
subsd %xmm0, %xmm3
movsd -1112(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm12, %xmm7
addsd LCPI19_86(%rip), %xmm7
addsd -4992(%rbp), %xmm7 ## 16-byte Folded Reload
movapd -9552(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm12, %xmm2
addsd -8176(%rbp), %xmm2 ## 16-byte Folded Reload
movapd %xmm5, %xmm0
movapd %xmm3, -17792(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm0
addsd %xmm7, %xmm0
movapd %xmm2, %xmm6
mulsd %xmm11, %xmm6
addsd %xmm0, %xmm6
addsd LCPI19_84(%rip), %xmm4
movsd %xmm4, -3120(%rbp) ## 8-byte Spill
movapd -9536(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm14, %xmm0
movapd %xmm0, %xmm3
movapd %xmm2, -9552(%rbp) ## 16-byte Spill
addsd %xmm2, %xmm3
movapd %xmm13, -8208(%rbp) ## 16-byte Spill
movapd %xmm13, %xmm2
mulsd %xmm9, %xmm2
movapd %xmm2, -8144(%rbp) ## 16-byte Spill
mulsd LCPI19_20(%rip), %xmm6
movapd -1312(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm8, -14144(%rbp) ## 16-byte Spill
mulsd %xmm8, %xmm2
movapd %xmm1, -6896(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm1
movapd %xmm1, -17776(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm5
movapd %xmm0, -9536(%rbp) ## 16-byte Spill
movapd %xmm0, %xmm4
mulsd %xmm11, %xmm4
xorpd %xmm0, %xmm0
movsd %xmm0, -176(%rbp) ## 8-byte Spill
xorpd %xmm1, %xmm1
xorpd %xmm0, %xmm0
testq %r15, %r15
movsd %xmm7, -1112(%rbp) ## 8-byte Spill
je LBB19_27
## %bb.26:
movsd 48(%r15), %xmm0 ## xmm0 = mem[0],zero
movsd 80(%r15), %xmm1 ## xmm1 = mem[0],zero
movsd %xmm1, -64(%rbp) ## 8-byte Spill
movsd %xmm0, -176(%rbp) ## 8-byte Spill
movapd %xmm3, -4864(%rbp) ## 16-byte Spill
movapd %xmm2, -48(%rbp) ## 16-byte Spill
movsd %xmm5, -208(%rbp) ## 8-byte Spill
movapd %xmm6, -96(%rbp) ## 16-byte Spill
movapd %xmm4, -144(%rbp) ## 16-byte Spill
callq _sin
movaps %xmm0, -2528(%rbp) ## 16-byte Spill
movsd -64(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
callq _cos
movapd -144(%rbp), %xmm4 ## 16-byte Reload
movapd -96(%rbp), %xmm6 ## 16-byte Reload
movapd -2528(%rbp), %xmm1 ## 16-byte Reload
movsd -208(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
movapd -48(%rbp), %xmm2 ## 16-byte Reload
movsd -1112(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
movsd -704(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
movapd -4864(%rbp), %xmm3 ## 16-byte Reload
movaps %xmm0, -848(%rbp) ## 16-byte Spill
movsd 112(%r15), %xmm0 ## xmm0 = mem[0],zero
LBB19_27:
movapd %xmm1, -2528(%rbp) ## 16-byte Spill
movsd -3120(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
addsd %xmm7, %xmm1
movsd %xmm1, -3120(%rbp) ## 8-byte Spill
mulsd -14336(%rbp), %xmm10 ## 16-byte Folded Reload
movsd %xmm10, -96(%rbp) ## 8-byte Spill
addsd -8144(%rbp), %xmm3 ## 16-byte Folded Reload
movapd %xmm3, -4864(%rbp) ## 16-byte Spill
addsd %xmm6, %xmm2
movapd %xmm2, -48(%rbp) ## 16-byte Spill
movapd -1328(%rbp), %xmm1 ## 16-byte Reload
mulsd -11984(%rbp), %xmm1 ## 16-byte Folded Reload
movapd %xmm1, -144(%rbp) ## 16-byte Spill
movsd -1760(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd LCPI19_83(%rip), %xmm1
movsd %xmm1, -1760(%rbp) ## 8-byte Spill
addsd %xmm4, %xmm5
movsd %xmm5, -208(%rbp) ## 8-byte Spill
callq ___sincos_stret
movapd %xmm0, -1088(%rbp) ## 16-byte Spill
movapd %xmm1, -1616(%rbp) ## 16-byte Spill
movsd -176(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
callq _cos
movaps %xmm0, -3184(%rbp) ## 16-byte Spill
movsd -64(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
callq _sin
movapd -6128(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm4
movapd -3184(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm7, %xmm4
movapd -6112(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm3
movapd -2528(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm6, %xmm3
subsd %xmm3, %xmm4
movapd -848(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm3
mulsd %xmm4, %xmm3
mulsd %xmm6, %xmm1
mulsd %xmm7, %xmm2
addsd %xmm1, %xmm2
movapd %xmm0, %xmm1
mulsd %xmm2, %xmm1
subsd %xmm1, %xmm3
movapd %xmm0, -2144(%rbp) ## 16-byte Spill
movapd %xmm4, -10304(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm0
movapd %xmm5, %xmm1
movapd %xmm2, -10288(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm1
addsd %xmm0, %xmm1
movapd -1616(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm0
movapd -1088(%rbp), %xmm4 ## 16-byte Reload
unpcklpd %xmm4, %xmm0 ## xmm0 = xmm0[0],xmm4[0]
movapd %xmm3, -11968(%rbp) ## 16-byte Spill
movddup %xmm3, %xmm2 ## xmm2 = xmm3[0,0]
movapd %xmm2, -16528(%rbp) ## 16-byte Spill
mulpd %xmm2, %xmm0
movapd %xmm4, %xmm2
unpcklpd %xmm5, %xmm2 ## xmm2 = xmm2[0],xmm5[0]
movapd %xmm1, -11952(%rbp) ## 16-byte Spill
movddup %xmm1, %xmm1 ## xmm1 = xmm1[0,0]
movapd %xmm1, -16512(%rbp) ## 16-byte Spill
mulpd %xmm1, %xmm2
movapd %xmm0, %xmm3
subpd %xmm2, %xmm3
addpd %xmm0, %xmm2
movapd %xmm2, %xmm0
blendpd $1, %xmm3, %xmm0 ## xmm0 = xmm3[0],xmm0[1]
unpckhpd %xmm2, %xmm2 ## xmm2 = xmm2[1,1]
movapd %xmm2, %xmm1
mulsd LCPI19_15(%rip), %xmm1
movapd LCPI19_93(%rip), %xmm4 ## xmm4 = [2.7755575615628914E-17,5.5511151231257827E-17]
movapd %xmm0, -1344(%rbp) ## 16-byte Spill
mulpd %xmm0, %xmm4
unpcklpd %xmm3, %xmm1 ## xmm1 = xmm1[0],xmm3[0]
movapd %xmm1, %xmm0
addpd %xmm4, %xmm0
movapd %xmm0, -64(%rbp) ## 16-byte Spill
movapd %xmm4, -576(%rbp) ## 16-byte Spill
subpd %xmm4, %xmm1
movapd %xmm1, -4672(%rbp) ## 16-byte Spill
testq %r15, %r15
movapd %xmm2, -176(%rbp) ## 16-byte Spill
movapd %xmm3, -256(%rbp) ## 16-byte Spill
je LBB19_28
## %bb.29:
movsd 144(%r15), %xmm0 ## xmm0 = mem[0],zero
jmp LBB19_30
LBB19_28:
xorpd %xmm0, %xmm0
LBB19_30:
movapd -2320(%rbp), %xmm1 ## 16-byte Reload
movsd -1280(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd LCPI19_89(%rip), %xmm1
movapd %xmm1, -6944(%rbp) ## 16-byte Spill
movapd -1312(%rbp), %xmm1 ## 16-byte Reload
mulsd -11984(%rbp), %xmm1 ## 16-byte Folded Reload
movapd %xmm1, -8000(%rbp) ## 16-byte Spill
movapd -1328(%rbp), %xmm1 ## 16-byte Reload
mulsd -14144(%rbp), %xmm1 ## 16-byte Folded Reload
movapd %xmm1, -320(%rbp) ## 16-byte Spill
movsd -96(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
addsd -3120(%rbp), %xmm1 ## 8-byte Folded Reload
movsd %xmm1, -96(%rbp) ## 8-byte Spill
movapd -4864(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm2, %xmm1
movapd %xmm1, -1008(%rbp) ## 16-byte Spill
movapd -144(%rbp), %xmm1 ## 16-byte Reload
addsd -48(%rbp), %xmm1 ## 16-byte Folded Reload
movapd %xmm1, -144(%rbp) ## 16-byte Spill
movsd -1760(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
subsd -208(%rbp), %xmm1 ## 8-byte Folded Reload
movsd %xmm1, -896(%rbp) ## 8-byte Spill
callq ___sincos_stret
movsd LCPI19_13(%rip), %xmm2 ## xmm2 = mem[0],zero
movapd -256(%rbp), %xmm15 ## 16-byte Reload
mulsd %xmm15, %xmm2
movapd -576(%rbp), %xmm4 ## 16-byte Reload
shufpd $1, %xmm2, %xmm4 ## xmm4 = xmm4[1],xmm2[0]
movapd %xmm2, %xmm5
movapd -176(%rbp), %xmm14 ## 16-byte Reload
subsd %xmm14, %xmm5
movsd LCPI19_18(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd -10288(%rbp), %xmm2 ## 16-byte Folded Reload
movsd LCPI19_19(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd -10304(%rbp), %xmm3 ## 16-byte Folded Reload
addsd %xmm2, %xmm3
movsd %xmm3, -360(%rbp) ## 8-byte Spill
movsd -1824(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
addsd %xmm3, %xmm7
movsd LCPI19_20(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd -11968(%rbp), %xmm2 ## 16-byte Folded Reload
movsd %xmm2, -304(%rbp) ## 8-byte Spill
addsd %xmm2, %xmm7
movsd LCPI19_21(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm14, %xmm2
movsd LCPI19_22(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm15, %xmm3
addsd %xmm2, %xmm3
movsd %xmm3, -160(%rbp) ## 8-byte Spill
addsd %xmm3, %xmm7
movapd -1344(%rbp), %xmm3 ## 16-byte Reload
addpd %xmm4, %xmm3
movapd %xmm3, %xmm4
unpckhpd %xmm3, %xmm4 ## xmm4 = xmm4[1],xmm3[1]
movsd LCPI19_23(%rip), %xmm2 ## xmm2 = mem[0],zero
movapd %xmm4, -2416(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm2
movsd LCPI19_24(%rip), %xmm6 ## xmm6 = mem[0],zero
mulsd %xmm3, %xmm6
addsd %xmm2, %xmm6
movapd %xmm3, %xmm4
movapd %xmm5, -3280(%rbp) ## 16-byte Spill
shufpd $1, %xmm5, %xmm4 ## xmm4 = xmm4[1],xmm5[0]
movapd %xmm1, %xmm2
unpcklpd %xmm0, %xmm2 ## xmm2 = xmm2[0],xmm0[0]
mulpd %xmm4, %xmm2
movapd -4672(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm4
movapd %xmm3, -1344(%rbp) ## 16-byte Spill
blendpd $1, %xmm3, %xmm4 ## xmm4 = xmm3[0],xmm4[1]
movapd %xmm0, %xmm3
unpcklpd %xmm1, %xmm3 ## xmm3 = xmm3[0],xmm1[0]
mulpd %xmm4, %xmm3
addpd %xmm2, %xmm3
movapd %xmm3, %xmm2
unpckhpd %xmm3, %xmm2 ## xmm2 = xmm2[1],xmm3[1]
movapd %xmm2, -736(%rbp) ## 16-byte Spill
movapd %xmm2, %xmm4
movsd LCPI19_14(%rip), %xmm10 ## xmm10 = mem[0],zero
mulsd %xmm10, %xmm4
movapd -64(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm5
movapd %xmm3, -576(%rbp) ## 16-byte Spill
movapd %xmm14, %xmm2
movapd %xmm1, -752(%rbp) ## 16-byte Spill
unpcklpd %xmm1, %xmm2 ## xmm2 = xmm2[0],xmm1[0]
mulpd LCPI19_94(%rip), %xmm2
mulsd %xmm10, %xmm3
movapd %xmm15, %xmm13
movapd %xmm0, -496(%rbp) ## 16-byte Spill
unpcklpd %xmm0, %xmm13 ## xmm13 = xmm13[0],xmm0[0]
mulpd LCPI19_95(%rip), %xmm13
addpd %xmm2, %xmm13
movsd LCPI19_96(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm13, %xmm2
addsd %xmm6, %xmm2
movsd %xmm7, -1824(%rbp) ## 8-byte Spill
movsd %xmm2, -296(%rbp) ## 8-byte Spill
addsd %xmm2, %xmm7
addsd %xmm7, %xmm3
addsd LCPI19_27(%rip), %xmm3
movapd %xmm4, -2096(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm5
movsd LCPI19_28(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm3, %xmm0
movapd %xmm3, -1424(%rbp) ## 16-byte Spill
addsd %xmm3, %xmm0
movsd %xmm7, -672(%rbp) ## 8-byte Spill
subsd %xmm7, %xmm0
movapd %xmm13, %xmm1
movapd %xmm0, -1024(%rbp) ## 16-byte Spill
mulsd %xmm0, %xmm1
addsd %xmm5, %xmm1
movapd %xmm9, %xmm0
unpckhpd %xmm9, %xmm0 ## xmm0 = xmm0[1],xmm9[1]
movapd %xmm0, -2592(%rbp) ## 16-byte Spill
movapd %xmm13, %xmm0
unpckhpd %xmm13, %xmm0 ## xmm0 = xmm0[1],xmm13[1]
movapd %xmm0, -864(%rbp) ## 16-byte Spill
mulsd %xmm10, %xmm0
movapd %xmm0, -5632(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm1
movapd %xmm1, -2384(%rbp) ## 16-byte Spill
testq %r15, %r15
je LBB19_31
## %bb.32:
movsd 56(%r15), %xmm3 ## xmm3 = mem[0],zero
addsd -128(%rbp), %xmm3 ## 8-byte Folded Reload
movsd 88(%r15), %xmm5 ## xmm5 = mem[0],zero
addsd %xmm3, %xmm5
movsd 120(%r15), %xmm12 ## xmm12 = mem[0],zero
addsd %xmm5, %xmm12
movsd 152(%r15), %xmm10 ## xmm10 = mem[0],zero
jmp LBB19_33
LBB19_31:
xorpd %xmm10, %xmm10
movsd -128(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
addsd %xmm10, %xmm12
movapd %xmm12, %xmm3
movapd %xmm12, %xmm5
LBB19_33:
movapd -3200(%rbp), %xmm0 ## 16-byte Reload
movsd -704(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
movsd %xmm5, -1296(%rbp) ## 8-byte Spill
movapd %xmm12, -48(%rbp) ## 16-byte Spill
movsd %xmm3, -560(%rbp) ## 8-byte Spill
movapd %xmm10, -624(%rbp) ## 16-byte Spill
movapd -8000(%rbp), %xmm1 ## 16-byte Reload
subsd -320(%rbp), %xmm1 ## 16-byte Folded Reload
movapd %xmm1, -8000(%rbp) ## 16-byte Spill
movapd -1008(%rbp), %xmm1 ## 16-byte Reload
addsd -96(%rbp), %xmm1 ## 8-byte Folded Reload
movapd %xmm1, -1008(%rbp) ## 16-byte Spill
movsd LCPI19_87(%rip), %xmm1 ## xmm1 = mem[0],zero
subsd -144(%rbp), %xmm1 ## 16-byte Folded Reload
movapd %xmm1, -8048(%rbp) ## 16-byte Spill
movsd -896(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd LCPI19_81(%rip), %xmm1
movsd %xmm1, -896(%rbp) ## 8-byte Spill
mulsd LCPI19_89(%rip), %xmm0
movapd %xmm0, -6000(%rbp) ## 16-byte Spill
movapd -6944(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm2, %xmm0
movapd %xmm0, -14304(%rbp) ## 16-byte Spill
movsd LCPI19_18(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd -10304(%rbp), %xmm0 ## 16-byte Folded Reload
movsd LCPI19_30(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd -10288(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd %xmm1, -2928(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm1
movsd %xmm1, -3032(%rbp) ## 8-byte Spill
movapd -1952(%rbp), %xmm4 ## 16-byte Reload
addsd %xmm1, %xmm4
movsd LCPI19_20(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd -11952(%rbp), %xmm0 ## 16-byte Folded Reload
movsd %xmm0, -216(%rbp) ## 8-byte Spill
mulsd %xmm5, %xmm0
movapd %xmm4, -1952(%rbp) ## 16-byte Spill
subsd %xmm0, %xmm4
movsd LCPI19_21(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm15, %xmm0
movsd LCPI19_31(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm14, %xmm5
addsd %xmm0, %xmm5
movsd LCPI19_23(%rip), %xmm0 ## xmm0 = mem[0],zero
movapd -2592(%rbp), %xmm9 ## 16-byte Reload
mulsd %xmm9, %xmm0
movsd LCPI19_24(%rip), %xmm1 ## xmm1 = mem[0],zero
movapd -3280(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm6, %xmm1
addsd %xmm0, %xmm1
movsd LCPI19_96(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm8, %xmm2
addsd %xmm1, %xmm2
movapd %xmm13, %xmm0
shufpd $1, -576(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[1],mem[0]
movapd LCPI19_97(%rip), %xmm1 ## xmm1 = <-8.24218900360216E-4,u>
unpcklpd -1024(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = xmm1[0],mem[0]
mulpd %xmm0, %xmm1
movapd %xmm1, -96(%rbp) ## 16-byte Spill
movapd %xmm6, %xmm1
movapd -752(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm7, %xmm1
movapd %xmm9, %xmm0
movapd -496(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm6, %xmm0
subsd %xmm0, %xmm1
movapd %xmm1, -176(%rbp) ## 16-byte Spill
movapd -1344(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm1
movhpd LCPI19_98(%rip), %xmm1 ## xmm1 = xmm1[0],mem[0]
movddup %xmm7, %xmm0 ## xmm0 = xmm7[0,0]
movapd %xmm0, -9936(%rbp) ## 16-byte Spill
mulpd %xmm0, %xmm1
movapd %xmm1, %xmm7
movaps LCPI19_26(%rip), %xmm0 ## xmm0 = <u,2.7755575615628914E-17>
movhlps %xmm3, %xmm0 ## xmm0 = xmm3[1],xmm0[1]
movddup %xmm6, %xmm1 ## xmm1 = xmm6[0,0]
movapd %xmm1, -9952(%rbp) ## 16-byte Spill
mulpd %xmm1, %xmm0
subpd %xmm0, %xmm7
movapd %xmm7, -1520(%rbp) ## 16-byte Spill
movapd %xmm12, %xmm0
addsd %xmm10, %xmm0
movapd %xmm0, -144(%rbp) ## 16-byte Spill
movsd %xmm5, -232(%rbp) ## 8-byte Spill
mulsd %xmm12, %xmm5
movsd %xmm5, -3024(%rbp) ## 8-byte Spill
movsd %xmm4, -2704(%rbp) ## 8-byte Spill
addsd %xmm4, %xmm5
movsd %xmm2, -376(%rbp) ## 8-byte Spill
mulsd %xmm12, %xmm2
movsd %xmm2, -3016(%rbp) ## 8-byte Spill
movsd %xmm5, -2512(%rbp) ## 8-byte Spill
addsd %xmm5, %xmm2
movsd %xmm2, -320(%rbp) ## 8-byte Spill
movapd -864(%rbp), %xmm0 ## 16-byte Reload
mulsd -5632(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm0, -256(%rbp) ## 16-byte Spill
movapd -1424(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm0, %xmm0
movsd LCPI19_32(%rip), %xmm1 ## xmm1 = mem[0],zero
addsd %xmm1, %xmm0
sqrtsd %xmm0, %xmm0
movsd LCPI19_33(%rip), %xmm1 ## xmm1 = mem[0],zero
movsd %xmm0, -3136(%rbp) ## 8-byte Spill
movapd %xmm13, -208(%rbp) ## 16-byte Spill
callq _pow
movsd %xmm0, -936(%rbp) ## 8-byte Spill
movapd -1424(%rbp), %xmm0 ## 16-byte Reload
movsd LCPI19_34(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
callq _tanh
movsd %xmm0, -3632(%rbp) ## 8-byte Spill
movsd -360(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -560(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -72(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
subsd %xmm0, %xmm1
movsd -304(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -1296(%rbp), %xmm0 ## 8-byte Folded Reload
movsd %xmm1, -1376(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm1
movsd -160(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movapd -48(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm2, %xmm0
movsd %xmm1, -2304(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm1
movsd -296(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm2, %xmm0
movsd %xmm1, -928(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm1
movapd %xmm1, -5216(%rbp) ## 16-byte Spill
movsd LCPI19_35(%rip), %xmm3 ## xmm3 = mem[0],zero
movsd -672(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
addsd %xmm1, %xmm3
movapd %xmm3, %xmm0
mulsd LCPI19_1(%rip), %xmm0
movapd %xmm3, %xmm2
movsd %xmm3, -1392(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm2
subsd %xmm1, %xmm2
movapd %xmm2, -2080(%rbp) ## 16-byte Spill
movapd %xmm2, %xmm1
mulsd -576(%rbp), %xmm1 ## 16-byte Folded Reload
movapd %xmm1, -1584(%rbp) ## 16-byte Spill
movapd %xmm2, %xmm0
unpcklpd %xmm1, %xmm0 ## xmm0 = xmm0[0],xmm1[0]
mulpd -208(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm0, -2368(%rbp) ## 16-byte Spill
movapd %xmm3, %xmm0
mulsd %xmm3, %xmm0
addsd LCPI19_32(%rip), %xmm0
sqrtsd %xmm0, %xmm0
movsd %xmm0, -3824(%rbp) ## 8-byte Spill
movsd LCPI19_33(%rip), %xmm1 ## xmm1 = mem[0],zero
callq _pow
movsd %xmm0, -1144(%rbp) ## 8-byte Spill
movsd LCPI19_34(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd -1392(%rbp), %xmm0 ## 8-byte Folded Reload
callq _tanh
movsd %xmm0, -2240(%rbp) ## 8-byte Spill
movapd -864(%rbp), %xmm0 ## 16-byte Reload
mulsd LCPI19_55(%rip), %xmm0
movapd -1520(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm1
mulsd -1024(%rbp), %xmm1 ## 16-byte Folded Reload
movapd %xmm6, %xmm8
unpckhpd %xmm6, %xmm8 ## xmm8 = xmm8[1],xmm6[1]
movapd %xmm8, %xmm2
mulsd -5632(%rbp), %xmm2 ## 16-byte Folded Reload
movapd %xmm6, %xmm3
movapd -176(%rbp), %xmm11 ## 16-byte Reload
shufpd $1, %xmm11, %xmm3 ## xmm3 = xmm3[1],xmm11[0]
movapd LCPI19_99(%rip), %xmm4 ## xmm4 = <6.7714018916563748E-4,u>
movapd -2096(%rbp), %xmm5 ## 16-byte Reload
unpcklpd %xmm5, %xmm4 ## xmm4 = xmm4[0],xmm5[0]
mulpd %xmm3, %xmm4
movddup %xmm1, %xmm1 ## xmm1 = xmm1[0,0]
addpd %xmm4, %xmm1
subpd %xmm0, %xmm4
blendpd $1, %xmm4, %xmm1 ## xmm1 = xmm4[0],xmm1[1]
movapd LCPI19_100(%rip), %xmm3 ## xmm3 = <-1.9427997112575387E-4,u>
unpcklpd %xmm2, %xmm3 ## xmm3 = xmm3[0],xmm2[0]
addpd %xmm1, %xmm3
movapd -2080(%rbp), %xmm9 ## 16-byte Reload
mulsd %xmm6, %xmm9
movapd %xmm6, %xmm0
movapd -576(%rbp), %xmm10 ## 16-byte Reload
unpckhpd %xmm10, %xmm0 ## xmm0 = xmm0[1],xmm10[1]
movapd LCPI19_101(%rip), %xmm1 ## xmm1 = <1.9205962859860238E-4,u>
unpcklpd %xmm5, %xmm1 ## xmm1 = xmm1[0],xmm5[0]
mulpd %xmm0, %xmm1
movapd -96(%rbp), %xmm0 ## 16-byte Reload
addsubpd %xmm1, %xmm0
movapd LCPI19_102(%rip), %xmm7 ## xmm7 = <-1.1203478334918474E-3,u>
unpcklpd -256(%rbp), %xmm7 ## 16-byte Folded Reload
## xmm7 = xmm7[0],mem[0]
addpd %xmm0, %xmm7
movapd -208(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm1
unpckhpd %xmm3, %xmm1 ## xmm1 = xmm1[1],xmm3[1]
movapd %xmm7, %xmm0
unpckhpd %xmm6, %xmm0 ## xmm0 = xmm0[1],xmm6[1]
mulpd %xmm1, %xmm0
movapd -64(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm1
unpcklpd %xmm4, %xmm1 ## xmm1 = xmm1[0],xmm4[0]
movddup -624(%rbp), %xmm12 ## 16-byte Folded Reload
## xmm12 = mem[0,0]
movapd %xmm1, -13648(%rbp) ## 16-byte Spill
mulpd %xmm1, %xmm12
unpcklpd %xmm2, %xmm4 ## xmm4 = xmm4[0],xmm2[0]
movapd %xmm3, %xmm1
movapd %xmm11, %xmm5
shufpd $1, %xmm11, %xmm1 ## xmm1 = xmm1[1],xmm11[0]
movapd %xmm3, -5152(%rbp) ## 16-byte Spill
movapd %xmm3, %xmm2
blendpd $1, %xmm6, %xmm2 ## xmm2 = xmm6[0],xmm2[1]
mulpd %xmm1, %xmm2
movddup -1584(%rbp), %xmm3 ## 16-byte Folded Reload
## xmm3 = mem[0,0]
movapd %xmm10, %xmm1
movapd %xmm3, -16576(%rbp) ## 16-byte Spill
mulpd %xmm3, %xmm1
movapd -2368(%rbp), %xmm11 ## 16-byte Reload
movddup %xmm11, %xmm3 ## xmm3 = xmm11[0,0]
mulpd %xmm4, %xmm3
unpcklpd %xmm5, %xmm6 ## xmm6 = xmm6[0],xmm5[0]
movddup %xmm9, %xmm5 ## xmm5 = xmm9[0,0]
movapd %xmm5, -16560(%rbp) ## 16-byte Spill
mulpd %xmm5, %xmm6
addpd %xmm1, %xmm6
addpd %xmm3, %xmm6
movapd %xmm7, -5136(%rbp) ## 16-byte Spill
unpckhpd %xmm7, %xmm7 ## xmm7 = xmm7[1,1]
movapd %xmm7, -1888(%rbp) ## 16-byte Spill
mulpd %xmm7, %xmm10
addpd %xmm2, %xmm10
movapd -2384(%rbp), %xmm3 ## 16-byte Reload
movddup %xmm3, %xmm1 ## xmm1 = xmm3[0,0]
movapd %xmm4, -14160(%rbp) ## 16-byte Spill
mulpd %xmm4, %xmm1
addpd %xmm1, %xmm10
movapd %xmm6, -672(%rbp) ## 16-byte Spill
movapd %xmm6, %xmm1
mulpd %xmm12, %xmm1
movapd %xmm10, %xmm2
movapd %xmm10, -832(%rbp) ## 16-byte Spill
mulpd %xmm12, %xmm2
movapd %xmm12, -96(%rbp) ## 16-byte Spill
hsubpd %xmm1, %xmm2
movapd %xmm2, -5056(%rbp) ## 16-byte Spill
movapd %xmm8, -1488(%rbp) ## 16-byte Spill
movapd %xmm8, %xmm1
movapd %xmm9, -1904(%rbp) ## 16-byte Spill
mulsd %xmm9, %xmm1
movapd %xmm11, %xmm2
blendpd $1, %xmm0, %xmm2 ## xmm2 = xmm0[0],xmm2[1]
shufpd $1, %xmm1, %xmm0 ## xmm0 = xmm0[1],xmm1[0]
addpd %xmm2, %xmm0
unpcklpd %xmm11, %xmm3 ## xmm3 = xmm3[0],xmm11[0]
addpd %xmm0, %xmm3
movapd %xmm3, -2064(%rbp) ## 16-byte Spill
movapd %xmm6, %xmm0
unpckhpd %xmm6, %xmm0 ## xmm0 = xmm0[1],xmm6[1]
movapd %xmm0, -2176(%rbp) ## 16-byte Spill
movapd %xmm0, %xmm1
movapd -144(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm2, %xmm1
movapd %xmm3, %xmm0
unpckhpd %xmm3, %xmm0 ## xmm0 = xmm0[1],xmm3[1]
movapd %xmm0, -2608(%rbp) ## 16-byte Spill
mulsd %xmm12, %xmm0
subsd %xmm0, %xmm1
movapd %xmm10, %xmm0
unpckhpd %xmm10, %xmm0 ## xmm0 = xmm0[1],xmm10[1]
movapd %xmm0, -2160(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm0
movapd %xmm0, -1568(%rbp) ## 16-byte Spill
addsd -320(%rbp), %xmm1 ## 8-byte Folded Reload
movsd LCPI19_36(%rip), %xmm0 ## xmm0 = mem[0],zero
movapd %xmm1, -976(%rbp) ## 16-byte Spill
subsd %xmm1, %xmm0
movsd LCPI19_37(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
callq _tanh
movapd -2064(%rbp), %xmm5 ## 16-byte Reload
movapd -96(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm2
unpckhpd %xmm4, %xmm2 ## xmm2 = xmm2[1],xmm4[1]
movapd %xmm5, %xmm1
movapd %xmm2, -256(%rbp) ## 16-byte Spill
mulpd %xmm2, %xmm1
movapd -832(%rbp), %xmm2 ## 16-byte Reload
unpcklpd -672(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = xmm2[0],mem[0]
movddup -144(%rbp), %xmm3 ## 16-byte Folded Reload
## xmm3 = mem[0,0]
mulpd %xmm2, %xmm3
subpd %xmm3, %xmm1
movddup -5216(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = mem[0,0]
addpd %xmm1, %xmm2
movapd %xmm2, -4912(%rbp) ## 16-byte Spill
movapd %xmm2, %xmm1
mulpd %xmm2, %xmm1
movapd -5056(%rbp), %xmm2 ## 16-byte Reload
mulpd %xmm2, %xmm2
addpd %xmm1, %xmm2
addpd LCPI19_38(%rip), %xmm2
sqrtpd %xmm2, %xmm1
movapd %xmm1, -3408(%rbp) ## 16-byte Spill
divpd LCPI19_39(%rip), %xmm1
movapd %xmm1, -2112(%rbp) ## 16-byte Spill
cmplepd LCPI19_40(%rip), %xmm1
movsd %xmm0, -1792(%rbp) ## 8-byte Spill
movmskpd %xmm1, %ebx
movsd LCPI19_0(%rip), %xmm0 ## xmm0 = mem[0],zero
movsd %xmm0, -3376(%rbp) ## 8-byte Spill
testb $2, %bl
jne LBB19_35
## %bb.34:
xorps %xmm0, %xmm0
movsd %xmm0, -3376(%rbp) ## 8-byte Spill
LBB19_35:
movsd LCPI19_2(%rip), %xmm0 ## xmm0 = mem[0],zero
movapd -6112(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm1, %xmm0
movsd %xmm0, -992(%rbp) ## 8-byte Spill
movapd -6128(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm2
mulsd LCPI19_4(%rip), %xmm2
movapd %xmm2, -6736(%rbp) ## 16-byte Spill
movsd LCPI19_2(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm0, %xmm2
movsd %xmm2, -3000(%rbp) ## 8-byte Spill
movsd LCPI19_3(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm1, %xmm2
movsd %xmm2, -1808(%rbp) ## 8-byte Spill
movapd %xmm0, %xmm2
movsd LCPI19_5(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm2
movapd %xmm2, -6784(%rbp) ## 16-byte Spill
movapd %xmm1, %xmm6
movsd LCPI19_6(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm6
movapd %xmm6, -3424(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm1
movapd %xmm1, -2016(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm0
movapd %xmm0, -5888(%rbp) ## 16-byte Spill
movapd -10368(%rbp), %xmm0 ## 16-byte Reload
mulsd LCPI19_88(%rip), %xmm0
movapd %xmm0, -8096(%rbp) ## 16-byte Spill
movapd -8048(%rbp), %xmm0 ## 16-byte Reload
addsd -896(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm0, -8048(%rbp) ## 16-byte Spill
movapd -1936(%rbp), %xmm0 ## 16-byte Reload
mulsd -8000(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm0, -960(%rbp) ## 16-byte Spill
movsd LCPI19_19(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd -1008(%rbp), %xmm0 ## 16-byte Folded Reload
movsd %xmm0, -896(%rbp) ## 8-byte Spill
movsd -704(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -14304(%rbp), %xmm0 ## 16-byte Folded Reload
movsd %xmm0, -2032(%rbp) ## 8-byte Spill
movapd %xmm5, %xmm0
mulsd %xmm4, %xmm0
movapd -1568(%rbp), %xmm2 ## 16-byte Reload
subsd %xmm0, %xmm2
movapd -6000(%rbp), %xmm0 ## 16-byte Reload
mulsd -1280(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm0, -1568(%rbp) ## 16-byte Spill
movsd -320(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
addsd %xmm2, %xmm1
movsd LCPI19_36(%rip), %xmm0 ## xmm0 = mem[0],zero
movsd %xmm1, -320(%rbp) ## 8-byte Spill
subsd %xmm1, %xmm0
mulsd LCPI19_37(%rip), %xmm0
callq _tanh
movsd %xmm0, -2288(%rbp) ## 8-byte Spill
movsd LCPI19_0(%rip), %xmm15 ## xmm15 = mem[0],zero
movapd %xmm15, %xmm0
testb $1, %bl
jne LBB19_37
## %bb.36:
xorpd %xmm0, %xmm0
LBB19_37:
movsd %xmm0, -1688(%rbp) ## 8-byte Spill
movapd -6736(%rbp), %xmm1 ## 16-byte Reload
addsd -992(%rbp), %xmm1 ## 8-byte Folded Reload
movapd %xmm1, -6736(%rbp) ## 16-byte Spill
movsd -3000(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
addsd -1808(%rbp), %xmm1 ## 8-byte Folded Reload
movsd %xmm1, -3000(%rbp) ## 8-byte Spill
movapd -6784(%rbp), %xmm1 ## 16-byte Reload
subsd -3424(%rbp), %xmm1 ## 16-byte Folded Reload
movapd %xmm1, -6784(%rbp) ## 16-byte Spill
movapd -5888(%rbp), %xmm1 ## 16-byte Reload
addsd -2016(%rbp), %xmm1 ## 16-byte Folded Reload
movapd %xmm1, -5888(%rbp) ## 16-byte Spill
movapd -960(%rbp), %xmm0 ## 16-byte Reload
addsd -896(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm0, -960(%rbp) ## 16-byte Spill
movsd -2032(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
addsd -1568(%rbp), %xmm0 ## 16-byte Folded Reload
movsd %xmm0, -2032(%rbp) ## 8-byte Spill
movsd LCPI19_47(%rip), %xmm4 ## xmm4 = mem[0],zero
movapd -864(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm4, %xmm2
movapd -576(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm0
mulsd %xmm4, %xmm0
movsd LCPI19_103(%rip), %xmm7 ## xmm7 = mem[0],zero
movapd -208(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm1
movapd %xmm3, %xmm6
mulsd %xmm7, %xmm1
movapd -736(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm4, %xmm3
movsd LCPI19_48(%rip), %xmm13 ## xmm13 = mem[0],zero
movapd -176(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm13, %xmm4
addsd %xmm3, %xmm4
movapd -64(%rbp), %xmm14 ## 16-byte Reload
mulsd %xmm7, %xmm14
addsd %xmm4, %xmm14
movsd LCPI19_54(%rip), %xmm3 ## xmm3 = mem[0],zero
movapd %xmm5, %xmm4
mulsd %xmm5, %xmm3
movapd %xmm3, %xmm12
movsd LCPI19_104(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm6, %xmm5
movapd %xmm4, %xmm3
movapd %xmm4, %xmm7
movsd LCPI19_55(%rip), %xmm10 ## xmm10 = mem[0],zero
mulsd %xmm10, %xmm3
movsd LCPI19_105(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd %xmm6, %xmm4
movapd -1488(%rbp), %xmm11 ## 16-byte Reload
mulsd %xmm13, %xmm11
addsd %xmm2, %xmm11
movapd -1520(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm6
mulsd %xmm13, %xmm6
addsd %xmm0, %xmm6
addsd %xmm1, %xmm6
movapd %xmm6, -1568(%rbp) ## 16-byte Spill
movapd %xmm2, %xmm0
mulsd %xmm10, %xmm0
subsd %xmm0, %xmm12
addsd %xmm5, %xmm12
movsd %xmm12, -6528(%rbp) ## 8-byte Spill
movapd %xmm7, %xmm0
mulsd %xmm12, %xmm0
movsd LCPI19_57(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm2, %xmm1
subsd %xmm3, %xmm1
addsd %xmm4, %xmm1
movsd %xmm1, -6544(%rbp) ## 8-byte Spill
mulsd %xmm1, %xmm2
addsd %xmm0, %xmm2
movapd %xmm2, -6096(%rbp) ## 16-byte Spill
movapd -2112(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm0
unpckhpd %xmm2, %xmm0 ## xmm0 = xmm0[1],xmm2[1]
minsd %xmm15, %xmm0
movsd LCPI19_44(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
movapd %xmm1, %xmm5
movapd -3408(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm3
unpckhpd %xmm1, %xmm3 ## xmm3 = xmm3[1],xmm1[1]
movapd %xmm3, %xmm6
movapd %xmm3, -3392(%rbp) ## 16-byte Spill
movsd LCPI19_1(%rip), %xmm13 ## xmm13 = mem[0],zero
mulsd %xmm13, %xmm6
movapd %xmm13, %xmm4
addsd %xmm0, %xmm6
movapd %xmm6, %xmm13
movapd %xmm6, -3680(%rbp) ## 16-byte Spill
minsd %xmm15, %xmm2
mulsd %xmm5, %xmm2
movapd %xmm1, %xmm12
movapd %xmm1, %xmm9
mulsd %xmm4, %xmm9
addsd %xmm2, %xmm9
movapd %xmm9, -3664(%rbp) ## 16-byte Spill
movapd -3200(%rbp), %xmm1 ## 16-byte Reload
addsd -2928(%rbp), %xmm1 ## 16-byte Folded Reload
movapd %xmm1, -9488(%rbp) ## 16-byte Spill
subsd -216(%rbp), %xmm1 ## 8-byte Folded Reload
movapd %xmm1, -9504(%rbp) ## 16-byte Spill
addsd -232(%rbp), %xmm1 ## 8-byte Folded Reload
movapd %xmm1, -6928(%rbp) ## 16-byte Spill
movapd %xmm1, %xmm4
addsd -376(%rbp), %xmm4 ## 8-byte Folded Reload
movapd -2320(%rbp), %xmm7 ## 16-byte Reload
addsd -360(%rbp), %xmm7 ## 8-byte Folded Reload
movapd %xmm7, -6848(%rbp) ## 16-byte Spill
addsd -304(%rbp), %xmm7 ## 8-byte Folded Reload
movapd %xmm7, -5872(%rbp) ## 16-byte Spill
addsd -160(%rbp), %xmm7 ## 8-byte Folded Reload
movapd %xmm7, -6912(%rbp) ## 16-byte Spill
addsd -296(%rbp), %xmm7 ## 8-byte Folded Reload
movapd %xmm7, %xmm6
movsd LCPI19_50(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm5, %xmm6
movapd %xmm7, %xmm2
addsd -672(%rbp), %xmm2 ## 16-byte Folded Reload
addsd -832(%rbp), %xmm7 ## 16-byte Folded Reload
movapd %xmm2, %xmm0
divsd %xmm3, %xmm0
movapd -4912(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm10
unpckhpd %xmm8, %xmm10 ## xmm10 = xmm10[1],xmm8[1]
movapd %xmm10, %xmm1
movapd %xmm0, -6448(%rbp) ## 16-byte Spill
mulsd %xmm0, %xmm1
movapd %xmm1, %xmm3
mulsd %xmm13, %xmm3
addsd %xmm4, %xmm3
movapd %xmm7, %xmm0
divsd %xmm12, %xmm0
movapd %xmm0, -1808(%rbp) ## 16-byte Spill
movapd %xmm8, %xmm12
mulsd %xmm0, %xmm12
movapd %xmm12, %xmm13
mulsd %xmm9, %xmm13
addsd %xmm4, %xmm13
mulsd %xmm5, %xmm4
movapd %xmm6, %xmm0
movapd -256(%rbp), %xmm8 ## 16-byte Reload
mulsd %xmm8, %xmm0
movapd %xmm4, %xmm9
movapd -96(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm5, %xmm9
addsd %xmm0, %xmm9
movsd %xmm14, -2112(%rbp) ## 8-byte Spill
mulsd %xmm8, %xmm14
movapd -1568(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm0
mulsd %xmm5, %xmm0
subsd %xmm14, %xmm0
movapd %xmm6, -9392(%rbp) ## 16-byte Spill
mulsd -144(%rbp), %xmm6 ## 16-byte Folded Reload
addsd LCPI19_103(%rip), %xmm11
movapd %xmm11, -992(%rbp) ## 16-byte Spill
movapd %xmm6, -11920(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm11
movapd %xmm4, -6832(%rbp) ## 16-byte Spill
movapd %xmm0, -1776(%rbp) ## 16-byte Spill
mulsd %xmm0, %xmm4
addsd %xmm11, %xmm4
movapd %xmm9, -5616(%rbp) ## 16-byte Spill
mulsd %xmm9, %xmm8
addsd %xmm4, %xmm8
movapd %xmm8, -896(%rbp) ## 16-byte Spill
movsd -1792(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
movapd %xmm6, %xmm4
mulsd %xmm6, %xmm4
movapd %xmm15, %xmm9
subsd %xmm4, %xmm9
movsd LCPI19_43(%rip), %xmm11 ## xmm11 = mem[0],zero
movapd -976(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm11, %xmm0
movapd %xmm11, %xmm5
movapd %xmm15, %xmm14
subsd %xmm0, %xmm14
movsd -1144(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
mulsd LCPI19_42(%rip), %xmm8
movsd -2240(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd LCPI19_1(%rip), %xmm11 ## xmm11 = mem[0],zero
mulsd %xmm11, %xmm0
addsd %xmm11, %xmm0
movapd %xmm8, %xmm4
movsd %xmm14, -3104(%rbp) ## 8-byte Spill
mulsd %xmm14, %xmm4
movapd %xmm0, %xmm14
movsd %xmm4, -3728(%rbp) ## 8-byte Spill
mulsd %xmm4, %xmm14
addsd -2176(%rbp), %xmm3 ## 16-byte Folded Reload
movapd %xmm14, %xmm4
mulsd %xmm3, %xmm4
mulsd %xmm11, %xmm4
movsd %xmm9, -1680(%rbp) ## 8-byte Spill
mulsd %xmm9, %xmm4
movsd LCPI19_37(%rip), %xmm9 ## xmm9 = mem[0],zero
mulsd %xmm9, %xmm4
mulsd %xmm11, %xmm6
addsd %xmm11, %xmm6
movapd %xmm11, %xmm9
mulsd %xmm6, %xmm3
movsd %xmm0, -3808(%rbp) ## 8-byte Spill
movapd %xmm3, -4000(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm0
movsd %xmm8, -1144(%rbp) ## 8-byte Spill
movsd %xmm0, -3952(%rbp) ## 8-byte Spill
mulsd %xmm0, %xmm8
mulsd %xmm5, %xmm8
addsd %xmm4, %xmm8
movsd %xmm6, -1792(%rbp) ## 8-byte Spill
movsd %xmm14, -1664(%rbp) ## 8-byte Spill
mulsd %xmm14, %xmm6
movsd %xmm6, -2016(%rbp) ## 8-byte Spill
movapd %xmm6, %xmm0
mulsd -3680(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm10, -3424(%rbp) ## 16-byte Spill
movsd %xmm0, -3792(%rbp) ## 8-byte Spill
mulsd %xmm0, %xmm10
movapd -3392(%rbp), %xmm0 ## 16-byte Reload
divsd %xmm0, %xmm10
movapd %xmm10, -6512(%rbp) ## 16-byte Spill
divsd %xmm0, %xmm10
movapd %xmm10, -5744(%rbp) ## 16-byte Spill
mulsd %xmm10, %xmm2
mulsd %xmm6, %xmm1
movapd %xmm1, %xmm4
mulsd %xmm11, %xmm4
subsd %xmm4, %xmm2
mulsd LCPI19_44(%rip), %xmm1
mulsd -3376(%rbp), %xmm1 ## 8-byte Folded Reload
movsd LCPI19_45(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm5, %xmm1
subsd %xmm1, %xmm2
movapd -144(%rbp), %xmm11 ## 16-byte Reload
mulsd %xmm8, %xmm11
movapd -5056(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm3
unpckhpd %xmm5, %xmm3 ## xmm3 = xmm3[1],xmm5[1]
movapd %xmm3, -5664(%rbp) ## 16-byte Spill
addsd %xmm3, %xmm3
movapd %xmm0, %xmm4
addsd %xmm0, %xmm4
movapd %xmm4, -5712(%rbp) ## 16-byte Spill
divsd %xmm4, %xmm2
movapd %xmm3, -4272(%rbp) ## 16-byte Spill
movapd %xmm3, %xmm0
movapd %xmm2, -6704(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm0
movapd %xmm0, -4704(%rbp) ## 16-byte Spill
movapd -256(%rbp), %xmm14 ## 16-byte Reload
movapd %xmm14, %xmm3
mulsd %xmm0, %xmm3
subsd %xmm3, %xmm11
movsd -2288(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
movapd %xmm2, %xmm3
mulsd %xmm2, %xmm3
movapd %xmm15, %xmm6
subsd %xmm3, %xmm6
movsd -936(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd LCPI19_41(%rip), %xmm4
movsd -320(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd LCPI19_43(%rip), %xmm0
subsd %xmm0, %xmm15
movsd -3632(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm9, %xmm1
addsd %xmm9, %xmm1
movapd %xmm4, %xmm0
movsd %xmm15, -3744(%rbp) ## 8-byte Spill
mulsd %xmm15, %xmm0
movapd %xmm1, %xmm10
movsd %xmm0, -4800(%rbp) ## 8-byte Spill
mulsd %xmm0, %xmm10
addsd -2160(%rbp), %xmm13 ## 16-byte Folded Reload
movapd %xmm10, %xmm3
mulsd %xmm13, %xmm3
mulsd %xmm9, %xmm3
movsd %xmm6, -4736(%rbp) ## 8-byte Spill
mulsd %xmm6, %xmm3
mulsd LCPI19_37(%rip), %xmm3
mulsd %xmm9, %xmm2
addsd %xmm9, %xmm2
mulsd %xmm2, %xmm13
movsd %xmm1, -3776(%rbp) ## 8-byte Spill
movapd %xmm13, -3072(%rbp) ## 16-byte Spill
mulsd %xmm13, %xmm1
movsd %xmm4, -936(%rbp) ## 8-byte Spill
movapd %xmm4, %xmm6
movsd %xmm1, -6432(%rbp) ## 8-byte Spill
mulsd %xmm1, %xmm6
mulsd LCPI19_43(%rip), %xmm6
addsd %xmm3, %xmm6
movsd %xmm2, -2288(%rbp) ## 8-byte Spill
movapd %xmm2, %xmm9
movsd %xmm10, -3360(%rbp) ## 8-byte Spill
mulsd %xmm10, %xmm9
movapd %xmm9, %xmm1
mulsd -3664(%rbp), %xmm1 ## 16-byte Folded Reload
movapd -4912(%rbp), %xmm15 ## 16-byte Reload
movapd %xmm15, %xmm4
mulsd %xmm1, %xmm4
movapd -3408(%rbp), %xmm2 ## 16-byte Reload
divsd %xmm2, %xmm4
movapd %xmm4, %xmm0
divsd %xmm2, %xmm0
movapd %xmm0, -5696(%rbp) ## 16-byte Spill
mulsd %xmm0, %xmm7
mulsd %xmm9, %xmm12
movapd %xmm12, %xmm3
mulsd LCPI19_1(%rip), %xmm3
subsd %xmm3, %xmm7
mulsd LCPI19_44(%rip), %xmm12
mulsd -1688(%rbp), %xmm12 ## 8-byte Folded Reload
mulsd LCPI19_45(%rip), %xmm12
subsd %xmm12, %xmm7
movapd -144(%rbp), %xmm13 ## 16-byte Reload
movapd %xmm13, %xmm3
mulsd %xmm6, %xmm3
addsd %xmm5, %xmm5
addsd %xmm2, %xmm2
movapd %xmm2, -5680(%rbp) ## 16-byte Spill
divsd %xmm2, %xmm7
movapd %xmm5, -4192(%rbp) ## 16-byte Spill
movapd %xmm5, %xmm10
mulsd %xmm7, %xmm10
movapd %xmm14, %xmm2
mulsd %xmm10, %xmm2
subsd %xmm2, %xmm3
movapd %xmm15, %xmm5
addsd %xmm15, %xmm5
movapd %xmm5, -5200(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm7
movsd %xmm1, -1672(%rbp) ## 8-byte Spill
movapd -1808(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm1, %xmm0
subsd %xmm0, %xmm7
movapd -96(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm1
mulsd %xmm10, %xmm1
movapd %xmm13, %xmm0
mulsd %xmm7, %xmm0
subsd %xmm0, %xmm1
movapd %xmm14, %xmm2
movapd %xmm7, -8928(%rbp) ## 16-byte Spill
mulsd %xmm7, %xmm2
movapd %xmm5, %xmm0
mulsd %xmm6, %xmm0
subsd %xmm0, %xmm2
movapd -208(%rbp), %xmm14 ## 16-byte Reload
movapd %xmm14, %xmm0
movapd %xmm1, -2448(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm0
movapd %xmm4, -5648(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm14
movsd %xmm14, -4976(%rbp) ## 8-byte Spill
subsd %xmm14, %xmm0
movapd %xmm2, -14112(%rbp) ## 16-byte Spill
addsd %xmm2, %xmm0
movapd -64(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm4
mulsd %xmm3, %xmm4
addsd %xmm0, %xmm4
movapd -2384(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm2
movapd %xmm3, -9248(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm2
mulsd %xmm9, %xmm0
movapd %xmm0, -7536(%rbp) ## 16-byte Spill
subsd %xmm0, %xmm2
movapd %xmm1, %xmm0
movsd %xmm9, -1128(%rbp) ## 8-byte Spill
mulsd %xmm9, %xmm0
movsd %xmm0, -2680(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm4
movapd -2096(%rbp), %xmm13 ## 16-byte Reload
movsd %xmm4, -5320(%rbp) ## 8-byte Spill
mulsd %xmm4, %xmm13
addsd %xmm2, %xmm13
movapd -2368(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm2
movapd %xmm11, -1744(%rbp) ## 16-byte Spill
mulsd %xmm11, %xmm2
addsd %xmm2, %xmm13
movapd -832(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm10, -6464(%rbp) ## 16-byte Spill
mulsd %xmm10, %xmm5
subsd -896(%rbp), %xmm5 ## 16-byte Folded Reload
movapd -2064(%rbp), %xmm1 ## 16-byte Reload
movsd %xmm6, -9848(%rbp) ## 8-byte Spill
mulsd %xmm6, %xmm1
subsd %xmm1, %xmm5
movapd -672(%rbp), %xmm1 ## 16-byte Reload
mulsd -4704(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm1, %xmm5
movapd -2608(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm8, -9328(%rbp) ## 16-byte Spill
mulsd %xmm8, %xmm1
subsd %xmm1, %xmm5
movapd -864(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm1
movapd -5136(%rbp), %xmm11 ## 16-byte Reload
mulsd %xmm11, %xmm1
movapd -1488(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm2
movapd -5152(%rbp), %xmm12 ## 16-byte Reload
mulsd %xmm12, %xmm2
addsd %xmm1, %xmm2
movapd -6096(%rbp), %xmm15 ## 16-byte Reload
movapd %xmm15, %xmm1
movapd %xmm2, -5168(%rbp) ## 16-byte Spill
addsd %xmm2, %xmm1
movsd LCPI19_59(%rip), %xmm4 ## xmm4 = mem[0],zero
subsd %xmm1, %xmm4
movapd -736(%rbp), %xmm14 ## 16-byte Reload
movapd %xmm14, %xmm1
mulsd -6528(%rbp), %xmm1 ## 8-byte Folded Reload
movapd -176(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm2
mulsd -6544(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm1, %xmm2
movsd LCPI19_107(%rip), %xmm7 ## xmm7 = mem[0],zero
movapd %xmm3, %xmm1
mulsd %xmm7, %xmm1
movsd LCPI19_63(%rip), %xmm10 ## xmm10 = mem[0],zero
mulsd %xmm10, %xmm0
addsd %xmm1, %xmm0
addsd %xmm2, %xmm0
movsd LCPI19_60(%rip), %xmm1 ## xmm1 = mem[0],zero
addsd %xmm1, %xmm4
movapd %xmm15, %xmm2
addsd %xmm1, %xmm2
movapd %xmm0, %xmm1
movapd -96(%rbp), %xmm15 ## 16-byte Reload
mulsd %xmm15, %xmm1
movapd %xmm2, -6096(%rbp) ## 16-byte Spill
movapd -256(%rbp), %xmm8 ## 16-byte Reload
mulsd %xmm8, %xmm2
addsd %xmm1, %xmm2
movapd -1520(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm12, %xmm1
movapd -576(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm11, %xmm3
addsd %xmm1, %xmm3
movapd %xmm6, %xmm1
mulsd %xmm7, %xmm1
movapd %xmm14, %xmm7
mulsd %xmm10, %xmm7
addsd %xmm1, %xmm7
addsd %xmm3, %xmm7
movsd %xmm7, -3560(%rbp) ## 8-byte Spill
mulsd -144(%rbp), %xmm7 ## 16-byte Folded Reload
addsd %xmm2, %xmm7
movapd -10352(%rbp), %xmm1 ## 16-byte Reload
mulsd LCPI19_88(%rip), %xmm1
movapd %xmm1, -9472(%rbp) ## 16-byte Spill
movapd -10560(%rbp), %xmm11 ## 16-byte Reload
subsd -6944(%rbp), %xmm11 ## 16-byte Folded Reload
movapd -8096(%rbp), %xmm10 ## 16-byte Reload
movsd LCPI19_90(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm10
movapd -1216(%rbp), %xmm14 ## 16-byte Reload
movapd %xmm14, %xmm1
mulsd -8048(%rbp), %xmm1 ## 16-byte Folded Reload
movapd %xmm1, -6768(%rbp) ## 16-byte Spill
movsd -4640(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
movapd %xmm3, %xmm1
mulsd %xmm2, %xmm1
movapd %xmm1, -6080(%rbp) ## 16-byte Spill
movapd %xmm12, %xmm6
unpckhpd %xmm12, %xmm6 ## xmm6 = xmm6[1],xmm12[1]
movapd %xmm6, -896(%rbp) ## 16-byte Spill
mulsd -2016(%rbp), %xmm9 ## 8-byte Folded Reload
movapd %xmm9, -8592(%rbp) ## 16-byte Spill
subsd %xmm9, %xmm13
addsd LCPI19_106(%rip), %xmm13
movsd LCPI19_61(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm8, %xmm1
movsd %xmm4, -8864(%rbp) ## 8-byte Spill
movsd %xmm1, -4224(%rbp) ## 8-byte Spill
mulsd %xmm1, %xmm4
movsd %xmm4, -4400(%rbp) ## 8-byte Spill
addsd %xmm4, %xmm5
movsd %xmm7, -9864(%rbp) ## 8-byte Spill
movsd LCPI19_50(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm7
movsd %xmm7, -7776(%rbp) ## 8-byte Spill
addsd %xmm7, %xmm5
movapd %xmm15, %xmm1
mulsd %xmm2, %xmm1
movsd %xmm0, -6496(%rbp) ## 8-byte Spill
movapd %xmm0, %xmm2
movapd %xmm1, -4880(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm2
movapd %xmm2, -9296(%rbp) ## 16-byte Spill
addsd %xmm2, %xmm5
movapd -624(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm5, -16544(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm0
movapd %xmm0, -3232(%rbp) ## 16-byte Spill
testq %r15, %r15
movsd -3000(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
movapd -6736(%rbp), %xmm5 ## 16-byte Reload
movapd -6784(%rbp), %xmm12 ## 16-byte Reload
movapd -5888(%rbp), %xmm8 ## 16-byte Reload
je LBB19_38
## %bb.39:
movsd 200(%r15), %xmm4 ## xmm4 = mem[0],zero
addsd -280(%rbp), %xmm4 ## 8-byte Folded Reload
movapd -2928(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm4, %xmm1
addsd %xmm3, %xmm1
movsd -2304(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
movsd -1376(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
subsd %xmm2, %xmm7
subsd -72(%rbp), %xmm2 ## 8-byte Folded Reload
movsd -560(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm2, -1376(%rbp) ## 8-byte Spill
mulsd %xmm2, %xmm0
addsd %xmm1, %xmm0
movsd 216(%r15), %xmm6 ## xmm6 = mem[0],zero
movsd %xmm4, -976(%rbp) ## 8-byte Spill
addsd %xmm4, %xmm6
movsd -216(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm6, %xmm1
movsd %xmm0, -1120(%rbp) ## 8-byte Spill
movapd %xmm0, %xmm2
subsd %xmm1, %xmm2
movsd -1296(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
movsd %xmm7, -2208(%rbp) ## 8-byte Spill
mulsd %xmm7, %xmm4
addsd %xmm2, %xmm4
movsd 232(%r15), %xmm0 ## xmm0 = mem[0],zero
jmp LBB19_40
LBB19_38:
xorpd %xmm0, %xmm0
movsd -280(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
addsd %xmm0, %xmm6
movapd %xmm6, %xmm1
mulsd -2928(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm3, %xmm1
movsd -2304(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
movsd -1376(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
subsd %xmm2, %xmm7
subsd -72(%rbp), %xmm2 ## 8-byte Folded Reload
movsd -560(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
movsd %xmm2, -1376(%rbp) ## 8-byte Spill
mulsd %xmm2, %xmm4
addsd %xmm1, %xmm4
movapd %xmm6, %xmm1
mulsd -216(%rbp), %xmm1 ## 8-byte Folded Reload
movsd %xmm4, -1120(%rbp) ## 8-byte Spill
movapd %xmm4, %xmm2
subsd %xmm1, %xmm2
movsd -1296(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
movsd %xmm7, -2208(%rbp) ## 8-byte Spill
mulsd %xmm7, %xmm4
addsd %xmm2, %xmm4
movsd %xmm6, -976(%rbp) ## 8-byte Spill
LBB19_40:
movapd -6000(%rbp), %xmm2 ## 16-byte Reload
movapd -8000(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm5, %xmm1
mulsd -128(%rbp), %xmm1 ## 8-byte Folded Reload
movapd %xmm1, -1808(%rbp) ## 16-byte Spill
movapd %xmm8, %xmm15
mulsd LCPI19_8(%rip), %xmm15
movapd %xmm5, %xmm3
movsd LCPI19_7(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm3
mulsd %xmm1, %xmm9
mulsd %xmm1, %xmm12
movapd %xmm10, -10400(%rbp) ## 16-byte Spill
addsd %xmm10, %xmm11
movapd %xmm11, -9024(%rbp) ## 16-byte Spill
movapd %xmm14, %xmm11
mulsd %xmm7, %xmm11
movapd -1008(%rbp), %xmm1 ## 16-byte Reload
mulsd LCPI19_18(%rip), %xmm1
movapd %xmm1, -1008(%rbp) ## 16-byte Spill
movapd %xmm2, %xmm14
addsd -4864(%rbp), %xmm14 ## 16-byte Folded Reload
movapd -9472(%rbp), %xmm8 ## 16-byte Reload
mulsd LCPI19_90(%rip), %xmm8
movapd -6768(%rbp), %xmm5 ## 16-byte Reload
subsd -960(%rbp), %xmm5 ## 16-byte Folded Reload
movapd -6080(%rbp), %xmm1 ## 16-byte Reload
subsd -2032(%rbp), %xmm1 ## 8-byte Folded Reload
movsd %xmm1, -3696(%rbp) ## 8-byte Spill
movaps -4672(%rbp), %xmm1 ## 16-byte Reload
blendps $3, -64(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = mem[0,1],xmm1[2,3]
movaps %xmm1, -4672(%rbp) ## 16-byte Spill
movapd -3232(%rbp), %xmm1 ## 16-byte Reload
addsd %xmm13, %xmm1
movapd %xmm1, -3232(%rbp) ## 16-byte Spill
movsd %xmm6, -960(%rbp) ## 8-byte Spill
addsd %xmm6, %xmm0
movapd %xmm0, %xmm1
movsd -232(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm1, %xmm0
movsd %xmm4, -2000(%rbp) ## 8-byte Spill
addsd %xmm4, %xmm0
movsd -928(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
movapd %xmm2, %xmm7
subsd -2304(%rbp), %xmm7 ## 8-byte Folded Reload
movapd -48(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm6
movsd %xmm7, -3008(%rbp) ## 8-byte Spill
mulsd %xmm7, %xmm6
addsd %xmm0, %xmm6
movsd -376(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm1, -320(%rbp) ## 8-byte Spill
mulsd %xmm1, %xmm0
movapd %xmm6, -4064(%rbp) ## 16-byte Spill
addsd %xmm6, %xmm0
movapd -5216(%rbp), %xmm1 ## 16-byte Reload
subsd %xmm2, %xmm1
movapd %xmm4, %xmm7
movapd %xmm1, -5216(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm7
addsd %xmm0, %xmm7
movapd %xmm7, -11936(%rbp) ## 16-byte Spill
xorpd %xmm0, %xmm0
movsd %xmm0, -920(%rbp) ## 8-byte Spill
xorpd %xmm0, %xmm0
testq %r15, %r15
movapd -8048(%rbp), %xmm7 ## 16-byte Reload
movsd LCPI19_1(%rip), %xmm4 ## xmm4 = mem[0],zero
je LBB19_42
## %bb.41:
movsd 248(%r15), %xmm0 ## xmm0 = mem[0],zero
LBB19_42:
movsd %xmm0, -72(%rbp) ## 8-byte Spill
movsd -776(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
subsd -1808(%rbp), %xmm0 ## 16-byte Folded Reload
movsd %xmm0, -2976(%rbp) ## 8-byte Spill
movapd %xmm15, -16592(%rbp) ## 16-byte Spill
movapd %xmm15, %xmm0
movapd %xmm3, -8992(%rbp) ## 16-byte Spill
subsd %xmm3, %xmm0
movapd %xmm0, -9008(%rbp) ## 16-byte Spill
movsd %xmm9, -7136(%rbp) ## 8-byte Spill
movapd %xmm12, -16608(%rbp) ## 16-byte Spill
addsd %xmm12, %xmm9
movsd %xmm9, -5272(%rbp) ## 8-byte Spill
movsd -128(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -9024(%rbp), %xmm0 ## 16-byte Folded Reload
movsd %xmm0, -3088(%rbp) ## 8-byte Spill
addsd -1008(%rbp), %xmm11 ## 16-byte Folded Reload
movapd %xmm11, -9088(%rbp) ## 16-byte Spill
movapd -1936(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm7, %xmm0
movapd %xmm0, -3984(%rbp) ## 16-byte Spill
movapd %xmm8, -10384(%rbp) ## 16-byte Spill
addsd %xmm8, %xmm14
movapd %xmm14, -10096(%rbp) ## 16-byte Spill
addsd LCPI19_92(%rip), %xmm5
movapd %xmm5, -6768(%rbp) ## 16-byte Spill
movsd LCPI19_88(%rip), %xmm1 ## xmm1 = mem[0],zero
movsd -3696(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm1, %xmm0
movsd %xmm0, -3696(%rbp) ## 8-byte Spill
movapd -2592(%rbp), %xmm1 ## 16-byte Reload
movsd LCPI19_64(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm1
movapd %xmm1, %xmm2
movapd %xmm1, -1008(%rbp) ## 16-byte Spill
movapd -2416(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm0, %xmm5
movsd -1824(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
addsd %xmm1, %xmm5
addsd LCPI19_65(%rip), %xmm5
movapd %xmm5, %xmm0
mulsd %xmm4, %xmm0
movapd %xmm5, %xmm3
movsd %xmm5, -2480(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm3
subsd %xmm1, %xmm3
movapd %xmm3, -2304(%rbp) ## 16-byte Spill
movapd -3280(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm0
mulsd %xmm2, %xmm0
movapd -1344(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm3, %xmm2
addsd %xmm0, %xmm2
movsd %xmm2, -2032(%rbp) ## 8-byte Spill
mulsd %xmm2, %xmm1
movapd %xmm1, -3216(%rbp) ## 16-byte Spill
movapd %xmm5, %xmm0
mulsd %xmm5, %xmm0
movsd LCPI19_32(%rip), %xmm1 ## xmm1 = mem[0],zero
addsd %xmm1, %xmm0
sqrtsd %xmm0, %xmm0
movsd LCPI19_33(%rip), %xmm1 ## xmm1 = mem[0],zero
movsd %xmm0, -3712(%rbp) ## 8-byte Spill
callq _pow
movsd %xmm0, -1136(%rbp) ## 8-byte Spill
movsd LCPI19_34(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd -2480(%rbp), %xmm0 ## 8-byte Folded Reload
callq _tanh
movsd %xmm0, -5600(%rbp) ## 8-byte Spill
movddup -1008(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = mem[0,0]
movapd -4672(%rbp), %xmm1 ## 16-byte Reload
mulpd %xmm1, %xmm0
movapd -1344(%rbp), %xmm2 ## 16-byte Reload
blendpd $1, -208(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = mem[0],xmm2[1]
movapd %xmm2, -3872(%rbp) ## 16-byte Spill
movddup -2304(%rbp), %xmm3 ## 16-byte Folded Reload
## xmm3 = mem[0,0]
mulpd %xmm2, %xmm3
addpd %xmm0, %xmm3
movapd %xmm3, %xmm2
movapd %xmm3, -1824(%rbp) ## 16-byte Spill
unpckhpd %xmm3, %xmm2 ## xmm2 = xmm2[1],xmm3[1]
movapd %xmm2, -1808(%rbp) ## 16-byte Spill
movapd -2592(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm2, %xmm0
addsd -3216(%rbp), %xmm0 ## 16-byte Folded Reload
mulsd %xmm3, %xmm1
addsd %xmm0, %xmm1
movapd %xmm1, -3216(%rbp) ## 16-byte Spill
movapd %xmm1, %xmm0
mulsd -48(%rbp), %xmm0 ## 16-byte Folded Reload
addsd -2512(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm0, -4240(%rbp) ## 16-byte Spill
movsd LCPI19_36(%rip), %xmm1 ## xmm1 = mem[0],zero
subsd %xmm0, %xmm1
movsd LCPI19_37(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm1
movapd %xmm1, %xmm0
callq _tanh
movsd %xmm0, -2464(%rbp) ## 8-byte Spill
movapd -64(%rbp), %xmm1 ## 16-byte Reload
movsd -72(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm2, %xmm1
movapd -48(%rbp), %xmm10 ## 16-byte Reload
movddup %xmm10, %xmm0 ## xmm0 = xmm10[0,0]
mulpd -14160(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm0, -14128(%rbp) ## 16-byte Spill
movapd -624(%rbp), %xmm13 ## 16-byte Reload
mulsd %xmm13, %xmm0
subsd %xmm0, %xmm1
movapd %xmm1, -5952(%rbp) ## 16-byte Spill
movapd -992(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm11, %xmm0
mulsd %xmm1, %xmm0
movsd -320(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movapd %xmm1, %xmm6
movapd %xmm1, %xmm15
addsd %xmm2, %xmm6
movsd -2112(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
movapd %xmm2, %xmm1
mulsd %xmm6, %xmm1
subsd %xmm1, %xmm0
movapd -11936(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm1
subsd %xmm0, %xmm1
movsd LCPI19_50(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm7, %xmm1
movapd %xmm11, %xmm4
movapd -256(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm0, %xmm4
movapd %xmm0, %xmm12
movapd -1568(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm0
movapd -144(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm5, %xmm0
subsd %xmm0, %xmm4
movapd %xmm5, %xmm0
movapd %xmm5, %xmm8
movapd %xmm4, -14320(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm5
movapd -96(%rbp), %xmm0 ## 16-byte Reload
mulsd -1776(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm5
mulsd %xmm7, %xmm5
addsd %xmm1, %xmm5
movsd LCPI19_67(%rip), %xmm0 ## xmm0 = mem[0],zero
addsd -2016(%rbp), %xmm0 ## 8-byte Folded Reload
addsd -1128(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm5
movapd %xmm5, %xmm4
movapd %xmm5, -5568(%rbp) ## 16-byte Spill
movsd -9848(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
addsd -9328(%rbp), %xmm1 ## 16-byte Folded Reload
movapd %xmm1, -9184(%rbp) ## 16-byte Spill
movsd LCPI19_61(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm3, %xmm0
movapd %xmm0, -8112(%rbp) ## 16-byte Spill
movapd -9392(%rbp), %xmm5 ## 16-byte Reload
subsd %xmm5, %xmm0
movapd %xmm0, -10528(%rbp) ## 16-byte Spill
mulsd %xmm10, %xmm0
subsd %xmm0, %xmm1
movapd %xmm10, %xmm0
movsd %xmm1, -13440(%rbp) ## 8-byte Spill
mulsd %xmm1, %xmm0
addsd %xmm4, %xmm0
mulsd %xmm7, %xmm2
movapd -6832(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm1
movapd %xmm2, -8128(%rbp) ## 16-byte Spill
addsd %xmm2, %xmm1
movapd %xmm1, -10544(%rbp) ## 16-byte Spill
mulsd %xmm15, %xmm1
addsd %xmm0, %xmm1
movapd %xmm1, -4176(%rbp) ## 16-byte Spill
movsd LCPI19_96(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm1, %xmm0
movapd -3232(%rbp), %xmm3 ## 16-byte Reload
addsd %xmm0, %xmm3
movapd %xmm8, %xmm0
mulsd -11920(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm12, %xmm1
mulsd -5616(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd %xmm4, %xmm2
movsd %xmm6, -2512(%rbp) ## 8-byte Spill
mulsd %xmm6, %xmm2
addsd %xmm1, %xmm2
mulsd %xmm7, %xmm9
movapd %xmm9, -4720(%rbp) ## 16-byte Spill
addsd %xmm9, %xmm2
movapd %xmm2, -10272(%rbp) ## 16-byte Spill
movsd LCPI19_103(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm2, %xmm0
addsd %xmm3, %xmm0
movsd -3560(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm7, %xmm2
movapd %xmm5, %xmm1
mulsd %xmm11, %xmm1
movapd %xmm2, -6032(%rbp) ## 16-byte Spill
subsd %xmm1, %xmm2
movapd %xmm2, -11344(%rbp) ## 16-byte Spill
mulsd %xmm13, %xmm2
movapd %xmm10, %xmm1
movapd %xmm2, -17760(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm1
addsd %xmm0, %xmm1
movapd -736(%rbp), %xmm0 ## 16-byte Reload
mulsd -5136(%rbp), %xmm0 ## 16-byte Folded Reload
movapd -176(%rbp), %xmm15 ## 16-byte Reload
mulsd -5152(%rbp), %xmm15 ## 16-byte Folded Reload
addsd %xmm0, %xmm15
movapd -1520(%rbp), %xmm0 ## 16-byte Reload
movsd LCPI19_107(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm0
movsd LCPI19_63(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd -576(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm0, %xmm2
subsd %xmm2, %xmm15
movapd %xmm15, -8080(%rbp) ## 16-byte Spill
mulsd %xmm7, %xmm15
movapd %xmm4, %xmm0
mulsd %xmm11, %xmm0
movapd %xmm15, -6016(%rbp) ## 16-byte Spill
subsd %xmm0, %xmm15
movapd %xmm15, -11328(%rbp) ## 16-byte Spill
mulsd -72(%rbp), %xmm15 ## 8-byte Folded Reload
addsd %xmm1, %xmm15
movsd -2464(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
movapd %xmm8, %xmm1
mulsd %xmm8, %xmm1
movsd LCPI19_0(%rip), %xmm2 ## xmm2 = mem[0],zero
movapd %xmm2, %xmm13
subsd %xmm1, %xmm13
movsd LCPI19_43(%rip), %xmm6 ## xmm6 = mem[0],zero
movapd -4240(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm6, %xmm0
movapd %xmm2, %xmm4
movapd %xmm2, %xmm3
subsd %xmm0, %xmm4
movapd -1344(%rbp), %xmm1 ## 16-byte Reload
mulsd -2032(%rbp), %xmm1 ## 8-byte Folded Reload
movapd -3872(%rbp), %xmm0 ## 16-byte Reload
mulpd -1824(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm0, %xmm2
unpckhpd %xmm0, %xmm2 ## xmm2 = xmm2[1],xmm0[1]
addsd %xmm1, %xmm2
addsd %xmm0, %xmm2
movapd %xmm2, %xmm1
mulsd %xmm10, %xmm1
movsd -928(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
subsd %xmm1, %xmm12
movapd %xmm12, %xmm1
mulsd %xmm12, %xmm1
addsd LCPI19_32(%rip), %xmm1
sqrtsd %xmm1, %xmm9
movapd %xmm9, %xmm5
divsd LCPI19_70(%rip), %xmm5
movapd %xmm5, %xmm1
minsd %xmm3, %xmm1
movsd LCPI19_44(%rip), %xmm11 ## xmm11 = mem[0],zero
mulsd %xmm11, %xmm1
movapd %xmm9, %xmm3
movsd LCPI19_1(%rip), %xmm14 ## xmm14 = mem[0],zero
mulsd %xmm14, %xmm3
addsd %xmm1, %xmm3
movsd -1136(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd LCPI19_69(%rip), %xmm0
movsd -5600(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm14, %xmm7
addsd %xmm14, %xmm7
movapd %xmm0, %xmm1
movapd %xmm0, %xmm6
movapd %xmm4, -4048(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm1
movsd %xmm1, -1360(%rbp) ## 8-byte Spill
mulsd %xmm7, %xmm1
movapd -6912(%rbp), %xmm10 ## 16-byte Reload
movapd %xmm2, -4240(%rbp) ## 16-byte Spill
addsd %xmm2, %xmm10
movapd %xmm10, %xmm11
divsd %xmm9, %xmm11
movapd %xmm12, %xmm4
mulsd %xmm11, %xmm4
movapd %xmm4, %xmm2
mulsd %xmm3, %xmm2
addsd -6928(%rbp), %xmm2 ## 16-byte Folded Reload
addsd -3216(%rbp), %xmm2 ## 16-byte Folded Reload
movapd %xmm2, %xmm0
mulsd %xmm1, %xmm0
mulsd %xmm14, %xmm0
movapd %xmm13, -4624(%rbp) ## 16-byte Spill
mulsd %xmm13, %xmm0
mulsd LCPI19_37(%rip), %xmm0
mulsd %xmm14, %xmm8
addsd %xmm14, %xmm8
mulsd %xmm8, %xmm2
movsd %xmm7, -1648(%rbp) ## 8-byte Spill
movsd %xmm2, -432(%rbp) ## 8-byte Spill
mulsd %xmm2, %xmm7
movsd %xmm6, -1136(%rbp) ## 8-byte Spill
movapd %xmm6, %xmm13
movsd %xmm7, -6416(%rbp) ## 8-byte Spill
mulsd %xmm7, %xmm13
mulsd LCPI19_43(%rip), %xmm13
addsd %xmm0, %xmm13
movapd -48(%rbp), %xmm14 ## 16-byte Reload
mulsd %xmm13, %xmm14
movapd -1824(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm14, %xmm6
movapd %xmm14, -2768(%rbp) ## 16-byte Spill
addsd %xmm15, %xmm6
movsd %xmm8, -2464(%rbp) ## 8-byte Spill
movapd %xmm8, %xmm7
movsd %xmm1, -2832(%rbp) ## 8-byte Spill
mulsd %xmm1, %xmm7
movsd %xmm3, -5552(%rbp) ## 8-byte Spill
mulsd %xmm7, %xmm3
movapd %xmm12, %xmm2
mulsd %xmm3, %xmm2
divsd %xmm9, %xmm2
movapd %xmm2, %xmm0
movapd %xmm2, %xmm8
movsd %xmm2, -1984(%rbp) ## 8-byte Spill
divsd %xmm9, %xmm0
movsd %xmm0, -6576(%rbp) ## 8-byte Spill
mulsd %xmm0, %xmm10
mulsd %xmm7, %xmm4
movapd %xmm4, %xmm0
mulsd LCPI19_1(%rip), %xmm0
subsd %xmm0, %xmm10
movsd LCPI19_0(%rip), %xmm0 ## xmm0 = mem[0],zero
cmplesd %xmm0, %xmm5
andpd %xmm0, %xmm5
mulsd LCPI19_44(%rip), %xmm4
movapd %xmm5, -5536(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm4
mulsd LCPI19_71(%rip), %xmm4
addsd %xmm10, %xmm4
movsd %xmm12, -928(%rbp) ## 8-byte Spill
addsd %xmm12, %xmm12
movsd %xmm9, -2952(%rbp) ## 8-byte Spill
addsd %xmm9, %xmm9
movsd %xmm9, -6560(%rbp) ## 8-byte Spill
divsd %xmm9, %xmm4
movsd %xmm12, -2696(%rbp) ## 8-byte Spill
mulsd %xmm12, %xmm4
movsd %xmm3, -1624(%rbp) ## 8-byte Spill
mulsd %xmm3, %xmm11
subsd %xmm11, %xmm4
movapd -64(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm5
mulsd %xmm14, %xmm5
movapd -48(%rbp), %xmm10 ## 16-byte Reload
movapd %xmm10, %xmm1
movsd %xmm4, -9840(%rbp) ## 8-byte Spill
mulsd %xmm4, %xmm1
movapd %xmm1, -1968(%rbp) ## 16-byte Spill
movapd -208(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm0
mulsd %xmm1, %xmm0
movapd %xmm3, %xmm1
mulsd %xmm8, %xmm1
movsd %xmm1, -5424(%rbp) ## 8-byte Spill
addsd %xmm1, %xmm0
subsd %xmm0, %xmm5
movapd -1824(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm7, %xmm0
movapd %xmm0, -8544(%rbp) ## 16-byte Spill
subsd %xmm0, %xmm6
movapd %xmm2, %xmm0
mulsd %xmm7, %xmm0
movsd %xmm7, -2192(%rbp) ## 8-byte Spill
movsd %xmm0, -7520(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm5
movsd %xmm5, -1056(%rbp) ## 8-byte Spill
movapd -1008(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm5, %xmm0
addsd %xmm6, %xmm0
movapd %xmm0, %xmm8
movapd %xmm0, -3968(%rbp) ## 16-byte Spill
movapd -2592(%rbp), %xmm0 ## 16-byte Reload
movsd LCPI19_72(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
movapd %xmm1, %xmm3
movapd -3280(%rbp), %xmm2 ## 16-byte Reload
movsd LCPI19_73(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm2
movapd %xmm1, %xmm4
addsd %xmm0, %xmm2
movapd -2416(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm3, %xmm0
movapd -1344(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm4, %xmm1
addsd %xmm0, %xmm1
movapd %xmm2, %xmm0
movsd -320(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm5, %xmm0
addsd -4064(%rbp), %xmm0 ## 16-byte Folded Reload
movsd LCPI19_74(%rip), %xmm9 ## xmm9 = mem[0],zero
mulsd %xmm9, %xmm0
movapd %xmm1, %xmm4
movapd %xmm10, %xmm3
mulsd %xmm10, %xmm4
movapd %xmm10, %xmm6
movsd %xmm4, -2992(%rbp) ## 8-byte Spill
mulsd %xmm4, %xmm6
movsd LCPI19_75(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm6
addsd %xmm0, %xmm6
movsd LCPI19_76(%rip), %xmm0 ## xmm0 = mem[0],zero
addsd %xmm7, %xmm0
subsd %xmm0, %xmm6
movsd %xmm13, -13432(%rbp) ## 8-byte Spill
movapd -9184(%rbp), %xmm4 ## 16-byte Reload
addsd %xmm13, %xmm4
movapd %xmm4, -9184(%rbp) ## 16-byte Spill
movapd -6912(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm9, %xmm0
movapd %xmm0, -6912(%rbp) ## 16-byte Spill
movapd -10528(%rbp), %xmm7 ## 16-byte Reload
subsd %xmm0, %xmm7
movsd %xmm1, -3872(%rbp) ## 8-byte Spill
movapd %xmm1, %xmm0
mulsd %xmm3, %xmm0
movsd %xmm0, -3552(%rbp) ## 8-byte Spill
addsd %xmm0, %xmm7
movapd %xmm7, -10496(%rbp) ## 16-byte Spill
movapd %xmm7, %xmm0
movapd %xmm10, %xmm1
mulsd %xmm10, %xmm0
movapd %xmm4, %xmm3
subsd %xmm0, %xmm3
addsd -5568(%rbp), %xmm6 ## 16-byte Folded Reload
movapd %xmm6, -6288(%rbp) ## 16-byte Spill
movapd %xmm3, -17744(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm1
addsd %xmm6, %xmm1
movapd -6928(%rbp), %xmm12 ## 16-byte Reload
mulsd %xmm9, %xmm12
movapd %xmm12, -6928(%rbp) ## 16-byte Spill
addsd -10544(%rbp), %xmm12 ## 16-byte Folded Reload
movapd %xmm2, -3232(%rbp) ## 16-byte Spill
mulsd %xmm9, %xmm2
movapd %xmm2, -10512(%rbp) ## 16-byte Spill
addsd %xmm2, %xmm12
movapd %xmm12, -8064(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm12
addsd %xmm1, %xmm12
movsd LCPI19_108(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm8, %xmm1
movsd LCPI19_21(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm12, %xmm0
addsd %xmm1, %xmm0
movsd %xmm0, -3616(%rbp) ## 8-byte Spill
movapd -1520(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm1
movapd -2448(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm7, %xmm1
movapd -5648(%rbp), %xmm11 ## 16-byte Reload
mulsd %xmm11, %xmm0
movsd %xmm0, -3432(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm1
movapd -1488(%rbp), %xmm2 ## 16-byte Reload
movapd -14112(%rbp), %xmm10 ## 16-byte Reload
mulsd %xmm10, %xmm2
addsd %xmm1, %xmm2
movapd -176(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm0
movapd -9248(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm6, %xmm0
addsd %xmm2, %xmm0
movapd -896(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm1
mulsd %xmm6, %xmm1
movsd -1128(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
mulsd %xmm8, %xmm2
movsd %xmm2, -4392(%rbp) ## 8-byte Spill
subsd %xmm2, %xmm1
movapd %xmm3, %xmm2
mulsd %xmm8, %xmm2
movapd %xmm2, -5408(%rbp) ## 16-byte Spill
subsd %xmm2, %xmm0
movapd -2096(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm2
mulsd %xmm0, %xmm2
addsd %xmm1, %xmm2
movapd -1904(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm1
movapd -1744(%rbp), %xmm15 ## 16-byte Reload
mulsd %xmm15, %xmm1
addsd %xmm2, %xmm1
movapd -256(%rbp), %xmm2 ## 16-byte Reload
mulsd -4224(%rbp), %xmm2 ## 8-byte Folded Reload
movapd -96(%rbp), %xmm3 ## 16-byte Reload
movapd -4880(%rbp), %xmm14 ## 16-byte Reload
mulsd %xmm14, %xmm3
addsd %xmm2, %xmm3
mulsd -2016(%rbp), %xmm4 ## 8-byte Folded Reload
movapd %xmm4, -8432(%rbp) ## 16-byte Spill
subsd %xmm4, %xmm1
addsd LCPI19_77(%rip), %xmm1
movsd -6544(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm3, %xmm2
movsd %xmm2, -4384(%rbp) ## 8-byte Spill
addsd %xmm2, %xmm1
movsd LCPI19_48(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd -10272(%rbp), %xmm5 ## 16-byte Folded Reload
addsd %xmm1, %xmm5
movapd -208(%rbp), %xmm1 ## 16-byte Reload
mulsd -72(%rbp), %xmm1 ## 8-byte Folded Reload
movapd -14128(%rbp), %xmm13 ## 16-byte Reload
unpckhpd %xmm13, %xmm13 ## xmm13 = xmm13[1,1]
movapd %xmm13, -16480(%rbp) ## 16-byte Spill
mulsd -624(%rbp), %xmm13 ## 16-byte Folded Reload
addsd %xmm1, %xmm13
movapd -576(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm1
mulsd %xmm7, %xmm1
mulsd %xmm11, %xmm2
movsd %xmm2, -5392(%rbp) ## 8-byte Spill
subsd %xmm2, %xmm1
movapd -864(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm10, %xmm2
addsd %xmm1, %xmm2
movapd -1888(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm1
mulsd %xmm6, %xmm1
movapd -736(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm7, %xmm6
addsd %xmm2, %xmm6
mulsd %xmm8, %xmm4
movsd %xmm4, -8416(%rbp) ## 8-byte Spill
subsd %xmm4, %xmm1
movapd %xmm7, %xmm2
mulsd %xmm8, %xmm2
movsd %xmm2, -2648(%rbp) ## 8-byte Spill
subsd %xmm2, %xmm6
mulsd %xmm6, %xmm9
addsd %xmm1, %xmm9
movapd -64(%rbp), %xmm1 ## 16-byte Reload
mulsd -5320(%rbp), %xmm1 ## 8-byte Folded Reload
movapd -176(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm0, -11904(%rbp) ## 16-byte Spill
mulsd %xmm0, %xmm4
addsd %xmm1, %xmm4
movapd %xmm7, %xmm1
movapd %xmm6, -9248(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm1
addsd %xmm4, %xmm1
mulsd LCPI19_14(%rip), %xmm1
addsd %xmm9, %xmm1
movapd -1584(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm2
mulsd %xmm15, %xmm2
addsd %xmm1, %xmm2
movsd LCPI19_24(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd -4176(%rbp), %xmm1 ## 16-byte Folded Reload
movapd -144(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm0
mulsd %xmm14, %xmm0
movapd %xmm13, -5936(%rbp) ## 16-byte Spill
movsd LCPI19_50(%rip), %xmm10 ## xmm10 = mem[0],zero
mulsd %xmm10, %xmm13
movapd %xmm13, -14048(%rbp) ## 16-byte Spill
addsd %xmm13, %xmm0
movapd %xmm0, %xmm4
movsd LCPI19_107(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm7, %xmm4
movapd %xmm4, -8352(%rbp) ## 16-byte Spill
addsd %xmm4, %xmm5
movapd %xmm6, %xmm4
mulsd -4224(%rbp), %xmm4 ## 8-byte Folded Reload
movapd -5952(%rbp), %xmm9 ## 16-byte Reload
mulsd %xmm10, %xmm9
movapd %xmm9, -14080(%rbp) ## 16-byte Spill
addsd %xmm9, %xmm4
movapd -5152(%rbp), %xmm9 ## 16-byte Reload
mulsd %xmm4, %xmm9
movapd %xmm9, -9216(%rbp) ## 16-byte Spill
addsd %xmm9, %xmm5
movapd -752(%rbp), %xmm10 ## 16-byte Reload
mulsd %xmm5, %xmm10
movapd %xmm8, %xmm6
mulsd -2016(%rbp), %xmm6 ## 8-byte Folded Reload
movapd %xmm6, -8384(%rbp) ## 16-byte Spill
subsd %xmm6, %xmm2
addsd LCPI19_78(%rip), %xmm2
addsd %xmm1, %xmm10
movsd -6528(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movapd %xmm3, -8976(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm1
movapd -10272(%rbp), %xmm13 ## 16-byte Reload
mulsd LCPI19_47(%rip), %xmm13
movsd %xmm1, -8400(%rbp) ## 8-byte Spill
addsd %xmm1, %xmm2
addsd %xmm2, %xmm13
movsd LCPI19_109(%rip), %xmm1 ## xmm1 = mem[0],zero
movapd %xmm0, -11280(%rbp) ## 16-byte Spill
mulsd %xmm0, %xmm1
movsd %xmm1, -4360(%rbp) ## 8-byte Spill
subsd %xmm1, %xmm13
movapd -5136(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm4, -14064(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm0
movapd %xmm0, -9200(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm13
movapd -496(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm1
mulsd %xmm13, %xmm1
addsd %xmm10, %xmm1
movsd -2032(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
movapd %xmm4, %xmm2
movapd -2768(%rbp), %xmm15 ## 16-byte Reload
mulsd %xmm15, %xmm2
addsd %xmm1, %xmm2
movapd -3280(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm8
mulsd %xmm15, %xmm8
movapd -1344(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm1
movapd -1968(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm3, %xmm1
movsd -1984(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
mulsd %xmm10, %xmm0
movsd %xmm0, -7504(%rbp) ## 8-byte Spill
addsd %xmm0, %xmm1
subsd %xmm1, %xmm8
movapd %xmm4, %xmm0
movsd -2192(%rbp), %xmm14 ## 8-byte Reload
## xmm14 = mem[0],zero
mulsd %xmm14, %xmm0
movsd %xmm0, -8368(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm2
movapd %xmm6, %xmm0
mulsd %xmm14, %xmm0
movapd %xmm0, -7488(%rbp) ## 16-byte Spill
subsd %xmm0, %xmm8
movapd -1008(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm11, %xmm4
mulsd %xmm8, %xmm4
addsd %xmm2, %xmm4
movapd -6912(%rbp), %xmm0 ## 16-byte Reload
movapd -48(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm1, %xmm0
movapd %xmm0, -17728(%rbp) ## 16-byte Spill
mulsd %xmm0, %xmm1
movapd -6928(%rbp), %xmm6 ## 16-byte Reload
mulsd -320(%rbp), %xmm6 ## 8-byte Folded Reload
addsd %xmm1, %xmm6
addsd LCPI19_79(%rip), %xmm4
movapd -4064(%rbp), %xmm0 ## 16-byte Reload
mulsd LCPI19_74(%rip), %xmm0
movapd %xmm0, -4064(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm6
movapd %xmm6, %xmm1
mulsd LCPI19_73(%rip), %xmm1
addsd %xmm4, %xmm1
movapd -3968(%rbp), %xmm0 ## 16-byte Reload
mulsd LCPI19_98(%rip), %xmm0
mulsd LCPI19_22(%rip), %xmm12
addsd %xmm0, %xmm12
addsd %xmm1, %xmm12
movapd %xmm1, %xmm4
movsd LCPI19_13(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm7, %xmm4
addsd -3616(%rbp), %xmm4 ## 8-byte Folded Reload
movapd -4176(%rbp), %xmm0 ## 16-byte Reload
mulsd LCPI19_23(%rip), %xmm0
movapd %xmm5, -16448(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm9
subsd %xmm9, %xmm0
movapd -752(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm13, -10272(%rbp) ## 16-byte Spill
mulsd %xmm13, %xmm1
addsd %xmm0, %xmm1
movapd -1808(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm2
mulsd %xmm15, %xmm2
addsd %xmm1, %xmm2
movapd -2592(%rbp), %xmm9 ## 16-byte Reload
mulsd %xmm9, %xmm15
movapd -2416(%rbp), %xmm13 ## 16-byte Reload
movapd %xmm13, %xmm1
mulsd %xmm3, %xmm1
movapd %xmm13, %xmm3
mulsd %xmm10, %xmm3
movsd %xmm3, -4352(%rbp) ## 8-byte Spill
addsd %xmm3, %xmm1
subsd %xmm1, %xmm15
movapd %xmm0, %xmm1
mulsd %xmm14, %xmm1
movapd %xmm1, -8336(%rbp) ## 16-byte Spill
subsd %xmm1, %xmm2
movapd %xmm9, %xmm1
mulsd %xmm14, %xmm1
movsd %xmm1, -4344(%rbp) ## 8-byte Spill
subsd %xmm1, %xmm15
mulsd %xmm15, %xmm11
addsd %xmm2, %xmm11
movapd -64(%rbp), %xmm2 ## 16-byte Reload
mulsd -1056(%rbp), %xmm2 ## 8-byte Folded Reload
movapd -3280(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm8, -3616(%rbp) ## 16-byte Spill
mulsd %xmm8, %xmm3
addsd %xmm2, %xmm3
movapd %xmm9, %xmm2
movapd %xmm15, -2768(%rbp) ## 16-byte Spill
mulsd %xmm15, %xmm2
addsd %xmm3, %xmm2
mulsd LCPI19_64(%rip), %xmm2
addsd %xmm11, %xmm2
addsd LCPI19_80(%rip), %xmm2
mulsd LCPI19_72(%rip), %xmm6
addsd %xmm2, %xmm6
addsd %xmm6, %xmm4
mulsd %xmm7, %xmm6
addsd %xmm12, %xmm6
movapd -1616(%rbp), %xmm14 ## 16-byte Reload
mulsd %xmm4, %xmm14
movapd -1088(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm6, %xmm0
subsd %xmm0, %xmm14
movapd -11952(%rbp), %xmm5 ## 16-byte Reload
movsd LCPI19_81(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm5
movapd %xmm5, %xmm0
mulsd -960(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -1120(%rbp), %xmm11 ## 8-byte Reload
## xmm11 = mem[0],zero
movapd %xmm11, %xmm1
subsd %xmm0, %xmm1
movsd LCPI19_82(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm1
movapd %xmm0, %xmm10
movapd -11968(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm2, %xmm0
movapd %xmm0, %xmm3
movsd -1296(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
mulsd %xmm9, %xmm3
movapd %xmm9, %xmm8
movapd %xmm3, -8912(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm8
movsd LCPI19_83(%rip), %xmm15 ## xmm15 = mem[0],zero
mulsd %xmm15, %xmm8
movapd %xmm15, %xmm2
addsd %xmm1, %xmm8
movsd LCPI19_85(%rip), %xmm12 ## xmm12 = mem[0],zero
movapd -5872(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm12, %xmm1
movapd -10496(%rbp), %xmm7 ## 16-byte Reload
subsd %xmm1, %xmm7
movapd -6848(%rbp), %xmm13 ## 16-byte Reload
mulsd %xmm10, %xmm13
movapd %xmm7, %xmm15
subsd %xmm13, %xmm15
movapd %xmm0, -4176(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm0
movapd %xmm0, -9376(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm15
movapd %xmm15, %xmm0
movsd -560(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
movapd -9184(%rbp), %xmm10 ## 16-byte Reload
movapd %xmm10, %xmm3
subsd %xmm0, %xmm3
addsd LCPI19_84(%rip), %xmm8
movsd -2000(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm12, %xmm0
addsd LCPI19_86(%rip), %xmm0
addsd -6288(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm0, %xmm8
movapd %xmm3, -17712(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm1
movsd %xmm8, -2664(%rbp) ## 8-byte Spill
addsd %xmm8, %xmm1
movapd -9488(%rbp), %xmm3 ## 16-byte Reload
mulsd LCPI19_82(%rip), %xmm3
movapd -9504(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm12, %xmm2
addsd -8064(%rbp), %xmm2 ## 16-byte Folded Reload
movapd %xmm3, %xmm12
addsd %xmm2, %xmm12
movapd %xmm5, -5872(%rbp) ## 16-byte Spill
mulsd LCPI19_83(%rip), %xmm5
movapd %xmm5, -9360(%rbp) ## 16-byte Spill
addsd %xmm5, %xmm12
movapd %xmm12, %xmm5
mulsd -976(%rbp), %xmm5 ## 8-byte Folded Reload
addsd %xmm1, %xmm5
movapd -848(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm14, %xmm1
movsd LCPI19_18(%rip), %xmm8 ## xmm8 = mem[0],zero
mulsd %xmm5, %xmm8
addsd %xmm1, %xmm8
movapd %xmm8, -7984(%rbp) ## 16-byte Spill
movapd %xmm7, -12336(%rbp) ## 16-byte Spill
movapd %xmm7, %xmm1
mulsd %xmm9, %xmm1
subsd %xmm1, %xmm10
movapd %xmm9, %xmm1
movapd %xmm10, -17696(%rbp) ## 16-byte Spill
mulsd %xmm10, %xmm1
movsd %xmm0, -2000(%rbp) ## 8-byte Spill
addsd %xmm0, %xmm1
movapd %xmm2, -9504(%rbp) ## 16-byte Spill
movsd -960(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm0, %xmm2
addsd %xmm1, %xmm2
mulsd LCPI19_20(%rip), %xmm2
movapd -1616(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm6, -16432(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm1
addsd %xmm2, %xmm1
movapd -1088(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm4, -16416(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm2
addsd %xmm1, %xmm2
movsd LCPI19_87(%rip), %xmm1 ## xmm1 = mem[0],zero
subsd %xmm2, %xmm1
movapd %xmm13, -6848(%rbp) ## 16-byte Spill
mulsd %xmm9, %xmm13
movapd %xmm13, -17680(%rbp) ## 16-byte Spill
mulsd %xmm13, %xmm9
movapd %xmm3, -9488(%rbp) ## 16-byte Spill
mulsd %xmm0, %xmm3
addsd %xmm9, %xmm3
mulsd LCPI19_83(%rip), %xmm11
movsd %xmm11, -1120(%rbp) ## 8-byte Spill
subsd %xmm3, %xmm11
mulsd LCPI19_81(%rip), %xmm11
addsd %xmm1, %xmm11
movapd -2144(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm1
movapd %xmm14, -16384(%rbp) ## 16-byte Spill
mulsd %xmm14, %xmm1
mulsd LCPI19_19(%rip), %xmm5
addsd %xmm1, %xmm5
movapd -848(%rbp), %xmm8 ## 16-byte Reload
mulsd %xmm11, %xmm8
subsd %xmm5, %xmm8
movapd -6944(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm1
movsd -560(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm0, %xmm1
movapd %xmm1, -17664(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm0
movapd -6000(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm1
mulsd -976(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm0, %xmm1
movapd -6080(%rbp), %xmm0 ## 16-byte Reload
subsd %xmm1, %xmm0
movapd %xmm4, %xmm1
movapd %xmm11, -16352(%rbp) ## 16-byte Spill
mulsd %xmm11, %xmm1
movapd -10288(%rbp), %xmm7 ## 16-byte Reload
movsd LCPI19_88(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd %xmm4, %xmm7
movapd -10304(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm4, %xmm6
movapd %xmm4, %xmm5
movapd %xmm15, -12320(%rbp) ## 16-byte Spill
subsd %xmm2, %xmm15
movapd %xmm6, -9312(%rbp) ## 16-byte Spill
movapd %xmm6, %xmm2
movsd LCPI19_90(%rip), %xmm6 ## xmm6 = mem[0],zero
mulsd %xmm6, %xmm2
movapd %xmm2, -10336(%rbp) ## 16-byte Spill
addsd %xmm2, %xmm15
movsd -128(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
movapd %xmm15, -9968(%rbp) ## 16-byte Spill
mulsd %xmm15, %xmm2
movapd %xmm3, %xmm4
movapd %xmm12, -9344(%rbp) ## 16-byte Spill
addsd %xmm12, %xmm4
movapd %xmm7, -12304(%rbp) ## 16-byte Spill
movapd %xmm7, %xmm3
mulsd %xmm6, %xmm3
movapd %xmm3, -10320(%rbp) ## 16-byte Spill
addsd %xmm3, %xmm4
movapd %xmm4, -9104(%rbp) ## 16-byte Spill
addsd LCPI19_92(%rip), %xmm8
mulsd %xmm5, %xmm0
testq %r15, %r15
je LBB19_44
## %bb.43:
movsd 184(%r15), %xmm3 ## xmm3 = mem[0],zero
movsd %xmm3, -920(%rbp) ## 8-byte Spill
LBB19_44:
movsd -2976(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
subsd -776(%rbp), %xmm3 ## 8-byte Folded Reload
movsd %xmm3, -2976(%rbp) ## 8-byte Spill
movapd -9264(%rbp), %xmm3 ## 16-byte Reload
subsd -3088(%rbp), %xmm3 ## 8-byte Folded Reload
movapd %xmm3, -13952(%rbp) ## 16-byte Spill
movapd -9088(%rbp), %xmm3 ## 16-byte Reload
addsd -3984(%rbp), %xmm3 ## 16-byte Folded Reload
movapd %xmm3, -9088(%rbp) ## 16-byte Spill
movapd -9184(%rbp), %xmm3 ## 16-byte Reload
subsd %xmm2, %xmm3
movapd %xmm3, -13968(%rbp) ## 16-byte Spill
movapd -6768(%rbp), %xmm2 ## 16-byte Reload
addsd -3696(%rbp), %xmm2 ## 8-byte Folded Reload
movapd %xmm2, -6768(%rbp) ## 16-byte Spill
movapd -7984(%rbp), %xmm2 ## 16-byte Reload
addsd %xmm1, %xmm2
movapd %xmm2, -7984(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm8
movapd %xmm8, -10992(%rbp) ## 16-byte Spill
movsd -4112(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm2, %xmm2
movsd LCPI19_0(%rip), %xmm1 ## xmm1 = mem[0],zero
subsd %xmm2, %xmm1
movapd -4752(%rbp), %xmm10 ## 16-byte Reload
mulsd -5072(%rbp), %xmm10 ## 8-byte Folded Reload
movsd LCPI19_1(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm10
movapd %xmm0, %xmm4
movapd %xmm1, -4112(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm10
mulsd LCPI19_110(%rip), %xmm10
movapd -2896(%rbp), %xmm0 ## 16-byte Reload
addsd %xmm0, %xmm0
movsd -4832(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
xorps %xmm2, %xmm2
sqrtsd %xmm1, %xmm2
movsd LCPI19_33(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm2
movsd -2672(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd -5104(%rbp), %xmm3 ## 8-byte Folded Reload
mulsd LCPI19_41(%rip), %xmm3
movsd %xmm2, -4752(%rbp) ## 8-byte Spill
mulsd %xmm2, %xmm3
addsd %xmm1, %xmm1
movsd %xmm1, -4832(%rbp) ## 8-byte Spill
divsd %xmm1, %xmm3
movapd %xmm0, -2896(%rbp) ## 16-byte Spill
mulsd %xmm0, %xmm3
subsd %xmm3, %xmm10
movapd -528(%rbp), %xmm15 ## 16-byte Reload
movapd %xmm15, %xmm0
mulsd -5328(%rbp), %xmm0 ## 8-byte Folded Reload
movapd -1248(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm1
mulsd -12000(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -1264(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm11, %xmm0
mulsd -9408(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
addsd %xmm0, %xmm10
movapd %xmm0, %xmm1
mulsd %xmm4, %xmm1
subsd %xmm1, %xmm10
movapd %xmm10, %xmm9
subsd %xmm0, %xmm9
movapd -2944(%rbp), %xmm0 ## 16-byte Reload
addsd %xmm0, %xmm0
movapd -6720(%rbp), %xmm2 ## 16-byte Reload
movsd %xmm0, -776(%rbp) ## 8-byte Spill
mulsd %xmm0, %xmm2
movapd -4688(%rbp), %xmm0 ## 16-byte Reload
mulsd -1696(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm2
movapd -272(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm1
movapd %xmm0, %xmm4
mulsd -2848(%rbp), %xmm1 ## 16-byte Folded Reload
movapd -592(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm2, %xmm0
movapd %xmm2, -6720(%rbp) ## 16-byte Spill
subsd %xmm0, %xmm1
movapd -400(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm2, %xmm7
movapd %xmm4, %xmm0
mulsd -9456(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm7
movapd %xmm15, %xmm2
mulsd %xmm1, %xmm2
movapd %xmm15, %xmm0
movapd -7920(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm3, %xmm0
movapd %xmm0, -7792(%rbp) ## 16-byte Spill
subsd %xmm0, %xmm2
addsd %xmm7, %xmm2
movapd -112(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm8
movapd -800(%rbp), %xmm14 ## 16-byte Reload
mulsd %xmm14, %xmm8
addsd %xmm2, %xmm8
movapd %xmm6, %xmm2
mulsd %xmm1, %xmm2
movapd %xmm1, %xmm12
movsd %xmm1, -4688(%rbp) ## 8-byte Spill
movapd %xmm6, %xmm1
mulsd %xmm3, %xmm1
movapd %xmm3, %xmm13
movsd %xmm1, -6224(%rbp) ## 8-byte Spill
subsd %xmm1, %xmm2
movapd -912(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm7, %xmm3
addsd %xmm2, %xmm3
movapd -608(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm5
mulsd %xmm14, %xmm5
addsd %xmm3, %xmm5
movsd -536(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm1, %xmm4
movsd %xmm4, -2672(%rbp) ## 8-byte Spill
subsd %xmm4, %xmm8
movapd %xmm15, %xmm2
mulsd %xmm8, %xmm2
mulsd %xmm1, %xmm0
movapd %xmm0, -6208(%rbp) ## 16-byte Spill
subsd %xmm0, %xmm5
movapd %xmm5, -6480(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm6
addsd %xmm2, %xmm6
movapd %xmm11, %xmm2
mulsd %xmm12, %xmm2
movapd %xmm11, %xmm4
mulsd %xmm13, %xmm4
movsd %xmm4, -8464(%rbp) ## 8-byte Spill
subsd %xmm4, %xmm2
movapd -1504(%rbp), %xmm4 ## 16-byte Reload
movsd %xmm7, -12488(%rbp) ## 8-byte Spill
mulsd %xmm7, %xmm4
addsd %xmm2, %xmm4
movapd -720(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm2, %xmm14
addsd %xmm4, %xmm14
mulsd %xmm1, %xmm2
movapd %xmm2, -8448(%rbp) ## 16-byte Spill
subsd %xmm2, %xmm14
movapd %xmm11, %xmm2
mulsd %xmm14, %xmm2
addsd %xmm6, %xmm2
movsd -4096(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm3, %xmm3
movsd LCPI19_0(%rip), %xmm5 ## xmm5 = mem[0],zero
movapd %xmm5, %xmm1
subsd %xmm3, %xmm1
movapd -336(%rbp), %xmm3 ## 16-byte Reload
mulsd -5088(%rbp), %xmm3 ## 8-byte Folded Reload
movsd LCPI19_1(%rip), %xmm11 ## xmm11 = mem[0],zero
mulsd %xmm11, %xmm3
movapd %xmm1, -3696(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm3
movsd LCPI19_110(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm3
movapd %xmm3, %xmm6
movsd -1400(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
addsd %xmm3, %xmm3
movsd -4848(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
xorps %xmm1, %xmm1
sqrtsd %xmm4, %xmm1
movsd LCPI19_33(%rip), %xmm12 ## xmm12 = mem[0],zero
mulsd %xmm12, %xmm1
movsd -5024(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd -1712(%rbp), %xmm7 ## 8-byte Folded Reload
mulsd LCPI19_42(%rip), %xmm7
movsd %xmm1, -4096(%rbp) ## 8-byte Spill
mulsd %xmm1, %xmm7
addsd %xmm4, %xmm4
movsd %xmm4, -4848(%rbp) ## 8-byte Spill
divsd %xmm4, %xmm7
movsd %xmm3, -1400(%rbp) ## 8-byte Spill
mulsd %xmm3, %xmm7
subsd %xmm7, %xmm6
subsd %xmm2, %xmm9
addsd %xmm2, %xmm6
mulsd %xmm11, %xmm2
subsd %xmm2, %xmm6
addsd %xmm9, %xmm6
mulsd -768(%rbp), %xmm15 ## 8-byte Folded Reload
movapd -2048(%rbp), %xmm2 ## 16-byte Reload
mulsd -5040(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm15, %xmm2
movapd -1104(%rbp), %xmm1 ## 16-byte Reload
mulsd -816(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm2, %xmm1
movsd -1352(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm3, %xmm3
subsd %xmm3, %xmm5
movsd -1040(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd -1368(%rbp), %xmm3 ## 8-byte Folded Reload
mulsd %xmm11, %xmm3
movapd %xmm5, -3984(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm3
mulsd %xmm0, %xmm3
movapd %xmm3, %xmm7
movsd -2496(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
addsd %xmm2, %xmm2
movsd -648(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
xorps %xmm4, %xmm4
sqrtsd %xmm3, %xmm4
mulsd %xmm12, %xmm4
movsd -4016(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd -4080(%rbp), %xmm5 ## 16-byte Folded Reload
mulsd LCPI19_69(%rip), %xmm5
movsd %xmm4, -3968(%rbp) ## 8-byte Spill
mulsd %xmm4, %xmm5
addsd %xmm3, %xmm3
movsd %xmm3, -648(%rbp) ## 8-byte Spill
divsd %xmm3, %xmm5
movsd %xmm2, -2496(%rbp) ## 8-byte Spill
mulsd %xmm2, %xmm5
subsd %xmm5, %xmm7
movapd %xmm6, %xmm9
movapd %xmm6, -336(%rbp) ## 16-byte Spill
movapd %xmm6, %xmm2
subsd %xmm1, %xmm2
movapd %xmm2, -8016(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm7
mulsd %xmm11, %xmm1
subsd %xmm1, %xmm7
movsd %xmm7, -1040(%rbp) ## 8-byte Spill
movapd -1856(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm1
mulsd -880(%rbp), %xmm1 ## 16-byte Folded Reload
mulsd -5792(%rbp), %xmm2 ## 16-byte Folded Reload
movapd %xmm2, -8528(%rbp) ## 16-byte Spill
subsd %xmm2, %xmm1
movapd -1184(%rbp), %xmm2 ## 16-byte Reload
mulsd -5328(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm1, %xmm2
movapd -2400(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm1
mulsd -4688(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm2, %xmm1
mulsd %xmm13, %xmm3
movapd %xmm3, -8512(%rbp) ## 16-byte Spill
subsd %xmm3, %xmm1
mulsd -1872(%rbp), %xmm8 ## 8-byte Folded Reload
addsd %xmm1, %xmm8
mulsd LCPI19_25(%rip), %xmm9
addsd %xmm8, %xmm9
movapd -2640(%rbp), %xmm0 ## 16-byte Reload
movapd -3344(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm5, %xmm0
movapd -6160(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm1
mulsd -2256(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm0
movapd -4256(%rbp), %xmm2 ## 16-byte Reload
movapd -592(%rbp), %xmm13 ## 16-byte Reload
mulsd %xmm13, %xmm2
movsd -1552(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm2, %xmm1
addsd %xmm0, %xmm1
movapd -2864(%rbp), %xmm0 ## 16-byte Reload
mulsd -1472(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm1
movapd -1840(%rbp), %xmm0 ## 16-byte Reload
mulsd -8960(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
movapd -2848(%rbp), %xmm1 ## 16-byte Reload
mulsd -1456(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm0
movapd -2624(%rbp), %xmm4 ## 16-byte Reload
mulsd -6720(%rbp), %xmm4 ## 16-byte Folded Reload
addsd %xmm0, %xmm4
movsd -4432(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movapd -272(%rbp), %xmm11 ## 16-byte Reload
mulsd %xmm11, %xmm0
movapd -6640(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm1
movapd -400(%rbp), %xmm12 ## 16-byte Reload
mulsd %xmm12, %xmm1
addsd %xmm0, %xmm1
movapd -6800(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm13, %xmm0
addsd %xmm1, %xmm0
movsd LCPI19_61(%rip), %xmm1 ## xmm1 = mem[0],zero
movapd %xmm0, -14288(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm0
movapd %xmm0, -7808(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm4
movapd %xmm7, %xmm0
movsd -2720(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm7, %xmm0
movapd %xmm0, -8496(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm4
movapd -8160(%rbp), %xmm1 ## 16-byte Reload
movapd -5184(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm0, %xmm1
movapd %xmm1, -9280(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm4
movapd -688(%rbp), %xmm1 ## 16-byte Reload
movsd %xmm4, -12496(%rbp) ## 8-byte Spill
mulsd %xmm4, %xmm1
addsd %xmm9, %xmm1
mulsd %xmm0, %xmm12
movapd %xmm11, %xmm0
mulsd %xmm7, %xmm0
movsd %xmm0, -13480(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm12
movapd -1248(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm12, %xmm0
movapd -608(%rbp), %xmm4 ## 16-byte Reload
movapd -9120(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm3, %xmm4
addsd %xmm0, %xmm4
movapd %xmm4, %xmm7
movapd %xmm4, -3088(%rbp) ## 16-byte Spill
movapd -1264(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm12, %xmm0
movapd -720(%rbp), %xmm15 ## 16-byte Reload
mulsd %xmm3, %xmm15
addsd %xmm0, %xmm15
movapd %xmm8, %xmm9
movsd -784(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm4, %xmm9
mulsd %xmm11, %xmm5
subsd %xmm5, %xmm9
movapd %xmm13, %xmm0
movapd %xmm2, -14096(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm0
subsd %xmm0, %xmm9
movsd -280(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -2320(%rbp), %xmm0 ## 16-byte Folded Reload
movsd -920(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
subsd %xmm0, %xmm2
movsd -128(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -4160(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm2
movsd %xmm2, -800(%rbp) ## 8-byte Spill
movsd -368(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -1280(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm2
movsd -704(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -3056(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm2
movsd %xmm2, -2864(%rbp) ## 8-byte Spill
movsd -464(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -2800(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm2, %xmm3
subsd %xmm0, %xmm3
movsd -2728(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
subsd -3328(%rbp), %xmm2 ## 8-byte Folded Reload
movsd -1200(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm2, -2728(%rbp) ## 8-byte Spill
mulsd %xmm2, %xmm0
subsd %xmm0, %xmm3
movsd %xmm3, -3328(%rbp) ## 8-byte Spill
movsd -152(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
movsd -480(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm0, %xmm2
subsd %xmm2, %xmm3
movapd -192(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm2
mulsd -3048(%rbp), %xmm2 ## 8-byte Folded Reload
subsd %xmm2, %xmm3
movsd %xmm3, -1352(%rbp) ## 8-byte Spill
movsd -288(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm0, %xmm2
movapd %xmm0, %xmm8
subsd %xmm2, %xmm3
movapd %xmm6, %xmm2
mulsd -3040(%rbp), %xmm2 ## 8-byte Folded Reload
subsd %xmm2, %xmm3
movsd LCPI19_58(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm7, %xmm0
movsd %xmm0, -4376(%rbp) ## 8-byte Spill
addsd %xmm0, %xmm1
movsd LCPI19_56(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm15, %xmm0
movsd %xmm0, -8480(%rbp) ## 8-byte Spill
addsd %xmm0, %xmm1
movapd %xmm3, %xmm2
movsd LCPI19_61(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm2
movsd %xmm2, -7824(%rbp) ## 8-byte Spill
addsd %xmm2, %xmm9
movapd %xmm9, %xmm2
movsd LCPI19_103(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm7, %xmm2
subsd %xmm2, %xmm1
movapd -2816(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm4, %xmm2
movsd -1552(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movapd %xmm0, %xmm4
mulsd -5968(%rbp), %xmm4 ## 16-byte Folded Reload
subsd %xmm4, %xmm2
movsd %xmm3, -9704(%rbp) ## 8-byte Spill
movapd %xmm3, %xmm4
subsd %xmm2, %xmm4
movapd -2640(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm13, %xmm3
movapd %xmm0, %xmm2
mulsd %xmm11, %xmm2
subsd %xmm2, %xmm3
movapd -400(%rbp), %xmm2 ## 16-byte Reload
mulsd -2256(%rbp), %xmm2 ## 16-byte Folded Reload
movsd %xmm3, -7456(%rbp) ## 8-byte Spill
mulsd %xmm3, %xmm13
subsd %xmm13, %xmm2
movsd LCPI19_50(%rip), %xmm11 ## xmm11 = mem[0],zero
mulsd %xmm11, %xmm4
mulsd %xmm11, %xmm2
addsd %xmm4, %xmm2
movapd -7920(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm0
movapd -5792(%rbp), %xmm5 ## 16-byte Reload
addsd %xmm5, %xmm0
addsd %xmm2, %xmm0
movapd %xmm0, -6272(%rbp) ## 16-byte Spill
movapd -10608(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm8, %xmm2
addsd %xmm0, %xmm2
movapd -8960(%rbp), %xmm0 ## 16-byte Reload
addsd -6720(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm0, -10256(%rbp) ## 16-byte Spill
movapd -10624(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm6, %xmm3
addsd %xmm0, %xmm3
movapd %xmm6, %xmm0
movapd %xmm3, -17648(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm0
addsd %xmm2, %xmm0
movapd %xmm0, %xmm2
movapd %xmm0, %xmm7
movapd %xmm0, -3344(%rbp) ## 16-byte Spill
mulsd LCPI19_25(%rip), %xmm2
subsd %xmm2, %xmm1
movapd -11376(%rbp), %xmm2 ## 16-byte Reload
mulsd -2336(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm1, %xmm2
movapd -11360(%rbp), %xmm1 ## 16-byte Reload
mulsd -688(%rbp), %xmm1 ## 16-byte Folded Reload
movapd %xmm1, -17632(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm1
subsd %xmm1, %xmm2
movapd -2544(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm1
mulsd -3648(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm2
mulsd -1072(%rbp), %xmm3 ## 8-byte Folded Reload
movsd %xmm3, -4368(%rbp) ## 8-byte Spill
subsd %xmm3, %xmm2
movsd -768(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -2352(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm2, %xmm0
movsd %xmm0, -768(%rbp) ## 8-byte Spill
movapd -8016(%rbp), %xmm1 ## 16-byte Reload
addsd -1040(%rbp), %xmm1 ## 8-byte Folded Reload
movapd %xmm1, -8016(%rbp) ## 16-byte Spill
movapd %xmm0, %xmm2
mulsd LCPI19_108(%rip), %xmm2
movapd %xmm1, %xmm0
mulsd LCPI19_21(%rip), %xmm0
subsd %xmm2, %xmm0
movapd %xmm0, -2848(%rbp) ## 16-byte Spill
movapd -336(%rbp), %xmm2 ## 16-byte Reload
mulsd LCPI19_24(%rip), %xmm2
mulsd LCPI19_112(%rip), %xmm7
addsd %xmm2, %xmm7
movapd -1600(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm2
mulsd -880(%rbp), %xmm2 ## 16-byte Folded Reload
mulsd %xmm5, %xmm8
movsd %xmm8, -7264(%rbp) ## 8-byte Spill
subsd %xmm8, %xmm2
movapd -1184(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm11, %xmm8
mulsd -9408(%rbp), %xmm8 ## 16-byte Folded Reload
addsd %xmm2, %xmm8
movsd LCPI19_14(%rip), %xmm13 ## xmm13 = mem[0],zero
mulsd %xmm13, %xmm10
addsd %xmm8, %xmm10
movapd -2128(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm2
movsd -4688(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm0, %xmm2
addsd %xmm10, %xmm2
mulsd %xmm4, %xmm8
movapd %xmm4, %xmm13
movapd %xmm8, -12240(%rbp) ## 16-byte Spill
subsd %xmm8, %xmm2
movsd -1872(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
mulsd %xmm8, %xmm14
addsd %xmm2, %xmm14
movapd -7936(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm12, %xmm1
movapd %xmm1, -12224(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm14
movapd -3088(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm1
mulsd LCPI19_113(%rip), %xmm1
movapd %xmm1, -12208(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm14
movapd %xmm15, %xmm2
movsd LCPI19_54(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm2
movapd %xmm2, -12192(%rbp) ## 16-byte Spill
addsd %xmm2, %xmm14
movapd %xmm9, %xmm3
movsd LCPI19_47(%rip), %xmm10 ## xmm10 = mem[0],zero
mulsd %xmm10, %xmm3
addsd %xmm14, %xmm3
movapd -6880(%rbp), %xmm1 ## 16-byte Reload
movapd -14368(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm4, %xmm1
movsd %xmm1, -7256(%rbp) ## 8-byte Spill
addsd %xmm1, %xmm3
movsd LCPI19_63(%rip), %xmm1 ## xmm1 = mem[0],zero
movapd -8032(%rbp), %xmm10 ## 16-byte Reload
mulsd %xmm10, %xmm1
movapd %xmm1, -9232(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm3
movapd -448(%rbp), %xmm14 ## 16-byte Reload
movapd %xmm14, %xmm2
mulsd %xmm3, %xmm2
addsd %xmm7, %xmm2
movapd -1920(%rbp), %xmm1 ## 16-byte Reload
movapd -880(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm1, %xmm6
mulsd -5792(%rbp), %xmm1 ## 16-byte Folded Reload
movsd %xmm1, -7248(%rbp) ## 8-byte Spill
subsd %xmm1, %xmm6
mulsd -12000(%rbp), %xmm11 ## 16-byte Folded Reload
addsd %xmm6, %xmm11
movapd -2432(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm1, %xmm0
addsd %xmm11, %xmm0
mulsd %xmm13, %xmm1
movapd %xmm1, -12176(%rbp) ## 16-byte Spill
subsd %xmm1, %xmm0
movapd -6480(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm8, %xmm7
addsd %xmm0, %xmm7
mulsd -7952(%rbp), %xmm12 ## 16-byte Folded Reload
movsd %xmm12, -8576(%rbp) ## 8-byte Spill
addsd %xmm12, %xmm7
movsd LCPI19_57(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm5
movapd %xmm5, -3088(%rbp) ## 16-byte Spill
addsd %xmm5, %xmm7
mulsd LCPI19_113(%rip), %xmm15
movapd %xmm15, -8560(%rbp) ## 16-byte Spill
addsd %xmm15, %xmm7
movsd LCPI19_48(%rip), %xmm13 ## xmm13 = mem[0],zero
mulsd %xmm13, %xmm9
addsd %xmm7, %xmm9
movapd -6864(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm4, %xmm1
movapd %xmm1, -6480(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm9
movsd LCPI19_62(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm10, %xmm1
movsd %xmm1, -7240(%rbp) ## 8-byte Spill
subsd %xmm1, %xmm9
movapd -512(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm5
mulsd %xmm9, %xmm5
addsd %xmm2, %xmm5
movapd -2912(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm2
movapd -3648(%rbp), %xmm12 ## 16-byte Reload
mulsd %xmm12, %xmm2
subsd %xmm2, %xmm5
mulsd -1072(%rbp), %xmm4 ## 8-byte Folded Reload
movapd %xmm4, -12160(%rbp) ## 16-byte Spill
subsd %xmm4, %xmm5
movapd -5040(%rbp), %xmm2 ## 16-byte Reload
mulsd -2352(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm5, %xmm2
movapd %xmm2, %xmm7
movapd -6960(%rbp), %xmm5 ## 16-byte Reload
movsd -480(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
mulsd %xmm8, %xmm5
movapd -6976(%rbp), %xmm4 ## 16-byte Reload
movapd -192(%rbp), %xmm13 ## 16-byte Reload
mulsd %xmm13, %xmm4
movapd %xmm13, %xmm2
movapd %xmm4, -17616(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm2
subsd %xmm2, %xmm5
movsd -1352(%rbp), %xmm15 ## 8-byte Reload
## xmm15 = mem[0],zero
movapd %xmm15, %xmm2
movsd LCPI19_75(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd %xmm4, %xmm2
movsd %xmm2, -7232(%rbp) ## 8-byte Spill
addsd %xmm2, %xmm5
movapd %xmm5, %xmm6
movsd LCPI19_73(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm6
addsd %xmm7, %xmm6
movsd -768(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd LCPI19_15(%rip), %xmm0
movapd -8016(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm11, %xmm2
mulsd LCPI19_22(%rip), %xmm2
addsd %xmm0, %xmm2
addsd %xmm6, %xmm2
movsd LCPI19_13(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd %xmm4, %xmm6
movapd %xmm4, %xmm10
addsd -2848(%rbp), %xmm6 ## 16-byte Folded Reload
movsd LCPI19_23(%rip), %xmm4 ## xmm4 = mem[0],zero
movapd -336(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm4, %xmm7
movapd -3344(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm4, %xmm0
subsd %xmm0, %xmm7
movapd %xmm3, -13632(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm1
addsd %xmm7, %xmm1
movapd %xmm14, %xmm4
movapd %xmm9, -16464(%rbp) ## 16-byte Spill
mulsd %xmm9, %xmm4
subsd %xmm4, %xmm1
movapd -3248(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm3, %xmm12
subsd %xmm12, %xmm1
movsd -1072(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm7, %xmm3
movsd %xmm3, -7224(%rbp) ## 8-byte Spill
subsd %xmm3, %xmm1
movapd -816(%rbp), %xmm0 ## 16-byte Reload
mulsd -2352(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
movsd LCPI19_64(%rip), %xmm1 ## xmm1 = mem[0],zero
movsd -1040(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm1, %xmm3
addsd %xmm0, %xmm3
movsd LCPI19_72(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm5
addsd %xmm3, %xmm5
movsd -1720(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movapd %xmm8, %xmm4
mulsd %xmm8, %xmm1
movapd %xmm15, %xmm0
subsd %xmm1, %xmm0
movsd LCPI19_74(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
movapd -4896(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm13, %xmm1
movapd %xmm13, %xmm3
movapd %xmm1, -4688(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm3
mulsd LCPI19_75(%rip), %xmm3
addsd %xmm0, %xmm3
addsd %xmm7, %xmm3
addsd -6272(%rbp), %xmm3 ## 16-byte Folded Reload
movapd -10576(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm8, %xmm0
addsd %xmm3, %xmm0
movapd -10256(%rbp), %xmm4 ## 16-byte Reload
addsd -9856(%rbp), %xmm4 ## 8-byte Folded Reload
movapd -8176(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm13, %xmm1
addsd %xmm4, %xmm1
movapd %xmm4, %xmm9
movapd %xmm13, %xmm7
movapd %xmm1, -17584(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm7
addsd %xmm0, %xmm7
addsd %xmm5, %xmm6
movapd %xmm7, %xmm4
mulsd LCPI19_114(%rip), %xmm4
addsd %xmm6, %xmm4
mulsd %xmm10, %xmm5
addsd %xmm2, %xmm5
mulsd LCPI19_31(%rip), %xmm7
addsd %xmm5, %xmm7
movapd -1312(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm13
mulsd %xmm4, %xmm13
movapd -1328(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm0
mulsd %xmm7, %xmm0
subsd %xmm0, %xmm13
movsd LCPI19_111(%rip), %xmm0 ## xmm0 = mem[0],zero
movapd %xmm11, %xmm14
movapd %xmm11, %xmm1
mulsd %xmm0, %xmm1
movapd %xmm0, %xmm10
movapd -1216(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm0
mulsd %xmm13, %xmm0
subsd %xmm1, %xmm0
movsd LCPI19_115(%rip), %xmm11 ## xmm11 = mem[0],zero
movapd %xmm14, %xmm1
mulsd %xmm11, %xmm1
movapd %xmm7, -16320(%rbp) ## 16-byte Spill
mulsd %xmm7, %xmm2
subsd %xmm1, %xmm2
movapd %xmm5, %xmm1
movapd %xmm4, -16336(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm1
addsd %xmm2, %xmm1
movsd LCPI19_85(%rip), %xmm2 ## xmm2 = mem[0],zero
movsd -3328(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm2, %xmm7
movapd %xmm3, -12144(%rbp) ## 16-byte Spill
addsd %xmm3, %xmm7
movapd -12352(%rbp), %xmm2 ## 16-byte Reload
movsd -2800(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm5, %xmm2
addsd %xmm7, %xmm2
movapd -9552(%rbp), %xmm4 ## 16-byte Reload
movsd -1200(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd %xmm6, %xmm4
movapd %xmm9, -10256(%rbp) ## 16-byte Spill
addsd %xmm9, %xmm4
movapd %xmm6, %xmm3
movapd %xmm4, -17600(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm3
addsd %xmm2, %xmm3
mulsd %xmm11, %xmm3
addsd %xmm1, %xmm3
movapd -6896(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm5, %xmm1
movapd %xmm5, %xmm12
movapd -9536(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm6, %xmm4
movapd %xmm6, %xmm2
movapd %xmm4, -17552(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm2
subsd %xmm2, %xmm1
movsd -2864(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
movapd %xmm5, %xmm4
movsd LCPI19_83(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm4
movsd %xmm4, -4304(%rbp) ## 8-byte Spill
addsd %xmm4, %xmm1
mulsd LCPI19_116(%rip), %xmm1
subsd %xmm1, %xmm3
movapd -1936(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm11, %xmm1
mulsd %xmm3, %xmm1
subsd %xmm1, %xmm0
movapd -6144(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm12, %xmm1
movapd %xmm5, %xmm2
subsd %xmm1, %xmm2
movsd LCPI19_82(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm5, %xmm2
movapd -8208(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm6, %xmm1
movapd %xmm6, %xmm4
movapd %xmm1, -8896(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm4
mulsd %xmm5, %xmm4
addsd %xmm2, %xmm4
movsd %xmm7, -3328(%rbp) ## 8-byte Spill
addsd %xmm7, %xmm4
movapd -10560(%rbp), %xmm1 ## 16-byte Reload
movsd -1280(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm7, %xmm1
movsd %xmm4, -6320(%rbp) ## 8-byte Spill
addsd %xmm4, %xmm1
movapd -4864(%rbp), %xmm4 ## 16-byte Reload
movsd -704(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm2, %xmm4
addsd %xmm9, %xmm4
movapd %xmm2, %xmm5
movapd %xmm2, %xmm9
movapd %xmm4, -17568(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm5
addsd %xmm1, %xmm5
movapd %xmm5, %xmm1
mulsd %xmm10, %xmm1
addsd %xmm0, %xmm1
movapd %xmm1, -10960(%rbp) ## 16-byte Spill
movsd LCPI19_30(%rip), %xmm4 ## xmm4 = mem[0],zero
movapd %xmm14, %xmm0
mulsd %xmm4, %xmm0
movapd %xmm11, %xmm1
movapd %xmm13, -16224(%rbp) ## 16-byte Spill
mulsd %xmm13, %xmm1
subsd %xmm0, %xmm1
movapd %xmm8, %xmm0
movapd %xmm3, -16208(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm0
addsd %xmm1, %xmm0
mulsd %xmm4, %xmm5
addsd %xmm0, %xmm5
movapd -6944(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm7, %xmm1
mulsd %xmm7, %xmm0
movapd -6000(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm9, %xmm1
mulsd %xmm9, %xmm2
movapd %xmm2, -17536(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm1
subsd %xmm1, %xmm0
movsd LCPI19_90(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd -800(%rbp), %xmm1 ## 8-byte Folded Reload
movapd %xmm1, -5040(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm0
mulsd LCPI19_117(%rip), %xmm0
subsd %xmm0, %xmm5
movapd %xmm5, -10976(%rbp) ## 16-byte Spill
movsd -3632(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm1, %xmm1
movsd LCPI19_0(%rip), %xmm0 ## xmm0 = mem[0],zero
subsd %xmm1, %xmm0
movapd -3072(%rbp), %xmm13 ## 16-byte Reload
mulsd -4800(%rbp), %xmm13 ## 8-byte Folded Reload
movsd LCPI19_1(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd %xmm4, %xmm13
movapd %xmm0, -2864(%rbp) ## 16-byte Spill
mulsd %xmm0, %xmm13
movsd LCPI19_110(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm13
movapd -1424(%rbp), %xmm0 ## 16-byte Reload
addsd %xmm0, %xmm0
movsd -3136(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
xorps %xmm2, %xmm2
sqrtsd %xmm1, %xmm2
mulsd LCPI19_33(%rip), %xmm2
movsd -6432(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd -3744(%rbp), %xmm3 ## 8-byte Folded Reload
mulsd LCPI19_41(%rip), %xmm3
movsd %xmm2, -3344(%rbp) ## 8-byte Spill
mulsd %xmm2, %xmm3
addsd %xmm1, %xmm1
movsd %xmm1, -3136(%rbp) ## 8-byte Spill
divsd %xmm1, %xmm3
movapd %xmm0, -1424(%rbp) ## 16-byte Spill
mulsd %xmm0, %xmm3
subsd %xmm3, %xmm13
movapd -208(%rbp), %xmm12 ## 16-byte Reload
movapd %xmm12, %xmm0
mulsd -5320(%rbp), %xmm0 ## 8-byte Folded Reload
movapd -1520(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm1
mulsd -11904(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -576(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm0
mulsd -9248(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
addsd %xmm0, %xmm13
movapd %xmm0, %xmm1
mulsd %xmm4, %xmm1
subsd %xmm1, %xmm13
movapd %xmm13, %xmm11
subsd %xmm0, %xmm11
movapd -3424(%rbp), %xmm0 ## 16-byte Reload
addsd %xmm0, %xmm0
movapd -6704(%rbp), %xmm1 ## 16-byte Reload
movsd %xmm0, -768(%rbp) ## 8-byte Spill
mulsd %xmm0, %xmm1
movapd -6448(%rbp), %xmm0 ## 16-byte Reload
mulsd -3792(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm1
movapd -96(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm15
movapd %xmm0, %xmm4
mulsd -4704(%rbp), %xmm15 ## 16-byte Folded Reload
movapd -144(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm1, %xmm0
movapd %xmm1, -6704(%rbp) ## 16-byte Spill
subsd %xmm0, %xmm15
movapd -256(%rbp), %xmm10 ## 16-byte Reload
mulsd %xmm1, %xmm10
movapd %xmm4, %xmm0
mulsd -9328(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm10
movapd %xmm12, %xmm2
mulsd %xmm15, %xmm2
movapd %xmm12, %xmm0
movapd -6512(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm4, %xmm0
movapd %xmm4, %xmm14
movsd %xmm0, -7648(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm2
addsd %xmm10, %xmm2
movapd -64(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm7
movapd -1744(%rbp), %xmm8 ## 16-byte Reload
mulsd %xmm8, %xmm7
addsd %xmm2, %xmm7
movapd %xmm6, %xmm2
mulsd %xmm15, %xmm2
movsd %xmm15, -880(%rbp) ## 8-byte Spill
movapd %xmm6, %xmm0
mulsd %xmm14, %xmm0
movsd %xmm0, -6192(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm2
movapd -1488(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm10, %xmm3
addsd %xmm2, %xmm3
movapd -176(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm1
mulsd %xmm8, %xmm1
addsd %xmm3, %xmm1
movapd %xmm4, %xmm2
movsd -2016(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm5, %xmm2
movsd %xmm2, -1352(%rbp) ## 8-byte Spill
subsd %xmm2, %xmm7
movapd %xmm12, %xmm2
mulsd %xmm7, %xmm2
mulsd %xmm5, %xmm0
movapd %xmm0, -6176(%rbp) ## 16-byte Spill
subsd %xmm0, %xmm1
movapd %xmm1, -336(%rbp) ## 16-byte Spill
movapd %xmm6, %xmm3
mulsd %xmm1, %xmm3
addsd %xmm2, %xmm3
movapd %xmm9, %xmm2
mulsd %xmm15, %xmm2
movapd %xmm9, %xmm4
movapd %xmm9, %xmm0
mulsd %xmm14, %xmm4
movapd %xmm14, %xmm6
movsd %xmm4, -8256(%rbp) ## 8-byte Spill
subsd %xmm4, %xmm2
movapd -864(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm10, -16368(%rbp) ## 16-byte Spill
mulsd %xmm10, %xmm4
addsd %xmm2, %xmm4
movapd -736(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm2, %xmm8
addsd %xmm4, %xmm8
mulsd %xmm5, %xmm2
movsd %xmm2, -8240(%rbp) ## 8-byte Spill
subsd %xmm2, %xmm8
movapd %xmm0, %xmm2
mulsd %xmm8, %xmm2
addsd %xmm3, %xmm2
movsd -2240(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm3, %xmm3
movsd LCPI19_0(%rip), %xmm5 ## xmm5 = mem[0],zero
movapd %xmm5, %xmm0
subsd %xmm3, %xmm0
movapd -4000(%rbp), %xmm15 ## 16-byte Reload
mulsd -3728(%rbp), %xmm15 ## 8-byte Folded Reload
movsd LCPI19_1(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm15
movapd %xmm0, -3648(%rbp) ## 16-byte Spill
mulsd %xmm0, %xmm15
movsd LCPI19_110(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm15
movapd %xmm0, %xmm9
movsd -1392(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
addsd %xmm10, %xmm10
movsd -3824(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
xorps %xmm4, %xmm4
sqrtsd %xmm0, %xmm4
movsd LCPI19_33(%rip), %xmm14 ## xmm14 = mem[0],zero
mulsd %xmm14, %xmm4
movsd -3952(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd -3104(%rbp), %xmm3 ## 8-byte Folded Reload
mulsd LCPI19_42(%rip), %xmm3
movsd %xmm4, -2848(%rbp) ## 8-byte Spill
mulsd %xmm4, %xmm3
addsd %xmm0, %xmm0
movsd %xmm0, -3824(%rbp) ## 8-byte Spill
divsd %xmm0, %xmm3
movsd %xmm10, -1392(%rbp) ## 8-byte Spill
mulsd %xmm10, %xmm3
subsd %xmm3, %xmm15
subsd %xmm2, %xmm11
addsd %xmm2, %xmm15
movapd %xmm1, %xmm10
mulsd %xmm1, %xmm2
subsd %xmm2, %xmm15
addsd %xmm11, %xmm15
mulsd -1056(%rbp), %xmm12 ## 8-byte Folded Reload
movapd -1344(%rbp), %xmm2 ## 16-byte Reload
mulsd -3616(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm12, %xmm2
movapd -2416(%rbp), %xmm1 ## 16-byte Reload
mulsd -2768(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm2, %xmm1
movsd -5600(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm0, %xmm0
movapd %xmm5, %xmm4
subsd %xmm0, %xmm4
movsd -3712(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
xorps %xmm3, %xmm3
sqrtsd %xmm2, %xmm3
mulsd %xmm14, %xmm3
movsd -432(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd -1360(%rbp), %xmm5 ## 8-byte Folded Reload
mulsd %xmm10, %xmm5
movapd %xmm4, %xmm0
movapd %xmm4, -3072(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm5
mulsd %xmm9, %xmm5
movsd -2480(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
addsd %xmm0, %xmm0
movsd -6416(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd -4048(%rbp), %xmm4 ## 16-byte Folded Reload
mulsd LCPI19_69(%rip), %xmm4
movsd %xmm3, -3952(%rbp) ## 8-byte Spill
mulsd %xmm3, %xmm4
addsd %xmm2, %xmm2
movsd %xmm2, -3712(%rbp) ## 8-byte Spill
divsd %xmm2, %xmm4
movsd %xmm0, -2480(%rbp) ## 8-byte Spill
mulsd %xmm0, %xmm4
subsd %xmm4, %xmm5
movapd %xmm15, %xmm0
subsd %xmm1, %xmm0
movapd %xmm0, -9136(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm5
mulsd %xmm10, %xmm1
subsd %xmm1, %xmm5
movsd %xmm5, -432(%rbp) ## 8-byte Spill
movapd -2384(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm1
mulsd -2448(%rbp), %xmm1 ## 16-byte Folded Reload
mulsd -5648(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm0, -8304(%rbp) ## 16-byte Spill
subsd %xmm0, %xmm1
movapd -1024(%rbp), %xmm2 ## 16-byte Reload
mulsd -5320(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm1, %xmm2
movapd -2368(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm1
mulsd -880(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm2, %xmm1
mulsd %xmm6, %xmm0
movsd %xmm0, -8288(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm1
mulsd -2080(%rbp), %xmm7 ## 16-byte Folded Reload
addsd %xmm1, %xmm7
movapd %xmm15, %xmm1
mulsd LCPI19_25(%rip), %xmm1
subsd %xmm1, %xmm7
movsd -2112(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movapd -5616(%rbp), %xmm9 ## 16-byte Reload
mulsd %xmm9, %xmm1
movapd -9392(%rbp), %xmm10 ## 16-byte Reload
movapd %xmm10, %xmm2
mulsd -1776(%rbp), %xmm2 ## 16-byte Folded Reload
subsd %xmm2, %xmm1
movapd -6832(%rbp), %xmm6 ## 16-byte Reload
movapd -144(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm5, %xmm6
movapd -992(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm6, %xmm2
addsd %xmm1, %xmm2
movapd -6464(%rbp), %xmm0 ## 16-byte Reload
mulsd -2160(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm2
movapd -2064(%rbp), %xmm1 ## 16-byte Reload
mulsd -8928(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm2, %xmm1
movapd -4704(%rbp), %xmm0 ## 16-byte Reload
mulsd -2176(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm1
movapd -2608(%rbp), %xmm2 ## 16-byte Reload
mulsd -6704(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm1, %xmm2
movsd -8864(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movapd -96(%rbp), %xmm14 ## 16-byte Reload
mulsd %xmm14, %xmm1
movsd -6496(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
movapd %xmm4, %xmm3
movapd -256(%rbp), %xmm11 ## 16-byte Reload
mulsd %xmm11, %xmm3
addsd %xmm1, %xmm3
movapd -8080(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm5, %xmm0
addsd %xmm3, %xmm0
movapd %xmm0, -14272(%rbp) ## 16-byte Spill
movsd LCPI19_61(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
movapd %xmm0, -7664(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm2
movsd -4224(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm0, %xmm4
movsd %xmm4, -8272(%rbp) ## 8-byte Spill
addsd %xmm4, %xmm2
movapd -6096(%rbp), %xmm3 ## 16-byte Reload
movapd -4880(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm1, %xmm3
movapd %xmm3, -6464(%rbp) ## 16-byte Spill
addsd %xmm3, %xmm2
movapd -624(%rbp), %xmm3 ## 16-byte Reload
movsd %xmm2, -12480(%rbp) ## 8-byte Spill
mulsd %xmm2, %xmm3
addsd %xmm7, %xmm3
mulsd %xmm1, %xmm11
movapd %xmm14, %xmm2
movapd %xmm14, %xmm1
mulsd %xmm0, %xmm1
movsd %xmm1, -12472(%rbp) ## 8-byte Spill
subsd %xmm1, %xmm11
movapd -1520(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm11, %xmm0
movapd -176(%rbp), %xmm14 ## 16-byte Reload
movapd -8976(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm1, %xmm14
addsd %xmm0, %xmm14
movapd -576(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm11, %xmm0
movapd -736(%rbp), %xmm12 ## 16-byte Reload
mulsd %xmm1, %xmm12
addsd %xmm0, %xmm12
movsd -2512(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm4, %xmm10
mulsd %xmm2, %xmm9
subsd %xmm9, %xmm10
movapd %xmm6, -14016(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm5
subsd %xmm5, %xmm10
movsd -360(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -976(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -800(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
subsd %xmm0, %xmm1
movsd -560(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -3032(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm1
movsd %xmm1, -1072(%rbp) ## 8-byte Spill
movsd -304(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -960(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm1, %xmm7
subsd %xmm0, %xmm7
movsd -2704(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
subsd -1952(%rbp), %xmm1 ## 16-byte Folded Reload
movsd -1296(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm1, -2704(%rbp) ## 8-byte Spill
mulsd %xmm1, %xmm0
subsd %xmm0, %xmm7
movsd %xmm7, -4576(%rbp) ## 8-byte Spill
movsd -160(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
movsd -320(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
mulsd %xmm9, %xmm2
subsd %xmm2, %xmm7
movapd -48(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm2
mulsd -3024(%rbp), %xmm2 ## 8-byte Folded Reload
subsd %xmm2, %xmm7
movsd %xmm7, -1952(%rbp) ## 8-byte Spill
movsd -296(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm9, %xmm2
movapd %xmm7, %xmm0
subsd %xmm2, %xmm0
movapd %xmm5, %xmm2
mulsd -3016(%rbp), %xmm2 ## 8-byte Folded Reload
subsd %xmm2, %xmm0
movsd LCPI19_105(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm14, %xmm1
movsd %xmm1, -4312(%rbp) ## 8-byte Spill
addsd %xmm1, %xmm3
movsd LCPI19_104(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm12, %xmm1
movsd %xmm1, -4320(%rbp) ## 8-byte Spill
addsd %xmm1, %xmm3
movapd %xmm0, %xmm1
movsd LCPI19_61(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm1
movsd %xmm1, -3472(%rbp) ## 8-byte Spill
addsd %xmm1, %xmm10
movapd %xmm10, %xmm2
mulsd LCPI19_103(%rip), %xmm2
addsd %xmm3, %xmm2
movapd -1568(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm4, %xmm3
movapd -992(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm4
mulsd -5936(%rbp), %xmm4 ## 16-byte Folded Reload
subsd %xmm4, %xmm3
movsd %xmm0, -9696(%rbp) ## 8-byte Spill
movapd %xmm0, %xmm4
subsd %xmm3, %xmm4
movsd -2112(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movapd -144(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm7, %xmm0
movapd %xmm1, %xmm3
mulsd -96(%rbp), %xmm3 ## 16-byte Folded Reload
subsd %xmm3, %xmm0
movapd -256(%rbp), %xmm3 ## 16-byte Reload
mulsd -1776(%rbp), %xmm3 ## 16-byte Folded Reload
movsd %xmm0, -5616(%rbp) ## 8-byte Spill
mulsd %xmm0, %xmm7
subsd %xmm7, %xmm3
movsd LCPI19_50(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm4
mulsd %xmm0, %xmm3
addsd %xmm4, %xmm3
movapd -6512(%rbp), %xmm0 ## 16-byte Reload
movapd -5648(%rbp), %xmm6 ## 16-byte Reload
addsd %xmm6, %xmm0
addsd %xmm3, %xmm0
movapd %xmm0, -4528(%rbp) ## 16-byte Spill
movapd -10528(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm9, %xmm4
addsd %xmm0, %xmm4
movapd -8928(%rbp), %xmm1 ## 16-byte Reload
addsd -6704(%rbp), %xmm1 ## 16-byte Folded Reload
movapd %xmm1, -7968(%rbp) ## 16-byte Spill
movapd -10544(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm5, %xmm0
addsd %xmm1, %xmm0
movapd %xmm5, %xmm1
movapd %xmm0, -17520(%rbp) ## 16-byte Spill
mulsd %xmm0, %xmm1
addsd %xmm4, %xmm1
movapd %xmm1, %xmm4
movapd %xmm1, %xmm3
movapd %xmm1, -816(%rbp) ## 16-byte Spill
mulsd LCPI19_25(%rip), %xmm4
addsd %xmm2, %xmm4
movapd -11344(%rbp), %xmm2 ## 16-byte Reload
mulsd -72(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm4, %xmm2
movapd -11328(%rbp), %xmm0 ## 16-byte Reload
mulsd -624(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm0, -17504(%rbp) ## 16-byte Spill
mulsd %xmm0, %xmm5
subsd %xmm5, %xmm2
movapd -1824(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm4
mulsd -1968(%rbp), %xmm4 ## 16-byte Folded Reload
subsd %xmm4, %xmm2
mulsd -1984(%rbp), %xmm0 ## 8-byte Folded Reload
movsd %xmm0, -7216(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm2
movsd -1056(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -2304(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm2, %xmm1
movsd %xmm1, -1056(%rbp) ## 8-byte Spill
movapd -9136(%rbp), %xmm0 ## 16-byte Reload
addsd -432(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm0, -9136(%rbp) ## 16-byte Spill
movapd %xmm1, %xmm2
mulsd LCPI19_108(%rip), %xmm2
movapd %xmm0, %xmm1
mulsd LCPI19_21(%rip), %xmm1
addsd %xmm2, %xmm1
movapd %xmm1, -1040(%rbp) ## 16-byte Spill
movapd %xmm15, %xmm2
mulsd LCPI19_24(%rip), %xmm2
movapd %xmm3, %xmm4
mulsd LCPI19_112(%rip), %xmm4
addsd %xmm2, %xmm4
movapd -1888(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm2
mulsd -2448(%rbp), %xmm2 ## 16-byte Folded Reload
mulsd %xmm6, %xmm7
movsd %xmm7, -7208(%rbp) ## 8-byte Spill
subsd %xmm7, %xmm2
movapd -1024(%rbp), %xmm7 ## 16-byte Reload
mulsd -9248(%rbp), %xmm7 ## 16-byte Folded Reload
addsd %xmm2, %xmm7
mulsd LCPI19_14(%rip), %xmm13
addsd %xmm7, %xmm13
movapd -1584(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm2
mulsd -880(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm13, %xmm2
movapd -6512(%rbp), %xmm9 ## 16-byte Reload
mulsd %xmm9, %xmm7
movapd %xmm7, -12128(%rbp) ## 16-byte Spill
subsd %xmm7, %xmm2
movapd -2080(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm5, %xmm8
addsd %xmm2, %xmm8
movsd -6528(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm11, %xmm0
movsd %xmm0, -7200(%rbp) ## 8-byte Spill
addsd %xmm0, %xmm8
movapd %xmm14, %xmm0
movsd LCPI19_113(%rip), %xmm13 ## xmm13 = mem[0],zero
mulsd %xmm13, %xmm0
movapd %xmm0, -12112(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm8
movsd LCPI19_54(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm12, %xmm0
movsd %xmm0, -5440(%rbp) ## 8-byte Spill
addsd %xmm0, %xmm8
movapd %xmm10, %xmm0
movsd LCPI19_47(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
addsd %xmm8, %xmm0
movapd -5136(%rbp), %xmm2 ## 16-byte Reload
movapd -11280(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm1, %xmm2
movsd %xmm2, -7192(%rbp) ## 8-byte Spill
addsd %xmm2, %xmm0
movsd LCPI19_109(%rip), %xmm2 ## xmm2 = mem[0],zero
movapd -14064(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm3, %xmm2
movapd %xmm2, -6432(%rbp) ## 16-byte Spill
addsd %xmm2, %xmm0
movapd -496(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm0, %xmm2
addsd %xmm4, %xmm2
movapd -896(%rbp), %xmm4 ## 16-byte Reload
movapd -2448(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm4, %xmm6
mulsd -5648(%rbp), %xmm4 ## 16-byte Folded Reload
movsd %xmm4, -7184(%rbp) ## 8-byte Spill
subsd %xmm4, %xmm6
movapd -1024(%rbp), %xmm4 ## 16-byte Reload
mulsd -11904(%rbp), %xmm4 ## 16-byte Folded Reload
addsd %xmm6, %xmm4
movapd -1904(%rbp), %xmm7 ## 16-byte Reload
movsd -880(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd %xmm7, %xmm6
addsd %xmm4, %xmm6
mulsd %xmm9, %xmm7
movapd %xmm7, -12096(%rbp) ## 16-byte Spill
subsd %xmm7, %xmm6
movapd -336(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm5, %xmm4
addsd %xmm6, %xmm4
mulsd LCPI19_57(%rip), %xmm14
mulsd -6544(%rbp), %xmm11 ## 8-byte Folded Reload
movsd %xmm11, -4336(%rbp) ## 8-byte Spill
addsd %xmm11, %xmm4
movapd %xmm14, -8320(%rbp) ## 16-byte Spill
addsd %xmm14, %xmm4
mulsd %xmm13, %xmm12
movsd %xmm12, -4328(%rbp) ## 8-byte Spill
addsd %xmm12, %xmm4
movapd %xmm4, %xmm5
movsd LCPI19_48(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd %xmm4, %xmm10
addsd %xmm5, %xmm10
movapd -5152(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm1, %xmm4
movapd %xmm4, -6416(%rbp) ## 16-byte Spill
addsd %xmm4, %xmm10
movsd LCPI19_107(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm3, %xmm1
movsd %xmm1, -7176(%rbp) ## 8-byte Spill
subsd %xmm1, %xmm10
movapd -752(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm4
mulsd %xmm10, %xmm4
addsd %xmm2, %xmm4
movsd -2032(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movapd %xmm1, %xmm2
mulsd -1968(%rbp), %xmm2 ## 16-byte Folded Reload
subsd %xmm2, %xmm4
mulsd -1984(%rbp), %xmm1 ## 8-byte Folded Reload
movsd %xmm1, -7160(%rbp) ## 8-byte Spill
subsd %xmm1, %xmm4
movapd -3616(%rbp), %xmm1 ## 16-byte Reload
mulsd -2304(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm4, %xmm1
movapd %xmm1, %xmm3
movapd -6912(%rbp), %xmm5 ## 16-byte Reload
movsd -320(%rbp), %xmm13 ## 8-byte Reload
## xmm13 = mem[0],zero
mulsd %xmm13, %xmm5
movapd -6928(%rbp), %xmm1 ## 16-byte Reload
movapd -48(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm7, %xmm1
movapd %xmm7, %xmm2
movapd %xmm1, -17488(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm2
subsd %xmm2, %xmm5
movsd -1952(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
movapd %xmm9, %xmm1
movsd LCPI19_75(%rip), %xmm11 ## xmm11 = mem[0],zero
mulsd %xmm11, %xmm1
movsd %xmm1, -7168(%rbp) ## 8-byte Spill
addsd %xmm1, %xmm5
movapd %xmm5, %xmm6
mulsd LCPI19_73(%rip), %xmm6
addsd %xmm3, %xmm6
movsd -1056(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd LCPI19_15(%rip), %xmm1
movapd -9136(%rbp), %xmm14 ## 16-byte Reload
movapd %xmm14, %xmm2
mulsd LCPI19_22(%rip), %xmm2
subsd %xmm1, %xmm2
addsd %xmm6, %xmm2
movsd LCPI19_13(%rip), %xmm12 ## xmm12 = mem[0],zero
mulsd %xmm12, %xmm6
addsd -1040(%rbp), %xmm6 ## 16-byte Folded Reload
movsd LCPI19_23(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm15
movapd -816(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm1, %xmm3
subsd %xmm3, %xmm15
movapd %xmm8, %xmm3
movapd %xmm0, -16256(%rbp) ## 16-byte Spill
mulsd %xmm0, %xmm3
addsd %xmm15, %xmm3
movapd -496(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm10, -16288(%rbp) ## 16-byte Spill
mulsd %xmm10, %xmm4
subsd %xmm4, %xmm3
movapd -1808(%rbp), %xmm0 ## 16-byte Reload
movapd -1968(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm0, %xmm1
subsd %xmm1, %xmm3
movsd -1984(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
mulsd %xmm8, %xmm0
movsd %xmm0, -7152(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm3
movapd -2768(%rbp), %xmm0 ## 16-byte Reload
mulsd -2304(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm3, %xmm0
movsd -432(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd LCPI19_64(%rip), %xmm1
addsd %xmm0, %xmm1
mulsd LCPI19_72(%rip), %xmm5
addsd %xmm1, %xmm5
movsd -3872(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
movapd %xmm13, %xmm1
mulsd %xmm13, %xmm3
movapd %xmm9, %xmm0
subsd %xmm3, %xmm0
mulsd LCPI19_74(%rip), %xmm0
movapd %xmm0, %xmm3
movapd -3232(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm7, %xmm0
movapd %xmm7, %xmm4
movapd %xmm0, -5600(%rbp) ## 16-byte Spill
mulsd %xmm0, %xmm4
mulsd %xmm11, %xmm4
addsd %xmm3, %xmm4
addsd %xmm8, %xmm4
addsd -4528(%rbp), %xmm4 ## 16-byte Folded Reload
movapd -10496(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm13, %xmm0
addsd %xmm4, %xmm0
movapd -7968(%rbp), %xmm15 ## 16-byte Reload
addsd -9840(%rbp), %xmm15 ## 8-byte Folded Reload
movapd -8064(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm7, %xmm1
addsd %xmm15, %xmm1
movapd %xmm1, -17456(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm7
addsd %xmm0, %xmm7
addsd %xmm5, %xmm6
movapd %xmm7, %xmm1
mulsd LCPI19_114(%rip), %xmm1
addsd %xmm6, %xmm1
mulsd %xmm12, %xmm5
addsd %xmm2, %xmm5
mulsd LCPI19_31(%rip), %xmm7
addsd %xmm5, %xmm7
movapd -1616(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm6
mulsd %xmm1, %xmm6
movapd -1088(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm0
mulsd %xmm7, %xmm0
subsd %xmm0, %xmm6
movapd %xmm14, %xmm2
movsd LCPI19_111(%rip), %xmm10 ## xmm10 = mem[0],zero
mulsd %xmm10, %xmm2
movapd -848(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm0
mulsd %xmm6, %xmm0
subsd %xmm2, %xmm0
movapd %xmm14, %xmm2
movsd LCPI19_115(%rip), %xmm9 ## xmm9 = mem[0],zero
mulsd %xmm9, %xmm2
movapd %xmm7, -16160(%rbp) ## 16-byte Spill
mulsd %xmm7, %xmm3
subsd %xmm2, %xmm3
movapd %xmm5, %xmm2
movapd %xmm1, -16176(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm2
addsd %xmm3, %xmm2
movsd -4576(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd LCPI19_85(%rip), %xmm1
movapd %xmm4, -12080(%rbp) ## 16-byte Spill
addsd %xmm4, %xmm1
movapd -12336(%rbp), %xmm3 ## 16-byte Reload
movsd -960(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm7, %xmm3
addsd %xmm1, %xmm3
movapd %xmm1, %xmm12
movapd -9504(%rbp), %xmm1 ## 16-byte Reload
movsd -1296(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm4, %xmm1
addsd %xmm15, %xmm1
movapd %xmm4, %xmm5
movapd %xmm1, -17472(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm5
addsd %xmm3, %xmm5
mulsd %xmm9, %xmm5
addsd %xmm2, %xmm5
movapd -6848(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm7, %xmm2
movapd -9488(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm4, %xmm1
movapd %xmm4, %xmm3
movapd %xmm4, %xmm11
movapd %xmm1, -17424(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm3
subsd %xmm3, %xmm2
movsd -1072(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
movapd %xmm4, %xmm1
mulsd LCPI19_83(%rip), %xmm1
movsd %xmm1, -2960(%rbp) ## 8-byte Spill
addsd %xmm1, %xmm2
mulsd LCPI19_116(%rip), %xmm2
subsd %xmm2, %xmm5
movapd -2144(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm2
mulsd %xmm5, %xmm2
subsd %xmm2, %xmm0
movapd -4176(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm7, %xmm2
movapd %xmm4, %xmm1
subsd %xmm2, %xmm1
movsd LCPI19_82(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm1
movapd %xmm1, %xmm4
movapd -5872(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm11, %xmm1
movapd %xmm1, -8880(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm11
mulsd %xmm2, %xmm11
addsd %xmm4, %xmm11
movsd %xmm12, -4576(%rbp) ## 8-byte Spill
addsd %xmm12, %xmm11
movapd -12320(%rbp), %xmm1 ## 16-byte Reload
movsd -976(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm7, %xmm1
addsd %xmm11, %xmm1
movapd -9344(%rbp), %xmm2 ## 16-byte Reload
movsd -560(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
mulsd %xmm9, %xmm2
addsd %xmm15, %xmm2
movapd %xmm9, %xmm4
movapd %xmm2, -17440(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm4
addsd %xmm1, %xmm4
movapd %xmm4, %xmm13
mulsd %xmm10, %xmm13
addsd %xmm0, %xmm13
movapd %xmm14, %xmm0
movsd LCPI19_30(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm0
movapd %xmm3, %xmm1
movapd %xmm6, -16112(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm1
subsd %xmm0, %xmm1
movapd %xmm8, %xmm0
movapd %xmm5, -16096(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm0
addsd %xmm1, %xmm0
mulsd %xmm2, %xmm4
addsd %xmm0, %xmm4
movapd -6944(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm7, %xmm1
mulsd %xmm7, %xmm0
movapd -6000(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm9, %xmm3
mulsd %xmm9, %xmm2
movapd %xmm9, %xmm1
movapd %xmm9, %xmm7
movapd %xmm2, -17408(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm1
subsd %xmm1, %xmm0
addsd -5040(%rbp), %xmm0 ## 16-byte Folded Reload
mulsd LCPI19_117(%rip), %xmm0
subsd %xmm0, %xmm4
movapd %xmm4, -9920(%rbp) ## 16-byte Spill
movapd -5888(%rbp), %xmm5 ## 16-byte Reload
movsd -128(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm1, %xmm5
movapd -8096(%rbp), %xmm0 ## 16-byte Reload
movsd -704(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm4, %xmm0
movapd %xmm0, -11536(%rbp) ## 16-byte Spill
movapd -9008(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm1, %xmm0
movapd %xmm0, -13600(%rbp) ## 16-byte Spill
movapd -8992(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm1, %xmm0
movapd %xmm0, -13840(%rbp) ## 16-byte Spill
movapd -9312(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm9, %xmm0
movapd %xmm0, -11568(%rbp) ## 16-byte Spill
movsd -7136(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
movapd %xmm12, %xmm0
mulsd %xmm1, %xmm0
movsd %xmm0, -9616(%rbp) ## 8-byte Spill
movsd -3000(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movapd %xmm0, %xmm2
mulsd %xmm1, %xmm2
movsd %xmm2, -7008(%rbp) ## 8-byte Spill
movapd -6784(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm3
mulsd %xmm1, %xmm3
movapd %xmm3, -11136(%rbp) ## 16-byte Spill
movsd -5272(%rbp), %xmm14 ## 8-byte Reload
## xmm14 = mem[0],zero
movapd %xmm14, %xmm3
mulsd %xmm1, %xmm3
movsd %xmm3, -9624(%rbp) ## 8-byte Spill
movapd -9472(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm6
mulsd %xmm4, %xmm6
movapd %xmm6, -13856(%rbp) ## 16-byte Spill
movapd %xmm1, %xmm9
movapd -10096(%rbp), %xmm8 ## 16-byte Reload
mulsd %xmm8, %xmm9
addsd -10256(%rbp), %xmm9 ## 16-byte Folded Reload
movapd -12304(%rbp), %xmm10 ## 16-byte Reload
movapd %xmm10, %xmm6
mulsd %xmm7, %xmm6
movapd %xmm6, -11520(%rbp) ## 16-byte Spill
mulsd -9104(%rbp), %xmm1 ## 16-byte Folded Reload
movsd %xmm1, -12464(%rbp) ## 8-byte Spill
addsd %xmm1, %xmm15
movapd %xmm15, -7968(%rbp) ## 16-byte Spill
movsd -2272(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
addsd %xmm7, %xmm7
movsd -1536(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
movapd %xmm6, %xmm4
addsd %xmm6, %xmm4
movq (%r14), %rax
testq %rax, %rax
movsd %xmm6, -1536(%rbp) ## 8-byte Spill
movapd %xmm13, -10944(%rbp) ## 16-byte Spill
movapd %xmm5, -11152(%rbp) ## 16-byte Spill
movsd %xmm9, -9688(%rbp) ## 8-byte Spill
movsd %xmm4, -880(%rbp) ## 8-byte Spill
movsd %xmm7, -1072(%rbp) ## 8-byte Spill
movsd %xmm11, -5584(%rbp) ## 8-byte Spill
je LBB19_46
## %bb.45:
movsd -280(%rbp), %xmm11 ## 8-byte Reload
## xmm11 = mem[0],zero
movapd %xmm11, %xmm1
mulsd %xmm0, %xmm1
addsd -4032(%rbp), %xmm1 ## 8-byte Folded Reload
movsd -128(%rbp), %xmm15 ## 8-byte Reload
## xmm15 = mem[0],zero
movapd %xmm15, %xmm0
mulsd -2976(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
movapd %xmm11, %xmm1
mulsd %xmm2, %xmm1
addsd %xmm0, %xmm1
movapd %xmm15, %xmm2
mulsd -11152(%rbp), %xmm2 ## 16-byte Folded Reload
movsd LCPI19_7(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd %xmm4, %xmm1
movapd %xmm4, %xmm5
mulsd LCPI19_8(%rip), %xmm2
addsd %xmm1, %xmm2
movapd %xmm3, %xmm1
mulsd -1280(%rbp), %xmm1 ## 8-byte Folded Reload
movsd -4640(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
movapd %xmm7, %xmm3
subsd %xmm1, %xmm3
movsd -704(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -11536(%rbp), %xmm1 ## 16-byte Folded Reload
addsd LCPI19_9(%rip), %xmm2
movapd %xmm15, %xmm4
mulsd -13600(%rbp), %xmm4 ## 16-byte Folded Reload
subsd %xmm4, %xmm2
movsd LCPI19_89(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd %xmm4, %xmm3
movsd LCPI19_90(%rip), %xmm13 ## xmm13 = mem[0],zero
mulsd %xmm13, %xmm1
addsd %xmm3, %xmm1
movapd %xmm11, %xmm3
mulsd %xmm14, %xmm3
addsd %xmm2, %xmm3
movapd %xmm3, %xmm4
movsd %xmm3, -1056(%rbp) ## 8-byte Spill
movapd %xmm15, %xmm2
mulsd -13840(%rbp), %xmm2 ## 16-byte Folded Reload
movapd %xmm11, %xmm3
mulsd %xmm12, %xmm3
addsd %xmm2, %xmm3
mulsd -976(%rbp), %xmm10 ## 8-byte Folded Reload
mulsd %xmm5, %xmm0
movapd %xmm5, %xmm12
addsd LCPI19_91(%rip), %xmm1
addsd %xmm3, %xmm0
movsd %xmm0, -1040(%rbp) ## 8-byte Spill
subsd %xmm10, %xmm7
movsd -560(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd -11568(%rbp), %xmm3 ## 16-byte Folded Reload
movapd %xmm4, %xmm2
mulsd LCPI19_4(%rip), %xmm2
movsd LCPI19_10(%rip), %xmm4 ## xmm4 = mem[0],zero
subsd %xmm2, %xmm4
addsd -3120(%rbp), %xmm1 ## 8-byte Folded Reload
movapd %xmm15, %xmm2
mulsd -13952(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm1, %xmm2
mulsd LCPI19_89(%rip), %xmm7
mulsd %xmm13, %xmm3
addsd %xmm7, %xmm3
movsd LCPI19_6(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm0, %xmm1
subsd %xmm1, %xmm4
movapd %xmm11, %xmm1
mulsd %xmm8, %xmm1
addsd %xmm2, %xmm1
movapd %xmm1, %xmm5
movsd %xmm1, -336(%rbp) ## 8-byte Spill
movapd -2784(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm1
movapd %xmm2, %xmm10
movapd %xmm2, -2784(%rbp) ## 16-byte Spill
mulsd -9088(%rbp), %xmm1 ## 16-byte Folded Reload
addsd LCPI19_91(%rip), %xmm3
subsd %xmm1, %xmm4
movsd LCPI19_29(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm5, %xmm1
addsd %xmm4, %xmm1
addsd -2664(%rbp), %xmm3 ## 8-byte Folded Reload
movapd %xmm15, %xmm2
mulsd -13968(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm3, %xmm2
movapd -2880(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm4
movapd %xmm3, %xmm9
mulsd -6768(%rbp), %xmm4 ## 16-byte Folded Reload
addsd %xmm1, %xmm4
movapd %xmm11, %xmm13
mulsd -9104(%rbp), %xmm13 ## 16-byte Folded Reload
addsd %xmm2, %xmm13
movapd -2528(%rbp), %xmm1 ## 16-byte Reload
mulsd -7984(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm4
movsd LCPI19_29(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm13, %xmm1
addsd %xmm4, %xmm1
movapd -3184(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm14
movapd %xmm2, %xmm5
mulsd -10992(%rbp), %xmm14 ## 16-byte Folded Reload
addsd %xmm1, %xmm14
movapd %xmm11, %xmm6
mulsd -8992(%rbp), %xmm6 ## 16-byte Folded Reload
movapd %xmm15, %xmm1
mulsd -9616(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm6
movapd %xmm11, %xmm1
mulsd -6736(%rbp), %xmm1 ## 16-byte Folded Reload
movsd -920(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
movapd %xmm8, %xmm2
subsd %xmm1, %xmm2
movapd %xmm15, %xmm1
mulsd -7008(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm2
movapd %xmm2, %xmm1
movapd %xmm12, %xmm7
mulsd %xmm12, %xmm1
subsd %xmm1, %xmm6
movapd %xmm6, %xmm1
movsd LCPI19_5(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm1
movapd %xmm11, %xmm4
mulsd -5888(%rbp), %xmm4 ## 16-byte Folded Reload
subsd %xmm4, %xmm2
mulsd %xmm12, %xmm2
movapd %xmm15, %xmm4
mulsd -11136(%rbp), %xmm4 ## 16-byte Folded Reload
mulsd %xmm12, %xmm4
subsd %xmm4, %xmm2
movapd %xmm11, %xmm4
mulsd -9008(%rbp), %xmm4 ## 16-byte Folded Reload
addsd %xmm2, %xmm4
movapd %xmm15, %xmm7
mulsd -9624(%rbp), %xmm7 ## 8-byte Folded Reload
addsd %xmm4, %xmm7
movapd %xmm7, %xmm2
mulsd LCPI19_118(%rip), %xmm2
addsd %xmm1, %xmm2
movsd LCPI19_119(%rip), %xmm0 ## xmm0 = mem[0],zero
movapd -8016(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm0, %xmm1
subsd %xmm1, %xmm2
movapd %xmm9, %xmm4
mulsd -10960(%rbp), %xmm4 ## 16-byte Folded Reload
addsd %xmm2, %xmm4
movapd %xmm10, %xmm1
mulsd -10976(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm4
movapd -8096(%rbp), %xmm1 ## 16-byte Reload
mulsd -1280(%rbp), %xmm1 ## 8-byte Folded Reload
movsd -800(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
movapd %xmm10, %xmm2
subsd %xmm1, %xmm2
movsd LCPI19_89(%rip), %xmm9 ## xmm9 = mem[0],zero
mulsd %xmm9, %xmm2
movsd -704(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -13856(%rbp), %xmm1 ## 16-byte Folded Reload
mulsd %xmm9, %xmm1
addsd %xmm2, %xmm1
addsd -6320(%rbp), %xmm1 ## 8-byte Folded Reload
movapd %xmm11, %xmm2
mulsd -9024(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm1, %xmm2
movapd %xmm15, %xmm3
mulsd -9688(%rbp), %xmm3 ## 8-byte Folded Reload
addsd %xmm2, %xmm3
movapd %xmm3, %xmm1
mulsd %xmm0, %xmm1
addsd %xmm4, %xmm1
movapd -9136(%rbp), %xmm12 ## 16-byte Reload
movapd %xmm12, %xmm2
mulsd %xmm0, %xmm2
subsd %xmm2, %xmm1
movapd %xmm5, %xmm2
mulsd -10944(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm1, %xmm2
movapd -2528(%rbp), %xmm1 ## 16-byte Reload
mulsd -9920(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm2
movapd -9312(%rbp), %xmm1 ## 16-byte Reload
mulsd -976(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm10
mulsd %xmm9, %xmm10
movsd -560(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -11520(%rbp), %xmm1 ## 16-byte Folded Reload
mulsd %xmm9, %xmm1
addsd %xmm10, %xmm1
movapd -7968(%rbp), %xmm4 ## 16-byte Reload
addsd -5584(%rbp), %xmm1 ## 8-byte Folded Reload
movapd %xmm11, %xmm5
mulsd -9968(%rbp), %xmm5 ## 16-byte Folded Reload
addsd %xmm1, %xmm5
movapd %xmm15, %xmm10
mulsd %xmm4, %xmm10
addsd %xmm5, %xmm10
movapd %xmm10, %xmm1
mulsd %xmm0, %xmm1
addsd %xmm2, %xmm1
mulsd LCPI19_120(%rip), %xmm8
mulsd %xmm0, %xmm8
movapd %xmm0, %xmm15
addsd %xmm1, %xmm8
addsd %xmm14, %xmm14
addsd %xmm8, %xmm8
mulsd LCPI19_6(%rip), %xmm6
addsd %xmm14, %xmm8
mulsd LCPI19_3(%rip), %xmm7
addsd %xmm6, %xmm7
movsd LCPI19_29(%rip), %xmm1 ## xmm1 = mem[0],zero
movapd -8016(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm1, %xmm2
subsd %xmm2, %xmm7
movapd -2784(%rbp), %xmm14 ## 16-byte Reload
movapd %xmm14, %xmm2
mulsd -10960(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm7, %xmm2
movsd -1072(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
movapd -2880(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm5
mulsd -10976(%rbp), %xmm5 ## 16-byte Folded Reload
addsd %xmm2, %xmm5
mulsd %xmm1, %xmm3
addsd %xmm5, %xmm3
mulsd %xmm1, %xmm12
subsd %xmm12, %xmm3
movapd -2528(%rbp), %xmm12 ## 16-byte Reload
movapd %xmm12, %xmm2
mulsd -10944(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm3, %xmm2
movapd -3184(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm3
mulsd -9920(%rbp), %xmm3 ## 16-byte Folded Reload
addsd %xmm2, %xmm3
mulsd %xmm1, %xmm10
addsd %xmm3, %xmm10
movsd -1056(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd LCPI19_118(%rip), %xmm0
movsd LCPI19_121(%rip), %xmm1 ## xmm1 = mem[0],zero
subsd %xmm0, %xmm1
movsd -1040(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd LCPI19_5(%rip), %xmm0
addsd %xmm1, %xmm0
movapd %xmm4, %xmm1
mulsd -9088(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movsd -880(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
movsd -336(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movapd %xmm15, %xmm2
mulsd %xmm15, %xmm0
subsd %xmm0, %xmm1
movapd %xmm14, %xmm0
mulsd -6768(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
mulsd -7984(%rbp), %xmm6 ## 16-byte Folded Reload
addsd %xmm0, %xmm6
mulsd %xmm15, %xmm13
subsd %xmm13, %xmm6
mulsd -10992(%rbp), %xmm12 ## 16-byte Folded Reload
addsd %xmm6, %xmm12
movsd -4032(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd LCPI19_120(%rip), %xmm1
mulsd %xmm2, %xmm1
addsd LCPI19_122(%rip), %xmm12
subsd %xmm1, %xmm12
movsd -1536(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
movapd %xmm6, %xmm1
mulsd %xmm8, %xmm1
addsd %xmm10, %xmm12
movapd %xmm7, %xmm2
mulsd %xmm12, %xmm2
subsd %xmm2, %xmm1
movsd -2272(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm3, %xmm8
mulsd %xmm4, %xmm12
addsd %xmm8, %xmm12
mulsd %xmm6, %xmm1
movsd LCPI19_1(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm1
mulsd %xmm3, %xmm12
mulsd %xmm2, %xmm12
subsd %xmm12, %xmm1
movsd %xmm1, (%rax)
LBB19_46:
movapd -3888(%rbp), %xmm5 ## 16-byte Reload
movapd -6608(%rbp), %xmm8 ## 16-byte Reload
divsd %xmm8, %xmm5
movapd %xmm5, %xmm2
mulsd -6624(%rbp), %xmm2 ## 16-byte Folded Reload
movsd -5728(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm2, %xmm0
movsd LCPI19_1(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
movapd %xmm1, %xmm4
mulsd -5760(%rbp), %xmm0 ## 8-byte Folded Reload
movsd LCPI19_37(%rip), %xmm15 ## xmm15 = mem[0],zero
mulsd %xmm15, %xmm0
mulsd -456(%rbp), %xmm2 ## 8-byte Folded Reload
movsd %xmm2, -5024(%rbp) ## 8-byte Spill
movsd -1704(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm2, %xmm1
movsd %xmm1, -7312(%rbp) ## 8-byte Spill
movsd -1408(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
mulsd %xmm1, %xmm10
movsd LCPI19_43(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm10
movapd %xmm1, %xmm6
addsd %xmm0, %xmm10
movapd -2944(%rbp), %xmm3 ## 16-byte Reload
divsd -3168(%rbp), %xmm3 ## 16-byte Folded Reload
movapd %xmm3, %xmm1
mulsd -6592(%rbp), %xmm1 ## 16-byte Folded Reload
movsd -4768(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm1, %xmm0
mulsd %xmm4, %xmm0
mulsd -3760(%rbp), %xmm0 ## 8-byte Folded Reload
mulsd %xmm15, %xmm0
mulsd -1384(%rbp), %xmm1 ## 8-byte Folded Reload
movsd %xmm1, -7344(%rbp) ## 8-byte Spill
movsd -4816(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm1, %xmm2
movsd %xmm2, -7320(%rbp) ## 8-byte Spill
movsd -1152(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm2, %xmm1
mulsd %xmm6, %xmm1
addsd %xmm0, %xmm1
movapd %xmm1, %xmm9
movsd -760(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
divsd -2200(%rbp), %xmm1 ## 8-byte Folded Reload
movapd %xmm1, %xmm2
mulsd -2968(%rbp), %xmm2 ## 8-byte Folded Reload
movapd %xmm2, %xmm0
mulsd -4656(%rbp), %xmm0 ## 8-byte Folded Reload
mulsd %xmm4, %xmm0
mulsd -3312(%rbp), %xmm0 ## 16-byte Folded Reload
mulsd %xmm15, %xmm0
mulsd -1168(%rbp), %xmm2 ## 8-byte Folded Reload
movsd %xmm2, -7336(%rbp) ## 8-byte Spill
movsd -1656(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm2, %xmm7
movsd %xmm7, -7328(%rbp) ## 8-byte Spill
movsd -3856(%rbp), %xmm14 ## 8-byte Reload
## xmm14 = mem[0],zero
mulsd %xmm7, %xmm14
mulsd %xmm6, %xmm14
addsd %xmm0, %xmm14
mulsd -3152(%rbp), %xmm5 ## 8-byte Folded Reload
movapd %xmm5, %xmm0
mulsd %xmm4, %xmm0
movapd %xmm4, %xmm12
movsd %xmm0, -13208(%rbp) ## 8-byte Spill
movapd %xmm0, %xmm7
subsd -5856(%rbp), %xmm7 ## 16-byte Folded Reload
movsd LCPI19_44(%rip), %xmm15 ## xmm15 = mem[0],zero
mulsd %xmm15, %xmm5
mulsd -4784(%rbp), %xmm5 ## 8-byte Folded Reload
movsd LCPI19_45(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm5
movapd %xmm0, %xmm11
movsd %xmm5, -13232(%rbp) ## 8-byte Spill
addsd %xmm5, %xmm7
divsd -5840(%rbp), %xmm7 ## 16-byte Folded Reload
movapd -4208(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm7, %xmm2
movapd -272(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm5
movapd %xmm0, %xmm13
mulsd %xmm2, %xmm5
movsd -5776(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
divsd %xmm8, %xmm0
mulsd -5248(%rbp), %xmm7 ## 16-byte Folded Reload
movsd %xmm0, -13192(%rbp) ## 8-byte Spill
addsd %xmm0, %xmm7
movsd %xmm7, -9816(%rbp) ## 8-byte Spill
movapd -592(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm0
movapd %xmm4, %xmm8
mulsd %xmm7, %xmm0
subsd %xmm0, %xmm5
movsd %xmm5, -1968(%rbp) ## 8-byte Spill
movapd -528(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm5, %xmm4
movapd -400(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm5
mulsd %xmm7, %xmm5
movapd %xmm13, %xmm0
mulsd %xmm10, %xmm0
movsd %xmm0, -13184(%rbp) ## 8-byte Spill
addsd %xmm0, %xmm5
movapd %xmm5, -10080(%rbp) ## 16-byte Spill
addsd %xmm5, %xmm4
movapd %xmm6, %xmm7
movapd %xmm6, %xmm5
mulsd %xmm2, %xmm7
movapd %xmm2, %xmm6
movapd %xmm2, -4016(%rbp) ## 16-byte Spill
movapd %xmm8, %xmm0
movapd %xmm8, %xmm13
mulsd %xmm10, %xmm0
movapd %xmm0, -12272(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm7
movapd -112(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm7, %xmm0
subsd %xmm0, %xmm4
movapd %xmm4, -11888(%rbp) ## 16-byte Spill
movapd -1440(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm4, %xmm0
movapd -1856(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm7, %xmm2
subsd %xmm2, %xmm0
mulsd -536(%rbp), %xmm3 ## 8-byte Folded Reload
movapd %xmm3, %xmm2
mulsd %xmm12, %xmm2
movsd %xmm2, -13224(%rbp) ## 8-byte Spill
movapd %xmm2, %xmm4
subsd -5824(%rbp), %xmm4 ## 16-byte Folded Reload
mulsd %xmm15, %xmm3
mulsd -4144(%rbp), %xmm3 ## 8-byte Folded Reload
mulsd %xmm11, %xmm3
movsd %xmm3, -13240(%rbp) ## 8-byte Spill
addsd %xmm3, %xmm4
divsd -5808(%rbp), %xmm4 ## 16-byte Folded Reload
movsd %xmm4, -3504(%rbp) ## 8-byte Spill
movapd -4288(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm4, %xmm2
movapd %xmm5, %xmm11
movapd %xmm5, %xmm3
mulsd %xmm2, %xmm3
movapd %xmm2, %xmm12
movapd %xmm2, -816(%rbp) ## 16-byte Spill
movapd %xmm8, %xmm2
mulsd %xmm9, %xmm2
movapd %xmm2, -6448(%rbp) ## 16-byte Spill
addsd %xmm2, %xmm3
movsd %xmm3, -2240(%rbp) ## 8-byte Spill
movapd -2400(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm3, %xmm2
subsd %xmm2, %xmm0
movapd %xmm5, %xmm3
movsd LCPI19_50(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm3
movapd -2816(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm3, %xmm2
movapd %xmm3, %xmm15
movapd %xmm3, -11872(%rbp) ## 16-byte Spill
movapd %xmm8, %xmm5
movsd LCPI19_61(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm5
movapd %xmm3, %xmm8
movsd -1552(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm5, %xmm3
subsd %xmm3, %xmm2
movapd -640(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm6, %xmm3
addsd %xmm2, %xmm3
movapd -1840(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm10, %xmm2
movsd %xmm2, -13280(%rbp) ## 8-byte Spill
addsd %xmm2, %xmm3
movapd -1232(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm12, %xmm2
addsd %xmm3, %xmm2
movapd -2624(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm9, -4704(%rbp) ## 16-byte Spill
mulsd %xmm9, %xmm3
movsd %xmm3, -13272(%rbp) ## 8-byte Spill
addsd %xmm3, %xmm2
movapd -688(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm3
movapd %xmm2, -16400(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm3
addsd %xmm0, %xmm3
movsd %xmm10, -7448(%rbp) ## 8-byte Spill
addsd %xmm9, %xmm10
movapd -192(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm0
mulsd %xmm8, %xmm0
subsd %xmm10, %xmm0
movapd %xmm2, %xmm12
movapd %xmm2, %xmm9
movapd %xmm0, -17392(%rbp) ## 16-byte Spill
mulsd %xmm0, %xmm12
movapd %xmm12, %xmm4
movsd LCPI19_25(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm4
addsd %xmm3, %xmm4
movapd %xmm5, -14032(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm13
mulsd %xmm15, %xmm11
subsd %xmm11, %xmm13
movapd %xmm13, %xmm0
movsd LCPI19_103(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm0
subsd %xmm0, %xmm4
movsd -1552(%rbp), %xmm11 ## 8-byte Reload
## xmm11 = mem[0],zero
mulsd LCPI19_50(%rip), %xmm11
movapd %xmm11, -11296(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm11
movapd %xmm11, -17376(%rbp) ## 16-byte Spill
movapd %xmm9, %xmm0
mulsd %xmm9, %xmm11
addsd %xmm4, %xmm11
movapd %xmm9, %xmm4
movapd %xmm9, %xmm6
mulsd %xmm14, %xmm4
movapd -2544(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm4, %xmm2
movsd %xmm2, -13264(%rbp) ## 8-byte Spill
subsd %xmm2, %xmm11
movsd -4560(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
divsd -2200(%rbp), %xmm2 ## 8-byte Folded Reload
mulsd -1640(%rbp), %xmm1 ## 8-byte Folded Reload
movapd %xmm1, %xmm3
mulsd LCPI19_1(%rip), %xmm3
movsd %xmm3, -13200(%rbp) ## 8-byte Spill
subsd -6672(%rbp), %xmm3 ## 8-byte Folded Reload
mulsd LCPI19_44(%rip), %xmm1
mulsd -3584(%rbp), %xmm1 ## 16-byte Folded Reload
mulsd LCPI19_45(%rip), %xmm1
movsd %xmm1, -13248(%rbp) ## 8-byte Spill
addsd %xmm1, %xmm3
divsd -6656(%rbp), %xmm3 ## 8-byte Folded Reload
mulsd -2712(%rbp), %xmm3 ## 8-byte Folded Reload
movsd %xmm2, -13216(%rbp) ## 8-byte Spill
addsd %xmm2, %xmm3
movsd %xmm3, -9800(%rbp) ## 8-byte Spill
movapd %xmm9, %xmm1
mulsd %xmm3, %xmm1
movapd %xmm1, -1952(%rbp) ## 16-byte Spill
movapd -528(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm1, %xmm0
movapd -112(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm4, %xmm2
movsd %xmm2, -7744(%rbp) ## 8-byte Spill
addsd %xmm2, %xmm0
movapd %xmm0, -1056(%rbp) ## 16-byte Spill
movapd -2576(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm0, %xmm3
subsd %xmm3, %xmm11
movsd %xmm10, -9832(%rbp) ## 8-byte Spill
movsd %xmm14, -7440(%rbp) ## 8-byte Spill
addsd %xmm14, %xmm10
movsd %xmm10, -5352(%rbp) ## 8-byte Spill
movapd %xmm11, %xmm5
movsd LCPI19_15(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm5
movsd LCPI19_124(%rip), %xmm1 ## xmm1 = mem[0],zero
movapd %xmm9, %xmm14
movapd %xmm9, %xmm2
mulsd %xmm1, %xmm2
subsd %xmm10, %xmm2
movapd %xmm2, -11824(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm14
movapd %xmm14, %xmm0
movsd LCPI19_22(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm0
addsd %xmm5, %xmm0
movapd %xmm0, -336(%rbp) ## 16-byte Spill
movapd -1248(%rbp), %xmm5 ## 16-byte Reload
movsd -1968(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm1, %xmm5
movapd -912(%rbp), %xmm10 ## 16-byte Reload
movapd -10080(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm3, %xmm10
addsd %xmm5, %xmm10
movapd -608(%rbp), %xmm15 ## 16-byte Reload
movapd %xmm15, %xmm5
mulsd %xmm7, %xmm5
subsd %xmm5, %xmm10
movapd -1440(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm5
mulsd %xmm10, %xmm5
movapd %xmm10, -11856(%rbp) ## 16-byte Spill
movapd -1920(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm7, %xmm6
subsd %xmm6, %xmm5
movapd -2432(%rbp), %xmm6 ## 16-byte Reload
movsd -2240(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
mulsd %xmm8, %xmm6
subsd %xmm6, %xmm5
movapd %xmm13, %xmm2
movsd LCPI19_48(%rip), %xmm6 ## xmm6 = mem[0],zero
mulsd %xmm6, %xmm2
addsd %xmm5, %xmm2
movapd %xmm12, %xmm6
movsd LCPI19_24(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm5, %xmm6
movapd -512(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm2, %xmm0
addsd %xmm6, %xmm0
movapd -1264(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm1, %xmm6
movapd -1504(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm3, %xmm1
addsd %xmm6, %xmm1
movapd -720(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm6
mulsd %xmm7, %xmm6
subsd %xmm6, %xmm1
movapd %xmm9, %xmm6
mulsd %xmm1, %xmm6
movapd %xmm1, %xmm5
movapd %xmm1, -11840(%rbp) ## 16-byte Spill
mulsd -1600(%rbp), %xmm7 ## 16-byte Folded Reload
subsd %xmm7, %xmm6
movapd -112(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm1
mulsd -11888(%rbp), %xmm1 ## 16-byte Folded Reload
mulsd %xmm10, %xmm15
addsd %xmm1, %xmm15
mulsd %xmm5, %xmm3
addsd %xmm15, %xmm3
movsd LCPI19_14(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm7, %xmm3
addsd %xmm6, %xmm3
movapd -2128(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm8, %xmm6
subsd %xmm6, %xmm3
movsd LCPI19_47(%rip), %xmm6 ## xmm6 = mem[0],zero
mulsd %xmm6, %xmm13
addsd %xmm3, %xmm13
movapd -448(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm7
movapd %xmm1, %xmm3
mulsd %xmm13, %xmm7
addsd %xmm0, %xmm7
movapd -2912(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm4, %xmm1
movapd %xmm1, -17312(%rbp) ## 16-byte Spill
subsd %xmm1, %xmm7
movapd -2048(%rbp), %xmm0 ## 16-byte Reload
movapd -1952(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm6, %xmm0
movapd -3264(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm1
mulsd %xmm4, %xmm1
movapd %xmm1, -7728(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm0
movapd -2576(%rbp), %xmm10 ## 16-byte Reload
movapd %xmm10, %xmm1
mulsd %xmm0, %xmm1
movapd %xmm0, %xmm8
movsd %xmm0, -2768(%rbp) ## 8-byte Spill
subsd %xmm1, %xmm7
movsd LCPI19_23(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm12
movapd %xmm3, %xmm1
movapd %xmm2, -16272(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm1
subsd %xmm1, %xmm12
movapd -512(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm13, -16304(%rbp) ## 16-byte Spill
mulsd %xmm13, %xmm1
addsd %xmm12, %xmm1
movapd -3248(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm4, %xmm2
movsd %xmm2, -13256(%rbp) ## 8-byte Spill
subsd %xmm2, %xmm1
movapd -1104(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm6, %xmm0
movapd -2560(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm6, %xmm4
movapd %xmm4, -9152(%rbp) ## 16-byte Spill
addsd %xmm4, %xmm0
movapd %xmm10, %xmm2
mulsd %xmm0, %xmm2
movsd %xmm0, -2448(%rbp) ## 8-byte Spill
subsd %xmm2, %xmm1
movapd %xmm9, %xmm2
mulsd -1056(%rbp), %xmm2 ## 16-byte Folded Reload
mulsd %xmm8, %xmm5
addsd %xmm2, %xmm5
movapd %xmm6, %xmm2
mulsd %xmm0, %xmm2
addsd %xmm5, %xmm2
movsd LCPI19_64(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm5, %xmm2
subsd %xmm2, %xmm1
mulsd LCPI19_11(%rip), %xmm11
movsd LCPI19_21(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm14
addsd %xmm11, %xmm14
movapd -192(%rbp), %xmm12 ## 16-byte Reload
movapd %xmm12, %xmm0
movsd LCPI19_75(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm0
movapd %xmm12, %xmm2
movapd %xmm0, -14256(%rbp) ## 16-byte Spill
mulsd %xmm0, %xmm2
movapd %xmm2, %xmm5
movsd LCPI19_73(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm5
movapd %xmm5, -4608(%rbp) ## 16-byte Spill
addsd %xmm5, %xmm7
movapd -336(%rbp), %xmm5 ## 16-byte Reload
addsd %xmm7, %xmm5
movsd LCPI19_13(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm7
addsd %xmm14, %xmm7
movsd LCPI19_72(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm2
movapd %xmm2, -1744(%rbp) ## 16-byte Spill
addsd %xmm2, %xmm1
addsd %xmm1, %xmm7
movapd %xmm1, %xmm4
mulsd %xmm3, %xmm4
addsd %xmm5, %xmm4
movsd -1200(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
movapd %xmm5, %xmm8
mulsd LCPI19_123(%rip), %xmm8
movapd %xmm8, %xmm6
subsd -5352(%rbp), %xmm6 ## 8-byte Folded Reload
movapd %xmm5, %xmm1
mulsd %xmm6, %xmm1
movsd LCPI19_115(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm1
movapd -1312(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm2
movapd %xmm3, %xmm9
mulsd %xmm4, %xmm2
subsd %xmm1, %xmm2
movapd -1328(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm1
movapd %xmm3, %xmm0
mulsd %xmm7, %xmm1
addsd %xmm2, %xmm1
movapd %xmm5, %xmm3
movsd LCPI19_82(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm3
movsd %xmm3, -7112(%rbp) ## 8-byte Spill
mulsd %xmm3, %xmm5
movsd LCPI19_81(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm5
subsd %xmm1, %xmm5
movapd %xmm9, %xmm3
movapd %xmm7, -16080(%rbp) ## 16-byte Spill
mulsd %xmm7, %xmm3
movapd %xmm0, %xmm1
movapd %xmm4, -16048(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm1
subsd %xmm1, %xmm3
movsd %xmm6, -13416(%rbp) ## 8-byte Spill
movapd %xmm6, %xmm2
subsd %xmm8, %xmm2
movapd %xmm2, -13904(%rbp) ## 16-byte Spill
movapd -1936(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm0
movapd %xmm1, %xmm8
mulsd %xmm3, %xmm0
movsd -704(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
movapd %xmm7, %xmm1
mulsd LCPI19_125(%rip), %xmm1
movsd %xmm1, -9808(%rbp) ## 8-byte Spill
subsd %xmm1, %xmm2
movapd %xmm7, %xmm1
movapd %xmm2, -10112(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm1
movapd %xmm1, %xmm2
movsd LCPI19_30(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd %xmm4, %xmm2
subsd %xmm2, %xmm0
movapd -1216(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm2
movapd %xmm4, %xmm6
movapd %xmm4, -1216(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm2
subsd %xmm0, %xmm2
movapd %xmm7, %xmm0
movsd LCPI19_89(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd %xmm4, %xmm0
movapd %xmm7, %xmm4
movsd %xmm0, -13424(%rbp) ## 8-byte Spill
mulsd %xmm0, %xmm4
movsd LCPI19_88(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm4
addsd %xmm2, %xmm4
movapd %xmm4, -10928(%rbp) ## 16-byte Spill
movapd %xmm6, %xmm0
movapd %xmm3, -15920(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm0
movsd LCPI19_111(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm1
subsd %xmm1, %xmm0
movapd %xmm8, %xmm1
movapd %xmm5, -15936(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm1
addsd %xmm0, %xmm1
movapd %xmm1, -10912(%rbp) ## 16-byte Spill
movapd -4912(%rbp), %xmm5 ## 16-byte Reload
movapd -3408(%rbp), %xmm0 ## 16-byte Reload
divsd %xmm0, %xmm5
movsd -1128(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm5, %xmm1
movapd %xmm1, %xmm2
movsd LCPI19_1(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm7, %xmm2
movsd %xmm2, -13344(%rbp) ## 8-byte Spill
movapd %xmm2, %xmm3
subsd -5696(%rbp), %xmm3 ## 16-byte Folded Reload
movsd LCPI19_44(%rip), %xmm14 ## xmm14 = mem[0],zero
mulsd %xmm14, %xmm1
mulsd -1688(%rbp), %xmm1 ## 8-byte Folded Reload
movsd LCPI19_45(%rip), %xmm13 ## xmm13 = mem[0],zero
mulsd %xmm13, %xmm1
movsd %xmm1, -13352(%rbp) ## 8-byte Spill
addsd %xmm1, %xmm3
divsd -5680(%rbp), %xmm3 ## 16-byte Folded Reload
movapd -4192(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm3, %xmm4
movapd -96(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm2
movapd %xmm1, %xmm8
mulsd %xmm4, %xmm2
movsd -1672(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
divsd %xmm0, %xmm1
mulsd -5200(%rbp), %xmm3 ## 16-byte Folded Reload
movsd %xmm1, -13328(%rbp) ## 8-byte Spill
addsd %xmm1, %xmm3
movapd -144(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm0
mulsd %xmm3, %xmm0
movsd %xmm3, -9792(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm2
movapd %xmm2, %xmm6
movsd %xmm2, -800(%rbp) ## 8-byte Spill
mulsd -3664(%rbp), %xmm5 ## 16-byte Folded Reload
movsd -3360(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm5, %xmm0
mulsd %xmm7, %xmm0
mulsd -4736(%rbp), %xmm0 ## 8-byte Folded Reload
movsd LCPI19_37(%rip), %xmm12 ## xmm12 = mem[0],zero
mulsd %xmm12, %xmm0
mulsd -2288(%rbp), %xmm5 ## 8-byte Folded Reload
movsd %xmm5, -4640(%rbp) ## 8-byte Spill
movsd -3776(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm5, %xmm1
movsd %xmm1, -7352(%rbp) ## 8-byte Spill
movsd -936(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm1, %xmm7
movsd LCPI19_43(%rip), %xmm11 ## xmm11 = mem[0],zero
mulsd %xmm11, %xmm7
addsd %xmm0, %xmm7
movapd -208(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm6, %xmm2
movapd -256(%rbp), %xmm10 ## 16-byte Reload
movapd %xmm10, %xmm5
mulsd %xmm3, %xmm5
movapd %xmm8, %xmm0
mulsd %xmm7, %xmm0
movsd %xmm0, -13320(%rbp) ## 8-byte Spill
addsd %xmm0, %xmm5
movapd %xmm5, -11232(%rbp) ## 16-byte Spill
addsd %xmm5, %xmm2
movapd %xmm10, %xmm15
mulsd %xmm4, %xmm15
movapd %xmm4, %xmm8
movapd %xmm4, -4000(%rbp) ## 16-byte Spill
movapd %xmm9, %xmm1
mulsd %xmm7, %xmm1
movapd %xmm1, -8656(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm15
movapd -64(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm15, %xmm1
subsd %xmm1, %xmm2
movapd %xmm2, -11808(%rbp) ## 16-byte Spill
movapd -2096(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm2, %xmm1
movapd -2384(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm15, %xmm2
subsd %xmm2, %xmm1
movapd -3424(%rbp), %xmm2 ## 16-byte Reload
divsd -3392(%rbp), %xmm2 ## 16-byte Folded Reload
movsd -2016(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm2, %xmm3
movapd %xmm2, %xmm5
mulsd -3680(%rbp), %xmm5 ## 16-byte Folded Reload
movsd -1664(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm5, %xmm2
movsd LCPI19_1(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm2
mulsd -1680(%rbp), %xmm2 ## 8-byte Folded Reload
mulsd %xmm12, %xmm2
mulsd -1792(%rbp), %xmm5 ## 8-byte Folded Reload
movsd %xmm5, -8624(%rbp) ## 8-byte Spill
movsd -3808(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd %xmm5, %xmm6
movsd %xmm6, -7368(%rbp) ## 8-byte Spill
movsd -1144(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm6, %xmm5
mulsd %xmm11, %xmm5
addsd %xmm2, %xmm5
movapd %xmm5, %xmm6
movapd %xmm3, %xmm2
mulsd %xmm0, %xmm2
movsd %xmm2, -13304(%rbp) ## 8-byte Spill
subsd -5744(%rbp), %xmm2 ## 16-byte Folded Reload
mulsd %xmm14, %xmm3
mulsd -3376(%rbp), %xmm3 ## 8-byte Folded Reload
mulsd %xmm13, %xmm3
movsd %xmm3, -13336(%rbp) ## 8-byte Spill
addsd %xmm3, %xmm2
divsd -5712(%rbp), %xmm2 ## 16-byte Folded Reload
movsd %xmm2, -3496(%rbp) ## 8-byte Spill
movapd -4272(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm2, %xmm3
movapd %xmm10, %xmm0
mulsd %xmm3, %xmm0
movapd %xmm3, %xmm11
movapd %xmm3, -432(%rbp) ## 16-byte Spill
movapd %xmm9, %xmm2
mulsd %xmm5, %xmm2
movapd %xmm2, -9168(%rbp) ## 16-byte Spill
addsd %xmm2, %xmm0
movsd %xmm0, -3632(%rbp) ## 8-byte Spill
movapd -2368(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm0, %xmm2
subsd %xmm2, %xmm1
movapd %xmm10, %xmm3
movsd LCPI19_50(%rip), %xmm14 ## xmm14 = mem[0],zero
mulsd %xmm14, %xmm3
movapd -1568(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm3, %xmm2
movapd %xmm3, %xmm0
movapd %xmm3, -10176(%rbp) ## 16-byte Spill
movapd %xmm9, %xmm5
movsd LCPI19_61(%rip), %xmm12 ## xmm12 = mem[0],zero
mulsd %xmm12, %xmm5
movapd -992(%rbp), %xmm14 ## 16-byte Reload
movapd %xmm14, %xmm3
mulsd %xmm5, %xmm3
subsd %xmm3, %xmm2
movapd -832(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm4, %xmm3
addsd %xmm2, %xmm3
movapd -2064(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm7, %xmm2
movsd %xmm2, -13392(%rbp) ## 8-byte Spill
addsd %xmm2, %xmm3
movapd -672(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm11, %xmm4
addsd %xmm3, %xmm4
movapd -2608(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm6, %xmm8
movapd %xmm6, -4032(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm2
movsd %xmm2, -13384(%rbp) ## 8-byte Spill
addsd %xmm2, %xmm4
movapd -624(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm2
movapd %xmm3, %xmm6
movapd %xmm4, -16192(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm2
addsd %xmm1, %xmm2
movapd -48(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm1
mulsd %xmm12, %xmm1
movsd %xmm7, -7416(%rbp) ## 8-byte Spill
movapd %xmm7, %xmm11
addsd %xmm8, %xmm11
subsd %xmm11, %xmm1
movapd %xmm4, %xmm3
movapd %xmm4, %xmm12
movapd %xmm1, -17360(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm3
movapd %xmm3, -1984(%rbp) ## 16-byte Spill
movapd %xmm3, %xmm1
mulsd LCPI19_25(%rip), %xmm1
subsd %xmm1, %xmm2
movapd %xmm9, %xmm13
movapd %xmm5, -14000(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm13
mulsd %xmm0, %xmm10
subsd %xmm10, %xmm13
movapd %xmm13, %xmm3
mulsd LCPI19_103(%rip), %xmm3
addsd %xmm2, %xmm3
mulsd LCPI19_50(%rip), %xmm14
movapd %xmm14, -11264(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm14
movapd %xmm4, %xmm8
movapd %xmm14, -17344(%rbp) ## 16-byte Spill
mulsd %xmm14, %xmm8
addsd %xmm3, %xmm8
movsd -928(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
movsd -2952(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
divsd %xmm4, %xmm3
movapd %xmm3, %xmm5
mulsd -5552(%rbp), %xmm5 ## 8-byte Folded Reload
movapd %xmm5, %xmm2
mulsd -2832(%rbp), %xmm2 ## 8-byte Folded Reload
movsd LCPI19_1(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm2
mulsd -4624(%rbp), %xmm2 ## 16-byte Folded Reload
mulsd LCPI19_37(%rip), %xmm2
mulsd -2464(%rbp), %xmm5 ## 8-byte Folded Reload
movsd %xmm5, -7376(%rbp) ## 8-byte Spill
movsd -1648(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd %xmm5, %xmm6
movsd %xmm6, -7360(%rbp) ## 8-byte Spill
movsd -1136(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm6, %xmm5
mulsd LCPI19_43(%rip), %xmm5
addsd %xmm2, %xmm5
movapd %xmm12, %xmm14
mulsd %xmm5, %xmm14
movapd -1824(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm14, %xmm2
movsd %xmm2, -13376(%rbp) ## 8-byte Spill
subsd %xmm2, %xmm8
movsd -1624(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
divsd %xmm4, %xmm2
mulsd -2192(%rbp), %xmm3 ## 8-byte Folded Reload
movapd %xmm3, %xmm4
mulsd %xmm1, %xmm4
movsd %xmm4, -13288(%rbp) ## 8-byte Spill
subsd -6576(%rbp), %xmm4 ## 8-byte Folded Reload
mulsd LCPI19_44(%rip), %xmm3
mulsd -5536(%rbp), %xmm3 ## 16-byte Folded Reload
mulsd LCPI19_45(%rip), %xmm3
movsd %xmm3, -13312(%rbp) ## 8-byte Spill
addsd %xmm3, %xmm4
divsd -6560(%rbp), %xmm4 ## 8-byte Folded Reload
mulsd -2696(%rbp), %xmm4 ## 8-byte Folded Reload
movsd %xmm2, -13296(%rbp) ## 8-byte Spill
addsd %xmm2, %xmm4
movsd %xmm4, -9776(%rbp) ## 8-byte Spill
movapd %xmm12, %xmm0
mulsd %xmm4, %xmm0
movapd %xmm0, -1040(%rbp) ## 16-byte Spill
movapd -208(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm0, %xmm1
movapd -64(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm14, %xmm2
movsd %xmm2, -3616(%rbp) ## 8-byte Spill
addsd %xmm2, %xmm1
movsd %xmm1, -336(%rbp) ## 8-byte Spill
movapd -1008(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm1, %xmm2
subsd %xmm2, %xmm8
movapd %xmm8, %xmm4
movsd LCPI19_108(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm4
movapd %xmm12, %xmm3
movsd LCPI19_124(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm3
movsd %xmm11, -9824(%rbp) ## 8-byte Spill
movsd %xmm5, -7408(%rbp) ## 8-byte Spill
addsd %xmm5, %xmm11
movsd %xmm11, -5344(%rbp) ## 8-byte Spill
subsd %xmm11, %xmm3
movapd %xmm12, %xmm10
movapd %xmm3, -11712(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm10
movapd %xmm10, %xmm0
mulsd LCPI19_21(%rip), %xmm0
addsd %xmm4, %xmm0
movapd %xmm0, -3936(%rbp) ## 16-byte Spill
movapd -1520(%rbp), %xmm4 ## 16-byte Reload
movsd -800(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
mulsd %xmm12, %xmm4
movapd -1488(%rbp), %xmm9 ## 16-byte Reload
movapd -11232(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm0, %xmm9
addsd %xmm4, %xmm9
movapd -176(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm11, %xmm4
mulsd %xmm15, %xmm4
subsd %xmm4, %xmm9
movapd -2096(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm4
mulsd %xmm9, %xmm4
movapd %xmm9, -11760(%rbp) ## 16-byte Spill
movapd -896(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm15, %xmm5
subsd %xmm5, %xmm4
movapd -1904(%rbp), %xmm5 ## 16-byte Reload
movsd -3632(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm2, %xmm5
subsd %xmm5, %xmm4
movapd %xmm13, %xmm3
movsd LCPI19_48(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm5, %xmm3
addsd %xmm4, %xmm3
movapd -1984(%rbp), %xmm5 ## 16-byte Reload
mulsd LCPI19_24(%rip), %xmm5
movapd -752(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm3, %xmm4
addsd %xmm5, %xmm4
movapd -576(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm12, %xmm5
movapd -864(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm0, %xmm6
addsd %xmm5, %xmm6
movapd -736(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm5
mulsd %xmm15, %xmm5
subsd %xmm5, %xmm6
mulsd %xmm6, %xmm7
movapd %xmm6, %xmm12
movapd %xmm6, -11776(%rbp) ## 16-byte Spill
mulsd -1888(%rbp), %xmm15 ## 16-byte Folded Reload
subsd %xmm15, %xmm7
movapd -64(%rbp), %xmm0 ## 16-byte Reload
mulsd -11808(%rbp), %xmm0 ## 16-byte Folded Reload
mulsd %xmm9, %xmm11
addsd %xmm0, %xmm11
movapd %xmm1, %xmm0
mulsd %xmm6, %xmm0
addsd %xmm11, %xmm0
mulsd LCPI19_14(%rip), %xmm0
addsd %xmm7, %xmm0
movapd -1584(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm2, %xmm5
subsd %xmm5, %xmm0
movsd LCPI19_47(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm13
addsd %xmm0, %xmm13
movapd -496(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm0
movapd %xmm5, %xmm6
mulsd %xmm13, %xmm0
addsd %xmm4, %xmm0
movsd -2032(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm14, %xmm4
movsd %xmm4, -13368(%rbp) ## 8-byte Spill
subsd %xmm4, %xmm0
movapd -1344(%rbp), %xmm15 ## 16-byte Reload
movapd -1040(%rbp), %xmm11 ## 16-byte Reload
mulsd %xmm11, %xmm15
movapd -3280(%rbp), %xmm12 ## 16-byte Reload
movapd %xmm12, %xmm4
mulsd %xmm14, %xmm4
movapd %xmm4, -7760(%rbp) ## 16-byte Spill
addsd %xmm4, %xmm15
movapd -1008(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm15, %xmm4
movsd %xmm15, -920(%rbp) ## 8-byte Spill
subsd %xmm4, %xmm0
mulsd LCPI19_98(%rip), %xmm8
mulsd LCPI19_22(%rip), %xmm10
addsd %xmm8, %xmm10
movapd -48(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm1
mulsd LCPI19_75(%rip), %xmm1
movapd %xmm1, -17328(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm5
movapd %xmm5, %xmm1
mulsd LCPI19_73(%rip), %xmm1
movapd %xmm1, -3296(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm0
addsd %xmm0, %xmm10
movapd %xmm0, %xmm4
movsd LCPI19_13(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm4
addsd -3936(%rbp), %xmm4 ## 16-byte Folded Reload
movapd -1984(%rbp), %xmm1 ## 16-byte Reload
mulsd LCPI19_23(%rip), %xmm1
movapd %xmm6, %xmm0
movapd %xmm3, -16000(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm0
subsd %xmm0, %xmm1
movapd -752(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm13, -16064(%rbp) ## 16-byte Spill
mulsd %xmm13, %xmm6
addsd %xmm1, %xmm6
movapd -1808(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm14, %xmm0
movsd %xmm0, -13360(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm6
movapd -2416(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm11, %xmm7
movapd -2592(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm3, %xmm14
movapd %xmm14, -11696(%rbp) ## 16-byte Spill
addsd %xmm14, %xmm7
movsd %xmm7, -1984(%rbp) ## 8-byte Spill
movapd -1008(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm7, %xmm0
subsd %xmm0, %xmm6
movapd -64(%rbp), %xmm0 ## 16-byte Reload
mulsd -336(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm12, %xmm1
mulsd %xmm15, %xmm1
addsd %xmm0, %xmm1
movapd %xmm3, %xmm0
mulsd %xmm7, %xmm0
addsd %xmm1, %xmm0
mulsd LCPI19_64(%rip), %xmm0
subsd %xmm0, %xmm6
mulsd LCPI19_72(%rip), %xmm5
movapd %xmm5, -4592(%rbp) ## 16-byte Spill
addsd %xmm5, %xmm6
addsd %xmm6, %xmm4
mulsd %xmm2, %xmm6
addsd %xmm10, %xmm6
movapd -1616(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm5
movapd %xmm0, %xmm10
mulsd %xmm4, %xmm5
movapd -1088(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm0
movapd %xmm1, %xmm12
mulsd %xmm6, %xmm0
subsd %xmm0, %xmm5
movsd -1296(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
movapd %xmm7, %xmm0
mulsd LCPI19_123(%rip), %xmm0
movapd %xmm0, %xmm3
subsd -5344(%rbp), %xmm3 ## 8-byte Folded Reload
movapd %xmm3, %xmm2
subsd %xmm0, %xmm2
movapd %xmm2, -13888(%rbp) ## 16-byte Spill
movapd -848(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm0
movapd %xmm1, %xmm8
movapd %xmm1, -848(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm0
movsd -560(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
movapd %xmm9, %xmm1
mulsd LCPI19_125(%rip), %xmm1
movsd %xmm1, -9784(%rbp) ## 8-byte Spill
subsd %xmm1, %xmm2
movapd %xmm9, %xmm1
movapd %xmm2, -11616(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm1
movapd %xmm1, %xmm2
mulsd LCPI19_111(%rip), %xmm2
subsd %xmm2, %xmm0
movapd %xmm7, %xmm2
movsd %xmm3, -13400(%rbp) ## 8-byte Spill
mulsd %xmm3, %xmm2
mulsd LCPI19_115(%rip), %xmm2
movapd %xmm10, %xmm3
movapd %xmm6, -15872(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm3
subsd %xmm2, %xmm3
movapd %xmm12, %xmm2
movapd %xmm4, -15888(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm2
addsd %xmm3, %xmm2
movapd %xmm7, %xmm4
movapd %xmm7, %xmm3
mulsd LCPI19_82(%rip), %xmm3
movsd %xmm3, -7096(%rbp) ## 8-byte Spill
mulsd %xmm3, %xmm4
mulsd LCPI19_81(%rip), %xmm4
subsd %xmm2, %xmm4
movapd -2144(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm2
mulsd %xmm4, %xmm2
addsd %xmm0, %xmm2
movapd %xmm2, -10864(%rbp) ## 16-byte Spill
movapd %xmm3, %xmm0
movapd %xmm5, -15792(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm0
mulsd LCPI19_30(%rip), %xmm1
subsd %xmm1, %xmm0
movapd %xmm8, %xmm1
movapd %xmm4, -15776(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm1
subsd %xmm0, %xmm1
movapd %xmm9, %xmm2
movapd %xmm9, %xmm0
mulsd LCPI19_89(%rip), %xmm0
movsd %xmm0, -13408(%rbp) ## 8-byte Spill
mulsd %xmm0, %xmm2
mulsd LCPI19_88(%rip), %xmm2
addsd %xmm1, %xmm2
movapd %xmm2, -10848(%rbp) ## 16-byte Spill
movapd -528(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm0
movapd -11888(%rbp), %xmm11 ## 16-byte Reload
mulsd %xmm11, %xmm0
movapd -1248(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm1
mulsd -11856(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -1264(%rbp), %xmm10 ## 16-byte Reload
movapd %xmm10, %xmm0
mulsd -11840(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
movsd -7312(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
mulsd -5104(%rbp), %xmm9 ## 8-byte Folded Reload
movsd LCPI19_41(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm9
mulsd -4752(%rbp), %xmm9 ## 8-byte Folded Reload
divsd -4832(%rbp), %xmm9 ## 8-byte Folded Reload
mulsd -2896(%rbp), %xmm9 ## 16-byte Folded Reload
movsd -5024(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd -5072(%rbp), %xmm3 ## 8-byte Folded Reload
movsd LCPI19_1(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm3
mulsd -4112(%rbp), %xmm3 ## 16-byte Folded Reload
movsd LCPI19_110(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm3
movapd %xmm1, %xmm8
movsd %xmm9, -7312(%rbp) ## 8-byte Spill
movsd %xmm3, -5024(%rbp) ## 8-byte Spill
subsd %xmm3, %xmm9
addsd %xmm0, %xmm9
movapd %xmm0, %xmm1
mulsd %xmm2, %xmm1
movapd %xmm2, %xmm14
subsd %xmm1, %xmm9
movapd %xmm9, %xmm6
subsd %xmm0, %xmm6
movapd -272(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm13
mulsd -816(%rbp), %xmm13 ## 16-byte Folded Reload
movsd -1696(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
divsd -3168(%rbp), %xmm0 ## 16-byte Folded Reload
movsd -3504(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd -776(%rbp), %xmm3 ## 8-byte Folded Reload
movsd %xmm0, -13000(%rbp) ## 8-byte Spill
addsd %xmm0, %xmm3
movsd %xmm3, -3504(%rbp) ## 8-byte Spill
movapd -592(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm3, %xmm0
subsd %xmm0, %xmm13
movapd %xmm7, %xmm1
movapd %xmm7, %xmm12
mulsd %xmm13, %xmm1
movapd -400(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm3, %xmm5
movapd %xmm2, %xmm0
mulsd -4704(%rbp), %xmm0 ## 16-byte Folded Reload
movsd %xmm0, -12992(%rbp) ## 8-byte Spill
addsd %xmm0, %xmm5
addsd %xmm5, %xmm1
movapd -112(%rbp), %xmm2 ## 16-byte Reload
movsd -2240(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm0, %xmm2
subsd %xmm2, %xmm1
movapd %xmm4, %xmm3
movapd %xmm4, %xmm2
mulsd %xmm13, %xmm2
movapd -912(%rbp), %xmm15 ## 16-byte Reload
mulsd %xmm5, %xmm15
addsd %xmm2, %xmm15
movapd -608(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm0, %xmm2
movapd %xmm0, %xmm4
subsd %xmm2, %xmm15
movsd %xmm15, -3936(%rbp) ## 8-byte Spill
movapd %xmm7, %xmm2
mulsd %xmm1, %xmm2
movapd %xmm3, %xmm0
mulsd %xmm15, %xmm0
addsd %xmm2, %xmm0
movapd %xmm10, %xmm7
movapd %xmm10, %xmm2
mulsd %xmm13, %xmm2
movapd -1504(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm5, -11008(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm3
addsd %xmm2, %xmm3
mulsd -720(%rbp), %xmm4 ## 16-byte Folded Reload
subsd %xmm4, %xmm3
movapd %xmm10, %xmm2
mulsd %xmm3, %xmm2
addsd %xmm0, %xmm2
subsd %xmm2, %xmm6
movsd -7320(%rbp), %xmm15 ## 8-byte Reload
## xmm15 = mem[0],zero
mulsd -1712(%rbp), %xmm15 ## 8-byte Folded Reload
movsd LCPI19_42(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm15
mulsd -4096(%rbp), %xmm15 ## 8-byte Folded Reload
divsd -4848(%rbp), %xmm15 ## 8-byte Folded Reload
mulsd -1400(%rbp), %xmm15 ## 8-byte Folded Reload
movsd -7344(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -5088(%rbp), %xmm0 ## 8-byte Folded Reload
mulsd %xmm14, %xmm0
mulsd -3696(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm8, %xmm4
mulsd %xmm8, %xmm0
movsd %xmm15, -7320(%rbp) ## 8-byte Spill
movsd %xmm0, -7344(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm15
addsd %xmm2, %xmm15
mulsd %xmm14, %xmm2
subsd %xmm2, %xmm15
addsd %xmm6, %xmm15
movapd %xmm12, %xmm0
mulsd -1056(%rbp), %xmm0 ## 16-byte Folded Reload
movapd -2048(%rbp), %xmm6 ## 16-byte Reload
mulsd -2768(%rbp), %xmm6 ## 8-byte Folded Reload
addsd %xmm0, %xmm6
movapd -1104(%rbp), %xmm10 ## 16-byte Reload
mulsd -2448(%rbp), %xmm10 ## 8-byte Folded Reload
addsd %xmm6, %xmm10
movapd %xmm15, %xmm8
addsd %xmm10, %xmm8
movsd -7328(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd -4080(%rbp), %xmm2 ## 16-byte Folded Reload
movsd LCPI19_69(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm2
mulsd -3968(%rbp), %xmm2 ## 8-byte Folded Reload
divsd -648(%rbp), %xmm2 ## 8-byte Folded Reload
mulsd -2496(%rbp), %xmm2 ## 8-byte Folded Reload
movapd %xmm2, %xmm0
movsd -7336(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd -1368(%rbp), %xmm2 ## 8-byte Folded Reload
mulsd %xmm14, %xmm2
mulsd -3984(%rbp), %xmm2 ## 16-byte Folded Reload
mulsd %xmm4, %xmm2
movsd %xmm0, -7328(%rbp) ## 8-byte Spill
movsd %xmm2, -7336(%rbp) ## 8-byte Spill
subsd %xmm2, %xmm0
subsd %xmm10, %xmm0
mulsd %xmm14, %xmm10
addsd %xmm0, %xmm10
movapd -1856(%rbp), %xmm0 ## 16-byte Reload
movsd -1968(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm4, %xmm0
movapd -1184(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm11, %xmm2
mulsd %xmm11, %xmm6
addsd %xmm0, %xmm6
movapd -2400(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm13, %xmm0
addsd %xmm6, %xmm0
mulsd -1872(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm0, %xmm1
movapd %xmm15, %xmm0
movsd LCPI19_25(%rip), %xmm14 ## xmm14 = mem[0],zero
mulsd %xmm14, %xmm0
addsd %xmm1, %xmm0
movapd -2256(%rbp), %xmm6 ## 16-byte Reload
movsd LCPI19_50(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm7, %xmm6
movapd -2640(%rbp), %xmm1 ## 16-byte Reload
movapd -11872(%rbp), %xmm11 ## 16-byte Reload
mulsd %xmm11, %xmm1
movapd %xmm6, -2240(%rbp) ## 16-byte Spill
subsd %xmm1, %xmm6
movapd -4016(%rbp), %xmm1 ## 16-byte Reload
mulsd -1472(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm6
movapd -1840(%rbp), %xmm1 ## 16-byte Reload
movsd -9816(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
mulsd %xmm12, %xmm1
addsd %xmm6, %xmm1
movapd -816(%rbp), %xmm2 ## 16-byte Reload
mulsd -1456(%rbp), %xmm2 ## 16-byte Folded Reload
subsd %xmm2, %xmm1
movapd -2624(%rbp), %xmm2 ## 16-byte Reload
movsd -3504(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm5, %xmm2
addsd %xmm1, %xmm2
movapd -688(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm2, -16240(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm6
addsd %xmm0, %xmm6
movsd -784(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm7, %xmm0
mulsd -272(%rbp), %xmm11 ## 16-byte Folded Reload
movsd %xmm0, -7128(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm11
movapd %xmm11, %xmm0
mulsd LCPI19_103(%rip), %xmm0
subsd %xmm0, %xmm6
movsd -480(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
movsd LCPI19_61(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm7
addsd %xmm5, %xmm12
movsd %xmm12, -9768(%rbp) ## 8-byte Spill
movapd -192(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm12, %xmm0
subsd %xmm0, %xmm7
movapd %xmm7, %xmm1
mulsd %xmm14, %xmm1
addsd %xmm6, %xmm1
movapd -11296(%rbp), %xmm5 ## 16-byte Reload
mulsd -2336(%rbp), %xmm5 ## 8-byte Folded Reload
addsd %xmm1, %xmm5
movapd -2544(%rbp), %xmm1 ## 16-byte Reload
movapd -1952(%rbp), %xmm12 ## 16-byte Reload
mulsd %xmm12, %xmm1
subsd %xmm1, %xmm5
movapd -1056(%rbp), %xmm0 ## 16-byte Reload
mulsd -2352(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm5
movapd %xmm8, %xmm14
addsd %xmm10, %xmm14
movapd %xmm14, -10016(%rbp) ## 16-byte Spill
movapd %xmm5, %xmm1
mulsd LCPI19_11(%rip), %xmm1
mulsd LCPI19_21(%rip), %xmm14
addsd %xmm1, %xmm14
movapd %xmm15, %xmm1
movsd LCPI19_24(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm1
movapd %xmm7, %xmm6
mulsd %xmm0, %xmm6
addsd %xmm1, %xmm6
movapd -1600(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm4, %xmm1
movapd -1184(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm0
mulsd -11840(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
mulsd LCPI19_14(%rip), %xmm9
addsd %xmm0, %xmm9
movapd -2128(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm13, %xmm0
addsd %xmm9, %xmm0
movsd -1872(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm2, %xmm3
addsd %xmm0, %xmm3
movapd %xmm11, %xmm9
movsd LCPI19_47(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm9
addsd %xmm3, %xmm9
movapd -448(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm9, %xmm0
addsd %xmm6, %xmm0
mulsd -1920(%rbp), %xmm4 ## 16-byte Folded Reload
mulsd -11856(%rbp), %xmm8 ## 16-byte Folded Reload
addsd %xmm4, %xmm8
mulsd -2432(%rbp), %xmm13 ## 16-byte Folded Reload
addsd %xmm8, %xmm13
movsd -3936(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm2, %xmm1
addsd %xmm13, %xmm1
movapd %xmm1, %xmm2
movsd LCPI19_48(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm11
addsd %xmm2, %xmm11
movapd -512(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm1
mulsd %xmm11, %xmm1
addsd %xmm0, %xmm1
movapd -2912(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm12, %xmm0
movapd %xmm12, %xmm13
subsd %xmm0, %xmm1
movsd -2768(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -2352(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm1
movsd -480(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
movapd %xmm12, %xmm3
mulsd LCPI19_75(%rip), %xmm3
movapd %xmm3, %xmm4
mulsd LCPI19_73(%rip), %xmm4
addsd %xmm1, %xmm4
mulsd LCPI19_15(%rip), %xmm5
movapd -10016(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm0
movsd LCPI19_22(%rip), %xmm8 ## xmm8 = mem[0],zero
mulsd %xmm8, %xmm0
addsd %xmm5, %xmm0
addsd %xmm4, %xmm0
movsd LCPI19_13(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm5, %xmm4
addsd %xmm14, %xmm4
movsd LCPI19_23(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm15
mulsd %xmm1, %xmm7
addsd %xmm15, %xmm7
movapd %xmm2, %xmm1
movapd %xmm9, -16144(%rbp) ## 16-byte Spill
mulsd %xmm9, %xmm1
addsd %xmm7, %xmm1
movapd -448(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm11, -11872(%rbp) ## 16-byte Spill
mulsd %xmm11, %xmm2
subsd %xmm2, %xmm1
movapd %xmm13, %xmm2
mulsd -3248(%rbp), %xmm2 ## 16-byte Folded Reload
subsd %xmm2, %xmm1
movsd -2448(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd -2352(%rbp), %xmm2 ## 16-byte Folded Reload
subsd %xmm2, %xmm1
mulsd LCPI19_64(%rip), %xmm10
addsd %xmm1, %xmm10
mulsd LCPI19_72(%rip), %xmm3
addsd %xmm10, %xmm3
movapd %xmm12, %xmm15
movsd LCPI19_124(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm15
movsd -9768(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
addsd -9800(%rbp), %xmm9 ## 8-byte Folded Reload
movapd -192(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm9, %xmm1
subsd %xmm1, %xmm15
addsd %xmm3, %xmm4
movapd %xmm15, %xmm2
mulsd LCPI19_21(%rip), %xmm2
addsd %xmm4, %xmm2
mulsd %xmm5, %xmm3
addsd %xmm0, %xmm3
mulsd %xmm8, %xmm15
addsd %xmm3, %xmm15
movapd -1312(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm4
mulsd %xmm2, %xmm4
movapd -1328(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm0
mulsd %xmm15, %xmm0
subsd %xmm0, %xmm4
movapd %xmm6, %xmm5
movapd %xmm6, %xmm0
movsd LCPI19_111(%rip), %xmm8 ## xmm8 = mem[0],zero
mulsd %xmm8, %xmm0
movapd -1216(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm11, %xmm7
mulsd %xmm4, %xmm7
subsd %xmm0, %xmm7
movapd %xmm6, %xmm0
movsd LCPI19_115(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm5, %xmm0
movapd %xmm15, -15840(%rbp) ## 16-byte Spill
mulsd %xmm15, %xmm1
subsd %xmm0, %xmm1
movapd %xmm2, -15824(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm3
addsd %xmm1, %xmm3
movsd -2800(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
movapd %xmm2, %xmm1
mulsd LCPI19_123(%rip), %xmm1
movsd -1200(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm9, %xmm0
movsd %xmm1, -816(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm1
mulsd %xmm5, %xmm1
subsd %xmm1, %xmm3
movsd LCPI19_83(%rip), %xmm10 ## xmm10 = mem[0],zero
mulsd %xmm10, %xmm2
mulsd LCPI19_81(%rip), %xmm2
movapd %xmm2, -11648(%rbp) ## 16-byte Spill
addsd %xmm2, %xmm3
movapd -1936(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm0
mulsd %xmm3, %xmm0
subsd %xmm0, %xmm7
movsd -1280(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd LCPI19_127(%rip), %xmm0
movsd -704(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movapd %xmm9, -11120(%rbp) ## 16-byte Spill
mulsd %xmm9, %xmm1
subsd %xmm1, %xmm0
movapd %xmm0, %xmm1
mulsd %xmm8, %xmm1
subsd %xmm1, %xmm7
movapd %xmm7, -10832(%rbp) ## 16-byte Spill
movapd %xmm6, %xmm1
movsd LCPI19_30(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm5, %xmm1
movapd %xmm4, -15744(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm2
subsd %xmm1, %xmm2
movapd %xmm11, %xmm6
movapd %xmm3, -15760(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm6
addsd %xmm2, %xmm6
mulsd %xmm5, %xmm0
subsd %xmm0, %xmm6
movapd %xmm6, -9072(%rbp) ## 16-byte Spill
movapd -208(%rbp), %xmm13 ## 16-byte Reload
movapd %xmm13, %xmm0
movapd -11808(%rbp), %xmm11 ## 16-byte Reload
mulsd %xmm11, %xmm0
movapd -1520(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm1
mulsd -11760(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -576(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm0
mulsd -11776(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
movsd -7352(%rbp), %xmm15 ## 8-byte Reload
## xmm15 = mem[0],zero
mulsd -3744(%rbp), %xmm15 ## 8-byte Folded Reload
mulsd LCPI19_41(%rip), %xmm15
mulsd -3344(%rbp), %xmm15 ## 8-byte Folded Reload
divsd -3136(%rbp), %xmm15 ## 8-byte Folded Reload
mulsd -1424(%rbp), %xmm15 ## 16-byte Folded Reload
movsd -4640(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -4800(%rbp), %xmm1 ## 8-byte Folded Reload
movsd LCPI19_1(%rip), %xmm8 ## xmm8 = mem[0],zero
mulsd %xmm8, %xmm1
mulsd -2864(%rbp), %xmm1 ## 16-byte Folded Reload
mulsd LCPI19_110(%rip), %xmm1
movsd %xmm15, -7352(%rbp) ## 8-byte Spill
movsd %xmm1, -4640(%rbp) ## 8-byte Spill
subsd %xmm1, %xmm15
addsd %xmm0, %xmm15
movapd %xmm0, %xmm1
mulsd %xmm8, %xmm1
movapd %xmm8, %xmm10
subsd %xmm1, %xmm15
movapd %xmm15, %xmm1
subsd %xmm0, %xmm1
movapd -96(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm12
mulsd -432(%rbp), %xmm12 ## 16-byte Folded Reload
movsd -3792(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
divsd -3392(%rbp), %xmm2 ## 16-byte Folded Reload
movsd -3496(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd -768(%rbp), %xmm4 ## 8-byte Folded Reload
movsd %xmm2, -13152(%rbp) ## 8-byte Spill
addsd %xmm2, %xmm4
movsd %xmm4, -3496(%rbp) ## 8-byte Spill
movapd -144(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm4, %xmm2
subsd %xmm2, %xmm12
movapd %xmm13, %xmm7
movapd %xmm13, %xmm8
mulsd %xmm12, %xmm7
movapd -256(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm4, %xmm3
movapd %xmm0, %xmm2
mulsd -4032(%rbp), %xmm2 ## 16-byte Folded Reload
movsd %xmm2, -13144(%rbp) ## 8-byte Spill
addsd %xmm2, %xmm3
addsd %xmm3, %xmm7
movapd -64(%rbp), %xmm2 ## 16-byte Reload
movsd -3632(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm0, %xmm2
subsd %xmm2, %xmm7
movapd %xmm5, %xmm2
mulsd %xmm12, %xmm2
movapd -1488(%rbp), %xmm14 ## 16-byte Reload
mulsd %xmm3, %xmm14
addsd %xmm2, %xmm14
movapd -176(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm0, %xmm2
subsd %xmm2, %xmm14
movapd %xmm13, %xmm2
mulsd %xmm7, %xmm2
mulsd %xmm14, %xmm5
addsd %xmm2, %xmm5
movapd %xmm6, %xmm4
movapd %xmm6, %xmm2
mulsd %xmm12, %xmm2
movapd -864(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm3, -15904(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm6
addsd %xmm2, %xmm6
mulsd -736(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm6
movapd %xmm4, %xmm2
mulsd %xmm6, %xmm2
addsd %xmm5, %xmm2
subsd %xmm2, %xmm1
movsd -7368(%rbp), %xmm13 ## 8-byte Reload
## xmm13 = mem[0],zero
mulsd -3104(%rbp), %xmm13 ## 8-byte Folded Reload
mulsd LCPI19_42(%rip), %xmm13
mulsd -2848(%rbp), %xmm13 ## 8-byte Folded Reload
divsd -3824(%rbp), %xmm13 ## 8-byte Folded Reload
mulsd -1392(%rbp), %xmm13 ## 8-byte Folded Reload
movsd -8624(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd -3728(%rbp), %xmm3 ## 8-byte Folded Reload
mulsd %xmm10, %xmm3
mulsd -3648(%rbp), %xmm3 ## 16-byte Folded Reload
movsd LCPI19_110(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm3
movsd %xmm13, -7368(%rbp) ## 8-byte Spill
movsd %xmm3, -8624(%rbp) ## 8-byte Spill
subsd %xmm3, %xmm13
addsd %xmm2, %xmm13
mulsd %xmm10, %xmm2
subsd %xmm2, %xmm13
addsd %xmm1, %xmm13
movapd %xmm8, %xmm1
mulsd -336(%rbp), %xmm1 ## 8-byte Folded Reload
movapd -1344(%rbp), %xmm4 ## 16-byte Reload
mulsd -920(%rbp), %xmm4 ## 8-byte Folded Reload
addsd %xmm1, %xmm4
movapd -2416(%rbp), %xmm2 ## 16-byte Reload
mulsd -1984(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm4, %xmm2
movapd %xmm13, %xmm9
addsd %xmm2, %xmm9
movsd -7360(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -4048(%rbp), %xmm1 ## 16-byte Folded Reload
mulsd LCPI19_69(%rip), %xmm1
mulsd -3952(%rbp), %xmm1 ## 8-byte Folded Reload
divsd -3712(%rbp), %xmm1 ## 8-byte Folded Reload
mulsd -2480(%rbp), %xmm1 ## 8-byte Folded Reload
movsd -7376(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd -1360(%rbp), %xmm3 ## 8-byte Folded Reload
mulsd %xmm10, %xmm3
mulsd -3072(%rbp), %xmm3 ## 16-byte Folded Reload
mulsd %xmm0, %xmm3
movsd %xmm1, -7360(%rbp) ## 8-byte Spill
movsd %xmm3, -7376(%rbp) ## 8-byte Spill
subsd %xmm3, %xmm1
subsd %xmm2, %xmm1
mulsd %xmm10, %xmm2
addsd %xmm1, %xmm2
movapd -2384(%rbp), %xmm1 ## 16-byte Reload
movsd -800(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
mulsd %xmm8, %xmm1
movapd -1024(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm11, %xmm3
mulsd %xmm11, %xmm4
addsd %xmm1, %xmm4
movapd -2368(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm12, %xmm1
addsd %xmm4, %xmm1
mulsd -2080(%rbp), %xmm7 ## 16-byte Folded Reload
addsd %xmm1, %xmm7
movapd %xmm13, %xmm1
mulsd LCPI19_25(%rip), %xmm1
subsd %xmm1, %xmm7
movapd -1776(%rbp), %xmm4 ## 16-byte Reload
movsd LCPI19_50(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm5, %xmm4
movsd -2112(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movapd -10176(%rbp), %xmm10 ## 16-byte Reload
mulsd %xmm10, %xmm1
movapd %xmm4, -2448(%rbp) ## 16-byte Spill
subsd %xmm1, %xmm4
movapd -4000(%rbp), %xmm1 ## 16-byte Reload
mulsd -2160(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm4
movapd -2064(%rbp), %xmm1 ## 16-byte Reload
movsd -9792(%rbp), %xmm11 ## 8-byte Reload
## xmm11 = mem[0],zero
mulsd %xmm11, %xmm1
addsd %xmm4, %xmm1
movapd -432(%rbp), %xmm3 ## 16-byte Reload
mulsd -2176(%rbp), %xmm3 ## 16-byte Folded Reload
subsd %xmm3, %xmm1
movapd -2608(%rbp), %xmm3 ## 16-byte Reload
movsd -3496(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm0, %xmm3
addsd %xmm1, %xmm3
movapd -624(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm3, -16016(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm1
addsd %xmm7, %xmm1
movsd -2512(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm5, %xmm3
mulsd -96(%rbp), %xmm10 ## 16-byte Folded Reload
movsd %xmm3, -7016(%rbp) ## 8-byte Spill
subsd %xmm3, %xmm10
movapd %xmm10, %xmm4
mulsd LCPI19_103(%rip), %xmm4
addsd %xmm1, %xmm4
movsd -320(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
movsd LCPI19_61(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm3
addsd %xmm0, %xmm11
movsd %xmm11, -9760(%rbp) ## 8-byte Spill
movapd -48(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm11, %xmm1
subsd %xmm1, %xmm3
movapd %xmm3, %xmm1
mulsd LCPI19_25(%rip), %xmm1
subsd %xmm1, %xmm4
movapd -11264(%rbp), %xmm1 ## 16-byte Reload
mulsd -72(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm4, %xmm1
movapd -1824(%rbp), %xmm4 ## 16-byte Reload
mulsd -1040(%rbp), %xmm4 ## 16-byte Folded Reload
subsd %xmm4, %xmm1
movsd -336(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd -2304(%rbp), %xmm4 ## 16-byte Folded Reload
subsd %xmm4, %xmm1
movapd %xmm9, %xmm11
addsd %xmm2, %xmm11
movapd %xmm11, -9984(%rbp) ## 16-byte Spill
movapd %xmm1, %xmm4
mulsd LCPI19_108(%rip), %xmm4
mulsd LCPI19_21(%rip), %xmm11
addsd %xmm4, %xmm11
movapd %xmm13, %xmm4
movsd LCPI19_24(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm5, %xmm4
movapd %xmm3, %xmm7
mulsd %xmm5, %xmm7
addsd %xmm4, %xmm7
movapd -1888(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm8, %xmm5
mulsd %xmm8, %xmm4
movapd -1024(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm8
mulsd -11776(%rbp), %xmm8 ## 16-byte Folded Reload
addsd %xmm4, %xmm8
mulsd LCPI19_14(%rip), %xmm15
addsd %xmm8, %xmm15
movapd -1584(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm12, %xmm4
addsd %xmm15, %xmm4
movapd -2080(%rbp), %xmm15 ## 16-byte Reload
mulsd %xmm15, %xmm6
addsd %xmm4, %xmm6
movapd %xmm10, %xmm8
movsd LCPI19_47(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm8
addsd %xmm6, %xmm8
movapd -496(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm8, %xmm4
addsd %xmm7, %xmm4
mulsd -896(%rbp), %xmm5 ## 16-byte Folded Reload
mulsd -11760(%rbp), %xmm9 ## 16-byte Folded Reload
addsd %xmm5, %xmm9
mulsd -1904(%rbp), %xmm12 ## 16-byte Folded Reload
addsd %xmm9, %xmm12
mulsd %xmm15, %xmm14
addsd %xmm12, %xmm14
movsd LCPI19_48(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm10
addsd %xmm14, %xmm10
movapd -752(%rbp), %xmm12 ## 16-byte Reload
movapd %xmm12, %xmm6
mulsd %xmm10, %xmm6
addsd %xmm4, %xmm6
movsd -2032(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movapd -1040(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm5, %xmm0
subsd %xmm0, %xmm6
movsd -920(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movapd -2304(%rbp), %xmm14 ## 16-byte Reload
mulsd %xmm14, %xmm0
subsd %xmm0, %xmm6
movsd LCPI19_75(%rip), %xmm0 ## xmm0 = mem[0],zero
movsd -320(%rbp), %xmm15 ## 8-byte Reload
## xmm15 = mem[0],zero
mulsd %xmm15, %xmm0
movsd %xmm0, -1056(%rbp) ## 8-byte Spill
mulsd LCPI19_73(%rip), %xmm0
addsd %xmm6, %xmm0
mulsd LCPI19_98(%rip), %xmm1
movapd -9984(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm4
movsd LCPI19_22(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm7, %xmm4
addsd %xmm1, %xmm4
addsd %xmm0, %xmm4
movsd LCPI19_13(%rip), %xmm6 ## xmm6 = mem[0],zero
mulsd %xmm6, %xmm0
addsd %xmm11, %xmm0
movsd LCPI19_23(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm13
mulsd %xmm1, %xmm3
addsd %xmm13, %xmm3
movapd %xmm12, %xmm1
movapd %xmm8, -13616(%rbp) ## 16-byte Spill
mulsd %xmm8, %xmm1
addsd %xmm3, %xmm1
movapd -496(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm10, -10176(%rbp) ## 16-byte Spill
mulsd %xmm10, %xmm3
subsd %xmm3, %xmm1
movapd %xmm5, %xmm3
mulsd -1808(%rbp), %xmm3 ## 16-byte Folded Reload
subsd %xmm3, %xmm1
movsd -1984(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm14, %xmm3
subsd %xmm3, %xmm1
mulsd LCPI19_64(%rip), %xmm2
addsd %xmm1, %xmm2
movsd -1056(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd LCPI19_72(%rip), %xmm5
addsd %xmm2, %xmm5
movsd LCPI19_124(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm15, %xmm3
movsd -9760(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
addsd -9776(%rbp), %xmm12 ## 8-byte Folded Reload
movapd -48(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm12, %xmm1
subsd %xmm1, %xmm3
addsd %xmm5, %xmm0
movapd %xmm3, %xmm2
mulsd LCPI19_21(%rip), %xmm2
addsd %xmm0, %xmm2
mulsd %xmm6, %xmm5
addsd %xmm4, %xmm5
mulsd %xmm7, %xmm3
addsd %xmm5, %xmm3
movapd -1616(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm4
mulsd %xmm2, %xmm4
movapd -1088(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm0
mulsd %xmm3, %xmm0
subsd %xmm0, %xmm4
movapd %xmm9, %xmm6
movapd %xmm9, %xmm0
movsd LCPI19_111(%rip), %xmm8 ## xmm8 = mem[0],zero
mulsd %xmm8, %xmm0
movapd -848(%rbp), %xmm14 ## 16-byte Reload
movapd %xmm14, %xmm15
mulsd %xmm4, %xmm15
subsd %xmm0, %xmm15
movapd %xmm9, %xmm0
movapd %xmm9, %xmm7
movsd LCPI19_115(%rip), %xmm6 ## xmm6 = mem[0],zero
mulsd %xmm6, %xmm0
movapd %xmm3, -16032(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm1
subsd %xmm0, %xmm1
movapd %xmm5, %xmm3
movapd %xmm2, -15712(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm3
addsd %xmm1, %xmm3
movsd -960(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
movsd LCPI19_123(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm2, %xmm1
movsd -1296(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm12, %xmm0
movsd %xmm1, -1040(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm1
mulsd %xmm6, %xmm1
subsd %xmm1, %xmm3
movapd %xmm2, %xmm0
mulsd LCPI19_83(%rip), %xmm0
mulsd LCPI19_81(%rip), %xmm0
movapd %xmm0, -11552(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm3
movapd -2144(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm0
mulsd %xmm3, %xmm0
subsd %xmm0, %xmm15
movsd -976(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
movsd LCPI19_127(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm5, %xmm2
movsd -560(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm12, %xmm0
subsd %xmm0, %xmm2
movapd %xmm2, %xmm0
mulsd %xmm8, %xmm0
subsd %xmm0, %xmm15
movapd %xmm9, %xmm0
movsd LCPI19_30(%rip), %xmm6 ## xmm6 = mem[0],zero
mulsd %xmm6, %xmm0
movapd %xmm4, -15632(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm1
subsd %xmm0, %xmm1
movapd %xmm3, -15648(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm14
addsd %xmm1, %xmm14
mulsd %xmm6, %xmm2
subsd %xmm2, %xmm14
movsd LCPI19_8(%rip), %xmm0 ## xmm0 = mem[0],zero
movsd -128(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
mulsd %xmm10, %xmm0
movapd %xmm10, %xmm9
movsd %xmm0, -9600(%rbp) ## 8-byte Spill
mulsd %xmm0, %xmm9
movsd LCPI19_6(%rip), %xmm11 ## xmm11 = mem[0],zero
mulsd %xmm9, %xmm11
movsd LCPI19_126(%rip), %xmm8 ## xmm8 = mem[0],zero
mulsd %xmm10, %xmm8
movapd %xmm8, %xmm7
addsd -13904(%rbp), %xmm7 ## 16-byte Folded Reload
addsd -13888(%rbp), %xmm8 ## 16-byte Folded Reload
movsd -1280(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd LCPI19_90(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
movsd LCPI19_88(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm0
movapd %xmm0, -9056(%rbp) ## 16-byte Spill
movapd -9072(%rbp), %xmm4 ## 16-byte Reload
addsd %xmm0, %xmm4
movapd %xmm4, -9072(%rbp) ## 16-byte Spill
movapd %xmm5, %xmm0
mulsd %xmm1, %xmm0
mulsd %xmm3, %xmm0
movapd %xmm0, -9040(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm14
movsd LCPI19_5(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm9, %xmm1
movsd LCPI19_128(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm10, %xmm0
movapd %xmm10, %xmm6
movsd %xmm0, -9592(%rbp) ## 8-byte Spill
mulsd %xmm0, %xmm6
mulsd LCPI19_16(%rip), %xmm6
testq %rax, %rax
movsd %xmm8, -12440(%rbp) ## 8-byte Spill
movsd %xmm7, -12432(%rbp) ## 8-byte Spill
movsd %xmm6, -1056(%rbp) ## 8-byte Spill
movsd %xmm11, -432(%rbp) ## 8-byte Spill
movsd %xmm1, -336(%rbp) ## 8-byte Spill
je LBB19_48
## %bb.47:
movapd -2880(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm0
movapd %xmm3, %xmm2
mulsd -10928(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm9, %xmm1
movsd LCPI19_4(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm1
addsd %xmm11, %xmm1
movapd -2784(%rbp), %xmm4 ## 16-byte Reload
mulsd -10912(%rbp), %xmm4 ## 16-byte Folded Reload
addsd %xmm1, %xmm4
movapd %xmm10, %xmm1
mulsd %xmm7, %xmm1
movsd %xmm1, -2768(%rbp) ## 8-byte Spill
movsd LCPI19_29(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm5, %xmm1
subsd %xmm1, %xmm4
subsd %xmm4, %xmm0
movapd -2528(%rbp), %xmm1 ## 16-byte Reload
mulsd -10864(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm0
movapd %xmm10, %xmm1
mulsd %xmm8, %xmm1
movsd %xmm1, -1984(%rbp) ## 8-byte Spill
mulsd %xmm5, %xmm1
addsd %xmm0, %xmm1
movapd -3184(%rbp), %xmm13 ## 16-byte Reload
movapd %xmm13, %xmm5
mulsd -10848(%rbp), %xmm5 ## 16-byte Folded Reload
addsd %xmm1, %xmm5
movsd LCPI19_8(%rip), %xmm7 ## xmm7 = mem[0],zero
movsd -280(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
mulsd %xmm8, %xmm7
movsd LCPI19_5(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm7, %xmm0
movapd %xmm7, %xmm1
mulsd LCPI19_118(%rip), %xmm1
subsd %xmm1, %xmm0
movsd LCPI19_119(%rip), %xmm4 ## xmm4 = mem[0],zero
movapd -10016(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm4, %xmm1
subsd %xmm1, %xmm0
movapd %xmm2, %xmm1
mulsd -10832(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -2784(%rbp), %xmm0 ## 16-byte Reload
mulsd -9072(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm1
movsd LCPI19_126(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm8, %xmm0
movapd %xmm10, %xmm3
mulsd -11120(%rbp), %xmm3 ## 16-byte Folded Reload
movapd %xmm0, %xmm6
subsd %xmm3, %xmm6
movapd %xmm6, %xmm3
mulsd %xmm4, %xmm3
subsd %xmm3, %xmm1
movapd -9984(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm4, %xmm3
subsd %xmm3, %xmm1
mulsd %xmm15, %xmm13
addsd %xmm1, %xmm13
movapd -2528(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm1
mulsd %xmm14, %xmm1
subsd %xmm1, %xmm13
mulsd %xmm12, %xmm10
subsd %xmm10, %xmm0
movapd %xmm0, %xmm1
mulsd %xmm4, %xmm1
subsd %xmm1, %xmm13
movsd LCPI19_120(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm8, %xmm1
mulsd %xmm4, %xmm1
addsd %xmm13, %xmm1
addsd %xmm5, %xmm5
addsd %xmm1, %xmm1
addsd %xmm5, %xmm1
movsd LCPI19_6(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm7, %xmm3
mulsd LCPI19_4(%rip), %xmm7
addsd %xmm3, %xmm7
movapd -10016(%rbp), %xmm3 ## 16-byte Reload
movsd LCPI19_29(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd %xmm4, %xmm3
subsd %xmm3, %xmm7
movapd %xmm12, %xmm13
movapd -2784(%rbp), %xmm12 ## 16-byte Reload
movapd %xmm12, %xmm3
mulsd -10832(%rbp), %xmm3 ## 16-byte Folded Reload
addsd %xmm7, %xmm3
movapd -2880(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm5
mulsd -9072(%rbp), %xmm5 ## 16-byte Folded Reload
addsd %xmm3, %xmm5
mulsd %xmm4, %xmm6
subsd %xmm6, %xmm5
movapd -9984(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm4, %xmm3
movapd %xmm4, %xmm6
subsd %xmm3, %xmm5
movapd %xmm2, %xmm4
movapd %xmm2, %xmm3
mulsd %xmm15, %xmm3
addsd %xmm5, %xmm3
movapd -3184(%rbp), %xmm10 ## 16-byte Reload
movapd %xmm10, %xmm5
mulsd %xmm14, %xmm5
addsd %xmm3, %xmm5
mulsd %xmm6, %xmm0
subsd %xmm0, %xmm5
mulsd LCPI19_118(%rip), %xmm9
movsd -336(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
subsd %xmm9, %xmm0
movapd %xmm8, %xmm2
movsd -1056(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd -10912(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm0, %xmm2
movsd -2768(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd LCPI19_119(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm0
subsd %xmm0, %xmm2
movapd %xmm12, %xmm0
movapd %xmm13, %xmm12
mulsd -10928(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm2, %xmm0
mulsd -10864(%rbp), %xmm10 ## 16-byte Folded Reload
addsd %xmm0, %xmm10
movsd -1984(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm3, %xmm0
subsd %xmm0, %xmm10
movapd %xmm4, %xmm0
mulsd -10848(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm10, %xmm0
movsd -1536(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
movapd %xmm4, %xmm2
mulsd %xmm1, %xmm2
addsd %xmm6, %xmm0
addsd %xmm5, %xmm0
movsd -1072(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm0, %xmm3
subsd %xmm3, %xmm2
movsd -2272(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm3, %xmm1
mulsd -880(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
mulsd %xmm4, %xmm2
movsd LCPI19_1(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm2
mulsd %xmm3, %xmm0
mulsd %xmm1, %xmm0
subsd %xmm0, %xmm2
movsd %xmm2, 8(%rax)
LBB19_48:
movapd %xmm12, -13824(%rbp) ## 16-byte Spill
movapd %xmm14, -15488(%rbp) ## 16-byte Spill
movapd %xmm15, -15504(%rbp) ## 16-byte Spill
movsd -5728(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd LCPI19_1(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm0
mulsd -5760(%rbp), %xmm0 ## 8-byte Folded Reload
movsd LCPI19_37(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm5, %xmm0
movsd -456(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd -1704(%rbp), %xmm4 ## 8-byte Folded Reload
movsd %xmm4, -7272(%rbp) ## 8-byte Spill
movsd -1408(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm4, %xmm2
movsd LCPI19_43(%rip), %xmm6 ## xmm6 = mem[0],zero
mulsd %xmm6, %xmm2
addsd %xmm0, %xmm2
movsd -4768(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm3, %xmm0
movapd %xmm3, %xmm4
mulsd -3760(%rbp), %xmm0 ## 8-byte Folded Reload
mulsd %xmm5, %xmm0
movapd %xmm5, %xmm7
movsd -1384(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -4816(%rbp), %xmm1 ## 8-byte Folded Reload
movsd %xmm1, -7280(%rbp) ## 8-byte Spill
movsd -1152(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm1, %xmm3
mulsd %xmm6, %xmm3
movapd %xmm6, %xmm5
addsd %xmm0, %xmm3
movapd %xmm3, %xmm11
movsd -4656(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm4, %xmm0
mulsd -3312(%rbp), %xmm0 ## 16-byte Folded Reload
mulsd %xmm7, %xmm0
movsd -1168(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -1656(%rbp), %xmm1 ## 8-byte Folded Reload
movsd %xmm1, -4456(%rbp) ## 8-byte Spill
movsd -3856(%rbp), %xmm14 ## 8-byte Reload
## xmm14 = mem[0],zero
mulsd %xmm1, %xmm14
mulsd %xmm6, %xmm14
addsd %xmm0, %xmm14
movapd -272(%rbp), %xmm10 ## 16-byte Reload
movapd %xmm10, %xmm0
movsd LCPI19_61(%rip), %xmm9 ## xmm9 = mem[0],zero
mulsd %xmm9, %xmm0
movapd %xmm9, %xmm12
movapd -2816(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm0, %xmm3
movapd %xmm0, %xmm13
movapd %xmm0, -11728(%rbp) ## 16-byte Spill
subsd -2240(%rbp), %xmm3 ## 16-byte Folded Reload
movapd -592(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm7
mulsd %xmm2, %xmm7
movapd -1856(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm7, %xmm1
movapd -112(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm0
movapd %xmm5, %xmm9
mulsd %xmm7, %xmm0
mulsd %xmm2, %xmm10
movsd %xmm0, -12904(%rbp) ## 8-byte Spill
movapd %xmm0, %xmm5
subsd %xmm10, %xmm5
movapd %xmm5, -11744(%rbp) ## 16-byte Spill
movapd -1440(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm0
mulsd %xmm5, %xmm0
movapd %xmm1, -16960(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm0
movapd %xmm4, %xmm8
movsd %xmm11, -4448(%rbp) ## 8-byte Spill
mulsd %xmm11, %xmm8
movapd -2400(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm8, %xmm1
movapd %xmm1, -16944(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm0
movapd -1840(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm2, %xmm1
movsd %xmm1, -7120(%rbp) ## 8-byte Spill
subsd %xmm1, %xmm3
movapd -2624(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm11, %xmm1
movsd %xmm1, -12896(%rbp) ## 8-byte Spill
subsd %xmm1, %xmm3
movapd -688(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm3, -16128(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm1
addsd %xmm0, %xmm1
movsd %xmm2, -3936(%rbp) ## 8-byte Spill
movapd %xmm2, %xmm4
addsd %xmm11, %xmm4
movapd -192(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm2
movapd %xmm0, %xmm11
mulsd %xmm4, %xmm2
movsd -480(%rbp), %xmm15 ## 8-byte Reload
## xmm15 = mem[0],zero
movapd %xmm15, %xmm5
movsd LCPI19_50(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm5
movapd %xmm2, -17008(%rbp) ## 16-byte Spill
addsd %xmm2, %xmm5
movapd %xmm5, %xmm0
movsd LCPI19_25(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm0
addsd %xmm1, %xmm0
movapd -400(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm13, %xmm1
movsd -7128(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
subsd %xmm1, %xmm2
movapd %xmm2, %xmm1
movsd LCPI19_103(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm1
subsd %xmm1, %xmm0
movsd -1552(%rbp), %xmm13 ## 8-byte Reload
## xmm13 = mem[0],zero
mulsd %xmm12, %xmm13
movapd %xmm13, -11248(%rbp) ## 16-byte Spill
mulsd -2336(%rbp), %xmm13 ## 8-byte Folded Reload
addsd %xmm0, %xmm13
movapd %xmm4, -6400(%rbp) ## 16-byte Spill
addsd %xmm14, %xmm4
movapd %xmm4, -10208(%rbp) ## 16-byte Spill
movapd %xmm11, %xmm3
movapd %xmm11, %xmm12
movsd %xmm14, -5288(%rbp) ## 8-byte Spill
mulsd %xmm14, %xmm12
movapd -2544(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm12, %xmm0
movsd %xmm0, -12888(%rbp) ## 8-byte Spill
addsd %xmm0, %xmm13
movapd %xmm9, %xmm1
mulsd %xmm12, %xmm1
movsd %xmm1, -4000(%rbp) ## 8-byte Spill
movapd -2576(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm1, %xmm0
movsd %xmm0, -12880(%rbp) ## 8-byte Spill
addsd %xmm0, %xmm13
movapd %xmm13, %xmm0
movsd LCPI19_15(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
movapd %xmm11, %xmm1
mulsd %xmm4, %xmm1
mulsd LCPI19_129(%rip), %xmm15
movapd %xmm1, -16992(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm15
movapd %xmm15, %xmm14
movsd LCPI19_22(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm14
addsd %xmm0, %xmm14
movapd -1920(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm7, %xmm1
movapd -608(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm11
mulsd %xmm7, %xmm11
movapd -912(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm10, %xmm0
movapd %xmm11, -16912(%rbp) ## 16-byte Spill
movapd %xmm0, -10240(%rbp) ## 16-byte Spill
subsd %xmm0, %xmm11
movapd %xmm6, %xmm0
mulsd %xmm11, %xmm0
movapd %xmm11, -11680(%rbp) ## 16-byte Spill
movsd %xmm1, -12872(%rbp) ## 8-byte Spill
addsd %xmm1, %xmm0
movapd -2432(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm8, -12256(%rbp) ## 16-byte Spill
mulsd %xmm8, %xmm1
movapd %xmm1, -16896(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm0
movapd %xmm2, %xmm4
movsd LCPI19_48(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm4
addsd %xmm0, %xmm4
movapd %xmm5, %xmm1
movsd LCPI19_24(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm1
movapd -512(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm4, %xmm0
addsd %xmm1, %xmm0
movapd %xmm9, %xmm1
mulsd -11744(%rbp), %xmm1 ## 16-byte Folded Reload
movapd %xmm3, %xmm6
mulsd %xmm11, %xmm6
addsd %xmm1, %xmm6
movapd -1600(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm7, %xmm3
movapd -720(%rbp), %xmm9 ## 16-byte Reload
mulsd %xmm9, %xmm7
movapd -1504(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm10, -13984(%rbp) ## 16-byte Spill
mulsd %xmm10, %xmm1
movapd %xmm7, -16976(%rbp) ## 16-byte Spill
movapd %xmm1, -11792(%rbp) ## 16-byte Spill
subsd %xmm1, %xmm7
movapd %xmm7, -11664(%rbp) ## 16-byte Spill
movapd %xmm9, %xmm1
mulsd %xmm7, %xmm1
addsd %xmm6, %xmm1
movapd -1440(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm7, %xmm6
movsd %xmm3, -12864(%rbp) ## 8-byte Spill
addsd %xmm3, %xmm6
movsd LCPI19_14(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm7, %xmm1
addsd %xmm6, %xmm1
movapd -2128(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm8, %xmm6
movapd %xmm6, -16880(%rbp) ## 16-byte Spill
addsd %xmm6, %xmm1
movsd LCPI19_47(%rip), %xmm6 ## xmm6 = mem[0],zero
mulsd %xmm6, %xmm2
addsd %xmm1, %xmm2
movapd -448(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm1
mulsd %xmm2, %xmm1
addsd %xmm0, %xmm1
movapd -2912(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm12, %xmm0
movapd %xmm0, -16864(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm1
movapd -3264(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm6
mulsd %xmm12, %xmm6
movapd -2576(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm6, %xmm0
movapd %xmm6, %xmm9
movapd %xmm6, -7616(%rbp) ## 16-byte Spill
movsd %xmm0, -12856(%rbp) ## 8-byte Spill
addsd %xmm0, %xmm1
movsd -480(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd LCPI19_74(%rip), %xmm10 ## xmm10 = mem[0],zero
mulsd %xmm10, %xmm0
movapd %xmm0, %xmm7
movsd LCPI19_73(%rip), %xmm6 ## xmm6 = mem[0],zero
mulsd %xmm6, %xmm7
addsd %xmm1, %xmm7
movsd LCPI19_23(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm5
movapd %xmm3, %xmm1
movapd %xmm4, -15952(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm1
subsd %xmm1, %xmm5
movapd -512(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm2, -15968(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm1
addsd %xmm5, %xmm1
movapd -112(%rbp), %xmm5 ## 16-byte Reload
mulsd -4000(%rbp), %xmm5 ## 8-byte Folded Reload
mulsd %xmm9, %xmm8
addsd %xmm5, %xmm8
movapd -3248(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm12, %xmm11
mulsd %xmm12, %xmm3
movapd -2560(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm5, %xmm11
movapd %xmm11, -7632(%rbp) ## 16-byte Spill
movapd %xmm5, %xmm2
mulsd %xmm11, %xmm2
addsd %xmm8, %xmm2
movsd %xmm3, -12824(%rbp) ## 8-byte Spill
addsd %xmm3, %xmm1
movapd -2576(%rbp), %xmm12 ## 16-byte Reload
mulsd %xmm11, %xmm12
movsd %xmm12, -12816(%rbp) ## 8-byte Spill
addsd %xmm12, %xmm1
movsd LCPI19_64(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm5, %xmm2
movsd %xmm2, -12832(%rbp) ## 8-byte Spill
addsd %xmm2, %xmm1
movsd LCPI19_72(%rip), %xmm12 ## xmm12 = mem[0],zero
mulsd %xmm12, %xmm0
addsd %xmm1, %xmm0
movsd LCPI19_11(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm13
movsd LCPI19_21(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm15
addsd %xmm13, %xmm15
addsd %xmm7, %xmm14
movsd LCPI19_13(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm7
addsd %xmm15, %xmm7
addsd %xmm0, %xmm7
movapd %xmm0, %xmm1
mulsd %xmm2, %xmm1
addsd %xmm14, %xmm1
movsd -1200(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movapd -10208(%rbp), %xmm14 ## 16-byte Reload
mulsd %xmm14, %xmm0
movsd %xmm0, -12840(%rbp) ## 8-byte Spill
movsd -816(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
subsd %xmm0, %xmm2
mulsd LCPI19_20(%rip), %xmm2
movapd -1312(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm0
mulsd %xmm1, %xmm0
subsd %xmm0, %xmm2
movapd -1328(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm0
mulsd %xmm7, %xmm0
subsd %xmm0, %xmm2
movapd -11648(%rbp), %xmm3 ## 16-byte Reload
addsd %xmm2, %xmm3
movapd %xmm7, -15680(%rbp) ## 16-byte Spill
mulsd %xmm7, %xmm4
movapd %xmm5, %xmm0
movapd %xmm1, -15664(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm0
subsd %xmm0, %xmm4
movapd -1936(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm0
movapd %xmm1, %xmm6
mulsd %xmm4, %xmm0
movsd -704(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm14, %xmm2
movsd -1280(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movsd LCPI19_125(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm7, %xmm1
movsd %xmm2, -12848(%rbp) ## 8-byte Spill
addsd %xmm2, %xmm1
movapd %xmm1, %xmm2
movsd LCPI19_30(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm5, %xmm2
subsd %xmm2, %xmm0
movapd -1216(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm2
mulsd %xmm3, %xmm2
subsd %xmm0, %xmm2
movapd -9056(%rbp), %xmm0 ## 16-byte Reload
addsd %xmm2, %xmm0
movapd %xmm0, -9056(%rbp) ## 16-byte Spill
movapd %xmm5, %xmm0
movapd %xmm4, -15600(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm0
movsd LCPI19_111(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm1
subsd %xmm1, %xmm0
movapd %xmm6, %xmm1
movapd %xmm3, -11648(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm1
addsd %xmm0, %xmm1
movapd %xmm1, -10800(%rbp) ## 16-byte Spill
movsd -3360(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd LCPI19_1(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm0
mulsd -4736(%rbp), %xmm0 ## 8-byte Folded Reload
movsd LCPI19_37(%rip), %xmm14 ## xmm14 = mem[0],zero
mulsd %xmm14, %xmm0
movsd -2288(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -3776(%rbp), %xmm1 ## 8-byte Folded Reload
movsd %xmm1, -7296(%rbp) ## 8-byte Spill
movsd -936(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm1, %xmm2
movsd LCPI19_43(%rip), %xmm12 ## xmm12 = mem[0],zero
mulsd %xmm12, %xmm2
addsd %xmm0, %xmm2
movsd -1664(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm3, %xmm0
mulsd -1680(%rbp), %xmm0 ## 8-byte Folded Reload
mulsd %xmm14, %xmm0
movsd -1792(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -3808(%rbp), %xmm1 ## 8-byte Folded Reload
movsd %xmm1, -7304(%rbp) ## 8-byte Spill
movsd -1144(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm1, %xmm3
mulsd %xmm12, %xmm3
addsd %xmm0, %xmm3
movapd %xmm3, %xmm10
movapd -96(%rbp), %xmm13 ## 16-byte Reload
movapd %xmm13, %xmm0
movsd LCPI19_61(%rip), %xmm15 ## xmm15 = mem[0],zero
mulsd %xmm15, %xmm0
movapd -1568(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm0, %xmm3
movapd %xmm0, %xmm8
movapd %xmm0, -11632(%rbp) ## 16-byte Spill
subsd -2448(%rbp), %xmm3 ## 16-byte Folded Reload
movapd -144(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm5
mulsd %xmm2, %xmm5
movapd -2384(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm5, %xmm1
movapd -64(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm0
movapd %xmm4, %xmm6
mulsd %xmm5, %xmm0
mulsd %xmm2, %xmm13
movsd %xmm0, -13088(%rbp) ## 8-byte Spill
movapd %xmm0, %xmm4
subsd %xmm13, %xmm4
movsd %xmm4, -7080(%rbp) ## 8-byte Spill
movapd -2096(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm4, %xmm0
movapd %xmm1, -17152(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm0
movsd %xmm10, -4440(%rbp) ## 8-byte Spill
mulsd %xmm10, %xmm7
movapd %xmm7, -7696(%rbp) ## 16-byte Spill
movapd -2368(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm7, %xmm1
movsd %xmm1, -13096(%rbp) ## 8-byte Spill
addsd %xmm1, %xmm0
movapd -2064(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm2, %xmm1
movsd %xmm1, -7104(%rbp) ## 8-byte Spill
subsd %xmm1, %xmm3
movapd -2608(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm10, %xmm1
movsd %xmm1, -13080(%rbp) ## 8-byte Spill
subsd %xmm1, %xmm3
movapd -624(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm3, -15856(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm1
addsd %xmm0, %xmm1
movsd %xmm2, -2984(%rbp) ## 8-byte Spill
addsd %xmm10, %xmm2
movapd -48(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm0
movapd %xmm3, %xmm7
mulsd %xmm2, %xmm0
movsd -320(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
movapd %xmm3, %xmm11
movapd %xmm3, %xmm10
mulsd LCPI19_50(%rip), %xmm11
movapd %xmm0, -17200(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm11
movapd %xmm11, %xmm0
mulsd LCPI19_25(%rip), %xmm0
subsd %xmm0, %xmm1
movapd -256(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm8, %xmm0
movsd -7016(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
subsd %xmm0, %xmm9
movapd %xmm9, %xmm0
mulsd LCPI19_103(%rip), %xmm0
addsd %xmm1, %xmm0
movapd %xmm15, %xmm3
mulsd -992(%rbp), %xmm3 ## 16-byte Folded Reload
movapd %xmm3, -11312(%rbp) ## 16-byte Spill
mulsd -72(%rbp), %xmm3 ## 8-byte Folded Reload
addsd %xmm0, %xmm3
movsd -2832(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd LCPI19_1(%rip), %xmm0
mulsd -4624(%rbp), %xmm0 ## 16-byte Folded Reload
mulsd %xmm14, %xmm0
movsd -2464(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -1648(%rbp), %xmm1 ## 8-byte Folded Reload
movsd %xmm1, -2448(%rbp) ## 8-byte Spill
movsd -1136(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm1, %xmm4
mulsd %xmm12, %xmm4
addsd %xmm0, %xmm4
movapd %xmm7, %xmm14
mulsd %xmm4, %xmm14
movapd -1824(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm14, %xmm0
movsd %xmm0, -13072(%rbp) ## 8-byte Spill
addsd %xmm0, %xmm3
movapd %xmm6, %xmm1
mulsd %xmm14, %xmm1
movsd %xmm1, -4016(%rbp) ## 8-byte Spill
movapd -1008(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm1, %xmm0
movapd %xmm0, -17136(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm3
movapd %xmm3, %xmm0
movsd LCPI19_108(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
movapd %xmm2, -6384(%rbp) ## 16-byte Spill
movsd %xmm4, -5280(%rbp) ## 8-byte Spill
addsd %xmm4, %xmm2
movapd %xmm2, -10192(%rbp) ## 16-byte Spill
movapd %xmm7, %xmm1
mulsd %xmm2, %xmm1
movapd %xmm10, %xmm4
mulsd LCPI19_129(%rip), %xmm4
movapd %xmm1, -17168(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm4
movapd %xmm4, %xmm15
mulsd LCPI19_21(%rip), %xmm15
addsd %xmm0, %xmm15
movapd -896(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm5, %xmm1
movapd -176(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm10
mulsd %xmm5, %xmm10
movapd -1488(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm13, %xmm0
movapd %xmm10, -17120(%rbp) ## 16-byte Spill
movsd %xmm0, -4464(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm10
movapd -2096(%rbp), %xmm12 ## 16-byte Reload
movapd %xmm12, %xmm0
mulsd %xmm10, %xmm0
movapd %xmm10, -11584(%rbp) ## 16-byte Spill
movsd %xmm1, -13064(%rbp) ## 8-byte Spill
addsd %xmm1, %xmm0
movapd -1904(%rbp), %xmm1 ## 16-byte Reload
movapd -7696(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm7, %xmm1
movapd %xmm1, -17104(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm0
movapd %xmm9, %xmm8
movsd LCPI19_48(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm8
addsd %xmm0, %xmm8
movapd %xmm11, %xmm1
mulsd LCPI19_24(%rip), %xmm1
movapd -752(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm8, %xmm2
addsd %xmm1, %xmm2
movapd -64(%rbp), %xmm1 ## 16-byte Reload
mulsd -7080(%rbp), %xmm1 ## 8-byte Folded Reload
mulsd %xmm10, %xmm6
addsd %xmm1, %xmm6
movapd -1888(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm5, %xmm0
movapd -736(%rbp), %xmm10 ## 16-byte Reload
mulsd %xmm10, %xmm5
movapd -864(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm13, -10048(%rbp) ## 16-byte Spill
mulsd %xmm13, %xmm1
movapd %xmm5, -17184(%rbp) ## 16-byte Spill
movsd %xmm1, -7088(%rbp) ## 8-byte Spill
subsd %xmm1, %xmm5
movapd %xmm5, -11600(%rbp) ## 16-byte Spill
movapd %xmm10, %xmm1
mulsd %xmm5, %xmm1
addsd %xmm6, %xmm1
mulsd %xmm5, %xmm12
movsd %xmm0, -13048(%rbp) ## 8-byte Spill
addsd %xmm0, %xmm12
mulsd LCPI19_14(%rip), %xmm1
addsd %xmm12, %xmm1
movapd -1584(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm7, %xmm5
movapd %xmm5, -17088(%rbp) ## 16-byte Spill
addsd %xmm5, %xmm1
movsd LCPI19_47(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm9
addsd %xmm1, %xmm9
movapd -496(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm1
movapd %xmm0, %xmm7
mulsd %xmm9, %xmm1
addsd %xmm2, %xmm1
movsd -2032(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm14, %xmm0
movsd %xmm0, -13056(%rbp) ## 8-byte Spill
addsd %xmm0, %xmm1
movapd -3280(%rbp), %xmm10 ## 16-byte Reload
movapd %xmm10, %xmm2
mulsd %xmm14, %xmm2
movapd -1008(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm2, %xmm0
movapd %xmm2, %xmm5
movapd %xmm2, -7680(%rbp) ## 16-byte Spill
movapd %xmm0, -17072(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm1
movsd -320(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd LCPI19_74(%rip), %xmm6
movapd %xmm6, %xmm0
mulsd LCPI19_73(%rip), %xmm0
addsd %xmm1, %xmm0
movsd LCPI19_98(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm3
mulsd LCPI19_22(%rip), %xmm4
addsd %xmm3, %xmm4
addsd %xmm0, %xmm4
movapd %xmm0, %xmm3
movsd LCPI19_13(%rip), %xmm12 ## xmm12 = mem[0],zero
mulsd %xmm12, %xmm3
addsd %xmm15, %xmm3
mulsd LCPI19_23(%rip), %xmm11
movapd %xmm7, %xmm0
movapd %xmm8, -10880(%rbp) ## 16-byte Spill
mulsd %xmm8, %xmm0
subsd %xmm0, %xmm11
movapd -752(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm9, -15728(%rbp) ## 16-byte Spill
mulsd %xmm9, %xmm0
addsd %xmm11, %xmm0
movapd -64(%rbp), %xmm1 ## 16-byte Reload
mulsd -4016(%rbp), %xmm1 ## 8-byte Folded Reload
movapd %xmm10, %xmm2
mulsd %xmm5, %xmm2
addsd %xmm1, %xmm2
movapd -1808(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm14, %xmm13
mulsd %xmm14, %xmm5
movapd -2592(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm1, %xmm13
movapd %xmm13, -7712(%rbp) ## 16-byte Spill
mulsd %xmm13, %xmm1
addsd %xmm2, %xmm1
movsd %xmm5, -13016(%rbp) ## 8-byte Spill
addsd %xmm5, %xmm0
movapd -1008(%rbp), %xmm14 ## 16-byte Reload
mulsd %xmm13, %xmm14
movapd %xmm14, -17040(%rbp) ## 16-byte Spill
addsd %xmm14, %xmm0
mulsd LCPI19_64(%rip), %xmm1
movsd %xmm1, -13024(%rbp) ## 8-byte Spill
addsd %xmm1, %xmm0
mulsd LCPI19_72(%rip), %xmm6
addsd %xmm0, %xmm6
addsd %xmm6, %xmm3
mulsd %xmm12, %xmm6
addsd %xmm4, %xmm6
movapd -1616(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm4
movapd %xmm0, %xmm7
mulsd %xmm3, %xmm4
movapd -1088(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm0
movapd %xmm1, %xmm9
mulsd %xmm6, %xmm0
subsd %xmm0, %xmm4
movapd -848(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm0
movapd %xmm1, %xmm8
mulsd %xmm4, %xmm0
movsd -560(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
movapd -10192(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm5, %xmm2
movsd -976(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd LCPI19_125(%rip), %xmm1
movsd %xmm2, -13040(%rbp) ## 8-byte Spill
addsd %xmm2, %xmm1
movapd %xmm1, %xmm2
mulsd LCPI19_111(%rip), %xmm2
subsd %xmm2, %xmm0
movsd -1296(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm5, %xmm2
movsd %xmm2, -13032(%rbp) ## 8-byte Spill
movsd -1040(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
subsd %xmm2, %xmm5
mulsd LCPI19_20(%rip), %xmm5
movapd %xmm7, %xmm2
movapd %xmm6, -15584(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm2
subsd %xmm2, %xmm5
movapd %xmm9, %xmm2
movapd %xmm3, -15552(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm2
subsd %xmm2, %xmm5
movapd -11552(%rbp), %xmm2 ## 16-byte Reload
addsd %xmm5, %xmm2
movapd -2144(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm3
mulsd %xmm2, %xmm3
addsd %xmm0, %xmm3
movapd %xmm3, -10784(%rbp) ## 16-byte Spill
movapd %xmm5, %xmm0
movapd %xmm4, -15456(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm0
mulsd LCPI19_30(%rip), %xmm1
subsd %xmm1, %xmm0
movapd %xmm8, %xmm1
movapd %xmm2, -11552(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm1
subsd %xmm0, %xmm1
movapd -9040(%rbp), %xmm0 ## 16-byte Reload
addsd %xmm1, %xmm0
movapd %xmm0, -9040(%rbp) ## 16-byte Spill
movapd -528(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm0
movapd -11744(%rbp), %xmm8 ## 16-byte Reload
mulsd %xmm8, %xmm0
movapd -1248(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm1
mulsd -11680(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -1264(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm2
mulsd -11664(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm1, %xmm2
movsd -456(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
mulsd -5072(%rbp), %xmm10 ## 8-byte Folded Reload
movsd LCPI19_1(%rip), %xmm14 ## xmm14 = mem[0],zero
mulsd %xmm14, %xmm10
mulsd -4112(%rbp), %xmm10 ## 16-byte Folded Reload
movsd LCPI19_110(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm10
movapd %xmm0, %xmm15
movsd -7272(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -5104(%rbp), %xmm0 ## 8-byte Folded Reload
movsd LCPI19_41(%rip), %xmm13 ## xmm13 = mem[0],zero
mulsd %xmm13, %xmm0
mulsd -4752(%rbp), %xmm0 ## 8-byte Folded Reload
divsd -4832(%rbp), %xmm0 ## 8-byte Folded Reload
mulsd -2896(%rbp), %xmm0 ## 16-byte Folded Reload
movsd %xmm10, -12728(%rbp) ## 8-byte Spill
movsd %xmm0, -7272(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm10
addsd %xmm2, %xmm10
movapd %xmm2, %xmm0
mulsd %xmm14, %xmm0
subsd %xmm0, %xmm10
movapd %xmm10, %xmm0
subsd %xmm2, %xmm0
movapd -112(%rbp), %xmm6 ## 16-byte Reload
movapd -12256(%rbp), %xmm11 ## 16-byte Reload
mulsd %xmm11, %xmm6
movapd -272(%rbp), %xmm7 ## 16-byte Reload
mulsd -4448(%rbp), %xmm7 ## 8-byte Folded Reload
movsd %xmm6, -12736(%rbp) ## 8-byte Spill
subsd %xmm7, %xmm6
movapd %xmm3, %xmm1
mulsd %xmm6, %xmm1
movapd -608(%rbp), %xmm12 ## 16-byte Reload
mulsd %xmm11, %xmm12
movapd -912(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm7, %xmm2
movapd %xmm12, -16848(%rbp) ## 16-byte Spill
movapd %xmm2, -10160(%rbp) ## 16-byte Spill
subsd %xmm2, %xmm12
mulsd %xmm12, %xmm4
addsd %xmm1, %xmm4
mulsd -720(%rbp), %xmm11 ## 16-byte Folded Reload
movapd -1504(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm7, -13936(%rbp) ## 16-byte Spill
mulsd %xmm7, %xmm1
movapd %xmm11, -12256(%rbp) ## 16-byte Spill
movapd %xmm1, -10144(%rbp) ## 16-byte Spill
subsd %xmm1, %xmm11
movapd %xmm5, %xmm1
mulsd %xmm11, %xmm1
addsd %xmm4, %xmm1
subsd %xmm1, %xmm0
movsd -1384(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd -5088(%rbp), %xmm4 ## 8-byte Folded Reload
mulsd %xmm14, %xmm4
mulsd -3696(%rbp), %xmm4 ## 16-byte Folded Reload
mulsd %xmm15, %xmm4
movsd -7280(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd -1712(%rbp), %xmm5 ## 8-byte Folded Reload
movsd LCPI19_42(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm5
mulsd -4096(%rbp), %xmm5 ## 8-byte Folded Reload
divsd -4848(%rbp), %xmm5 ## 8-byte Folded Reload
mulsd -1400(%rbp), %xmm5 ## 8-byte Folded Reload
movsd %xmm4, -12720(%rbp) ## 8-byte Spill
movsd %xmm5, -7280(%rbp) ## 8-byte Spill
subsd %xmm5, %xmm4
addsd %xmm1, %xmm4
mulsd %xmm14, %xmm1
subsd %xmm1, %xmm4
addsd %xmm0, %xmm4
movapd %xmm3, %xmm0
mulsd -4000(%rbp), %xmm0 ## 8-byte Folded Reload
movapd -2048(%rbp), %xmm1 ## 16-byte Reload
mulsd -7616(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -1104(%rbp), %xmm2 ## 16-byte Reload
mulsd -7632(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm1, %xmm2
movapd -1184(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm8, %xmm1
mulsd %xmm8, %xmm0
movsd -1872(%rbp), %xmm15 ## 8-byte Reload
## xmm15 = mem[0],zero
mulsd %xmm15, %xmm6
addsd %xmm0, %xmm6
movapd %xmm4, %xmm0
movsd LCPI19_25(%rip), %xmm14 ## xmm14 = mem[0],zero
mulsd %xmm14, %xmm0
addsd %xmm6, %xmm0
movapd -592(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm6
movsd LCPI19_50(%rip), %xmm13 ## xmm13 = mem[0],zero
mulsd %xmm13, %xmm6
movsd -1552(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm6, %xmm3
movapd -2640(%rbp), %xmm1 ## 16-byte Reload
movapd -11728(%rbp), %xmm8 ## 16-byte Reload
mulsd %xmm8, %xmm1
subsd %xmm1, %xmm3
movapd -688(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm5
movapd %xmm3, -15984(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm5
addsd %xmm0, %xmm5
mulsd -272(%rbp), %xmm8 ## 16-byte Folded Reload
movapd %xmm6, -13920(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm7
subsd %xmm7, %xmm8
movapd %xmm8, %xmm0
mulsd LCPI19_103(%rip), %xmm0
subsd %xmm0, %xmm5
movapd -192(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm0
mulsd %xmm13, %xmm0
movapd %xmm1, %xmm3
movapd %xmm0, -17296(%rbp) ## 16-byte Spill
mulsd %xmm0, %xmm3
movapd %xmm3, %xmm0
mulsd %xmm14, %xmm0
subsd %xmm0, %xmm5
movapd -11248(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm9, %xmm0
movapd %xmm0, -17280(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm0
subsd %xmm0, %xmm5
movapd %xmm4, %xmm7
subsd %xmm2, %xmm7
movsd -1368(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -1168(%rbp), %xmm0 ## 8-byte Folded Reload
movsd LCPI19_1(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
mulsd -3984(%rbp), %xmm0 ## 16-byte Folded Reload
mulsd LCPI19_110(%rip), %xmm0
movsd -4456(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd -4080(%rbp), %xmm6 ## 16-byte Folded Reload
movsd LCPI19_69(%rip), %xmm13 ## xmm13 = mem[0],zero
mulsd %xmm13, %xmm6
mulsd -3968(%rbp), %xmm6 ## 8-byte Folded Reload
divsd -648(%rbp), %xmm6 ## 8-byte Folded Reload
mulsd -2496(%rbp), %xmm6 ## 8-byte Folded Reload
movsd %xmm0, -12704(%rbp) ## 8-byte Spill
movsd %xmm6, -4456(%rbp) ## 8-byte Spill
subsd %xmm6, %xmm0
addsd %xmm2, %xmm0
movsd %xmm2, -12712(%rbp) ## 8-byte Spill
mulsd %xmm1, %xmm2
movsd %xmm2, -12696(%rbp) ## 8-byte Spill
subsd %xmm2, %xmm0
movapd %xmm7, %xmm13
addsd %xmm0, %xmm13
movsd %xmm13, -7000(%rbp) ## 8-byte Spill
movapd -2352(%rbp), %xmm14 ## 16-byte Reload
movsd -4000(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm14, %xmm1
movsd %xmm1, -4000(%rbp) ## 8-byte Spill
addsd %xmm1, %xmm5
movapd %xmm5, %xmm6
mulsd LCPI19_11(%rip), %xmm6
movapd %xmm13, %xmm7
mulsd LCPI19_21(%rip), %xmm7
addsd %xmm6, %xmm7
movapd %xmm4, %xmm6
mulsd LCPI19_24(%rip), %xmm6
movapd %xmm3, %xmm1
movsd LCPI19_112(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm1
addsd %xmm6, %xmm1
movapd -1184(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm6
mulsd -11664(%rbp), %xmm6 ## 16-byte Folded Reload
mulsd LCPI19_14(%rip), %xmm10
addsd %xmm6, %xmm10
mulsd %xmm15, %xmm11
addsd %xmm10, %xmm11
movapd %xmm8, %xmm6
movsd LCPI19_47(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm6
addsd %xmm11, %xmm6
movapd -448(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm11, %xmm2
mulsd %xmm6, %xmm2
addsd %xmm1, %xmm2
movapd %xmm9, %xmm1
mulsd -11680(%rbp), %xmm1 ## 16-byte Folded Reload
mulsd %xmm15, %xmm12
addsd %xmm1, %xmm12
movsd LCPI19_48(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm8
addsd %xmm12, %xmm8
movapd -512(%rbp), %xmm10 ## 16-byte Reload
movapd %xmm10, %xmm1
mulsd %xmm8, %xmm1
addsd %xmm2, %xmm1
movapd -7616(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm14, %xmm2
movapd %xmm2, -7616(%rbp) ## 16-byte Spill
addsd %xmm2, %xmm1
movapd -4608(%rbp), %xmm9 ## 16-byte Reload
addsd %xmm1, %xmm9
movapd %xmm9, %xmm2
movsd LCPI19_13(%rip), %xmm12 ## xmm12 = mem[0],zero
mulsd %xmm12, %xmm2
addsd %xmm7, %xmm2
movsd LCPI19_23(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm4
mulsd %xmm1, %xmm3
subsd %xmm3, %xmm4
movapd %xmm10, %xmm1
movapd %xmm6, -15808(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm1
addsd %xmm4, %xmm1
movapd %xmm8, -11728(%rbp) ## 16-byte Spill
mulsd %xmm8, %xmm11
subsd %xmm11, %xmm1
movapd -7632(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm14, %xmm3
movapd %xmm3, -7632(%rbp) ## 16-byte Spill
addsd %xmm3, %xmm1
mulsd LCPI19_64(%rip), %xmm0
addsd %xmm1, %xmm0
movapd -1744(%rbp), %xmm1 ## 16-byte Reload
addsd %xmm0, %xmm1
addsd %xmm1, %xmm2
movapd -192(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm0
mulsd LCPI19_129(%rip), %xmm0
movapd %xmm0, -17264(%rbp) ## 16-byte Spill
mulsd %xmm0, %xmm4
movapd %xmm4, %xmm3
movsd LCPI19_114(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm3
addsd %xmm2, %xmm3
mulsd LCPI19_15(%rip), %xmm5
movapd %xmm13, %xmm0
mulsd LCPI19_22(%rip), %xmm0
addsd %xmm5, %xmm0
addsd %xmm9, %xmm0
mulsd %xmm12, %xmm1
addsd %xmm0, %xmm1
movsd LCPI19_31(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm4
addsd %xmm1, %xmm4
movapd -1312(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm5
mulsd %xmm3, %xmm5
movapd -1328(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm0
mulsd %xmm4, %xmm0
subsd %xmm0, %xmm5
movapd %xmm13, %xmm1
movsd LCPI19_111(%rip), %xmm8 ## xmm8 = mem[0],zero
mulsd %xmm8, %xmm1
movapd -1216(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm0
mulsd %xmm5, %xmm0
subsd %xmm1, %xmm0
movapd %xmm13, %xmm1
mulsd LCPI19_20(%rip), %xmm1
movapd %xmm4, -15520(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm2
addsd %xmm1, %xmm2
movapd %xmm7, %xmm1
movapd %xmm3, -15536(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm1
addsd %xmm2, %xmm1
movsd -1200(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
movapd %xmm4, %xmm2
mulsd LCPI19_130(%rip), %xmm2
movapd %xmm4, %xmm3
movsd %xmm2, -13176(%rbp) ## 8-byte Spill
mulsd %xmm2, %xmm3
movsd LCPI19_115(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm3
addsd %xmm1, %xmm3
movapd %xmm4, %xmm2
movapd %xmm4, %xmm1
movsd LCPI19_83(%rip), %xmm12 ## xmm12 = mem[0],zero
mulsd %xmm12, %xmm2
movsd %xmm2, -13168(%rbp) ## 8-byte Spill
mulsd %xmm2, %xmm1
movsd LCPI19_116(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm1
subsd %xmm1, %xmm3
movapd -1936(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm1
mulsd %xmm3, %xmm1
subsd %xmm1, %xmm0
movsd -704(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
movapd %xmm6, %xmm4
mulsd -9808(%rbp), %xmm4 ## 8-byte Folded Reload
movapd %xmm4, %xmm1
mulsd %xmm8, %xmm1
addsd %xmm0, %xmm1
movapd %xmm1, -10752(%rbp) ## 16-byte Spill
movsd LCPI19_30(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm13
movapd %xmm7, %xmm1
movapd %xmm5, -15440(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm1
subsd %xmm13, %xmm1
movapd %xmm9, %xmm0
movapd %xmm3, -15424(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm0
addsd %xmm1, %xmm0
mulsd %xmm2, %xmm4
addsd %xmm0, %xmm4
movapd %xmm6, %xmm0
movapd %xmm6, %xmm1
movsd LCPI19_90(%rip), %xmm9 ## xmm9 = mem[0],zero
mulsd %xmm9, %xmm1
movsd %xmm1, -13160(%rbp) ## 8-byte Spill
mulsd %xmm1, %xmm0
movsd LCPI19_117(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
subsd %xmm0, %xmm4
movapd %xmm4, -10768(%rbp) ## 16-byte Spill
movsd -7296(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd -3744(%rbp), %xmm6 ## 8-byte Folded Reload
mulsd LCPI19_41(%rip), %xmm6
movapd -208(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm0
movsd -7080(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
mulsd %xmm9, %xmm0
movapd -1520(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm1
mulsd -11584(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -576(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm2
mulsd -11600(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm1, %xmm2
movsd -2288(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
mulsd -4800(%rbp), %xmm10 ## 8-byte Folded Reload
movsd LCPI19_1(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm10
mulsd -2864(%rbp), %xmm10 ## 16-byte Folded Reload
movsd LCPI19_110(%rip), %xmm11 ## xmm11 = mem[0],zero
mulsd %xmm11, %xmm10
movapd %xmm6, %xmm0
mulsd -3344(%rbp), %xmm0 ## 8-byte Folded Reload
divsd -3136(%rbp), %xmm0 ## 8-byte Folded Reload
mulsd -1424(%rbp), %xmm0 ## 16-byte Folded Reload
movsd %xmm10, -12952(%rbp) ## 8-byte Spill
movsd %xmm0, -7296(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm10
addsd %xmm2, %xmm10
movapd %xmm2, %xmm0
mulsd %xmm1, %xmm0
movapd %xmm1, %xmm13
subsd %xmm0, %xmm10
movapd %xmm10, %xmm0
subsd %xmm2, %xmm0
movapd -64(%rbp), %xmm6 ## 16-byte Reload
movapd -7696(%rbp), %xmm15 ## 16-byte Reload
mulsd %xmm15, %xmm6
movapd -96(%rbp), %xmm12 ## 16-byte Reload
movapd %xmm12, %xmm7
mulsd -4440(%rbp), %xmm7 ## 8-byte Folded Reload
movsd %xmm6, -12960(%rbp) ## 8-byte Spill
subsd %xmm7, %xmm6
movapd %xmm3, %xmm1
mulsd %xmm6, %xmm1
movapd -176(%rbp), %xmm8 ## 16-byte Reload
mulsd %xmm15, %xmm8
movapd -1488(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm7, %xmm2
movapd %xmm8, -17024(%rbp) ## 16-byte Spill
movsd %xmm2, -7072(%rbp) ## 8-byte Spill
subsd %xmm2, %xmm8
movapd %xmm8, -1040(%rbp) ## 16-byte Spill
mulsd %xmm8, %xmm4
addsd %xmm1, %xmm4
mulsd -736(%rbp), %xmm15 ## 16-byte Folded Reload
movapd -864(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm7, -10032(%rbp) ## 16-byte Spill
mulsd %xmm7, %xmm1
movapd %xmm15, -7696(%rbp) ## 16-byte Spill
movsd %xmm1, -7064(%rbp) ## 8-byte Spill
subsd %xmm1, %xmm15
movapd %xmm5, %xmm1
mulsd %xmm15, %xmm1
addsd %xmm4, %xmm1
movsd -7304(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd -3104(%rbp), %xmm2 ## 8-byte Folded Reload
mulsd LCPI19_42(%rip), %xmm2
subsd %xmm1, %xmm0
movsd -1792(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd -3728(%rbp), %xmm4 ## 8-byte Folded Reload
mulsd %xmm13, %xmm4
mulsd -3648(%rbp), %xmm4 ## 16-byte Folded Reload
mulsd %xmm11, %xmm4
mulsd -2848(%rbp), %xmm2 ## 8-byte Folded Reload
divsd -3824(%rbp), %xmm2 ## 8-byte Folded Reload
mulsd -1392(%rbp), %xmm2 ## 8-byte Folded Reload
movsd %xmm4, -12944(%rbp) ## 8-byte Spill
movsd %xmm2, -7304(%rbp) ## 8-byte Spill
subsd %xmm2, %xmm4
addsd %xmm1, %xmm4
mulsd %xmm13, %xmm1
subsd %xmm1, %xmm4
addsd %xmm0, %xmm4
movapd %xmm3, %xmm0
mulsd -4016(%rbp), %xmm0 ## 8-byte Folded Reload
movapd -1344(%rbp), %xmm1 ## 16-byte Reload
mulsd -7680(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -2416(%rbp), %xmm2 ## 16-byte Reload
mulsd -7712(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm1, %xmm2
movsd -2448(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -4048(%rbp), %xmm0 ## 16-byte Folded Reload
mulsd LCPI19_69(%rip), %xmm0
movsd %xmm0, -2448(%rbp) ## 8-byte Spill
movapd -1024(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm9, %xmm1
mulsd %xmm9, %xmm0
movapd -2080(%rbp), %xmm13 ## 16-byte Reload
mulsd %xmm13, %xmm6
addsd %xmm0, %xmm6
movapd %xmm4, %xmm0
mulsd LCPI19_25(%rip), %xmm0
subsd %xmm0, %xmm6
movapd -144(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm3
movsd LCPI19_50(%rip), %xmm11 ## xmm11 = mem[0],zero
mulsd %xmm11, %xmm3
movapd -992(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm3, %xmm1
movsd -2112(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movapd -11632(%rbp), %xmm9 ## 16-byte Reload
mulsd %xmm9, %xmm0
subsd %xmm0, %xmm1
movapd -624(%rbp), %xmm14 ## 16-byte Reload
movapd %xmm14, %xmm0
movapd %xmm1, -15696(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm0
addsd %xmm6, %xmm0
mulsd %xmm12, %xmm9
movapd %xmm5, %xmm1
movapd %xmm3, -13872(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm1
subsd %xmm1, %xmm9
movapd %xmm9, %xmm7
mulsd LCPI19_103(%rip), %xmm7
addsd %xmm0, %xmm7
movapd -48(%rbp), %xmm12 ## 16-byte Reload
movapd %xmm12, %xmm0
mulsd %xmm11, %xmm0
movapd %xmm12, %xmm8
movapd %xmm0, -17248(%rbp) ## 16-byte Spill
mulsd %xmm0, %xmm8
movapd %xmm8, %xmm1
mulsd LCPI19_25(%rip), %xmm1
addsd %xmm7, %xmm1
movapd -11312(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm14, %xmm5
movapd %xmm12, %xmm0
movapd %xmm5, -17232(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm0
subsd %xmm0, %xmm1
movapd %xmm4, %xmm11
subsd %xmm2, %xmm11
movsd -1360(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -2464(%rbp), %xmm0 ## 8-byte Folded Reload
movsd LCPI19_1(%rip), %xmm6 ## xmm6 = mem[0],zero
mulsd %xmm6, %xmm0
mulsd -3072(%rbp), %xmm0 ## 16-byte Folded Reload
mulsd LCPI19_110(%rip), %xmm0
movsd -2448(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd -3952(%rbp), %xmm5 ## 8-byte Folded Reload
divsd -3712(%rbp), %xmm5 ## 8-byte Folded Reload
mulsd -2480(%rbp), %xmm5 ## 8-byte Folded Reload
movsd %xmm0, -12928(%rbp) ## 8-byte Spill
movsd %xmm5, -2448(%rbp) ## 8-byte Spill
subsd %xmm5, %xmm0
addsd %xmm2, %xmm0
movsd %xmm2, -12936(%rbp) ## 8-byte Spill
mulsd %xmm6, %xmm2
movsd %xmm2, -12920(%rbp) ## 8-byte Spill
subsd %xmm2, %xmm0
addsd %xmm0, %xmm11
movapd -2304(%rbp), %xmm14 ## 16-byte Reload
movsd -4016(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm14, %xmm2
movsd %xmm2, -4016(%rbp) ## 8-byte Spill
addsd %xmm2, %xmm1
movapd %xmm1, %xmm6
mulsd LCPI19_108(%rip), %xmm6
movapd %xmm11, %xmm7
mulsd LCPI19_21(%rip), %xmm7
addsd %xmm6, %xmm7
movapd %xmm4, %xmm6
mulsd LCPI19_24(%rip), %xmm6
movapd %xmm8, %xmm5
mulsd LCPI19_112(%rip), %xmm5
addsd %xmm6, %xmm5
movapd -1024(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm6
mulsd -11600(%rbp), %xmm6 ## 16-byte Folded Reload
mulsd LCPI19_14(%rip), %xmm10
addsd %xmm6, %xmm10
mulsd %xmm13, %xmm15
addsd %xmm10, %xmm15
movapd %xmm9, %xmm6
movsd LCPI19_47(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm6
addsd %xmm15, %xmm6
movapd -496(%rbp), %xmm15 ## 16-byte Reload
movapd %xmm15, %xmm2
mulsd %xmm6, %xmm2
addsd %xmm5, %xmm2
movapd %xmm3, %xmm5
mulsd -11584(%rbp), %xmm5 ## 16-byte Folded Reload
movapd -1040(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm13, %xmm3
addsd %xmm5, %xmm3
movsd LCPI19_48(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm5, %xmm9
addsd %xmm3, %xmm9
movapd -752(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm5
mulsd %xmm9, %xmm5
addsd %xmm2, %xmm5
movapd -7680(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm14, %xmm2
movapd %xmm2, -7680(%rbp) ## 16-byte Spill
addsd %xmm2, %xmm5
movapd -3296(%rbp), %xmm10 ## 16-byte Reload
addsd %xmm5, %xmm10
movapd %xmm10, %xmm2
movsd LCPI19_13(%rip), %xmm13 ## xmm13 = mem[0],zero
mulsd %xmm13, %xmm2
addsd %xmm7, %xmm2
movsd LCPI19_23(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm5, %xmm4
mulsd %xmm5, %xmm8
subsd %xmm8, %xmm4
movapd %xmm6, -15616(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm3
addsd %xmm4, %xmm3
movapd %xmm15, %xmm4
movapd %xmm9, -11632(%rbp) ## 16-byte Spill
mulsd %xmm9, %xmm4
subsd %xmm4, %xmm3
movapd -7712(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm14, %xmm4
movsd -128(%rbp), %xmm14 ## 8-byte Reload
## xmm14 = mem[0],zero
movapd %xmm4, -7712(%rbp) ## 16-byte Spill
addsd %xmm4, %xmm3
mulsd LCPI19_64(%rip), %xmm0
addsd %xmm3, %xmm0
movapd -4592(%rbp), %xmm5 ## 16-byte Reload
addsd %xmm0, %xmm5
addsd %xmm5, %xmm2
movapd %xmm12, %xmm0
mulsd LCPI19_129(%rip), %xmm0
movapd %xmm0, -17216(%rbp) ## 16-byte Spill
mulsd %xmm0, %xmm12
movapd %xmm12, %xmm3
mulsd LCPI19_114(%rip), %xmm3
addsd %xmm2, %xmm3
mulsd LCPI19_98(%rip), %xmm1
movapd %xmm11, %xmm0
mulsd LCPI19_22(%rip), %xmm0
addsd %xmm1, %xmm0
addsd %xmm10, %xmm0
mulsd %xmm13, %xmm5
addsd %xmm0, %xmm5
mulsd LCPI19_31(%rip), %xmm12
addsd %xmm5, %xmm12
movapd -1616(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm5
mulsd %xmm3, %xmm5
movapd -1088(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm0
mulsd %xmm12, %xmm0
subsd %xmm0, %xmm5
movapd %xmm11, %xmm1
movsd LCPI19_111(%rip), %xmm9 ## xmm9 = mem[0],zero
mulsd %xmm9, %xmm1
movapd -848(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm0
mulsd %xmm5, %xmm0
subsd %xmm1, %xmm0
movapd %xmm11, %xmm1
mulsd LCPI19_20(%rip), %xmm1
movapd %xmm12, -15376(%rbp) ## 16-byte Spill
mulsd %xmm12, %xmm2
addsd %xmm1, %xmm2
movapd %xmm7, %xmm1
movapd %xmm3, -15392(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm1
addsd %xmm2, %xmm1
movsd -1296(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
movapd %xmm4, %xmm3
mulsd LCPI19_130(%rip), %xmm3
movapd %xmm4, %xmm2
movsd %xmm3, -13136(%rbp) ## 8-byte Spill
mulsd %xmm3, %xmm2
mulsd LCPI19_115(%rip), %xmm2
addsd %xmm1, %xmm2
movsd LCPI19_83(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm4, %xmm3
movapd %xmm4, %xmm1
movsd %xmm3, -13472(%rbp) ## 8-byte Spill
mulsd %xmm3, %xmm1
mulsd LCPI19_116(%rip), %xmm1
subsd %xmm1, %xmm2
movapd -2144(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm1
mulsd %xmm2, %xmm1
subsd %xmm1, %xmm0
movsd -560(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
movapd %xmm3, %xmm13
mulsd -9784(%rbp), %xmm13 ## 8-byte Folded Reload
movapd %xmm13, %xmm8
mulsd %xmm9, %xmm8
addsd %xmm0, %xmm8
movsd %xmm11, -9608(%rbp) ## 8-byte Spill
movsd LCPI19_30(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm7, %xmm11
movapd %xmm4, %xmm1
movapd %xmm5, -15312(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm1
subsd %xmm11, %xmm1
movapd %xmm6, %xmm0
movapd %xmm2, -15296(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm0
addsd %xmm1, %xmm0
mulsd %xmm7, %xmm13
addsd %xmm0, %xmm13
movapd %xmm3, %xmm0
movsd LCPI19_90(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm3, %xmm1
movsd %xmm1, -13448(%rbp) ## 8-byte Spill
mulsd %xmm1, %xmm0
mulsd LCPI19_117(%rip), %xmm0
subsd %xmm0, %xmm13
movapd %xmm14, %xmm3
mulsd -10208(%rbp), %xmm3 ## 16-byte Folded Reload
movapd %xmm14, %xmm7
mulsd -10192(%rbp), %xmm7 ## 16-byte Folded Reload
movsd LCPI19_7(%rip), %xmm9 ## xmm9 = mem[0],zero
mulsd %xmm14, %xmm9
movsd LCPI19_131(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm14, %xmm0
testq %rax, %rax
movsd LCPI19_1(%rip), %xmm15 ## xmm15 = mem[0],zero
movapd %xmm8, -13536(%rbp) ## 16-byte Spill
movsd %xmm0, -9576(%rbp) ## 8-byte Spill
movsd %xmm9, -12416(%rbp) ## 8-byte Spill
movsd %xmm7, -12408(%rbp) ## 8-byte Spill
movsd %xmm3, -12392(%rbp) ## 8-byte Spill
je LBB19_50
## %bb.49:
movapd -2880(%rbp), %xmm0 ## 16-byte Reload
mulsd -9056(%rbp), %xmm0 ## 16-byte Folded Reload
movsd LCPI19_7(%rip), %xmm12 ## xmm12 = mem[0],zero
movsd -280(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm1, %xmm12
movapd %xmm1, %xmm4
movsd %xmm1, -280(%rbp) ## 8-byte Spill
movapd %xmm12, %xmm1
movsd LCPI19_4(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm1
movsd LCPI19_6(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm12, %xmm2
addsd %xmm1, %xmm2
movapd -2784(%rbp), %xmm15 ## 16-byte Reload
movapd %xmm15, %xmm1
mulsd -10800(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm2, %xmm1
movsd LCPI19_131(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm4, %xmm2
movapd %xmm2, %xmm10
addsd %xmm3, %xmm10
movsd LCPI19_17(%rip), %xmm6 ## xmm6 = mem[0],zero
movapd %xmm10, %xmm5
mulsd %xmm6, %xmm5
addsd %xmm1, %xmm5
subsd %xmm5, %xmm0
movapd -2528(%rbp), %xmm1 ## 16-byte Reload
mulsd -10784(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm0
addsd %xmm7, %xmm2
movsd LCPI19_29(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm2, %xmm5
addsd %xmm0, %xmm5
movapd -3184(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm1
movapd %xmm0, %xmm8
mulsd -9040(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm5, %xmm1
movapd %xmm14, %xmm7
mulsd %xmm9, %xmm7
movsd LCPI19_118(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm7, %xmm0
movsd -336(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
addsd %xmm0, %xmm3
movsd LCPI19_16(%rip), %xmm11 ## xmm11 = mem[0],zero
movsd -7000(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm11, %xmm0
addsd %xmm3, %xmm0
movapd -2880(%rbp), %xmm5 ## 16-byte Reload
mulsd -10752(%rbp), %xmm5 ## 16-byte Folded Reload
addsd %xmm0, %xmm5
movapd %xmm15, %xmm0
mulsd -10768(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm5
movapd %xmm14, %xmm4
mulsd -9576(%rbp), %xmm4 ## 8-byte Folded Reload
movapd %xmm4, %xmm0
mulsd %xmm11, %xmm0
subsd %xmm0, %xmm5
movsd -9608(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
movapd %xmm9, %xmm3
mulsd %xmm11, %xmm3
addsd %xmm5, %xmm3
movapd %xmm8, %xmm5
movapd -13536(%rbp), %xmm8 ## 16-byte Reload
mulsd %xmm8, %xmm5
addsd %xmm3, %xmm5
movapd -2528(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm11, %xmm3
mulsd %xmm13, %xmm3
subsd %xmm3, %xmm5
subsd %xmm0, %xmm5
movsd -1056(%rbp), %xmm14 ## 8-byte Reload
## xmm14 = mem[0],zero
addsd %xmm5, %xmm14
addsd %xmm1, %xmm1
addsd %xmm14, %xmm14
addsd %xmm1, %xmm14
mulsd LCPI19_4(%rip), %xmm7
movsd -432(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
subsd %xmm7, %xmm1
movsd -7000(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm6, %xmm0
addsd %xmm1, %xmm0
movapd %xmm15, %xmm1
mulsd -10752(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -2880(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm0
mulsd -10768(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
mulsd %xmm6, %xmm4
subsd %xmm4, %xmm0
mulsd %xmm9, %xmm6
addsd %xmm0, %xmm6
movapd %xmm11, %xmm0
mulsd %xmm8, %xmm0
addsd %xmm6, %xmm0
movapd -3184(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm1
mulsd %xmm13, %xmm1
addsd %xmm0, %xmm1
movsd LCPI19_2(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm12, %xmm0
mulsd LCPI19_5(%rip), %xmm12
subsd %xmm4, %xmm1
addsd %xmm0, %xmm12
movapd %xmm3, %xmm0
mulsd -10800(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm12, %xmm0
movsd LCPI19_16(%rip), %xmm12 ## xmm12 = mem[0],zero
mulsd %xmm12, %xmm10
addsd %xmm0, %xmm10
movapd %xmm15, %xmm0
mulsd -9056(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm10, %xmm0
movapd %xmm6, %xmm3
movsd LCPI19_1(%rip), %xmm15 ## xmm15 = mem[0],zero
mulsd -10784(%rbp), %xmm3 ## 16-byte Folded Reload
addsd %xmm0, %xmm3
mulsd %xmm12, %xmm2
addsd %xmm3, %xmm2
mulsd -9040(%rbp), %xmm11 ## 16-byte Folded Reload
addsd %xmm2, %xmm11
movsd LCPI19_128(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd -280(%rbp), %xmm2 ## 8-byte Folded Reload
mulsd LCPI19_119(%rip), %xmm2
addsd %xmm11, %xmm2
movsd -1536(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
movapd %xmm3, %xmm0
mulsd %xmm14, %xmm0
addsd %xmm1, %xmm2
movsd -1072(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm2, %xmm1
subsd %xmm1, %xmm0
movsd -2272(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm1, %xmm14
mulsd -880(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm14, %xmm2
mulsd %xmm3, %xmm0
mulsd %xmm15, %xmm0
mulsd %xmm1, %xmm2
mulsd %xmm15, %xmm2
subsd %xmm2, %xmm0
movsd %xmm0, 16(%rax)
LBB19_50:
movapd %xmm13, -15200(%rbp) ## 16-byte Spill
movapd -2928(%rbp), %xmm10 ## 16-byte Reload
subsd -216(%rbp), %xmm10 ## 8-byte Folded Reload
movsd %xmm10, -2768(%rbp) ## 8-byte Spill
addsd -232(%rbp), %xmm10 ## 8-byte Folded Reload
movapd %xmm10, %xmm9
movsd %xmm10, -1984(%rbp) ## 8-byte Spill
addsd -376(%rbp), %xmm9 ## 8-byte Folded Reload
movsd -360(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
addsd -304(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm0, %xmm12
addsd -160(%rbp), %xmm12 ## 8-byte Folded Reload
movapd %xmm12, %xmm8
addsd -296(%rbp), %xmm8 ## 8-byte Folded Reload
movapd %xmm8, %xmm3
movsd %xmm8, -1056(%rbp) ## 8-byte Spill
addsd -832(%rbp), %xmm3 ## 16-byte Folded Reload
movapd %xmm3, %xmm1
divsd -3408(%rbp), %xmm1 ## 16-byte Folded Reload
movsd %xmm1, -336(%rbp) ## 8-byte Spill
movapd -4912(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm1, %xmm5
movapd %xmm5, %xmm2
mulsd -3664(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm9, %xmm2
addsd -2160(%rbp), %xmm2 ## 16-byte Folded Reload
movsd -3360(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm2, %xmm1
mulsd %xmm15, %xmm1
mulsd -4736(%rbp), %xmm1 ## 8-byte Folded Reload
movsd LCPI19_37(%rip), %xmm6 ## xmm6 = mem[0],zero
mulsd %xmm6, %xmm1
movapd %xmm6, %xmm4
mulsd -2288(%rbp), %xmm2 ## 8-byte Folded Reload
movsd %xmm2, -3600(%rbp) ## 8-byte Spill
movsd -3776(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd %xmm2, %xmm6
movsd %xmm6, -2184(%rbp) ## 8-byte Spill
movsd -936(%rbp), %xmm14 ## 8-byte Reload
## xmm14 = mem[0],zero
mulsd %xmm6, %xmm14
movsd LCPI19_43(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm7, %xmm14
addsd %xmm1, %xmm14
addsd -672(%rbp), %xmm8 ## 16-byte Folded Reload
movapd %xmm8, %xmm1
divsd -3392(%rbp), %xmm1 ## 16-byte Folded Reload
movsd %xmm1, -2752(%rbp) ## 8-byte Spill
movapd -3424(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm1, %xmm2
movapd %xmm2, %xmm6
mulsd -3680(%rbp), %xmm6 ## 16-byte Folded Reload
addsd %xmm9, %xmm6
addsd -2176(%rbp), %xmm6 ## 16-byte Folded Reload
movsd -1664(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm6, %xmm1
mulsd %xmm15, %xmm1
mulsd -1680(%rbp), %xmm1 ## 8-byte Folded Reload
mulsd %xmm4, %xmm1
movapd %xmm4, %xmm15
mulsd -1792(%rbp), %xmm6 ## 8-byte Folded Reload
movsd %xmm6, -3920(%rbp) ## 8-byte Spill
movsd -3808(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm6, %xmm4
movsd %xmm4, -1952(%rbp) ## 8-byte Spill
movsd -1144(%rbp), %xmm13 ## 8-byte Reload
## xmm13 = mem[0],zero
mulsd %xmm4, %xmm13
mulsd %xmm7, %xmm13
addsd %xmm1, %xmm13
movapd %xmm12, %xmm1
addsd -4240(%rbp), %xmm1 ## 16-byte Folded Reload
movsd %xmm1, -3480(%rbp) ## 8-byte Spill
divsd -2952(%rbp), %xmm1 ## 8-byte Folded Reload
movsd %xmm1, -816(%rbp) ## 8-byte Spill
movsd -928(%rbp), %xmm11 ## 8-byte Reload
## xmm11 = mem[0],zero
mulsd %xmm1, %xmm11
movapd %xmm11, %xmm4
mulsd -5552(%rbp), %xmm4 ## 8-byte Folded Reload
addsd %xmm10, %xmm4
addsd -3216(%rbp), %xmm4 ## 16-byte Folded Reload
movapd %xmm4, %xmm6
mulsd -2832(%rbp), %xmm6 ## 8-byte Folded Reload
mulsd LCPI19_1(%rip), %xmm6
mulsd -4624(%rbp), %xmm6 ## 16-byte Folded Reload
mulsd %xmm15, %xmm6
mulsd -2464(%rbp), %xmm4 ## 8-byte Folded Reload
movsd %xmm4, -3296(%rbp) ## 8-byte Spill
movsd -1648(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm4, %xmm7
movsd %xmm7, -432(%rbp) ## 8-byte Spill
movsd -1136(%rbp), %xmm15 ## 8-byte Reload
## xmm15 = mem[0],zero
mulsd %xmm7, %xmm15
mulsd LCPI19_43(%rip), %xmm15
addsd %xmm6, %xmm15
movsd LCPI19_50(%rip), %xmm4 ## xmm4 = mem[0],zero
movsd -1056(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
mulsd %xmm4, %xmm10
movsd %xmm10, -1056(%rbp) ## 8-byte Spill
movapd -8112(%rbp), %xmm4 ## 16-byte Reload
subsd %xmm10, %xmm4
movapd %xmm4, -10464(%rbp) ## 16-byte Spill
movsd LCPI19_74(%rip), %xmm10 ## xmm10 = mem[0],zero
mulsd %xmm10, %xmm12
movsd %xmm12, -5368(%rbp) ## 8-byte Spill
subsd %xmm12, %xmm4
addsd -3552(%rbp), %xmm4 ## 8-byte Folded Reload
movapd %xmm4, %xmm7
movapd %xmm4, -10480(%rbp) ## 16-byte Spill
movsd LCPI19_85(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd %xmm4, %xmm0
subsd %xmm0, %xmm7
movapd %xmm7, -10448(%rbp) ## 16-byte Spill
movapd %xmm14, %xmm6
addsd %xmm13, %xmm6
movapd %xmm6, %xmm0
addsd %xmm15, %xmm0
movsd %xmm0, -7056(%rbp) ## 8-byte Spill
movapd %xmm7, %xmm12
movsd -1296(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm1, %xmm12
movapd %xmm0, %xmm7
subsd %xmm12, %xmm7
movsd %xmm7, -13120(%rbp) ## 8-byte Spill
mulsd %xmm7, %xmm1
addsd -2000(%rbp), %xmm1 ## 8-byte Folded Reload
movsd -2768(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm4, %xmm7
movsd -1984(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm10, %xmm4
movsd %xmm4, -1984(%rbp) ## 8-byte Spill
mulsd LCPI19_50(%rip), %xmm9
movapd %xmm9, -1040(%rbp) ## 16-byte Spill
addsd -8128(%rbp), %xmm9 ## 16-byte Folded Reload
movsd %xmm9, -5360(%rbp) ## 8-byte Spill
addsd %xmm9, %xmm4
addsd -10512(%rbp), %xmm4 ## 16-byte Folded Reload
movsd %xmm4, -920(%rbp) ## 8-byte Spill
addsd %xmm4, %xmm7
movsd %xmm7, -2768(%rbp) ## 8-byte Spill
mulsd -960(%rbp), %xmm7 ## 8-byte Folded Reload
addsd %xmm1, %xmm7
movsd %xmm7, -3520(%rbp) ## 8-byte Spill
mulsd -5696(%rbp), %xmm3 ## 16-byte Folded Reload
mulsd -1128(%rbp), %xmm5 ## 8-byte Folded Reload
movapd %xmm5, %xmm0
mulsd LCPI19_1(%rip), %xmm0
subsd %xmm0, %xmm3
movsd LCPI19_44(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm5
movapd %xmm0, %xmm10
mulsd -1688(%rbp), %xmm5 ## 8-byte Folded Reload
movsd LCPI19_45(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm7, %xmm5
subsd %xmm5, %xmm3
movapd -144(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm4
movapd %xmm0, %xmm12
mulsd %xmm14, %xmm4
divsd -5680(%rbp), %xmm3 ## 16-byte Folded Reload
movapd -4192(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm3, %xmm5
movapd -256(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm0
mulsd %xmm5, %xmm0
movapd %xmm5, %xmm1
movapd %xmm5, -1744(%rbp) ## 16-byte Spill
subsd %xmm0, %xmm4
mulsd -5200(%rbp), %xmm3 ## 16-byte Folded Reload
movsd -336(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -1672(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm3
movapd %xmm3, %xmm5
movsd %xmm3, -9752(%rbp) ## 8-byte Spill
movapd -96(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm3
mulsd %xmm1, %xmm3
movapd %xmm12, %xmm0
mulsd %xmm5, %xmm0
subsd %xmm0, %xmm3
movsd %xmm3, -800(%rbp) ## 8-byte Spill
movapd %xmm9, %xmm1
mulsd %xmm5, %xmm1
movapd %xmm7, %xmm0
mulsd %xmm14, %xmm0
subsd %xmm0, %xmm1
movsd %xmm1, -9680(%rbp) ## 8-byte Spill
movapd -208(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm3, %xmm0
subsd -4976(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
movapd -64(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm4, -3632(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm1
addsd %xmm0, %xmm1
movapd -2384(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm4, %xmm0
subsd -7536(%rbp), %xmm0 ## 16-byte Folded Reload
subsd -2680(%rbp), %xmm1 ## 8-byte Folded Reload
movsd %xmm1, -5312(%rbp) ## 8-byte Spill
movapd -2096(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm1, %xmm3
addsd %xmm0, %xmm3
mulsd -5744(%rbp), %xmm8 ## 16-byte Folded Reload
mulsd -2016(%rbp), %xmm2 ## 8-byte Folded Reload
movapd %xmm2, %xmm0
mulsd LCPI19_1(%rip), %xmm0
subsd %xmm0, %xmm8
mulsd %xmm10, %xmm2
mulsd -3376(%rbp), %xmm2 ## 8-byte Folded Reload
mulsd LCPI19_45(%rip), %xmm2
subsd %xmm2, %xmm8
movapd %xmm12, %xmm10
movapd %xmm12, %xmm5
movapd %xmm13, %xmm1
movapd %xmm13, -10224(%rbp) ## 16-byte Spill
mulsd %xmm13, %xmm5
divsd -5712(%rbp), %xmm8 ## 16-byte Folded Reload
movsd %xmm8, -7288(%rbp) ## 8-byte Spill
movapd -4272(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm8, %xmm4
movapd %xmm9, %xmm0
mulsd %xmm4, %xmm0
movapd %xmm4, %xmm8
movapd %xmm4, -4608(%rbp) ## 16-byte Spill
subsd %xmm0, %xmm5
movapd %xmm5, -1968(%rbp) ## 16-byte Spill
movapd -2368(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm5, %xmm0
addsd %xmm3, %xmm0
movsd -1056(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
movapd %xmm5, %xmm13
mulsd %xmm12, %xmm13
movapd -992(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm13, %xmm2
movapd -1040(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm3
mulsd -1776(%rbp), %xmm3 ## 16-byte Folded Reload
addsd %xmm2, %xmm3
movapd %xmm5, %xmm2
mulsd %xmm9, %xmm2
movapd %xmm4, %xmm5
mulsd %xmm7, %xmm5
addsd %xmm2, %xmm5
movapd -1568(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm5, %xmm2
movsd %xmm5, -3528(%rbp) ## 8-byte Spill
addsd %xmm3, %xmm2
movapd -832(%rbp), %xmm3 ## 16-byte Reload
mulsd -1744(%rbp), %xmm3 ## 16-byte Folded Reload
subsd %xmm2, %xmm3
movapd -2064(%rbp), %xmm2 ## 16-byte Reload
movsd %xmm14, -13128(%rbp) ## 8-byte Spill
mulsd %xmm14, %xmm2
subsd %xmm2, %xmm3
movapd -672(%rbp), %xmm12 ## 16-byte Reload
mulsd %xmm8, %xmm12
addsd %xmm3, %xmm12
movapd -2608(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm1, %xmm2
subsd %xmm2, %xmm12
subsd -8592(%rbp), %xmm0 ## 16-byte Folded Reload
addsd LCPI19_106(%rip), %xmm0
addsd -4400(%rbp), %xmm12 ## 8-byte Folded Reload
addsd -7776(%rbp), %xmm12 ## 8-byte Folded Reload
addsd -9296(%rbp), %xmm12 ## 16-byte Folded Reload
movapd -624(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm3
movapd %xmm2, %xmm7
movapd %xmm12, -15568(%rbp) ## 16-byte Spill
mulsd %xmm12, %xmm3
addsd %xmm0, %xmm3
movapd -10464(%rbp), %xmm0 ## 16-byte Reload
movapd -48(%rbp), %xmm12 ## 16-byte Reload
mulsd %xmm12, %xmm0
subsd %xmm0, %xmm6
movapd %xmm12, %xmm0
movsd %xmm6, -13104(%rbp) ## 8-byte Spill
mulsd %xmm6, %xmm0
addsd -5568(%rbp), %xmm0 ## 16-byte Folded Reload
movsd -5360(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
movsd -320(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm1, %xmm8
addsd %xmm0, %xmm8
movapd %xmm8, %xmm0
movsd LCPI19_25(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm0
subsd %xmm0, %xmm3
movsd %xmm13, -9672(%rbp) ## 8-byte Spill
mulsd %xmm13, %xmm10
movapd %xmm9, %xmm4
mulsd %xmm5, %xmm4
addsd %xmm10, %xmm4
movapd -1040(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm5
mulsd -2512(%rbp), %xmm5 ## 8-byte Folded Reload
addsd %xmm4, %xmm5
addsd -4720(%rbp), %xmm5 ## 16-byte Folded Reload
movapd %xmm5, %xmm0
movsd LCPI19_103(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm0
addsd %xmm3, %xmm0
movsd -1056(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
movapd -992(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm1, %xmm3
movapd -6032(%rbp), %xmm2 ## 16-byte Reload
subsd %xmm3, %xmm2
movapd %xmm2, -11216(%rbp) ## 16-byte Spill
mulsd %xmm7, %xmm2
movapd %xmm12, %xmm3
movapd %xmm2, -17056(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm3
addsd %xmm0, %xmm3
movapd %xmm9, %xmm0
mulsd %xmm1, %xmm0
movapd -6016(%rbp), %xmm4 ## 16-byte Reload
subsd %xmm0, %xmm4
movapd %xmm4, -11200(%rbp) ## 16-byte Spill
mulsd -72(%rbp), %xmm4 ## 8-byte Folded Reload
addsd %xmm3, %xmm4
movapd %xmm12, %xmm1
movsd %xmm15, -13112(%rbp) ## 8-byte Spill
mulsd %xmm15, %xmm1
movapd -1824(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm1, %xmm0
movapd %xmm1, %xmm6
movapd %xmm1, -336(%rbp) ## 16-byte Spill
addsd %xmm4, %xmm0
movsd -3480(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd -6576(%rbp), %xmm2 ## 8-byte Folded Reload
mulsd -2192(%rbp), %xmm11 ## 8-byte Folded Reload
movapd %xmm11, %xmm3
mulsd LCPI19_1(%rip), %xmm3
subsd %xmm3, %xmm2
mulsd LCPI19_44(%rip), %xmm11
mulsd -5536(%rbp), %xmm11 ## 16-byte Folded Reload
mulsd LCPI19_45(%rip), %xmm11
subsd %xmm11, %xmm2
divsd -6560(%rbp), %xmm2 ## 8-byte Folded Reload
mulsd -2696(%rbp), %xmm2 ## 8-byte Folded Reload
movsd -816(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -1624(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm2
movsd %xmm2, -3480(%rbp) ## 8-byte Spill
movapd -64(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm11, %xmm4
mulsd %xmm6, %xmm4
movapd %xmm12, %xmm3
mulsd %xmm2, %xmm3
movapd %xmm3, -2240(%rbp) ## 16-byte Spill
movapd -208(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm3, %xmm1
addsd -5424(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm4
subsd -8544(%rbp), %xmm0 ## 16-byte Folded Reload
subsd -7520(%rbp), %xmm4 ## 8-byte Folded Reload
movsd %xmm4, -816(%rbp) ## 8-byte Spill
movapd -1008(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm4, %xmm6
addsd %xmm0, %xmm6
movapd -10480(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm12, %xmm0
movsd -7056(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
subsd %xmm0, %xmm1
movapd %xmm12, %xmm0
movsd %xmm1, -13008(%rbp) ## 8-byte Spill
mulsd %xmm1, %xmm0
addsd -6288(%rbp), %xmm0 ## 16-byte Folded Reload
movsd -920(%rbp), %xmm14 ## 8-byte Reload
## xmm14 = mem[0],zero
mulsd -320(%rbp), %xmm14 ## 8-byte Folded Reload
addsd %xmm0, %xmm14
movapd %xmm6, %xmm0
movsd LCPI19_15(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
movapd %xmm14, %xmm2
movsd LCPI19_22(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm2
subsd %xmm0, %xmm2
movsd %xmm2, -3512(%rbp) ## 8-byte Spill
movapd -1520(%rbp), %xmm0 ## 16-byte Reload
movsd -800(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm7, %xmm0
subsd -3432(%rbp), %xmm0 ## 8-byte Folded Reload
movapd -1488(%rbp), %xmm13 ## 16-byte Reload
movsd -9680(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
mulsd %xmm9, %xmm13
addsd %xmm0, %xmm13
movapd -176(%rbp), %xmm10 ## 16-byte Reload
movapd %xmm10, %xmm1
movapd -3632(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm2, %xmm1
addsd %xmm13, %xmm1
movapd -896(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm2, %xmm0
subsd -4392(%rbp), %xmm0 ## 8-byte Folded Reload
subsd -5408(%rbp), %xmm1 ## 16-byte Folded Reload
movapd -2096(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm13
mulsd %xmm1, %xmm13
movapd %xmm1, %xmm15
movapd %xmm1, -11504(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm13
movapd -1904(%rbp), %xmm0 ## 16-byte Reload
movapd -1968(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm2, %xmm0
addsd %xmm13, %xmm0
subsd -8432(%rbp), %xmm0 ## 16-byte Folded Reload
addsd LCPI19_77(%rip), %xmm0
addsd -4384(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm5, %xmm3
movsd LCPI19_48(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm3
addsd %xmm0, %xmm3
movapd -576(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm7, %xmm0
subsd -5392(%rbp), %xmm0 ## 8-byte Folded Reload
movapd -864(%rbp), %xmm13 ## 16-byte Reload
movapd %xmm9, %xmm1
mulsd %xmm9, %xmm13
addsd %xmm0, %xmm13
movapd -1888(%rbp), %xmm0 ## 16-byte Reload
movapd -3632(%rbp), %xmm9 ## 16-byte Reload
mulsd %xmm9, %xmm0
movapd -736(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm1, %xmm9
movapd %xmm1, %xmm7
addsd %xmm13, %xmm9
subsd -8416(%rbp), %xmm0 ## 8-byte Folded Reload
subsd -2648(%rbp), %xmm9 ## 8-byte Folded Reload
mulsd %xmm9, %xmm4
movapd %xmm9, -3632(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm4
mulsd -5312(%rbp), %xmm11 ## 8-byte Folded Reload
mulsd %xmm15, %xmm10
addsd %xmm11, %xmm10
mulsd %xmm9, %xmm1
addsd %xmm10, %xmm1
movsd LCPI19_14(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm1
addsd %xmm4, %xmm1
movapd -1584(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm2, %xmm0
addsd %xmm1, %xmm0
movapd %xmm8, %xmm13
movsd LCPI19_24(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm13
addsd -8352(%rbp), %xmm3 ## 16-byte Folded Reload
addsd -9216(%rbp), %xmm3 ## 16-byte Folded Reload
movapd -752(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm1
movapd %xmm2, %xmm9
mulsd %xmm3, %xmm1
subsd -8384(%rbp), %xmm0 ## 16-byte Folded Reload
addsd LCPI19_78(%rip), %xmm0
addsd %xmm13, %xmm1
addsd -8400(%rbp), %xmm0 ## 8-byte Folded Reload
movsd LCPI19_47(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm5
addsd %xmm0, %xmm5
subsd -4360(%rbp), %xmm5 ## 8-byte Folded Reload
addsd -9200(%rbp), %xmm5 ## 16-byte Folded Reload
movapd -496(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm0
movapd %xmm2, %xmm15
mulsd %xmm5, %xmm0
addsd %xmm1, %xmm0
movsd -2032(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movapd -336(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm4, %xmm1
addsd %xmm0, %xmm1
movapd -3280(%rbp), %xmm13 ## 16-byte Reload
movapd %xmm13, %xmm7
mulsd %xmm4, %xmm7
movapd -1344(%rbp), %xmm0 ## 16-byte Reload
movapd -2240(%rbp), %xmm11 ## 16-byte Reload
mulsd %xmm11, %xmm0
addsd -7504(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm7
subsd -8368(%rbp), %xmm1 ## 8-byte Folded Reload
subsd -7488(%rbp), %xmm7 ## 16-byte Folded Reload
movapd -1008(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm0
mulsd %xmm7, %xmm0
movapd %xmm7, -4592(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm0
movsd -5368(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm12, %xmm2
movsd %xmm2, -12984(%rbp) ## 8-byte Spill
mulsd %xmm2, %xmm12
movsd -1984(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd -320(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm12, %xmm2
addsd LCPI19_79(%rip), %xmm0
addsd -4064(%rbp), %xmm2 ## 16-byte Folded Reload
movapd %xmm2, %xmm10
movsd LCPI19_73(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm10
addsd %xmm0, %xmm10
movsd LCPI19_23(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm8
movapd %xmm15, %xmm0
movapd %xmm3, -15360(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm0
subsd %xmm0, %xmm8
movapd %xmm9, %xmm0
movapd %xmm5, -15408(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm0
addsd %xmm8, %xmm0
movapd -1808(%rbp), %xmm1 ## 16-byte Reload
movapd -336(%rbp), %xmm12 ## 16-byte Reload
mulsd %xmm12, %xmm1
addsd %xmm0, %xmm1
movapd -2592(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm3, %xmm12
movapd -2416(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm11, %xmm0
addsd -4352(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm12
subsd -8336(%rbp), %xmm1 ## 16-byte Folded Reload
subsd -4344(%rbp), %xmm12 ## 8-byte Folded Reload
mulsd %xmm12, %xmm4
movapd %xmm12, -336(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm4
movapd -64(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm11, %xmm1
mulsd -816(%rbp), %xmm1 ## 8-byte Folded Reload
mulsd %xmm7, %xmm13
addsd %xmm1, %xmm13
movapd %xmm3, %xmm1
mulsd %xmm12, %xmm1
addsd %xmm13, %xmm1
movsd LCPI19_64(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm1
addsd %xmm4, %xmm1
addsd LCPI19_80(%rip), %xmm1
movsd LCPI19_72(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm2
addsd %xmm1, %xmm2
movsd LCPI19_108(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm6
movsd LCPI19_114(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm14
subsd %xmm14, %xmm6
movsd -3512(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
addsd %xmm10, %xmm1
movsd LCPI19_13(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm10
addsd %xmm6, %xmm10
addsd %xmm2, %xmm10
mulsd %xmm0, %xmm2
addsd %xmm1, %xmm2
movsd LCPI19_115(%rip), %xmm0 ## xmm0 = mem[0],zero
movsd -3520(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm0, %xmm3
movapd -1616(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm0
movapd %xmm1, %xmm6
mulsd %xmm2, %xmm0
subsd %xmm3, %xmm0
movapd -1088(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm1
movapd %xmm3, %xmm7
mulsd %xmm10, %xmm1
addsd %xmm0, %xmm1
movsd LCPI19_87(%rip), %xmm4 ## xmm4 = mem[0],zero
subsd %xmm1, %xmm4
movsd -360(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
movsd LCPI19_82(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm5
movapd %xmm5, %xmm3
movapd %xmm5, %xmm8
movsd %xmm5, -7432(%rbp) ## 8-byte Spill
movsd -1296(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm0, %xmm3
movsd %xmm3, -12968(%rbp) ## 8-byte Spill
mulsd %xmm3, %xmm0
movapd -2928(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm1, %xmm3
movapd %xmm3, %xmm1
movsd %xmm3, -7424(%rbp) ## 8-byte Spill
mulsd -960(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm0, %xmm1
movsd -1120(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
subsd %xmm1, %xmm0
movsd LCPI19_116(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
subsd %xmm0, %xmm4
movapd %xmm6, %xmm5
movapd %xmm10, -15248(%rbp) ## 16-byte Spill
mulsd %xmm10, %xmm5
movapd %xmm7, %xmm0
movapd %xmm2, -15216(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm0
subsd %xmm0, %xmm5
movapd -10448(%rbp), %xmm1 ## 16-byte Reload
subsd %xmm8, %xmm1
addsd -9376(%rbp), %xmm1 ## 16-byte Folded Reload
movapd %xmm1, %xmm0
movapd %xmm1, %xmm6
movapd %xmm1, -12288(%rbp) ## 16-byte Spill
movsd -560(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm2, %xmm0
movsd -7056(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
movapd %xmm8, %xmm1
subsd %xmm0, %xmm1
movapd %xmm2, %xmm0
movsd %xmm1, -12976(%rbp) ## 8-byte Spill
mulsd %xmm1, %xmm0
movsd -2664(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
addsd %xmm0, %xmm1
movapd %xmm3, %xmm0
addsd -2768(%rbp), %xmm0 ## 8-byte Folded Reload
addsd -9360(%rbp), %xmm0 ## 16-byte Folded Reload
movsd %xmm0, -2664(%rbp) ## 8-byte Spill
mulsd -976(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
movapd -2144(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm1
movapd %xmm2, %xmm7
mulsd %xmm5, %xmm1
movapd %xmm0, %xmm2
movsd LCPI19_30(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm2
subsd %xmm2, %xmm1
movapd -848(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm3
mulsd %xmm4, %xmm3
subsd %xmm1, %xmm3
movapd %xmm3, -10128(%rbp) ## 16-byte Spill
movapd %xmm2, %xmm1
movapd %xmm5, -15168(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm1
movsd LCPI19_111(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm0
subsd %xmm0, %xmm1
movapd %xmm4, -15184(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm7
addsd %xmm1, %xmm7
movapd %xmm7, -13760(%rbp) ## 16-byte Spill
movapd -10336(%rbp), %xmm1 ## 16-byte Reload
addsd %xmm6, %xmm1
movapd %xmm1, -10064(%rbp) ## 16-byte Spill
movsd -128(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm1, %xmm0
movapd %xmm8, %xmm1
subsd %xmm0, %xmm1
movsd %xmm1, -9656(%rbp) ## 8-byte Spill
movsd -3600(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -4800(%rbp), %xmm0 ## 8-byte Folded Reload
mulsd LCPI19_1(%rip), %xmm0
mulsd -2864(%rbp), %xmm0 ## 16-byte Folded Reload
movsd LCPI19_110(%rip), %xmm14 ## xmm14 = mem[0],zero
mulsd %xmm14, %xmm0
movsd -2184(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
mulsd -3744(%rbp), %xmm12 ## 8-byte Folded Reload
mulsd LCPI19_132(%rip), %xmm12
mulsd -3344(%rbp), %xmm12 ## 8-byte Folded Reload
divsd -3136(%rbp), %xmm12 ## 8-byte Folded Reload
mulsd -1424(%rbp), %xmm12 ## 16-byte Folded Reload
addsd %xmm0, %xmm12
movapd -208(%rbp), %xmm15 ## 16-byte Reload
movapd %xmm15, %xmm0
mulsd -5312(%rbp), %xmm0 ## 8-byte Folded Reload
movapd -1520(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm1
mulsd -11504(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -576(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm0
mulsd -3632(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
addsd %xmm0, %xmm12
movapd %xmm0, %xmm1
mulsd LCPI19_1(%rip), %xmm1
subsd %xmm1, %xmm12
movapd %xmm12, %xmm13
subsd %xmm0, %xmm13
movsd -7288(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
mulsd -768(%rbp), %xmm9 ## 8-byte Folded Reload
movsd -2752(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -3792(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm9
movapd -96(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm8
mulsd -4608(%rbp), %xmm8 ## 16-byte Folded Reload
movapd -144(%rbp), %xmm10 ## 16-byte Reload
movapd %xmm10, %xmm0
mulsd %xmm9, %xmm0
subsd %xmm0, %xmm8
movapd -256(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm9, %xmm6
movapd %xmm1, %xmm0
mulsd -10224(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm6
movapd %xmm15, %xmm2
mulsd %xmm8, %xmm2
subsd -7648(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm6, %xmm2
movapd %xmm11, %xmm0
movapd -1968(%rbp), %xmm11 ## 16-byte Reload
mulsd %xmm11, %xmm0
addsd %xmm2, %xmm0
movapd %xmm4, %xmm2
mulsd %xmm8, %xmm2
subsd -6192(%rbp), %xmm2 ## 8-byte Folded Reload
movapd -1488(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm6, %xmm3
addsd %xmm2, %xmm3
movapd -176(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm11, %xmm7
addsd %xmm3, %xmm7
subsd -1352(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm15, %xmm2
mulsd %xmm0, %xmm2
subsd -6176(%rbp), %xmm7 ## 16-byte Folded Reload
movapd %xmm4, %xmm3
mulsd %xmm7, %xmm3
addsd %xmm2, %xmm3
movapd %xmm5, %xmm1
movapd %xmm5, %xmm2
mulsd %xmm8, %xmm2
subsd -8256(%rbp), %xmm2 ## 8-byte Folded Reload
movapd -864(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm6, -13584(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm4
addsd %xmm2, %xmm4
movapd %xmm11, %xmm5
mulsd -736(%rbp), %xmm5 ## 16-byte Folded Reload
addsd %xmm4, %xmm5
subsd -8240(%rbp), %xmm5 ## 8-byte Folded Reload
movapd %xmm5, -1968(%rbp) ## 16-byte Spill
movapd %xmm1, %xmm2
mulsd %xmm5, %xmm2
addsd %xmm3, %xmm2
movsd -3920(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd -3728(%rbp), %xmm3 ## 8-byte Folded Reload
mulsd LCPI19_1(%rip), %xmm3
mulsd -3648(%rbp), %xmm3 ## 16-byte Folded Reload
mulsd %xmm14, %xmm3
movsd -1952(%rbp), %xmm11 ## 8-byte Reload
## xmm11 = mem[0],zero
mulsd -3104(%rbp), %xmm11 ## 8-byte Folded Reload
mulsd LCPI19_133(%rip), %xmm11
mulsd -2848(%rbp), %xmm11 ## 8-byte Folded Reload
divsd -3824(%rbp), %xmm11 ## 8-byte Folded Reload
mulsd -1392(%rbp), %xmm11 ## 8-byte Folded Reload
addsd %xmm3, %xmm11
subsd %xmm2, %xmm13
addsd %xmm2, %xmm11
mulsd LCPI19_1(%rip), %xmm2
subsd %xmm2, %xmm11
addsd %xmm13, %xmm11
movapd %xmm15, %xmm1
mulsd -816(%rbp), %xmm1 ## 8-byte Folded Reload
movapd -1344(%rbp), %xmm2 ## 16-byte Reload
mulsd -4592(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm1, %xmm2
movapd -2416(%rbp), %xmm1 ## 16-byte Reload
mulsd -336(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm2, %xmm1
movsd -3296(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd -1360(%rbp), %xmm3 ## 8-byte Folded Reload
mulsd LCPI19_1(%rip), %xmm3
mulsd -3072(%rbp), %xmm3 ## 16-byte Folded Reload
movsd -432(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd -4048(%rbp), %xmm2 ## 16-byte Folded Reload
mulsd LCPI19_134(%rip), %xmm2
mulsd %xmm14, %xmm3
mulsd -3952(%rbp), %xmm2 ## 8-byte Folded Reload
divsd -3712(%rbp), %xmm2 ## 8-byte Folded Reload
mulsd -2480(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm3, %xmm2
movsd LCPI19_1(%rip), %xmm15 ## xmm15 = mem[0],zero
movapd %xmm11, %xmm14
subsd %xmm1, %xmm14
addsd %xmm1, %xmm2
mulsd %xmm15, %xmm1
subsd %xmm1, %xmm2
movsd %xmm2, -432(%rbp) ## 8-byte Spill
movapd -2384(%rbp), %xmm1 ## 16-byte Reload
mulsd -800(%rbp), %xmm1 ## 8-byte Folded Reload
subsd -8304(%rbp), %xmm1 ## 16-byte Folded Reload
movapd -1024(%rbp), %xmm2 ## 16-byte Reload
mulsd -5312(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm1, %xmm2
movapd -2368(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm8, %xmm1
addsd %xmm2, %xmm1
subsd -8288(%rbp), %xmm1 ## 8-byte Folded Reload
mulsd -2080(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
movapd %xmm11, %xmm1
movapd %xmm11, %xmm13
movsd %xmm11, -1952(%rbp) ## 8-byte Spill
mulsd LCPI19_25(%rip), %xmm1
subsd %xmm1, %xmm0
movsd -2112(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movsd -3528(%rbp), %xmm11 ## 8-byte Reload
## xmm11 = mem[0],zero
mulsd %xmm11, %xmm1
movsd -1056(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
movapd %xmm6, %xmm2
mulsd -1776(%rbp), %xmm2 ## 16-byte Folded Reload
subsd %xmm2, %xmm1
movapd -1040(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm10, %xmm4
mulsd %xmm10, %xmm3
movapd -992(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm3, %xmm2
addsd %xmm1, %xmm2
movapd -1744(%rbp), %xmm1 ## 16-byte Reload
mulsd -2160(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm2
movapd -2064(%rbp), %xmm1 ## 16-byte Reload
movsd -9752(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
mulsd %xmm10, %xmm1
addsd %xmm2, %xmm1
movapd -4608(%rbp), %xmm2 ## 16-byte Reload
mulsd -2176(%rbp), %xmm2 ## 16-byte Folded Reload
subsd %xmm2, %xmm1
movapd -2608(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm9, %xmm2
addsd %xmm1, %xmm2
addsd -7664(%rbp), %xmm2 ## 16-byte Folded Reload
addsd -8272(%rbp), %xmm2 ## 8-byte Folded Reload
addsd -6464(%rbp), %xmm2 ## 16-byte Folded Reload
movapd -624(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm1
movsd %xmm2, -12456(%rbp) ## 8-byte Spill
mulsd %xmm2, %xmm1
addsd %xmm0, %xmm1
mulsd -2512(%rbp), %xmm6 ## 8-byte Folded Reload
movapd %xmm11, %xmm0
mulsd -96(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm6
movapd %xmm4, %xmm0
movsd %xmm3, -9664(%rbp) ## 8-byte Spill
mulsd %xmm3, %xmm0
subsd %xmm0, %xmm6
addsd -4312(%rbp), %xmm1 ## 8-byte Folded Reload
addsd -4320(%rbp), %xmm1 ## 8-byte Folded Reload
addsd -3472(%rbp), %xmm6 ## 8-byte Folded Reload
movapd %xmm6, %xmm2
mulsd LCPI19_103(%rip), %xmm2
addsd %xmm1, %xmm2
movapd -10464(%rbp), %xmm1 ## 16-byte Reload
mulsd -320(%rbp), %xmm1 ## 8-byte Folded Reload
addsd -4528(%rbp), %xmm1 ## 16-byte Folded Reload
movsd %xmm9, -7288(%rbp) ## 8-byte Spill
addsd %xmm9, %xmm10
movsd -5360(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
movapd -48(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm4, %xmm3
addsd %xmm10, %xmm3
movapd %xmm4, %xmm0
movsd %xmm3, -12912(%rbp) ## 8-byte Spill
mulsd %xmm3, %xmm0
addsd %xmm1, %xmm0
movapd %xmm0, %xmm1
mulsd LCPI19_25(%rip), %xmm1
addsd %xmm2, %xmm1
movapd -11216(%rbp), %xmm2 ## 16-byte Reload
mulsd -72(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm1, %xmm2
movapd -11200(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm5, %xmm3
movapd %xmm4, %xmm1
movapd %xmm3, -16928(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm1
subsd %xmm1, %xmm2
movapd -1824(%rbp), %xmm1 ## 16-byte Reload
movapd -2240(%rbp), %xmm9 ## 16-byte Reload
mulsd %xmm9, %xmm1
subsd %xmm1, %xmm2
subsd -7216(%rbp), %xmm2 ## 8-byte Folded Reload
movapd -2304(%rbp), %xmm15 ## 16-byte Reload
movsd -816(%rbp), %xmm11 ## 8-byte Reload
## xmm11 = mem[0],zero
mulsd %xmm15, %xmm11
addsd %xmm2, %xmm11
movsd %xmm11, -816(%rbp) ## 8-byte Spill
addsd -432(%rbp), %xmm14 ## 8-byte Folded Reload
mulsd LCPI19_108(%rip), %xmm11
movapd %xmm14, %xmm2
mulsd LCPI19_114(%rip), %xmm2
subsd %xmm2, %xmm11
movapd %xmm13, %xmm3
mulsd LCPI19_24(%rip), %xmm3
movsd LCPI19_112(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm0, %xmm2
addsd %xmm3, %xmm2
movapd -1888(%rbp), %xmm3 ## 16-byte Reload
movsd -800(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm5, %xmm3
subsd -7208(%rbp), %xmm3 ## 8-byte Folded Reload
movapd -1024(%rbp), %xmm13 ## 16-byte Reload
movapd %xmm13, %xmm4
mulsd -3632(%rbp), %xmm4 ## 16-byte Folded Reload
addsd %xmm3, %xmm4
mulsd LCPI19_14(%rip), %xmm12
addsd %xmm4, %xmm12
movapd -1584(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm8, %xmm3
addsd %xmm12, %xmm3
subsd -12128(%rbp), %xmm3 ## 16-byte Folded Reload
movapd -1968(%rbp), %xmm1 ## 16-byte Reload
movapd -2080(%rbp), %xmm12 ## 16-byte Reload
mulsd %xmm12, %xmm1
addsd %xmm3, %xmm1
addsd -7200(%rbp), %xmm1 ## 8-byte Folded Reload
addsd -12112(%rbp), %xmm1 ## 16-byte Folded Reload
addsd -5440(%rbp), %xmm1 ## 8-byte Folded Reload
movapd %xmm1, %xmm3
movapd %xmm6, %xmm1
movsd LCPI19_47(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd %xmm4, %xmm1
addsd %xmm3, %xmm1
addsd -7192(%rbp), %xmm1 ## 8-byte Folded Reload
addsd -6432(%rbp), %xmm1 ## 16-byte Folded Reload
movapd -496(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm1, %xmm3
addsd %xmm2, %xmm3
movapd %xmm5, %xmm2
mulsd -896(%rbp), %xmm2 ## 16-byte Folded Reload
subsd -7184(%rbp), %xmm2 ## 8-byte Folded Reload
movapd %xmm2, %xmm4
movapd %xmm13, %xmm2
movapd %xmm14, %xmm13
mulsd -11504(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm4, %xmm2
mulsd -1904(%rbp), %xmm8 ## 16-byte Folded Reload
addsd %xmm2, %xmm8
subsd -12096(%rbp), %xmm8 ## 16-byte Folded Reload
mulsd %xmm12, %xmm7
addsd %xmm8, %xmm7
addsd -4336(%rbp), %xmm7 ## 8-byte Folded Reload
addsd -8320(%rbp), %xmm7 ## 16-byte Folded Reload
addsd -4328(%rbp), %xmm7 ## 8-byte Folded Reload
movsd LCPI19_48(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm6
addsd %xmm7, %xmm6
addsd -6416(%rbp), %xmm6 ## 16-byte Folded Reload
subsd -7176(%rbp), %xmm6 ## 8-byte Folded Reload
movapd -752(%rbp), %xmm14 ## 16-byte Reload
movapd %xmm14, %xmm2
mulsd %xmm6, %xmm2
addsd %xmm3, %xmm2
movsd -2032(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm9, %xmm3
subsd %xmm3, %xmm2
subsd -7160(%rbp), %xmm2 ## 8-byte Folded Reload
movapd -4592(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm15, %xmm3
addsd %xmm2, %xmm3
movapd %xmm3, %xmm5
movsd -5368(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
movsd -320(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
mulsd %xmm9, %xmm2
movsd -1984(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
movapd -48(%rbp), %xmm12 ## 16-byte Reload
mulsd %xmm12, %xmm4
movapd %xmm12, %xmm3
movsd %xmm4, -12800(%rbp) ## 8-byte Spill
mulsd %xmm4, %xmm3
subsd %xmm3, %xmm2
addsd -7168(%rbp), %xmm2 ## 8-byte Folded Reload
movapd %xmm2, %xmm3
mulsd LCPI19_73(%rip), %xmm3
addsd %xmm5, %xmm3
movsd -816(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd LCPI19_15(%rip), %xmm5
movapd %xmm13, %xmm4
mulsd LCPI19_22(%rip), %xmm4
subsd %xmm5, %xmm4
addsd %xmm3, %xmm4
movsd LCPI19_13(%rip), %xmm8 ## xmm8 = mem[0],zero
mulsd %xmm8, %xmm3
addsd %xmm11, %xmm3
movapd -13760(%rbp), %xmm11 ## 16-byte Reload
movsd LCPI19_23(%rip), %xmm7 ## xmm7 = mem[0],zero
movsd -1952(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm7, %xmm5
mulsd %xmm7, %xmm0
subsd %xmm0, %xmm5
movapd %xmm14, %xmm0
movapd %xmm1, -13552(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm0
addsd %xmm5, %xmm0
movapd -496(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm6, -13568(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm1
subsd %xmm1, %xmm0
movapd -2240(%rbp), %xmm1 ## 16-byte Reload
mulsd -1808(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm0
subsd -7152(%rbp), %xmm0 ## 8-byte Folded Reload
movapd -336(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm15, %xmm1
addsd %xmm0, %xmm1
movsd -432(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd LCPI19_64(%rip), %xmm0
addsd %xmm1, %xmm0
mulsd LCPI19_72(%rip), %xmm2
addsd %xmm0, %xmm2
movapd -10480(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm9, %xmm0
addsd -12080(%rbp), %xmm0 ## 16-byte Folded Reload
addsd -3480(%rbp), %xmm10 ## 8-byte Folded Reload
movsd -920(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm12, %xmm1
addsd %xmm10, %xmm1
movapd %xmm12, %xmm5
movsd %xmm1, -12792(%rbp) ## 8-byte Spill
mulsd %xmm1, %xmm5
addsd %xmm0, %xmm5
addsd %xmm2, %xmm3
movapd %xmm5, %xmm6
mulsd LCPI19_114(%rip), %xmm6
addsd %xmm3, %xmm6
mulsd %xmm8, %xmm2
addsd %xmm4, %xmm2
mulsd LCPI19_31(%rip), %xmm5
addsd %xmm2, %xmm5
movapd -1616(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm3
mulsd %xmm6, %xmm3
movapd -1088(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm0
mulsd %xmm5, %xmm0
subsd %xmm0, %xmm3
movapd %xmm13, %xmm1
movsd LCPI19_111(%rip), %xmm12 ## xmm12 = mem[0],zero
mulsd %xmm12, %xmm1
movapd -848(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm0
mulsd %xmm3, %xmm0
subsd %xmm1, %xmm0
movapd %xmm13, %xmm1
movsd LCPI19_115(%rip), %xmm8 ## xmm8 = mem[0],zero
mulsd %xmm8, %xmm1
movapd %xmm5, -15152(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm2
subsd %xmm1, %xmm2
movapd %xmm4, %xmm1
movapd %xmm6, -15136(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm1
addsd %xmm2, %xmm1
movapd -10448(%rbp), %xmm2 ## 16-byte Reload
movsd -960(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd %xmm6, %xmm2
addsd -4576(%rbp), %xmm2 ## 8-byte Folded Reload
movsd -2768(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
movsd -1296(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm7, %xmm5
addsd %xmm10, %xmm5
movapd %xmm7, %xmm4
movsd %xmm5, -12808(%rbp) ## 8-byte Spill
mulsd %xmm5, %xmm4
addsd %xmm2, %xmm4
mulsd %xmm8, %xmm4
movapd -10128(%rbp), %xmm8 ## 16-byte Reload
addsd %xmm1, %xmm4
movsd -7432(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm6, %xmm1
movsd -7424(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm7, %xmm5
movapd %xmm7, %xmm2
movsd %xmm5, -12776(%rbp) ## 8-byte Spill
mulsd %xmm5, %xmm2
subsd %xmm2, %xmm1
addsd -2960(%rbp), %xmm1 ## 8-byte Folded Reload
mulsd LCPI19_116(%rip), %xmm1
subsd %xmm1, %xmm4
movapd -2144(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm1
mulsd %xmm4, %xmm1
subsd %xmm1, %xmm0
movapd -12288(%rbp), %xmm1 ## 16-byte Reload
mulsd -976(%rbp), %xmm1 ## 8-byte Folded Reload
addsd -5584(%rbp), %xmm1 ## 8-byte Folded Reload
movsd -2664(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
movapd %xmm6, %xmm2
movsd -560(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm7, %xmm2
addsd %xmm10, %xmm2
movsd %xmm2, -12784(%rbp) ## 8-byte Spill
mulsd %xmm2, %xmm7
addsd %xmm1, %xmm7
movapd %xmm7, %xmm14
mulsd %xmm12, %xmm14
addsd %xmm0, %xmm14
movsd %xmm13, -9584(%rbp) ## 8-byte Spill
movsd LCPI19_30(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm13
movapd %xmm5, %xmm1
movsd -128(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
movapd %xmm3, -15104(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm1
subsd %xmm13, %xmm1
movapd %xmm9, %xmm0
movapd %xmm4, -15088(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm0
addsd %xmm1, %xmm0
mulsd %xmm2, %xmm7
addsd %xmm0, %xmm7
movapd -6080(%rbp), %xmm0 ## 16-byte Reload
movsd LCPI19_88(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm0
movapd -5040(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm2, %xmm1
addsd LCPI19_92(%rip), %xmm8
movapd %xmm0, -6080(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm8
movapd -10320(%rbp), %xmm12 ## 16-byte Reload
movapd %xmm6, %xmm0
addsd %xmm6, %xmm12
movapd %xmm1, -5040(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm7
movapd %xmm5, %xmm15
mulsd %xmm12, %xmm15
movsd %xmm10, -12424(%rbp) ## 8-byte Spill
addsd %xmm10, %xmm15
testq %rax, %rax
movapd %xmm8, -10128(%rbp) ## 16-byte Spill
je LBB19_52
## %bb.51:
movapd -3184(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm4
mulsd %xmm8, %xmm4
movapd -2528(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm1
movapd %xmm11, %xmm13
mulsd %xmm11, %xmm1
movapd %xmm5, %xmm2
mulsd -9656(%rbp), %xmm2 ## 8-byte Folded Reload
movsd -280(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
movapd %xmm10, %xmm0
mulsd %xmm12, %xmm0
addsd %xmm2, %xmm0
movsd LCPI19_29(%rip), %xmm2 ## xmm2 = mem[0],zero
movapd %xmm0, %xmm3
mulsd %xmm2, %xmm3
subsd %xmm3, %xmm1
subsd %xmm1, %xmm4
addsd %xmm4, %xmm4
movsd LCPI19_119(%rip), %xmm3 ## xmm3 = mem[0],zero
movsd -9584(%rbp), %xmm11 ## 8-byte Reload
## xmm11 = mem[0],zero
movapd %xmm11, %xmm1
mulsd %xmm3, %xmm1
movapd %xmm9, %xmm6
mulsd %xmm14, %xmm6
subsd %xmm1, %xmm6
movapd %xmm8, %xmm1
mulsd %xmm7, %xmm1
subsd %xmm1, %xmm6
movapd %xmm10, %xmm1
mulsd -10064(%rbp), %xmm1 ## 16-byte Folded Reload
mulsd %xmm15, %xmm5
addsd %xmm1, %xmm5
movapd %xmm5, %xmm1
mulsd %xmm3, %xmm1
addsd %xmm6, %xmm1
addsd %xmm1, %xmm1
addsd %xmm4, %xmm1
mulsd %xmm2, %xmm11
movapd %xmm8, %xmm6
mulsd %xmm14, %xmm6
subsd %xmm11, %xmm6
movapd %xmm9, %xmm4
mulsd %xmm7, %xmm4
addsd %xmm6, %xmm4
mulsd %xmm2, %xmm5
addsd %xmm4, %xmm5
mulsd %xmm3, %xmm0
movapd %xmm9, %xmm2
mulsd %xmm13, %xmm2
subsd %xmm0, %xmm2
movapd %xmm8, %xmm0
movapd -10128(%rbp), %xmm8 ## 16-byte Reload
mulsd %xmm8, %xmm0
addsd %xmm2, %xmm0
movsd -1536(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
movapd %xmm4, %xmm2
mulsd %xmm1, %xmm2
addsd %xmm5, %xmm0
movsd -1072(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm0, %xmm3
subsd %xmm3, %xmm2
movsd -2272(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm3, %xmm1
mulsd -880(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
mulsd %xmm4, %xmm2
movsd LCPI19_1(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm2
mulsd %xmm3, %xmm0
mulsd %xmm1, %xmm0
subsd %xmm0, %xmm2
movsd %xmm2, 24(%rax)
LBB19_52:
movsd %xmm15, -12400(%rbp) ## 8-byte Spill
movapd %xmm14, -14960(%rbp) ## 16-byte Spill
movapd %xmm7, -14992(%rbp) ## 16-byte Spill
movapd %xmm12, -10896(%rbp) ## 16-byte Spill
movsd -344(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
subsd -352(%rbp), %xmm2 ## 8-byte Folded Reload
movsd %xmm2, -1968(%rbp) ## 8-byte Spill
addsd -224(%rbp), %xmm2 ## 8-byte Folded Reload
movapd %xmm2, %xmm11
movsd %xmm2, -1952(%rbp) ## 8-byte Spill
addsd -408(%rbp), %xmm11 ## 8-byte Folded Reload
movsd -368(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
addsd -464(%rbp), %xmm6 ## 8-byte Folded Reload
movapd %xmm6, %xmm15
addsd -152(%rbp), %xmm15 ## 8-byte Folded Reload
movapd %xmm15, %xmm12
addsd -288(%rbp), %xmm12 ## 8-byte Folded Reload
movapd %xmm12, %xmm9
addsd -640(%rbp), %xmm9 ## 16-byte Folded Reload
movapd %xmm9, %xmm0
divsd -6608(%rbp), %xmm0 ## 16-byte Folded Reload
movsd %xmm0, -800(%rbp) ## 8-byte Spill
movapd -3888(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm0, %xmm3
movapd %xmm3, %xmm4
mulsd -6624(%rbp), %xmm4 ## 16-byte Folded Reload
addsd %xmm11, %xmm4
addsd -1472(%rbp), %xmm4 ## 16-byte Folded Reload
movsd -5728(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm4, %xmm0
movsd LCPI19_1(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
mulsd -5760(%rbp), %xmm0 ## 8-byte Folded Reload
movsd LCPI19_37(%rip), %xmm8 ## xmm8 = mem[0],zero
mulsd %xmm8, %xmm0
mulsd -456(%rbp), %xmm4 ## 8-byte Folded Reload
movsd %xmm4, -4544(%rbp) ## 8-byte Spill
movsd -1704(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm4, %xmm1
movsd %xmm1, -1632(%rbp) ## 8-byte Spill
movsd -1408(%rbp), %xmm13 ## 8-byte Reload
## xmm13 = mem[0],zero
mulsd %xmm1, %xmm13
movsd LCPI19_43(%rip), %xmm10 ## xmm10 = mem[0],zero
mulsd %xmm10, %xmm13
addsd %xmm0, %xmm13
movapd %xmm12, %xmm7
addsd -1232(%rbp), %xmm7 ## 16-byte Folded Reload
movapd %xmm7, %xmm0
divsd -3168(%rbp), %xmm0 ## 16-byte Folded Reload
movsd %xmm0, -7840(%rbp) ## 8-byte Spill
movapd -2944(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm0, %xmm5
movapd %xmm5, %xmm4
mulsd -6592(%rbp), %xmm4 ## 16-byte Folded Reload
addsd %xmm11, %xmm4
addsd -1456(%rbp), %xmm4 ## 16-byte Folded Reload
movsd -4768(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm4, %xmm0
movsd LCPI19_1(%rip), %xmm14 ## xmm14 = mem[0],zero
mulsd %xmm14, %xmm0
mulsd -3760(%rbp), %xmm0 ## 8-byte Folded Reload
mulsd %xmm8, %xmm0
mulsd -1384(%rbp), %xmm4 ## 8-byte Folded Reload
movsd %xmm4, -7856(%rbp) ## 8-byte Spill
movsd -4816(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
mulsd %xmm4, %xmm8
movsd %xmm8, -432(%rbp) ## 8-byte Spill
movsd -1152(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm8, %xmm1
mulsd %xmm10, %xmm1
addsd %xmm0, %xmm1
movapd %xmm1, -4592(%rbp) ## 16-byte Spill
movapd %xmm15, %xmm0
addsd -5232(%rbp), %xmm0 ## 16-byte Folded Reload
movsd %xmm0, -3464(%rbp) ## 8-byte Spill
divsd -2200(%rbp), %xmm0 ## 8-byte Folded Reload
movsd %xmm0, -1744(%rbp) ## 8-byte Spill
movsd -760(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
mulsd %xmm0, %xmm8
movapd %xmm8, %xmm0
mulsd -2968(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm2, %xmm0
addsd -3840(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm0, %xmm4
mulsd -4656(%rbp), %xmm4 ## 8-byte Folded Reload
mulsd %xmm14, %xmm4
mulsd -3312(%rbp), %xmm4 ## 16-byte Folded Reload
mulsd LCPI19_37(%rip), %xmm4
mulsd -1168(%rbp), %xmm0 ## 8-byte Folded Reload
movsd %xmm0, -3488(%rbp) ## 8-byte Spill
movsd -1656(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
mulsd %xmm0, %xmm10
movsd %xmm10, -336(%rbp) ## 8-byte Spill
movsd -3856(%rbp), %xmm14 ## 8-byte Reload
## xmm14 = mem[0],zero
mulsd %xmm10, %xmm14
mulsd LCPI19_43(%rip), %xmm14
addsd %xmm4, %xmm14
movsd LCPI19_50(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd %xmm4, %xmm12
movapd %xmm12, -4608(%rbp) ## 16-byte Spill
movapd -8192(%rbp), %xmm0 ## 16-byte Reload
subsd %xmm12, %xmm0
movsd %xmm0, -3512(%rbp) ## 8-byte Spill
movsd LCPI19_74(%rip), %xmm12 ## xmm12 = mem[0],zero
mulsd %xmm12, %xmm15
movsd %xmm15, -3528(%rbp) ## 8-byte Spill
subsd %xmm15, %xmm0
addsd -3568(%rbp), %xmm0 ## 8-byte Folded Reload
movsd %xmm0, -3520(%rbp) ## 8-byte Spill
movsd LCPI19_85(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm6
subsd %xmm6, %xmm0
movsd %xmm0, -5336(%rbp) ## 8-byte Spill
movapd %xmm13, %xmm10
addsd %xmm1, %xmm10
movapd %xmm10, %xmm6
addsd %xmm14, %xmm6
movsd %xmm6, -7048(%rbp) ## 8-byte Spill
movsd -1200(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
subsd %xmm0, %xmm6
movsd %xmm6, -12760(%rbp) ## 8-byte Spill
mulsd %xmm6, %xmm1
addsd -1112(%rbp), %xmm1 ## 8-byte Folded Reload
movsd -1968(%rbp), %xmm15 ## 8-byte Reload
## xmm15 = mem[0],zero
mulsd %xmm2, %xmm15
movsd -1952(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm12, %xmm0
movsd %xmm0, -1952(%rbp) ## 8-byte Spill
mulsd LCPI19_50(%rip), %xmm11
movapd %xmm11, -816(%rbp) ## 16-byte Spill
addsd -9520(%rbp), %xmm11 ## 16-byte Folded Reload
movapd %xmm11, -10432(%rbp) ## 16-byte Spill
movapd %xmm0, %xmm2
addsd %xmm11, %xmm2
addsd -10592(%rbp), %xmm2 ## 16-byte Folded Reload
movsd %xmm2, -2240(%rbp) ## 8-byte Spill
addsd %xmm2, %xmm15
movsd %xmm15, -1968(%rbp) ## 8-byte Spill
mulsd -2800(%rbp), %xmm15 ## 8-byte Folded Reload
addsd %xmm1, %xmm15
movsd %xmm15, -2184(%rbp) ## 8-byte Spill
mulsd -5856(%rbp), %xmm9 ## 16-byte Folded Reload
mulsd -3152(%rbp), %xmm3 ## 8-byte Folded Reload
movapd %xmm3, %xmm4
movsd LCPI19_1(%rip), %xmm12 ## xmm12 = mem[0],zero
mulsd %xmm12, %xmm4
subsd %xmm4, %xmm9
movsd LCPI19_44(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm3
mulsd -4784(%rbp), %xmm3 ## 8-byte Folded Reload
movsd LCPI19_45(%rip), %xmm15 ## xmm15 = mem[0],zero
mulsd %xmm15, %xmm3
subsd %xmm3, %xmm9
movapd -592(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm1
movapd %xmm2, %xmm4
mulsd %xmm13, %xmm1
divsd -5840(%rbp), %xmm9 ## 16-byte Folded Reload
movapd -4208(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm9, %xmm2
movapd -400(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm11, %xmm3
mulsd %xmm2, %xmm3
movapd %xmm2, %xmm0
movapd %xmm2, -3600(%rbp) ## 16-byte Spill
subsd %xmm3, %xmm1
mulsd -5248(%rbp), %xmm9 ## 16-byte Folded Reload
movsd -800(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd -5776(%rbp), %xmm2 ## 8-byte Folded Reload
subsd %xmm2, %xmm9
movapd -272(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm3
movapd %xmm2, %xmm6
mulsd %xmm0, %xmm3
movapd %xmm4, %xmm2
mulsd %xmm9, %xmm2
movsd %xmm9, -9744(%rbp) ## 8-byte Spill
subsd %xmm2, %xmm3
movsd %xmm3, -3296(%rbp) ## 8-byte Spill
movapd %xmm11, %xmm0
mulsd %xmm9, %xmm0
movapd %xmm6, %xmm2
mulsd %xmm13, %xmm2
subsd %xmm2, %xmm0
movapd %xmm0, -13808(%rbp) ## 16-byte Spill
movapd -528(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm3, %xmm2
subsd -7904(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm0, %xmm2
movapd -112(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm1, -5584(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm0
addsd %xmm2, %xmm0
movapd -1856(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm1, %xmm3
subsd -8848(%rbp), %xmm3 ## 16-byte Folded Reload
subsd -2688(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm0, -11488(%rbp) ## 16-byte Spill
movapd -1440(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm0, %xmm2
addsd %xmm3, %xmm2
mulsd -5824(%rbp), %xmm7 ## 16-byte Folded Reload
mulsd -536(%rbp), %xmm5 ## 8-byte Folded Reload
movapd %xmm5, %xmm3
mulsd %xmm12, %xmm3
subsd %xmm3, %xmm7
mulsd LCPI19_44(%rip), %xmm5
mulsd -4144(%rbp), %xmm5 ## 8-byte Folded Reload
mulsd %xmm15, %xmm5
subsd %xmm5, %xmm7
movapd %xmm4, %xmm12
movapd %xmm4, %xmm6
movapd -4592(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm0, %xmm6
divsd -5808(%rbp), %xmm7 ## 16-byte Folded Reload
movsd %xmm7, -3456(%rbp) ## 8-byte Spill
movapd -4288(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm7, %xmm5
movapd %xmm11, %xmm1
mulsd %xmm5, %xmm1
movapd %xmm5, -6352(%rbp) ## 16-byte Spill
subsd %xmm1, %xmm6
movapd %xmm6, -800(%rbp) ## 16-byte Spill
movapd -2400(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm6, %xmm1
addsd %xmm2, %xmm1
movapd -4608(%rbp), %xmm15 ## 16-byte Reload
movapd %xmm15, %xmm4
mulsd %xmm12, %xmm4
movsd -1552(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm4, %xmm2
movapd -816(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm3
mulsd -2256(%rbp), %xmm3 ## 16-byte Folded Reload
addsd %xmm2, %xmm3
movapd %xmm15, %xmm2
mulsd %xmm11, %xmm2
movapd %xmm7, %xmm6
mulsd -272(%rbp), %xmm6 ## 16-byte Folded Reload
addsd %xmm2, %xmm6
movapd -2816(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm6, %xmm2
movapd %xmm6, %xmm7
movapd %xmm6, -4960(%rbp) ## 16-byte Spill
addsd %xmm3, %xmm2
movapd -640(%rbp), %xmm3 ## 16-byte Reload
mulsd -3600(%rbp), %xmm3 ## 16-byte Folded Reload
subsd %xmm2, %xmm3
movapd -1840(%rbp), %xmm2 ## 16-byte Reload
movsd %xmm13, -12768(%rbp) ## 8-byte Spill
mulsd %xmm13, %xmm2
subsd %xmm2, %xmm3
movapd -1232(%rbp), %xmm9 ## 16-byte Reload
mulsd %xmm5, %xmm9
addsd %xmm3, %xmm9
subsd -8816(%rbp), %xmm1 ## 16-byte Folded Reload
addsd LCPI19_46(%rip), %xmm1
movapd -2624(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm0, %xmm2
subsd %xmm2, %xmm9
addsd -8832(%rbp), %xmm9 ## 8-byte Folded Reload
addsd -7888(%rbp), %xmm9 ## 16-byte Folded Reload
addsd -9424(%rbp), %xmm9 ## 16-byte Folded Reload
movapd -688(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm2
movapd %xmm9, -15472(%rbp) ## 16-byte Spill
mulsd %xmm9, %xmm2
addsd %xmm1, %xmm2
movsd -3512(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movapd -192(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm3, %xmm1
subsd %xmm1, %xmm10
movapd %xmm3, %xmm1
movapd %xmm3, %xmm6
movsd %xmm10, -12744(%rbp) ## 8-byte Spill
mulsd %xmm10, %xmm1
addsd -5008(%rbp), %xmm1 ## 16-byte Folded Reload
movapd -10432(%rbp), %xmm9 ## 16-byte Reload
movsd -480(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm3, %xmm9
movapd %xmm3, %xmm10
addsd %xmm1, %xmm9
movapd %xmm9, %xmm1
movsd LCPI19_25(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm1
addsd %xmm2, %xmm1
movapd %xmm4, -13792(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm12
mulsd %xmm7, %xmm11
addsd %xmm12, %xmm11
movapd -816(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm5
mulsd -784(%rbp), %xmm5 ## 8-byte Folded Reload
addsd %xmm11, %xmm5
addsd -7872(%rbp), %xmm5 ## 16-byte Folded Reload
movapd %xmm5, %xmm2
movsd LCPI19_103(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm2
subsd %xmm2, %xmm1
movsd -1552(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm4, %xmm15
movapd -6064(%rbp), %xmm3 ## 16-byte Reload
subsd %xmm15, %xmm3
movapd %xmm3, -11184(%rbp) ## 16-byte Spill
movapd %xmm3, %xmm2
mulsd %xmm0, %xmm2
movapd %xmm2, -16832(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm2
addsd %xmm1, %xmm2
movapd %xmm7, %xmm1
mulsd %xmm4, %xmm1
movapd -6048(%rbp), %xmm4 ## 16-byte Reload
subsd %xmm1, %xmm4
movapd %xmm4, -11168(%rbp) ## 16-byte Spill
mulsd -2336(%rbp), %xmm4 ## 8-byte Folded Reload
addsd %xmm2, %xmm4
movapd %xmm6, %xmm0
movsd %xmm14, -12752(%rbp) ## 8-byte Spill
mulsd %xmm14, %xmm0
movapd -2544(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm0, %xmm1
movapd %xmm0, %xmm7
movapd %xmm0, -2752(%rbp) ## 16-byte Spill
addsd %xmm4, %xmm1
movsd -3464(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -6672(%rbp), %xmm0 ## 8-byte Folded Reload
mulsd -1640(%rbp), %xmm8 ## 8-byte Folded Reload
movapd %xmm8, %xmm2
mulsd LCPI19_1(%rip), %xmm2
subsd %xmm2, %xmm0
mulsd LCPI19_44(%rip), %xmm8
mulsd -3584(%rbp), %xmm8 ## 16-byte Folded Reload
mulsd LCPI19_45(%rip), %xmm8
subsd %xmm8, %xmm0
divsd -6656(%rbp), %xmm0 ## 8-byte Folded Reload
mulsd -2712(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -1744(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd -4560(%rbp), %xmm2 ## 8-byte Folded Reload
subsd %xmm2, %xmm0
movsd %xmm0, -3464(%rbp) ## 8-byte Spill
movapd -112(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm7, %xmm3
movapd %xmm6, %xmm2
mulsd %xmm0, %xmm2
movapd %xmm2, -3920(%rbp) ## 16-byte Spill
movapd -528(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm2, %xmm0
addsd -7584(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm3
subsd -8800(%rbp), %xmm1 ## 16-byte Folded Reload
subsd -7568(%rbp), %xmm3 ## 8-byte Folded Reload
movsd %xmm3, -1744(%rbp) ## 8-byte Spill
movapd -2576(%rbp), %xmm13 ## 16-byte Reload
mulsd %xmm3, %xmm13
addsd %xmm1, %xmm13
movsd -3520(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm6, %xmm0
movsd -7048(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
subsd %xmm0, %xmm1
movapd %xmm6, %xmm0
movsd %xmm1, -12688(%rbp) ## 8-byte Spill
mulsd %xmm1, %xmm0
addsd -4992(%rbp), %xmm0 ## 16-byte Folded Reload
movsd -2240(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm10, %xmm7
addsd %xmm0, %xmm7
movapd %xmm13, %xmm2
movsd LCPI19_15(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm2
movapd %xmm7, %xmm0
movsd LCPI19_31(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
subsd %xmm0, %xmm2
movsd %xmm2, -4944(%rbp) ## 8-byte Spill
movapd -1248(%rbp), %xmm0 ## 16-byte Reload
movsd -3296(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm3, %xmm0
subsd -5472(%rbp), %xmm0 ## 8-byte Folded Reload
movapd -912(%rbp), %xmm1 ## 16-byte Reload
movapd -13808(%rbp), %xmm8 ## 16-byte Reload
mulsd %xmm8, %xmm1
addsd %xmm0, %xmm1
movapd -608(%rbp), %xmm10 ## 16-byte Reload
movapd %xmm10, %xmm11
movapd -5584(%rbp), %xmm14 ## 16-byte Reload
mulsd %xmm14, %xmm11
addsd %xmm1, %xmm11
movapd -1920(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm14, %xmm0
subsd -4424(%rbp), %xmm0 ## 8-byte Folded Reload
subsd -5456(%rbp), %xmm11 ## 16-byte Folded Reload
movapd -1440(%rbp), %xmm12 ## 16-byte Reload
movapd %xmm12, %xmm1
mulsd %xmm11, %xmm1
movapd %xmm11, -11472(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm1
movapd -2432(%rbp), %xmm0 ## 16-byte Reload
movapd -800(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm6, %xmm0
addsd %xmm1, %xmm0
subsd -8784(%rbp), %xmm0 ## 16-byte Folded Reload
addsd LCPI19_77(%rip), %xmm0
addsd -8768(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm5, %xmm4
movsd LCPI19_48(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm4
addsd %xmm0, %xmm4
movapd %xmm9, %xmm1
movsd LCPI19_24(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm1
addsd -8752(%rbp), %xmm4 ## 16-byte Folded Reload
addsd -5920(%rbp), %xmm4 ## 16-byte Folded Reload
movapd -512(%rbp), %xmm15 ## 16-byte Reload
mulsd %xmm4, %xmm15
addsd %xmm1, %xmm15
movapd -1264(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm3, %xmm1
subsd -6240(%rbp), %xmm1 ## 8-byte Folded Reload
movapd -1504(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm8, %xmm0
addsd %xmm1, %xmm0
movapd -1600(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm14, %xmm1
movapd -720(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm2, %xmm14
movapd %xmm2, %xmm3
addsd %xmm0, %xmm14
subsd -8704(%rbp), %xmm1 ## 8-byte Folded Reload
subsd -8608(%rbp), %xmm14 ## 16-byte Folded Reload
mulsd %xmm14, %xmm12
movapd %xmm14, -5584(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm12
movapd -112(%rbp), %xmm1 ## 16-byte Reload
mulsd -11488(%rbp), %xmm1 ## 16-byte Folded Reload
mulsd %xmm11, %xmm10
addsd %xmm1, %xmm10
movapd %xmm2, %xmm1
mulsd %xmm14, %xmm1
addsd %xmm10, %xmm1
movsd LCPI19_14(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm1
addsd %xmm12, %xmm1
movapd -2128(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm6, %xmm0
addsd %xmm1, %xmm0
subsd -8736(%rbp), %xmm0 ## 16-byte Folded Reload
addsd LCPI19_78(%rip), %xmm0
addsd -6256(%rbp), %xmm0 ## 16-byte Folded Reload
movsd LCPI19_47(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm5
addsd %xmm0, %xmm5
subsd -8720(%rbp), %xmm5 ## 16-byte Folded Reload
addsd -5904(%rbp), %xmm5 ## 16-byte Folded Reload
movapd -448(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm0
movapd %xmm1, %xmm14
mulsd %xmm5, %xmm0
addsd %xmm15, %xmm0
movapd -2912(%rbp), %xmm2 ## 16-byte Reload
movapd -2752(%rbp), %xmm10 ## 16-byte Reload
mulsd %xmm10, %xmm2
addsd %xmm0, %xmm2
movapd -3264(%rbp), %xmm15 ## 16-byte Reload
movapd %xmm15, %xmm12
mulsd %xmm10, %xmm12
movapd -2048(%rbp), %xmm0 ## 16-byte Reload
movapd -3920(%rbp), %xmm11 ## 16-byte Reload
mulsd %xmm11, %xmm0
addsd -7552(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm12
subsd -8688(%rbp), %xmm2 ## 16-byte Folded Reload
subsd -8640(%rbp), %xmm12 ## 16-byte Folded Reload
movapd -2576(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm3
mulsd %xmm12, %xmm3
movapd %xmm12, -6336(%rbp) ## 16-byte Spill
addsd %xmm2, %xmm3
movsd -3528(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movapd -192(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm2, %xmm0
movsd %xmm0, -12680(%rbp) ## 8-byte Spill
mulsd %xmm0, %xmm2
movsd -1952(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -480(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm2, %xmm1
movsd LCPI19_23(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm9
movapd %xmm14, %xmm2
movapd %xmm4, -15328(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm2
subsd %xmm2, %xmm9
movapd -512(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm5, -15344(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm2
addsd %xmm9, %xmm2
movapd -3248(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm10, %xmm6
addsd %xmm2, %xmm6
movapd -2560(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm4, %xmm10
movapd -1104(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm11, %xmm2
addsd -4416(%rbp), %xmm2 ## 8-byte Folded Reload
subsd %xmm2, %xmm10
subsd -8672(%rbp), %xmm6 ## 16-byte Folded Reload
subsd -4408(%rbp), %xmm10 ## 8-byte Folded Reload
mulsd %xmm10, %xmm8
addsd %xmm6, %xmm8
movapd -112(%rbp), %xmm14 ## 16-byte Reload
movapd %xmm14, %xmm6
mulsd -1744(%rbp), %xmm6 ## 8-byte Folded Reload
mulsd %xmm12, %xmm15
addsd %xmm6, %xmm15
mulsd %xmm10, %xmm4
movapd %xmm10, -2752(%rbp) ## 16-byte Spill
addsd %xmm15, %xmm4
movsd LCPI19_64(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm4
addsd %xmm8, %xmm4
addsd LCPI19_79(%rip), %xmm3
addsd -4128(%rbp), %xmm1 ## 16-byte Folded Reload
movapd %xmm1, %xmm2
movsd LCPI19_73(%rip), %xmm11 ## xmm11 = mem[0],zero
mulsd %xmm11, %xmm2
addsd LCPI19_80(%rip), %xmm4
addsd %xmm3, %xmm2
movsd LCPI19_72(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm1
addsd %xmm4, %xmm1
movsd LCPI19_108(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm13
movsd LCPI19_21(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm7
subsd %xmm13, %xmm7
movsd -4944(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
addsd %xmm2, %xmm4
movsd LCPI19_13(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm2
addsd %xmm7, %xmm2
addsd %xmm1, %xmm2
movapd %xmm1, %xmm3
mulsd %xmm0, %xmm3
addsd %xmm4, %xmm3
movsd LCPI19_115(%rip), %xmm0 ## xmm0 = mem[0],zero
movsd -2184(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm0, %xmm4
movapd -1312(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm0
movapd %xmm1, %xmm9
mulsd %xmm3, %xmm0
subsd %xmm4, %xmm0
movapd -1328(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm1
movapd %xmm4, %xmm7
mulsd %xmm2, %xmm1
addsd %xmm0, %xmm1
movsd LCPI19_87(%rip), %xmm4 ## xmm4 = mem[0],zero
subsd %xmm1, %xmm4
movsd -368(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
movsd LCPI19_82(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm6
movapd %xmm6, %xmm5
movsd %xmm6, -7400(%rbp) ## 8-byte Spill
movsd -1200(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm0, %xmm5
movsd %xmm5, -12664(%rbp) ## 8-byte Spill
mulsd %xmm5, %xmm0
movsd -344(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm1, %xmm5
movapd %xmm5, %xmm1
movapd %xmm5, %xmm8
movsd %xmm5, -7392(%rbp) ## 8-byte Spill
mulsd -2800(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm0, %xmm1
movsd -1760(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
subsd %xmm1, %xmm0
movsd LCPI19_116(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
subsd %xmm0, %xmm4
movapd %xmm9, %xmm5
movapd %xmm2, -15072(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm5
movapd %xmm7, %xmm0
movapd %xmm3, -15056(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm0
subsd %xmm0, %xmm5
movsd -5336(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
subsd %xmm6, %xmm1
addsd -9440(%rbp), %xmm1 ## 16-byte Folded Reload
movapd %xmm1, %xmm0
movapd %xmm1, %xmm6
movsd %xmm1, -7384(%rbp) ## 8-byte Spill
movsd -704(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm2, %xmm0
movsd -7048(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
movapd %xmm9, %xmm1
subsd %xmm0, %xmm1
movapd %xmm2, %xmm0
movsd %xmm1, -12672(%rbp) ## 8-byte Spill
mulsd %xmm1, %xmm0
movsd -3120(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
addsd %xmm0, %xmm1
movapd %xmm8, %xmm0
addsd -1968(%rbp), %xmm0 ## 8-byte Folded Reload
addsd -8144(%rbp), %xmm0 ## 16-byte Folded Reload
movsd %xmm0, -2184(%rbp) ## 8-byte Spill
mulsd -1280(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
movapd -1936(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm1
movapd %xmm2, %xmm7
mulsd %xmm5, %xmm1
movapd %xmm0, %xmm2
movsd LCPI19_30(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm2
subsd %xmm2, %xmm1
movapd -1216(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm2
mulsd %xmm4, %xmm2
subsd %xmm1, %xmm2
addsd LCPI19_92(%rip), %xmm2
movapd -6080(%rbp), %xmm1 ## 16-byte Reload
addsd %xmm2, %xmm1
movapd %xmm1, -6080(%rbp) ## 16-byte Spill
movapd %xmm3, %xmm1
movapd %xmm5, -14944(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm1
movsd LCPI19_111(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm0
subsd %xmm0, %xmm1
movapd %xmm4, -14976(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm7
addsd %xmm1, %xmm7
movapd %xmm7, -13664(%rbp) ## 16-byte Spill
movapd -10400(%rbp), %xmm1 ## 16-byte Reload
addsd %xmm6, %xmm1
movsd -128(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movapd %xmm1, -10000(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm0
movapd %xmm9, %xmm1
subsd %xmm0, %xmm1
movsd %xmm1, -9648(%rbp) ## 8-byte Spill
movsd -4544(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -5072(%rbp), %xmm0 ## 8-byte Folded Reload
mulsd LCPI19_1(%rip), %xmm0
mulsd -4112(%rbp), %xmm0 ## 16-byte Folded Reload
movsd LCPI19_110(%rip), %xmm11 ## xmm11 = mem[0],zero
mulsd %xmm11, %xmm0
movsd -1632(%rbp), %xmm15 ## 8-byte Reload
## xmm15 = mem[0],zero
mulsd -5104(%rbp), %xmm15 ## 8-byte Folded Reload
mulsd LCPI19_132(%rip), %xmm15
mulsd -4752(%rbp), %xmm15 ## 8-byte Folded Reload
divsd -4832(%rbp), %xmm15 ## 8-byte Folded Reload
mulsd -2896(%rbp), %xmm15 ## 16-byte Folded Reload
addsd %xmm0, %xmm15
movapd -528(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm0
movapd -11488(%rbp), %xmm9 ## 16-byte Reload
mulsd %xmm9, %xmm0
movapd -1248(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm1
mulsd -11472(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -1264(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm0
mulsd -5584(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
addsd %xmm0, %xmm15
movapd %xmm0, %xmm1
mulsd LCPI19_1(%rip), %xmm1
subsd %xmm1, %xmm15
movapd %xmm15, %xmm2
subsd %xmm0, %xmm2
movsd -3456(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd -776(%rbp), %xmm3 ## 8-byte Folded Reload
movsd -7840(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -1696(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm3
movsd %xmm3, -3456(%rbp) ## 8-byte Spill
movapd -272(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm8
mulsd -6352(%rbp), %xmm8 ## 16-byte Folded Reload
movapd -592(%rbp), %xmm13 ## 16-byte Reload
movapd %xmm13, %xmm0
mulsd %xmm3, %xmm0
subsd %xmm0, %xmm8
movapd -400(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm3, %xmm6
movapd %xmm1, %xmm0
mulsd -4592(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm6
movapd %xmm7, %xmm1
mulsd %xmm8, %xmm1
subsd -7792(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm6, %xmm1
movapd %xmm14, %xmm0
movapd -800(%rbp), %xmm12 ## 16-byte Reload
mulsd %xmm12, %xmm0
addsd %xmm1, %xmm0
movapd %xmm4, %xmm1
mulsd %xmm8, %xmm1
subsd -6224(%rbp), %xmm1 ## 8-byte Folded Reload
movapd -912(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm6, %xmm3
addsd %xmm1, %xmm3
movapd -608(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm12, %xmm1
addsd %xmm3, %xmm1
subsd -2672(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm7, %xmm3
mulsd %xmm0, %xmm3
subsd -6208(%rbp), %xmm1 ## 16-byte Folded Reload
mulsd %xmm1, %xmm4
addsd %xmm3, %xmm4
movapd %xmm5, %xmm3
movapd %xmm5, %xmm14
mulsd %xmm8, %xmm3
subsd -8464(%rbp), %xmm3 ## 8-byte Folded Reload
movapd -1504(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm6, -15280(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm5
addsd %xmm3, %xmm5
movapd %xmm12, %xmm6
mulsd -720(%rbp), %xmm6 ## 16-byte Folded Reload
addsd %xmm5, %xmm6
subsd -8448(%rbp), %xmm6 ## 16-byte Folded Reload
movapd %xmm6, -800(%rbp) ## 16-byte Spill
movapd %xmm14, %xmm3
mulsd %xmm6, %xmm3
addsd %xmm4, %xmm3
movsd -7856(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd -5088(%rbp), %xmm4 ## 8-byte Folded Reload
mulsd LCPI19_1(%rip), %xmm4
mulsd -3696(%rbp), %xmm4 ## 16-byte Folded Reload
movsd -432(%rbp), %xmm14 ## 8-byte Reload
## xmm14 = mem[0],zero
mulsd -1712(%rbp), %xmm14 ## 8-byte Folded Reload
mulsd LCPI19_133(%rip), %xmm14
mulsd %xmm11, %xmm4
mulsd -4096(%rbp), %xmm14 ## 8-byte Folded Reload
divsd -4848(%rbp), %xmm14 ## 8-byte Folded Reload
mulsd -1400(%rbp), %xmm14 ## 8-byte Folded Reload
addsd %xmm4, %xmm14
subsd %xmm3, %xmm2
addsd %xmm3, %xmm14
mulsd LCPI19_1(%rip), %xmm3
subsd %xmm3, %xmm14
addsd %xmm2, %xmm14
movapd %xmm14, %xmm4
movapd %xmm7, %xmm2
mulsd -1744(%rbp), %xmm2 ## 8-byte Folded Reload
movapd -2048(%rbp), %xmm3 ## 16-byte Reload
mulsd -6336(%rbp), %xmm3 ## 16-byte Folded Reload
addsd %xmm2, %xmm3
movapd -1104(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm10, %xmm2
addsd %xmm3, %xmm2
movsd -3488(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd -1368(%rbp), %xmm3 ## 8-byte Folded Reload
mulsd LCPI19_1(%rip), %xmm3
mulsd -3984(%rbp), %xmm3 ## 16-byte Folded Reload
mulsd %xmm11, %xmm3
movsd -336(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd -4080(%rbp), %xmm6 ## 16-byte Folded Reload
mulsd LCPI19_134(%rip), %xmm6
mulsd -3968(%rbp), %xmm6 ## 8-byte Folded Reload
divsd -648(%rbp), %xmm6 ## 8-byte Folded Reload
mulsd -2496(%rbp), %xmm6 ## 8-byte Folded Reload
addsd %xmm3, %xmm6
movapd %xmm14, %xmm5
movsd %xmm14, -432(%rbp) ## 8-byte Spill
subsd %xmm2, %xmm14
addsd %xmm2, %xmm6
mulsd LCPI19_1(%rip), %xmm2
subsd %xmm2, %xmm6
movsd %xmm6, -336(%rbp) ## 8-byte Spill
movapd -1856(%rbp), %xmm2 ## 16-byte Reload
mulsd -3296(%rbp), %xmm2 ## 8-byte Folded Reload
subsd -8528(%rbp), %xmm2 ## 16-byte Folded Reload
movapd -1184(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm9, %xmm4
mulsd %xmm9, %xmm3
addsd %xmm2, %xmm3
movapd -2400(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm8, %xmm2
addsd %xmm3, %xmm2
subsd -8512(%rbp), %xmm2 ## 16-byte Folded Reload
mulsd -1872(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm2, %xmm0
movapd %xmm5, %xmm3
mulsd LCPI19_25(%rip), %xmm3
addsd %xmm0, %xmm3
movapd -2640(%rbp), %xmm0 ## 16-byte Reload
movapd -4960(%rbp), %xmm10 ## 16-byte Reload
mulsd %xmm10, %xmm0
movapd -4608(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm2
mulsd -2256(%rbp), %xmm2 ## 16-byte Folded Reload
subsd %xmm2, %xmm0
movapd -816(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm13, %xmm9
mulsd %xmm13, %xmm5
movsd -1552(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm5, %xmm2
addsd %xmm0, %xmm2
movapd -3600(%rbp), %xmm0 ## 16-byte Reload
mulsd -1472(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm2
movapd -1840(%rbp), %xmm0 ## 16-byte Reload
movsd -9744(%rbp), %xmm13 ## 8-byte Reload
## xmm13 = mem[0],zero
mulsd %xmm13, %xmm0
addsd %xmm2, %xmm0
movapd -6352(%rbp), %xmm2 ## 16-byte Reload
mulsd -1456(%rbp), %xmm2 ## 16-byte Folded Reload
subsd %xmm2, %xmm0
movapd -2624(%rbp), %xmm4 ## 16-byte Reload
movsd -3456(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm7, %xmm4
addsd %xmm0, %xmm4
addsd -7808(%rbp), %xmm4 ## 16-byte Folded Reload
addsd -8496(%rbp), %xmm4 ## 16-byte Folded Reload
addsd -9280(%rbp), %xmm4 ## 16-byte Folded Reload
movapd -688(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm11, %xmm2
movsd %xmm4, -12448(%rbp) ## 8-byte Spill
mulsd %xmm4, %xmm2
addsd %xmm3, %xmm2
mulsd -784(%rbp), %xmm6 ## 8-byte Folded Reload
mulsd -272(%rbp), %xmm10 ## 16-byte Folded Reload
subsd %xmm10, %xmm6
movapd %xmm5, -13776(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm9
subsd %xmm9, %xmm6
addsd -4376(%rbp), %xmm2 ## 8-byte Folded Reload
addsd -8480(%rbp), %xmm2 ## 8-byte Folded Reload
addsd -7824(%rbp), %xmm6 ## 8-byte Folded Reload
movapd %xmm6, %xmm0
mulsd LCPI19_103(%rip), %xmm0
subsd %xmm0, %xmm2
movsd -3512(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd -480(%rbp), %xmm3 ## 8-byte Folded Reload
addsd -6272(%rbp), %xmm3 ## 16-byte Folded Reload
addsd %xmm7, %xmm13
movsd %xmm13, -7040(%rbp) ## 8-byte Spill
movapd -10432(%rbp), %xmm4 ## 16-byte Reload
movapd -192(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm5, %xmm4
addsd %xmm13, %xmm4
movapd %xmm5, %xmm7
movapd %xmm4, -16816(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm7
addsd %xmm3, %xmm7
movapd %xmm7, %xmm3
mulsd LCPI19_25(%rip), %xmm3
subsd %xmm3, %xmm2
movapd -11184(%rbp), %xmm3 ## 16-byte Reload
mulsd -2336(%rbp), %xmm3 ## 8-byte Folded Reload
addsd %xmm2, %xmm3
movapd -11168(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm11, %xmm2
movapd %xmm2, -16800(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm2
subsd %xmm2, %xmm3
movapd -2544(%rbp), %xmm2 ## 16-byte Reload
mulsd -3920(%rbp), %xmm2 ## 16-byte Folded Reload
subsd %xmm2, %xmm3
subsd -4368(%rbp), %xmm3 ## 8-byte Folded Reload
movapd -2352(%rbp), %xmm12 ## 16-byte Reload
movsd -1744(%rbp), %xmm13 ## 8-byte Reload
## xmm13 = mem[0],zero
mulsd %xmm12, %xmm13
addsd %xmm3, %xmm13
addsd -336(%rbp), %xmm14 ## 8-byte Folded Reload
movapd %xmm13, %xmm3
mulsd LCPI19_108(%rip), %xmm3
movapd %xmm14, %xmm2
mulsd LCPI19_21(%rip), %xmm2
subsd %xmm3, %xmm2
movsd -432(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd LCPI19_24(%rip), %xmm4
movsd LCPI19_112(%rip), %xmm10 ## xmm10 = mem[0],zero
mulsd %xmm7, %xmm10
addsd %xmm4, %xmm10
movapd -1600(%rbp), %xmm4 ## 16-byte Reload
movsd -3296(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
mulsd %xmm9, %xmm4
subsd -7264(%rbp), %xmm4 ## 8-byte Folded Reload
movapd -1184(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm5
mulsd -5584(%rbp), %xmm5 ## 16-byte Folded Reload
addsd %xmm4, %xmm5
mulsd LCPI19_14(%rip), %xmm15
addsd %xmm5, %xmm15
movapd -2128(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm8, %xmm4
addsd %xmm15, %xmm4
subsd -12240(%rbp), %xmm4 ## 16-byte Folded Reload
movapd -800(%rbp), %xmm3 ## 16-byte Reload
movsd -1872(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm5, %xmm3
addsd %xmm4, %xmm3
addsd -12224(%rbp), %xmm3 ## 16-byte Folded Reload
addsd -12208(%rbp), %xmm3 ## 16-byte Folded Reload
addsd -12192(%rbp), %xmm3 ## 16-byte Folded Reload
movapd %xmm3, %xmm4
movapd %xmm6, %xmm11
movsd LCPI19_47(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm11
addsd %xmm4, %xmm11
addsd -7256(%rbp), %xmm11 ## 8-byte Folded Reload
addsd -9232(%rbp), %xmm11 ## 16-byte Folded Reload
movapd -448(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm11, %xmm4
addsd %xmm10, %xmm4
movapd %xmm9, %xmm3
mulsd -1920(%rbp), %xmm3 ## 16-byte Folded Reload
subsd -7248(%rbp), %xmm3 ## 8-byte Folded Reload
movapd %xmm3, %xmm9
mulsd -11472(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm3, %xmm0
mulsd -2432(%rbp), %xmm8 ## 16-byte Folded Reload
addsd %xmm0, %xmm8
subsd -12176(%rbp), %xmm8 ## 16-byte Folded Reload
mulsd %xmm5, %xmm1
addsd %xmm8, %xmm1
addsd -8576(%rbp), %xmm1 ## 8-byte Folded Reload
addsd -3088(%rbp), %xmm1 ## 16-byte Folded Reload
addsd -8560(%rbp), %xmm1 ## 16-byte Folded Reload
movsd LCPI19_48(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm6
addsd %xmm1, %xmm6
addsd -6480(%rbp), %xmm6 ## 16-byte Folded Reload
subsd -7240(%rbp), %xmm6 ## 8-byte Folded Reload
movapd -512(%rbp), %xmm15 ## 16-byte Reload
movapd %xmm15, %xmm1
mulsd %xmm6, %xmm1
addsd %xmm4, %xmm1
movapd -2912(%rbp), %xmm3 ## 16-byte Reload
movapd -3920(%rbp), %xmm10 ## 16-byte Reload
mulsd %xmm10, %xmm3
subsd %xmm3, %xmm1
subsd -12160(%rbp), %xmm1 ## 16-byte Folded Reload
movapd -6336(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm12, %xmm3
addsd %xmm1, %xmm3
movapd %xmm3, %xmm12
movsd -3528(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movsd -480(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
mulsd %xmm9, %xmm1
movsd -1952(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
movapd -192(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm5, %xmm4
movapd %xmm5, %xmm3
movsd %xmm4, -12648(%rbp) ## 8-byte Spill
mulsd %xmm4, %xmm3
subsd %xmm3, %xmm1
addsd -7232(%rbp), %xmm1 ## 8-byte Folded Reload
movapd %xmm1, %xmm3
mulsd LCPI19_73(%rip), %xmm3
addsd %xmm12, %xmm3
mulsd LCPI19_15(%rip), %xmm13
movapd %xmm14, %xmm4
movsd LCPI19_31(%rip), %xmm12 ## xmm12 = mem[0],zero
mulsd %xmm12, %xmm4
subsd %xmm4, %xmm13
addsd %xmm3, %xmm13
movsd LCPI19_13(%rip), %xmm8 ## xmm8 = mem[0],zero
mulsd %xmm8, %xmm3
addsd %xmm2, %xmm3
movsd LCPI19_23(%rip), %xmm2 ## xmm2 = mem[0],zero
movsd -432(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm2, %xmm4
mulsd %xmm2, %xmm7
subsd %xmm7, %xmm4
movapd %xmm15, %xmm0
movapd -13664(%rbp), %xmm15 ## 16-byte Reload
movapd %xmm11, -15232(%rbp) ## 16-byte Spill
mulsd %xmm11, %xmm0
addsd %xmm4, %xmm0
movapd -448(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm6, -15264(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm2
subsd %xmm2, %xmm0
mulsd -3248(%rbp), %xmm10 ## 16-byte Folded Reload
subsd %xmm10, %xmm0
subsd -7224(%rbp), %xmm0 ## 8-byte Folded Reload
movapd -2752(%rbp), %xmm2 ## 16-byte Reload
mulsd -2352(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm0, %xmm2
movsd -336(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd LCPI19_64(%rip), %xmm0
addsd %xmm2, %xmm0
mulsd LCPI19_72(%rip), %xmm1
addsd %xmm0, %xmm1
movsd -3520(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm9, %xmm0
addsd -12144(%rbp), %xmm0 ## 16-byte Folded Reload
movsd -7040(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
addsd -3464(%rbp), %xmm10 ## 8-byte Folded Reload
movsd -2240(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm5, %xmm2
addsd %xmm10, %xmm2
movapd %xmm5, %xmm4
movsd %xmm2, -12640(%rbp) ## 8-byte Spill
mulsd %xmm2, %xmm4
addsd %xmm0, %xmm4
addsd %xmm1, %xmm3
movsd LCPI19_114(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm4, %xmm5
addsd %xmm3, %xmm5
mulsd %xmm8, %xmm1
addsd %xmm13, %xmm1
mulsd %xmm12, %xmm4
addsd %xmm1, %xmm4
movapd -1312(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm3
mulsd %xmm5, %xmm3
movapd -1328(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm0
mulsd %xmm4, %xmm0
subsd %xmm0, %xmm3
movapd %xmm14, %xmm1
movsd LCPI19_111(%rip), %xmm11 ## xmm11 = mem[0],zero
mulsd %xmm11, %xmm1
movapd -1216(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm0
mulsd %xmm3, %xmm0
subsd %xmm1, %xmm0
movapd %xmm14, %xmm1
movsd LCPI19_115(%rip), %xmm8 ## xmm8 = mem[0],zero
mulsd %xmm8, %xmm1
movapd %xmm14, %xmm12
movapd %xmm4, -14912(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm2
subsd %xmm1, %xmm2
movapd %xmm6, %xmm1
movapd %xmm5, -14896(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm1
addsd %xmm2, %xmm1
movsd -5336(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
movsd -2800(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd %xmm6, %xmm2
addsd -3328(%rbp), %xmm2 ## 8-byte Folded Reload
movsd -1968(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
movsd -1200(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm7, %xmm5
addsd %xmm10, %xmm5
movapd %xmm7, %xmm4
movsd %xmm5, -12656(%rbp) ## 8-byte Spill
mulsd %xmm5, %xmm4
addsd %xmm2, %xmm4
mulsd %xmm8, %xmm4
addsd %xmm1, %xmm4
movsd -7400(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm6, %xmm1
movsd -7392(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm7, %xmm5
movapd %xmm7, %xmm2
movsd %xmm5, -12624(%rbp) ## 8-byte Spill
mulsd %xmm5, %xmm2
subsd %xmm2, %xmm1
addsd -4304(%rbp), %xmm1 ## 8-byte Folded Reload
mulsd LCPI19_116(%rip), %xmm1
subsd %xmm1, %xmm4
movapd -1936(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm1
mulsd %xmm4, %xmm1
subsd %xmm1, %xmm0
movsd -7384(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -1280(%rbp), %xmm1 ## 8-byte Folded Reload
addsd -6320(%rbp), %xmm1 ## 8-byte Folded Reload
movsd -2184(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
movapd %xmm6, %xmm5
movsd -704(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm2, %xmm5
addsd %xmm10, %xmm5
movsd %xmm5, -12632(%rbp) ## 8-byte Spill
mulsd %xmm5, %xmm2
addsd %xmm1, %xmm2
movapd %xmm2, %xmm14
mulsd %xmm11, %xmm14
addsd %xmm0, %xmm14
movsd %xmm12, -9568(%rbp) ## 8-byte Spill
movapd %xmm12, %xmm0
movsd LCPI19_30(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm5, %xmm0
movapd %xmm7, %xmm1
movapd %xmm3, -14784(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm1
subsd %xmm0, %xmm1
movapd %xmm9, %xmm0
movapd %xmm4, -14768(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm0
addsd %xmm1, %xmm0
mulsd %xmm5, %xmm2
addsd %xmm0, %xmm2
movapd -5040(%rbp), %xmm11 ## 16-byte Reload
addsd %xmm2, %xmm11
movapd -10384(%rbp), %xmm12 ## 16-byte Reload
movapd %xmm6, %xmm0
addsd %xmm6, %xmm12
movsd -128(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
movapd %xmm7, %xmm5
mulsd %xmm12, %xmm7
movsd %xmm10, -7040(%rbp) ## 8-byte Spill
addsd %xmm10, %xmm7
movapd -6080(%rbp), %xmm13 ## 16-byte Reload
testq %rax, %rax
je LBB19_54
## %bb.53:
movapd -2880(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm4
mulsd %xmm13, %xmm4
movapd -2784(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm1
movapd %xmm15, %xmm10
mulsd %xmm15, %xmm1
movapd %xmm5, %xmm2
mulsd -9648(%rbp), %xmm2 ## 8-byte Folded Reload
movsd -280(%rbp), %xmm15 ## 8-byte Reload
## xmm15 = mem[0],zero
movapd %xmm15, %xmm0
mulsd %xmm12, %xmm0
addsd %xmm2, %xmm0
movapd %xmm0, %xmm3
mulsd LCPI19_29(%rip), %xmm3
subsd %xmm3, %xmm1
subsd %xmm1, %xmm4
addsd %xmm4, %xmm4
movsd LCPI19_119(%rip), %xmm3 ## xmm3 = mem[0],zero
movsd -9568(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
movapd %xmm2, %xmm1
mulsd %xmm3, %xmm1
movapd %xmm8, %xmm6
mulsd %xmm14, %xmm6
subsd %xmm1, %xmm6
movapd %xmm9, %xmm1
mulsd %xmm11, %xmm1
subsd %xmm1, %xmm6
mulsd -10000(%rbp), %xmm15 ## 16-byte Folded Reload
mulsd %xmm7, %xmm5
addsd %xmm15, %xmm5
movapd %xmm5, %xmm1
mulsd %xmm3, %xmm1
addsd %xmm6, %xmm1
addsd %xmm1, %xmm1
addsd %xmm4, %xmm1
movapd %xmm2, %xmm4
movsd LCPI19_29(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm4
movapd %xmm9, %xmm6
mulsd %xmm14, %xmm6
subsd %xmm4, %xmm6
movapd %xmm8, %xmm4
mulsd %xmm11, %xmm4
addsd %xmm6, %xmm4
mulsd %xmm2, %xmm5
addsd %xmm4, %xmm5
mulsd %xmm3, %xmm0
movapd %xmm8, %xmm2
mulsd %xmm10, %xmm2
subsd %xmm0, %xmm2
movapd %xmm9, %xmm0
mulsd %xmm13, %xmm0
addsd %xmm2, %xmm0
movsd -1536(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
movapd %xmm4, %xmm2
mulsd %xmm1, %xmm2
addsd %xmm5, %xmm0
movsd -1072(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm0, %xmm3
subsd %xmm3, %xmm2
movsd -2272(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm3, %xmm1
mulsd -880(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
mulsd %xmm4, %xmm2
movsd LCPI19_1(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm2
mulsd %xmm3, %xmm0
mulsd %xmm1, %xmm0
subsd %xmm0, %xmm2
movsd %xmm2, 32(%rax)
LBB19_54:
movsd %xmm7, -12376(%rbp) ## 8-byte Spill
movapd %xmm14, -14672(%rbp) ## 16-byte Spill
movapd %xmm12, -10816(%rbp) ## 16-byte Spill
movapd %xmm11, -5040(%rbp) ## 16-byte Spill
movsd -232(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
subsd -216(%rbp), %xmm8 ## 8-byte Folded Reload
movapd %xmm8, %xmm12
movsd %xmm8, -800(%rbp) ## 8-byte Spill
addsd -376(%rbp), %xmm12 ## 8-byte Folded Reload
movsd -304(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
addsd -160(%rbp), %xmm10 ## 8-byte Folded Reload
movapd %xmm10, %xmm11
addsd -296(%rbp), %xmm11 ## 8-byte Folded Reload
movapd %xmm11, %xmm3
movapd %xmm11, -336(%rbp) ## 16-byte Spill
addsd -832(%rbp), %xmm3 ## 16-byte Folded Reload
movapd %xmm3, %xmm15
divsd -3408(%rbp), %xmm15 ## 16-byte Folded Reload
movapd -4912(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm15, %xmm5
movapd %xmm5, %xmm1
mulsd -3664(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm12, %xmm1
addsd -2160(%rbp), %xmm1 ## 16-byte Folded Reload
movsd -3360(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm1, %xmm0
movsd LCPI19_1(%rip), %xmm13 ## xmm13 = mem[0],zero
mulsd %xmm13, %xmm0
mulsd -4736(%rbp), %xmm0 ## 8-byte Folded Reload
movsd LCPI19_37(%rip), %xmm6 ## xmm6 = mem[0],zero
mulsd %xmm6, %xmm0
mulsd -2288(%rbp), %xmm1 ## 8-byte Folded Reload
movsd %xmm1, -4928(%rbp) ## 8-byte Spill
movsd -3776(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm1, %xmm2
movsd %xmm2, -4496(%rbp) ## 8-byte Spill
movsd -936(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
mulsd %xmm2, %xmm9
movsd LCPI19_43(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm7, %xmm9
addsd %xmm0, %xmm9
addsd -672(%rbp), %xmm11 ## 16-byte Folded Reload
movapd %xmm11, %xmm0
divsd -3392(%rbp), %xmm0 ## 16-byte Folded Reload
movsd %xmm0, -4512(%rbp) ## 8-byte Spill
movapd -3424(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm0, %xmm2
movapd %xmm2, %xmm4
mulsd -3680(%rbp), %xmm4 ## 16-byte Folded Reload
addsd %xmm12, %xmm4
addsd -2176(%rbp), %xmm4 ## 16-byte Folded Reload
movsd -1664(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm4, %xmm0
mulsd %xmm13, %xmm0
mulsd -1680(%rbp), %xmm0 ## 8-byte Folded Reload
mulsd %xmm6, %xmm0
movapd %xmm6, %xmm1
mulsd -1792(%rbp), %xmm4 ## 8-byte Folded Reload
movsd %xmm4, -3904(%rbp) ## 8-byte Spill
movsd -3808(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd %xmm4, %xmm6
movsd %xmm6, -2752(%rbp) ## 8-byte Spill
movsd -1144(%rbp), %xmm14 ## 8-byte Reload
## xmm14 = mem[0],zero
mulsd %xmm6, %xmm14
mulsd %xmm7, %xmm14
addsd %xmm0, %xmm14
movapd %xmm14, -3296(%rbp) ## 16-byte Spill
movapd %xmm10, %xmm0
addsd -4240(%rbp), %xmm0 ## 16-byte Folded Reload
movsd %xmm0, -3448(%rbp) ## 8-byte Spill
divsd -2952(%rbp), %xmm0 ## 8-byte Folded Reload
movsd %xmm0, -432(%rbp) ## 8-byte Spill
movsd -928(%rbp), %xmm13 ## 8-byte Reload
## xmm13 = mem[0],zero
mulsd %xmm0, %xmm13
movapd %xmm13, %xmm0
mulsd -5552(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm8, %xmm0
addsd -3216(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm0, %xmm4
mulsd -2832(%rbp), %xmm4 ## 8-byte Folded Reload
mulsd LCPI19_1(%rip), %xmm4
mulsd -4624(%rbp), %xmm4 ## 16-byte Folded Reload
mulsd %xmm1, %xmm4
mulsd -2464(%rbp), %xmm0 ## 8-byte Folded Reload
movsd %xmm0, -5520(%rbp) ## 8-byte Spill
movsd -1648(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd %xmm0, %xmm6
movsd %xmm6, -2656(%rbp) ## 8-byte Spill
movsd -1136(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm6, %xmm0
mulsd %xmm7, %xmm0
addsd %xmm4, %xmm0
movapd %xmm0, %xmm1
movsd %xmm0, -9728(%rbp) ## 8-byte Spill
movsd LCPI19_50(%rip), %xmm0 ## xmm0 = mem[0],zero
movapd -336(%rbp), %xmm8 ## 16-byte Reload
mulsd %xmm0, %xmm8
movapd %xmm8, -336(%rbp) ## 16-byte Spill
movapd -8112(%rbp), %xmm6 ## 16-byte Reload
subsd %xmm8, %xmm6
movapd %xmm6, -7856(%rbp) ## 16-byte Spill
movsd LCPI19_74(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm10
movsd %xmm10, -6352(%rbp) ## 8-byte Spill
subsd %xmm10, %xmm6
addsd -3552(%rbp), %xmm6 ## 8-byte Folded Reload
movapd %xmm6, -6320(%rbp) ## 16-byte Spill
movsd -304(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
movsd LCPI19_85(%rip), %xmm8 ## xmm8 = mem[0],zero
mulsd %xmm8, %xmm4
movapd %xmm6, %xmm7
subsd %xmm4, %xmm7
movapd %xmm7, -6336(%rbp) ## 16-byte Spill
movapd %xmm9, %xmm10
addsd %xmm14, %xmm10
movapd %xmm10, %xmm6
addsd %xmm1, %xmm6
movsd %xmm6, -7032(%rbp) ## 8-byte Spill
movapd %xmm7, %xmm4
movsd -1296(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm7, %xmm4
subsd %xmm4, %xmm6
movapd %xmm7, %xmm4
movsd %xmm6, -12608(%rbp) ## 8-byte Spill
mulsd %xmm6, %xmm4
movsd -2000(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
addsd %xmm4, %xmm6
movsd -800(%rbp), %xmm14 ## 8-byte Reload
## xmm14 = mem[0],zero
mulsd %xmm0, %xmm14
movsd %xmm14, -800(%rbp) ## 8-byte Spill
mulsd LCPI19_50(%rip), %xmm12
movapd %xmm12, -3120(%rbp) ## 16-byte Spill
addsd -8128(%rbp), %xmm12 ## 16-byte Folded Reload
movapd %xmm12, -7840(%rbp) ## 16-byte Spill
movapd %xmm14, %xmm0
addsd %xmm12, %xmm0
addsd -10512(%rbp), %xmm0 ## 16-byte Folded Reload
movsd %xmm0, -1744(%rbp) ## 8-byte Spill
movsd -216(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm8, %xmm4
subsd %xmm4, %xmm0
movsd %xmm0, -3488(%rbp) ## 8-byte Spill
mulsd -960(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm6, %xmm0
movsd %xmm0, -3600(%rbp) ## 8-byte Spill
mulsd -5696(%rbp), %xmm3 ## 16-byte Folded Reload
mulsd -1128(%rbp), %xmm5 ## 8-byte Folded Reload
movapd %xmm5, %xmm4
mulsd LCPI19_1(%rip), %xmm4
subsd %xmm4, %xmm3
movsd LCPI19_44(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm7, %xmm5
movapd %xmm7, %xmm14
mulsd -1688(%rbp), %xmm5 ## 8-byte Folded Reload
movsd LCPI19_45(%rip), %xmm8 ## xmm8 = mem[0],zero
mulsd %xmm8, %xmm5
movapd %xmm8, %xmm12
subsd %xmm5, %xmm3
movapd -144(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm5
movapd %xmm0, %xmm8
mulsd %xmm9, %xmm5
divsd -5680(%rbp), %xmm3 ## 16-byte Folded Reload
movapd -4192(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm3, %xmm0
movapd -256(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm4
mulsd %xmm0, %xmm4
movapd %xmm0, %xmm6
movapd %xmm0, -4944(%rbp) ## 16-byte Spill
subsd %xmm4, %xmm5
movapd %xmm3, %xmm4
mulsd -5200(%rbp), %xmm4 ## 16-byte Folded Reload
mulsd -1672(%rbp), %xmm15 ## 8-byte Folded Reload
subsd %xmm15, %xmm4
movsd %xmm4, -9736(%rbp) ## 8-byte Spill
movapd -96(%rbp), %xmm15 ## 16-byte Reload
movapd %xmm15, %xmm0
mulsd %xmm6, %xmm0
movapd %xmm8, %xmm3
mulsd %xmm4, %xmm3
subsd %xmm3, %xmm0
movsd %xmm0, -1632(%rbp) ## 8-byte Spill
movapd %xmm7, %xmm6
mulsd %xmm4, %xmm6
movapd %xmm15, %xmm3
mulsd %xmm9, %xmm3
subsd %xmm3, %xmm6
movapd %xmm6, -13744(%rbp) ## 16-byte Spill
movapd -208(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm0, %xmm3
subsd -4976(%rbp), %xmm3 ## 8-byte Folded Reload
addsd %xmm6, %xmm3
movapd -64(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm5, -3920(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm0
addsd %xmm3, %xmm0
movapd -2384(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm5, %xmm4
subsd -7536(%rbp), %xmm4 ## 16-byte Folded Reload
subsd -2680(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm0, -11456(%rbp) ## 16-byte Spill
movapd -2096(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm0, %xmm3
addsd %xmm4, %xmm3
mulsd -5744(%rbp), %xmm11 ## 16-byte Folded Reload
mulsd -2016(%rbp), %xmm2 ## 8-byte Folded Reload
movapd %xmm2, %xmm4
mulsd LCPI19_1(%rip), %xmm4
subsd %xmm4, %xmm11
mulsd %xmm14, %xmm2
mulsd -3376(%rbp), %xmm2 ## 8-byte Folded Reload
mulsd %xmm12, %xmm2
subsd %xmm2, %xmm11
movapd %xmm8, %xmm12
movapd %xmm8, %xmm14
movapd -3296(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm1, %xmm14
divsd -5712(%rbp), %xmm11 ## 16-byte Folded Reload
movsd %xmm11, -3440(%rbp) ## 8-byte Spill
movapd -4272(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm11, %xmm0
movapd %xmm7, %xmm2
mulsd %xmm0, %xmm2
movapd %xmm0, %xmm11
movapd %xmm0, -4960(%rbp) ## 16-byte Spill
subsd %xmm2, %xmm14
movapd -2368(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm14, %xmm2
addsd %xmm3, %xmm2
movapd -336(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm6
mulsd %xmm8, %xmm6
movapd -992(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm6, %xmm3
movapd -3120(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm4
mulsd -1776(%rbp), %xmm4 ## 16-byte Folded Reload
addsd %xmm3, %xmm4
movapd %xmm0, %xmm3
mulsd %xmm7, %xmm3
mulsd %xmm15, %xmm5
addsd %xmm3, %xmm5
movapd -1568(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm5, %xmm3
movapd %xmm5, -5504(%rbp) ## 16-byte Spill
addsd %xmm4, %xmm3
movapd -832(%rbp), %xmm4 ## 16-byte Reload
mulsd -4944(%rbp), %xmm4 ## 16-byte Folded Reload
subsd %xmm3, %xmm4
movapd -2064(%rbp), %xmm3 ## 16-byte Reload
movsd %xmm9, -12616(%rbp) ## 8-byte Spill
mulsd %xmm9, %xmm3
subsd %xmm3, %xmm4
movapd -672(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm11, %xmm0
addsd %xmm4, %xmm0
movapd -2608(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm1, %xmm3
subsd %xmm3, %xmm0
subsd -8592(%rbp), %xmm2 ## 16-byte Folded Reload
addsd LCPI19_106(%rip), %xmm2
addsd -4400(%rbp), %xmm0 ## 8-byte Folded Reload
addsd -7776(%rbp), %xmm0 ## 8-byte Folded Reload
addsd -9296(%rbp), %xmm0 ## 16-byte Folded Reload
movapd -624(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm3
movapd %xmm1, %xmm9
movapd %xmm0, -15120(%rbp) ## 16-byte Spill
mulsd %xmm0, %xmm3
addsd %xmm2, %xmm3
movapd -7856(%rbp), %xmm2 ## 16-byte Reload
movapd -48(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm0, %xmm2
subsd %xmm2, %xmm10
movapd %xmm0, %xmm4
movapd %xmm0, %xmm11
movsd %xmm10, -12600(%rbp) ## 8-byte Spill
mulsd %xmm10, %xmm4
addsd -5568(%rbp), %xmm4 ## 16-byte Folded Reload
movapd -7840(%rbp), %xmm8 ## 16-byte Reload
movsd -320(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm0, %xmm8
movapd %xmm0, %xmm10
addsd %xmm4, %xmm8
movapd %xmm8, %xmm4
movsd LCPI19_25(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm4
subsd %xmm4, %xmm3
movapd %xmm6, -13728(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm12
mulsd %xmm5, %xmm7
addsd %xmm12, %xmm7
movapd -3120(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm4
mulsd -2512(%rbp), %xmm4 ## 8-byte Folded Reload
addsd %xmm7, %xmm4
addsd -4720(%rbp), %xmm4 ## 16-byte Folded Reload
movapd %xmm4, %xmm0
movsd LCPI19_103(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm0
addsd %xmm3, %xmm0
movapd -336(%rbp), %xmm3 ## 16-byte Reload
movapd -992(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm2, %xmm3
movapd -6032(%rbp), %xmm1 ## 16-byte Reload
subsd %xmm3, %xmm1
movapd %xmm1, -11104(%rbp) ## 16-byte Spill
mulsd %xmm9, %xmm1
movapd %xmm11, %xmm3
movapd %xmm1, -16784(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm3
addsd %xmm0, %xmm3
movapd %xmm5, %xmm0
mulsd %xmm2, %xmm0
movapd -6016(%rbp), %xmm1 ## 16-byte Reload
subsd %xmm0, %xmm1
movapd %xmm1, -11088(%rbp) ## 16-byte Spill
movapd %xmm1, %xmm0
mulsd -72(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm3, %xmm0
movapd %xmm11, %xmm2
mulsd -9728(%rbp), %xmm2 ## 8-byte Folded Reload
movapd -1824(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm2, %xmm3
movapd %xmm2, %xmm5
movapd %xmm2, -2000(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm3
movsd -3448(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -6576(%rbp), %xmm1 ## 8-byte Folded Reload
mulsd -2192(%rbp), %xmm13 ## 8-byte Folded Reload
movapd %xmm13, %xmm0
mulsd LCPI19_1(%rip), %xmm0
subsd %xmm0, %xmm1
movapd %xmm1, %xmm0
mulsd LCPI19_44(%rip), %xmm13
mulsd -5536(%rbp), %xmm13 ## 16-byte Folded Reload
mulsd LCPI19_45(%rip), %xmm13
subsd %xmm13, %xmm0
divsd -6560(%rbp), %xmm0 ## 8-byte Folded Reload
mulsd -2696(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -432(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -1624(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm0
movsd %xmm0, -3448(%rbp) ## 8-byte Spill
movapd -64(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm2
mulsd %xmm5, %xmm2
movapd %xmm11, %xmm1
mulsd %xmm0, %xmm1
movapd %xmm1, -4544(%rbp) ## 16-byte Spill
movapd -208(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm1, %xmm0
addsd -5424(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm2
subsd -8544(%rbp), %xmm3 ## 16-byte Folded Reload
subsd -7520(%rbp), %xmm2 ## 8-byte Folded Reload
movsd %xmm2, -432(%rbp) ## 8-byte Spill
movapd -1008(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm2, %xmm7
addsd %xmm3, %xmm7
movapd -6320(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm11, %xmm0
movsd -7032(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
subsd %xmm0, %xmm1
movapd %xmm11, %xmm0
movsd %xmm1, -12592(%rbp) ## 8-byte Spill
mulsd %xmm1, %xmm0
addsd -6288(%rbp), %xmm0 ## 16-byte Folded Reload
movsd -1744(%rbp), %xmm15 ## 8-byte Reload
## xmm15 = mem[0],zero
mulsd %xmm10, %xmm15
addsd %xmm0, %xmm15
movapd %xmm7, %xmm0
movsd LCPI19_15(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
movapd %xmm15, %xmm2
movsd LCPI19_22(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm2
subsd %xmm0, %xmm2
movsd %xmm2, -7600(%rbp) ## 8-byte Spill
movapd -1520(%rbp), %xmm0 ## 16-byte Reload
movsd -1632(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm5, %xmm0
subsd -3432(%rbp), %xmm0 ## 8-byte Folded Reload
movapd -1488(%rbp), %xmm13 ## 16-byte Reload
movapd -13744(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm2, %xmm13
addsd %xmm0, %xmm13
movapd -176(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm1
movapd %xmm0, %xmm10
movapd -3920(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm3, %xmm1
addsd %xmm13, %xmm1
movapd -896(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm3, %xmm0
movapd %xmm3, %xmm11
subsd -4392(%rbp), %xmm0 ## 8-byte Folded Reload
subsd -5408(%rbp), %xmm1 ## 16-byte Folded Reload
movapd -2096(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm13
mulsd %xmm1, %xmm13
movapd %xmm1, %xmm12
movapd %xmm1, -11440(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm13
movapd -1904(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm14, %xmm0
addsd %xmm13, %xmm0
subsd -8432(%rbp), %xmm0 ## 16-byte Folded Reload
addsd LCPI19_77(%rip), %xmm0
addsd -4384(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm4, %xmm3
movsd LCPI19_48(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm3
addsd %xmm0, %xmm3
movapd -576(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm5, %xmm0
subsd -5392(%rbp), %xmm0 ## 8-byte Folded Reload
movapd -864(%rbp), %xmm13 ## 16-byte Reload
movapd %xmm2, %xmm1
mulsd %xmm2, %xmm13
addsd %xmm0, %xmm13
movapd -1888(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm11, %xmm1
mulsd %xmm11, %xmm0
movapd -736(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm5, %xmm1
movapd %xmm5, %xmm2
addsd %xmm13, %xmm1
subsd -8416(%rbp), %xmm0 ## 8-byte Folded Reload
subsd -2648(%rbp), %xmm1 ## 8-byte Folded Reload
mulsd %xmm1, %xmm6
movapd %xmm1, %xmm5
movapd %xmm1, -3920(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm6
mulsd -11456(%rbp), %xmm9 ## 16-byte Folded Reload
movapd %xmm10, %xmm0
mulsd %xmm12, %xmm0
addsd %xmm9, %xmm0
movapd %xmm2, %xmm1
mulsd %xmm5, %xmm1
addsd %xmm0, %xmm1
movsd LCPI19_14(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm1
addsd %xmm6, %xmm1
movapd -1584(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm14, %xmm0
addsd %xmm1, %xmm0
movapd %xmm8, %xmm13
movsd LCPI19_24(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm13
addsd -8352(%rbp), %xmm3 ## 16-byte Folded Reload
addsd -9216(%rbp), %xmm3 ## 16-byte Folded Reload
movapd -752(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm11, %xmm1
mulsd %xmm3, %xmm1
subsd -8384(%rbp), %xmm0 ## 16-byte Folded Reload
addsd LCPI19_78(%rip), %xmm0
addsd %xmm13, %xmm1
addsd -8400(%rbp), %xmm0 ## 8-byte Folded Reload
movsd LCPI19_47(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm4
addsd %xmm0, %xmm4
subsd -4360(%rbp), %xmm4 ## 8-byte Folded Reload
addsd -9200(%rbp), %xmm4 ## 16-byte Folded Reload
movapd -496(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm0
mulsd %xmm4, %xmm0
addsd %xmm1, %xmm0
movsd -2032(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movapd -2000(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm6, %xmm1
addsd %xmm0, %xmm1
movapd -3280(%rbp), %xmm13 ## 16-byte Reload
movapd %xmm13, %xmm12
mulsd %xmm6, %xmm12
movapd -1344(%rbp), %xmm0 ## 16-byte Reload
movapd -4544(%rbp), %xmm9 ## 16-byte Reload
mulsd %xmm9, %xmm0
addsd -7504(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm12
subsd -8368(%rbp), %xmm1 ## 8-byte Folded Reload
subsd -7488(%rbp), %xmm12 ## 16-byte Folded Reload
movapd -1008(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm0
mulsd %xmm12, %xmm0
movapd %xmm12, -6304(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm0
movsd -6352(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
movapd -48(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm1, %xmm2
movsd %xmm2, -12584(%rbp) ## 8-byte Spill
mulsd %xmm2, %xmm1
movsd -800(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
mulsd -320(%rbp), %xmm10 ## 8-byte Folded Reload
addsd %xmm1, %xmm10
addsd LCPI19_79(%rip), %xmm0
addsd -4064(%rbp), %xmm10 ## 16-byte Folded Reload
movapd %xmm10, %xmm2
movsd LCPI19_73(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm2
addsd %xmm0, %xmm2
movsd LCPI19_23(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm8
movapd %xmm3, -15024(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm5
subsd %xmm5, %xmm8
movapd %xmm11, %xmm0
movapd %xmm4, -15040(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm0
addsd %xmm8, %xmm0
movapd -1808(%rbp), %xmm1 ## 16-byte Reload
movapd -2000(%rbp), %xmm11 ## 16-byte Reload
mulsd %xmm11, %xmm1
addsd %xmm0, %xmm1
movapd -2592(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm3, %xmm11
movapd -2416(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm9, %xmm0
addsd -4352(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm11
subsd -8336(%rbp), %xmm1 ## 16-byte Folded Reload
subsd -4344(%rbp), %xmm11 ## 8-byte Folded Reload
mulsd %xmm11, %xmm6
movapd %xmm11, -2000(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm6
movapd -64(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm1
mulsd -432(%rbp), %xmm1 ## 8-byte Folded Reload
mulsd %xmm12, %xmm13
addsd %xmm1, %xmm13
movapd %xmm3, %xmm1
mulsd %xmm11, %xmm1
addsd %xmm13, %xmm1
movsd LCPI19_64(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm1
addsd %xmm6, %xmm1
addsd LCPI19_80(%rip), %xmm1
movsd LCPI19_72(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm10
addsd %xmm1, %xmm10
movsd LCPI19_108(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm7
movsd LCPI19_114(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm15
subsd %xmm15, %xmm7
movsd -7600(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
addsd %xmm2, %xmm1
movsd LCPI19_13(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm2
addsd %xmm7, %xmm2
addsd %xmm10, %xmm2
mulsd %xmm0, %xmm10
addsd %xmm1, %xmm10
movsd LCPI19_115(%rip), %xmm0 ## xmm0 = mem[0],zero
movsd -3600(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm0, %xmm1
movapd -1616(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm0
mulsd %xmm10, %xmm0
subsd %xmm1, %xmm0
movapd -1088(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm1
mulsd %xmm2, %xmm1
addsd %xmm0, %xmm1
movsd LCPI19_87(%rip), %xmm4 ## xmm4 = mem[0],zero
subsd %xmm1, %xmm4
movsd LCPI19_116(%rip), %xmm11 ## xmm11 = mem[0],zero
movsd -1120(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm11, %xmm0
subsd %xmm0, %xmm4
movapd %xmm2, -14720(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm5
movapd %xmm3, %xmm0
movapd %xmm10, -14688(%rbp) ## 16-byte Spill
mulsd %xmm10, %xmm0
subsd %xmm0, %xmm5
movapd -9376(%rbp), %xmm1 ## 16-byte Reload
addsd -6336(%rbp), %xmm1 ## 16-byte Folded Reload
movapd %xmm1, %xmm0
movapd %xmm1, %xmm8
movapd %xmm1, -3600(%rbp) ## 16-byte Spill
movsd -560(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm2, %xmm0
movsd -7032(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
movapd %xmm7, %xmm1
subsd %xmm0, %xmm1
movapd %xmm2, %xmm0
movsd %xmm1, -12576(%rbp) ## 8-byte Spill
mulsd %xmm1, %xmm0
movapd -9360(%rbp), %xmm1 ## 16-byte Reload
addsd -3488(%rbp), %xmm1 ## 8-byte Folded Reload
movapd %xmm1, -7600(%rbp) ## 16-byte Spill
mulsd -976(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm0, %xmm1
movapd -2144(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm0
movapd %xmm2, %xmm6
mulsd %xmm5, %xmm0
movapd %xmm1, %xmm2
movsd LCPI19_30(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm2
subsd %xmm2, %xmm0
movapd -848(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm2
mulsd %xmm4, %xmm2
subsd %xmm0, %xmm2
movapd %xmm2, -13520(%rbp) ## 16-byte Spill
movapd %xmm3, %xmm0
movapd %xmm5, -14640(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm0
movsd LCPI19_111(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm1
subsd %xmm1, %xmm0
movapd %xmm6, %xmm1
movapd %xmm4, -14656(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm1
addsd %xmm0, %xmm1
movapd %xmm1, -13504(%rbp) ## 16-byte Spill
movsd -128(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm8, %xmm0
movapd %xmm7, %xmm1
subsd %xmm0, %xmm1
movsd %xmm1, -9560(%rbp) ## 8-byte Spill
movsd -4928(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -4800(%rbp), %xmm0 ## 8-byte Folded Reload
mulsd LCPI19_1(%rip), %xmm0
mulsd -2864(%rbp), %xmm0 ## 16-byte Folded Reload
movsd LCPI19_110(%rip), %xmm15 ## xmm15 = mem[0],zero
mulsd %xmm15, %xmm0
movsd -4496(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
mulsd -3744(%rbp), %xmm12 ## 8-byte Folded Reload
mulsd LCPI19_132(%rip), %xmm12
mulsd -3344(%rbp), %xmm12 ## 8-byte Folded Reload
divsd -3136(%rbp), %xmm12 ## 8-byte Folded Reload
mulsd -1424(%rbp), %xmm12 ## 16-byte Folded Reload
addsd %xmm0, %xmm12
movapd -208(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm0
movapd -11456(%rbp), %xmm10 ## 16-byte Reload
mulsd %xmm10, %xmm0
movapd -1520(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm1
mulsd -11440(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -576(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm11, %xmm0
mulsd -3920(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
addsd %xmm0, %xmm12
movapd %xmm0, %xmm1
mulsd LCPI19_1(%rip), %xmm1
subsd %xmm1, %xmm12
movapd %xmm12, %xmm1
subsd %xmm0, %xmm1
movsd -3440(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd -768(%rbp), %xmm3 ## 8-byte Folded Reload
movsd -4512(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -3792(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm3
movsd %xmm3, -3440(%rbp) ## 8-byte Spill
movapd -96(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm8
mulsd -4960(%rbp), %xmm8 ## 16-byte Folded Reload
movapd -144(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm3, %xmm0
subsd %xmm0, %xmm8
movapd -256(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm3, %xmm6
movapd %xmm2, %xmm0
mulsd -3296(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm6
movapd %xmm5, %xmm2
mulsd %xmm8, %xmm2
subsd -7648(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm6, %xmm2
movapd %xmm9, %xmm0
mulsd %xmm14, %xmm0
addsd %xmm2, %xmm0
movapd %xmm4, %xmm2
mulsd %xmm8, %xmm2
subsd -6192(%rbp), %xmm2 ## 8-byte Folded Reload
movapd -1488(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm6, %xmm3
addsd %xmm2, %xmm3
movapd -176(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm14, %xmm7
addsd %xmm3, %xmm7
subsd -1352(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm5, %xmm2
mulsd %xmm0, %xmm2
subsd -6176(%rbp), %xmm7 ## 16-byte Folded Reload
movapd %xmm4, %xmm3
mulsd %xmm7, %xmm3
addsd %xmm2, %xmm3
movapd %xmm11, %xmm2
mulsd %xmm8, %xmm2
subsd -8256(%rbp), %xmm2 ## 8-byte Folded Reload
movapd -864(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm6, -14848(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm4
addsd %xmm2, %xmm4
mulsd -736(%rbp), %xmm14 ## 16-byte Folded Reload
addsd %xmm4, %xmm14
subsd -8240(%rbp), %xmm14 ## 8-byte Folded Reload
movapd %xmm11, %xmm2
mulsd %xmm14, %xmm2
addsd %xmm3, %xmm2
movsd -3904(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd -3728(%rbp), %xmm3 ## 8-byte Folded Reload
mulsd LCPI19_1(%rip), %xmm3
mulsd -3648(%rbp), %xmm3 ## 16-byte Folded Reload
movsd -2752(%rbp), %xmm13 ## 8-byte Reload
## xmm13 = mem[0],zero
mulsd -3104(%rbp), %xmm13 ## 8-byte Folded Reload
mulsd LCPI19_133(%rip), %xmm13
mulsd %xmm15, %xmm3
mulsd -2848(%rbp), %xmm13 ## 8-byte Folded Reload
divsd -3824(%rbp), %xmm13 ## 8-byte Folded Reload
mulsd -1392(%rbp), %xmm13 ## 8-byte Folded Reload
addsd %xmm3, %xmm13
subsd %xmm2, %xmm1
addsd %xmm2, %xmm13
mulsd LCPI19_1(%rip), %xmm2
subsd %xmm2, %xmm13
addsd %xmm1, %xmm13
movapd %xmm13, %xmm3
movapd %xmm5, %xmm1
mulsd -432(%rbp), %xmm1 ## 8-byte Folded Reload
movapd -1344(%rbp), %xmm2 ## 16-byte Reload
mulsd -6304(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm1, %xmm2
movapd -2416(%rbp), %xmm1 ## 16-byte Reload
mulsd -2000(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm2, %xmm1
movsd -5520(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd -1360(%rbp), %xmm2 ## 8-byte Folded Reload
mulsd LCPI19_1(%rip), %xmm2
mulsd -3072(%rbp), %xmm2 ## 16-byte Folded Reload
mulsd %xmm15, %xmm2
movsd -2656(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
mulsd -4048(%rbp), %xmm9 ## 16-byte Folded Reload
mulsd LCPI19_134(%rip), %xmm9
mulsd -3952(%rbp), %xmm9 ## 8-byte Folded Reload
divsd -3712(%rbp), %xmm9 ## 8-byte Folded Reload
mulsd -2480(%rbp), %xmm9 ## 8-byte Folded Reload
addsd %xmm2, %xmm9
movsd LCPI19_1(%rip), %xmm15 ## xmm15 = mem[0],zero
movapd %xmm13, %xmm4
movsd %xmm13, -2752(%rbp) ## 8-byte Spill
subsd %xmm1, %xmm13
addsd %xmm1, %xmm9
mulsd %xmm15, %xmm1
subsd %xmm1, %xmm9
movapd %xmm9, %xmm15
movapd -2384(%rbp), %xmm1 ## 16-byte Reload
mulsd -1632(%rbp), %xmm1 ## 8-byte Folded Reload
subsd -8304(%rbp), %xmm1 ## 16-byte Folded Reload
movapd -1024(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm10, %xmm3
mulsd %xmm10, %xmm2
addsd %xmm1, %xmm2
movapd -2368(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm8, %xmm1
addsd %xmm2, %xmm1
subsd -8288(%rbp), %xmm1 ## 8-byte Folded Reload
mulsd -2080(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
movapd %xmm4, %xmm1
mulsd LCPI19_25(%rip), %xmm1
subsd %xmm1, %xmm0
movsd -2112(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movapd -5504(%rbp), %xmm9 ## 16-byte Reload
mulsd %xmm9, %xmm1
movapd -336(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm2
mulsd -1776(%rbp), %xmm2 ## 16-byte Folded Reload
subsd %xmm2, %xmm1
movapd -3120(%rbp), %xmm3 ## 16-byte Reload
movapd -144(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm4, %xmm3
movapd -992(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm3, %xmm2
addsd %xmm1, %xmm2
movapd -4944(%rbp), %xmm1 ## 16-byte Reload
mulsd -2160(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm2
movapd -2064(%rbp), %xmm1 ## 16-byte Reload
movsd -9736(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
mulsd %xmm10, %xmm1
addsd %xmm2, %xmm1
movapd -4960(%rbp), %xmm2 ## 16-byte Reload
mulsd -2176(%rbp), %xmm2 ## 16-byte Folded Reload
subsd %xmm2, %xmm1
movapd -2608(%rbp), %xmm2 ## 16-byte Reload
movsd -3440(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd %xmm6, %xmm2
addsd %xmm1, %xmm2
addsd -7664(%rbp), %xmm2 ## 16-byte Folded Reload
addsd -8272(%rbp), %xmm2 ## 8-byte Folded Reload
addsd -6464(%rbp), %xmm2 ## 16-byte Folded Reload
movapd -624(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm11, %xmm1
movapd %xmm2, -14928(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm1
addsd %xmm0, %xmm1
mulsd -2512(%rbp), %xmm5 ## 8-byte Folded Reload
movapd %xmm9, %xmm0
mulsd -96(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm5
movapd %xmm4, %xmm0
movapd %xmm3, -13712(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm0
subsd %xmm0, %xmm5
addsd -4312(%rbp), %xmm1 ## 8-byte Folded Reload
addsd -4320(%rbp), %xmm1 ## 8-byte Folded Reload
addsd -3472(%rbp), %xmm5 ## 8-byte Folded Reload
movapd %xmm5, %xmm2
mulsd LCPI19_103(%rip), %xmm2
addsd %xmm1, %xmm2
movapd -7856(%rbp), %xmm1 ## 16-byte Reload
mulsd -320(%rbp), %xmm1 ## 8-byte Folded Reload
addsd -4528(%rbp), %xmm1 ## 16-byte Folded Reload
movapd %xmm6, %xmm0
addsd %xmm6, %xmm10
movapd -7840(%rbp), %xmm3 ## 16-byte Reload
movapd -48(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm0, %xmm3
addsd %xmm10, %xmm3
movapd %xmm0, %xmm9
movapd %xmm3, -16768(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm9
addsd %xmm1, %xmm9
movapd %xmm9, %xmm1
mulsd LCPI19_25(%rip), %xmm1
addsd %xmm2, %xmm1
movapd -11104(%rbp), %xmm2 ## 16-byte Reload
mulsd -72(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm1, %xmm2
movapd -11088(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm11, %xmm3
movapd %xmm0, %xmm1
movapd %xmm3, -16752(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm1
subsd %xmm1, %xmm2
movapd -1824(%rbp), %xmm1 ## 16-byte Reload
movapd -4544(%rbp), %xmm11 ## 16-byte Reload
mulsd %xmm11, %xmm1
subsd %xmm1, %xmm2
subsd -7216(%rbp), %xmm2 ## 8-byte Folded Reload
movsd -432(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -2304(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm2, %xmm1
movsd %xmm1, -432(%rbp) ## 8-byte Spill
addsd %xmm15, %xmm13
movsd %xmm13, -9640(%rbp) ## 8-byte Spill
mulsd LCPI19_108(%rip), %xmm1
mulsd LCPI19_114(%rip), %xmm13
subsd %xmm13, %xmm1
movsd -2752(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd LCPI19_24(%rip), %xmm3
movsd LCPI19_112(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm9, %xmm2
addsd %xmm3, %xmm2
movapd -1888(%rbp), %xmm3 ## 16-byte Reload
movsd -1632(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm0, %xmm3
subsd -7208(%rbp), %xmm3 ## 8-byte Folded Reload
movapd -1024(%rbp), %xmm13 ## 16-byte Reload
movapd %xmm13, %xmm4
mulsd -3920(%rbp), %xmm4 ## 16-byte Folded Reload
addsd %xmm3, %xmm4
mulsd LCPI19_14(%rip), %xmm12
addsd %xmm4, %xmm12
movapd -1584(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm8, %xmm3
addsd %xmm12, %xmm3
subsd -12128(%rbp), %xmm3 ## 16-byte Folded Reload
movapd -2080(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm4, %xmm14
addsd %xmm3, %xmm14
addsd -7200(%rbp), %xmm14 ## 8-byte Folded Reload
addsd -12112(%rbp), %xmm14 ## 16-byte Folded Reload
addsd -5440(%rbp), %xmm14 ## 8-byte Folded Reload
movapd %xmm5, %xmm6
movsd LCPI19_47(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm6
addsd %xmm14, %xmm6
addsd -7192(%rbp), %xmm6 ## 8-byte Folded Reload
addsd -6432(%rbp), %xmm6 ## 16-byte Folded Reload
movapd -496(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm6, %xmm3
addsd %xmm2, %xmm3
mulsd -896(%rbp), %xmm0 ## 16-byte Folded Reload
subsd -7184(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm13, %xmm2
mulsd -11440(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm0, %xmm2
mulsd -1904(%rbp), %xmm8 ## 16-byte Folded Reload
addsd %xmm2, %xmm8
subsd -12096(%rbp), %xmm8 ## 16-byte Folded Reload
mulsd %xmm4, %xmm7
addsd %xmm8, %xmm7
addsd -4336(%rbp), %xmm7 ## 8-byte Folded Reload
addsd -8320(%rbp), %xmm7 ## 16-byte Folded Reload
addsd -4328(%rbp), %xmm7 ## 8-byte Folded Reload
movsd LCPI19_48(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm5
addsd %xmm7, %xmm5
addsd -6416(%rbp), %xmm5 ## 16-byte Folded Reload
subsd -7176(%rbp), %xmm5 ## 8-byte Folded Reload
movapd -752(%rbp), %xmm14 ## 16-byte Reload
movapd %xmm14, %xmm2
mulsd %xmm5, %xmm2
addsd %xmm3, %xmm2
movsd -2032(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm11, %xmm3
subsd %xmm3, %xmm2
subsd -7160(%rbp), %xmm2 ## 8-byte Folded Reload
movapd -6304(%rbp), %xmm3 ## 16-byte Reload
movapd -2304(%rbp), %xmm12 ## 16-byte Reload
mulsd %xmm12, %xmm3
addsd %xmm2, %xmm3
movapd %xmm3, %xmm0
movsd -6352(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
movsd -320(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
mulsd %xmm8, %xmm2
movsd -800(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
movapd -48(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm7, %xmm4
movapd %xmm7, %xmm3
movapd %xmm7, %xmm11
movsd %xmm4, -12568(%rbp) ## 8-byte Spill
mulsd %xmm4, %xmm3
subsd %xmm3, %xmm2
addsd -7168(%rbp), %xmm2 ## 8-byte Folded Reload
movapd %xmm2, %xmm3
mulsd LCPI19_73(%rip), %xmm3
addsd %xmm0, %xmm3
movsd -432(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd LCPI19_15(%rip), %xmm0
movsd -9640(%rbp), %xmm13 ## 8-byte Reload
## xmm13 = mem[0],zero
movapd %xmm13, %xmm4
mulsd LCPI19_22(%rip), %xmm4
subsd %xmm0, %xmm4
addsd %xmm3, %xmm4
movsd LCPI19_13(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm7, %xmm3
addsd %xmm1, %xmm3
movsd LCPI19_23(%rip), %xmm1 ## xmm1 = mem[0],zero
movsd -2752(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm1, %xmm0
mulsd %xmm1, %xmm9
subsd %xmm9, %xmm0
movapd %xmm0, %xmm1
movapd %xmm6, -14864(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm14
addsd %xmm0, %xmm14
movapd -496(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm5, -14880(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm1
subsd %xmm1, %xmm14
movapd -4544(%rbp), %xmm1 ## 16-byte Reload
mulsd -1808(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm14
subsd -7152(%rbp), %xmm14 ## 8-byte Folded Reload
movapd -2000(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm12, %xmm1
addsd %xmm14, %xmm1
mulsd LCPI19_64(%rip), %xmm15
addsd %xmm1, %xmm15
mulsd LCPI19_72(%rip), %xmm2
addsd %xmm15, %xmm2
movapd -6320(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm8, %xmm0
addsd -12080(%rbp), %xmm0 ## 16-byte Folded Reload
addsd -3448(%rbp), %xmm10 ## 8-byte Folded Reload
movsd -1744(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm11, %xmm1
addsd %xmm10, %xmm1
movapd %xmm11, %xmm6
movsd %xmm1, -12560(%rbp) ## 8-byte Spill
mulsd %xmm1, %xmm6
addsd %xmm0, %xmm6
addsd %xmm2, %xmm3
movapd %xmm6, %xmm5
mulsd LCPI19_114(%rip), %xmm5
addsd %xmm3, %xmm5
mulsd %xmm7, %xmm2
addsd %xmm4, %xmm2
mulsd LCPI19_31(%rip), %xmm6
addsd %xmm2, %xmm6
movapd -1616(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm4
mulsd %xmm5, %xmm4
movapd -1088(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm0
mulsd %xmm6, %xmm0
subsd %xmm0, %xmm4
movapd %xmm13, %xmm1
movsd LCPI19_111(%rip), %xmm12 ## xmm12 = mem[0],zero
mulsd %xmm12, %xmm1
movapd -848(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm0
mulsd %xmm4, %xmm0
subsd %xmm1, %xmm0
movapd %xmm13, %xmm1
movsd LCPI19_115(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm7, %xmm1
movapd %xmm6, -14608(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm2
subsd %xmm1, %xmm2
movapd %xmm3, %xmm1
movsd -128(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
movapd %xmm5, -14624(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm1
addsd %xmm2, %xmm1
movapd -6336(%rbp), %xmm2 ## 16-byte Reload
mulsd -960(%rbp), %xmm2 ## 8-byte Folded Reload
addsd -4576(%rbp), %xmm2 ## 8-byte Folded Reload
movsd -3488(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
movsd -1296(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm3, %xmm5
addsd %xmm10, %xmm5
movsd %xmm5, -12552(%rbp) ## 8-byte Spill
mulsd %xmm5, %xmm3
addsd %xmm2, %xmm3
mulsd %xmm7, %xmm3
addsd %xmm1, %xmm3
movsd -2960(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd LCPI19_116(%rip), %xmm1
subsd %xmm1, %xmm3
movapd -2144(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm1
mulsd %xmm3, %xmm1
subsd %xmm1, %xmm0
movapd -3600(%rbp), %xmm1 ## 16-byte Reload
mulsd -976(%rbp), %xmm1 ## 8-byte Folded Reload
movapd -7600(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm2
movsd -560(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm7, %xmm2
addsd %xmm10, %xmm2
movapd %xmm2, -16736(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm7
addsd %xmm1, %xmm7
movapd %xmm7, %xmm11
mulsd %xmm12, %xmm11
addsd %xmm0, %xmm11
movapd %xmm13, %xmm0
movsd LCPI19_30(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm0
movapd %xmm5, %xmm1
movapd %xmm4, -14560(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm1
subsd %xmm0, %xmm1
movapd %xmm8, %xmm0
movapd %xmm3, -14576(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm0
addsd %xmm1, %xmm0
mulsd %xmm2, %xmm7
addsd %xmm0, %xmm7
movapd %xmm9, %xmm12
movapd %xmm6, %xmm0
mulsd %xmm6, %xmm12
movsd %xmm10, -12384(%rbp) ## 8-byte Spill
addsd %xmm10, %xmm12
testq %rax, %rax
je LBB19_56
## %bb.55:
movapd %xmm6, %xmm3
movapd -3184(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm4
movapd -13520(%rbp), %xmm15 ## 16-byte Reload
mulsd %xmm15, %xmm4
movapd -2528(%rbp), %xmm10 ## 16-byte Reload
movapd %xmm10, %xmm1
movapd -13504(%rbp), %xmm14 ## 16-byte Reload
mulsd %xmm14, %xmm1
movapd %xmm9, %xmm2
mulsd -9560(%rbp), %xmm2 ## 8-byte Folded Reload
movsd -280(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
movapd %xmm5, %xmm0
mulsd %xmm6, %xmm0
addsd %xmm2, %xmm0
movsd LCPI19_29(%rip), %xmm2 ## xmm2 = mem[0],zero
movapd %xmm0, %xmm3
mulsd %xmm2, %xmm3
subsd %xmm3, %xmm1
subsd %xmm1, %xmm4
addsd %xmm4, %xmm4
movsd LCPI19_119(%rip), %xmm3 ## xmm3 = mem[0],zero
movapd %xmm13, %xmm1
mulsd %xmm3, %xmm1
movapd %xmm8, %xmm6
mulsd %xmm11, %xmm6
subsd %xmm1, %xmm6
movapd %xmm10, %xmm1
mulsd %xmm7, %xmm1
subsd %xmm1, %xmm6
movapd %xmm5, %xmm1
mulsd -3600(%rbp), %xmm1 ## 16-byte Folded Reload
movapd %xmm9, %xmm5
mulsd %xmm12, %xmm5
addsd %xmm1, %xmm5
movapd %xmm5, %xmm1
mulsd %xmm3, %xmm1
addsd %xmm6, %xmm1
addsd %xmm1, %xmm1
addsd %xmm4, %xmm1
mulsd %xmm2, %xmm13
movapd %xmm10, %xmm6
mulsd %xmm11, %xmm6
subsd %xmm13, %xmm6
movapd %xmm8, %xmm4
mulsd %xmm7, %xmm4
addsd %xmm6, %xmm4
mulsd %xmm2, %xmm5
addsd %xmm4, %xmm5
mulsd %xmm3, %xmm0
movapd %xmm8, %xmm2
mulsd %xmm14, %xmm2
subsd %xmm0, %xmm2
movapd %xmm10, %xmm0
mulsd %xmm15, %xmm0
addsd %xmm2, %xmm0
movsd -1536(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
movapd %xmm4, %xmm2
mulsd %xmm1, %xmm2
addsd %xmm5, %xmm0
movsd -1072(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm0, %xmm3
subsd %xmm3, %xmm2
movsd -2272(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm3, %xmm1
mulsd -880(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
mulsd %xmm4, %xmm2
movsd LCPI19_1(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm2
mulsd %xmm3, %xmm0
mulsd %xmm1, %xmm0
subsd %xmm0, %xmm2
movsd %xmm2, 40(%rax)
LBB19_56:
movsd %xmm12, -12368(%rbp) ## 8-byte Spill
movapd %xmm11, -14496(%rbp) ## 16-byte Spill
movapd %xmm7, -14512(%rbp) ## 16-byte Spill
movsd -224(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
subsd -352(%rbp), %xmm2 ## 8-byte Folded Reload
movapd %xmm2, %xmm11
movsd %xmm2, -1120(%rbp) ## 8-byte Spill
addsd -408(%rbp), %xmm11 ## 8-byte Folded Reload
movsd -464(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
addsd -152(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm0, %xmm10
movapd %xmm0, %xmm14
addsd -288(%rbp), %xmm10 ## 8-byte Folded Reload
movapd %xmm10, %xmm12
movapd %xmm10, -432(%rbp) ## 16-byte Spill
addsd -640(%rbp), %xmm12 ## 16-byte Folded Reload
movapd %xmm12, %xmm13
divsd -6608(%rbp), %xmm13 ## 16-byte Folded Reload
movapd -3888(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm13, %xmm3
movapd %xmm3, %xmm4
mulsd -6624(%rbp), %xmm4 ## 16-byte Folded Reload
addsd %xmm11, %xmm4
addsd -1472(%rbp), %xmm4 ## 16-byte Folded Reload
movsd -5728(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm4, %xmm0
movsd LCPI19_1(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
movapd %xmm1, %xmm5
mulsd -5760(%rbp), %xmm0 ## 8-byte Folded Reload
movsd LCPI19_37(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm7, %xmm0
movapd %xmm7, %xmm6
mulsd -456(%rbp), %xmm4 ## 8-byte Folded Reload
movsd %xmm4, -6984(%rbp) ## 8-byte Spill
movsd -1704(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm4, %xmm1
movsd %xmm1, -10672(%rbp) ## 8-byte Spill
movsd -1408(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
mulsd %xmm1, %xmm9
movsd LCPI19_43(%rip), %xmm15 ## xmm15 = mem[0],zero
mulsd %xmm15, %xmm9
addsd %xmm0, %xmm9
addsd -1232(%rbp), %xmm10 ## 16-byte Folded Reload
movapd %xmm10, %xmm0
divsd -3168(%rbp), %xmm0 ## 16-byte Folded Reload
movsd %xmm0, -6992(%rbp) ## 8-byte Spill
movapd -2944(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm0, %xmm1
movapd %xmm1, %xmm4
mulsd -6592(%rbp), %xmm4 ## 16-byte Folded Reload
addsd %xmm11, %xmm4
addsd -1456(%rbp), %xmm4 ## 16-byte Folded Reload
movsd -4768(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm4, %xmm0
mulsd %xmm5, %xmm0
movapd %xmm5, %xmm7
mulsd -3760(%rbp), %xmm0 ## 8-byte Folded Reload
mulsd %xmm6, %xmm0
mulsd -1384(%rbp), %xmm4 ## 8-byte Folded Reload
movsd %xmm4, -10688(%rbp) ## 8-byte Spill
movsd -4816(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm4, %xmm5
movsd %xmm5, -4512(%rbp) ## 8-byte Spill
movsd -1152(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
mulsd %xmm5, %xmm8
mulsd %xmm15, %xmm8
addsd %xmm0, %xmm8
movapd %xmm8, -4576(%rbp) ## 16-byte Spill
movapd %xmm14, %xmm4
movapd %xmm14, %xmm6
addsd -5232(%rbp), %xmm4 ## 16-byte Folded Reload
movsd %xmm4, -2656(%rbp) ## 8-byte Spill
divsd -2200(%rbp), %xmm4 ## 8-byte Folded Reload
movsd %xmm4, -5488(%rbp) ## 8-byte Spill
movsd -760(%rbp), %xmm14 ## 8-byte Reload
## xmm14 = mem[0],zero
mulsd %xmm4, %xmm14
movapd %xmm14, %xmm5
mulsd -2968(%rbp), %xmm5 ## 8-byte Folded Reload
addsd %xmm2, %xmm5
addsd -3840(%rbp), %xmm5 ## 16-byte Folded Reload
movapd %xmm5, %xmm4
mulsd -4656(%rbp), %xmm4 ## 8-byte Folded Reload
mulsd %xmm7, %xmm4
mulsd -3312(%rbp), %xmm4 ## 16-byte Folded Reload
mulsd LCPI19_37(%rip), %xmm4
mulsd -1168(%rbp), %xmm5 ## 8-byte Folded Reload
movsd %xmm5, -10704(%rbp) ## 8-byte Spill
movsd -1656(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm5, %xmm7
movsd %xmm7, -3904(%rbp) ## 8-byte Spill
movsd -3856(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm7, %xmm0
mulsd %xmm15, %xmm0
addsd %xmm4, %xmm0
movsd %xmm0, -9712(%rbp) ## 8-byte Spill
movsd LCPI19_50(%rip), %xmm4 ## xmm4 = mem[0],zero
movapd -432(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm4, %xmm2
movapd %xmm2, -432(%rbp) ## 16-byte Spill
movapd -8192(%rbp), %xmm7 ## 16-byte Reload
subsd %xmm2, %xmm7
movsd %xmm7, -4944(%rbp) ## 8-byte Spill
movsd LCPI19_74(%rip), %xmm15 ## xmm15 = mem[0],zero
mulsd %xmm15, %xmm6
movsd %xmm6, -1632(%rbp) ## 8-byte Spill
subsd %xmm6, %xmm7
addsd -3568(%rbp), %xmm7 ## 8-byte Folded Reload
movsd %xmm7, -2960(%rbp) ## 8-byte Spill
movsd -464(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
movsd LCPI19_85(%rip), %xmm6 ## xmm6 = mem[0],zero
mulsd %xmm6, %xmm4
movapd %xmm7, %xmm5
subsd %xmm4, %xmm5
movsd %xmm5, -4544(%rbp) ## 8-byte Spill
movapd %xmm9, %xmm7
addsd %xmm8, %xmm7
movsd %xmm7, -7144(%rbp) ## 8-byte Spill
addsd %xmm0, %xmm7
movsd %xmm7, -7024(%rbp) ## 8-byte Spill
movapd %xmm5, %xmm4
movsd -1200(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm2, %xmm4
movapd %xmm7, %xmm5
subsd %xmm4, %xmm5
movapd %xmm2, %xmm4
movsd %xmm5, -12536(%rbp) ## 8-byte Spill
mulsd %xmm5, %xmm4
movsd -1112(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
addsd %xmm4, %xmm5
movsd -1120(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
mulsd %xmm15, %xmm8
movsd %xmm8, -1120(%rbp) ## 8-byte Spill
mulsd LCPI19_50(%rip), %xmm11
movapd %xmm11, -2000(%rbp) ## 16-byte Spill
addsd -9520(%rbp), %xmm11 ## 16-byte Folded Reload
movapd %xmm11, -6304(%rbp) ## 16-byte Spill
movapd %xmm8, %xmm2
addsd %xmm11, %xmm2
addsd -10592(%rbp), %xmm2 ## 16-byte Folded Reload
movsd %xmm2, -1112(%rbp) ## 8-byte Spill
mulsd -352(%rbp), %xmm6 ## 8-byte Folded Reload
subsd %xmm6, %xmm2
movsd %xmm2, -4960(%rbp) ## 8-byte Spill
mulsd -2800(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm5, %xmm2
movsd %xmm2, -5504(%rbp) ## 8-byte Spill
mulsd -5856(%rbp), %xmm12 ## 16-byte Folded Reload
mulsd -3152(%rbp), %xmm3 ## 8-byte Folded Reload
movapd %xmm3, %xmm4
movsd LCPI19_1(%rip), %xmm6 ## xmm6 = mem[0],zero
mulsd %xmm6, %xmm4
subsd %xmm4, %xmm12
movsd LCPI19_44(%rip), %xmm8 ## xmm8 = mem[0],zero
mulsd %xmm8, %xmm3
movapd %xmm8, %xmm11
mulsd -4784(%rbp), %xmm3 ## 8-byte Folded Reload
movsd LCPI19_45(%rip), %xmm15 ## xmm15 = mem[0],zero
mulsd %xmm15, %xmm3
subsd %xmm3, %xmm12
movapd -592(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm4
mulsd %xmm9, %xmm4
divsd -5840(%rbp), %xmm12 ## 16-byte Folded Reload
movapd -4208(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm12, %xmm2
movapd -400(%rbp), %xmm15 ## 16-byte Reload
movapd %xmm15, %xmm3
mulsd %xmm2, %xmm3
movapd %xmm2, %xmm5
subsd %xmm3, %xmm4
mulsd -5248(%rbp), %xmm12 ## 16-byte Folded Reload
mulsd -5776(%rbp), %xmm13 ## 8-byte Folded Reload
subsd %xmm13, %xmm12
movapd -272(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm3
movapd %xmm2, %xmm7
mulsd %xmm5, %xmm3
movapd %xmm5, %xmm13
movapd %xmm5, -10656(%rbp) ## 16-byte Spill
movapd %xmm8, %xmm2
mulsd %xmm12, %xmm2
movsd %xmm12, -9720(%rbp) ## 8-byte Spill
subsd %xmm2, %xmm3
movsd %xmm3, -7472(%rbp) ## 8-byte Spill
movapd %xmm15, %xmm0
mulsd %xmm12, %xmm0
movapd %xmm7, %xmm2
movapd %xmm7, %xmm12
mulsd %xmm9, %xmm2
subsd %xmm2, %xmm0
movapd %xmm0, -13696(%rbp) ## 16-byte Spill
movapd -528(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm3, %xmm2
subsd -7904(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm0, %xmm2
movapd -112(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm4, -2752(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm5
addsd %xmm2, %xmm5
movapd -1856(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm4, %xmm3
subsd -8848(%rbp), %xmm3 ## 16-byte Folded Reload
subsd -2688(%rbp), %xmm5 ## 8-byte Folded Reload
movapd %xmm5, -11424(%rbp) ## 16-byte Spill
movapd -1440(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm5, %xmm2
addsd %xmm3, %xmm2
mulsd -5824(%rbp), %xmm10 ## 16-byte Folded Reload
mulsd -536(%rbp), %xmm1 ## 8-byte Folded Reload
movapd %xmm1, %xmm3
mulsd %xmm6, %xmm3
subsd %xmm3, %xmm10
mulsd %xmm11, %xmm1
mulsd -4144(%rbp), %xmm1 ## 8-byte Folded Reload
mulsd LCPI19_45(%rip), %xmm1
subsd %xmm1, %xmm10
movapd %xmm8, %xmm4
movapd -4576(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm7, %xmm4
divsd -5808(%rbp), %xmm10 ## 16-byte Folded Reload
movsd %xmm10, -5520(%rbp) ## 8-byte Spill
movapd -4288(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm10, %xmm0
movapd %xmm15, %xmm1
mulsd %xmm0, %xmm1
movapd %xmm0, %xmm10
movapd %xmm0, -8224(%rbp) ## 16-byte Spill
subsd %xmm1, %xmm4
movapd %xmm4, -4928(%rbp) ## 16-byte Spill
movapd -2400(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm4, %xmm1
addsd %xmm2, %xmm1
movapd -432(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm11, %xmm4
mulsd %xmm8, %xmm4
movsd -1552(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm4, %xmm2
movapd -2000(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm3
mulsd -2256(%rbp), %xmm3 ## 16-byte Folded Reload
addsd %xmm2, %xmm3
movapd %xmm11, %xmm2
mulsd %xmm15, %xmm2
mulsd %xmm12, %xmm5
addsd %xmm2, %xmm5
movapd -2816(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm5, %xmm2
movapd %xmm5, -10640(%rbp) ## 16-byte Spill
addsd %xmm3, %xmm2
movapd -640(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm13, %xmm3
subsd %xmm2, %xmm3
movapd -1840(%rbp), %xmm2 ## 16-byte Reload
movsd %xmm9, -12544(%rbp) ## 8-byte Spill
mulsd %xmm9, %xmm2
subsd %xmm2, %xmm3
movapd -1232(%rbp), %xmm13 ## 16-byte Reload
mulsd %xmm0, %xmm13
addsd %xmm3, %xmm13
movapd -2624(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm7, %xmm2
subsd %xmm2, %xmm13
subsd -8816(%rbp), %xmm1 ## 16-byte Folded Reload
addsd LCPI19_46(%rip), %xmm1
addsd -8832(%rbp), %xmm13 ## 8-byte Folded Reload
addsd -7888(%rbp), %xmm13 ## 16-byte Folded Reload
addsd -9424(%rbp), %xmm13 ## 16-byte Folded Reload
movapd -688(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm3
movapd %xmm13, -15008(%rbp) ## 16-byte Spill
mulsd %xmm13, %xmm3
addsd %xmm1, %xmm3
movsd -4944(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movapd -192(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm2, %xmm1
movsd -7144(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
subsd %xmm1, %xmm7
movapd %xmm2, %xmm1
movapd %xmm2, %xmm6
movsd %xmm7, -7144(%rbp) ## 8-byte Spill
mulsd %xmm7, %xmm1
addsd -5008(%rbp), %xmm1 ## 16-byte Folded Reload
movapd -6304(%rbp), %xmm9 ## 16-byte Reload
movsd -480(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm2, %xmm9
movapd %xmm2, %xmm10
addsd %xmm1, %xmm9
movapd %xmm9, %xmm1
movsd LCPI19_25(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm1
addsd %xmm3, %xmm1
movapd %xmm4, -13680(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm8
movapd %xmm15, %xmm4
mulsd %xmm5, %xmm4
addsd %xmm8, %xmm4
movapd -2000(%rbp), %xmm15 ## 16-byte Reload
movapd %xmm15, %xmm5
mulsd -784(%rbp), %xmm5 ## 8-byte Folded Reload
addsd %xmm4, %xmm5
addsd -7872(%rbp), %xmm5 ## 16-byte Folded Reload
movapd %xmm5, %xmm3
movsd LCPI19_103(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm3
subsd %xmm3, %xmm1
movsd -1552(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm4, %xmm11
movapd -6064(%rbp), %xmm2 ## 16-byte Reload
subsd %xmm11, %xmm2
movapd %xmm2, -11072(%rbp) ## 16-byte Spill
movapd %xmm2, %xmm3
mulsd %xmm0, %xmm3
movapd %xmm3, -16720(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm3
addsd %xmm1, %xmm3
movapd %xmm15, %xmm1
mulsd %xmm4, %xmm1
movapd -6048(%rbp), %xmm4 ## 16-byte Reload
subsd %xmm1, %xmm4
movapd %xmm4, -11056(%rbp) ## 16-byte Spill
mulsd -2336(%rbp), %xmm4 ## 8-byte Folded Reload
addsd %xmm3, %xmm4
movapd %xmm6, %xmm2
mulsd -9712(%rbp), %xmm2 ## 8-byte Folded Reload
movapd -2544(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm2, %xmm1
movapd %xmm2, %xmm7
movapd %xmm2, -4496(%rbp) ## 16-byte Spill
addsd %xmm4, %xmm1
movsd -2656(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd -6672(%rbp), %xmm2 ## 8-byte Folded Reload
mulsd -1640(%rbp), %xmm14 ## 8-byte Folded Reload
movapd %xmm14, %xmm3
mulsd LCPI19_1(%rip), %xmm3
subsd %xmm3, %xmm2
mulsd LCPI19_44(%rip), %xmm14
mulsd -3584(%rbp), %xmm14 ## 16-byte Folded Reload
mulsd LCPI19_45(%rip), %xmm14
subsd %xmm14, %xmm2
divsd -6656(%rbp), %xmm2 ## 8-byte Folded Reload
mulsd -2712(%rbp), %xmm2 ## 8-byte Folded Reload
movsd -5488(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -4560(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm2
movsd %xmm2, -2656(%rbp) ## 8-byte Spill
movapd -112(%rbp), %xmm14 ## 16-byte Reload
movapd %xmm14, %xmm4
mulsd %xmm7, %xmm4
movapd %xmm6, %xmm3
mulsd %xmm2, %xmm3
movapd %xmm3, -9888(%rbp) ## 16-byte Spill
movapd -528(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm3, %xmm0
addsd -7584(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm4
subsd -8800(%rbp), %xmm1 ## 16-byte Folded Reload
subsd -7568(%rbp), %xmm4 ## 8-byte Folded Reload
movsd %xmm4, -4296(%rbp) ## 8-byte Spill
movapd -2576(%rbp), %xmm13 ## 16-byte Reload
mulsd %xmm4, %xmm13
addsd %xmm1, %xmm13
movsd -2960(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm6, %xmm0
movsd -7024(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
subsd %xmm0, %xmm1
movapd %xmm6, %xmm0
movsd %xmm1, -12528(%rbp) ## 8-byte Spill
mulsd %xmm1, %xmm0
addsd -4992(%rbp), %xmm0 ## 16-byte Folded Reload
movsd -1112(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
mulsd %xmm10, %xmm8
addsd %xmm0, %xmm8
movapd %xmm13, %xmm2
movsd LCPI19_15(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm2
movapd %xmm8, %xmm0
movsd LCPI19_31(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
subsd %xmm0, %xmm2
movsd %xmm2, -5488(%rbp) ## 8-byte Spill
movapd -1248(%rbp), %xmm0 ## 16-byte Reload
movsd -7472(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm2, %xmm0
subsd -5472(%rbp), %xmm0 ## 8-byte Folded Reload
movapd -912(%rbp), %xmm15 ## 16-byte Reload
movapd -13696(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm7, %xmm15
addsd %xmm0, %xmm15
movapd -608(%rbp), %xmm12 ## 16-byte Reload
movapd %xmm12, %xmm10
movapd -2752(%rbp), %xmm11 ## 16-byte Reload
mulsd %xmm11, %xmm10
addsd %xmm15, %xmm10
movapd -1920(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm11, %xmm0
subsd -4424(%rbp), %xmm0 ## 8-byte Folded Reload
subsd -5456(%rbp), %xmm10 ## 16-byte Folded Reload
movapd -1440(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm15
mulsd %xmm10, %xmm15
movapd %xmm10, -11408(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm15
movapd -2432(%rbp), %xmm0 ## 16-byte Reload
movapd -4928(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm4, %xmm0
addsd %xmm15, %xmm0
subsd -8784(%rbp), %xmm0 ## 16-byte Folded Reload
addsd LCPI19_77(%rip), %xmm0
addsd -8768(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm5, %xmm3
movsd LCPI19_48(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm3
addsd %xmm0, %xmm3
movapd -1264(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm2, %xmm0
subsd -6240(%rbp), %xmm0 ## 8-byte Folded Reload
movapd -1504(%rbp), %xmm15 ## 16-byte Reload
mulsd %xmm7, %xmm15
addsd %xmm0, %xmm15
movapd -1600(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm11, %xmm0
movapd -720(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm1, %xmm11
movapd %xmm1, %xmm2
addsd %xmm15, %xmm11
subsd -8704(%rbp), %xmm0 ## 8-byte Folded Reload
subsd -8608(%rbp), %xmm11 ## 16-byte Folded Reload
mulsd %xmm11, %xmm6
movapd %xmm11, -2752(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm6
mulsd -11424(%rbp), %xmm14 ## 16-byte Folded Reload
mulsd %xmm10, %xmm12
addsd %xmm14, %xmm12
mulsd %xmm11, %xmm1
addsd %xmm12, %xmm1
movsd LCPI19_14(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm1
addsd %xmm6, %xmm1
movapd -2128(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm4, %xmm0
addsd %xmm1, %xmm0
movapd %xmm9, %xmm15
movsd LCPI19_24(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm15
addsd -8752(%rbp), %xmm3 ## 16-byte Folded Reload
addsd -5920(%rbp), %xmm3 ## 16-byte Folded Reload
movapd -512(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm1
movapd %xmm2, %xmm12
mulsd %xmm3, %xmm1
subsd -8736(%rbp), %xmm0 ## 16-byte Folded Reload
addsd LCPI19_78(%rip), %xmm0
addsd %xmm15, %xmm1
addsd -6256(%rbp), %xmm0 ## 16-byte Folded Reload
movsd LCPI19_47(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm5
addsd %xmm0, %xmm5
subsd -8720(%rbp), %xmm5 ## 16-byte Folded Reload
addsd -5904(%rbp), %xmm5 ## 16-byte Folded Reload
movapd -448(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm0
movapd %xmm2, %xmm7
mulsd %xmm5, %xmm0
addsd %xmm1, %xmm0
movapd -2912(%rbp), %xmm1 ## 16-byte Reload
movapd -4496(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm6, %xmm1
addsd %xmm0, %xmm1
movapd -3264(%rbp), %xmm15 ## 16-byte Reload
movapd %xmm15, %xmm2
mulsd %xmm6, %xmm2
movapd -2048(%rbp), %xmm0 ## 16-byte Reload
movapd -9888(%rbp), %xmm14 ## 16-byte Reload
mulsd %xmm14, %xmm0
addsd -7552(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm2
subsd -8688(%rbp), %xmm1 ## 16-byte Folded Reload
subsd -8640(%rbp), %xmm2 ## 16-byte Folded Reload
movapd -2576(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm0
mulsd %xmm2, %xmm0
movapd %xmm2, %xmm11
movapd %xmm2, -9904(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm0
movsd -1632(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
movapd -192(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm1, %xmm2
movsd %xmm2, -12520(%rbp) ## 8-byte Spill
mulsd %xmm2, %xmm1
movsd -1120(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
mulsd -480(%rbp), %xmm10 ## 8-byte Folded Reload
addsd %xmm1, %xmm10
addsd LCPI19_79(%rip), %xmm0
addsd -4128(%rbp), %xmm10 ## 16-byte Folded Reload
movapd %xmm10, %xmm2
movsd LCPI19_73(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm2
addsd %xmm0, %xmm2
movsd LCPI19_23(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm9
movapd %xmm7, %xmm0
movapd %xmm3, -14800(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm0
subsd %xmm0, %xmm9
movapd %xmm12, %xmm0
movapd %xmm5, -14816(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm0
addsd %xmm9, %xmm0
movapd -3248(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm6, %xmm1
addsd %xmm0, %xmm1
movapd -2560(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm3, %xmm6
movapd -1104(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm14, %xmm0
addsd -4416(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm6
subsd -8672(%rbp), %xmm1 ## 16-byte Folded Reload
subsd -4408(%rbp), %xmm6 ## 8-byte Folded Reload
mulsd %xmm6, %xmm4
movapd %xmm6, -4496(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm4
movapd -112(%rbp), %xmm1 ## 16-byte Reload
movsd -4296(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
mulsd %xmm12, %xmm1
mulsd %xmm11, %xmm15
addsd %xmm1, %xmm15
movapd %xmm3, %xmm1
mulsd %xmm6, %xmm1
addsd %xmm15, %xmm1
movsd LCPI19_64(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm1
addsd %xmm4, %xmm1
addsd LCPI19_80(%rip), %xmm1
movsd LCPI19_72(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm10
addsd %xmm1, %xmm10
movsd LCPI19_108(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm13
movsd LCPI19_21(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm8
subsd %xmm13, %xmm8
movsd -5488(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
addsd %xmm2, %xmm1
movsd LCPI19_13(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm2
addsd %xmm8, %xmm2
addsd %xmm10, %xmm2
mulsd %xmm0, %xmm10
addsd %xmm1, %xmm10
movsd LCPI19_115(%rip), %xmm0 ## xmm0 = mem[0],zero
movsd -5504(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm0, %xmm3
movapd -1312(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm0
movapd %xmm1, %xmm5
mulsd %xmm10, %xmm0
subsd %xmm3, %xmm0
movapd -1328(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm1
mulsd %xmm2, %xmm1
addsd %xmm0, %xmm1
movsd LCPI19_87(%rip), %xmm3 ## xmm3 = mem[0],zero
subsd %xmm1, %xmm3
movsd -1760(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd LCPI19_116(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
subsd %xmm0, %xmm3
movapd %xmm2, -14592(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm5
movapd %xmm4, %xmm0
movapd %xmm10, -14544(%rbp) ## 16-byte Spill
mulsd %xmm10, %xmm0
subsd %xmm0, %xmm5
movapd -9440(%rbp), %xmm1 ## 16-byte Reload
addsd -4544(%rbp), %xmm1 ## 8-byte Folded Reload
movapd %xmm1, %xmm0
movapd %xmm1, %xmm8
movapd %xmm1, -5488(%rbp) ## 16-byte Spill
movsd -704(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm2, %xmm0
movsd -7024(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
movapd %xmm7, %xmm1
subsd %xmm0, %xmm1
movapd %xmm2, %xmm0
movsd %xmm1, -12512(%rbp) ## 8-byte Spill
mulsd %xmm1, %xmm0
movapd -8144(%rbp), %xmm1 ## 16-byte Reload
addsd -4960(%rbp), %xmm1 ## 8-byte Folded Reload
movapd %xmm1, -5504(%rbp) ## 16-byte Spill
mulsd -1280(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm0, %xmm1
movapd -1936(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm0
movapd %xmm2, %xmm6
mulsd %xmm5, %xmm0
movapd %xmm1, %xmm2
movsd LCPI19_30(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd %xmm4, %xmm2
subsd %xmm2, %xmm0
movapd -1216(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm4
mulsd %xmm3, %xmm4
subsd %xmm0, %xmm4
movapd %xmm4, -10736(%rbp) ## 16-byte Spill
movapd %xmm2, %xmm0
movapd %xmm5, -14480(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm0
movsd LCPI19_111(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm1
subsd %xmm1, %xmm0
movapd %xmm6, %xmm1
movapd %xmm3, -16496(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm1
addsd %xmm0, %xmm1
movapd %xmm1, -10720(%rbp) ## 16-byte Spill
movsd -128(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm8, %xmm0
movapd %xmm7, %xmm1
subsd %xmm0, %xmm1
movsd %xmm1, -9632(%rbp) ## 8-byte Spill
movsd -6984(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -5072(%rbp), %xmm0 ## 8-byte Folded Reload
mulsd LCPI19_1(%rip), %xmm0
mulsd -4112(%rbp), %xmm0 ## 16-byte Folded Reload
movsd LCPI19_110(%rip), %xmm6 ## xmm6 = mem[0],zero
mulsd %xmm6, %xmm0
movsd -10672(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
mulsd -5104(%rbp), %xmm8 ## 8-byte Folded Reload
mulsd LCPI19_132(%rip), %xmm8
mulsd -4752(%rbp), %xmm8 ## 8-byte Folded Reload
divsd -4832(%rbp), %xmm8 ## 8-byte Folded Reload
mulsd -2896(%rbp), %xmm8 ## 16-byte Folded Reload
addsd %xmm0, %xmm8
movapd -528(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm0
movapd -11424(%rbp), %xmm10 ## 16-byte Reload
mulsd %xmm10, %xmm0
movapd -1248(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm1
mulsd -11408(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -1264(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm0
mulsd -2752(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
addsd %xmm0, %xmm8
movapd %xmm0, %xmm1
mulsd LCPI19_1(%rip), %xmm1
subsd %xmm1, %xmm8
movapd %xmm8, %xmm15
subsd %xmm0, %xmm15
movsd -5520(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd -776(%rbp), %xmm4 ## 8-byte Folded Reload
movsd -6992(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -1696(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm4
movsd %xmm4, -5520(%rbp) ## 8-byte Spill
movapd -272(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm13
mulsd -8224(%rbp), %xmm13 ## 16-byte Folded Reload
movapd -592(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm0
mulsd %xmm4, %xmm0
subsd %xmm0, %xmm13
movapd -400(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm4, %xmm6
movapd %xmm1, %xmm0
mulsd -4576(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm6
movapd %xmm7, %xmm1
mulsd %xmm13, %xmm1
subsd -7792(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm6, %xmm1
movapd -112(%rbp), %xmm0 ## 16-byte Reload
movapd -4928(%rbp), %xmm11 ## 16-byte Reload
mulsd %xmm11, %xmm0
addsd %xmm1, %xmm0
movapd %xmm3, %xmm1
movapd %xmm3, %xmm4
mulsd %xmm13, %xmm1
subsd -6224(%rbp), %xmm1 ## 8-byte Folded Reload
movapd -912(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm6, %xmm3
addsd %xmm1, %xmm3
movapd -608(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm11, %xmm1
addsd %xmm3, %xmm1
subsd -2672(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm7, %xmm3
mulsd %xmm0, %xmm3
subsd -6208(%rbp), %xmm1 ## 16-byte Folded Reload
mulsd %xmm1, %xmm4
addsd %xmm3, %xmm4
movapd %xmm2, %xmm3
mulsd %xmm13, %xmm3
subsd -8464(%rbp), %xmm3 ## 8-byte Folded Reload
movapd -1504(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm6, -14752(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm5
addsd %xmm3, %xmm5
movapd %xmm11, %xmm6
mulsd -720(%rbp), %xmm6 ## 16-byte Folded Reload
addsd %xmm5, %xmm6
subsd -8448(%rbp), %xmm6 ## 16-byte Folded Reload
movapd %xmm6, -4928(%rbp) ## 16-byte Spill
movapd %xmm2, %xmm3
mulsd %xmm6, %xmm3
addsd %xmm4, %xmm3
movsd -10688(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd -5088(%rbp), %xmm4 ## 8-byte Folded Reload
mulsd LCPI19_1(%rip), %xmm4
mulsd -3696(%rbp), %xmm4 ## 16-byte Folded Reload
movsd -4512(%rbp), %xmm14 ## 8-byte Reload
## xmm14 = mem[0],zero
mulsd -1712(%rbp), %xmm14 ## 8-byte Folded Reload
mulsd LCPI19_133(%rip), %xmm14
movsd LCPI19_110(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm5, %xmm4
mulsd -4096(%rbp), %xmm14 ## 8-byte Folded Reload
divsd -4848(%rbp), %xmm14 ## 8-byte Folded Reload
mulsd -1400(%rbp), %xmm14 ## 8-byte Folded Reload
addsd %xmm4, %xmm14
subsd %xmm3, %xmm15
addsd %xmm3, %xmm14
mulsd LCPI19_1(%rip), %xmm3
subsd %xmm3, %xmm14
addsd %xmm15, %xmm14
movapd %xmm7, %xmm2
mulsd %xmm12, %xmm2
movapd -2048(%rbp), %xmm3 ## 16-byte Reload
mulsd -9904(%rbp), %xmm3 ## 16-byte Folded Reload
addsd %xmm2, %xmm3
movapd -1104(%rbp), %xmm2 ## 16-byte Reload
mulsd -4496(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm3, %xmm2
movsd -10704(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd -1368(%rbp), %xmm3 ## 8-byte Folded Reload
mulsd LCPI19_1(%rip), %xmm3
mulsd -3984(%rbp), %xmm3 ## 16-byte Folded Reload
mulsd %xmm5, %xmm3
movsd -3904(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd -4080(%rbp), %xmm4 ## 16-byte Folded Reload
mulsd LCPI19_134(%rip), %xmm4
mulsd -3968(%rbp), %xmm4 ## 8-byte Folded Reload
divsd -648(%rbp), %xmm4 ## 8-byte Folded Reload
mulsd -2496(%rbp), %xmm4 ## 8-byte Folded Reload
addsd %xmm3, %xmm4
movapd %xmm14, %xmm15
movsd %xmm14, -4512(%rbp) ## 8-byte Spill
subsd %xmm2, %xmm15
addsd %xmm2, %xmm4
mulsd LCPI19_1(%rip), %xmm2
subsd %xmm2, %xmm4
movsd %xmm4, -3904(%rbp) ## 8-byte Spill
movapd -1856(%rbp), %xmm2 ## 16-byte Reload
mulsd -7472(%rbp), %xmm2 ## 8-byte Folded Reload
subsd -8528(%rbp), %xmm2 ## 16-byte Folded Reload
movapd -1184(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm10, %xmm4
mulsd %xmm10, %xmm3
addsd %xmm2, %xmm3
movapd -2400(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm13, %xmm2
addsd %xmm3, %xmm2
subsd -8512(%rbp), %xmm2 ## 16-byte Folded Reload
mulsd -1872(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm2, %xmm0
movapd %xmm14, %xmm3
mulsd LCPI19_25(%rip), %xmm3
addsd %xmm0, %xmm3
movapd -2640(%rbp), %xmm0 ## 16-byte Reload
movapd -10640(%rbp), %xmm11 ## 16-byte Reload
mulsd %xmm11, %xmm0
movapd -432(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm2
mulsd -2256(%rbp), %xmm2 ## 16-byte Folded Reload
subsd %xmm2, %xmm0
movapd -2000(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm9, %xmm5
movsd -1552(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm5, %xmm2
addsd %xmm0, %xmm2
movapd -10656(%rbp), %xmm0 ## 16-byte Reload
mulsd -1472(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm2
movapd -1840(%rbp), %xmm0 ## 16-byte Reload
movsd -9720(%rbp), %xmm14 ## 8-byte Reload
## xmm14 = mem[0],zero
mulsd %xmm14, %xmm0
addsd %xmm2, %xmm0
movapd -8224(%rbp), %xmm2 ## 16-byte Reload
mulsd -1456(%rbp), %xmm2 ## 16-byte Folded Reload
subsd %xmm2, %xmm0
movapd -2624(%rbp), %xmm4 ## 16-byte Reload
movsd -5520(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm7, %xmm4
addsd %xmm0, %xmm4
addsd -7808(%rbp), %xmm4 ## 16-byte Folded Reload
addsd -8496(%rbp), %xmm4 ## 16-byte Folded Reload
addsd -9280(%rbp), %xmm4 ## 16-byte Folded Reload
movapd -688(%rbp), %xmm12 ## 16-byte Reload
movapd %xmm12, %xmm2
movapd %xmm4, -14832(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm2
addsd %xmm3, %xmm2
mulsd -784(%rbp), %xmm6 ## 8-byte Folded Reload
movapd %xmm11, %xmm0
mulsd -272(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm6
movapd %xmm5, -8224(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm9
subsd %xmm9, %xmm6
addsd -4376(%rbp), %xmm2 ## 8-byte Folded Reload
addsd -8480(%rbp), %xmm2 ## 8-byte Folded Reload
addsd -7824(%rbp), %xmm6 ## 8-byte Folded Reload
movapd %xmm6, %xmm0
mulsd LCPI19_103(%rip), %xmm0
subsd %xmm0, %xmm2
movsd -4944(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd -480(%rbp), %xmm3 ## 8-byte Folded Reload
addsd -6272(%rbp), %xmm3 ## 16-byte Folded Reload
movapd %xmm7, %xmm0
addsd %xmm7, %xmm14
movapd -6304(%rbp), %xmm4 ## 16-byte Reload
movapd -192(%rbp), %xmm11 ## 16-byte Reload
mulsd %xmm11, %xmm4
addsd %xmm14, %xmm4
movapd %xmm11, %xmm9
movapd %xmm4, -10704(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm9
addsd %xmm3, %xmm9
movapd %xmm9, %xmm3
mulsd LCPI19_25(%rip), %xmm3
subsd %xmm3, %xmm2
movapd -11072(%rbp), %xmm3 ## 16-byte Reload
mulsd -2336(%rbp), %xmm3 ## 8-byte Folded Reload
addsd %xmm2, %xmm3
movapd -11056(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm12, %xmm2
movapd %xmm2, -10688(%rbp) ## 16-byte Spill
mulsd %xmm11, %xmm2
subsd %xmm2, %xmm3
movapd -2544(%rbp), %xmm2 ## 16-byte Reload
movapd -9888(%rbp), %xmm12 ## 16-byte Reload
mulsd %xmm12, %xmm2
subsd %xmm2, %xmm3
subsd -4368(%rbp), %xmm3 ## 8-byte Folded Reload
movapd -2352(%rbp), %xmm0 ## 16-byte Reload
movsd -4296(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
mulsd %xmm0, %xmm10
addsd %xmm3, %xmm10
addsd -3904(%rbp), %xmm15 ## 8-byte Folded Reload
movsd %xmm15, -4296(%rbp) ## 8-byte Spill
movapd %xmm10, %xmm3
mulsd LCPI19_108(%rip), %xmm3
mulsd LCPI19_21(%rip), %xmm15
subsd %xmm3, %xmm15
movsd %xmm15, -1760(%rbp) ## 8-byte Spill
movsd -4512(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd LCPI19_24(%rip), %xmm4
movsd LCPI19_112(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm9, %xmm3
addsd %xmm4, %xmm3
movapd -1600(%rbp), %xmm4 ## 16-byte Reload
movsd -7472(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm2, %xmm4
subsd -7264(%rbp), %xmm4 ## 8-byte Folded Reload
movapd -1184(%rbp), %xmm15 ## 16-byte Reload
movapd %xmm15, %xmm5
mulsd -2752(%rbp), %xmm5 ## 16-byte Folded Reload
addsd %xmm4, %xmm5
mulsd LCPI19_14(%rip), %xmm8
addsd %xmm5, %xmm8
movapd -2128(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm13, %xmm4
addsd %xmm8, %xmm4
subsd -12240(%rbp), %xmm4 ## 16-byte Folded Reload
movapd -4928(%rbp), %xmm5 ## 16-byte Reload
movsd -1872(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm0, %xmm5
addsd %xmm4, %xmm5
addsd -12224(%rbp), %xmm5 ## 16-byte Folded Reload
addsd -12208(%rbp), %xmm5 ## 16-byte Folded Reload
addsd -12192(%rbp), %xmm5 ## 16-byte Folded Reload
movapd %xmm5, %xmm4
movapd %xmm6, %xmm8
movsd LCPI19_47(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm7, %xmm8
addsd %xmm5, %xmm8
addsd -7256(%rbp), %xmm8 ## 8-byte Folded Reload
addsd -9232(%rbp), %xmm8 ## 16-byte Folded Reload
movapd -448(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm8, %xmm4
addsd %xmm3, %xmm4
mulsd -1920(%rbp), %xmm2 ## 16-byte Folded Reload
subsd -7248(%rbp), %xmm2 ## 8-byte Folded Reload
movapd %xmm15, %xmm3
mulsd -11408(%rbp), %xmm3 ## 16-byte Folded Reload
addsd %xmm2, %xmm3
mulsd -2432(%rbp), %xmm13 ## 16-byte Folded Reload
addsd %xmm3, %xmm13
subsd -12176(%rbp), %xmm13 ## 16-byte Folded Reload
mulsd %xmm0, %xmm1
addsd %xmm13, %xmm1
addsd -8576(%rbp), %xmm1 ## 8-byte Folded Reload
addsd -3088(%rbp), %xmm1 ## 16-byte Folded Reload
addsd -8560(%rbp), %xmm1 ## 16-byte Folded Reload
movsd LCPI19_48(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm6
addsd %xmm1, %xmm6
addsd -6480(%rbp), %xmm6 ## 16-byte Folded Reload
subsd -7240(%rbp), %xmm6 ## 8-byte Folded Reload
movapd -512(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm1
mulsd %xmm6, %xmm1
addsd %xmm4, %xmm1
movapd -2912(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm12, %xmm3
subsd %xmm3, %xmm1
subsd -12160(%rbp), %xmm1 ## 16-byte Folded Reload
movapd -9904(%rbp), %xmm3 ## 16-byte Reload
mulsd -2352(%rbp), %xmm3 ## 16-byte Folded Reload
addsd %xmm1, %xmm3
movapd %xmm3, %xmm7
movsd -1632(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movsd -480(%rbp), %xmm13 ## 8-byte Reload
## xmm13 = mem[0],zero
mulsd %xmm13, %xmm1
movsd -1120(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm11, %xmm4
movapd %xmm11, %xmm3
movapd %xmm11, %xmm15
movsd %xmm4, -9904(%rbp) ## 8-byte Spill
mulsd %xmm4, %xmm3
subsd %xmm3, %xmm1
addsd -7232(%rbp), %xmm1 ## 8-byte Folded Reload
movapd %xmm1, %xmm3
mulsd LCPI19_73(%rip), %xmm3
addsd %xmm7, %xmm3
mulsd LCPI19_15(%rip), %xmm10
movsd -4296(%rbp), %xmm11 ## 8-byte Reload
## xmm11 = mem[0],zero
movapd %xmm11, %xmm4
mulsd LCPI19_31(%rip), %xmm4
subsd %xmm4, %xmm10
addsd %xmm3, %xmm10
movsd LCPI19_13(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm7, %xmm3
addsd -1760(%rbp), %xmm3 ## 8-byte Folded Reload
movsd LCPI19_23(%rip), %xmm2 ## xmm2 = mem[0],zero
movsd -4512(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm2, %xmm4
mulsd %xmm2, %xmm9
subsd %xmm9, %xmm4
movapd %xmm8, -14704(%rbp) ## 16-byte Spill
mulsd %xmm8, %xmm0
addsd %xmm4, %xmm0
movapd -448(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm6, -14736(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm2
subsd %xmm2, %xmm0
movapd %xmm12, %xmm2
mulsd -3248(%rbp), %xmm2 ## 16-byte Folded Reload
subsd %xmm2, %xmm0
subsd -7224(%rbp), %xmm0 ## 8-byte Folded Reload
movapd -4496(%rbp), %xmm2 ## 16-byte Reload
mulsd -2352(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm0, %xmm2
movsd -3904(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd LCPI19_64(%rip), %xmm0
addsd %xmm2, %xmm0
movsd -128(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
mulsd LCPI19_72(%rip), %xmm1
addsd %xmm0, %xmm1
movsd -2960(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm13, %xmm0
addsd -12144(%rbp), %xmm0 ## 16-byte Folded Reload
addsd -2656(%rbp), %xmm14 ## 8-byte Folded Reload
movsd -1112(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm15, %xmm2
addsd %xmm14, %xmm2
movapd %xmm15, %xmm5
movsd %xmm2, -9888(%rbp) ## 8-byte Spill
mulsd %xmm2, %xmm5
addsd %xmm0, %xmm5
addsd %xmm1, %xmm3
movsd LCPI19_114(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd %xmm5, %xmm4
addsd %xmm3, %xmm4
mulsd %xmm7, %xmm1
addsd %xmm10, %xmm1
movapd -5488(%rbp), %xmm13 ## 16-byte Reload
movapd -5504(%rbp), %xmm15 ## 16-byte Reload
mulsd LCPI19_31(%rip), %xmm5
addsd %xmm1, %xmm5
movapd -1312(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm6
mulsd %xmm4, %xmm6
movapd -1328(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm0
mulsd %xmm5, %xmm0
subsd %xmm0, %xmm6
movapd %xmm11, %xmm1
movsd LCPI19_111(%rip), %xmm10 ## xmm10 = mem[0],zero
mulsd %xmm10, %xmm1
movapd -1216(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm0
mulsd %xmm6, %xmm0
subsd %xmm1, %xmm0
movapd %xmm11, %xmm1
movsd LCPI19_115(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm7, %xmm1
movapd %xmm5, -14448(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm2
subsd %xmm1, %xmm2
movapd %xmm3, %xmm1
movsd LCPI19_1(%rip), %xmm5 ## xmm5 = mem[0],zero
movapd %xmm4, -14464(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm1
addsd %xmm2, %xmm1
movsd -4544(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd -2800(%rbp), %xmm2 ## 8-byte Folded Reload
addsd -3328(%rbp), %xmm2 ## 8-byte Folded Reload
movsd -4960(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
movsd -1200(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm3, %xmm4
addsd %xmm14, %xmm4
movsd %xmm4, -6992(%rbp) ## 8-byte Spill
mulsd %xmm4, %xmm3
addsd %xmm2, %xmm3
mulsd %xmm7, %xmm3
addsd %xmm1, %xmm3
movsd -4304(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd LCPI19_116(%rip), %xmm1
subsd %xmm1, %xmm3
movapd -1936(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm1
mulsd %xmm3, %xmm1
subsd %xmm1, %xmm0
movapd %xmm13, %xmm9
movapd %xmm13, %xmm1
mulsd -1280(%rbp), %xmm1 ## 8-byte Folded Reload
movapd %xmm15, %xmm2
movsd -704(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm7, %xmm2
addsd %xmm14, %xmm2
movapd %xmm2, -10672(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm7
addsd %xmm1, %xmm7
movapd %xmm7, %xmm13
mulsd %xmm10, %xmm13
addsd %xmm0, %xmm13
movapd %xmm11, %xmm0
movsd LCPI19_30(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm0
movapd %xmm4, %xmm1
movapd %xmm6, -14416(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm1
subsd %xmm0, %xmm1
movapd %xmm8, %xmm0
movapd %xmm3, -14432(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm0
addsd %xmm1, %xmm0
mulsd %xmm2, %xmm7
addsd %xmm0, %xmm7
movapd %xmm12, %xmm2
mulsd %xmm15, %xmm12
movsd %xmm14, -12360(%rbp) ## 8-byte Spill
addsd %xmm14, %xmm12
movapd %xmm12, %xmm14
testq %rax, %rax
je LBB19_58
## %bb.57:
movapd -2880(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm4
mulsd -10736(%rbp), %xmm4 ## 16-byte Folded Reload
movapd -2784(%rbp), %xmm12 ## 16-byte Reload
movapd %xmm12, %xmm1
mulsd -10720(%rbp), %xmm1 ## 16-byte Folded Reload
movapd %xmm2, %xmm5
mulsd -9632(%rbp), %xmm2 ## 8-byte Folded Reload
movsd -280(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
movapd %xmm10, %xmm0
mulsd %xmm15, %xmm0
addsd %xmm2, %xmm0
movsd LCPI19_29(%rip), %xmm2 ## xmm2 = mem[0],zero
movapd %xmm0, %xmm3
mulsd %xmm2, %xmm3
subsd %xmm3, %xmm1
subsd %xmm1, %xmm4
addsd %xmm4, %xmm4
movsd LCPI19_119(%rip), %xmm3 ## xmm3 = mem[0],zero
movapd %xmm11, %xmm1
mulsd %xmm3, %xmm1
movapd %xmm8, %xmm6
mulsd %xmm13, %xmm6
subsd %xmm1, %xmm6
movapd %xmm12, %xmm1
mulsd %xmm7, %xmm1
subsd %xmm1, %xmm6
mulsd %xmm9, %xmm10
mulsd %xmm14, %xmm5
addsd %xmm10, %xmm5
movapd %xmm5, %xmm1
mulsd %xmm3, %xmm1
addsd %xmm6, %xmm1
addsd %xmm1, %xmm1
addsd %xmm4, %xmm1
mulsd %xmm2, %xmm11
movapd %xmm12, %xmm6
mulsd %xmm13, %xmm6
subsd %xmm11, %xmm6
movapd %xmm8, %xmm4
mulsd %xmm7, %xmm4
addsd %xmm6, %xmm4
mulsd %xmm2, %xmm5
addsd %xmm4, %xmm5
mulsd %xmm3, %xmm0
movapd %xmm8, %xmm2
mulsd -10720(%rbp), %xmm2 ## 16-byte Folded Reload
subsd %xmm0, %xmm2
movapd %xmm12, %xmm0
mulsd -10736(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm2, %xmm0
movsd -1536(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
movapd %xmm4, %xmm2
mulsd %xmm1, %xmm2
addsd %xmm5, %xmm0
movsd LCPI19_1(%rip), %xmm5 ## xmm5 = mem[0],zero
movsd -1072(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm0, %xmm3
subsd %xmm3, %xmm2
movsd -2272(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm3, %xmm1
mulsd -880(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
mulsd %xmm4, %xmm2
mulsd %xmm5, %xmm2
mulsd %xmm3, %xmm0
mulsd %xmm5, %xmm0
subsd %xmm0, %xmm2
movsd %xmm2, 48(%rax)
LBB19_58:
movsd %xmm14, -6984(%rbp) ## 8-byte Spill
movapd %xmm13, -14384(%rbp) ## 16-byte Spill
movapd %xmm7, -14400(%rbp) ## 16-byte Spill
movsd -232(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
addsd -376(%rbp), %xmm2 ## 8-byte Folded Reload
movapd %xmm2, -1760(%rbp) ## 16-byte Spill
movsd -160(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
addsd -296(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm0, %xmm14
movapd %xmm0, %xmm9
movapd %xmm0, -3328(%rbp) ## 16-byte Spill
addsd -832(%rbp), %xmm14 ## 16-byte Folded Reload
movapd %xmm14, %xmm1
divsd -3408(%rbp), %xmm1 ## 16-byte Folded Reload
movapd -4912(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm1, %xmm3
movapd %xmm3, %xmm0
mulsd -3664(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm2, %xmm0
addsd -2160(%rbp), %xmm0 ## 16-byte Folded Reload
movsd -2288(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm0, %xmm2
movsd -4800(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm2, %xmm4
mulsd %xmm5, %xmm4
mulsd -2864(%rbp), %xmm4 ## 16-byte Folded Reload
mulsd -3776(%rbp), %xmm2 ## 8-byte Folded Reload
movsd -3744(%rbp), %xmm15 ## 8-byte Reload
## xmm15 = mem[0],zero
mulsd %xmm2, %xmm15
mulsd LCPI19_132(%rip), %xmm15
movapd %xmm5, %xmm8
movsd LCPI19_110(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm5, %xmm4
mulsd -3344(%rbp), %xmm15 ## 8-byte Folded Reload
divsd -3136(%rbp), %xmm15 ## 8-byte Folded Reload
mulsd -1424(%rbp), %xmm15 ## 16-byte Folded Reload
addsd %xmm4, %xmm15
mulsd -5696(%rbp), %xmm14 ## 16-byte Folded Reload
mulsd -1128(%rbp), %xmm3 ## 8-byte Folded Reload
movapd %xmm3, %xmm4
mulsd %xmm8, %xmm4
subsd %xmm4, %xmm14
movsd LCPI19_44(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd %xmm4, %xmm3
movapd %xmm4, %xmm12
mulsd -1688(%rbp), %xmm3 ## 8-byte Folded Reload
movsd LCPI19_45(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd %xmm4, %xmm3
subsd %xmm3, %xmm14
divsd -5680(%rbp), %xmm14 ## 16-byte Folded Reload
movapd -4192(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm14, %xmm3
mulsd -5200(%rbp), %xmm14 ## 16-byte Folded Reload
mulsd -1672(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm14
movapd -96(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm4
mulsd %xmm3, %xmm4
movapd %xmm3, %xmm5
movapd %xmm3, -7472(%rbp) ## 16-byte Spill
movapd -144(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm11, %xmm1
mulsd %xmm14, %xmm1
subsd %xmm1, %xmm4
movapd %xmm4, %xmm10
mulsd -3360(%rbp), %xmm0 ## 8-byte Folded Reload
mulsd %xmm8, %xmm0
mulsd -4736(%rbp), %xmm0 ## 8-byte Folded Reload
movsd LCPI19_37(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
movapd %xmm2, %xmm3
mulsd -936(%rbp), %xmm3 ## 8-byte Folded Reload
movsd LCPI19_43(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm3
addsd %xmm0, %xmm3
movsd %xmm3, -4928(%rbp) ## 8-byte Spill
movapd -256(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm4
mulsd %xmm14, %xmm4
movapd %xmm7, %xmm0
mulsd %xmm3, %xmm0
subsd %xmm0, %xmm4
movapd %xmm11, %xmm2
mulsd %xmm3, %xmm2
movapd %xmm1, %xmm0
mulsd %xmm5, %xmm0
subsd %xmm0, %xmm2
movapd -208(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm0
movapd %xmm1, %xmm13
movapd %xmm10, %xmm6
mulsd %xmm10, %xmm0
subsd -4976(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm4, %xmm0
movapd -64(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm2, %xmm3
addsd %xmm0, %xmm3
movapd -1520(%rbp), %xmm10 ## 16-byte Reload
movapd %xmm10, %xmm0
mulsd %xmm6, %xmm0
movapd %xmm6, %xmm5
movsd %xmm6, -4304(%rbp) ## 8-byte Spill
subsd -3432(%rbp), %xmm0 ## 8-byte Folded Reload
movapd -1488(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm4, %xmm1
addsd %xmm0, %xmm1
movapd -176(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm2, %xmm6
movapd %xmm2, -10656(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm6
subsd -5408(%rbp), %xmm6 ## 16-byte Folded Reload
movapd %xmm6, -4512(%rbp) ## 16-byte Spill
subsd -2680(%rbp), %xmm3 ## 8-byte Folded Reload
movapd %xmm3, -3904(%rbp) ## 16-byte Spill
movapd %xmm13, %xmm0
mulsd %xmm3, %xmm0
movapd %xmm10, %xmm1
mulsd %xmm6, %xmm1
addsd %xmm0, %xmm1
movapd -576(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm0
movapd %xmm3, %xmm6
mulsd %xmm5, %xmm0
subsd -5392(%rbp), %xmm0 ## 8-byte Folded Reload
movapd -864(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm4, -14528(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm3
addsd %xmm0, %xmm3
movapd -736(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm2, %xmm4
addsd %xmm3, %xmm4
subsd -2648(%rbp), %xmm4 ## 8-byte Folded Reload
movapd %xmm4, -4496(%rbp) ## 16-byte Spill
movapd %xmm6, %xmm0
mulsd %xmm4, %xmm0
addsd %xmm1, %xmm0
addsd %xmm0, %xmm15
movapd %xmm0, %xmm1
movapd %xmm8, %xmm2
mulsd %xmm8, %xmm1
subsd %xmm1, %xmm15
movapd %xmm15, %xmm8
subsd %xmm0, %xmm8
movapd %xmm9, %xmm0
addsd -672(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm0, %xmm13
mulsd -5744(%rbp), %xmm13 ## 16-byte Folded Reload
divsd -3392(%rbp), %xmm0 ## 16-byte Folded Reload
movapd -3424(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm0, %xmm6
movsd -2016(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm6, %xmm3
movapd %xmm3, %xmm4
mulsd %xmm2, %xmm4
movapd %xmm2, %xmm5
subsd %xmm4, %xmm13
mulsd %xmm12, %xmm3
mulsd -3376(%rbp), %xmm3 ## 8-byte Folded Reload
mulsd LCPI19_45(%rip), %xmm3
subsd %xmm3, %xmm13
divsd -5712(%rbp), %xmm13 ## 16-byte Folded Reload
movapd -4272(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm13, %xmm2
mulsd -768(%rbp), %xmm13 ## 8-byte Folded Reload
mulsd -3792(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm13
movapd %xmm7, %xmm9
mulsd %xmm2, %xmm9
movapd %xmm2, -5392(%rbp) ## 16-byte Spill
movapd %xmm11, %xmm0
mulsd %xmm13, %xmm0
subsd %xmm0, %xmm9
mulsd -3680(%rbp), %xmm6 ## 16-byte Folded Reload
addsd -1760(%rbp), %xmm6 ## 16-byte Folded Reload
addsd -2176(%rbp), %xmm6 ## 16-byte Folded Reload
movsd -1664(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm6, %xmm0
mulsd %xmm5, %xmm0
mulsd -1680(%rbp), %xmm0 ## 8-byte Folded Reload
mulsd LCPI19_37(%rip), %xmm0
mulsd -1792(%rbp), %xmm6 ## 8-byte Folded Reload
movsd -3808(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
mulsd %xmm6, %xmm12
movsd -1144(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm12, %xmm5
mulsd LCPI19_43(%rip), %xmm5
addsd %xmm0, %xmm5
movsd %xmm5, -3432(%rbp) ## 8-byte Spill
movapd -256(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm3
mulsd %xmm13, %xmm3
mulsd %xmm5, %xmm7
subsd %xmm7, %xmm3
movapd %xmm11, %xmm1
mulsd %xmm5, %xmm1
movapd %xmm4, %xmm0
mulsd %xmm2, %xmm0
subsd %xmm0, %xmm1
movapd -208(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm4
mulsd %xmm9, %xmm4
subsd -7648(%rbp), %xmm4 ## 8-byte Folded Reload
addsd %xmm3, %xmm4
movapd -64(%rbp), %xmm11 ## 16-byte Reload
mulsd %xmm1, %xmm11
movapd %xmm1, %xmm0
addsd %xmm4, %xmm11
movapd %xmm10, %xmm4
mulsd %xmm9, %xmm4
movsd %xmm9, -2648(%rbp) ## 8-byte Spill
subsd -6192(%rbp), %xmm4 ## 8-byte Folded Reload
movapd -1488(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm3, %xmm5
addsd %xmm4, %xmm5
movapd -176(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm1, %xmm2
movapd %xmm1, -10640(%rbp) ## 16-byte Spill
addsd %xmm5, %xmm2
subsd -6176(%rbp), %xmm2 ## 16-byte Folded Reload
movapd %xmm2, -6192(%rbp) ## 16-byte Spill
subsd -1352(%rbp), %xmm11 ## 8-byte Folded Reload
movapd %xmm7, %xmm4
mulsd %xmm11, %xmm4
mulsd %xmm2, %xmm10
addsd %xmm4, %xmm10
movapd -576(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm4
mulsd %xmm9, %xmm4
subsd -8256(%rbp), %xmm4 ## 8-byte Folded Reload
movapd -864(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm3, -8256(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm1
addsd %xmm4, %xmm1
movapd -736(%rbp), %xmm9 ## 16-byte Reload
mulsd %xmm0, %xmm9
addsd %xmm1, %xmm9
subsd -8240(%rbp), %xmm9 ## 8-byte Folded Reload
movsd %xmm9, -6176(%rbp) ## 8-byte Spill
movapd %xmm2, %xmm1
mulsd %xmm9, %xmm1
addsd %xmm10, %xmm1
mulsd -3728(%rbp), %xmm6 ## 8-byte Folded Reload
movsd LCPI19_1(%rip), %xmm9 ## xmm9 = mem[0],zero
mulsd %xmm9, %xmm6
mulsd -3648(%rbp), %xmm6 ## 16-byte Folded Reload
mulsd LCPI19_110(%rip), %xmm6
mulsd -3104(%rbp), %xmm12 ## 8-byte Folded Reload
mulsd LCPI19_133(%rip), %xmm12
mulsd -2848(%rbp), %xmm12 ## 8-byte Folded Reload
divsd -3824(%rbp), %xmm12 ## 8-byte Folded Reload
mulsd -1392(%rbp), %xmm12 ## 8-byte Folded Reload
addsd %xmm6, %xmm12
subsd %xmm1, %xmm8
addsd %xmm1, %xmm12
mulsd %xmm9, %xmm1
subsd %xmm1, %xmm12
addsd %xmm8, %xmm12
movsd -160(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
addsd -4240(%rbp), %xmm7 ## 16-byte Folded Reload
movapd %xmm7, %xmm1
divsd -2952(%rbp), %xmm1 ## 8-byte Folded Reload
movsd -928(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm1, %xmm3
movsd -5552(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd %xmm3, %xmm6
addsd -232(%rbp), %xmm6 ## 8-byte Folded Reload
addsd -3216(%rbp), %xmm6 ## 16-byte Folded Reload
movsd -2832(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm6, %xmm5
mulsd %xmm9, %xmm5
movapd -4624(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm5, %xmm4
mulsd -2464(%rbp), %xmm6 ## 8-byte Folded Reload
mulsd LCPI19_37(%rip), %xmm4
movsd -1648(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
mulsd %xmm6, %xmm10
movsd -1136(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm10, %xmm0
mulsd LCPI19_43(%rip), %xmm0
addsd %xmm4, %xmm0
movapd %xmm0, %xmm2
movsd %xmm0, -1136(%rbp) ## 8-byte Spill
mulsd -2192(%rbp), %xmm3 ## 8-byte Folded Reload
mulsd -6576(%rbp), %xmm7 ## 8-byte Folded Reload
movapd %xmm3, %xmm4
mulsd %xmm9, %xmm4
subsd %xmm4, %xmm7
mulsd LCPI19_44(%rip), %xmm3
mulsd -5536(%rbp), %xmm3 ## 16-byte Folded Reload
mulsd LCPI19_45(%rip), %xmm3
subsd %xmm3, %xmm7
mulsd -1624(%rbp), %xmm1 ## 8-byte Folded Reload
divsd -6560(%rbp), %xmm7 ## 8-byte Folded Reload
mulsd -2696(%rbp), %xmm7 ## 8-byte Folded Reload
subsd %xmm1, %xmm7
movsd %xmm7, -2952(%rbp) ## 8-byte Spill
movapd -48(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm8
mulsd %xmm7, %xmm8
movapd -208(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm1
mulsd %xmm8, %xmm1
movsd -5424(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
addsd %xmm1, %xmm3
movapd %xmm4, %xmm0
mulsd %xmm2, %xmm0
movapd -64(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm0, %xmm2
movapd %xmm0, %xmm5
subsd %xmm3, %xmm2
subsd -7520(%rbp), %xmm2 ## 8-byte Folded Reload
movsd %xmm2, -2832(%rbp) ## 8-byte Spill
movapd -1344(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm1
mulsd %xmm8, %xmm1
movapd %xmm8, -2464(%rbp) ## 16-byte Spill
movsd -7504(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
addsd %xmm1, %xmm3
movapd -3280(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm0, %xmm1
movapd %xmm0, %xmm7
movapd %xmm0, -5408(%rbp) ## 16-byte Spill
subsd %xmm3, %xmm1
subsd -7488(%rbp), %xmm1 ## 16-byte Folded Reload
movapd %xmm1, %xmm0
movapd %xmm1, -5424(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm9
mulsd %xmm1, %xmm4
addsd %xmm9, %xmm4
movapd -2416(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm1
mulsd %xmm8, %xmm1
movsd -4352(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
addsd %xmm1, %xmm5
movapd -2592(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm7, %xmm0
subsd %xmm5, %xmm0
subsd -4344(%rbp), %xmm0 ## 8-byte Folded Reload
movsd %xmm0, -1624(%rbp) ## 8-byte Spill
movapd %xmm3, %xmm1
mulsd %xmm0, %xmm1
addsd %xmm4, %xmm1
movsd -1360(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm6, %xmm4
movsd LCPI19_1(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm4
movapd -3072(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm4, %xmm3
movapd -4048(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm10, %xmm4
mulsd LCPI19_134(%rip), %xmm4
movsd -3952(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm4, %xmm5
divsd -3712(%rbp), %xmm5 ## 8-byte Folded Reload
movsd -2480(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
mulsd %xmm5, %xmm10
mulsd LCPI19_110(%rip), %xmm3
addsd %xmm3, %xmm10
movapd %xmm12, %xmm0
subsd %xmm1, %xmm0
movsd %xmm0, -928(%rbp) ## 8-byte Spill
addsd %xmm1, %xmm10
mulsd %xmm2, %xmm1
subsd %xmm1, %xmm10
movapd -2384(%rbp), %xmm1 ## 16-byte Reload
mulsd -4304(%rbp), %xmm1 ## 8-byte Folded Reload
subsd -8304(%rbp), %xmm1 ## 16-byte Folded Reload
movapd -1024(%rbp), %xmm4 ## 16-byte Reload
mulsd -3904(%rbp), %xmm4 ## 16-byte Folded Reload
addsd %xmm1, %xmm4
movapd -2368(%rbp), %xmm1 ## 16-byte Reload
mulsd -2648(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm4, %xmm1
subsd -8288(%rbp), %xmm1 ## 8-byte Folded Reload
mulsd -2080(%rbp), %xmm11 ## 16-byte Folded Reload
addsd %xmm1, %xmm11
movapd %xmm12, %xmm1
movsd LCPI19_25(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm1
subsd %xmm1, %xmm11
movsd LCPI19_50(%rip), %xmm3 ## xmm3 = mem[0],zero
movapd -3328(%rbp), %xmm8 ## 16-byte Reload
mulsd %xmm3, %xmm8
movapd %xmm8, %xmm1
mulsd -256(%rbp), %xmm1 ## 16-byte Folded Reload
movapd -1760(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm3, %xmm4
movapd %xmm4, %xmm2
movapd %xmm4, %xmm3
movapd %xmm4, -1760(%rbp) ## 16-byte Spill
movapd -96(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm7, %xmm2
addsd %xmm1, %xmm2
movsd -2112(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm2, %xmm1
movapd %xmm2, -2480(%rbp) ## 16-byte Spill
movapd %xmm8, %xmm4
mulsd -1776(%rbp), %xmm4 ## 16-byte Folded Reload
subsd %xmm4, %xmm1
movapd %xmm3, %xmm5
movapd -144(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm6, %xmm5
movapd -992(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm5, %xmm4
addsd %xmm1, %xmm4
movapd -2160(%rbp), %xmm1 ## 16-byte Reload
mulsd -7472(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm4
movapd -2064(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm14, %xmm1
addsd %xmm4, %xmm1
movapd -2176(%rbp), %xmm4 ## 16-byte Reload
mulsd -5392(%rbp), %xmm4 ## 16-byte Folded Reload
subsd %xmm4, %xmm1
movapd -2608(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm13, %xmm4
addsd %xmm1, %xmm4
addsd -7664(%rbp), %xmm4 ## 16-byte Folded Reload
movsd -8272(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
addsd %xmm4, %xmm1
movapd -6464(%rbp), %xmm3 ## 16-byte Reload
addsd %xmm1, %xmm3
movapd -624(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm1
movapd %xmm3, -6464(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm1
addsd %xmm11, %xmm1
movsd -4312(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
addsd %xmm1, %xmm0
movsd -4320(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
addsd %xmm0, %xmm3
movapd %xmm8, %xmm4
mulsd -2512(%rbp), %xmm4 ## 8-byte Folded Reload
mulsd %xmm2, %xmm7
subsd %xmm7, %xmm4
movapd %xmm6, %xmm1
movapd %xmm5, -7520(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm1
subsd %xmm1, %xmm4
addsd -3472(%rbp), %xmm4 ## 8-byte Folded Reload
movapd %xmm4, %xmm1
movsd LCPI19_103(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm1
addsd %xmm3, %xmm1
movapd -8112(%rbp), %xmm6 ## 16-byte Reload
subsd %xmm8, %xmm6
movapd %xmm6, -4048(%rbp) ## 16-byte Spill
movapd %xmm8, -3328(%rbp) ## 16-byte Spill
mulsd -320(%rbp), %xmm6 ## 8-byte Folded Reload
addsd -4528(%rbp), %xmm6 ## 16-byte Folded Reload
movsd %xmm14, -4352(%rbp) ## 8-byte Spill
movsd %xmm13, -4344(%rbp) ## 8-byte Spill
addsd %xmm13, %xmm14
movsd %xmm14, -1648(%rbp) ## 8-byte Spill
movapd -1760(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm0
addsd -8128(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm0, -3712(%rbp) ## 16-byte Spill
movapd -48(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm3, %xmm0
addsd %xmm14, %xmm0
movapd %xmm3, %xmm5
movapd %xmm3, %xmm7
movapd %xmm0, -8304(%rbp) ## 16-byte Spill
mulsd %xmm0, %xmm5
addsd %xmm6, %xmm5
movapd %xmm5, %xmm6
mulsd LCPI19_25(%rip), %xmm6
addsd %xmm1, %xmm6
movapd -992(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm0, %xmm8
movapd -6032(%rbp), %xmm3 ## 16-byte Reload
subsd %xmm8, %xmm3
movapd %xmm3, -5552(%rbp) ## 16-byte Spill
movapd %xmm3, %xmm1
mulsd -72(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm6, %xmm1
movapd %xmm2, %xmm6
mulsd %xmm0, %xmm6
movapd -6016(%rbp), %xmm0 ## 16-byte Reload
subsd %xmm6, %xmm0
movapd %xmm0, -5536(%rbp) ## 16-byte Spill
mulsd %xmm9, %xmm0
movapd %xmm7, %xmm6
movapd %xmm0, -8288(%rbp) ## 16-byte Spill
mulsd %xmm0, %xmm6
subsd %xmm6, %xmm1
movapd -1824(%rbp), %xmm6 ## 16-byte Reload
movapd -2464(%rbp), %xmm13 ## 16-byte Reload
mulsd %xmm13, %xmm6
subsd %xmm6, %xmm1
subsd -7216(%rbp), %xmm1 ## 8-byte Folded Reload
movapd -2304(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm6
movapd %xmm0, %xmm8
mulsd -2832(%rbp), %xmm6 ## 8-byte Folded Reload
addsd %xmm1, %xmm6
movsd -928(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
addsd %xmm10, %xmm1
movsd %xmm1, -928(%rbp) ## 8-byte Spill
movapd %xmm6, %xmm3
movsd LCPI19_108(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm3
movsd LCPI19_114(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm1
subsd %xmm1, %xmm3
movapd %xmm3, -3072(%rbp) ## 16-byte Spill
movapd %xmm12, %xmm3
movsd LCPI19_24(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm3
movsd LCPI19_112(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm5, %xmm1
addsd %xmm3, %xmm1
movapd -1888(%rbp), %xmm3 ## 16-byte Reload
movsd -4304(%rbp), %xmm14 ## 8-byte Reload
## xmm14 = mem[0],zero
mulsd %xmm14, %xmm3
subsd -7208(%rbp), %xmm3 ## 8-byte Folded Reload
movapd -1024(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm0
mulsd -4496(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm3, %xmm0
movsd LCPI19_14(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm15
addsd %xmm0, %xmm15
movapd -1584(%rbp), %xmm0 ## 16-byte Reload
movsd -2648(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm7, %xmm0
addsd %xmm15, %xmm0
subsd -12128(%rbp), %xmm0 ## 16-byte Folded Reload
movsd -6176(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
movapd -2080(%rbp), %xmm15 ## 16-byte Reload
mulsd %xmm15, %xmm2
addsd %xmm0, %xmm2
movsd -7200(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
addsd %xmm2, %xmm0
movapd -12112(%rbp), %xmm2 ## 16-byte Reload
addsd %xmm0, %xmm2
movsd -5440(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
addsd %xmm2, %xmm3
movapd %xmm4, %xmm0
movsd LCPI19_47(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm0
addsd %xmm3, %xmm0
movsd -7192(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
addsd %xmm0, %xmm2
movapd -6432(%rbp), %xmm11 ## 16-byte Reload
addsd %xmm2, %xmm11
movapd -496(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm11, %xmm2
addsd %xmm1, %xmm2
movapd %xmm14, %xmm0
mulsd -896(%rbp), %xmm0 ## 16-byte Folded Reload
subsd -7184(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm0, %xmm1
movapd %xmm9, %xmm0
mulsd -4512(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
movapd %xmm7, %xmm1
mulsd -1904(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
subsd -12096(%rbp), %xmm1 ## 16-byte Folded Reload
movapd -6192(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm15, %xmm0
addsd %xmm1, %xmm0
movapd %xmm0, %xmm1
movsd -4336(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
addsd %xmm1, %xmm0
movapd -8320(%rbp), %xmm1 ## 16-byte Reload
addsd %xmm0, %xmm1
movsd -4328(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
addsd %xmm1, %xmm0
movsd LCPI19_48(%rip), %xmm9 ## xmm9 = mem[0],zero
mulsd %xmm9, %xmm4
addsd %xmm0, %xmm4
movapd -6416(%rbp), %xmm9 ## 16-byte Reload
addsd %xmm4, %xmm9
subsd -7176(%rbp), %xmm9 ## 8-byte Folded Reload
movapd -752(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm0
movapd %xmm1, %xmm4
mulsd %xmm9, %xmm0
addsd %xmm2, %xmm0
movsd -2032(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm13, %xmm1
subsd %xmm1, %xmm0
subsd -7160(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm8, %xmm1
movapd %xmm8, %xmm14
mulsd -5424(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movsd -160(%rbp), %xmm15 ## 8-byte Reload
## xmm15 = mem[0],zero
movsd LCPI19_74(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm15
movapd %xmm15, %xmm0
movsd %xmm15, -2192(%rbp) ## 8-byte Spill
movsd -320(%rbp), %xmm13 ## 8-byte Reload
## xmm13 = mem[0],zero
mulsd %xmm13, %xmm0
movsd -232(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm2, %xmm3
movsd %xmm3, -1360(%rbp) ## 8-byte Spill
movapd -48(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm7, %xmm3
movapd %xmm7, %xmm2
movsd %xmm3, -2648(%rbp) ## 8-byte Spill
mulsd %xmm3, %xmm2
subsd %xmm2, %xmm0
addsd -7168(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm0, %xmm2
movsd LCPI19_73(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm2
addsd %xmm1, %xmm2
movsd LCPI19_15(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm6
movsd -928(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
movapd %xmm8, %xmm1
movsd LCPI19_22(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm1
subsd %xmm6, %xmm1
addsd %xmm2, %xmm1
movsd LCPI19_13(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm2
movapd %xmm3, %xmm6
addsd -3072(%rbp), %xmm2 ## 16-byte Folded Reload
movsd LCPI19_23(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm12
mulsd %xmm3, %xmm5
subsd %xmm5, %xmm12
movapd %xmm4, %xmm3
movapd %xmm11, -6432(%rbp) ## 16-byte Spill
mulsd %xmm11, %xmm3
addsd %xmm12, %xmm3
movapd -496(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm9, -6416(%rbp) ## 16-byte Spill
mulsd %xmm9, %xmm4
subsd %xmm4, %xmm3
movapd -2464(%rbp), %xmm4 ## 16-byte Reload
mulsd -1808(%rbp), %xmm4 ## 16-byte Folded Reload
subsd %xmm4, %xmm3
subsd -7152(%rbp), %xmm3 ## 8-byte Folded Reload
movapd %xmm14, %xmm4
mulsd -1624(%rbp), %xmm4 ## 8-byte Folded Reload
addsd %xmm3, %xmm4
movsd LCPI19_64(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm10
addsd %xmm4, %xmm10
movsd LCPI19_72(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm0
addsd %xmm10, %xmm0
movapd -4048(%rbp), %xmm14 ## 16-byte Reload
subsd %xmm15, %xmm14
addsd -3552(%rbp), %xmm14 ## 8-byte Folded Reload
movapd %xmm14, %xmm3
mulsd %xmm13, %xmm3
addsd -12080(%rbp), %xmm3 ## 16-byte Folded Reload
movsd -1648(%rbp), %xmm13 ## 8-byte Reload
## xmm13 = mem[0],zero
addsd -2952(%rbp), %xmm13 ## 8-byte Folded Reload
movsd -1360(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
addsd -3712(%rbp), %xmm4 ## 16-byte Folded Reload
addsd -10512(%rbp), %xmm4 ## 16-byte Folded Reload
movapd %xmm4, %xmm5
movapd %xmm4, %xmm10
movapd %xmm7, %xmm4
mulsd %xmm7, %xmm5
addsd %xmm13, %xmm5
movsd %xmm5, -8320(%rbp) ## 8-byte Spill
mulsd %xmm5, %xmm4
addsd %xmm3, %xmm4
addsd %xmm0, %xmm2
movapd %xmm4, %xmm3
mulsd LCPI19_114(%rip), %xmm3
addsd %xmm2, %xmm3
mulsd %xmm6, %xmm0
addsd %xmm1, %xmm0
mulsd LCPI19_31(%rip), %xmm4
addsd %xmm0, %xmm4
movapd -1616(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm5
movapd %xmm0, %xmm6
mulsd %xmm3, %xmm5
movapd -1088(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm0
movapd %xmm1, %xmm9
mulsd %xmm4, %xmm0
subsd %xmm0, %xmm5
movapd %xmm8, %xmm0
movsd LCPI19_111(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
movapd %xmm1, %xmm11
movapd -848(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm1
movapd %xmm2, %xmm7
mulsd %xmm5, %xmm1
subsd %xmm0, %xmm1
movapd %xmm8, %xmm0
movsd LCPI19_115(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm0
movapd %xmm2, %xmm12
movapd %xmm6, %xmm2
movapd %xmm4, -8272(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm2
subsd %xmm0, %xmm2
movapd %xmm9, %xmm0
movapd %xmm3, -6192(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm0
addsd %xmm2, %xmm0
movapd %xmm14, %xmm2
mulsd -960(%rbp), %xmm2 ## 8-byte Folded Reload
movsd %xmm10, -2464(%rbp) ## 8-byte Spill
movapd %xmm10, %xmm4
movsd -1296(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm3, %xmm4
movsd %xmm13, -1648(%rbp) ## 8-byte Spill
addsd %xmm13, %xmm4
movsd %xmm4, -4328(%rbp) ## 8-byte Spill
mulsd %xmm4, %xmm3
addsd %xmm2, %xmm3
mulsd %xmm12, %xmm3
addsd %xmm0, %xmm3
movapd -2144(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm0
movapd %xmm2, %xmm6
mulsd %xmm3, %xmm0
subsd %xmm0, %xmm1
movapd %xmm14, %xmm0
mulsd -976(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm10, %xmm2
movsd -560(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm4, %xmm2
addsd %xmm13, %xmm2
movsd %xmm2, -4336(%rbp) ## 8-byte Spill
mulsd %xmm2, %xmm4
addsd %xmm0, %xmm4
movapd %xmm4, %xmm0
mulsd %xmm11, %xmm0
addsd %xmm1, %xmm0
movapd %xmm0, -7488(%rbp) ## 16-byte Spill
movsd LCPI19_30(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm8
movapd %xmm6, %xmm1
movapd %xmm5, -6176(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm1
subsd %xmm8, %xmm1
movapd %xmm7, %xmm0
movapd %xmm3, -8240(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm0
addsd %xmm1, %xmm0
mulsd %xmm2, %xmm4
addsd %xmm0, %xmm4
movapd %xmm4, -7504(%rbp) ## 16-byte Spill
movapd -2384(%rbp), %xmm0 ## 16-byte Reload
movapd -10656(%rbp), %xmm13 ## 16-byte Reload
mulsd %xmm13, %xmm0
subsd -7536(%rbp), %xmm0 ## 16-byte Folded Reload
movapd -2096(%rbp), %xmm15 ## 16-byte Reload
movapd %xmm15, %xmm1
mulsd -3904(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -2368(%rbp), %xmm0 ## 16-byte Reload
movapd -10640(%rbp), %xmm11 ## 16-byte Reload
mulsd %xmm11, %xmm0
addsd %xmm1, %xmm0
subsd -8592(%rbp), %xmm0 ## 16-byte Folded Reload
movapd -3328(%rbp), %xmm4 ## 16-byte Reload
movapd -144(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm3, %xmm4
movapd -992(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm4, %xmm1
movapd -1776(%rbp), %xmm2 ## 16-byte Reload
movapd -1760(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm7, %xmm2
addsd %xmm1, %xmm2
movapd -1568(%rbp), %xmm1 ## 16-byte Reload
movapd -2480(%rbp), %xmm9 ## 16-byte Reload
mulsd %xmm9, %xmm1
addsd %xmm2, %xmm1
movapd -7472(%rbp), %xmm2 ## 16-byte Reload
mulsd -832(%rbp), %xmm2 ## 16-byte Folded Reload
subsd %xmm1, %xmm2
movapd -2064(%rbp), %xmm1 ## 16-byte Reload
movsd -4928(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
mulsd %xmm12, %xmm1
subsd %xmm1, %xmm2
movapd -5392(%rbp), %xmm5 ## 16-byte Reload
mulsd -672(%rbp), %xmm5 ## 16-byte Folded Reload
addsd %xmm2, %xmm5
movapd -2608(%rbp), %xmm1 ## 16-byte Reload
movsd -3432(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd %xmm6, %xmm1
subsd %xmm1, %xmm5
movsd -4400(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
addsd %xmm5, %xmm1
addsd -7776(%rbp), %xmm1 ## 8-byte Folded Reload
movapd -9296(%rbp), %xmm2 ## 16-byte Reload
addsd %xmm1, %xmm2
addsd LCPI19_106(%rip), %xmm0
movapd -624(%rbp), %xmm10 ## 16-byte Reload
movapd %xmm10, %xmm1
movapd %xmm2, -9296(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm1
addsd %xmm0, %xmm1
movapd %xmm6, %xmm0
addsd %xmm6, %xmm12
movapd -4048(%rbp), %xmm0 ## 16-byte Reload
movapd -48(%rbp), %xmm8 ## 16-byte Reload
mulsd %xmm8, %xmm0
movapd %xmm12, %xmm5
subsd %xmm0, %xmm5
movapd %xmm8, %xmm2
movsd %xmm5, -4400(%rbp) ## 8-byte Spill
mulsd %xmm5, %xmm2
addsd -5568(%rbp), %xmm2 ## 16-byte Folded Reload
movapd -3712(%rbp), %xmm0 ## 16-byte Reload
movsd -320(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd %xmm6, %xmm0
addsd %xmm2, %xmm0
movapd %xmm0, %xmm2
mulsd LCPI19_25(%rip), %xmm2
subsd %xmm2, %xmm1
movapd %xmm3, %xmm2
movapd %xmm4, -7536(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm2
movapd %xmm9, %xmm4
mulsd -256(%rbp), %xmm4 ## 16-byte Folded Reload
addsd %xmm2, %xmm4
movapd %xmm4, %xmm2
movapd %xmm7, %xmm4
mulsd -2512(%rbp), %xmm4 ## 8-byte Folded Reload
addsd %xmm2, %xmm4
addsd -4720(%rbp), %xmm4 ## 16-byte Folded Reload
movapd %xmm4, %xmm2
mulsd LCPI19_103(%rip), %xmm2
addsd %xmm1, %xmm2
movapd -5552(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm10, %xmm5
movapd %xmm8, %xmm1
movapd %xmm5, -8592(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm1
addsd %xmm2, %xmm1
movapd -5536(%rbp), %xmm2 ## 16-byte Reload
mulsd -72(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm1, %xmm2
movapd -1824(%rbp), %xmm1 ## 16-byte Reload
mulsd -5408(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm2, %xmm1
subsd -8544(%rbp), %xmm1 ## 16-byte Folded Reload
movapd -1008(%rbp), %xmm2 ## 16-byte Reload
mulsd -2832(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm1, %xmm2
addsd -1136(%rbp), %xmm12 ## 8-byte Folded Reload
movapd %xmm14, %xmm1
mulsd %xmm8, %xmm1
movapd %xmm12, %xmm5
subsd %xmm1, %xmm5
movapd %xmm8, %xmm1
movsd %xmm5, -8544(%rbp) ## 8-byte Spill
mulsd %xmm5, %xmm1
movapd -6288(%rbp), %xmm5 ## 16-byte Reload
addsd %xmm1, %xmm5
movsd -2464(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm6, %xmm1
addsd %xmm5, %xmm1
movapd %xmm2, %xmm9
mulsd LCPI19_108(%rip), %xmm9
movapd %xmm1, %xmm5
mulsd LCPI19_114(%rip), %xmm5
subsd %xmm5, %xmm9
movapd -896(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm13, %xmm7
mulsd %xmm13, %xmm5
subsd -4392(%rbp), %xmm5 ## 8-byte Folded Reload
movapd %xmm15, %xmm6
movapd -4512(%rbp), %xmm10 ## 16-byte Reload
mulsd %xmm10, %xmm6
addsd %xmm5, %xmm6
movapd -1904(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm11, %xmm5
addsd %xmm6, %xmm5
subsd -8432(%rbp), %xmm5 ## 16-byte Folded Reload
addsd LCPI19_77(%rip), %xmm5
movsd -4384(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
addsd %xmm5, %xmm6
movapd %xmm4, %xmm5
movsd LCPI19_48(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm5
addsd %xmm6, %xmm5
movapd -8352(%rbp), %xmm6 ## 16-byte Reload
addsd %xmm5, %xmm6
movapd -9216(%rbp), %xmm13 ## 16-byte Reload
addsd %xmm6, %xmm13
movapd %xmm0, %xmm6
mulsd LCPI19_24(%rip), %xmm6
movapd -752(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm13, %xmm5
addsd %xmm6, %xmm5
movapd %xmm7, %xmm6
mulsd -1888(%rbp), %xmm6 ## 16-byte Folded Reload
subsd -8416(%rbp), %xmm6 ## 8-byte Folded Reload
movapd %xmm6, %xmm7
movapd %xmm15, %xmm6
movapd -4496(%rbp), %xmm15 ## 16-byte Reload
mulsd %xmm15, %xmm6
addsd %xmm7, %xmm6
movapd -64(%rbp), %xmm7 ## 16-byte Reload
mulsd -3904(%rbp), %xmm7 ## 16-byte Folded Reload
movapd -176(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm10, %xmm3
addsd %xmm7, %xmm3
movapd -736(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm15, %xmm10
movapd %xmm14, %xmm15
mulsd %xmm10, %xmm7
addsd %xmm3, %xmm7
mulsd LCPI19_14(%rip), %xmm7
addsd %xmm6, %xmm7
mulsd -1584(%rbp), %xmm11 ## 16-byte Folded Reload
addsd %xmm7, %xmm11
subsd -8384(%rbp), %xmm11 ## 16-byte Folded Reload
addsd LCPI19_78(%rip), %xmm11
movsd -8400(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
addsd %xmm11, %xmm3
movsd LCPI19_47(%rip), %xmm6 ## xmm6 = mem[0],zero
mulsd %xmm6, %xmm4
addsd %xmm3, %xmm4
subsd -4360(%rbp), %xmm4 ## 8-byte Folded Reload
movapd -9200(%rbp), %xmm6 ## 16-byte Reload
addsd %xmm4, %xmm6
movapd -496(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm11, %xmm3
mulsd %xmm6, %xmm3
addsd %xmm5, %xmm3
movsd -2032(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
movapd -5408(%rbp), %xmm14 ## 16-byte Reload
mulsd %xmm14, %xmm4
addsd %xmm3, %xmm4
subsd -8368(%rbp), %xmm4 ## 8-byte Folded Reload
movapd -1008(%rbp), %xmm3 ## 16-byte Reload
movapd -5424(%rbp), %xmm10 ## 16-byte Reload
mulsd %xmm10, %xmm3
addsd %xmm4, %xmm3
movsd -2192(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm8, %xmm5
movsd %xmm5, -4392(%rbp) ## 8-byte Spill
mulsd %xmm5, %xmm8
movsd -1360(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd -320(%rbp), %xmm5 ## 8-byte Folded Reload
addsd %xmm8, %xmm5
addsd -4064(%rbp), %xmm5 ## 16-byte Folded Reload
addsd LCPI19_79(%rip), %xmm3
movapd %xmm5, %xmm4
mulsd LCPI19_73(%rip), %xmm4
addsd %xmm3, %xmm4
mulsd LCPI19_15(%rip), %xmm2
mulsd LCPI19_22(%rip), %xmm1
subsd %xmm2, %xmm1
addsd %xmm4, %xmm1
movsd LCPI19_13(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm7, %xmm4
addsd %xmm9, %xmm4
mulsd LCPI19_23(%rip), %xmm0
movapd %xmm11, %xmm2
movapd %xmm13, -9216(%rbp) ## 16-byte Spill
mulsd %xmm13, %xmm2
subsd %xmm2, %xmm0
movapd -752(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm6, -9200(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm2
addsd %xmm0, %xmm2
movapd %xmm14, %xmm3
mulsd -1808(%rbp), %xmm3 ## 16-byte Folded Reload
addsd %xmm2, %xmm3
subsd -8336(%rbp), %xmm3 ## 16-byte Folded Reload
movapd -1008(%rbp), %xmm0 ## 16-byte Reload
movsd -1624(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd %xmm6, %xmm0
addsd %xmm3, %xmm0
movsd -2832(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd -64(%rbp), %xmm2 ## 16-byte Folded Reload
movapd %xmm10, %xmm3
mulsd -3280(%rbp), %xmm3 ## 16-byte Folded Reload
addsd %xmm2, %xmm3
mulsd -2592(%rbp), %xmm6 ## 16-byte Folded Reload
addsd %xmm3, %xmm6
mulsd LCPI19_64(%rip), %xmm6
addsd %xmm0, %xmm6
addsd LCPI19_80(%rip), %xmm6
mulsd LCPI19_72(%rip), %xmm5
addsd %xmm6, %xmm5
addsd %xmm5, %xmm4
mulsd %xmm7, %xmm5
addsd %xmm1, %xmm5
movapd -1616(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm3
mulsd %xmm4, %xmm3
movapd -1088(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm11, %xmm0
mulsd %xmm5, %xmm0
subsd %xmm0, %xmm3
movapd %xmm15, %xmm0
movsd -560(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
movapd %xmm12, %xmm2
subsd %xmm0, %xmm2
movsd -2464(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
movsd %xmm2, -4384(%rbp) ## 8-byte Spill
mulsd %xmm2, %xmm1
movapd %xmm9, %xmm0
mulsd -976(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
movapd -848(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm10
mulsd %xmm3, %xmm10
movapd %xmm0, %xmm1
mulsd LCPI19_111(%rip), %xmm1
subsd %xmm1, %xmm10
movapd %xmm15, %xmm1
movsd -1296(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd %xmm6, %xmm1
movapd %xmm12, %xmm2
subsd %xmm1, %xmm2
movapd %xmm6, %xmm1
movsd %xmm2, -8432(%rbp) ## 8-byte Spill
mulsd %xmm2, %xmm1
movapd %xmm9, %xmm2
mulsd -960(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm1, %xmm2
mulsd LCPI19_115(%rip), %xmm2
movapd %xmm7, %xmm1
movapd %xmm5, -8384(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm1
movsd -128(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
subsd %xmm2, %xmm1
movapd %xmm11, %xmm2
movapd %xmm4, -8368(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm2
addsd %xmm1, %xmm2
movapd -2144(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm1
mulsd %xmm2, %xmm1
subsd %xmm1, %xmm10
movapd %xmm5, %xmm1
movapd %xmm15, -4624(%rbp) ## 16-byte Spill
mulsd %xmm15, %xmm1
movsd %xmm12, -5424(%rbp) ## 8-byte Spill
movapd %xmm12, %xmm13
subsd %xmm1, %xmm13
movapd %xmm4, %xmm1
movapd %xmm3, -8352(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm1
mulsd LCPI19_30(%rip), %xmm0
subsd %xmm0, %xmm1
movapd %xmm8, %xmm12
movapd %xmm2, -8336(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm12
addsd %xmm1, %xmm12
movapd %xmm5, %xmm14
mulsd %xmm9, %xmm14
addsd -1648(%rbp), %xmm14 ## 8-byte Folded Reload
testq %rax, %rax
je LBB19_60
## %bb.59:
movsd LCPI19_119(%rip), %xmm1 ## xmm1 = mem[0],zero
movsd -928(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
movapd %xmm8, %xmm0
mulsd %xmm1, %xmm0
movapd %xmm1, %xmm4
movapd -3184(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm3
movapd -7488(%rbp), %xmm15 ## 16-byte Reload
mulsd %xmm15, %xmm3
subsd %xmm0, %xmm3
movapd -2528(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm11, %xmm0
movapd -7504(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm7, %xmm0
subsd %xmm0, %xmm3
movsd -280(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
movapd %xmm6, %xmm0
mulsd -4624(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm5, %xmm2
mulsd %xmm14, %xmm2
addsd %xmm0, %xmm2
movapd %xmm2, %xmm0
mulsd %xmm4, %xmm0
addsd %xmm3, %xmm0
addsd %xmm0, %xmm0
movapd %xmm11, %xmm4
mulsd %xmm10, %xmm4
mulsd %xmm13, %xmm5
movapd %xmm6, %xmm3
mulsd %xmm9, %xmm3
addsd %xmm5, %xmm3
movsd LCPI19_29(%rip), %xmm5 ## xmm5 = mem[0],zero
movapd %xmm3, %xmm6
mulsd %xmm5, %xmm6
subsd %xmm6, %xmm4
movapd %xmm1, %xmm6
mulsd %xmm12, %xmm6
addsd %xmm4, %xmm6
addsd %xmm6, %xmm6
subsd %xmm6, %xmm0
mulsd %xmm5, %xmm8
movapd %xmm11, %xmm6
mulsd %xmm15, %xmm6
subsd %xmm8, %xmm6
movapd %xmm1, %xmm4
mulsd %xmm7, %xmm4
addsd %xmm6, %xmm4
mulsd %xmm5, %xmm2
addsd %xmm4, %xmm2
mulsd LCPI19_119(%rip), %xmm3
mulsd %xmm10, %xmm1
subsd %xmm3, %xmm1
movapd %xmm11, %xmm3
mulsd %xmm12, %xmm3
subsd %xmm3, %xmm1
movsd -1536(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
movapd %xmm4, %xmm3
mulsd %xmm0, %xmm3
addsd %xmm2, %xmm1
movsd -1072(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm1, %xmm2
subsd %xmm2, %xmm3
movsd -2272(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm2, %xmm0
mulsd -880(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm0, %xmm1
mulsd %xmm4, %xmm3
movsd LCPI19_1(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm3
mulsd %xmm2, %xmm1
mulsd %xmm0, %xmm1
subsd %xmm1, %xmm3
movsd %xmm3, 56(%rax)
LBB19_60:
movsd %xmm14, -4312(%rbp) ## 8-byte Spill
movsd %xmm13, -4320(%rbp) ## 8-byte Spill
movapd %xmm12, -5392(%rbp) ## 16-byte Spill
movapd %xmm10, -5408(%rbp) ## 16-byte Spill
movsd -224(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
addsd -408(%rbp), %xmm3 ## 8-byte Folded Reload
movapd %xmm3, -1776(%rbp) ## 16-byte Spill
movsd -152(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
addsd -288(%rbp), %xmm9 ## 8-byte Folded Reload
movapd %xmm9, %xmm13
movsd %xmm9, -2480(%rbp) ## 8-byte Spill
addsd -640(%rbp), %xmm13 ## 16-byte Folded Reload
movapd %xmm13, %xmm1
divsd -6608(%rbp), %xmm1 ## 16-byte Folded Reload
movapd -3888(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm1, %xmm2
movapd %xmm2, %xmm0
mulsd -6624(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm3, %xmm0
addsd -1472(%rbp), %xmm0 ## 16-byte Folded Reload
movsd -456(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd %xmm0, %xmm6
movsd -5072(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm6, %xmm3
movsd LCPI19_1(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd %xmm4, %xmm3
movapd %xmm4, %xmm11
mulsd -4112(%rbp), %xmm3 ## 16-byte Folded Reload
mulsd -1704(%rbp), %xmm6 ## 8-byte Folded Reload
movsd -5104(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm6, %xmm4
mulsd LCPI19_132(%rip), %xmm4
movsd LCPI19_110(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm5, %xmm3
mulsd -4752(%rbp), %xmm4 ## 8-byte Folded Reload
divsd -4832(%rbp), %xmm4 ## 8-byte Folded Reload
mulsd -2896(%rbp), %xmm4 ## 16-byte Folded Reload
addsd %xmm3, %xmm4
movapd %xmm4, %xmm15
mulsd -5856(%rbp), %xmm13 ## 16-byte Folded Reload
mulsd -3152(%rbp), %xmm2 ## 8-byte Folded Reload
movapd %xmm2, %xmm3
mulsd %xmm11, %xmm3
subsd %xmm3, %xmm13
movsd LCPI19_44(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm2
mulsd -4784(%rbp), %xmm2 ## 8-byte Folded Reload
movsd LCPI19_45(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm2
subsd %xmm2, %xmm13
divsd -5840(%rbp), %xmm13 ## 16-byte Folded Reload
movapd -4208(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm13, %xmm2
mulsd -5248(%rbp), %xmm13 ## 16-byte Folded Reload
mulsd -5776(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm13
movapd -272(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm3
movapd %xmm1, %xmm7
mulsd %xmm2, %xmm3
movapd %xmm2, %xmm4
movapd %xmm2, -4528(%rbp) ## 16-byte Spill
movapd -592(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm1
mulsd %xmm13, %xmm1
subsd %xmm1, %xmm3
movapd %xmm3, %xmm10
mulsd -5728(%rbp), %xmm0 ## 8-byte Folded Reload
mulsd %xmm11, %xmm0
mulsd -5760(%rbp), %xmm0 ## 8-byte Folded Reload
movsd LCPI19_37(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
mulsd -1408(%rbp), %xmm6 ## 8-byte Folded Reload
movsd LCPI19_43(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm6
addsd %xmm0, %xmm6
movsd %xmm6, -6288(%rbp) ## 8-byte Spill
movapd -400(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm3
mulsd %xmm13, %xmm3
movapd %xmm7, %xmm0
mulsd %xmm6, %xmm0
subsd %xmm0, %xmm3
mulsd %xmm6, %xmm2
movapd %xmm1, %xmm0
mulsd %xmm4, %xmm0
subsd %xmm0, %xmm2
movapd -528(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm0
movapd %xmm10, %xmm5
mulsd %xmm10, %xmm0
subsd -7904(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm3, %xmm0
movapd -112(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm4
movapd %xmm1, %xmm12
mulsd %xmm2, %xmm4
addsd %xmm0, %xmm4
movapd -1248(%rbp), %xmm14 ## 16-byte Reload
movapd %xmm14, %xmm0
mulsd %xmm10, %xmm0
movapd %xmm10, %xmm6
movsd %xmm10, -5440(%rbp) ## 8-byte Spill
subsd -5472(%rbp), %xmm0 ## 8-byte Folded Reload
movapd -912(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm3, %xmm1
addsd %xmm0, %xmm1
movapd -608(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm2, %xmm5
movapd %xmm2, %xmm7
movapd %xmm2, -8416(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm5
subsd -5456(%rbp), %xmm5 ## 16-byte Folded Reload
movapd %xmm5, -3952(%rbp) ## 16-byte Spill
subsd -2688(%rbp), %xmm4 ## 8-byte Folded Reload
movapd %xmm4, -3072(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm8
movapd %xmm14, %xmm1
mulsd %xmm5, %xmm1
addsd %xmm8, %xmm1
movapd -1264(%rbp), %xmm14 ## 16-byte Reload
movapd %xmm14, %xmm0
mulsd %xmm10, %xmm0
subsd -6240(%rbp), %xmm0 ## 8-byte Folded Reload
movapd -1504(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm3, -2832(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm2
addsd %xmm0, %xmm2
movapd -720(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm7, %xmm3
addsd %xmm2, %xmm3
subsd -8608(%rbp), %xmm3 ## 16-byte Folded Reload
movapd %xmm3, -5568(%rbp) ## 16-byte Spill
movapd %xmm14, %xmm0
mulsd %xmm3, %xmm0
addsd %xmm1, %xmm0
addsd %xmm0, %xmm15
movapd %xmm0, %xmm1
movapd %xmm11, %xmm4
mulsd %xmm11, %xmm1
subsd %xmm1, %xmm15
movsd %xmm15, -4360(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm15
addsd -1232(%rbp), %xmm9 ## 16-byte Folded Reload
movapd %xmm9, %xmm11
mulsd -5824(%rbp), %xmm11 ## 16-byte Folded Reload
divsd -3168(%rbp), %xmm9 ## 16-byte Folded Reload
movapd -2944(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm9, %xmm2
movapd %xmm2, %xmm3
mulsd -536(%rbp), %xmm3 ## 8-byte Folded Reload
movapd %xmm3, %xmm5
mulsd %xmm4, %xmm5
movapd %xmm4, %xmm7
subsd %xmm5, %xmm11
mulsd LCPI19_44(%rip), %xmm3
mulsd -4144(%rbp), %xmm3 ## 8-byte Folded Reload
mulsd LCPI19_45(%rip), %xmm3
subsd %xmm3, %xmm11
divsd -5808(%rbp), %xmm11 ## 16-byte Folded Reload
movapd -4288(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm11, %xmm1
mulsd -776(%rbp), %xmm11 ## 8-byte Folded Reload
mulsd -1696(%rbp), %xmm9 ## 8-byte Folded Reload
subsd %xmm9, %xmm11
movapd -272(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm10
mulsd %xmm1, %xmm10
movapd %xmm1, %xmm6
movapd %xmm1, -5472(%rbp) ## 16-byte Spill
movapd -592(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm0
mulsd %xmm11, %xmm0
subsd %xmm0, %xmm10
mulsd -6592(%rbp), %xmm2 ## 16-byte Folded Reload
addsd -1776(%rbp), %xmm2 ## 16-byte Folded Reload
addsd -1456(%rbp), %xmm2 ## 16-byte Folded Reload
movsd -4768(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm2, %xmm0
mulsd %xmm7, %xmm0
mulsd -3760(%rbp), %xmm0 ## 8-byte Folded Reload
mulsd LCPI19_37(%rip), %xmm0
mulsd -1384(%rbp), %xmm2 ## 8-byte Folded Reload
movsd -4816(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
mulsd %xmm2, %xmm8
movsd -1152(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm8, %xmm5
mulsd LCPI19_43(%rip), %xmm5
addsd %xmm0, %xmm5
movsd %xmm5, -1624(%rbp) ## 8-byte Spill
movapd -400(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm9
mulsd %xmm11, %xmm9
mulsd %xmm5, %xmm4
subsd %xmm4, %xmm9
mulsd %xmm5, %xmm1
movapd %xmm3, %xmm0
mulsd %xmm6, %xmm0
subsd %xmm0, %xmm1
movapd -528(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm5
movapd %xmm10, %xmm4
mulsd %xmm10, %xmm5
subsd -7792(%rbp), %xmm5 ## 16-byte Folded Reload
addsd %xmm9, %xmm5
movapd %xmm12, %xmm10
mulsd %xmm1, %xmm10
movapd %xmm1, %xmm7
addsd %xmm5, %xmm10
movapd -1248(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm5
mulsd %xmm4, %xmm5
movsd %xmm4, -5456(%rbp) ## 8-byte Spill
subsd -6224(%rbp), %xmm5 ## 8-byte Folded Reload
movapd -912(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm9, %xmm6
addsd %xmm5, %xmm6
movapd -608(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm7, %xmm0
movapd %xmm7, %xmm12
movapd %xmm7, -8400(%rbp) ## 16-byte Spill
addsd %xmm6, %xmm0
subsd -6208(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm0, -6224(%rbp) ## 16-byte Spill
subsd -2672(%rbp), %xmm10 ## 8-byte Folded Reload
movapd %xmm3, %xmm5
movapd %xmm3, %xmm7
mulsd %xmm10, %xmm5
mulsd %xmm0, %xmm1
addsd %xmm5, %xmm1
movapd %xmm14, %xmm5
mulsd %xmm4, %xmm5
subsd -8464(%rbp), %xmm5 ## 8-byte Folded Reload
movapd -1504(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm9, -6240(%rbp) ## 16-byte Spill
mulsd %xmm9, %xmm0
addsd %xmm5, %xmm0
movapd -720(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm12, %xmm3
addsd %xmm0, %xmm3
subsd -8448(%rbp), %xmm3 ## 16-byte Folded Reload
movapd %xmm3, -6208(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm14
addsd %xmm1, %xmm14
mulsd -5088(%rbp), %xmm2 ## 8-byte Folded Reload
movsd LCPI19_1(%rip), %xmm9 ## xmm9 = mem[0],zero
mulsd %xmm9, %xmm2
mulsd -3696(%rbp), %xmm2 ## 16-byte Folded Reload
mulsd LCPI19_110(%rip), %xmm2
mulsd -1712(%rbp), %xmm8 ## 8-byte Folded Reload
mulsd LCPI19_133(%rip), %xmm8
mulsd -4096(%rbp), %xmm8 ## 8-byte Folded Reload
divsd -4848(%rbp), %xmm8 ## 8-byte Folded Reload
mulsd -1400(%rbp), %xmm8 ## 8-byte Folded Reload
addsd %xmm2, %xmm8
subsd %xmm14, %xmm15
addsd %xmm14, %xmm8
mulsd %xmm9, %xmm14
subsd %xmm14, %xmm8
addsd %xmm15, %xmm8
movsd -152(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
addsd -5232(%rbp), %xmm3 ## 16-byte Folded Reload
movapd %xmm3, %xmm1
divsd -2200(%rbp), %xmm1 ## 8-byte Folded Reload
movsd -760(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm1, %xmm2
movsd -2968(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd %xmm2, %xmm6
addsd -224(%rbp), %xmm6 ## 8-byte Folded Reload
addsd -3840(%rbp), %xmm6 ## 16-byte Folded Reload
movsd -4656(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm6, %xmm5
mulsd %xmm9, %xmm5
movapd -3312(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm5, %xmm0
mulsd -1168(%rbp), %xmm6 ## 8-byte Folded Reload
mulsd LCPI19_37(%rip), %xmm0
movsd -1656(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
mulsd %xmm6, %xmm12
movsd -3856(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm12, %xmm4
mulsd LCPI19_43(%rip), %xmm4
addsd %xmm0, %xmm4
movsd %xmm4, -3856(%rbp) ## 8-byte Spill
mulsd -1640(%rbp), %xmm2 ## 8-byte Folded Reload
mulsd -6672(%rbp), %xmm3 ## 8-byte Folded Reload
movapd %xmm2, %xmm0
mulsd %xmm9, %xmm0
subsd %xmm0, %xmm3
mulsd LCPI19_44(%rip), %xmm2
mulsd -3584(%rbp), %xmm2 ## 16-byte Folded Reload
mulsd LCPI19_45(%rip), %xmm2
subsd %xmm2, %xmm3
mulsd -4560(%rbp), %xmm1 ## 8-byte Folded Reload
divsd -6656(%rbp), %xmm3 ## 8-byte Folded Reload
mulsd -2712(%rbp), %xmm3 ## 8-byte Folded Reload
subsd %xmm1, %xmm3
movsd %xmm3, -2968(%rbp) ## 8-byte Spill
movapd -192(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm1
mulsd %xmm3, %xmm1
movapd %xmm7, %xmm14
movapd %xmm7, %xmm0
mulsd %xmm1, %xmm0
movapd %xmm1, %xmm7
movapd -7584(%rbp), %xmm1 ## 16-byte Reload
addsd %xmm0, %xmm1
mulsd %xmm4, %xmm2
movapd -112(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm2, %xmm0
movapd %xmm2, %xmm4
subsd %xmm1, %xmm0
subsd -7568(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm0, %xmm15
movsd %xmm0, -1168(%rbp) ## 8-byte Spill
movapd -2048(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm0
mulsd %xmm7, %xmm0
movapd %xmm7, -3312(%rbp) ## 16-byte Spill
movsd -7552(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
addsd %xmm0, %xmm2
movapd -3264(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm4, %xmm3
movapd %xmm4, -4560(%rbp) ## 16-byte Spill
subsd %xmm2, %xmm3
subsd -8640(%rbp), %xmm3 ## 16-byte Folded Reload
movapd %xmm3, -3584(%rbp) ## 16-byte Spill
movapd %xmm14, %xmm0
mulsd %xmm15, %xmm0
mulsd %xmm3, %xmm1
addsd %xmm0, %xmm1
movapd -1104(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm0
mulsd %xmm7, %xmm0
movsd -4416(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
addsd %xmm0, %xmm5
movapd -2560(%rbp), %xmm15 ## 16-byte Reload
mulsd %xmm4, %xmm15
subsd %xmm5, %xmm15
subsd -4408(%rbp), %xmm15 ## 8-byte Folded Reload
movsd %xmm15, -1640(%rbp) ## 8-byte Spill
movapd %xmm2, %xmm0
mulsd %xmm15, %xmm0
addsd %xmm1, %xmm0
movsd -1368(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm6, %xmm1
movapd %xmm9, %xmm3
mulsd %xmm9, %xmm1
movapd -3984(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm1, %xmm2
movapd -4080(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm12, %xmm1
mulsd LCPI19_134(%rip), %xmm1
movsd -3968(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm1, %xmm5
divsd -648(%rbp), %xmm5 ## 8-byte Folded Reload
movsd -2496(%rbp), %xmm14 ## 8-byte Reload
## xmm14 = mem[0],zero
mulsd %xmm5, %xmm14
mulsd LCPI19_110(%rip), %xmm2
addsd %xmm2, %xmm14
movapd %xmm8, %xmm1
subsd %xmm0, %xmm1
movsd %xmm1, -760(%rbp) ## 8-byte Spill
addsd %xmm0, %xmm14
mulsd %xmm9, %xmm0
subsd %xmm0, %xmm14
movapd -1856(%rbp), %xmm0 ## 16-byte Reload
mulsd -5440(%rbp), %xmm0 ## 8-byte Folded Reload
subsd -8528(%rbp), %xmm0 ## 16-byte Folded Reload
movapd -1184(%rbp), %xmm1 ## 16-byte Reload
mulsd -3072(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -2400(%rbp), %xmm0 ## 16-byte Reload
mulsd -5456(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
subsd -8512(%rbp), %xmm0 ## 16-byte Folded Reload
mulsd -1872(%rbp), %xmm10 ## 8-byte Folded Reload
addsd %xmm0, %xmm10
movapd %xmm8, %xmm1
movsd LCPI19_25(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm1
addsd %xmm10, %xmm1
movsd LCPI19_50(%rip), %xmm2 ## xmm2 = mem[0],zero
movsd -2480(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm2, %xmm4
movapd %xmm4, %xmm0
mulsd -400(%rbp), %xmm0 ## 16-byte Folded Reload
movapd -1776(%rbp), %xmm12 ## 16-byte Reload
mulsd %xmm2, %xmm12
movapd %xmm12, %xmm2
movapd -272(%rbp), %xmm10 ## 16-byte Reload
mulsd %xmm10, %xmm2
addsd %xmm0, %xmm2
movapd -2640(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm2, %xmm0
movapd %xmm2, %xmm7
movsd %xmm2, -2496(%rbp) ## 8-byte Spill
movapd %xmm4, %xmm2
mulsd -2256(%rbp), %xmm2 ## 16-byte Folded Reload
subsd %xmm2, %xmm0
movapd %xmm12, %xmm6
movapd -592(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm3, %xmm6
movsd -1552(%rbp), %xmm15 ## 8-byte Reload
## xmm15 = mem[0],zero
movapd %xmm15, %xmm2
mulsd %xmm6, %xmm2
addsd %xmm0, %xmm2
movapd -1472(%rbp), %xmm0 ## 16-byte Reload
mulsd -4528(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm2
movapd -1840(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm13, %xmm0
addsd %xmm2, %xmm0
movapd -1456(%rbp), %xmm2 ## 16-byte Reload
mulsd -5472(%rbp), %xmm2 ## 16-byte Folded Reload
subsd %xmm2, %xmm0
movapd -2624(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm11, %xmm2
addsd %xmm0, %xmm2
addsd -7808(%rbp), %xmm2 ## 16-byte Folded Reload
movapd -8496(%rbp), %xmm0 ## 16-byte Reload
addsd %xmm2, %xmm0
movapd -9280(%rbp), %xmm2 ## 16-byte Reload
addsd %xmm0, %xmm2
movapd -688(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm0
movapd %xmm2, -9280(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm0
addsd %xmm1, %xmm0
movsd -4376(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
addsd %xmm0, %xmm1
movsd -8480(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
addsd %xmm1, %xmm2
movapd %xmm4, %xmm5
mulsd -784(%rbp), %xmm5 ## 8-byte Folded Reload
movapd %xmm10, %xmm1
mulsd %xmm7, %xmm1
subsd %xmm1, %xmm5
movapd %xmm3, %xmm1
movsd %xmm6, -7584(%rbp) ## 8-byte Spill
mulsd %xmm6, %xmm1
subsd %xmm1, %xmm5
addsd -7824(%rbp), %xmm5 ## 8-byte Folded Reload
movapd %xmm5, %xmm1
movsd LCPI19_103(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm1
subsd %xmm1, %xmm2
movapd -8192(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm4, %xmm3
movsd %xmm4, -2480(%rbp) ## 8-byte Spill
subsd %xmm4, %xmm1
movsd %xmm1, -1368(%rbp) ## 8-byte Spill
mulsd -480(%rbp), %xmm1 ## 8-byte Folded Reload
addsd -6272(%rbp), %xmm1 ## 16-byte Folded Reload
movsd %xmm13, -8640(%rbp) ## 8-byte Spill
movsd %xmm11, -4416(%rbp) ## 8-byte Spill
addsd %xmm11, %xmm13
movsd %xmm13, -1656(%rbp) ## 8-byte Spill
movapd %xmm12, %xmm4
movapd %xmm12, -1776(%rbp) ## 16-byte Spill
movapd %xmm12, %xmm0
addsd -9520(%rbp), %xmm0 ## 16-byte Folded Reload
movsd %xmm0, -4064(%rbp) ## 8-byte Spill
movapd -192(%rbp), %xmm11 ## 16-byte Reload
mulsd %xmm11, %xmm0
addsd %xmm13, %xmm0
movapd %xmm11, %xmm12
movsd %xmm0, -4408(%rbp) ## 8-byte Spill
mulsd %xmm0, %xmm12
addsd %xmm1, %xmm12
movapd %xmm12, %xmm1
mulsd LCPI19_25(%rip), %xmm1
subsd %xmm1, %xmm2
movapd %xmm3, %xmm1
mulsd %xmm15, %xmm1
movapd -6064(%rbp), %xmm0 ## 16-byte Reload
subsd %xmm1, %xmm0
movapd %xmm0, -3984(%rbp) ## 16-byte Spill
movapd %xmm0, %xmm1
mulsd -2336(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm2, %xmm1
movapd %xmm4, %xmm2
mulsd %xmm15, %xmm2
movapd -6048(%rbp), %xmm0 ## 16-byte Reload
subsd %xmm2, %xmm0
movapd %xmm0, -3968(%rbp) ## 16-byte Spill
movapd %xmm0, %xmm2
mulsd %xmm9, %xmm2
movapd %xmm2, -8608(%rbp) ## 16-byte Spill
mulsd %xmm11, %xmm2
subsd %xmm2, %xmm1
movapd -2544(%rbp), %xmm2 ## 16-byte Reload
movapd -3312(%rbp), %xmm11 ## 16-byte Reload
mulsd %xmm11, %xmm2
subsd %xmm2, %xmm1
subsd -4368(%rbp), %xmm1 ## 8-byte Folded Reload
movapd -2352(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm2
movapd %xmm0, %xmm15
mulsd -1168(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm1, %xmm2
movsd -760(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
addsd %xmm14, %xmm3
movsd %xmm3, -760(%rbp) ## 8-byte Spill
movapd %xmm2, %xmm1
movsd LCPI19_108(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm1
movsd LCPI19_21(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm3
subsd %xmm1, %xmm3
movsd %xmm3, -648(%rbp) ## 8-byte Spill
movapd %xmm8, %xmm3
movsd LCPI19_24(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm3
movsd LCPI19_112(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm12, %xmm1
addsd %xmm3, %xmm1
movapd -1600(%rbp), %xmm3 ## 16-byte Reload
movsd -5440(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
mulsd %xmm9, %xmm3
subsd -7264(%rbp), %xmm3 ## 8-byte Folded Reload
movapd -1184(%rbp), %xmm10 ## 16-byte Reload
movapd %xmm10, %xmm0
mulsd -5568(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm3, %xmm0
movsd LCPI19_14(%rip), %xmm3 ## xmm3 = mem[0],zero
movsd -4360(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm3, %xmm4
addsd %xmm0, %xmm4
movapd -2128(%rbp), %xmm0 ## 16-byte Reload
movsd -5456(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd %xmm6, %xmm0
addsd %xmm4, %xmm0
subsd -12240(%rbp), %xmm0 ## 16-byte Folded Reload
movapd -6208(%rbp), %xmm3 ## 16-byte Reload
movsd -1872(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm7, %xmm3
addsd %xmm0, %xmm3
movapd -12224(%rbp), %xmm0 ## 16-byte Reload
addsd %xmm3, %xmm0
movapd -12208(%rbp), %xmm3 ## 16-byte Reload
addsd %xmm0, %xmm3
movapd -12192(%rbp), %xmm4 ## 16-byte Reload
addsd %xmm3, %xmm4
movapd %xmm5, %xmm0
movsd LCPI19_47(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm0
addsd %xmm4, %xmm0
movsd -7256(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
addsd %xmm0, %xmm3
movapd -9232(%rbp), %xmm13 ## 16-byte Reload
addsd %xmm3, %xmm13
movapd -448(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm13, %xmm4
addsd %xmm1, %xmm4
movapd %xmm9, %xmm0
mulsd -1920(%rbp), %xmm0 ## 16-byte Folded Reload
subsd -7248(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm0, %xmm1
mulsd -3952(%rbp), %xmm10 ## 16-byte Folded Reload
addsd %xmm0, %xmm10
movapd %xmm6, %xmm1
mulsd -2432(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm10, %xmm1
subsd -12176(%rbp), %xmm1 ## 16-byte Folded Reload
movapd -6224(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm7, %xmm0
addsd %xmm1, %xmm0
movapd %xmm0, %xmm1
movsd -8576(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
addsd %xmm1, %xmm0
movapd -3088(%rbp), %xmm1 ## 16-byte Reload
addsd %xmm0, %xmm1
movapd -8560(%rbp), %xmm3 ## 16-byte Reload
addsd %xmm1, %xmm3
movsd LCPI19_48(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm5
addsd %xmm3, %xmm5
movapd -6480(%rbp), %xmm9 ## 16-byte Reload
addsd %xmm5, %xmm9
subsd -7240(%rbp), %xmm9 ## 8-byte Folded Reload
movapd -512(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm0
mulsd %xmm9, %xmm0
addsd %xmm4, %xmm0
movapd -2912(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm11, %xmm1
subsd %xmm1, %xmm0
subsd -12160(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm15, %xmm1
movapd %xmm15, %xmm5
mulsd -3584(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movsd -152(%rbp), %xmm15 ## 8-byte Reload
## xmm15 = mem[0],zero
movsd LCPI19_74(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm15
movapd %xmm15, %xmm0
movsd %xmm15, -2200(%rbp) ## 8-byte Spill
movsd -480(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd %xmm6, %xmm0
movsd -224(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm3, %xmm4
movsd %xmm4, -4080(%rbp) ## 8-byte Spill
movapd -192(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm3, %xmm4
movsd %xmm4, -5456(%rbp) ## 8-byte Spill
mulsd %xmm4, %xmm3
subsd %xmm3, %xmm0
addsd -7232(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm0, %xmm4
movsd LCPI19_73(%rip), %xmm11 ## xmm11 = mem[0],zero
mulsd %xmm11, %xmm4
addsd %xmm1, %xmm4
movsd LCPI19_15(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm2
movsd -760(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
movapd %xmm10, %xmm1
movsd LCPI19_31(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm1
subsd %xmm1, %xmm2
addsd %xmm4, %xmm2
movsd LCPI19_13(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm4
addsd -648(%rbp), %xmm4 ## 8-byte Folded Reload
movsd LCPI19_23(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm8
mulsd %xmm1, %xmm12
subsd %xmm12, %xmm8
movapd %xmm13, -9232(%rbp) ## 16-byte Spill
mulsd %xmm13, %xmm7
addsd %xmm8, %xmm7
movapd -448(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm9, -6480(%rbp) ## 16-byte Spill
mulsd %xmm9, %xmm3
subsd %xmm3, %xmm7
movapd -3312(%rbp), %xmm3 ## 16-byte Reload
mulsd -3248(%rbp), %xmm3 ## 16-byte Folded Reload
subsd %xmm3, %xmm7
subsd -7224(%rbp), %xmm7 ## 8-byte Folded Reload
movapd %xmm5, %xmm3
mulsd -1640(%rbp), %xmm3 ## 8-byte Folded Reload
addsd %xmm7, %xmm3
movsd LCPI19_64(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm14
addsd %xmm3, %xmm14
movsd LCPI19_72(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
addsd %xmm14, %xmm0
movsd -1368(%rbp), %xmm14 ## 8-byte Reload
## xmm14 = mem[0],zero
subsd %xmm15, %xmm14
addsd -3568(%rbp), %xmm14 ## 8-byte Folded Reload
movapd %xmm14, %xmm1
mulsd %xmm6, %xmm1
addsd -12144(%rbp), %xmm1 ## 16-byte Folded Reload
movsd -1656(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
addsd -2968(%rbp), %xmm3 ## 8-byte Folded Reload
movsd -4080(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
addsd -4064(%rbp), %xmm9 ## 8-byte Folded Reload
addsd -10592(%rbp), %xmm9 ## 16-byte Folded Reload
movapd %xmm9, %xmm5
movapd -192(%rbp), %xmm15 ## 16-byte Reload
mulsd %xmm15, %xmm5
addsd %xmm3, %xmm5
movapd %xmm3, %xmm11
movsd %xmm3, -1656(%rbp) ## 8-byte Spill
movapd %xmm15, %xmm3
movapd %xmm5, -8576(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm3
addsd %xmm1, %xmm3
addsd %xmm0, %xmm4
movsd LCPI19_114(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm3, %xmm5
addsd %xmm4, %xmm5
mulsd LCPI19_13(%rip), %xmm0
addsd %xmm2, %xmm0
mulsd LCPI19_31(%rip), %xmm3
addsd %xmm0, %xmm3
movapd -1312(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm4
movapd %xmm0, %xmm7
mulsd %xmm5, %xmm4
movapd -1328(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm0
movapd %xmm1, %xmm12
mulsd %xmm3, %xmm0
subsd %xmm0, %xmm4
movapd %xmm10, %xmm2
movapd %xmm10, %xmm0
movsd LCPI19_111(%rip), %xmm10 ## xmm10 = mem[0],zero
mulsd %xmm10, %xmm0
movapd -1216(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm1
mulsd %xmm4, %xmm1
subsd %xmm0, %xmm1
movapd %xmm2, %xmm0
movapd %xmm2, %xmm8
movsd LCPI19_115(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm0
movapd %xmm2, %xmm13
movapd %xmm7, %xmm2
movapd %xmm3, -8528(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm2
subsd %xmm0, %xmm2
movapd %xmm12, %xmm0
movapd %xmm5, -8512(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm0
addsd %xmm2, %xmm0
movapd %xmm14, %xmm2
mulsd -2800(%rbp), %xmm2 ## 8-byte Folded Reload
movapd %xmm9, %xmm5
movsd -1200(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm3, %xmm5
addsd %xmm11, %xmm5
movapd %xmm5, -8560(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm3
addsd %xmm2, %xmm3
mulsd %xmm13, %xmm3
addsd %xmm0, %xmm3
movapd -1936(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm0
movapd %xmm2, %xmm12
mulsd %xmm3, %xmm0
subsd %xmm0, %xmm1
movapd %xmm14, %xmm0
mulsd -1280(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm9, %xmm2
movapd %xmm9, -3312(%rbp) ## 16-byte Spill
movsd -704(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm5, %xmm2
addsd %xmm11, %xmm2
movapd %xmm2, -5440(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm5
addsd %xmm0, %xmm5
movapd %xmm5, %xmm0
mulsd %xmm10, %xmm0
addsd %xmm1, %xmm0
movapd %xmm0, -7552(%rbp) ## 16-byte Spill
movapd %xmm8, %xmm0
movsd LCPI19_30(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm0
movapd %xmm12, %xmm1
movapd %xmm4, -8496(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm1
subsd %xmm0, %xmm1
movapd %xmm6, %xmm0
movapd %xmm3, -8480(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm0
addsd %xmm1, %xmm0
mulsd %xmm2, %xmm5
addsd %xmm0, %xmm5
movapd %xmm5, -7568(%rbp) ## 16-byte Spill
movapd -1856(%rbp), %xmm0 ## 16-byte Reload
movapd -8416(%rbp), %xmm11 ## 16-byte Reload
mulsd %xmm11, %xmm0
subsd -8848(%rbp), %xmm0 ## 16-byte Folded Reload
movapd -1440(%rbp), %xmm1 ## 16-byte Reload
mulsd -3072(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -2400(%rbp), %xmm0 ## 16-byte Reload
movapd -8400(%rbp), %xmm13 ## 16-byte Reload
mulsd %xmm13, %xmm0
addsd %xmm1, %xmm0
subsd -8816(%rbp), %xmm0 ## 16-byte Folded Reload
movsd -2480(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
movapd -592(%rbp), %xmm8 ## 16-byte Reload
mulsd %xmm8, %xmm3
movsd -1552(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm3, %xmm1
movapd -2256(%rbp), %xmm2 ## 16-byte Reload
movapd -1776(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm4, %xmm2
addsd %xmm1, %xmm2
movapd -2816(%rbp), %xmm1 ## 16-byte Reload
movsd -2496(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd %xmm6, %xmm1
addsd %xmm2, %xmm1
movapd -4528(%rbp), %xmm2 ## 16-byte Reload
mulsd -640(%rbp), %xmm2 ## 16-byte Folded Reload
subsd %xmm1, %xmm2
movapd -1840(%rbp), %xmm1 ## 16-byte Reload
movsd -6288(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
mulsd %xmm12, %xmm1
subsd %xmm1, %xmm2
movapd -5472(%rbp), %xmm5 ## 16-byte Reload
mulsd -1232(%rbp), %xmm5 ## 16-byte Folded Reload
addsd %xmm2, %xmm5
movapd -2624(%rbp), %xmm1 ## 16-byte Reload
movsd -1624(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm7, %xmm1
subsd %xmm1, %xmm5
movsd -8832(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
addsd %xmm5, %xmm2
addsd -7888(%rbp), %xmm2 ## 16-byte Folded Reload
movapd -9424(%rbp), %xmm1 ## 16-byte Reload
addsd %xmm2, %xmm1
addsd LCPI19_46(%rip), %xmm0
movapd -688(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm2
movapd %xmm1, -9424(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm2
addsd %xmm0, %xmm2
movapd %xmm7, %xmm0
addsd %xmm7, %xmm12
movsd -1368(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm15, %xmm0
movapd %xmm12, %xmm1
subsd %xmm0, %xmm1
movapd %xmm15, %xmm0
movsd %xmm1, -8848(%rbp) ## 8-byte Spill
mulsd %xmm1, %xmm0
addsd -5008(%rbp), %xmm0 ## 16-byte Folded Reload
movsd -4064(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movsd -480(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm7, %xmm1
addsd %xmm0, %xmm1
movapd %xmm1, %xmm0
mulsd LCPI19_25(%rip), %xmm0
addsd %xmm2, %xmm0
movapd %xmm8, %xmm2
movsd %xmm3, -6272(%rbp) ## 8-byte Spill
mulsd %xmm3, %xmm2
movapd %xmm6, %xmm3
mulsd -400(%rbp), %xmm3 ## 16-byte Folded Reload
addsd %xmm2, %xmm3
movapd %xmm3, %xmm2
movapd %xmm4, %xmm3
mulsd -784(%rbp), %xmm3 ## 8-byte Folded Reload
addsd %xmm2, %xmm3
addsd -7872(%rbp), %xmm3 ## 16-byte Folded Reload
movapd %xmm3, %xmm2
mulsd LCPI19_103(%rip), %xmm2
subsd %xmm2, %xmm0
movapd -3984(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm5, %xmm2
movapd %xmm2, -8832(%rbp) ## 16-byte Spill
mulsd %xmm15, %xmm2
addsd %xmm0, %xmm2
movapd -3968(%rbp), %xmm0 ## 16-byte Reload
mulsd -2336(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm2, %xmm0
movapd -2544(%rbp), %xmm2 ## 16-byte Reload
mulsd -4560(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm0, %xmm2
subsd -8800(%rbp), %xmm2 ## 16-byte Folded Reload
movapd -2576(%rbp), %xmm8 ## 16-byte Reload
mulsd -1168(%rbp), %xmm8 ## 8-byte Folded Reload
addsd %xmm2, %xmm8
movapd %xmm12, %xmm5
addsd -3856(%rbp), %xmm5 ## 8-byte Folded Reload
movsd %xmm5, -4528(%rbp) ## 8-byte Spill
movapd %xmm14, %xmm2
mulsd %xmm15, %xmm2
subsd %xmm2, %xmm5
movapd %xmm15, %xmm2
movsd %xmm5, -8816(%rbp) ## 8-byte Spill
mulsd %xmm5, %xmm2
movapd -4992(%rbp), %xmm5 ## 16-byte Reload
addsd %xmm2, %xmm5
movapd %xmm9, %xmm10
mulsd %xmm7, %xmm10
addsd %xmm5, %xmm10
movapd %xmm8, %xmm5
mulsd LCPI19_108(%rip), %xmm5
movapd %xmm10, %xmm12
mulsd LCPI19_21(%rip), %xmm12
subsd %xmm5, %xmm12
movapd -1920(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm11, %xmm0
mulsd %xmm11, %xmm5
subsd -4424(%rbp), %xmm5 ## 8-byte Folded Reload
movapd -1440(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm6
movapd -3952(%rbp), %xmm11 ## 16-byte Reload
mulsd %xmm11, %xmm6
addsd %xmm5, %xmm6
movapd -2432(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm13, %xmm5
addsd %xmm6, %xmm5
subsd -8784(%rbp), %xmm5 ## 16-byte Folded Reload
addsd LCPI19_77(%rip), %xmm5
movapd -8768(%rbp), %xmm2 ## 16-byte Reload
addsd %xmm5, %xmm2
movapd %xmm3, %xmm5
movsd LCPI19_48(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd %xmm4, %xmm5
addsd %xmm2, %xmm5
movapd -8752(%rbp), %xmm6 ## 16-byte Reload
addsd %xmm5, %xmm6
movapd -5920(%rbp), %xmm2 ## 16-byte Reload
addsd %xmm6, %xmm2
movapd %xmm1, %xmm6
mulsd LCPI19_24(%rip), %xmm6
movapd -512(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm2, %xmm5
addsd %xmm6, %xmm5
movapd %xmm0, %xmm6
mulsd -1600(%rbp), %xmm6 ## 16-byte Folded Reload
subsd -8704(%rbp), %xmm6 ## 8-byte Folded Reload
movapd %xmm6, %xmm7
movapd %xmm9, %xmm6
movapd -5568(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm0, %xmm6
addsd %xmm7, %xmm6
movapd -112(%rbp), %xmm7 ## 16-byte Reload
mulsd -3072(%rbp), %xmm7 ## 16-byte Folded Reload
movapd -608(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm11, %xmm4
addsd %xmm7, %xmm4
movapd -720(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm0, %xmm7
addsd %xmm4, %xmm7
mulsd LCPI19_14(%rip), %xmm7
addsd %xmm6, %xmm7
mulsd -2128(%rbp), %xmm13 ## 16-byte Folded Reload
addsd %xmm7, %xmm13
subsd -8736(%rbp), %xmm13 ## 16-byte Folded Reload
addsd LCPI19_78(%rip), %xmm13
movapd -6256(%rbp), %xmm4 ## 16-byte Reload
addsd %xmm13, %xmm4
movsd LCPI19_47(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm3
addsd %xmm4, %xmm3
subsd -8720(%rbp), %xmm3 ## 16-byte Folded Reload
movapd -5904(%rbp), %xmm6 ## 16-byte Reload
addsd %xmm3, %xmm6
movapd -448(%rbp), %xmm13 ## 16-byte Reload
movapd %xmm13, %xmm3
mulsd %xmm6, %xmm3
addsd %xmm5, %xmm3
movapd -2912(%rbp), %xmm4 ## 16-byte Reload
movapd -4560(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm0, %xmm4
addsd %xmm3, %xmm4
subsd -8688(%rbp), %xmm4 ## 16-byte Folded Reload
movapd -2576(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm3
movapd -3584(%rbp), %xmm11 ## 16-byte Reload
mulsd %xmm11, %xmm3
addsd %xmm4, %xmm3
movsd -2200(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm15, %xmm5
movsd %xmm5, -8800(%rbp) ## 8-byte Spill
mulsd %xmm5, %xmm15
movsd -4080(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd -480(%rbp), %xmm5 ## 8-byte Folded Reload
addsd %xmm15, %xmm5
addsd -4128(%rbp), %xmm5 ## 16-byte Folded Reload
addsd LCPI19_79(%rip), %xmm3
movsd LCPI19_73(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd %xmm5, %xmm4
addsd %xmm3, %xmm4
movapd %xmm4, %xmm7
movsd LCPI19_13(%rip), %xmm15 ## xmm15 = mem[0],zero
mulsd %xmm15, %xmm7
addsd %xmm12, %xmm7
mulsd LCPI19_23(%rip), %xmm1
movapd %xmm13, %xmm3
movapd %xmm2, -5920(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm3
subsd %xmm3, %xmm1
movapd -512(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm6, -5904(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm3
addsd %xmm1, %xmm3
mulsd -3248(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm3, %xmm0
subsd -8672(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm14, %xmm13
movsd -1640(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm3, %xmm9
addsd %xmm0, %xmm9
movsd -1168(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -112(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm11, %xmm2
mulsd -3264(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm0, %xmm2
movapd %xmm3, %xmm0
mulsd -2560(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm2, %xmm0
mulsd LCPI19_64(%rip), %xmm0
addsd %xmm9, %xmm0
mulsd LCPI19_72(%rip), %xmm5
addsd LCPI19_80(%rip), %xmm0
addsd %xmm0, %xmm5
mulsd LCPI19_15(%rip), %xmm8
mulsd LCPI19_31(%rip), %xmm10
subsd %xmm10, %xmm8
movapd -3312(%rbp), %xmm10 ## 16-byte Reload
addsd %xmm4, %xmm8
addsd %xmm5, %xmm7
mulsd %xmm15, %xmm5
addsd %xmm8, %xmm5
movapd -1312(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm11, %xmm3
mulsd %xmm7, %xmm3
movapd -1328(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm0
mulsd %xmm5, %xmm0
subsd %xmm0, %xmm3
movapd %xmm14, %xmm0
movsd -704(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
movsd -4528(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
movapd %xmm6, %xmm2
subsd %xmm0, %xmm2
movsd %xmm2, -4424(%rbp) ## 8-byte Spill
mulsd %xmm2, %xmm1
movapd %xmm10, %xmm0
mulsd -1280(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
movapd -1216(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm14
mulsd %xmm3, %xmm14
movapd %xmm0, %xmm1
mulsd LCPI19_111(%rip), %xmm1
subsd %xmm1, %xmm14
movapd %xmm13, %xmm1
movsd -1200(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm4, %xmm1
movapd %xmm6, %xmm2
movapd %xmm6, %xmm12
subsd %xmm1, %xmm2
movapd %xmm4, %xmm1
movsd %xmm2, -8784(%rbp) ## 8-byte Spill
mulsd %xmm2, %xmm1
movapd %xmm10, %xmm2
mulsd -2800(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm1, %xmm2
mulsd LCPI19_115(%rip), %xmm2
movapd %xmm11, %xmm1
movapd %xmm5, -8720(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm1
movsd -128(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
subsd %xmm2, %xmm1
movapd %xmm9, %xmm2
movapd %xmm7, -8704(%rbp) ## 16-byte Spill
mulsd %xmm7, %xmm2
addsd %xmm1, %xmm2
movapd -1936(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm1
mulsd %xmm2, %xmm1
subsd %xmm1, %xmm14
movapd %xmm5, %xmm1
mulsd %xmm13, %xmm1
subsd %xmm1, %xmm12
movapd %xmm4, %xmm1
movapd %xmm3, -8688(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm1
mulsd LCPI19_30(%rip), %xmm0
subsd %xmm0, %xmm1
movapd %xmm8, %xmm7
movapd %xmm2, -8672(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm7
addsd %xmm1, %xmm7
movapd %xmm5, %xmm15
mulsd %xmm10, %xmm15
addsd -1656(%rbp), %xmm15 ## 8-byte Folded Reload
testq %rax, %rax
movsd LCPI19_1(%rip), %xmm9 ## xmm9 = mem[0],zero
movapd %xmm13, -4656(%rbp) ## 16-byte Spill
je LBB19_62
## %bb.61:
movsd LCPI19_119(%rip), %xmm1 ## xmm1 = mem[0],zero
movsd -760(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
movapd %xmm8, %xmm0
mulsd %xmm1, %xmm0
movapd %xmm1, %xmm4
movapd -2880(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm3
movapd -7552(%rbp), %xmm9 ## 16-byte Reload
mulsd %xmm9, %xmm3
subsd %xmm0, %xmm3
movapd -2784(%rbp), %xmm13 ## 16-byte Reload
movapd %xmm13, %xmm0
movapd -7568(%rbp), %xmm11 ## 16-byte Reload
mulsd %xmm11, %xmm0
subsd %xmm0, %xmm3
movsd -280(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
movapd %xmm6, %xmm0
mulsd -4656(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm5, %xmm2
mulsd %xmm15, %xmm2
addsd %xmm0, %xmm2
movapd %xmm2, %xmm0
mulsd %xmm4, %xmm0
addsd %xmm3, %xmm0
addsd %xmm0, %xmm0
movapd %xmm13, %xmm4
mulsd %xmm14, %xmm4
mulsd %xmm12, %xmm5
movapd %xmm6, %xmm3
mulsd %xmm10, %xmm3
addsd %xmm5, %xmm3
movsd LCPI19_29(%rip), %xmm5 ## xmm5 = mem[0],zero
movapd %xmm3, %xmm6
mulsd %xmm5, %xmm6
subsd %xmm6, %xmm4
movapd %xmm1, %xmm6
mulsd %xmm7, %xmm6
addsd %xmm4, %xmm6
addsd %xmm6, %xmm6
subsd %xmm6, %xmm0
mulsd %xmm5, %xmm8
movapd %xmm13, %xmm6
mulsd %xmm9, %xmm6
subsd %xmm8, %xmm6
movapd %xmm1, %xmm4
mulsd %xmm11, %xmm4
addsd %xmm6, %xmm4
movsd LCPI19_1(%rip), %xmm6 ## xmm6 = mem[0],zero
mulsd %xmm5, %xmm2
addsd %xmm4, %xmm2
mulsd LCPI19_119(%rip), %xmm3
mulsd %xmm14, %xmm1
subsd %xmm3, %xmm1
movapd %xmm13, %xmm3
mulsd %xmm7, %xmm3
subsd %xmm3, %xmm1
movsd -1536(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
movapd %xmm4, %xmm3
mulsd %xmm0, %xmm3
addsd %xmm2, %xmm1
movsd -1072(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm1, %xmm2
subsd %xmm2, %xmm3
movsd -2272(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm2, %xmm0
mulsd -880(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm0, %xmm1
mulsd %xmm4, %xmm3
mulsd %xmm6, %xmm3
mulsd %xmm2, %xmm1
movapd %xmm6, %xmm9
mulsd %xmm6, %xmm1
subsd %xmm1, %xmm3
movsd %xmm3, 64(%rax)
LBB19_62:
movsd %xmm15, -4368(%rbp) ## 8-byte Spill
movsd %xmm12, -4376(%rbp) ## 8-byte Spill
movapd %xmm7, -8464(%rbp) ## 16-byte Spill
movapd %xmm14, -6208(%rbp) ## 16-byte Spill
movapd -4672(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm11
movapd -832(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm1, %xmm11
movapd %xmm1, %xmm7
movapd -208(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm1
movapd %xmm2, %xmm8
movapd -2160(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm2, %xmm1
movapd %xmm2, %xmm4
subsd %xmm1, %xmm11
movapd -5056(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm1
movapd %xmm2, %xmm5
mulsd -1672(%rbp), %xmm5 ## 8-byte Folded Reload
movapd -3408(%rbp), %xmm2 ## 16-byte Reload
divsd %xmm2, %xmm5
movapd %xmm5, -5056(%rbp) ## 16-byte Spill
divsd %xmm2, %xmm5
mulsd %xmm11, %xmm5
movsd %xmm5, -3088(%rbp) ## 8-byte Spill
divsd %xmm2, %xmm11
movapd -2064(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm15
mulsd %xmm8, %xmm15
subsd %xmm7, %xmm15
movapd %xmm15, %xmm5
divsd %xmm2, %xmm5
movapd %xmm5, -1168(%rbp) ## 16-byte Spill
mulsd %xmm11, %xmm1
movapd -4912(%rbp), %xmm14 ## 16-byte Reload
mulsd %xmm5, %xmm14
addsd %xmm1, %xmm14
movapd %xmm3, %xmm1
mulsd %xmm0, %xmm1
movapd -3664(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm14, %xmm0
addsd %xmm1, %xmm0
subsd %xmm4, %xmm0
movsd -3360(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm0, %xmm2
mulsd %xmm9, %xmm2
movsd -4736(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm2, %xmm1
movsd -2288(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm0, %xmm2
movsd %xmm2, -2288(%rbp) ## 8-byte Spill
movsd LCPI19_37(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm1
movsd -3776(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm2, %xmm0
movsd %xmm0, -3776(%rbp) ## 8-byte Spill
movsd -936(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm0, %xmm4
movsd LCPI19_43(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm4
addsd %xmm1, %xmm4
movsd %xmm4, -936(%rbp) ## 8-byte Spill
movapd -672(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm12
movapd %xmm0, %xmm2
mulsd %xmm3, %xmm12
movapd %xmm8, %xmm5
movapd %xmm8, %xmm9
movapd -2176(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm3, %xmm5
movapd %xmm3, %xmm0
subsd %xmm5, %xmm12
movapd -5664(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm5
mulsd -3792(%rbp), %xmm4 ## 8-byte Folded Reload
movapd -3392(%rbp), %xmm3 ## 16-byte Reload
divsd %xmm3, %xmm4
movapd %xmm4, -5664(%rbp) ## 16-byte Spill
movapd %xmm4, %xmm6
divsd %xmm3, %xmm6
mulsd %xmm12, %xmm6
movsd %xmm6, -3664(%rbp) ## 8-byte Spill
divsd %xmm3, %xmm12
movapd %xmm8, %xmm7
movapd -2608(%rbp), %xmm8 ## 16-byte Reload
mulsd %xmm8, %xmm7
subsd %xmm2, %xmm7
movapd %xmm7, %xmm4
divsd %xmm3, %xmm4
movsd %xmm4, -6256(%rbp) ## 8-byte Spill
mulsd %xmm12, %xmm5
movapd -3424(%rbp), %xmm13 ## 16-byte Reload
mulsd %xmm4, %xmm13
addsd %xmm5, %xmm13
movapd -64(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm5
mulsd %xmm8, %xmm5
movapd -3680(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm13, %xmm3
addsd %xmm5, %xmm3
subsd %xmm0, %xmm3
movsd -1664(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm3, %xmm4
mulsd LCPI19_1(%rip), %xmm4
movsd -1680(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm4, %xmm5
movsd -1792(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm3, %xmm0
movsd %xmm0, -1792(%rbp) ## 8-byte Spill
mulsd LCPI19_37(%rip), %xmm5
movsd -3808(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm0, %xmm1
movsd %xmm1, -3808(%rbp) ## 8-byte Spill
movsd -1144(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm1, %xmm0
mulsd LCPI19_43(%rip), %xmm0
addsd %xmm5, %xmm0
movapd %xmm0, %xmm6
movsd %xmm0, -1144(%rbp) ## 8-byte Spill
movsd LCPI19_50(%rip), %xmm10 ## xmm10 = mem[0],zero
mulsd %xmm10, %xmm2
movsd %xmm2, -648(%rbp) ## 8-byte Spill
movapd -992(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm4, %xmm2
movapd -8128(%rbp), %xmm3 ## 16-byte Reload
subsd %xmm2, %xmm3
movapd %xmm3, -2256(%rbp) ## 16-byte Spill
movsd -936(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
addsd %xmm0, %xmm2
movapd %xmm2, -4128(%rbp) ## 16-byte Spill
mulsd %xmm10, %xmm9
movsd %xmm9, -3408(%rbp) ## 8-byte Spill
mulsd %xmm4, %xmm9
movapd %xmm4, %xmm1
addsd -8112(%rbp), %xmm9 ## 16-byte Folded Reload
movapd %xmm9, -2496(%rbp) ## 16-byte Spill
movsd -1296(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm4, %xmm9
addsd %xmm2, %xmm9
movapd %xmm9, -8768(%rbp) ## 16-byte Spill
mulsd %xmm9, %xmm4
movapd %xmm3, %xmm5
mulsd -960(%rbp), %xmm5 ## 8-byte Folded Reload
subsd %xmm5, %xmm4
movapd %xmm4, -4736(%rbp) ## 16-byte Spill
movsd -8864(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd -5952(%rbp), %xmm5 ## 16-byte Folded Reload
movsd -6496(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd -5936(%rbp), %xmm3 ## 16-byte Folded Reload
addsd %xmm5, %xmm3
movapd -8080(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm5
mulsd -2512(%rbp), %xmm5 ## 8-byte Folded Reload
addsd %xmm3, %xmm5
movapd %xmm1, %xmm4
movapd %xmm1, %xmm3
mulsd -11936(%rbp), %xmm3 ## 16-byte Folded Reload
subsd %xmm3, %xmm5
movapd %xmm0, %xmm3
mulsd -96(%rbp), %xmm3 ## 16-byte Folded Reload
movsd -3560(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movapd -256(%rbp), %xmm9 ## 16-byte Reload
mulsd %xmm9, %xmm0
movapd -5168(%rbp), %xmm2 ## 16-byte Reload
addsd LCPI19_60(%rip), %xmm2
addsd %xmm3, %xmm0
movapd %xmm2, -5168(%rbp) ## 16-byte Spill
movapd -144(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm6, %xmm2
addsd %xmm0, %xmm2
movapd %xmm2, -4672(%rbp) ## 16-byte Spill
movapd %xmm9, %xmm0
mulsd %xmm2, %xmm0
movapd %xmm6, %xmm3
mulsd -9864(%rbp), %xmm3 ## 8-byte Folded Reload
subsd %xmm3, %xmm0
movapd %xmm10, %xmm1
mulsd %xmm10, %xmm5
mulsd %xmm10, %xmm0
addsd %xmm5, %xmm0
movapd -672(%rbp), %xmm3 ## 16-byte Reload
mulsd -5664(%rbp), %xmm3 ## 16-byte Folded Reload
mulsd -2016(%rbp), %xmm8 ## 8-byte Folded Reload
addsd %xmm3, %xmm8
movsd LCPI19_67(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm4, %xmm3
addsd %xmm3, %xmm8
movapd -832(%rbp), %xmm3 ## 16-byte Reload
movapd -5056(%rbp), %xmm10 ## 16-byte Reload
mulsd %xmm10, %xmm3
movapd -2064(%rbp), %xmm5 ## 16-byte Reload
movsd -1128(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm4, %xmm5
addsd %xmm3, %xmm5
addsd %xmm8, %xmm5
addsd %xmm0, %xmm5
mulsd -5696(%rbp), %xmm15 ## 16-byte Folded Reload
addsd -3088(%rbp), %xmm15 ## 8-byte Folded Reload
movapd %xmm14, %xmm2
mulsd %xmm4, %xmm2
movapd %xmm2, %xmm0
mulsd LCPI19_1(%rip), %xmm0
subsd %xmm15, %xmm0
movsd LCPI19_44(%rip), %xmm15 ## xmm15 = mem[0],zero
mulsd %xmm15, %xmm2
mulsd -1688(%rbp), %xmm2 ## 8-byte Folded Reload
movsd LCPI19_45(%rip), %xmm14 ## xmm14 = mem[0],zero
mulsd %xmm14, %xmm2
addsd %xmm0, %xmm2
movsd -1672(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
mulsd %xmm8, %xmm11
divsd -5680(%rbp), %xmm2 ## 16-byte Folded Reload
movapd -4192(%rbp), %xmm14 ## 16-byte Reload
mulsd %xmm2, %xmm14
addsd %xmm11, %xmm14
movapd %xmm9, %xmm0
mulsd %xmm14, %xmm0
movapd %xmm14, -5008(%rbp) ## 16-byte Spill
movapd %xmm6, %xmm1
movsd -936(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
mulsd %xmm9, %xmm1
addsd %xmm0, %xmm1
movapd %xmm1, -3360(%rbp) ## 16-byte Spill
movapd -2384(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm0
mulsd %xmm1, %xmm0
subsd %xmm0, %xmm5
movapd -208(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm11, %xmm15
mulsd %xmm10, %xmm15
addsd %xmm4, %xmm15
movapd %xmm3, %xmm0
mulsd %xmm15, %xmm0
subsd %xmm0, %xmm5
movsd -2680(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
addsd -4976(%rbp), %xmm3 ## 8-byte Folded Reload
movsd %xmm3, -2680(%rbp) ## 8-byte Spill
movapd -1168(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm8, %xmm0
movapd %xmm2, %xmm8
mulsd -5200(%rbp), %xmm8 ## 16-byte Folded Reload
addsd %xmm0, %xmm8
movapd %xmm8, -4912(%rbp) ## 16-byte Spill
movapd -96(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm4
mulsd %xmm14, %xmm4
mulsd %xmm8, %xmm6
subsd %xmm6, %xmm4
movsd %xmm4, -3680(%rbp) ## 8-byte Spill
movapd %xmm11, %xmm0
mulsd %xmm4, %xmm0
addsd %xmm3, %xmm0
movapd -64(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm3
mulsd %xmm10, %xmm3
subsd -5648(%rbp), %xmm3 ## 16-byte Folded Reload
movsd %xmm3, -1168(%rbp) ## 8-byte Spill
movapd %xmm11, %xmm1
mulsd %xmm3, %xmm1
addsd %xmm0, %xmm1
movapd -256(%rbp), %xmm10 ## 16-byte Reload
movapd %xmm10, %xmm0
mulsd %xmm8, %xmm0
movapd %xmm2, %xmm3
movapd %xmm2, %xmm14
mulsd %xmm9, %xmm3
addsd %xmm0, %xmm3
movsd %xmm3, -1672(%rbp) ## 8-byte Spill
addsd %xmm3, %xmm1
movapd %xmm6, %xmm2
movapd %xmm6, %xmm0
mulsd -3360(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm1
movapd %xmm6, %xmm0
mulsd %xmm15, %xmm0
subsd %xmm0, %xmm1
movsd %xmm1, -1680(%rbp) ## 8-byte Spill
movapd -2096(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm1, %xmm0
addsd %xmm5, %xmm0
mulsd -5744(%rbp), %xmm7 ## 16-byte Folded Reload
addsd -3664(%rbp), %xmm7 ## 8-byte Folded Reload
movsd -2016(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd %xmm6, %xmm13
movapd %xmm13, %xmm4
mulsd LCPI19_1(%rip), %xmm4
subsd %xmm7, %xmm4
mulsd LCPI19_44(%rip), %xmm13
mulsd -3376(%rbp), %xmm13 ## 8-byte Folded Reload
mulsd LCPI19_45(%rip), %xmm13
addsd %xmm4, %xmm13
mulsd -3792(%rbp), %xmm12 ## 8-byte Folded Reload
divsd -5712(%rbp), %xmm13 ## 16-byte Folded Reload
movapd %xmm13, -3424(%rbp) ## 16-byte Spill
movapd -4272(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm13, %xmm3
addsd %xmm12, %xmm3
movapd %xmm10, %xmm1
mulsd %xmm3, %xmm1
movapd %xmm3, %xmm8
movapd %xmm3, -3088(%rbp) ## 16-byte Spill
movapd -144(%rbp), %xmm13 ## 16-byte Reload
movapd %xmm13, %xmm12
movsd -1144(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm5, %xmm12
addsd %xmm1, %xmm12
movapd -2368(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm1
mulsd %xmm12, %xmm1
movapd %xmm12, -3584(%rbp) ## 16-byte Spill
subsd %xmm1, %xmm0
movapd %xmm11, %xmm9
mulsd -5664(%rbp), %xmm9 ## 16-byte Folded Reload
addsd %xmm6, %xmm9
movapd %xmm3, %xmm1
mulsd %xmm9, %xmm1
movsd %xmm9, -4992(%rbp) ## 8-byte Spill
subsd %xmm1, %xmm0
movsd -648(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
movapd %xmm6, %xmm3
mulsd %xmm10, %xmm3
movsd -3408(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
movapd %xmm2, %xmm1
mulsd %xmm14, %xmm1
subsd %xmm1, %xmm3
movsd %xmm3, -1128(%rbp) ## 8-byte Spill
movapd -8080(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm3, %xmm1
movapd %xmm2, %xmm4
movapd %xmm2, %xmm7
mulsd -4672(%rbp), %xmm4 ## 16-byte Folded Reload
subsd %xmm4, %xmm1
movapd -832(%rbp), %xmm4 ## 16-byte Reload
mulsd -5008(%rbp), %xmm4 ## 16-byte Folded Reload
addsd %xmm1, %xmm4
movapd -2064(%rbp), %xmm1 ## 16-byte Reload
mulsd -936(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm4, %xmm1
movapd -672(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm8, %xmm4
addsd %xmm1, %xmm4
movapd -2608(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm5, %xmm1
addsd %xmm4, %xmm1
movapd %xmm13, %xmm3
mulsd %xmm13, %xmm2
addsd -4224(%rbp), %xmm2 ## 8-byte Folded Reload
movsd %xmm2, -1688(%rbp) ## 8-byte Spill
movsd -8864(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
movapd %xmm10, %xmm4
mulsd %xmm2, %xmm4
addsd %xmm1, %xmm4
movsd -7776(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
addsd %xmm4, %xmm5
movapd %xmm6, %xmm1
movapd %xmm6, %xmm8
mulsd %xmm13, %xmm1
movapd -4880(%rbp), %xmm3 ## 16-byte Reload
subsd %xmm1, %xmm3
movapd %xmm3, -2016(%rbp) ## 16-byte Spill
movsd -6496(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
movapd %xmm2, %xmm4
mulsd %xmm3, %xmm4
addsd %xmm5, %xmm4
addsd LCPI19_106(%rip), %xmm0
movapd -624(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm1
movapd %xmm3, %xmm6
movapd %xmm4, -8736(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm1
addsd %xmm0, %xmm1
movapd -2256(%rbp), %xmm13 ## 16-byte Reload
mulsd -320(%rbp), %xmm13 ## 8-byte Folded Reload
movapd -2496(%rbp), %xmm5 ## 16-byte Reload
movapd -48(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm3, %xmm5
addsd -4128(%rbp), %xmm5 ## 16-byte Folded Reload
movapd %xmm3, %xmm4
movapd %xmm5, -7776(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm4
subsd %xmm4, %xmm13
movapd %xmm13, %xmm4
movsd LCPI19_25(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm5, %xmm4
subsd %xmm4, %xmm1
movapd -4720(%rbp), %xmm4 ## 16-byte Reload
movsd LCPI19_103(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm5, %xmm4
addsd %xmm1, %xmm4
movapd %xmm7, %xmm1
mulsd -6096(%rbp), %xmm1 ## 16-byte Folded Reload
movapd %xmm8, %xmm5
mulsd %xmm2, %xmm5
addsd %xmm1, %xmm5
movapd -6032(%rbp), %xmm1 ## 16-byte Reload
addsd %xmm5, %xmm1
movapd %xmm1, -6032(%rbp) ## 16-byte Spill
movapd %xmm1, %xmm5
mulsd %xmm6, %xmm5
movapd %xmm3, %xmm1
movapd %xmm5, -8752(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm1
addsd %xmm4, %xmm1
movapd %xmm7, %xmm4
mulsd %xmm2, %xmm4
movapd %xmm10, %xmm0
mulsd %xmm8, %xmm0
addsd %xmm4, %xmm0
movapd -6016(%rbp), %xmm11 ## 16-byte Reload
addsd %xmm0, %xmm11
movapd %xmm11, -6016(%rbp) ## 16-byte Spill
mulsd -72(%rbp), %xmm11 ## 8-byte Folded Reload
addsd %xmm1, %xmm11
movapd %xmm11, %xmm1
movsd LCPI19_15(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd %xmm4, %xmm1
movapd %xmm13, %xmm0
movsd LCPI19_22(%rip), %xmm6 ## xmm6 = mem[0],zero
mulsd %xmm6, %xmm0
subsd %xmm1, %xmm0
movapd %xmm0, -4976(%rbp) ## 16-byte Spill
movapd -1488(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm1
mulsd -2680(%rbp), %xmm1 ## 8-byte Folded Reload
movapd -1520(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm6
movsd -3680(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
mulsd %xmm10, %xmm6
addsd %xmm1, %xmm6
movapd %xmm2, %xmm1
mulsd -1168(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm6, %xmm1
movapd %xmm0, %xmm2
mulsd -1672(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm1, %xmm2
movapd -176(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm1
movapd -3360(%rbp), %xmm14 ## 16-byte Reload
mulsd %xmm14, %xmm1
subsd %xmm1, %xmm2
movapd %xmm0, %xmm1
mulsd %xmm15, %xmm1
subsd %xmm1, %xmm2
movsd %xmm2, -1664(%rbp) ## 8-byte Spill
movapd -896(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm1
mulsd %xmm14, %xmm1
mulsd %xmm15, %xmm6
addsd %xmm1, %xmm6
movapd -2096(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm2, %xmm1
subsd %xmm6, %xmm1
movapd -1904(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm6
mulsd %xmm12, %xmm6
subsd %xmm6, %xmm1
movapd %xmm2, %xmm6
mulsd %xmm9, %xmm6
subsd %xmm6, %xmm1
movapd %xmm7, %xmm2
movapd %xmm7, %xmm6
mulsd -5952(%rbp), %xmm6 ## 16-byte Folded Reload
movapd %xmm8, %xmm7
movapd %xmm8, %xmm9
mulsd -5936(%rbp), %xmm7 ## 16-byte Folded Reload
addsd %xmm6, %xmm7
movapd -256(%rbp), %xmm12 ## 16-byte Reload
movapd %xmm12, %xmm6
movsd -1688(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
mulsd %xmm8, %xmm6
addsd %xmm7, %xmm6
movapd -96(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm0
movapd -2016(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm4, %xmm0
addsd %xmm6, %xmm0
movapd %xmm0, -3392(%rbp) ## 16-byte Spill
addsd LCPI19_77(%rip), %xmm1
movsd -6544(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd %xmm0, %xmm6
addsd %xmm1, %xmm6
movapd -4720(%rbp), %xmm1 ## 16-byte Reload
movsd LCPI19_48(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm7, %xmm1
addsd %xmm6, %xmm1
movapd %xmm2, %xmm6
movsd -2512(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm2, %xmm6
movapd %xmm12, %xmm7
movsd -1128(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm3, %xmm7
addsd %xmm6, %xmm7
movapd -144(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm6
mulsd %xmm4, %xmm6
addsd %xmm7, %xmm6
addsd -14048(%rbp), %xmm6 ## 16-byte Folded Reload
movapd %xmm6, -3664(%rbp) ## 16-byte Spill
movsd LCPI19_62(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm7, %xmm6
subsd %xmm6, %xmm1
mulsd %xmm3, %xmm5
mulsd %xmm2, %xmm9
addsd %xmm5, %xmm9
mulsd %xmm8, %xmm0
addsd %xmm9, %xmm0
addsd -14080(%rbp), %xmm0 ## 16-byte Folded Reload
movapd -5152(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm0, %xmm4
movapd %xmm0, %xmm5
movapd %xmm0, -4560(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm4
movapd %xmm13, %xmm6
movsd LCPI19_24(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm6
movapd -752(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm4, %xmm1
addsd %xmm6, %xmm1
movapd -864(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm6
mulsd -2680(%rbp), %xmm6 ## 8-byte Folded Reload
movapd -576(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm7
mulsd %xmm10, %xmm7
addsd %xmm6, %xmm7
movapd %xmm0, %xmm6
mulsd -1168(%rbp), %xmm6 ## 8-byte Folded Reload
addsd %xmm7, %xmm6
movapd %xmm2, %xmm0
mulsd -1672(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm6, %xmm0
movapd -736(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm6
movapd %xmm14, %xmm3
mulsd %xmm14, %xmm6
subsd %xmm6, %xmm0
movapd %xmm2, %xmm6
movapd %xmm2, %xmm7
mulsd %xmm15, %xmm6
subsd %xmm6, %xmm0
movapd -1888(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm2, %xmm3
mulsd %xmm2, %xmm15
addsd %xmm3, %xmm15
movapd -2096(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm0, %xmm2
subsd %xmm15, %xmm2
movapd -64(%rbp), %xmm3 ## 16-byte Reload
movsd -1680(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
mulsd %xmm9, %xmm3
movapd -176(%rbp), %xmm6 ## 16-byte Reload
movsd -1664(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
mulsd %xmm12, %xmm6
addsd %xmm3, %xmm6
movapd %xmm7, %xmm3
mulsd %xmm0, %xmm3
movapd %xmm0, %xmm15
movsd %xmm0, -1640(%rbp) ## 8-byte Spill
addsd %xmm6, %xmm3
movsd LCPI19_14(%rip), %xmm6 ## xmm6 = mem[0],zero
mulsd %xmm6, %xmm3
addsd %xmm2, %xmm3
movapd -1584(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm2
mulsd -3584(%rbp), %xmm2 ## 16-byte Folded Reload
subsd %xmm2, %xmm3
movapd %xmm0, %xmm2
mulsd -4992(%rbp), %xmm2 ## 8-byte Folded Reload
subsd %xmm2, %xmm3
addsd LCPI19_78(%rip), %xmm3
movsd -6528(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd -3392(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm3, %xmm2
movsd LCPI19_47(%rip), %xmm8 ## xmm8 = mem[0],zero
movapd -4720(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm8, %xmm0
addsd %xmm2, %xmm0
movapd -3664(%rbp), %xmm2 ## 16-byte Reload
movsd LCPI19_63(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm2
addsd %xmm0, %xmm2
movapd -5136(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm5, %xmm3
addsd %xmm2, %xmm3
movapd -496(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm6
mulsd %xmm3, %xmm6
addsd %xmm1, %xmm6
movapd %xmm13, %xmm1
movsd LCPI19_23(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm1
movapd %xmm7, %xmm2
movapd %xmm4, -3376(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm2
subsd %xmm2, %xmm1
movapd -752(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm3, -4720(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm2
addsd %xmm1, %xmm2
movsd LCPI19_108(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm11
movsd LCPI19_114(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm13
subsd %xmm13, %xmm11
movapd -4976(%rbp), %xmm1 ## 16-byte Reload
addsd %xmm6, %xmm1
movsd LCPI19_13(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm6
addsd %xmm11, %xmm6
addsd %xmm2, %xmm6
mulsd %xmm0, %xmm2
addsd %xmm1, %xmm2
movsd LCPI19_20(%rip), %xmm0 ## xmm0 = mem[0],zero
movapd -4736(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm0, %xmm4
movapd -1616(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm0
mulsd %xmm2, %xmm0
subsd %xmm0, %xmm4
movapd -1088(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm0
mulsd %xmm6, %xmm0
subsd %xmm0, %xmm4
movapd %xmm4, %xmm7
movapd %xmm6, -5472(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm3
movapd %xmm1, %xmm0
movapd %xmm2, -6224(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm0
subsd %xmm0, %xmm3
movapd -2256(%rbp), %xmm0 ## 16-byte Reload
mulsd -976(%rbp), %xmm0 ## 8-byte Folded Reload
movapd -2496(%rbp), %xmm2 ## 16-byte Reload
movsd -560(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm1, %xmm2
addsd -4128(%rbp), %xmm2 ## 16-byte Folded Reload
movapd %xmm2, -8864(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm1
subsd %xmm1, %xmm0
movapd -2144(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm1
movapd %xmm2, %xmm5
mulsd %xmm3, %xmm1
movapd %xmm0, %xmm2
movsd LCPI19_30(%rip), %xmm13 ## xmm13 = mem[0],zero
mulsd %xmm13, %xmm2
subsd %xmm2, %xmm1
movapd -848(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm2
mulsd %xmm7, %xmm2
subsd %xmm1, %xmm2
movapd %xmm2, -3360(%rbp) ## 16-byte Spill
movapd %xmm4, %xmm1
movapd %xmm3, -8448(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm1
movsd LCPI19_111(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm0
subsd %xmm0, %xmm1
movapd %xmm5, %xmm0
movapd %xmm7, -4736(%rbp) ## 16-byte Spill
mulsd %xmm7, %xmm0
addsd %xmm1, %xmm0
movapd %xmm0, -4976(%rbp) ## 16-byte Spill
movsd -3744(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -3776(%rbp), %xmm0 ## 8-byte Folded Reload
mulsd LCPI19_41(%rip), %xmm0
movsd -3344(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm0, %xmm1
divsd -3136(%rbp), %xmm1 ## 8-byte Folded Reload
movapd -1424(%rbp), %xmm13 ## 16-byte Reload
mulsd %xmm1, %xmm13
movsd -4800(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -2288(%rbp), %xmm0 ## 8-byte Folded Reload
mulsd LCPI19_1(%rip), %xmm0
movapd -2864(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm0, %xmm1
movsd LCPI19_110(%rip), %xmm10 ## xmm10 = mem[0],zero
mulsd %xmm10, %xmm1
subsd %xmm1, %xmm13
movapd -208(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm0
mulsd %xmm9, %xmm0
movapd -1520(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm1
mulsd %xmm12, %xmm1
addsd %xmm0, %xmm1
movapd -576(%rbp), %xmm10 ## 16-byte Reload
movapd %xmm10, %xmm0
mulsd %xmm15, %xmm0
addsd %xmm1, %xmm0
addsd %xmm0, %xmm13
movapd %xmm0, %xmm1
mulsd LCPI19_1(%rip), %xmm1
subsd %xmm1, %xmm13
movapd %xmm13, -1424(%rbp) ## 16-byte Spill
subsd %xmm0, %xmm13
movsd -1352(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
addsd -7648(%rbp), %xmm5 ## 8-byte Folded Reload
movsd -6256(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -3792(%rbp), %xmm0 ## 8-byte Folded Reload
movapd -3424(%rbp), %xmm1 ## 16-byte Reload
mulsd -768(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm0, %xmm1
movapd -96(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm8
mulsd -3088(%rbp), %xmm8 ## 16-byte Folded Reload
movapd -144(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm1, %xmm0
movapd %xmm1, -3424(%rbp) ## 16-byte Spill
subsd %xmm0, %xmm8
movapd %xmm7, %xmm2
mulsd %xmm8, %xmm2
addsd %xmm5, %xmm2
movapd -64(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm0
mulsd -5664(%rbp), %xmm0 ## 16-byte Folded Reload
subsd -6512(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm7, %xmm4
movapd %xmm7, %xmm12
mulsd %xmm0, %xmm4
movapd %xmm0, %xmm7
addsd %xmm2, %xmm4
movapd -256(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm1, %xmm2
movapd %xmm3, %xmm1
mulsd -1144(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm2, %xmm1
addsd %xmm1, %xmm4
movapd %xmm6, %xmm2
movapd -3584(%rbp), %xmm14 ## 16-byte Reload
mulsd %xmm14, %xmm2
subsd %xmm2, %xmm4
movapd %xmm6, %xmm2
movsd -4992(%rbp), %xmm15 ## 8-byte Reload
## xmm15 = mem[0],zero
mulsd %xmm15, %xmm2
subsd %xmm2, %xmm4
movapd -1488(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm11, %xmm2
mulsd %xmm5, %xmm2
movapd %xmm9, %xmm3
mulsd %xmm8, %xmm3
addsd %xmm2, %xmm3
movapd %xmm9, %xmm6
movapd %xmm9, %xmm2
mulsd %xmm0, %xmm6
movapd %xmm0, %xmm9
addsd %xmm3, %xmm6
mulsd %xmm1, %xmm11
addsd %xmm6, %xmm11
movapd -176(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm3
mulsd %xmm14, %xmm3
subsd %xmm3, %xmm11
movapd %xmm0, %xmm3
mulsd %xmm15, %xmm3
subsd %xmm3, %xmm11
movapd %xmm12, %xmm3
mulsd %xmm4, %xmm3
movapd %xmm2, %xmm6
mulsd %xmm11, %xmm6
addsd %xmm3, %xmm6
movapd -864(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm3
movsd %xmm5, -1352(%rbp) ## 8-byte Spill
mulsd %xmm5, %xmm3
movapd %xmm10, %xmm7
mulsd %xmm8, %xmm7
movapd %xmm8, %xmm12
movsd %xmm8, -2288(%rbp) ## 8-byte Spill
addsd %xmm3, %xmm7
movapd %xmm10, %xmm5
mulsd %xmm9, %xmm5
movsd %xmm9, -3136(%rbp) ## 8-byte Spill
addsd %xmm7, %xmm5
movapd %xmm2, %xmm3
movapd %xmm1, -7648(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm3
addsd %xmm5, %xmm3
movapd -736(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm14, %xmm1
mulsd %xmm0, %xmm1
subsd %xmm1, %xmm3
movapd %xmm15, %xmm1
mulsd %xmm0, %xmm1
subsd %xmm1, %xmm3
movsd LCPI19_1(%rip), %xmm15 ## xmm15 = mem[0],zero
mulsd %xmm3, %xmm10
addsd %xmm6, %xmm10
movsd -3104(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd -3808(%rbp), %xmm6 ## 8-byte Folded Reload
mulsd LCPI19_42(%rip), %xmm6
movsd -2848(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm6, %xmm7
divsd -3824(%rbp), %xmm7 ## 8-byte Folded Reload
movsd -1392(%rbp), %xmm14 ## 8-byte Reload
## xmm14 = mem[0],zero
mulsd %xmm7, %xmm14
movsd -3728(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd -1792(%rbp), %xmm6 ## 8-byte Folded Reload
mulsd %xmm15, %xmm6
movapd -3648(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm6, %xmm7
mulsd LCPI19_110(%rip), %xmm7
subsd %xmm7, %xmm14
subsd %xmm10, %xmm13
addsd %xmm10, %xmm14
mulsd %xmm15, %xmm10
subsd %xmm10, %xmm14
addsd %xmm13, %xmm14
movsd -6496(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -5952(%rbp), %xmm1 ## 16-byte Folded Reload
movapd -6096(%rbp), %xmm5 ## 16-byte Reload
mulsd -5936(%rbp), %xmm5 ## 16-byte Folded Reload
addsd %xmm1, %xmm5
movsd -3560(%rbp), %xmm13 ## 8-byte Reload
## xmm13 = mem[0],zero
movsd -2512(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm13, %xmm0
addsd %xmm5, %xmm0
movapd %xmm0, %xmm1
movapd -992(%rbp), %xmm0 ## 16-byte Reload
mulsd -9696(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
movapd %xmm0, %xmm6
movapd -144(%rbp), %xmm1 ## 16-byte Reload
mulsd -14272(%rbp), %xmm1 ## 16-byte Folded Reload
movapd -96(%rbp), %xmm5 ## 16-byte Reload
movapd -4672(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm0, %xmm5
subsd %xmm5, %xmm1
movsd LCPI19_50(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm5, %xmm6
mulsd %xmm5, %xmm1
addsd %xmm6, %xmm1
movapd -2176(%rbp), %xmm10 ## 16-byte Reload
movapd -5664(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm10, %xmm5
movapd -2608(%rbp), %xmm2 ## 16-byte Reload
movapd -6512(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm2, %xmm6
subsd %xmm6, %xmm5
movapd %xmm5, %xmm7
movapd -2160(%rbp), %xmm6 ## 16-byte Reload
movapd -5056(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm6, %xmm5
movapd -2064(%rbp), %xmm8 ## 16-byte Reload
movapd -5648(%rbp), %xmm15 ## 16-byte Reload
mulsd %xmm8, %xmm15
subsd %xmm15, %xmm5
addsd %xmm7, %xmm5
subsd %xmm5, %xmm1
movapd -2384(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm5
mulsd -3680(%rbp), %xmm5 ## 8-byte Folded Reload
addsd %xmm1, %xmm5
movapd %xmm7, %xmm1
mulsd -1168(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm5, %xmm1
movapd -1024(%rbp), %xmm5 ## 16-byte Reload
mulsd -1680(%rbp), %xmm5 ## 8-byte Folded Reload
addsd %xmm1, %xmm5
movapd -2368(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm1
mulsd %xmm12, %xmm1
addsd %xmm5, %xmm1
movapd %xmm7, %xmm5
mulsd %xmm9, %xmm5
addsd %xmm1, %xmm5
movapd -2080(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm1, %xmm4
movapd %xmm1, %xmm9
addsd %xmm5, %xmm4
movapd %xmm14, %xmm1
mulsd LCPI19_25(%rip), %xmm1
subsd %xmm1, %xmm4
movsd -648(%rbp), %xmm15 ## 8-byte Reload
## xmm15 = mem[0],zero
mulsd %xmm15, %xmm0
mulsd -1128(%rbp), %xmm13 ## 8-byte Folded Reload
addsd %xmm0, %xmm13
movapd -5008(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm6, %xmm0
subsd %xmm0, %xmm13
movapd %xmm8, %xmm5
movapd -4912(%rbp), %xmm8 ## 16-byte Reload
mulsd %xmm8, %xmm5
addsd %xmm13, %xmm5
movapd -3088(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm10, %xmm0
subsd %xmm0, %xmm5
movapd %xmm2, %xmm1
movapd -3424(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm2, %xmm1
addsd %xmm5, %xmm1
movapd -7664(%rbp), %xmm5 ## 16-byte Reload
addsd %xmm1, %xmm5
movsd -6496(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movsd -1688(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm7, %xmm1
addsd %xmm5, %xmm1
movapd -6096(%rbp), %xmm0 ## 16-byte Reload
movapd -2016(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm6, %xmm0
addsd %xmm1, %xmm0
movapd -624(%rbp), %xmm12 ## 16-byte Reload
movapd %xmm12, %xmm1
movapd %xmm0, -6096(%rbp) ## 16-byte Spill
mulsd %xmm0, %xmm1
addsd %xmm4, %xmm1
movsd -3408(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd -5936(%rbp), %xmm5 ## 16-byte Folded Reload
movapd -256(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm6, %xmm4
addsd %xmm5, %xmm4
movapd %xmm15, %xmm5
mulsd -5952(%rbp), %xmm5 ## 16-byte Folded Reload
movapd -96(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm7, %xmm0
mulsd %xmm7, %xmm6
addsd %xmm5, %xmm6
movsd %xmm6, -4672(%rbp) ## 8-byte Spill
subsd %xmm6, %xmm4
movapd -1520(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm4, %xmm5
movapd -176(%rbp), %xmm6 ## 16-byte Reload
movapd -3392(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm0, %xmm6
addsd %xmm5, %xmm6
movsd LCPI19_105(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm6, %xmm5
addsd %xmm1, %xmm5
movapd -576(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm4, %xmm7
movapd -736(%rbp), %xmm10 ## 16-byte Reload
mulsd %xmm0, %xmm10
addsd %xmm7, %xmm10
movsd LCPI19_104(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm10, %xmm7
addsd %xmm5, %xmm7
movsd -3472(%rbp), %xmm15 ## 8-byte Reload
## xmm15 = mem[0],zero
movapd %xmm15, %xmm5
mulsd LCPI19_103(%rip), %xmm5
addsd %xmm7, %xmm5
movsd -320(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd -2496(%rbp), %xmm7 ## 16-byte Folded Reload
movapd %xmm8, %xmm13
movapd %xmm2, %xmm0
addsd %xmm2, %xmm13
movsd %xmm13, -5056(%rbp) ## 8-byte Spill
movapd -2256(%rbp), %xmm1 ## 16-byte Reload
movapd -48(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm0, %xmm1
addsd %xmm13, %xmm1
movapd %xmm0, %xmm8
movapd %xmm1, -5664(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm8
addsd %xmm7, %xmm8
movapd %xmm8, %xmm7
mulsd LCPI19_25(%rip), %xmm7
addsd %xmm5, %xmm7
movsd -72(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -6032(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm7, %xmm1
movapd %xmm1, %xmm7
movapd -6016(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm12, %xmm1
movapd %xmm0, %xmm5
movapd %xmm1, -5648(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm5
movapd %xmm7, %xmm0
subsd %xmm5, %xmm0
movsd %xmm0, -72(%rbp) ## 8-byte Spill
mulsd LCPI19_108(%rip), %xmm0
movapd %xmm14, %xmm5
mulsd LCPI19_114(%rip), %xmm5
subsd %xmm5, %xmm0
movsd %xmm0, -320(%rbp) ## 8-byte Spill
movapd %xmm14, %xmm5
mulsd LCPI19_24(%rip), %xmm5
movsd LCPI19_112(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm8, %xmm7
addsd %xmm5, %xmm7
movapd -1888(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm5
movsd -3680(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm0, %xmm5
mulsd -1168(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm5, %xmm1
movapd -1024(%rbp), %xmm5 ## 16-byte Reload
mulsd -1640(%rbp), %xmm5 ## 8-byte Folded Reload
addsd %xmm1, %xmm5
movapd -1424(%rbp), %xmm1 ## 16-byte Reload
mulsd LCPI19_14(%rip), %xmm1
addsd %xmm5, %xmm1
movapd %xmm1, %xmm2
movapd -1584(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm1
movsd -2288(%rbp), %xmm13 ## 8-byte Reload
## xmm13 = mem[0],zero
mulsd %xmm13, %xmm1
addsd %xmm2, %xmm1
movsd -3136(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
mulsd %xmm12, %xmm5
addsd %xmm1, %xmm5
mulsd %xmm9, %xmm3
addsd %xmm5, %xmm3
movsd -6528(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
mulsd %xmm4, %xmm9
addsd %xmm3, %xmm9
movapd %xmm6, %xmm1
movsd LCPI19_55(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm5, %xmm1
subsd %xmm1, %xmm9
movsd LCPI19_54(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm10, %xmm1
addsd %xmm9, %xmm1
movapd %xmm15, %xmm3
movsd LCPI19_47(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm5, %xmm3
addsd %xmm1, %xmm3
movapd -5136(%rbp), %xmm5 ## 16-byte Reload
movapd -3664(%rbp), %xmm9 ## 16-byte Reload
mulsd %xmm9, %xmm5
addsd %xmm3, %xmm5
movapd -4560(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm1
movsd LCPI19_63(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm1
subsd %xmm1, %xmm5
movapd -496(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm5, %xmm3
addsd %xmm7, %xmm3
movapd %xmm0, %xmm7
movapd -896(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm0, %xmm7
movsd -1168(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm0, %xmm1
addsd %xmm7, %xmm1
movapd %xmm1, %xmm7
movapd -1024(%rbp), %xmm1 ## 16-byte Reload
mulsd -1664(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm7, %xmm1
movapd -1904(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm7, %xmm13
addsd %xmm1, %xmm13
mulsd %xmm7, %xmm12
addsd %xmm13, %xmm12
mulsd -2080(%rbp), %xmm11 ## 16-byte Folded Reload
addsd %xmm12, %xmm11
mulsd -6544(%rbp), %xmm4 ## 8-byte Folded Reload
addsd %xmm11, %xmm4
mulsd LCPI19_57(%rip), %xmm6
addsd %xmm4, %xmm6
mulsd LCPI19_55(%rip), %xmm10
subsd %xmm10, %xmm6
movsd LCPI19_48(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm15
addsd %xmm6, %xmm15
movapd -5152(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm9, %xmm0
addsd %xmm15, %xmm0
movapd %xmm2, %xmm4
mulsd LCPI19_62(%rip), %xmm4
addsd %xmm0, %xmm4
movapd -752(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm0
mulsd %xmm4, %xmm0
addsd %xmm3, %xmm0
movsd -72(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd LCPI19_15(%rip), %xmm2
movapd %xmm14, %xmm1
mulsd LCPI19_22(%rip), %xmm1
subsd %xmm2, %xmm1
addsd %xmm0, %xmm1
movsd LCPI19_13(%rip), %xmm9 ## xmm9 = mem[0],zero
mulsd %xmm9, %xmm0
addsd -320(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm14, %xmm2
movsd LCPI19_23(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm7, %xmm2
movapd %xmm8, %xmm3
mulsd %xmm7, %xmm3
subsd %xmm3, %xmm2
movapd %xmm6, %xmm3
movapd %xmm5, -6544(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm3
addsd %xmm2, %xmm3
movapd -496(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm4, -6528(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm2
subsd %xmm2, %xmm3
addsd %xmm3, %xmm0
movapd %xmm8, %xmm4
mulsd LCPI19_114(%rip), %xmm4
addsd %xmm0, %xmm4
mulsd %xmm9, %xmm3
addsd %xmm1, %xmm3
mulsd LCPI19_31(%rip), %xmm8
addsd %xmm3, %xmm8
movapd -1616(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm3
mulsd %xmm4, %xmm3
movapd -1088(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm0
mulsd %xmm8, %xmm0
subsd %xmm0, %xmm3
movapd %xmm14, %xmm1
movsd LCPI19_111(%rip), %xmm11 ## xmm11 = mem[0],zero
mulsd %xmm11, %xmm1
movapd -848(%rbp), %xmm10 ## 16-byte Reload
movapd %xmm10, %xmm0
mulsd %xmm3, %xmm0
subsd %xmm1, %xmm0
movapd %xmm14, %xmm1
mulsd LCPI19_20(%rip), %xmm1
movapd %xmm8, -6512(%rbp) ## 16-byte Spill
mulsd %xmm8, %xmm2
addsd %xmm1, %xmm2
movapd %xmm5, %xmm1
movapd %xmm4, -6496(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm1
addsd %xmm2, %xmm1
movapd -2496(%rbp), %xmm5 ## 16-byte Reload
movsd -960(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm5, %xmm7
movapd -2256(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm4
movsd -1296(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm2, %xmm4
movsd -5056(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
addsd %xmm6, %xmm4
movapd %xmm4, -3680(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm2
addsd %xmm7, %xmm2
mulsd LCPI19_115(%rip), %xmm2
addsd %xmm1, %xmm2
movapd -2144(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm1
mulsd %xmm2, %xmm1
subsd %xmm1, %xmm0
movsd -976(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm5, %xmm4
movapd %xmm9, %xmm1
movsd -560(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm7, %xmm1
addsd %xmm6, %xmm1
movapd %xmm1, -3648(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm7
addsd %xmm4, %xmm7
movapd %xmm7, %xmm12
mulsd %xmm11, %xmm12
addsd %xmm0, %xmm12
movapd %xmm14, %xmm0
movsd LCPI19_30(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd %xmm4, %xmm0
movapd %xmm8, %xmm1
movapd %xmm3, -5008(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm1
subsd %xmm0, %xmm1
movapd %xmm10, %xmm0
movapd %xmm2, -3088(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm0
addsd %xmm1, %xmm0
mulsd %xmm4, %xmm7
movapd %xmm9, %xmm2
movapd -3360(%rbp), %xmm1 ## 16-byte Reload
addsd %xmm0, %xmm7
movsd -128(%rbp), %xmm15 ## 8-byte Reload
## xmm15 = mem[0],zero
movapd %xmm15, %xmm10
mulsd %xmm5, %xmm10
addsd -4128(%rbp), %xmm10 ## 16-byte Folded Reload
movapd %xmm15, %xmm9
mulsd %xmm2, %xmm15
movapd %xmm6, %xmm0
addsd %xmm6, %xmm15
movapd -4976(%rbp), %xmm13 ## 16-byte Reload
testq %rax, %rax
je LBB19_64
## %bb.63:
movapd %xmm5, %xmm3
movapd -3184(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm4
mulsd %xmm1, %xmm4
movapd -2528(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm11, %xmm1
mulsd %xmm13, %xmm1
movsd -280(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
movapd %xmm5, %xmm0
mulsd %xmm2, %xmm0
movapd %xmm9, %xmm2
mulsd %xmm10, %xmm2
subsd %xmm2, %xmm0
movapd %xmm3, %xmm2
movapd %xmm0, %xmm3
mulsd LCPI19_29(%rip), %xmm3
subsd %xmm3, %xmm1
subsd %xmm1, %xmm4
addsd %xmm4, %xmm4
movsd LCPI19_119(%rip), %xmm3 ## xmm3 = mem[0],zero
movapd %xmm14, %xmm1
mulsd %xmm3, %xmm1
movapd %xmm8, %xmm6
mulsd %xmm12, %xmm6
subsd %xmm1, %xmm6
movapd %xmm11, %xmm1
mulsd %xmm7, %xmm1
subsd %xmm1, %xmm6
movapd %xmm5, %xmm1
mulsd %xmm2, %xmm1
mulsd %xmm15, %xmm9
addsd %xmm1, %xmm9
movapd %xmm9, %xmm1
mulsd %xmm3, %xmm1
addsd %xmm6, %xmm1
addsd %xmm1, %xmm1
addsd %xmm4, %xmm1
movapd %xmm14, %xmm4
movsd LCPI19_29(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm4
movapd %xmm11, %xmm6
mulsd %xmm12, %xmm6
subsd %xmm4, %xmm6
movapd %xmm8, %xmm4
mulsd %xmm7, %xmm4
addsd %xmm6, %xmm4
mulsd %xmm2, %xmm9
addsd %xmm4, %xmm9
mulsd %xmm3, %xmm0
movapd %xmm8, %xmm2
mulsd %xmm13, %xmm2
subsd %xmm0, %xmm2
movapd %xmm11, %xmm0
mulsd -3360(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm2, %xmm0
movsd -1536(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
movapd %xmm4, %xmm2
mulsd %xmm1, %xmm2
addsd %xmm9, %xmm0
movsd -1072(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm0, %xmm3
subsd %xmm3, %xmm2
movsd -2272(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm3, %xmm1
mulsd -880(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
mulsd %xmm4, %xmm2
movsd LCPI19_1(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm2
mulsd %xmm3, %xmm0
mulsd %xmm1, %xmm0
subsd %xmm0, %xmm2
movsd %xmm2, 72(%rax)
LBB19_64:
movsd %xmm10, -7664(%rbp) ## 8-byte Spill
movsd %xmm15, -3472(%rbp) ## 8-byte Spill
movapd %xmm12, -3584(%rbp) ## 16-byte Spill
movapd %xmm7, -4992(%rbp) ## 16-byte Spill
movsd %xmm14, -1392(%rbp) ## 8-byte Spill
movapd -640(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm0
movapd %xmm1, %xmm5
movapd -112(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm1, %xmm0
movapd %xmm1, %xmm8
movapd -528(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm1
movapd %xmm2, %xmm7
movapd -1472(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm2, %xmm1
movapd %xmm2, %xmm3
subsd %xmm1, %xmm0
movapd -6688(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm1
movapd %xmm2, %xmm4
mulsd -5776(%rbp), %xmm4 ## 8-byte Folded Reload
movapd -6608(%rbp), %xmm2 ## 16-byte Reload
divsd %xmm2, %xmm4
movapd %xmm4, -6688(%rbp) ## 16-byte Spill
movapd %xmm4, %xmm6
divsd %xmm2, %xmm6
mulsd %xmm0, %xmm6
movsd %xmm6, -960(%rbp) ## 8-byte Spill
movapd %xmm0, %xmm6
divsd %xmm2, %xmm6
movapd %xmm6, -1792(%rbp) ## 16-byte Spill
movapd %xmm7, %xmm15
movapd -1840(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm0, %xmm15
subsd %xmm5, %xmm15
movapd %xmm15, %xmm4
divsd %xmm2, %xmm4
movapd %xmm4, -1424(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm1
movapd -3888(%rbp), %xmm11 ## 16-byte Reload
mulsd %xmm4, %xmm11
addsd %xmm1, %xmm11
movapd %xmm8, %xmm6
movapd %xmm8, %xmm1
mulsd %xmm0, %xmm1
movapd -6624(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm11, %xmm0
addsd %xmm1, %xmm0
subsd %xmm3, %xmm0
movsd -5728(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm0, %xmm2
movsd LCPI19_1(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm2
movsd -5760(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm2, %xmm1
movsd -456(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm0, %xmm2
movsd %xmm2, -456(%rbp) ## 8-byte Spill
movsd LCPI19_37(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm1
movsd -1704(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm2, %xmm0
movsd %xmm0, -1704(%rbp) ## 8-byte Spill
movsd -1408(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
mulsd %xmm0, %xmm9
movsd LCPI19_43(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm9
addsd %xmm1, %xmm9
movsd %xmm9, -1408(%rbp) ## 8-byte Spill
movapd -1232(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm10
movapd %xmm0, %xmm12
mulsd %xmm8, %xmm10
movapd %xmm7, %xmm5
movapd %xmm7, %xmm8
movapd -1456(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm2, %xmm5
movapd %xmm2, %xmm0
subsd %xmm5, %xmm10
movapd -5120(%rbp), %xmm13 ## 16-byte Reload
movapd %xmm13, %xmm5
mulsd -1696(%rbp), %xmm13 ## 8-byte Folded Reload
movapd -3168(%rbp), %xmm2 ## 16-byte Reload
divsd %xmm2, %xmm13
movapd %xmm13, -5120(%rbp) ## 16-byte Spill
divsd %xmm2, %xmm13
mulsd %xmm10, %xmm13
divsd %xmm2, %xmm10
movapd -2624(%rbp), %xmm14 ## 16-byte Reload
mulsd %xmm14, %xmm7
subsd %xmm12, %xmm7
movapd %xmm7, %xmm4
divsd %xmm2, %xmm4
movapd %xmm4, -2864(%rbp) ## 16-byte Spill
mulsd %xmm10, %xmm5
movapd -2944(%rbp), %xmm12 ## 16-byte Reload
mulsd %xmm4, %xmm12
addsd %xmm5, %xmm12
movapd %xmm6, %xmm5
mulsd %xmm14, %xmm5
movapd -6592(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm12, %xmm4
addsd %xmm5, %xmm4
subsd %xmm0, %xmm4
movsd -4768(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm4, %xmm5
mulsd LCPI19_1(%rip), %xmm5
movsd -3760(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm5, %xmm2
mulsd LCPI19_37(%rip), %xmm2
movsd -1384(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm4, %xmm0
movsd %xmm0, -1384(%rbp) ## 8-byte Spill
movsd -4816(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm0, %xmm1
movsd %xmm1, -4816(%rbp) ## 8-byte Spill
movsd -1152(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm1, %xmm0
mulsd %xmm3, %xmm0
addsd %xmm2, %xmm0
movapd %xmm0, %xmm4
movsd %xmm0, -1152(%rbp) ## 8-byte Spill
movapd %xmm6, %xmm5
movsd LCPI19_50(%rip), %xmm6 ## xmm6 = mem[0],zero
mulsd %xmm6, %xmm5
movsd %xmm5, -72(%rbp) ## 8-byte Spill
movsd -1552(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm1, %xmm5
movapd -9520(%rbp), %xmm3 ## 16-byte Reload
subsd %xmm5, %xmm3
movsd %xmm3, -320(%rbp) ## 8-byte Spill
addsd %xmm0, %xmm9
movapd %xmm9, -2288(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm8
movapd %xmm8, -976(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm8
addsd -8192(%rbp), %xmm8 ## 16-byte Folded Reload
movapd %xmm8, -992(%rbp) ## 16-byte Spill
movsd -1200(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm4, %xmm8
addsd %xmm9, %xmm8
movapd %xmm8, -5760(%rbp) ## 16-byte Spill
mulsd %xmm8, %xmm4
movapd %xmm3, %xmm5
mulsd -2800(%rbp), %xmm5 ## 8-byte Folded Reload
subsd %xmm5, %xmm4
movapd %xmm4, -3776(%rbp) ## 16-byte Spill
movsd -4432(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd -5984(%rbp), %xmm5 ## 16-byte Folded Reload
movapd -6640(%rbp), %xmm3 ## 16-byte Reload
mulsd -5968(%rbp), %xmm3 ## 16-byte Folded Reload
addsd %xmm5, %xmm3
movapd -6800(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm5
mulsd -784(%rbp), %xmm5 ## 8-byte Folded Reload
addsd %xmm3, %xmm5
movapd %xmm1, %xmm4
movapd %xmm1, %xmm3
mulsd -12064(%rbp), %xmm3 ## 16-byte Folded Reload
subsd %xmm3, %xmm5
movapd %xmm0, %xmm3
mulsd -272(%rbp), %xmm3 ## 16-byte Folded Reload
movapd -6816(%rbp), %xmm0 ## 16-byte Reload
movapd -400(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm1, %xmm0
movapd -6752(%rbp), %xmm9 ## 16-byte Reload
addsd LCPI19_60(%rip), %xmm9
addsd %xmm3, %xmm0
movapd %xmm9, -6752(%rbp) ## 16-byte Spill
movapd -592(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm2, %xmm9
addsd %xmm0, %xmm9
movapd %xmm9, -3760(%rbp) ## 16-byte Spill
movapd %xmm1, %xmm0
mulsd %xmm9, %xmm0
movapd %xmm2, %xmm3
movapd %xmm2, %xmm9
mulsd -14352(%rbp), %xmm3 ## 16-byte Folded Reload
subsd %xmm3, %xmm0
movapd %xmm6, %xmm1
mulsd %xmm6, %xmm5
mulsd %xmm6, %xmm0
addsd %xmm5, %xmm0
movapd -1232(%rbp), %xmm3 ## 16-byte Reload
mulsd -5120(%rbp), %xmm3 ## 16-byte Folded Reload
mulsd -536(%rbp), %xmm14 ## 8-byte Folded Reload
addsd %xmm3, %xmm14
movsd LCPI19_67(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm4, %xmm3
addsd %xmm3, %xmm14
movapd -640(%rbp), %xmm3 ## 16-byte Reload
movapd -6688(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm6, %xmm3
movapd -1840(%rbp), %xmm5 ## 16-byte Reload
movsd -3152(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm4, %xmm5
addsd %xmm3, %xmm5
addsd %xmm14, %xmm5
addsd %xmm0, %xmm5
mulsd -5856(%rbp), %xmm15 ## 16-byte Folded Reload
addsd -960(%rbp), %xmm15 ## 8-byte Folded Reload
mulsd %xmm4, %xmm11
movapd %xmm11, %xmm0
mulsd LCPI19_1(%rip), %xmm0
subsd %xmm15, %xmm0
movsd LCPI19_44(%rip), %xmm14 ## xmm14 = mem[0],zero
mulsd %xmm14, %xmm11
mulsd -4784(%rbp), %xmm11 ## 8-byte Folded Reload
movsd LCPI19_45(%rip), %xmm15 ## xmm15 = mem[0],zero
mulsd %xmm15, %xmm11
addsd %xmm0, %xmm11
movsd -5776(%rbp), %xmm15 ## 8-byte Reload
## xmm15 = mem[0],zero
movapd -1792(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm15, %xmm0
divsd -5840(%rbp), %xmm11 ## 16-byte Folded Reload
movapd -4208(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm11, %xmm1
addsd %xmm0, %xmm1
movapd -400(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm1, %xmm0
movapd %xmm1, %xmm14
movapd %xmm1, -3744(%rbp) ## 16-byte Spill
movapd %xmm2, %xmm1
movsd -1408(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
mulsd %xmm8, %xmm1
addsd %xmm0, %xmm1
movapd %xmm1, -3104(%rbp) ## 16-byte Spill
movapd -1856(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm0
mulsd %xmm1, %xmm0
subsd %xmm0, %xmm5
movapd -528(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm0
movapd %xmm6, %xmm1
mulsd %xmm6, %xmm0
addsd %xmm4, %xmm0
movapd %xmm0, %xmm4
movapd %xmm0, -2848(%rbp) ## 16-byte Spill
movapd %xmm2, %xmm0
mulsd %xmm4, %xmm0
subsd %xmm0, %xmm5
movsd -2688(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
addsd -7904(%rbp), %xmm2 ## 16-byte Folded Reload
movsd %xmm2, -2688(%rbp) ## 8-byte Spill
movapd -1424(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm15, %xmm0
movapd %xmm11, %xmm15
mulsd -5248(%rbp), %xmm15 ## 16-byte Folded Reload
addsd %xmm0, %xmm15
movapd -272(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm4
mulsd %xmm14, %xmm4
mulsd %xmm15, %xmm9
movapd %xmm15, -3888(%rbp) ## 16-byte Spill
subsd %xmm9, %xmm4
movsd %xmm4, -1424(%rbp) ## 8-byte Spill
movapd %xmm3, %xmm0
mulsd %xmm4, %xmm0
addsd %xmm2, %xmm0
movapd -112(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm11, %xmm2
mulsd %xmm1, %xmm2
subsd -5792(%rbp), %xmm2 ## 16-byte Folded Reload
movsd %xmm2, -1792(%rbp) ## 8-byte Spill
movapd %xmm3, %xmm1
movapd %xmm3, %xmm14
mulsd %xmm2, %xmm1
addsd %xmm0, %xmm1
movapd -400(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm0
mulsd %xmm15, %xmm0
movapd %xmm6, %xmm2
mulsd %xmm8, %xmm2
movapd %xmm8, %xmm15
addsd %xmm0, %xmm2
movapd %xmm2, -3136(%rbp) ## 16-byte Spill
addsd %xmm2, %xmm1
movapd %xmm11, %xmm0
mulsd -3104(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm1
movapd %xmm11, %xmm0
movapd -2848(%rbp), %xmm9 ## 16-byte Reload
mulsd %xmm9, %xmm0
subsd %xmm0, %xmm1
movapd %xmm1, -3824(%rbp) ## 16-byte Spill
movapd -1440(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm1, %xmm0
addsd %xmm5, %xmm0
mulsd -5824(%rbp), %xmm7 ## 16-byte Folded Reload
addsd %xmm13, %xmm7
movapd %xmm12, %xmm8
movsd -536(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
mulsd %xmm12, %xmm8
movapd %xmm8, %xmm4
mulsd LCPI19_1(%rip), %xmm4
subsd %xmm7, %xmm4
mulsd LCPI19_44(%rip), %xmm8
mulsd -4144(%rbp), %xmm8 ## 8-byte Folded Reload
mulsd LCPI19_45(%rip), %xmm8
addsd %xmm4, %xmm8
mulsd -1696(%rbp), %xmm10 ## 8-byte Folded Reload
divsd -5808(%rbp), %xmm8 ## 16-byte Folded Reload
movapd %xmm8, -2944(%rbp) ## 16-byte Spill
movapd -4288(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm8, %xmm4
addsd %xmm10, %xmm4
movapd %xmm3, %xmm2
movapd %xmm3, %xmm1
mulsd %xmm4, %xmm1
movapd %xmm4, %xmm8
movapd %xmm4, -3728(%rbp) ## 16-byte Spill
movapd -592(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm11, %xmm5
movsd -1152(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm7, %xmm5
addsd %xmm1, %xmm5
movapd %xmm5, -3152(%rbp) ## 16-byte Spill
movapd -2400(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm1
mulsd %xmm5, %xmm1
subsd %xmm1, %xmm0
mulsd -5120(%rbp), %xmm14 ## 16-byte Folded Reload
addsd %xmm12, %xmm14
movapd %xmm14, -3168(%rbp) ## 16-byte Spill
movapd %xmm4, %xmm1
mulsd %xmm14, %xmm1
subsd %xmm1, %xmm0
movsd -72(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
movapd %xmm10, %xmm3
mulsd %xmm2, %xmm3
movapd -976(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm1
mulsd %xmm6, %xmm1
subsd %xmm1, %xmm3
movsd %xmm3, -536(%rbp) ## 8-byte Spill
movapd -6800(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm3, %xmm1
movapd %xmm5, %xmm4
movapd %xmm5, %xmm6
mulsd -3760(%rbp), %xmm4 ## 16-byte Folded Reload
subsd %xmm4, %xmm1
movapd -640(%rbp), %xmm4 ## 16-byte Reload
mulsd -3744(%rbp), %xmm4 ## 16-byte Folded Reload
addsd %xmm1, %xmm4
movapd -1840(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm15, %xmm1
addsd %xmm4, %xmm1
movapd -1232(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm8, %xmm4
addsd %xmm1, %xmm4
movapd -2624(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm7, %xmm1
addsd %xmm4, %xmm1
movapd %xmm5, %xmm2
movapd %xmm11, %xmm3
mulsd %xmm11, %xmm2
addsd -2720(%rbp), %xmm2 ## 8-byte Folded Reload
movapd %xmm2, -2512(%rbp) ## 16-byte Spill
movsd -4432(%rbp), %xmm15 ## 8-byte Reload
## xmm15 = mem[0],zero
movapd %xmm15, %xmm4
mulsd %xmm2, %xmm4
addsd %xmm1, %xmm4
movapd -7888(%rbp), %xmm5 ## 16-byte Reload
addsd %xmm4, %xmm5
movapd %xmm10, %xmm1
movapd %xmm10, %xmm13
mulsd %xmm11, %xmm1
movapd -5184(%rbp), %xmm2 ## 16-byte Reload
subsd %xmm1, %xmm2
movapd %xmm2, -960(%rbp) ## 16-byte Spill
movapd -6640(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm4
mulsd %xmm2, %xmm4
addsd %xmm5, %xmm4
addsd LCPI19_46(%rip), %xmm0
movapd -688(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm1
movapd %xmm2, %xmm8
movapd %xmm4, -3344(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm1
addsd %xmm0, %xmm1
movsd -320(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
mulsd -480(%rbp), %xmm12 ## 8-byte Folded Reload
movapd -992(%rbp), %xmm5 ## 16-byte Reload
movapd -192(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm2, %xmm5
addsd -2288(%rbp), %xmm5 ## 16-byte Folded Reload
movapd %xmm2, %xmm4
movapd %xmm2, %xmm10
movapd %xmm5, -5776(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm4
subsd %xmm4, %xmm12
movapd %xmm12, %xmm4
movsd LCPI19_25(%rip), %xmm11 ## xmm11 = mem[0],zero
mulsd %xmm11, %xmm4
addsd %xmm1, %xmm4
movapd -7872(%rbp), %xmm14 ## 16-byte Reload
movapd %xmm14, %xmm1
movsd LCPI19_103(%rip), %xmm11 ## xmm11 = mem[0],zero
mulsd %xmm11, %xmm1
subsd %xmm1, %xmm4
movapd %xmm6, %xmm7
movapd %xmm6, %xmm1
mulsd -8160(%rbp), %xmm1 ## 16-byte Folded Reload
movapd %xmm13, %xmm6
movapd %xmm13, %xmm5
mulsd %xmm3, %xmm5
addsd %xmm1, %xmm5
movapd -6064(%rbp), %xmm1 ## 16-byte Reload
addsd %xmm5, %xmm1
movapd %xmm1, -6064(%rbp) ## 16-byte Spill
mulsd %xmm8, %xmm1
movapd %xmm1, -5728(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm1
addsd %xmm4, %xmm1
movapd %xmm7, %xmm4
movapd %xmm7, %xmm5
mulsd %xmm3, %xmm4
movapd %xmm15, %xmm2
mulsd %xmm13, %xmm2
movapd %xmm13, %xmm0
addsd %xmm4, %xmm2
movapd -6048(%rbp), %xmm10 ## 16-byte Reload
addsd %xmm2, %xmm10
movapd %xmm10, -6048(%rbp) ## 16-byte Spill
mulsd -2336(%rbp), %xmm10 ## 8-byte Folded Reload
addsd %xmm1, %xmm10
movapd %xmm10, -4768(%rbp) ## 16-byte Spill
movapd %xmm10, %xmm2
movsd LCPI19_15(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm2
movapd %xmm12, %xmm1
movsd LCPI19_31(%rip), %xmm6 ## xmm6 = mem[0],zero
mulsd %xmm6, %xmm1
subsd %xmm1, %xmm2
movapd %xmm2, -4144(%rbp) ## 16-byte Spill
movapd -912(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm1
mulsd -2688(%rbp), %xmm1 ## 8-byte Folded Reload
movapd -1248(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm6
movsd -1424(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
mulsd %xmm10, %xmm6
addsd %xmm1, %xmm6
movapd %xmm2, %xmm1
movsd -1792(%rbp), %xmm11 ## 8-byte Reload
## xmm11 = mem[0],zero
mulsd %xmm11, %xmm1
addsd %xmm6, %xmm1
movapd %xmm7, %xmm3
mulsd -3136(%rbp), %xmm3 ## 16-byte Folded Reload
addsd %xmm1, %xmm3
movapd -608(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm1
movapd -3104(%rbp), %xmm13 ## 16-byte Reload
mulsd %xmm13, %xmm1
subsd %xmm1, %xmm3
movapd %xmm2, %xmm1
mulsd %xmm9, %xmm1
subsd %xmm1, %xmm3
movapd %xmm3, -3792(%rbp) ## 16-byte Spill
movapd -1920(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm1
mulsd %xmm13, %xmm1
mulsd %xmm9, %xmm6
addsd %xmm1, %xmm6
movapd -1440(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm3, %xmm1
subsd %xmm6, %xmm1
movapd -2432(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm6
mulsd -3152(%rbp), %xmm6 ## 16-byte Folded Reload
subsd %xmm6, %xmm1
movapd %xmm2, %xmm6
mulsd -3168(%rbp), %xmm6 ## 16-byte Folded Reload
subsd %xmm6, %xmm1
movapd %xmm5, %xmm6
mulsd -5984(%rbp), %xmm6 ## 16-byte Folded Reload
movapd %xmm0, %xmm7
movapd %xmm0, %xmm3
mulsd -5968(%rbp), %xmm7 ## 16-byte Folded Reload
addsd %xmm6, %xmm7
movapd -400(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm6
movapd -2512(%rbp), %xmm15 ## 16-byte Reload
mulsd %xmm15, %xmm6
addsd %xmm7, %xmm6
movapd -272(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm4
movapd -960(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm0, %xmm4
addsd %xmm6, %xmm4
movapd %xmm4, -3808(%rbp) ## 16-byte Spill
addsd LCPI19_77(%rip), %xmm1
movapd -7952(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm4, %xmm6
addsd %xmm1, %xmm6
movapd %xmm14, %xmm1
movsd LCPI19_48(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm7, %xmm1
addsd %xmm6, %xmm1
movapd %xmm5, %xmm6
movsd -784(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm4, %xmm6
movapd %xmm2, %xmm7
movsd -536(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm2, %xmm7
addsd %xmm6, %xmm7
movapd -592(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm5
mulsd %xmm0, %xmm5
addsd %xmm7, %xmm5
addsd -14192(%rbp), %xmm5 ## 16-byte Folded Reload
movapd %xmm5, -1168(%rbp) ## 16-byte Spill
movsd LCPI19_62(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm7, %xmm5
addsd %xmm1, %xmm5
movapd %xmm8, %xmm1
mulsd %xmm2, %xmm1
mulsd %xmm4, %xmm3
addsd %xmm1, %xmm3
movapd %xmm6, %xmm8
mulsd %xmm15, %xmm8
addsd %xmm3, %xmm8
addsd -14176(%rbp), %xmm8 ## 16-byte Folded Reload
movapd -6864(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm8, %xmm4
movapd %xmm8, -4800(%rbp) ## 16-byte Spill
addsd %xmm5, %xmm4
movapd %xmm12, %xmm6
movsd LCPI19_24(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm6
movapd -512(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm1
movapd %xmm7, %xmm15
mulsd %xmm4, %xmm1
addsd %xmm6, %xmm1
movapd -1504(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm6
mulsd -2688(%rbp), %xmm6 ## 8-byte Folded Reload
movapd -1264(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm7
mulsd %xmm10, %xmm7
addsd %xmm6, %xmm7
movapd %xmm0, %xmm6
mulsd %xmm11, %xmm6
addsd %xmm7, %xmm6
movapd %xmm2, %xmm7
mulsd -3136(%rbp), %xmm7 ## 16-byte Folded Reload
addsd %xmm6, %xmm7
movapd -720(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm6
mulsd %xmm13, %xmm6
subsd %xmm6, %xmm7
movapd %xmm0, %xmm6
movapd %xmm0, %xmm5
movapd %xmm9, %xmm2
mulsd %xmm9, %xmm6
subsd %xmm6, %xmm7
movapd -1600(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm0, %xmm13
mulsd %xmm0, %xmm2
addsd %xmm13, %xmm2
movapd %xmm2, %xmm0
movapd -1440(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm7, %xmm2
subsd %xmm0, %xmm2
movapd -112(%rbp), %xmm3 ## 16-byte Reload
movapd -3824(%rbp), %xmm10 ## 16-byte Reload
mulsd %xmm10, %xmm3
movapd -608(%rbp), %xmm6 ## 16-byte Reload
movapd -3792(%rbp), %xmm9 ## 16-byte Reload
mulsd %xmm9, %xmm6
addsd %xmm3, %xmm6
movapd %xmm5, %xmm3
mulsd %xmm7, %xmm3
movapd %xmm7, -4784(%rbp) ## 16-byte Spill
addsd %xmm6, %xmm3
movsd LCPI19_14(%rip), %xmm6 ## xmm6 = mem[0],zero
mulsd %xmm6, %xmm3
addsd %xmm2, %xmm3
movapd -2128(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm2
mulsd -3152(%rbp), %xmm2 ## 16-byte Folded Reload
subsd %xmm2, %xmm3
movapd %xmm0, %xmm2
mulsd -3168(%rbp), %xmm2 ## 16-byte Folded Reload
subsd %xmm2, %xmm3
addsd LCPI19_78(%rip), %xmm3
movapd -7936(%rbp), %xmm2 ## 16-byte Reload
mulsd -3808(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm3, %xmm2
movsd LCPI19_47(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm14
addsd %xmm2, %xmm14
movapd -1168(%rbp), %xmm2 ## 16-byte Reload
movsd LCPI19_63(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm2
subsd %xmm2, %xmm14
movapd -6880(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm8, %xmm3
addsd %xmm14, %xmm3
movapd -448(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm6
mulsd %xmm3, %xmm6
addsd %xmm1, %xmm6
movapd %xmm12, %xmm1
movsd LCPI19_23(%rip), %xmm13 ## xmm13 = mem[0],zero
mulsd %xmm13, %xmm1
movapd %xmm4, -2848(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm2
subsd %xmm2, %xmm1
movapd %xmm15, %xmm2
movapd %xmm3, -6624(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm2
addsd %xmm1, %xmm2
movsd LCPI19_108(%rip), %xmm1 ## xmm1 = mem[0],zero
movapd -4768(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm1, %xmm0
movsd LCPI19_21(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm12
subsd %xmm0, %xmm12
movapd -4144(%rbp), %xmm0 ## 16-byte Reload
addsd %xmm6, %xmm0
movsd LCPI19_13(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm6
addsd %xmm12, %xmm6
addsd %xmm2, %xmm6
mulsd %xmm1, %xmm2
addsd %xmm0, %xmm2
movsd LCPI19_20(%rip), %xmm0 ## xmm0 = mem[0],zero
movapd -3776(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm0, %xmm3
movapd -1312(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm0
mulsd %xmm2, %xmm0
subsd %xmm0, %xmm3
movapd -1328(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm0
mulsd %xmm6, %xmm0
subsd %xmm0, %xmm3
movapd %xmm3, %xmm8
movapd %xmm6, -6608(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm4
movapd %xmm1, %xmm0
movapd %xmm2, -6592(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm0
subsd %xmm0, %xmm4
movsd -320(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -1280(%rbp), %xmm0 ## 8-byte Folded Reload
movapd -992(%rbp), %xmm2 ## 16-byte Reload
movsd -704(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm1, %xmm2
addsd -2288(%rbp), %xmm2 ## 16-byte Folded Reload
movapd %xmm2, -4768(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm1
subsd %xmm1, %xmm0
movapd -1936(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm1
movapd %xmm2, %xmm5
mulsd %xmm4, %xmm1
movapd %xmm0, %xmm2
movsd LCPI19_30(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm2
subsd %xmm2, %xmm1
movapd -1216(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm3
mulsd %xmm8, %xmm3
subsd %xmm1, %xmm3
movapd %xmm3, -4144(%rbp) ## 16-byte Spill
movapd %xmm2, %xmm1
movapd %xmm4, -7904(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm1
movsd LCPI19_111(%rip), %xmm13 ## xmm13 = mem[0],zero
mulsd %xmm13, %xmm0
subsd %xmm0, %xmm1
movapd %xmm5, %xmm0
movapd %xmm8, -3776(%rbp) ## 16-byte Spill
mulsd %xmm8, %xmm0
addsd %xmm1, %xmm0
movapd %xmm0, -3104(%rbp) ## 16-byte Spill
movsd -5104(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -1704(%rbp), %xmm0 ## 8-byte Folded Reload
mulsd LCPI19_41(%rip), %xmm0
movsd -4752(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm0, %xmm1
divsd -4832(%rbp), %xmm1 ## 8-byte Folded Reload
movapd -2896(%rbp), %xmm15 ## 16-byte Reload
mulsd %xmm1, %xmm15
movsd -5072(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -456(%rbp), %xmm0 ## 8-byte Folded Reload
movsd LCPI19_1(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm0
movapd -4112(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm0, %xmm1
movsd LCPI19_110(%rip), %xmm11 ## xmm11 = mem[0],zero
mulsd %xmm11, %xmm1
subsd %xmm1, %xmm15
movapd -528(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm0
mulsd %xmm10, %xmm0
movapd -1248(%rbp), %xmm12 ## 16-byte Reload
movapd %xmm12, %xmm1
mulsd %xmm9, %xmm1
addsd %xmm0, %xmm1
movapd -1264(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm7, %xmm0
addsd %xmm1, %xmm0
addsd %xmm0, %xmm15
movapd %xmm0, %xmm1
mulsd %xmm2, %xmm1
subsd %xmm1, %xmm15
movapd %xmm15, %xmm13
subsd %xmm0, %xmm13
movsd -2672(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
addsd -7792(%rbp), %xmm5 ## 16-byte Folded Reload
movapd -2864(%rbp), %xmm0 ## 16-byte Reload
mulsd -1696(%rbp), %xmm0 ## 8-byte Folded Reload
movapd -2944(%rbp), %xmm1 ## 16-byte Reload
mulsd -776(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm0, %xmm1
movapd -272(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm10
mulsd -3728(%rbp), %xmm10 ## 16-byte Folded Reload
movapd -592(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm1, %xmm0
movapd %xmm1, -2944(%rbp) ## 16-byte Spill
subsd %xmm0, %xmm10
movapd %xmm6, %xmm7
movapd %xmm6, %xmm2
mulsd %xmm10, %xmm2
addsd %xmm5, %xmm2
movapd -112(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm11
mulsd -5120(%rbp), %xmm11 ## 16-byte Folded Reload
subsd -7920(%rbp), %xmm11 ## 16-byte Folded Reload
movapd %xmm7, %xmm8
movapd %xmm7, %xmm14
mulsd %xmm11, %xmm8
addsd %xmm2, %xmm8
movapd -400(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm1, %xmm2
movapd %xmm3, %xmm1
mulsd -1152(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm2, %xmm1
addsd %xmm1, %xmm8
movapd %xmm6, %xmm2
movapd -3152(%rbp), %xmm9 ## 16-byte Reload
mulsd %xmm9, %xmm2
subsd %xmm2, %xmm8
movapd %xmm6, %xmm2
movapd -3168(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm0, %xmm2
subsd %xmm2, %xmm8
movapd -912(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm2
mulsd %xmm5, %xmm2
movapd %xmm12, %xmm7
movapd %xmm12, %xmm3
mulsd %xmm10, %xmm3
movapd %xmm10, %xmm12
movsd %xmm10, -2896(%rbp) ## 8-byte Spill
addsd %xmm2, %xmm3
movapd %xmm7, %xmm6
movsd %xmm11, -456(%rbp) ## 8-byte Spill
mulsd %xmm11, %xmm6
addsd %xmm3, %xmm6
movapd %xmm4, %xmm2
mulsd %xmm1, %xmm2
addsd %xmm6, %xmm2
movapd -608(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm3
mulsd %xmm9, %xmm3
subsd %xmm3, %xmm2
movapd %xmm4, %xmm3
mulsd %xmm0, %xmm3
subsd %xmm3, %xmm2
movapd %xmm14, %xmm3
mulsd %xmm8, %xmm3
movapd %xmm7, %xmm14
mulsd %xmm2, %xmm14
addsd %xmm3, %xmm14
movapd -1504(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm3
movsd %xmm5, -2672(%rbp) ## 8-byte Spill
mulsd %xmm5, %xmm3
movapd -1264(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm7
mulsd %xmm10, %xmm7
addsd %xmm3, %xmm7
movapd %xmm6, %xmm5
mulsd %xmm11, %xmm5
addsd %xmm7, %xmm5
movapd %xmm4, %xmm3
movapd %xmm1, -4832(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm3
addsd %xmm5, %xmm3
movapd -720(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm1, %xmm9
subsd %xmm9, %xmm3
mulsd %xmm1, %xmm0
subsd %xmm0, %xmm3
movapd %xmm6, %xmm5
mulsd %xmm3, %xmm5
addsd %xmm14, %xmm5
movsd -1712(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd -4816(%rbp), %xmm6 ## 8-byte Folded Reload
mulsd LCPI19_42(%rip), %xmm6
movsd -4096(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm6, %xmm7
divsd -4848(%rbp), %xmm7 ## 8-byte Folded Reload
movsd -1400(%rbp), %xmm14 ## 8-byte Reload
## xmm14 = mem[0],zero
mulsd %xmm7, %xmm14
movsd -5088(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd -1384(%rbp), %xmm6 ## 8-byte Folded Reload
movsd LCPI19_1(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm6
movapd -3696(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm6, %xmm7
mulsd LCPI19_110(%rip), %xmm7
subsd %xmm7, %xmm14
subsd %xmm5, %xmm13
addsd %xmm5, %xmm14
mulsd %xmm0, %xmm5
subsd %xmm5, %xmm14
addsd %xmm13, %xmm14
movapd -6640(%rbp), %xmm1 ## 16-byte Reload
mulsd -5984(%rbp), %xmm1 ## 16-byte Folded Reload
movapd -8160(%rbp), %xmm5 ## 16-byte Reload
mulsd -5968(%rbp), %xmm5 ## 16-byte Folded Reload
addsd %xmm1, %xmm5
movapd -6816(%rbp), %xmm4 ## 16-byte Reload
movsd -784(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm4, %xmm1
addsd %xmm5, %xmm1
movapd %xmm1, %xmm5
movsd -1552(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -9704(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm5, %xmm1
movapd %xmm1, %xmm6
movapd -592(%rbp), %xmm1 ## 16-byte Reload
mulsd -14288(%rbp), %xmm1 ## 16-byte Folded Reload
movapd -272(%rbp), %xmm5 ## 16-byte Reload
movapd -3760(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm0, %xmm5
subsd %xmm5, %xmm1
movsd LCPI19_50(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm5, %xmm6
mulsd %xmm5, %xmm1
addsd %xmm6, %xmm1
movapd -1456(%rbp), %xmm9 ## 16-byte Reload
movapd -5120(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm9, %xmm5
movapd -2624(%rbp), %xmm11 ## 16-byte Reload
movapd -7920(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm11, %xmm6
subsd %xmm6, %xmm5
movapd %xmm5, %xmm7
movapd -1472(%rbp), %xmm6 ## 16-byte Reload
movapd -6688(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm6, %xmm5
movapd -1840(%rbp), %xmm10 ## 16-byte Reload
movapd -5792(%rbp), %xmm12 ## 16-byte Reload
mulsd %xmm10, %xmm12
subsd %xmm12, %xmm5
addsd %xmm7, %xmm5
subsd %xmm5, %xmm1
movapd -1856(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm5
mulsd -1424(%rbp), %xmm5 ## 8-byte Folded Reload
addsd %xmm1, %xmm5
movapd %xmm7, %xmm1
mulsd -1792(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm5, %xmm1
movapd -1184(%rbp), %xmm5 ## 16-byte Reload
mulsd -3824(%rbp), %xmm5 ## 16-byte Folded Reload
addsd %xmm1, %xmm5
movapd -2400(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm1
mulsd -2896(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm5, %xmm1
movapd %xmm7, %xmm5
mulsd -456(%rbp), %xmm5 ## 8-byte Folded Reload
addsd %xmm1, %xmm5
mulsd -1872(%rbp), %xmm8 ## 8-byte Folded Reload
addsd %xmm5, %xmm8
movapd %xmm14, %xmm1
mulsd LCPI19_25(%rip), %xmm1
addsd %xmm8, %xmm1
movapd %xmm0, %xmm5
movsd -72(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
mulsd %xmm12, %xmm5
mulsd -536(%rbp), %xmm4 ## 8-byte Folded Reload
addsd %xmm5, %xmm4
movapd -3744(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm6, %xmm5
subsd %xmm5, %xmm4
movapd %xmm10, %xmm5
mulsd -3888(%rbp), %xmm5 ## 16-byte Folded Reload
addsd %xmm4, %xmm5
movapd -3728(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm9, %xmm4
subsd %xmm4, %xmm5
movapd %xmm11, %xmm4
movapd -2944(%rbp), %xmm8 ## 16-byte Reload
mulsd %xmm8, %xmm4
addsd %xmm5, %xmm4
movapd -7808(%rbp), %xmm5 ## 16-byte Reload
addsd %xmm4, %xmm5
movapd -6640(%rbp), %xmm4 ## 16-byte Reload
movapd -2512(%rbp), %xmm9 ## 16-byte Reload
mulsd %xmm9, %xmm4
addsd %xmm5, %xmm4
movapd -8160(%rbp), %xmm6 ## 16-byte Reload
movapd -960(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm7, %xmm6
addsd %xmm4, %xmm6
movapd -688(%rbp), %xmm13 ## 16-byte Reload
movapd %xmm13, %xmm5
movapd %xmm6, -8160(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm5
addsd %xmm1, %xmm5
movapd -976(%rbp), %xmm1 ## 16-byte Reload
mulsd -5968(%rbp), %xmm1 ## 16-byte Folded Reload
movapd -400(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm7, %xmm4
addsd %xmm1, %xmm4
movapd %xmm12, %xmm1
mulsd -5984(%rbp), %xmm1 ## 16-byte Folded Reload
movapd -272(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm9, %xmm6
addsd %xmm1, %xmm6
movsd %xmm6, -1712(%rbp) ## 8-byte Spill
subsd %xmm6, %xmm4
movapd -1248(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm4, %xmm1
movapd -608(%rbp), %xmm6 ## 16-byte Reload
movapd -3808(%rbp), %xmm9 ## 16-byte Reload
mulsd %xmm9, %xmm6
addsd %xmm1, %xmm6
movsd LCPI19_58(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm6, %xmm7
addsd %xmm5, %xmm7
movapd -1264(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm4, %xmm5
movapd -720(%rbp), %xmm10 ## 16-byte Reload
movapd %xmm9, %xmm1
mulsd %xmm9, %xmm10
addsd %xmm5, %xmm10
movsd LCPI19_56(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm10, %xmm5
addsd %xmm7, %xmm5
movsd -7824(%rbp), %xmm11 ## 8-byte Reload
## xmm11 = mem[0],zero
movsd LCPI19_103(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm11, %xmm1
subsd %xmm1, %xmm5
movsd -480(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
mulsd -992(%rbp), %xmm12 ## 16-byte Folded Reload
movapd -3888(%rbp), %xmm0 ## 16-byte Reload
addsd %xmm8, %xmm0
movsd %xmm0, -784(%rbp) ## 8-byte Spill
movsd -320(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movapd -192(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm7, %xmm1
addsd %xmm0, %xmm1
movapd %xmm7, %xmm8
movapd %xmm7, %xmm9
movsd %xmm1, -1384(%rbp) ## 8-byte Spill
mulsd %xmm1, %xmm8
addsd %xmm12, %xmm8
movapd %xmm8, %xmm7
mulsd LCPI19_25(%rip), %xmm7
subsd %xmm7, %xmm5
movsd -2336(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -6064(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm5, %xmm1
movapd -6048(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm13, %xmm5
movapd %xmm5, -4848(%rbp) ## 16-byte Spill
mulsd %xmm9, %xmm5
subsd %xmm5, %xmm1
movapd %xmm1, %xmm5
movapd %xmm1, %xmm9
mulsd LCPI19_108(%rip), %xmm5
movapd %xmm14, %xmm0
mulsd LCPI19_21(%rip), %xmm0
subsd %xmm5, %xmm0
movsd %xmm0, -480(%rbp) ## 8-byte Spill
movapd %xmm14, %xmm5
mulsd LCPI19_24(%rip), %xmm5
movsd LCPI19_112(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm8, %xmm7
addsd %xmm5, %xmm7
movapd -1600(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm5
mulsd -1424(%rbp), %xmm5 ## 8-byte Folded Reload
mulsd -1792(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm5, %xmm1
movapd -1184(%rbp), %xmm5 ## 16-byte Reload
mulsd -4784(%rbp), %xmm5 ## 16-byte Folded Reload
addsd %xmm1, %xmm5
mulsd LCPI19_14(%rip), %xmm15
addsd %xmm5, %xmm15
movapd -2128(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm1
movsd -2896(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm0, %xmm1
addsd %xmm15, %xmm1
mulsd -456(%rbp), %xmm5 ## 8-byte Folded Reload
addsd %xmm1, %xmm5
mulsd -1872(%rbp), %xmm3 ## 8-byte Folded Reload
addsd %xmm5, %xmm3
movapd -7936(%rbp), %xmm12 ## 16-byte Reload
mulsd %xmm4, %xmm12
addsd %xmm3, %xmm12
movapd %xmm6, %xmm1
movsd LCPI19_55(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm5, %xmm1
subsd %xmm1, %xmm12
movsd LCPI19_54(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm10, %xmm1
addsd %xmm12, %xmm1
movapd %xmm11, %xmm3
movsd LCPI19_47(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm5, %xmm3
addsd %xmm1, %xmm3
movapd -6880(%rbp), %xmm1 ## 16-byte Reload
movapd -1168(%rbp), %xmm12 ## 16-byte Reload
mulsd %xmm12, %xmm1
addsd %xmm3, %xmm1
movapd -4800(%rbp), %xmm15 ## 16-byte Reload
movsd LCPI19_63(%rip), %xmm13 ## xmm13 = mem[0],zero
mulsd %xmm15, %xmm13
addsd %xmm1, %xmm13
movapd -448(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm13, %xmm3
addsd %xmm7, %xmm3
movsd -1424(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
movapd -1920(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm5, %xmm7
movsd -1792(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm5, %xmm1
addsd %xmm7, %xmm1
movapd %xmm1, %xmm7
movapd -1184(%rbp), %xmm1 ## 16-byte Reload
mulsd -3792(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm7, %xmm1
movapd -2432(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm7, %xmm0
addsd %xmm1, %xmm0
movsd -456(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm7, %xmm1
addsd %xmm0, %xmm1
mulsd -1872(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm1, %xmm2
mulsd -7952(%rbp), %xmm4 ## 16-byte Folded Reload
addsd %xmm2, %xmm4
mulsd LCPI19_57(%rip), %xmm6
addsd %xmm4, %xmm6
mulsd LCPI19_55(%rip), %xmm10
subsd %xmm10, %xmm6
movsd LCPI19_48(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm11
addsd %xmm6, %xmm11
movapd -6864(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm12, %xmm4
addsd %xmm11, %xmm4
movsd LCPI19_62(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm15, %xmm0
subsd %xmm0, %xmm4
movapd -512(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm0
mulsd %xmm4, %xmm0
addsd %xmm3, %xmm0
mulsd LCPI19_15(%rip), %xmm9
movapd %xmm14, %xmm1
movsd LCPI19_31(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm5, %xmm1
subsd %xmm1, %xmm9
addsd %xmm0, %xmm9
movsd LCPI19_13(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm7, %xmm0
addsd -480(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm14, %xmm1
movsd LCPI19_23(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm1
movapd %xmm8, %xmm2
mulsd %xmm3, %xmm2
subsd %xmm2, %xmm1
movapd %xmm6, %xmm2
movapd %xmm13, -2896(%rbp) ## 16-byte Spill
mulsd %xmm13, %xmm2
addsd %xmm1, %xmm2
movapd -448(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm4, -1424(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm1
subsd %xmm1, %xmm2
addsd %xmm2, %xmm0
movsd LCPI19_114(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm8, %xmm3
addsd %xmm0, %xmm3
mulsd %xmm7, %xmm2
addsd %xmm9, %xmm2
mulsd %xmm5, %xmm8
addsd %xmm2, %xmm8
movapd -1312(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm4
mulsd %xmm3, %xmm4
movapd -1328(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm0
mulsd %xmm8, %xmm0
subsd %xmm0, %xmm4
movapd %xmm14, %xmm1
movsd LCPI19_111(%rip), %xmm11 ## xmm11 = mem[0],zero
mulsd %xmm11, %xmm1
movapd -1216(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm0
mulsd %xmm4, %xmm0
subsd %xmm1, %xmm0
movsd LCPI19_20(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm14, %xmm2
movapd %xmm6, %xmm1
movapd %xmm8, -3168(%rbp) ## 16-byte Spill
mulsd %xmm8, %xmm1
addsd %xmm2, %xmm1
movapd %xmm5, %xmm2
movapd %xmm3, -3152(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm2
addsd %xmm1, %xmm2
movapd -992(%rbp), %xmm6 ## 16-byte Reload
movsd -2800(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm6, %xmm5
movsd -320(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
movapd %xmm7, %xmm1
movsd -1200(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm3, %xmm1
movsd -784(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
addsd %xmm12, %xmm1
movsd %xmm1, -456(%rbp) ## 8-byte Spill
mulsd %xmm1, %xmm3
addsd %xmm5, %xmm3
mulsd LCPI19_115(%rip), %xmm3
addsd %xmm2, %xmm3
movapd -1936(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm1
mulsd %xmm3, %xmm1
subsd %xmm1, %xmm0
movsd -1280(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm6, %xmm2
movapd %xmm7, %xmm1
movapd %xmm7, %xmm10
movsd -704(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm7, %xmm1
addsd %xmm12, %xmm1
movsd %xmm1, -1792(%rbp) ## 8-byte Spill
mulsd %xmm1, %xmm7
addsd %xmm2, %xmm7
movapd %xmm7, %xmm13
mulsd %xmm11, %xmm13
movapd %xmm10, %xmm2
movapd -4144(%rbp), %xmm10 ## 16-byte Reload
addsd %xmm0, %xmm13
movapd %xmm14, %xmm0
movsd LCPI19_30(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm5, %xmm0
movapd %xmm8, %xmm1
movapd %xmm4, -5104(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm1
subsd %xmm0, %xmm1
movapd %xmm9, %xmm0
movapd %xmm3, -5120(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm0
addsd %xmm1, %xmm0
mulsd %xmm5, %xmm7
movsd -128(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
addsd %xmm0, %xmm7
movapd %xmm5, %xmm11
mulsd %xmm6, %xmm11
addsd -2288(%rbp), %xmm11 ## 16-byte Folded Reload
movapd %xmm5, %xmm0
mulsd %xmm2, %xmm0
movapd %xmm12, %xmm1
addsd %xmm12, %xmm0
movapd %xmm0, %xmm15
testq %rax, %rax
je LBB19_66
## %bb.65:
movapd -2880(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm4
mulsd %xmm10, %xmm4
movapd -2784(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm1
mulsd -3104(%rbp), %xmm1 ## 16-byte Folded Reload
movsd -280(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
movapd %xmm12, %xmm0
mulsd %xmm2, %xmm0
movapd %xmm5, %xmm2
mulsd %xmm11, %xmm2
subsd %xmm2, %xmm0
movapd %xmm0, %xmm3
mulsd LCPI19_29(%rip), %xmm3
subsd %xmm3, %xmm1
subsd %xmm1, %xmm4
addsd %xmm4, %xmm4
movsd LCPI19_119(%rip), %xmm3 ## xmm3 = mem[0],zero
movapd %xmm14, %xmm1
mulsd %xmm3, %xmm1
movapd %xmm6, %xmm2
movapd %xmm8, %xmm6
mulsd %xmm13, %xmm6
subsd %xmm1, %xmm6
movapd %xmm9, %xmm1
mulsd %xmm7, %xmm1
subsd %xmm1, %xmm6
mulsd %xmm2, %xmm12
mulsd %xmm15, %xmm5
addsd %xmm12, %xmm5
movapd %xmm5, %xmm1
mulsd %xmm3, %xmm1
addsd %xmm6, %xmm1
addsd %xmm1, %xmm1
addsd %xmm4, %xmm1
movapd %xmm14, %xmm4
movsd LCPI19_29(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm4
movapd %xmm9, %xmm6
mulsd %xmm13, %xmm6
subsd %xmm4, %xmm6
movapd %xmm8, %xmm4
mulsd %xmm7, %xmm4
addsd %xmm6, %xmm4
mulsd %xmm2, %xmm5
addsd %xmm4, %xmm5
mulsd %xmm3, %xmm0
movapd %xmm8, %xmm2
mulsd -3104(%rbp), %xmm2 ## 16-byte Folded Reload
subsd %xmm0, %xmm2
movapd %xmm9, %xmm0
mulsd %xmm10, %xmm0
addsd %xmm2, %xmm0
movsd -1536(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
movapd %xmm4, %xmm2
mulsd %xmm1, %xmm2
addsd %xmm5, %xmm0
movsd -1072(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm0, %xmm3
subsd %xmm3, %xmm2
movsd -2272(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm3, %xmm1
mulsd -880(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
mulsd %xmm4, %xmm2
movsd LCPI19_1(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm2
mulsd %xmm3, %xmm0
mulsd %xmm1, %xmm0
subsd %xmm0, %xmm2
movsd %xmm2, 80(%rax)
LBB19_66:
movsd %xmm11, -280(%rbp) ## 8-byte Spill
movsd %xmm15, -6640(%rbp) ## 8-byte Spill
movapd %xmm13, -3696(%rbp) ## 16-byte Spill
movapd %xmm7, -5792(%rbp) ## 16-byte Spill
movsd %xmm14, -1400(%rbp) ## 8-byte Spill
movsd -5024(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
subsd -7312(%rbp), %xmm7 ## 8-byte Folded Reload
movapd -5856(%rbp), %xmm0 ## 16-byte Reload
subsd -13208(%rbp), %xmm0 ## 8-byte Folded Reload
subsd -13232(%rbp), %xmm0 ## 8-byte Folded Reload
divsd -5840(%rbp), %xmm0 ## 16-byte Folded Reload
movapd -4208(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm0, %xmm1
movapd -5248(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm0, %xmm3
subsd -13192(%rbp), %xmm3 ## 8-byte Folded Reload
movapd %xmm3, -5248(%rbp) ## 16-byte Spill
movapd -272(%rbp), %xmm10 ## 16-byte Reload
movapd %xmm10, %xmm15
mulsd %xmm1, %xmm15
movapd %xmm1, %xmm2
movapd %xmm1, -4208(%rbp) ## 16-byte Spill
movapd -592(%rbp), %xmm12 ## 16-byte Reload
movapd %xmm12, %xmm1
mulsd %xmm3, %xmm1
subsd %xmm1, %xmm15
movapd -400(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm0
mulsd %xmm3, %xmm0
subsd -13184(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm5, %xmm1
mulsd %xmm2, %xmm1
movapd -12272(%rbp), %xmm4 ## 16-byte Reload
subsd %xmm1, %xmm4
movapd -528(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm1
mulsd %xmm15, %xmm1
addsd %xmm0, %xmm1
movapd -112(%rbp), %xmm13 ## 16-byte Reload
movapd %xmm13, %xmm6
mulsd %xmm4, %xmm6
addsd %xmm1, %xmm6
movapd %xmm6, %xmm9
movapd %xmm6, -1552(%rbp) ## 16-byte Spill
movapd -1248(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm1
mulsd %xmm15, %xmm1
movapd -912(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm0, %xmm2
addsd %xmm1, %xmm2
movapd -608(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm4, %xmm1
movapd %xmm4, -12272(%rbp) ## 16-byte Spill
addsd %xmm2, %xmm1
movapd %xmm1, -1280(%rbp) ## 16-byte Spill
movapd %xmm8, %xmm3
mulsd %xmm9, %xmm3
movapd %xmm6, %xmm2
mulsd %xmm1, %xmm2
addsd %xmm3, %xmm2
movapd -1264(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm11, %xmm1
mulsd %xmm15, %xmm1
movapd -1504(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm3
movsd %xmm0, -1704(%rbp) ## 8-byte Spill
mulsd %xmm0, %xmm3
addsd %xmm1, %xmm3
movapd -720(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm4, %xmm0
addsd %xmm3, %xmm0
movapd %xmm0, -2336(%rbp) ## 16-byte Spill
movapd %xmm11, %xmm1
mulsd %xmm0, %xmm1
addsd %xmm2, %xmm1
addsd %xmm1, %xmm7
movapd %xmm1, %xmm2
movsd LCPI19_1(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm2
subsd %xmm2, %xmm7
movsd %xmm7, -5024(%rbp) ## 8-byte Spill
movapd %xmm7, %xmm2
subsd %xmm1, %xmm2
movapd -5824(%rbp), %xmm1 ## 16-byte Reload
subsd -13224(%rbp), %xmm1 ## 8-byte Folded Reload
subsd -13240(%rbp), %xmm1 ## 8-byte Folded Reload
divsd -5808(%rbp), %xmm1 ## 16-byte Folded Reload
movapd -4288(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm1, %xmm3
movsd -776(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm1, %xmm0
subsd -13000(%rbp), %xmm0 ## 8-byte Folded Reload
mulsd %xmm3, %xmm10
movapd %xmm3, %xmm4
movapd %xmm3, -4288(%rbp) ## 16-byte Spill
movapd %xmm12, %xmm3
mulsd %xmm0, %xmm3
subsd %xmm3, %xmm10
movapd %xmm5, %xmm12
mulsd %xmm0, %xmm12
subsd -12992(%rbp), %xmm12 ## 8-byte Folded Reload
mulsd %xmm4, %xmm5
movapd -6448(%rbp), %xmm7 ## 16-byte Reload
subsd %xmm5, %xmm7
movapd %xmm8, %xmm4
mulsd %xmm10, %xmm4
addsd %xmm12, %xmm4
movapd %xmm13, %xmm3
mulsd %xmm7, %xmm3
addsd %xmm4, %xmm3
movapd %xmm6, %xmm4
mulsd %xmm10, %xmm4
movapd -912(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm12, %xmm5
addsd %xmm4, %xmm5
movapd -608(%rbp), %xmm14 ## 16-byte Reload
mulsd %xmm7, %xmm14
movapd %xmm7, -6448(%rbp) ## 16-byte Spill
addsd %xmm5, %xmm14
movapd %xmm8, %xmm4
mulsd %xmm3, %xmm4
movapd %xmm6, %xmm5
mulsd %xmm14, %xmm5
addsd %xmm4, %xmm5
movapd %xmm11, %xmm4
mulsd %xmm10, %xmm4
movapd %xmm9, %xmm6
movsd %xmm12, -1696(%rbp) ## 8-byte Spill
mulsd %xmm12, %xmm6
addsd %xmm4, %xmm6
movapd -720(%rbp), %xmm13 ## 16-byte Reload
mulsd %xmm7, %xmm13
addsd %xmm6, %xmm13
movapd %xmm11, %xmm4
mulsd %xmm13, %xmm4
addsd %xmm5, %xmm4
movsd -7344(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
subsd -7320(%rbp), %xmm9 ## 8-byte Folded Reload
subsd %xmm4, %xmm2
addsd %xmm4, %xmm9
mulsd LCPI19_1(%rip), %xmm4
subsd %xmm4, %xmm9
addsd %xmm2, %xmm9
movsd -6672(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
subsd -13200(%rbp), %xmm2 ## 8-byte Folded Reload
subsd -13248(%rbp), %xmm2 ## 8-byte Folded Reload
divsd -6656(%rbp), %xmm2 ## 8-byte Folded Reload
movsd -2712(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm2, %xmm4
subsd -13216(%rbp), %xmm4 ## 8-byte Folded Reload
movsd %xmm4, -2712(%rbp) ## 8-byte Spill
movapd -192(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm4, %xmm7
movapd %xmm8, %xmm4
mulsd %xmm7, %xmm4
movsd -7744(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
subsd %xmm4, %xmm12
movapd -2048(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm4
mulsd %xmm7, %xmm4
movapd -7728(%rbp), %xmm6 ## 16-byte Reload
subsd %xmm4, %xmm6
movapd %xmm6, -7728(%rbp) ## 16-byte Spill
mulsd %xmm12, %xmm8
movsd %xmm12, -7744(%rbp) ## 8-byte Spill
mulsd %xmm6, %xmm5
addsd %xmm8, %xmm5
movapd -1104(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm4
mulsd %xmm7, %xmm4
movapd %xmm7, -480(%rbp) ## 16-byte Spill
movapd -9152(%rbp), %xmm1 ## 16-byte Reload
subsd %xmm4, %xmm1
movapd %xmm1, -9152(%rbp) ## 16-byte Spill
movapd %xmm2, %xmm4
mulsd %xmm1, %xmm4
addsd %xmm5, %xmm4
movsd -7336(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
subsd -7328(%rbp), %xmm8 ## 8-byte Folded Reload
movapd %xmm9, %xmm6
subsd %xmm4, %xmm6
addsd %xmm4, %xmm8
mulsd LCPI19_1(%rip), %xmm4
subsd %xmm4, %xmm8
movapd -1856(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm15, %xmm4
movapd -1184(%rbp), %xmm5 ## 16-byte Reload
mulsd -1552(%rbp), %xmm5 ## 16-byte Folded Reload
addsd %xmm4, %xmm5
movapd -2400(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm10, %xmm4
addsd %xmm5, %xmm4
mulsd -1872(%rbp), %xmm3 ## 8-byte Folded Reload
addsd %xmm4, %xmm3
movapd %xmm9, %xmm4
movsd LCPI19_25(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm4
addsd %xmm3, %xmm4
movapd -1840(%rbp), %xmm5 ## 16-byte Reload
movapd -5248(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm1, %xmm5
movapd -1472(%rbp), %xmm3 ## 16-byte Reload
mulsd -4208(%rbp), %xmm3 ## 16-byte Folded Reload
subsd %xmm3, %xmm5
movapd -1456(%rbp), %xmm3 ## 16-byte Reload
mulsd -4288(%rbp), %xmm3 ## 16-byte Folded Reload
subsd %xmm3, %xmm5
movapd -2624(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm0, %xmm2
addsd %xmm5, %xmm2
movapd -688(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm2, -2624(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm5
addsd %xmm4, %xmm5
movapd %xmm1, %xmm2
movsd %xmm0, -776(%rbp) ## 8-byte Spill
addsd %xmm0, %xmm2
movapd %xmm2, -2800(%rbp) ## 16-byte Spill
movapd -192(%rbp), %xmm11 ## 16-byte Reload
mulsd %xmm2, %xmm11
movapd %xmm11, %xmm3
mulsd LCPI19_25(%rip), %xmm3
subsd %xmm3, %xmm5
movapd -2544(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm7, %xmm0
subsd %xmm0, %xmm5
movapd -2352(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm3
mulsd %xmm12, %xmm3
addsd %xmm5, %xmm3
movapd %xmm6, %xmm12
addsd %xmm8, %xmm12
movapd %xmm3, %xmm6
movsd LCPI19_11(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm6
movapd %xmm12, %xmm5
movsd LCPI19_21(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm5
addsd %xmm6, %xmm5
movapd %xmm9, %xmm6
movsd LCPI19_24(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm6
movsd LCPI19_112(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm11, %xmm7
addsd %xmm6, %xmm7
movapd -1600(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm15, %xmm6
movapd -1184(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm2
mulsd -2336(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm6, %xmm2
movsd LCPI19_14(%rip), %xmm6 ## xmm6 = mem[0],zero
movsd -5024(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm6, %xmm0
addsd %xmm2, %xmm0
movapd -2128(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm10, %xmm2
addsd %xmm0, %xmm2
movsd -1872(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm0, %xmm13
addsd %xmm2, %xmm13
movapd -448(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm13, %xmm2
addsd %xmm7, %xmm2
mulsd -1920(%rbp), %xmm15 ## 16-byte Folded Reload
mulsd -1280(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm15, %xmm1
mulsd -2432(%rbp), %xmm10 ## 16-byte Folded Reload
addsd %xmm1, %xmm10
mulsd %xmm0, %xmm14
addsd %xmm10, %xmm14
movapd -512(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm1
movapd %xmm0, %xmm6
mulsd %xmm14, %xmm1
addsd %xmm2, %xmm1
movapd -2912(%rbp), %xmm0 ## 16-byte Reload
movapd -480(%rbp), %xmm15 ## 16-byte Reload
mulsd %xmm15, %xmm0
subsd %xmm0, %xmm1
movapd %xmm4, %xmm0
mulsd -7728(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
movsd LCPI19_15(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm3
movapd %xmm12, %xmm1
movsd LCPI19_31(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm1
subsd %xmm1, %xmm3
addsd %xmm0, %xmm3
movsd LCPI19_13(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
movapd %xmm1, %xmm7
addsd %xmm5, %xmm0
movsd LCPI19_23(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm9
mulsd %xmm1, %xmm11
subsd %xmm11, %xmm9
movapd %xmm6, %xmm1
movapd %xmm13, -4816(%rbp) ## 16-byte Spill
mulsd %xmm13, %xmm1
addsd %xmm9, %xmm1
movapd -448(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm14, -2912(%rbp) ## 16-byte Spill
mulsd %xmm14, %xmm2
subsd %xmm2, %xmm1
movapd -3248(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm15, %xmm2
subsd %xmm2, %xmm1
movapd -9152(%rbp), %xmm11 ## 16-byte Reload
mulsd %xmm11, %xmm4
addsd %xmm1, %xmm4
movsd LCPI19_64(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm8
addsd %xmm4, %xmm8
addsd %xmm8, %xmm0
movapd -2800(%rbp), %xmm10 ## 16-byte Reload
addsd -2712(%rbp), %xmm10 ## 8-byte Folded Reload
movapd %xmm10, -2544(%rbp) ## 16-byte Spill
movapd -192(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm4
mulsd %xmm10, %xmm4
movsd LCPI19_114(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm4, %xmm5
addsd %xmm0, %xmm5
mulsd %xmm7, %xmm8
addsd %xmm3, %xmm8
mulsd LCPI19_31(%rip), %xmm4
addsd %xmm8, %xmm4
movapd -1312(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm6
movapd %xmm0, %xmm3
mulsd %xmm5, %xmm6
movapd -1328(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm0
movapd %xmm1, %xmm7
mulsd %xmm4, %xmm0
subsd %xmm0, %xmm6
movapd %xmm12, %xmm2
movapd %xmm12, %xmm0
movsd LCPI19_111(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
movapd %xmm1, %xmm12
movapd -1216(%rbp), %xmm15 ## 16-byte Reload
movapd %xmm15, %xmm1
mulsd %xmm6, %xmm1
subsd %xmm0, %xmm1
movapd %xmm2, %xmm0
movapd %xmm2, %xmm14
movsd %xmm2, -1840(%rbp) ## 8-byte Spill
movsd LCPI19_115(%rip), %xmm8 ## xmm8 = mem[0],zero
mulsd %xmm8, %xmm0
movapd %xmm3, %xmm2
movapd %xmm4, -5088(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm2
subsd %xmm0, %xmm2
movapd %xmm7, %xmm0
movapd %xmm5, -5072(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm0
addsd %xmm2, %xmm0
movsd -1200(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm10, %xmm2
mulsd %xmm8, %xmm2
addsd %xmm0, %xmm2
movapd -1936(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm0
mulsd %xmm2, %xmm0
subsd %xmm0, %xmm1
movsd -704(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
movapd %xmm8, %xmm13
mulsd %xmm10, %xmm13
movapd %xmm13, %xmm0
mulsd %xmm12, %xmm0
addsd %xmm1, %xmm0
movapd %xmm0, -2352(%rbp) ## 16-byte Spill
movapd %xmm14, %xmm0
movsd LCPI19_30(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm0
movapd %xmm4, %xmm1
movapd %xmm6, -5856(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm1
subsd %xmm0, %xmm1
movapd %xmm15, %xmm0
movapd %xmm2, -5840(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm0
addsd %xmm1, %xmm0
mulsd %xmm3, %xmm13
addsd %xmm0, %xmm13
movapd -12272(%rbp), %xmm5 ## 16-byte Reload
movapd -1856(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm5, %xmm1
movapd -1440(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm0
movapd -1552(%rbp), %xmm14 ## 16-byte Reload
mulsd %xmm14, %xmm0
addsd %xmm1, %xmm0
movapd -6448(%rbp), %xmm12 ## 16-byte Reload
movapd -2400(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm12, %xmm1
addsd %xmm0, %xmm1
movapd %xmm1, %xmm6
movapd -4208(%rbp), %xmm0 ## 16-byte Reload
mulsd -640(%rbp), %xmm0 ## 16-byte Folded Reload
subsd -13280(%rbp), %xmm0 ## 8-byte Folded Reload
movapd -4288(%rbp), %xmm2 ## 16-byte Reload
mulsd -1232(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm0, %xmm2
subsd -13272(%rbp), %xmm2 ## 8-byte Folded Reload
movapd -688(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm2, -4288(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm1
addsd %xmm6, %xmm1
movapd %xmm9, %xmm3
movapd %xmm9, %xmm0
mulsd -9832(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm0, %xmm2
mulsd LCPI19_25(%rip), %xmm2
addsd %xmm1, %xmm2
addsd -13264(%rbp), %xmm2 ## 8-byte Folded Reload
movapd -2576(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm1
mulsd -7744(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm2, %xmm1
movapd %xmm1, %xmm2
mulsd LCPI19_11(%rip), %xmm2
mulsd -5352(%rbp), %xmm3 ## 8-byte Folded Reload
movapd %xmm3, -192(%rbp) ## 16-byte Spill
movsd LCPI19_21(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm3, %xmm7
addsd %xmm2, %xmm7
movsd %xmm7, -480(%rbp) ## 8-byte Spill
movapd -1920(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm5, %xmm2
movapd %xmm4, %xmm3
movapd -1280(%rbp), %xmm9 ## 16-byte Reload
mulsd %xmm9, %xmm3
addsd %xmm2, %xmm3
movapd -2432(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm12, %xmm7
addsd %xmm3, %xmm7
movapd %xmm0, %xmm2
mulsd LCPI19_24(%rip), %xmm2
movapd -512(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm7, %xmm3
addsd %xmm2, %xmm3
mulsd -1600(%rbp), %xmm5 ## 16-byte Folded Reload
movapd %xmm4, %xmm2
movapd -2336(%rbp), %xmm15 ## 16-byte Reload
mulsd %xmm15, %xmm2
addsd %xmm5, %xmm2
movapd -112(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm14, %xmm5
movapd %xmm13, %xmm14
mulsd %xmm5, %xmm4
movapd -608(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm9, %xmm5
addsd %xmm4, %xmm5
movapd -720(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm15, %xmm4
addsd %xmm5, %xmm4
mulsd LCPI19_14(%rip), %xmm4
addsd %xmm2, %xmm4
mulsd -2128(%rbp), %xmm12 ## 16-byte Folded Reload
addsd %xmm4, %xmm12
movapd -448(%rbp), %xmm10 ## 16-byte Reload
movapd %xmm10, %xmm4
mulsd %xmm12, %xmm4
addsd %xmm3, %xmm4
addsd -17312(%rbp), %xmm4 ## 16-byte Folded Reload
movapd %xmm6, %xmm2
movapd -7728(%rbp), %xmm9 ## 16-byte Reload
mulsd %xmm9, %xmm2
addsd %xmm4, %xmm2
movapd -192(%rbp), %xmm3 ## 16-byte Reload
mulsd LCPI19_31(%rip), %xmm3
mulsd LCPI19_15(%rip), %xmm1
subsd %xmm3, %xmm1
addsd %xmm2, %xmm1
movapd %xmm2, %xmm3
movsd LCPI19_13(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd %xmm4, %xmm3
addsd -480(%rbp), %xmm3 ## 8-byte Folded Reload
mulsd LCPI19_23(%rip), %xmm0
movapd %xmm10, %xmm2
movapd %xmm7, -1856(%rbp) ## 16-byte Spill
mulsd %xmm7, %xmm2
subsd %xmm2, %xmm0
movapd -512(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm12, -6448(%rbp) ## 16-byte Spill
mulsd %xmm12, %xmm2
addsd %xmm0, %xmm2
addsd -13256(%rbp), %xmm2 ## 8-byte Folded Reload
movapd %xmm11, %xmm5
mulsd %xmm11, %xmm6
addsd %xmm2, %xmm6
movsd -7744(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd -112(%rbp), %xmm2 ## 16-byte Folded Reload
movapd %xmm9, %xmm0
mulsd -3264(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm2, %xmm0
mulsd -2560(%rbp), %xmm5 ## 16-byte Folded Reload
addsd %xmm0, %xmm5
mulsd LCPI19_64(%rip), %xmm5
addsd %xmm6, %xmm5
movsd -5352(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
addsd %xmm5, %xmm3
mulsd %xmm4, %xmm5
addsd %xmm1, %xmm5
movapd -1312(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm1
mulsd %xmm3, %xmm1
movapd -1328(%rbp), %xmm10 ## 16-byte Reload
movapd %xmm10, %xmm0
mulsd %xmm5, %xmm0
movapd %xmm5, %xmm6
subsd %xmm0, %xmm1
movapd -1216(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm5
mulsd %xmm1, %xmm5
mulsd %xmm12, %xmm8
movapd %xmm8, %xmm0
mulsd LCPI19_111(%rip), %xmm0
subsd %xmm0, %xmm5
movsd -1200(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm12, %xmm0
mulsd LCPI19_115(%rip), %xmm0
movapd %xmm0, %xmm4
movapd %xmm2, %xmm0
movapd %xmm6, -9152(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm0
movapd %xmm5, %xmm11
subsd %xmm4, %xmm0
movsd -1840(%rbp), %xmm15 ## 8-byte Reload
## xmm15 = mem[0],zero
movapd %xmm3, -3760(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm10
addsd %xmm0, %xmm10
movapd -1936(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm0
mulsd %xmm10, %xmm0
subsd %xmm0, %xmm11
movapd %xmm2, %xmm0
movapd %xmm1, -5808(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm0
mulsd LCPI19_30(%rip), %xmm8
subsd %xmm8, %xmm0
movapd -2352(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm7, %xmm5
movapd %xmm10, -4112(%rbp) ## 16-byte Spill
mulsd %xmm10, %xmm5
addsd %xmm0, %xmm5
testq %rax, %rax
movsd -1536(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
movsd -2272(%rbp), %xmm13 ## 8-byte Reload
## xmm13 = mem[0],zero
je LBB19_68
## %bb.67:
movsd LCPI19_119(%rip), %xmm4 ## xmm4 = mem[0],zero
movapd %xmm15, %xmm0
mulsd %xmm4, %xmm0
movapd -2880(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm3
mulsd %xmm8, %xmm3
subsd %xmm0, %xmm3
movapd -2784(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm0
mulsd %xmm14, %xmm0
subsd %xmm0, %xmm3
movsd -128(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
movapd %xmm6, %xmm2
mulsd -2544(%rbp), %xmm2 ## 16-byte Folded Reload
movapd %xmm2, %xmm0
mulsd %xmm4, %xmm0
addsd %xmm3, %xmm0
addsd %xmm0, %xmm0
movapd %xmm7, %xmm4
mulsd %xmm11, %xmm4
movapd %xmm6, %xmm3
mulsd %xmm12, %xmm3
movsd LCPI19_29(%rip), %xmm10 ## xmm10 = mem[0],zero
movapd %xmm3, %xmm6
mulsd %xmm10, %xmm6
subsd %xmm6, %xmm4
movapd %xmm1, %xmm6
mulsd %xmm5, %xmm6
addsd %xmm4, %xmm6
addsd %xmm6, %xmm6
subsd %xmm6, %xmm0
movapd %xmm15, %xmm4
mulsd %xmm10, %xmm4
movapd %xmm7, %xmm6
mulsd %xmm8, %xmm6
subsd %xmm4, %xmm6
movapd %xmm1, %xmm4
mulsd %xmm14, %xmm4
addsd %xmm6, %xmm4
mulsd %xmm10, %xmm2
addsd %xmm4, %xmm2
mulsd LCPI19_119(%rip), %xmm3
mulsd %xmm11, %xmm1
subsd %xmm3, %xmm1
movapd %xmm7, %xmm3
mulsd %xmm5, %xmm3
subsd %xmm3, %xmm1
movapd %xmm9, %xmm3
mulsd %xmm0, %xmm3
addsd %xmm2, %xmm1
movsd -1072(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm1, %xmm2
subsd %xmm2, %xmm3
mulsd %xmm13, %xmm0
mulsd -880(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm0, %xmm1
mulsd %xmm9, %xmm3
movsd LCPI19_1(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm3
mulsd %xmm13, %xmm1
mulsd %xmm0, %xmm1
subsd %xmm1, %xmm3
movsd %xmm3, 88(%rax)
LBB19_68:
movapd %xmm5, -5024(%rbp) ## 16-byte Spill
movapd %xmm11, -6656(%rbp) ## 16-byte Spill
movapd %xmm14, -2864(%rbp) ## 16-byte Spill
movapd -13984(%rbp), %xmm9 ## 16-byte Reload
subsd -12904(%rbp), %xmm9 ## 8-byte Folded Reload
movapd -1440(%rbp), %xmm12 ## 16-byte Reload
movapd %xmm12, %xmm0
mulsd %xmm9, %xmm0
subsd -16960(%rbp), %xmm0 ## 16-byte Folded Reload
subsd -16944(%rbp), %xmm0 ## 16-byte Folded Reload
movsd -7120(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
addsd -12896(%rbp), %xmm1 ## 8-byte Folded Reload
movapd -688(%rbp), %xmm4 ## 16-byte Reload
movsd %xmm1, -7120(%rbp) ## 8-byte Spill
mulsd %xmm1, %xmm4
addsd %xmm0, %xmm4
movapd -17008(%rbp), %xmm10 ## 16-byte Reload
movapd %xmm10, %xmm0
movsd LCPI19_25(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
subsd %xmm0, %xmm4
subsd -12888(%rbp), %xmm4 ## 8-byte Folded Reload
subsd -12880(%rbp), %xmm4 ## 8-byte Folded Reload
movapd -16992(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm11, %xmm13
movsd LCPI19_22(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm13
movapd %xmm4, %xmm0
movsd LCPI19_15(%rip), %xmm8 ## xmm8 = mem[0],zero
mulsd %xmm8, %xmm0
subsd %xmm0, %xmm13
movapd -10240(%rbp), %xmm0 ## 16-byte Reload
subsd -16912(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm12, %xmm3
mulsd %xmm0, %xmm3
movapd %xmm0, %xmm5
movapd %xmm0, -10240(%rbp) ## 16-byte Spill
subsd -12872(%rbp), %xmm3 ## 8-byte Folded Reload
subsd -16896(%rbp), %xmm3 ## 16-byte Folded Reload
movapd -512(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm0
movapd %xmm1, %xmm8
mulsd %xmm3, %xmm0
movapd %xmm10, %xmm1
movsd LCPI19_112(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm1
addsd %xmm0, %xmm1
movapd -11792(%rbp), %xmm14 ## 16-byte Reload
subsd -16976(%rbp), %xmm14 ## 16-byte Folded Reload
mulsd %xmm14, %xmm12
subsd -12864(%rbp), %xmm12 ## 8-byte Folded Reload
movapd -112(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm9, %xmm0
movapd -608(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm5, %xmm2
addsd %xmm0, %xmm2
movapd -720(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm14, %xmm5
addsd %xmm2, %xmm5
movsd LCPI19_14(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm7, %xmm5
addsd %xmm12, %xmm5
subsd -16880(%rbp), %xmm5 ## 16-byte Folded Reload
movapd -448(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm6
movapd %xmm0, %xmm2
mulsd %xmm5, %xmm6
addsd %xmm1, %xmm6
subsd -16864(%rbp), %xmm6 ## 16-byte Folded Reload
subsd -12856(%rbp), %xmm6 ## 8-byte Folded Reload
movsd LCPI19_23(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm10
movapd %xmm2, %xmm0
movapd %xmm3, -2576(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm0
addsd %xmm10, %xmm0
movapd %xmm8, %xmm1
movapd %xmm5, -3248(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm1
subsd %xmm0, %xmm1
subsd -12824(%rbp), %xmm1 ## 8-byte Folded Reload
subsd -12816(%rbp), %xmm1 ## 8-byte Folded Reload
subsd -12832(%rbp), %xmm1 ## 8-byte Folded Reload
movsd LCPI19_11(%rip), %xmm12 ## xmm12 = mem[0],zero
mulsd %xmm12, %xmm4
movsd LCPI19_114(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm11
addsd %xmm4, %xmm11
subsd %xmm6, %xmm13
movsd LCPI19_13(%rip), %xmm15 ## xmm15 = mem[0],zero
mulsd %xmm15, %xmm6
addsd %xmm11, %xmm6
addsd %xmm1, %xmm6
mulsd %xmm15, %xmm1
subsd %xmm1, %xmm13
movapd -1328(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm0
movapd %xmm1, %xmm5
mulsd %xmm13, %xmm0
movapd -1312(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm4
movapd %xmm1, %xmm7
mulsd %xmm6, %xmm4
addsd %xmm0, %xmm4
movapd -1216(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm0
mulsd %xmm4, %xmm0
movsd -12848(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
movapd %xmm3, %xmm1
movsd LCPI19_111(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm1
addsd %xmm0, %xmm1
movsd LCPI19_115(%rip), %xmm2 ## xmm2 = mem[0],zero
movsd -12840(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm2, %xmm0
movapd %xmm7, %xmm2
movapd %xmm13, -3744(%rbp) ## 16-byte Spill
mulsd %xmm13, %xmm2
subsd %xmm0, %xmm2
movapd %xmm5, %xmm0
movapd %xmm6, -3728(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm0
subsd %xmm0, %xmm2
movapd -1936(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm11
mulsd %xmm2, %xmm11
addsd %xmm1, %xmm11
mulsd LCPI19_19(%rip), %xmm3
movapd %xmm0, %xmm1
movapd %xmm4, -4096(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm1
subsd %xmm1, %xmm3
movapd %xmm2, -4752(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm8
addsd %xmm3, %xmm8
movsd -7272(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
subsd -12728(%rbp), %xmm3 ## 8-byte Folded Reload
movapd -528(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm1
mulsd %xmm9, %xmm1
movapd -1248(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm2
movapd -10240(%rbp), %xmm15 ## 16-byte Reload
mulsd %xmm15, %xmm2
addsd %xmm1, %xmm2
movapd -1264(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm1
mulsd %xmm14, %xmm1
addsd %xmm2, %xmm1
addsd %xmm1, %xmm3
movapd %xmm1, %xmm2
movsd LCPI19_1(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm7, %xmm2
subsd %xmm2, %xmm3
movapd %xmm3, %xmm5
subsd %xmm1, %xmm5
movapd -13936(%rbp), %xmm2 ## 16-byte Reload
subsd -12736(%rbp), %xmm2 ## 8-byte Folded Reload
movapd -10160(%rbp), %xmm13 ## 16-byte Reload
subsd -16848(%rbp), %xmm13 ## 16-byte Folded Reload
movapd %xmm6, %xmm1
mulsd %xmm2, %xmm1
mulsd %xmm13, %xmm4
addsd %xmm1, %xmm4
movapd -10144(%rbp), %xmm6 ## 16-byte Reload
subsd -12256(%rbp), %xmm6 ## 16-byte Folded Reload
movapd %xmm0, %xmm1
mulsd %xmm6, %xmm1
addsd %xmm4, %xmm1
movsd -7280(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
subsd -12720(%rbp), %xmm4 ## 8-byte Folded Reload
subsd %xmm1, %xmm5
addsd %xmm1, %xmm4
mulsd %xmm7, %xmm1
subsd %xmm1, %xmm4
addsd %xmm5, %xmm4
movsd -4456(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
subsd -12704(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm4, %xmm10
movsd -12712(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
addsd %xmm1, %xmm10
subsd %xmm1, %xmm0
addsd -12696(%rbp), %xmm0 ## 8-byte Folded Reload
movapd -1184(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm1
movapd %xmm9, -2400(%rbp) ## 16-byte Spill
mulsd %xmm9, %xmm1
movsd -1872(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm7, %xmm2
addsd %xmm1, %xmm2
movapd %xmm4, %xmm1
mulsd LCPI19_25(%rip), %xmm1
addsd %xmm2, %xmm1
subsd -4000(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm0, %xmm10
mulsd %xmm1, %xmm12
movapd %xmm10, %xmm2
mulsd LCPI19_114(%rip), %xmm2
subsd %xmm2, %xmm12
movapd %xmm5, %xmm2
movapd %xmm14, -11792(%rbp) ## 16-byte Spill
mulsd %xmm14, %xmm2
mulsd LCPI19_14(%rip), %xmm3
addsd %xmm2, %xmm3
mulsd %xmm7, %xmm6
addsd %xmm3, %xmm6
movapd %xmm4, %xmm2
mulsd LCPI19_112(%rip), %xmm2
movapd -448(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm3
mulsd %xmm6, %xmm3
subsd %xmm2, %xmm3
mulsd %xmm7, %xmm13
mulsd %xmm15, %xmm5
addsd %xmm5, %xmm13
movapd -512(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm2
mulsd %xmm13, %xmm2
addsd %xmm3, %xmm2
subsd -7616(%rbp), %xmm2 ## 16-byte Folded Reload
mulsd LCPI19_15(%rip), %xmm1
movapd %xmm10, %xmm3
mulsd LCPI19_22(%rip), %xmm3
addsd %xmm1, %xmm3
addsd %xmm2, %xmm3
movapd %xmm2, %xmm5
movsd LCPI19_13(%rip), %xmm14 ## xmm14 = mem[0],zero
mulsd %xmm14, %xmm5
addsd %xmm12, %xmm5
movapd %xmm8, %xmm12
mulsd LCPI19_23(%rip), %xmm4
movapd %xmm7, %xmm1
movapd %xmm6, -10144(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm1
addsd %xmm4, %xmm1
movapd %xmm9, %xmm2
movapd %xmm13, -10160(%rbp) ## 16-byte Spill
mulsd %xmm13, %xmm2
subsd %xmm2, %xmm1
movapd %xmm10, %xmm2
mulsd LCPI19_111(%rip), %xmm2
subsd -7632(%rbp), %xmm1 ## 16-byte Folded Reload
mulsd LCPI19_64(%rip), %xmm0
addsd %xmm1, %xmm0
addsd %xmm0, %xmm5
movapd -1312(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm4
mulsd %xmm5, %xmm4
mulsd %xmm14, %xmm0
addsd %xmm3, %xmm0
movapd -1328(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm1
mulsd %xmm0, %xmm1
subsd %xmm1, %xmm4
movapd -1216(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm13
mulsd %xmm4, %xmm13
subsd %xmm2, %xmm13
movapd %xmm10, %xmm1
mulsd LCPI19_115(%rip), %xmm1
movapd %xmm6, %xmm2
movsd %xmm0, -4456(%rbp) ## 8-byte Spill
mulsd %xmm0, %xmm2
subsd %xmm1, %xmm2
movapd %xmm7, %xmm15
movapd %xmm5, -5824(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm15
addsd %xmm2, %xmm15
movapd -1936(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm1
mulsd %xmm15, %xmm1
subsd %xmm1, %xmm13
movapd %xmm10, %xmm1
mulsd LCPI19_19(%rip), %xmm1
movapd %xmm4, -6688(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm2
addsd %xmm1, %xmm2
movapd %xmm3, %xmm0
movapd %xmm15, -6672(%rbp) ## 16-byte Spill
mulsd %xmm15, %xmm0
addsd %xmm2, %xmm0
testq %rax, %rax
movapd -2784(%rbp), %xmm9 ## 16-byte Reload
je LBB19_70
## %bb.69:
movsd LCPI19_17(%rip), %xmm2 ## xmm2 = mem[0],zero
movsd -12392(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
movapd %xmm7, %xmm1
mulsd %xmm2, %xmm1
movapd %xmm9, %xmm3
mulsd %xmm11, %xmm3
subsd %xmm3, %xmm1
movapd -2880(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm3
mulsd %xmm12, %xmm3
addsd %xmm1, %xmm3
movsd LCPI19_119(%rip), %xmm8 ## xmm8 = mem[0],zero
movapd %xmm10, %xmm5
mulsd %xmm8, %xmm5
movapd %xmm4, %xmm1
mulsd %xmm13, %xmm1
subsd %xmm5, %xmm1
movapd %xmm9, %xmm5
mulsd %xmm0, %xmm5
subsd %xmm5, %xmm1
addsd %xmm3, %xmm3
addsd %xmm1, %xmm1
addsd %xmm3, %xmm1
mulsd %xmm10, %xmm2
movapd %xmm9, %xmm3
mulsd %xmm13, %xmm3
addsd %xmm2, %xmm3
movapd %xmm4, %xmm2
mulsd %xmm0, %xmm2
addsd %xmm3, %xmm2
mulsd %xmm8, %xmm7
movapd %xmm4, %xmm3
mulsd %xmm11, %xmm3
addsd %xmm3, %xmm7
movapd %xmm9, %xmm3
mulsd %xmm12, %xmm3
addsd %xmm7, %xmm3
movsd -1536(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
movapd %xmm5, %xmm4
mulsd %xmm1, %xmm4
addsd %xmm2, %xmm3
movsd -1072(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm3, %xmm2
subsd %xmm2, %xmm4
movsd -2272(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm2, %xmm1
mulsd -880(%rbp), %xmm3 ## 8-byte Folded Reload
addsd %xmm1, %xmm3
mulsd %xmm5, %xmm4
movsd LCPI19_1(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm4
mulsd %xmm2, %xmm3
mulsd %xmm1, %xmm3
subsd %xmm3, %xmm4
movsd %xmm4, 96(%rax)
LBB19_70:
movsd %xmm10, -7888(%rbp) ## 8-byte Spill
movapd %xmm0, -7920(%rbp) ## 16-byte Spill
movapd %xmm13, -7936(%rbp) ## 16-byte Spill
movapd %xmm12, -7952(%rbp) ## 16-byte Spill
movapd %xmm11, -4000(%rbp) ## 16-byte Spill
movsd -4640(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
subsd -7352(%rbp), %xmm5 ## 8-byte Folded Reload
movapd -5696(%rbp), %xmm0 ## 16-byte Reload
subsd -13344(%rbp), %xmm0 ## 8-byte Folded Reload
subsd -13352(%rbp), %xmm0 ## 8-byte Folded Reload
divsd -5680(%rbp), %xmm0 ## 16-byte Folded Reload
movapd -4192(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm0, %xmm1
movapd -5200(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm0, %xmm3
subsd -13328(%rbp), %xmm3 ## 8-byte Folded Reload
movapd %xmm3, -5200(%rbp) ## 16-byte Spill
movapd -96(%rbp), %xmm10 ## 16-byte Reload
movapd %xmm10, %xmm12
mulsd %xmm1, %xmm12
movapd %xmm1, %xmm0
movapd %xmm1, -4192(%rbp) ## 16-byte Spill
movapd -144(%rbp), %xmm15 ## 16-byte Reload
movapd %xmm15, %xmm2
mulsd %xmm3, %xmm2
subsd %xmm2, %xmm12
movapd -256(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm1
mulsd %xmm3, %xmm1
subsd -13320(%rbp), %xmm1 ## 8-byte Folded Reload
movapd %xmm6, %xmm2
mulsd %xmm0, %xmm2
movapd -8656(%rbp), %xmm0 ## 16-byte Reload
subsd %xmm2, %xmm0
movapd -208(%rbp), %xmm13 ## 16-byte Reload
movapd %xmm13, %xmm2
mulsd %xmm12, %xmm2
addsd %xmm1, %xmm2
movapd -64(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm7
movapd %xmm3, %xmm14
mulsd %xmm0, %xmm7
addsd %xmm2, %xmm7
movapd %xmm7, %xmm9
movapd %xmm7, -192(%rbp) ## 16-byte Spill
movapd -1520(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm2
mulsd %xmm12, %xmm2
movapd -1488(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm3
mulsd %xmm1, %xmm3
addsd %xmm2, %xmm3
movapd -176(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm0, %xmm2
movapd %xmm0, -8656(%rbp) ## 16-byte Spill
addsd %xmm3, %xmm2
movapd %xmm2, -480(%rbp) ## 16-byte Spill
movapd %xmm13, %xmm4
mulsd %xmm9, %xmm4
movapd %xmm7, %xmm3
mulsd %xmm2, %xmm3
addsd %xmm4, %xmm3
movapd -576(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm11, %xmm2
mulsd %xmm12, %xmm2
movapd -864(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm1, -1872(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm4
addsd %xmm2, %xmm4
movapd -736(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm0, %xmm1
addsd %xmm4, %xmm1
movapd %xmm1, -704(%rbp) ## 16-byte Spill
movapd %xmm11, %xmm2
mulsd %xmm1, %xmm2
addsd %xmm3, %xmm2
addsd %xmm2, %xmm5
movapd %xmm2, %xmm3
movsd LCPI19_1(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm3
subsd %xmm3, %xmm5
movsd %xmm5, -4640(%rbp) ## 8-byte Spill
movapd %xmm5, %xmm3
subsd %xmm2, %xmm3
movapd -5744(%rbp), %xmm2 ## 16-byte Reload
subsd -13304(%rbp), %xmm2 ## 8-byte Folded Reload
subsd -13336(%rbp), %xmm2 ## 8-byte Folded Reload
divsd -5712(%rbp), %xmm2 ## 16-byte Folded Reload
movapd -4272(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm2, %xmm0
movsd -768(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
mulsd %xmm2, %xmm9
subsd -13152(%rbp), %xmm9 ## 8-byte Folded Reload
movapd %xmm10, %xmm2
mulsd %xmm0, %xmm2
movapd %xmm0, -4272(%rbp) ## 16-byte Spill
movapd %xmm15, %xmm4
mulsd %xmm9, %xmm4
subsd %xmm4, %xmm2
movapd %xmm6, %xmm10
mulsd %xmm9, %xmm10
subsd -13144(%rbp), %xmm10 ## 8-byte Folded Reload
movapd %xmm6, %xmm4
mulsd %xmm0, %xmm4
movapd -9168(%rbp), %xmm6 ## 16-byte Reload
subsd %xmm4, %xmm6
movapd %xmm13, %xmm5
mulsd %xmm2, %xmm5
addsd %xmm10, %xmm5
movapd %xmm14, %xmm4
mulsd %xmm6, %xmm4
movapd %xmm6, %xmm0
addsd %xmm5, %xmm4
movapd %xmm7, %xmm5
mulsd %xmm2, %xmm5
movapd %xmm8, %xmm6
mulsd %xmm10, %xmm6
addsd %xmm5, %xmm6
movapd -176(%rbp), %xmm15 ## 16-byte Reload
mulsd %xmm0, %xmm15
movapd %xmm0, -9168(%rbp) ## 16-byte Spill
addsd %xmm6, %xmm15
movapd %xmm13, %xmm5
mulsd %xmm4, %xmm5
movapd %xmm7, %xmm6
mulsd %xmm15, %xmm6
addsd %xmm5, %xmm6
movapd %xmm11, %xmm5
mulsd %xmm2, %xmm5
movapd -864(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm10, -4208(%rbp) ## 16-byte Spill
mulsd %xmm10, %xmm7
addsd %xmm5, %xmm7
movapd -736(%rbp), %xmm10 ## 16-byte Reload
mulsd %xmm0, %xmm10
addsd %xmm7, %xmm10
movapd %xmm11, %xmm5
mulsd %xmm10, %xmm5
addsd %xmm6, %xmm5
movsd -8624(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
subsd -7368(%rbp), %xmm8 ## 8-byte Folded Reload
subsd %xmm5, %xmm3
addsd %xmm5, %xmm8
mulsd %xmm1, %xmm5
subsd %xmm5, %xmm8
addsd %xmm3, %xmm8
movsd -6576(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
subsd -13288(%rbp), %xmm3 ## 8-byte Folded Reload
subsd -13312(%rbp), %xmm3 ## 8-byte Folded Reload
divsd -6560(%rbp), %xmm3 ## 8-byte Folded Reload
movsd -2696(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm3, %xmm0
subsd -13296(%rbp), %xmm0 ## 8-byte Folded Reload
movsd %xmm0, -2696(%rbp) ## 8-byte Spill
movapd -48(%rbp), %xmm14 ## 16-byte Reload
mulsd %xmm0, %xmm14
movapd %xmm13, %xmm5
mulsd %xmm14, %xmm5
movsd -3616(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
subsd %xmm5, %xmm3
movsd %xmm3, -3616(%rbp) ## 8-byte Spill
movapd -1344(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm5
mulsd %xmm14, %xmm5
movapd -7760(%rbp), %xmm0 ## 16-byte Reload
subsd %xmm5, %xmm0
movapd %xmm0, -7760(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm13
mulsd %xmm0, %xmm6
addsd %xmm13, %xmm6
movapd -2416(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm5
mulsd %xmm14, %xmm5
movapd -11696(%rbp), %xmm11 ## 16-byte Reload
subsd %xmm5, %xmm11
movapd %xmm0, %xmm5
mulsd %xmm11, %xmm5
addsd %xmm6, %xmm5
movsd -7376(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
subsd -7360(%rbp), %xmm3 ## 8-byte Folded Reload
movapd %xmm8, %xmm7
subsd %xmm5, %xmm7
addsd %xmm5, %xmm3
mulsd %xmm1, %xmm5
subsd %xmm5, %xmm3
movapd -2384(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm12, %xmm5
movapd -1024(%rbp), %xmm6 ## 16-byte Reload
mulsd -192(%rbp), %xmm6 ## 16-byte Folded Reload
addsd %xmm5, %xmm6
movapd -2368(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm2, %xmm5
addsd %xmm6, %xmm5
mulsd -2080(%rbp), %xmm4 ## 16-byte Folded Reload
addsd %xmm5, %xmm4
movapd %xmm8, %xmm5
movsd LCPI19_25(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm5
movapd %xmm0, %xmm13
subsd %xmm5, %xmm4
movapd -2064(%rbp), %xmm6 ## 16-byte Reload
movapd -5200(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm1, %xmm6
movapd -2160(%rbp), %xmm5 ## 16-byte Reload
mulsd -4192(%rbp), %xmm5 ## 16-byte Folded Reload
subsd %xmm5, %xmm6
movapd -2176(%rbp), %xmm5 ## 16-byte Reload
mulsd -4272(%rbp), %xmm5 ## 16-byte Folded Reload
subsd %xmm5, %xmm6
movapd -2608(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm9, %xmm0
addsd %xmm6, %xmm0
movapd -624(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm0, -2608(%rbp) ## 16-byte Spill
mulsd %xmm0, %xmm5
addsd %xmm4, %xmm5
movsd %xmm9, -768(%rbp) ## 8-byte Spill
addsd %xmm9, %xmm1
movapd %xmm1, -1200(%rbp) ## 16-byte Spill
movapd -48(%rbp), %xmm9 ## 16-byte Reload
mulsd %xmm1, %xmm9
movapd %xmm9, %xmm6
mulsd %xmm13, %xmm6
addsd %xmm5, %xmm6
movapd -1824(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm14, %xmm0
subsd %xmm0, %xmm6
movapd -2304(%rbp), %xmm13 ## 16-byte Reload
movapd %xmm13, %xmm5
mulsd -3616(%rbp), %xmm5 ## 8-byte Folded Reload
addsd %xmm6, %xmm5
addsd %xmm3, %xmm7
movsd %xmm7, -688(%rbp) ## 8-byte Spill
movapd %xmm5, %xmm1
movsd LCPI19_108(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm1
movsd LCPI19_114(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm7
subsd %xmm7, %xmm1
movapd %xmm1, -1184(%rbp) ## 16-byte Spill
movapd %xmm8, %xmm7
movsd LCPI19_24(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm7
movsd LCPI19_112(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm9, %xmm0
addsd %xmm7, %xmm0
movapd -1888(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm12, %xmm7
movapd -1024(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm1
mulsd -704(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm7, %xmm1
movsd LCPI19_14(%rip), %xmm7 ## xmm7 = mem[0],zero
movsd -4640(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd %xmm7, %xmm6
addsd %xmm1, %xmm6
movapd -1584(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm2, %xmm1
addsd %xmm6, %xmm1
movapd -2080(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm6, %xmm10
addsd %xmm1, %xmm10
movapd -496(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm10, %xmm1
addsd %xmm0, %xmm1
mulsd -896(%rbp), %xmm12 ## 16-byte Folded Reload
mulsd -480(%rbp), %xmm4 ## 16-byte Folded Reload
addsd %xmm12, %xmm4
mulsd -1904(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm4, %xmm2
mulsd %xmm6, %xmm15
addsd %xmm2, %xmm15
movapd -752(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm0
mulsd %xmm15, %xmm0
addsd %xmm1, %xmm0
movsd -2032(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm14, %xmm1
subsd %xmm1, %xmm0
movapd %xmm13, %xmm1
mulsd -7760(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movsd LCPI19_98(%rip), %xmm12 ## xmm12 = mem[0],zero
mulsd %xmm12, %xmm5
movsd -688(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
movapd %xmm7, %xmm0
movsd LCPI19_22(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm0
addsd %xmm5, %xmm0
addsd %xmm1, %xmm0
movsd LCPI19_13(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm1
movapd %xmm2, %xmm12
addsd -1184(%rbp), %xmm1 ## 16-byte Folded Reload
movsd LCPI19_23(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm8
mulsd %xmm2, %xmm9
subsd %xmm9, %xmm8
movapd %xmm10, -2064(%rbp) ## 16-byte Spill
mulsd %xmm10, %xmm6
addsd %xmm8, %xmm6
movapd -496(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm15, -1184(%rbp) ## 16-byte Spill
mulsd %xmm15, %xmm4
subsd %xmm4, %xmm6
mulsd -1808(%rbp), %xmm14 ## 16-byte Folded Reload
subsd %xmm14, %xmm6
mulsd %xmm11, %xmm13
addsd %xmm6, %xmm13
movsd LCPI19_64(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm3
addsd %xmm13, %xmm3
addsd %xmm3, %xmm1
movapd -1200(%rbp), %xmm13 ## 16-byte Reload
addsd -2696(%rbp), %xmm13 ## 8-byte Folded Reload
movapd -48(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm4
mulsd %xmm13, %xmm4
movapd %xmm4, %xmm5
movsd LCPI19_114(%rip), %xmm10 ## xmm10 = mem[0],zero
mulsd %xmm10, %xmm5
addsd %xmm1, %xmm5
mulsd %xmm12, %xmm3
mulsd LCPI19_31(%rip), %xmm4
addsd %xmm0, %xmm3
addsd %xmm3, %xmm4
movapd -1616(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm3
movapd %xmm0, %xmm6
mulsd %xmm5, %xmm3
movapd -1088(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm0
movapd %xmm1, %xmm15
mulsd %xmm4, %xmm0
subsd %xmm0, %xmm3
movapd %xmm7, %xmm2
movapd %xmm7, %xmm0
movsd LCPI19_111(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
movapd %xmm1, %xmm14
movapd -848(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm1
mulsd %xmm3, %xmm1
subsd %xmm0, %xmm1
movapd %xmm2, %xmm0
movapd %xmm2, %xmm12
movsd LCPI19_115(%rip), %xmm9 ## xmm9 = mem[0],zero
mulsd %xmm9, %xmm0
movapd %xmm6, %xmm2
movapd %xmm4, -2032(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm2
subsd %xmm0, %xmm2
movapd %xmm15, %xmm0
movapd %xmm5, -2304(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm0
addsd %xmm2, %xmm0
movsd -1296(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm13, %xmm2
mulsd %xmm9, %xmm2
addsd %xmm0, %xmm2
movapd -2144(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm0
mulsd %xmm2, %xmm0
subsd %xmm0, %xmm1
movsd -560(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
mulsd %xmm13, %xmm9
movapd %xmm9, %xmm15
mulsd %xmm14, %xmm15
addsd %xmm1, %xmm15
movapd %xmm12, %xmm0
movsd LCPI19_30(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm5, %xmm0
movapd %xmm4, %xmm1
movapd %xmm3, -5712(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm1
subsd %xmm0, %xmm1
movapd %xmm7, %xmm0
movapd %xmm2, -5696(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm0
addsd %xmm1, %xmm0
mulsd %xmm5, %xmm9
addsd %xmm0, %xmm9
movapd -8656(%rbp), %xmm6 ## 16-byte Reload
movapd -2384(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm6, %xmm1
movapd -2096(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm0
mulsd -192(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
movapd -9168(%rbp), %xmm14 ## 16-byte Reload
movapd -2368(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm14, %xmm1
addsd %xmm0, %xmm1
movapd %xmm1, %xmm2
movapd -4192(%rbp), %xmm0 ## 16-byte Reload
mulsd -832(%rbp), %xmm0 ## 16-byte Folded Reload
subsd -13392(%rbp), %xmm0 ## 8-byte Folded Reload
movapd -4272(%rbp), %xmm1 ## 16-byte Reload
mulsd -672(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
subsd -13384(%rbp), %xmm1 ## 8-byte Folded Reload
movapd -624(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm1, -4272(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm0
addsd %xmm2, %xmm0
movapd %xmm8, %xmm3
movapd %xmm8, %xmm1
mulsd -9824(%rbp), %xmm1 ## 8-byte Folded Reload
movapd %xmm1, %xmm2
mulsd LCPI19_25(%rip), %xmm2
subsd %xmm2, %xmm0
addsd -13376(%rbp), %xmm0 ## 8-byte Folded Reload
movapd -1008(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm2
mulsd -3616(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm0, %xmm2
movapd %xmm2, %xmm8
mulsd LCPI19_108(%rip), %xmm8
mulsd -5344(%rbp), %xmm3 ## 8-byte Folded Reload
movapd %xmm3, -48(%rbp) ## 16-byte Spill
movapd %xmm3, %xmm0
mulsd %xmm10, %xmm0
subsd %xmm0, %xmm8
movapd -896(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm6, %xmm0
movapd %xmm5, %xmm4
movapd -480(%rbp), %xmm10 ## 16-byte Reload
mulsd %xmm10, %xmm4
addsd %xmm0, %xmm4
movapd -1904(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm14, %xmm3
addsd %xmm4, %xmm3
movsd LCPI19_24(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd %xmm1, %xmm4
movapd -752(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm3, %xmm0
addsd %xmm4, %xmm0
mulsd -1888(%rbp), %xmm6 ## 16-byte Folded Reload
movapd %xmm5, %xmm4
movapd -704(%rbp), %xmm12 ## 16-byte Reload
mulsd %xmm12, %xmm4
addsd %xmm6, %xmm4
movapd -64(%rbp), %xmm5 ## 16-byte Reload
mulsd -192(%rbp), %xmm5 ## 16-byte Folded Reload
movapd -176(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm10, %xmm6
addsd %xmm5, %xmm6
movapd -736(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm12, %xmm5
movapd %xmm13, %xmm12
movapd %xmm15, %xmm13
addsd %xmm6, %xmm5
mulsd LCPI19_14(%rip), %xmm5
addsd %xmm4, %xmm5
movapd %xmm14, %xmm6
mulsd -1584(%rbp), %xmm6 ## 16-byte Folded Reload
addsd %xmm5, %xmm6
movapd -496(%rbp), %xmm14 ## 16-byte Reload
movapd %xmm14, %xmm5
mulsd %xmm6, %xmm5
addsd %xmm0, %xmm5
addsd -13368(%rbp), %xmm5 ## 8-byte Folded Reload
movapd %xmm7, %xmm4
movapd -7760(%rbp), %xmm10 ## 16-byte Reload
mulsd %xmm10, %xmm4
addsd %xmm5, %xmm4
mulsd LCPI19_98(%rip), %xmm2
movapd -48(%rbp), %xmm0 ## 16-byte Reload
mulsd LCPI19_22(%rip), %xmm0
addsd %xmm2, %xmm0
addsd %xmm4, %xmm0
movapd %xmm0, %xmm2
movsd LCPI19_13(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm5, %xmm4
addsd %xmm8, %xmm4
mulsd LCPI19_23(%rip), %xmm1
movapd %xmm14, %xmm0
movsd -5344(%rbp), %xmm14 ## 8-byte Reload
## xmm14 = mem[0],zero
movapd %xmm3, -2384(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm0
subsd %xmm0, %xmm1
movapd -752(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm6, -9168(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm0
addsd %xmm1, %xmm0
addsd -13360(%rbp), %xmm0 ## 8-byte Folded Reload
mulsd %xmm11, %xmm7
addsd %xmm0, %xmm7
movsd -3616(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -64(%rbp), %xmm1 ## 16-byte Folded Reload
movapd %xmm10, %xmm0
mulsd -3280(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
mulsd -2592(%rbp), %xmm11 ## 16-byte Folded Reload
addsd %xmm0, %xmm11
mulsd LCPI19_64(%rip), %xmm11
addsd %xmm7, %xmm11
addsd %xmm11, %xmm4
mulsd %xmm5, %xmm11
addsd %xmm2, %xmm11
movapd -1616(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm1
mulsd %xmm4, %xmm1
movapd -1088(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm0
mulsd %xmm11, %xmm0
subsd %xmm0, %xmm1
movapd -848(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm7
mulsd %xmm1, %xmm7
movsd -560(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm14, %xmm3
movapd %xmm3, %xmm0
mulsd LCPI19_111(%rip), %xmm0
subsd %xmm0, %xmm7
movsd -1296(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm14, %xmm0
mulsd LCPI19_115(%rip), %xmm0
movapd %xmm0, %xmm6
movapd %xmm2, %xmm0
movapd %xmm11, -11696(%rbp) ## 16-byte Spill
mulsd %xmm11, %xmm0
subsd %xmm6, %xmm0
movsd -688(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
movapd %xmm5, %xmm2
movapd %xmm4, -1808(%rbp) ## 16-byte Spill
mulsd %xmm4, %xmm2
addsd %xmm0, %xmm2
movapd -2144(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm0
mulsd %xmm2, %xmm0
subsd %xmm0, %xmm7
mulsd LCPI19_30(%rip), %xmm3
movapd %xmm4, %xmm0
movapd %xmm1, -6576(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm0
subsd %xmm3, %xmm0
movapd %xmm9, %xmm15
movapd %xmm8, %xmm5
movapd %xmm2, -6560(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm5
addsd %xmm0, %xmm5
testq %rax, %rax
movapd -2528(%rbp), %xmm11 ## 16-byte Reload
je LBB19_72
## %bb.71:
movsd LCPI19_119(%rip), %xmm8 ## xmm8 = mem[0],zero
movapd %xmm10, %xmm0
mulsd %xmm8, %xmm0
movapd -3184(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm4
mulsd %xmm13, %xmm4
subsd %xmm0, %xmm4
movapd %xmm11, %xmm0
mulsd %xmm15, %xmm0
subsd %xmm0, %xmm4
movsd -128(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
movapd %xmm6, %xmm3
mulsd %xmm12, %xmm3
movapd %xmm3, %xmm1
mulsd %xmm8, %xmm1
addsd %xmm4, %xmm1
addsd %xmm1, %xmm1
movapd %xmm11, %xmm0
mulsd %xmm7, %xmm0
movapd %xmm6, %xmm4
mulsd %xmm14, %xmm4
movsd LCPI19_29(%rip), %xmm9 ## xmm9 = mem[0],zero
movapd %xmm4, %xmm6
mulsd %xmm9, %xmm6
subsd %xmm6, %xmm0
movapd %xmm2, %xmm6
mulsd %xmm5, %xmm6
addsd %xmm0, %xmm6
addsd %xmm6, %xmm6
subsd %xmm6, %xmm1
movapd %xmm10, %xmm0
mulsd %xmm9, %xmm0
movapd %xmm11, %xmm6
mulsd %xmm13, %xmm6
subsd %xmm0, %xmm6
movapd %xmm2, %xmm0
mulsd %xmm15, %xmm0
addsd %xmm6, %xmm0
mulsd %xmm9, %xmm3
addsd %xmm0, %xmm3
mulsd %xmm8, %xmm4
movapd %xmm2, %xmm0
mulsd %xmm7, %xmm0
subsd %xmm4, %xmm0
movapd %xmm11, %xmm2
mulsd %xmm5, %xmm2
subsd %xmm2, %xmm0
movsd -1536(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
movapd %xmm4, %xmm2
mulsd %xmm1, %xmm2
addsd %xmm3, %xmm0
movsd -1072(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm0, %xmm3
subsd %xmm3, %xmm2
movsd -2272(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm3, %xmm1
mulsd -880(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
mulsd %xmm4, %xmm2
movsd LCPI19_1(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm2
mulsd %xmm3, %xmm0
mulsd %xmm1, %xmm0
subsd %xmm0, %xmm2
movsd %xmm2, 104(%rax)
LBB19_72:
movapd %xmm5, -7760(%rbp) ## 16-byte Spill
movapd %xmm7, -7792(%rbp) ## 16-byte Spill
movapd %xmm12, -1296(%rbp) ## 16-byte Spill
movapd %xmm13, -7824(%rbp) ## 16-byte Spill
movapd %xmm15, -7872(%rbp) ## 16-byte Spill
movapd -10048(%rbp), %xmm10 ## 16-byte Reload
subsd -13088(%rbp), %xmm10 ## 8-byte Folded Reload
movapd -2096(%rbp), %xmm12 ## 16-byte Reload
movapd %xmm12, %xmm0
mulsd %xmm10, %xmm0
subsd -17152(%rbp), %xmm0 ## 16-byte Folded Reload
subsd -13096(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -7104(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
addsd -13080(%rbp), %xmm1 ## 8-byte Folded Reload
movsd %xmm1, -7104(%rbp) ## 8-byte Spill
movapd -624(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm1, %xmm2
addsd %xmm0, %xmm2
movapd -17200(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm11, %xmm1
movsd LCPI19_25(%rip), %xmm8 ## xmm8 = mem[0],zero
mulsd %xmm8, %xmm1
addsd %xmm2, %xmm1
subsd -13072(%rbp), %xmm1 ## 8-byte Folded Reload
subsd -17136(%rbp), %xmm1 ## 16-byte Folded Reload
movapd -17168(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm0
movsd LCPI19_22(%rip), %xmm6 ## xmm6 = mem[0],zero
mulsd %xmm6, %xmm0
movsd LCPI19_15(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd %xmm1, %xmm4
addsd %xmm0, %xmm4
movsd -4464(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
subsd -17120(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm12, %xmm6
mulsd %xmm0, %xmm6
movapd %xmm0, %xmm5
movsd %xmm0, -4464(%rbp) ## 8-byte Spill
subsd -13064(%rbp), %xmm6 ## 8-byte Folded Reload
subsd -17104(%rbp), %xmm6 ## 16-byte Folded Reload
movapd -752(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm0
movapd %xmm2, %xmm14
mulsd %xmm6, %xmm0
movapd %xmm11, %xmm2
movsd LCPI19_112(%rip), %xmm13 ## xmm13 = mem[0],zero
mulsd %xmm13, %xmm2
addsd %xmm0, %xmm2
movsd -7088(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
subsd -17184(%rbp), %xmm9 ## 16-byte Folded Reload
mulsd %xmm9, %xmm12
subsd -13048(%rbp), %xmm12 ## 8-byte Folded Reload
movapd -64(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm10, %xmm0
movapd -176(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm5, %xmm3
addsd %xmm0, %xmm3
movapd -736(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm9, %xmm7
addsd %xmm3, %xmm7
movsd LCPI19_14(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm5, %xmm7
addsd %xmm12, %xmm7
subsd -17088(%rbp), %xmm7 ## 16-byte Folded Reload
movapd -496(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm3
movapd %xmm0, %xmm5
mulsd %xmm7, %xmm3
addsd %xmm2, %xmm3
subsd -13056(%rbp), %xmm3 ## 8-byte Folded Reload
subsd -17072(%rbp), %xmm3 ## 16-byte Folded Reload
movsd LCPI19_23(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm11
movapd %xmm5, %xmm0
movapd %xmm6, -2368(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm0
addsd %xmm11, %xmm0
movapd %xmm14, %xmm2
movsd %xmm7, -1824(%rbp) ## 8-byte Spill
mulsd %xmm7, %xmm2
subsd %xmm0, %xmm2
subsd -13016(%rbp), %xmm2 ## 8-byte Folded Reload
subsd -17040(%rbp), %xmm2 ## 16-byte Folded Reload
subsd -13024(%rbp), %xmm2 ## 8-byte Folded Reload
movsd LCPI19_108(%rip), %xmm15 ## xmm15 = mem[0],zero
mulsd %xmm15, %xmm1
movsd LCPI19_114(%rip), %xmm12 ## xmm12 = mem[0],zero
mulsd %xmm12, %xmm8
addsd %xmm1, %xmm8
subsd %xmm3, %xmm4
movsd LCPI19_13(%rip), %xmm11 ## xmm11 = mem[0],zero
mulsd %xmm11, %xmm3
addsd %xmm8, %xmm3
addsd %xmm2, %xmm3
mulsd %xmm11, %xmm2
subsd %xmm2, %xmm4
movapd -1088(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm0
movapd %xmm1, %xmm11
mulsd %xmm4, %xmm0
movapd -1616(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm5
movapd %xmm1, %xmm6
mulsd %xmm3, %xmm5
addsd %xmm0, %xmm5
movapd -848(%rbp), %xmm13 ## 16-byte Reload
movapd %xmm13, %xmm0
mulsd %xmm5, %xmm0
movsd -13040(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
movapd %xmm2, %xmm1
movsd LCPI19_111(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm7, %xmm1
addsd %xmm0, %xmm1
movsd LCPI19_115(%rip), %xmm7 ## xmm7 = mem[0],zero
movsd -13032(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm7, %xmm0
movsd %xmm4, -4192(%rbp) ## 8-byte Spill
mulsd %xmm4, %xmm6
subsd %xmm0, %xmm6
movapd %xmm11, %xmm0
movapd %xmm3, -5744(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm0
subsd %xmm0, %xmm6
movapd -2144(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm15
mulsd %xmm6, %xmm15
addsd %xmm1, %xmm15
movsd LCPI19_19(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm2
movapd %xmm3, %xmm0
movapd %xmm5, -3616(%rbp) ## 16-byte Spill
mulsd %xmm5, %xmm0
subsd %xmm0, %xmm2
movapd %xmm6, -4640(%rbp) ## 16-byte Spill
mulsd %xmm6, %xmm13
addsd %xmm2, %xmm13
movsd -7296(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
subsd -12952(%rbp), %xmm4 ## 8-byte Folded Reload
movapd -208(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm11, %xmm0
mulsd %xmm10, %xmm0
movapd -1520(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm1
mulsd -4464(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm0, %xmm1
movapd -576(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm0
mulsd %xmm9, %xmm0
addsd %xmm1, %xmm0
addsd %xmm0, %xmm4
movapd %xmm0, %xmm1
movsd LCPI19_1(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm1
subsd %xmm1, %xmm4
movapd %xmm4, %xmm2
subsd %xmm0, %xmm2
movapd -10032(%rbp), %xmm6 ## 16-byte Reload
subsd -12960(%rbp), %xmm6 ## 8-byte Folded Reload
movsd -7072(%rbp), %xmm14 ## 8-byte Reload
## xmm14 = mem[0],zero
subsd -17024(%rbp), %xmm14 ## 16-byte Folded Reload
movapd %xmm11, %xmm0
mulsd %xmm6, %xmm0
mulsd %xmm14, %xmm3
addsd %xmm0, %xmm3
movsd -7064(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
subsd -7696(%rbp), %xmm5 ## 16-byte Folded Reload
movapd %xmm7, %xmm0
mulsd %xmm5, %xmm0
addsd %xmm3, %xmm0
movsd -7304(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
subsd -12944(%rbp), %xmm8 ## 8-byte Folded Reload
subsd %xmm0, %xmm2
addsd %xmm0, %xmm8
mulsd LCPI19_1(%rip), %xmm0
subsd %xmm0, %xmm8
addsd %xmm2, %xmm8
movsd -2448(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
subsd -12928(%rbp), %xmm2 ## 8-byte Folded Reload
movapd %xmm8, %xmm1
movsd -12936(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
addsd %xmm0, %xmm1
subsd %xmm0, %xmm2
addsd -12920(%rbp), %xmm2 ## 8-byte Folded Reload
movapd -1024(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm0
movsd %xmm10, -1008(%rbp) ## 8-byte Spill
mulsd %xmm10, %xmm0
movapd -2080(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm7, %xmm6
addsd %xmm0, %xmm6
movsd LCPI19_25(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm8, %xmm0
subsd %xmm0, %xmm6
subsd -4016(%rbp), %xmm6 ## 8-byte Folded Reload
addsd %xmm2, %xmm1
movapd %xmm6, %xmm10
mulsd LCPI19_108(%rip), %xmm10
mulsd %xmm1, %xmm12
subsd %xmm12, %xmm10
movapd %xmm11, %xmm12
movapd %xmm3, %xmm0
movsd %xmm9, -7088(%rbp) ## 8-byte Spill
mulsd %xmm9, %xmm0
movapd %xmm1, %xmm9
mulsd LCPI19_14(%rip), %xmm4
addsd %xmm0, %xmm4
mulsd %xmm7, %xmm5
addsd %xmm4, %xmm5
movsd LCPI19_112(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm8, %xmm1
movapd -496(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm11, %xmm0
mulsd %xmm5, %xmm0
subsd %xmm1, %xmm0
mulsd %xmm7, %xmm14
mulsd -4464(%rbp), %xmm3 ## 8-byte Folded Reload
addsd %xmm3, %xmm14
movapd -752(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm3
mulsd %xmm14, %xmm3
addsd %xmm0, %xmm3
subsd -7680(%rbp), %xmm3 ## 16-byte Folded Reload
mulsd LCPI19_98(%rip), %xmm6
movsd LCPI19_22(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm9, %xmm0
addsd %xmm6, %xmm0
addsd %xmm3, %xmm0
movapd %xmm0, %xmm4
movsd LCPI19_13(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm7, %xmm3
addsd %xmm10, %xmm3
mulsd LCPI19_23(%rip), %xmm8
movapd %xmm1, %xmm0
movsd %xmm5, -7064(%rbp) ## 8-byte Spill
mulsd %xmm5, %xmm0
addsd %xmm8, %xmm0
movapd %xmm11, %xmm1
movsd %xmm14, -7072(%rbp) ## 8-byte Spill
mulsd %xmm14, %xmm1
subsd %xmm1, %xmm0
mulsd LCPI19_64(%rip), %xmm2
subsd -7712(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm0, %xmm2
movsd LCPI19_111(%rip), %xmm6 ## xmm6 = mem[0],zero
mulsd %xmm9, %xmm6
addsd %xmm2, %xmm3
mulsd %xmm7, %xmm2
movapd -1616(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm1
mulsd %xmm3, %xmm1
addsd %xmm4, %xmm2
movapd -1088(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm0
mulsd %xmm2, %xmm0
subsd %xmm0, %xmm1
movapd -848(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm7
mulsd %xmm1, %xmm7
subsd %xmm6, %xmm7
movapd %xmm13, %xmm11
movsd LCPI19_115(%rip), %xmm6 ## xmm6 = mem[0],zero
mulsd %xmm9, %xmm6
movapd %xmm5, %xmm0
movsd %xmm2, -2448(%rbp) ## 8-byte Spill
mulsd %xmm2, %xmm0
subsd %xmm6, %xmm0
movapd %xmm4, %xmm2
movapd %xmm3, -5680(%rbp) ## 16-byte Spill
mulsd %xmm3, %xmm2
addsd %xmm0, %xmm2
movapd -2144(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm0
mulsd %xmm2, %xmm0
subsd %xmm0, %xmm7
movsd LCPI19_19(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm9, %xmm3
movapd %xmm4, %xmm0
movapd %xmm1, -4016(%rbp) ## 16-byte Spill
mulsd %xmm1, %xmm0
addsd %xmm3, %xmm0
movapd %xmm8, %xmm5
movapd %xmm2, -7808(%rbp) ## 16-byte Spill
mulsd %xmm2, %xmm5
addsd %xmm0, %xmm5
testq %rax, %rax
movapd -5184(%rbp), %xmm10 ## 16-byte Reload
je LBB19_74
## %bb.73:
movsd LCPI19_17(%rip), %xmm1 ## xmm1 = mem[0],zero
movsd -12408(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
movapd %xmm6, %xmm0
mulsd %xmm1, %xmm0
movapd -2528(%rbp), %xmm13 ## 16-byte Reload
movapd %xmm13, %xmm2
mulsd %xmm15, %xmm2
subsd %xmm2, %xmm0
movapd -3184(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm2
mulsd %xmm11, %xmm2
addsd %xmm0, %xmm2
movsd LCPI19_119(%rip), %xmm8 ## xmm8 = mem[0],zero
movapd %xmm9, %xmm4
mulsd %xmm8, %xmm4
movapd %xmm3, %xmm0
mulsd %xmm7, %xmm0
subsd %xmm4, %xmm0
movapd %xmm13, %xmm4
mulsd %xmm5, %xmm4
subsd %xmm4, %xmm0
movapd -208(%rbp), %xmm12 ## 16-byte Reload
addsd %xmm2, %xmm2
addsd %xmm0, %xmm0
addsd %xmm2, %xmm0
mulsd %xmm9, %xmm1
movapd %xmm13, %xmm2
mulsd %xmm7, %xmm2
addsd %xmm1, %xmm2
movapd %xmm3, %xmm1
mulsd %xmm5, %xmm1
addsd %xmm2, %xmm1
mulsd %xmm8, %xmm6
movapd %xmm3, %xmm2
mulsd %xmm15, %xmm2
addsd %xmm2, %xmm6
movapd %xmm13, %xmm2
mulsd %xmm11, %xmm2
addsd %xmm6, %xmm2
addsd %xmm1, %xmm2
movsd -1536(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
movapd %xmm3, %xmm1
mulsd %xmm0, %xmm1
movsd -1072(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm2, %xmm4
subsd %xmm4, %xmm1
mulsd %xmm3, %xmm1
movsd -880(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm2, %xmm3
movsd -2272(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm2, %xmm0
addsd %xmm0, %xmm3
mulsd %xmm2, %xmm3
movsd LCPI19_1(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm1
mulsd %xmm0, %xmm3
subsd %xmm3, %xmm1
movsd %xmm1, 112(%rax)
LBB19_74:
movsd %xmm9, -7616(%rbp) ## 8-byte Spill
movapd %xmm5, -7632(%rbp) ## 16-byte Spill
movapd %xmm7, -7680(%rbp) ## 16-byte Spill
movapd %xmm11, -7728(%rbp) ## 16-byte Spill
movapd %xmm15, -7744(%rbp) ## 16-byte Spill
movapd -6960(%rbp), %xmm0 ## 16-byte Reload
mulsd -4688(%rbp), %xmm0 ## 16-byte Folded Reload
movapd -17808(%rbp), %xmm2 ## 16-byte Reload
mulsd -4896(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm0, %xmm2
movapd -6976(%rbp), %xmm0 ## 16-byte Reload
mulsd -4472(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm2
movapd -17616(%rbp), %xmm0 ## 16-byte Reload
mulsd -1720(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm2
movapd -6160(%rbp), %xmm0 ## 16-byte Reload
mulsd -7456(%rbp), %xmm0 ## 8-byte Folded Reload
movapd -2640(%rbp), %xmm1 ## 16-byte Reload
mulsd -12048(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -4256(%rbp), %xmm0 ## 16-byte Reload
mulsd -4480(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
movapd -2816(%rbp), %xmm1 ## 16-byte Reload
mulsd -14096(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm0
movapd -8960(%rbp), %xmm1 ## 16-byte Reload
mulsd -640(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm0
movsd -9872(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -1472(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -6720(%rbp), %xmm0 ## 16-byte Reload
mulsd -1232(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm1
movapd -9456(%rbp), %xmm0 ## 16-byte Reload
mulsd -1456(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
movsd -2720(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -6800(%rbp), %xmm1 ## 16-byte Folded Reload
movsd %xmm1, -2720(%rbp) ## 8-byte Spill
addsd %xmm1, %xmm0
mulsd -6816(%rbp), %xmm10 ## 16-byte Folded Reload
movapd %xmm10, -5184(%rbp) ## 16-byte Spill
addsd %xmm10, %xmm0
movapd %xmm0, -9456(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm2
movapd -10608(%rbp), %xmm0 ## 16-byte Reload
mulsd -3040(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm2
movsd -13464(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -408(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm2, %xmm1
movapd -10624(%rbp), %xmm0 ## 16-byte Reload
mulsd -5264(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
movapd -17648(%rbp), %xmm1 ## 16-byte Reload
mulsd -288(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm0
movapd -17840(%rbp), %xmm1 ## 16-byte Reload
mulsd -112(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -17632(%rbp), %xmm0 ## 16-byte Reload
mulsd -528(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm1
movsd -9856(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -5232(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm1
movsd -13456(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd -3840(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm1, %xmm2
movapd -10576(%rbp), %xmm0 ## 16-byte Reload
mulsd -3048(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm2
movapd -17824(%rbp), %xmm0 ## 16-byte Reload
mulsd -224(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm2, %xmm0
movapd -8176(%rbp), %xmm2 ## 16-byte Reload
mulsd -3544(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm0, %xmm2
movapd -17584(%rbp), %xmm0 ## 16-byte Reload
mulsd -152(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm2
movapd -6896(%rbp), %xmm0 ## 16-byte Reload
mulsd -8896(%rbp), %xmm0 ## 16-byte Folded Reload
movapd -17776(%rbp), %xmm1 ## 16-byte Reload
mulsd -8208(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -9536(%rbp), %xmm0 ## 16-byte Reload
mulsd -8944(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
movapd -17552(%rbp), %xmm1 ## 16-byte Reload
mulsd -6144(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -12352(%rbp), %xmm0 ## 16-byte Reload
mulsd -2728(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
movapd -17792(%rbp), %xmm1 ## 16-byte Reload
mulsd -352(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm0, %xmm1
movapd %xmm2, -880(%rbp) ## 16-byte Spill
movapd %xmm2, %xmm0
subsd %xmm1, %xmm0
movapd -9552(%rbp), %xmm2 ## 16-byte Reload
mulsd -2216(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm0, %xmm2
movapd -17600(%rbp), %xmm0 ## 16-byte Reload
mulsd -464(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm2
movapd -6944(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm0
movapd -13856(%rbp), %xmm8 ## 16-byte Reload
mulsd %xmm8, %xmm0
movapd -14304(%rbp), %xmm1 ## 16-byte Reload
mulsd -9472(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -6000(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm11, %xmm0
mulsd -11536(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
movapd -17536(%rbp), %xmm1 ## 16-byte Reload
mulsd -8096(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd %xmm2, -6720(%rbp) ## 16-byte Spill
movapd %xmm2, %xmm0
subsd %xmm1, %xmm0
movapd -10560(%rbp), %xmm1 ## 16-byte Reload
mulsd -3056(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm0
movapd -14336(%rbp), %xmm1 ## 16-byte Reload
mulsd -344(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm0, %xmm1
movapd -4864(%rbp), %xmm13 ## 16-byte Reload
mulsd -2224(%rbp), %xmm13 ## 8-byte Folded Reload
addsd %xmm1, %xmm13
movapd -17568(%rbp), %xmm0 ## 16-byte Reload
mulsd -368(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm13
movapd -6912(%rbp), %xmm0 ## 16-byte Reload
mulsd -5600(%rbp), %xmm0 ## 16-byte Folded Reload
movapd -17728(%rbp), %xmm2 ## 16-byte Reload
mulsd -3232(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm0, %xmm2
movapd -6928(%rbp), %xmm0 ## 16-byte Reload
mulsd -2992(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm2
movapd -17488(%rbp), %xmm0 ## 16-byte Reload
mulsd -3872(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm2
movapd -9392(%rbp), %xmm0 ## 16-byte Reload
mulsd -5616(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -2112(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -11920(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -6832(%rbp), %xmm0 ## 16-byte Reload
mulsd -14320(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
movapd -1568(%rbp), %xmm1 ## 16-byte Reload
mulsd -14016(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm0
movapd -8928(%rbp), %xmm1 ## 16-byte Reload
mulsd -832(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm0
movsd -9848(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -2160(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -6704(%rbp), %xmm0 ## 16-byte Reload
mulsd -672(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm1
movapd -9328(%rbp), %xmm0 ## 16-byte Reload
mulsd -2176(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
movsd -4224(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -8080(%rbp), %xmm1 ## 16-byte Folded Reload
movsd %xmm1, -4224(%rbp) ## 8-byte Spill
addsd %xmm1, %xmm0
movapd -4880(%rbp), %xmm1 ## 16-byte Reload
mulsd -3560(%rbp), %xmm1 ## 8-byte Folded Reload
movapd %xmm1, -4880(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm0
movapd %xmm0, -9328(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm2
movapd -10528(%rbp), %xmm0 ## 16-byte Reload
mulsd -3016(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm2
movsd -13440(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -376(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm2, %xmm1
movapd -10544(%rbp), %xmm0 ## 16-byte Reload
mulsd -5216(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
movapd -17520(%rbp), %xmm1 ## 16-byte Reload
mulsd -296(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm0
movapd -17760(%rbp), %xmm1 ## 16-byte Reload
mulsd -64(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -17504(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm12, %xmm0
subsd %xmm0, %xmm1
movsd -9840(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -4240(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm1
movsd -13432(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd -3216(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm1, %xmm2
movapd -10496(%rbp), %xmm0 ## 16-byte Reload
mulsd -3024(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm2
movapd -17744(%rbp), %xmm0 ## 16-byte Reload
mulsd -232(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm2, %xmm0
movapd -8064(%rbp), %xmm2 ## 16-byte Reload
mulsd -3008(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm0, %xmm2
movapd -17456(%rbp), %xmm0 ## 16-byte Reload
mulsd -160(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm2
movapd -6848(%rbp), %xmm0 ## 16-byte Reload
mulsd -8880(%rbp), %xmm0 ## 16-byte Folded Reload
movapd -17680(%rbp), %xmm1 ## 16-byte Reload
mulsd -5872(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -9488(%rbp), %xmm0 ## 16-byte Reload
mulsd -8912(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
movapd -17424(%rbp), %xmm1 ## 16-byte Reload
mulsd -4176(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -12336(%rbp), %xmm0 ## 16-byte Reload
mulsd -2704(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
movapd -17696(%rbp), %xmm1 ## 16-byte Reload
mulsd -216(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm0, %xmm1
movapd %xmm2, -1072(%rbp) ## 16-byte Spill
movapd %xmm2, %xmm0
subsd %xmm1, %xmm0
movapd -9504(%rbp), %xmm2 ## 16-byte Reload
mulsd -2208(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm0, %xmm2
movapd -17472(%rbp), %xmm0 ## 16-byte Reload
mulsd -304(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm2
movapd %xmm7, %xmm0
mulsd -11520(%rbp), %xmm0 ## 16-byte Folded Reload
movapd -12304(%rbp), %xmm14 ## 16-byte Reload
movapd -17664(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm14, %xmm1
addsd %xmm0, %xmm1
mulsd -11568(%rbp), %xmm11 ## 16-byte Folded Reload
addsd %xmm1, %xmm11
movapd -17408(%rbp), %xmm1 ## 16-byte Reload
mulsd -9312(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm11, %xmm1
movapd %xmm2, -7712(%rbp) ## 16-byte Spill
movapd %xmm2, %xmm0
subsd %xmm1, %xmm0
movapd -12320(%rbp), %xmm1 ## 16-byte Reload
mulsd -3032(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm0
movapd -17712(%rbp), %xmm1 ## 16-byte Reload
mulsd -2928(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -9344(%rbp), %xmm4 ## 16-byte Reload
mulsd -1376(%rbp), %xmm4 ## 8-byte Folded Reload
addsd %xmm1, %xmm4
movapd -17440(%rbp), %xmm0 ## 16-byte Reload
mulsd -360(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm4
testq %rax, %rax
movapd -3200(%rbp), %xmm15 ## 16-byte Reload
movapd -9104(%rbp), %xmm9 ## 16-byte Reload
movsd -3536(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
movsd -2976(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
movapd -13968(%rbp), %xmm2 ## 16-byte Reload
movapd -13952(%rbp), %xmm3 ## 16-byte Reload
movapd -11152(%rbp), %xmm6 ## 16-byte Reload
movsd -7008(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
movapd -11136(%rbp), %xmm5 ## 16-byte Reload
movsd -9688(%rbp), %xmm11 ## 8-byte Reload
## xmm11 = mem[0],zero
movapd -13840(%rbp), %xmm1 ## 16-byte Reload
je LBB19_76
## %bb.75:
movapd -8992(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm5, %xmm0
mulsd -6784(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movsd -7136(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm6, %xmm0
subsd %xmm0, %xmm1
movsd -9616(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -5888(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm1
movapd -9008(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm7, %xmm0
subsd %xmm0, %xmm1
movapd -13600(%rbp), %xmm0 ## 16-byte Reload
mulsd -3000(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm1
movsd -5272(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm10, %xmm0
addsd %xmm1, %xmm0
movsd -9624(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -6736(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm0
addsd %xmm13, %xmm0
movapd -4160(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm1
mulsd -9024(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm0
mulsd %xmm15, %xmm3
addsd %xmm0, %xmm3
movapd %xmm12, %xmm0
mulsd -10096(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm3, %xmm0
movapd -2320(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm3, %xmm11
subsd %xmm11, %xmm0
addsd %xmm4, %xmm0
movapd %xmm5, %xmm1
mulsd -9968(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm0
mulsd %xmm15, %xmm2
addsd %xmm0, %xmm2
movapd %xmm12, %xmm0
mulsd %xmm9, %xmm0
addsd %xmm2, %xmm0
movapd %xmm3, %xmm1
mulsd -7968(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm0
movsd %xmm0, 120(%rax)
LBB19_76:
movapd %xmm4, -8960(%rbp) ## 16-byte Spill
movapd %xmm13, -7696(%rbp) ## 16-byte Spill
movsd LCPI19_89(%rip), %xmm11 ## xmm11 = mem[0],zero
mulsd %xmm11, %xmm8
movsd -13424(%rbp), %xmm13 ## 8-byte Reload
## xmm13 = mem[0],zero
mulsd -9472(%rbp), %xmm13 ## 16-byte Folded Reload
addsd %xmm8, %xmm13
movapd -8896(%rbp), %xmm0 ## 16-byte Reload
movsd LCPI19_82(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm0
movsd -7112(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd -8208(%rbp), %xmm6 ## 16-byte Folded Reload
addsd %xmm0, %xmm6
movsd -2728(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd LCPI19_130(%rip), %xmm8 ## xmm8 = mem[0],zero
mulsd %xmm8, %xmm0
subsd %xmm0, %xmm6
movsd -13416(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -352(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm6
movapd -4688(%rbp), %xmm1 ## 16-byte Reload
movsd LCPI19_74(%rip), %xmm10 ## xmm10 = mem[0],zero
mulsd %xmm10, %xmm1
movapd -4896(%rbp), %xmm0 ## 16-byte Reload
mulsd -14256(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm1, %xmm0
movsd -7456(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movsd LCPI19_50(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd %xmm4, %xmm1
movapd -2640(%rbp), %xmm3 ## 16-byte Reload
mulsd -14032(%rbp), %xmm3 ## 16-byte Folded Reload
subsd %xmm1, %xmm3
movsd -9816(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -640(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm3
movsd -7448(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -1472(%rbp), %xmm1 ## 16-byte Folded Reload
movsd %xmm1, -7448(%rbp) ## 8-byte Spill
subsd %xmm1, %xmm3
movsd -3504(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -1232(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm3
movapd -4704(%rbp), %xmm1 ## 16-byte Reload
mulsd -1456(%rbp), %xmm1 ## 16-byte Folded Reload
movapd %xmm1, -4704(%rbp) ## 16-byte Spill
subsd %xmm1, %xmm3
movapd %xmm3, -2096(%rbp) ## 16-byte Spill
addsd %xmm3, %xmm0
movsd -3040(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm4, %xmm1
subsd %xmm1, %xmm0
movapd -17392(%rbp), %xmm1 ## 16-byte Reload
mulsd -408(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm0, %xmm1
movsd -9768(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -288(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm1
movapd -17376(%rbp), %xmm3 ## 16-byte Reload
mulsd -112(%rbp), %xmm3 ## 16-byte Folded Reload
addsd %xmm1, %xmm3
movsd -9800(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -5232(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm3
movsd -7440(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -3840(%rbp), %xmm0 ## 16-byte Folded Reload
movsd %xmm0, -7440(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm3
movsd -3048(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd LCPI19_129(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm7, %xmm0
subsd %xmm0, %xmm3
movapd -11824(%rbp), %xmm1 ## 16-byte Reload
mulsd -224(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm3, %xmm1
movsd -152(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movapd -11120(%rbp), %xmm12 ## 16-byte Reload
mulsd %xmm12, %xmm0
subsd %xmm0, %xmm1
movapd %xmm1, -11824(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm6
movsd -464(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm12, %xmm0
subsd %xmm0, %xmm6
movsd %xmm6, -7112(%rbp) ## 8-byte Spill
addsd %xmm6, %xmm13
movsd -3056(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd LCPI19_125(%rip), %xmm6 ## xmm6 = mem[0],zero
mulsd %xmm6, %xmm0
subsd %xmm0, %xmm13
movapd -10112(%rbp), %xmm9 ## 16-byte Reload
mulsd -344(%rbp), %xmm9 ## 8-byte Folded Reload
addsd %xmm13, %xmm9
movsd -368(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm12, %xmm0
subsd %xmm0, %xmm9
movapd -11520(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm11, %xmm0
movsd -13408(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm14, %xmm5
addsd %xmm0, %xmm5
movapd -8880(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm2, %xmm0
movsd -7096(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd -5872(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm0, %xmm2
movsd -2704(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm8, %xmm0
subsd %xmm0, %xmm2
movsd -13400(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -216(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm2
movapd -5600(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm10, %xmm1
movapd -3232(%rbp), %xmm0 ## 16-byte Reload
movapd -17328(%rbp), %xmm13 ## 16-byte Reload
mulsd %xmm13, %xmm0
subsd %xmm1, %xmm0
movsd -5616(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm4, %xmm1
movsd -2112(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd -14000(%rbp), %xmm3 ## 16-byte Folded Reload
subsd %xmm1, %xmm3
movsd -9792(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -832(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm3
movsd -7416(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -2160(%rbp), %xmm1 ## 16-byte Folded Reload
movsd %xmm1, -7416(%rbp) ## 8-byte Spill
subsd %xmm1, %xmm3
movsd -3496(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -672(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm3
movapd -4032(%rbp), %xmm1 ## 16-byte Reload
mulsd -2176(%rbp), %xmm1 ## 16-byte Folded Reload
movapd %xmm1, -4032(%rbp) ## 16-byte Spill
subsd %xmm1, %xmm3
movapd %xmm3, -1536(%rbp) ## 16-byte Spill
addsd %xmm3, %xmm0
movsd -3016(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm4, %xmm1
subsd %xmm1, %xmm0
movapd -17360(%rbp), %xmm1 ## 16-byte Reload
mulsd -376(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm0, %xmm1
movsd -9760(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -296(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm1
movapd -17344(%rbp), %xmm4 ## 16-byte Reload
mulsd -64(%rbp), %xmm4 ## 16-byte Folded Reload
addsd %xmm1, %xmm4
movsd -9776(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -4240(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm4
movsd -7408(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -3216(%rbp), %xmm0 ## 16-byte Folded Reload
movsd %xmm0, -7408(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm4
movsd -3024(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm7, %xmm0
subsd %xmm0, %xmm4
movapd -11712(%rbp), %xmm1 ## 16-byte Reload
mulsd -232(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm4, %xmm1
movsd -160(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movapd -13824(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm7, %xmm0
subsd %xmm0, %xmm1
movapd %xmm1, -11712(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm2
movsd -304(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm7, %xmm0
subsd %xmm0, %xmm2
movsd %xmm2, -7096(%rbp) ## 8-byte Spill
addsd %xmm2, %xmm5
movsd -3032(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm6, %xmm0
subsd %xmm0, %xmm5
movapd -11616(%rbp), %xmm4 ## 16-byte Reload
mulsd -2928(%rbp), %xmm4 ## 16-byte Folded Reload
addsd %xmm5, %xmm4
movsd -360(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm7, %xmm0
subsd %xmm0, %xmm4
testq %rax, %rax
je LBB19_78
## %bb.77:
movsd -5296(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movapd %xmm1, %xmm0
mulsd -128(%rbp), %xmm0 ## 8-byte Folded Reload
mulsd LCPI19_128(%rip), %xmm0
mulsd -9592(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm0, %xmm1
movsd LCPI19_7(%rip), %xmm2 ## xmm2 = mem[0],zero
movapd -11136(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm2, %xmm0
subsd %xmm0, %xmm1
movapd -6784(%rbp), %xmm0 ## 16-byte Reload
movsd -9600(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm5, %xmm0
addsd %xmm1, %xmm0
movsd -7008(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm2, %xmm1
subsd %xmm1, %xmm0
movsd -3000(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm5, %xmm1
addsd %xmm0, %xmm1
addsd %xmm9, %xmm1
movsd LCPI19_126(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd -4160(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movsd -12432(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm15, %xmm2
addsd %xmm1, %xmm2
movapd -2320(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm1
mulsd %xmm12, %xmm1
subsd %xmm1, %xmm2
addsd %xmm4, %xmm2
addsd %xmm0, %xmm2
movsd -12440(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm15, %xmm1
addsd %xmm2, %xmm1
movapd %xmm3, %xmm0
mulsd %xmm7, %xmm0
subsd %xmm0, %xmm1
movsd %xmm1, 128(%rax)
LBB19_78:
movapd %xmm4, -11616(%rbp) ## 16-byte Spill
movapd %xmm9, -10112(%rbp) ## 16-byte Spill
movapd -11536(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm11, %xmm0
movsd -13160(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd -8096(%rbp), %xmm7 ## 16-byte Folded Reload
subsd %xmm0, %xmm7
movapd -8944(%rbp), %xmm0 ## 16-byte Reload
movsd LCPI19_82(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm0
movsd -13168(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd -6144(%rbp), %xmm5 ## 16-byte Folded Reload
subsd %xmm0, %xmm5
movsd -4472(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd LCPI19_74(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm0
movapd -14256(%rbp), %xmm4 ## 16-byte Reload
mulsd -1720(%rbp), %xmm4 ## 8-byte Folded Reload
subsd %xmm0, %xmm4
movsd -4480(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
movsd LCPI19_50(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm6
movapd -2816(%rbp), %xmm0 ## 16-byte Reload
mulsd -13920(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm6
movsd -3936(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -1472(%rbp), %xmm0 ## 16-byte Folded Reload
movsd %xmm0, -3936(%rbp) ## 8-byte Spill
addsd %xmm0, %xmm6
movsd -4448(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -1456(%rbp), %xmm0 ## 16-byte Folded Reload
movsd %xmm0, -4448(%rbp) ## 8-byte Spill
addsd %xmm0, %xmm6
movapd %xmm6, -2080(%rbp) ## 16-byte Spill
addsd %xmm6, %xmm4
movapd -6400(%rbp), %xmm0 ## 16-byte Reload
mulsd -408(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm0, -6400(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm4
movapd -5264(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm1, %xmm0
addsd %xmm4, %xmm0
movapd -17296(%rbp), %xmm4 ## 16-byte Reload
mulsd -288(%rbp), %xmm4 ## 8-byte Folded Reload
subsd %xmm4, %xmm0
movapd -17280(%rbp), %xmm4 ## 16-byte Reload
mulsd -528(%rbp), %xmm4 ## 16-byte Folded Reload
subsd %xmm4, %xmm0
movsd -5288(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd -3840(%rbp), %xmm4 ## 16-byte Folded Reload
movsd %xmm4, -5288(%rbp) ## 8-byte Spill
addsd %xmm4, %xmm0
movsd -224(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
movapd -10208(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm4, %xmm6
movsd %xmm6, -624(%rbp) ## 8-byte Spill
addsd %xmm6, %xmm0
movapd %xmm11, %xmm12
movsd -3544(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
movsd LCPI19_129(%rip), %xmm8 ## xmm8 = mem[0],zero
mulsd %xmm8, %xmm6
addsd %xmm0, %xmm6
movapd -17264(%rbp), %xmm0 ## 16-byte Reload
mulsd -152(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm6
movsd -352(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm4, %xmm0
movsd %xmm0, -48(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm5
movsd %xmm6, -6704(%rbp) ## 8-byte Spill
addsd %xmm6, %xmm5
movsd -2216(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
movsd LCPI19_130(%rip), %xmm10 ## xmm10 = mem[0],zero
mulsd %xmm10, %xmm6
addsd %xmm5, %xmm6
movsd -13176(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -464(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm6
movsd %xmm6, -3496(%rbp) ## 8-byte Spill
addsd %xmm6, %xmm7
movsd -344(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm4, %xmm0
movsd %xmm0, -1440(%rbp) ## 8-byte Spill
addsd %xmm0, %xmm7
movsd -2224(%rbp), %xmm14 ## 8-byte Reload
## xmm14 = mem[0],zero
movapd %xmm14, %xmm5
movsd LCPI19_125(%rip), %xmm9 ## xmm9 = mem[0],zero
mulsd %xmm9, %xmm5
addsd %xmm7, %xmm5
movsd -9808(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -368(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm5
movapd -11568(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm11, %xmm0
movsd -13448(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd -9312(%rbp), %xmm7 ## 16-byte Folded Reload
subsd %xmm0, %xmm7
mulsd -8912(%rbp), %xmm2 ## 16-byte Folded Reload
movsd -13472(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
mulsd -4176(%rbp), %xmm12 ## 16-byte Folded Reload
subsd %xmm2, %xmm12
mulsd -2992(%rbp), %xmm3 ## 8-byte Folded Reload
mulsd -3872(%rbp), %xmm13 ## 8-byte Folded Reload
subsd %xmm3, %xmm13
movapd -14320(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm11, %xmm3
mulsd %xmm1, %xmm3
movapd -1568(%rbp), %xmm0 ## 16-byte Reload
mulsd -13872(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm3
movsd -2984(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -2160(%rbp), %xmm0 ## 16-byte Folded Reload
movsd %xmm0, -2984(%rbp) ## 8-byte Spill
addsd %xmm0, %xmm3
movsd -4440(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -2176(%rbp), %xmm0 ## 16-byte Folded Reload
movsd %xmm0, -4440(%rbp) ## 8-byte Spill
addsd %xmm0, %xmm3
movapd %xmm3, -2272(%rbp) ## 16-byte Spill
addsd %xmm3, %xmm13
movapd -6384(%rbp), %xmm0 ## 16-byte Reload
mulsd -376(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm0, -6384(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm13
movapd -5216(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm1, %xmm0
addsd %xmm13, %xmm0
movapd -17248(%rbp), %xmm1 ## 16-byte Reload
mulsd -296(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm0
movapd -208(%rbp), %xmm13 ## 16-byte Reload
movapd -17232(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm13, %xmm1
subsd %xmm1, %xmm0
movsd -5280(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -3216(%rbp), %xmm1 ## 16-byte Folded Reload
movsd %xmm1, -5280(%rbp) ## 8-byte Spill
addsd %xmm1, %xmm0
movsd -232(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
movapd -10192(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm1, %xmm3
movsd %xmm3, -1024(%rbp) ## 8-byte Spill
addsd %xmm3, %xmm0
movsd -3008(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm8, %xmm3
addsd %xmm0, %xmm3
movsd -216(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm1, %xmm0
movsd %xmm0, -560(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm12
movapd -17216(%rbp), %xmm0 ## 16-byte Reload
mulsd -160(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm3
movsd %xmm3, -3504(%rbp) ## 8-byte Spill
addsd %xmm3, %xmm12
movsd -2208(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm10, %xmm3
addsd %xmm12, %xmm3
movsd -13136(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -304(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm3
movsd %xmm3, -8928(%rbp) ## 8-byte Spill
addsd %xmm3, %xmm7
movapd -2928(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm1, %xmm0
movsd %xmm0, -6256(%rbp) ## 8-byte Spill
addsd %xmm0, %xmm7
movsd -1376(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd %xmm9, %xmm6
addsd %xmm7, %xmm6
movapd %xmm15, %xmm2
mulsd %xmm4, %xmm2
movsd -9784(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -360(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm6
movapd %xmm15, %xmm7
mulsd %xmm1, %xmm7
testq %rax, %rax
movapd -2320(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm15, %xmm12
je LBB19_80
## %bb.79:
movsd -128(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd -5304(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
mulsd LCPI19_128(%rip), %xmm0
movsd -9592(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm1, %xmm4
addsd %xmm0, %xmm4
movsd LCPI19_7(%rip), %xmm0 ## xmm0 = mem[0],zero
movapd -11152(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm0, %xmm1
subsd %xmm1, %xmm4
movsd -9600(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd -5888(%rbp), %xmm3 ## 16-byte Folded Reload
addsd %xmm4, %xmm3
movsd -2976(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm0, %xmm1
addsd %xmm3, %xmm1
movsd -12416(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -6736(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm1
addsd %xmm5, %xmm1
addsd %xmm2, %xmm1
movsd LCPI19_131(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd -3536(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm0, %xmm1
movsd -9576(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm8, %xmm3
subsd %xmm3, %xmm1
addsd %xmm6, %xmm1
addsd %xmm7, %xmm1
addsd %xmm0, %xmm1
subsd %xmm3, %xmm1
movsd %xmm1, 136(%rax)
LBB19_80:
movapd %xmm2, -8624(%rbp) ## 16-byte Spill
movapd %xmm7, -8656(%rbp) ## 16-byte Spill
movsd %xmm6, -4432(%rbp) ## 8-byte Spill
movsd %xmm5, -128(%rbp) ## 8-byte Spill
movsd -5368(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -5600(%rbp), %xmm0 ## 16-byte Folded Reload
movsd -12984(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd -3232(%rbp), %xmm3 ## 16-byte Folded Reload
addsd %xmm0, %xmm3
movsd -1984(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -2992(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm3
movsd -12800(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -3872(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm3
movsd -1056(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -5616(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -2112(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -9672(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm0, %xmm1
movapd -1040(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm11, %xmm0
addsd %xmm1, %xmm0
movapd -1568(%rbp), %xmm1 ## 16-byte Reload
mulsd -9664(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm0
movsd -9752(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -832(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm0
movsd -13128(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -2160(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movsd -7288(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -672(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm1
movapd -10224(%rbp), %xmm0 ## 16-byte Reload
mulsd -2176(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
addsd -4224(%rbp), %xmm0 ## 8-byte Folded Reload
addsd -4880(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm0, -10224(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm3
movapd -10464(%rbp), %xmm0 ## 16-byte Reload
mulsd -3016(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm3
movsd -13104(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -376(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm3, %xmm1
movsd -5360(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -5216(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
movsd -12912(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -296(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm0
movapd -17056(%rbp), %xmm3 ## 16-byte Reload
mulsd -64(%rbp), %xmm3 ## 16-byte Folded Reload
addsd %xmm0, %xmm3
movapd -16928(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm13, %xmm0
subsd %xmm0, %xmm3
movsd -3480(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -4240(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm3
movsd -13112(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -3216(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm3, %xmm1
movapd -10480(%rbp), %xmm0 ## 16-byte Reload
mulsd -3024(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm1
movsd -13008(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -232(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
movsd -920(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd -3008(%rbp), %xmm3 ## 8-byte Folded Reload
addsd %xmm0, %xmm3
movsd -12792(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -160(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm3
movapd -8880(%rbp), %xmm0 ## 16-byte Reload
mulsd -7432(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -12968(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -5872(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -8912(%rbp), %xmm0 ## 16-byte Reload
mulsd -7424(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
movsd -12776(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -4176(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -10448(%rbp), %xmm0 ## 16-byte Reload
mulsd -2704(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
movsd -13120(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -216(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm0, %xmm1
movsd %xmm3, -3480(%rbp) ## 8-byte Spill
movapd %xmm3, %xmm0
subsd %xmm1, %xmm0
movsd -2768(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -2208(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm0, %xmm1
movsd -12808(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -304(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm1
movapd -12288(%rbp), %xmm0 ## 16-byte Reload
mulsd -3032(%rbp), %xmm0 ## 8-byte Folded Reload
movsd %xmm1, -8912(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm1
movsd -12976(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -2928(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
movsd -2664(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd -1376(%rbp), %xmm3 ## 8-byte Folded Reload
addsd %xmm0, %xmm3
movsd -12784(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -360(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm3
testq %rax, %rax
movsd -7456(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
movapd -7600(%rbp), %xmm4 ## 16-byte Reload
movapd -3600(%rbp), %xmm6 ## 16-byte Reload
movsd -9656(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
movsd -9648(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
movsd -6984(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
movapd -5184(%rbp), %xmm10 ## 16-byte Reload
movsd -3536(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
je LBB19_82
## %bb.81:
movapd -4160(%rbp), %xmm0 ## 16-byte Reload
mulsd -10064(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm3, %xmm1
subsd %xmm0, %xmm1
mulsd %xmm12, %xmm7
addsd %xmm1, %xmm7
movapd %xmm2, %xmm0
mulsd -10896(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm7, %xmm0
movsd -12400(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -2320(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm0
movsd %xmm0, 144(%rax)
LBB19_82:
movsd %xmm3, -8880(%rbp) ## 8-byte Spill
movsd -3528(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -4688(%rbp), %xmm0 ## 16-byte Folded Reload
movsd -12680(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd -4896(%rbp), %xmm7 ## 16-byte Folded Reload
addsd %xmm0, %xmm7
movsd -1952(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -4472(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm7
movsd -12648(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -1720(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm7
movapd -4608(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm9, %xmm0
movapd -2640(%rbp), %xmm1 ## 16-byte Reload
mulsd -13792(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -816(%rbp), %xmm0 ## 16-byte Reload
mulsd -4480(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
movapd -2816(%rbp), %xmm1 ## 16-byte Reload
mulsd -13776(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm0
movsd -9744(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -640(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm0
movsd -12768(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -1472(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movsd -3456(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -1232(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm1
movapd -4592(%rbp), %xmm0 ## 16-byte Reload
mulsd -1456(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
addsd -2720(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm10, %xmm0
movapd %xmm0, -4592(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm7
movsd -3512(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -3040(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm7
movsd -12744(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -408(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm7, %xmm1
movapd -10432(%rbp), %xmm0 ## 16-byte Reload
mulsd -5264(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
movapd -16816(%rbp), %xmm1 ## 16-byte Reload
mulsd -288(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm0
movapd -16832(%rbp), %xmm7 ## 16-byte Reload
mulsd -112(%rbp), %xmm7 ## 16-byte Folded Reload
addsd %xmm0, %xmm7
movapd -16800(%rbp), %xmm0 ## 16-byte Reload
mulsd -528(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm7
movsd -3464(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -5232(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm7
movsd -12752(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -3840(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm7, %xmm1
movsd -3520(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -3048(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm1
movsd -12688(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -224(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
movsd -2240(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
movsd -3544(%rbp), %xmm15 ## 8-byte Reload
## xmm15 = mem[0],zero
mulsd %xmm15, %xmm7
addsd %xmm0, %xmm7
movsd -12640(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -152(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm7
movapd -8896(%rbp), %xmm0 ## 16-byte Reload
mulsd -7400(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -12664(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -8208(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -8944(%rbp), %xmm0 ## 16-byte Reload
mulsd -7392(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
movsd -12624(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -6144(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movsd -5336(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -2728(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
movsd -12760(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -352(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm0, %xmm1
movsd %xmm7, -2976(%rbp) ## 8-byte Spill
movapd %xmm7, %xmm0
subsd %xmm1, %xmm0
movsd -1968(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -2216(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm0, %xmm1
movsd -12656(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -464(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm1
movsd -7384(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -3056(%rbp), %xmm0 ## 8-byte Folded Reload
movsd %xmm1, -8944(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm1
movsd -12672(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -344(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
movsd -2184(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd %xmm14, %xmm3
addsd %xmm0, %xmm3
movsd -12632(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -368(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm3
testq %rax, %rax
je LBB19_84
## %bb.83:
movapd -4160(%rbp), %xmm0 ## 16-byte Reload
mulsd -10000(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm3, %xmm1
subsd %xmm0, %xmm1
mulsd %xmm12, %xmm5
addsd %xmm1, %xmm5
movapd %xmm2, %xmm0
mulsd -10816(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm5, %xmm0
movsd -12376(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -2320(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm0
movsd %xmm0, 152(%rax)
LBB19_84:
movsd -6352(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -5600(%rbp), %xmm0 ## 16-byte Folded Reload
movsd -12584(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd -3232(%rbp), %xmm5 ## 16-byte Folded Reload
addsd %xmm0, %xmm5
movsd -800(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -2992(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm5
movsd -12568(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -3872(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm5
movapd -336(%rbp), %xmm0 ## 16-byte Reload
mulsd -5616(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -2112(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -13728(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -3120(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm11, %xmm0
addsd %xmm1, %xmm0
movapd -1568(%rbp), %xmm1 ## 16-byte Reload
mulsd -13712(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm0
movsd -9736(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -832(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm0
movsd -12616(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -2160(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movsd -3440(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -672(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm1
movapd -3296(%rbp), %xmm0 ## 16-byte Reload
mulsd -2176(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
addsd -4224(%rbp), %xmm0 ## 8-byte Folded Reload
addsd -4880(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm0, -3296(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm5
movapd -7856(%rbp), %xmm0 ## 16-byte Reload
mulsd -3016(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm5
movsd -12600(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -376(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm5, %xmm1
movapd -7840(%rbp), %xmm0 ## 16-byte Reload
mulsd -5216(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
movapd -16768(%rbp), %xmm1 ## 16-byte Reload
mulsd -296(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm0
movapd -16784(%rbp), %xmm5 ## 16-byte Reload
mulsd -64(%rbp), %xmm5 ## 16-byte Folded Reload
addsd %xmm0, %xmm5
movapd -16752(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm13, %xmm0
subsd %xmm0, %xmm5
movsd -3448(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -4240(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm5
movsd -9728(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -3216(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm5, %xmm1
movapd -6320(%rbp), %xmm0 ## 16-byte Reload
mulsd -3024(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm1
movsd -12592(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -232(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
movsd -1744(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd -3008(%rbp), %xmm5 ## 8-byte Folded Reload
addsd %xmm0, %xmm5
movsd -12560(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -160(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm5
movapd -6336(%rbp), %xmm0 ## 16-byte Reload
mulsd -2704(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -12608(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -216(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm0, %xmm1
movsd %xmm5, -3456(%rbp) ## 8-byte Spill
movapd %xmm5, %xmm0
subsd %xmm1, %xmm0
movsd -3488(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -2208(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm0, %xmm1
movsd -12552(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -304(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm1
movapd %xmm6, %xmm0
mulsd -3032(%rbp), %xmm0 ## 8-byte Folded Reload
movsd %xmm1, -3440(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm1
movsd -12576(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -2928(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
movapd %xmm4, %xmm2
mulsd -1376(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm0, %xmm2
movapd -16736(%rbp), %xmm0 ## 16-byte Reload
mulsd -360(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm2
testq %rax, %rax
movsd -9632(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
je LBB19_86
## %bb.85:
movapd -4160(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm6, %xmm0
movapd %xmm2, %xmm1
subsd %xmm0, %xmm1
movsd -9560(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm12, %xmm5
addsd %xmm1, %xmm5
movsd -3536(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm4, %xmm0
addsd %xmm5, %xmm0
movsd -12368(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -2320(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm0
movsd %xmm0, 160(%rax)
LBB19_86:
movapd %xmm2, -8896(%rbp) ## 16-byte Spill
movsd -1632(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -4688(%rbp), %xmm0 ## 16-byte Folded Reload
movsd -12520(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd -4896(%rbp), %xmm4 ## 16-byte Folded Reload
addsd %xmm0, %xmm4
movsd -1120(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -4472(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm4
movsd -9904(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -1720(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm4
movapd -432(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm9, %xmm0
movapd -2640(%rbp), %xmm1 ## 16-byte Reload
mulsd -13680(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -2000(%rbp), %xmm0 ## 16-byte Reload
mulsd -4480(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
movapd -2816(%rbp), %xmm1 ## 16-byte Reload
mulsd -8224(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm0
movsd -9720(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -640(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm0
movsd -12544(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -1472(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movsd -5520(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -1232(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm1
movapd -4576(%rbp), %xmm0 ## 16-byte Reload
mulsd -1456(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
addsd -2720(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm10, %xmm0
movapd %xmm0, -4576(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm4
movsd -4944(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -3040(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm4
movsd -7144(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -408(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm4, %xmm1
movapd -6304(%rbp), %xmm0 ## 16-byte Reload
mulsd -5264(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
movapd -10704(%rbp), %xmm1 ## 16-byte Reload
mulsd -288(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm0
movapd -16720(%rbp), %xmm4 ## 16-byte Reload
mulsd -112(%rbp), %xmm4 ## 16-byte Folded Reload
addsd %xmm0, %xmm4
movapd -10688(%rbp), %xmm0 ## 16-byte Reload
mulsd -528(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm4
movsd -2656(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -5232(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm4
movsd -9712(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -3840(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm4, %xmm1
movsd -2960(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -3048(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm1
movsd -12528(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -224(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
movsd -1112(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm15, %xmm4
addsd %xmm0, %xmm4
movsd -9888(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -152(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm4
movsd -4544(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -2728(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -12536(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -352(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm0, %xmm1
movsd %xmm4, -3464(%rbp) ## 8-byte Spill
movapd %xmm4, %xmm0
subsd %xmm1, %xmm0
movsd -4960(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -2216(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm0, %xmm1
movsd -6992(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -464(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm1
movapd -5488(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm0
mulsd -3056(%rbp), %xmm0 ## 8-byte Folded Reload
movsd %xmm1, -3448(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm1
movsd -12512(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -344(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
movapd -5504(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm2
mulsd %xmm14, %xmm2
addsd %xmm0, %xmm2
movapd -10672(%rbp), %xmm0 ## 16-byte Reload
mulsd -368(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm2
testq %rax, %rax
je LBB19_88
## %bb.87:
movapd -4160(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm6, %xmm0
movapd %xmm2, %xmm1
subsd %xmm0, %xmm1
mulsd %xmm12, %xmm7
addsd %xmm1, %xmm7
movsd -3536(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm4, %xmm0
addsd %xmm7, %xmm0
mulsd -2320(%rbp), %xmm8 ## 16-byte Folded Reload
subsd %xmm8, %xmm0
movsd %xmm0, 168(%rax)
LBB19_88:
movapd %xmm2, -5520(%rbp) ## 16-byte Spill
movsd %xmm3, -2656(%rbp) ## 8-byte Spill
movapd -5600(%rbp), %xmm0 ## 16-byte Reload
mulsd -2192(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -4392(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd -3232(%rbp), %xmm4 ## 16-byte Folded Reload
addsd %xmm0, %xmm4
movsd -2992(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -1360(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm4
movsd -2648(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -3872(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm4
movsd -5616(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -3328(%rbp), %xmm1 ## 16-byte Folded Reload
movsd -2112(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -7536(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
mulsd -1760(%rbp), %xmm11 ## 16-byte Folded Reload
addsd %xmm0, %xmm11
movapd -1568(%rbp), %xmm0 ## 16-byte Reload
mulsd -7520(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm11
movsd -4352(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -832(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm11
movsd -4928(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -2160(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm11, %xmm0
movsd -4344(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -672(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm0
movsd -3432(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -2176(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movsd -4224(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
addsd %xmm1, %xmm0
movapd -4880(%rbp), %xmm10 ## 16-byte Reload
addsd %xmm0, %xmm10
movapd %xmm10, -4880(%rbp) ## 16-byte Spill
addsd %xmm10, %xmm4
movsd -3536(%rbp), %xmm14 ## 8-byte Reload
## xmm14 = mem[0],zero
movapd -4048(%rbp), %xmm0 ## 16-byte Reload
mulsd -3016(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm4
movsd -4400(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -376(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm4, %xmm1
movapd -3712(%rbp), %xmm0 ## 16-byte Reload
mulsd -5216(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
movapd -8304(%rbp), %xmm1 ## 16-byte Reload
mulsd -296(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm0
movapd -8592(%rbp), %xmm2 ## 16-byte Reload
mulsd -64(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm0, %xmm2
movapd -8288(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm13, %xmm0
subsd %xmm0, %xmm2
movsd -2952(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -4240(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm2
movapd -3216(%rbp), %xmm1 ## 16-byte Reload
mulsd -1136(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm2, %xmm1
movapd -4624(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm0
mulsd -3024(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm1
movsd -8544(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -232(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
movsd -2464(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
movapd %xmm4, %xmm2
movsd -3008(%rbp), %xmm13 ## 8-byte Reload
## xmm13 = mem[0],zero
mulsd %xmm13, %xmm2
addsd %xmm0, %xmm2
movsd -8320(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -160(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm2
movapd %xmm6, %xmm0
mulsd -2704(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -8432(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -216(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm0, %xmm1
movsd %xmm2, -1136(%rbp) ## 8-byte Spill
movapd %xmm2, %xmm0
subsd %xmm1, %xmm0
movapd %xmm4, %xmm1
mulsd -2208(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm0, %xmm1
movsd -4328(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -304(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm1
movapd %xmm6, %xmm0
mulsd -3032(%rbp), %xmm0 ## 8-byte Folded Reload
movsd %xmm1, -3216(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm1
movsd -4384(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -2928(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
movapd %xmm4, %xmm3
movsd -1376(%rbp), %xmm8 ## 8-byte Reload
## xmm8 = mem[0],zero
mulsd %xmm8, %xmm3
addsd %xmm0, %xmm3
movsd -4336(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -360(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm3
testq %rax, %rax
je LBB19_90
## %bb.89:
movapd -4160(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm6, %xmm0
movapd %xmm3, %xmm1
subsd %xmm0, %xmm1
movsd -4320(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm12, %xmm2
addsd %xmm1, %xmm2
movapd %xmm14, %xmm0
mulsd %xmm4, %xmm0
addsd %xmm2, %xmm0
movsd -4312(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -2320(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm0
movsd %xmm0, 176(%rax)
LBB19_90:
movsd %xmm3, -2992(%rbp) ## 8-byte Spill
movapd %xmm12, %xmm11
movapd -4688(%rbp), %xmm0 ## 16-byte Reload
mulsd -2200(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -8800(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd -4896(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm0, %xmm2
movsd -4472(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -4080(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm2
movsd -5456(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -1720(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm2
mulsd -2480(%rbp), %xmm9 ## 8-byte Folded Reload
movapd -2640(%rbp), %xmm0 ## 16-byte Reload
mulsd -6272(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm9, %xmm0
movsd -4480(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -1776(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -2816(%rbp), %xmm0 ## 16-byte Reload
mulsd -7584(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm1
movsd -8640(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -640(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm1
movsd -6288(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -1472(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
movsd -4416(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -1232(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm0
movsd -1624(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -1456(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movsd -2720(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
addsd %xmm1, %xmm0
movapd -5184(%rbp), %xmm1 ## 16-byte Reload
addsd %xmm0, %xmm1
movapd %xmm1, -5184(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm2
movsd -1368(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -3040(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm2
movsd -8848(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -408(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm2, %xmm1
movsd -4064(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -5264(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
movsd -4408(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -288(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm0
movapd -8832(%rbp), %xmm2 ## 16-byte Reload
mulsd -112(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm0, %xmm2
movapd -8608(%rbp), %xmm0 ## 16-byte Reload
mulsd -528(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm2
movsd -2968(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -5232(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm2
movapd -3840(%rbp), %xmm1 ## 16-byte Reload
mulsd -3856(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm2, %xmm1
movapd -4656(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm0
mulsd -3048(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm1
movsd -8816(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -224(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
movapd -3312(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm2
mulsd %xmm15, %xmm2
addsd %xmm0, %xmm2
movapd -8576(%rbp), %xmm0 ## 16-byte Reload
mulsd -152(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm2
movapd %xmm4, %xmm0
mulsd -2728(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -8784(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -352(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm0, %xmm1
movapd %xmm2, -3856(%rbp) ## 16-byte Spill
movapd %xmm2, %xmm0
subsd %xmm1, %xmm0
movapd %xmm3, %xmm1
mulsd -2216(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm0, %xmm1
movapd -8560(%rbp), %xmm0 ## 16-byte Reload
mulsd -464(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm1
movapd %xmm4, %xmm0
mulsd -3056(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm1, -3840(%rbp) ## 16-byte Spill
subsd %xmm0, %xmm1
movsd -4424(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -344(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
movapd %xmm3, %xmm2
mulsd -2224(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm0, %xmm2
movapd -5440(%rbp), %xmm0 ## 16-byte Reload
mulsd -368(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm2
testq %rax, %rax
movapd -2256(%rbp), %xmm6 ## 16-byte Reload
movapd -2496(%rbp), %xmm7 ## 16-byte Reload
movsd -320(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
movapd -992(%rbp), %xmm10 ## 16-byte Reload
movapd -2320(%rbp), %xmm9 ## 16-byte Reload
je LBB19_92
## %bb.91:
movapd -4160(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm4, %xmm0
movapd %xmm2, %xmm1
subsd %xmm0, %xmm1
movsd -4376(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm11, %xmm4
addsd %xmm1, %xmm4
movapd %xmm14, %xmm0
mulsd %xmm3, %xmm0
addsd %xmm4, %xmm0
movsd -4368(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm9, %xmm1
subsd %xmm1, %xmm0
movsd %xmm0, 184(%rax)
LBB19_92:
movapd %xmm2, -4224(%rbp) ## 16-byte Spill
movapd -8768(%rbp), %xmm3 ## 16-byte Reload
mulsd -216(%rbp), %xmm3 ## 8-byte Folded Reload
movsd -2704(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm7, %xmm0
subsd %xmm0, %xmm3
movapd -14272(%rbp), %xmm1 ## 16-byte Reload
mulsd -3408(%rbp), %xmm1 ## 8-byte Folded Reload
movsd -9864(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -648(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm1
movapd -5168(%rbp), %xmm0 ## 16-byte Reload
mulsd -1128(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
movapd -4912(%rbp), %xmm1 ## 16-byte Reload
mulsd -832(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm0
movapd -2160(%rbp), %xmm1 ## 16-byte Reload
mulsd -936(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm0
movapd -3424(%rbp), %xmm1 ## 16-byte Reload
mulsd -672(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm0
movapd -2176(%rbp), %xmm1 ## 16-byte Reload
mulsd -1144(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm0
movsd -1688(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -8080(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -2016(%rbp), %xmm0 ## 16-byte Reload
mulsd -3560(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
movsd -3016(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm7, %xmm1
movapd %xmm0, -2016(%rbp) ## 16-byte Spill
subsd %xmm1, %xmm0
movsd -376(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movapd -7776(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm4, %xmm1
subsd %xmm1, %xmm0
movapd -5216(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm6, %xmm1
addsd %xmm0, %xmm1
movsd -296(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movapd -5664(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm5, %xmm0
subsd %xmm0, %xmm1
movapd -8752(%rbp), %xmm2 ## 16-byte Reload
mulsd -64(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm1, %xmm2
movapd -5648(%rbp), %xmm0 ## 16-byte Reload
mulsd -208(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm2
movsd -3024(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm7, %xmm0
subsd %xmm0, %xmm2
mulsd -232(%rbp), %xmm4 ## 8-byte Folded Reload
subsd %xmm4, %xmm2
mulsd %xmm6, %xmm13
addsd %xmm2, %xmm13
mulsd -160(%rbp), %xmm5 ## 8-byte Folded Reload
subsd %xmm5, %xmm13
addsd %xmm13, %xmm3
movsd -2208(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm6, %xmm0
addsd %xmm3, %xmm0
movapd -3680(%rbp), %xmm1 ## 16-byte Reload
mulsd -304(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm0
movsd -3032(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm7, %xmm1
movsd %xmm0, -2208(%rbp) ## 8-byte Spill
subsd %xmm1, %xmm0
movapd -8864(%rbp), %xmm1 ## 16-byte Reload
mulsd -2928(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm0
mulsd %xmm6, %xmm8
addsd %xmm0, %xmm8
movapd -3648(%rbp), %xmm0 ## 16-byte Reload
mulsd -360(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm8
testq %rax, %rax
je LBB19_94
## %bb.93:
movapd -4160(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm7, %xmm0
movapd %xmm8, %xmm1
subsd %xmm0, %xmm1
movsd -7664(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm11, %xmm0
subsd %xmm0, %xmm1
movapd %xmm14, %xmm0
mulsd %xmm6, %xmm0
addsd %xmm1, %xmm0
movsd -3472(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm9, %xmm1
subsd %xmm1, %xmm0
movsd %xmm0, 192(%rax)
LBB19_94:
movapd -5760(%rbp), %xmm2 ## 16-byte Reload
mulsd -352(%rbp), %xmm2 ## 8-byte Folded Reload
movsd -2728(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm10, %xmm0
subsd %xmm0, %xmm2
movapd -14288(%rbp), %xmm1 ## 16-byte Reload
mulsd -976(%rbp), %xmm1 ## 16-byte Folded Reload
movapd -14352(%rbp), %xmm0 ## 16-byte Reload
mulsd -72(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm1
movapd -6752(%rbp), %xmm0 ## 16-byte Reload
mulsd -536(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
movapd -3888(%rbp), %xmm1 ## 16-byte Reload
mulsd -640(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm0
movapd -1472(%rbp), %xmm1 ## 16-byte Reload
mulsd -1408(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm0
movapd -2944(%rbp), %xmm1 ## 16-byte Reload
mulsd -1232(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm0
movapd -1456(%rbp), %xmm1 ## 16-byte Reload
mulsd -1152(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm0
movapd -2512(%rbp), %xmm1 ## 16-byte Reload
mulsd -6800(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -960(%rbp), %xmm0 ## 16-byte Reload
mulsd -6816(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
movsd -3040(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm10, %xmm1
movapd %xmm0, -960(%rbp) ## 16-byte Spill
subsd %xmm1, %xmm0
movsd -408(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movapd -5776(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm3, %xmm1
subsd %xmm1, %xmm0
movapd -5264(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm12, %xmm1
addsd %xmm0, %xmm1
movsd -288(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd -1384(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm4, %xmm0
subsd %xmm0, %xmm1
movapd -5728(%rbp), %xmm0 ## 16-byte Reload
mulsd -112(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
movapd -4848(%rbp), %xmm1 ## 16-byte Reload
mulsd -528(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm0
movsd -3048(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm10, %xmm1
subsd %xmm1, %xmm0
mulsd -224(%rbp), %xmm3 ## 8-byte Folded Reload
subsd %xmm3, %xmm0
mulsd %xmm12, %xmm15
addsd %xmm0, %xmm15
mulsd -152(%rbp), %xmm4 ## 8-byte Folded Reload
subsd %xmm4, %xmm15
addsd %xmm15, %xmm2
movsd -2216(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm12, %xmm0
addsd %xmm2, %xmm0
movsd -456(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -464(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm0
movsd -3056(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm10, %xmm1
movsd %xmm0, -2216(%rbp) ## 8-byte Spill
subsd %xmm1, %xmm0
movapd -4768(%rbp), %xmm1 ## 16-byte Reload
mulsd -344(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm0
movsd -2224(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm12, %xmm1
addsd %xmm0, %xmm1
movsd -1792(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -368(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm1
testq %rax, %rax
movsd -280(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
je LBB19_96
## %bb.95:
movapd -4160(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm10, %xmm3
movapd %xmm1, %xmm0
subsd %xmm3, %xmm0
mulsd %xmm11, %xmm2
subsd %xmm2, %xmm0
mulsd %xmm12, %xmm14
addsd %xmm0, %xmm14
movsd -6640(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm9, %xmm0
subsd %xmm0, %xmm14
movsd %xmm14, 200(%rax)
LBB19_96:
movsd %xmm15, -3544(%rbp) ## 8-byte Spill
movsd %xmm1, -2224(%rbp) ## 8-byte Spill
movapd -5248(%rbp), %xmm0 ## 16-byte Reload
mulsd -640(%rbp), %xmm0 ## 16-byte Folded Reload
movsd -7448(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
subsd %xmm0, %xmm1
movsd -776(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -1232(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm1
movapd -4704(%rbp), %xmm0 ## 16-byte Reload
addsd %xmm1, %xmm0
movsd -9832(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -408(%rbp), %xmm1 ## 8-byte Folded Reload
movapd %xmm0, -4704(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm1
movapd -2800(%rbp), %xmm0 ## 16-byte Reload
mulsd -288(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm1
movapd -5232(%rbp), %xmm0 ## 16-byte Reload
mulsd -2712(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm1
movsd -7440(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
addsd %xmm1, %xmm0
movsd -224(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movsd -5352(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd %xmm6, %xmm1
addsd %xmm0, %xmm1
movsd -152(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movapd -2544(%rbp), %xmm12 ## 16-byte Reload
mulsd %xmm12, %xmm0
subsd %xmm0, %xmm1
movsd -352(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm6, %xmm0
movsd %xmm1, -640(%rbp) ## 8-byte Spill
subsd %xmm0, %xmm1
movsd -464(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm12, %xmm0
subsd %xmm0, %xmm1
movsd -344(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm6, %xmm2
movsd %xmm1, -1472(%rbp) ## 8-byte Spill
addsd %xmm1, %xmm2
movsd -368(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm12, %xmm0
subsd %xmm0, %xmm2
testq %rax, %rax
movsd %xmm2, -1456(%rbp) ## 8-byte Spill
je LBB19_97
## %bb.98:
movsd -3936(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
addsd -4448(%rbp), %xmm3 ## 8-byte Folded Reload
movapd %xmm11, %xmm0
mulsd %xmm6, %xmm0
addsd %xmm2, %xmm0
movapd %xmm9, %xmm1
mulsd %xmm12, %xmm1
subsd %xmm1, %xmm0
movsd %xmm0, 208(%rax)
movsd %xmm3, -3936(%rbp) ## 8-byte Spill
movapd -6400(%rbp), %xmm0 ## 16-byte Reload
addsd %xmm3, %xmm0
addsd -5288(%rbp), %xmm0 ## 8-byte Folded Reload
addsd -624(%rbp), %xmm0 ## 8-byte Folded Reload
movapd %xmm0, -6400(%rbp) ## 16-byte Spill
movsd -48(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
subsd %xmm0, %xmm1
movsd %xmm1, -48(%rbp) ## 8-byte Spill
movapd %xmm1, %xmm0
subsd -1440(%rbp), %xmm0 ## 8-byte Folded Reload
movsd %xmm0, -624(%rbp) ## 8-byte Spill
subsd -8624(%rbp), %xmm0 ## 16-byte Folded Reload
movsd %xmm0, 216(%rax)
jmp LBB19_99
LBB19_97:
movsd -3936(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
addsd -4448(%rbp), %xmm0 ## 8-byte Folded Reload
movsd %xmm0, -3936(%rbp) ## 8-byte Spill
movapd -6400(%rbp), %xmm1 ## 16-byte Reload
addsd %xmm0, %xmm1
addsd -5288(%rbp), %xmm1 ## 8-byte Folded Reload
addsd -624(%rbp), %xmm1 ## 8-byte Folded Reload
movapd %xmm1, -6400(%rbp) ## 16-byte Spill
movsd -48(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
subsd %xmm1, %xmm0
movsd %xmm0, -48(%rbp) ## 8-byte Spill
subsd -1440(%rbp), %xmm0 ## 8-byte Folded Reload
movsd %xmm0, -624(%rbp) ## 8-byte Spill
LBB19_99:
movsd -216(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movapd -7984(%rbp), %xmm4 ## 16-byte Reload
movapd -7968(%rbp), %xmm7 ## 16-byte Reload
movapd -9920(%rbp), %xmm15 ## 16-byte Reload
movsd -760(%rbp), %xmm14 ## 8-byte Reload
## xmm14 = mem[0],zero
movsd -1840(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
movsd %xmm13, -3008(%rbp) ## 8-byte Spill
movapd -5200(%rbp), %xmm0 ## 16-byte Reload
mulsd -832(%rbp), %xmm0 ## 16-byte Folded Reload
movsd -7416(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
subsd %xmm0, %xmm3
movsd -768(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -672(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm3
movapd -4032(%rbp), %xmm0 ## 16-byte Reload
addsd %xmm3, %xmm0
movsd -9824(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd -376(%rbp), %xmm3 ## 8-byte Folded Reload
movapd %xmm0, -4032(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm3
movapd -1200(%rbp), %xmm0 ## 16-byte Reload
mulsd -296(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm3
movapd -4240(%rbp), %xmm0 ## 16-byte Reload
mulsd -2696(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm3
movsd -7408(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
addsd %xmm3, %xmm0
movsd -232(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
movsd -5344(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm5, %xmm3
addsd %xmm0, %xmm3
movsd -160(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movapd -1296(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm2, %xmm0
subsd %xmm0, %xmm3
movapd %xmm1, %xmm0
mulsd %xmm5, %xmm0
movsd %xmm3, -672(%rbp) ## 8-byte Spill
movapd %xmm3, %xmm1
subsd %xmm0, %xmm1
movsd -304(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm2, %xmm0
subsd %xmm0, %xmm1
testq %rax, %rax
movsd %xmm1, -832(%rbp) ## 8-byte Spill
je LBB19_100
## %bb.101:
movapd %xmm9, %xmm3
movapd -2928(%rbp), %xmm9 ## 16-byte Reload
mulsd %xmm5, %xmm9
addsd %xmm1, %xmm9
movsd -360(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm2, %xmm0
subsd %xmm0, %xmm9
mulsd %xmm5, %xmm11
addsd %xmm9, %xmm11
mulsd %xmm2, %xmm3
subsd %xmm3, %xmm11
movsd %xmm11, 224(%rax)
movsd -2984(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
addsd -4440(%rbp), %xmm0 ## 8-byte Folded Reload
movsd %xmm0, -2984(%rbp) ## 8-byte Spill
movapd -6384(%rbp), %xmm1 ## 16-byte Reload
addsd %xmm0, %xmm1
addsd -5280(%rbp), %xmm1 ## 8-byte Folded Reload
addsd -1024(%rbp), %xmm1 ## 8-byte Folded Reload
movapd %xmm1, -6384(%rbp) ## 16-byte Spill
movsd -560(%rbp), %xmm13 ## 8-byte Reload
## xmm13 = mem[0],zero
subsd %xmm1, %xmm13
movsd %xmm13, -560(%rbp) ## 8-byte Spill
subsd -6256(%rbp), %xmm13 ## 8-byte Folded Reload
movapd %xmm13, %xmm0
subsd -8656(%rbp), %xmm0 ## 16-byte Folded Reload
movsd %xmm0, 232(%rax)
movapd -10256(%rbp), %xmm0 ## 16-byte Reload
subsd -12464(%rbp), %xmm0 ## 8-byte Folded Reload
unpcklpd -11120(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
unpcklpd -13824(%rbp), %xmm7 ## 16-byte Folded Reload
## xmm7 = xmm7[0],mem[0]
addpd %xmm0, %xmm7
movupd %xmm7, 240(%rax)
movq $0, 256(%rax)
movsd -12424(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 264(%rax)
movsd -7040(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 272(%rax)
movsd -12384(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 280(%rax)
movsd -12360(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 288(%rax)
movsd -1648(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 296(%rax)
movsd -1656(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 304(%rax)
movsd -5056(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 312(%rax)
movsd -784(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 320(%rax)
movsd %xmm12, 328(%rax)
movsd %xmm2, 336(%rax)
movapd -8016(%rbp), %xmm1 ## 16-byte Reload
unpcklpd -10016(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = xmm1[0],mem[0]
movapd -9136(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -9984(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
addpd %xmm1, %xmm0
movupd %xmm0, 344(%rax)
movsd -7000(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
addsd -9608(%rbp), %xmm0 ## 8-byte Folded Reload
movsd %xmm0, 360(%rax)
movsd -9584(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 368(%rax)
movsd -9568(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 376(%rax)
movsd -9640(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 384(%rax)
movsd -4296(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 392(%rax)
movsd -928(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 400(%rax)
movsd %xmm14, 408(%rax)
movsd -1392(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 416(%rax)
movsd -1400(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 424(%rax)
movsd %xmm10, 432(%rax)
movsd -7888(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 440(%rax)
movsd -688(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 448(%rax)
movsd -7616(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 456(%rax)
movapd -9264(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -13904(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
movapd -9184(%rbp), %xmm1 ## 16-byte Reload
unpcklpd -13888(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = xmm1[0],mem[0]
addpd %xmm0, %xmm1
movupd %xmm1, 464(%rax)
movapd -10208(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm0
movapd -10192(%rbp), %xmm3 ## 16-byte Reload
addsd %xmm3, %xmm0
movsd %xmm0, 480(%rax)
movsd -7056(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 488(%rax)
movsd -7048(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 496(%rax)
movsd -7032(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 504(%rax)
movsd -7024(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 512(%rax)
movsd -5424(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 520(%rax)
movsd -4528(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 528(%rax)
movapd -4128(%rbp), %xmm7 ## 16-byte Reload
unpcklpd -2288(%rbp), %xmm7 ## 16-byte Folded Reload
## xmm7 = xmm7[0],mem[0]
movapd LCPI19_135(%rip), %xmm0 ## xmm0 = [-0.0E+0,-0.0E+0]
xorpd %xmm0, %xmm7
movupd %xmm7, 536(%rax)
movsd %xmm6, 552(%rax)
xorpd %xmm0, %xmm1
movlpd %xmm1, 560(%rax)
movsd %xmm5, 568(%rax)
xorpd %xmm0, %xmm3
movlpd %xmm3, 576(%rax)
movapd -10944(%rbp), %xmm5 ## 16-byte Reload
unpcklpd -15504(%rbp), %xmm5 ## 16-byte Folded Reload
## xmm5 = xmm5[0],mem[0]
unpcklpd -10864(%rbp), %xmm4 ## 16-byte Folded Reload
## xmm4 = xmm4[0],mem[0]
movapd -6128(%rbp), %xmm12 ## 16-byte Reload
movsd %xmm8, -1376(%rbp) ## 8-byte Spill
movddup %xmm12, %xmm10 ## xmm10 = xmm12[0,0]
movapd %xmm10, %xmm0
mulpd %xmm5, %xmm0
movapd -6112(%rbp), %xmm11 ## 16-byte Reload
movddup %xmm11, %xmm7 ## xmm7 = xmm11[0,0]
movapd %xmm7, %xmm3
mulpd %xmm4, %xmm3
subpd %xmm3, %xmm0
unpcklpd -15488(%rbp), %xmm15 ## 16-byte Folded Reload
## xmm15 = xmm15[0],mem[0]
movapd %xmm7, %xmm3
mulpd %xmm15, %xmm3
subpd %xmm3, %xmm0
movapd -10992(%rbp), %xmm1 ## 16-byte Reload
unpcklpd -10848(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = xmm1[0],mem[0]
movapd %xmm10, %xmm3
mulpd %xmm1, %xmm3
addpd %xmm0, %xmm3
mulpd %xmm10, %xmm4
mulpd %xmm7, %xmm5
addpd %xmm5, %xmm4
mulpd %xmm10, %xmm15
addpd %xmm4, %xmm15
mulpd %xmm7, %xmm1
addpd %xmm15, %xmm1
movapd -3184(%rbp), %xmm2 ## 16-byte Reload
movddup %xmm2, %xmm8 ## xmm8 = xmm2[0,0]
mulpd %xmm8, %xmm3
movapd -2528(%rbp), %xmm5 ## 16-byte Reload
movddup %xmm5, %xmm0 ## xmm0 = xmm5[0,0]
mulpd %xmm0, %xmm1
subpd %xmm1, %xmm3
movupd %xmm3, 584(%rax)
movapd -13536(%rbp), %xmm14 ## 16-byte Reload
unpcklpd -14960(%rbp), %xmm14 ## 16-byte Folded Reload
## xmm14 = xmm14[0],mem[0]
movapd -10784(%rbp), %xmm4 ## 16-byte Reload
unpcklpd -13760(%rbp), %xmm4 ## 16-byte Folded Reload
## xmm4 = xmm4[0],mem[0]
movapd %xmm10, %xmm3
mulpd %xmm14, %xmm3
movapd %xmm7, %xmm15
mulpd %xmm4, %xmm15
subpd %xmm15, %xmm3
movapd -15200(%rbp), %xmm15 ## 16-byte Reload
unpcklpd -14992(%rbp), %xmm15 ## 16-byte Folded Reload
## xmm15 = xmm15[0],mem[0]
movapd %xmm7, %xmm6
mulpd %xmm15, %xmm6
subpd %xmm6, %xmm3
movapd -9040(%rbp), %xmm1 ## 16-byte Reload
unpcklpd -10128(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = xmm1[0],mem[0]
movapd %xmm10, %xmm6
mulpd %xmm1, %xmm6
addpd %xmm3, %xmm6
mulpd %xmm10, %xmm4
mulpd %xmm7, %xmm14
addpd %xmm14, %xmm4
mulpd %xmm10, %xmm15
addpd %xmm4, %xmm15
mulpd %xmm7, %xmm1
addpd %xmm15, %xmm1
mulpd %xmm8, %xmm6
mulpd %xmm0, %xmm1
subpd %xmm1, %xmm6
movupd %xmm6, 600(%rax)
movapd -14496(%rbp), %xmm14 ## 16-byte Reload
unpcklpd -7488(%rbp), %xmm14 ## 16-byte Folded Reload
## xmm14 = xmm14[0],mem[0]
movapd -13504(%rbp), %xmm4 ## 16-byte Reload
unpcklpd -5408(%rbp), %xmm4 ## 16-byte Folded Reload
## xmm4 = xmm4[0],mem[0]
movapd %xmm10, %xmm3
mulpd %xmm14, %xmm3
movapd %xmm7, %xmm6
mulpd %xmm4, %xmm6
subpd %xmm6, %xmm3
movapd -14512(%rbp), %xmm1 ## 16-byte Reload
unpcklpd -7504(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = xmm1[0],mem[0]
movapd %xmm7, %xmm6
mulpd %xmm1, %xmm6
subpd %xmm6, %xmm3
movapd -13520(%rbp), %xmm15 ## 16-byte Reload
unpcklpd -5392(%rbp), %xmm15 ## 16-byte Folded Reload
## xmm15 = xmm15[0],mem[0]
mulpd %xmm10, %xmm4
mulpd %xmm7, %xmm14
addpd %xmm14, %xmm4
mulpd %xmm10, %xmm1
addpd %xmm4, %xmm1
movapd %xmm10, %xmm6
mulpd %xmm15, %xmm6
movapd %xmm3, %xmm14
addpd %xmm6, %xmm14
subpd %xmm6, %xmm3
blendpd $1, %xmm14, %xmm3 ## xmm3 = xmm14[0],xmm3[1]
mulpd %xmm7, %xmm15
movapd %xmm1, %xmm4
addpd %xmm15, %xmm4
subpd %xmm15, %xmm1
blendpd $1, %xmm4, %xmm1 ## xmm1 = xmm4[0],xmm1[1]
mulpd %xmm8, %xmm3
mulpd %xmm0, %xmm1
subpd %xmm1, %xmm3
movupd %xmm3, 616(%rax)
movapd -3584(%rbp), %xmm15 ## 16-byte Reload
unpcklpd -7824(%rbp), %xmm15 ## 16-byte Folded Reload
## xmm15 = xmm15[0],mem[0]
movapd -4976(%rbp), %xmm6 ## 16-byte Reload
unpcklpd -7792(%rbp), %xmm6 ## 16-byte Folded Reload
## xmm6 = xmm6[0],mem[0]
movapd %xmm10, %xmm3
mulpd %xmm15, %xmm3
movapd %xmm7, %xmm1
mulpd %xmm6, %xmm1
subpd %xmm1, %xmm3
movapd -4992(%rbp), %xmm4 ## 16-byte Reload
unpcklpd -7872(%rbp), %xmm4 ## 16-byte Folded Reload
## xmm4 = xmm4[0],mem[0]
movapd %xmm7, %xmm1
mulpd %xmm4, %xmm1
subpd %xmm1, %xmm3
movapd -3360(%rbp), %xmm14 ## 16-byte Reload
unpcklpd -7760(%rbp), %xmm14 ## 16-byte Folded Reload
## xmm14 = xmm14[0],mem[0]
mulpd %xmm10, %xmm6
mulpd %xmm7, %xmm15
addpd %xmm15, %xmm6
mulpd %xmm10, %xmm4
addpd %xmm6, %xmm4
movapd %xmm10, %xmm1
mulpd %xmm14, %xmm1
movapd %xmm3, %xmm6
addpd %xmm1, %xmm6
subpd %xmm1, %xmm3
blendpd $1, %xmm6, %xmm3 ## xmm3 = xmm6[0],xmm3[1]
mulpd %xmm8, %xmm3
mulpd %xmm7, %xmm14
movapd %xmm4, %xmm1
addpd %xmm14, %xmm1
subpd %xmm14, %xmm4
blendpd $1, %xmm1, %xmm4 ## xmm4 = xmm1[0],xmm4[1]
mulpd %xmm0, %xmm4
subpd %xmm4, %xmm3
movupd %xmm3, 632(%rax)
movapd %xmm12, %xmm0
movapd -7680(%rbp), %xmm8 ## 16-byte Reload
mulsd %xmm8, %xmm0
movapd %xmm11, %xmm1
movapd -7744(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm3, %xmm1
subsd %xmm1, %xmm0
movapd %xmm11, %xmm1
movapd -7632(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm6, %xmm1
subsd %xmm1, %xmm0
movapd %xmm12, %xmm1
movapd -7728(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm4, %xmm1
addsd %xmm0, %xmm1
mulsd %xmm2, %xmm1
mulsd %xmm11, %xmm8
mulsd %xmm12, %xmm3
addsd %xmm8, %xmm3
mulsd %xmm12, %xmm6
addsd %xmm3, %xmm6
mulsd %xmm11, %xmm4
addsd %xmm6, %xmm4
mulsd %xmm5, %xmm4
subsd %xmm4, %xmm1
movsd %xmm1, 648(%rax)
movaps -8960(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 656(%rax)
movaps -11616(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 664(%rax)
movsd -4432(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 672(%rax)
movsd -8880(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 680(%rax)
movaps -8896(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 688(%rax)
movsd -2992(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 696(%rax)
movsd -1376(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 704(%rax)
movsd %xmm9, 712(%rax)
movsd %xmm13, 720(%rax)
movapd -10960(%rbp), %xmm6 ## 16-byte Reload
unpcklpd -10832(%rbp), %xmm6 ## 16-byte Folded Reload
## xmm6 = xmm6[0],mem[0]
movapd -9088(%rbp), %xmm4 ## 16-byte Reload
unpcklpd -10912(%rbp), %xmm4 ## 16-byte Folded Reload
## xmm4 = xmm4[0],mem[0]
movapd %xmm10, %xmm0
mulpd %xmm6, %xmm0
movapd %xmm7, %xmm1
mulpd %xmm4, %xmm1
subpd %xmm1, %xmm0
movapd -10976(%rbp), %xmm5 ## 16-byte Reload
unpcklpd -9072(%rbp), %xmm5 ## 16-byte Folded Reload
## xmm5 = xmm5[0],mem[0]
movapd %xmm7, %xmm1
mulpd %xmm5, %xmm1
subpd %xmm1, %xmm0
movapd -6768(%rbp), %xmm3 ## 16-byte Reload
unpcklpd -10928(%rbp), %xmm3 ## 16-byte Folded Reload
## xmm3 = xmm3[0],mem[0]
movapd %xmm10, %xmm1
mulpd %xmm3, %xmm1
addpd %xmm0, %xmm1
mulpd %xmm10, %xmm4
mulpd %xmm7, %xmm6
addpd %xmm6, %xmm4
mulpd %xmm10, %xmm5
addpd %xmm4, %xmm5
mulpd %xmm7, %xmm3
addpd %xmm5, %xmm3
movapd -2880(%rbp), %xmm9 ## 16-byte Reload
movddup %xmm9, %xmm4 ## xmm4 = xmm9[0,0]
mulpd %xmm4, %xmm1
movapd -2784(%rbp), %xmm8 ## 16-byte Reload
movddup %xmm8, %xmm13 ## xmm13 = xmm8[0,0]
mulpd %xmm13, %xmm3
subpd %xmm3, %xmm1
movupd %xmm1, 728(%rax)
movapd -10752(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -14672(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
movapd -10800(%rbp), %xmm6 ## 16-byte Reload
unpcklpd -13664(%rbp), %xmm6 ## 16-byte Folded Reload
## xmm6 = xmm6[0],mem[0]
movapd %xmm10, %xmm1
mulpd %xmm0, %xmm1
movapd %xmm7, %xmm3
mulpd %xmm6, %xmm3
subpd %xmm3, %xmm1
movapd -10768(%rbp), %xmm5 ## 16-byte Reload
unpcklpd -5040(%rbp), %xmm5 ## 16-byte Folded Reload
## xmm5 = xmm5[0],mem[0]
movapd %xmm7, %xmm3
mulpd %xmm5, %xmm3
subpd %xmm3, %xmm1
movapd -9056(%rbp), %xmm2 ## 16-byte Reload
unpcklpd -6080(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = xmm2[0],mem[0]
movapd %xmm10, %xmm3
mulpd %xmm2, %xmm3
addpd %xmm1, %xmm3
mulpd %xmm10, %xmm6
mulpd %xmm7, %xmm0
addpd %xmm0, %xmm6
mulpd %xmm10, %xmm5
addpd %xmm6, %xmm5
mulpd %xmm7, %xmm2
addpd %xmm5, %xmm2
mulpd %xmm4, %xmm3
mulpd %xmm13, %xmm2
subpd %xmm2, %xmm3
movupd %xmm3, 744(%rax)
movapd -14384(%rbp), %xmm6 ## 16-byte Reload
unpcklpd -7552(%rbp), %xmm6 ## 16-byte Folded Reload
## xmm6 = xmm6[0],mem[0]
movapd -10720(%rbp), %xmm2 ## 16-byte Reload
unpcklpd -6208(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = xmm2[0],mem[0]
movapd %xmm10, %xmm3
mulpd %xmm6, %xmm3
movapd %xmm7, %xmm1
mulpd %xmm2, %xmm1
subpd %xmm1, %xmm3
movapd -14400(%rbp), %xmm5 ## 16-byte Reload
unpcklpd -7568(%rbp), %xmm5 ## 16-byte Folded Reload
## xmm5 = xmm5[0],mem[0]
movapd %xmm7, %xmm1
mulpd %xmm5, %xmm1
subpd %xmm1, %xmm3
movapd -10736(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -8464(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
mulpd %xmm10, %xmm2
mulpd %xmm7, %xmm6
addpd %xmm6, %xmm2
mulpd %xmm10, %xmm5
addpd %xmm2, %xmm5
movapd %xmm10, %xmm1
mulpd %xmm0, %xmm1
movapd %xmm3, %xmm6
addpd %xmm1, %xmm6
subpd %xmm1, %xmm3
blendpd $1, %xmm6, %xmm3 ## xmm3 = xmm6[0],xmm3[1]
mulpd %xmm7, %xmm0
movapd %xmm5, %xmm1
addpd %xmm0, %xmm1
subpd %xmm0, %xmm5
blendpd $1, %xmm1, %xmm5 ## xmm5 = xmm1[0],xmm5[1]
mulpd %xmm4, %xmm3
mulpd %xmm13, %xmm5
subpd %xmm5, %xmm3
movupd %xmm3, 760(%rax)
movapd -3696(%rbp), %xmm6 ## 16-byte Reload
unpcklpd -2352(%rbp), %xmm6 ## 16-byte Folded Reload
## xmm6 = xmm6[0],mem[0]
movapd -3104(%rbp), %xmm2 ## 16-byte Reload
unpcklpd -6656(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = xmm2[0],mem[0]
movapd %xmm10, %xmm3
mulpd %xmm6, %xmm3
movapd %xmm7, %xmm1
mulpd %xmm2, %xmm1
subpd %xmm1, %xmm3
movapd -5792(%rbp), %xmm5 ## 16-byte Reload
unpcklpd -2864(%rbp), %xmm5 ## 16-byte Folded Reload
## xmm5 = xmm5[0],mem[0]
movapd %xmm7, %xmm1
mulpd %xmm5, %xmm1
subpd %xmm1, %xmm3
movapd -4144(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -5024(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
mulpd %xmm10, %xmm2
mulpd %xmm7, %xmm6
addpd %xmm6, %xmm2
mulpd %xmm10, %xmm5
addpd %xmm2, %xmm5
mulpd %xmm0, %xmm10
mulpd %xmm7, %xmm0
movapd %xmm3, %xmm1
addpd %xmm10, %xmm1
subpd %xmm10, %xmm3
blendpd $1, %xmm1, %xmm3 ## xmm3 = xmm1[0],xmm3[1]
mulpd %xmm4, %xmm3
movapd %xmm5, %xmm1
addpd %xmm0, %xmm1
subpd %xmm0, %xmm5
blendpd $1, %xmm1, %xmm5 ## xmm5 = xmm1[0],xmm5[1]
mulpd %xmm13, %xmm5
subpd %xmm5, %xmm3
movupd %xmm3, 776(%rax)
movapd %xmm12, %xmm0
movapd -7936(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm4, %xmm0
movapd %xmm11, %xmm1
movapd -4000(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm2, %xmm1
subsd %xmm1, %xmm0
movapd %xmm11, %xmm1
movapd -7920(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm5, %xmm1
subsd %xmm1, %xmm0
movapd %xmm12, %xmm1
movapd -7952(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm3, %xmm1
addsd %xmm0, %xmm1
mulsd %xmm9, %xmm1
mulsd %xmm11, %xmm4
mulsd %xmm12, %xmm2
addsd %xmm4, %xmm2
mulsd %xmm5, %xmm12
addsd %xmm2, %xmm12
mulsd %xmm3, %xmm11
addsd %xmm12, %xmm11
mulsd %xmm8, %xmm11
subsd %xmm11, %xmm1
movsd -3544(%rbp), %xmm11 ## 8-byte Reload
## xmm11 = mem[0],zero
movsd %xmm1, 792(%rax)
movapd -912(%rbp), %xmm5 ## 16-byte Reload
movapd -848(%rbp), %xmm8 ## 16-byte Reload
movapd -8048(%rbp), %xmm13 ## 16-byte Reload
movapd -10112(%rbp), %xmm0 ## 16-byte Reload
movsd -128(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
jmp LBB19_102
LBB19_100:
movsd -2984(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
addsd -4440(%rbp), %xmm0 ## 8-byte Folded Reload
movsd %xmm0, -2984(%rbp) ## 8-byte Spill
movapd -6384(%rbp), %xmm1 ## 16-byte Reload
addsd %xmm0, %xmm1
addsd -5280(%rbp), %xmm1 ## 8-byte Folded Reload
addsd -1024(%rbp), %xmm1 ## 8-byte Folded Reload
movapd %xmm1, -6384(%rbp) ## 16-byte Spill
movsd -560(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
subsd %xmm1, %xmm0
movsd %xmm0, -560(%rbp) ## 8-byte Spill
movapd -912(%rbp), %xmm5 ## 16-byte Reload
movapd -848(%rbp), %xmm8 ## 16-byte Reload
movapd -8048(%rbp), %xmm13 ## 16-byte Reload
movapd -10112(%rbp), %xmm0 ## 16-byte Reload
movsd -128(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
movsd -3544(%rbp), %xmm11 ## 8-byte Reload
## xmm11 = mem[0],zero
LBB19_102:
movq (%r14), %rax
movapd -64(%rbp), %xmm3 ## 16-byte Reload
movddup %xmm3, %xmm1 ## xmm1 = xmm3[0,0]
movapd %xmm1, -1232(%rbp) ## 16-byte Spill
movapd -208(%rbp), %xmm4 ## 16-byte Reload
movddup %xmm4, %xmm1 ## xmm1 = xmm4[0,0]
movapd %xmm1, -688(%rbp) ## 16-byte Spill
testq %rax, %rax
je LBB19_104
## %bb.103:
movaps -7696(%rbp), %xmm1 ## 16-byte Reload
movsd %xmm1, 800(%rax)
movsd %xmm0, 808(%rax)
movsd %xmm2, 816(%rax)
movsd -2656(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 824(%rax)
movaps -5520(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 832(%rax)
movaps -4224(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 840(%rax)
movsd -2224(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 848(%rax)
movsd -1456(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 856(%rax)
movsd -624(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 864(%rax)
movapd -16112(%rbp), %xmm7 ## 16-byte Reload
unpcklpd -15632(%rbp), %xmm7 ## 16-byte Folded Reload
## xmm7 = xmm7[0],mem[0]
movapd -16384(%rbp), %xmm5 ## 16-byte Reload
unpcklpd -15792(%rbp), %xmm5 ## 16-byte Folded Reload
## xmm5 = xmm5[0],mem[0]
movapd -10304(%rbp), %xmm10 ## 16-byte Reload
movddup %xmm10, %xmm1 ## xmm1 = xmm10[0,0]
movapd %xmm1, %xmm2
mulpd %xmm7, %xmm2
movapd -10288(%rbp), %xmm12 ## 16-byte Reload
movddup %xmm12, %xmm14 ## xmm14 = xmm12[0,0]
movapd %xmm14, %xmm3
mulpd %xmm5, %xmm3
subpd %xmm3, %xmm2
movapd -16096(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -15648(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
movapd %xmm14, %xmm3
mulpd %xmm0, %xmm3
subpd %xmm3, %xmm2
movapd -16352(%rbp), %xmm6 ## 16-byte Reload
unpcklpd -15776(%rbp), %xmm6 ## 16-byte Folded Reload
## xmm6 = xmm6[0],mem[0]
movapd %xmm1, %xmm4
mulpd %xmm6, %xmm4
addpd %xmm2, %xmm4
mulpd %xmm1, %xmm5
mulpd %xmm14, %xmm7
addpd %xmm7, %xmm5
mulpd %xmm1, %xmm0
addpd %xmm5, %xmm0
mulpd %xmm14, %xmm6
addpd %xmm0, %xmm6
movddup %xmm8, %xmm3 ## xmm3 = xmm8[0,0]
mulpd %xmm3, %xmm4
movapd -2144(%rbp), %xmm9 ## 16-byte Reload
movddup %xmm9, %xmm15 ## xmm15 = xmm9[0,0]
mulpd %xmm15, %xmm6
subpd %xmm6, %xmm4
movupd %xmm4, 872(%rax)
movapd -15312(%rbp), %xmm7 ## 16-byte Reload
unpcklpd -15104(%rbp), %xmm7 ## 16-byte Folded Reload
## xmm7 = xmm7[0],mem[0]
movapd -15456(%rbp), %xmm6 ## 16-byte Reload
unpcklpd -15168(%rbp), %xmm6 ## 16-byte Folded Reload
## xmm6 = xmm6[0],mem[0]
movapd %xmm1, %xmm4
mulpd %xmm7, %xmm4
movapd %xmm14, %xmm5
mulpd %xmm6, %xmm5
subpd %xmm5, %xmm4
movapd -15296(%rbp), %xmm2 ## 16-byte Reload
unpcklpd -15088(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = xmm2[0],mem[0]
movapd %xmm14, %xmm5
mulpd %xmm2, %xmm5
subpd %xmm5, %xmm4
movapd -11552(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -15184(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
movapd %xmm1, %xmm5
mulpd %xmm0, %xmm5
addpd %xmm4, %xmm5
mulpd %xmm1, %xmm6
mulpd %xmm14, %xmm7
addpd %xmm7, %xmm6
mulpd %xmm1, %xmm2
addpd %xmm6, %xmm2
mulpd %xmm14, %xmm0
addpd %xmm2, %xmm0
mulpd %xmm3, %xmm5
mulpd %xmm15, %xmm0
subpd %xmm0, %xmm5
movupd %xmm5, 888(%rax)
movapd -14560(%rbp), %xmm6 ## 16-byte Reload
unpcklpd -6176(%rbp), %xmm6 ## 16-byte Folded Reload
## xmm6 = xmm6[0],mem[0]
movapd -14640(%rbp), %xmm2 ## 16-byte Reload
unpcklpd -8352(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = xmm2[0],mem[0]
movapd %xmm1, %xmm4
mulpd %xmm6, %xmm4
movapd %xmm14, %xmm5
mulpd %xmm2, %xmm5
subpd %xmm5, %xmm4
movapd -14576(%rbp), %xmm7 ## 16-byte Reload
unpcklpd -8240(%rbp), %xmm7 ## 16-byte Folded Reload
## xmm7 = xmm7[0],mem[0]
movapd %xmm14, %xmm5
mulpd %xmm7, %xmm5
subpd %xmm5, %xmm4
movapd -14656(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -8336(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
mulpd %xmm1, %xmm2
mulpd %xmm14, %xmm6
addpd %xmm6, %xmm2
mulpd %xmm1, %xmm7
addpd %xmm2, %xmm7
movapd %xmm1, %xmm5
mulpd %xmm0, %xmm5
movapd %xmm4, %xmm6
addpd %xmm5, %xmm6
subpd %xmm5, %xmm4
blendpd $1, %xmm6, %xmm4 ## xmm4 = xmm6[0],xmm4[1]
mulpd %xmm14, %xmm0
movapd %xmm7, %xmm5
addpd %xmm0, %xmm5
subpd %xmm0, %xmm7
blendpd $1, %xmm5, %xmm7 ## xmm7 = xmm5[0],xmm7[1]
mulpd %xmm3, %xmm4
mulpd %xmm15, %xmm7
subpd %xmm7, %xmm4
movupd %xmm4, 904(%rax)
movapd -5008(%rbp), %xmm7 ## 16-byte Reload
unpcklpd -5712(%rbp), %xmm7 ## 16-byte Folded Reload
## xmm7 = xmm7[0],mem[0]
movapd -8448(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -6576(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
movapd %xmm1, %xmm4
mulpd %xmm7, %xmm4
movapd %xmm14, %xmm5
mulpd %xmm0, %xmm5
subpd %xmm5, %xmm4
movapd -3088(%rbp), %xmm6 ## 16-byte Reload
unpcklpd -5696(%rbp), %xmm6 ## 16-byte Folded Reload
## xmm6 = xmm6[0],mem[0]
movapd %xmm14, %xmm5
mulpd %xmm6, %xmm5
subpd %xmm5, %xmm4
movapd -4736(%rbp), %xmm2 ## 16-byte Reload
unpcklpd -6560(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = xmm2[0],mem[0]
mulpd %xmm1, %xmm0
mulpd %xmm14, %xmm7
addpd %xmm7, %xmm0
mulpd %xmm1, %xmm6
addpd %xmm0, %xmm6
mulpd %xmm2, %xmm1
mulpd %xmm14, %xmm2
movapd %xmm4, %xmm0
addpd %xmm1, %xmm0
subpd %xmm1, %xmm4
blendpd $1, %xmm0, %xmm4 ## xmm4 = xmm0[0],xmm4[1]
mulpd %xmm3, %xmm4
movapd %xmm6, %xmm0
addpd %xmm2, %xmm0
subpd %xmm2, %xmm6
blendpd $1, %xmm0, %xmm6 ## xmm6 = xmm0[0],xmm6[1]
mulpd %xmm15, %xmm6
subpd %xmm6, %xmm4
movupd %xmm4, 920(%rax)
movapd %xmm10, %xmm0
movapd -4016(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm4, %xmm0
movapd %xmm12, %xmm1
movapd -3616(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm2, %xmm1
subsd %xmm1, %xmm0
movapd %xmm12, %xmm1
movapd -7808(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm5, %xmm1
subsd %xmm1, %xmm0
movapd %xmm10, %xmm1
movapd -4640(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm3, %xmm1
addsd %xmm0, %xmm1
mulsd %xmm8, %xmm1
mulsd %xmm12, %xmm4
mulsd %xmm10, %xmm2
addsd %xmm4, %xmm2
mulsd %xmm5, %xmm10
addsd %xmm2, %xmm10
mulsd %xmm3, %xmm12
addsd %xmm10, %xmm12
mulsd %xmm9, %xmm12
subsd %xmm12, %xmm1
movsd %xmm1, 936(%rax)
movaps -7712(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 944(%rax)
movsd -7096(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 952(%rax)
movsd -8928(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 960(%rax)
movsd -8912(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 968(%rax)
movsd -3440(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 976(%rax)
movsd -3216(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 984(%rax)
movsd -2208(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 992(%rax)
movsd -832(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 1000(%rax)
movsd -560(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 1008(%rax)
movapd -16224(%rbp), %xmm6 ## 16-byte Reload
unpcklpd -15744(%rbp), %xmm6 ## 16-byte Folded Reload
## xmm6 = xmm6[0],mem[0]
movapd -8000(%rbp), %xmm5 ## 16-byte Reload
unpcklpd -15920(%rbp), %xmm5 ## 16-byte Folded Reload
## xmm5 = xmm5[0],mem[0]
movapd -10368(%rbp), %xmm10 ## 16-byte Reload
movddup %xmm10, %xmm1 ## xmm1 = xmm10[0,0]
movapd %xmm1, %xmm2
mulpd %xmm6, %xmm2
movapd -10352(%rbp), %xmm14 ## 16-byte Reload
movddup %xmm14, %xmm0 ## xmm0 = xmm14[0,0]
movapd %xmm0, %xmm3
mulpd %xmm5, %xmm3
subpd %xmm3, %xmm2
movapd -16208(%rbp), %xmm7 ## 16-byte Reload
unpcklpd -15760(%rbp), %xmm7 ## 16-byte Folded Reload
## xmm7 = xmm7[0],mem[0]
movapd %xmm0, %xmm3
mulpd %xmm7, %xmm3
subpd %xmm3, %xmm2
unpcklpd -15936(%rbp), %xmm13 ## 16-byte Folded Reload
## xmm13 = xmm13[0],mem[0]
movapd %xmm1, %xmm4
mulpd %xmm13, %xmm4
addpd %xmm2, %xmm4
mulpd %xmm1, %xmm5
mulpd %xmm0, %xmm6
addpd %xmm6, %xmm5
mulpd %xmm1, %xmm7
addpd %xmm5, %xmm7
mulpd %xmm0, %xmm13
addpd %xmm7, %xmm13
movapd -1216(%rbp), %xmm9 ## 16-byte Reload
movddup %xmm9, %xmm15 ## xmm15 = xmm9[0,0]
mulpd %xmm15, %xmm4
movapd -1936(%rbp), %xmm8 ## 16-byte Reload
movddup %xmm8, %xmm12 ## xmm12 = xmm8[0,0]
mulpd %xmm12, %xmm13
subpd %xmm13, %xmm4
movupd %xmm4, 1016(%rax)
movapd -15440(%rbp), %xmm7 ## 16-byte Reload
unpcklpd -14784(%rbp), %xmm7 ## 16-byte Folded Reload
## xmm7 = xmm7[0],mem[0]
movapd -15600(%rbp), %xmm6 ## 16-byte Reload
unpcklpd -14944(%rbp), %xmm6 ## 16-byte Folded Reload
## xmm6 = xmm6[0],mem[0]
movapd %xmm1, %xmm4
mulpd %xmm7, %xmm4
movapd %xmm0, %xmm5
mulpd %xmm6, %xmm5
subpd %xmm5, %xmm4
movapd -15424(%rbp), %xmm3 ## 16-byte Reload
unpcklpd -14768(%rbp), %xmm3 ## 16-byte Folded Reload
## xmm3 = xmm3[0],mem[0]
movapd %xmm0, %xmm5
mulpd %xmm3, %xmm5
subpd %xmm5, %xmm4
movapd -11648(%rbp), %xmm2 ## 16-byte Reload
unpcklpd -14976(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = xmm2[0],mem[0]
movapd %xmm1, %xmm5
mulpd %xmm2, %xmm5
addpd %xmm4, %xmm5
mulpd %xmm1, %xmm6
mulpd %xmm0, %xmm7
addpd %xmm7, %xmm6
mulpd %xmm1, %xmm3
addpd %xmm6, %xmm3
mulpd %xmm0, %xmm2
addpd %xmm3, %xmm2
mulpd %xmm15, %xmm5
mulpd %xmm12, %xmm2
subpd %xmm2, %xmm5
movupd %xmm5, 1032(%rax)
movapd -14416(%rbp), %xmm6 ## 16-byte Reload
unpcklpd -8496(%rbp), %xmm6 ## 16-byte Folded Reload
## xmm6 = xmm6[0],mem[0]
movapd -14480(%rbp), %xmm3 ## 16-byte Reload
unpcklpd -8688(%rbp), %xmm3 ## 16-byte Folded Reload
## xmm3 = xmm3[0],mem[0]
movapd %xmm1, %xmm4
mulpd %xmm6, %xmm4
movapd %xmm0, %xmm5
mulpd %xmm3, %xmm5
subpd %xmm5, %xmm4
movapd -14432(%rbp), %xmm7 ## 16-byte Reload
unpcklpd -8480(%rbp), %xmm7 ## 16-byte Folded Reload
## xmm7 = xmm7[0],mem[0]
movapd %xmm0, %xmm5
mulpd %xmm7, %xmm5
subpd %xmm5, %xmm4
movapd -16496(%rbp), %xmm2 ## 16-byte Reload
unpcklpd -8672(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = xmm2[0],mem[0]
mulpd %xmm1, %xmm3
mulpd %xmm0, %xmm6
addpd %xmm6, %xmm3
mulpd %xmm1, %xmm7
addpd %xmm3, %xmm7
movapd %xmm1, %xmm5
mulpd %xmm2, %xmm5
movapd %xmm4, %xmm6
addpd %xmm5, %xmm6
subpd %xmm5, %xmm4
blendpd $1, %xmm6, %xmm4 ## xmm4 = xmm6[0],xmm4[1]
mulpd %xmm0, %xmm2
movapd %xmm7, %xmm5
addpd %xmm2, %xmm5
subpd %xmm2, %xmm7
blendpd $1, %xmm5, %xmm7 ## xmm7 = xmm5[0],xmm7[1]
mulpd %xmm15, %xmm4
mulpd %xmm12, %xmm7
subpd %xmm7, %xmm4
movupd %xmm4, 1048(%rax)
movapd -5104(%rbp), %xmm7 ## 16-byte Reload
unpcklpd -5856(%rbp), %xmm7 ## 16-byte Folded Reload
## xmm7 = xmm7[0],mem[0]
movapd -7904(%rbp), %xmm3 ## 16-byte Reload
unpcklpd -5808(%rbp), %xmm3 ## 16-byte Folded Reload
## xmm3 = xmm3[0],mem[0]
movapd %xmm1, %xmm4
mulpd %xmm7, %xmm4
movapd %xmm0, %xmm5
mulpd %xmm3, %xmm5
subpd %xmm5, %xmm4
movapd -5120(%rbp), %xmm6 ## 16-byte Reload
unpcklpd -5840(%rbp), %xmm6 ## 16-byte Folded Reload
## xmm6 = xmm6[0],mem[0]
movapd %xmm0, %xmm5
mulpd %xmm6, %xmm5
subpd %xmm5, %xmm4
movapd -3776(%rbp), %xmm2 ## 16-byte Reload
unpcklpd -4112(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = xmm2[0],mem[0]
mulpd %xmm1, %xmm3
mulpd %xmm0, %xmm7
addpd %xmm7, %xmm3
mulpd %xmm1, %xmm6
addpd %xmm3, %xmm6
mulpd %xmm2, %xmm1
mulpd %xmm0, %xmm2
movapd %xmm4, %xmm0
addpd %xmm1, %xmm0
subpd %xmm1, %xmm4
blendpd $1, %xmm0, %xmm4 ## xmm4 = xmm0[0],xmm4[1]
mulpd %xmm15, %xmm4
movapd %xmm6, %xmm0
addpd %xmm2, %xmm0
subpd %xmm2, %xmm6
blendpd $1, %xmm0, %xmm6 ## xmm6 = xmm0[0],xmm6[1]
mulpd %xmm12, %xmm6
subpd %xmm6, %xmm4
movupd %xmm4, 1064(%rax)
movapd %xmm10, %xmm0
movapd -6688(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm4, %xmm0
movapd %xmm14, %xmm1
movapd -4096(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm2, %xmm1
subsd %xmm1, %xmm0
movapd %xmm14, %xmm1
movapd -6672(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm5, %xmm1
subsd %xmm1, %xmm0
movapd %xmm10, %xmm1
movapd -4752(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm3, %xmm1
addsd %xmm0, %xmm1
mulsd %xmm9, %xmm1
mulsd %xmm14, %xmm4
mulsd %xmm10, %xmm2
addsd %xmm4, %xmm2
mulsd %xmm5, %xmm10
addsd %xmm2, %xmm10
mulsd %xmm3, %xmm14
addsd %xmm10, %xmm14
mulsd %xmm8, %xmm14
subsd %xmm14, %xmm1
movsd %xmm1, 1080(%rax)
movaps -6720(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 1088(%rax)
movsd -7112(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 1096(%rax)
movsd -3496(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 1104(%rax)
movsd -8944(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 1112(%rax)
movsd -3448(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 1120(%rax)
movaps -3840(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 1128(%rax)
movsd -2216(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 1136(%rax)
movsd -1472(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 1144(%rax)
movsd -48(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 1152(%rax)
movapd -16176(%rbp), %xmm4 ## 16-byte Reload
unpcklpd -15712(%rbp), %xmm4 ## 16-byte Folded Reload
## xmm4 = xmm4[0],mem[0]
movapd -16160(%rbp), %xmm5 ## 16-byte Reload
unpcklpd -16032(%rbp), %xmm5 ## 16-byte Folded Reload
## xmm5 = xmm5[0],mem[0]
movapd -16528(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm2
mulpd %xmm4, %xmm2
movapd -16512(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm0
mulpd %xmm5, %xmm0
subpd %xmm0, %xmm2
movapd -16432(%rbp), %xmm1 ## 16-byte Reload
unpcklpd -15872(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = xmm1[0],mem[0]
movapd %xmm6, %xmm0
mulpd %xmm1, %xmm0
subpd %xmm0, %xmm2
movapd -16416(%rbp), %xmm3 ## 16-byte Reload
unpcklpd -15888(%rbp), %xmm3 ## 16-byte Folded Reload
## xmm3 = xmm3[0],mem[0]
movapd %xmm7, %xmm0
mulpd %xmm3, %xmm0
subpd %xmm0, %xmm2
mulpd %xmm6, %xmm5
mulpd %xmm7, %xmm4
addpd %xmm5, %xmm4
mulpd %xmm7, %xmm1
subpd %xmm1, %xmm4
mulpd %xmm6, %xmm3
addpd %xmm4, %xmm3
movapd -1616(%rbp), %xmm9 ## 16-byte Reload
movddup %xmm9, %xmm10 ## xmm10 = xmm9[0,0]
mulpd %xmm10, %xmm2
movapd -1088(%rbp), %xmm8 ## 16-byte Reload
movddup %xmm8, %xmm12 ## xmm12 = xmm8[0,0]
mulpd %xmm12, %xmm3
subpd %xmm3, %xmm2
movupd %xmm2, 1160(%rax)
movapd -15392(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -15136(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
movapd -15376(%rbp), %xmm1 ## 16-byte Reload
unpcklpd -15152(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = xmm1[0],mem[0]
movapd %xmm6, %xmm2
mulpd %xmm0, %xmm2
movapd %xmm7, %xmm3
mulpd %xmm1, %xmm3
subpd %xmm3, %xmm2
movapd -15584(%rbp), %xmm4 ## 16-byte Reload
unpcklpd -15216(%rbp), %xmm4 ## 16-byte Folded Reload
## xmm4 = xmm4[0],mem[0]
movapd %xmm6, %xmm3
mulpd %xmm4, %xmm3
subpd %xmm3, %xmm2
movapd -15552(%rbp), %xmm5 ## 16-byte Reload
unpcklpd -15248(%rbp), %xmm5 ## 16-byte Folded Reload
## xmm5 = xmm5[0],mem[0]
movapd %xmm7, %xmm3
mulpd %xmm5, %xmm3
subpd %xmm3, %xmm2
mulpd %xmm6, %xmm1
mulpd %xmm7, %xmm0
addpd %xmm1, %xmm0
mulpd %xmm7, %xmm4
subpd %xmm4, %xmm0
mulpd %xmm6, %xmm5
addpd %xmm0, %xmm5
mulpd %xmm10, %xmm2
mulpd %xmm12, %xmm5
subpd %xmm5, %xmm2
movupd %xmm2, 1176(%rax)
movapd -14624(%rbp), %xmm4 ## 16-byte Reload
unpcklpd -6192(%rbp), %xmm4 ## 16-byte Folded Reload
## xmm4 = xmm4[0],mem[0]
movapd -14608(%rbp), %xmm5 ## 16-byte Reload
unpcklpd -8272(%rbp), %xmm5 ## 16-byte Folded Reload
## xmm5 = xmm5[0],mem[0]
movapd %xmm6, %xmm2
mulpd %xmm4, %xmm2
movapd %xmm7, %xmm3
mulpd %xmm5, %xmm3
subpd %xmm3, %xmm2
movapd -14688(%rbp), %xmm1 ## 16-byte Reload
unpcklpd -8384(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = xmm1[0],mem[0]
movapd %xmm6, %xmm3
mulpd %xmm1, %xmm3
subpd %xmm3, %xmm2
movapd -14720(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -8368(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
movapd %xmm7, %xmm3
mulpd %xmm0, %xmm3
subpd %xmm3, %xmm2
mulpd %xmm6, %xmm5
mulpd %xmm7, %xmm4
addpd %xmm5, %xmm4
mulpd %xmm7, %xmm1
subpd %xmm1, %xmm4
mulpd %xmm6, %xmm0
addpd %xmm4, %xmm0
mulpd %xmm10, %xmm2
mulpd %xmm12, %xmm0
subpd %xmm0, %xmm2
movupd %xmm2, 1192(%rax)
movapd -6496(%rbp), %xmm5 ## 16-byte Reload
unpcklpd -2304(%rbp), %xmm5 ## 16-byte Folded Reload
## xmm5 = xmm5[0],mem[0]
movapd -6512(%rbp), %xmm4 ## 16-byte Reload
unpcklpd -2032(%rbp), %xmm4 ## 16-byte Folded Reload
## xmm4 = xmm4[0],mem[0]
movapd %xmm6, %xmm2
mulpd %xmm5, %xmm2
movapd %xmm7, %xmm3
mulpd %xmm4, %xmm3
subpd %xmm3, %xmm2
movapd -6224(%rbp), %xmm1 ## 16-byte Reload
unpcklpd -11696(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = xmm1[0],mem[0]
movapd %xmm6, %xmm3
mulpd %xmm1, %xmm3
subpd %xmm3, %xmm2
movapd -5472(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -1808(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
movapd %xmm7, %xmm3
mulpd %xmm0, %xmm3
subpd %xmm3, %xmm2
mulpd %xmm6, %xmm4
mulpd %xmm7, %xmm5
addpd %xmm4, %xmm5
mulpd %xmm7, %xmm1
subpd %xmm1, %xmm5
mulpd %xmm6, %xmm0
addpd %xmm5, %xmm0
mulpd %xmm10, %xmm2
mulpd %xmm12, %xmm0
subpd %xmm0, %xmm2
movupd %xmm2, 1208(%rax)
movapd -11968(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm0
movapd -5680(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm7, %xmm0
movapd -11952(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm1
movsd -2448(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd %xmm6, %xmm1
subsd %xmm1, %xmm0
movapd %xmm2, %xmm1
movsd -4192(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm4, %xmm1
addsd %xmm0, %xmm1
movapd %xmm3, %xmm0
movapd -5744(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm5, %xmm0
subsd %xmm0, %xmm1
mulsd %xmm9, %xmm1
mulsd %xmm2, %xmm6
mulsd %xmm3, %xmm7
addsd %xmm6, %xmm7
mulsd %xmm4, %xmm3
addsd %xmm7, %xmm3
mulsd %xmm5, %xmm2
addsd %xmm3, %xmm2
mulsd %xmm8, %xmm2
subsd %xmm2, %xmm1
movsd %xmm1, 1224(%rax)
movaps -1072(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 1232(%rax)
movaps -11712(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 1240(%rax)
movsd -3504(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 1248(%rax)
movsd -3480(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 1256(%rax)
movsd -3456(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 1264(%rax)
movsd -1136(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 1272(%rax)
movsd -3008(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 1280(%rax)
movsd -672(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 1288(%rax)
movaps LCPI19_135(%rip), %xmm8 ## xmm8 = [-0.0E+0,-0.0E+0]
movaps -6384(%rbp), %xmm0 ## 16-byte Reload
xorps %xmm8, %xmm0
movlps %xmm0, 1296(%rax)
movapd -16336(%rbp), %xmm5 ## 16-byte Reload
unpcklpd -15824(%rbp), %xmm5 ## 16-byte Folded Reload
## xmm5 = xmm5[0],mem[0]
movapd -16320(%rbp), %xmm6 ## 16-byte Reload
unpcklpd -15840(%rbp), %xmm6 ## 16-byte Folded Reload
## xmm6 = xmm6[0],mem[0]
movapd -16640(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm3
mulpd %xmm5, %xmm3
movapd -16624(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm1
mulpd %xmm6, %xmm1
subpd %xmm1, %xmm3
movapd -14144(%rbp), %xmm2 ## 16-byte Reload
unpcklpd -16048(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = xmm2[0],mem[0]
movapd %xmm7, %xmm1
mulpd %xmm2, %xmm1
subpd %xmm1, %xmm3
movapd -11984(%rbp), %xmm4 ## 16-byte Reload
unpcklpd -16080(%rbp), %xmm4 ## 16-byte Folded Reload
## xmm4 = xmm4[0],mem[0]
movapd %xmm0, %xmm1
mulpd %xmm4, %xmm1
subpd %xmm1, %xmm3
mulpd %xmm7, %xmm6
mulpd %xmm0, %xmm5
addpd %xmm6, %xmm5
mulpd %xmm0, %xmm2
subpd %xmm2, %xmm5
mulpd %xmm7, %xmm4
addpd %xmm5, %xmm4
movapd -1312(%rbp), %xmm10 ## 16-byte Reload
movddup %xmm10, %xmm12 ## xmm12 = xmm10[0,0]
mulpd %xmm12, %xmm3
movapd -1328(%rbp), %xmm9 ## 16-byte Reload
movddup %xmm9, %xmm13 ## xmm13 = xmm9[0,0]
mulpd %xmm13, %xmm4
subpd %xmm4, %xmm3
movupd %xmm3, 1304(%rax)
movapd -15536(%rbp), %xmm1 ## 16-byte Reload
unpcklpd -14896(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = xmm1[0],mem[0]
movapd -15520(%rbp), %xmm2 ## 16-byte Reload
unpcklpd -14912(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = xmm2[0],mem[0]
movapd %xmm7, %xmm3
mulpd %xmm1, %xmm3
movapd %xmm0, %xmm4
mulpd %xmm2, %xmm4
subpd %xmm4, %xmm3
movapd -15664(%rbp), %xmm6 ## 16-byte Reload
unpcklpd -15056(%rbp), %xmm6 ## 16-byte Folded Reload
## xmm6 = xmm6[0],mem[0]
movapd %xmm7, %xmm4
mulpd %xmm6, %xmm4
subpd %xmm4, %xmm3
movapd -15680(%rbp), %xmm5 ## 16-byte Reload
unpcklpd -15072(%rbp), %xmm5 ## 16-byte Folded Reload
## xmm5 = xmm5[0],mem[0]
movapd %xmm0, %xmm4
mulpd %xmm5, %xmm4
subpd %xmm4, %xmm3
mulpd %xmm7, %xmm2
mulpd %xmm0, %xmm1
addpd %xmm2, %xmm1
mulpd %xmm0, %xmm6
subpd %xmm6, %xmm1
mulpd %xmm7, %xmm5
addpd %xmm1, %xmm5
mulpd %xmm12, %xmm3
mulpd %xmm13, %xmm5
subpd %xmm5, %xmm3
movupd %xmm3, 1320(%rax)
movapd -14464(%rbp), %xmm5 ## 16-byte Reload
unpcklpd -8512(%rbp), %xmm5 ## 16-byte Folded Reload
## xmm5 = xmm5[0],mem[0]
movapd -14448(%rbp), %xmm6 ## 16-byte Reload
unpcklpd -8528(%rbp), %xmm6 ## 16-byte Folded Reload
## xmm6 = xmm6[0],mem[0]
movapd %xmm7, %xmm3
mulpd %xmm5, %xmm3
movapd %xmm0, %xmm4
mulpd %xmm6, %xmm4
subpd %xmm4, %xmm3
movapd -14544(%rbp), %xmm2 ## 16-byte Reload
unpcklpd -8720(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = xmm2[0],mem[0]
movapd %xmm7, %xmm4
mulpd %xmm2, %xmm4
subpd %xmm4, %xmm3
movapd -14592(%rbp), %xmm1 ## 16-byte Reload
unpcklpd -8704(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = xmm1[0],mem[0]
movapd %xmm0, %xmm4
mulpd %xmm1, %xmm4
subpd %xmm4, %xmm3
mulpd %xmm7, %xmm6
mulpd %xmm0, %xmm5
addpd %xmm6, %xmm5
mulpd %xmm0, %xmm2
subpd %xmm2, %xmm5
mulpd %xmm7, %xmm1
addpd %xmm5, %xmm1
mulpd %xmm12, %xmm3
mulpd %xmm13, %xmm1
subpd %xmm1, %xmm3
movupd %xmm3, 1336(%rax)
movapd -3152(%rbp), %xmm6 ## 16-byte Reload
unpcklpd -5072(%rbp), %xmm6 ## 16-byte Folded Reload
## xmm6 = xmm6[0],mem[0]
movapd -3168(%rbp), %xmm5 ## 16-byte Reload
unpcklpd -5088(%rbp), %xmm5 ## 16-byte Folded Reload
## xmm5 = xmm5[0],mem[0]
movapd %xmm7, %xmm3
mulpd %xmm6, %xmm3
movapd %xmm0, %xmm4
mulpd %xmm5, %xmm4
subpd %xmm4, %xmm3
movapd -6592(%rbp), %xmm2 ## 16-byte Reload
unpcklpd -9152(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = xmm2[0],mem[0]
movapd %xmm7, %xmm4
mulpd %xmm2, %xmm4
subpd %xmm4, %xmm3
movapd -6608(%rbp), %xmm1 ## 16-byte Reload
unpcklpd -3760(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = xmm1[0],mem[0]
movapd %xmm0, %xmm4
mulpd %xmm1, %xmm4
subpd %xmm4, %xmm3
mulpd %xmm7, %xmm5
mulpd %xmm0, %xmm6
addpd %xmm5, %xmm6
mulpd %xmm0, %xmm2
subpd %xmm2, %xmm6
mulpd %xmm7, %xmm1
addpd %xmm6, %xmm1
mulpd %xmm12, %xmm3
mulpd %xmm13, %xmm1
subpd %xmm1, %xmm3
movupd %xmm3, 1352(%rax)
movapd -12032(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm1
movapd -5824(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm7, %xmm1
movapd -12016(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm2
movsd -4456(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm0, %xmm2
subsd %xmm2, %xmm1
movapd %xmm3, %xmm2
movapd -3744(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm5, %xmm2
addsd %xmm1, %xmm2
movapd %xmm4, %xmm1
movapd -3728(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm6, %xmm1
subsd %xmm1, %xmm2
mulsd %xmm10, %xmm2
mulsd %xmm3, %xmm0
mulsd %xmm4, %xmm7
addsd %xmm0, %xmm7
mulsd %xmm5, %xmm4
addsd %xmm7, %xmm4
mulsd %xmm6, %xmm3
addsd %xmm4, %xmm3
mulsd %xmm9, %xmm3
subsd %xmm3, %xmm2
movsd %xmm2, 1368(%rax)
movaps -880(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 1376(%rax)
movaps -11824(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 1384(%rax)
movsd -6704(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 1392(%rax)
movsd -2976(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 1400(%rax)
movsd -3464(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 1408(%rax)
movaps -3856(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 1416(%rax)
movsd %xmm11, 1424(%rax)
movsd -640(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 1432(%rax)
movaps -6400(%rbp), %xmm0 ## 16-byte Reload
xorps %xmm8, %xmm0
movlps %xmm0, 1440(%rax)
movsd -7016(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
subsd -12472(%rbp), %xmm1 ## 8-byte Folded Reload
movapd %xmm1, -48(%rbp) ## 16-byte Spill
movapd -864(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm1, %xmm0
movapd -576(%rbp), %xmm1 ## 16-byte Reload
mulsd -11280(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -736(%rbp), %xmm0 ## 16-byte Reload
movapd -14064(%rbp), %xmm15 ## 16-byte Reload
mulsd %xmm15, %xmm0
addsd %xmm1, %xmm0
movapd %xmm0, -672(%rbp) ## 16-byte Spill
movapd -5136(%rbp), %xmm1 ## 16-byte Reload
blendpd $1, -8976(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = mem[0],xmm1[1]
movapd LCPI19_136(%rip), %xmm0 ## xmm0 = <-9.7139985562876935E-5,u>
unpcklpd -11232(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
mulpd %xmm1, %xmm0
movapd %xmm0, -848(%rbp) ## 16-byte Spill
movapd -16256(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm1
unpcklpd -13616(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = xmm1[0],mem[0]
movddup -1344(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = mem[0,0]
movapd %xmm0, -640(%rbp) ## 16-byte Spill
mulpd %xmm0, %xmm1
movapd -16288(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm2
unpcklpd -10176(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = xmm2[0],mem[0]
mulpd -2416(%rbp), %xmm2 ## 16-byte Folded Reload
subpd %xmm2, %xmm1
movapd -16448(%rbp), %xmm14 ## 16-byte Reload
unpcklpd -16000(%rbp), %xmm14 ## 16-byte Folded Reload
## xmm14 = xmm14[0],mem[0]
movapd -2592(%rbp), %xmm2 ## 16-byte Reload
mulpd %xmm14, %xmm2
subpd %xmm2, %xmm1
movapd -10272(%rbp), %xmm12 ## 16-byte Reload
unpcklpd -16064(%rbp), %xmm12 ## 16-byte Folded Reload
## xmm12 = xmm12[0],mem[0]
movddup -3280(%rbp), %xmm10 ## 16-byte Folded Reload
## xmm10 = mem[0,0]
movapd %xmm10, -128(%rbp) ## 16-byte Spill
mulpd %xmm12, %xmm10
addpd %xmm1, %xmm10
movapd -96(%rbp), %xmm11 ## 16-byte Reload
movapd -11920(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm11, %xmm0
movapd -9392(%rbp), %xmm1 ## 16-byte Reload
mulsd -5936(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -14016(%rbp), %xmm4 ## 16-byte Reload
mulsd -256(%rbp), %xmm4 ## 16-byte Folded Reload
subsd %xmm1, %xmm4
movapd -6832(%rbp), %xmm1 ## 16-byte Reload
mulsd -5952(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm4
movapd %xmm4, %xmm1
movsd LCPI19_48(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm1
movapd -896(%rbp), %xmm3 ## 16-byte Reload
movapd -14112(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm7, %xmm3
addsd %xmm1, %xmm3
movapd -5632(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm2
movapd -11904(%rbp), %xmm13 ## 16-byte Reload
mulsd %xmm13, %xmm2
addsd %xmm3, %xmm2
movapd -16368(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm3
unpcklpd -14000(%rbp), %xmm3 ## 16-byte Folded Reload
## xmm3 = xmm3[0],mem[0]
movapd -1904(%rbp), %xmm1 ## 16-byte Reload
unpcklpd %xmm11, %xmm1 ## xmm1 = xmm1[0],xmm11[0]
mulpd %xmm1, %xmm3
movsd LCPI19_47(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm4
mulsd -1888(%rbp), %xmm7 ## 16-byte Folded Reload
addsd %xmm4, %xmm7
movapd %xmm6, %xmm1
movapd -9248(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm4, %xmm1
addsd %xmm7, %xmm1
movapd -1488(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm0, %xmm13
movapd %xmm0, %xmm6
movsd -5320(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
addsd %xmm13, %xmm0
mulsd -864(%rbp), %xmm4 ## 16-byte Folded Reload
addsd %xmm0, %xmm4
movsd LCPI19_14(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm4
addsd %xmm1, %xmm4
mulsd -1584(%rbp), %xmm9 ## 16-byte Folded Reload
addpd %xmm4, %xmm9
unpcklpd -10176(%rbp), %xmm8 ## 16-byte Folded Reload
## xmm8 = xmm8[0],mem[0]
movapd -2416(%rbp), %xmm1 ## 16-byte Reload
movapd -1344(%rbp), %xmm0 ## 16-byte Reload
unpcklpd %xmm0, %xmm1 ## xmm1 = xmm1[0],xmm0[0]
mulpd %xmm1, %xmm8
unpcklpd -13616(%rbp), %xmm5 ## 16-byte Folded Reload
## xmm5 = xmm5[0],mem[0]
mulpd %xmm0, %xmm5
addpd %xmm8, %xmm5
mulpd -128(%rbp), %xmm14 ## 16-byte Folded Reload
addpd %xmm5, %xmm14
mulpd -2592(%rbp), %xmm12 ## 16-byte Folded Reload
addpd %xmm14, %xmm12
movapd %xmm2, %xmm1
unpcklpd -14048(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = xmm1[0],mem[0]
addpd %xmm3, %xmm2
subpd %xmm3, %xmm1
movsd LCPI19_107(%rip), %xmm0 ## xmm0 = mem[0],zero
movapd -8976(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm0, %xmm3
movapd %xmm3, -8976(%rbp) ## 16-byte Spill
addpd %xmm3, %xmm2
movapd LCPI19_137(%rip), %xmm7 ## xmm7 = <u,5.7710603797978145E-3>
mulpd %xmm1, %xmm7
blendpd $1, %xmm2, %xmm7 ## xmm7 = xmm2[0],xmm7[1]
movapd -48(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm2
unpcklpd -11232(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = xmm2[0],mem[0]
movapd %xmm6, %xmm4
mulsd %xmm0, %xmm4
movapd %xmm0, %xmm6
unpcklpd -11808(%rbp), %xmm4 ## 16-byte Folded Reload
## xmm4 = xmm4[0],mem[0]
movapd -11760(%rbp), %xmm0 ## 16-byte Reload
movapd -11280(%rbp), %xmm3 ## 16-byte Reload
unpcklpd %xmm0, %xmm3 ## xmm3 = xmm3[0],xmm0[0]
mulpd -1520(%rbp), %xmm3 ## 16-byte Folded Reload
addpd %xmm3, %xmm4
movapd -176(%rbp), %xmm3 ## 16-byte Reload
blendpd $2, -208(%rbp), %xmm3 ## 16-byte Folded Reload
## xmm3 = xmm3[0],mem[1]
movapd %xmm3, -176(%rbp) ## 16-byte Spill
movapd -11776(%rbp), %xmm13 ## 16-byte Reload
unpcklpd %xmm13, %xmm15 ## xmm15 = xmm15[0],xmm13[0]
mulpd %xmm3, %xmm15
addpd %xmm15, %xmm4
movsd LCPI19_57(%rip), %xmm5 ## xmm5 = mem[0],zero
unpcklpd %xmm0, %xmm5 ## xmm5 = xmm5[0],xmm0[0]
movapd %xmm4, %xmm3
movapd %xmm4, %xmm11
movapd -5632(%rbp), %xmm8 ## 16-byte Reload
unpcklpd %xmm8, %xmm3 ## xmm3 = xmm3[0],xmm8[0]
mulpd %xmm3, %xmm5
movapd %xmm5, %xmm14
movapd %xmm5, -560(%rbp) ## 16-byte Spill
movapd -672(%rbp), %xmm15 ## 16-byte Reload
movapd %xmm15, %xmm0
movapd -1904(%rbp), %xmm3 ## 16-byte Reload
unpcklpd %xmm3, %xmm0 ## xmm0 = xmm0[0],xmm3[0]
movapd LCPI19_138(%rip), %xmm4 ## xmm4 = <-1.9205962859860238E-4,u>
movapd -15904(%rbp), %xmm5 ## 16-byte Reload
unpcklpd %xmm5, %xmm4 ## xmm4 = xmm4[0],xmm5[0]
mulpd %xmm0, %xmm4
movapd %xmm4, -832(%rbp) ## 16-byte Spill
mulpd -5152(%rbp), %xmm2 ## 16-byte Folded Reload
movapd %xmm2, -624(%rbp) ## 16-byte Spill
addpd %xmm2, %xmm7
addpd %xmm14, %xmm7
addpd %xmm4, %xmm7
movapd LCPI19_94(%rip), %xmm4 ## xmm4 = [2.7755575615628914E-17,2.7755575615628914E-17]
movapd %xmm7, %xmm0
mulpd %xmm4, %xmm0
subpd %xmm0, %xmm10
mulpd LCPI19_139(%rip), %xmm1
movapd -848(%rbp), %xmm0 ## 16-byte Reload
subpd %xmm0, %xmm9
addpd %xmm0, %xmm1
blendpd $1, %xmm9, %xmm1 ## xmm1 = xmm9[0],xmm1[1]
unpcklpd %xmm13, %xmm6 ## xmm6 = xmm6[0],xmm13[0]
movapd -5136(%rbp), %xmm0 ## 16-byte Reload
unpcklpd %xmm8, %xmm0 ## xmm0 = xmm0[0],xmm8[0]
mulpd %xmm0, %xmm6
movapd %xmm6, -48(%rbp) ## 16-byte Spill
movapd LCPI19_97(%rip), %xmm14 ## xmm14 = <-8.24218900360216E-4,u>
unpcklpd %xmm5, %xmm14 ## xmm14 = xmm14[0],xmm5[0]
movapd %xmm15, %xmm0
unpcklpd -1584(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
mulpd %xmm0, %xmm14
movapd %xmm14, -1088(%rbp) ## 16-byte Spill
addpd %xmm6, %xmm1
mulpd LCPI19_140(%rip), %xmm11
movapd %xmm11, -672(%rbp) ## 16-byte Spill
addpd %xmm11, %xmm1
addpd %xmm14, %xmm1
movapd LCPI19_141(%rip), %xmm2 ## xmm2 = [1.3877787807814457E-17,1.3877787807814457E-17]
movapd %xmm1, %xmm0
mulpd %xmm2, %xmm0
subpd %xmm0, %xmm10
mulpd %xmm2, %xmm7
subpd %xmm7, %xmm12
mulpd %xmm4, %xmm1
addpd %xmm12, %xmm1
mulpd -9936(%rbp), %xmm10 ## 16-byte Folded Reload
mulpd -9952(%rbp), %xmm1 ## 16-byte Folded Reload
subpd %xmm1, %xmm10
movupd %xmm10, 1448(%rax)
movapd -10880(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -15360(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
movapd %xmm0, %xmm11
movapd %xmm0, -10880(%rbp) ## 16-byte Spill
movapd -15728(%rbp), %xmm15 ## 16-byte Reload
unpcklpd -15408(%rbp), %xmm15 ## 16-byte Folded Reload
## xmm15 = xmm15[0],mem[0]
movapd -256(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm0
unpcklpd %xmm3, %xmm0 ## xmm0 = xmm0[0],xmm3[0]
movapd -13872(%rbp), %xmm10 ## 16-byte Reload
unpcklpd -13584(%rbp), %xmm10 ## 16-byte Folded Reload
## xmm10 = xmm10[0],mem[0]
mulpd %xmm0, %xmm10
movapd %xmm8, %xmm12
movapd -11584(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm0, %xmm12
movapd -1488(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm3, %xmm0
movsd -7080(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
addsd %xmm0, %xmm2
movapd %xmm8, %xmm13
movapd %xmm8, %xmm6
movapd -11600(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm0, %xmm13
movapd -864(%rbp), %xmm8 ## 16-byte Reload
mulsd %xmm8, %xmm0
addsd %xmm2, %xmm0
movsd -9672(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -96(%rbp), %xmm1 ## 16-byte Folded Reload
movsd -1056(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd -5936(%rbp), %xmm5 ## 16-byte Folded Reload
addsd %xmm1, %xmm5
movsd -9664(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm4, %xmm1
subsd %xmm5, %xmm1
movapd -1040(%rbp), %xmm5 ## 16-byte Reload
mulsd -5952(%rbp), %xmm5 ## 16-byte Folded Reload
subsd %xmm5, %xmm1
movapd %xmm1, %xmm5
movsd LCPI19_48(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm5
movapd -896(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm2
movsd -9680(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm4, %xmm2
addsd %xmm5, %xmm2
movapd %xmm6, %xmm7
movapd -11504(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm5, %xmm7
addsd %xmm2, %xmm7
movsd LCPI19_47(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm1
mulsd -1888(%rbp), %xmm4 ## 16-byte Folded Reload
addsd %xmm1, %xmm4
movapd %xmm6, %xmm2
movapd -3632(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm1, %xmm2
addsd %xmm4, %xmm2
mulsd %xmm3, %xmm5
movsd -5312(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
addsd %xmm5, %xmm4
mulsd %xmm8, %xmm1
addsd %xmm4, %xmm1
movsd LCPI19_14(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm1
addsd %xmm2, %xmm1
movapd -15616(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm5
unpcklpd -13552(%rbp), %xmm5 ## 16-byte Folded Reload
## xmm5 = xmm5[0],mem[0]
mulpd -640(%rbp), %xmm5 ## 16-byte Folded Reload
movapd -11632(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm2
unpcklpd -13568(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = xmm2[0],mem[0]
mulpd -2416(%rbp), %xmm2 ## 16-byte Folded Reload
subpd %xmm2, %xmm5
movapd -2592(%rbp), %xmm2 ## 16-byte Reload
mulpd %xmm11, %xmm2
subpd %xmm2, %xmm5
movapd -128(%rbp), %xmm14 ## 16-byte Reload
movapd %xmm14, %xmm2
mulpd %xmm15, %xmm2
addpd %xmm2, %xmm5
movapd -14080(%rbp), %xmm6 ## 16-byte Reload
unpcklpd %xmm7, %xmm6 ## xmm6 = xmm6[0],xmm7[0]
movapd %xmm10, %xmm7
subpd %xmm6, %xmm7
addpd %xmm10, %xmm6
movapd -10048(%rbp), %xmm11 ## 16-byte Reload
mulsd %xmm11, %xmm9
movapd %xmm9, -1024(%rbp) ## 16-byte Spill
movapd %xmm7, %xmm2
movsd LCPI19_48(%rip), %xmm3 ## xmm3 = mem[0],zero
mulpd %xmm3, %xmm2
movddup -8976(%rbp), %xmm3 ## 16-byte Folded Reload
## xmm3 = mem[0,0]
movapd %xmm3, -1328(%rbp) ## 16-byte Spill
addpd %xmm3, %xmm6
subpd %xmm9, %xmm2
movddup -624(%rbp), %xmm3 ## 16-byte Folded Reload
## xmm3 = mem[0,0]
movapd %xmm3, -1312(%rbp) ## 16-byte Spill
addpd %xmm3, %xmm6
blendpd $1, %xmm2, %xmm6 ## xmm6 = xmm2[0],xmm6[1]
unpcklpd -560(%rbp), %xmm12 ## 16-byte Folded Reload
## xmm12 = xmm12[0],mem[0]
addpd %xmm6, %xmm12
movapd -1904(%rbp), %xmm3 ## 16-byte Reload
movapd -10032(%rbp), %xmm9 ## 16-byte Reload
mulsd %xmm9, %xmm3
movapd %xmm3, -624(%rbp) ## 16-byte Spill
movapd %xmm12, %xmm2
subpd %xmm3, %xmm2
movddup -832(%rbp), %xmm3 ## 16-byte Folded Reload
## xmm3 = mem[0,0]
movapd %xmm3, -1216(%rbp) ## 16-byte Spill
addpd %xmm3, %xmm12
blendpd $1, %xmm2, %xmm12 ## xmm12 = xmm2[0],xmm12[1]
movapd %xmm12, %xmm2
movapd LCPI19_94(%rip), %xmm10 ## xmm10 = [2.7755575615628914E-17,2.7755575615628914E-17]
mulpd %xmm10, %xmm2
subpd %xmm2, %xmm5
movapd -1584(%rbp), %xmm6 ## 16-byte Reload
movapd -13584(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm6, %xmm3
movddup %xmm1, %xmm2 ## xmm2 = xmm1[0,0]
movddup %xmm3, %xmm3 ## xmm3 = xmm3[0,0]
addpd %xmm2, %xmm3
movsd LCPI19_47(%rip), %xmm1 ## xmm1 = mem[0],zero
mulpd %xmm1, %xmm7
blendpd $2, %xmm3, %xmm7 ## xmm7 = xmm7[0],xmm3[1]
mulsd -1888(%rbp), %xmm11 ## 16-byte Folded Reload
movapd %xmm11, -10048(%rbp) ## 16-byte Spill
unpcklpd -848(%rbp), %xmm11 ## 16-byte Folded Reload
## xmm11 = xmm11[0],mem[0]
subpd %xmm11, %xmm7
unpcklpd -48(%rbp), %xmm13 ## 16-byte Folded Reload
## xmm13 = xmm13[0],mem[0]
addpd %xmm7, %xmm13
mulsd LCPI19_14(%rip), %xmm0
unpcklpd -672(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
addpd %xmm13, %xmm0
mulsd %xmm6, %xmm9
movapd %xmm9, -10032(%rbp) ## 16-byte Spill
movapd %xmm0, %xmm1
subpd %xmm9, %xmm1
movddup -1088(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = mem[0,0]
movapd %xmm2, -1088(%rbp) ## 16-byte Spill
addpd %xmm2, %xmm0
blendpd $1, %xmm1, %xmm0 ## xmm0 = xmm1[0],xmm0[1]
movapd %xmm0, %xmm1
movapd LCPI19_141(%rip), %xmm2 ## xmm2 = [1.3877787807814457E-17,1.3877787807814457E-17]
mulpd %xmm2, %xmm1
subpd %xmm1, %xmm5
unpcklpd -13552(%rbp), %xmm8 ## 16-byte Folded Reload
## xmm8 = xmm8[0],mem[0]
unpcklpd -13568(%rbp), %xmm4 ## 16-byte Folded Reload
## xmm4 = xmm4[0],mem[0]
movapd -1344(%rbp), %xmm1 ## 16-byte Reload
mulpd %xmm1, %xmm8
shufpd $1, %xmm1, %xmm1 ## xmm1 = xmm1[1,0]
movapd %xmm1, -832(%rbp) ## 16-byte Spill
mulpd %xmm1, %xmm4
addpd %xmm8, %xmm4
movapd -10880(%rbp), %xmm1 ## 16-byte Reload
mulpd %xmm14, %xmm1
addpd %xmm1, %xmm4
movapd -2592(%rbp), %xmm9 ## 16-byte Reload
mulpd %xmm9, %xmm15
addpd %xmm15, %xmm4
mulpd %xmm2, %xmm12
subpd %xmm12, %xmm4
mulpd %xmm10, %xmm0
addpd %xmm4, %xmm0
mulpd -9936(%rbp), %xmm5 ## 16-byte Folded Reload
mulpd -9952(%rbp), %xmm0 ## 16-byte Folded Reload
subpd %xmm0, %xmm5
movupd %xmm5, 1464(%rax)
movapd -15024(%rbp), %xmm12 ## 16-byte Reload
unpcklpd -9216(%rbp), %xmm12 ## 16-byte Folded Reload
## xmm12 = xmm12[0],mem[0]
movapd -15040(%rbp), %xmm11 ## 16-byte Reload
unpcklpd -9200(%rbp), %xmm11 ## 16-byte Folded Reload
## xmm11 = xmm11[0],mem[0]
movapd -13712(%rbp), %xmm6 ## 16-byte Reload
unpcklpd -7520(%rbp), %xmm6 ## 16-byte Folded Reload
## xmm6 = xmm6[0],mem[0]
mulpd -256(%rbp), %xmm6 ## 16-byte Folded Reload
movapd -13728(%rbp), %xmm2 ## 16-byte Reload
unpcklpd -7536(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = xmm2[0],mem[0]
movddup -96(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = mem[0,0]
mulpd %xmm0, %xmm2
movapd -336(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -3328(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
movddup -5936(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = mem[0,0]
mulpd %xmm0, %xmm1
addpd %xmm2, %xmm1
subpd %xmm1, %xmm6
movapd -3120(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -1760(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
movddup -5952(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = mem[0,0]
mulpd %xmm0, %xmm1
subpd %xmm1, %xmm6
movapd -11440(%rbp), %xmm4 ## 16-byte Reload
unpcklpd -4512(%rbp), %xmm4 ## 16-byte Folded Reload
## xmm4 = xmm4[0],mem[0]
movapd -14848(%rbp), %xmm1 ## 16-byte Reload
unpcklpd -8256(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = xmm1[0],mem[0]
movapd -3920(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -4496(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
movddup -5632(%rbp), %xmm3 ## 16-byte Folded Reload
## xmm3 = mem[0,0]
movapd %xmm3, %xmm13
mulpd %xmm0, %xmm13
movapd -864(%rbp), %xmm8 ## 16-byte Reload
mulpd %xmm0, %xmm8
movapd -16560(%rbp), %xmm5 ## 16-byte Reload
mulpd %xmm1, %xmm5
movapd -16576(%rbp), %xmm14 ## 16-byte Reload
mulpd %xmm1, %xmm14
movapd -13744(%rbp), %xmm1 ## 16-byte Reload
unpcklpd -14528(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = xmm1[0],mem[0]
movapd -896(%rbp), %xmm0 ## 16-byte Reload
mulpd %xmm1, %xmm0
movapd LCPI19_142(%rip), %xmm2 ## xmm2 = [5.7710603797978145E-3,5.7710603797978145E-3]
mulpd %xmm6, %xmm2
addpd %xmm0, %xmm2
movddup -480(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = mem[0,0]
mulpd %xmm3, %xmm0
movapd %xmm0, -96(%rbp) ## 16-byte Spill
movapd -704(%rbp), %xmm15 ## 16-byte Reload
movddup %xmm15, %xmm0 ## xmm0 = xmm15[0,0]
mulpd %xmm3, %xmm0
movapd %xmm0, -256(%rbp) ## 16-byte Spill
mulpd %xmm4, %xmm3
addpd %xmm3, %xmm2
addpd %xmm5, %xmm2
movapd -1888(%rbp), %xmm3 ## 16-byte Reload
mulpd %xmm1, %xmm3
mulpd LCPI19_143(%rip), %xmm6
addpd %xmm6, %xmm3
addpd %xmm13, %xmm3
movapd -1488(%rbp), %xmm5 ## 16-byte Reload
mulpd %xmm4, %xmm5
movapd -11456(%rbp), %xmm4 ## 16-byte Reload
unpcklpd -3904(%rbp), %xmm4 ## 16-byte Folded Reload
## xmm4 = xmm4[0],mem[0]
addpd %xmm5, %xmm4
addpd %xmm8, %xmm4
movapd -14864(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm5
movapd -6432(%rbp), %xmm8 ## 16-byte Reload
unpcklpd %xmm8, %xmm5 ## xmm5 = xmm5[0],xmm8[0]
mulpd -640(%rbp), %xmm5 ## 16-byte Folded Reload
movapd -14880(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm7
movapd -6416(%rbp), %xmm13 ## 16-byte Reload
unpcklpd %xmm13, %xmm7 ## xmm7 = xmm7[0],xmm13[0]
mulpd -2416(%rbp), %xmm7 ## 16-byte Folded Reload
subpd %xmm7, %xmm5
movapd %xmm9, %xmm7
mulpd %xmm12, %xmm7
subpd %xmm7, %xmm5
movapd -128(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm7
mulpd %xmm11, %xmm7
addpd %xmm7, %xmm5
addpd -1328(%rbp), %xmm2 ## 16-byte Folded Reload
addpd -1312(%rbp), %xmm2 ## 16-byte Folded Reload
movddup -560(%rbp), %xmm7 ## 16-byte Folded Reload
## xmm7 = mem[0,0]
addpd %xmm2, %xmm7
addpd -1216(%rbp), %xmm7 ## 16-byte Folded Reload
movapd %xmm7, %xmm2
mulpd %xmm10, %xmm2
subpd %xmm2, %xmm5
mulpd LCPI19_144(%rip), %xmm4
addpd %xmm3, %xmm4
addpd %xmm14, %xmm4
movddup -848(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = mem[0,0]
subpd %xmm2, %xmm4
movddup -48(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = mem[0,0]
addpd %xmm4, %xmm2
movddup -672(%rbp), %xmm3 ## 16-byte Folded Reload
## xmm3 = mem[0,0]
addpd %xmm2, %xmm3
addpd -1088(%rbp), %xmm3 ## 16-byte Folded Reload
movapd %xmm3, %xmm2
movapd LCPI19_141(%rip), %xmm4 ## xmm4 = [1.3877787807814457E-17,1.3877787807814457E-17]
mulpd %xmm4, %xmm2
subpd %xmm2, %xmm5
unpcklpd %xmm8, %xmm1 ## xmm1 = xmm1[0],xmm8[0]
unpcklpd %xmm13, %xmm6 ## xmm6 = xmm6[0],xmm13[0]
mulpd -1344(%rbp), %xmm1 ## 16-byte Folded Reload
mulpd -832(%rbp), %xmm6 ## 16-byte Folded Reload
addpd %xmm1, %xmm6
mulpd %xmm0, %xmm12
addpd %xmm12, %xmm6
mulpd %xmm9, %xmm11
addpd %xmm11, %xmm6
mulpd %xmm4, %xmm7
subpd %xmm7, %xmm6
mulpd %xmm10, %xmm3
addpd %xmm6, %xmm3
mulpd -9936(%rbp), %xmm5 ## 16-byte Folded Reload
mulpd -9952(%rbp), %xmm3 ## 16-byte Folded Reload
subpd %xmm3, %xmm5
movupd %xmm5, 1480(%rax)
movaps -3376(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -2384(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
movaps %xmm0, -3376(%rbp) ## 16-byte Spill
movaps -4720(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -9168(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
movaps %xmm0, -4720(%rbp) ## 16-byte Spill
movapd -896(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm2
movsd -2680(%rbp), %xmm11 ## 8-byte Reload
## xmm11 = mem[0],zero
mulsd %xmm11, %xmm2
movapd -1904(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm3
movsd -1352(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm7, %xmm3
addsd %xmm2, %xmm3
movsd LCPI19_67(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd -64(%rbp), %xmm2 ## 16-byte Folded Reload
movapd %xmm2, %xmm4
movsd LCPI19_48(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm4
addsd %xmm3, %xmm4
movsd -9696(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -3408(%rbp), %xmm1 ## 8-byte Folded Reload
movapd -11936(%rbp), %xmm3 ## 16-byte Reload
mulsd -648(%rbp), %xmm3 ## 8-byte Folded Reload
subsd %xmm3, %xmm1
movapd %xmm1, %xmm3
mulsd %xmm0, %xmm3
addsd %xmm4, %xmm3
movsd -1672(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm0, %xmm6
addsd %xmm3, %xmm6
movapd -5632(%rbp), %xmm3 ## 16-byte Reload
movsd -1664(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
mulsd %xmm10, %xmm3
addsd %xmm6, %xmm3
movapd %xmm5, %xmm4
movapd -7648(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm6, %xmm4
addsd %xmm3, %xmm4
movapd -3392(%rbp), %xmm13 ## 16-byte Reload
movsd LCPI19_107(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm13, %xmm3
addsd %xmm4, %xmm3
movapd -144(%rbp), %xmm5 ## 16-byte Reload
mulsd -1128(%rbp), %xmm5 ## 8-byte Folded Reload
addsd -7016(%rbp), %xmm5 ## 8-byte Folded Reload
subsd -4672(%rbp), %xmm5 ## 8-byte Folded Reload
movapd -5152(%rbp), %xmm14 ## 16-byte Reload
mulsd %xmm5, %xmm14
addpd %xmm3, %xmm14
movapd -4560(%rbp), %xmm12 ## 16-byte Reload
movapd -736(%rbp), %xmm9 ## 16-byte Reload
mulsd %xmm12, %xmm9
unpcklpd %xmm15, %xmm12 ## xmm12 = xmm12[0],xmm15[0]
mulpd -176(%rbp), %xmm12 ## 16-byte Folded Reload
movapd -864(%rbp), %xmm15 ## 16-byte Reload
movapd %xmm15, %xmm4
mulsd %xmm5, %xmm4
movapd -3664(%rbp), %xmm8 ## 16-byte Reload
movapd -576(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm8, %xmm3
addsd %xmm4, %xmm3
addsd %xmm9, %xmm3
movapd %xmm3, -576(%rbp) ## 16-byte Spill
movapd -1888(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm4, %xmm11
movapd -1584(%rbp), %xmm9 ## 16-byte Reload
mulsd %xmm9, %xmm7
addsd %xmm11, %xmm7
movsd LCPI19_47(%rip), %xmm11 ## xmm11 = mem[0],zero
mulsd %xmm11, %xmm2
addsd %xmm7, %xmm2
mulsd %xmm11, %xmm1
addsd %xmm2, %xmm1
mulsd %xmm4, %xmm0
addsd %xmm1, %xmm0
movapd -5632(%rbp), %xmm2 ## 16-byte Reload
movsd -1640(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm4, %xmm2
addsd %xmm0, %xmm2
movapd -1488(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm7, %xmm10
movsd -1680(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
addsd %xmm10, %xmm1
mulsd %xmm15, %xmm4
addsd %xmm1, %xmm4
mulsd LCPI19_14(%rip), %xmm4
addsd %xmm2, %xmm4
mulsd %xmm9, %xmm6
mulsd LCPI19_63(%rip), %xmm13
addsd %xmm4, %xmm6
addpd %xmm6, %xmm13
movapd %xmm13, -3392(%rbp) ## 16-byte Spill
movapd -6544(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm2
movapd -2064(%rbp), %xmm9 ## 16-byte Reload
unpcklpd %xmm9, %xmm2 ## xmm2 = xmm2[0],xmm9[0]
mulpd -640(%rbp), %xmm2 ## 16-byte Folded Reload
movapd -6528(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm4
unpcklpd -1184(%rbp), %xmm4 ## 16-byte Folded Reload
## xmm4 = xmm4[0],mem[0]
movapd -2416(%rbp), %xmm15 ## 16-byte Reload
mulpd %xmm15, %xmm4
subpd %xmm4, %xmm2
movapd -2592(%rbp), %xmm13 ## 16-byte Reload
movapd %xmm13, %xmm4
movapd -3376(%rbp), %xmm1 ## 16-byte Reload
mulpd %xmm1, %xmm4
subpd %xmm4, %xmm2
movapd -128(%rbp), %xmm11 ## 16-byte Reload
mulpd %xmm11, %xmm1
movapd %xmm1, -3376(%rbp) ## 16-byte Spill
movapd -4720(%rbp), %xmm10 ## 16-byte Reload
mulpd %xmm10, %xmm11
addpd %xmm11, %xmm2
unpcklpd -480(%rbp), %xmm8 ## 16-byte Folded Reload
## xmm8 = xmm8[0],mem[0]
mulpd -1520(%rbp), %xmm8 ## 16-byte Folded Reload
movapd %xmm7, %xmm4
mulsd %xmm5, %xmm4
unpcklpd -192(%rbp), %xmm4 ## 16-byte Folded Reload
## xmm4 = xmm4[0],mem[0]
addpd %xmm8, %xmm4
addpd %xmm12, %xmm4
movapd -5152(%rbp), %xmm3 ## 16-byte Reload
blendpd $1, %xmm4, %xmm3 ## xmm3 = xmm4[0],xmm3[1]
movapd -1872(%rbp), %xmm11 ## 16-byte Reload
movsd LCPI19_57(%rip), %xmm1 ## xmm1 = mem[0],zero
unpcklpd %xmm11, %xmm1 ## xmm1 = xmm1[0],xmm11[0]
mulpd %xmm3, %xmm1
movapd -96(%rbp), %xmm8 ## 16-byte Reload
blendpd $1, %xmm14, %xmm8 ## xmm8 = xmm14[0],xmm8[1]
addpd %xmm1, %xmm8
movapd -576(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm3
unpcklpd -1904(%rbp), %xmm3 ## 16-byte Folded Reload
## xmm3 = xmm3[0],mem[0]
movapd LCPI19_101(%rip), %xmm7 ## xmm7 = <1.9205962859860238E-4,u>
movapd -4208(%rbp), %xmm14 ## 16-byte Reload
unpcklpd %xmm14, %xmm7 ## xmm7 = xmm7[0],xmm14[0]
mulpd %xmm3, %xmm7
addsubpd %xmm7, %xmm8
movapd %xmm8, %xmm3
mulpd LCPI19_94(%rip), %xmm3
subpd %xmm3, %xmm2
unpcklpd %xmm11, %xmm5 ## xmm5 = xmm5[0],xmm11[0]
mulpd -5136(%rbp), %xmm5 ## 16-byte Folded Reload
movapd -256(%rbp), %xmm7 ## 16-byte Reload
blendpd $1, -3392(%rbp), %xmm7 ## 16-byte Folded Reload
## xmm7 = mem[0],xmm7[1]
addpd %xmm5, %xmm7
mulpd LCPI19_145(%rip), %xmm4
movapd %xmm7, %xmm3
subpd %xmm4, %xmm3
addpd %xmm7, %xmm4
unpcklpd -1584(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = xmm1[0],mem[0]
movapd %xmm1, %xmm5
movapd LCPI19_146(%rip), %xmm1 ## xmm1 = <8.24218900360216E-4,u>
unpcklpd %xmm14, %xmm1 ## xmm1 = xmm1[0],xmm14[0]
mulpd %xmm5, %xmm1
subpd %xmm1, %xmm3
addpd %xmm4, %xmm1
blendpd $1, %xmm3, %xmm1 ## xmm1 = xmm3[0],xmm1[1]
movapd %xmm1, %xmm3
movapd LCPI19_141(%rip), %xmm5 ## xmm5 = [1.3877787807814457E-17,1.3877787807814457E-17]
mulpd %xmm5, %xmm3
subpd %xmm3, %xmm2
mulpd -9936(%rbp), %xmm2 ## 16-byte Folded Reload
unpcklpd %xmm9, %xmm6 ## xmm6 = xmm6[0],xmm9[0]
unpcklpd -1184(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
mulpd -832(%rbp), %xmm0 ## 16-byte Folded Reload
movapd -1344(%rbp), %xmm4 ## 16-byte Reload
mulpd %xmm4, %xmm6
addpd %xmm6, %xmm0
addpd -3376(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm13, %xmm3
mulpd %xmm13, %xmm10
addpd %xmm10, %xmm0
mulpd %xmm5, %xmm8
subpd %xmm8, %xmm0
mulpd LCPI19_94(%rip), %xmm1
addpd %xmm0, %xmm1
mulpd -9952(%rbp), %xmm1 ## 16-byte Folded Reload
subpd %xmm1, %xmm2
movupd %xmm2, 1496(%rax)
movapd %xmm4, %xmm0
movapd %xmm4, %xmm14
movsd -7064(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
mulsd %xmm9, %xmm0
movapd %xmm15, %xmm1
movsd -7072(%rbp), %xmm13 ## 8-byte Reload
## xmm13 = mem[0],zero
mulsd %xmm13, %xmm1
subsd %xmm1, %xmm0
movapd %xmm3, %xmm1
movapd %xmm3, %xmm10
movapd -2368(%rbp), %xmm8 ## 16-byte Reload
mulsd %xmm8, %xmm1
subsd %xmm1, %xmm0
movapd -3280(%rbp), %xmm12 ## 16-byte Reload
movapd %xmm12, %xmm1
movsd -1824(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm7, %xmm1
addsd %xmm0, %xmm1
movapd -5632(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm0
movsd -4464(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm4, %xmm0
addsd -1024(%rbp), %xmm0 ## 16-byte Folded Reload
addsd -624(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm0, %xmm2
movsd LCPI19_108(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm2
subsd %xmm2, %xmm1
movsd -7088(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm5, %xmm6
addsd -10048(%rbp), %xmm6 ## 16-byte Folded Reload
mulsd -1488(%rbp), %xmm4 ## 16-byte Folded Reload
movsd -1008(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
addsd %xmm4, %xmm2
mulsd -864(%rbp), %xmm5 ## 16-byte Folded Reload
addsd %xmm2, %xmm5
mulsd LCPI19_14(%rip), %xmm5
addsd %xmm6, %xmm5
addsd -10032(%rbp), %xmm5 ## 16-byte Folded Reload
movapd %xmm5, %xmm2
movsd LCPI19_15(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd %xmm4, %xmm2
subsd %xmm2, %xmm1
movapd -752(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm1, %xmm2
mulsd %xmm15, %xmm9
mulsd %xmm14, %xmm13
addsd %xmm9, %xmm13
movapd %xmm12, %xmm1
mulsd %xmm8, %xmm1
addsd %xmm13, %xmm1
mulsd %xmm10, %xmm7
addsd %xmm1, %xmm7
mulsd %xmm4, %xmm0
subsd %xmm0, %xmm7
mulsd %xmm3, %xmm5
addsd %xmm7, %xmm5
movapd -496(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm5, %xmm0
subsd %xmm0, %xmm2
movsd %xmm2, 1512(%rax)
movsd -12480(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movapd -208(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm6, %xmm0
movapd -9328(%rbp), %xmm2 ## 16-byte Reload
addsd %xmm0, %xmm2
movapd -16544(%rbp), %xmm3 ## 16-byte Reload
unpcklpd -16016(%rbp), %xmm3 ## 16-byte Folded Reload
## xmm3 = xmm3[0],mem[0]
movapd -14128(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm0
shufpd $1, -16192(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[1],mem[0]
movapd -11344(%rbp), %xmm1 ## 16-byte Reload
movapd -64(%rbp), %xmm5 ## 16-byte Reload
unpcklpd %xmm5, %xmm1 ## xmm1 = xmm1[0],xmm5[0]
mulpd %xmm1, %xmm0
unpcklpd -1536(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = xmm2[0],mem[0]
mulpd -13648(%rbp), %xmm3 ## 16-byte Folded Reload
addpd %xmm3, %xmm2
addpd %xmm0, %xmm2
movapd -11328(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -11264(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
mulpd %xmm4, %xmm0
addsubpd %xmm0, %xmm2
movupd %xmm2, 1520(%rax)
movapd -15696(%rbp), %xmm1 ## 16-byte Reload
unpcklpd -15568(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = xmm1[0],mem[0]
movapd -14160(%rbp), %xmm0 ## 16-byte Reload
mulpd %xmm1, %xmm0
movsd -12456(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm6, %xmm2
movapd -10224(%rbp), %xmm1 ## 16-byte Reload
addsd %xmm2, %xmm1
movapd -2272(%rbp), %xmm3 ## 16-byte Reload
unpcklpd %xmm1, %xmm3 ## xmm3 = xmm3[0],xmm1[0]
addpd %xmm0, %xmm3
movapd %xmm5, %xmm0
unpcklpd -11216(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
movapd -15856(%rbp), %xmm2 ## 16-byte Reload
blendpd $2, %xmm4, %xmm2 ## xmm2 = xmm2[0],xmm4[1]
mulpd %xmm0, %xmm2
addpd %xmm3, %xmm2
movapd -11312(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -11200(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
movddup %xmm4, %xmm1 ## xmm1 = xmm4[0,0]
mulpd %xmm1, %xmm0
subpd %xmm0, %xmm2
movupd %xmm2, 1536(%rax)
movapd -14928(%rbp), %xmm3 ## 16-byte Reload
unpcklpd -6464(%rbp), %xmm3 ## 16-byte Folded Reload
## xmm3 = xmm3[0],mem[0]
movapd -15120(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -9296(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
movapd -3296(%rbp), %xmm2 ## 16-byte Reload
unpcklpd -4880(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = xmm2[0],mem[0]
mulpd -688(%rbp), %xmm3 ## 16-byte Folded Reload
addpd %xmm3, %xmm2
mulpd -1232(%rbp), %xmm0 ## 16-byte Folded Reload
addpd %xmm0, %xmm2
movapd -11104(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -5552(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
mulpd -16480(%rbp), %xmm0 ## 16-byte Folded Reload
addpd %xmm2, %xmm0
movapd -11088(%rbp), %xmm2 ## 16-byte Reload
unpcklpd -5536(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = xmm2[0],mem[0]
mulpd %xmm1, %xmm2
subpd %xmm2, %xmm0
movupd %xmm0, 1552(%rax)
movapd -6096(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm6, %xmm0
movapd -2016(%rbp), %xmm1 ## 16-byte Reload
addsd %xmm0, %xmm1
movapd -8736(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm5, %xmm2
addsd %xmm1, %xmm2
movapd %xmm4, %xmm0
shufpd $1, -2608(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[1],mem[0]
movapd -6032(%rbp), %xmm1 ## 16-byte Reload
unpcklpd %xmm6, %xmm1 ## xmm1 = xmm1[0],xmm6[0]
mulpd %xmm1, %xmm0
unpcklpd -4032(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = xmm2[0],mem[0]
addpd %xmm0, %xmm2
unpcklpd -4272(%rbp), %xmm4 ## 16-byte Folded Reload
## xmm4 = xmm4[0],mem[0]
movapd -6016(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm5, %xmm1
movapd -912(%rbp), %xmm5 ## 16-byte Reload
unpcklpd %xmm1, %xmm0 ## xmm0 = xmm0[0],xmm1[0]
mulpd %xmm0, %xmm4
addsubpd %xmm4, %xmm2
movupd %xmm2, 1568(%rax)
movsd -7104(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm1, %xmm0
subsd -2984(%rbp), %xmm0 ## 8-byte Folded Reload
movsd %xmm0, 1584(%rax)
LBB19_104:
movsd LCPI19_62(%rip), %xmm6 ## xmm6 = mem[0],zero
movapd -9120(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm2, %xmm6
movsd -7128(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
subsd -13480(%rbp), %xmm10 ## 8-byte Folded Reload
movapd %xmm5, %xmm0
mulsd %xmm10, %xmm0
movapd -1248(%rbp), %xmm1 ## 16-byte Reload
movapd -14368(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm3, %xmm1
addsd %xmm0, %xmm1
movapd -608(%rbp), %xmm4 ## 16-byte Reload
movapd -8032(%rbp), %xmm13 ## 16-byte Reload
mulsd %xmm13, %xmm4
addsd %xmm1, %xmm4
movapd -1504(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm10, %xmm0
mulsd -1264(%rbp), %xmm3 ## 16-byte Folded Reload
addsd %xmm0, %xmm3
movsd LCPI19_57(%rip), %xmm12 ## xmm12 = mem[0],zero
mulsd %xmm4, %xmm12
mulsd -720(%rbp), %xmm13 ## 16-byte Folded Reload
addsd %xmm3, %xmm13
movapd %xmm13, %xmm8
movsd LCPI19_113(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm0, %xmm8
mulsd %xmm0, %xmm4
movapd %xmm4, -96(%rbp) ## 16-byte Spill
movapd -6864(%rbp), %xmm15 ## 16-byte Reload
mulsd %xmm10, %xmm15
mulsd LCPI19_63(%rip), %xmm2
mulsd -6880(%rbp), %xmm10 ## 16-byte Folded Reload
mulsd LCPI19_54(%rip), %xmm13
testq %rax, %rax
movapd %xmm13, -8032(%rbp) ## 16-byte Spill
movapd %xmm10, -192(%rbp) ## 16-byte Spill
movapd %xmm6, -48(%rbp) ## 16-byte Spill
je LBB19_106
## %bb.105:
movapd -2048(%rbp), %xmm0 ## 16-byte Reload
mulsd -13632(%rbp), %xmm0 ## 16-byte Folded Reload
movapd -1104(%rbp), %xmm1 ## 16-byte Reload
movapd -16464(%rbp), %xmm11 ## 16-byte Reload
mulsd %xmm11, %xmm1
subsd %xmm1, %xmm0
movapd -2560(%rbp), %xmm1 ## 16-byte Reload
movsd -12504(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
mulsd %xmm9, %xmm1
subsd %xmm1, %xmm0
movapd -3264(%rbp), %xmm1 ## 16-byte Reload
movapd -10416(%rbp), %xmm14 ## 16-byte Reload
mulsd %xmm14, %xmm1
addsd %xmm0, %xmm1
movapd -14096(%rbp), %xmm5 ## 16-byte Reload
mulsd -400(%rbp), %xmm5 ## 16-byte Folded Reload
movapd %xmm2, %xmm13
movapd -12048(%rbp), %xmm2 ## 16-byte Reload
mulsd -272(%rbp), %xmm2 ## 16-byte Folded Reload
movapd -6160(%rbp), %xmm0 ## 16-byte Reload
mulsd -5968(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm2, %xmm0
subsd %xmm0, %xmm5
movapd -4256(%rbp), %xmm0 ## 16-byte Reload
mulsd -5984(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm5
movsd LCPI19_48(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm5, %xmm0
movapd -1920(%rbp), %xmm4 ## 16-byte Reload
movapd -14208(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm3, %xmm4
addsd %xmm0, %xmm4
movapd -14240(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm6
movapd -12000(%rbp), %xmm10 ## 16-byte Reload
mulsd %xmm10, %xmm6
addsd %xmm4, %xmm6
movapd -2432(%rbp), %xmm0 ## 16-byte Reload
movsd -12488(%rbp), %xmm7 ## 8-byte Reload
## xmm7 = mem[0],zero
mulsd %xmm7, %xmm0
addsd %xmm6, %xmm0
addsd -48(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm15, %xmm0
addsd %xmm12, %xmm0
addsd %xmm8, %xmm0
movapd %xmm0, %xmm6
movsd LCPI19_108(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd %xmm4, %xmm6
addsd %xmm1, %xmm6
mulsd LCPI19_47(%rip), %xmm5
mulsd -1600(%rbp), %xmm3 ## 16-byte Folded Reload
addsd %xmm5, %xmm3
movapd -912(%rbp), %xmm5 ## 16-byte Reload
movapd -9408(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm1, %xmm2
addsd %xmm3, %xmm2
mulsd %xmm5, %xmm10
movsd -5328(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
addsd %xmm10, %xmm3
movapd -192(%rbp), %xmm10 ## 16-byte Reload
mulsd -1504(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm3, %xmm1
mulsd LCPI19_14(%rip), %xmm1
addsd %xmm2, %xmm1
movapd %xmm13, %xmm2
movapd -8032(%rbp), %xmm13 ## 16-byte Reload
mulsd -2128(%rbp), %xmm7 ## 16-byte Folded Reload
addsd %xmm1, %xmm7
subsd %xmm2, %xmm7
addsd %xmm10, %xmm7
addsd -96(%rbp), %xmm7 ## 16-byte Folded Reload
addsd %xmm13, %xmm7
movapd %xmm7, %xmm1
movsd LCPI19_15(%rip), %xmm3 ## xmm3 = mem[0],zero
mulsd %xmm3, %xmm1
addsd %xmm6, %xmm1
movapd -512(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm1, %xmm4
movapd -13632(%rbp), %xmm1 ## 16-byte Reload
mulsd -1104(%rbp), %xmm1 ## 16-byte Folded Reload
mulsd -2048(%rbp), %xmm11 ## 16-byte Folded Reload
addsd %xmm1, %xmm11
mulsd -3264(%rbp), %xmm9 ## 16-byte Folded Reload
addsd %xmm11, %xmm9
mulsd -2560(%rbp), %xmm14 ## 16-byte Folded Reload
addsd %xmm9, %xmm14
mulsd %xmm3, %xmm0
addsd %xmm14, %xmm0
mulsd LCPI19_108(%rip), %xmm7
subsd %xmm7, %xmm0
movapd -448(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm0, %xmm1
subsd %xmm1, %xmm4
movsd %xmm4, 1592(%rax)
LBB19_106:
movapd %xmm15, -576(%rbp) ## 16-byte Spill
movapd %xmm8, -752(%rbp) ## 16-byte Spill
movapd %xmm12, -496(%rbp) ## 16-byte Spill
movq (%r14), %rax
movapd -528(%rbp), %xmm0 ## 16-byte Reload
movddup %xmm0, %xmm8 ## xmm8 = xmm0[0,0]
movapd -112(%rbp), %xmm5 ## 16-byte Reload
movddup %xmm5, %xmm3 ## xmm3 = xmm5[0,0]
testq %rax, %rax
movapd %xmm3, -144(%rbp) ## 16-byte Spill
movapd %xmm8, -176(%rbp) ## 16-byte Spill
je LBB19_108
## %bb.107:
movapd -16144(%rbp), %xmm5 ## 16-byte Reload
movapd %xmm5, %xmm0
movapd -15808(%rbp), %xmm14 ## 16-byte Reload
unpcklpd %xmm14, %xmm0 ## xmm0 = xmm0[0],xmm14[0]
movapd -2048(%rbp), %xmm1 ## 16-byte Reload
mulpd %xmm1, %xmm0
movapd %xmm2, -9120(%rbp) ## 16-byte Spill
movapd %xmm1, %xmm2
movapd -11872(%rbp), %xmm3 ## 16-byte Reload
movapd %xmm3, %xmm1
movapd -11728(%rbp), %xmm9 ## 16-byte Reload
unpcklpd %xmm9, %xmm1 ## xmm1 = xmm1[0],xmm9[0]
movddup -1104(%rbp), %xmm4 ## 16-byte Folded Reload
## xmm4 = mem[0,0]
movapd %xmm4, -128(%rbp) ## 16-byte Spill
mulpd %xmm4, %xmm1
subpd %xmm1, %xmm0
movapd -16272(%rbp), %xmm4 ## 16-byte Reload
unpcklpd -15952(%rbp), %xmm4 ## 16-byte Folded Reload
## xmm4 = xmm4[0],mem[0]
movapd -2560(%rbp), %xmm10 ## 16-byte Reload
movapd %xmm10, %xmm1
mulpd %xmm4, %xmm1
subpd %xmm1, %xmm0
movapd -16304(%rbp), %xmm1 ## 16-byte Reload
unpcklpd -15968(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = xmm1[0],mem[0]
movddup -3264(%rbp), %xmm7 ## 16-byte Folded Reload
## xmm7 = mem[0,0]
movapd %xmm7, %xmm6
movapd %xmm7, %xmm15
movapd %xmm7, -256(%rbp) ## 16-byte Spill
mulpd %xmm1, %xmm6
addpd %xmm0, %xmm6
movapd -10080(%rbp), %xmm12 ## 16-byte Reload
unpcklpd -13984(%rbp), %xmm12 ## 16-byte Folded Reload
## xmm12 = xmm12[0],mem[0]
movapd -11856(%rbp), %xmm11 ## 16-byte Reload
unpcklpd -11680(%rbp), %xmm11 ## 16-byte Folded Reload
## xmm11 = xmm11[0],mem[0]
movapd -11008(%rbp), %xmm8 ## 16-byte Reload
unpcklpd -13936(%rbp), %xmm8 ## 16-byte Folded Reload
## xmm8 = xmm8[0],mem[0]
movapd -11840(%rbp), %xmm7 ## 16-byte Reload
unpcklpd -11664(%rbp), %xmm7 ## 16-byte Folded Reload
## xmm7 = xmm7[0],mem[0]
unpcklpd %xmm9, %xmm5 ## xmm5 = xmm5[0],xmm9[0]
unpcklpd %xmm14, %xmm3 ## xmm3 = xmm3[0],xmm14[0]
movapd %xmm2, %xmm0
movapd -1104(%rbp), %xmm2 ## 16-byte Reload
unpcklpd %xmm2, %xmm0 ## xmm0 = xmm0[0],xmm2[0]
mulpd %xmm0, %xmm3
mulpd %xmm2, %xmm5
addpd %xmm5, %xmm3
mulpd %xmm15, %xmm4
addpd %xmm3, %xmm4
mulpd %xmm10, %xmm1
addpd %xmm4, %xmm1
movapd -400(%rbp), %xmm2 ## 16-byte Reload
movapd -13920(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm2, %xmm0
movapd -14192(%rbp), %xmm3 ## 16-byte Reload
unpcklpd %xmm0, %xmm3 ## xmm3 = xmm3[0],xmm0[0]
movapd -272(%rbp), %xmm10 ## 16-byte Reload
movapd -14032(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm10, %xmm0
unpcklpd -14176(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
subpd %xmm0, %xmm3
movapd -1920(%rbp), %xmm4 ## 16-byte Reload
mulpd %xmm12, %xmm4
movapd %xmm4, -864(%rbp) ## 16-byte Spill
movapd LCPI19_142(%rip), %xmm15 ## xmm15 = [5.7710603797978145E-3,5.7710603797978145E-3]
movapd %xmm3, %xmm9
mulpd %xmm15, %xmm9
movapd %xmm9, %xmm0
addpd %xmm4, %xmm0
subpd %xmm4, %xmm9
blendpd $1, %xmm0, %xmm9 ## xmm9 = xmm0[0],xmm9[1]
movapd -6368(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm0
movapd %xmm4, %xmm5
mulpd %xmm11, %xmm0
addpd %xmm0, %xmm9
movapd -11040(%rbp), %xmm0 ## 16-byte Reload
mulpd %xmm8, %xmm0
movapd %xmm0, -640(%rbp) ## 16-byte Spill
movapd %xmm9, %xmm4
addpd %xmm0, %xmm4
subpd %xmm0, %xmm9
blendpd $1, %xmm4, %xmm9 ## xmm9 = xmm4[0],xmm9[1]
movapd %xmm9, %xmm4
mulpd LCPI19_94(%rip), %xmm4
addpd %xmm6, %xmm4
movapd -16688(%rbp), %xmm14 ## 16-byte Reload
mulpd %xmm14, %xmm12
movapd %xmm12, -10080(%rbp) ## 16-byte Spill
mulpd LCPI19_143(%rip), %xmm3
movapd %xmm3, %xmm2
addpd %xmm12, %xmm2
subpd %xmm12, %xmm3
blendpd $1, %xmm2, %xmm3 ## xmm3 = xmm2[0],xmm3[1]
movapd %xmm5, %xmm2
mulpd %xmm7, %xmm2
addpd %xmm2, %xmm3
movapd -11888(%rbp), %xmm5 ## 16-byte Reload
unpcklpd -11744(%rbp), %xmm5 ## 16-byte Folded Reload
## xmm5 = xmm5[0],mem[0]
movapd -912(%rbp), %xmm6 ## 16-byte Reload
mulpd %xmm6, %xmm11
addpd %xmm11, %xmm5
movapd -1504(%rbp), %xmm0 ## 16-byte Reload
mulpd %xmm0, %xmm7
movapd %xmm0, %xmm11
addpd %xmm7, %xmm5
mulpd LCPI19_144(%rip), %xmm5
addpd %xmm3, %xmm5
mulpd -11024(%rbp), %xmm8 ## 16-byte Folded Reload
movapd %xmm8, -11008(%rbp) ## 16-byte Spill
movapd %xmm5, %xmm2
addpd %xmm8, %xmm2
subpd %xmm8, %xmm5
blendpd $1, %xmm2, %xmm5 ## xmm5 = xmm2[0],xmm5[1]
movapd LCPI19_141(%rip), %xmm2 ## xmm2 = [1.3877787807814457E-17,1.3877787807814457E-17]
movapd %xmm5, %xmm0
mulpd %xmm2, %xmm0
addpd %xmm4, %xmm0
mulpd %xmm2, %xmm9
addpd %xmm1, %xmm9
mulpd LCPI19_94(%rip), %xmm5
subpd %xmm5, %xmm9
movddup -512(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = mem[0,0]
movapd %xmm1, -512(%rbp) ## 16-byte Spill
mulpd %xmm1, %xmm0
movddup -448(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = mem[0,0]
movapd %xmm1, -448(%rbp) ## 16-byte Spill
mulpd %xmm1, %xmm9
subpd %xmm9, %xmm0
movupd %xmm0, 1600(%rax)
movapd -15328(%rbp), %xmm13 ## 16-byte Reload
unpcklpd -14800(%rbp), %xmm13 ## 16-byte Folded Reload
## xmm13 = xmm13[0],mem[0]
movapd -15344(%rbp), %xmm12 ## 16-byte Reload
unpcklpd -14816(%rbp), %xmm12 ## 16-byte Folded Reload
## xmm12 = xmm12[0],mem[0]
movapd -13776(%rbp), %xmm3 ## 16-byte Reload
unpcklpd -8224(%rbp), %xmm3 ## 16-byte Folded Reload
## xmm3 = xmm3[0],mem[0]
movapd -13792(%rbp), %xmm1 ## 16-byte Reload
unpcklpd -13680(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = xmm1[0],mem[0]
movddup %xmm10, %xmm0 ## xmm0 = xmm10[0,0]
mulpd %xmm1, %xmm0
movapd -4608(%rbp), %xmm4 ## 16-byte Reload
unpcklpd -432(%rbp), %xmm4 ## 16-byte Folded Reload
## xmm4 = xmm4[0],mem[0]
movddup -5968(%rbp), %xmm5 ## 16-byte Folded Reload
## xmm5 = mem[0,0]
mulpd %xmm4, %xmm5
addpd %xmm0, %xmm5
mulpd -400(%rbp), %xmm3 ## 16-byte Folded Reload
subpd %xmm5, %xmm3
movapd -816(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -2000(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
movddup -5984(%rbp), %xmm4 ## 16-byte Folded Reload
## xmm4 = mem[0,0]
mulpd %xmm0, %xmm4
subpd %xmm4, %xmm3
movapd -13808(%rbp), %xmm1 ## 16-byte Reload
unpcklpd -13696(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = xmm1[0],mem[0]
movapd -11472(%rbp), %xmm5 ## 16-byte Reload
unpcklpd -11408(%rbp), %xmm5 ## 16-byte Folded Reload
## xmm5 = xmm5[0],mem[0]
movapd -15280(%rbp), %xmm8 ## 16-byte Reload
unpcklpd -14752(%rbp), %xmm8 ## 16-byte Folded Reload
## xmm8 = xmm8[0],mem[0]
movapd -1920(%rbp), %xmm0 ## 16-byte Reload
mulpd %xmm1, %xmm0
mulpd %xmm14, %xmm1
movapd -5584(%rbp), %xmm4 ## 16-byte Reload
unpcklpd -2752(%rbp), %xmm4 ## 16-byte Folded Reload
## xmm4 = xmm4[0],mem[0]
mulpd %xmm3, %xmm15
addpd %xmm0, %xmm15
movapd -6368(%rbp), %xmm14 ## 16-byte Reload
movapd %xmm14, %xmm0
mulpd %xmm5, %xmm0
addpd %xmm0, %xmm15
movapd -11040(%rbp), %xmm0 ## 16-byte Reload
mulpd %xmm8, %xmm0
addpd %xmm0, %xmm15
mulpd LCPI19_143(%rip), %xmm3
addpd %xmm1, %xmm3
movapd %xmm14, %xmm0
mulpd %xmm4, %xmm0
addpd %xmm0, %xmm3
movapd -11488(%rbp), %xmm1 ## 16-byte Reload
unpcklpd -11424(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = xmm1[0],mem[0]
mulpd %xmm6, %xmm5
addpd %xmm5, %xmm1
mulpd %xmm11, %xmm4
addpd %xmm4, %xmm1
movapd -15232(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm0
movapd -14704(%rbp), %xmm14 ## 16-byte Reload
unpcklpd %xmm14, %xmm0 ## xmm0 = xmm0[0],xmm14[0]
mulpd -2048(%rbp), %xmm0 ## 16-byte Folded Reload
movapd -15264(%rbp), %xmm7 ## 16-byte Reload
movapd %xmm7, %xmm4
movapd -14736(%rbp), %xmm10 ## 16-byte Reload
unpcklpd %xmm10, %xmm4 ## xmm4 = xmm4[0],xmm10[0]
mulpd -128(%rbp), %xmm4 ## 16-byte Folded Reload
subpd %xmm4, %xmm0
movapd -2560(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm11, %xmm4
mulpd %xmm13, %xmm4
subpd %xmm4, %xmm0
movapd -256(%rbp), %xmm9 ## 16-byte Reload
movapd %xmm9, %xmm4
mulpd %xmm12, %xmm4
addpd %xmm4, %xmm0
movddup -48(%rbp), %xmm4 ## 16-byte Folded Reload
## xmm4 = mem[0,0]
addpd %xmm15, %xmm4
movddup -576(%rbp), %xmm5 ## 16-byte Folded Reload
## xmm5 = mem[0,0]
addpd %xmm4, %xmm5
movddup -496(%rbp), %xmm4 ## 16-byte Folded Reload
## xmm4 = mem[0,0]
addpd %xmm5, %xmm4
movddup -752(%rbp), %xmm6 ## 16-byte Folded Reload
## xmm6 = mem[0,0]
addpd %xmm4, %xmm6
movapd %xmm6, %xmm5
movapd LCPI19_94(%rip), %xmm15 ## xmm15 = [2.7755575615628914E-17,2.7755575615628914E-17]
mulpd %xmm15, %xmm5
addpd %xmm0, %xmm5
mulpd LCPI19_144(%rip), %xmm1
addpd %xmm3, %xmm1
mulpd -11024(%rbp), %xmm8 ## 16-byte Folded Reload
addpd %xmm8, %xmm1
movddup -9120(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = mem[0,0]
subpd %xmm0, %xmm1
movddup -192(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = mem[0,0]
addpd %xmm1, %xmm0
movddup -96(%rbp), %xmm4 ## 16-byte Folded Reload
## xmm4 = mem[0,0]
addpd %xmm0, %xmm4
movddup -8032(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = mem[0,0]
addpd %xmm4, %xmm0
movapd %xmm0, %xmm4
movapd LCPI19_141(%rip), %xmm3 ## xmm3 = [1.3877787807814457E-17,1.3877787807814457E-17]
mulpd %xmm3, %xmm4
addpd %xmm5, %xmm4
unpcklpd %xmm14, %xmm7 ## xmm7 = xmm7[0],xmm14[0]
unpcklpd %xmm10, %xmm2 ## xmm2 = xmm2[0],xmm10[0]
movapd -1104(%rbp), %xmm1 ## 16-byte Reload
movapd %xmm1, %xmm5
shufpd $1, %xmm1, %xmm5 ## xmm5 = xmm5[1],xmm1[0]
movapd %xmm5, -736(%rbp) ## 16-byte Spill
mulpd %xmm5, %xmm7
mulpd %xmm1, %xmm2
addpd %xmm7, %xmm2
mulpd %xmm9, %xmm13
addpd %xmm13, %xmm2
mulpd %xmm11, %xmm12
addpd %xmm12, %xmm2
mulpd %xmm3, %xmm6
addpd %xmm2, %xmm6
mulpd %xmm15, %xmm0
subpd %xmm0, %xmm6
mulpd -512(%rbp), %xmm4 ## 16-byte Folded Reload
mulpd -448(%rbp), %xmm6 ## 16-byte Folded Reload
subpd %xmm6, %xmm4
movupd %xmm4, 1616(%rax)
movaps -5920(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -2848(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
movaps %xmm0, -5920(%rbp) ## 16-byte Spill
movaps -5904(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -6624(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
movaps %xmm0, -5904(%rbp) ## 16-byte Spill
movsd -7584(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -400(%rbp), %xmm1 ## 16-byte Folded Reload
movsd -6272(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -272(%rbp), %xmm0 ## 16-byte Folded Reload
movapd -5968(%rbp), %xmm2 ## 16-byte Reload
mulsd -2480(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm0, %xmm2
subsd %xmm2, %xmm1
movapd -5984(%rbp), %xmm0 ## 16-byte Reload
mulsd -1776(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm1
movapd %xmm1, %xmm0
movsd LCPI19_48(%rip), %xmm5 ## xmm5 = mem[0],zero
mulsd %xmm5, %xmm0
movapd -1920(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm10
movapd -2832(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm4, %xmm10
addsd %xmm0, %xmm10
movapd -3952(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -3792(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
movapd %xmm0, %xmm8
movapd -6240(%rbp), %xmm3 ## 16-byte Reload
unpcklpd -4832(%rbp), %xmm3 ## 16-byte Folded Reload
## xmm3 = xmm3[0],mem[0]
movsd LCPI19_47(%rip), %xmm9 ## xmm9 = mem[0],zero
mulsd %xmm9, %xmm1
mulsd -1600(%rbp), %xmm4 ## 16-byte Folded Reload
addsd %xmm1, %xmm4
movapd %xmm4, -2832(%rbp) ## 16-byte Spill
movapd -5568(%rbp), %xmm12 ## 16-byte Reload
unpcklpd -4784(%rbp), %xmm12 ## 16-byte Folded Reload
## xmm12 = xmm12[0],mem[0]
movapd -11040(%rbp), %xmm7 ## 16-byte Reload
mulpd %xmm3, %xmm7
mulpd -11024(%rbp), %xmm3 ## 16-byte Folded Reload
movapd %xmm3, -6240(%rbp) ## 16-byte Spill
movapd %xmm2, %xmm1
movapd %xmm2, %xmm0
movsd -2688(%rbp), %xmm15 ## 8-byte Reload
## xmm15 = mem[0],zero
mulsd %xmm15, %xmm0
movapd -2432(%rbp), %xmm4 ## 16-byte Reload
movsd -2672(%rbp), %xmm13 ## 8-byte Reload
## xmm13 = mem[0],zero
mulsd %xmm13, %xmm4
addsd %xmm0, %xmm4
movsd LCPI19_67(%rip), %xmm6 ## xmm6 = mem[0],zero
mulsd -112(%rbp), %xmm6 ## 16-byte Folded Reload
movapd %xmm6, %xmm0
mulsd %xmm5, %xmm0
addsd %xmm4, %xmm0
movsd -9704(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
mulsd -976(%rbp), %xmm3 ## 16-byte Folded Reload
movapd -12064(%rbp), %xmm4 ## 16-byte Reload
mulsd -72(%rbp), %xmm4 ## 8-byte Folded Reload
subsd %xmm4, %xmm3
mulsd %xmm3, %xmm5
addsd %xmm0, %xmm5
movapd %xmm2, %xmm0
movapd -3136(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm2, %xmm0
addsd %xmm5, %xmm0
unpcklpd %xmm0, %xmm10 ## xmm10 = xmm10[0],xmm0[0]
movapd -6368(%rbp), %xmm0 ## 16-byte Reload
mulpd %xmm8, %xmm0
movapd %xmm8, %xmm9
addpd %xmm0, %xmm10
addpd %xmm7, %xmm10
movapd -592(%rbp), %xmm4 ## 16-byte Reload
mulsd -536(%rbp), %xmm4 ## 8-byte Folded Reload
addsd -7128(%rbp), %xmm4 ## 8-byte Folded Reload
subsd -1712(%rbp), %xmm4 ## 8-byte Folded Reload
movapd -912(%rbp), %xmm11 ## 16-byte Reload
movapd %xmm11, %xmm0
mulsd %xmm4, %xmm0
movapd -1248(%rbp), %xmm5 ## 16-byte Reload
movapd -1168(%rbp), %xmm14 ## 16-byte Reload
mulsd %xmm14, %xmm5
addsd %xmm0, %xmm5
movapd -608(%rbp), %xmm7 ## 16-byte Reload
movapd -4800(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm1, %xmm7
addsd %xmm5, %xmm7
mulsd -1264(%rbp), %xmm14 ## 16-byte Folded Reload
movapd -1504(%rbp), %xmm8 ## 16-byte Reload
movapd %xmm8, %xmm0
mulsd %xmm4, %xmm0
addsd %xmm0, %xmm14
movapd -720(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm1, %xmm5
addsd %xmm14, %xmm5
movapd -1600(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm0, %xmm15
mulsd -2128(%rbp), %xmm13 ## 16-byte Folded Reload
addsd %xmm15, %xmm13
movsd LCPI19_47(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm6
addsd %xmm13, %xmm6
mulsd %xmm1, %xmm3
addsd %xmm6, %xmm3
mulsd %xmm0, %xmm2
addsd %xmm3, %xmm2
movapd -2832(%rbp), %xmm1 ## 16-byte Reload
unpcklpd %xmm2, %xmm1 ## xmm1 = xmm1[0],xmm2[0]
movapd -6368(%rbp), %xmm0 ## 16-byte Reload
mulpd %xmm12, %xmm0
addpd %xmm0, %xmm1
movapd %xmm1, -2832(%rbp) ## 16-byte Spill
movapd -3072(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -3824(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
mulpd %xmm11, %xmm9
addpd %xmm9, %xmm0
mulpd %xmm8, %xmm12
addpd %xmm12, %xmm0
movapd -6864(%rbp), %xmm13 ## 16-byte Reload
mulsd %xmm4, %xmm13
movapd -6880(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm4, %xmm2
movsd LCPI19_57(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm7, %xmm1
movapd %xmm5, %xmm11
movsd LCPI19_55(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd %xmm4, %xmm11
mulsd %xmm4, %xmm7
movapd -9232(%rbp), %xmm12 ## 16-byte Reload
movapd %xmm12, %xmm4
movapd -2896(%rbp), %xmm9 ## 16-byte Reload
unpcklpd %xmm9, %xmm4 ## xmm4 = xmm4[0],xmm9[0]
mulpd -2048(%rbp), %xmm4 ## 16-byte Folded Reload
movapd -6480(%rbp), %xmm15 ## 16-byte Reload
movapd %xmm15, %xmm6
unpcklpd -1424(%rbp), %xmm6 ## 16-byte Folded Reload
## xmm6 = xmm6[0],mem[0]
mulpd -128(%rbp), %xmm6 ## 16-byte Folded Reload
subpd %xmm6, %xmm4
movapd -2560(%rbp), %xmm6 ## 16-byte Reload
mulpd -5920(%rbp), %xmm6 ## 16-byte Folded Reload
subpd %xmm6, %xmm4
movapd -256(%rbp), %xmm6 ## 16-byte Reload
mulpd -5904(%rbp), %xmm6 ## 16-byte Folded Reload
addpd %xmm6, %xmm4
movsd LCPI19_62(%rip), %xmm6 ## xmm6 = mem[0],zero
movapd -3808(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm3, %xmm6
movapd -48(%rbp), %xmm8 ## 16-byte Reload
unpcklpd %xmm6, %xmm8 ## xmm8 = xmm8[0],xmm6[0]
addpd %xmm10, %xmm8
movapd -576(%rbp), %xmm6 ## 16-byte Reload
unpcklpd %xmm13, %xmm6 ## xmm6 = xmm6[0],xmm13[0]
addpd %xmm8, %xmm6
movapd %xmm6, %xmm10
movapd -496(%rbp), %xmm6 ## 16-byte Reload
unpcklpd %xmm1, %xmm6 ## xmm6 = xmm6[0],xmm1[0]
addpd %xmm10, %xmm6
movddup %xmm11, %xmm10 ## xmm10 = xmm11[0,0]
movapd -752(%rbp), %xmm1 ## 16-byte Reload
addpd %xmm6, %xmm1
subpd %xmm10, %xmm6
blendpd $1, %xmm1, %xmm6 ## xmm6 = xmm1[0],xmm6[1]
movapd %xmm6, %xmm11
movapd LCPI19_94(%rip), %xmm8 ## xmm8 = [2.7755575615628914E-17,2.7755575615628914E-17]
mulpd %xmm8, %xmm11
addpd %xmm4, %xmm11
mulpd LCPI19_144(%rip), %xmm0
addpd -2832(%rbp), %xmm0 ## 16-byte Folded Reload
mulsd LCPI19_63(%rip), %xmm3
addpd -6240(%rbp), %xmm0 ## 16-byte Folded Reload
movapd -9120(%rbp), %xmm1 ## 16-byte Reload
unpcklpd %xmm3, %xmm1 ## xmm1 = xmm1[0],xmm3[0]
subpd %xmm1, %xmm0
movapd -192(%rbp), %xmm1 ## 16-byte Reload
unpcklpd %xmm2, %xmm1 ## xmm1 = xmm1[0],xmm2[0]
addpd %xmm0, %xmm1
movddup %xmm7, %xmm0 ## xmm0 = xmm7[0,0]
movapd -96(%rbp), %xmm2 ## 16-byte Reload
addpd %xmm1, %xmm2
subpd %xmm0, %xmm1
mulsd LCPI19_147(%rip), %xmm5
movddup %xmm5, %xmm0 ## xmm0 = xmm5[0,0]
subpd %xmm0, %xmm1
addpd -8032(%rbp), %xmm2 ## 16-byte Folded Reload
blendpd $2, %xmm1, %xmm2 ## xmm2 = xmm2[0],xmm1[1]
movapd %xmm2, %xmm0
movapd LCPI19_141(%rip), %xmm1 ## xmm1 = [1.3877787807814457E-17,1.3877787807814457E-17]
mulpd %xmm1, %xmm0
addpd %xmm11, %xmm0
unpcklpd %xmm9, %xmm15 ## xmm15 = xmm15[0],xmm9[0]
unpcklpd -1424(%rbp), %xmm12 ## 16-byte Folded Reload
## xmm12 = xmm12[0],mem[0]
mulpd -736(%rbp), %xmm15 ## 16-byte Folded Reload
mulpd -1104(%rbp), %xmm12 ## 16-byte Folded Reload
addpd %xmm15, %xmm12
movapd -256(%rbp), %xmm5 ## 16-byte Reload
movapd -5920(%rbp), %xmm3 ## 16-byte Reload
mulpd %xmm5, %xmm3
addpd %xmm3, %xmm12
movapd -2560(%rbp), %xmm4 ## 16-byte Reload
movapd -5904(%rbp), %xmm3 ## 16-byte Reload
mulpd %xmm4, %xmm3
addpd %xmm3, %xmm12
mulpd %xmm1, %xmm6
addpd %xmm12, %xmm6
mulpd %xmm8, %xmm2
subpd %xmm2, %xmm6
mulpd -512(%rbp), %xmm0 ## 16-byte Folded Reload
mulpd -448(%rbp), %xmm6 ## 16-byte Folded Reload
subpd %xmm6, %xmm0
movupd %xmm0, 1632(%rax)
movapd -1856(%rbp), %xmm14 ## 16-byte Reload
unpcklpd -2576(%rbp), %xmm14 ## 16-byte Folded Reload
## xmm14 = xmm14[0],mem[0]
movapd -6448(%rbp), %xmm11 ## 16-byte Reload
unpcklpd -3248(%rbp), %xmm11 ## 16-byte Folded Reload
## xmm11 = xmm11[0],mem[0]
movapd -1280(%rbp), %xmm2 ## 16-byte Reload
unpcklpd -10240(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = xmm2[0],mem[0]
movsd -1704(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movapd -1920(%rbp), %xmm10 ## 16-byte Reload
mulsd %xmm0, %xmm10
movapd -1600(%rbp), %xmm13 ## 16-byte Reload
mulsd %xmm0, %xmm13
movapd -2336(%rbp), %xmm7 ## 16-byte Reload
unpcklpd -11792(%rbp), %xmm7 ## 16-byte Folded Reload
## xmm7 = xmm7[0],mem[0]
movapd -6368(%rbp), %xmm0 ## 16-byte Reload
movapd %xmm0, %xmm9
mulpd %xmm2, %xmm9
mulpd -912(%rbp), %xmm2 ## 16-byte Folded Reload
mulpd %xmm7, %xmm0
movapd %xmm0, -6368(%rbp) ## 16-byte Spill
mulpd -1504(%rbp), %xmm7 ## 16-byte Folded Reload
movsd -1696(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movapd -2432(%rbp), %xmm8 ## 16-byte Reload
mulsd %xmm1, %xmm8
movapd -2128(%rbp), %xmm15 ## 16-byte Reload
mulsd %xmm1, %xmm15
movapd %xmm4, %xmm3
mulpd %xmm14, %xmm3
movapd %xmm5, %xmm1
mulpd %xmm5, %xmm14
mulpd %xmm11, %xmm1
mulpd %xmm4, %xmm11
movapd -4816(%rbp), %xmm6 ## 16-byte Reload
movapd %xmm6, %xmm4
unpcklpd -10144(%rbp), %xmm4 ## 16-byte Folded Reload
## xmm4 = xmm4[0],mem[0]
mulpd -2048(%rbp), %xmm4 ## 16-byte Folded Reload
movapd -2912(%rbp), %xmm12 ## 16-byte Reload
movapd %xmm12, %xmm5
unpcklpd -10160(%rbp), %xmm5 ## 16-byte Folded Reload
## xmm5 = xmm5[0],mem[0]
mulpd -128(%rbp), %xmm5 ## 16-byte Folded Reload
subpd %xmm5, %xmm4
subpd %xmm3, %xmm4
addpd %xmm1, %xmm4
blendpd $2, -864(%rbp), %xmm10 ## 16-byte Folded Reload
## xmm10 = xmm10[0],mem[1]
movapd -112(%rbp), %xmm5 ## 16-byte Reload
addpd %xmm9, %xmm10
blendpd $2, -640(%rbp), %xmm8 ## 16-byte Folded Reload
## xmm8 = xmm8[0],mem[1]
addpd %xmm10, %xmm8
movapd %xmm8, %xmm0
movapd LCPI19_94(%rip), %xmm9 ## xmm9 = [2.7755575615628914E-17,2.7755575615628914E-17]
mulpd %xmm9, %xmm0
addpd %xmm4, %xmm0
blendpd $2, -10080(%rbp), %xmm13 ## 16-byte Folded Reload
## xmm13 = xmm13[0],mem[1]
addpd -6368(%rbp), %xmm13 ## 16-byte Folded Reload
movapd -1552(%rbp), %xmm1 ## 16-byte Reload
unpcklpd -2400(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = xmm1[0],mem[0]
addpd %xmm2, %xmm1
addpd %xmm7, %xmm1
mulpd LCPI19_144(%rip), %xmm1
addpd %xmm13, %xmm1
blendpd $2, -11008(%rbp), %xmm15 ## 16-byte Folded Reload
## xmm15 = xmm15[0],mem[1]
addpd %xmm1, %xmm15
movapd %xmm15, %xmm1
movapd LCPI19_141(%rip), %xmm2 ## xmm2 = [1.3877787807814457E-17,1.3877787807814457E-17]
mulpd %xmm2, %xmm1
addpd %xmm0, %xmm1
mulpd -512(%rbp), %xmm1 ## 16-byte Folded Reload
unpcklpd -10144(%rbp), %xmm12 ## 16-byte Folded Reload
## xmm12 = xmm12[0],mem[0]
mulpd -736(%rbp), %xmm12 ## 16-byte Folded Reload
unpcklpd -10160(%rbp), %xmm6 ## 16-byte Folded Reload
## xmm6 = xmm6[0],mem[0]
mulpd -1104(%rbp), %xmm6 ## 16-byte Folded Reload
addpd %xmm12, %xmm6
addpd %xmm14, %xmm6
addpd %xmm11, %xmm6
movapd %xmm8, %xmm0
mulpd %xmm2, %xmm0
addpd %xmm6, %xmm0
movapd -176(%rbp), %xmm8 ## 16-byte Reload
mulpd %xmm9, %xmm15
subpd %xmm15, %xmm0
mulpd -448(%rbp), %xmm0 ## 16-byte Folded Reload
subpd %xmm0, %xmm1
movupd %xmm1, 1648(%rax)
movsd -12496(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movapd -528(%rbp), %xmm7 ## 16-byte Reload
mulsd %xmm7, %xmm0
movapd -9456(%rbp), %xmm3 ## 16-byte Reload
addsd %xmm0, %xmm3
movapd -16672(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -16240(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
movapd -16704(%rbp), %xmm2 ## 16-byte Reload
mulpd %xmm0, %xmm2
movapd -14224(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm4, %xmm0
shufpd $1, -16400(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[1],mem[0]
movapd -11376(%rbp), %xmm1 ## 16-byte Reload
unpcklpd %xmm5, %xmm1 ## xmm1 = xmm1[0],xmm5[0]
mulpd %xmm1, %xmm0
unpcklpd -2096(%rbp), %xmm3 ## 16-byte Folded Reload
## xmm3 = xmm3[0],mem[0]
addpd %xmm2, %xmm3
addpd %xmm0, %xmm3
movapd -11360(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -11296(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
mulpd %xmm4, %xmm0
addsubpd %xmm0, %xmm3
movupd %xmm3, 1664(%rax)
movapd -15984(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -15472(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
movsd -12448(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm7, %xmm2
movapd -4592(%rbp), %xmm1 ## 16-byte Reload
addsd %xmm2, %xmm1
movapd -2080(%rbp), %xmm3 ## 16-byte Reload
unpcklpd %xmm1, %xmm3 ## xmm3 = xmm3[0],xmm1[0]
mulpd -11392(%rbp), %xmm0 ## 16-byte Folded Reload
addpd %xmm0, %xmm3
movapd %xmm5, %xmm0
unpcklpd -11184(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
movapd -16128(%rbp), %xmm2 ## 16-byte Reload
blendpd $2, %xmm4, %xmm2 ## xmm2 = xmm2[0],xmm4[1]
mulpd %xmm0, %xmm2
addpd %xmm3, %xmm2
movapd -11248(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -11168(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
movddup %xmm4, %xmm1 ## xmm1 = xmm4[0,0]
mulpd %xmm1, %xmm0
subpd %xmm0, %xmm2
movupd %xmm2, 1680(%rax)
movapd -14832(%rbp), %xmm3 ## 16-byte Reload
unpcklpd -9280(%rbp), %xmm3 ## 16-byte Folded Reload
## xmm3 = xmm3[0],mem[0]
movapd -15008(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -9424(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
movapd -4576(%rbp), %xmm2 ## 16-byte Reload
unpcklpd -5184(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = xmm2[0],mem[0]
mulpd %xmm8, %xmm3
addpd %xmm3, %xmm2
movapd -144(%rbp), %xmm3 ## 16-byte Reload
mulpd %xmm3, %xmm0
addpd %xmm0, %xmm2
movapd -11072(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -3984(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
mulpd -16656(%rbp), %xmm0 ## 16-byte Folded Reload
addpd %xmm2, %xmm0
movapd -11056(%rbp), %xmm2 ## 16-byte Reload
unpcklpd -3968(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = xmm2[0],mem[0]
mulpd %xmm1, %xmm2
subpd %xmm2, %xmm0
movupd %xmm0, 1696(%rax)
movapd -8160(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm7, %xmm0
movapd -960(%rbp), %xmm1 ## 16-byte Reload
addsd %xmm0, %xmm1
movapd -3344(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm5, %xmm2
addsd %xmm1, %xmm2
movapd %xmm4, %xmm0
shufpd $1, -2624(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[1],mem[0]
movapd -6064(%rbp), %xmm1 ## 16-byte Reload
unpcklpd %xmm7, %xmm1 ## xmm1 = xmm1[0],xmm7[0]
mulpd %xmm1, %xmm0
unpcklpd -4704(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = xmm2[0],mem[0]
addpd %xmm0, %xmm2
unpcklpd -4288(%rbp), %xmm4 ## 16-byte Folded Reload
## xmm4 = xmm4[0],mem[0]
movapd -6048(%rbp), %xmm0 ## 16-byte Reload
unpcklpd %xmm5, %xmm0 ## xmm0 = xmm0[0],xmm5[0]
mulpd %xmm0, %xmm4
addsubpd %xmm4, %xmm2
movupd %xmm2, 1712(%rax)
movsd -7120(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm5, %xmm0
subsd -3936(%rbp), %xmm0 ## 8-byte Folded Reload
movsd %xmm0, 1728(%rax)
LBB19_108:
movapd -6944(%rbp), %xmm4 ## 16-byte Reload
movapd -8096(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm4, %xmm1
movapd -6000(%rbp), %xmm3 ## 16-byte Reload
movapd -9472(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm3, %xmm0
subsd %xmm0, %xmm1
movapd -10560(%rbp), %xmm0 ## 16-byte Reload
mulsd -368(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm1
movapd -4864(%rbp), %xmm8 ## 16-byte Reload
mulsd -344(%rbp), %xmm8 ## 8-byte Folded Reload
addsd %xmm1, %xmm8
movapd -6896(%rbp), %xmm1 ## 16-byte Reload
mulsd -6144(%rbp), %xmm1 ## 16-byte Folded Reload
movapd -9536(%rbp), %xmm0 ## 16-byte Reload
mulsd -8208(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm1
movsd -464(%rbp), %xmm13 ## 8-byte Reload
## xmm13 = mem[0],zero
movapd -12352(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm13, %xmm0
subsd %xmm0, %xmm1
movapd -9552(%rbp), %xmm0 ## 16-byte Reload
mulsd -352(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm1
movapd -6960(%rbp), %xmm2 ## 16-byte Reload
mulsd -1720(%rbp), %xmm2 ## 8-byte Folded Reload
movapd -6976(%rbp), %xmm0 ## 16-byte Reload
mulsd -4896(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm2, %xmm0
movapd -10608(%rbp), %xmm2 ## 16-byte Reload
mulsd -288(%rbp), %xmm2 ## 8-byte Folded Reload
subsd %xmm2, %xmm0
movapd -10624(%rbp), %xmm6 ## 16-byte Reload
mulsd -408(%rbp), %xmm6 ## 8-byte Folded Reload
addsd %xmm0, %xmm6
movapd -6160(%rbp), %xmm2 ## 16-byte Reload
mulsd -2816(%rbp), %xmm2 ## 16-byte Folded Reload
movapd -4256(%rbp), %xmm0 ## 16-byte Reload
mulsd -2640(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm2, %xmm0
movapd -6752(%rbp), %xmm15 ## 16-byte Reload
movsd LCPI19_50(%rip), %xmm2 ## xmm2 = mem[0],zero
mulsd %xmm2, %xmm15
addsd %xmm15, %xmm0
movapd %xmm0, -4256(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm6
movapd -10576(%rbp), %xmm0 ## 16-byte Reload
mulsd -152(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm6
movapd -8176(%rbp), %xmm5 ## 16-byte Reload
mulsd -224(%rbp), %xmm5 ## 8-byte Folded Reload
addsd %xmm6, %xmm5
movapd -9312(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm4, %xmm6
movapd -12304(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm3, %xmm0
subsd %xmm0, %xmm6
movapd -12320(%rbp), %xmm0 ## 16-byte Reload
mulsd -360(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm6
movapd -2928(%rbp), %xmm0 ## 16-byte Reload
movapd -9344(%rbp), %xmm4 ## 16-byte Reload
movapd %xmm0, %xmm11
mulsd %xmm0, %xmm4
addsd %xmm6, %xmm4
movapd -6848(%rbp), %xmm3 ## 16-byte Reload
mulsd -4176(%rbp), %xmm3 ## 16-byte Folded Reload
movapd -9488(%rbp), %xmm0 ## 16-byte Reload
mulsd -5872(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm3
movapd -12336(%rbp), %xmm0 ## 16-byte Reload
mulsd -304(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm3
movapd -9504(%rbp), %xmm0 ## 16-byte Reload
mulsd -216(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm3
movapd -6912(%rbp), %xmm6 ## 16-byte Reload
mulsd -3872(%rbp), %xmm6 ## 8-byte Folded Reload
movapd -6928(%rbp), %xmm0 ## 16-byte Reload
mulsd -3232(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm6, %xmm0
movapd -10528(%rbp), %xmm6 ## 16-byte Reload
mulsd -296(%rbp), %xmm6 ## 8-byte Folded Reload
subsd %xmm6, %xmm0
movapd -10544(%rbp), %xmm7 ## 16-byte Reload
mulsd -376(%rbp), %xmm7 ## 8-byte Folded Reload
addsd %xmm0, %xmm7
movapd -9392(%rbp), %xmm6 ## 16-byte Reload
mulsd -1568(%rbp), %xmm6 ## 16-byte Folded Reload
movapd -6832(%rbp), %xmm0 ## 16-byte Reload
mulsd -2112(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm6, %xmm0
movapd -5168(%rbp), %xmm6 ## 16-byte Reload
mulsd %xmm2, %xmm6
movapd %xmm6, -5168(%rbp) ## 16-byte Spill
addsd %xmm6, %xmm0
movapd %xmm0, -6832(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm7
movapd -10496(%rbp), %xmm0 ## 16-byte Reload
mulsd -160(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm7
movapd -8064(%rbp), %xmm2 ## 16-byte Reload
mulsd -232(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm7, %xmm2
movsd LCPI19_148(%rip), %xmm0 ## xmm0 = mem[0],zero
addsd %xmm0, %xmm5
addsd %xmm0, %xmm2
movsd LCPI19_149(%rip), %xmm0 ## xmm0 = mem[0],zero
addsd %xmm0, %xmm5
movapd %xmm5, -8176(%rbp) ## 16-byte Spill
addsd %xmm5, %xmm1
addsd %xmm0, %xmm2
movsd LCPI19_150(%rip), %xmm0 ## xmm0 = mem[0],zero
addsd %xmm0, %xmm1
movapd %xmm1, -6896(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm8
movapd %xmm2, -8064(%rbp) ## 16-byte Spill
addsd %xmm2, %xmm3
addsd %xmm0, %xmm3
movsd LCPI19_151(%rip), %xmm0 ## xmm0 = mem[0],zero
addsd %xmm0, %xmm8
movapd %xmm8, -4864(%rbp) ## 16-byte Spill
movapd %xmm3, -6848(%rbp) ## 16-byte Spill
addsd %xmm3, %xmm4
addsd %xmm0, %xmm4
testq %rax, %rax
movapd -2320(%rbp), %xmm8 ## 16-byte Reload
movsd -7136(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
je LBB19_110
## %bb.109:
movapd -5888(%rbp), %xmm0 ## 16-byte Reload
mulsd -8992(%rbp), %xmm0 ## 16-byte Folded Reload
movapd -6784(%rbp), %xmm2 ## 16-byte Reload
mulsd %xmm12, %xmm2
addsd %xmm0, %xmm2
movapd -6736(%rbp), %xmm0 ## 16-byte Reload
mulsd -9008(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm2
movsd -3000(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -5272(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm2, %xmm1
addsd LCPI19_152(%rip), %xmm1
movapd %xmm8, %xmm0
mulsd -9024(%rbp), %xmm0 ## 16-byte Folded Reload
subsd %xmm0, %xmm1
movapd -3200(%rbp), %xmm2 ## 16-byte Reload
movapd %xmm2, %xmm0
mulsd -10096(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
addsd -4864(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm8, %xmm1
mulsd -9968(%rbp), %xmm1 ## 16-byte Folded Reload
subsd %xmm1, %xmm0
movapd %xmm2, %xmm1
mulsd -9104(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
addsd %xmm4, %xmm1
addsd LCPI19_153(%rip), %xmm1
movsd %xmm1, 1736(%rax)
LBB19_110:
movapd %xmm4, -9344(%rbp) ## 16-byte Spill
movsd -368(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd LCPI19_125(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd %xmm4, %xmm0
movapd -10400(%rbp), %xmm3 ## 16-byte Reload
subsd %xmm0, %xmm3
movapd %xmm13, %xmm0
movsd LCPI19_130(%rip), %xmm14 ## xmm14 = mem[0],zero
mulsd %xmm14, %xmm0
movapd -9440(%rbp), %xmm5 ## 16-byte Reload
subsd %xmm0, %xmm5
movsd -288(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd LCPI19_50(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
movsd -3568(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
subsd %xmm0, %xmm2
addsd -8192(%rbp), %xmm2 ## 16-byte Folded Reload
movsd -152(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd LCPI19_129(%rip), %xmm13 ## xmm13 = mem[0],zero
mulsd %xmm13, %xmm0
subsd %xmm0, %xmm2
movsd -360(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm4, %xmm0
movapd -10336(%rbp), %xmm7 ## 16-byte Reload
subsd %xmm0, %xmm7
movsd -304(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm14, %xmm0
movapd -9376(%rbp), %xmm4 ## 16-byte Reload
subsd %xmm0, %xmm4
movsd -296(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm1, %xmm0
movsd -3552(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
subsd %xmm0, %xmm1
addsd -8112(%rbp), %xmm1 ## 16-byte Folded Reload
movsd -160(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm13, %xmm0
subsd %xmm0, %xmm1
movsd -5304(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd LCPI19_128(%rip), %xmm6
movsd %xmm2, -3568(%rbp) ## 8-byte Spill
addsd %xmm2, %xmm5
movapd %xmm5, -9440(%rbp) ## 16-byte Spill
addsd %xmm5, %xmm3
movsd %xmm1, -3552(%rbp) ## 8-byte Spill
addsd %xmm1, %xmm4
movapd %xmm4, -9376(%rbp) ## 16-byte Spill
addsd %xmm4, %xmm7
testq %rax, %rax
movsd -5296(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
je LBB19_112
## %bb.111:
movapd -16592(%rbp), %xmm1 ## 16-byte Reload
addsd %xmm6, %xmm1
subsd -8992(%rbp), %xmm1 ## 16-byte Folded Reload
movsd LCPI19_126(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd %xmm8, %xmm0
addsd %xmm0, %xmm1
addsd %xmm3, %xmm1
addsd %xmm0, %xmm1
addsd %xmm7, %xmm1
movsd %xmm1, 1744(%rax)
LBB19_112:
movapd %xmm7, -10336(%rbp) ## 16-byte Spill
movsd %xmm6, -5304(%rbp) ## 8-byte Spill
movapd %xmm3, -10400(%rbp) ## 16-byte Spill
movsd -344(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd LCPI19_125(%rip), %xmm4 ## xmm4 = mem[0],zero
mulsd %xmm4, %xmm0
movapd -10384(%rbp), %xmm14 ## 16-byte Reload
addsd %xmm0, %xmm14
movsd -352(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd LCPI19_130(%rip), %xmm7 ## xmm7 = mem[0],zero
mulsd %xmm7, %xmm0
movapd -8144(%rbp), %xmm2 ## 16-byte Reload
subsd %xmm0, %xmm2
movsd -408(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd LCPI19_50(%rip), %xmm1 ## xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
movapd -10592(%rbp), %xmm5 ## 16-byte Reload
addsd %xmm0, %xmm5
addsd -9520(%rbp), %xmm5 ## 16-byte Folded Reload
movsd -224(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
movsd LCPI19_129(%rip), %xmm13 ## xmm13 = mem[0],zero
mulsd %xmm13, %xmm3
addsd %xmm5, %xmm3
mulsd %xmm11, %xmm4
movapd -10320(%rbp), %xmm6 ## 16-byte Reload
addsd %xmm4, %xmm6
mulsd -216(%rbp), %xmm7 ## 8-byte Folded Reload
movapd -9360(%rbp), %xmm0 ## 16-byte Reload
subsd %xmm7, %xmm0
mulsd -376(%rbp), %xmm1 ## 8-byte Folded Reload
movapd -10512(%rbp), %xmm5 ## 16-byte Reload
addsd %xmm1, %xmm5
addsd -8128(%rbp), %xmm5 ## 16-byte Folded Reload
mulsd -232(%rbp), %xmm13 ## 8-byte Folded Reload
addsd %xmm5, %xmm13
mulsd LCPI19_120(%rip), %xmm10
movsd %xmm3, -96(%rbp) ## 8-byte Spill
addsd %xmm3, %xmm2
movapd %xmm2, -8144(%rbp) ## 16-byte Spill
addsd %xmm2, %xmm14
movsd %xmm13, -272(%rbp) ## 8-byte Spill
addsd %xmm13, %xmm0
movapd %xmm0, -9360(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm6
testq %rax, %rax
je LBB19_114
## %bb.113:
movapd -16608(%rbp), %xmm0 ## 16-byte Reload
addsd %xmm10, %xmm0
addsd %xmm0, %xmm12
movsd LCPI19_131(%rip), %xmm0 ## xmm0 = mem[0],zero
mulsd -3200(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm0, %xmm12
addsd %xmm14, %xmm12
addsd %xmm0, %xmm12
addsd %xmm6, %xmm12
movsd %xmm12, 1752(%rax)
LBB19_114:
movapd %xmm6, -10320(%rbp) ## 16-byte Spill
movsd %xmm10, -5296(%rbp) ## 8-byte Spill
movsd -2664(%rbp), %xmm5 ## 8-byte Reload
## xmm5 = mem[0],zero
mulsd %xmm11, %xmm5
movapd -12288(%rbp), %xmm0 ## 16-byte Reload
mulsd -360(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm5
movapd -4176(%rbp), %xmm0 ## 16-byte Reload
mulsd -7432(%rbp), %xmm0 ## 8-byte Folded Reload
movapd -5872(%rbp), %xmm1 ## 16-byte Reload
mulsd -7424(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm0
movapd -10448(%rbp), %xmm1 ## 16-byte Reload
mulsd -304(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm0
movsd -5368(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd -3872(%rbp), %xmm2 ## 8-byte Folded Reload
movsd -1984(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -3232(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm2, %xmm1
movapd -10464(%rbp), %xmm2 ## 16-byte Reload
mulsd -296(%rbp), %xmm2 ## 8-byte Folded Reload
subsd %xmm2, %xmm1
movsd -5360(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd -376(%rbp), %xmm4 ## 8-byte Folded Reload
addsd %xmm1, %xmm4
movsd -1056(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd -1568(%rbp), %xmm2 ## 16-byte Folded Reload
movapd -1040(%rbp), %xmm1 ## 16-byte Reload
mulsd -2112(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm2, %xmm1
addsd -5168(%rbp), %xmm1 ## 16-byte Folded Reload
movapd %xmm1, -1040(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm4
movapd -10480(%rbp), %xmm1 ## 16-byte Reload
mulsd -160(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm4
movsd -920(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -232(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm4, %xmm1
movsd -2768(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd -216(%rbp), %xmm2 ## 8-byte Folded Reload
addsd LCPI19_148(%rip), %xmm1
addsd LCPI19_149(%rip), %xmm1
subsd %xmm2, %xmm0
movsd %xmm1, -920(%rbp) ## 8-byte Spill
addsd %xmm1, %xmm0
addsd LCPI19_150(%rip), %xmm0
movapd %xmm0, -4176(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm5
addsd LCPI19_151(%rip), %xmm5
testq %rax, %rax
movapd -10064(%rbp), %xmm2 ## 16-byte Reload
movsd -2184(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
movapd -10000(%rbp), %xmm6 ## 16-byte Reload
movsd -464(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
je LBB19_116
## %bb.115:
movapd -3200(%rbp), %xmm0 ## 16-byte Reload
mulsd -10896(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm8, %xmm1
mulsd %xmm2, %xmm1
subsd %xmm1, %xmm0
addsd %xmm5, %xmm0
movsd %xmm0, 1760(%rax)
LBB19_116:
movsd %xmm5, -2664(%rbp) ## 8-byte Spill
mulsd -344(%rbp), %xmm10 ## 8-byte Folded Reload
movsd -7384(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -368(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm10
movapd -6144(%rbp), %xmm0 ## 16-byte Reload
mulsd -7400(%rbp), %xmm0 ## 8-byte Folded Reload
movapd -8208(%rbp), %xmm1 ## 16-byte Reload
mulsd -7392(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm0
movsd -5336(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm12, %xmm1
subsd %xmm1, %xmm0
movsd -3528(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd -1720(%rbp), %xmm2 ## 8-byte Folded Reload
movsd -1952(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -4896(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm2, %xmm1
movsd -3512(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd -288(%rbp), %xmm2 ## 8-byte Folded Reload
subsd %xmm2, %xmm1
movapd -10432(%rbp), %xmm5 ## 16-byte Reload
mulsd -408(%rbp), %xmm5 ## 8-byte Folded Reload
addsd %xmm1, %xmm5
movapd -4608(%rbp), %xmm2 ## 16-byte Reload
mulsd -2816(%rbp), %xmm2 ## 16-byte Folded Reload
movapd -816(%rbp), %xmm1 ## 16-byte Reload
mulsd -2640(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm2, %xmm1
addsd %xmm15, %xmm1
movapd %xmm1, -816(%rbp) ## 16-byte Spill
addsd %xmm1, %xmm5
movsd -3520(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -152(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm5
movsd -2240(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -224(%rbp), %xmm1 ## 8-byte Folded Reload
addsd %xmm5, %xmm1
movsd -1968(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd -352(%rbp), %xmm2 ## 8-byte Folded Reload
addsd LCPI19_148(%rip), %xmm1
addsd LCPI19_149(%rip), %xmm1
subsd %xmm2, %xmm0
movsd %xmm1, -2240(%rbp) ## 8-byte Spill
addsd %xmm1, %xmm0
addsd LCPI19_150(%rip), %xmm0
movapd %xmm0, -6144(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm10
addsd LCPI19_151(%rip), %xmm10
testq %rax, %rax
movapd -7600(%rbp), %xmm5 ## 16-byte Reload
movapd -3600(%rbp), %xmm9 ## 16-byte Reload
je LBB19_118
## %bb.117:
movapd -3200(%rbp), %xmm0 ## 16-byte Reload
mulsd -10816(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm8, %xmm1
mulsd %xmm6, %xmm1
subsd %xmm1, %xmm0
addsd %xmm10, %xmm0
movsd %xmm0, 1768(%rax)
LBB19_118:
movapd %xmm14, -10384(%rbp) ## 16-byte Spill
movapd %xmm11, %xmm7
mulsd %xmm5, %xmm7
movsd -360(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm9, %xmm1
subsd %xmm1, %xmm7
movsd -6352(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -3872(%rbp), %xmm1 ## 8-byte Folded Reload
movsd -800(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -3232(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
movapd -7856(%rbp), %xmm1 ## 16-byte Reload
mulsd -296(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm0
movapd -7840(%rbp), %xmm2 ## 16-byte Reload
mulsd -376(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm0, %xmm2
movapd -336(%rbp), %xmm1 ## 16-byte Reload
mulsd -1568(%rbp), %xmm1 ## 16-byte Folded Reload
movapd -3120(%rbp), %xmm0 ## 16-byte Reload
mulsd -2112(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
addsd -5168(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm0, -3120(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm2
movapd -6320(%rbp), %xmm0 ## 16-byte Reload
mulsd -160(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm2
movsd -1744(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -232(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm2, %xmm0
addsd LCPI19_148(%rip), %xmm0
addsd LCPI19_149(%rip), %xmm0
movapd -6336(%rbp), %xmm1 ## 16-byte Reload
mulsd -304(%rbp), %xmm1 ## 8-byte Folded Reload
movsd -3488(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd -216(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm1, %xmm2
movsd %xmm0, -1744(%rbp) ## 8-byte Spill
movapd %xmm0, %xmm6
subsd %xmm2, %xmm6
addsd LCPI19_150(%rip), %xmm6
addsd %xmm6, %xmm7
testq %rax, %rax
je LBB19_120
## %bb.119:
movapd -3200(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm5, %xmm1
movapd %xmm8, %xmm2
mulsd %xmm9, %xmm2
subsd %xmm2, %xmm1
addsd %xmm7, %xmm1
movsd %xmm1, 1776(%rax)
LBB19_120:
movsd -344(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
movapd -5504(%rbp), %xmm4 ## 16-byte Reload
mulsd %xmm4, %xmm3
movsd -368(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movapd -5488(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm5, %xmm1
subsd %xmm1, %xmm3
movsd -1632(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -1720(%rbp), %xmm1 ## 8-byte Folded Reload
movsd -1120(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -4896(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
movsd -4944(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -288(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm0
movapd -6304(%rbp), %xmm2 ## 16-byte Reload
mulsd -408(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm0, %xmm2
movapd -432(%rbp), %xmm1 ## 16-byte Reload
mulsd -2816(%rbp), %xmm1 ## 16-byte Folded Reload
movapd -2000(%rbp), %xmm0 ## 16-byte Reload
mulsd -2640(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
addsd %xmm15, %xmm0
movapd %xmm0, -2000(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm2
movsd -2960(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -152(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm2
movsd -1112(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -224(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm2, %xmm0
addsd LCPI19_148(%rip), %xmm0
addsd LCPI19_149(%rip), %xmm0
movsd -4544(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm12, %xmm1
movsd -4960(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd -352(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm1, %xmm2
movsd %xmm0, -1112(%rbp) ## 8-byte Spill
subsd %xmm2, %xmm0
addsd LCPI19_150(%rip), %xmm0
movsd %xmm0, -48(%rbp) ## 8-byte Spill
addsd %xmm0, %xmm3
testq %rax, %rax
je LBB19_122
## %bb.121:
movapd -3200(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm4, %xmm1
movapd %xmm8, %xmm2
mulsd %xmm5, %xmm2
subsd %xmm2, %xmm1
addsd %xmm3, %xmm1
movsd %xmm1, 1784(%rax)
LBB19_122:
movsd %xmm3, -192(%rbp) ## 8-byte Spill
movapd %xmm11, %xmm13
movsd -2464(%rbp), %xmm11 ## 8-byte Reload
## xmm11 = mem[0],zero
mulsd %xmm11, %xmm13
movsd -360(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movapd -4624(%rbp), %xmm5 ## 16-byte Reload
mulsd %xmm5, %xmm1
subsd %xmm1, %xmm13
movsd -2192(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -3872(%rbp), %xmm0 ## 8-byte Folded Reload
movsd -1360(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -3232(%rbp), %xmm1 ## 16-byte Folded Reload
addsd %xmm0, %xmm1
movapd -4048(%rbp), %xmm0 ## 16-byte Reload
mulsd -296(%rbp), %xmm0 ## 8-byte Folded Reload
subsd %xmm0, %xmm1
movapd -3712(%rbp), %xmm2 ## 16-byte Reload
mulsd -376(%rbp), %xmm2 ## 8-byte Folded Reload
addsd %xmm1, %xmm2
movapd -3328(%rbp), %xmm1 ## 16-byte Reload
mulsd -1568(%rbp), %xmm1 ## 16-byte Folded Reload
movapd -1760(%rbp), %xmm0 ## 16-byte Reload
mulsd -2112(%rbp), %xmm0 ## 8-byte Folded Reload
addsd %xmm1, %xmm0
addsd -5168(%rbp), %xmm0 ## 16-byte Folded Reload
movapd %xmm0, -1760(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm2
movsd -160(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm5, %xmm1
subsd %xmm1, %xmm2
movsd -232(%rbp), %xmm9 ## 8-byte Reload
## xmm9 = mem[0],zero
mulsd %xmm11, %xmm9
addsd %xmm2, %xmm9
addsd LCPI19_148(%rip), %xmm9
addsd LCPI19_149(%rip), %xmm9
movsd -304(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm5, %xmm1
movsd -216(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd %xmm11, %xmm2
addsd %xmm1, %xmm2
movapd %xmm9, %xmm4
subsd %xmm2, %xmm4
addsd %xmm4, %xmm13
testq %rax, %rax
je LBB19_124
## %bb.123:
movapd -3200(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm11, %xmm1
movapd %xmm8, %xmm2
mulsd %xmm5, %xmm2
subsd %xmm2, %xmm1
addsd %xmm13, %xmm1
movsd %xmm1, 1792(%rax)
LBB19_124:
movsd -2480(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -2816(%rbp), %xmm1 ## 16-byte Folded Reload
movapd -1776(%rbp), %xmm0 ## 16-byte Reload
mulsd -2640(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
addsd %xmm15, %xmm0
testq %rax, %rax
movapd %xmm0, -1776(%rbp) ## 16-byte Spill
je LBB19_126
## %bb.125:
movsd %xmm10, -2184(%rbp) ## 8-byte Spill
movsd %xmm13, -592(%rbp) ## 8-byte Spill
movsd -344(%rbp), %xmm13 ## 8-byte Reload
## xmm13 = mem[0],zero
movapd -3312(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm1, %xmm13
movapd %xmm1, %xmm3
movsd %xmm4, -608(%rbp) ## 8-byte Spill
movsd -368(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movapd -4656(%rbp), %xmm14 ## 16-byte Reload
mulsd %xmm14, %xmm1
subsd %xmm1, %xmm13
movsd -2200(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd -1720(%rbp), %xmm1 ## 8-byte Folded Reload
movsd -4080(%rbp), %xmm2 ## 8-byte Reload
## xmm2 = mem[0],zero
mulsd -4896(%rbp), %xmm2 ## 16-byte Folded Reload
addsd %xmm1, %xmm2
movsd -1368(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movapd %xmm15, %xmm8
movsd %xmm7, -128(%rbp) ## 8-byte Spill
movsd %xmm6, -448(%rbp) ## 8-byte Spill
mulsd -288(%rbp), %xmm1 ## 8-byte Folded Reload
subsd %xmm1, %xmm2
movsd -408(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
movsd -4064(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm4, %xmm1
addsd %xmm1, %xmm2
addsd %xmm0, %xmm2
movsd -152(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm14, %xmm1
subsd %xmm1, %xmm2
movsd -224(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
movapd %xmm3, %xmm6
mulsd %xmm3, %xmm1
addsd %xmm1, %xmm2
movapd %xmm12, %xmm1
mulsd %xmm14, %xmm1
movsd -352(%rbp), %xmm12 ## 8-byte Reload
## xmm12 = mem[0],zero
movapd %xmm12, %xmm7
mulsd %xmm3, %xmm7
addsd %xmm1, %xmm7
addsd LCPI19_148(%rip), %xmm2
addsd LCPI19_149(%rip), %xmm2
movapd %xmm2, %xmm0
subsd %xmm7, %xmm0
movapd %xmm0, %xmm10
movsd %xmm0, -720(%rbp) ## 8-byte Spill
movapd %xmm3, %xmm7
movapd -2256(%rbp), %xmm15 ## 16-byte Reload
unpcklpd %xmm15, %xmm7 ## xmm7 = xmm7[0],xmm15[0]
movapd -3200(%rbp), %xmm11 ## 16-byte Reload
movddup %xmm11, %xmm1 ## xmm1 = xmm11[0,0]
mulpd %xmm1, %xmm7
movapd -2496(%rbp), %xmm5 ## 16-byte Reload
unpcklpd %xmm5, %xmm14 ## xmm14 = xmm14[0],xmm5[0]
movddup -2320(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = mem[0,0]
mulpd %xmm0, %xmm14
subpd %xmm14, %xmm7
movapd -2928(%rbp), %xmm3 ## 16-byte Reload
mulsd %xmm15, %xmm3
movsd -360(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm5, %xmm0
subsd %xmm0, %xmm3
movsd -376(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
mulsd %xmm15, %xmm1
movsd -296(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm5, %xmm0
subsd %xmm0, %xmm1
movsd -3408(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
mulsd -3560(%rbp), %xmm6 ## 8-byte Folded Reload
movsd -648(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -8080(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm6, %xmm0
movapd -5168(%rbp), %xmm14 ## 16-byte Reload
addsd %xmm0, %xmm14
addsd %xmm14, %xmm1
movsd -160(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm5, %xmm0
subsd %xmm0, %xmm1
movsd -232(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movapd %xmm15, %xmm6
mulsd %xmm15, %xmm0
addsd %xmm0, %xmm1
movsd -304(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm5, %xmm0
movsd -216(%rbp), %xmm15 ## 8-byte Reload
## xmm15 = mem[0],zero
mulsd %xmm6, %xmm15
addsd %xmm0, %xmm15
movapd %xmm1, %xmm0
subsd %xmm15, %xmm0
movsd %xmm0, -400(%rbp) ## 8-byte Spill
addsd %xmm10, %xmm13
movapd %xmm13, -512(%rbp) ## 16-byte Spill
addsd %xmm0, %xmm3
unpcklpd %xmm3, %xmm13 ## xmm13 = xmm13[0],xmm3[0]
addpd %xmm7, %xmm13
movupd %xmm13, 1800(%rax)
movapd -976(%rbp), %xmm6 ## 16-byte Reload
mulsd -6816(%rbp), %xmm6 ## 16-byte Folded Reload
movsd -72(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -6800(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm6, %xmm0
addsd %xmm0, %xmm8
movsd -320(%rbp), %xmm6 ## 8-byte Reload
## xmm6 = mem[0],zero
movsd -344(%rbp), %xmm10 ## 8-byte Reload
## xmm10 = mem[0],zero
mulsd %xmm6, %xmm10
movapd -992(%rbp), %xmm15 ## 16-byte Reload
movsd -368(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm15, %xmm0
subsd %xmm0, %xmm10
mulsd %xmm6, %xmm4
movsd -288(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm15, %xmm0
subsd %xmm0, %xmm4
movapd %xmm8, -6752(%rbp) ## 16-byte Spill
addsd %xmm8, %xmm4
movsd -152(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm15, %xmm0
subsd %xmm0, %xmm4
movsd -224(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd %xmm6, %xmm0
addsd %xmm4, %xmm0
movsd -464(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
mulsd %xmm15, %xmm4
mulsd %xmm6, %xmm12
addsd %xmm4, %xmm12
movapd %xmm0, %xmm7
movapd %xmm0, %xmm13
subsd %xmm12, %xmm7
mulsd %xmm6, %xmm11
movapd -2320(%rbp), %xmm0 ## 16-byte Reload
mulsd %xmm15, %xmm0
subsd %xmm0, %xmm11
addsd %xmm7, %xmm10
addsd %xmm10, %xmm11
movsd %xmm11, 1816(%rax)
movapd -9008(%rbp), %xmm0 ## 16-byte Reload
addsd -9024(%rbp), %xmm0 ## 16-byte Folded Reload
addsd -9968(%rbp), %xmm0 ## 16-byte Folded Reload
movsd -5304(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
addsd %xmm0, %xmm4
movsd %xmm4, 1824(%rax)
movabsq $4633922541587529730, %rcx ## imm = 0x404F000000000002
movq %rcx, 1832(%rax)
movaps -10064(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 1840(%rax)
movaps -10000(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 1848(%rax)
movaps -3600(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 1856(%rax)
movaps -5488(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 1864(%rax)
movaps -4624(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 1872(%rax)
movaps -4656(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 1880(%rax)
movsd %xmm5, 1888(%rax)
movsd %xmm15, 1896(%rax)
movsd -5272(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
addsd -10096(%rbp), %xmm0 ## 16-byte Folded Reload
addsd -9104(%rbp), %xmm0 ## 16-byte Folded Reload
movsd -5296(%rbp), %xmm4 ## 8-byte Reload
## xmm4 = mem[0],zero
addsd %xmm0, %xmm4
movsd %xmm4, 1904(%rax)
movq %rcx, 1912(%rax)
movaps -10896(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 1920(%rax)
movaps -10816(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 1928(%rax)
movaps -7600(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 1936(%rax)
movaps -5504(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 1944(%rax)
movsd -2464(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 1952(%rax)
movaps -3312(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 1960(%rax)
movaps -2256(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 1968(%rax)
movsd %xmm6, 1976(%rax)
movaps -9344(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 1984(%rax)
movaps -10336(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 1992(%rax)
movaps -10320(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 2000(%rax)
movsd -2664(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 2008(%rax)
movsd -128(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 2016(%rax)
movsd -592(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 2024(%rax)
movsd %xmm3, 2032(%rax)
movaps -4864(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 2040(%rax)
movaps -10400(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 2048(%rax)
movaps -10384(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 2056(%rax)
movsd -2184(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 2064(%rax)
movsd -192(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 2072(%rax)
movaps -512(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 2080(%rax)
movsd %xmm10, 2088(%rax)
movaps -6848(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 2096(%rax)
movaps -9376(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 2104(%rax)
movaps -9360(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 2112(%rax)
movaps -4176(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 2120(%rax)
movsd -448(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 2128(%rax)
movsd -608(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 2136(%rax)
movsd -400(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 2144(%rax)
movaps -6896(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 2152(%rax)
movaps -9440(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 2160(%rax)
movaps -8144(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 2168(%rax)
movaps -6144(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 2176(%rax)
movsd -48(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 2184(%rax)
movsd -720(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 2192(%rax)
movsd %xmm7, 2200(%rax)
movaps -8064(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 2208(%rax)
movsd -3552(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 2216(%rax)
movsd -272(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 2224(%rax)
movsd -920(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 2232(%rax)
movsd -1744(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 2240(%rax)
movsd %xmm9, 2248(%rax)
movsd %xmm1, 2256(%rax)
movaps -8176(%rbp), %xmm0 ## 16-byte Reload
movsd %xmm0, 2264(%rax)
movsd -3568(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 2272(%rax)
movsd -96(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 2280(%rax)
movsd -2240(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 2288(%rax)
movsd -1112(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movsd %xmm0, 2296(%rax)
movsd %xmm2, 2304(%rax)
movsd %xmm13, 2312(%rax)
movapd -208(%rbp), %xmm0 ## 16-byte Reload
movapd -11344(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm0, %xmm1
movapd -6832(%rbp), %xmm2 ## 16-byte Reload
addsd %xmm1, %xmm2
movapd -11328(%rbp), %xmm3 ## 16-byte Reload
unpcklpd -11264(%rbp), %xmm3 ## 16-byte Folded Reload
## xmm3 = xmm3[0],mem[0]
movapd -13648(%rbp), %xmm1 ## 16-byte Reload
mulpd %xmm3, %xmm1
unpcklpd -8112(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = xmm2[0],mem[0]
addpd %xmm1, %xmm2
movupd %xmm2, 2320(%rax)
movapd -11312(%rbp), %xmm2 ## 16-byte Reload
unpcklpd -11200(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = xmm2[0],mem[0]
movapd -11216(%rbp), %xmm1 ## 16-byte Reload
mulsd %xmm0, %xmm1
movapd -1040(%rbp), %xmm3 ## 16-byte Reload
addsd %xmm1, %xmm3
movapd -8128(%rbp), %xmm1 ## 16-byte Reload
unpcklpd %xmm3, %xmm1 ## xmm1 = xmm1[0],xmm3[0]
movapd -1232(%rbp), %xmm4 ## 16-byte Reload
mulpd %xmm4, %xmm2
addpd %xmm2, %xmm1
movupd %xmm1, 2336(%rax)
movapd -11104(%rbp), %xmm1 ## 16-byte Reload
unpcklpd -5552(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = xmm1[0],mem[0]
movapd -688(%rbp), %xmm3 ## 16-byte Reload
mulpd %xmm1, %xmm3
movapd -11088(%rbp), %xmm2 ## 16-byte Reload
unpcklpd -5536(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = xmm2[0],mem[0]
mulpd %xmm4, %xmm2
movapd -3120(%rbp), %xmm1 ## 16-byte Reload
unpcklpd -1760(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = xmm1[0],mem[0]
addpd %xmm3, %xmm1
addpd %xmm2, %xmm1
movupd %xmm1, 2352(%rax)
unpcklpd -528(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
movapd -6032(%rbp), %xmm1 ## 16-byte Reload
unpcklpd -11376(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = xmm1[0],mem[0]
mulpd %xmm0, %xmm1
movapd -64(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -112(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
movapd -6016(%rbp), %xmm2 ## 16-byte Reload
unpcklpd -11360(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = xmm2[0],mem[0]
mulpd %xmm0, %xmm2
unpcklpd -4256(%rbp), %xmm14 ## 16-byte Folded Reload
## xmm14 = xmm14[0],mem[0]
addpd %xmm1, %xmm14
addpd %xmm2, %xmm14
movupd %xmm14, 2368(%rax)
movapd -11296(%rbp), %xmm1 ## 16-byte Reload
unpcklpd -11248(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = xmm1[0],mem[0]
movapd -11392(%rbp), %xmm0 ## 16-byte Reload
mulpd %xmm1, %xmm0
movapd -8192(%rbp), %xmm1 ## 16-byte Reload
addpd %xmm0, %xmm1
movupd %xmm1, 2384(%rax)
jmp LBB19_127
LBB19_126:
movapd -976(%rbp), %xmm1 ## 16-byte Reload
mulsd -6816(%rbp), %xmm1 ## 16-byte Folded Reload
movsd -72(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
mulsd -6800(%rbp), %xmm0 ## 16-byte Folded Reload
addsd %xmm1, %xmm0
addsd %xmm0, %xmm15
movapd %xmm15, -6752(%rbp) ## 16-byte Spill
LBB19_127:
movapd -144(%rbp), %xmm3 ## 16-byte Reload
movapd -176(%rbp), %xmm4 ## 16-byte Reload
movq (%r14), %rax
testq %rax, %rax
je LBB19_129
## %bb.128:
movapd -11184(%rbp), %xmm1 ## 16-byte Reload
unpcklpd -11072(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = xmm1[0],mem[0]
mulpd %xmm4, %xmm1
movapd -11168(%rbp), %xmm2 ## 16-byte Reload
unpcklpd -11056(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = xmm2[0],mem[0]
mulpd %xmm3, %xmm2
movapd -816(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -2000(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
addpd %xmm1, %xmm0
addpd %xmm2, %xmm0
movupd %xmm0, 2400(%rax)
movapd -3984(%rbp), %xmm1 ## 16-byte Reload
unpcklpd -6064(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = xmm1[0],mem[0]
mulpd %xmm4, %xmm1
movapd -3968(%rbp), %xmm2 ## 16-byte Reload
unpcklpd -6048(%rbp), %xmm2 ## 16-byte Folded Reload
## xmm2 = xmm2[0],mem[0]
mulpd %xmm3, %xmm2
movapd -1776(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -6752(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
addpd %xmm1, %xmm0
addpd %xmm2, %xmm0
movupd %xmm0, 2416(%rax)
LBB19_129:
addq $17816, %rsp ## imm = 0x4598
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _jac_F_alloc_mem ## -- Begin function jac_F_alloc_mem
.p2align 4, 0x90
_jac_F_alloc_mem: ## @jac_F_alloc_mem
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
xorl %eax, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _jac_F_init_mem ## -- Begin function jac_F_init_mem
.p2align 4, 0x90
_jac_F_init_mem: ## @jac_F_init_mem
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
xorl %eax, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _jac_F_free_mem ## -- Begin function jac_F_free_mem
.p2align 4, 0x90
_jac_F_free_mem: ## @jac_F_free_mem
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _jac_F_checkout ## -- Begin function jac_F_checkout
.p2align 4, 0x90
_jac_F_checkout: ## @jac_F_checkout
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
xorl %eax, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _jac_F_release ## -- Begin function jac_F_release
.p2align 4, 0x90
_jac_F_release: ## @jac_F_release
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _jac_F_incref ## -- Begin function jac_F_incref
.p2align 4, 0x90
_jac_F_incref: ## @jac_F_incref
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _jac_F_decref ## -- Begin function jac_F_decref
.p2align 4, 0x90
_jac_F_decref: ## @jac_F_decref
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _jac_F_n_in ## -- Begin function jac_F_n_in
.p2align 4, 0x90
_jac_F_n_in: ## @jac_F_n_in
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movl $2, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _jac_F_n_out ## -- Begin function jac_F_n_out
.p2align 4, 0x90
_jac_F_n_out: ## @jac_F_n_out
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movl $1, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _jac_F_default_in ## -- Begin function jac_F_default_in
.p2align 4, 0x90
_jac_F_default_in: ## @jac_F_default_in
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
xorps %xmm0, %xmm0
popq %rbp
retq
.cfi_endproc
## -- End function
	.globl	_jac_F_name_in          ## -- Begin function jac_F_name_in
	.p2align	4, 0x90
_jac_F_name_in:                         ## @jac_F_name_in
## Name lookup for input index i (rdi). Returns a C-string pointer in
## rax: "i0" (L_.str) for i==0, "out_o0" (L_.str.2) for i==1, and
## NULL for any other index. Implemented branchlessly with two cmovs.
	.cfi_startproc
## %bb.0:
	pushq	%rbp
	.cfi_def_cfa_offset 16
	.cfi_offset %rbp, -16
	movq	%rsp, %rbp
	.cfi_def_cfa_register %rbp
	xorl	%ecx, %ecx              ## rcx = NULL (default for i > 1)
	cmpq	$1, %rdi
	leaq	L_.str.2(%rip), %rax    ## rax = "out_o0"
	cmoveq	%rax, %rcx              ## rcx = (i==1) ? "out_o0" : NULL
	testq	%rdi, %rdi
	leaq	L_.str(%rip), %rax      ## rax = "i0"
	cmovneq	%rcx, %rax              ## rax = (i==0) ? "i0" : rcx
	popq	%rbp
	retq
	.cfi_endproc
                                        ## -- End function
	.globl	_jac_F_name_out         ## -- Begin function jac_F_name_out
	.p2align	4, 0x90
_jac_F_name_out:                        ## @jac_F_name_out
## Name lookup for output index i (rdi). Returns in rax: "jac"
## (L_.str.3) for i==0, NULL for any other index.
	.cfi_startproc
## %bb.0:
	pushq	%rbp
	.cfi_def_cfa_offset 16
	.cfi_offset %rbp, -16
	movq	%rsp, %rbp
	.cfi_def_cfa_register %rbp
	xorl	%eax, %eax              ## rax = NULL (default)
	testq	%rdi, %rdi
	leaq	L_.str.3(%rip), %rcx    ## rcx = "jac"
	cmoveq	%rcx, %rax              ## rax = (i==0) ? "jac" : NULL
	popq	%rbp
	retq
	.cfi_endproc
                                        ## -- End function
	.globl	_jac_F_sparsity_in      ## -- Begin function jac_F_sparsity_in
	.p2align	4, 0x90
_jac_F_sparsity_in:                     ## @jac_F_sparsity_in
## Sparsity-pattern lookup for input index i (rdi). Returns in rax a
## pointer to the pattern table: _foo_jac_s0 for i==0, _foo_jac_s2 for
## i==1, NULL otherwise. Same branchless cmov shape as _jac_F_name_in.
	.cfi_startproc
## %bb.0:
	pushq	%rbp
	.cfi_def_cfa_offset 16
	.cfi_offset %rbp, -16
	movq	%rsp, %rbp
	.cfi_def_cfa_register %rbp
	xorl	%ecx, %ecx              ## rcx = NULL (default for i > 1)
	cmpq	$1, %rdi
	leaq	_foo_jac_s2(%rip), %rax
	cmoveq	%rax, %rcx              ## rcx = (i==1) ? &s2 : NULL
	testq	%rdi, %rdi
	leaq	_foo_jac_s0(%rip), %rax
	cmovneq	%rcx, %rax              ## rax = (i==0) ? &s0 : rcx
	popq	%rbp
	retq
	.cfi_endproc
                                        ## -- End function
	.globl	_jac_F_sparsity_out     ## -- Begin function jac_F_sparsity_out
	.p2align	4, 0x90
_jac_F_sparsity_out:                    ## @jac_F_sparsity_out
## Sparsity-pattern lookup for output index i (rdi). Returns in rax:
## &_foo_jac_s3 for i==0, NULL for any other index.
	.cfi_startproc
## %bb.0:
	pushq	%rbp
	.cfi_def_cfa_offset 16
	.cfi_offset %rbp, -16
	movq	%rsp, %rbp
	.cfi_def_cfa_register %rbp
	xorl	%eax, %eax              ## rax = NULL (default)
	testq	%rdi, %rdi
	leaq	_foo_jac_s3(%rip), %rcx
	cmoveq	%rcx, %rax              ## rax = (i==0) ? &s3 : NULL
	popq	%rbp
	retq
	.cfi_endproc
                                        ## -- End function
	.globl	_jac_F_work             ## -- Begin function jac_F_work
	.p2align	4, 0x90
_jac_F_work:                            ## @jac_F_work
## Reports work-vector sizes through four optional out-pointers:
##   rdi -> sz_arg = 2   (number of input pointers)
##   rsi -> sz_res = 1   (number of output pointers)
##   rdx -> sz_iw  = 0   (integer work vector length)
##   rcx -> sz_w   = 0   (real work vector length)
## Each pointer is null-checked before the store; always returns 0 in
## %eax (success).
	.cfi_startproc
## %bb.0:
	pushq	%rbp
	.cfi_def_cfa_offset 16
	.cfi_offset %rbp, -16
	movq	%rsp, %rbp
	.cfi_def_cfa_register %rbp
	testq	%rdi, %rdi              ## sz_arg requested?
	je	LBB34_2
## %bb.1:
	movq	$2, (%rdi)              ## *sz_arg = 2
LBB34_2:
	testq	%rsi, %rsi              ## sz_res requested?
	je	LBB34_4
## %bb.3:
	movq	$1, (%rsi)              ## *sz_res = 1
LBB34_4:
	testq	%rdx, %rdx              ## sz_iw requested?
	je	LBB34_6
## %bb.5:
	movq	$0, (%rdx)              ## *sz_iw = 0
LBB34_6:
	testq	%rcx, %rcx              ## sz_w requested?
	je	LBB34_8
## %bb.7:
	movq	$0, (%rcx)              ## *sz_w = 0
LBB34_8:
	xorl	%eax, %eax              ## return 0 (success)
	popq	%rbp
	retq
	.cfi_endproc
                                        ## -- End function
	.section	__TEXT,__cstring,cstring_literals
L_.str:                                 ## @.str
                                        ## "i0": returned by _jac_F_name_in for index 0.
	.asciz	"i0"
L_.str.1:                               ## @.str.1
                                        ## "o0": not referenced by any function visible in this chunk.
	.asciz	"o0"
	.section	__TEXT,__const
	.p2align	4                               ## @foo_jac_s0
## Sparsity pattern returned by _jac_F_sparsity_in for input 0.
## Layout matches compressed-column storage (CasADi convention, presumably):
##   [nrow, ncol, colind[0..ncol], row[0..nnz-1]] -- TODO confirm against generator.
## Here: 33x1, colind = {0, 33}, rows 0..32 => fully dense column, nnz = 33.
_foo_jac_s0:
	.quad	33                              ## nrow = 33
	.quad	1                               ## ncol = 1
	.quad	0                               ## colind[0]
	.quad	33                              ## colind[1] = nnz
                                        ## row indices 0..32 follow
	.quad	0                               ## 0x0
	.quad	1                               ## 0x1
	.quad	2                               ## 0x2
	.quad	3                               ## 0x3
	.quad	4                               ## 0x4
	.quad	5                               ## 0x5
	.quad	6                               ## 0x6
	.quad	7                               ## 0x7
	.quad	8                               ## 0x8
	.quad	9                               ## 0x9
	.quad	10                              ## 0xa
	.quad	11                              ## 0xb
	.quad	12                              ## 0xc
	.quad	13                              ## 0xd
	.quad	14                              ## 0xe
	.quad	15                              ## 0xf
	.quad	16                              ## 0x10
	.quad	17                              ## 0x11
	.quad	18                              ## 0x12
	.quad	19                              ## 0x13
	.quad	20                              ## 0x14
	.quad	21                              ## 0x15
	.quad	22                              ## 0x16
	.quad	23                              ## 0x17
	.quad	24                              ## 0x18
	.quad	25                              ## 0x19
	.quad	26                              ## 0x1a
	.quad	27                              ## 0x1b
	.quad	28                              ## 0x1c
	.quad	29                              ## 0x1d
	.quad	30                              ## 0x1e
	.quad	31                              ## 0x1f
	.quad	32                              ## 0x20
	.p2align	4                               ## @foo_jac_s1
## Compressed-column sparsity table: 15x1, colind = {0, 15}, rows 0..14
## => fully dense column, nnz = 15.
## Not referenced by any accessor function visible in this chunk.
_foo_jac_s1:
	.quad	15                              ## nrow = 15
	.quad	1                               ## ncol = 1
	.quad	0                               ## colind[0]
	.quad	15                              ## colind[1] = nnz
                                        ## row indices 0..14 follow
	.quad	0                               ## 0x0
	.quad	1                               ## 0x1
	.quad	2                               ## 0x2
	.quad	3                               ## 0x3
	.quad	4                               ## 0x4
	.quad	5                               ## 0x5
	.quad	6                               ## 0x6
	.quad	7                               ## 0x7
	.quad	8                               ## 0x8
	.quad	9                               ## 0x9
	.quad	10                              ## 0xa
	.quad	11                              ## 0xb
	.quad	12                              ## 0xc
	.quad	13                              ## 0xd
	.quad	14                              ## 0xe
	.section	__TEXT,__cstring,cstring_literals
L_.str.2:                               ## @.str.2
                                        ## "out_o0": returned by _jac_F_name_in for index 1.
	.asciz	"out_o0"
L_.str.3:                               ## @.str.3
                                        ## "jac": returned by _jac_F_name_out for index 0.
	.asciz	"jac"
	.section	__TEXT,__const
	.p2align	4                               ## @foo_jac_s2
## Sparsity returned by _jac_F_sparsity_in for input 1:
## 15x1 with colind = {0, 0} => no structural nonzeros (empty pattern),
## so no row-index entries follow.
_foo_jac_s2:
	.quad	15                              ## nrow = 15
	.quad	1                               ## ncol = 1
	.quad	0                               ## colind[0]
	.quad	0                               ## colind[1] = nnz = 0
	.p2align	4                               ## @foo_jac_s3
## Sparsity of the Jacobian output, returned by _jac_F_sparsity_out for index 0.
## Compressed-column storage: [nrow, ncol, colind[0..ncol], row[0..nnz-1]].
## Here: 15 rows x 33 cols; 34 column offsets; colind[33] = 304 = nnz, and
## exactly 304 row indices follow (counts verified against the table below).
_foo_jac_s3:
	.quad	15                              ## nrow = 15
	.quad	33                              ## ncol = 33
                                        ## colind[0..33]: per-column offsets into row[]
	.quad	0                               ## 0x0
	.quad	15                              ## 0xf
	.quad	30                              ## 0x1e
	.quad	30                              ## 0x1e (column 2 is empty)
	.quad	43                              ## 0x2b
	.quad	58                              ## 0x3a
	.quad	73                              ## 0x49
	.quad	82                              ## 0x52
	.quad	91                              ## 0x5b
	.quad	100                             ## 0x64
	.quad	109                             ## 0x6d
	.quad	118                             ## 0x76
	.quad	127                             ## 0x7f
	.quad	136                             ## 0x88
	.quad	145                             ## 0x91
	.quad	154                             ## 0x9a
	.quad	163                             ## 0xa3
	.quad	172                             ## 0xac
	.quad	181                             ## 0xb5
	.quad	190                             ## 0xbe
	.quad	199                             ## 0xc7
	.quad	208                             ## 0xd0
	.quad	217                             ## 0xd9
	.quad	228                             ## 0xe4
	.quad	238                             ## 0xee
	.quad	248                             ## 0xf8
	.quad	255                             ## 0xff
	.quad	262                             ## 0x106
	.quad	269                             ## 0x10d
	.quad	276                             ## 0x114
	.quad	283                             ## 0x11b
	.quad	290                             ## 0x122
	.quad	297                             ## 0x129
	.quad	304                             ## 0x130 = nnz
                                        ## row[0..303]: row index of each nonzero,
                                        ## grouped column by column
	.quad	0                               ## 0x0
	.quad	1                               ## 0x1
	.quad	2                               ## 0x2
	.quad	3                               ## 0x3
	.quad	4                               ## 0x4
	.quad	5                               ## 0x5
	.quad	6                               ## 0x6
	.quad	7                               ## 0x7
	.quad	8                               ## 0x8
	.quad	9                               ## 0x9
	.quad	10                              ## 0xa
	.quad	11                              ## 0xb
	.quad	12                              ## 0xc
	.quad	13                              ## 0xd
	.quad	14                              ## 0xe
	.quad	0                               ## 0x0
	.quad	1                               ## 0x1
	.quad	2                               ## 0x2
	.quad	3                               ## 0x3
	.quad	4                               ## 0x4
	.quad	5                               ## 0x5
	.quad	6                               ## 0x6
	.quad	7                               ## 0x7
	.quad	8                               ## 0x8
	.quad	9                               ## 0x9
	.quad	10                              ## 0xa
	.quad	11                              ## 0xb
	.quad	12                              ## 0xc
	.quad	13                              ## 0xd
	.quad	14                              ## 0xe
	.quad	0                               ## 0x0
	.quad	1                               ## 0x1
	.quad	2                               ## 0x2
	.quad	3                               ## 0x3
	.quad	4                               ## 0x4
	.quad	5                               ## 0x5
	.quad	6                               ## 0x6
	.quad	7                               ## 0x7
	.quad	8                               ## 0x8
	.quad	9                               ## 0x9
	.quad	10                              ## 0xa
	.quad	11                              ## 0xb
	.quad	13                              ## 0xd
	.quad	0                               ## 0x0
	.quad	1                               ## 0x1
	.quad	2                               ## 0x2
	.quad	3                               ## 0x3
	.quad	4                               ## 0x4
	.quad	5                               ## 0x5
	.quad	6                               ## 0x6
	.quad	7                               ## 0x7
	.quad	8                               ## 0x8
	.quad	9                               ## 0x9
	.quad	10                              ## 0xa
	.quad	11                              ## 0xb
	.quad	12                              ## 0xc
	.quad	13                              ## 0xd
	.quad	14                              ## 0xe
	.quad	0                               ## 0x0
	.quad	1                               ## 0x1
	.quad	2                               ## 0x2
	.quad	3                               ## 0x3
	.quad	4                               ## 0x4
	.quad	5                               ## 0x5
	.quad	6                               ## 0x6
	.quad	7                               ## 0x7
	.quad	8                               ## 0x8
	.quad	9                               ## 0x9
	.quad	10                              ## 0xa
	.quad	11                              ## 0xb
	.quad	12                              ## 0xc
	.quad	13                              ## 0xd
	.quad	14                              ## 0xe
	.quad	0                               ## 0x0
	.quad	1                               ## 0x1
	.quad	2                               ## 0x2
	.quad	3                               ## 0x3
	.quad	5                               ## 0x5
	.quad	7                               ## 0x7
	.quad	9                               ## 0x9
	.quad	13                              ## 0xd
	.quad	14                              ## 0xe
	.quad	0                               ## 0x0
	.quad	1                               ## 0x1
	.quad	2                               ## 0x2
	.quad	3                               ## 0x3
	.quad	5                               ## 0x5
	.quad	7                               ## 0x7
	.quad	9                               ## 0x9
	.quad	13                              ## 0xd
	.quad	14                              ## 0xe
	.quad	0                               ## 0x0
	.quad	1                               ## 0x1
	.quad	2                               ## 0x2
	.quad	4                               ## 0x4
	.quad	6                               ## 0x6
	.quad	8                               ## 0x8
	.quad	10                              ## 0xa
	.quad	11                              ## 0xb
	.quad	12                              ## 0xc
	.quad	0                               ## 0x0
	.quad	1                               ## 0x1
	.quad	2                               ## 0x2
	.quad	4                               ## 0x4
	.quad	6                               ## 0x6
	.quad	8                               ## 0x8
	.quad	10                              ## 0xa
	.quad	11                              ## 0xb
	.quad	12                              ## 0xc
	.quad	0                               ## 0x0
	.quad	1                               ## 0x1
	.quad	2                               ## 0x2
	.quad	3                               ## 0x3
	.quad	5                               ## 0x5
	.quad	7                               ## 0x7
	.quad	9                               ## 0x9
	.quad	13                              ## 0xd
	.quad	14                              ## 0xe
	.quad	0                               ## 0x0
	.quad	1                               ## 0x1
	.quad	2                               ## 0x2
	.quad	3                               ## 0x3
	.quad	5                               ## 0x5
	.quad	7                               ## 0x7
	.quad	9                               ## 0x9
	.quad	13                              ## 0xd
	.quad	14                              ## 0xe
	.quad	0                               ## 0x0
	.quad	1                               ## 0x1
	.quad	2                               ## 0x2
	.quad	4                               ## 0x4
	.quad	6                               ## 0x6
	.quad	8                               ## 0x8
	.quad	10                              ## 0xa
	.quad	11                              ## 0xb
	.quad	12                              ## 0xc
	.quad	0                               ## 0x0
	.quad	1                               ## 0x1
	.quad	2                               ## 0x2
	.quad	4                               ## 0x4
	.quad	6                               ## 0x6
	.quad	8                               ## 0x8
	.quad	10                              ## 0xa
	.quad	11                              ## 0xb
	.quad	12                              ## 0xc
	.quad	0                               ## 0x0
	.quad	1                               ## 0x1
	.quad	2                               ## 0x2
	.quad	3                               ## 0x3
	.quad	5                               ## 0x5
	.quad	7                               ## 0x7
	.quad	9                               ## 0x9
	.quad	13                              ## 0xd
	.quad	14                              ## 0xe
	.quad	0                               ## 0x0
	.quad	1                               ## 0x1
	.quad	2                               ## 0x2
	.quad	3                               ## 0x3
	.quad	5                               ## 0x5
	.quad	7                               ## 0x7
	.quad	9                               ## 0x9
	.quad	13                              ## 0xd
	.quad	14                              ## 0xe
	.quad	0                               ## 0x0
	.quad	1                               ## 0x1
	.quad	2                               ## 0x2
	.quad	4                               ## 0x4
	.quad	6                               ## 0x6
	.quad	8                               ## 0x8
	.quad	10                              ## 0xa
	.quad	11                              ## 0xb
	.quad	12                              ## 0xc
	.quad	0                               ## 0x0
	.quad	1                               ## 0x1
	.quad	2                               ## 0x2
	.quad	4                               ## 0x4
	.quad	6                               ## 0x6
	.quad	8                               ## 0x8
	.quad	10                              ## 0xa
	.quad	11                              ## 0xb
	.quad	12                              ## 0xc
	.quad	0                               ## 0x0
	.quad	1                               ## 0x1
	.quad	2                               ## 0x2
	.quad	3                               ## 0x3
	.quad	5                               ## 0x5
	.quad	7                               ## 0x7
	.quad	9                               ## 0x9
	.quad	13                              ## 0xd
	.quad	14                              ## 0xe
	.quad	0                               ## 0x0
	.quad	1                               ## 0x1
	.quad	2                               ## 0x2
	.quad	3                               ## 0x3
	.quad	5                               ## 0x5
	.quad	7                               ## 0x7
	.quad	9                               ## 0x9
	.quad	13                              ## 0xd
	.quad	14                              ## 0xe
	.quad	0                               ## 0x0
	.quad	1                               ## 0x1
	.quad	2                               ## 0x2
	.quad	4                               ## 0x4
	.quad	6                               ## 0x6
	.quad	8                               ## 0x8
	.quad	10                              ## 0xa
	.quad	11                              ## 0xb
	.quad	12                              ## 0xc
	.quad	0                               ## 0x0
	.quad	1                               ## 0x1
	.quad	2                               ## 0x2
	.quad	4                               ## 0x4
	.quad	6                               ## 0x6
	.quad	8                               ## 0x8
	.quad	10                              ## 0xa
	.quad	11                              ## 0xb
	.quad	12                              ## 0xc
	.quad	0                               ## 0x0
	.quad	1                               ## 0x1
	.quad	2                               ## 0x2
	.quad	3                               ## 0x3
	.quad	4                               ## 0x4
	.quad	5                               ## 0x5
	.quad	6                               ## 0x6
	.quad	7                               ## 0x7
	.quad	8                               ## 0x8
	.quad	9                               ## 0x9
	.quad	10                              ## 0xa
	.quad	0                               ## 0x0
	.quad	1                               ## 0x1
	.quad	3                               ## 0x3
	.quad	4                               ## 0x4
	.quad	5                               ## 0x5
	.quad	6                               ## 0x6
	.quad	7                               ## 0x7
	.quad	8                               ## 0x8
	.quad	9                               ## 0x9
	.quad	10                              ## 0xa
	.quad	0                               ## 0x0
	.quad	2                               ## 0x2
	.quad	3                               ## 0x3
	.quad	4                               ## 0x4
	.quad	5                               ## 0x5
	.quad	6                               ## 0x6
	.quad	7                               ## 0x7
	.quad	8                               ## 0x8
	.quad	9                               ## 0x9
	.quad	10                              ## 0xa
	.quad	0                               ## 0x0
	.quad	1                               ## 0x1
	.quad	2                               ## 0x2
	.quad	3                               ## 0x3
	.quad	5                               ## 0x5
	.quad	7                               ## 0x7
	.quad	9                               ## 0x9
	.quad	0                               ## 0x0
	.quad	1                               ## 0x1
	.quad	2                               ## 0x2
	.quad	4                               ## 0x4
	.quad	6                               ## 0x6
	.quad	8                               ## 0x8
	.quad	10                              ## 0xa
	.quad	0                               ## 0x0
	.quad	1                               ## 0x1
	.quad	2                               ## 0x2
	.quad	3                               ## 0x3
	.quad	5                               ## 0x5
	.quad	7                               ## 0x7
	.quad	9                               ## 0x9
	.quad	0                               ## 0x0
	.quad	1                               ## 0x1
	.quad	2                               ## 0x2
	.quad	4                               ## 0x4
	.quad	6                               ## 0x6
	.quad	8                               ## 0x8
	.quad	10                              ## 0xa
	.quad	0                               ## 0x0
	.quad	1                               ## 0x1
	.quad	2                               ## 0x2
	.quad	3                               ## 0x3
	.quad	5                               ## 0x5
	.quad	7                               ## 0x7
	.quad	9                               ## 0x9
	.quad	0                               ## 0x0
	.quad	1                               ## 0x1
	.quad	2                               ## 0x2
	.quad	4                               ## 0x4
	.quad	6                               ## 0x6
	.quad	8                               ## 0x8
	.quad	10                              ## 0xa
	.quad	0                               ## 0x0
	.quad	1                               ## 0x1
	.quad	2                               ## 0x2
	.quad	3                               ## 0x3
	.quad	5                               ## 0x5
	.quad	7                               ## 0x7
	.quad	9                               ## 0x9
	.quad	0                               ## 0x0
	.quad	1                               ## 0x1
	.quad	2                               ## 0x2
	.quad	4                               ## 0x4
	.quad	6                               ## 0x6
	.quad	8                               ## 0x8
	.quad	10                              ## 0xa
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
	.globl	_foo_jac_sq                     ; -- Begin function foo_jac_sq
	.p2align	2
_foo_jac_sq:                            ; @foo_jac_sq
	.cfi_startproc
; %bb.0:
; double foo_jac_sq(double x): returns x*x.
; AAPCS64: argument and result both in d0; leaf, no frame needed.
	fmul	d0, d0, d0                      ; d0 = x * x
	ret
	.cfi_endproc
                                        ; -- End function
	.globl	_foo_jac_fmin                   ; -- Begin function foo_jac_fmin
	.p2align	2
_foo_jac_fmin:                          ; @foo_jac_fmin
	.cfi_startproc
; %bb.0:
; double foo_jac_fmin(double a, double b): returns the smaller of a (d0) and
; b (d1). FMINNM implements IEEE 754 minNum: if exactly one operand is a quiet
; NaN, the other (numeric) operand is returned.
	fminnm	d0, d0, d1                      ; d0 = minNum(a, b)
	ret
	.cfi_endproc
                                        ; -- End function
.globl _F ; -- Begin function F
.p2align 2
_F: ; @F
.cfi_startproc
; %bb.0:
stp d15, d14, [sp, #-128]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 128
stp d13, d12, [sp, #16] ; 16-byte Folded Spill
stp d11, d10, [sp, #32] ; 16-byte Folded Spill
stp d9, d8, [sp, #48] ; 16-byte Folded Spill
stp x24, x23, [sp, #64] ; 16-byte Folded Spill
stp x22, x21, [sp, #80] ; 16-byte Folded Spill
stp x20, x19, [sp, #96] ; 16-byte Folded Spill
stp x29, x30, [sp, #112] ; 16-byte Folded Spill
add x29, sp, #112
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset b8, -72
.cfi_offset b9, -80
.cfi_offset b10, -88
.cfi_offset b11, -96
.cfi_offset b12, -104
.cfi_offset b13, -112
.cfi_offset b14, -120
.cfi_offset b15, -128
sub sp, sp, #1168
mov x19, x1
ldr x20, [x0]
cbz x20, LBB2_2
; %bb.1:
ldr d1, [x20, #176]
mov x8, #43850
movk x8, #51580, lsl #16
movk x8, #43198, lsl #32
movk x8, #16259, lsl #48
fmov d0, x8
stur d1, [x29, #-168] ; 8-byte Folded Spill
fmul d8, d1, d0
ldr d0, [x20]
b LBB2_3
LBB2_2:
movi d8, #0000000000000000
movi d0, #0000000000000000
stur d0, [x29, #-168] ; 8-byte Folded Spill
movi d0, #0000000000000000
LBB2_3:
fmov d1, #0.50000000
fmul d0, d0, d1
bl ___sincos_stret
fmul d2, d1, d1
fmul d3, d0, d0
fsub d11, d2, d3
mov x8, #33620
movk x8, #2364, lsl #16
movk x8, #33974, lsl #32
movk x8, #49073, lsl #48
fmov d2, x8
fmul d12, d11, d2
fmul d0, d1, d0
cbz x20, LBB2_5
; %bb.4:
ldp d2, d1, [x20, #184]
str d1, [sp, #1008] ; 8-byte Folded Spill
fmul d1, d12, d1
fadd d10, d0, d0
mov x8, #33620
movk x8, #2364, lsl #16
movk x8, #33974, lsl #32
movk x8, #49073, lsl #48
fmov d0, x8
fmul d13, d10, d0
str d2, [sp, #1000] ; 8-byte Folded Spill
fmul d0, d13, d2
fsub d0, d1, d0
fadd d0, d8, d0
mov x8, #43139
movk x8, #8835, lsl #16
movk x8, #28093, lsl #32
movk x8, #16419, lsl #48
fmov d1, x8
fmul d0, d0, d1
mov x8, #45848
movk x8, #59098, lsl #16
movk x8, #53494, lsl #32
movk x8, #16471, lsl #48
fmov d1, x8
fmul d1, d12, d1
fadd d0, d1, d0
str d0, [sp, #608] ; 8-byte Folded Spill
ldr d0, [x20, #200]
str d0, [sp, #640] ; 8-byte Folded Spill
ldr d0, [x20, #48]
b LBB2_6
LBB2_5:
movi d2, #0000000000000000
fmul d1, d12, d2
fadd d10, d0, d0
mov x8, #33620
movk x8, #2364, lsl #16
movk x8, #33974, lsl #32
movk x8, #49073, lsl #48
fmov d0, x8
fmul d13, d10, d0
movi d0, #0000000000000000
str d0, [sp, #640] ; 8-byte Folded Spill
fmul d0, d13, d2
fsub d0, d1, d0
fadd d0, d8, d0
mov x8, #43139
movk x8, #8835, lsl #16
movk x8, #28093, lsl #32
movk x8, #16419, lsl #48
fmov d1, x8
fmul d0, d0, d1
mov x8, #45848
movk x8, #59098, lsl #16
movk x8, #53494, lsl #32
movk x8, #16471, lsl #48
fmov d1, x8
fmul d1, d12, d1
fadd d0, d1, d0
str d0, [sp, #608] ; 8-byte Folded Spill
movi d0, #0000000000000000
str d0, [sp, #1008] ; 8-byte Folded Spill
movi d0, #0000000000000000
str d0, [sp, #1000] ; 8-byte Folded Spill
movi d0, #0000000000000000
LBB2_6:
bl ___sincos_stret
fmul d2, d11, d0
fmul d3, d10, d1
movi d5, #0000000000000000
movi d9, #0000000000000000
movi d4, #0000000000000000
cbz x20, LBB2_8
; %bb.7:
ldr d9, [x20, #8]
ldr d4, [x20, #24]
LBB2_8:
str d4, [sp, #840] ; 8-byte Folded Spill
fadd d2, d2, d3
fmul d1, d11, d1
fmul d0, d10, d0
fsub d0, d1, d0
stp d0, d2, [x29, #-128] ; 16-byte Folded Spill
movi d0, #0000000000000000
cbz x20, LBB2_10
; %bb.9:
ldr d5, [x20, #216]
ldr d0, [x20, #80]
LBB2_10:
str d5, [sp, #536] ; 8-byte Folded Spill
mov x21, #39127
movk x21, #24179, lsl #16
movk x21, #24811, lsl #32
movk x21, #16304, lsl #48
bl ___sincos_stret
ldur d2, [x29, #-128] ; 8-byte Folded Reload
fmul d6, d2, d0
ldur d2, [x29, #-120] ; 8-byte Folded Reload
fmul d7, d2, d1
mov x8, #11201
movk x8, #50599, lsl #16
movk x8, #31589, lsl #32
movk x8, #49010, lsl #48
mov x9, #52090
movk x9, #42545, lsl #16
movk x9, #26349, lsl #32
movk x9, #16345, lsl #48
movi d16, #0000000000000000
movi d2, #0000000000000000
cbz x20, LBB2_12
; %bb.11:
ldr d2, [x20, #56]
LBB2_12:
fmov d5, x21
fmov d3, x8
fmov d4, x9
fadd d6, d6, d7
stur d6, [x29, #-192] ; 8-byte Folded Spill
ldur d6, [x29, #-128] ; 8-byte Folded Reload
fmul d1, d6, d1
ldur d6, [x29, #-120] ; 8-byte Folded Reload
fmul d0, d6, d0
fsub d0, d1, d0
stur d0, [x29, #-200] ; 8-byte Folded Spill
movi d0, #0000000000000000
cbz x20, LBB2_14
; %bb.13:
ldr d16, [x20, #232]
ldr d0, [x20, #112]
LBB2_14:
str d16, [sp, #560] ; 8-byte Folded Spill
mov x21, #39127
movk x21, #24179, lsl #16
movk x21, #24811, lsl #32
movk x21, #49072, lsl #48
mov x22, #11201
movk x22, #50599, lsl #16
movk x22, #31589, lsl #32
movk x22, #49010, lsl #48
mov x23, #52090
movk x23, #42545, lsl #16
movk x23, #26349, lsl #32
movk x23, #49113, lsl #48
str d10, [sp, #904] ; 8-byte Folded Spill
fmul d8, d10, d5
fmov d14, d9
fadd d15, d9, d2
ldur d1, [x29, #-128] ; 8-byte Folded Reload
fmul d9, d1, d3
ldur d1, [x29, #-120] ; 8-byte Folded Reload
fmul d10, d1, d4
bl ___sincos_stret
ldp d5, d4, [x29, #-200] ; 16-byte Folded Reload
fmul d2, d5, d1
fmul d3, d4, d0
fsub d2, d2, d3
fmul d0, d5, d0
fmul d1, d4, d1
fadd d0, d0, d1
mov x8, #4363988038922010624
fmov d1, x8
mov x8, #43115
movk x8, #62349, lsl #16
movk x8, #30721, lsl #32
movk x8, #49115, lsl #48
movi d4, #0000000000000000
movi d17, #0000000000000000
cbz x20, LBB2_16
; %bb.15:
ldr d17, [x20, #88]
LBB2_16:
fmov d7, x21
fmov d16, x22
fmov d5, x23
fadd d20, d12, d8
fadd d21, d10, d9
fmov d6, x8
fmul d3, d0, d1
fadd d23, d15, d17
mov x8, #43115
movk x8, #62349, lsl #16
movk x8, #30721, lsl #32
movk x8, #49115, lsl #48
mov x9, #62612
movk x9, #18904, lsl #16
movk x9, #1144, lsl #32
movk x9, #49064, lsl #48
fmov d17, x9
fmul d18, d2, d17
mov x9, #47272
movk x9, #56762, lsl #16
movk x9, #43178, lsl #32
movk x9, #16292, lsl #48
fmov d17, x9
fmul d19, d0, d17
cbz x20, LBB2_18
; %bb.17:
ldr d4, [x20, #120]
LBB2_18:
fmul d7, d11, d7
ldur d17, [x29, #-120] ; 8-byte Folded Reload
fmul d16, d17, d16
fmov d17, x8
fadd d24, d19, d18
ldur d18, [x29, #-128] ; 8-byte Folded Reload
fmul d5, d18, d5
stur d14, [x29, #-176] ; 8-byte Folded Spill
fmul d26, d20, d14
fsub d14, d2, d3
fmul d25, d21, d15
ldur d18, [x29, #-192] ; 8-byte Folded Reload
fmul d22, d18, d6
fadd d9, d23, d4
mov x8, #62612
movk x8, #18904, lsl #16
movk x8, #1144, lsl #32
movk x8, #49064, lsl #48
fmov d4, x8
mov x8, #47272
movk x8, #56762, lsl #16
movk x8, #43178, lsl #32
movk x8, #49060, lsl #48
fmov d6, x8
fmul d1, d2, d1
fmul d18, d0, d4
movi d4, #0000000000000000
fmul d19, d2, d6
movi d6, #0000000000000000
cbz x20, LBB2_20
; %bb.19:
ldr d6, [x20, #40]
LBB2_20:
str d15, [sp, #784] ; 8-byte Folded Spill
str d21, [sp, #800] ; 8-byte Folded Spill
str d11, [sp, #912] ; 8-byte Folded Spill
str d20, [sp, #992] ; 8-byte Folded Spill
str d12, [sp, #624] ; 8-byte Folded Spill
fadd d7, d13, d7
fadd d5, d16, d5
ldur d16, [x29, #-200] ; 8-byte Folded Reload
fmul d16, d16, d17
fadd d17, d18, d19
fsub d11, d1, d0
fadd d18, d0, d1
fadd d19, d2, d3
str d26, [sp, #408] ; 8-byte Folded Spill
fadd d1, d26, d6
str d25, [sp, #632] ; 8-byte Folded Spill
stur d1, [x29, #-184] ; 8-byte Folded Spill
fadd d3, d25, d1
str d22, [sp, #776] ; 8-byte Folded Spill
str d23, [sp, #760] ; 8-byte Folded Spill
fmul d1, d22, d23
str d3, [sp, #704] ; 8-byte Folded Spill
fsub d1, d3, d1
str d1, [sp, #872] ; 8-byte Folded Spill
str d24, [sp, #752] ; 8-byte Folded Spill
fmul d15, d24, d9
mov x8, #43516
movk x8, #54001, lsl #16
movk x8, #25165, lsl #32
movk x8, #16240, lsl #48
fmov d1, x8
fmul d1, d14, d1
fmul d3, d14, d1
cbz x20, LBB2_22
; %bb.21:
ldr d4, [x20, #32]
LBB2_22:
str d7, [sp, #984] ; 8-byte Folded Spill
fadd d4, d7, d4
str d5, [sp, #792] ; 8-byte Folded Spill
stur d4, [x29, #-240] ; 8-byte Folded Spill
fadd d4, d5, d4
str d16, [sp, #768] ; 8-byte Folded Spill
fadd d4, d16, d4
str d17, [sp, #928] ; 8-byte Folded Spill
fadd d12, d17, d4
mov x8, #43516
movk x8, #54001, lsl #16
movk x8, #25165, lsl #32
movk x8, #16240, lsl #48
fmov d4, x8
fmul d4, d18, d4
fadd d4, d4, d12
mov x8, #20972
movk x8, #7864, lsl #16
movk x8, #60293, lsl #32
movk x8, #49057, lsl #48
fmov d5, x8
fadd d8, d4, d5
fmov d4, #0.50000000
fmul d4, d8, d4
fsub d4, d8, d4
fsub d4, d4, d12
stur d18, [x29, #-256] ; 8-byte Folded Spill
fmul d5, d18, d4
fadd d3, d3, d5
str d3, [sp, #344] ; 8-byte Folded Spill
fmul d3, d14, d3
fmul d5, d11, d1
str d19, [sp, #1016] ; 8-byte Folded Spill
fmul d6, d19, d4
fadd d5, d5, d6
str d5, [sp, #336] ; 8-byte Folded Spill
fmul d5, d11, d5
fadd d3, d3, d5
mov x8, #4359484439294640128
fmov d5, x8
fmul d6, d2, d5
mov x8, #4354980839667269632
fmov d7, x8
fmul d16, d0, d7
fadd d6, d6, d16
fmul d1, d6, d1
fmul d0, d0, d5
fmul d2, d2, d7
fsub d0, d0, d2
stur d0, [x29, #-136] ; 8-byte Folded Spill
fmul d0, d0, d4
fadd d0, d1, d0
stur d6, [x29, #-160] ; 8-byte Folded Spill
str d0, [sp, #648] ; 8-byte Folded Spill
fmul d0, d6, d0
fadd d10, d0, d3
fmul d0, d8, d8
mov x8, #26865
movk x8, #35043, lsl #16
movk x8, #63669, lsl #32
movk x8, #16100, lsl #48
fmov d1, x8
str d1, [sp, #392] ; 8-byte Folded Spill
fadd d0, d0, d1
fsqrt d0, d0
fmov d1, #1.50000000
bl _pow
str d0, [sp, #424] ; 8-byte Folded Spill
str d15, [sp, #488] ; 8-byte Folded Spill
ldr d0, [sp, #872] ; 8-byte Folded Reload
fadd d1, d15, d0
str d10, [sp, #568] ; 8-byte Folded Spill
fmul d0, d9, d10
stur d1, [x29, #-208] ; 8-byte Folded Spill
fadd d10, d1, d0
mov x8, #211106232532992
movk x8, #49266, lsl #48
fmov d0, x8
fmul d0, d8, d0
bl _tanh
str d0, [sp, #440] ; 8-byte Folded Spill
mov x8, #6148914691236517205
movk x8, #16341, lsl #48
fmov d0, x8
str d10, [sp, #400] ; 8-byte Folded Spill
fsub d0, d0, d10
mov x8, #4632233691727265792
fmov d1, x8
fmul d0, d0, d1
bl _tanh
str d0, [sp, #448] ; 8-byte Folded Spill
movi d8, #0000000000000000
movi d0, #0000000000000000
cbz x20, LBB2_24
; %bb.23:
ldr d0, [x20, #144]
LBB2_24:
str d13, [sp, #616] ; 8-byte Folded Spill
bl ___sincos_stret
mov x8, #4359484439294640128
fmov d2, x8
fmul d3, d1, d2
mov x8, #-4868391197187506176
fmov d4, x8
fmul d5, d0, d4
fadd d3, d3, d5
str d3, [sp, #896] ; 8-byte Folded Spill
fmul d3, d1, d4
fmul d2, d0, d2
fsub d2, d3, d2
str d2, [sp, #880] ; 8-byte Folded Spill
fmul d2, d14, d1
fmul d3, d11, d0
fadd d2, d2, d3
str d2, [sp, #888] ; 8-byte Folded Spill
fmul d2, d11, d1
fmul d3, d14, d0
fsub d2, d2, d3
stur d2, [x29, #-216] ; 8-byte Folded Spill
ldr d6, [sp, #1016] ; 8-byte Folded Reload
fmul d2, d6, d1
ldur d5, [x29, #-256] ; 8-byte Folded Reload
fmul d3, d5, d0
fsub d2, d2, d3
fmul d1, d5, d1
fmul d0, d6, d0
fadd d0, d1, d0
stp d2, d0, [x29, #-232] ; 16-byte Folded Spill
movi d7, #0000000000000000
cbz x20, LBB2_26
; %bb.25:
ldr d8, [x20, #248]
ldr d7, [x20, #152]
LBB2_26:
str d8, [sp, #472] ; 8-byte Folded Spill
mov x8, #36544
movk x8, #43611, lsl #16
movk x8, #860, lsl #32
movk x8, #16326, lsl #48
fmov d0, x8
str d14, [sp, #520] ; 8-byte Folded Spill
fmul d1, d14, d0
mov x8, #18456
movk x8, #63321, lsl #16
movk x8, #33926, lsl #32
movk x8, #16223, lsl #48
fmov d2, x8
str d11, [sp, #496] ; 8-byte Folded Spill
fmul d3, d11, d2
fsub d1, d1, d3
mov x8, #63706
movk x8, #13221, lsl #16
movk x8, #1281, lsl #32
movk x8, #16209, lsl #48
fmov d3, x8
ldur d16, [x29, #-160] ; 8-byte Folded Reload
fmul d4, d16, d3
fsub d4, d1, d4
fmul d0, d5, d0
fmul d1, d6, d2
fsub d0, d0, d1
ldur d2, [x29, #-136] ; 8-byte Folded Reload
fmul d1, d2, d3
fmov d3, d2
fsub d0, d0, d1
str d4, [sp, #720] ; 8-byte Folded Spill
fmul d8, d4, d9
fmul d1, d16, d7
stur d1, [x29, #-144] ; 8-byte Folded Spill
str d9, [sp, #576] ; 8-byte Folded Spill
str d7, [sp, #712] ; 8-byte Folded Spill
fadd d1, d9, d7
stur d1, [x29, #-152] ; 8-byte Folded Spill
str d0, [sp, #920] ; 8-byte Folded Spill
fadd d12, d0, d12
mov x8, #7864
movk x8, #60293, lsl #16
movk x8, #47185, lsl #32
movk x8, #49054, lsl #48
fmov d0, x8
fadd d14, d12, d0
fmov d0, #0.50000000
fmul d0, d14, d0
fsub d0, d14, d0
fsub d0, d0, d12
ldp d2, d1, [x29, #-232] ; 16-byte Folded Reload
fmul d11, d0, d1
ldr d15, [sp, #888] ; 8-byte Folded Reload
fmul d1, d15, d11
fmul d13, d0, d2
ldur d2, [x29, #-216] ; 8-byte Folded Reload
fmul d2, d2, d13
fadd d1, d1, d2
fmul d10, d3, d0
fmul d0, d16, d10
fadd d9, d0, d1
fmul d0, d14, d14
mov x8, #26865
movk x8, #35043, lsl #16
movk x8, #63669, lsl #32
movk x8, #16100, lsl #48
fmov d1, x8
str d1, [sp, #864] ; 8-byte Folded Spill
fadd d0, d0, d1
fsqrt d0, d0
fmov d1, #1.50000000
bl _pow
str d0, [sp, #328] ; 8-byte Folded Spill
str d8, [sp, #432] ; 8-byte Folded Spill
ldur d0, [x29, #-208] ; 8-byte Folded Reload
fadd d3, d8, d0
stur d3, [x29, #-208] ; 8-byte Folded Spill
str d9, [sp, #976] ; 8-byte Folded Spill
ldur d0, [x29, #-152] ; 8-byte Folded Reload
fmul d0, d9, d0
ldr d8, [sp, #896] ; 8-byte Folded Reload
str d11, [sp, #696] ; 8-byte Folded Spill
fmul d1, d8, d11
ldr d11, [sp, #880] ; 8-byte Folded Reload
str d13, [sp, #248] ; 8-byte Folded Spill
fmul d2, d11, d13
fadd d1, d1, d2
str d10, [sp, #688] ; 8-byte Folded Spill
fadd d1, d10, d1
str d1, [sp, #856] ; 8-byte Folded Spill
ldur d2, [x29, #-144] ; 8-byte Folded Reload
fmul d1, d1, d2
fsub d0, d0, d1
fadd d9, d3, d0
mov x8, #211106232532992
movk x8, #49266, lsl #48
fmov d0, x8
stur d0, [x29, #-248] ; 8-byte Folded Spill
fmul d0, d14, d0
bl _tanh
str d0, [sp, #376] ; 8-byte Folded Spill
mov x8, #6148914691236517205
movk x8, #16341, lsl #48
fmov d0, x8
str d0, [sp, #960] ; 8-byte Folded Spill
str d9, [sp, #312] ; 8-byte Folded Spill
fsub d0, d0, d9
mov x8, #4632233691727265792
fmov d14, x8
fmul d0, d0, d14
bl _tanh
str d0, [sp, #384] ; 8-byte Folded Spill
mov x8, #-7378697629483820647
movk x8, #39322
movk x8, #16297, lsl #48
fmov d0, x8
fmul d1, d15, d0
fmul d2, d15, d1
ldp d5, d6, [x29, #-224] ; 16-byte Folded Reload
fmul d3, d5, d0
fadd d3, d12, d3
mov x8, #-7378697629483820647
movk x8, #39322
movk x8, #49049, lsl #48
fmov d4, x8
fadd d9, d3, d4
fmov d3, #0.50000000
fmul d3, d9, d3
fsub d3, d9, d3
fsub d3, d3, d12
fmul d4, d5, d3
fadd d2, d2, d4
fmul d0, d8, d0
fmul d4, d8, d0
fadd d12, d4, d2
fmul d2, d15, d12
fmul d4, d6, d1
ldur d5, [x29, #-232] ; 8-byte Folded Reload
fmul d5, d5, d3
fadd d4, d4, d5
fmul d5, d11, d0
fadd d13, d5, d4
fmul d4, d6, d13
fadd d2, d2, d4
ldur d4, [x29, #-160] ; 8-byte Folded Reload
fmul d1, d4, d1
ldur d5, [x29, #-136] ; 8-byte Folded Reload
fmul d3, d5, d3
fadd d1, d1, d3
fadd d15, d0, d1
fmul d0, d4, d15
fadd d10, d0, d2
fmul d0, d9, d9
ldr d1, [sp, #864] ; 8-byte Folded Reload
fadd d0, d0, d1
fsqrt d0, d0
fmov d1, #1.50000000
bl _pow
str d0, [sp, #304] ; 8-byte Folded Spill
str d10, [sp, #968] ; 8-byte Folded Spill
ldur d0, [x29, #-152] ; 8-byte Folded Reload
fmul d0, d10, d0
stp d13, d12, [sp, #224] ; 16-byte Folded Spill
fmul d1, d8, d12
fmul d2, d11, d13
fadd d1, d1, d2
str d15, [sp, #216] ; 8-byte Folded Spill
fadd d1, d15, d1
str d1, [sp, #848] ; 8-byte Folded Spill
ldur d2, [x29, #-144] ; 8-byte Folded Reload
fmul d1, d1, d2
fsub d0, d0, d1
ldur d1, [x29, #-208] ; 8-byte Folded Reload
fadd d8, d1, d0
ldur d0, [x29, #-248] ; 8-byte Folded Reload
fmul d0, d9, d0
bl _tanh
str d0, [sp, #352] ; 8-byte Folded Spill
str d8, [sp, #296] ; 8-byte Folded Spill
ldr d0, [sp, #960] ; 8-byte Folded Reload
fsub d0, d0, d8
fmul d0, d0, d14
bl _tanh
str d0, [sp, #368] ; 8-byte Folded Spill
movi d10, #0000000000000000
movi d1, #0000000000000000
movi d0, #0000000000000000
cbz x20, LBB2_28
; %bb.27:
ldr d1, [x20, #208]
ldr d0, [x20, #64]
LBB2_28:
str d1, [sp, #464] ; 8-byte Folded Spill
bl ___sincos_stret
ldr d13, [sp, #912] ; 8-byte Folded Reload
fmul d2, d13, d0
ldr d4, [sp, #904] ; 8-byte Folded Reload
fmul d3, d4, d1
fadd d8, d2, d3
fmul d1, d13, d1
fmul d0, d4, d0
fsub d9, d1, d0
movi d0, #0000000000000000
cbz x20, LBB2_30
; %bb.29:
ldr d10, [x20, #224]
ldr d0, [x20, #96]
LBB2_30:
str d10, [sp, #504] ; 8-byte Folded Spill
bl ___sincos_stret
fmul d5, d9, d0
fmul d6, d8, d1
mov x8, #11201
movk x8, #50599, lsl #16
movk x8, #31589, lsl #32
movk x8, #49010, lsl #48
fmov d3, x8
mov x8, #52090
movk x8, #42545, lsl #16
movk x8, #26349, lsl #32
movk x8, #16345, lsl #48
fmov d4, x8
movi d7, #0000000000000000
movi d2, #0000000000000000
cbz x20, LBB2_32
; %bb.31:
ldr d2, [x20, #72]
LBB2_32:
fmul d3, d9, d3
fmul d4, d8, d4
fadd d10, d5, d6
mov x8, #11201
movk x8, #50599, lsl #16
movk x8, #31589, lsl #32
movk x8, #49010, lsl #48
fmov d5, x8
mov x8, #52090
movk x8, #42545, lsl #16
movk x8, #26349, lsl #32
movk x8, #49113, lsl #48
fmov d6, x8
fmul d1, d9, d1
fmul d0, d8, d0
fsub d0, d1, d0
stur d0, [x29, #-208] ; 8-byte Folded Spill
movi d0, #0000000000000000
cbz x20, LBB2_34
; %bb.33:
ldr d7, [x20, #240]
ldr d0, [x20, #128]
LBB2_34:
str d7, [sp, #528] ; 8-byte Folded Spill
str d8, [sp, #552] ; 8-byte Folded Spill
fmul d8, d8, d5
str d9, [sp, #544] ; 8-byte Folded Spill
fmul d12, d9, d6
ldur d1, [x29, #-176] ; 8-byte Folded Reload
fadd d15, d1, d2
fadd d9, d4, d3
bl ___sincos_stret
ldur d4, [x29, #-208] ; 8-byte Folded Reload
fmul d2, d4, d1
fmul d3, d10, d0
fsub d2, d2, d3
fmul d0, d4, d0
fmul d1, d10, d1
fadd d0, d0, d1
mov x8, #4363988038922010624
fmov d3, x8
fmul d1, d0, d3
fmul d3, d2, d3
mov x8, #43115
movk x8, #62349, lsl #16
movk x8, #30721, lsl #32
movk x8, #49115, lsl #48
fmov d5, x8
movi d4, #0000000000000000
movi d6, #0000000000000000
cbz x20, LBB2_36
; %bb.35:
ldr d6, [x20, #104]
LBB2_36:
fadd d7, d8, d12
str d9, [sp, #832] ; 8-byte Folded Spill
fmul d24, d9, d15
str d10, [sp, #512] ; 8-byte Folded Spill
fmul d16, d10, d5
fsub d20, d2, d1
fsub d21, d3, d0
str d15, [sp, #944] ; 8-byte Folded Spill
fadd d19, d15, d6
mov x8, #43115
movk x8, #62349, lsl #16
movk x8, #30721, lsl #32
movk x8, #49115, lsl #48
fmov d5, x8
ldur d6, [x29, #-208] ; 8-byte Folded Reload
fmul d17, d6, d5
mov x8, #62612
movk x8, #18904, lsl #16
movk x8, #1144, lsl #32
movk x8, #49064, lsl #48
fmov d5, x8
fmul d5, d2, d5
mov x8, #47272
movk x8, #56762, lsl #16
movk x8, #43178, lsl #32
movk x8, #16292, lsl #48
fmov d6, x8
fmul d6, d0, d6
fadd d6, d6, d5
cbz x20, LBB2_38
; %bb.37:
ldr d4, [x20, #136]
LBB2_38:
fadd d10, d19, d4
mov x8, #62612
movk x8, #18904, lsl #16
movk x8, #1144, lsl #32
movk x8, #49064, lsl #48
fmov d4, x8
fmul d4, d0, d4
mov x8, #47272
movk x8, #56762, lsl #16
movk x8, #43178, lsl #32
movk x8, #49060, lsl #48
fmov d5, x8
fmul d5, d2, d5
fadd d18, d4, d5
fadd d23, d0, d3
fadd d22, d2, d1
str d24, [sp, #416] ; 8-byte Folded Spill
ldur d1, [x29, #-184] ; 8-byte Folded Reload
fadd d12, d1, d24
str d16, [sp, #816] ; 8-byte Folded Spill
str d19, [sp, #936] ; 8-byte Folded Spill
fmul d1, d16, d19
fsub d8, d12, d1
str d6, [sp, #808] ; 8-byte Folded Spill
fmul d14, d6, d10
mov x8, #43516
movk x8, #54001, lsl #16
movk x8, #25165, lsl #32
movk x8, #16240, lsl #48
fmov d1, x8
fmul d3, d20, d1
fmul d4, d20, d3
str d7, [sp, #824] ; 8-byte Folded Spill
ldur d5, [x29, #-240] ; 8-byte Folded Reload
fadd d5, d5, d7
str d17, [sp, #960] ; 8-byte Folded Spill
fadd d5, d5, d17
str d18, [sp, #952] ; 8-byte Folded Spill
fadd d6, d5, d18
fmul d1, d23, d1
fadd d1, d6, d1
mov x8, #20972
movk x8, #7864, lsl #16
movk x8, #60293, lsl #32
movk x8, #49057, lsl #48
fmov d5, x8
fadd d9, d1, d5
fmov d1, #0.50000000
fmul d1, d9, d1
fsub d1, d9, d1
str d6, [sp, #600] ; 8-byte Folded Spill
fsub d1, d1, d6
str d23, [sp, #728] ; 8-byte Folded Spill
fmul d5, d23, d1
fadd d4, d4, d5
str d20, [sp, #584] ; 8-byte Folded Spill
str d4, [sp, #264] ; 8-byte Folded Spill
fmul d4, d20, d4
fmul d5, d21, d3
str d22, [sp, #736] ; 8-byte Folded Spill
fmul d6, d22, d1
fadd d5, d5, d6
str d21, [sp, #744] ; 8-byte Folded Spill
str d5, [sp, #256] ; 8-byte Folded Spill
fmul d5, d21, d5
fadd d4, d4, d5
mov x8, #-4863887597560135680
fmov d5, x8
fmul d6, d2, d5
mov x8, #4354980839667269632
fmov d7, x8
fmul d16, d0, d7
fsub d6, d6, d16
fmul d3, d6, d3
fmul d0, d0, d5
fmul d2, d2, d7
fadd d15, d2, d0
fmul d0, d15, d1
fadd d0, d3, d0
stur d6, [x29, #-240] ; 8-byte Folded Spill
str d0, [sp, #280] ; 8-byte Folded Spill
fmul d0, d6, d0
fadd d11, d0, d4
fmul d0, d9, d9
mov x8, #26865
movk x8, #35043, lsl #16
movk x8, #63669, lsl #32
movk x8, #16100, lsl #48
fmov d1, x8
str d1, [sp, #320] ; 8-byte Folded Spill
fadd d0, d0, d1
fsqrt d0, d0
fmov d1, #1.50000000
bl _pow
str d0, [sp, #360] ; 8-byte Folded Spill
str d14, [sp, #456] ; 8-byte Folded Spill
fadd d1, d8, d14
stur d10, [x29, #-248] ; 8-byte Folded Spill
str d11, [sp, #480] ; 8-byte Folded Spill
fmul d0, d11, d10
str d1, [sp, #592] ; 8-byte Folded Spill
fadd d10, d0, d1
mov x8, #211106232532992
movk x8, #49266, lsl #48
fmov d0, x8
fmul d0, d9, d0
bl _tanh
str d0, [sp, #272] ; 8-byte Folded Spill
mov x8, #6148914691236517205
movk x8, #16341, lsl #48
fmov d0, x8
str d10, [sp, #240] ; 8-byte Folded Spill
fsub d0, d0, d10
mov x8, #4632233691727265792
fmov d1, x8
fmul d0, d0, d1
bl _tanh
str d0, [sp, #288] ; 8-byte Folded Spill
movi d9, #0000000000000000
movi d0, #0000000000000000
cbz x20, LBB2_40
; %bb.39:
ldr d0, [x20, #160]
LBB2_40:
bl ___sincos_stret
movi d5, #0000000000000000
cbz x20, LBB2_42
; %bb.41:
ldr d9, [x20, #256]
ldr d5, [x20, #168]
LBB2_42:
ldr x19, [x19]
cbz x19, LBB2_44
; %bb.43:
ldr d2, [sp, #704] ; 8-byte Folded Reload
ldr d3, [sp, #872] ; 8-byte Folded Reload
fsub d2, d3, d2
str d2, [sp, #168] ; 8-byte Folded Spill
fsub d2, d8, d12
str d2, [sp, #208] ; 8-byte Folded Spill
str d0, [sp, #664] ; 8-byte Folded Spill
ldp d31, d23, [x29, #-232] ; 16-byte Folded Reload
ldr d0, [sp, #696] ; 8-byte Folded Reload
fmul d2, d23, d0
ldr d0, [sp, #248] ; 8-byte Folded Reload
fmul d4, d31, d0
fadd d2, d2, d4
str d1, [sp, #656] ; 8-byte Folded Spill
ldur d1, [x29, #-136] ; 8-byte Folded Reload
ldr d0, [sp, #688] ; 8-byte Folded Reload
fmul d4, d1, d0
fadd d3, d4, d2
ldr d0, [sp, #232] ; 8-byte Folded Reload
fmul d2, d23, d0
ldr d0, [sp, #224] ; 8-byte Folded Reload
fmul d4, d31, d0
fadd d2, d2, d4
ldr d0, [sp, #216] ; 8-byte Folded Reload
fmul d4, d1, d0
fadd d7, d4, d2
ldr d0, [sp, #712] ; 8-byte Folded Reload
fmul d4, d1, d0
str d5, [sp, #680] ; 8-byte Folded Spill
ldr d0, [sp, #856] ; 8-byte Folded Reload
fmul d2, d0, d4
fmov d6, d4
ldur d0, [x29, #-152] ; 8-byte Folded Reload
str d3, [sp, #704] ; 8-byte Folded Spill
fmul d4, d3, d0
fmov d16, d0
fsub d4, d2, d4
ldr d0, [sp, #344] ; 8-byte Folded Reload
ldur d2, [x29, #-256] ; 8-byte Folded Reload
fmul d2, d2, d0
ldr d0, [sp, #336] ; 8-byte Folded Reload
ldr d5, [sp, #1016] ; 8-byte Folded Reload
fmul d5, d5, d0
fadd d2, d2, d5
ldur d0, [x29, #-144] ; 8-byte Folded Reload
fmul d5, d3, d0
fmov d17, d0
ldr d0, [sp, #976] ; 8-byte Folded Reload
fmov d3, d6
str d6, [sp, #688] ; 8-byte Folded Spill
fmul d6, d0, d6
fsub d22, d5, d6
ldr d0, [sp, #848] ; 8-byte Folded Reload
fmul d5, d0, d3
str d7, [sp, #696] ; 8-byte Folded Spill
fmul d6, d7, d16
fsub d6, d5, d6
ldr d0, [sp, #648] ; 8-byte Folded Reload
fmul d5, d1, d0
fadd d29, d5, d2
str d29, [sp, #216] ; 8-byte Folded Spill
fmul d2, d7, d17
ldr d0, [sp, #968] ; 8-byte Folded Reload
fmul d5, d0, d3
fsub d7, d2, d5
stp d7, d22, [sp, #336] ; 16-byte Folded Spill
ldr d5, [sp, #984] ; 8-byte Folded Reload
ldp d17, d0, [x29, #-176] ; 16-byte Folded Reload
fmul d2, d5, d17
ldr d16, [sp, #840] ; 8-byte Folded Reload
fsub d24, d16, d2
ldr d2, [sp, #992] ; 8-byte Folded Reload
fmul d2, d0, d2
ldr d3, [sp, #1008] ; 8-byte Folded Reload
fadd d2, d3, d2
fsub d16, d24, d16
fmul d16, d17, d16
fadd d26, d2, d16
str d26, [sp, #648] ; 8-byte Folded Spill
fmul d2, d0, d5
ldr d3, [sp, #1000] ; 8-byte Folded Reload
fsub d2, d3, d2
ldr d3, [sp, #408] ; 8-byte Folded Reload
fmul d16, d17, d3
fsub d28, d2, d16
str d28, [sp, #872] ; 8-byte Folded Spill
mov x8, #45033
movk x8, #40035, lsl #16
movk x8, #524, lsl #32
movk x8, #48971, lsl #48
mov x9, #45724
movk x9, #42429, lsl #16
movk x9, #11379, lsl #32
movk x9, #16169, lsl #48
fmov d16, x8
ldr d30, [sp, #896] ; 8-byte Folded Reload
fmul d16, d30, d16
fmov d17, x9
ldr d2, [sp, #880] ; 8-byte Folded Reload
fmul d18, d2, d17
fsub d16, d16, d18
mov x8, #40862
movk x8, #31695, lsl #16
movk x8, #12355, lsl #32
movk x8, #16198, lsl #48
fmul d17, d30, d17
fmov d18, x8
fmul d18, d2, d18
fsub d17, d18, d17
mov x8, #45033
movk x8, #40035, lsl #16
movk x8, #524, lsl #32
movk x8, #48971, lsl #48
mov x9, #45724
movk x9, #42429, lsl #16
movk x9, #11379, lsl #32
movk x9, #16169, lsl #48
fmov d18, x8
fmul d18, d23, d18
fmov d19, x9
fmul d21, d31, d19
fsub d18, d18, d21
mov x8, #40862
movk x8, #31695, lsl #16
movk x8, #12355, lsl #32
movk x8, #16198, lsl #48
fmul d19, d23, d19
fmov d21, x8
fmul d21, d31, d21
fsub d19, d21, d19
ldr d3, [sp, #792] ; 8-byte Folded Reload
ldr d20, [sp, #784] ; 8-byte Folded Reload
fmul d21, d3, d20
str d9, [sp, #672] ; 8-byte Folded Spill
fsub d9, d24, d21
str d24, [sp, #200] ; 8-byte Folded Spill
ldr d5, [sp, #768] ; 8-byte Folded Reload
ldr d11, [sp, #760] ; 8-byte Folded Reload
fmul d21, d5, d11
fsub d8, d9, d21
ldr d27, [sp, #576] ; 8-byte Folded Reload
ldr d21, [sp, #928] ; 8-byte Folded Reload
fmul d21, d21, d27
fsub d25, d8, d21
str d8, [sp, #144] ; 8-byte Folded Spill
ldr d21, [sp, #920] ; 8-byte Folded Reload
fmul d21, d21, d27
fsub d21, d25, d21
fmov d10, d25
str d25, [sp, #160] ; 8-byte Folded Spill
fadd d4, d21, d4
fmov d12, d21
str d21, [sp, #408] ; 8-byte Folded Spill
str d4, [sp, #184] ; 8-byte Folded Spill
fmul d4, d4, d4
fmul d21, d22, d22
fadd d22, d21, d4
mov x8, #64744
movk x8, #21380, lsl #16
movk x8, #23316, lsl #32
movk x8, #48978, lsl #48
fmov d4, x8
fadd d14, d16, d4
mov x8, #61406
movk x8, #16023, lsl #16
movk x8, #30452, lsl #32
movk x8, #48937, lsl #48
fmov d4, x8
fadd d25, d17, d4
mov x8, #64744
movk x8, #21380, lsl #16
movk x8, #23316, lsl #32
movk x8, #48978, lsl #48
fmov d4, x8
fmul d4, d1, d4
fadd d21, d18, d4
mov x8, #61406
movk x8, #16023, lsl #16
movk x8, #30452, lsl #32
movk x8, #48937, lsl #48
fmov d4, x8
fmul d4, d1, d4
stur d15, [x29, #-184] ; 8-byte Folded Spill
fadd d15, d19, d4
fadd d1, d12, d6
str d1, [sp, #192] ; 8-byte Folded Spill
fmul d4, d1, d1
fmul d6, d7, d7
fadd d16, d6, d4
ldr d1, [sp, #640] ; 8-byte Folded Reload
fadd d4, d0, d1
ldr d1, [sp, #800] ; 8-byte Folded Reload
fmul d6, d4, d1
fadd d6, d6, d26
fsub d17, d9, d24
fmul d17, d20, d17
fadd d6, d6, d17
fmul d17, d4, d3
str d4, [sp, #248] ; 8-byte Folded Spill
fsub d17, d28, d17
ldr d1, [sp, #632] ; 8-byte Folded Reload
fmul d18, d20, d1
fsub d3, d17, d18
stp d3, d6, [sp, #224] ; 16-byte Folded Spill
ldr d1, [sp, #864] ; 8-byte Folded Reload
fadd d13, d22, d1
fadd d0, d16, d1
str d0, [sp, #136] ; 8-byte Folded Spill
fmul d16, d27, d29
fsub d0, d10, d16
str d0, [sp, #176] ; 8-byte Folded Spill
fmul d16, d0, d0
ldr d1, [sp, #392] ; 8-byte Folded Reload
fadd d0, d16, d1
str d0, [sp, #152] ; 8-byte Folded Spill
fmov d17, d14
fmul d16, d30, d14
fmul d18, d2, d25
fadd d0, d16, d18
str d0, [sp, #632] ; 8-byte Folded Spill
fmul d18, d23, d21
fmul d19, d31, d15
fadd d12, d18, d19
str d12, [sp, #392] ; 8-byte Folded Spill
mov x8, #18811
movk x8, #34700, lsl #16
movk x8, #61210, lsl #32
movk x8, #16643, lsl #48
fmov d18, x8
ldr d1, [sp, #328] ; 8-byte Folded Reload
fmul d14, d1, d18
fmov d18, #3.00000000
ldr d1, [sp, #312] ; 8-byte Folded Reload
fmul d10, d1, d18
ldr d0, [sp, #296] ; 8-byte Folded Reload
fmul d0, d0, d18
str d0, [sp, #296] ; 8-byte Folded Spill
ldr d1, [sp, #536] ; 8-byte Folded Reload
fadd d29, d4, d1
fsub d19, d8, d9
ldr d0, [sp, #776] ; 8-byte Folded Reload
fmul d9, d29, d0
fsub d9, d6, d9
fmul d19, d11, d19
fadd d0, d9, d19
str d0, [sp, #864] ; 8-byte Folded Spill
fmul d9, d29, d5
str d29, [sp, #328] ; 8-byte Folded Spill
fsub d9, d3, d9
ldr d0, [sp, #168] ; 8-byte Folded Reload
fmul d20, d11, d0
fsub d0, d9, d20
str d0, [sp, #640] ; 8-byte Folded Spill
fmov d9, #-3.00000000
ldr d1, [sp, #400] ; 8-byte Folded Reload
fmul d9, d1, d9
mov x8, #56877
movk x8, #10885, lsl #16
movk x8, #2572, lsl #32
movk x8, #16289, lsl #48
mov x9, #62994
movk x9, #14722, lsl #16
movk x9, #41829, lsl #32
movk x9, #16247, lsl #48
fmov d11, x8
fmov d0, x9
ldr d6, [sp, #888] ; 8-byte Folded Reload
fmul d3, d6, d11
ldur d1, [x29, #-216] ; 8-byte Folded Reload
fmul d7, d1, d0
fadd d28, d3, d7
fmul d7, d23, d11
fmul d19, d31, d0
fadd d4, d7, d19
fmov d19, #1.00000000
fsub d10, d19, d10
fmul d10, d14, d10
mov x8, #10523
movk x8, #38535, lsl #16
movk x8, #12921, lsl #32
movk x8, #16642, lsl #48
fmov d14, x8
ldr d3, [sp, #304] ; 8-byte Folded Reload
fmul d14, d3, d14
fmul d11, d30, d11
fmul d0, d2, d0
fadd d18, d11, d0
mov x8, #45572
movk x8, #23979, lsl #16
movk x8, #34811, lsl #32
movk x8, #16645, lsl #48
fmov d11, x8
ldr d0, [sp, #424] ; 8-byte Folded Reload
fmul d11, d0, d11
mov x8, #64744
movk x8, #21380, lsl #16
movk x8, #23316, lsl #32
movk x8, #16194, lsl #48
mov x9, #64744
movk x9, #21380, lsl #16
movk x9, #23316, lsl #32
movk x9, #48962, lsl #48
fmul d15, d1, d15
fmov d0, x8
fmul d3, d2, d0
fmov d20, x9
fmul d20, d31, d20
fmul d26, d31, d25
fmul d25, d1, d25
fmul d24, d1, d0
mov x8, #61406
movk x8, #16023, lsl #16
movk x8, #30452, lsl #32
movk x8, #16153, lsl #48
mov x9, #61406
movk x9, #16023, lsl #16
movk x9, #30452, lsl #32
movk x9, #16153, lsl #48
fmul d2, d6, d21
fmov d31, x8
fmul d1, d30, d31
fmov d22, x9
fmul d22, d23, d22
fmul d21, d23, d17
fmul d17, d6, d17
fmul d31, d6, d31
ldur d5, [x29, #-136] ; 8-byte Folded Reload
fmul d23, d5, d27
ldr d6, [sp, #712] ; 8-byte Folded Reload
fmul d7, d23, d6
ldur d0, [x29, #-160] ; 8-byte Folded Reload
fmul d23, d0, d27
fmul d16, d23, d6
mov x8, #54125
movk x8, #53060, lsl #16
movk x8, #15481, lsl #32
movk x8, #16273, lsl #48
fmov d23, x8
fmul d30, d0, d23
fadd d6, d30, d28
fmul d30, d5, d23
fadd d4, d30, d4
stp d4, d6, [sp, #304] ; 16-byte Folded Spill
fadd d15, d2, d15
fsub d28, d1, d3
mov x8, #16684
movk x8, #33360, lsl #16
movk x8, #18212, lsl #32
movk x8, #48931, lsl #48
ldr d1, [sp, #632] ; 8-byte Folded Reload
fadd d1, d12, d1
fmov d2, x8
fsub d1, d2, d1
str d1, [sp, #400] ; 8-byte Folded Spill
fmov d6, #0.50000000
ldr d1, [sp, #376] ; 8-byte Folded Reload
fmul d2, d1, d6
fadd d2, d2, d6
fmul d2, d2, d10
ldr d1, [sp, #296] ; 8-byte Folded Reload
fsub d30, d19, d1
fmul d10, d14, d30
mov x8, #-7378697629483820647
movk x8, #39322
movk x8, #16329, lsl #48
fsqrt d13, d13
ldr d1, [sp, #136] ; 8-byte Folded Reload
fsqrt d4, d1
fmov d30, x8
fdiv d8, d13, d30
str d13, [sp, #888] ; 8-byte Folded Spill
fminnm d1, d8, d19
fdiv d30, d4, d30
str d4, [sp, #880] ; 8-byte Folded Spill
fminnm d19, d30, d19
fadd d14, d18, d23
fmov d18, #1.00000000
fadd d23, d9, d18
fmul d9, d11, d23
mov x8, #-7378697629483820647
movk x8, #39322
movk x8, #16329, lsl #48
ldp d3, d30, [sp, #152] ; 16-byte Folded Reload
fsqrt d3, d3
fmov d23, x8
fdiv d23, d3, d23
fmov d12, d3
str d3, [sp, #376] ; 8-byte Folded Spill
fminnm d18, d23, d18
ldr d3, [sp, #560] ; 8-byte Folded Reload
fadd d23, d29, d3
ldr d3, [sp, #144] ; 8-byte Folded Reload
fsub d29, d30, d3
ldr d3, [sp, #752] ; 8-byte Folded Reload
fmul d8, d23, d3
ldr d3, [sp, #864] ; 8-byte Folded Reload
fadd d8, d8, d3
fmul d29, d27, d29
fadd d3, d8, d29
str d3, [sp, #296] ; 8-byte Folded Spill
ldr d3, [sp, #928] ; 8-byte Folded Reload
fmul d8, d23, d3
ldr d3, [sp, #640] ; 8-byte Folded Reload
fsub d8, d3, d8
ldr d3, [sp, #488] ; 8-byte Folded Reload
fmul d11, d27, d3
fsub d11, d8, d11
fadd d3, d17, d25
fadd d20, d22, d20
ldr d17, [sp, #472] ; 8-byte Folded Reload
fmul d22, d0, d17
fsub d0, d22, d7
stur d0, [x29, #-232] ; 8-byte Folded Spill
fadd d21, d21, d26
fsub d29, d31, d24
fmul d7, d5, d17
fadd d24, d7, d16
ldr d0, [sp, #408] ; 8-byte Folded Reload
fsub d5, d0, d30
fadd d15, d28, d15
ldr d0, [sp, #384] ; 8-byte Folded Reload
fmul d0, d0, d6
fadd d0, d0, d6
fmul d0, d0, d2
str d0, [sp, #896] ; 8-byte Folded Spill
mov x8, #-7378697629483820647
movk x8, #39322
movk x8, #16361, lsl #48
fmov d0, x8
fmul d2, d1, d0
fmul d7, d13, d6
fadd d1, d7, d2
str d1, [sp, #712] ; 8-byte Folded Spill
ldr d1, [sp, #352] ; 8-byte Folded Reload
fmul d2, d1, d6
fadd d2, d2, d6
fmul d2, d2, d10
fmul d0, d19, d0
mov x8, #49235
movk x8, #28989, lsl #16
movk x8, #40841, lsl #32
movk x8, #16312, lsl #48
fmov d7, x8
ldr d16, [sp, #520] ; 8-byte Folded Reload
fmul d7, d16, d7
mov x8, #45974
movk x8, #34787, lsl #16
movk x8, #35902, lsl #32
movk x8, #16285, lsl #48
fmov d16, x8
ldr d1, [sp, #496] ; 8-byte Folded Reload
fmul d16, d1, d16
mov x8, #49235
movk x8, #28989, lsl #16
movk x8, #40841, lsl #32
movk x8, #16312, lsl #48
fmov d19, x8
ldur d1, [x29, #-256] ; 8-byte Folded Reload
fmul d19, d1, d19
mov x8, #45974
movk x8, #34787, lsl #16
movk x8, #35902, lsl #32
movk x8, #16285, lsl #48
fmov d22, x8
ldr d1, [sp, #1016] ; 8-byte Folded Reload
fmul d22, d1, d22
fmov d1, #0.50000000
ldr d25, [sp, #440] ; 8-byte Folded Reload
fmul d25, d25, d1
fadd d25, d25, d1
fmul d25, d25, d9
mov x8, #-7378697629483820647
movk x8, #39322
movk x8, #16361, lsl #48
fmov d26, x8
fmul d18, d18, d26
fsub d10, d3, d20
fadd d20, d29, d21
ldr d3, [sp, #368] ; 8-byte Folded Reload
fmul d3, d3, d6
fadd d3, d3, d6
fmul d2, d3, d2
stur d2, [x29, #-256] ; 8-byte Folded Spill
fmul d2, d4, d6
fadd d0, d2, d0
stur d0, [x29, #-216] ; 8-byte Folded Spill
ldr d28, [sp, #688] ; 8-byte Folded Reload
fmul d0, d14, d28
ldp d21, d6, [x29, #-152] ; 16-byte Folded Reload
ldp d8, d9, [sp, #304] ; 16-byte Folded Reload
fmul d2, d8, d21
fsub d29, d0, d2
fmul d2, d8, d6
fmul d3, d9, d28
fmov d0, d28
fsub d3, d2, d3
fmul d2, d9, d21
fmul d4, d14, d6
fmov d28, d6
fsub d30, d2, d4
ldp d6, d2, [sp, #256] ; 16-byte Folded Reload
ldr d4, [sp, #728] ; 8-byte Folded Reload
fmul d2, d4, d2
ldr d4, [sp, #736] ; 8-byte Folded Reload
fmul d6, d4, d6
fadd d4, d2, d6
ldr d2, [sp, #448] ; 8-byte Folded Reload
fmul d6, d2, d1
fadd d6, d6, d1
fmul d2, d6, d25
stur d2, [x29, #-224] ; 8-byte Folded Spill
fmul d6, d12, d1
fadd d6, d6, d18
fadd d26, d7, d16
fadd d12, d19, d22
mov x8, #3449
movk x8, #18764, lsl #16
movk x8, #45194, lsl #32
movk x8, #16217, lsl #48
fmov d7, x8
ldr d1, [sp, #392] ; 8-byte Folded Reload
fadd d16, d1, d7
str d16, [sp, #560] ; 8-byte Folded Spill
ldr d1, [sp, #400] ; 8-byte Folded Reload
fadd d18, d1, d7
str d18, [sp, #520] ; 8-byte Folded Spill
ldr d1, [sp, #632] ; 8-byte Folded Reload
fadd d2, d1, d7
str d2, [sp, #448] ; 8-byte Folded Spill
ldr d1, [sp, #720] ; 8-byte Folded Reload
fmul d1, d23, d1
ldr d13, [sp, #296] ; 8-byte Folded Reload
fadd d1, d1, d13
fmul d5, d27, d5
fadd d31, d5, d1
ldr d1, [sp, #920] ; 8-byte Folded Reload
fmul d1, d23, d1
fsub d1, d11, d1
ldr d5, [sp, #432] ; 8-byte Folded Reload
fmul d5, d27, d5
fsub d22, d1, d5
str d15, [sp, #536] ; 8-byte Folded Spill
fmul d1, d15, d28
fmul d5, d16, d0
fadd d1, d1, d5
fmul d5, d18, d28
fmov d19, d28
fmul d7, d15, d0
fmov d28, d0
fadd d5, d5, d7
fadd d25, d23, d17
ldur d0, [x29, #-232] ; 8-byte Folded Reload
stp d14, d25, [sp, #424] ; 16-byte Folded Spill
fmul d7, d14, d0
fmul d16, d9, d25
fsub d15, d7, d16
fmul d7, d8, d25
str d24, [sp, #488] ; 8-byte Folded Spill
fmul d16, d14, d24
fsub d7, d7, d16
str d7, [sp, #1016] ; 8-byte Folded Spill
ldur d7, [x29, #-184] ; 8-byte Folded Reload
ldr d16, [sp, #280] ; 8-byte Folded Reload
fmul d7, d7, d16
fadd d14, d7, d4
str d14, [sp, #352] ; 8-byte Folded Spill
str d10, [sp, #440] ; 8-byte Folded Spill
fmul d7, d10, d0
str d20, [sp, #496] ; 8-byte Folded Spill
fmul d16, d20, d24
fadd d7, d7, d16
fmul d16, d20, d21
fadd d17, d16, d1
str d17, [sp, #400] ; 8-byte Folded Spill
fmul d1, d10, d21
fadd d18, d1, d5
str d18, [sp, #472] ; 8-byte Folded Spill
fmul d0, d21, d29
fmul d1, d19, d3
fsub d0, d0, d1
fmul d1, d28, d3
fmul d3, d21, d30
fsub d1, d1, d3
ldur d29, [x29, #-224] ; 8-byte Folded Reload
fmul d3, d29, d6
ldr d4, [sp, #176] ; 8-byte Folded Reload
fmul d3, d4, d3
fmul d4, d2, d25
fadd d4, d4, d7
fmov d20, d22
str d22, [sp, #408] ; 8-byte Folded Spill
fmul d5, d22, d8
fmul d6, d31, d9
fsub d5, d6, d5
fmul d6, d12, d27
fmul d6, d27, d6
fmul d7, d26, d27
fmul d7, d27, d7
fmul d16, d23, d12
fsub d16, d11, d16
fmul d27, d12, d11
ldr d2, [sp, #376] ; 8-byte Folded Reload
fdiv d24, d3, d2
fadd d3, d5, d4
fmul d4, d19, d17
fmul d5, d28, d18
fsub d4, d4, d5
ldr d21, [sp, #896] ; 8-byte Folded Reload
ldr d2, [sp, #712] ; 8-byte Folded Reload
fmul d2, d21, d2
stp d2, d31, [sp, #376] ; 16-byte Folded Spill
ldr d5, [sp, #184] ; 8-byte Folded Reload
fmul d5, d5, d2
ldr d2, [sp, #888] ; 8-byte Folded Reload
fdiv d19, d5, d2
ldr d5, [sp, #976] ; 8-byte Folded Reload
fmul d5, d5, d21
fmov d22, d21
ldr d18, [sp, #704] ; 8-byte Folded Reload
fmul d18, d18, d19
fmov d25, d19
str d19, [sp, #392] ; 8-byte Folded Spill
fadd d5, d5, d18
mov x8, #54806
movk x8, #23353, lsl #16
movk x8, #56949, lsl #32
movk x8, #16326, lsl #48
fsub d18, d31, d15
fmov d19, x8
fmul d18, d18, d19
fmul d0, d0, d19
fadd d0, d18, d0
ldr d2, [sp, #1016] ; 8-byte Folded Reload
fsub d18, d20, d2
fmul d18, d18, d19
fmul d1, d1, d19
fadd d1, d18, d1
mov x8, #50080
movk x8, #49599, lsl #16
movk x8, #32579, lsl #32
movk x8, #16368, lsl #48
fmul d18, d23, d26
fadd d18, d18, d13
fmov d20, x8
fmul d18, d18, d20
fmul d6, d6, d20
fmul d16, d16, d20
fmul d7, d7, d20
fmul d20, d26, d13
fsub d17, d20, d27
mov x8, #2356
movk x8, #12413, lsl #16
movk x8, #55910, lsl #32
movk x8, #49095, lsl #48
fmov d20, x8
ldur d21, [x29, #-192] ; 8-byte Folded Reload
fmul d12, d21, d20
mov x8, #2356
movk x8, #12413, lsl #16
movk x8, #55910, lsl #32
movk x8, #49095, lsl #48
fmov d20, x8
ldur d21, [x29, #-200] ; 8-byte Folded Reload
fmul d27, d21, d20
fmul d3, d3, d19
fmul d4, d4, d19
mov x8, #11213
movk x8, #64899, lsl #16
movk x8, #2195, lsl #32
movk x8, #49148, lsl #48
fmov d19, x8
fmul d20, d9, d19
fadd d5, d5, d20
ldur d9, [x29, #-256] ; 8-byte Folded Reload
ldur d2, [x29, #-216] ; 8-byte Folded Reload
fmul d2, d9, d2
str d2, [sp, #368] ; 8-byte Folded Spill
ldr d20, [sp, #192] ; 8-byte Folded Reload
fmul d20, d20, d2
ldr d2, [sp, #880] ; 8-byte Folded Reload
fdiv d2, d20, d2
str d2, [sp, #576] ; 8-byte Folded Spill
ldr d20, [sp, #968] ; 8-byte Folded Reload
fmul d20, d20, d9
ldr d21, [sp, #696] ; 8-byte Folded Reload
fmul d21, d21, d2
fadd d20, d20, d21
fadd d19, d22, d19
fadd d19, d9, d19
fsub d19, d0, d19
fadd d0, d25, d2
fadd d1, d1, d0
fsub d6, d18, d6
fsub d16, d16, d7
ldr d2, [sp, #824] ; 8-byte Folded Reload
ldr d9, [sp, #944] ; 8-byte Folded Reload
fmul d0, d2, d9
fmov d31, d2
ldr d8, [sp, #200] ; 8-byte Folded Reload
fsub d25, d8, d0
ldr d22, [sp, #936] ; 8-byte Folded Reload
ldr d0, [sp, #960] ; 8-byte Folded Reload
fmul d0, d0, d22
fsub d2, d25, d0
stur d2, [x29, #-216] ; 8-byte Folded Spill
ldur d7, [x29, #-248] ; 8-byte Folded Reload
ldr d0, [sp, #952] ; 8-byte Folded Reload
fmul d0, d0, d7
fsub d2, d2, d0
fmul d0, d14, d7
fsub d0, d2, d0
stp d0, d2, [x29, #-200] ; 16-byte Folded Spill
fmul d7, d0, d0
ldr d0, [sp, #320] ; 8-byte Folded Reload
fadd d28, d7, d0
mov x8, #22951
movk x8, #29779, lsl #16
movk x8, #56802, lsl #32
movk x8, #16266, lsl #48
fmov d7, x8
fmul d7, d23, d7
fadd d7, d7, d17
mov x8, #42264
movk x8, #33609, lsl #16
movk x8, #14594, lsl #32
movk x8, #16420, lsl #48
fmov d17, x8
fmul d17, d26, d17
fmov d13, d29
ldr d2, [sp, #568] ; 8-byte Folded Reload
fmul d18, d2, d29
str d24, [sp, #632] ; 8-byte Folded Spill
ldr d0, [sp, #216] ; 8-byte Folded Reload
fmul d21, d0, d24
fadd d18, d18, d21
fadd d3, d3, d4
fadd d4, d20, d5
mov x8, #42264
movk x8, #33609, lsl #16
movk x8, #14594, lsl #32
movk x8, #49188, lsl #48
fmov d5, x8
fadd d5, d29, d5
fsub d5, d6, d5
fmov d6, #-3.00000000
ldr d0, [sp, #240] ; 8-byte Folded Reload
fmul d6, d0, d6
mov x8, #50080
movk x8, #49599, lsl #16
movk x8, #32579, lsl #32
movk x8, #16368, lsl #48
fmov d20, x8
fmul d20, d7, d20
fsub d17, d18, d17
mov x8, #28530
movk x8, #30490, lsl #16
movk x8, #27495, lsl #32
movk x8, #49093, lsl #48
fmov d7, x8
ldp d21, d18, [x29, #-128] ; 16-byte Folded Reload
fmul d13, d18, d7
mov x8, #28530
movk x8, #30490, lsl #16
movk x8, #27495, lsl #32
movk x8, #49093, lsl #48
fmov d18, x8
fmul d26, d21, d18
fsub d2, d3, d4
str d2, [sp, #568] ; 8-byte Folded Spill
ldr d0, [sp, #720] ; 8-byte Folded Reload
fmul d3, d0, d19
ldr d0, [sp, #920] ; 8-byte Folded Reload
fmul d4, d0, d1
fsub d3, d3, d4
fadd d4, d5, d19
fadd d5, d16, d24
fadd d5, d5, d1
mov x8, #45572
movk x8, #23979, lsl #16
movk x8, #34811, lsl #32
movk x8, #16645, lsl #48
fmov d1, x8
ldr d0, [sp, #360] ; 8-byte Folded Reload
fmul d16, d0, d1
fsub d18, d20, d17
mov x8, #6432
movk x8, #24166, lsl #16
movk x8, #7623, lsl #32
movk x8, #16309, lsl #48
mov x9, #30506
movk x9, #37777, lsl #16
movk x9, #58002, lsl #32
movk x9, #16361, lsl #48
fmov d1, x8
ldr d0, [sp, #864] ; 8-byte Folded Reload
fmul d17, d0, d1
fmov d19, x9
fadd d19, d17, d19
ldr d0, [sp, #640] ; 8-byte Folded Reload
fmul d20, d0, d1
ldr d14, [sp, #328] ; 8-byte Folded Reload
fmul d1, d14, d27
ldr d0, [sp, #224] ; 8-byte Folded Reload
fsub d21, d0, d1
fmul d29, d27, d0
ldr d0, [sp, #760] ; 8-byte Folded Reload
fmul d1, d27, d0
fmul d27, d0, d1
fmul d1, d12, d0
fmul d30, d0, d1
ldr d1, [sp, #464] ; 8-byte Folded Reload
ldur d0, [x29, #-168] ; 8-byte Folded Reload
fadd d24, d0, d1
fsub d1, d25, d8
ldr d17, [sp, #832] ; 8-byte Folded Reload
fmul d17, d24, d17
ldr d7, [sp, #648] ; 8-byte Folded Reload
fadd d17, d7, d17
fmul d1, d9, d1
fadd d0, d17, d1
fmul d1, d24, d31
fmov d10, d24
str d24, [sp, #864] ; 8-byte Folded Spill
ldr d11, [sp, #872] ; 8-byte Folded Reload
fsub d1, d11, d1
ldr d17, [sp, #416] ; 8-byte Folded Reload
fmul d17, d9, d17
fsub d17, d1, d17
str d17, [sp, #920] ; 8-byte Folded Spill
fmov d31, #1.00000000
fadd d6, d6, d31
fmul d16, d16, d6
mov x8, #-7378697629483820647
movk x8, #39322
movk x8, #16329, lsl #48
fsqrt d1, d28
fmov d6, x8
fdiv d6, d1, d6
fmov d15, d1
str d1, [sp, #720] ; 8-byte Folded Spill
fminnm d28, d6, d31
mov x8, #48611
movk x8, #369, lsl #16
movk x8, #53146, lsl #32
movk x8, #16296, lsl #48
fmov d6, x8
fmul d31, d14, d6
fmul d6, d14, d12
ldr d1, [sp, #232] ; 8-byte Folded Reload
fsub d8, d1, d6
fmul d6, d12, d1
fadd d29, d29, d6
mov x8, #18772
movk x8, #35328, lsl #16
movk x8, #62057, lsl #32
movk x8, #16258, lsl #48
fmov d6, x8
fmul d23, d23, d6
fadd d3, d3, d2
fadd d3, d18, d3
ldr d1, [sp, #752] ; 8-byte Folded Reload
fmul d6, d1, d4
ldr d1, [sp, #928] ; 8-byte Folded Reload
fmul d18, d1, d5
fsub d9, d6, d18
fadd d6, d19, d4
fadd d18, d20, d5
mov x8, #5915
movk x8, #64709, lsl #16
movk x8, #30489, lsl #32
movk x8, #16392, lsl #48
fmov d4, x8
fmul d5, d8, d4
fmul d19, d27, d4
fsub d5, d5, d19
fmul d19, d21, d4
fmul d4, d30, d4
fmov d24, #0.50000000
ldr d1, [sp, #272] ; 8-byte Folded Reload
fmul d20, d1, d24
fadd d20, d20, d24
fmul d16, d20, d16
mov x8, #-7378697629483820647
movk x8, #39322
movk x8, #16361, lsl #48
fmov d20, x8
fmul d21, d28, d20
fsub d27, d31, d29
mov x8, #6432
movk x8, #24166, lsl #16
movk x8, #7623, lsl #32
movk x8, #16309, lsl #48
fmov d20, x8
fmul d23, d23, d20
fadd d3, d9, d3
mov x8, #61302
movk x8, #27691, lsl #16
movk x8, #64897, lsl #32
movk x8, #16445, lsl #48
fmov d20, x8
fadd d5, d5, d20
fadd d4, d19, d4
ldr d1, [sp, #784] ; 8-byte Folded Reload
fmul d19, d26, d1
fmul d28, d1, d19
fmul d19, d13, d1
fmul d29, d1, d19
ldr d2, [sp, #504] ; 8-byte Folded Reload
fadd d30, d10, d2
ldur d10, [x29, #-216] ; 8-byte Folded Reload
fsub d19, d10, d25
ldr d2, [sp, #816] ; 8-byte Folded Reload
fmul d20, d30, d2
fmov d9, d0
str d0, [sp, #264] ; 8-byte Folded Spill
fsub d20, d0, d20
fmul d19, d22, d19
fadd d0, d20, d19
str d0, [sp, #296] ; 8-byte Folded Spill
ldr d1, [sp, #960] ; 8-byte Folded Reload
fmul d19, d30, d1
str d30, [sp, #784] ; 8-byte Folded Spill
fsub d19, d17, d19
ldr d2, [sp, #208] ; 8-byte Folded Reload
fmul d20, d22, d2
fsub d17, d19, d20
str d17, [sp, #760] ; 8-byte Folded Spill
ldr d1, [sp, #288] ; 8-byte Folded Reload
fmul d19, d1, d24
fadd d19, d19, d24
fmul d19, d19, d16
fmul d16, d15, d24
fadd d1, d16, d21
str d1, [sp, #752] ; 8-byte Folded Spill
fmov d15, d7
fmul d16, d13, d7
fmov d7, d11
fmul d21, d26, d11
fadd d16, d21, d16
mov x8, #5915
movk x8, #64709, lsl #16
movk x8, #30489, lsl #32
movk x8, #16392, lsl #48
fmov d21, x8
fmul d21, d27, d21
mov x8, #61302
movk x8, #27691, lsl #16
movk x8, #64897, lsl #32
movk x8, #49213, lsl #48
fmov d25, x8
fmul d24, d12, d25
fadd d11, d23, d3
str d11, [sp, #280] ; 8-byte Folded Spill
ldr d1, [sp, #776] ; 8-byte Folded Reload
fmul d3, d1, d6
ldr d1, [sp, #768] ; 8-byte Folded Reload
fmul d23, d1, d18
fadd d3, d3, d23
fadd d5, d5, d6
fadd d6, d4, d18
mov x8, #4125
movk x8, #18328, lsl #16
movk x8, #56919, lsl #32
movk x8, #16293, lsl #48
ldr d23, [sp, #248] ; 8-byte Folded Reload
fmul d4, d23, d26
fmov d18, x8
fmul d18, d23, d18
fmul d23, d23, d13
fmov d12, d13
mov x8, #64990
movk x8, #28266, lsl #16
movk x8, #45172, lsl #32
movk x8, #16414, lsl #48
fsub d23, d15, d23
fmov d25, x8
fmul d23, d23, d25
fmul d26, d28, d25
fsub d23, d23, d26
fsub d4, d7, d4
fmul d4, d4, d25
fmul d25, d29, d25
mov x8, #4359484439294640128
mov x9, #4354980839667269632
fmov d26, x8
fmov d27, x9
ldr d1, [sp, #656] ; 8-byte Folded Reload
fmul d28, d1, d26
ldr d2, [sp, #664] ; 8-byte Folded Reload
fmul d29, d2, d27
fsub d29, d29, d28
fmul d27, d1, d27
fmul d26, d2, d26
fadd d26, d27, d26
fsub d16, d18, d16
fadd d18, d21, d24
mov x8, #23440
movk x8, #2685, lsl #16
movk x8, #53080, lsl #32
movk x8, #16466, lsl #48
fmov d21, x8
fadd d21, d23, d21
fadd d23, d4, d25
mov x8, #28530
movk x8, #30490, lsl #16
movk x8, #27495, lsl #32
movk x8, #49093, lsl #48
fmov d4, x8
ldr d2, [sp, #552] ; 8-byte Folded Reload
fmul d14, d2, d4
ldr d2, [sp, #544] ; 8-byte Folded Reload
fmul d25, d2, d4
mov x8, #2356
movk x8, #12413, lsl #16
movk x8, #55910, lsl #32
movk x8, #49095, lsl #48
fmov d4, x8
ldr d2, [sp, #512] ; 8-byte Folded Reload
fmul d1, d2, d4
str d1, [sp, #776] ; 8-byte Folded Spill
mov x8, #2356
movk x8, #12413, lsl #16
movk x8, #55910, lsl #32
movk x8, #49095, lsl #48
fmov d4, x8
ldur d2, [x29, #-208] ; 8-byte Folded Reload
fmul d20, d2, d4
mov x8, #49235
movk x8, #28989, lsl #16
movk x8, #40841, lsl #32
movk x8, #16312, lsl #48
mov x9, #45974
movk x9, #34787, lsl #16
movk x9, #35902, lsl #32
movk x9, #16285, lsl #48
fmov d4, x8
ldr d13, [sp, #584] ; 8-byte Folded Reload
fmul d4, d13, d4
fmov d24, x9
ldr d31, [sp, #744] ; 8-byte Folded Reload
fmul d24, d31, d24
fadd d8, d4, d24
ldr d2, [sp, #528] ; 8-byte Folded Reload
fadd d27, d30, d2
str d27, [sp, #768] ; 8-byte Folded Spill
ldur d1, [x29, #-192] ; 8-byte Folded Reload
fsub d4, d1, d10
ldr d2, [sp, #808] ; 8-byte Folded Reload
fmul d22, d27, d2
fadd d22, d22, d0
ldur d7, [x29, #-248] ; 8-byte Folded Reload
fmul d4, d7, d4
fadd d1, d22, d4
str d1, [sp, #288] ; 8-byte Folded Spill
mov x8, #49235
movk x8, #28989, lsl #16
movk x8, #40841, lsl #32
movk x8, #16312, lsl #48
mov x9, #45974
movk x9, #34787, lsl #16
movk x9, #35902, lsl #32
movk x9, #16285, lsl #48
fmov d4, x8
ldr d28, [sp, #728] ; 8-byte Folded Reload
fmul d4, d28, d4
fmov d22, x9
ldr d24, [sp, #736] ; 8-byte Folded Reload
fmul d22, d24, d22
fadd d10, d4, d22
ldr d0, [sp, #952] ; 8-byte Folded Reload
fmul d4, d27, d0
fsub d4, d17, d4
ldr d2, [sp, #456] ; 8-byte Folded Reload
fmul d22, d7, d2
fsub d4, d4, d22
str d4, [sp, #928] ; 8-byte Folded Spill
fmov d30, d19
str d19, [sp, #1016] ; 8-byte Folded Spill
ldr d0, [sp, #752] ; 8-byte Folded Reload
fmul d19, d19, d0
ldur d0, [x29, #-200] ; 8-byte Folded Reload
fmul d0, d0, d19
mov x8, #64990
movk x8, #28266, lsl #16
movk x8, #45172, lsl #32
movk x8, #16414, lsl #48
fmov d19, x8
fmul d16, d16, d19
mov x8, #23440
movk x8, #2685, lsl #16
movk x8, #53080, lsl #32
movk x8, #49234, lsl #48
fmov d19, x8
fmul d7, d12, d19
fsub d3, d11, d3
fadd d12, d18, d3
str d12, [sp, #224] ; 8-byte Folded Spill
ldr d2, [sp, #800] ; 8-byte Folded Reload
fmul d3, d2, d5
ldr d2, [sp, #792] ; 8-byte Folded Reload
fmul d18, d2, d6
fsub d3, d3, d18
fadd d22, d21, d5
str d22, [sp, #640] ; 8-byte Folded Spill
fadd d23, d23, d6
str d23, [sp, #552] ; 8-byte Folded Spill
ldr d2, [sp, #720] ; 8-byte Folded Reload
fdiv d19, d0, d2
mov x8, #45033
movk x8, #40035, lsl #16
movk x8, #524, lsl #32
movk x8, #48971, lsl #48
mov x9, #45724
movk x9, #42429, lsl #16
movk x9, #11379, lsl #32
movk x9, #16169, lsl #48
fmov d0, x8
stp d26, d29, [x29, #-128] ; 16-byte Folded Spill
fmul d0, d29, d0
fmov d5, x9
fmul d6, d26, d5
fsub d0, d0, d6
mov x8, #40862
movk x8, #31695, lsl #16
movk x8, #12355, lsl #32
movk x8, #16198, lsl #48
fmul d5, d29, d5
fmov d6, x8
fmul d6, d26, d6
fsub d5, d6, d5
fadd d6, d16, d7
fmul d7, d15, d14
fmov d11, d25
str d25, [sp, #208] ; 8-byte Folded Spill
ldr d2, [sp, #872] ; 8-byte Folded Reload
fmul d16, d2, d25
fadd d7, d7, d16
ldr d27, [sp, #776] ; 8-byte Folded Reload
fmul d16, d27, d9
ldr d15, [sp, #920] ; 8-byte Folded Reload
fmul d17, d20, d15
stp d20, d10, [sp, #184] ; 16-byte Folded Spill
fadd d16, d17, d16
fmul d17, d8, d1
fmov d9, d8
fmul d18, d10, d4
fsub d17, d17, d18
mov x8, #64744
movk x8, #21380, lsl #16
movk x8, #23316, lsl #32
movk x8, #16210, lsl #48
fmov d18, x8
fadd d25, d0, d18
str d25, [sp, #256] ; 8-byte Folded Spill
ldr d21, [sp, #656] ; 8-byte Folded Reload
fmul d0, d13, d21
ldr d2, [sp, #664] ; 8-byte Folded Reload
fmul d18, d31, d2
fadd d8, d0, d18
mov x8, #61406
movk x8, #16023, lsl #16
movk x8, #30452, lsl #32
movk x8, #16169, lsl #48
fmov d0, x8
fadd d26, d5, d0
str d26, [sp, #272] ; 8-byte Folded Spill
fmul d0, d31, d21
fmul d5, d13, d2
fmov d31, d13
fsub d29, d0, d5
fmul d0, d24, d21
fmul d5, d28, d2
fsub d13, d0, d5
fmul d0, d28, d21
fmov d21, d28
fmul d5, d24, d2
fmov d18, d24
fadd d28, d0, d5
fadd d0, d3, d12
fadd d24, d6, d0
str d24, [sp, #200] ; 8-byte Folded Spill
ldr d0, [sp, #992] ; 8-byte Folded Reload
fmul d0, d0, d22
ldr d3, [sp, #984] ; 8-byte Folded Reload
fmul d3, d3, d23
fsub d0, d0, d3
mov x8, #4125
movk x8, #18328, lsl #16
movk x8, #56919, lsl #32
movk x8, #16293, lsl #48
fmov d3, x8
ldr d12, [sp, #864] ; 8-byte Folded Reload
fmul d3, d12, d3
fsub d3, d3, d7
mov x8, #48611
movk x8, #369, lsl #16
movk x8, #53146, lsl #32
movk x8, #16296, lsl #48
fmov d5, x8
ldr d1, [sp, #784] ; 8-byte Folded Reload
fmul d5, d1, d5
fsub d5, d5, d16
mov x8, #22951
movk x8, #29779, lsl #16
movk x8, #56802, lsl #32
movk x8, #16266, lsl #48
fmov d6, x8
ldr d4, [sp, #768] ; 8-byte Folded Reload
fmul d6, d4, d6
fadd d6, d6, d17
ldr d7, [sp, #480] ; 8-byte Folded Reload
fmul d7, d7, d30
ldr d2, [sp, #352] ; 8-byte Folded Reload
fmul d2, d2, d19
str d19, [sp, #512] ; 8-byte Folded Spill
fadd d2, d7, d2
mov x8, #64990
movk x8, #28266, lsl #16
movk x8, #45172, lsl #32
movk x8, #16414, lsl #48
fmov d7, x8
fmul d3, d3, d7
mov x8, #5915
movk x8, #64709, lsl #16
movk x8, #30489, lsl #32
movk x8, #16392, lsl #48
fmov d7, x8
fmul d5, d5, d7
mov x8, #50080
movk x8, #49599, lsl #16
movk x8, #32579, lsl #32
movk x8, #16368, lsl #48
fmov d7, x8
fmul d6, d6, d7
mov x8, #42264
movk x8, #33609, lsl #16
movk x8, #14594, lsl #32
movk x8, #49188, lsl #48
fmov d7, x8
fmul d7, d9, d7
str d9, [sp, #176] ; 8-byte Folded Spill
fadd d2, d2, d7
fmul d7, d8, d25
stp d29, d8, [x29, #-216] ; 16-byte Folded Spill
fmul d16, d29, d26
fadd d7, d7, d16
mov x8, #64744
movk x8, #21380, lsl #16
movk x8, #23316, lsl #32
movk x8, #16194, lsl #48
mov x9, #61406
movk x9, #16023, lsl #16
movk x9, #30452, lsl #32
movk x9, #48921, lsl #48
fmov d16, x8
fmul d16, d13, d16
fmov d17, x9
fmul d17, d28, d17
fadd d16, d16, d17
fadd d0, d0, d24
ldr d17, [sp, #608] ; 8-byte Folded Reload
fadd d0, d17, d0
str d0, [sp, #608] ; 8-byte Folded Spill
mov x8, #23440
movk x8, #2685, lsl #16
movk x8, #53080, lsl #32
movk x8, #49234, lsl #48
fmov d0, x8
str d14, [sp, #168] ; 8-byte Folded Spill
fmul d0, d14, d0
fadd d0, d3, d0
str d0, [sp, #544] ; 8-byte Folded Spill
mov x8, #61302
movk x8, #27691, lsl #16
movk x8, #64897, lsl #32
movk x8, #49213, lsl #48
fmov d0, x8
fmov d25, d27
fmul d0, d27, d0
fadd d0, d5, d0
str d0, [sp, #528] ; 8-byte Folded Spill
mov x8, #18772
movk x8, #35328, lsl #16
movk x8, #62057, lsl #32
movk x8, #16258, lsl #48
mov x9, #6432
movk x9, #24166, lsl #16
movk x9, #7623, lsl #32
movk x9, #16309, lsl #48
fmov d0, x8
fmul d0, d4, d0
fmov d27, d4
fmov d3, x9
fmul d0, d0, d3
str d0, [sp, #504] ; 8-byte Folded Spill
fsub d0, d6, d2
str d0, [sp, #480] ; 8-byte Folded Spill
fsub d0, d7, d16
str d0, [sp, #304] ; 8-byte Folded Spill
mov x8, #48998
movk x8, #16808, lsl #16
movk x8, #62387, lsl #32
movk x8, #49080, lsl #48
mov x9, #54885
movk x9, #33778, lsl #16
movk x9, #12745, lsl #32
movk x9, #16308, lsl #48
fmov d0, x8
ldr d16, [sp, #904] ; 8-byte Folded Reload
fmul d2, d16, d0
fmov d3, x9
ldr d6, [sp, #912] ; 8-byte Folded Reload
fmul d5, d6, d3
fadd d5, d5, d2
fmul d0, d6, d0
fmov d7, d6
fmul d2, d16, d3
fsub d3, d0, d2
ldur d6, [x29, #-168] ; 8-byte Folded Reload
fmul d0, d6, d5
fmov d22, d5
str d5, [sp, #456] ; 8-byte Folded Spill
ldr d2, [sp, #1000] ; 8-byte Folded Reload
fsub d0, d2, d0
ldur d17, [x29, #-176] ; 8-byte Folded Reload
fmul d2, d3, d17
fmov d23, d3
str d3, [sp, #416] ; 8-byte Folded Spill
fmul d2, d17, d2
fsub d24, d0, d2
mov x8, #39915
movk x8, #11776, lsl #16
movk x8, #40689, lsl #32
movk x8, #49053, lsl #48
mov x9, #26610
movk x9, #29696, lsl #16
movk x9, #48971, lsl #32
movk x9, #16339, lsl #48
fmov d0, x8
fmul d2, d16, d0
fmov d3, x9
fmul d5, d7, d3
fadd d5, d5, d2
fmul d0, d7, d0
fmul d2, d16, d3
fsub d7, d0, d2
str d7, [sp, #328] ; 8-byte Folded Spill
fmul d0, d6, d5
stp d5, d24, [sp, #136] ; 16-byte Folded Spill
fsub d0, d24, d0
mov x8, #47887
movk x8, #56309, lsl #16
movk x8, #15746, lsl #32
movk x8, #16444, lsl #48
fmov d16, x8
str d16, [sp, #360] ; 8-byte Folded Spill
fmul d0, d0, d16
fmul d2, d7, d17
fmul d2, d17, d2
fmul d2, d2, d16
fsub d0, d0, d2
str d0, [sp, #464] ; 8-byte Folded Spill
fmul d0, d22, d17
ldr d2, [sp, #840] ; 8-byte Folded Reload
fsub d0, d2, d0
fsub d0, d0, d2
fmul d2, d6, d23
ldr d3, [sp, #1008] ; 8-byte Folded Reload
fadd d2, d3, d2
fmul d0, d17, d0
fadd d2, d2, d0
str d2, [sp, #320] ; 8-byte Folded Spill
fmul d0, d6, d7
fadd d0, d0, d2
fmul d0, d0, d16
fmul d2, d5, d17
fmul d2, d17, d2
fmul d2, d2, d16
fsub d0, d0, d2
fmul d2, d12, d11
ldr d3, [sp, #872] ; 8-byte Folded Reload
fsub d2, d3, d2
mov x8, #64990
movk x8, #28266, lsl #16
movk x8, #45172, lsl #32
movk x8, #16414, lsl #48
fmov d5, x8
str d5, [sp, #152] ; 8-byte Folded Spill
fmul d2, d2, d5
ldr d6, [sp, #944] ; 8-byte Folded Reload
fmul d3, d14, d6
fmul d3, d6, d3
fmul d3, d3, d5
fadd d2, d2, d3
str d2, [sp, #352] ; 8-byte Folded Spill
fmul d2, d1, d20
fsub d1, d15, d2
mov x8, #5915
movk x8, #64709, lsl #16
movk x8, #30489, lsl #32
movk x8, #16392, lsl #48
fmov d3, x8
str d3, [sp, #128] ; 8-byte Folded Spill
fmul d1, d1, d3
ldr d5, [sp, #936] ; 8-byte Folded Reload
fmul d2, d25, d5
fmul d2, d5, d2
fmul d2, d2, d3
fadd d1, d1, d2
str d1, [sp, #312] ; 8-byte Folded Spill
fmul d1, d4, d10
ldr d4, [sp, #928] ; 8-byte Folded Reload
fsub d1, d4, d1
mov x8, #50080
movk x8, #49599, lsl #16
movk x8, #32579, lsl #32
movk x8, #16368, lsl #48
fmov d3, x8
str d3, [sp, #160] ; 8-byte Folded Spill
fmul d1, d1, d3
ldur d14, [x29, #-248] ; 8-byte Folded Reload
fmul d2, d9, d14
fmul d2, d14, d2
fmul d2, d2, d3
fsub d1, d1, d2
mov x8, #36544
movk x8, #43611, lsl #16
movk x8, #860, lsl #32
movk x8, #16326, lsl #48
fmov d2, x8
fmul d3, d21, d2
mov x8, #18456
movk x8, #63321, lsl #16
movk x8, #33926, lsl #32
movk x8, #16223, lsl #48
fmov d5, x8
fmul d6, d18, d5
fsub d3, d3, d6
mov x8, #63706
movk x8, #13221, lsl #16
movk x8, #1281, lsl #32
movk x8, #16209, lsl #48
fmov d6, x8
ldur d18, [x29, #-184] ; 8-byte Folded Reload
fmul d7, d18, d6
fadd d7, d7, d3
fmul d3, d27, d7
fmov d23, d7
fmov d20, d27
fsub d3, d4, d3
fmul d2, d31, d2
ldr d4, [sp, #744] ; 8-byte Folded Reload
fmul d4, d4, d5
fsub d2, d2, d4
ldur d22, [x29, #-240] ; 8-byte Folded Reload
fmul d4, d22, d6
fadd d2, d4, d2
str d2, [sp, #800] ; 8-byte Folded Spill
fmul d2, d2, d14
str d2, [sp, #912] ; 8-byte Folded Spill
fmul d2, d14, d2
fsub d21, d3, d2
str d21, [sp, #928] ; 8-byte Folded Spill
mov x8, #56877
movk x8, #10885, lsl #16
movk x8, #2572, lsl #32
movk x8, #16289, lsl #48
mov x9, #62994
movk x9, #14722, lsl #16
movk x9, #41829, lsl #32
movk x9, #16247, lsl #48
fmov d2, x8
fmul d3, d28, d2
fmov d4, x9
fmul d5, d13, d4
fadd d3, d3, d5
mov x8, #54125
movk x8, #53060, lsl #16
movk x8, #15481, lsl #32
movk x8, #49041, lsl #48
fmov d5, x8
fmul d6, d18, d5
fadd d24, d3, d6
ldp d25, d26, [x29, #-128] ; 16-byte Folded Reload
fmul d3, d26, d2
fmul d6, d25, d4
fadd d3, d3, d6
fmul d6, d22, d14
ldr d17, [sp, #680] ; 8-byte Folded Reload
fmul d6, d6, d17
ldr d16, [sp, #672] ; 8-byte Folded Reload
fmul d7, d18, d16
fadd d7, d7, d6
str d7, [sp, #920] ; 8-byte Folded Spill
fadd d27, d3, d5
fadd d3, d20, d16
str d3, [sp, #904] ; 8-byte Folded Spill
str d24, [sp, #120] ; 8-byte Folded Spill
fmul d3, d24, d3
fmul d6, d27, d7
str d27, [sp, #840] ; 8-byte Folded Spill
fsub d3, d3, d6
fmul d2, d8, d2
fmul d4, d29, d4
fadd d2, d2, d4
fmul d4, d22, d5
fadd d16, d2, d4
fmov d5, d17
fmul d7, d18, d17
fmul d6, d22, d17
str d6, [sp, #728] ; 8-byte Folded Spill
fmul d2, d24, d6
fmul d4, d16, d7
fsub d17, d2, d4
stp d16, d17, [sp, #88] ; 16-byte Folded Spill
fadd d11, d14, d5
fmul d2, d16, d11
fmul d4, d27, d6
fsub d2, d2, d4
fmul d2, d11, d2
fmul d4, d7, d17
str d7, [sp, #664] ; 8-byte Folded Spill
fsub d2, d4, d2
fsub d3, d21, d3
mov x8, #54806
movk x8, #23353, lsl #16
movk x8, #56949, lsl #32
movk x8, #16326, lsl #48
fmov d4, x8
fmul d3, d3, d4
fmul d2, d2, d4
fadd d2, d3, d2
stp d4, d2, [sp, #104] ; 16-byte Folded Spill
ldr d2, [sp, #600] ; 8-byte Folded Reload
str d23, [sp, #216] ; 8-byte Folded Spill
fadd d12, d2, d23
mov x8, #7864
movk x8, #60293, lsl #16
movk x8, #47185, lsl #32
movk x8, #49054, lsl #48
fmov d2, x8
fadd d9, d12, d2
fmov d2, #0.50000000
fmul d2, d9, d2
fsub d2, d9, d2
fsub d2, d2, d12
str d28, [sp, #712] ; 8-byte Folded Spill
fmul d8, d2, d28
fmul d3, d26, d8
str d13, [sp, #720] ; 8-byte Folded Spill
fmul d10, d2, d13
fmul d4, d25, d10
fadd d3, d3, d4
fmul d4, d28, d8
fmul d5, d13, d10
fadd d4, d4, d5
fmul d13, d18, d2
fmul d2, d18, d13
fadd d4, d2, d4
str d4, [sp, #872] ; 8-byte Folded Spill
fadd d3, d13, d3
fmul d2, d3, d7
fmov d15, d3
str d3, [sp, #600] ; 8-byte Folded Spill
fmul d3, d4, d11
stur d11, [x29, #-200] ; 8-byte Folded Spill
fsub d2, d2, d3
mov x8, #4416
movk x8, #37438, lsl #16
movk x8, #20244, lsl #32
movk x8, #16497, lsl #48
mov x9, #6432
movk x9, #24166, lsl #16
movk x9, #7623, lsl #32
movk x9, #16309, lsl #48
mov x10, #26865
movk x10, #35043, lsl #16
movk x10, #63669, lsl #32
movk x10, #16100, lsl #48
fmov d3, x8
str d3, [sp, #232] ; 8-byte Folded Spill
fadd d0, d0, d3
str d0, [sp, #584] ; 8-byte Folded Spill
fmov d3, x9
str d3, [sp, #80] ; 8-byte Folded Spill
ldr d0, [sp, #760] ; 8-byte Folded Reload
fmul d4, d0, d3
fadd d0, d1, d19
stp d0, d4, [sp, #240] ; 16-byte Folded Spill
fmul d0, d23, d14
ldur d1, [x29, #-192] ; 8-byte Folded Reload
fsub d0, d1, d0
str d0, [sp, #736] ; 8-byte Folded Spill
fadd d0, d0, d2
str d0, [sp, #792] ; 8-byte Folded Spill
fmov d1, x10
str d1, [sp, #656] ; 8-byte Folded Spill
fmul d0, d9, d9
fadd d0, d0, d1
fsqrt d0, d0
fmov d1, #1.50000000
bl _pow
mov x8, #18811
movk x8, #34700, lsl #16
movk x8, #61210, lsl #32
movk x8, #16643, lsl #48
fmov d1, x8
fmul d0, d0, d1
ldr d1, [sp, #592] ; 8-byte Folded Reload
ldr d2, [sp, #912] ; 8-byte Folded Reload
fadd d3, d2, d1
str d3, [sp, #8] ; 8-byte Folded Spill
ldp d2, d14, [x29, #-216] ; 16-byte Folded Reload
fmul d1, d14, d8
fmul d2, d2, d10
fadd d1, d1, d2
ldur d2, [x29, #-240] ; 8-byte Folded Reload
fmul d2, d2, d13
fadd d2, d2, d1
fmul d1, d2, d11
fmov d13, d2
str d2, [sp, #912] ; 8-byte Folded Spill
ldr d11, [sp, #728] ; 8-byte Folded Reload
fmul d2, d15, d11
fsub d1, d1, d2
fadd d8, d3, d1
fmov d1, #3.00000000
fmul d1, d8, d1
fmov d2, #1.00000000
fsub d1, d2, d1
fmul d10, d0, d1
mov x8, #211106232532992
movk x8, #49266, lsl #48
fmov d0, x8
str d0, [sp, #32] ; 8-byte Folded Spill
fmul d0, d9, d0
bl _tanh
fmov d15, #0.50000000
fmul d0, d0, d15
fadd d0, d0, d15
fmul d9, d10, d0
mov x8, #6148914691236517205
movk x8, #16341, lsl #48
fmov d0, x8
str d0, [sp, #24] ; 8-byte Folded Spill
fsub d0, d0, d8
mov x8, #4632233691727265792
fmov d1, x8
str d1, [sp, #16] ; 8-byte Folded Spill
fmul d0, d0, d1
bl _tanh
fmul d0, d0, d15
fadd d0, d0, d15
fmov d6, #0.50000000
fmul d3, d9, d0
str d3, [sp, #752] ; 8-byte Folded Spill
ldr d4, [sp, #792] ; 8-byte Folded Reload
fmul d0, d4, d4
ldr d1, [sp, #872] ; 8-byte Folded Reload
fmul d1, d1, d11
ldr d7, [sp, #664] ; 8-byte Folded Reload
fmul d2, d13, d7
fsub d1, d1, d2
str d1, [sp, #56] ; 8-byte Folded Spill
fmul d1, d1, d1
fadd d0, d1, d0
ldr d18, [sp, #656] ; 8-byte Folded Reload
fadd d0, d0, d18
fsqrt d2, d0
mov x8, #-7378697629483820647
movk x8, #39322
movk x8, #16329, lsl #48
fmov d0, x8
str d0, [sp, #72] ; 8-byte Folded Spill
fdiv d0, d2, d0
fmov d1, #1.00000000
fminnm d0, d0, d1
mov x8, #-7378697629483820647
movk x8, #39322
movk x8, #16361, lsl #48
fmov d1, x8
str d1, [sp, #64] ; 8-byte Folded Spill
fmul d0, d0, d1
fmul d1, d2, d6
fadd d0, d1, d0
fmul d0, d0, d3
stp d2, d0, [sp, #40] ; 16-byte Folded Spill
fmul d0, d4, d0
fdiv d0, d0, d2
str d0, [sp, #792] ; 8-byte Folded Spill
mov x8, #-7378697629483820647
movk x8, #39322
movk x8, #16297, lsl #48
fmov d0, x8
fmul d1, d14, d0
fmul d2, d14, d1
ldr d5, [sp, #712] ; 8-byte Folded Reload
fmul d3, d5, d0
fadd d3, d12, d3
mov x8, #-7378697629483820647
movk x8, #39322
movk x8, #49049, lsl #48
fmov d4, x8
fadd d10, d3, d4
fmul d3, d10, d6
fsub d3, d10, d3
fsub d3, d3, d12
fmul d4, d5, d3
fmov d17, d5
fadd d2, d2, d4
ldp d6, d5, [x29, #-128] ; 16-byte Folded Reload
fmul d0, d5, d0
fmul d4, d5, d0
fadd d11, d4, d2
fmul d2, d5, d11
ldur d14, [x29, #-216] ; 8-byte Folded Reload
fmul d4, d14, d1
ldr d16, [sp, #720] ; 8-byte Folded Reload
fmul d5, d16, d3
fadd d4, d4, d5
fmul d5, d6, d0
fadd d12, d5, d4
fmul d4, d6, d12
fadd d2, d2, d4
ldur d15, [x29, #-240] ; 8-byte Folded Reload
fmul d1, d15, d1
ldur d4, [x29, #-184] ; 8-byte Folded Reload
fmul d3, d4, d3
fadd d1, d1, d3
fadd d13, d0, d1
fadd d9, d13, d2
fmul d0, d9, d7
str d9, [sp, #592] ; 8-byte Folded Spill
fmul d1, d17, d11
fmul d2, d16, d12
fadd d1, d1, d2
fmul d2, d4, d13
fadd d1, d2, d1
str d1, [sp, #744] ; 8-byte Folded Spill
ldur d8, [x29, #-200] ; 8-byte Folded Reload
fmul d1, d1, d8
fsub d0, d0, d1
ldr d1, [sp, #736] ; 8-byte Folded Reload
fadd d0, d1, d0
str d0, [sp] ; 8-byte Folded Spill
fmul d0, d10, d10
fadd d0, d0, d18
fsqrt d0, d0
fmov d1, #1.50000000
bl _pow
mov x8, #10523
movk x8, #38535, lsl #16
movk x8, #12921, lsl #32
movk x8, #16642, lsl #48
fmov d1, x8
fmul d0, d0, d1
ldur d1, [x29, #-208] ; 8-byte Folded Reload
fmul d1, d1, d11
fmul d2, d14, d12
fadd d1, d1, d2
fmul d2, d15, d13
fadd d2, d2, d1
fmul d1, d2, d8
fmov d8, d2
str d2, [sp, #760] ; 8-byte Folded Spill
ldr d14, [sp, #728] ; 8-byte Folded Reload
fmul d2, d9, d14
fsub d1, d1, d2
ldr d2, [sp, #8] ; 8-byte Folded Reload
fadd d11, d2, d1
fmov d1, #3.00000000
fmul d1, d11, d1
fmov d9, #1.00000000
fsub d1, d9, d1
fmul d12, d0, d1
ldr d0, [sp, #32] ; 8-byte Folded Reload
fmul d0, d10, d0
bl _tanh
fmov d1, #0.50000000
fmul d0, d0, d1
fadd d0, d0, d1
fmov d13, #0.50000000
fmul d10, d12, d0
ldp d1, d0, [sp, #16] ; 16-byte Folded Reload
fsub d0, d0, d11
fmul d0, d0, d1
bl _tanh
ldr d1, [sp, #864] ; 8-byte Folded Reload
ldr d2, [sp, #168] ; 8-byte Folded Reload
fmul d1, d1, d2
ldr d2, [sp, #648] ; 8-byte Folded Reload
fsub d16, d2, d1
ldr d1, [sp, #784] ; 8-byte Folded Reload
ldr d2, [sp, #776] ; 8-byte Folded Reload
fmul d1, d1, d2
ldr d2, [sp, #264] ; 8-byte Folded Reload
fsub d17, d2, d1
ldr d1, [sp, #296] ; 8-byte Folded Reload
ldr d2, [sp, #80] ; 8-byte Folded Reload
fmul d18, d1, d2
ldur d1, [x29, #-192] ; 8-byte Folded Reload
ldr d2, [sp, #736] ; 8-byte Folded Reload
fsub d19, d2, d1
ldp d2, d1, [sp, #136] ; 16-byte Folded Reload
fmul d1, d2, d1
str d1, [sp, #648] ; 8-byte Folded Spill
ldr d1, [sp, #200] ; 8-byte Folded Reload
str d1, [x19, #24]
ldr d1, [sp, #224] ; 8-byte Folded Reload
str d1, [x19, #40]
ldr d1, [sp, #280] ; 8-byte Folded Reload
str d1, [x19, #56]
ldr d1, [sp, #344] ; 8-byte Folded Reload
ldr d2, [sp, #376] ; 8-byte Folded Reload
fmul d1, d1, d2
ldr d2, [sp, #888] ; 8-byte Folded Reload
fdiv d1, d1, d2
str d1, [sp, #736] ; 8-byte Folded Spill
ldr d1, [sp, #336] ; 8-byte Folded Reload
ldr d2, [sp, #368] ; 8-byte Folded Reload
fmul d1, d1, d2
ldp d3, d2, [sp, #48] ; 16-byte Folded Reload
fmul d2, d2, d3
ldr d3, [sp, #880] ; 8-byte Folded Reload
fdiv d1, d1, d3
str d1, [sp, #776] ; 8-byte Folded Spill
ldr d1, [sp, #40] ; 8-byte Folded Reload
fdiv d1, d2, d1
str d1, [sp, #888] ; 8-byte Folded Spill
fmul d0, d0, d13
fadd d0, d0, d13
fmul d0, d10, d0
ldr d31, [sp, #744] ; 8-byte Folded Reload
fmul d1, d31, d14
fmov d27, d14
ldr d28, [sp, #664] ; 8-byte Folded Reload
fmul d3, d8, d28
fsub d2, d1, d3
str d2, [sp, #880] ; 8-byte Folded Spill
ldr d6, [sp] ; 8-byte Folded Reload
fmul d1, d6, d6
fmul d4, d2, d2
fadd d1, d4, d1
ldr d2, [sp, #656] ; 8-byte Folded Reload
fadd d1, d1, d2
fsqrt d2, d1
str d2, [sp, #864] ; 8-byte Folded Spill
ldp d3, d1, [sp, #64] ; 16-byte Folded Reload
fdiv d1, d2, d1
fminnm d1, d1, d9
fmul d1, d1, d3
fmul d5, d2, d13
fadd d1, d5, d1
fmul d1, d1, d0
str d1, [sp, #784] ; 8-byte Folded Spill
fmov d22, d0
fmul d1, d6, d1
fdiv d0, d1, d2
stur d0, [x29, #-192] ; 8-byte Folded Spill
ldr d8, [sp, #792] ; 8-byte Folded Reload
fadd d20, d8, d0
ldr d0, [sp, #112] ; 8-byte Folded Reload
fadd d0, d0, d20
str d0, [sp, #344] ; 8-byte Folded Spill
ldr d0, [sp, #944] ; 8-byte Folded Reload
ldr d1, [sp, #208] ; 8-byte Folded Reload
fmul d20, d1, d0
fmul d20, d0, d20
ldr d0, [sp, #152] ; 8-byte Folded Reload
fmul d16, d16, d0
fmul d20, d20, d0
fsub d16, d16, d20
mov x8, #23440
movk x8, #2685, lsl #16
movk x8, #53080, lsl #32
movk x8, #16466, lsl #48
fmov d20, x8
fadd d0, d16, d20
str d0, [sp, #656] ; 8-byte Folded Spill
ldr d0, [sp, #936] ; 8-byte Folded Reload
ldr d1, [sp, #184] ; 8-byte Folded Reload
fmul d16, d1, d0
fmul d16, d0, d16
ldr d0, [sp, #128] ; 8-byte Folded Reload
fmul d17, d17, d0
fmul d16, d16, d0
fsub d16, d17, d16
mov x8, #61302
movk x8, #27691, lsl #16
movk x8, #64897, lsl #32
movk x8, #16445, lsl #48
fmov d17, x8
fadd d0, d16, d17
str d0, [sp, #936] ; 8-byte Folded Spill
mov x8, #30506
movk x8, #37777, lsl #16
movk x8, #58002, lsl #32
movk x8, #16361, lsl #48
fmov d16, x8
fadd d0, d18, d16
str d0, [sp, #368] ; 8-byte Folded Spill
ldr d3, [sp, #768] ; 8-byte Folded Reload
ldr d0, [sp, #176] ; 8-byte Folded Reload
fmul d16, d3, d0
ldr d2, [sp, #288] ; 8-byte Folded Reload
fadd d16, d16, d2
ldr d5, [sp, #160] ; 8-byte Folded Reload
fmul d16, d16, d5
ldur d0, [x29, #-248] ; 8-byte Folded Reload
ldr d1, [sp, #192] ; 8-byte Folded Reload
fmul d17, d1, d0
fmul d17, d0, d17
fmov d1, d0
fmul d17, d17, d5
fsub d16, d16, d17
mov x8, #42264
movk x8, #33609, lsl #16
movk x8, #14594, lsl #32
movk x8, #49188, lsl #48
fmov d17, x8
ldr d0, [sp, #1016] ; 8-byte Folded Reload
fadd d17, d0, d17
fsub d0, d16, d17
str d0, [sp, #336] ; 8-byte Folded Spill
ldr d0, [sp, #800] ; 8-byte Folded Reload
fmul d16, d3, d0
fadd d16, d16, d2
fmul d17, d1, d19
fadd d7, d17, d16
ldur d21, [x29, #-184] ; 8-byte Folded Reload
fmul d16, d21, d1
ldr d0, [sp, #680] ; 8-byte Folded Reload
fmul d16, d16, d0
ldr d0, [sp, #672] ; 8-byte Folded Reload
fmul d18, d15, d0
fsub d1, d18, d16
ldr d0, [sp, #840] ; 8-byte Folded Reload
fmul d18, d0, d1
fmov d25, d1
stur d1, [x29, #-248] ; 8-byte Folded Spill
ldr d6, [sp, #904] ; 8-byte Folded Reload
ldr d10, [sp, #88] ; 8-byte Folded Reload
fmul d19, d10, d6
fsub d18, d18, d19
fmul d19, d0, d28
ldur d14, [x29, #-200] ; 8-byte Folded Reload
ldr d9, [sp, #120] ; 8-byte Folded Reload
fmul d20, d9, d14
fsub d19, d19, d20
fmul d19, d14, d19
fmov d15, d14
ldp d0, d30, [sp, #96] ; 16-byte Folded Reload
fmul d20, d27, d0
fsub d19, d19, d20
fsub d18, d7, d18
str d7, [sp, #944] ; 8-byte Folded Spill
fmul d18, d18, d30
fmul d19, d19, d30
fadd d19, d18, d19
mov x8, #11213
movk x8, #64899, lsl #16
movk x8, #2195, lsl #32
movk x8, #49148, lsl #48
fmov d18, x8
ldr d13, [sp, #752] ; 8-byte Folded Reload
fadd d20, d13, d18
fadd d20, d20, d22
fmov d14, d22
str d22, [sp, #376] ; 8-byte Folded Spill
fsub d11, d19, d20
ldr d2, [sp, #256] ; 8-byte Folded Reload
ldr d1, [sp, #712] ; 8-byte Folded Reload
fmul d19, d1, d2
ldr d3, [sp, #272] ; 8-byte Folded Reload
ldr d4, [sp, #720] ; 8-byte Folded Reload
fmul d20, d4, d3
fadd d19, d19, d20
mov x8, #64744
movk x8, #21380, lsl #16
movk x8, #23316, lsl #32
movk x8, #16194, lsl #48
mov x9, #61406
movk x9, #16023, lsl #16
movk x9, #30452, lsl #32
movk x9, #16153, lsl #48
fmov d22, x8
ldp d16, d12, [x29, #-216] ; 16-byte Folded Reload
fmul d20, d16, d22
fmov d23, x9
fmul d24, d12, d23
fsub d20, d20, d24
fadd d19, d20, d19
ldr d17, [sp, #304] ; 8-byte Folded Reload
fmul d20, d17, d25
ldr d0, [sp, #920] ; 8-byte Folded Reload
fmul d24, d19, d0
fadd d24, d20, d24
ldp d5, d29, [x29, #-128] ; 16-byte Folded Reload
fmul d20, d29, d2
fmul d25, d5, d3
fadd d25, d20, d25
mov x8, #3449
movk x8, #18764, lsl #16
movk x8, #45194, lsl #32
movk x8, #16217, lsl #48
fmov d3, x8
fadd d20, d25, d3
fmul d26, d20, d6
fadd d24, d26, d24
ldr d0, [sp, #928] ; 8-byte Folded Reload
fmul d26, d0, d9
fmul d9, d7, d10
fmov d6, d10
fsub d26, d9, d26
fadd d24, d26, d24
mov x8, #45033
movk x8, #40035, lsl #16
movk x8, #524, lsl #32
movk x8, #48971, lsl #48
fmov d26, x8
fmul d26, d1, d26
mov x8, #45724
movk x8, #42429, lsl #16
movk x8, #11379, lsl #32
movk x8, #16169, lsl #48
fmov d9, x8
fmul d10, d4, d9
fsub d26, d26, d10
mov x8, #64744
movk x8, #21380, lsl #16
movk x8, #23316, lsl #32
movk x8, #16210, lsl #48
fmov d10, x8
fmul d10, d21, d10
fadd d26, d10, d26
fmul d9, d1, d9
mov x8, #40862
movk x8, #31695, lsl #16
movk x8, #12355, lsl #32
movk x8, #16198, lsl #48
fmov d10, x8
fmul d10, d4, d10
fsub d9, d10, d9
mov x8, #61406
movk x8, #16023, lsl #16
movk x8, #30452, lsl #32
movk x8, #16169, lsl #48
fmov d10, x8
fmul d10, d21, d10
fadd d9, d10, d9
fmul d10, d12, d26
fmul d12, d16, d9
fadd d10, d10, d12
fmul d22, d5, d22
fmul d23, d29, d23
fsub d22, d22, d23
fadd d22, d22, d10
fmul d23, d1, d26
fmul d26, d4, d9
fadd d9, d23, d26
fmul d26, d22, d27
fadd d0, d9, d3
stur d0, [x29, #-120] ; 8-byte Folded Spill
fmul d10, d0, d28
fadd d26, d26, d10
fmul d10, d19, d15
fadd d26, d10, d26
fadd d25, d9, d25
mov x8, #16684
movk x8, #33360, lsl #16
movk x8, #18212, lsl #32
movk x8, #48931, lsl #48
fmov d9, x8
fsub d25, d9, d25
fadd d29, d25, d3
fmul d25, d29, d27
fmul d9, d22, d28
fadd d25, d25, d9
fmul d9, d17, d15
fadd d25, d9, d25
fmul d9, d27, d26
fmov d23, d27
fmul d10, d28, d25
fmov d4, d28
fsub d9, d9, d10
fmul d24, d24, d30
fmul d9, d9, d30
fmov d16, d30
fadd d24, d24, d9
ldr d0, [sp, #912] ; 8-byte Folded Reload
fmul d9, d0, d13
ldr d5, [sp, #872] ; 8-byte Folded Reload
fmul d10, d5, d8
fadd d9, d9, d10
fmul d10, d6, d18
fadd d9, d9, d10
ldr d0, [sp, #760] ; 8-byte Folded Reload
fmul d10, d0, d14
ldur d0, [x29, #-192] ; 8-byte Folded Reload
fmul d12, d31, d0
fadd d10, d10, d12
fadd d9, d9, d10
fsub d24, d24, d9
ldp d0, d1, [sp, #336] ; 16-byte Folded Reload
fadd d8, d0, d11
ldr d0, [sp, #800] ; 8-byte Folded Reload
fmul d9, d0, d11
ldr d0, [sp, #240] ; 8-byte Folded Reload
fadd d10, d0, d1
ldr d0, [sp, #216] ; 8-byte Folded Reload
fmul d27, d0, d1
fsub d27, d9, d27
fadd d27, d24, d27
ldr d0, [sp, #480] ; 8-byte Folded Reload
fadd d27, d0, d27
ldr d0, [sp, #368] ; 8-byte Folded Reload
fadd d31, d0, d8
ldr d0, [sp, #808] ; 8-byte Folded Reload
fmul d8, d0, d8
ldr d0, [sp, #248] ; 8-byte Folded Reload
fadd d9, d0, d10
ldr d0, [sp, #952] ; 8-byte Folded Reload
fmul d10, d0, d10
fsub d8, d8, d10
fadd d27, d8, d27
ldr d0, [sp, #504] ; 8-byte Folded Reload
fadd d27, d0, d27
ldr d0, [sp, #936] ; 8-byte Folded Reload
fadd d30, d0, d31
ldr d0, [sp, #816] ; 8-byte Folded Reload
fmul d31, d0, d31
ldr d0, [sp, #312] ; 8-byte Folded Reload
fadd d8, d0, d9
ldr d0, [sp, #960] ; 8-byte Folded Reload
fmul d9, d0, d9
fadd d31, d31, d9
fsub d31, d27, d31
ldr d0, [sp, #528] ; 8-byte Folded Reload
fadd d31, d0, d31
ldr d0, [sp, #656] ; 8-byte Folded Reload
fadd d28, d0, d30
ldr d0, [sp, #832] ; 8-byte Folded Reload
fmul d30, d0, d30
ldr d0, [sp, #352] ; 8-byte Folded Reload
fadd d9, d0, d8
ldr d0, [sp, #824] ; 8-byte Folded Reload
fmul d8, d0, d8
fsub d30, d30, d8
fadd d30, d30, d31
ldr d0, [sp, #544] ; 8-byte Folded Reload
fadd d30, d0, d30
ldr d8, [sp, #992] ; 8-byte Folded Reload
fmul d8, d8, d28
ldr d10, [sp, #984] ; 8-byte Folded Reload
fmul d10, d10, d9
fsub d8, d8, d10
fadd d8, d8, d30
ldr d0, [sp, #608] ; 8-byte Folded Reload
fadd d8, d0, d8
ldp d1, d0, [sp, #320] ; 16-byte Folded Reload
fmul d10, d0, d1
ldr d1, [sp, #648] ; 8-byte Folded Reload
fsub d21, d10, d1
mov x8, #4610
movk x8, #16231, lsl #16
movk x8, #17604, lsl #32
movk x8, #16321, lsl #48
fmov d10, x8
ldur d13, [x29, #-168] ; 8-byte Folded Reload
fmul d10, d13, d10
fadd d21, d10, d21
ldr d1, [sp, #360] ; 8-byte Folded Reload
fmul d21, d21, d1
ldr d1, [sp, #232] ; 8-byte Folded Reload
fmul d10, d0, d1
fadd d21, d10, d21
ldr d0, [sp, #416] ; 8-byte Folded Reload
ldr d3, [sp, #584] ; 8-byte Folded Reload
fmul d10, d0, d3
ldp d0, d1, [sp, #456] ; 16-byte Folded Reload
fmul d11, d0, d1
fsub d10, d10, d11
fadd d21, d21, d10
fadd d21, d21, d8
str d21, [x19]
ldr d12, [sp, #616] ; 8-byte Folded Reload
fmul d21, d13, d12
ldr d8, [sp, #1000] ; 8-byte Folded Reload
fsub d21, d8, d21
mov x8, #43139
movk x8, #8835, lsl #16
movk x8, #28093, lsl #32
movk x8, #16419, lsl #48
fmov d8, x8
fmul d21, d21, d8
ldr d11, [sp, #624] ; 8-byte Folded Reload
ldur d0, [x29, #-176] ; 8-byte Folded Reload
fmul d10, d11, d0
fmul d10, d0, d10
fmov d2, d0
fmul d10, d10, d8
fsub d21, d21, d10
ldr d0, [sp, #552] ; 8-byte Folded Reload
fadd d21, d21, d0
fadd d21, d21, d9
fadd d21, d1, d21
str d21, [x19, #8]
fmul d21, d13, d11
ldr d9, [sp, #1008] ; 8-byte Folded Reload
fadd d21, d21, d9
fmul d9, d12, d2
fmul d9, d2, d9
fmul d21, d21, d8
fmul d8, d9, d8
fsub d21, d21, d8
mov x8, #45848
movk x8, #59098, lsl #16
movk x8, #53494, lsl #32
movk x8, #16471, lsl #48
fmov d8, x8
fadd d21, d21, d8
ldr d0, [sp, #640] ; 8-byte Folded Reload
fadd d21, d21, d0
fadd d21, d21, d28
fadd d21, d3, d21
str d21, [x19, #16]
str d30, [x19, #32]
str d31, [x19, #48]
str d27, [x19, #64]
ldur d0, [x29, #-232] ; 8-byte Folded Reload
ldr d1, [sp, #520] ; 8-byte Folded Reload
fmul d21, d1, d0
ldr d10, [sp, #536] ; 8-byte Folded Reload
ldp d15, d14, [sp, #488] ; 16-byte Folded Reload
fmul d27, d10, d15
fadd d21, d21, d27
ldp d2, d1, [sp, #432] ; 16-byte Folded Reload
fmul d27, d1, d2
fadd d21, d27, d21
ldr d31, [sp, #424] ; 8-byte Folded Reload
ldr d3, [sp, #384] ; 8-byte Folded Reload
fmul d27, d3, d31
fsub d21, d21, d27
ldp d8, d3, [x29, #-152] ; 16-byte Folded Reload
fmul d27, d1, d3
ldr d1, [sp, #688] ; 8-byte Folded Reload
fmul d28, d14, d1
fadd d27, d27, d28
ldr d28, [sp, #448] ; 8-byte Folded Reload
fmul d28, d28, d8
fadd d27, d28, d27
fmul d28, d1, d27
ldr d1, [sp, #400] ; 8-byte Folded Reload
fmul d30, d8, d1
fmov d1, d8
fsub d28, d28, d30
fmul d21, d21, d16
fmul d28, d28, d16
fadd d21, d21, d28
ldr d28, [sp, #704] ; 8-byte Folded Reload
ldr d7, [sp, #736] ; 8-byte Folded Reload
fmul d28, d28, d7
ldr d8, [sp, #856] ; 8-byte Folded Reload
ldr d13, [sp, #896] ; 8-byte Folded Reload
fmul d30, d8, d13
fadd d28, d30, d28
fmul d30, d31, d18
fmov d12, d31
fadd d28, d28, d30
ldr d30, [sp, #696] ; 8-byte Folded Reload
ldr d6, [sp, #776] ; 8-byte Folded Reload
fmul d30, d30, d6
ldr d9, [sp, #848] ; 8-byte Folded Reload
ldur d11, [x29, #-256] ; 8-byte Folded Reload
fmul d31, d9, d11
fadd d30, d31, d30
fadd d28, d30, d28
fadd d21, d21, d28
ldur d28, [x29, #-160] ; 8-byte Folded Reload
fmul d21, d28, d21
fmul d28, d10, d0
ldr d30, [sp, #560] ; 8-byte Folded Reload
fmul d30, d30, d15
fadd d28, d28, d30
fmul d30, d14, d2
fadd d28, d30, d28
ldr d0, [sp, #408] ; 8-byte Folded Reload
fmul d30, d0, d12
fadd d28, d30, d28
ldr d0, [sp, #472] ; 8-byte Folded Reload
fmul d30, d1, d0
fmul d27, d3, d27
fsub d27, d30, d27
fmul d28, d28, d16
fmul d27, d27, d16
fadd d27, d28, d27
ldr d28, [sp, #976] ; 8-byte Folded Reload
fmul d7, d28, d7
ldr d31, [sp, #392] ; 8-byte Folded Reload
fmul d28, d8, d31
fsub d7, d7, d28
ldr d28, [sp, #968] ; 8-byte Folded Reload
fmul d6, d28, d6
ldr d30, [sp, #576] ; 8-byte Folded Reload
fmul d28, d9, d30
fsub d6, d6, d28
fadd d6, d7, d6
fsub d6, d27, d6
ldur d7, [x29, #-136] ; 8-byte Folded Reload
fmul d6, d7, d6
fadd d6, d6, d21
ldr d0, [sp, #568] ; 8-byte Folded Reload
fadd d6, d0, d6
str d6, [x19, #72]
ldur d27, [x29, #-248] ; 8-byte Folded Reload
fmul d6, d29, d27
ldr d1, [sp, #920] ; 8-byte Folded Reload
fmul d7, d22, d1
fadd d6, d6, d7
ldr d8, [sp, #904] ; 8-byte Folded Reload
fmul d7, d17, d8
fadd d6, d7, d6
ldr d3, [sp, #840] ; 8-byte Folded Reload
ldr d2, [sp, #944] ; 8-byte Folded Reload
fmul d7, d2, d3
fsub d6, d6, d7
fmul d7, d17, d23
fmul d17, d19, d4
fadd d7, d7, d17
ldur d0, [x29, #-200] ; 8-byte Folded Reload
fmul d17, d20, d0
fadd d7, d17, d7
fmul d17, d4, d7
fmul d20, d0, d26
fmov d29, d0
fsub d17, d17, d20
fmul d6, d6, d16
fmul d17, d17, d16
fadd d6, d6, d17
ldr d2, [sp, #888] ; 8-byte Folded Reload
fmul d17, d5, d2
ldr d21, [sp, #600] ; 8-byte Folded Reload
ldr d26, [sp, #752] ; 8-byte Folded Reload
fmul d20, d21, d26
fadd d17, d20, d17
fmul d18, d3, d18
fmov d28, d3
fadd d17, d17, d18
ldr d0, [sp, #880] ; 8-byte Folded Reload
ldr d3, [sp, #784] ; 8-byte Folded Reload
fmul d3, d0, d3
ldr d0, [sp, #864] ; 8-byte Folded Reload
fdiv d3, d3, d0
ldr d0, [sp, #744] ; 8-byte Folded Reload
fmul d4, d0, d3
ldr d18, [sp, #592] ; 8-byte Folded Reload
ldr d20, [sp, #376] ; 8-byte Folded Reload
fmul d5, d18, d20
fadd d4, d5, d4
fadd d4, d17, d4
fadd d4, d6, d4
ldur d0, [x29, #-240] ; 8-byte Folded Reload
fmul d4, d0, d4
fmul d5, d22, d27
ldur d0, [x29, #-120] ; 8-byte Folded Reload
fmul d6, d0, d1
fadd d5, d5, d6
fmul d6, d19, d8
fadd d5, d6, d5
ldr d0, [sp, #928] ; 8-byte Folded Reload
fmul d6, d0, d28
fadd d5, d6, d5
fmul d5, d5, d16
fmul d6, d29, d25
fmul d7, d23, d7
fsub d6, d6, d7
fmul d6, d6, d16
fadd d5, d5, d6
ldr d0, [sp, #912] ; 8-byte Folded Reload
fmul d2, d0, d2
ldr d1, [sp, #792] ; 8-byte Folded Reload
fmul d6, d21, d1
fsub d2, d2, d6
ldr d0, [sp, #760] ; 8-byte Folded Reload
fmul d3, d0, d3
ldp d7, d0, [x29, #-192] ; 16-byte Folded Reload
fmul d6, d18, d7
fsub d3, d3, d6
fadd d2, d2, d3
fsub d2, d5, d2
fmul d2, d0, d2
fadd d2, d4, d2
fadd d2, d24, d2
str d2, [x19, #80]
ldr d0, [sp, #512] ; 8-byte Folded Reload
fadd d2, d0, d1
fadd d1, d2, d7
fneg d1, d1
str d1, [x19, #88]
ldr d0, [sp, #1016] ; 8-byte Folded Reload
fadd d1, d0, d26
fadd d0, d1, d20
str d0, [x19, #96]
ldr d0, [sp, #632] ; 8-byte Folded Reload
fadd d0, d0, d31
fadd d0, d30, d0
fneg d0, d0
str d0, [x19, #104]
ldur d0, [x29, #-224] ; 8-byte Folded Reload
fadd d0, d0, d13
fadd d0, d11, d0
str d0, [x19, #112]
LBB2_44:
mov w0, #0
add sp, sp, #1168
ldp x29, x30, [sp, #112] ; 16-byte Folded Reload
ldp x20, x19, [sp, #96] ; 16-byte Folded Reload
ldp x22, x21, [sp, #80] ; 16-byte Folded Reload
ldp x24, x23, [sp, #64] ; 16-byte Folded Reload
ldp d9, d8, [sp, #48] ; 16-byte Folded Reload
ldp d11, d10, [sp, #32] ; 16-byte Folded Reload
ldp d13, d12, [sp, #16] ; 16-byte Folded Reload
ldp d15, d14, [sp], #128 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
.globl _F_alloc_mem ; -- Begin function F_alloc_mem
.p2align 2
_F_alloc_mem: ; @F_alloc_mem
.cfi_startproc
; %bb.0:
mov w0, #0
ret
.cfi_endproc
; -- End function
.globl _F_init_mem ; -- Begin function F_init_mem
.p2align 2
_F_init_mem: ; @F_init_mem
.cfi_startproc
; %bb.0:
mov w0, #0
ret
.cfi_endproc
; -- End function
	.globl	_F_free_mem                     ; -- Begin function F_free_mem
	.p2align	2
_F_free_mem:                            ; @F_free_mem
	.cfi_startproc
; %bb.0:
                                        ; No-op: nothing was allocated by the matching
                                        ; alloc stub, so there is nothing to free.
	ret
	.cfi_endproc
                                        ; -- End function
.globl _F_checkout ; -- Begin function F_checkout
.p2align 2
_F_checkout: ; @F_checkout
.cfi_startproc
; %bb.0:
mov w0, #0
ret
.cfi_endproc
; -- End function
	.globl	_F_release                      ; -- Begin function F_release
	.p2align	2
_F_release:                             ; @F_release
	.cfi_startproc
; %bb.0:
                                        ; No-op: checkout never hands out real
                                        ; resources, so release has nothing to do.
	ret
	.cfi_endproc
                                        ; -- End function
	.globl	_F_incref                       ; -- Begin function F_incref
	.p2align	2
_F_incref:                              ; @F_incref
	.cfi_startproc
; %bb.0:
                                        ; No-op reference counting: this module keeps
                                        ; no shared state, so incref does nothing.
	ret
	.cfi_endproc
                                        ; -- End function
	.globl	_F_decref                       ; -- Begin function F_decref
	.p2align	2
_F_decref:                              ; @F_decref
	.cfi_startproc
; %bb.0:
                                        ; No-op reference counting: counterpart of the
                                        ; empty incref above; nothing to tear down.
	ret
	.cfi_endproc
                                        ; -- End function
.globl _F_n_in ; -- Begin function F_n_in
.p2align 2
_F_n_in: ; @F_n_in
.cfi_startproc
; %bb.0:
mov w0, #1
ret
.cfi_endproc
; -- End function
.globl _F_n_out ; -- Begin function F_n_out
.p2align 2
_F_n_out: ; @F_n_out
.cfi_startproc
; %bb.0:
mov w0, #1
ret
.cfi_endproc
; -- End function
.globl _F_default_in ; -- Begin function F_default_in
.p2align 2
_F_default_in: ; @F_default_in
.cfi_startproc
; %bb.0:
movi d0, #0000000000000000
ret
.cfi_endproc
; -- End function
	.globl	_F_name_in                      ; -- Begin function F_name_in
	.p2align	2
_F_name_in:                             ; @F_name_in
	.cfi_startproc
; %bb.0:
                                        ; const char *F_name_in(i): returns the string
                                        ; at l_.str for index 0, NULL for any other index.
Lloh0:
	adrp	x8, l_.str@PAGE
Lloh1:
	add	x8, x8, l_.str@PAGEOFF          ; x8 = &l_.str (adrp/add pair, see .loh hint)
	cmp	x0, #0                          ; is the requested index 0?
	csel	x0, x8, xzr, eq                 ; x0 = (i == 0) ? &l_.str : NULL
	ret
	.loh AdrpAdd	Lloh0, Lloh1
	.cfi_endproc
                                        ; -- End function
	.globl	_F_name_out                     ; -- Begin function F_name_out
	.p2align	2
_F_name_out:                            ; @F_name_out
	.cfi_startproc
; %bb.0:
                                        ; const char *F_name_out(i): returns the string
                                        ; at l_.str.1 for index 0, NULL for any other index.
Lloh2:
	adrp	x8, l_.str.1@PAGE
Lloh3:
	add	x8, x8, l_.str.1@PAGEOFF        ; x8 = &l_.str.1 (adrp/add pair, see .loh hint)
	cmp	x0, #0                          ; is the requested index 0?
	csel	x0, x8, xzr, eq                 ; x0 = (i == 0) ? &l_.str.1 : NULL
	ret
	.loh AdrpAdd	Lloh2, Lloh3
	.cfi_endproc
                                        ; -- End function
	.globl	_F_sparsity_in                  ; -- Begin function F_sparsity_in
	.p2align	2
_F_sparsity_in:                         ; @F_sparsity_in
	.cfi_startproc
; %bb.0:
                                        ; Returns &_foo_jac_s0 for index 0, NULL otherwise
                                        ; (presumably a CasADi-style sparsity pattern —
                                        ; confirm against _foo_jac_s0's definition).
Lloh4:
	adrp	x8, _foo_jac_s0@PAGE
Lloh5:
	add	x8, x8, _foo_jac_s0@PAGEOFF     ; x8 = &_foo_jac_s0 (adrp/add pair, see .loh hint)
	cmp	x0, #0                          ; is the requested index 0?
	csel	x0, x8, xzr, eq                 ; x0 = (i == 0) ? &_foo_jac_s0 : NULL
	ret
	.loh AdrpAdd	Lloh4, Lloh5
	.cfi_endproc
                                        ; -- End function
	.globl	_F_sparsity_out                 ; -- Begin function F_sparsity_out
	.p2align	2
_F_sparsity_out:                        ; @F_sparsity_out
	.cfi_startproc
; %bb.0:
                                        ; Returns &_foo_jac_s1 for index 0, NULL otherwise
                                        ; (presumably a CasADi-style sparsity pattern —
                                        ; confirm against _foo_jac_s1's definition).
Lloh6:
	adrp	x8, _foo_jac_s1@PAGE
Lloh7:
	add	x8, x8, _foo_jac_s1@PAGEOFF     ; x8 = &_foo_jac_s1 (adrp/add pair, see .loh hint)
	cmp	x0, #0                          ; is the requested index 0?
	csel	x0, x8, xzr, eq                 ; x0 = (i == 0) ? &_foo_jac_s1 : NULL
	ret
	.loh AdrpAdd	Lloh6, Lloh7
	.cfi_endproc
                                        ; -- End function
.globl _F_work ; -- Begin function F_work
.p2align 2
_F_work: ; @F_work
.cfi_startproc
; %bb.0:
cbz x0, LBB17_2
; %bb.1:
mov w8, #1
str x8, [x0]
LBB17_2:
cbz x1, LBB17_4
; %bb.3:
mov w8, #1
str x8, [x1]
LBB17_4:
cbz x2, LBB17_6
; %bb.5:
str xzr, [x2]
LBB17_6:
cbz x3, LBB17_8
; %bb.7:
str xzr, [x3]
LBB17_8:
mov w0, #0
ret
.cfi_endproc
; -- End function
	.globl	_jac_F                          ; -- Begin function jac_F
	.p2align	2
_jac_F:                                 ; @jac_F
	.cfi_startproc
; %bb.0:
                                        ; Thin wrapper: forward all register arguments
                                        ; unchanged to the local worker _foo_jac_f1,
                                        ; then report success regardless of its result.
	stp	x29, x30, [sp, #-16]!           ; 16-byte Folded Spill
	.cfi_def_cfa_offset 16
	mov	x29, sp
	.cfi_def_cfa w29, 16
	.cfi_offset w30, -8
	.cfi_offset w29, -16
	bl	_foo_jac_f1                     ; args in x0.../v0... pass straight through
	mov	w0, #0                          ; overwrite callee's return: always 0
	ldp	x29, x30, [sp], #16             ; 16-byte Folded Reload
	ret
	.cfi_endproc
                                        ; -- End function
.p2align 2 ; -- Begin function foo_jac_f1
_foo_jac_f1: ; @foo_jac_f1
.cfi_startproc
; %bb.0:
stp d15, d14, [sp, #-160]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 160
stp d13, d12, [sp, #16] ; 16-byte Folded Spill
stp d11, d10, [sp, #32] ; 16-byte Folded Spill
stp d9, d8, [sp, #48] ; 16-byte Folded Spill
stp x28, x27, [sp, #64] ; 16-byte Folded Spill
stp x26, x25, [sp, #80] ; 16-byte Folded Spill
stp x24, x23, [sp, #96] ; 16-byte Folded Spill
stp x22, x21, [sp, #112] ; 16-byte Folded Spill
stp x20, x19, [sp, #128] ; 16-byte Folded Spill
stp x29, x30, [sp, #144] ; 16-byte Folded Spill
add x29, sp, #144
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
.cfi_offset b8, -104
.cfi_offset b9, -112
.cfi_offset b10, -120
.cfi_offset b11, -128
.cfi_offset b12, -136
.cfi_offset b13, -144
.cfi_offset b14, -152
.cfi_offset b15, -160
mov w9, #12464
Lloh8:
adrp x16, ___chkstk_darwin@GOTPAGE
Lloh9:
ldr x16, [x16, ___chkstk_darwin@GOTPAGEOFF]
blr x16
sub sp, sp, #3, lsl #12 ; =12288
sub sp, sp, #176
mov x19, x1
ldr x20, [x0]
movi d12, #0000000000000000
cbz x20, LBB19_2
; %bb.1:
ldr d0, [x20]
fmov d1, #0.50000000
fmul d8, d0, d1
fmov d0, d8
bl _cos
fmov d13, d0
ldr d0, [x20, #192]
str d0, [sp, #9304] ; 8-byte Folded Spill
ldr d0, [x20, #176]
b LBB19_3
LBB19_2:
fmov d13, #1.00000000
movi d0, #0000000000000000
str d0, [sp, #9304] ; 8-byte Folded Spill
movi d8, #0000000000000000
movi d0, #0000000000000000
LBB19_3:
str d0, [sp, #11248] ; 8-byte Folded Spill
fmul d9, d13, d13
fmov d0, d8
bl _sin
fmov d14, d0
fmul d0, d0, d0
fmul d1, d13, d14
movi d2, #0000000000000000
str d2, [sp, #11240] ; 8-byte Folded Spill
cbz x20, LBB19_5
; %bb.4:
ldr d12, [x20, #8]
ldr d2, [x20, #24]
str d2, [sp, #11240] ; 8-byte Folded Spill
LBB19_5:
fsub d0, d9, d0
str q0, [sp, #10016] ; 16-byte Folded Spill
fadd d0, d1, d1
str q0, [sp, #10032] ; 16-byte Folded Spill
movi d8, #0000000000000000
cbz x20, LBB19_7
; %bb.6:
ldr d10, [x20, #64]
fmov d0, d10
bl _sin
str q0, [sp, #11136] ; 16-byte Folded Spill
ldr d9, [x20, #96]
fmov d0, d9
bl _cos
str q0, [sp, #12240] ; 16-byte Folded Spill
ldr d0, [x20, #128]
b LBB19_8
LBB19_7:
fmov d0, #1.00000000
str q0, [sp, #12240] ; 16-byte Folded Spill
movi d9, #0000000000000000
movi d10, #0000000000000000
movi d0, #0000000000000000
str q0, [sp, #11136] ; 16-byte Folded Spill
movi d0, #0000000000000000
LBB19_8:
bl ___sincos_stret
str q0, [sp, #11840] ; 16-byte Folded Spill
str q1, [sp, #12000] ; 16-byte Folded Spill
fmov d0, d10
bl _cos
; kill: def $d0 killed $d0 def $q0
ldr q3, [sp, #10016] ; 16-byte Folded Reload
fmul d4, d3, d0
ldr q2, [sp, #10032] ; 16-byte Folded Reload
ldr q5, [sp, #11136] ; 16-byte Folded Reload
fmul d1, d2, d5
fsub d4, d4, d1
str q4, [sp, #9600] ; 16-byte Folded Spill
ldr q1, [sp, #12240] ; 16-byte Folded Reload
fmul d10, d1, d4
fmul d3, d3, d5
str q0, [sp, #10736] ; 16-byte Folded Spill
fmul d1, d2, d0
fadd d0, d3, d1
str q0, [sp, #9584] ; 16-byte Folded Spill
fmov d0, d9
bl _sin
; kill: def $d0 killed $d0 def $q0
ldr q2, [sp, #9584] ; 16-byte Folded Reload
fmul d1, d0, d2
fsub d3, d10, d1
ldr q6, [sp, #12000] ; 16-byte Folded Reload
fmul d4, d6, d3
str q0, [sp, #11824] ; 16-byte Folded Spill
ldr q1, [sp, #9600] ; 16-byte Folded Reload
fmul d1, d0, d1
ldr q0, [sp, #12240] ; 16-byte Folded Reload
fmul d2, d0, d2
fadd d2, d1, d2
ldr q5, [sp, #11840] ; 16-byte Folded Reload
fmul d1, d5, d2
fsub d9, d4, d1
mov x8, #-4863887597560135680
fmov d0, x8
fmul d0, d9, d0
str q3, [sp, #6688] ; 16-byte Folded Spill
fmul d1, d5, d3
str q2, [sp, #6672] ; 16-byte Folded Spill
fmul d2, d6, d2
fadd d10, d1, d2
mov x8, #-4868391197187506176
fmov d1, x8
fmul d1, d10, d1
fadd d0, d0, d1
stur d0, [x29, #-168] ; 8-byte Folded Spill
mov x8, #4363988038922010624
fmov d0, x8
fmul d11, d10, d0
fsub d0, d9, d11
str d0, [sp, #11760] ; 8-byte Folded Spill
cbz x20, LBB19_10
; %bb.9:
ldr d8, [x20, #160]
LBB19_10:
fmov d0, d8
bl ___sincos_stret
fmov d2, d0
ldr d0, [sp, #11760] ; 8-byte Folded Reload
stp d1, d2, [x29, #-208] ; 16-byte Folded Spill
fmul d0, d0, d1
mov x8, #4363988038922010624
fmov d1, x8
fmul d1, d9, d1
fsub d3, d1, d10
str d3, [sp, #11496] ; 8-byte Folded Spill
fmul d2, d3, d2
fadd d2, d0, d2
mov x8, #-7378697629483820647
movk x8, #39322
movk x8, #16297, lsl #48
fmov d0, x8
str d2, [sp, #12336] ; 8-byte Folded Spill
fmul d2, d2, d0
ldur d0, [x29, #-168] ; 8-byte Folded Reload
str d2, [sp, #11376] ; 8-byte Folded Spill
fmul d0, d0, d2
mov x8, #-4863887597560135680
fmov d2, x8
fmul d2, d10, d2
mov x8, #4354980839667269632
fmov d3, x8
fmul d3, d9, d3
fadd d2, d3, d2
stur d2, [x29, #-176] ; 8-byte Folded Spill
cbz x20, LBB19_12
; %bb.11:
ldr d2, [x20, #32]
b LBB19_13
LBB19_12:
movi d2, #0000000000000000
LBB19_13:
mov x8, #33620
movk x8, #2364, lsl #16
movk x8, #33974, lsl #32
movk x8, #49073, lsl #48
fmov d3, x8
ldr q4, [sp, #10032] ; 16-byte Folded Reload
fmul d4, d4, d3
mov x8, #39127
movk x8, #24179, lsl #16
movk x8, #24811, lsl #32
movk x8, #49072, lsl #48
fmov d3, x8
ldr q5, [sp, #10016] ; 16-byte Folded Reload
fmul d3, d5, d3
str d4, [sp, #5240] ; 8-byte Folded Spill
fadd d3, d4, d3
str d3, [sp, #12040] ; 8-byte Folded Spill
fadd d5, d3, d2
mov x8, #11201
movk x8, #50599, lsl #16
movk x8, #31589, lsl #32
movk x8, #49010, lsl #48
fmov d2, x8
ldr q3, [sp, #9584] ; 16-byte Folded Reload
fmul d2, d3, d2
mov x8, #52090
movk x8, #42545, lsl #16
movk x8, #26349, lsl #32
movk x8, #49113, lsl #48
fmov d3, x8
ldr q4, [sp, #9600] ; 16-byte Folded Reload
fmul d3, d4, d3
fadd d2, d2, d3
str d2, [sp, #12024] ; 8-byte Folded Spill
stur d5, [x29, #-232] ; 8-byte Folded Spill
fadd d2, d2, d5
mov x8, #43115
movk x8, #62349, lsl #16
movk x8, #30721, lsl #32
movk x8, #49115, lsl #48
fmov d3, x8
ldr q4, [sp, #6688] ; 16-byte Folded Reload
fmul d3, d4, d3
str d3, [sp, #12032] ; 8-byte Folded Spill
fadd d2, d3, d2
mov x8, #62612
movk x8, #18904, lsl #16
movk x8, #1144, lsl #32
movk x8, #49064, lsl #48
fmov d3, x8
fmul d3, d10, d3
mov x8, #47272
movk x8, #56762, lsl #16
movk x8, #43178, lsl #32
movk x8, #49060, lsl #48
fmov d4, x8
fmul d4, d9, d4
fadd d3, d3, d4
str d3, [sp, #12344] ; 8-byte Folded Spill
fadd d5, d3, d2
fadd d3, d10, d1
mov x8, #36544
movk x8, #43611, lsl #16
movk x8, #860, lsl #32
movk x8, #16326, lsl #48
fmov d1, x8
fmul d1, d3, d1
fadd d7, d9, d11
mov x8, #18456
movk x8, #63321, lsl #16
movk x8, #33926, lsl #32
movk x8, #48991, lsl #48
fmov d2, x8
fmul d2, d7, d2
fadd d1, d1, d2
mov x8, #63706
movk x8, #13221, lsl #16
movk x8, #1281, lsl #32
movk x8, #16209, lsl #48
fmov d2, x8
ldur d4, [x29, #-176] ; 8-byte Folded Reload
fmul d2, d4, d2
fadd d1, d2, d1
str d1, [sp, #12128] ; 8-byte Folded Spill
str d5, [sp, #12296] ; 8-byte Folded Spill
fadd d16, d1, d5
ldur d6, [x29, #-208] ; 8-byte Folded Reload
str d3, [sp, #11896] ; 8-byte Folded Spill
fmul d1, d3, d6
ldur d5, [x29, #-200] ; 8-byte Folded Reload
str d7, [sp, #12136] ; 8-byte Folded Spill
fmul d2, d7, d5
fadd d2, d1, d2
mov x8, #-7378697629483820647
movk x8, #39322
movk x8, #16297, lsl #48
fmov d1, x8
str d2, [sp, #12016] ; 8-byte Folded Spill
fmul d2, d2, d1
fadd d2, d2, d16
mov x8, #-7378697629483820647
movk x8, #39322
movk x8, #49049, lsl #48
fmov d3, x8
fadd d3, d2, d3
fmov d2, #-0.50000000
fmul d2, d3, d2
str d3, [sp, #10400] ; 8-byte Folded Spill
fadd d2, d3, d2
str d16, [sp, #12176] ; 8-byte Folded Spill
fsub d2, d2, d16
str d2, [sp, #11384] ; 8-byte Folded Spill
fmul d2, d4, d2
fadd d0, d0, d2
mov x8, #-4863887597560135680
fmov d2, x8
fmul d2, d6, d2
mov x8, #4354980839667269632
fmov d3, x8
fmul d3, d5, d3
fadd d4, d3, d2
fmul d6, d4, d1
fadd d0, d6, d0
str d0, [sp, #11904] ; 8-byte Folded Spill
str d13, [sp, #11400] ; 8-byte Folded Spill
str d14, [sp, #11392] ; 8-byte Folded Spill
cbz x20, LBB19_15
; %bb.14:
ldr d0, [x20, #72]
fadd d5, d12, d0
ldr d0, [x20, #104]
fadd d15, d5, d0
ldr d0, [x20, #136]
fadd d8, d15, d0
ldr d17, [x20, #168]
fadd d7, d8, d17
ldr d0, [x20, #40]
b LBB19_16
LBB19_15:
movi d17, #0000000000000000
fadd d7, d12, d17
fmov d15, d7
fmov d5, d7
fmov d8, d7
movi d0, #0000000000000000
LBB19_16:
str d5, [sp, #12208] ; 8-byte Folded Spill
str d17, [sp, #12280] ; 8-byte Folded Spill
stur d7, [x29, #-184] ; 8-byte Folded Spill
mov x8, #33620
movk x8, #2364, lsl #16
movk x8, #33974, lsl #32
movk x8, #49073, lsl #48
fmov d1, x8
ldr q2, [sp, #10016] ; 16-byte Folded Reload
fmul d2, d2, d1
mov x8, #39127
movk x8, #24179, lsl #16
movk x8, #24811, lsl #32
movk x8, #16304, lsl #48
fmov d1, x8
ldr q3, [sp, #10032] ; 16-byte Folded Reload
fmul d1, d3, d1
str d2, [sp, #5232] ; 8-byte Folded Spill
fadd d1, d2, d1
str d1, [sp, #11416] ; 8-byte Folded Spill
fmul d1, d1, d12
str d1, [sp, #6144] ; 8-byte Folded Spill
fadd d3, d1, d0
mov x8, #11201
movk x8, #50599, lsl #16
movk x8, #31589, lsl #32
movk x8, #49010, lsl #48
fmov d0, x8
ldr q1, [sp, #9600] ; 16-byte Folded Reload
fmul d0, d1, d0
mov x8, #52090
movk x8, #42545, lsl #16
movk x8, #26349, lsl #32
movk x8, #16345, lsl #48
fmov d1, x8
ldr q2, [sp, #9584] ; 16-byte Folded Reload
fmul d1, d2, d1
fadd d0, d0, d1
str d0, [sp, #11480] ; 8-byte Folded Spill
fmul d0, d0, d5
str d0, [sp, #11040] ; 8-byte Folded Spill
stur d3, [x29, #-248] ; 8-byte Folded Spill
fadd d2, d0, d3
mov x8, #43115
movk x8, #62349, lsl #16
movk x8, #30721, lsl #32
movk x8, #49115, lsl #48
fmov d0, x8
ldr q1, [sp, #6672] ; 16-byte Folded Reload
fmul d0, d1, d0
str d0, [sp, #11296] ; 8-byte Folded Spill
fmul d0, d0, d15
str d2, [sp, #10984] ; 8-byte Folded Spill
fsub d2, d2, d0
mov x8, #62612
movk x8, #18904, lsl #16
movk x8, #1144, lsl #32
movk x8, #49064, lsl #48
fmov d0, x8
fmul d0, d9, d0
mov x8, #47272
movk x8, #56762, lsl #16
movk x8, #43178, lsl #32
movk x8, #16292, lsl #48
fmov d1, x8
fmul d1, d10, d1
fadd d0, d0, d1
str d0, [sp, #12272] ; 8-byte Folded Spill
fmul d0, d0, d8
str d0, [sp, #11032] ; 8-byte Folded Spill
str d2, [sp, #10392] ; 8-byte Folded Spill
fadd d16, d0, d2
mov x8, #36544
movk x8, #43611, lsl #16
movk x8, #860, lsl #32
movk x8, #16326, lsl #48
fmov d0, x8
ldr d2, [sp, #11760] ; 8-byte Folded Reload
fmul d0, d2, d0
mov x8, #18456
movk x8, #63321, lsl #16
movk x8, #33926, lsl #32
movk x8, #48991, lsl #48
fmov d1, x8
ldr d3, [sp, #11496] ; 8-byte Folded Reload
fmul d1, d3, d1
fadd d0, d0, d1
mov x8, #63706
movk x8, #13221, lsl #16
movk x8, #1281, lsl #32
movk x8, #16209, lsl #48
fmov d1, x8
ldur d23, [x29, #-168] ; 8-byte Folded Reload
fmul d1, d23, d1
stur d12, [x29, #-256] ; 8-byte Folded Spill
fadd d0, d1, d0
str d0, [sp, #12120] ; 8-byte Folded Spill
fmul d0, d0, d8
str d0, [sp, #11024] ; 8-byte Folded Spill
str d16, [sp, #12232] ; 8-byte Folded Spill
fadd d21, d0, d16
str d21, [sp, #12144] ; 8-byte Folded Spill
ldr d19, [sp, #12336] ; 8-byte Folded Reload
ldr d22, [sp, #11376] ; 8-byte Folded Reload
fmul d0, d19, d22
ldr d16, [sp, #11384] ; 8-byte Folded Reload
ldr d1, [sp, #12016] ; 8-byte Folded Reload
fmul d1, d1, d16
fadd d0, d0, d1
fmul d1, d4, d6
fadd d13, d1, d0
fmul d0, d19, d13
fmov d20, d4
str d4, [sp, #12160] ; 8-byte Folded Spill
ldp d5, d4, [x29, #-208] ; 16-byte Folded Reload
fmul d1, d3, d5
fmul d2, d2, d4
fsub d2, d1, d2
fmul d1, d2, d22
fmov d19, d2
stur d2, [x29, #-216] ; 8-byte Folded Spill
ldr d2, [sp, #12136] ; 8-byte Folded Reload
fmul d2, d2, d5
ldr d3, [sp, #11896] ; 8-byte Folded Reload
fmul d3, d3, d4
fsub d2, d2, d3
str d2, [sp, #11864] ; 8-byte Folded Spill
fmul d2, d2, d16
fadd d1, d1, d2
mov x8, #4354980839667269632
fmov d2, x8
fmul d2, d5, d2
mov x8, #4359484439294640128
fmov d3, x8
fmul d3, d4, d3
fadd d3, d2, d3
str d3, [sp, #12200] ; 8-byte Folded Spill
str d6, [sp, #4456] ; 8-byte Folded Spill
fmul d2, d3, d6
fadd d9, d2, d1
fmul d1, d19, d9
fadd d0, d0, d1
ldr d4, [sp, #11904] ; 8-byte Folded Reload
fmul d1, d23, d4
fadd d10, d1, d0
fmul d0, d10, d7
fmul d5, d23, d17
str d5, [sp, #12320] ; 8-byte Folded Spill
fmul d1, d20, d13
fmul d2, d3, d9
fadd d1, d1, d2
fadd d1, d4, d1
str d1, [sp, #11736] ; 8-byte Folded Spill
fmul d1, d1, d5
fsub d0, d0, d1
fadd d12, d0, d21
mov x8, #6148914691236517205
movk x8, #16341, lsl #48
fmov d0, x8
str d0, [sp, #12112] ; 8-byte Folded Spill
fsub d0, d0, d12
mov x8, #4632233691727265792
fmov d1, x8
stur d1, [x29, #-192] ; 8-byte Folded Spill
fmul d0, d0, d1
bl _tanh
str d0, [sp, #12264] ; 8-byte Folded Spill
fmul d1, d0, d0
fmov d0, #1.00000000
fsub d0, d0, d1
str d0, [sp, #10904] ; 8-byte Folded Spill
ldr d14, [sp, #10400] ; 8-byte Folded Reload
fmul d0, d14, d14
mov x8, #26865
movk x8, #35043, lsl #16
movk x8, #63669, lsl #32
movk x8, #16100, lsl #48
fmov d1, x8
stur d1, [x29, #-224] ; 8-byte Folded Spill
fadd d0, d0, d1
fsqrt d0, d0
fmov d1, #1.50000000
str d0, [sp, #10336] ; 8-byte Folded Spill
bl _pow
mov x8, #10523
movk x8, #38535, lsl #16
movk x8, #12921, lsl #32
movk x8, #16642, lsl #48
fmov d1, x8
fmul d11, d0, d1
fmov d0, #3.00000000
fmul d0, d12, d0
fmov d1, #1.00000000
fsub d0, d1, d0
str d0, [sp, #10960] ; 8-byte Folded Spill
fmul d12, d11, d0
mov x8, #211106232532992
movk x8, #49266, lsl #48
fmov d0, x8
str d0, [sp, #12096] ; 8-byte Folded Spill
fmul d0, d14, d0
bl _tanh
fmov d1, #0.50000000
str d0, [sp, #10328] ; 8-byte Folded Spill
fmul d0, d0, d1
fadd d18, d0, d1
fmov d26, #0.50000000
ldr d0, [sp, #11416] ; 8-byte Folded Reload
ldr d1, [sp, #11480] ; 8-byte Folded Reload
fadd d0, d0, d1
stur d0, [x29, #-240] ; 8-byte Folded Spill
ldr d1, [sp, #11296] ; 8-byte Folded Reload
fsub d0, d0, d1
str d0, [sp, #12328] ; 8-byte Folded Spill
ldr d1, [sp, #12272] ; 8-byte Folded Reload
fadd d0, d0, d1
str d0, [sp, #12288] ; 8-byte Folded Spill
ldr d1, [sp, #12120] ; 8-byte Folded Reload
fadd d24, d0, d1
str d24, [sp, #12072] ; 8-byte Folded Spill
ldr d3, [sp, #12040] ; 8-byte Folded Reload
ldur d0, [x29, #-256] ; 8-byte Folded Reload
fmul d0, d3, d0
ldr d1, [sp, #11240] ; 8-byte Folded Reload
fsub d1, d1, d0
str d1, [sp, #11640] ; 8-byte Folded Spill
ldr d5, [sp, #12024] ; 8-byte Folded Reload
ldr d0, [sp, #12208] ; 8-byte Folded Reload
fmul d0, d5, d0
fsub d1, d1, d0
ldr d4, [sp, #12032] ; 8-byte Folded Reload
str d15, [sp, #12056] ; 8-byte Folded Spill
fmul d0, d4, d15
str d1, [sp, #12152] ; 8-byte Folded Spill
fsub d1, d1, d0
ldr d6, [sp, #12344] ; 8-byte Folded Reload
fmul d0, d6, d8
str d1, [sp, #12168] ; 8-byte Folded Spill
fsub d1, d1, d0
ldr d7, [sp, #12128] ; 8-byte Folded Reload
str d8, [sp, #11368] ; 8-byte Folded Spill
fmul d0, d7, d8
stur d1, [x29, #-160] ; 8-byte Folded Spill
fsub d16, d1, d0
str d16, [sp, #12048] ; 8-byte Folded Spill
ldur d23, [x29, #-176] ; 8-byte Folded Reload
ldr d0, [sp, #12280] ; 8-byte Folded Reload
fmul d1, d23, d0
ldr d0, [sp, #11736] ; 8-byte Folded Reload
fmul d0, d0, d1
fmov d28, d1
str d13, [sp, #12192] ; 8-byte Folded Spill
ldr d21, [sp, #12016] ; 8-byte Folded Reload
fmul d1, d21, d13
str d9, [sp, #11976] ; 8-byte Folded Spill
ldr d22, [sp, #11864] ; 8-byte Folded Reload
fmul d2, d22, d9
fadd d1, d1, d2
ldr d27, [sp, #11904] ; 8-byte Folded Reload
fmul d2, d23, d27
fadd d2, d2, d1
ldur d1, [x29, #-184] ; 8-byte Folded Reload
fmul d1, d2, d1
fsub d0, d0, d1
fadd d19, d0, d16
fmul d0, d19, d19
ldr d30, [sp, #12320] ; 8-byte Folded Reload
fmul d1, d2, d30
fmov d25, d2
str d2, [sp, #11752] ; 8-byte Folded Spill
fmul d2, d10, d28
fsub d17, d1, d2
fmul d1, d17, d17
fadd d0, d1, d0
ldur d15, [x29, #-224] ; 8-byte Folded Reload
fadd d0, d0, d15
fsqrt d16, d0
mov x8, #-7378697629483820647
movk x8, #39322
movk x8, #16329, lsl #48
fmov d0, x8
str d0, [sp, #12088] ; 8-byte Folded Spill
fdiv d0, d16, d0
fmov d1, #1.00000000
fminnm d1, d0, d1
mov x8, #-7378697629483820647
movk x8, #39322
movk x8, #16361, lsl #48
fmov d2, x8
fmul d1, d1, d2
fmov d29, d2
str d2, [sp, #11968] ; 8-byte Folded Spill
fmul d2, d16, d26
fadd d20, d2, d1
fadd d1, d3, d5
str d1, [sp, #12304] ; 8-byte Folded Spill
fadd d1, d1, d4
str d1, [sp, #12312] ; 8-byte Folded Spill
fadd d1, d1, d6
str d1, [sp, #12256] ; 8-byte Folded Spill
fadd d1, d1, d7
str d1, [sp, #12080] ; 8-byte Folded Spill
fadd d1, d1, d25
fdiv d2, d1, d16
fmul d3, d19, d2
fmul d4, d3, d20
fadd d4, d24, d4
str d10, [sp, #12184] ; 8-byte Folded Spill
fadd d4, d10, d4
str d12, [sp, #10976] ; 8-byte Folded Spill
fmul d7, d18, d12
fmul d5, d7, d4
fmul d5, d5, d26
ldr d6, [sp, #10904] ; 8-byte Folded Reload
fmul d5, d6, d5
ldur d24, [x29, #-192] ; 8-byte Folded Reload
fmul d5, d5, d24
ldr d6, [sp, #12264] ; 8-byte Folded Reload
fmul d6, d6, d26
fadd d6, d6, d26
fmul d4, d6, d4
str d18, [sp, #10544] ; 8-byte Folded Spill
str d4, [sp, #10320] ; 8-byte Folded Spill
fmul d4, d18, d4
str d11, [sp, #10568] ; 8-byte Folded Spill
str d4, [sp, #10344] ; 8-byte Folded Spill
fmul d4, d11, d4
fmov d18, #3.00000000
fmul d4, d4, d18
fadd d18, d5, d4
str d6, [sp, #10896] ; 8-byte Folded Spill
str d7, [sp, #10480] ; 8-byte Folded Spill
fmul d7, d6, d7
str d20, [sp, #9896] ; 8-byte Folded Spill
fmul d6, d7, d20
fmul d4, d19, d6
fdiv d5, d4, d16
fdiv d4, d5, d16
str d4, [sp, #10624] ; 8-byte Folded Spill
fmul d1, d1, d4
fmul d3, d3, d7
fmul d4, d3, d26
fsub d1, d1, d4
fmov d4, #1.00000000
fcmp d0, d4
fmul d0, d3, d29
movi d3, #0000000000000000
fcsel d3, d4, d3, ls
str d3, [sp, #9912] ; 8-byte Folded Spill
fmul d0, d0, d3
fmov d3, #5.00000000
fmul d0, d0, d3
fsub d0, d1, d0
str d16, [sp, #9928] ; 8-byte Folded Spill
fadd d1, d16, d16
str d1, [sp, #10608] ; 8-byte Folded Spill
fdiv d0, d0, d1
ldur d1, [x29, #-184] ; 8-byte Folded Reload
str d18, [sp, #8408] ; 8-byte Folded Spill
fmul d1, d1, d18
str d17, [sp, #4320] ; 8-byte Folded Spill
fadd d3, d17, d17
str d3, [sp, #10616] ; 8-byte Folded Spill
fmul d17, d3, d0
str d17, [sp, #10208] ; 8-byte Folded Spill
str d28, [sp, #11888] ; 8-byte Folded Spill
fmul d3, d28, d17
fsub d4, d1, d3
str d19, [sp, #9904] ; 8-byte Folded Spill
fadd d1, d19, d19
str d1, [sp, #10600] ; 8-byte Folded Spill
fmul d0, d1, d0
str d6, [sp, #9920] ; 8-byte Folded Spill
fmul d1, d2, d6
fsub d2, d0, d1
fmul d0, d30, d17
ldur d1, [x29, #-184] ; 8-byte Folded Reload
fmul d1, d1, d2
fsub d3, d0, d1
str d2, [sp, #6120] ; 8-byte Folded Spill
fmul d0, d28, d2
fmul d1, d30, d18
fsub d1, d0, d1
str d3, [sp, #11160] ; 8-byte Folded Spill
fmul d0, d23, d3
str d5, [sp, #9352] ; 8-byte Folded Spill
fmul d2, d23, d5
str d2, [sp, #9288] ; 8-byte Folded Spill
fsub d0, d0, d2
str d1, [sp, #5224] ; 8-byte Folded Spill
fadd d0, d1, d0
ldur d3, [x29, #-168] ; 8-byte Folded Reload
fmul d1, d3, d4
fadd d0, d1, d0
str d4, [sp, #12264] ; 8-byte Folded Spill
fmul d1, d27, d4
fmul d2, d27, d7
str d2, [sp, #8184] ; 8-byte Folded Spill
fsub d1, d1, d2
str d7, [sp, #10912] ; 8-byte Folded Spill
fmul d2, d3, d7
str d2, [sp, #9280] ; 8-byte Folded Spill
fsub d0, d0, d2
str d0, [sp, #6720] ; 8-byte Folded Spill
ldr d2, [sp, #11376] ; 8-byte Folded Reload
fmul d0, d2, d0
fadd d0, d1, d0
str d0, [sp, #11984] ; 8-byte Folded Spill
mov x8, #7864
movk x8, #60293, lsl #16
movk x8, #47185, lsl #32
movk x8, #49054, lsl #48
fmov d0, x8
ldr d1, [sp, #12176] ; 8-byte Folded Reload
fadd d10, d1, d0
fmul d0, d10, d26
fsub d0, d10, d0
fsub d2, d0, d1
fmul d9, d21, d2
ldr d0, [sp, #12336] ; 8-byte Folded Reload
fmul d0, d0, d9
fmul d13, d22, d2
ldur d1, [x29, #-216] ; 8-byte Folded Reload
fmul d1, d1, d13
fadd d0, d0, d1
str d2, [sp, #11280] ; 8-byte Folded Spill
fmul d11, d23, d2
fmul d1, d3, d11
fadd d8, d1, d0
ldr d0, [sp, #12160] ; 8-byte Folded Reload
fmul d0, d0, d9
ldr d1, [sp, #12200] ; 8-byte Folded Reload
fmul d1, d1, d13
fadd d0, d0, d1
fadd d1, d11, d0
str d1, [sp, #11648] ; 8-byte Folded Spill
ldur d0, [x29, #-184] ; 8-byte Folded Reload
fmul d0, d8, d0
fmul d1, d1, d30
fsub d0, d0, d1
ldr d1, [sp, #12144] ; 8-byte Folded Reload
fadd d12, d0, d1
ldr d0, [sp, #12112] ; 8-byte Folded Reload
fsub d0, d0, d12
fmul d0, d0, d24
bl _tanh
str d0, [sp, #12144] ; 8-byte Folded Spill
fmul d1, d0, d0
fmov d14, #1.00000000
fsub d0, d14, d1
str d0, [sp, #10888] ; 8-byte Folded Spill
fmul d0, d10, d10
fadd d0, d0, d15
fsqrt d0, d0
fmov d1, #1.50000000
str d0, [sp, #10168] ; 8-byte Folded Spill
bl _pow
mov x8, #18811
movk x8, #34700, lsl #16
movk x8, #61210, lsl #32
movk x8, #16643, lsl #48
fmov d1, x8
fmul d15, d0, d1
fmov d0, #3.00000000
fmul d0, d12, d0
fsub d0, d14, d0
fmov d14, #1.00000000
str d0, [sp, #10944] ; 8-byte Folded Spill
fmul d12, d15, d0
str d10, [sp, #10304] ; 8-byte Folded Spill
ldr d0, [sp, #12096] ; 8-byte Folded Reload
fmul d0, d10, d0
bl _tanh
movi d31, #0000000000000000
ldr d26, [sp, #12048] ; 8-byte Folded Reload
str d0, [sp, #10152] ; 8-byte Folded Spill
fmov d1, #0.50000000
fmul d0, d0, d1
fadd d16, d0, d1
fmov d23, #0.50000000
str d12, [sp, #10952] ; 8-byte Folded Spill
fmul d18, d16, d12
ldr d30, [sp, #11888] ; 8-byte Folded Reload
ldr d10, [sp, #11648] ; 8-byte Folded Reload
fmul d0, d10, d30
str d9, [sp, #12104] ; 8-byte Folded Spill
ldr d28, [sp, #12016] ; 8-byte Folded Reload
fmul d1, d28, d9
str d13, [sp, #12064] ; 8-byte Folded Spill
ldr d9, [sp, #11864] ; 8-byte Folded Reload
fmul d2, d9, d13
fadd d1, d1, d2
ldur d12, [x29, #-176] ; 8-byte Folded Reload
fmul d2, d12, d11
fadd d7, d2, d1
ldur d1, [x29, #-184] ; 8-byte Folded Reload
fmul d1, d7, d1
fsub d0, d0, d1
fadd d19, d0, d26
fmul d0, d19, d19
ldr d13, [sp, #12320] ; 8-byte Folded Reload
fmul d1, d7, d13
fmul d2, d8, d30
fsub d17, d1, d2
fmul d1, d17, d17
fadd d0, d1, d0
ldur d1, [x29, #-224] ; 8-byte Folded Reload
fadd d0, d0, d1
fsqrt d6, d0
ldr d0, [sp, #12088] ; 8-byte Folded Reload
fdiv d0, d6, d0
fminnm d1, d0, d14
ldr d25, [sp, #11968] ; 8-byte Folded Reload
fmul d1, d1, d25
fmul d2, d6, d23
fadd d20, d2, d1
ldr d22, [sp, #12080] ; 8-byte Folded Reload
fadd d1, d22, d7
fdiv d2, d1, d6
str d2, [sp, #9728] ; 8-byte Folded Spill
fmul d2, d19, d2
fmul d3, d2, d20
ldr d24, [sp, #12072] ; 8-byte Folded Reload
fadd d3, d24, d3
str d8, [sp, #12176] ; 8-byte Folded Spill
fadd d3, d8, d3
fmul d4, d18, d3
fmul d4, d4, d23
ldr d5, [sp, #10888] ; 8-byte Folded Reload
fmul d4, d5, d4
ldur d5, [x29, #-192] ; 8-byte Folded Reload
fmul d4, d4, d5
ldr d5, [sp, #12144] ; 8-byte Folded Reload
fmul d5, d5, d23
fadd d21, d5, d23
fmul d3, d21, d3
str d16, [sp, #10440] ; 8-byte Folded Spill
str d3, [sp, #9960] ; 8-byte Folded Spill
fmul d3, d16, d3
str d15, [sp, #10448] ; 8-byte Folded Spill
str d3, [sp, #9736] ; 8-byte Folded Spill
fmul d3, d15, d3
fmov d5, #3.00000000
fmul d3, d3, d5
fadd d16, d4, d3
ldur d3, [x29, #-184] ; 8-byte Folded Reload
fmul d3, d3, d16
str d17, [sp, #4288] ; 8-byte Folded Spill
fadd d5, d17, d17
str d18, [sp, #10432] ; 8-byte Folded Spill
str d21, [sp, #10880] ; 8-byte Folded Spill
fmul d17, d21, d18
str d20, [sp, #9880] ; 8-byte Folded Spill
fmul d4, d17, d20
str d4, [sp, #10936] ; 8-byte Folded Spill
str d19, [sp, #10424] ; 8-byte Folded Spill
fmul d4, d19, d4
fdiv d4, d4, d6
str d4, [sp, #6072] ; 8-byte Folded Spill
fdiv d4, d4, d6
str d4, [sp, #10584] ; 8-byte Folded Spill
fmul d1, d1, d4
fmul d2, d2, d17
fmul d4, d2, d23
fsub d1, d1, d4
fmov d4, #1.00000000
fcmp d0, d4
fcsel d4, d4, d31, ls
fmul d0, d2, d25
str d4, [sp, #9888] ; 8-byte Folded Spill
fmul d0, d0, d4
fmov d2, #5.00000000
fmul d0, d0, d2
fsub d0, d1, d0
str d6, [sp, #10496] ; 8-byte Folded Spill
fadd d1, d6, d6
str d1, [sp, #10576] ; 8-byte Folded Spill
fdiv d0, d0, d1
str d5, [sp, #10592] ; 8-byte Folded Spill
str d0, [sp, #9720] ; 8-byte Folded Spill
fmul d18, d5, d0
fmul d0, d30, d18
fsub d0, d3, d0
str d0, [sp, #10352] ; 8-byte Folded Spill
fmul d0, d11, d0
ldr d1, [sp, #11984] ; 8-byte Folded Reload
fadd d0, d0, d1
str d11, [sp, #11664] ; 8-byte Folded Spill
str d17, [sp, #11104] ; 8-byte Folded Spill
fmul d1, d11, d17
str d1, [sp, #8152] ; 8-byte Folded Spill
fsub d0, d0, d1
mov x8, #51491
movk x8, #54360, lsl #16
movk x8, #13074, lsl #32
movk x8, #49054, lsl #48
fmov d1, x8
fadd d0, d0, d1
mov x8, #56877
movk x8, #10885, lsl #16
movk x8, #2572, lsl #32
movk x8, #16289, lsl #48
fmov d1, x8
ldr d27, [sp, #12160] ; 8-byte Folded Reload
fmul d2, d27, d1
mov x8, #62994
movk x8, #14722, lsl #16
movk x8, #41829, lsl #32
movk x8, #16247, lsl #48
fmov d3, x8
ldr d29, [sp, #12200] ; 8-byte Folded Reload
fmul d4, d29, d3
fadd d2, d2, d4
mov x8, #54125
movk x8, #53060, lsl #16
movk x8, #15481, lsl #32
movk x8, #49041, lsl #48
fmov d19, d28
fmul d4, d28, d1
fmov d25, d9
fmul d5, d9, d3
fadd d4, d4, d5
fmov d5, x8
fmov d20, d12
fmul d6, d12, d5
fadd d6, d4, d6
fadd d17, d2, d5
mov x8, #54806
movk x8, #23353, lsl #16
movk x8, #56949, lsl #32
movk x8, #16326, lsl #48
ldr d23, [sp, #12336] ; 8-byte Folded Reload
fmul d1, d23, d1
ldur d28, [x29, #-216] ; 8-byte Folded Reload
fmul d2, d28, d3
fadd d1, d1, d2
fmov d8, d13
fmul d2, d6, d13
ldur d3, [x29, #-168] ; 8-byte Folded Reload
fmul d3, d3, d5
fadd d1, d1, d3
str d1, [sp, #11936] ; 8-byte Folded Spill
fmul d1, d1, d30
fsub d3, d2, d1
fmov d1, x8
fmul d5, d22, d1
fmul d4, d24, d1
ldur d2, [x29, #-184] ; 8-byte Folded Reload
fmul d2, d5, d2
str d2, [sp, #6712] ; 8-byte Folded Spill
str d17, [sp, #11568] ; 8-byte Folded Spill
fmul d2, d17, d2
str d3, [sp, #11176] ; 8-byte Folded Spill
fmul d3, d4, d3
fadd d2, d2, d3
str d5, [sp, #9632] ; 8-byte Folded Spill
fmul d3, d5, d30
str d4, [sp, #9640] ; 8-byte Folded Spill
fmul d4, d4, d13
fadd d22, d3, d4
str d6, [sp, #11944] ; 8-byte Folded Spill
fmul d3, d6, d22
fadd d2, d3, d2
ldr d3, [sp, #11752] ; 8-byte Folded Reload
ldr d4, [sp, #10208] ; 8-byte Folded Reload
fmul d3, d3, d4
fsub d2, d3, d2
ldr d3, [sp, #11736] ; 8-byte Folded Reload
ldr d4, [sp, #8408] ; 8-byte Folded Reload
fmul d3, d3, d4
fsub d2, d2, d3
str d7, [sp, #11672] ; 8-byte Folded Spill
str d18, [sp, #10160] ; 8-byte Folded Spill
fmul d3, d7, d18
fadd d2, d3, d2
str d16, [sp, #8400] ; 8-byte Folded Spill
fmov d3, d10
fmul d3, d10, d16
fsub d2, d2, d3
mov x8, #45033
movk x8, #40035, lsl #16
movk x8, #524, lsl #32
movk x8, #48971, lsl #48
mov x9, #45724
movk x9, #42429, lsl #16
movk x9, #11379, lsl #32
movk x9, #16169, lsl #48
fmov d3, x8
fmov d16, d19
fmul d4, d19, d3
fmov d5, x9
fmul d6, d9, d5
fsub d4, d4, d6
mov x8, #64744
movk x8, #21380, lsl #16
movk x8, #23316, lsl #32
movk x8, #16210, lsl #48
fmov d6, x8
fmov d17, d12
fmul d7, d12, d6
fadd d21, d7, d4
mov x8, #40862
movk x8, #31695, lsl #16
movk x8, #12355, lsl #32
movk x8, #16198, lsl #48
fmul d4, d19, d5
fmov d24, d19
fmov d7, x8
fmul d16, d9, d7
fsub d4, d16, d4
mov x8, #61406
movk x8, #16023, lsl #16
movk x8, #30452, lsl #32
movk x8, #16169, lsl #48
fmov d16, x8
fmul d17, d12, d16
fadd d20, d17, d4
fmul d4, d19, d21
fmul d17, d9, d20
fadd d4, d4, d17
fmov d19, d27
fmul d3, d27, d3
fmov d27, d29
fmul d17, d29, d5
fsub d3, d3, d17
fadd d18, d3, d6
fmov d6, d19
fmul d3, d19, d5
fmul d5, d29, d7
fsub d3, d5, d3
fadd d19, d3, d16
fmul d3, d6, d18
fmov d17, d6
fmul d5, d29, d19
fmov d7, d29
fadd d3, d3, d5
mov x8, #16684
movk x8, #33360, lsl #16
movk x8, #18212, lsl #32
movk x8, #48931, lsl #48
str d3, [sp, #4240] ; 8-byte Folded Spill
fadd d3, d4, d3
fmov d5, x8
fsub d3, d5, d3
str d21, [sp, #9336] ; 8-byte Folded Spill
fmul d5, d23, d21
str d20, [sp, #9344] ; 8-byte Folded Spill
fmov d27, d28
fmul d6, d28, d20
fadd d5, d5, d6
mov x8, #64744
movk x8, #21380, lsl #16
movk x8, #23316, lsl #32
movk x8, #16194, lsl #48
mov x9, #61406
movk x9, #16023, lsl #16
movk x9, #30452, lsl #32
movk x9, #16153, lsl #48
fmov d6, x8
fmul d7, d29, d6
fmov d16, x9
fmul d17, d17, d16
fsub d7, d7, d17
fadd d20, d7, d5
ldur d7, [x29, #-184] ; 8-byte Folded Reload
mov x8, #3449
movk x8, #18764, lsl #16
movk x8, #45194, lsl #32
movk x8, #16217, lsl #48
fmov d5, x8
fadd d21, d3, d5
fadd d4, d4, d5
fmul d3, d20, d13
str d4, [sp, #8320] ; 8-byte Folded Spill
fmul d4, d4, d30
fadd d3, d3, d4
str d18, [sp, #10784] ; 8-byte Folded Spill
fmul d4, d24, d18
ldr d18, [sp, #11640] ; 8-byte Folded Reload
ldr d17, [sp, #12280] ; 8-byte Folded Reload
fmov d5, d9
str d19, [sp, #10776] ; 8-byte Folded Spill
fmul d5, d9, d19
fadd d4, d4, d5
fmov d5, d28
fmul d5, d28, d6
fmul d6, d23, d16
fsub d5, d5, d6
fadd d4, d5, d4
str d4, [sp, #10712] ; 8-byte Folded Spill
fmul d4, d4, d7
fadd d6, d4, d3
ldr d16, [sp, #11248] ; 8-byte Folded Reload
ldr d3, [sp, #11416] ; 8-byte Folded Reload
fmul d3, d16, d3
ldr d4, [sp, #9304] ; 8-byte Folded Reload
fadd d3, d4, d3
ldr d4, [sp, #11240] ; 8-byte Folded Reload
fsub d5, d18, d4
ldur d4, [x29, #-256] ; 8-byte Folded Reload
fmul d4, d4, d5
fadd d4, d3, d4
mov x8, #54806
movk x8, #23353, lsl #16
movk x8, #56949, lsl #32
movk x8, #49094, lsl #48
fmov d3, x8
fmul d3, d30, d3
str d3, [sp, #9384] ; 8-byte Folded Spill
str d21, [sp, #8272] ; 8-byte Folded Spill
fmul d3, d21, d3
str d3, [sp, #8144] ; 8-byte Folded Spill
fadd d2, d3, d2
str d6, [sp, #6128] ; 8-byte Folded Spill
fmul d3, d6, d1
str d3, [sp, #9264] ; 8-byte Folded Spill
fadd d2, d3, d2
fmov d3, d13
fmul d1, d13, d1
str d1, [sp, #9376] ; 8-byte Folded Spill
str d20, [sp, #9296] ; 8-byte Folded Spill
fmul d1, d20, d1
str d1, [sp, #8136] ; 8-byte Folded Spill
fadd d1, d1, d2
str d1, [sp, #1944] ; 8-byte Folded Spill
fmul d1, d17, d1
str d4, [sp, #9320] ; 8-byte Folded Spill
cbz x20, LBB19_18
; %bb.17:
ldr d2, [x20, #208]
fadd d6, d16, d2
ldr d16, [sp, #11480] ; 8-byte Folded Reload
fmul d2, d16, d6
fadd d2, d4, d2
ldr d20, [sp, #12152] ; 8-byte Folded Reload
fsub d4, d20, d18
ldr d21, [sp, #12208] ; 8-byte Folded Reload
fmul d3, d21, d4
fadd d3, d3, d2
ldr d2, [x20, #224]
str d6, [sp, #11784] ; 8-byte Folded Spill
fadd d19, d6, d2
ldr d18, [sp, #11296] ; 8-byte Folded Reload
fmul d2, d18, d19
stur d3, [x29, #-224] ; 8-byte Folded Spill
fsub d2, d3, d2
ldr d24, [sp, #12168] ; 8-byte Folded Reload
fsub d6, d24, d20
ldr d23, [sp, #12056] ; 8-byte Folded Reload
fmul d3, d23, d6
fadd d20, d3, d2
ldr d31, [x20, #240]
b LBB19_19
LBB19_18:
fadd d19, d16, d31
ldr d16, [sp, #11480] ; 8-byte Folded Reload
fmul d2, d19, d16
fadd d2, d4, d2
ldr d6, [sp, #12152] ; 8-byte Folded Reload
fsub d4, d6, d18
ldr d21, [sp, #12208] ; 8-byte Folded Reload
fmul d3, d21, d4
fadd d3, d2, d3
ldr d18, [sp, #11296] ; 8-byte Folded Reload
fmul d2, d19, d18
stur d3, [x29, #-224] ; 8-byte Folded Spill
fsub d2, d3, d2
ldr d24, [sp, #12168] ; 8-byte Folded Reload
fsub d6, d24, d6
ldr d23, [sp, #12056] ; 8-byte Folded Reload
fmul d3, d23, d6
fadd d20, d3, d2
str d19, [sp, #11784] ; 8-byte Folded Spill
LBB19_19:
str d6, [sp, #8416] ; 8-byte Folded Spill
str d4, [sp, #6136] ; 8-byte Folded Spill
str d5, [sp, #5184] ; 8-byte Folded Spill
fadd d0, d1, d0
str d19, [sp, #11584] ; 8-byte Folded Spill
fadd d3, d19, d31
ldr d1, [sp, #12272] ; 8-byte Folded Reload
fmul d1, d1, d3
str d20, [sp, #12168] ; 8-byte Folded Spill
fadd d1, d20, d1
ldur d4, [x29, #-160] ; 8-byte Folded Reload
fsub d2, d4, d24
str d2, [sp, #10672] ; 8-byte Folded Spill
ldr d9, [sp, #11368] ; 8-byte Folded Reload
fmul d2, d9, d2
fadd d2, d2, d1
ldr d1, [sp, #12120] ; 8-byte Folded Reload
str d3, [sp, #11536] ; 8-byte Folded Spill
fmul d1, d1, d3
stur d2, [x29, #-192] ; 8-byte Folded Spill
fadd d1, d1, d2
fsub d2, d26, d4
str d2, [sp, #10664] ; 8-byte Folded Spill
fmul d2, d9, d2
fadd d1, d2, d1
str d1, [sp, #6728] ; 8-byte Folded Spill
movi d1, #0000000000000000
cbz x20, LBB19_21
; %bb.20:
ldr d1, [x20, #256]
LBB19_21:
ldp d4, d2, [x29, #-176] ; 16-byte Folded Reload
fmov d3, d1
fmul d1, d2, d1
fmov d25, d2
fmul d2, d4, d9
fmov d26, d4
str d2, [sp, #1936] ; 8-byte Folded Spill
fmul d2, d17, d2
fsub d1, d1, d2
str d1, [sp, #10112] ; 8-byte Folded Spill
ldr d16, [sp, #11568] ; 8-byte Folded Reload
fmul d1, d16, d1
ldr d2, [sp, #11536] ; 8-byte Folded Reload
fadd d18, d2, d3
fmov d20, d3
str d3, [sp, #11440] ; 8-byte Folded Spill
fmov d24, d2
ldr d5, [sp, #11936] ; 8-byte Folded Reload
fmul d2, d5, d18
fsub d1, d1, d2
ldr d6, [sp, #6728] ; 8-byte Folded Reload
fsub d1, d6, d1
mov x8, #54806
movk x8, #23353, lsl #16
movk x8, #56949, lsl #32
movk x8, #16326, lsl #48
fmov d19, x8
fmul d1, d1, d19
ldr d3, [sp, #11888] ; 8-byte Folded Reload
fmul d2, d16, d3
fmov d21, d3
ldr d4, [sp, #11944] ; 8-byte Folded Reload
fmul d3, d4, d7
fsub d2, d2, d3
str d2, [sp, #10000] ; 8-byte Folded Spill
fmul d2, d7, d2
ldr d3, [sp, #11176] ; 8-byte Folded Reload
ldr d23, [sp, #12320] ; 8-byte Folded Reload
fmul d3, d23, d3
fsub d2, d2, d3
fmul d2, d2, d19
fadd d1, d2, d1
mov x8, #11213
movk x8, #64899, lsl #16
movk x8, #2195, lsl #32
movk x8, #49148, lsl #48
fmov d2, x8
ldr d3, [sp, #11104] ; 8-byte Folded Reload
fadd d2, d3, d2
ldr d3, [sp, #10912] ; 8-byte Folded Reload
fadd d2, d3, d2
fsub d23, d1, d2
str d23, [sp, #9168] ; 8-byte Folded Spill
ldr d1, [sp, #8408] ; 8-byte Folded Reload
ldr d2, [sp, #8400] ; 8-byte Folded Reload
fadd d2, d1, d2
str d2, [sp, #12072] ; 8-byte Folded Spill
mov x8, #54806
movk x8, #23353, lsl #16
movk x8, #56949, lsl #32
movk x8, #49094, lsl #48
fmov d1, x8
fmul d1, d4, d1
str d1, [sp, #10088] ; 8-byte Folded Spill
ldr d3, [sp, #9632] ; 8-byte Folded Reload
fsub d1, d1, d3
str d1, [sp, #9512] ; 8-byte Folded Spill
fmul d1, d1, d9
fsub d1, d2, d1
str d1, [sp, #4440] ; 8-byte Folded Spill
fmul d1, d9, d1
fadd d1, d1, d23
fmul d2, d5, d19
str d2, [sp, #10080] ; 8-byte Folded Spill
ldr d4, [sp, #9640] ; 8-byte Folded Reload
fadd d2, d4, d2
str d2, [sp, #9504] ; 8-byte Folded Spill
fmul d2, d2, d24
fadd d2, d2, d1
str d2, [sp, #12112] ; 8-byte Folded Spill
mov x8, #63706
movk x8, #13221, lsl #16
movk x8, #1281, lsl #32
movk x8, #16209, lsl #48
fmov d1, x8
fmul d1, d2, d1
fadd d0, d0, d1
ldr d1, [sp, #6712] ; 8-byte Folded Reload
fmul d1, d7, d1
str d22, [sp, #9968] ; 8-byte Folded Spill
fmul d2, d21, d22
fadd d1, d1, d2
str d18, [sp, #11328] ; 8-byte Folded Spill
fmul d2, d4, d18
fadd d1, d1, d2
fmul d2, d6, d19
str d2, [sp, #9272] ; 8-byte Folded Spill
fadd d2, d2, d1
str d2, [sp, #12096] ; 8-byte Folded Spill
mov x8, #54125
movk x8, #53060, lsl #16
movk x8, #15481, lsl #32
movk x8, #49041, lsl #48
fmov d1, x8
fmul d1, d2, d1
fadd d0, d0, d1
ldr d1, [sp, #10712] ; 8-byte Folded Reload
fmul d2, d1, d19
str d19, [sp, #12080] ; 8-byte Folded Spill
fmul d1, d3, d16
str d2, [sp, #9256] ; 8-byte Folded Spill
fsub d1, d2, d1
str d1, [sp, #6408] ; 8-byte Folded Spill
fmul d1, d1, d17
str d1, [sp, #4432] ; 8-byte Folded Spill
fmul d1, d1, d9
fadd d0, d1, d0
ldr d1, [sp, #10784] ; 8-byte Folded Reload
ldr d2, [sp, #12336] ; 8-byte Folded Reload
fmul d1, d2, d1
ldr d2, [sp, #10776] ; 8-byte Folded Reload
ldur d3, [x29, #-216] ; 8-byte Folded Reload
fmul d2, d3, d2
fadd d1, d1, d2
mov x8, #64744
movk x8, #21380, lsl #16
movk x8, #23316, lsl #32
movk x8, #16194, lsl #48
fmov d2, x8
str d2, [sp, #12088] ; 8-byte Folded Spill
ldr d3, [sp, #11864] ; 8-byte Folded Reload
fmul d2, d3, d2
mov x8, #61406
movk x8, #16023, lsl #16
movk x8, #30452, lsl #32
movk x8, #16153, lsl #48
fmov d3, x8
str d3, [sp, #12144] ; 8-byte Folded Spill
ldr d5, [sp, #12016] ; 8-byte Folded Reload
fmul d3, d5, d3
fsub d2, d2, d3
fsub d1, d1, d2
str d1, [sp, #10008] ; 8-byte Folded Spill
fmul d2, d1, d19
fmul d1, d4, d16
str d2, [sp, #9248] ; 8-byte Folded Spill
fsub d1, d2, d1
str d1, [sp, #6400] ; 8-byte Folded Spill
fmul d1, d1, d20
fadd d0, d1, d0
str d0, [sp, #12048] ; 8-byte Folded Spill
mov x8, #43516
movk x8, #54001, lsl #16
movk x8, #25165, lsl #32
movk x8, #16240, lsl #48
fmov d1, x8
str d1, [sp, #12152] ; 8-byte Folded Spill
ldr d0, [sp, #11760] ; 8-byte Folded Reload
fmul d4, d0, d1
str d4, [sp, #11128] ; 8-byte Folded Spill
fmov d5, d0
fmul d0, d25, d4
ldr d2, [sp, #11896] ; 8-byte Folded Reload
fmul d1, d2, d1
fmov d6, d2
ldr d3, [sp, #12296] ; 8-byte Folded Reload
fadd d1, d1, d3
mov x8, #20972
movk x8, #7864, lsl #16
movk x8, #60293, lsl #32
movk x8, #49057, lsl #48
fmov d2, x8
fadd d11, d1, d2
fmov d1, #0.50000000
fmul d1, d11, d1
fsub d1, d11, d1
fsub d2, d1, d3
fmul d1, d26, d2
fadd d10, d0, d1
fmul d0, d5, d4
fmul d1, d6, d2
fadd d15, d0, d1
ldr d3, [sp, #11496] ; 8-byte Folded Reload
fmul d0, d3, d4
str d2, [sp, #10968] ; 8-byte Folded Spill
ldr d1, [sp, #12136] ; 8-byte Folded Reload
fmul d1, d1, d2
fadd d1, d0, d1
str d1, [sp, #11600] ; 8-byte Folded Spill
fmul d0, d5, d15
str d15, [sp, #11464] ; 8-byte Folded Spill
fmul d1, d3, d1
fadd d0, d0, d1
fmul d1, d25, d10
fadd d13, d1, d0
fmul d0, d13, d9
ldr d1, [sp, #12232] ; 8-byte Folded Reload
fadd d12, d0, d1
mov x26, #48998
movk x26, #16808, lsl #16
movk x26, #62387, lsl #32
movk x26, #49080, lsl #48
mov x25, #54885
movk x25, #33778, lsl #16
movk x25, #12745, lsl #32
movk x25, #16308, lsl #48
mov x24, #48998
movk x24, #16808, lsl #16
movk x24, #62387, lsl #32
movk x24, #49080, lsl #48
mov x23, #54885
movk x23, #33778, lsl #16
movk x23, #12745, lsl #32
movk x23, #49076, lsl #48
mov x21, #39915
movk x21, #11776, lsl #16
movk x21, #40689, lsl #32
movk x21, #49053, lsl #48
mov x22, #26610
movk x22, #29696, lsl #16
movk x22, #48971, lsl #32
movk x22, #16339, lsl #48
mov x8, #6148914691236517205
movk x8, #16341, lsl #48
fmov d0, x8
fsub d0, d0, d12
mov x8, #4632233691727265792
fmov d1, x8
str d1, [sp, #12296] ; 8-byte Folded Spill
fmul d0, d0, d1
bl _tanh
str d0, [sp, #12232] ; 8-byte Folded Spill
fmul d2, d0, d0
fmov d1, #1.00000000
fsub d0, d1, d2
str d0, [sp, #10368] ; 8-byte Folded Spill
fmul d0, d11, d11
mov x8, #26865
movk x8, #35043, lsl #16
movk x8, #63669, lsl #32
movk x8, #16100, lsl #48
fmov d8, x8
fadd d0, d0, d8
fsqrt d0, d0
fmov d1, #1.50000000
str d0, [sp, #9712] ; 8-byte Folded Spill
bl _pow
mov x8, #45572
movk x8, #23979, lsl #16
movk x8, #34811, lsl #32
movk x8, #16645, lsl #48
fmov d1, x8
fmul d14, d0, d1
fmov d0, #3.00000000
fmul d0, d12, d0
fmov d1, #1.00000000
fsub d0, d1, d0
str d0, [sp, #10384] ; 8-byte Folded Spill
fmul d12, d14, d0
mov x8, #211106232532992
movk x8, #49266, lsl #48
fmov d0, x8
str d11, [sp, #9656] ; 8-byte Folded Spill
fmul d0, d11, d0
bl _tanh
str d0, [sp, #9184] ; 8-byte Folded Spill
fmov d1, #0.50000000
fmul d0, d0, d1
fadd d16, d0, d1
fmov d28, #0.50000000
ldr d25, [sp, #11896] ; 8-byte Folded Reload
fmul d0, d25, d15
ldr d26, [sp, #12136] ; 8-byte Folded Reload
ldr d1, [sp, #11600] ; 8-byte Folded Reload
fmul d1, d26, d1
fadd d0, d0, d1
ldur d27, [x29, #-176] ; 8-byte Folded Reload
fmul d1, d27, d10
fadd d4, d1, d0
fmul d0, d4, d9
ldur d1, [x29, #-160] ; 8-byte Folded Reload
fsub d17, d1, d0
fmul d0, d17, d17
fadd d0, d0, d8
fsqrt d19, d0
mov x8, #-7378697629483820647
movk x8, #39322
movk x8, #16329, lsl #48
fmov d0, x8
fdiv d1, d19, d0
fmov d0, #1.00000000
fminnm d0, d1, d0
mov x8, #-7378697629483820647
movk x8, #39322
movk x8, #16361, lsl #48
fmov d2, x8
fmul d0, d0, d2
fmul d3, d19, d28
fadd d20, d3, d0
str d4, [sp, #11208] ; 8-byte Folded Spill
ldr d24, [sp, #12256] ; 8-byte Folded Reload
fadd d3, d24, d4
fdiv d4, d3, d19
fmul d5, d17, d4
fmul d0, d5, d20
ldr d23, [sp, #12288] ; 8-byte Folded Reload
fadd d0, d23, d0
str d13, [sp, #11200] ; 8-byte Folded Spill
fadd d0, d13, d0
str d12, [sp, #10376] ; 8-byte Folded Spill
fmul d21, d12, d16
fmul d6, d0, d21
fmul d6, d6, d28
ldr d7, [sp, #10368] ; 8-byte Folded Reload
fmul d6, d7, d6
ldr d7, [sp, #12296] ; 8-byte Folded Reload
fmul d6, d6, d7
ldr d7, [sp, #12232] ; 8-byte Folded Reload
fmul d7, d7, d28
fadd d7, d7, d28
fmul d0, d0, d7
str d16, [sp, #9824] ; 8-byte Folded Spill
str d0, [sp, #9176] ; 8-byte Folded Spill
fmul d0, d16, d0
str d14, [sp, #9832] ; 8-byte Folded Spill
str d0, [sp, #9120] ; 8-byte Folded Spill
fmul d0, d14, d0
fmov d16, #3.00000000
fmul d0, d0, d16
fadd d18, d0, d6
fmul d22, d9, d18
fmul d6, d10, d22
ldr d0, [sp, #12048] ; 8-byte Folded Reload
fadd d6, d6, d0
str d21, [sp, #9816] ; 8-byte Folded Spill
str d7, [sp, #10360] ; 8-byte Folded Spill
fmul d11, d7, d21
str d20, [sp, #9056] ; 8-byte Folded Spill
fmul d16, d20, d11
fmul d7, d17, d16
fdiv d14, d7, d19
fdiv d7, d14, d19
str d7, [sp, #9952] ; 8-byte Folded Spill
fmul d3, d3, d7
fmul d5, d5, d11
fmul d7, d5, d28
fsub d3, d3, d7
fmov d7, #1.00000000
fcmp d1, d7
fmul d1, d5, d2
movi d2, #0000000000000000
fmov d0, #1.00000000
str q0, [sp, #11952] ; 16-byte Folded Spill
movi d0, #0000000000000000
stur d0, [x29, #-160] ; 8-byte Folded Spill
fcsel d2, d7, d2, ls
str d2, [sp, #9008] ; 8-byte Folded Spill
fmul d1, d1, d2
fmov d2, #-5.00000000
fmul d1, d1, d2
fadd d1, d3, d1
str d19, [sp, #9064] ; 8-byte Folded Spill
fadd d2, d19, d19
str d2, [sp, #9944] ; 8-byte Folded Spill
fdiv d1, d1, d2
str d17, [sp, #9080] ; 8-byte Folded Spill
fadd d2, d17, d17
str d2, [sp, #9936] ; 8-byte Folded Spill
fmul d1, d2, d1
str d16, [sp, #9048] ; 8-byte Folded Spill
fmul d2, d4, d16
fsub d1, d1, d2
str d1, [sp, #6112] ; 8-byte Folded Spill
fmul d15, d9, d1
fmov d19, d27
fmul d1, d27, d15
fmul d2, d27, d14
str d2, [sp, #7968] ; 8-byte Folded Spill
fadd d1, d2, d1
ldur d21, [x29, #-168] ; 8-byte Folded Reload
fmul d2, d21, d22
fsub d1, d2, d1
str d10, [sp, #11352] ; 8-byte Folded Spill
fmul d2, d10, d11
str d2, [sp, #8128] ; 8-byte Folded Spill
fsub d2, d6, d2
fmul d3, d21, d11
fmov d10, d21
str d3, [sp, #7960] ; 8-byte Folded Spill
fsub d30, d1, d3
ldr d0, [sp, #11128] ; 8-byte Folded Reload
fmul d1, d0, d30
fadd d27, d2, d1
str d27, [sp, #12232] ; 8-byte Folded Spill
mov x8, #49235
movk x8, #28989, lsl #16
movk x8, #40841, lsl #32
movk x8, #16312, lsl #48
mov x9, #45974
movk x9, #34787, lsl #16
movk x9, #35902, lsl #32
movk x9, #16285, lsl #48
fmov d1, x8
str d1, [sp, #12296] ; 8-byte Folded Spill
ldr d0, [sp, #11760] ; 8-byte Folded Reload
fmul d3, d0, d1
fmov d21, x9
ldr d0, [sp, #11496] ; 8-byte Folded Reload
fmul d5, d0, d21
fadd d16, d3, d5
fmul d3, d25, d1
fmul d5, d26, d21
fadd d17, d3, d5
ldr d0, [sp, #11536] ; 8-byte Folded Reload
fmul d3, d16, d0
ldur d1, [x29, #-192] ; 8-byte Folded Reload
fadd d3, d3, d1
mov x8, #50080
movk x8, #49599, lsl #16
movk x8, #32579, lsl #32
movk x8, #16368, lsl #48
fmov d5, x8
fmul d3, d3, d5
mov x8, #50080
movk x8, #49599, lsl #16
movk x8, #32579, lsl #32
movk x8, #49136, lsl #48
fmul d6, d17, d9
str d6, [sp, #9992] ; 8-byte Folded Spill
fmul d6, d9, d6
fmov d7, x8
fmul d6, d6, d7
fadd d3, d3, d6
mov x8, #42264
movk x8, #33609, lsl #16
movk x8, #14594, lsl #32
movk x8, #49188, lsl #48
fmov d6, x8
fadd d6, d11, d6
fsub d3, d3, d6
ldr d1, [sp, #9168] ; 8-byte Folded Reload
fadd d20, d3, d1
str d20, [sp, #9160] ; 8-byte Folded Spill
str d18, [sp, #4424] ; 8-byte Folded Spill
ldr d1, [sp, #12072] ; 8-byte Folded Reload
fadd d1, d1, d18
str d1, [sp, #6536] ; 8-byte Folded Spill
fmul d4, d24, d5
str d17, [sp, #9624] ; 8-byte Folded Spill
fmul d6, d17, d7
ldr d2, [sp, #9512] ; 8-byte Folded Reload
fsub d3, d2, d4
str d6, [sp, #9488] ; 8-byte Folded Spill
fadd d2, d6, d3
str d2, [sp, #9480] ; 8-byte Folded Spill
fmul d3, d2, d9
fsub d3, d1, d3
str d3, [sp, #4416] ; 8-byte Folded Spill
fmul d3, d9, d3
fadd d3, d20, d3
fmul d23, d23, d5
ldr d1, [sp, #9504] ; 8-byte Folded Reload
fadd d6, d23, d1
str d16, [sp, #9648] ; 8-byte Folded Spill
fmul d7, d16, d5
str d7, [sp, #9496] ; 8-byte Folded Spill
fadd d1, d7, d6
str d1, [sp, #9472] ; 8-byte Folded Spill
fmul d6, d1, d0
fadd d0, d6, d3
str d0, [sp, #12288] ; 8-byte Folded Spill
mov x8, #-4863887597560135680
fmov d6, x8
fmul d6, d27, d6
mov x8, #62612
movk x8, #18904, lsl #16
movk x8, #1144, lsl #32
movk x8, #49064, lsl #48
fmov d7, x8
fmul d7, d0, d7
fadd d0, d7, d6
str d0, [sp, #12256] ; 8-byte Folded Spill
ldr d12, [sp, #11160] ; 8-byte Folded Reload
ldr d0, [sp, #11864] ; 8-byte Folded Reload
fmul d7, d0, d12
ldr d8, [sp, #9352] ; 8-byte Folded Reload
fmul d16, d0, d8
str d16, [sp, #7920] ; 8-byte Folded Spill
fsub d7, d7, d16
ldr d16, [sp, #12200] ; 8-byte Folded Reload
ldr d18, [sp, #5224] ; 8-byte Folded Reload
fmul d16, d16, d18
fadd d7, d16, d7
ldr d0, [sp, #12264] ; 8-byte Folded Reload
ldur d6, [x29, #-216] ; 8-byte Folded Reload
fmul d16, d6, d0
fadd d7, d16, d7
ldr d17, [sp, #11976] ; 8-byte Folded Reload
fmul d16, d17, d0
ldr d1, [sp, #10912] ; 8-byte Folded Reload
fmul d17, d17, d1
str d17, [sp, #8120] ; 8-byte Folded Spill
fsub d16, d16, d17
fmul d17, d6, d1
str d17, [sp, #7912] ; 8-byte Folded Spill
fsub d24, d7, d17
ldr d31, [sp, #11376] ; 8-byte Folded Reload
fmul d7, d31, d24
fadd d7, d16, d7
ldr d17, [sp, #12064] ; 8-byte Folded Reload
ldr d13, [sp, #10352] ; 8-byte Folded Reload
fmul d16, d17, d13
fadd d7, d16, d7
ldr d3, [sp, #11104] ; 8-byte Folded Reload
fmul d16, d17, d3
str d16, [sp, #8112] ; 8-byte Folded Spill
fsub d7, d7, d16
mov x8, #46543
movk x8, #48510, lsl #16
movk x8, #46414, lsl #32
movk x8, #16260, lsl #48
fmov d16, x8
fadd d7, d7, d16
ldr d25, [sp, #9384] ; 8-byte Folded Reload
ldr d2, [sp, #11888] ; 8-byte Folded Reload
fmul d16, d2, d25
ldr d26, [sp, #9376] ; 8-byte Folded Reload
ldr d2, [sp, #12320] ; 8-byte Folded Reload
fmul d17, d2, d26
fadd d27, d16, d17
ldr d16, [sp, #9344] ; 8-byte Folded Reload
fmul d16, d16, d27
str d16, [sp, #8096] ; 8-byte Folded Spill
fadd d7, d16, d7
mov x8, #62994
movk x8, #14722, lsl #16
movk x8, #41829, lsl #32
movk x8, #16247, lsl #48
fmov d16, x8
ldr d2, [sp, #12096] ; 8-byte Folded Reload
fmul d16, d2, d16
fadd d7, d7, d16
ldr d16, [sp, #11440] ; 8-byte Folded Reload
fmul d16, d19, d16
fmul d17, d10, d9
str d17, [sp, #1928] ; 8-byte Folded Spill
ldr d19, [sp, #12280] ; 8-byte Folded Reload
fmul d17, d19, d17
fadd d19, d17, d16
mov x8, #18456
movk x8, #63321, lsl #16
movk x8, #33926, lsl #32
movk x8, #48991, lsl #48
fmov d16, x8
ldr d20, [sp, #12112] ; 8-byte Folded Reload
fmul d16, d20, d16
ldur d29, [x29, #-184] ; 8-byte Folded Reload
fmul d17, d29, d26
str d19, [sp, #10768] ; 8-byte Folded Spill
ldr d26, [sp, #12080] ; 8-byte Folded Reload
fmul d19, d19, d26
str d19, [sp, #5208] ; 8-byte Folded Spill
fadd d28, d17, d19
ldr d17, [sp, #12088] ; 8-byte Folded Reload
fmul d17, d28, d17
str d17, [sp, #8080] ; 8-byte Folded Spill
fadd d7, d17, d7
ldr d17, [sp, #10112] ; 8-byte Folded Reload
fmul d19, d17, d26
fmul d17, d29, d25
str d19, [sp, #5216] ; 8-byte Folded Spill
fadd d29, d17, d19
ldr d17, [sp, #10776] ; 8-byte Folded Reload
fmul d17, d17, d29
str d17, [sp, #8072] ; 8-byte Folded Spill
fadd d25, d17, d7
ldur d19, [x29, #-208] ; 8-byte Folded Reload
fmul d7, d19, d25
fadd d7, d7, d16
ldr d17, [sp, #12016] ; 8-byte Folded Reload
fmul d16, d17, d12
fmul d17, d17, d8
str d17, [sp, #7904] ; 8-byte Folded Spill
fsub d16, d16, d17
ldr d17, [sp, #12160] ; 8-byte Folded Reload
fmul d17, d17, d18
fadd d16, d17, d16
ldr d26, [sp, #12336] ; 8-byte Folded Reload
fmul d17, d26, d0
fadd d16, d17, d16
ldr d18, [sp, #12192] ; 8-byte Folded Reload
fmul d17, d18, d0
fmul d18, d18, d1
str d18, [sp, #8064] ; 8-byte Folded Spill
fsub d17, d17, d18
fmul d18, d26, d1
fmov d0, d26
str d18, [sp, #7888] ; 8-byte Folded Spill
fsub d26, d16, d18
fmul d16, d31, d26
fadd d16, d17, d16
ldr d17, [sp, #6720] ; 8-byte Folded Reload
fmul d17, d10, d17
str d24, [sp, #6664] ; 8-byte Folded Spill
fmul d18, d6, d24
fadd d17, d17, d18
str d26, [sp, #6656] ; 8-byte Folded Spill
fmul d18, d0, d26
fadd d17, d18, d17
mov x8, #-7378697629483820647
movk x8, #39322
movk x8, #16297, lsl #48
fmov d18, x8
fmul d17, d17, d18
fadd d16, d16, d17
ldr d18, [sp, #12104] ; 8-byte Folded Reload
fmul d17, d18, d13
fadd d16, d17, d16
fmul d17, d18, d3
str d17, [sp, #8056] ; 8-byte Folded Spill
fsub d16, d16, d17
mov x8, #57269
movk x8, #60105, lsl #16
movk x8, #55991, lsl #32
movk x8, #16301, lsl #48
fmov d17, x8
fadd d16, d16, d17
mov x8, #56877
movk x8, #10885, lsl #16
movk x8, #2572, lsl #32
movk x8, #16289, lsl #48
fmov d17, x8
fmul d17, d2, d17
str d27, [sp, #6184] ; 8-byte Folded Spill
ldr d18, [sp, #9336] ; 8-byte Folded Reload
fmul d18, d18, d27
str d18, [sp, #8032] ; 8-byte Folded Spill
fadd d16, d18, d16
fadd d16, d16, d17
str d28, [sp, #6176] ; 8-byte Folded Spill
ldr d0, [sp, #12144] ; 8-byte Folded Reload
fmul d17, d28, d0
str d17, [sp, #8048] ; 8-byte Folded Spill
fsub d16, d16, d17
str d29, [sp, #6168] ; 8-byte Folded Spill
ldr d0, [sp, #10784] ; 8-byte Folded Reload
fmul d17, d0, d29
str d17, [sp, #8040] ; 8-byte Folded Spill
fadd d24, d17, d16
ldur d18, [x29, #-200] ; 8-byte Folded Reload
fmul d16, d18, d24
fadd d7, d16, d7
ldr d1, [sp, #11600] ; 8-byte Folded Reload
fmul d16, d1, d22
fadd d7, d16, d7
ldr d0, [sp, #12136] ; 8-byte Folded Reload
fmul d16, d0, d15
fmul d17, d0, d14
str d17, [sp, #7952] ; 8-byte Folded Spill
fadd d16, d17, d16
ldr d0, [sp, #11496] ; 8-byte Folded Reload
fmul d17, d0, d22
fsub d16, d17, d16
fmul d17, d1, d11
str d17, [sp, #8024] ; 8-byte Folded Spill
fsub d7, d7, d17
fmul d17, d0, d11
fmov d27, d0
str d17, [sp, #7944] ; 8-byte Folded Spill
fsub d26, d16, d17
ldr d28, [sp, #11128] ; 8-byte Folded Reload
fmul d16, d28, d26
fadd d7, d7, d16
mov x8, #26288
movk x8, #13902, lsl #16
movk x8, #44107, lsl #32
movk x8, #16338, lsl #48
fmov d16, x8
fadd d7, d7, d16
str d4, [sp, #8584] ; 8-byte Folded Spill
fmul d16, d4, d9
str d16, [sp, #4408] ; 8-byte Folded Spill
fmul d16, d9, d16
str d23, [sp, #8576] ; 8-byte Folded Spill
ldr d0, [sp, #11536] ; 8-byte Folded Reload
fmul d17, d23, d0
fadd d16, d16, d17
ldur d0, [x29, #-192] ; 8-byte Folded Reload
fmul d5, d0, d5
str d5, [sp, #8016] ; 8-byte Folded Spill
fadd d5, d16, d5
fmul d4, d5, d21
fadd d4, d4, d7
mov x8, #4363988038922010624
fmov d7, x8
fmul d16, d4, d7
ldr d0, [sp, #12256] ; 8-byte Folded Reload
fadd d6, d0, d16
mov x8, #36544
movk x8, #43611, lsl #16
movk x8, #860, lsl #32
movk x8, #16326, lsl #48
fmov d16, x8
fmul d16, d20, d16
str d25, [sp, #1920] ; 8-byte Folded Spill
fmul d17, d18, d25
fsub d16, d16, d17
str d24, [sp, #1912] ; 8-byte Folded Spill
fmul d17, d19, d24
fadd d16, d17, d16
ldr d19, [sp, #11464] ; 8-byte Folded Reload
fmul d17, d19, d22
fadd d16, d17, d16
ldr d1, [sp, #11760] ; 8-byte Folded Reload
fmul d0, d1, d22
str d15, [sp, #9664] ; 8-byte Folded Spill
ldr d2, [sp, #11896] ; 8-byte Folded Reload
fmul d17, d2, d15
str d14, [sp, #9672] ; 8-byte Folded Spill
fmul d18, d2, d14
str d18, [sp, #7936] ; 8-byte Folded Spill
fadd d17, d18, d17
fsub d0, d0, d17
fmul d17, d19, d11
str d17, [sp, #8008] ; 8-byte Folded Spill
fsub d16, d16, d17
str d11, [sp, #9024] ; 8-byte Folded Spill
fmul d17, d1, d11
str d17, [sp, #7928] ; 8-byte Folded Spill
fsub d18, d0, d17
fmov d0, d28
fmul d0, d28, d18
fadd d0, d16, d0
str d30, [sp, #10200] ; 8-byte Folded Spill
fmul d16, d10, d30
str d26, [sp, #10192] ; 8-byte Folded Spill
fmul d17, d27, d26
fadd d16, d16, d17
str d18, [sp, #10184] ; 8-byte Folded Spill
fmul d17, d1, d18
fadd d16, d17, d16
ldr d1, [sp, #12152] ; 8-byte Folded Reload
fmul d16, d16, d1
fadd d0, d0, d16
mov x8, #21969
movk x8, #1325, lsl #16
movk x8, #7976, lsl #32
movk x8, #16367, lsl #48
fmov d16, x8
fadd d0, d0, d16
ldr d1, [sp, #12296] ; 8-byte Folded Reload
fmul d2, d5, d1
fadd d0, d2, d0
mov x8, #4354980839667269632
fmov d2, x8
ldr d1, [sp, #12232] ; 8-byte Folded Reload
fmul d1, d1, d2
mov x8, #47272
movk x8, #56762, lsl #16
movk x8, #43178, lsl #32
movk x8, #49060, lsl #48
fmov d2, x8
ldr d3, [sp, #12288] ; 8-byte Folded Reload
fmul d2, d3, d2
fadd d1, d1, d2
fadd d1, d1, d4
fadd d22, d6, d0
fmul d0, d0, d7
fadd d23, d1, d0
mov x8, #2356
movk x8, #12413, lsl #16
movk x8, #55910, lsl #32
movk x8, #49095, lsl #48
fmov d13, x8
ldr q0, [sp, #6672] ; 16-byte Folded Reload
fmul d6, d0, d13
ldr d25, [sp, #11584] ; 8-byte Folded Reload
fmul d0, d6, d25
ldur d27, [x29, #-224] ; 8-byte Folded Reload
fsub d0, d27, d0
mov x8, #5915
movk x8, #64709, lsl #16
movk x8, #30489, lsl #32
movk x8, #16392, lsl #48
fmov d1, x8
fmul d0, d0, d1
mov x8, #5915
movk x8, #64709, lsl #16
movk x8, #30489, lsl #32
movk x8, #49160, lsl #48
ldr q2, [sp, #6688] ; 16-byte Folded Reload
fmul d7, d2, d13
ldr d24, [sp, #12056] ; 8-byte Folded Reload
fmul d2, d7, d24
str d2, [sp, #8392] ; 8-byte Folded Spill
fmul d2, d24, d2
fmov d3, x8
fmul d2, d2, d3
fadd d0, d0, d2
mov x8, #61302
movk x8, #27691, lsl #16
movk x8, #64897, lsl #32
movk x8, #16445, lsl #48
fmov d2, x8
fadd d0, d0, d2
mov x8, #6432
movk x8, #24166, lsl #16
movk x8, #7623, lsl #32
movk x8, #16309, lsl #48
fmov d2, x8
ldr d4, [sp, #12168] ; 8-byte Folded Reload
fmul d4, d4, d2
mov x8, #30506
movk x8, #37777, lsl #16
movk x8, #58002, lsl #32
movk x8, #16361, lsl #48
fmov d5, x8
fadd d4, d4, d5
ldr d5, [sp, #12304] ; 8-byte Folded Reload
fmul d5, d5, d1
ldur d16, [x29, #-240] ; 8-byte Folded Reload
fmul d17, d16, d1
ldr d1, [sp, #12312] ; 8-byte Folded Reload
fmul d1, d1, d2
ldr d16, [sp, #12328] ; 8-byte Folded Reload
fmul d2, d16, d2
ldr d16, [sp, #9160] ; 8-byte Folded Reload
fadd d26, d4, d16
ldr d4, [sp, #9480] ; 8-byte Folded Reload
fsub d16, d4, d1
ldr d1, [sp, #9472] ; 8-byte Folded Reload
fadd d4, d2, d1
fmul d1, d16, d24
ldr d21, [sp, #6536] ; 8-byte Folded Reload
fsub d1, d21, d1
str d1, [sp, #4400] ; 8-byte Folded Spill
fmul d1, d24, d1
fadd d1, d26, d1
fmul d2, d4, d25
fadd d1, d2, d1
mov x8, #43115
movk x8, #62349, lsl #16
movk x8, #30721, lsl #32
movk x8, #49115, lsl #48
fmov d2, x8
fmul d1, d1, d2
ldr q19, [sp, #12000] ; 16-byte Folded Reload
fmul d2, d19, d23
fadd d1, d2, d1
ldr q18, [sp, #11840] ; 16-byte Folded Reload
fmul d2, d18, d22
fadd d10, d2, d1
fmul d1, d5, d24
str d1, [sp, #4384] ; 8-byte Folded Spill
fmul d1, d24, d1
fmul d2, d17, d25
fadd d1, d1, d2
str d26, [sp, #9088] ; 8-byte Folded Spill
fadd d20, d0, d26
str d5, [sp, #8552] ; 8-byte Folded Spill
str d16, [sp, #8536] ; 8-byte Folded Spill
fsub d0, d16, d5
str d7, [sp, #10696] ; 8-byte Folded Spill
fmul d2, d7, d3
str d2, [sp, #8560] ; 8-byte Folded Spill
fadd d0, d2, d0
str d0, [sp, #9464] ; 8-byte Folded Spill
ldr d2, [sp, #12208] ; 8-byte Folded Reload
fmul d0, d0, d2
fmov d5, d21
fsub d0, d21, d0
str d0, [sp, #4392] ; 8-byte Folded Spill
fmul d0, d2, d0
str d20, [sp, #8192] ; 8-byte Folded Spill
fadd d12, d20, d0
fmul d5, d27, d3
str d6, [sp, #10704] ; 8-byte Folded Spill
fmul d2, d6, d3
str d4, [sp, #8544] ; 8-byte Folded Spill
str d17, [sp, #8528] ; 8-byte Folded Spill
fadd d0, d17, d4
str d2, [sp, #8568] ; 8-byte Folded Spill
fadd d0, d2, d0
mov x8, #31036
movk x8, #52462, lsl #16
movk x8, #23267, lsl #32
movk x8, #16406, lsl #48
str d5, [sp, #6752] ; 8-byte Folded Spill
fsub d8, d5, d1
mov x28, #28530
movk x28, #30490, lsl #16
movk x28, #27495, lsl #32
movk x28, #49093, lsl #48
mov x9, #64990
movk x9, #28266, lsl #16
movk x9, #45172, lsl #32
movk x9, #16414, lsl #48
mov x27, #52090
movk x27, #42545, lsl #16
movk x27, #26349, lsl #32
movk x27, #49113, lsl #48
fmov d4, x8
fmov d2, x9
str q22, [sp, #5728] ; 16-byte Folded Spill
fmul d9, d19, d22
str q23, [sp, #5712] ; 16-byte Folded Spill
fmul d16, d18, d23
str d0, [sp, #9456] ; 8-byte Folded Spill
ldr d1, [sp, #11784] ; 8-byte Folded Reload
fmul d15, d0, d1
ldr d0, [sp, #12040] ; 8-byte Folded Reload
str d2, [sp, #9216] ; 8-byte Folded Spill
fmul d0, d0, d2
str d0, [sp, #11064] ; 8-byte Folded Spill
movi d11, #0000000000000000
movi d0, #0000000000000000
str q0, [sp, #11216] ; 16-byte Folded Spill
movi d0, #0000000000000000
cbz x20, LBB19_23
; %bb.22:
ldr d11, [x20, #48]
fmov d0, d11
fmov d14, d4
stur d16, [x29, #-192] ; 8-byte Folded Spill
bl _sin
str q0, [sp, #11216] ; 16-byte Folded Spill
ldr d0, [x20, #80]
stur d0, [x29, #-160] ; 8-byte Folded Spill
bl _cos
ldur d16, [x29, #-192] ; 8-byte Folded Reload
fmov d4, d14
str q0, [sp, #11952] ; 16-byte Folded Spill
ldr d0, [x20, #112]
LBB19_23:
str x26, [sp, #12328] ; 8-byte Folded Spill
str x25, [sp, #9208] ; 8-byte Folded Spill
stur x24, [x29, #-240] ; 8-byte Folded Spill
str x23, [sp, #12312] ; 8-byte Folded Spill
fsub d1, d9, d16
str q1, [sp, #6576] ; 16-byte Folded Spill
fadd d1, d15, d12
str d1, [sp, #11520] ; 8-byte Folded Spill
fmov d15, x21
fmov d14, x22
fsub d1, d4, d10
stur d1, [x29, #-192] ; 8-byte Folded Spill
str x28, [sp, #11528] ; 8-byte Folded Spill
mov x22, #64990
movk x22, #28266, lsl #16
movk x22, #45172, lsl #32
movk x22, #49182, lsl #48
fmul d12, d8, d13
ldr d1, [sp, #11416] ; 8-byte Folded Reload
ldr d2, [sp, #9216] ; 8-byte Folded Reload
fmul d1, d1, d2
str d1, [sp, #11056] ; 8-byte Folded Spill
stur x27, [x29, #-224] ; 8-byte Folded Spill
ldr d1, [sp, #11064] ; 8-byte Folded Reload
ldr d13, [sp, #12208] ; 8-byte Folded Reload
fmul d1, d1, d13
str d1, [sp, #6096] ; 8-byte Folded Spill
bl ___sincos_stret
str q0, [sp, #11808] ; 16-byte Folded Spill
str q1, [sp, #11984] ; 16-byte Folded Spill
fmov d0, d11
bl _cos
; kill: def $d0 killed $d0 def $q0
ldr q3, [sp, #10016] ; 16-byte Folded Reload
fmul d4, d3, d0
ldr q2, [sp, #10032] ; 16-byte Folded Reload
ldr q5, [sp, #11216] ; 16-byte Folded Reload
fmul d1, d2, d5
fsub d4, d4, d1
str q4, [sp, #9536] ; 16-byte Folded Spill
ldr q1, [sp, #11952] ; 16-byte Folded Reload
fmul d8, d1, d4
fmul d3, d3, d5
str q0, [sp, #10720] ; 16-byte Folded Spill
fmul d1, d2, d0
fadd d0, d3, d1
str q0, [sp, #9520] ; 16-byte Folded Spill
ldur d0, [x29, #-160] ; 8-byte Folded Reload
bl _sin
; kill: def $d0 killed $d0 def $q0
ldr q2, [sp, #9520] ; 16-byte Folded Reload
fmul d1, d0, d2
fsub d3, d8, d1
ldr q6, [sp, #11984] ; 16-byte Folded Reload
fmul d4, d6, d3
str q0, [sp, #11792] ; 16-byte Folded Spill
ldr q1, [sp, #9536] ; 16-byte Folded Reload
fmul d1, d0, d1
ldr q0, [sp, #11952] ; 16-byte Folded Reload
fmul d2, d0, d2
fadd d2, d1, d2
ldr q5, [sp, #11808] ; 16-byte Folded Reload
fmul d1, d5, d2
fsub d8, d4, d1
mov x8, #4359484439294640128
fmov d0, x8
fmul d0, d8, d0
str q3, [sp, #6624] ; 16-byte Folded Spill
fmul d1, d5, d3
str q2, [sp, #6608] ; 16-byte Folded Spill
fmul d2, d6, d2
fadd d9, d1, d2
mov x8, #4354980839667269632
fmov d1, x8
fmul d1, d9, d1
fadd d0, d0, d1
stur d0, [x29, #-160] ; 8-byte Folded Spill
mov x8, #4363988038922010624
fmov d0, x8
fmul d10, d9, d0
fsub d0, d8, d10
str d0, [sp, #11680] ; 8-byte Folded Spill
cbz x20, LBB19_25
; %bb.24:
ldr d0, [x20, #144]
b LBB19_26
LBB19_25:
movi d0, #0000000000000000
LBB19_26:
ldr d3, [sp, #11784] ; 8-byte Folded Reload
ldr q1, [sp, #10032] ; 16-byte Folded Reload
ldr d2, [sp, #12328] ; 8-byte Folded Reload
fmul d2, d1, d2
str d2, [sp, #12328] ; 8-byte Folded Spill
ldr q2, [sp, #10016] ; 16-byte Folded Reload
ldr d4, [sp, #9208] ; 8-byte Folded Reload
fmul d4, d2, d4
str d4, [sp, #12256] ; 8-byte Folded Spill
ldur d4, [x29, #-240] ; 8-byte Folded Reload
fmul d4, d2, d4
str d4, [sp, #12288] ; 8-byte Folded Spill
ldr d4, [sp, #12312] ; 8-byte Folded Reload
fmul d4, d1, d4
str d4, [sp, #12232] ; 8-byte Folded Spill
fmul d4, d2, d15
str d4, [sp, #12144] ; 8-byte Folded Spill
fmul d4, d1, d14
str d4, [sp, #12088] ; 8-byte Folded Spill
fmul d11, d1, d15
str d14, [sp, #6424] ; 8-byte Folded Spill
fmul d14, d2, d14
mov x21, #47887
movk x21, #56309, lsl #16
movk x21, #15746, lsl #32
movk x21, #16444, lsl #48
mov x23, #47887
movk x23, #56309, lsl #16
movk x23, #15746, lsl #32
movk x23, #49212, lsl #48
ldr q1, [sp, #9600] ; 16-byte Folded Reload
ldr d2, [sp, #11528] ; 8-byte Folded Reload
fmul d1, d1, d2
str d1, [sp, #9448] ; 8-byte Folded Spill
str x22, [sp, #9312] ; 8-byte Folded Spill
mov x22, #11201
movk x22, #50599, lsl #16
movk x22, #31589, lsl #32
movk x22, #49010, lsl #48
ldur d1, [x29, #-192] ; 8-byte Folded Reload
fadd d1, d1, d12
str q1, [sp, #6544] ; 16-byte Folded Spill
ldr q1, [sp, #11824] ; 16-byte Folded Reload
ldr q2, [sp, #6576] ; 16-byte Folded Reload
fmul d12, d1, d2
ldr d1, [sp, #11520] ; 8-byte Folded Reload
ldur d2, [x29, #-224] ; 8-byte Folded Reload
fmul d15, d1, d2
ldr d1, [sp, #6096] ; 8-byte Folded Reload
fmul d1, d13, d1
str d1, [sp, #12096] ; 8-byte Folded Spill
ldr d1, [sp, #11056] ; 8-byte Folded Reload
fmul d13, d1, d3
bl ___sincos_stret
fmov d17, d0
fmov d18, d1
ldr d0, [sp, #11680] ; 8-byte Folded Reload
fmul d0, d0, d1
mov x8, #4363988038922010624
fmov d1, x8
fmul d1, d8, d1
fsub d2, d1, d9
str d2, [sp, #11488] ; 8-byte Folded Spill
fmul d2, d2, d17
fadd d2, d0, d2
mov x8, #-7378697629483820647
movk x8, #39322
movk x8, #16297, lsl #48
fmov d0, x8
str d2, [sp, #12296] ; 8-byte Folded Spill
fmul d3, d2, d0
ldur d2, [x29, #-160] ; 8-byte Folded Reload
str d3, [sp, #11472] ; 8-byte Folded Spill
fmul d2, d2, d3
mov x8, #4359484439294640128
fmov d3, x8
fmul d4, d9, d3
mov x8, #4354980839667269632
fmov d5, x8
fmul d6, d8, d5
fsub d16, d4, d6
mov x8, #11201
movk x8, #50599, lsl #16
movk x8, #31589, lsl #32
movk x8, #49010, lsl #48
fmov d4, x8
ldr q6, [sp, #9520] ; 16-byte Folded Reload
fmul d4, d6, d4
mov x8, #52090
movk x8, #42545, lsl #16
movk x8, #26349, lsl #32
movk x8, #49113, lsl #48
fmov d6, x8
ldr q7, [sp, #9536] ; 16-byte Folded Reload
fmul d6, d7, d6
fadd d4, d4, d6
str d4, [sp, #12080] ; 8-byte Folded Spill
ldur d6, [x29, #-232] ; 8-byte Folded Reload
fadd d4, d6, d4
mov x8, #43115
movk x8, #62349, lsl #16
movk x8, #30721, lsl #32
movk x8, #49115, lsl #48
fmov d6, x8
ldr q7, [sp, #6624] ; 16-byte Folded Reload
fmul d6, d7, d6
str d6, [sp, #12112] ; 8-byte Folded Spill
fadd d4, d4, d6
mov x8, #62612
movk x8, #18904, lsl #16
movk x8, #1144, lsl #32
movk x8, #49064, lsl #48
fmov d6, x8
fmul d6, d9, d6
mov x8, #47272
movk x8, #56762, lsl #16
movk x8, #43178, lsl #32
movk x8, #49060, lsl #48
fmov d7, x8
fmul d7, d8, d7
fadd d6, d6, d7
stur d6, [x29, #-224] ; 8-byte Folded Spill
fadd d19, d4, d6
fadd d6, d9, d1
mov x8, #36544
movk x8, #43611, lsl #16
movk x8, #860, lsl #32
movk x8, #16326, lsl #48
fmov d1, x8
fmul d1, d6, d1
fadd d7, d8, d10
mov x8, #18456
movk x8, #63321, lsl #16
movk x8, #33926, lsl #32
movk x8, #48991, lsl #48
fmov d4, x8
fmul d4, d7, d4
fadd d1, d1, d4
mov x8, #63706
movk x8, #13221, lsl #16
movk x8, #1281, lsl #32
movk x8, #48977, lsl #48
fmov d4, x8
fmul d4, d16, d4
fadd d1, d1, d4
str d1, [sp, #12264] ; 8-byte Folded Spill
str d19, [sp, #11288] ; 8-byte Folded Spill
fadd d19, d19, d1
str d6, [sp, #11632] ; 8-byte Folded Spill
fmul d1, d6, d18
str d7, [sp, #11688] ; 8-byte Folded Spill
fmul d4, d7, d17
fadd d1, d1, d4
str d1, [sp, #11776] ; 8-byte Folded Spill
fmul d1, d1, d0
fadd d1, d19, d1
mov x8, #-7378697629483820647
movk x8, #39322
movk x8, #49049, lsl #48
fmov d4, x8
fadd d4, d1, d4
fmov d1, #-0.50000000
fmul d1, d4, d1
str d4, [sp, #10176] ; 8-byte Folded Spill
fadd d1, d4, d1
str d19, [sp, #12168] ; 8-byte Folded Spill
fsub d1, d1, d19
stur d16, [x29, #-192] ; 8-byte Folded Spill
str d1, [sp, #11360] ; 8-byte Folded Spill
fmul d1, d16, d1
fadd d1, d2, d1
str d18, [sp, #12304] ; 8-byte Folded Spill
fmul d2, d18, d3
str d17, [sp, #12312] ; 8-byte Folded Spill
fmul d3, d17, d5
fsub d2, d2, d3
str d2, [sp, #12152] ; 8-byte Folded Spill
fmul d6, d2, d0
fadd d0, d6, d1
str d0, [sp, #11728] ; 8-byte Folded Spill
cbz x20, LBB19_28
; %bb.27:
ldr d0, [x20, #56]
ldur d1, [x29, #-256] ; 8-byte Folded Reload
fadd d16, d1, d0
ldr d0, [x20, #88]
fadd d17, d16, d0
ldr d0, [x20, #120]
fadd d1, d17, d0
ldr d3, [x20, #152]
b LBB19_29
LBB19_28:
movi d3, #0000000000000000
ldur d0, [x29, #-256] ; 8-byte Folded Reload
fadd d1, d0, d3
fmov d16, d1
fmov d17, d1
LBB19_29:
ldr d18, [sp, #9320] ; 8-byte Folded Reload
ldr d0, [sp, #12328] ; 8-byte Folded Reload
ldr d2, [sp, #12256] ; 8-byte Folded Reload
fadd d0, d2, d0
str d0, [sp, #8656] ; 8-byte Folded Spill
ldr d0, [sp, #12288] ; 8-byte Folded Reload
ldr d2, [sp, #12232] ; 8-byte Folded Reload
fadd d0, d0, d2
str d0, [sp, #8760] ; 8-byte Folded Spill
ldr d0, [sp, #12144] ; 8-byte Folded Reload
ldr d2, [sp, #12088] ; 8-byte Folded Reload
fsub d0, d0, d2
str d0, [sp, #8752] ; 8-byte Folded Spill
fadd d0, d14, d11
str d0, [sp, #8768] ; 8-byte Folded Spill
str x21, [sp, #9200] ; 8-byte Folded Spill
str x23, [sp, #9192] ; 8-byte Folded Spill
fadd d0, d12, d15
str d0, [sp, #11504] ; 8-byte Folded Spill
mov x21, #42186
movk x21, #52566, lsl #16
movk x21, #11879, lsl #32
movk x21, #16425, lsl #48
ldr d0, [sp, #12096] ; 8-byte Folded Reload
fadd d0, d0, d13
str d0, [sp, #11424] ; 8-byte Folded Spill
fadd d24, d1, d3
str d24, [sp, #12232] ; 8-byte Folded Spill
mov x8, #11201
movk x8, #50599, lsl #16
movk x8, #31589, lsl #32
movk x8, #49010, lsl #48
fmov d0, x8
fmov d15, d1
ldr q1, [sp, #9536] ; 16-byte Folded Reload
fmul d0, d1, d0
mov x8, #52090
movk x8, #42545, lsl #16
movk x8, #26349, lsl #32
movk x8, #16345, lsl #48
fmov d1, x8
ldr q2, [sp, #9520] ; 16-byte Folded Reload
fmul d1, d2, d1
fadd d0, d0, d1
str d0, [sp, #11408] ; 8-byte Folded Spill
fmul d0, d0, d16
str d0, [sp, #11016] ; 8-byte Folded Spill
ldur d1, [x29, #-248] ; 8-byte Folded Reload
fadd d2, d1, d0
mov x8, #43115
movk x8, #62349, lsl #16
movk x8, #30721, lsl #32
movk x8, #49115, lsl #48
fmov d0, x8
ldr q1, [sp, #6608] ; 16-byte Folded Reload
fmul d0, d1, d0
str d0, [sp, #11304] ; 8-byte Folded Spill
fmul d0, d0, d17
str d2, [sp, #10312] ; 8-byte Folded Spill
fsub d2, d2, d0
mov x8, #62612
movk x8, #18904, lsl #16
movk x8, #1144, lsl #32
movk x8, #49064, lsl #48
fmov d0, x8
fmul d0, d8, d0
mov x8, #47272
movk x8, #56762, lsl #16
movk x8, #43178, lsl #32
movk x8, #16292, lsl #48
fmov d1, x8
fmul d1, d9, d1
fadd d0, d0, d1
str d0, [sp, #12328] ; 8-byte Folded Spill
fmul d0, d0, d15
str d0, [sp, #11008] ; 8-byte Folded Spill
str d2, [sp, #10296] ; 8-byte Folded Spill
fadd d4, d0, d2
mov x8, #36544
movk x8, #43611, lsl #16
movk x8, #860, lsl #32
movk x8, #16326, lsl #48
fmov d0, x8
ldr d2, [sp, #11680] ; 8-byte Folded Reload
fmul d0, d2, d0
mov x8, #18456
movk x8, #63321, lsl #16
movk x8, #33926, lsl #32
movk x8, #48991, lsl #48
fmov d1, x8
fmov d19, d3
stur d3, [x29, #-240] ; 8-byte Folded Spill
ldr d3, [sp, #11488] ; 8-byte Folded Reload
fmul d1, d3, d1
fadd d0, d0, d1
mov x8, #63706
movk x8, #13221, lsl #16
movk x8, #1281, lsl #32
movk x8, #48977, lsl #48
fmov d1, x8
ldur d23, [x29, #-160] ; 8-byte Folded Reload
fmul d1, d23, d1
fadd d0, d0, d1
str d0, [sp, #12256] ; 8-byte Folded Spill
fmul d0, d0, d15
str d0, [sp, #11000] ; 8-byte Folded Spill
str d4, [sp, #11192] ; 8-byte Folded Spill
fadd d22, d0, d4
str d22, [sp, #11744] ; 8-byte Folded Spill
ldr d4, [sp, #12296] ; 8-byte Folded Reload
ldr d1, [sp, #11472] ; 8-byte Folded Reload
fmul d0, d4, d1
fmov d20, d1
ldr d7, [sp, #11360] ; 8-byte Folded Reload
ldr d1, [sp, #11776] ; 8-byte Folded Reload
fmul d1, d1, d7
fadd d0, d0, d1
ldr d5, [sp, #12152] ; 8-byte Folded Reload
fmul d1, d5, d6
fmov d21, d5
fadd d14, d1, d0
fmul d0, d4, d14
ldr d5, [sp, #12304] ; 8-byte Folded Reload
fmul d1, d3, d5
ldr d4, [sp, #12312] ; 8-byte Folded Reload
fmul d2, d2, d4
fsub d2, d1, d2
fmul d1, d2, d20
fmov d20, d2
stur d2, [x29, #-232] ; 8-byte Folded Spill
ldr d2, [sp, #11688] ; 8-byte Folded Reload
fmul d2, d2, d5
ldr d3, [sp, #11632] ; 8-byte Folded Reload
fmul d3, d3, d4
fsub d2, d2, d3
str d2, [sp, #11768] ; 8-byte Folded Spill
fmul d2, d2, d7
fadd d1, d1, d2
mov x8, #-4868391197187506176
fmov d2, x8
fmul d2, d5, d2
mov x8, #-4863887597560135680
fmov d3, x8
fmul d3, d4, d3
fadd d3, d2, d3
str d3, [sp, #12144] ; 8-byte Folded Spill
str d6, [sp, #1888] ; 8-byte Folded Spill
fmul d2, d3, d6
fadd d12, d2, d1
fmul d1, d20, d12
fadd d0, d0, d1
ldr d4, [sp, #11728] ; 8-byte Folded Reload
fmul d1, d23, d4
fadd d0, d1, d0
str d0, [sp, #12288] ; 8-byte Folded Spill
fmul d0, d0, d24
fmul d5, d23, d19
stur d5, [x29, #-248] ; 8-byte Folded Spill
fmul d1, d21, d14
fmul d2, d3, d12
fadd d1, d1, d2
fadd d1, d4, d1
str d1, [sp, #11720] ; 8-byte Folded Spill
fmul d1, d1, d5
fsub d0, d0, d1
fadd d10, d22, d0
mov x8, #6148914691236517205
movk x8, #16341, lsl #48
fmov d0, x8
str d0, [sp, #11512] ; 8-byte Folded Spill
fsub d0, d0, d10
mov x8, #4632233691727265792
fmov d1, x8
str d1, [sp, #11928] ; 8-byte Folded Spill
fmul d0, d0, d1
ldr q1, [sp, #9584] ; 16-byte Folded Reload
ldr d2, [sp, #11528] ; 8-byte Folded Reload
fmul d1, d1, d2
str d1, [sp, #9440] ; 8-byte Folded Spill
ldr d1, [sp, #11064] ; 8-byte Folded Reload
ldr d2, [sp, #9464] ; 8-byte Folded Reload
fsub d1, d2, d1
str d1, [sp, #11096] ; 8-byte Folded Spill
ldr d1, [sp, #9448] ; 8-byte Folded Reload
ldr d3, [sp, #9312] ; 8-byte Folded Reload
fmul d1, d1, d3
str d1, [sp, #8520] ; 8-byte Folded Spill
str x22, [sp, #11184] ; 8-byte Folded Spill
ldr q1, [sp, #12240] ; 16-byte Folded Reload
ldr q2, [sp, #6544] ; 16-byte Folded Reload
fmul d1, d1, d2
str d1, [sp, #11312] ; 8-byte Folded Spill
fmul d1, d18, d3
str d1, [sp, #9240] ; 8-byte Folded Spill
fmov d13, d16
str d16, [sp, #12072] ; 8-byte Folded Spill
fmov d11, d17
str d17, [sp, #12048] ; 8-byte Folded Spill
bl _tanh
str d0, [sp, #12088] ; 8-byte Folded Spill
fmul d1, d0, d0
fmov d0, #1.00000000
fsub d0, d0, d1
str d0, [sp, #10816] ; 8-byte Folded Spill
ldr d9, [sp, #10176] ; 8-byte Folded Reload
fmul d0, d9, d9
mov x8, #26865
movk x8, #35043, lsl #16
movk x8, #63669, lsl #32
movk x8, #16100, lsl #48
fmov d1, x8
str d1, [sp, #11920] ; 8-byte Folded Spill
fadd d0, d0, d1
fsqrt d0, d0
fmov d1, #1.50000000
str d0, [sp, #9112] ; 8-byte Folded Spill
bl _pow
mov x8, #10523
movk x8, #38535, lsl #16
movk x8, #12921, lsl #32
movk x8, #16642, lsl #48
fmov d1, x8
fmul d8, d0, d1
fmov d0, #3.00000000
fmul d0, d10, d0
fmov d1, #1.00000000
fsub d0, d1, d0
str d0, [sp, #10856] ; 8-byte Folded Spill
fmul d10, d8, d0
mov x8, #211106232532992
movk x8, #49266, lsl #48
fmov d0, x8
str d0, [sp, #11168] ; 8-byte Folded Spill
fmul d0, d9, d0
bl _tanh
fmov d25, #0.50000000
str d0, [sp, #9104] ; 8-byte Folded Spill
fmul d0, d0, d25
fadd d17, d0, d25
ldr d0, [sp, #11416] ; 8-byte Folded Reload
ldr d1, [sp, #11408] ; 8-byte Folded Reload
fadd d0, d0, d1
str d0, [sp, #10688] ; 8-byte Folded Spill
ldr d1, [sp, #11304] ; 8-byte Folded Reload
fsub d0, d0, d1
str d0, [sp, #10680] ; 8-byte Folded Spill
ldr d1, [sp, #12328] ; 8-byte Folded Reload
fadd d0, d0, d1
str d0, [sp, #11048] ; 8-byte Folded Spill
ldr d1, [sp, #12256] ; 8-byte Folded Reload
fadd d20, d0, d1
str d20, [sp, #10752] ; 8-byte Folded Spill
ldr d4, [sp, #12080] ; 8-byte Folded Reload
fmul d0, d4, d13
ldr d1, [sp, #11640] ; 8-byte Folded Reload
fsub d1, d1, d0
ldr d3, [sp, #12112] ; 8-byte Folded Reload
fmul d0, d3, d11
str d1, [sp, #10872] ; 8-byte Folded Spill
fsub d1, d1, d0
ldur d5, [x29, #-224] ; 8-byte Folded Reload
fmul d0, d5, d15
str d1, [sp, #10992] ; 8-byte Folded Spill
fsub d1, d1, d0
ldr d6, [sp, #12264] ; 8-byte Folded Reload
str d15, [sp, #11624] ; 8-byte Folded Spill
fmul d0, d6, d15
str d1, [sp, #11320] ; 8-byte Folded Spill
fsub d16, d1, d0
str d16, [sp, #11432] ; 8-byte Folded Spill
ldur d23, [x29, #-192] ; 8-byte Folded Reload
ldur d0, [x29, #-240] ; 8-byte Folded Reload
fmul d7, d23, d0
ldr d0, [sp, #11720] ; 8-byte Folded Reload
fmul d0, d0, d7
str d14, [sp, #12096] ; 8-byte Folded Spill
ldr d21, [sp, #11776] ; 8-byte Folded Reload
fmul d1, d21, d14
str d12, [sp, #11880] ; 8-byte Folded Spill
ldr d22, [sp, #11768] ; 8-byte Folded Reload
fmul d2, d22, d12
fadd d1, d1, d2
ldr d28, [sp, #11728] ; 8-byte Folded Reload
fmul d2, d23, d28
fadd d2, d2, d1
ldr d27, [sp, #12232] ; 8-byte Folded Reload
fmul d1, d2, d27
fsub d0, d0, d1
fadd d18, d16, d0
fmul d0, d18, d18
ldur d30, [x29, #-248] ; 8-byte Folded Reload
fmul d1, d2, d30
fmov d24, d2
str d2, [sp, #11912] ; 8-byte Folded Spill
ldr d26, [sp, #12288] ; 8-byte Folded Reload
fmul d2, d26, d7
fmov d31, d7
fsub d16, d1, d2
fmul d1, d16, d16
fadd d0, d1, d0
ldr d13, [sp, #11920] ; 8-byte Folded Reload
fadd d0, d0, d13
fsqrt d7, d0
mov x8, #-7378697629483820647
movk x8, #39322
movk x8, #16329, lsl #48
fmov d0, x8
str d0, [sp, #11072] ; 8-byte Folded Spill
fdiv d0, d7, d0
fmov d1, #1.00000000
fminnm d1, d0, d1
mov x8, #-7378697629483820647
movk x8, #39322
movk x8, #16361, lsl #48
fmov d2, x8
fmul d1, d1, d2
fmov d29, d2
str d2, [sp, #10648] ; 8-byte Folded Spill
fmul d2, d7, d25
fadd d19, d2, d1
ldr d1, [sp, #12040] ; 8-byte Folded Reload
fadd d1, d1, d4
str d1, [sp, #9072] ; 8-byte Folded Spill
fadd d1, d1, d3
str d1, [sp, #9096] ; 8-byte Folded Spill
fadd d1, d1, d5
str d1, [sp, #10096] ; 8-byte Folded Spill
fadd d1, d1, d6
str d1, [sp, #10760] ; 8-byte Folded Spill
fadd d1, d1, d24
fdiv d2, d1, d7
fmul d3, d18, d2
fmul d4, d3, d19
fadd d4, d20, d4
fadd d4, d26, d4
str d10, [sp, #10864] ; 8-byte Folded Spill
fmul d20, d17, d10
fmul d5, d20, d4
fmul d5, d5, d25
ldr d6, [sp, #10816] ; 8-byte Folded Reload
fmul d5, d6, d5
ldr d24, [sp, #11928] ; 8-byte Folded Reload
fmul d5, d5, d24
ldr d6, [sp, #12088] ; 8-byte Folded Reload
fmul d6, d6, d25
fadd d6, d6, d25
fmul d4, d6, d4
str d17, [sp, #10280] ; 8-byte Folded Spill
str d4, [sp, #8864] ; 8-byte Folded Spill
fmul d4, d17, d4
str d8, [sp, #10288] ; 8-byte Folded Spill
str d4, [sp, #8848] ; 8-byte Folded Spill
fmul d4, d8, d4
fmov d17, #3.00000000
fmul d4, d4, d17
fadd d26, d5, d4
str d6, [sp, #10808] ; 8-byte Folded Spill
str d20, [sp, #10264] ; 8-byte Folded Spill
fmul d17, d6, d20
str d19, [sp, #9760] ; 8-byte Folded Spill
fmul d6, d17, d19
fmul d4, d18, d6
fdiv d5, d4, d7
fdiv d4, d5, d7
str d4, [sp, #10536] ; 8-byte Folded Spill
fmul d1, d1, d4
fmul d3, d17, d3
fmul d4, d3, d25
fsub d1, d1, d4
fmov d4, #1.00000000
fcmp d0, d4
fmul d0, d3, d29
movi d3, #0000000000000000
fcsel d3, d4, d3, ls
str d3, [sp, #9776] ; 8-byte Folded Spill
fmul d0, d0, d3
fmov d3, #5.00000000
fmul d0, d0, d3
fsub d0, d1, d0
str d7, [sp, #9808] ; 8-byte Folded Spill
fadd d1, d7, d7
str d1, [sp, #10520] ; 8-byte Folded Spill
fdiv d0, d0, d1
str d26, [sp, #8384] ; 8-byte Folded Spill
fmul d1, d27, d26
str d16, [sp, #3888] ; 8-byte Folded Spill
fadd d3, d16, d16
str d3, [sp, #10528] ; 8-byte Folded Spill
fmul d16, d3, d0
str d16, [sp, #9872] ; 8-byte Folded Spill
str d31, [sp, #11968] ; 8-byte Folded Spill
fmul d3, d31, d16
fsub d4, d1, d3
str d18, [sp, #9768] ; 8-byte Folded Spill
fadd d1, d18, d18
str d1, [sp, #10512] ; 8-byte Folded Spill
fmul d0, d1, d0
str d6, [sp, #9792] ; 8-byte Folded Spill
fmul d1, d2, d6
fsub d2, d0, d1
fmul d0, d30, d16
fmul d1, d27, d2
fsub d3, d0, d1
str d2, [sp, #6088] ; 8-byte Folded Spill
fmul d0, d31, d2
fmul d1, d30, d26
fsub d1, d0, d1
str d3, [sp, #9864] ; 8-byte Folded Spill
fmul d0, d23, d3
str d5, [sp, #9152] ; 8-byte Folded Spill
fmul d2, d23, d5
str d2, [sp, #8976] ; 8-byte Folded Spill
fsub d0, d0, d2
str d1, [sp, #5200] ; 8-byte Folded Spill
fadd d0, d1, d0
ldur d3, [x29, #-160] ; 8-byte Folded Reload
fmul d1, d3, d4
fadd d0, d1, d0
str d4, [sp, #9232] ; 8-byte Folded Spill
fmul d1, d28, d4
fmul d2, d28, d17
str d2, [sp, #7880] ; 8-byte Folded Spill
fsub d1, d1, d2
str d17, [sp, #10824] ; 8-byte Folded Spill
fmul d2, d3, d17
str d2, [sp, #8968] ; 8-byte Folded Spill
fsub d0, d0, d2
str d0, [sp, #6592] ; 8-byte Folded Spill
ldr d2, [sp, #11472] ; 8-byte Folded Reload
fmul d0, d2, d0
fadd d0, d1, d0
str d0, [sp, #10656] ; 8-byte Folded Spill
mov x8, #7864
movk x8, #60293, lsl #16
movk x8, #47185, lsl #32
movk x8, #49054, lsl #48
fmov d0, x8
ldr d1, [sp, #12168] ; 8-byte Folded Reload
fadd d9, d1, d0
fmul d0, d9, d25
fsub d0, d9, d0
fsub d2, d0, d1
fmul d12, d2, d21
ldr d0, [sp, #12296] ; 8-byte Folded Reload
fmul d0, d0, d12
fmul d14, d2, d22
ldur d1, [x29, #-232] ; 8-byte Folded Reload
fmul d1, d1, d14
fadd d0, d0, d1
str d2, [sp, #11272] ; 8-byte Folded Spill
fmul d8, d23, d2
fmul d1, d3, d8
fadd d15, d1, d0
ldr d0, [sp, #12152] ; 8-byte Folded Reload
fmul d0, d0, d12
ldr d1, [sp, #12144] ; 8-byte Folded Reload
fmul d1, d1, d14
fadd d0, d0, d1
fadd d1, d8, d0
str d1, [sp, #11696] ; 8-byte Folded Spill
fmul d0, d15, d27
fmul d1, d1, d30
fsub d0, d0, d1
ldr d1, [sp, #11744] ; 8-byte Folded Reload
fadd d11, d1, d0
ldr d0, [sp, #11512] ; 8-byte Folded Reload
fsub d0, d0, d11
fmul d0, d0, d24
bl _tanh
str d0, [sp, #11512] ; 8-byte Folded Spill
fmul d1, d0, d0
fmov d10, #1.00000000
fsub d0, d10, d1
str d0, [sp, #10800] ; 8-byte Folded Spill
fmul d0, d9, d9
fadd d0, d0, d13
fsqrt d0, d0
fmov d1, #1.50000000
str d0, [sp, #8736] ; 8-byte Folded Spill
bl _pow
mov x8, #18811
movk x8, #34700, lsl #16
movk x8, #61210, lsl #32
movk x8, #16643, lsl #48
fmov d1, x8
fmul d13, d0, d1
fmov d0, #3.00000000
fmul d0, d11, d0
fsub d0, d10, d0
fmov d10, #1.00000000
str d0, [sp, #10840] ; 8-byte Folded Spill
fmul d11, d13, d0
str d9, [sp, #8888] ; 8-byte Folded Spill
ldr d0, [sp, #11168] ; 8-byte Folded Reload
fmul d0, d9, d0
bl _tanh
movi d31, #0000000000000000
str d0, [sp, #8720] ; 8-byte Folded Spill
fmov d1, #0.50000000
fmul d0, d0, d1
fadd d16, d0, d1
fmov d22, #0.50000000
str d11, [sp, #10848] ; 8-byte Folded Spill
fmul d18, d16, d11
ldr d30, [sp, #11968] ; 8-byte Folded Reload
ldr d11, [sp, #11696] ; 8-byte Folded Reload
fmul d0, d11, d30
str d12, [sp, #12088] ; 8-byte Folded Spill
ldr d28, [sp, #11776] ; 8-byte Folded Reload
fmul d1, d28, d12
str d14, [sp, #11872] ; 8-byte Folded Spill
ldr d12, [sp, #11768] ; 8-byte Folded Reload
fmul d2, d12, d14
fadd d1, d1, d2
ldur d14, [x29, #-192] ; 8-byte Folded Reload
fmul d2, d14, d8
fadd d7, d2, d1
ldr d9, [sp, #12232] ; 8-byte Folded Reload
fmul d1, d7, d9
fsub d0, d0, d1
ldr d1, [sp, #11432] ; 8-byte Folded Reload
fadd d19, d1, d0
fmul d0, d19, d19
ldur d27, [x29, #-248] ; 8-byte Folded Reload
fmul d1, d7, d27
fmul d2, d15, d30
fsub d17, d1, d2
fmul d1, d17, d17
fadd d0, d1, d0
ldr d1, [sp, #11920] ; 8-byte Folded Reload
fadd d0, d0, d1
fsqrt d6, d0
ldr d0, [sp, #11072] ; 8-byte Folded Reload
fdiv d0, d6, d0
fminnm d1, d0, d10
ldr d24, [sp, #10648] ; 8-byte Folded Reload
fmul d1, d1, d24
fmul d2, d6, d22
fadd d20, d2, d1
ldr d23, [sp, #10760] ; 8-byte Folded Reload
fadd d1, d23, d7
fdiv d2, d1, d6
str d2, [sp, #8840] ; 8-byte Folded Spill
fmul d2, d19, d2
fmul d3, d2, d20
ldr d25, [sp, #10752] ; 8-byte Folded Reload
fadd d3, d25, d3
str d15, [sp, #12168] ; 8-byte Folded Spill
fadd d3, d15, d3
fmul d4, d18, d3
fmul d4, d4, d22
ldr d5, [sp, #10800] ; 8-byte Folded Reload
fmul d4, d5, d4
ldr d5, [sp, #11928] ; 8-byte Folded Reload
fmul d4, d4, d5
ldr d5, [sp, #11512] ; 8-byte Folded Reload
fmul d5, d5, d22
fadd d21, d5, d22
fmul d3, d21, d3
str d16, [sp, #10248] ; 8-byte Folded Spill
str d3, [sp, #8688] ; 8-byte Folded Spill
fmul d3, d16, d3
str d13, [sp, #10256] ; 8-byte Folded Spill
str d3, [sp, #8680] ; 8-byte Folded Spill
fmul d3, d13, d3
fmov d5, #3.00000000
fmul d3, d3, d5
fadd d16, d4, d3
fmul d3, d9, d16
str d17, [sp, #3864] ; 8-byte Folded Spill
fadd d5, d17, d17
str d18, [sp, #10240] ; 8-byte Folded Spill
str d21, [sp, #10792] ; 8-byte Folded Spill
fmul d17, d21, d18
str d20, [sp, #9744] ; 8-byte Folded Spill
fmul d4, d17, d20
str d4, [sp, #10832] ; 8-byte Folded Spill
str d19, [sp, #10224] ; 8-byte Folded Spill
fmul d4, d19, d4
fdiv d4, d4, d6
str d4, [sp, #5912] ; 8-byte Folded Spill
fdiv d4, d4, d6
str d4, [sp, #10416] ; 8-byte Folded Spill
fmul d1, d1, d4
fmul d2, d17, d2
fmul d4, d2, d22
fsub d1, d1, d4
fmov d4, #1.00000000
fcmp d0, d4
fcsel d4, d4, d31, ls
fmul d0, d2, d24
str d4, [sp, #9752] ; 8-byte Folded Spill
fmul d0, d0, d4
fmov d2, #5.00000000
fmul d0, d0, d2
fsub d0, d1, d0
str d6, [sp, #10272] ; 8-byte Folded Spill
fadd d1, d6, d6
str d1, [sp, #10408] ; 8-byte Folded Spill
fdiv d0, d0, d1
str d5, [sp, #10472] ; 8-byte Folded Spill
str d0, [sp, #8672] ; 8-byte Folded Spill
fmul d18, d5, d0
fmul d0, d30, d18
fsub d0, d3, d0
str d0, [sp, #9680] ; 8-byte Folded Spill
fmul d0, d8, d0
ldr d1, [sp, #10656] ; 8-byte Folded Reload
fadd d0, d0, d1
str d8, [sp, #11560] ; 8-byte Folded Spill
str d17, [sp, #11072] ; 8-byte Folded Spill
fmul d1, d8, d17
str d1, [sp, #7872] ; 8-byte Folded Spill
fsub d0, d0, d1
mov x8, #51491
movk x8, #54360, lsl #16
movk x8, #13074, lsl #32
movk x8, #16286, lsl #48
fmov d1, x8
fadd d0, d0, d1
mov x8, #56877
movk x8, #10885, lsl #16
movk x8, #2572, lsl #32
movk x8, #16289, lsl #48
fmov d1, x8
ldr d26, [sp, #12152] ; 8-byte Folded Reload
fmul d2, d26, d1
mov x8, #62994
movk x8, #14722, lsl #16
movk x8, #41829, lsl #32
movk x8, #16247, lsl #48
fmov d3, x8
ldr d29, [sp, #12144] ; 8-byte Folded Reload
fmul d4, d29, d3
fadd d2, d2, d4
mov x8, #54125
movk x8, #53060, lsl #16
movk x8, #15481, lsl #32
movk x8, #16273, lsl #48
fmov d19, d28
fmul d4, d28, d1
fmov d24, d12
fmul d5, d12, d3
fadd d4, d4, d5
fmov d5, x8
fmov d20, d14
fmul d6, d14, d5
fadd d6, d6, d4
fadd d17, d2, d5
mov x8, #54806
movk x8, #23353, lsl #16
movk x8, #56949, lsl #32
movk x8, #16326, lsl #48
ldr d22, [sp, #12296] ; 8-byte Folded Reload
fmul d1, d22, d1
ldur d28, [x29, #-232] ; 8-byte Folded Reload
fmul d2, d28, d3
fadd d1, d1, d2
fmul d2, d6, d27
ldur d3, [x29, #-160] ; 8-byte Folded Reload
fmul d3, d3, d5
fadd d1, d3, d1
str d1, [sp, #11920] ; 8-byte Folded Spill
fmul d1, d1, d30
fsub d3, d2, d1
fmov d1, x8
fmul d5, d23, d1
fmul d4, d25, d1
fmul d2, d5, d9
str d2, [sp, #6568] ; 8-byte Folded Spill
str d17, [sp, #11512] ; 8-byte Folded Spill
fmul d2, d17, d2
str d3, [sp, #11168] ; 8-byte Folded Spill
fmul d3, d4, d3
fadd d2, d2, d3
str d5, [sp, #9560] ; 8-byte Folded Spill
fmul d3, d5, d30
str d4, [sp, #9568] ; 8-byte Folded Spill
fmul d4, d4, d27
fadd d25, d3, d4
str d6, [sp, #11928] ; 8-byte Folded Spill
fmul d3, d6, d25
fadd d2, d3, d2
ldr d3, [sp, #11912] ; 8-byte Folded Reload
ldr d4, [sp, #9872] ; 8-byte Folded Reload
fmul d3, d3, d4
fsub d2, d3, d2
ldr d3, [sp, #11720] ; 8-byte Folded Reload
ldr d4, [sp, #8384] ; 8-byte Folded Reload
fmul d3, d3, d4
fsub d2, d2, d3
str d7, [sp, #11744] ; 8-byte Folded Spill
str d18, [sp, #8712] ; 8-byte Folded Spill
fmul d3, d7, d18
fadd d2, d3, d2
str d16, [sp, #8376] ; 8-byte Folded Spill
fmov d3, d11
fmul d3, d11, d16
fsub d2, d2, d3
mov x8, #45033
movk x8, #40035, lsl #16
movk x8, #524, lsl #32
movk x8, #48971, lsl #48
mov x9, #45724
movk x9, #42429, lsl #16
movk x9, #11379, lsl #32
movk x9, #16169, lsl #48
fmov d3, x8
fmov d16, d19
fmul d4, d19, d3
fmov d5, x9
fmul d6, d12, d5
fsub d4, d4, d6
mov x8, #64744
movk x8, #21380, lsl #16
movk x8, #23316, lsl #32
movk x8, #48978, lsl #48
fmov d6, x8
fmov d17, d14
fmul d7, d14, d6
fadd d21, d4, d7
mov x8, #40862
movk x8, #31695, lsl #16
movk x8, #12355, lsl #32
movk x8, #16198, lsl #48
fmul d4, d19, d5
fmov d23, d19
fmov d7, x8
fmul d16, d12, d7
fsub d4, d16, d4
mov x8, #61406
movk x8, #16023, lsl #16
movk x8, #30452, lsl #32
movk x8, #48937, lsl #48
fmov d16, x8
fmul d17, d14, d16
fadd d20, d4, d17
fmul d4, d19, d21
fmul d17, d12, d20
fadd d4, d4, d17
fmov d19, d26
fmul d3, d26, d3
fmov d26, d29
fmul d17, d29, d5
fsub d3, d3, d17
fadd d18, d3, d6
fmov d6, d19
fmul d3, d19, d5
fmul d5, d29, d7
fsub d3, d5, d3
fadd d19, d3, d16
fmul d3, d6, d18
fmov d17, d6
fmul d5, d29, d19
fadd d3, d3, d5
mov x8, #16684
movk x8, #33360, lsl #16
movk x8, #18212, lsl #32
movk x8, #48931, lsl #48
str d3, [sp, #3808] ; 8-byte Folded Spill
fadd d3, d4, d3
fmov d5, x8
fsub d3, d5, d3
str d21, [sp, #9136] ; 8-byte Folded Spill
fmul d5, d22, d21
str d20, [sp, #9144] ; 8-byte Folded Spill
fmov d21, d28
fmul d6, d28, d20
fadd d5, d5, d6
mov x8, #64744
movk x8, #21380, lsl #16
movk x8, #23316, lsl #32
movk x8, #48962, lsl #48
mov x9, #61406
movk x9, #16023, lsl #16
movk x9, #30452, lsl #32
movk x9, #16153, lsl #48
fmov d6, x8
fmul d7, d29, d6
fmov d16, x9
fmul d17, d17, d16
fadd d7, d7, d17
fadd d7, d7, d5
mov x8, #3449
movk x8, #18764, lsl #16
movk x8, #45194, lsl #32
movk x8, #16217, lsl #48
fmov d5, x8
fadd d17, d3, d5
fadd d4, d4, d5
fmul d3, d7, d27
str d4, [sp, #8000] ; 8-byte Folded Spill
fmul d4, d4, d30
fadd d3, d3, d4
str d18, [sp, #10760] ; 8-byte Folded Spill
fmul d4, d23, d18
fmov d5, d12
str d19, [sp, #10752] ; 8-byte Folded Spill
fmul d5, d12, d19
fadd d4, d4, d5
fmov d5, d28
fmul d5, d28, d6
fmul d6, d22, d16
ldr d16, [sp, #12072] ; 8-byte Folded Reload
fadd d5, d5, d6
fadd d4, d5, d4
str d4, [sp, #10056] ; 8-byte Folded Spill
fmov d5, d9
fmul d4, d4, d9
fadd d4, d4, d3
mov x8, #54806
movk x8, #23353, lsl #16
movk x8, #56949, lsl #32
movk x8, #49094, lsl #48
fmov d3, x8
fmul d3, d30, d3
str d3, [sp, #9368] ; 8-byte Folded Spill
str d17, [sp, #7896] ; 8-byte Folded Spill
fmul d3, d17, d3
str d3, [sp, #7864] ; 8-byte Folded Spill
fadd d2, d3, d2
str d4, [sp, #6104] ; 8-byte Folded Spill
fmul d3, d4, d1
str d3, [sp, #8960] ; 8-byte Folded Spill
fadd d2, d3, d2
fmul d1, d27, d1
str d1, [sp, #9360] ; 8-byte Folded Spill
str d7, [sp, #9128] ; 8-byte Folded Spill
fmul d1, d7, d1
ldr d7, [sp, #12048] ; 8-byte Folded Reload
str d1, [sp, #7856] ; 8-byte Folded Spill
fadd d1, d1, d2
str d1, [sp, #1880] ; 8-byte Folded Spill
ldur d2, [x29, #-240] ; 8-byte Folded Reload
fmul d3, d2, d1
cbz x20, LBB19_31
; %bb.30:
ldr d1, [x20, #200]
ldr d2, [sp, #11248] ; 8-byte Folded Reload
fadd d4, d2, d1
ldr d5, [sp, #11408] ; 8-byte Folded Reload
fmul d1, d5, d4
ldr d2, [sp, #9320] ; 8-byte Folded Reload
fadd d1, d2, d1
ldr d18, [sp, #10872] ; 8-byte Folded Reload
ldr d2, [sp, #11640] ; 8-byte Folded Reload
fsub d2, d18, d2
str d2, [sp, #10656] ; 8-byte Folded Spill
fmul d2, d16, d2
fadd d2, d2, d1
ldr d1, [x20, #216]
fadd d17, d4, d1
ldr d6, [sp, #11304] ; 8-byte Folded Reload
fmul d1, d6, d17
str d2, [sp, #8992] ; 8-byte Folded Spill
fsub d1, d2, d1
ldr d28, [sp, #10992] ; 8-byte Folded Reload
fsub d2, d28, d18
str d2, [sp, #10648] ; 8-byte Folded Spill
fmul d2, d7, d2
fadd d26, d2, d1
ldr d31, [x20, #232]
b LBB19_32
LBB19_31:
ldr d1, [sp, #11248] ; 8-byte Folded Reload
fadd d17, d1, d31
ldr d5, [sp, #11408] ; 8-byte Folded Reload
fmul d1, d17, d5
ldr d2, [sp, #9320] ; 8-byte Folded Reload
fadd d1, d2, d1
ldr d4, [sp, #10872] ; 8-byte Folded Reload
ldr d2, [sp, #11640] ; 8-byte Folded Reload
fsub d2, d4, d2
str d2, [sp, #10656] ; 8-byte Folded Spill
fmul d2, d16, d2
fadd d2, d1, d2
ldr d6, [sp, #11304] ; 8-byte Folded Reload
fmul d1, d17, d6
str d2, [sp, #8992] ; 8-byte Folded Spill
fsub d1, d2, d1
ldr d28, [sp, #10992] ; 8-byte Folded Reload
fsub d2, d28, d4
str d2, [sp, #10648] ; 8-byte Folded Spill
fmul d2, d7, d2
fadd d26, d2, d1
fmov d4, d17
LBB19_32:
ldur d22, [x29, #-256] ; 8-byte Folded Reload
str d4, [sp, #11640] ; 8-byte Folded Spill
ldr d2, [sp, #8656] ; 8-byte Folded Reload
fmul d1, d2, d22
ldr d4, [sp, #8768] ; 8-byte Folded Reload
ldr d5, [sp, #9192] ; 8-byte Folded Reload
fmul d20, d4, d5
ldr d4, [sp, #9200] ; 8-byte Folded Reload
fmul d21, d2, d4
ldr d2, [sp, #8760] ; 8-byte Folded Reload
fmul d18, d2, d4
ldr d2, [sp, #8752] ; 8-byte Folded Reload
fmul d19, d2, d4
ldr d2, [sp, #8520] ; 8-byte Folded Reload
ldr d4, [sp, #11096] ; 8-byte Folded Reload
fadd d2, d2, d4
str d2, [sp, #8648] ; 8-byte Folded Spill
ldr q2, [sp, #12240] ; 16-byte Folded Reload
ldr q4, [sp, #6576] ; 16-byte Folded Reload
fmul d4, d2, d4
ldr d2, [sp, #11520] ; 8-byte Folded Reload
ldr d5, [sp, #11184] ; 8-byte Folded Reload
fmul d5, d2, d5
ldr d2, [sp, #9456] ; 8-byte Folded Reload
ldr d6, [sp, #11056] ; 8-byte Folded Reload
fadd d2, d6, d2
ldr d6, [sp, #9440] ; 8-byte Folded Reload
ldr d7, [sp, #9312] ; 8-byte Folded Reload
fmul d6, d6, d7
str d6, [sp, #8512] ; 8-byte Folded Spill
ldr d6, [sp, #11504] ; 8-byte Folded Reload
ldr d7, [sp, #11312] ; 8-byte Folded Reload
fsub d6, d7, d6
fmov d7, x21
ldr d16, [sp, #9240] ; 8-byte Folded Reload
ldr d23, [sp, #11424] ; 8-byte Folded Reload
fsub d16, d16, d23
fadd d0, d3, d0
str d17, [sp, #11520] ; 8-byte Folded Spill
fadd d24, d17, d31
ldr d3, [sp, #12328] ; 8-byte Folded Reload
fmul d3, d3, d24
str d26, [sp, #8744] ; 8-byte Folded Spill
fadd d3, d26, d3
ldr d26, [sp, #11320] ; 8-byte Folded Reload
fsub d17, d26, d28
str d17, [sp, #10640] ; 8-byte Folded Spill
ldr d23, [sp, #11624] ; 8-byte Folded Reload
fmul d17, d23, d17
fadd d17, d17, d3
ldr d3, [sp, #12256] ; 8-byte Folded Reload
str d24, [sp, #11504] ; 8-byte Folded Spill
fmul d3, d3, d24
str d17, [sp, #10992] ; 8-byte Folded Spill
fadd d3, d3, d17
ldr d17, [sp, #11432] ; 8-byte Folded Reload
fsub d17, d17, d26
str d17, [sp, #10632] ; 8-byte Folded Spill
fmul d17, d23, d17
fadd d3, d17, d3
str d3, [sp, #6600] ; 8-byte Folded Spill
movi d3, #0000000000000000
cbz x20, LBB19_34
; %bb.33:
ldr d3, [x20, #248]
LBB19_34:
fadd d4, d4, d5
str d4, [sp, #8920] ; 8-byte Folded Spill
fadd d4, d6, d7
str d4, [sp, #8896] ; 8-byte Folded Spill
ldr d4, [sp, #11528] ; 8-byte Folded Reload
fmul d4, d16, d4
str d4, [sp, #8728] ; 8-byte Folded Spill
ldur d4, [x29, #-160] ; 8-byte Folded Reload
fmov d6, d3
fmul d3, d4, d3
fmov d8, d4
ldur d5, [x29, #-192] ; 8-byte Folded Reload
ldr d11, [sp, #11624] ; 8-byte Folded Reload
fmul d4, d5, d11
fmov d9, d5
str d4, [sp, #1872] ; 8-byte Folded Spill
ldur d5, [x29, #-240] ; 8-byte Folded Reload
fmul d4, d4, d5
fmov d24, d5
fsub d3, d3, d4
str d3, [sp, #10104] ; 8-byte Folded Spill
ldr d17, [sp, #11512] ; 8-byte Folded Reload
fmul d3, d17, d3
ldr d4, [sp, #11504] ; 8-byte Folded Reload
fadd d23, d4, d6
fmov d28, d6
str d6, [sp, #11424] ; 8-byte Folded Spill
fmov d31, d4
ldr d7, [sp, #11920] ; 8-byte Folded Reload
fmul d4, d7, d23
fsub d3, d3, d4
ldr d16, [sp, #6600] ; 8-byte Folded Reload
fsub d3, d16, d3
mov x8, #54806
movk x8, #23353, lsl #16
movk x8, #56949, lsl #32
movk x8, #16326, lsl #48
fmov d26, x8
fmul d3, d3, d26
ldr d5, [sp, #11968] ; 8-byte Folded Reload
fmul d4, d17, d5
fmov d29, d5
ldr d6, [sp, #11928] ; 8-byte Folded Reload
ldr d27, [sp, #12232] ; 8-byte Folded Reload
fmul d5, d6, d27
fsub d4, d4, d5
str d4, [sp, #9984] ; 8-byte Folded Spill
fmul d4, d27, d4
ldr d5, [sp, #11168] ; 8-byte Folded Reload
ldur d30, [x29, #-248] ; 8-byte Folded Reload
fmul d5, d30, d5
fsub d4, d4, d5
fmul d4, d4, d26
fadd d3, d4, d3
mov x8, #11213
movk x8, #64899, lsl #16
movk x8, #2195, lsl #32
movk x8, #49148, lsl #48
fmov d4, x8
ldr d5, [sp, #11072] ; 8-byte Folded Reload
fadd d4, d5, d4
ldr d5, [sp, #10824] ; 8-byte Folded Reload
fadd d4, d5, d4
fsub d30, d3, d4
str d30, [sp, #8856] ; 8-byte Folded Spill
ldr d3, [sp, #8384] ; 8-byte Folded Reload
ldr d4, [sp, #8376] ; 8-byte Folded Reload
fadd d4, d3, d4
str d4, [sp, #9416] ; 8-byte Folded Spill
mov x8, #54806
movk x8, #23353, lsl #16
movk x8, #56949, lsl #32
movk x8, #49094, lsl #48
fmov d3, x8
fmul d3, d6, d3
str d3, [sp, #10072] ; 8-byte Folded Spill
ldr d5, [sp, #9560] ; 8-byte Folded Reload
fsub d3, d3, d5
str d3, [sp, #9432] ; 8-byte Folded Spill
fmul d3, d3, d11
fsub d3, d4, d3
str d3, [sp, #4368] ; 8-byte Folded Spill
fmul d3, d11, d3
fadd d3, d3, d30
fmul d4, d7, d26
str d4, [sp, #10064] ; 8-byte Folded Spill
ldr d6, [sp, #9568] ; 8-byte Folded Reload
fadd d4, d6, d4
str d4, [sp, #9424] ; 8-byte Folded Spill
fmul d4, d4, d31
fadd d4, d4, d3
str d4, [sp, #9224] ; 8-byte Folded Spill
mov x8, #63706
movk x8, #13221, lsl #16
movk x8, #1281, lsl #32
movk x8, #48977, lsl #48
fmov d3, x8
fmul d3, d4, d3
fadd d0, d0, d3
ldr d3, [sp, #6568] ; 8-byte Folded Reload
fmul d3, d27, d3
str d25, [sp, #8696] ; 8-byte Folded Spill
fmul d4, d29, d25
fadd d3, d3, d4
str d23, [sp, #11312] ; 8-byte Folded Spill
fmul d4, d6, d23
fadd d3, d3, d4
fmul d4, d16, d26
str d4, [sp, #8952] ; 8-byte Folded Spill
fadd d4, d4, d3
str d4, [sp, #8784] ; 8-byte Folded Spill
mov x8, #54125
movk x8, #53060, lsl #16
movk x8, #15481, lsl #32
movk x8, #16273, lsl #48
fmov d3, x8
fmul d3, d4, d3
fadd d0, d3, d0
ldr d3, [sp, #10056] ; 8-byte Folded Reload
fmul d4, d3, d26
str d26, [sp, #8592] ; 8-byte Folded Spill
fmul d3, d5, d17
str d4, [sp, #8944] ; 8-byte Folded Spill
fsub d3, d4, d3
str d3, [sp, #6392] ; 8-byte Folded Spill
fmul d3, d3, d24
str d3, [sp, #4360] ; 8-byte Folded Spill
fmul d3, d11, d3
fadd d0, d3, d0
ldr d3, [sp, #10760] ; 8-byte Folded Reload
ldr d4, [sp, #12296] ; 8-byte Folded Reload
fmul d3, d4, d3
ldr d4, [sp, #10752] ; 8-byte Folded Reload
ldur d5, [x29, #-232] ; 8-byte Folded Reload
fmul d4, d5, d4
fadd d3, d3, d4
mov x8, #64744
movk x8, #21380, lsl #16
movk x8, #23316, lsl #32
movk x8, #48962, lsl #48
fmov d4, x8
str d4, [sp, #8608] ; 8-byte Folded Spill
ldr d5, [sp, #11768] ; 8-byte Folded Reload
fmul d4, d5, d4
mov x8, #61406
movk x8, #16023, lsl #16
movk x8, #30452, lsl #32
movk x8, #16153, lsl #48
fmov d5, x8
ldr d7, [sp, #11776] ; 8-byte Folded Reload
fmul d5, d7, d5
fadd d4, d4, d5
fsub d3, d3, d4
str d3, [sp, #9576] ; 8-byte Folded Spill
fmul d4, d3, d26
fmul d3, d6, d17
str d4, [sp, #8928] ; 8-byte Folded Spill
fsub d3, d4, d3
str d3, [sp, #6384] ; 8-byte Folded Spill
fmul d3, d3, d28
fadd d0, d3, d0
str d0, [sp, #10120] ; 8-byte Folded Spill
mov x8, #43516
movk x8, #54001, lsl #16
movk x8, #25165, lsl #32
movk x8, #16240, lsl #48
fmov d5, x8
str d5, [sp, #8704] ; 8-byte Folded Spill
ldr d3, [sp, #11632] ; 8-byte Folded Reload
fmul d0, d3, d5
fmov d7, d3
ldr d4, [sp, #11288] ; 8-byte Folded Reload
fadd d0, d4, d0
mov x8, #20972
movk x8, #7864, lsl #16
movk x8, #60293, lsl #32
movk x8, #49057, lsl #48
fmov d3, x8
fadd d14, d0, d3
fmov d0, #0.50000000
fmul d0, d14, d0
fsub d0, d14, d0
fsub d4, d0, d4
ldr d6, [sp, #11680] ; 8-byte Folded Reload
fmul d5, d6, d5
str d5, [sp, #11096] ; 8-byte Folded Spill
fmov d17, d8
fmul d0, d8, d5
fmul d3, d9, d4
fadd d8, d0, d3
fmul d0, d6, d5
fmul d3, d7, d4
fadd d12, d0, d3
ldr d7, [sp, #11488] ; 8-byte Folded Reload
fmul d0, d7, d5
str d4, [sp, #10872] ; 8-byte Folded Spill
ldr d3, [sp, #11688] ; 8-byte Folded Reload
fmul d3, d3, d4
fadd d3, d0, d3
str d3, [sp, #11528] ; 8-byte Folded Spill
fmul d0, d6, d12
str d12, [sp, #11432] ; 8-byte Folded Spill
fmul d3, d7, d3
fadd d0, d0, d3
fmul d3, d17, d8
fadd d13, d3, d0
fmul d0, d13, d11
ldr d3, [sp, #11192] ; 8-byte Folded Reload
fadd d9, d0, d3
ldr d0, [sp, #11240] ; 8-byte Folded Reload
fsub d0, d0, d1
str d0, [sp, #8624] ; 8-byte Folded Spill
ldr d0, [sp, #8512] ; 8-byte Folded Reload
fadd d0, d0, d2
str d0, [sp, #6840] ; 8-byte Folded Spill
mov x8, #6148914691236517205
movk x8, #16341, lsl #48
fmov d0, x8
fsub d0, d0, d9
mov x8, #4632233691727265792
fmov d1, x8
str d1, [sp, #11288] ; 8-byte Folded Spill
fmul d0, d0, d1
str d20, [sp, #1896] ; 8-byte Folded Spill
str d21, [sp, #6640] ; 8-byte Folded Spill
fsub d1, d20, d21
str d1, [sp, #6824] ; 8-byte Folded Spill
str d18, [sp, #6648] ; 8-byte Folded Spill
str d19, [sp, #1904] ; 8-byte Folded Spill
fadd d1, d18, d19
str d1, [sp, #6832] ; 8-byte Folded Spill
ldr d1, [sp, #8648] ; 8-byte Folded Reload
fmul d1, d22, d1
str d1, [sp, #8664] ; 8-byte Folded Spill
ldr q1, [sp, #11824] ; 16-byte Folded Reload
ldr q2, [sp, #6544] ; 16-byte Folded Reload
fmul d1, d1, d2
str d1, [sp, #8296] ; 8-byte Folded Spill
bl _tanh
str d0, [sp, #9976] ; 8-byte Folded Spill
fmul d2, d0, d0
fmov d1, #1.00000000
fsub d0, d1, d2
str d0, [sp, #10128] ; 8-byte Folded Spill
fmul d0, d14, d14
mov x8, #26865
movk x8, #35043, lsl #16
movk x8, #63669, lsl #32
movk x8, #16100, lsl #48
fmov d15, x8
fadd d0, d0, d15
fsqrt d0, d0
fmov d1, #1.50000000
str d0, [sp, #8360] ; 8-byte Folded Spill
bl _pow
mov x8, #45572
movk x8, #23979, lsl #16
movk x8, #34811, lsl #32
movk x8, #16645, lsl #48
fmov d1, x8
fmul d10, d0, d1
fmov d0, #3.00000000
fmul d0, d9, d0
fmov d1, #1.00000000
fsub d0, d1, d0
str d0, [sp, #10144] ; 8-byte Folded Spill
fmul d9, d10, d0
mov x8, #211106232532992
movk x8, #49266, lsl #48
fmov d0, x8
str d14, [sp, #8368] ; 8-byte Folded Spill
fmul d0, d14, d0
bl _tanh
str d0, [sp, #8352] ; 8-byte Folded Spill
fmov d1, #0.50000000
fmul d0, d0, d1
fadd d16, d0, d1
fmov d28, #0.50000000
ldr d25, [sp, #11632] ; 8-byte Folded Reload
fmul d0, d25, d12
ldr d26, [sp, #11688] ; 8-byte Folded Reload
ldr d1, [sp, #11528] ; 8-byte Folded Reload
fmul d1, d26, d1
fadd d0, d0, d1
ldur d27, [x29, #-192] ; 8-byte Folded Reload
fmul d1, d27, d8
fadd d4, d1, d0
fmul d0, d4, d11
ldr d1, [sp, #11320] ; 8-byte Folded Reload
fsub d17, d1, d0
fmul d0, d17, d17
fadd d0, d0, d15
fsqrt d19, d0
mov x8, #-7378697629483820647
movk x8, #39322
movk x8, #16329, lsl #48
fmov d0, x8
fdiv d1, d19, d0
fmov d0, #1.00000000
fminnm d0, d1, d0
mov x8, #-7378697629483820647
movk x8, #39322
movk x8, #16361, lsl #48
fmov d2, x8
fmul d0, d0, d2
fmul d3, d19, d28
fadd d20, d3, d0
str d4, [sp, #11192] ; 8-byte Folded Spill
ldr d24, [sp, #10096] ; 8-byte Folded Reload
fadd d3, d24, d4
fdiv d4, d3, d19
fmul d5, d17, d4
fmul d0, d5, d20
ldr d23, [sp, #11048] ; 8-byte Folded Reload
fadd d0, d23, d0
str d13, [sp, #11184] ; 8-byte Folded Spill
fadd d0, d13, d0
str d9, [sp, #10136] ; 8-byte Folded Spill
fmul d21, d9, d16
fmul d6, d0, d21
fmul d6, d6, d28
ldr d7, [sp, #10128] ; 8-byte Folded Reload
fmul d6, d7, d6
ldr d7, [sp, #11288] ; 8-byte Folded Reload
fmul d6, d6, d7
ldr d7, [sp, #9976] ; 8-byte Folded Reload
fmul d7, d7, d28
fadd d7, d7, d28
fmul d0, d0, d7
str d16, [sp, #9696] ; 8-byte Folded Spill
str d0, [sp, #8312] ; 8-byte Folded Spill
fmul d0, d16, d0
str d10, [sp, #9704] ; 8-byte Folded Spill
str d0, [sp, #8344] ; 8-byte Folded Spill
fmul d0, d10, d0
fmov d16, #3.00000000
fmul d0, d0, d16
fadd d18, d0, d6
fmul d22, d11, d18
fmul d6, d8, d22
ldr d0, [sp, #10120] ; 8-byte Folded Reload
fadd d6, d6, d0
str d21, [sp, #9688] ; 8-byte Folded Spill
str d7, [sp, #10120] ; 8-byte Folded Spill
fmul d10, d7, d21
str d20, [sp, #8800] ; 8-byte Folded Spill
fmul d16, d20, d10
fmul d7, d17, d16
fdiv d15, d7, d19
fdiv d7, d15, d19
str d7, [sp, #9856] ; 8-byte Folded Spill
fmul d3, d3, d7
fmul d5, d5, d10
fmul d7, d5, d28
fsub d3, d3, d7
fmov d7, #1.00000000
fcmp d1, d7
fmul d1, d5, d2
movi d0, #0000000000000000
fcsel d2, d7, d0, ls
str d2, [sp, #8776] ; 8-byte Folded Spill
fmul d1, d1, d2
fmov d2, #-5.00000000
fmul d1, d1, d2
fadd d1, d3, d1
str d19, [sp, #8808] ; 8-byte Folded Spill
fadd d2, d19, d19
str d2, [sp, #9848] ; 8-byte Folded Spill
fdiv d1, d1, d2
str d17, [sp, #8816] ; 8-byte Folded Spill
fadd d2, d17, d17
str d2, [sp, #9840] ; 8-byte Folded Spill
fmul d1, d2, d1
str d16, [sp, #8792] ; 8-byte Folded Spill
fmul d2, d4, d16
fsub d1, d1, d2
str d1, [sp, #6064] ; 8-byte Folded Spill
fmul d13, d11, d1
fmov d19, d27
fmul d1, d27, d13
fmul d2, d27, d15
str d2, [sp, #7320] ; 8-byte Folded Spill
fadd d1, d2, d1
ldur d28, [x29, #-160] ; 8-byte Folded Reload
fmul d2, d28, d22
fsub d1, d2, d1
str d8, [sp, #11320] ; 8-byte Folded Spill
fmul d2, d8, d10
str d2, [sp, #7848] ; 8-byte Folded Spill
fsub d2, d6, d2
fmul d3, d28, d10
str d3, [sp, #7296] ; 8-byte Folded Spill
fsub d1, d1, d3
str d1, [sp, #9328] ; 8-byte Folded Spill
ldr d0, [sp, #11096] ; 8-byte Folded Reload
fmul d1, d0, d1
fadd d4, d2, d1
str d4, [sp, #8472] ; 8-byte Folded Spill
mov x8, #49235
movk x8, #28989, lsl #16
movk x8, #40841, lsl #32
movk x8, #16312, lsl #48
mov x9, #45974
movk x9, #34787, lsl #16
movk x9, #35902, lsl #32
movk x9, #16285, lsl #48
fmov d1, x8
str d1, [sp, #8488] ; 8-byte Folded Spill
ldr d0, [sp, #11680] ; 8-byte Folded Reload
fmul d3, d0, d1
fmov d30, x9
ldr d0, [sp, #11488] ; 8-byte Folded Reload
fmul d5, d0, d30
fadd d16, d3, d5
fmul d3, d25, d1
fmul d5, d26, d30
fadd d17, d3, d5
ldr d0, [sp, #11504] ; 8-byte Folded Reload
fmul d3, d16, d0
ldr d1, [sp, #10992] ; 8-byte Folded Reload
fadd d3, d3, d1
mov x8, #50080
movk x8, #49599, lsl #16
movk x8, #32579, lsl #32
movk x8, #16368, lsl #48
fmov d5, x8
fmul d3, d3, d5
mov x8, #50080
movk x8, #49599, lsl #16
movk x8, #32579, lsl #32
movk x8, #49136, lsl #48
fmul d6, d17, d11
str d6, [sp, #9976] ; 8-byte Folded Spill
fmul d6, d11, d6
fmov d7, x8
fmul d6, d6, d7
fadd d3, d3, d6
mov x8, #42264
movk x8, #33609, lsl #16
movk x8, #14594, lsl #32
movk x8, #49188, lsl #48
fmov d6, x8
fadd d6, d10, d6
fsub d3, d3, d6
ldr d1, [sp, #8856] ; 8-byte Folded Reload
fadd d1, d3, d1
str d1, [sp, #8832] ; 8-byte Folded Spill
str d18, [sp, #4352] ; 8-byte Folded Spill
ldr d2, [sp, #9416] ; 8-byte Folded Reload
fadd d9, d2, d18
fmul d21, d24, d5
str d17, [sp, #11288] ; 8-byte Folded Spill
fmul d6, d17, d7
ldr d2, [sp, #9432] ; 8-byte Folded Reload
fsub d3, d2, d21
str d6, [sp, #9408] ; 8-byte Folded Spill
fadd d2, d6, d3
str d2, [sp, #9400] ; 8-byte Folded Spill
fmul d3, d2, d11
fsub d3, d9, d3
str d3, [sp, #4344] ; 8-byte Folded Spill
fmul d3, d11, d3
fadd d3, d1, d3
fmul d23, d23, d5
ldr d1, [sp, #9424] ; 8-byte Folded Reload
fadd d6, d23, d1
str d16, [sp, #11048] ; 8-byte Folded Spill
fmul d7, d16, d5
str d7, [sp, #9416] ; 8-byte Folded Spill
fadd d1, d7, d6
str d1, [sp, #9392] ; 8-byte Folded Spill
fmul d6, d1, d0
fadd d0, d6, d3
str d0, [sp, #8480] ; 8-byte Folded Spill
mov x8, #4359484439294640128
fmov d6, x8
fmul d6, d4, d6
mov x8, #62612
movk x8, #18904, lsl #16
movk x8, #1144, lsl #32
movk x8, #49064, lsl #48
fmov d7, x8
fmul d7, d0, d7
fadd d0, d6, d7
str d0, [sp, #8464] ; 8-byte Folded Spill
ldr d12, [sp, #9864] ; 8-byte Folded Reload
ldr d0, [sp, #11768] ; 8-byte Folded Reload
fmul d7, d0, d12
ldr d8, [sp, #9152] ; 8-byte Folded Reload
fmul d16, d0, d8
str d16, [sp, #7256] ; 8-byte Folded Spill
fsub d7, d7, d16
ldr d16, [sp, #12144] ; 8-byte Folded Reload
ldr d18, [sp, #5200] ; 8-byte Folded Reload
fmul d16, d16, d18
fadd d7, d16, d7
ldr d0, [sp, #9232] ; 8-byte Folded Reload
ldur d6, [x29, #-232] ; 8-byte Folded Reload
fmul d16, d6, d0
fadd d7, d16, d7
ldr d17, [sp, #11880] ; 8-byte Folded Reload
fmul d16, d17, d0
ldr d3, [sp, #10824] ; 8-byte Folded Reload
fmul d17, d17, d3
str d17, [sp, #7840] ; 8-byte Folded Spill
fsub d16, d16, d17
fmul d17, d6, d3
str d17, [sp, #7248] ; 8-byte Folded Spill
fsub d24, d7, d17
ldr d31, [sp, #11472] ; 8-byte Folded Reload
fmul d7, d31, d24
fadd d7, d16, d7
ldr d17, [sp, #11872] ; 8-byte Folded Reload
ldr d14, [sp, #9680] ; 8-byte Folded Reload
fmul d16, d17, d14
fadd d7, d16, d7
ldr d4, [sp, #11072] ; 8-byte Folded Reload
fmul d16, d17, d4
str d16, [sp, #7832] ; 8-byte Folded Spill
fsub d7, d7, d16
mov x8, #46543
movk x8, #48510, lsl #16
movk x8, #46414, lsl #32
movk x8, #16260, lsl #48
fmov d16, x8
fadd d7, d7, d16
ldr d25, [sp, #9368] ; 8-byte Folded Reload
ldr d2, [sp, #11968] ; 8-byte Folded Reload
fmul d16, d2, d25
ldr d27, [sp, #9360] ; 8-byte Folded Reload
ldur d2, [x29, #-248] ; 8-byte Folded Reload
fmul d17, d2, d27
fadd d26, d16, d17
ldr d16, [sp, #9144] ; 8-byte Folded Reload
fmul d16, d16, d26
str d16, [sp, #7776] ; 8-byte Folded Spill
fadd d7, d16, d7
mov x8, #62994
movk x8, #14722, lsl #16
movk x8, #41829, lsl #32
movk x8, #16247, lsl #48
fmov d16, x8
ldr d2, [sp, #8784] ; 8-byte Folded Reload
fmul d16, d2, d16
fadd d7, d7, d16
ldr d16, [sp, #11424] ; 8-byte Folded Reload
fmul d16, d19, d16
fmul d17, d28, d11
str d17, [sp, #1856] ; 8-byte Folded Spill
ldur d19, [x29, #-240] ; 8-byte Folded Reload
fmul d17, d17, d19
fadd d19, d17, d16
mov x8, #18456
movk x8, #63321, lsl #16
movk x8, #33926, lsl #32
movk x8, #48991, lsl #48
fmov d16, x8
ldr d20, [sp, #9224] ; 8-byte Folded Reload
fmul d16, d20, d16
ldr d29, [sp, #12232] ; 8-byte Folded Reload
fmul d17, d29, d27
str d19, [sp, #10096] ; 8-byte Folded Spill
ldr d27, [sp, #8592] ; 8-byte Folded Reload
fmul d19, d19, d27
str d19, [sp, #5160] ; 8-byte Folded Spill
fadd d1, d17, d19
ldr d17, [sp, #8608] ; 8-byte Folded Reload
fmul d17, d1, d17
str d17, [sp, #7744] ; 8-byte Folded Spill
fadd d7, d17, d7
ldr d17, [sp, #10104] ; 8-byte Folded Reload
fmul d19, d17, d27
fmul d17, d29, d25
str d19, [sp, #5168] ; 8-byte Folded Spill
fadd d29, d17, d19
ldr d17, [sp, #10752] ; 8-byte Folded Reload
fmul d17, d17, d29
str d17, [sp, #7728] ; 8-byte Folded Spill
fadd d25, d17, d7
ldr d19, [sp, #12304] ; 8-byte Folded Reload
fmul d7, d19, d25
fadd d7, d7, d16
ldr d17, [sp, #11776] ; 8-byte Folded Reload
fmul d16, d17, d12
fmul d17, d17, d8
str d17, [sp, #7240] ; 8-byte Folded Spill
fsub d16, d16, d17
ldr d17, [sp, #12152] ; 8-byte Folded Reload
fmul d17, d17, d18
fadd d16, d17, d16
ldr d27, [sp, #12296] ; 8-byte Folded Reload
fmul d17, d27, d0
fadd d16, d17, d16
ldr d18, [sp, #12096] ; 8-byte Folded Reload
fmul d17, d18, d0
fmul d18, d18, d3
str d18, [sp, #7720] ; 8-byte Folded Spill
fsub d17, d17, d18
fmul d18, d27, d3
fmov d0, d27
str d18, [sp, #7232] ; 8-byte Folded Spill
fsub d27, d16, d18
fmul d16, d31, d27
fadd d16, d17, d16
ldr d17, [sp, #6592] ; 8-byte Folded Reload
fmul d17, d28, d17
fmov d31, d28
str d24, [sp, #6528] ; 8-byte Folded Spill
fmul d18, d6, d24
fadd d17, d17, d18
str d27, [sp, #6520] ; 8-byte Folded Spill
fmul d18, d0, d27
fadd d17, d18, d17
mov x8, #-7378697629483820647
movk x8, #39322
movk x8, #16297, lsl #48
fmov d18, x8
fmul d17, d17, d18
fadd d16, d16, d17
ldr d18, [sp, #12088] ; 8-byte Folded Reload
fmul d17, d18, d14
fadd d16, d17, d16
fmul d17, d18, d4
str d17, [sp, #7688] ; 8-byte Folded Spill
fsub d16, d16, d17
mov x8, #57269
movk x8, #60105, lsl #16
movk x8, #55991, lsl #32
movk x8, #16301, lsl #48
fmov d17, x8
fadd d16, d16, d17
mov x8, #56877
movk x8, #10885, lsl #16
movk x8, #2572, lsl #32
movk x8, #16289, lsl #48
fmov d17, x8
fmul d17, d2, d17
str d26, [sp, #5152] ; 8-byte Folded Spill
ldr d18, [sp, #9136] ; 8-byte Folded Reload
fmul d18, d18, d26
str d18, [sp, #7672] ; 8-byte Folded Spill
fadd d16, d18, d16
fadd d16, d16, d17
mov x8, #61406
movk x8, #16023, lsl #16
movk x8, #30452, lsl #32
movk x8, #48921, lsl #48
fmov d17, x8
str d1, [sp, #5144] ; 8-byte Folded Spill
fmul d17, d1, d17
str d17, [sp, #7664] ; 8-byte Folded Spill
fsub d16, d16, d17
str d29, [sp, #5136] ; 8-byte Folded Spill
ldr d0, [sp, #10760] ; 8-byte Folded Reload
fmul d17, d0, d29
str d17, [sp, #7656] ; 8-byte Folded Spill
fadd d24, d17, d16
ldr d18, [sp, #12312] ; 8-byte Folded Reload
fmul d16, d18, d24
fadd d7, d16, d7
ldr d1, [sp, #11528] ; 8-byte Folded Reload
fmul d16, d1, d22
fadd d7, d16, d7
ldr d0, [sp, #11688] ; 8-byte Folded Reload
fmul d16, d0, d13
fmul d17, d0, d15
str d17, [sp, #7288] ; 8-byte Folded Spill
fadd d16, d17, d16
ldr d0, [sp, #11488] ; 8-byte Folded Reload
fmul d17, d0, d22
fsub d16, d17, d16
fmul d17, d1, d10
str d17, [sp, #7648] ; 8-byte Folded Spill
fsub d7, d7, d17
fmul d17, d0, d10
fmov d27, d0
str d17, [sp, #7280] ; 8-byte Folded Spill
fsub d26, d16, d17
ldr d28, [sp, #11096] ; 8-byte Folded Reload
fmul d16, d28, d26
fadd d7, d7, d16
mov x8, #26288
movk x8, #13902, lsl #16
movk x8, #44107, lsl #32
movk x8, #16338, lsl #48
fmov d16, x8
fadd d7, d7, d16
str d21, [sp, #8504] ; 8-byte Folded Spill
fmul d16, d21, d11
str d16, [sp, #4312] ; 8-byte Folded Spill
fmul d16, d11, d16
str d23, [sp, #8496] ; 8-byte Folded Spill
ldr d0, [sp, #11504] ; 8-byte Folded Reload
fmul d17, d23, d0
fadd d16, d16, d17
ldr d0, [sp, #10992] ; 8-byte Folded Reload
fmul d5, d0, d5
str d5, [sp, #7640] ; 8-byte Folded Spill
fadd d5, d16, d5
fmul d4, d5, d30
fadd d4, d4, d7
mov x8, #4363988038922010624
fmov d7, x8
fmul d16, d4, d7
ldr d0, [sp, #8464] ; 8-byte Folded Reload
fadd d6, d0, d16
mov x8, #36544
movk x8, #43611, lsl #16
movk x8, #860, lsl #32
movk x8, #16326, lsl #48
fmov d16, x8
fmul d16, d20, d16
str d25, [sp, #1824] ; 8-byte Folded Spill
fmul d17, d18, d25
fsub d16, d16, d17
str d24, [sp, #1816] ; 8-byte Folded Spill
fmul d17, d19, d24
fadd d16, d17, d16
ldr d19, [sp, #11432] ; 8-byte Folded Reload
fmul d17, d19, d22
fadd d16, d17, d16
ldr d2, [sp, #11680] ; 8-byte Folded Reload
fmul d0, d2, d22
str d13, [sp, #8592] ; 8-byte Folded Spill
ldr d1, [sp, #11632] ; 8-byte Folded Reload
fmul d17, d1, d13
str d15, [sp, #8608] ; 8-byte Folded Spill
fmul d18, d1, d15
str d18, [sp, #7272] ; 8-byte Folded Spill
fadd d17, d18, d17
fsub d0, d0, d17
fmul d17, d19, d10
str d17, [sp, #7560] ; 8-byte Folded Spill
fsub d16, d16, d17
str d10, [sp, #8784] ; 8-byte Folded Spill
fmul d17, d2, d10
movi d10, #0000000000000000
str d17, [sp, #7264] ; 8-byte Folded Spill
fsub d18, d0, d17
fmov d0, d28
fmul d0, d28, d18
fadd d0, d16, d0
ldr d1, [sp, #9328] ; 8-byte Folded Reload
fmul d16, d31, d1
str d26, [sp, #9232] ; 8-byte Folded Spill
fmul d17, d27, d26
fadd d16, d16, d17
str d18, [sp, #9224] ; 8-byte Folded Spill
fmul d17, d2, d18
fadd d16, d17, d16
ldr d1, [sp, #8704] ; 8-byte Folded Reload
fmul d16, d16, d1
fadd d0, d0, d16
mov x8, #21969
movk x8, #1325, lsl #16
movk x8, #7976, lsl #32
movk x8, #16367, lsl #48
fmov d16, x8
fadd d0, d0, d16
ldr d1, [sp, #8488] ; 8-byte Folded Reload
fmul d2, d5, d1
fadd d0, d2, d0
mov x8, #-4868391197187506176
fmov d2, x8
ldr d1, [sp, #8472] ; 8-byte Folded Reload
fmul d1, d1, d2
mov x8, #47272
movk x8, #56762, lsl #16
movk x8, #43178, lsl #32
movk x8, #49060, lsl #48
fmov d2, x8
ldr d3, [sp, #8480] ; 8-byte Folded Reload
fmul d2, d3, d2
fadd d1, d2, d1
fadd d1, d1, d4
fadd d24, d6, d0
fmul d0, d0, d7
fadd d25, d1, d0
ldr q23, [sp, #11984] ; 16-byte Folded Reload
fmul d0, d23, d24
ldr q22, [sp, #11808] ; 16-byte Folded Reload
fmul d1, d22, d25
fsub d26, d0, d1
mov x8, #2356
movk x8, #12413, lsl #16
movk x8, #55910, lsl #32
movk x8, #49095, lsl #48
fmov d0, x8
ldr q1, [sp, #6608] ; 16-byte Folded Reload
fmul d7, d1, d0
ldr d29, [sp, #11520] ; 8-byte Folded Reload
fmul d1, d7, d29
ldr d20, [sp, #8992] ; 8-byte Folded Reload
fsub d1, d20, d1
mov x8, #5915
movk x8, #64709, lsl #16
movk x8, #30489, lsl #32
movk x8, #16392, lsl #48
fmov d2, x8
fmul d1, d1, d2
mov x8, #5915
movk x8, #64709, lsl #16
movk x8, #30489, lsl #32
movk x8, #49160, lsl #48
ldr q3, [sp, #6624] ; 16-byte Folded Reload
fmul d16, d3, d0
ldr d28, [sp, #12048] ; 8-byte Folded Reload
fmul d3, d16, d28
str d3, [sp, #4376] ; 8-byte Folded Spill
fmul d3, d28, d3
fmov d4, x8
fmul d3, d3, d4
fadd d1, d1, d3
mov x8, #61302
movk x8, #27691, lsl #16
movk x8, #64897, lsl #32
movk x8, #16445, lsl #48
fmov d3, x8
fadd d1, d1, d3
mov x8, #6432
movk x8, #24166, lsl #16
movk x8, #7623, lsl #32
movk x8, #16309, lsl #48
fmov d3, x8
ldr d5, [sp, #8744] ; 8-byte Folded Reload
fmul d5, d5, d3
mov x8, #30506
movk x8, #37777, lsl #16
movk x8, #58002, lsl #32
movk x8, #16361, lsl #48
fmov d6, x8
fadd d5, d5, d6
ldr d6, [sp, #9072] ; 8-byte Folded Reload
fmul d6, d6, d2
ldr d17, [sp, #10688] ; 8-byte Folded Reload
fmul d17, d17, d2
ldr d2, [sp, #9096] ; 8-byte Folded Reload
fmul d2, d2, d3
ldr d18, [sp, #10680] ; 8-byte Folded Reload
fmul d3, d18, d3
ldr d18, [sp, #8832] ; 8-byte Folded Reload
fadd d31, d5, d18
fadd d19, d1, d31
ldr d1, [sp, #9400] ; 8-byte Folded Reload
fsub d5, d1, d2
fsub d1, d5, d6
str d16, [sp, #10680] ; 8-byte Folded Spill
fmul d2, d16, d4
str d2, [sp, #8480] ; 8-byte Folded Spill
fadd d18, d2, d1
ldr d27, [sp, #12072] ; 8-byte Folded Reload
fmul d1, d18, d27
fsub d1, d9, d1
str d1, [sp, #4280] ; 8-byte Folded Spill
fmul d1, d27, d1
str d19, [sp, #8288] ; 8-byte Folded Spill
fadd d1, d19, d1
ldr d2, [sp, #9392] ; 8-byte Folded Reload
fadd d16, d3, d2
fadd d2, d17, d16
str d7, [sp, #10688] ; 8-byte Folded Spill
fmul d3, d7, d4
str d3, [sp, #8488] ; 8-byte Folded Spill
fadd d7, d3, d2
ldr d21, [sp, #11640] ; 8-byte Folded Reload
fmul d2, d7, d21
fadd d2, d2, d1
mov x8, #11201
movk x8, #50599, lsl #16
movk x8, #31589, lsl #32
movk x8, #49010, lsl #48
fmov d1, x8
fmul d1, d2, d1
ldr q19, [sp, #11952] ; 16-byte Folded Reload
fmul d3, d19, d26
fadd d1, d3, d1
str d5, [sp, #8464] ; 8-byte Folded Spill
fmul d3, d5, d28
str d9, [sp, #5088] ; 8-byte Folded Spill
fsub d3, d9, d3
str d3, [sp, #4272] ; 8-byte Folded Spill
fmul d3, d28, d3
str d31, [sp, #8704] ; 8-byte Folded Spill
fadd d3, d31, d3
str d16, [sp, #8448] ; 8-byte Folded Spill
fmul d5, d16, d29
fadd d3, d5, d3
mov x8, #43115
movk x8, #62349, lsl #16
movk x8, #30721, lsl #32
movk x8, #49115, lsl #48
fmov d5, x8
fmul d3, d3, d5
str q25, [sp, #5680] ; 16-byte Folded Spill
fmul d5, d23, d25
fadd d3, d5, d3
str q24, [sp, #5696] ; 16-byte Folded Spill
fmul d5, d22, d24
fadd d3, d5, d3
mov x8, #31036
movk x8, #52462, lsl #16
movk x8, #23267, lsl #32
movk x8, #16406, lsl #48
fmov d5, x8
fsub d3, d5, d3
fmul d16, d20, d4
str d6, [sp, #8472] ; 8-byte Folded Spill
fmul d4, d6, d28
str d4, [sp, #4264] ; 8-byte Folded Spill
fmul d4, d28, d4
str d17, [sp, #8456] ; 8-byte Folded Spill
fmul d5, d17, d29
fadd d4, d4, d5
str d16, [sp, #9096] ; 8-byte Folded Spill
fsub d4, d16, d4
fmul d0, d4, d0
fadd d20, d3, d0
mov x8, #52090
movk x8, #42545, lsl #16
movk x8, #26349, lsl #32
movk x8, #49113, lsl #48
fmov d0, x8
fmul d0, d2, d0
ldr q17, [sp, #11792] ; 16-byte Folded Reload
str q26, [sp, #1760] ; 16-byte Folded Spill
fmul d2, d17, d26
fadd d0, d2, d0
fmul d2, d19, d20
fsub d0, d2, d0
mov x8, #42186
movk x8, #52566, lsl #16
movk x8, #11879, lsl #32
movk x8, #16425, lsl #48
fmov d2, x8
fadd d0, d0, d2
ldr d4, [sp, #11064] ; 8-byte Folded Reload
fmul d2, d4, d27
str d2, [sp, #4256] ; 8-byte Folded Spill
fmul d2, d27, d2
ldr d5, [sp, #11056] ; 8-byte Folded Reload
fmul d3, d5, d21
fadd d2, d2, d3
mov x8, #28530
movk x8, #30490, lsl #16
movk x8, #27495, lsl #32
movk x8, #49093, lsl #48
ldr d3, [sp, #9240] ; 8-byte Folded Reload
fsub d2, d3, d2
fmov d3, x8
fmul d2, d2, d3
ldr q6, [sp, #9520] ; 16-byte Folded Reload
fmul d6, d6, d3
ldr q16, [sp, #9536] ; 16-byte Folded Reload
fmul d16, d16, d3
mov x8, #64990
movk x8, #28266, lsl #16
movk x8, #45172, lsl #32
movk x8, #49182, lsl #48
fmov d19, x8
str d18, [sp, #8440] ; 8-byte Folded Spill
fsub d3, d18, d4
str d16, [sp, #8744] ; 8-byte Folded Spill
fmul d4, d16, d19
str d4, [sp, #6152] ; 8-byte Folded Spill
fadd d4, d4, d3
str d7, [sp, #8432] ; 8-byte Folded Spill
fadd d3, d5, d7
str d6, [sp, #8424] ; 8-byte Folded Spill
str d19, [sp, #6376] ; 8-byte Folded Spill
fmul d5, d6, d19
str d5, [sp, #6160] ; 8-byte Folded Spill
fadd d3, d5, d3
str d3, [sp, #6816] ; 8-byte Folded Spill
str q20, [sp, #1728] ; 16-byte Folded Spill
fmul d3, d17, d20
str d4, [sp, #6416] ; 8-byte Folded Spill
ldur d5, [x29, #-256] ; 8-byte Folded Reload
fmul d4, d5, d4
str d4, [sp, #8304] ; 8-byte Folded Spill
cbz x20, LBB19_36
; %bb.35:
ldr d10, [x20, #184]
LBB19_36:
ldr d4, [sp, #11240] ; 8-byte Folded Reload
ldr d5, [sp, #8624] ; 8-byte Folded Reload
fsub d4, d5, d4
str d4, [sp, #6288] ; 8-byte Folded Spill
ldr d4, [sp, #8920] ; 8-byte Folded Reload
ldr d5, [sp, #8296] ; 8-byte Folded Reload
fadd d4, d4, d5
str q4, [sp, #6336] ; 16-byte Folded Spill
ldr d4, [sp, #8896] ; 8-byte Folded Reload
ldr d5, [sp, #8728] ; 8-byte Folded Reload
fadd d4, d4, d5
str q4, [sp, #8624] ; 16-byte Folded Spill
fadd d1, d1, d3
str q1, [sp, #6320] ; 16-byte Folded Spill
fadd d0, d0, d2
str q0, [sp, #5072] ; 16-byte Folded Spill
ldr d0, [sp, #10984] ; 8-byte Folded Reload
ldr d1, [sp, #10392] ; 8-byte Folded Reload
fsub d0, d1, d0
str d0, [sp, #10992] ; 8-byte Folded Spill
ldr d0, [sp, #10312] ; 8-byte Folded Reload
ldr d1, [sp, #10296] ; 8-byte Folded Reload
fsub d0, d1, d0
str d0, [sp, #10984] ; 8-byte Folded Spill
ldr d2, [sp, #10336] ; 8-byte Folded Reload
fsqrt d0, d2
fmov d1, #1.50000000
fmul d1, d0, d1
ldr d0, [sp, #10960] ; 8-byte Folded Reload
ldr d3, [sp, #10344] ; 8-byte Folded Reload
fmul d0, d0, d3
mov x8, #10523
movk x8, #38535, lsl #16
movk x8, #12921, lsl #32
movk x8, #16642, lsl #48
fmov d3, x8
str d3, [sp, #11240] ; 8-byte Folded Spill
fmul d0, d0, d3
str d1, [sp, #10344] ; 8-byte Folded Spill
fmul d0, d1, d0
fadd d1, d2, d2
str d1, [sp, #10312] ; 8-byte Folded Spill
fdiv d0, d0, d1
ldr d1, [sp, #10328] ; 8-byte Folded Reload
fmul d1, d1, d1
fmov d2, #1.00000000
fsub d2, d2, d1
ldr d1, [sp, #10976] ; 8-byte Folded Reload
ldr d3, [sp, #10320] ; 8-byte Folded Reload
fmul d1, d1, d3
fmov d7, #0.50000000
fmul d1, d1, d7
str d2, [sp, #10320] ; 8-byte Folded Spill
fmul d1, d2, d1
mov x8, #211106232532992
movk x8, #16498, lsl #48
fmov d2, x8
fmul d1, d1, d2
fmov d8, d2
ldr d2, [sp, #10400] ; 8-byte Folded Reload
fadd d2, d2, d2
str d2, [sp, #10296] ; 8-byte Folded Spill
fmul d0, d2, d0
fsub d0, d1, d0
ldur d18, [x29, #-176] ; 8-byte Folded Reload
ldr d23, [sp, #6720] ; 8-byte Folded Reload
fmul d1, d18, d23
ldr d24, [sp, #11864] ; 8-byte Folded Reload
ldr d2, [sp, #6664] ; 8-byte Folded Reload
fmul d3, d24, d2
fadd d1, d1, d3
ldr d21, [sp, #12016] ; 8-byte Folded Reload
ldr d2, [sp, #6656] ; 8-byte Folded Reload
fmul d3, d21, d2
fadd d3, d3, d1
fadd d0, d0, d3
fmul d1, d3, d7
fsub d9, d0, d1
str d9, [sp, #8264] ; 8-byte Folded Spill
ldr d0, [sp, #10424] ; 8-byte Folded Reload
fadd d0, d0, d0
str d0, [sp, #10392] ; 8-byte Folded Spill
ldr d1, [sp, #9720] ; 8-byte Folded Reload
fmul d0, d0, d1
ldr d1, [sp, #10936] ; 8-byte Folded Reload
ldr d2, [sp, #9728] ; 8-byte Folded Reload
fmul d1, d2, d1
fsub d28, d0, d1
ldr d27, [sp, #12320] ; 8-byte Folded Reload
ldr d14, [sp, #10160] ; 8-byte Folded Reload
fmul d0, d27, d14
ldur d29, [x29, #-184] ; 8-byte Folded Reload
fmul d1, d29, d28
fsub d12, d0, d1
ldr d31, [sp, #11888] ; 8-byte Folded Reload
fmul d1, d31, d28
ldr d4, [sp, #8400] ; 8-byte Folded Reload
fmul d4, d27, d4
fsub d17, d1, d4
fmul d1, d18, d12
ldr d30, [sp, #6072] ; 8-byte Folded Reload
fmul d0, d18, d30
str d0, [sp, #8920] ; 8-byte Folded Spill
fsub d1, d1, d0
fadd d1, d17, d1
ldur d6, [x29, #-168] ; 8-byte Folded Reload
ldr d20, [sp, #10352] ; 8-byte Folded Reload
fmul d4, d6, d20
fadd d1, d4, d1
fmul d4, d24, d12
fmul d0, d24, d30
str d0, [sp, #7712] ; 8-byte Folded Spill
fsub d4, d4, d0
ldr d5, [sp, #12200] ; 8-byte Folded Reload
fmul d5, d5, d17
fadd d4, d5, d4
ldur d22, [x29, #-216] ; 8-byte Folded Reload
fmul d5, d22, d20
fadd d4, d5, d4
ldr d0, [sp, #11104] ; 8-byte Folded Reload
fmul d2, d6, d0
str d2, [sp, #8896] ; 8-byte Folded Spill
fsub d1, d1, d2
fmul d2, d22, d0
str d2, [sp, #7704] ; 8-byte Folded Spill
fsub d2, d4, d2
str d2, [sp, #10400] ; 8-byte Folded Spill
fmul d4, d18, d1
fmul d5, d24, d2
fadd d4, d4, d5
fmul d5, d21, d12
fmul d2, d21, d30
str d2, [sp, #7696] ; 8-byte Folded Spill
fsub d5, d5, d2
ldr d16, [sp, #12160] ; 8-byte Folded Reload
str d17, [sp, #1848] ; 8-byte Folded Spill
fmul d16, d16, d17
fadd d5, d16, d5
ldr d19, [sp, #12336] ; 8-byte Folded Reload
fmul d16, d19, d20
fadd d5, d16, d5
fmul d0, d19, d0
str d0, [sp, #7680] ; 8-byte Folded Spill
fsub d0, d5, d0
str d0, [sp, #8728] ; 8-byte Folded Spill
fmul d5, d21, d0
fadd d4, d5, d4
ldr d6, [sp, #10168] ; 8-byte Folded Reload
fsqrt d5, d6
fmov d25, #1.50000000
fmul d2, d5, d25
ldr d0, [sp, #10944] ; 8-byte Folded Reload
ldr d5, [sp, #9736] ; 8-byte Folded Reload
fmul d5, d0, d5
mov x8, #18811
movk x8, #34700, lsl #16
movk x8, #61210, lsl #32
movk x8, #16643, lsl #48
fmov d0, x8
str d0, [sp, #8296] ; 8-byte Folded Spill
fmul d5, d5, d0
str d2, [sp, #10352] ; 8-byte Folded Spill
fmul d5, d2, d5
fadd d0, d6, d6
str d0, [sp, #10328] ; 8-byte Folded Spill
fdiv d5, d5, d0
ldr d0, [sp, #10152] ; 8-byte Folded Reload
fmul d17, d0, d0
fmov d26, #1.00000000
fsub d2, d26, d17
ldr d0, [sp, #10952] ; 8-byte Folded Reload
ldr d6, [sp, #9960] ; 8-byte Folded Reload
fmul d17, d0, d6
fmul d17, d17, d7
str d2, [sp, #10336] ; 8-byte Folded Spill
fmul d17, d2, d17
str d8, [sp, #7224] ; 8-byte Folded Spill
fmul d17, d17, d8
ldr d0, [sp, #10304] ; 8-byte Folded Reload
fadd d0, d0, d0
str d0, [sp, #10304] ; 8-byte Folded Spill
fmul d5, d0, d5
fsub d5, d17, d5
fsub d3, d9, d3
fsub d3, d3, d4
fadd d5, d5, d4
fmul d4, d4, d7
fsub d4, d5, d4
fadd d6, d4, d3
str d6, [sp, #8176] ; 8-byte Folded Spill
ldr d0, [sp, #10200] ; 8-byte Folded Reload
fmul d3, d18, d0
ldr d5, [sp, #12136] ; 8-byte Folded Reload
ldr d0, [sp, #10192] ; 8-byte Folded Reload
fmul d5, d5, d0
fadd d3, d3, d5
ldr d5, [sp, #11896] ; 8-byte Folded Reload
ldr d0, [sp, #10184] ; 8-byte Folded Reload
fmul d5, d5, d0
fadd d20, d5, d3
ldr d4, [sp, #9712] ; 8-byte Folded Reload
fsqrt d3, d4
fmul d2, d3, d25
ldr d0, [sp, #10384] ; 8-byte Folded Reload
ldr d3, [sp, #9120] ; 8-byte Folded Reload
fmul d3, d0, d3
mov x8, #45572
movk x8, #23979, lsl #16
movk x8, #34811, lsl #32
movk x8, #16645, lsl #48
fmov d0, x8
str d0, [sp, #9120] ; 8-byte Folded Spill
fmul d3, d3, d0
str d2, [sp, #9736] ; 8-byte Folded Spill
fmul d3, d2, d3
fadd d0, d4, d4
str d0, [sp, #9728] ; 8-byte Folded Spill
fdiv d3, d3, d0
ldr d0, [sp, #9184] ; 8-byte Folded Reload
fmul d5, d0, d0
fsub d2, d26, d5
ldr d0, [sp, #10376] ; 8-byte Folded Reload
ldr d4, [sp, #9176] ; 8-byte Folded Reload
fmul d5, d0, d4
fmov d4, #0.50000000
fmul d5, d5, d4
str d2, [sp, #9712] ; 8-byte Folded Spill
fmul d5, d2, d5
fmul d5, d5, d8
ldr d0, [sp, #9656] ; 8-byte Folded Reload
fadd d0, d0, d0
str d0, [sp, #9720] ; 8-byte Folded Spill
fmul d3, d0, d3
fsub d3, d5, d3
fadd d3, d3, d20
fmul d5, d20, d4
fsub d0, d3, d5
str d0, [sp, #9960] ; 8-byte Folded Spill
ldr d2, [sp, #11904] ; 8-byte Folded Reload
ldr d0, [sp, #11160] ; 8-byte Folded Reload
fmul d3, d2, d0
ldr d9, [sp, #9352] ; 8-byte Folded Reload
fmul d0, d2, d9
str d0, [sp, #7824] ; 8-byte Folded Spill
fsub d3, d3, d0
ldr d0, [sp, #11384] ; 8-byte Folded Reload
fmul d5, d0, d23
fadd d3, d3, d5
ldr d2, [sp, #11664] ; 8-byte Folded Reload
fmul d5, d2, d12
fadd d3, d5, d3
fmul d0, d2, d30
str d0, [sp, #7816] ; 8-byte Folded Spill
fsub d3, d3, d0
ldr d0, [sp, #11280] ; 8-byte Folded Reload
fmul d1, d0, d1
fadd d1, d1, d3
mov x8, #63706
movk x8, #13221, lsl #16
movk x8, #1281, lsl #32
movk x8, #16209, lsl #48
fmov d16, x8
fmul d3, d6, d16
str d16, [sp, #7168] ; 8-byte Folded Spill
fadd d1, d1, d3
ldr d26, [sp, #11936] ; 8-byte Folded Reload
ldr d15, [sp, #9968] ; 8-byte Folded Reload
fmul d3, d26, d15
ldr d25, [sp, #9632] ; 8-byte Folded Reload
ldr d7, [sp, #11176] ; 8-byte Folded Reload
fmul d5, d25, d7
fsub d3, d3, d5
ldr d5, [sp, #9640] ; 8-byte Folded Reload
fmul d8, d5, d29
ldr d6, [sp, #11568] ; 8-byte Folded Reload
fmul d5, d6, d8
fadd d3, d5, d3
ldr d5, [sp, #12184] ; 8-byte Folded Reload
ldr d0, [sp, #10208] ; 8-byte Folded Reload
fmul d5, d5, d0
fsub d3, d3, d5
ldr d11, [sp, #6120] ; 8-byte Folded Reload
ldr d2, [sp, #11736] ; 8-byte Folded Reload
fmul d5, d2, d11
fadd d3, d3, d5
ldr d5, [sp, #12176] ; 8-byte Folded Reload
fmul d5, d5, d14
fsub d3, d3, d5
ldr d2, [sp, #11648] ; 8-byte Folded Reload
fmul d5, d2, d28
fadd d3, d5, d3
ldr d0, [sp, #8272] ; 8-byte Folded Reload
fmul d5, d0, d27
ldr d23, [sp, #9296] ; 8-byte Folded Reload
fmul d18, d23, d31
fadd d5, d5, d18
ldr d18, [sp, #10008] ; 8-byte Folded Reload
fmul d18, d18, d29
fadd d2, d18, d5
mov x8, #54806
movk x8, #23353, lsl #16
movk x8, #56949, lsl #32
movk x8, #49094, lsl #48
fmov d4, x8
str d2, [sp, #6080] ; 8-byte Folded Spill
fmul d0, d2, d4
str d4, [sp, #7192] ; 8-byte Folded Spill
str d0, [sp, #8992] ; 8-byte Folded Spill
fadd d3, d0, d3
ldr d5, [sp, #9384] ; 8-byte Folded Reload
fmul d0, d23, d5
str d0, [sp, #7792] ; 8-byte Folded Spill
fadd d3, d0, d3
ldr d18, [sp, #9376] ; 8-byte Folded Reload
ldr d2, [sp, #8320] ; 8-byte Folded Reload
fmul d0, d2, d18
str d0, [sp, #7768] ; 8-byte Folded Spill
fadd d3, d0, d3
str d3, [sp, #1864] ; 8-byte Folded Spill
ldr d14, [sp, #12280] ; 8-byte Folded Reload
fmul d3, d14, d3
fadd d1, d3, d1
fmul d3, d31, d18
fmul d5, d27, d5
str d5, [sp, #4448] ; 8-byte Folded Spill
fsub d23, d3, d5
fmul d3, d24, d23
ldr d18, [sp, #6184] ; 8-byte Folded Reload
fmul d5, d22, d18
fadd d13, d3, d5
fmul d3, d21, d23
fmul d5, d19, d18
fadd d22, d3, d5
fmul d3, d27, d15
ldr d0, [sp, #11328] ; 8-byte Folded Reload
fmul d5, d25, d0
fsub d3, d5, d3
str d8, [sp, #5176] ; 8-byte Folded Spill
fmul d5, d29, d8
fsub d3, d3, d5
ldr d5, [sp, #12040] ; 8-byte Folded Reload
ldr d2, [sp, #11248] ; 8-byte Folded Reload
fmul d5, d2, d5
str d10, [sp, #6368] ; 8-byte Folded Spill
fsub d5, d10, d5
ldr d18, [sp, #6144] ; 8-byte Folded Reload
ldur d2, [x29, #-256] ; 8-byte Folded Reload
fmul d18, d2, d18
fsub d2, d5, d18
str d2, [sp, #9184] ; 8-byte Folded Spill
ldr d5, [sp, #12024] ; 8-byte Folded Reload
ldr d17, [sp, #11784] ; 8-byte Folded Reload
fmul d5, d5, d17
fsub d5, d2, d5
ldr d18, [sp, #11040] ; 8-byte Folded Reload
ldr d2, [sp, #12208] ; 8-byte Folded Reload
fmul d18, d2, d18
fsub d2, d5, d18
str d2, [sp, #10208] ; 8-byte Folded Spill
ldr d18, [sp, #12032] ; 8-byte Folded Reload
ldr d5, [sp, #11584] ; 8-byte Folded Reload
fmul d18, d18, d5
fsub d18, d2, d18
ldr d15, [sp, #12056] ; 8-byte Folded Reload
ldr d2, [sp, #10992] ; 8-byte Folded Reload
fmul d19, d15, d2
fsub d2, d18, d19
str d2, [sp, #10168] ; 8-byte Folded Spill
ldr d18, [sp, #12344] ; 8-byte Folded Reload
ldr d24, [sp, #11536] ; 8-byte Folded Reload
fmul d18, d18, d24
fsub d18, d2, d18
ldr d19, [sp, #11032] ; 8-byte Folded Reload
ldr d2, [sp, #11368] ; 8-byte Folded Reload
fmul d19, d2, d19
fsub d5, d18, d19
str d5, [sp, #10152] ; 8-byte Folded Spill
ldr d19, [sp, #12128] ; 8-byte Folded Reload
fmul d19, d19, d24
fsub d19, d5, d19
ldr d21, [sp, #11024] ; 8-byte Folded Reload
fmul d21, d2, d21
fsub d8, d19, d21
mov x8, #61406
movk x8, #16023, lsl #16
movk x8, #30452, lsl #32
movk x8, #16169, lsl #48
fmov d19, x8
fmul d19, d13, d19
str d19, [sp, #7632] ; 8-byte Folded Spill
fadd d1, d19, d1
mov x8, #64744
movk x8, #21380, lsl #16
movk x8, #23316, lsl #32
movk x8, #16210, lsl #48
fmov d19, x8
fmul d19, d22, d19
str d19, [sp, #7624] ; 8-byte Folded Spill
fadd d1, d19, d1
fmul d19, d8, d4
str d19, [sp, #9072] ; 8-byte Folded Spill
fadd d25, d3, d19
mov x8, #54125
movk x8, #53060, lsl #16
movk x8, #15481, lsl #32
movk x8, #16273, lsl #48
fmov d3, x8
str d3, [sp, #9176] ; 8-byte Folded Spill
fmul d3, d25, d3
fsub d3, d1, d3
ldr d1, [sp, #11944] ; 8-byte Folded Reload
fmul d1, d1, d0
ldr d19, [sp, #10768] ; 8-byte Folded Reload
fmul d19, d6, d19
fsub d1, d1, d19
fmul d19, d26, d29
fmul d21, d6, d27
fsub d21, d19, d21
fmul d19, d31, d7
str d21, [sp, #9968] ; 8-byte Folded Spill
fmul d21, d29, d21
fsub d19, d19, d21
str d8, [sp, #5192] ; 8-byte Folded Spill
fsub d1, d8, d1
mov x8, #54806
movk x8, #23353, lsl #16
movk x8, #56949, lsl #32
movk x8, #16326, lsl #48
fmov d0, x8
str d0, [sp, #8256] ; 8-byte Folded Spill
fmul d1, d1, d0
fmul d19, d19, d0
fadd d1, d19, d1
fadd d19, d30, d9
fadd d10, d19, d1
ldr d1, [sp, #9512] ; 8-byte Folded Reload
fmul d1, d1, d24
fadd d1, d1, d10
str d28, [sp, #4232] ; 8-byte Folded Spill
fadd d0, d11, d28
str d0, [sp, #10160] ; 8-byte Folded Spill
ldr d21, [sp, #9504] ; 8-byte Folded Reload
fmul d21, d21, d2
fadd d0, d21, d0
str d0, [sp, #4224] ; 8-byte Folded Spill
fmul d21, d2, d0
fadd d27, d21, d1
fmul d21, d27, d16
fsub d3, d3, d21
ldr d21, [sp, #6408] ; 8-byte Folded Reload
ldr d0, [sp, #11440] ; 8-byte Folded Reload
fmul d21, d21, d0
fadd d3, d21, d3
ldr d21, [sp, #6400] ; 8-byte Folded Reload
fmul d0, d21, d14
str d0, [sp, #4216] ; 8-byte Folded Spill
fmul d21, d0, d2
fsub d3, d3, d21
ldr d26, [sp, #11352] ; 8-byte Folded Reload
ldr d14, [sp, #9664] ; 8-byte Folded Reload
fmul d21, d26, d14
fsub d3, d3, d21
ldr d11, [sp, #9672] ; 8-byte Folded Reload
fmul d0, d26, d11
str d0, [sp, #7616] ; 8-byte Folded Spill
fsub d3, d3, d0
ldr d8, [sp, #10968] ; 8-byte Folded Reload
ldr d0, [sp, #10200] ; 8-byte Folded Reload
fmul d21, d8, d0
fadd d1, d21, d3
str d1, [sp, #10200] ; 8-byte Folded Spill
ldr d19, [sp, #8176] ; 8-byte Folded Reload
fsub d3, d19, d20
ldr d21, [sp, #9960] ; 8-byte Folded Reload
fadd d18, d3, d21
mov x8, #4359484439294640128
mov x9, #62612
movk x9, #18904, lsl #16
movk x9, #1144, lsl #32
movk x9, #49064, lsl #48
fmov d0, x8
str d0, [sp, #8248] ; 8-byte Folded Spill
fmul d3, d1, d0
fmov d0, x9
str d0, [sp, #8240] ; 8-byte Folded Spill
fmul d20, d18, d0
fsub d20, d20, d3
mov x8, #18456
movk x8, #63321, lsl #16
movk x8, #33926, lsl #32
movk x8, #48991, lsl #48
mov x9, #18456
movk x9, #63321, lsl #16
movk x9, #33926, lsl #32
movk x9, #16223, lsl #48
fmov d0, x8
str d0, [sp, #8232] ; 8-byte Folded Spill
fmul d3, d19, d0
fmov d0, x9
str d0, [sp, #8224] ; 8-byte Folded Spill
fmul d26, d27, d0
fadd d17, d3, d26
ldr d28, [sp, #12192] ; 8-byte Folded Reload
ldr d3, [sp, #11160] ; 8-byte Folded Reload
fmul d26, d28, d3
fmul d0, d28, d9
str d0, [sp, #7608] ; 8-byte Folded Spill
fsub d26, d26, d0
ldr d0, [sp, #6656] ; 8-byte Folded Reload
ldr d5, [sp, #11384] ; 8-byte Folded Reload
fmul d28, d5, d0
fadd d26, d26, d28
mov x8, #-7378697629483820647
movk x8, #39322
movk x8, #16297, lsl #48
fmov d0, x8
str d0, [sp, #8216] ; 8-byte Folded Spill
ldr d1, [sp, #8264] ; 8-byte Folded Reload
fmul d7, d1, d0
fadd d7, d26, d7
ldr d28, [sp, #12104] ; 8-byte Folded Reload
fmul d26, d28, d12
fadd d7, d26, d7
fmul d0, d28, d30
str d0, [sp, #7600] ; 8-byte Folded Spill
fsub d7, d7, d0
ldr d0, [sp, #8728] ; 8-byte Folded Reload
ldr d6, [sp, #11280] ; 8-byte Folded Reload
fmul d16, d6, d0
fadd d7, d16, d7
ldr d16, [sp, #9336] ; 8-byte Folded Reload
fmul d0, d16, d23
str d0, [sp, #7568] ; 8-byte Folded Spill
fadd d7, d0, d7
mov x8, #45724
movk x8, #42429, lsl #16
movk x8, #11379, lsl #32
movk x8, #48937, lsl #48
fmov d1, x8
fmul d0, d13, d1
str d1, [sp, #7048] ; 8-byte Folded Spill
str d0, [sp, #7536] ; 8-byte Folded Spill
fadd d7, d0, d7
mov x8, #45033
movk x8, #40035, lsl #16
movk x8, #524, lsl #32
movk x8, #48971, lsl #48
fmov d0, x8
str d0, [sp, #8208] ; 8-byte Folded Spill
fmul d0, d22, d0
str d0, [sp, #7520] ; 8-byte Folded Spill
fadd d7, d0, d7
mov x8, #56877
movk x8, #10885, lsl #16
movk x8, #2572, lsl #32
movk x8, #16289, lsl #48
fmov d0, x8
str d0, [sp, #8200] ; 8-byte Folded Spill
fmul d16, d25, d0
fadd d7, d7, d16
ldr d16, [sp, #10784] ; 8-byte Folded Reload
ldr d28, [sp, #6176] ; 8-byte Folded Reload
fmul d0, d16, d28
str d0, [sp, #7504] ; 8-byte Folded Spill
fadd d7, d0, d7
mov x8, #61406
movk x8, #16023, lsl #16
movk x8, #30452, lsl #32
movk x8, #16153, lsl #48
fmov d16, x8
ldr d29, [sp, #6168] ; 8-byte Folded Reload
fmul d0, d29, d16
str d0, [sp, #7488] ; 8-byte Folded Spill
fadd d31, d0, d7
ldur d26, [x29, #-200] ; 8-byte Folded Reload
fmul d7, d26, d31
fadd d17, d7, d17
ldr d16, [sp, #11976] ; 8-byte Folded Reload
fmul d7, d16, d3
fmul d0, d16, d9
str d0, [sp, #7472] ; 8-byte Folded Spill
fsub d7, d7, d0
ldr d0, [sp, #6664] ; 8-byte Folded Reload
fmul d16, d5, d0
fadd d7, d7, d16
ldr d16, [sp, #12064] ; 8-byte Folded Reload
fmul d0, d16, d12
fadd d0, d0, d7
fmul d4, d16, d30
str d4, [sp, #7456] ; 8-byte Folded Spill
fsub d0, d0, d4
ldr d4, [sp, #10400] ; 8-byte Folded Reload
fmul d6, d6, d4
fadd d0, d6, d0
ldr d4, [sp, #9344] ; 8-byte Folded Reload
fmul d4, d4, d23
str d4, [sp, #7440] ; 8-byte Folded Spill
fadd d0, d4, d0
mov x8, #40862
movk x8, #31695, lsl #16
movk x8, #12355, lsl #32
movk x8, #16198, lsl #48
fmov d4, x8
str d4, [sp, #11160] ; 8-byte Folded Spill
fmul d4, d13, d4
str d4, [sp, #7432] ; 8-byte Folded Spill
fadd d0, d4, d0
fmul d4, d22, d1
str d4, [sp, #7408] ; 8-byte Folded Spill
fadd d0, d4, d0
mov x8, #62994
movk x8, #14722, lsl #16
movk x8, #41829, lsl #32
movk x8, #16247, lsl #48
fmov d1, x8
str d1, [sp, #8168] ; 8-byte Folded Spill
fmul d6, d25, d1
fadd d0, d0, d6
ldr d6, [sp, #10776] ; 8-byte Folded Reload
fmul d4, d6, d28
str d4, [sp, #7392] ; 8-byte Folded Spill
fadd d0, d4, d0
mov x8, #64744
movk x8, #21380, lsl #16
movk x8, #23316, lsl #32
movk x8, #16194, lsl #48
fmov d6, x8
fmul d4, d29, d6
str d4, [sp, #7384] ; 8-byte Folded Spill
fsub d16, d0, d4
ldur d7, [x29, #-208] ; 8-byte Folded Reload
fmul d0, d7, d16
fadd d0, d0, d17
ldr d4, [sp, #11600] ; 8-byte Folded Reload
fmul d3, d4, d14
fsub d0, d0, d3
fmul d3, d4, d11
str d3, [sp, #7344] ; 8-byte Folded Spill
fsub d0, d0, d3
ldr d1, [sp, #10192] ; 8-byte Folded Reload
fmul d3, d8, d1
fadd d0, d3, d0
ldr d3, [sp, #8584] ; 8-byte Folded Reload
fmul d3, d3, d24
ldr d6, [sp, #8576] ; 8-byte Folded Reload
fmul d4, d6, d2
str d4, [sp, #4208] ; 8-byte Folded Spill
fmul d6, d2, d4
fsub d3, d3, d6
mov x8, #50080
movk x8, #49599, lsl #16
movk x8, #32579, lsl #32
movk x8, #49136, lsl #48
fmov d6, x8
ldr d5, [sp, #10152] ; 8-byte Folded Reload
fmul d4, d5, d6
fmov d22, d6
str d6, [sp, #6896] ; 8-byte Folded Spill
str d4, [sp, #7360] ; 8-byte Folded Spill
fadd d3, d3, d4
mov x8, #45974
movk x8, #34787, lsl #16
movk x8, #35902, lsl #32
movk x8, #16285, lsl #48
fmov d1, x8
str d1, [sp, #8160] ; 8-byte Folded Spill
fmul d6, d3, d1
fadd d0, d6, d0
mov x8, #4363988038922010624
fmov d4, x8
fmul d6, d0, d4
fmov d23, d4
str d4, [sp, #6440] ; 8-byte Folded Spill
fadd d6, d20, d6
mov x8, #36544
movk x8, #43611, lsl #16
movk x8, #860, lsl #32
movk x8, #16326, lsl #48
fmov d1, x8
str d1, [sp, #7992] ; 8-byte Folded Spill
fmul d4, d19, d1
fmul d1, d27, d1
fsub d1, d4, d1
str d31, [sp, #1840] ; 8-byte Folded Spill
fmul d4, d7, d31
fadd d1, d4, d1
str d16, [sp, #1832] ; 8-byte Folded Spill
fmul d4, d26, d16
fsub d1, d1, d4
ldr d7, [sp, #11464] ; 8-byte Folded Reload
fmul d4, d7, d14
fsub d1, d1, d4
fmul d4, d7, d11
str d4, [sp, #7336] ; 8-byte Folded Spill
fsub d1, d1, d4
ldr d4, [sp, #10184] ; 8-byte Folded Reload
fmul d4, d8, d4
fadd d1, d4, d1
mov x8, #43516
movk x8, #54001, lsl #16
movk x8, #25165, lsl #32
movk x8, #16240, lsl #48
fmov d4, x8
str d4, [sp, #7984] ; 8-byte Folded Spill
fmul d4, d21, d4
fadd d1, d4, d1
mov x8, #49235
movk x8, #28989, lsl #16
movk x8, #40841, lsl #32
movk x8, #16312, lsl #48
fmov d4, x8
str d4, [sp, #7976] ; 8-byte Folded Spill
fmul d3, d3, d4
fadd d1, d3, d1
ldr d3, [sp, #9624] ; 8-byte Folded Reload
fmul d3, d3, d24
fsub d3, d5, d3
mov x8, #50080
movk x8, #49599, lsl #16
movk x8, #32579, lsl #32
movk x8, #16368, lsl #48
fmov d4, x8
str d4, [sp, #6992] ; 8-byte Folded Spill
fmul d3, d3, d4
ldr d4, [sp, #9648] ; 8-byte Folded Reload
fmul d4, d4, d2
str d4, [sp, #9960] ; 8-byte Folded Spill
fmul d4, d2, d4
fmul d4, d4, d22
fadd d3, d3, d4
fadd d3, d11, d3
str d10, [sp, #7592] ; 8-byte Folded Spill
fadd d17, d3, d10
ldr d3, [sp, #9480] ; 8-byte Folded Reload
fmul d3, d3, d24
fadd d3, d3, d17
ldr d4, [sp, #6112] ; 8-byte Folded Reload
ldr d5, [sp, #10160] ; 8-byte Folded Reload
fadd d5, d5, d4
ldr d4, [sp, #9472] ; 8-byte Folded Reload
fmul d4, d4, d2
fadd d4, d4, d5
fmov d19, d5
str d4, [sp, #4200] ; 8-byte Folded Spill
fmul d4, d2, d4
fadd d3, d4, d3
fadd d4, d1, d6
mov x8, #62612
movk x8, #18904, lsl #16
movk x8, #1144, lsl #32
movk x8, #16296, lsl #48
fmov d2, x8
str d2, [sp, #6800] ; 8-byte Folded Spill
fmul d6, d3, d2
fadd d7, d4, d6
mov x8, #4354980839667269632
fmov d2, x8
str d2, [sp, #6784] ; 8-byte Folded Spill
ldr d4, [sp, #10200] ; 8-byte Folded Reload
fmul d4, d4, d2
mov x8, #47272
movk x8, #56762, lsl #16
movk x8, #43178, lsl #32
movk x8, #49060, lsl #48
fmov d2, x8
str d2, [sp, #6768] ; 8-byte Folded Spill
fmul d6, d18, d2
fadd d4, d4, d6
fadd d0, d0, d4
fmul d1, d1, d23
fadd d0, d0, d1
mov x8, #47272
movk x8, #56762, lsl #16
movk x8, #43178, lsl #32
movk x8, #16292, lsl #48
fmov d1, x8
str d1, [sp, #6736] ; 8-byte Folded Spill
fmul d1, d3, d1
fadd d16, d0, d1
ldr q3, [sp, #12000] ; 16-byte Folded Reload
fmul d0, d3, d7
ldr q4, [sp, #11840] ; 16-byte Folded Reload
fmul d1, d4, d16
fsub d2, d0, d1
mov x8, #11201
movk x8, #50599, lsl #16
movk x8, #31589, lsl #32
movk x8, #16242, lsl #48
fmov d1, x8
fmul d0, d18, d1
fmov d20, d1
str d1, [sp, #6432] ; 8-byte Folded Spill
ldr q6, [sp, #12240] ; 16-byte Folded Reload
fmul d1, d6, d2
fsub d0, d1, d0
mov x8, #43115
movk x8, #62349, lsl #16
movk x8, #30721, lsl #32
movk x8, #16347, lsl #48
fmov d5, x8
fmul d1, d18, d5
fmov d21, d5
str d5, [sp, #6472] ; 8-byte Folded Spill
str q16, [sp, #5648] ; 16-byte Folded Spill
fmul d3, d3, d16
fsub d1, d3, d1
str q7, [sp, #5664] ; 16-byte Folded Spill
fmul d3, d4, d7
fadd d1, d3, d1
mov x8, #6432
movk x8, #24166, lsl #16
movk x8, #7623, lsl #32
movk x8, #16309, lsl #48
fmov d3, x8
str d3, [sp, #6512] ; 8-byte Folded Spill
ldr d4, [sp, #10168] ; 8-byte Folded Reload
fmul d3, d4, d3
str d17, [sp, #7328] ; 8-byte Folded Spill
fadd d5, d3, d17
ldr d3, [sp, #8536] ; 8-byte Folded Reload
ldr d7, [sp, #11584] ; 8-byte Folded Reload
fmul d3, d3, d7
fadd d3, d3, d5
ldr d4, [sp, #8544] ; 8-byte Folded Reload
fmul d4, d4, d15
str d19, [sp, #4968] ; 8-byte Folded Spill
fadd d4, d4, d19
str d4, [sp, #4192] ; 8-byte Folded Spill
fmul d4, d15, d4
fadd d3, d4, d3
fmul d3, d3, d21
fadd d1, d1, d3
ldr d3, [sp, #8552] ; 8-byte Folded Reload
fmul d3, d3, d7
fmov d17, d7
ldr d4, [sp, #8528] ; 8-byte Folded Reload
fmul d4, d4, d15
str d4, [sp, #4184] ; 8-byte Folded Spill
fmul d4, d15, d4
fsub d3, d3, d4
mov x8, #5915
movk x8, #64709, lsl #16
movk x8, #30489, lsl #32
movk x8, #49160, lsl #48
fmov d4, x8
str d4, [sp, #6504] ; 8-byte Folded Spill
ldr d16, [sp, #10208] ; 8-byte Folded Reload
fmul d4, d16, d4
str d4, [sp, #6448] ; 8-byte Folded Spill
fadd d3, d3, d4
mov x8, #2356
movk x8, #12413, lsl #16
movk x8, #55910, lsl #32
movk x8, #16327, lsl #48
fmov d4, x8
str d4, [sp, #6496] ; 8-byte Folded Spill
fmul d3, d3, d4
fsub d7, d1, d3
ldr q4, [sp, #11824] ; 16-byte Folded Reload
fmul d1, d4, d7
fsub d0, d0, d1
ldr d1, [sp, #10696] ; 8-byte Folded Reload
fmul d1, d1, d17
fsub d1, d16, d1
mov x8, #5915
movk x8, #64709, lsl #16
movk x8, #30489, lsl #32
movk x8, #16392, lsl #48
fmov d16, x8
str d16, [sp, #6488] ; 8-byte Folded Spill
fmul d1, d1, d16
ldr d3, [sp, #10704] ; 8-byte Folded Reload
fmul d3, d3, d15
str d3, [sp, #8264] ; 8-byte Folded Spill
fmul d3, d15, d3
fmul d3, d3, d16
fadd d1, d3, d1
str d5, [sp, #6456] ; 8-byte Folded Spill
fadd d3, d1, d5
ldr d1, [sp, #9464] ; 8-byte Folded Reload
ldr d17, [sp, #11784] ; 8-byte Folded Reload
fmul d1, d1, d17
str d3, [sp, #8176] ; 8-byte Folded Spill
fadd d1, d1, d3
ldr d3, [sp, #9456] ; 8-byte Folded Reload
ldr d5, [sp, #12208] ; 8-byte Folded Reload
fmul d3, d3, d5
fadd d3, d3, d19
str d3, [sp, #4176] ; 8-byte Folded Spill
fmul d3, d5, d3
fadd d1, d3, d1
fmul d3, d1, d20
fadd d0, d0, d3
str q0, [sp, #5056] ; 16-byte Folded Spill
mov x8, #52090
movk x8, #42545, lsl #16
movk x8, #26349, lsl #32
movk x8, #16345, lsl #48
fmov d16, x8
str d16, [sp, #6464] ; 8-byte Folded Spill
str d18, [sp, #5016] ; 8-byte Folded Spill
fmul d0, d18, d16
str q2, [sp, #1680] ; 16-byte Folded Spill
fmul d3, d4, d2
fsub d0, d3, d0
str q7, [sp, #1664] ; 16-byte Folded Spill
fmul d3, d6, d7
fadd d0, d0, d3
fmul d1, d1, d16
fadd d0, d0, d1
fmov d2, d17
ldr d1, [sp, #11064] ; 8-byte Folded Reload
fmul d1, d1, d17
ldr d2, [sp, #11056] ; 8-byte Folded Reload
fmul d2, d2, d5
str d2, [sp, #4168] ; 8-byte Folded Spill
fmul d3, d5, d2
fsub d1, d1, d3
mov x8, #64990
movk x8, #28266, lsl #16
movk x8, #45172, lsl #32
movk x8, #49182, lsl #48
fmov d3, x8
ldr d31, [sp, #9184] ; 8-byte Folded Reload
fmul d2, d31, d3
str d2, [sp, #8728] ; 8-byte Folded Spill
fadd d1, d1, d2
mov x8, #28530
movk x8, #30490, lsl #16
movk x8, #27495, lsl #32
movk x8, #16325, lsl #48
fmov d2, x8
str d2, [sp, #6480] ; 8-byte Folded Spill
fmul d1, d1, d2
fsub d0, d0, d1
str q0, [sp, #5024] ; 16-byte Folded Spill
ldr d0, [sp, #10856] ; 8-byte Folded Reload
ldr d1, [sp, #8848] ; 8-byte Folded Reload
fmul d0, d0, d1
ldr d1, [sp, #11240] ; 8-byte Folded Reload
fmul d0, d0, d1
ldr d1, [sp, #9104] ; 8-byte Folded Reload
fmul d1, d1, d1
fmov d22, #1.00000000
fsub d2, d22, d1
ldr d1, [sp, #10864] ; 8-byte Folded Reload
ldr d3, [sp, #8864] ; 8-byte Folded Reload
fmul d1, d1, d3
fmov d5, #0.50000000
fmul d1, d1, d5
str d2, [sp, #10200] ; 8-byte Folded Spill
fmul d1, d2, d1
ldr d23, [sp, #7224] ; 8-byte Folded Reload
fmul d1, d1, d23
ldr d2, [sp, #10176] ; 8-byte Folded Reload
fadd d2, d2, d2
ldr d4, [sp, #9112] ; 8-byte Folded Reload
fsqrt d3, d4
fmov d6, #1.50000000
fmul d3, d3, d6
str d3, [sp, #10160] ; 8-byte Folded Spill
fmul d0, d3, d0
fadd d3, d4, d4
str d3, [sp, #10152] ; 8-byte Folded Spill
fdiv d0, d0, d3
str d2, [sp, #10168] ; 8-byte Folded Spill
fmul d0, d2, d0
fsub d0, d1, d0
ldur d26, [x29, #-192] ; 8-byte Folded Reload
ldr d17, [sp, #6592] ; 8-byte Folded Reload
fmul d1, d26, d17
ldr d15, [sp, #11768] ; 8-byte Folded Reload
ldr d2, [sp, #6528] ; 8-byte Folded Reload
fmul d3, d15, d2
fadd d1, d1, d3
ldr d13, [sp, #11776] ; 8-byte Folded Reload
ldr d2, [sp, #6520] ; 8-byte Folded Reload
fmul d3, d13, d2
fadd d1, d3, d1
fadd d0, d0, d1
fmul d3, d1, d5
fmov d25, #0.50000000
fsub d29, d0, d3
str d29, [sp, #7160] ; 8-byte Folded Spill
ldr d0, [sp, #10224] ; 8-byte Folded Reload
fadd d0, d0, d0
str d0, [sp, #10400] ; 8-byte Folded Spill
ldr d2, [sp, #8672] ; 8-byte Folded Reload
fmul d0, d0, d2
ldr d2, [sp, #10832] ; 8-byte Folded Reload
ldr d3, [sp, #8840] ; 8-byte Folded Reload
fmul d3, d3, d2
fsub d28, d0, d3
ldur d7, [x29, #-248] ; 8-byte Folded Reload
ldr d19, [sp, #8712] ; 8-byte Folded Reload
fmul d0, d7, d19
ldr d20, [sp, #12232] ; 8-byte Folded Reload
fmul d3, d20, d28
fsub d10, d0, d3
ldr d27, [sp, #11968] ; 8-byte Folded Reload
fmul d0, d27, d28
ldr d2, [sp, #8376] ; 8-byte Folded Reload
fmul d3, d7, d2
fsub d16, d0, d3
fmul d0, d26, d10
ldr d24, [sp, #5912] ; 8-byte Folded Reload
fmul d2, d26, d24
str d2, [sp, #8848] ; 8-byte Folded Spill
fsub d0, d0, d2
fadd d0, d16, d0
ldur d5, [x29, #-160] ; 8-byte Folded Reload
ldr d18, [sp, #9680] ; 8-byte Folded Reload
fmul d3, d5, d18
fadd d0, d3, d0
fmul d3, d15, d10
fmul d2, d15, d24
str d2, [sp, #7184] ; 8-byte Folded Spill
fsub d3, d3, d2
ldr d4, [sp, #12144] ; 8-byte Folded Reload
fmul d4, d4, d16
fadd d3, d4, d3
ldur d14, [x29, #-232] ; 8-byte Folded Reload
fmul d4, d14, d18
fadd d3, d4, d3
ldr d2, [sp, #11072] ; 8-byte Folded Reload
fmul d4, d5, d2
str d4, [sp, #8840] ; 8-byte Folded Spill
fsub d0, d0, d4
fmul d4, d14, d2
str d4, [sp, #7136] ; 8-byte Folded Spill
fsub d4, d3, d4
str d4, [sp, #9112] ; 8-byte Folded Spill
fmul d3, d26, d0
fmul d4, d15, d4
fadd d3, d3, d4
fmul d4, d13, d10
fmul d5, d13, d24
str d5, [sp, #7096] ; 8-byte Folded Spill
fsub d4, d4, d5
ldr d5, [sp, #12152] ; 8-byte Folded Reload
str d16, [sp, #1792] ; 8-byte Folded Spill
fmul d5, d5, d16
fadd d4, d5, d4
ldr d30, [sp, #12296] ; 8-byte Folded Reload
fmul d5, d30, d18
fadd d4, d5, d4
fmul d2, d30, d2
str d2, [sp, #7072] ; 8-byte Folded Spill
fsub d2, d4, d2
str d2, [sp, #9104] ; 8-byte Folded Spill
fmul d4, d13, d2
fadd d3, d4, d3
ldr d2, [sp, #10840] ; 8-byte Folded Reload
ldr d4, [sp, #8680] ; 8-byte Folded Reload
fmul d4, d2, d4
ldr d2, [sp, #8296] ; 8-byte Folded Reload
fmul d4, d4, d2
ldr d2, [sp, #8720] ; 8-byte Folded Reload
fmul d5, d2, d2
fsub d16, d22, d5
fmov d22, #1.00000000
ldr d2, [sp, #10848] ; 8-byte Folded Reload
ldr d5, [sp, #8688] ; 8-byte Folded Reload
fmul d5, d2, d5
fmul d5, d5, d25
str d16, [sp, #10208] ; 8-byte Folded Spill
fmul d5, d16, d5
fmul d5, d5, d23
ldr d2, [sp, #8888] ; 8-byte Folded Reload
fadd d2, d2, d2
ldr d18, [sp, #8736] ; 8-byte Folded Reload
fsqrt d25, d18
fmul d16, d25, d6
fmov d6, #1.50000000
str d16, [sp, #10184] ; 8-byte Folded Spill
fmul d4, d16, d4
fadd d16, d18, d18
str d16, [sp, #10176] ; 8-byte Folded Spill
fdiv d4, d4, d16
str d2, [sp, #10192] ; 8-byte Folded Spill
fmul d4, d2, d4
fsub d4, d5, d4
fsub d1, d29, d1
fsub d1, d1, d3
fadd d4, d4, d3
fmov d5, #0.50000000
fmul d3, d3, d5
fsub d3, d4, d3
fadd d18, d3, d1
str d18, [sp, #8688] ; 8-byte Folded Spill
ldr d1, [sp, #9328] ; 8-byte Folded Reload
fmul d1, d26, d1
ldr d3, [sp, #11688] ; 8-byte Folded Reload
ldr d2, [sp, #9232] ; 8-byte Folded Reload
fmul d3, d3, d2
fadd d1, d1, d3
ldr d3, [sp, #11632] ; 8-byte Folded Reload
ldr d2, [sp, #9224] ; 8-byte Folded Reload
fmul d3, d3, d2
fadd d11, d3, d1
ldr d1, [sp, #8352] ; 8-byte Folded Reload
fmul d1, d1, d1
fsub d2, d22, d1
ldr d1, [sp, #10136] ; 8-byte Folded Reload
ldr d3, [sp, #8312] ; 8-byte Folded Reload
fmul d1, d1, d3
fmul d1, d1, d5
fmov d16, #0.50000000
str d2, [sp, #9680] ; 8-byte Folded Spill
fmul d1, d2, d1
fmul d1, d1, d23
ldr d5, [sp, #8360] ; 8-byte Folded Reload
fsqrt d2, d5
fmul d3, d2, d6
ldr d2, [sp, #10144] ; 8-byte Folded Reload
ldr d4, [sp, #8344] ; 8-byte Folded Reload
fmul d2, d2, d4
ldr d4, [sp, #9120] ; 8-byte Folded Reload
fmul d2, d2, d4
ldr d4, [sp, #8368] ; 8-byte Folded Reload
fadd d4, d4, d4
str d3, [sp, #9672] ; 8-byte Folded Spill
fmul d2, d3, d2
fadd d3, d5, d5
str d3, [sp, #9656] ; 8-byte Folded Spill
fdiv d2, d2, d3
str d4, [sp, #9664] ; 8-byte Folded Spill
fmul d2, d4, d2
fsub d1, d1, d2
fadd d1, d1, d11
fmul d2, d11, d16
fsub d1, d1, d2
str d1, [sp, #11240] ; 8-byte Folded Spill
ldr d2, [sp, #11728] ; 8-byte Folded Reload
ldr d1, [sp, #9864] ; 8-byte Folded Reload
fmul d1, d2, d1
ldr d8, [sp, #9152] ; 8-byte Folded Reload
fmul d2, d2, d8
str d2, [sp, #7216] ; 8-byte Folded Spill
fsub d1, d1, d2
ldr d2, [sp, #11360] ; 8-byte Folded Reload
fmul d2, d2, d17
fadd d1, d1, d2
ldr d3, [sp, #11560] ; 8-byte Folded Reload
fmul d2, d3, d10
fadd d1, d2, d1
fmul d2, d3, d24
str d2, [sp, #7200] ; 8-byte Folded Spill
fsub d1, d1, d2
ldr d2, [sp, #11272] ; 8-byte Folded Reload
fmul d0, d2, d0
fadd d0, d0, d1
ldr d29, [sp, #7168] ; 8-byte Folded Reload
fmul d1, d18, d29
fsub d0, d0, d1
ldr d22, [sp, #11920] ; 8-byte Folded Reload
ldr d18, [sp, #8696] ; 8-byte Folded Reload
fmul d1, d22, d18
ldr d26, [sp, #9560] ; 8-byte Folded Reload
ldr d25, [sp, #11168] ; 8-byte Folded Reload
fmul d2, d26, d25
fsub d1, d1, d2
ldr d2, [sp, #9568] ; 8-byte Folded Reload
fmul d17, d2, d20
ldr d6, [sp, #11512] ; 8-byte Folded Reload
fmul d2, d6, d17
fadd d1, d2, d1
ldr d2, [sp, #12288] ; 8-byte Folded Reload
ldr d3, [sp, #9872] ; 8-byte Folded Reload
fmul d2, d2, d3
fsub d1, d1, d2
ldr d12, [sp, #6088] ; 8-byte Folded Reload
ldr d2, [sp, #11720] ; 8-byte Folded Reload
fmul d2, d2, d12
fadd d1, d1, d2
ldr d2, [sp, #12168] ; 8-byte Folded Reload
fmul d2, d2, d19
fsub d1, d1, d2
ldr d2, [sp, #11696] ; 8-byte Folded Reload
fmul d2, d2, d28
fadd d1, d2, d1
ldr d2, [sp, #7896] ; 8-byte Folded Reload
fmul d2, d2, d7
ldr d4, [sp, #9128] ; 8-byte Folded Reload
fmul d3, d4, d27
fadd d2, d2, d3
ldr d3, [sp, #9576] ; 8-byte Folded Reload
fmul d3, d3, d20
fadd d2, d3, d2
str d2, [sp, #6056] ; 8-byte Folded Spill
ldr d19, [sp, #7192] ; 8-byte Folded Reload
fmul d2, d2, d19
str d2, [sp, #8864] ; 8-byte Folded Spill
fadd d1, d2, d1
ldr d2, [sp, #9368] ; 8-byte Folded Reload
fmul d3, d4, d2
str d3, [sp, #7224] ; 8-byte Folded Spill
fadd d1, d3, d1
ldr d3, [sp, #9360] ; 8-byte Folded Reload
ldr d4, [sp, #8000] ; 8-byte Folded Reload
fmul d4, d4, d3
str d4, [sp, #7208] ; 8-byte Folded Spill
fadd d1, d4, d1
str d1, [sp, #1800] ; 8-byte Folded Spill
ldur d16, [x29, #-240] ; 8-byte Folded Reload
fmul d1, d16, d1
fadd d9, d1, d0
fmul d1, d27, d3
fmul d0, d7, d2
str d0, [sp, #1712] ; 8-byte Folded Spill
fsub d23, d1, d0
fmul d2, d15, d23
ldr d4, [sp, #5152] ; 8-byte Folded Reload
fmul d3, d14, d4
fadd d5, d2, d3
str d5, [sp, #8680] ; 8-byte Folded Spill
fmul d2, d13, d23
fmul d4, d30, d4
fadd d21, d2, d4
fmul d4, d7, d18
ldr d2, [sp, #11312] ; 8-byte Folded Reload
fmul d26, d26, d2
fsub d4, d26, d4
str d17, [sp, #5096] ; 8-byte Folded Spill
fmul d26, d20, d17
fsub d4, d4, d26
ldr d26, [sp, #12080] ; 8-byte Folded Reload
ldr d0, [sp, #11640] ; 8-byte Folded Reload
fmul d26, d26, d0
fmov d0, d31
fsub d26, d31, d26
ldr d17, [sp, #11016] ; 8-byte Folded Reload
ldr d31, [sp, #12072] ; 8-byte Folded Reload
fmul d30, d31, d17
fsub d0, d26, d30
str d0, [sp, #9120] ; 8-byte Folded Spill
ldr d26, [sp, #12112] ; 8-byte Folded Reload
ldr d1, [sp, #11520] ; 8-byte Folded Reload
fmul d26, d26, d1
fsub d26, d0, d26
ldr d30, [sp, #12048] ; 8-byte Folded Reload
ldr d0, [sp, #10984] ; 8-byte Folded Reload
fmul d14, d30, d0
fsub d0, d26, d14
str d0, [sp, #8736] ; 8-byte Folded Spill
ldur d13, [x29, #-224] ; 8-byte Folded Reload
ldr d18, [sp, #11504] ; 8-byte Folded Reload
fmul d14, d13, d18
fsub d14, d0, d14
ldr d17, [sp, #11008] ; 8-byte Folded Reload
ldr d3, [sp, #11624] ; 8-byte Folded Reload
fmul d15, d3, d17
fsub d0, d14, d15
str d0, [sp, #8720] ; 8-byte Folded Spill
ldr d13, [sp, #12264] ; 8-byte Folded Reload
fmul d15, d13, d18
fsub d15, d0, d15
ldr d17, [sp, #11000] ; 8-byte Folded Reload
fmul d13, d3, d17
fsub d0, d15, d13
mov x8, #61406
movk x8, #16023, lsl #16
movk x8, #30452, lsl #32
movk x8, #48937, lsl #48
fmov d13, x8
fmul d1, d5, d13
str d1, [sp, #7024] ; 8-byte Folded Spill
fadd d1, d1, d9
mov x8, #64744
movk x8, #21380, lsl #16
movk x8, #23316, lsl #32
movk x8, #48978, lsl #48
fmov d13, x8
fmul d17, d21, d13
fmov d14, d21
str d17, [sp, #6984] ; 8-byte Folded Spill
fadd d13, d17, d1
fmul d1, d0, d19
str d1, [sp, #8888] ; 8-byte Folded Spill
fadd d19, d4, d1
ldr d1, [sp, #9176] ; 8-byte Folded Reload
fmul d4, d19, d1
fadd d9, d13, d4
ldr d4, [sp, #11928] ; 8-byte Folded Reload
fmul d4, d4, d2
ldr d17, [sp, #10096] ; 8-byte Folded Reload
fmul d13, d6, d17
fsub d4, d4, d13
fmul d13, d22, d20
fmul d15, d6, d7
fsub d5, d13, d15
fmul d13, d27, d25
str d5, [sp, #9872] ; 8-byte Folded Spill
fmul d15, d20, d5
fsub d13, d13, d15
str d0, [sp, #5128] ; 8-byte Folded Spill
fsub d4, d0, d4
ldr d0, [sp, #8256] ; 8-byte Folded Reload
fmul d4, d4, d0
fmul d27, d13, d0
fadd d4, d27, d4
fadd d27, d24, d8
fadd d26, d27, d4
ldr d4, [sp, #9432] ; 8-byte Folded Reload
fmul d4, d4, d18
fadd d27, d4, d26
str d28, [sp, #4160] ; 8-byte Folded Spill
fadd d0, d12, d28
str d0, [sp, #9176] ; 8-byte Folded Spill
ldr d5, [sp, #9424] ; 8-byte Folded Reload
fmul d13, d5, d3
fadd d0, d13, d0
str d0, [sp, #4136] ; 8-byte Folded Spill
fmul d13, d3, d0
fadd d27, d13, d27
fmul d12, d27, d29
fadd d9, d9, d12
ldr d5, [sp, #6392] ; 8-byte Folded Reload
ldr d0, [sp, #11424] ; 8-byte Folded Reload
fmul d12, d5, d0
fadd d9, d12, d9
ldr d5, [sp, #6384] ; 8-byte Folded Reload
fmul d0, d5, d16
str d0, [sp, #4128] ; 8-byte Folded Spill
fmul d12, d3, d0
fsub d9, d9, d12
ldr d0, [sp, #11320] ; 8-byte Folded Reload
ldr d21, [sp, #8592] ; 8-byte Folded Reload
fmul d12, d0, d21
fsub d9, d9, d12
ldr d28, [sp, #8608] ; 8-byte Folded Reload
fmul d0, d0, d28
str d0, [sp, #6976] ; 8-byte Folded Spill
fsub d9, d9, d0
ldr d22, [sp, #10872] ; 8-byte Folded Reload
ldr d0, [sp, #9328] ; 8-byte Folded Reload
fmul d12, d22, d0
fadd d2, d12, d9
str d2, [sp, #9328] ; 8-byte Folded Spill
ldr d1, [sp, #8688] ; 8-byte Folded Reload
fsub d25, d1, d11
ldr d0, [sp, #11240] ; 8-byte Folded Reload
fadd d20, d25, d0
ldr d0, [sp, #8248] ; 8-byte Folded Reload
fmul d25, d2, d0
ldr d0, [sp, #8240] ; 8-byte Folded Reload
fmul d12, d20, d0
fadd d0, d25, d12
str d0, [sp, #8712] ; 8-byte Folded Spill
ldr d0, [sp, #8232] ; 8-byte Folded Reload
fmul d12, d1, d0
fmov d25, d1
ldr d0, [sp, #8224] ; 8-byte Folded Reload
fmul d13, d27, d0
fadd d12, d12, d13
ldr d5, [sp, #12096] ; 8-byte Folded Reload
ldr d4, [sp, #9864] ; 8-byte Folded Reload
fmul d13, d5, d4
fmul d0, d5, d8
str d0, [sp, #7168] ; 8-byte Folded Spill
fsub d13, d13, d0
ldr d0, [sp, #6520] ; 8-byte Folded Reload
ldr d6, [sp, #11360] ; 8-byte Folded Reload
fmul d15, d6, d0
fadd d13, d13, d15
ldur d9, [x29, #-256] ; 8-byte Folded Reload
ldr d0, [sp, #8216] ; 8-byte Folded Reload
ldr d1, [sp, #7160] ; 8-byte Folded Reload
fmul d29, d1, d0
fadd d29, d13, d29
ldr d5, [sp, #12088] ; 8-byte Folded Reload
fmul d13, d5, d10
fadd d29, d13, d29
ldr d13, [sp, #12208] ; 8-byte Folded Reload
fmul d0, d5, d24
str d0, [sp, #7192] ; 8-byte Folded Spill
fsub d29, d29, d0
ldr d0, [sp, #9104] ; 8-byte Folded Reload
ldr d2, [sp, #11272] ; 8-byte Folded Reload
fmul d11, d2, d0
fadd d29, d11, d29
ldr d0, [sp, #9136] ; 8-byte Folded Reload
fmul d0, d0, d23
str d0, [sp, #7160] ; 8-byte Folded Spill
fadd d29, d0, d29
ldr d1, [sp, #7048] ; 8-byte Folded Reload
ldr d15, [sp, #8680] ; 8-byte Folded Reload
fmul d0, d15, d1
str d0, [sp, #7120] ; 8-byte Folded Spill
fadd d29, d0, d29
ldr d0, [sp, #8208] ; 8-byte Folded Reload
fmul d0, d14, d0
str d0, [sp, #7088] ; 8-byte Folded Spill
fadd d29, d0, d29
ldr d0, [sp, #8200] ; 8-byte Folded Reload
fmul d11, d19, d0
fadd d29, d29, d11
ldr d5, [sp, #10760] ; 8-byte Folded Reload
ldr d7, [sp, #5144] ; 8-byte Folded Reload
fmul d0, d5, d7
str d0, [sp, #7104] ; 8-byte Folded Spill
fadd d29, d0, d29
mov x8, #61406
movk x8, #16023, lsl #16
movk x8, #30452, lsl #32
movk x8, #48921, lsl #48
fmov d11, x8
ldr d16, [sp, #5136] ; 8-byte Folded Reload
fmul d0, d16, d11
str d0, [sp, #7056] ; 8-byte Folded Spill
fadd d17, d0, d29
ldr d5, [sp, #12312] ; 8-byte Folded Reload
fmul d29, d5, d17
fadd d29, d29, d12
ldr d12, [sp, #11880] ; 8-byte Folded Reload
fmul d11, d12, d4
fmul d0, d12, d8
str d0, [sp, #7040] ; 8-byte Folded Spill
fsub d11, d11, d0
ldr d0, [sp, #6528] ; 8-byte Folded Reload
fmul d12, d6, d0
fadd d11, d11, d12
ldr d6, [sp, #11872] ; 8-byte Folded Reload
fmul d10, d6, d10
fadd d10, d10, d11
fmul d0, d6, d24
str d0, [sp, #7016] ; 8-byte Folded Spill
fsub d10, d10, d0
ldr d0, [sp, #9112] ; 8-byte Folded Reload
fmul d8, d2, d0
fadd d8, d8, d10
ldr d0, [sp, #11160] ; 8-byte Folded Reload
fmul d2, d15, d0
fmul d6, d14, d1
ldr d0, [sp, #8168] ; 8-byte Folded Reload
fmul d0, d19, d0
ldr d1, [sp, #9144] ; 8-byte Folded Reload
fmul d1, d1, d23
str d1, [sp, #6848] ; 8-byte Folded Spill
fadd d1, d1, d8
str d2, [sp, #6960] ; 8-byte Folded Spill
fadd d1, d2, d1
str d6, [sp, #6928] ; 8-byte Folded Spill
fadd d1, d6, d1
fadd d0, d1, d0
ldr d1, [sp, #10752] ; 8-byte Folded Reload
fmul d1, d1, d7
str d1, [sp, #6944] ; 8-byte Folded Spill
fadd d0, d1, d0
mov x8, #64744
movk x8, #21380, lsl #16
movk x8, #23316, lsl #32
movk x8, #48962, lsl #48
fmov d1, x8
fmul d1, d16, d1
str d1, [sp, #6880] ; 8-byte Folded Spill
fsub d6, d0, d1
ldr d8, [sp, #12304] ; 8-byte Folded Reload
fmul d0, d8, d6
fadd d0, d0, d29
ldr d2, [sp, #11528] ; 8-byte Folded Reload
fmul d1, d2, d21
fsub d0, d0, d1
fmul d1, d2, d28
str d1, [sp, #6864] ; 8-byte Folded Spill
fsub d0, d0, d1
ldr d1, [sp, #9232] ; 8-byte Folded Reload
fmul d1, d22, d1
fadd d0, d1, d0
ldr d1, [sp, #8504] ; 8-byte Folded Reload
fmul d1, d1, d18
ldr d2, [sp, #8496] ; 8-byte Folded Reload
fmul d2, d2, d3
str d2, [sp, #4112] ; 8-byte Folded Spill
fmul d2, d3, d2
fsub d1, d1, d2
ldr d19, [sp, #6896] ; 8-byte Folded Reload
ldr d29, [sp, #8720] ; 8-byte Folded Reload
fmul d2, d29, d19
str d2, [sp, #6912] ; 8-byte Folded Spill
fadd d1, d1, d2
ldr d2, [sp, #8160] ; 8-byte Folded Reload
fmul d2, d1, d2
fadd d2, d2, d0
ldr d16, [sp, #6440] ; 8-byte Folded Reload
fmul d0, d2, d16
ldr d4, [sp, #8712] ; 8-byte Folded Reload
fadd d7, d4, d0
ldr d23, [sp, #7992] ; 8-byte Folded Reload
fmul d0, d25, d23
fmul d24, d27, d23
fsub d0, d0, d24
str d17, [sp, #1704] ; 8-byte Folded Spill
fmul d24, d8, d17
fadd d0, d24, d0
str d6, [sp, #1696] ; 8-byte Folded Spill
fmul d24, d5, d6
fsub d0, d0, d24
ldr d5, [sp, #11432] ; 8-byte Folded Reload
fmul d24, d5, d21
fsub d0, d0, d24
fmul d5, d5, d28
str d5, [sp, #7048] ; 8-byte Folded Spill
fsub d0, d0, d5
ldr d4, [sp, #9224] ; 8-byte Folded Reload
fmul d24, d22, d4
fadd d0, d24, d0
ldr d4, [sp, #7984] ; 8-byte Folded Reload
ldr d5, [sp, #11240] ; 8-byte Folded Reload
fmul d5, d5, d4
fadd d0, d5, d0
ldr d4, [sp, #7976] ; 8-byte Folded Reload
fmul d1, d1, d4
fadd d1, d1, d0
ldr d0, [sp, #11288] ; 8-byte Folded Reload
fmul d0, d0, d18
fsub d0, d29, d0
ldr d4, [sp, #6992] ; 8-byte Folded Reload
fmul d0, d0, d4
ldr d5, [sp, #11048] ; 8-byte Folded Reload
fmul d5, d5, d3
str d5, [sp, #9864] ; 8-byte Folded Spill
fmul d5, d3, d5
fmul d5, d5, d19
fadd d0, d0, d5
fadd d0, d28, d0
str d26, [sp, #6896] ; 8-byte Folded Spill
fadd d17, d0, d26
ldr d0, [sp, #9400] ; 8-byte Folded Reload
fmul d0, d0, d18
fadd d5, d0, d17
ldr d0, [sp, #6064] ; 8-byte Folded Reload
ldr d4, [sp, #9176] ; 8-byte Folded Reload
fadd d0, d4, d0
ldr d4, [sp, #9392] ; 8-byte Folded Reload
fmul d4, d4, d3
fadd d4, d4, d0
str d4, [sp, #4120] ; 8-byte Folded Spill
fmul d4, d3, d4
fadd d4, d4, d5
fadd d3, d1, d7
ldr d5, [sp, #6800] ; 8-byte Folded Reload
fmul d5, d4, d5
fadd d6, d3, d5
ldr d3, [sp, #6784] ; 8-byte Folded Reload
ldr d5, [sp, #9328] ; 8-byte Folded Reload
fmul d3, d5, d3
ldr d5, [sp, #6768] ; 8-byte Folded Reload
fmul d5, d20, d5
fsub d3, d5, d3
fadd d2, d2, d3
fmul d1, d1, d16
fadd d1, d2, d1
ldr d2, [sp, #6736] ; 8-byte Folded Reload
fmul d2, d4, d2
fadd d7, d1, d2
ldr q3, [sp, #11984] ; 16-byte Folded Reload
fmul d1, d3, d6
ldr q4, [sp, #11808] ; 16-byte Folded Reload
fmul d2, d4, d7
fsub d16, d1, d2
ldr d18, [sp, #6432] ; 8-byte Folded Reload
fmul d1, d20, d18
ldr q5, [sp, #11952] ; 16-byte Folded Reload
fmul d2, d5, d16
fsub d1, d2, d1
ldr d19, [sp, #6472] ; 8-byte Folded Reload
fmul d2, d20, d19
str q7, [sp, #5616] ; 16-byte Folded Spill
fmul d3, d3, d7
fsub d2, d3, d2
str q6, [sp, #5632] ; 16-byte Folded Spill
fmul d3, d4, d6
fadd d2, d3, d2
ldr d3, [sp, #6512] ; 8-byte Folded Reload
ldr d4, [sp, #8736] ; 8-byte Folded Reload
fmul d3, d4, d3
str d17, [sp, #6992] ; 8-byte Folded Spill
fadd d7, d3, d17
ldr d3, [sp, #8464] ; 8-byte Folded Reload
ldr d6, [sp, #11520] ; 8-byte Folded Reload
fmul d3, d3, d6
fadd d3, d3, d7
ldr d4, [sp, #8448] ; 8-byte Folded Reload
fmul d4, d4, d30
fadd d4, d4, d0
str d4, [sp, #4104] ; 8-byte Folded Spill
fmul d4, d30, d4
fadd d3, d4, d3
fmul d3, d3, d19
fadd d2, d2, d3
ldr d3, [sp, #8472] ; 8-byte Folded Reload
fmul d3, d3, d6
fmov d19, d6
ldr d4, [sp, #8456] ; 8-byte Folded Reload
fmul d4, d4, d30
str d4, [sp, #4072] ; 8-byte Folded Spill
fmul d4, d30, d4
fsub d3, d3, d4
ldr d4, [sp, #6504] ; 8-byte Folded Reload
ldr d17, [sp, #9120] ; 8-byte Folded Reload
fmul d4, d17, d4
str d4, [sp, #8672] ; 8-byte Folded Spill
fadd d3, d3, d4
ldr d4, [sp, #6496] ; 8-byte Folded Reload
fmul d3, d3, d4
fsub d6, d2, d3
ldr q4, [sp, #11792] ; 16-byte Folded Reload
fmul d2, d4, d6
fsub d1, d1, d2
ldr d2, [sp, #10680] ; 8-byte Folded Reload
fmul d2, d2, d19
fsub d2, d17, d2
fmov d25, d20
ldr d17, [sp, #6488] ; 8-byte Folded Reload
fmul d2, d2, d17
ldr d3, [sp, #10688] ; 8-byte Folded Reload
fmul d3, d3, d30
str d3, [sp, #8224] ; 8-byte Folded Spill
fmul d3, d30, d3
fmul d3, d3, d17
fadd d2, d3, d2
str d7, [sp, #8680] ; 8-byte Folded Spill
fadd d10, d2, d7
ldr d2, [sp, #8440] ; 8-byte Folded Reload
ldr d17, [sp, #11640] ; 8-byte Folded Reload
fmul d2, d2, d17
fadd d2, d2, d10
ldr d3, [sp, #8432] ; 8-byte Folded Reload
fmul d3, d3, d31
fadd d3, d3, d0
str d3, [sp, #4088] ; 8-byte Folded Spill
fmul d3, d31, d3
fadd d2, d3, d2
fmul d3, d2, d18
fadd d12, d1, d3
ldr d7, [sp, #6464] ; 8-byte Folded Reload
fmul d1, d20, d7
str q16, [sp, #1568] ; 16-byte Folded Spill
fmul d3, d4, d16
fsub d1, d3, d1
str q6, [sp, #1552] ; 16-byte Folded Spill
fmul d3, d5, d6
fadd d1, d1, d3
fmul d2, d2, d7
fadd d1, d1, d2
fmov d3, d17
ldr d2, [sp, #11064] ; 8-byte Folded Reload
fmul d2, d2, d17
ldr d3, [sp, #11056] ; 8-byte Folded Reload
fmul d3, d3, d31
str d3, [sp, #4056] ; 8-byte Folded Spill
fmul d3, d31, d3
fsub d2, d2, d3
ldr d3, [sp, #8728] ; 8-byte Folded Reload
fadd d2, d2, d3
ldr d3, [sp, #6480] ; 8-byte Folded Reload
fmul d2, d2, d3
fsub d1, d1, d2
str q1, [sp, #4544] ; 16-byte Folded Spill
ldr x8, [x19]
ldr d1, [sp, #6536] ; 8-byte Folded Reload
ldr d2, [sp, #8664] ; 8-byte Folded Reload
fsub d27, d1, d2
ldr d1, [sp, #5088] ; 8-byte Folded Reload
ldr d2, [sp, #8304] ; 8-byte Folded Reload
fsub d28, d1, d2
ldr d17, [sp, #6840] ; 8-byte Folded Reload
fmul d1, d9, d17
ldr d2, [sp, #4968] ; 8-byte Folded Reload
fadd d16, d1, d2
ldr d22, [sp, #6816] ; 8-byte Folded Reload
fmul d1, d9, d22
str d1, [sp, #1464] ; 8-byte Folded Spill
fadd d11, d1, d0
ldr d18, [sp, #8768] ; 8-byte Folded Reload
fmul d4, d18, d9
ldr d20, [sp, #9448] ; 8-byte Folded Reload
fmul d23, d20, d13
ldr d19, [sp, #6824] ; 8-byte Folded Reload
fmul d24, d19, d9
ldr d14, [sp, #6640] ; 8-byte Folded Reload
fmul d0, d14, d9
str d0, [sp, #4480] ; 8-byte Folded Spill
ldr d0, [sp, #8744] ; 8-byte Folded Reload
fmul d26, d0, d31
ldr d6, [sp, #6648] ; 8-byte Folded Reload
fmul d0, d6, d9
str d0, [sp, #4488] ; 8-byte Folded Spill
ldr d0, [sp, #8760] ; 8-byte Folded Reload
fmul d29, d0, d9
ldr d2, [sp, #8752] ; 8-byte Folded Reload
fmul d8, d2, d9
ldr d5, [sp, #6832] ; 8-byte Folded Reload
fmul d15, d5, d9
ldr d3, [sp, #9440] ; 8-byte Folded Reload
fmul d1, d3, d13
ldr d7, [sp, #8424] ; 8-byte Folded Reload
fmul d21, d7, d31
ldr d30, [sp, #11392] ; 8-byte Folded Reload
fadd d13, d30, d30
ldr d30, [sp, #11400] ; 8-byte Folded Reload
fadd d30, d30, d30
str d30, [sp, #11240] ; 8-byte Folded Spill
str q12, [sp, #4560] ; 16-byte Folded Spill
str d11, [sp, #5608] ; 8-byte Folded Spill
str d4, [sp, #4536] ; 8-byte Folded Spill
str d26, [sp, #4152] ; 8-byte Folded Spill
str d23, [sp, #4144] ; 8-byte Folded Spill
str d21, [sp, #6488] ; 8-byte Folded Spill
str d1, [sp, #4824] ; 8-byte Folded Spill
str d16, [sp, #4816] ; 8-byte Folded Spill
str d27, [sp, #4080] ; 8-byte Folded Spill
str d24, [sp, #4808] ; 8-byte Folded Spill
str d10, [sp, #8256] ; 8-byte Folded Spill
cbz x8, LBB19_38
; %bb.37:
ldr d11, [sp, #11248] ; 8-byte Folded Reload
fmul d0, d11, d0
ldr d30, [sp, #9304] ; 8-byte Folded Reload
fadd d0, d30, d0
ldr d1, [sp, #6288] ; 8-byte Folded Reload
fmul d1, d9, d1
fadd d1, d0, d1
fmul d0, d11, d2
fadd d0, d0, d1
fmul d2, d9, d4
ldr d24, [sp, #9200] ; 8-byte Folded Reload
fmul d0, d0, d24
ldr d4, [sp, #9192] ; 8-byte Folded Reload
fmul d2, d2, d4
mov x9, #4416
movk x9, #37438, lsl #16
movk x9, #20244, lsl #32
movk x9, #16497, lsl #48
ldr d12, [sp, #11784] ; 8-byte Folded Reload
fmul d3, d3, d12
fadd d0, d0, d2
fmov d2, x9
ldr d16, [sp, #9320] ; 8-byte Folded Reload
fsub d3, d16, d3
ldr d21, [sp, #12208] ; 8-byte Folded Reload
fmul d4, d21, d23
fadd d0, d0, d2
ldr d2, [sp, #4808] ; 8-byte Folded Reload
fmul d2, d9, d2
ldr d23, [sp, #9216] ; 8-byte Folded Reload
fmul d3, d3, d23
ldr d23, [sp, #9312] ; 8-byte Folded Reload
fmul d4, d4, d23
mov x9, #23440
movk x9, #2685, lsl #16
movk x9, #53080, lsl #32
movk x9, #16466, lsl #48
fsub d0, d0, d2
fmul d2, d11, d5
ldr d5, [sp, #4480] ; 8-byte Folded Reload
fmul d5, d9, d5
fmul d6, d11, d6
fadd d3, d3, d4
fmov d4, x9
ldr d10, [sp, #11640] ; 8-byte Folded Reload
fmul d7, d7, d10
mov x9, #64990
movk x9, #28266, lsl #16
movk x9, #45172, lsl #32
movk x9, #16414, lsl #48
fadd d0, d2, d0
str d0, [sp, #11160] ; 8-byte Folded Spill
mov x10, #44164
movk x10, #4969, lsl #16
movk x10, #23770, lsl #32
movk x10, #49237, lsl #48
fadd d2, d6, d5
fmul d1, d1, d24
fadd d3, d3, d4
fsub d4, d16, d7
fmov d5, x9
ldr d6, [sp, #12072] ; 8-byte Folded Reload
fmul d6, d6, d26
ldr d7, [sp, #9208] ; 8-byte Folded Reload
fmul d7, d0, d7
fmov d16, x10
fadd d0, d2, d1
str d0, [sp, #9328] ; 8-byte Folded Spill
ldr d2, [sp, #8192] ; 8-byte Folded Reload
fadd d2, d3, d2
fmul d3, d9, d27
fmul d4, d4, d5
ldr d5, [sp, #6376] ; 8-byte Folded Reload
fmul d5, d6, d5
mov x9, #23440
movk x9, #2685, lsl #16
movk x9, #53080, lsl #32
movk x9, #16466, lsl #48
fsub d6, d16, d7
ldr d7, [sp, #6424] ; 8-byte Folded Reload
fmul d7, d0, d7
fadd d2, d2, d3
fmul d3, d11, d17
mov x10, #39127
movk x10, #24179, lsl #16
movk x10, #24811, lsl #32
movk x10, #16304, lsl #48
fadd d4, d4, d5
fmov d5, x9
fsub d6, d6, d7
ldr q23, [sp, #11136] ; 16-byte Folded Reload
ldr q0, [sp, #6336] ; 16-byte Folded Reload
fmul d7, d23, d0
fadd d0, d3, d2
str d0, [sp, #9320] ; 8-byte Folded Spill
fmov d3, x10
fadd d4, d4, d5
fsub d5, d6, d7
fmul d3, d0, d3
ldr d6, [sp, #8288] ; 8-byte Folded Reload
fadd d4, d4, d6
fmul d6, d9, d28
fadd d3, d5, d3
ldr q24, [sp, #10736] ; 16-byte Folded Reload
ldr q0, [sp, #8624] ; 16-byte Folded Reload
fmul d5, d24, d0
fadd d4, d4, d6
fmul d6, d11, d22
mov x9, #39127
movk x9, #24179, lsl #16
movk x9, #24811, lsl #32
movk x9, #16304, lsl #48
fadd d5, d3, d5
ldr q26, [sp, #11216] ; 16-byte Folded Reload
ldr q0, [sp, #6320] ; 16-byte Folded Reload
fmul d7, d26, d0
fadd d0, d6, d4
str d0, [sp, #9312] ; 8-byte Folded Spill
fmov d4, x9
fsub d5, d5, d7
fmul d4, d0, d4
fadd d4, d5, d4
ldr q27, [sp, #10720] ; 16-byte Folded Reload
ldr q30, [sp, #5072] ; 16-byte Folded Reload
fmul d5, d27, d30
fadd d7, d4, d5
fmul d4, d11, d14
ldr d0, [sp, #4488] ; 8-byte Folded Reload
fmul d5, d9, d0
fsub d4, d4, d5
ldr d5, [sp, #8656] ; 8-byte Folded Reload
fmul d5, d11, d5
ldr d14, [sp, #6368] ; 8-byte Folded Reload
fsub d5, d14, d5
fmul d6, d9, d29
fsub d6, d5, d6
mov x9, #47887
movk x9, #56309, lsl #16
movk x9, #15746, lsl #32
movk x9, #16444, lsl #48
fmov d17, x9
fmul d5, d6, d17
fsub d16, d4, d5
mov x9, #39915
movk x9, #11776, lsl #16
movk x9, #40689, lsl #32
movk x9, #49053, lsl #48
fmov d5, x9
fmul d4, d16, d5
fmul d18, d11, d18
fsub d6, d6, d18
fmul d6, d6, d17
fmul d18, d9, d8
fmul d17, d18, d17
fsub d6, d6, d17
fmul d17, d11, d19
fadd d6, d17, d6
fmul d17, d9, d15
fadd d17, d17, d6
mov x9, #48998
movk x9, #16808, lsl #16
movk x9, #62387, lsl #32
movk x9, #16312, lsl #48
fmov d6, x9
fmul d18, d17, d6
fadd d18, d4, d18
mov x9, #33620
movk x9, #2364, lsl #16
movk x9, #33974, lsl #32
movk x9, #16305, lsl #48
fmov d4, x9
fmov d2, d15
ldr d15, [sp, #5016] ; 8-byte Folded Reload
fmul d19, d15, d4
fsub d18, d18, d19
fmov d0, d28
fmov d28, d8
ldr q8, [sp, #5056] ; 16-byte Folded Reload
fmul d19, d24, d8
fadd d18, d18, d19
fmov d3, d13
fmov d13, d29
ldr q29, [sp, #4544] ; 16-byte Folded Reload
ldr q31, [sp, #5024] ; 16-byte Folded Reload
fmul d19, d23, d31
fsub d18, d18, d19
fmul d19, d20, d12
ldr q12, [sp, #4560] ; 16-byte Folded Reload
ldr d22, [sp, #9184] ; 8-byte Folded Reload
fsub d19, d22, d19
mov x9, #64990
movk x9, #28266, lsl #16
movk x9, #45172, lsl #32
movk x9, #16414, lsl #48
fmov d20, x9
fmul d19, d19, d20
ldr d1, [sp, #4824] ; 8-byte Folded Reload
fmul d21, d21, d1
fmul d21, d21, d20
fadd d19, d21, d19
ldr d21, [sp, #8176] ; 8-byte Folded Reload
fadd d19, d19, d21
ldr d21, [sp, #8648] ; 8-byte Folded Reload
fmul d21, d11, d21
fadd d19, d21, d19
ldr d21, [sp, #4816] ; 8-byte Folded Reload
fmul d21, d9, d21
fadd d19, d21, d19
fmul d21, d19, d4
fadd d18, d18, d21
fmul d21, d25, d4
fsub d18, d18, d21
fmul d21, d27, d12
fadd d18, d21, d18
fmul d21, d26, d29
fsub d18, d18, d21
ldr d1, [sp, #8744] ; 8-byte Folded Reload
fmul d21, d1, d10
ldr d10, [sp, #8256] ; 8-byte Folded Reload
fsub d21, d22, d21
fmul d21, d21, d20
ldr d1, [sp, #6488] ; 8-byte Folded Reload
ldr d22, [sp, #12072] ; 8-byte Folded Reload
fmul d22, d22, d1
fmul d20, d22, d20
fadd d20, d20, d21
fadd d20, d20, d10
ldr d21, [sp, #6416] ; 8-byte Folded Reload
fmul d21, d11, d21
ldr d11, [sp, #5608] ; 8-byte Folded Reload
fadd d20, d21, d20
fmul d21, d9, d11
fadd d20, d21, d20
fmul d21, d20, d4
fadd d18, d18, d21
mov x9, #43139
movk x9, #8835, lsl #16
movk x9, #28093, lsl #32
movk x9, #16419, lsl #48
fmov d21, x9
fmul d22, d14, d21
fmul d22, d22, d4
fadd d18, d22, d18
fadd d7, d7, d7
fadd d18, d18, d18
fadd d7, d7, d18
mov x9, #26610
movk x9, #29696, lsl #16
movk x9, #48971, lsl #32
movk x9, #16339, lsl #48
fmov d18, x9
fmul d16, d16, d18
mov x9, #54885
movk x9, #33778, lsl #16
movk x9, #12745, lsl #32
movk x9, #49076, lsl #48
fmov d18, x9
fmul d17, d17, d18
fadd d16, d16, d17
mov x9, #39127
movk x9, #24179, lsl #16
movk x9, #24811, lsl #32
movk x9, #16304, lsl #48
fmov d17, x9
fmul d18, d15, d17
fmov d15, d2
fsub d16, d16, d18
fmul d18, d23, d8
fmov d8, d28
fmov d28, d0
fadd d16, d16, d18
fmul d18, d24, d31
mov.16b v0, v29
fmov d29, d13
fmov d13, d3
fadd d16, d16, d18
fmul d18, d19, d17
fadd d16, d16, d18
fmul d18, d25, d17
fsub d16, d16, d18
fmul d18, d26, d12
fadd d16, d18, d16
fmul d18, d27, d0
fadd d16, d18, d16
fmul d17, d20, d17
fadd d16, d16, d17
ldr d0, [sp, #11160] ; 8-byte Folded Reload
fmul d0, d0, d6
mov x9, #16739
movk x9, #36495, lsl #16
movk x9, #1443, lsl #32
movk x9, #49184, lsl #48
fmov d6, x9
fsub d0, d6, d0
ldr d1, [sp, #9328] ; 8-byte Folded Reload
fmul d1, d1, d5
fadd d0, d0, d1
ldr q1, [sp, #6336] ; 16-byte Folded Reload
fmul d1, d24, d1
fadd d0, d0, d1
ldr d1, [sp, #9320] ; 8-byte Folded Reload
fmul d1, d1, d4
fsub d0, d0, d1
ldr q1, [sp, #8624] ; 16-byte Folded Reload
fmul d1, d23, d1
fadd d0, d0, d1
ldr q1, [sp, #6320] ; 16-byte Folded Reload
fmul d1, d27, d1
fadd d0, d0, d1
ldr d1, [sp, #9312] ; 8-byte Folded Reload
fmul d1, d1, d4
fsub d0, d0, d1
fmul d1, d26, d30
fadd d0, d0, d1
mov x9, #2690
movk x9, #16625, lsl #16
movk x9, #5009, lsl #32
movk x9, #49178, lsl #48
fmov d1, x9
fadd d0, d0, d1
ldr d1, [sp, #9304] ; 8-byte Folded Reload
fmul d1, d1, d21
fmul d1, d1, d4
fsub d0, d0, d1
ldr d3, [sp, #11400] ; 8-byte Folded Reload
fmul d1, d3, d7
fadd d0, d0, d16
fmul d2, d13, d0
fsub d1, d1, d2
fmul d1, d3, d1
ldr d5, [sp, #11392] ; 8-byte Folded Reload
fmul d2, d5, d7
ldr d3, [sp, #11240] ; 8-byte Folded Reload
fmul d0, d3, d0
fadd d0, d0, d2
fmov d2, #0.50000000
fmul d1, d1, d2
fmul d0, d5, d0
fmul d0, d0, d2
fsub d0, d1, d0
str d0, [x8]
LBB19_38:
str d13, [sp, #11160] ; 8-byte Folded Spill
str d15, [sp, #4064] ; 8-byte Folded Spill
str d28, [sp, #4096] ; 8-byte Folded Spill
str d8, [sp, #4840] ; 8-byte Folded Spill
str d29, [sp, #4528] ; 8-byte Folded Spill
str d25, [sp, #1512] ; 8-byte Folded Spill
ldr d7, [sp, #9928] ; 8-byte Folded Reload
ldr d0, [sp, #9904] ; 8-byte Folded Reload
fdiv d2, d0, d7
ldr d0, [sp, #10424] ; 8-byte Folded Reload
ldr d1, [sp, #10496] ; 8-byte Folded Reload
fdiv d3, d0, d1
ldr d0, [sp, #9080] ; 8-byte Folded Reload
ldr d24, [sp, #9064] ; 8-byte Folded Reload
fdiv d1, d0, d24
fmov d13, #0.50000000
ldr d0, [sp, #10912] ; 8-byte Folded Reload
fmul d4, d2, d0
fmul d5, d4, d13
ldr d0, [sp, #10624] ; 8-byte Folded Reload
str d5, [sp, #3568] ; 8-byte Folded Spill
fsub d6, d5, d0
mov x9, #-7378697629483820647
movk x9, #39322
movk x9, #16361, lsl #48
fmov d27, x9
fmul d4, d4, d27
ldr d5, [sp, #9912] ; 8-byte Folded Reload
fmul d4, d4, d5
fmov d0, #5.00000000
fmul d4, d4, d0
fmov d0, #5.00000000
str d4, [sp, #3552] ; 8-byte Folded Spill
fadd d4, d4, d6
ldr d6, [sp, #10608] ; 8-byte Folded Reload
fdiv d4, d4, d6
ldr d6, [sp, #9920] ; 8-byte Folded Reload
fdiv d18, d6, d7
ldr d6, [sp, #11104] ; 8-byte Folded Reload
fmul d6, d3, d6
fmul d16, d6, d13
ldr d7, [sp, #10584] ; 8-byte Folded Reload
str d16, [sp, #3520] ; 8-byte Folded Spill
fsub d7, d16, d7
fmul d6, d6, d27
ldr d16, [sp, #9888] ; 8-byte Folded Reload
fmul d6, d6, d16
fmul d6, d6, d0
fmov d10, #5.00000000
str d6, [sp, #3512] ; 8-byte Folded Spill
fadd d6, d6, d7
ldr d7, [sp, #10576] ; 8-byte Folded Reload
fdiv d20, d6, d7
str d20, [sp, #8168] ; 8-byte Folded Spill
ldr d6, [sp, #9896] ; 8-byte Folded Reload
fmul d6, d2, d6
ldr d2, [sp, #10480] ; 8-byte Folded Reload
fmul d2, d2, d6
fmul d2, d2, d13
ldr d7, [sp, #10904] ; 8-byte Folded Reload
fmul d7, d7, d2
mov x9, #4632233691727265792
fmov d25, x9
fmul d7, d7, d25
ldr d16, [sp, #10896] ; 8-byte Folded Reload
fmul d0, d16, d6
str d0, [sp, #8208] ; 8-byte Folded Spill
ldr d6, [sp, #10544] ; 8-byte Folded Reload
fmul d0, d6, d0
str d0, [sp, #8200] ; 8-byte Folded Spill
ldr d6, [sp, #10568] ; 8-byte Folded Reload
fmul d6, d6, d0
fmov d0, #3.00000000
fmul d6, d6, d0
fmov d0, #3.00000000
fadd d17, d7, d6
ldr d6, [sp, #9880] ; 8-byte Folded Reload
fmul d3, d3, d6
ldr d6, [sp, #10432] ; 8-byte Folded Reload
fmul d6, d6, d3
fmul d6, d6, d13
ldr d7, [sp, #10888] ; 8-byte Folded Reload
fmul d6, d7, d6
fmul d6, d6, d25
ldr d7, [sp, #10880] ; 8-byte Folded Reload
fmul d5, d7, d3
str d5, [sp, #8248] ; 8-byte Folded Spill
ldr d3, [sp, #10440] ; 8-byte Folded Reload
fmul d5, d3, d5
str d5, [sp, #8240] ; 8-byte Folded Spill
ldr d3, [sp, #10448] ; 8-byte Folded Reload
fmul d3, d3, d5
fmul d3, d3, d0
fadd d8, d6, d3
ldr d3, [sp, #9056] ; 8-byte Folded Reload
fmul d3, d1, d3
ldr d6, [sp, #9816] ; 8-byte Folded Reload
fmul d6, d3, d6
fmul d6, d6, d13
ldr d7, [sp, #10368] ; 8-byte Folded Reload
fmul d6, d7, d6
fmul d6, d6, d25
ldr d7, [sp, #10360] ; 8-byte Folded Reload
fmul d7, d3, d7
str d7, [sp, #8312] ; 8-byte Folded Spill
ldr d3, [sp, #9824] ; 8-byte Folded Reload
fmul d5, d3, d7
str d5, [sp, #8304] ; 8-byte Folded Spill
ldr d3, [sp, #9832] ; 8-byte Folded Reload
fmul d3, d3, d5
fmul d3, d3, d0
fadd d22, d3, d6
ldr d3, [sp, #10616] ; 8-byte Folded Reload
fmul d7, d3, d4
ldr d0, [sp, #12320] ; 8-byte Folded Reload
fmul d3, d0, d7
ldr d6, [sp, #10600] ; 8-byte Folded Reload
fmul d4, d6, d4
str d18, [sp, #3560] ; 8-byte Folded Spill
fadd d6, d18, d4
str d6, [sp, #5992] ; 8-byte Folded Spill
ldp d5, d2, [x29, #-184] ; 16-byte Folded Reload
fmul d4, d5, d6
fsub d4, d3, d4
fmul d3, d2, d4
fmov d31, d4
ldr d9, [sp, #11888] ; 8-byte Folded Reload
fmul d4, d9, d6
fmul d6, d0, d17
str d6, [sp, #3496] ; 8-byte Folded Spill
fadd d23, d6, d4
fadd d3, d23, d3
fmul d4, d9, d7
fmov d21, d7
str d7, [sp, #8296] ; 8-byte Folded Spill
fmul d6, d5, d17
fmov d30, d5
str d6, [sp, #3504] ; 8-byte Folded Spill
fadd d16, d6, d4
ldur d0, [x29, #-168] ; 8-byte Folded Reload
fmul d4, d0, d16
fmov d18, d0
fsub d19, d3, d4
ldr d26, [sp, #11376] ; 8-byte Folded Reload
fmul d3, d26, d19
str d19, [sp, #5120] ; 8-byte Folded Spill
ldr d4, [sp, #11904] ; 8-byte Folded Reload
fmul d4, d4, d16
fsub d3, d3, d4
ldr d4, [sp, #10592] ; 8-byte Folded Reload
fmul d6, d4, d20
fmul d4, d9, d6
fmov d28, d6
str d6, [sp, #7976] ; 8-byte Folded Spill
fmov d0, d8
str d8, [sp, #6016] ; 8-byte Folded Spill
fmul d6, d5, d8
str d6, [sp, #3544] ; 8-byte Folded Spill
fadd d5, d6, d4
ldr d4, [sp, #11664] ; 8-byte Folded Reload
fmul d4, d4, d5
fmov d29, d5
fsub d3, d3, d4
mov x9, #54806
movk x9, #23353, lsl #16
movk x9, #56949, lsl #32
movk x9, #16326, lsl #48
mov x10, #54806
movk x10, #23353, lsl #16
movk x10, #56949, lsl #32
movk x10, #49094, lsl #48
fmov d4, x9
fmul d6, d9, d4
fmov d5, d4
str d4, [sp, #8664] ; 8-byte Folded Spill
ldr d4, [sp, #11944] ; 8-byte Folded Reload
fmul d4, d4, d6
fmov d8, d6
str d6, [sp, #8216] ; 8-byte Folded Spill
fmov d6, x10
fmul d7, d30, d6
fmov d12, d6
ldr d20, [sp, #11568] ; 8-byte Folded Reload
fmul d6, d20, d7
fsub d4, d4, d6
ldr d6, [sp, #11752] ; 8-byte Folded Reload
fmul d6, d6, d21
fadd d4, d4, d6
ldr d6, [sp, #11736] ; 8-byte Folded Reload
fmul d6, d6, d17
str d6, [sp, #3624] ; 8-byte Folded Spill
fadd d4, d6, d4
ldr d6, [sp, #11672] ; 8-byte Folded Reload
fmul d6, d6, d28
fadd d4, d6, d4
ldr d6, [sp, #11648] ; 8-byte Folded Reload
fmul d6, d6, d0
str d6, [sp, #3632] ; 8-byte Folded Spill
fadd d4, d6, d4
str d4, [sp, #1808] ; 8-byte Folded Spill
ldr d6, [sp, #12280] ; 8-byte Folded Reload
fmul d4, d6, d4
fmov d28, d6
fadd d3, d4, d3
str d17, [sp, #4040] ; 8-byte Folded Spill
fadd d21, d17, d0
ldr d0, [sp, #11368] ; 8-byte Folded Reload
fmul d4, d0, d12
fsub d4, d4, d21
mov x9, #63706
movk x9, #13221, lsl #16
movk x9, #1281, lsl #32
movk x9, #16209, lsl #48
str d4, [sp, #4032] ; 8-byte Folded Spill
fmul d4, d0, d4
fmov d11, d0
fmov d6, x9
str d6, [sp, #9312] ; 8-byte Folded Spill
fmul d6, d4, d6
fadd d3, d6, d3
str d7, [sp, #5112] ; 8-byte Folded Spill
fmul d6, d30, d7
fmul d7, d9, d8
fsub d17, d6, d7
mov x9, #54125
movk x9, #53060, lsl #16
movk x9, #15481, lsl #32
movk x9, #16273, lsl #48
fmov d7, x9
fmul d6, d17, d7
fmov d15, d7
str d7, [sp, #8712] ; 8-byte Folded Spill
fsub d3, d3, d6
fmul d6, d20, d5
str d6, [sp, #6376] ; 8-byte Folded Spill
fmul d6, d6, d28
str d6, [sp, #4024] ; 8-byte Folded Spill
fmul d6, d6, d0
fadd d3, d6, d3
ldr d6, [sp, #9024] ; 8-byte Folded Reload
fmul d1, d1, d6
fmul d7, d1, d13
fmul d1, d1, d27
ldr d6, [sp, #9008] ; 8-byte Folded Reload
fmul d1, d1, d6
fmul d6, d1, d10
ldr d1, [sp, #9952] ; 8-byte Folded Reload
str d7, [sp, #3472] ; 8-byte Folded Spill
fsub d1, d7, d1
str d6, [sp, #3464] ; 8-byte Folded Spill
fadd d1, d6, d1
ldr d6, [sp, #9944] ; 8-byte Folded Reload
fdiv d1, d1, d6
fmul d6, d0, d22
ldr d7, [sp, #11352] ; 8-byte Folded Reload
fmul d7, d7, d6
str d7, [sp, #3616] ; 8-byte Folded Spill
fsub d3, d3, d7
ldr d7, [sp, #9936] ; 8-byte Folded Reload
fmul d1, d7, d1
ldr d7, [sp, #9048] ; 8-byte Folded Reload
fdiv d7, d7, d24
str d7, [sp, #3456] ; 8-byte Folded Spill
fadd d1, d7, d1
str d1, [sp, #5984] ; 8-byte Folded Spill
fmul d7, d0, d1
fmul d1, d2, d7
fmov d14, d7
fmul d7, d18, d6
str d7, [sp, #3536] ; 8-byte Folded Spill
fadd d2, d7, d1
ldr d8, [sp, #11128] ; 8-byte Folded Reload
fmul d1, d8, d2
str d2, [sp, #6432] ; 8-byte Folded Spill
fsub d5, d3, d1
mov x9, #65123
movk x9, #27942, lsl #16
movk x9, #23314, lsl #32
movk x9, #49139, lsl #48
str d21, [sp, #6040] ; 8-byte Folded Spill
str d22, [sp, #4048] ; 8-byte Folded Spill
fadd d28, d21, d22
fmov d3, x9
str d3, [sp, #9320] ; 8-byte Folded Spill
fmul d3, d0, d3
fsub d3, d3, d28
mov x9, #4354980839667269632
mov x10, #47272
movk x10, #56762, lsl #16
movk x10, #43178, lsl #32
movk x10, #49060, lsl #48
fmov d7, x9
str d7, [sp, #8592] ; 8-byte Folded Spill
fmul d7, d5, d7
str d3, [sp, #4016] ; 8-byte Folded Spill
fmul d3, d0, d3
fmov d20, x10
str d20, [sp, #9328] ; 8-byte Folded Spill
fmul d21, d3, d20
fadd d7, d7, d21
fmov d10, d31
str d31, [sp, #6800] ; 8-byte Folded Spill
ldr d0, [sp, #11864] ; 8-byte Folded Reload
fmul d21, d0, d31
ldr d0, [sp, #12200] ; 8-byte Folded Reload
fmul d22, d0, d23
fadd d21, d21, d22
ldur d1, [x29, #-216] ; 8-byte Folded Reload
fmul d22, d1, d16
fsub d0, d21, d22
fmul d21, d26, d0
str d0, [sp, #6512] ; 8-byte Folded Spill
ldr d20, [sp, #11976] ; 8-byte Folded Reload
fmul d22, d20, d16
fsub d21, d21, d22
ldr d20, [sp, #12064] ; 8-byte Folded Reload
str d29, [sp, #6504] ; 8-byte Folded Spill
fmul d22, d20, d29
fsub d21, d21, d22
mov x9, #62994
movk x9, #14722, lsl #16
movk x9, #41829, lsl #32
movk x9, #16247, lsl #48
fmov d20, x9
str d20, [sp, #9176] ; 8-byte Folded Spill
fmul d22, d17, d20
fadd d24, d22, d21
mov x9, #18456
movk x9, #63321, lsl #16
movk x9, #33926, lsl #32
movk x9, #48991, lsl #48
fmov d20, x9
str d20, [sp, #9304] ; 8-byte Folded Spill
fmul d21, d4, d20
ldur d20, [x29, #-208] ; 8-byte Folded Reload
fmul d22, d20, d24
fadd d21, d22, d21
ldr d22, [sp, #12016] ; 8-byte Folded Reload
fmul d22, d22, d31
str d23, [sp, #1784] ; 8-byte Folded Spill
ldr d31, [sp, #12160] ; 8-byte Folded Reload
fmul d23, d31, d23
fadd d22, d22, d23
ldr d31, [sp, #12336] ; 8-byte Folded Reload
fmul d23, d31, d16
fsub d30, d22, d23
str d30, [sp, #5104] ; 8-byte Folded Spill
ldr d22, [sp, #12192] ; 8-byte Folded Reload
fmul d16, d22, d16
fmul d22, d26, d30
fsub d16, d22, d16
fmul d22, d18, d19
fmul d23, d1, d0
fadd d22, d22, d23
fmul d23, d31, d30
fadd d22, d23, d22
mov x9, #-7378697629483820647
movk x9, #39322
movk x9, #16297, lsl #48
fmov d0, x9
str d0, [sp, #9208] ; 8-byte Folded Spill
fmul d22, d22, d0
fadd d16, d16, d22
ldr d0, [sp, #12104] ; 8-byte Folded Reload
fmul d22, d0, d29
fsub d16, d16, d22
mov x9, #56877
movk x9, #10885, lsl #16
movk x9, #2572, lsl #32
movk x9, #16289, lsl #48
fmov d0, x9
str d0, [sp, #9192] ; 8-byte Folded Spill
fmul d17, d17, d0
fadd d22, d17, d16
ldur d17, [x29, #-200] ; 8-byte Folded Reload
fmul d16, d17, d22
fmov d26, d17
fadd d16, d21, d16
ldr d0, [sp, #11600] ; 8-byte Folded Reload
fmul d17, d0, d6
str d17, [sp, #3608] ; 8-byte Folded Spill
fsub d16, d16, d17
str d14, [sp, #6736] ; 8-byte Folded Spill
ldr d0, [sp, #12136] ; 8-byte Folded Reload
fmul d17, d0, d14
ldr d23, [sp, #11496] ; 8-byte Folded Reload
fmul d21, d23, d6
str d21, [sp, #3480] ; 8-byte Folded Spill
fadd d21, d21, d17
fmul d17, d8, d21
fmov d1, d21
str d21, [sp, #6304] ; 8-byte Folded Spill
fsub d16, d16, d17
mov x9, #36544
movk x9, #43611, lsl #16
movk x9, #860, lsl #32
movk x9, #16326, lsl #48
fmov d0, x9
str d0, [sp, #9224] ; 8-byte Folded Spill
fmul d4, d4, d0
str d24, [sp, #1752] ; 8-byte Folded Spill
fmul d17, d26, d24
fsub d4, d4, d17
str d22, [sp, #1744] ; 8-byte Folded Spill
fmul d17, d20, d22
fadd d4, d4, d17
ldr d0, [sp, #11464] ; 8-byte Folded Reload
fmul d17, d0, d6
str d17, [sp, #3600] ; 8-byte Folded Spill
fsub d4, d4, d17
ldr d21, [sp, #11760] ; 8-byte Folded Reload
fmul d17, d21, d6
ldr d0, [sp, #11896] ; 8-byte Folded Reload
fmul d6, d0, d14
str d17, [sp, #3448] ; 8-byte Folded Spill
fadd d0, d17, d6
str d0, [sp, #6296] ; 8-byte Folded Spill
fmul d6, d8, d0
fsub d4, d4, d6
fmul d6, d18, d2
fmul d17, d23, d1
fadd d6, d6, d17
fmul d17, d21, d0
fadd d6, d17, d6
mov x9, #43516
movk x9, #54001, lsl #16
movk x9, #25165, lsl #32
movk x9, #16240, lsl #48
fmov d0, x9
str d0, [sp, #9120] ; 8-byte Folded Spill
fmul d6, d6, d0
fsub d4, d4, d6
mov x9, #50080
movk x9, #49599, lsl #16
movk x9, #32579, lsl #32
movk x9, #49136, lsl #48
mov x10, #45974
movk x10, #34787, lsl #16
movk x10, #35902, lsl #32
movk x10, #16285, lsl #48
fmov d10, x9
fmul d6, d11, d10
str d10, [sp, #8696] ; 8-byte Folded Spill
str d6, [sp, #6000] ; 8-byte Folded Spill
fmul d6, d11, d6
fmov d0, x10
str d0, [sp, #9112] ; 8-byte Folded Spill
fmul d17, d6, d0
str d17, [sp, #8360] ; 8-byte Folded Spill
fadd d16, d17, d16
fadd d7, d16, d7
mov x9, #49235
movk x9, #28989, lsl #16
movk x9, #40841, lsl #32
movk x9, #16312, lsl #48
fmov d0, x9
str d0, [sp, #9184] ; 8-byte Folded Spill
fmul d6, d6, d0
str d6, [sp, #8368] ; 8-byte Folded Spill
fadd d4, d6, d4
mov x9, #4363988038922010624
fmov d6, x9
fmul d17, d4, d6
fmov d2, d6
str d6, [sp, #9104] ; 8-byte Folded Spill
fadd d6, d7, d17
mov x9, #24565
movk x9, #58125, lsl #16
movk x9, #44270, lsl #32
movk x9, #49140, lsl #48
mov x10, #43115
movk x10, #62349, lsl #16
movk x10, #30721, lsl #32
movk x10, #16347, lsl #48
fmov d0, x9
str d0, [sp, #9216] ; 8-byte Folded Spill
ldr d26, [sp, #12056] ; 8-byte Folded Reload
fmul d7, d26, d0
str d28, [sp, #6048] ; 8-byte Folded Spill
fsub d22, d7, d28
fmul d17, d26, d22
fmov d0, x10
str d0, [sp, #9232] ; 8-byte Folded Spill
fmul d17, d17, d0
ldr q20, [sp, #12000] ; 16-byte Folded Reload
fmul d21, d20, d6
mov.16b v0, v20
fsub d17, d21, d17
mov x9, #-4863887597560135680
fmov d1, x9
str d1, [sp, #6480] ; 8-byte Folded Spill
fmul d1, d5, d1
mov x9, #62612
movk x9, #18904, lsl #16
movk x9, #1144, lsl #32
movk x9, #49064, lsl #48
fmov d21, x9
fmul d3, d3, d21
fmov d11, d21
str d21, [sp, #8608] ; 8-byte Folded Spill
fadd d1, d3, d1
fmul d3, d16, d2
fadd d1, d1, d3
fadd d4, d1, d4
ldr q20, [sp, #11840] ; 16-byte Folded Reload
fmul d1, d20, d4
fadd d1, d1, d17
mov x9, #5915
movk x9, #64709, lsl #16
movk x9, #30489, lsl #32
movk x9, #16392, lsl #48
mov x10, #2356
movk x10, #12413, lsl #16
movk x10, #55910, lsl #32
movk x10, #49095, lsl #48
fmov d2, x9
str d2, [sp, #6224] ; 8-byte Folded Spill
fmul d3, d26, d2
str d3, [sp, #3984] ; 8-byte Folded Spill
fmul d3, d26, d3
fmov d2, x10
str d2, [sp, #9200] ; 8-byte Folded Spill
fmul d3, d3, d2
fsub d17, d3, d1
str q4, [sp, #1424] ; 16-byte Folded Spill
fmul d1, d0, d4
str q6, [sp, #1440] ; 16-byte Folded Spill
fmul d3, d20, d6
fsub d1, d1, d3
str d22, [sp, #4000] ; 8-byte Folded Spill
fsub d6, d22, d7
str d6, [sp, #4896] ; 8-byte Folded Spill
mov x9, #58251
movk x9, #46885, lsl #16
movk x9, #26312, lsl #32
movk x9, #16401, lsl #48
mov x10, #52090
movk x10, #42545, lsl #16
movk x10, #26349, lsl #32
movk x10, #16345, lsl #48
ldr q4, [sp, #11824] ; 16-byte Folded Reload
fmul d3, d4, d1
mov.16b v2, v4
fmov d4, x9
str d4, [sp, #6200] ; 8-byte Folded Spill
ldr d0, [sp, #12208] ; 8-byte Folded Reload
fmul d4, d0, d4
str d4, [sp, #6008] ; 8-byte Folded Spill
fsub d4, d6, d4
str d4, [sp, #4008] ; 8-byte Folded Spill
fmul d4, d0, d4
fmov d6, x10
str d6, [sp, #8688] ; 8-byte Folded Spill
fmul d7, d4, d6
fsub d3, d3, d7
ldr q6, [sp, #12240] ; 16-byte Folded Reload
fmul d7, d6, d17
str q6, [sp, #12240] ; 16-byte Folded Spill
fsub d7, d7, d3
mov x9, #64990
movk x9, #28266, lsl #16
movk x9, #45172, lsl #32
movk x9, #16414, lsl #48
mov x10, #28530
movk x10, #30490, lsl #16
movk x10, #27495, lsl #32
movk x10, #49093, lsl #48
fmov d3, x9
str d3, [sp, #6192] ; 8-byte Folded Spill
fmul d3, d0, d3
str d3, [sp, #3976] ; 8-byte Folded Spill
fmul d16, d0, d3
fmov d3, x10
str d3, [sp, #8736] ; 8-byte Folded Spill
fmul d16, d16, d3
fadd d3, d7, d16
str q3, [sp, #6256] ; 16-byte Folded Spill
mov x9, #11201
movk x9, #50599, lsl #16
movk x9, #31589, lsl #32
movk x9, #16242, lsl #48
fmov d31, x9
fmul d4, d4, d31
str d31, [sp, #5920] ; 8-byte Folded Spill
str q1, [sp, #1392] ; 16-byte Folded Spill
fmul d7, d6, d1
fsub d4, d7, d4
str q17, [sp, #1408] ; 16-byte Folded Spill
fmul d7, d2, d17
fadd d1, d4, d7
str q1, [sp, #6240] ; 16-byte Folded Spill
ldr d3, [sp, #9808] ; 8-byte Folded Reload
ldr d1, [sp, #9768] ; 8-byte Folded Reload
fdiv d4, d1, d3
ldr d1, [sp, #10824] ; 8-byte Folded Reload
fmul d7, d1, d4
fmul d6, d7, d13
fmul d7, d7, d27
ldr d1, [sp, #9776] ; 8-byte Folded Reload
fmul d7, d7, d1
fmov d5, #5.00000000
fmul d16, d7, d5
ldr d1, [sp, #10536] ; 8-byte Folded Reload
str d6, [sp, #3768] ; 8-byte Folded Spill
fsub d7, d6, d1
str d16, [sp, #3760] ; 8-byte Folded Spill
fadd d7, d16, d7
ldr d1, [sp, #10520] ; 8-byte Folded Reload
fdiv d7, d7, d1
ldr d1, [sp, #10528] ; 8-byte Folded Reload
fmul d6, d1, d7
ldr d1, [sp, #10512] ; 8-byte Folded Reload
fmul d7, d1, d7
ldr d1, [sp, #9792] ; 8-byte Folded Reload
fdiv d1, d1, d3
str d1, [sp, #3752] ; 8-byte Folded Spill
fadd d1, d1, d7
ldur d3, [x29, #-248] ; 8-byte Folded Reload
fmul d7, d3, d6
fmov d20, d3
fmov d17, d6
ldr d26, [sp, #12232] ; 8-byte Folded Reload
fmul d16, d26, d1
fmov d6, d1
str d1, [sp, #5976] ; 8-byte Folded Spill
fsub d23, d7, d16
ldr d1, [sp, #9760] ; 8-byte Folded Reload
fmul d4, d4, d1
ldr d1, [sp, #10264] ; 8-byte Folded Reload
fmul d7, d1, d4
fmul d7, d7, d13
ldr d1, [sp, #10816] ; 8-byte Folded Reload
fmul d7, d1, d7
fmul d7, d7, d25
ldr d1, [sp, #10808] ; 8-byte Folded Reload
fmul d0, d1, d4
str d0, [sp, #6768] ; 8-byte Folded Spill
ldr d1, [sp, #10280] ; 8-byte Folded Reload
fmul d0, d1, d0
str d0, [sp, #6472] ; 8-byte Folded Spill
ldr d1, [sp, #10288] ; 8-byte Folded Reload
fmul d4, d1, d0
fmov d19, #3.00000000
fmul d4, d4, d19
fadd d3, d7, d4
ldr d0, [sp, #11968] ; 8-byte Folded Reload
fmul d4, d0, d6
fmul d1, d20, d3
str d1, [sp, #3728] ; 8-byte Folded Spill
fadd d14, d1, d4
ldur d18, [x29, #-192] ; 8-byte Folded Reload
fmul d4, d18, d23
fadd d7, d14, d4
fmul d4, d0, d17
fmov d2, d0
fmov d20, d17
str d17, [sp, #8232] ; 8-byte Folded Spill
fmul d1, d26, d3
str d1, [sp, #3720] ; 8-byte Folded Spill
fadd d4, d1, d4
ldur d29, [x29, #-160] ; 8-byte Folded Reload
fmul d16, d29, d4
fsub d30, d7, d16
ldr d24, [sp, #11472] ; 8-byte Folded Reload
fmul d7, d24, d30
str d30, [sp, #5048] ; 8-byte Folded Spill
ldr d6, [sp, #11728] ; 8-byte Folded Reload
fmul d16, d6, d4
fsub d7, d7, d16
ldr d6, [sp, #10224] ; 8-byte Folded Reload
ldr d0, [sp, #10272] ; 8-byte Folded Reload
fdiv d16, d6, d0
ldr d6, [sp, #9744] ; 8-byte Folded Reload
fmul d17, d16, d6
ldr d6, [sp, #10240] ; 8-byte Folded Reload
fmul d21, d6, d17
fmul d21, d21, d13
ldr d6, [sp, #10800] ; 8-byte Folded Reload
fmul d21, d6, d21
fmul d21, d21, d25
ldr d6, [sp, #10792] ; 8-byte Folded Reload
fmul d0, d6, d17
str d0, [sp, #8160] ; 8-byte Folded Spill
ldr d6, [sp, #10248] ; 8-byte Folded Reload
fmul d0, d6, d0
str d0, [sp, #7984] ; 8-byte Folded Spill
ldr d6, [sp, #10256] ; 8-byte Folded Reload
fmul d17, d6, d0
fmul d17, d17, d19
fmov d1, #3.00000000
fadd d8, d21, d17
ldr d6, [sp, #11072] ; 8-byte Folded Reload
fmul d16, d6, d16
fmul d17, d16, d13
fmul d16, d16, d27
ldr d6, [sp, #9752] ; 8-byte Folded Reload
fmul d16, d16, d6
fmul d6, d16, d5
ldr d16, [sp, #10416] ; 8-byte Folded Reload
str d17, [sp, #3688] ; 8-byte Folded Spill
fsub d16, d17, d16
str d6, [sp, #3680] ; 8-byte Folded Spill
fadd d16, d6, d16
ldr d17, [sp, #10408] ; 8-byte Folded Reload
fdiv d0, d16, d17
str d0, [sp, #6784] ; 8-byte Folded Spill
ldr d16, [sp, #10472] ; 8-byte Folded Reload
fmul d6, d16, d0
fmul d16, d2, d6
fmov d28, d6
str d6, [sp, #6312] ; 8-byte Folded Spill
fmov d17, d26
fmul d6, d26, d8
str d6, [sp, #3736] ; 8-byte Folded Spill
fadd d5, d6, d16
ldr d0, [sp, #11560] ; 8-byte Folded Reload
fmul d16, d0, d5
fmov d19, d5
fsub d7, d7, d16
ldr d5, [sp, #8664] ; 8-byte Folded Reload
fmul d6, d2, d5
ldr d16, [sp, #11928] ; 8-byte Folded Reload
fmul d16, d16, d6
fmov d0, d6
str d6, [sp, #7992] ; 8-byte Folded Spill
fmov d26, d12
str d12, [sp, #8720] ; 8-byte Folded Spill
fmul d21, d17, d12
fmov d22, d17
ldr d12, [sp, #11512] ; 8-byte Folded Reload
fmul d17, d12, d21
fsub d16, d16, d17
ldr d17, [sp, #11912] ; 8-byte Folded Reload
fmul d17, d17, d20
fadd d16, d16, d17
ldr d6, [sp, #11720] ; 8-byte Folded Reload
fmul d6, d6, d3
str d6, [sp, #3856] ; 8-byte Folded Spill
fadd d16, d6, d16
ldr d17, [sp, #11744] ; 8-byte Folded Reload
fmul d17, d17, d28
fadd d16, d17, d16
ldr d6, [sp, #11696] ; 8-byte Folded Reload
str d8, [sp, #5952] ; 8-byte Folded Spill
fmul d6, d6, d8
str d6, [sp, #3848] ; 8-byte Folded Spill
fadd d16, d6, d16
str d16, [sp, #1656] ; 8-byte Folded Spill
ldur d6, [x29, #-240] ; 8-byte Folded Reload
fmul d16, d6, d16
fmov d20, d6
fadd d7, d16, d7
ldr d28, [sp, #11624] ; 8-byte Folded Reload
fmul d16, d28, d26
str d3, [sp, #3992] ; 8-byte Folded Spill
fadd d6, d3, d8
fsub d3, d16, d6
str d3, [sp, #3968] ; 8-byte Folded Spill
fmul d26, d28, d3
ldr d3, [sp, #9312] ; 8-byte Folded Reload
fmul d16, d26, d3
fsub d16, d7, d16
str d21, [sp, #5008] ; 8-byte Folded Spill
fmul d7, d22, d21
fmul d17, d2, d0
fsub d7, d7, d17
fmul d17, d7, d15
fadd d16, d17, d16
fmul d0, d12, d5
str d0, [sp, #6368] ; 8-byte Folded Spill
fmul d3, d0, d20
str d3, [sp, #3960] ; 8-byte Folded Spill
fmul d17, d28, d3
fadd d16, d17, d16
ldr d3, [sp, #8816] ; 8-byte Folded Reload
ldr d15, [sp, #8808] ; 8-byte Folded Reload
fdiv d17, d3, d15
ldr d3, [sp, #8800] ; 8-byte Folded Reload
fmul d21, d17, d3
ldr d3, [sp, #9688] ; 8-byte Folded Reload
fmul d12, d21, d3
fmul d12, d12, d13
ldr d3, [sp, #10128] ; 8-byte Folded Reload
fmul d12, d3, d12
fmul d2, d12, d25
ldr d3, [sp, #10120] ; 8-byte Folded Reload
fmul d0, d21, d3
str d0, [sp, #6464] ; 8-byte Folded Spill
ldr d3, [sp, #9696] ; 8-byte Folded Reload
fmul d0, d3, d0
str d0, [sp, #6440] ; 8-byte Folded Spill
ldr d3, [sp, #9704] ; 8-byte Folded Reload
fmul d21, d3, d0
fmul d21, d21, d1
fadd d21, d21, d2
ldr d2, [sp, #8784] ; 8-byte Folded Reload
fmul d2, d17, d2
fmul d0, d2, d27
ldr d3, [sp, #8776] ; 8-byte Folded Reload
fmul d0, d0, d3
fmov d1, #5.00000000
fmul d3, d0, d1
fmul d2, d2, d13
ldr d0, [sp, #9856] ; 8-byte Folded Reload
str d2, [sp, #3648] ; 8-byte Folded Spill
fsub d0, d2, d0
str d3, [sp, #3656] ; 8-byte Folded Spill
fadd d0, d3, d0
ldr d2, [sp, #9848] ; 8-byte Folded Reload
fdiv d0, d0, d2
fmul d2, d28, d21
ldr d3, [sp, #11320] ; 8-byte Folded Reload
fmul d3, d3, d2
str d3, [sp, #3824] ; 8-byte Folded Spill
fsub d5, d16, d3
ldr d3, [sp, #9840] ; 8-byte Folded Reload
fmul d0, d3, d0
ldr d3, [sp, #8792] ; 8-byte Folded Reload
fdiv d3, d3, d15
str d3, [sp, #3640] ; 8-byte Folded Spill
fadd d0, d3, d0
str d0, [sp, #5944] ; 8-byte Folded Spill
fmul d0, d28, d0
fmul d16, d18, d0
fmov d8, d0
fmul d0, d29, d2
str d0, [sp, #3704] ; 8-byte Folded Spill
fadd d22, d0, d16
ldr d3, [sp, #11096] ; 8-byte Folded Reload
fmul d16, d3, d22
str d22, [sp, #6208] ; 8-byte Folded Spill
fsub d5, d5, d16
ldr d0, [sp, #9320] ; 8-byte Folded Reload
fmul d16, d28, d0
str d6, [sp, #6024] ; 8-byte Folded Spill
str d21, [sp, #3952] ; 8-byte Folded Spill
fadd d6, d6, d21
fsub d0, d16, d6
mov x9, #4359484439294640128
fmov d16, x9
str d16, [sp, #6496] ; 8-byte Folded Spill
fmul d17, d5, d16
str d0, [sp, #3944] ; 8-byte Folded Spill
fmul d16, d28, d0
fmul d12, d16, d11
fadd d17, d17, d12
str d23, [sp, #6360] ; 8-byte Folded Spill
ldr d0, [sp, #11768] ; 8-byte Folded Reload
fmul d12, d0, d23
ldr d0, [sp, #12144] ; 8-byte Folded Reload
fmul d15, d0, d14
fadd d12, d12, d15
ldur d1, [x29, #-232] ; 8-byte Folded Reload
fmul d15, d1, d4
fsub d0, d12, d15
fmul d12, d24, d0
fmov d21, d0
str d0, [sp, #5000] ; 8-byte Folded Spill
ldr d0, [sp, #11880] ; 8-byte Folded Reload
fmul d15, d0, d4
fsub d12, d12, d15
ldr d0, [sp, #11872] ; 8-byte Folded Reload
fmov d27, d19
str d19, [sp, #6216] ; 8-byte Folded Spill
fmul d15, d0, d19
fsub d12, d12, d15
ldr d0, [sp, #9176] ; 8-byte Folded Reload
fmul d15, d7, d0
fadd d0, d15, d12
ldr d19, [sp, #9304] ; 8-byte Folded Reload
fmul d12, d26, d19
ldr d20, [sp, #12304] ; 8-byte Folded Reload
fmul d15, d20, d0
fmov d25, d20
fadd d12, d15, d12
ldr d11, [sp, #11776] ; 8-byte Folded Reload
fmul d15, d11, d23
str d14, [sp, #1616] ; 8-byte Folded Spill
ldr d18, [sp, #12152] ; 8-byte Folded Reload
fmul d14, d18, d14
fadd d14, d15, d14
ldr d20, [sp, #12296] ; 8-byte Folded Reload
fmul d15, d20, d4
fsub d19, d14, d15
str d19, [sp, #4992] ; 8-byte Folded Spill
ldr d18, [sp, #12096] ; 8-byte Folded Reload
fmul d4, d18, d4
fmul d14, d24, d19
fsub d4, d14, d4
fmul d14, d29, d30
fmul d15, d1, d21
fadd d14, d14, d15
fmul d15, d20, d19
fadd d14, d15, d14
ldr d1, [sp, #9208] ; 8-byte Folded Reload
fmul d14, d14, d1
fadd d4, d4, d14
ldr d1, [sp, #12088] ; 8-byte Folded Reload
fmul d14, d1, d27
fsub d4, d4, d14
ldr d1, [sp, #9192] ; 8-byte Folded Reload
fmul d7, d7, d1
fadd d1, d7, d4
ldr d7, [sp, #12312] ; 8-byte Folded Reload
fmul d4, d7, d1
fmov d20, d7
fadd d4, d12, d4
ldr d7, [sp, #11528] ; 8-byte Folded Reload
fmul d7, d7, d2
str d7, [sp, #3800] ; 8-byte Folded Spill
fsub d7, d4, d7
str d8, [sp, #6352] ; 8-byte Folded Spill
ldr d4, [sp, #11688] ; 8-byte Folded Reload
fmul d4, d4, d8
ldr d15, [sp, #11488] ; 8-byte Folded Reload
fmul d12, d15, d2
str d12, [sp, #3672] ; 8-byte Folded Spill
fadd d4, d12, d4
fmul d12, d3, d4
fmov d21, d4
str d4, [sp, #6280] ; 8-byte Folded Spill
fsub d7, d7, d12
fmul d12, d28, d10
str d12, [sp, #5968] ; 8-byte Folded Spill
fmul d12, d28, d12
ldr d4, [sp, #9112] ; 8-byte Folded Reload
fmul d14, d12, d4
str d14, [sp, #8352] ; 8-byte Folded Spill
fadd d7, d14, d7
ldr d18, [sp, #9104] ; 8-byte Folded Reload
fmul d14, d7, d18
fadd d17, d17, d14
ldr d4, [sp, #9224] ; 8-byte Folded Reload
fmul d26, d26, d4
str d0, [sp, #1608] ; 8-byte Folded Spill
fmul d14, d20, d0
fsub d26, d26, d14
str d1, [sp, #1592] ; 8-byte Folded Spill
fmul d14, d25, d1
fadd d26, d26, d14
ldr d0, [sp, #11432] ; 8-byte Folded Reload
fmul d0, d0, d2
str d0, [sp, #3776] ; 8-byte Folded Spill
fsub d26, d26, d0
ldr d0, [sp, #11680] ; 8-byte Folded Reload
fmul d1, d0, d2
ldr d2, [sp, #11632] ; 8-byte Folded Reload
fmul d2, d2, d8
str d1, [sp, #3664] ; 8-byte Folded Spill
fadd d1, d1, d2
str d1, [sp, #6272] ; 8-byte Folded Spill
fmul d14, d3, d1
fsub d26, d26, d14
fmul d14, d29, d22
fmul d15, d15, d21
fadd d14, d14, d15
fmul d15, d0, d1
fadd d14, d15, d14
ldr d0, [sp, #9120] ; 8-byte Folded Reload
fmul d14, d14, d0
fsub d26, d26, d14
mov x9, #-4868391197187506176
fmov d0, x9
str d0, [sp, #6424] ; 8-byte Folded Spill
fmul d5, d5, d0
ldr d0, [sp, #9328] ; 8-byte Folded Reload
fmul d16, d16, d0
fadd d5, d16, d5
fadd d5, d7, d5
ldr d0, [sp, #9184] ; 8-byte Folded Reload
fmul d0, d12, d0
str d0, [sp, #8344] ; 8-byte Folded Spill
fadd d7, d0, d26
fadd d0, d17, d7
fmul d7, d7, d18
fadd d2, d5, d7
ldr q1, [sp, #11984] ; 16-byte Folded Reload
fmul d5, d1, d0
mov.16b v20, v1
ldr q1, [sp, #11808] ; 16-byte Folded Reload
fmul d7, d1, d2
mov.16b v18, v1
fsub d3, d5, d7
ldr d16, [sp, #12048] ; 8-byte Folded Reload
ldr d1, [sp, #9216] ; 8-byte Folded Reload
fmul d5, d16, d1
str d6, [sp, #6032] ; 8-byte Folded Spill
fsub d6, d5, d6
fsub d4, d6, d5
str d4, [sp, #4872] ; 8-byte Folded Spill
ldr d17, [sp, #12072] ; 8-byte Folded Reload
ldr d1, [sp, #6200] ; 8-byte Folded Reload
fmul d5, d17, d1
ldr q7, [sp, #11952] ; 16-byte Folded Reload
fmul d1, d7, d3
mov.16b v19, v7
str q7, [sp, #11952] ; 16-byte Folded Spill
str d5, [sp, #5960] ; 8-byte Folded Spill
fsub d5, d4, d5
str d5, [sp, #3936] ; 8-byte Folded Spill
fmul d5, d17, d5
fmul d7, d5, d31
fsub d1, d1, d7
str d6, [sp, #3928] ; 8-byte Folded Spill
fmov d4, d16
fmul d7, d16, d6
ldr d6, [sp, #9232] ; 8-byte Folded Reload
fmul d7, d7, d6
str q2, [sp, #1360] ; 16-byte Folded Spill
fmul d16, d20, d2
fsub d7, d16, d7
str q0, [sp, #1376] ; 16-byte Folded Spill
fmul d16, d18, d0
fadd d7, d16, d7
ldr d0, [sp, #6224] ; 8-byte Folded Reload
fmul d0, d4, d0
str d0, [sp, #3920] ; 8-byte Folded Spill
fmul d16, d4, d0
ldr d0, [sp, #9200] ; 8-byte Folded Reload
fmul d16, d16, d0
fsub d0, d16, d7
ldr q2, [sp, #11792] ; 16-byte Folded Reload
fmul d7, d2, d0
fadd d1, d1, d7
str q1, [sp, #4848] ; 16-byte Folded Spill
ldr d1, [sp, #8688] ; 8-byte Folded Reload
fmul d1, d5, d1
str q3, [sp, #1344] ; 16-byte Folded Spill
fmul d5, d2, d3
fsub d1, d5, d1
str q0, [sp, #1312] ; 16-byte Folded Spill
fmul d5, d19, d0
fsub d1, d5, d1
ldr d0, [sp, #6192] ; 8-byte Folded Reload
fmul d0, d17, d0
str d0, [sp, #3912] ; 8-byte Folded Spill
fmul d3, d17, d0
ldr d0, [sp, #8736] ; 8-byte Folded Reload
fmul d3, d3, d0
fadd d0, d1, d3
str q0, [sp, #6224] ; 16-byte Folded Spill
ldr d22, [sp, #5120] ; 8-byte Folded Reload
ldur d17, [x29, #-176] ; 8-byte Folded Reload
fmul d1, d17, d22
ldr d0, [sp, #6512] ; 8-byte Folded Reload
ldr d18, [sp, #11864] ; 8-byte Folded Reload
fmul d3, d18, d0
fadd d1, d1, d3
ldr d8, [sp, #5104] ; 8-byte Folded Reload
ldr d4, [sp, #12016] ; 8-byte Folded Reload
fmul d3, d4, d8
fadd d3, d3, d1
ldr d0, [sp, #10960] ; 8-byte Folded Reload
ldr d1, [sp, #8200] ; 8-byte Folded Reload
fmul d5, d0, d1
mov x9, #10523
movk x9, #38535, lsl #16
movk x9, #12921, lsl #32
movk x9, #16642, lsl #48
fmov d0, x9
str d0, [sp, #8200] ; 8-byte Folded Spill
fmul d5, d5, d0
ldr d0, [sp, #10344] ; 8-byte Folded Reload
fmul d5, d0, d5
ldr d0, [sp, #10312] ; 8-byte Folded Reload
fdiv d5, d5, d0
ldr d0, [sp, #10296] ; 8-byte Folded Reload
fmul d1, d0, d5
ldr d0, [sp, #10976] ; 8-byte Folded Reload
ldr d2, [sp, #8208] ; 8-byte Folded Reload
fmul d5, d0, d2
fmul d5, d5, d13
ldr d0, [sp, #10320] ; 8-byte Folded Reload
fmul d5, d0, d5
mov x9, #211106232532992
movk x9, #16498, lsl #48
fmov d10, x9
fmul d0, d5, d10
str d1, [sp, #3064] ; 8-byte Folded Spill
str d0, [sp, #3056] ; 8-byte Folded Spill
fsub d5, d1, d0
fadd d5, d5, d3
fmul d7, d3, d13
fsub d28, d5, d7
ldr d0, [sp, #10392] ; 8-byte Folded Reload
ldr d1, [sp, #8168] ; 8-byte Folded Reload
fmul d7, d0, d1
ldr d0, [sp, #10936] ; 8-byte Folded Reload
ldr d1, [sp, #10496] ; 8-byte Folded Reload
fdiv d0, d0, d1
str d0, [sp, #3072] ; 8-byte Folded Spill
fadd d6, d0, d7
ldr d2, [sp, #12320] ; 8-byte Folded Reload
ldr d12, [sp, #7976] ; 8-byte Folded Reload
fmul d7, d2, d12
ldur d0, [x29, #-184] ; 8-byte Folded Reload
fmul d16, d0, d6
fsub d26, d7, d16
fmul d7, d9, d6
ldr d0, [sp, #6016] ; 8-byte Folded Reload
fmul d0, d2, d0
fmov d30, d2
str d0, [sp, #3080] ; 8-byte Folded Spill
fadd d0, d0, d7
fmov d2, d17
fmul d7, d17, d26
fadd d7, d0, d7
ldr d5, [sp, #6504] ; 8-byte Folded Reload
ldur d1, [x29, #-168] ; 8-byte Folded Reload
fmul d16, d1, d5
fsub d16, d7, d16
fmul d7, d18, d26
ldr d1, [sp, #12200] ; 8-byte Folded Reload
fmul d17, d1, d0
fadd d7, d7, d17
ldur d1, [x29, #-216] ; 8-byte Folded Reload
fmul d17, d1, d5
fsub d25, d7, d17
fmul d7, d2, d16
fmul d17, d18, d25
fadd d7, d7, d17
fmov d1, d4
fmul d17, d4, d26
str d0, [sp, #1648] ; 8-byte Folded Spill
ldr d4, [sp, #12160] ; 8-byte Folded Reload
fmul d14, d4, d0
fadd d17, d17, d14
ldr d0, [sp, #12336] ; 8-byte Folded Reload
fmul d9, d0, d5
fsub d20, d17, d9
fmul d9, d1, d20
fadd d7, d9, d7
fsub d3, d28, d3
fsub d9, d3, d7
ldr d0, [sp, #10944] ; 8-byte Folded Reload
ldr d1, [sp, #8240] ; 8-byte Folded Reload
fmul d14, d0, d1
mov x9, #18811
movk x9, #34700, lsl #16
movk x9, #61210, lsl #32
movk x9, #16643, lsl #48
fmov d0, x9
str d0, [sp, #8240] ; 8-byte Folded Spill
fmul d14, d14, d0
ldr d0, [sp, #10352] ; 8-byte Folded Reload
fmul d14, d0, d14
ldr d0, [sp, #10328] ; 8-byte Folded Reload
fdiv d14, d14, d0
ldr d0, [sp, #10304] ; 8-byte Folded Reload
fmul d1, d0, d14
ldr d0, [sp, #10952] ; 8-byte Folded Reload
ldr d4, [sp, #8248] ; 8-byte Folded Reload
fmul d14, d0, d4
fmul d14, d14, d13
ldr d0, [sp, #10336] ; 8-byte Folded Reload
fmul d14, d0, d14
fmul d0, d14, d10
str d1, [sp, #3048] ; 8-byte Folded Spill
str d0, [sp, #3032] ; 8-byte Folded Spill
fsub d14, d1, d0
fadd d14, d14, d7
fmul d7, d7, d13
fsub d7, d14, d7
fadd d15, d7, d9
ldr d17, [sp, #6432] ; 8-byte Folded Reload
fmul d7, d2, d17
ldr d23, [sp, #6304] ; 8-byte Folded Reload
ldr d0, [sp, #12136] ; 8-byte Folded Reload
fmul d9, d0, d23
fadd d7, d7, d9
ldr d24, [sp, #6296] ; 8-byte Folded Reload
ldr d0, [sp, #11896] ; 8-byte Folded Reload
fmul d9, d0, d24
fadd d14, d9, d7
ldr d0, [sp, #10384] ; 8-byte Folded Reload
ldr d1, [sp, #8304] ; 8-byte Folded Reload
fmul d7, d0, d1
mov x9, #45572
movk x9, #23979, lsl #16
movk x9, #34811, lsl #32
movk x9, #16645, lsl #48
fmov d0, x9
str d0, [sp, #8168] ; 8-byte Folded Spill
fmul d7, d7, d0
ldr d0, [sp, #9736] ; 8-byte Folded Reload
fmul d7, d0, d7
ldr d0, [sp, #9728] ; 8-byte Folded Reload
fdiv d7, d7, d0
ldr d0, [sp, #9720] ; 8-byte Folded Reload
fmul d1, d0, d7
ldr d0, [sp, #10376] ; 8-byte Folded Reload
ldr d4, [sp, #8312] ; 8-byte Folded Reload
fmul d7, d0, d4
fmul d7, d7, d13
ldr d0, [sp, #9712] ; 8-byte Folded Reload
fmul d7, d0, d7
fmul d0, d7, d10
str d1, [sp, #3024] ; 8-byte Folded Spill
str d0, [sp, #3000] ; 8-byte Folded Spill
fsub d7, d1, d0
fsub d7, d7, d14
fmul d13, d14, d13
fadd d7, d7, d13
ldr d0, [sp, #11904] ; 8-byte Folded Reload
ldr d5, [sp, #6800] ; 8-byte Folded Reload
fmul d13, d0, d5
fmov d0, d22
ldr d18, [sp, #11384] ; 8-byte Folded Reload
fmul d31, d18, d22
fadd d31, d13, d31
ldr d0, [sp, #11664] ; 8-byte Folded Reload
fmul d13, d0, d26
fadd d31, d13, d31
ldr d19, [sp, #11280] ; 8-byte Folded Reload
fmul d16, d19, d16
fadd d16, d16, d31
ldr d2, [sp, #9312] ; 8-byte Folded Reload
fmul d31, d15, d2
fadd d16, d16, d31
ldr d0, [sp, #11176] ; 8-byte Folded Reload
ldr d9, [sp, #8664] ; 8-byte Folded Reload
fmul d0, d0, d9
ldr d27, [sp, #11936] ; 8-byte Folded Reload
ldr d29, [sp, #8216] ; 8-byte Folded Reload
fmul d31, d27, d29
str d0, [sp, #8304] ; 8-byte Folded Spill
fsub d31, d0, d31
ldr d0, [sp, #12184] ; 8-byte Folded Reload
ldr d1, [sp, #8296] ; 8-byte Folded Reload
fmul d13, d0, d1
fsub d31, d31, d13
ldr d1, [sp, #5992] ; 8-byte Folded Reload
ldr d0, [sp, #11736] ; 8-byte Folded Reload
fmul d13, d0, d1
fadd d31, d31, d13
ldr d0, [sp, #12176] ; 8-byte Folded Reload
fmul d13, d0, d12
fsub d31, d31, d13
ldr d0, [sp, #11648] ; 8-byte Folded Reload
fmul d13, d0, d6
fadd d0, d13, d31
str d0, [sp, #1720] ; 8-byte Folded Spill
ldr d3, [sp, #12280] ; 8-byte Folded Reload
fmul d31, d3, d0
fadd d16, d31, d16
fmul d31, d30, d29
ldr d0, [sp, #11328] ; 8-byte Folded Reload
fmul d0, d0, d9
str d0, [sp, #6504] ; 8-byte Folded Spill
fsub d31, d31, d0
ldr d0, [sp, #8712] ; 8-byte Folded Reload
fmul d13, d31, d0
fsub d16, d16, d13
ldr d4, [sp, #11536] ; 8-byte Folded Reload
ldr d0, [sp, #8720] ; 8-byte Folded Reload
fmul d13, d4, d0
fmov d0, d1
str d6, [sp, #3904] ; 8-byte Folded Spill
fadd d12, d1, d6
ldr d21, [sp, #11368] ; 8-byte Folded Reload
fmul d30, d21, d12
fsub d30, d13, d30
fmul d13, d30, d2
fadd d16, d13, d16
ldr d0, [sp, #11440] ; 8-byte Folded Reload
ldr d1, [sp, #6376] ; 8-byte Folded Reload
fmul d13, d1, d0
fadd d16, d13, d16
ldr d0, [sp, #11352] ; 8-byte Folded Reload
ldr d2, [sp, #6736] ; 8-byte Folded Reload
fmul d13, d0, d2
fsub d16, d16, d13
ldr d22, [sp, #10968] ; 8-byte Folded Reload
fmul d13, d22, d17
fsub d16, d16, d13
fadd d13, d15, d14
fadd d6, d13, d7
ldr d0, [sp, #6480] ; 8-byte Folded Reload
fmul d13, d16, d0
ldr d3, [sp, #8608] ; 8-byte Folded Reload
fmul d14, d6, d3
fadd d13, d13, d14
ldr d0, [sp, #9304] ; 8-byte Folded Reload
fmul d14, d15, d0
fmul d29, d30, d0
fadd d29, d14, d29
fmov d1, d5
ldr d0, [sp, #12192] ; 8-byte Folded Reload
fmul d14, d0, d5
fmul d27, d18, d8
fadd d27, d14, d27
ldr d0, [sp, #9208] ; 8-byte Folded Reload
fmul d5, d28, d0
fadd d5, d27, d5
ldr d0, [sp, #12104] ; 8-byte Folded Reload
fmul d27, d0, d26
fadd d5, d27, d5
fmul d17, d19, d20
fadd d5, d17, d5
ldr d0, [sp, #9192] ; 8-byte Folded Reload
fmul d17, d31, d0
fadd d0, d5, d17
ldur d27, [x29, #-200] ; 8-byte Folded Reload
fmul d5, d27, d0
fadd d5, d29, d5
ldr d17, [sp, #11976] ; 8-byte Folded Reload
fmul d17, d17, d1
ldr d1, [sp, #6512] ; 8-byte Folded Reload
fmul d20, d18, d1
fadd d17, d17, d20
ldr d1, [sp, #12064] ; 8-byte Folded Reload
fmul d20, d1, d26
fadd d17, d20, d17
fmul d20, d19, d25
fadd d17, d20, d17
ldr d1, [sp, #9176] ; 8-byte Folded Reload
fmul d20, d31, d1
fadd d18, d17, d20
ldur d19, [x29, #-208] ; 8-byte Folded Reload
fmul d17, d19, d18
fadd d5, d17, d5
fmov d1, d2
ldr d17, [sp, #11600] ; 8-byte Folded Reload
fmul d17, d17, d2
fsub d5, d5, d17
fmul d17, d22, d23
fsub d5, d5, d17
ldr d17, [sp, #8696] ; 8-byte Folded Reload
fmul d17, d4, d17
ldr d2, [sp, #9112] ; 8-byte Folded Reload
fmul d20, d17, d2
fadd d5, d20, d5
ldr d23, [sp, #9104] ; 8-byte Folded Reload
fmul d20, d5, d23
fadd d20, d20, d13
ldr d2, [sp, #9224] ; 8-byte Folded Reload
fmul d25, d15, d2
fmul d26, d30, d2
fadd d25, d25, d26
str d0, [sp, #1640] ; 8-byte Folded Spill
fmul d26, d19, d0
fadd d25, d25, d26
str d18, [sp, #1632] ; 8-byte Folded Spill
fmul d26, d27, d18
fsub d25, d25, d26
ldr d0, [sp, #11464] ; 8-byte Folded Reload
fmul d18, d0, d1
fsub d18, d25, d18
fmul d22, d22, d24
fsub d18, d18, d22
ldr d0, [sp, #9120] ; 8-byte Folded Reload
fmul d7, d7, d0
fadd d7, d18, d7
ldr d0, [sp, #9184] ; 8-byte Folded Reload
fmul d17, d17, d0
fadd d7, d17, d7
ldr d0, [sp, #9320] ; 8-byte Folded Reload
fmul d17, d4, d0
str d12, [sp, #3896] ; 8-byte Folded Spill
ldr d0, [sp, #5984] ; 8-byte Folded Reload
fadd d22, d12, d0
str d22, [sp, #6480] ; 8-byte Folded Spill
fmul d18, d21, d22
fmov d19, d6
fsub d17, d17, d18
fadd d18, d7, d20
fmul d20, d17, d3
fadd d0, d18, d20
ldr d1, [sp, #8592] ; 8-byte Folded Reload
fmul d16, d16, d1
ldr d1, [sp, #9328] ; 8-byte Folded Reload
fmul d18, d6, d1
fadd d16, d16, d18
fadd d5, d5, d16
fmul d7, d7, d23
fadd d5, d5, d7
fmul d7, d17, d1
fadd d1, d5, d7
ldr q2, [sp, #12000] ; 16-byte Folded Reload
fmul d5, d2, d0
ldr q3, [sp, #11840] ; 16-byte Folded Reload
fmul d7, d3, d1
fsub d6, d5, d7
ldr d14, [sp, #5920] ; 8-byte Folded Reload
fmul d5, d19, d14
ldr q4, [sp, #12240] ; 16-byte Folded Reload
fmul d7, d4, d6
fsub d5, d7, d5
ldr d18, [sp, #9232] ; 8-byte Folded Reload
fmul d7, d19, d18
str q1, [sp, #1264] ; 16-byte Folded Spill
fmul d16, d2, d1
fsub d7, d16, d7
str q0, [sp, #1280] ; 16-byte Folded Spill
fmul d16, d3, d0
fadd d7, d16, d7
ldr d0, [sp, #11584] ; 8-byte Folded Reload
ldr d1, [sp, #9216] ; 8-byte Folded Reload
fmul d1, d0, d1
ldr d2, [sp, #12056] ; 8-byte Folded Reload
fmul d16, d2, d22
str d1, [sp, #8312] ; 8-byte Folded Spill
fsub d16, d1, d16
fmul d16, d16, d18
fsub d7, d7, d16
mov x9, #5915
movk x9, #64709, lsl #16
movk x9, #30489, lsl #32
movk x9, #49160, lsl #48
fmov d1, x9
str d1, [sp, #8208] ; 8-byte Folded Spill
fmul d16, d0, d1
ldr d0, [sp, #9200] ; 8-byte Folded Reload
fmul d0, d16, d0
str d0, [sp, #8592] ; 8-byte Folded Spill
fadd d0, d0, d7
ldr q2, [sp, #11824] ; 16-byte Folded Reload
fmul d7, d2, d0
fsub d5, d5, d7
mov x9, #58251
movk x9, #46885, lsl #16
movk x9, #26312, lsl #32
movk x9, #49169, lsl #48
fmov d3, x9
str d3, [sp, #8216] ; 8-byte Folded Spill
ldr d1, [sp, #11784] ; 8-byte Folded Reload
fmul d16, d1, d3
ldr d1, [sp, #12208] ; 8-byte Folded Reload
fmul d18, d1, d22
fsub d16, d16, d18
fmul d18, d16, d14
fsub d1, d5, d18
str q1, [sp, #4496] ; 16-byte Folded Spill
ldr d15, [sp, #8688] ; 8-byte Folded Reload
str d19, [sp, #4520] ; 8-byte Folded Spill
fmul d5, d19, d15
str q6, [sp, #1248] ; 16-byte Folded Spill
fmul d18, d2, d6
fsub d5, d18, d5
str q0, [sp, #1232] ; 16-byte Folded Spill
fmul d18, d4, d0
fadd d5, d5, d18
fmul d16, d16, d15
fsub d0, d5, d16
str d0, [sp, #8248] ; 8-byte Folded Spill
ldr d0, [sp, #10856] ; 8-byte Folded Reload
ldr d1, [sp, #6472] ; 8-byte Folded Reload
fmul d5, d0, d1
ldr d0, [sp, #8200] ; 8-byte Folded Reload
fmul d1, d5, d0
ldr d24, [sp, #5048] ; 8-byte Folded Reload
ldur d3, [x29, #-192] ; 8-byte Folded Reload
fmul d5, d3, d24
ldr d12, [sp, #5000] ; 8-byte Folded Reload
ldr d7, [sp, #11768] ; 8-byte Folded Reload
fmul d16, d7, d12
fadd d5, d5, d16
ldr d31, [sp, #4992] ; 8-byte Folded Reload
fmul d16, d11, d31
fadd d16, d16, d5
ldr d0, [sp, #10160] ; 8-byte Folded Reload
fmul d1, d0, d1
ldr d0, [sp, #10152] ; 8-byte Folded Reload
fdiv d1, d1, d0
ldr d0, [sp, #10168] ; 8-byte Folded Reload
fmul d2, d0, d1
ldr d0, [sp, #10864] ; 8-byte Folded Reload
ldr d1, [sp, #6768] ; 8-byte Folded Reload
fmul d1, d0, d1
fmov d5, #0.50000000
fmul d1, d1, d5
ldr d0, [sp, #10200] ; 8-byte Folded Reload
fmul d1, d0, d1
fmul d0, d1, d10
str d2, [sp, #3392] ; 8-byte Folded Spill
str d0, [sp, #3384] ; 8-byte Folded Spill
fsub d1, d2, d0
fadd d1, d1, d16
fmul d5, d16, d5
fmov d19, #0.50000000
fsub d5, d1, d5
ldr d0, [sp, #10400] ; 8-byte Folded Reload
ldr d1, [sp, #6784] ; 8-byte Folded Reload
fmul d1, d0, d1
ldr d0, [sp, #10832] ; 8-byte Folded Reload
ldr d2, [sp, #10272] ; 8-byte Folded Reload
fdiv d0, d0, d2
str d0, [sp, #3416] ; 8-byte Folded Spill
fadd d2, d0, d1
ldur d21, [x29, #-248] ; 8-byte Folded Reload
ldr d30, [sp, #6312] ; 8-byte Folded Reload
fmul d1, d21, d30
ldr d0, [sp, #12232] ; 8-byte Folded Reload
fmul d18, d0, d2
fsub d1, d1, d18
ldr d0, [sp, #11968] ; 8-byte Folded Reload
fmul d18, d0, d2
ldr d0, [sp, #5952] ; 8-byte Folded Reload
fmul d0, d21, d0
str d0, [sp, #3408] ; 8-byte Folded Spill
fadd d0, d0, d18
fmul d18, d3, d1
fadd d18, d0, d18
ldr d6, [sp, #6216] ; 8-byte Folded Reload
ldur d4, [x29, #-160] ; 8-byte Folded Reload
fmul d20, d4, d6
fsub d25, d18, d20
fmul d18, d7, d1
ldr d4, [sp, #12144] ; 8-byte Folded Reload
fmul d20, d4, d0
fadd d18, d18, d20
ldur d4, [x29, #-232] ; 8-byte Folded Reload
fmul d20, d4, d6
fsub d26, d18, d20
fmul d18, d3, d25
fmov d23, d3
fmul d20, d7, d26
fadd d18, d18, d20
fmul d20, d11, d1
str d0, [sp, #1520] ; 8-byte Folded Spill
ldr d3, [sp, #12152] ; 8-byte Folded Reload
fmul d27, d3, d0
fadd d20, d20, d27
ldr d0, [sp, #12296] ; 8-byte Folded Reload
fmul d27, d0, d6
fsub d20, d20, d27
fmul d27, d11, d20
fadd d18, d27, d18
ldr d0, [sp, #10840] ; 8-byte Folded Reload
ldr d3, [sp, #7984] ; 8-byte Folded Reload
fmul d27, d0, d3
ldr d0, [sp, #8240] ; 8-byte Folded Reload
fmul d3, d27, d0
fsub d16, d5, d16
fsub d16, d16, d18
ldr d0, [sp, #10184] ; 8-byte Folded Reload
fmul d3, d0, d3
ldr d0, [sp, #10176] ; 8-byte Folded Reload
fdiv d3, d3, d0
ldr d0, [sp, #10192] ; 8-byte Folded Reload
fmul d6, d0, d3
ldr d0, [sp, #10848] ; 8-byte Folded Reload
ldr d3, [sp, #8160] ; 8-byte Folded Reload
fmul d3, d0, d3
fmul d3, d3, d19
ldr d0, [sp, #10208] ; 8-byte Folded Reload
fmul d3, d0, d3
fmul d0, d3, d10
str d6, [sp, #3368] ; 8-byte Folded Spill
str d0, [sp, #3360] ; 8-byte Folded Spill
fsub d3, d6, d0
fadd d3, d3, d18
fmul d18, d18, d19
fsub d3, d3, d18
fadd d3, d3, d16
ldr d8, [sp, #6208] ; 8-byte Folded Reload
fmul d16, d23, d8
ldr d7, [sp, #6280] ; 8-byte Folded Reload
ldr d0, [sp, #11688] ; 8-byte Folded Reload
fmul d18, d0, d7
fadd d16, d16, d18
ldr d22, [sp, #6272] ; 8-byte Folded Reload
ldr d0, [sp, #11632] ; 8-byte Folded Reload
fmul d18, d0, d22
fadd d16, d18, d16
ldr d0, [sp, #10144] ; 8-byte Folded Reload
ldr d4, [sp, #6440] ; 8-byte Folded Reload
fmul d18, d0, d4
ldr d0, [sp, #8168] ; 8-byte Folded Reload
fmul d18, d18, d0
ldr d0, [sp, #10136] ; 8-byte Folded Reload
ldr d4, [sp, #6464] ; 8-byte Folded Reload
fmul d27, d0, d4
fmul d27, d27, d19
ldr d0, [sp, #9680] ; 8-byte Folded Reload
fmul d27, d0, d27
fmul d0, d27, d10
ldr d6, [sp, #9672] ; 8-byte Folded Reload
fmul d18, d6, d18
ldr d6, [sp, #9656] ; 8-byte Folded Reload
fdiv d18, d18, d6
ldr d6, [sp, #9664] ; 8-byte Folded Reload
fmul d6, d6, d18
str d0, [sp, #3344] ; 8-byte Folded Spill
str d6, [sp, #3336] ; 8-byte Folded Spill
fsub d18, d6, d0
fsub d18, d18, d16
fmul d27, d16, d19
fadd d18, d18, d27
ldr d0, [sp, #11728] ; 8-byte Folded Reload
ldr d28, [sp, #6360] ; 8-byte Folded Reload
fmul d27, d0, d28
ldr d23, [sp, #11360] ; 8-byte Folded Reload
fmov d0, d24
fmul d29, d23, d24
fadd d27, d27, d29
ldr d0, [sp, #11560] ; 8-byte Folded Reload
fmul d29, d0, d1
fadd d27, d29, d27
ldr d24, [sp, #11272] ; 8-byte Folded Reload
fmul d25, d24, d25
fadd d25, d25, d27
ldr d13, [sp, #9312] ; 8-byte Folded Reload
fmul d27, d3, d13
fsub d25, d25, d27
ldr d0, [sp, #11168] ; 8-byte Folded Reload
fmul d0, d0, d9
ldr d6, [sp, #11920] ; 8-byte Folded Reload
ldr d19, [sp, #7992] ; 8-byte Folded Reload
fmul d27, d6, d19
str d0, [sp, #8296] ; 8-byte Folded Spill
fsub d27, d0, d27
ldr d0, [sp, #12288] ; 8-byte Folded Reload
ldr d6, [sp, #8232] ; 8-byte Folded Reload
fmul d29, d0, d6
fsub d27, d27, d29
ldr d6, [sp, #5976] ; 8-byte Folded Reload
ldr d0, [sp, #11720] ; 8-byte Folded Reload
fmul d29, d0, d6
fadd d27, d27, d29
ldr d0, [sp, #12168] ; 8-byte Folded Reload
fmul d29, d0, d30
fsub d27, d27, d29
ldr d0, [sp, #11696] ; 8-byte Folded Reload
fmul d29, d0, d2
fadd d0, d29, d27
str d0, [sp, #1600] ; 8-byte Folded Spill
ldur d4, [x29, #-240] ; 8-byte Folded Reload
fmul d27, d4, d0
fadd d25, d27, d25
ldr d0, [sp, #11312] ; 8-byte Folded Reload
fmul d0, d0, d9
fmul d19, d21, d19
str d0, [sp, #6296] ; 8-byte Folded Spill
fsub d19, d19, d0
ldr d0, [sp, #8712] ; 8-byte Folded Reload
fmul d27, d19, d0
fadd d25, d25, d27
ldr d9, [sp, #11504] ; 8-byte Folded Reload
ldr d0, [sp, #8720] ; 8-byte Folded Reload
fmul d27, d9, d0
fmov d0, d6
str d2, [sp, #3880] ; 8-byte Folded Spill
fadd d10, d6, d2
ldr d11, [sp, #11624] ; 8-byte Folded Reload
fmul d29, d11, d10
fsub d27, d27, d29
fmul d29, d27, d13
fsub d25, d25, d29
ldr d0, [sp, #11424] ; 8-byte Folded Reload
ldr d2, [sp, #6368] ; 8-byte Folded Reload
fmul d29, d2, d0
fadd d25, d29, d25
ldr d0, [sp, #11320] ; 8-byte Folded Reload
ldr d13, [sp, #6352] ; 8-byte Folded Reload
fmul d29, d0, d13
fsub d25, d25, d29
ldr d6, [sp, #10872] ; 8-byte Folded Reload
fmul d21, d6, d8
fsub d21, d25, d21
fadd d16, d3, d16
fadd d8, d16, d18
ldr d0, [sp, #6496] ; 8-byte Folded Reload
fmul d16, d21, d0
ldr d17, [sp, #8608] ; 8-byte Folded Reload
fmul d25, d8, d17
fadd d16, d16, d25
ldr d0, [sp, #9304] ; 8-byte Folded Reload
fmul d25, d3, d0
fmul d29, d27, d0
fadd d25, d25, d29
ldr d0, [sp, #12096] ; 8-byte Folded Reload
fmul d29, d0, d28
fmul d30, d23, d31
fadd d29, d29, d30
ldr d0, [sp, #9208] ; 8-byte Folded Reload
fmul d5, d5, d0
fadd d5, d29, d5
ldr d0, [sp, #12088] ; 8-byte Folded Reload
fmul d29, d0, d1
fadd d5, d29, d5
fmul d20, d24, d20
fadd d5, d20, d5
ldr d0, [sp, #9192] ; 8-byte Folded Reload
fmul d20, d19, d0
fadd d0, d5, d20
ldr d29, [sp, #12312] ; 8-byte Folded Reload
fmul d5, d29, d0
fadd d5, d25, d5
ldr d4, [sp, #11880] ; 8-byte Folded Reload
fmul d20, d4, d28
fmul d25, d23, d12
fadd d20, d20, d25
ldr d2, [sp, #11872] ; 8-byte Folded Reload
fmul d1, d2, d1
fadd d1, d1, d20
fmul d20, d24, d26
fadd d1, d20, d1
ldr d2, [sp, #9176] ; 8-byte Folded Reload
fmul d19, d19, d2
fadd d2, d1, d19
ldr d19, [sp, #12304] ; 8-byte Folded Reload
fmul d1, d19, d2
fadd d1, d1, d5
ldr d4, [sp, #11528] ; 8-byte Folded Reload
fmul d5, d4, d13
fsub d1, d1, d5
fmul d4, d6, d7
fsub d1, d1, d4
ldr d4, [sp, #8696] ; 8-byte Folded Reload
fmul d4, d9, d4
ldr d5, [sp, #9112] ; 8-byte Folded Reload
fmul d5, d4, d5
fadd d1, d5, d1
ldr d23, [sp, #9104] ; 8-byte Folded Reload
fmul d5, d1, d23
fadd d5, d5, d16
ldr d7, [sp, #9224] ; 8-byte Folded Reload
fmul d3, d3, d7
fmul d16, d27, d7
fadd d3, d3, d16
str d0, [sp, #1504] ; 8-byte Folded Spill
fmul d16, d19, d0
fadd d3, d3, d16
str d2, [sp, #1496] ; 8-byte Folded Spill
fmul d16, d29, d2
fsub d3, d3, d16
ldr d0, [sp, #11432] ; 8-byte Folded Reload
fmul d0, d0, d13
fsub d0, d3, d0
fmul d2, d6, d22
fsub d0, d0, d2
ldr d2, [sp, #9120] ; 8-byte Folded Reload
fmul d2, d18, d2
fadd d0, d0, d2
ldr d2, [sp, #9184] ; 8-byte Folded Reload
fmul d2, d4, d2
fadd d0, d2, d0
ldr d2, [sp, #9320] ; 8-byte Folded Reload
fmul d2, d9, d2
str d10, [sp, #3872] ; 8-byte Folded Spill
ldr d3, [sp, #5944] ; 8-byte Folded Reload
fadd d27, d10, d3
fmul d3, d11, d27
fsub d2, d2, d3
fadd d3, d0, d5
fmul d4, d2, d17
fadd d5, d3, d4
ldr d3, [sp, #6424] ; 8-byte Folded Reload
fmul d3, d21, d3
ldr d6, [sp, #9328] ; 8-byte Folded Reload
fmul d4, d8, d6
fadd d3, d3, d4
fadd d1, d1, d3
fmul d0, d0, d23
fadd d0, d1, d0
fmul d1, d2, d6
fadd d2, d0, d1
ldr q6, [sp, #11984] ; 16-byte Folded Reload
fmul d0, d6, d5
ldr q4, [sp, #11808] ; 16-byte Folded Reload
fmul d1, d4, d2
fsub d3, d0, d1
fmul d0, d8, d14
ldr q7, [sp, #11952] ; 16-byte Folded Reload
fmul d1, d7, d3
fsub d0, d1, d0
ldr d16, [sp, #9232] ; 8-byte Folded Reload
fmul d1, d8, d16
str q2, [sp, #1168] ; 16-byte Folded Spill
fmul d2, d6, d2
fsub d1, d2, d1
str q5, [sp, #1184] ; 16-byte Folded Spill
fmul d2, d4, d5
fadd d1, d2, d1
ldr d4, [sp, #11520] ; 8-byte Folded Reload
ldr d2, [sp, #9216] ; 8-byte Folded Reload
fmul d5, d4, d2
ldr d2, [sp, #12048] ; 8-byte Folded Reload
fmul d2, d2, d27
str d5, [sp, #8720] ; 8-byte Folded Spill
fsub d2, d5, d2
fmul d2, d2, d16
fsub d1, d1, d2
ldr d2, [sp, #8208] ; 8-byte Folded Reload
fmul d2, d4, d2
ldr d4, [sp, #9200] ; 8-byte Folded Reload
fmul d2, d2, d4
str d2, [sp, #8712] ; 8-byte Folded Spill
fadd d4, d2, d1
ldr q6, [sp, #11792] ; 16-byte Folded Reload
fmul d1, d6, d4
fsub d0, d0, d1
ldr d5, [sp, #11640] ; 8-byte Folded Reload
ldr d1, [sp, #8216] ; 8-byte Folded Reload
fmul d1, d5, d1
ldr d2, [sp, #12072] ; 8-byte Folded Reload
fmul d2, d2, d27
fsub d1, d1, d2
fmul d2, d1, d14
fsub d11, d0, d2
fmul d0, d8, d15
str q3, [sp, #1152] ; 16-byte Folded Spill
fmul d2, d6, d3
fsub d0, d2, d0
str q4, [sp, #1136] ; 16-byte Folded Spill
fmul d2, d7, d4
fadd d0, d0, d2
fmul d1, d1, d15
fsub d2, d0, d1
mov x9, #64990
movk x9, #28266, lsl #16
movk x9, #45172, lsl #32
movk x9, #49182, lsl #48
fmov d1, x9
ldr d0, [sp, #11784] ; 8-byte Folded Reload
fmul d0, d0, d1
ldr d3, [sp, #8736] ; 8-byte Folded Reload
fmul d12, d0, d3
fmul d1, d5, d1
fmul d4, d1, d3
mov x9, #43139
movk x9, #8835, lsl #16
movk x9, #28093, lsl #32
movk x9, #49187, lsl #48
fmov d1, x9
ldur d0, [x29, #-256] ; 8-byte Folded Reload
fmul d1, d0, d1
mov x9, #33620
movk x9, #2364, lsl #16
movk x9, #33974, lsl #32
movk x9, #49073, lsl #48
str d1, [sp, #4464] ; 8-byte Folded Spill
fmul d1, d0, d1
fmov d3, x9
fmul d14, d1, d3
mov x9, #47887
movk x9, #56309, lsl #16
movk x9, #15746, lsl #32
movk x9, #49212, lsl #48
fmov d1, x9
fmul d1, d0, d1
mov x9, #26610
movk x9, #29696, lsl #16
movk x9, #48971, lsl #32
movk x9, #16339, lsl #48
str d1, [sp, #4472] ; 8-byte Folded Spill
fmul d1, d0, d1
fmov d3, x9
fmul d31, d1, d3
mov x9, #28852
movk x9, #37576, lsl #16
movk x9, #2974, lsl #32
movk x9, #49192, lsl #48
fmov d3, x9
fmov d7, d0
fmul d3, d0, d3
ldr d0, [sp, #4896] ; 8-byte Folded Reload
fadd d10, d3, d0
ldr d0, [sp, #4872] ; 8-byte Folded Reload
fadd d9, d3, d0
ldr d0, [sp, #8248] ; 8-byte Folded Reload
fadd d29, d12, d0
mov x9, #39915
movk x9, #11776, lsl #16
movk x9, #40689, lsl #32
movk x9, #49053, lsl #48
str d4, [sp, #8696] ; 8-byte Folded Spill
fadd d30, d4, d2
fmov d2, x9
fmul d15, d1, d2
cbz x8, LBB19_40
; %bb.39:
ldr q23, [sp, #10736] ; 16-byte Folded Reload
ldr q0, [sp, #6256] ; 16-byte Folded Reload
fmul d2, d23, d0
mov x9, #54885
movk x9, #33778, lsl #16
movk x9, #12745, lsl #32
movk x9, #16308, lsl #48
fmov d5, x9
fmul d3, d1, d5
fadd d3, d3, d31
ldr q22, [sp, #11136] ; 16-byte Folded Reload
str d12, [sp, #9328] ; 8-byte Folded Spill
ldr q0, [sp, #6240] ; 16-byte Folded Reload
fmul d4, d22, d0
fadd d6, d3, d4
fmov d12, d7
fmul d3, d7, d10
mov x9, #39127
movk x9, #24179, lsl #16
movk x9, #24811, lsl #32
movk x9, #16304, lsl #48
fmov d4, x9
fmul d7, d3, d4
fsub d6, d6, d7
fsub d2, d2, d6
ldr q24, [sp, #11216] ; 16-byte Folded Reload
ldr q13, [sp, #4848] ; 16-byte Folded Reload
fmul d6, d24, d13
fsub d6, d2, d6
fmul d2, d12, d9
fmul d7, d2, d4
fadd d6, d6, d7
ldr q25, [sp, #10720] ; 16-byte Folded Reload
ldr q0, [sp, #6224] ; 16-byte Folded Reload
fmul d7, d25, d0
fadd d6, d7, d6
mov x9, #47887
movk x9, #56309, lsl #16
movk x9, #15746, lsl #32
movk x9, #49212, lsl #48
fmov d7, x9
ldr d26, [sp, #11248] ; 8-byte Folded Reload
fmul d7, d26, d7
mov x9, #39915
movk x9, #11776, lsl #16
movk x9, #40689, lsl #32
movk x9, #49053, lsl #48
fmov d16, x9
fmul d16, d7, d16
mov x9, #48998
movk x9, #16808, lsl #16
movk x9, #62387, lsl #32
movk x9, #16312, lsl #48
fmov d17, x9
fmul d18, d7, d17
fsub d16, d16, d18
mov x9, #33620
movk x9, #2364, lsl #16
movk x9, #33974, lsl #32
movk x9, #16305, lsl #48
fmov d18, x9
ldr d28, [sp, #4520] ; 8-byte Folded Reload
fmul d19, d28, d18
fsub d16, d16, d19
ldr q0, [sp, #4496] ; 16-byte Folded Reload
fmul d19, d23, d0
fadd d16, d16, d19
fmul d19, d22, d29
fsub d16, d16, d19
mov x9, #28852
movk x9, #37576, lsl #16
movk x9, #2974, lsl #32
movk x9, #49192, lsl #48
fmov d19, x9
fmul d19, d26, d19
ldr d20, [sp, #6480] ; 8-byte Folded Reload
fmul d20, d12, d20
fsub d20, d19, d20
fmul d21, d20, d18
fsub d16, d16, d21
fmul d21, d8, d18
fsub d16, d16, d21
fmul d21, d25, d11
fadd d16, d16, d21
fmul d21, d24, d30
fsub d16, d16, d21
fmul d21, d12, d27
fsub d19, d19, d21
fmul d21, d19, d18
fsub d16, d16, d21
mov x9, #43139
movk x9, #8835, lsl #16
movk x9, #28093, lsl #32
movk x9, #16419, lsl #48
fmov d21, x9
fmul d21, d26, d21
fmul d21, d21, d18
fadd d16, d21, d16
fadd d6, d6, d6
fadd d16, d16, d16
fadd d6, d6, d16
mov x9, #26610
movk x9, #29696, lsl #16
movk x9, #48971, lsl #32
movk x9, #16339, lsl #48
fmov d16, x9
fmul d16, d7, d16
fmul d5, d7, d5
fadd d5, d16, d5
fmul d7, d28, d4
fsub d5, d5, d7
fmul d7, d22, d0
fadd d5, d5, d7
fmul d7, d23, d29
fadd d5, d5, d7
fmul d7, d20, d4
fsub d5, d5, d7
fmul d7, d8, d4
fsub d5, d5, d7
fmul d7, d24, d11
fadd d5, d5, d7
fmul d7, d25, d30
fadd d5, d5, d7
fmul d4, d19, d4
fsub d4, d5, d4
fmul d1, d1, d17
fsub d1, d15, d1
ldr q0, [sp, #6240] ; 16-byte Folded Reload
fmul d5, d23, d0
ldr d12, [sp, #9328] ; 8-byte Folded Reload
fadd d1, d1, d5
fmul d3, d3, d18
fsub d1, d1, d3
ldr q0, [sp, #6256] ; 16-byte Folded Reload
fmul d3, d22, d0
fadd d1, d3, d1
fmul d3, d25, d13
fadd d1, d1, d3
fmul d2, d2, d18
fsub d1, d1, d2
ldr q0, [sp, #6224] ; 16-byte Folded Reload
fmul d2, d24, d0
fadd d1, d2, d1
ldr d5, [sp, #11400] ; 8-byte Folded Reload
fmul d2, d5, d6
fadd d1, d14, d1
fadd d1, d1, d4
ldr d3, [sp, #11160] ; 8-byte Folded Reload
fmul d3, d3, d1
fsub d2, d2, d3
ldr d4, [sp, #11392] ; 8-byte Folded Reload
fmul d3, d4, d6
ldr d6, [sp, #11240] ; 8-byte Folded Reload
fmul d1, d6, d1
fadd d1, d1, d3
fmul d2, d5, d2
fmov d3, #0.50000000
fmul d2, d2, d3
fmul d1, d4, d1
fmul d1, d1, d3
fsub d1, d2, d1
str d1, [x8, #8]
LBB19_40:
str d15, [sp, #6440] ; 8-byte Folded Spill
str d14, [sp, #6496] ; 8-byte Folded Spill
str d31, [sp, #6736] ; 8-byte Folded Spill
str d10, [sp, #728] ; 8-byte Folded Spill
str d9, [sp, #736] ; 8-byte Folded Spill
stp q30, q29, [sp, #896] ; 32-byte Folded Spill
str q11, [sp, #944] ; 16-byte Folded Spill
str d27, [sp, #4760] ; 8-byte Folded Spill
str d8, [sp, #1048] ; 8-byte Folded Spill
ldr d1, [sp, #10480] ; 8-byte Folded Reload
fmov d0, #0.50000000
fmul d1, d1, d0
ldr d2, [sp, #10904] ; 8-byte Folded Reload
fmul d1, d2, d1
mov x9, #4632233691727265792
fmov d25, x9
fmul d1, d1, d25
ldr d2, [sp, #10544] ; 8-byte Folded Reload
ldr d3, [sp, #10896] ; 8-byte Folded Reload
fmul d3, d3, d2
str d3, [sp, #8216] ; 8-byte Folded Spill
ldr d2, [sp, #10568] ; 8-byte Folded Reload
fmul d2, d2, d3
fmov d29, #3.00000000
fmul d2, d2, d29
fadd d5, d2, d1
ldr d1, [sp, #10432] ; 8-byte Folded Reload
fmul d1, d1, d0
ldr d2, [sp, #10888] ; 8-byte Folded Reload
fmul d1, d2, d1
fmul d1, d1, d25
ldr d2, [sp, #10440] ; 8-byte Folded Reload
ldr d3, [sp, #10880] ; 8-byte Folded Reload
fmul d3, d3, d2
str d3, [sp, #8248] ; 8-byte Folded Spill
ldr d2, [sp, #10448] ; 8-byte Folded Reload
fmul d2, d2, d3
fmul d2, d2, d29
fadd d16, d2, d1
ldr d1, [sp, #9816] ; 8-byte Folded Reload
fmul d1, d1, d0
ldr d2, [sp, #10368] ; 8-byte Folded Reload
fmul d1, d2, d1
fmul d1, d1, d25
ldr d2, [sp, #9824] ; 8-byte Folded Reload
ldr d0, [sp, #10360] ; 8-byte Folded Reload
fmul d0, d0, d2
str d0, [sp, #8688] ; 8-byte Folded Spill
ldr d2, [sp, #9832] ; 8-byte Folded Reload
fmul d2, d2, d0
fmul d2, d2, d29
fadd d7, d2, d1
ldur d0, [x29, #-184] ; 8-byte Folded Reload
fmul d1, d0, d5
ldr d2, [sp, #11904] ; 8-byte Folded Reload
fmul d3, d2, d1
ldur d11, [x29, #-168] ; 8-byte Folded Reload
fmul d2, d11, d1
ldr d4, [sp, #12320] ; 8-byte Folded Reload
fmul d22, d4, d5
str d2, [sp, #2848] ; 8-byte Folded Spill
fsub d6, d2, d22
ldr d21, [sp, #11376] ; 8-byte Folded Reload
fmul d2, d21, d6
fmov d13, d6
str d6, [sp, #4984] ; 8-byte Folded Spill
str d3, [sp, #2856] ; 8-byte Folded Spill
fadd d2, d3, d2
str d16, [sp, #5904] ; 8-byte Folded Spill
fmul d10, d0, d16
ldr d3, [sp, #11664] ; 8-byte Folded Reload
fmul d3, d3, d10
str d3, [sp, #2832] ; 8-byte Folded Spill
fadd d2, d3, d2
mov x9, #54806
movk x9, #23353, lsl #16
movk x9, #56949, lsl #32
movk x9, #49094, lsl #48
fmov d28, x9
fmul d0, d4, d28
ldr d3, [sp, #11944] ; 8-byte Folded Reload
fmul d3, d3, d0
fmov d17, d0
str d0, [sp, #8240] ; 8-byte Folded Spill
ldr d0, [sp, #8304] ; 8-byte Folded Reload
fsub d3, d3, d0
ldr d4, [sp, #11736] ; 8-byte Folded Reload
fmul d4, d4, d5
str d4, [sp, #2824] ; 8-byte Folded Spill
fsub d3, d3, d4
ldr d4, [sp, #11648] ; 8-byte Folded Reload
fmul d4, d4, d16
str d4, [sp, #2816] ; 8-byte Folded Spill
fsub d3, d3, d4
str d3, [sp, #1624] ; 8-byte Folded Spill
ldr d0, [sp, #12280] ; 8-byte Folded Reload
fmul d3, d0, d3
fadd d2, d3, d2
mov x9, #54806
movk x9, #23353, lsl #16
movk x9, #56949, lsl #32
movk x9, #16326, lsl #48
str d5, [sp, #3840] ; 8-byte Folded Spill
fadd d5, d5, d16
ldr d15, [sp, #11368] ; 8-byte Folded Reload
fmul d4, d15, d5
fmov d0, x9
ldr d23, [sp, #11536] ; 8-byte Folded Reload
fmul d3, d23, d0
fmov d31, d0
str d0, [sp, #9200] ; 8-byte Folded Spill
str d4, [sp, #2896] ; 8-byte Folded Spill
fadd d6, d4, d3
mov x9, #63706
movk x9, #13221, lsl #16
movk x9, #1281, lsl #32
movk x9, #16209, lsl #48
fmov d0, x9
fmul d4, d6, d0
fmov d8, d0
str d0, [sp, #9112] ; 8-byte Folded Spill
fadd d2, d2, d4
ldr d3, [sp, #11888] ; 8-byte Folded Reload
fmul d4, d3, d17
ldr d3, [sp, #6504] ; 8-byte Folded Reload
fsub d18, d3, d4
mov x9, #54125
movk x9, #53060, lsl #16
movk x9, #15481, lsl #32
movk x9, #16273, lsl #48
fmov d0, x9
str d0, [sp, #9312] ; 8-byte Folded Spill
fmul d4, d18, d0
fsub d2, d2, d4
ldr d0, [sp, #11568] ; 8-byte Folded Reload
fmul d0, d0, d28
str d0, [sp, #6360] ; 8-byte Folded Spill
ldr d3, [sp, #11440] ; 8-byte Folded Reload
fmul d4, d0, d3
fadd d2, d4, d2
fmul d4, d15, d7
ldr d3, [sp, #11352] ; 8-byte Folded Reload
fmul d3, d3, d4
str d3, [sp, #2808] ; 8-byte Folded Spill
fadd d2, d2, d3
fmul d0, d11, d4
ldr d27, [sp, #11128] ; 8-byte Folded Reload
fmul d3, d27, d0
fmov d9, d0
str d0, [sp, #8168] ; 8-byte Folded Spill
str d3, [sp, #2800] ; 8-byte Folded Spill
fadd d19, d2, d3
mov x9, #65123
movk x9, #27942, lsl #16
movk x9, #23314, lsl #32
movk x9, #16371, lsl #48
str d7, [sp, #3832] ; 8-byte Folded Spill
str d5, [sp, #3792] ; 8-byte Folded Spill
fadd d0, d5, d7
fmul d3, d15, d0
fmov d2, x9
str d2, [sp, #9328] ; 8-byte Folded Spill
fmul d2, d23, d2
str d3, [sp, #2880] ; 8-byte Folded Spill
fadd d20, d2, d3
mov x9, #4354980839667269632
mov x10, #47272
movk x10, #56762, lsl #16
movk x10, #43178, lsl #32
movk x10, #49060, lsl #48
fmov d2, x9
str d2, [sp, #8664] ; 8-byte Folded Spill
fmul d2, d19, d2
fmov d3, x10
str d3, [sp, #9216] ; 8-byte Folded Spill
fmul d7, d20, d3
fadd d16, d2, d7
ldr d2, [sp, #11976] ; 8-byte Folded Reload
fmul d3, d2, d1
ldur d5, [x29, #-216] ; 8-byte Folded Reload
fmul d2, d5, d1
fmov d17, d5
ldr d5, [sp, #12200] ; 8-byte Folded Reload
fmul d5, d5, d22
str d2, [sp, #2712] ; 8-byte Folded Spill
str d5, [sp, #2704] ; 8-byte Folded Spill
fsub d5, d2, d5
fmul d2, d21, d5
str d5, [sp, #4960] ; 8-byte Folded Spill
str d3, [sp, #2784] ; 8-byte Folded Spill
fadd d2, d3, d2
ldr d3, [sp, #12064] ; 8-byte Folded Reload
fmul d3, d3, d10
str d3, [sp, #2768] ; 8-byte Folded Spill
fadd d2, d3, d2
mov x9, #62994
movk x9, #14722, lsl #16
movk x9, #41829, lsl #32
movk x9, #16247, lsl #48
fmov d24, x9
fmul d7, d18, d24
str d24, [sp, #9208] ; 8-byte Folded Spill
fadd d26, d2, d7
mov x9, #18456
movk x9, #63321, lsl #16
movk x9, #33926, lsl #32
movk x9, #48991, lsl #48
fmov d2, x9
str d2, [sp, #9192] ; 8-byte Folded Spill
fmul d2, d6, d2
ldur d3, [x29, #-208] ; 8-byte Folded Reload
fmul d7, d3, d26
fmov d30, d3
fadd d2, d7, d2
fmul d7, d11, d13
fmul d17, d17, d5
fadd d7, d7, d17
ldr d17, [sp, #12336] ; 8-byte Folded Reload
fmul d3, d17, d1
str d22, [sp, #4976] ; 8-byte Folded Spill
ldr d5, [sp, #12160] ; 8-byte Folded Reload
fmul d5, d5, d22
str d3, [sp, #2664] ; 8-byte Folded Spill
str d5, [sp, #2656] ; 8-byte Folded Spill
fsub d5, d3, d5
str d5, [sp, #4952] ; 8-byte Folded Spill
fmul d17, d17, d5
fadd d7, d17, d7
ldr d17, [sp, #12192] ; 8-byte Folded Reload
fmul d3, d17, d1
fmul d1, d21, d5
str d3, [sp, #2736] ; 8-byte Folded Spill
fadd d1, d3, d1
mov x9, #-7378697629483820647
movk x9, #39322
movk x9, #16297, lsl #48
fmov d3, x9
str d3, [sp, #9176] ; 8-byte Folded Spill
fmul d7, d7, d3
fadd d1, d1, d7
ldr d7, [sp, #12104] ; 8-byte Folded Reload
fmul d3, d7, d10
str d3, [sp, #2752] ; 8-byte Folded Spill
fadd d1, d3, d1
mov x9, #56877
movk x9, #10885, lsl #16
movk x9, #2572, lsl #32
movk x9, #16289, lsl #48
fmov d3, x9
str d3, [sp, #9120] ; 8-byte Folded Spill
fmul d18, d18, d3
fadd d7, d1, d18
ldur d3, [x29, #-200] ; 8-byte Folded Reload
fmul d1, d3, d7
fmov d17, d3
fadd d1, d1, d2
ldr d2, [sp, #11600] ; 8-byte Folded Reload
fmul d2, d2, d4
str d2, [sp, #2744] ; 8-byte Folded Spill
fadd d2, d1, d2
ldr d1, [sp, #11496] ; 8-byte Folded Reload
fmul d5, d1, d4
fmul d3, d27, d5
str d5, [sp, #6784] ; 8-byte Folded Spill
str d3, [sp, #2728] ; 8-byte Folded Spill
fadd d2, d2, d3
mov x9, #50080
movk x9, #49599, lsl #16
movk x9, #32579, lsl #32
movk x9, #16368, lsl #48
mov x10, #45974
movk x10, #34787, lsl #16
movk x10, #35902, lsl #32
movk x10, #16285, lsl #48
fmov d13, x9
fmul d21, d23, d13
fmov d23, x10
fmul d18, d21, d23
fadd d2, d18, d2
mov x9, #36544
movk x9, #43611, lsl #16
movk x9, #860, lsl #32
movk x9, #16326, lsl #48
fmov d3, x9
str d3, [sp, #9320] ; 8-byte Folded Spill
fmul d6, d6, d3
str d26, [sp, #1536] ; 8-byte Folded Spill
fmul d22, d17, d26
fsub d6, d6, d22
str d7, [sp, #1528] ; 8-byte Folded Spill
fmul d22, d30, d7
fadd d6, d22, d6
fmul d22, d11, d9
fmul d26, d1, d5
fadd d22, d22, d26
ldr d1, [sp, #11760] ; 8-byte Folded Reload
fmul d3, d1, d4
str d3, [sp, #6800] ; 8-byte Folded Spill
fmul d26, d1, d3
fadd d22, d26, d22
ldr d1, [sp, #11464] ; 8-byte Folded Reload
fmul d1, d1, d4
str d1, [sp, #2680] ; 8-byte Folded Spill
fadd d4, d6, d1
fmul d1, d27, d3
str d1, [sp, #2688] ; 8-byte Folded Spill
fadd d4, d4, d1
mov x9, #43516
movk x9, #54001, lsl #16
movk x9, #25165, lsl #32
movk x9, #16240, lsl #48
fmov d1, x9
str d1, [sp, #9104] ; 8-byte Folded Spill
fmul d1, d22, d1
str d1, [sp, #2672] ; 8-byte Folded Spill
fadd d4, d4, d1
mov x9, #49235
movk x9, #28989, lsl #16
movk x9, #40841, lsl #32
movk x9, #16312, lsl #48
fmov d9, x9
fmul d21, d21, d9
fadd d22, d21, d4
fadd d4, d2, d16
mov x9, #4363988038922010624
fmov d1, x9
fmul d16, d22, d1
fmov d5, d1
str d1, [sp, #8736] ; 8-byte Folded Spill
fadd d1, d4, d16
str d0, [sp, #6472] ; 8-byte Folded Spill
ldr d3, [sp, #12056] ; 8-byte Folded Reload
fmul d3, d3, d0
str d3, [sp, #2720] ; 8-byte Folded Spill
ldr d4, [sp, #8312] ; 8-byte Folded Reload
fsub d4, d4, d3
mov x9, #43115
movk x9, #62349, lsl #16
movk x9, #30721, lsl #32
movk x9, #49115, lsl #48
fmov d3, x9
str d3, [sp, #9184] ; 8-byte Folded Spill
fmul d4, d4, d3
ldr q3, [sp, #12000] ; 16-byte Folded Reload
fmul d16, d3, d1
mov.16b v6, v3
fsub d16, d4, d16
mov x9, #-4863887597560135680
fmov d3, x9
str d3, [sp, #8160] ; 8-byte Folded Spill
fmul d19, d19, d3
mov x9, #62612
movk x9, #18904, lsl #16
movk x9, #1144, lsl #32
movk x9, #49064, lsl #48
fmov d3, x9
fmul d20, d20, d3
fmov d18, d3
str d3, [sp, #9304] ; 8-byte Folded Spill
fadd d19, d20, d19
fmul d2, d2, d5
fadd d2, d19, d2
fadd d3, d2, d22
ldr q5, [sp, #11840] ; 16-byte Folded Reload
fmul d2, d5, d3
fsub d2, d16, d2
ldr d4, [sp, #8592] ; 8-byte Folded Reload
fadd d4, d4, d2
str q3, [sp, #5552] ; 16-byte Folded Spill
fmul d2, d6, d3
str q1, [sp, #5568] ; 16-byte Folded Spill
fmul d16, d5, d1
fsub d3, d2, d16
mov x9, #58251
movk x9, #46885, lsl #16
movk x9, #26312, lsl #32
movk x9, #16401, lsl #48
ldr d1, [sp, #12208] ; 8-byte Folded Reload
fmul d5, d1, d0
fmov d27, x9
ldr d1, [sp, #11784] ; 8-byte Folded Reload
fmul d2, d1, d27
str d5, [sp, #2696] ; 8-byte Folded Spill
fadd d2, d2, d5
mov x9, #52090
movk x9, #42545, lsl #16
movk x9, #26349, lsl #32
movk x9, #16345, lsl #48
ldr q5, [sp, #11824] ; 16-byte Folded Reload
fmul d16, d5, d3
fmov d0, x9
str d0, [sp, #9232] ; 8-byte Folded Spill
fmul d22, d2, d0
fsub d16, d16, d22
ldr q6, [sp, #12240] ; 16-byte Folded Reload
fmul d22, d6, d4
fsub d16, d22, d16
fadd d0, d12, d16
str q0, [sp, #8608] ; 16-byte Folded Spill
mov x9, #11201
movk x9, #50599, lsl #16
movk x9, #31589, lsl #32
movk x9, #16242, lsl #48
fmov d0, x9
str d0, [sp, #9224] ; 8-byte Folded Spill
fmul d0, d2, d0
str q3, [sp, #1088] ; 16-byte Folded Spill
fmul d2, d6, d3
fsub d0, d2, d0
str q4, [sp, #1104] ; 16-byte Folded Spill
fmul d2, d5, d4
fadd d0, d0, d2
str q0, [sp, #8592] ; 16-byte Folded Spill
ldr d0, [sp, #10264] ; 8-byte Folded Reload
fmov d5, #0.50000000
fmul d0, d0, d5
ldr d1, [sp, #10816] ; 8-byte Folded Reload
fmul d0, d1, d0
fmul d0, d0, d25
ldr d1, [sp, #10280] ; 8-byte Folded Reload
ldr d2, [sp, #10808] ; 8-byte Folded Reload
fmul d3, d2, d1
str d3, [sp, #8208] ; 8-byte Folded Spill
ldr d1, [sp, #10288] ; 8-byte Folded Reload
fmul d2, d1, d3
fmul d2, d2, d29
fadd d4, d2, d0
ldr d0, [sp, #10240] ; 8-byte Folded Reload
fmul d0, d0, d5
fmov d20, #0.50000000
ldr d1, [sp, #10800] ; 8-byte Folded Reload
fmul d0, d1, d0
fmul d0, d0, d25
ldr d1, [sp, #10248] ; 8-byte Folded Reload
ldr d2, [sp, #10792] ; 8-byte Folded Reload
fmul d2, d2, d1
str d2, [sp, #8232] ; 8-byte Folded Spill
ldr d1, [sp, #10256] ; 8-byte Folded Reload
fmul d2, d1, d2
fmul d2, d2, d29
fadd d17, d2, d0
ldur d3, [x29, #-248] ; 8-byte Folded Reload
fmul d1, d3, d28
ldr d0, [sp, #11928] ; 8-byte Folded Reload
fmul d0, d0, d1
fmov d7, d1
str d1, [sp, #8312] ; 8-byte Folded Spill
ldr d1, [sp, #8296] ; 8-byte Folded Reload
fsub d2, d0, d1
ldr d6, [sp, #12232] ; 8-byte Folded Reload
fmul d14, d6, d4
ldr d0, [sp, #11728] ; 8-byte Folded Reload
fmul d5, d0, d14
ldur d1, [x29, #-160] ; 8-byte Folded Reload
fmul d0, d1, d14
fmov d26, d1
fmul d1, d3, d4
str d0, [sp, #3272] ; 8-byte Folded Spill
fsub d16, d0, d1
ldr d3, [sp, #11472] ; 8-byte Folded Reload
fmul d0, d3, d16
fmov d21, d16
str d16, [sp, #4928] ; 8-byte Folded Spill
str d5, [sp, #3280] ; 8-byte Folded Spill
fadd d16, d5, d0
str d17, [sp, #5896] ; 8-byte Folded Spill
fmul d5, d6, d17
ldr d0, [sp, #11560] ; 8-byte Folded Reload
fmul d0, d0, d5
fmov d22, d5
str d0, [sp, #3264] ; 8-byte Folded Spill
fadd d16, d0, d16
ldr d0, [sp, #11720] ; 8-byte Folded Reload
fmul d0, d0, d4
str d0, [sp, #3256] ; 8-byte Folded Spill
fsub d2, d2, d0
ldr d0, [sp, #11696] ; 8-byte Folded Reload
fmul d0, d0, d17
str d0, [sp, #3248] ; 8-byte Folded Spill
fsub d0, d2, d0
str d0, [sp, #1488] ; 8-byte Folded Spill
ldur d2, [x29, #-240] ; 8-byte Folded Reload
fmul d2, d2, d0
fadd d2, d2, d16
str d4, [sp, #3744] ; 8-byte Folded Spill
fadd d6, d4, d17
ldr d4, [sp, #11624] ; 8-byte Folded Reload
fmul d0, d4, d6
fmov d19, d4
ldr d5, [sp, #11504] ; 8-byte Folded Reload
fmul d16, d5, d31
str d0, [sp, #3296] ; 8-byte Folded Spill
fadd d31, d0, d16
fmul d16, d31, d8
fsub d2, d2, d16
ldr d0, [sp, #11968] ; 8-byte Folded Reload
fmul d16, d0, d7
ldr d0, [sp, #6296] ; 8-byte Folded Reload
fsub d0, d0, d16
ldr d4, [sp, #9312] ; 8-byte Folded Reload
fmul d16, d0, d4
fadd d2, d2, d16
ldr d4, [sp, #11512] ; 8-byte Folded Reload
fmul d7, d4, d28
str d7, [sp, #6352] ; 8-byte Folded Spill
ldr d4, [sp, #11424] ; 8-byte Folded Reload
fmul d16, d7, d4
fadd d2, d16, d2
ldr d4, [sp, #9688] ; 8-byte Folded Reload
fmul d16, d4, d20
ldr d4, [sp, #10128] ; 8-byte Folded Reload
fmul d16, d4, d16
fmul d16, d16, d25
ldr d4, [sp, #9696] ; 8-byte Folded Reload
ldr d7, [sp, #10120] ; 8-byte Folded Reload
fmul d7, d7, d4
str d7, [sp, #8296] ; 8-byte Folded Spill
ldr d4, [sp, #9704] ; 8-byte Folded Reload
fmul d25, d4, d7
fmul d25, d25, d29
fadd d17, d25, d16
fmov d25, d19
fmul d29, d19, d17
ldr d4, [sp, #11320] ; 8-byte Folded Reload
fmul d4, d4, d29
str d4, [sp, #3240] ; 8-byte Folded Spill
fadd d2, d2, d4
fmul d7, d26, d29
ldr d19, [sp, #11096] ; 8-byte Folded Reload
fmul d16, d19, d7
str d7, [sp, #7984] ; 8-byte Folded Spill
str d16, [sp, #3232] ; 8-byte Folded Spill
fadd d16, d2, d16
mov x9, #4359484439294640128
str d6, [sp, #3712] ; 8-byte Folded Spill
str d17, [sp, #3696] ; 8-byte Folded Spill
fadd d20, d6, d17
fmul d6, d25, d20
ldr d2, [sp, #9328] ; 8-byte Folded Reload
fmul d2, d5, d2
str d6, [sp, #3288] ; 8-byte Folded Spill
fadd d6, d2, d6
fmov d2, x9
str d2, [sp, #8304] ; 8-byte Folded Spill
fmul d25, d16, d2
fmul d28, d6, d18
fadd d25, d25, d28
ldr d17, [sp, #11880] ; 8-byte Folded Reload
fmul d2, d17, d14
ldur d4, [x29, #-232] ; 8-byte Folded Reload
fmul d17, d4, d14
fmov d30, d4
ldr d4, [sp, #12144] ; 8-byte Folded Reload
fmul d28, d4, d1
str d17, [sp, #3160] ; 8-byte Folded Spill
str d28, [sp, #3152] ; 8-byte Folded Spill
fsub d17, d17, d28
fmul d28, d3, d17
fmov d18, d17
str d17, [sp, #4912] ; 8-byte Folded Spill
str d2, [sp, #3224] ; 8-byte Folded Spill
fadd d28, d2, d28
ldr d17, [sp, #11872] ; 8-byte Folded Reload
str d22, [sp, #6768] ; 8-byte Folded Spill
fmul d2, d17, d22
str d2, [sp, #3216] ; 8-byte Folded Spill
fadd d28, d2, d28
fmul d12, d0, d24
fadd d2, d28, d12
ldr d8, [sp, #9192] ; 8-byte Folded Reload
fmul d28, d31, d8
ldr d4, [sp, #12304] ; 8-byte Folded Reload
fmul d12, d4, d2
fadd d28, d12, d28
fmul d12, d26, d21
fmul d17, d30, d18
fadd d17, d12, d17
ldr d21, [sp, #12296] ; 8-byte Folded Reload
fmul d12, d21, d14
str d1, [sp, #4920] ; 8-byte Folded Spill
ldr d18, [sp, #12152] ; 8-byte Folded Reload
fmul d1, d18, d1
str d12, [sp, #3112] ; 8-byte Folded Spill
str d1, [sp, #3104] ; 8-byte Folded Spill
fsub d18, d12, d1
str d18, [sp, #4904] ; 8-byte Folded Spill
fmul d12, d21, d18
fadd d17, d12, d17
ldr d12, [sp, #12096] ; 8-byte Folded Reload
fmul d1, d12, d14
fmul d12, d3, d18
str d1, [sp, #3176] ; 8-byte Folded Spill
fadd d12, d1, d12
ldr d21, [sp, #9176] ; 8-byte Folded Reload
fmul d17, d17, d21
fadd d17, d12, d17
ldr d12, [sp, #12088] ; 8-byte Folded Reload
fmul d1, d12, d22
str d1, [sp, #3208] ; 8-byte Folded Spill
fadd d17, d1, d17
ldr d30, [sp, #9120] ; 8-byte Folded Reload
fmul d0, d0, d30
fadd d3, d17, d0
ldr d18, [sp, #12312] ; 8-byte Folded Reload
fmul d0, d18, d3
fadd d0, d0, d28
ldr d1, [sp, #11528] ; 8-byte Folded Reload
fmul d1, d1, d29
str d1, [sp, #3192] ; 8-byte Folded Spill
fadd d0, d0, d1
ldr d12, [sp, #11488] ; 8-byte Folded Reload
fmul d17, d12, d29
fmul d1, d19, d17
fmov d22, d17
str d17, [sp, #7992] ; 8-byte Folded Spill
str d1, [sp, #3184] ; 8-byte Folded Spill
fadd d0, d0, d1
fmul d17, d5, d13
fmul d23, d17, d23
fadd d0, d23, d0
ldr d5, [sp, #8736] ; 8-byte Folded Reload
fmul d23, d0, d5
fadd d23, d25, d23
ldr d1, [sp, #9320] ; 8-byte Folded Reload
fmul d24, d31, d1
str d2, [sp, #1304] ; 8-byte Folded Spill
fmul d25, d18, d2
fsub d24, d24, d25
str d3, [sp, #1296] ; 8-byte Folded Spill
fmul d25, d4, d3
fadd d24, d25, d24
fmul d25, d26, d7
fmul d31, d12, d22
fadd d31, d25, d31
ldr d12, [sp, #11680] ; 8-byte Folded Reload
fmul d2, d12, d29
str d2, [sp, #7976] ; 8-byte Folded Spill
fmul d12, d12, d2
fadd d31, d12, d31
fmul d17, d17, d9
ldr d1, [sp, #11432] ; 8-byte Folded Reload
fmul d1, d1, d29
str d1, [sp, #3128] ; 8-byte Folded Spill
fadd d24, d24, d1
fmul d1, d19, d2
str d1, [sp, #3144] ; 8-byte Folded Spill
fadd d24, d24, d1
ldr d2, [sp, #9104] ; 8-byte Folded Reload
fmul d1, d31, d2
fmov d31, d2
str d1, [sp, #3136] ; 8-byte Folded Spill
fadd d24, d24, d1
fadd d17, d17, d24
mov x9, #-4868391197187506176
fmov d1, x9
str d1, [sp, #8200] ; 8-byte Folded Spill
fmul d16, d16, d1
ldr d26, [sp, #9216] ; 8-byte Folded Reload
fmul d2, d6, d26
fadd d2, d2, d16
fadd d0, d0, d2
fadd d1, d23, d17
fmul d2, d17, d5
fadd d3, d0, d2
ldr q2, [sp, #11984] ; 16-byte Folded Reload
fmul d0, d2, d1
mov.16b v17, v2
ldr q4, [sp, #11808] ; 16-byte Folded Reload
fmul d2, d4, d3
mov.16b v18, v4
fsub d4, d0, d2
ldr d0, [sp, #11640] ; 8-byte Folded Reload
fmul d0, d0, d27
str d20, [sp, #6464] ; 8-byte Folded Spill
ldr d2, [sp, #12072] ; 8-byte Folded Reload
fmul d2, d2, d20
str d2, [sp, #3200] ; 8-byte Folded Spill
fadd d0, d0, d2
ldr q5, [sp, #11952] ; 16-byte Folded Reload
fmul d2, d5, d4
mov.16b v19, v5
ldr d5, [sp, #9224] ; 8-byte Folded Reload
fmul d16, d0, d5
fsub d2, d2, d16
ldr d5, [sp, #12048] ; 8-byte Folded Reload
fmul d5, d5, d20
str d5, [sp, #3168] ; 8-byte Folded Spill
ldr d6, [sp, #8720] ; 8-byte Folded Reload
fsub d16, d6, d5
ldr d22, [sp, #9184] ; 8-byte Folded Reload
fmul d16, d16, d22
str q3, [sp, #5520] ; 16-byte Folded Spill
fmul d17, d17, d3
fsub d16, d16, d17
str q1, [sp, #5536] ; 16-byte Folded Spill
fmul d17, d18, d1
fsub d16, d16, d17
ldr d1, [sp, #8712] ; 8-byte Folded Reload
fadd d1, d1, d16
ldr q3, [sp, #11792] ; 16-byte Folded Reload
fmul d16, d3, d1
fadd d2, d2, d16
str q2, [sp, #5920] ; 16-byte Folded Spill
ldr d2, [sp, #9232] ; 8-byte Folded Reload
fmul d0, d0, d2
stp q1, q4, [sp, #992] ; 32-byte Folded Spill
fmul d2, d3, d4
fsub d0, d2, d0
fmul d2, d19, d1
fsub d0, d2, d0
ldr d1, [sp, #8696] ; 8-byte Folded Reload
fadd d0, d1, d0
str q0, [sp, #4704] ; 16-byte Folded Spill
ldur d29, [x29, #-176] ; 8-byte Folded Reload
ldr d28, [sp, #4984] ; 8-byte Folded Reload
fmul d0, d29, d28
ldr d17, [sp, #11864] ; 8-byte Folded Reload
ldr d19, [sp, #4960] ; 8-byte Folded Reload
fmul d2, d17, d19
fadd d0, d0, d2
ldr d20, [sp, #12016] ; 8-byte Folded Reload
ldr d7, [sp, #4952] ; 8-byte Folded Reload
fmul d2, d20, d7
fadd d0, d2, d0
ldr d1, [sp, #10976] ; 8-byte Folded Reload
ldr d2, [sp, #10896] ; 8-byte Folded Reload
fmul d2, d2, d1
fmov d4, #0.50000000
fmul d2, d2, d4
ldr d1, [sp, #10320] ; 8-byte Folded Reload
fmul d2, d1, d2
mov x9, #211106232532992
movk x9, #16498, lsl #48
fmov d14, x9
fmul d3, d2, d14
ldr d1, [sp, #10960] ; 8-byte Folded Reload
ldr d2, [sp, #8216] ; 8-byte Folded Reload
fmul d2, d2, d1
mov x9, #10523
movk x9, #38535, lsl #16
movk x9, #12921, lsl #32
movk x9, #16642, lsl #48
fmov d1, x9
str d1, [sp, #8216] ; 8-byte Folded Spill
fmul d2, d2, d1
ldr d1, [sp, #10344] ; 8-byte Folded Reload
fmul d2, d1, d2
ldr d1, [sp, #10312] ; 8-byte Folded Reload
fdiv d2, d2, d1
ldr d1, [sp, #10296] ; 8-byte Folded Reload
fmul d1, d1, d2
str d3, [sp, #2520] ; 8-byte Folded Spill
str d1, [sp, #2512] ; 8-byte Folded Spill
fsub d2, d3, d1
fadd d2, d2, d0
fmul d16, d0, d4
fmov d25, #0.50000000
fsub d24, d2, d16
fmul d2, d11, d10
ldr d11, [sp, #12320] ; 8-byte Folded Reload
ldr d1, [sp, #5904] ; 8-byte Folded Reload
fmul d1, d11, d1
str d2, [sp, #2584] ; 8-byte Folded Spill
fsub d2, d2, d1
fmul d16, d29, d2
ldur d3, [x29, #-216] ; 8-byte Folded Reload
fmul d3, d3, d10
ldr d4, [sp, #12200] ; 8-byte Folded Reload
fmul d4, d4, d1
str d3, [sp, #2496] ; 8-byte Folded Spill
str d4, [sp, #2488] ; 8-byte Folded Spill
fsub d23, d3, d4
fmul d17, d17, d23
fadd d17, d16, d17
ldr d3, [sp, #12336] ; 8-byte Folded Reload
fmul d3, d3, d10
str d1, [sp, #4944] ; 8-byte Folded Spill
ldr d4, [sp, #12160] ; 8-byte Folded Reload
fmul d1, d4, d1
str d3, [sp, #2480] ; 8-byte Folded Spill
str d1, [sp, #2472] ; 8-byte Folded Spill
fsub d16, d3, d1
fmul d20, d20, d16
fadd d17, d20, d17
fsub d0, d24, d0
fsub d0, d0, d17
ldr d1, [sp, #10952] ; 8-byte Folded Reload
ldr d3, [sp, #10880] ; 8-byte Folded Reload
fmul d20, d3, d1
fmul d20, d20, d25
ldr d1, [sp, #10336] ; 8-byte Folded Reload
fmul d20, d1, d20
fmul d3, d20, d14
ldr d1, [sp, #10944] ; 8-byte Folded Reload
ldr d4, [sp, #8248] ; 8-byte Folded Reload
fmul d20, d4, d1
mov x9, #18811
movk x9, #34700, lsl #16
movk x9, #61210, lsl #32
movk x9, #16643, lsl #48
fmov d10, x9
fmul d20, d20, d10
ldr d1, [sp, #10352] ; 8-byte Folded Reload
fmul d20, d1, d20
ldr d1, [sp, #10328] ; 8-byte Folded Reload
fdiv d20, d20, d1
ldr d1, [sp, #10304] ; 8-byte Folded Reload
fmul d1, d1, d20
str d3, [sp, #2464] ; 8-byte Folded Spill
str d1, [sp, #2456] ; 8-byte Folded Spill
fsub d20, d3, d1
fadd d20, d20, d17
fmul d17, d17, d25
fsub d17, d20, d17
fadd d27, d17, d0
ldr d12, [sp, #8168] ; 8-byte Folded Reload
fmul d0, d29, d12
ldr d5, [sp, #12136] ; 8-byte Folded Reload
ldr d18, [sp, #6784] ; 8-byte Folded Reload
fmul d17, d5, d18
fadd d0, d0, d17
ldr d5, [sp, #11896] ; 8-byte Folded Reload
ldr d9, [sp, #6800] ; 8-byte Folded Reload
fmul d17, d5, d9
fadd d6, d17, d0
ldr d1, [sp, #11280] ; 8-byte Folded Reload
fmul d0, d1, d2
ldr d3, [sp, #11384] ; 8-byte Folded Reload
fmov d2, d28
fmul d2, d3, d28
fadd d0, d2, d0
ldr d29, [sp, #9112] ; 8-byte Folded Reload
fmul d2, d27, d29
fadd d0, d0, d2
ldr d28, [sp, #9200] ; 8-byte Folded Reload
ldur d4, [x29, #-184] ; 8-byte Folded Reload
fmul d20, d4, d28
ldr d2, [sp, #11568] ; 8-byte Folded Reload
fmul d2, d2, d20
ldr d5, [sp, #11936] ; 8-byte Folded Reload
ldr d13, [sp, #8240] ; 8-byte Folded Reload
fmul d17, d5, d13
fsub d2, d2, d17
str d2, [sp, #1544] ; 8-byte Folded Spill
ldr d5, [sp, #12280] ; 8-byte Folded Reload
fmul d2, d5, d2
fadd d0, d2, d0
fmul d2, d11, d13
str d20, [sp, #4936] ; 8-byte Folded Spill
fmul d17, d4, d20
fsub d2, d2, d17
ldr d4, [sp, #9312] ; 8-byte Folded Reload
fmul d17, d2, d4
fsub d0, d0, d17
fmul d4, d15, d28
str d4, [sp, #3592] ; 8-byte Folded Spill
fmul d17, d15, d4
fmul d20, d17, d29
fsub d0, d0, d20
ldr d4, [sp, #6360] ; 8-byte Folded Reload
fmul d4, d4, d5
str d4, [sp, #3584] ; 8-byte Folded Spill
fmul d20, d4, d15
fsub d0, d0, d20
ldr d4, [sp, #10376] ; 8-byte Folded Reload
ldr d5, [sp, #10360] ; 8-byte Folded Reload
fmul d20, d4, d5
fmul d20, d20, d25
ldr d4, [sp, #9712] ; 8-byte Folded Reload
fmul d20, d4, d20
fmul d5, d20, d14
ldr d4, [sp, #10384] ; 8-byte Folded Reload
ldr d20, [sp, #8688] ; 8-byte Folded Reload
fmul d29, d4, d20
mov x9, #45572
movk x9, #23979, lsl #16
movk x9, #34811, lsl #32
movk x9, #16645, lsl #48
fmov d20, x9
fmul d29, d29, d20
ldr d4, [sp, #9736] ; 8-byte Folded Reload
fmul d29, d4, d29
ldr d4, [sp, #9728] ; 8-byte Folded Reload
fdiv d29, d29, d4
ldr d4, [sp, #9720] ; 8-byte Folded Reload
fmul d4, d4, d29
str d5, [sp, #2448] ; 8-byte Folded Spill
str d4, [sp, #2440] ; 8-byte Folded Spill
fsub d29, d5, d4
fadd d29, d29, d6
fmul d4, d6, d25
str d4, [sp, #2544] ; 8-byte Folded Spill
fsub d29, d29, d4
str d6, [sp, #2576] ; 8-byte Folded Spill
fsub d11, d27, d6
fadd d25, d11, d29
ldr d5, [sp, #10968] ; 8-byte Folded Reload
fmul d4, d5, d12
str d4, [sp, #2568] ; 8-byte Folded Spill
fadd d0, d0, d4
ldr d4, [sp, #8160] ; 8-byte Folded Reload
fmul d4, d0, d4
ldr d6, [sp, #9304] ; 8-byte Folded Reload
fmul d11, d25, d6
fadd d11, d4, d11
mov x9, #18456
movk x9, #63321, lsl #16
movk x9, #33926, lsl #32
movk x9, #16223, lsl #48
fmul d12, d27, d8
fmov d4, x9
str d4, [sp, #8720] ; 8-byte Folded Spill
fmul d13, d17, d4
fadd d12, d12, d13
fmul d24, d24, d21
fmul d13, d3, d7
fadd d24, d13, d24
fmul d16, d1, d16
fadd d16, d16, d24
fmul d24, d2, d30
fadd d8, d24, d16
ldp d6, d7, [x29, #-208] ; 16-byte Folded Reload
fmul d16, d7, d8
fadd d16, d12, d16
fmul d23, d1, d23
fmov d1, d19
fmul d24, d3, d19
fadd d23, d24, d23
ldr d1, [sp, #9208] ; 8-byte Folded Reload
fmul d2, d2, d1
fadd d1, d2, d23
fmul d2, d6, d1
fadd d2, d2, d16
fmul d3, d5, d18
str d3, [sp, #2560] ; 8-byte Folded Spill
fadd d2, d2, d3
ldr d3, [sp, #8360] ; 8-byte Folded Reload
fadd d3, d3, d2
ldr d21, [sp, #8736] ; 8-byte Folded Reload
fmul d2, d3, d21
fadd d2, d2, d11
ldr d4, [sp, #9320] ; 8-byte Folded Reload
fmul d16, d27, d4
fmul d17, d17, d4
fsub d16, d16, d17
str d8, [sp, #1480] ; 8-byte Folded Spill
fmul d17, d6, d8
fadd d16, d16, d17
str d1, [sp, #1472] ; 8-byte Folded Spill
fmul d17, d7, d1
fsub d16, d16, d17
fmul d1, d5, d9
str d1, [sp, #2552] ; 8-byte Folded Spill
fadd d16, d16, d1
fmul d17, d29, d31
fadd d16, d16, d17
ldr d1, [sp, #8368] ; 8-byte Folded Reload
fadd d16, d1, d16
fadd d17, d16, d2
mov x9, #62612
movk x9, #18904, lsl #16
movk x9, #1144, lsl #32
movk x9, #16296, lsl #48
ldr d1, [sp, #9328] ; 8-byte Folded Reload
fmul d1, d15, d1
str d1, [sp, #3576] ; 8-byte Folded Spill
fmul d23, d15, d1
fmov d1, x9
str d1, [sp, #8712] ; 8-byte Folded Spill
fmul d24, d23, d1
fadd d1, d17, d24
ldr d2, [sp, #8664] ; 8-byte Folded Reload
fmul d0, d0, d2
fmul d17, d25, d26
fadd d0, d0, d17
fadd d0, d3, d0
fmul d3, d16, d21
fmov d24, d21
fadd d0, d0, d3
mov x9, #47272
movk x9, #56762, lsl #16
movk x9, #43178, lsl #32
movk x9, #16292, lsl #48
fmov d2, x9
str d2, [sp, #8696] ; 8-byte Folded Spill
fmul d3, d23, d2
fadd d5, d0, d3
ldr q2, [sp, #12000] ; 16-byte Folded Reload
fmul d0, d2, d1
ldr q7, [sp, #11840] ; 16-byte Folded Reload
fmul d3, d7, d5
fsub d17, d0, d3
ldr d29, [sp, #9224] ; 8-byte Folded Reload
fmul d0, d25, d29
ldr q6, [sp, #12240] ; 16-byte Folded Reload
fmul d3, d6, d17
fsub d0, d3, d0
fmul d3, d25, d22
str q5, [sp, #5488] ; 16-byte Folded Spill
fmul d16, d2, d5
fadd d3, d16, d3
str q1, [sp, #5504] ; 16-byte Folded Spill
fmul d16, d7, d1
fadd d3, d16, d3
mov x9, #24565
movk x9, #58125, lsl #16
movk x9, #44270, lsl #32
movk x9, #16372, lsl #48
mov x10, #43115
movk x10, #62349, lsl #16
movk x10, #30721, lsl #32
movk x10, #16347, lsl #48
fmov d11, x9
ldr d2, [sp, #12056] ; 8-byte Folded Reload
fmul d1, d2, d11
str d1, [sp, #3528] ; 8-byte Folded Spill
fmul d16, d2, d1
fmov d1, x10
str d1, [sp, #8664] ; 8-byte Folded Spill
fmul d16, d16, d1
fadd d3, d3, d16
mov x9, #5915
movk x9, #64709, lsl #16
movk x9, #30489, lsl #32
movk x9, #49160, lsl #48
mov x10, #2356
movk x10, #12413, lsl #16
movk x10, #55910, lsl #32
movk x10, #16327, lsl #48
fmov d1, x9
str d1, [sp, #8688] ; 8-byte Folded Spill
fmul d1, d2, d1
str d1, [sp, #3488] ; 8-byte Folded Spill
fmul d16, d2, d1
fmov d1, x10
str d1, [sp, #8368] ; 8-byte Folded Spill
fmul d16, d16, d1
fsub d1, d3, d16
ldr q5, [sp, #11824] ; 16-byte Folded Reload
fmul d3, d5, d1
fsub d0, d0, d3
ldr d3, [sp, #6008] ; 8-byte Folded Reload
ldr d2, [sp, #12208] ; 8-byte Folded Reload
fmul d3, d2, d3
fmul d16, d3, d29
fadd d0, d0, d16
str q0, [sp, #6800] ; 16-byte Folded Spill
ldr d13, [sp, #9232] ; 8-byte Folded Reload
fmul d0, d25, d13
stp q1, q17, [sp, #960] ; 32-byte Folded Spill
fmul d16, d5, d17
fsub d0, d16, d0
fmul d16, d6, d1
fadd d0, d0, d16
fmul d3, d3, d13
fadd d0, d0, d3
mov x9, #64990
movk x9, #28266, lsl #16
movk x9, #45172, lsl #32
movk x9, #49182, lsl #48
mov x10, #28530
movk x10, #30490, lsl #16
movk x10, #27495, lsl #32
movk x10, #16325, lsl #48
fmov d23, x9
fmul d1, d2, d23
str d1, [sp, #3440] ; 8-byte Folded Spill
fmul d3, d2, d1
fmov d27, x10
fmul d3, d3, d27
fsub d0, d0, d3
str q0, [sp, #6784] ; 16-byte Folded Spill
ldr d0, [sp, #10856] ; 8-byte Folded Reload
ldr d1, [sp, #8208] ; 8-byte Folded Reload
fmul d0, d1, d0
ldr d1, [sp, #8216] ; 8-byte Folded Reload
fmul d0, d0, d1
ldur d5, [x29, #-192] ; 8-byte Folded Reload
ldr d4, [sp, #4928] ; 8-byte Folded Reload
fmul d3, d5, d4
ldr d18, [sp, #11768] ; 8-byte Folded Reload
ldr d28, [sp, #4912] ; 8-byte Folded Reload
fmul d16, d18, d28
fadd d3, d3, d16
ldr d7, [sp, #11776] ; 8-byte Folded Reload
ldr d15, [sp, #4904] ; 8-byte Folded Reload
fmul d16, d7, d15
fadd d3, d16, d3
ldr d1, [sp, #10864] ; 8-byte Folded Reload
ldr d2, [sp, #10808] ; 8-byte Folded Reload
fmul d16, d2, d1
fmov d17, #0.50000000
fmul d16, d16, d17
ldr d1, [sp, #10200] ; 8-byte Folded Reload
fmul d16, d1, d16
fmul d6, d16, d14
ldr d1, [sp, #10160] ; 8-byte Folded Reload
fmul d0, d1, d0
ldr d1, [sp, #10152] ; 8-byte Folded Reload
fdiv d0, d0, d1
ldr d1, [sp, #10168] ; 8-byte Folded Reload
fmul d0, d1, d0
str d6, [sp, #2936] ; 8-byte Folded Spill
str d0, [sp, #2928] ; 8-byte Folded Spill
fsub d0, d6, d0
fadd d0, d0, d3
fmul d16, d3, d17
fmov d8, #0.50000000
fsub d16, d0, d16
ldr d21, [sp, #6768] ; 8-byte Folded Reload
ldur d0, [x29, #-160] ; 8-byte Folded Reload
fmul d0, d0, d21
ldur d2, [x29, #-248] ; 8-byte Folded Reload
ldr d1, [sp, #5896] ; 8-byte Folded Reload
fmul d1, d2, d1
str d0, [sp, #2992] ; 8-byte Folded Spill
fsub d17, d0, d1
fmul d0, d5, d17
ldur d6, [x29, #-232] ; 8-byte Folded Reload
fmul d6, d6, d21
ldr d22, [sp, #12144] ; 8-byte Folded Reload
fmul d22, d22, d1
str d6, [sp, #2920] ; 8-byte Folded Spill
str d22, [sp, #2912] ; 8-byte Folded Spill
fsub d31, d6, d22
fmul d12, d18, d31
fadd d0, d0, d12
ldr d6, [sp, #12296] ; 8-byte Folded Reload
fmul d6, d6, d21
str d1, [sp, #4888] ; 8-byte Folded Spill
ldr d18, [sp, #12152] ; 8-byte Folded Reload
fmul d1, d18, d1
str d6, [sp, #2904] ; 8-byte Folded Spill
str d1, [sp, #2888] ; 8-byte Folded Spill
fsub d1, d6, d1
fmul d12, d7, d1
fadd d0, d12, d0
ldr d6, [sp, #10840] ; 8-byte Folded Reload
ldr d7, [sp, #8232] ; 8-byte Folded Reload
fmul d12, d6, d7
fmul d10, d12, d10
fsub d3, d16, d3
fsub d3, d3, d0
ldr d6, [sp, #10848] ; 8-byte Folded Reload
ldr d7, [sp, #10792] ; 8-byte Folded Reload
fmul d12, d7, d6
fmul d12, d12, d8
ldr d6, [sp, #10208] ; 8-byte Folded Reload
fmul d12, d6, d12
fmul d7, d12, d14
ldr d6, [sp, #10184] ; 8-byte Folded Reload
fmul d10, d6, d10
ldr d6, [sp, #10176] ; 8-byte Folded Reload
fdiv d10, d10, d6
ldr d6, [sp, #10192] ; 8-byte Folded Reload
fmul d6, d6, d10
str d7, [sp, #2872] ; 8-byte Folded Spill
str d6, [sp, #2864] ; 8-byte Folded Spill
fsub d10, d7, d6
fadd d10, d10, d0
fmul d0, d0, d8
fsub d0, d10, d0
fadd d19, d0, d3
ldr d12, [sp, #7984] ; 8-byte Folded Reload
fmul d3, d5, d12
ldr d5, [sp, #11688] ; 8-byte Folded Reload
ldr d9, [sp, #7992] ; 8-byte Folded Reload
fmul d10, d5, d9
fadd d3, d3, d10
ldr d5, [sp, #11632] ; 8-byte Folded Reload
ldr d30, [sp, #7976] ; 8-byte Folded Reload
fmul d10, d5, d30
fadd d21, d10, d3
ldr d3, [sp, #10136] ; 8-byte Folded Reload
ldr d5, [sp, #10120] ; 8-byte Folded Reload
fmul d3, d3, d5
fmul d3, d3, d8
ldr d5, [sp, #9680] ; 8-byte Folded Reload
fmul d3, d5, d3
fmul d26, d3, d14
ldr d3, [sp, #10144] ; 8-byte Folded Reload
ldr d5, [sp, #8296] ; 8-byte Folded Reload
fmul d3, d3, d5
fmul d3, d3, d20
ldr d18, [sp, #11272] ; 8-byte Folded Reload
fmul d17, d18, d17
ldr d7, [sp, #11360] ; 8-byte Folded Reload
fmul d20, d7, d4
fadd d17, d20, d17
ldr d0, [sp, #9112] ; 8-byte Folded Reload
fmul d20, d19, d0
fsub d17, d17, d20
ldr d14, [sp, #12232] ; 8-byte Folded Reload
ldr d4, [sp, #9200] ; 8-byte Folded Reload
fmul d22, d14, d4
ldr d5, [sp, #11512] ; 8-byte Folded Reload
fmul d20, d5, d22
ldr d5, [sp, #11920] ; 8-byte Folded Reload
ldr d6, [sp, #8312] ; 8-byte Folded Reload
fmul d10, d5, d6
fsub d5, d20, d10
str d5, [sp, #1336] ; 8-byte Folded Spill
ldur d10, [x29, #-240] ; 8-byte Folded Reload
fmul d20, d10, d5
fadd d17, d20, d17
fmul d6, d2, d6
str d22, [sp, #4880] ; 8-byte Folded Spill
fmul d20, d14, d22
fsub d6, d6, d20
ldr d2, [sp, #9312] ; 8-byte Folded Reload
fmul d20, d6, d2
fadd d17, d20, d17
ldr d22, [sp, #11624] ; 8-byte Folded Reload
fmul d5, d22, d4
str d5, [sp, #3432] ; 8-byte Folded Spill
fmul d20, d22, d5
fmul d5, d20, d0
fadd d5, d17, d5
ldr d2, [sp, #6352] ; 8-byte Folded Reload
fmul d17, d2, d10
str d17, [sp, #3424] ; 8-byte Folded Spill
fmul d17, d22, d17
fmov d14, d22
fsub d5, d5, d17
ldr d17, [sp, #9672] ; 8-byte Folded Reload
fmul d3, d17, d3
ldr d17, [sp, #9656] ; 8-byte Folded Reload
fdiv d3, d3, d17
ldr d17, [sp, #9664] ; 8-byte Folded Reload
fmul d3, d17, d3
str d26, [sp, #2840] ; 8-byte Folded Spill
str d3, [sp, #2792] ; 8-byte Folded Spill
fsub d3, d26, d3
fadd d3, d3, d21
fmul d17, d21, d8
str d17, [sp, #2960] ; 8-byte Folded Spill
fsub d3, d3, d17
str d21, [sp, #2984] ; 8-byte Folded Spill
fsub d17, d19, d21
fadd d10, d17, d3
ldr d22, [sp, #10872] ; 8-byte Folded Reload
fmul d17, d22, d12
str d17, [sp, #2976] ; 8-byte Folded Spill
fadd d5, d5, d17
ldr d2, [sp, #8304] ; 8-byte Folded Reload
fmul d17, d5, d2
ldr d2, [sp, #9304] ; 8-byte Folded Reload
fmul d26, d10, d2
fadd d17, d17, d26
ldr d0, [sp, #9192] ; 8-byte Folded Reload
fmul d26, d19, d0
ldr d0, [sp, #8720] ; 8-byte Folded Reload
fmul d4, d20, d0
fadd d4, d26, d4
ldr d0, [sp, #9176] ; 8-byte Folded Reload
fmul d16, d16, d0
fmul d26, d7, d15
fadd d16, d26, d16
fmul d1, d18, d1
fadd d1, d1, d16
ldr d0, [sp, #9120] ; 8-byte Folded Reload
fmul d16, d6, d0
fadd d26, d16, d1
ldr d21, [sp, #12312] ; 8-byte Folded Reload
fmul d1, d21, d26
fadd d1, d4, d1
fmul d4, d18, d31
fmul d16, d7, d28
fadd d4, d16, d4
ldr d0, [sp, #9208] ; 8-byte Folded Reload
fmul d6, d6, d0
fadd d7, d6, d4
ldr d2, [sp, #12304] ; 8-byte Folded Reload
fmul d4, d2, d7
fadd d1, d4, d1
fmul d4, d22, d9
str d4, [sp, #2968] ; 8-byte Folded Spill
fadd d1, d1, d4
ldr d4, [sp, #8352] ; 8-byte Folded Reload
fadd d1, d4, d1
fmul d4, d1, d24
fadd d4, d4, d17
ldr d6, [sp, #9320] ; 8-byte Folded Reload
fmul d0, d19, d6
fmul d6, d20, d6
fsub d0, d0, d6
str d26, [sp, #1216] ; 8-byte Folded Spill
fmul d6, d2, d26
fadd d0, d0, d6
str d7, [sp, #1208] ; 8-byte Folded Spill
fmul d6, d21, d7
fsub d0, d0, d6
ldr d2, [sp, #9104] ; 8-byte Folded Reload
fmul d3, d3, d2
fmul d6, d22, d30
fmov d30, d25
str d6, [sp, #2952] ; 8-byte Folded Spill
fadd d0, d0, d6
fadd d0, d0, d3
ldr d3, [sp, #8344] ; 8-byte Folded Reload
fadd d0, d3, d0
fadd d3, d0, d4
ldr d4, [sp, #9328] ; 8-byte Folded Reload
fmul d4, d14, d4
str d4, [sp, #3400] ; 8-byte Folded Spill
fmul d4, d14, d4
ldr q14, [sp, #6784] ; 16-byte Folded Reload
ldr d2, [sp, #8712] ; 8-byte Folded Reload
fmul d2, d4, d2
fadd d6, d3, d2
ldr d2, [sp, #8200] ; 8-byte Folded Reload
fmul d2, d5, d2
ldr d3, [sp, #9216] ; 8-byte Folded Reload
fmul d3, d10, d3
fadd d2, d2, d3
fadd d1, d1, d2
fmul d0, d0, d24
ldr q28, [sp, #8608] ; 16-byte Folded Reload
fadd d0, d1, d0
ldr d1, [sp, #8696] ; 8-byte Folded Reload
fmul d1, d4, d1
fadd d2, d0, d1
ldr q5, [sp, #11984] ; 16-byte Folded Reload
fmul d0, d5, d6
ldr q4, [sp, #11808] ; 16-byte Folded Reload
fmul d1, d4, d2
fsub d3, d0, d1
fmul d0, d10, d29
ldr q7, [sp, #11952] ; 16-byte Folded Reload
fmul d1, d7, d3
fsub d0, d1, d0
ldr d1, [sp, #9184] ; 8-byte Folded Reload
fmul d1, d10, d1
str q2, [sp, #5440] ; 16-byte Folded Spill
fmul d2, d5, d2
fadd d1, d2, d1
str q6, [sp, #5456] ; 16-byte Folded Spill
fmul d2, d4, d6
ldur d18, [x29, #-256] ; 8-byte Folded Reload
fadd d1, d2, d1
ldr d4, [sp, #12048] ; 8-byte Folded Reload
fmul d2, d4, d11
str d2, [sp, #3376] ; 8-byte Folded Spill
fmul d2, d4, d2
ldr d5, [sp, #8664] ; 8-byte Folded Reload
fmul d2, d2, d5
ldr q11, [sp, #5920] ; 16-byte Folded Reload
ldr q8, [sp, #8592] ; 16-byte Folded Reload
fadd d1, d1, d2
ldr d2, [sp, #8688] ; 8-byte Folded Reload
fmul d2, d4, d2
str d2, [sp, #3352] ; 8-byte Folded Spill
fmul d2, d4, d2
ldr d4, [sp, #8368] ; 8-byte Folded Reload
fmul d2, d2, d4
fsub d4, d1, d2
ldr q6, [sp, #11792] ; 16-byte Folded Reload
fmul d1, d6, d4
fsub d0, d0, d1
ldr d1, [sp, #5960] ; 8-byte Folded Reload
ldr d5, [sp, #12072] ; 8-byte Folded Reload
fmul d1, d5, d1
fmul d2, d1, d29
fadd d25, d0, d2
fmul d0, d10, d13
str q3, [sp, #832] ; 16-byte Folded Spill
fmul d2, d6, d3
fsub d0, d2, d0
str q4, [sp, #800] ; 16-byte Folded Spill
fmul d2, d7, d4
fadd d0, d0, d2
fmul d1, d1, d13
ldr q13, [sp, #6800] ; 16-byte Folded Reload
fadd d0, d0, d1
fmul d1, d5, d23
str d1, [sp, #3328] ; 8-byte Folded Spill
fmul d1, d5, d1
fmul d1, d1, d27
fsub d26, d0, d1
mov x9, #47887
movk x9, #56309, lsl #16
movk x9, #15746, lsl #32
movk x9, #16444, lsl #48
mov x10, #28852
movk x10, #37576, lsl #16
movk x10, #2974, lsl #32
movk x10, #16424, lsl #48
fmov d0, x9
fmul d29, d18, d0
fmov d0, x10
fmul d27, d18, d0
ldr d0, [sp, #6472] ; 8-byte Folded Reload
fmul d31, d18, d0
ldr d0, [sp, #6464] ; 8-byte Folded Reload
fmul d9, d18, d0
cbz x8, LBB19_42
; %bb.41:
ldr q21, [sp, #10736] ; 16-byte Folded Reload
fmul d2, d21, d28
mov x9, #47887
movk x9, #56309, lsl #16
movk x9, #15746, lsl #32
movk x9, #16444, lsl #48
fmov d0, x9
ldr d24, [sp, #11248] ; 8-byte Folded Reload
fmul d0, d24, d0
mov x9, #54885
movk x9, #33778, lsl #16
movk x9, #12745, lsl #32
movk x9, #16308, lsl #48
fmov d3, x9
fmul d1, d0, d3
mov x9, #26610
movk x9, #29696, lsl #16
movk x9, #48971, lsl #32
movk x9, #16339, lsl #48
fmov d4, x9
fmul d4, d0, d4
fadd d1, d1, d4
ldr q20, [sp, #11136] ; 16-byte Folded Reload
fmul d4, d20, d8
fadd d4, d1, d4
mov x9, #28852
movk x9, #37576, lsl #16
movk x9, #2974, lsl #32
movk x9, #16424, lsl #48
fmov d1, x9
fmul d5, d24, d1
fadd d1, d5, d31
mov x9, #39127
movk x9, #24179, lsl #16
movk x9, #24811, lsl #32
movk x9, #49072, lsl #48
fmov d6, x9
fmul d7, d1, d6
fadd d4, d4, d7
fsub d2, d2, d4
ldr q22, [sp, #11216] ; 16-byte Folded Reload
fmul d4, d22, d11
fsub d4, d2, d4
fadd d2, d5, d9
mov x9, #39127
movk x9, #24179, lsl #16
movk x9, #24811, lsl #32
movk x9, #16304, lsl #48
fmov d5, x9
fmul d5, d2, d5
fadd d4, d4, d5
ldr q23, [sp, #10720] ; 16-byte Folded Reload
ldr q12, [sp, #4704] ; 16-byte Folded Reload
fmul d5, d23, d12
fadd d4, d5, d4
fmul d5, d18, d29
mov x9, #48998
movk x9, #16808, lsl #16
movk x9, #62387, lsl #32
movk x9, #16312, lsl #48
fmov d7, x9
fmul d7, d5, d7
ldr d16, [sp, #6440] ; 8-byte Folded Reload
fadd d7, d16, d7
mov x9, #33620
movk x9, #2364, lsl #16
movk x9, #33974, lsl #32
movk x9, #49073, lsl #48
fmov d16, x9
fmul d17, d30, d16
fadd d7, d7, d17
fmul d17, d21, d13
fadd d7, d7, d17
fmul d17, d20, d14
fsub d7, d7, d17
fmul d17, d18, d27
fmul d18, d17, d16
fsub d7, d7, d18
fmul d19, d10, d16
fadd d7, d7, d19
fmul d19, d23, d25
fadd d7, d7, d19
fmul d19, d22, d26
fsub d7, d7, d19
fsub d7, d7, d18
ldr d18, [sp, #6496] ; 8-byte Folded Reload
fadd d7, d18, d7
fadd d4, d4, d4
fadd d7, d7, d7
fadd d4, d4, d7
fmul d3, d5, d3
ldr d5, [sp, #6736] ; 8-byte Folded Reload
fsub d3, d5, d3
fmul d5, d30, d6
fadd d3, d3, d5
fmul d5, d20, d13
fadd d3, d3, d5
fmul d5, d21, d14
fadd d3, d3, d5
fmul d5, d17, d6
fsub d3, d3, d5
fmul d6, d10, d6
fadd d3, d3, d6
fmul d6, d22, d25
fadd d3, d3, d6
fmul d6, d23, d26
fadd d3, d3, d6
fsub d3, d3, d5
mov x9, #48998
movk x9, #16808, lsl #16
movk x9, #62387, lsl #32
movk x9, #49080, lsl #48
fmov d5, x9
fmul d5, d0, d5
mov x9, #39915
movk x9, #11776, lsl #16
movk x9, #40689, lsl #32
movk x9, #49053, lsl #48
fmov d6, x9
fmul d0, d0, d6
fadd d0, d5, d0
fmul d5, d21, d8
fadd d0, d0, d5
fmul d1, d1, d16
fadd d0, d0, d1
fmul d1, d20, d28
fadd d0, d1, d0
fmul d1, d23, d11
fadd d0, d0, d1
fmul d1, d2, d16
fadd d0, d0, d1
fmul d1, d22, d12
fadd d0, d1, d0
mov x9, #43139
movk x9, #8835, lsl #16
movk x9, #28093, lsl #32
movk x9, #49187, lsl #48
fmov d1, x9
fmul d1, d24, d1
mov x9, #33620
movk x9, #2364, lsl #16
movk x9, #33974, lsl #32
movk x9, #16305, lsl #48
fmov d2, x9
fmul d1, d1, d2
fadd d0, d0, d1
ldr d5, [sp, #11400] ; 8-byte Folded Reload
fmul d1, d5, d4
fadd d0, d0, d3
ldr d2, [sp, #11160] ; 8-byte Folded Reload
fmul d2, d2, d0
fsub d1, d1, d2
fmul d1, d5, d1
ldr d3, [sp, #11392] ; 8-byte Folded Reload
fmul d2, d3, d4
ldr d4, [sp, #11240] ; 8-byte Folded Reload
fmul d0, d4, d0
fadd d0, d0, d2
fmov d2, #0.50000000
fmul d1, d1, d2
fmul d0, d3, d0
fmul d0, d0, d2
fsub d0, d1, d0
str d0, [x8, #16]
LBB19_42:
str d31, [sp, #104] ; 8-byte Folded Spill
str d9, [sp, #248] ; 8-byte Folded Spill
stp d29, d27, [sp, #480] ; 16-byte Folded Spill
str d10, [sp, #744] ; 8-byte Folded Spill
stp q26, q25, [sp, #752] ; 32-byte Folded Spill
str d30, [sp, #784] ; 8-byte Folded Spill
ldr d0, [sp, #11304] ; 8-byte Folded Reload
ldr d1, [sp, #11408] ; 8-byte Folded Reload
fsub d1, d1, d0
ldr d0, [sp, #12328] ; 8-byte Folded Reload
fadd d17, d1, d0
ldr d0, [sp, #12256] ; 8-byte Folded Reload
fadd d18, d17, d0
ldr d0, [sp, #12112] ; 8-byte Folded Reload
ldr d2, [sp, #12080] ; 8-byte Folded Reload
fadd d22, d2, d0
ldur d0, [x29, #-224] ; 8-byte Folded Reload
fadd d19, d22, d0
ldr d0, [sp, #12264] ; 8-byte Folded Reload
fadd d24, d19, d0
ldr d30, [sp, #11912] ; 8-byte Folded Reload
fadd d3, d24, d30
ldr d0, [sp, #9808] ; 8-byte Folded Reload
fdiv d0, d3, d0
ldr d2, [sp, #9768] ; 8-byte Folded Reload
fmul d7, d2, d0
ldr d2, [sp, #9760] ; 8-byte Folded Reload
fmul d2, d7, d2
fadd d2, d18, d2
ldr d4, [sp, #12288] ; 8-byte Folded Reload
fadd d2, d4, d2
ldr d4, [sp, #10264] ; 8-byte Folded Reload
fmul d4, d4, d2
fmov d5, #0.50000000
fmul d4, d4, d5
fmov d21, #0.50000000
ldr d5, [sp, #10816] ; 8-byte Folded Reload
fmul d4, d5, d4
mov x9, #4632233691727265792
fmov d25, x9
fmul d4, d4, d25
ldr d5, [sp, #10808] ; 8-byte Folded Reload
fmul d5, d5, d2
str d5, [sp, #9120] ; 8-byte Folded Spill
ldr d2, [sp, #10280] ; 8-byte Folded Reload
fmul d5, d2, d5
str d5, [sp, #9112] ; 8-byte Folded Spill
ldr d2, [sp, #10288] ; 8-byte Folded Reload
fmul d2, d2, d5
fmov d29, #3.00000000
fmul d2, d2, d29
fadd d28, d4, d2
ldr d31, [sp, #11744] ; 8-byte Folded Reload
fadd d6, d24, d31
ldr d2, [sp, #10272] ; 8-byte Folded Reload
fdiv d4, d6, d2
str d4, [sp, #9176] ; 8-byte Folded Spill
ldr d2, [sp, #10224] ; 8-byte Folded Reload
fmul d16, d2, d4
ldr d2, [sp, #9744] ; 8-byte Folded Reload
fmul d2, d16, d2
fadd d2, d18, d2
ldr d4, [sp, #12168] ; 8-byte Folded Reload
fadd d2, d4, d2
ldr d4, [sp, #10240] ; 8-byte Folded Reload
fmul d4, d4, d2
fmul d4, d4, d21
ldr d5, [sp, #10800] ; 8-byte Folded Reload
fmul d4, d5, d4
fmul d4, d4, d25
ldr d5, [sp, #10792] ; 8-byte Folded Reload
fmul d5, d5, d2
str d5, [sp, #9216] ; 8-byte Folded Spill
ldr d2, [sp, #10248] ; 8-byte Folded Reload
fmul d5, d2, d5
str d5, [sp, #9200] ; 8-byte Folded Spill
ldr d2, [sp, #10256] ; 8-byte Folded Reload
fmul d2, d2, d5
fmul d2, d2, d29
fadd d4, d4, d2
ldr d2, [sp, #11192] ; 8-byte Folded Reload
fadd d5, d19, d2
str d5, [sp, #8712] ; 8-byte Folded Spill
ldr d2, [sp, #8808] ; 8-byte Folded Reload
fdiv d2, d5, d2
str d2, [sp, #9192] ; 8-byte Folded Spill
ldr d5, [sp, #8816] ; 8-byte Folded Reload
fmul d2, d5, d2
str d2, [sp, #9184] ; 8-byte Folded Spill
ldr d20, [sp, #8800] ; 8-byte Folded Reload
fmul d26, d2, d20
fadd d26, d17, d26
ldr d20, [sp, #11184] ; 8-byte Folded Reload
fadd d26, d20, d26
ldr d20, [sp, #9688] ; 8-byte Folded Reload
fmul d27, d26, d20
fmul d27, d27, d21
ldr d20, [sp, #10128] ; 8-byte Folded Reload
fmul d27, d20, d27
fmul d25, d27, d25
ldr d20, [sp, #10120] ; 8-byte Folded Reload
fmul d2, d26, d20
str d2, [sp, #9208] ; 8-byte Folded Spill
ldr d20, [sp, #9696] ; 8-byte Folded Reload
fmul d2, d20, d2
str d2, [sp, #9224] ; 8-byte Folded Spill
ldr d20, [sp, #9704] ; 8-byte Folded Reload
fmul d26, d20, d2
fmul d26, d26, d29
fadd d13, d26, d25
mov x9, #54806
movk x9, #23353, lsl #16
movk x9, #56949, lsl #32
movk x9, #16326, lsl #48
fmov d25, x9
fmul d27, d24, d25
mov x9, #50080
movk x9, #49599, lsl #16
movk x9, #32579, lsl #32
movk x9, #16368, lsl #48
mov x10, #6432
movk x10, #24166, lsl #16
movk x10, #7623, lsl #32
movk x10, #16309, lsl #48
fmov d24, x10
fmul d22, d22, d24
fmul d1, d1, d24
fmov d24, x9
fmul d19, d19, d24
str d19, [sp, #9320] ; 8-byte Folded Spill
fmul d20, d17, d24
str d20, [sp, #9328] ; 8-byte Folded Spill
ldr d17, [sp, #10072] ; 8-byte Folded Reload
fsub d21, d17, d27
fmul d12, d18, d25
fsub d17, d21, d19
str d21, [sp, #8352] ; 8-byte Folded Spill
ldr d18, [sp, #9408] ; 8-byte Folded Reload
fadd d9, d18, d17
fsub d18, d9, d22
str d18, [sp, #9312] ; 8-byte Folded Spill
str d9, [sp, #8368] ; 8-byte Folded Spill
fadd d17, d28, d4
fadd d8, d17, d13
ldr d2, [sp, #12048] ; 8-byte Folded Reload
fmul d18, d18, d2
fsub d18, d8, d18
str d8, [sp, #6440] ; 8-byte Folded Spill
str d18, [sp, #3304] ; 8-byte Folded Spill
fmul d18, d2, d18
ldr d19, [sp, #8704] ; 8-byte Folded Reload
fadd d18, d19, d18
ldr d19, [sp, #10064] ; 8-byte Folded Reload
fadd d5, d12, d19
fadd d19, d20, d5
str d5, [sp, #8344] ; 8-byte Folded Spill
ldr d22, [sp, #9416] ; 8-byte Folded Reload
fadd d20, d22, d19
fadd d1, d1, d20
str d1, [sp, #9304] ; 8-byte Folded Spill
str d20, [sp, #8360] ; 8-byte Folded Spill
ldr d19, [sp, #11520] ; 8-byte Folded Reload
fmul d1, d1, d19
fadd d1, d1, d18
str d1, [sp, #8664] ; 8-byte Folded Spill
ldr d18, [sp, #10536] ; 8-byte Folded Reload
fmul d3, d3, d18
ldr d18, [sp, #10824] ; 8-byte Folded Reload
fmul d18, d18, d7
fmov d2, #0.50000000
fmul d7, d18, d2
fsub d3, d3, d7
mov x9, #-7378697629483820647
movk x9, #39322
movk x9, #16361, lsl #48
fmov d7, x9
fmul d18, d18, d7
ldr d19, [sp, #9776] ; 8-byte Folded Reload
fmul d18, d18, d19
fmov d1, #5.00000000
fmul d18, d18, d1
fmov d25, #5.00000000
fsub d3, d3, d18
ldr d18, [sp, #10520] ; 8-byte Folded Reload
fdiv d19, d3, d18
ldr d1, [sp, #12232] ; 8-byte Folded Reload
fmul d3, d1, d28
ldr d18, [sp, #10528] ; 8-byte Folded Reload
fmul d29, d18, d19
ldr d26, [sp, #11968] ; 8-byte Folded Reload
fmul d22, d26, d29
fsub d23, d3, d22
ldr d18, [sp, #10512] ; 8-byte Folded Reload
fmul d19, d18, d19
ldr d18, [sp, #9792] ; 8-byte Folded Reload
fmul d0, d0, d18
fsub d18, d19, d0
str d18, [sp, #5856] ; 8-byte Folded Spill
ldur d24, [x29, #-248] ; 8-byte Folded Reload
fmul d0, d24, d29
fmov d10, d29
str d29, [sp, #9104] ; 8-byte Folded Spill
fmul d19, d1, d18
fmov d3, d1
fsub d0, d0, d19
str d0, [sp, #9232] ; 8-byte Folded Spill
fmul d19, d26, d18
fmul d22, d24, d28
fsub d14, d19, d22
ldur d15, [x29, #-192] ; 8-byte Folded Reload
fmul d19, d15, d0
ldr d0, [sp, #8976] ; 8-byte Folded Reload
fsub d19, d19, d0
fadd d19, d14, d19
ldur d0, [x29, #-160] ; 8-byte Folded Reload
fmul d22, d0, d23
fadd d19, d22, d19
ldr d0, [sp, #11728] ; 8-byte Folded Reload
fmul d22, d0, d23
ldr d0, [sp, #7880] ; 8-byte Folded Reload
fsub d22, d22, d0
ldr d0, [sp, #8968] ; 8-byte Folded Reload
fsub d0, d19, d0
str d0, [sp, #4832] ; 8-byte Folded Spill
ldr d11, [sp, #11472] ; 8-byte Folded Reload
fmul d19, d11, d0
fadd d19, d22, d19
ldr d0, [sp, #10416] ; 8-byte Folded Reload
fmul d6, d6, d0
ldr d0, [sp, #11072] ; 8-byte Folded Reload
fmul d16, d0, d16
fmul d22, d16, d2
fsub d6, d6, d22
fmul d16, d16, d7
ldr d0, [sp, #9752] ; 8-byte Folded Reload
fmul d16, d16, d0
fmul d16, d16, d25
fmov d25, #5.00000000
fsub d6, d6, d16
ldr d0, [sp, #10408] ; 8-byte Folded Reload
fdiv d2, d6, d0
str d2, [sp, #8168] ; 8-byte Folded Spill
str d4, [sp, #5864] ; 8-byte Folded Spill
fmul d6, d1, d4
ldr d0, [sp, #10472] ; 8-byte Folded Reload
fmul d0, d0, d2
fmul d16, d26, d0
fmov d22, d0
str d0, [sp, #8240] ; 8-byte Folded Spill
fsub d29, d6, d16
ldr d0, [sp, #11560] ; 8-byte Folded Reload
fmul d6, d0, d29
fadd d6, d6, d19
ldr d0, [sp, #7872] ; 8-byte Folded Reload
fsub d6, d6, d0
mov x9, #51491
movk x9, #54360, lsl #16
movk x9, #13074, lsl #32
movk x9, #16286, lsl #48
fmov d16, x9
fadd d6, d6, d16
fmov d2, d27
fmul d18, d27, d1
ldr d3, [sp, #11512] ; 8-byte Folded Reload
fmul d16, d3, d18
ldr d0, [sp, #11168] ; 8-byte Folded Reload
fmul d19, d12, d0
fadd d16, d16, d19
fmul d19, d27, d26
str d27, [sp, #8736] ; 8-byte Folded Spill
fmul d24, d12, d24
fadd d0, d19, d24
ldr d19, [sp, #11928] ; 8-byte Folded Reload
fmul d19, d19, d0
fmov d24, d0
str d0, [sp, #8688] ; 8-byte Folded Spill
fadd d16, d19, d16
fmul d19, d30, d10
fsub d16, d19, d16
str d28, [sp, #3320] ; 8-byte Folded Spill
ldr d0, [sp, #11720] ; 8-byte Folded Reload
fmul d19, d0, d28
fsub d16, d16, d19
fmul d19, d31, d22
fadd d16, d19, d16
ldr d0, [sp, #11696] ; 8-byte Folded Reload
fmul d19, d0, d4
fsub d16, d16, d19
ldr d0, [sp, #7864] ; 8-byte Folded Reload
fadd d16, d0, d16
ldr d0, [sp, #8960] ; 8-byte Folded Reload
fadd d16, d0, d16
ldr d0, [sp, #7856] ; 8-byte Folded Reload
fadd d0, d0, d16
ldur d4, [x29, #-240] ; 8-byte Folded Reload
str d0, [sp, #1224] ; 8-byte Folded Spill
fmul d16, d4, d0
fadd d6, d16, d6
ldr d10, [sp, #11624] ; 8-byte Folded Reload
fmul d16, d21, d10
fsub d0, d17, d16
str d0, [sp, #3120] ; 8-byte Folded Spill
fmul d16, d10, d0
ldr d0, [sp, #8856] ; 8-byte Folded Reload
fadd d16, d16, d0
ldr d0, [sp, #11504] ; 8-byte Folded Reload
fmul d17, d5, d0
fmov d21, d0
fadd d17, d17, d16
mov x9, #63706
movk x9, #13221, lsl #16
movk x9, #1281, lsl #32
movk x9, #16209, lsl #48
fmov d0, x9
str d0, [sp, #8696] ; 8-byte Folded Spill
fmul d16, d17, d0
fsub d19, d6, d16
str d18, [sp, #4800] ; 8-byte Folded Spill
fmul d6, d1, d18
fmul d16, d26, d24
fadd d6, d6, d16
ldr d0, [sp, #11312] ; 8-byte Folded Reload
fmul d16, d12, d0
fadd d6, d6, d16
ldr d0, [sp, #8952] ; 8-byte Folded Reload
fadd d16, d0, d6
mov x9, #54125
movk x9, #53060, lsl #16
movk x9, #15481, lsl #32
movk x9, #16273, lsl #48
fmov d0, x9
str d0, [sp, #8720] ; 8-byte Folded Spill
fmul d27, d16, d0
fadd d19, d27, d19
fmul d27, d2, d3
ldr d0, [sp, #8944] ; 8-byte Folded Reload
fsub d0, d0, d27
str d0, [sp, #6312] ; 8-byte Folded Spill
fmul d0, d0, d4
str d0, [sp, #3096] ; 8-byte Folded Spill
fmul d27, d10, d0
fadd d19, d27, d19
fmul d27, d12, d3
ldr d0, [sp, #8928] ; 8-byte Folded Reload
fsub d0, d0, d27
str d0, [sp, #6304] ; 8-byte Folded Spill
ldr d1, [sp, #11424] ; 8-byte Folded Reload
fmul d27, d0, d1
fadd d19, d27, d19
str d13, [sp, #3312] ; 8-byte Folded Spill
fmul d30, d10, d13
ldr d0, [sp, #11320] ; 8-byte Folded Reload
fmul d27, d0, d30
fadd d27, d27, d19
ldr d0, [sp, #9856] ; 8-byte Folded Reload
ldr d1, [sp, #8712] ; 8-byte Folded Reload
fmul d4, d1, d0
ldr d0, [sp, #8784] ; 8-byte Folded Reload
ldr d1, [sp, #9184] ; 8-byte Folded Reload
fmul d5, d1, d0
fmov d0, #0.50000000
fmul d19, d5, d0
fsub d4, d4, d19
fmul d5, d5, d7
ldr d0, [sp, #8776] ; 8-byte Folded Reload
fmul d5, d5, d0
fmul d5, d5, d25
fsub d4, d4, d5
ldr d0, [sp, #9848] ; 8-byte Folded Reload
fdiv d4, d4, d0
ldr d0, [sp, #9840] ; 8-byte Folded Reload
fmul d4, d0, d4
ldr d0, [sp, #8792] ; 8-byte Folded Reload
ldr d1, [sp, #9192] ; 8-byte Folded Reload
fmul d2, d1, d0
fsub d0, d4, d2
str d0, [sp, #5848] ; 8-byte Folded Spill
fmul d0, d10, d0
fmul d2, d15, d0
fmov d1, d0
ldr d0, [sp, #7320] ; 8-byte Folded Reload
fadd d2, d0, d2
ldur d25, [x29, #-160] ; 8-byte Folded Reload
fmul d4, d25, d30
fsub d2, d4, d2
ldr d0, [sp, #7848] ; 8-byte Folded Reload
fsub d4, d27, d0
ldr d0, [sp, #7296] ; 8-byte Folded Reload
fsub d0, d2, d0
str d0, [sp, #6200] ; 8-byte Folded Spill
ldr d27, [sp, #11096] ; 8-byte Folded Reload
fmul d2, d27, d0
fadd d4, d4, d2
fmul d2, d9, d10
fsub d0, d8, d2
str d0, [sp, #3088] ; 8-byte Folded Spill
fmul d2, d10, d0
ldr d0, [sp, #8832] ; 8-byte Folded Reload
fadd d2, d0, d2
fmul d5, d20, d21
fadd d13, d5, d2
mov x9, #4354980839667269632
mov x10, #47272
movk x10, #56762, lsl #16
movk x10, #43178, lsl #32
movk x10, #49060, lsl #48
fmov d0, x9
str d0, [sp, #9192] ; 8-byte Folded Spill
fmul d2, d4, d0
fmov d0, x10
str d0, [sp, #9184] ; 8-byte Folded Spill
fmul d5, d13, d0
fsub d2, d5, d2
ldr d28, [sp, #11768] ; 8-byte Folded Reload
ldr d3, [sp, #9232] ; 8-byte Folded Reload
fmul d5, d28, d3
ldr d0, [sp, #7256] ; 8-byte Folded Reload
fsub d5, d5, d0
ldr d0, [sp, #12144] ; 8-byte Folded Reload
fmul d7, d0, d14
fadd d5, d7, d5
ldur d0, [x29, #-232] ; 8-byte Folded Reload
fmul d7, d0, d23
fmov d18, d0
fadd d5, d7, d5
ldr d0, [sp, #11880] ; 8-byte Folded Reload
fmul d7, d0, d23
ldr d0, [sp, #7840] ; 8-byte Folded Reload
fsub d7, d7, d0
ldr d0, [sp, #7248] ; 8-byte Folded Reload
fsub d0, d5, d0
fmul d5, d11, d0
fmov d20, d0
fadd d5, d7, d5
ldr d0, [sp, #11872] ; 8-byte Folded Reload
fmul d7, d0, d29
fadd d5, d7, d5
ldr d0, [sp, #7832] ; 8-byte Folded Reload
fsub d5, d5, d0
mov x9, #46543
movk x9, #48510, lsl #16
movk x9, #46414, lsl #32
movk x9, #16260, lsl #48
fmov d7, x9
fadd d5, d5, d7
ldr d0, [sp, #7776] ; 8-byte Folded Reload
fadd d5, d0, d5
mov x9, #62994
movk x9, #14722, lsl #16
movk x9, #41829, lsl #32
movk x9, #16247, lsl #48
fmov d0, x9
str d0, [sp, #8712] ; 8-byte Folded Spill
fmul d7, d16, d0
fadd d5, d5, d7
mov x9, #18456
movk x9, #63321, lsl #16
movk x9, #33926, lsl #32
movk x9, #48991, lsl #48
ldr d0, [sp, #7744] ; 8-byte Folded Reload
fadd d5, d0, d5
ldr d0, [sp, #7728] ; 8-byte Folded Reload
fadd d6, d0, d5
fmov d0, x9
str d0, [sp, #8232] ; 8-byte Folded Spill
fmul d5, d17, d0
ldr d0, [sp, #12304] ; 8-byte Folded Reload
fmul d7, d0, d6
fmov d9, d0
fadd d5, d7, d5
ldr d22, [sp, #11776] ; 8-byte Folded Reload
fmul d7, d22, d3
ldr d19, [sp, #7240] ; 8-byte Folded Reload
fsub d7, d7, d19
str d14, [sp, #1080] ; 8-byte Folded Spill
ldr d0, [sp, #12152] ; 8-byte Folded Reload
fmul d31, d0, d14
fadd d7, d31, d7
ldr d19, [sp, #12296] ; 8-byte Folded Reload
fmul d31, d19, d23
fadd d7, d31, d7
ldr d0, [sp, #12096] ; 8-byte Folded Reload
fmul d3, d0, d23
ldr d24, [sp, #7720] ; 8-byte Folded Reload
fsub d3, d3, d24
ldr d24, [sp, #7232] ; 8-byte Folded Reload
fsub d23, d7, d24
fmul d7, d11, d23
fadd d3, d3, d7
ldr d8, [sp, #4832] ; 8-byte Folded Reload
fmov d0, d25
fmul d7, d25, d8
fmul d31, d18, d20
fmov d18, d20
str d20, [sp, #4792] ; 8-byte Folded Spill
fadd d7, d7, d31
fmul d31, d19, d23
fmov d25, d23
str d23, [sp, #4776] ; 8-byte Folded Spill
fadd d7, d31, d7
mov x9, #-7378697629483820647
movk x9, #39322
movk x9, #16297, lsl #48
fmov d20, x9
str d20, [sp, #7984] ; 8-byte Folded Spill
fmul d7, d7, d20
fadd d3, d3, d7
ldr d7, [sp, #12088] ; 8-byte Folded Reload
fmul d7, d7, d29
fadd d3, d7, d3
ldr d7, [sp, #7688] ; 8-byte Folded Reload
fsub d3, d3, d7
mov x9, #57269
movk x9, #60105, lsl #16
movk x9, #55991, lsl #32
movk x9, #16301, lsl #48
fmov d7, x9
fadd d3, d3, d7
ldr d7, [sp, #7672] ; 8-byte Folded Reload
fadd d3, d7, d3
mov x9, #56877
movk x9, #10885, lsl #16
movk x9, #2572, lsl #32
movk x9, #16289, lsl #48
fmov d7, x9
str d7, [sp, #7992] ; 8-byte Folded Spill
fmul d16, d16, d7
fadd d3, d3, d16
ldr d7, [sp, #7664] ; 8-byte Folded Reload
fsub d3, d3, d7
ldr d7, [sp, #7656] ; 8-byte Folded Reload
fadd d7, d7, d3
ldr d16, [sp, #12312] ; 8-byte Folded Reload
fmul d3, d16, d7
fmov d23, d16
fadd d3, d3, d5
ldr d5, [sp, #11528] ; 8-byte Folded Reload
fmul d5, d5, d30
fadd d3, d5, d3
ldr d24, [sp, #11688] ; 8-byte Folded Reload
fmov d20, d1
str d1, [sp, #6424] ; 8-byte Folded Spill
fmul d5, d24, d1
ldr d16, [sp, #7288] ; 8-byte Folded Reload
fadd d5, d16, d5
ldr d11, [sp, #11488] ; 8-byte Folded Reload
fmul d16, d11, d30
fsub d5, d16, d5
ldr d16, [sp, #7648] ; 8-byte Folded Reload
fsub d3, d3, d16
ldr d16, [sp, #7280] ; 8-byte Folded Reload
fsub d14, d5, d16
fmul d5, d27, d14
str d14, [sp, #6216] ; 8-byte Folded Spill
fadd d3, d3, d5
mov x9, #26288
movk x9, #13902, lsl #16
movk x9, #44107, lsl #32
movk x9, #16338, lsl #48
fmov d5, x9
fadd d3, d3, d5
ldr d1, [sp, #9320] ; 8-byte Folded Reload
fmul d5, d1, d10
str d5, [sp, #3040] ; 8-byte Folded Spill
fmul d5, d10, d5
ldr d1, [sp, #9328] ; 8-byte Folded Reload
fmul d16, d1, d21
fadd d5, d5, d16
ldr d16, [sp, #7640] ; 8-byte Folded Reload
fadd d16, d5, d16
mov x9, #45974
movk x9, #34787, lsl #16
movk x9, #35902, lsl #32
movk x9, #16285, lsl #48
fmov d1, x9
str d1, [sp, #8208] ; 8-byte Folded Spill
fmul d5, d16, d1
fadd d21, d5, d3
mov x9, #36544
movk x9, #43611, lsl #16
movk x9, #860, lsl #32
movk x9, #16326, lsl #48
fmov d1, x9
str d1, [sp, #8200] ; 8-byte Folded Spill
fmul d3, d17, d1
str d6, [sp, #1064] ; 8-byte Folded Spill
fmul d5, d23, d6
fsub d3, d3, d5
str d7, [sp, #1040] ; 8-byte Folded Spill
fmul d5, d9, d7
fadd d3, d5, d3
ldr d1, [sp, #11432] ; 8-byte Folded Reload
fmul d5, d1, d30
fadd d3, d5, d3
ldr d6, [sp, #11680] ; 8-byte Folded Reload
fmul d5, d6, d30
ldr d7, [sp, #11632] ; 8-byte Folded Reload
fmul d17, d7, d20
ldr d30, [sp, #7272] ; 8-byte Folded Reload
fadd d17, d30, d17
fsub d5, d5, d17
ldr d17, [sp, #7560] ; 8-byte Folded Reload
fsub d3, d3, d17
ldr d17, [sp, #7264] ; 8-byte Folded Reload
fsub d1, d5, d17
fmul d5, d27, d1
fadd d3, d3, d5
ldr d31, [sp, #6200] ; 8-byte Folded Reload
fmul d5, d0, d31
fmov d30, d0
fmul d17, d11, d14
fadd d5, d5, d17
fmul d17, d6, d1
fmov d11, d1
str d1, [sp, #6208] ; 8-byte Folded Spill
fadd d17, d17, d5
mov x9, #43516
movk x9, #54001, lsl #16
movk x9, #25165, lsl #32
movk x9, #16240, lsl #48
fmov d0, x9
str d0, [sp, #7976] ; 8-byte Folded Spill
fmul d17, d17, d0
fadd d3, d3, d17
mov x9, #21969
movk x9, #1325, lsl #16
movk x9, #7976, lsl #32
movk x9, #16367, lsl #48
fmov d17, x9
fadd d17, d3, d17
mov x9, #49235
movk x9, #28989, lsl #16
movk x9, #40841, lsl #32
movk x9, #16312, lsl #48
fmov d0, x9
str d0, [sp, #6768] ; 8-byte Folded Spill
fmul d16, d16, d0
fadd d16, d16, d17
fadd d2, d2, d21
mov x9, #4363988038922010624
fmov d5, x9
str d5, [sp, #6736] ; 8-byte Folded Spill
fmul d17, d16, d5
fadd d6, d2, d17
mov x9, #43115
movk x9, #62349, lsl #16
movk x9, #30721, lsl #32
movk x9, #16347, lsl #48
fmov d0, x9
str d0, [sp, #8248] ; 8-byte Folded Spill
ldr d1, [sp, #8664] ; 8-byte Folded Reload
fmul d1, d1, d0
ldr q3, [sp, #11984] ; 16-byte Folded Reload
fmul d2, d3, d6
fsub d17, d2, d1
mov x9, #4359484439294640128
fmov d0, x9
str d0, [sp, #6272] ; 8-byte Folded Spill
fmul d4, d4, d0
mov x9, #62612
movk x9, #18904, lsl #16
movk x9, #1144, lsl #32
movk x9, #16296, lsl #48
fmov d0, x9
str d0, [sp, #6280] ; 8-byte Folded Spill
fmul d13, d13, d0
fsub d4, d4, d13
fmul d13, d21, d5
fadd d4, d4, d13
fadd d1, d4, d16
ldr q2, [sp, #11808] ; 16-byte Folded Reload
fmul d4, d2, d1
mov.16b v5, v2
fadd d4, d4, d17
mov x9, #31036
movk x9, #52462, lsl #16
movk x9, #23267, lsl #32
movk x9, #16406, lsl #48
fmov d16, x9
fsub d4, d16, d4
mov x9, #5915
movk x9, #64709, lsl #16
movk x9, #30489, lsl #32
movk x9, #16392, lsl #48
fmov d16, x9
ldr d2, [sp, #12080] ; 8-byte Folded Reload
fmul d0, d2, d16
ldr d2, [sp, #11408] ; 8-byte Folded Reload
fmul d20, d2, d16
ldr d2, [sp, #12048] ; 8-byte Folded Reload
fmul d16, d0, d2
str d0, [sp, #8312] ; 8-byte Folded Spill
str d16, [sp, #3008] ; 8-byte Folded Spill
fmul d16, d2, d16
ldr d2, [sp, #11520] ; 8-byte Folded Reload
fmul d17, d20, d2
str d20, [sp, #8304] ; 8-byte Folded Spill
fadd d16, d16, d17
ldr d2, [sp, #9096] ; 8-byte Folded Reload
fsub d16, d2, d16
mov x9, #2356
movk x9, #12413, lsl #16
movk x9, #55910, lsl #32
movk x9, #16327, lsl #48
fmov d2, x9
str d2, [sp, #8216] ; 8-byte Folded Spill
fmul d16, d16, d2
fsub d21, d4, d16
stp q1, q6, [sp, #688] ; 32-byte Folded Spill
fmul d4, d3, d1
fmul d16, d5, d6
fsub d1, d4, d16
ldr d2, [sp, #9312] ; 8-byte Folded Reload
fsub d4, d2, d0
ldr d6, [sp, #8480] ; 8-byte Folded Reload
fadd d0, d6, d4
str d0, [sp, #8296] ; 8-byte Folded Spill
ldr d3, [sp, #12072] ; 8-byte Folded Reload
fmul d4, d0, d3
ldr d2, [sp, #6440] ; 8-byte Folded Reload
fsub d4, d2, d4
str d4, [sp, #3016] ; 8-byte Folded Spill
fmul d4, d3, d4
ldr d2, [sp, #8288] ; 8-byte Folded Reload
fadd d4, d2, d4
ldr d2, [sp, #9304] ; 8-byte Folded Reload
fadd d16, d20, d2
ldr d6, [sp, #8488] ; 8-byte Folded Reload
fadd d0, d6, d16
str d0, [sp, #8288] ; 8-byte Folded Spill
ldr d2, [sp, #11640] ; 8-byte Folded Reload
fmul d16, d0, d2
fadd d16, d16, d4
mov x9, #52090
movk x9, #42545, lsl #16
movk x9, #26349, lsl #32
movk x9, #16345, lsl #48
ldr q0, [sp, #11792] ; 16-byte Folded Reload
fmul d4, d0, d1
mov.16b v3, v0
fmov d0, x9
str d0, [sp, #8160] ; 8-byte Folded Spill
fmul d14, d16, d0
fsub d4, d4, d14
ldr q27, [sp, #11952] ; 16-byte Folded Reload
fmul d14, d27, d21
fsub d4, d14, d4
mov x9, #42186
movk x9, #52566, lsl #16
movk x9, #11879, lsl #32
movk x9, #16425, lsl #48
fmov d14, x9
fadd d2, d4, d14
str d2, [sp, #8664] ; 8-byte Folded Spill
mov x9, #11201
movk x9, #50599, lsl #16
movk x9, #31589, lsl #32
movk x9, #16242, lsl #48
fmov d0, x9
str d0, [sp, #6432] ; 8-byte Folded Spill
fmul d16, d16, d0
stp q1, q21, [sp, #656] ; 32-byte Folded Spill
fmul d14, d27, d1
fsub d16, d14, d16
fmul d14, d3, d21
fadd d0, d16, d14
str q0, [sp, #5872] ; 16-byte Folded Spill
ldr d1, [sp, #10856] ; 8-byte Folded Reload
ldr d2, [sp, #9112] ; 8-byte Folded Reload
fmul d16, d1, d2
mov x9, #10523
movk x9, #38535, lsl #16
movk x9, #12921, lsl #32
movk x9, #49410, lsl #48
fmov d23, x9
fmul d16, d16, d23
ldr d1, [sp, #10864] ; 8-byte Folded Reload
ldr d2, [sp, #9120] ; 8-byte Folded Reload
fmul d21, d1, d2
fmov d2, #0.50000000
fmul d21, d21, d2
ldr d1, [sp, #10200] ; 8-byte Folded Reload
fmul d21, d1, d21
mov x9, #211106232532992
movk x9, #16498, lsl #48
fmov d14, x9
fmul d21, d21, d14
ldr d1, [sp, #10160] ; 8-byte Folded Reload
fmul d16, d1, d16
ldr d1, [sp, #10152] ; 8-byte Folded Reload
fdiv d16, d16, d1
ldr d1, [sp, #10168] ; 8-byte Folded Reload
fmul d16, d1, d16
fadd d16, d21, d16
fmul d21, d15, d8
fmov d4, d8
fmul d23, d28, d18
fadd d21, d21, d23
fmul d23, d22, d25
fadd d8, d23, d21
fadd d16, d16, d8
fmul d21, d8, d2
fsub d21, d16, d21
ldr d1, [sp, #10400] ; 8-byte Folded Reload
ldr d0, [sp, #8168] ; 8-byte Folded Reload
fmul d16, d1, d0
ldr d1, [sp, #10832] ; 8-byte Folded Reload
ldr d2, [sp, #9176] ; 8-byte Folded Reload
fmul d20, d2, d1
fsub d2, d16, d20
ldur d3, [x29, #-248] ; 8-byte Folded Reload
ldr d13, [sp, #8240] ; 8-byte Folded Reload
fmul d16, d3, d13
ldr d18, [sp, #12232] ; 8-byte Folded Reload
fmul d20, d18, d2
fsub d20, d16, d20
fmul d16, d26, d2
ldr d0, [sp, #5864] ; 8-byte Folded Reload
fmul d23, d3, d0
fsub d1, d16, d23
fmul d16, d15, d20
ldr d6, [sp, #8848] ; 8-byte Folded Reload
fsub d16, d16, d6
fadd d16, d1, d16
fmul d23, d30, d29
fadd d16, d23, d16
fmul d23, d28, d20
ldr d6, [sp, #7184] ; 8-byte Folded Reload
fsub d23, d23, d6
ldr d0, [sp, #12144] ; 8-byte Folded Reload
fmul d26, d0, d1
fadd d23, d26, d23
ldur d0, [x29, #-232] ; 8-byte Folded Reload
fmul d26, d0, d29
fadd d23, d26, d23
ldr d6, [sp, #8840] ; 8-byte Folded Reload
fsub d9, d16, d6
ldr d6, [sp, #7136] ; 8-byte Folded Reload
fsub d23, d23, d6
fmul d16, d15, d9
fmul d26, d28, d23
fadd d26, d16, d26
fmul d16, d22, d20
ldr d6, [sp, #7096] ; 8-byte Folded Reload
fsub d16, d16, d6
str d1, [sp, #880] ; 8-byte Folded Spill
ldr d0, [sp, #12152] ; 8-byte Folded Reload
fmul d6, d0, d1
fadd d6, d6, d16
fmul d16, d19, d29
fadd d6, d16, d6
ldr d1, [sp, #7072] ; 8-byte Folded Reload
fsub d16, d6, d1
fmul d6, d22, d16
fadd d6, d6, d26
ldr d0, [sp, #10840] ; 8-byte Folded Reload
ldr d1, [sp, #9200] ; 8-byte Folded Reload
fmul d26, d0, d1
mov x9, #18811
movk x9, #34700, lsl #16
movk x9, #61210, lsl #32
movk x9, #49411, lsl #48
fmov d29, x9
fmul d26, d26, d29
ldr d0, [sp, #10848] ; 8-byte Folded Reload
ldr d1, [sp, #9216] ; 8-byte Folded Reload
fmul d29, d0, d1
fmov d1, #0.50000000
fmul d29, d29, d1
ldr d0, [sp, #10208] ; 8-byte Folded Reload
fmul d29, d0, d29
fmul d29, d29, d14
ldr d0, [sp, #10184] ; 8-byte Folded Reload
fmul d26, d0, d26
ldr d0, [sp, #10176] ; 8-byte Folded Reload
fdiv d26, d26, d0
ldr d0, [sp, #10192] ; 8-byte Folded Reload
fmul d26, d0, d26
fadd d26, d29, d26
fsub d29, d21, d8
fsub d29, d29, d6
fadd d26, d26, d6
fmul d6, d6, d1
fmov d1, #0.50000000
fsub d6, d26, d6
fadd d29, d6, d29
fmul d6, d15, d31
ldur d30, [x29, #-256] ; 8-byte Folded Reload
ldr d17, [sp, #6216] ; 8-byte Folded Reload
fmul d26, d24, d17
fadd d6, d6, d26
fmul d26, d7, d11
fadd d26, d26, d6
ldr d0, [sp, #10136] ; 8-byte Folded Reload
ldr d6, [sp, #9208] ; 8-byte Folded Reload
fmul d6, d0, d6
fmul d6, d6, d1
ldr d0, [sp, #9680] ; 8-byte Folded Reload
fmul d6, d0, d6
fmul d6, d6, d14
ldr d0, [sp, #10144] ; 8-byte Folded Reload
ldr d7, [sp, #9224] ; 8-byte Folded Reload
fmul d8, d0, d7
mov x9, #45572
movk x9, #23979, lsl #16
movk x9, #34811, lsl #32
movk x9, #49413, lsl #48
fmov d14, x9
fmul d8, d8, d14
ldr d0, [sp, #9672] ; 8-byte Folded Reload
fmul d8, d0, d8
ldr d0, [sp, #9656] ; 8-byte Folded Reload
fdiv d8, d8, d0
ldr d0, [sp, #9664] ; 8-byte Folded Reload
fmul d8, d0, d8
fadd d6, d6, d8
fadd d6, d6, d26
fmul d8, d26, d1
fsub d14, d6, d8
ldr d0, [sp, #11728] ; 8-byte Folded Reload
ldr d5, [sp, #9232] ; 8-byte Folded Reload
fmul d6, d0, d5
ldr d0, [sp, #7216] ; 8-byte Folded Reload
fsub d6, d6, d0
ldr d19, [sp, #11360] ; 8-byte Folded Reload
fmul d8, d19, d4
fadd d6, d6, d8
ldr d0, [sp, #11560] ; 8-byte Folded Reload
fmul d8, d0, d20
fadd d6, d8, d6
ldr d0, [sp, #7200] ; 8-byte Folded Reload
fsub d6, d6, d0
ldr d1, [sp, #11272] ; 8-byte Folded Reload
fmul d8, d1, d9
fadd d6, d8, d6
ldr d15, [sp, #8696] ; 8-byte Folded Reload
fmul d8, d29, d15
fsub d6, d6, d8
ldr d8, [sp, #11920] ; 8-byte Folded Reload
ldr d11, [sp, #8688] ; 8-byte Folded Reload
fmul d8, d8, d11
ldr d24, [sp, #8736] ; 8-byte Folded Reload
ldr d0, [sp, #11168] ; 8-byte Folded Reload
fmul d9, d24, d0
fsub d8, d8, d9
str d12, [sp, #6496] ; 8-byte Folded Spill
fmov d25, d18
fmul d7, d12, d18
ldr d0, [sp, #11512] ; 8-byte Folded Reload
fmul d9, d0, d7
fadd d8, d9, d8
ldr d0, [sp, #12288] ; 8-byte Folded Reload
ldr d4, [sp, #9104] ; 8-byte Folded Reload
fmul d18, d0, d4
fsub d18, d8, d18
ldr d9, [sp, #5856] ; 8-byte Folded Reload
ldr d0, [sp, #11720] ; 8-byte Folded Reload
fmul d8, d0, d9
fadd d18, d18, d8
ldr d0, [sp, #12168] ; 8-byte Folded Reload
fmul d22, d0, d13
fsub d18, d18, d22
ldr d0, [sp, #11696] ; 8-byte Folded Reload
fmul d22, d0, d2
fadd d18, d22, d18
ldr d0, [sp, #8864] ; 8-byte Folded Reload
fadd d18, d0, d18
ldr d0, [sp, #7224] ; 8-byte Folded Reload
fadd d18, d0, d18
ldr d0, [sp, #7208] ; 8-byte Folded Reload
fadd d0, d0, d18
str d0, [sp, #1072] ; 8-byte Folded Spill
ldur d12, [x29, #-240] ; 8-byte Folded Reload
fmul d18, d12, d0
fadd d6, d18, d6
fmul d18, d3, d11
fmov d0, d24
ldr d3, [sp, #11312] ; 8-byte Folded Reload
fmul d22, d24, d3
fsub d18, d22, d18
str d7, [sp, #4736] ; 8-byte Folded Spill
fmul d22, d25, d7
fsub d18, d18, d22
ldr d0, [sp, #7024] ; 8-byte Folded Reload
fadd d6, d0, d6
ldr d0, [sp, #6984] ; 8-byte Folded Reload
fadd d6, d0, d6
ldr d0, [sp, #8888] ; 8-byte Folded Reload
fadd d24, d18, d0
ldr d0, [sp, #8720] ; 8-byte Folded Reload
fmul d18, d24, d0
fadd d18, d6, d18
ldr d0, [sp, #8352] ; 8-byte Folded Reload
ldr d11, [sp, #11504] ; 8-byte Folded Reload
fmul d6, d0, d11
ldr d0, [sp, #6896] ; 8-byte Folded Reload
fadd d22, d6, d0
fmov d0, d9
str d2, [sp, #2944] ; 8-byte Folded Spill
fadd d6, d9, d2
ldr d0, [sp, #8344] ; 8-byte Folded Reload
fmul d8, d0, d10
fadd d0, d8, d6
str d0, [sp, #2776] ; 8-byte Folded Spill
fmul d8, d10, d0
fadd d22, d8, d22
fmul d25, d22, d15
fadd d18, d18, d25
ldr d0, [sp, #6312] ; 8-byte Folded Reload
ldr d2, [sp, #11424] ; 8-byte Folded Reload
fmul d25, d0, d2
fadd d18, d25, d18
ldr d0, [sp, #6304] ; 8-byte Folded Reload
fmul d0, d0, d12
str d0, [sp, #2760] ; 8-byte Folded Spill
fmul d25, d10, d0
fsub d18, d18, d25
ldr d0, [sp, #11320] ; 8-byte Folded Reload
ldr d12, [sp, #6424] ; 8-byte Folded Reload
fmul d25, d0, d12
fsub d18, d18, d25
ldr d0, [sp, #6976] ; 8-byte Folded Reload
fsub d18, d18, d0
ldr d8, [sp, #10872] ; 8-byte Folded Reload
fmul d25, d8, d31
fadd d18, d25, d18
fsub d25, d29, d26
fadd d9, d25, d14
ldr d0, [sp, #6272] ; 8-byte Folded Reload
fmul d2, d18, d0
ldr d31, [sp, #6280] ; 8-byte Folded Reload
fmul d25, d9, d31
fsub d2, d2, d25
ldr d0, [sp, #8232] ; 8-byte Folded Reload
fmul d25, d29, d0
mov x9, #18456
movk x9, #63321, lsl #16
movk x9, #33926, lsl #32
movk x9, #16223, lsl #48
fmov d26, x9
fmul d26, d22, d26
fadd d25, d25, d26
ldr d0, [sp, #12096] ; 8-byte Folded Reload
fmul d26, d0, d5
ldr d0, [sp, #7168] ; 8-byte Folded Reload
fsub d26, d26, d0
ldr d0, [sp, #4776] ; 8-byte Folded Reload
fmul d28, d19, d0
fadd d26, d26, d28
ldr d0, [sp, #7984] ; 8-byte Folded Reload
fmul d21, d21, d0
fadd d21, d26, d21
ldr d0, [sp, #12088] ; 8-byte Folded Reload
fmul d26, d0, d20
fadd d21, d26, d21
ldr q26, [sp, #5872] ; 16-byte Folded Reload
ldr d0, [sp, #7192] ; 8-byte Folded Reload
fsub d21, d21, d0
fmul d16, d1, d16
fadd d16, d16, d21
ldr d0, [sp, #7160] ; 8-byte Folded Reload
fadd d16, d0, d16
ldr d0, [sp, #7120] ; 8-byte Folded Reload
fadd d16, d0, d16
ldr d0, [sp, #7088] ; 8-byte Folded Reload
fadd d16, d0, d16
ldr d0, [sp, #7992] ; 8-byte Folded Reload
fmul d7, d24, d0
fadd d7, d16, d7
ldr d0, [sp, #7104] ; 8-byte Folded Reload
fadd d7, d0, d7
ldr d0, [sp, #7056] ; 8-byte Folded Reload
fadd d21, d0, d7
ldr d28, [sp, #12312] ; 8-byte Folded Reload
fmul d7, d28, d21
fadd d7, d7, d25
ldr d0, [sp, #11880] ; 8-byte Folded Reload
fmul d0, d0, d5
ldr d3, [sp, #7040] ; 8-byte Folded Reload
fsub d0, d0, d3
ldr d3, [sp, #4792] ; 8-byte Folded Reload
fmul d16, d19, d3
fadd d0, d0, d16
ldr d3, [sp, #11872] ; 8-byte Folded Reload
fmul d16, d3, d20
fadd d0, d16, d0
ldr d3, [sp, #7016] ; 8-byte Folded Reload
fsub d0, d0, d3
fmul d16, d1, d23
fadd d0, d16, d0
ldr d1, [sp, #8712] ; 8-byte Folded Reload
fmul d16, d24, d1
ldr d1, [sp, #6848] ; 8-byte Folded Reload
fadd d0, d1, d0
ldr d1, [sp, #6960] ; 8-byte Folded Reload
fadd d0, d1, d0
ldr d1, [sp, #6928] ; 8-byte Folded Reload
fadd d0, d1, d0
fadd d0, d0, d16
ldr d1, [sp, #6944] ; 8-byte Folded Reload
fadd d0, d1, d0
ldr d1, [sp, #6880] ; 8-byte Folded Reload
fsub d1, d0, d1
ldr d4, [sp, #12304] ; 8-byte Folded Reload
fmul d0, d4, d1
fadd d0, d0, d7
fmov d5, d12
ldr d3, [sp, #11528] ; 8-byte Folded Reload
fmul d7, d3, d12
fsub d0, d0, d7
ldr d3, [sp, #6864] ; 8-byte Folded Reload
fsub d0, d0, d3
fmul d7, d8, d17
fadd d0, d7, d0
ldr d3, [sp, #9320] ; 8-byte Folded Reload
fmov d23, d11
fmul d7, d3, d11
ldr d3, [sp, #9328] ; 8-byte Folded Reload
fmul d3, d3, d10
str d3, [sp, #2632] ; 8-byte Folded Spill
fmul d16, d10, d3
fsub d7, d7, d16
ldr d3, [sp, #6912] ; 8-byte Folded Reload
fadd d7, d7, d3
ldr d3, [sp, #8208] ; 8-byte Folded Reload
fmul d16, d7, d3
fadd d0, d16, d0
ldr d24, [sp, #6736] ; 8-byte Folded Reload
fmul d16, d0, d24
fadd d2, d2, d16
ldr d3, [sp, #8200] ; 8-byte Folded Reload
fmul d16, d29, d3
fmul d20, d22, d3
fsub d16, d16, d20
str d21, [sp, #864] ; 8-byte Folded Spill
fmul d20, d4, d21
fadd d16, d20, d16
str d1, [sp, #856] ; 8-byte Folded Spill
fmul d20, d28, d1
fsub d16, d16, d20
ldr d1, [sp, #11432] ; 8-byte Folded Reload
fmul d19, d1, d12
fsub d16, d16, d19
ldr d1, [sp, #7048] ; 8-byte Folded Reload
fsub d16, d16, d1
ldr d1, [sp, #6208] ; 8-byte Folded Reload
fmul d19, d8, d1
fadd d16, d19, d16
ldr d1, [sp, #7976] ; 8-byte Folded Reload
fmul d5, d14, d1
fadd d5, d5, d16
ldr d1, [sp, #6768] ; 8-byte Folded Reload
fmul d3, d7, d1
fadd d3, d3, d5
ldr d1, [sp, #8368] ; 8-byte Folded Reload
fmul d5, d1, d11
ldr d1, [sp, #6992] ; 8-byte Folded Reload
fadd d5, d5, d1
ldr d1, [sp, #5848] ; 8-byte Folded Reload
fadd d7, d6, d1
ldr d1, [sp, #8360] ; 8-byte Folded Reload
fmul d6, d1, d10
fadd d1, d6, d7
str d1, [sp, #2648] ; 8-byte Folded Spill
fmul d6, d10, d1
fadd d5, d6, d5
fadd d2, d3, d2
fmul d1, d5, d31
fadd d6, d2, d1
ldr d1, [sp, #9192] ; 8-byte Folded Reload
fmul d1, d18, d1
ldr d2, [sp, #9184] ; 8-byte Folded Reload
fmul d2, d9, d2
fsub d1, d2, d1
fadd d0, d0, d1
fmul d1, d3, d24
fadd d0, d0, d1
mov x9, #47272
movk x9, #56762, lsl #16
movk x9, #43178, lsl #32
movk x9, #16292, lsl #48
fmov d1, x9
fmul d1, d5, d1
fadd d2, d0, d1
ldr q3, [sp, #11984] ; 16-byte Folded Reload
fmul d0, d3, d6
ldr q16, [sp, #11808] ; 16-byte Folded Reload
fmul d1, d16, d2
fsub d5, d0, d1
ldr d18, [sp, #6432] ; 8-byte Folded Reload
fmul d0, d9, d18
fmul d1, d27, d5
fsub d0, d1, d0
ldr d4, [sp, #8248] ; 8-byte Folded Reload
fmul d1, d9, d4
stp q2, q6, [sp, #608] ; 32-byte Folded Spill
fmul d2, d3, d2
fsub d1, d2, d1
fmul d2, d16, d6
fadd d1, d2, d1
ldr d2, [sp, #9312] ; 8-byte Folded Reload
ldr d16, [sp, #11520] ; 8-byte Folded Reload
fmul d2, d2, d16
ldr d3, [sp, #8680] ; 8-byte Folded Reload
fadd d2, d2, d3
ldr d3, [sp, #9304] ; 8-byte Folded Reload
ldr d6, [sp, #12048] ; 8-byte Folded Reload
fmul d3, d3, d6
fadd d3, d3, d7
str d3, [sp, #2640] ; 8-byte Folded Spill
fmul d3, d6, d3
fadd d2, d3, d2
fmul d2, d2, d4
fadd d1, d1, d2
ldr d2, [sp, #8312] ; 8-byte Folded Reload
fmul d2, d2, d16
ldr d3, [sp, #8304] ; 8-byte Folded Reload
fmul d3, d3, d6
str d3, [sp, #2616] ; 8-byte Folded Spill
fmul d3, d6, d3
fsub d2, d2, d3
ldr d3, [sp, #8672] ; 8-byte Folded Reload
fadd d2, d2, d3
ldr d3, [sp, #8216] ; 8-byte Folded Reload
fmul d2, d2, d3
fsub d3, d1, d2
ldr q17, [sp, #11792] ; 16-byte Folded Reload
fmul d1, d17, d3
fsub d0, d0, d1
ldr d4, [sp, #8296] ; 8-byte Folded Reload
ldr d1, [sp, #11640] ; 8-byte Folded Reload
fmul d1, d4, d1
ldr d2, [sp, #8256] ; 8-byte Folded Reload
fadd d1, d1, d2
ldr d6, [sp, #8288] ; 8-byte Folded Reload
ldr d16, [sp, #12072] ; 8-byte Folded Reload
fmul d2, d6, d16
fadd d2, d2, d7
str d2, [sp, #2624] ; 8-byte Folded Spill
fmul d2, d16, d2
fadd d1, d2, d1
fmul d2, d1, d18
fadd d21, d0, d2
ldr d16, [sp, #8160] ; 8-byte Folded Reload
fmul d0, d9, d16
stp q3, q5, [sp, #560] ; 32-byte Folded Spill
fmul d2, d17, d5
fsub d0, d2, d0
fmul d2, d27, d3
fadd d0, d0, d2
fmul d1, d1, d16
fadd d0, d0, d1
mov x9, #28530
movk x9, #30490, lsl #16
movk x9, #27495, lsl #32
movk x9, #49093, lsl #48
fmov d1, x9
ldr d2, [sp, #9240] ; 8-byte Folded Reload
fmul d3, d2, d1
ldr d2, [sp, #8728] ; 8-byte Folded Reload
fmul d2, d2, d1
str d3, [sp, #9192] ; 8-byte Folded Spill
ldr d1, [sp, #8664] ; 8-byte Folded Reload
fadd d22, d3, d1
ldr d1, [sp, #6152] ; 8-byte Folded Reload
fmov d3, d4
fadd d20, d1, d4
fmul d1, d30, d20
ldr d3, [sp, #6440] ; 8-byte Folded Reload
fsub d25, d3, d1
str d2, [sp, #9200] ; 8-byte Folded Spill
fadd d23, d2, d0
ldr d0, [sp, #6160] ; 8-byte Folded Reload
fmov d1, d6
fadd d19, d0, d6
fmul d0, d30, d19
str d7, [sp, #464] ; 8-byte Folded Spill
fadd d24, d0, d7
cbz x8, LBB19_44
; %bb.43:
ldr q17, [sp, #10720] ; 16-byte Folded Reload
fmul d0, d17, d22
ldr q16, [sp, #11216] ; 16-byte Folded Reload
fmul d1, d16, d26
fmul d2, d30, d25
ldr d6, [sp, #11248] ; 8-byte Folded Reload
fmul d3, d6, d19
fadd d2, d3, d2
mov x9, #39127
movk x9, #24179, lsl #16
movk x9, #24811, lsl #32
movk x9, #16304, lsl #48
fmov d3, x9
fmul d4, d2, d3
fsub d1, d1, d4
fsub d0, d0, d1
fadd d0, d0, d0
mov x9, #33620
movk x9, #2364, lsl #16
movk x9, #33974, lsl #32
movk x9, #16305, lsl #48
fmov d1, x9
fmul d4, d9, d1
fmul d5, d17, d21
fsub d4, d5, d4
fmul d5, d16, d23
fsub d4, d4, d5
fmul d5, d6, d20
fmul d6, d30, d24
fadd d5, d5, d6
fmul d6, d5, d1
fadd d4, d4, d6
fadd d4, d4, d4
fadd d0, d0, d4
ldr d18, [sp, #11400] ; 8-byte Folded Reload
fmul d4, d18, d0
fmul d6, d9, d3
fmul d7, d16, d21
fsub d6, d7, d6
fmul d7, d17, d23
fadd d6, d6, d7
fmul d3, d5, d3
fadd d3, d6, d3
fmul d5, d17, d26
fmul d1, d2, d1
fsub d1, d5, d1
fmul d2, d16, d22
fadd d1, d1, d2
fadd d1, d1, d3
ldr d2, [sp, #11160] ; 8-byte Folded Reload
fmul d2, d2, d1
fsub d2, d4, d2
fmul d2, d18, d2
fmov d5, #0.50000000
fmul d2, d2, d5
ldr d3, [sp, #11392] ; 8-byte Folded Reload
fmul d0, d3, d0
ldr d4, [sp, #11240] ; 8-byte Folded Reload
fmul d1, d4, d1
fadd d0, d1, d0
fmul d0, d3, d0
fmul d0, d0, d5
fsub d0, d2, d0
str d0, [x8, #24]
LBB19_44:
stp d25, d24, [sp, #224] ; 16-byte Folded Spill
stp q23, q22, [sp, #320] ; 32-byte Folded Spill
str q21, [sp, #352] ; 16-byte Folded Spill
str d9, [sp, #496] ; 8-byte Folded Spill
str d20, [sp, #5592] ; 8-byte Folded Spill
str d19, [sp, #5600] ; 8-byte Folded Spill
ldr d0, [sp, #11296] ; 8-byte Folded Reload
ldr d1, [sp, #11480] ; 8-byte Folded Reload
fsub d1, d1, d0
ldr d0, [sp, #12272] ; 8-byte Folded Reload
fadd d17, d1, d0
ldr d0, [sp, #12120] ; 8-byte Folded Reload
fadd d18, d17, d0
ldr d0, [sp, #12032] ; 8-byte Folded Reload
ldr d2, [sp, #12024] ; 8-byte Folded Reload
fadd d22, d2, d0
ldr d0, [sp, #12344] ; 8-byte Folded Reload
fadd d20, d22, d0
ldr d0, [sp, #12128] ; 8-byte Folded Reload
fadd d24, d20, d0
ldr d11, [sp, #11752] ; 8-byte Folded Reload
fadd d2, d24, d11
ldr d0, [sp, #9928] ; 8-byte Folded Reload
fdiv d0, d2, d0
ldr d3, [sp, #9904] ; 8-byte Folded Reload
fmul d7, d3, d0
ldr d3, [sp, #9896] ; 8-byte Folded Reload
fmul d3, d7, d3
fadd d3, d18, d3
ldr d4, [sp, #12184] ; 8-byte Folded Reload
fadd d3, d4, d3
ldr d4, [sp, #10480] ; 8-byte Folded Reload
fmul d4, d4, d3
fmov d5, #0.50000000
fmul d4, d4, d5
fmov d21, #0.50000000
ldr d5, [sp, #10904] ; 8-byte Folded Reload
fmul d4, d5, d4
mov x9, #4632233691727265792
fmov d25, x9
fmul d4, d4, d25
ldr d5, [sp, #10896] ; 8-byte Folded Reload
fmul d5, d5, d3
str d5, [sp, #8688] ; 8-byte Folded Spill
ldr d3, [sp, #10544] ; 8-byte Folded Reload
fmul d5, d3, d5
str d5, [sp, #8664] ; 8-byte Folded Spill
ldr d3, [sp, #10568] ; 8-byte Folded Reload
fmul d3, d3, d5
fmov d29, #3.00000000
fmul d3, d3, d29
fadd d28, d4, d3
ldr d30, [sp, #11672] ; 8-byte Folded Reload
fadd d6, d24, d30
ldr d3, [sp, #10496] ; 8-byte Folded Reload
fdiv d4, d6, d3
str d4, [sp, #8696] ; 8-byte Folded Spill
ldr d3, [sp, #10424] ; 8-byte Folded Reload
fmul d16, d3, d4
ldr d3, [sp, #9880] ; 8-byte Folded Reload
fmul d3, d16, d3
fadd d3, d18, d3
ldr d4, [sp, #12176] ; 8-byte Folded Reload
fadd d3, d4, d3
ldr d4, [sp, #10432] ; 8-byte Folded Reload
fmul d4, d4, d3
fmul d4, d4, d21
ldr d5, [sp, #10888] ; 8-byte Folded Reload
fmul d4, d5, d4
fmul d4, d4, d25
ldr d5, [sp, #10880] ; 8-byte Folded Reload
fmul d5, d5, d3
str d5, [sp, #9176] ; 8-byte Folded Spill
ldr d3, [sp, #10440] ; 8-byte Folded Reload
fmul d5, d3, d5
str d5, [sp, #9112] ; 8-byte Folded Spill
ldr d3, [sp, #10448] ; 8-byte Folded Reload
fmul d3, d3, d5
fmul d3, d3, d29
fadd d23, d4, d3
ldr d3, [sp, #11208] ; 8-byte Folded Reload
fadd d4, d20, d3
str d4, [sp, #8712] ; 8-byte Folded Spill
ldr d3, [sp, #9064] ; 8-byte Folded Reload
fdiv d3, d4, d3
str d3, [sp, #8208] ; 8-byte Folded Spill
ldr d5, [sp, #9080] ; 8-byte Folded Reload
fmul d3, d5, d3
str d3, [sp, #9104] ; 8-byte Folded Spill
ldr d19, [sp, #9056] ; 8-byte Folded Reload
fmul d26, d3, d19
fadd d26, d17, d26
ldr d27, [sp, #11200] ; 8-byte Folded Reload
fadd d26, d27, d26
ldr d19, [sp, #9816] ; 8-byte Folded Reload
fmul d27, d26, d19
fmul d27, d27, d21
fmov d5, #0.50000000
ldr d19, [sp, #10368] ; 8-byte Folded Reload
fmul d27, d19, d27
fmul d25, d27, d25
ldr d19, [sp, #10360] ; 8-byte Folded Reload
fmul d3, d26, d19
str d3, [sp, #9120] ; 8-byte Folded Spill
ldr d19, [sp, #9824] ; 8-byte Folded Reload
fmul d3, d19, d3
str d3, [sp, #9184] ; 8-byte Folded Spill
ldr d19, [sp, #9832] ; 8-byte Folded Reload
fmul d26, d19, d3
fmul d26, d26, d29
fadd d10, d26, d25
mov x9, #54806
movk x9, #23353, lsl #16
movk x9, #56949, lsl #32
movk x9, #16326, lsl #48
fmov d25, x9
fmul d3, d24, d25
mov x9, #50080
movk x9, #49599, lsl #16
movk x9, #32579, lsl #32
movk x9, #16368, lsl #48
mov x10, #6432
movk x10, #24166, lsl #16
movk x10, #7623, lsl #32
movk x10, #16309, lsl #48
fmov d24, x10
fmul d22, d22, d24
fmul d1, d1, d24
fmov d24, x9
fmul d19, d20, d24
str d19, [sp, #9232] ; 8-byte Folded Spill
fmul d21, d17, d24
str d21, [sp, #9240] ; 8-byte Folded Spill
ldr d17, [sp, #10088] ; 8-byte Folded Reload
fsub d26, d17, d3
fmul d24, d18, d25
fsub d17, d26, d19
str d26, [sp, #8240] ; 8-byte Folded Spill
ldr d18, [sp, #9488] ; 8-byte Folded Reload
fadd d14, d18, d17
fsub d18, d14, d22
str d14, [sp, #8256] ; 8-byte Folded Spill
str d18, [sp, #9224] ; 8-byte Folded Spill
fadd d29, d28, d23
fadd d13, d29, d10
ldr d4, [sp, #12056] ; 8-byte Folded Reload
fmul d18, d18, d4
fsub d18, d13, d18
str d13, [sp, #6432] ; 8-byte Folded Spill
str d18, [sp, #2592] ; 8-byte Folded Spill
fmul d18, d4, d18
ldr d19, [sp, #9088] ; 8-byte Folded Reload
fadd d18, d19, d18
ldr d20, [sp, #10080] ; 8-byte Folded Reload
fadd d17, d24, d20
fadd d20, d21, d17
str d17, [sp, #8232] ; 8-byte Folded Spill
ldr d22, [sp, #9496] ; 8-byte Folded Reload
fadd d8, d22, d20
fadd d1, d1, d8
str d8, [sp, #8248] ; 8-byte Folded Spill
str d1, [sp, #9216] ; 8-byte Folded Spill
ldr d19, [sp, #11584] ; 8-byte Folded Reload
fmul d1, d1, d19
fadd d1, d1, d18
str d1, [sp, #8216] ; 8-byte Folded Spill
ldr d18, [sp, #10624] ; 8-byte Folded Reload
fmul d2, d2, d18
ldr d18, [sp, #10912] ; 8-byte Folded Reload
fmul d18, d7, d18
fmul d7, d18, d5
fmov d12, #0.50000000
fsub d2, d2, d7
mov x9, #-7378697629483820647
movk x9, #39322
movk x9, #16361, lsl #48
fmov d25, x9
fmul d18, d18, d25
ldr d20, [sp, #9912] ; 8-byte Folded Reload
fmul d18, d18, d20
fmov d1, #5.00000000
fmul d18, d18, d1
fmov d7, #5.00000000
fsub d2, d2, d18
ldr d18, [sp, #10608] ; 8-byte Folded Reload
fdiv d20, d2, d18
ldur d1, [x29, #-184] ; 8-byte Folded Reload
fmul d2, d1, d28
fmov d4, d1
ldr d18, [sp, #10616] ; 8-byte Folded Reload
fmul d5, d18, d20
ldr d15, [sp, #11888] ; 8-byte Folded Reload
fmul d22, d15, d5
fsub d21, d2, d22
ldr d22, [sp, #10600] ; 8-byte Folded Reload
fmul d20, d22, d20
ldr d22, [sp, #9920] ; 8-byte Folded Reload
fmul d0, d0, d22
fsub d1, d20, d0
str d1, [sp, #5832] ; 8-byte Folded Spill
ldr d2, [sp, #12320] ; 8-byte Folded Reload
fmul d0, d2, d5
str d5, [sp, #8168] ; 8-byte Folded Spill
fmul d20, d4, d1
fsub d0, d0, d20
str d0, [sp, #9208] ; 8-byte Folded Spill
fmul d20, d15, d1
fmul d22, d2, d28
fsub d27, d20, d22
ldur d31, [x29, #-176] ; 8-byte Folded Reload
fmul d20, d31, d0
ldr d0, [sp, #9288] ; 8-byte Folded Reload
fsub d20, d20, d0
fadd d20, d27, d20
ldur d0, [x29, #-168] ; 8-byte Folded Reload
fmul d22, d0, d21
fadd d20, d22, d20
ldr d0, [sp, #11904] ; 8-byte Folded Reload
fmul d22, d0, d21
ldr d0, [sp, #8184] ; 8-byte Folded Reload
fsub d22, d22, d0
ldr d0, [sp, #9280] ; 8-byte Folded Reload
fsub d0, d20, d0
str d0, [sp, #4784] ; 8-byte Folded Spill
ldr d9, [sp, #11376] ; 8-byte Folded Reload
fmul d20, d9, d0
fadd d20, d22, d20
ldr d0, [sp, #10584] ; 8-byte Folded Reload
fmul d6, d6, d0
ldr d0, [sp, #11104] ; 8-byte Folded Reload
fmul d16, d16, d0
fmul d22, d16, d12
fsub d6, d6, d22
fmul d16, d16, d25
ldr d0, [sp, #9888] ; 8-byte Folded Reload
fmul d16, d16, d0
fmul d16, d16, d7
fsub d6, d6, d16
ldr d0, [sp, #10576] ; 8-byte Folded Reload
fdiv d1, d6, d0
str d1, [sp, #5816] ; 8-byte Folded Spill
str d23, [sp, #5840] ; 8-byte Folded Spill
fmov d7, d4
fmul d6, d4, d23
ldr d0, [sp, #10592] ; 8-byte Folded Reload
fmul d0, d0, d1
fmul d16, d15, d0
fmov d18, d0
str d0, [sp, #6192] ; 8-byte Folded Spill
fsub d12, d6, d16
ldr d0, [sp, #11664] ; 8-byte Folded Reload
fmul d6, d0, d12
fadd d6, d6, d20
ldr d0, [sp, #8152] ; 8-byte Folded Reload
fsub d6, d6, d0
mov x9, #51491
movk x9, #54360, lsl #16
movk x9, #13074, lsl #32
movk x9, #49054, lsl #48
fmov d16, x9
fadd d6, d6, d16
fmul d19, d3, d4
ldr d22, [sp, #11568] ; 8-byte Folded Reload
fmul d16, d22, d19
fmov d0, d24
ldr d1, [sp, #11176] ; 8-byte Folded Reload
fmul d20, d24, d1
fadd d16, d16, d20
fmul d20, d3, d15
fmov d4, d3
str d3, [sp, #8728] ; 8-byte Folded Spill
fmul d24, d24, d2
fmov d1, d0
fadd d0, d20, d24
ldr d20, [sp, #11944] ; 8-byte Folded Reload
fmul d20, d20, d0
fmov d3, d0
str d0, [sp, #7976] ; 8-byte Folded Spill
fadd d16, d20, d16
fmul d20, d11, d5
fsub d16, d20, d16
str d28, [sp, #2608] ; 8-byte Folded Spill
ldr d0, [sp, #11736] ; 8-byte Folded Reload
fmul d20, d0, d28
fsub d16, d16, d20
fmul d20, d30, d18
fadd d16, d20, d16
ldr d0, [sp, #11648] ; 8-byte Folded Reload
fmul d20, d0, d23
fsub d16, d16, d20
ldr d0, [sp, #8144] ; 8-byte Folded Reload
fadd d16, d0, d16
ldr d0, [sp, #9264] ; 8-byte Folded Reload
fadd d16, d0, d16
ldr d0, [sp, #8136] ; 8-byte Folded Reload
fadd d0, d0, d16
ldr d2, [sp, #12280] ; 8-byte Folded Reload
str d0, [sp, #1128] ; 8-byte Folded Spill
fmul d16, d2, d0
fadd d16, d16, d6
ldr d28, [sp, #11368] ; 8-byte Folded Reload
fmul d6, d26, d28
fsub d0, d29, d6
str d0, [sp, #2536] ; 8-byte Folded Spill
fmul d6, d28, d0
ldr d0, [sp, #9168] ; 8-byte Folded Reload
fadd d6, d6, d0
ldr d0, [sp, #11536] ; 8-byte Folded Reload
fmul d17, d17, d0
fmov d18, d0
fadd d6, d17, d6
mov x9, #63706
movk x9, #13221, lsl #16
movk x9, #1281, lsl #32
movk x9, #16209, lsl #48
fmov d0, x9
str d0, [sp, #7984] ; 8-byte Folded Spill
fmul d17, d6, d0
fadd d17, d16, d17
str d19, [sp, #4768] ; 8-byte Folded Spill
fmul d16, d7, d19
fmul d20, d15, d3
fadd d16, d16, d20
str d1, [sp, #8720] ; 8-byte Folded Spill
ldr d0, [sp, #11328] ; 8-byte Folded Reload
fmul d20, d1, d0
fadd d16, d16, d20
ldr d0, [sp, #9272] ; 8-byte Folded Reload
fadd d16, d0, d16
mov x9, #54125
movk x9, #53060, lsl #16
movk x9, #15481, lsl #32
movk x9, #16273, lsl #48
fmov d0, x9
str d0, [sp, #8160] ; 8-byte Folded Spill
fmul d20, d16, d0
fsub d17, d17, d20
fmul d20, d4, d22
ldr d0, [sp, #9256] ; 8-byte Folded Reload
fsub d0, d0, d20
str d0, [sp, #6280] ; 8-byte Folded Spill
fmul d0, d0, d2
str d0, [sp, #2528] ; 8-byte Folded Spill
fmul d20, d0, d28
fadd d17, d20, d17
fmul d20, d1, d22
ldr d0, [sp, #9248] ; 8-byte Folded Reload
fsub d1, d0, d20
str d1, [sp, #6272] ; 8-byte Folded Spill
ldr d0, [sp, #11440] ; 8-byte Folded Reload
fmul d20, d1, d0
fadd d17, d20, d17
str d10, [sp, #2600] ; 8-byte Folded Spill
fmul d30, d28, d10
ldr d0, [sp, #11352] ; 8-byte Folded Reload
fmul d20, d0, d30
fadd d20, d20, d17
ldr d0, [sp, #9952] ; 8-byte Folded Reload
ldr d1, [sp, #8712] ; 8-byte Folded Reload
fmul d4, d1, d0
ldr d0, [sp, #9024] ; 8-byte Folded Reload
ldr d1, [sp, #9104] ; 8-byte Folded Reload
fmul d5, d1, d0
fmov d0, #0.50000000
fmul d17, d5, d0
fsub d4, d4, d17
fmul d5, d5, d25
ldr d0, [sp, #9008] ; 8-byte Folded Reload
fmul d5, d5, d0
fmov d0, #5.00000000
fmul d5, d5, d0
fsub d4, d4, d5
ldr d0, [sp, #9944] ; 8-byte Folded Reload
fdiv d4, d4, d0
ldr d0, [sp, #9936] ; 8-byte Folded Reload
fmul d4, d0, d4
ldr d0, [sp, #9048] ; 8-byte Folded Reload
ldr d1, [sp, #8208] ; 8-byte Folded Reload
fmul d3, d1, d0
fsub d0, d4, d3
str d0, [sp, #5824] ; 8-byte Folded Spill
fmul d11, d28, d0
fmul d3, d31, d11
ldr d0, [sp, #7968] ; 8-byte Folded Reload
fadd d3, d0, d3
ldur d7, [x29, #-168] ; 8-byte Folded Reload
fmul d4, d7, d30
fsub d3, d4, d3
ldr d0, [sp, #8128] ; 8-byte Folded Reload
fsub d4, d20, d0
ldr d0, [sp, #7960] ; 8-byte Folded Reload
fsub d17, d3, d0
ldr d10, [sp, #11128] ; 8-byte Folded Reload
fmul d3, d10, d17
str d17, [sp, #5424] ; 8-byte Folded Spill
fadd d20, d4, d3
fmul d3, d14, d28
fsub d0, d13, d3
str d0, [sp, #2504] ; 8-byte Folded Spill
fmul d3, d28, d0
ldr d0, [sp, #9160] ; 8-byte Folded Reload
fadd d3, d0, d3
fmul d4, d8, d18
fmov d8, d18
fadd d13, d4, d3
mov x9, #4354980839667269632
mov x10, #47272
movk x10, #56762, lsl #16
movk x10, #43178, lsl #32
movk x10, #16292, lsl #48
fmov d0, x9
str d0, [sp, #9104] ; 8-byte Folded Spill
fmul d3, d20, d0
fmov d0, x10
str d0, [sp, #8712] ; 8-byte Folded Spill
fmul d4, d13, d0
fsub d19, d3, d4
ldr d22, [sp, #11864] ; 8-byte Folded Reload
ldr d2, [sp, #9208] ; 8-byte Folded Reload
fmul d3, d22, d2
ldr d0, [sp, #7920] ; 8-byte Folded Reload
fsub d3, d3, d0
ldr d0, [sp, #12200] ; 8-byte Folded Reload
fmul d4, d0, d27
fadd d3, d4, d3
ldur d29, [x29, #-216] ; 8-byte Folded Reload
fmul d4, d29, d21
fadd d3, d4, d3
ldr d0, [sp, #11976] ; 8-byte Folded Reload
fmul d4, d0, d21
ldr d0, [sp, #8120] ; 8-byte Folded Reload
fsub d4, d4, d0
ldr d0, [sp, #7912] ; 8-byte Folded Reload
fsub d0, d3, d0
fmul d3, d9, d0
fmov d26, d0
fadd d3, d4, d3
ldr d0, [sp, #12064] ; 8-byte Folded Reload
fmul d4, d0, d12
fadd d3, d4, d3
ldr d0, [sp, #8112] ; 8-byte Folded Reload
fsub d3, d3, d0
mov x9, #46543
movk x9, #48510, lsl #16
movk x9, #46414, lsl #32
movk x9, #16260, lsl #48
fmov d4, x9
fadd d3, d3, d4
ldr d0, [sp, #8096] ; 8-byte Folded Reload
fadd d3, d0, d3
mov x9, #62994
movk x9, #14722, lsl #16
movk x9, #41829, lsl #32
movk x9, #16247, lsl #48
fmov d0, x9
str d0, [sp, #7992] ; 8-byte Folded Spill
fmul d4, d16, d0
fadd d3, d3, d4
mov x9, #18456
movk x9, #63321, lsl #16
movk x9, #33926, lsl #32
movk x9, #48991, lsl #48
ldr d0, [sp, #8080] ; 8-byte Folded Reload
fadd d3, d0, d3
ldr d0, [sp, #8072] ; 8-byte Folded Reload
fadd d0, d0, d3
fmov d1, x9
str d1, [sp, #6208] ; 8-byte Folded Spill
fmul d3, d6, d1
ldur d1, [x29, #-208] ; 8-byte Folded Reload
fmul d4, d1, d0
fmov d23, d1
fadd d3, d4, d3
ldr d18, [sp, #12016] ; 8-byte Folded Reload
fmul d4, d18, d2
ldr d5, [sp, #7904] ; 8-byte Folded Reload
fsub d4, d4, d5
str d27, [sp, #1056] ; 8-byte Folded Spill
ldr d1, [sp, #12160] ; 8-byte Folded Reload
fmul d5, d1, d27
fadd d4, d5, d4
ldr d24, [sp, #12336] ; 8-byte Folded Reload
fmul d5, d24, d21
fadd d4, d5, d4
ldr d1, [sp, #12192] ; 8-byte Folded Reload
fmul d2, d1, d21
ldr d1, [sp, #8064] ; 8-byte Folded Reload
fsub d2, d2, d1
ldr d1, [sp, #7888] ; 8-byte Folded Reload
fsub d1, d4, d1
fmul d4, d9, d1
fadd d2, d2, d4
ldr d25, [sp, #4784] ; 8-byte Folded Reload
fmov d21, d7
fmul d4, d7, d25
fmul d5, d29, d26
str d26, [sp, #4752] ; 8-byte Folded Spill
fadd d4, d4, d5
fmul d5, d24, d1
fmov d9, d1
str d1, [sp, #4744] ; 8-byte Folded Spill
fadd d4, d5, d4
mov x9, #-7378697629483820647
movk x9, #39322
movk x9, #16297, lsl #48
fmov d1, x9
str d1, [sp, #5792] ; 8-byte Folded Spill
fmul d4, d4, d1
fadd d2, d2, d4
ldr d1, [sp, #12104] ; 8-byte Folded Reload
fmul d4, d1, d12
fadd d2, d4, d2
ldr d1, [sp, #8056] ; 8-byte Folded Reload
fsub d2, d2, d1
mov x9, #57269
movk x9, #60105, lsl #16
movk x9, #55991, lsl #32
movk x9, #16301, lsl #48
fmov d4, x9
fadd d2, d2, d4
ldr d1, [sp, #8032] ; 8-byte Folded Reload
fadd d2, d1, d2
mov x9, #56877
movk x9, #10885, lsl #16
movk x9, #2572, lsl #32
movk x9, #16289, lsl #48
fmov d1, x9
str d1, [sp, #5800] ; 8-byte Folded Spill
fmul d4, d16, d1
fadd d2, d2, d4
ldr d1, [sp, #8048] ; 8-byte Folded Reload
fsub d2, d2, d1
ldr d1, [sp, #8040] ; 8-byte Folded Reload
fadd d1, d1, d2
ldur d4, [x29, #-200] ; 8-byte Folded Reload
fmul d2, d4, d1
fmov d5, d4
fadd d2, d2, d3
ldr d3, [sp, #11600] ; 8-byte Folded Reload
fmul d3, d3, d30
fadd d2, d3, d2
str d11, [sp, #5768] ; 8-byte Folded Spill
ldr d3, [sp, #12136] ; 8-byte Folded Reload
fmul d3, d3, d11
ldr d4, [sp, #7952] ; 8-byte Folded Reload
fadd d3, d4, d3
ldr d7, [sp, #11496] ; 8-byte Folded Reload
fmul d4, d7, d30
fsub d3, d4, d3
ldr d4, [sp, #8024] ; 8-byte Folded Reload
fsub d2, d2, d4
ldr d4, [sp, #7944] ; 8-byte Folded Reload
fsub d31, d3, d4
fmul d3, d10, d31
fadd d2, d2, d3
mov x9, #26288
movk x9, #13902, lsl #16
movk x9, #44107, lsl #32
movk x9, #16338, lsl #48
fmov d3, x9
fadd d2, d2, d3
ldr d3, [sp, #9232] ; 8-byte Folded Reload
fmul d3, d3, d28
str d3, [sp, #2432] ; 8-byte Folded Spill
fmul d3, d28, d3
ldr d4, [sp, #9240] ; 8-byte Folded Reload
fmul d4, d4, d8
fadd d3, d3, d4
ldr d4, [sp, #8016] ; 8-byte Folded Reload
fadd d16, d3, d4
mov x9, #45974
movk x9, #34787, lsl #16
movk x9, #35902, lsl #32
movk x9, #16285, lsl #48
fmov d3, x9
str d3, [sp, #6736] ; 8-byte Folded Spill
fmul d3, d16, d3
fadd d27, d3, d2
mov x9, #36544
movk x9, #43611, lsl #16
movk x9, #860, lsl #32
movk x9, #16326, lsl #48
fmov d2, x9
str d2, [sp, #6424] ; 8-byte Folded Spill
fmul d2, d6, d2
str d0, [sp, #936] ; 8-byte Folded Spill
fmul d3, d5, d0
fsub d2, d2, d3
str d1, [sp, #888] ; 8-byte Folded Spill
fmul d3, d23, d1
fadd d2, d3, d2
ldr d0, [sp, #11464] ; 8-byte Folded Reload
fmul d3, d0, d30
fadd d2, d3, d2
ldr d6, [sp, #11760] ; 8-byte Folded Reload
fmul d3, d6, d30
ldr d0, [sp, #11896] ; 8-byte Folded Reload
fmul d4, d0, d11
ldr d0, [sp, #7936] ; 8-byte Folded Reload
fadd d4, d0, d4
fsub d3, d3, d4
ldr d0, [sp, #8008] ; 8-byte Folded Reload
fsub d2, d2, d0
ldr d0, [sp, #7928] ; 8-byte Folded Reload
fsub d5, d3, d0
fmul d3, d10, d5
fadd d2, d2, d3
fmul d3, d21, d17
fmov d30, d21
fmul d4, d7, d31
fmov d17, d31
str d31, [sp, #5408] ; 8-byte Folded Spill
fadd d3, d3, d4
fmul d4, d6, d5
str d5, [sp, #5472] ; 8-byte Folded Spill
fadd d3, d4, d3
mov x9, #43516
movk x9, #54001, lsl #16
movk x9, #25165, lsl #32
movk x9, #16240, lsl #48
fmov d0, x9
str d0, [sp, #5808] ; 8-byte Folded Spill
fmul d3, d3, d0
fadd d2, d2, d3
mov x9, #21969
movk x9, #1325, lsl #16
movk x9, #7976, lsl #32
movk x9, #16367, lsl #48
fmov d3, x9
fadd d2, d2, d3
mov x9, #49235
movk x9, #28989, lsl #16
movk x9, #40841, lsl #32
movk x9, #16312, lsl #48
fmov d0, x9
str d0, [sp, #5784] ; 8-byte Folded Spill
fmul d6, d16, d0
fadd d16, d6, d2
fadd d2, d19, d27
mov x9, #4363988038922010624
fmov d4, x9
str d4, [sp, #5776] ; 8-byte Folded Spill
fmul d6, d16, d4
fadd d0, d2, d6
mov x9, #43115
movk x9, #62349, lsl #16
movk x9, #30721, lsl #32
movk x9, #16347, lsl #48
fmov d1, x9
str d1, [sp, #6768] ; 8-byte Folded Spill
ldr d2, [sp, #8216] ; 8-byte Folded Reload
fmul d1, d2, d1
ldr q3, [sp, #12000] ; 16-byte Folded Reload
fmul d2, d3, d0
mov.16b v6, v3
fsub d1, d2, d1
mov x9, #4359484439294640128
fmov d2, x9
str d2, [sp, #5744] ; 8-byte Folded Spill
fmul d20, d20, d2
mov x9, #62612
movk x9, #18904, lsl #16
movk x9, #1144, lsl #32
movk x9, #49064, lsl #48
fmov d2, x9
str d2, [sp, #5480] ; 8-byte Folded Spill
fmul d13, d13, d2
fsub d20, d13, d20
fmul d27, d27, d4
fadd d20, d20, d27
fadd d7, d20, d16
ldr q13, [sp, #11840] ; 16-byte Folded Reload
fmul d16, d13, d7
fadd d1, d16, d1
mov x9, #31036
movk x9, #52462, lsl #16
movk x9, #23267, lsl #32
movk x9, #16406, lsl #48
fmov d16, x9
fsub d1, d16, d1
mov x9, #5915
movk x9, #64709, lsl #16
movk x9, #30489, lsl #32
movk x9, #16392, lsl #48
fmov d16, x9
ldr d2, [sp, #12024] ; 8-byte Folded Reload
fmul d3, d2, d16
ldr d2, [sp, #11480] ; 8-byte Folded Reload
fmul d4, d2, d16
ldr d2, [sp, #12056] ; 8-byte Folded Reload
fmul d16, d3, d2
str d3, [sp, #8216] ; 8-byte Folded Spill
str d16, [sp, #2416] ; 8-byte Folded Spill
fmul d16, d2, d16
ldr d2, [sp, #11584] ; 8-byte Folded Reload
fmul d20, d4, d2
str d4, [sp, #8208] ; 8-byte Folded Spill
fadd d16, d16, d20
ldr d19, [sp, #6752] ; 8-byte Folded Reload
fsub d16, d19, d16
mov x9, #2356
movk x9, #12413, lsl #16
movk x9, #55910, lsl #32
movk x9, #16327, lsl #48
fmov d2, x9
str d2, [sp, #6216] ; 8-byte Folded Spill
fmul d16, d16, d2
fsub d19, d1, d16
str q7, [sp, #416] ; 16-byte Folded Spill
fmul d1, d6, d7
str q0, [sp, #448] ; 16-byte Folded Spill
fmul d16, d13, d0
fsub d0, d1, d16
ldr d1, [sp, #9224] ; 8-byte Folded Reload
fsub d1, d1, d3
ldr d7, [sp, #8560] ; 8-byte Folded Reload
fadd d1, d7, d1
str d1, [sp, #8200] ; 8-byte Folded Spill
ldr d11, [sp, #12208] ; 8-byte Folded Reload
fmul d1, d1, d11
ldr d2, [sp, #6432] ; 8-byte Folded Reload
fsub d1, d2, d1
str d1, [sp, #2424] ; 8-byte Folded Spill
fmul d1, d11, d1
ldr d2, [sp, #8192] ; 8-byte Folded Reload
fadd d1, d2, d1
ldr d2, [sp, #9216] ; 8-byte Folded Reload
fadd d16, d4, d2
ldr d7, [sp, #8568] ; 8-byte Folded Reload
fadd d2, d7, d16
str d2, [sp, #8192] ; 8-byte Folded Spill
ldr d3, [sp, #11784] ; 8-byte Folded Reload
fmul d16, d2, d3
fadd d16, d16, d1
mov x9, #52090
movk x9, #42545, lsl #16
movk x9, #26349, lsl #32
movk x9, #16345, lsl #48
ldr q10, [sp, #11824] ; 16-byte Folded Reload
fmul d1, d10, d0
fmov d2, x9
str d2, [sp, #6200] ; 8-byte Folded Spill
fmul d27, d16, d2
fsub d1, d1, d27
ldr q31, [sp, #12240] ; 16-byte Folded Reload
fmul d27, d31, d19
fsub d1, d27, d1
mov x9, #42186
movk x9, #52566, lsl #16
movk x9, #11879, lsl #32
movk x9, #16425, lsl #48
fmov d27, x9
fadd d1, d1, d27
ldr d2, [sp, #9192] ; 8-byte Folded Reload
fadd d1, d2, d1
str q1, [sp, #4608] ; 16-byte Folded Spill
mov x9, #11201
movk x9, #50599, lsl #16
movk x9, #31589, lsl #32
movk x9, #16242, lsl #48
fmov d1, x9
str d1, [sp, #9192] ; 8-byte Folded Spill
fmul d16, d16, d1
stp q0, q19, [sp, #368] ; 32-byte Folded Spill
fmul d27, d31, d0
fsub d16, d27, d16
fmul d27, d10, d19
fadd d0, d16, d27
str q0, [sp, #4592] ; 16-byte Folded Spill
ldr d1, [sp, #10960] ; 8-byte Folded Reload
ldr d0, [sp, #8664] ; 8-byte Folded Reload
fmul d16, d1, d0
mov x9, #10523
movk x9, #38535, lsl #16
movk x9, #12921, lsl #32
movk x9, #49410, lsl #48
fmov d23, x9
fmul d23, d16, d23
ldr d1, [sp, #10976] ; 8-byte Folded Reload
ldr d0, [sp, #8688] ; 8-byte Folded Reload
fmul d16, d1, d0
fmov d1, #0.50000000
fmul d16, d16, d1
ldr d0, [sp, #10320] ; 8-byte Folded Reload
fmul d21, d0, d16
mov x9, #211106232532992
movk x9, #16498, lsl #48
fmov d16, x9
fmul d21, d21, d16
ldr d0, [sp, #10344] ; 8-byte Folded Reload
fmul d23, d0, d23
ldr d0, [sp, #10312] ; 8-byte Folded Reload
fdiv d23, d23, d0
ldr d0, [sp, #10296] ; 8-byte Folded Reload
fmul d23, d0, d23
fadd d21, d21, d23
ldur d2, [x29, #-176] ; 8-byte Folded Reload
fmul d23, d2, d25
fmul d27, d22, d26
fadd d23, d23, d27
fmul d27, d18, d9
fadd d27, d27, d23
fadd d21, d21, d27
fmul d23, d27, d1
fsub d21, d21, d23
ldr d0, [sp, #10392] ; 8-byte Folded Reload
ldr d1, [sp, #5816] ; 8-byte Folded Reload
fmul d23, d0, d1
ldr d1, [sp, #10936] ; 8-byte Folded Reload
ldr d0, [sp, #8696] ; 8-byte Folded Reload
fmul d19, d0, d1
fsub d4, d23, d19
ldr d8, [sp, #12320] ; 8-byte Folded Reload
ldr d20, [sp, #6192] ; 8-byte Folded Reload
fmul d19, d8, d20
ldur d9, [x29, #-184] ; 8-byte Folded Reload
fmul d23, d9, d4
fsub d19, d19, d23
fmul d23, d15, d4
ldr d0, [sp, #5840] ; 8-byte Folded Reload
fmul d14, d8, d0
fsub d0, d23, d14
fmul d23, d2, d19
ldr d1, [sp, #8920] ; 8-byte Folded Reload
fsub d23, d23, d1
fadd d23, d0, d23
fmul d14, d30, d12
fadd d23, d14, d23
fmul d14, d22, d19
ldr d1, [sp, #7712] ; 8-byte Folded Reload
fsub d14, d14, d1
ldr d1, [sp, #12200] ; 8-byte Folded Reload
fmul d26, d1, d0
fadd d26, d26, d14
fmul d14, d29, d12
fadd d26, d14, d26
ldr d1, [sp, #8896] ; 8-byte Folded Reload
fsub d15, d23, d1
ldr d1, [sp, #7704] ; 8-byte Folded Reload
fsub d30, d26, d1
fmul d26, d2, d15
fmul d14, d22, d30
fadd d26, d26, d14
fmul d14, d18, d19
ldr d1, [sp, #7696] ; 8-byte Folded Reload
fsub d14, d14, d1
str d0, [sp, #872] ; 8-byte Folded Spill
ldr d1, [sp, #12160] ; 8-byte Folded Reload
fmul d7, d1, d0
fadd d7, d7, d14
fmul d29, d24, d12
fadd d7, d29, d7
ldr d0, [sp, #7680] ; 8-byte Folded Reload
fsub d14, d7, d0
fmul d7, d18, d14
fadd d7, d7, d26
ldr d0, [sp, #10944] ; 8-byte Folded Reload
ldr d1, [sp, #9112] ; 8-byte Folded Reload
fmul d26, d0, d1
mov x9, #18811
movk x9, #34700, lsl #16
movk x9, #61210, lsl #32
movk x9, #49411, lsl #48
fmov d29, x9
fmul d26, d26, d29
ldr d0, [sp, #10952] ; 8-byte Folded Reload
ldr d1, [sp, #9176] ; 8-byte Folded Reload
fmul d29, d0, d1
fmov d1, #0.50000000
fmul d29, d29, d1
ldr d0, [sp, #10336] ; 8-byte Folded Reload
fmul d29, d0, d29
fmul d29, d29, d16
ldr d0, [sp, #10352] ; 8-byte Folded Reload
fmul d26, d0, d26
ldr d0, [sp, #10328] ; 8-byte Folded Reload
fdiv d26, d26, d0
ldr d0, [sp, #10304] ; 8-byte Folded Reload
fmul d26, d0, d26
fadd d26, d29, d26
fsub d27, d21, d27
fsub d27, d27, d7
fadd d26, d26, d7
fmul d7, d7, d1
fmov d1, #0.50000000
fsub d7, d26, d7
fadd d29, d7, d27
ldr d6, [sp, #5424] ; 8-byte Folded Reload
fmul d7, d2, d6
ldr d0, [sp, #12136] ; 8-byte Folded Reload
fmul d26, d0, d17
fadd d7, d7, d26
ldr d0, [sp, #11896] ; 8-byte Folded Reload
fmul d26, d0, d5
fadd d27, d26, d7
ldr d0, [sp, #10376] ; 8-byte Folded Reload
ldr d5, [sp, #9120] ; 8-byte Folded Reload
fmul d7, d0, d5
fmul d7, d7, d1
ldr d0, [sp, #9712] ; 8-byte Folded Reload
fmul d7, d0, d7
fmul d7, d7, d16
ldr d0, [sp, #10384] ; 8-byte Folded Reload
ldr d5, [sp, #9184] ; 8-byte Folded Reload
fmul d16, d0, d5
mov x9, #45572
movk x9, #23979, lsl #16
movk x9, #34811, lsl #32
movk x9, #49413, lsl #48
fmov d26, x9
fmul d16, d16, d26
ldr d0, [sp, #9736] ; 8-byte Folded Reload
fmul d16, d0, d16
ldr d0, [sp, #9728] ; 8-byte Folded Reload
fdiv d16, d16, d0
ldr d0, [sp, #9720] ; 8-byte Folded Reload
fmul d16, d0, d16
fadd d7, d7, d16
fadd d7, d7, d27
fmul d16, d27, d1
fsub d23, d7, d16
ldr d0, [sp, #11904] ; 8-byte Folded Reload
ldr d16, [sp, #9208] ; 8-byte Folded Reload
fmul d7, d0, d16
ldr d0, [sp, #7824] ; 8-byte Folded Reload
fsub d7, d7, d0
ldr d17, [sp, #11384] ; 8-byte Folded Reload
fmov d0, d25
fmul d26, d17, d25
fadd d7, d7, d26
ldr d0, [sp, #11664] ; 8-byte Folded Reload
fmul d26, d0, d19
fadd d7, d26, d7
ldr d0, [sp, #7816] ; 8-byte Folded Reload
fsub d7, d7, d0
ldr d3, [sp, #11280] ; 8-byte Folded Reload
fmul d26, d3, d15
fadd d7, d26, d7
ldr d2, [sp, #7984] ; 8-byte Folded Reload
fmul d26, d29, d2
fadd d7, d7, d26
ldr d0, [sp, #11936] ; 8-byte Folded Reload
ldr d1, [sp, #7976] ; 8-byte Folded Reload
fmul d26, d0, d1
ldr d12, [sp, #8728] ; 8-byte Folded Reload
ldr d0, [sp, #11176] ; 8-byte Folded Reload
fmul d15, d12, d0
fsub d26, d26, d15
ldr d0, [sp, #8720] ; 8-byte Folded Reload
fmul d5, d0, d9
ldr d0, [sp, #11568] ; 8-byte Folded Reload
fmul d15, d0, d5
fadd d26, d15, d26
ldur d15, [x29, #-256] ; 8-byte Folded Reload
ldr d0, [sp, #12184] ; 8-byte Folded Reload
ldr d18, [sp, #8168] ; 8-byte Folded Reload
fmul d18, d0, d18
fsub d18, d26, d18
ldr d24, [sp, #5832] ; 8-byte Folded Reload
ldr d0, [sp, #11736] ; 8-byte Folded Reload
fmul d26, d0, d24
fadd d18, d18, d26
ldr d0, [sp, #12176] ; 8-byte Folded Reload
fmul d22, d0, d20
fsub d18, d18, d22
ldr d0, [sp, #11648] ; 8-byte Folded Reload
fmul d22, d0, d4
fadd d18, d22, d18
ldr d0, [sp, #8992] ; 8-byte Folded Reload
fadd d18, d0, d18
ldr d0, [sp, #7792] ; 8-byte Folded Reload
fadd d18, d0, d18
ldr d0, [sp, #7768] ; 8-byte Folded Reload
fadd d0, d0, d18
str d0, [sp, #1032] ; 8-byte Folded Spill
ldr d25, [sp, #12280] ; 8-byte Folded Reload
fmul d18, d25, d0
fadd d7, d18, d7
fmul d18, d8, d1
ldr d1, [sp, #11328] ; 8-byte Folded Reload
fmul d22, d12, d1
fsub d18, d22, d18
str d5, [sp, #4728] ; 8-byte Folded Spill
fmul d22, d9, d5
fsub d18, d18, d22
ldr d0, [sp, #7632] ; 8-byte Folded Reload
fadd d7, d0, d7
ldr d0, [sp, #7624] ; 8-byte Folded Reload
fadd d7, d0, d7
ldr d0, [sp, #9072] ; 8-byte Folded Reload
fadd d26, d18, d0
ldr d0, [sp, #8160] ; 8-byte Folded Reload
fmul d18, d26, d0
fsub d7, d7, d18
ldr d8, [sp, #11536] ; 8-byte Folded Reload
ldr d0, [sp, #8240] ; 8-byte Folded Reload
fmul d18, d0, d8
ldr d0, [sp, #7592] ; 8-byte Folded Reload
fadd d22, d18, d0
fmov d0, d24
str d4, [sp, #2408] ; 8-byte Folded Spill
fadd d18, d24, d4
ldr d0, [sp, #8232] ; 8-byte Folded Reload
fmul d24, d0, d28
fadd d0, d24, d18
str d0, [sp, #2400] ; 8-byte Folded Spill
fmul d24, d28, d0
fadd d24, d24, d22
fmul d22, d24, d2
fsub d7, d7, d22
ldr d0, [sp, #6280] ; 8-byte Folded Reload
ldr d1, [sp, #11440] ; 8-byte Folded Reload
fmul d22, d0, d1
fadd d7, d22, d7
ldr d0, [sp, #6272] ; 8-byte Folded Reload
fmul d0, d0, d25
str d0, [sp, #2392] ; 8-byte Folded Spill
fmul d22, d0, d28
fsub d7, d7, d22
ldr d0, [sp, #11352] ; 8-byte Folded Reload
ldr d20, [sp, #5768] ; 8-byte Folded Reload
fmul d22, d0, d20
fsub d7, d7, d22
ldr d0, [sp, #7616] ; 8-byte Folded Reload
fsub d7, d7, d0
ldr d9, [sp, #10968] ; 8-byte Folded Reload
fmul d22, d9, d6
fadd d22, d22, d7
fsub d7, d29, d27
fadd d27, d7, d23
ldr d0, [sp, #5744] ; 8-byte Folded Reload
fmul d2, d22, d0
ldr d0, [sp, #5480] ; 8-byte Folded Reload
fmul d6, d27, d0
fsub d2, d6, d2
ldr d0, [sp, #6208] ; 8-byte Folded Reload
fmul d6, d29, d0
mov x9, #18456
movk x9, #63321, lsl #16
movk x9, #33926, lsl #32
movk x9, #16223, lsl #48
fmov d7, x9
fmul d7, d24, d7
fadd d6, d6, d7
fmov d1, d16
ldr d0, [sp, #12192] ; 8-byte Folded Reload
fmul d7, d0, d16
ldr d0, [sp, #7608] ; 8-byte Folded Reload
fsub d7, d7, d0
ldr d0, [sp, #4744] ; 8-byte Folded Reload
fmul d25, d17, d0
fadd d7, d7, d25
ldr q25, [sp, #4608] ; 16-byte Folded Reload
ldr d0, [sp, #5792] ; 8-byte Folded Reload
fmul d21, d21, d0
fadd d7, d7, d21
ldr d0, [sp, #12104] ; 8-byte Folded Reload
fmul d21, d0, d19
fadd d7, d21, d7
ldr d0, [sp, #7600] ; 8-byte Folded Reload
fsub d7, d7, d0
fmul d21, d3, d14
fadd d7, d21, d7
ldr d0, [sp, #7568] ; 8-byte Folded Reload
fadd d7, d0, d7
ldr d0, [sp, #7536] ; 8-byte Folded Reload
fadd d7, d0, d7
ldr d0, [sp, #7520] ; 8-byte Folded Reload
fadd d7, d0, d7
ldr d0, [sp, #5800] ; 8-byte Folded Reload
fmul d5, d26, d0
fadd d5, d7, d5
ldr d0, [sp, #7504] ; 8-byte Folded Reload
fadd d5, d0, d5
ldr d0, [sp, #7488] ; 8-byte Folded Reload
fadd d21, d0, d5
ldur d16, [x29, #-200] ; 8-byte Folded Reload
fmul d5, d16, d21
fadd d5, d5, d6
ldr d0, [sp, #11976] ; 8-byte Folded Reload
fmul d0, d0, d1
ldr d6, [sp, #7472] ; 8-byte Folded Reload
fsub d0, d0, d6
ldr d1, [sp, #4752] ; 8-byte Folded Reload
fmul d6, d17, d1
fadd d0, d0, d6
ldr d1, [sp, #12064] ; 8-byte Folded Reload
fmul d6, d1, d19
fadd d0, d6, d0
ldr d6, [sp, #7456] ; 8-byte Folded Reload
fsub d0, d0, d6
fmul d6, d3, d30
fadd d0, d6, d0
ldr d1, [sp, #7992] ; 8-byte Folded Reload
fmul d6, d26, d1
ldr q26, [sp, #4592] ; 16-byte Folded Reload
ldr d1, [sp, #7440] ; 8-byte Folded Reload
fadd d0, d1, d0
ldr d1, [sp, #7432] ; 8-byte Folded Reload
fadd d0, d1, d0
ldr d1, [sp, #7408] ; 8-byte Folded Reload
fadd d0, d1, d0
fadd d0, d0, d6
ldr d1, [sp, #7392] ; 8-byte Folded Reload
fadd d0, d1, d0
ldr d1, [sp, #7384] ; 8-byte Folded Reload
fsub d1, d0, d1
ldur d4, [x29, #-208] ; 8-byte Folded Reload
fmul d0, d4, d1
fadd d0, d0, d5
ldr d3, [sp, #11600] ; 8-byte Folded Reload
fmul d5, d3, d20
fsub d0, d0, d5
ldr d5, [sp, #7344] ; 8-byte Folded Reload
fsub d0, d0, d5
ldr d3, [sp, #5408] ; 8-byte Folded Reload
fmul d5, d9, d3
fadd d0, d5, d0
ldr d3, [sp, #9232] ; 8-byte Folded Reload
fmov d17, d8
fmul d5, d3, d8
ldr d3, [sp, #9240] ; 8-byte Folded Reload
fmul d3, d3, d28
str d3, [sp, #2368] ; 8-byte Folded Spill
fmul d6, d28, d3
fsub d5, d5, d6
ldr d6, [sp, #7360] ; 8-byte Folded Reload
fadd d5, d5, d6
ldr d3, [sp, #6736] ; 8-byte Folded Reload
fmul d6, d5, d3
fadd d0, d6, d0
ldr d19, [sp, #5776] ; 8-byte Folded Reload
fmul d6, d0, d19
fadd d2, d2, d6
ldr d3, [sp, #6424] ; 8-byte Folded Reload
fmul d6, d29, d3
fmul d7, d24, d3
fsub d6, d6, d7
str d21, [sp, #824] ; 8-byte Folded Spill
fmul d7, d4, d21
fadd d6, d7, d6
str d1, [sp, #792] ; 8-byte Folded Spill
fmul d7, d16, d1
fsub d6, d6, d7
ldr d1, [sp, #11464] ; 8-byte Folded Reload
fmul d7, d1, d20
fsub d6, d6, d7
ldr d1, [sp, #7336] ; 8-byte Folded Reload
fsub d6, d6, d1
ldr d1, [sp, #5472] ; 8-byte Folded Reload
fmul d7, d9, d1
fadd d6, d7, d6
ldr d1, [sp, #5808] ; 8-byte Folded Reload
fmul d4, d23, d1
fadd d4, d4, d6
ldr d1, [sp, #5784] ; 8-byte Folded Reload
fmul d3, d5, d1
fadd d3, d3, d4
ldr d1, [sp, #8256] ; 8-byte Folded Reload
fmul d4, d1, d8
ldr d1, [sp, #7328] ; 8-byte Folded Reload
fadd d4, d4, d1
ldr d1, [sp, #5824] ; 8-byte Folded Reload
fadd d6, d18, d1
ldr d1, [sp, #8248] ; 8-byte Folded Reload
fmul d5, d1, d28
fadd d1, d5, d6
str d1, [sp, #2384] ; 8-byte Folded Spill
fmul d5, d28, d1
fadd d4, d5, d4
fadd d2, d3, d2
mov x9, #62612
movk x9, #18904, lsl #16
movk x9, #1144, lsl #32
movk x9, #16296, lsl #48
fmov d5, x9
fmul d5, d4, d5
fadd d1, d2, d5
ldr d2, [sp, #9104] ; 8-byte Folded Reload
fmul d2, d22, d2
ldr d7, [sp, #8712] ; 8-byte Folded Reload
fmul d5, d27, d7
fsub d2, d2, d5
fadd d0, d0, d2
fmul d2, d3, d19
fadd d0, d0, d2
fmul d2, d4, d7
fadd d3, d0, d2
ldr q4, [sp, #12000] ; 16-byte Folded Reload
fmul d0, d4, d1
fmul d2, d13, d3
fsub d5, d0, d2
ldr d18, [sp, #9192] ; 8-byte Folded Reload
fmul d0, d27, d18
fmul d2, d31, d5
fsub d0, d2, d0
ldr d17, [sp, #6768] ; 8-byte Folded Reload
fmul d2, d27, d17
stp q3, q1, [sp, #288] ; 32-byte Folded Spill
fmul d3, d4, d3
fsub d2, d3, d2
fmul d3, d13, d1
fadd d2, d3, d2
ldr d1, [sp, #9224] ; 8-byte Folded Reload
ldr d16, [sp, #11584] ; 8-byte Folded Reload
fmul d3, d1, d16
ldr d1, [sp, #6456] ; 8-byte Folded Reload
fadd d3, d3, d1
ldr d1, [sp, #9216] ; 8-byte Folded Reload
ldr d7, [sp, #12056] ; 8-byte Folded Reload
fmul d4, d1, d7
fadd d1, d4, d6
str d1, [sp, #2376] ; 8-byte Folded Spill
fmul d4, d7, d1
fadd d3, d4, d3
fmul d3, d3, d17
fadd d2, d2, d3
ldr d1, [sp, #8216] ; 8-byte Folded Reload
fmul d3, d1, d16
ldr d1, [sp, #8208] ; 8-byte Folded Reload
fmul d1, d1, d7
str d1, [sp, #2352] ; 8-byte Folded Spill
fmul d4, d7, d1
fsub d3, d3, d4
ldr d1, [sp, #6448] ; 8-byte Folded Reload
fadd d3, d3, d1
ldr d1, [sp, #6216] ; 8-byte Folded Reload
fmul d3, d3, d1
fsub d4, d2, d3
fmul d2, d10, d4
fsub d0, d0, d2
ldr d7, [sp, #8200] ; 8-byte Folded Reload
ldr d1, [sp, #11784] ; 8-byte Folded Reload
fmul d2, d7, d1
ldr d1, [sp, #8176] ; 8-byte Folded Reload
fadd d2, d2, d1
ldr d16, [sp, #8192] ; 8-byte Folded Reload
fmul d3, d16, d11
fadd d1, d3, d6
str d1, [sp, #2360] ; 8-byte Folded Spill
fmul d3, d11, d1
fadd d2, d3, d2
fmul d1, d2, d18
fadd d21, d0, d1
ldr d3, [sp, #6200] ; 8-byte Folded Reload
fmul d0, d27, d3
stp q4, q5, [sp, #256] ; 32-byte Folded Spill
fmul d1, d10, d5
fsub d0, d1, d0
fmul d1, d31, d4
fadd d0, d0, d1
fmul d1, d2, d3
fadd d0, d0, d1
ldr d1, [sp, #9200] ; 8-byte Folded Reload
fadd d22, d1, d0
ldr d0, [sp, #8520] ; 8-byte Folded Reload
fmov d31, d7
fadd d20, d0, d7
fmul d0, d15, d20
ldr d1, [sp, #6432] ; 8-byte Folded Reload
fsub d24, d1, d0
ldr d0, [sp, #8512] ; 8-byte Folded Reload
fmov d1, d16
fadd d19, d0, d16
fmul d0, d15, d19
str d6, [sp, #184] ; 8-byte Folded Spill
fadd d23, d0, d6
cbz x8, LBB19_46
; %bb.45:
ldr q17, [sp, #10736] ; 16-byte Folded Reload
fmul d0, d17, d25
ldr q16, [sp, #11136] ; 16-byte Folded Reload
fmul d1, d16, d26
fmul d2, d15, d24
ldr d6, [sp, #11248] ; 8-byte Folded Reload
fmul d3, d6, d19
fadd d2, d3, d2
mov x9, #39127
movk x9, #24179, lsl #16
movk x9, #24811, lsl #32
movk x9, #16304, lsl #48
fmov d3, x9
fmul d4, d2, d3
fsub d1, d1, d4
fsub d0, d0, d1
fadd d0, d0, d0
mov x9, #33620
movk x9, #2364, lsl #16
movk x9, #33974, lsl #32
movk x9, #16305, lsl #48
fmov d1, x9
fmul d4, d27, d1
fmul d5, d17, d21
fsub d4, d5, d4
fmul d5, d16, d22
fsub d4, d4, d5
fmul d5, d6, d20
fmul d6, d15, d23
fadd d5, d5, d6
fmul d6, d5, d1
fadd d4, d4, d6
fadd d4, d4, d4
fadd d0, d0, d4
ldr d18, [sp, #11400] ; 8-byte Folded Reload
fmul d4, d18, d0
fmul d6, d27, d3
fmul d7, d16, d21
fsub d6, d7, d6
fmul d7, d17, d22
fadd d6, d6, d7
fmul d3, d5, d3
fadd d3, d6, d3
fmul d5, d17, d26
fmul d1, d2, d1
fsub d1, d5, d1
fmul d2, d16, d25
fadd d1, d1, d2
fadd d1, d1, d3
ldr d2, [sp, #11160] ; 8-byte Folded Reload
fmul d2, d2, d1
fsub d2, d4, d2
fmul d2, d18, d2
fmov d3, #0.50000000
fmul d2, d2, d3
ldr d4, [sp, #11392] ; 8-byte Folded Reload
fmul d0, d4, d0
ldr d5, [sp, #11240] ; 8-byte Folded Reload
fmul d1, d5, d1
fadd d0, d1, d0
fmul d0, d4, d0
fmul d0, d0, d3
fsub d0, d2, d0
str d0, [x8, #32]
LBB19_46:
stp d24, d23, [sp, #64] ; 16-byte Folded Spill
stp q22, q21, [sp, #144] ; 32-byte Folded Spill
str d27, [sp, #240] ; 8-byte Folded Spill
str d20, [sp, #5472] ; 8-byte Folded Spill
str d19, [sp, #5480] ; 8-byte Folded Spill
ldr d27, [sp, #11304] ; 8-byte Folded Reload
ldr d0, [sp, #12328] ; 8-byte Folded Reload
fsub d1, d0, d27
ldr d0, [sp, #12256] ; 8-byte Folded Reload
fadd d18, d1, d0
ldr d26, [sp, #12112] ; 8-byte Folded Reload
ldur d0, [x29, #-224] ; 8-byte Folded Reload
fadd d19, d26, d0
ldr d0, [sp, #12264] ; 8-byte Folded Reload
fadd d21, d19, d0
ldr d15, [sp, #11912] ; 8-byte Folded Reload
fadd d3, d21, d15
ldr d0, [sp, #9808] ; 8-byte Folded Reload
fdiv d0, d3, d0
ldr d2, [sp, #9768] ; 8-byte Folded Reload
fmul d17, d2, d0
ldr d2, [sp, #9760] ; 8-byte Folded Reload
fmul d2, d17, d2
fadd d2, d18, d2
ldr d4, [sp, #12288] ; 8-byte Folded Reload
fadd d2, d4, d2
ldr d4, [sp, #10264] ; 8-byte Folded Reload
fmul d4, d4, d2
fmov d5, #0.50000000
fmul d4, d4, d5
fmov d10, #0.50000000
ldr d5, [sp, #10816] ; 8-byte Folded Reload
fmul d4, d5, d4
mov x9, #4632233691727265792
fmov d22, x9
fmul d4, d4, d22
ldr d5, [sp, #10808] ; 8-byte Folded Reload
fmul d5, d5, d2
str d5, [sp, #6200] ; 8-byte Folded Spill
ldr d2, [sp, #10280] ; 8-byte Folded Reload
fmul d5, d2, d5
str d5, [sp, #6192] ; 8-byte Folded Spill
ldr d2, [sp, #10288] ; 8-byte Folded Reload
fmul d2, d2, d5
fmov d29, #3.00000000
fmul d2, d2, d29
fadd d30, d4, d2
ldr d12, [sp, #11744] ; 8-byte Folded Reload
fadd d5, d21, d12
ldr d2, [sp, #10272] ; 8-byte Folded Reload
fdiv d4, d5, d2
str d4, [sp, #6424] ; 8-byte Folded Spill
ldr d2, [sp, #10224] ; 8-byte Folded Reload
fmul d16, d2, d4
ldr d2, [sp, #9744] ; 8-byte Folded Reload
fmul d2, d16, d2
fadd d2, d18, d2
ldr d4, [sp, #12168] ; 8-byte Folded Reload
fadd d2, d4, d2
ldr d4, [sp, #10240] ; 8-byte Folded Reload
fmul d4, d4, d2
fmul d4, d4, d10
ldr d6, [sp, #10800] ; 8-byte Folded Reload
fmul d4, d6, d4
fmul d4, d4, d22
ldr d6, [sp, #10792] ; 8-byte Folded Reload
fmul d6, d6, d2
str d6, [sp, #9104] ; 8-byte Folded Spill
ldr d2, [sp, #10248] ; 8-byte Folded Reload
fmul d6, d2, d6
str d6, [sp, #7984] ; 8-byte Folded Spill
ldr d2, [sp, #10256] ; 8-byte Folded Reload
fmul d2, d2, d6
fmul d2, d2, d29
fadd d28, d4, d2
ldr d2, [sp, #11192] ; 8-byte Folded Reload
fadd d4, d19, d2
str d4, [sp, #7976] ; 8-byte Folded Spill
ldr d2, [sp, #8808] ; 8-byte Folded Reload
fdiv d2, d4, d2
str d2, [sp, #8688] ; 8-byte Folded Spill
ldr d6, [sp, #8816] ; 8-byte Folded Reload
fmul d2, d6, d2
str d2, [sp, #6768] ; 8-byte Folded Spill
ldr d20, [sp, #8800] ; 8-byte Folded Reload
fmul d24, d2, d20
fadd d24, d1, d24
ldr d20, [sp, #11184] ; 8-byte Folded Reload
fadd d24, d20, d24
ldr d20, [sp, #9688] ; 8-byte Folded Reload
fmul d25, d24, d20
fmul d25, d25, d10
ldr d20, [sp, #10128] ; 8-byte Folded Reload
fmul d25, d20, d25
fmul d22, d25, d22
ldr d20, [sp, #10120] ; 8-byte Folded Reload
fmul d2, d24, d20
str d2, [sp, #7992] ; 8-byte Folded Spill
ldr d20, [sp, #9696] ; 8-byte Folded Reload
fmul d2, d20, d2
str d2, [sp, #9112] ; 8-byte Folded Spill
ldr d20, [sp, #9704] ; 8-byte Folded Reload
fmul d24, d20, d2
fmul d24, d24, d29
fadd d11, d24, d22
mov x9, #54806
movk x9, #23353, lsl #16
movk x9, #56949, lsl #32
movk x9, #16326, lsl #48
fmov d22, x9
fmul d2, d21, d22
ldr d21, [sp, #10072] ; 8-byte Folded Reload
fsub d8, d21, d2
mov x9, #50080
movk x9, #49599, lsl #16
movk x9, #32579, lsl #32
movk x9, #16368, lsl #48
fmov d21, x9
fmul d7, d19, d21
str d7, [sp, #9184] ; 8-byte Folded Spill
fsub d19, d8, d7
str d8, [sp, #8176] ; 8-byte Folded Spill
ldr d24, [sp, #9408] ; 8-byte Folded Reload
fadd d4, d24, d19
str d4, [sp, #9200] ; 8-byte Folded Spill
mov x9, #6432
movk x9, #24166, lsl #16
movk x9, #7623, lsl #32
movk x9, #16309, lsl #48
fmov d24, x9
fmul d19, d26, d24
fsub d7, d4, d19
str d7, [sp, #9208] ; 8-byte Folded Spill
fadd d31, d30, d28
fadd d4, d31, d11
str d4, [sp, #8664] ; 8-byte Folded Spill
ldr d29, [sp, #12048] ; 8-byte Folded Reload
fmul d25, d7, d29
fsub d20, d4, d25
str d20, [sp, #2328] ; 8-byte Folded Spill
fmul d25, d29, d20
ldr d20, [sp, #8704] ; 8-byte Folded Reload
fadd d25, d20, d25
fmul d7, d1, d21
str d7, [sp, #9176] ; 8-byte Folded Spill
fmul d4, d18, d22
ldr d1, [sp, #10064] ; 8-byte Folded Reload
fadd d19, d4, d1
fadd d1, d7, d19
str d19, [sp, #8160] ; 8-byte Folded Spill
ldr d18, [sp, #9416] ; 8-byte Folded Reload
fadd d14, d18, d1
fmul d1, d27, d24
fsub d1, d14, d1
str d14, [sp, #8168] ; 8-byte Folded Spill
str d1, [sp, #9192] ; 8-byte Folded Spill
ldr d18, [sp, #11520] ; 8-byte Folded Reload
fmul d1, d1, d18
fadd d1, d1, d25
str d1, [sp, #8696] ; 8-byte Folded Spill
ldr d18, [sp, #10536] ; 8-byte Folded Reload
fmul d3, d3, d18
ldr d1, [sp, #10824] ; 8-byte Folded Reload
fmul d18, d1, d17
fmul d17, d18, d10
fsub d3, d3, d17
mov x9, #-7378697629483820647
movk x9, #39322
movk x9, #16361, lsl #48
fmov d17, x9
fmul d18, d18, d17
ldr d1, [sp, #9776] ; 8-byte Folded Reload
fmul d18, d18, d1
fmov d1, #5.00000000
fmul d18, d18, d1
fmov d6, #5.00000000
fsub d3, d3, d18
ldr d18, [sp, #10520] ; 8-byte Folded Reload
fdiv d21, d3, d18
ldr d27, [sp, #12232] ; 8-byte Folded Reload
fmul d3, d27, d30
ldr d18, [sp, #10528] ; 8-byte Folded Reload
fmul d23, d18, d21
ldr d25, [sp, #11968] ; 8-byte Folded Reload
fmul d22, d25, d23
fsub d7, d3, d22
ldr d20, [sp, #10512] ; 8-byte Folded Reload
fmul d21, d20, d21
ldr d1, [sp, #9792] ; 8-byte Folded Reload
fmul d0, d0, d1
fsub d1, d21, d0
str d1, [sp, #5808] ; 8-byte Folded Spill
ldur d3, [x29, #-248] ; 8-byte Folded Reload
fmul d0, d3, d23
fmov d26, d23
str d23, [sp, #5792] ; 8-byte Folded Spill
fmul d21, d27, d1
fsub d0, d0, d21
str d0, [sp, #9120] ; 8-byte Folded Spill
fmul d21, d25, d1
fmul d22, d3, d30
fsub d13, d21, d22
ldur d20, [x29, #-192] ; 8-byte Folded Reload
fmul d21, d20, d0
ldr d0, [sp, #8976] ; 8-byte Folded Reload
fsub d21, d21, d0
fadd d21, d13, d21
ldur d23, [x29, #-160] ; 8-byte Folded Reload
fmul d22, d23, d7
fadd d21, d22, d21
ldr d0, [sp, #11728] ; 8-byte Folded Reload
fmul d22, d0, d7
ldr d0, [sp, #7880] ; 8-byte Folded Reload
fsub d22, d22, d0
ldr d0, [sp, #8968] ; 8-byte Folded Reload
fsub d0, d21, d0
str d0, [sp, #4696] ; 8-byte Folded Spill
ldr d9, [sp, #11472] ; 8-byte Folded Reload
fmul d21, d9, d0
fadd d21, d22, d21
ldr d0, [sp, #10416] ; 8-byte Folded Reload
fmul d5, d5, d0
ldr d0, [sp, #11072] ; 8-byte Folded Reload
fmul d16, d0, d16
fmul d22, d16, d10
fsub d5, d5, d22
fmul d16, d16, d17
ldr d0, [sp, #9752] ; 8-byte Folded Reload
fmul d16, d16, d0
fmul d16, d16, d6
fsub d5, d5, d16
ldr d0, [sp, #10408] ; 8-byte Folded Reload
fdiv d1, d5, d0
str d1, [sp, #5312] ; 8-byte Folded Spill
str d28, [sp, #5816] ; 8-byte Folded Spill
fmul d5, d27, d28
ldr d0, [sp, #10472] ; 8-byte Folded Reload
fmul d0, d0, d1
fmul d16, d25, d0
fmov d18, d0
str d0, [sp, #5296] ; 8-byte Folded Spill
fsub d29, d5, d16
ldr d0, [sp, #11560] ; 8-byte Folded Reload
fmul d5, d0, d29
fadd d5, d5, d21
ldr d0, [sp, #7872] ; 8-byte Folded Reload
fsub d5, d5, d0
mov x9, #51491
movk x9, #54360, lsl #16
movk x9, #13074, lsl #32
movk x9, #16286, lsl #48
fmov d16, x9
fadd d5, d5, d16
fmul d1, d2, d27
ldr d6, [sp, #11512] ; 8-byte Folded Reload
fmul d16, d6, d1
ldr d0, [sp, #11168] ; 8-byte Folded Reload
fmov d22, d4
fmul d21, d4, d0
fadd d16, d16, d21
fmul d21, d2, d25
fmov d4, d2
str d2, [sp, #8712] ; 8-byte Folded Spill
fmul d24, d22, d3
fadd d2, d21, d24
ldr d0, [sp, #11928] ; 8-byte Folded Reload
fmul d21, d0, d2
fmov d24, d2
str d2, [sp, #5744] ; 8-byte Folded Spill
fadd d16, d21, d16
fmul d21, d15, d26
fsub d16, d21, d16
str d30, [sp, #2344] ; 8-byte Folded Spill
ldr d0, [sp, #11720] ; 8-byte Folded Reload
fmul d21, d0, d30
fsub d16, d16, d21
fmul d21, d12, d18
fadd d16, d21, d16
ldr d0, [sp, #11696] ; 8-byte Folded Reload
fmul d21, d0, d28
fsub d16, d16, d21
ldr d0, [sp, #7864] ; 8-byte Folded Reload
fadd d16, d0, d16
ldr d0, [sp, #8960] ; 8-byte Folded Reload
fadd d16, d0, d16
ldr d0, [sp, #7856] ; 8-byte Folded Reload
fadd d0, d0, d16
ldur d2, [x29, #-240] ; 8-byte Folded Reload
str d0, [sp, #648] ; 8-byte Folded Spill
fmul d16, d2, d0
fadd d5, d16, d5
ldr d26, [sp, #11624] ; 8-byte Folded Reload
fmul d16, d8, d26
fsub d0, d31, d16
str d0, [sp, #2320] ; 8-byte Folded Spill
fmul d16, d26, d0
ldr d0, [sp, #8856] ; 8-byte Folded Reload
fadd d16, d16, d0
ldr d0, [sp, #11504] ; 8-byte Folded Reload
fmul d19, d19, d0
fmov d28, d0
fadd d30, d19, d16
mov x9, #63706
movk x9, #13221, lsl #16
movk x9, #1281, lsl #32
movk x9, #16209, lsl #48
fmov d0, x9
str d0, [sp, #5768] ; 8-byte Folded Spill
fmul d19, d30, d0
fsub d19, d5, d19
str d1, [sp, #4688] ; 8-byte Folded Spill
fmul d5, d27, d1
fmul d21, d25, d24
fadd d5, d5, d21
str d22, [sp, #8704] ; 8-byte Folded Spill
ldr d0, [sp, #11312] ; 8-byte Folded Reload
fmul d21, d22, d0
fadd d5, d5, d21
ldr d0, [sp, #8952] ; 8-byte Folded Reload
fadd d5, d0, d5
mov x9, #54125
movk x9, #53060, lsl #16
movk x9, #15481, lsl #32
movk x9, #16273, lsl #48
fmov d0, x9
str d0, [sp, #5784] ; 8-byte Folded Spill
fmul d21, d5, d0
fadd d19, d21, d19
fmul d21, d4, d6
ldr d0, [sp, #8944] ; 8-byte Folded Reload
fsub d0, d0, d21
str d0, [sp, #6216] ; 8-byte Folded Spill
fmul d0, d0, d2
str d0, [sp, #2312] ; 8-byte Folded Spill
fmul d21, d26, d0
fadd d19, d21, d19
fmul d21, d22, d6
ldr d0, [sp, #8928] ; 8-byte Folded Reload
fsub d0, d0, d21
str d0, [sp, #6208] ; 8-byte Folded Spill
ldr d1, [sp, #11424] ; 8-byte Folded Reload
fmul d21, d0, d1
fadd d19, d21, d19
str d11, [sp, #2336] ; 8-byte Folded Spill
fmul d12, d26, d11
ldr d0, [sp, #11320] ; 8-byte Folded Reload
fmul d21, d0, d12
fadd d21, d21, d19
ldr d0, [sp, #9856] ; 8-byte Folded Reload
ldr d1, [sp, #7976] ; 8-byte Folded Reload
fmul d4, d1, d0
ldr d0, [sp, #8784] ; 8-byte Folded Reload
ldr d1, [sp, #6768] ; 8-byte Folded Reload
fmul d6, d1, d0
fmul d19, d6, d10
fsub d4, d4, d19
fmul d6, d6, d17
ldr d0, [sp, #8776] ; 8-byte Folded Reload
fmul d6, d6, d0
fmov d0, #5.00000000
fmul d6, d6, d0
fsub d4, d4, d6
ldr d0, [sp, #9848] ; 8-byte Folded Reload
fdiv d4, d4, d0
ldr d0, [sp, #9840] ; 8-byte Folded Reload
fmul d4, d0, d4
ldr d0, [sp, #8792] ; 8-byte Folded Reload
ldr d1, [sp, #8688] ; 8-byte Folded Reload
fmul d2, d1, d0
fsub d0, d4, d2
str d0, [sp, #5800] ; 8-byte Folded Spill
fmul d0, d26, d0
fmul d2, d20, d0
fmov d20, d0
ldr d0, [sp, #7320] ; 8-byte Folded Reload
fadd d2, d0, d2
fmov d22, d23
fmul d4, d23, d12
fsub d2, d4, d2
ldr d0, [sp, #7848] ; 8-byte Folded Reload
fsub d4, d21, d0
ldr d0, [sp, #7296] ; 8-byte Folded Reload
fsub d0, d2, d0
ldr d1, [sp, #11096] ; 8-byte Folded Reload
fmul d2, d1, d0
fmov d8, d0
str d0, [sp, #2256] ; 8-byte Folded Spill
fadd d24, d4, d2
ldr d0, [sp, #9200] ; 8-byte Folded Reload
fmul d2, d0, d26
ldr d0, [sp, #8664] ; 8-byte Folded Reload
fsub d0, d0, d2
str d0, [sp, #2304] ; 8-byte Folded Spill
fmul d2, d26, d0
ldr d0, [sp, #8832] ; 8-byte Folded Reload
fadd d2, d0, d2
fmul d6, d14, d28
fmov d15, d28
fadd d11, d6, d2
mov x9, #4354980839667269632
mov x10, #47272
movk x10, #56762, lsl #16
movk x10, #43178, lsl #32
movk x10, #49060, lsl #48
fmov d0, x9
str d0, [sp, #7976] ; 8-byte Folded Spill
fmul d2, d24, d0
fmov d0, x10
str d0, [sp, #6736] ; 8-byte Folded Spill
fmul d6, d11, d0
fsub d6, d6, d2
ldr d16, [sp, #11768] ; 8-byte Folded Reload
ldr d23, [sp, #9120] ; 8-byte Folded Reload
fmul d2, d16, d23
ldr d0, [sp, #7256] ; 8-byte Folded Reload
fsub d2, d2, d0
ldr d0, [sp, #12144] ; 8-byte Folded Reload
fmul d17, d0, d13
fadd d2, d17, d2
ldur d10, [x29, #-232] ; 8-byte Folded Reload
fmul d17, d10, d7
fadd d2, d17, d2
ldr d0, [sp, #11880] ; 8-byte Folded Reload
fmul d17, d0, d7
ldr d0, [sp, #7840] ; 8-byte Folded Reload
fsub d17, d17, d0
ldr d0, [sp, #7248] ; 8-byte Folded Reload
fsub d14, d2, d0
fmul d2, d9, d14
fadd d2, d17, d2
ldr d0, [sp, #11872] ; 8-byte Folded Reload
fmul d17, d0, d29
fadd d2, d17, d2
ldr d0, [sp, #7832] ; 8-byte Folded Reload
fsub d2, d2, d0
mov x9, #46543
movk x9, #48510, lsl #16
movk x9, #46414, lsl #32
movk x9, #16260, lsl #48
fmov d17, x9
fadd d2, d2, d17
ldr d0, [sp, #7776] ; 8-byte Folded Reload
fadd d2, d0, d2
mov x9, #62994
movk x9, #14722, lsl #16
movk x9, #41829, lsl #32
movk x9, #16247, lsl #48
fmov d0, x9
str d0, [sp, #5776] ; 8-byte Folded Spill
fmul d17, d5, d0
fadd d2, d2, d17
mov x9, #18456
movk x9, #63321, lsl #16
movk x9, #33926, lsl #32
movk x9, #48991, lsl #48
ldr d0, [sp, #7744] ; 8-byte Folded Reload
fadd d2, d0, d2
ldr d0, [sp, #7728] ; 8-byte Folded Reload
fadd d0, d0, d2
fmov d2, x9
str d2, [sp, #5392] ; 8-byte Folded Spill
fmul d2, d30, d2
ldr d3, [sp, #12304] ; 8-byte Folded Reload
fmul d17, d3, d0
fmov d31, d3
fadd d2, d17, d2
ldr d4, [sp, #11776] ; 8-byte Folded Reload
fmul d17, d4, d23
ldr d19, [sp, #7240] ; 8-byte Folded Reload
fsub d17, d17, d19
str d13, [sp, #600] ; 8-byte Folded Spill
ldr d3, [sp, #12152] ; 8-byte Folded Reload
fmul d21, d3, d13
fadd d17, d21, d17
ldr d28, [sp, #12296] ; 8-byte Folded Reload
fmul d21, d28, d7
fadd d17, d21, d17
ldr d3, [sp, #12096] ; 8-byte Folded Reload
fmul d3, d3, d7
ldr d21, [sp, #7720] ; 8-byte Folded Reload
fsub d3, d3, d21
ldr d21, [sp, #7232] ; 8-byte Folded Reload
fsub d7, d17, d21
str d7, [sp, #4656] ; 8-byte Folded Spill
fmul d17, d9, d7
fadd d3, d3, d17
ldr d18, [sp, #4696] ; 8-byte Folded Reload
fmul d17, d22, d18
fmul d21, d10, d14
str d14, [sp, #4672] ; 8-byte Folded Spill
fadd d17, d17, d21
fmul d21, d28, d7
fadd d17, d21, d17
mov x9, #-7378697629483820647
movk x9, #39322
movk x9, #16297, lsl #48
fmov d7, x9
str d7, [sp, #5264] ; 8-byte Folded Spill
fmul d17, d17, d7
fadd d3, d3, d17
ldr d7, [sp, #12088] ; 8-byte Folded Reload
fmul d17, d7, d29
fadd d3, d17, d3
ldr d17, [sp, #7688] ; 8-byte Folded Reload
fsub d3, d3, d17
mov x9, #57269
movk x9, #60105, lsl #16
movk x9, #55991, lsl #32
movk x9, #16301, lsl #48
fmov d17, x9
fadd d3, d3, d17
ldr d17, [sp, #7672] ; 8-byte Folded Reload
fadd d3, d17, d3
mov x9, #56877
movk x9, #10885, lsl #16
movk x9, #2572, lsl #32
movk x9, #16289, lsl #48
fmov d7, x9
str d7, [sp, #5280] ; 8-byte Folded Spill
fmul d5, d5, d7
fadd d3, d3, d5
ldr d5, [sp, #7664] ; 8-byte Folded Reload
fsub d3, d3, d5
ldr d5, [sp, #7656] ; 8-byte Folded Reload
fadd d17, d5, d3
ldr d5, [sp, #12312] ; 8-byte Folded Reload
fmul d3, d5, d17
fmov d7, d5
fadd d2, d3, d2
ldr d3, [sp, #11528] ; 8-byte Folded Reload
fmul d3, d3, d12
fadd d2, d3, d2
str d20, [sp, #4640] ; 8-byte Folded Spill
ldr d3, [sp, #11688] ; 8-byte Folded Reload
fmul d3, d3, d20
ldr d5, [sp, #7288] ; 8-byte Folded Reload
fadd d3, d5, d3
ldr d13, [sp, #11488] ; 8-byte Folded Reload
fmul d5, d13, d12
fsub d3, d5, d3
ldr d5, [sp, #7648] ; 8-byte Folded Reload
fsub d2, d2, d5
ldr d5, [sp, #7280] ; 8-byte Folded Reload
fsub d5, d3, d5
fmul d3, d1, d5
fmov d19, d5
str d5, [sp, #2248] ; 8-byte Folded Spill
fadd d2, d2, d3
mov x9, #26288
movk x9, #13902, lsl #16
movk x9, #44107, lsl #32
movk x9, #16338, lsl #48
fmov d3, x9
fadd d2, d2, d3
ldr d3, [sp, #9184] ; 8-byte Folded Reload
fmul d3, d3, d26
str d3, [sp, #2296] ; 8-byte Folded Spill
fmul d3, d26, d3
ldr d5, [sp, #9176] ; 8-byte Folded Reload
fmul d5, d5, d15
fadd d3, d3, d5
ldr d5, [sp, #7640] ; 8-byte Folded Reload
fadd d5, d3, d5
mov x9, #45974
movk x9, #34787, lsl #16
movk x9, #35902, lsl #32
movk x9, #16285, lsl #48
fmov d3, x9
str d3, [sp, #5376] ; 8-byte Folded Spill
fmul d3, d5, d3
fadd d15, d3, d2
mov x9, #36544
movk x9, #43611, lsl #16
movk x9, #860, lsl #32
movk x9, #16326, lsl #48
fmov d2, x9
str d2, [sp, #5328] ; 8-byte Folded Spill
fmul d2, d30, d2
str d0, [sp, #536] ; 8-byte Folded Spill
fmul d3, d7, d0
fsub d2, d2, d3
str d17, [sp, #528] ; 8-byte Folded Spill
fmul d3, d31, d17
fadd d2, d3, d2
ldr d0, [sp, #11432] ; 8-byte Folded Reload
fmul d3, d0, d12
fadd d2, d3, d2
ldr d0, [sp, #11680] ; 8-byte Folded Reload
fmul d3, d0, d12
ldr d31, [sp, #11632] ; 8-byte Folded Reload
fmul d30, d31, d20
ldr d9, [sp, #7272] ; 8-byte Folded Reload
fadd d30, d9, d30
fsub d3, d3, d30
ldr d30, [sp, #7560] ; 8-byte Folded Reload
fsub d2, d2, d30
ldr d30, [sp, #7264] ; 8-byte Folded Reload
fsub d7, d3, d30
fmul d3, d1, d7
fadd d2, d2, d3
fmul d3, d22, d8
fmul d30, d13, d19
fadd d3, d3, d30
fmul d30, d0, d7
fmov d19, d7
fadd d30, d30, d3
mov x9, #43516
movk x9, #54001, lsl #16
movk x9, #25165, lsl #32
movk x9, #16240, lsl #48
fmov d0, x9
str d0, [sp, #5248] ; 8-byte Folded Spill
fmul d30, d30, d0
fadd d2, d2, d30
mov x9, #21969
movk x9, #1325, lsl #16
movk x9, #7976, lsl #32
movk x9, #16367, lsl #48
fmov d30, x9
fadd d30, d2, d30
mov x9, #49235
movk x9, #28989, lsl #16
movk x9, #40841, lsl #32
movk x9, #16312, lsl #48
fmov d0, x9
str d0, [sp, #4680] ; 8-byte Folded Spill
fmul d5, d5, d0
fadd d5, d5, d30
fadd d6, d6, d15
mov x9, #4363988038922010624
fmov d0, x9
fmul d30, d5, d0
fmov d3, d0
str d0, [sp, #4648] ; 8-byte Folded Spill
fadd d0, d6, d30
mov x9, #43115
movk x9, #62349, lsl #16
movk x9, #30721, lsl #32
movk x9, #16347, lsl #48
fmov d1, x9
str d1, [sp, #5360] ; 8-byte Folded Spill
ldr d2, [sp, #8696] ; 8-byte Folded Reload
fmul d1, d2, d1
ldr q2, [sp, #11984] ; 16-byte Folded Reload
fmul d6, d2, d0
mov.16b v17, v2
fsub d1, d6, d1
mov x9, #4359484439294640128
fmov d2, x9
str d2, [sp, #4624] ; 8-byte Folded Spill
fmul d12, d24, d2
mov x9, #62612
movk x9, #18904, lsl #16
movk x9, #1144, lsl #32
movk x9, #16296, lsl #48
fmov d2, x9
str d2, [sp, #2240] ; 8-byte Folded Spill
fmul d11, d11, d2
fsub d11, d12, d11
fmul d12, d15, d3
fadd d11, d11, d12
fadd d7, d11, d5
ldr q13, [sp, #11808] ; 16-byte Folded Reload
fmul d5, d13, d7
fadd d1, d5, d1
mov x9, #31036
movk x9, #52462, lsl #16
movk x9, #23267, lsl #32
movk x9, #16406, lsl #48
fmov d5, x9
fsub d1, d5, d1
mov x9, #2356
movk x9, #12413, lsl #16
movk x9, #55910, lsl #32
movk x9, #16327, lsl #48
fmov d3, x9
str d3, [sp, #5344] ; 8-byte Folded Spill
ldr d2, [sp, #9096] ; 8-byte Folded Reload
fmul d5, d2, d3
fsub d2, d1, d5
str q7, [sp, #5408] ; 16-byte Folded Spill
fmul d1, d17, d7
str q0, [sp, #5424] ; 16-byte Folded Spill
fmul d5, d13, d0
fsub d7, d1, d5
ldr d0, [sp, #8480] ; 8-byte Folded Reload
ldr d1, [sp, #9208] ; 8-byte Folded Reload
fadd d0, d0, d1
str d0, [sp, #8696] ; 8-byte Folded Spill
ldr d3, [sp, #12072] ; 8-byte Folded Reload
fmul d1, d0, d3
ldr d0, [sp, #8664] ; 8-byte Folded Reload
fsub d0, d0, d1
str d0, [sp, #2288] ; 8-byte Folded Spill
fmul d1, d3, d0
ldr d0, [sp, #8488] ; 8-byte Folded Reload
ldr d3, [sp, #9192] ; 8-byte Folded Reload
fadd d3, d0, d3
str d3, [sp, #8688] ; 8-byte Folded Spill
ldr d0, [sp, #11640] ; 8-byte Folded Reload
fmul d5, d3, d0
fadd d5, d5, d1
mov x9, #52090
movk x9, #42545, lsl #16
movk x9, #26349, lsl #32
movk x9, #16345, lsl #48
ldr q0, [sp, #11792] ; 16-byte Folded Reload
fmul d1, d0, d7
mov.16b v3, v0
fmov d0, x9
str d0, [sp, #9096] ; 8-byte Folded Spill
fmul d15, d5, d0
fsub d1, d1, d15
ldr q12, [sp, #11952] ; 16-byte Folded Reload
fmul d15, d12, d2
fsub d0, d15, d1
str q0, [sp, #4576] ; 16-byte Folded Spill
mov x9, #11201
movk x9, #50599, lsl #16
movk x9, #31589, lsl #32
movk x9, #16242, lsl #48
fmov d0, x9
str d0, [sp, #4664] ; 8-byte Folded Spill
fmul d5, d5, d0
stp q7, q2, [sp, #192] ; 32-byte Folded Spill
fmul d15, d12, d7
fsub d5, d15, d5
fmul d15, d3, d2
fadd d0, d5, d15
str q0, [sp, #6768] ; 16-byte Folded Spill
ldr d0, [sp, #10856] ; 8-byte Folded Reload
ldr d1, [sp, #6192] ; 8-byte Folded Reload
fmul d5, d0, d1
mov x9, #10523
movk x9, #38535, lsl #16
movk x9, #12921, lsl #32
movk x9, #49410, lsl #48
fmov d23, x9
fmul d5, d5, d23
ldr d0, [sp, #10864] ; 8-byte Folded Reload
ldr d1, [sp, #6200] ; 8-byte Folded Reload
fmul d7, d0, d1
fmov d1, #0.50000000
fmul d7, d7, d1
ldr d0, [sp, #10200] ; 8-byte Folded Reload
fmul d7, d0, d7
mov x9, #211106232532992
movk x9, #16498, lsl #48
fmov d15, x9
fmul d7, d7, d15
ldr d0, [sp, #10160] ; 8-byte Folded Reload
fmul d5, d0, d5
ldr d0, [sp, #10152] ; 8-byte Folded Reload
fdiv d5, d5, d0
ldr d0, [sp, #10168] ; 8-byte Folded Reload
fmul d5, d0, d5
fadd d5, d7, d5
fmov d17, d18
ldur d8, [x29, #-192] ; 8-byte Folded Reload
fmul d7, d8, d18
fmul d23, d16, d14
fadd d7, d7, d23
ldr d9, [sp, #4656] ; 8-byte Folded Reload
fmul d23, d4, d9
fadd d0, d23, d7
fadd d5, d5, d0
fmul d7, d0, d1
fmov d24, #0.50000000
fsub d23, d5, d7
ldr d1, [sp, #10400] ; 8-byte Folded Reload
ldr d2, [sp, #5312] ; 8-byte Folded Reload
fmul d5, d1, d2
ldr d1, [sp, #10832] ; 8-byte Folded Reload
ldr d2, [sp, #6424] ; 8-byte Folded Reload
fmul d7, d2, d1
fsub d3, d5, d7
ldur d6, [x29, #-248] ; 8-byte Folded Reload
ldr d21, [sp, #5296] ; 8-byte Folded Reload
fmul d5, d6, d21
fmul d7, d27, d3
fsub d7, d5, d7
fmul d5, d25, d3
ldr d1, [sp, #5816] ; 8-byte Folded Reload
fmul d20, d6, d1
fsub d18, d5, d20
fmul d5, d8, d7
ldr d1, [sp, #8848] ; 8-byte Folded Reload
fsub d5, d5, d1
fadd d5, d18, d5
fmul d20, d22, d29
fadd d5, d20, d5
fmul d20, d16, d7
ldr d1, [sp, #7184] ; 8-byte Folded Reload
fsub d20, d20, d1
ldr d1, [sp, #12144] ; 8-byte Folded Reload
fmul d25, d1, d18
fadd d20, d25, d20
fmul d25, d10, d29
fadd d20, d25, d20
ldr d1, [sp, #8840] ; 8-byte Folded Reload
fsub d1, d5, d1
ldr d2, [sp, #7136] ; 8-byte Folded Reload
fsub d11, d20, d2
fmul d5, d8, d1
fmov d22, d8
fmul d25, d16, d11
fadd d25, d5, d25
fmul d5, d4, d7
ldr d2, [sp, #7096] ; 8-byte Folded Reload
fsub d5, d5, d2
str d18, [sp, #504] ; 8-byte Folded Spill
ldr d2, [sp, #12152] ; 8-byte Folded Reload
fmul d8, d2, d18
fadd d5, d8, d5
fmul d29, d28, d29
fadd d5, d29, d5
ldr d2, [sp, #7072] ; 8-byte Folded Reload
fsub d20, d5, d2
fmul d29, d4, d20
fadd d25, d29, d25
ldr d4, [sp, #10840] ; 8-byte Folded Reload
ldr d2, [sp, #7984] ; 8-byte Folded Reload
fmul d29, d4, d2
mov x9, #18811
movk x9, #34700, lsl #16
movk x9, #61210, lsl #32
movk x9, #49411, lsl #48
fmov d8, x9
fmul d29, d29, d8
ldr d4, [sp, #10848] ; 8-byte Folded Reload
ldr d2, [sp, #9104] ; 8-byte Folded Reload
fmul d8, d4, d2
fmul d8, d8, d24
ldr d4, [sp, #10208] ; 8-byte Folded Reload
fmul d8, d4, d8
fmul d8, d8, d15
ldr d4, [sp, #10184] ; 8-byte Folded Reload
fmul d29, d4, d29
ldr d4, [sp, #10176] ; 8-byte Folded Reload
fdiv d29, d29, d4
ldr d4, [sp, #10192] ; 8-byte Folded Reload
fmul d29, d4, d29
fadd d29, d8, d29
fsub d0, d23, d0
fsub d0, d0, d25
fadd d29, d29, d25
fmul d25, d25, d24
fsub d25, d29, d25
fadd d29, d25, d0
ldr d5, [sp, #2256] ; 8-byte Folded Reload
fmul d0, d22, d5
fmov d16, #0.50000000
ldr d30, [sp, #2248] ; 8-byte Folded Reload
ldr d2, [sp, #11688] ; 8-byte Folded Reload
fmul d25, d2, d30
fadd d0, d0, d25
fmul d25, d31, d19
fmov d14, d19
fadd d25, d25, d0
ldr d0, [sp, #10136] ; 8-byte Folded Reload
ldr d2, [sp, #7992] ; 8-byte Folded Reload
fmul d0, d0, d2
fmul d0, d0, d16
ldr d2, [sp, #9680] ; 8-byte Folded Reload
fmul d0, d2, d0
fmul d0, d0, d15
ldr d2, [sp, #10144] ; 8-byte Folded Reload
ldr d4, [sp, #9112] ; 8-byte Folded Reload
fmul d8, d2, d4
mov x9, #45572
movk x9, #23979, lsl #16
movk x9, #34811, lsl #32
movk x9, #49413, lsl #48
fmov d15, x9
fmul d8, d8, d15
ldr d2, [sp, #9672] ; 8-byte Folded Reload
fmul d8, d2, d8
ldr d2, [sp, #9656] ; 8-byte Folded Reload
fdiv d8, d8, d2
ldr d2, [sp, #9664] ; 8-byte Folded Reload
fmul d8, d2, d8
fadd d0, d0, d8
fadd d0, d0, d25
fmul d8, d25, d16
fsub d15, d0, d8
ldr d0, [sp, #11728] ; 8-byte Folded Reload
ldr d31, [sp, #9120] ; 8-byte Folded Reload
fmul d0, d0, d31
ldr d2, [sp, #7216] ; 8-byte Folded Reload
fsub d0, d0, d2
ldr d4, [sp, #11360] ; 8-byte Folded Reload
fmul d8, d4, d17
fadd d0, d0, d8
ldr d2, [sp, #11560] ; 8-byte Folded Reload
fmul d8, d2, d7
fadd d0, d8, d0
ldr d2, [sp, #7200] ; 8-byte Folded Reload
fsub d0, d0, d2
ldr d17, [sp, #11272] ; 8-byte Folded Reload
fmul d1, d17, d1
fadd d0, d1, d0
ldr d10, [sp, #5768] ; 8-byte Folded Reload
fmul d1, d29, d10
fsub d0, d0, d1
ldr d1, [sp, #11920] ; 8-byte Folded Reload
ldr d24, [sp, #5744] ; 8-byte Folded Reload
fmul d1, d1, d24
ldr d22, [sp, #8712] ; 8-byte Folded Reload
ldr d2, [sp, #11168] ; 8-byte Folded Reload
fmul d8, d22, d2
fsub d1, d1, d8
ldr d2, [sp, #8704] ; 8-byte Folded Reload
fmul d16, d2, d27
ldr d2, [sp, #11512] ; 8-byte Folded Reload
fmul d8, d2, d16
fadd d1, d8, d1
ldr d2, [sp, #12288] ; 8-byte Folded Reload
ldr d18, [sp, #5792] ; 8-byte Folded Reload
fmul d18, d2, d18
fsub d1, d1, d18
ldr d19, [sp, #5808] ; 8-byte Folded Reload
ldr d2, [sp, #11720] ; 8-byte Folded Reload
fmul d18, d2, d19
fadd d1, d1, d18
ldr d2, [sp, #12168] ; 8-byte Folded Reload
fmul d18, d2, d21
fsub d1, d1, d18
ldr d2, [sp, #11696] ; 8-byte Folded Reload
fmul d18, d2, d3
fadd d1, d18, d1
ldr d18, [sp, #8864] ; 8-byte Folded Reload
fadd d1, d18, d1
ldr d2, [sp, #7224] ; 8-byte Folded Reload
fadd d1, d2, d1
ldr d2, [sp, #7208] ; 8-byte Folded Reload
fadd d1, d2, d1
str d1, [sp, #544] ; 8-byte Folded Spill
ldur d21, [x29, #-240] ; 8-byte Folded Reload
fmul d1, d21, d1
fadd d0, d1, d0
fmul d1, d6, d24
fmov d2, d22
ldr d6, [sp, #11312] ; 8-byte Folded Reload
fmul d18, d22, d6
fsub d1, d18, d1
str d16, [sp, #4632] ; 8-byte Folded Spill
fmul d18, d27, d16
fsub d1, d1, d18
ldr d2, [sp, #7024] ; 8-byte Folded Reload
fadd d0, d2, d0
ldr d2, [sp, #6984] ; 8-byte Folded Reload
fadd d0, d2, d0
ldr d16, [sp, #8888] ; 8-byte Folded Reload
fadd d1, d1, d16
ldr d2, [sp, #5784] ; 8-byte Folded Reload
fmul d18, d1, d2
fadd d0, d0, d18
ldr d8, [sp, #11504] ; 8-byte Folded Reload
ldr d2, [sp, #8176] ; 8-byte Folded Reload
fmul d18, d2, d8
ldr d2, [sp, #6896] ; 8-byte Folded Reload
fadd d22, d18, d2
fmov d2, d19
str d3, [sp, #2280] ; 8-byte Folded Spill
fadd d28, d19, d3
ldr d2, [sp, #8160] ; 8-byte Folded Reload
fmul d24, d2, d26
fadd d2, d24, d28
str d2, [sp, #2272] ; 8-byte Folded Spill
fmul d24, d26, d2
fadd d22, d24, d22
fmul d16, d22, d10
fadd d0, d0, d16
ldr d2, [sp, #6216] ; 8-byte Folded Reload
ldr d3, [sp, #11424] ; 8-byte Folded Reload
fmul d16, d2, d3
fadd d0, d16, d0
ldr d2, [sp, #6208] ; 8-byte Folded Reload
fmul d2, d2, d21
str d2, [sp, #2264] ; 8-byte Folded Spill
fmul d16, d26, d2
fsub d0, d0, d16
ldr d2, [sp, #11320] ; 8-byte Folded Reload
ldr d10, [sp, #4640] ; 8-byte Folded Reload
fmul d16, d2, d10
fsub d0, d0, d16
ldr d2, [sp, #6976] ; 8-byte Folded Reload
fsub d0, d0, d2
ldr d19, [sp, #10872] ; 8-byte Folded Reload
fmul d16, d19, d5
fadd d18, d16, d0
fsub d0, d29, d25
fadd d27, d0, d15
ldr d0, [sp, #4624] ; 8-byte Folded Reload
fmul d0, d18, d0
ldr d16, [sp, #2240] ; 8-byte Folded Reload
fmul d6, d27, d16
fsub d0, d0, d6
ldr d2, [sp, #5392] ; 8-byte Folded Reload
fmul d6, d29, d2
mov x9, #18456
movk x9, #63321, lsl #16
movk x9, #33926, lsl #32
movk x9, #16223, lsl #48
fmov d24, x9
fmul d24, d22, d24
fadd d6, d6, d24
fmov d3, d31
ldr d2, [sp, #12096] ; 8-byte Folded Reload
fmul d24, d2, d31
ldr d2, [sp, #7168] ; 8-byte Folded Reload
fsub d24, d24, d2
fmul d25, d4, d9
fadd d24, d24, d25
ldr q25, [sp, #6768] ; 16-byte Folded Reload
ldr d2, [sp, #5264] ; 8-byte Folded Reload
fmul d23, d23, d2
fadd d23, d24, d23
ldr d2, [sp, #12088] ; 8-byte Folded Reload
fmul d24, d2, d7
fadd d23, d24, d23
ldr d2, [sp, #7192] ; 8-byte Folded Reload
fsub d23, d23, d2
fmul d5, d17, d20
fadd d5, d5, d23
ldr d2, [sp, #7160] ; 8-byte Folded Reload
fadd d5, d2, d5
ldr d2, [sp, #7120] ; 8-byte Folded Reload
fadd d5, d2, d5
ldr d2, [sp, #7088] ; 8-byte Folded Reload
fadd d5, d2, d5
ldr d2, [sp, #5280] ; 8-byte Folded Reload
fmul d21, d1, d2
fadd d5, d5, d21
ldr d2, [sp, #7104] ; 8-byte Folded Reload
fadd d5, d2, d5
ldr d2, [sp, #7056] ; 8-byte Folded Reload
fadd d23, d2, d5
ldr d24, [sp, #12312] ; 8-byte Folded Reload
fmul d5, d24, d23
fadd d5, d5, d6
ldr d2, [sp, #11880] ; 8-byte Folded Reload
fmul d6, d2, d31
ldr d2, [sp, #7040] ; 8-byte Folded Reload
fsub d6, d6, d2
ldr d2, [sp, #4672] ; 8-byte Folded Reload
fmul d21, d4, d2
fadd d6, d6, d21
ldr d2, [sp, #11872] ; 8-byte Folded Reload
fmul d7, d2, d7
fadd d6, d7, d6
ldr d2, [sp, #7016] ; 8-byte Folded Reload
fsub d6, d6, d2
fmul d7, d17, d11
fadd d6, d7, d6
ldr d2, [sp, #5776] ; 8-byte Folded Reload
fmul d1, d1, d2
ldr d2, [sp, #6848] ; 8-byte Folded Reload
fadd d6, d2, d6
ldr d2, [sp, #6960] ; 8-byte Folded Reload
fadd d6, d2, d6
ldr d2, [sp, #6928] ; 8-byte Folded Reload
fadd d6, d2, d6
fadd d1, d6, d1
ldr d2, [sp, #6944] ; 8-byte Folded Reload
fadd d1, d2, d1
ldr d2, [sp, #6880] ; 8-byte Folded Reload
fsub d4, d1, d2
ldr d3, [sp, #12304] ; 8-byte Folded Reload
fmul d1, d3, d4
fadd d1, d1, d5
ldr d2, [sp, #11528] ; 8-byte Folded Reload
fmov d17, d10
fmul d5, d2, d10
fsub d1, d1, d5
ldr d2, [sp, #6864] ; 8-byte Folded Reload
fsub d1, d1, d2
fmul d5, d19, d30
fadd d1, d5, d1
ldr d2, [sp, #9184] ; 8-byte Folded Reload
fmov d20, d8
fmul d5, d2, d8
ldr d2, [sp, #9176] ; 8-byte Folded Reload
fmul d2, d2, d26
str d2, [sp, #2248] ; 8-byte Folded Spill
fmul d6, d26, d2
fsub d5, d5, d6
ldr d2, [sp, #6912] ; 8-byte Folded Reload
fadd d5, d5, d2
ldr d2, [sp, #5376] ; 8-byte Folded Reload
fmul d6, d5, d2
fadd d1, d6, d1
ldr d21, [sp, #4648] ; 8-byte Folded Reload
fmul d6, d1, d21
fadd d0, d0, d6
ldr d2, [sp, #5328] ; 8-byte Folded Reload
fmul d6, d29, d2
fmul d7, d22, d2
fsub d6, d6, d7
str d23, [sp, #440] ; 8-byte Folded Spill
fmul d7, d3, d23
fadd d6, d7, d6
str d4, [sp, #408] ; 8-byte Folded Spill
fmul d7, d24, d4
fsub d6, d6, d7
ldr d2, [sp, #11432] ; 8-byte Folded Reload
fmul d7, d2, d10
fsub d6, d6, d7
ldr d2, [sp, #7048] ; 8-byte Folded Reload
fsub d6, d6, d2
fmul d7, d19, d14
fadd d6, d7, d6
ldr d2, [sp, #5248] ; 8-byte Folded Reload
fmul d7, d15, d2
ldur d17, [x29, #-256] ; 8-byte Folded Reload
fadd d6, d7, d6
ldr d2, [sp, #4680] ; 8-byte Folded Reload
fmul d3, d5, d2
fadd d3, d3, d6
ldr d2, [sp, #9200] ; 8-byte Folded Reload
fmul d5, d2, d8
ldr d2, [sp, #6992] ; 8-byte Folded Reload
fadd d5, d5, d2
ldr d2, [sp, #5800] ; 8-byte Folded Reload
fadd d7, d28, d2
ldr d2, [sp, #8168] ; 8-byte Folded Reload
fmul d6, d2, d26
fadd d2, d6, d7
str d2, [sp, #2256] ; 8-byte Folded Spill
fmul d6, d26, d2
ldr q26, [sp, #4576] ; 16-byte Folded Reload
ldr d20, [sp, #8688] ; 8-byte Folded Reload
ldr d19, [sp, #8696] ; 8-byte Folded Reload
fadd d5, d6, d5
fadd d0, d3, d0
fmul d4, d5, d16
fadd d6, d0, d4
ldr d0, [sp, #7976] ; 8-byte Folded Reload
fmul d0, d18, d0
ldr d2, [sp, #6736] ; 8-byte Folded Reload
fmul d4, d27, d2
fsub d0, d4, d0
fadd d0, d1, d0
fmul d1, d3, d21
fadd d0, d0, d1
mov x9, #47272
movk x9, #56762, lsl #16
movk x9, #43178, lsl #32
movk x9, #16292, lsl #48
fmov d1, x9
fmul d1, d5, d1
fadd d3, d0, d1
ldr q2, [sp, #11984] ; 16-byte Folded Reload
fmul d0, d2, d6
fmul d1, d13, d3
fsub d5, d0, d1
ldr d18, [sp, #4664] ; 8-byte Folded Reload
fmul d0, d27, d18
fmul d1, d12, d5
fsub d0, d1, d0
ldr d16, [sp, #5360] ; 8-byte Folded Reload
fmul d1, d27, d16
str q3, [sp, #5376] ; 16-byte Folded Spill
fmul d3, d2, d3
fsub d1, d3, d1
str q6, [sp, #5392] ; 16-byte Folded Spill
fmul d3, d13, d6
fadd d1, d3, d1
ldr d2, [sp, #9208] ; 8-byte Folded Reload
ldr d3, [sp, #11520] ; 8-byte Folded Reload
fmul d3, d2, d3
ldr d2, [sp, #8680] ; 8-byte Folded Reload
fadd d3, d3, d2
ldr d6, [sp, #12048] ; 8-byte Folded Reload
ldr d2, [sp, #9192] ; 8-byte Folded Reload
fmul d4, d2, d6
fadd d2, d4, d7
str d2, [sp, #2240] ; 8-byte Folded Spill
fmul d4, d6, d2
fadd d3, d4, d3
fmul d3, d3, d16
fadd d1, d1, d3
ldr d2, [sp, #8672] ; 8-byte Folded Reload
ldr d3, [sp, #5344] ; 8-byte Folded Reload
fmul d3, d2, d3
fsub d4, d1, d3
ldr q6, [sp, #11792] ; 16-byte Folded Reload
fmul d1, d6, d4
fsub d0, d0, d1
ldr d3, [sp, #12072] ; 8-byte Folded Reload
fmul d1, d20, d3
fadd d2, d1, d7
ldr d1, [sp, #11640] ; 8-byte Folded Reload
fmul d1, d19, d1
str d2, [sp, #2232] ; 8-byte Folded Spill
fmul d3, d3, d2
fadd d1, d1, d3
fmul d2, d1, d18
fadd d21, d0, d2
ldr d3, [sp, #9096] ; 8-byte Folded Reload
fmul d0, d27, d3
stp q4, q5, [sp, #112] ; 32-byte Folded Spill
fmul d2, d6, d5
fsub d0, d2, d0
fmul d2, d12, d4
fadd d0, d0, d2
fmul d1, d1, d3
fadd d22, d0, d1
fmul d0, d17, d19
ldr d1, [sp, #8664] ; 8-byte Folded Reload
fsub d24, d1, d0
fmul d0, d17, d20
str d7, [sp, #16] ; 8-byte Folded Spill
fadd d23, d0, d7
cbz x8, LBB19_48
; %bb.47:
ldr q18, [sp, #10720] ; 16-byte Folded Reload
fmul d0, d18, d26
ldr q16, [sp, #11216] ; 16-byte Folded Reload
fmul d1, d16, d25
fmul d2, d17, d24
ldr d6, [sp, #11248] ; 8-byte Folded Reload
fmul d3, d6, d20
fadd d2, d3, d2
mov x9, #39127
movk x9, #24179, lsl #16
movk x9, #24811, lsl #32
movk x9, #16304, lsl #48
fmov d3, x9
fmul d4, d2, d3
fsub d1, d1, d4
fsub d0, d0, d1
fadd d0, d0, d0
mov x9, #33620
movk x9, #2364, lsl #16
movk x9, #33974, lsl #32
movk x9, #16305, lsl #48
fmov d1, x9
fmul d4, d27, d1
fmul d5, d18, d21
fsub d4, d5, d4
fmul d5, d16, d22
fsub d4, d4, d5
fmul d5, d6, d19
fmul d6, d17, d23
fadd d5, d5, d6
fmul d6, d5, d1
fadd d4, d4, d6
fadd d4, d4, d4
fadd d0, d0, d4
ldr d17, [sp, #11400] ; 8-byte Folded Reload
fmul d4, d17, d0
fmul d6, d27, d3
fmul d7, d16, d21
fsub d6, d7, d6
fmul d7, d18, d22
fadd d6, d7, d6
fmul d3, d5, d3
fadd d3, d6, d3
fmul d5, d18, d25
fmul d1, d2, d1
fsub d1, d5, d1
fmul d2, d16, d26
fadd d1, d2, d1
fadd d1, d1, d3
ldr d2, [sp, #11160] ; 8-byte Folded Reload
fmul d2, d2, d1
fsub d2, d4, d2
fmul d2, d17, d2
fmov d5, #0.50000000
fmul d2, d2, d5
ldr d3, [sp, #11392] ; 8-byte Folded Reload
fmul d0, d3, d0
ldr d4, [sp, #11240] ; 8-byte Folded Reload
fmul d1, d4, d1
fadd d0, d1, d0
fmul d0, d3, d0
fmul d0, d0, d5
fsub d0, d2, d0
str d0, [x8, #40]
LBB19_48:
stp d24, d23, [sp] ; 16-byte Folded Spill
str d27, [sp, #24] ; 8-byte Folded Spill
stp q22, q21, [sp, #32] ; 32-byte Folded Spill
ldr d27, [sp, #11296] ; 8-byte Folded Reload
ldr d0, [sp, #12272] ; 8-byte Folded Reload
fsub d1, d0, d27
ldr d0, [sp, #12120] ; 8-byte Folded Reload
fadd d17, d1, d0
ldr d26, [sp, #12032] ; 8-byte Folded Reload
ldr d0, [sp, #12344] ; 8-byte Folded Reload
fadd d21, d26, d0
ldr d0, [sp, #12128] ; 8-byte Folded Reload
fadd d22, d21, d0
ldr d13, [sp, #11752] ; 8-byte Folded Reload
fadd d2, d22, d13
ldr d0, [sp, #9928] ; 8-byte Folded Reload
fdiv d0, d2, d0
ldr d3, [sp, #9904] ; 8-byte Folded Reload
fmul d16, d3, d0
ldr d3, [sp, #9896] ; 8-byte Folded Reload
fmul d3, d16, d3
fadd d3, d17, d3
ldr d4, [sp, #12184] ; 8-byte Folded Reload
fadd d3, d4, d3
ldr d4, [sp, #10480] ; 8-byte Folded Reload
fmul d4, d4, d3
fmov d31, #0.50000000
fmul d4, d4, d31
ldr d5, [sp, #10904] ; 8-byte Folded Reload
fmul d4, d5, d4
mov x9, #4632233691727265792
fmov d23, x9
fmul d4, d4, d23
ldr d5, [sp, #10896] ; 8-byte Folded Reload
fmul d5, d5, d3
str d5, [sp, #2160] ; 8-byte Folded Spill
ldr d3, [sp, #10544] ; 8-byte Folded Reload
fmul d5, d3, d5
str d5, [sp, #2152] ; 8-byte Folded Spill
ldr d3, [sp, #10568] ; 8-byte Folded Reload
fmul d3, d3, d5
fmov d18, #3.00000000
fmul d3, d3, d18
fadd d10, d4, d3
ldr d29, [sp, #11672] ; 8-byte Folded Reload
fadd d5, d22, d29
ldr d3, [sp, #10496] ; 8-byte Folded Reload
fdiv d4, d5, d3
str d4, [sp, #4624] ; 8-byte Folded Spill
ldr d3, [sp, #10424] ; 8-byte Folded Reload
fmul d20, d3, d4
ldr d3, [sp, #9880] ; 8-byte Folded Reload
fmul d3, d20, d3
fadd d3, d17, d3
ldr d4, [sp, #12176] ; 8-byte Folded Reload
fadd d3, d4, d3
ldr d4, [sp, #10432] ; 8-byte Folded Reload
fmul d4, d4, d3
fmul d4, d4, d31
ldr d7, [sp, #10888] ; 8-byte Folded Reload
fmul d4, d7, d4
fmul d4, d4, d23
ldr d7, [sp, #10880] ; 8-byte Folded Reload
fmul d6, d7, d3
str d6, [sp, #5328] ; 8-byte Folded Spill
ldr d3, [sp, #10440] ; 8-byte Folded Reload
fmul d6, d3, d6
str d6, [sp, #5280] ; 8-byte Folded Spill
ldr d3, [sp, #10448] ; 8-byte Folded Reload
fmul d3, d3, d6
fmul d3, d3, d18
fadd d4, d4, d3
ldr d3, [sp, #11208] ; 8-byte Folded Reload
fadd d6, d21, d3
str d6, [sp, #5360] ; 8-byte Folded Spill
ldr d3, [sp, #9064] ; 8-byte Folded Reload
fdiv d3, d6, d3
str d3, [sp, #5776] ; 8-byte Folded Spill
ldr d7, [sp, #9080] ; 8-byte Folded Reload
fmul d3, d7, d3
str d3, [sp, #5344] ; 8-byte Folded Spill
ldr d24, [sp, #9056] ; 8-byte Folded Reload
fmul d24, d3, d24
fadd d24, d1, d24
ldr d25, [sp, #11200] ; 8-byte Folded Reload
fadd d24, d25, d24
ldr d25, [sp, #9816] ; 8-byte Folded Reload
fmul d25, d24, d25
fmul d25, d25, d31
ldr d30, [sp, #10368] ; 8-byte Folded Reload
fmul d25, d30, d25
fmul d23, d25, d23
ldr d25, [sp, #10360] ; 8-byte Folded Reload
fmul d3, d24, d25
str d3, [sp, #5312] ; 8-byte Folded Spill
ldr d24, [sp, #9824] ; 8-byte Folded Reload
fmul d3, d24, d3
str d3, [sp, #5744] ; 8-byte Folded Spill
ldr d24, [sp, #9832] ; 8-byte Folded Reload
fmul d24, d24, d3
fmul d24, d24, d18
fadd d11, d24, d23
mov x9, #54806
movk x9, #23353, lsl #16
movk x9, #56949, lsl #32
movk x9, #16326, lsl #48
fmov d23, x9
fmul d3, d22, d23
ldr d22, [sp, #10088] ; 8-byte Folded Reload
fsub d28, d22, d3
mov x9, #50080
movk x9, #49599, lsl #16
movk x9, #32579, lsl #32
movk x9, #16368, lsl #48
fmov d22, x9
fmul d6, d21, d22
str d6, [sp, #9096] ; 8-byte Folded Spill
fsub d21, d28, d6
str d28, [sp, #7992] ; 8-byte Folded Spill
ldr d24, [sp, #9488] ; 8-byte Folded Reload
fadd d6, d24, d21
str d6, [sp, #9112] ; 8-byte Folded Spill
mov x9, #6432
movk x9, #24166, lsl #16
movk x9, #7623, lsl #32
movk x9, #16309, lsl #48
fmov d24, x9
fmul d21, d26, d24
fsub d6, d6, d21
str d6, [sp, #9120] ; 8-byte Folded Spill
fadd d21, d10, d4
fadd d7, d21, d11
str d7, [sp, #6424] ; 8-byte Folded Spill
ldr d30, [sp, #12056] ; 8-byte Folded Reload
fmul d25, d6, d30
fsub d18, d7, d25
str d18, [sp, #2208] ; 8-byte Folded Spill
fmul d25, d30, d18
ldr d26, [sp, #9088] ; 8-byte Folded Reload
fadd d25, d26, d25
fmul d18, d1, d22
str d18, [sp, #9088] ; 8-byte Folded Spill
fmul d7, d17, d23
ldr d1, [sp, #10080] ; 8-byte Folded Reload
fadd d26, d7, d1
fadd d1, d18, d26
str d26, [sp, #7976] ; 8-byte Folded Spill
ldr d17, [sp, #9496] ; 8-byte Folded Reload
fadd d9, d17, d1
fmul d1, d27, d24
fsub d1, d9, d1
str d9, [sp, #7984] ; 8-byte Folded Spill
str d1, [sp, #9104] ; 8-byte Folded Spill
ldr d17, [sp, #11584] ; 8-byte Folded Reload
fmul d1, d1, d17
fadd d1, d1, d25
str d1, [sp, #6736] ; 8-byte Folded Spill
ldr d17, [sp, #10624] ; 8-byte Folded Reload
fmul d2, d2, d17
ldr d17, [sp, #10912] ; 8-byte Folded Reload
fmul d16, d16, d17
fmul d17, d16, d31
fsub d2, d2, d17
mov x9, #-7378697629483820647
movk x9, #39322
movk x9, #16361, lsl #48
fmov d18, x9
fmul d16, d16, d18
ldr d22, [sp, #9912] ; 8-byte Folded Reload
fmul d16, d16, d22
fmov d1, #5.00000000
fmul d16, d16, d1
fmov d31, #5.00000000
fsub d2, d2, d16
ldr d16, [sp, #10608] ; 8-byte Folded Reload
fdiv d22, d2, d16
fmov d30, #0.50000000
ldur d6, [x29, #-184] ; 8-byte Folded Reload
fmul d2, d6, d10
ldr d16, [sp, #10616] ; 8-byte Folded Reload
fmul d24, d16, d22
ldr d19, [sp, #11888] ; 8-byte Folded Reload
fmul d23, d19, d24
fsub d17, d2, d23
ldr d16, [sp, #10600] ; 8-byte Folded Reload
fmul d22, d16, d22
ldr d16, [sp, #9920] ; 8-byte Folded Reload
fmul d0, d0, d16
fsub d1, d22, d0
str d1, [sp, #5784] ; 8-byte Folded Spill
ldr d16, [sp, #12320] ; 8-byte Folded Reload
fmul d0, d16, d24
fmov d25, d24
str d24, [sp, #2144] ; 8-byte Folded Spill
fmul d22, d6, d1
fsub d0, d0, d22
str d0, [sp, #5768] ; 8-byte Folded Spill
fmul d22, d19, d1
fmov d24, d19
fmul d23, d16, d10
fmov d1, d16
fsub d19, d22, d23
ldp d2, d27, [x29, #-176] ; 16-byte Folded Reload
fmul d22, d2, d0
fmov d15, d2
ldr d0, [sp, #9288] ; 8-byte Folded Reload
fsub d22, d22, d0
fadd d22, d19, d22
fmul d23, d27, d17
fadd d22, d23, d22
ldr d0, [sp, #11904] ; 8-byte Folded Reload
fmul d23, d0, d17
ldr d0, [sp, #8184] ; 8-byte Folded Reload
fsub d23, d23, d0
ldr d0, [sp, #9280] ; 8-byte Folded Reload
fsub d0, d22, d0
str d0, [sp, #4680] ; 8-byte Folded Spill
ldr d12, [sp, #11376] ; 8-byte Folded Reload
fmul d22, d12, d0
fadd d22, d23, d22
ldr d16, [sp, #10584] ; 8-byte Folded Reload
fmul d5, d5, d16
ldr d0, [sp, #11104] ; 8-byte Folded Reload
fmul d20, d20, d0
fmul d23, d20, d30
fsub d5, d5, d23
fmul d20, d20, d18
ldr d0, [sp, #9888] ; 8-byte Folded Reload
fmul d20, d20, d0
fmul d20, d20, d31
fsub d5, d5, d20
ldr d16, [sp, #10576] ; 8-byte Folded Reload
fdiv d0, d5, d16
str d0, [sp, #2056] ; 8-byte Folded Spill
str d4, [sp, #5792] ; 8-byte Folded Spill
fmov d31, d6
fmul d5, d6, d4
ldr d16, [sp, #10592] ; 8-byte Folded Reload
fmul d0, d16, d0
fmul d23, d24, d0
fmov d30, d0
str d0, [sp, #2016] ; 8-byte Folded Spill
fsub d14, d5, d23
ldr d0, [sp, #11664] ; 8-byte Folded Reload
fmul d5, d0, d14
fadd d5, d5, d22
ldr d0, [sp, #8152] ; 8-byte Folded Reload
fsub d5, d5, d0
mov x9, #51491
movk x9, #54360, lsl #16
movk x9, #13074, lsl #32
movk x9, #49054, lsl #48
fmov d22, x9
fadd d5, d5, d22
fmov d2, d3
fmul d16, d3, d6
ldr d6, [sp, #11568] ; 8-byte Folded Reload
fmul d22, d6, d16
ldr d0, [sp, #11176] ; 8-byte Folded Reload
fmul d23, d7, d0
fadd d23, d22, d23
fmul d22, d3, d24
fmov d8, d24
str d3, [sp, #8680] ; 8-byte Folded Spill
fmul d24, d7, d1
fadd d0, d22, d24
ldr d20, [sp, #11944] ; 8-byte Folded Reload
fmul d24, d20, d0
fmov d22, d0
str d0, [sp, #2112] ; 8-byte Folded Spill
fadd d23, d24, d23
fmul d24, d13, d25
fsub d23, d24, d23
str d10, [sp, #2224] ; 8-byte Folded Spill
ldr d0, [sp, #11736] ; 8-byte Folded Reload
fmul d24, d0, d10
fsub d23, d23, d24
fmul d24, d29, d30
fadd d23, d24, d23
ldr d0, [sp, #11648] ; 8-byte Folded Reload
fmul d24, d0, d4
fsub d23, d23, d24
ldr d0, [sp, #8144] ; 8-byte Folded Reload
fadd d23, d0, d23
ldr d0, [sp, #9264] ; 8-byte Folded Reload
fadd d23, d0, d23
ldr d0, [sp, #8136] ; 8-byte Folded Reload
fadd d0, d0, d23
ldr d3, [sp, #12280] ; 8-byte Folded Reload
str d0, [sp, #640] ; 8-byte Folded Spill
fmul d23, d3, d0
fadd d5, d23, d5
ldr d30, [sp, #11368] ; 8-byte Folded Reload
fmul d23, d28, d30
fsub d0, d21, d23
str d0, [sp, #2200] ; 8-byte Folded Spill
fmul d21, d30, d0
ldr d0, [sp, #9168] ; 8-byte Folded Reload
fadd d21, d21, d0
ldr d20, [sp, #11536] ; 8-byte Folded Reload
fmul d23, d26, d20
fadd d21, d23, d21
mov x9, #63706
movk x9, #13221, lsl #16
movk x9, #1281, lsl #32
movk x9, #16209, lsl #48
fmov d0, x9
str d0, [sp, #2120] ; 8-byte Folded Spill
fmul d24, d21, d0
fadd d24, d5, d24
str d16, [sp, #4664] ; 8-byte Folded Spill
fmul d5, d31, d16
fmul d25, d8, d22
fadd d5, d5, d25
str d7, [sp, #8672] ; 8-byte Folded Spill
ldr d0, [sp, #11328] ; 8-byte Folded Reload
fmul d25, d7, d0
fadd d5, d5, d25
ldr d0, [sp, #9272] ; 8-byte Folded Reload
fadd d16, d0, d5
mov x9, #54125
movk x9, #53060, lsl #16
movk x9, #15481, lsl #32
movk x9, #16273, lsl #48
fmov d0, x9
str d0, [sp, #2136] ; 8-byte Folded Spill
fmul d25, d16, d0
fsub d24, d24, d25
fmul d25, d2, d6
ldr d0, [sp, #9256] ; 8-byte Folded Reload
fsub d0, d0, d25
str d0, [sp, #6200] ; 8-byte Folded Spill
fmul d0, d0, d3
str d0, [sp, #2192] ; 8-byte Folded Spill
fmul d25, d0, d30
fadd d24, d25, d24
fmul d25, d7, d6
ldr d0, [sp, #9248] ; 8-byte Folded Reload
fsub d0, d0, d25
str d0, [sp, #6192] ; 8-byte Folded Spill
ldr d1, [sp, #11440] ; 8-byte Folded Reload
fmul d25, d0, d1
fadd d24, d25, d24
str d11, [sp, #2216] ; 8-byte Folded Spill
fmul d28, d30, d11
ldr d0, [sp, #11352] ; 8-byte Folded Reload
fmul d25, d0, d28
fadd d24, d25, d24
ldr d0, [sp, #9952] ; 8-byte Folded Reload
ldr d1, [sp, #5360] ; 8-byte Folded Reload
fmul d4, d1, d0
ldr d0, [sp, #9024] ; 8-byte Folded Reload
ldr d1, [sp, #5344] ; 8-byte Folded Reload
fmul d7, d1, d0
fmov d0, #0.50000000
fmul d25, d7, d0
fsub d4, d4, d25
fmul d7, d7, d18
ldr d0, [sp, #9008] ; 8-byte Folded Reload
fmul d7, d7, d0
fmov d0, #5.00000000
fmul d7, d7, d0
fsub d4, d4, d7
ldr d0, [sp, #9944] ; 8-byte Folded Reload
fdiv d4, d4, d0
ldr d0, [sp, #9936] ; 8-byte Folded Reload
fmul d4, d0, d4
ldr d0, [sp, #9048] ; 8-byte Folded Reload
ldr d1, [sp, #5776] ; 8-byte Folded Reload
fmul d3, d1, d0
fsub d0, d4, d3
str d0, [sp, #5776] ; 8-byte Folded Spill
fmul d0, d30, d0
fmul d3, d15, d0
fmov d13, d0
ldr d0, [sp, #7968] ; 8-byte Folded Reload
fadd d3, d0, d3
fmov d6, d27
fmul d4, d27, d28
fsub d3, d4, d3
ldr d0, [sp, #8128] ; 8-byte Folded Reload
fsub d4, d24, d0
ldr d0, [sp, #7960] ; 8-byte Folded Reload
fsub d0, d3, d0
str d0, [sp, #1952] ; 8-byte Folded Spill
ldr d18, [sp, #11128] ; 8-byte Folded Reload
fmul d3, d18, d0
fadd d10, d4, d3
ldr d0, [sp, #9112] ; 8-byte Folded Reload
fmul d3, d0, d30
ldr d27, [sp, #6424] ; 8-byte Folded Reload
fsub d3, d27, d3
str d3, [sp, #2184] ; 8-byte Folded Spill
fmul d3, d30, d3
ldr d4, [sp, #9160] ; 8-byte Folded Reload
fadd d3, d4, d3
fmul d4, d9, d20
fadd d11, d4, d3
mov x9, #4354980839667269632
mov x10, #47272
movk x10, #56762, lsl #16
movk x10, #43178, lsl #32
movk x10, #16292, lsl #48
fmov d3, x9
str d3, [sp, #5264] ; 8-byte Folded Spill
fmul d3, d10, d3
fmov d4, x10
str d4, [sp, #5248] ; 8-byte Folded Spill
fmul d7, d11, d4
fsub d7, d3, d7
ldr d8, [sp, #11864] ; 8-byte Folded Reload
ldr d2, [sp, #5768] ; 8-byte Folded Reload
fmul d3, d8, d2
ldr d4, [sp, #7920] ; 8-byte Folded Reload
fsub d3, d3, d4
ldr d0, [sp, #12200] ; 8-byte Folded Reload
fmul d24, d0, d19
fadd d3, d24, d3
ldur d5, [x29, #-216] ; 8-byte Folded Reload
fmul d24, d5, d17
fadd d3, d24, d3
ldr d1, [sp, #11976] ; 8-byte Folded Reload
fmul d24, d1, d17
ldr d4, [sp, #8120] ; 8-byte Folded Reload
fsub d24, d24, d4
ldr d4, [sp, #7912] ; 8-byte Folded Reload
fsub d0, d3, d4
fmul d3, d12, d0
fadd d3, d24, d3
ldr d1, [sp, #12064] ; 8-byte Folded Reload
fmul d24, d1, d14
fadd d3, d24, d3
ldr d4, [sp, #8112] ; 8-byte Folded Reload
fsub d3, d3, d4
mov x9, #46543
movk x9, #48510, lsl #16
movk x9, #46414, lsl #32
movk x9, #16260, lsl #48
fmov d24, x9
fadd d3, d3, d24
ldr d4, [sp, #8096] ; 8-byte Folded Reload
fadd d3, d4, d3
mov x9, #62994
movk x9, #14722, lsl #16
movk x9, #41829, lsl #32
movk x9, #16247, lsl #48
fmov d1, x9
str d1, [sp, #2128] ; 8-byte Folded Spill
fmul d25, d16, d1
fadd d3, d3, d25
mov x9, #18456
movk x9, #63321, lsl #16
movk x9, #33926, lsl #32
movk x9, #48991, lsl #48
ldr d4, [sp, #8080] ; 8-byte Folded Reload
fadd d3, d4, d3
ldr d4, [sp, #8072] ; 8-byte Folded Reload
fadd d23, d4, d3
fmov d1, x9
str d1, [sp, #2104] ; 8-byte Folded Spill
fmul d3, d21, d1
ldur d1, [x29, #-208] ; 8-byte Folded Reload
fmul d25, d1, d23
fmov d9, d1
fadd d3, d25, d3
ldr d26, [sp, #12016] ; 8-byte Folded Reload
fmul d25, d26, d2
ldr d4, [sp, #7904] ; 8-byte Folded Reload
fsub d25, d25, d4
str d19, [sp, #552] ; 8-byte Folded Spill
ldr d1, [sp, #12160] ; 8-byte Folded Reload
fmul d29, d1, d19
fadd d25, d29, d25
ldr d15, [sp, #12336] ; 8-byte Folded Reload
fmul d29, d15, d17
fadd d25, d29, d25
ldr d1, [sp, #12192] ; 8-byte Folded Reload
fmul d2, d1, d17
ldr d4, [sp, #8064] ; 8-byte Folded Reload
fsub d2, d2, d4
ldr d4, [sp, #7888] ; 8-byte Folded Reload
fsub d1, d25, d4
str d1, [sp, #4640] ; 8-byte Folded Spill
fmul d25, d12, d1
fadd d2, d2, d25
ldr d22, [sp, #4680] ; 8-byte Folded Reload
fmul d25, d6, d22
fmul d29, d5, d0
fmov d12, d0
str d0, [sp, #4648] ; 8-byte Folded Spill
fadd d25, d25, d29
fmul d29, d15, d1
fadd d25, d29, d25
mov x9, #-7378697629483820647
movk x9, #39322
movk x9, #16297, lsl #48
fmov d0, x9
str d0, [sp, #2048] ; 8-byte Folded Spill
fmul d25, d25, d0
fadd d2, d2, d25
ldr d0, [sp, #12104] ; 8-byte Folded Reload
fmul d25, d0, d14
fadd d2, d25, d2
ldr d4, [sp, #8056] ; 8-byte Folded Reload
fsub d2, d2, d4
mov x9, #57269
movk x9, #60105, lsl #16
movk x9, #55991, lsl #32
movk x9, #16301, lsl #48
fmov d25, x9
fadd d2, d2, d25
ldr d4, [sp, #8032] ; 8-byte Folded Reload
fadd d2, d4, d2
mov x9, #56877
movk x9, #10885, lsl #16
movk x9, #2572, lsl #32
movk x9, #16289, lsl #48
fmov d0, x9
str d0, [sp, #2040] ; 8-byte Folded Spill
fmul d5, d16, d0
fadd d2, d2, d5
ldr d4, [sp, #8048] ; 8-byte Folded Reload
fsub d2, d2, d4
ldr d4, [sp, #8040] ; 8-byte Folded Reload
fadd d1, d4, d2
ldur d0, [x29, #-200] ; 8-byte Folded Reload
fmul d2, d0, d1
fadd d2, d2, d3
ldr d3, [sp, #11600] ; 8-byte Folded Reload
fmul d3, d3, d28
fadd d2, d3, d2
str d13, [sp, #1984] ; 8-byte Folded Spill
ldr d3, [sp, #12136] ; 8-byte Folded Reload
fmul d3, d3, d13
ldr d4, [sp, #7952] ; 8-byte Folded Reload
fadd d3, d4, d3
ldr d24, [sp, #11496] ; 8-byte Folded Reload
fmul d5, d24, d28
fsub d3, d5, d3
ldr d4, [sp, #8024] ; 8-byte Folded Reload
fsub d2, d2, d4
ldr d4, [sp, #7944] ; 8-byte Folded Reload
fsub d4, d3, d4
fmul d3, d18, d4
fmov d17, d4
fadd d2, d2, d3
mov x9, #26288
movk x9, #13902, lsl #16
movk x9, #44107, lsl #32
movk x9, #16338, lsl #48
fmov d3, x9
fadd d2, d2, d3
ldr d3, [sp, #9096] ; 8-byte Folded Reload
fmul d3, d3, d30
str d3, [sp, #2176] ; 8-byte Folded Spill
fmul d3, d30, d3
ldr d4, [sp, #9088] ; 8-byte Folded Reload
fmul d5, d4, d20
fadd d3, d3, d5
ldr d4, [sp, #8016] ; 8-byte Folded Reload
fadd d5, d3, d4
mov x9, #45974
movk x9, #34787, lsl #16
movk x9, #35902, lsl #32
movk x9, #16285, lsl #48
fmov d3, x9
str d3, [sp, #2088] ; 8-byte Folded Spill
fmul d3, d5, d3
fadd d25, d3, d2
mov x9, #36544
movk x9, #43611, lsl #16
movk x9, #860, lsl #32
movk x9, #16326, lsl #48
fmov d2, x9
str d2, [sp, #2072] ; 8-byte Folded Spill
fmul d2, d21, d2
str d23, [sp, #520] ; 8-byte Folded Spill
fmul d3, d0, d23
fsub d2, d2, d3
str d1, [sp, #512] ; 8-byte Folded Spill
fmul d3, d9, d1
fadd d2, d3, d2
ldr d0, [sp, #11464] ; 8-byte Folded Reload
fmul d3, d0, d28
fadd d2, d3, d2
ldr d4, [sp, #11760] ; 8-byte Folded Reload
fmul d3, d4, d28
ldr d0, [sp, #11896] ; 8-byte Folded Reload
fmul d21, d0, d13
ldr d28, [sp, #7936] ; 8-byte Folded Reload
fadd d21, d28, d21
fsub d3, d3, d21
ldr d21, [sp, #8008] ; 8-byte Folded Reload
fsub d2, d2, d21
ldr d21, [sp, #7928] ; 8-byte Folded Reload
fsub d23, d3, d21
fmul d3, d18, d23
fadd d2, d2, d3
ldr d9, [sp, #1952] ; 8-byte Folded Reload
fmul d3, d6, d9
fmov d16, d6
fmul d21, d24, d17
fmov d20, d17
str d17, [sp, #1960] ; 8-byte Folded Spill
fadd d3, d3, d21
fmul d21, d4, d23
fadd d21, d21, d3
mov x9, #43516
movk x9, #54001, lsl #16
movk x9, #25165, lsl #32
movk x9, #16240, lsl #48
fmov d0, x9
str d0, [sp, #2032] ; 8-byte Folded Spill
fmul d21, d21, d0
fadd d2, d2, d21
mov x9, #21969
movk x9, #1325, lsl #16
movk x9, #7976, lsl #32
movk x9, #16367, lsl #48
fmov d21, x9
fadd d21, d2, d21
mov x9, #49235
movk x9, #28989, lsl #16
movk x9, #40841, lsl #32
movk x9, #16312, lsl #48
fmov d0, x9
str d0, [sp, #2024] ; 8-byte Folded Spill
fmul d5, d5, d0
fadd d5, d5, d21
fadd d7, d7, d25
mov x9, #4363988038922010624
fmov d0, x9
fmul d21, d5, d0
fmov d3, d0
str d0, [sp, #1992] ; 8-byte Folded Spill
fadd d2, d7, d21
mov x9, #43115
movk x9, #62349, lsl #16
movk x9, #30721, lsl #32
movk x9, #16347, lsl #48
fmov d0, x9
str d0, [sp, #2096] ; 8-byte Folded Spill
ldr d1, [sp, #6736] ; 8-byte Folded Reload
fmul d1, d1, d0
ldr q0, [sp, #12000] ; 16-byte Folded Reload
fmul d7, d0, d2
mov.16b v6, v0
fsub d1, d7, d1
mov x9, #4359484439294640128
fmov d0, x9
str d0, [sp, #1976] ; 8-byte Folded Spill
fmul d10, d10, d0
mov x9, #62612
movk x9, #18904, lsl #16
movk x9, #1144, lsl #32
movk x9, #49064, lsl #48
fmov d0, x9
str d0, [sp, #1968] ; 8-byte Folded Spill
fmul d11, d11, d0
fsub d10, d11, d10
fmul d25, d25, d3
fadd d25, d10, d25
fadd d3, d25, d5
ldr q0, [sp, #11840] ; 16-byte Folded Reload
fmul d5, d0, d3
mov.16b v7, v0
fadd d1, d5, d1
mov x9, #31036
movk x9, #52462, lsl #16
movk x9, #23267, lsl #32
movk x9, #16406, lsl #48
fmov d5, x9
fsub d1, d5, d1
mov x9, #2356
movk x9, #12413, lsl #16
movk x9, #55910, lsl #32
movk x9, #16327, lsl #48
fmov d4, x9
str d4, [sp, #2080] ; 8-byte Folded Spill
ldr d0, [sp, #6752] ; 8-byte Folded Reload
fmul d5, d0, d4
fsub d0, d1, d5
str q3, [sp, #5344] ; 16-byte Folded Spill
fmul d1, d6, d3
str q2, [sp, #5360] ; 16-byte Folded Spill
fmul d5, d7, d2
fsub d24, d1, d5
ldr d1, [sp, #8560] ; 8-byte Folded Reload
ldr d2, [sp, #9120] ; 8-byte Folded Reload
fadd d11, d1, d2
ldr d2, [sp, #12208] ; 8-byte Folded Reload
fmul d1, d11, d2
fsub d1, d27, d1
str d1, [sp, #2168] ; 8-byte Folded Spill
fmul d1, d2, d1
ldr d4, [sp, #8568] ; 8-byte Folded Reload
ldr d2, [sp, #9104] ; 8-byte Folded Reload
fadd d21, d4, d2
ldr d2, [sp, #11784] ; 8-byte Folded Reload
fmul d5, d21, d2
fadd d5, d5, d1
mov x9, #52090
movk x9, #42545, lsl #16
movk x9, #26349, lsl #32
movk x9, #16345, lsl #48
ldr q2, [sp, #11824] ; 16-byte Folded Reload
fmul d1, d2, d24
mov.16b v3, v2
fmov d2, x9
str d2, [sp, #2064] ; 8-byte Folded Spill
fmul d25, d5, d2
fsub d1, d1, d25
ldr q2, [sp, #12240] ; 16-byte Folded Reload
fmul d25, d2, d0
fsub d1, d25, d1
str q1, [sp, #6752] ; 16-byte Folded Spill
mov x9, #11201
movk x9, #50599, lsl #16
movk x9, #31589, lsl #32
movk x9, #16242, lsl #48
fmov d1, x9
str d1, [sp, #2000] ; 8-byte Folded Spill
fmul d5, d5, d1
str q24, [sp, #80] ; 16-byte Folded Spill
fmul d25, d2, d24
fsub d5, d25, d5
str q0, [sp, #5296] ; 16-byte Folded Spill
fmul d25, d3, d0
fadd d0, d5, d25
str q0, [sp, #6736] ; 16-byte Folded Spill
ldr d0, [sp, #10960] ; 8-byte Folded Reload
ldr d1, [sp, #2152] ; 8-byte Folded Reload
fmul d5, d0, d1
mov x9, #10523
movk x9, #38535, lsl #16
movk x9, #12921, lsl #32
movk x9, #49410, lsl #48
fmov d6, x9
fmul d6, d5, d6
ldr d0, [sp, #10976] ; 8-byte Folded Reload
ldr d1, [sp, #2160] ; 8-byte Folded Reload
fmul d5, d0, d1
fmov d1, #0.50000000
fmul d5, d5, d1
ldr d0, [sp, #10320] ; 8-byte Folded Reload
fmul d19, d0, d5
mov x9, #211106232532992
movk x9, #16498, lsl #48
fmov d5, x9
fmul d19, d19, d5
ldr d0, [sp, #10344] ; 8-byte Folded Reload
fmul d6, d0, d6
ldr d0, [sp, #10312] ; 8-byte Folded Reload
fdiv d6, d6, d0
ldr d0, [sp, #10296] ; 8-byte Folded Reload
fmul d6, d0, d6
fadd d6, d19, d6
ldur d3, [x29, #-176] ; 8-byte Folded Reload
fmul d19, d3, d22
fmul d25, d8, d12
fadd d19, d19, d25
ldr d10, [sp, #4640] ; 8-byte Folded Reload
fmul d25, d26, d10
fadd d25, d25, d19
fadd d6, d6, d25
fmul d19, d25, d1
fsub d29, d6, d19
ldr d0, [sp, #10392] ; 8-byte Folded Reload
ldr d1, [sp, #2056] ; 8-byte Folded Reload
fmul d6, d0, d1
ldr d0, [sp, #10936] ; 8-byte Folded Reload
ldr d1, [sp, #4624] ; 8-byte Folded Reload
fmul d18, d1, d0
fsub d1, d6, d18
ldr d7, [sp, #12320] ; 8-byte Folded Reload
ldr d19, [sp, #2016] ; 8-byte Folded Reload
fmul d6, d7, d19
fmul d18, d31, d1
fsub d6, d6, d18
ldr d0, [sp, #11888] ; 8-byte Folded Reload
fmul d18, d0, d1
ldr d0, [sp, #5792] ; 8-byte Folded Reload
fmul d13, d7, d0
fsub d4, d18, d13
fmul d18, d3, d6
ldr d0, [sp, #8920] ; 8-byte Folded Reload
fsub d18, d18, d0
fadd d18, d4, d18
fmul d13, d16, d14
fadd d18, d13, d18
fmul d13, d8, d6
ldr d0, [sp, #7712] ; 8-byte Folded Reload
fsub d13, d13, d0
ldr d0, [sp, #12200] ; 8-byte Folded Reload
fmul d24, d0, d4
fadd d24, d24, d13
ldur d0, [x29, #-216] ; 8-byte Folded Reload
fmul d13, d0, d14
fadd d24, d13, d24
ldr d0, [sp, #8896] ; 8-byte Folded Reload
fsub d0, d18, d0
ldr d18, [sp, #7704] ; 8-byte Folded Reload
fsub d28, d24, d18
fmul d24, d3, d0
fmul d13, d8, d28
fadd d24, d24, d13
fmul d13, d26, d6
ldr d17, [sp, #7696] ; 8-byte Folded Reload
fsub d13, d13, d17
str d4, [sp, #472] ; 8-byte Folded Spill
ldr d16, [sp, #12160] ; 8-byte Folded Reload
fmul d4, d16, d4
fadd d4, d4, d13
fmul d27, d15, d14
fadd d4, d27, d4
ldr d17, [sp, #7680] ; 8-byte Folded Reload
fsub d13, d4, d17
fmul d4, d26, d13
fadd d4, d4, d24
ldr d16, [sp, #10944] ; 8-byte Folded Reload
ldr d17, [sp, #5280] ; 8-byte Folded Reload
fmul d24, d16, d17
mov x9, #18811
movk x9, #34700, lsl #16
movk x9, #61210, lsl #32
movk x9, #49411, lsl #48
fmov d27, x9
fmul d24, d24, d27
ldr d16, [sp, #10952] ; 8-byte Folded Reload
ldr d17, [sp, #5328] ; 8-byte Folded Reload
fmul d27, d16, d17
fmov d17, #0.50000000
fmul d27, d27, d17
ldr d16, [sp, #10336] ; 8-byte Folded Reload
fmul d27, d16, d27
fmul d27, d27, d5
ldr d16, [sp, #10352] ; 8-byte Folded Reload
fmul d24, d16, d24
ldr d16, [sp, #10328] ; 8-byte Folded Reload
fdiv d24, d24, d16
ldr d16, [sp, #10304] ; 8-byte Folded Reload
fmul d24, d16, d24
fadd d24, d27, d24
fsub d25, d29, d25
fsub d25, d25, d4
fadd d24, d24, d4
fmul d4, d4, d17
fmov d17, #0.50000000
fsub d4, d24, d4
fadd d27, d4, d25
fmul d4, d3, d9
fmov d18, d9
ldr d3, [sp, #12136] ; 8-byte Folded Reload
fmul d24, d3, d20
fadd d4, d4, d24
ldr d3, [sp, #11896] ; 8-byte Folded Reload
fmul d24, d3, d23
fmov d14, d23
fadd d25, d24, d4
ldr d4, [sp, #10376] ; 8-byte Folded Reload
ldr d16, [sp, #5312] ; 8-byte Folded Reload
fmul d4, d4, d16
fmul d4, d4, d17
ldr d16, [sp, #9712] ; 8-byte Folded Reload
fmul d4, d16, d4
fmul d4, d4, d5
ldr d5, [sp, #10384] ; 8-byte Folded Reload
ldr d16, [sp, #5744] ; 8-byte Folded Reload
fmul d5, d5, d16
mov x9, #45572
movk x9, #23979, lsl #16
movk x9, #34811, lsl #32
movk x9, #49413, lsl #48
fmov d24, x9
fmul d5, d5, d24
ldr d16, [sp, #9736] ; 8-byte Folded Reload
fmul d5, d16, d5
ldr d16, [sp, #9728] ; 8-byte Folded Reload
fdiv d5, d5, d16
ldr d16, [sp, #9720] ; 8-byte Folded Reload
fmul d5, d16, d5
fadd d4, d4, d5
fadd d4, d4, d25
fmul d5, d25, d17
fsub d5, d4, d5
ldr d4, [sp, #11904] ; 8-byte Folded Reload
ldr d12, [sp, #5768] ; 8-byte Folded Reload
fmul d4, d4, d12
ldr d16, [sp, #7824] ; 8-byte Folded Reload
fsub d4, d4, d16
ldr d17, [sp, #11384] ; 8-byte Folded Reload
fmul d24, d17, d22
fadd d4, d4, d24
ldr d2, [sp, #11664] ; 8-byte Folded Reload
fmul d24, d2, d6
fadd d4, d24, d4
ldr d16, [sp, #7816] ; 8-byte Folded Reload
fsub d4, d4, d16
ldr d26, [sp, #11280] ; 8-byte Folded Reload
fmul d0, d26, d0
fadd d0, d0, d4
ldr d3, [sp, #2120] ; 8-byte Folded Reload
fmul d4, d27, d3
fadd d0, d0, d4
ldr d4, [sp, #11936] ; 8-byte Folded Reload
ldr d8, [sp, #2112] ; 8-byte Folded Reload
fmul d4, d4, d8
ldr d22, [sp, #8680] ; 8-byte Folded Reload
ldr d2, [sp, #11176] ; 8-byte Folded Reload
fmul d24, d22, d2
fsub d4, d4, d24
ldr d2, [sp, #8672] ; 8-byte Folded Reload
fmul d20, d2, d31
ldr d2, [sp, #11568] ; 8-byte Folded Reload
fmul d24, d2, d20
fadd d4, d24, d4
ldr d16, [sp, #12184] ; 8-byte Folded Reload
ldr d2, [sp, #2144] ; 8-byte Folded Reload
fmul d24, d16, d2
fsub d4, d4, d24
ldr d23, [sp, #5784] ; 8-byte Folded Reload
ldr d2, [sp, #11736] ; 8-byte Folded Reload
fmul d24, d2, d23
fadd d4, d4, d24
ldr d16, [sp, #12176] ; 8-byte Folded Reload
fmul d16, d16, d19
fsub d4, d4, d16
ldr d2, [sp, #11648] ; 8-byte Folded Reload
fmul d16, d2, d1
fadd d4, d16, d4
ldr d16, [sp, #8992] ; 8-byte Folded Reload
fadd d4, d16, d4
ldr d16, [sp, #7792] ; 8-byte Folded Reload
fadd d4, d16, d4
ldr d16, [sp, #7768] ; 8-byte Folded Reload
fadd d4, d16, d4
str d4, [sp, #2016] ; 8-byte Folded Spill
ldr d15, [sp, #12280] ; 8-byte Folded Reload
fmul d4, d15, d4
fadd d0, d4, d0
fmul d4, d7, d8
fmov d7, d22
ldr d2, [sp, #11328] ; 8-byte Folded Reload
fmul d16, d22, d2
fmov d9, d21
fmov d8, d11
fsub d4, d16, d4
str d20, [sp, #4624] ; 8-byte Folded Spill
fmul d16, d31, d20
fsub d4, d4, d16
ldr d2, [sp, #7632] ; 8-byte Folded Reload
fadd d0, d2, d0
ldr d2, [sp, #7624] ; 8-byte Folded Reload
fadd d0, d2, d0
ldr d16, [sp, #9072] ; 8-byte Folded Reload
fadd d4, d4, d16
ldr d2, [sp, #2136] ; 8-byte Folded Reload
fmul d16, d4, d2
fsub d0, d0, d16
ldr d31, [sp, #11536] ; 8-byte Folded Reload
ldr d2, [sp, #7992] ; 8-byte Folded Reload
fmul d16, d2, d31
ldr d2, [sp, #7592] ; 8-byte Folded Reload
fadd d22, d16, d2
fmov d2, d23
str d1, [sp, #2160] ; 8-byte Folded Spill
fadd d16, d23, d1
ldr d1, [sp, #7976] ; 8-byte Folded Reload
fmul d24, d1, d30
fadd d1, d24, d16
str d1, [sp, #2152] ; 8-byte Folded Spill
fmul d24, d30, d1
fadd d22, d24, d22
fmul d23, d22, d3
fsub d0, d0, d23
ldr d1, [sp, #6200] ; 8-byte Folded Reload
ldr d2, [sp, #11440] ; 8-byte Folded Reload
fmul d23, d1, d2
fadd d0, d23, d0
ldr d1, [sp, #6192] ; 8-byte Folded Reload
fmul d1, d1, d15
str d1, [sp, #2144] ; 8-byte Folded Spill
fmul d23, d1, d30
fsub d0, d0, d23
ldr d1, [sp, #11352] ; 8-byte Folded Reload
ldr d11, [sp, #1984] ; 8-byte Folded Reload
fmul d23, d1, d11
fsub d0, d0, d23
ldr d2, [sp, #7616] ; 8-byte Folded Reload
fsub d0, d0, d2
ldr d24, [sp, #10968] ; 8-byte Folded Reload
fmul d20, d24, d18
fadd d20, d20, d0
fsub d0, d27, d25
fadd d25, d0, d5
ldr d0, [sp, #1976] ; 8-byte Folded Reload
fmul d0, d20, d0
ldr d1, [sp, #1968] ; 8-byte Folded Reload
fmul d7, d25, d1
fsub d0, d7, d0
ldr d1, [sp, #2104] ; 8-byte Folded Reload
fmul d7, d27, d1
mov x9, #18456
movk x9, #63321, lsl #16
movk x9, #33926, lsl #32
movk x9, #16223, lsl #48
fmov d21, x9
fmul d21, d22, d21
fadd d7, d7, d21
fmov d3, d12
ldr d1, [sp, #12192] ; 8-byte Folded Reload
fmul d21, d1, d12
ldr d2, [sp, #7608] ; 8-byte Folded Reload
fsub d21, d21, d2
fmul d23, d17, d10
fadd d21, d21, d23
ldr q23, [sp, #6752] ; 16-byte Folded Reload
ldr d1, [sp, #2048] ; 8-byte Folded Reload
fmul d19, d29, d1
fadd d19, d21, d19
ldr d1, [sp, #12104] ; 8-byte Folded Reload
fmul d21, d1, d6
fadd d19, d21, d19
ldr d2, [sp, #7600] ; 8-byte Folded Reload
fsub d19, d19, d2
fmul d21, d26, d13
fadd d19, d21, d19
ldr d2, [sp, #7568] ; 8-byte Folded Reload
fadd d19, d2, d19
ldr d2, [sp, #7536] ; 8-byte Folded Reload
fadd d19, d2, d19
ldr d2, [sp, #7520] ; 8-byte Folded Reload
fadd d19, d2, d19
ldr d1, [sp, #2040] ; 8-byte Folded Reload
fmul d21, d4, d1
fadd d19, d19, d21
ldr d2, [sp, #7504] ; 8-byte Folded Reload
fadd d19, d2, d19
ldr d2, [sp, #7488] ; 8-byte Folded Reload
fadd d2, d2, d19
ldur d29, [x29, #-200] ; 8-byte Folded Reload
fmul d19, d29, d2
fadd d7, d19, d7
ldr d1, [sp, #11976] ; 8-byte Folded Reload
fmul d19, d1, d12
ldr d3, [sp, #7472] ; 8-byte Folded Reload
fsub d19, d19, d3
ldr d1, [sp, #4648] ; 8-byte Folded Reload
fmul d21, d17, d1
fadd d19, d19, d21
ldr d1, [sp, #12064] ; 8-byte Folded Reload
fmul d6, d1, d6
fadd d6, d6, d19
ldr d3, [sp, #7456] ; 8-byte Folded Reload
fsub d6, d6, d3
fmul d18, d26, d28
fadd d6, d18, d6
ldr d1, [sp, #2128] ; 8-byte Folded Reload
fmul d4, d4, d1
ldr d3, [sp, #7440] ; 8-byte Folded Reload
fadd d6, d3, d6
ldr d3, [sp, #7432] ; 8-byte Folded Reload
fadd d6, d3, d6
ldr d3, [sp, #7408] ; 8-byte Folded Reload
fadd d6, d3, d6
fadd d4, d6, d4
ldr d3, [sp, #7392] ; 8-byte Folded Reload
fadd d4, d3, d4
ldr d3, [sp, #7384] ; 8-byte Folded Reload
fsub d3, d4, d3
ldur d17, [x29, #-208] ; 8-byte Folded Reload
fmul d4, d17, d3
fadd d4, d4, d7
ldr d1, [sp, #11600] ; 8-byte Folded Reload
fmov d21, d11
fmul d6, d1, d11
fsub d4, d4, d6
ldr d6, [sp, #7344] ; 8-byte Folded Reload
fsub d4, d4, d6
ldr d1, [sp, #1960] ; 8-byte Folded Reload
fmul d6, d24, d1
fadd d4, d6, d4
ldr d1, [sp, #9096] ; 8-byte Folded Reload
fmov d19, d31
fmul d6, d1, d31
ldr d1, [sp, #9088] ; 8-byte Folded Reload
fmul d1, d1, d30
str d1, [sp, #2128] ; 8-byte Folded Spill
fmul d7, d30, d1
fsub d6, d6, d7
ldr d7, [sp, #7360] ; 8-byte Folded Reload
fadd d6, d6, d7
ldr d1, [sp, #2088] ; 8-byte Folded Reload
fmul d7, d6, d1
fadd d4, d7, d4
ldr d26, [sp, #1992] ; 8-byte Folded Reload
fmul d7, d4, d26
fadd d0, d0, d7
ldr d1, [sp, #2072] ; 8-byte Folded Reload
fmul d7, d27, d1
fmul d18, d22, d1
fsub d7, d7, d18
str d2, [sp, #2040] ; 8-byte Folded Spill
fmul d18, d17, d2
fadd d7, d18, d7
str d3, [sp, #1976] ; 8-byte Folded Spill
fmul d18, d29, d3
fsub d7, d7, d18
ldr d1, [sp, #11464] ; 8-byte Folded Reload
fmul d17, d1, d11
fsub d7, d7, d17
ldr d2, [sp, #7336] ; 8-byte Folded Reload
fsub d7, d7, d2
fmul d17, d24, d14
ldr q24, [sp, #6736] ; 16-byte Folded Reload
fadd d7, d17, d7
ldr d1, [sp, #2032] ; 8-byte Folded Reload
fmul d3, d5, d1
fadd d3, d3, d7
ldr d1, [sp, #2024] ; 8-byte Folded Reload
fmul d2, d6, d1
fadd d2, d2, d3
ldr d1, [sp, #9112] ; 8-byte Folded Reload
fmul d3, d1, d31
ldr d5, [sp, #7328] ; 8-byte Folded Reload
fadd d3, d3, d5
ldr d1, [sp, #5776] ; 8-byte Folded Reload
fadd d16, d16, d1
ldr d1, [sp, #7984] ; 8-byte Folded Reload
fmul d5, d1, d30
fadd d1, d5, d16
str d1, [sp, #2136] ; 8-byte Folded Spill
fmul d5, d30, d1
fadd d3, d5, d3
fadd d0, d2, d0
mov x9, #62612
movk x9, #18904, lsl #16
movk x9, #1144, lsl #32
movk x9, #16296, lsl #48
fmov d5, x9
fmul d5, d3, d5
fadd d7, d0, d5
ldr d0, [sp, #5264] ; 8-byte Folded Reload
fmul d0, d20, d0
ldr d1, [sp, #5248] ; 8-byte Folded Reload
fmul d5, d25, d1
fsub d0, d0, d5
fadd d0, d4, d0
fmul d2, d2, d26
fadd d0, d0, d2
fmul d2, d3, d1
fadd d3, d0, d2
ldr q1, [sp, #12000] ; 16-byte Folded Reload
fmul d0, d1, d7
ldr q4, [sp, #11840] ; 16-byte Folded Reload
fmul d2, d4, d3
fsub d5, d0, d2
ldr d19, [sp, #2000] ; 8-byte Folded Reload
fmul d0, d25, d19
ldr q18, [sp, #12240] ; 16-byte Folded Reload
fmul d2, d18, d5
fsub d0, d2, d0
ldr d17, [sp, #2096] ; 8-byte Folded Reload
fmul d2, d25, d17
str q3, [sp, #5312] ; 16-byte Folded Spill
fmul d3, d1, d3
ldur d6, [x29, #-256] ; 8-byte Folded Reload
fsub d2, d3, d2
str q7, [sp, #5328] ; 16-byte Folded Spill
fmul d3, d4, d7
fadd d2, d3, d2
ldr d1, [sp, #9120] ; 8-byte Folded Reload
ldr d3, [sp, #11584] ; 8-byte Folded Reload
fmul d3, d1, d3
ldr d4, [sp, #6456] ; 8-byte Folded Reload
fadd d3, d3, d4
ldr d7, [sp, #12056] ; 8-byte Folded Reload
ldr d1, [sp, #9104] ; 8-byte Folded Reload
fmul d4, d1, d7
fadd d1, d4, d16
str d1, [sp, #2120] ; 8-byte Folded Spill
fmul d4, d7, d1
fadd d3, d4, d3
fmul d3, d3, d17
fadd d2, d2, d3
ldr d3, [sp, #6448] ; 8-byte Folded Reload
ldr d1, [sp, #2080] ; 8-byte Folded Reload
fmul d3, d3, d1
fsub d4, d2, d3
ldr q7, [sp, #11824] ; 16-byte Folded Reload
fmul d2, d7, d4
fsub d0, d0, d2
ldr d3, [sp, #12208] ; 8-byte Folded Reload
fmul d2, d9, d3
fadd d1, d2, d16
ldr d2, [sp, #11784] ; 8-byte Folded Reload
fmul d2, d8, d2
str d1, [sp, #2112] ; 8-byte Folded Spill
fmul d3, d3, d1
fadd d2, d2, d3
fmul d1, d2, d19
fadd d19, d0, d1
ldr d3, [sp, #2064] ; 8-byte Folded Reload
fmul d0, d25, d3
str q5, [sp, #2000] ; 16-byte Folded Spill
fmul d1, d7, d5
fsub d0, d1, d0
str q4, [sp, #5280] ; 16-byte Folded Spill
fmul d1, d18, d4
fadd d0, d0, d1
fmul d1, d2, d3
fadd d20, d0, d1
fmul d0, d6, d8
ldr d1, [sp, #6424] ; 8-byte Folded Reload
fsub d22, d1, d0
fmul d0, d6, d9
str d16, [sp, #1984] ; 8-byte Folded Spill
fadd d21, d0, d16
cbz x8, LBB19_50
; %bb.49:
ldr q17, [sp, #10736] ; 16-byte Folded Reload
fmul d0, d17, d23
ldr q16, [sp, #11136] ; 16-byte Folded Reload
fmul d1, d16, d24
fmul d2, d6, d22
ldr d7, [sp, #11248] ; 8-byte Folded Reload
fmul d3, d7, d9
fadd d2, d3, d2
mov x9, #39127
movk x9, #24179, lsl #16
movk x9, #24811, lsl #32
movk x9, #16304, lsl #48
fmov d3, x9
fmul d4, d2, d3
fsub d1, d1, d4
fsub d0, d0, d1
fadd d0, d0, d0
mov x9, #33620
movk x9, #2364, lsl #16
movk x9, #33974, lsl #32
movk x9, #16305, lsl #48
fmov d1, x9
fmul d4, d25, d1
fmul d5, d17, d19
fsub d4, d5, d4
fmul d5, d16, d20
fsub d4, d4, d5
fmul d5, d7, d8
fmul d6, d6, d21
fadd d5, d5, d6
fmul d6, d5, d1
fadd d4, d4, d6
fadd d4, d4, d4
fadd d0, d0, d4
ldr d18, [sp, #11400] ; 8-byte Folded Reload
fmul d4, d18, d0
fmul d6, d25, d3
fmul d7, d16, d19
fsub d6, d7, d6
fmul d7, d17, d20
fadd d6, d7, d6
fmul d3, d5, d3
fadd d3, d6, d3
fmul d5, d17, d24
fmul d1, d2, d1
fsub d1, d5, d1
fmul d2, d16, d23
fadd d1, d2, d1
fadd d1, d1, d3
ldr d2, [sp, #11160] ; 8-byte Folded Reload
fmul d2, d2, d1
fsub d2, d4, d2
fmul d2, d18, d2
fmov d3, #0.50000000
fmul d2, d2, d3
ldr d4, [sp, #11392] ; 8-byte Folded Reload
fmul d0, d4, d0
ldr d5, [sp, #11240] ; 8-byte Folded Reload
fmul d1, d5, d1
fadd d0, d1, d0
fmul d0, d4, d0
fmul d0, d0, d3
fsub d0, d2, d0
str d0, [x8, #48]
LBB19_50:
str d22, [sp, #1960] ; 8-byte Folded Spill
str d21, [sp, #1968] ; 8-byte Folded Spill
str d25, [sp, #1992] ; 8-byte Folded Spill
str q20, [sp, #5248] ; 16-byte Folded Spill
str q19, [sp, #5264] ; 16-byte Folded Spill
str d9, [sp, #6448] ; 8-byte Folded Spill
str d8, [sp, #6456] ; 8-byte Folded Spill
ldr d9, [sp, #12328] ; 8-byte Folded Reload
ldr d0, [sp, #12256] ; 8-byte Folded Reload
fadd d4, d9, d0
ldr d0, [sp, #12264] ; 8-byte Folded Reload
ldur d1, [x29, #-224] ; 8-byte Folded Reload
fadd d2, d1, d0
ldr d0, [sp, #11912] ; 8-byte Folded Reload
fadd d1, d2, d0
fmov d22, d2
str d2, [sp, #2072] ; 8-byte Folded Spill
ldr d0, [sp, #9808] ; 8-byte Folded Reload
fdiv d2, d1, d0
ldr d0, [sp, #9768] ; 8-byte Folded Reload
fmul d3, d0, d2
ldr d0, [sp, #9760] ; 8-byte Folded Reload
fmul d0, d3, d0
fadd d0, d4, d0
fmov d24, d4
str d4, [sp, #2048] ; 8-byte Folded Spill
ldr d4, [sp, #12288] ; 8-byte Folded Reload
fadd d5, d4, d0
mov x9, #10523
movk x9, #38535, lsl #16
movk x9, #12921, lsl #32
movk x9, #49410, lsl #48
ldr d0, [sp, #10808] ; 8-byte Folded Reload
fmul d0, d0, d5
ldr d4, [sp, #10280] ; 8-byte Folded Reload
fmul d16, d4, d0
ldr d4, [sp, #10856] ; 8-byte Folded Reload
fmul d4, d4, d16
fmov d6, x9
fmul d4, d4, d6
ldr d6, [sp, #10864] ; 8-byte Folded Reload
fmul d6, d6, d0
fmov d14, #0.50000000
fmul d6, d6, d14
ldr d0, [sp, #10200] ; 8-byte Folded Reload
fmul d6, d0, d6
mov x9, #211106232532992
movk x9, #16498, lsl #48
fmov d21, x9
fmul d6, d6, d21
ldr d0, [sp, #10160] ; 8-byte Folded Reload
fmul d4, d0, d4
ldr d0, [sp, #10152] ; 8-byte Folded Reload
fdiv d4, d4, d0
ldr d0, [sp, #10168] ; 8-byte Folded Reload
fmul d4, d0, d4
fadd d17, d6, d4
ldr d0, [sp, #10536] ; 8-byte Folded Reload
fmul d1, d1, d0
ldr d0, [sp, #10824] ; 8-byte Folded Reload
fmul d3, d0, d3
fmul d4, d3, d14
fsub d1, d1, d4
mov x9, #-7378697629483820647
movk x9, #39322
movk x9, #16361, lsl #48
fmov d20, x9
fmul d3, d3, d20
ldr d0, [sp, #9776] ; 8-byte Folded Reload
fmul d3, d3, d0
fmov d0, #5.00000000
fmul d3, d3, d0
fmov d27, #5.00000000
fsub d1, d1, d3
ldr d0, [sp, #10520] ; 8-byte Folded Reload
fdiv d1, d1, d0
ldr d0, [sp, #9792] ; 8-byte Folded Reload
fmul d2, d2, d0
ldr d0, [sp, #10512] ; 8-byte Folded Reload
fmul d3, d0, d1
fsub d13, d3, d2
ldr d0, [sp, #10528] ; 8-byte Folded Reload
fmul d0, d0, d1
ldur d10, [x29, #-248] ; 8-byte Folded Reload
fmul d1, d10, d0
fmov d4, d0
str d0, [sp, #5744] ; 8-byte Folded Spill
ldr d0, [sp, #12232] ; 8-byte Folded Reload
fmul d3, d0, d13
fmov d26, d0
fsub d6, d1, d3
ldr d0, [sp, #10264] ; 8-byte Folded Reload
fmul d1, d0, d5
fmul d1, d1, d14
ldr d0, [sp, #10816] ; 8-byte Folded Reload
fmul d1, d0, d1
mov x9, #4632233691727265792
fmov d19, x9
fmul d1, d1, d19
ldr d0, [sp, #10288] ; 8-byte Folded Reload
fmul d3, d0, d16
fmov d0, #3.00000000
fmul d3, d3, d0
fmov d12, #3.00000000
fadd d5, d1, d3
str d5, [sp, #5768] ; 8-byte Folded Spill
ldr d0, [sp, #11968] ; 8-byte Folded Reload
fmul d1, d0, d13
fmul d3, d10, d5
fsub d2, d1, d3
fmul d1, d26, d5
fmul d3, d0, d4
fmov d29, d0
fsub d23, d1, d3
str d23, [sp, #2104] ; 8-byte Folded Spill
ldur d4, [x29, #-192] ; 8-byte Folded Reload
fmul d3, d4, d6
ldr d0, [sp, #8976] ; 8-byte Folded Reload
fsub d3, d3, d0
fadd d3, d2, d3
ldur d0, [x29, #-160] ; 8-byte Folded Reload
fmul d16, d0, d23
fmov d30, d0
fadd d3, d16, d3
ldr d1, [sp, #11768] ; 8-byte Folded Reload
fmul d16, d1, d6
str d6, [sp, #2064] ; 8-byte Folded Spill
ldr d0, [sp, #7256] ; 8-byte Folded Reload
fsub d16, d16, d0
ldr d28, [sp, #12144] ; 8-byte Folded Reload
fmul d18, d28, d2
fadd d16, d18, d16
ldur d0, [x29, #-232] ; 8-byte Folded Reload
fmul d18, d0, d23
fmov d31, d0
fadd d16, d18, d16
ldr d0, [sp, #7248] ; 8-byte Folded Reload
fsub d5, d16, d0
str d5, [sp, #7248] ; 8-byte Folded Spill
ldr d0, [sp, #8968] ; 8-byte Folded Reload
fsub d0, d3, d0
str d0, [sp, #7256] ; 8-byte Folded Spill
fmul d3, d4, d0
fmul d16, d1, d5
fadd d3, d3, d16
ldr d5, [sp, #11776] ; 8-byte Folded Reload
fmul d16, d5, d6
ldr d0, [sp, #7240] ; 8-byte Folded Reload
fsub d16, d16, d0
ldr d6, [sp, #12152] ; 8-byte Folded Reload
str d2, [sp, #2032] ; 8-byte Folded Spill
fmul d18, d6, d2
fadd d16, d18, d16
ldr d2, [sp, #12296] ; 8-byte Folded Reload
fmul d18, d2, d23
fmov d8, d2
fadd d16, d18, d16
ldr d2, [sp, #7232] ; 8-byte Folded Reload
fsub d2, d16, d2
str d2, [sp, #7240] ; 8-byte Folded Spill
fmul d16, d5, d2
fadd d16, d16, d3
fadd d3, d17, d16
fmul d17, d16, d14
fsub d7, d3, d17
str d7, [sp, #2056] ; 8-byte Folded Spill
ldr d2, [sp, #11744] ; 8-byte Folded Reload
fadd d3, d22, d2
ldr d2, [sp, #10416] ; 8-byte Folded Reload
fmul d17, d3, d2
ldr d2, [sp, #10272] ; 8-byte Folded Reload
fdiv d3, d3, d2
ldr d2, [sp, #10224] ; 8-byte Folded Reload
fmul d25, d2, d3
ldr d2, [sp, #11072] ; 8-byte Folded Reload
fmul d18, d2, d25
fmul d23, d18, d14
fsub d17, d17, d23
fmul d18, d18, d20
ldr d2, [sp, #9752] ; 8-byte Folded Reload
fmul d18, d18, d2
fmul d18, d18, d27
fmov d23, #5.00000000
fsub d17, d17, d18
ldr d2, [sp, #10408] ; 8-byte Folded Reload
fdiv d17, d17, d2
ldr d2, [sp, #10832] ; 8-byte Folded Reload
fmul d3, d3, d2
ldr d2, [sp, #10400] ; 8-byte Folded Reload
fmul d18, d2, d17
fsub d15, d18, d3
ldr d2, [sp, #10472] ; 8-byte Folded Reload
fmul d0, d2, d17
fmul d3, d10, d0
fmov d18, d26
fmul d17, d26, d15
fsub d11, d3, d17
ldr d2, [sp, #9744] ; 8-byte Folded Reload
fmul d3, d25, d2
fadd d3, d24, d3
ldr d2, [sp, #12168] ; 8-byte Folded Reload
fadd d3, d2, d3
ldr d2, [sp, #10240] ; 8-byte Folded Reload
fmul d17, d2, d3
fmul d17, d17, d14
ldr d2, [sp, #10800] ; 8-byte Folded Reload
fmul d17, d2, d17
fmul d17, d17, d19
ldr d2, [sp, #10792] ; 8-byte Folded Reload
fmul d26, d2, d3
ldr d2, [sp, #10248] ; 8-byte Folded Reload
fmul d27, d2, d26
ldr d2, [sp, #10256] ; 8-byte Folded Reload
fmul d3, d2, d27
fmul d3, d3, d12
fmov d24, #3.00000000
fadd d22, d17, d3
str d22, [sp, #7232] ; 8-byte Folded Spill
fmov d2, d29
fmul d3, d29, d15
fmul d17, d10, d22
fsub d29, d3, d17
fmul d3, d18, d22
fmul d17, d2, d0
fmov d12, d0
str d0, [sp, #2088] ; 8-byte Folded Spill
fmov d22, d2
fsub d0, d3, d17
str d0, [sp, #2096] ; 8-byte Folded Spill
fmul d17, d4, d11
ldr d2, [sp, #8848] ; 8-byte Folded Reload
fsub d17, d17, d2
fadd d17, d29, d17
fmul d25, d30, d0
fadd d17, d25, d17
fmul d25, d1, d11
ldr d2, [sp, #7184] ; 8-byte Folded Reload
fsub d25, d25, d2
fmul d28, d28, d29
fadd d25, d28, d25
fmul d28, d31, d0
fadd d25, d28, d25
ldr d2, [sp, #7136] ; 8-byte Folded Reload
fsub d3, d25, d2
str d3, [sp, #2080] ; 8-byte Folded Spill
ldr d2, [sp, #8840] ; 8-byte Folded Reload
fsub d25, d17, d2
fmul d17, d4, d25
fmul d28, d1, d3
fadd d17, d17, d28
fmul d28, d5, d11
ldr d1, [sp, #7096] ; 8-byte Folded Reload
fsub d28, d28, d1
str d29, [sp, #2024] ; 8-byte Folded Spill
fmul d29, d6, d29
fadd d28, d29, d28
fmul d29, d8, d0
fadd d28, d29, d28
ldr d0, [sp, #7072] ; 8-byte Folded Reload
fsub d31, d28, d0
fmul d28, d5, d31
fadd d17, d28, d17
ldr d0, [sp, #10840] ; 8-byte Folded Reload
fmul d27, d0, d27
mov x9, #18811
movk x9, #34700, lsl #16
movk x9, #61210, lsl #32
movk x9, #49411, lsl #48
fmov d28, x9
fmul d27, d27, d28
ldr d0, [sp, #10848] ; 8-byte Folded Reload
fmul d26, d0, d26
fmul d26, d26, d14
ldr d0, [sp, #10208] ; 8-byte Folded Reload
fmul d26, d0, d26
fmul d26, d26, d21
ldr d0, [sp, #10184] ; 8-byte Folded Reload
fmul d27, d0, d27
ldr d0, [sp, #10176] ; 8-byte Folded Reload
fdiv d27, d27, d0
ldr d0, [sp, #10192] ; 8-byte Folded Reload
fmul d27, d0, d27
fadd d26, d26, d27
fsub d16, d7, d16
fsub d16, d16, d17
fadd d26, d26, d17
fmul d17, d17, d14
fsub d17, d26, d17
fadd d29, d17, d16
ldr d0, [sp, #11192] ; 8-byte Folded Reload
ldur d1, [x29, #-224] ; 8-byte Folded Reload
fadd d16, d1, d0
ldr d0, [sp, #8808] ; 8-byte Folded Reload
fdiv d17, d16, d0
ldr d0, [sp, #8816] ; 8-byte Folded Reload
fmul d26, d0, d17
ldr d0, [sp, #8800] ; 8-byte Folded Reload
fmul d27, d26, d0
fadd d27, d9, d27
ldr d0, [sp, #11184] ; 8-byte Folded Reload
fadd d27, d0, d27
ldr d0, [sp, #9688] ; 8-byte Folded Reload
fmul d28, d27, d0
fmul d28, d28, d14
ldr d0, [sp, #10128] ; 8-byte Folded Reload
fmul d28, d0, d28
fmul d7, d28, d19
ldr d0, [sp, #10120] ; 8-byte Folded Reload
fmul d27, d27, d0
ldr d0, [sp, #9696] ; 8-byte Folded Reload
fmul d28, d0, d27
ldr d0, [sp, #9704] ; 8-byte Folded Reload
fmul d8, d0, d28
fmul d8, d8, d24
fadd d1, d8, d7
str d1, [sp, #8808] ; 8-byte Folded Spill
ldr d0, [sp, #8784] ; 8-byte Folded Reload
fmul d7, d26, d0
ldr d0, [sp, #9856] ; 8-byte Folded Reload
fmul d16, d16, d0
fmul d26, d7, d14
fsub d16, d16, d26
fmul d6, d7, d20
ldr d0, [sp, #8776] ; 8-byte Folded Reload
fmul d6, d6, d0
fmul d6, d6, d23
fsub d6, d16, d6
ldr d0, [sp, #8792] ; 8-byte Folded Reload
fmul d7, d17, d0
ldr d0, [sp, #9848] ; 8-byte Folded Reload
fdiv d6, d6, d0
ldr d0, [sp, #9840] ; 8-byte Folded Reload
fmul d6, d0, d6
fsub d0, d6, d7
str d0, [sp, #8800] ; 8-byte Folded Spill
ldr d5, [sp, #11624] ; 8-byte Folded Reload
fmul d8, d5, d0
fmul d6, d4, d8
ldr d0, [sp, #7320] ; 8-byte Folded Reload
fadd d6, d0, d6
fmul d3, d5, d1
str d3, [sp, #9696] ; 8-byte Folded Spill
fmul d16, d30, d3
fsub d6, d16, d6
ldr d0, [sp, #7296] ; 8-byte Folded Reload
fsub d24, d6, d0
ldr d0, [sp, #11688] ; 8-byte Folded Reload
fmul d16, d0, d8
ldr d1, [sp, #7288] ; 8-byte Folded Reload
fadd d16, d1, d16
ldr d1, [sp, #11488] ; 8-byte Folded Reload
fmul d17, d1, d3
fsub d16, d17, d16
ldr d1, [sp, #7280] ; 8-byte Folded Reload
fsub d30, d16, d1
fmul d17, d4, d24
str d24, [sp, #8792] ; 8-byte Folded Spill
fmul d26, d0, d30
str d30, [sp, #7136] ; 8-byte Folded Spill
fadd d26, d17, d26
ldr d0, [sp, #11632] ; 8-byte Folded Reload
fmul d17, d0, d8
ldr d1, [sp, #7272] ; 8-byte Folded Reload
fadd d17, d1, d17
ldr d1, [sp, #11680] ; 8-byte Folded Reload
fmul d9, d1, d3
fsub d17, d9, d17
ldr d1, [sp, #7264] ; 8-byte Folded Reload
fsub d1, d17, d1
str d1, [sp, #9704] ; 8-byte Folded Spill
fmul d9, d0, d1
fadd d26, d9, d26
ldr d0, [sp, #10136] ; 8-byte Folded Reload
fmul d27, d0, d27
fmul d27, d27, d14
ldr d0, [sp, #9680] ; 8-byte Folded Reload
fmul d27, d0, d27
fmul d20, d27, d21
ldr d0, [sp, #10144] ; 8-byte Folded Reload
fmul d27, d0, d28
mov x9, #45572
movk x9, #23979, lsl #16
movk x9, #34811, lsl #32
movk x9, #49413, lsl #48
fmov d28, x9
fmul d27, d27, d28
ldr d0, [sp, #9672] ; 8-byte Folded Reload
fmul d27, d0, d27
ldr d0, [sp, #9656] ; 8-byte Folded Reload
fdiv d27, d27, d0
ldr d0, [sp, #9664] ; 8-byte Folded Reload
fmul d27, d0, d27
fadd d20, d20, d27
fadd d20, d20, d26
fmul d27, d26, d14
fsub d9, d20, d27
ldr d16, [sp, #2064] ; 8-byte Folded Reload
ldr d0, [sp, #11728] ; 8-byte Folded Reload
fmul d20, d0, d16
ldr d0, [sp, #7216] ; 8-byte Folded Reload
fsub d20, d20, d0
ldr d23, [sp, #11360] ; 8-byte Folded Reload
ldr d1, [sp, #7256] ; 8-byte Folded Reload
fmul d27, d23, d1
fadd d20, d20, d27
ldr d0, [sp, #11560] ; 8-byte Folded Reload
fmul d27, d0, d11
fadd d20, d27, d20
ldr d1, [sp, #7200] ; 8-byte Folded Reload
fsub d20, d20, d1
ldr d6, [sp, #11272] ; 8-byte Folded Reload
fmul d25, d6, d25
fadd d25, d25, d20
mov x9, #63706
movk x9, #13221, lsl #16
movk x9, #1281, lsl #32
movk x9, #16209, lsl #48
fmov d1, x9
fmul d27, d29, d1
fmov d18, d1
str d1, [sp, #7184] ; 8-byte Folded Spill
fsub d25, d25, d27
mov x9, #54806
movk x9, #23353, lsl #16
movk x9, #56949, lsl #32
movk x9, #16326, lsl #48
fmov d27, x9
ldr d0, [sp, #2072] ; 8-byte Folded Reload
fmul d1, d0, d27
ldr d0, [sp, #2048] ; 8-byte Folded Reload
fmul d2, d0, d27
fmul d21, d1, d22
fmul d22, d2, d10
fadd d3, d21, d22
ldr d4, [sp, #11920] ; 8-byte Folded Reload
fmul d22, d4, d3
fmov d20, d3
str d3, [sp, #2072] ; 8-byte Folded Spill
ldr d0, [sp, #11168] ; 8-byte Folded Reload
fmul d27, d1, d0
fsub d22, d22, d27
ldr d17, [sp, #12232] ; 8-byte Folded Reload
fmul d7, d2, d17
fmov d19, d2
ldr d0, [sp, #11512] ; 8-byte Folded Reload
fmul d27, d0, d7
fadd d22, d27, d22
ldr d3, [sp, #5744] ; 8-byte Folded Reload
ldr d2, [sp, #12288] ; 8-byte Folded Reload
fmul d27, d2, d3
fsub d22, d22, d27
ldr d2, [sp, #11720] ; 8-byte Folded Reload
fmul d27, d2, d13
fadd d22, d22, d27
ldr d2, [sp, #12168] ; 8-byte Folded Reload
fmul d27, d2, d12
fsub d22, d22, d27
ldr d2, [sp, #11696] ; 8-byte Folded Reload
fmul d27, d2, d15
fadd d22, d27, d22
ldr d4, [sp, #8864] ; 8-byte Folded Reload
fadd d22, d4, d22
ldr d4, [sp, #7224] ; 8-byte Folded Reload
fadd d22, d4, d22
ldr d4, [sp, #7208] ; 8-byte Folded Reload
fadd d4, d4, d22
ldur d2, [x29, #-240] ; 8-byte Folded Reload
str d4, [sp, #2048] ; 8-byte Folded Spill
fmul d22, d2, d4
fmov d12, d2
fadd d22, d22, d25
ldr d4, [sp, #7024] ; 8-byte Folded Reload
fadd d22, d4, d22
ldr d4, [sp, #6984] ; 8-byte Folded Reload
fadd d22, d4, d22
ldr d2, [sp, #11312] ; 8-byte Folded Reload
fmul d25, d1, d2
fmul d27, d10, d20
fsub d25, d25, d27
str d7, [sp, #8776] ; 8-byte Folded Spill
fmul d27, d17, d7
fsub d25, d25, d27
ldr d4, [sp, #8888] ; 8-byte Folded Reload
fadd d14, d25, d4
mov x9, #54125
movk x9, #53060, lsl #16
movk x9, #15481, lsl #32
movk x9, #16273, lsl #48
fmov d2, x9
str d2, [sp, #7096] ; 8-byte Folded Spill
fmul d27, d14, d2
fadd d22, d22, d27
ldr d4, [sp, #10072] ; 8-byte Folded Reload
fsub d21, d4, d1
ldr d25, [sp, #11504] ; 8-byte Folded Reload
fmul d27, d21, d25
ldr d4, [sp, #6896] ; 8-byte Folded Reload
fadd d27, d27, d4
str d13, [sp, #7288] ; 8-byte Folded Spill
str d15, [sp, #7280] ; 8-byte Folded Spill
fadd d10, d13, d15
ldr d4, [sp, #10064] ; 8-byte Folded Reload
str d19, [sp, #10144] ; 8-byte Folded Spill
fadd d20, d19, d4
fmul d28, d20, d5
fadd d4, d28, d10
str d4, [sp, #7272] ; 8-byte Folded Spill
fmul d28, d5, d4
fadd d13, d28, d27
fmul d27, d13, d18
fadd d22, d22, d27
fmul d27, d1, d0
ldr d4, [sp, #8944] ; 8-byte Folded Reload
fsub d3, d4, d27
str d3, [sp, #9672] ; 8-byte Folded Spill
ldr d2, [sp, #11424] ; 8-byte Folded Reload
fmul d27, d3, d2
fadd d22, d27, d22
fmul d27, d19, d0
ldr d4, [sp, #8928] ; 8-byte Folded Reload
fsub d0, d4, d27
str d0, [sp, #9664] ; 8-byte Folded Spill
fmul d4, d0, d12
str d4, [sp, #7264] ; 8-byte Folded Spill
fmul d27, d5, d4
fsub d22, d22, d27
ldr d0, [sp, #11320] ; 8-byte Folded Reload
fmul d27, d0, d8
fsub d22, d22, d27
ldr d4, [sp, #6976] ; 8-byte Folded Reload
fsub d22, d22, d4
ldr d17, [sp, #10872] ; 8-byte Folded Reload
fmul d27, d17, d24
fadd d18, d27, d22
fsub d22, d29, d26
fadd d2, d22, d9
mov x9, #4359484439294640128
mov x10, #62612
movk x10, #18904, lsl #16
movk x10, #1144, lsl #32
movk x10, #16296, lsl #48
fmov d0, x9
str d0, [sp, #7072] ; 8-byte Folded Spill
fmul d22, d18, d0
fmov d27, x10
fmul d26, d2, d27
fsub d15, d22, d26
mov x9, #18456
movk x9, #63321, lsl #16
movk x9, #33926, lsl #32
movk x9, #48991, lsl #48
mov x10, #18456
movk x10, #63321, lsl #16
movk x10, #33926, lsl #32
movk x10, #16223, lsl #48
fmov d22, x10
fmul d22, d13, d22
fmov d0, x9
str d0, [sp, #8784] ; 8-byte Folded Spill
fmul d12, d29, d0
fadd d12, d12, d22
ldr d3, [sp, #12096] ; 8-byte Folded Reload
fmul d22, d3, d16
ldr d4, [sp, #7168] ; 8-byte Folded Reload
fsub d22, d22, d4
ldr d4, [sp, #7240] ; 8-byte Folded Reload
fmul d4, d23, d4
fadd d4, d22, d4
mov x9, #-7378697629483820647
movk x9, #39322
movk x9, #16297, lsl #48
fmov d3, x9
str d3, [sp, #9680] ; 8-byte Folded Spill
ldr d7, [sp, #2056] ; 8-byte Folded Reload
fmul d24, d7, d3
fadd d4, d4, d24
ldr d3, [sp, #12088] ; 8-byte Folded Reload
fmul d24, d3, d11
fadd d4, d24, d4
ldr d7, [sp, #7192] ; 8-byte Folded Reload
fsub d4, d4, d7
fmul d24, d6, d31
fadd d4, d24, d4
ldr d7, [sp, #7160] ; 8-byte Folded Reload
fadd d4, d7, d4
ldr d7, [sp, #7120] ; 8-byte Folded Reload
fadd d4, d7, d4
ldr d7, [sp, #7088] ; 8-byte Folded Reload
fadd d4, d7, d4
mov x9, #56877
movk x9, #10885, lsl #16
movk x9, #2572, lsl #32
movk x9, #16289, lsl #48
fmov d3, x9
str d3, [sp, #9688] ; 8-byte Folded Spill
fmul d31, d14, d3
fadd d4, d4, d31
ldr d7, [sp, #7104] ; 8-byte Folded Reload
fadd d4, d7, d4
ldr d7, [sp, #7056] ; 8-byte Folded Reload
fadd d7, d7, d4
ldr d3, [sp, #12312] ; 8-byte Folded Reload
fmul d4, d3, d7
fmov d24, d3
fadd d4, d4, d12
ldr d3, [sp, #11880] ; 8-byte Folded Reload
fmul d19, d3, d16
ldr d16, [sp, #7040] ; 8-byte Folded Reload
fsub d19, d19, d16
ldr d16, [sp, #7248] ; 8-byte Folded Reload
fmul d31, d23, d16
fadd d19, d19, d31
ldr d0, [sp, #11872] ; 8-byte Folded Reload
fmul d23, d0, d11
fadd d19, d23, d19
ldr d0, [sp, #7016] ; 8-byte Folded Reload
fsub d19, d19, d0
ldr d0, [sp, #2080] ; 8-byte Folded Reload
fmul d23, d6, d0
fadd d19, d23, d19
ldr d0, [sp, #6848] ; 8-byte Folded Reload
fadd d19, d0, d19
ldr d0, [sp, #6960] ; 8-byte Folded Reload
fadd d19, d0, d19
ldr d0, [sp, #6928] ; 8-byte Folded Reload
fadd d19, d0, d19
mov x9, #62994
movk x9, #14722, lsl #16
movk x9, #41829, lsl #32
movk x9, #16247, lsl #48
fmov d26, x9
fmul d23, d14, d26
fadd d19, d19, d23
ldr d0, [sp, #6944] ; 8-byte Folded Reload
fadd d19, d0, d19
ldr d0, [sp, #6880] ; 8-byte Folded Reload
fsub d0, d19, d0
ldr d3, [sp, #12304] ; 8-byte Folded Reload
fmul d19, d3, d0
fmov d16, d3
fadd d4, d19, d4
ldr d3, [sp, #11528] ; 8-byte Folded Reload
fmul d19, d3, d8
fsub d4, d4, d19
ldr d6, [sp, #6864] ; 8-byte Folded Reload
fsub d4, d4, d6
fmul d19, d17, d30
fadd d4, d19, d4
mov x9, #50080
movk x9, #49599, lsl #16
movk x9, #32579, lsl #32
movk x9, #16368, lsl #48
fmov d19, x9
ldur d3, [x29, #-224] ; 8-byte Folded Reload
fmul d6, d3, d19
ldr d3, [sp, #12328] ; 8-byte Folded Reload
fmul d3, d3, d19
fmul d19, d6, d25
fmov d22, d6
str d6, [sp, #10136] ; 8-byte Folded Spill
fmul d6, d3, d5
fmov d28, d3
str d3, [sp, #10128] ; 8-byte Folded Spill
str d6, [sp, #7224] ; 8-byte Folded Spill
fmul d23, d5, d6
fsub d19, d19, d23
ldr d6, [sp, #6912] ; 8-byte Folded Reload
fadd d14, d19, d6
mov x9, #45974
movk x9, #34787, lsl #16
movk x9, #35902, lsl #32
movk x9, #16285, lsl #48
fmov d3, x9
str d3, [sp, #7160] ; 8-byte Folded Spill
fmul d19, d14, d3
fadd d4, d19, d4
mov x9, #4363988038922010624
fmov d3, x9
fmul d30, d4, d3
fmov d6, d3
str d3, [sp, #7024] ; 8-byte Folded Spill
fadd d15, d15, d30
mov x9, #36544
movk x9, #43611, lsl #16
movk x9, #860, lsl #32
movk x9, #16326, lsl #48
fmov d3, x9
str d3, [sp, #7120] ; 8-byte Folded Spill
fmul d29, d29, d3
fmul d30, d13, d3
fsub d29, d29, d30
str d7, [sp, #6984] ; 8-byte Folded Spill
fmul d30, d16, d7
fadd d29, d30, d29
str d0, [sp, #6976] ; 8-byte Folded Spill
fmul d30, d24, d0
fsub d29, d29, d30
ldr d0, [sp, #11432] ; 8-byte Folded Reload
fmul d30, d0, d8
fsub d29, d29, d30
ldr d0, [sp, #7048] ; 8-byte Folded Reload
fsub d29, d29, d0
ldr d0, [sp, #9704] ; 8-byte Folded Reload
fmul d30, d17, d0
fadd d29, d30, d29
mov x9, #43516
movk x9, #54001, lsl #16
movk x9, #25165, lsl #32
movk x9, #16240, lsl #48
fmov d0, x9
str d0, [sp, #7168] ; 8-byte Folded Spill
fmul d8, d9, d0
fadd d8, d8, d29
mov x9, #49235
movk x9, #28989, lsl #16
movk x9, #40841, lsl #32
movk x9, #16312, lsl #48
fmov d0, x9
str d0, [sp, #7192] ; 8-byte Folded Spill
fmul d9, d14, d0
fadd d8, d9, d8
fsub d9, d21, d22
ldr d13, [sp, #9408] ; 8-byte Folded Reload
fadd d17, d13, d9
fmul d9, d17, d25
fmov d22, d25
ldr d0, [sp, #6992] ; 8-byte Folded Reload
fadd d9, d9, d0
ldr d0, [sp, #8800] ; 8-byte Folded Reload
fadd d29, d10, d0
fadd d10, d28, d20
ldr d13, [sp, #9416] ; 8-byte Folded Reload
fadd d16, d13, d10
fmul d10, d16, d5
fadd d0, d10, d29
str d0, [sp, #7216] ; 8-byte Folded Spill
fmul d10, d5, d0
fadd d10, d10, d9
fadd d9, d8, d15
fmul d13, d10, d27
fadd d0, d9, d13
mov x9, #4354980839667269632
fmov d3, x9
str d3, [sp, #7088] ; 8-byte Folded Spill
fmul d13, d18, d3
mov x9, #47272
movk x9, #56762, lsl #16
movk x9, #43178, lsl #32
movk x9, #49060, lsl #48
fmov d3, x9
str d3, [sp, #7056] ; 8-byte Folded Spill
fmul d14, d2, d3
fsub d13, d14, d13
fadd d4, d4, d13
fmul d8, d8, d6
fadd d4, d4, d8
mov x9, #47272
movk x9, #56762, lsl #16
movk x9, #43178, lsl #32
movk x9, #16292, lsl #48
fmov d8, x9
fmul d8, d10, d8
fadd d6, d4, d8
ldr q30, [sp, #11984] ; 16-byte Folded Reload
fmul d4, d30, d0
ldr q31, [sp, #11808] ; 16-byte Folded Reload
fmul d8, d31, d6
fsub d9, d4, d8
mov x9, #11201
movk x9, #50599, lsl #16
movk x9, #31589, lsl #32
movk x9, #16242, lsl #48
fmov d3, x9
fmul d4, d2, d3
fmov d18, d3
str d3, [sp, #7040] ; 8-byte Folded Spill
ldr q12, [sp, #11952] ; 16-byte Folded Reload
fmul d8, d12, d9
fsub d4, d8, d4
mov x9, #43115
movk x9, #62349, lsl #16
movk x9, #30721, lsl #32
movk x9, #16347, lsl #48
fmov d3, x9
fmul d13, d2, d3
fmov d7, d3
str d3, [sp, #7104] ; 8-byte Folded Spill
str d2, [sp, #7320] ; 8-byte Folded Spill
str q6, [sp, #6960] ; 16-byte Folded Spill
fmul d14, d30, d6
fsub d13, d14, d13
str q0, [sp, #6992] ; 16-byte Folded Spill
fmul d14, d31, d0
fadd d13, d14, d13
ldr d24, [sp, #12048] ; 8-byte Folded Reload
fmul d14, d16, d24
fadd d0, d14, d29
ldr d3, [sp, #11520] ; 8-byte Folded Reload
fmul d14, d17, d3
str d0, [sp, #7208] ; 8-byte Folded Spill
fmul d15, d24, d0
fadd d14, d14, d15
fmul d14, d14, d7
fadd d0, d13, d14
ldr q28, [sp, #11792] ; 16-byte Folded Reload
fmul d13, d28, d0
fsub d4, d4, d13
ldr d10, [sp, #12072] ; 8-byte Folded Reload
fmul d13, d16, d10
fadd d6, d13, d29
ldr d3, [sp, #11640] ; 8-byte Folded Reload
fmul d13, d17, d3
str d6, [sp, #7200] ; 8-byte Folded Spill
fmul d14, d10, d6
fadd d14, d13, d14
fmul d13, d14, d18
fadd d3, d4, d13
str q3, [sp, #8816] ; 16-byte Folded Spill
mov x9, #52090
movk x9, #42545, lsl #16
movk x9, #26349, lsl #32
movk x9, #16345, lsl #48
fmov d3, x9
str d3, [sp, #7048] ; 8-byte Folded Spill
fmul d4, d2, d3
str q9, [sp, #6944] ; 16-byte Folded Spill
fmul d15, d28, d9
fsub d4, d15, d4
str q0, [sp, #6928] ; 16-byte Folded Spill
fmul d15, d12, d0
fadd d4, d4, d15
fmul d14, d14, d3
fadd d0, d4, d14
str q0, [sp, #7296] ; 16-byte Folded Spill
ldr d3, [sp, #2104] ; 8-byte Folded Reload
ldr d0, [sp, #11728] ; 8-byte Folded Reload
fmul d4, d0, d3
ldr d0, [sp, #7880] ; 8-byte Folded Reload
fsub d4, d4, d0
ldr d6, [sp, #11472] ; 8-byte Folded Reload
ldr d8, [sp, #7256] ; 8-byte Folded Reload
fmul d14, d6, d8
fadd d4, d4, d14
ldr d11, [sp, #2096] ; 8-byte Folded Reload
ldr d0, [sp, #11560] ; 8-byte Folded Reload
fmul d14, d0, d11
fadd d4, d14, d4
ldr d14, [sp, #7872] ; 8-byte Folded Reload
fsub d4, d4, d14
mov x9, #51491
movk x9, #54360, lsl #16
movk x9, #13074, lsl #32
movk x9, #16286, lsl #48
fmov d14, x9
fadd d4, d4, d14
ldr d0, [sp, #11168] ; 8-byte Folded Reload
ldr d23, [sp, #10144] ; 8-byte Folded Reload
fmul d14, d23, d0
str d1, [sp, #10120] ; 8-byte Folded Spill
ldr d7, [sp, #12232] ; 8-byte Folded Reload
fmul d0, d1, d7
ldr d1, [sp, #11512] ; 8-byte Folded Reload
fmul d15, d1, d0
fadd d14, d15, d14
ldr d15, [sp, #11928] ; 8-byte Folded Reload
ldr d25, [sp, #2072] ; 8-byte Folded Reload
fmul d15, d15, d25
fadd d14, d15, d14
ldr d1, [sp, #11912] ; 8-byte Folded Reload
ldr d2, [sp, #5744] ; 8-byte Folded Reload
fmul d2, d1, d2
fsub d2, d2, d14
ldr d9, [sp, #5768] ; 8-byte Folded Reload
ldr d1, [sp, #11720] ; 8-byte Folded Reload
fmul d14, d1, d9
fsub d2, d2, d14
ldr d1, [sp, #11744] ; 8-byte Folded Reload
ldr d18, [sp, #2088] ; 8-byte Folded Reload
fmul d18, d1, d18
fadd d2, d18, d2
ldr d14, [sp, #7232] ; 8-byte Folded Reload
ldr d1, [sp, #11696] ; 8-byte Folded Reload
fmul d18, d1, d14
fsub d2, d2, d18
ldr d1, [sp, #7864] ; 8-byte Folded Reload
fadd d2, d1, d2
ldr d1, [sp, #8960] ; 8-byte Folded Reload
fadd d2, d1, d2
ldr d1, [sp, #7856] ; 8-byte Folded Reload
fadd d1, d1, d2
str d1, [sp, #7016] ; 8-byte Folded Spill
ldur d19, [x29, #-240] ; 8-byte Folded Reload
fmul d2, d19, d1
fadd d2, d2, d4
fadd d4, d9, d14
str d21, [sp, #9656] ; 8-byte Folded Spill
fmul d18, d21, d5
fsub d1, d4, d18
str d1, [sp, #7872] ; 8-byte Folded Spill
fmul d18, d5, d1
ldr d1, [sp, #8856] ; 8-byte Folded Reload
fadd d18, d18, d1
str d20, [sp, #8856] ; 8-byte Folded Spill
fmul d14, d20, d22
fadd d18, d14, d18
ldr d1, [sp, #7184] ; 8-byte Folded Reload
fmul d20, d18, d1
fsub d2, d2, d20
ldr d1, [sp, #11968] ; 8-byte Folded Reload
fmul d20, d1, d25
str d0, [sp, #7880] ; 8-byte Folded Spill
fmul d21, d7, d0
fadd d20, d21, d20
fmov d1, d23
ldr d0, [sp, #11312] ; 8-byte Folded Reload
fmul d21, d23, d0
fadd d20, d20, d21
ldr d1, [sp, #8952] ; 8-byte Folded Reload
fadd d21, d1, d20
ldr d0, [sp, #7096] ; 8-byte Folded Reload
fmul d20, d21, d0
fadd d2, d20, d2
ldr d0, [sp, #9672] ; 8-byte Folded Reload
fmul d0, d0, d19
str d0, [sp, #7864] ; 8-byte Folded Spill
fmul d20, d5, d0
fadd d2, d20, d2
ldr d0, [sp, #11424] ; 8-byte Folded Reload
ldr d1, [sp, #9664] ; 8-byte Folded Reload
fmul d20, d1, d0
fadd d2, d20, d2
ldr d7, [sp, #9696] ; 8-byte Folded Reload
ldr d0, [sp, #11320] ; 8-byte Folded Reload
fmul d20, d0, d7
fadd d2, d20, d2
ldr d1, [sp, #7848] ; 8-byte Folded Reload
fsub d2, d2, d1
ldr d14, [sp, #11096] ; 8-byte Folded Reload
ldr d19, [sp, #8792] ; 8-byte Folded Reload
fmul d20, d14, d19
fadd d23, d2, d20
ldr d0, [sp, #8808] ; 8-byte Folded Reload
fadd d9, d4, d0
fmul d4, d17, d5
fsub d0, d9, d4
str d0, [sp, #7856] ; 8-byte Folded Spill
fmul d4, d5, d0
ldr d1, [sp, #8832] ; 8-byte Folded Reload
fadd d4, d1, d4
fmul d20, d16, d22
fmov d13, d22
fadd d20, d20, d4
ldr d0, [sp, #7072] ; 8-byte Folded Reload
fmul d4, d23, d0
fmul d25, d20, d27
fsub d4, d4, d25
ldr d0, [sp, #11880] ; 8-byte Folded Reload
fmul d25, d0, d3
ldr d1, [sp, #7840] ; 8-byte Folded Reload
fsub d25, d25, d1
ldr d0, [sp, #7248] ; 8-byte Folded Reload
fmul d27, d6, d0
fadd d25, d25, d27
ldr d1, [sp, #11872] ; 8-byte Folded Reload
fmul d27, d1, d11
fadd d25, d27, d25
ldr d1, [sp, #7832] ; 8-byte Folded Reload
fsub d25, d25, d1
mov x9, #46543
movk x9, #48510, lsl #16
movk x9, #46414, lsl #32
movk x9, #16260, lsl #48
fmov d27, x9
fadd d25, d25, d27
ldr d1, [sp, #7776] ; 8-byte Folded Reload
fadd d25, d1, d25
fmul d27, d21, d26
fadd d25, d25, d27
ldr d1, [sp, #7744] ; 8-byte Folded Reload
fadd d25, d1, d25
ldr d1, [sp, #7728] ; 8-byte Folded Reload
fadd d15, d1, d25
ldr d1, [sp, #8784] ; 8-byte Folded Reload
fmul d25, d18, d1
ldr d2, [sp, #12304] ; 8-byte Folded Reload
fmul d26, d2, d15
fadd d25, d26, d25
ldr d1, [sp, #12096] ; 8-byte Folded Reload
fmul d1, d1, d3
ldr d3, [sp, #7720] ; 8-byte Folded Reload
fsub d1, d1, d3
ldr d22, [sp, #7240] ; 8-byte Folded Reload
fmul d26, d6, d22
fadd d1, d1, d26
fmov d3, d8
ldur d8, [x29, #-160] ; 8-byte Folded Reload
fmul d26, d8, d3
ldur d3, [x29, #-232] ; 8-byte Folded Reload
fmul d27, d3, d0
fadd d26, d26, d27
ldr d0, [sp, #12296] ; 8-byte Folded Reload
fmul d27, d0, d22
fadd d26, d27, d26
ldr q27, [sp, #7296] ; 16-byte Folded Reload
ldr d0, [sp, #9680] ; 8-byte Folded Reload
fmul d22, d26, d0
fadd d1, d1, d22
ldr d0, [sp, #12088] ; 8-byte Folded Reload
fmul d3, d0, d11
fadd d1, d3, d1
ldr d0, [sp, #7688] ; 8-byte Folded Reload
fsub d1, d1, d0
mov x9, #57269
movk x9, #60105, lsl #16
movk x9, #55991, lsl #32
movk x9, #16301, lsl #48
fmov d3, x9
fadd d1, d1, d3
ldr d0, [sp, #7672] ; 8-byte Folded Reload
fadd d1, d0, d1
ldr d0, [sp, #9688] ; 8-byte Folded Reload
fmul d3, d21, d0
fadd d1, d1, d3
ldr d0, [sp, #7664] ; 8-byte Folded Reload
fsub d1, d1, d0
ldr d0, [sp, #7656] ; 8-byte Folded Reload
fadd d6, d0, d1
ldr d26, [sp, #12312] ; 8-byte Folded Reload
fmul d1, d26, d6
fadd d1, d1, d25
fmov d25, d16
fmov d22, d17
ldr d0, [sp, #11528] ; 8-byte Folded Reload
fmul d3, d0, d7
fadd d1, d3, d1
ldr d0, [sp, #7648] ; 8-byte Folded Reload
fsub d1, d1, d0
ldr d16, [sp, #7136] ; 8-byte Folded Reload
fmul d3, d14, d16
fadd d1, d1, d3
mov x9, #26288
movk x9, #13902, lsl #16
movk x9, #44107, lsl #32
movk x9, #16338, lsl #48
fmov d3, x9
fadd d1, d1, d3
ldr d0, [sp, #10136] ; 8-byte Folded Reload
fmul d0, d0, d5
str d0, [sp, #7840] ; 8-byte Folded Spill
fmul d3, d5, d0
ldr d0, [sp, #10128] ; 8-byte Folded Reload
fmul d21, d0, d13
fadd d3, d3, d21
ldr d0, [sp, #7640] ; 8-byte Folded Reload
fadd d3, d3, d0
ldr d0, [sp, #7160] ; 8-byte Folded Reload
fmul d21, d3, d0
fadd d1, d21, d1
ldr d17, [sp, #7024] ; 8-byte Folded Reload
fmul d21, d1, d17
fadd d4, d4, d21
ldr d0, [sp, #7120] ; 8-byte Folded Reload
fmul d18, d18, d0
str d15, [sp, #7184] ; 8-byte Folded Spill
fmul d21, d26, d15
ldr q26, [sp, #8816] ; 16-byte Folded Reload
fsub d18, d18, d21
str d6, [sp, #7160] ; 8-byte Folded Spill
fmul d21, d2, d6
fadd d18, d21, d18
ldr d0, [sp, #11432] ; 8-byte Folded Reload
fmul d7, d0, d7
fadd d7, d7, d18
ldr d0, [sp, #7560] ; 8-byte Folded Reload
fsub d7, d7, d0
ldr d5, [sp, #9704] ; 8-byte Folded Reload
fmul d18, d14, d5
fadd d7, d7, d18
fmul d6, d8, d19
ldr d0, [sp, #11488] ; 8-byte Folded Reload
fmul d16, d0, d16
fadd d6, d6, d16
ldr d0, [sp, #11680] ; 8-byte Folded Reload
fmul d16, d0, d5
fadd d6, d16, d6
ldr d0, [sp, #7168] ; 8-byte Folded Reload
fmul d6, d6, d0
fadd d6, d7, d6
mov x9, #21969
movk x9, #1325, lsl #16
movk x9, #7976, lsl #32
movk x9, #16367, lsl #48
fmov d7, x9
fadd d6, d6, d7
ldr d0, [sp, #7192] ; 8-byte Folded Reload
fmul d3, d3, d0
fadd d3, d3, d6
ldr d0, [sp, #7088] ; 8-byte Folded Reload
fmul d2, d23, d0
ldr d0, [sp, #7056] ; 8-byte Folded Reload
fmul d6, d20, d0
fsub d2, d6, d2
fadd d1, d2, d1
fadd d0, d4, d3
fmul d2, d3, d17
fadd d5, d1, d2
fmul d1, d30, d0
fmul d2, d31, d5
fsub d6, d1, d2
fmul d1, d22, d10
fsub d1, d9, d1
str d1, [sp, #7848] ; 8-byte Folded Spill
fmul d1, d10, d1
ldr d2, [sp, #11640] ; 8-byte Folded Reload
fmul d2, d25, d2
fadd d1, d2, d1
ldr d2, [sp, #7040] ; 8-byte Folded Reload
fmul d2, d1, d2
fmul d3, d12, d6
fsub d2, d3, d2
fmul d3, d22, d24
fsub d3, d9, d3
str d3, [sp, #7832] ; 8-byte Folded Spill
fmul d3, d24, d3
ldr d4, [sp, #11520] ; 8-byte Folded Reload
fmul d4, d25, d4
fadd d3, d4, d3
ldr d4, [sp, #7104] ; 8-byte Folded Reload
fmul d3, d3, d4
str q5, [sp, #7136] ; 16-byte Folded Spill
fmul d4, d30, d5
fsub d3, d4, d3
str q0, [sp, #7168] ; 16-byte Folded Spill
fmul d4, d31, d0
fadd d0, d4, d3
fmul d3, d28, d0
fsub d19, d2, d3
ldr d2, [sp, #7048] ; 8-byte Folded Reload
fmul d1, d1, d2
str q6, [sp, #7120] ; 16-byte Folded Spill
fmul d2, d28, d6
ldur d6, [x29, #-256] ; 8-byte Folded Reload
fsub d1, d2, d1
str q0, [sp, #7104] ; 16-byte Folded Spill
fmul d2, d12, d0
fadd d20, d1, d2
fmul d1, d6, d25
str d29, [sp, #7096] ; 8-byte Folded Spill
fadd d21, d1, d29
ldr d17, [sp, #7320] ; 8-byte Folded Reload
fmul d1, d6, d22
str d9, [sp, #7088] ; 8-byte Folded Spill
fsub d23, d9, d1
fmov d10, #0.50000000
cbz x8, LBB19_52
; %bb.51:
mov x9, #33620
movk x9, #2364, lsl #16
movk x9, #33974, lsl #32
movk x9, #16305, lsl #48
fmov d1, x9
fmul d2, d17, d1
ldr q5, [sp, #10720] ; 16-byte Folded Reload
fmul d3, d5, d26
fsub d2, d3, d2
ldr q0, [sp, #11216] ; 16-byte Folded Reload
fmul d3, d0, d27
fsub d2, d2, d3
ldr d7, [sp, #11248] ; 8-byte Folded Reload
fmul d3, d7, d22
fmul d4, d6, d21
fadd d3, d3, d4
fmul d4, d3, d1
fadd d2, d2, d4
fadd d2, d2, d2
fmul d4, d0, d19
fmul d6, d6, d23
fmul d7, d7, d25
fadd d6, d7, d6
mov x9, #39127
movk x9, #24179, lsl #16
movk x9, #24811, lsl #32
movk x9, #16304, lsl #48
fmov d7, x9
fmul d16, d6, d7
fsub d4, d4, d16
fmul d16, d5, d20
fadd d4, d16, d4
fadd d4, d4, d4
fsub d2, d2, d4
ldr d18, [sp, #11400] ; 8-byte Folded Reload
fmul d4, d18, d2
fmul d16, d17, d7
fmul d17, d0, d26
fsub d16, d17, d16
fmul d17, d5, d27
fadd d16, d17, d16
fmul d3, d3, d7
fadd d3, d16, d3
fmul d7, d5, d19
fmul d1, d6, d1
fsub d1, d7, d1
fmul d6, d0, d20
fsub d1, d1, d6
fadd d1, d1, d3
ldr d0, [sp, #11160] ; 8-byte Folded Reload
fmul d3, d0, d1
fsub d3, d4, d3
fmul d3, d18, d3
fmul d3, d3, d10
ldr d0, [sp, #11392] ; 8-byte Folded Reload
fmul d2, d0, d2
ldr d4, [sp, #11240] ; 8-byte Folded Reload
fmul d1, d4, d1
fadd d1, d1, d2
fmul d1, d0, d1
fmul d1, d1, d10
fsub d1, d3, d1
str d1, [x8, #56]
LBB19_52:
str d23, [sp, #7040] ; 8-byte Folded Spill
str d21, [sp, #7048] ; 8-byte Folded Spill
str q20, [sp, #7056] ; 16-byte Folded Spill
str q19, [sp, #7072] ; 16-byte Folded Spill
str d25, [sp, #9696] ; 8-byte Folded Spill
str d22, [sp, #9704] ; 8-byte Folded Spill
ldr d18, [sp, #12272] ; 8-byte Folded Reload
ldr d1, [sp, #12120] ; 8-byte Folded Reload
fadd d5, d18, d1
ldr d1, [sp, #12128] ; 8-byte Folded Reload
ldr d0, [sp, #12344] ; 8-byte Folded Reload
fadd d2, d0, d1
ldr d0, [sp, #11752] ; 8-byte Folded Reload
fadd d1, d2, d0
fmov d21, d2
str d2, [sp, #7720] ; 8-byte Folded Spill
ldr d0, [sp, #9928] ; 8-byte Folded Reload
fdiv d2, d1, d0
ldr d0, [sp, #9904] ; 8-byte Folded Reload
fmul d3, d0, d2
ldr d0, [sp, #9896] ; 8-byte Folded Reload
fmul d4, d3, d0
fadd d4, d5, d4
fmov d26, d5
str d5, [sp, #7688] ; 8-byte Folded Spill
ldr d0, [sp, #12184] ; 8-byte Folded Reload
fadd d4, d0, d4
mov x9, #10523
movk x9, #38535, lsl #16
movk x9, #12921, lsl #32
movk x9, #49410, lsl #48
ldr d0, [sp, #10896] ; 8-byte Folded Reload
fmul d6, d0, d4
ldr d0, [sp, #10544] ; 8-byte Folded Reload
fmul d16, d0, d6
ldr d0, [sp, #10960] ; 8-byte Folded Reload
fmul d7, d0, d16
fmov d17, x9
fmul d7, d7, d17
ldr d0, [sp, #10976] ; 8-byte Folded Reload
fmul d6, d0, d6
fmul d6, d6, d10
ldr d0, [sp, #10320] ; 8-byte Folded Reload
fmul d6, d0, d6
mov x9, #211106232532992
movk x9, #16498, lsl #48
fmov d22, x9
fmul d6, d6, d22
ldr d0, [sp, #10344] ; 8-byte Folded Reload
fmul d7, d0, d7
ldr d0, [sp, #10312] ; 8-byte Folded Reload
fdiv d7, d7, d0
ldr d0, [sp, #10296] ; 8-byte Folded Reload
fmul d7, d0, d7
fadd d20, d6, d7
ldr d0, [sp, #10624] ; 8-byte Folded Reload
fmul d1, d1, d0
ldr d0, [sp, #10912] ; 8-byte Folded Reload
fmul d3, d3, d0
fmul d6, d3, d10
fsub d1, d1, d6
mov x9, #-7378697629483820647
movk x9, #39322
movk x9, #16361, lsl #48
fmov d17, x9
fmul d3, d3, d17
ldr d0, [sp, #9912] ; 8-byte Folded Reload
fmul d3, d3, d0
fmov d0, #5.00000000
fmul d3, d3, d0
fmov d9, #5.00000000
fsub d1, d1, d3
ldr d0, [sp, #10608] ; 8-byte Folded Reload
fdiv d1, d1, d0
ldr d0, [sp, #9920] ; 8-byte Folded Reload
fmul d2, d2, d0
ldr d0, [sp, #10600] ; 8-byte Folded Reload
fmul d3, d0, d1
fsub d13, d3, d2
ldr d0, [sp, #10616] ; 8-byte Folded Reload
fmul d0, d0, d1
ldr d12, [sp, #12320] ; 8-byte Folded Reload
fmul d1, d12, d0
fmov d5, d0
str d0, [sp, #11168] ; 8-byte Folded Spill
ldur d0, [x29, #-184] ; 8-byte Folded Reload
fmul d3, d0, d13
fmov d25, d0
fsub d14, d1, d3
ldr d0, [sp, #10480] ; 8-byte Folded Reload
fmul d1, d0, d4
fmul d1, d1, d10
ldr d0, [sp, #10904] ; 8-byte Folded Reload
fmul d1, d0, d1
mov x9, #4632233691727265792
fmov d7, x9
fmul d1, d1, d7
ldr d0, [sp, #10568] ; 8-byte Folded Reload
fmul d3, d0, d16
fmov d0, #3.00000000
fmul d3, d3, d0
fadd d4, d1, d3
str d4, [sp, #8832] ; 8-byte Folded Spill
ldr d6, [sp, #11888] ; 8-byte Folded Reload
fmul d1, d6, d13
fmul d3, d12, d4
fsub d2, d1, d3
fmul d1, d25, d4
fmul d3, d6, d5
fsub d5, d1, d3
str d5, [sp, #7776] ; 8-byte Folded Spill
ldur d8, [x29, #-176] ; 8-byte Folded Reload
fmul d3, d8, d14
ldr d0, [sp, #9288] ; 8-byte Folded Reload
fsub d3, d3, d0
fadd d3, d2, d3
ldur d0, [x29, #-168] ; 8-byte Folded Reload
fmul d4, d0, d5
fmov d28, d0
fadd d3, d4, d3
ldr d1, [sp, #11864] ; 8-byte Folded Reload
fmul d4, d1, d14
ldr d0, [sp, #7920] ; 8-byte Folded Reload
fsub d4, d4, d0
ldr d27, [sp, #12200] ; 8-byte Folded Reload
fmul d16, d27, d2
fadd d4, d16, d4
ldur d0, [x29, #-216] ; 8-byte Folded Reload
fmul d16, d0, d5
fmov d31, d0
fadd d4, d16, d4
ldr d0, [sp, #7912] ; 8-byte Folded Reload
fsub d4, d4, d0
str d4, [sp, #9688] ; 8-byte Folded Spill
ldr d0, [sp, #9280] ; 8-byte Folded Reload
fsub d19, d3, d0
fmul d3, d8, d19
str d19, [sp, #8784] ; 8-byte Folded Spill
fmul d4, d1, d4
fadd d3, d3, d4
ldr d30, [sp, #12016] ; 8-byte Folded Reload
fmul d4, d30, d14
ldr d0, [sp, #7904] ; 8-byte Folded Reload
fsub d4, d4, d0
ldr d29, [sp, #12160] ; 8-byte Folded Reload
str d2, [sp, #7560] ; 8-byte Folded Spill
fmul d16, d29, d2
fadd d4, d16, d4
ldr d0, [sp, #12336] ; 8-byte Folded Reload
fmul d16, d0, d5
fmov d5, d0
fadd d4, d16, d4
ldr d0, [sp, #7888] ; 8-byte Folded Reload
fsub d0, d4, d0
str d0, [sp, #9680] ; 8-byte Folded Spill
fmul d4, d30, d0
fadd d24, d4, d3
fadd d3, d20, d24
fmul d4, d24, d10
fsub d11, d3, d4
str d11, [sp, #7912] ; 8-byte Folded Spill
ldr d0, [sp, #11672] ; 8-byte Folded Reload
fadd d3, d21, d0
ldr d0, [sp, #10584] ; 8-byte Folded Reload
fmul d4, d3, d0
ldr d0, [sp, #10496] ; 8-byte Folded Reload
fdiv d3, d3, d0
ldr d0, [sp, #10424] ; 8-byte Folded Reload
fmul d23, d0, d3
ldr d0, [sp, #11104] ; 8-byte Folded Reload
fmul d16, d23, d0
fmul d20, d16, d10
fsub d4, d4, d20
fmul d16, d16, d17
ldr d0, [sp, #9888] ; 8-byte Folded Reload
fmul d16, d16, d0
fmul d16, d16, d9
fsub d4, d4, d16
ldr d0, [sp, #10576] ; 8-byte Folded Reload
fdiv d4, d4, d0
ldr d0, [sp, #10936] ; 8-byte Folded Reload
fmul d3, d3, d0
ldr d0, [sp, #10392] ; 8-byte Folded Reload
fmul d16, d0, d4
fsub d15, d16, d3
ldr d0, [sp, #10592] ; 8-byte Folded Reload
fmul d21, d0, d4
fmul d3, d12, d21
fmov d16, d25
fmul d4, d25, d15
fsub d20, d3, d4
ldr d0, [sp, #9880] ; 8-byte Folded Reload
fmul d3, d23, d0
fadd d3, d26, d3
ldr d0, [sp, #12176] ; 8-byte Folded Reload
fadd d3, d0, d3
ldr d0, [sp, #10432] ; 8-byte Folded Reload
fmul d4, d0, d3
fmul d4, d4, d10
ldr d0, [sp, #10888] ; 8-byte Folded Reload
fmul d4, d0, d4
fmul d4, d4, d7
ldr d0, [sp, #10880] ; 8-byte Folded Reload
fmul d25, d0, d3
ldr d0, [sp, #10440] ; 8-byte Folded Reload
fmul d26, d0, d25
ldr d0, [sp, #10448] ; 8-byte Folded Reload
fmul d3, d0, d26
fmov d0, #3.00000000
fmul d3, d3, d0
fadd d0, d4, d3
str d0, [sp, #8792] ; 8-byte Folded Spill
fmul d3, d6, d15
fmul d4, d12, d0
fsub d2, d3, d4
fmul d3, d16, d0
fmul d4, d6, d21
str d21, [sp, #7744] ; 8-byte Folded Spill
fmov d16, d6
fsub d6, d3, d4
str d6, [sp, #7904] ; 8-byte Folded Spill
fmul d4, d8, d20
ldr d0, [sp, #8920] ; 8-byte Folded Reload
fsub d4, d4, d0
fadd d4, d2, d4
fmov d3, d28
fmul d23, d28, d6
fadd d4, d23, d4
fmul d23, d1, d20
ldr d0, [sp, #7712] ; 8-byte Folded Reload
fsub d23, d23, d0
fmul d27, d27, d2
fadd d23, d27, d23
fmul d27, d31, d6
fadd d23, d27, d23
ldr d0, [sp, #7704] ; 8-byte Folded Reload
fsub d27, d23, d0
str d27, [sp, #7920] ; 8-byte Folded Spill
ldr d0, [sp, #8896] ; 8-byte Folded Reload
fsub d23, d4, d0
fmul d4, d8, d23
fmul d28, d1, d27
fadd d4, d4, d28
fmul d28, d30, d20
ldr d0, [sp, #7696] ; 8-byte Folded Reload
fsub d28, d28, d0
str d2, [sp, #7192] ; 8-byte Folded Spill
fmul d29, d29, d2
fadd d28, d29, d28
fmul d29, d5, d6
fadd d28, d29, d28
ldr d0, [sp, #7680] ; 8-byte Folded Reload
fsub d28, d28, d0
fmul d29, d30, d28
fadd d4, d29, d4
ldr d0, [sp, #10944] ; 8-byte Folded Reload
fmul d26, d0, d26
mov x9, #18811
movk x9, #34700, lsl #16
movk x9, #61210, lsl #32
movk x9, #49411, lsl #48
fmov d29, x9
fmul d26, d26, d29
ldr d0, [sp, #10952] ; 8-byte Folded Reload
fmul d25, d0, d25
fmul d25, d25, d10
ldr d0, [sp, #10336] ; 8-byte Folded Reload
fmul d25, d0, d25
fmul d25, d25, d22
ldr d0, [sp, #10352] ; 8-byte Folded Reload
fmul d26, d0, d26
ldr d0, [sp, #10328] ; 8-byte Folded Reload
fdiv d26, d26, d0
ldr d0, [sp, #10304] ; 8-byte Folded Reload
fmul d26, d0, d26
fadd d25, d25, d26
fsub d24, d11, d24
fsub d24, d24, d4
fadd d25, d25, d4
fmul d4, d4, d10
fsub d4, d25, d4
fadd d26, d4, d24
ldr d0, [sp, #11208] ; 8-byte Folded Reload
ldr d1, [sp, #12344] ; 8-byte Folded Reload
fadd d4, d1, d0
ldr d0, [sp, #9064] ; 8-byte Folded Reload
fdiv d24, d4, d0
ldr d0, [sp, #9080] ; 8-byte Folded Reload
fmul d25, d0, d24
ldr d0, [sp, #9056] ; 8-byte Folded Reload
fmul d29, d25, d0
fadd d29, d18, d29
ldr d0, [sp, #11200] ; 8-byte Folded Reload
fadd d29, d0, d29
ldr d0, [sp, #9816] ; 8-byte Folded Reload
fmul d30, d29, d0
fmul d30, d30, d10
ldr d0, [sp, #10368] ; 8-byte Folded Reload
fmul d30, d0, d30
fmul d7, d30, d7
ldr d0, [sp, #10360] ; 8-byte Folded Reload
fmul d30, d29, d0
ldr d0, [sp, #9824] ; 8-byte Folded Reload
fmul d31, d0, d30
ldr d0, [sp, #9832] ; 8-byte Folded Reload
fmul d29, d0, d31
fmov d0, #3.00000000
fmul d5, d29, d0
fadd d1, d5, d7
str d1, [sp, #9824] ; 8-byte Folded Spill
ldr d0, [sp, #9024] ; 8-byte Folded Reload
fmul d5, d25, d0
ldr d0, [sp, #9952] ; 8-byte Folded Reload
fmul d4, d4, d0
fmul d7, d5, d10
fsub d4, d4, d7
fmul d5, d5, d17
ldr d0, [sp, #9008] ; 8-byte Folded Reload
fmul d5, d5, d0
fmov d0, #5.00000000
fmul d5, d5, d0
fsub d4, d4, d5
ldr d0, [sp, #9048] ; 8-byte Folded Reload
fmul d5, d24, d0
ldr d0, [sp, #9944] ; 8-byte Folded Reload
fdiv d4, d4, d0
ldr d0, [sp, #9936] ; 8-byte Folded Reload
fmul d4, d0, d4
fsub d0, d4, d5
str d0, [sp, #9816] ; 8-byte Folded Spill
ldr d5, [sp, #11368] ; 8-byte Folded Reload
fmul d29, d5, d0
fmul d4, d8, d29
ldr d0, [sp, #7968] ; 8-byte Folded Reload
fadd d4, d0, d4
fmul d2, d5, d1
str d2, [sp, #7888] ; 8-byte Folded Spill
fmul d6, d3, d2
fsub d4, d6, d4
ldr d0, [sp, #7960] ; 8-byte Folded Reload
fsub d1, d4, d0
ldr d4, [sp, #12136] ; 8-byte Folded Reload
fmul d6, d4, d29
ldr d0, [sp, #7952] ; 8-byte Folded Reload
fadd d6, d0, d6
ldr d0, [sp, #11496] ; 8-byte Folded Reload
fmul d7, d0, d2
fsub d6, d7, d6
ldr d0, [sp, #7944] ; 8-byte Folded Reload
fsub d0, d6, d0
str d0, [sp, #9064] ; 8-byte Folded Spill
fmul d7, d8, d1
fmov d17, d1
str d1, [sp, #7728] ; 8-byte Folded Spill
fmul d24, d4, d0
fadd d24, d7, d24
ldr d4, [sp, #11896] ; 8-byte Folded Reload
fmul d7, d4, d29
ldr d0, [sp, #7936] ; 8-byte Folded Reload
fadd d7, d0, d7
ldr d0, [sp, #11760] ; 8-byte Folded Reload
fmul d25, d0, d2
fsub d7, d25, d7
ldr d0, [sp, #7928] ; 8-byte Folded Reload
fsub d0, d7, d0
str d0, [sp, #9080] ; 8-byte Folded Spill
fmul d25, d4, d0
fadd d24, d25, d24
ldr d0, [sp, #10376] ; 8-byte Folded Reload
fmul d25, d0, d30
fmul d25, d25, d10
ldr d0, [sp, #9712] ; 8-byte Folded Reload
fmul d25, d0, d25
fmul d22, d25, d22
ldr d0, [sp, #10384] ; 8-byte Folded Reload
fmul d25, d0, d31
mov x9, #45572
movk x9, #23979, lsl #16
movk x9, #34811, lsl #32
movk x9, #49413, lsl #48
fmov d30, x9
fmul d25, d25, d30
ldr d0, [sp, #9736] ; 8-byte Folded Reload
fmul d25, d0, d25
ldr d0, [sp, #9728] ; 8-byte Folded Reload
fdiv d25, d25, d0
ldr d0, [sp, #9720] ; 8-byte Folded Reload
fmul d25, d0, d25
fadd d22, d22, d25
fadd d22, d22, d24
fmul d0, d24, d10
fsub d30, d22, d0
ldr d0, [sp, #11904] ; 8-byte Folded Reload
fmul d0, d0, d14
ldr d1, [sp, #7824] ; 8-byte Folded Reload
fsub d0, d0, d1
ldr d6, [sp, #11384] ; 8-byte Folded Reload
fmul d22, d6, d19
fadd d0, d0, d22
ldr d1, [sp, #11664] ; 8-byte Folded Reload
fmul d22, d1, d20
fadd d0, d22, d0
ldr d1, [sp, #7816] ; 8-byte Folded Reload
fsub d0, d0, d1
ldr d3, [sp, #11280] ; 8-byte Folded Reload
fmul d22, d3, d23
fadd d22, d22, d0
mov x9, #63706
movk x9, #13221, lsl #16
movk x9, #1281, lsl #32
movk x9, #16209, lsl #48
fmov d0, x9
fmul d23, d26, d0
fmov d7, d0
str d0, [sp, #10376] ; 8-byte Folded Spill
fadd d22, d22, d23
mov x9, #54806
movk x9, #23353, lsl #16
movk x9, #56949, lsl #32
movk x9, #16326, lsl #48
fmov d23, x9
ldr d0, [sp, #7720] ; 8-byte Folded Reload
fmul d2, d0, d23
ldr d0, [sp, #7688] ; 8-byte Folded Reload
fmul d9, d0, d23
fmul d18, d2, d16
fmul d19, d9, d12
fadd d0, d18, d19
ldr d4, [sp, #11936] ; 8-byte Folded Reload
fmul d19, d4, d0
fmov d16, d0
str d0, [sp, #7720] ; 8-byte Folded Spill
ldr d0, [sp, #11176] ; 8-byte Folded Reload
fmul d23, d2, d0
fsub d19, d19, d23
ldur d1, [x29, #-184] ; 8-byte Folded Reload
fmul d4, d9, d1
ldr d8, [sp, #11568] ; 8-byte Folded Reload
fmul d23, d8, d4
fadd d19, d23, d19
ldr d0, [sp, #11168] ; 8-byte Folded Reload
ldr d18, [sp, #12184] ; 8-byte Folded Reload
fmul d23, d18, d0
fsub d19, d19, d23
ldr d0, [sp, #11736] ; 8-byte Folded Reload
fmul d23, d0, d13
fadd d19, d19, d23
ldr d0, [sp, #12176] ; 8-byte Folded Reload
fmul d23, d0, d21
fsub d19, d19, d23
ldr d0, [sp, #11648] ; 8-byte Folded Reload
fmul d23, d0, d15
fadd d19, d23, d19
ldr d0, [sp, #8992] ; 8-byte Folded Reload
fadd d19, d0, d19
ldr d0, [sp, #7792] ; 8-byte Folded Reload
fadd d19, d0, d19
ldr d0, [sp, #7768] ; 8-byte Folded Reload
fadd d0, d0, d19
ldr d18, [sp, #12280] ; 8-byte Folded Reload
str d0, [sp, #7640] ; 8-byte Folded Spill
fmul d19, d18, d0
fmov d21, d18
fadd d19, d19, d22
ldr d0, [sp, #7632] ; 8-byte Folded Reload
fadd d19, d0, d19
ldr d0, [sp, #7624] ; 8-byte Folded Reload
fadd d19, d0, d19
ldr d0, [sp, #11328] ; 8-byte Folded Reload
fmul d22, d2, d0
fmul d23, d12, d16
fsub d22, d22, d23
str d4, [sp, #9056] ; 8-byte Folded Spill
fmul d23, d1, d4
fsub d22, d22, d23
ldr d0, [sp, #9072] ; 8-byte Folded Reload
fadd d11, d22, d0
mov x9, #54125
movk x9, #53060, lsl #16
movk x9, #15481, lsl #32
movk x9, #16273, lsl #48
fmov d0, x9
str d0, [sp, #9712] ; 8-byte Folded Spill
fmul d23, d11, d0
fsub d19, d19, d23
ldr d4, [sp, #10088] ; 8-byte Folded Reload
fsub d18, d4, d2
ldr d27, [sp, #11536] ; 8-byte Folded Reload
fmul d23, d18, d27
ldr d0, [sp, #7592] ; 8-byte Folded Reload
fadd d23, d23, d0
str d13, [sp, #7968] ; 8-byte Folded Spill
str d15, [sp, #7960] ; 8-byte Folded Spill
fadd d31, d13, d15
ldr d4, [sp, #10080] ; 8-byte Folded Reload
fadd d22, d9, d4
fmul d25, d22, d5
fadd d0, d25, d31
str d0, [sp, #7952] ; 8-byte Folded Spill
fmul d25, d5, d0
fadd d10, d25, d23
fmul d23, d10, d7
fsub d19, d19, d23
fmul d23, d2, d8
ldr d0, [sp, #9256] ; 8-byte Folded Reload
fsub d1, d0, d23
str d1, [sp, #9832] ; 8-byte Folded Spill
ldr d0, [sp, #11440] ; 8-byte Folded Reload
fmul d23, d1, d0
fadd d19, d23, d19
fmul d23, d9, d8
ldr d0, [sp, #9248] ; 8-byte Folded Reload
fsub d25, d0, d23
fmul d0, d25, d21
str d0, [sp, #7944] ; 8-byte Folded Spill
fmul d23, d0, d5
fsub d19, d19, d23
ldr d0, [sp, #11352] ; 8-byte Folded Reload
fmul d23, d0, d29
fsub d19, d19, d23
ldr d0, [sp, #7616] ; 8-byte Folded Reload
fsub d19, d19, d0
ldr d7, [sp, #10968] ; 8-byte Folded Reload
fmul d23, d7, d17
fadd d0, d23, d19
fsub d19, d26, d24
fadd d16, d19, d30
mov x9, #4359484439294640128
mov x10, #62612
movk x10, #18904, lsl #16
movk x10, #1144, lsl #32
movk x10, #49064, lsl #48
fmov d1, x9
str d1, [sp, #7792] ; 8-byte Folded Spill
fmul d19, d0, d1
fmov d24, x10
fmul d23, d16, d24
fsub d12, d23, d19
mov x9, #18456
movk x9, #63321, lsl #16
movk x9, #33926, lsl #32
movk x9, #48991, lsl #48
mov x10, #18456
movk x10, #63321, lsl #16
movk x10, #33926, lsl #32
movk x10, #16223, lsl #48
fmov d19, x10
fmul d19, d10, d19
fmov d1, x9
str d1, [sp, #7816] ; 8-byte Folded Spill
fmul d8, d26, d1
fadd d8, d8, d19
ldr d1, [sp, #12192] ; 8-byte Folded Reload
fmul d19, d1, d14
ldr d4, [sp, #7608] ; 8-byte Folded Reload
fsub d19, d19, d4
ldr d1, [sp, #9680] ; 8-byte Folded Reload
fmul d13, d6, d1
fadd d13, d19, d13
mov x9, #-7378697629483820647
movk x9, #39322
movk x9, #16297, lsl #48
fmov d1, x9
str d1, [sp, #9720] ; 8-byte Folded Spill
ldr d4, [sp, #7912] ; 8-byte Folded Reload
fmul d21, d4, d1
fadd d21, d13, d21
ldr d1, [sp, #12104] ; 8-byte Folded Reload
fmul d13, d1, d20
fadd d21, d13, d21
ldr d4, [sp, #7600] ; 8-byte Folded Reload
fsub d21, d21, d4
fmul d28, d3, d28
fadd d21, d28, d21
ldr d4, [sp, #7568] ; 8-byte Folded Reload
fadd d21, d4, d21
ldr d4, [sp, #7536] ; 8-byte Folded Reload
fadd d21, d4, d21
ldr d4, [sp, #7520] ; 8-byte Folded Reload
fadd d28, d4, d21
mov x9, #56877
movk x9, #10885, lsl #16
movk x9, #2572, lsl #32
movk x9, #16289, lsl #48
fmov d1, x9
str d1, [sp, #7824] ; 8-byte Folded Spill
fmul d13, d11, d1
fadd d28, d28, d13
ldr d4, [sp, #7504] ; 8-byte Folded Reload
fadd d28, d4, d28
ldr d4, [sp, #7488] ; 8-byte Folded Reload
fadd d21, d4, d28
ldur d1, [x29, #-200] ; 8-byte Folded Reload
fmul d28, d1, d21
fmov d19, d1
fadd d28, d28, d8
ldr d1, [sp, #11976] ; 8-byte Folded Reload
fmul d17, d1, d14
ldr d4, [sp, #7472] ; 8-byte Folded Reload
fsub d17, d17, d4
ldr d1, [sp, #9688] ; 8-byte Folded Reload
fmul d8, d6, d1
fadd d17, d17, d8
ldr d1, [sp, #12064] ; 8-byte Folded Reload
fmul d20, d1, d20
fadd d17, d20, d17
ldr d4, [sp, #7456] ; 8-byte Folded Reload
fsub d17, d17, d4
ldr d1, [sp, #7920] ; 8-byte Folded Reload
fmul d20, d3, d1
fadd d17, d20, d17
ldr d3, [sp, #7440] ; 8-byte Folded Reload
fadd d17, d3, d17
ldr d3, [sp, #7432] ; 8-byte Folded Reload
fadd d17, d3, d17
ldr d3, [sp, #7408] ; 8-byte Folded Reload
fadd d17, d3, d17
mov x9, #62994
movk x9, #14722, lsl #16
movk x9, #41829, lsl #32
movk x9, #16247, lsl #48
fmov d1, x9
str d1, [sp, #7664] ; 8-byte Folded Spill
fmul d20, d11, d1
fadd d17, d17, d20
ldr d3, [sp, #7392] ; 8-byte Folded Reload
fadd d17, d3, d17
ldr d3, [sp, #7384] ; 8-byte Folded Reload
fsub d4, d17, d3
ldur d1, [x29, #-208] ; 8-byte Folded Reload
fmul d17, d1, d4
fmov d6, d1
fadd d17, d17, d28
ldr d1, [sp, #11600] ; 8-byte Folded Reload
fmul d20, d1, d29
fsub d17, d17, d20
ldr d3, [sp, #7344] ; 8-byte Folded Reload
fsub d17, d17, d3
ldr d1, [sp, #9064] ; 8-byte Folded Reload
fmul d20, d7, d1
fadd d17, d20, d17
mov x9, #50080
movk x9, #49599, lsl #16
movk x9, #32579, lsl #32
movk x9, #16368, lsl #48
fmov d20, x9
ldr d1, [sp, #12344] ; 8-byte Folded Reload
fmul d1, d1, d20
ldr d3, [sp, #12272] ; 8-byte Folded Reload
fmul d3, d3, d20
fmov d23, d27
fmul d20, d1, d27
fmov d8, d1
str d1, [sp, #10368] ; 8-byte Folded Spill
fmul d1, d3, d5
fmov d28, d3
str d3, [sp, #10360] ; 8-byte Folded Spill
str d1, [sp, #7936] ; 8-byte Folded Spill
fmul d27, d5, d1
fsub d20, d20, d27
ldr d3, [sp, #7360] ; 8-byte Folded Reload
fadd d11, d20, d3
mov x9, #45974
movk x9, #34787, lsl #16
movk x9, #35902, lsl #32
movk x9, #16285, lsl #48
fmov d1, x9
str d1, [sp, #7768] ; 8-byte Folded Spill
fmul d27, d11, d1
fadd d13, d27, d17
mov x9, #4363988038922010624
fmov d1, x9
fmul d27, d13, d1
fmov d17, d1
str d1, [sp, #7648] ; 8-byte Folded Spill
fadd d12, d12, d27
mov x9, #36544
movk x9, #43611, lsl #16
movk x9, #860, lsl #32
movk x9, #16326, lsl #48
fmov d1, x9
str d1, [sp, #7696] ; 8-byte Folded Spill
fmul d26, d26, d1
fmul d27, d10, d1
fsub d26, d26, d27
str d21, [sp, #7624] ; 8-byte Folded Spill
fmul d27, d6, d21
fadd d26, d27, d26
str d4, [sp, #7616] ; 8-byte Folded Spill
fmul d27, d19, d4
fsub d26, d26, d27
ldr d1, [sp, #11464] ; 8-byte Folded Reload
fmul d27, d1, d29
fsub d26, d26, d27
ldr d3, [sp, #7336] ; 8-byte Folded Reload
fsub d26, d26, d3
ldr d1, [sp, #9080] ; 8-byte Folded Reload
fmul d27, d7, d1
fadd d26, d27, d26
mov x9, #43516
movk x9, #54001, lsl #16
movk x9, #25165, lsl #32
movk x9, #16240, lsl #48
fmov d1, x9
str d1, [sp, #7704] ; 8-byte Folded Spill
fmul d29, d30, d1
fadd d29, d29, d26
mov x9, #49235
movk x9, #28989, lsl #16
movk x9, #40841, lsl #32
movk x9, #16312, lsl #48
fmov d1, x9
str d1, [sp, #7712] ; 8-byte Folded Spill
fmul d30, d11, d1
fadd d29, d30, d29
fsub d30, d18, d8
ldr d10, [sp, #9488] ; 8-byte Folded Reload
fadd d6, d10, d30
fmul d30, d6, d23
fmov d27, d23
ldr d3, [sp, #7328] ; 8-byte Folded Reload
fadd d30, d30, d3
ldr d1, [sp, #9816] ; 8-byte Folded Reload
fadd d15, d31, d1
fadd d31, d28, d22
ldr d10, [sp, #9496] ; 8-byte Folded Reload
fadd d21, d10, d31
fmul d31, d21, d5
fadd d1, d31, d15
str d1, [sp, #7928] ; 8-byte Folded Spill
fmul d31, d5, d1
fadd d30, d31, d30
fadd d31, d29, d12
mov x9, #62612
movk x9, #18904, lsl #16
movk x9, #1144, lsl #32
movk x9, #16296, lsl #48
fmov d10, x9
fmul d10, d30, d10
fadd d3, d31, d10
mov x9, #4354980839667269632
fmov d1, x9
str d1, [sp, #7688] ; 8-byte Folded Spill
fmul d10, d0, d1
mov x9, #47272
movk x9, #56762, lsl #16
movk x9, #43178, lsl #32
movk x9, #16292, lsl #48
fmov d0, x9
str d0, [sp, #7680] ; 8-byte Folded Spill
fmul d11, d16, d0
fsub d10, d10, d11
fadd d10, d13, d10
fmul d29, d29, d17
fadd d29, d10, d29
fmul d30, d30, d0
fadd d0, d29, d30
ldr q31, [sp, #12000] ; 16-byte Folded Reload
fmul d29, d31, d3
ldr q8, [sp, #11840] ; 16-byte Folded Reload
fmul d30, d8, d0
fsub d1, d29, d30
mov x9, #11201
movk x9, #50599, lsl #16
movk x9, #31589, lsl #32
movk x9, #16242, lsl #48
fmov d4, x9
fmul d29, d16, d4
fmov d7, d4
str d4, [sp, #7656] ; 8-byte Folded Spill
ldr q4, [sp, #12240] ; 16-byte Folded Reload
fmul d10, d4, d1
mov.16b v17, v4
fsub d10, d10, d29
mov x9, #43115
movk x9, #62349, lsl #16
movk x9, #30721, lsl #32
movk x9, #16347, lsl #48
fmov d4, x9
str d4, [sp, #7672] ; 8-byte Folded Spill
fmul d11, d16, d4
str d16, [sp, #9048] ; 8-byte Folded Spill
str q0, [sp, #7536] ; 16-byte Folded Spill
fmul d12, d31, d0
fsub d11, d12, d11
str q3, [sp, #7568] ; 16-byte Folded Spill
fmul d12, d8, d3
fadd d11, d12, d11
ldr d29, [sp, #12056] ; 8-byte Folded Reload
fmul d12, d21, d29
fadd d0, d12, d15
ldr d3, [sp, #11584] ; 8-byte Folded Reload
fmul d12, d6, d3
str d0, [sp, #7920] ; 8-byte Folded Spill
fmul d13, d29, d0
fadd d12, d12, d13
fmul d12, d12, d4
fadd d0, d11, d12
ldr q30, [sp, #11824] ; 16-byte Folded Reload
fmul d11, d30, d0
fsub d10, d10, d11
ldr d28, [sp, #12208] ; 8-byte Folded Reload
fmul d11, d21, d28
fadd d3, d11, d15
ldr d4, [sp, #11784] ; 8-byte Folded Reload
fmul d11, d6, d4
str d3, [sp, #7912] ; 8-byte Folded Spill
fmul d12, d28, d3
fadd d11, d11, d12
fmul d12, d11, d7
fadd d3, d10, d12
str q3, [sp, #9024] ; 16-byte Folded Spill
mov x9, #52090
movk x9, #42545, lsl #16
movk x9, #26349, lsl #32
movk x9, #16345, lsl #48
fmov d10, x9
fmul d12, d16, d10
str q1, [sp, #7520] ; 16-byte Folded Spill
fmul d13, d30, d1
fsub d12, d13, d12
str q0, [sp, #7504] ; 16-byte Folded Spill
fmul d13, d17, d0
fadd d12, d12, d13
fmul d11, d11, d10
fadd d0, d12, d11
str q0, [sp, #9008] ; 16-byte Folded Spill
ldr d19, [sp, #7776] ; 8-byte Folded Reload
ldr d0, [sp, #11904] ; 8-byte Folded Reload
fmul d11, d0, d19
ldr d0, [sp, #8184] ; 8-byte Folded Reload
fsub d11, d11, d0
ldr d4, [sp, #11376] ; 8-byte Folded Reload
ldr d20, [sp, #8784] ; 8-byte Folded Reload
fmul d12, d4, d20
fadd d11, d11, d12
ldr d14, [sp, #7904] ; 8-byte Folded Reload
ldr d0, [sp, #11664] ; 8-byte Folded Reload
fmul d12, d0, d14
fadd d11, d12, d11
ldr d0, [sp, #8152] ; 8-byte Folded Reload
fsub d11, d11, d0
mov x9, #51491
movk x9, #54360, lsl #16
movk x9, #13074, lsl #32
movk x9, #49054, lsl #48
fmov d12, x9
fadd d11, d11, d12
ldr d0, [sp, #11176] ; 8-byte Folded Reload
fmul d12, d9, d0
str d2, [sp, #10384] ; 8-byte Folded Spill
ldur d3, [x29, #-184] ; 8-byte Folded Reload
fmul d1, d2, d3
ldr d0, [sp, #11568] ; 8-byte Folded Reload
fmul d13, d0, d1
fadd d12, d13, d12
ldr d13, [sp, #11944] ; 8-byte Folded Reload
ldr d26, [sp, #7720] ; 8-byte Folded Reload
fmul d13, d13, d26
fadd d12, d13, d12
ldr d0, [sp, #11752] ; 8-byte Folded Reload
ldr d2, [sp, #11168] ; 8-byte Folded Reload
fmul d2, d0, d2
fsub d2, d2, d12
ldr d13, [sp, #8832] ; 8-byte Folded Reload
ldr d0, [sp, #11736] ; 8-byte Folded Reload
fmul d12, d0, d13
fsub d2, d2, d12
ldr d0, [sp, #11672] ; 8-byte Folded Reload
ldr d7, [sp, #7744] ; 8-byte Folded Reload
fmul d16, d0, d7
fadd d2, d16, d2
ldr d12, [sp, #8792] ; 8-byte Folded Reload
ldr d0, [sp, #11648] ; 8-byte Folded Reload
fmul d16, d0, d12
fsub d2, d2, d16
ldr d0, [sp, #8144] ; 8-byte Folded Reload
fadd d2, d0, d2
ldr d0, [sp, #9264] ; 8-byte Folded Reload
fadd d2, d0, d2
ldr d0, [sp, #8136] ; 8-byte Folded Reload
fadd d0, d0, d2
str d0, [sp, #7632] ; 8-byte Folded Spill
ldr d17, [sp, #12280] ; 8-byte Folded Reload
fmul d2, d17, d0
fadd d16, d2, d11
fadd d11, d13, d12
str d18, [sp, #9736] ; 8-byte Folded Spill
fmul d2, d18, d5
fsub d0, d11, d2
str d0, [sp, #8152] ; 8-byte Folded Spill
fmul d2, d5, d0
ldr d0, [sp, #9168] ; 8-byte Folded Reload
fadd d2, d2, d0
str d22, [sp, #9728] ; 8-byte Folded Spill
fmul d12, d22, d23
fadd d2, d12, d2
ldr d0, [sp, #10376] ; 8-byte Folded Reload
fmul d0, d2, d0
fadd d0, d16, d0
ldr d7, [sp, #11888] ; 8-byte Folded Reload
fmul d16, d7, d26
str d1, [sp, #9168] ; 8-byte Folded Spill
fmul d18, d3, d1
fadd d16, d18, d16
str d9, [sp, #10376] ; 8-byte Folded Spill
ldr d1, [sp, #11328] ; 8-byte Folded Reload
fmul d18, d9, d1
fadd d16, d16, d18
ldr d1, [sp, #9272] ; 8-byte Folded Reload
fadd d18, d1, d16
ldr d1, [sp, #9712] ; 8-byte Folded Reload
fmul d16, d18, d1
fsub d0, d0, d16
ldr d1, [sp, #9832] ; 8-byte Folded Reload
fmul d1, d1, d17
str d1, [sp, #8144] ; 8-byte Folded Spill
fmul d16, d1, d5
fadd d0, d16, d0
str d25, [sp, #8184] ; 8-byte Folded Spill
ldr d1, [sp, #11440] ; 8-byte Folded Reload
fmul d16, d25, d1
fadd d0, d16, d0
ldr d7, [sp, #7888] ; 8-byte Folded Reload
ldr d1, [sp, #11352] ; 8-byte Folded Reload
fmul d16, d1, d7
fadd d0, d16, d0
ldr d1, [sp, #8128] ; 8-byte Folded Reload
fsub d0, d0, d1
ldr d12, [sp, #11128] ; 8-byte Folded Reload
ldr d17, [sp, #7728] ; 8-byte Folded Reload
fmul d16, d12, d17
fadd d26, d0, d16
ldr d1, [sp, #9824] ; 8-byte Folded Reload
fadd d11, d11, d1
fmul d16, d6, d5
fsub d1, d11, d16
str d1, [sp, #8136] ; 8-byte Folded Spill
fmul d16, d5, d1
ldr d1, [sp, #9160] ; 8-byte Folded Reload
fadd d16, d1, d16
fmul d22, d21, d23
fadd d16, d22, d16
ldr d1, [sp, #7792] ; 8-byte Folded Reload
fmul d22, d26, d1
fmul d24, d16, d24
fsub d22, d24, d22
ldr d0, [sp, #11976] ; 8-byte Folded Reload
fmul d24, d0, d19
ldr d1, [sp, #8120] ; 8-byte Folded Reload
fsub d24, d24, d1
ldr d3, [sp, #9688] ; 8-byte Folded Reload
fmul d25, d4, d3
fadd d24, d24, d25
ldr d0, [sp, #12064] ; 8-byte Folded Reload
fmul d25, d0, d14
fadd d24, d25, d24
ldr d1, [sp, #8112] ; 8-byte Folded Reload
fsub d24, d24, d1
mov x9, #46543
movk x9, #48510, lsl #16
movk x9, #46414, lsl #32
movk x9, #16260, lsl #48
fmov d25, x9
fadd d24, d24, d25
ldr d1, [sp, #8096] ; 8-byte Folded Reload
fadd d24, d1, d24
ldr d0, [sp, #7664] ; 8-byte Folded Reload
fmul d25, d18, d0
fadd d24, d24, d25
ldr d1, [sp, #8080] ; 8-byte Folded Reload
fadd d24, d1, d24
ldr d1, [sp, #8072] ; 8-byte Folded Reload
fadd d9, d1, d24
ldr d1, [sp, #7816] ; 8-byte Folded Reload
fmul d23, d2, d1
ldur d0, [x29, #-208] ; 8-byte Folded Reload
fmul d24, d0, d9
fadd d23, d24, d23
ldr d1, [sp, #12192] ; 8-byte Folded Reload
fmul d1, d1, d19
ldr d19, [sp, #8064] ; 8-byte Folded Reload
fsub d1, d1, d19
ldr d19, [sp, #9680] ; 8-byte Folded Reload
fmul d24, d4, d19
fadd d1, d1, d24
ldur d13, [x29, #-168] ; 8-byte Folded Reload
fmul d24, d13, d20
ldur d4, [x29, #-216] ; 8-byte Folded Reload
fmul d25, d4, d3
fadd d24, d24, d25
ldr d3, [sp, #12336] ; 8-byte Folded Reload
fmul d25, d3, d19
fadd d24, d25, d24
fmov d25, d6
ldr d3, [sp, #9720] ; 8-byte Folded Reload
fmul d19, d24, d3
fadd d1, d1, d19
ldr d3, [sp, #12104] ; 8-byte Folded Reload
fmul d3, d3, d14
fadd d1, d3, d1
ldr d3, [sp, #8056] ; 8-byte Folded Reload
fsub d1, d1, d3
mov x9, #57269
movk x9, #60105, lsl #16
movk x9, #55991, lsl #32
movk x9, #16301, lsl #48
fmov d3, x9
fadd d1, d1, d3
ldr d3, [sp, #8032] ; 8-byte Folded Reload
fadd d1, d3, d1
ldr d3, [sp, #7824] ; 8-byte Folded Reload
fmul d3, d18, d3
fadd d1, d1, d3
ldr d3, [sp, #8048] ; 8-byte Folded Reload
fsub d1, d1, d3
ldr d3, [sp, #8040] ; 8-byte Folded Reload
fadd d4, d3, d1
ldur d19, [x29, #-200] ; 8-byte Folded Reload
fmul d1, d19, d4
fadd d1, d1, d23
ldr d3, [sp, #11600] ; 8-byte Folded Reload
fmul d3, d3, d7
fadd d1, d3, d1
ldr d3, [sp, #8024] ; 8-byte Folded Reload
fsub d1, d1, d3
ldr d6, [sp, #9064] ; 8-byte Folded Reload
fmul d3, d12, d6
fadd d1, d1, d3
mov x9, #26288
movk x9, #13902, lsl #16
movk x9, #44107, lsl #32
movk x9, #16338, lsl #48
fmov d3, x9
fadd d1, d1, d3
ldr d3, [sp, #10368] ; 8-byte Folded Reload
fmul d3, d3, d5
str d3, [sp, #8120] ; 8-byte Folded Spill
fmul d3, d5, d3
ldr d5, [sp, #10360] ; 8-byte Folded Reload
fmul d18, d5, d27
fadd d3, d3, d18
ldr d5, [sp, #8016] ; 8-byte Folded Reload
fadd d3, d3, d5
ldr d5, [sp, #7768] ; 8-byte Folded Reload
fmul d18, d3, d5
fadd d1, d18, d1
ldr d20, [sp, #7648] ; 8-byte Folded Reload
fmul d18, d1, d20
fadd d18, d22, d18
ldr d5, [sp, #7696] ; 8-byte Folded Reload
fmul d2, d2, d5
str d9, [sp, #7664] ; 8-byte Folded Spill
fmul d19, d19, d9
fsub d2, d2, d19
str d4, [sp, #7608] ; 8-byte Folded Spill
fmul d19, d0, d4
fadd d2, d19, d2
ldr d0, [sp, #11464] ; 8-byte Folded Reload
fmul d5, d0, d7
ldur d19, [x29, #-256] ; 8-byte Folded Reload
fadd d2, d5, d2
ldr d4, [sp, #8008] ; 8-byte Folded Reload
fsub d2, d2, d4
ldr d7, [sp, #9080] ; 8-byte Folded Reload
fmul d5, d12, d7
fadd d2, d2, d5
fmul d4, d13, d17
ldr d5, [sp, #11496] ; 8-byte Folded Reload
fmul d5, d5, d6
fadd d4, d4, d5
ldr d5, [sp, #11760] ; 8-byte Folded Reload
fmul d5, d5, d7
fadd d4, d5, d4
ldr d0, [sp, #7704] ; 8-byte Folded Reload
fmul d4, d4, d0
fadd d2, d2, d4
mov x9, #21969
movk x9, #1325, lsl #16
movk x9, #7976, lsl #32
movk x9, #16367, lsl #48
fmov d4, x9
fadd d2, d2, d4
ldr d0, [sp, #7712] ; 8-byte Folded Reload
fmul d3, d3, d0
fadd d2, d3, d2
ldr d0, [sp, #7688] ; 8-byte Folded Reload
fmul d0, d26, d0
ldr d3, [sp, #7680] ; 8-byte Folded Reload
fmul d3, d16, d3
fsub d0, d0, d3
fadd d0, d0, d1
fadd d4, d18, d2
fmul d1, d2, d20
fadd d5, d0, d1
fmul d0, d31, d4
fmul d1, d8, d5
fsub d16, d0, d1
fmul d0, d25, d28
fsub d0, d11, d0
str d0, [sp, #8128] ; 8-byte Folded Spill
fmul d0, d28, d0
ldr q26, [sp, #9024] ; 16-byte Folded Reload
ldr d1, [sp, #11784] ; 8-byte Folded Reload
fmul d1, d21, d1
fadd d0, d1, d0
ldr d1, [sp, #7656] ; 8-byte Folded Reload
fmul d1, d0, d1
ldr q6, [sp, #12240] ; 16-byte Folded Reload
fmul d2, d6, d16
fsub d1, d2, d1
fmul d2, d25, d29
fsub d2, d11, d2
str d2, [sp, #8112] ; 8-byte Folded Spill
fmul d2, d29, d2
ldr q27, [sp, #9008] ; 16-byte Folded Reload
ldr d3, [sp, #11584] ; 8-byte Folded Reload
fmul d3, d21, d3
fadd d2, d3, d2
ldr d3, [sp, #7672] ; 8-byte Folded Reload
fmul d2, d2, d3
str q5, [sp, #7472] ; 16-byte Folded Spill
fmul d3, d31, d5
fsub d2, d3, d2
str q4, [sp, #7488] ; 16-byte Folded Spill
fmul d3, d8, d4
fadd d3, d3, d2
fmul d2, d30, d3
fsub d22, d1, d2
fmul d0, d0, d10
str q16, [sp, #7456] ; 16-byte Folded Spill
fmul d1, d30, d16
ldr d7, [sp, #9048] ; 8-byte Folded Reload
fsub d0, d1, d0
str q3, [sp, #7440] ; 16-byte Folded Spill
fmul d1, d6, d3
fadd d20, d0, d1
fmul d0, d19, d21
str d15, [sp, #7432] ; 8-byte Folded Spill
fadd d23, d0, d15
fmul d0, d19, d25
str d11, [sp, #7384] ; 8-byte Folded Spill
fsub d24, d11, d0
cbz x8, LBB19_54
; %bb.53:
mov x9, #33620
movk x9, #2364, lsl #16
movk x9, #33974, lsl #32
movk x9, #16305, lsl #48
fmov d0, x9
fmul d1, d7, d0
ldr q17, [sp, #10736] ; 16-byte Folded Reload
fmul d2, d17, d26
fsub d1, d2, d1
ldr q16, [sp, #11136] ; 16-byte Folded Reload
fmul d2, d16, d27
fsub d1, d1, d2
ldr d5, [sp, #11248] ; 8-byte Folded Reload
fmul d2, d5, d25
fmul d3, d19, d23
fadd d2, d2, d3
fmul d3, d2, d0
fadd d1, d1, d3
fadd d1, d1, d1
fmul d3, d16, d22
fmul d4, d19, d24
fmul d5, d5, d21
fadd d4, d5, d4
mov x9, #39127
movk x9, #24179, lsl #16
movk x9, #24811, lsl #32
movk x9, #16304, lsl #48
fmov d5, x9
fmul d6, d4, d5
fsub d3, d3, d6
fmul d6, d17, d20
fadd d3, d6, d3
fadd d3, d3, d3
fsub d1, d1, d3
ldr d18, [sp, #11400] ; 8-byte Folded Reload
fmul d3, d18, d1
fmul d6, d7, d5
fmul d7, d16, d26
fsub d6, d7, d6
fmul d7, d17, d27
fadd d6, d7, d6
fmul d2, d2, d5
fadd d2, d6, d2
fmul d5, d17, d22
fmul d0, d4, d0
fsub d0, d5, d0
fmul d4, d16, d20
fsub d0, d0, d4
fadd d0, d0, d2
ldr d2, [sp, #11160] ; 8-byte Folded Reload
fmul d2, d2, d0
fsub d2, d3, d2
fmul d2, d18, d2
fmov d3, #0.50000000
fmul d2, d2, d3
ldr d4, [sp, #11392] ; 8-byte Folded Reload
fmul d1, d4, d1
ldr d5, [sp, #11240] ; 8-byte Folded Reload
fmul d0, d5, d0
fadd d0, d0, d1
fmul d0, d4, d0
fmul d0, d0, d3
fsub d0, d2, d0
str d0, [x8, #64]
LBB19_54:
str d24, [sp, #7328] ; 8-byte Folded Spill
str d23, [sp, #7336] ; 8-byte Folded Spill
str q20, [sp, #7344] ; 16-byte Folded Spill
str q22, [sp, #7360] ; 16-byte Folded Spill
str d25, [sp, #9712] ; 8-byte Folded Spill
str d21, [sp, #9720] ; 8-byte Folded Spill
ldr d0, [sp, #8976] ; 8-byte Folded Reload
ldr d1, [sp, #8968] ; 8-byte Folded Reload
fadd d0, d1, d0
str d0, [sp, #9080] ; 8-byte Folded Spill
ldr d0, [sp, #8848] ; 8-byte Folded Reload
ldr d1, [sp, #8840] ; 8-byte Folded Reload
fadd d0, d1, d0
str d0, [sp, #9160] ; 8-byte Folded Spill
ldur d14, [x29, #-160] ; 8-byte Folded Reload
ldr d3, [sp, #11912] ; 8-byte Folded Reload
fmul d0, d14, d3
ldur d13, [x29, #-192] ; 8-byte Folded Reload
ldr d6, [sp, #12288] ; 8-byte Folded Reload
fmul d1, d13, d6
fsub d2, d0, d1
ldr d7, [sp, #11720] ; 8-byte Folded Reload
fmul d0, d13, d7
fsub d4, d0, d3
fmov d30, d3
ldr d28, [sp, #9808] ; 8-byte Folded Reload
fdiv d3, d4, d28
ldr d0, [sp, #9768] ; 8-byte Folded Reload
fmul d1, d0, d3
fdiv d0, d2, d28
ldr d26, [sp, #3888] ; 8-byte Folded Reload
fmul d5, d26, d0
fadd d18, d5, d1
ldr d1, [sp, #9760] ; 8-byte Folded Reload
fmul d1, d18, d1
fmul d5, d14, d7
fmov d27, d7
fadd d1, d5, d1
fsub d1, d1, d6
ldr d5, [sp, #10264] ; 8-byte Folded Reload
fmul d5, d5, d1
fmov d6, #0.50000000
fmul d5, d5, d6
fmov d22, #0.50000000
ldr d6, [sp, #10816] ; 8-byte Folded Reload
fmul d5, d6, d5
ldr d6, [sp, #10808] ; 8-byte Folded Reload
fmul d7, d6, d1
str d7, [sp, #8056] ; 8-byte Folded Spill
mov x9, #4632233691727265792
fmov d6, x9
fmul d1, d5, d6
ldr d5, [sp, #10280] ; 8-byte Folded Reload
fmul d7, d5, d7
str d7, [sp, #8048] ; 8-byte Folded Spill
ldr d5, [sp, #10288] ; 8-byte Folded Reload
fmul d5, d5, d7
fmov d21, #3.00000000
fmul d5, d5, d21
fadd d29, d1, d5
ldr d17, [sp, #11744] ; 8-byte Folded Reload
fmul d1, d14, d17
ldr d7, [sp, #12168] ; 8-byte Folded Reload
fmul d5, d13, d7
fmov d24, d7
fsub d7, d1, d5
str d7, [sp, #10280] ; 8-byte Folded Spill
ldr d20, [sp, #11696] ; 8-byte Folded Reload
fmul d1, d13, d20
fsub d16, d1, d17
fmov d10, d17
ldr d15, [sp, #10272] ; 8-byte Folded Reload
fdiv d5, d16, d15
str d5, [sp, #8064] ; 8-byte Folded Spill
ldr d1, [sp, #10224] ; 8-byte Folded Reload
fmul d5, d1, d5
fdiv d1, d7, d15
str d1, [sp, #10288] ; 8-byte Folded Spill
ldr d25, [sp, #3864] ; 8-byte Folded Reload
fmul d17, d25, d1
fadd d1, d17, d5
str d1, [sp, #9768] ; 8-byte Folded Spill
ldr d5, [sp, #9744] ; 8-byte Folded Reload
fmul d5, d1, d5
fmul d19, d14, d20
fmov d9, d20
fadd d5, d19, d5
fsub d5, d5, d24
ldr d19, [sp, #10240] ; 8-byte Folded Reload
fmul d19, d19, d5
fmul d19, d19, d22
ldr d20, [sp, #10800] ; 8-byte Folded Reload
fmul d19, d20, d19
fmul d6, d19, d6
ldr d19, [sp, #10792] ; 8-byte Folded Reload
fmul d19, d19, d5
str d19, [sp, #8840] ; 8-byte Folded Spill
ldr d5, [sp, #10248] ; 8-byte Folded Reload
fmul d19, d5, d19
str d19, [sp, #8096] ; 8-byte Folded Spill
ldr d5, [sp, #10256] ; 8-byte Folded Reload
fmul d5, d5, d19
fmul d5, d5, d21
fadd d1, d6, d5
str d1, [sp, #10792] ; 8-byte Folded Spill
mov x9, #54806
movk x9, #23353, lsl #16
movk x9, #56949, lsl #32
movk x9, #16326, lsl #48
fmov d12, x9
fmul d5, d13, d12
str d5, [sp, #11168] ; 8-byte Folded Spill
ldr d8, [sp, #11512] ; 8-byte Folded Reload
fmul d6, d5, d8
ldr d19, [sp, #10072] ; 8-byte Folded Reload
fadd d5, d6, d19
str d5, [sp, #10808] ; 8-byte Folded Spill
fadd d7, d29, d1
str d7, [sp, #10800] ; 8-byte Folded Spill
ldr d20, [sp, #12048] ; 8-byte Folded Reload
fmul d6, d5, d20
fadd d5, d6, d7
fmul d1, d14, d12
str d1, [sp, #10816] ; 8-byte Folded Spill
fmul d6, d1, d8
ldr d19, [sp, #10064] ; 8-byte Folded Reload
fsub d1, d19, d6
str d1, [sp, #11176] ; 8-byte Folded Spill
str d5, [sp, #8848] ; 8-byte Folded Spill
fmul d6, d20, d5
ldr d5, [sp, #11520] ; 8-byte Folded Reload
fmul d19, d1, d5
fsub d1, d6, d19
str d1, [sp, #8976] ; 8-byte Folded Spill
ldr d5, [sp, #10104] ; 8-byte Folded Reload
ldr d1, [sp, #7896] ; 8-byte Folded Reload
fmul d6, d1, d5
ldr d5, [sp, #10096] ; 8-byte Folded Reload
ldr d1, [sp, #9128] ; 8-byte Folded Reload
fmul d19, d1, d5
fadd d6, d6, d19
ldr d24, [sp, #9576] ; 8-byte Folded Reload
ldr d1, [sp, #11312] ; 8-byte Folded Reload
fmul d19, d24, d1
fadd d6, d19, d6
ldr d5, [sp, #6600] ; 8-byte Folded Reload
fmul d19, d8, d5
fsub d19, d6, d19
ldur d1, [x29, #-248] ; 8-byte Folded Reload
fmul d6, d24, d1
ldr d31, [sp, #11968] ; 8-byte Folded Reload
ldr d5, [sp, #10056] ; 8-byte Folded Reload
fmul d20, d5, d31
fadd d6, d6, d20
mov x9, #3449
movk x9, #18764, lsl #16
movk x9, #45194, lsl #32
movk x9, #16217, lsl #48
fmov d20, x9
ldr d5, [sp, #3808] ; 8-byte Folded Reload
fadd d5, d5, d20
ldr d22, [sp, #12232] ; 8-byte Folded Reload
str d5, [sp, #10264] ; 8-byte Folded Spill
fmul d20, d5, d22
fadd d5, d20, d6
str d5, [sp, #10224] ; 8-byte Folded Spill
fmul d20, d31, d5
ldr d6, [sp, #6104] ; 8-byte Folded Reload
fmul d21, d22, d6
fsub d20, d20, d21
str d12, [sp, #8072] ; 8-byte Folded Spill
fmul d19, d19, d12
fmul d20, d20, d12
fadd d21, d20, d19
ldr d23, [sp, #10832] ; 8-byte Folded Reload
fmul d19, d25, d23
fdiv d7, d19, d15
fmul d20, d10, d7
ldr d5, [sp, #11072] ; 8-byte Folded Reload
fmul d25, d9, d5
fadd d20, d25, d20
mov x9, #11213
movk x9, #64899, lsl #16
movk x9, #2195, lsl #32
movk x9, #49148, lsl #48
fmov d25, x9
fmul d25, d8, d25
fadd d25, d20, d25
ldr d11, [sp, #9792] ; 8-byte Folded Reload
fmul d20, d26, d11
fdiv d6, d20, d28
fmul d26, d30, d6
ldr d12, [sp, #10824] ; 8-byte Folded Reload
fmov d19, d27
fmul d27, d27, d12
fadd d26, d27, d26
fadd d25, d26, d25
fadd d25, d25, d21
fdiv d21, d6, d28
fmov d26, d6
fmul d2, d2, d21
ldr d6, [sp, #10536] ; 8-byte Folded Reload
fmul d4, d4, d6
fadd d2, d2, d4
fmul d4, d12, d18
fmov d17, #0.50000000
fmul d18, d4, d17
fsub d2, d18, d2
mov x9, #-7378697629483820647
movk x9, #39322
movk x9, #16361, lsl #48
fmov d18, x9
fmul d4, d4, d18
ldr d6, [sp, #9776] ; 8-byte Folded Reload
fmul d4, d4, d6
fmov d30, #5.00000000
fmul d4, d4, d30
fadd d2, d4, d2
fmul d0, d0, d11
ldr d4, [sp, #10520] ; 8-byte Folded Reload
fdiv d4, d2, d4
ldr d2, [sp, #10528] ; 8-byte Folded Reload
fmul d2, d2, d4
fadd d2, d0, d2
fmul d0, d31, d2
fmov d20, d2
fmul d2, d22, d29
fadd d10, d2, d0
ldr d6, [sp, #11728] ; 8-byte Folded Reload
fmul d2, d6, d10
fsub d25, d25, d2
fmov d27, d26
str d26, [sp, #8040] ; 8-byte Folded Spill
fmul d2, d13, d26
fadd d9, d12, d2
fmul d26, d6, d9
fsub d25, d25, d26
fmul d3, d3, d11
ldr d6, [sp, #10512] ; 8-byte Folded Reload
fmul d4, d6, d4
fadd d6, d3, d4
str d6, [sp, #10256] ; 8-byte Folded Spill
fmul d3, d1, d20
fmov d8, d20
str d20, [sp, #8008] ; 8-byte Folded Spill
fmul d4, d22, d6
fsub d3, d3, d4
fmul d4, d13, d3
fmov d11, d3
ldr d21, [sp, #9080] ; 8-byte Folded Reload
fadd d26, d21, d4
fmul d4, d14, d27
ldr d3, [sp, #9152] ; 8-byte Folded Reload
fsub d0, d4, d3
str d0, [sp, #9808] ; 8-byte Folded Spill
fmul d27, d13, d0
fadd d26, d27, d26
fmul d27, d31, d6
fmul d28, d1, d29
fmov d4, d1
fadd d20, d28, d27
fadd d26, d20, d26
fmul d27, d14, d10
fsub d26, d26, d27
fmul d27, d14, d9
fsub d1, d26, d27
str d1, [sp, #10824] ; 8-byte Folded Spill
ldr d0, [sp, #11472] ; 8-byte Folded Reload
fmul d26, d0, d1
fadd d26, d26, d25
fmov d2, d7
str d7, [sp, #7712] ; 8-byte Folded Spill
fdiv d25, d7, d15
ldr d0, [sp, #10280] ; 8-byte Folded Reload
fmul d7, d0, d25
ldr d3, [sp, #10416] ; 8-byte Folded Reload
fmul d16, d16, d3
fadd d7, d7, d16
ldr d0, [sp, #9768] ; 8-byte Folded Reload
fmul d16, d5, d0
fmul d17, d16, d17
fsub d7, d17, d7
fmul d16, d16, d18
ldr d3, [sp, #9752] ; 8-byte Folded Reload
fmul d16, d16, d3
fmul d16, d16, d30
fadd d7, d16, d7
ldr d0, [sp, #10288] ; 8-byte Folded Reload
fmul d16, d0, d23
ldr d1, [sp, #10408] ; 8-byte Folded Reload
fdiv d0, d7, d1
str d0, [sp, #7720] ; 8-byte Folded Spill
ldr d3, [sp, #10472] ; 8-byte Folded Reload
fmul d7, d3, d0
fadd d1, d16, d7
fmul d7, d31, d1
fmov d3, d1
str d1, [sp, #7704] ; 8-byte Folded Spill
ldr d1, [sp, #10792] ; 8-byte Folded Reload
fmul d16, d22, d1
fadd d15, d16, d7
ldr d17, [sp, #11560] ; 8-byte Folded Reload
fmul d7, d17, d15
fsub d7, d26, d7
fmul d16, d13, d2
fadd d12, d5, d16
fmul d16, d17, d12
fsub d7, d7, d16
mov x9, #51491
movk x9, #54360, lsl #16
movk x9, #13074, lsl #32
movk x9, #16286, lsl #48
fmov d16, x9
fadd d7, d7, d16
ldr d6, [sp, #10816] ; 8-byte Folded Reload
fmul d16, d6, d31
ldr d30, [sp, #11168] ; 8-byte Folded Reload
fmul d17, d30, d4
fsub d5, d16, d17
fmul d16, d24, d5
fmov d28, d5
ldr d0, [sp, #10224] ; 8-byte Folded Reload
fmul d17, d30, d0
fsub d16, d16, d17
ldr d0, [sp, #11912] ; 8-byte Folded Reload
fmul d17, d0, d8
fadd d16, d16, d17
str d29, [sp, #8968] ; 8-byte Folded Spill
fmul d17, d19, d29
fadd d16, d17, d16
ldr d0, [sp, #11744] ; 8-byte Folded Reload
fmul d17, d0, d3
fadd d16, d17, d16
ldr d0, [sp, #11696] ; 8-byte Folded Reload
fmul d17, d0, d1
fadd d16, d17, d16
fmul d17, d30, d22
ldr d3, [sp, #9368] ; 8-byte Folded Reload
fadd d1, d17, d3
ldr d25, [sp, #7896] ; 8-byte Folded Reload
fmul d17, d25, d1
fmov d4, d1
fadd d16, d17, d16
ldr d3, [sp, #8960] ; 8-byte Folded Reload
fadd d16, d3, d16
fmul d17, d6, d22
ldr d3, [sp, #9360] ; 8-byte Folded Reload
fsub d0, d3, d17
ldr d2, [sp, #9128] ; 8-byte Folded Reload
fmul d17, d2, d0
fmov d23, d0
fadd d3, d17, d16
ldur d1, [x29, #-240] ; 8-byte Folded Reload
str d3, [sp, #7672] ; 8-byte Folded Spill
fmul d16, d1, d3
fadd d7, d16, d7
ldr d5, [sp, #11624] ; 8-byte Folded Reload
ldr d22, [sp, #10808] ; 8-byte Folded Reload
fmul d16, d22, d5
ldr d0, [sp, #10800] ; 8-byte Folded Reload
fadd d3, d16, d0
ldr d16, [sp, #11504] ; 8-byte Folded Reload
ldr d0, [sp, #11176] ; 8-byte Folded Reload
fmul d16, d0, d16
str d3, [sp, #8960] ; 8-byte Folded Spill
fmul d17, d5, d3
fmov d24, d5
fsub d18, d16, d17
mov x9, #63706
movk x9, #13221, lsl #16
movk x9, #1281, lsl #32
movk x9, #16209, lsl #48
fmov d0, x9
str d0, [sp, #7888] ; 8-byte Folded Spill
fmul d16, d18, d0
fsub d7, d7, d16
mov x9, #54125
movk x9, #53060, lsl #16
movk x9, #15481, lsl #32
movk x9, #16273, lsl #48
fmov d3, x9
str d3, [sp, #8016] ; 8-byte Folded Spill
ldr d19, [sp, #8952] ; 8-byte Folded Reload
fmul d16, d19, d3
fadd d7, d16, d7
ldr d0, [sp, #8000] ; 8-byte Folded Reload
fmul d16, d30, d0
fmul d17, d6, d2
fadd d16, d16, d17
ldr d3, [sp, #8944] ; 8-byte Folded Reload
fadd d3, d3, d16
str d3, [sp, #10280] ; 8-byte Folded Spill
fmul d3, d3, d1
str d3, [sp, #8944] ; 8-byte Folded Spill
fmul d16, d5, d3
fadd d7, d16, d7
fmul d16, d6, d25
fmov d0, d6
fmul d17, d30, d2
fmov d1, d30
fadd d16, d17, d16
ldr d3, [sp, #8928] ; 8-byte Folded Reload
fadd d3, d3, d16
str d3, [sp, #10272] ; 8-byte Folded Spill
ldr d6, [sp, #11424] ; 8-byte Folded Reload
fmul d16, d3, d6
fadd d2, d16, d7
str d2, [sp, #11072] ; 8-byte Folded Spill
mov x9, #4354980839667269632
mov x10, #47272
movk x10, #56762, lsl #16
movk x10, #43178, lsl #32
movk x10, #49060, lsl #48
fmov d3, x9
str d3, [sp, #8928] ; 8-byte Folded Spill
fmul d7, d2, d3
fmov d3, x10
str d3, [sp, #8080] ; 8-byte Folded Spill
fmul d16, d18, d3
fsub d7, d16, d7
ldr d3, [sp, #12144] ; 8-byte Folded Reload
fmov d2, d21
fmul d16, d3, d21
ldr d27, [sp, #11768] ; 8-byte Folded Reload
fmov d24, d11
str d11, [sp, #7680] ; 8-byte Folded Spill
fmul d17, d27, d11
fadd d16, d16, d17
ldr d25, [sp, #9808] ; 8-byte Folded Reload
fmul d17, d27, d25
fadd d16, d17, d16
fmul d17, d3, d20
fadd d16, d17, d16
ldur d11, [x29, #-232] ; 8-byte Folded Reload
fmul d17, d11, d10
fsub d16, d16, d17
fmul d17, d11, d9
fsub d3, d16, d17
ldr d6, [sp, #11880] ; 8-byte Folded Reload
fmul d16, d6, d10
fmul d17, d6, d9
fadd d16, d17, d16
ldr d5, [sp, #11472] ; 8-byte Folded Reload
fmul d17, d5, d3
fmov d30, d3
fsub d16, d17, d16
ldr d3, [sp, #11872] ; 8-byte Folded Reload
fmul d17, d3, d15
fsub d16, d16, d17
fmul d17, d3, d12
fsub d16, d16, d17
mov x9, #46543
movk x9, #48510, lsl #16
movk x9, #46414, lsl #32
movk x9, #16260, lsl #48
fmov d17, x9
fadd d16, d16, d17
ldr d3, [sp, #10104] ; 8-byte Folded Reload
fmov d6, d1
fmul d17, d1, d3
ldr d3, [sp, #10096] ; 8-byte Folded Reload
fmul d26, d0, d3
fadd d17, d17, d26
fmov d29, d31
fmov d1, d4
str d4, [sp, #10248] ; 8-byte Folded Spill
fmul d26, d31, d4
fadd d17, d26, d17
str d23, [sp, #10240] ; 8-byte Folded Spill
ldur d31, [x29, #-248] ; 8-byte Folded Reload
fmul d26, d31, d23
fadd d4, d26, d17
ldr d3, [sp, #9144] ; 8-byte Folded Reload
fmul d17, d3, d4
str d4, [sp, #9760] ; 8-byte Folded Spill
fadd d16, d16, d17
mov x9, #62994
movk x9, #14722, lsl #16
movk x9, #41829, lsl #32
movk x9, #16247, lsl #48
fmov d3, x9
str d3, [sp, #8032] ; 8-byte Folded Spill
fmul d17, d19, d3
fadd d16, d17, d16
ldr d8, [sp, #11312] ; 8-byte Folded Reload
fmul d17, d6, d8
fmov d21, d28
str d28, [sp, #10288] ; 8-byte Folded Spill
fmul d28, d29, d28
fadd d17, d28, d17
ldr d26, [sp, #12232] ; 8-byte Folded Reload
fmul d28, d26, d23
fadd d17, d28, d17
ldr d3, [sp, #5160] ; 8-byte Folded Reload
fadd d6, d3, d17
mov x9, #64744
movk x9, #21380, lsl #16
movk x9, #23316, lsl #32
movk x9, #16194, lsl #48
fmov d3, x9
str d3, [sp, #8024] ; 8-byte Folded Spill
fmul d17, d6, d3
fmov d23, d6
str d6, [sp, #9752] ; 8-byte Folded Spill
fsub d16, d16, d17
fmul d17, d31, d21
fmul d31, d0, d8
fadd d17, d17, d31
fmul d31, d26, d1
fmov d8, d26
fadd d17, d31, d17
ldr d3, [sp, #5168] ; 8-byte Folded Reload
fadd d3, d3, d17
ldr d6, [sp, #10752] ; 8-byte Folded Reload
fmul d17, d6, d3
fmov d28, d3
str d3, [sp, #9064] ; 8-byte Folded Spill
fadd d3, d17, d16
mov x9, #18456
movk x9, #63321, lsl #16
movk x9, #33926, lsl #32
movk x9, #48991, lsl #48
fmov d0, x9
str d0, [sp, #7816] ; 8-byte Folded Spill
fmul d16, d18, d0
ldr d6, [sp, #12304] ; 8-byte Folded Reload
fmul d17, d6, d3
fmov d26, d6
fadd d31, d17, d16
ldr d6, [sp, #12152] ; 8-byte Folded Reload
fmul d16, d6, d2
ldr d21, [sp, #11776] ; 8-byte Folded Reload
fmul d17, d21, d24
fadd d16, d16, d17
fmul d17, d21, d25
fadd d16, d17, d16
str d20, [sp, #7656] ; 8-byte Folded Spill
fmul d17, d6, d20
fadd d16, d17, d16
ldr d25, [sp, #12296] ; 8-byte Folded Reload
fmul d17, d25, d10
fsub d16, d16, d17
fmul d17, d25, d9
fsub d1, d16, d17
ldr d2, [sp, #12096] ; 8-byte Folded Reload
fmul d0, d2, d10
fmul d2, d2, d9
fadd d0, d2, d0
fmul d2, d5, d1
fsub d0, d2, d0
ldr d20, [sp, #10824] ; 8-byte Folded Reload
fmul d2, d14, d20
fmul d16, d11, d30
fmov d17, d30
str d30, [sp, #9768] ; 8-byte Folded Spill
fadd d2, d2, d16
fmul d16, d25, d1
fmov d10, d1
str d1, [sp, #9744] ; 8-byte Folded Spill
fadd d2, d16, d2
mov x9, #-7378697629483820647
movk x9, #39322
movk x9, #16297, lsl #48
fmov d1, x9
str d1, [sp, #7728] ; 8-byte Folded Spill
fmul d2, d2, d1
fadd d0, d0, d2
ldr d1, [sp, #12088] ; 8-byte Folded Reload
fmul d2, d1, d15
fsub d0, d0, d2
fmul d2, d1, d12
fsub d0, d0, d2
mov x9, #57269
movk x9, #60105, lsl #16
movk x9, #55991, lsl #32
movk x9, #16301, lsl #48
fmov d2, x9
fadd d0, d0, d2
ldr d1, [sp, #9136] ; 8-byte Folded Reload
fmul d2, d1, d4
fadd d0, d0, d2
mov x9, #56877
movk x9, #10885, lsl #16
movk x9, #2572, lsl #32
movk x9, #16289, lsl #48
fmov d1, x9
str d1, [sp, #7744] ; 8-byte Folded Spill
fmul d2, d19, d1
fadd d0, d2, d0
mov x9, #61406
movk x9, #16023, lsl #16
movk x9, #30452, lsl #32
movk x9, #16153, lsl #48
fmov d1, x9
str d1, [sp, #7768] ; 8-byte Folded Spill
fmul d2, d23, d1
fadd d0, d0, d2
ldr d1, [sp, #10760] ; 8-byte Folded Reload
fmul d2, d1, d28
fadd d4, d2, d0
ldr d1, [sp, #12312] ; 8-byte Folded Reload
fmul d0, d1, d4
fadd d0, d0, d31
mov x9, #36544
movk x9, #43611, lsl #16
movk x9, #860, lsl #32
movk x9, #16326, lsl #48
fmov d2, x9
str d2, [sp, #7824] ; 8-byte Folded Spill
fmul d2, d18, d2
str d3, [sp, #7648] ; 8-byte Folded Spill
fmul d31, d1, d3
fsub d2, d2, d31
str d4, [sp, #7600] ; 8-byte Folded Spill
fmul d31, d26, d4
fadd d2, d31, d2
fadd d7, d0, d7
mov x9, #4363988038922010624
fmov d1, x9
fmul d31, d2, d1
fmov d4, d1
str d1, [sp, #7776] ; 8-byte Folded Spill
fadd d3, d31, d7
mov x9, #43115
movk x9, #62349, lsl #16
movk x9, #30721, lsl #32
movk x9, #49115, lsl #48
fmov d1, x9
str d1, [sp, #7904] ; 8-byte Folded Spill
ldr d5, [sp, #8976] ; 8-byte Folded Reload
fmul d7, d5, d1
ldr q1, [sp, #11984] ; 16-byte Folded Reload
fmul d30, d1, d3
fsub d30, d7, d30
mov x9, #4359484439294640128
fmov d5, x9
str d5, [sp, #7696] ; 8-byte Folded Spill
ldr d7, [sp, #11072] ; 8-byte Folded Reload
fmul d9, d7, d5
mov x9, #62612
movk x9, #18904, lsl #16
movk x9, #1144, lsl #32
movk x9, #16296, lsl #48
fmov d5, x9
str d5, [sp, #7688] ; 8-byte Folded Spill
fmul d18, d18, d5
fsub d18, d9, d18
fmul d0, d0, d4
fadd d0, d0, d18
fadd d2, d2, d0
ldr q5, [sp, #11808] ; 16-byte Folded Reload
fmul d0, d5, d2
fsub d4, d30, d0
str q2, [sp, #9776] ; 16-byte Folded Spill
fmul d0, d1, d2
str q3, [sp, #9792] ; 16-byte Folded Spill
fmul d2, d5, d3
fsub d3, d0, d2
ldr d1, [sp, #12072] ; 8-byte Folded Reload
fmul d0, d22, d1
ldr d2, [sp, #10800] ; 8-byte Folded Reload
fadd d2, d0, d2
ldr d0, [sp, #11176] ; 8-byte Folded Reload
ldr d5, [sp, #11640] ; 8-byte Folded Reload
fmul d0, d0, d5
str d2, [sp, #8952] ; 8-byte Folded Spill
fmul d2, d1, d2
fsub d0, d0, d2
mov x9, #52090
movk x9, #42545, lsl #16
movk x9, #26349, lsl #32
movk x9, #16345, lsl #48
ldr q5, [sp, #11792] ; 16-byte Folded Reload
fmul d2, d5, d3
fmov d1, x9
str d1, [sp, #7896] ; 8-byte Folded Spill
fmul d18, d0, d1
fsub d2, d2, d18
ldr q1, [sp, #11952] ; 16-byte Folded Reload
fmul d18, d1, d4
fsub d2, d18, d2
str q2, [sp, #8976] ; 16-byte Folded Spill
mov x9, #11201
movk x9, #50599, lsl #16
movk x9, #31589, lsl #32
movk x9, #16242, lsl #48
fmov d2, x9
str d2, [sp, #7792] ; 8-byte Folded Spill
fmul d0, d0, d2
str q3, [sp, #7392] ; 16-byte Folded Spill
fmul d2, d1, d3
fsub d0, d2, d0
str q4, [sp, #7408] ; 16-byte Folded Spill
fmul d2, d5, d4
fadd d0, d0, d2
str q0, [sp, #11072] ; 16-byte Folded Spill
ldr d0, [sp, #10856] ; 8-byte Folded Reload
ldr d1, [sp, #8048] ; 8-byte Folded Reload
fmul d0, d0, d1
mov x9, #10523
movk x9, #38535, lsl #16
movk x9, #12921, lsl #32
movk x9, #16642, lsl #48
fmov d2, x9
fmul d0, d0, d2
ldr d2, [sp, #10160] ; 8-byte Folded Reload
fmul d0, d2, d0
ldr d2, [sp, #10152] ; 8-byte Folded Reload
fdiv d0, d0, d2
ldr d2, [sp, #10168] ; 8-byte Folded Reload
fmul d0, d2, d0
ldr d2, [sp, #10864] ; 8-byte Folded Reload
ldr d1, [sp, #8056] ; 8-byte Folded Reload
fmul d2, d2, d1
fmov d1, #0.50000000
fmul d2, d2, d1
ldr d3, [sp, #10200] ; 8-byte Folded Reload
fmul d2, d3, d2
mov x9, #211106232532992
movk x9, #16498, lsl #48
fmov d24, x9
fmul d2, d2, d24
fsub d2, d0, d2
fmul d0, d13, d20
fmul d18, d27, d17
fadd d0, d0, d18
fmul d18, d21, d10
fadd d0, d18, d0
fadd d2, d2, d0
fmul d18, d0, d1
fsub d23, d2, d18
ldr d1, [sp, #8064] ; 8-byte Folded Reload
ldr d2, [sp, #10832] ; 8-byte Folded Reload
fmul d2, d1, d2
ldr d18, [sp, #10400] ; 8-byte Folded Reload
ldr d1, [sp, #7720] ; 8-byte Folded Reload
fmul d1, d18, d1
fadd d7, d2, d1
ldur d31, [x29, #-248] ; 8-byte Folded Reload
ldr d17, [sp, #7704] ; 8-byte Folded Reload
fmul d1, d31, d17
fmul d2, d8, d7
fsub d30, d1, d2
fmul d2, d13, d30
ldr d1, [sp, #9160] ; 8-byte Folded Reload
fadd d2, d1, d2
ldr d20, [sp, #7712] ; 8-byte Folded Reload
fmul d18, d14, d20
ldr d3, [sp, #5912] ; 8-byte Folded Reload
fsub d18, d18, d3
fmul d22, d13, d18
fadd d2, d22, d2
fmul d22, d29, d7
ldr d4, [sp, #10792] ; 8-byte Folded Reload
fmul d26, d31, d4
fadd d4, d26, d22
fadd d2, d4, d2
fmul d22, d14, d15
fsub d2, d2, d22
fmul d22, d14, d12
fsub d2, d2, d22
ldr d5, [sp, #12144] ; 8-byte Folded Reload
fmul d22, d5, d1
fmul d26, d27, d30
fadd d22, d22, d26
fmul d26, d27, d18
fadd d22, d26, d22
fmul d26, d5, d4
fadd d22, d26, d22
fmul d26, d11, d15
fsub d22, d22, d26
fmul d26, d11, d12
fsub d5, d22, d26
str d5, [sp, #10864] ; 8-byte Folded Spill
fmul d26, d13, d2
fmul d28, d27, d5
fadd d26, d26, d28
fmul d28, d6, d1
fmul d29, d21, d30
fadd d28, d28, d29
fmul d29, d21, d18
fadd d28, d29, d28
str d4, [sp, #7592] ; 8-byte Folded Spill
fmul d29, d6, d4
fadd d28, d29, d28
fmul d29, d25, d15
fsub d28, d28, d29
fmul d29, d25, d12
fmov d5, #0.50000000
fsub d11, d28, d29
fmul d28, d21, d11
fadd d26, d28, d26
ldr d4, [sp, #10840] ; 8-byte Folded Reload
ldr d1, [sp, #8096] ; 8-byte Folded Reload
fmul d28, d4, d1
mov x9, #18811
movk x9, #34700, lsl #16
movk x9, #61210, lsl #32
movk x9, #16643, lsl #48
fmov d29, x9
fmul d28, d28, d29
ldr d4, [sp, #10184] ; 8-byte Folded Reload
fmul d28, d4, d28
ldr d4, [sp, #10176] ; 8-byte Folded Reload
fdiv d28, d28, d4
ldr d4, [sp, #10192] ; 8-byte Folded Reload
fmul d28, d4, d28
ldr d4, [sp, #10848] ; 8-byte Folded Reload
ldr d1, [sp, #8840] ; 8-byte Folded Reload
fmul d29, d4, d1
fmul d29, d29, d5
ldr d4, [sp, #10208] ; 8-byte Folded Reload
fmul d29, d4, d29
fmul d24, d29, d24
fsub d24, d28, d24
fsub d0, d23, d0
fmov d22, d23
fsub d0, d0, d26
fadd d24, d24, d26
fmul d26, d26, d5
fsub d24, d24, d26
fadd d9, d24, d0
ldr d12, [sp, #10104] ; 8-byte Folded Reload
ldr d13, [sp, #9128] ; 8-byte Folded Reload
fmul d0, d13, d12
ldr d1, [sp, #10096] ; 8-byte Folded Reload
ldr d10, [sp, #8000] ; 8-byte Folded Reload
fmul d24, d10, d1
fadd d0, d0, d24
ldr d29, [sp, #10056] ; 8-byte Folded Reload
ldr d4, [sp, #11312] ; 8-byte Folded Reload
fmul d24, d29, d4
fadd d0, d24, d0
ldr d4, [sp, #5128] ; 8-byte Folded Reload
ldr d5, [sp, #11512] ; 8-byte Folded Reload
fmul d24, d5, d4
fadd d0, d0, d24
ldr d4, [sp, #6056] ; 8-byte Folded Reload
fmul d24, d8, d4
ldr d8, [sp, #10224] ; 8-byte Folded Reload
fmul d26, d31, d8
fsub d24, d24, d26
ldr d4, [sp, #8072] ; 8-byte Folded Reload
fmul d0, d0, d4
fmul d5, d24, d4
fadd d0, d5, d0
ldr d23, [sp, #12168] ; 8-byte Folded Reload
fmul d5, d23, d20
ldr d6, [sp, #11696] ; 8-byte Folded Reload
fmul d19, d6, d3
fsub d5, d5, d19
ldr d4, [sp, #12288] ; 8-byte Folded Reload
ldr d3, [sp, #8040] ; 8-byte Folded Reload
fmul d19, d4, d3
ldr d3, [sp, #11720] ; 8-byte Folded Reload
ldr d20, [sp, #9152] ; 8-byte Folded Reload
fmul d20, d3, d20
fsub d19, d19, d20
fadd d5, d5, d19
fsub d0, d0, d5
ldr d19, [sp, #11728] ; 8-byte Folded Reload
ldr d14, [sp, #7680] ; 8-byte Folded Reload
fmul d5, d19, d14
fadd d0, d5, d0
ldr d16, [sp, #9808] ; 8-byte Folded Reload
fmul d5, d19, d16
fadd d0, d5, d0
ldr d26, [sp, #11360] ; 8-byte Folded Reload
ldr d5, [sp, #10824] ; 8-byte Folded Reload
fmul d5, d26, d5
fadd d0, d5, d0
ldr d19, [sp, #11560] ; 8-byte Folded Reload
fmul d5, d19, d30
fadd d0, d5, d0
fmul d5, d19, d18
fadd d0, d5, d0
ldr d28, [sp, #11272] ; 8-byte Folded Reload
fmul d2, d28, d2
fadd d0, d2, d0
ldr d24, [sp, #7888] ; 8-byte Folded Reload
fmul d2, d9, d24
fsub d0, d0, d2
ldr d19, [sp, #10816] ; 8-byte Folded Reload
fmul d2, d19, d8
ldr d5, [sp, #10288] ; 8-byte Folded Reload
fmul d5, d29, d5
fadd d2, d5, d2
ldr d5, [sp, #8008] ; 8-byte Folded Reload
fmul d5, d4, d5
fsub d2, d2, d5
ldr d4, [sp, #10256] ; 8-byte Folded Reload
fmul d5, d3, d4
fadd d2, d5, d2
fmul d5, d23, d17
fsub d2, d2, d5
fmul d5, d6, d7
fadd d2, d5, d2
ldr d3, [sp, #8864] ; 8-byte Folded Reload
fadd d2, d3, d2
ldr d6, [sp, #10248] ; 8-byte Folded Reload
fmul d5, d13, d6
fadd d2, d5, d2
ldr d3, [sp, #10240] ; 8-byte Folded Reload
fmul d5, d10, d3
fadd d2, d5, d2
str d2, [sp, #7720] ; 8-byte Folded Spill
ldur d23, [x29, #-240] ; 8-byte Folded Reload
fmul d2, d23, d2
fadd d0, d2, d0
ldr d2, [sp, #11168] ; 8-byte Folded Reload
fmul d2, d2, d1
ldr d1, [sp, #11968] ; 8-byte Folded Reload
fmul d5, d1, d3
fadd d2, d5, d2
fmov d3, d19
fmul d5, d19, d12
fmov d1, d6
fmul d6, d31, d6
fadd d3, d6, d5
str d3, [sp, #7712] ; 8-byte Folded Spill
fsub d19, d2, d3
fmul d2, d27, d19
ldr d1, [sp, #9760] ; 8-byte Folded Reload
ldur d3, [x29, #-232] ; 8-byte Folded Reload
fmul d5, d3, d1
fadd d6, d2, d5
mov x9, #61406
movk x9, #16023, lsl #16
movk x9, #30452, lsl #32
movk x9, #48937, lsl #48
fmov d2, x9
fmul d2, d6, d2
fadd d2, d0, d2
fmul d0, d21, d19
fmul d5, d25, d1
fadd d0, d0, d5
mov x9, #64744
movk x9, #21380, lsl #16
movk x9, #23316, lsl #32
movk x9, #48978, lsl #48
fmov d5, x9
fmul d5, d0, d5
fadd d2, d2, d5
ldr d25, [sp, #8888] ; 8-byte Folded Reload
ldr d1, [sp, #8016] ; 8-byte Folded Reload
fmul d5, d25, d1
fadd d2, d5, d2
ldr d8, [sp, #10808] ; 8-byte Folded Reload
ldr d1, [sp, #11504] ; 8-byte Folded Reload
fmul d5, d8, d1
fmov d1, d4
str d7, [sp, #10168] ; 8-byte Folded Spill
fadd d27, d4, d7
ldr d10, [sp, #11176] ; 8-byte Folded Reload
ldr d1, [sp, #11624] ; 8-byte Folded Reload
fmul d20, d10, d1
fadd d3, d20, d27
str d3, [sp, #10160] ; 8-byte Folded Spill
fmul d20, d1, d3
fadd d5, d20, d5
fmul d20, d5, d24
fadd d2, d2, d20
ldr d3, [sp, #10280] ; 8-byte Folded Reload
ldr d4, [sp, #11424] ; 8-byte Folded Reload
fmul d20, d3, d4
fadd d2, d20, d2
ldr d3, [sp, #10272] ; 8-byte Folded Reload
fmul d3, d3, d23
str d3, [sp, #10152] ; 8-byte Folded Spill
fmul d20, d1, d3
fsub d20, d2, d20
ldr d1, [sp, #7696] ; 8-byte Folded Reload
fmul d2, d20, d1
ldr d31, [sp, #7688] ; 8-byte Folded Reload
fmul d7, d9, d31
fsub d2, d2, d7
ldr d1, [sp, #7816] ; 8-byte Folded Reload
fmul d7, d9, d1
mov x9, #18456
movk x9, #63321, lsl #16
movk x9, #33926, lsl #32
movk x9, #16223, lsl #48
fmov d21, x9
fmul d21, d5, d21
fadd d7, d7, d21
fmov d3, d14
ldr d1, [sp, #12096] ; 8-byte Folded Reload
fmul d21, d1, d14
fmov d4, d16
fmul d24, d1, d16
fadd d21, d24, d21
ldr d1, [sp, #9744] ; 8-byte Folded Reload
fmul d24, d26, d1
fadd d21, d21, d24
ldr d1, [sp, #7728] ; 8-byte Folded Reload
fmul d16, d22, d1
fadd d16, d21, d16
ldr d1, [sp, #12088] ; 8-byte Folded Reload
fmul d21, d1, d30
fadd d16, d21, d16
fmul d21, d1, d18
fadd d16, d21, d16
fmul d21, d28, d11
fadd d16, d21, d16
ldr d1, [sp, #9136] ; 8-byte Folded Reload
fmul d21, d1, d19
fadd d16, d16, d21
mov x9, #45724
movk x9, #42429, lsl #16
movk x9, #11379, lsl #32
movk x9, #16169, lsl #48
fmov d21, x9
fmul d23, d6, d21
fsub d16, d16, d23
mov x9, #45033
movk x9, #40035, lsl #16
movk x9, #524, lsl #32
movk x9, #48971, lsl #48
fmov d23, x9
fmul d23, d0, d23
fadd d16, d16, d23
ldr d1, [sp, #7744] ; 8-byte Folded Reload
fmul d23, d25, d1
fadd d16, d16, d23
ldr d24, [sp, #9752] ; 8-byte Folded Reload
ldr d1, [sp, #10760] ; 8-byte Folded Reload
fmul d23, d1, d24
fadd d16, d23, d16
ldr d23, [sp, #9064] ; 8-byte Folded Reload
ldr d1, [sp, #7768] ; 8-byte Folded Reload
fmul d17, d23, d1
fsub d17, d16, d17
ldr d29, [sp, #12312] ; 8-byte Folded Reload
fmul d16, d29, d17
fadd d7, d7, d16
ldr d1, [sp, #11880] ; 8-byte Folded Reload
fmul d3, d1, d14
fmul d4, d1, d4
fadd d3, d4, d3
ldr d1, [sp, #9768] ; 8-byte Folded Reload
fmul d4, d26, d1
ldr q26, [sp, #11072] ; 16-byte Folded Reload
fadd d3, d3, d4
ldr d4, [sp, #11872] ; 8-byte Folded Reload
fmul d1, d4, d30
fadd d1, d1, d3
fmul d3, d4, d18
fadd d1, d3, d1
ldr d3, [sp, #10864] ; 8-byte Folded Reload
fmul d3, d28, d3
fadd d1, d3, d1
ldr d3, [sp, #9144] ; 8-byte Folded Reload
fmul d3, d3, d19
fadd d1, d1, d3
mov x9, #40862
movk x9, #31695, lsl #16
movk x9, #12355, lsl #32
movk x9, #16198, lsl #48
fmov d3, x9
fmul d3, d6, d3
fadd d1, d1, d3
fmul d0, d0, d21
fsub d0, d1, d0
ldr d1, [sp, #8032] ; 8-byte Folded Reload
fmul d1, d25, d1
ldr q19, [sp, #8976] ; 16-byte Folded Reload
fadd d0, d0, d1
fmov d1, d24
ldr d3, [sp, #10752] ; 8-byte Folded Reload
fmul d1, d3, d24
fadd d0, d1, d0
fmov d3, d23
ldr d1, [sp, #8024] ; 8-byte Folded Reload
fmul d1, d23, d1
fadd d4, d0, d1
ldr d6, [sp, #12304] ; 8-byte Folded Reload
fmul d0, d6, d4
fadd d0, d0, d7
ldr d7, [sp, #7776] ; 8-byte Folded Reload
fmul d1, d0, d7
fadd d1, d1, d2
ldr d3, [sp, #7824] ; 8-byte Folded Reload
fmul d2, d9, d3
fmul d3, d5, d3
fsub d2, d2, d3
str d17, [sp, #7824] ; 8-byte Folded Spill
fmul d3, d6, d17
fadd d2, d2, d3
str d4, [sp, #7816] ; 8-byte Folded Spill
fmul d3, d29, d4
ldur d6, [x29, #-256] ; 8-byte Folded Reload
fsub d2, d2, d3
fadd d1, d2, d1
fmul d3, d5, d31
fadd d4, d1, d3
ldr d1, [sp, #8928] ; 8-byte Folded Reload
fmul d1, d20, d1
ldr d3, [sp, #8080] ; 8-byte Folded Reload
fmul d3, d9, d3
fsub d1, d3, d1
fadd d0, d0, d1
fmul d1, d2, d7
fadd d0, d1, d0
mov x9, #47272
movk x9, #56762, lsl #16
movk x9, #43178, lsl #32
movk x9, #16292, lsl #48
fmov d1, x9
fmul d1, d5, d1
fadd d2, d0, d1
ldr q3, [sp, #11984] ; 16-byte Folded Reload
fmul d0, d3, d4
ldr q16, [sp, #11808] ; 16-byte Folded Reload
fmul d1, d16, d2
fsub d5, d0, d1
ldr d17, [sp, #7792] ; 8-byte Folded Reload
fmul d0, d9, d17
ldr q7, [sp, #11952] ; 16-byte Folded Reload
fmul d1, d7, d5
fsub d0, d1, d0
ldr d1, [sp, #7904] ; 8-byte Folded Reload
fmul d1, d9, d1
str q2, [sp, #10208] ; 16-byte Folded Spill
fmul d2, d3, d2
fadd d1, d2, d1
str q4, [sp, #10224] ; 16-byte Folded Spill
fmul d2, d16, d4
fadd d1, d2, d1
ldr d2, [sp, #11520] ; 8-byte Folded Reload
fmul d2, d8, d2
ldr d4, [sp, #12048] ; 8-byte Folded Reload
fmul d3, d10, d4
fadd d3, d3, d27
str d3, [sp, #9808] ; 8-byte Folded Spill
fmul d3, d4, d3
fadd d2, d3, d2
mov x9, #43115
movk x9, #62349, lsl #16
movk x9, #30721, lsl #32
movk x9, #16347, lsl #48
fmov d3, x9
fmul d2, d2, d3
fadd d3, d1, d2
ldr q16, [sp, #11792] ; 16-byte Folded Reload
fmul d1, d16, d3
fsub d0, d0, d1
ldr d1, [sp, #11640] ; 8-byte Folded Reload
fmul d1, d8, d1
ldr d4, [sp, #12072] ; 8-byte Folded Reload
fmul d2, d10, d4
fadd d2, d2, d27
str d2, [sp, #9152] ; 8-byte Folded Spill
fmul d2, d4, d2
fadd d1, d2, d1
fmul d2, d1, d17
fadd d20, d0, d2
ldr d4, [sp, #7896] ; 8-byte Folded Reload
fmul d0, d9, d4
str q5, [sp, #7792] ; 16-byte Folded Spill
fmul d2, d16, d5
fsub d0, d2, d0
str q3, [sp, #7776] ; 16-byte Folded Spill
fmul d2, d7, d3
fadd d0, d0, d2
fmul d1, d1, d4
fadd d22, d0, d1
fmul d0, d6, d8
ldr d1, [sp, #10800] ; 8-byte Folded Reload
fadd d24, d0, d1
fmul d0, d6, d10
str d27, [sp, #7704] ; 8-byte Folded Spill
fadd d23, d0, d27
cbz x8, LBB19_56
; %bb.55:
fmov d21, d8
fmov d18, d10
ldr q17, [sp, #10720] ; 16-byte Folded Reload
fmul d0, d17, d19
ldr q16, [sp, #11216] ; 16-byte Folded Reload
fmul d1, d16, d26
ldr d7, [sp, #11248] ; 8-byte Folded Reload
fmul d2, d7, d10
fmul d3, d6, d24
fsub d2, d2, d3
mov x9, #39127
movk x9, #24179, lsl #16
movk x9, #24811, lsl #32
movk x9, #16304, lsl #48
fmov d3, x9
fmul d4, d2, d3
fsub d1, d1, d4
fsub d0, d0, d1
fadd d0, d0, d0
mov x9, #33620
movk x9, #2364, lsl #16
movk x9, #33974, lsl #32
movk x9, #16305, lsl #48
fmov d1, x9
fmul d4, d9, d1
fmul d5, d17, d20
fsub d4, d5, d4
fmul d5, d16, d22
fsub d4, d4, d5
fmul d5, d7, d8
fmul d6, d6, d23
fadd d5, d5, d6
fmul d6, d5, d1
fadd d4, d4, d6
fadd d4, d4, d4
fadd d0, d0, d4
ldr d18, [sp, #11400] ; 8-byte Folded Reload
fmul d4, d18, d0
fmul d6, d9, d3
fmul d7, d16, d20
fsub d6, d7, d6
fmul d7, d17, d22
fadd d6, d7, d6
fmul d3, d5, d3
fadd d3, d6, d3
fmul d5, d17, d26
fmul d1, d2, d1
fsub d1, d5, d1
fmul d2, d16, d19
fadd d1, d2, d1
fadd d1, d1, d3
ldr d2, [sp, #11160] ; 8-byte Folded Reload
fmul d2, d2, d1
fsub d2, d4, d2
fmul d2, d18, d2
fmov d5, #0.50000000
fmul d2, d2, d5
ldr d3, [sp, #11392] ; 8-byte Folded Reload
fmul d0, d3, d0
ldr d4, [sp, #11240] ; 8-byte Folded Reload
fmul d1, d4, d1
fadd d0, d1, d0
fmul d0, d3, d0
fmul d0, d0, d5
fsub d0, d2, d0
str d0, [x8, #72]
LBB19_56:
str d24, [sp, #7688] ; 8-byte Folded Spill
str d23, [sp, #7696] ; 8-byte Folded Spill
str q22, [sp, #7728] ; 16-byte Folded Spill
str q20, [sp, #7744] ; 16-byte Folded Spill
str d9, [sp, #7768] ; 8-byte Folded Spill
ldr d0, [sp, #9288] ; 8-byte Folded Reload
ldr d1, [sp, #9280] ; 8-byte Folded Reload
fadd d0, d1, d0
str d0, [sp, #10192] ; 8-byte Folded Spill
ldr d0, [sp, #8920] ; 8-byte Folded Reload
ldr d1, [sp, #8896] ; 8-byte Folded Reload
fadd d0, d1, d0
str d0, [sp, #10200] ; 8-byte Folded Spill
ldp d8, d9, [x29, #-176] ; 16-byte Folded Reload
ldr d14, [sp, #11752] ; 8-byte Folded Reload
fmul d0, d9, d14
ldr d4, [sp, #12184] ; 8-byte Folded Reload
fmul d1, d8, d4
fsub d16, d0, d1
ldr d21, [sp, #11736] ; 8-byte Folded Reload
fmul d0, d8, d21
fsub d18, d0, d14
ldr d29, [sp, #9928] ; 8-byte Folded Reload
fdiv d1, d18, d29
str d1, [sp, #10856] ; 8-byte Folded Spill
ldr d0, [sp, #9904] ; 8-byte Folded Reload
fmul d0, d0, d1
fdiv d3, d16, d29
ldr d26, [sp, #4320] ; 8-byte Folded Reload
fmul d1, d26, d3
fadd d20, d1, d0
ldr d0, [sp, #9896] ; 8-byte Folded Reload
fmul d0, d20, d0
fmul d1, d9, d21
fadd d0, d1, d0
fsub d0, d0, d4
ldr d1, [sp, #10480] ; 8-byte Folded Reload
fmul d1, d1, d0
fmov d4, #0.50000000
fmul d1, d1, d4
ldr d4, [sp, #10904] ; 8-byte Folded Reload
fmul d1, d4, d1
ldr d4, [sp, #10896] ; 8-byte Folded Reload
fmul d2, d4, d0
str d2, [sp, #9128] ; 8-byte Folded Spill
mov x9, #4632233691727265792
fmov d4, x9
fmul d0, d1, d4
ldr d1, [sp, #10544] ; 8-byte Folded Reload
fmul d2, d1, d2
str d2, [sp, #8928] ; 8-byte Folded Spill
ldr d1, [sp, #10568] ; 8-byte Folded Reload
fmul d1, d1, d2
fmov d22, #3.00000000
fmul d1, d1, d22
fadd d23, d0, d1
fmov d6, #0.50000000
ldr d12, [sp, #11672] ; 8-byte Folded Reload
fmul d0, d9, d12
ldr d19, [sp, #12176] ; 8-byte Folded Reload
fmul d1, d8, d19
fsub d2, d0, d1
str d2, [sp, #10848] ; 8-byte Folded Spill
ldr d30, [sp, #11648] ; 8-byte Folded Reload
fmul d0, d8, d30
fsub d0, d0, d12
str d0, [sp, #10840] ; 8-byte Folded Spill
ldr d13, [sp, #10496] ; 8-byte Folded Reload
fdiv d1, d0, d13
str d1, [sp, #9136] ; 8-byte Folded Spill
ldr d0, [sp, #10424] ; 8-byte Folded Reload
fmul d5, d0, d1
fdiv d0, d2, d13
str d0, [sp, #11424] ; 8-byte Folded Spill
ldr d2, [sp, #4288] ; 8-byte Folded Reload
fmul d17, d2, d0
fadd d0, d17, d5
str d0, [sp, #10480] ; 8-byte Folded Spill
ldr d5, [sp, #9880] ; 8-byte Folded Reload
fmul d5, d0, d5
fmul d17, d9, d30
fadd d5, d17, d5
fsub d5, d5, d19
ldr d17, [sp, #10432] ; 8-byte Folded Reload
fmul d17, d17, d5
fmul d17, d17, d6
fmov d15, #0.50000000
ldr d19, [sp, #10888] ; 8-byte Folded Reload
fmul d17, d19, d17
fmul d4, d17, d4
ldr d17, [sp, #10880] ; 8-byte Folded Reload
fmul d17, d17, d5
str d17, [sp, #9880] ; 8-byte Folded Spill
ldr d5, [sp, #10440] ; 8-byte Folded Reload
fmul d17, d5, d17
str d17, [sp, #9280] ; 8-byte Folded Spill
ldr d5, [sp, #10448] ; 8-byte Folded Reload
fmul d5, d5, d17
fmul d5, d5, d22
fadd d0, d4, d5
str d0, [sp, #10904] ; 8-byte Folded Spill
mov x9, #54806
movk x9, #23353, lsl #16
movk x9, #56949, lsl #32
movk x9, #16326, lsl #48
fmov d25, x9
fmul d4, d8, d25
str d4, [sp, #11520] ; 8-byte Folded Spill
ldr d22, [sp, #11568] ; 8-byte Folded Reload
fmul d5, d4, d22
ldr d17, [sp, #10088] ; 8-byte Folded Reload
fadd d4, d5, d17
str d4, [sp, #11504] ; 8-byte Folded Spill
fadd d6, d23, d0
str d6, [sp, #11312] ; 8-byte Folded Spill
ldr d19, [sp, #12056] ; 8-byte Folded Reload
fmul d5, d4, d19
fadd d4, d5, d6
fmul d0, d9, d25
str d0, [sp, #11512] ; 8-byte Folded Spill
fmul d5, d0, d22
fmov d24, d22
ldr d17, [sp, #10080] ; 8-byte Folded Reload
fsub d0, d17, d5
str d0, [sp, #11640] ; 8-byte Folded Spill
str d4, [sp, #9904] ; 8-byte Folded Spill
fmul d5, d19, d4
ldr d4, [sp, #11584] ; 8-byte Folded Reload
fmul d17, d0, d4
fsub d0, d5, d17
str d0, [sp, #10544] ; 8-byte Folded Spill
ldr d0, [sp, #10112] ; 8-byte Folded Reload
ldr d4, [sp, #8272] ; 8-byte Folded Reload
fmul d5, d4, d0
ldr d0, [sp, #10768] ; 8-byte Folded Reload
ldr d4, [sp, #9296] ; 8-byte Folded Reload
fmul d17, d4, d0
fadd d5, d5, d17
ldr d28, [sp, #10008] ; 8-byte Folded Reload
ldr d0, [sp, #11328] ; 8-byte Folded Reload
fmul d17, d28, d0
fadd d5, d17, d5
ldr d17, [sp, #6728] ; 8-byte Folded Reload
fmul d17, d22, d17
fmov d0, d22
fsub d17, d5, d17
ldr d6, [sp, #12320] ; 8-byte Folded Reload
fmul d5, d28, d6
ldr d11, [sp, #11888] ; 8-byte Folded Reload
ldr d4, [sp, #10712] ; 8-byte Folded Reload
fmul d19, d4, d11
fadd d5, d5, d19
mov x9, #3449
movk x9, #18764, lsl #16
movk x9, #45194, lsl #32
movk x9, #16217, lsl #48
fmov d19, x9
ldr d22, [sp, #4240] ; 8-byte Folded Reload
fadd d19, d22, d19
ldur d10, [x29, #-184] ; 8-byte Folded Reload
str d19, [sp, #10864] ; 8-byte Folded Spill
fmul d19, d19, d10
fadd d31, d19, d5
fmul d19, d11, d31
str d31, [sp, #8864] ; 8-byte Folded Spill
ldr d5, [sp, #6128] ; 8-byte Folded Reload
fmul d24, d10, d5
fsub d19, d19, d24
str d25, [sp, #9144] ; 8-byte Folded Spill
fmul d17, d17, d25
fmul d19, d19, d25
fadd d24, d19, d17
ldr d22, [sp, #10936] ; 8-byte Folded Reload
fmul d17, d2, d22
fdiv d7, d17, d13
fmul d19, d12, d7
ldr d4, [sp, #11104] ; 8-byte Folded Reload
fmul d25, d30, d4
fadd d19, d25, d19
mov x9, #11213
movk x9, #64899, lsl #16
movk x9, #2195, lsl #32
movk x9, #49148, lsl #48
fmov d25, x9
fmul d25, d0, d25
fadd d25, d19, d25
ldr d17, [sp, #9920] ; 8-byte Folded Reload
fmul d19, d26, d17
fdiv d0, d19, d29
fmul d26, d14, d0
ldr d19, [sp, #10912] ; 8-byte Folded Reload
fmul d27, d21, d19
fadd d26, d27, d26
fadd d25, d26, d25
fadd d24, d25, d24
fdiv d25, d0, d29
fmov d2, d0
fmul d16, d16, d25
ldr d5, [sp, #10624] ; 8-byte Folded Reload
fmul d18, d18, d5
fadd d16, d16, d18
fmul d18, d19, d20
fmul d20, d18, d15
fsub d16, d20, d16
mov x9, #-7378697629483820647
movk x9, #39322
movk x9, #16361, lsl #48
fmov d26, x9
fmul d18, d18, d26
ldr d5, [sp, #9912] ; 8-byte Folded Reload
fmul d18, d18, d5
fmov d15, #5.00000000
fmul d18, d18, d15
fadd d16, d18, d16
fmul d3, d3, d17
ldr d5, [sp, #10608] ; 8-byte Folded Reload
fdiv d25, d16, d5
ldr d5, [sp, #10616] ; 8-byte Folded Reload
fmul d16, d5, d25
fadd d1, d3, d16
fmul d3, d11, d1
fmul d16, d10, d23
fadd d16, d16, d3
ldr d0, [sp, #11904] ; 8-byte Folded Reload
fmul d3, d0, d16
fsub d3, d24, d3
fmov d29, d2
str d2, [sp, #8920] ; 8-byte Folded Spill
fmul d18, d8, d2
fadd d18, d19, d18
fmul d24, d0, d18
fsub d3, d3, d24
ldr d0, [sp, #10856] ; 8-byte Folded Reload
fmul d2, d0, d17
ldr d5, [sp, #10600] ; 8-byte Folded Reload
fmul d24, d5, d25
fadd d0, d2, d24
str d0, [sp, #10856] ; 8-byte Folded Spill
fmul d2, d6, d1
str d1, [sp, #8888] ; 8-byte Folded Spill
fmul d24, d10, d0
fsub d2, d2, d24
str d2, [sp, #10568] ; 8-byte Folded Spill
fmul d2, d8, d2
ldr d20, [sp, #10192] ; 8-byte Folded Reload
fadd d2, d20, d2
fmul d24, d9, d29
ldr d5, [sp, #9352] ; 8-byte Folded Reload
fsub d5, d24, d5
str d5, [sp, #10832] ; 8-byte Folded Spill
fmul d27, d8, d5
fadd d2, d27, d2
fmul d27, d11, d0
fmul d29, d6, d23
fmov d19, d6
fadd d24, d29, d27
fadd d2, d24, d2
fmul d27, d9, d16
fsub d2, d2, d27
fmul d27, d9, d18
fsub d0, d2, d27
str d0, [sp, #10896] ; 8-byte Folded Spill
ldr d27, [sp, #11376] ; 8-byte Folded Reload
fmul d2, d27, d0
fadd d2, d2, d3
fmov d17, d7
str d7, [sp, #8032] ; 8-byte Folded Spill
fdiv d3, d7, d13
ldr d0, [sp, #10848] ; 8-byte Folded Reload
fmul d3, d0, d3
ldr d5, [sp, #10584] ; 8-byte Folded Reload
ldr d0, [sp, #10840] ; 8-byte Folded Reload
fmul d6, d0, d5
fadd d3, d3, d6
ldr d0, [sp, #10480] ; 8-byte Folded Reload
fmul d6, d4, d0
fmov d0, #0.50000000
fmul d7, d6, d0
fsub d3, d7, d3
fmul d6, d6, d26
ldr d5, [sp, #9888] ; 8-byte Folded Reload
fmul d6, d6, d5
fmul d6, d6, d15
fadd d3, d6, d3
ldr d0, [sp, #11424] ; 8-byte Folded Reload
fmul d0, d0, d22
ldr d5, [sp, #10576] ; 8-byte Folded Reload
fdiv d5, d3, d5
str d5, [sp, #8040] ; 8-byte Folded Spill
ldr d3, [sp, #10592] ; 8-byte Folded Reload
fmul d3, d3, d5
fadd d3, d0, d3
fmul d0, d11, d3
fmov d6, d3
str d3, [sp, #8024] ; 8-byte Folded Spill
ldr d5, [sp, #10904] ; 8-byte Folded Reload
fmul d3, d10, d5
fadd d13, d3, d0
ldr d3, [sp, #11664] ; 8-byte Folded Reload
fmul d0, d3, d13
fsub d0, d2, d0
fmul d2, d8, d17
fadd d25, d4, d2
fmul d2, d3, d25
fsub d0, d0, d2
mov x9, #51491
movk x9, #54360, lsl #16
movk x9, #13074, lsl #32
movk x9, #49054, lsl #48
fmov d2, x9
fadd d0, d0, d2
ldr d17, [sp, #11512] ; 8-byte Folded Reload
fmul d2, d17, d11
ldr d15, [sp, #11520] ; 8-byte Folded Reload
fmul d3, d15, d19
fsub d2, d2, d3
str d2, [sp, #11424] ; 8-byte Folded Spill
fmul d2, d28, d2
fmul d3, d15, d31
fsub d2, d2, d3
fmul d3, d14, d1
fadd d2, d2, d3
str d23, [sp, #9928] ; 8-byte Folded Spill
fmul d3, d21, d23
fadd d2, d3, d2
fmul d3, d12, d6
fadd d2, d3, d2
fmul d3, d30, d5
fadd d2, d3, d2
fmul d3, d15, d10
ldr d4, [sp, #9384] ; 8-byte Folded Reload
fadd d1, d3, d4
ldr d26, [sp, #8272] ; 8-byte Folded Reload
fmul d3, d26, d1
fmov d30, d1
fadd d2, d3, d2
ldr d1, [sp, #9264] ; 8-byte Folded Reload
fadd d2, d1, d2
fmul d3, d17, d10
ldr d4, [sp, #9376] ; 8-byte Folded Reload
fsub d22, d4, d3
ldr d5, [sp, #9296] ; 8-byte Folded Reload
fmul d3, d5, d22
fadd d1, d3, d2
ldr d3, [sp, #12280] ; 8-byte Folded Reload
str d1, [sp, #8000] ; 8-byte Folded Spill
fmul d2, d3, d1
fmov d19, d3
fadd d2, d2, d0
ldr d4, [sp, #11368] ; 8-byte Folded Reload
ldr d29, [sp, #11504] ; 8-byte Folded Reload
fmul d0, d29, d4
ldr d1, [sp, #11312] ; 8-byte Folded Reload
fadd d1, d0, d1
ldr d0, [sp, #11536] ; 8-byte Folded Reload
ldr d3, [sp, #11640] ; 8-byte Folded Reload
fmul d0, d3, d0
str d1, [sp, #9920] ; 8-byte Folded Spill
fmul d3, d4, d1
fmov d21, d4
fsub d6, d0, d3
mov x9, #63706
movk x9, #13221, lsl #16
movk x9, #1281, lsl #32
movk x9, #16209, lsl #48
fmov d0, x9
str d0, [sp, #8840] ; 8-byte Folded Spill
fmul d3, d6, d0
fadd d2, d3, d2
mov x9, #54125
movk x9, #53060, lsl #16
movk x9, #15481, lsl #32
movk x9, #16273, lsl #48
fmov d3, x9
str d3, [sp, #8896] ; 8-byte Folded Spill
ldr d23, [sp, #9272] ; 8-byte Folded Reload
fmul d3, d23, d3
fsub d2, d2, d3
ldr d0, [sp, #8320] ; 8-byte Folded Reload
fmul d3, d15, d0
fmul d7, d17, d5
fadd d3, d3, d7
ldr d4, [sp, #9256] ; 8-byte Folded Reload
fadd d3, d4, d3
str d3, [sp, #10888] ; 8-byte Folded Spill
fmul d3, d3, d19
str d3, [sp, #9912] ; 8-byte Folded Spill
fmul d3, d3, d21
fadd d2, d3, d2
fmul d3, d17, d26
fmov d19, d17
fmul d7, d15, d5
fmov d0, d15
fadd d3, d7, d3
ldr d4, [sp, #9248] ; 8-byte Folded Reload
fadd d3, d4, d3
str d3, [sp, #10880] ; 8-byte Folded Spill
ldr d4, [sp, #11440] ; 8-byte Folded Reload
fmul d3, d3, d4
fadd d31, d3, d2
mov x9, #4354980839667269632
mov x10, #47272
movk x10, #56762, lsl #16
movk x10, #43178, lsl #32
movk x10, #16292, lsl #48
fmov d2, x9
str d2, [sp, #9888] ; 8-byte Folded Spill
fmul d2, d31, d2
fmov d3, x10
str d3, [sp, #9288] ; 8-byte Folded Spill
fmul d3, d6, d3
fsub d1, d2, d3
str d1, [sp, #11104] ; 8-byte Folded Spill
ldr d4, [sp, #12200] ; 8-byte Folded Reload
fmov d1, d20
fmul d2, d4, d20
ldr d14, [sp, #11864] ; 8-byte Folded Reload
ldr d28, [sp, #10568] ; 8-byte Folded Reload
fmul d3, d14, d28
fadd d2, d2, d3
ldr d15, [sp, #10832] ; 8-byte Folded Reload
fmul d3, d14, d15
fadd d2, d3, d2
fmul d3, d4, d24
fadd d2, d3, d2
ldur d12, [x29, #-216] ; 8-byte Folded Reload
fmul d3, d12, d16
fsub d2, d2, d3
fmul d3, d12, d18
fsub d7, d2, d3
ldr d3, [sp, #11976] ; 8-byte Folded Reload
fmul d2, d3, d16
fmul d3, d3, d18
fadd d2, d3, d2
fmul d3, d27, d7
fsub d2, d3, d2
ldr d4, [sp, #12064] ; 8-byte Folded Reload
fmul d3, d4, d13
fsub d2, d2, d3
fmul d3, d4, d25
fsub d2, d2, d3
mov x9, #46543
movk x9, #48510, lsl #16
movk x9, #46414, lsl #32
movk x9, #16260, lsl #48
fmov d3, x9
fadd d2, d2, d3
ldr d3, [sp, #10112] ; 8-byte Folded Reload
fmov d17, d0
fmul d3, d0, d3
ldr d4, [sp, #10768] ; 8-byte Folded Reload
fmul d26, d19, d4
fadd d3, d3, d26
fmov d0, d30
str d30, [sp, #10848] ; 8-byte Folded Spill
fmul d26, d11, d30
fadd d3, d26, d3
str d22, [sp, #10840] ; 8-byte Folded Spill
ldr d30, [sp, #12320] ; 8-byte Folded Reload
fmul d26, d30, d22
fadd d5, d26, d3
ldr d3, [sp, #9344] ; 8-byte Folded Reload
fmul d3, d3, d5
str d5, [sp, #10432] ; 8-byte Folded Spill
fadd d2, d2, d3
mov x9, #62994
movk x9, #14722, lsl #16
movk x9, #41829, lsl #32
movk x9, #16247, lsl #48
fmov d3, x9
str d3, [sp, #9264] ; 8-byte Folded Spill
fmul d3, d23, d3
fadd d2, d3, d2
ldr d20, [sp, #11328] ; 8-byte Folded Reload
fmul d3, d17, d20
ldr d17, [sp, #11424] ; 8-byte Folded Reload
fmul d26, d11, d17
fadd d3, d26, d3
fmul d26, d10, d22
fadd d3, d26, d3
ldr d4, [sp, #5208] ; 8-byte Folded Reload
fadd d21, d4, d3
mov x9, #64744
movk x9, #21380, lsl #16
movk x9, #23316, lsl #32
movk x9, #16194, lsl #48
fmov d3, x9
str d3, [sp, #9256] ; 8-byte Folded Spill
fmul d3, d21, d3
fmov d22, d21
str d21, [sp, #10424] ; 8-byte Folded Spill
fadd d2, d3, d2
fmul d3, d30, d17
fmov d11, d30
fmul d30, d19, d20
fadd d3, d3, d30
fmul d30, d10, d0
fadd d3, d30, d3
ldr d4, [sp, #5216] ; 8-byte Folded Reload
fadd d4, d4, d3
ldr d3, [sp, #10776] ; 8-byte Folded Reload
fmul d3, d3, d4
fmov d0, d4
str d4, [sp, #10176] ; 8-byte Folded Spill
fadd d21, d3, d2
mov x9, #18456
movk x9, #63321, lsl #16
movk x9, #33926, lsl #32
movk x9, #48991, lsl #48
fmov d2, x9
str d2, [sp, #8096] ; 8-byte Folded Spill
fmul d2, d6, d2
ldur d4, [x29, #-208] ; 8-byte Folded Reload
fmul d3, d4, d21
fadd d2, d3, d2
ldr d19, [sp, #12160] ; 8-byte Folded Reload
fmul d3, d19, d1
ldr d20, [sp, #12016] ; 8-byte Folded Reload
fmul d30, d20, d28
fadd d3, d3, d30
fmul d30, d20, d15
fadd d3, d30, d3
str d24, [sp, #7904] ; 8-byte Folded Spill
fmul d30, d19, d24
fadd d3, d30, d3
ldr d24, [sp, #12336] ; 8-byte Folded Reload
fmul d30, d24, d16
fsub d3, d3, d30
fmul d30, d24, d18
fsub d17, d3, d30
ldr d1, [sp, #12192] ; 8-byte Folded Reload
fmul d3, d1, d16
fmul d16, d1, d18
fadd d3, d16, d3
fmul d16, d27, d17
fsub d3, d16, d3
ldr d27, [sp, #10896] ; 8-byte Folded Reload
fmul d16, d9, d27
fmov d26, d7
str d7, [sp, #10440] ; 8-byte Folded Spill
fmul d18, d12, d7
fadd d16, d16, d18
fmul d18, d24, d17
fmov d7, d17
str d17, [sp, #10184] ; 8-byte Folded Spill
fadd d16, d18, d16
mov x9, #-7378697629483820647
movk x9, #39322
movk x9, #16297, lsl #48
fmov d1, x9
str d1, [sp, #8048] ; 8-byte Folded Spill
fmul d16, d16, d1
fadd d3, d3, d16
ldr d1, [sp, #12104] ; 8-byte Folded Reload
fmul d16, d1, d13
fsub d3, d3, d16
fmul d16, d1, d25
fsub d3, d3, d16
mov x9, #57269
movk x9, #60105, lsl #16
movk x9, #55991, lsl #32
movk x9, #16301, lsl #48
fmov d16, x9
fadd d3, d3, d16
ldr d1, [sp, #9336] ; 8-byte Folded Reload
fmul d16, d1, d5
fadd d3, d3, d16
mov x9, #56877
movk x9, #10885, lsl #16
movk x9, #2572, lsl #32
movk x9, #16289, lsl #48
fmov d1, x9
str d1, [sp, #8056] ; 8-byte Folded Spill
fmul d16, d23, d1
fadd d3, d16, d3
mov x9, #61406
movk x9, #16023, lsl #16
movk x9, #30452, lsl #32
movk x9, #16153, lsl #48
fmov d1, x9
str d1, [sp, #8064] ; 8-byte Folded Spill
fmul d16, d22, d1
fsub d3, d3, d16
ldr d1, [sp, #10784] ; 8-byte Folded Reload
fmul d16, d1, d0
fadd d1, d16, d3
ldur d0, [x29, #-200] ; 8-byte Folded Reload
fmul d3, d0, d1
fadd d2, d3, d2
mov x9, #36544
movk x9, #43611, lsl #16
movk x9, #860, lsl #32
movk x9, #16326, lsl #48
fmov d3, x9
str d3, [sp, #8272] ; 8-byte Folded Spill
fmul d3, d6, d3
str d21, [sp, #7896] ; 8-byte Folded Spill
fmul d16, d0, d21
fsub d3, d3, d16
str d1, [sp, #7888] ; 8-byte Folded Spill
fmul d16, d4, d1
fadd d3, d16, d3
ldr d0, [sp, #11104] ; 8-byte Folded Reload
fadd d16, d2, d0
mov x9, #4363988038922010624
fmov d0, x9
fmul d18, d3, d0
fmov d4, d0
str d0, [sp, #8072] ; 8-byte Folded Spill
fadd d1, d18, d16
mov x9, #43115
movk x9, #62349, lsl #16
movk x9, #30721, lsl #32
movk x9, #49115, lsl #48
fmov d0, x9
str d0, [sp, #9272] ; 8-byte Folded Spill
ldr d5, [sp, #10544] ; 8-byte Folded Reload
fmul d16, d5, d0
ldr q0, [sp, #12000] ; 16-byte Folded Reload
fmul d18, d0, d1
mov.16b v17, v0
fsub d28, d16, d18
mov x9, #4359484439294640128
fmov d0, x9
str d0, [sp, #8016] ; 8-byte Folded Spill
fmul d31, d31, d0
mov x9, #62612
movk x9, #18904, lsl #16
movk x9, #1144, lsl #32
movk x9, #49064, lsl #48
fmov d0, x9
str d0, [sp, #8008] ; 8-byte Folded Spill
fmul d0, d6, d0
fsub d0, d0, d31
fmul d2, d2, d4
fadd d0, d2, d0
fadd d2, d3, d0
ldr q3, [sp, #11840] ; 16-byte Folded Reload
fmul d0, d3, d2
fsub d5, d28, d0
str q2, [sp, #10496] ; 16-byte Folded Spill
fmul d0, d17, d2
str q1, [sp, #10544] ; 16-byte Folded Spill
fmul d2, d3, d1
fsub d1, d0, d2
ldr d3, [sp, #12208] ; 8-byte Folded Reload
fmul d0, d29, d3
ldr d2, [sp, #11312] ; 8-byte Folded Reload
fadd d2, d0, d2
ldr d0, [sp, #11784] ; 8-byte Folded Reload
ldr d4, [sp, #11640] ; 8-byte Folded Reload
fmul d0, d4, d0
str d2, [sp, #9896] ; 8-byte Folded Spill
fmul d2, d3, d2
fsub d0, d0, d2
mov x9, #52090
movk x9, #42545, lsl #16
movk x9, #26349, lsl #32
movk x9, #16345, lsl #48
ldr q6, [sp, #11824] ; 16-byte Folded Reload
fmul d2, d6, d1
fmov d3, x9
str d3, [sp, #9248] ; 8-byte Folded Spill
fmul d3, d0, d3
fsub d2, d2, d3
ldr q4, [sp, #12240] ; 16-byte Folded Reload
fmul d3, d4, d5
fsub d2, d3, d2
str q2, [sp, #11104] ; 16-byte Folded Spill
mov x9, #11201
movk x9, #50599, lsl #16
movk x9, #31589, lsl #32
movk x9, #16242, lsl #48
fmov d2, x9
str d2, [sp, #8080] ; 8-byte Folded Spill
fmul d0, d0, d2
str q1, [sp, #10448] ; 16-byte Folded Spill
fmul d2, d4, d1
fsub d0, d2, d0
str q5, [sp, #10480] ; 16-byte Folded Spill
fmul d2, d6, d5
fadd d0, d0, d2
str q0, [sp, #10912] ; 16-byte Folded Spill
ldr d0, [sp, #10960] ; 8-byte Folded Reload
ldr d1, [sp, #8928] ; 8-byte Folded Reload
fmul d0, d0, d1
mov x9, #10523
movk x9, #38535, lsl #16
movk x9, #12921, lsl #32
movk x9, #16642, lsl #48
fmov d2, x9
fmul d0, d0, d2
ldr d1, [sp, #10344] ; 8-byte Folded Reload
fmul d0, d1, d0
ldr d1, [sp, #10312] ; 8-byte Folded Reload
fdiv d0, d0, d1
ldr d1, [sp, #10296] ; 8-byte Folded Reload
fmul d0, d1, d0
ldr d2, [sp, #10976] ; 8-byte Folded Reload
ldr d1, [sp, #9128] ; 8-byte Folded Reload
fmul d2, d2, d1
fmov d6, #0.50000000
fmul d2, d2, d6
ldr d1, [sp, #10320] ; 8-byte Folded Reload
fmul d3, d1, d2
mov x9, #211106232532992
movk x9, #16498, lsl #48
fmov d2, x9
fmul d3, d3, d2
fsub d3, d0, d3
fmul d0, d8, d27
fmul d21, d14, d26
fadd d0, d0, d21
fmul d21, d20, d7
fadd d5, d21, d0
fadd d3, d3, d5
fmul d21, d5, d6
fmov d6, #0.50000000
fsub d23, d3, d21
ldr d0, [sp, #9136] ; 8-byte Folded Reload
ldr d1, [sp, #10936] ; 8-byte Folded Reload
fmul d1, d0, d1
ldr d3, [sp, #10392] ; 8-byte Folded Reload
ldr d0, [sp, #8040] ; 8-byte Folded Reload
fmul d21, d3, d0
fadd d28, d1, d21
ldr d18, [sp, #8024] ; 8-byte Folded Reload
fmul d1, d11, d18
fmul d21, d10, d28
fsub d29, d1, d21
fmul d21, d8, d29
ldr d16, [sp, #10200] ; 8-byte Folded Reload
fadd d22, d16, d21
ldr d17, [sp, #8032] ; 8-byte Folded Reload
fmul d21, d9, d17
ldr d3, [sp, #6072] ; 8-byte Folded Reload
fsub d21, d21, d3
fmul d15, d8, d21
fadd d22, d15, d22
ldr d30, [sp, #11888] ; 8-byte Folded Reload
fmul d15, d30, d28
ldr d0, [sp, #10904] ; 8-byte Folded Reload
fmul d26, d11, d0
fadd d0, d26, d15
fadd d22, d0, d22
fmul d26, d9, d13
fsub d22, d22, d26
fmul d26, d9, d25
fsub d15, d22, d26
ldr d1, [sp, #12200] ; 8-byte Folded Reload
fmul d22, d1, d16
fmul d26, d14, d29
fadd d22, d22, d26
fmul d26, d14, d21
fadd d22, d26, d22
fmul d26, d1, d0
fadd d22, d26, d22
fmul d26, d12, d13
fsub d22, d22, d26
fmul d26, d12, d25
fsub d1, d22, d26
str d1, [sp, #10976] ; 8-byte Folded Spill
fmul d26, d8, d15
fmul d27, d14, d1
fadd d26, d26, d27
fmov d7, d16
fmul d27, d19, d16
fmul d7, d20, d29
fadd d7, d27, d7
fmul d27, d20, d21
fadd d7, d27, d7
str d0, [sp, #10320] ; 8-byte Folded Spill
fmul d27, d19, d0
fadd d7, d27, d7
fmul d27, d24, d13
fsub d7, d7, d27
fmul d27, d24, d25
fsub d16, d7, d27
fmul d7, d20, d16
fadd d7, d7, d26
ldr d4, [sp, #10944] ; 8-byte Folded Reload
ldr d0, [sp, #9280] ; 8-byte Folded Reload
fmul d26, d4, d0
mov x9, #18811
movk x9, #34700, lsl #16
movk x9, #61210, lsl #32
movk x9, #16643, lsl #48
fmov d27, x9
fmul d26, d26, d27
ldr d4, [sp, #10352] ; 8-byte Folded Reload
fmul d26, d4, d26
ldr d4, [sp, #10328] ; 8-byte Folded Reload
fdiv d26, d26, d4
ldr d4, [sp, #10304] ; 8-byte Folded Reload
fmul d26, d4, d26
ldr d4, [sp, #10952] ; 8-byte Folded Reload
ldr d0, [sp, #9880] ; 8-byte Folded Reload
fmul d27, d4, d0
fmul d27, d27, d6
ldr d4, [sp, #10336] ; 8-byte Folded Reload
fmul d27, d4, d27
fmul d2, d27, d2
fsub d2, d26, d2
fsub d0, d23, d5
fmov d22, d23
fsub d0, d0, d7
fadd d2, d2, d7
fmul d7, d7, d6
fsub d2, d2, d7
fadd d23, d2, d0
ldr d12, [sp, #10112] ; 8-byte Folded Reload
ldr d6, [sp, #9296] ; 8-byte Folded Reload
fmul d0, d6, d12
ldr d26, [sp, #10768] ; 8-byte Folded Reload
ldr d13, [sp, #8320] ; 8-byte Folded Reload
fmul d2, d13, d26
fadd d0, d0, d2
ldr d1, [sp, #10712] ; 8-byte Folded Reload
ldr d2, [sp, #11328] ; 8-byte Folded Reload
fmul d2, d1, d2
fadd d0, d2, d0
ldr d2, [sp, #5192] ; 8-byte Folded Reload
ldr d4, [sp, #11568] ; 8-byte Folded Reload
fmul d2, d4, d2
fadd d0, d0, d2
ldr d2, [sp, #6080] ; 8-byte Folded Reload
fmul d2, d10, d2
ldr d9, [sp, #8864] ; 8-byte Folded Reload
fmul d7, d11, d9
fsub d2, d2, d7
ldr d4, [sp, #9144] ; 8-byte Folded Reload
fmul d0, d0, d4
fmul d2, d2, d4
fadd d0, d2, d0
ldr d25, [sp, #12176] ; 8-byte Folded Reload
fmul d2, d25, d17
ldr d19, [sp, #11648] ; 8-byte Folded Reload
fmul d4, d19, d3
fsub d2, d2, d4
ldr d5, [sp, #12184] ; 8-byte Folded Reload
ldr d3, [sp, #8920] ; 8-byte Folded Reload
fmul d4, d5, d3
ldr d3, [sp, #11736] ; 8-byte Folded Reload
ldr d7, [sp, #9352] ; 8-byte Folded Reload
fmul d7, d3, d7
fsub d4, d4, d7
fadd d2, d2, d4
fsub d0, d0, d2
ldr d4, [sp, #11904] ; 8-byte Folded Reload
ldr d31, [sp, #10568] ; 8-byte Folded Reload
fmul d2, d4, d31
fadd d0, d2, d0
ldr d10, [sp, #10832] ; 8-byte Folded Reload
fmul d2, d4, d10
fadd d0, d2, d0
ldr d27, [sp, #11384] ; 8-byte Folded Reload
ldr d2, [sp, #10896] ; 8-byte Folded Reload
fmul d2, d27, d2
fadd d0, d2, d0
ldr d4, [sp, #11664] ; 8-byte Folded Reload
fmul d2, d4, d29
fadd d0, d2, d0
fmul d2, d4, d21
fadd d0, d2, d0
ldr d8, [sp, #11280] ; 8-byte Folded Reload
fmul d2, d8, d15
ldur d15, [x29, #-256] ; 8-byte Folded Reload
fadd d0, d2, d0
ldr d7, [sp, #8840] ; 8-byte Folded Reload
fmul d2, d23, d7
fadd d0, d2, d0
ldr d17, [sp, #11512] ; 8-byte Folded Reload
fmul d2, d17, d9
ldr d4, [sp, #11424] ; 8-byte Folded Reload
fmul d4, d1, d4
fadd d2, d4, d2
ldr d1, [sp, #8888] ; 8-byte Folded Reload
fmul d4, d5, d1
fsub d2, d2, d4
ldr d1, [sp, #10856] ; 8-byte Folded Reload
fmul d4, d3, d1
fadd d2, d4, d2
fmul d4, d25, d18
fsub d2, d2, d4
fmul d4, d19, d28
fadd d2, d4, d2
ldr d3, [sp, #8992] ; 8-byte Folded Reload
fadd d2, d3, d2
ldr d5, [sp, #10848] ; 8-byte Folded Reload
fmul d4, d6, d5
fadd d2, d4, d2
ldr d3, [sp, #10840] ; 8-byte Folded Reload
fmul d4, d13, d3
fadd d2, d4, d2
str d2, [sp, #10344] ; 8-byte Folded Spill
ldr d18, [sp, #12280] ; 8-byte Folded Reload
fmul d2, d18, d2
fadd d0, d2, d0
ldr d2, [sp, #11520] ; 8-byte Folded Reload
fmul d2, d2, d26
ldr q26, [sp, #10912] ; 16-byte Folded Reload
fmul d4, d30, d3
fadd d2, d4, d2
fmov d3, d17
fmul d4, d17, d12
fmov d3, d5
fmul d5, d11, d5
fadd d3, d5, d4
str d3, [sp, #9880] ; 8-byte Folded Spill
fsub d19, d2, d3
fmul d2, d14, d19
ldr d3, [sp, #10432] ; 8-byte Folded Reload
ldur d4, [x29, #-216] ; 8-byte Folded Reload
fmul d4, d4, d3
fadd d17, d2, d4
mov x9, #61406
movk x9, #16023, lsl #16
movk x9, #30452, lsl #32
movk x9, #16169, lsl #48
fmov d2, x9
fmul d2, d17, d2
fadd d0, d2, d0
fmul d2, d20, d19
fmul d4, d24, d3
fadd d20, d2, d4
mov x9, #64744
movk x9, #21380, lsl #16
movk x9, #23316, lsl #32
movk x9, #16210, lsl #48
fmov d2, x9
fmul d2, d20, d2
fadd d0, d2, d0
ldr d24, [sp, #9072] ; 8-byte Folded Reload
ldr d2, [sp, #8896] ; 8-byte Folded Reload
fmul d2, d24, d2
fsub d0, d0, d2
ldr d11, [sp, #11504] ; 8-byte Folded Reload
ldr d2, [sp, #11536] ; 8-byte Folded Reload
fmul d2, d11, d2
str d28, [sp, #10944] ; 8-byte Folded Spill
fadd d25, d1, d28
ldr d9, [sp, #11640] ; 8-byte Folded Reload
ldr d1, [sp, #11368] ; 8-byte Folded Reload
fmul d4, d9, d1
fadd d3, d4, d25
str d3, [sp, #10936] ; 8-byte Folded Spill
fmul d4, d1, d3
fadd d4, d4, d2
fmul d2, d4, d7
fsub d0, d0, d2
ldr d2, [sp, #10888] ; 8-byte Folded Reload
ldr d3, [sp, #11440] ; 8-byte Folded Reload
fmul d2, d2, d3
fadd d0, d2, d0
ldr d2, [sp, #10880] ; 8-byte Folded Reload
fmul d2, d2, d18
str d2, [sp, #10352] ; 8-byte Folded Spill
fmul d2, d2, d1
fsub d5, d0, d2
ldr d0, [sp, #8016] ; 8-byte Folded Reload
fmul d0, d5, d0
ldr d1, [sp, #8008] ; 8-byte Folded Reload
fmul d2, d23, d1
fsub d0, d2, d0
ldr d1, [sp, #8096] ; 8-byte Folded Reload
fmul d2, d23, d1
mov x9, #18456
movk x9, #63321, lsl #16
movk x9, #33926, lsl #32
movk x9, #16223, lsl #48
fmov d6, x9
fmul d6, d4, d6
fadd d2, d2, d6
ldr d1, [sp, #12192] ; 8-byte Folded Reload
fmul d6, d1, d31
fmul d7, d1, d10
fadd d6, d7, d6
ldr d1, [sp, #10184] ; 8-byte Folded Reload
fmul d7, d27, d1
fadd d6, d6, d7
ldr d1, [sp, #8048] ; 8-byte Folded Reload
fmul d3, d22, d1
fadd d3, d6, d3
ldr d1, [sp, #12104] ; 8-byte Folded Reload
fmul d6, d1, d29
fadd d3, d6, d3
fmul d6, d1, d21
fadd d3, d6, d3
fmul d6, d8, d16
fadd d3, d6, d3
ldr d1, [sp, #9336] ; 8-byte Folded Reload
fmul d6, d1, d19
fadd d3, d3, d6
mov x9, #45724
movk x9, #42429, lsl #16
movk x9, #11379, lsl #32
movk x9, #16169, lsl #48
fmov d6, x9
fmul d7, d17, d6
fsub d3, d3, d7
mov x9, #45033
movk x9, #40035, lsl #16
movk x9, #524, lsl #32
movk x9, #48971, lsl #48
fmov d7, x9
fmul d7, d20, d7
fadd d3, d3, d7
ldr d1, [sp, #8056] ; 8-byte Folded Reload
fmul d7, d24, d1
fadd d3, d3, d7
ldr d16, [sp, #10424] ; 8-byte Folded Reload
ldr d1, [sp, #10784] ; 8-byte Folded Reload
fmul d7, d1, d16
fadd d3, d7, d3
ldr d30, [sp, #10176] ; 8-byte Folded Reload
ldr d1, [sp, #8064] ; 8-byte Folded Reload
fmul d7, d30, d1
fadd d18, d7, d3
ldur d28, [x29, #-200] ; 8-byte Folded Reload
fmul d3, d28, d18
fadd d2, d2, d3
ldr d1, [sp, #11976] ; 8-byte Folded Reload
fmul d3, d1, d31
fmul d7, d1, d10
fadd d3, d7, d3
ldr d1, [sp, #10440] ; 8-byte Folded Reload
fmul d7, d27, d1
fadd d3, d3, d7
ldr d7, [sp, #12064] ; 8-byte Folded Reload
fmul d1, d7, d29
fadd d1, d1, d3
fmul d3, d7, d21
fadd d1, d3, d1
ldr d3, [sp, #10976] ; 8-byte Folded Reload
fmul d3, d8, d3
fadd d1, d3, d1
ldr d3, [sp, #9344] ; 8-byte Folded Reload
fmul d3, d3, d19
fadd d1, d1, d3
mov x9, #40862
movk x9, #31695, lsl #16
movk x9, #12355, lsl #32
movk x9, #16198, lsl #48
fmov d3, x9
fmul d3, d17, d3
fadd d1, d1, d3
fmul d3, d20, d6
fsub d1, d1, d3
ldr d3, [sp, #9264] ; 8-byte Folded Reload
fmul d3, d24, d3
fadd d1, d1, d3
fmov d3, d16
ldr d6, [sp, #10776] ; 8-byte Folded Reload
fmul d3, d6, d16
fadd d1, d3, d1
fmov d6, d30
ldr d3, [sp, #9256] ; 8-byte Folded Reload
fmul d3, d30, d3
fsub d6, d1, d3
ldur d7, [x29, #-208] ; 8-byte Folded Reload
fmul d1, d7, d6
fadd d1, d1, d2
ldr d16, [sp, #8072] ; 8-byte Folded Reload
fmul d2, d1, d16
fadd d0, d2, d0
ldr d3, [sp, #8272] ; 8-byte Folded Reload
fmul d2, d23, d3
fmul d3, d4, d3
fsub d2, d2, d3
str d18, [sp, #10336] ; 8-byte Folded Spill
fmul d3, d7, d18
fadd d2, d2, d3
str d6, [sp, #10328] ; 8-byte Folded Spill
fmul d3, d28, d6
fsub d2, d2, d3
fadd d0, d2, d0
mov x9, #62612
movk x9, #18904, lsl #16
movk x9, #1144, lsl #32
movk x9, #16296, lsl #48
fmov d3, x9
fmul d3, d4, d3
fadd d6, d0, d3
ldr d0, [sp, #9888] ; 8-byte Folded Reload
fmul d0, d5, d0
ldr d5, [sp, #9288] ; 8-byte Folded Reload
fmul d3, d23, d5
fsub d0, d0, d3
fadd d0, d1, d0
fmul d1, d2, d16
fadd d0, d1, d0
fmul d1, d4, d5
fadd d2, d0, d1
ldr q3, [sp, #12000] ; 16-byte Folded Reload
fmul d0, d3, d6
ldr q5, [sp, #11840] ; 16-byte Folded Reload
fmul d1, d5, d2
fsub d4, d0, d1
ldr d16, [sp, #8080] ; 8-byte Folded Reload
fmul d0, d23, d16
ldr q7, [sp, #12240] ; 16-byte Folded Reload
fmul d1, d7, d4
fsub d0, d1, d0
ldr d1, [sp, #9272] ; 8-byte Folded Reload
fmul d1, d23, d1
str q2, [sp, #11536] ; 16-byte Folded Spill
fmul d2, d3, d2
fadd d1, d2, d1
str q6, [sp, #11568] ; 16-byte Folded Spill
fmul d2, d5, d6
fadd d1, d2, d1
ldr d2, [sp, #11584] ; 8-byte Folded Reload
fmul d2, d11, d2
ldr d5, [sp, #12056] ; 8-byte Folded Reload
fmul d3, d9, d5
fadd d3, d3, d25
str d3, [sp, #10832] ; 8-byte Folded Spill
fmul d3, d5, d3
fadd d2, d3, d2
mov x9, #43115
movk x9, #62349, lsl #16
movk x9, #30721, lsl #32
movk x9, #16347, lsl #48
fmov d3, x9
fmul d2, d2, d3
fadd d3, d1, d2
ldr q6, [sp, #11824] ; 16-byte Folded Reload
fmul d1, d6, d3
fsub d0, d0, d1
ldr d1, [sp, #11784] ; 8-byte Folded Reload
fmul d1, d11, d1
ldr d5, [sp, #12208] ; 8-byte Folded Reload
fmul d2, d9, d5
fadd d2, d2, d25
str d2, [sp, #10568] ; 8-byte Folded Spill
fmul d2, d5, d2
fadd d1, d2, d1
fmul d2, d1, d16
fadd d21, d0, d2
ldr d5, [sp, #9248] ; 8-byte Folded Reload
fmul d0, d23, d5
str q4, [sp, #11584] ; 16-byte Folded Spill
fmul d2, d6, d4
fsub d0, d2, d0
str q3, [sp, #11440] ; 16-byte Folded Spill
fmul d2, d7, d3
fadd d0, d0, d2
fmul d1, d1, d5
fadd d22, d0, d1
fmul d0, d15, d11
ldr d1, [sp, #11312] ; 8-byte Folded Reload
fadd d27, d0, d1
fmul d0, d15, d9
str d25, [sp, #8840] ; 8-byte Folded Spill
fadd d24, d0, d25
ldr q25, [sp, #11104] ; 16-byte Folded Reload
cbz x8, LBB19_58
; %bb.57:
fmov d20, d11
fmov d19, d9
ldr q17, [sp, #10736] ; 16-byte Folded Reload
fmul d0, d17, d25
ldr q16, [sp, #11136] ; 16-byte Folded Reload
fmul d1, d16, d26
ldr d6, [sp, #11248] ; 8-byte Folded Reload
fmul d2, d6, d9
fmul d3, d15, d27
fsub d2, d2, d3
mov x9, #39127
movk x9, #24179, lsl #16
movk x9, #24811, lsl #32
movk x9, #16304, lsl #48
fmov d3, x9
fmul d4, d2, d3
fsub d1, d1, d4
fsub d0, d0, d1
fadd d0, d0, d0
mov x9, #33620
movk x9, #2364, lsl #16
movk x9, #33974, lsl #32
movk x9, #16305, lsl #48
fmov d1, x9
fmul d4, d23, d1
fmul d5, d17, d21
fsub d4, d5, d4
fmul d5, d16, d22
fsub d4, d4, d5
fmul d5, d6, d11
fmul d6, d15, d24
fadd d5, d5, d6
fmul d6, d5, d1
fadd d4, d4, d6
fadd d4, d4, d4
fadd d0, d0, d4
ldr d18, [sp, #11400] ; 8-byte Folded Reload
fmul d4, d18, d0
fmul d6, d23, d3
fmul d7, d16, d21
fsub d6, d7, d6
fmul d7, d17, d22
fadd d6, d7, d6
fmul d3, d5, d3
fadd d3, d6, d3
fmul d5, d17, d26
fmul d1, d2, d1
fsub d1, d5, d1
fmul d2, d16, d25
fadd d1, d2, d1
fadd d1, d1, d3
ldr d2, [sp, #11160] ; 8-byte Folded Reload
fmul d2, d2, d1
fsub d2, d4, d2
fmul d2, d18, d2
fmov d3, #0.50000000
fmul d2, d2, d3
ldr d4, [sp, #11392] ; 8-byte Folded Reload
fmul d0, d4, d0
ldr d5, [sp, #11240] ; 8-byte Folded Reload
fmul d1, d5, d1
fadd d0, d1, d0
fmul d0, d4, d0
fmul d0, d0, d3
fsub d0, d2, d0
str d0, [x8, #80]
LBB19_58:
str d27, [sp, #8064] ; 8-byte Folded Spill
str d24, [sp, #8072] ; 8-byte Folded Spill
str d23, [sp, #9072] ; 8-byte Folded Spill
str q22, [sp, #11248] ; 16-byte Folded Spill
str q21, [sp, #11328] ; 16-byte Folded Spill
ldr d0, [sp, #3064] ; 8-byte Folded Reload
ldr d1, [sp, #3056] ; 8-byte Folded Reload
fsub d0, d1, d0
ldr d1, [sp, #10624] ; 8-byte Folded Reload
ldr d2, [sp, #3568] ; 8-byte Folded Reload
fsub d1, d1, d2
ldr d2, [sp, #3552] ; 8-byte Folded Reload
fsub d1, d1, d2
ldr d2, [sp, #10608] ; 8-byte Folded Reload
fdiv d1, d1, d2
ldr d2, [sp, #10584] ; 8-byte Folded Reload
ldr d3, [sp, #3520] ; 8-byte Folded Reload
fsub d2, d2, d3
ldr d3, [sp, #3512] ; 8-byte Folded Reload
fsub d2, d2, d3
ldr d3, [sp, #10576] ; 8-byte Folded Reload
fdiv d3, d2, d3
ldr d2, [sp, #3048] ; 8-byte Folded Reload
ldr d4, [sp, #3032] ; 8-byte Folded Reload
fsub d4, d4, d2
ldr d2, [sp, #9952] ; 8-byte Folded Reload
ldr d5, [sp, #3472] ; 8-byte Folded Reload
fsub d2, d2, d5
ldr d5, [sp, #3464] ; 8-byte Folded Reload
fsub d2, d2, d5
ldr d5, [sp, #3024] ; 8-byte Folded Reload
ldr d6, [sp, #3000] ; 8-byte Folded Reload
fsub d17, d6, d5
ldr d5, [sp, #9944] ; 8-byte Folded Reload
fdiv d2, d2, d5
ldr d5, [sp, #9936] ; 8-byte Folded Reload
fmul d2, d5, d2
ldr d5, [sp, #3456] ; 8-byte Folded Reload
fsub d10, d2, d5
str d10, [sp, #11784] ; 8-byte Folded Spill
ldr d2, [sp, #10616] ; 8-byte Folded Reload
fmul d13, d2, d1
ldr d5, [sp, #10600] ; 8-byte Folded Reload
fmul d1, d5, d1
ldr d5, [sp, #3560] ; 8-byte Folded Reload
fsub d29, d1, d5
ldr d18, [sp, #12320] ; 8-byte Folded Reload
fmul d1, d18, d13
ldur d22, [x29, #-184] ; 8-byte Folded Reload
fmul d5, d22, d29
fsub d16, d1, d5
ldr d24, [sp, #11888] ; 8-byte Folded Reload
fmul d1, d24, d29
ldr d5, [sp, #3496] ; 8-byte Folded Reload
fsub d7, d1, d5
fmul d1, d24, d13
ldr d5, [sp, #3504] ; 8-byte Folded Reload
fsub d2, d5, d1
str d2, [sp, #10608] ; 8-byte Folded Spill
ldp d21, d1, [x29, #-176] ; 16-byte Folded Reload
fmul d5, d21, d16
fadd d5, d7, d5
fmul d6, d1, d2
fmov d28, d1
fadd d20, d6, d5
ldr d26, [sp, #11864] ; 8-byte Folded Reload
fmul d5, d26, d16
ldr d19, [sp, #12200] ; 8-byte Folded Reload
fmul d6, d19, d7
fadd d5, d5, d6
ldur d1, [x29, #-216] ; 8-byte Folded Reload
fmul d6, d1, d2
fmov d31, d1
fadd d1, d6, d5
fmul d5, d21, d20
fmov d8, d20
str d20, [sp, #10976] ; 8-byte Folded Spill
fmul d6, d26, d1
fmov d12, d1
str d1, [sp, #10960] ; 8-byte Folded Spill
fadd d5, d5, d6
ldr d25, [sp, #12016] ; 8-byte Folded Reload
fmul d6, d25, d16
ldr d20, [sp, #12160] ; 8-byte Folded Reload
str d7, [sp, #10312] ; 8-byte Folded Spill
fmul d7, d20, d7
fadd d6, d6, d7
ldr d1, [sp, #12336] ; 8-byte Folded Reload
fmul d7, d1, d2
fmov d9, d1
fadd d1, d7, d6
fmul d6, d25, d1
fmov d11, d1
str d1, [sp, #10952] ; 8-byte Folded Spill
fadd d5, d6, d5
fadd d6, d0, d5
fmov d27, #0.50000000
fmul d7, d5, d27
fsub d23, d6, d7
ldr d0, [sp, #10592] ; 8-byte Folded Reload
fmul d7, d0, d3
ldr d0, [sp, #10392] ; 8-byte Folded Reload
fmul d3, d0, d3
ldr d0, [sp, #3072] ; 8-byte Folded Reload
fsub d30, d3, d0
fmul d3, d18, d7
fmul d6, d22, d30
fsub d22, d3, d6
fmul d3, d24, d30
ldr d0, [sp, #3080] ; 8-byte Folded Reload
fsub d1, d3, d0
fmul d3, d24, d7
ldr d0, [sp, #3544] ; 8-byte Folded Reload
fsub d3, d0, d3
fmul d6, d21, d22
fadd d6, d1, d6
fmul d18, d28, d3
fadd d18, d18, d6
fmul d6, d26, d22
fmul d19, d19, d1
fadd d6, d6, d19
fmul d19, d31, d3
fadd d24, d19, d6
fmul d6, d21, d18
fmul d19, d26, d24
fadd d6, d6, d19
fmul d19, d25, d22
str d1, [sp, #10304] ; 8-byte Folded Spill
fmul d20, d20, d1
fadd d19, d19, d20
fmul d20, d9, d3
fadd d28, d20, d19
fmul d19, d25, d28
fadd d6, d19, d6
fsub d5, d23, d5
fsub d5, d5, d6
fadd d4, d4, d6
fmul d6, d6, d27
fsub d4, d4, d6
fadd d26, d4, d5
ldr d1, [sp, #11368] ; 8-byte Folded Reload
fmul d25, d1, d10
fmul d4, d21, d25
ldr d0, [sp, #3536] ; 8-byte Folded Reload
fsub d4, d0, d4
ldr d19, [sp, #12136] ; 8-byte Folded Reload
fmul d5, d19, d25
ldr d0, [sp, #3480] ; 8-byte Folded Reload
fsub d15, d0, d5
fmul d6, d21, d4
fmov d5, d4
str d4, [sp, #10600] ; 8-byte Folded Spill
fmul d19, d19, d15
str d15, [sp, #10592] ; 8-byte Folded Spill
fadd d19, d6, d19
ldr d20, [sp, #11896] ; 8-byte Folded Reload
fmul d6, d20, d25
ldr d0, [sp, #3448] ; 8-byte Folded Reload
fsub d2, d0, d6
fmul d20, d20, d2
str d2, [sp, #10584] ; 8-byte Folded Spill
fadd d19, d20, d19
fadd d17, d17, d19
fmul d20, d19, d27
fsub d27, d17, d20
ldr d0, [sp, #11904] ; 8-byte Folded Reload
fmul d17, d0, d16
ldr d4, [sp, #11384] ; 8-byte Folded Reload
fmul d20, d4, d8
fadd d17, d17, d20
ldr d0, [sp, #11664] ; 8-byte Folded Reload
fmul d20, d0, d22
fadd d17, d20, d17
ldr d6, [sp, #11280] ; 8-byte Folded Reload
fmul d18, d6, d18
fadd d18, d18, d17
mov x9, #63706
movk x9, #13221, lsl #16
movk x9, #1281, lsl #32
movk x9, #16209, lsl #48
fmov d17, x9
fmul d20, d26, d17
fadd d18, d18, d20
ldr d0, [sp, #11736] ; 8-byte Folded Reload
fmul d20, d0, d29
ldr d0, [sp, #12184] ; 8-byte Folded Reload
fmul d21, d0, d13
fsub d20, d20, d21
ldr d0, [sp, #12176] ; 8-byte Folded Reload
fmul d21, d0, d7
fsub d20, d20, d21
ldr d0, [sp, #11648] ; 8-byte Folded Reload
fmul d21, d0, d30
fadd d0, d21, d20
str d0, [sp, #10392] ; 8-byte Folded Spill
ldr d20, [sp, #12280] ; 8-byte Folded Reload
fmul d20, d20, d0
fadd d18, d20, d18
str d29, [sp, #10624] ; 8-byte Folded Spill
str d30, [sp, #10616] ; 8-byte Folded Spill
fadd d14, d29, d30
fmul d29, d1, d14
fmul d20, d29, d17
fsub d18, d18, d20
ldr d0, [sp, #11352] ; 8-byte Folded Reload
fmul d20, d0, d25
fsub d18, d18, d20
ldr d0, [sp, #10968] ; 8-byte Folded Reload
fmul d20, d0, d5
fadd d30, d18, d20
fsub d18, d26, d19
fadd d10, d18, d27
mov x9, #-4863887597560135680
mov x10, #62612
movk x10, #18904, lsl #16
movk x10, #1144, lsl #32
movk x10, #49064, lsl #48
fmov d21, x9
fmul d18, d30, d21
fmov d19, x10
fmul d20, d10, d19
fadd d31, d18, d20
mov x9, #18456
movk x9, #63321, lsl #16
movk x9, #33926, lsl #32
movk x9, #48991, lsl #48
mov x10, #18456
movk x10, #63321, lsl #16
movk x10, #33926, lsl #32
movk x10, #16223, lsl #48
fmov d18, x10
fmul d18, d29, d18
fmov d20, x9
fmul d8, d26, d20
fadd d8, d8, d18
ldr d5, [sp, #12192] ; 8-byte Folded Reload
fmul d18, d5, d16
fmul d9, d4, d11
fadd d9, d18, d9
mov x9, #-7378697629483820647
movk x9, #39322
movk x9, #16297, lsl #48
fmov d5, x9
str d5, [sp, #11736] ; 8-byte Folded Spill
fmul d23, d23, d5
fadd d23, d9, d23
ldr d5, [sp, #12104] ; 8-byte Folded Reload
fmul d9, d5, d22
fadd d23, d9, d23
fmul d28, d6, d28
fadd d11, d28, d23
ldur d9, [x29, #-200] ; 8-byte Folded Reload
fmul d23, d9, d11
fadd d23, d8, d23
ldr d18, [sp, #11976] ; 8-byte Folded Reload
fmul d16, d18, d16
fmul d28, d4, d12
fadd d16, d16, d28
ldr d4, [sp, #12064] ; 8-byte Folded Reload
fmul d22, d4, d22
fadd d16, d22, d16
fmul d22, d6, d24
fadd d8, d22, d16
ldur d12, [x29, #-208] ; 8-byte Folded Reload
fmul d16, d12, d8
fadd d16, d16, d23
ldr d6, [sp, #11600] ; 8-byte Folded Reload
fmul d22, d6, d25
fsub d16, d16, d22
fmul d22, d0, d15
fadd d28, d16, d22
mov x9, #4363988038922010624
fmov d5, x9
fmul d22, d28, d5
str d5, [sp, #10576] ; 8-byte Folded Spill
fadd d24, d22, d31
mov x9, #36544
movk x9, #43611, lsl #16
movk x9, #860, lsl #32
movk x9, #16326, lsl #48
fmov d22, x9
fmul d23, d26, d22
fmul d26, d29, d22
fsub d23, d23, d26
str d11, [sp, #9952] ; 8-byte Folded Spill
fmul d26, d12, d11
fadd d23, d23, d26
str d8, [sp, #9944] ; 8-byte Folded Spill
fmul d26, d9, d8
fsub d23, d23, d26
ldr d6, [sp, #11464] ; 8-byte Folded Reload
fmul d25, d6, d25
fsub d23, d23, d25
fmul d25, d0, d2
fadd d25, d23, d25
mov x9, #43516
movk x9, #54001, lsl #16
movk x9, #25165, lsl #32
movk x9, #16240, lsl #48
fmov d0, x9
str d0, [sp, #11464] ; 8-byte Folded Spill
fmul d26, d27, d0
fadd d26, d25, d26
fadd d24, d26, d24
mov x9, #62612
movk x9, #18904, lsl #16
movk x9, #1144, lsl #32
movk x9, #16296, lsl #48
str d14, [sp, #11352] ; 8-byte Folded Spill
ldr d0, [sp, #11784] ; 8-byte Folded Reload
fadd d15, d14, d0
fmul d27, d1, d15
fmov d25, x9
fmul d25, d27, d25
fadd d0, d24, d25
mov x9, #4354980839667269632
fmov d2, x9
str d2, [sp, #10968] ; 8-byte Folded Spill
fmul d30, d30, d2
mov x9, #47272
movk x9, #56762, lsl #16
movk x9, #43178, lsl #32
movk x9, #16292, lsl #48
fmov d25, x9
fmul d31, d10, d25
fsub d30, d30, d31
fadd d28, d28, d30
fmul d26, d26, d5
fadd d26, d26, d28
fmul d27, d27, d25
fadd d6, d26, d27
ldr q16, [sp, #12000] ; 16-byte Folded Reload
fmul d26, d16, d0
ldr q29, [sp, #11840] ; 16-byte Folded Reload
fmul d27, d29, d6
fsub d8, d26, d27
mov x9, #11201
movk x9, #50599, lsl #16
movk x9, #31589, lsl #32
movk x9, #16242, lsl #48
fmov d26, x9
fmul d27, d10, d26
ldr q11, [sp, #12240] ; 16-byte Folded Reload
fmul d28, d11, d8
fsub d28, d28, d27
mov x9, #43115
movk x9, #62349, lsl #16
movk x9, #30721, lsl #32
movk x9, #16347, lsl #48
fmov d27, x9
fmul d30, d10, d27
str q6, [sp, #8928] ; 16-byte Folded Spill
fmul d31, d16, d6
fsub d30, d31, d30
str q0, [sp, #8992] ; 16-byte Folded Spill
fmul d31, d29, d0
fadd d30, d31, d30
ldr d0, [sp, #12056] ; 8-byte Folded Reload
fmul d31, d0, d15
fmul d31, d31, d27
fadd d0, d30, d31
ldr q6, [sp, #11824] ; 16-byte Folded Reload
fmul d30, d6, d0
fsub d28, d28, d30
ldr d24, [sp, #12208] ; 8-byte Folded Reload
fmul d30, d24, d15
fmul d31, d30, d26
fadd d2, d28, d31
str q2, [sp, #11648] ; 16-byte Folded Spill
mov x9, #52090
movk x9, #42545, lsl #16
movk x9, #26349, lsl #32
movk x9, #16345, lsl #48
fmov d28, x9
fmul d31, d10, d28
str q8, [sp, #8896] ; 16-byte Folded Spill
fmul d8, d6, d8
fsub d31, d8, d31
str q0, [sp, #8864] ; 16-byte Folded Spill
fmul d8, d11, d0
fadd d31, d31, d8
fmul d30, d30, d28
fadd d0, d31, d30
str q0, [sp, #11600] ; 16-byte Folded Spill
ldr d14, [sp, #10608] ; 8-byte Folded Reload
ldr d0, [sp, #11904] ; 8-byte Folded Reload
fmul d30, d0, d14
ldr d8, [sp, #11376] ; 8-byte Folded Reload
ldr d5, [sp, #10976] ; 8-byte Folded Reload
fmul d31, d8, d5
fadd d30, d30, d31
ldr d0, [sp, #11664] ; 8-byte Folded Reload
fmul d31, d0, d3
fadd d30, d31, d30
ldr d31, [sp, #11752] ; 8-byte Folded Reload
fmul d2, d31, d13
ldr d0, [sp, #3624] ; 8-byte Folded Reload
fsub d2, d2, d0
ldr d31, [sp, #11672] ; 8-byte Folded Reload
fmul d7, d31, d7
fadd d2, d7, d2
ldr d0, [sp, #3632] ; 8-byte Folded Reload
fsub d0, d2, d0
str d0, [sp, #10296] ; 8-byte Folded Spill
ldr d2, [sp, #12280] ; 8-byte Folded Reload
fmul d2, d2, d0
fadd d2, d2, d30
ldr d0, [sp, #6040] ; 8-byte Folded Reload
fmul d7, d1, d0
fmul d17, d7, d17
fadd d2, d17, d2
ldr d0, [sp, #3616] ; 8-byte Folded Reload
fadd d2, d2, d0
ldr d31, [sp, #11128] ; 8-byte Folded Reload
ldr d23, [sp, #10600] ; 8-byte Folded Reload
fmul d17, d31, d23
fadd d2, d2, d17
fmul d17, d2, d21
ldr d0, [sp, #6048] ; 8-byte Folded Reload
fmul d21, d1, d0
fmul d19, d21, d19
fadd d17, d19, d17
fmul d19, d18, d14
fmov d1, d14
ldr d14, [sp, #10960] ; 8-byte Folded Reload
fmul d30, d8, d14
fadd d19, d19, d30
fmul d30, d4, d3
fadd d4, d30, d19
fmul d19, d7, d20
fmul d20, d12, d4
fadd d19, d20, d19
ldr d18, [sp, #12192] ; 8-byte Folded Reload
fmul d1, d18, d1
ldr d13, [sp, #10952] ; 8-byte Folded Reload
fmul d20, d8, d13
fadd d1, d1, d20
ldur d8, [x29, #-168] ; 8-byte Folded Reload
fmul d20, d8, d5
ldur d30, [x29, #-216] ; 8-byte Folded Reload
fmul d30, d30, d14
fadd d20, d20, d30
ldr d30, [sp, #12336] ; 8-byte Folded Reload
fmul d30, d30, d13
fadd d20, d30, d20
ldr d5, [sp, #11736] ; 8-byte Folded Reload
fmul d18, d20, d5
fadd d1, d1, d18
ldr d5, [sp, #12104] ; 8-byte Folded Reload
fmul d3, d5, d3
fadd d5, d3, d1
fmul d1, d9, d5
fadd d1, d19, d1
ldr d3, [sp, #3608] ; 8-byte Folded Reload
fadd d1, d1, d3
ldr d18, [sp, #10592] ; 8-byte Folded Reload
fmul d3, d31, d18
fadd d1, d1, d3
ldr d20, [sp, #10576] ; 8-byte Folded Reload
fmul d3, d1, d20
fadd d3, d3, d17
fmul d7, d7, d22
str d4, [sp, #9936] ; 8-byte Folded Spill
fmul d17, d9, d4
fsub d7, d7, d17
str d5, [sp, #9888] ; 8-byte Folded Spill
fmul d17, d12, d5
fadd d7, d7, d17
ldr d4, [sp, #3600] ; 8-byte Folded Reload
fadd d7, d7, d4
ldr d19, [sp, #10584] ; 8-byte Folded Reload
fmul d17, d31, d19
fadd d7, d7, d17
fmul d4, d8, d23
ldr q23, [sp, #11648] ; 16-byte Folded Reload
ldr d17, [sp, #11496] ; 8-byte Folded Reload
fmul d5, d17, d18
fmov d17, d0
fadd d4, d4, d5
ldr d5, [sp, #11760] ; 8-byte Folded Reload
fmul d5, d5, d19
fadd d4, d5, d4
ldr d0, [sp, #11464] ; 8-byte Folded Reload
fmul d4, d4, d0
fadd d4, d7, d4
ldr d0, [sp, #10968] ; 8-byte Folded Reload
fmul d2, d2, d0
fmul d5, d21, d25
fsub d2, d2, d5
fadd d1, d1, d2
fadd d0, d3, d4
fmul d2, d4, d20
fadd d4, d1, d2
fmul d1, d16, d0
fmul d2, d29, d4
fsub d5, d1, d2
fmul d1, d24, d17
ldr q24, [sp, #11600] ; 16-byte Folded Reload
fmul d2, d1, d26
fmul d3, d11, d5
fsub d2, d3, d2
ldr d3, [sp, #12056] ; 8-byte Folded Reload
fmul d3, d3, d17
fmul d3, d3, d27
str q4, [sp, #8272] ; 16-byte Folded Spill
fmul d4, d16, d4
fsub d3, d4, d3
str q0, [sp, #8320] ; 16-byte Folded Spill
fmul d4, d29, d0
fadd d0, d4, d3
fmul d3, d6, d0
fsub d18, d2, d3
str q5, [sp, #8096] ; 16-byte Folded Spill
fmul d2, d6, d5
fmul d1, d1, d28
fsub d1, d2, d1
str q0, [sp, #8080] ; 16-byte Folded Spill
fmul d2, d11, d0
fadd d19, d1, d2
ldr q22, [sp, #11136] ; 16-byte Folded Reload
ldr d21, [sp, #11400] ; 8-byte Folded Reload
ldr d20, [sp, #11392] ; 8-byte Folded Reload
cbz x8, LBB19_60
; %bb.59:
mov x9, #33620
movk x9, #2364, lsl #16
movk x9, #33974, lsl #32
movk x9, #16305, lsl #48
fmov d1, x9
fmul d2, d10, d1
ldr q0, [sp, #10736] ; 16-byte Folded Reload
fmul d3, d0, d23
fsub d2, d3, d2
fmul d3, d22, d24
fsub d2, d2, d3
ldur d5, [x29, #-256] ; 8-byte Folded Reload
fmul d3, d5, d15
fmul d4, d3, d1
fadd d2, d2, d4
fadd d2, d2, d2
fmul d4, d22, d18
fmul d5, d5, d17
mov x9, #39127
movk x9, #24179, lsl #16
movk x9, #24811, lsl #32
movk x9, #16304, lsl #48
fmov d6, x9
fmul d7, d5, d6
fsub d4, d4, d7
fmul d7, d0, d19
fadd d4, d7, d4
fadd d4, d4, d4
fsub d2, d2, d4
fmul d4, d21, d2
fmul d7, d10, d6
fmul d16, d22, d23
fsub d7, d16, d7
fmul d16, d0, d24
fadd d7, d16, d7
fmul d3, d3, d6
fadd d3, d7, d3
fmul d6, d0, d18
fmul d1, d5, d1
fsub d1, d6, d1
fmul d5, d22, d19
fsub d1, d1, d5
fadd d1, d1, d3
ldr d0, [sp, #11160] ; 8-byte Folded Reload
fmul d3, d0, d1
fsub d3, d4, d3
fmul d3, d21, d3
fmov d4, #0.50000000
fmul d3, d3, d4
fmul d2, d20, d2
ldr d0, [sp, #11240] ; 8-byte Folded Reload
fmul d1, d0, d1
fadd d1, d1, d2
fmul d1, d20, d1
fmul d1, d1, d4
fsub d1, d3, d1
str d1, [x8, #88]
LBB19_60:
str q22, [sp, #11136] ; 16-byte Folded Spill
str q19, [sp, #6912] ; 16-byte Folded Spill
str q18, [sp, #7024] ; 16-byte Folded Spill
str d15, [sp, #11464] ; 8-byte Folded Spill
str d10, [sp, #8032] ; 8-byte Folded Spill
ldr d0, [sp, #2824] ; 8-byte Folded Reload
ldr d1, [sp, #2816] ; 8-byte Folded Reload
fadd d6, d0, d1
ldr d0, [sp, #2712] ; 8-byte Folded Reload
ldr d1, [sp, #2704] ; 8-byte Folded Reload
fsub d14, d1, d0
ldr d0, [sp, #2664] ; 8-byte Folded Reload
ldr d1, [sp, #2656] ; 8-byte Folded Reload
fsub d15, d1, d0
ldr d0, [sp, #2520] ; 8-byte Folded Reload
ldr d1, [sp, #2512] ; 8-byte Folded Reload
fsub d19, d1, d0
ldr d0, [sp, #2496] ; 8-byte Folded Reload
ldr d1, [sp, #2488] ; 8-byte Folded Reload
fsub d13, d1, d0
ldr d0, [sp, #2480] ; 8-byte Folded Reload
ldr d1, [sp, #2472] ; 8-byte Folded Reload
fsub d4, d1, d0
ldr d0, [sp, #2464] ; 8-byte Folded Reload
ldr d1, [sp, #2456] ; 8-byte Folded Reload
fsub d7, d1, d0
ldr d0, [sp, #2448] ; 8-byte Folded Reload
ldr d1, [sp, #2440] ; 8-byte Folded Reload
fsub d0, d1, d0
str d0, [sp, #12056] ; 8-byte Folded Spill
ldr d0, [sp, #4976] ; 8-byte Folded Reload
ldr d1, [sp, #2848] ; 8-byte Folded Reload
fsub d3, d0, d1
ldr d0, [sp, #11376] ; 8-byte Folded Reload
fmul d1, d0, d3
ldr d2, [sp, #2856] ; 8-byte Folded Reload
fsub d1, d1, d2
ldr d2, [sp, #2832] ; 8-byte Folded Reload
fsub d1, d1, d2
ldr d2, [sp, #12280] ; 8-byte Folded Reload
str d6, [sp, #10576] ; 8-byte Folded Spill
fmul d2, d2, d6
fadd d1, d2, d1
mov x9, #63706
movk x9, #13221, lsl #16
movk x9, #1281, lsl #32
movk x9, #16209, lsl #48
fmov d6, x9
ldr d24, [sp, #2896] ; 8-byte Folded Reload
fmul d2, d24, d6
fsub d1, d1, d2
ldr d2, [sp, #2808] ; 8-byte Folded Reload
fsub d1, d1, d2
ldr d2, [sp, #2800] ; 8-byte Folded Reload
fsub d21, d1, d2
mov x9, #47272
movk x9, #56762, lsl #16
movk x9, #43178, lsl #32
movk x9, #49060, lsl #48
mov x10, #4354980839667269632
fmov d1, x9
str d1, [sp, #12280] ; 8-byte Folded Spill
ldr d26, [sp, #2880] ; 8-byte Folded Reload
fmul d16, d26, d1
fmov d1, x10
str d1, [sp, #12208] ; 8-byte Folded Spill
fmul d17, d21, d1
fsub d17, d16, d17
fmul d16, d0, d14
ldr d18, [sp, #2784] ; 8-byte Folded Reload
fsub d16, d16, d18
ldr d18, [sp, #2768] ; 8-byte Folded Reload
fsub d25, d16, d18
mov x9, #18456
movk x9, #63321, lsl #16
movk x9, #33926, lsl #32
movk x9, #16223, lsl #48
ldp d23, d8, [x29, #-216] ; 16-byte Folded Reload
fmul d18, d8, d25
fmov d16, x9
fmul d20, d24, d16
fadd d18, d18, d20
fmul d20, d0, d15
ldr d0, [sp, #2736] ; 8-byte Folded Reload
fsub d22, d20, d0
ldur d20, [x29, #-168] ; 8-byte Folded Reload
fmul d20, d20, d3
fmul d23, d23, d14
fadd d20, d20, d23
ldr d23, [sp, #12336] ; 8-byte Folded Reload
fmul d23, d23, d15
fadd d23, d23, d20
mov x9, #-7378697629483820647
movk x9, #39322
movk x9, #16297, lsl #48
fmov d20, x9
fmul d23, d23, d20
fadd d22, d22, d23
ldr d0, [sp, #2752] ; 8-byte Folded Reload
fsub d27, d22, d0
ldur d31, [x29, #-200] ; 8-byte Folded Reload
fmul d22, d31, d27
fadd d18, d18, d22
ldr d0, [sp, #2744] ; 8-byte Folded Reload
fsub d18, d18, d0
ldr d0, [sp, #2728] ; 8-byte Folded Reload
fsub d22, d18, d0
mov x9, #36544
movk x9, #43611, lsl #16
movk x9, #860, lsl #32
movk x9, #16326, lsl #48
fmov d18, x9
fmul d23, d24, d18
str d25, [sp, #9296] ; 8-byte Folded Spill
fmul d24, d31, d25
fadd d23, d23, d24
str d27, [sp, #9288] ; 8-byte Folded Spill
fmul d24, d8, d27
fsub d23, d24, d23
ldr d0, [sp, #2680] ; 8-byte Folded Reload
fsub d23, d23, d0
ldr d0, [sp, #2688] ; 8-byte Folded Reload
fsub d23, d23, d0
ldr d0, [sp, #2672] ; 8-byte Folded Reload
fsub d23, d23, d0
fsub d24, d17, d22
mov x9, #4363988038922010624
fmov d17, x9
fmul d25, d23, d17
fsub d27, d24, d25
mov x9, #-4863887597560135680
fmov d24, x9
fmul d21, d21, d24
mov x9, #62612
movk x9, #18904, lsl #16
movk x9, #1144, lsl #32
movk x9, #16296, lsl #48
fmov d25, x9
fmul d26, d26, d25
fadd d21, d21, d26
fmul d22, d22, d17
fadd d21, d22, d21
fadd d30, d21, d23
ldr q29, [sp, #11840] ; 16-byte Folded Reload
fmul d21, d29, d27
ldr q10, [sp, #12000] ; 16-byte Folded Reload
fmul d22, d10, d30
fadd d9, d22, d21
mov x9, #11201
movk x9, #50599, lsl #16
movk x9, #31589, lsl #32
movk x9, #16242, lsl #48
fmul d21, d11, d9
fmov d26, x9
ldr d28, [sp, #2696] ; 8-byte Folded Reload
fmul d22, d28, d26
fadd d22, d21, d22
mov x9, #43115
movk x9, #62349, lsl #16
movk x9, #30721, lsl #32
movk x9, #16347, lsl #48
fmov d21, x9
ldr d0, [sp, #2720] ; 8-byte Folded Reload
fmul d23, d0, d21
str d27, [sp, #9264] ; 8-byte Folded Spill
fmul d27, d10, d27
fsub d23, d27, d23
str d30, [sp, #9256] ; 8-byte Folded Spill
fmul d27, d29, d30
fsub d0, d23, d27
ldr q12, [sp, #11824] ; 16-byte Folded Reload
fmul d23, d12, d0
fadd d1, d22, d23
str d1, [sp, #11664] ; 8-byte Folded Spill
mov x9, #52090
movk x9, #42545, lsl #16
movk x9, #26349, lsl #32
movk x9, #49113, lsl #48
fmov d22, x9
fmul d23, d28, d22
str d9, [sp, #9136] ; 8-byte Folded Spill
fmul d27, d12, d9
fsub d23, d23, d27
str d0, [sp, #9128] ; 8-byte Folded Spill
fmul d27, d11, d0
fadd d2, d23, d27
ldur d28, [x29, #-176] ; 8-byte Folded Reload
fmul d23, d28, d3
ldr d30, [sp, #11864] ; 8-byte Folded Reload
fmul d27, d30, d14
fadd d23, d23, d27
ldr d9, [sp, #12016] ; 8-byte Folded Reload
fmul d27, d9, d15
fadd d23, d27, d23
fadd d19, d19, d23
fmov d1, #0.50000000
fmul d27, d23, d1
fsub d19, d19, d27
ldr d0, [sp, #4944] ; 8-byte Folded Reload
ldr d27, [sp, #2584] ; 8-byte Folded Reload
fsub d27, d0, d27
fmul d28, d28, d27
fmul d30, d30, d13
fadd d28, d28, d30
fmul d30, d9, d4
fadd d28, d30, d28
fsub d23, d19, d23
fsub d23, d23, d28
fadd d7, d7, d28
fmul d0, d28, d1
fsub d0, d7, d0
fadd d0, d0, d23
ldr d9, [sp, #2576] ; 8-byte Folded Reload
ldr d1, [sp, #12056] ; 8-byte Folded Reload
fsub d5, d1, d9
ldr d7, [sp, #2544] ; 8-byte Folded Reload
fadd d5, d5, d7
ldr d28, [sp, #11280] ; 8-byte Folded Reload
fmul d7, d28, d27
ldr d27, [sp, #11384] ; 8-byte Folded Reload
str d3, [sp, #9336] ; 8-byte Folded Spill
fmul d23, d27, d3
fadd d7, d23, d7
fmul d6, d0, d6
fadd d6, d7, d6
ldr d7, [sp, #2568] ; 8-byte Folded Reload
fsub d6, d6, d7
fadd d7, d0, d9
fadd d23, d7, d5
fmul d7, d6, d24
fmul d24, d23, d25
fsub d7, d7, d24
fmul d19, d19, d20
str d15, [sp, #9352] ; 8-byte Folded Spill
fmul d20, d27, d15
fadd d19, d20, d19
fmul d20, d23, d26
fmul d16, d0, d16
fmul d4, d28, d4
fadd d19, d4, d19
fmul d4, d31, d19
fsub d4, d4, d16
str d14, [sp, #9344] ; 8-byte Folded Spill
fmul d16, d27, d14
fmul d3, d28, d13
fadd d16, d16, d3
fmul d3, d8, d16
fadd d3, d3, d4
ldr d4, [sp, #2560] ; 8-byte Folded Reload
fsub d3, d3, d4
fmul d4, d3, d17
fadd d4, d4, d7
fmul d0, d0, d18
str d19, [sp, #9280] ; 8-byte Folded Spill
fmul d7, d8, d19
fmov d19, d2
fadd d0, d0, d7
str d16, [sp, #9272] ; 8-byte Folded Spill
fmul d7, d31, d16
fsub d0, d0, d7
ldr d7, [sp, #2552] ; 8-byte Folded Reload
fsub d0, d0, d7
mov x9, #43516
movk x9, #54001, lsl #16
movk x9, #25165, lsl #32
movk x9, #16240, lsl #48
fmov d7, x9
fmul d5, d5, d7
fadd d0, d0, d5
fadd d5, d0, d4
fmul d4, d10, d5
ldr d1, [sp, #12208] ; 8-byte Folded Reload
fmul d2, d6, d1
ldr d1, [sp, #12280] ; 8-byte Folded Reload
fmul d1, d23, d1
fadd d1, d2, d1
fadd d1, d3, d1
fmul d0, d0, d17
fadd d2, d0, d1
fmul d0, d29, d2
fsub d3, d4, d0
fmul d0, d11, d3
fsub d0, d0, d20
ldr d20, [sp, #11664] ; 8-byte Folded Reload
fmul d1, d23, d21
str d2, [sp, #9144] ; 8-byte Folded Spill
fmul d2, d10, d2
fsub d1, d2, d1
str d5, [sp, #9248] ; 8-byte Folded Spill
fmul d2, d29, d5
fadd d2, d2, d1
fmul d1, d12, d2
fsub d7, d0, d1
fmul d0, d23, d22
str d3, [sp, #8920] ; 8-byte Folded Spill
fmul d1, d12, d3
fadd d0, d1, d0
str d2, [sp, #8888] ; 8-byte Folded Spill
fmul d1, d11, d2
fadd d16, d0, d1
cbz x8, LBB19_62
; %bb.61:
mov x9, #39127
movk x9, #24179, lsl #16
movk x9, #24811, lsl #32
movk x9, #49072, lsl #48
fmov d0, x9
ldr d6, [sp, #104] ; 8-byte Folded Reload
fmul d1, d6, d0
ldr q18, [sp, #11136] ; 16-byte Folded Reload
fmul d2, d18, d20
fsub d1, d1, d2
ldr q5, [sp, #10736] ; 16-byte Folded Reload
fmul d2, d5, d19
fadd d1, d2, d1
fadd d1, d1, d1
mov x9, #33620
movk x9, #2364, lsl #16
movk x9, #33974, lsl #32
movk x9, #16305, lsl #48
fmov d2, x9
fmul d3, d23, d2
fmul d4, d5, d7
fsub d3, d4, d3
fmul d4, d18, d16
fsub d3, d3, d4
fadd d3, d3, d3
fadd d1, d1, d3
ldr d17, [sp, #11400] ; 8-byte Folded Reload
fmul d3, d17, d1
fmul d0, d23, d0
fmul d4, d18, d7
fadd d0, d4, d0
fmul d4, d5, d16
fadd d0, d4, d0
fmul d4, d5, d20
fmul d2, d6, d2
fadd d2, d4, d2
fmul d4, d18, d19
fadd d2, d4, d2
fadd d0, d2, d0
ldr d2, [sp, #11160] ; 8-byte Folded Reload
fmul d2, d2, d0
fsub d2, d3, d2
fmul d2, d17, d2
fmov d3, #0.50000000
fmul d2, d2, d3
ldr d5, [sp, #11392] ; 8-byte Folded Reload
fmul d1, d5, d1
ldr d4, [sp, #11240] ; 8-byte Folded Reload
fmul d0, d4, d0
fadd d0, d0, d1
fmul d0, d5, d0
fmul d0, d0, d3
fsub d0, d2, d0
str d0, [x8, #96]
LBB19_62:
str d23, [sp, #3888] ; 8-byte Folded Spill
str d16, [sp, #8016] ; 8-byte Folded Spill
str d7, [sp, #8024] ; 8-byte Folded Spill
str d19, [sp, #8056] ; 8-byte Folded Spill
ldr d0, [sp, #3392] ; 8-byte Folded Reload
ldr d1, [sp, #3384] ; 8-byte Folded Reload
fsub d0, d1, d0
ldr d1, [sp, #10536] ; 8-byte Folded Reload
ldr d2, [sp, #3768] ; 8-byte Folded Reload
fsub d1, d1, d2
ldr d2, [sp, #3760] ; 8-byte Folded Reload
fsub d1, d1, d2
ldr d2, [sp, #10520] ; 8-byte Folded Reload
fdiv d1, d1, d2
ldr d2, [sp, #10416] ; 8-byte Folded Reload
ldr d3, [sp, #3688] ; 8-byte Folded Reload
fsub d2, d2, d3
ldr d3, [sp, #3680] ; 8-byte Folded Reload
fsub d2, d2, d3
ldr d3, [sp, #10408] ; 8-byte Folded Reload
fdiv d3, d2, d3
ldr d2, [sp, #3368] ; 8-byte Folded Reload
ldr d4, [sp, #3360] ; 8-byte Folded Reload
fsub d4, d4, d2
ldr d2, [sp, #9856] ; 8-byte Folded Reload
ldr d5, [sp, #3648] ; 8-byte Folded Reload
fsub d2, d2, d5
ldr d5, [sp, #3656] ; 8-byte Folded Reload
fsub d2, d2, d5
ldr d5, [sp, #3344] ; 8-byte Folded Reload
ldr d6, [sp, #3336] ; 8-byte Folded Reload
fsub d17, d5, d6
ldr d5, [sp, #9848] ; 8-byte Folded Reload
fdiv d2, d2, d5
ldr d5, [sp, #9840] ; 8-byte Folded Reload
fmul d2, d5, d2
ldr d5, [sp, #3640] ; 8-byte Folded Reload
fsub d13, d2, d5
str d13, [sp, #12280] ; 8-byte Folded Spill
ldr d2, [sp, #10528] ; 8-byte Folded Reload
fmul d2, d2, d1
ldr d5, [sp, #10512] ; 8-byte Folded Reload
fmul d1, d5, d1
ldr d5, [sp, #3752] ; 8-byte Folded Reload
fsub d29, d1, d5
ldur d22, [x29, #-248] ; 8-byte Folded Reload
fmul d1, d22, d2
ldr d18, [sp, #12232] ; 8-byte Folded Reload
fmul d5, d18, d29
fsub d16, d1, d5
ldr d25, [sp, #11968] ; 8-byte Folded Reload
fmul d1, d25, d29
ldr d5, [sp, #3728] ; 8-byte Folded Reload
fsub d7, d1, d5
fmul d1, d25, d2
fmov d11, d2
str d2, [sp, #11128] ; 8-byte Folded Spill
ldr d5, [sp, #3720] ; 8-byte Folded Reload
fsub d2, d5, d1
str d2, [sp, #11368] ; 8-byte Folded Spill
ldur d21, [x29, #-192] ; 8-byte Folded Reload
fmul d5, d21, d16
fadd d5, d7, d5
ldur d1, [x29, #-160] ; 8-byte Folded Reload
fmul d6, d1, d2
fmov d27, d1
fadd d20, d6, d5
ldr d23, [sp, #11768] ; 8-byte Folded Reload
fmul d5, d23, d16
ldr d19, [sp, #12144] ; 8-byte Folded Reload
fmul d6, d19, d7
fadd d5, d5, d6
ldur d1, [x29, #-232] ; 8-byte Folded Reload
fmul d6, d1, d2
fmov d30, d1
fadd d1, d6, d5
str d1, [sp, #11904] ; 8-byte Folded Spill
fmul d5, d21, d20
fmov d9, d20
str d20, [sp, #12056] ; 8-byte Folded Spill
fmul d6, d23, d1
fadd d5, d5, d6
ldr d26, [sp, #11776] ; 8-byte Folded Reload
fmul d6, d26, d16
ldr d20, [sp, #12152] ; 8-byte Folded Reload
str d7, [sp, #10528] ; 8-byte Folded Spill
fmul d7, d20, d7
fadd d6, d6, d7
ldr d1, [sp, #12296] ; 8-byte Folded Reload
fmul d7, d1, d2
fmov d10, d1
fadd d24, d7, d6
fmul d6, d26, d24
str d24, [sp, #11736] ; 8-byte Folded Spill
fadd d5, d6, d5
fadd d6, d0, d5
fmov d28, #0.50000000
fmul d7, d5, d28
fsub d7, d6, d7
ldr d0, [sp, #10472] ; 8-byte Folded Reload
fmul d2, d0, d3
ldr d0, [sp, #10400] ; 8-byte Folded Reload
fmul d3, d0, d3
ldr d0, [sp, #3416] ; 8-byte Folded Reload
fsub d8, d3, d0
fmul d3, d22, d2
fmul d6, d18, d8
fsub d22, d3, d6
fmul d3, d25, d8
ldr d0, [sp, #3408] ; 8-byte Folded Reload
fsub d1, d3, d0
fmul d3, d25, d2
fmov d12, d2
str d2, [sp, #10968] ; 8-byte Folded Spill
ldr d0, [sp, #3736] ; 8-byte Folded Reload
fsub d0, d0, d3
str d0, [sp, #11280] ; 8-byte Folded Spill
fmul d6, d21, d22
fadd d6, d1, d6
fmul d18, d27, d0
fadd d18, d18, d6
fmul d6, d23, d22
fmul d19, d19, d1
fadd d6, d6, d19
fmul d19, d30, d0
fadd d25, d19, d6
fmul d6, d21, d18
fmul d19, d23, d25
fadd d6, d6, d19
fmul d19, d26, d22
str d1, [sp, #10520] ; 8-byte Folded Spill
fmul d20, d20, d1
fadd d19, d19, d20
fmul d20, d10, d0
fadd d30, d20, d19
fmul d19, d26, d30
fadd d6, d19, d6
fsub d5, d7, d5
fmov d23, d7
fsub d5, d5, d6
fadd d4, d4, d6
fmul d6, d6, d28
fsub d4, d4, d6
fadd d27, d4, d5
ldr d31, [sp, #11624] ; 8-byte Folded Reload
fmul d26, d31, d13
fmul d4, d21, d26
ldr d0, [sp, #3704] ; 8-byte Folded Reload
fsub d1, d0, d4
ldr d19, [sp, #11688] ; 8-byte Folded Reload
fmul d5, d19, d26
ldr d0, [sp, #3672] ; 8-byte Folded Reload
fsub d2, d0, d5
fmul d6, d21, d1
fmov d5, d1
str d1, [sp, #10608] ; 8-byte Folded Spill
fmul d19, d19, d2
str d2, [sp, #10600] ; 8-byte Folded Spill
fadd d19, d6, d19
ldr d20, [sp, #11632] ; 8-byte Folded Reload
fmul d6, d20, d26
ldr d0, [sp, #3664] ; 8-byte Folded Reload
fsub d7, d0, d6
fmul d20, d20, d7
str d7, [sp, #10592] ; 8-byte Folded Spill
fadd d19, d20, d19
fadd d17, d17, d19
fmul d20, d19, d28
fsub d28, d17, d20
ldr d3, [sp, #11728] ; 8-byte Folded Reload
fmul d17, d3, d16
ldr d4, [sp, #11360] ; 8-byte Folded Reload
fmul d20, d4, d9
fadd d17, d17, d20
ldr d13, [sp, #11560] ; 8-byte Folded Reload
fmul d20, d13, d22
fadd d17, d20, d17
ldr d6, [sp, #11272] ; 8-byte Folded Reload
fmul d18, d6, d18
fadd d18, d18, d17
mov x9, #63706
movk x9, #13221, lsl #16
movk x9, #1281, lsl #32
movk x9, #16209, lsl #48
fmov d17, x9
fmul d20, d27, d17
fsub d18, d18, d20
ldr d0, [sp, #11720] ; 8-byte Folded Reload
fmul d20, d0, d29
ldr d0, [sp, #12288] ; 8-byte Folded Reload
fmul d21, d0, d11
fsub d20, d20, d21
ldr d0, [sp, #12168] ; 8-byte Folded Reload
fmul d21, d0, d12
fsub d20, d20, d21
ldr d0, [sp, #11696] ; 8-byte Folded Reload
fmul d21, d0, d8
fadd d0, d21, d20
str d0, [sp, #10536] ; 8-byte Folded Spill
ldur d1, [x29, #-240] ; 8-byte Folded Reload
fmul d20, d1, d0
fadd d18, d20, d18
str d29, [sp, #11384] ; 8-byte Folded Spill
str d8, [sp, #11376] ; 8-byte Folded Spill
fadd d1, d29, d8
fmul d9, d31, d1
fmul d20, d9, d17
fadd d18, d18, d20
ldr d0, [sp, #11320] ; 8-byte Folded Reload
fmul d20, d0, d26
fsub d18, d18, d20
ldr d0, [sp, #10872] ; 8-byte Folded Reload
fmul d20, d0, d5
fadd d8, d18, d20
fsub d18, d27, d19
fadd d15, d18, d28
mov x9, #4359484439294640128
mov x10, #62612
movk x10, #18904, lsl #16
movk x10, #1144, lsl #32
movk x10, #16296, lsl #48
fmov d21, x9
fmul d18, d8, d21
fmov d19, x10
fmul d20, d15, d19
fsub d11, d18, d20
mov x9, #18456
movk x9, #63321, lsl #16
movk x9, #33926, lsl #32
movk x9, #48991, lsl #48
mov x10, #18456
movk x10, #63321, lsl #16
movk x10, #33926, lsl #32
movk x10, #16223, lsl #48
fmov d18, x10
fmul d18, d9, d18
fmov d20, x9
fmul d12, d27, d20
fadd d12, d12, d18
ldr d5, [sp, #12096] ; 8-byte Folded Reload
fmul d18, d5, d16
fmul d14, d4, d24
fadd d14, d18, d14
mov x9, #-7378697629483820647
movk x9, #39322
movk x9, #16297, lsl #48
fmov d5, x9
str d5, [sp, #11320] ; 8-byte Folded Spill
fmul d24, d23, d5
fadd d24, d14, d24
ldr d5, [sp, #12088] ; 8-byte Folded Reload
fmul d14, d5, d22
fadd d24, d14, d24
fmul d30, d6, d30
fadd d23, d30, d24
ldr d29, [sp, #12312] ; 8-byte Folded Reload
fmul d24, d29, d23
fadd d24, d12, d24
ldr d18, [sp, #11880] ; 8-byte Folded Reload
fmul d16, d18, d16
ldr d14, [sp, #11904] ; 8-byte Folded Reload
fmul d30, d4, d14
fadd d16, d16, d30
ldr d5, [sp, #11872] ; 8-byte Folded Reload
fmul d22, d5, d22
fadd d16, d22, d16
fmul d22, d6, d25
fadd d6, d22, d16
ldr d10, [sp, #12304] ; 8-byte Folded Reload
fmul d16, d10, d6
fadd d16, d16, d24
ldr d4, [sp, #11528] ; 8-byte Folded Reload
fmul d22, d4, d26
fsub d16, d16, d22
fmul d22, d0, d2
fadd d12, d16, d22
mov x9, #4363988038922010624
fmov d30, x9
fmul d22, d12, d30
fadd d25, d22, d11
mov x9, #36544
movk x9, #43611, lsl #16
movk x9, #860, lsl #32
movk x9, #16326, lsl #48
fmov d4, x9
fmul d24, d27, d4
fmul d27, d9, d4
fsub d24, d24, d27
str d23, [sp, #10512] ; 8-byte Folded Spill
fmul d27, d10, d23
fadd d24, d24, d27
str d6, [sp, #10472] ; 8-byte Folded Spill
fmul d27, d29, d6
fsub d24, d24, d27
ldr d6, [sp, #11432] ; 8-byte Folded Reload
fmul d26, d6, d26
fsub d24, d24, d26
fmul d26, d0, d7
fadd d26, d24, d26
mov x9, #43516
movk x9, #54001, lsl #16
movk x9, #25165, lsl #32
movk x9, #16240, lsl #48
fmov d0, x9
str d0, [sp, #11432] ; 8-byte Folded Spill
fmul d27, d28, d0
fadd d27, d26, d27
fadd d25, d27, d25
str d1, [sp, #11528] ; 8-byte Folded Spill
ldr d0, [sp, #12280] ; 8-byte Folded Reload
fadd d24, d1, d0
fmul d28, d31, d24
fmul d26, d28, d19
fadd d6, d25, d26
mov x9, #-4868391197187506176
fmov d0, x9
str d0, [sp, #10872] ; 8-byte Folded Spill
fmul d8, d8, d0
mov x9, #47272
movk x9, #56762, lsl #16
movk x9, #43178, lsl #32
movk x9, #49060, lsl #48
fmov d0, x9
str d0, [sp, #10584] ; 8-byte Folded Spill
fmul d9, d15, d0
fadd d8, d8, d9
fadd d8, d12, d8
fmul d27, d27, d30
fadd d27, d27, d8
mov x9, #47272
movk x9, #56762, lsl #16
movk x9, #43178, lsl #32
movk x9, #16292, lsl #48
fmov d8, x9
fmul d28, d28, d8
fadd d16, d27, d28
ldr q22, [sp, #11984] ; 16-byte Folded Reload
fmul d27, d22, d6
ldr q23, [sp, #11808] ; 16-byte Folded Reload
fmul d28, d23, d16
fsub d12, d27, d28
mov x9, #11201
movk x9, #50599, lsl #16
movk x9, #31589, lsl #32
movk x9, #16242, lsl #48
fmov d27, x9
fmul d28, d15, d27
ldr q0, [sp, #11952] ; 16-byte Folded Reload
fmul d8, d0, d12
fsub d8, d8, d28
mov x9, #43115
movk x9, #62349, lsl #16
movk x9, #30721, lsl #32
movk x9, #16347, lsl #48
fmov d28, x9
fmul d9, d15, d28
str q16, [sp, #6880] ; 16-byte Folded Spill
fmul d11, d22, d16
fsub d9, d11, d9
str q6, [sp, #6896] ; 16-byte Folded Spill
fmul d11, d23, d6
fadd d9, d11, d9
ldr d1, [sp, #12048] ; 8-byte Folded Reload
fmul d11, d1, d24
fmul d11, d11, d28
fadd d16, d9, d11
ldr q6, [sp, #11792] ; 16-byte Folded Reload
fmul d9, d6, d16
fsub d8, d8, d9
ldr d1, [sp, #12072] ; 8-byte Folded Reload
fmul d9, d1, d24
fmul d11, d9, d27
fadd d1, d8, d11
str q1, [sp, #12208] ; 16-byte Folded Spill
mov x9, #52090
movk x9, #42545, lsl #16
movk x9, #26349, lsl #32
movk x9, #16345, lsl #48
fmov d8, x9
fmul d11, d15, d8
str q12, [sp, #6864] ; 16-byte Folded Spill
fmul d12, d6, d12
fsub d11, d12, d11
str q16, [sp, #6848] ; 16-byte Folded Spill
fmul d12, d0, d16
fadd d11, d11, d12
fmul d9, d9, d8
fadd d1, d11, d9
str q1, [sp, #11696] ; 16-byte Folded Spill
ldr d1, [sp, #11368] ; 8-byte Folded Reload
fmul d9, d3, d1
ldr d12, [sp, #11472] ; 8-byte Folded Reload
ldr d3, [sp, #12056] ; 8-byte Folded Reload
fmul d11, d12, d3
fadd d9, d9, d11
ldr d26, [sp, #11280] ; 8-byte Folded Reload
fmul d11, d13, d26
fadd d9, d11, d9
ldr d11, [sp, #11912] ; 8-byte Folded Reload
ldr d2, [sp, #11128] ; 8-byte Folded Reload
fmul d2, d11, d2
ldr d16, [sp, #3856] ; 8-byte Folded Reload
fsub d2, d2, d16
ldr d11, [sp, #11744] ; 8-byte Folded Reload
ldr d7, [sp, #10968] ; 8-byte Folded Reload
fmul d7, d11, d7
fadd d2, d7, d2
ldr d7, [sp, #3848] ; 8-byte Folded Reload
fsub d2, d2, d7
str d2, [sp, #11128] ; 8-byte Folded Spill
ldur d7, [x29, #-240] ; 8-byte Folded Reload
fmul d2, d7, d2
fadd d2, d2, d9
ldr d7, [sp, #6024] ; 8-byte Folded Reload
fmul d7, d31, d7
fmul d17, d7, d17
fsub d2, d2, d17
ldr d16, [sp, #3824] ; 8-byte Folded Reload
fadd d2, d2, d16
ldr d11, [sp, #11096] ; 8-byte Folded Reload
ldr d13, [sp, #10608] ; 8-byte Folded Reload
fmul d17, d11, d13
fadd d2, d2, d17
fmul d17, d2, d21
ldr d16, [sp, #6032] ; 8-byte Folded Reload
fmul d21, d31, d16
fmul d19, d21, d19
fsub d17, d17, d19
fmul d19, d18, d1
fmul d9, d12, d14
fadd d19, d19, d9
fmul d9, d5, d26
fmov d31, d26
fadd d5, d9, d19
fmul d19, d7, d20
fmul d20, d10, d5
fadd d19, d20, d19
ldr d18, [sp, #12096] ; 8-byte Folded Reload
fmul d1, d18, d1
ldr d25, [sp, #11392] ; 8-byte Folded Reload
ldr d26, [sp, #11736] ; 8-byte Folded Reload
fmul d20, d12, d26
fadd d1, d1, d20
ldur d12, [x29, #-160] ; 8-byte Folded Reload
fmul d20, d12, d3
ldur d18, [x29, #-232] ; 8-byte Folded Reload
fmul d9, d18, d14
fadd d20, d20, d9
ldr d18, [sp, #12296] ; 8-byte Folded Reload
fmul d9, d18, d26
fadd d20, d9, d20
ldr d3, [sp, #11320] ; 8-byte Folded Reload
fmul d18, d20, d3
fadd d1, d1, d18
ldr d3, [sp, #12088] ; 8-byte Folded Reload
fmul d3, d3, d31
fadd d18, d3, d1
fmul d1, d29, d18
fadd d1, d19, d1
ldr d3, [sp, #3800] ; 8-byte Folded Reload
fadd d1, d1, d3
ldr d19, [sp, #10600] ; 8-byte Folded Reload
fmul d3, d11, d19
fadd d1, d1, d3
fmul d3, d1, d30
fadd d3, d3, d17
fmul d7, d7, d4
str d5, [sp, #10968] ; 8-byte Folded Spill
fmul d17, d29, d5
fsub d7, d7, d17
str d18, [sp, #10416] ; 8-byte Folded Spill
fmul d17, d10, d18
fadd d7, d7, d17
ldr d4, [sp, #3776] ; 8-byte Folded Reload
fadd d7, d7, d4
ldr d18, [sp, #10592] ; 8-byte Folded Reload
fmul d17, d11, d18
fadd d7, d7, d17
fmul d4, d12, d13
ldr d17, [sp, #11488] ; 8-byte Folded Reload
fmul d5, d17, d19
fadd d4, d4, d5
ldr d5, [sp, #11680] ; 8-byte Folded Reload
fmul d5, d5, d18
fadd d4, d5, d4
ldr d5, [sp, #11432] ; 8-byte Folded Reload
fmul d4, d4, d5
fadd d4, d7, d4
ldr d5, [sp, #10872] ; 8-byte Folded Reload
fmul d2, d2, d5
ldr d5, [sp, #10584] ; 8-byte Folded Reload
fmul d5, d21, d5
ldr q21, [sp, #12208] ; 16-byte Folded Reload
fadd d2, d5, d2
fadd d1, d1, d2
fadd d5, d3, d4
fmul d2, d4, d30
fmov d7, d16
fadd d4, d1, d2
fmul d1, d22, d5
fmul d2, d23, d4
fsub d16, d1, d2
ldr d1, [sp, #12072] ; 8-byte Folded Reload
fmul d1, d1, d7
fmul d2, d1, d27
fmul d3, d0, d16
fsub d2, d3, d2
ldr d3, [sp, #12048] ; 8-byte Folded Reload
fmul d3, d3, d7
fmul d3, d3, d28
str q4, [sp, #4320] ; 16-byte Folded Spill
fmul d4, d22, d4
ldr q22, [sp, #11696] ; 16-byte Folded Reload
fsub d3, d4, d3
str q5, [sp, #5744] ; 16-byte Folded Spill
fmul d4, d23, d5
ldr d23, [sp, #11400] ; 8-byte Folded Reload
fadd d4, d4, d3
fmul d3, d6, d4
fsub d18, d2, d3
str q16, [sp, #4288] ; 16-byte Folded Spill
fmul d2, d6, d16
fmul d1, d1, d8
fsub d1, d2, d1
str q4, [sp, #4240] ; 16-byte Folded Spill
fmul d2, d0, d4
fadd d19, d1, d2
ldur d5, [x29, #-256] ; 8-byte Folded Reload
ldr q20, [sp, #11216] ; 16-byte Folded Reload
cbz x8, LBB19_64
; %bb.63:
mov x9, #33620
movk x9, #2364, lsl #16
movk x9, #33974, lsl #32
movk x9, #16305, lsl #48
fmov d1, x9
fmul d2, d15, d1
ldr q17, [sp, #10720] ; 16-byte Folded Reload
fmul d3, d17, d21
fsub d2, d3, d2
fmul d3, d20, d22
fsub d2, d2, d3
fmul d3, d5, d24
fmul d4, d3, d1
fadd d2, d2, d4
fadd d2, d2, d2
fmul d4, d20, d18
fmul d5, d5, d7
mov x9, #39127
movk x9, #24179, lsl #16
movk x9, #24811, lsl #32
movk x9, #16304, lsl #48
fmov d6, x9
fmul d7, d5, d6
fsub d4, d4, d7
fmul d7, d17, d19
fadd d4, d7, d4
fadd d4, d4, d4
fsub d2, d2, d4
fmul d4, d23, d2
fmul d7, d15, d6
fmul d16, d20, d21
fsub d7, d16, d7
fmul d16, d17, d22
fadd d7, d16, d7
fmul d3, d3, d6
fadd d3, d7, d3
fmul d6, d17, d18
fmul d1, d5, d1
fsub d1, d6, d1
fmul d5, d20, d19
fsub d1, d1, d5
fadd d1, d1, d3
ldr d3, [sp, #11160] ; 8-byte Folded Reload
fmul d3, d3, d1
fsub d3, d4, d3
fmul d3, d23, d3
fmov d5, #0.50000000
fmul d3, d3, d5
fmul d2, d25, d2
ldr d4, [sp, #11240] ; 8-byte Folded Reload
fmul d1, d4, d1
fadd d1, d1, d2
fmul d1, d25, d1
fmul d1, d1, d5
fsub d1, d3, d1
str d1, [x8, #104]
LBB19_64:
str q20, [sp, #11216] ; 16-byte Folded Spill
str q19, [sp, #3776] ; 16-byte Folded Spill
str q18, [sp, #3808] ; 16-byte Folded Spill
str d15, [sp, #3848] ; 8-byte Folded Spill
str d24, [sp, #11720] ; 8-byte Folded Spill
ldr d1, [sp, #3256] ; 8-byte Folded Reload
ldr d2, [sp, #3248] ; 8-byte Folded Reload
fadd d7, d1, d2
ldr d1, [sp, #3160] ; 8-byte Folded Reload
ldr d2, [sp, #3152] ; 8-byte Folded Reload
fsub d15, d2, d1
ldr d1, [sp, #3112] ; 8-byte Folded Reload
ldr d2, [sp, #3104] ; 8-byte Folded Reload
fsub d13, d2, d1
ldr d1, [sp, #2936] ; 8-byte Folded Reload
ldr d2, [sp, #2928] ; 8-byte Folded Reload
fsub d18, d2, d1
ldr d1, [sp, #2920] ; 8-byte Folded Reload
ldr d2, [sp, #2912] ; 8-byte Folded Reload
fsub d2, d2, d1
ldr d1, [sp, #2904] ; 8-byte Folded Reload
ldr d3, [sp, #2888] ; 8-byte Folded Reload
fsub d3, d3, d1
ldr d1, [sp, #2872] ; 8-byte Folded Reload
ldr d4, [sp, #2864] ; 8-byte Folded Reload
fsub d6, d4, d1
ldr d1, [sp, #2840] ; 8-byte Folded Reload
ldr d4, [sp, #2792] ; 8-byte Folded Reload
fsub d4, d4, d1
ldr d1, [sp, #4920] ; 8-byte Folded Reload
ldr d5, [sp, #3272] ; 8-byte Folded Reload
fsub d14, d1, d5
ldr d21, [sp, #11472] ; 8-byte Folded Reload
fmul d1, d21, d14
ldr d5, [sp, #3280] ; 8-byte Folded Reload
fsub d1, d1, d5
ldr d5, [sp, #3264] ; 8-byte Folded Reload
fsub d1, d1, d5
ldur d5, [x29, #-240] ; 8-byte Folded Reload
str d7, [sp, #11096] ; 8-byte Folded Spill
fmul d5, d5, d7
fadd d1, d5, d1
mov x9, #63706
movk x9, #13221, lsl #16
movk x9, #1281, lsl #32
movk x9, #16209, lsl #48
fmov d5, x9
ldr d23, [sp, #3296] ; 8-byte Folded Reload
fmul d7, d23, d5
fadd d1, d1, d7
ldr d7, [sp, #3240] ; 8-byte Folded Reload
fsub d1, d1, d7
ldr d7, [sp, #3232] ; 8-byte Folded Reload
fsub d19, d1, d7
mov x9, #47272
movk x9, #56762, lsl #16
movk x9, #43178, lsl #32
movk x9, #49060, lsl #48
mov x10, #4354980839667269632
fmov d1, x10
fmul d7, d19, d1
fmov d1, x9
str d1, [sp, #12072] ; 8-byte Folded Spill
ldr d26, [sp, #3288] ; 8-byte Folded Reload
fmul d16, d26, d1
fadd d7, d16, d7
fmul d16, d21, d15
ldr d17, [sp, #3224] ; 8-byte Folded Reload
fsub d16, d16, d17
ldr d17, [sp, #3216] ; 8-byte Folded Reload
fsub d24, d16, d17
mov x9, #18456
movk x9, #63321, lsl #16
movk x9, #33926, lsl #32
movk x9, #16223, lsl #48
ldr d31, [sp, #12304] ; 8-byte Folded Reload
fmul d17, d31, d24
fmov d16, x9
fmul d20, d23, d16
fadd d17, d17, d20
fmul d20, d21, d13
ldr d21, [sp, #3176] ; 8-byte Folded Reload
fsub d21, d20, d21
ldur d20, [x29, #-160] ; 8-byte Folded Reload
fmul d20, d20, d14
ldur d22, [x29, #-232] ; 8-byte Folded Reload
fmul d22, d22, d15
fadd d20, d20, d22
ldr d22, [sp, #12296] ; 8-byte Folded Reload
fmul d22, d22, d13
fadd d22, d22, d20
mov x9, #-7378697629483820647
movk x9, #39322
movk x9, #16297, lsl #48
fmov d20, x9
fmul d22, d22, d20
fadd d21, d21, d22
ldr d22, [sp, #3208] ; 8-byte Folded Reload
fsub d25, d21, d22
ldr d29, [sp, #12312] ; 8-byte Folded Reload
fmul d21, d29, d25
fadd d17, d17, d21
ldr d21, [sp, #3192] ; 8-byte Folded Reload
fsub d17, d17, d21
ldr d21, [sp, #3184] ; 8-byte Folded Reload
fsub d21, d17, d21
mov x9, #36544
movk x9, #43611, lsl #16
movk x9, #860, lsl #32
movk x9, #16326, lsl #48
fmov d17, x9
fmul d22, d23, d17
str d24, [sp, #10592] ; 8-byte Folded Spill
fmul d24, d29, d24
fadd d22, d22, d24
str d25, [sp, #10584] ; 8-byte Folded Spill
fmul d24, d31, d25
fsub d22, d24, d22
ldr d23, [sp, #3128] ; 8-byte Folded Reload
fsub d22, d22, d23
ldr d23, [sp, #3144] ; 8-byte Folded Reload
fsub d22, d22, d23
ldr d23, [sp, #3136] ; 8-byte Folded Reload
fsub d22, d22, d23
fsub d24, d7, d21
mov x9, #4363988038922010624
fmov d7, x9
fmul d25, d22, d7
fsub d27, d24, d25
mov x9, #4359484439294640128
fmov d24, x9
fmul d19, d19, d24
mov x9, #62612
movk x9, #18904, lsl #16
movk x9, #1144, lsl #32
movk x9, #16296, lsl #48
fmov d25, x9
fmul d26, d26, d25
fadd d19, d19, d26
fmul d21, d21, d7
fadd d19, d21, d19
fadd d28, d19, d22
ldr q30, [sp, #11808] ; 16-byte Folded Reload
fmul d19, d30, d27
ldr q10, [sp, #11984] ; 16-byte Folded Reload
fmul d21, d10, d28
fadd d8, d21, d19
mov x9, #11201
movk x9, #50599, lsl #16
movk x9, #31589, lsl #32
movk x9, #16242, lsl #48
fmul d19, d0, d8
fmov d26, x9
ldr d23, [sp, #3200] ; 8-byte Folded Reload
fmul d21, d23, d26
fadd d19, d19, d21
mov x9, #43115
movk x9, #62349, lsl #16
movk x9, #30721, lsl #32
movk x9, #16347, lsl #48
fmov d21, x9
ldr d22, [sp, #3168] ; 8-byte Folded Reload
fmul d22, d22, d21
str d27, [sp, #8048] ; 8-byte Folded Spill
fmul d27, d10, d27
fsub d22, d27, d22
str d28, [sp, #8040] ; 8-byte Folded Spill
fmul d27, d30, d28
fsub d28, d22, d27
ldr q11, [sp, #11792] ; 16-byte Folded Reload
fmul d22, d11, d28
fadd d1, d19, d22
stur d1, [x29, #-240] ; 8-byte Folded Spill
mov x9, #52090
movk x9, #42545, lsl #16
movk x9, #26349, lsl #32
movk x9, #49113, lsl #48
fmov d22, x9
fmul d19, d23, d22
str d8, [sp, #6072] ; 8-byte Folded Spill
fmul d27, d11, d8
fsub d19, d19, d27
str d28, [sp, #5912] ; 8-byte Folded Spill
fmul d27, d0, d28
fadd d1, d19, d27
ldur d28, [x29, #-192] ; 8-byte Folded Reload
fmul d19, d28, d14
ldr d23, [sp, #11768] ; 8-byte Folded Reload
fmul d27, d23, d15
fadd d19, d19, d27
ldr d9, [sp, #11776] ; 8-byte Folded Reload
fmul d27, d9, d13
fadd d19, d27, d19
fadd d18, d18, d19
fmov d12, #0.50000000
fmul d27, d19, d12
fsub d18, d18, d27
ldr d27, [sp, #4888] ; 8-byte Folded Reload
ldr d8, [sp, #2992] ; 8-byte Folded Reload
fsub d27, d27, d8
fmul d28, d28, d27
fmul d8, d23, d2
fadd d28, d28, d8
fmul d8, d9, d3
fadd d28, d8, d28
fsub d19, d18, d19
fsub d19, d19, d28
fadd d6, d6, d28
mov.16b v23, v0
fmul d0, d28, d12
fsub d0, d6, d0
fadd d0, d0, d19
ldr d8, [sp, #2984] ; 8-byte Folded Reload
fsub d4, d4, d8
ldr d6, [sp, #2960] ; 8-byte Folded Reload
fadd d4, d4, d6
ldr d28, [sp, #11272] ; 8-byte Folded Reload
fmul d6, d28, d27
ldr d27, [sp, #11360] ; 8-byte Folded Reload
str d14, [sp, #10600] ; 8-byte Folded Spill
fmul d19, d27, d14
fadd d6, d19, d6
fmul d5, d0, d5
fsub d5, d6, d5
ldr d6, [sp, #2976] ; 8-byte Folded Reload
fsub d5, d5, d6
fadd d6, d0, d8
fadd d8, d6, d4
fmul d6, d5, d24
fmul d24, d8, d25
fmov d25, d1
fsub d6, d6, d24
fmul d24, d8, d26
fmul d16, d0, d16
fmul d18, d18, d20
str d13, [sp, #10872] ; 8-byte Folded Spill
fmul d20, d27, d13
fadd d18, d20, d18
fmul d3, d28, d3
fadd d18, d3, d18
fmul d3, d29, d18
fsub d3, d3, d16
str d15, [sp, #10608] ; 8-byte Folded Spill
fmul d16, d27, d15
fmul d2, d28, d2
fadd d16, d16, d2
fmul d2, d31, d16
fadd d2, d2, d3
ldr d3, [sp, #2968] ; 8-byte Folded Reload
fsub d2, d2, d3
fmul d3, d2, d7
fadd d3, d3, d6
fmul d0, d0, d17
str d18, [sp, #10408] ; 8-byte Folded Spill
fmul d6, d31, d18
fadd d0, d0, d6
str d16, [sp, #10400] ; 8-byte Folded Spill
fmul d6, d29, d16
fsub d0, d0, d6
ldr d6, [sp, #2952] ; 8-byte Folded Reload
fsub d0, d0, d6
mov x9, #43516
movk x9, #54001, lsl #16
movk x9, #25165, lsl #32
movk x9, #16240, lsl #48
fmov d6, x9
fmul d4, d4, d6
fadd d0, d0, d4
fadd d6, d0, d3
fmul d3, d10, d6
mov x9, #-4868391197187506176
fmov d4, x9
fmul d4, d5, d4
ldr d1, [sp, #12072] ; 8-byte Folded Reload
fmul d1, d8, d1
fadd d1, d4, d1
fadd d1, d2, d1
fmul d0, d0, d7
fadd d2, d0, d1
fmul d0, d30, d2
fsub d3, d3, d0
fmul d0, d23, d3
fsub d0, d0, d24
ldur d24, [x29, #-240] ; 8-byte Folded Reload
fmul d1, d8, d21
str d2, [sp, #7680] ; 8-byte Folded Spill
fmul d2, d10, d2
fsub d1, d2, d1
str d6, [sp, #8008] ; 8-byte Folded Spill
fmul d2, d30, d6
fadd d2, d2, d1
fmul d1, d11, d2
fsub d18, d0, d1
fmul d0, d8, d22
str d3, [sp, #3864] ; 8-byte Folded Spill
fmul d1, d11, d3
fadd d0, d1, d0
str d2, [sp, #3856] ; 8-byte Folded Spill
fmul d1, d23, d2
fadd d19, d0, d1
ldr d22, [sp, #11416] ; 8-byte Folded Reload
ldr d9, [sp, #11296] ; 8-byte Folded Reload
ldr d17, [sp, #9648] ; 8-byte Folded Reload
ldr d27, [sp, #9624] ; 8-byte Folded Reload
ldr d26, [sp, #11304] ; 8-byte Folded Reload
ldr d29, [sp, #9448] ; 8-byte Folded Reload
ldr d31, [sp, #8760] ; 8-byte Folded Reload
ldr d6, [sp, #9440] ; 8-byte Folded Reload
ldr d23, [sp, #8424] ; 8-byte Folded Reload
ldr d30, [sp, #8744] ; 8-byte Folded Reload
ldr d13, [sp, #6144] ; 8-byte Folded Reload
ldr d16, [sp, #6488] ; 8-byte Folded Reload
ldr d15, [sp, #4808] ; 8-byte Folded Reload
cbz x8, LBB19_66
; %bb.65:
mov x9, #39127
movk x9, #24179, lsl #16
movk x9, #24811, lsl #32
movk x9, #49072, lsl #48
fmov d0, x9
ldr d7, [sp, #248] ; 8-byte Folded Reload
fmul d1, d7, d0
ldr q21, [sp, #11216] ; 16-byte Folded Reload
fmul d2, d21, d24
fsub d1, d1, d2
ldr q5, [sp, #10720] ; 16-byte Folded Reload
fmul d2, d5, d25
fadd d1, d2, d1
fadd d1, d1, d1
mov x9, #33620
movk x9, #2364, lsl #16
movk x9, #33974, lsl #32
movk x9, #16305, lsl #48
fmov d2, x9
fmul d3, d8, d2
fmul d4, d5, d18
fsub d3, d4, d3
fmul d4, d21, d19
fsub d3, d3, d4
fadd d3, d3, d3
fadd d1, d1, d3
ldr d20, [sp, #11400] ; 8-byte Folded Reload
fmul d3, d20, d1
fmul d0, d8, d0
fmul d4, d21, d18
fadd d0, d4, d0
fmul d4, d5, d19
fadd d0, d4, d0
fmul d4, d5, d24
fmul d2, d7, d2
fadd d2, d4, d2
fmul d4, d21, d25
fadd d2, d4, d2
fadd d0, d2, d0
ldr d2, [sp, #11160] ; 8-byte Folded Reload
fmul d2, d2, d0
fsub d2, d3, d2
fmul d2, d20, d2
fmov d3, #0.50000000
fmul d2, d2, d3
ldr d5, [sp, #11392] ; 8-byte Folded Reload
fmul d1, d5, d1
ldr d4, [sp, #11240] ; 8-byte Folded Reload
fmul d0, d4, d0
fadd d0, d0, d1
fmul d0, d5, d0
fmul d0, d0, d3
fsub d0, d2, d0
str d0, [x8, #112]
LBB19_66:
str d8, [sp, #3760] ; 8-byte Folded Spill
str d19, [sp, #3768] ; 8-byte Folded Spill
str d18, [sp, #3800] ; 8-byte Folded Spill
str d25, [sp, #3824] ; 8-byte Folded Spill
ldr d0, [sp, #4408] ; 8-byte Folded Reload
fmul d0, d17, d0
ldr d1, [sp, #8584] ; 8-byte Folded Reload
ldr d2, [sp, #9960] ; 8-byte Folded Reload
fmul d1, d1, d2
fadd d0, d1, d0
ldr d1, [sp, #8576] ; 8-byte Folded Reload
ldr d2, [sp, #9992] ; 8-byte Folded Reload
fmul d1, d1, d2
fsub d0, d0, d1
ldr d1, [sp, #4208] ; 8-byte Folded Reload
fmul d1, d27, d1
fsub d1, d0, d1
ldr d0, [sp, #9632] ; 8-byte Folded Reload
ldr d2, [sp, #9968] ; 8-byte Folded Reload
fmul d0, d0, d2
ldr d2, [sp, #11936] ; 8-byte Folded Reload
ldr d3, [sp, #6712] ; 8-byte Folded Reload
fmul d2, d2, d3
fadd d0, d2, d0
ldr d2, [sp, #9640] ; 8-byte Folded Reload
ldr d3, [sp, #10000] ; 8-byte Folded Reload
fmul d2, d2, d3
fadd d0, d2, d0
ldr d2, [sp, #11944] ; 8-byte Folded Reload
ldr d3, [sp, #5176] ; 8-byte Folded Reload
fmul d2, d2, d3
fsub d0, d0, d2
ldr d2, [sp, #11752] ; 8-byte Folded Reload
ldr d3, [sp, #6120] ; 8-byte Folded Reload
fmul d2, d2, d3
fsub d0, d0, d2
ldr d2, [sp, #12184] ; 8-byte Folded Reload
ldr d3, [sp, #8408] ; 8-byte Folded Reload
fmul d2, d2, d3
fadd d0, d2, d0
ldr d2, [sp, #11672] ; 8-byte Folded Reload
ldr d3, [sp, #4232] ; 8-byte Folded Reload
fmul d2, d2, d3
fsub d0, d0, d2
ldr d2, [sp, #12176] ; 8-byte Folded Reload
ldr d3, [sp, #8400] ; 8-byte Folded Reload
fmul d2, d2, d3
fadd d2, d2, d0
ldr d0, [sp, #10008] ; 8-byte Folded Reload
ldr d3, [sp, #9384] ; 8-byte Folded Reload
fmul d0, d0, d3
str d0, [sp, #11400] ; 8-byte Folded Spill
fadd d2, d0, d2
ldr d3, [sp, #10712] ; 8-byte Folded Reload
ldr d4, [sp, #9376] ; 8-byte Folded Reload
fmul d20, d3, d4
fadd d2, d20, d2
str d2, [sp, #11280] ; 8-byte Folded Spill
fadd d1, d1, d2
ldr d2, [sp, #9512] ; 8-byte Folded Reload
ldr d3, [sp, #11024] ; 8-byte Folded Reload
fmul d2, d2, d3
fsub d1, d1, d2
ldr d2, [sp, #12120] ; 8-byte Folded Reload
ldr d3, [sp, #4440] ; 8-byte Folded Reload
fmul d2, d2, d3
fadd d1, d2, d1
ldr d2, [sp, #9504] ; 8-byte Folded Reload
ldr d3, [sp, #10664] ; 8-byte Folded Reload
fmul d2, d2, d3
fadd d1, d2, d1
ldr d2, [sp, #12128] ; 8-byte Folded Reload
ldr d3, [sp, #4224] ; 8-byte Folded Reload
fmul d2, d2, d3
fsub d1, d1, d2
ldur d2, [x29, #-168] ; 8-byte Folded Reload
ldr d3, [sp, #4432] ; 8-byte Folded Reload
fmul d2, d2, d3
fadd d1, d2, d1
ldur d2, [x29, #-176] ; 8-byte Folded Reload
ldr d3, [sp, #4216] ; 8-byte Folded Reload
fmul d2, d2, d3
fsub d1, d1, d2
ldr d2, [sp, #11208] ; 8-byte Folded Reload
ldr d3, [sp, #6112] ; 8-byte Folded Reload
fmul d2, d2, d3
fsub d1, d1, d2
ldr d2, [sp, #11200] ; 8-byte Folded Reload
ldr d3, [sp, #4424] ; 8-byte Folded Reload
fmul d2, d2, d3
fadd d1, d2, d1
ldr d2, [sp, #9480] ; 8-byte Folded Reload
ldr d3, [sp, #11032] ; 8-byte Folded Reload
fmul d2, d2, d3
fsub d1, d1, d2
ldr d2, [sp, #12272] ; 8-byte Folded Reload
ldr d3, [sp, #4416] ; 8-byte Folded Reload
fmul d2, d2, d3
fadd d1, d2, d1
ldr d2, [sp, #9472] ; 8-byte Folded Reload
ldr d3, [sp, #10672] ; 8-byte Folded Reload
fmul d2, d2, d3
fadd d1, d2, d1
ldr d2, [sp, #12344] ; 8-byte Folded Reload
ldr d3, [sp, #4200] ; 8-byte Folded Reload
fmul d2, d2, d3
fsub d4, d1, d2
ldr d1, [sp, #10704] ; 8-byte Folded Reload
ldr d2, [sp, #4384] ; 8-byte Folded Reload
fmul d1, d1, d2
ldr d2, [sp, #8552] ; 8-byte Folded Reload
ldr d3, [sp, #8264] ; 8-byte Folded Reload
fmul d2, d2, d3
fadd d1, d2, d1
ldr d2, [sp, #8528] ; 8-byte Folded Reload
ldr d3, [sp, #8392] ; 8-byte Folded Reload
fmul d2, d2, d3
fadd d1, d2, d1
ldr d2, [sp, #10696] ; 8-byte Folded Reload
ldr d3, [sp, #4184] ; 8-byte Folded Reload
fmul d2, d2, d3
fadd d1, d2, d1
ldr d2, [sp, #8536] ; 8-byte Folded Reload
ldr d3, [sp, #10992] ; 8-byte Folded Reload
fmul d2, d2, d3
fadd d1, d1, d2
ldr d2, [sp, #4400] ; 8-byte Folded Reload
fmul d2, d9, d2
fadd d1, d1, d2
str d4, [sp, #8408] ; 8-byte Folded Spill
fsub d1, d4, d1
ldr d2, [sp, #8544] ; 8-byte Folded Reload
ldr d3, [sp, #8416] ; 8-byte Folded Reload
fmul d2, d2, d3
fadd d1, d2, d1
ldr d2, [sp, #12032] ; 8-byte Folded Reload
ldr d3, [sp, #4192] ; 8-byte Folded Reload
fmul d2, d2, d3
fsub d3, d1, d2
ldr d1, [sp, #6096] ; 8-byte Folded Reload
fmul d1, d6, d1
ldr d4, [sp, #11064] ; 8-byte Folded Reload
ldr d18, [sp, #4824] ; 8-byte Folded Reload
fmul d2, d4, d18
fadd d1, d2, d1
ldr d5, [sp, #11056] ; 8-byte Folded Reload
ldr d11, [sp, #4144] ; 8-byte Folded Reload
fmul d2, d5, d11
fadd d1, d2, d1
ldr d2, [sp, #4168] ; 8-byte Folded Reload
fmul d2, d29, d2
fadd d1, d2, d1
str d3, [sp, #8400] ; 8-byte Folded Spill
fsub d1, d3, d1
ldr d2, [sp, #9464] ; 8-byte Folded Reload
ldr d3, [sp, #11040] ; 8-byte Folded Reload
fmul d2, d2, d3
fsub d1, d1, d2
ldr d24, [sp, #11480] ; 8-byte Folded Reload
ldr d2, [sp, #4392] ; 8-byte Folded Reload
fmul d2, d24, d2
fadd d1, d2, d1
ldr d2, [sp, #9456] ; 8-byte Folded Reload
ldr d3, [sp, #6136] ; 8-byte Folded Reload
fmul d2, d2, d3
fadd d1, d2, d1
ldr d2, [sp, #12024] ; 8-byte Folded Reload
ldr d3, [sp, #4176] ; 8-byte Folded Reload
fmul d2, d2, d3
fsub d0, d1, d2
ldr d1, [sp, #8504] ; 8-byte Folded Reload
ldr d2, [sp, #9864] ; 8-byte Folded Reload
fmul d1, d1, d2
ldr d2, [sp, #11048] ; 8-byte Folded Reload
ldr d3, [sp, #4312] ; 8-byte Folded Reload
fmul d2, d2, d3
fadd d1, d1, d2
ldr d2, [sp, #8496] ; 8-byte Folded Reload
ldr d3, [sp, #9976] ; 8-byte Folded Reload
fmul d2, d2, d3
fsub d1, d1, d2
ldr d2, [sp, #11288] ; 8-byte Folded Reload
ldr d3, [sp, #4112] ; 8-byte Folded Reload
fmul d2, d2, d3
fsub d1, d1, d2
ldr d2, [sp, #9560] ; 8-byte Folded Reload
ldr d3, [sp, #9872] ; 8-byte Folded Reload
fmul d2, d2, d3
ldr d3, [sp, #11920] ; 8-byte Folded Reload
ldr d7, [sp, #6568] ; 8-byte Folded Reload
fmul d3, d3, d7
fadd d2, d3, d2
ldr d3, [sp, #9568] ; 8-byte Folded Reload
ldr d7, [sp, #9984] ; 8-byte Folded Reload
fmul d3, d3, d7
fadd d2, d3, d2
ldr d3, [sp, #11928] ; 8-byte Folded Reload
ldr d7, [sp, #5096] ; 8-byte Folded Reload
fmul d3, d3, d7
fsub d2, d2, d3
ldr d3, [sp, #11912] ; 8-byte Folded Reload
ldr d7, [sp, #6088] ; 8-byte Folded Reload
fmul d3, d3, d7
fsub d2, d2, d3
ldr d3, [sp, #12288] ; 8-byte Folded Reload
ldr d7, [sp, #8384] ; 8-byte Folded Reload
fmul d3, d3, d7
fadd d2, d3, d2
ldr d3, [sp, #11744] ; 8-byte Folded Reload
ldr d7, [sp, #4160] ; 8-byte Folded Reload
fmul d3, d3, d7
fsub d2, d2, d3
ldr d3, [sp, #12168] ; 8-byte Folded Reload
ldr d7, [sp, #8376] ; 8-byte Folded Reload
fmul d3, d3, d7
fadd d2, d3, d2
ldr d3, [sp, #9576] ; 8-byte Folded Reload
ldr d7, [sp, #9368] ; 8-byte Folded Reload
fmul d14, d3, d7
fadd d2, d14, d2
ldr d3, [sp, #10056] ; 8-byte Folded Reload
ldr d7, [sp, #9360] ; 8-byte Folded Reload
fmul d25, d3, d7
fadd d2, d25, d2
str d2, [sp, #9848] ; 8-byte Folded Spill
fadd d1, d1, d2
ldr d2, [sp, #9432] ; 8-byte Folded Reload
ldr d3, [sp, #11000] ; 8-byte Folded Reload
fmul d2, d2, d3
fsub d1, d1, d2
ldr d2, [sp, #12256] ; 8-byte Folded Reload
ldr d3, [sp, #4368] ; 8-byte Folded Reload
fmul d2, d2, d3
fadd d1, d2, d1
ldr d2, [sp, #9424] ; 8-byte Folded Reload
ldr d3, [sp, #10632] ; 8-byte Folded Reload
fmul d2, d2, d3
fadd d1, d2, d1
ldr d2, [sp, #12264] ; 8-byte Folded Reload
ldr d3, [sp, #4136] ; 8-byte Folded Reload
fmul d2, d2, d3
fsub d1, d1, d2
ldur d2, [x29, #-160] ; 8-byte Folded Reload
ldr d3, [sp, #4360] ; 8-byte Folded Reload
fmul d2, d2, d3
fadd d1, d2, d1
ldur d2, [x29, #-192] ; 8-byte Folded Reload
ldr d3, [sp, #4128] ; 8-byte Folded Reload
fmul d2, d2, d3
fsub d1, d1, d2
ldr d2, [sp, #11192] ; 8-byte Folded Reload
ldr d3, [sp, #6064] ; 8-byte Folded Reload
fmul d2, d2, d3
fsub d1, d1, d2
ldr d2, [sp, #11184] ; 8-byte Folded Reload
ldr d3, [sp, #4352] ; 8-byte Folded Reload
fmul d2, d2, d3
fadd d1, d2, d1
ldr d2, [sp, #9400] ; 8-byte Folded Reload
ldr d3, [sp, #11008] ; 8-byte Folded Reload
fmul d2, d2, d3
fsub d1, d1, d2
ldr d2, [sp, #12328] ; 8-byte Folded Reload
ldr d3, [sp, #4344] ; 8-byte Folded Reload
fmul d2, d2, d3
fadd d1, d2, d1
ldr d2, [sp, #9392] ; 8-byte Folded Reload
ldr d3, [sp, #10640] ; 8-byte Folded Reload
fmul d2, d2, d3
fadd d1, d2, d1
ldur d2, [x29, #-224] ; 8-byte Folded Reload
ldr d3, [sp, #4120] ; 8-byte Folded Reload
fmul d2, d2, d3
fsub d7, d1, d2
ldr d1, [sp, #8472] ; 8-byte Folded Reload
ldr d2, [sp, #8224] ; 8-byte Folded Reload
fmul d1, d1, d2
ldr d2, [sp, #10688] ; 8-byte Folded Reload
ldr d3, [sp, #4264] ; 8-byte Folded Reload
fmul d2, d2, d3
fadd d1, d1, d2
ldr d2, [sp, #8456] ; 8-byte Folded Reload
ldr d10, [sp, #4376] ; 8-byte Folded Reload
fmul d2, d2, d10
fadd d1, d2, d1
ldr d2, [sp, #10680] ; 8-byte Folded Reload
ldr d3, [sp, #4072] ; 8-byte Folded Reload
fmul d2, d2, d3
fadd d1, d2, d1
ldr d2, [sp, #8464] ; 8-byte Folded Reload
ldr d3, [sp, #10984] ; 8-byte Folded Reload
fmul d2, d2, d3
fadd d1, d2, d1
ldr d2, [sp, #4272] ; 8-byte Folded Reload
fmul d2, d26, d2
fadd d1, d1, d2
str d7, [sp, #8384] ; 8-byte Folded Spill
fsub d1, d7, d1
ldr d2, [sp, #8448] ; 8-byte Folded Reload
ldr d3, [sp, #10648] ; 8-byte Folded Reload
fmul d2, d2, d3
fadd d1, d2, d1
ldr d2, [sp, #12112] ; 8-byte Folded Reload
ldr d3, [sp, #4104] ; 8-byte Folded Reload
fmul d2, d2, d3
fsub d3, d1, d2
fmul d1, d4, d16
ldr d2, [sp, #4256] ; 8-byte Folded Reload
fmul d2, d23, d2
fadd d1, d1, d2
ldr d12, [sp, #4152] ; 8-byte Folded Reload
fmul d2, d5, d12
fadd d1, d2, d1
ldr d2, [sp, #4056] ; 8-byte Folded Reload
fmul d2, d30, d2
fadd d1, d2, d1
str d3, [sp, #8376] ; 8-byte Folded Spill
fsub d1, d3, d1
ldr d2, [sp, #8440] ; 8-byte Folded Reload
ldr d3, [sp, #11016] ; 8-byte Folded Reload
fmul d2, d2, d3
fsub d1, d1, d2
ldr d28, [sp, #11408] ; 8-byte Folded Reload
ldr d2, [sp, #4280] ; 8-byte Folded Reload
fmul d2, d28, d2
fadd d1, d2, d1
ldr d2, [sp, #8432] ; 8-byte Folded Reload
ldr d3, [sp, #10656] ; 8-byte Folded Reload
fmul d2, d2, d3
fadd d1, d2, d1
ldr d2, [sp, #12080] ; 8-byte Folded Reload
ldr d3, [sp, #4088] ; 8-byte Folded Reload
fmul d2, d2, d3
fsub d19, d1, d2
ldr d8, [sp, #8752] ; 8-byte Folded Reload
ldr d3, [sp, #6288] ; 8-byte Folded Reload
ldr d2, [sp, #4840] ; 8-byte Folded Reload
ldr d5, [sp, #4096] ; 8-byte Folded Reload
ldr d7, [sp, #4816] ; 8-byte Folded Reload
ldr d16, [sp, #4080] ; 8-byte Folded Reload
ldr d21, [sp, #4064] ; 8-byte Folded Reload
cbz x8, LBB19_68
; %bb.67:
ldr d1, [sp, #6640] ; 8-byte Folded Reload
fmul d1, d1, d2
ldr d2, [sp, #4480] ; 8-byte Folded Reload
fmul d2, d8, d2
fadd d1, d1, d2
ldr d2, [sp, #6648] ; 8-byte Folded Reload
ldr d4, [sp, #4536] ; 8-byte Folded Reload
fmul d2, d2, d4
fsub d1, d1, d2
ldr d2, [sp, #8768] ; 8-byte Folded Reload
ldr d4, [sp, #4488] ; 8-byte Folded Reload
fmul d2, d2, d4
fsub d1, d1, d2
ldr d2, [sp, #6824] ; 8-byte Folded Reload
ldr d4, [sp, #4528] ; 8-byte Folded Reload
fmul d2, d2, d4
fsub d1, d1, d2
fmul d2, d31, d15
fsub d1, d1, d2
ldr d2, [sp, #6832] ; 8-byte Folded Reload
fmul d2, d2, d3
fadd d1, d2, d1
ldr d2, [sp, #8656] ; 8-byte Folded Reload
fmul d2, d2, d21
fsub d1, d1, d2
fadd d1, d1, d0
ldr d2, [sp, #8648] ; 8-byte Folded Reload
fmul d2, d13, d2
fsub d1, d1, d2
fmul d2, d22, d16
fadd d1, d2, d1
ldr d2, [sp, #6840] ; 8-byte Folded Reload
ldr d4, [sp, #5184] ; 8-byte Folded Reload
fmul d2, d4, d2
fadd d1, d2, d1
ldr d3, [sp, #12040] ; 8-byte Folded Reload
fmul d2, d3, d7
fsub d1, d1, d2
fadd d1, d1, d19
ldr d2, [sp, #6416] ; 8-byte Folded Reload
fmul d2, d13, d2
fsub d1, d1, d2
fmul d2, d22, d5
fadd d1, d2, d1
ldr d2, [sp, #6816] ; 8-byte Folded Reload
fmul d2, d4, d2
fadd d1, d2, d1
ldr d2, [sp, #5608] ; 8-byte Folded Reload
fmul d2, d3, d2
fsub d1, d1, d2
str d1, [x8, #120]
LBB19_68:
str d0, [sp, #9360] ; 8-byte Folded Spill
str d19, [sp, #4816] ; 8-byte Folded Spill
mov x9, #64990
movk x9, #28266, lsl #16
movk x9, #45172, lsl #32
movk x9, #16414, lsl #48
fmov d3, x9
fmul d1, d18, d3
ldr d2, [sp, #3976] ; 8-byte Folded Reload
fmul d2, d6, d2
fadd d4, d1, d2
mov x9, #5915
movk x9, #64709, lsl #16
movk x9, #30489, lsl #32
movk x9, #16392, lsl #48
fmov d5, x9
ldr d1, [sp, #8264] ; 8-byte Folded Reload
fmul d1, d1, d5
ldr d2, [sp, #10704] ; 8-byte Folded Reload
ldr d6, [sp, #3984] ; 8-byte Folded Reload
fmul d2, d2, d6
fadd d1, d1, d2
mov x9, #24565
movk x9, #58125, lsl #16
movk x9, #44270, lsl #32
movk x9, #16372, lsl #48
fmov d6, x9
ldr d2, [sp, #10992] ; 8-byte Folded Reload
fmul d2, d2, d6
fsub d1, d1, d2
ldr d2, [sp, #4000] ; 8-byte Folded Reload
fmul d2, d9, d2
fsub d7, d1, d2
mov x9, #50080
movk x9, #49599, lsl #16
movk x9, #32579, lsl #32
movk x9, #16368, lsl #48
fmov d16, x9
ldr d1, [sp, #9960] ; 8-byte Folded Reload
fmul d1, d1, d16
ldr d2, [sp, #6000] ; 8-byte Folded Reload
fmul d2, d17, d2
fsub d17, d2, d1
mov x9, #54806
movk x9, #23353, lsl #16
movk x9, #56949, lsl #32
movk x9, #16326, lsl #48
fmov d2, x9
ldr d1, [sp, #9968] ; 8-byte Folded Reload
fmul d1, d1, d2
ldr d18, [sp, #11936] ; 8-byte Folded Reload
ldr d21, [sp, #5112] ; 8-byte Folded Reload
fmul d18, d18, d21
fsub d1, d18, d1
ldr d18, [sp, #11752] ; 8-byte Folded Reload
ldr d21, [sp, #5992] ; 8-byte Folded Reload
fmul d18, d18, d21
fsub d18, d1, d18
ldr d1, [sp, #12184] ; 8-byte Folded Reload
ldr d21, [sp, #4040] ; 8-byte Folded Reload
fmul d1, d1, d21
str d1, [sp, #11624] ; 8-byte Folded Spill
fsub d18, d18, d1
ldr d1, [sp, #11672] ; 8-byte Folded Reload
ldr d21, [sp, #3904] ; 8-byte Folded Reload
fmul d21, d1, d21
fsub d18, d18, d21
ldr d1, [sp, #12176] ; 8-byte Folded Reload
ldr d21, [sp, #6016] ; 8-byte Folded Reload
fmul d1, d1, d21
str d1, [sp, #11560] ; 8-byte Folded Spill
fsub d1, d18, d1
str d1, [sp, #11272] ; 8-byte Folded Spill
fadd d17, d17, d1
ldr d1, [sp, #11024] ; 8-byte Folded Reload
fmul d18, d1, d2
fsub d17, d17, d18
ldr d1, [sp, #12120] ; 8-byte Folded Reload
ldr d18, [sp, #4032] ; 8-byte Folded Reload
fmul d18, d1, d18
fadd d17, d18, d17
ldr d1, [sp, #12128] ; 8-byte Folded Reload
ldr d18, [sp, #3896] ; 8-byte Folded Reload
fmul d18, d1, d18
fsub d17, d17, d18
ldur d1, [x29, #-168] ; 8-byte Folded Reload
ldr d18, [sp, #4024] ; 8-byte Folded Reload
fmul d18, d1, d18
fadd d17, d18, d17
ldr d1, [sp, #11208] ; 8-byte Folded Reload
ldr d18, [sp, #5984] ; 8-byte Folded Reload
fmul d18, d1, d18
fsub d17, d17, d18
ldr d1, [sp, #11200] ; 8-byte Folded Reload
ldr d18, [sp, #4048] ; 8-byte Folded Reload
fmul d1, d1, d18
str d1, [sp, #11432] ; 8-byte Folded Spill
fsub d17, d17, d1
mov x9, #65123
movk x9, #27942, lsl #16
movk x9, #23314, lsl #32
movk x9, #16371, lsl #48
fmov d18, x9
ldr d1, [sp, #11032] ; 8-byte Folded Reload
fmul d21, d1, d18
fsub d17, d17, d21
ldr d1, [sp, #12272] ; 8-byte Folded Reload
ldr d21, [sp, #4016] ; 8-byte Folded Reload
fmul d21, d1, d21
fadd d17, d21, d17
ldr d1, [sp, #12344] ; 8-byte Folded Reload
ldr d15, [sp, #6480] ; 8-byte Folded Reload
fmul d21, d1, d15
fsub d1, d17, d21
str d1, [sp, #6120] ; 8-byte Folded Spill
fadd d7, d7, d1
ldr d1, [sp, #12032] ; 8-byte Folded Reload
fmul d17, d1, d15
fsub d1, d7, d17
str d1, [sp, #6112] ; 8-byte Folded Spill
fadd d4, d4, d1
mov x9, #58251
movk x9, #46885, lsl #16
movk x9, #26312, lsl #32
movk x9, #16401, lsl #48
fmov d7, x9
ldr d1, [sp, #11040] ; 8-byte Folded Reload
fmul d17, d1, d7
fsub d4, d4, d17
ldr d1, [sp, #4008] ; 8-byte Folded Reload
fmul d17, d24, d1
fadd d4, d17, d4
ldr d1, [sp, #12024] ; 8-byte Folded Reload
fmul d17, d1, d15
fsub d17, d4, d17
ldr d1, [sp, #6488] ; 8-byte Folded Reload
fmul d3, d1, d3
ldr d1, [sp, #3912] ; 8-byte Folded Reload
fmul d4, d23, d1
fadd d3, d3, d4
ldr d1, [sp, #8224] ; 8-byte Folded Reload
fmul d4, d1, d5
ldr d1, [sp, #10688] ; 8-byte Folded Reload
ldr d5, [sp, #3920] ; 8-byte Folded Reload
fmul d5, d1, d5
fadd d4, d4, d5
ldr d1, [sp, #10984] ; 8-byte Folded Reload
fmul d5, d1, d6
fsub d4, d4, d5
ldr d1, [sp, #3928] ; 8-byte Folded Reload
fmul d5, d26, d1
fsub d4, d4, d5
ldr d1, [sp, #9864] ; 8-byte Folded Reload
fmul d5, d1, d16
ldr d1, [sp, #11048] ; 8-byte Folded Reload
ldr d6, [sp, #5968] ; 8-byte Folded Reload
fmul d6, d1, d6
fsub d5, d6, d5
ldr d1, [sp, #9872] ; 8-byte Folded Reload
fmul d6, d1, d2
ldr d1, [sp, #11920] ; 8-byte Folded Reload
ldr d16, [sp, #5008] ; 8-byte Folded Reload
fmul d16, d1, d16
fsub d6, d16, d6
ldr d1, [sp, #11912] ; 8-byte Folded Reload
ldr d16, [sp, #5976] ; 8-byte Folded Reload
fmul d16, d1, d16
fsub d6, d6, d16
ldr d1, [sp, #12288] ; 8-byte Folded Reload
ldr d16, [sp, #3992] ; 8-byte Folded Reload
fmul d1, d1, d16
str d1, [sp, #12048] ; 8-byte Folded Spill
fsub d6, d6, d1
ldr d1, [sp, #11744] ; 8-byte Folded Reload
ldr d16, [sp, #3880] ; 8-byte Folded Reload
fmul d16, d1, d16
fsub d6, d6, d16
ldr d1, [sp, #12168] ; 8-byte Folded Reload
ldr d16, [sp, #5952] ; 8-byte Folded Reload
fmul d1, d1, d16
str d1, [sp, #11728] ; 8-byte Folded Spill
fsub d1, d6, d1
str d1, [sp, #9840] ; 8-byte Folded Spill
fadd d5, d5, d1
ldr d1, [sp, #11000] ; 8-byte Folded Reload
fmul d2, d1, d2
fsub d2, d5, d2
ldr d1, [sp, #12256] ; 8-byte Folded Reload
ldr d5, [sp, #3968] ; 8-byte Folded Reload
fmul d5, d1, d5
fadd d2, d5, d2
ldr d1, [sp, #12264] ; 8-byte Folded Reload
ldr d5, [sp, #3872] ; 8-byte Folded Reload
fmul d5, d1, d5
fsub d2, d2, d5
ldur d1, [x29, #-160] ; 8-byte Folded Reload
ldr d5, [sp, #3960] ; 8-byte Folded Reload
fmul d5, d1, d5
fadd d2, d5, d2
ldr d1, [sp, #11192] ; 8-byte Folded Reload
ldr d5, [sp, #5944] ; 8-byte Folded Reload
fmul d5, d1, d5
fsub d2, d2, d5
ldr d1, [sp, #11184] ; 8-byte Folded Reload
ldr d5, [sp, #3952] ; 8-byte Folded Reload
fmul d1, d1, d5
str d1, [sp, #11472] ; 8-byte Folded Spill
fsub d2, d2, d1
ldr d1, [sp, #11008] ; 8-byte Folded Reload
fmul d5, d1, d18
fsub d2, d2, d5
ldr d1, [sp, #12328] ; 8-byte Folded Reload
ldr d5, [sp, #3944] ; 8-byte Folded Reload
fmul d5, d1, d5
fadd d2, d5, d2
ldur d1, [x29, #-224] ; 8-byte Folded Reload
ldr d6, [sp, #4760] ; 8-byte Folded Reload
fmul d5, d1, d6
fsub d1, d2, d5
str d1, [sp, #6096] ; 8-byte Folded Spill
fadd d2, d4, d1
ldr d1, [sp, #12112] ; 8-byte Folded Reload
fmul d4, d1, d6
fsub d1, d2, d4
str d1, [sp, #6088] ; 8-byte Folded Spill
fadd d2, d3, d1
ldr d1, [sp, #11016] ; 8-byte Folded Reload
fmul d3, d1, d7
fsub d2, d2, d3
ldr d1, [sp, #3936] ; 8-byte Folded Reload
fmul d3, d28, d1
fadd d2, d3, d2
ldr d1, [sp, #12080] ; 8-byte Folded Reload
fmul d3, d1, d6
fsub d7, d2, d3
cbz x8, LBB19_70
; %bb.69:
ldr d1, [sp, #5232] ; 8-byte Folded Reload
ldur d2, [x29, #-256] ; 8-byte Folded Reload
fmul d2, d1, d2
mov x9, #43139
movk x9, #8835, lsl #16
movk x9, #28093, lsl #32
movk x9, #49187, lsl #48
fmov d3, x9
fmul d2, d2, d3
ldr d3, [sp, #4464] ; 8-byte Folded Reload
fmul d3, d1, d3
fadd d2, d3, d2
mov x9, #47887
movk x9, #56309, lsl #16
movk x9, #15746, lsl #32
movk x9, #16444, lsl #48
fmov d3, x9
ldr d1, [sp, #4840] ; 8-byte Folded Reload
fmul d4, d1, d3
fsub d2, d2, d4
ldr d5, [sp, #4472] ; 8-byte Folded Reload
fmul d4, d8, d5
fadd d2, d4, d2
ldr d1, [sp, #4528] ; 8-byte Folded Reload
fmul d3, d1, d3
fsub d2, d2, d3
fmul d3, d31, d5
fadd d2, d3, d2
fadd d2, d2, d17
mov x9, #28852
movk x9, #37576, lsl #16
movk x9, #2974, lsl #32
movk x9, #49192, lsl #48
fmov d3, x9
fmul d3, d13, d3
fadd d2, d3, d2
ldr d1, [sp, #728] ; 8-byte Folded Reload
fmul d4, d22, d1
fadd d2, d4, d2
ldr d1, [sp, #12040] ; 8-byte Folded Reload
fmul d4, d1, d15
fsub d2, d2, d4
fadd d2, d2, d7
fadd d2, d3, d2
ldr d3, [sp, #736] ; 8-byte Folded Reload
fmul d3, d22, d3
fadd d2, d3, d2
fmul d3, d1, d6
fsub d2, d2, d3
str d2, [x8, #128]
LBB19_70:
str d7, [sp, #5952] ; 8-byte Folded Spill
str d17, [sp, #6488] ; 8-byte Folded Spill
mov x9, #64990
movk x9, #28266, lsl #16
movk x9, #45172, lsl #32
movk x9, #16414, lsl #48
fmov d3, x9
fmul d2, d11, d3
ldr d1, [sp, #3440] ; 8-byte Folded Reload
fmul d4, d29, d1
fsub d5, d4, d2
mov x9, #5915
movk x9, #64709, lsl #16
movk x9, #30489, lsl #32
movk x9, #16392, lsl #48
fmov d18, x9
ldr d1, [sp, #8392] ; 8-byte Folded Reload
fmul d2, d1, d18
ldr d1, [sp, #10696] ; 8-byte Folded Reload
ldr d4, [sp, #3488] ; 8-byte Folded Reload
fmul d4, d1, d4
fsub d2, d4, d2
ldr d15, [sp, #6472] ; 8-byte Folded Reload
fmul d6, d9, d15
fsub d21, d2, d6
mov x9, #50080
movk x9, #49599, lsl #16
movk x9, #32579, lsl #32
movk x9, #16368, lsl #48
fmov d26, x9
ldr d1, [sp, #9992] ; 8-byte Folded Reload
fmul d2, d1, d26
ldr d1, [sp, #6000] ; 8-byte Folded Reload
fmul d4, d27, d1
fsub d2, d4, d2
mov x9, #54806
movk x9, #23353, lsl #16
movk x9, #56949, lsl #32
movk x9, #16326, lsl #48
fmov d17, x9
ldr d1, [sp, #10000] ; 8-byte Folded Reload
fmul d4, d1, d17
ldr d1, [sp, #11944] ; 8-byte Folded Reload
ldr d7, [sp, #4936] ; 8-byte Folded Reload
fmul d7, d1, d7
fsub d4, d4, d7
ldr d1, [sp, #12184] ; 8-byte Folded Reload
ldr d7, [sp, #3840] ; 8-byte Folded Reload
fmul d16, d1, d7
fadd d7, d4, d16
ldr d1, [sp, #12176] ; 8-byte Folded Reload
ldr d4, [sp, #5904] ; 8-byte Folded Reload
fmul d4, d1, d4
fadd d1, d4, d7
str d1, [sp, #11240] ; 8-byte Folded Spill
fadd d2, d2, d1
ldr d1, [sp, #12120] ; 8-byte Folded Reload
ldr d7, [sp, #3792] ; 8-byte Folded Reload
fmul d7, d1, d7
fadd d2, d7, d2
ldr d1, [sp, #10664] ; 8-byte Folded Reload
fmul d22, d1, d17
fadd d2, d22, d2
ldr d1, [sp, #12128] ; 8-byte Folded Reload
ldr d22, [sp, #3592] ; 8-byte Folded Reload
fmul d22, d1, d22
fsub d2, d2, d22
ldur d1, [x29, #-176] ; 8-byte Folded Reload
ldr d22, [sp, #3584] ; 8-byte Folded Reload
fmul d22, d1, d22
fsub d22, d2, d22
ldr d1, [sp, #11200] ; 8-byte Folded Reload
ldr d2, [sp, #3832] ; 8-byte Folded Reload
fmul d2, d1, d2
fadd d24, d22, d2
ldr d1, [sp, #12272] ; 8-byte Folded Reload
fmul d22, d1, d15
fadd d24, d24, d22
mov x9, #65123
movk x9, #27942, lsl #16
movk x9, #23314, lsl #32
movk x9, #16371, lsl #48
fmov d28, x9
ldr d1, [sp, #10672] ; 8-byte Folded Reload
fmul d27, d1, d28
fadd d24, d27, d24
ldr d1, [sp, #12344] ; 8-byte Folded Reload
ldr d23, [sp, #3576] ; 8-byte Folded Reload
fmul d27, d1, d23
fsub d1, d24, d27
str d1, [sp, #6064] ; 8-byte Folded Spill
fadd d21, d21, d1
mov x9, #24565
movk x9, #58125, lsl #16
movk x9, #44270, lsl #32
movk x9, #16372, lsl #48
fmov d11, x9
ldr d1, [sp, #8416] ; 8-byte Folded Reload
fmul d24, d1, d11
fadd d21, d24, d21
ldr d1, [sp, #12032] ; 8-byte Folded Reload
ldr d23, [sp, #3528] ; 8-byte Folded Reload
fmul d24, d1, d23
fsub d1, d21, d24
str d1, [sp, #6016] ; 8-byte Folded Spill
fadd d5, d5, d1
ldr d1, [sp, #11480] ; 8-byte Folded Reload
fmul d24, d1, d15
fadd d5, d24, d5
mov x9, #58251
movk x9, #46885, lsl #16
movk x9, #26312, lsl #32
movk x9, #16401, lsl #48
fmov d21, x9
ldr d29, [sp, #6136] ; 8-byte Folded Reload
fmul d27, d29, d21
fadd d5, d27, d5
ldr d1, [sp, #12024] ; 8-byte Folded Reload
ldr d23, [sp, #6008] ; 8-byte Folded Reload
fmul d27, d1, d23
fsub d27, d5, d27
fmul d3, d12, d3
ldr d1, [sp, #3328] ; 8-byte Folded Reload
fmul d5, d30, d1
fsub d12, d5, d3
fmul d3, d10, d18
ldr d1, [sp, #10680] ; 8-byte Folded Reload
ldr d5, [sp, #3352] ; 8-byte Folded Reload
fmul d5, d1, d5
fsub d18, d5, d3
ldr d1, [sp, #9976] ; 8-byte Folded Reload
fmul d3, d1, d26
ldr d1, [sp, #11288] ; 8-byte Folded Reload
ldr d5, [sp, #5968] ; 8-byte Folded Reload
fmul d5, d1, d5
fsub d5, d5, d3
ldr d1, [sp, #9984] ; 8-byte Folded Reload
fmul d3, d1, d17
ldr d1, [sp, #11928] ; 8-byte Folded Reload
ldr d23, [sp, #4880] ; 8-byte Folded Reload
fmul d26, d1, d23
fsub d26, d3, d26
ldr d1, [sp, #12288] ; 8-byte Folded Reload
ldr d3, [sp, #3744] ; 8-byte Folded Reload
fmul d0, d1, d3
str d0, [sp, #11320] ; 8-byte Folded Spill
fadd d26, d26, d0
ldr d1, [sp, #12168] ; 8-byte Folded Reload
ldr d23, [sp, #5896] ; 8-byte Folded Reload
fmul d8, d1, d23
fadd d1, d8, d26
str d1, [sp, #9384] ; 8-byte Folded Spill
fadd d26, d5, d1
ldr d1, [sp, #12256] ; 8-byte Folded Reload
ldr d5, [sp, #3712] ; 8-byte Folded Reload
fmul d0, d1, d5
str d0, [sp, #11368] ; 8-byte Folded Spill
fadd d26, d0, d26
ldr d1, [sp, #10632] ; 8-byte Folded Reload
fmul d17, d1, d17
fadd d17, d17, d26
ldr d1, [sp, #12264] ; 8-byte Folded Reload
ldr d5, [sp, #3432] ; 8-byte Folded Reload
fmul d26, d1, d5
fsub d17, d17, d26
ldur d1, [x29, #-192] ; 8-byte Folded Reload
ldr d5, [sp, #3424] ; 8-byte Folded Reload
fmul d26, d1, d5
fsub d17, d17, d26
ldr d1, [sp, #11184] ; 8-byte Folded Reload
ldr d5, [sp, #3696] ; 8-byte Folded Reload
fmul d0, d1, d5
str d0, [sp, #11360] ; 8-byte Folded Spill
fadd d26, d17, d0
ldr d1, [sp, #12328] ; 8-byte Folded Reload
ldr d5, [sp, #6464] ; 8-byte Folded Reload
fmul d0, d1, d5
str d0, [sp, #11392] ; 8-byte Folded Spill
fadd d26, d26, d0
ldr d1, [sp, #10640] ; 8-byte Folded Reload
fmul d28, d1, d28
fadd d26, d28, d26
ldur d1, [x29, #-224] ; 8-byte Folded Reload
ldr d17, [sp, #3400] ; 8-byte Folded Reload
fmul d28, d1, d17
fsub d17, d26, d28
ldr d1, [sp, #11304] ; 8-byte Folded Reload
fmul d1, d1, d5
str d1, [sp, #12072] ; 8-byte Folded Spill
fsub d18, d18, d1
str d17, [sp, #6008] ; 8-byte Folded Spill
fadd d18, d18, d17
ldr d1, [sp, #10648] ; 8-byte Folded Reload
fmul d26, d1, d11
fadd d18, d26, d18
ldr d1, [sp, #12112] ; 8-byte Folded Reload
ldr d17, [sp, #3376] ; 8-byte Folded Reload
fmul d26, d1, d17
fsub d1, d18, d26
str d1, [sp, #6000] ; 8-byte Folded Spill
fadd d18, d12, d1
ldr d31, [sp, #11416] ; 8-byte Folded Reload
ldr d28, [sp, #11408] ; 8-byte Folded Reload
fmul d1, d28, d5
str d1, [sp, #5984] ; 8-byte Folded Spill
fadd d18, d1, d18
ldr d1, [sp, #10656] ; 8-byte Folded Reload
fmul d21, d1, d21
fadd d18, d21, d18
fmul d30, d31, d15
ldr d1, [sp, #12080] ; 8-byte Folded Reload
ldr d17, [sp, #5960] ; 8-byte Folded Reload
fmul d21, d1, d17
fsub d19, d18, d21
fmul d17, d31, d5
ldr d23, [sp, #5184] ; 8-byte Folded Reload
ldr d0, [sp, #11400] ; 8-byte Folded Reload
cbz x8, LBB19_72
; %bb.71:
ldr d1, [sp, #5240] ; 8-byte Folded Reload
ldur d18, [x29, #-256] ; 8-byte Folded Reload
fmul d18, d1, d18
mov x9, #43139
movk x9, #8835, lsl #16
movk x9, #28093, lsl #32
movk x9, #49187, lsl #48
fmov d21, x9
fmul d18, d18, d21
ldr d5, [sp, #4464] ; 8-byte Folded Reload
fmul d21, d1, d5
fadd d18, d21, d18
mov x9, #47887
movk x9, #56309, lsl #16
movk x9, #15746, lsl #32
movk x9, #16444, lsl #48
fmov d21, x9
ldr d1, [sp, #4536] ; 8-byte Folded Reload
fmul d26, d1, d21
fsub d18, d18, d26
ldr d1, [sp, #8768] ; 8-byte Folded Reload
ldr d5, [sp, #4472] ; 8-byte Folded Reload
fmul d26, d1, d5
fadd d18, d26, d18
ldr d1, [sp, #6288] ; 8-byte Folded Reload
fmul d21, d1, d21
fadd d18, d21, d18
ldr d1, [sp, #8656] ; 8-byte Folded Reload
ldr d5, [sp, #480] ; 8-byte Folded Reload
fmul d21, d1, d5
fsub d18, d18, d21
fadd d18, d18, d27
fadd d18, d30, d18
mov x9, #28852
movk x9, #37576, lsl #16
movk x9, #2974, lsl #32
movk x9, #16424, lsl #48
fmov d21, x9
fmul d21, d23, d21
fadd d18, d21, d18
ldr d1, [sp, #12040] ; 8-byte Folded Reload
ldr d5, [sp, #488] ; 8-byte Folded Reload
fmul d26, d1, d5
fsub d18, d18, d26
fadd d18, d18, d19
fadd d18, d17, d18
fadd d18, d21, d18
fsub d18, d18, d26
str d18, [x8, #136]
LBB19_72:
str d30, [sp, #4840] ; 8-byte Folded Spill
str d19, [sp, #5896] ; 8-byte Folded Spill
str d17, [sp, #5976] ; 8-byte Folded Spill
stur d27, [x29, #-256] ; 8-byte Folded Spill
ldr d1, [sp, #9320] ; 8-byte Folded Reload
ldr d5, [sp, #9864] ; 8-byte Folded Reload
fmul d18, d1, d5
ldr d1, [sp, #3040] ; 8-byte Folded Reload
ldr d5, [sp, #11048] ; 8-byte Folded Reload
fmul d21, d5, d1
fadd d18, d18, d21
ldr d1, [sp, #9976] ; 8-byte Folded Reload
ldr d5, [sp, #9328] ; 8-byte Folded Reload
fmul d21, d5, d1
fsub d18, d18, d21
ldr d1, [sp, #11288] ; 8-byte Folded Reload
ldr d5, [sp, #2632] ; 8-byte Folded Reload
fmul d21, d1, d5
fsub d18, d18, d21
ldr d1, [sp, #8736] ; 8-byte Folded Reload
ldr d5, [sp, #9872] ; 8-byte Folded Reload
fmul d21, d1, d5
ldr d1, [sp, #11920] ; 8-byte Folded Reload
ldr d5, [sp, #4800] ; 8-byte Folded Reload
fmul d26, d1, d5
fadd d21, d26, d21
ldr d1, [sp, #9984] ; 8-byte Folded Reload
ldr d5, [sp, #6496] ; 8-byte Folded Reload
fmul d26, d5, d1
fadd d21, d26, d21
ldr d1, [sp, #11928] ; 8-byte Folded Reload
ldr d5, [sp, #4736] ; 8-byte Folded Reload
fmul d26, d1, d5
fsub d21, d21, d26
ldr d1, [sp, #11912] ; 8-byte Folded Reload
ldr d5, [sp, #5856] ; 8-byte Folded Reload
fmul d26, d1, d5
fsub d21, d21, d26
ldr d1, [sp, #12288] ; 8-byte Folded Reload
ldr d5, [sp, #3320] ; 8-byte Folded Reload
fmul d26, d1, d5
fadd d21, d26, d21
ldr d1, [sp, #11744] ; 8-byte Folded Reload
ldr d5, [sp, #2944] ; 8-byte Folded Reload
fmul d26, d1, d5
fsub d21, d21, d26
ldr d1, [sp, #12168] ; 8-byte Folded Reload
ldr d5, [sp, #5864] ; 8-byte Folded Reload
fmul d26, d1, d5
fadd d21, d26, d21
fadd d21, d14, d21
fadd d1, d25, d21
str d1, [sp, #9376] ; 8-byte Folded Spill
fadd d18, d18, d1
ldr d1, [sp, #11000] ; 8-byte Folded Reload
ldr d5, [sp, #8352] ; 8-byte Folded Reload
fmul d21, d5, d1
fsub d18, d18, d21
ldr d1, [sp, #12256] ; 8-byte Folded Reload
ldr d5, [sp, #3120] ; 8-byte Folded Reload
fmul d21, d1, d5
fadd d18, d21, d18
ldr d1, [sp, #10632] ; 8-byte Folded Reload
ldr d5, [sp, #8344] ; 8-byte Folded Reload
fmul d21, d5, d1
fadd d18, d21, d18
ldr d1, [sp, #12264] ; 8-byte Folded Reload
ldr d5, [sp, #2776] ; 8-byte Folded Reload
fmul d21, d1, d5
fsub d18, d18, d21
ldur d1, [x29, #-160] ; 8-byte Folded Reload
ldr d5, [sp, #3096] ; 8-byte Folded Reload
fmul d21, d1, d5
fadd d18, d21, d18
ldur d1, [x29, #-192] ; 8-byte Folded Reload
ldr d5, [sp, #2760] ; 8-byte Folded Reload
fmul d21, d1, d5
fsub d18, d18, d21
ldr d1, [sp, #11192] ; 8-byte Folded Reload
ldr d5, [sp, #5848] ; 8-byte Folded Reload
fmul d21, d1, d5
fsub d18, d18, d21
ldr d1, [sp, #11184] ; 8-byte Folded Reload
ldr d5, [sp, #3312] ; 8-byte Folded Reload
fmul d21, d1, d5
fadd d18, d21, d18
ldr d1, [sp, #11008] ; 8-byte Folded Reload
ldr d5, [sp, #8368] ; 8-byte Folded Reload
fmul d21, d5, d1
fsub d18, d18, d21
ldr d1, [sp, #12328] ; 8-byte Folded Reload
ldr d5, [sp, #3088] ; 8-byte Folded Reload
fmul d21, d1, d5
fadd d18, d21, d18
ldr d1, [sp, #10640] ; 8-byte Folded Reload
ldr d5, [sp, #8360] ; 8-byte Folded Reload
fmul d21, d5, d1
fadd d18, d21, d18
ldur d1, [x29, #-224] ; 8-byte Folded Reload
ldr d5, [sp, #2648] ; 8-byte Folded Reload
fmul d21, d1, d5
fsub d17, d18, d21
ldr d1, [sp, #8312] ; 8-byte Folded Reload
ldr d5, [sp, #8224] ; 8-byte Folded Reload
fmul d18, d1, d5
ldr d1, [sp, #10688] ; 8-byte Folded Reload
ldr d5, [sp, #3008] ; 8-byte Folded Reload
fmul d21, d1, d5
fadd d18, d18, d21
ldr d1, [sp, #8304] ; 8-byte Folded Reload
fmul d21, d1, d10
fadd d18, d21, d18
ldr d1, [sp, #10680] ; 8-byte Folded Reload
ldr d5, [sp, #2616] ; 8-byte Folded Reload
fmul d21, d1, d5
fadd d18, d21, d18
ldr d1, [sp, #10984] ; 8-byte Folded Reload
ldr d5, [sp, #9312] ; 8-byte Folded Reload
fmul d21, d5, d1
fadd d18, d21, d18
ldr d12, [sp, #11304] ; 8-byte Folded Reload
ldr d1, [sp, #3304] ; 8-byte Folded Reload
fmul d21, d12, d1
fadd d18, d18, d21
str d17, [sp, #8224] ; 8-byte Folded Spill
fsub d18, d17, d18
ldr d1, [sp, #10648] ; 8-byte Folded Reload
ldr d5, [sp, #9304] ; 8-byte Folded Reload
fmul d21, d5, d1
fadd d18, d21, d18
ldr d1, [sp, #12112] ; 8-byte Folded Reload
ldr d5, [sp, #2640] ; 8-byte Folded Reload
fmul d21, d1, d5
fsub d17, d18, d21
ldr d1, [sp, #11016] ; 8-byte Folded Reload
ldr d5, [sp, #8296] ; 8-byte Folded Reload
fmul d18, d5, d1
str d17, [sp, #6288] ; 8-byte Folded Spill
fsub d18, d17, d18
ldr d1, [sp, #3016] ; 8-byte Folded Reload
fmul d21, d28, d1
fadd d18, d21, d18
ldr d1, [sp, #10656] ; 8-byte Folded Reload
ldr d5, [sp, #8288] ; 8-byte Folded Reload
fmul d21, d5, d1
fadd d18, d21, d18
ldr d1, [sp, #12080] ; 8-byte Folded Reload
ldr d5, [sp, #2624] ; 8-byte Folded Reload
fmul d21, d1, d5
fsub d19, d18, d21
ldr d10, [sp, #9648] ; 8-byte Folded Reload
ldr d17, [sp, #6048] ; 8-byte Folded Reload
ldr d27, [sp, #6456] ; 8-byte Folded Reload
ldr d15, [sp, #6448] ; 8-byte Folded Reload
cbz x8, LBB19_74
; %bb.73:
ldr d1, [sp, #5592] ; 8-byte Folded Reload
fmul d18, d13, d1
fsub d18, d19, d18
ldp d1, d5, [sp, #224] ; 16-byte Folded Reload
fmul d21, d31, d1
fadd d18, d21, d18
ldr d1, [sp, #5600] ; 8-byte Folded Reload
fmul d21, d23, d1
fadd d18, d21, d18
ldr d1, [sp, #12040] ; 8-byte Folded Reload
fmul d21, d1, d5
fsub d18, d18, d21
str d18, [x8, #144]
LBB19_74:
str d19, [sp, #5864] ; 8-byte Folded Spill
ldr d1, [sp, #9960] ; 8-byte Folded Reload
ldr d5, [sp, #9232] ; 8-byte Folded Reload
fmul d18, d5, d1
ldr d1, [sp, #2432] ; 8-byte Folded Reload
fmul d21, d10, d1
fadd d18, d18, d21
ldr d1, [sp, #9992] ; 8-byte Folded Reload
ldr d5, [sp, #9240] ; 8-byte Folded Reload
fmul d21, d5, d1
fsub d18, d18, d21
ldr d11, [sp, #9624] ; 8-byte Folded Reload
ldr d1, [sp, #2368] ; 8-byte Folded Reload
fmul d21, d11, d1
fsub d18, d18, d21
ldr d1, [sp, #9968] ; 8-byte Folded Reload
ldr d5, [sp, #8728] ; 8-byte Folded Reload
fmul d21, d5, d1
ldr d1, [sp, #11936] ; 8-byte Folded Reload
ldr d5, [sp, #4768] ; 8-byte Folded Reload
fmul d26, d1, d5
fadd d21, d26, d21
ldr d1, [sp, #10000] ; 8-byte Folded Reload
ldr d5, [sp, #8720] ; 8-byte Folded Reload
fmul d26, d5, d1
fadd d21, d26, d21
ldr d1, [sp, #11944] ; 8-byte Folded Reload
ldr d5, [sp, #4728] ; 8-byte Folded Reload
fmul d26, d1, d5
fsub d21, d21, d26
ldr d1, [sp, #11752] ; 8-byte Folded Reload
ldr d5, [sp, #5832] ; 8-byte Folded Reload
fmul d26, d1, d5
fsub d21, d21, d26
ldr d1, [sp, #12184] ; 8-byte Folded Reload
ldr d5, [sp, #2608] ; 8-byte Folded Reload
fmul d26, d1, d5
fadd d21, d26, d21
ldr d1, [sp, #11672] ; 8-byte Folded Reload
ldr d5, [sp, #2408] ; 8-byte Folded Reload
fmul d26, d1, d5
fsub d21, d21, d26
ldr d1, [sp, #12176] ; 8-byte Folded Reload
ldr d5, [sp, #5840] ; 8-byte Folded Reload
fmul d26, d1, d5
fadd d21, d26, d21
fadd d21, d0, d21
fadd d1, d20, d21
str d1, [sp, #11160] ; 8-byte Folded Spill
fadd d18, d18, d1
ldr d1, [sp, #11024] ; 8-byte Folded Reload
ldr d5, [sp, #8240] ; 8-byte Folded Reload
fmul d21, d5, d1
fsub d18, d18, d21
ldr d1, [sp, #12120] ; 8-byte Folded Reload
ldr d5, [sp, #2536] ; 8-byte Folded Reload
fmul d21, d1, d5
fadd d18, d21, d18
ldr d1, [sp, #10664] ; 8-byte Folded Reload
ldr d5, [sp, #8232] ; 8-byte Folded Reload
fmul d21, d5, d1
fadd d18, d21, d18
ldr d1, [sp, #12128] ; 8-byte Folded Reload
ldr d5, [sp, #2400] ; 8-byte Folded Reload
fmul d21, d1, d5
fsub d18, d18, d21
ldur d1, [x29, #-168] ; 8-byte Folded Reload
ldr d5, [sp, #2528] ; 8-byte Folded Reload
fmul d21, d1, d5
fadd d18, d21, d18
ldur d1, [x29, #-176] ; 8-byte Folded Reload
ldr d5, [sp, #2392] ; 8-byte Folded Reload
fmul d21, d1, d5
fsub d18, d18, d21
ldr d1, [sp, #11208] ; 8-byte Folded Reload
ldr d5, [sp, #5824] ; 8-byte Folded Reload
fmul d21, d1, d5
fsub d18, d18, d21
ldr d1, [sp, #11200] ; 8-byte Folded Reload
ldr d5, [sp, #2600] ; 8-byte Folded Reload
fmul d21, d1, d5
fadd d18, d21, d18
ldr d1, [sp, #11032] ; 8-byte Folded Reload
ldr d5, [sp, #8256] ; 8-byte Folded Reload
fmul d21, d5, d1
fsub d18, d18, d21
ldr d1, [sp, #12272] ; 8-byte Folded Reload
ldr d5, [sp, #2504] ; 8-byte Folded Reload
fmul d21, d1, d5
fadd d18, d21, d18
ldr d1, [sp, #10672] ; 8-byte Folded Reload
ldr d5, [sp, #8248] ; 8-byte Folded Reload
fmul d21, d5, d1
fadd d18, d21, d18
ldr d1, [sp, #12344] ; 8-byte Folded Reload
ldr d5, [sp, #2384] ; 8-byte Folded Reload
fmul d21, d1, d5
fsub d19, d18, d21
ldr d1, [sp, #8264] ; 8-byte Folded Reload
ldr d5, [sp, #8216] ; 8-byte Folded Reload
fmul d18, d5, d1
ldr d1, [sp, #10704] ; 8-byte Folded Reload
ldr d5, [sp, #2416] ; 8-byte Folded Reload
fmul d21, d1, d5
fadd d18, d18, d21
ldr d1, [sp, #8392] ; 8-byte Folded Reload
ldr d5, [sp, #8208] ; 8-byte Folded Reload
fmul d21, d5, d1
fadd d18, d21, d18
ldr d1, [sp, #10696] ; 8-byte Folded Reload
ldr d5, [sp, #2352] ; 8-byte Folded Reload
fmul d21, d1, d5
fadd d18, d21, d18
ldr d1, [sp, #10992] ; 8-byte Folded Reload
ldr d5, [sp, #9224] ; 8-byte Folded Reload
fmul d21, d5, d1
fadd d18, d18, d21
ldr d1, [sp, #2592] ; 8-byte Folded Reload
fmul d21, d9, d1
fadd d18, d18, d21
str d19, [sp, #8392] ; 8-byte Folded Spill
fsub d18, d19, d18
ldr d30, [sp, #8416] ; 8-byte Folded Reload
ldr d1, [sp, #9216] ; 8-byte Folded Reload
fmul d21, d1, d30
fadd d18, d21, d18
ldr d1, [sp, #12032] ; 8-byte Folded Reload
ldr d5, [sp, #2376] ; 8-byte Folded Reload
fmul d21, d1, d5
fsub d19, d18, d21
ldr d1, [sp, #11040] ; 8-byte Folded Reload
ldr d5, [sp, #8200] ; 8-byte Folded Reload
fmul d18, d5, d1
str d19, [sp, #8264] ; 8-byte Folded Spill
fsub d18, d19, d18
ldr d1, [sp, #11480] ; 8-byte Folded Reload
ldr d5, [sp, #2424] ; 8-byte Folded Reload
fmul d21, d1, d5
fadd d18, d21, d18
ldr d1, [sp, #8192] ; 8-byte Folded Reload
fmul d21, d1, d29
fadd d18, d21, d18
ldr d1, [sp, #12024] ; 8-byte Folded Reload
ldr d5, [sp, #2360] ; 8-byte Folded Reload
fmul d21, d1, d5
fsub d19, d18, d21
cbz x8, LBB19_76
; %bb.75:
ldr d1, [sp, #5472] ; 8-byte Folded Reload
fmul d18, d13, d1
fsub d18, d19, d18
ldp d1, d5, [sp, #64] ; 16-byte Folded Reload
fmul d21, d31, d1
fadd d18, d21, d18
ldr d1, [sp, #5480] ; 8-byte Folded Reload
fmul d21, d23, d1
fadd d18, d21, d18
ldr d1, [sp, #12040] ; 8-byte Folded Reload
fmul d21, d1, d5
fsub d18, d18, d21
str d18, [x8, #152]
LBB19_76:
ldr d1, [sp, #9864] ; 8-byte Folded Reload
ldr d5, [sp, #9184] ; 8-byte Folded Reload
fmul d18, d5, d1
ldr d1, [sp, #2296] ; 8-byte Folded Reload
ldr d5, [sp, #11048] ; 8-byte Folded Reload
fmul d21, d5, d1
fadd d18, d18, d21
ldr d1, [sp, #9976] ; 8-byte Folded Reload
ldr d5, [sp, #9176] ; 8-byte Folded Reload
fmul d21, d5, d1
fsub d18, d18, d21
ldr d1, [sp, #11288] ; 8-byte Folded Reload
ldr d5, [sp, #2248] ; 8-byte Folded Reload
fmul d21, d1, d5
fsub d18, d18, d21
ldr d1, [sp, #9872] ; 8-byte Folded Reload
ldr d5, [sp, #8712] ; 8-byte Folded Reload
fmul d21, d5, d1
ldr d1, [sp, #11920] ; 8-byte Folded Reload
ldr d5, [sp, #4688] ; 8-byte Folded Reload
fmul d26, d1, d5
fadd d21, d26, d21
ldr d1, [sp, #9984] ; 8-byte Folded Reload
ldr d5, [sp, #8704] ; 8-byte Folded Reload
fmul d26, d5, d1
fadd d21, d26, d21
ldr d1, [sp, #11928] ; 8-byte Folded Reload
ldr d5, [sp, #4632] ; 8-byte Folded Reload
fmul d26, d1, d5
fsub d21, d21, d26
ldr d1, [sp, #11912] ; 8-byte Folded Reload
ldr d5, [sp, #5808] ; 8-byte Folded Reload
fmul d26, d1, d5
fsub d21, d21, d26
ldr d1, [sp, #12288] ; 8-byte Folded Reload
ldr d5, [sp, #2344] ; 8-byte Folded Reload
fmul d26, d1, d5
fadd d21, d26, d21
ldr d1, [sp, #11744] ; 8-byte Folded Reload
ldr d5, [sp, #2280] ; 8-byte Folded Reload
fmul d26, d1, d5
fsub d21, d21, d26
ldr d1, [sp, #12168] ; 8-byte Folded Reload
ldr d5, [sp, #5816] ; 8-byte Folded Reload
fmul d26, d1, d5
fadd d21, d26, d21
fadd d21, d14, d21
fadd d1, d25, d21
str d1, [sp, #9368] ; 8-byte Folded Spill
fadd d18, d18, d1
ldr d1, [sp, #11000] ; 8-byte Folded Reload
ldr d5, [sp, #8176] ; 8-byte Folded Reload
fmul d21, d5, d1
fsub d18, d18, d21
ldr d1, [sp, #12256] ; 8-byte Folded Reload
ldr d5, [sp, #2320] ; 8-byte Folded Reload
fmul d21, d1, d5
fadd d18, d21, d18
ldr d1, [sp, #10632] ; 8-byte Folded Reload
ldr d5, [sp, #8160] ; 8-byte Folded Reload
fmul d21, d5, d1
fadd d18, d21, d18
ldr d1, [sp, #12264] ; 8-byte Folded Reload
ldr d5, [sp, #2272] ; 8-byte Folded Reload
fmul d21, d1, d5
fsub d18, d18, d21
ldur d1, [x29, #-160] ; 8-byte Folded Reload
ldr d5, [sp, #2312] ; 8-byte Folded Reload
fmul d21, d1, d5
fadd d18, d21, d18
ldur d1, [x29, #-192] ; 8-byte Folded Reload
ldr d5, [sp, #2264] ; 8-byte Folded Reload
fmul d21, d1, d5
fsub d18, d18, d21
ldr d1, [sp, #11192] ; 8-byte Folded Reload
ldr d5, [sp, #5800] ; 8-byte Folded Reload
fmul d21, d1, d5
fsub d18, d18, d21
ldr d1, [sp, #11184] ; 8-byte Folded Reload
ldr d5, [sp, #2336] ; 8-byte Folded Reload
fmul d21, d1, d5
fadd d18, d21, d18
ldr d1, [sp, #11008] ; 8-byte Folded Reload
ldr d5, [sp, #9200] ; 8-byte Folded Reload
fmul d21, d5, d1
fsub d18, d18, d21
ldr d1, [sp, #12328] ; 8-byte Folded Reload
ldr d5, [sp, #2304] ; 8-byte Folded Reload
fmul d21, d1, d5
fadd d18, d21, d18
ldr d1, [sp, #10640] ; 8-byte Folded Reload
ldr d5, [sp, #8168] ; 8-byte Folded Reload
fmul d21, d5, d1
fadd d18, d21, d18
ldur d1, [x29, #-224] ; 8-byte Folded Reload
ldr d5, [sp, #2256] ; 8-byte Folded Reload
fmul d21, d1, d5
fsub d26, d18, d21
ldr d1, [sp, #10984] ; 8-byte Folded Reload
ldr d5, [sp, #9208] ; 8-byte Folded Reload
fmul d18, d5, d1
ldr d1, [sp, #2328] ; 8-byte Folded Reload
fmul d21, d12, d1
fadd d18, d18, d21
str d26, [sp, #5992] ; 8-byte Folded Spill
fsub d18, d26, d18
ldr d1, [sp, #10648] ; 8-byte Folded Reload
ldr d5, [sp, #9192] ; 8-byte Folded Reload
fmul d21, d5, d1
fadd d18, d21, d18
ldr d1, [sp, #12112] ; 8-byte Folded Reload
ldr d5, [sp, #2240] ; 8-byte Folded Reload
fmul d21, d1, d5
fsub d21, d18, d21
ldr d1, [sp, #11016] ; 8-byte Folded Reload
ldr d5, [sp, #8696] ; 8-byte Folded Reload
fmul d18, d5, d1
str d21, [sp, #5968] ; 8-byte Folded Spill
fsub d18, d21, d18
ldr d1, [sp, #2288] ; 8-byte Folded Reload
fmul d21, d28, d1
fadd d18, d21, d18
ldr d1, [sp, #10656] ; 8-byte Folded Reload
ldr d26, [sp, #8688] ; 8-byte Folded Reload
fmul d21, d26, d1
fadd d18, d21, d18
ldr d1, [sp, #12080] ; 8-byte Folded Reload
ldr d21, [sp, #2232] ; 8-byte Folded Reload
fmul d21, d1, d21
fsub d28, d18, d21
cbz x8, LBB19_78
; %bb.77:
fmul d18, d13, d5
fsub d18, d28, d18
ldp d1, d5, [sp] ; 16-byte Folded Reload
fmul d21, d31, d1
fadd d18, d21, d18
fmul d21, d23, d26
fadd d18, d21, d18
ldr d1, [sp, #12040] ; 8-byte Folded Reload
fmul d21, d1, d5
fsub d18, d18, d21
str d18, [x8, #160]
LBB19_78:
ldr d1, [sp, #9960] ; 8-byte Folded Reload
ldr d5, [sp, #9096] ; 8-byte Folded Reload
fmul d18, d5, d1
ldr d1, [sp, #2176] ; 8-byte Folded Reload
fmul d21, d10, d1
fadd d18, d18, d21
ldr d1, [sp, #9992] ; 8-byte Folded Reload
ldr d5, [sp, #9088] ; 8-byte Folded Reload
fmul d21, d5, d1
fsub d18, d18, d21
ldr d1, [sp, #2128] ; 8-byte Folded Reload
fmul d21, d11, d1
fsub d18, d18, d21
ldr d1, [sp, #9968] ; 8-byte Folded Reload
ldr d5, [sp, #8680] ; 8-byte Folded Reload
fmul d21, d5, d1
ldr d1, [sp, #11936] ; 8-byte Folded Reload
ldr d5, [sp, #4664] ; 8-byte Folded Reload
fmul d26, d1, d5
fadd d21, d26, d21
ldr d1, [sp, #10000] ; 8-byte Folded Reload
ldr d5, [sp, #8672] ; 8-byte Folded Reload
fmul d26, d5, d1
fadd d21, d26, d21
ldr d1, [sp, #11944] ; 8-byte Folded Reload
ldr d5, [sp, #4624] ; 8-byte Folded Reload
fmul d26, d1, d5
fsub d21, d21, d26
ldr d1, [sp, #11752] ; 8-byte Folded Reload
ldr d5, [sp, #5784] ; 8-byte Folded Reload
fmul d26, d1, d5
fsub d21, d21, d26
ldr d1, [sp, #12184] ; 8-byte Folded Reload
ldr d5, [sp, #2224] ; 8-byte Folded Reload
fmul d26, d1, d5
fadd d21, d26, d21
ldr d1, [sp, #11672] ; 8-byte Folded Reload
ldr d5, [sp, #2160] ; 8-byte Folded Reload
fmul d26, d1, d5
fsub d21, d21, d26
ldr d1, [sp, #12176] ; 8-byte Folded Reload
ldr d5, [sp, #5792] ; 8-byte Folded Reload
fmul d26, d1, d5
fadd d21, d26, d21
fadd d21, d0, d21
fadd d1, d20, d21
str d1, [sp, #9856] ; 8-byte Folded Spill
fadd d18, d18, d1
ldr d1, [sp, #11024] ; 8-byte Folded Reload
ldr d5, [sp, #7992] ; 8-byte Folded Reload
fmul d21, d5, d1
fsub d18, d18, d21
ldr d1, [sp, #12120] ; 8-byte Folded Reload
ldr d5, [sp, #2200] ; 8-byte Folded Reload
fmul d21, d1, d5
fadd d18, d21, d18
ldr d1, [sp, #10664] ; 8-byte Folded Reload
ldr d5, [sp, #7976] ; 8-byte Folded Reload
fmul d21, d5, d1
fadd d18, d21, d18
ldr d1, [sp, #12128] ; 8-byte Folded Reload
ldr d5, [sp, #2152] ; 8-byte Folded Reload
fmul d21, d1, d5
fsub d18, d18, d21
ldur d1, [x29, #-168] ; 8-byte Folded Reload
ldr d5, [sp, #2192] ; 8-byte Folded Reload
fmul d21, d1, d5
fadd d18, d21, d18
ldur d1, [x29, #-176] ; 8-byte Folded Reload
ldr d5, [sp, #2144] ; 8-byte Folded Reload
fmul d21, d1, d5
fsub d18, d18, d21
ldr d1, [sp, #11208] ; 8-byte Folded Reload
ldr d5, [sp, #5776] ; 8-byte Folded Reload
fmul d21, d1, d5
fsub d18, d18, d21
ldr d1, [sp, #11200] ; 8-byte Folded Reload
ldr d5, [sp, #2216] ; 8-byte Folded Reload
fmul d21, d1, d5
fadd d18, d21, d18
ldr d1, [sp, #11032] ; 8-byte Folded Reload
ldr d5, [sp, #9112] ; 8-byte Folded Reload
fmul d21, d5, d1
fsub d18, d18, d21
ldr d1, [sp, #12272] ; 8-byte Folded Reload
ldr d5, [sp, #2184] ; 8-byte Folded Reload
fmul d21, d1, d5
fadd d18, d21, d18
ldr d1, [sp, #10672] ; 8-byte Folded Reload
ldr d5, [sp, #7984] ; 8-byte Folded Reload
fmul d21, d5, d1
fadd d18, d21, d18
ldr d1, [sp, #12344] ; 8-byte Folded Reload
ldr d5, [sp, #2136] ; 8-byte Folded Reload
fmul d21, d1, d5
fsub d26, d18, d21
ldr d1, [sp, #10992] ; 8-byte Folded Reload
ldr d5, [sp, #9120] ; 8-byte Folded Reload
fmul d18, d5, d1
ldr d1, [sp, #2208] ; 8-byte Folded Reload
fmul d21, d9, d1
fadd d18, d18, d21
str d26, [sp, #5960] ; 8-byte Folded Spill
fsub d18, d26, d18
ldr d1, [sp, #9104] ; 8-byte Folded Reload
fmul d21, d1, d30
fadd d18, d21, d18
ldr d1, [sp, #12032] ; 8-byte Folded Reload
ldr d5, [sp, #2120] ; 8-byte Folded Reload
fmul d21, d1, d5
fsub d5, d18, d21
ldr d1, [sp, #11040] ; 8-byte Folded Reload
fmul d18, d27, d1
str d5, [sp, #5944] ; 8-byte Folded Spill
fsub d18, d5, d18
ldr d1, [sp, #11480] ; 8-byte Folded Reload
ldr d5, [sp, #2168] ; 8-byte Folded Reload
fmul d21, d1, d5
fadd d18, d21, d18
fmul d21, d15, d29
fadd d18, d21, d18
ldr d1, [sp, #12024] ; 8-byte Folded Reload
ldr d5, [sp, #2112] ; 8-byte Folded Reload
fmul d21, d1, d5
fsub d26, d18, d21
cbz x8, LBB19_80
; %bb.79:
fmul d18, d13, d27
fsub d18, d26, d18
ldr d1, [sp, #1960] ; 8-byte Folded Reload
fmul d21, d31, d1
fadd d18, d21, d18
fmul d21, d23, d15
fadd d18, d21, d18
ldr d1, [sp, #12040] ; 8-byte Folded Reload
ldr d5, [sp, #1968] ; 8-byte Folded Reload
fmul d21, d1, d5
fsub d18, d18, d21
str d18, [x8, #168]
LBB19_80:
str d26, [sp, #5904] ; 8-byte Folded Spill
ldr d1, [sp, #9864] ; 8-byte Folded Reload
ldr d5, [sp, #10136] ; 8-byte Folded Reload
fmul d18, d5, d1
ldr d1, [sp, #7840] ; 8-byte Folded Reload
ldr d5, [sp, #11048] ; 8-byte Folded Reload
fmul d21, d5, d1
fadd d18, d18, d21
ldr d1, [sp, #9976] ; 8-byte Folded Reload
ldr d5, [sp, #10128] ; 8-byte Folded Reload
fmul d21, d5, d1
fsub d18, d18, d21
ldr d1, [sp, #11288] ; 8-byte Folded Reload
ldr d5, [sp, #7224] ; 8-byte Folded Reload
fmul d21, d1, d5
fsub d18, d18, d21
ldr d1, [sp, #9872] ; 8-byte Folded Reload
ldr d5, [sp, #10120] ; 8-byte Folded Reload
fmul d21, d5, d1
ldr d1, [sp, #11920] ; 8-byte Folded Reload
ldr d5, [sp, #7880] ; 8-byte Folded Reload
fmul d26, d1, d5
fadd d21, d26, d21
ldr d1, [sp, #9984] ; 8-byte Folded Reload
ldr d5, [sp, #10144] ; 8-byte Folded Reload
fmul d26, d5, d1
fadd d21, d26, d21
ldr d1, [sp, #11928] ; 8-byte Folded Reload
ldr d5, [sp, #8776] ; 8-byte Folded Reload
fmul d26, d1, d5
fsub d21, d21, d26
ldr d1, [sp, #11912] ; 8-byte Folded Reload
ldr d5, [sp, #7288] ; 8-byte Folded Reload
fmul d26, d1, d5
fsub d21, d21, d26
ldr d1, [sp, #12288] ; 8-byte Folded Reload
ldr d5, [sp, #5768] ; 8-byte Folded Reload
fmul d26, d1, d5
fadd d21, d26, d21
ldr d1, [sp, #11744] ; 8-byte Folded Reload
ldr d5, [sp, #7280] ; 8-byte Folded Reload
fmul d26, d1, d5
fsub d21, d21, d26
ldr d1, [sp, #12168] ; 8-byte Folded Reload
ldr d5, [sp, #7232] ; 8-byte Folded Reload
fmul d26, d1, d5
fadd d21, d26, d21
fadd d21, d14, d21
fadd d1, d25, d21
str d1, [sp, #9984] ; 8-byte Folded Spill
fadd d18, d18, d1
ldr d1, [sp, #11000] ; 8-byte Folded Reload
ldr d5, [sp, #9656] ; 8-byte Folded Reload
fmul d21, d5, d1
fsub d18, d18, d21
ldr d1, [sp, #12256] ; 8-byte Folded Reload
ldr d5, [sp, #7872] ; 8-byte Folded Reload
fmul d21, d1, d5
fadd d18, d21, d18
ldr d1, [sp, #10632] ; 8-byte Folded Reload
ldr d5, [sp, #8856] ; 8-byte Folded Reload
fmul d21, d5, d1
fadd d18, d21, d18
ldr d1, [sp, #12264] ; 8-byte Folded Reload
ldr d5, [sp, #7272] ; 8-byte Folded Reload
fmul d21, d1, d5
fsub d18, d18, d21
ldur d1, [x29, #-160] ; 8-byte Folded Reload
ldr d5, [sp, #7864] ; 8-byte Folded Reload
fmul d21, d1, d5
fadd d18, d21, d18
ldur d1, [x29, #-192] ; 8-byte Folded Reload
ldr d5, [sp, #7264] ; 8-byte Folded Reload
fmul d21, d1, d5
fsub d18, d18, d21
ldr d1, [sp, #11192] ; 8-byte Folded Reload
ldr d5, [sp, #8800] ; 8-byte Folded Reload
fmul d21, d1, d5
fsub d18, d18, d21
ldr d1, [sp, #11184] ; 8-byte Folded Reload
ldr d5, [sp, #8808] ; 8-byte Folded Reload
fmul d21, d1, d5
fadd d18, d21, d18
ldr d1, [sp, #11008] ; 8-byte Folded Reload
ldr d5, [sp, #9704] ; 8-byte Folded Reload
fmul d21, d5, d1
fsub d18, d18, d21
ldr d1, [sp, #12328] ; 8-byte Folded Reload
ldr d21, [sp, #7856] ; 8-byte Folded Reload
fmul d21, d1, d21
fadd d18, d21, d18
ldr d1, [sp, #10640] ; 8-byte Folded Reload
ldr d25, [sp, #9696] ; 8-byte Folded Reload
fmul d21, d25, d1
fadd d18, d21, d18
ldur d1, [x29, #-224] ; 8-byte Folded Reload
ldr d21, [sp, #7216] ; 8-byte Folded Reload
fmul d21, d1, d21
fsub d26, d18, d21
ldr d1, [sp, #10984] ; 8-byte Folded Reload
fmul d18, d5, d1
ldr d1, [sp, #7832] ; 8-byte Folded Reload
fmul d21, d12, d1
fadd d18, d18, d21
str d26, [sp, #9872] ; 8-byte Folded Spill
fsub d18, d26, d18
ldr d1, [sp, #10648] ; 8-byte Folded Reload
fmul d21, d25, d1
fadd d18, d21, d18
ldr d1, [sp, #12112] ; 8-byte Folded Reload
ldr d21, [sp, #7208] ; 8-byte Folded Reload
fmul d21, d1, d21
fsub d21, d18, d21
ldr d1, [sp, #11016] ; 8-byte Folded Reload
fmul d18, d5, d1
str d21, [sp, #9864] ; 8-byte Folded Spill
fsub d18, d21, d18
ldr d1, [sp, #11408] ; 8-byte Folded Reload
ldr d21, [sp, #7848] ; 8-byte Folded Reload
fmul d21, d1, d21
fadd d18, d21, d18
ldr d1, [sp, #10656] ; 8-byte Folded Reload
fmul d21, d25, d1
fadd d18, d21, d18
ldr d1, [sp, #12080] ; 8-byte Folded Reload
ldr d21, [sp, #7200] ; 8-byte Folded Reload
fmul d21, d1, d21
fsub d15, d18, d21
cbz x8, LBB19_82
; %bb.81:
fmul d18, d13, d5
fsub d18, d15, d18
ldr d1, [sp, #7040] ; 8-byte Folded Reload
fmul d21, d31, d1
fadd d18, d21, d18
fmul d21, d23, d25
fadd d18, d21, d18
ldr d1, [sp, #12040] ; 8-byte Folded Reload
ldr d5, [sp, #7048] ; 8-byte Folded Reload
fmul d21, d1, d5
fsub d18, d18, d21
str d18, [x8, #176]
LBB19_82:
str d19, [sp, #9976] ; 8-byte Folded Spill
ldr d1, [sp, #9960] ; 8-byte Folded Reload
ldr d5, [sp, #10368] ; 8-byte Folded Reload
fmul d18, d5, d1
ldr d1, [sp, #8120] ; 8-byte Folded Reload
fmul d21, d10, d1
fadd d18, d18, d21
ldr d1, [sp, #9992] ; 8-byte Folded Reload
ldr d5, [sp, #10360] ; 8-byte Folded Reload
fmul d21, d5, d1
fsub d18, d18, d21
ldr d1, [sp, #7936] ; 8-byte Folded Reload
fmul d21, d11, d1
fsub d18, d18, d21
ldr d1, [sp, #9968] ; 8-byte Folded Reload
ldr d5, [sp, #10384] ; 8-byte Folded Reload
fmul d21, d5, d1
ldr d1, [sp, #11936] ; 8-byte Folded Reload
ldr d5, [sp, #9168] ; 8-byte Folded Reload
fmul d25, d1, d5
fadd d21, d25, d21
ldr d1, [sp, #10000] ; 8-byte Folded Reload
ldr d5, [sp, #10376] ; 8-byte Folded Reload
fmul d25, d5, d1
fadd d21, d25, d21
ldr d1, [sp, #11944] ; 8-byte Folded Reload
ldr d5, [sp, #9056] ; 8-byte Folded Reload
fmul d25, d1, d5
fsub d21, d21, d25
ldr d1, [sp, #11752] ; 8-byte Folded Reload
ldr d5, [sp, #7968] ; 8-byte Folded Reload
fmul d25, d1, d5
fsub d21, d21, d25
ldr d1, [sp, #12184] ; 8-byte Folded Reload
ldr d5, [sp, #8832] ; 8-byte Folded Reload
fmul d25, d1, d5
fadd d21, d25, d21
ldr d1, [sp, #11672] ; 8-byte Folded Reload
ldr d5, [sp, #7960] ; 8-byte Folded Reload
fmul d25, d1, d5
fsub d21, d21, d25
ldr d1, [sp, #12176] ; 8-byte Folded Reload
ldr d5, [sp, #8792] ; 8-byte Folded Reload
fmul d25, d1, d5
fadd d21, d25, d21
fadd d0, d0, d21
fadd d0, d20, d0
str d0, [sp, #11400] ; 8-byte Folded Spill
fadd d0, d18, d0
ldr d1, [sp, #11024] ; 8-byte Folded Reload
ldr d5, [sp, #9736] ; 8-byte Folded Reload
fmul d18, d5, d1
fsub d0, d0, d18
ldr d1, [sp, #12120] ; 8-byte Folded Reload
ldr d5, [sp, #8152] ; 8-byte Folded Reload
fmul d18, d1, d5
fadd d0, d18, d0
ldr d1, [sp, #10664] ; 8-byte Folded Reload
ldr d5, [sp, #9728] ; 8-byte Folded Reload
fmul d18, d5, d1
fadd d0, d18, d0
ldr d1, [sp, #12128] ; 8-byte Folded Reload
ldr d5, [sp, #7952] ; 8-byte Folded Reload
fmul d18, d1, d5
fsub d0, d0, d18
ldur d1, [x29, #-168] ; 8-byte Folded Reload
ldr d5, [sp, #8144] ; 8-byte Folded Reload
fmul d18, d1, d5
fadd d0, d18, d0
ldur d1, [x29, #-176] ; 8-byte Folded Reload
ldr d5, [sp, #7944] ; 8-byte Folded Reload
fmul d18, d1, d5
fsub d0, d0, d18
ldr d1, [sp, #11208] ; 8-byte Folded Reload
ldr d5, [sp, #9816] ; 8-byte Folded Reload
fmul d18, d1, d5
fsub d0, d0, d18
ldr d1, [sp, #11200] ; 8-byte Folded Reload
ldr d5, [sp, #9824] ; 8-byte Folded Reload
fmul d18, d1, d5
fadd d0, d18, d0
ldr d1, [sp, #11032] ; 8-byte Folded Reload
ldr d21, [sp, #9712] ; 8-byte Folded Reload
fmul d18, d21, d1
fsub d0, d0, d18
ldr d1, [sp, #12272] ; 8-byte Folded Reload
ldr d5, [sp, #8136] ; 8-byte Folded Reload
fmul d18, d1, d5
fadd d0, d18, d0
ldr d1, [sp, #10672] ; 8-byte Folded Reload
ldr d5, [sp, #9720] ; 8-byte Folded Reload
fmul d18, d5, d1
fadd d0, d18, d0
ldr d1, [sp, #12344] ; 8-byte Folded Reload
ldr d18, [sp, #7928] ; 8-byte Folded Reload
fmul d18, d1, d18
fsub d19, d0, d18
ldr d0, [sp, #10992] ; 8-byte Folded Reload
fmul d0, d21, d0
ldr d1, [sp, #8112] ; 8-byte Folded Reload
fmul d18, d9, d1
fadd d0, d0, d18
str d19, [sp, #10000] ; 8-byte Folded Spill
fsub d0, d19, d0
fmul d18, d5, d30
fadd d0, d18, d0
ldr d1, [sp, #12032] ; 8-byte Folded Reload
ldr d18, [sp, #7920] ; 8-byte Folded Reload
fmul d18, d1, d18
fsub d0, d0, d18
ldr d1, [sp, #11040] ; 8-byte Folded Reload
fmul d18, d21, d1
str d0, [sp, #9992] ; 8-byte Folded Spill
fsub d18, d0, d18
ldr d1, [sp, #11480] ; 8-byte Folded Reload
ldr d0, [sp, #8128] ; 8-byte Folded Reload
fmul d20, d1, d0
fadd d18, d20, d18
fmul d20, d5, d29
fadd d18, d20, d18
ldr d1, [sp, #12024] ; 8-byte Folded Reload
ldr d0, [sp, #7912] ; 8-byte Folded Reload
fmul d20, d1, d0
fsub d19, d18, d20
ldr q26, [sp, #12208] ; 16-byte Folded Reload
cbz x8, LBB19_84
; %bb.83:
fmul d18, d13, d21
fsub d18, d19, d18
ldr d0, [sp, #7328] ; 8-byte Folded Reload
fmul d20, d31, d0
fadd d18, d20, d18
fmul d20, d23, d5
fadd d18, d20, d18
ldr d1, [sp, #12040] ; 8-byte Folded Reload
ldr d0, [sp, #7336] ; 8-byte Folded Reload
fmul d20, d1, d0
fsub d18, d18, d20
str d18, [x8, #184]
LBB19_84:
str d19, [sp, #9968] ; 8-byte Folded Spill
ldr d0, [sp, #8848] ; 8-byte Folded Reload
fmul d18, d12, d0
ldr d0, [sp, #10984] ; 8-byte Folded Reload
ldr d5, [sp, #10808] ; 8-byte Folded Reload
fmul d20, d5, d0
fsub d18, d18, d20
ldr d0, [sp, #6056] ; 8-byte Folded Reload
ldr d1, [sp, #11168] ; 8-byte Folded Reload
fmul d20, d1, d0
ldr d1, [sp, #6104] ; 8-byte Folded Reload
ldr d0, [sp, #10816] ; 8-byte Folded Reload
fmul d21, d0, d1
fsub d20, d20, d21
ldr d0, [sp, #10264] ; 8-byte Folded Reload
ldr d1, [sp, #10288] ; 8-byte Folded Reload
fmul d21, d0, d1
fadd d20, d21, d20
ldr d1, [sp, #11912] ; 8-byte Folded Reload
ldr d0, [sp, #10256] ; 8-byte Folded Reload
fmul d21, d1, d0
fsub d20, d20, d21
ldr d1, [sp, #12288] ; 8-byte Folded Reload
ldr d0, [sp, #8968] ; 8-byte Folded Reload
fmul d21, d1, d0
fsub d20, d20, d21
ldr d1, [sp, #11744] ; 8-byte Folded Reload
ldr d0, [sp, #10168] ; 8-byte Folded Reload
fmul d21, d1, d0
fsub d20, d20, d21
ldr d1, [sp, #12168] ; 8-byte Folded Reload
ldr d0, [sp, #10792] ; 8-byte Folded Reload
fmul d21, d1, d0
fsub d20, d20, d21
ldr d1, [sp, #9576] ; 8-byte Folded Reload
ldr d0, [sp, #10248] ; 8-byte Folded Reload
fmul d21, d1, d0
fadd d20, d21, d20
ldr d1, [sp, #10056] ; 8-byte Folded Reload
ldr d0, [sp, #10240] ; 8-byte Folded Reload
fmul d21, d1, d0
fadd d0, d21, d20
ldr d1, [sp, #11000] ; 8-byte Folded Reload
fmul d20, d5, d1
str d0, [sp, #11184] ; 8-byte Folded Spill
fsub d20, d0, d20
ldr d1, [sp, #12256] ; 8-byte Folded Reload
ldr d25, [sp, #8960] ; 8-byte Folded Reload
fmul d21, d1, d25
fsub d20, d20, d21
ldr d1, [sp, #10632] ; 8-byte Folded Reload
ldr d0, [sp, #11176] ; 8-byte Folded Reload
fmul d21, d0, d1
fadd d20, d21, d20
ldr d1, [sp, #12264] ; 8-byte Folded Reload
ldr d19, [sp, #10160] ; 8-byte Folded Reload
fmul d21, d1, d19
fsub d20, d20, d21
ldur d1, [x29, #-160] ; 8-byte Folded Reload
ldr d21, [sp, #8944] ; 8-byte Folded Reload
fmul d21, d1, d21
fadd d20, d21, d20
ldur d1, [x29, #-192] ; 8-byte Folded Reload
ldr d21, [sp, #10152] ; 8-byte Folded Reload
fmul d21, d1, d21
fsub d20, d20, d21
ldr d1, [sp, #11008] ; 8-byte Folded Reload
fmul d21, d5, d1
fsub d20, d20, d21
ldr d1, [sp, #12328] ; 8-byte Folded Reload
fmul d21, d1, d25
fsub d20, d20, d21
ldr d1, [sp, #10640] ; 8-byte Folded Reload
fmul d21, d0, d1
fadd d20, d21, d20
ldur d1, [x29, #-224] ; 8-byte Folded Reload
fmul d21, d1, d19
fsub d1, d20, d21
str d1, [sp, #12168] ; 8-byte Folded Spill
fadd d18, d18, d1
ldr d1, [sp, #10648] ; 8-byte Folded Reload
fmul d21, d0, d1
fadd d18, d18, d21
ldr d1, [sp, #12112] ; 8-byte Folded Reload
ldr d19, [sp, #9808] ; 8-byte Folded Reload
fmul d21, d1, d19
fsub d19, d18, d21
ldr d1, [sp, #11016] ; 8-byte Folded Reload
fmul d18, d5, d1
str d19, [sp, #11016] ; 8-byte Folded Spill
fsub d18, d19, d18
ldr d1, [sp, #11408] ; 8-byte Folded Reload
ldr d19, [sp, #8952] ; 8-byte Folded Reload
fmul d21, d1, d19
fsub d18, d18, d21
ldr d1, [sp, #10656] ; 8-byte Folded Reload
fmul d21, d0, d1
fadd d18, d21, d18
ldr d1, [sp, #12080] ; 8-byte Folded Reload
ldr d19, [sp, #9152] ; 8-byte Folded Reload
fmul d21, d1, d19
fsub d3, d18, d21
ldr q10, [sp, #6336] ; 16-byte Folded Reload
ldr q19, [sp, #8816] ; 16-byte Folded Reload
cbz x8, LBB19_86
; %bb.85:
fmul d21, d13, d5
fsub d21, d3, d21
ldr d1, [sp, #11416] ; 8-byte Folded Reload
ldr d5, [sp, #7688] ; 8-byte Folded Reload
fmul d11, d1, d5
fsub d21, d21, d11
fmul d11, d23, d0
fadd d21, d11, d21
ldr d1, [sp, #12040] ; 8-byte Folded Reload
ldr d0, [sp, #7696] ; 8-byte Folded Reload
fmul d11, d1, d0
fsub d21, d21, d11
str d21, [x8, #192]
LBB19_86:
str d3, [sp, #11008] ; 8-byte Folded Spill
ldr d0, [sp, #9904] ; 8-byte Folded Reload
fmul d21, d9, d0
ldr d1, [sp, #10992] ; 8-byte Folded Reload
ldr d5, [sp, #11504] ; 8-byte Folded Reload
fmul d11, d5, d1
fsub d21, d21, d11
ldr d0, [sp, #6080] ; 8-byte Folded Reload
ldr d1, [sp, #11520] ; 8-byte Folded Reload
fmul d11, d1, d0
ldr d1, [sp, #6128] ; 8-byte Folded Reload
ldr d0, [sp, #11512] ; 8-byte Folded Reload
fmul d14, d0, d1
fsub d11, d11, d14
ldr d0, [sp, #10864] ; 8-byte Folded Reload
ldr d1, [sp, #11424] ; 8-byte Folded Reload
fmul d14, d0, d1
fadd d11, d14, d11
ldr d1, [sp, #11752] ; 8-byte Folded Reload
ldr d0, [sp, #10856] ; 8-byte Folded Reload
fmul d14, d1, d0
fsub d11, d11, d14
ldr d1, [sp, #12184] ; 8-byte Folded Reload
ldr d0, [sp, #9928] ; 8-byte Folded Reload
fmul d14, d1, d0
fsub d11, d11, d14
ldr d1, [sp, #11672] ; 8-byte Folded Reload
ldr d0, [sp, #10944] ; 8-byte Folded Reload
fmul d14, d1, d0
fsub d11, d11, d14
ldr d1, [sp, #12176] ; 8-byte Folded Reload
ldr d0, [sp, #10904] ; 8-byte Folded Reload
fmul d14, d1, d0
fsub d11, d11, d14
ldr d1, [sp, #10008] ; 8-byte Folded Reload
ldr d0, [sp, #10848] ; 8-byte Folded Reload
fmul d14, d1, d0
fadd d11, d14, d11
ldr d1, [sp, #10712] ; 8-byte Folded Reload
ldr d0, [sp, #10840] ; 8-byte Folded Reload
fmul d14, d1, d0
fadd d0, d14, d11
ldr d1, [sp, #11024] ; 8-byte Folded Reload
fmul d11, d5, d1
str d0, [sp, #11200] ; 8-byte Folded Spill
fsub d11, d0, d11
ldr d1, [sp, #12120] ; 8-byte Folded Reload
ldr d20, [sp, #9920] ; 8-byte Folded Reload
fmul d14, d1, d20
fsub d11, d11, d14
ldr d1, [sp, #10664] ; 8-byte Folded Reload
ldr d0, [sp, #11640] ; 8-byte Folded Reload
fmul d14, d0, d1
fadd d11, d14, d11
ldr d1, [sp, #12128] ; 8-byte Folded Reload
ldr d27, [sp, #10936] ; 8-byte Folded Reload
fmul d14, d1, d27
fsub d11, d11, d14
ldur d1, [x29, #-168] ; 8-byte Folded Reload
ldr d25, [sp, #9912] ; 8-byte Folded Reload
fmul d14, d1, d25
fadd d11, d14, d11
ldur d1, [x29, #-176] ; 8-byte Folded Reload
ldr d25, [sp, #10352] ; 8-byte Folded Reload
fmul d14, d1, d25
fsub d11, d11, d14
ldr d1, [sp, #11032] ; 8-byte Folded Reload
fmul d14, d5, d1
fsub d11, d11, d14
ldr d1, [sp, #12272] ; 8-byte Folded Reload
fmul d14, d1, d20
fsub d11, d11, d14
ldr d1, [sp, #10672] ; 8-byte Folded Reload
fmul d14, d0, d1
fadd d11, d14, d11
ldr d1, [sp, #12344] ; 8-byte Folded Reload
fmul d14, d1, d27
fsub d1, d11, d14
str d1, [sp, #12184] ; 8-byte Folded Spill
fadd d21, d21, d1
fmul d11, d0, d30
fadd d21, d21, d11
ldr d1, [sp, #12032] ; 8-byte Folded Reload
ldr d20, [sp, #10832] ; 8-byte Folded Reload
fmul d11, d1, d20
fsub d18, d21, d11
ldr d1, [sp, #11040] ; 8-byte Folded Reload
fmul d11, d5, d1
str d18, [sp, #12176] ; 8-byte Folded Spill
fsub d11, d18, d11
ldr d1, [sp, #11480] ; 8-byte Folded Reload
ldr d20, [sp, #9896] ; 8-byte Folded Reload
fmul d1, d1, d20
fsub d1, d11, d1
fmul d11, d0, d29
fadd d1, d11, d1
ldr d25, [sp, #12024] ; 8-byte Folded Reload
ldr d20, [sp, #10568] ; 8-byte Folded Reload
fmul d11, d25, d20
fsub d18, d1, d11
ldr q3, [sp, #6256] ; 16-byte Folded Reload
cbz x8, LBB19_88
; %bb.87:
fmul d1, d13, d5
fsub d1, d18, d1
ldr d25, [sp, #11416] ; 8-byte Folded Reload
ldr d5, [sp, #8064] ; 8-byte Folded Reload
fmul d9, d25, d5
fsub d1, d1, d9
fmul d9, d23, d0
fadd d1, d9, d1
ldr d25, [sp, #12040] ; 8-byte Folded Reload
ldr d0, [sp, #8072] ; 8-byte Folded Reload
fmul d9, d25, d0
fsub d1, d1, d9
str d1, [x8, #200]
LBB19_88:
str d18, [sp, #11040] ; 8-byte Folded Spill
ldr d1, [sp, #11752] ; 8-byte Folded Reload
ldr d0, [sp, #10624] ; 8-byte Folded Reload
fmul d1, d1, d0
ldr d0, [sp, #11624] ; 8-byte Folded Reload
fsub d1, d0, d1
ldr d23, [sp, #11672] ; 8-byte Folded Reload
ldr d0, [sp, #10616] ; 8-byte Folded Reload
fmul d9, d23, d0
fsub d1, d1, d9
ldr d0, [sp, #11560] ; 8-byte Folded Reload
fadd d5, d0, d1
ldr d1, [sp, #12120] ; 8-byte Folded Reload
ldr d0, [sp, #6040] ; 8-byte Folded Reload
fmul d1, d1, d0
str d5, [sp, #11752] ; 8-byte Folded Spill
fadd d1, d1, d5
ldr d25, [sp, #12128] ; 8-byte Folded Reload
ldr d0, [sp, #11352] ; 8-byte Folded Reload
fmul d9, d25, d0
fsub d1, d1, d9
ldr d23, [sp, #11208] ; 8-byte Folded Reload
ldr d0, [sp, #11784] ; 8-byte Folded Reload
fmul d9, d23, d0
fsub d1, d1, d9
ldr d0, [sp, #11432] ; 8-byte Folded Reload
fadd d1, d0, d1
ldr d25, [sp, #12272] ; 8-byte Folded Reload
fmul d9, d25, d17
fadd d1, d9, d1
ldr d25, [sp, #12344] ; 8-byte Folded Reload
ldr d27, [sp, #11464] ; 8-byte Folded Reload
fmul d9, d25, d27
fsub d0, d1, d9
ldr d1, [sp, #11296] ; 8-byte Folded Reload
fmul d1, d1, d17
str d0, [sp, #11672] ; 8-byte Folded Spill
fsub d1, d0, d1
ldr d25, [sp, #12032] ; 8-byte Folded Reload
fmul d9, d25, d27
fsub d0, d1, d9
ldr d1, [sp, #11480] ; 8-byte Folded Reload
fmul d1, d1, d17
str d0, [sp, #11624] ; 8-byte Folded Spill
fadd d1, d1, d0
ldr d25, [sp, #12024] ; 8-byte Folded Reload
fmul d31, d25, d27
fsub d1, d1, d31
ldr q11, [sp, #8624] ; 16-byte Folded Reload
ldr q29, [sp, #5072] ; 16-byte Folded Reload
ldr q23, [sp, #4848] ; 16-byte Folded Reload
ldr d21, [sp, #6480] ; 8-byte Folded Reload
ldr q14, [sp, #5920] ; 16-byte Folded Reload
str d1, [sp, #11560] ; 8-byte Folded Spill
fadd d0, d16, d4
cbz x8, LBB19_90
; %bb.89:
ldr d4, [sp, #11416] ; 8-byte Folded Reload
fmul d4, d4, d17
fadd d4, d4, d1
ldr d16, [sp, #12040] ; 8-byte Folded Reload
fmul d31, d16, d27
fsub d4, d4, d31
str d0, [sp, #11784] ; 8-byte Folded Spill
fadd d7, d0, d7
fadd d2, d7, d2
fadd d0, d2, d22
str d0, [sp, #12288] ; 8-byte Folded Spill
fsub d0, d6, d0
str d0, [sp, #11432] ; 8-byte Folded Spill
fsub d1, d0, d24
ldr d0, [sp, #4840] ; 8-byte Folded Reload
str d1, [sp, #11352] ; 8-byte Folded Spill
fsub d22, d1, d0
stp d4, d22, [x8, #208]
b LBB19_91
LBB19_90:
str d0, [sp, #11784] ; 8-byte Folded Spill
fadd d4, d0, d7
fadd d2, d4, d2
fadd d0, d2, d22
str d0, [sp, #12288] ; 8-byte Folded Spill
fsub d0, d6, d0
str d0, [sp, #11432] ; 8-byte Folded Spill
fsub d0, d0, d24
str d0, [sp, #11352] ; 8-byte Folded Spill
LBB19_91:
ldr q13, [sp, #6320] ; 16-byte Folded Reload
ldr d20, [sp, #6032] ; 8-byte Folded Reload
ldr q16, [sp, #6240] ; 16-byte Folded Reload
ldr q25, [sp, #6224] ; 16-byte Folded Reload
ldr q7, [sp, #8608] ; 16-byte Folded Reload
ldr q30, [sp, #8592] ; 16-byte Folded Reload
ldr q1, [sp, #5872] ; 16-byte Folded Reload
ldr q9, [sp, #11072] ; 16-byte Folded Reload
ldr d2, [sp, #11912] ; 8-byte Folded Reload
ldr d0, [sp, #11384] ; 8-byte Folded Reload
fmul d4, d2, d0
ldr d0, [sp, #12048] ; 8-byte Folded Reload
fsub d4, d0, d4
ldr d2, [sp, #11744] ; 8-byte Folded Reload
ldr d0, [sp, #11376] ; 8-byte Folded Reload
fmul d22, d2, d0
fsub d4, d4, d22
ldr d0, [sp, #11728] ; 8-byte Folded Reload
fadd d4, d0, d4
ldr d2, [sp, #12256] ; 8-byte Folded Reload
ldr d0, [sp, #6024] ; 8-byte Folded Reload
fmul d22, d2, d0
str d4, [sp, #12048] ; 8-byte Folded Spill
fadd d22, d22, d4
ldr d2, [sp, #12264] ; 8-byte Folded Reload
ldr d0, [sp, #11528] ; 8-byte Folded Reload
fmul d24, d2, d0
fsub d22, d22, d24
ldr d2, [sp, #11192] ; 8-byte Folded Reload
ldr d0, [sp, #12280] ; 8-byte Folded Reload
fmul d24, d2, d0
fsub d22, d22, d24
ldr d0, [sp, #11472] ; 8-byte Folded Reload
fadd d22, d0, d22
ldr d2, [sp, #12328] ; 8-byte Folded Reload
fmul d24, d2, d20
fadd d22, d24, d22
ldur d2, [x29, #-224] ; 8-byte Folded Reload
ldr d18, [sp, #11720] ; 8-byte Folded Reload
fmul d24, d2, d18
fsub d0, d22, d24
ldr d2, [sp, #11304] ; 8-byte Folded Reload
fmul d24, d2, d20
str d0, [sp, #11744] ; 8-byte Folded Spill
fsub d24, d0, d24
ldr d2, [sp, #12112] ; 8-byte Folded Reload
fmul d31, d2, d18
fsub d12, d24, d31
ldr q0, [sp, #5056] ; 16-byte Folded Reload
ldr q5, [sp, #5024] ; 16-byte Folded Reload
ldr d2, [sp, #5016] ; 8-byte Folded Reload
ldr q6, [sp, #4704] ; 16-byte Folded Reload
str d12, [sp, #11728] ; 8-byte Folded Spill
cbz x8, LBB19_118
; %bb.92:
str d28, [sp, #12280] ; 8-byte Folded Spill
fmov d28, d27
fmov d27, d17
ldr q17, [sp, #4560] ; 16-byte Folded Reload
ldr q4, [sp, #944] ; 16-byte Folded Reload
mov.d v17[1], v4[0]
mov.d v13[1], v23[0]
mov.16b v31, v11
ldr q11, [sp, #4544] ; 16-byte Folded Reload
ldr q4, [sp, #896] ; 16-byte Folded Reload
mov.d v11[1], v4[0]
mov.d v29[1], v25[0]
fmov d18, d15
ldr q15, [sp, #768] ; 16-byte Folded Reload
ldr q4, [sp, #352] ; 16-byte Folded Reload
mov.d v15[1], v4[0]
mov.d v14[1], v1[0]
ldr q1, [sp, #752] ; 16-byte Folded Reload
ldr q4, [sp, #320] ; 16-byte Folded Reload
mov.d v1[1], v4[0]
ldr q4, [sp, #336] ; 16-byte Folded Reload
mov.d v6[1], v4[0]
mov.16b v23, v7
ldp q24, q7, [sp, #32] ; 32-byte Folded Reload
mov.d v7[1], v19[0]
ldr q4, [sp, #7072] ; 16-byte Folded Reload
ldr q19, [sp, #6768] ; 16-byte Folded Reload
mov.d v19[1], v4[0]
str q19, [sp, #6768] ; 16-byte Folded Spill
ldr q4, [sp, #7296] ; 16-byte Folded Reload
mov.d v24[1], v4[0]
ldr q19, [sp, #4576] ; 16-byte Folded Reload
ldr q4, [sp, #7056] ; 16-byte Folded Reload
mov.d v19[1], v4[0]
mov.16b v25, v30
ldr q30, [sp, #7744] ; 16-byte Folded Reload
mov.d v30[1], v26[0]
ldr q4, [sp, #3808] ; 16-byte Folded Reload
mov.d v9[1], v4[0]
str q9, [sp, #11072] ; 16-byte Folded Spill
mov.16b v22, v10
ldr q10, [sp, #7728] ; 16-byte Folded Reload
ldr q4, [sp, #11696] ; 16-byte Folded Reload
mov.d v10[1], v4[0]
ldr q9, [sp, #8976] ; 16-byte Folded Reload
ldr q4, [sp, #3776] ; 16-byte Folded Reload
mov.d v9[1], v4[0]
ldr q4, [sp, #4496] ; 16-byte Folded Reload
mov.d v0[1], v4[0]
mov.d v22[1], v16[0]
ldr q4, [sp, #912] ; 16-byte Folded Reload
mov.d v5[1], v4[0]
mov.d v31[1], v3[0]
str q31, [sp, #8624] ; 16-byte Folded Spill
ldr q4, [sp, #160] ; 16-byte Folded Reload
ldr q3, [sp, #6800] ; 16-byte Folded Reload
mov.d v3[1], v4[0]
str q3, [sp, #6800] ; 16-byte Folded Spill
ldr q4, [sp, #4592] ; 16-byte Folded Reload
mov.d v25[1], v4[0]
str q25, [sp, #8592] ; 16-byte Folded Spill
ldr q4, [sp, #144] ; 16-byte Folded Reload
ldr q3, [sp, #6784] ; 16-byte Folded Reload
mov.d v3[1], v4[0]
str q3, [sp, #6784] ; 16-byte Folded Spill
ldr q4, [sp, #4608] ; 16-byte Folded Reload
mov.d v23[1], v4[0]
str q23, [sp, #8608] ; 16-byte Folded Spill
ldr q4, [sp, #9024] ; 16-byte Folded Reload
ldr q3, [sp, #5264] ; 16-byte Folded Reload
mov.d v3[1], v4[0]
str q3, [sp, #5264] ; 16-byte Folded Spill
ldr q4, [sp, #7360] ; 16-byte Folded Reload
ldr q3, [sp, #6736] ; 16-byte Folded Reload
mov.d v3[1], v4[0]
str q3, [sp, #6736] ; 16-byte Folded Spill
ldr q4, [sp, #9008] ; 16-byte Folded Reload
ldr q3, [sp, #5248] ; 16-byte Folded Reload
mov.d v3[1], v4[0]
str q3, [sp, #5248] ; 16-byte Folded Spill
ldr q4, [sp, #7344] ; 16-byte Folded Reload
ldr q3, [sp, #6752] ; 16-byte Folded Reload
mov.d v3[1], v4[0]
str q3, [sp, #6752] ; 16-byte Folded Spill
ldr q4, [sp, #11648] ; 16-byte Folded Reload
ldr q3, [sp, #11328] ; 16-byte Folded Reload
mov.d v3[1], v4[0]
str q3, [sp, #11328] ; 16-byte Folded Spill
ldr q4, [sp, #7024] ; 16-byte Folded Reload
ldr q3, [sp, #10912] ; 16-byte Folded Reload
mov.d v3[1], v4[0]
str q3, [sp, #10912] ; 16-byte Folded Spill
ldr q4, [sp, #11600] ; 16-byte Folded Reload
ldr q3, [sp, #11248] ; 16-byte Folded Reload
mov.d v3[1], v4[0]
str q3, [sp, #11248] ; 16-byte Folded Spill
ldr q4, [sp, #6912] ; 16-byte Folded Reload
ldr q3, [sp, #11104] ; 16-byte Folded Reload
mov.d v3[1], v4[0]
str q3, [sp, #11104] ; 16-byte Folded Spill
ldr d3, [sp, #11320] ; 8-byte Folded Reload
fadd d26, d3, d8
ldr d3, [sp, #4968] ; 8-byte Folded Reload
ldr d4, [sp, #1464] ; 8-byte Folded Reload
fsub d31, d3, d4
ldr d3, [sp, #5608] ; 8-byte Folded Reload
fadd d31, d31, d3
ldr d3, [sp, #4760] ; 8-byte Folded Reload
fadd d8, d21, d3
ldr d3, [sp, #1512] ; 8-byte Folded Reload
fadd d16, d2, d3
ldr d2, [sp, #4520] ; 8-byte Folded Reload
ldr d3, [sp, #1048] ; 8-byte Folded Reload
fadd d4, d2, d3
ldr d2, [sp, #784] ; 8-byte Folded Reload
ldr d3, [sp, #744] ; 8-byte Folded Reload
fadd d3, d2, d3
ldr d2, [sp, #6536] ; 8-byte Folded Reload
ldr d23, [sp, #5088] ; 8-byte Folded Reload
fadd d2, d2, d23
ldr d23, [sp, #4896] ; 8-byte Folded Reload
ldr d25, [sp, #4872] ; 8-byte Folded Reload
fadd d25, d23, d25
str d31, [x8, #240]
str d8, [x8, #248]
ldr d21, [sp, #464] ; 8-byte Folded Reload
str d21, [x8, #264]
ldr d21, [sp, #184] ; 8-byte Folded Reload
str d21, [x8, #272]
ldr d21, [sp, #16] ; 8-byte Folded Reload
str d21, [x8, #280]
ldr d21, [sp, #1984] ; 8-byte Folded Reload
str d21, [x8, #288]
ldr d21, [sp, #7096] ; 8-byte Folded Reload
str d21, [x8, #296]
ldr d21, [sp, #7432] ; 8-byte Folded Reload
str d21, [x8, #304]
ldr d21, [sp, #7704] ; 8-byte Folded Reload
str d21, [x8, #312]
ldr d21, [sp, #8840] ; 8-byte Folded Reload
str d21, [x8, #320]
str d28, [x8, #328]
str d16, [x8, #344]
str d4, [x8, #352]
str d3, [x8, #360]
ldr d3, [sp, #496] ; 8-byte Folded Reload
str d3, [x8, #368]
ldr d3, [sp, #240] ; 8-byte Folded Reload
str d3, [x8, #376]
ldr d3, [sp, #24] ; 8-byte Folded Reload
str d3, [x8, #384]
ldr d3, [sp, #1992] ; 8-byte Folded Reload
str d3, [x8, #392]
ldr d3, [sp, #7320] ; 8-byte Folded Reload
str d3, [x8, #400]
ldr d3, [sp, #9048] ; 8-byte Folded Reload
str d3, [x8, #408]
ldr d3, [sp, #7768] ; 8-byte Folded Reload
str d3, [x8, #416]
ldr d3, [sp, #9072] ; 8-byte Folded Reload
str d3, [x8, #424]
ldr d3, [sp, #8032] ; 8-byte Folded Reload
str d3, [x8, #432]
ldr d3, [sp, #3888] ; 8-byte Folded Reload
str d3, [x8, #440]
ldr d3, [sp, #3848] ; 8-byte Folded Reload
str d3, [x8, #448]
ldr d3, [sp, #3760] ; 8-byte Folded Reload
str d3, [x8, #456]
str d2, [x8, #464]
str d25, [x8, #472]
ldr d2, [sp, #6440] ; 8-byte Folded Reload
str d2, [x8, #488]
ldr d2, [sp, #6432] ; 8-byte Folded Reload
str d2, [x8, #496]
ldr d2, [sp, #8664] ; 8-byte Folded Reload
str d2, [x8, #504]
ldr d2, [sp, #6424] ; 8-byte Folded Reload
str d2, [x8, #512]
ldr d2, [sp, #7088] ; 8-byte Folded Reload
str d2, [x8, #520]
ldr d2, [sp, #7384] ; 8-byte Folded Reload
str d2, [x8, #528]
str d27, [x8, #552]
ldr d2, [sp, #4816] ; 8-byte Folded Reload
str d2, [x8, #656]
ldr d2, [sp, #5952] ; 8-byte Folded Reload
str d2, [x8, #664]
ldr d2, [sp, #5896] ; 8-byte Folded Reload
str d2, [x8, #672]
ldr d2, [sp, #5864] ; 8-byte Folded Reload
str d2, [x8, #680]
ldr d2, [sp, #12280] ; 8-byte Folded Reload
str d2, [x8, #688]
str d18, [x8, #696]
ldr d2, [sp, #11008] ; 8-byte Folded Reload
str d2, [x8, #704]
ldr d2, [sp, #11408] ; 8-byte Folded Reload
fmul d2, d2, d20
fadd d2, d2, d12
ldr d3, [sp, #12080] ; 8-byte Folded Reload
ldr d18, [sp, #11720] ; 8-byte Folded Reload
fmul d3, d3, d18
fsub d8, d2, d3
ldr d2, [sp, #11416] ; 8-byte Folded Reload
fmul d2, d2, d20
fadd d2, d2, d8
ldr d3, [sp, #12040] ; 8-byte Folded Reload
fmul d3, d3, d18
fsub d21, d2, d3
str d26, [sp, #11912] ; 8-byte Folded Spill
ldr d2, [sp, #11368] ; 8-byte Folded Reload
fadd d2, d26, d2
ldr d3, [sp, #11360] ; 8-byte Folded Reload
fadd d2, d2, d3
ldr d3, [sp, #11392] ; 8-byte Folded Reload
fadd d25, d2, d3
ldr q31, [sp, #10016] ; 16-byte Folded Reload
fmul.2d v2, v17, v31[0]
ldr q27, [sp, #10032] ; 16-byte Folded Reload
fmul.2d v3, v13, v27[0]
fsub.2d v2, v2, v3
fmul.2d v3, v11, v27[0]
fsub.2d v2, v2, v3
fmul.2d v3, v29, v31[0]
fadd.2d v2, v3, v2
fmul.2d v3, v17, v27[0]
fmul.2d v4, v13, v31[0]
fadd.2d v3, v4, v3
fmul.2d v4, v11, v31[0]
fadd.2d v3, v3, v4
fmul.2d v4, v29, v27[0]
fadd.2d v3, v4, v3
ldr q29, [sp, #10720] ; 16-byte Folded Reload
fmul.2d v2, v2, v29[0]
ldr q28, [sp, #11216] ; 16-byte Folded Reload
fmul.2d v3, v3, v28[0]
fsub.2d v23, v2, v3
fmul.2d v2, v15, v31[0]
fmul.2d v3, v14, v27[0]
fsub.2d v2, v2, v3
fmul.2d v3, v1, v27[0]
fsub.2d v2, v2, v3
fmul.2d v3, v6, v31[0]
fadd.2d v2, v3, v2
fmul.2d v3, v15, v27[0]
fmul.2d v4, v14, v31[0]
fadd.2d v3, v4, v3
fmul.2d v4, v1, v31[0]
fadd.2d v3, v3, v4
fmul.2d v4, v6, v27[0]
fadd.2d v3, v4, v3
fmul.2d v2, v2, v29[0]
fmul.2d v3, v3, v28[0]
fsub.2d v26, v2, v3
fmul.2d v2, v7, v31[0]
ldr q1, [sp, #6768] ; 16-byte Folded Reload
fmul.2d v3, v1, v27[0]
fsub.2d v2, v2, v3
fmul.2d v3, v24, v27[0]
fsub.2d v2, v2, v3
fmul.2d v3, v19, v31[0]
fadd.2d v4, v2, v3
fsub.2d v2, v2, v3
mov.d v4[1], v2[1]
fmul.2d v2, v7, v27[0]
fmul.2d v3, v1, v31[0]
fadd.2d v2, v3, v2
fmul.2d v3, v24, v31[0]
fadd.2d v2, v3, v2
fmul.2d v3, v19, v27[0]
fadd.2d v16, v2, v3
fsub.2d v2, v2, v3
mov.d v16[1], v2[1]
fmul.2d v2, v4, v29[0]
fmul.2d v3, v16, v28[0]
fsub.2d v2, v2, v3
fmul.2d v3, v30, v31[0]
ldr q7, [sp, #11072] ; 16-byte Folded Reload
fmul.2d v4, v7, v27[0]
fsub.2d v3, v3, v4
fmul.2d v4, v10, v27[0]
fsub.2d v3, v3, v4
str d21, [x8, #224]
ldr d1, [sp, #12072] ; 8-byte Folded Reload
str d25, [sp, #12280] ; 8-byte Folded Spill
fsub d17, d1, d25
ldr d1, [sp, #5984] ; 8-byte Folded Reload
fsub d4, d17, d1
ldr d1, [sp, #5976] ; 8-byte Folded Reload
fsub d16, d4, d1
str d16, [x8, #232]
fmul.2d v16, v9, v31[0]
fadd.2d v19, v3, v16
fsub.2d v3, v3, v16
mov.d v19[1], v3[1]
str d18, [x8, #336]
ldr d6, [sp, #6472] ; 8-byte Folded Reload
ldr d21, [sp, #6464] ; 8-byte Folded Reload
fadd d3, d6, d21
str d3, [x8, #480]
ldr d1, [sp, #10800] ; 8-byte Folded Reload
fneg d3, d1
str d3, [x8, #536]
ldr d1, [sp, #11312] ; 8-byte Folded Reload
fneg d3, d1
str d3, [x8, #544]
fmul.2d v3, v30, v27[0]
fmul.2d v16, v7, v31[0]
fadd.2d v3, v16, v3
fneg d16, d6
str d16, [x8, #560]
str d20, [x8, #568]
fmul.2d v16, v10, v31[0]
fadd.2d v3, v16, v3
fneg d16, d21
str d16, [x8, #576]
fmul.2d v16, v9, v27[0]
fadd.2d v25, v3, v16
fsub.2d v3, v3, v16
add x9, x8, #584
stp q23, q26, [x9]
mov.d v25[1], v3[1]
fmul.2d v3, v19, v29[0]
fmul.2d v16, v25, v28[0]
fsub.2d v3, v3, v16
stp q2, q3, [x9, #32]
ldr d7, [sp, #3800] ; 8-byte Folded Reload
fmul d2, d31, d7
ldur d1, [x29, #-240] ; 8-byte Folded Reload
fmul d3, d27, d1
fsub d2, d2, d3
ldr d18, [sp, #3768] ; 8-byte Folded Reload
fmul d3, d27, d18
fsub d2, d2, d3
ldr d6, [sp, #3824] ; 8-byte Folded Reload
fmul d3, d31, d6
fadd d2, d3, d2
fmul d2, d29, d2
fmul d3, d27, d7
fmul d16, d31, d1
fadd d3, d16, d3
fmul d16, d31, d18
fadd d3, d16, d3
fmul d16, d27, d6
fadd d3, d16, d3
str xzr, [x8, #256]
fmul d3, d28, d3
fsub d2, d2, d3
str d2, [x8, #648]
str d8, [x8, #712]
str d4, [x8, #720]
fmul.2d v2, v0, v31[0]
fmul.2d v3, v22, v27[0]
fmul.2d v4, v5, v27[0]
ldr q1, [sp, #8624] ; 16-byte Folded Reload
fmul.2d v16, v1, v31[0]
fmul.2d v18, v0, v27[0]
fmul.2d v19, v22, v31[0]
fmul.2d v23, v5, v31[0]
fmul.2d v25, v1, v27[0]
fsub.2d v2, v2, v3
fsub.2d v2, v2, v4
fadd.2d v2, v16, v2
ldr q8, [sp, #10736] ; 16-byte Folded Reload
fmul.2d v2, v2, v8[0]
fadd.2d v3, v19, v18
fadd.2d v3, v3, v23
fadd.2d v3, v25, v3
ldr q30, [sp, #11136] ; 16-byte Folded Reload
fmul.2d v3, v3, v30[0]
fsub.2d v2, v2, v3
ldr q5, [sp, #6800] ; 16-byte Folded Reload
fmul.2d v3, v5, v31[0]
ldr q1, [sp, #8592] ; 16-byte Folded Reload
fmul.2d v4, v1, v27[0]
ldr q6, [sp, #6784] ; 16-byte Folded Reload
fmul.2d v16, v6, v27[0]
ldr q0, [sp, #8608] ; 16-byte Folded Reload
fmul.2d v18, v0, v31[0]
fmul.2d v19, v5, v27[0]
fmul.2d v23, v1, v31[0]
fmul.2d v25, v6, v31[0]
fmul.2d v26, v0, v27[0]
fsub.2d v3, v3, v4
fsub.2d v3, v3, v16
fadd.2d v3, v18, v3
fmul.2d v3, v3, v8[0]
fadd.2d v4, v23, v19
fadd.2d v4, v4, v25
fadd.2d v4, v26, v4
fmul.2d v4, v4, v30[0]
fsub.2d v3, v3, v4
stp q2, q3, [x9, #144]
ldr q0, [sp, #5264] ; 16-byte Folded Reload
fmul.2d v2, v0, v31[0]
ldr q1, [sp, #6736] ; 16-byte Folded Reload
fmul.2d v3, v1, v27[0]
ldr q5, [sp, #5248] ; 16-byte Folded Reload
fmul.2d v4, v5, v27[0]
ldr q6, [sp, #6752] ; 16-byte Folded Reload
fmul.2d v16, v6, v31[0]
fmul.2d v18, v0, v27[0]
fmul.2d v19, v1, v31[0]
fmul.2d v23, v5, v31[0]
fmul.2d v25, v6, v27[0]
fsub.2d v2, v2, v3
fsub.2d v2, v2, v4
fadd.2d v3, v2, v16
fsub.2d v2, v2, v16
mov.d v3[1], v2[1]
fmul.2d v2, v3, v8[0]
fadd.2d v3, v19, v18
fadd.2d v3, v23, v3
fadd.2d v4, v3, v25
fsub.2d v3, v3, v25
mov.d v4[1], v3[1]
fmul.2d v3, v4, v30[0]
fsub.2d v2, v2, v3
ldr q1, [sp, #11328] ; 16-byte Folded Reload
fmul.2d v3, v1, v31[0]
ldr q5, [sp, #10912] ; 16-byte Folded Reload
fmul.2d v4, v5, v27[0]
ldr q6, [sp, #11248] ; 16-byte Folded Reload
fmul.2d v16, v6, v27[0]
ldr q0, [sp, #11104] ; 16-byte Folded Reload
fmul.2d v18, v0, v31[0]
fmul.2d v19, v1, v27[0]
fmul.2d v23, v5, v31[0]
fmul.2d v25, v6, v31[0]
fsub.2d v3, v3, v4
fsub.2d v3, v3, v16
fadd.2d v4, v3, v18
fsub.2d v3, v3, v18
mov.d v4[1], v3[1]
fmul.2d v3, v0, v27[0]
fmul.2d v4, v4, v8[0]
fadd.2d v16, v23, v19
fadd.2d v16, v25, v16
fadd.2d v18, v16, v3
fsub.2d v3, v16, v3
mov.d v18[1], v3[1]
fmul.2d v3, v18, v30[0]
fsub.2d v3, v4, v3
stp q2, q3, [x9, #176]
ldr d4, [sp, #8024] ; 8-byte Folded Reload
fmul d2, d31, d4
ldr d0, [sp, #11664] ; 8-byte Folded Reload
fmul d3, d27, d0
fsub d2, d2, d3
ldr d5, [sp, #8016] ; 8-byte Folded Reload
fmul d3, d27, d5
fsub d2, d2, d3
ldr d1, [sp, #8056] ; 8-byte Folded Reload
fmul d3, d31, d1
fadd d2, d3, d2
fmul d2, d8, d2
fmul d3, d27, d4
fmul d4, d31, d0
fadd d3, d4, d3
fmul d4, d31, d5
fadd d3, d4, d3
fmul d4, d27, d1
fadd d3, d4, d3
fmul d3, d30, d3
fsub d2, d2, d3
str d2, [x8, #792]
ldr x8, [x19]
cbz x8, LBB19_94
LBB19_93:
ldr q2, [sp, #1568] ; 16-byte Folded Reload
ldr q0, [sp, #1152] ; 16-byte Folded Reload
mov.d v2[1], v0[0]
ldr q3, [sp, #1760] ; 16-byte Folded Reload
ldr q0, [sp, #1344] ; 16-byte Folded Reload
mov.d v3[1], v0[0]
ldr q5, [sp, #1552] ; 16-byte Folded Reload
ldr q0, [sp, #1136] ; 16-byte Folded Reload
mov.d v5[1], v0[0]
ldr q4, [sp, #1728] ; 16-byte Folded Reload
ldr q0, [sp, #1312] ; 16-byte Folded Reload
mov.d v4[1], v0[0]
ldr q7, [sp, #832] ; 16-byte Folded Reload
ldr q0, [sp, #576] ; 16-byte Folded Reload
mov.d v7[1], v0[0]
ldp q21, q6, [sp, #992] ; 32-byte Folded Reload
ldr q0, [sp, #656] ; 16-byte Folded Reload
mov.d v6[1], v0[0]
ldr q19, [sp, #800] ; 16-byte Folded Reload
ldr q0, [sp, #560] ; 16-byte Folded Reload
mov.d v19[1], v0[0]
ldr q0, [sp, #672] ; 16-byte Folded Reload
mov.d v21[1], v0[0]
ldp q24, q23, [sp, #112] ; 32-byte Folded Reload
ldr q0, [sp, #6944] ; 16-byte Folded Reload
mov.d v23[1], v0[0]
ldp q22, q25, [sp, #192] ; 32-byte Folded Reload
ldr q0, [sp, #7120] ; 16-byte Folded Reload
mov.d v22[1], v0[0]
ldr q0, [sp, #6928] ; 16-byte Folded Reload
mov.d v24[1], v0[0]
ldr q0, [sp, #7104] ; 16-byte Folded Reload
mov.d v25[1], v0[0]
ldr q27, [sp, #7792] ; 16-byte Folded Reload
ldr q0, [sp, #6864] ; 16-byte Folded Reload
mov.d v27[1], v0[0]
ldr q26, [sp, #7392] ; 16-byte Folded Reload
ldr q0, [sp, #4288] ; 16-byte Folded Reload
mov.d v26[1], v0[0]
ldr q28, [sp, #7776] ; 16-byte Folded Reload
ldr q0, [sp, #6848] ; 16-byte Folded Reload
mov.d v28[1], v0[0]
ldr q29, [sp, #7408] ; 16-byte Folded Reload
ldr q0, [sp, #4240] ; 16-byte Folded Reload
mov.d v29[1], v0[0]
ldr q31, [sp, #1680] ; 16-byte Folded Reload
ldr q0, [sp, #1248] ; 16-byte Folded Reload
mov.d v31[1], v0[0]
ldr q30, [sp, #6576] ; 16-byte Folded Reload
ldr q0, [sp, #1392] ; 16-byte Folded Reload
mov.d v30[1], v0[0]
ldr q8, [sp, #1664] ; 16-byte Folded Reload
ldr q0, [sp, #1232] ; 16-byte Folded Reload
mov.d v8[1], v0[0]
ldr q9, [sp, #6544] ; 16-byte Folded Reload
ldr q0, [sp, #1408] ; 16-byte Folded Reload
mov.d v9[1], v0[0]
ldp q12, q11, [sp, #960] ; 32-byte Folded Reload
ldr q0, [sp, #272] ; 16-byte Folded Reload
mov.d v11[1], v0[0]
ldr q10, [sp, #1088] ; 16-byte Folded Reload
ldr q0, [sp, #368] ; 16-byte Folded Reload
mov.d v10[1], v0[0]
ldr q0, [sp, #256] ; 16-byte Folded Reload
mov.d v12[1], v0[0]
ldr q15, [sp, #1104] ; 16-byte Folded Reload
ldr q0, [sp, #384] ; 16-byte Folded Reload
mov.d v15[1], v0[0]
ldr q14, [sp, #2000] ; 16-byte Folded Reload
ldr q0, [sp, #7520] ; 16-byte Folded Reload
mov.d v14[1], v0[0]
ldr q13, [sp, #80] ; 16-byte Folded Reload
ldr q0, [sp, #7456] ; 16-byte Folded Reload
mov.d v13[1], v0[0]
ldr q0, [sp, #7504] ; 16-byte Folded Reload
ldr q1, [sp, #5280] ; 16-byte Folded Reload
mov.d v1[1], v0[0]
str q1, [sp, #5280] ; 16-byte Folded Spill
ldr q0, [sp, #7440] ; 16-byte Folded Reload
ldr q1, [sp, #5296] ; 16-byte Folded Reload
mov.d v1[1], v0[0]
str q1, [sp, #5296] ; 16-byte Folded Spill
ldr q0, [sp, #8896] ; 16-byte Folded Reload
ldr q1, [sp, #11584] ; 16-byte Folded Reload
mov.d v1[1], v0[0]
str q1, [sp, #11584] ; 16-byte Folded Spill
ldr q0, [sp, #8096] ; 16-byte Folded Reload
ldr q1, [sp, #10448] ; 16-byte Folded Reload
mov.d v1[1], v0[0]
str q1, [sp, #10448] ; 16-byte Folded Spill
ldr q0, [sp, #8864] ; 16-byte Folded Reload
ldr q1, [sp, #11440] ; 16-byte Folded Reload
mov.d v1[1], v0[0]
str q1, [sp, #11440] ; 16-byte Folded Spill
ldr q0, [sp, #8080] ; 16-byte Folded Reload
ldr q1, [sp, #10480] ; 16-byte Folded Reload
mov.d v1[1], v0[0]
str q1, [sp, #10480] ; 16-byte Folded Spill
ldr q0, [sp, #1184] ; 16-byte Folded Reload
ldr q1, [sp, #5632] ; 16-byte Folded Reload
mov.d v1[1], v0[0]
str q1, [sp, #5632] ; 16-byte Folded Spill
ldr q0, [sp, #1168] ; 16-byte Folded Reload
ldr q1, [sp, #5616] ; 16-byte Folded Reload
mov.d v1[1], v0[0]
str q1, [sp, #5616] ; 16-byte Folded Spill
ldr q0, [sp, #1360] ; 16-byte Folded Reload
ldr q1, [sp, #5680] ; 16-byte Folded Reload
mov.d v1[1], v0[0]
str q1, [sp, #5680] ; 16-byte Folded Spill
ldr q0, [sp, #1376] ; 16-byte Folded Reload
ldr q1, [sp, #5696] ; 16-byte Folded Reload
mov.d v1[1], v0[0]
str q1, [sp, #5696] ; 16-byte Folded Spill
ldr q0, [sp, #624] ; 16-byte Folded Reload
ldr q1, [sp, #5456] ; 16-byte Folded Reload
mov.d v1[1], v0[0]
str q1, [sp, #5456] ; 16-byte Folded Spill
ldr q0, [sp, #608] ; 16-byte Folded Reload
ldr q1, [sp, #5440] ; 16-byte Folded Reload
mov.d v1[1], v0[0]
str q1, [sp, #5440] ; 16-byte Folded Spill
ldr q0, [sp, #704] ; 16-byte Folded Reload
ldr q1, [sp, #5520] ; 16-byte Folded Reload
mov.d v1[1], v0[0]
str q1, [sp, #5520] ; 16-byte Folded Spill
ldr q0, [sp, #688] ; 16-byte Folded Reload
ldr q1, [sp, #5536] ; 16-byte Folded Reload
mov.d v1[1], v0[0]
str q1, [sp, #5536] ; 16-byte Folded Spill
ldr q0, [sp, #6992] ; 16-byte Folded Reload
ldr q1, [sp, #5392] ; 16-byte Folded Reload
mov.d v1[1], v0[0]
str q1, [sp, #5392] ; 16-byte Folded Spill
ldr q0, [sp, #6960] ; 16-byte Folded Reload
ldr q1, [sp, #5376] ; 16-byte Folded Reload
mov.d v1[1], v0[0]
str q1, [sp, #5376] ; 16-byte Folded Spill
ldr q0, [sp, #7136] ; 16-byte Folded Reload
ldr q1, [sp, #5424] ; 16-byte Folded Reload
mov.d v1[1], v0[0]
str q1, [sp, #5424] ; 16-byte Folded Spill
ldr q0, [sp, #7168] ; 16-byte Folded Reload
ldr q1, [sp, #5408] ; 16-byte Folded Reload
mov.d v1[1], v0[0]
str q1, [sp, #5408] ; 16-byte Folded Spill
ldr q0, [sp, #6896] ; 16-byte Folded Reload
ldr q1, [sp, #10224] ; 16-byte Folded Reload
mov.d v1[1], v0[0]
str q1, [sp, #10224] ; 16-byte Folded Spill
ldr q0, [sp, #6880] ; 16-byte Folded Reload
ldr q1, [sp, #10208] ; 16-byte Folded Reload
mov.d v1[1], v0[0]
str q1, [sp, #10208] ; 16-byte Folded Spill
ldr q0, [sp, #4320] ; 16-byte Folded Reload
ldr q1, [sp, #9792] ; 16-byte Folded Reload
mov.d v1[1], v0[0]
str q1, [sp, #9792] ; 16-byte Folded Spill
ldr q0, [sp, #5744] ; 16-byte Folded Reload
ldr q1, [sp, #9776] ; 16-byte Folded Reload
mov.d v1[1], v0[0]
str q1, [sp, #9776] ; 16-byte Folded Spill
ldr q0, [sp, #1280] ; 16-byte Folded Reload
ldr q1, [sp, #5664] ; 16-byte Folded Reload
mov.d v1[1], v0[0]
str q1, [sp, #5664] ; 16-byte Folded Spill
ldr q0, [sp, #1264] ; 16-byte Folded Reload
ldr q1, [sp, #5648] ; 16-byte Folded Reload
mov.d v1[1], v0[0]
str q1, [sp, #5648] ; 16-byte Folded Spill
ldr q0, [sp, #1440] ; 16-byte Folded Reload
ldr q1, [sp, #5712] ; 16-byte Folded Reload
mov.d v1[1], v0[0]
str q1, [sp, #5712] ; 16-byte Folded Spill
ldr q0, [sp, #1424] ; 16-byte Folded Reload
ldr q1, [sp, #5728] ; 16-byte Folded Reload
mov.d v1[1], v0[0]
str q1, [sp, #5728] ; 16-byte Folded Spill
ldr q0, [sp, #304] ; 16-byte Folded Reload
ldr q1, [sp, #5504] ; 16-byte Folded Reload
mov.d v1[1], v0[0]
str q1, [sp, #5504] ; 16-byte Folded Spill
ldr q0, [sp, #288] ; 16-byte Folded Reload
ldr q1, [sp, #5488] ; 16-byte Folded Reload
mov.d v1[1], v0[0]
str q1, [sp, #5488] ; 16-byte Folded Spill
ldr q0, [sp, #448] ; 16-byte Folded Reload
ldr q1, [sp, #5568] ; 16-byte Folded Reload
mov.d v1[1], v0[0]
str q1, [sp, #5568] ; 16-byte Folded Spill
ldr q0, [sp, #416] ; 16-byte Folded Reload
ldr q1, [sp, #5552] ; 16-byte Folded Reload
mov.d v1[1], v0[0]
str q1, [sp, #5552] ; 16-byte Folded Spill
ldr q0, [sp, #7568] ; 16-byte Folded Reload
ldr q1, [sp, #5328] ; 16-byte Folded Reload
mov.d v1[1], v0[0]
str q1, [sp, #5328] ; 16-byte Folded Spill
ldr q0, [sp, #7536] ; 16-byte Folded Reload
ldr q1, [sp, #5312] ; 16-byte Folded Reload
mov.d v1[1], v0[0]
str q1, [sp, #5312] ; 16-byte Folded Spill
ldr q0, [sp, #7472] ; 16-byte Folded Reload
ldr q1, [sp, #5360] ; 16-byte Folded Reload
mov.d v1[1], v0[0]
str q1, [sp, #5360] ; 16-byte Folded Spill
ldr q0, [sp, #7488] ; 16-byte Folded Reload
ldr q1, [sp, #5344] ; 16-byte Folded Reload
mov.d v1[1], v0[0]
str q1, [sp, #5344] ; 16-byte Folded Spill
ldr q0, [sp, #8992] ; 16-byte Folded Reload
ldr q1, [sp, #11568] ; 16-byte Folded Reload
mov.d v1[1], v0[0]
str q1, [sp, #11568] ; 16-byte Folded Spill
ldr q0, [sp, #8928] ; 16-byte Folded Reload
ldr q1, [sp, #11536] ; 16-byte Folded Reload
mov.d v1[1], v0[0]
str q1, [sp, #11536] ; 16-byte Folded Spill
ldr q0, [sp, #8272] ; 16-byte Folded Reload
ldr q1, [sp, #10544] ; 16-byte Folded Reload
mov.d v1[1], v0[0]
str q1, [sp, #10544] ; 16-byte Folded Spill
ldr q0, [sp, #8320] ; 16-byte Folded Reload
ldr q1, [sp, #10496] ; 16-byte Folded Reload
mov.d v1[1], v0[0]
str q1, [sp, #10496] ; 16-byte Folded Spill
ldr d0, [sp, #9360] ; 8-byte Folded Reload
str d0, [x8, #800]
ldr d0, [sp, #6488] ; 8-byte Folded Reload
str d0, [x8, #808]
ldur d0, [x29, #-256] ; 8-byte Folded Reload
str d0, [x8, #816]
ldr d0, [sp, #9976] ; 8-byte Folded Reload
str d0, [x8, #824]
ldr d0, [sp, #5904] ; 8-byte Folded Reload
str d0, [x8, #832]
ldr d0, [sp, #9968] ; 8-byte Folded Reload
str d0, [x8, #840]
ldr d0, [sp, #11040] ; 8-byte Folded Reload
str d0, [x8, #848]
ldr d0, [sp, #11560] ; 8-byte Folded Reload
str d0, [x8, #856]
ldr d0, [sp, #11352] ; 8-byte Folded Reload
str d0, [x8, #864]
ldr d0, [sp, #8376] ; 8-byte Folded Reload
str d0, [x8, #944]
ldr d0, [sp, #6088] ; 8-byte Folded Reload
str d0, [x8, #952]
ldr d0, [sp, #6000] ; 8-byte Folded Reload
str d0, [x8, #960]
ldr d0, [sp, #6288] ; 8-byte Folded Reload
str d0, [x8, #968]
ldr d0, [sp, #5968] ; 8-byte Folded Reload
str d0, [x8, #976]
ldr d0, [sp, #9864] ; 8-byte Folded Reload
str d0, [x8, #984]
ldr d0, [sp, #11016] ; 8-byte Folded Reload
str d0, [x8, #992]
ldr d0, [sp, #11728] ; 8-byte Folded Reload
str d0, [x8, #1000]
str d17, [x8, #1008]
ldr d0, [sp, #8400] ; 8-byte Folded Reload
str d0, [x8, #1088]
ldr d0, [sp, #6112] ; 8-byte Folded Reload
str d0, [x8, #1096]
ldr d0, [sp, #6016] ; 8-byte Folded Reload
str d0, [x8, #1104]
ldr d0, [sp, #8264] ; 8-byte Folded Reload
str d0, [x8, #1112]
ldr d0, [sp, #5944] ; 8-byte Folded Reload
str d0, [x8, #1120]
ldr d0, [sp, #9992] ; 8-byte Folded Reload
str d0, [x8, #1128]
ldr d0, [sp, #12176] ; 8-byte Folded Reload
str d0, [x8, #1136]
ldr d0, [sp, #11624] ; 8-byte Folded Reload
str d0, [x8, #1144]
ldr d0, [sp, #11432] ; 8-byte Folded Reload
str d0, [x8, #1152]
ldr d0, [sp, #8384] ; 8-byte Folded Reload
str d0, [x8, #1232]
ldr d0, [sp, #6096] ; 8-byte Folded Reload
str d0, [x8, #1240]
ldr d0, [sp, #6008] ; 8-byte Folded Reload
str d0, [x8, #1248]
ldr d0, [sp, #8224] ; 8-byte Folded Reload
str d0, [x8, #1256]
ldr d0, [sp, #5992] ; 8-byte Folded Reload
str d0, [x8, #1264]
ldr d0, [sp, #9872] ; 8-byte Folded Reload
str d0, [x8, #1272]
ldr d0, [sp, #12168] ; 8-byte Folded Reload
str d0, [x8, #1280]
ldr d0, [sp, #11744] ; 8-byte Folded Reload
str d0, [x8, #1288]
ldr d0, [sp, #8408] ; 8-byte Folded Reload
str d0, [x8, #1376]
ldr d0, [sp, #6120] ; 8-byte Folded Reload
str d0, [x8, #1384]
ldr d0, [sp, #6064] ; 8-byte Folded Reload
str d0, [x8, #1392]
ldr d0, [sp, #8392] ; 8-byte Folded Reload
str d0, [x8, #1400]
ldr d0, [sp, #5960] ; 8-byte Folded Reload
str d0, [x8, #1408]
ldr d0, [sp, #10000] ; 8-byte Folded Reload
str d0, [x8, #1416]
ldr d0, [sp, #12184] ; 8-byte Folded Reload
str d0, [x8, #1424]
ldr d0, [sp, #11672] ; 8-byte Folded Reload
str d0, [x8, #1432]
ldr q18, [sp, #9520] ; 16-byte Folded Reload
fmul.2d v0, v2, v18[0]
ldr q17, [sp, #9536] ; 16-byte Folded Reload
fmul.2d v1, v2, v17[0]
fmul.2d v2, v3, v18[0]
fsub.2d v1, v1, v2
fmul.2d v2, v5, v18[0]
fsub.2d v1, v1, v2
fmul.2d v2, v4, v17[0]
fadd.2d v1, v2, v1
fmul.2d v2, v3, v17[0]
fadd.2d v0, v2, v0
fmul.2d v2, v5, v17[0]
fadd.2d v0, v0, v2
fmul.2d v2, v4, v18[0]
fadd.2d v0, v2, v0
ldr q16, [sp, #11952] ; 16-byte Folded Reload
fmul.2d v1, v1, v16[0]
ldr q20, [sp, #11792] ; 16-byte Folded Reload
fmul.2d v0, v0, v20[0]
fsub.2d v0, v1, v0
fmul.2d v1, v7, v17[0]
fmul.2d v2, v6, v18[0]
fsub.2d v1, v1, v2
fmul.2d v2, v19, v18[0]
fsub.2d v1, v1, v2
fmul.2d v2, v21, v17[0]
fadd.2d v1, v2, v1
fmul.2d v2, v7, v18[0]
fmul.2d v3, v6, v17[0]
fadd.2d v2, v3, v2
fmul.2d v3, v19, v17[0]
fadd.2d v2, v2, v3
fmul.2d v3, v21, v18[0]
fadd.2d v2, v3, v2
fmul.2d v1, v1, v16[0]
fmul.2d v2, v2, v20[0]
fsub.2d v1, v1, v2
fmul.2d v2, v23, v17[0]
fmul.2d v3, v22, v18[0]
fsub.2d v2, v2, v3
fmul.2d v3, v24, v18[0]
fsub.2d v2, v2, v3
fmul.2d v3, v25, v17[0]
fadd.2d v4, v2, v3
fsub.2d v2, v2, v3
fmul.2d v3, v23, v18[0]
fmul.2d v6, v22, v17[0]
fadd.2d v3, v6, v3
fmul.2d v6, v24, v17[0]
fadd.2d v3, v3, v6
fmul.2d v6, v25, v18[0]
fadd.2d v7, v3, v6
fsub.2d v3, v3, v6
mov.d v4[1], v2[1]
mov.d v7[1], v3[1]
fmul.2d v2, v4, v16[0]
fmul.2d v3, v7, v20[0]
fsub.2d v6, v2, v3
fmul.2d v2, v27, v17[0]
fmul.2d v3, v26, v18[0]
fsub.2d v2, v2, v3
fmul.2d v3, v28, v18[0]
fsub.2d v2, v2, v3
fmul.2d v3, v29, v17[0]
fadd.2d v4, v2, v3
fsub.2d v2, v2, v3
fmul.2d v3, v27, v18[0]
fmul.2d v7, v26, v17[0]
fadd.2d v3, v7, v3
fmul.2d v7, v28, v17[0]
fadd.2d v3, v3, v7
mov.d v4[1], v2[1]
fmul.2d v2, v29, v18[0]
fadd.2d v7, v3, v2
fsub.2d v2, v3, v2
mov.d v7[1], v2[1]
fmul.2d v2, v4, v16[0]
fmul.2d v3, v7, v20[0]
fsub.2d v2, v2, v3
ldr d7, [sp, #3864] ; 8-byte Folded Reload
fmul d3, d17, d7
ldr d5, [sp, #6072] ; 8-byte Folded Reload
fmul d4, d18, d5
fsub d3, d3, d4
ldr d21, [sp, #3856] ; 8-byte Folded Reload
fmul d4, d18, d21
fsub d3, d3, d4
ldr d19, [sp, #5912] ; 8-byte Folded Reload
fmul d4, d17, d19
fadd d3, d4, d3
fmul d3, d16, d3
fmul d4, d18, d7
fmul d7, d17, d5
fadd d4, d7, d4
fmul d7, d17, d21
fadd d4, d4, d7
fmul d7, d18, d19
fadd d4, d7, d4
fmul d4, d20, d4
fsub d3, d3, d4
ldr q17, [sp, #9600] ; 16-byte Folded Reload
fmul.2d v4, v31, v17[0]
ldr q18, [sp, #9584] ; 16-byte Folded Reload
fmul.2d v7, v30, v18[0]
fsub.2d v4, v4, v7
fmul.2d v7, v8, v18[0]
fsub.2d v4, v4, v7
fmul.2d v7, v9, v17[0]
fadd.2d v4, v7, v4
fmul.2d v7, v31, v18[0]
fmul.2d v16, v30, v17[0]
fadd.2d v7, v16, v7
fmul.2d v16, v8, v17[0]
fadd.2d v7, v7, v16
add x9, x8, #872
stp q0, q1, [x9]
fmul.2d v0, v9, v18[0]
fadd.2d v0, v0, v7
stp q6, q2, [x9, #32]
ldr q7, [sp, #12240] ; 16-byte Folded Reload
fmul.2d v1, v4, v7[0]
ldr q16, [sp, #11824] ; 16-byte Folded Reload
fmul.2d v0, v0, v16[0]
fsub.2d v0, v1, v0
str d3, [x8, #936]
fmul.2d v1, v11, v17[0]
fmul.2d v2, v10, v18[0]
fsub.2d v1, v1, v2
fmul.2d v2, v12, v18[0]
fsub.2d v1, v1, v2
fmul.2d v2, v15, v17[0]
fadd.2d v1, v2, v1
fmul.2d v2, v11, v18[0]
fmul.2d v3, v10, v17[0]
fadd.2d v2, v3, v2
fmul.2d v3, v12, v17[0]
fadd.2d v2, v2, v3
fmul.2d v3, v15, v18[0]
fadd.2d v2, v3, v2
fmul.2d v3, v14, v17[0]
fmul.2d v4, v13, v18[0]
fsub.2d v3, v3, v4
ldr q19, [sp, #5280] ; 16-byte Folded Reload
fmul.2d v4, v19, v18[0]
fsub.2d v3, v3, v4
ldr q5, [sp, #5296] ; 16-byte Folded Reload
fmul.2d v4, v5, v17[0]
fadd.2d v6, v3, v4
fsub.2d v3, v3, v4
mov.d v6[1], v3[1]
fmul.2d v1, v1, v7[0]
fmul.2d v2, v2, v16[0]
fsub.2d v1, v1, v2
fmul.2d v2, v14, v18[0]
fmul.2d v3, v13, v17[0]
fadd.2d v2, v3, v2
fmul.2d v3, v19, v17[0]
fadd.2d v2, v2, v3
fmul.2d v3, v5, v18[0]
fadd.2d v4, v2, v3
fsub.2d v2, v2, v3
mov.d v4[1], v2[1]
stp q0, q1, [x9, #144]
fmul.2d v0, v6, v7[0]
fmul.2d v1, v4, v16[0]
fsub.2d v0, v0, v1
ldr q6, [sp, #11584] ; 16-byte Folded Reload
fmul.2d v1, v6, v17[0]
ldr q19, [sp, #10448] ; 16-byte Folded Reload
fmul.2d v2, v19, v18[0]
fsub.2d v1, v1, v2
ldr q5, [sp, #11440] ; 16-byte Folded Reload
fmul.2d v2, v5, v18[0]
fsub.2d v1, v1, v2
ldr q4, [sp, #10480] ; 16-byte Folded Reload
fmul.2d v2, v4, v17[0]
fadd.2d v3, v1, v2
fsub.2d v1, v1, v2
mov.d v3[1], v1[1]
fmul.2d v1, v6, v18[0]
fmul.2d v2, v19, v17[0]
fadd.2d v1, v2, v1
fmul.2d v2, v5, v17[0]
fadd.2d v1, v1, v2
fmul.2d v2, v4, v18[0]
fadd.2d v4, v1, v2
fsub.2d v1, v1, v2
mov.d v4[1], v1[1]
fmul.2d v1, v3, v7[0]
fmul.2d v2, v4, v16[0]
fsub.2d v1, v1, v2
stp q0, q1, [x9, #176]
ldr d4, [sp, #8920] ; 8-byte Folded Reload
fmul d0, d17, d4
ldr d2, [sp, #9136] ; 8-byte Folded Reload
fmul d1, d18, d2
fsub d0, d0, d1
ldr d5, [sp, #8888] ; 8-byte Folded Reload
fmul d1, d18, d5
fsub d0, d0, d1
ldr d3, [sp, #9128] ; 8-byte Folded Reload
fmul d1, d17, d3
fadd d0, d1, d0
fmul d0, d7, d0
fmul d1, d18, d4
fmul d2, d17, d2
fadd d1, d2, d1
fmul d2, d17, d5
fadd d1, d1, d2
fmul d2, d18, d3
fadd d1, d2, d1
fmul d1, d16, d1
fsub d0, d0, d1
str d0, [x8, #1080]
ldr q7, [sp, #6624] ; 16-byte Folded Reload
ldr q2, [sp, #5632] ; 16-byte Folded Reload
fmul.2d v0, v2, v7[0]
ldr q16, [sp, #6608] ; 16-byte Folded Reload
ldr q5, [sp, #5616] ; 16-byte Folded Reload
fmul.2d v1, v5, v16[0]
fsub.2d v0, v0, v1
ldr q3, [sp, #5680] ; 16-byte Folded Reload
fmul.2d v1, v3, v7[0]
fsub.2d v0, v0, v1
ldr q4, [sp, #5696] ; 16-byte Folded Reload
fmul.2d v1, v4, v16[0]
fsub.2d v0, v0, v1
fmul.2d v1, v5, v7[0]
fmul.2d v2, v2, v16[0]
fadd.2d v1, v1, v2
fmul.2d v2, v3, v16[0]
fsub.2d v1, v1, v2
fmul.2d v2, v4, v7[0]
fadd.2d v1, v2, v1
ldr q6, [sp, #11984] ; 16-byte Folded Reload
fmul.2d v0, v0, v6[0]
ldr q4, [sp, #11808] ; 16-byte Folded Reload
fmul.2d v1, v1, v4[0]
fsub.2d v0, v0, v1
ldr q3, [sp, #5456] ; 16-byte Folded Reload
fmul.2d v1, v3, v7[0]
ldr q18, [sp, #5440] ; 16-byte Folded Reload
fmul.2d v2, v18, v16[0]
fsub.2d v1, v1, v2
ldr q5, [sp, #5520] ; 16-byte Folded Reload
fmul.2d v2, v5, v7[0]
fsub.2d v1, v1, v2
ldr q17, [sp, #5536] ; 16-byte Folded Reload
fmul.2d v2, v17, v16[0]
fsub.2d v1, v1, v2
fmul.2d v2, v18, v7[0]
fmul.2d v3, v3, v16[0]
fadd.2d v2, v2, v3
fmul.2d v3, v5, v16[0]
fsub.2d v2, v2, v3
fmul.2d v3, v17, v7[0]
fadd.2d v2, v3, v2
fmul.2d v1, v1, v6[0]
fmul.2d v2, v2, v4[0]
fsub.2d v1, v1, v2
stp q0, q1, [x9, #288]
ldr q2, [sp, #5392] ; 16-byte Folded Reload
fmul.2d v0, v2, v7[0]
ldr q17, [sp, #5376] ; 16-byte Folded Reload
fmul.2d v1, v17, v16[0]
fsub.2d v0, v0, v1
ldr q3, [sp, #5424] ; 16-byte Folded Reload
fmul.2d v1, v3, v7[0]
fsub.2d v0, v0, v1
ldr q5, [sp, #5408] ; 16-byte Folded Reload
fmul.2d v1, v5, v16[0]
fsub.2d v0, v0, v1
fmul.2d v1, v17, v7[0]
fmul.2d v2, v2, v16[0]
fadd.2d v1, v1, v2
fmul.2d v2, v3, v16[0]
fsub.2d v1, v1, v2
fmul.2d v2, v5, v7[0]
fadd.2d v1, v2, v1
fmul.2d v0, v0, v6[0]
fmul.2d v1, v1, v4[0]
fsub.2d v0, v0, v1
ldr q3, [sp, #10224] ; 16-byte Folded Reload
fmul.2d v1, v3, v7[0]
ldr q18, [sp, #10208] ; 16-byte Folded Reload
fmul.2d v2, v18, v16[0]
fsub.2d v1, v1, v2
ldr q17, [sp, #9792] ; 16-byte Folded Reload
fmul.2d v2, v17, v7[0]
fsub.2d v1, v1, v2
ldr q5, [sp, #9776] ; 16-byte Folded Reload
fmul.2d v2, v5, v16[0]
fsub.2d v1, v1, v2
fmul.2d v2, v18, v7[0]
fmul.2d v3, v3, v16[0]
fadd.2d v2, v2, v3
fmul.2d v3, v17, v16[0]
fsub.2d v2, v2, v3
fmul.2d v3, v5, v7[0]
fadd.2d v2, v3, v2
fmul.2d v1, v1, v6[0]
fmul.2d v2, v2, v4[0]
fsub.2d v1, v1, v2
stp q0, q1, [x9, #320]
ldr d2, [sp, #8008] ; 8-byte Folded Reload
fmul d0, d7, d2
ldr d17, [sp, #7680] ; 8-byte Folded Reload
fmul d1, d16, d17
fsub d0, d0, d1
ldr d3, [sp, #8048] ; 8-byte Folded Reload
fmul d1, d7, d3
fadd d0, d1, d0
ldr d5, [sp, #8040] ; 8-byte Folded Reload
fmul d1, d16, d5
fsub d0, d0, d1
fmul d0, d6, d0
fmul d1, d7, d17
fmul d2, d16, d2
fadd d1, d1, d2
fmul d2, d16, d3
fadd d1, d2, d1
fmul d2, d7, d5
fadd d1, d2, d1
fmul d1, d4, d1
fsub d0, d0, d1
str d0, [x8, #1224]
ldr d0, [sp, #12280] ; 8-byte Folded Reload
fneg d0, d0
str d0, [x8, #1296]
ldr q6, [sp, #6688] ; 16-byte Folded Reload
ldr q2, [sp, #5664] ; 16-byte Folded Reload
fmul.2d v0, v2, v6[0]
ldr q7, [sp, #6672] ; 16-byte Folded Reload
ldr q5, [sp, #5648] ; 16-byte Folded Reload
fmul.2d v1, v5, v7[0]
fsub.2d v0, v0, v1
ldr q3, [sp, #5712] ; 16-byte Folded Reload
fmul.2d v1, v3, v6[0]
fsub.2d v0, v0, v1
ldr q4, [sp, #5728] ; 16-byte Folded Reload
fmul.2d v1, v4, v7[0]
fsub.2d v0, v0, v1
fmul.2d v1, v5, v6[0]
fmul.2d v2, v2, v7[0]
fadd.2d v1, v1, v2
fmul.2d v2, v3, v7[0]
fsub.2d v1, v1, v2
fmul.2d v2, v4, v6[0]
fadd.2d v1, v2, v1
ldr q5, [sp, #12000] ; 16-byte Folded Reload
fmul.2d v0, v0, v5[0]
ldr q4, [sp, #11840] ; 16-byte Folded Reload
fmul.2d v1, v1, v4[0]
fsub.2d v0, v0, v1
ldr q3, [sp, #5504] ; 16-byte Folded Reload
fmul.2d v1, v3, v6[0]
ldr q18, [sp, #5488] ; 16-byte Folded Reload
fmul.2d v2, v18, v7[0]
fsub.2d v1, v1, v2
ldr q16, [sp, #5568] ; 16-byte Folded Reload
fmul.2d v2, v16, v6[0]
fsub.2d v1, v1, v2
ldr q17, [sp, #5552] ; 16-byte Folded Reload
fmul.2d v2, v17, v7[0]
fsub.2d v1, v1, v2
fmul.2d v2, v18, v6[0]
fmul.2d v3, v3, v7[0]
fadd.2d v2, v2, v3
fmul.2d v3, v16, v7[0]
fsub.2d v2, v2, v3
fmul.2d v3, v17, v6[0]
fadd.2d v2, v3, v2
fmul.2d v1, v1, v5[0]
fmul.2d v2, v2, v4[0]
fsub.2d v1, v1, v2
stp q0, q1, [x9, #432]
ldr q2, [sp, #5328] ; 16-byte Folded Reload
fmul.2d v0, v2, v6[0]
ldr q17, [sp, #5312] ; 16-byte Folded Reload
fmul.2d v1, v17, v7[0]
fsub.2d v0, v0, v1
ldr q3, [sp, #5360] ; 16-byte Folded Reload
fmul.2d v1, v3, v6[0]
fsub.2d v0, v0, v1
ldr q16, [sp, #5344] ; 16-byte Folded Reload
fmul.2d v1, v16, v7[0]
fsub.2d v0, v0, v1
fmul.2d v1, v17, v6[0]
fmul.2d v2, v2, v7[0]
fadd.2d v1, v1, v2
fmul.2d v2, v3, v7[0]
fsub.2d v1, v1, v2
fmul.2d v2, v16, v6[0]
fadd.2d v1, v2, v1
fmul.2d v0, v0, v5[0]
fmul.2d v1, v1, v4[0]
fsub.2d v0, v0, v1
ldr q3, [sp, #11568] ; 16-byte Folded Reload
fmul.2d v1, v3, v6[0]
ldr q18, [sp, #11536] ; 16-byte Folded Reload
fmul.2d v2, v18, v7[0]
fsub.2d v1, v1, v2
ldr q16, [sp, #10544] ; 16-byte Folded Reload
fmul.2d v2, v16, v6[0]
fsub.2d v1, v1, v2
ldr q17, [sp, #10496] ; 16-byte Folded Reload
fmul.2d v2, v17, v7[0]
fsub.2d v1, v1, v2
fmul.2d v2, v18, v6[0]
fmul.2d v3, v3, v7[0]
fadd.2d v2, v2, v3
fmul.2d v3, v16, v7[0]
fsub.2d v2, v2, v3
fmul.2d v3, v17, v6[0]
fadd.2d v2, v3, v2
fmul.2d v1, v1, v5[0]
fmul.2d v2, v2, v4[0]
fsub.2d v1, v1, v2
stp q0, q1, [x9, #464]
ldr d2, [sp, #9248] ; 8-byte Folded Reload
fmul d0, d6, d2
ldr d17, [sp, #9144] ; 8-byte Folded Reload
fmul d1, d7, d17
fsub d0, d0, d1
ldr d3, [sp, #9264] ; 8-byte Folded Reload
fmul d1, d6, d3
fadd d0, d1, d0
ldr d16, [sp, #9256] ; 8-byte Folded Reload
fmul d1, d7, d16
fsub d0, d0, d1
fmul d0, d5, d0
fmul d1, d6, d17
fmul d2, d7, d2
fadd d1, d1, d2
fmul d2, d7, d3
fadd d1, d2, d1
fmul d2, d6, d16
fadd d1, d2, d1
fmul d1, d4, d1
fsub d0, d0, d1
str d0, [x8, #1368]
ldr d0, [sp, #12288] ; 8-byte Folded Reload
fneg d0, d0
str d0, [x8, #1440]
ldr d0, [sp, #1712] ; 8-byte Folded Reload
ldr d1, [sp, #6296] ; 8-byte Folded Reload
fsub d1, d1, d0
ldr d2, [sp, #12152] ; 8-byte Folded Reload
fmul d0, d2, d1
fmov d26, d2
ldr d3, [sp, #5144] ; 8-byte Folded Reload
ldr d2, [sp, #11776] ; 8-byte Folded Reload
fmul d2, d2, d3
fadd d0, d0, d2
ldr d4, [sp, #5136] ; 8-byte Folded Reload
ldr d2, [sp, #12296] ; 8-byte Folded Reload
fmul d2, d2, d4
fadd d17, d2, d0
ldr d0, [sp, #11768] ; 8-byte Folded Reload
fmul d0, d0, d3
ldr d19, [sp, #12144] ; 8-byte Folded Reload
fmul d2, d19, d1
fadd d0, d2, d0
ldur d2, [x29, #-232] ; 8-byte Folded Reload
fmul d2, d2, d4
fadd d18, d2, d0
ldr d2, [sp, #11688] ; 8-byte Folded Reload
ldr d12, [sp, #1704] ; 8-byte Folded Reload
fmul d0, d2, d12
fmov d29, d2
ldr d13, [sp, #11632] ; 8-byte Folded Reload
ldr d5, [sp, #1696] ; 8-byte Folded Reload
fmul d2, d13, d5
fsub d0, d0, d2
ldr d11, [sp, #11680] ; 8-byte Folded Reload
ldr d8, [sp, #1824] ; 8-byte Folded Reload
fmul d2, d11, d8
fsub d0, d0, d2
ldr d14, [sp, #11488] ; 8-byte Folded Reload
ldr d9, [sp, #1816] ; 8-byte Folded Reload
fmul d2, d14, d9
fadd d2, d2, d0
ldur d3, [x29, #-248] ; 8-byte Folded Reload
ldr d0, [sp, #6568] ; 8-byte Folded Reload
fmul d0, d3, d0
fmov d31, d3
ldr d3, [sp, #9560] ; 8-byte Folded Reload
ldr d4, [sp, #10096] ; 8-byte Folded Reload
fmul d3, d3, d4
fadd d0, d0, d3
ldr d3, [sp, #5096] ; 8-byte Folded Reload
ldr d4, [sp, #11968] ; 8-byte Folded Reload
fmul d3, d4, d3
fsub d0, d3, d0
ldr d3, [sp, #9568] ; 8-byte Folded Reload
ldr d4, [sp, #10104] ; 8-byte Folded Reload
fmul d3, d3, d4
fsub d3, d0, d3
mov x9, #62994
movk x9, #14722, lsl #16
movk x9, #41829, lsl #32
movk x9, #16247, lsl #48
fmov d0, x9
stur d0, [x29, #-256] ; 8-byte Folded Spill
fmul d0, d3, d0
ldr d25, [sp, #11880] ; 8-byte Folded Reload
ldr d21, [sp, #5200] ; 8-byte Folded Reload
fmul d4, d25, d21
fadd d0, d4, d0
ldr d10, [sp, #1888] ; 8-byte Folded Reload
ldr d23, [sp, #6528] ; 8-byte Folded Reload
fmul d4, d10, d23
fadd d0, d4, d0
ldr d20, [sp, #11872] ; 8-byte Folded Reload
ldr d27, [sp, #1792] ; 8-byte Folded Reload
fmul d4, d20, d27
fadd d0, d4, d0
mov x9, #45724
movk x9, #42429, lsl #16
movk x9, #11379, lsl #32
movk x9, #48937, lsl #48
mov x10, #61406
movk x10, #16023, lsl #16
movk x10, #30452, lsl #32
movk x10, #48921, lsl #48
fmov d4, x10
ldr d6, [sp, #5152] ; 8-byte Folded Reload
fmul d28, d6, d4
str d28, [sp, #12176] ; 8-byte Folded Spill
mov x10, #40862
movk x10, #31695, lsl #16
movk x10, #12355, lsl #32
movk x10, #16198, lsl #48
mov x11, #64744
movk x11, #21380, lsl #16
movk x11, #23316, lsl #32
movk x11, #48962, lsl #48
fmov d4, x11
str d4, [sp, #11808] ; 8-byte Folded Spill
fmul d4, d6, d4
str d4, [sp, #11744] ; 8-byte Folded Spill
fadd d0, d4, d0
ldr d4, [sp, #10752] ; 8-byte Folded Reload
fmul d4, d4, d1
str d4, [sp, #11728] ; 8-byte Folded Spill
fadd d0, d4, d0
fmov d4, x10
str d4, [sp, #11952] ; 8-byte Folded Spill
fmul d4, d18, d4
str d4, [sp, #12280] ; 8-byte Folded Spill
fadd d0, d4, d0
fmov d4, x9
fmul d6, d17, d4
str d6, [sp, #12240] ; 8-byte Folded Spill
fmov d7, d4
str d4, [sp, #11792] ; 8-byte Folded Spill
fadd d4, d6, d0
mov x9, #4359484439294640128
fmov d15, x9
fmul d6, d4, d15
fsub d16, d2, d6
mov x9, #56877
movk x9, #10885, lsl #16
movk x9, #2572, lsl #32
movk x9, #16289, lsl #48
fmov d0, x9
stur d0, [x29, #-240] ; 8-byte Folded Spill
fmul d2, d3, d0
ldr d22, [sp, #12096] ; 8-byte Folded Reload
fmul d3, d22, d21
fadd d2, d3, d2
ldr d21, [sp, #6520] ; 8-byte Folded Reload
fmul d3, d10, d21
fadd d3, d3, d2
fmul d2, d19, d23
ldr d6, [sp, #6592] ; 8-byte Folded Reload
fadd d2, d6, d2
fmul d6, d26, d21
fmov d23, d26
fadd d6, d6, d2
mov x9, #-7378697629483820647
movk x9, #39322
movk x9, #16297, lsl #48
fmov d24, x9
fmul d6, d6, d24
fadd d3, d6, d3
ldr d0, [sp, #12088] ; 8-byte Folded Reload
fmul d6, d0, d27
fmov d27, d0
fadd d3, d6, d3
mov x9, #45033
movk x9, #40035, lsl #16
movk x9, #524, lsl #32
movk x9, #48971, lsl #48
fmov d0, x9
str d0, [sp, #12072] ; 8-byte Folded Spill
fmul d2, d17, d0
str d2, [sp, #12288] ; 8-byte Folded Spill
fmul d6, d18, d7
str d6, [sp, #12208] ; 8-byte Folded Spill
ldr d0, [sp, #10760] ; 8-byte Folded Reload
fmul d0, d0, d1
str d0, [sp, #12184] ; 8-byte Folded Spill
fsub d1, d3, d28
fadd d1, d0, d1
fadd d1, d6, d1
fadd d3, d2, d1
mov x9, #4354980839667269632
fmov d30, x9
fmul d17, d3, d30
fsub d16, d16, d17
fmul d17, d13, d12
fmul d18, d29, d5
fadd d17, d17, d18
fmul d18, d14, d8
fadd d17, d18, d17
fmul d18, d11, d9
fadd d17, d18, d17
fmul d4, d4, d30
fsub d4, d17, d4
fmul d3, d3, d15
fadd d3, d3, d4
ldr d0, [sp, #12304] ; 8-byte Folded Reload
fmul d4, d0, d16
fmov d7, d0
ldr d0, [sp, #12312] ; 8-byte Folded Reload
fmul d3, d0, d3
fmov d6, d0
fsub d3, d4, d3
str d3, [x8, #1448]
ldr d1, [sp, #1504] ; 8-byte Folded Reload
fmul d3, d29, d1
ldr d2, [sp, #1496] ; 8-byte Folded Reload
fmul d4, d13, d2
fsub d3, d3, d4
ldr d21, [sp, #1608] ; 8-byte Folded Reload
fmul d4, d11, d21
fsub d3, d3, d4
ldr d9, [sp, #1592] ; 8-byte Folded Reload
fmul d4, d14, d9
fadd d3, d4, d3
ldr d4, [sp, #5008] ; 8-byte Folded Reload
fmul d4, d31, d4
ldr d16, [sp, #5160] ; 8-byte Folded Reload
fsub d4, d16, d4
ldur d31, [x29, #-256] ; 8-byte Folded Reload
fmul d16, d4, d31
ldr d5, [sp, #1616] ; 8-byte Folded Reload
fmul d17, d25, d5
fadd d16, d17, d16
ldr d18, [sp, #5000] ; 8-byte Folded Reload
fmul d17, d10, d18
fadd d16, d17, d16
ldr d0, [sp, #1520] ; 8-byte Folded Reload
fmul d17, d20, d0
fmov d26, d20
fadd d16, d17, d16
fmul d17, d16, d15
fsub d3, d3, d17
ldur d20, [x29, #-240] ; 8-byte Folded Reload
fmul d4, d4, d20
fmul d17, d22, d5
fadd d4, d17, d4
ldr d8, [sp, #4992] ; 8-byte Folded Reload
fmul d17, d10, d8
fadd d4, d17, d4
fmul d17, d19, d18
ldr d5, [sp, #5048] ; 8-byte Folded Reload
fadd d17, d5, d17
fmov d28, d23
fmul d18, d23, d8
fadd d17, d18, d17
fmul d17, d17, d24
fadd d4, d17, d4
fmov d23, d27
fmul d17, d27, d0
fadd d4, d17, d4
fmul d17, d4, d30
fsub d3, d3, d17
fmul d17, d13, d1
fmul d18, d29, d2
fadd d17, d17, d18
fmul d18, d14, d21
fadd d17, d18, d17
fmul d18, d11, d9
fadd d17, d18, d17
fmul d16, d16, d30
fsub d16, d17, d16
fmul d4, d4, d15
fadd d4, d16, d4
fmul d3, d7, d3
fmov d27, d7
fmov d17, d6
fmul d4, d6, d4
fsub d3, d3, d4
str d3, [x8, #1456]
ldr d6, [sp, #1216] ; 8-byte Folded Reload
fmul d3, d29, d6
ldr d7, [sp, #1208] ; 8-byte Folded Reload
fmul d4, d13, d7
fsub d3, d3, d4
ldr d1, [sp, #1304] ; 8-byte Folded Reload
fmul d4, d11, d1
fsub d3, d3, d4
ldr d5, [sp, #1296] ; 8-byte Folded Reload
fmul d4, d14, d5
fadd d3, d4, d3
ldr d0, [sp, #4880] ; 8-byte Folded Reload
ldr d12, [sp, #11968] ; 8-byte Folded Reload
fmul d4, d12, d0
ldr d16, [sp, #5168] ; 8-byte Folded Reload
fsub d4, d4, d16
ldr d0, [sp, #4920] ; 8-byte Folded Reload
fmul d2, d25, d0
str d2, [sp, #12000] ; 8-byte Folded Spill
fmul d16, d4, d31
fsub d16, d16, d2
ldr d2, [sp, #4912] ; 8-byte Folded Reload
fmul d18, d10, d2
fadd d16, d18, d16
ldr d9, [sp, #4888] ; 8-byte Folded Reload
fmul d18, d26, d9
str d18, [sp, #11840] ; 8-byte Folded Spill
fsub d16, d16, d18
fmul d18, d16, d15
fsub d3, d3, d18
fmul d0, d22, d0
str d0, [sp, #11984] ; 8-byte Folded Spill
fmul d4, d4, d20
fmov d31, d20
fsub d4, d4, d0
ldr d8, [sp, #4904] ; 8-byte Folded Reload
fmul d21, d10, d8
fadd d4, d21, d4
fmul d21, d19, d2
ldr d0, [sp, #4928] ; 8-byte Folded Reload
fadd d21, d0, d21
fmul d8, d28, d8
fadd d21, d8, d21
fmul d21, d21, d24
fmov d28, d24
str d24, [sp, #12168] ; 8-byte Folded Spill
fadd d4, d21, d4
fmul d0, d23, d9
str d0, [sp, #11824] ; 8-byte Folded Spill
fsub d4, d4, d0
fmul d8, d4, d30
fsub d3, d3, d8
fmul d8, d13, d6
fmul d9, d29, d7
fadd d8, d9, d8
fmul d9, d14, d1
fadd d8, d8, d9
fmul d9, d11, d5
fadd d8, d9, d8
fmul d16, d16, d30
fsub d16, d8, d16
fmul d4, d4, d15
fadd d4, d16, d4
fmul d3, d27, d3
fmul d4, d17, d4
fsub d3, d3, d4
str d3, [x8, #1464]
ldr d17, [sp, #864] ; 8-byte Folded Reload
fmul d3, d29, d17
ldr d18, [sp, #856] ; 8-byte Folded Reload
fmul d4, d13, d18
fmov d29, d13
fsub d3, d3, d4
ldr d20, [sp, #1064] ; 8-byte Folded Reload
fmul d4, d11, d20
fsub d3, d3, d4
ldr d24, [sp, #1040] ; 8-byte Folded Reload
fmul d4, d14, d24
fadd d3, d4, d3
ldr d0, [sp, #4800] ; 8-byte Folded Reload
ldp d11, d19, [x29, #-256] ; 16-byte Folded Reload
fmul d4, d19, d0
ldr d5, [sp, #8736] ; 8-byte Folded Reload
ldr d22, [sp, #10096] ; 8-byte Folded Reload
fmul d16, d5, d22
fadd d4, d4, d16
ldr d0, [sp, #4736] ; 8-byte Folded Reload
fmul d16, d12, d0
fsub d4, d16, d4
ldr d5, [sp, #6496] ; 8-byte Folded Reload
ldr d23, [sp, #10104] ; 8-byte Folded Reload
fmul d16, d5, d23
fsub d4, d4, d16
fmul d16, d4, d11
ldr d0, [sp, #1080] ; 8-byte Folded Reload
fmul d8, d25, d0
fadd d16, d8, d16
ldr d2, [sp, #4792] ; 8-byte Folded Reload
fmul d8, d10, d2
fadd d16, d8, d16
ldr d7, [sp, #880] ; 8-byte Folded Reload
fmul d8, d26, d7
fadd d16, d8, d16
ldr d25, [sp, #11744] ; 8-byte Folded Reload
fadd d16, d25, d16
ldr d26, [sp, #11728] ; 8-byte Folded Reload
fadd d16, d26, d16
ldr d12, [sp, #12280] ; 8-byte Folded Reload
fadd d16, d12, d16
ldr d21, [sp, #12240] ; 8-byte Folded Reload
fadd d16, d21, d16
fmul d8, d16, d15
fsub d3, d3, d8
fmul d4, d4, d31
ldr d13, [sp, #12096] ; 8-byte Folded Reload
fmul d8, d13, d0
fadd d4, d8, d4
ldr d5, [sp, #4776] ; 8-byte Folded Reload
fmul d8, d10, d5
fadd d4, d8, d4
ldr d31, [sp, #12144] ; 8-byte Folded Reload
fmul d8, d31, d2
ldr d0, [sp, #4832] ; 8-byte Folded Reload
fadd d8, d0, d8
ldr d14, [sp, #12152] ; 8-byte Folded Reload
fmul d9, d14, d5
fadd d8, d9, d8
fmul d8, d8, d28
fadd d4, d8, d4
ldr d0, [sp, #12088] ; 8-byte Folded Reload
fmul d8, d0, d7
fadd d4, d8, d4
ldr d6, [sp, #12176] ; 8-byte Folded Reload
fsub d4, d4, d6
ldr d27, [sp, #12184] ; 8-byte Folded Reload
fadd d4, d27, d4
ldr d1, [sp, #12208] ; 8-byte Folded Reload
fadd d4, d1, d4
ldr d0, [sp, #12288] ; 8-byte Folded Reload
fadd d4, d0, d4
fmul d8, d4, d30
fsub d3, d3, d8
fmov d5, d29
fmul d8, d29, d17
ldr d17, [sp, #11688] ; 8-byte Folded Reload
fmul d9, d17, d18
fadd d8, d8, d9
ldr d7, [sp, #11488] ; 8-byte Folded Reload
fmul d9, d7, d20
fadd d8, d9, d8
ldr d0, [sp, #11680] ; 8-byte Folded Reload
fmul d9, d0, d24
fadd d8, d9, d8
fmul d16, d16, d30
fsub d16, d8, d16
fmul d4, d4, d15
fadd d4, d4, d16
ldr d2, [sp, #12304] ; 8-byte Folded Reload
fmul d3, d2, d3
ldr d2, [sp, #12312] ; 8-byte Folded Reload
fmul d4, d2, d4
fsub d3, d3, d4
str d3, [x8, #1472]
ldr d28, [sp, #440] ; 8-byte Folded Reload
fmul d3, d17, d28
ldr d29, [sp, #408] ; 8-byte Folded Reload
fmul d4, d5, d29
fsub d3, d3, d4
ldr d20, [sp, #536] ; 8-byte Folded Reload
fmul d4, d0, d20
fsub d3, d3, d4
ldr d24, [sp, #528] ; 8-byte Folded Reload
fmul d4, d7, d24
fadd d3, d4, d3
ldr d0, [sp, #4688] ; 8-byte Folded Reload
fmul d4, d19, d0
ldr d0, [sp, #8712] ; 8-byte Folded Reload
fmul d16, d0, d22
fadd d4, d4, d16
ldr d0, [sp, #4632] ; 8-byte Folded Reload
ldr d2, [sp, #11968] ; 8-byte Folded Reload
fmul d16, d2, d0
fsub d4, d16, d4
ldr d0, [sp, #8704] ; 8-byte Folded Reload
fmul d16, d0, d23
fsub d4, d4, d16
fmul d16, d4, d11
ldr d0, [sp, #600] ; 8-byte Folded Reload
ldr d22, [sp, #11880] ; 8-byte Folded Reload
fmul d8, d22, d0
fadd d16, d8, d16
ldr d2, [sp, #4672] ; 8-byte Folded Reload
fmul d8, d10, d2
fadd d16, d8, d16
ldr d7, [sp, #504] ; 8-byte Folded Reload
ldr d23, [sp, #11872] ; 8-byte Folded Reload
fmul d8, d23, d7
fadd d16, d8, d16
fadd d16, d25, d16
fadd d16, d26, d16
fadd d16, d12, d16
fadd d16, d21, d16
fmul d8, d16, d15
fsub d3, d3, d8
ldur d18, [x29, #-240] ; 8-byte Folded Reload
fmul d4, d4, d18
fmov d17, d13
fmul d8, d13, d0
fadd d4, d8, d4
ldr d5, [sp, #4656] ; 8-byte Folded Reload
fmul d8, d10, d5
fadd d4, d8, d4
fmul d8, d31, d2
ldr d0, [sp, #4696] ; 8-byte Folded Reload
fadd d8, d0, d8
fmov d11, d14
fmul d9, d14, d5
fadd d8, d9, d8
ldr d12, [sp, #12168] ; 8-byte Folded Reload
fmul d8, d8, d12
fadd d4, d8, d4
ldr d0, [sp, #12088] ; 8-byte Folded Reload
fmul d8, d0, d7
fadd d4, d8, d4
fsub d4, d4, d6
fadd d4, d27, d4
fadd d4, d1, d4
ldr d27, [sp, #12288] ; 8-byte Folded Reload
fadd d4, d27, d4
fmul d8, d4, d30
fsub d3, d3, d8
ldr d13, [sp, #11632] ; 8-byte Folded Reload
fmul d8, d13, d28
ldr d1, [sp, #11688] ; 8-byte Folded Reload
fmul d9, d1, d29
fadd d8, d8, d9
ldr d14, [sp, #11488] ; 8-byte Folded Reload
fmul d9, d14, d20
fadd d8, d9, d8
ldr d0, [sp, #11680] ; 8-byte Folded Reload
fmul d9, d0, d24
fadd d8, d9, d8
fmul d16, d16, d30
fsub d16, d8, d16
fmul d4, d4, d15
fadd d4, d4, d16
ldr d19, [sp, #12304] ; 8-byte Folded Reload
fmul d3, d19, d3
ldr d20, [sp, #12312] ; 8-byte Folded Reload
fmul d4, d20, d4
fsub d3, d3, d4
str d3, [x8, #1480]
ldr d6, [sp, #6984] ; 8-byte Folded Reload
fmul d3, d1, d6
fmov d9, d1
ldr d7, [sp, #6976] ; 8-byte Folded Reload
fmul d4, d13, d7
fsub d3, d3, d4
ldr d28, [sp, #7184] ; 8-byte Folded Reload
fmul d4, d0, d28
fsub d3, d3, d4
ldr d29, [sp, #7160] ; 8-byte Folded Reload
fmul d4, d14, d29
fadd d3, d4, d3
ldr d0, [sp, #8776] ; 8-byte Folded Reload
ldr d1, [sp, #11968] ; 8-byte Folded Reload
fmul d4, d1, d0
ldr d0, [sp, #7880] ; 8-byte Folded Reload
ldp d21, d1, [x29, #-256] ; 16-byte Folded Reload
fmul d16, d1, d0
ldr d0, [sp, #10120] ; 8-byte Folded Reload
ldr d1, [sp, #10096] ; 8-byte Folded Reload
fmul d8, d0, d1
fadd d16, d16, d8
fsub d4, d4, d16
ldr d0, [sp, #10144] ; 8-byte Folded Reload
ldr d1, [sp, #10104] ; 8-byte Folded Reload
fmul d16, d0, d1
fsub d4, d4, d16
fmul d16, d4, d21
ldr d0, [sp, #2032] ; 8-byte Folded Reload
fmul d8, d22, d0
fadd d16, d8, d16
ldr d1, [sp, #7248] ; 8-byte Folded Reload
fmul d8, d10, d1
fadd d16, d8, d16
ldr d5, [sp, #2024] ; 8-byte Folded Reload
fmul d8, d23, d5
fadd d16, d8, d16
fadd d16, d25, d16
fadd d16, d26, d16
ldr d2, [sp, #12280] ; 8-byte Folded Reload
fadd d16, d2, d16
ldr d2, [sp, #12240] ; 8-byte Folded Reload
fadd d16, d2, d16
fmul d25, d16, d15
fsub d3, d3, d25
fmul d4, d4, d18
fmul d25, d17, d0
fadd d4, d25, d4
ldr d2, [sp, #7240] ; 8-byte Folded Reload
fmul d25, d10, d2
fadd d4, d25, d4
fmul d25, d31, d1
ldr d0, [sp, #7256] ; 8-byte Folded Reload
fadd d25, d0, d25
fmul d26, d11, d2
fadd d25, d26, d25
fmul d25, d25, d12
fadd d4, d25, d4
ldr d26, [sp, #12088] ; 8-byte Folded Reload
fmul d25, d26, d5
fadd d4, d25, d4
ldr d0, [sp, #12176] ; 8-byte Folded Reload
fsub d4, d4, d0
ldr d0, [sp, #12184] ; 8-byte Folded Reload
fadd d4, d0, d4
ldr d0, [sp, #12208] ; 8-byte Folded Reload
fadd d4, d0, d4
fadd d4, d27, d4
fmul d24, d4, d30
fsub d3, d3, d24
fmul d24, d13, d6
fmul d25, d9, d7
fadd d24, d24, d25
fmul d25, d14, d28
fadd d24, d25, d24
ldr d31, [sp, #11680] ; 8-byte Folded Reload
fmul d25, d31, d29
fadd d24, d25, d24
fmul d16, d16, d30
fsub d16, d24, d16
fmul d4, d4, d15
fadd d4, d4, d16
fmul d3, d19, d3
fmul d4, d20, d4
fsub d3, d3, d4
str d3, [x8, #1488]
ldr d11, [sp, #7824] ; 8-byte Folded Reload
fmul d3, d9, d11
fmov d29, d9
ldr d12, [sp, #7816] ; 8-byte Folded Reload
fmul d4, d13, d12
fsub d3, d3, d4
ldr d8, [sp, #7648] ; 8-byte Folded Reload
fmul d4, d31, d8
fsub d3, d3, d4
ldr d9, [sp, #7600] ; 8-byte Folded Reload
fmul d4, d14, d9
fadd d7, d4, d3
ldr d2, [sp, #9080] ; 8-byte Folded Reload
fmov d3, d22
fmul d4, d22, d2
ldr d1, [sp, #9160] ; 8-byte Folded Reload
fmul d16, d23, d1
fadd d4, d4, d16
mov x9, #11213
movk x9, #64899, lsl #16
movk x9, #2195, lsl #32
movk x9, #49148, lsl #48
fmov d16, x9
ldur d27, [x29, #-160] ; 8-byte Folded Reload
fmul d16, d27, d16
fmul d24, d16, d21
fadd d4, d24, d4
ldr d5, [sp, #5128] ; 8-byte Folded Reload
ldr d0, [sp, #11168] ; 8-byte Folded Reload
fmul d24, d0, d5
ldr d5, [sp, #6600] ; 8-byte Folded Reload
ldr d0, [sp, #10816] ; 8-byte Folded Reload
fmul d25, d0, d5
fsub d24, d24, d25
fmul d22, d24, d21
fadd d4, d4, d22
ldr d17, [sp, #7656] ; 8-byte Folded Reload
fmul d22, d3, d17
fadd d4, d22, d4
ldr d18, [sp, #9768] ; 8-byte Folded Reload
fmul d22, d10, d18
fadd d4, d22, d4
ldr d20, [sp, #7592] ; 8-byte Folded Reload
fmul d22, d23, d20
fadd d4, d22, d4
ldr d28, [sp, #9760] ; 8-byte Folded Reload
ldr d3, [sp, #11808] ; 8-byte Folded Reload
fmul d22, d28, d3
fadd d4, d4, d22
ldr d5, [sp, #12232] ; 8-byte Folded Reload
ldr d6, [sp, #10288] ; 8-byte Folded Reload
fmul d22, d5, d6
ldr d5, [sp, #6296] ; 8-byte Folded Reload
fadd d22, d22, d5
ldr d5, [sp, #7712] ; 8-byte Folded Reload
fsub d22, d22, d5
ldr d3, [sp, #10752] ; 8-byte Folded Reload
fmul d23, d3, d22
fadd d4, d23, d4
ldr d5, [sp, #9752] ; 8-byte Folded Reload
ldr d6, [sp, #11768] ; 8-byte Folded Reload
fmul d23, d6, d5
ldr d0, [sp, #12144] ; 8-byte Folded Reload
fmul d25, d0, d22
fadd d23, d25, d23
ldr d6, [sp, #9064] ; 8-byte Folded Reload
ldur d19, [x29, #-232] ; 8-byte Folded Reload
fmul d25, d19, d6
fadd d23, d25, d23
ldr d3, [sp, #11952] ; 8-byte Folded Reload
fmul d19, d23, d3
fadd d4, d19, d4
ldr d19, [sp, #11776] ; 8-byte Folded Reload
fmul d19, d19, d5
ldr d5, [sp, #12152] ; 8-byte Folded Reload
fmul d25, d5, d22
fadd d19, d25, d19
ldr d21, [sp, #12296] ; 8-byte Folded Reload
fmul d25, d21, d6
fadd d19, d25, d19
ldr d3, [sp, #11792] ; 8-byte Folded Reload
fmul d25, d19, d3
fadd d4, d4, d25
fmul d25, d4, d15
fsub d21, d7, d25
ldr d6, [sp, #12096] ; 8-byte Folded Reload
fmul d25, d6, d2
fmov d2, d26
fmul d26, d26, d1
fadd d25, d25, d26
ldur d1, [x29, #-240] ; 8-byte Folded Reload
fmul d16, d16, d1
fadd d16, d16, d25
fmul d7, d24, d1
fadd d7, d16, d7
fmul d16, d6, d17
fadd d7, d16, d7
ldr d1, [sp, #9744] ; 8-byte Folded Reload
fmul d16, d10, d1
fadd d7, d16, d7
fmov d26, d0
fmul d16, d0, d18
ldr d0, [sp, #10824] ; 8-byte Folded Reload
fadd d16, d0, d16
fmul d24, d5, d1
fmov d25, d5
fadd d16, d24, d16
ldr d24, [sp, #12168] ; 8-byte Folded Reload
fmul d16, d16, d24
fadd d7, d16, d7
fmul d16, d2, d20
fmov d20, d2
fadd d7, d16, d7
mov x9, #61406
movk x9, #16023, lsl #16
movk x9, #30452, lsl #32
movk x9, #16153, lsl #48
fmov d16, x9
fmul d16, d28, d16
fadd d7, d7, d16
ldr d0, [sp, #10760] ; 8-byte Folded Reload
fmul d16, d0, d22
fadd d7, d16, d7
fmul d5, d23, d3
fadd d5, d7, d5
ldr d0, [sp, #12072] ; 8-byte Folded Reload
fmul d6, d19, d0
fadd d5, d5, d6
fmul d6, d5, d30
fsub d3, d21, d6
fmul d6, d13, d11
fmul d7, d29, d12
fadd d6, d6, d7
fmul d7, d14, d8
fadd d6, d7, d6
fmul d7, d31, d9
fadd d6, d7, d6
fmul d4, d4, d30
fsub d4, d6, d4
fmul d5, d5, d15
fadd d4, d4, d5
ldr d23, [sp, #12304] ; 8-byte Folded Reload
fmul d3, d23, d3
ldr d22, [sp, #12312] ; 8-byte Folded Reload
fmul d4, d22, d4
fsub d3, d3, d4
str d3, [x8, #1496]
ldr d17, [sp, #10512] ; 8-byte Folded Reload
fmul d3, d29, d17
ldr d18, [sp, #10472] ; 8-byte Folded Reload
fmul d4, d13, d18
fsub d3, d3, d4
ldr d19, [sp, #10968] ; 8-byte Folded Reload
fmul d4, d31, d19
fsub d3, d3, d4
ldr d21, [sp, #10416] ; 8-byte Folded Reload
fmul d4, d14, d21
fadd d3, d4, d3
ldr d0, [sp, #10528] ; 8-byte Folded Reload
ldr d1, [sp, #11880] ; 8-byte Folded Reload
fmul d4, d1, d0
ldr d1, [sp, #11904] ; 8-byte Folded Reload
fmul d5, d10, d1
fadd d4, d4, d5
ldr d16, [sp, #10520] ; 8-byte Folded Reload
ldr d2, [sp, #11872] ; 8-byte Folded Reload
fmul d5, d2, d16
fadd d4, d5, d4
fmul d5, d4, d15
fsub d3, d3, d5
ldr d2, [sp, #12096] ; 8-byte Folded Reload
fmul d5, d2, d0
ldr d2, [sp, #11736] ; 8-byte Folded Reload
fmul d6, d10, d2
fadd d5, d5, d6
fmul d6, d26, d1
ldr d0, [sp, #12056] ; 8-byte Folded Reload
fadd d6, d0, d6
fmul d7, d25, d2
fadd d6, d7, d6
fmul d6, d6, d24
fadd d5, d5, d6
fmul d6, d20, d16
fadd d5, d6, d5
fmul d6, d5, d30
fsub d3, d3, d6
fmul d6, d13, d17
fmul d7, d29, d18
fadd d6, d7, d6
fmul d7, d14, d19
fadd d6, d7, d6
fmul d7, d31, d21
fadd d6, d7, d6
fmul d4, d4, d30
fsub d4, d6, d4
fmul d5, d5, d15
fadd d4, d5, d4
fmul d3, d23, d3
fmul d4, d22, d4
fsub d3, d3, d4
str d3, [x8, #1504]
ldr d18, [sp, #10408] ; 8-byte Folded Reload
fmul d3, d29, d18
ldr d19, [sp, #10400] ; 8-byte Folded Reload
fmul d4, d13, d19
fsub d3, d3, d4
ldr d16, [sp, #10592] ; 8-byte Folded Reload
fmul d4, d31, d16
fsub d3, d3, d4
ldr d17, [sp, #10584] ; 8-byte Folded Reload
fmul d4, d14, d17
fadd d3, d4, d3
ldr d1, [sp, #10608] ; 8-byte Folded Reload
fmul d4, d10, d1
ldr d0, [sp, #12000] ; 8-byte Folded Reload
fadd d4, d0, d4
ldr d0, [sp, #11840] ; 8-byte Folded Reload
fadd d4, d0, d4
fmul d5, d4, d15
fsub d3, d3, d5
ldr d0, [sp, #10872] ; 8-byte Folded Reload
fmul d5, d10, d0
ldr d2, [sp, #11984] ; 8-byte Folded Reload
fadd d5, d2, d5
fmul d6, d26, d1
ldr d1, [sp, #10600] ; 8-byte Folded Reload
fadd d6, d1, d6
fmul d7, d25, d0
fadd d6, d7, d6
fmul d2, d6, d24
fadd d2, d5, d2
ldr d0, [sp, #11824] ; 8-byte Folded Reload
fadd d2, d0, d2
fmul d5, d2, d30
fsub d3, d3, d5
fmul d3, d23, d3
fmul d5, d13, d18
fmul d6, d29, d19
fadd d5, d6, d5
fmul d6, d14, d16
fadd d5, d6, d5
fmul d6, d31, d17
fadd d5, d6, d5
fmul d1, d4, d30
fsub d1, d5, d1
fmul d0, d2, d15
fadd d0, d0, d1
fmul d0, d22, d0
fsub d0, d3, d0
str d0, [x8, #1512]
ldur d2, [x29, #-192] ; 8-byte Folded Reload
ldr d0, [sp, #1800] ; 8-byte Folded Reload
fmul d0, d2, d0
ldr d1, [sp, #9848] ; 8-byte Folded Reload
fadd d0, d1, d0
ldr d1, [sp, #1880] ; 8-byte Folded Reload
fmul d1, d27, d1
fadd d0, d1, d0
ldr d1, [sp, #6392] ; 8-byte Folded Reload
ldr d4, [sp, #1856] ; 8-byte Folded Reload
fmul d1, d1, d4
fadd d0, d1, d0
ldr d1, [sp, #6384] ; 8-byte Folded Reload
ldr d3, [sp, #1872] ; 8-byte Folded Reload
fmul d1, d1, d3
fsub d0, d0, d1
str d0, [x8, #1520]
ldr d0, [sp, #1600] ; 8-byte Folded Reload
fmul d0, d2, d0
ldr d1, [sp, #9840] ; 8-byte Folded Reload
fadd d0, d1, d0
ldr d1, [sp, #1656] ; 8-byte Folded Reload
fmul d1, d27, d1
fadd d0, d1, d0
ldr d1, [sp, #6368] ; 8-byte Folded Reload
fmul d1, d1, d4
fadd d0, d1, d0
str d0, [x8, #1528]
ldr d0, [sp, #1336] ; 8-byte Folded Reload
fmul d0, d2, d0
ldr d1, [sp, #9384] ; 8-byte Folded Reload
fadd d0, d0, d1
ldr d1, [sp, #1488] ; 8-byte Folded Reload
fmul d1, d27, d1
fadd d0, d0, d1
ldr d1, [sp, #6352] ; 8-byte Folded Reload
fmul d1, d1, d3
fsub d0, d0, d1
str d0, [x8, #1536]
ldr d0, [sp, #1072] ; 8-byte Folded Reload
fmul d0, d2, d0
ldr d1, [sp, #9376] ; 8-byte Folded Reload
fadd d0, d1, d0
ldr d1, [sp, #1224] ; 8-byte Folded Reload
fmul d1, d27, d1
fadd d0, d1, d0
ldr d1, [sp, #6312] ; 8-byte Folded Reload
fmul d1, d1, d4
fadd d0, d1, d0
ldr d1, [sp, #6304] ; 8-byte Folded Reload
fmul d1, d1, d3
fsub d0, d0, d1
str d0, [x8, #1544]
ldr d0, [sp, #544] ; 8-byte Folded Reload
fmul d0, d2, d0
ldr d1, [sp, #9368] ; 8-byte Folded Reload
fadd d0, d1, d0
ldr d1, [sp, #648] ; 8-byte Folded Reload
fmul d1, d27, d1
fadd d0, d1, d0
ldr d1, [sp, #6216] ; 8-byte Folded Reload
fmul d1, d1, d4
fadd d0, d1, d0
ldr d1, [sp, #6208] ; 8-byte Folded Reload
fmul d1, d1, d3
fsub d0, d0, d1
str d0, [x8, #1552]
ldr d0, [sp, #2048] ; 8-byte Folded Reload
fmul d0, d2, d0
ldr d1, [sp, #9984] ; 8-byte Folded Reload
fadd d0, d1, d0
ldr d1, [sp, #7016] ; 8-byte Folded Reload
fmul d1, d27, d1
fadd d0, d1, d0
ldr d1, [sp, #9672] ; 8-byte Folded Reload
fmul d1, d1, d4
fadd d0, d1, d0
ldr d1, [sp, #9664] ; 8-byte Folded Reload
fmul d1, d1, d3
fsub d0, d0, d1
str d0, [x8, #1560]
ldr d0, [sp, #7720] ; 8-byte Folded Reload
fmul d0, d2, d0
ldr d1, [sp, #11184] ; 8-byte Folded Reload
fadd d0, d1, d0
ldr d1, [sp, #7672] ; 8-byte Folded Reload
fmul d1, d27, d1
fadd d0, d1, d0
ldr d1, [sp, #10280] ; 8-byte Folded Reload
fmul d1, d1, d4
fadd d0, d1, d0
ldr d1, [sp, #10272] ; 8-byte Folded Reload
fmul d1, d1, d3
fsub d0, d0, d1
str d0, [x8, #1568]
ldr d0, [sp, #10536] ; 8-byte Folded Reload
fmul d0, d2, d0
ldr d1, [sp, #12048] ; 8-byte Folded Reload
fadd d0, d1, d0
ldr d1, [sp, #11128] ; 8-byte Folded Reload
fmul d1, d27, d1
fadd d0, d1, d0
str d0, [x8, #1576]
ldr d0, [sp, #11096] ; 8-byte Folded Reload
fmul d0, d27, d0
ldr d1, [sp, #11912] ; 8-byte Folded Reload
fsub d0, d0, d1
str d0, [x8, #1584]
LBB19_94:
mov x9, #64744
movk x9, #21380, lsl #16
movk x9, #23316, lsl #32
movk x9, #16194, lsl #48
fmov d0, x9
ldr d5, [sp, #6184] ; 8-byte Folded Reload
fmul d20, d5, d0
ldr d0, [sp, #4448] ; 8-byte Folded Reload
ldr d1, [sp, #6504] ; 8-byte Folded Reload
fsub d0, d1, d0
ldr d1, [sp, #10776] ; 8-byte Folded Reload
fmul d1, d1, d0
stur d1, [x29, #-240] ; 8-byte Folded Spill
ldr d12, [sp, #12200] ; 8-byte Folded Reload
fmul d1, d12, d0
ldr d2, [sp, #11864] ; 8-byte Folded Reload
ldr d4, [sp, #6176] ; 8-byte Folded Reload
fmul d2, d2, d4
fadd d1, d1, d2
ldur d2, [x29, #-216] ; 8-byte Folded Reload
ldr d6, [sp, #6168] ; 8-byte Folded Reload
fmul d2, d2, d6
fadd d1, d2, d1
mov x9, #40862
movk x9, #31695, lsl #16
movk x9, #12355, lsl #32
movk x9, #16198, lsl #48
fmov d2, x9
fmul d9, d1, d2
ldr d31, [sp, #12160] ; 8-byte Folded Reload
fmul d2, d31, d0
ldr d3, [sp, #12016] ; 8-byte Folded Reload
fmul d3, d3, d4
fadd d2, d2, d3
ldr d3, [sp, #12336] ; 8-byte Folded Reload
fmul d3, d3, d6
fadd d2, d3, d2
mov x9, #45724
movk x9, #42429, lsl #16
movk x9, #11379, lsl #32
movk x9, #48937, lsl #48
fmov d3, x9
fmul d10, d2, d3
mov x9, #61406
movk x9, #16023, lsl #16
movk x9, #30452, lsl #32
movk x9, #16153, lsl #48
fmov d4, x9
fmul d22, d5, d4
ldr d4, [sp, #10784] ; 8-byte Folded Reload
fmul d23, d4, d0
fmul d0, d1, d3
stur d0, [x29, #-232] ; 8-byte Folded Spill
mov x9, #45033
movk x9, #40035, lsl #16
movk x9, #524, lsl #32
movk x9, #48971, lsl #48
fmov d0, x9
fmul d0, d2, d0
stur d0, [x29, #-248] ; 8-byte Folded Spill
ldr d14, [sp, #4456] ; 8-byte Folded Reload
cbz x8, LBB19_96
; %bb.95:
ldr d25, [sp, #12136] ; 8-byte Folded Reload
ldr d28, [sp, #1840] ; 8-byte Folded Reload
fmul d0, d25, d28
ldr d6, [sp, #11896] ; 8-byte Folded Reload
ldr d29, [sp, #1832] ; 8-byte Folded Reload
fmul d1, d6, d29
fsub d0, d0, d1
ldr d7, [sp, #11760] ; 8-byte Folded Reload
ldr d18, [sp, #1920] ; 8-byte Folded Reload
fmul d1, d7, d18
fsub d0, d0, d1
ldr d16, [sp, #11496] ; 8-byte Folded Reload
ldr d26, [sp, #1912] ; 8-byte Folded Reload
fmul d1, d16, d26
fadd d0, d1, d0
ldr d1, [sp, #11888] ; 8-byte Folded Reload
ldr d2, [sp, #5176] ; 8-byte Folded Reload
fmul d1, d2, d1
ldr d2, [sp, #12320] ; 8-byte Folded Reload
ldr d3, [sp, #6712] ; 8-byte Folded Reload
fmul d2, d3, d2
ldr d3, [sp, #9632] ; 8-byte Folded Reload
ldr d4, [sp, #10768] ; 8-byte Folded Reload
fmul d3, d3, d4
fadd d2, d2, d3
fsub d1, d1, d2
ldr d2, [sp, #9640] ; 8-byte Folded Reload
ldr d3, [sp, #10112] ; 8-byte Folded Reload
fmul d2, d2, d3
fsub d1, d1, d2
mov x9, #62994
movk x9, #14722, lsl #16
movk x9, #41829, lsl #32
movk x9, #16247, lsl #48
fmov d2, x9
fmul d2, d1, d2
ldr d3, [sp, #11976] ; 8-byte Folded Reload
ldr d5, [sp, #5224] ; 8-byte Folded Reload
fmul d3, d3, d5
fadd d2, d3, d2
ldr d17, [sp, #6664] ; 8-byte Folded Reload
fmul d3, d14, d17
fadd d2, d3, d2
ldr d3, [sp, #12064] ; 8-byte Folded Reload
ldr d27, [sp, #1848] ; 8-byte Folded Reload
fmul d3, d3, d27
fadd d2, d3, d2
fadd d2, d20, d2
ldur d3, [x29, #-240] ; 8-byte Folded Reload
fadd d2, d3, d2
fadd d2, d9, d2
fadd d2, d10, d2
mov x9, #4359484439294640128
fmov d3, x9
fmul d4, d2, d3
fadd d0, d0, d4
mov x9, #56877
movk x9, #10885, lsl #16
movk x9, #2572, lsl #32
movk x9, #16289, lsl #48
fmov d4, x9
fmul d1, d1, d4
ldr d4, [sp, #12192] ; 8-byte Folded Reload
fmul d4, d4, d5
fadd d1, d4, d1
ldr d19, [sp, #6656] ; 8-byte Folded Reload
fmul d4, d14, d19
fadd d1, d4, d1
fmul d4, d12, d17
ldr d5, [sp, #6720] ; 8-byte Folded Reload
fadd d4, d5, d4
ldr d5, [sp, #12160] ; 8-byte Folded Reload
fmul d5, d5, d19
fadd d4, d5, d4
mov x9, #-7378697629483820647
movk x9, #39322
movk x9, #16297, lsl #48
fmov d5, x9
fmul d4, d4, d5
fadd d1, d4, d1
ldr d4, [sp, #12104] ; 8-byte Folded Reload
fmul d4, d4, d27
fadd d1, d4, d1
fsub d1, d1, d22
fadd d1, d23, d1
ldur d4, [x29, #-232] ; 8-byte Folded Reload
fadd d1, d4, d1
ldur d4, [x29, #-248] ; 8-byte Folded Reload
fadd d1, d4, d1
mov x9, #4354980839667269632
fmov d4, x9
fmul d5, d1, d4
fadd d0, d5, d0
fmul d5, d6, d28
fmul d6, d25, d29
fadd d5, d5, d6
fmul d6, d16, d18
fadd d5, d6, d5
fmul d6, d7, d26
fadd d5, d6, d5
fmul d2, d2, d4
fadd d2, d2, d5
ldr d12, [sp, #12200] ; 8-byte Folded Reload
ldr d31, [sp, #12160] ; 8-byte Folded Reload
fmul d1, d1, d3
fsub d1, d2, d1
ldur d2, [x29, #-208] ; 8-byte Folded Reload
fmul d0, d2, d0
ldur d2, [x29, #-200] ; 8-byte Folded Reload
fmul d1, d2, d1
fsub d0, d0, d1
str d0, [x8, #1592]
LBB19_96:
ldr x8, [x19]
ldr d25, [sp, #11416] ; 8-byte Folded Reload
ldr d27, [sp, #11296] ; 8-byte Folded Reload
cbz x8, LBB19_98
; %bb.97:
ldur d0, [x29, #-184] ; 8-byte Folded Reload
ldr d1, [sp, #11424] ; 8-byte Folded Reload
fmul d0, d0, d1
ldr d1, [sp, #6504] ; 8-byte Folded Reload
fadd d0, d0, d1
ldr d1, [sp, #9880] ; 8-byte Folded Reload
fsub d0, d0, d1
str d0, [sp, #12176] ; 8-byte Folded Spill
ldr d1, [sp, #12136] ; 8-byte Folded Reload
ldr d27, [sp, #1640] ; 8-byte Folded Reload
fmul d0, d1, d27
fmov d6, d1
ldr d26, [sp, #11896] ; 8-byte Folded Reload
ldr d28, [sp, #1632] ; 8-byte Folded Reload
fmul d1, d26, d28
fsub d0, d0, d1
ldr d15, [sp, #11760] ; 8-byte Folded Reload
ldr d17, [sp, #1752] ; 8-byte Folded Reload
fmul d1, d15, d17
fsub d0, d0, d1
ldr d24, [sp, #11496] ; 8-byte Folded Reload
ldr d18, [sp, #1744] ; 8-byte Folded Reload
fmul d1, d24, d18
fadd d1, d1, d0
ldr d0, [sp, #5112] ; 8-byte Folded Reload
ldr d2, [sp, #12320] ; 8-byte Folded Reload
fmul d0, d0, d2
ldr d2, [sp, #5208] ; 8-byte Folded Reload
fsub d2, d2, d0
mov x9, #62994
movk x9, #14722, lsl #16
movk x9, #41829, lsl #32
movk x9, #16247, lsl #48
stur d9, [x29, #-256] ; 8-byte Folded Spill
fmov d3, x9
fmul d0, d2, d3
fmov d29, d3
ldr d21, [sp, #11976] ; 8-byte Folded Reload
ldr d5, [sp, #1784] ; 8-byte Folded Reload
fmul d3, d21, d5
fadd d0, d3, d0
ldr d7, [sp, #6512] ; 8-byte Folded Reload
fmul d3, d14, d7
fadd d0, d3, d0
ldr d30, [sp, #12064] ; 8-byte Folded Reload
ldr d19, [sp, #1648] ; 8-byte Folded Reload
fmul d3, d30, d19
fadd d3, d3, d0
mov x9, #4359484439294640128
fmov d13, x9
fmul d4, d3, d13
fadd d4, d1, d4
mov x9, #56877
movk x9, #10885, lsl #16
movk x9, #2572, lsl #32
movk x9, #16289, lsl #48
str d10, [sp, #12312] ; 8-byte Folded Spill
fmov d0, x9
fmul d1, d2, d0
fmov d10, d0
ldr d0, [sp, #12192] ; 8-byte Folded Reload
fmul d2, d0, d5
fmov d25, d0
fadd d1, d2, d1
ldr d16, [sp, #5104] ; 8-byte Folded Reload
fmul d2, d14, d16
fadd d1, d2, d1
fmul d2, d12, d7
ldr d5, [sp, #5120] ; 8-byte Folded Reload
fadd d2, d5, d2
fmul d5, d31, d16
fadd d5, d5, d2
mov x9, #-7378697629483820647
movk x9, #39322
movk x9, #16297, lsl #48
fmov d0, x9
fmul d5, d5, d0
fmov d2, d0
stur d0, [x29, #-184] ; 8-byte Folded Spill
fadd d1, d5, d1
ldr d0, [sp, #12104] ; 8-byte Folded Reload
fmul d5, d0, d19
fadd d5, d5, d1
mov x9, #4354980839667269632
fmov d11, x9
fmul d7, d5, d11
fadd d4, d4, d7
fmul d7, d26, d27
fmul d16, d6, d28
fadd d7, d7, d16
fmul d16, d24, d17
fadd d7, d16, d7
fmul d16, d15, d18
fadd d7, d16, d7
fmul d3, d3, d11
fadd d3, d7, d3
fmul d5, d5, d13
fsub d3, d3, d5
ldp d1, d0, [x29, #-208] ; 16-byte Folded Reload
fmul d4, d1, d4
fmul d3, d0, d3
fsub d3, d4, d3
str d3, [x8, #1600]
str d22, [sp, #12296] ; 8-byte Folded Spill
ldr d22, [sp, #1480] ; 8-byte Folded Reload
fmul d3, d6, d22
str d23, [sp, #12288] ; 8-byte Folded Spill
ldr d23, [sp, #1472] ; 8-byte Folded Reload
fmul d4, d26, d23
fsub d3, d3, d4
ldr d19, [sp, #1536] ; 8-byte Folded Reload
fmul d4, d15, d19
fsub d3, d3, d4
str d20, [sp, #12280] ; 8-byte Folded Spill
ldr d20, [sp, #1528] ; 8-byte Folded Reload
fmul d4, d24, d20
fadd d4, d4, d3
ldr d6, [sp, #11888] ; 8-byte Folded Reload
ldr d3, [sp, #4936] ; 8-byte Folded Reload
fmul d3, d3, d6
ldr d5, [sp, #5216] ; 8-byte Folded Reload
fsub d7, d3, d5
ldr d18, [sp, #4976] ; 8-byte Folded Reload
fmul d0, d21, d18
fmov d8, d21
str d0, [sp, #12240] ; 8-byte Folded Spill
fmov d17, d29
fmul d5, d7, d29
fsub d5, d5, d0
ldr d29, [sp, #4960] ; 8-byte Folded Reload
fmul d16, d14, d29
fadd d16, d16, d5
ldr d21, [sp, #4944] ; 8-byte Folded Reload
fmov d3, d30
fmul d0, d30, d21
str d0, [sp, #12208] ; 8-byte Folded Spill
fsub d16, d16, d0
fmul d27, d16, d13
fadd d27, d4, d27
fmul d0, d25, d18
str d0, [sp, #12232] ; 8-byte Folded Spill
fmul d7, d7, d10
fmov d25, d10
str d10, [sp, #12304] ; 8-byte Folded Spill
fsub d7, d7, d0
ldr d30, [sp, #4952] ; 8-byte Folded Reload
fmul d28, d14, d30
fadd d7, d28, d7
fmul d28, d12, d29
ldr d18, [sp, #4984] ; 8-byte Folded Reload
fadd d28, d18, d28
fmul d29, d31, d30
fadd d28, d29, d28
fmul d28, d28, d2
fadd d28, d28, d7
ldr d10, [sp, #12104] ; 8-byte Folded Reload
fmul d0, d10, d21
str d0, [sp, #12184] ; 8-byte Folded Spill
fsub d28, d28, d0
fmul d29, d28, d11
fadd d27, d27, d29
fmul d29, d26, d22
ldr d2, [sp, #12136] ; 8-byte Folded Reload
fmul d30, d2, d23
fadd d29, d30, d29
fmov d0, d24
fmul d30, d24, d19
fadd d29, d29, d30
fmul d30, d15, d20
fadd d29, d30, d29
fmul d16, d16, d11
fadd d16, d29, d16
fmul d28, d28, d13
fsub d16, d16, d28
fmul d27, d1, d27
ldur d7, [x29, #-200] ; 8-byte Folded Reload
fmul d16, d7, d16
fsub d16, d27, d16
str d16, [x8, #1608]
ldr d9, [sp, #824] ; 8-byte Folded Reload
fmul d16, d2, d9
ldr d2, [sp, #792] ; 8-byte Folded Reload
fmul d27, d26, d2
fsub d16, d16, d27
ldr d5, [sp, #936] ; 8-byte Folded Reload
fmul d27, d15, d5
fsub d16, d16, d27
ldr d24, [sp, #888] ; 8-byte Folded Reload
fmul d27, d0, d24
fadd d16, d27, d16
ldr d18, [sp, #4768] ; 8-byte Folded Reload
ldr d1, [sp, #12320] ; 8-byte Folded Reload
fmul d27, d18, d1
ldr d18, [sp, #8728] ; 8-byte Folded Reload
ldr d0, [sp, #10768] ; 8-byte Folded Reload
fmul d28, d18, d0
fadd d27, d27, d28
ldr d0, [sp, #4728] ; 8-byte Folded Reload
fmul d28, d0, d6
fsub d27, d28, d27
ldr d20, [sp, #8720] ; 8-byte Folded Reload
ldr d0, [sp, #10112] ; 8-byte Folded Reload
fmul d28, d20, d0
fsub d27, d27, d28
fmul d28, d27, d17
fmov d26, d17
str d17, [sp, #12168] ; 8-byte Folded Spill
ldr d20, [sp, #1056] ; 8-byte Folded Reload
fmul d29, d8, d20
fadd d28, d29, d28
ldr d22, [sp, #4752] ; 8-byte Folded Reload
fmul d29, d14, d22
fadd d28, d29, d28
ldr d0, [sp, #872] ; 8-byte Folded Reload
fmul d29, d3, d0
fadd d28, d29, d28
ldr d21, [sp, #12280] ; 8-byte Folded Reload
fadd d28, d21, d28
ldur d3, [x29, #-240] ; 8-byte Folded Reload
fadd d28, d3, d28
ldur d15, [x29, #-256] ; 8-byte Folded Reload
fadd d28, d15, d28
ldr d4, [sp, #12312] ; 8-byte Folded Reload
fadd d28, d4, d28
fmul d29, d28, d13
fadd d16, d16, d29
fmul d27, d27, d25
ldr d6, [sp, #12192] ; 8-byte Folded Reload
fmul d29, d6, d20
fadd d27, d29, d27
ldr d23, [sp, #4744] ; 8-byte Folded Reload
fmul d29, d14, d23
fadd d27, d29, d27
fmul d29, d12, d22
ldr d20, [sp, #4784] ; 8-byte Folded Reload
fadd d29, d20, d29
fmul d30, d31, d23
fadd d29, d30, d29
ldur d12, [x29, #-184] ; 8-byte Folded Reload
fmul d29, d29, d12
fadd d27, d29, d27
fmul d29, d10, d0
fadd d27, d29, d27
ldr d17, [sp, #12296] ; 8-byte Folded Reload
fsub d27, d27, d17
ldr d18, [sp, #12288] ; 8-byte Folded Reload
fadd d27, d18, d27
ldur d0, [x29, #-232] ; 8-byte Folded Reload
fadd d27, d0, d27
ldur d19, [x29, #-248] ; 8-byte Folded Reload
fadd d27, d19, d27
fmul d29, d27, d11
fadd d16, d29, d16
ldr d8, [sp, #11896] ; 8-byte Folded Reload
fmul d29, d8, d9
ldr d3, [sp, #12136] ; 8-byte Folded Reload
fmul d30, d3, d2
fadd d29, d29, d30
ldr d2, [sp, #11496] ; 8-byte Folded Reload
fmul d30, d2, d5
fadd d29, d30, d29
ldr d5, [sp, #11760] ; 8-byte Folded Reload
fmul d30, d5, d24
fadd d29, d30, d29
fmul d28, d28, d11
fadd d28, d28, d29
fmul d27, d27, d13
fsub d27, d28, d27
ldur d0, [x29, #-208] ; 8-byte Folded Reload
fmul d16, d0, d16
fmul d27, d7, d27
fmov d9, d7
fsub d16, d16, d27
str d16, [x8, #1616]
ldr d23, [sp, #2040] ; 8-byte Folded Reload
fmul d16, d3, d23
ldr d24, [sp, #1976] ; 8-byte Folded Reload
fmul d27, d8, d24
fsub d16, d16, d27
ldr d7, [sp, #520] ; 8-byte Folded Reload
fmul d27, d5, d7
fsub d16, d16, d27
ldr d25, [sp, #512] ; 8-byte Folded Reload
fmul d27, d2, d25
fmov d31, d2
fadd d16, d27, d16
ldr d0, [sp, #4664] ; 8-byte Folded Reload
fmul d27, d0, d1
ldr d0, [sp, #8680] ; 8-byte Folded Reload
ldr d1, [sp, #10768] ; 8-byte Folded Reload
fmul d28, d0, d1
fadd d27, d27, d28
ldr d0, [sp, #4624] ; 8-byte Folded Reload
ldr d1, [sp, #11888] ; 8-byte Folded Reload
fmul d28, d0, d1
fsub d27, d28, d27
ldr d0, [sp, #8672] ; 8-byte Folded Reload
ldr d10, [sp, #10112] ; 8-byte Folded Reload
fmul d28, d0, d10
fsub d27, d27, d28
fmul d28, d27, d26
ldr d0, [sp, #552] ; 8-byte Folded Reload
ldr d3, [sp, #11976] ; 8-byte Folded Reload
fmul d29, d3, d0
fadd d28, d29, d28
ldr d2, [sp, #4648] ; 8-byte Folded Reload
fmul d29, d14, d2
fadd d28, d29, d28
ldr d22, [sp, #472] ; 8-byte Folded Reload
ldr d5, [sp, #12064] ; 8-byte Folded Reload
fmul d29, d5, d22
fadd d28, d29, d28
fadd d28, d21, d28
ldur d20, [x29, #-240] ; 8-byte Folded Reload
fadd d28, d20, d28
fadd d28, d15, d28
fadd d28, d4, d28
fmul d29, d28, d13
fadd d16, d16, d29
ldr d21, [sp, #12304] ; 8-byte Folded Reload
fmul d27, d27, d21
fmul d29, d6, d0
fadd d27, d29, d27
ldr d20, [sp, #4640] ; 8-byte Folded Reload
fmul d29, d14, d20
fadd d27, d29, d27
ldr d4, [sp, #12200] ; 8-byte Folded Reload
fmul d29, d4, d2
ldr d0, [sp, #4680] ; 8-byte Folded Reload
fadd d29, d0, d29
ldr d15, [sp, #12160] ; 8-byte Folded Reload
fmul d30, d15, d20
fadd d29, d30, d29
fmov d20, d12
fmul d29, d29, d12
fadd d27, d29, d27
ldr d12, [sp, #12104] ; 8-byte Folded Reload
fmul d29, d12, d22
fadd d27, d29, d27
fsub d27, d27, d17
fadd d27, d18, d27
ldur d0, [x29, #-232] ; 8-byte Folded Reload
fadd d27, d0, d27
fadd d27, d19, d27
fmul d29, d27, d11
fadd d16, d29, d16
fmul d29, d8, d23
ldr d0, [sp, #12136] ; 8-byte Folded Reload
fmul d30, d0, d24
fadd d29, d29, d30
fmul d30, d31, d7
fadd d29, d30, d29
ldr d2, [sp, #11760] ; 8-byte Folded Reload
fmul d30, d2, d25
fadd d29, d30, d29
fmul d28, d28, d11
fadd d28, d28, d29
fmul d27, d27, d13
fsub d27, d28, d27
ldur d7, [x29, #-208] ; 8-byte Folded Reload
fmul d16, d7, d16
fmul d27, d9, d27
fsub d16, d16, d27
str d16, [x8, #1624]
ldr d26, [sp, #7624] ; 8-byte Folded Reload
fmul d16, d0, d26
ldr d25, [sp, #7616] ; 8-byte Folded Reload
fmul d27, d8, d25
fsub d16, d16, d27
ldr d22, [sp, #7664] ; 8-byte Folded Reload
fmul d27, d2, d22
fsub d16, d16, d27
ldr d30, [sp, #7608] ; 8-byte Folded Reload
fmul d27, d31, d30
fadd d16, d27, d16
ldr d0, [sp, #9056] ; 8-byte Folded Reload
fmul d27, d0, d1
ldr d0, [sp, #9168] ; 8-byte Folded Reload
ldr d1, [sp, #12320] ; 8-byte Folded Reload
fmul d28, d0, d1
ldr d0, [sp, #10384] ; 8-byte Folded Reload
ldr d2, [sp, #10768] ; 8-byte Folded Reload
fmul d29, d0, d2
fadd d28, d28, d29
fsub d27, d27, d28
ldr d0, [sp, #10376] ; 8-byte Folded Reload
fmul d28, d0, d10
fsub d27, d27, d28
ldr d9, [sp, #12168] ; 8-byte Folded Reload
fmul d28, d27, d9
ldr d0, [sp, #7560] ; 8-byte Folded Reload
fmul d29, d3, d0
fadd d28, d29, d28
ldr d1, [sp, #9688] ; 8-byte Folded Reload
fmul d29, d14, d1
fadd d28, d29, d28
ldr d6, [sp, #7192] ; 8-byte Folded Reload
fmul d29, d5, d6
fadd d28, d29, d28
ldr d2, [sp, #12280] ; 8-byte Folded Reload
fadd d17, d2, d28
ldur d2, [x29, #-240] ; 8-byte Folded Reload
fadd d17, d2, d17
ldur d2, [x29, #-256] ; 8-byte Folded Reload
fadd d17, d2, d17
ldr d2, [sp, #12312] ; 8-byte Folded Reload
fadd d17, d2, d17
fmul d18, d17, d13
fadd d16, d16, d18
fmul d18, d27, d21
ldr d2, [sp, #12192] ; 8-byte Folded Reload
fmul d23, d2, d0
fadd d18, d23, d18
ldr d2, [sp, #9680] ; 8-byte Folded Reload
fmul d23, d14, d2
fadd d18, d23, d18
fmul d23, d4, d1
fmov d21, d4
ldr d0, [sp, #8784] ; 8-byte Folded Reload
fadd d23, d0, d23
fmul d24, d15, d2
fadd d23, d24, d23
fmul d23, d23, d20
fadd d18, d23, d18
fmul d23, d12, d6
fadd d18, d23, d18
ldr d0, [sp, #12296] ; 8-byte Folded Reload
fsub d18, d18, d0
ldr d0, [sp, #12288] ; 8-byte Folded Reload
fadd d18, d0, d18
ldur d0, [x29, #-232] ; 8-byte Folded Reload
fadd d18, d0, d18
fadd d18, d19, d18
fmul d19, d18, d11
fadd d16, d19, d16
fmul d19, d8, d26
ldr d26, [sp, #12136] ; 8-byte Folded Reload
fmul d20, d26, d25
fadd d19, d19, d20
fmul d20, d31, d22
fadd d19, d20, d19
ldr d25, [sp, #11760] ; 8-byte Folded Reload
fmul d20, d25, d30
fadd d19, d20, d19
fmul d17, d17, d11
fadd d17, d17, d19
fmul d18, d18, d13
fsub d17, d17, d18
fmul d16, d7, d16
ldur d0, [x29, #-200] ; 8-byte Folded Reload
fmul d17, d0, d17
fsub d16, d16, d17
str d16, [x8, #1632]
ldr d4, [sp, #10336] ; 8-byte Folded Reload
fmul d16, d26, d4
ldr d5, [sp, #10328] ; 8-byte Folded Reload
fmul d17, d8, d5
fsub d16, d16, d17
ldr d30, [sp, #7896] ; 8-byte Folded Reload
fmul d17, d25, d30
fsub d16, d16, d17
ldr d10, [sp, #7888] ; 8-byte Folded Reload
fmul d17, d31, d10
fadd d16, d17, d16
ldr d24, [sp, #10192] ; 8-byte Folded Reload
fmov d2, d3
fmul d17, d3, d24
ldr d3, [sp, #10200] ; 8-byte Folded Reload
ldr d7, [sp, #12064] ; 8-byte Folded Reload
fmul d18, d7, d3
fadd d17, d17, d18
mov x9, #11213
movk x9, #64899, lsl #16
movk x9, #2195, lsl #32
movk x9, #49148, lsl #48
fmov d18, x9
ldur d27, [x29, #-168] ; 8-byte Folded Reload
fmul d18, d27, d18
fmov d1, d9
fmul d19, d18, d9
fadd d17, d19, d17
ldr d6, [sp, #5192] ; 8-byte Folded Reload
ldr d0, [sp, #11520] ; 8-byte Folded Reload
fmul d19, d0, d6
ldr d6, [sp, #6728] ; 8-byte Folded Reload
ldr d0, [sp, #11512] ; 8-byte Folded Reload
fmul d20, d0, d6
fsub d19, d19, d20
fmul d20, d19, d9
fadd d17, d17, d20
ldr d1, [sp, #7904] ; 8-byte Folded Reload
fmul d20, d2, d1
fadd d17, d20, d17
ldr d28, [sp, #10440] ; 8-byte Folded Reload
fmul d20, d14, d28
fadd d17, d20, d17
ldr d9, [sp, #10320] ; 8-byte Folded Reload
fmul d20, d7, d9
fadd d17, d20, d17
mov x9, #64744
movk x9, #21380, lsl #16
movk x9, #23316, lsl #32
movk x9, #16194, lsl #48
fmov d20, x9
ldr d29, [sp, #10432] ; 8-byte Folded Reload
fmul d20, d29, d20
fadd d17, d20, d17
ldr d6, [sp, #10776] ; 8-byte Folded Reload
ldr d2, [sp, #12176] ; 8-byte Folded Reload
fmul d20, d6, d2
fadd d17, d20, d17
ldr d20, [sp, #11864] ; 8-byte Folded Reload
ldr d6, [sp, #10424] ; 8-byte Folded Reload
fmul d20, d20, d6
fmov d0, d21
fmul d21, d21, d2
fadd d20, d21, d20
ldur d21, [x29, #-216] ; 8-byte Folded Reload
ldr d23, [sp, #10176] ; 8-byte Folded Reload
fmul d21, d21, d23
fadd d20, d21, d20
mov x9, #40862
movk x9, #31695, lsl #16
movk x9, #12355, lsl #32
movk x9, #16198, lsl #48
fmov d21, x9
fmul d21, d20, d21
fadd d17, d21, d17
ldr d21, [sp, #12016] ; 8-byte Folded Reload
fmul d21, d21, d6
fmul d22, d15, d2
fadd d21, d22, d21
ldr d22, [sp, #12336] ; 8-byte Folded Reload
fmul d22, d22, d23
fadd d21, d22, d21
mov x9, #45724
movk x9, #42429, lsl #16
movk x9, #11379, lsl #32
movk x9, #16169, lsl #48
fmov d22, x9
fmul d23, d21, d22
fsub d17, d17, d23
fmul d23, d17, d13
fadd d16, d16, d23
ldr d7, [sp, #12192] ; 8-byte Folded Reload
fmul d23, d7, d24
fmul d24, d12, d3
fadd d23, d23, d24
ldr d3, [sp, #12304] ; 8-byte Folded Reload
fmul d18, d18, d3
fadd d18, d18, d23
fmul d19, d19, d3
fadd d18, d18, d19
fmul d19, d7, d1
fadd d18, d19, d18
ldr d1, [sp, #10184] ; 8-byte Folded Reload
fmul d19, d14, d1
fadd d18, d19, d18
fmov d24, d0
fmul d19, d0, d28
ldr d0, [sp, #10896] ; 8-byte Folded Reload
fadd d19, d0, d19
fmul d23, d15, d1
fadd d19, d23, d19
ldur d23, [x29, #-184] ; 8-byte Folded Reload
fmul d19, d19, d23
fadd d18, d19, d18
fmul d19, d12, d9
fadd d18, d19, d18
mov x9, #61406
movk x9, #16023, lsl #16
movk x9, #30452, lsl #32
movk x9, #48921, lsl #48
fmov d19, x9
fmul d19, d29, d19
fadd d18, d18, d19
ldr d6, [sp, #10784] ; 8-byte Folded Reload
fmul d6, d6, d2
fadd d6, d6, d18
fmul d18, d20, d22
fsub d6, d6, d18
mov x9, #45033
movk x9, #40035, lsl #16
movk x9, #524, lsl #32
movk x9, #48971, lsl #48
fmov d18, x9
fmul d18, d21, d18
fadd d6, d6, d18
fmul d18, d6, d11
fadd d16, d16, d18
fmul d18, d8, d4
fmul d19, d26, d5
fadd d18, d18, d19
fmul d19, d31, d30
fadd d18, d19, d18
fmul d19, d25, d10
fadd d18, d19, d18
fmul d17, d17, d11
fadd d17, d18, d17
fmul d6, d6, d13
fsub d6, d17, d6
ldp d28, d22, [x29, #-208] ; 16-byte Folded Reload
fmul d16, d28, d16
fmul d6, d22, d6
fsub d6, d16, d6
str d6, [x8, #1640]
ldr d4, [sp, #9952] ; 8-byte Folded Reload
fmul d6, d26, d4
ldr d5, [sp, #9944] ; 8-byte Folded Reload
fmul d16, d8, d5
fsub d6, d6, d16
ldr d20, [sp, #9936] ; 8-byte Folded Reload
fmul d16, d25, d20
fsub d6, d6, d16
ldr d21, [sp, #9888] ; 8-byte Folded Reload
fmul d16, d31, d21
fadd d6, d16, d6
ldr d0, [sp, #10312] ; 8-byte Folded Reload
ldr d1, [sp, #11976] ; 8-byte Folded Reload
fmul d16, d1, d0
ldr d1, [sp, #10960] ; 8-byte Folded Reload
fmul d17, d14, d1
fadd d16, d16, d17
ldr d3, [sp, #10304] ; 8-byte Folded Reload
ldr d2, [sp, #12064] ; 8-byte Folded Reload
fmul d17, d2, d3
fadd d16, d17, d16
fmul d17, d16, d13
fadd d6, d6, d17
fmul d17, d7, d0
ldr d2, [sp, #10952] ; 8-byte Folded Reload
fmul d18, d14, d2
fadd d17, d17, d18
fmul d18, d24, d1
ldr d0, [sp, #10976] ; 8-byte Folded Reload
fadd d18, d0, d18
fmul d19, d15, d2
fadd d18, d19, d18
fmul d18, d18, d23
fadd d17, d17, d18
fmul d18, d12, d3
fadd d17, d18, d17
fmul d18, d17, d11
fadd d6, d18, d6
fmul d18, d8, d4
fmul d19, d26, d5
fadd d18, d19, d18
fmul d19, d31, d20
fadd d18, d19, d18
fmul d19, d25, d21
fadd d18, d19, d18
fmul d16, d16, d11
fadd d16, d16, d18
fmul d17, d17, d13
fsub d16, d16, d17
fmul d6, d28, d6
fmul d16, d22, d16
fsub d6, d6, d16
str d6, [x8, #1648]
ldr d19, [sp, #9280] ; 8-byte Folded Reload
fmul d6, d26, d19
ldr d20, [sp, #9272] ; 8-byte Folded Reload
fmul d16, d8, d20
fsub d6, d6, d16
ldr d17, [sp, #9296] ; 8-byte Folded Reload
fmul d16, d25, d17
fsub d6, d6, d16
ldr d18, [sp, #9288] ; 8-byte Folded Reload
fmul d16, d31, d18
fadd d6, d16, d6
ldr d1, [sp, #9344] ; 8-byte Folded Reload
fmul d16, d14, d1
ldr d0, [sp, #12240] ; 8-byte Folded Reload
fadd d3, d0, d16
ldr d0, [sp, #12208] ; 8-byte Folded Reload
fadd d3, d0, d3
fmul d5, d3, d13
fadd d5, d6, d5
ldr d0, [sp, #9352] ; 8-byte Folded Reload
fmul d6, d14, d0
ldr d2, [sp, #12232] ; 8-byte Folded Reload
fadd d4, d2, d6
fmul d6, d24, d1
ldr d1, [sp, #9336] ; 8-byte Folded Reload
fadd d6, d1, d6
fmul d16, d15, d0
fadd d6, d16, d6
fmul d2, d6, d23
fadd d2, d4, d2
ldr d0, [sp, #12184] ; 8-byte Folded Reload
fadd d2, d0, d2
fmul d4, d2, d11
fadd d4, d4, d5
fmul d4, d28, d4
fmul d5, d8, d19
fmul d6, d26, d20
fadd d5, d6, d5
fmul d6, d31, d17
fadd d5, d6, d5
fmul d6, d25, d18
ldr d25, [sp, #11416] ; 8-byte Folded Reload
fadd d5, d6, d5
fmul d1, d3, d11
fadd d1, d1, d5
fmul d0, d2, d13
fsub d0, d1, d0
fmul d0, d22, d0
fsub d0, d4, d0
str d0, [x8, #1656]
ldur d2, [x29, #-176] ; 8-byte Folded Reload
ldr d0, [sp, #1864] ; 8-byte Folded Reload
fmul d0, d2, d0
ldr d1, [sp, #11280] ; 8-byte Folded Reload
fadd d0, d1, d0
ldr d1, [sp, #1944] ; 8-byte Folded Reload
fmul d1, d27, d1
fadd d0, d1, d0
ldr d1, [sp, #6408] ; 8-byte Folded Reload
ldr d4, [sp, #1928] ; 8-byte Folded Reload
fmul d1, d1, d4
fadd d0, d1, d0
ldr d1, [sp, #6400] ; 8-byte Folded Reload
ldr d3, [sp, #1936] ; 8-byte Folded Reload
fmul d1, d1, d3
fsub d0, d0, d1
str d0, [x8, #1664]
ldr d0, [sp, #1720] ; 8-byte Folded Reload
fmul d0, d2, d0
ldr d1, [sp, #11272] ; 8-byte Folded Reload
fadd d0, d1, d0
ldr d1, [sp, #1808] ; 8-byte Folded Reload
fmul d1, d27, d1
fadd d0, d1, d0
ldr d1, [sp, #6376] ; 8-byte Folded Reload
fmul d1, d1, d4
fadd d0, d1, d0
str d0, [x8, #1672]
ldr d0, [sp, #1544] ; 8-byte Folded Reload
fmul d0, d2, d0
ldr d1, [sp, #11240] ; 8-byte Folded Reload
fadd d0, d0, d1
ldr d1, [sp, #1624] ; 8-byte Folded Reload
fmul d1, d27, d1
fadd d0, d0, d1
ldr d1, [sp, #6360] ; 8-byte Folded Reload
fmul d1, d1, d3
fsub d0, d0, d1
str d0, [x8, #1680]
ldr d0, [sp, #1032] ; 8-byte Folded Reload
fmul d0, d2, d0
ldr d1, [sp, #11160] ; 8-byte Folded Reload
fadd d0, d1, d0
ldr d1, [sp, #1128] ; 8-byte Folded Reload
fmul d1, d27, d1
fadd d0, d1, d0
ldr d1, [sp, #6280] ; 8-byte Folded Reload
fmul d1, d1, d4
fadd d0, d1, d0
ldr d1, [sp, #6272] ; 8-byte Folded Reload
fmul d1, d1, d3
fsub d0, d0, d1
str d0, [x8, #1688]
ldr d0, [sp, #2016] ; 8-byte Folded Reload
fmul d0, d2, d0
ldr d1, [sp, #9856] ; 8-byte Folded Reload
fadd d0, d1, d0
ldr d1, [sp, #640] ; 8-byte Folded Reload
fmul d1, d27, d1
fadd d0, d1, d0
ldr d1, [sp, #6200] ; 8-byte Folded Reload
fmul d1, d1, d4
fadd d0, d1, d0
ldr d1, [sp, #6192] ; 8-byte Folded Reload
fmul d1, d1, d3
fsub d0, d0, d1
str d0, [x8, #1696]
ldr d0, [sp, #7640] ; 8-byte Folded Reload
fmul d0, d2, d0
ldr d1, [sp, #11400] ; 8-byte Folded Reload
fadd d0, d1, d0
ldr d1, [sp, #7632] ; 8-byte Folded Reload
fmul d1, d27, d1
fadd d0, d1, d0
ldr d1, [sp, #9832] ; 8-byte Folded Reload
fmul d1, d1, d4
fadd d0, d1, d0
ldr d1, [sp, #8184] ; 8-byte Folded Reload
fmul d1, d1, d3
fsub d0, d0, d1
str d0, [x8, #1704]
ldr d0, [sp, #10344] ; 8-byte Folded Reload
fmul d0, d2, d0
ldr d1, [sp, #11200] ; 8-byte Folded Reload
fadd d0, d1, d0
ldr d1, [sp, #8000] ; 8-byte Folded Reload
fmul d1, d27, d1
fadd d0, d1, d0
ldr d1, [sp, #10888] ; 8-byte Folded Reload
fmul d1, d1, d4
fadd d0, d1, d0
ldr d1, [sp, #10880] ; 8-byte Folded Reload
fmul d1, d1, d3
fsub d0, d0, d1
str d0, [x8, #1712]
ldr d0, [sp, #10392] ; 8-byte Folded Reload
fmul d0, d2, d0
ldr d1, [sp, #11752] ; 8-byte Folded Reload
fadd d0, d1, d0
ldr d1, [sp, #10296] ; 8-byte Folded Reload
fmul d1, d27, d1
fadd d0, d1, d0
str d0, [x8, #1720]
ldr d0, [sp, #10576] ; 8-byte Folded Reload
fmul d0, d27, d0
ldr d27, [sp, #11296] ; 8-byte Folded Reload
ldr d1, [sp, #11784] ; 8-byte Folded Reload
fsub d0, d0, d1
str d0, [x8, #1728]
LBB19_98:
ldr d7, [sp, #11064] ; 8-byte Folded Reload
ldr d0, [sp, #9448] ; 8-byte Folded Reload
fmul d0, d7, d0
ldr d16, [sp, #11056] ; 8-byte Folded Reload
ldr d1, [sp, #9440] ; 8-byte Folded Reload
fmul d1, d16, d1
fsub d0, d0, d1
ldr d1, [sp, #12024] ; 8-byte Folded Reload
ldr d2, [sp, #9464] ; 8-byte Folded Reload
fmul d1, d1, d2
fsub d0, d0, d1
ldr d21, [sp, #11480] ; 8-byte Folded Reload
ldr d1, [sp, #9456] ; 8-byte Folded Reload
fmul d1, d21, d1
fadd d0, d1, d0
ldr d1, [sp, #10696] ; 8-byte Folded Reload
ldr d2, [sp, #8552] ; 8-byte Folded Reload
fmul d1, d1, d2
ldr d2, [sp, #10704] ; 8-byte Folded Reload
ldr d3, [sp, #8528] ; 8-byte Folded Reload
fmul d2, d2, d3
fsub d1, d1, d2
ldr d2, [sp, #12032] ; 8-byte Folded Reload
ldr d3, [sp, #8536] ; 8-byte Folded Reload
fmul d2, d2, d3
fsub d1, d1, d2
ldr d2, [sp, #8544] ; 8-byte Folded Reload
fmul d2, d27, d2
fsub d1, d1, d2
ldr d26, [sp, #9624] ; 8-byte Folded Reload
ldr d2, [sp, #8584] ; 8-byte Folded Reload
fmul d2, d26, d2
ldr d22, [sp, #9648] ; 8-byte Folded Reload
ldr d3, [sp, #8576] ; 8-byte Folded Reload
fmul d3, d22, d3
fadd d2, d2, d3
ldr d3, [sp, #12128] ; 8-byte Folded Reload
ldr d4, [sp, #9512] ; 8-byte Folded Reload
fmul d3, d3, d4
fsub d2, d2, d3
ldr d3, [sp, #12120] ; 8-byte Folded Reload
ldr d4, [sp, #9504] ; 8-byte Folded Reload
fmul d3, d3, d4
fadd d3, d3, d2
ldr d2, [sp, #11944] ; 8-byte Folded Reload
ldr d4, [sp, #9632] ; 8-byte Folded Reload
fmul d2, d4, d2
ldr d4, [sp, #11936] ; 8-byte Folded Reload
ldr d5, [sp, #9640] ; 8-byte Folded Reload
fmul d4, d5, d4
fadd d4, d2, d4
mov x9, #54806
movk x9, #23353, lsl #16
movk x9, #56949, lsl #32
movk x9, #16326, lsl #48
fmov d5, x9
ldr d2, [sp, #10864] ; 8-byte Folded Reload
fmul d2, d2, d5
fadd d4, d4, d2
stur d4, [x29, #-184] ; 8-byte Folded Spill
fadd d3, d3, d4
ldr d4, [sp, #12344] ; 8-byte Folded Reload
ldr d6, [sp, #9480] ; 8-byte Folded Reload
fmul d4, d4, d6
fsub d3, d3, d4
ldr d4, [sp, #12272] ; 8-byte Folded Reload
ldr d6, [sp, #9472] ; 8-byte Folded Reload
fmul d4, d4, d6
fadd d3, d4, d3
ldr d4, [sp, #8744] ; 8-byte Folded Reload
fmul d4, d7, d4
ldr d6, [sp, #8424] ; 8-byte Folded Reload
fmul d6, d16, d6
fsub d4, d4, d6
ldr d6, [sp, #12080] ; 8-byte Folded Reload
ldr d7, [sp, #8440] ; 8-byte Folded Reload
fmul d6, d6, d7
fsub d4, d4, d6
ldr d6, [sp, #8432] ; 8-byte Folded Reload
ldr d20, [sp, #11408] ; 8-byte Folded Reload
fmul d6, d20, d6
fadd d4, d6, d4
ldr d6, [sp, #10680] ; 8-byte Folded Reload
ldr d7, [sp, #8472] ; 8-byte Folded Reload
fmul d6, d6, d7
ldr d7, [sp, #10688] ; 8-byte Folded Reload
ldr d16, [sp, #8456] ; 8-byte Folded Reload
fmul d7, d7, d16
fsub d6, d6, d7
ldr d7, [sp, #12112] ; 8-byte Folded Reload
ldr d16, [sp, #8464] ; 8-byte Folded Reload
fmul d7, d7, d16
fsub d6, d6, d7
ldr d23, [sp, #11304] ; 8-byte Folded Reload
ldr d7, [sp, #8448] ; 8-byte Folded Reload
fmul d7, d23, d7
fsub d6, d6, d7
ldr d7, [sp, #11288] ; 8-byte Folded Reload
ldr d16, [sp, #8504] ; 8-byte Folded Reload
fmul d7, d7, d16
ldr d16, [sp, #8496] ; 8-byte Folded Reload
ldr d15, [sp, #11048] ; 8-byte Folded Reload
fmul d16, d15, d16
fadd d7, d7, d16
ldr d16, [sp, #12264] ; 8-byte Folded Reload
ldr d17, [sp, #9432] ; 8-byte Folded Reload
fmul d16, d16, d17
fsub d7, d7, d16
ldr d16, [sp, #12256] ; 8-byte Folded Reload
ldr d17, [sp, #9424] ; 8-byte Folded Reload
fmul d16, d16, d17
fadd d7, d16, d7
ldr d16, [sp, #11928] ; 8-byte Folded Reload
ldr d17, [sp, #9560] ; 8-byte Folded Reload
fmul d16, d17, d16
ldr d17, [sp, #11920] ; 8-byte Folded Reload
ldr d18, [sp, #9568] ; 8-byte Folded Reload
fmul d17, d18, d17
fadd d17, d16, d17
ldr d16, [sp, #10264] ; 8-byte Folded Reload
fmul d16, d16, d5
fadd d5, d17, d16
stur d5, [x29, #-200] ; 8-byte Folded Spill
fadd d5, d7, d5
ldur d7, [x29, #-224] ; 8-byte Folded Reload
ldr d17, [sp, #9400] ; 8-byte Folded Reload
fmul d7, d7, d17
fsub d5, d5, d7
mov x9, #29491
movk x9, #28745, lsl #16
movk x9, #45973, lsl #32
movk x9, #16267, lsl #48
ldr d7, [sp, #12328] ; 8-byte Folded Reload
ldr d17, [sp, #9392] ; 8-byte Folded Reload
fmul d7, d7, d17
fadd d5, d7, d5
fmov d7, x9
fadd d3, d3, d7
mov x9, #14419
movk x9, #64308, lsl #16
movk x9, #365, lsl #32
movk x9, #16201, lsl #48
fadd d5, d5, d7
fmov d7, x9
fadd d3, d3, d7
stur d3, [x29, #-208] ; 8-byte Folded Spill
fadd d1, d1, d3
mov x9, #43847
movk x9, #44022, lsl #16
movk x9, #63499, lsl #32
movk x9, #16322, lsl #48
fadd d5, d5, d7
fmov d3, x9
fadd d1, d1, d3
stur d1, [x29, #-240] ; 8-byte Folded Spill
fadd d0, d0, d1
mov x9, #8448
movk x9, #17562, lsl #16
movk x9, #63752, lsl #32
movk x9, #16340, lsl #48
stur d5, [x29, #-216] ; 8-byte Folded Spill
fadd d1, d6, d5
fadd d3, d1, d3
fmov d1, x9
fadd d5, d0, d1
stur d3, [x29, #-256] ; 8-byte Folded Spill
fadd d0, d4, d3
fadd d4, d0, d1
ldr d18, [sp, #6648] ; 8-byte Folded Reload
ldr d19, [sp, #6640] ; 8-byte Folded Reload
cbz x8, LBB19_100
; %bb.99:
ldr d0, [sp, #8768] ; 8-byte Folded Reload
fmul d0, d0, d19
ldr d1, [sp, #8752] ; 8-byte Folded Reload
fmul d1, d1, d18
fadd d0, d0, d1
ldr d1, [sp, #8656] ; 8-byte Folded Reload
ldr d3, [sp, #6824] ; 8-byte Folded Reload
fmul d1, d1, d3
fsub d0, d0, d1
ldr d1, [sp, #8760] ; 8-byte Folded Reload
ldr d3, [sp, #6832] ; 8-byte Folded Reload
fmul d1, d1, d3
fadd d0, d1, d0
mov x9, #4258
movk x9, #55111, lsl #16
movk x9, #31418, lsl #32
movk x9, #16398, lsl #48
fmov d1, x9
fadd d0, d0, d1
ldr d3, [sp, #12040] ; 8-byte Folded Reload
ldr d1, [sp, #8648] ; 8-byte Folded Reload
fmul d1, d3, d1
fsub d0, d0, d1
ldr d1, [sp, #6840] ; 8-byte Folded Reload
fmul d1, d25, d1
fadd d0, d1, d0
fadd d0, d0, d5
ldr d1, [sp, #6416] ; 8-byte Folded Reload
fmul d1, d3, d1
fsub d0, d0, d1
ldr d1, [sp, #6816] ; 8-byte Folded Reload
fmul d1, d25, d1
fadd d0, d1, d0
fadd d0, d0, d4
mov x9, #45786
movk x9, #34486, lsl #16
movk x9, #57144, lsl #32
movk x9, #16311, lsl #48
fmov d1, x9
fadd d0, d0, d1
str d0, [x8, #1736]
LBB19_100:
str d4, [sp, #12320] ; 8-byte Folded Spill
str d5, [sp, #12336] ; 8-byte Folded Spill
mov x9, #43139
movk x9, #8835, lsl #16
movk x9, #28093, lsl #32
movk x9, #49187, lsl #48
fmov d0, x9
ldr d1, [sp, #5240] ; 8-byte Folded Reload
fmul d28, d1, d0
mov x9, #58251
movk x9, #46885, lsl #16
movk x9, #26312, lsl #32
movk x9, #16401, lsl #48
fmov d0, x9
ldr d1, [sp, #12024] ; 8-byte Folded Reload
fmul d1, d1, d0
ldr d3, [sp, #8520] ; 8-byte Folded Reload
fsub d1, d3, d1
mov x9, #24565
movk x9, #58125, lsl #16
movk x9, #44270, lsl #32
movk x9, #16372, lsl #48
fmov d3, x9
ldr d4, [sp, #12032] ; 8-byte Folded Reload
fmul d4, d4, d3
ldr d5, [sp, #8560] ; 8-byte Folded Reload
fsub d4, d5, d4
mov x9, #54806
movk x9, #23353, lsl #16
movk x9, #56949, lsl #32
movk x9, #16326, lsl #48
fmov d5, x9
ldr d6, [sp, #12128] ; 8-byte Folded Reload
fmul d6, d6, d5
ldr d7, [sp, #9488] ; 8-byte Folded Reload
fsub d6, d7, d6
ldr d7, [sp, #10088] ; 8-byte Folded Reload
fadd d6, d6, d7
mov x9, #65123
movk x9, #27942, lsl #16
movk x9, #23314, lsl #32
movk x9, #16371, lsl #48
fmov d7, x9
ldr d17, [sp, #12344] ; 8-byte Folded Reload
fmul d17, d17, d7
fsub d6, d6, d17
str d6, [sp, #12304] ; 8-byte Folded Spill
fadd d4, d4, d6
str d4, [sp, #12296] ; 8-byte Folded Spill
fadd d6, d1, d4
ldr d1, [sp, #12080] ; 8-byte Folded Reload
fmul d0, d1, d0
ldr d1, [sp, #6152] ; 8-byte Folded Reload
fsub d0, d1, d0
ldr d1, [sp, #12112] ; 8-byte Folded Reload
fmul d1, d1, d3
ldr d3, [sp, #8480] ; 8-byte Folded Reload
fsub d1, d3, d1
ldr d3, [sp, #12264] ; 8-byte Folded Reload
fmul d3, d3, d5
ldr d4, [sp, #9408] ; 8-byte Folded Reload
fsub d3, d4, d3
ldr d4, [sp, #10072] ; 8-byte Folded Reload
fadd d3, d3, d4
ldur d4, [x29, #-224] ; 8-byte Folded Reload
fmul d4, d4, d7
fsub d3, d3, d4
str d3, [sp, #12208] ; 8-byte Folded Spill
fadd d24, d1, d3
fadd d4, d0, d24
ldr d29, [sp, #6456] ; 8-byte Folded Reload
ldr d12, [sp, #6448] ; 8-byte Folded Reload
cbz x8, LBB19_102
; %bb.101:
ldr d0, [sp, #1896] ; 8-byte Folded Reload
fadd d0, d28, d0
fsub d0, d0, d19
mov x9, #28852
movk x9, #37576, lsl #16
movk x9, #2974, lsl #32
movk x9, #49192, lsl #48
fmov d1, x9
ldr d3, [sp, #12040] ; 8-byte Folded Reload
fmul d1, d3, d1
fadd d0, d1, d0
fadd d0, d0, d6
fadd d0, d1, d0
fadd d0, d0, d4
str d0, [x8, #1744]
LBB19_102:
str d4, [sp, #12280] ; 8-byte Folded Spill
str d6, [sp, #12288] ; 8-byte Folded Spill
mov x9, #43139
movk x9, #8835, lsl #16
movk x9, #28093, lsl #32
movk x9, #16419, lsl #48
fmov d0, x9
ldr d1, [sp, #5232] ; 8-byte Folded Reload
fmul d19, d1, d0
mov x9, #58251
movk x9, #46885, lsl #16
movk x9, #26312, lsl #32
movk x9, #16401, lsl #48
fmov d0, x9
fmul d1, d21, d0
ldr d3, [sp, #8512] ; 8-byte Folded Reload
fadd d1, d3, d1
mov x9, #24565
movk x9, #58125, lsl #16
movk x9, #44270, lsl #32
movk x9, #16372, lsl #48
fmov d3, x9
fmul d4, d27, d3
ldr d5, [sp, #8568] ; 8-byte Folded Reload
fsub d4, d5, d4
mov x9, #54806
movk x9, #23353, lsl #16
movk x9, #56949, lsl #32
movk x9, #16326, lsl #48
fmov d5, x9
ldr d6, [sp, #12120] ; 8-byte Folded Reload
fmul d6, d6, d5
ldr d7, [sp, #9496] ; 8-byte Folded Reload
fadd d6, d7, d6
ldr d7, [sp, #10080] ; 8-byte Folded Reload
fadd d6, d6, d7
mov x9, #65123
movk x9, #27942, lsl #16
movk x9, #23314, lsl #32
movk x9, #16371, lsl #48
fmov d7, x9
ldr d17, [sp, #12272] ; 8-byte Folded Reload
fmul d17, d17, d7
fadd d30, d17, d6
fadd d31, d4, d30
fadd d6, d1, d31
fmul d0, d20, d0
ldr d1, [sp, #6160] ; 8-byte Folded Reload
fadd d0, d1, d0
fmul d1, d23, d3
ldr d3, [sp, #8488] ; 8-byte Folded Reload
fsub d1, d3, d1
ldr d3, [sp, #12256] ; 8-byte Folded Reload
fmul d3, d3, d5
ldr d4, [sp, #9416] ; 8-byte Folded Reload
fadd d3, d4, d3
ldr d4, [sp, #10064] ; 8-byte Folded Reload
fadd d3, d3, d4
ldr d4, [sp, #12328] ; 8-byte Folded Reload
fmul d4, d4, d7
fadd d9, d4, d3
fadd d10, d1, d9
fadd d11, d0, d10
cbz x8, LBB19_104
; %bb.103:
ldr d0, [sp, #1904] ; 8-byte Folded Reload
fadd d0, d19, d0
fadd d0, d18, d0
mov x9, #28852
movk x9, #37576, lsl #16
movk x9, #2974, lsl #32
movk x9, #16424, lsl #48
fmov d1, x9
fmul d1, d25, d1
fadd d0, d1, d0
fadd d0, d0, d6
fadd d0, d1, d0
fadd d0, d0, d11
str d0, [x8, #1752]
LBB19_104:
str d6, [sp, #12232] ; 8-byte Folded Spill
str d19, [sp, #12240] ; 8-byte Folded Spill
ldr d0, [sp, #8288] ; 8-byte Folded Reload
fmul d0, d20, d0
ldr d1, [sp, #12080] ; 8-byte Folded Reload
ldr d3, [sp, #8296] ; 8-byte Folded Reload
fmul d1, d1, d3
fsub d0, d0, d1
ldr d1, [sp, #10680] ; 8-byte Folded Reload
ldr d3, [sp, #8312] ; 8-byte Folded Reload
fmul d1, d1, d3
ldr d3, [sp, #10688] ; 8-byte Folded Reload
ldr d4, [sp, #8304] ; 8-byte Folded Reload
fmul d3, d3, d4
fsub d1, d1, d3
ldr d3, [sp, #12112] ; 8-byte Folded Reload
ldr d4, [sp, #9312] ; 8-byte Folded Reload
fmul d3, d3, d4
fsub d1, d1, d3
ldr d3, [sp, #9304] ; 8-byte Folded Reload
fmul d3, d23, d3
fsub d1, d1, d3
ldr d3, [sp, #11288] ; 8-byte Folded Reload
ldr d4, [sp, #9320] ; 8-byte Folded Reload
fmul d3, d3, d4
ldr d4, [sp, #9328] ; 8-byte Folded Reload
fmul d4, d15, d4
fadd d3, d3, d4
ldr d4, [sp, #12264] ; 8-byte Folded Reload
ldr d5, [sp, #8352] ; 8-byte Folded Reload
fmul d4, d4, d5
fsub d3, d3, d4
ldr d4, [sp, #12256] ; 8-byte Folded Reload
ldr d5, [sp, #8344] ; 8-byte Folded Reload
fmul d4, d4, d5
fadd d3, d4, d3
ldr d4, [sp, #11928] ; 8-byte Folded Reload
ldr d5, [sp, #8736] ; 8-byte Folded Reload
fmul d4, d5, d4
ldr d5, [sp, #11920] ; 8-byte Folded Reload
ldr d6, [sp, #6496] ; 8-byte Folded Reload
fmul d5, d6, d5
fadd d4, d4, d5
fadd d4, d4, d16
stur d4, [x29, #-232] ; 8-byte Folded Spill
fadd d3, d3, d4
ldur d4, [x29, #-224] ; 8-byte Folded Reload
ldr d5, [sp, #8368] ; 8-byte Folded Reload
fmul d4, d4, d5
fsub d3, d3, d4
ldr d4, [sp, #12328] ; 8-byte Folded Reload
ldr d5, [sp, #8360] ; 8-byte Folded Reload
fmul d4, d4, d5
fadd d3, d4, d3
mov x9, #29491
movk x9, #28745, lsl #16
movk x9, #45973, lsl #32
movk x9, #16267, lsl #48
fmov d4, x9
fadd d3, d3, d4
mov x9, #14419
movk x9, #64308, lsl #16
movk x9, #365, lsl #32
movk x9, #16201, lsl #48
fmov d4, x9
fadd d13, d3, d4
fadd d1, d1, d13
mov x9, #43847
movk x9, #44022, lsl #16
movk x9, #63499, lsl #32
movk x9, #16322, lsl #48
fmov d3, x9
fadd d14, d1, d3
fadd d0, d0, d14
mov x9, #8448
movk x9, #17562, lsl #16
movk x9, #63752, lsl #32
movk x9, #16340, lsl #48
fmov d1, x9
fadd d0, d0, d1
cbz x8, LBB19_106
; %bb.105:
ldr d1, [sp, #5600] ; 8-byte Folded Reload
fmul d1, d25, d1
ldr d3, [sp, #12040] ; 8-byte Folded Reload
ldr d4, [sp, #5592] ; 8-byte Folded Reload
fmul d3, d3, d4
fsub d1, d1, d3
fadd d1, d1, d0
str d1, [x8, #1760]
LBB19_106:
str d28, [sp, #12312] ; 8-byte Folded Spill
ldr d1, [sp, #8192] ; 8-byte Folded Reload
fmul d1, d21, d1
ldr d3, [sp, #12024] ; 8-byte Folded Reload
ldr d4, [sp, #8200] ; 8-byte Folded Reload
fmul d3, d3, d4
fsub d3, d1, d3
ldr d1, [sp, #10696] ; 8-byte Folded Reload
ldr d4, [sp, #8216] ; 8-byte Folded Reload
fmul d1, d1, d4
ldr d4, [sp, #10704] ; 8-byte Folded Reload
ldr d5, [sp, #8208] ; 8-byte Folded Reload
fmul d4, d4, d5
fsub d1, d1, d4
ldr d4, [sp, #12032] ; 8-byte Folded Reload
ldr d5, [sp, #9224] ; 8-byte Folded Reload
fmul d4, d4, d5
fsub d1, d1, d4
ldr d4, [sp, #11296] ; 8-byte Folded Reload
ldr d5, [sp, #9216] ; 8-byte Folded Reload
fmul d4, d4, d5
fsub d4, d1, d4
ldr d1, [sp, #9232] ; 8-byte Folded Reload
fmul d1, d26, d1
ldr d5, [sp, #9240] ; 8-byte Folded Reload
fmul d5, d22, d5
fadd d1, d1, d5
ldr d5, [sp, #12128] ; 8-byte Folded Reload
ldr d6, [sp, #8240] ; 8-byte Folded Reload
fmul d5, d5, d6
fsub d1, d1, d5
ldr d5, [sp, #12120] ; 8-byte Folded Reload
ldr d6, [sp, #8232] ; 8-byte Folded Reload
fmul d5, d5, d6
fadd d1, d5, d1
ldr d5, [sp, #11944] ; 8-byte Folded Reload
ldr d6, [sp, #8728] ; 8-byte Folded Reload
fmul d5, d6, d5
ldr d6, [sp, #11936] ; 8-byte Folded Reload
ldr d7, [sp, #8720] ; 8-byte Folded Reload
fmul d6, d7, d6
fadd d5, d5, d6
fadd d5, d5, d2
stur d5, [x29, #-248] ; 8-byte Folded Spill
fadd d1, d1, d5
ldr d5, [sp, #12344] ; 8-byte Folded Reload
ldr d6, [sp, #8256] ; 8-byte Folded Reload
fmul d5, d5, d6
fsub d1, d1, d5
ldr d5, [sp, #12272] ; 8-byte Folded Reload
ldr d6, [sp, #8248] ; 8-byte Folded Reload
fmul d5, d5, d6
fadd d1, d5, d1
mov x9, #29491
movk x9, #28745, lsl #16
movk x9, #45973, lsl #32
movk x9, #16267, lsl #48
fmov d5, x9
fadd d1, d1, d5
mov x9, #14419
movk x9, #64308, lsl #16
movk x9, #365, lsl #32
movk x9, #16201, lsl #48
fmov d5, x9
fadd d1, d1, d5
fadd d4, d4, d1
mov x9, #43847
movk x9, #44022, lsl #16
movk x9, #63499, lsl #32
movk x9, #16322, lsl #48
fmov d5, x9
fadd d18, d4, d5
fadd d3, d3, d18
mov x9, #8448
movk x9, #17562, lsl #16
movk x9, #63752, lsl #32
movk x9, #16340, lsl #48
fmov d4, x9
fadd d19, d3, d4
cbz x8, LBB19_108
; %bb.107:
ldr d3, [sp, #5480] ; 8-byte Folded Reload
fmul d3, d25, d3
ldr d4, [sp, #12040] ; 8-byte Folded Reload
ldr d5, [sp, #5472] ; 8-byte Folded Reload
fmul d4, d4, d5
fsub d3, d3, d4
fadd d3, d3, d19
str d3, [x8, #1768]
LBB19_108:
ldr d27, [sp, #8688] ; 8-byte Folded Reload
fmul d3, d20, d27
ldr d4, [sp, #12080] ; 8-byte Folded Reload
ldr d17, [sp, #8696] ; 8-byte Folded Reload
fmul d4, d4, d17
fsub d4, d3, d4
ldr d3, [sp, #11288] ; 8-byte Folded Reload
ldr d5, [sp, #9184] ; 8-byte Folded Reload
fmul d3, d3, d5
ldr d5, [sp, #9176] ; 8-byte Folded Reload
fmul d5, d15, d5
fadd d3, d3, d5
ldr d5, [sp, #12264] ; 8-byte Folded Reload
ldr d6, [sp, #8176] ; 8-byte Folded Reload
fmul d5, d5, d6
fsub d3, d3, d5
ldr d5, [sp, #12256] ; 8-byte Folded Reload
ldr d6, [sp, #8160] ; 8-byte Folded Reload
fmul d5, d5, d6
fadd d3, d5, d3
ldr d5, [sp, #11928] ; 8-byte Folded Reload
ldr d6, [sp, #8712] ; 8-byte Folded Reload
fmul d5, d6, d5
ldr d6, [sp, #11920] ; 8-byte Folded Reload
ldr d7, [sp, #8704] ; 8-byte Folded Reload
fmul d6, d7, d6
fadd d5, d5, d6
fadd d28, d5, d16
fadd d3, d3, d28
ldur d5, [x29, #-224] ; 8-byte Folded Reload
ldr d6, [sp, #9200] ; 8-byte Folded Reload
fmul d5, d5, d6
fsub d3, d3, d5
ldr d5, [sp, #12328] ; 8-byte Folded Reload
ldr d6, [sp, #8168] ; 8-byte Folded Reload
fmul d5, d5, d6
fadd d3, d5, d3
mov x9, #29491
movk x9, #28745, lsl #16
movk x9, #45973, lsl #32
movk x9, #16267, lsl #48
fmov d5, x9
fadd d3, d3, d5
mov x9, #14419
movk x9, #64308, lsl #16
movk x9, #365, lsl #32
movk x9, #16201, lsl #48
fmov d5, x9
fadd d20, d3, d5
ldr d3, [sp, #12112] ; 8-byte Folded Reload
ldr d5, [sp, #9208] ; 8-byte Folded Reload
fmul d3, d3, d5
ldr d5, [sp, #9192] ; 8-byte Folded Reload
fmul d5, d23, d5
fadd d3, d3, d5
fsub d3, d20, d3
mov x9, #43847
movk x9, #44022, lsl #16
movk x9, #63499, lsl #32
movk x9, #16322, lsl #48
fmov d5, x9
fadd d3, d3, d5
fadd d23, d4, d3
cbz x8, LBB19_110
; %bb.109:
fmul d4, d25, d27
ldr d5, [sp, #12040] ; 8-byte Folded Reload
fmul d5, d5, d17
fsub d4, d4, d5
fadd d4, d4, d23
str d4, [x8, #1776]
LBB19_110:
fmul d4, d21, d12
ldr d5, [sp, #12024] ; 8-byte Folded Reload
fmul d5, d5, d29
fsub d6, d4, d5
ldr d4, [sp, #9096] ; 8-byte Folded Reload
fmul d4, d26, d4
ldr d5, [sp, #9088] ; 8-byte Folded Reload
fmul d5, d22, d5
fadd d4, d4, d5
ldr d5, [sp, #12128] ; 8-byte Folded Reload
ldr d7, [sp, #7992] ; 8-byte Folded Reload
fmul d5, d5, d7
fsub d4, d4, d5
ldr d5, [sp, #12120] ; 8-byte Folded Reload
ldr d7, [sp, #7976] ; 8-byte Folded Reload
fmul d5, d5, d7
fadd d4, d5, d4
ldr d5, [sp, #11944] ; 8-byte Folded Reload
ldr d7, [sp, #8680] ; 8-byte Folded Reload
fmul d5, d7, d5
ldr d7, [sp, #11936] ; 8-byte Folded Reload
ldr d17, [sp, #8672] ; 8-byte Folded Reload
fmul d7, d17, d7
fadd d5, d5, d7
fadd d27, d5, d2
fadd d4, d4, d27
ldr d5, [sp, #12344] ; 8-byte Folded Reload
ldr d7, [sp, #9112] ; 8-byte Folded Reload
fmul d5, d5, d7
fsub d4, d4, d5
ldr d5, [sp, #12272] ; 8-byte Folded Reload
ldr d7, [sp, #7984] ; 8-byte Folded Reload
fmul d5, d5, d7
fadd d4, d5, d4
mov x9, #29491
movk x9, #28745, lsl #16
movk x9, #45973, lsl #32
movk x9, #16267, lsl #48
fmov d5, x9
fadd d4, d4, d5
mov x9, #14419
movk x9, #64308, lsl #16
movk x9, #365, lsl #32
movk x9, #16201, lsl #48
fmov d5, x9
fadd d4, d4, d5
ldr d5, [sp, #12032] ; 8-byte Folded Reload
ldr d7, [sp, #9120] ; 8-byte Folded Reload
fmul d5, d5, d7
ldr d7, [sp, #11296] ; 8-byte Folded Reload
ldr d17, [sp, #9104] ; 8-byte Folded Reload
fmul d7, d7, d17
fadd d5, d5, d7
fsub d5, d4, d5
mov x9, #43847
movk x9, #44022, lsl #16
movk x9, #63499, lsl #32
movk x9, #16322, lsl #48
fmov d7, x9
fadd d5, d5, d7
fadd d6, d6, d5
cbz x8, LBB19_112
; %bb.111:
fmul d7, d25, d12
ldr d17, [sp, #12040] ; 8-byte Folded Reload
fmul d17, d17, d29
fsub d7, d7, d17
fadd d7, d7, d6
str d7, [x8, #1784]
LBB19_112:
ldr d7, [sp, #11408] ; 8-byte Folded Reload
ldr d8, [sp, #9696] ; 8-byte Folded Reload
fmul d7, d7, d8
ldr d17, [sp, #12080] ; 8-byte Folded Reload
ldr d29, [sp, #9704] ; 8-byte Folded Reload
fmul d17, d17, d29
fsub d26, d7, d17
ldr d7, [sp, #11288] ; 8-byte Folded Reload
ldr d17, [sp, #10136] ; 8-byte Folded Reload
fmul d7, d17, d7
ldr d17, [sp, #10128] ; 8-byte Folded Reload
fmul d17, d17, d15
fadd d7, d7, d17
ldr d17, [sp, #12264] ; 8-byte Folded Reload
ldr d21, [sp, #9656] ; 8-byte Folded Reload
fmul d17, d17, d21
fsub d7, d7, d17
ldr d17, [sp, #12256] ; 8-byte Folded Reload
ldr d21, [sp, #8856] ; 8-byte Folded Reload
fmul d17, d17, d21
fadd d7, d17, d7
ldr d17, [sp, #11928] ; 8-byte Folded Reload
ldr d21, [sp, #10120] ; 8-byte Folded Reload
fmul d17, d21, d17
ldr d21, [sp, #11920] ; 8-byte Folded Reload
ldr d22, [sp, #10144] ; 8-byte Folded Reload
fmul d12, d22, d21
fadd d17, d17, d12
fadd d15, d17, d16
fadd d7, d7, d15
ldur d17, [x29, #-224] ; 8-byte Folded Reload
fmul d17, d17, d29
fsub d7, d7, d17
ldr d17, [sp, #12328] ; 8-byte Folded Reload
fmul d17, d17, d8
fadd d7, d17, d7
mov x9, #29491
movk x9, #28745, lsl #16
movk x9, #45973, lsl #32
movk x9, #16267, lsl #48
fmov d17, x9
fadd d7, d7, d17
mov x9, #14419
movk x9, #64308, lsl #16
movk x9, #365, lsl #32
movk x9, #16201, lsl #48
fmov d17, x9
fadd d7, d7, d17
ldr d17, [sp, #12112] ; 8-byte Folded Reload
fmul d17, d17, d29
ldr d21, [sp, #11304] ; 8-byte Folded Reload
fmul d12, d21, d8
fadd d17, d17, d12
fsub d17, d7, d17
fadd d26, d26, d17
cbz x8, LBB19_114
; %bb.113:
ldr d21, [sp, #11416] ; 8-byte Folded Reload
fmul d12, d21, d8
ldr d21, [sp, #12040] ; 8-byte Folded Reload
fmul d29, d21, d29
fsub d29, d12, d29
fadd d29, d29, d26
str d29, [x8, #1792]
LBB19_114:
ldr d21, [sp, #11944] ; 8-byte Folded Reload
ldr d22, [sp, #10384] ; 8-byte Folded Reload
fmul d29, d22, d21
ldr d21, [sp, #11936] ; 8-byte Folded Reload
ldr d22, [sp, #10376] ; 8-byte Folded Reload
fmul d12, d22, d21
fadd d29, d29, d12
fadd d12, d29, d2
cbz x8, LBB19_119
; %bb.115:
ldr d21, [sp, #9624] ; 8-byte Folded Reload
ldr d22, [sp, #10368] ; 8-byte Folded Reload
fmul d29, d22, d21
ldr d21, [sp, #9648] ; 8-byte Folded Reload
ldr d22, [sp, #10360] ; 8-byte Folded Reload
fmul d21, d22, d21
fadd d21, d29, d21
ldr d22, [sp, #10056] ; 8-byte Folded Reload
ldr d29, [sp, #11168] ; 8-byte Folded Reload
fmul d29, d29, d22
ldr d22, [sp, #9576] ; 8-byte Folded Reload
ldr d8, [sp, #10816] ; 8-byte Folded Reload
fmul d22, d8, d22
fadd d22, d29, d22
fadd d16, d16, d22
ldr d22, [sp, #10712] ; 8-byte Folded Reload
ldr d29, [sp, #11520] ; 8-byte Folded Reload
fmul d22, d29, d22
ldr d29, [sp, #10008] ; 8-byte Folded Reload
ldr d8, [sp, #11512] ; 8-byte Folded Reload
fmul d29, d8, d29
fadd d22, d22, d29
fadd d2, d2, d22
ldr d22, [sp, #8648] ; 8-byte Folded Reload
ldr d29, [sp, #6824] ; 8-byte Folded Reload
fadd d22, d29, d22
ldr d29, [sp, #6416] ; 8-byte Folded Reload
fadd d22, d22, d29
ldr d29, [sp, #12312] ; 8-byte Folded Reload
fadd d22, d29, d22
str d22, [x8, #1824]
ldr d22, [sp, #5592] ; 8-byte Folded Reload
str d22, [x8, #1840]
ldr d22, [sp, #5472] ; 8-byte Folded Reload
str d22, [x8, #1848]
ldr d22, [sp, #8696] ; 8-byte Folded Reload
str d22, [x8, #1856]
ldr d22, [sp, #6456] ; 8-byte Folded Reload
str d22, [x8, #1864]
ldr d22, [sp, #9704] ; 8-byte Folded Reload
str d22, [x8, #1872]
ldr d22, [sp, #6840] ; 8-byte Folded Reload
ldr d29, [sp, #6832] ; 8-byte Folded Reload
fadd d22, d29, d22
ldr d29, [sp, #6816] ; 8-byte Folded Reload
fadd d22, d22, d29
ldr d29, [sp, #12240] ; 8-byte Folded Reload
fadd d22, d29, d22
str d22, [x8, #1904]
ldr d22, [sp, #5600] ; 8-byte Folded Reload
str d22, [x8, #1920]
ldr d22, [sp, #5480] ; 8-byte Folded Reload
str d22, [x8, #1928]
ldr d22, [sp, #8688] ; 8-byte Folded Reload
str d22, [x8, #1936]
ldr d22, [sp, #6448] ; 8-byte Folded Reload
str d22, [x8, #1944]
ldr d22, [sp, #9696] ; 8-byte Folded Reload
str d22, [x8, #1952]
ldr d22, [sp, #12320] ; 8-byte Folded Reload
str d22, [x8, #1984]
ldr d22, [sp, #12280] ; 8-byte Folded Reload
str d22, [x8, #1992]
str d11, [x8, #2000]
str d0, [x8, #2008]
str d23, [x8, #2016]
str d26, [x8, #2024]
ldr d0, [sp, #12336] ; 8-byte Folded Reload
str d0, [x8, #2040]
ldr d0, [sp, #12288] ; 8-byte Folded Reload
str d0, [x8, #2048]
ldr d0, [sp, #12232] ; 8-byte Folded Reload
str d0, [x8, #2056]
str d19, [x8, #2064]
str d6, [x8, #2072]
ldur d0, [x29, #-256] ; 8-byte Folded Reload
str d0, [x8, #2096]
str d24, [x8, #2104]
str d10, [x8, #2112]
str d14, [x8, #2120]
str d3, [x8, #2128]
str d17, [x8, #2136]
ldur d0, [x29, #-240] ; 8-byte Folded Reload
str d0, [x8, #2152]
ldr d0, [sp, #12296] ; 8-byte Folded Reload
str d0, [x8, #2160]
str d31, [x8, #2168]
str d18, [x8, #2176]
str d5, [x8, #2184]
ldur d0, [x29, #-216] ; 8-byte Folded Reload
str d0, [x8, #2208]
ldr d0, [sp, #12208] ; 8-byte Folded Reload
str d0, [x8, #2216]
str d9, [x8, #2224]
str d13, [x8, #2232]
str d20, [x8, #2240]
str d7, [x8, #2248]
ldur d0, [x29, #-208] ; 8-byte Folded Reload
str d0, [x8, #2264]
ldr d0, [sp, #12304] ; 8-byte Folded Reload
str d0, [x8, #2272]
str d30, [x8, #2280]
str d1, [x8, #2288]
str d4, [x8, #2296]
ldr d25, [sp, #11480] ; 8-byte Folded Reload
ldr d30, [sp, #9720] ; 8-byte Folded Reload
fmul d0, d25, d30
ldr d18, [sp, #12024] ; 8-byte Folded Reload
ldr d31, [sp, #9712] ; 8-byte Folded Reload
fmul d1, d18, d31
fsub d3, d0, d1
ldr d22, [sp, #12128] ; 8-byte Folded Reload
ldr d0, [sp, #9736] ; 8-byte Folded Reload
fmul d0, d22, d0
fsub d0, d21, d0
ldr d29, [sp, #12120] ; 8-byte Folded Reload
ldr d1, [sp, #9728] ; 8-byte Folded Reload
fmul d1, d29, d1
fadd d0, d1, d0
fadd d0, d0, d12
ldr d21, [sp, #12344] ; 8-byte Folded Reload
fmul d1, d21, d31
fsub d0, d0, d1
ldr d26, [sp, #12272] ; 8-byte Folded Reload
fmul d1, d26, d30
fadd d0, d1, d0
mov x9, #29491
movk x9, #28745, lsl #16
movk x9, #45973, lsl #32
movk x9, #16267, lsl #48
fmov d1, x9
fadd d0, d0, d1
mov x9, #14419
movk x9, #64308, lsl #16
movk x9, #365, lsl #32
movk x9, #16201, lsl #48
fmov d1, x9
fadd d0, d0, d1
ldr d19, [sp, #12032] ; 8-byte Folded Reload
fmul d1, d19, d31
ldr d24, [sp, #11296] ; 8-byte Folded Reload
fmul d4, d24, d30
fadd d1, d1, d4
ldr d23, [sp, #11416] ; 8-byte Folded Reload
fmul d4, d23, d30
ldr d20, [sp, #12040] ; 8-byte Folded Reload
fmul d5, d20, d31
fsub d5, d4, d5
fsub d1, d0, d1
fadd d4, d3, d1
fadd d3, d5, d4
str d3, [x8, #1800]
ldr d3, [sp, #11408] ; 8-byte Folded Reload
ldr d8, [sp, #11176] ; 8-byte Folded Reload
fmul d3, d3, d8
ldr d5, [sp, #12080] ; 8-byte Folded Reload
ldr d9, [sp, #10808] ; 8-byte Folded Reload
fmul d5, d5, d9
fsub d6, d3, d5
ldr d3, [sp, #12256] ; 8-byte Folded Reload
fmul d3, d3, d8
ldr d5, [sp, #12264] ; 8-byte Folded Reload
fmul d5, d5, d9
fsub d3, d3, d5
fadd d3, d3, d16
ldur d5, [x29, #-224] ; 8-byte Folded Reload
fmul d5, d5, d9
fsub d3, d3, d5
ldr d5, [sp, #12328] ; 8-byte Folded Reload
fmul d5, d5, d8
fadd d3, d5, d3
ldr d5, [sp, #12112] ; 8-byte Folded Reload
fmul d5, d5, d9
ldr d7, [sp, #11304] ; 8-byte Folded Reload
fmul d7, d7, d8
fadd d5, d5, d7
fmul d7, d23, d8
fmul d17, d20, d9
fsub d7, d7, d17
fsub d5, d3, d5
fadd d17, d6, d5
fadd d6, d7, d17
str d6, [x8, #1808]
ldr d10, [sp, #11640] ; 8-byte Folded Reload
fmul d6, d25, d10
ldr d25, [sp, #11504] ; 8-byte Folded Reload
fmul d7, d18, d25
fsub d7, d6, d7
fmul d6, d29, d10
fmul d18, d22, d25
fsub d6, d6, d18
fadd d6, d6, d2
fmul d18, d21, d25
fsub d6, d6, d18
fmul d18, d26, d10
fadd d6, d18, d6
fmul d18, d19, d25
fmul d19, d24, d10
fadd d18, d18, d19
fmul d19, d23, d10
fmul d20, d20, d25
fsub d19, d19, d20
fsub d18, d6, d18
fadd d7, d7, d18
fadd d19, d19, d7
str d19, [x8, #1816]
str d31, [x8, #1880]
str d9, [x8, #1888]
str d25, [x8, #1896]
str d30, [x8, #1960]
str d8, [x8, #1968]
str d10, [x8, #1976]
str d17, [x8, #2032]
str d4, [x8, #2080]
str d7, [x8, #2088]
str d5, [x8, #2144]
str d1, [x8, #2192]
str d18, [x8, #2200]
mov x9, #2
movk x9, #16463, lsl #48
str x9, [x8, #1832]
str d3, [x8, #2256]
str d0, [x8, #2304]
str d6, [x8, #2312]
ldp d1, d4, [x29, #-200] ; 16-byte Folded Reload
ldr d0, [sp, #6392] ; 8-byte Folded Reload
fmul d0, d4, d0
fadd d0, d1, d0
ldur d3, [x29, #-160] ; 8-byte Folded Reload
ldr d1, [sp, #6384] ; 8-byte Folded Reload
fmul d1, d3, d1
fadd d0, d1, d0
str d0, [x8, #2320]
ldr d0, [sp, #6368] ; 8-byte Folded Reload
fmul d0, d4, d0
ldr d1, [sp, #10072] ; 8-byte Folded Reload
fadd d0, d1, d0
str d0, [x8, #2328]
ldr d0, [sp, #6352] ; 8-byte Folded Reload
fmul d0, d3, d0
ldr d1, [sp, #10064] ; 8-byte Folded Reload
fadd d0, d1, d0
str d0, [x8, #2336]
ldr d0, [sp, #6312] ; 8-byte Folded Reload
fmul d0, d4, d0
ldur d1, [x29, #-232] ; 8-byte Folded Reload
fadd d0, d1, d0
ldr d1, [sp, #6304] ; 8-byte Folded Reload
fmul d1, d3, d1
fadd d0, d1, d0
str d0, [x8, #2344]
ldr d0, [sp, #6216] ; 8-byte Folded Reload
fmul d0, d4, d0
fadd d0, d28, d0
ldr d1, [sp, #6208] ; 8-byte Folded Reload
fmul d1, d3, d1
fadd d0, d1, d0
str d0, [x8, #2352]
ldr d0, [sp, #9672] ; 8-byte Folded Reload
fmul d0, d4, d0
fadd d0, d15, d0
ldr d1, [sp, #9664] ; 8-byte Folded Reload
fmul d1, d3, d1
fadd d0, d1, d0
str d0, [x8, #2360]
ldr d0, [sp, #10280] ; 8-byte Folded Reload
fmul d0, d4, d0
fadd d0, d16, d0
ldr d1, [sp, #10272] ; 8-byte Folded Reload
fmul d1, d3, d1
fadd d0, d0, d1
str d0, [x8, #2368]
ldp d1, d4, [x29, #-184] ; 16-byte Folded Reload
ldr d0, [sp, #6408] ; 8-byte Folded Reload
fmul d0, d4, d0
fadd d0, d1, d0
ldur d3, [x29, #-168] ; 8-byte Folded Reload
ldr d1, [sp, #6400] ; 8-byte Folded Reload
fmul d1, d3, d1
fadd d0, d1, d0
str d0, [x8, #2376]
ldr d0, [sp, #6376] ; 8-byte Folded Reload
fmul d0, d4, d0
ldr d1, [sp, #10088] ; 8-byte Folded Reload
fadd d0, d1, d0
str d0, [x8, #2384]
ldr d0, [sp, #6360] ; 8-byte Folded Reload
fmul d0, d3, d0
ldr d1, [sp, #10080] ; 8-byte Folded Reload
fadd d0, d1, d0
str d0, [x8, #2392]
str x9, [x8, #1912]
ldr x8, [x19]
cbz x8, LBB19_117
LBB19_116:
ldp d4, d3, [x29, #-176] ; 16-byte Folded Reload
ldr d0, [sp, #6280] ; 8-byte Folded Reload
fmul d0, d4, d0
ldur d1, [x29, #-248] ; 8-byte Folded Reload
fadd d0, d1, d0
ldr d1, [sp, #6272] ; 8-byte Folded Reload
fmul d1, d3, d1
fadd d0, d1, d0
str d0, [x8, #2400]
ldr d0, [sp, #6200] ; 8-byte Folded Reload
fmul d0, d4, d0
fadd d0, d27, d0
ldr d1, [sp, #6192] ; 8-byte Folded Reload
fmul d1, d3, d1
fadd d0, d1, d0
str d0, [x8, #2408]
ldr d0, [sp, #9832] ; 8-byte Folded Reload
fmul d0, d4, d0
fadd d0, d12, d0
ldr d1, [sp, #8184] ; 8-byte Folded Reload
fmul d1, d3, d1
fadd d0, d1, d0
str d0, [x8, #2416]
ldr d0, [sp, #10888] ; 8-byte Folded Reload
fmul d0, d4, d0
fadd d0, d2, d0
ldr d1, [sp, #10880] ; 8-byte Folded Reload
fmul d1, d3, d1
fadd d0, d0, d1
str d0, [x8, #2424]
LBB19_117:
add sp, sp, #3, lsl #12 ; =12288
add sp, sp, #176
ldp x29, x30, [sp, #144] ; 16-byte Folded Reload
ldp x20, x19, [sp, #128] ; 16-byte Folded Reload
ldp x22, x21, [sp, #112] ; 16-byte Folded Reload
ldp x24, x23, [sp, #96] ; 16-byte Folded Reload
ldp x26, x25, [sp, #80] ; 16-byte Folded Reload
ldp x28, x27, [sp, #64] ; 16-byte Folded Reload
ldp d9, d8, [sp, #48] ; 16-byte Folded Reload
ldp d11, d10, [sp, #32] ; 16-byte Folded Reload
ldp d13, d12, [sp, #16] ; 16-byte Folded Reload
ldp d15, d14, [sp], #160 ; 16-byte Folded Reload
ret
LBB19_118:
ldr d0, [sp, #11320] ; 8-byte Folded Reload
fadd d0, d0, d8
str d0, [sp, #11912] ; 8-byte Folded Spill
ldr d1, [sp, #11368] ; 8-byte Folded Reload
fadd d5, d0, d1
ldr d0, [sp, #11360] ; 8-byte Folded Reload
fadd d5, d5, d0
ldr d0, [sp, #11392] ; 8-byte Folded Reload
fadd d1, d5, d0
ldr d0, [sp, #12072] ; 8-byte Folded Reload
str d1, [sp, #12280] ; 8-byte Folded Spill
fsub d17, d0, d1
ldr x8, [x19]
cbnz x8, LBB19_93
b LBB19_94
LBB19_119:
ldr d0, [sp, #10712] ; 8-byte Folded Reload
ldr d1, [sp, #11520] ; 8-byte Folded Reload
fmul d0, d1, d0
ldr d1, [sp, #10008] ; 8-byte Folded Reload
ldr d3, [sp, #11512] ; 8-byte Folded Reload
fmul d1, d3, d1
fadd d0, d0, d1
fadd d2, d2, d0
ldr x8, [x19]
cbnz x8, LBB19_116
b LBB19_117
.loh AdrpLdrGot Lloh8, Lloh9
.cfi_endproc
; -- End function
.globl _jac_F_alloc_mem ; -- Begin function jac_F_alloc_mem
.p2align 2
_jac_F_alloc_mem: ; @jac_F_alloc_mem
.cfi_startproc
; %bb.0:
mov w0, #0
ret
.cfi_endproc
; -- End function
.globl _jac_F_init_mem ; -- Begin function jac_F_init_mem
.p2align 2
_jac_F_init_mem: ; @jac_F_init_mem
.cfi_startproc
; %bb.0:
mov w0, #0
ret
.cfi_endproc
; -- End function
.globl _jac_F_free_mem ; -- Begin function jac_F_free_mem
.p2align 2
_jac_F_free_mem: ; @jac_F_free_mem
.cfi_startproc
; %bb.0:
ret
.cfi_endproc
; -- End function
.globl _jac_F_checkout ; -- Begin function jac_F_checkout
.p2align 2
_jac_F_checkout: ; @jac_F_checkout
.cfi_startproc
; %bb.0:
mov w0, #0
ret
.cfi_endproc
; -- End function
.globl _jac_F_release ; -- Begin function jac_F_release
.p2align 2
_jac_F_release: ; @jac_F_release
.cfi_startproc
; %bb.0:
ret
.cfi_endproc
; -- End function
.globl _jac_F_incref ; -- Begin function jac_F_incref
.p2align 2
_jac_F_incref: ; @jac_F_incref
.cfi_startproc
; %bb.0:
ret
.cfi_endproc
; -- End function
.globl _jac_F_decref ; -- Begin function jac_F_decref
.p2align 2
_jac_F_decref: ; @jac_F_decref
.cfi_startproc
; %bb.0:
ret
.cfi_endproc
; -- End function
.globl _jac_F_n_in ; -- Begin function jac_F_n_in
.p2align 2
_jac_F_n_in: ; @jac_F_n_in
.cfi_startproc
; %bb.0:
mov w0, #2
ret
.cfi_endproc
; -- End function
.globl _jac_F_n_out ; -- Begin function jac_F_n_out
.p2align 2
_jac_F_n_out: ; @jac_F_n_out
.cfi_startproc
; %bb.0:
mov w0, #1
ret
.cfi_endproc
; -- End function
.globl _jac_F_default_in ; -- Begin function jac_F_default_in
.p2align 2
_jac_F_default_in: ; @jac_F_default_in
.cfi_startproc
; %bb.0:
movi d0, #0000000000000000
ret
.cfi_endproc
; -- End function
.globl _jac_F_name_in ; -- Begin function jac_F_name_in
.p2align 2
_jac_F_name_in: ; @jac_F_name_in
.cfi_startproc
; %bb.0:
Lloh10:
adrp x8, l_.str.2@PAGE
Lloh11:
add x8, x8, l_.str.2@PAGEOFF
cmp x0, #1
csel x8, x8, xzr, eq
Lloh12:
adrp x9, l_.str@PAGE
Lloh13:
add x9, x9, l_.str@PAGEOFF
cmp x0, #0
csel x0, x9, x8, eq
ret
.loh AdrpAdd Lloh12, Lloh13
.loh AdrpAdd Lloh10, Lloh11
.cfi_endproc
; -- End function
.globl _jac_F_name_out ; -- Begin function jac_F_name_out
.p2align 2
_jac_F_name_out: ; @jac_F_name_out
.cfi_startproc
; %bb.0:
Lloh14:
adrp x8, l_.str.3@PAGE
Lloh15:
add x8, x8, l_.str.3@PAGEOFF
cmp x0, #0
csel x0, x8, xzr, eq
ret
.loh AdrpAdd Lloh14, Lloh15
.cfi_endproc
; -- End function
.globl _jac_F_sparsity_in ; -- Begin function jac_F_sparsity_in
.p2align 2
_jac_F_sparsity_in: ; @jac_F_sparsity_in
.cfi_startproc
; %bb.0:
Lloh16:
adrp x8, _foo_jac_s2@PAGE
Lloh17:
add x8, x8, _foo_jac_s2@PAGEOFF
cmp x0, #1
csel x8, x8, xzr, eq
Lloh18:
adrp x9, _foo_jac_s0@PAGE
Lloh19:
add x9, x9, _foo_jac_s0@PAGEOFF
cmp x0, #0
csel x0, x9, x8, eq
ret
.loh AdrpAdd Lloh18, Lloh19
.loh AdrpAdd Lloh16, Lloh17
.cfi_endproc
; -- End function
.globl _jac_F_sparsity_out ; -- Begin function jac_F_sparsity_out
.p2align 2
_jac_F_sparsity_out: ; @jac_F_sparsity_out
.cfi_startproc
; %bb.0:
Lloh20:
adrp x8, _foo_jac_s3@PAGE
Lloh21:
add x8, x8, _foo_jac_s3@PAGEOFF
cmp x0, #0
csel x0, x8, xzr, eq
ret
.loh AdrpAdd Lloh20, Lloh21
.cfi_endproc
; -- End function
.globl _jac_F_work ; -- Begin function jac_F_work
.p2align 2
_jac_F_work: ; @jac_F_work
.cfi_startproc
; %bb.0:
cbz x0, LBB34_2
; %bb.1:
mov w8, #2
str x8, [x0]
LBB34_2:
cbz x1, LBB34_4
; %bb.3:
mov w8, #1
str x8, [x1]
LBB34_4:
cbz x2, LBB34_6
; %bb.5:
str xzr, [x2]
LBB34_6:
cbz x3, LBB34_8
; %bb.7:
str xzr, [x3]
LBB34_8:
mov w0, #0
ret
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "i0"
l_.str.1: ; @.str.1
.asciz "o0"
.section __TEXT,__const
.p2align 3 ; @foo_jac_s0
_foo_jac_s0:
.quad 33 ; 0x21
.quad 1 ; 0x1
.quad 0 ; 0x0
.quad 33 ; 0x21
.quad 0 ; 0x0
.quad 1 ; 0x1
.quad 2 ; 0x2
.quad 3 ; 0x3
.quad 4 ; 0x4
.quad 5 ; 0x5
.quad 6 ; 0x6
.quad 7 ; 0x7
.quad 8 ; 0x8
.quad 9 ; 0x9
.quad 10 ; 0xa
.quad 11 ; 0xb
.quad 12 ; 0xc
.quad 13 ; 0xd
.quad 14 ; 0xe
.quad 15 ; 0xf
.quad 16 ; 0x10
.quad 17 ; 0x11
.quad 18 ; 0x12
.quad 19 ; 0x13
.quad 20 ; 0x14
.quad 21 ; 0x15
.quad 22 ; 0x16
.quad 23 ; 0x17
.quad 24 ; 0x18
.quad 25 ; 0x19
.quad 26 ; 0x1a
.quad 27 ; 0x1b
.quad 28 ; 0x1c
.quad 29 ; 0x1d
.quad 30 ; 0x1e
.quad 31 ; 0x1f
.quad 32 ; 0x20
.p2align 3 ; @foo_jac_s1
_foo_jac_s1:
.quad 15 ; 0xf
.quad 1 ; 0x1
.quad 0 ; 0x0
.quad 15 ; 0xf
.quad 0 ; 0x0
.quad 1 ; 0x1
.quad 2 ; 0x2
.quad 3 ; 0x3
.quad 4 ; 0x4
.quad 5 ; 0x5
.quad 6 ; 0x6
.quad 7 ; 0x7
.quad 8 ; 0x8
.quad 9 ; 0x9
.quad 10 ; 0xa
.quad 11 ; 0xb
.quad 12 ; 0xc
.quad 13 ; 0xd
.quad 14 ; 0xe
.section __TEXT,__cstring,cstring_literals
l_.str.2: ; @.str.2
.asciz "out_o0"
l_.str.3: ; @.str.3
.asciz "jac"
.section __TEXT,__const
.p2align 3 ; @foo_jac_s2
_foo_jac_s2:
.quad 15 ; 0xf
.quad 1 ; 0x1
.quad 0 ; 0x0
.quad 0 ; 0x0
.p2align 3 ; @foo_jac_s3
_foo_jac_s3:
.quad 15 ; 0xf
.quad 33 ; 0x21
.quad 0 ; 0x0
.quad 15 ; 0xf
.quad 30 ; 0x1e
.quad 30 ; 0x1e
.quad 43 ; 0x2b
.quad 58 ; 0x3a
.quad 73 ; 0x49
.quad 82 ; 0x52
.quad 91 ; 0x5b
.quad 100 ; 0x64
.quad 109 ; 0x6d
.quad 118 ; 0x76
.quad 127 ; 0x7f
.quad 136 ; 0x88
.quad 145 ; 0x91
.quad 154 ; 0x9a
.quad 163 ; 0xa3
.quad 172 ; 0xac
.quad 181 ; 0xb5
.quad 190 ; 0xbe
.quad 199 ; 0xc7
.quad 208 ; 0xd0
.quad 217 ; 0xd9
.quad 228 ; 0xe4
.quad 238 ; 0xee
.quad 248 ; 0xf8
.quad 255 ; 0xff
.quad 262 ; 0x106
.quad 269 ; 0x10d
.quad 276 ; 0x114
.quad 283 ; 0x11b
.quad 290 ; 0x122
.quad 297 ; 0x129
.quad 304 ; 0x130
.quad 0 ; 0x0
.quad 1 ; 0x1
.quad 2 ; 0x2
.quad 3 ; 0x3
.quad 4 ; 0x4
.quad 5 ; 0x5
.quad 6 ; 0x6
.quad 7 ; 0x7
.quad 8 ; 0x8
.quad 9 ; 0x9
.quad 10 ; 0xa
.quad 11 ; 0xb
.quad 12 ; 0xc
.quad 13 ; 0xd
.quad 14 ; 0xe
.quad 0 ; 0x0
.quad 1 ; 0x1
.quad 2 ; 0x2
.quad 3 ; 0x3
.quad 4 ; 0x4
.quad 5 ; 0x5
.quad 6 ; 0x6
.quad 7 ; 0x7
.quad 8 ; 0x8
.quad 9 ; 0x9
.quad 10 ; 0xa
.quad 11 ; 0xb
.quad 12 ; 0xc
.quad 13 ; 0xd
.quad 14 ; 0xe
.quad 0 ; 0x0
.quad 1 ; 0x1
.quad 2 ; 0x2
.quad 3 ; 0x3
.quad 4 ; 0x4
.quad 5 ; 0x5
.quad 6 ; 0x6
.quad 7 ; 0x7
.quad 8 ; 0x8
.quad 9 ; 0x9
.quad 10 ; 0xa
.quad 11 ; 0xb
.quad 13 ; 0xd
.quad 0 ; 0x0
.quad 1 ; 0x1
.quad 2 ; 0x2
.quad 3 ; 0x3
.quad 4 ; 0x4
.quad 5 ; 0x5
.quad 6 ; 0x6
.quad 7 ; 0x7
.quad 8 ; 0x8
.quad 9 ; 0x9
.quad 10 ; 0xa
.quad 11 ; 0xb
.quad 12 ; 0xc
.quad 13 ; 0xd
.quad 14 ; 0xe
.quad 0 ; 0x0
.quad 1 ; 0x1
.quad 2 ; 0x2
.quad 3 ; 0x3
.quad 4 ; 0x4
.quad 5 ; 0x5
.quad 6 ; 0x6
.quad 7 ; 0x7
.quad 8 ; 0x8
.quad 9 ; 0x9
.quad 10 ; 0xa
.quad 11 ; 0xb
.quad 12 ; 0xc
.quad 13 ; 0xd
.quad 14 ; 0xe
.quad 0 ; 0x0
.quad 1 ; 0x1
.quad 2 ; 0x2
.quad 3 ; 0x3
.quad 5 ; 0x5
.quad 7 ; 0x7
.quad 9 ; 0x9
.quad 13 ; 0xd
.quad 14 ; 0xe
.quad 0 ; 0x0
.quad 1 ; 0x1
.quad 2 ; 0x2
.quad 3 ; 0x3
.quad 5 ; 0x5
.quad 7 ; 0x7
.quad 9 ; 0x9
.quad 13 ; 0xd
.quad 14 ; 0xe
.quad 0 ; 0x0
.quad 1 ; 0x1
.quad 2 ; 0x2
.quad 4 ; 0x4
.quad 6 ; 0x6
.quad 8 ; 0x8
.quad 10 ; 0xa
.quad 11 ; 0xb
.quad 12 ; 0xc
.quad 0 ; 0x0
.quad 1 ; 0x1
.quad 2 ; 0x2
.quad 4 ; 0x4
.quad 6 ; 0x6
.quad 8 ; 0x8
.quad 10 ; 0xa
.quad 11 ; 0xb
.quad 12 ; 0xc
.quad 0 ; 0x0
.quad 1 ; 0x1
.quad 2 ; 0x2
.quad 3 ; 0x3
.quad 5 ; 0x5
.quad 7 ; 0x7
.quad 9 ; 0x9
.quad 13 ; 0xd
.quad 14 ; 0xe
.quad 0 ; 0x0
.quad 1 ; 0x1
.quad 2 ; 0x2
.quad 3 ; 0x3
.quad 5 ; 0x5
.quad 7 ; 0x7
.quad 9 ; 0x9
.quad 13 ; 0xd
.quad 14 ; 0xe
.quad 0 ; 0x0
.quad 1 ; 0x1
.quad 2 ; 0x2
.quad 4 ; 0x4
.quad 6 ; 0x6
.quad 8 ; 0x8
.quad 10 ; 0xa
.quad 11 ; 0xb
.quad 12 ; 0xc
.quad 0 ; 0x0
.quad 1 ; 0x1
.quad 2 ; 0x2
.quad 4 ; 0x4
.quad 6 ; 0x6
.quad 8 ; 0x8
.quad 10 ; 0xa
.quad 11 ; 0xb
.quad 12 ; 0xc
.quad 0 ; 0x0
.quad 1 ; 0x1
.quad 2 ; 0x2
.quad 3 ; 0x3
.quad 5 ; 0x5
.quad 7 ; 0x7
.quad 9 ; 0x9
.quad 13 ; 0xd
.quad 14 ; 0xe
.quad 0 ; 0x0
.quad 1 ; 0x1
.quad 2 ; 0x2
.quad 3 ; 0x3
.quad 5 ; 0x5
.quad 7 ; 0x7
.quad 9 ; 0x9
.quad 13 ; 0xd
.quad 14 ; 0xe
.quad 0 ; 0x0
.quad 1 ; 0x1
.quad 2 ; 0x2
.quad 4 ; 0x4
.quad 6 ; 0x6
.quad 8 ; 0x8
.quad 10 ; 0xa
.quad 11 ; 0xb
.quad 12 ; 0xc
.quad 0 ; 0x0
.quad 1 ; 0x1
.quad 2 ; 0x2
.quad 4 ; 0x4
.quad 6 ; 0x6
.quad 8 ; 0x8
.quad 10 ; 0xa
.quad 11 ; 0xb
.quad 12 ; 0xc
.quad 0 ; 0x0
.quad 1 ; 0x1
.quad 2 ; 0x2
.quad 3 ; 0x3
.quad 5 ; 0x5
.quad 7 ; 0x7
.quad 9 ; 0x9
.quad 13 ; 0xd
.quad 14 ; 0xe
.quad 0 ; 0x0
.quad 1 ; 0x1
.quad 2 ; 0x2
.quad 3 ; 0x3
.quad 5 ; 0x5
.quad 7 ; 0x7
.quad 9 ; 0x9
.quad 13 ; 0xd
.quad 14 ; 0xe
.quad 0 ; 0x0
.quad 1 ; 0x1
.quad 2 ; 0x2
.quad 4 ; 0x4
.quad 6 ; 0x6
.quad 8 ; 0x8
.quad 10 ; 0xa
.quad 11 ; 0xb
.quad 12 ; 0xc
.quad 0 ; 0x0
.quad 1 ; 0x1
.quad 2 ; 0x2
.quad 4 ; 0x4
.quad 6 ; 0x6
.quad 8 ; 0x8
.quad 10 ; 0xa
.quad 11 ; 0xb
.quad 12 ; 0xc
.quad 0 ; 0x0
.quad 1 ; 0x1
.quad 2 ; 0x2
.quad 3 ; 0x3
.quad 4 ; 0x4
.quad 5 ; 0x5
.quad 6 ; 0x6
.quad 7 ; 0x7
.quad 8 ; 0x8
.quad 9 ; 0x9
.quad 10 ; 0xa
.quad 0 ; 0x0
.quad 1 ; 0x1
.quad 3 ; 0x3
.quad 4 ; 0x4
.quad 5 ; 0x5
.quad 6 ; 0x6
.quad 7 ; 0x7
.quad 8 ; 0x8
.quad 9 ; 0x9
.quad 10 ; 0xa
.quad 0 ; 0x0
.quad 2 ; 0x2
.quad 3 ; 0x3
.quad 4 ; 0x4
.quad 5 ; 0x5
.quad 6 ; 0x6
.quad 7 ; 0x7
.quad 8 ; 0x8
.quad 9 ; 0x9
.quad 10 ; 0xa
.quad 0 ; 0x0
.quad 1 ; 0x1
.quad 2 ; 0x2
.quad 3 ; 0x3
.quad 5 ; 0x5
.quad 7 ; 0x7
.quad 9 ; 0x9
.quad 0 ; 0x0
.quad 1 ; 0x1
.quad 2 ; 0x2
.quad 4 ; 0x4
.quad 6 ; 0x6
.quad 8 ; 0x8
.quad 10 ; 0xa
.quad 0 ; 0x0
.quad 1 ; 0x1
.quad 2 ; 0x2
.quad 3 ; 0x3
.quad 5 ; 0x5
.quad 7 ; 0x7
.quad 9 ; 0x9
.quad 0 ; 0x0
.quad 1 ; 0x1
.quad 2 ; 0x2
.quad 4 ; 0x4
.quad 6 ; 0x6
.quad 8 ; 0x8
.quad 10 ; 0xa
.quad 0 ; 0x0
.quad 1 ; 0x1
.quad 2 ; 0x2
.quad 3 ; 0x3
.quad 5 ; 0x5
.quad 7 ; 0x7
.quad 9 ; 0x9
.quad 0 ; 0x0
.quad 1 ; 0x1
.quad 2 ; 0x2
.quad 4 ; 0x4
.quad 6 ; 0x6
.quad 8 ; 0x8
.quad 10 ; 0xa
.quad 0 ; 0x0
.quad 1 ; 0x1
.quad 2 ; 0x2
.quad 3 ; 0x3
.quad 5 ; 0x5
.quad 7 ; 0x7
.quad 9 ; 0x9
.quad 0 ; 0x0
.quad 1 ; 0x1
.quad 2 ; 0x2
.quad 4 ; 0x4
.quad 6 ; 0x6
.quad 8 ; 0x8
.quad 10 ; 0xa
.subsections_via_symbols
| the_stack_data/11178.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _aggsum_init ## -- Begin function aggsum_init
.p2align 4, 0x90
_aggsum_init: ## @aggsum_init
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %esi, %r14d
movq %rdi, %r15
movl $32, %esi
callq _bzero
movl %r14d, 20(%r15)
movl %r14d, 24(%r15)
leaq 16(%r15), %rdi
movq _MUTEX_DEFAULT@GOTPCREL(%rip), %r14
movl (%r14), %edx
xorl %esi, %esi
xorl %ecx, %ecx
callq _mutex_init
movq _boot_ncpus@GOTPCREL(%rip), %rax
movl (%rax), %ecx
movl %ecx, (%r15)
movl (%rax), %edi
shll $2, %edi
movq _KM_SLEEP@GOTPCREL(%rip), %rax
movl (%rax), %esi
callq _kmem_zalloc
movq %rax, 8(%r15)
cmpl $0, (%r15)
jle LBB0_4
## %bb.1:
movl (%r14), %edx
movq %rax, %rdi
xorl %esi, %esi
xorl %ecx, %ecx
callq _mutex_init
cmpl $2, (%r15)
jl LBB0_4
## %bb.2:
movl $1, %r12d
movl $4, %ebx
.p2align 4, 0x90
LBB0_3: ## =>This Inner Loop Header: Depth=1
movq 8(%r15), %rdi
addq %rbx, %rdi
movl (%r14), %edx
xorl %esi, %esi
xorl %ecx, %ecx
callq _mutex_init
incq %r12
movslq (%r15), %rax
addq $4, %rbx
cmpq %rax, %r12
jl LBB0_3
LBB0_4:
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _MUTEX_DEFAULT,4,2 ## @MUTEX_DEFAULT
.comm _boot_ncpus,4,2 ## @boot_ncpus
.comm _KM_SLEEP,4,2 ## @KM_SLEEP
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _aggsum_init ; -- Begin function aggsum_init
.p2align 2
_aggsum_init: ; @aggsum_init
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x20, x1
mov x19, x0
mov w1, #32
bl _bzero
stp w20, w20, [x19, #20]
add x0, x19, #16
Lloh0:
adrp x20, _MUTEX_DEFAULT@GOTPAGE
Lloh1:
ldr x20, [x20, _MUTEX_DEFAULT@GOTPAGEOFF]
ldr w2, [x20]
mov x1, #0
mov x3, #0
bl _mutex_init
Lloh2:
adrp x8, _boot_ncpus@GOTPAGE
Lloh3:
ldr x8, [x8, _boot_ncpus@GOTPAGEOFF]
ldr w9, [x8]
str w9, [x19]
ldr w8, [x8]
lsl w0, w8, #2
Lloh4:
adrp x8, _KM_SLEEP@GOTPAGE
Lloh5:
ldr x8, [x8, _KM_SLEEP@GOTPAGEOFF]
Lloh6:
ldr w1, [x8]
bl _kmem_zalloc
str x0, [x19, #8]
ldr w8, [x19]
cmp w8, #1
b.lt LBB0_4
; %bb.1:
ldr w2, [x20]
mov x1, #0
mov x3, #0
bl _mutex_init
ldr w8, [x19]
cmp w8, #2
b.lt LBB0_4
; %bb.2:
mov w21, #1
mov w22, #4
LBB0_3: ; =>This Inner Loop Header: Depth=1
ldr x8, [x19, #8]
add x0, x8, x22
ldr w2, [x20]
mov x1, #0
mov x3, #0
bl _mutex_init
add x21, x21, #1
ldrsw x8, [x19]
add x22, x22, #4
cmp x21, x8
b.lt LBB0_3
LBB0_4:
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
.loh AdrpLdrGotLdr Lloh4, Lloh5, Lloh6
.loh AdrpLdrGot Lloh2, Lloh3
.loh AdrpLdrGot Lloh0, Lloh1
.cfi_endproc
; -- End function
.comm _MUTEX_DEFAULT,4,2 ; @MUTEX_DEFAULT
.comm _boot_ncpus,4,2 ; @boot_ncpus
.comm _KM_SLEEP,4,2 ; @KM_SLEEP
.subsections_via_symbols
| AnghaBench/zfs/module/zfs/extr_aggsum.c_aggsum_init.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function ip6addrlbl_fill
_ip6addrlbl_fill: ## @ip6addrlbl_fill
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %r9d, %eax
movl %edx, %r12d
movq %rsi, %rbx
movq %rdi, %r14
movl 16(%rbp), %r9d
movl %ecx, %esi
movl %r8d, %edx
movl %eax, %ecx
movl $4, %r8d
callq _nlmsg_put
testq %rax, %rax
je LBB0_1
## %bb.2:
movq %rax, %r15
movl 8(%rbx), %edx
movl 12(%rbx), %esi
movq %rax, %rdi
movl %r12d, %ecx
callq _ip6addrlbl_putmsg
movq _IFAL_ADDRESS@GOTPCREL(%rip), %rax
movl (%rax), %esi
leaq 4(%rbx), %rdx
movq %r14, %rdi
callq _nla_put_in6_addr
testq %rax, %rax
js LBB0_4
## %bb.3:
movq _IFAL_LABEL@GOTPCREL(%rip), %rax
movl (%rax), %esi
movl (%rbx), %edx
movq %r14, %rdi
callq _nla_put_u32
testq %rax, %rax
js LBB0_4
## %bb.5:
movq %r14, %rdi
movq %r15, %rsi
callq _nlmsg_end
xorl %eax, %eax
jmp LBB0_6
LBB0_4:
movq %r14, %rdi
movq %r15, %rsi
callq _nlmsg_cancel
LBB0_1:
movq _EMSGSIZE@GOTPCREL(%rip), %rcx
xorl %eax, %eax
subl (%rcx), %eax
LBB0_6:
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _EMSGSIZE,4,2 ## @EMSGSIZE
.comm _IFAL_ADDRESS,4,2 ## @IFAL_ADDRESS
.comm _IFAL_LABEL,4,2 ## @IFAL_LABEL
.no_dead_strip _ip6addrlbl_fill
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function ip6addrlbl_fill
_ip6addrlbl_fill: ; @ip6addrlbl_fill
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x22, x2
mov x21, x1
mov x19, x0
mov x1, x3
mov x2, x4
mov x3, x5
mov w4, #4
mov x5, x6
bl _nlmsg_put
cbz x0, LBB0_5
; %bb.1:
mov x20, x0
ldp w2, w1, [x21, #8]
mov x3, x22
bl _ip6addrlbl_putmsg
Lloh0:
adrp x8, _IFAL_ADDRESS@GOTPAGE
Lloh1:
ldr x8, [x8, _IFAL_ADDRESS@GOTPAGEOFF]
Lloh2:
ldr w1, [x8]
add x2, x21, #4
mov x0, x19
bl _nla_put_in6_addr
tbnz x0, #63, LBB0_4
; %bb.2:
Lloh3:
adrp x8, _IFAL_LABEL@GOTPAGE
Lloh4:
ldr x8, [x8, _IFAL_LABEL@GOTPAGEOFF]
Lloh5:
ldr w1, [x8]
ldr w2, [x21]
mov x0, x19
bl _nla_put_u32
tbnz x0, #63, LBB0_4
; %bb.3:
mov x0, x19
mov x1, x20
bl _nlmsg_end
mov w0, #0
b LBB0_6
LBB0_4:
mov x0, x19
mov x1, x20
bl _nlmsg_cancel
LBB0_5:
Lloh6:
adrp x8, _EMSGSIZE@GOTPAGE
Lloh7:
ldr x8, [x8, _EMSGSIZE@GOTPAGEOFF]
Lloh8:
ldr w8, [x8]
neg w0, w8
LBB0_6:
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGotLdr Lloh6, Lloh7, Lloh8
.cfi_endproc
; -- End function
.comm _EMSGSIZE,4,2 ; @EMSGSIZE
.comm _IFAL_ADDRESS,4,2 ; @IFAL_ADDRESS
.comm _IFAL_LABEL,4,2 ; @IFAL_LABEL
.no_dead_strip _ip6addrlbl_fill
.subsections_via_symbols
| AnghaBench/linux/net/ipv6/extr_addrlabel.c_ip6addrlbl_fill.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function mach_msg_destroy_port
_mach_msg_destroy_port: ## @mach_msg_destroy_port
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
subq $16, %rsp
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movl %esi, %ebx
movl %edi, %r14d
movl %edi, -20(%rbp)
movl %esi, -24(%rbp)
callq _MACH_PORT_VALID
testq %rax, %rax
je LBB0_8
## %bb.1:
addl $-128, %ebx
cmpl $4, %ebx
ja LBB0_8
## %bb.2:
leaq LJTI0_0(%rip), %rax
movslq (%rax,%rbx,4), %rcx
addq %rax, %rcx
jmpq *%rcx
LBB0_3:
movq _mach_task_self_@GOTPCREL(%rip), %rax
movl (%rax), %edi
jmp LBB0_4
LBB0_5:
movq _mach_task_self_@GOTPCREL(%rip), %rax
movl (%rax), %edi
movq _MACH_PORT_RIGHT_RECEIVE@GOTPCREL(%rip), %rax
movl (%rax), %edx
movl %r14d, %esi
movl $-1, %ecx
addq $16, %rsp
popq %rbx
popq %r14
popq %rbp
jmp _mach_port_mod_refs ## TAILCALL
LBB0_6:
movq _mach_task_self_@GOTPCREL(%rip), %rbx
movl (%rbx), %edi
movl %r14d, %esi
movl %r14d, %edx
movl $132, %ecx
callq _mach_port_insert_right
movl (%rbx), %edi
LBB0_4:
movl %r14d, %esi
addq $16, %rsp
popq %rbx
popq %r14
popq %rbp
jmp _mach_port_deallocate ## TAILCALL
LBB0_7:
movq _mach_task_self_@GOTPCREL(%rip), %rbx
movl (%rbx), %edi
leaq -20(%rbp), %rcx
leaq -24(%rbp), %r8
movl %r14d, %esi
movl $131, %edx
callq _mach_port_extract_right
movl (%rbx), %edi
movl -20(%rbp), %esi
callq _mach_port_deallocate
LBB0_8:
addq $16, %rsp
popq %rbx
popq %r14
popq %rbp
retq
.cfi_endproc
.p2align 2, 0x90
.data_region jt32
.set L0_0_set_3, LBB0_3-LJTI0_0
.set L0_0_set_5, LBB0_5-LJTI0_0
.set L0_0_set_7, LBB0_7-LJTI0_0
.set L0_0_set_6, LBB0_6-LJTI0_0
LJTI0_0:
.long L0_0_set_3
.long L0_0_set_3
.long L0_0_set_5
.long L0_0_set_7
.long L0_0_set_6
.end_data_region
## -- End function
.comm _mach_task_self_,4,2 ## @mach_task_self_
.comm _MACH_PORT_RIGHT_RECEIVE,4,2 ## @MACH_PORT_RIGHT_RECEIVE
.no_dead_strip _mach_msg_destroy_port
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function mach_msg_destroy_port
_mach_msg_destroy_port: ; @mach_msg_destroy_port
.cfi_startproc
; %bb.0:
sub sp, sp, #48
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x20, x1
mov x19, x0
stp w1, w0, [sp, #8]
bl _MACH_PORT_VALID
cbz x0, LBB0_8
; %bb.1:
sub w8, w20, #128
cmp w8, #4
b.hi LBB0_8
; %bb.2:
Lloh0:
adrp x9, lJTI0_0@PAGE
Lloh1:
add x9, x9, lJTI0_0@PAGEOFF
adr x10, LBB0_3
ldrb w11, [x9, x8]
add x10, x10, x11, lsl #2
br x10
LBB0_3:
Lloh2:
adrp x8, _mach_task_self_@GOTPAGE
Lloh3:
ldr x8, [x8, _mach_task_self_@GOTPAGEOFF]
Lloh4:
ldr w0, [x8]
b LBB0_6
LBB0_4:
Lloh5:
adrp x8, _mach_task_self_@GOTPAGE
Lloh6:
ldr x8, [x8, _mach_task_self_@GOTPAGEOFF]
Lloh7:
adrp x9, _MACH_PORT_RIGHT_RECEIVE@GOTPAGE
Lloh8:
ldr x9, [x9, _MACH_PORT_RIGHT_RECEIVE@GOTPAGEOFF]
Lloh9:
ldr w0, [x8]
Lloh10:
ldr w2, [x9]
mov x1, x19
mov w3, #-1
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #48
b _mach_port_mod_refs
LBB0_5:
Lloh11:
adrp x20, _mach_task_self_@GOTPAGE
Lloh12:
ldr x20, [x20, _mach_task_self_@GOTPAGEOFF]
ldr w0, [x20]
mov x1, x19
mov x2, x19
mov w3, #132
bl _mach_port_insert_right
ldr w0, [x20]
LBB0_6:
mov x1, x19
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #48
b _mach_port_deallocate
LBB0_7:
Lloh13:
adrp x20, _mach_task_self_@GOTPAGE
Lloh14:
ldr x20, [x20, _mach_task_self_@GOTPAGEOFF]
ldr w0, [x20]
add x3, sp, #12
add x4, sp, #8
mov x1, x19
mov w2, #131
bl _mach_port_extract_right
ldr w0, [x20]
ldr w1, [sp, #12]
bl _mach_port_deallocate
LBB0_8:
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #48
ret
.loh AdrpAdd Lloh0, Lloh1
.loh AdrpLdrGotLdr Lloh2, Lloh3, Lloh4
.loh AdrpLdrGotLdr Lloh7, Lloh8, Lloh10
.loh AdrpLdrGotLdr Lloh5, Lloh6, Lloh9
.loh AdrpLdrGot Lloh11, Lloh12
.loh AdrpLdrGot Lloh13, Lloh14
.cfi_endproc
.section __TEXT,__const
lJTI0_0:
.byte (LBB0_3-LBB0_3)>>2
.byte (LBB0_3-LBB0_3)>>2
.byte (LBB0_4-LBB0_3)>>2
.byte (LBB0_7-LBB0_3)>>2
.byte (LBB0_5-LBB0_3)>>2
; -- End function
.comm _mach_task_self_,4,2 ; @mach_task_self_
.comm _MACH_PORT_RIGHT_RECEIVE,4,2 ; @MACH_PORT_RIGHT_RECEIVE
.no_dead_strip _mach_msg_destroy_port
.subsections_via_symbols
| AnghaBench/darwin-xnu/libsyscall/mach/extr_mach_msg.c_mach_msg_destroy_port.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.section __TEXT,__literal16,16byte_literals
.p2align 4 ## -- Begin function HUF_readStats
LCPI0_0:
.long 15 ## 0xf
.long 15 ## 0xf
.space 4
.space 4
.section __TEXT,__text,regular,pure_instructions
.globl _HUF_readStats
.p2align 4, 0x90
_HUF_readStats: ## @HUF_readStats
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $56, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq 16(%rbp), %rbx
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -48(%rbp)
testq %rbx, %rbx
je LBB0_10
## %bb.1:
movq %r9, %r13
movq %rcx, %r12
movq %rdx, %r10
movq %rsi, %r15
movq %rdi, %r14
movslq (%r9), %rcx
cmpq $128, %rcx
jb LBB0_13
## %bb.2:
leaq -126(%rcx), %rdi
movq %rdi, %rsi
shrq %rsi
cmpq %rbx, %rsi
jae LBB0_10
## %bb.3:
movq %r12, -64(%rbp) ## 8-byte Spill
leaq -127(%rcx), %r12
cmpq %r15, %r12
jae LBB0_17
## %bb.4:
testq %r12, %r12
movq %r8, -72(%rbp) ## 8-byte Spill
je LBB0_18
## %bb.5:
leaq -128(%rcx), %r11
cmpq $6, %r11
jae LBB0_34
LBB0_6:
xorl %edx, %edx
LBB0_7:
movq %rdx, %rax
andq $-2, %rax
leaq 4(,%rax,2), %rax
addq %r13, %rax
.p2align 4, 0x90
LBB0_8: ## =>This Inner Loop Header: Depth=1
movl (%rax), %ecx
sarl $4, %ecx
movl %ecx, (%r14,%rdx,4)
movl (%rax), %ecx
andl $15, %ecx
movl %ecx, 4(%r14,%rdx,4)
addq $2, %rdx
addq $4, %rax
cmpq %r12, %rdx
jb LBB0_8
jmp LBB0_19
LBB0_10:
movq _srcSize_wrong@GOTPCREL(%rip), %rax
LBB0_11:
movl (%rax), %edi
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
cmpq -48(%rbp), %rax
jne LBB0_44
## %bb.12:
leaq -40(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp _ERROR ## TAILCALL
LBB0_13:
movq %r8, -72(%rbp) ## 8-byte Spill
movq %r10, -88(%rbp) ## 8-byte Spill
movl $6, %edi
movq %rcx, -56(%rbp) ## 8-byte Spill
callq _FSE_DTABLE_SIZE_U32
movq %rsp, -80(%rbp) ## 8-byte Spill
movl %eax, %eax
shlq $2, %rax
callq ____chkstk_darwin
movq -56(%rbp), %rcx ## 8-byte Reload
addq $15, %rax
andq $-16, %rax
subq %rax, %rsp
movq %rsp, %r8
leaq 1(%rcx), %rax
cmpq %rbx, %rax
jbe LBB0_16
## %bb.14:
movq -80(%rbp), %rbx ## 8-byte Reload
movq _srcSize_wrong@GOTPCREL(%rip), %rax
movl (%rax), %edi
callq _ERROR
movq ___stack_chk_guard@GOTPCREL(%rip), %rcx
movq (%rcx), %rcx
cmpq -48(%rbp), %rcx
jne LBB0_44
## %bb.15:
movq %rbx, %rsp
jmp LBB0_33
LBB0_16:
movq %r12, -64(%rbp) ## 8-byte Spill
decq %r15
addq $4, %r13
movq %r14, %rdi
movq %r15, %rsi
movq %r13, %rdx
movl $6, %r9d
callq _FSE_decompress_wksp
movq %rax, %r12
movq %rax, %rdi
callq _FSE_isError
testq %rax, %rax
movq -80(%rbp), %rsp ## 8-byte Reload
movq -88(%rbp), %r10 ## 8-byte Reload
jne LBB0_31
jmp LBB0_20
LBB0_17:
movq _corruption_detected@GOTPCREL(%rip), %rax
jmp LBB0_11
LBB0_18:
xorl %r12d, %r12d
LBB0_19:
movq %rsi, -56(%rbp) ## 8-byte Spill
LBB0_20:
movq _HUF_TABLELOG_MAX@GOTPCREL(%rip), %r15
movl (%r15), %eax
leal 8(,%rax,8), %edx
movq %r10, %r13
movq %r10, %rdi
xorl %esi, %esi
callq _memset
testq %r12, %r12
je LBB0_30
## %bb.21:
movl (%r15), %eax
xorl %ebx, %ebx
xorl %edx, %edx
movq %r13, %rdi
.p2align 4, 0x90
LBB0_22: ## =>This Inner Loop Header: Depth=1
movslq (%r14,%rdx,4), %rcx
cmpl %eax, %ecx
jge LBB0_30
## %bb.23: ## in Loop: Header=BB0_22 Depth=1
incq (%rdi,%rcx,8)
movl $1, %esi
## kill: def $cl killed $cl killed $rcx
shll %cl, %esi
sarl %esi
movslq %esi, %rcx
addq %rcx, %rbx
incq %rdx
cmpq %rdx, %r12
jne LBB0_22
## %bb.24:
testq %rbx, %rbx
je LBB0_30
## %bb.25:
movq %rbx, %rdi
callq _BIT_highbit32
incl %eax
cmpl (%r15), %eax
ja LBB0_30
## %bb.26:
movslq %eax, %rdx
movl $1, %esi
movl %eax, %ecx
shll %cl, %esi
movq -72(%rbp), %rax ## 8-byte Reload
movq %rdx, (%rax)
movslq %esi, %r15
subq %rbx, %r15
movq %r15, %rdi
callq _BIT_highbit32
movl %eax, %ecx
movl $1, %eax
shll %cl, %eax
movslq %eax, %rbx
movq %r15, %rdi
callq _BIT_highbit32
cmpq %rbx, %r15
jne LBB0_30
## %bb.27:
movslq %eax, %rcx
incl %eax
movl %eax, (%r14,%r12,4)
incq 8(%r13,%rcx,8)
movq 8(%r13), %rax
cmpq $2, %rax
jb LBB0_30
## %bb.28:
andl $1, %eax
testq %rax, %rax
jne LBB0_30
## %bb.29:
incq %r12
movq -64(%rbp), %rax ## 8-byte Reload
movq %r12, (%rax)
movq -56(%rbp), %r12 ## 8-byte Reload
incq %r12
jmp LBB0_31
LBB0_30:
movq _corruption_detected@GOTPCREL(%rip), %rax
movl (%rax), %edi
callq _ERROR
movq %rax, %r12
LBB0_31:
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
cmpq -48(%rbp), %rax
jne LBB0_44
## %bb.32:
movq %r12, %rax
LBB0_33:
leaq -40(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB0_34:
shrq %r11
movl $8, %edx
movq %r11, %rax
mulq %rdx
seto %r9b
leaq (%r14,%rax), %rbx
xorl %edx, %edx
cmpq %r14, %rbx
jb LBB0_7
## %bb.35:
testb %r9b, %r9b
jne LBB0_7
## %bb.36:
leaq 4(%r14), %rbx
addq %r14, %rax
addq $4, %rax
cmpq %rbx, %rax
jb LBB0_7
## %bb.37:
testb %r9b, %r9b
jne LBB0_7
## %bb.38:
leaq 4(%r13), %rax
addq %rcx, %rcx
addq $-248, %rcx
andq $-4, %rcx
addq %r13, %rcx
cmpq %r14, %rcx
jbe LBB0_40
## %bb.39:
movabsq $4611686018427387902, %rcx ## imm = 0x3FFFFFFFFFFFFFFE
andq %rcx, %rdi
leaq (%r14,%rdi,4), %rcx
cmpq %rcx, %rax
jb LBB0_6
LBB0_40:
incq %r11
movq %r11, %rdi
andq $-4, %rdi
leaq (%rdi,%rdi), %rdx
xorl %ebx, %ebx
movabsq $4611686018427387900, %r9 ## imm = 0x3FFFFFFFFFFFFFFC
movdqa LCPI0_0(%rip), %xmm0 ## xmm0 = <15,15,u,u>
LBB0_41: ## =>This Inner Loop Header: Depth=1
movq %rbx, %rcx
andq %r9, %rcx
movq (%rax,%rcx,4), %xmm1 ## xmm1 = mem[0],zero
movq 8(%rax,%rcx,4), %xmm2 ## xmm2 = mem[0],zero
movdqa %xmm1, %xmm3
psrad $4, %xmm3
movdqa %xmm2, %xmm4
psrad $4, %xmm4
pand %xmm0, %xmm1
punpckldq %xmm1, %xmm3 ## xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
pand %xmm0, %xmm2
punpckldq %xmm2, %xmm4 ## xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
movdqu %xmm3, (%r14,%rbx,8)
movdqu %xmm4, 16(%r14,%rbx,8)
addq $4, %rbx
cmpq %rbx, %rdi
jne LBB0_41
## %bb.42:
cmpq %rdi, %r11
jne LBB0_7
jmp LBB0_19
LBB0_44:
callq ___stack_chk_fail
.cfi_endproc
## -- End function
.comm _srcSize_wrong,4,2 ## @srcSize_wrong
.comm _corruption_detected,4,2 ## @corruption_detected
.comm _HUF_TABLELOG_MAX,4,2 ## @HUF_TABLELOG_MAX
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _HUF_readStats ; -- Begin function HUF_readStats
.p2align 2
_HUF_readStats: ; @HUF_readStats
.cfi_startproc
; %bb.0:
stp x28, x27, [sp, #-96]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 96
stp x26, x25, [sp, #16] ; 16-byte Folded Spill
stp x24, x23, [sp, #32] ; 16-byte Folded Spill
stp x22, x21, [sp, #48] ; 16-byte Folded Spill
stp x20, x19, [sp, #64] ; 16-byte Folded Spill
stp x29, x30, [sp, #80] ; 16-byte Folded Spill
add x29, sp, #80
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
sub sp, sp, #16
Lloh0:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh1:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh2:
ldr x8, [x8]
stur x8, [x29, #-88]
cbz x6, LBB0_9
; %bb.1:
mov x22, x6
mov x24, x5
mov x23, x4
mov x19, x3
mov x20, x2
mov x26, x1
mov x21, x0
ldrsw x25, [x5]
cmp w25, #128
b.lo LBB0_12
; %bb.2:
sub x10, x25, #126
lsr x27, x10, #1
cmp x27, x22
b.hs LBB0_9
; %bb.3:
sub x22, x25, #127
cmp x22, x26
b.hs LBB0_30
; %bb.4:
cbz x22, LBB0_16
; %bb.5:
sub x11, x25, #128
cmp x11, #14
b.hs LBB0_31
LBB0_6:
mov x8, #0
LBB0_7:
lsl x9, x8, #1
and x9, x9, #0xfffffffffffffffc
add x9, x9, x24
add x9, x9, #4
LBB0_8: ; =>This Inner Loop Header: Depth=1
ldr w10, [x9]
asr w10, w10, #4
add x11, x21, x8, lsl #2
str w10, [x11]
ldr w10, [x9], #4
and w10, w10, #0xf
str w10, [x11, #4]
add x8, x8, #2
cmp x8, x22
b.lo LBB0_8
b LBB0_16
LBB0_9:
Lloh3:
adrp x8, _srcSize_wrong@GOTPAGE
Lloh4:
ldr x8, [x8, _srcSize_wrong@GOTPAGEOFF]
LBB0_10:
ldr w0, [x8]
ldur x8, [x29, #-88]
Lloh5:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh6:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh7:
ldr x9, [x9]
cmp x9, x8
b.ne LBB0_40
; %bb.11:
sub sp, x29, #80
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
ldp x26, x25, [sp, #16] ; 16-byte Folded Reload
ldp x28, x27, [sp], #96 ; 16-byte Folded Reload
b _ERROR
LBB0_12:
mov w0, #6
bl _FSE_DTABLE_SIZE_U32
mov x27, sp
mov w8, w0
lsl x8, x8, #2
mov x9, x8
Lloh8:
adrp x16, ___chkstk_darwin@GOTPAGE
Lloh9:
ldr x16, [x16, ___chkstk_darwin@GOTPAGEOFF]
blr x16
mov x9, sp
add x8, x8, #15
and x8, x8, #0x7fffffff0
sub x4, x9, x8
mov sp, x4
add x8, x25, #1
cmp x8, x22
b.ls LBB0_15
; %bb.13:
Lloh10:
adrp x8, _srcSize_wrong@GOTPAGE
Lloh11:
ldr x8, [x8, _srcSize_wrong@GOTPAGEOFF]
Lloh12:
ldr w0, [x8]
bl _ERROR
ldur x8, [x29, #-88]
Lloh13:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh14:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh15:
ldr x9, [x9]
cmp x9, x8
b.ne LBB0_40
; %bb.14:
mov sp, x27
b LBB0_29
LBB0_15:
sub x1, x26, #1
add x2, x24, #4
mov x0, x21
mov x3, x25
mov w5, #6
bl _FSE_decompress_wksp
mov x22, x0
bl _FSE_isError
mov sp, x27
mov x27, x25
cbnz x0, LBB0_27
LBB0_16:
Lloh16:
adrp x25, _HUF_TABLELOG_MAX@GOTPAGE
Lloh17:
ldr x25, [x25, _HUF_TABLELOG_MAX@GOTPAGEOFF]
ldr w8, [x25]
lsl w8, w8, #3
add w2, w8, #8
mov x0, x20
mov w1, #0
bl _memset
cbz x22, LBB0_26
; %bb.17:
mov x24, #0
ldr w8, [x25]
mov w9, #1
mov x10, x21
mov x11, x22
LBB0_18: ; =>This Inner Loop Header: Depth=1
ldrsw x12, [x10]
cmp w12, w8
b.ge LBB0_26
; %bb.19: ; in Loop: Header=BB0_18 Depth=1
lsl x13, x12, #3
ldr x14, [x20, x13]
add x14, x14, #1
str x14, [x20, x13]
lsl w12, w9, w12
asr w12, w12, #1
add x24, x24, w12, sxtw
add x10, x10, #4
subs x11, x11, #1
b.ne LBB0_18
; %bb.20:
cbz x24, LBB0_26
; %bb.21:
mov x0, x24
bl _BIT_highbit32
add w8, w0, #1
ldr w9, [x25]
cmp w8, w9
b.hi LBB0_26
; %bb.22:
sxtw x9, w8
str x9, [x23]
mov w25, #1
lsl w8, w25, w8
sxtw x8, w8
sub x23, x8, x24
mov x0, x23
bl _BIT_highbit32
lsl w24, w25, w0
mov x0, x23
bl _BIT_highbit32
cmp x23, w24, sxtw
b.ne LBB0_26
; %bb.23:
add w8, w0, #1
str w8, [x21, x22, lsl #2]
sbfiz x8, x8, #3, #32
ldr x9, [x20, x8]
add x9, x9, #1
str x9, [x20, x8]
ldr x8, [x20, #8]
cmp x8, #2
b.lo LBB0_26
; %bb.24:
tbnz w8, #0, LBB0_26
; %bb.25:
add x8, x22, #1
str x8, [x19]
add x22, x27, #1
b LBB0_27
LBB0_26:
Lloh18:
adrp x8, _corruption_detected@GOTPAGE
Lloh19:
ldr x8, [x8, _corruption_detected@GOTPAGEOFF]
Lloh20:
ldr w0, [x8]
bl _ERROR
mov x22, x0
LBB0_27:
ldur x8, [x29, #-88]
Lloh21:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh22:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh23:
ldr x9, [x9]
cmp x9, x8
b.ne LBB0_40
; %bb.28:
mov x0, x22
LBB0_29:
sub sp, x29, #80
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
ldp x26, x25, [sp, #16] ; 16-byte Folded Reload
ldp x28, x27, [sp], #96 ; 16-byte Folded Reload
ret
LBB0_30:
Lloh24:
adrp x8, _corruption_detected@GOTPAGE
Lloh25:
ldr x8, [x8, _corruption_detected@GOTPAGEOFF]
b LBB0_10
LBB0_31:
mov x8, #0
lsl x9, x11, #2
and x12, x9, #0xfffffffffffffff8
cmp xzr, x11, lsr #62
cset w9, ne
add x13, x21, x12
cmp x13, x21
b.lo LBB0_7
; %bb.32:
tbnz w9, #0, LBB0_7
; %bb.33:
add x13, x21, #4
add x12, x13, x12
cmp x12, x13
b.lo LBB0_7
; %bb.34:
tbnz w9, #0, LBB0_7
; %bb.35:
add x9, x24, #4
lsl x8, x25, #1
sub x8, x8, #248
and x8, x8, #0xfffffffffffffffc
add x8, x24, x8
cmp x8, x21
b.ls LBB0_37
; %bb.36:
and x8, x10, #0x3ffffffffffffffe
add x8, x21, x8, lsl #2
cmp x9, x8
b.lo LBB0_6
LBB0_37:
mov x10, #0
lsr x8, x11, #1
add x11, x8, #1
and x12, x11, #0xfffffffffffffff8
lsl x8, x12, #1
add x13, x21, #32
movi.4s v0, #15
LBB0_38: ; =>This Inner Loop Header: Depth=1
and x14, x10, #0x3ffffffffffffff8
add x14, x9, x14, lsl #2
ldp q1, q2, [x14]
sshr.4s v3, v1, #4
and.16b v4, v1, v0
sub x14, x13, #32
st2.4s { v3, v4 }, [x14]
sshr.4s v3, v2, #4
and.16b v4, v2, v0
st2.4s { v3, v4 }, [x13]
add x10, x10, #8
add x13, x13, #64
cmp x12, x10
b.ne LBB0_38
; %bb.39:
cmp x11, x12
b.ne LBB0_7
b LBB0_16
LBB0_40:
bl ___stack_chk_fail
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGot Lloh3, Lloh4
.loh AdrpLdrGotLdr Lloh5, Lloh6, Lloh7
.loh AdrpLdrGot Lloh8, Lloh9
.loh AdrpLdrGotLdr Lloh13, Lloh14, Lloh15
.loh AdrpLdrGotLdr Lloh10, Lloh11, Lloh12
.loh AdrpLdrGot Lloh16, Lloh17
.loh AdrpLdrGotLdr Lloh18, Lloh19, Lloh20
.loh AdrpLdrGotLdr Lloh21, Lloh22, Lloh23
.loh AdrpLdrGot Lloh24, Lloh25
.cfi_endproc
; -- End function
.comm _srcSize_wrong,4,2 ; @srcSize_wrong
.comm _corruption_detected,4,2 ; @corruption_detected
.comm _HUF_TABLELOG_MAX,4,2 ; @HUF_TABLELOG_MAX
.subsections_via_symbols
| AnghaBench/zstd/lib/common/extr_entropy_common.c_HUF_readStats.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function ata_ali_reset
_ata_ali_reset: ## @ata_ali_reset
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $16, %rsp
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %edi, %ebx
callq _device_get_parent
movl %eax, %edi
callq _device_get_softc
movq %rax, %r14
movl %ebx, %edi
callq _device_get_softc
movq %rax, %r15
movl %ebx, %edi
callq _ata_generic_reset
movq (%r14), %rax
movl (%rax), %eax
andl $-2, %eax
cmpl $194, %eax
jne LBB0_8
## %bb.1:
movl %ebx, %edi
callq _GRANDPARENT
leaq -48(%rbp), %rsi
leaq -36(%rbp), %rdx
movl %eax, %edi
callq _device_get_children
testl %eax, %eax
jne LBB0_8
## %bb.2:
cmpl $0, -36(%rbp)
jle LBB0_7
## %bb.3:
xorl %ebx, %ebx
movq _ATA_ALI_1533@GOTPCREL(%rip), %r14
.p2align 4, 0x90
LBB0_4: ## =>This Inner Loop Header: Depth=1
movq -48(%rbp), %rax
movl (%rax,%rbx,4), %edi
callq _pci_get_devid
cmpq (%r14), %rax
je LBB0_5
## %bb.6: ## in Loop: Header=BB0_4 Depth=1
incq %rbx
movslq -36(%rbp), %rax
cmpq %rax, %rbx
jl LBB0_4
jmp LBB0_7
LBB0_5:
movq -48(%rbp), %rax
movl (%rax,%rbx,4), %r12d
movl %r12d, %edi
movl $88, %esi
movl $1, %edx
callq _pci_read_config
movb (%r15), %cl
movl $4, %edx
shll %cl, %edx
movl $4, %r14d
notl %edx
andl %eax, %edx
movl %r12d, %edi
movl $88, %esi
movl $1, %ecx
callq _pci_write_config
movq -48(%rbp), %rax
movl (%rax,%rbx,4), %ebx
movl %ebx, %edi
movl $88, %esi
movl $1, %edx
callq _pci_read_config
movb (%r15), %cl
shll %cl, %r14d
orl %eax, %r14d
movl %ebx, %edi
movl $88, %esi
movl %r14d, %edx
movl $1, %ecx
callq _pci_write_config
LBB0_7:
movq -48(%rbp), %rdi
movq _M_TEMP@GOTPCREL(%rip), %rax
movl (%rax), %esi
callq _free
LBB0_8:
addq $16, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _ATA_ALI_1533,8,3 ## @ATA_ALI_1533
.comm _M_TEMP,4,2 ## @M_TEMP
.no_dead_strip _ata_ali_reset
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function ata_ali_reset
_ata_ali_reset: ; @ata_ali_reset
.cfi_startproc
; %bb.0:
sub sp, sp, #64
.cfi_def_cfa_offset 64
stp x22, x21, [sp, #16] ; 16-byte Folded Spill
stp x20, x19, [sp, #32] ; 16-byte Folded Spill
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x20, x0
bl _device_get_parent
bl _device_get_softc
mov x21, x0
mov x0, x20
bl _device_get_softc
mov x19, x0
mov x0, x20
bl _ata_generic_reset
ldr x8, [x21]
ldr w8, [x8]
and w8, w8, #0xfffffffe
cmp w8, #194
b.ne LBB0_8
; %bb.1:
mov x0, x20
bl _GRANDPARENT
add x1, sp, #8
add x2, sp, #4
bl _device_get_children
cbnz w0, LBB0_8
; %bb.2:
ldr w8, [sp, #4]
cmp w8, #1
b.lt LBB0_7
; %bb.3:
mov x20, #0
Lloh0:
adrp x21, _ATA_ALI_1533@GOTPAGE
Lloh1:
ldr x21, [x21, _ATA_ALI_1533@GOTPAGEOFF]
LBB0_4: ; =>This Inner Loop Header: Depth=1
ldr x8, [sp, #8]
ldr w0, [x8, x20, lsl #2]
bl _pci_get_devid
ldr x8, [x21]
cmp x0, x8
b.eq LBB0_6
; %bb.5: ; in Loop: Header=BB0_4 Depth=1
add x20, x20, #1
ldrsw x8, [sp, #4]
cmp x20, x8
b.lt LBB0_4
b LBB0_7
LBB0_6:
ldr x8, [sp, #8]
lsl x21, x20, #2
ldr w20, [x8, x21]
mov x0, x20
mov w1, #88
mov w2, #1
bl _pci_read_config
ldr w8, [x19]
mov w22, #4
lsl w8, w22, w8
bic w2, w0, w8
mov x0, x20
mov w1, #88
mov w3, #1
bl _pci_write_config
ldr x8, [sp, #8]
ldr w20, [x8, x21]
mov x0, x20
mov w1, #88
mov w2, #1
bl _pci_read_config
ldr w8, [x19]
lsl w8, w22, w8
orr w2, w8, w0
mov x0, x20
mov w1, #88
mov w3, #1
bl _pci_write_config
LBB0_7:
ldr x0, [sp, #8]
Lloh2:
adrp x8, _M_TEMP@GOTPAGE
Lloh3:
ldr x8, [x8, _M_TEMP@GOTPAGEOFF]
Lloh4:
ldr w1, [x8]
bl _free
LBB0_8:
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #64
ret
.loh AdrpLdrGot Lloh0, Lloh1
.loh AdrpLdrGotLdr Lloh2, Lloh3, Lloh4
.cfi_endproc
; -- End function
.comm _ATA_ALI_1533,8,3 ; @ATA_ALI_1533
.comm _M_TEMP,4,2 ; @M_TEMP
.no_dead_strip _ata_ali_reset
.subsections_via_symbols
| AnghaBench/freebsd/sys/dev/ata/chipsets/extr_ata-acerlabs.c_ata_ali_reset.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _xprt_active ## -- Begin function xprt_active
.p2align 4, 0x90
_xprt_active: ## @xprt_active
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq %rdi, %rbx
movq 32(%rdi), %r14
movq %r14, %rdi
callq _mtx_lock
cmpl $0, 24(%rbx)
je LBB0_6
## %bb.1:
cmpq $0, 16(%rbx)
jne LBB0_6
## %bb.2:
movq _TRUE@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, 16(%rbx)
cmpq $0, 8(%rbx)
jne LBB0_6
## %bb.3:
movl (%rbx), %edi
callq _svc_request_space_available
testl %eax, %eax
je LBB0_5
## %bb.4:
movq %rbx, %rdi
callq _xprt_assignthread
testl %eax, %eax
jne LBB0_6
LBB0_5:
leaq 4(%r14), %rdi
movq _xp_alink@GOTPCREL(%rip), %rax
movl (%rax), %edx
movq %rbx, %rsi
callq _TAILQ_INSERT_TAIL
LBB0_6:
movq %r14, %rdi
popq %rbx
popq %r14
popq %rbp
jmp _mtx_unlock ## TAILCALL
.cfi_endproc
## -- End function
.comm _TRUE,8,3 ## @TRUE
.comm _xp_alink,4,2 ## @xp_alink
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _xprt_active ; -- Begin function xprt_active
.p2align 2
_xprt_active: ; @xprt_active
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x20, x0
ldr x19, [x0, #32]
mov x0, x19
bl _mtx_lock
ldr w8, [x20, #24]
cbz w8, LBB0_6
; %bb.1:
ldr x8, [x20, #16]
cbnz x8, LBB0_6
; %bb.2:
Lloh0:
adrp x8, _TRUE@GOTPAGE
Lloh1:
ldr x8, [x8, _TRUE@GOTPAGEOFF]
Lloh2:
ldr x8, [x8]
str x8, [x20, #16]
ldr x8, [x20, #8]
cbnz x8, LBB0_6
; %bb.3:
ldr w0, [x20]
bl _svc_request_space_available
cbz w0, LBB0_5
; %bb.4:
mov x0, x20
bl _xprt_assignthread
cbnz w0, LBB0_6
LBB0_5:
add x0, x19, #4
Lloh3:
adrp x8, _xp_alink@GOTPAGE
Lloh4:
ldr x8, [x8, _xp_alink@GOTPAGEOFF]
Lloh5:
ldr w2, [x8]
mov x1, x20
bl _TAILQ_INSERT_TAIL
LBB0_6:
mov x0, x19
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
b _mtx_unlock
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.cfi_endproc
; -- End function
.comm _TRUE,8,3 ; @TRUE
.comm _xp_alink,4,2 ; @xp_alink
.subsections_via_symbols
| AnghaBench/freebsd/sys/rpc/extr_svc.c_xprt_active.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function packet_radio_sub
_packet_radio_sub: ## @packet_radio_sub
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
xorl %esi, %esi
movl $4, %edx
popq %rbp
jmp _packet_sub ## TAILCALL
.cfi_endproc
## -- End function
.no_dead_strip _packet_radio_sub
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function packet_radio_sub
_packet_radio_sub: ; @packet_radio_sub
.cfi_startproc
; %bb.0:
mov w1, #0
mov w2, #4
b _packet_sub
.cfi_endproc
; -- End function
.no_dead_strip _packet_radio_sub
.subsections_via_symbols
| AnghaBench/nodemcu-firmware/app/modules/extr_wifi_monitor.c_packet_radio_sub.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function str2uint64_t
_str2uint64_t: ## @str2uint64_t
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movb (%rdi), %cl
leal -48(%rcx), %edx
xorl %eax, %eax
cmpb $9, %dl
ja LBB0_3
## %bb.1:
incq %rdi
xorl %eax, %eax
.p2align 4, 0x90
LBB0_2: ## =>This Inner Loop Header: Depth=1
movzbl %cl, %ecx
leal (%rax,%rax,4), %eax
leal (%rcx,%rax,2), %eax
addl $-48, %eax
movzbl (%rdi), %ecx
leal -48(%rcx), %edx
incq %rdi
cmpb $10, %dl
jb LBB0_2
LBB0_3:
## kill: def $eax killed $eax killed $rax
popq %rbp
retq
.cfi_endproc
## -- End function
.no_dead_strip _str2uint64_t
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function str2uint64_t
_str2uint64_t: ; @str2uint64_t
.cfi_startproc
; %bb.0:
ldrb w9, [x0]
sub w10, w9, #48
cmp w10, #9
b.hi LBB0_4
; %bb.1:
mov x8, x0
mov w0, #0
add x8, x8, #1
mov w10, #10
LBB0_2: ; =>This Inner Loop Header: Depth=1
madd w9, w0, w10, w9
sub w0, w9, #48
ldrb w9, [x8], #1
sub w11, w9, #48
cmp w11, #10
b.lo LBB0_2
; %bb.3:
ret
LBB0_4:
mov w0, #0
ret
.cfi_endproc
; -- End function
.no_dead_strip _str2uint64_t
.subsections_via_symbols
| AnghaBench/netdata/libnetdata/storage_number/extr_..inlined.h_str2uint64_t.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function cf2_hint_init
_cf2_hint_init: ## @cf2_hint_init
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $40, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %r9, -64(%rbp) ## 8-byte Spill
movq %r8, -56(%rbp) ## 8-byte Spill
movq %rcx, -72(%rbp) ## 8-byte Spill
movq %rdx, %rbx
movl %esi, %r15d
movq %rdi, %r14
movq 16(%rbp), %r12
callq _FT_ZERO
movl %r15d, %edi
movq %rbx, -48(%rbp) ## 8-byte Spill
movq %rbx, %rsi
callq _cf2_arrstack_getPointer
movq %rax, %r15
movq 16(%rax), %rdi
movq 24(%rax), %rsi
callq _SUB_INT32
movq %rax, %r13
movl $-21, %edi
callq _cf2_intToFixed
cmpq %rax, %r13
jne LBB0_3
## %bb.1:
testq %r12, %r12
je LBB0_5
## %bb.2:
movq 16(%r15), %rax
movq %rax, 16(%r14)
movq _CF2_GhostBottom@GOTPCREL(%rip), %rax
jmp LBB0_10
LBB0_3:
movl $-20, %edi
callq _cf2_intToFixed
cmpq %rax, %r13
jne LBB0_6
## %bb.4:
testq %r12, %r12
je LBB0_9
LBB0_5:
xorl %eax, %eax
jmp LBB0_11
LBB0_6:
testq %r13, %r13
js LBB0_12
## %bb.7:
testq %r12, %r12
movq -64(%rbp), %r12 ## 8-byte Reload
movq -56(%rbp), %r13 ## 8-byte Reload
je LBB0_15
## %bb.8:
movq 24(%r15), %rax
jmp LBB0_14
LBB0_9:
movq 24(%r15), %rax
movq %rax, 16(%r14)
movq _CF2_GhostTop@GOTPCREL(%rip), %rax
LBB0_10:
movq (%rax), %rax
LBB0_11:
movq -64(%rbp), %r12 ## 8-byte Reload
movq -48(%rbp), %rbx ## 8-byte Reload
movq -56(%rbp), %r13 ## 8-byte Reload
jmp LBB0_19
LBB0_12:
testq %r12, %r12
movq -64(%rbp), %r12 ## 8-byte Reload
movq -56(%rbp), %r13 ## 8-byte Reload
je LBB0_16
## %bb.13:
movq 16(%r15), %rax
LBB0_14:
movq %rax, 16(%r14)
movq _CF2_PairBottom@GOTPCREL(%rip), %rax
jmp LBB0_18
LBB0_15:
movq 16(%r15), %rax
jmp LBB0_17
LBB0_16:
movq 24(%r15), %rax
LBB0_17:
movq %rax, 16(%r14)
movq _CF2_PairTop@GOTPCREL(%rip), %rax
LBB0_18:
movq (%rax), %rax
movq -48(%rbp), %rbx ## 8-byte Reload
LBB0_19:
movq %rax, (%r14)
movq %r14, %rdi
callq _cf2_hint_isTop
movq 16(%r14), %rdi
testq %rax, %rax
je LBB0_21
## %bb.20:
movq -72(%rbp), %rax ## 8-byte Reload
movslq (%rax), %rsi
addq %rsi, %rsi
callq _ADD_INT32
movq %rax, %rdi
movq %rax, 16(%r14)
LBB0_21:
movq %r13, %rsi
callq _ADD_INT32
movq %rax, 16(%r14)
movq %r12, 32(%r14)
movq %rbx, 8(%r14)
cmpq $0, (%r14)
je LBB0_24
## %bb.22:
cmpq $0, 8(%r15)
je LBB0_24
## %bb.23:
movq %r14, %rdi
callq _cf2_hint_isTop
xorl %ecx, %ecx
testq %rax, %rax
setne %cl
movl (%r15,%rcx,4), %eax
movl %eax, 24(%r14)
movq %r14, %rdi
addq $40, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp _cf2_hint_lock ## TAILCALL
LBB0_24:
movq %rax, %rdi
movq %r12, %rsi
callq _FT_MulFix
movl %eax, 24(%r14)
addq $40, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _CF2_GhostBottom,8,3 ## @CF2_GhostBottom
.comm _CF2_GhostTop,8,3 ## @CF2_GhostTop
.comm _CF2_PairBottom,8,3 ## @CF2_PairBottom
.comm _CF2_PairTop,8,3 ## @CF2_PairTop
.no_dead_strip _cf2_hint_init
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function cf2_hint_init
_cf2_hint_init: ; @cf2_hint_init
.cfi_startproc
; %bb.0:
stp x26, x25, [sp, #-80]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 80
stp x24, x23, [sp, #16] ; 16-byte Folded Spill
stp x22, x21, [sp, #32] ; 16-byte Folded Spill
stp x20, x19, [sp, #48] ; 16-byte Folded Spill
stp x29, x30, [sp, #64] ; 16-byte Folded Spill
add x29, sp, #64
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
mov x25, x6
mov x20, x5
mov x23, x4
mov x24, x3
mov x22, x2
mov x21, x1
mov x19, x0
bl _FT_ZERO
mov x0, x21
mov x1, x22
bl _cf2_arrstack_getPointer
mov x21, x0
ldr x0, [x0, #16]
ldr x1, [x21, #24]
bl _SUB_INT32
mov x26, x0
mov w0, #-21
bl _cf2_intToFixed
cmp x26, x0
b.ne LBB0_3
; %bb.1:
cbz x25, LBB0_5
; %bb.2:
ldr x8, [x21, #16]
str x8, [x19, #16]
Lloh0:
adrp x8, _CF2_GhostBottom@GOTPAGE
Lloh1:
ldr x8, [x8, _CF2_GhostBottom@GOTPAGEOFF]
b LBB0_16
LBB0_3:
mov w0, #-20
bl _cf2_intToFixed
cmp x26, x0
b.ne LBB0_6
; %bb.4:
cbz x25, LBB0_9
LBB0_5:
mov x8, #0
b LBB0_17
LBB0_6:
tbnz x26, #63, LBB0_10
; %bb.7:
cbz x25, LBB0_13
; %bb.8:
ldr x8, [x21, #24]
b LBB0_12
LBB0_9:
ldr x8, [x21, #24]
str x8, [x19, #16]
Lloh2:
adrp x8, _CF2_GhostTop@GOTPAGE
Lloh3:
ldr x8, [x8, _CF2_GhostTop@GOTPAGEOFF]
b LBB0_16
LBB0_10:
cbz x25, LBB0_14
; %bb.11:
ldr x8, [x21, #16]
LBB0_12:
str x8, [x19, #16]
Lloh4:
adrp x8, _CF2_PairBottom@GOTPAGE
Lloh5:
ldr x8, [x8, _CF2_PairBottom@GOTPAGEOFF]
b LBB0_16
LBB0_13:
ldr x8, [x21, #16]
b LBB0_15
LBB0_14:
ldr x8, [x21, #24]
LBB0_15:
str x8, [x19, #16]
Lloh6:
adrp x8, _CF2_PairTop@GOTPAGE
Lloh7:
ldr x8, [x8, _CF2_PairTop@GOTPAGEOFF]
LBB0_16:
ldr x8, [x8]
LBB0_17:
str x8, [x19]
mov x0, x19
bl _cf2_hint_isTop
mov x8, x0
ldr x0, [x19, #16]
cbz x8, LBB0_19
; %bb.18:
ldrsw x8, [x24]
lsl x1, x8, #1
bl _ADD_INT32
str x0, [x19, #16]
LBB0_19:
mov x1, x23
bl _ADD_INT32
str x20, [x19, #32]
stp x22, x0, [x19, #8]
ldr x8, [x19]
cbz x8, LBB0_22
; %bb.20:
ldr x8, [x21, #8]
cbz x8, LBB0_22
; %bb.21:
mov x0, x19
bl _cf2_hint_isTop
cmp x0, #0
cset w8, ne
ldr w8, [x21, w8, uxtw #2]
str w8, [x19, #24]
mov x0, x19
ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
ldp x22, x21, [sp, #32] ; 16-byte Folded Reload
ldp x24, x23, [sp, #16] ; 16-byte Folded Reload
ldp x26, x25, [sp], #80 ; 16-byte Folded Reload
b _cf2_hint_lock
LBB0_22:
mov x1, x20
bl _FT_MulFix
str w0, [x19, #24]
ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
ldp x22, x21, [sp, #32] ; 16-byte Folded Reload
ldp x24, x23, [sp, #16] ; 16-byte Folded Reload
ldp x26, x25, [sp], #80 ; 16-byte Folded Reload
ret
.loh AdrpLdrGot Lloh0, Lloh1
.loh AdrpLdrGot Lloh2, Lloh3
.loh AdrpLdrGot Lloh4, Lloh5
.loh AdrpLdrGot Lloh6, Lloh7
.cfi_endproc
; -- End function
.comm _CF2_GhostBottom,8,3 ; @CF2_GhostBottom
.comm _CF2_GhostTop,8,3 ; @CF2_GhostTop
.comm _CF2_PairBottom,8,3 ; @CF2_PairBottom
.comm _CF2_PairTop,8,3 ; @CF2_PairTop
.no_dead_strip _cf2_hint_init
.subsections_via_symbols
| AnghaBench/sumatrapdf/ext/freetype/src/psaux/extr_pshints.c_cf2_hint_init.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _rxml_free_document ## -- Begin function rxml_free_document
.p2align 4, 0x90
_rxml_free_document: ## @rxml_free_document
.cfi_startproc
## %bb.0:
testq %rdi, %rdi
je LBB0_4
## %bb.1:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
movq %rdi, %rbx
movq (%rdi), %rdi
testq %rdi, %rdi
je LBB0_3
## %bb.2:
callq _rxml_free_node
LBB0_3:
movq %rbx, %rdi
addq $8, %rsp
popq %rbx
popq %rbp
jmp _free ## TAILCALL
LBB0_4:
retq
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _rxml_free_document ; -- Begin function rxml_free_document
.p2align 2
_rxml_free_document: ; @rxml_free_document
.cfi_startproc
; %bb.0:
cbz x0, LBB0_4
; %bb.1:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x0
ldr x0, [x0]
cbz x0, LBB0_3
; %bb.2:
bl _rxml_free_node
LBB0_3:
mov x0, x19
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
b _free
LBB0_4:
.cfi_def_cfa wsp, 0
.cfi_same_value w30
.cfi_same_value w29
.cfi_same_value w19
.cfi_same_value w20
ret
.cfi_endproc
; -- End function
.subsections_via_symbols
| AnghaBench/RetroArch/libretro-common/formats/xml/extr_rxml.c_rxml_free_document.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _f1 ## -- Begin function f1
.p2align 4, 0x90
_f1: ## @f1
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movl $1, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _f2 ## -- Begin function f2
.p2align 4, 0x90
_f2: ## @f2
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movl $2, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
xorl %eax, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _f1 ; -- Begin function f1
.p2align 2
_f1: ; @f1
.cfi_startproc
; %bb.0:
mov w0, #1
ret
.cfi_endproc
; -- End function
.globl _f2 ; -- Begin function f2
.p2align 2
_f2: ; @f2
.cfi_startproc
; %bb.0:
mov w0, #2
ret
.cfi_endproc
; -- End function
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
mov w0, #0
ret
.cfi_endproc
; -- End function
.subsections_via_symbols
| the_stack_data/123182.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _ip6_lasthdr ## -- Begin function ip6_lasthdr
.p2align 4, 0x90
_ip6_lasthdr: ## @ip6_lasthdr
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rcx, %r14
movl %esi, %ebx
movq %rdi, %r15
testq %rcx, %rcx
jne LBB0_2
## %bb.1:
movl $-1, -28(%rbp)
leaq -28(%rbp), %r14
.p2align 4, 0x90
LBB0_2: ## =>This Inner Loop Header: Depth=1
movq %r15, %rdi
movl %ebx, %esi
movq %r14, %rcx
callq _ip6_nexthdr
testl %eax, %eax
js LBB0_5
## %bb.3: ## in Loop: Header=BB0_2 Depth=1
movl %ebx, %ecx
cmpl %ebx, %eax
jl LBB0_4
## %bb.6: ## in Loop: Header=BB0_2 Depth=1
movl %eax, %ebx
je LBB0_5
## %bb.7: ## in Loop: Header=BB0_2 Depth=1
movl (%r14), %edx
jmp LBB0_2
LBB0_4:
movl $-1, %ebx
LBB0_5:
movl %ebx, %eax
addq $8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _ip6_lasthdr ; -- Begin function ip6_lasthdr
.p2align 2
_ip6_lasthdr: ; @ip6_lasthdr
.cfi_startproc
; %bb.0:
sub sp, sp, #64
.cfi_def_cfa_offset 64
stp x22, x21, [sp, #16] ; 16-byte Folded Spill
stp x20, x19, [sp, #32] ; 16-byte Folded Spill
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x21, x3
mov x19, x1
mov x20, x0
cbnz x3, LBB0_2
; %bb.1:
mov w8, #-1
str w8, [sp, #12]
add x21, sp, #12
LBB0_2: ; =>This Inner Loop Header: Depth=1
mov x0, x20
mov x1, x19
mov x3, x21
bl _ip6_nexthdr
tbnz w0, #31, LBB0_7
; %bb.3: ; in Loop: Header=BB0_2 Depth=1
mov x8, x19
cmp w0, w19
b.lt LBB0_6
; %bb.4: ; in Loop: Header=BB0_2 Depth=1
mov x19, x0
b.eq LBB0_7
; %bb.5: ; in Loop: Header=BB0_2 Depth=1
ldr w2, [x21]
b LBB0_2
LBB0_6:
mov w19, #-1
LBB0_7:
mov x0, x19
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #64
ret
.cfi_endproc
; -- End function
.subsections_via_symbols
| AnghaBench/darwin-xnu/bsd/netinet6/extr_ip6_input.c_ip6_lasthdr.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function change_owner_recurse_to_sequences
_change_owner_recurse_to_sequences: ## @change_owner_recurse_to_sequences
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $40, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %edx, -60(%rbp) ## 4-byte Spill
movq %rsi, -80(%rbp) ## 8-byte Spill
movq %rdi, -72(%rbp) ## 8-byte Spill
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -48(%rbp)
movq _DependRelationId@GOTPCREL(%rip), %rax
movl (%rax), %edi
movq _AccessShareLock@GOTPCREL(%rip), %rax
movl (%rax), %esi
callq _table_open
movl %eax, %r13d
movq _Anum_pg_depend_refclassid@GOTPCREL(%rip), %rax
movl (%rax), %r14d
movq _BTEqualStrategyNumber@GOTPCREL(%rip), %rax
movl (%rax), %r15d
movq _F_OIDEQ@GOTPCREL(%rip), %rax
movl (%rax), %r12d
movq _RelationRelationId@GOTPCREL(%rip), %rax
movq (%rax), %rdi
callq _ObjectIdGetDatum
leaq -56(%rbp), %rdi
movl %r14d, %esi
movl %r15d, %edx
movl %r12d, %ecx
movl %eax, %r8d
callq _ScanKeyInit
leaq -52(%rbp), %r14
movq _Anum_pg_depend_refobjid@GOTPCREL(%rip), %rax
movl (%rax), %ebx
movq _BTEqualStrategyNumber@GOTPCREL(%rip), %rax
movl (%rax), %r15d
movq _F_OIDEQ@GOTPCREL(%rip), %rax
movl (%rax), %r12d
movq -72(%rbp), %rdi ## 8-byte Reload
callq _ObjectIdGetDatum
movq %r14, %rdi
movl %ebx, %esi
movl %r15d, %edx
movl %r12d, %ecx
movl %eax, %r8d
callq _ScanKeyInit
movq _DependReferenceIndexId@GOTPCREL(%rip), %rax
movl (%rax), %esi
movl %r13d, -72(%rbp) ## 4-byte Spill
movl %r13d, %edi
movl $1, %edx
xorl %ecx, %ecx
movl $2, %r8d
leaq -56(%rbp), %r9
callq _systable_beginscan
movl %eax, %r13d
movl %eax, %edi
callq _systable_getnext
movl %eax, %ebx
movl %eax, %edi
callq _HeapTupleIsValid
testq %rax, %rax
je LBB0_11
## %bb.1:
movq _DEPENDENCY_AUTO@GOTPCREL(%rip), %r15
movq _DEPENDENCY_INTERNAL@GOTPCREL(%rip), %r14
jmp LBB0_2
LBB0_9: ## in Loop: Header=BB0_2 Depth=1
movl %ebx, %edi
movl %r15d, %esi
callq _relation_close
movq %r14, %r15
movq _DEPENDENCY_INTERNAL@GOTPCREL(%rip), %r14
.p2align 4, 0x90
LBB0_10: ## in Loop: Header=BB0_2 Depth=1
movl %r13d, %edi
callq _systable_getnext
movl %eax, %ebx
movl %eax, %edi
callq _HeapTupleIsValid
testq %rax, %rax
je LBB0_11
LBB0_2: ## =>This Inner Loop Header: Depth=1
movl %ebx, %edi
callq _GETSTRUCT
movslq %eax, %r12
cmpq $0, (%r12)
je LBB0_10
## %bb.3: ## in Loop: Header=BB0_2 Depth=1
movq 8(%r12), %rax
movq _RelationRelationId@GOTPCREL(%rip), %rcx
cmpq (%rcx), %rax
jne LBB0_10
## %bb.4: ## in Loop: Header=BB0_2 Depth=1
cmpq $0, 16(%r12)
jne LBB0_10
## %bb.5: ## in Loop: Header=BB0_2 Depth=1
movq 24(%r12), %rax
cmpq (%r15), %rax
je LBB0_7
## %bb.6: ## in Loop: Header=BB0_2 Depth=1
cmpq (%r14), %rax
jne LBB0_10
LBB0_7: ## in Loop: Header=BB0_2 Depth=1
movq %r15, %r14
movl 32(%r12), %edi
movl -60(%rbp), %r15d ## 4-byte Reload
movl %r15d, %esi
callq _relation_open
movl %eax, %ebx
movl %eax, %edi
callq _RelationGetForm
movq (%rax), %rax
movq _RELKIND_SEQUENCE@GOTPCREL(%rip), %rcx
cmpq (%rcx), %rax
jne LBB0_9
## %bb.8: ## in Loop: Header=BB0_2 Depth=1
movl 32(%r12), %edi
movq -80(%rbp), %rsi ## 8-byte Reload
movl $1, %edx
movl -60(%rbp), %ecx ## 4-byte Reload
callq _ATExecChangeOwner
movq _NoLock@GOTPCREL(%rip), %rax
movl (%rax), %r15d
jmp LBB0_9
LBB0_11:
movl %r13d, %edi
callq _systable_endscan
movq _AccessShareLock@GOTPCREL(%rip), %rax
movl (%rax), %esi
movl -72(%rbp), %edi ## 4-byte Reload
callq _relation_close
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
cmpq -48(%rbp), %rax
jne LBB0_13
## %bb.12:
addq $40, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB0_13:
callq ___stack_chk_fail
.cfi_endproc
## -- End function
.comm _DependRelationId,4,2 ## @DependRelationId
.comm _AccessShareLock,4,2 ## @AccessShareLock
.comm _Anum_pg_depend_refclassid,4,2 ## @Anum_pg_depend_refclassid
.comm _BTEqualStrategyNumber,4,2 ## @BTEqualStrategyNumber
.comm _F_OIDEQ,4,2 ## @F_OIDEQ
.comm _RelationRelationId,8,3 ## @RelationRelationId
.comm _Anum_pg_depend_refobjid,4,2 ## @Anum_pg_depend_refobjid
.comm _DependReferenceIndexId,4,2 ## @DependReferenceIndexId
.comm _DEPENDENCY_AUTO,8,3 ## @DEPENDENCY_AUTO
.comm _DEPENDENCY_INTERNAL,8,3 ## @DEPENDENCY_INTERNAL
.comm _RELKIND_SEQUENCE,8,3 ## @RELKIND_SEQUENCE
.comm _NoLock,4,2 ## @NoLock
.no_dead_strip _change_owner_recurse_to_sequences
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function change_owner_recurse_to_sequences
_change_owner_recurse_to_sequences: ; @change_owner_recurse_to_sequences
.cfi_startproc
; %bb.0:
sub sp, sp, #128
.cfi_def_cfa_offset 128
stp x28, x27, [sp, #32] ; 16-byte Folded Spill
stp x26, x25, [sp, #48] ; 16-byte Folded Spill
stp x24, x23, [sp, #64] ; 16-byte Folded Spill
stp x22, x21, [sp, #80] ; 16-byte Folded Spill
stp x20, x19, [sp, #96] ; 16-byte Folded Spill
stp x29, x30, [sp, #112] ; 16-byte Folded Spill
add x29, sp, #112
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
mov x19, x2
str x1, [sp, #8] ; 8-byte Folded Spill
mov x22, x0
Lloh0:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh1:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh2:
ldr x8, [x8]
str x8, [sp, #24]
Lloh3:
adrp x8, _DependRelationId@GOTPAGE
Lloh4:
ldr x8, [x8, _DependRelationId@GOTPAGEOFF]
Lloh5:
ldr w0, [x8]
Lloh6:
adrp x8, _AccessShareLock@GOTPAGE
Lloh7:
ldr x8, [x8, _AccessShareLock@GOTPAGEOFF]
Lloh8:
ldr w1, [x8]
bl _table_open
mov x21, x0
Lloh9:
adrp x8, _Anum_pg_depend_refclassid@GOTPAGE
Lloh10:
ldr x8, [x8, _Anum_pg_depend_refclassid@GOTPAGEOFF]
Lloh11:
ldr w23, [x8]
Lloh12:
adrp x26, _BTEqualStrategyNumber@GOTPAGE
Lloh13:
ldr x26, [x26, _BTEqualStrategyNumber@GOTPAGEOFF]
ldr w24, [x26]
Lloh14:
adrp x27, _F_OIDEQ@GOTPAGE
Lloh15:
ldr x27, [x27, _F_OIDEQ@GOTPAGEOFF]
ldr w25, [x27]
Lloh16:
adrp x28, _RelationRelationId@GOTPAGE
Lloh17:
ldr x28, [x28, _RelationRelationId@GOTPAGEOFF]
ldr x0, [x28]
bl _ObjectIdGetDatum
mov x4, x0
add x20, sp, #16
add x0, sp, #16
mov x1, x23
mov x2, x24
mov x3, x25
bl _ScanKeyInit
add x23, x20, #4
Lloh18:
adrp x8, _Anum_pg_depend_refobjid@GOTPAGE
Lloh19:
ldr x8, [x8, _Anum_pg_depend_refobjid@GOTPAGEOFF]
Lloh20:
ldr w24, [x8]
ldr w25, [x26]
ldr w26, [x27]
mov x0, x22
bl _ObjectIdGetDatum
mov x4, x0
mov x0, x23
mov x1, x24
mov x2, x25
mov x3, x26
bl _ScanKeyInit
Lloh21:
adrp x8, _DependReferenceIndexId@GOTPAGE
Lloh22:
ldr x8, [x8, _DependReferenceIndexId@GOTPAGEOFF]
Lloh23:
ldr w1, [x8]
add x5, sp, #16
mov x0, x21
mov w2, #1
mov x3, #0
mov w4, #2
bl _systable_beginscan
mov x22, x0
bl _systable_getnext
mov x23, x0
bl _HeapTupleIsValid
cbz x0, LBB0_10
; %bb.1:
Lloh24:
adrp x24, _DEPENDENCY_AUTO@GOTPAGE
Lloh25:
ldr x24, [x24, _DEPENDENCY_AUTO@GOTPAGEOFF]
Lloh26:
adrp x25, _DEPENDENCY_INTERNAL@GOTPAGE
Lloh27:
ldr x25, [x25, _DEPENDENCY_INTERNAL@GOTPAGEOFF]
Lloh28:
adrp x26, _RELKIND_SEQUENCE@GOTPAGE
Lloh29:
ldr x26, [x26, _RELKIND_SEQUENCE@GOTPAGEOFF]
Lloh30:
adrp x20, _NoLock@GOTPAGE
Lloh31:
ldr x20, [x20, _NoLock@GOTPAGEOFF]
b LBB0_4
LBB0_2: ; in Loop: Header=BB0_4 Depth=1
mov x0, x23
bl _relation_close
LBB0_3: ; in Loop: Header=BB0_4 Depth=1
mov x0, x22
bl _systable_getnext
mov x23, x0
bl _HeapTupleIsValid
cbz x0, LBB0_10
LBB0_4: ; =>This Inner Loop Header: Depth=1
mov x0, x23
bl _GETSTRUCT
; kill: def $w0 killed $w0 def $x0
sxtw x27, w0
ldr x8, [x27]
cbz x8, LBB0_3
; %bb.5: ; in Loop: Header=BB0_4 Depth=1
ldr x8, [x27, #8]
ldr x9, [x28]
cmp x8, x9
b.ne LBB0_3
; %bb.6: ; in Loop: Header=BB0_4 Depth=1
ldr x8, [x27, #16]
cbnz x8, LBB0_3
; %bb.7: ; in Loop: Header=BB0_4 Depth=1
ldr x8, [x27, #24]
ldr x9, [x24]
ldr x10, [x25]
cmp x8, x9
ccmp x8, x10, #4, ne
b.ne LBB0_3
; %bb.8: ; in Loop: Header=BB0_4 Depth=1
ldr w0, [x27, #32]
mov x1, x19
bl _relation_open
mov x23, x0
bl _RelationGetForm
ldr x8, [x0]
ldr x9, [x26]
mov x1, x19
cmp x8, x9
b.ne LBB0_2
; %bb.9: ; in Loop: Header=BB0_4 Depth=1
ldr w0, [x27, #32]
ldr x1, [sp, #8] ; 8-byte Folded Reload
mov w2, #1
mov x3, x19
bl _ATExecChangeOwner
ldr w1, [x20]
b LBB0_2
LBB0_10:
mov x0, x22
bl _systable_endscan
Lloh32:
adrp x8, _AccessShareLock@GOTPAGE
Lloh33:
ldr x8, [x8, _AccessShareLock@GOTPAGEOFF]
Lloh34:
ldr w1, [x8]
mov x0, x21
bl _relation_close
ldr x8, [sp, #24]
Lloh35:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh36:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh37:
ldr x9, [x9]
cmp x9, x8
b.ne LBB0_12
; %bb.11:
ldp x29, x30, [sp, #112] ; 16-byte Folded Reload
ldp x20, x19, [sp, #96] ; 16-byte Folded Reload
ldp x22, x21, [sp, #80] ; 16-byte Folded Reload
ldp x24, x23, [sp, #64] ; 16-byte Folded Reload
ldp x26, x25, [sp, #48] ; 16-byte Folded Reload
ldp x28, x27, [sp, #32] ; 16-byte Folded Reload
add sp, sp, #128
ret
LBB0_12:
bl ___stack_chk_fail
.loh AdrpLdrGotLdr Lloh21, Lloh22, Lloh23
.loh AdrpLdrGotLdr Lloh18, Lloh19, Lloh20
.loh AdrpLdrGot Lloh16, Lloh17
.loh AdrpLdrGot Lloh14, Lloh15
.loh AdrpLdrGot Lloh12, Lloh13
.loh AdrpLdrGotLdr Lloh9, Lloh10, Lloh11
.loh AdrpLdrGotLdr Lloh6, Lloh7, Lloh8
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGot Lloh30, Lloh31
.loh AdrpLdrGot Lloh28, Lloh29
.loh AdrpLdrGot Lloh26, Lloh27
.loh AdrpLdrGot Lloh24, Lloh25
.loh AdrpLdrGotLdr Lloh35, Lloh36, Lloh37
.loh AdrpLdrGotLdr Lloh32, Lloh33, Lloh34
.cfi_endproc
; -- End function
.comm _DependRelationId,4,2 ; @DependRelationId
.comm _AccessShareLock,4,2 ; @AccessShareLock
.comm _Anum_pg_depend_refclassid,4,2 ; @Anum_pg_depend_refclassid
.comm _BTEqualStrategyNumber,4,2 ; @BTEqualStrategyNumber
.comm _F_OIDEQ,4,2 ; @F_OIDEQ
.comm _RelationRelationId,8,3 ; @RelationRelationId
.comm _Anum_pg_depend_refobjid,4,2 ; @Anum_pg_depend_refobjid
.comm _DependReferenceIndexId,4,2 ; @DependReferenceIndexId
.comm _DEPENDENCY_AUTO,8,3 ; @DEPENDENCY_AUTO
.comm _DEPENDENCY_INTERNAL,8,3 ; @DEPENDENCY_INTERNAL
.comm _RELKIND_SEQUENCE,8,3 ; @RELKIND_SEQUENCE
.comm _NoLock,4,2 ; @NoLock
.no_dead_strip _change_owner_recurse_to_sequences
.subsections_via_symbols
| AnghaBench/postgres/src/backend/commands/extr_tablecmds.c_change_owner_recurse_to_sequences.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function emit_ia32_mul_r
_emit_ia32_mul_r: ## @emit_ia32_mul_r
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %r8, %r14
movl %edx, -44(%rbp) ## 4-byte Spill
movl %edi, %r12d
movq (%r8), %r15
movq _IA32_ECX@GOTPCREL(%rip), %rax
movl (%rax), %edx
movl %edx, %r13d
testl %ecx, %ecx
cmovel %esi, %r13d
je LBB0_2
## %bb.1:
movl %esi, %ebx
movq _IA32_EBP@GOTPCREL(%rip), %rax
movl (%rax), %esi
movl $64, %edi
callq _add_2reg
movl %r12d, -48(%rbp) ## 4-byte Spill
movq %r15, %r12
movq %r14, %r15
movl %eax, %r14d
movl %ebx, %edi
callq _STACK_VAR
movl $139, %edi
movl %r14d, %esi
movq %r15, %r14
movq %r12, %r15
movl -48(%rbp), %r12d ## 4-byte Reload
movl %eax, %edx
callq _EMIT3
LBB0_2:
cmpl $0, -44(%rbp) ## 4-byte Folded Reload
je LBB0_4
## %bb.3:
movq _IA32_EBP@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq _IA32_EAX@GOTPCREL(%rip), %rax
movl (%rax), %edx
movl $64, %edi
callq _add_2reg
movl %eax, %ebx
movl %r12d, %edi
callq _STACK_VAR
movl $139, %edi
movl %ebx, %esi
movl %eax, %edx
callq _EMIT3
jmp LBB0_5
LBB0_4:
movq _IA32_EAX@GOTPCREL(%rip), %rax
movl (%rax), %edx
movl $192, %edi
movl %r12d, %esi
callq _add_2reg
movl $139, %edi
movl %eax, %esi
callq _EMIT2
LBB0_5:
movl $224, %edi
movl %r13d, %esi
callq _add_1reg
movl $247, %edi
movl %eax, %esi
callq _EMIT2
cmpl $0, -44(%rbp) ## 4-byte Folded Reload
je LBB0_7
## %bb.6:
movq _IA32_EBP@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq _IA32_EAX@GOTPCREL(%rip), %rax
movl (%rax), %edx
movl $64, %edi
callq _add_2reg
movl %eax, %ebx
movl %r12d, %edi
callq _STACK_VAR
movl $137, %edi
movl %ebx, %esi
movl %eax, %edx
callq _EMIT3
jmp LBB0_8
LBB0_7:
movq _IA32_EAX@GOTPCREL(%rip), %rax
movl (%rax), %edx
movl $192, %edi
movl %r12d, %esi
callq _add_2reg
movl $137, %edi
movl %eax, %esi
callq _EMIT2
LBB0_8:
movq %r15, (%r14)
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _IA32_ECX,4,2 ## @IA32_ECX
.comm _IA32_EBP,4,2 ## @IA32_EBP
.comm _IA32_EAX,4,2 ## @IA32_EAX
.no_dead_strip _emit_ia32_mul_r
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function emit_ia32_mul_r
_emit_ia32_mul_r: ; @emit_ia32_mul_r
.cfi_startproc
; %bb.0:
stp x26, x25, [sp, #-80]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 80
stp x24, x23, [sp, #16] ; 16-byte Folded Spill
stp x22, x21, [sp, #32] ; 16-byte Folded Spill
stp x20, x19, [sp, #48] ; 16-byte Folded Spill
stp x29, x30, [sp, #64] ; 16-byte Folded Spill
add x29, sp, #64
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
mov x19, x4
mov x21, x2
mov x20, x0
ldr x25, [x4]
Lloh0:
adrp x8, _IA32_ECX@GOTPAGE
Lloh1:
ldr x8, [x8, _IA32_ECX@GOTPAGEOFF]
Lloh2:
ldr w2, [x8]
cmp w3, #0
csel w22, w1, w2, eq
Lloh3:
adrp x26, _IA32_EBP@GOTPAGE
Lloh4:
ldr x26, [x26, _IA32_EBP@GOTPAGEOFF]
cbz w3, LBB0_2
; %bb.1:
mov x23, x1
ldr w1, [x26]
mov w0, #64
bl _add_2reg
mov x24, x0
mov x0, x23
bl _STACK_VAR
mov x2, x0
mov w0, #139
mov x1, x24
bl _EMIT3
LBB0_2:
Lloh5:
adrp x24, _IA32_EAX@GOTPAGE
Lloh6:
ldr x24, [x24, _IA32_EAX@GOTPAGEOFF]
cbz w21, LBB0_4
; %bb.3:
ldr w1, [x26]
ldr w2, [x24]
mov w0, #64
bl _add_2reg
mov x23, x0
mov x0, x20
bl _STACK_VAR
mov x2, x0
mov w0, #139
mov x1, x23
bl _EMIT3
b LBB0_5
LBB0_4:
ldr w2, [x24]
mov w0, #192
mov x1, x20
bl _add_2reg
mov x1, x0
mov w0, #139
bl _EMIT2
LBB0_5:
mov w0, #224
mov x1, x22
bl _add_1reg
mov x1, x0
mov w0, #247
bl _EMIT2
cbz w21, LBB0_7
; %bb.6:
ldr w1, [x26]
ldr w2, [x24]
mov w0, #64
bl _add_2reg
mov x21, x0
mov x0, x20
bl _STACK_VAR
mov x2, x0
mov w0, #137
mov x1, x21
bl _EMIT3
b LBB0_8
LBB0_7:
ldr w2, [x24]
mov w0, #192
mov x1, x20
bl _add_2reg
mov x1, x0
mov w0, #137
bl _EMIT2
LBB0_8:
str x25, [x19]
ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
ldp x22, x21, [sp, #32] ; 16-byte Folded Reload
ldp x24, x23, [sp, #16] ; 16-byte Folded Reload
ldp x26, x25, [sp], #80 ; 16-byte Folded Reload
ret
.loh AdrpLdrGot Lloh3, Lloh4
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGot Lloh5, Lloh6
.cfi_endproc
; -- End function
.comm _IA32_ECX,4,2 ; @IA32_ECX
.comm _IA32_EBP,4,2 ; @IA32_EBP
.comm _IA32_EAX,4,2 ; @IA32_EAX
.no_dead_strip _emit_ia32_mul_r
.subsections_via_symbols
| AnghaBench/linux/arch/x86/net/extr_bpf_jit_comp32.c_emit_ia32_mul_r.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.subsections_via_symbols
| the_stack_data/29691.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _CountUniqueCharacters ## -- Begin function CountUniqueCharacters
.p2align 4, 0x90
_CountUniqueCharacters: ## @CountUniqueCharacters
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
leaq L_.str(%rip), %rdi
xorl %eax, %eax
popq %rbp
jmp _printf ## TAILCALL
.cfi_endproc
## -- End function
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
subq $416, %rsp ## imm = 0x1A0
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -8(%rbp)
leaq L_.str.2(%rip), %rdi
xorl %eax, %eax
callq _printf
leaq L_.str.3(%rip), %rdi
leaq -416(%rbp), %rsi
xorl %eax, %eax
callq _scanf
leaq L_.str(%rip), %rdi
xorl %eax, %eax
callq _printf
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
cmpq -8(%rbp), %rax
jne LBB1_2
## %bb.1:
xorl %eax, %eax
addq $416, %rsp ## imm = 0x1A0
popq %rbp
retq
LBB1_2:
callq ___stack_chk_fail
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "Non unique"
L_.str.2: ## @.str.2
.asciz "Enter your name: "
L_.str.3: ## @.str.3
.asciz "%s"
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _CountUniqueCharacters ; -- Begin function CountUniqueCharacters
.p2align 2
_CountUniqueCharacters: ; @CountUniqueCharacters
.cfi_startproc
; %bb.0:
Lloh0:
adrp x0, l_.str@PAGE
Lloh1:
add x0, x0, l_.str@PAGEOFF
b _printf
.loh AdrpAdd Lloh0, Lloh1
.cfi_endproc
; -- End function
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
sub sp, sp, #448
.cfi_def_cfa_offset 448
stp x28, x27, [sp, #416] ; 16-byte Folded Spill
stp x29, x30, [sp, #432] ; 16-byte Folded Spill
add x29, sp, #432
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w27, -24
.cfi_offset w28, -32
Lloh2:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh3:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh4:
ldr x8, [x8]
stur x8, [x29, #-24]
Lloh5:
adrp x0, l_.str.2@PAGE
Lloh6:
add x0, x0, l_.str.2@PAGEOFF
bl _printf
add x8, sp, #8
str x8, [sp]
Lloh7:
adrp x0, l_.str.3@PAGE
Lloh8:
add x0, x0, l_.str.3@PAGEOFF
bl _scanf
Lloh9:
adrp x0, l_.str@PAGE
Lloh10:
add x0, x0, l_.str@PAGEOFF
bl _printf
ldur x8, [x29, #-24]
Lloh11:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh12:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh13:
ldr x9, [x9]
cmp x9, x8
b.ne LBB1_2
; %bb.1:
mov w0, #0
ldp x29, x30, [sp, #432] ; 16-byte Folded Reload
ldp x28, x27, [sp, #416] ; 16-byte Folded Reload
add sp, sp, #448
ret
LBB1_2:
bl ___stack_chk_fail
.loh AdrpLdrGotLdr Lloh11, Lloh12, Lloh13
.loh AdrpAdd Lloh9, Lloh10
.loh AdrpAdd Lloh7, Lloh8
.loh AdrpAdd Lloh5, Lloh6
.loh AdrpLdrGotLdr Lloh2, Lloh3, Lloh4
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "Non unique"
l_.str.2: ; @.str.2
.asciz "Enter your name: "
l_.str.3: ; @.str.3
.asciz "%s"
.subsections_via_symbols
| the_stack_data/102905.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _LzmaEnc_PrepareForLzma2 ## -- Begin function LzmaEnc_PrepareForLzma2
.p2align 4, 0x90
_LzmaEnc_PrepareForLzma2: ## @LzmaEnc_PrepareForLzma2
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq %rsi, 8(%rdi)
movl $1, (%rdi)
movl %edx, %esi
movq %rcx, %rdx
movq %r8, %rcx
popq %rbp
jmp _LzmaEnc_AllocAndInit ## TAILCALL
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _LzmaEnc_PrepareForLzma2 ; -- Begin function LzmaEnc_PrepareForLzma2
.p2align 2
_LzmaEnc_PrepareForLzma2: ; @LzmaEnc_PrepareForLzma2
.cfi_startproc
; %bb.0:
str x1, [x0, #8]
mov w8, #1
str w8, [x0]
mov x1, x2
mov x2, x3
mov x3, x4
b _LzmaEnc_AllocAndInit
.cfi_endproc
; -- End function
.subsections_via_symbols
| AnghaBench/sumatrapdf/ext/lzma/C/extr_LzmaEnc.c_LzmaEnc_PrepareForLzma2.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function rsxx_requeue_dma
_rsxx_requeue_dma: ## @rsxx_requeue_dma
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq %rsi, %r14
movq %rdi, %rbx
callq _spin_lock_bh
incl 8(%rbx)
leaq 4(%rbx), %rsi
movq %r14, %rdi
callq _list_add
movq %rbx, %rdi
popq %rbx
popq %r14
popq %rbp
jmp _spin_unlock_bh ## TAILCALL
.cfi_endproc
## -- End function
.no_dead_strip _rsxx_requeue_dma
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function rsxx_requeue_dma
_rsxx_requeue_dma: ; @rsxx_requeue_dma
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x1
mov x20, x0
bl _spin_lock_bh
ldr w8, [x20, #8]
add w8, w8, #1
str w8, [x20, #8]
add x1, x20, #4
mov x0, x19
bl _list_add
mov x0, x20
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
b _spin_unlock_bh
.cfi_endproc
; -- End function
.no_dead_strip _rsxx_requeue_dma
.subsections_via_symbols
| AnghaBench/fastsocket/kernel/drivers/block/rsxx/extr_dma.c_rsxx_requeue_dma.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function acpi_als_read_value
_acpi_als_read_value: ## @acpi_als_read_value
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdx, %r15
movq %rsi, %r14
movq (%rdi), %rax
movl (%rax), %edi
xorl %ebx, %ebx
leaq -32(%rbp), %rcx
xorl %edx, %edx
callq _acpi_evaluate_integer
movl %eax, %edi
callq _ACPI_FAILURE
testq %rax, %rax
je LBB0_2
## %bb.1:
movl %r14d, %edi
callq _ACPI_EXCEPTION
movq _EIO@GOTPCREL(%rip), %rax
subl (%rax), %ebx
jmp LBB0_3
LBB0_2:
movq -32(%rbp), %rax
movq %rax, (%r15)
LBB0_3:
movl %ebx, %eax
addq $8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _AE_INFO,4,2 ## @AE_INFO
.comm _EIO,4,2 ## @EIO
.no_dead_strip _acpi_als_read_value
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function acpi_als_read_value
_acpi_als_read_value: ; @acpi_als_read_value
.cfi_startproc
; %bb.0:
sub sp, sp, #48
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x20, x2
mov x19, x1
ldr x8, [x0]
ldr w0, [x8]
add x3, sp, #8
mov x2, #0
bl _acpi_evaluate_integer
bl _ACPI_FAILURE
cbz x0, LBB0_2
; %bb.1:
mov x0, x19
bl _ACPI_EXCEPTION
Lloh0:
adrp x8, _EIO@GOTPAGE
Lloh1:
ldr x8, [x8, _EIO@GOTPAGEOFF]
Lloh2:
ldr w8, [x8]
neg w0, w8
b LBB0_3
LBB0_2:
ldr x8, [sp, #8]
str x8, [x20]
LBB0_3:
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #48
ret
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _AE_INFO,4,2 ; @AE_INFO
.comm _EIO,4,2 ; @EIO
.no_dead_strip _acpi_als_read_value
.subsections_via_symbols
| AnghaBench/linux/drivers/iio/light/extr_acpi-als.c_acpi_als_read_value.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _rdpdr_add_fds ## -- Begin function rdpdr_add_fds
.p2align 4, 0x90
_rdpdr_add_fds: ## @rdpdr_add_fds
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $40, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %r9, -64(%rbp) ## 8-byte Spill
movq %rcx, -72(%rbp) ## 8-byte Spill
movq %rdi, -56(%rbp) ## 8-byte Spill
movq 8(%rdi), %r15
testq %r15, %r15
je LBB0_21
## %bb.1:
movq %r8, %r12
movq %rdx, %r13
movq %rsi, %rbx
xorl %r14d, %r14d
jmp LBB0_2
LBB0_18: ## in Loop: Header=BB0_2 Depth=1
cmpl $5, %r14d
movl $5, %eax
cmovgel %eax, %r14d
.p2align 4, 0x90
LBB0_19: ## in Loop: Header=BB0_2 Depth=1
movl %r14d, %eax
LBB0_20: ## in Loop: Header=BB0_2 Depth=1
movq 24(%r15), %r15
movl %eax, %r14d
testq %r15, %r15
je LBB0_21
LBB0_2: ## =>This Inner Loop Header: Depth=1
movq (%r15), %rdi
testq %rdi, %rdi
je LBB0_19
## %bb.3: ## in Loop: Header=BB0_2 Depth=1
movl 8(%r15), %eax
cmpl $128, %eax
je LBB0_15
## %bb.4: ## in Loop: Header=BB0_2 Depth=1
cmpl $130, %eax
je LBB0_18
## %bb.5: ## in Loop: Header=BB0_2 Depth=1
cmpl $129, %eax
jne LBB0_19
## %bb.6: ## in Loop: Header=BB0_2 Depth=1
movq %r13, %rsi
callq _FD_SET
movl (%rbx), %edi
movq (%r15), %rsi
callq _MAX
movl %eax, (%rbx)
movl 12(%r15), %eax
testl %eax, %eax
je LBB0_10
## %bb.7: ## in Loop: Header=BB0_2 Depth=1
testl %r14d, %r14d
je LBB0_9
## %bb.8: ## in Loop: Header=BB0_2 Depth=1
cmpl %r14d, %eax
jl LBB0_9
LBB0_10: ## in Loop: Header=BB0_2 Depth=1
movl 16(%r15), %ecx
testl %ecx, %ecx
je LBB0_19
## %bb.11: ## in Loop: Header=BB0_2 Depth=1
cmpl $0, 32(%r15)
jle LBB0_19
## %bb.12: ## in Loop: Header=BB0_2 Depth=1
testl %r14d, %r14d
je LBB0_14
## %bb.13: ## in Loop: Header=BB0_2 Depth=1
movl %r14d, %eax
cmpl %r14d, %ecx
jge LBB0_20
LBB0_14: ## in Loop: Header=BB0_2 Depth=1
movq (%r15), %rax
movq -56(%rbp), %rdx ## 8-byte Reload
movq %rax, (%rdx)
movslq %ecx, %rax
imulq $274877907, %rax, %rax ## imm = 0x10624DD3
movq %rax, %rdx
shrq $63, %rdx
sarq $38, %rax
addl %edx, %eax
movl %eax, (%r12)
imull $1000, %eax, %eax ## imm = 0x3E8
movl %ecx, %edx
subl %eax, %edx
imull $1000, %edx, %eax ## imm = 0x3E8
movl %eax, 4(%r12)
movq _True@GOTPCREL(%rip), %rax
movl (%rax), %eax
movq -64(%rbp), %rdx ## 8-byte Reload
movl %eax, (%rdx)
movl %ecx, %eax
jmp LBB0_20
LBB0_15: ## in Loop: Header=BB0_2 Depth=1
leaq -41(%rbp), %rsi
xorl %edx, %edx
callq _write
testl %eax, %eax
je LBB0_17
## %bb.16: ## in Loop: Header=BB0_2 Depth=1
movq _errno@GOTPCREL(%rip), %rax
movl (%rax), %eax
movq _EBADF@GOTPCREL(%rip), %rcx
cmpl (%rcx), %eax
je LBB0_19
LBB0_17: ## in Loop: Header=BB0_2 Depth=1
movq (%r15), %rdi
movq -72(%rbp), %rsi ## 8-byte Reload
callq _FD_SET
movl (%rbx), %edi
movq (%r15), %rsi
callq _MAX
movl %eax, (%rbx)
jmp LBB0_19
LBB0_9: ## in Loop: Header=BB0_2 Depth=1
movq (%r15), %rcx
movq -56(%rbp), %rdx ## 8-byte Reload
movq %rcx, (%rdx)
movslq %eax, %rcx
imulq $274877907, %rcx, %rcx ## imm = 0x10624DD3
movq %rcx, %rdx
shrq $63, %rdx
sarq $38, %rcx
addl %edx, %ecx
movl %ecx, (%r12)
imull $1000, %ecx, %ecx ## imm = 0x3E8
movl %eax, %edx
subl %ecx, %edx
imull $1000, %edx, %ecx ## imm = 0x3E8
movl %ecx, 4(%r12)
movq _True@GOTPCREL(%rip), %rcx
movl (%rcx), %ecx
movq -64(%rbp), %rdx ## 8-byte Reload
movl %ecx, (%rdx)
jmp LBB0_20
LBB0_21:
addq $40, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _True,4,2 ## @True
.comm _errno,4,2 ## @errno
.comm _EBADF,4,2 ## @EBADF
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _rdpdr_add_fds ; -- Begin function rdpdr_add_fds
.p2align 2
_rdpdr_add_fds: ; @rdpdr_add_fds
.cfi_startproc
; %bb.0:
sub sp, sp, #112
.cfi_def_cfa_offset 112
stp x28, x27, [sp, #16] ; 16-byte Folded Spill
stp x26, x25, [sp, #32] ; 16-byte Folded Spill
stp x24, x23, [sp, #48] ; 16-byte Folded Spill
stp x22, x21, [sp, #64] ; 16-byte Folded Spill
stp x20, x19, [sp, #80] ; 16-byte Folded Spill
stp x29, x30, [sp, #96] ; 16-byte Folded Spill
add x29, sp, #96
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
ldr x25, [x0, #8]
cbz x25, LBB0_19
; %bb.1:
mov x19, x5
mov x20, x4
mov x21, x3
mov x22, x2
mov x23, x1
mov x24, x0
mov w27, #0
mov w28, #5
mov w26, #1000
b LBB0_4
LBB0_2: ; in Loop: Header=BB0_4 Depth=1
cmp w27, #5
csel w27, w27, w28, lt
LBB0_3: ; in Loop: Header=BB0_4 Depth=1
ldr x25, [x25, #24]
cbz x25, LBB0_19
LBB0_4: ; =>This Inner Loop Header: Depth=1
ldr x0, [x25]
cbz x0, LBB0_3
; %bb.5: ; in Loop: Header=BB0_4 Depth=1
ldr w8, [x25, #8]
cmp w8, #128
b.eq LBB0_16
; %bb.6: ; in Loop: Header=BB0_4 Depth=1
cmp w8, #130
b.eq LBB0_2
; %bb.7: ; in Loop: Header=BB0_4 Depth=1
cmp w8, #129
b.ne LBB0_3
; %bb.8: ; in Loop: Header=BB0_4 Depth=1
mov x1, x22
bl _FD_SET
ldr w0, [x23]
ldr x1, [x25]
bl _MAX
str w0, [x23]
ldr w8, [x25, #12]
cbz w8, LBB0_11
; %bb.9: ; in Loop: Header=BB0_4 Depth=1
cbz w27, LBB0_15
; %bb.10: ; in Loop: Header=BB0_4 Depth=1
cmp w8, w27
b.lt LBB0_15
LBB0_11: ; in Loop: Header=BB0_4 Depth=1
ldr w8, [x25, #16]
cbz w8, LBB0_3
; %bb.12: ; in Loop: Header=BB0_4 Depth=1
ldr w9, [x25, #32]
cmp w9, #1
b.lt LBB0_3
; %bb.13: ; in Loop: Header=BB0_4 Depth=1
cbz w27, LBB0_15
; %bb.14: ; in Loop: Header=BB0_4 Depth=1
cmp w8, w27
b.ge LBB0_3
LBB0_15: ; in Loop: Header=BB0_4 Depth=1
ldr x9, [x25]
str x9, [x24]
mov w9, #19923
movk w9, #4194, lsl #16
smull x9, w8, w9
lsr x10, x9, #63
asr x9, x9, #38
add w9, w9, w10
msub w10, w9, w26, w8
mul w10, w10, w26
stp w9, w10, [x20]
Lloh0:
adrp x9, _True@GOTPAGE
Lloh1:
ldr x9, [x9, _True@GOTPAGEOFF]
Lloh2:
ldr w9, [x9]
str w9, [x19]
mov x27, x8
b LBB0_3
LBB0_16: ; in Loop: Header=BB0_4 Depth=1
add x1, sp, #15
mov w2, #0
bl _write
cbz w0, LBB0_18
; %bb.17: ; in Loop: Header=BB0_4 Depth=1
Lloh3:
adrp x8, _errno@GOTPAGE
Lloh4:
ldr x8, [x8, _errno@GOTPAGEOFF]
Lloh5:
ldr w8, [x8]
Lloh6:
adrp x9, _EBADF@GOTPAGE
Lloh7:
ldr x9, [x9, _EBADF@GOTPAGEOFF]
Lloh8:
ldr w9, [x9]
cmp w8, w9
b.eq LBB0_3
LBB0_18: ; in Loop: Header=BB0_4 Depth=1
ldr x0, [x25]
mov x1, x21
bl _FD_SET
ldr w0, [x23]
ldr x1, [x25]
bl _MAX
str w0, [x23]
b LBB0_3
LBB0_19:
ldp x29, x30, [sp, #96] ; 16-byte Folded Reload
ldp x20, x19, [sp, #80] ; 16-byte Folded Reload
ldp x22, x21, [sp, #64] ; 16-byte Folded Reload
ldp x24, x23, [sp, #48] ; 16-byte Folded Reload
ldp x26, x25, [sp, #32] ; 16-byte Folded Reload
ldp x28, x27, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #112
ret
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGotLdr Lloh6, Lloh7, Lloh8
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.cfi_endproc
; -- End function
.comm _True,4,2 ; @True
.comm _errno,4,2 ; @errno
.comm _EBADF,4,2 ; @EBADF
.subsections_via_symbols
| AnghaBench/reactos/modules/rosapps/applications/net/tsclient/rdesktop/extr_rdpdr.c_rdpdr_add_fds.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _a ## -- Begin function a
.p2align 4, 0x90
_a: ## @a
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq $-1, %rax
popq %rbp
retq
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _a ; -- Begin function a
.p2align 2
_a: ; @a
.cfi_startproc
; %bb.0:
mov x0, #-1
ret
.cfi_endproc
; -- End function
.subsections_via_symbols
| the_stack_data/110133.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function read_bitor_expr
_read_bitor_expr: ## @read_bitor_expr
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
xorl %eax, %eax
callq _read_bitxor_expr
.p2align 4, 0x90
LBB0_2: ## =>This Inner Loop Header: Depth=1
movq %rax, %rbx
movl $124, %edi
callq _next_token
testq %rax, %rax
je LBB0_3
## %bb.1: ## in Loop: Header=BB0_2 Depth=1
movq %rbx, %rdi
callq _conv
movl %eax, %ebx
xorl %eax, %eax
callq _read_bitxor_expr
movq %rax, %rdi
callq _conv
movl $124, %edi
movl %ebx, %esi
movl %eax, %edx
callq _binop
jmp LBB0_2
LBB0_3:
movq %rbx, %rax
addq $8, %rsp
popq %rbx
popq %rbp
retq
.cfi_endproc
## -- End function
.no_dead_strip _read_bitor_expr
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function read_bitor_expr
_read_bitor_expr: ; @read_bitor_expr
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
bl _read_bitxor_expr
LBB0_1: ; =>This Inner Loop Header: Depth=1
mov x19, x0
mov w0, #124
bl _next_token
cbz x0, LBB0_3
; %bb.2: ; in Loop: Header=BB0_1 Depth=1
mov x0, x19
bl _conv
mov x19, x0
bl _read_bitxor_expr
bl _conv
mov x2, x0
mov w0, #124
mov x1, x19
bl _binop
b LBB0_1
LBB0_3:
mov x0, x19
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
.no_dead_strip _read_bitor_expr
.subsections_via_symbols
| AnghaBench/8cc/extr_parse.c_read_bitor_expr.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function IoInitializeIrp
_IoInitializeIrp: ## @IoInitializeIrp
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %edx, %r15d
movl %esi, %r14d
movq %rdi, %rbx
movl %edx, %edi
callq _IoSizeOfIrp
movq %rbx, %rdi
movl %eax, %esi
callq _bzero
movl %r14d, 20(%rbx)
movl %r15d, (%rbx)
movl %r15d, 4(%rbx)
leaq 16(%rbx), %rdi
callq _InitializeListHead
movslq %r15d, %rax
leaq (%rbx,%rax,4), %rax
addq $24, %rax
movq %rax, 8(%rbx)
addq $8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.no_dead_strip _IoInitializeIrp
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function IoInitializeIrp
_IoInitializeIrp: ; @IoInitializeIrp
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x19, x2
mov x20, x1
mov x21, x0
mov x0, x2
bl _IoSizeOfIrp
mov x1, x0
mov x0, x21
bl _bzero
str w20, [x21, #20]
add x0, x21, #16
stp w19, w19, [x21]
bl _InitializeListHead
add x8, x21, w19, sxtw #2
add x8, x8, #24
str x8, [x21, #8]
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
.no_dead_strip _IoInitializeIrp
.subsections_via_symbols
| AnghaBench/freebsd/sys/compat/ndis/extr_subr_ntoskrnl.c_IoInitializeIrp.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _ib_dealloc_fmr ## -- Begin function ib_dealloc_fmr
.p2align 4, 0x90
_ib_dealloc_fmr:                        ## @ib_dealloc_fmr
## --------------------------------------------------------------------
## int ib_dealloc_fmr(fmr)  —  SysV AMD64, arg in %rdi.
## Calls the function pointer stored at *(*fmr) (offset 0 -> table,
## table[0] = dealloc callback; %rdi still holds fmr at the call, so
## the callback presumably receives it — confirm against C source).
## On success (callback returned 0) decrements the refcount object at
## fmr+8 via atomic_dec.  Returns the callback's result.
## --------------------------------------------------------------------
	.cfi_startproc
## %bb.0:
	pushq	%rbp
	.cfi_def_cfa_offset 16
	.cfi_offset %rbp, -16
	movq	%rsp, %rbp
	.cfi_def_cfa_register %rbp
	pushq	%r14                            ## callee-saved scratch (also keeps rsp 16-aligned)
	pushq	%rbx
	.cfi_offset %rbx, -32
	.cfi_offset %r14, -24
	movq	(%rdi), %rax                    ## rax = *fmr (pointer to callback slot)
	movq	8(%rdi), %r14                   ## r14 = fmr+8 value, live across the indirect call
	callq	*(%rax)                         ## ret = (*callback)()
	movl	%eax, %ebx                      ## ebx = ret (preserve across atomic_dec)
	testl	%eax, %eax
	jne	LBB0_2                          ## nonzero ⇒ failure, skip the refcount drop
## %bb.1:
	movq	%r14, %rdi
	callq	_atomic_dec                     ## success path: drop reference
LBB0_2:
	movl	%ebx, %eax                      ## return ret
	popq	%rbx
	popq	%r14
	popq	%rbp
	retq
	.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _ib_dealloc_fmr ; -- Begin function ib_dealloc_fmr
.p2align 2
_ib_dealloc_fmr:                        ; @ib_dealloc_fmr
; ---------------------------------------------------------------------
; int ib_dealloc_fmr(fmr) — AArch64 twin of the x86 version above the
; separator: calls the function pointer at *(*fmr); if it returns 0,
; atomically decrements the refcount object read from fmr+8; returns
; the callback's result.  x0 still holds fmr at the indirect call, so
; the callback presumably receives it — confirm against C source.
; ---------------------------------------------------------------------
	.cfi_startproc
; %bb.0:
	stp	x20, x19, [sp, #-32]!           ; 16-byte Folded Spill: save x19/x20, carve frame
	.cfi_def_cfa_offset 32
	stp	x29, x30, [sp, #16]             ; 16-byte Folded Spill: save FP/LR
	add	x29, sp, #16                    ; establish frame pointer
	.cfi_def_cfa w29, 16
	.cfi_offset w30, -8
	.cfi_offset w29, -16
	.cfi_offset w19, -24
	.cfi_offset w20, -32
	ldp	x8, x19, [x0]                   ; x8 = *fmr (table), x19 = fmr+8 (refcount obj)
	ldr	x8, [x8]                        ; x8 = callback pointer (table[0])
	blr	x8                              ; ret = (*callback)()
	mov	x20, x0                         ; x20 = ret, live across atomic_dec
	cbnz	w0, LBB0_2                      ; nonzero ⇒ failure, skip refcount drop
; %bb.1:
	mov	x0, x19
	bl	_atomic_dec                     ; success path: drop reference
LBB0_2:
	mov	x0, x20                         ; return ret
	ldp	x29, x30, [sp, #16]             ; 16-byte Folded Reload: FP/LR
	ldp	x20, x19, [sp], #32             ; 16-byte Folded Reload; pop frame
	ret
	.cfi_endproc
; -- End function
.subsections_via_symbols
| AnghaBench/freebsd/sys/ofed/drivers/infiniband/core/extr_ib_verbs.c_ib_dealloc_fmr.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function ahd_sync_tqinfifo
_ahd_sync_tqinfifo:                     ## @ahd_sync_tqinfifo
## ahd_sync_tqinfifo: compiled to a no-op stub — the original body's
## work (target-mode FIFO sync, guarded by AHD_TARGETROLE/AHD_TMODE_CMDS
## in the C source) was optimized away under AnghaBench's mocked
## dependencies.  Only a standard frame setup/teardown remains.
	.cfi_startproc
## %bb.0:
	pushq	%rbp
	.cfi_def_cfa_offset 16
	.cfi_offset %rbp, -16
	movq	%rsp, %rbp
	.cfi_def_cfa_register %rbp
	popq	%rbp                            ## nothing to do; unwind frame and return
	retq
	.cfi_endproc
## -- End function
.comm _AHD_TARGETROLE,4,2 ## @AHD_TARGETROLE
.comm _AHD_TMODE_CMDS,4,2 ## @AHD_TMODE_CMDS
.no_dead_strip _ahd_sync_tqinfifo
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function ahd_sync_tqinfifo
_ahd_sync_tqinfifo:                     ; @ahd_sync_tqinfifo
; No-op stub: the AArch64 build of the same function reduced to a bare
; return (no frame needed) under AnghaBench's mocked dependencies.
	.cfi_startproc
; %bb.0:
	ret                                 ; nothing to do
	.cfi_endproc
; -- End function
.comm _AHD_TARGETROLE,4,2 ; @AHD_TARGETROLE
.comm _AHD_TMODE_CMDS,4,2 ; @AHD_TMODE_CMDS
.no_dead_strip _ahd_sync_tqinfifo
.subsections_via_symbols
| AnghaBench/linux/drivers/scsi/aic7xxx/extr_aic79xx_core.c_ahd_sync_tqinfifo.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function mpc5200_wdt_remove
_mpc5200_wdt_remove:                    ## @mpc5200_wdt_remove
## --------------------------------------------------------------------
## int mpc5200_wdt_remove(dev) — SysV AMD64; the incoming %rdi is
## forwarded untouched to dev_get_drvdata.  Teardown sequence for the
## MPC5200 watchdog driver:
##   wdt = dev_get_drvdata(dev); mpc5200_wdt_stop(wdt);
##   misc_deregister(&wdt+20); iounmap(*(int*)(wdt+16));
##   release_mem_region(start, end - start + 1)   (start=wdt+0, end=wdt+8;
##   presumably a resource's [start,end] pair — confirm against C source);
##   kfree(wdt); return 0;
## --------------------------------------------------------------------
	.cfi_startproc
## %bb.0:
	pushq	%rbp
	.cfi_def_cfa_offset 16
	.cfi_offset %rbp, -16
	movq	%rsp, %rbp
	.cfi_def_cfa_register %rbp
	pushq	%rbx
	pushq	%rax                            ## 8-byte pad keeps rsp 16-aligned at calls
	.cfi_offset %rbx, -24
	callq	_dev_get_drvdata                ## rax = wdt = drvdata(dev)
	movq	%rax, %rbx                      ## rbx = wdt, live across all calls
	movq	%rax, %rdi
	callq	_mpc5200_wdt_stop               ## stop the watchdog first
	leaq	20(%rbx), %rdi
	callq	_misc_deregister                ## unregister misc device embedded at wdt+20
	movl	16(%rbx), %edi
	callq	_iounmap                        ## unmap the register mapping stored at wdt+16
	movq	(%rbx), %rdi                    ## rdi = start (wdt+0)
	movq	8(%rbx), %rsi                   ## rsi = end   (wdt+8)
	subq	%rdi, %rsi
	incq	%rsi                            ## rsi = end - start + 1 (region length)
	callq	_release_mem_region
	movq	%rbx, %rdi
	callq	_kfree                          ## free the driver state itself
	xorl	%eax, %eax                      ## return 0
	addq	$8, %rsp
	popq	%rbx
	popq	%rbp
	retq
	.cfi_endproc
## -- End function
.no_dead_strip _mpc5200_wdt_remove
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function mpc5200_wdt_remove
_mpc5200_wdt_remove:                    ; @mpc5200_wdt_remove
; ---------------------------------------------------------------------
; int mpc5200_wdt_remove(dev) — AArch64 twin of the x86 version above:
; wdt = dev_get_drvdata(dev); mpc5200_wdt_stop(wdt);
; misc_deregister(wdt+20); iounmap(*(int*)(wdt+16));
; release_mem_region(start, end - start + 1) with start/end loaded from
; wdt+0/wdt+8 (presumably a resource range — confirm against C source);
; kfree(wdt); return 0.  Incoming x0 is forwarded to dev_get_drvdata.
; ---------------------------------------------------------------------
	.cfi_startproc
; %bb.0:
	stp	x20, x19, [sp, #-32]!           ; 16-byte Folded Spill: save x19/x20, carve frame
	.cfi_def_cfa_offset 32
	stp	x29, x30, [sp, #16]             ; 16-byte Folded Spill: save FP/LR
	add	x29, sp, #16                    ; establish frame pointer
	.cfi_def_cfa w29, 16
	.cfi_offset w30, -8
	.cfi_offset w29, -16
	.cfi_offset w19, -24
	.cfi_offset w20, -32
	bl	_dev_get_drvdata                ; x0 = wdt = drvdata(dev)
	mov	x19, x0                         ; x19 = wdt, live across all calls
	bl	_mpc5200_wdt_stop               ; stop the watchdog first
	add	x0, x19, #20
	bl	_misc_deregister                ; unregister misc device embedded at wdt+20
	ldr	w0, [x19, #16]
	bl	_iounmap                        ; unmap the register mapping stored at wdt+16
	ldp	x0, x8, [x19]                   ; x0 = start (wdt+0), x8 = end (wdt+8)
	sub	x8, x8, x0
	add	x1, x8, #1                      ; x1 = end - start + 1 (region length)
	bl	_release_mem_region
	mov	x0, x19
	bl	_kfree                          ; free the driver state itself
	mov	w0, #0                          ; return 0
	ldp	x29, x30, [sp, #16]             ; 16-byte Folded Reload: FP/LR
	ldp	x20, x19, [sp], #32             ; 16-byte Folded Reload; pop frame
	ret
	.cfi_endproc
; -- End function
.no_dead_strip _mpc5200_wdt_remove
.subsections_via_symbols
| AnghaBench/fastsocket/kernel/drivers/watchdog/extr_mpc5200_wdt.c_mpc5200_wdt_remove.c | anghabench |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.