x86 stringlengths 122 9.39M | arm stringlengths 122 9.33M | file stringlengths 19 200 | source stringclasses 2
values |
|---|---|---|---|
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _luaT_init ## -- Begin function luaT_init
.p2align 4, 0x90
_luaT_init: ## @luaT_init
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq _TM_N@GOTPCREL(%rip), %r15
cmpl $0, (%r15)
jle LBB0_3
## %bb.1:
movq %rdi, %r14
leaq _reltable.luaT_init(%rip), %r12
xorl %ebx, %ebx
.p2align 4, 0x90
LBB0_2: ## =>This Inner Loop Header: Depth=1
movslq (%r12), %rsi
leaq _reltable.luaT_init(%rip), %rax
addq %rax, %rsi
movq %r14, %rdi
callq _luaS_new
movl %eax, %r13d
movq %r14, %rdi
callq _G
movq (%rax), %rax
movl %r13d, (%rax,%rbx,4)
movq %r14, %rdi
callq _G
movq (%rax), %rax
movl (%rax,%rbx,4), %edi
callq _luaS_fix
incq %rbx
movslq (%r15), %rax
addq $4, %r12
cmpq %rax, %rbx
jl LBB0_2
LBB0_3:
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__const
.p2align 2 ## @reltable.luaT_init
_reltable.luaT_init:
.long L_.str-_reltable.luaT_init
.long L_.str.1-_reltable.luaT_init
.long L_.str.2-_reltable.luaT_init
.long L_.str.3-_reltable.luaT_init
.long L_.str.4-_reltable.luaT_init
.long L_.str.5-_reltable.luaT_init
.long L_.str.6-_reltable.luaT_init
.long L_.str.7-_reltable.luaT_init
.long L_.str.8-_reltable.luaT_init
.long L_.str.9-_reltable.luaT_init
.long L_.str.10-_reltable.luaT_init
.long L_.str.11-_reltable.luaT_init
.long L_.str.12-_reltable.luaT_init
.long L_.str.13-_reltable.luaT_init
.long L_.str.14-_reltable.luaT_init
.long L_.str.15-_reltable.luaT_init
.long L_.str.16-_reltable.luaT_init
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "__index"
L_.str.1: ## @.str.1
.asciz "__newindex"
L_.str.2: ## @.str.2
.asciz "__gc"
L_.str.3: ## @.str.3
.asciz "__mode"
L_.str.4: ## @.str.4
.asciz "__eq"
L_.str.5: ## @.str.5
.asciz "__add"
L_.str.6: ## @.str.6
.asciz "__sub"
L_.str.7: ## @.str.7
.asciz "__mul"
L_.str.8: ## @.str.8
.asciz "__div"
L_.str.9: ## @.str.9
.asciz "__mod"
L_.str.10: ## @.str.10
.asciz "__pow"
L_.str.11: ## @.str.11
.asciz "__unm"
L_.str.12: ## @.str.12
.asciz "__len"
L_.str.13: ## @.str.13
.asciz "__lt"
L_.str.14: ## @.str.14
.asciz "__le"
L_.str.15: ## @.str.15
.asciz "__concat"
L_.str.16: ## @.str.16
.asciz "__call"
.comm _TM_N,4,2 ## @TM_N
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _luaT_init ; -- Begin function luaT_init
.p2align 2
_luaT_init: ; @luaT_init
.cfi_startproc
; %bb.0:
stp x24, x23, [sp, #-64]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 64
stp x22, x21, [sp, #16] ; 16-byte Folded Spill
stp x20, x19, [sp, #32] ; 16-byte Folded Spill
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
Lloh0:
adrp x21, _TM_N@GOTPAGE
Lloh1:
ldr x21, [x21, _TM_N@GOTPAGEOFF]
ldr w8, [x21]
cmp w8, #1
b.lt LBB0_3
; %bb.1:
mov x19, x0
mov x22, #0
Lloh2:
adrp x23, _luaT_init.luaT_eventname@PAGE
Lloh3:
add x23, x23, _luaT_init.luaT_eventname@PAGEOFF
LBB0_2: ; =>This Inner Loop Header: Depth=1
ldr x1, [x23, x22, lsl #3]
mov x0, x19
bl _luaS_new
mov x20, x0
mov x0, x19
bl _G
ldr x8, [x0]
lsl x24, x22, #2
str w20, [x8, x24]
mov x0, x19
bl _G
ldr x8, [x0]
ldr w0, [x8, x24]
bl _luaS_fix
add x22, x22, #1
ldrsw x8, [x21]
cmp x22, x8
b.lt LBB0_2
LBB0_3:
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
ldp x24, x23, [sp], #64 ; 16-byte Folded Reload
ret
.loh AdrpLdrGot Lloh0, Lloh1
.loh AdrpAdd Lloh2, Lloh3
.cfi_endproc
; -- End function
.section __DATA,__const
.p2align 3 ; @luaT_init.luaT_eventname
_luaT_init.luaT_eventname:
.quad l_.str
.quad l_.str.1
.quad l_.str.2
.quad l_.str.3
.quad l_.str.4
.quad l_.str.5
.quad l_.str.6
.quad l_.str.7
.quad l_.str.8
.quad l_.str.9
.quad l_.str.10
.quad l_.str.11
.quad l_.str.12
.quad l_.str.13
.quad l_.str.14
.quad l_.str.15
.quad l_.str.16
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "__index"
l_.str.1: ; @.str.1
.asciz "__newindex"
l_.str.2: ; @.str.2
.asciz "__gc"
l_.str.3: ; @.str.3
.asciz "__mode"
l_.str.4: ; @.str.4
.asciz "__eq"
l_.str.5: ; @.str.5
.asciz "__add"
l_.str.6: ; @.str.6
.asciz "__sub"
l_.str.7: ; @.str.7
.asciz "__mul"
l_.str.8: ; @.str.8
.asciz "__div"
l_.str.9: ; @.str.9
.asciz "__mod"
l_.str.10: ; @.str.10
.asciz "__pow"
l_.str.11: ; @.str.11
.asciz "__unm"
l_.str.12: ; @.str.12
.asciz "__len"
l_.str.13: ; @.str.13
.asciz "__lt"
l_.str.14: ; @.str.14
.asciz "__le"
l_.str.15: ; @.str.15
.asciz "__concat"
l_.str.16: ; @.str.16
.asciz "__call"
.comm _TM_N,4,2 ; @TM_N
.subsections_via_symbols
| AnghaBench/xLua/build/lua-5.1.5/src/extr_ltm.c_luaT_init.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function smu7_display_configuration_changed_task
_smu7_display_configuration_changed_task: ## @smu7_display_configuration_changed_task
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
popq %rbp
jmp _smu7_program_display_gap ## TAILCALL
.cfi_endproc
## -- End function
.no_dead_strip _smu7_display_configuration_changed_task
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function smu7_display_configuration_changed_task
_smu7_display_configuration_changed_task: ; @smu7_display_configuration_changed_task
.cfi_startproc
; %bb.0:
b _smu7_program_display_gap
.cfi_endproc
; -- End function
.no_dead_strip _smu7_display_configuration_changed_task
.subsections_via_symbols
| AnghaBench/linux/drivers/gpu/drm/amd/powerplay/hwmgr/extr_smu7_hwmgr.c_smu7_display_configuration_changed_task.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function cnic_cm_init_bnx2_hw
_cnic_cm_init_bnx2_hw: ## @cnic_cm_init_bnx2_hw
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
movq %rdi, %rbx
xorl %eax, %eax
callq _prandom_u32
movq %rbx, %rdi
movl $45, %esi
xorl %edx, %edx
movl %eax, %ecx
callq _cnic_ctx_wr
xorl %eax, %eax
addq $8, %rsp
popq %rbx
popq %rbp
retq
.cfi_endproc
## -- End function
.no_dead_strip _cnic_cm_init_bnx2_hw
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function cnic_cm_init_bnx2_hw
_cnic_cm_init_bnx2_hw: ; @cnic_cm_init_bnx2_hw
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x0
bl _prandom_u32
mov x3, x0
mov x0, x19
mov w1, #45
mov w2, #0
bl _cnic_ctx_wr
mov w0, #0
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
.no_dead_strip _cnic_cm_init_bnx2_hw
.subsections_via_symbols
| AnghaBench/linux/drivers/net/ethernet/broadcom/extr_cnic.c_cnic_cm_init_bnx2_hw.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _flush_tlb_page ## -- Begin function flush_tlb_page
.p2align 4, 0x90
_flush_tlb_page: ## @flush_tlb_page
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq (%rdi), %rax
testq %rax, %rax
je LBB0_7
## %bb.1:
movq (%rax), %r12
movq _NO_CONTEXT@GOTPCREL(%rip), %rax
cmpq (%rax), %r12
jne LBB0_2
LBB0_7:
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB0_2:
movq %rsi, %r14
movq %rdi, %rbx
movq _MMU_NO_ASID@GOTPCREL(%rip), %r13
movq _MMU_CONTEXT_ASID_MASK@GOTPCREL(%rip), %rax
andq (%rax), %r12
movq (%r13), %r15
movq _PAGE_MASK@GOTPCREL(%rip), %rax
andq (%rax), %r14
callq _local_irq_save
movq (%rbx), %rax
movq _current@GOTPCREL(%rip), %rcx
movq (%rcx), %rcx
cmpq (%rcx), %rax
je LBB0_4
## %bb.3:
xorl %eax, %eax
callq _get_asid
movq %rax, %r15
movq %r12, %rdi
callq _set_asid
LBB0_4:
movq %r12, %rdi
movq %r14, %rsi
callq ___flush_tlb_page
cmpq (%r13), %r15
je LBB0_6
## %bb.5:
movq %r15, %rdi
callq _set_asid
LBB0_6:
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp _local_irq_restore ## TAILCALL
.cfi_endproc
## -- End function
.comm _NO_CONTEXT,8,3 ## @NO_CONTEXT
.comm _MMU_NO_ASID,8,3 ## @MMU_NO_ASID
.comm _MMU_CONTEXT_ASID_MASK,8,3 ## @MMU_CONTEXT_ASID_MASK
.comm _PAGE_MASK,8,3 ## @PAGE_MASK
.comm _current,8,3 ## @current
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _flush_tlb_page ; -- Begin function flush_tlb_page
.p2align 2
_flush_tlb_page: ; @flush_tlb_page
.cfi_startproc
; %bb.0:
stp x24, x23, [sp, #-64]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 64
stp x22, x21, [sp, #16] ; 16-byte Folded Spill
stp x20, x19, [sp, #32] ; 16-byte Folded Spill
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
ldr x8, [x0]
cbz x8, LBB0_2
; %bb.1:
ldr x8, [x8]
Lloh0:
adrp x9, _NO_CONTEXT@GOTPAGE
Lloh1:
ldr x9, [x9, _NO_CONTEXT@GOTPAGEOFF]
Lloh2:
ldr x9, [x9]
cmp x8, x9
b.ne LBB0_3
LBB0_2:
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
ldp x24, x23, [sp], #64 ; 16-byte Folded Reload
ret
LBB0_3:
mov x22, x0
Lloh3:
adrp x23, _MMU_NO_ASID@GOTPAGE
Lloh4:
ldr x23, [x23, _MMU_NO_ASID@GOTPAGEOFF]
ldr x19, [x23]
Lloh5:
adrp x9, _MMU_CONTEXT_ASID_MASK@GOTPAGE
Lloh6:
ldr x9, [x9, _MMU_CONTEXT_ASID_MASK@GOTPAGEOFF]
Lloh7:
ldr x9, [x9]
and x20, x9, x8
Lloh8:
adrp x8, _PAGE_MASK@GOTPAGE
Lloh9:
ldr x8, [x8, _PAGE_MASK@GOTPAGEOFF]
Lloh10:
ldr x8, [x8]
and x21, x8, x1
bl _local_irq_save
ldr x8, [x22]
Lloh11:
adrp x9, _current@GOTPAGE
Lloh12:
ldr x9, [x9, _current@GOTPAGEOFF]
Lloh13:
ldr x9, [x9]
ldr x9, [x9]
cmp x8, x9
b.eq LBB0_5
; %bb.4:
bl _get_asid
mov x19, x0
mov x0, x20
bl _set_asid
LBB0_5:
mov x0, x20
mov x1, x21
bl ___flush_tlb_page
ldr x8, [x23]
cmp x19, x8
b.eq LBB0_7
; %bb.6:
mov x0, x19
bl _set_asid
LBB0_7:
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
ldp x24, x23, [sp], #64 ; 16-byte Folded Reload
b _local_irq_restore
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGotLdr Lloh11, Lloh12, Lloh13
.loh AdrpLdrGotLdr Lloh8, Lloh9, Lloh10
.loh AdrpLdrGotLdr Lloh5, Lloh6, Lloh7
.loh AdrpLdrGot Lloh3, Lloh4
.cfi_endproc
; -- End function
.comm _NO_CONTEXT,8,3 ; @NO_CONTEXT
.comm _MMU_NO_ASID,8,3 ; @MMU_NO_ASID
.comm _MMU_CONTEXT_ASID_MASK,8,3 ; @MMU_CONTEXT_ASID_MASK
.comm _PAGE_MASK,8,3 ; @PAGE_MASK
.comm _current,8,3 ; @current
.subsections_via_symbols
| AnghaBench/fastsocket/kernel/arch/avr32/mm/extr_tlb.c_flush_tlb_page.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _uv_bios_get_sn_info ## -- Begin function uv_bios_get_sn_info
.p2align 4, 0x90
_uv_bios_get_sn_info: ## @uv_bios_get_sn_info
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $24, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %r9, %r14
movq %r8, %r15
movq %rcx, %r12
movq %rdx, %r13
movq %rsi, %rbx
movl %edi, %esi
movq _UV_BIOS_GET_SN_INFO@GOTPCREL(%rip), %rax
movl (%rax), %edi
leaq -56(%rbp), %rdx
leaq -48(%rbp), %rcx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq _uv_bios_call_irqsave
movq _BIOS_STATUS_SUCCESS@GOTPCREL(%rip), %rcx
cmpq (%rcx), %rax
jne LBB0_11
## %bb.1:
movq -56(%rbp), %rcx
testq %rbx, %rbx
je LBB0_3
## %bb.2:
movl %ecx, (%rbx)
LBB0_3:
testq %r13, %r13
je LBB0_5
## %bb.4:
movq %rcx, (%r13)
LBB0_5:
testq %r12, %r12
je LBB0_7
## %bb.6:
movq %rcx, (%r12)
LBB0_7:
testq %r15, %r15
je LBB0_9
## %bb.8:
movq %rcx, (%r15)
LBB0_9:
testq %r14, %r14
je LBB0_11
## %bb.10:
movq -48(%rbp), %rcx
movq %rcx, (%r14)
LBB0_11:
addq $24, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _UV_BIOS_GET_SN_INFO,4,2 ## @UV_BIOS_GET_SN_INFO
.comm _BIOS_STATUS_SUCCESS,8,3 ## @BIOS_STATUS_SUCCESS
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _uv_bios_get_sn_info ; -- Begin function uv_bios_get_sn_info
.p2align 2
_uv_bios_get_sn_info: ; @uv_bios_get_sn_info
.cfi_startproc
; %bb.0:
sub sp, sp, #80
.cfi_def_cfa_offset 80
stp x24, x23, [sp, #16] ; 16-byte Folded Spill
stp x22, x21, [sp, #32] ; 16-byte Folded Spill
stp x20, x19, [sp, #48] ; 16-byte Folded Spill
stp x29, x30, [sp, #64] ; 16-byte Folded Spill
add x29, sp, #64
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
mov x19, x5
mov x20, x4
mov x21, x3
mov x22, x2
mov x23, x1
mov x1, x0
Lloh0:
adrp x8, _UV_BIOS_GET_SN_INFO@GOTPAGE
Lloh1:
ldr x8, [x8, _UV_BIOS_GET_SN_INFO@GOTPAGEOFF]
Lloh2:
ldr w0, [x8]
add x2, sp, #8
mov x3, sp
mov w4, #0
mov w5, #0
bl _uv_bios_call_irqsave
Lloh3:
adrp x8, _BIOS_STATUS_SUCCESS@GOTPAGE
Lloh4:
ldr x8, [x8, _BIOS_STATUS_SUCCESS@GOTPAGEOFF]
Lloh5:
ldr x8, [x8]
cmp x0, x8
b.ne LBB0_11
; %bb.1:
ldr x8, [sp, #8]
cbz x23, LBB0_3
; %bb.2:
str w8, [x23]
LBB0_3:
cbz x22, LBB0_5
; %bb.4:
str x8, [x22]
LBB0_5:
cbz x21, LBB0_7
; %bb.6:
str x8, [x21]
LBB0_7:
cbz x20, LBB0_9
; %bb.8:
str x8, [x20]
LBB0_9:
cbz x19, LBB0_11
; %bb.10:
ldr x8, [sp]
str x8, [x19]
LBB0_11:
ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
ldp x22, x21, [sp, #32] ; 16-byte Folded Reload
ldp x24, x23, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #80
ret
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _UV_BIOS_GET_SN_INFO,4,2 ; @UV_BIOS_GET_SN_INFO
.comm _BIOS_STATUS_SUCCESS,8,3 ; @BIOS_STATUS_SUCCESS
.subsections_via_symbols
| AnghaBench/linux/arch/x86/platform/uv/extr_bios_uv.c_uv_bios_get_sn_info.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.subsections_via_symbols
| the_stack_data/165769044.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _CiSetError ## -- Begin function CiSetError
.p2align 4, 0x90
_CiSetError: ## @CiSetError
.cfi_startproc
## %bb.0:
testq %rdi, %rdi
je LBB0_2
## %bb.1:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movl %esi, (%rdi)
popq %rbp
LBB0_2:
retq
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _CiSetError ; -- Begin function CiSetError
.p2align 2
_CiSetError: ; @CiSetError
.cfi_startproc
; %bb.0:
cbz x0, LBB0_2
; %bb.1:
str w1, [x0]
LBB0_2:
ret
.cfi_endproc
; -- End function
.subsections_via_symbols
| AnghaBench/SoftEtherVPN/src/Cedar/extr_Client.c_CiSetError.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _dm_cell_visit_release ## -- Begin function dm_cell_visit_release
.p2align 4, 0x90
_dm_cell_visit_release: ## @dm_cell_visit_release
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rcx, %r12
movq %rdx, %r14
movq %rsi, %r15
movq %rdi, %rbx
callq _spin_lock_irqsave
movq %r14, %rdi
movq %r12, %rsi
callq *%r15
leaq 4(%rbx), %rsi
movq %r12, %rdi
callq _rb_erase
movq %rbx, %rdi
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
jmp _spin_unlock_irqrestore ## TAILCALL
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _dm_cell_visit_release ; -- Begin function dm_cell_visit_release
.p2align 2
_dm_cell_visit_release: ; @dm_cell_visit_release
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x19, x3
mov x20, x2
mov x21, x1
mov x22, x0
bl _spin_lock_irqsave
mov x0, x20
mov x1, x19
blr x21
add x1, x22, #4
mov x0, x19
bl _rb_erase
mov x0, x22
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
b _spin_unlock_irqrestore
.cfi_endproc
; -- End function
.subsections_via_symbols
| AnghaBench/linux/drivers/md/extr_dm-bio-prison-v1.c_dm_cell_visit_release.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function FTPFINDNEXT_FindNextFileW
_FTPFINDNEXT_FindNextFileW: ## @FTPFINDNEXT_FindNextFileW
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
movq %rsi, %rbx
movq 8(%rdi), %rax
movq (%rax), %rax
movq _INTERNET_FLAG_ASYNC@GOTPCREL(%rip), %rcx
movl (%rcx), %ecx
testl %ecx, (%rax)
je LBB0_2
## %bb.1:
movq _FTPFINDNEXT_AsyncFindNextFileProc@GOTPCREL(%rip), %rax
movl (%rax), %esi
movl $16, %edx
callq _alloc_async_task
movq %rbx, 8(%rax)
movq %rax, %rdi
callq _INTERNET_AsyncCall
movq _ERROR_SUCCESS@GOTPCREL(%rip), %rax
movl (%rax), %eax
addq $8, %rsp
popq %rbx
popq %rbp
retq
LBB0_2:
movq %rbx, %rsi
addq $8, %rsp
popq %rbx
popq %rbp
jmp _FTPFINDNEXT_FindNextFileProc ## TAILCALL
.cfi_endproc
## -- End function
.comm _INTERNET_FLAG_ASYNC,4,2 ## @INTERNET_FLAG_ASYNC
.comm _FTPFINDNEXT_AsyncFindNextFileProc,4,2 ## @FTPFINDNEXT_AsyncFindNextFileProc
.comm _ERROR_SUCCESS,4,2 ## @ERROR_SUCCESS
.no_dead_strip _FTPFINDNEXT_FindNextFileW
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function FTPFINDNEXT_FindNextFileW
_FTPFINDNEXT_FindNextFileW: ; @FTPFINDNEXT_FindNextFileW
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x1
ldr x8, [x0, #8]
ldr x8, [x8]
ldr w8, [x8]
Lloh0:
adrp x9, _INTERNET_FLAG_ASYNC@GOTPAGE
Lloh1:
ldr x9, [x9, _INTERNET_FLAG_ASYNC@GOTPAGEOFF]
Lloh2:
ldr w9, [x9]
tst w9, w8
b.eq LBB0_2
; %bb.1:
Lloh3:
adrp x8, _FTPFINDNEXT_AsyncFindNextFileProc@GOTPAGE
Lloh4:
ldr x8, [x8, _FTPFINDNEXT_AsyncFindNextFileProc@GOTPAGEOFF]
Lloh5:
ldr w1, [x8]
mov w2, #16
bl _alloc_async_task
str x19, [x0, #8]
bl _INTERNET_AsyncCall
Lloh6:
adrp x8, _ERROR_SUCCESS@GOTPAGE
Lloh7:
ldr x8, [x8, _ERROR_SUCCESS@GOTPAGEOFF]
Lloh8:
ldr w0, [x8]
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
LBB0_2:
mov x1, x19
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
b _FTPFINDNEXT_FindNextFileProc
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGotLdr Lloh6, Lloh7, Lloh8
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.cfi_endproc
; -- End function
.comm _INTERNET_FLAG_ASYNC,4,2 ; @INTERNET_FLAG_ASYNC
.comm _FTPFINDNEXT_AsyncFindNextFileProc,4,2 ; @FTPFINDNEXT_AsyncFindNextFileProc
.comm _ERROR_SUCCESS,4,2 ; @ERROR_SUCCESS
.no_dead_strip _FTPFINDNEXT_FindNextFileW
.subsections_via_symbols
| AnghaBench/reactos/dll/win32/wininet/extr_ftp.c_FTPFINDNEXT_FindNextFileW.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _initLink ## -- Begin function initLink
.p2align 4, 0x90
_initLink: ## @initLink
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %edi, %r15d
movl $16, %edi
callq _malloc
movq %rax, %r14
movl $1, (%rax)
movq $0, 8(%rax)
cmpl $2, %r15d
jl LBB0_3
## %bb.1:
negl %r15d
movl $2, %ebx
movq %r14, %r12
.p2align 4, 0x90
LBB0_2: ## =>This Inner Loop Header: Depth=1
movl $16, %edi
callq _malloc
movl %ebx, (%rax)
movq $0, 8(%rax)
movq %rax, 8(%r12)
leal (%r15,%rbx), %ecx
incl %ecx
movl %ebx, %edx
incl %edx
movq %rax, %r12
movl %edx, %ebx
cmpl $1, %ecx
jne LBB0_2
LBB0_3:
movq %r14, 8(%rax)
movq %r14, %rax
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _findAndRemoveK ## -- Begin function findAndRemoveK
.p2align 4, 0x90
_findAndRemoveK: ## @findAndRemoveK
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %edx, %r14d
cmpl %esi, (%rdi)
jne LBB1_11
## %bb.1:
movq %rdi, %rbx
movq %rdi, %r13
jmp LBB1_2
.p2align 4, 0x90
LBB1_11: ## =>This Inner Loop Header: Depth=1
movq 8(%rdi), %r13
movq %rdi, %rbx
movq %r13, %rdi
cmpl %esi, (%r13)
jne LBB1_11
LBB1_2:
cmpq %r13, 8(%r13)
je LBB1_10
## %bb.3:
leal -1(%r14), %r15d
leal -2(%r14), %eax
movl %eax, -44(%rbp) ## 4-byte Spill
movl %r15d, %r12d
andl $7, %r12d
andl $-8, %r15d
jmp LBB1_4
.p2align 4, 0x90
LBB1_9: ## in Loop: Header=BB1_4 Depth=1
movq 8(%r13), %rax
movq %rax, 8(%rbx)
movl (%r13), %esi
leaq L_.str(%rip), %rdi
xorl %eax, %eax
callq _printf
movq %r13, %rdi
callq _free
movq 8(%rbx), %r13
cmpq %r13, 8(%r13)
je LBB1_10
LBB1_4: ## =>This Loop Header: Depth=1
## Child Loop BB1_12 Depth 2
## Child Loop BB1_8 Depth 2
cmpl $2, %r14d
jl LBB1_9
## %bb.5: ## in Loop: Header=BB1_4 Depth=1
## implicit-def: $rbx
movl %r15d, %eax
cmpl $7, -44(%rbp) ## 4-byte Folded Reload
jb LBB1_6
.p2align 4, 0x90
LBB1_12: ## Parent Loop BB1_4 Depth=1
## => This Inner Loop Header: Depth=2
movq 8(%r13), %rcx
movq 8(%rcx), %rcx
movq 8(%rcx), %rcx
movq 8(%rcx), %rcx
movq 8(%rcx), %rcx
movq 8(%rcx), %rcx
movq 8(%rcx), %rbx
movq 8(%rbx), %r13
addl $-8, %eax
jne LBB1_12
LBB1_6: ## in Loop: Header=BB1_4 Depth=1
testl %r12d, %r12d
je LBB1_9
## %bb.7: ## in Loop: Header=BB1_4 Depth=1
movl %r12d, %eax
.p2align 4, 0x90
LBB1_8: ## Parent Loop BB1_4 Depth=1
## => This Inner Loop Header: Depth=2
movq %r13, %rbx
movq 8(%r13), %r13
decl %eax
jne LBB1_8
jmp LBB1_9
LBB1_10:
movl (%r13), %esi
leaq L_.str(%rip), %rdi
xorl %eax, %eax
callq _printf
movq %r13, %rdi
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp _free ## TAILCALL
.cfi_endproc
## -- End function
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $24, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
leaq L_.str.1(%rip), %rdi
xorl %eax, %eax
callq _printf
leaq L_.str.2(%rip), %rdi
leaq -52(%rbp), %rsi
xorl %eax, %eax
callq _scanf
movl -52(%rbp), %r14d
movl $16, %edi
callq _malloc
movq %rax, %r15
movl $1, (%rax)
movq $0, 8(%rax)
cmpl $2, %r14d
jl LBB2_3
## %bb.1:
movl %r14d, %r12d
negl %r12d
movl $2, %ebx
movq %r15, %r13
.p2align 4, 0x90
LBB2_2: ## =>This Inner Loop Header: Depth=1
movl $16, %edi
callq _malloc
movl %ebx, (%rax)
movq $0, 8(%rax)
movq %rax, 8(%r13)
leal (%r12,%rbx), %ecx
incl %ecx
movl %ebx, %edx
incl %edx
movq %rax, %r13
movl %edx, %ebx
cmpl $1, %ecx
jne LBB2_2
LBB2_3:
movq %r15, 8(%rax)
leaq L_.str.3(%rip), %rdi
movl %r14d, %esi
xorl %eax, %eax
callq _printf
leaq L_.str.2(%rip), %r14
leaq -48(%rbp), %rsi
movq %r14, %rdi
xorl %eax, %eax
callq _scanf
leaq L_.str.4(%rip), %rdi
xorl %eax, %eax
callq _printf
leaq -44(%rbp), %rsi
movq %r14, %rdi
xorl %eax, %eax
callq _scanf
movl -48(%rbp), %esi
movl -44(%rbp), %edx
movq %r15, %rdi
callq _findAndRemoveK
movl (%r15), %esi
leaq L_.str.5(%rip), %rdi
xorl %eax, %eax
callq _printf
xorl %eax, %eax
addq $24, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "\345\207\272\345\210\227\344\272\272\347\232\204\347\274\226\345\217\267\344\270\272:%d\n"
L_.str.1: ## @.str.1
.asciz "\350\276\223\345\205\245\345\234\206\346\241\214\344\270\212\347\232\204\344\272\272\346\225\260n:"
L_.str.2: ## @.str.2
.asciz "%d"
L_.str.3: ## @.str.3
.asciz "\344\273\216\347\254\254k\344\272\272\345\274\200\345\247\213\346\212\245\346\225\260(k>1\344\270\224k<%d)\357\274\232"
L_.str.4: ## @.str.4
.asciz "\346\225\260\345\210\260m\347\232\204\344\272\272\345\207\272\345\210\227\357\274\232"
L_.str.5: ## @.str.5
.asciz "%d \n"
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _initLink ; -- Begin function initLink
.p2align 2
_initLink: ; @initLink
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x20, x0
mov w0, #16
bl _malloc
mov x19, x0
mov w8, #1
str w8, [x0]
str xzr, [x0, #8]
cmp w20, #2
b.lt LBB0_3
; %bb.1:
mov w21, #1
mov x22, x19
LBB0_2: ; =>This Inner Loop Header: Depth=1
add w21, w21, #1
mov w0, #16
bl _malloc
str w21, [x0]
str xzr, [x0, #8]
str x0, [x22, #8]
mov x22, x0
cmp w20, w21
b.ne LBB0_2
LBB0_3:
str x19, [x0, #8]
mov x0, x19
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
.globl _findAndRemoveK ; -- Begin function findAndRemoveK
.p2align 2
_findAndRemoveK: ; @findAndRemoveK
.cfi_startproc
; %bb.0:
sub sp, sp, #80
.cfi_def_cfa_offset 80
stp x24, x23, [sp, #16] ; 16-byte Folded Spill
stp x22, x21, [sp, #32] ; 16-byte Folded Spill
stp x20, x19, [sp, #48] ; 16-byte Folded Spill
stp x29, x30, [sp, #64] ; 16-byte Folded Spill
add x29, sp, #64
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
mov x19, x2
ldr w8, [x0]
cmp w8, w1
b.ne LBB1_2
; %bb.1:
mov x22, x0
mov x20, x0
b LBB1_3
LBB1_2: ; =>This Inner Loop Header: Depth=1
ldr x20, [x0, #8]
ldr w8, [x20]
mov x22, x0
mov x0, x20
cmp w8, w1
b.ne LBB1_2
LBB1_3:
ldr x8, [x20, #8]
cmp x8, x20
b.eq LBB1_8
; %bb.4:
sub w23, w19, #1
Lloh0:
adrp x21, l_.str@PAGE
Lloh1:
add x21, x21, l_.str@PAGEOFF
b LBB1_6
LBB1_5: ; in Loop: Header=BB1_6 Depth=1
ldr x8, [x20, #8]
str x8, [x22, #8]
ldr w8, [x20]
str x8, [sp]
mov x0, x21
bl _printf
mov x0, x20
bl _free
ldr x20, [x22, #8]
ldr x8, [x20, #8]
cmp x8, x20
b.eq LBB1_8
LBB1_6: ; =>This Loop Header: Depth=1
; Child Loop BB1_7 Depth 2
mov x8, x23
cmp w19, #2
b.lt LBB1_5
LBB1_7: ; Parent Loop BB1_6 Depth=1
; => This Inner Loop Header: Depth=2
mov x22, x20
ldr x20, [x20, #8]
subs w8, w8, #1
b.ne LBB1_7
b LBB1_5
LBB1_8:
ldr w8, [x20]
str x8, [sp]
Lloh2:
adrp x0, l_.str@PAGE
Lloh3:
add x0, x0, l_.str@PAGEOFF
bl _printf
mov x0, x20
ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
ldp x22, x21, [sp, #32] ; 16-byte Folded Reload
ldp x24, x23, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #80
b _free
.loh AdrpAdd Lloh0, Lloh1
.loh AdrpAdd Lloh2, Lloh3
.cfi_endproc
; -- End function
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
sub sp, sp, #80
.cfi_def_cfa_offset 80
stp x22, x21, [sp, #32] ; 16-byte Folded Spill
stp x20, x19, [sp, #48] ; 16-byte Folded Spill
stp x29, x30, [sp, #64] ; 16-byte Folded Spill
add x29, sp, #64
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
Lloh4:
adrp x0, l_.str.1@PAGE
Lloh5:
add x0, x0, l_.str.1@PAGEOFF
bl _printf
add x8, sp, #28
str x8, [sp]
Lloh6:
adrp x0, l_.str.2@PAGE
Lloh7:
add x0, x0, l_.str.2@PAGEOFF
bl _scanf
ldr w20, [sp, #28]
mov w0, #16
bl _malloc
mov x19, x0
mov w8, #1
str w8, [x0]
str xzr, [x0, #8]
cmp w20, #2
b.lt LBB2_3
; %bb.1:
mov w21, #1
mov x22, x19
LBB2_2: ; =>This Inner Loop Header: Depth=1
add w21, w21, #1
mov w0, #16
bl _malloc
str w21, [x0]
str xzr, [x0, #8]
str x0, [x22, #8]
mov x22, x0
cmp w20, w21
b.ne LBB2_2
LBB2_3:
str x19, [x0, #8]
str x20, [sp]
Lloh8:
adrp x0, l_.str.3@PAGE
Lloh9:
add x0, x0, l_.str.3@PAGEOFF
bl _printf
add x8, sp, #24
str x8, [sp]
Lloh10:
adrp x20, l_.str.2@PAGE
Lloh11:
add x20, x20, l_.str.2@PAGEOFF
mov x0, x20
bl _scanf
Lloh12:
adrp x0, l_.str.4@PAGE
Lloh13:
add x0, x0, l_.str.4@PAGEOFF
bl _printf
add x8, sp, #20
str x8, [sp]
mov x0, x20
bl _scanf
ldp w2, w1, [sp, #20]
mov x0, x19
bl _findAndRemoveK
ldr w8, [x19]
str x8, [sp]
Lloh14:
adrp x0, l_.str.5@PAGE
Lloh15:
add x0, x0, l_.str.5@PAGEOFF
bl _printf
mov w0, #0
ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
ldp x22, x21, [sp, #32] ; 16-byte Folded Reload
add sp, sp, #80
ret
.loh AdrpAdd Lloh6, Lloh7
.loh AdrpAdd Lloh4, Lloh5
.loh AdrpAdd Lloh14, Lloh15
.loh AdrpAdd Lloh12, Lloh13
.loh AdrpAdd Lloh10, Lloh11
.loh AdrpAdd Lloh8, Lloh9
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "\345\207\272\345\210\227\344\272\272\347\232\204\347\274\226\345\217\267\344\270\272:%d\n"
l_.str.1: ; @.str.1
.asciz "\350\276\223\345\205\245\345\234\206\346\241\214\344\270\212\347\232\204\344\272\272\346\225\260n:"
l_.str.2: ; @.str.2
.asciz "%d"
l_.str.3: ; @.str.3
.asciz "\344\273\216\347\254\254k\344\272\272\345\274\200\345\247\213\346\212\245\346\225\260(k>1\344\270\224k<%d)\357\274\232"
l_.str.4: ; @.str.4
.asciz "\346\225\260\345\210\260m\347\232\204\344\272\272\345\207\272\345\210\227\357\274\232"
l_.str.5: ; @.str.5
.asciz "%d \n"
.subsections_via_symbols
| the_stack_data/797653.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _htx_strlen ## -- Begin function htx_strlen
.p2align 4, 0x90
_htx_strlen: ## @htx_strlen
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
popq %rbp
jmp _strlen ## TAILCALL
.cfi_endproc
## -- End function
.globl _htx_strcpy ## -- Begin function htx_strcpy
.p2align 4, 0x90
_htx_strcpy: ## @htx_strcpy
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
popq %rbp
jmp _strcpy ## TAILCALL
.cfi_endproc
## -- End function
.globl _htx_strncpy ## -- Begin function htx_strncpy
.p2align 4, 0x90
_htx_strncpy: ## @htx_strncpy
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
popq %rbp
jmp _strncpy ## TAILCALL
.cfi_endproc
## -- End function
.globl _htx_strcat ## -- Begin function htx_strcat
.p2align 4, 0x90
_htx_strcat: ## @htx_strcat
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
popq %rbp
jmp _strcat ## TAILCALL
.cfi_endproc
## -- End function
.globl _htx_strchr ## -- Begin function htx_strchr
.p2align 4, 0x90
_htx_strchr: ## @htx_strchr
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
popq %rbp
jmp _strchr ## TAILCALL
.cfi_endproc
## -- End function
.globl _htx_strcmp ## -- Begin function htx_strcmp
.p2align 4, 0x90
_htx_strcmp: ## @htx_strcmp
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
popq %rbp
jmp _strcmp ## TAILCALL
.cfi_endproc
## -- End function
.globl _htx_strncmp ## -- Begin function htx_strncmp
.p2align 4, 0x90
_htx_strncmp: ## @htx_strncmp
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
popq %rbp
jmp _strncmp ## TAILCALL
.cfi_endproc
## -- End function
.globl _htx_strspn ## -- Begin function htx_strspn
.p2align 4, 0x90
_htx_strspn: ## @htx_strspn
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
popq %rbp
jmp _strspn ## TAILCALL
.cfi_endproc
## -- End function
.globl _htx_strcspn ## -- Begin function htx_strcspn
.p2align 4, 0x90
_htx_strcspn: ## @htx_strcspn
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
popq %rbp
jmp _strcspn ## TAILCALL
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _htx_strlen ; -- Begin function htx_strlen
.p2align 2
_htx_strlen: ; @htx_strlen
.cfi_startproc
; %bb.0:
b _strlen
.cfi_endproc
; -- End function
.globl _htx_strcpy ; -- Begin function htx_strcpy
.p2align 2
_htx_strcpy: ; @htx_strcpy
.cfi_startproc
; %bb.0:
b _strcpy
.cfi_endproc
; -- End function
.globl _htx_strncpy ; -- Begin function htx_strncpy
.p2align 2
_htx_strncpy: ; @htx_strncpy
.cfi_startproc
; %bb.0:
b _strncpy
.cfi_endproc
; -- End function
.globl _htx_strcat ; -- Begin function htx_strcat
.p2align 2
_htx_strcat: ; @htx_strcat
.cfi_startproc
; %bb.0:
b _strcat
.cfi_endproc
; -- End function
.globl _htx_strchr ; -- Begin function htx_strchr
.p2align 2
_htx_strchr: ; @htx_strchr
.cfi_startproc
; %bb.0:
b _strchr
.cfi_endproc
; -- End function
.globl _htx_strcmp ; -- Begin function htx_strcmp
.p2align 2
_htx_strcmp: ; @htx_strcmp
.cfi_startproc
; %bb.0:
b _strcmp
.cfi_endproc
; -- End function
.globl _htx_strncmp ; -- Begin function htx_strncmp
.p2align 2
_htx_strncmp: ; @htx_strncmp
.cfi_startproc
; %bb.0:
b _strncmp
.cfi_endproc
; -- End function
.globl _htx_strspn ; -- Begin function htx_strspn
.p2align 2
_htx_strspn: ; @htx_strspn
.cfi_startproc
; %bb.0:
b _strspn
.cfi_endproc
; -- End function
.globl _htx_strcspn ; -- Begin function htx_strcspn
.p2align 2
_htx_strcspn: ; @htx_strcspn
.cfi_startproc
; %bb.0:
b _strcspn
.cfi_endproc
; -- End function
.subsections_via_symbols
| the_stack_data/131591.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
movl $19464, %eax ## imm = 0x4C08
callq ____chkstk_darwin
subq %rax, %rsp
popq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -48(%rbp)
movl $1, -19496(%rbp)
movl $1, %r14d
movl $2, %edi
movl $1, %esi
xorl %edx, %edx
callq _socket
movl %eax, %ebx
leaq -19496(%rbp), %rcx
movl %eax, %edi
movl $65535, %esi ## imm = 0xFFFF
movl $4, %edx
movl $4, %r8d
callq _setsockopt
testl %eax, %eax
js LBB0_1
## %bb.4:
movb $2, -19471(%rbp)
leaq L_.str.1(%rip), %rdi
callq _inet_addr
movl %eax, -19468(%rbp)
movw $14619, -19470(%rbp) ## imm = 0x391B
leaq -19472(%rbp), %rsi
movl %ebx, %edi
movl $16, %edx
callq _bind
movl %ebx, %edi
movl $5, %esi
callq _listen
movl $16, -19492(%rbp)
leaq -19488(%rbp), %rsi
leaq -19492(%rbp), %rdx
movl %ebx, %edi
callq _accept
movl %eax, %r15d
leaq -1072(%rbp), %rbx
leaq -19456(%rbp), %r12
leaq L_.str.2(%rip), %r13
leaq L_.str.3(%rip), %r14
.p2align 4, 0x90
LBB0_5: ## =>This Inner Loop Header: Depth=1
movl $1024, %esi ## imm = 0x400
movq %rbx, %rdi
callq ___bzero
movl $18384, %esi ## imm = 0x47D0
movq %r12, %rdi
callq ___bzero
movl -19484(%rbp), %edi
callq _inet_ntoa
movq %r13, %rdi
movq %rax, %rsi
xorl %eax, %eax
callq _printf
movq ___stdinp@GOTPCREL(%rip), %rax
movq (%rax), %rdx
movq %rbx, %rdi
movl $1024, %esi ## imm = 0x400
callq _fgets
movq %rbx, %rdi
movq %r14, %rsi
callq _strtok
movl $1024, %edx ## imm = 0x400
movl %r15d, %edi
movq %rbx, %rsi
callq _write
cmpb $113, -1072(%rbp)
je LBB0_6
## %bb.7: ## in Loop: Header=BB0_5 Depth=1
movl -1072(%rbp), %eax
movl $25699, %ecx ## imm = 0x6463
xorl %ecx, %eax
movzbl -1070(%rbp), %ecx
xorl $32, %ecx
orw %ax, %cx
je LBB0_5
## %bb.8: ## in Loop: Header=BB0_5 Depth=1
movq -1072(%rbp), %rax
movabsq $8313477165408150891, %rcx ## imm = 0x735F676F6C79656B
xorq %rcx, %rax
movl -1064(%rbp), %ecx
xorq $1953653108, %rcx ## imm = 0x74726174
orq %rax, %rcx
je LBB0_5
## %bb.9: ## in Loop: Header=BB0_5 Depth=1
movl -1072(%rbp), %eax
movl $1936876912, %ecx ## imm = 0x73726570
xorl %ecx, %eax
movl -1069(%rbp), %edx
movl $1953720691, %ecx ## imm = 0x74736973
xorl %ecx, %edx
xorl %ecx, %ecx
orl %eax, %edx
setne %cl
shll $6, %ecx
movl $18384, %edx ## imm = 0x47D0
movl %r15d, %edi
movq %r12, %rsi
callq _recv
leaq L_.str.8(%rip), %rdi
movq %r12, %rsi
xorl %eax, %eax
callq _printf
jmp LBB0_5
LBB0_6:
xorl %r14d, %r14d
jmp LBB0_2
LBB0_1:
leaq L_str(%rip), %rdi
callq _puts
LBB0_2:
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
cmpq -48(%rbp), %rax
jne LBB0_3
## %bb.10:
movl %r14d, %eax
addq $19464, %rsp ## imm = 0x4C08
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB0_3:
callq ___stack_chk_fail
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str.1: ## @.str.1
.asciz "192.168.2.126"
L_.str.2: ## @.str.2
.asciz "> Shell#%s-$: "
L_.str.3: ## @.str.3
.asciz "\n"
L_.str.5: ## @.str.5
.asciz "cd "
L_.str.6: ## @.str.6
.asciz "keylog_start"
L_.str.7: ## @.str.7
.asciz "persist"
L_.str.8: ## @.str.8
.asciz "%s"
L_str: ## @str
.asciz "Error setting TCP socket options "
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
stp x28, x27, [sp, #-96]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 96
stp x26, x25, [sp, #16] ; 16-byte Folded Spill
stp x24, x23, [sp, #32] ; 16-byte Folded Spill
stp x22, x21, [sp, #48] ; 16-byte Folded Spill
stp x20, x19, [sp, #64] ; 16-byte Folded Spill
stp x29, x30, [sp, #80] ; 16-byte Folded Spill
add x29, sp, #80
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
mov w9, #19472
Lloh0:
adrp x16, ___chkstk_darwin@GOTPAGE
Lloh1:
ldr x16, [x16, ___chkstk_darwin@GOTPAGEOFF]
blr x16
sub sp, sp, #4, lsl #12 ; =16384
sub sp, sp, #3088
Lloh2:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh3:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh4:
ldr x8, [x8]
stur x8, [x29, #-96]
mov w8, #1
str w8, [sp, #12]
mov w20, #2
mov w0, #2
mov w1, #1
mov w2, #0
bl _socket
mov x19, x0
add x3, sp, #12
mov w1, #65535
mov w2, #4
mov w4, #4
bl _setsockopt
tbnz w0, #31, LBB0_7
; %bb.1:
add x23, sp, #4, lsl #12 ; =16384
add x23, x23, #2048
strb w20, [sp, #33]
Lloh5:
adrp x0, l_.str.1@PAGE
Lloh6:
add x0, x0, l_.str.1@PAGEOFF
bl _inet_addr
str w0, [sp, #36]
mov w8, #14619
strh w8, [sp, #34]
mov w20, #16
add x1, sp, #32
mov x0, x19
mov w2, #16
bl _bind
mov x0, x19
mov w1, #5
bl _listen
str w20, [sp, #8]
add x1, sp, #16
add x2, sp, #8
mov x0, x19
bl _accept
mov x19, x0
Lloh7:
adrp x20, l_.str.2@PAGE
Lloh8:
add x20, x20, l_.str.2@PAGEOFF
Lloh9:
adrp x25, ___stdinp@GOTPAGE
Lloh10:
ldr x25, [x25, ___stdinp@GOTPAGEOFF]
Lloh11:
adrp x21, l_.str.3@PAGE
Lloh12:
add x21, x21, l_.str.3@PAGEOFF
mov w26, #25699
mov x27, #25963
movk x27, #27769, lsl #16
movk x27, #26479, lsl #32
movk x27, #29535, lsl #48
mov w28, #24948
movk w28, #29810, lsl #16
mov w24, #26995
movk w24, #29811, lsl #16
Lloh13:
adrp x22, l_.str.8@PAGE
Lloh14:
add x22, x22, l_.str.8@PAGEOFF
LBB0_2: ; =>This Inner Loop Header: Depth=1
add x0, sp, #4, lsl #12 ; =16384
add x0, x0, #2048
mov w1, #1024
bl _bzero
add x0, sp, #48
mov w1, #18384
bl _bzero
ldr w0, [sp, #20]
bl _inet_ntoa
str x0, [sp]
mov x0, x20
bl _printf
ldr x2, [x25]
add x0, sp, #4, lsl #12 ; =16384
add x0, x0, #2048
mov w1, #1024
bl _fgets
add x0, sp, #4, lsl #12 ; =16384
add x0, x0, #2048
mov x1, x21
bl _strtok
add x1, sp, #4, lsl #12 ; =16384
add x1, x1, #2048
mov x0, x19
mov w2, #1024
bl _write
ldrb w8, [x23]
cmp w8, #113
b.eq LBB0_6
; %bb.3: ; in Loop: Header=BB0_2 Depth=1
ldrh w8, [x23]
eor w8, w8, w26
ldrb w9, [x23, #2]
eor w9, w9, #0x20
orr w8, w8, w9
cbz w8, LBB0_2
; %bb.4: ; in Loop: Header=BB0_2 Depth=1
ldr x8, [sp, #18432]
eor x8, x8, x27
ldr w9, [x23, #8]
eor x9, x9, x28
orr x8, x8, x9
cbz x8, LBB0_2
; %bb.5: ; in Loop: Header=BB0_2 Depth=1
ldr w8, [x23]
mov w9, #25968
movk w9, #29554, lsl #16
eor w8, w8, w9
ldur w9, [x23, #3]
eor w9, w9, w24
orr w8, w8, w9
cmp w8, #0
cset w8, ne
lsl w3, w8, #6
add x1, sp, #48
mov x0, x19
mov w2, #18384
bl _recv
add x8, sp, #48
str x8, [sp]
mov x0, x22
bl _printf
b LBB0_2
LBB0_6:
mov w0, #0
b LBB0_8
LBB0_7:
Lloh15:
adrp x0, l_str@PAGE
Lloh16:
add x0, x0, l_str@PAGEOFF
bl _puts
mov w0, #1
LBB0_8:
ldur x8, [x29, #-96]
Lloh17:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh18:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh19:
ldr x9, [x9]
cmp x9, x8
b.ne LBB0_10
; %bb.9:
add sp, sp, #4, lsl #12 ; =16384
add sp, sp, #3088
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
ldp x26, x25, [sp, #16] ; 16-byte Folded Reload
ldp x28, x27, [sp], #96 ; 16-byte Folded Reload
ret
LBB0_10:
bl ___stack_chk_fail
.loh AdrpLdrGotLdr Lloh2, Lloh3, Lloh4
.loh AdrpLdrGot Lloh0, Lloh1
.loh AdrpAdd Lloh13, Lloh14
.loh AdrpAdd Lloh11, Lloh12
.loh AdrpLdrGot Lloh9, Lloh10
.loh AdrpAdd Lloh7, Lloh8
.loh AdrpAdd Lloh5, Lloh6
.loh AdrpAdd Lloh15, Lloh16
.loh AdrpLdrGotLdr Lloh17, Lloh18, Lloh19
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str.1: ; @.str.1
.asciz "192.168.2.126"
l_.str.2: ; @.str.2
.asciz "> Shell#%s-$: "
l_.str.3: ; @.str.3
.asciz "\n"
l_.str.5: ; @.str.5
.asciz "cd "
l_.str.6: ; @.str.6
.asciz "keylog_start"
l_.str.7: ; @.str.7
.asciz "persist"
l_.str.8: ; @.str.8
.asciz "%s"
l_str: ; @str
.asciz "Error setting TCP socket options "
.subsections_via_symbols
| the_stack_data/193892147.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _h2o_qpack_lookup_expires ## -- Begin function h2o_qpack_lookup_expires
.p2align 4, 0x90
_h2o_qpack_lookup_expires: ## @h2o_qpack_lookup_expires
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movl $0, (%rsi)
movl $-1, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _h2o_qpack_lookup_expires ; -- Begin function h2o_qpack_lookup_expires
.p2align 2
_h2o_qpack_lookup_expires: ; @h2o_qpack_lookup_expires
.cfi_startproc
; %bb.0:
str wzr, [x1]
mov w0, #-1
ret
.cfi_endproc
; -- End function
.subsections_via_symbols
| AnghaBench/h2o/lib/common/extr_token_table.h_h2o_qpack_lookup_expires.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function df_v3_6_pmc_stop
_df_v3_6_pmc_stop: ## @df_v3_6_pmc_stop
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $16, %rsp
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
xorl %ebx, %ebx
cmpl $128, (%rdi)
jne LBB0_4
## %bb.1:
movl %edx, %r15d
movl %esi, %r14d
movq %rdi, %r12
leaq -40(%rbp), %rdx
leaq -36(%rbp), %rcx
leaq -48(%rbp), %r8
leaq -44(%rbp), %r9
callq _df_v3_6_pmc_get_ctrl_settings
movl %eax, %ebx
testl %eax, %eax
jne LBB0_4
## %bb.2:
movl -40(%rbp), %esi
movl -36(%rbp), %ecx
xorl %ebx, %ebx
movq %r12, %rdi
xorl %edx, %edx
xorl %r8d, %r8d
callq _df_v3_6_perfmon_wreg
testl %r15d, %r15d
je LBB0_4
## %bb.3:
movq %r12, %rdi
movl %r14d, %esi
callq _df_v3_6_pmc_release_cntr
LBB0_4:
movl %ebx, %eax
addq $16, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.no_dead_strip _df_v3_6_pmc_stop
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function df_v3_6_pmc_stop
_df_v3_6_pmc_stop: ; @df_v3_6_pmc_stop
.cfi_startproc
; %bb.0:
sub sp, sp, #64
.cfi_def_cfa_offset 64
stp x22, x21, [sp, #16] ; 16-byte Folded Spill
stp x20, x19, [sp, #32] ; 16-byte Folded Spill
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
ldr w8, [x0]
cmp w8, #128
b.ne LBB0_4
; %bb.1:
mov x21, x2
mov x19, x1
mov x20, x0
add x2, sp, #12
add x3, sp, #8
add x4, sp, #4
mov x5, sp
bl _df_v3_6_pmc_get_ctrl_settings
cbnz w0, LBB0_5
; %bb.2:
ldp w3, w1, [sp, #8]
mov x0, x20
mov w2, #0
mov w4, #0
bl _df_v3_6_perfmon_wreg
cbz w21, LBB0_4
; %bb.3:
mov x0, x20
mov x1, x19
bl _df_v3_6_pmc_release_cntr
LBB0_4:
mov w0, #0
LBB0_5:
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #64
ret
.cfi_endproc
; -- End function
.no_dead_strip _df_v3_6_pmc_stop
.subsections_via_symbols
| AnghaBench/linux/drivers/gpu/drm/amd/amdgpu/extr_df_v3_6.c_df_v3_6_pmc_stop.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function ipw_write_packet_sent_callback
_ipw_write_packet_sent_callback: ## @ipw_write_packet_sent_callback
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
subl %esi, (%rdi)
popq %rbp
retq
.cfi_endproc
## -- End function
.no_dead_strip _ipw_write_packet_sent_callback
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function ipw_write_packet_sent_callback
_ipw_write_packet_sent_callback: ; @ipw_write_packet_sent_callback
.cfi_startproc
; %bb.0:
ldr w8, [x0]
sub w8, w8, w1
str w8, [x0]
ret
.cfi_endproc
; -- End function
.no_dead_strip _ipw_write_packet_sent_callback
.subsections_via_symbols
| AnghaBench/fastsocket/kernel/drivers/char/pcmcia/ipwireless/extr_tty.c_ipw_write_packet_sent_callback.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _fpregs_active ## -- Begin function fpregs_active
.p2align 4, 0x90
_fpregs_active: ## @fpregs_active
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
movq %rsi, %rbx
callq _tsk_used_math
testq %rax, %rax
je LBB0_1
## %bb.2:
movl (%rbx), %eax
jmp LBB0_3
LBB0_1:
xorl %eax, %eax
LBB0_3:
addq $8, %rsp
popq %rbx
popq %rbp
retq
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _fpregs_active ; -- Begin function fpregs_active
.p2align 2
_fpregs_active: ; @fpregs_active
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x1
bl _tsk_used_math
cbz x0, LBB0_2
; %bb.1:
ldr w0, [x19]
LBB0_2:
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
.subsections_via_symbols
| AnghaBench/fastsocket/kernel/arch/x86/kernel/extr_i387.c_fpregs_active.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _checkhost ## -- Begin function checkhost
.p2align 4, 0x90
_checkhost: ## @checkhost
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
movq %rdi, %rbx
callq _inet_addr
movq _INADDR_NONE@GOTPCREL(%rip), %rcx
cmpq (%rcx), %rax
jne LBB0_3
## %bb.1:
movq %rbx, %rdi
callq _gethostbyname
testq %rax, %rax
je LBB0_4
## %bb.2:
movq (%rax), %rax
movq (%rax), %rax
LBB0_3:
addq $8, %rsp
popq %rbx
popq %rbp
retq
LBB0_4:
leaq L_str(%rip), %rdi
callq _puts
movl $1, %edi
callq _exit
.cfi_endproc
## -- End function
.comm _INADDR_NONE,8,3 ## @INADDR_NONE
.section __TEXT,__cstring,cstring_literals
L_str: ## @str
.asciz "unable to resolv host..."
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _checkhost ; -- Begin function checkhost
.p2align 2
_checkhost: ; @checkhost
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x0
bl _inet_addr
Lloh0:
adrp x8, _INADDR_NONE@GOTPAGE
Lloh1:
ldr x8, [x8, _INADDR_NONE@GOTPAGEOFF]
Lloh2:
ldr x8, [x8]
cmp x0, x8
b.ne LBB0_3
; %bb.1:
mov x0, x19
bl _gethostbyname
cbz x0, LBB0_4
; %bb.2:
ldr x8, [x0]
ldr x0, [x8]
LBB0_3:
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
LBB0_4:
Lloh3:
adrp x0, l_str@PAGE
Lloh4:
add x0, x0, l_str@PAGEOFF
bl _puts
mov w0, #1
bl _exit
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpAdd Lloh3, Lloh4
.cfi_endproc
; -- End function
.comm _INADDR_NONE,8,3 ; @INADDR_NONE
.section __TEXT,__cstring,cstring_literals
l_str: ; @str
.asciz "unable to resolv host..."
.subsections_via_symbols
| AnghaBench/exploitdb/exploits/php/webapps/extr_25964.c_checkhost.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _NoUiMessageBox ## -- Begin function NoUiMessageBox
.p2align 4, 0x90
_NoUiMessageBox: ## @NoUiMessageBox
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
callq _puts
leaq L_str(%rip), %rdi
callq _puts
xorl %eax, %eax
popq %rbp
jmp _MachConsGetCh ## TAILCALL
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_str: ## @str
.asciz "Press any key"
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _NoUiMessageBox ; -- Begin function NoUiMessageBox
.p2align 2
_NoUiMessageBox: ; @NoUiMessageBox
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
bl _puts
Lloh0:
adrp x0, l_str@PAGE
Lloh1:
add x0, x0, l_str@PAGEOFF
bl _puts
ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
b _MachConsGetCh
.loh AdrpAdd Lloh0, Lloh1
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_str: ; @str
.asciz "Press any key"
.subsections_via_symbols
| AnghaBench/reactos/boot/freeldr/freeldr/ui/extr_noui.c_NoUiMessageBox.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
cmpl $3, %edi
jne LBB0_1
## %bb.3:
movq %rsi, %r12
xorl %r14d, %r14d
xorl %edi, %edi
callq _setuid
movq 8(%r12), %rbx
movq %rbx, %rdi
callq _strlen
movq %rax, %rdi
callq _malloc
movq %rax, %r15
movq %rax, %rdi
movq %rbx, %rsi
callq _strcpy
movq 16(%r12), %rbx
movq %rbx, %rdi
callq _strlen
movq %rax, %rdi
callq _malloc
movq %rax, %r12
movq %rax, %rdi
movq %rbx, %rsi
callq _strcpy
movl $1, %edi
movl $2000, %esi ## imm = 0x7D0
callq _calloc
movq %rax, %rbx
leaq L_.str.1(%rip), %rsi
movl $2000, %edx ## imm = 0x7D0
movq %rax, %rdi
callq ___strcat_chk
movl $2000, %edx ## imm = 0x7D0
movq %rbx, %rdi
movq %r15, %rsi
callq ___strcat_chk
leaq L_.str.2(%rip), %rsi
movl $2000, %edx ## imm = 0x7D0
movq %rbx, %rdi
callq ___strcat_chk
movl $2000, %edx ## imm = 0x7D0
movq %rbx, %rdi
movq %r12, %rsi
callq ___strcat_chk
movq %rbx, %rdi
callq _system
movq %r12, %rdi
movl $47, %esi
callq _strchr
testq %rax, %rax
je LBB0_4
## %bb.5:
movb $0, (%rax)
jmp LBB0_6
LBB0_1:
movq ___stderrp@GOTPCREL(%rip), %rax
movq (%rax), %rcx
leaq L_.str(%rip), %rdi
movl $32, %esi
jmp LBB0_2
LBB0_4:
movq ___stderrp@GOTPCREL(%rip), %rax
movq (%rax), %rcx
leaq L_.str.4(%rip), %rdi
movl $33, %esi
LBB0_2:
movl $1, %edx
callq _fwrite
movl $1, %r14d
LBB0_6:
movl %r14d, %eax
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "Need interface & ip as argument\n"
L_.str.1: ## @.str.1
.asciz "ifconfig "
L_.str.2: ## @.str.2
.asciz " inet6 add "
L_.str.4: ## @.str.4
.asciz "New IP needs a /something prefix\n"
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
cmp w0, #3
b.ne LBB0_3
; %bb.1:
mov x19, x1
mov w0, #0
bl _setuid
ldr x20, [x19, #8]
mov x0, x20
bl _strlen
bl _malloc
mov x21, x0
mov x1, x20
bl _strcpy
ldr x19, [x19, #16]
mov x0, x19
bl _strlen
bl _malloc
mov x20, x0
mov x1, x19
bl _strcpy
mov w0, #1
mov w1, #2000
bl _calloc
mov x19, x0
Lloh0:
adrp x1, l_.str.1@PAGE
Lloh1:
add x1, x1, l_.str.1@PAGEOFF
mov w2, #2000
bl ___strcat_chk
mov x0, x19
mov x1, x21
mov w2, #2000
bl ___strcat_chk
Lloh2:
adrp x1, l_.str.2@PAGE
Lloh3:
add x1, x1, l_.str.2@PAGEOFF
mov x0, x19
mov w2, #2000
bl ___strcat_chk
mov x0, x19
mov x1, x20
mov w2, #2000
bl ___strcat_chk
mov x0, x19
bl _system
mov x0, x20
mov w1, #47
bl _strchr
cbz x0, LBB0_4
; %bb.2:
mov w19, #0
strb wzr, [x0]
b LBB0_5
LBB0_3:
Lloh4:
adrp x8, ___stderrp@GOTPAGE
Lloh5:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh6:
ldr x3, [x8]
Lloh7:
adrp x0, l_.str@PAGE
Lloh8:
add x0, x0, l_.str@PAGEOFF
mov w19, #1
mov w1, #32
mov w2, #1
bl _fwrite
b LBB0_5
LBB0_4:
Lloh9:
adrp x8, ___stderrp@GOTPAGE
Lloh10:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh11:
ldr x3, [x8]
Lloh12:
adrp x0, l_.str.4@PAGE
Lloh13:
add x0, x0, l_.str.4@PAGEOFF
mov w19, #1
mov w1, #33
mov w2, #1
bl _fwrite
; kill: def $w19 killed $w19 killed $x19 def $x19
LBB0_5:
mov x0, x19
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
.loh AdrpAdd Lloh2, Lloh3
.loh AdrpAdd Lloh0, Lloh1
.loh AdrpAdd Lloh7, Lloh8
.loh AdrpLdrGotLdr Lloh4, Lloh5, Lloh6
.loh AdrpAdd Lloh12, Lloh13
.loh AdrpLdrGotLdr Lloh9, Lloh10, Lloh11
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "Need interface & ip as argument\n"
l_.str.1: ; @.str.1
.asciz "ifconfig "
l_.str.2: ; @.str.2
.asciz " inet6 add "
l_.str.4: ; @.str.4
.asciz "New IP needs a /something prefix\n"
.subsections_via_symbols
| the_stack_data/11377.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function float_to_u32_mul_1000000
_float_to_u32_mul_1000000: ## @float_to_u32_mul_1000000
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
testl %esi, %esi
js LBB0_3
## %bb.1:
movl $-1, %eax
cmpl $2139095040, %esi ## imm = 0x7F800000
jne LBB0_4
LBB0_2:
popq %rbp
retq
LBB0_3:
leaq L_.str(%rip), %rsi
jmp LBB0_6
LBB0_4:
movl %esi, %ecx
notl %ecx
testl $2139095040, %ecx ## imm = 0x7F800000
jne LBB0_7
## %bb.5:
leaq L_.str.1(%rip), %rsi
LBB0_6:
callq _dev_err
LBB0_10:
xorl %eax, %eax
popq %rbp
retq
LBB0_7:
testl %esi, %esi
je LBB0_10
## %bb.8:
cmpl $1333788672, %esi ## imm = 0x4F800000
ja LBB0_2
## %bb.9:
movl %esi, %edx
shrl $23, %edx
movl %esi, %eax
andl $8388607, %eax ## imm = 0x7FFFFF
orl $8388608, %eax ## imm = 0x800000
imull $1000000, %eax, %eax ## imm = 0xF4240
movb $127, %cl
subb %dl, %cl
movl %eax, %edi
sarl %cl, %edi
addb $-127, %dl
movl %edx, %ecx
shll %cl, %eax
cmpl $1065353216, %esi ## imm = 0x3F800000
cmovbl %edi, %eax
sarl $23, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "this is a negative number\n"
L_.str.1: ## @.str.1
.asciz "NaN or other special number\n"
.no_dead_strip _float_to_u32_mul_1000000
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function float_to_u32_mul_1000000
_float_to_u32_mul_1000000: ; @float_to_u32_mul_1000000
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
tbnz w1, #31, LBB0_3
; %bb.1:
mov w8, #2139095040
cmp w1, w8
b.ne LBB0_4
LBB0_2:
mov w0, #-1
ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
ret
LBB0_3:
Lloh0:
adrp x1, l_.str@PAGE
Lloh1:
add x1, x1, l_.str@PAGEOFF
b LBB0_6
LBB0_4:
mvn w8, w1
tst w8, #0x7f800000
b.ne LBB0_8
; %bb.5:
Lloh2:
adrp x1, l_.str.1@PAGE
Lloh3:
add x1, x1, l_.str.1@PAGEOFF
LBB0_6:
bl _dev_err
LBB0_7:
mov w0, #0
ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
ret
LBB0_8:
cbz w1, LBB0_7
; %bb.9:
mov w8, #1333788672
cmp w1, w8
b.hi LBB0_2
; %bb.10:
lsr w8, w1, #23
mov w9, #8388608
bfxil w9, w1, #0, #23
mov w10, #16960
movk w10, #15, lsl #16
mul w9, w9, w10
mvn w10, w8
asr w10, w9, w10
subs w8, w8, #127
lsl w8, w9, w8
csel w8, w10, w8, lo
asr w0, w8, #23
ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
ret
.loh AdrpAdd Lloh0, Lloh1
.loh AdrpAdd Lloh2, Lloh3
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "this is a negative number\n"
l_.str.1: ; @.str.1
.asciz "NaN or other special number\n"
.no_dead_strip _float_to_u32_mul_1000000
.subsections_via_symbols
| AnghaBench/linux/drivers/media/i2c/smiapp/extr_smiapp-regs.c_float_to_u32_mul_1000000.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function ocfs2_calc_group_alloc_credits
_ocfs2_calc_group_alloc_credits: ## @ocfs2_calc_group_alloc_credits
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq _OCFS2_SUBALLOC_ALLOC@GOTPCREL(%rip), %rax
movl (%rax), %eax
addl $4, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _OCFS2_SUBALLOC_ALLOC,4,2 ## @OCFS2_SUBALLOC_ALLOC
.no_dead_strip _ocfs2_calc_group_alloc_credits
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function ocfs2_calc_group_alloc_credits
_ocfs2_calc_group_alloc_credits: ; @ocfs2_calc_group_alloc_credits
.cfi_startproc
; %bb.0:
Lloh0:
adrp x8, _OCFS2_SUBALLOC_ALLOC@GOTPAGE
Lloh1:
ldr x8, [x8, _OCFS2_SUBALLOC_ALLOC@GOTPAGEOFF]
Lloh2:
ldr w8, [x8]
add w0, w8, #4
ret
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _OCFS2_SUBALLOC_ALLOC,4,2 ; @OCFS2_SUBALLOC_ALLOC
.no_dead_strip _ocfs2_calc_group_alloc_credits
.subsections_via_symbols
| AnghaBench/linux/fs/ocfs2/extr_journal.h_ocfs2_calc_group_alloc_credits.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
leaq L_.str(%rip), %rbx
movq %rbx, %rdi
xorl %esi, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $1, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $2, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $3, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $4, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $5, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $6, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $7, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $8, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $9, %esi
xorl %eax, %eax
callq _printf
leaq L_.str.1(%rip), %rbx
movq %rbx, %rdi
movl $9, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
xorl %esi, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $10, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $1, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $12, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $2, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $15, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $3, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $19, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $4, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $24, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $5, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $30, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $6, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $37, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $7, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $45, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $8, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $54, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $9, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $64, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $10, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $75, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $11, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $87, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $12, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $100, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $13, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $114, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $14, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $129, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $15, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $145, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $16, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $162, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $17, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $180, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $18, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $199, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $19, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $219, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $20, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $240, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $21, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $262, %esi ## imm = 0x106
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $22, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $285, %esi ## imm = 0x11D
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $23, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $309, %esi ## imm = 0x135
xorl %eax, %eax
callq _printf
xorl %eax, %eax
addq $8, %rsp
popq %rbx
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "Vez %d \n"
L_.str.1: ## @.str.1
.asciz "%d "
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
sub sp, sp, #112
.cfi_def_cfa_offset 112
stp x28, x27, [sp, #16] ; 16-byte Folded Spill
stp x26, x25, [sp, #32] ; 16-byte Folded Spill
stp x24, x23, [sp, #48] ; 16-byte Folded Spill
stp x22, x21, [sp, #64] ; 16-byte Folded Spill
stp x20, x19, [sp, #80] ; 16-byte Folded Spill
stp x29, x30, [sp, #96] ; 16-byte Folded Spill
add x29, sp, #96
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
str xzr, [sp]
Lloh0:
adrp x19, l_.str@PAGE
Lloh1:
add x19, x19, l_.str@PAGEOFF
mov x0, x19
bl _printf
mov w24, #1
str x24, [sp]
mov x0, x19
bl _printf
mov w22, #2
str x22, [sp]
mov x0, x19
bl _printf
mov w21, #3
str x21, [sp]
mov x0, x19
bl _printf
mov w20, #4
str x20, [sp]
mov x0, x19
bl _printf
mov w28, #5
str x28, [sp]
mov x0, x19
bl _printf
mov w27, #6
str x27, [sp]
mov x0, x19
bl _printf
mov w26, #7
str x26, [sp]
mov x0, x19
bl _printf
mov w8, #8
str x8, [sp]
mov x0, x19
bl _printf
mov w23, #9
str x23, [sp]
mov x0, x19
bl _printf
str x23, [sp]
Lloh2:
adrp x19, l_.str.1@PAGE
Lloh3:
add x19, x19, l_.str.1@PAGEOFF
mov x0, x19
bl _printf
str xzr, [sp]
mov x0, x19
bl _printf
mov w25, #10
str x25, [sp]
mov x0, x19
bl _printf
str x24, [sp]
mov x0, x19
bl _printf
mov w24, #12
str x24, [sp]
mov x0, x19
bl _printf
str x22, [sp]
mov x0, x19
bl _printf
mov w22, #15
str x22, [sp]
mov x0, x19
bl _printf
str x21, [sp]
mov x0, x19
bl _printf
mov w21, #19
str x21, [sp]
mov x0, x19
bl _printf
str x20, [sp]
mov x0, x19
bl _printf
mov w8, #24
str x8, [sp]
mov x0, x19
bl _printf
str x28, [sp]
mov x0, x19
bl _printf
mov w8, #30
str x8, [sp]
mov x0, x19
bl _printf
str x27, [sp]
mov x0, x19
bl _printf
mov w8, #37
str x8, [sp]
mov x0, x19
bl _printf
str x26, [sp]
mov x0, x19
bl _printf
mov w8, #45
str x8, [sp]
mov x0, x19
bl _printf
mov w8, #8
str x8, [sp]
mov x0, x19
bl _printf
mov w8, #54
str x8, [sp]
mov x0, x19
bl _printf
str x23, [sp]
mov x0, x19
bl _printf
mov w8, #64
str x8, [sp]
mov x0, x19
bl _printf
str x25, [sp]
mov x0, x19
bl _printf
mov w8, #75
str x8, [sp]
mov x0, x19
bl _printf
mov w8, #11
str x8, [sp]
mov x0, x19
bl _printf
mov w8, #87
str x8, [sp]
mov x0, x19
bl _printf
str x24, [sp]
mov x0, x19
bl _printf
mov w8, #100
str x8, [sp]
mov x0, x19
bl _printf
mov w8, #13
str x8, [sp]
mov x0, x19
bl _printf
mov w8, #114
str x8, [sp]
mov x0, x19
bl _printf
mov w8, #14
str x8, [sp]
mov x0, x19
bl _printf
mov w8, #129
str x8, [sp]
mov x0, x19
bl _printf
str x22, [sp]
mov x0, x19
bl _printf
mov w8, #145
str x8, [sp]
mov x0, x19
bl _printf
mov w8, #16
str x8, [sp]
mov x0, x19
bl _printf
mov w8, #162
str x8, [sp]
mov x0, x19
bl _printf
mov w8, #17
str x8, [sp]
mov x0, x19
bl _printf
mov w8, #180
str x8, [sp]
mov x0, x19
bl _printf
mov w8, #18
str x8, [sp]
mov x0, x19
bl _printf
mov w8, #199
str x8, [sp]
mov x0, x19
bl _printf
str x21, [sp]
mov x0, x19
bl _printf
mov w8, #219
str x8, [sp]
mov x0, x19
bl _printf
mov w8, #20
str x8, [sp]
mov x0, x19
bl _printf
mov w8, #240
str x8, [sp]
mov x0, x19
bl _printf
mov w8, #21
str x8, [sp]
mov x0, x19
bl _printf
mov w8, #262
str x8, [sp]
mov x0, x19
bl _printf
mov w8, #22
str x8, [sp]
mov x0, x19
bl _printf
mov w8, #285
str x8, [sp]
mov x0, x19
bl _printf
mov w8, #23
str x8, [sp]
mov x0, x19
bl _printf
mov w8, #309
str x8, [sp]
mov x0, x19
bl _printf
mov w0, #0
ldp x29, x30, [sp, #96] ; 16-byte Folded Reload
ldp x20, x19, [sp, #80] ; 16-byte Folded Reload
ldp x22, x21, [sp, #64] ; 16-byte Folded Reload
ldp x24, x23, [sp, #48] ; 16-byte Folded Reload
ldp x26, x25, [sp, #32] ; 16-byte Folded Reload
ldp x28, x27, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #112
ret
.loh AdrpAdd Lloh2, Lloh3
.loh AdrpAdd Lloh0, Lloh1
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "Vez %d \n"
l_.str.1: ; @.str.1
.asciz "%d "
.subsections_via_symbols
| the_stack_data/894511.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function apbuart_tx_empty
_apbuart_tx_empty: ## @apbuart_tx_empty
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
callq _UART_GET_STATUS
movq _UART_STATUS_THE@GOTPCREL(%rip), %rcx
andl (%rcx), %eax
je LBB0_2
## %bb.1:
movq _TIOCSER_TEMT@GOTPCREL(%rip), %rax
movl (%rax), %eax
LBB0_2:
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _UART_STATUS_THE,4,2 ## @UART_STATUS_THE
.comm _TIOCSER_TEMT,4,2 ## @TIOCSER_TEMT
.no_dead_strip _apbuart_tx_empty
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function apbuart_tx_empty
_apbuart_tx_empty: ; @apbuart_tx_empty
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
bl _UART_GET_STATUS
Lloh0:
adrp x8, _UART_STATUS_THE@GOTPAGE
Lloh1:
ldr x8, [x8, _UART_STATUS_THE@GOTPAGEOFF]
Lloh2:
ldr w8, [x8]
Lloh3:
adrp x9, _TIOCSER_TEMT@GOTPAGE
Lloh4:
ldr x9, [x9, _TIOCSER_TEMT@GOTPAGEOFF]
Lloh5:
ldr w9, [x9]
tst w8, w0
csel w0, wzr, w9, eq
ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
ret
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _UART_STATUS_THE,4,2 ; @UART_STATUS_THE
.comm _TIOCSER_TEMT,4,2 ; @TIOCSER_TEMT
.no_dead_strip _apbuart_tx_empty
.subsections_via_symbols
| AnghaBench/linux/drivers/tty/serial/extr_apbuart.c_apbuart_tx_empty.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function virt_efi_set_variable
_virt_efi_set_variable: ## @virt_efi_set_variable
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq %r8, %r9
movq %rcx, %r8
movl %edx, %ecx
movq %rsi, %rdx
movq %rdi, %rsi
movq _set_variable@GOTPCREL(%rip), %rax
movl (%rax), %edi
popq %rbp
jmp _efi_call_virt5 ## TAILCALL
.cfi_endproc
## -- End function
.comm _set_variable,4,2 ## @set_variable
.no_dead_strip _virt_efi_set_variable
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function virt_efi_set_variable
_virt_efi_set_variable: ; @virt_efi_set_variable
.cfi_startproc
; %bb.0:
mov x5, x4
mov x4, x3
mov x3, x2
mov x2, x1
mov x1, x0
Lloh0:
adrp x8, _set_variable@GOTPAGE
Lloh1:
ldr x8, [x8, _set_variable@GOTPAGEOFF]
Lloh2:
ldr w0, [x8]
b _efi_call_virt5
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _set_variable,4,2 ; @set_variable
.no_dead_strip _virt_efi_set_variable
.subsections_via_symbols
| AnghaBench/fastsocket/kernel/arch/x86/kernel/extr_efi.c_virt_efi_set_variable.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _ClearUniStr ## -- Begin function ClearUniStr
.p2align 4, 0x90
_ClearUniStr: ## @ClearUniStr
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
leaq l_.str(%rip), %rdx
popq %rbp
jmp _UniStrCpy ## TAILCALL
.cfi_endproc
## -- End function
.section __TEXT,__const
.p2align 2 ## @.str
l_.str:
.space 4
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _ClearUniStr ; -- Begin function ClearUniStr
.p2align 2
_ClearUniStr: ; @ClearUniStr
.cfi_startproc
; %bb.0:
Lloh0:
adrp x2, l_.str@PAGE
Lloh1:
add x2, x2, l_.str@PAGEOFF
b _UniStrCpy
.loh AdrpAdd Lloh0, Lloh1
.cfi_endproc
; -- End function
.section __TEXT,__const
.p2align 2 ; @.str
l_.str:
.space 4
.subsections_via_symbols
| AnghaBench/SoftEtherVPN/src/Mayaqua/extr_Internat.c_ClearUniStr.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function update_msg
_update_msg: ## @update_msg
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $40, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq _FANCY_SNAKE@GOTPCREL(%rip), %rax
cmpl $0, (%rax)
movq _message@GOTPCREL(%rip), %rax
movl (%rax), %ebx
je LBB0_1
## %bb.2:
movq _MSGBUF_LEN@GOTPCREL(%rip), %rax
movl (%rax), %eax
movl %eax, -48(%rbp) ## 4-byte Spill
movq _ostype@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -72(%rbp) ## 8-byte Spill
movq _osrelease@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -64(%rbp) ## 8-byte Spill
movq _averunnable@GOTPCREL(%rip), %r14
movq (%r14), %rax
movl (%rax), %edi
callq _LOAD_HIGH
movl %eax, -44(%rbp) ## 4-byte Spill
movq (%r14), %rax
movl (%rax), %edi
callq _LOAD_LOW
## kill: def $eax killed $eax def $rax
movq %rax, -56(%rbp) ## 8-byte Spill
movq (%r14), %rax
movl 4(%rax), %edi
callq _LOAD_HIGH
movl %eax, %r15d
movq (%r14), %rax
movl 4(%rax), %edi
callq _LOAD_LOW
movl %eax, %r12d
movq (%r14), %rax
movl 8(%rax), %edi
callq _LOAD_HIGH
movl %eax, %r13d
movq (%r14), %rax
movl 8(%rax), %edi
callq _LOAD_LOW
## kill: def $eax killed $eax def $rax
subq $8, %rsp
leaq L_.str.1(%rip), %rdx
movl %ebx, %edi
movl -48(%rbp), %esi ## 4-byte Reload
movq -72(%rbp), %rcx ## 8-byte Reload
movq -64(%rbp), %r8 ## 8-byte Reload
movl -44(%rbp), %r9d ## 4-byte Reload
pushq %rax
pushq %r13
pushq %r12
pushq %r15
pushq -56(%rbp) ## 8-byte Folded Reload
callq _snprintf
addq $48, %rsp
jmp LBB0_3
LBB0_1:
movq _ostype@GOTPCREL(%rip), %rax
movq (%rax), %rdx
movq _osrelease@GOTPCREL(%rip), %rax
movq (%rax), %rcx
leaq L_.str(%rip), %rsi
movl %ebx, %edi
callq _sprintf
LBB0_3:
movq _messagelen@GOTPCREL(%rip), %rcx
movl %eax, (%rcx)
addq $40, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _FANCY_SNAKE,4,2 ## @FANCY_SNAKE
.comm _message,4,2 ## @message
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "%s %s"
.comm _ostype,8,3 ## @ostype
.comm _osrelease,8,3 ## @osrelease
.comm _messagelen,4,2 ## @messagelen
.comm _MSGBUF_LEN,4,2 ## @MSGBUF_LEN
L_.str.1: ## @.str.1
.asciz "%s %s (%d.%02d %d.%02d, %d.%02d)"
.comm _averunnable,8,3 ## @averunnable
.no_dead_strip _update_msg
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function update_msg
_update_msg: ; @update_msg
.cfi_startproc
; %bb.0:
sub sp, sp, #112
.cfi_def_cfa_offset 112
stp x28, x27, [sp, #16] ; 16-byte Folded Spill
stp x26, x25, [sp, #32] ; 16-byte Folded Spill
stp x24, x23, [sp, #48] ; 16-byte Folded Spill
stp x22, x21, [sp, #64] ; 16-byte Folded Spill
stp x20, x19, [sp, #80] ; 16-byte Folded Spill
stp x29, x30, [sp, #96] ; 16-byte Folded Spill
add x29, sp, #96
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
Lloh0:
adrp x8, _FANCY_SNAKE@GOTPAGE
Lloh1:
ldr x8, [x8, _FANCY_SNAKE@GOTPAGEOFF]
Lloh2:
ldr w8, [x8]
Lloh3:
adrp x9, _message@GOTPAGE
Lloh4:
ldr x9, [x9, _message@GOTPAGEOFF]
Lloh5:
ldr w19, [x9]
cbz w8, LBB0_2
; %bb.1:
Lloh6:
adrp x8, _MSGBUF_LEN@GOTPAGE
Lloh7:
ldr x8, [x8, _MSGBUF_LEN@GOTPAGEOFF]
Lloh8:
ldr w20, [x8]
Lloh9:
adrp x8, _ostype@GOTPAGE
Lloh10:
ldr x8, [x8, _ostype@GOTPAGEOFF]
Lloh11:
adrp x9, _osrelease@GOTPAGE
Lloh12:
ldr x9, [x9, _osrelease@GOTPAGEOFF]
Lloh13:
ldr x21, [x8]
Lloh14:
ldr x22, [x9]
Lloh15:
adrp x28, _averunnable@GOTPAGE
Lloh16:
ldr x28, [x28, _averunnable@GOTPAGEOFF]
ldr x8, [x28]
ldr w0, [x8]
bl _LOAD_HIGH
mov x23, x0
ldr x8, [x28]
ldr w0, [x8]
bl _LOAD_LOW
mov x24, x0
ldr x8, [x28]
ldr w0, [x8, #4]
bl _LOAD_HIGH
mov x25, x0
ldr x8, [x28]
ldr w0, [x8, #4]
bl _LOAD_LOW
mov x26, x0
ldr x8, [x28]
ldr w0, [x8, #8]
bl _LOAD_HIGH
mov x27, x0
ldr x8, [x28]
ldr w0, [x8, #8]
bl _LOAD_LOW
stp w27, w0, [sp, #4]
str w26, [sp]
Lloh17:
adrp x2, l_.str.1@PAGE
Lloh18:
add x2, x2, l_.str.1@PAGEOFF
mov x0, x19
mov x1, x20
mov x3, x21
mov x4, x22
mov x5, x23
mov x6, x24
mov x7, x25
bl _snprintf
b LBB0_3
LBB0_2:
Lloh19:
adrp x8, _ostype@GOTPAGE
Lloh20:
ldr x8, [x8, _ostype@GOTPAGEOFF]
Lloh21:
ldr x2, [x8]
Lloh22:
adrp x8, _osrelease@GOTPAGE
Lloh23:
ldr x8, [x8, _osrelease@GOTPAGEOFF]
Lloh24:
ldr x3, [x8]
Lloh25:
adrp x1, l_.str@PAGE
Lloh26:
add x1, x1, l_.str@PAGEOFF
mov x0, x19
bl _sprintf
LBB0_3:
Lloh27:
adrp x8, _messagelen@GOTPAGE
Lloh28:
ldr x8, [x8, _messagelen@GOTPAGEOFF]
Lloh29:
str w0, [x8]
ldp x29, x30, [sp, #96] ; 16-byte Folded Reload
ldp x20, x19, [sp, #80] ; 16-byte Folded Reload
ldp x22, x21, [sp, #64] ; 16-byte Folded Reload
ldp x24, x23, [sp, #48] ; 16-byte Folded Reload
ldp x26, x25, [sp, #32] ; 16-byte Folded Reload
ldp x28, x27, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #112
ret
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpAdd Lloh17, Lloh18
.loh AdrpLdrGot Lloh15, Lloh16
.loh AdrpLdrGotLdr Lloh11, Lloh12, Lloh14
.loh AdrpLdrGotLdr Lloh9, Lloh10, Lloh13
.loh AdrpLdrGotLdr Lloh6, Lloh7, Lloh8
.loh AdrpAdd Lloh25, Lloh26
.loh AdrpLdrGotLdr Lloh22, Lloh23, Lloh24
.loh AdrpLdrGotLdr Lloh19, Lloh20, Lloh21
.loh AdrpLdrGotStr Lloh27, Lloh28, Lloh29
.cfi_endproc
; -- End function
.comm _FANCY_SNAKE,4,2 ; @FANCY_SNAKE
.comm _message,4,2 ; @message
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "%s %s"
.comm _ostype,8,3 ; @ostype
.comm _osrelease,8,3 ; @osrelease
.comm _messagelen,4,2 ; @messagelen
.comm _MSGBUF_LEN,4,2 ; @MSGBUF_LEN
l_.str.1: ; @.str.1
.asciz "%s %s (%d.%02d %d.%02d, %d.%02d)"
.comm _averunnable,8,3 ; @averunnable
.no_dead_strip _update_msg
.subsections_via_symbols
| AnghaBench/freebsd/sys/dev/syscons/snake/extr_snake_saver.c_update_msg.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.subsections_via_symbols
| the_stack_data/81856.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _uvc_mc_cleanup_entity ## -- Begin function uvc_mc_cleanup_entity
.p2align 4, 0x90
_uvc_mc_cleanup_entity: ## @uvc_mc_cleanup_entity
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
movq %rdi, %rbx
callq _UVC_ENTITY_TYPE
movq _UVC_TT_STREAMING@GOTPCREL(%rip), %rcx
cmpq (%rcx), %rax
jne LBB0_1
## %bb.3:
movq (%rbx), %rbx
testq %rbx, %rbx
jne LBB0_2
## %bb.4:
addq $8, %rsp
popq %rbx
popq %rbp
retq
LBB0_1:
addq $8, %rbx
LBB0_2:
movq %rbx, %rdi
addq $8, %rsp
popq %rbx
popq %rbp
jmp _media_entity_cleanup ## TAILCALL
.cfi_endproc
## -- End function
.comm _UVC_TT_STREAMING,8,3 ## @UVC_TT_STREAMING
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _uvc_mc_cleanup_entity ; -- Begin function uvc_mc_cleanup_entity
.p2align 2
_uvc_mc_cleanup_entity: ; @uvc_mc_cleanup_entity
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x0
bl _UVC_ENTITY_TYPE
Lloh0:
adrp x8, _UVC_TT_STREAMING@GOTPAGE
Lloh1:
ldr x8, [x8, _UVC_TT_STREAMING@GOTPAGEOFF]
Lloh2:
ldr x8, [x8]
cmp x0, x8
b.ne LBB0_3
; %bb.1:
ldr x0, [x19]
cbnz x0, LBB0_4
; %bb.2:
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
LBB0_3:
add x0, x19, #8
LBB0_4:
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
b _media_entity_cleanup
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _UVC_TT_STREAMING,8,3 ; @UVC_TT_STREAMING
.subsections_via_symbols
| AnghaBench/linux/drivers/media/usb/uvc/extr_uvc_entity.c_uvc_mc_cleanup_entity.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _tree_last ## -- Begin function tree_last
.p2align 4, 0x90
_tree_last: ## @tree_last
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
testl %edi, %edi
je LBB0_1
## %bb.2:
movl %edi, %eax
.p2align 4, 0x90
LBB0_3: ## =>This Inner Loop Header: Depth=1
movl %eax, %ebx
movl %eax, %edi
callq _TREE_CHAIN
testl %eax, %eax
jne LBB0_3
jmp LBB0_4
LBB0_1:
xorl %ebx, %ebx
LBB0_4:
movl %ebx, %eax
addq $8, %rsp
popq %rbx
popq %rbp
retq
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _tree_last ; -- Begin function tree_last
.p2align 2
_tree_last: ; @tree_last
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
cbz w0, LBB0_2
LBB0_1: ; =>This Inner Loop Header: Depth=1
mov x19, x0
bl _TREE_CHAIN
cbnz w0, LBB0_1
b LBB0_3
LBB0_2:
mov w19, #0
LBB0_3:
mov x0, x19
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
.subsections_via_symbols
| AnghaBench/freebsd/contrib/gcc/extr_tree.c_tree_last.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.section __TEXT,__literal4,4byte_literals
.p2align 2 ## -- Begin function kalman_smoothen
LCPI0_0:
.long 0x3f200000 ## float 0.625
LCPI0_2:
.long 0x00000000 ## float 0
.section __TEXT,__literal8,8byte_literals
.p2align 3
LCPI0_1:
.quad 0x3fe3333333333333 ## double 0.59999999999999998
.section __TEXT,__text,regular,pure_instructions
.p2align 4, 0x90
_kalman_smoothen: ## @kalman_smoothen
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $24, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %r8d, %r12d
movq %rcx, -56(%rbp) ## 8-byte Spill
movq %rdx, %r15
movl %esi, %r13d
movq %rdi, %r14
movl 4(%rdi), %edi
leal -3(%r13), %esi
callq _FFMAX
shlq $2, %rax
movq %r15, %rbx
subq %rax, %rbx
movl (%r14), %edi
addl $3, %r13d
movl %r13d, %esi
callq _FFMIN
shlq $2, %rax
movq %r15, %r14
subq %rax, %r14
xorps %xmm0, %xmm0
xorl %r13d, %r13d
.p2align 4, 0x90
LBB0_1: ## =>This Inner Loop Header: Depth=1
movss %xmm0, -44(%rbp) ## 4-byte Spill
movq %r15, %rdi
movq %rbx, %rsi
movl %r12d, %edx
callq _avpriv_scalarproduct_float_c
movss -44(%rbp), %xmm1 ## 4-byte Reload
## xmm1 = mem[0],zero,zero,zero
ucomiss %xmm1, %xmm0
cmovaq %rbx, %r13
maxss %xmm1, %xmm0
addq $-4, %rbx
cmpq %r14, %rbx
jae LBB0_1
## %bb.2:
movl $-1, %ebx
xorps %xmm1, %xmm1
ucomiss %xmm0, %xmm1
jae LBB0_25
## %bb.3:
movss %xmm0, -44(%rbp) ## 4-byte Spill
movq %r13, %rdi
movq %r13, %rsi
movl %r12d, %edx
callq _avpriv_scalarproduct_float_c
xorps %xmm1, %xmm1
ucomiss %xmm0, %xmm1
jae LBB0_25
## %bb.4:
movss -44(%rbp), %xmm1 ## 4-byte Reload
## xmm1 = mem[0],zero,zero,zero
ucomiss %xmm1, %xmm0
jae LBB0_6
## %bb.5:
movss LCPI0_0(%rip), %xmm0 ## xmm0 = mem[0],zero,zero,zero
testl %r12d, %r12d
jg LBB0_9
LBB0_8:
xorl %ebx, %ebx
jmp LBB0_25
LBB0_6:
cvtss2sd %xmm0, %xmm0
cvtss2sd %xmm1, %xmm1
mulsd LCPI0_1(%rip), %xmm1
addsd %xmm0, %xmm1
divsd %xmm1, %xmm0
cvtsd2ss %xmm0, %xmm0
testl %r12d, %r12d
jle LBB0_8
LBB0_9:
movl %r12d, %eax
cmpl $8, %r12d
jae LBB0_11
## %bb.10:
xorl %ecx, %ecx
LBB0_20:
movq %rcx, %rdx
notq %rdx
testb $1, %al
jne LBB0_22
## %bb.21:
movq -56(%rbp), %rsi ## 8-byte Reload
jmp LBB0_23
LBB0_11:
movq -56(%rbp), %rsi ## 8-byte Reload
movq %rsi, %rdx
subq %r15, %rdx
xorl %ecx, %ecx
cmpq $32, %rdx
jb LBB0_20
## %bb.12:
movq %rsi, %rdx
subq %r13, %rdx
cmpq $32, %rdx
jb LBB0_20
## %bb.13:
movl %eax, %ecx
andl $-8, %ecx
movaps %xmm0, %xmm1
shufps $0, %xmm0, %xmm1 ## xmm1 = xmm1[0,0],xmm0[0,0]
leaq -8(%rcx), %rdx
movq %rdx, %rsi
shrq $3, %rsi
incq %rsi
testq %rdx, %rdx
je LBB0_14
## %bb.15:
movq %rsi, %rdi
andq $-2, %rdi
xorl %edx, %edx
movq -56(%rbp), %rbx ## 8-byte Reload
.p2align 4, 0x90
LBB0_16: ## =>This Inner Loop Header: Depth=1
movups (%r13,%rdx,4), %xmm2
movups 16(%r13,%rdx,4), %xmm3
movups (%r15,%rdx,4), %xmm4
movups 16(%r15,%rdx,4), %xmm5
subps %xmm2, %xmm4
subps %xmm3, %xmm5
mulps %xmm1, %xmm4
addps %xmm2, %xmm4
mulps %xmm1, %xmm5
addps %xmm3, %xmm5
movups %xmm4, (%rbx,%rdx,4)
movups %xmm5, 16(%rbx,%rdx,4)
movups 32(%r13,%rdx,4), %xmm2
movups 48(%r13,%rdx,4), %xmm3
movups 32(%r15,%rdx,4), %xmm4
movups 48(%r15,%rdx,4), %xmm5
subps %xmm2, %xmm4
subps %xmm3, %xmm5
mulps %xmm1, %xmm4
addps %xmm2, %xmm4
mulps %xmm1, %xmm5
addps %xmm3, %xmm5
movups %xmm4, 32(%rbx,%rdx,4)
movups %xmm5, 48(%rbx,%rdx,4)
addq $16, %rdx
addq $-2, %rdi
jne LBB0_16
jmp LBB0_17
LBB0_22:
movss (%r13,%rcx,4), %xmm1 ## xmm1 = mem[0],zero,zero,zero
movss (%r15,%rcx,4), %xmm2 ## xmm2 = mem[0],zero,zero,zero
subss %xmm1, %xmm2
mulss %xmm0, %xmm2
addss %xmm1, %xmm2
movq -56(%rbp), %rsi ## 8-byte Reload
movss %xmm2, (%rsi,%rcx,4)
orq $1, %rcx
LBB0_23:
xorl %ebx, %ebx
addq %rax, %rdx
je LBB0_25
.p2align 4, 0x90
LBB0_24: ## =>This Inner Loop Header: Depth=1
movss (%r13,%rcx,4), %xmm1 ## xmm1 = mem[0],zero,zero,zero
movss (%r15,%rcx,4), %xmm2 ## xmm2 = mem[0],zero,zero,zero
subss %xmm1, %xmm2
mulss %xmm0, %xmm2
addss %xmm1, %xmm2
movss %xmm2, (%rsi,%rcx,4)
movss 4(%r13,%rcx,4), %xmm1 ## xmm1 = mem[0],zero,zero,zero
movss 4(%r15,%rcx,4), %xmm2 ## xmm2 = mem[0],zero,zero,zero
subss %xmm1, %xmm2
mulss %xmm0, %xmm2
addss %xmm1, %xmm2
movss %xmm2, 4(%rsi,%rcx,4)
addq $2, %rcx
cmpq %rcx, %rax
jne LBB0_24
jmp LBB0_25
LBB0_14:
xorl %edx, %edx
movq -56(%rbp), %rbx ## 8-byte Reload
LBB0_17:
testb $1, %sil
je LBB0_19
## %bb.18:
movups (%r13,%rdx,4), %xmm2
movups 16(%r13,%rdx,4), %xmm3
movups (%r15,%rdx,4), %xmm4
movups 16(%r15,%rdx,4), %xmm5
subps %xmm2, %xmm4
subps %xmm3, %xmm5
mulps %xmm1, %xmm4
addps %xmm2, %xmm4
mulps %xmm1, %xmm5
addps %xmm3, %xmm5
movups %xmm4, (%rbx,%rdx,4)
movups %xmm5, 16(%rbx,%rdx,4)
LBB0_19:
xorl %ebx, %ebx
cmpq %rax, %rcx
jne LBB0_20
LBB0_25:
movl %ebx, %eax
addq $24, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.no_dead_strip _kalman_smoothen
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function kalman_smoothen
_kalman_smoothen: ; @kalman_smoothen
.cfi_startproc
; %bb.0:
stp d9, d8, [sp, #-80]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 80
stp x24, x23, [sp, #16] ; 16-byte Folded Spill
stp x22, x21, [sp, #32] ; 16-byte Folded Spill
stp x20, x19, [sp, #48] ; 16-byte Folded Spill
stp x29, x30, [sp, #64] ; 16-byte Folded Spill
add x29, sp, #64
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset b8, -72
.cfi_offset b9, -80
mov x22, x4
mov x20, x3
mov x19, x2
mov x21, x1
mov x24, x0
ldr w0, [x0, #4]
sub w1, w1, #3
bl _FFMAX
sub x23, x19, x0, lsl #2
ldr w0, [x24]
add w1, w21, #3
bl _FFMIN
mov x21, #0
sub x24, x19, x0, lsl #2
movi d8, #0000000000000000
LBB0_1: ; =>This Inner Loop Header: Depth=1
mov x0, x19
mov x1, x23
mov x2, x22
bl _avpriv_scalarproduct_float_c
fcmp s0, s8
fcsel s8, s0, s8, gt
csel x21, x23, x21, gt
sub x23, x23, #4
cmp x23, x24
b.hs LBB0_1
; %bb.2:
fcmp s8, #0.0
b.ls LBB0_9
; %bb.3:
mov x0, x21
mov x1, x21
mov x2, x22
bl _avpriv_scalarproduct_float_c
fcmp s0, #0.0
b.ls LBB0_9
; %bb.4:
fmov s1, #0.62500000
fcmp s8, s0
b.hi LBB0_6
; %bb.5:
fcvt d0, s0
fcvt d1, s8
mov x8, #3689348814741910323
movk x8, #16355, lsl #48
fmov d2, x8
fmadd d1, d1, d2, d0
fdiv d0, d0, d1
fcvt s1, d0
LBB0_6:
cmp w22, #1
b.lt LBB0_17
; %bb.7:
mov w8, w22
cmp w22, #16
b.hs LBB0_10
; %bb.8:
mov x9, #0
b LBB0_15
LBB0_9:
mov w0, #-1
b LBB0_18
LBB0_10:
mov x9, #0
sub x10, x20, x19
cmp x10, #64
b.lo LBB0_15
; %bb.11:
sub x10, x20, x21
cmp x10, #64
b.lo LBB0_15
; %bb.12:
and x9, x8, #0xfffffff0
dup.4s v0, v1[0]
add x10, x20, #32
add x11, x21, #32
add x12, x19, #32
mov x13, x9
LBB0_13: ; =>This Inner Loop Header: Depth=1
ldp q2, q3, [x11, #-32]
ldp q4, q5, [x11], #64
ldp q6, q7, [x12, #-32]
ldp q16, q17, [x12], #64
fsub.4s v6, v6, v2
fsub.4s v7, v7, v3
fsub.4s v16, v16, v4
fsub.4s v17, v17, v5
fmla.4s v2, v6, v0
fmla.4s v3, v7, v0
fmla.4s v4, v16, v0
fmla.4s v5, v17, v0
stp q2, q3, [x10, #-32]
stp q4, q5, [x10], #64
subs x13, x13, #16
b.ne LBB0_13
; %bb.14:
cmp x9, x8
b.eq LBB0_17
LBB0_15:
lsl x12, x9, #2
add x10, x20, x12
add x11, x19, x12
add x12, x21, x12
sub x8, x8, x9
LBB0_16: ; =>This Inner Loop Header: Depth=1
ldr s0, [x12], #4
ldr s2, [x11], #4
fsub s2, s2, s0
fmadd s0, s1, s2, s0
str s0, [x10], #4
subs x8, x8, #1
b.ne LBB0_16
LBB0_17:
mov w0, #0
LBB0_18:
ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
ldp x22, x21, [sp, #32] ; 16-byte Folded Reload
ldp x24, x23, [sp, #16] ; 16-byte Folded Reload
ldp d9, d8, [sp], #80 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
.no_dead_strip _kalman_smoothen
.subsections_via_symbols
| AnghaBench/FFmpeg/libavcodec/extr_wmavoice.c_kalman_smoothen.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function file_g_read
_file_g_read: ## @file_g_read
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $56, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %ecx, %r12d
movl %edx, -68(%rbp) ## 4-byte Spill
movl %esi, %r13d
movq %rdi, %r15
movq %rsp, %rbx
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -48(%rbp)
movq _LUAL_BUFFERSIZE@GOTPCREL(%rip), %rax
movl (%rax), %ecx
movl %ecx, %eax
shrl $31, %eax
addl %ecx, %eax
sarl %eax
movq %rax, -88(%rbp) ## 8-byte Spill
callq ____chkstk_darwin
addq $15, %rax
andq $-16, %rax
subq %rax, %rsp
movq %rsp, %r14
movl %r12d, -60(%rbp) ## 4-byte Spill
testl %r12d, %r12d
je LBB0_1
## %bb.2:
movq %rbx, -80(%rbp) ## 8-byte Spill
leaq -64(%rbp), %rsi
movq %r15, -96(%rbp) ## 8-byte Spill
movq %r15, %rdi
callq _luaL_buffinit
testl %r13d, %r13d
movq -88(%rbp), %rcx ## 8-byte Reload
jle LBB0_14
## %bb.3:
xorl %r15d, %r15d
movl $1, -56(%rbp) ## 4-byte Folded Spill
movl %r13d, -52(%rbp) ## 4-byte Spill
.p2align 4, 0x90
LBB0_4: ## =>This Loop Header: Depth=1
## Child Loop BB0_8 Depth 2
movl %r13d, %eax
subl %r15d, %eax
movslq %eax, %r12
cmpq %rcx, %r12
cmovaeq %rcx, %r12
movl -60(%rbp), %edi ## 4-byte Reload
movq %r14, %rsi
movl %r12d, %edx
callq _vfs_read
testl %eax, %eax
je LBB0_18
## %bb.5: ## in Loop: Header=BB0_4 Depth=1
movq _VFS_RES_ERR@GOTPCREL(%rip), %rcx
cmpl (%rcx), %eax
je LBB0_18
## %bb.6: ## in Loop: Header=BB0_4 Depth=1
movl %eax, %ebx
testl %ebx, %ebx
jle LBB0_12
## %bb.7: ## in Loop: Header=BB0_4 Depth=1
xorl %r13d, %r13d
.p2align 4, 0x90
LBB0_8: ## Parent Loop BB0_4 Depth=1
## => This Inner Loop Header: Depth=2
movsbl (%r14,%r13), %esi
leaq -64(%rbp), %rdi
callq _luaL_addchar
movl -68(%rbp), %eax ## 4-byte Reload
cmpb %al, (%r14,%r13)
je LBB0_9
## %bb.10: ## in Loop: Header=BB0_8 Depth=2
incq %r13
cmpq %r13, %rbx
jne LBB0_8
jmp LBB0_11
.p2align 4, 0x90
LBB0_9: ## in Loop: Header=BB0_4 Depth=1
movl -56(%rbp), %eax ## 4-byte Reload
subl %ebx, %eax
addl %eax, %r13d
movq _VFS_SEEK_CUR@GOTPCREL(%rip), %rax
movl (%rax), %edx
movl -60(%rbp), %edi ## 4-byte Reload
movl %r13d, %esi
callq _vfs_lseek
xorl %ebx, %ebx
LBB0_11: ## in Loop: Header=BB0_4 Depth=1
movl -52(%rbp), %r13d ## 4-byte Reload
LBB0_12: ## in Loop: Header=BB0_4 Depth=1
cmpl %r12d, %ebx
movq -88(%rbp), %rcx ## 8-byte Reload
jl LBB0_14
## %bb.13: ## in Loop: Header=BB0_4 Depth=1
addl %ecx, %r15d
addl %ecx, -56(%rbp) ## 4-byte Folded Spill
cmpl %r13d, %r15d
jl LBB0_4
LBB0_14:
leaq -64(%rbp), %rdi
callq _luaL_pushresult
jmp LBB0_15
LBB0_1:
leaq L_.str(%rip), %rsi
movq %r15, %rdi
callq _luaL_error
jmp LBB0_16
LBB0_18:
movq -96(%rbp), %rdi ## 8-byte Reload
callq _lua_pushnil
LBB0_15:
movl $1, %eax
movq -80(%rbp), %rbx ## 8-byte Reload
LBB0_16:
movq %rbx, %rsp
movq ___stack_chk_guard@GOTPCREL(%rip), %rcx
movq (%rcx), %rcx
cmpq -48(%rbp), %rcx
jne LBB0_19
## %bb.17:
leaq -40(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB0_19:
callq ___stack_chk_fail
.cfi_endproc
## -- End function
.comm _LUAL_BUFFERSIZE,4,2 ## @LUAL_BUFFERSIZE
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "open a file first"
.comm _VFS_RES_ERR,4,2 ## @VFS_RES_ERR
.comm _VFS_SEEK_CUR,4,2 ## @VFS_SEEK_CUR
.no_dead_strip _file_g_read
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function file_g_read
_file_g_read: ; @file_g_read
.cfi_startproc
; %bb.0:
stp x28, x27, [sp, #-96]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 96
stp x26, x25, [sp, #16] ; 16-byte Folded Spill
stp x24, x23, [sp, #32] ; 16-byte Folded Spill
stp x22, x21, [sp, #48] ; 16-byte Folded Spill
stp x20, x19, [sp, #64] ; 16-byte Folded Spill
stp x29, x30, [sp, #80] ; 16-byte Folded Spill
add x29, sp, #80
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
sub sp, sp, #32
mov x20, x3
mov x21, x2
mov x22, x1
mov x19, sp
Lloh0:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh1:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh2:
ldr x8, [x8]
stur x8, [x29, #-88]
Lloh3:
adrp x8, _LUAL_BUFFERSIZE@GOTPAGE
Lloh4:
ldr x8, [x8, _LUAL_BUFFERSIZE@GOTPAGEOFF]
Lloh5:
ldr w8, [x8]
cmp w8, #0
cinc w8, w8, lt
asr w23, w8, #1
add x8, x23, #15
and x8, x8, #0x1fffffff0
mov x9, x23
Lloh6:
adrp x16, ___chkstk_darwin@GOTPAGE
Lloh7:
ldr x16, [x16, ___chkstk_darwin@GOTPAGEOFF]
blr x16
mov x9, sp
sub x24, x9, x8
mov sp, x24
cbz w3, LBB0_12
; %bb.1:
stp x0, x19, [x29, #-112] ; 16-byte Folded Spill
sub x1, x29, #92
bl _luaL_buffinit
cmp w22, #1
b.lt LBB0_11
; %bb.2:
mov w27, #0
mov w28, #1
LBB0_3: ; =>This Loop Header: Depth=1
; Child Loop BB0_6 Depth 2
sub w8, w22, w27
sxtw x8, w8
cmp x8, x23
csel x25, x8, x23, lo
mov x0, x20
mov x1, x24
mov x2, x25
bl _vfs_read
Lloh8:
adrp x8, _VFS_RES_ERR@GOTPAGE
Lloh9:
ldr x8, [x8, _VFS_RES_ERR@GOTPAGEOFF]
Lloh10:
ldr w8, [x8]
cmp w0, #0
ccmp w0, w8, #4, ne
b.eq LBB0_13
; %bb.4: ; in Loop: Header=BB0_3 Depth=1
mov w19, w0
cmp w19, #0
b.le LBB0_9
; %bb.5: ; in Loop: Header=BB0_3 Depth=1
mov x26, #0
LBB0_6: ; Parent Loop BB0_3 Depth=1
; => This Inner Loop Header: Depth=2
ldrsb w1, [x24, x26]
sub x0, x29, #92
bl _luaL_addchar
ldrb w8, [x24, x26]
cmp w8, w21, uxtb
b.eq LBB0_8
; %bb.7: ; in Loop: Header=BB0_6 Depth=2
add x26, x26, #1
cmp x19, x26
b.ne LBB0_6
b LBB0_9
LBB0_8: ; in Loop: Header=BB0_3 Depth=1
sub w8, w28, w19
add w1, w8, w26
Lloh11:
adrp x8, _VFS_SEEK_CUR@GOTPAGE
Lloh12:
ldr x8, [x8, _VFS_SEEK_CUR@GOTPAGEOFF]
Lloh13:
ldr w2, [x8]
mov x0, x20
bl _vfs_lseek
mov w19, #0
LBB0_9: ; in Loop: Header=BB0_3 Depth=1
cmp w19, w25
b.lt LBB0_11
; %bb.10: ; in Loop: Header=BB0_3 Depth=1
add w28, w28, w23
add w27, w27, w23
cmp w27, w22
b.lt LBB0_3
LBB0_11:
sub x0, x29, #92
bl _luaL_pushresult
b LBB0_14
LBB0_12:
Lloh14:
adrp x1, l_.str@PAGE
Lloh15:
add x1, x1, l_.str@PAGEOFF
bl _luaL_error
b LBB0_15
LBB0_13:
ldur x0, [x29, #-112] ; 8-byte Folded Reload
bl _lua_pushnil
LBB0_14:
mov w0, #1
ldur x19, [x29, #-104] ; 8-byte Folded Reload
LBB0_15:
mov sp, x19
ldur x8, [x29, #-88]
Lloh16:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh17:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh18:
ldr x9, [x9]
cmp x9, x8
b.ne LBB0_17
; %bb.16:
sub sp, x29, #80
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
ldp x26, x25, [sp, #16] ; 16-byte Folded Reload
ldp x28, x27, [sp], #96 ; 16-byte Folded Reload
ret
LBB0_17:
bl ___stack_chk_fail
.loh AdrpLdrGot Lloh6, Lloh7
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGotLdr Lloh8, Lloh9, Lloh10
.loh AdrpLdrGotLdr Lloh11, Lloh12, Lloh13
.loh AdrpAdd Lloh14, Lloh15
.loh AdrpLdrGotLdr Lloh16, Lloh17, Lloh18
.cfi_endproc
; -- End function
.comm _LUAL_BUFFERSIZE,4,2 ; @LUAL_BUFFERSIZE
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "open a file first"
.comm _VFS_RES_ERR,4,2 ; @VFS_RES_ERR
.comm _VFS_SEEK_CUR,4,2 ; @VFS_SEEK_CUR
.no_dead_strip _file_g_read
.subsections_via_symbols
| AnghaBench/nodemcu-firmware/app/modules/extr_file.c_file_g_read.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _decsubInit ## -- Begin function decsubInit
.p2align 4, 0x90
_decsubInit: ## @decsubInit
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq %rsi, %r14
movq %rdi, %rbx
movl $1, %edi
movl $24, %esi
callq _calloc
movq %rax, 8(%rbx)
movq %r14, 16(%rax)
movq $0, 8(%rax)
movq (%rbx), %rcx
movl (%rcx), %edx
movl %edx, (%rax)
testl %edx, %edx
je LBB0_18
## %bb.1:
movl $0, (%rax)
movq 8(%rcx), %rcx
cmpq $0, (%rcx)
jne LBB0_17
## %bb.2:
cmpq $0, 8(%rcx)
jne LBB0_17
## %bb.3:
cmpq $0, 16(%rcx)
jne LBB0_17
## %bb.4:
cmpq $0, 24(%rcx)
jne LBB0_17
## %bb.5:
cmpq $0, 32(%rcx)
jne LBB0_17
## %bb.6:
cmpq $0, 40(%rcx)
jne LBB0_17
## %bb.7:
cmpq $0, 48(%rcx)
jne LBB0_17
## %bb.8:
cmpq $0, 56(%rcx)
jne LBB0_17
## %bb.9:
cmpq $0, 64(%rcx)
jne LBB0_17
## %bb.10:
cmpq $0, 72(%rcx)
jne LBB0_17
## %bb.11:
cmpq $0, 80(%rcx)
jne LBB0_17
## %bb.12:
cmpq $0, 88(%rcx)
jne LBB0_17
## %bb.13:
cmpq $0, 96(%rcx)
jne LBB0_17
## %bb.14:
cmpq $0, 104(%rcx)
jne LBB0_17
## %bb.15:
cmpq $0, 112(%rcx)
jne LBB0_17
## %bb.16:
cmpq $0, 120(%rcx)
je LBB0_18
LBB0_17:
movl $1, (%rax)
jmp LBB0_19
LBB0_18:
leaq L_.str(%rip), %rdi
callq _hb_log
LBB0_19:
xorl %eax, %eax
popq %rbx
popq %r14
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "decvobsub: input color palette is empty!"
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _decsubInit ; -- Begin function decsubInit
.p2align 2
_decsubInit: ; @decsubInit
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x1
mov x20, x0
mov w0, #1
mov w1, #24
bl _calloc
str x0, [x20, #8]
stp xzr, x19, [x0, #8]
ldr x8, [x20]
ldr w9, [x8]
str w9, [x0]
cbz w9, LBB0_18
; %bb.1:
str wzr, [x0]
ldr x8, [x8, #8]
ldr x9, [x8]
cbnz x9, LBB0_17
; %bb.2:
ldr x9, [x8, #8]
cbnz x9, LBB0_17
; %bb.3:
ldr x9, [x8, #16]
cbnz x9, LBB0_17
; %bb.4:
ldr x9, [x8, #24]
cbnz x9, LBB0_17
; %bb.5:
ldr x9, [x8, #32]
cbnz x9, LBB0_17
; %bb.6:
ldr x9, [x8, #40]
cbnz x9, LBB0_17
; %bb.7:
ldr x9, [x8, #48]
cbnz x9, LBB0_17
; %bb.8:
ldr x9, [x8, #56]
cbnz x9, LBB0_17
; %bb.9:
ldr x9, [x8, #64]
cbnz x9, LBB0_17
; %bb.10:
ldr x9, [x8, #72]
cbnz x9, LBB0_17
; %bb.11:
ldr x9, [x8, #80]
cbnz x9, LBB0_17
; %bb.12:
ldr x9, [x8, #88]
cbnz x9, LBB0_17
; %bb.13:
ldr x9, [x8, #96]
cbnz x9, LBB0_17
; %bb.14:
ldr x9, [x8, #104]
cbnz x9, LBB0_17
; %bb.15:
ldr x9, [x8, #112]
cbnz x9, LBB0_17
; %bb.16:
ldr x8, [x8, #120]
cbz x8, LBB0_18
LBB0_17:
mov w8, #1
str w8, [x0]
b LBB0_19
LBB0_18:
Lloh0:
adrp x0, l_.str@PAGE
Lloh1:
add x0, x0, l_.str@PAGEOFF
bl _hb_log
LBB0_19:
mov w0, #0
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.loh AdrpAdd Lloh0, Lloh1
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "decvobsub: input color palette is empty!"
.subsections_via_symbols
| AnghaBench/HandBrake/libhb/extr_decvobsub.c_decsubInit.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
movl $5, %edi
callq _alarm
leaq L_.str(%rip), %rbx
movq %rbx, %rdi
movl %eax, %esi
xorl %eax, %eax
callq _printf
movl $2, %edi
callq _sleep
movl $2, %edi
callq _alarm
movq %rbx, %rdi
movl %eax, %esi
xorl %eax, %eax
callq _printf
.p2align 4, 0x90
LBB0_1: ## =>This Inner Loop Header: Depth=1
jmp LBB0_1
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "seconds = %d\n"
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
sub sp, sp, #48
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov w0, #5
bl _alarm
; kill: def $w0 killed $w0 def $x0
str x0, [sp]
Lloh0:
adrp x19, l_.str@PAGE
Lloh1:
add x19, x19, l_.str@PAGEOFF
mov x0, x19
bl _printf
mov w0, #2
bl _sleep
mov w0, #2
bl _alarm
; kill: def $w0 killed $w0 def $x0
str x0, [sp]
mov x0, x19
bl _printf
LBB0_1: ; =>This Inner Loop Header: Depth=1
b LBB0_1
.loh AdrpAdd Lloh0, Lloh1
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "seconds = %d\n"
.subsections_via_symbols
| the_stack_data/402480.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function get_sw_data
_get_sw_data: ## @get_sw_data
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq (%rsi), %rax
movslq %edi, %rcx
movl (%rax,%rcx,4), %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.no_dead_strip _get_sw_data
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function get_sw_data
_get_sw_data: ; @get_sw_data
.cfi_startproc
; %bb.0:
ldr x8, [x1]
ldr w0, [x8, w0, sxtw #2]
ret
.cfi_endproc
; -- End function
.no_dead_strip _get_sw_data
.subsections_via_symbols
| AnghaBench/linux/drivers/net/ethernet/ti/extr_netcp_core.c_get_sw_data.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function Close
_Close: ## @Close
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
movq (%rdi), %rbx
leaq L_.str(%rip), %rsi
callq _var_Destroy
movq %rbx, %rdi
addq $8, %rsp
popq %rbx
popq %rbp
jmp _free ## TAILCALL
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "rawdv-hurry-up"
.no_dead_strip _Close
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function Close
_Close: ; @Close
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
ldr x19, [x0]
Lloh0:
adrp x1, l_.str@PAGE
Lloh1:
add x1, x1, l_.str@PAGEOFF
bl _var_Destroy
mov x0, x19
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
b _free
.loh AdrpAdd Lloh0, Lloh1
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "rawdv-hurry-up"
.no_dead_strip _Close
.subsections_via_symbols
| AnghaBench/vlc/modules/demux/extr_rawdv.c_Close.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _zd_chip_clear ## -- Begin function zd_chip_clear
.p2align 4, 0x90
_zd_chip_clear: ## @zd_chip_clear
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
movq %rdi, %rbx
callq _mutex_is_locked
xorl %edi, %edi
testl %eax, %eax
sete %dil
callq _ZD_ASSERT
leaq 8(%rbx), %rdi
callq _zd_usb_clear
leaq 4(%rbx), %rdi
callq _zd_rf_clear
movq %rbx, %rdi
callq _mutex_destroy
movq %rbx, %rdi
movl $12, %esi
addq $8, %rsp
popq %rbx
popq %rbp
jmp _ZD_MEMCLEAR ## TAILCALL
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _zd_chip_clear ; -- Begin function zd_chip_clear
.p2align 2
_zd_chip_clear: ; @zd_chip_clear
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x0
bl _mutex_is_locked
cmp w0, #0
cset w0, eq
bl _ZD_ASSERT
add x0, x19, #8
bl _zd_usb_clear
add x0, x19, #4
bl _zd_rf_clear
mov x0, x19
bl _mutex_destroy
mov x0, x19
mov w1, #12
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
b _ZD_MEMCLEAR
.cfi_endproc
; -- End function
.subsections_via_symbols
| AnghaBench/fastsocket/kernel/drivers/net/wireless/zd1211rw/extr_zd_chip.c_zd_chip_clear.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function thunder_pem_fdt_get_id
_thunder_pem_fdt_get_id: ## @thunder_pem_fdt_get_id
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rcx, %r14
movl %esi, %ebx
movq _PCI_ID_MSI@GOTPCREL(%rip), %rax
cmpl %edx, (%rax)
jne LBB0_4
## %bb.1:
callq _ofw_bus_get_node
movl %eax, %r15d
movl %ebx, %edi
callq _pci_get_rid
leaq -32(%rbp), %rcx
movl %r15d, %edi
movl %eax, %esi
xorl %edx, %edx
callq _ofw_bus_msimap
testl %eax, %eax
jne LBB0_3
## %bb.2:
movq -32(%rbp), %rax
movq %rax, (%r14)
xorl %eax, %eax
LBB0_3:
addq $8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
LBB0_4:
movl %ebx, %esi
movq %r14, %rcx
addq $8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
jmp _pcib_get_id ## TAILCALL
.cfi_endproc
## -- End function
.comm _PCI_ID_MSI,4,2 ## @PCI_ID_MSI
.no_dead_strip _thunder_pem_fdt_get_id
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function thunder_pem_fdt_get_id
_thunder_pem_fdt_get_id: ; @thunder_pem_fdt_get_id
.cfi_startproc
; %bb.0:
sub sp, sp, #64
.cfi_def_cfa_offset 64
stp x22, x21, [sp, #16] ; 16-byte Folded Spill
stp x20, x19, [sp, #32] ; 16-byte Folded Spill
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x19, x3
mov x20, x1
Lloh0:
adrp x8, _PCI_ID_MSI@GOTPAGE
Lloh1:
ldr x8, [x8, _PCI_ID_MSI@GOTPAGEOFF]
Lloh2:
ldr w8, [x8]
cmp w8, w2
b.ne LBB0_4
; %bb.1:
bl _ofw_bus_get_node
mov x21, x0
mov x0, x20
bl _pci_get_rid
mov x1, x0
add x3, sp, #8
mov x0, x21
mov x2, #0
bl _ofw_bus_msimap
cbnz w0, LBB0_3
; %bb.2:
ldr x8, [sp, #8]
str x8, [x19]
LBB0_3:
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #64
ret
LBB0_4:
mov x1, x20
mov x3, x19
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #64
b _pcib_get_id
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _PCI_ID_MSI,4,2 ; @PCI_ID_MSI
.no_dead_strip _thunder_pem_fdt_get_id
.subsections_via_symbols
| AnghaBench/freebsd/sys/arm64/cavium/extr_thunder_pcie_pem_fdt.c_thunder_pem_fdt_get_id.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function sctp_generate_autoclose_event
_sctp_generate_autoclose_event: ## @sctp_generate_autoclose_event
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
movq _timers@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq _SCTP_EVENT_TIMEOUT_AUTOCLOSE@GOTPCREL(%rip), %rbx
movq (%rbx), %rcx
movl (%rax,%rcx,4), %edx
movq %rdi, %rsi
callq _from_timer
movq (%rbx), %rsi
movq %rax, %rdi
addq $8, %rsp
popq %rbx
popq %rbp
jmp _sctp_generate_timeout_event ## TAILCALL
.cfi_endproc
## -- End function
.comm _timers,8,3 ## @timers
.comm _SCTP_EVENT_TIMEOUT_AUTOCLOSE,8,3 ## @SCTP_EVENT_TIMEOUT_AUTOCLOSE
.comm _asoc,8,3 ## @asoc
.no_dead_strip _sctp_generate_autoclose_event
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function sctp_generate_autoclose_event
_sctp_generate_autoclose_event: ; @sctp_generate_autoclose_event
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
Lloh0:
adrp x8, _timers@GOTPAGE
Lloh1:
ldr x8, [x8, _timers@GOTPAGEOFF]
Lloh2:
ldr x8, [x8]
Lloh3:
adrp x19, _SCTP_EVENT_TIMEOUT_AUTOCLOSE@GOTPAGE
Lloh4:
ldr x19, [x19, _SCTP_EVENT_TIMEOUT_AUTOCLOSE@GOTPAGEOFF]
ldr x9, [x19]
ldr w2, [x8, x9, lsl #2]
mov x1, x0
bl _from_timer
ldr x1, [x19]
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
b _sctp_generate_timeout_event
.loh AdrpLdrGot Lloh3, Lloh4
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _timers,8,3 ; @timers
.comm _SCTP_EVENT_TIMEOUT_AUTOCLOSE,8,3 ; @SCTP_EVENT_TIMEOUT_AUTOCLOSE
.comm _asoc,8,3 ; @asoc
.no_dead_strip _sctp_generate_autoclose_event
.subsections_via_symbols
| AnghaBench/linux/net/sctp/extr_sm_sideeffect.c_sctp_generate_autoclose_event.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
movq %rsi, %rbx
movl $1, %edi
callq _setuid
leaq 8(%rbx), %rsi
movq 8(%rbx), %rdi
callq _execv
movq (%rbx), %rdi
callq _perror
movl $1, %edi
callq _exit
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x1
mov w0, #1
bl _setuid
mov x1, x19
ldr x0, [x1, #8]!
bl _execv
ldr x0, [x19]
bl _perror
mov w0, #1
bl _exit
.cfi_endproc
; -- End function
.subsections_via_symbols
| the_stack_data/243892612.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
subq $32, %rsp
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -8(%rbp)
movaps L___const.main.x(%rip), %xmm0
movaps %xmm0, -32(%rbp)
movb $0, -16(%rbp)
leaq L_.str(%rip), %rdi
leaq -32(%rbp), %rsi
xorl %eax, %eax
callq _printf
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
cmpq -8(%rbp), %rax
jne LBB0_2
## %bb.1:
addq $32, %rsp
popq %rbp
retq
LBB0_2:
callq ___stack_chk_fail
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
.p2align 4 ## @__const.main.x
L___const.main.x:
.asciz "Pontep Thaweesup"
L_.str: ## @.str
.asciz "%s"
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
sub sp, sp, #64
.cfi_def_cfa_offset 64
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh0:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh1:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh2:
ldr x8, [x8]
stur x8, [x29, #-8]
Lloh3:
adrp x8, l___const.main.x@PAGE
Lloh4:
add x8, x8, l___const.main.x@PAGEOFF
Lloh5:
ldr q0, [x8]
str q0, [sp, #16]
strb wzr, [sp, #32]
add x8, sp, #16
str x8, [sp]
Lloh6:
adrp x0, l_.str@PAGE
Lloh7:
add x0, x0, l_.str@PAGEOFF
bl _printf
ldur x8, [x29, #-8]
Lloh8:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh9:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh10:
ldr x9, [x9]
cmp x9, x8
b.ne LBB0_2
; %bb.1:
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
add sp, sp, #64
ret
LBB0_2:
bl ___stack_chk_fail
.loh AdrpLdrGotLdr Lloh8, Lloh9, Lloh10
.loh AdrpAdd Lloh6, Lloh7
.loh AdrpAddLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l___const.main.x: ; @__const.main.x
.asciz "Pontep Thaweesup"
l_.str: ; @.str
.asciz "%s"
.subsections_via_symbols
| the_stack_data/73393.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movl $6, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
mov w0, #6
ret
.cfi_endproc
; -- End function
.subsections_via_symbols
| the_stack_data/3261521.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function matroxfb_dh_restore
_matroxfb_dh_restore: ## @matroxfb_dh_restore
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $24, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %ecx, %r15d
movq %rsi, %r13
movq %rdi, %r14
movq 16(%rdi), %rsi
xorl %eax, %eax
cmpl $16, %edx
setne %al
shll $22, %eax
addl $4194305, %eax ## imm = 0x400001
cmpl $15, %edx
movl $2097153, %edx ## imm = 0x200001
cmovnel %eax, %edx
movq 8(%rsi), %rax
movq _MATROXFB_SRC_CRTC2@GOTPCREL(%rip), %rcx
movq (%rcx), %rcx
cmpq %rcx, 16(%rax)
movq %rsi, -56(%rbp) ## 8-byte Spill
jne LBB0_4
## %bb.1:
cmpq $0, 16(%rsi)
je LBB0_3
## %bb.2:
movq 24(%rax), %rsi
movq _MATROXFB_OUTPUT_MODE_MONITOR@GOTPCREL(%rip), %rdi
cmpq (%rdi), %rsi
movl $6, %edi
movl $-1073737722, %esi ## imm = 0xC0001006
cmovel %edi, %esi
orl %edx, %esi
jmp LBB0_5
LBB0_4:
movl %edx, %esi
orl $4, %esi
cmpq %rcx, (%rax)
cmovnel %edx, %esi
jmp LBB0_5
LBB0_3:
orl $-1073741822, %edx ## imm = 0xC0000002
movl %edx, %esi
LBB0_5:
movl %esi, %ebx
orl $1048576, %ebx ## imm = 0x100000
cmpq %rcx, (%rax)
cmovnel %esi, %ebx
cmpq $0, 40(%r13)
je LBB0_7
## %bb.6:
orl $33554432, %ebx ## imm = 0x2000000
movdqu (%r13), %xmm0
psrad $1, %xmm0
movdqu %xmm0, (%r13)
LBB0_7:
movl 16(%r13), %esi
movl %esi, %eax
andl $7, %eax
cmpl $2, %eax
jne LBB0_8
## %bb.9:
andl $-8, %esi
movl %esi, 16(%r13)
movl $16, -44(%rbp) ## 4-byte Folded Spill
jmp LBB0_10
LBB0_8:
movl $0, -44(%rbp) ## 4-byte Folded Spill
LBB0_10:
movl 20(%r13), %eax
shll $16, %eax
addl $-524288, %eax ## imm = 0xFFF80000
addl $-8, %esi
orl %eax, %esi
movl $15380, %edi ## imm = 0x3C14
callq _mga_outl
movl 24(%r13), %eax
movl 28(%r13), %esi
shll $16, %eax
addl $-524288, %eax ## imm = 0xFFF80000
addl $-8, %esi
orl %eax, %esi
movl $15384, %edi ## imm = 0x3C18
callq _mga_outl
movl (%r13), %eax
movl 12(%r13), %esi
shll $16, %eax
addl $-65536, %eax ## imm = 0xFFFF0000
decl %esi
orl %eax, %esi
movl $15388, %edi ## imm = 0x3C1C
callq _mga_outl
movl 4(%r13), %esi
movl 8(%r13), %eax
shll $16, %eax
addl $-65536, %eax ## imm = 0xFFFF0000
decl %esi
orl %eax, %esi
movl $15392, %edi ## imm = 0x3C20
callq _mga_outl
movl 4(%r13), %esi
shll $16, %esi
orl 28(%r13), %esi
movl $15396, %edi ## imm = 0x3C24
callq _mga_outl
movl 8(%r14), %r12d
sarl $3, %r12d
imull 4(%r14), %r12d
testl $33554432, %ebx ## imm = 0x2000000
jne LBB0_11
## %bb.12:
movl $15400, %edi ## imm = 0x3C28
movl %r15d, %esi
callq _mga_outl
xorl %eax, %eax
jmp LBB0_13
LBB0_11:
movl $15404, %edi ## imm = 0x3C2C
movl %r15d, %esi
callq _mga_outl
addl %r12d, %r15d
movl $15400, %edi ## imm = 0x3C28
movl %r15d, %esi
callq _mga_outl
addl %r12d, %r12d
movl $1, %eax
LBB0_13:
movl %ebx, %r15d
orl $268435456, %r15d ## imm = 0x10000000
movl %eax, (%r14)
movl $15424, %edi ## imm = 0x3C40
movl %r12d, %esi
callq _mga_outl
movl $15436, %edi ## imm = 0x3C4C
movl -44(%rbp), %esi ## 4-byte Reload
callq _mga_outl
testl $33554432, %ebx ## imm = 0x2000000
je LBB0_18
## %bb.14:
movl %r15d, %esi
andl $-33554433, %esi ## imm = 0xFDFFFFFF
movl $15376, %edi ## imm = 0x3C10
callq _mga_outl
xorl %ebx, %ebx
.p2align 4, 0x90
LBB0_15: ## =>This Inner Loop Header: Depth=1
movl $15432, %edi ## imm = 0x3C48
callq _mga_inl
andl $4095, %eax ## imm = 0xFFF
cmpl %ebx, %eax
movl %eax, %ebx
jae LBB0_15
## %bb.16:
xorl %ebx, %ebx
.p2align 4, 0x90
LBB0_17: ## =>This Inner Loop Header: Depth=1
movl $15432, %edi ## imm = 0x3C48
callq _mga_inl
andl $4095, %eax ## imm = 0xFFF
cmpl %ebx, %eax
movl %eax, %ebx
jae LBB0_17
LBB0_18:
movl $15376, %edi ## imm = 0x3C10
movl %r15d, %esi
callq _mga_outl
movq -56(%rbp), %rax ## 8-byte Reload
movl %r15d, (%rax)
movl (%r13), %eax
movl 32(%r13), %ecx
shll $16, %eax
movq _FB_SYNC_HOR_HIGH_ACT@GOTPCREL(%rip), %rdx
movl (%rdx), %edx
andl %ecx, %edx
leal 256(%rax), %edi
testl %edx, %edx
cmovel %eax, %edi
movq _FB_SYNC_VERT_HIGH_ACT@GOTPCREL(%rip), %rax
andl (%rax), %ecx
leal 512(%rdi), %esi
testl %ecx, %ecx
cmovel %edi, %esi
movl $15428, %edi ## imm = 0x3C44
addq $24, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp _mga_outl ## TAILCALL
.cfi_endproc
## -- End function
.comm _MATROXFB_SRC_CRTC2,8,3 ## @MATROXFB_SRC_CRTC2
.comm _MATROXFB_OUTPUT_MODE_MONITOR,8,3 ## @MATROXFB_OUTPUT_MODE_MONITOR
.comm _FB_SYNC_HOR_HIGH_ACT,4,2 ## @FB_SYNC_HOR_HIGH_ACT
.comm _FB_SYNC_VERT_HIGH_ACT,4,2 ## @FB_SYNC_VERT_HIGH_ACT
.no_dead_strip _matroxfb_dh_restore
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function matroxfb_dh_restore
_matroxfb_dh_restore: ; @matroxfb_dh_restore
.cfi_startproc
; %bb.0:
stp x26, x25, [sp, #-80]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 80
stp x24, x23, [sp, #16] ; 16-byte Folded Spill
stp x22, x21, [sp, #32] ; 16-byte Folded Spill
stp x20, x19, [sp, #48] ; 16-byte Folded Spill
stp x29, x30, [sp, #64] ; 16-byte Folded Spill
add x29, sp, #64
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
mov x21, x3
mov x19, x1
mov x20, x0
ldr x25, [x0, #16]
mov w8, #1
movk w8, #32, lsl #16
add w9, w8, #512, lsl #12 ; =2097152
add w10, w8, #1536, lsl #12 ; =6291456
cmp w2, #16
csel w9, w9, w10, eq
cmp w2, #15
csel w10, w8, w9, eq
ldr x8, [x25, #8]
ldr x11, [x8, #16]
Lloh0:
adrp x9, _MATROXFB_SRC_CRTC2@GOTPAGE
Lloh1:
ldr x9, [x9, _MATROXFB_SRC_CRTC2@GOTPAGEOFF]
Lloh2:
ldr x9, [x9]
cmp x11, x9
b.ne LBB0_3
; %bb.1:
ldr x11, [x25, #16]
cbz x11, LBB0_4
; %bb.2:
ldr x11, [x8, #24]
Lloh3:
adrp x12, _MATROXFB_OUTPUT_MODE_MONITOR@GOTPAGE
Lloh4:
ldr x12, [x12, _MATROXFB_OUTPUT_MODE_MONITOR@GOTPAGEOFF]
Lloh5:
ldr x12, [x12]
cmp x11, x12
mov w11, #4102
movk w11, #49152, lsl #16
mov w12, #6
csel w11, w12, w11, eq
orr w10, w11, w10
b LBB0_5
LBB0_3:
ldr x11, [x8]
orr w12, w10, #0x4
cmp x11, x9
csel w10, w12, w10, eq
b LBB0_5
LBB0_4:
mov w11, #2
movk w11, #49152, lsl #16
orr w10, w10, w11
LBB0_5:
ldr x8, [x8]
orr w11, w10, #0x100000
cmp x8, x9
csel w26, w11, w10, eq
ldr x8, [x19, #40]
cbz x8, LBB0_7
; %bb.6:
orr w26, w26, #0x2000000
ldr q0, [x19]
sshr.4s v0, v0, #1
str q0, [x19]
LBB0_7:
ldr w8, [x19, #16]
and w9, w8, #0x7
cmp w9, #2
b.ne LBB0_9
; %bb.8:
and w8, w8, #0xfffffff8
str w8, [x19, #16]
mov w23, #16
b LBB0_10
LBB0_9:
mov w23, #0
LBB0_10:
orr w22, w26, #0x10000000
ldr w9, [x19, #20]
lsl w9, w9, #16
sub w9, w9, #128, lsl #12 ; =524288
sub w8, w8, #8
orr w1, w9, w8
mov w0, #15380
bl _mga_outl
ldp w8, w9, [x19, #24]
lsl w8, w8, #16
sub w8, w8, #128, lsl #12 ; =524288
sub w9, w9, #8
orr w1, w8, w9
mov w0, #15384
bl _mga_outl
ldr w8, [x19]
lsl w8, w8, #16
sub w8, w8, #16, lsl #12 ; =65536
ldr w9, [x19, #12]
sub w9, w9, #1
orr w1, w8, w9
mov w0, #15388
bl _mga_outl
ldp w9, w8, [x19, #4]
lsl w8, w8, #16
sub w8, w8, #16, lsl #12 ; =65536
sub w9, w9, #1
orr w1, w8, w9
mov w0, #15392
bl _mga_outl
ldr w8, [x19, #4]
ldr w9, [x19, #28]
orr w1, w9, w8, lsl #16
mov w0, #15396
bl _mga_outl
ldp w8, w9, [x20, #4]
asr w9, w9, #3
mul w24, w9, w8
tbnz w26, #25, LBB0_12
; %bb.11:
mov w0, #15400
mov x1, x21
bl _mga_outl
mov w8, #0
b LBB0_13
LBB0_12:
mov w0, #15404
mov x1, x21
bl _mga_outl
add w1, w24, w21
mov w0, #15400
bl _mga_outl
lsl w24, w24, #1
mov w8, #1
LBB0_13:
str w8, [x20]
mov w0, #15424
mov x1, x24
bl _mga_outl
mov w0, #15436
mov x1, x23
bl _mga_outl
tbz w26, #25, LBB0_18
; %bb.14:
and w1, w22, #0xfdffffff
mov w0, #15376
bl _mga_outl
mov w20, #0
LBB0_15: ; =>This Inner Loop Header: Depth=1
mov w0, #15432
bl _mga_inl
and w8, w0, #0xfff
cmp w8, w20
mov x20, x8
b.hs LBB0_15
; %bb.16:
mov w20, #0
LBB0_17: ; =>This Inner Loop Header: Depth=1
mov w0, #15432
bl _mga_inl
and w8, w0, #0xfff
cmp w8, w20
mov x20, x8
b.hs LBB0_17
LBB0_18:
mov w0, #15376
mov x1, x22
bl _mga_outl
str w22, [x25]
ldr w8, [x19]
lsl w9, w8, #16
ldr w10, [x19, #32]
Lloh6:
adrp x11, _FB_SYNC_HOR_HIGH_ACT@GOTPAGE
Lloh7:
ldr x11, [x11, _FB_SYNC_HOR_HIGH_ACT@GOTPAGEOFF]
Lloh8:
ldr w11, [x11]
mov w12, #256
bfi w12, w8, #16, #16
tst w11, w10
csel w8, w9, w12, eq
Lloh9:
adrp x9, _FB_SYNC_VERT_HIGH_ACT@GOTPAGE
Lloh10:
ldr x9, [x9, _FB_SYNC_VERT_HIGH_ACT@GOTPAGEOFF]
Lloh11:
ldr w9, [x9]
orr w11, w8, #0x200
tst w9, w10
csel w1, w8, w11, eq
mov w0, #15428
ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
ldp x22, x21, [sp, #32] ; 16-byte Folded Reload
ldp x24, x23, [sp, #16] ; 16-byte Folded Reload
ldp x26, x25, [sp], #80 ; 16-byte Folded Reload
b _mga_outl
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGotLdr Lloh9, Lloh10, Lloh11
.loh AdrpLdrGotLdr Lloh6, Lloh7, Lloh8
.cfi_endproc
; -- End function
.comm _MATROXFB_SRC_CRTC2,8,3 ; @MATROXFB_SRC_CRTC2
.comm _MATROXFB_OUTPUT_MODE_MONITOR,8,3 ; @MATROXFB_OUTPUT_MODE_MONITOR
.comm _FB_SYNC_HOR_HIGH_ACT,4,2 ; @FB_SYNC_HOR_HIGH_ACT
.comm _FB_SYNC_VERT_HIGH_ACT,4,2 ; @FB_SYNC_VERT_HIGH_ACT
.no_dead_strip _matroxfb_dh_restore
.subsections_via_symbols
| AnghaBench/fastsocket/kernel/drivers/video/matrox/extr_matroxfb_crtc2.c_matroxfb_dh_restore.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function dln2_spi_transfer_one
_dln2_spi_transfer_one: ## @dln2_spi_transfer_one
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdx, %rbx
movq %rsi, %r15
movq %rdi, %r12
callq _spi_master_get_devdata
movq %rax, %r14
movl 16(%rbx), %edx
movl 20(%rbx), %esi
movl (%r15), %ecx
movq %rax, %rdi
callq _dln2_spi_transfer_setup
testl %eax, %eax
js LBB0_1
## %bb.2:
xorl %r8d, %r8d
cmpl $0, 12(%rbx)
jne LBB0_5
## %bb.3:
movq %r12, %rdi
movq %rbx, %rsi
callq _spi_transfer_is_last
xorl %r8d, %r8d
testl %eax, %eax
jne LBB0_5
## %bb.4:
movq _DLN2_SPI_ATTR_LEAVE_SS_LOW@GOTPCREL(%rip), %rax
movl (%rax), %r8d
LBB0_5:
movl 8(%rbx), %esi
movl (%rbx), %ecx
movl 4(%rbx), %edx
movq %r14, %rdi
callq _dln2_spi_rdwr
movl %eax, %r15d
testl %eax, %eax
jns LBB0_8
## %bb.6:
leaq L_.str.1(%rip), %rsi
jmp LBB0_7
LBB0_1:
movl %eax, %r15d
leaq L_.str(%rip), %rsi
LBB0_7:
movq (%r14), %rdi
callq _dev_err
LBB0_8:
movl %r15d, %eax
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "Cannot setup transfer\n"
.comm _DLN2_SPI_ATTR_LEAVE_SS_LOW,4,2 ## @DLN2_SPI_ATTR_LEAVE_SS_LOW
L_.str.1: ## @.str.1
.asciz "write/read failed!\n"
.no_dead_strip _dln2_spi_transfer_one
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function dln2_spi_transfer_one
_dln2_spi_transfer_one: ; @dln2_spi_transfer_one
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x20, x2
mov x21, x1
mov x22, x0
bl _spi_master_get_devdata
mov x19, x0
ldp w2, w1, [x20, #16]
ldr w3, [x21]
bl _dln2_spi_transfer_setup
tbnz w0, #31, LBB0_3
; %bb.1:
ldr w8, [x20, #12]
cbz w8, LBB0_4
; %bb.2:
mov w4, #0
b LBB0_5
LBB0_3:
mov x21, x0
Lloh0:
adrp x1, l_.str@PAGE
Lloh1:
add x1, x1, l_.str@PAGEOFF
b LBB0_7
LBB0_4:
mov x0, x22
mov x1, x20
bl _spi_transfer_is_last
Lloh2:
adrp x8, _DLN2_SPI_ATTR_LEAVE_SS_LOW@GOTPAGE
Lloh3:
ldr x8, [x8, _DLN2_SPI_ATTR_LEAVE_SS_LOW@GOTPAGEOFF]
Lloh4:
ldr w8, [x8]
cmp w0, #0
csel w4, w8, wzr, eq
LBB0_5:
ldp w2, w1, [x20, #4]
ldr w3, [x20]
mov x0, x19
bl _dln2_spi_rdwr
mov x21, x0
tbz w0, #31, LBB0_8
; %bb.6:
Lloh5:
adrp x1, l_.str.1@PAGE
Lloh6:
add x1, x1, l_.str.1@PAGEOFF
LBB0_7:
ldr x0, [x19]
bl _dev_err
LBB0_8:
mov x0, x21
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
.loh AdrpAdd Lloh0, Lloh1
.loh AdrpLdrGotLdr Lloh2, Lloh3, Lloh4
.loh AdrpAdd Lloh5, Lloh6
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "Cannot setup transfer\n"
.comm _DLN2_SPI_ATTR_LEAVE_SS_LOW,4,2 ; @DLN2_SPI_ATTR_LEAVE_SS_LOW
l_.str.1: ; @.str.1
.asciz "write/read failed!\n"
.no_dead_strip _dln2_spi_transfer_one
.subsections_via_symbols
| AnghaBench/linux/drivers/spi/extr_spi-dln2.c_dln2_spi_transfer_one.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
subq $40, %rsp
.cfi_offset %rbx, -24
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -16(%rbp)
leaq L_.str(%rip), %rdi
movl $5, %esi
xorl %eax, %eax
callq _printf
leaq L_.str.1(%rip), %rbx
leaq -48(%rbp), %rsi
movq %rbx, %rdi
xorl %eax, %eax
callq _scanf
leaq -44(%rbp), %rsi
movq %rbx, %rdi
xorl %eax, %eax
callq _scanf
leaq -40(%rbp), %rsi
movq %rbx, %rdi
xorl %eax, %eax
callq _scanf
leaq -36(%rbp), %rsi
movq %rbx, %rdi
xorl %eax, %eax
callq _scanf
leaq -32(%rbp), %rsi
movq %rbx, %rdi
xorl %eax, %eax
callq _scanf
leaq L_.str.2(%rip), %rdi
xorl %eax, %eax
callq _printf
movl -48(%rbp), %esi
leaq L_.str.3(%rip), %rbx
movq %rbx, %rdi
xorl %eax, %eax
callq _printf
movl -44(%rbp), %esi
movq %rbx, %rdi
xorl %eax, %eax
callq _printf
movl -40(%rbp), %esi
movq %rbx, %rdi
xorl %eax, %eax
callq _printf
movl -36(%rbp), %esi
movq %rbx, %rdi
xorl %eax, %eax
callq _printf
movl -32(%rbp), %esi
movq %rbx, %rdi
xorl %eax, %eax
callq _printf
movq -48(%rbp), %rax
movq -36(%rbp), %rcx
rolq $32, %rax
rolq $32, %rcx
movq %rcx, -48(%rbp)
movq %rax, -36(%rbp)
leaq L_.str.4(%rip), %rdi
xorl %eax, %eax
callq _printf
movl -48(%rbp), %esi
movq %rbx, %rdi
xorl %eax, %eax
callq _printf
movl -44(%rbp), %esi
movq %rbx, %rdi
xorl %eax, %eax
callq _printf
movl -40(%rbp), %esi
movq %rbx, %rdi
xorl %eax, %eax
callq _printf
movl -36(%rbp), %esi
movq %rbx, %rdi
xorl %eax, %eax
callq _printf
movl -32(%rbp), %esi
movq %rbx, %rdi
xorl %eax, %eax
callq _printf
movl $10, %edi
callq _putchar
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
cmpq -16(%rbp), %rax
jne LBB0_2
## %bb.1:
addq $40, %rsp
popq %rbx
popq %rbp
retq
LBB0_2:
callq ___stack_chk_fail
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "enter array a (%d nums):\n"
L_.str.1: ## @.str.1
.asciz "%d"
L_.str.2: ## @.str.2
.asciz "The original order is:"
L_.str.3: ## @.str.3
.asciz "%5d"
L_.str.4: ## @.str.4
.asciz "\nThe inverted order is:"
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
sub sp, sp, #80
.cfi_def_cfa_offset 80
stp x20, x19, [sp, #48] ; 16-byte Folded Spill
stp x29, x30, [sp, #64] ; 16-byte Folded Spill
add x29, sp, #64
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
Lloh0:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh1:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh2:
ldr x8, [x8]
stur x8, [x29, #-24]
mov w8, #5
str x8, [sp]
Lloh3:
adrp x0, l_.str@PAGE
Lloh4:
add x0, x0, l_.str@PAGEOFF
bl _printf
add x20, sp, #16
str x20, [sp]
Lloh5:
adrp x19, l_.str.1@PAGE
Lloh6:
add x19, x19, l_.str.1@PAGEOFF
mov x0, x19
bl _scanf
orr x8, x20, #0x4
str x8, [sp]
mov x0, x19
bl _scanf
add x8, x20, #8
str x8, [sp]
mov x0, x19
bl _scanf
add x8, x20, #12
str x8, [sp]
mov x0, x19
bl _scanf
add x8, x20, #16
str x8, [sp]
mov x0, x19
bl _scanf
Lloh7:
adrp x0, l_.str.2@PAGE
Lloh8:
add x0, x0, l_.str.2@PAGEOFF
bl _printf
ldr w8, [sp, #16]
str x8, [sp]
Lloh9:
adrp x19, l_.str.3@PAGE
Lloh10:
add x19, x19, l_.str.3@PAGEOFF
mov x0, x19
bl _printf
ldr w8, [sp, #20]
str x8, [sp]
mov x0, x19
bl _printf
ldr w8, [sp, #24]
str x8, [sp]
mov x0, x19
bl _printf
ldr w8, [sp, #28]
str x8, [sp]
mov x0, x19
bl _printf
ldr w8, [sp, #32]
str x8, [sp]
mov x0, x19
bl _printf
ldur d0, [sp, #28]
rev64.2s v0, v0
ldr d1, [sp, #16]
str d0, [sp, #16]
rev64.2s v0, v1
stur d0, [sp, #28]
Lloh11:
adrp x0, l_.str.4@PAGE
Lloh12:
add x0, x0, l_.str.4@PAGEOFF
bl _printf
ldr w8, [sp, #16]
str x8, [sp]
mov x0, x19
bl _printf
ldr w8, [sp, #20]
str x8, [sp]
mov x0, x19
bl _printf
ldr w8, [sp, #24]
str x8, [sp]
mov x0, x19
bl _printf
ldr w8, [sp, #28]
str x8, [sp]
mov x0, x19
bl _printf
ldr w8, [sp, #32]
str x8, [sp]
mov x0, x19
bl _printf
mov w0, #10
bl _putchar
ldur x8, [x29, #-24]
Lloh13:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh14:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh15:
ldr x9, [x9]
cmp x9, x8
b.ne LBB0_2
; %bb.1:
ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
add sp, sp, #80
ret
LBB0_2:
bl ___stack_chk_fail
.loh AdrpLdrGotLdr Lloh13, Lloh14, Lloh15
.loh AdrpAdd Lloh11, Lloh12
.loh AdrpAdd Lloh9, Lloh10
.loh AdrpAdd Lloh7, Lloh8
.loh AdrpAdd Lloh5, Lloh6
.loh AdrpAdd Lloh3, Lloh4
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "enter array a (%d nums):\n"
l_.str.1: ; @.str.1
.asciz "%d"
l_.str.2: ; @.str.2
.asciz "The original order is:"
l_.str.3: ; @.str.3
.asciz "%5d"
l_.str.4: ; @.str.4
.asciz "\nThe inverted order is:"
.subsections_via_symbols
| the_stack_data/15763544.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function parse_options
_parse_options: ## @parse_options
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $24, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rsi, %rbx
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -48(%rbp)
movq %rsi, -64(%rbp)
callq _ADFS_SB
xorl %r13d, %r13d
testq %rbx, %rbx
je LBB0_18
## %bb.1:
movq %rax, %r15
leaq L_.str(%rip), %rsi
leaq -64(%rbp), %rdi
callq _strsep
testq %rax, %rax
je LBB0_18
## %bb.2:
movq %rax, %r14
leaq LJTI0_0(%rip), %rbx
jmp LBB0_3
.p2align 4, 0x90
LBB0_14: ## in Loop: Header=BB0_3 Depth=1
movq %r12, %rsp
leaq -64(%rbp), %rdi
leaq L_.str(%rip), %rsi
callq _strsep
movq %rax, %r14
testq %rax, %rax
je LBB0_15
LBB0_3: ## =>This Inner Loop Header: Depth=1
movq %rsp, %r12
movq _MAX_OPT_ARGS@GOTPCREL(%rip), %rax
movl (%rax), %eax
shlq $2, %rax
callq ____chkstk_darwin
addq $15, %rax
andq $-16, %rax
subq %rax, %rsp
movq %rsp, %r13
cmpb $0, (%r14)
je LBB0_14
## %bb.4: ## in Loop: Header=BB0_3 Depth=1
movq _tokens@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq %r14, %rdi
movq %r13, %rdx
callq _match_token
## kill: def $eax killed $eax def $rax
addl $-128, %eax
cmpl $3, %eax
ja LBB0_16
## %bb.5: ## in Loop: Header=BB0_3 Depth=1
movslq (%rbx,%rax,4), %rax
addq %rbx, %rax
jmpq *%rax
LBB0_6: ## in Loop: Header=BB0_3 Depth=1
movq %r13, %rdi
leaq -52(%rbp), %rsi
callq _match_int
testl %eax, %eax
jne LBB0_17
## %bb.7: ## in Loop: Header=BB0_3 Depth=1
movl -52(%rbp), %eax
movl %eax, (%r15)
jmp LBB0_14
LBB0_10: ## in Loop: Header=BB0_3 Depth=1
movq %r13, %rdi
leaq -52(%rbp), %rsi
callq _match_octal
testl %eax, %eax
jne LBB0_17
## %bb.11: ## in Loop: Header=BB0_3 Depth=1
movl -52(%rbp), %eax
movl %eax, 8(%r15)
jmp LBB0_14
LBB0_12: ## in Loop: Header=BB0_3 Depth=1
movq %r13, %rdi
leaq -52(%rbp), %rsi
callq _match_octal
testl %eax, %eax
jne LBB0_17
## %bb.13: ## in Loop: Header=BB0_3 Depth=1
movl -52(%rbp), %eax
movl %eax, 12(%r15)
jmp LBB0_14
LBB0_8: ## in Loop: Header=BB0_3 Depth=1
movq %r13, %rdi
leaq -52(%rbp), %rsi
callq _match_int
testl %eax, %eax
jne LBB0_17
## %bb.9: ## in Loop: Header=BB0_3 Depth=1
movl -52(%rbp), %eax
movl %eax, 4(%r15)
jmp LBB0_14
LBB0_15:
xorl %r13d, %r13d
jmp LBB0_18
LBB0_16:
leaq L_.str.1(%rip), %rdi
movq %r14, %rsi
callq _printk
LBB0_17:
movq _EINVAL@GOTPCREL(%rip), %rax
xorl %r13d, %r13d
subl (%rax), %r13d
movq %r12, %rsp
LBB0_18:
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
cmpq -48(%rbp), %rax
jne LBB0_20
## %bb.19:
movl %r13d, %eax
leaq -40(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB0_20:
callq ___stack_chk_fail
.cfi_endproc
.p2align 2, 0x90
.data_region jt32
.set L0_0_set_6, LBB0_6-LJTI0_0
.set L0_0_set_10, LBB0_10-LJTI0_0
.set L0_0_set_12, LBB0_12-LJTI0_0
.set L0_0_set_8, LBB0_8-LJTI0_0
LJTI0_0:
.long L0_0_set_6
.long L0_0_set_10
.long L0_0_set_12
.long L0_0_set_8
.end_data_region
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz ","
.comm _MAX_OPT_ARGS,4,2 ## @MAX_OPT_ARGS
.comm _tokens,4,2 ## @tokens
.comm _EINVAL,4,2 ## @EINVAL
L_.str.1: ## @.str.1
.asciz "ADFS-fs: unrecognised mount option \"%s\" or missing value\n"
.no_dead_strip _parse_options
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function parse_options
_parse_options: ; @parse_options
.cfi_startproc
; %bb.0:
stp x26, x25, [sp, #-80]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 80
stp x24, x23, [sp, #16] ; 16-byte Folded Spill
stp x22, x21, [sp, #32] ; 16-byte Folded Spill
stp x20, x19, [sp, #48] ; 16-byte Folded Spill
stp x29, x30, [sp, #64] ; 16-byte Folded Spill
add x29, sp, #64
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
sub sp, sp, #32
mov x20, x1
Lloh0:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh1:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh2:
ldr x8, [x8]
stur x8, [x29, #-72]
stur x1, [x29, #-80]
bl _ADFS_SB
cbz x20, LBB0_15
; %bb.1:
mov x19, x0
Lloh3:
adrp x1, l_.str@PAGE
Lloh4:
add x1, x1, l_.str@PAGEOFF
sub x0, x29, #80
bl _strsep
cbz x0, LBB0_18
; %bb.2:
mov x21, x0
Lloh5:
adrp x23, _MAX_OPT_ARGS@GOTPAGE
Lloh6:
ldr x23, [x23, _MAX_OPT_ARGS@GOTPAGEOFF]
Lloh7:
adrp x20, l_.str@PAGE
Lloh8:
add x20, x20, l_.str@PAGEOFF
Lloh9:
adrp x24, _tokens@GOTPAGE
Lloh10:
ldr x24, [x24, _tokens@GOTPAGEOFF]
Lloh11:
adrp x25, lJTI0_0@PAGE
Lloh12:
add x25, x25, lJTI0_0@PAGEOFF
b LBB0_4
LBB0_3: ; in Loop: Header=BB0_4 Depth=1
mov sp, x26
sub x0, x29, #80
mov x1, x20
bl _strsep
mov x21, x0
cbz x0, LBB0_18
LBB0_4: ; =>This Inner Loop Header: Depth=1
mov x26, sp
ldr w8, [x23]
lsl x8, x8, #2
mov x9, x8
Lloh13:
adrp x16, ___chkstk_darwin@GOTPAGE
Lloh14:
ldr x16, [x16, ___chkstk_darwin@GOTPAGEOFF]
blr x16
mov x9, sp
add x8, x8, #15
and x8, x8, #0x7fffffff0
sub x22, x9, x8
mov sp, x22
ldrb w8, [x21]
cbz w8, LBB0_3
; %bb.5: ; in Loop: Header=BB0_4 Depth=1
ldr w1, [x24]
mov x0, x21
mov x2, x22
bl _match_token
sub w8, w0, #128
cmp w8, #3
b.hi LBB0_16
; %bb.6: ; in Loop: Header=BB0_4 Depth=1
adr x9, LBB0_7
ldrb w10, [x25, x8]
add x9, x9, x10, lsl #2
br x9
LBB0_7: ; in Loop: Header=BB0_4 Depth=1
sub x1, x29, #84
mov x0, x22
bl _match_int
cbnz w0, LBB0_17
; %bb.8: ; in Loop: Header=BB0_4 Depth=1
ldur w8, [x29, #-84]
str w8, [x19]
b LBB0_3
LBB0_9: ; in Loop: Header=BB0_4 Depth=1
sub x1, x29, #84
mov x0, x22
bl _match_octal
cbnz w0, LBB0_17
; %bb.10: ; in Loop: Header=BB0_4 Depth=1
ldur w8, [x29, #-84]
str w8, [x19, #8]
b LBB0_3
LBB0_11: ; in Loop: Header=BB0_4 Depth=1
sub x1, x29, #84
mov x0, x22
bl _match_octal
cbnz w0, LBB0_17
; %bb.12: ; in Loop: Header=BB0_4 Depth=1
ldur w8, [x29, #-84]
str w8, [x19, #12]
b LBB0_3
LBB0_13: ; in Loop: Header=BB0_4 Depth=1
sub x1, x29, #84
mov x0, x22
bl _match_int
cbnz w0, LBB0_17
; %bb.14: ; in Loop: Header=BB0_4 Depth=1
ldur w8, [x29, #-84]
str w8, [x19, #4]
b LBB0_3
LBB0_15:
mov w0, #0
b LBB0_18
LBB0_16:
Lloh15:
adrp x0, l_.str.1@PAGE
Lloh16:
add x0, x0, l_.str.1@PAGEOFF
mov x1, x21
bl _printk
LBB0_17:
Lloh17:
adrp x8, _EINVAL@GOTPAGE
Lloh18:
ldr x8, [x8, _EINVAL@GOTPAGEOFF]
Lloh19:
ldr w8, [x8]
neg w0, w8
mov sp, x26
LBB0_18:
ldur x8, [x29, #-72]
Lloh20:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh21:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh22:
ldr x9, [x9]
cmp x9, x8
b.ne LBB0_20
; %bb.19:
sub sp, x29, #64
ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
ldp x22, x21, [sp, #32] ; 16-byte Folded Reload
ldp x24, x23, [sp, #16] ; 16-byte Folded Reload
ldp x26, x25, [sp], #80 ; 16-byte Folded Reload
ret
LBB0_20:
bl ___stack_chk_fail
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpAdd Lloh3, Lloh4
.loh AdrpAdd Lloh11, Lloh12
.loh AdrpLdrGot Lloh9, Lloh10
.loh AdrpAdd Lloh7, Lloh8
.loh AdrpLdrGot Lloh5, Lloh6
.loh AdrpLdrGot Lloh13, Lloh14
.loh AdrpAdd Lloh15, Lloh16
.loh AdrpLdrGotLdr Lloh17, Lloh18, Lloh19
.loh AdrpLdrGotLdr Lloh20, Lloh21, Lloh22
.cfi_endproc
.section __TEXT,__const
lJTI0_0:
.byte (LBB0_7-LBB0_7)>>2
.byte (LBB0_9-LBB0_7)>>2
.byte (LBB0_11-LBB0_7)>>2
.byte (LBB0_13-LBB0_7)>>2
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz ","
.comm _MAX_OPT_ARGS,4,2 ; @MAX_OPT_ARGS
.comm _tokens,4,2 ; @tokens
.comm _EINVAL,4,2 ; @EINVAL
l_.str.1: ; @.str.1
.asciz "ADFS-fs: unrecognised mount option \"%s\" or missing value\n"
.no_dead_strip _parse_options
.subsections_via_symbols
| AnghaBench/fastsocket/kernel/fs/adfs/extr_super.c_parse_options.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function die_string
_die_string: ## @die_string
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $16, %rsp
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %r8d, %r12d
movq %rcx, %r14
movl %esi, %r15d
movq %rdi, %rbx
movq $0, -40(%rbp)
leaq -40(%rbp), %rax
movl %esi, %edi
movl %edx, %esi
movq %rax, %rdx
movq %rbx, %rcx
callq _dwarf_attrval_string
movq _DW_DLV_OK@GOTPCREL(%rip), %rcx
cmpq (%rcx), %rax
jne LBB0_3
## %bb.1:
movq -40(%rbp), %rdi
testq %rdi, %rdi
je LBB0_3
## %bb.2:
callq _xstrdup
movq %rax, (%r14)
movl $1, %eax
jmp LBB0_7
LBB0_3:
testl %r12d, %r12d
je LBB0_5
## %bb.4:
movq %rbx, %rdi
movl %r15d, %esi
callq _die_off
movl %eax, %r14d
movl (%rbx), %edi
callq _dwarf_errmsg
leaq L_.str(%rip), %rdi
movl %r14d, %esi
movl %eax, %edx
callq _terminate
jmp LBB0_6
LBB0_5:
movq $0, (%r14)
LBB0_6:
xorl %eax, %eax
LBB0_7:
addq $16, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _DW_DLV_OK,8,3 ## @DW_DLV_OK
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "die %llu: failed to get string: %s\n"
.no_dead_strip _die_string
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function die_string
_die_string: ; @die_string
.cfi_startproc
; %bb.0:
sub sp, sp, #64
.cfi_def_cfa_offset 64
stp x22, x21, [sp, #16] ; 16-byte Folded Spill
stp x20, x19, [sp, #32] ; 16-byte Folded Spill
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x22, x4
mov x19, x3
mov x8, x2
mov x21, x1
mov x20, x0
str xzr, [sp, #8]
add x2, sp, #8
mov x0, x1
mov x1, x8
mov x3, x20
bl _dwarf_attrval_string
mov x8, x0
Lloh0:
adrp x9, _DW_DLV_OK@GOTPAGE
Lloh1:
ldr x9, [x9, _DW_DLV_OK@GOTPAGEOFF]
Lloh2:
ldr x9, [x9]
ldr x0, [sp, #8]
cmp x8, x9
ccmp x0, #0, #4, eq
b.ne LBB0_3
; %bb.1:
cbz w22, LBB0_4
; %bb.2:
mov x0, x20
mov x1, x21
bl _die_off
mov x19, x0
ldr w0, [x20]
bl _dwarf_errmsg
mov x2, x0
Lloh3:
adrp x0, l_.str@PAGE
Lloh4:
add x0, x0, l_.str@PAGEOFF
mov x1, x19
bl _terminate
mov w0, #0
b LBB0_5
LBB0_3:
bl _xstrdup
str x0, [x19]
mov w0, #1
b LBB0_5
LBB0_4:
mov w0, #0
str xzr, [x19]
LBB0_5:
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #64
ret
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpAdd Lloh3, Lloh4
.cfi_endproc
; -- End function
.comm _DW_DLV_OK,8,3 ; @DW_DLV_OK
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "die %llu: failed to get string: %s\n"
.no_dead_strip _die_string
.subsections_via_symbols
| AnghaBench/freebsd/cddl/contrib/opensolaris/tools/ctf/cvt/extr_dwarf.c_die_string.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _test_submodule_init__cleanup ## -- Begin function test_submodule_init__cleanup
.p2align 4, 0x90
_test_submodule_init__cleanup: ## @test_submodule_init__cleanup
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
xorl %eax, %eax
popq %rbp
jmp _cl_git_sandbox_cleanup ## TAILCALL
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _test_submodule_init__cleanup ; -- Begin function test_submodule_init__cleanup
.p2align 2
_test_submodule_init__cleanup: ; @test_submodule_init__cleanup
.cfi_startproc
; %bb.0:
b _cl_git_sandbox_cleanup
.cfi_endproc
; -- End function
.subsections_via_symbols
| AnghaBench/libgit2/tests/submodule/extr_init.c_test_submodule_init__cleanup.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _sm_listen ## -- Begin function sm_listen
.p2align 4, 0x90
_sm_listen: ## @sm_listen
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $32, %rsp
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %edi, %r15d
movq _AF_INET@GOTPCREL(%rip), %r12
movq (%r12), %rdi
movq _SOCK_STREAM@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq _IPPROTO_TCP@GOTPCREL(%rip), %rax
movl (%rax), %edx
callq _socket
testl %eax, %eax
js LBB0_7
## %bb.1:
movl %eax, %ebx
movq _F_GETFL@GOTPCREL(%rip), %rax
movl (%rax), %esi
movl %ebx, %edi
callq _fcntl
movl %eax, %r14d
movq (%r12), %rax
movq %rax, -48(%rbp)
movq _INADDR_ANY@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -56(%rbp)
movl %r15d, %edi
callq _htons
movq %rax, -64(%rbp)
movl $1, -40(%rbp)
movl $1, -36(%rbp)
movq _SOL_SOCKET@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq _SO_REUSEADDR@GOTPCREL(%rip), %rax
movl (%rax), %edx
leaq -40(%rbp), %rcx
movl %ebx, %edi
movl $4, %r8d
callq _setsockopt
testq %rax, %rax
js LBB0_6
## %bb.2:
testl %r14d, %r14d
js LBB0_6
## %bb.3:
movq _FIONBIO@GOTPCREL(%rip), %rax
movl (%rax), %esi
leaq -36(%rbp), %rdx
movl %ebx, %edi
callq _ioctl
testq %rax, %rax
js LBB0_6
## %bb.4:
leaq -64(%rbp), %rsi
movl %ebx, %edi
movl $24, %edx
callq _bind
testq %rax, %rax
js LBB0_6
## %bb.5:
movl %ebx, %edi
movl $5, %esi
callq _listen
testq %rax, %rax
je LBB0_8
LBB0_6:
movl %ebx, %edi
callq _close
LBB0_7:
movl $-1, %ebx
LBB0_8:
movl %ebx, %eax
addq $32, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _AF_INET,8,3 ## @AF_INET
.comm _SOCK_STREAM,4,2 ## @SOCK_STREAM
.comm _IPPROTO_TCP,4,2 ## @IPPROTO_TCP
.comm _F_GETFL,4,2 ## @F_GETFL
.comm _INADDR_ANY,8,3 ## @INADDR_ANY
.comm _SOL_SOCKET,4,2 ## @SOL_SOCKET
.comm _SO_REUSEADDR,4,2 ## @SO_REUSEADDR
.comm _FIONBIO,4,2 ## @FIONBIO
.comm _INVALID_SOCKET,4,2 ## @INVALID_SOCKET
.comm _SOCKET_ERROR,8,3 ## @SOCKET_ERROR
.comm _stderr,4,2 ## @stderr
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _sm_listen ; -- Begin function sm_listen
.p2align 2
_sm_listen: ; @sm_listen
.cfi_startproc
; %bb.0:
sub sp, sp, #80
.cfi_def_cfa_offset 80
stp x22, x21, [sp, #32] ; 16-byte Folded Spill
stp x20, x19, [sp, #48] ; 16-byte Folded Spill
stp x29, x30, [sp, #64] ; 16-byte Folded Spill
add x29, sp, #64
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x21, x0
Lloh0:
adrp x22, _AF_INET@GOTPAGE
Lloh1:
ldr x22, [x22, _AF_INET@GOTPAGEOFF]
ldr x0, [x22]
Lloh2:
adrp x8, _SOCK_STREAM@GOTPAGE
Lloh3:
ldr x8, [x8, _SOCK_STREAM@GOTPAGEOFF]
Lloh4:
ldr w1, [x8]
Lloh5:
adrp x8, _IPPROTO_TCP@GOTPAGE
Lloh6:
ldr x8, [x8, _IPPROTO_TCP@GOTPAGEOFF]
Lloh7:
ldr w2, [x8]
bl _socket
tbnz w0, #31, LBB0_7
; %bb.1:
mov x19, x0
Lloh8:
adrp x8, _F_GETFL@GOTPAGE
Lloh9:
ldr x8, [x8, _F_GETFL@GOTPAGEOFF]
Lloh10:
ldr w1, [x8]
bl _fcntl
mov x20, x0
ldr x8, [x22]
Lloh11:
adrp x9, _INADDR_ANY@GOTPAGE
Lloh12:
ldr x9, [x9, _INADDR_ANY@GOTPAGEOFF]
Lloh13:
ldr x9, [x9]
stp x9, x8, [sp, #16]
mov x0, x21
bl _htons
str x0, [sp, #8]
mov w8, #1
stp w8, w8, [sp]
Lloh14:
adrp x8, _SOL_SOCKET@GOTPAGE
Lloh15:
ldr x8, [x8, _SOL_SOCKET@GOTPAGEOFF]
Lloh16:
ldr w1, [x8]
Lloh17:
adrp x8, _SO_REUSEADDR@GOTPAGE
Lloh18:
ldr x8, [x8, _SO_REUSEADDR@GOTPAGEOFF]
Lloh19:
ldr w2, [x8]
add x3, sp, #4
mov x0, x19
mov w4, #4
bl _setsockopt
tbnz x0, #63, LBB0_6
; %bb.2:
tbnz w20, #31, LBB0_6
; %bb.3:
Lloh20:
adrp x8, _FIONBIO@GOTPAGE
Lloh21:
ldr x8, [x8, _FIONBIO@GOTPAGEOFF]
Lloh22:
ldr w1, [x8]
mov x2, sp
mov x0, x19
bl _ioctl
tbnz x0, #63, LBB0_6
; %bb.4:
add x1, sp, #8
mov x0, x19
mov w2, #24
bl _bind
tbnz x0, #63, LBB0_6
; %bb.5:
mov x0, x19
mov w1, #5
bl _listen
cbz x0, LBB0_8
LBB0_6:
mov x0, x19
bl _close
LBB0_7:
mov w19, #-1
LBB0_8:
mov x0, x19
ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
ldp x22, x21, [sp, #32] ; 16-byte Folded Reload
add sp, sp, #80
ret
.loh AdrpLdrGotLdr Lloh5, Lloh6, Lloh7
.loh AdrpLdrGotLdr Lloh2, Lloh3, Lloh4
.loh AdrpLdrGot Lloh0, Lloh1
.loh AdrpLdrGotLdr Lloh17, Lloh18, Lloh19
.loh AdrpLdrGotLdr Lloh14, Lloh15, Lloh16
.loh AdrpLdrGotLdr Lloh11, Lloh12, Lloh13
.loh AdrpLdrGotLdr Lloh8, Lloh9, Lloh10
.loh AdrpLdrGotLdr Lloh20, Lloh21, Lloh22
.cfi_endproc
; -- End function
.comm _AF_INET,8,3 ; @AF_INET
.comm _SOCK_STREAM,4,2 ; @SOCK_STREAM
.comm _IPPROTO_TCP,4,2 ; @IPPROTO_TCP
.comm _F_GETFL,4,2 ; @F_GETFL
.comm _INADDR_ANY,8,3 ; @INADDR_ANY
.comm _SOL_SOCKET,4,2 ; @SOL_SOCKET
.comm _SO_REUSEADDR,4,2 ; @SO_REUSEADDR
.comm _FIONBIO,4,2 ; @FIONBIO
.comm _INVALID_SOCKET,4,2 ; @INVALID_SOCKET
.comm _SOCKET_ERROR,8,3 ; @SOCKET_ERROR
.comm _stderr,4,2 ; @stderr
.subsections_via_symbols
| AnghaBench/ios-webkit-debug-proxy/src/extr_socket_manager.c_sm_listen.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _ar5212ModifyRfBuffer ## -- Begin function ar5212ModifyRfBuffer
.p2align 4, 0x90
_ar5212ModifyRfBuffer: ## @ar5212ModifyRfBuffer
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %r8d, %r14d
movl %ecx, %r12d
movl %edx, %r15d
movl %esi, %r13d
movq %rdi, %rbx
xorl %edi, %edi
cmpl $4, %r8d
setl %dil
callq _HALASSERT
xorl %edi, %edi
cmpl $33, %r15d
setl %dil
callq _HALASSERT
leal (%r12,%r15), %eax
xorl %edi, %edi
cmpl $320, %eax ## imm = 0x140
setl %dil
callq _HALASSERT
movl %r13d, %edi
movl %r15d, %esi
callq _ath_hal_reverseBits
testl %r15d, %r15d
jle LBB0_4
## %bb.1:
leal -1(%r12), %ecx
leal 6(%r12), %r9d
testl %ecx, %ecx
cmovnsl %ecx, %r9d
movl %r9d, %ecx
andl $-8, %ecx
negl %ecx
leal (%r12,%rcx), %r8d
decl %r8d
sarl $3, %r9d
shll $3, %r14d
leal (%r8,%r15), %esi
cmpl $8, %esi
movl $8, %ecx
cmovll %esi, %ecx
movl $-1, %edx
## kill: def $cl killed $cl killed $ecx
shll %cl, %edx
movl $-1, %edi
movl %r8d, %ecx
shll %cl, %edi
xorl %edx, %edi
movl %r14d, %ecx
shll %cl, %edi
movl %eax, %edx
movl %r8d, %ecx
shll %cl, %edx
movslq %r9d, %r9
movl %r14d, %ecx
shll %cl, %edx
movl $8, %ecx
movl (%rbx,%r9,4), %r10d
xorl %r10d, %edx
andl %edi, %edx
xorl %r10d, %edx
movl %edx, (%rbx,%r9,4)
subl %r8d, %ecx
cmpl %ecx, %r15d
jle LBB0_4
## %bb.2:
## kill: def $cl killed $cl killed $ecx
sarl %cl, %eax
leaq (%rbx,%r9,4), %rdx
addq $4, %rdx
.p2align 4, 0x90
LBB0_3: ## =>This Inner Loop Header: Depth=1
addl $-8, %esi
cmpl $8, %esi
movl $8, %ecx
cmovll %esi, %ecx
movl $-1, %edi
## kill: def $cl killed $cl killed $ecx
shll %cl, %edi
notl %edi
movl %r14d, %ecx
shll %cl, %edi
movl %eax, %ebx
shll %cl, %ebx
movl (%rdx), %ecx
xorl %ecx, %ebx
andl %edi, %ebx
xorl %ecx, %ebx
sarl $8, %eax
movl %ebx, (%rdx)
addq $4, %rdx
cmpl $8, %esi
ja LBB0_3
LBB0_4:
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _MAX_ANALOG_START,4,2 ## @MAX_ANALOG_START
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _ar5212ModifyRfBuffer ; -- Begin function ar5212ModifyRfBuffer
.p2align 2
_ar5212ModifyRfBuffer: ; @ar5212ModifyRfBuffer
.cfi_startproc
; %bb.0:
stp x24, x23, [sp, #-64]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 64
stp x22, x21, [sp, #16] ; 16-byte Folded Spill
stp x20, x19, [sp, #32] ; 16-byte Folded Spill
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
mov x21, x4
mov x22, x3
mov x19, x2
mov x23, x1
mov x20, x0
cmp w4, #4
cset w0, lt
bl _HALASSERT
cmp w19, #33
cset w0, lt
bl _HALASSERT
add w8, w22, w19
cmp w8, #320
cset w0, lt
bl _HALASSERT
mov x0, x23
mov x1, x19
bl _ath_hal_reverseBits
cmp w19, #1
b.lt LBB0_4
; %bb.1:
sub w11, w22, #1
add w8, w11, #7
cmp w11, #0
csel w12, w8, w11, lt
asr w14, w12, #3
and w13, w12, #0xfffffff8
lsl w8, w21, #3
sub w15, w11, w13
add w10, w15, w19
mov w9, #8
cmp w10, #8
csel w16, w10, w9, lt
mov w10, #-1
lsl w16, w10, w16
lsl w17, w10, w15
eor w16, w16, w17
lsl w16, w16, w8
sbfiz x14, x14, #2, #32
ldr w17, [x20, x14]
bic w17, w17, w16
lsl w1, w0, w15
lsl w1, w1, w8
and w16, w16, w1
orr w16, w17, w16
str w16, [x20, x14]
sub w14, w9, w15
sub w15, w19, w14
cmp w15, #1
b.lt LBB0_4
; %bb.2:
sbfx x15, x12, #3, #29
asr w12, w0, w14
add x14, x20, x15, lsl #2
add x14, x14, #4
add w11, w11, w19
sub w11, w11, w13
LBB0_3: ; =>This Inner Loop Header: Depth=1
sub w11, w11, #8
cmp w11, #8
csel w13, w11, w9, lt
lsl w13, w10, w13
mvn w13, w13
lsl w13, w13, w8
ldr w15, [x14]
bic w15, w15, w13
lsl w16, w12, w8
and w13, w13, w16
orr w13, w15, w13
str w13, [x14], #4
asr w12, w12, #8
b.hi LBB0_3
LBB0_4:
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
ldp x24, x23, [sp], #64 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
.comm _MAX_ANALOG_START,4,2 ; @MAX_ANALOG_START
.subsections_via_symbols
| AnghaBench/freebsd/sys/dev/ath/ath_hal/ar5212/extr_ar5212_reset.c_ar5212ModifyRfBuffer.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
## ---------------------------------------------------------------------------
## vnodeSingleMeterMultiOutputProcessor(arg0 /* rdi */)
## clang-generated x86-64 (SysV AMD64, Mach-O) output — comments only; the
## instruction stream is byte-identical to the compiler's.
## Register roles (established in the prologue):
##   r15 = arg0                  base object; fields read/written at +0..+72
##   r14 = arg0 + 24             handle passed to isTSCompQuery/isQueryKilled
##   r12 = *(arg0 + 8)           context for scan/finalize/result helpers
##   r13 = *(arg0 + 16)          id block; fields +0/+4/+8 feed dTrace
## NOTE(review): field meanings are inferred from offsets and callee names
## only — confirm against the original C (TDengine vnodeQueryProcess.c).
## ---------------------------------------------------------------------------
.p2align 4, 0x90 ## -- Begin function vnodeSingleMeterMultiOutputProcessor
_vnodeSingleMeterMultiOutputProcessor: ## @vnodeSingleMeterMultiOutputProcessor
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdi, %r15
leaq 24(%rdi), %r14
movq 8(%rdi), %r12
movq 16(%rdi), %r13
movq %r14, %rdi
callq _isTSCompQuery
testl %eax, %eax
jne LBB0_2
## LBB0_1: reset the output buffer; also the re-entry point of the
## "skip current result and rescan" retry loop (reached again from LBB0_13).
LBB0_1:
movq %r12, %rdi
callq _resetCtxOutputBuf
## LBB0_2: scan all data, finalize results, and bail out via the common
## epilogue (LBB0_20) if the query has been killed meanwhile.
LBB0_2:
movq %r12, %rdi
callq _vnodeScanAllData
movq %r12, %rdi
callq _doFinalizeResult
movq %r14, %rdi
callq _isQueryKilled
testq %rax, %rax
jne LBB0_20
## %bb.3: rax = getNumOfResult(ctx), stored at arg0+24. When the fields at
## +72 and +32 and the result count are all positive, skip results and
## reload the count from *(arg0+24) (via r14).
## %bb.3:
movq %r12, %rdi
callq _getNumOfResult
movq %rax, 24(%r15)
cmpq $0, 72(%r15)
jle LBB0_7
## %bb.4:
cmpq $0, 32(%r15)
jle LBB0_7
## %bb.5:
testq %rax, %rax
jle LBB0_7
## %bb.6:
movq %r12, %rdi
callq _doSkipResults
movq (%r14), %rax
## LBB0_7: rax = current result count. Positive -> deliver (LBB0_14).
## Otherwise, unless status matches (QUERY_NO_DATA_TO_CHECK|QUERY_COMPLETED),
## try to load the next block and retry the scan.
LBB0_7:
testq %rax, %rax
jg LBB0_14
## %bb.8:
movl 64(%r15), %edi
movq _QUERY_NO_DATA_TO_CHECK@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq _QUERY_COMPLETED@GOTPCREL(%rip), %rax
orl (%rax), %esi
callq _Q_STATUS_EQUAL
testq %rax, %rax
jne LBB0_14
## %bb.9: ret = loadRequiredBlockIntoMem(ctx, ctx). cl becomes the assert()
## argument: 1 if ret > 0; 0 if ret == 0; and for ret < 0 it is
## (Q_STATUS_EQUAL(status, QUERY_NO_DATA_TO_CHECK) != 0).
movq %r12, %rdi
movq %r12, %rsi
callq _loadRequiredBlockIntoMem
movb $1, %cl
testq %rax, %rax
jg LBB0_13
## %bb.10:
js LBB0_12
## %bb.11:
xorl %ecx, %ecx
jmp LBB0_13
LBB0_12:
movl 64(%r15), %edi
movq _QUERY_NO_DATA_TO_CHECK@GOTPCREL(%rip), %rax
movl (%rax), %esi
callq _Q_STATUS_EQUAL
testq %rax, %rax
setne %cl
## LBB0_13: assert(cl); log the "skip current result" trace (L_.str; the
## last two variadic args are passed on the stack), then loop to LBB0_1.
LBB0_13:
movzbl %cl, %edi
callq _assert
movl 8(%r13), %edx
movl (%r13), %r8d
movl 4(%r13), %ecx
movq 72(%r15), %r9
movl 56(%r15), %ebx
movl 60(%r15), %r10d
leaq L_.str(%rip), %rdi
movq %r15, %rsi
xorl %eax, %eax
pushq %rbx
pushq %r10
callq _dTrace
addq $16, %rsp
jmp LBB0_1
## LBB0_14: results available — apply limit, move descending-order results
## to the front, accumulate the count into arg0+4, then handle the
## QUERY_RESBUF_FULL status.
LBB0_14:
movq %r15, %rdi
callq _doRevisedResultsByLimit
movq %r12, %rdi
callq _moveDescOrderResultsToFront
movl 24(%r15), %eax
movl 64(%r15), %edi
addl %eax, 4(%r15)
movq _QUERY_RESBUF_FULL@GOTPCREL(%rip), %rax
movl (%rax), %esi
callq _Q_STATUS_EQUAL
testq %rax, %rax
je LBB0_19
## %bb.15: buffer full — same load/assert pattern as %bb.9..LBB0_13, then
## log the "query abort due to buffer limitation" trace (L_.str.1).
movq %r12, %rdi
movq %r12, %rsi
callq _loadRequiredBlockIntoMem
movl $1, %edi
testq %rax, %rax
jg LBB0_18
## %bb.16:
xorl %edi, %edi
testq %rax, %rax
jns LBB0_18
## %bb.17:
movl 64(%r15), %edi
movq _QUERY_NO_DATA_TO_CHECK@GOTPCREL(%rip), %rax
movl (%rax), %esi
callq _Q_STATUS_EQUAL
xorl %edi, %edi
testq %rax, %rax
setne %dil
LBB0_18:
callq _assert
movl 8(%r13), %edx
movl (%r13), %r8d
movl 4(%r13), %ecx
movslq 60(%r15), %r9
movl 56(%r15), %ebx
subq $8, %rsp
leaq L_.str.1(%rip), %rdi
movq %r15, %rsi
xorl %eax, %eax
pushq %rbx
callq _dTrace
addq $16, %rsp
## LBB0_19: final "%d points returned" trace (L_.str.2); copy field +48
## into +40. If isTSCompQuery() == 0 run the bound assert at LBB0_21,
## otherwise fall into the plain epilogue.
LBB0_19:
movl 8(%r13), %edx
movl (%r13), %r8d
movl 4(%r13), %ecx
movq 24(%r15), %r9
movl (%r15), %ebx
movl 4(%r15), %r10d
leaq L_.str.2(%rip), %rdi
movq %r15, %rsi
xorl %eax, %eax
pushq %rbx
pushq %r10
callq _dTrace
addq $16, %rsp
movq 48(%r15), %rax
movq %rax, 40(%r15)
movq %r14, %rdi
callq _isTSCompQuery
testl %eax, %eax
je LBB0_21
## LBB0_20: common epilogue.
LBB0_20:
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
## LBB0_21: tail-call assert(field(+24) <= field(+48)).
LBB0_21:
movq 24(%r15), %rax
xorl %edi, %edi
cmpq 48(%r15), %rax
setle %dil
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp _assert ## TAILCALL
.cfi_endproc
## -- End function
.comm _QUERY_COMPLETED,4,2 ## @QUERY_COMPLETED
.comm _QUERY_NO_DATA_TO_CHECK,4,2 ## @QUERY_NO_DATA_TO_CHECK
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "QInfo:%p vid:%d sid:%d id:%s, skip current result, offset:%lld, next qrange:%lld-%lld"
.comm _QUERY_RESBUF_FULL,4,2 ## @QUERY_RESBUF_FULL
L_.str.1: ## @.str.1
.asciz "QInfo:%p vid:%d sid:%d id:%s, query abort due to buffer limitation, next qrange:%lld-%lld"
L_.str.2: ## @.str.2
.asciz "QInfo:%p vid:%d sid:%d id:%s, %d points returned, totalRead:%d totalReturn:%d"
.no_dead_strip _vnodeSingleMeterMultiOutputProcessor
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
; ---------------------------------------------------------------------------
; vnodeSingleMeterMultiOutputProcessor(arg0 /* x0 */)
; clang-generated AArch64 (AAPCS64, Mach-O) output — comments only; the
; instruction stream is byte-identical to the compiler's.
; Register roles (established in the prologue / %bb.3):
;   x19 = arg0                  base object; fields read/written at +0..+72
;   x20 = arg0 + 24             handle for isTSCompQuery / isQueryKilled
;   x21 = *(arg0 + 8)           context for scan/finalize/result helpers
;   x23 = *(arg0 + 16)          id block; fields +0/+4/+8 feed dTrace
;   x25 = &QUERY_COMPLETED, x24 = &QUERY_NO_DATA_TO_CHECK (GOT, hoisted)
;   x22 = l_.str                "skip current result" trace format
; Unlike the x86 rendering, the rescan retry path is compiled as an
; explicit loop with header LBB0_6.
; NOTE(review): field meanings are inferred from offsets and callee names
; only — confirm against the original C (TDengine vnodeQueryProcess.c).
; ---------------------------------------------------------------------------
.p2align 2 ; -- Begin function vnodeSingleMeterMultiOutputProcessor
_vnodeSingleMeterMultiOutputProcessor: ; @vnodeSingleMeterMultiOutputProcessor
.cfi_startproc
; %bb.0:
sub sp, sp, #96
.cfi_def_cfa_offset 96
stp x26, x25, [sp, #16] ; 16-byte Folded Spill
stp x24, x23, [sp, #32] ; 16-byte Folded Spill
stp x22, x21, [sp, #48] ; 16-byte Folded Spill
stp x20, x19, [sp, #64] ; 16-byte Folded Spill
stp x29, x30, [sp, #80] ; 16-byte Folded Spill
add x29, sp, #80
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
mov x19, x0
add x20, x0, #24
ldp x21, x23, [x0, #8]
mov x0, x20
bl _isTSCompQuery
cbnz w0, LBB0_2
; %bb.1: not a TS-comp query — reset the output buffer first.
; %bb.1:
mov x0, x21
bl _resetCtxOutputBuf
; LBB0_2: initial scan / finalize; bail out (LBB0_23) if query killed.
LBB0_2:
mov x0, x21
bl _vnodeScanAllData
mov x0, x21
bl _doFinalizeResult
mov x0, x20
bl _isQueryKilled
cbnz x0, LBB0_23
; %bb.3: hoist GOT addresses and the trace format for the retry loop.
; %bb.3:
Lloh0:
adrp x25, _QUERY_COMPLETED@GOTPAGE
Lloh1:
ldr x25, [x25, _QUERY_COMPLETED@GOTPAGEOFF]
Lloh2:
adrp x24, _QUERY_NO_DATA_TO_CHECK@GOTPAGE
Lloh3:
ldr x24, [x24, _QUERY_NO_DATA_TO_CHECK@GOTPAGEOFF]
Lloh4:
adrp x22, l_.str@PAGE
Lloh5:
add x22, x22, l_.str@PAGEOFF
b LBB0_6
; LBB0_4/LBB0_5: assert(w0) with the value selected below, log the
; "skip current result" trace, rescan/finalize, and re-enter the loop
; (or exit through LBB0_23 if the query was killed).
LBB0_4: ; in Loop: Header=BB0_6 Depth=1
mov w0, #1
LBB0_5: ; in Loop: Header=BB0_6 Depth=1
bl _assert
ldp w3, w2, [x23, #4]
ldr w4, [x23]
ldr x5, [x19, #72]
ldp w8, w6, [x19, #56]
str x8, [sp]
mov x0, x22
mov x1, x19
bl _dTrace
mov x0, x21
bl _resetCtxOutputBuf
mov x0, x21
bl _vnodeScanAllData
mov x0, x21
bl _doFinalizeResult
mov x0, x20
bl _isQueryKilled
cbnz x0, LBB0_23
; LBB0_6: loop header — x0 = getNumOfResult(ctx), stored at arg0+24;
; conditionally skip results (fields +72, +32 and count all positive).
LBB0_6: ; =>This Inner Loop Header: Depth=1
mov x0, x21
bl _getNumOfResult
str x0, [x19, #24]
ldr x8, [x19, #72]
cmp x8, #1
b.lt LBB0_9
; %bb.7: ; in Loop: Header=BB0_6 Depth=1
ldr x8, [x19, #32]
cmp x8, #1
ccmp x0, #1, #8, ge
b.lt LBB0_9
; %bb.8: ; in Loop: Header=BB0_6 Depth=1
mov x0, x21
bl _doSkipResults
ldr x0, [x20]
; LBB0_9: x0 = result count; positive -> deliver at LBB0_15. Otherwise
; retry unless status matches (QUERY_NO_DATA_TO_CHECK|QUERY_COMPLETED).
LBB0_9: ; in Loop: Header=BB0_6 Depth=1
cmp x0, #0
b.gt LBB0_15
; %bb.10: ; in Loop: Header=BB0_6 Depth=1
ldr w0, [x19, #64]
ldr w8, [x25]
ldr w9, [x24]
orr w1, w9, w8
bl _Q_STATUS_EQUAL
cbnz x0, LBB0_15
; %bb.11: ret = loadRequiredBlockIntoMem(ctx, ctx); choose assert arg:
; ret > 0 -> 1 (LBB0_4); ret == 0 -> 0 (%bb.13);
; ret < 0 -> Q_STATUS_EQUAL(status, QUERY_NO_DATA_TO_CHECK) != 0 (LBB0_14).
; %bb.11: ; in Loop: Header=BB0_6 Depth=1
mov x0, x21
mov x1, x21
bl _loadRequiredBlockIntoMem
cmp x0, #0
b.gt LBB0_4
; %bb.12: ; in Loop: Header=BB0_6 Depth=1
tbnz x0, #63, LBB0_14
; %bb.13: ; in Loop: Header=BB0_6 Depth=1
mov w0, #0
b LBB0_5
LBB0_14: ; in Loop: Header=BB0_6 Depth=1
ldr w0, [x19, #64]
ldr w1, [x24]
bl _Q_STATUS_EQUAL
cmp x0, #0
cset w0, ne
b LBB0_5
; LBB0_15: results available — apply limit, reorder descending results,
; accumulate count into arg0+4, then handle QUERY_RESBUF_FULL.
LBB0_15:
mov x0, x19
bl _doRevisedResultsByLimit
mov x0, x21
bl _moveDescOrderResultsToFront
ldr w8, [x19, #24]
ldr w9, [x19, #4]
add w8, w9, w8
str w8, [x19, #4]
ldr w0, [x19, #64]
Lloh6:
adrp x8, _QUERY_RESBUF_FULL@GOTPAGE
Lloh7:
ldr x8, [x8, _QUERY_RESBUF_FULL@GOTPAGEOFF]
Lloh8:
ldr w1, [x8]
bl _Q_STATUS_EQUAL
cbz x0, LBB0_22
; %bb.16: buffer full — same load/assert selection as %bb.11..LBB0_14,
; then log the "query abort due to buffer limitation" trace (l_.str.1).
; %bb.16:
mov x0, x21
mov x1, x21
bl _loadRequiredBlockIntoMem
cmp x0, #0
b.le LBB0_18
; %bb.17:
mov w0, #1
b LBB0_21
LBB0_18:
tbnz x0, #63, LBB0_20
; %bb.19:
mov w0, #0
b LBB0_21
LBB0_20:
ldr w0, [x19, #64]
ldr w1, [x24]
bl _Q_STATUS_EQUAL
cmp x0, #0
cset w0, ne
LBB0_21:
bl _assert
ldp w3, w2, [x23, #4]
ldr w4, [x23]
ldp w6, w5, [x19, #56]
; kill: def $w5 killed $w5 def $x5
sxtw x5, w5
Lloh9:
adrp x0, l_.str.1@PAGE
Lloh10:
add x0, x0, l_.str.1@PAGEOFF
mov x1, x19
bl _dTrace
; LBB0_22: final "%d points returned" trace (l_.str.2); copy field +48
; into +40; if isTSCompQuery() == 0 run the bound assert at LBB0_24.
LBB0_22:
ldp w3, w2, [x23, #4]
ldr w4, [x23]
ldr x5, [x19, #24]
ldp w8, w6, [x19]
str x8, [sp]
Lloh11:
adrp x0, l_.str.2@PAGE
Lloh12:
add x0, x0, l_.str.2@PAGEOFF
mov x1, x19
bl _dTrace
ldr x8, [x19, #48]
str x8, [x19, #40]
mov x0, x20
bl _isTSCompQuery
cbz w0, LBB0_24
; LBB0_23: common epilogue.
LBB0_23:
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
ldp x26, x25, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #96
ret
; LBB0_24: tail-call assert(field(+24) <= field(+48)).
LBB0_24:
ldr x8, [x19, #24]
ldr x9, [x19, #48]
cmp x8, x9
cset w0, le
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
ldp x26, x25, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #96
b _assert
.loh AdrpAdd Lloh4, Lloh5
.loh AdrpLdrGot Lloh2, Lloh3
.loh AdrpLdrGot Lloh0, Lloh1
.loh AdrpLdrGotLdr Lloh6, Lloh7, Lloh8
.loh AdrpAdd Lloh9, Lloh10
.loh AdrpAdd Lloh11, Lloh12
.cfi_endproc
; -- End function
.comm _QUERY_COMPLETED,4,2 ; @QUERY_COMPLETED
.comm _QUERY_NO_DATA_TO_CHECK,4,2 ; @QUERY_NO_DATA_TO_CHECK
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "QInfo:%p vid:%d sid:%d id:%s, skip current result, offset:%lld, next qrange:%lld-%lld"
.comm _QUERY_RESBUF_FULL,4,2 ; @QUERY_RESBUF_FULL
l_.str.1: ; @.str.1
.asciz "QInfo:%p vid:%d sid:%d id:%s, query abort due to buffer limitation, next qrange:%lld-%lld"
l_.str.2: ; @.str.2
.asciz "QInfo:%p vid:%d sid:%d id:%s, %d points returned, totalRead:%d totalReturn:%d"
.no_dead_strip _vnodeSingleMeterMultiOutputProcessor
.subsections_via_symbols
| AnghaBench/TDengine/src/system/detail/src/extr_vnodeQueryProcess.c_vnodeSingleMeterMultiOutputProcessor.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
## ---------------------------------------------------------------------------
## write_types(structures /* rdi */, param_structs /* rsi */, info /* rdx */)
## clang-generated x86-64 (SysV AMD64, Mach-O) output — comments only.
## Emits GC-marker declarations/macros into header_file via oprintf, then
## writes marker functions; two passes over two linked lists.
## Register roles:
##   r12 = current node of the first list (starts at arg0)
##   r15 = current node of the second list (starts at arg1)
##   r14 = arg2; *(r14) and *(r14+4) are the prefix values fed to oprintf
## Node layout (inferred from offsets used below):
##   +0 kind tag (vs GC_POINTED_TO / GC_MAYBE_POINTED_TO)   +8 type tag
##   +16 next   +24 name (%s)   +32 lang-struct sub-list
##   +40 "defined" marker/location   +48 options (opt: +0 info, +4 name, +8 next)
## NOTE(review): field meanings inferred from callees and format strings —
## confirm against the original C (gcc gengtype.c, write_types).
## ---------------------------------------------------------------------------
.p2align 4, 0x90 ## -- Begin function write_types
_write_types: ## @write_types
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdx, %r14
movq %rsi, %r15
movq %rdi, %r12
movq _header_file@GOTPCREL(%rip), %rax
movl (%rax), %edi
movl 4(%rdx), %edx
leaq L_.str(%rip), %rsi
xorl %eax, %eax
callq _oprintf
testq %r12, %r12
je LBB0_22
## %bb.1: first pass over `structures` (r12); rbx caches &GC_POINTED_TO.
## %bb.1:
movq _GC_POINTED_TO@GOTPCREL(%rip), %rbx
jmp LBB0_5
## LBB0_2: structure used but never defined — warn on stderr (L_.str.9).
LBB0_2: ## in Loop: Header=BB0_5 Depth=1
movq _stderr@GOTPCREL(%rip), %rax
movl (%rax), %edi
movq 24(%r12), %rdx
leaq L_.str.9(%rip), %rsi
callq _fprintf
LBB0_3: ## in Loop: Header=BB0_5 Depth=1
movq %r13, %rbx
## LBB0_4: advance to the next node (+16); exit loop when NULL.
LBB0_4: ## in Loop: Header=BB0_5 Depth=1
movq 16(%r12), %r12
testq %r12, %r12
je LBB0_22
## LBB0_5: loop header — process node only if its kind is GC_POINTED_TO,
## or GC_MAYBE_POINTED_TO with a nonzero field at +40.
LBB0_5: ## =>This Loop Header: Depth=1
## Child Loop BB0_10 Depth 2
## Child Loop BB0_15 Depth 2
movq (%r12), %rax
movq _GC_MAYBE_POINTED_TO@GOTPCREL(%rip), %rcx
movq (%rcx), %rcx
cmpq (%rbx), %rax
je LBB0_7
## %bb.6: ## in Loop: Header=BB0_5 Depth=1
cmpq %rcx, %rax
jne LBB0_4
LBB0_7: ## in Loop: Header=BB0_5 Depth=1
cmpq %rcx, %rax
jne LBB0_9
## %bb.8: ## in Loop: Header=BB0_5 Depth=1
cmpq $0, 40(%r12)
je LBB0_4
## LBB0_9: emit the gt_* wrapper macro:
## "#define gt_%s_" + mangled typename + "(X) do { \" +
## " if (X != NULL) gt_%sx_%s (X);\" + " } while (0)".
## r13 temporarily saves &GC_POINTED_TO while rbx holds &header_file.
LBB0_9: ## in Loop: Header=BB0_5 Depth=1
movq %rbx, %r13
movq _header_file@GOTPCREL(%rip), %rbx
movl (%rbx), %edi
movl (%r14), %edx
leaq L_.str.1(%rip), %rsi
xorl %eax, %eax
callq _oprintf
movl (%rbx), %edi
movq %r12, %rsi
callq _output_mangled_typename
movl (%rbx), %edi
leaq L_.str.2(%rip), %rsi
xorl %eax, %eax
callq _oprintf
movl (%rbx), %edi
movl (%r14), %edx
movq 24(%r12), %rcx
leaq L_.str.3(%rip), %rsi
xorl %eax, %eax
callq _oprintf
movl (%rbx), %edi
leaq L_.str.4(%rip), %rsi
xorl %eax, %eax
callq _oprintf
movq 48(%r12), %rbx
testq %rbx, %rbx
je LBB0_12
.p2align 4, 0x90
## LBB0_10: inner loop — scan the options list for "ptr_alias" (L_.str.5).
LBB0_10: ## Parent Loop BB0_5 Depth=1
## => This Inner Loop Header: Depth=2
movl 4(%rbx), %edi
leaq L_.str.5(%rip), %rsi
callq _strcmp
testq %rax, %rax
je LBB0_16
## %bb.11: ## in Loop: Header=BB0_10 Depth=2
movq 8(%rbx), %rbx
testq %rbx, %rbx
jne LBB0_10
## LBB0_12: no ptr_alias — declare "extern void gt_%sx_%s (void *);" and,
## when the structure is defined (+40 != 0), write the marker function(s).
LBB0_12: ## in Loop: Header=BB0_5 Depth=1
movq _header_file@GOTPCREL(%rip), %rax
movl (%rax), %edi
movl (%r14), %edx
movq 24(%r12), %rcx
leaq L_.str.8(%rip), %rsi
xorl %eax, %eax
callq _oprintf
cmpq $0, 40(%r12)
je LBB0_2
## %bb.13: ## in Loop: Header=BB0_5 Depth=1
movq 8(%r12), %rax
movq _TYPE_LANG_STRUCT@GOTPCREL(%rip), %rcx
cmpq (%rcx), %rax
movq %r13, %rbx
jne LBB0_20
## %bb.14: lang-struct — iterate its sub-list (+32) at LBB0_15.
## %bb.14: ## in Loop: Header=BB0_5 Depth=1
movq 32(%r12), %r13
testq %r13, %r13
je LBB0_4
.p2align 4, 0x90
LBB0_15: ## Parent Loop BB0_5 Depth=1
## => This Inner Loop Header: Depth=2
movq %r12, %rdi
movq %r13, %rsi
xorl %edx, %edx
movq %r14, %rcx
callq _write_func_for_structure
movq 16(%r13), %r13
testq %r13, %r13
jne LBB0_15
jmp LBB0_4
## LBB0_16: ptr_alias option found — rax = alias target; it must be a
## struct/union/lang-struct, otherwise report error_at_line (L_.str.7).
LBB0_16: ## in Loop: Header=BB0_5 Depth=1
movslq (%rbx), %rax
movq 8(%rax), %rcx
movq _TYPE_STRUCT@GOTPCREL(%rip), %rdx
cmpq (%rdx), %rcx
je LBB0_21
## %bb.17: ## in Loop: Header=BB0_5 Depth=1
movq _TYPE_UNION@GOTPCREL(%rip), %rdx
cmpq (%rdx), %rcx
je LBB0_21
## %bb.18: ## in Loop: Header=BB0_5 Depth=1
movq _TYPE_LANG_STRUCT@GOTPCREL(%rip), %rdx
cmpq (%rdx), %rcx
je LBB0_21
## %bb.19: ## in Loop: Header=BB0_5 Depth=1
leaq 40(%r12), %rdi
leaq L_.str.7(%rip), %rsi
callq _error_at_line
jmp LBB0_3
## LBB0_20: defined, non-lang-struct — write the marker function directly.
LBB0_20: ## in Loop: Header=BB0_5 Depth=1
movq %r12, %rdi
movq %r12, %rsi
xorl %edx, %edx
movq %r14, %rcx
callq _write_func_for_structure
jmp LBB0_4
## LBB0_21: valid alias — emit "#define gt_%sx_%s gt_%sx_%s" (L_.str.6).
LBB0_21: ## in Loop: Header=BB0_5 Depth=1
movq _header_file@GOTPCREL(%rip), %rcx
movl (%rcx), %edi
movl (%r14), %edx
movq 24(%r12), %rcx
movq 24(%rax), %r9
leaq L_.str.6(%rip), %rsi
movl %edx, %r8d
xorl %eax, %eax
callq _oprintf
jmp LBB0_3
## LBB0_22: second pass over `param_structs` (r15).
LBB0_22:
testq %r15, %r15
je LBB0_33
## %bb.23:
movq _GC_POINTED_TO@GOTPCREL(%rip), %r12
jmp LBB0_27
## LBB0_24: parameterized structure used but not defined — warn on stderr.
LBB0_24: ## in Loop: Header=BB0_27 Depth=1
movq _stderr@GOTPCREL(%rip), %rax
movl (%rax), %edi
movq 24(%r15), %rdx
leaq L_.str.9(%rip), %rsi
callq _fprintf
LBB0_25: ## in Loop: Header=BB0_27 Depth=1
movq _GC_POINTED_TO@GOTPCREL(%rip), %r12
## LBB0_26: advance along the second list (+16).
LBB0_26: ## in Loop: Header=BB0_27 Depth=1
movq 16(%r15), %r15
testq %r15, %r15
je LBB0_33
## LBB0_27: loop header — only nodes whose kind is GC_POINTED_TO;
## r12/r13 then hold the node's fields at +56/+64.
LBB0_27: ## =>This Loop Header: Depth=1
## Child Loop BB0_31 Depth 2
movq (%r15), %rax
cmpq (%r12), %rax
jne LBB0_26
## %bb.28: emit "extern void gt_%s_" + mangled typename + " (void *);".
## %bb.28: ## in Loop: Header=BB0_27 Depth=1
movq 56(%r15), %r12
movq 64(%r15), %r13
movq _header_file@GOTPCREL(%rip), %rbx
movl (%rbx), %edi
movl (%r14), %edx
leaq L_.str.10(%rip), %rsi
xorl %eax, %eax
callq _oprintf
movl (%rbx), %edi
movq %r15, %rsi
callq _output_mangled_typename
movl (%rbx), %edi
leaq L_.str.11(%rip), %rsi
xorl %eax, %eax
callq _oprintf
cmpq $0, 40(%r12)
je LBB0_24
## %bb.29: ## in Loop: Header=BB0_27 Depth=1
movq 8(%r12), %rax
movq _TYPE_LANG_STRUCT@GOTPCREL(%rip), %rcx
cmpq (%rcx), %rax
jne LBB0_32
## %bb.30: lang-struct — iterate its sub-list (+32) at LBB0_31.
## %bb.30: ## in Loop: Header=BB0_27 Depth=1
movq 32(%r12), %rbx
testq %rbx, %rbx
movq _GC_POINTED_TO@GOTPCREL(%rip), %r12
je LBB0_26
.p2align 4, 0x90
LBB0_31: ## Parent Loop BB0_27 Depth=1
## => This Inner Loop Header: Depth=2
movq %r15, %rdi
movq %rbx, %rsi
movq %r13, %rdx
movq %r14, %rcx
callq _write_func_for_structure
movq 16(%rbx), %rbx
testq %rbx, %rbx
jne LBB0_31
jmp LBB0_26
LBB0_32: ## in Loop: Header=BB0_27 Depth=1
movq %r15, %rdi
movq %r12, %rsi
movq %r13, %rdx
movq %r14, %rcx
callq _write_func_for_structure
jmp LBB0_25
## LBB0_33: epilogue.
LBB0_33:
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _header_file,4,2 ## @header_file
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "\n/* %s*/\n"
.comm _GC_POINTED_TO,8,3 ## @GC_POINTED_TO
.comm _GC_MAYBE_POINTED_TO,8,3 ## @GC_MAYBE_POINTED_TO
L_.str.1: ## @.str.1
.asciz "#define gt_%s_"
L_.str.2: ## @.str.2
.asciz "(X) do { \\\n"
L_.str.3: ## @.str.3
.asciz " if (X != NULL) gt_%sx_%s (X);\\\n"
L_.str.4: ## @.str.4
.asciz " } while (0)\n"
L_.str.5: ## @.str.5
.asciz "ptr_alias"
.comm _TYPE_STRUCT,8,3 ## @TYPE_STRUCT
.comm _TYPE_UNION,8,3 ## @TYPE_UNION
.comm _TYPE_LANG_STRUCT,8,3 ## @TYPE_LANG_STRUCT
L_.str.6: ## @.str.6
.asciz "#define gt_%sx_%s gt_%sx_%s\n"
L_.str.7: ## @.str.7
.asciz "structure alias is not a structure"
L_.str.8: ## @.str.8
.asciz "extern void gt_%sx_%s (void *);\n"
.comm _stderr,4,2 ## @stderr
L_.str.9: ## @.str.9
.asciz "warning: structure `%s' used but not defined\n"
L_.str.10: ## @.str.10
.asciz "extern void gt_%s_"
L_.str.11: ## @.str.11
.asciz " (void *);\n"
.no_dead_strip _write_types
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
; ---------------------------------------------------------------------------
; write_types(structures /* x0 */, param_structs /* x1 */, info /* x2 */)
; clang-generated AArch64 (AAPCS64, Mach-O) output — comments only.
; Same logic as the x86 rendering: two passes emitting GC-marker macros and
; declarations into header_file via oprintf (variadic args spilled to [sp]).
; Register roles, first pass:
;   x21 = current node (starts at arg0)    x19 = arg2 (prefix info)
;   x26 = &header_file (hoisted for the whole function)
;   x22 = &GC_POINTED_TO  x23 = &GC_MAYBE_POINTED_TO
;   x24/x25/x28 = l_.str.3 / l_.str.4 / l_.str.5   x27 = options cursor
; Second pass: x20 = current node (starts at arg1), x27 = &GC_POINTED_TO,
;   x21/x22 = l_.str.10 / l_.str.11, x28 = &TYPE_LANG_STRUCT,
;   x23 = l_.str.9, x25/x24 = node fields at +56/+64.
; NOTE(review): field meanings inferred from callees and format strings —
; confirm against the original C (gcc gengtype.c, write_types).
; ---------------------------------------------------------------------------
.p2align 2 ; -- Begin function write_types
_write_types: ; @write_types
.cfi_startproc
; %bb.0:
sub sp, sp, #128
.cfi_def_cfa_offset 128
stp x28, x27, [sp, #32] ; 16-byte Folded Spill
stp x26, x25, [sp, #48] ; 16-byte Folded Spill
stp x24, x23, [sp, #64] ; 16-byte Folded Spill
stp x22, x21, [sp, #80] ; 16-byte Folded Spill
stp x20, x19, [sp, #96] ; 16-byte Folded Spill
stp x29, x30, [sp, #112] ; 16-byte Folded Spill
add x29, sp, #112
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
mov x19, x2
mov x20, x1
mov x21, x0
Lloh0:
adrp x26, _header_file@GOTPAGE
Lloh1:
ldr x26, [x26, _header_file@GOTPAGEOFF]
ldr w0, [x26]
ldr w8, [x2, #4]
str x8, [sp]
Lloh2:
adrp x1, l_.str@PAGE
Lloh3:
add x1, x1, l_.str@PAGEOFF
bl _oprintf
cbz x21, LBB0_18
; %bb.1: hoist GOT/page addresses used by the first-pass loop.
; %bb.1:
Lloh4:
adrp x22, _GC_POINTED_TO@GOTPAGE
Lloh5:
ldr x22, [x22, _GC_POINTED_TO@GOTPAGEOFF]
Lloh6:
adrp x23, _GC_MAYBE_POINTED_TO@GOTPAGE
Lloh7:
ldr x23, [x23, _GC_MAYBE_POINTED_TO@GOTPAGEOFF]
Lloh8:
adrp x24, l_.str.3@PAGE
Lloh9:
add x24, x24, l_.str.3@PAGEOFF
Lloh10:
adrp x25, l_.str.4@PAGE
Lloh11:
add x25, x25, l_.str.4@PAGEOFF
Lloh12:
adrp x28, l_.str.5@PAGE
Lloh13:
add x28, x28, l_.str.5@PAGEOFF
b LBB0_4
; LBB0_2: structure used but never defined — warn on stderr (l_.str.9).
LBB0_2: ; in Loop: Header=BB0_4 Depth=1
Lloh14:
adrp x8, _stderr@GOTPAGE
Lloh15:
ldr x8, [x8, _stderr@GOTPAGEOFF]
Lloh16:
ldr w0, [x8]
ldr x2, [x21, #24]
Lloh17:
adrp x1, l_.str.9@PAGE
Lloh18:
add x1, x1, l_.str.9@PAGEOFF
bl _fprintf
; LBB0_3: advance to the next node (+16); exit pass when NULL.
LBB0_3: ; in Loop: Header=BB0_4 Depth=1
ldr x21, [x21, #16]
cbz x21, LBB0_18
; LBB0_4: loop header — process the node only if its kind (+0) is
; GC_POINTED_TO, or GC_MAYBE_POINTED_TO with a nonzero field at +40.
LBB0_4: ; =>This Loop Header: Depth=1
; Child Loop BB0_8 Depth 2
; Child Loop BB0_13 Depth 2
ldr x8, [x21]
ldr x10, [x22]
ldr x9, [x23]
cmp x8, x10
ccmp x8, x9, #4, ne
b.ne LBB0_3
; %bb.5: ; in Loop: Header=BB0_4 Depth=1
cmp x8, x9
b.ne LBB0_7
; %bb.6: ; in Loop: Header=BB0_4 Depth=1
ldr x8, [x21, #40]
cbz x8, LBB0_3
; LBB0_7: emit the gt_* wrapper macro:
; "#define gt_%s_" + mangled typename + "(X) do { \" +
; " if (X != NULL) gt_%sx_%s (X);\" + " } while (0)".
LBB0_7: ; in Loop: Header=BB0_4 Depth=1
ldr w0, [x26]
ldr w8, [x19]
str x8, [sp]
Lloh19:
adrp x1, l_.str.1@PAGE
Lloh20:
add x1, x1, l_.str.1@PAGEOFF
bl _oprintf
ldr w0, [x26]
mov x1, x21
bl _output_mangled_typename
ldr w0, [x26]
Lloh21:
adrp x1, l_.str.2@PAGE
Lloh22:
add x1, x1, l_.str.2@PAGEOFF
bl _oprintf
ldr w0, [x26]
ldr x8, [x21, #24]
ldr w9, [x19]
stp x9, x8, [sp]
mov x1, x24
bl _oprintf
ldr w0, [x26]
mov x1, x25
bl _oprintf
ldr x27, [x21, #48]
cbz x27, LBB0_10
; LBB0_8: inner loop — scan the options list for "ptr_alias" (l_.str.5).
LBB0_8: ; Parent Loop BB0_4 Depth=1
; => This Inner Loop Header: Depth=2
ldr w0, [x27, #4]
mov x1, x28
bl _strcmp
cbz x0, LBB0_14
; %bb.9: ; in Loop: Header=BB0_8 Depth=2
ldr x27, [x27, #8]
cbnz x27, LBB0_8
; LBB0_10: no ptr_alias — declare "extern void gt_%sx_%s (void *);" and,
; when the structure is defined (+40 != 0), write the marker function(s).
LBB0_10: ; in Loop: Header=BB0_4 Depth=1
ldr w0, [x26]
ldr x8, [x21, #24]
ldr w9, [x19]
stp x9, x8, [sp]
Lloh23:
adrp x1, l_.str.8@PAGE
Lloh24:
add x1, x1, l_.str.8@PAGEOFF
bl _oprintf
ldr x8, [x21, #40]
cbz x8, LBB0_2
; %bb.11: ; in Loop: Header=BB0_4 Depth=1
ldr x8, [x21, #8]
Lloh25:
adrp x9, _TYPE_LANG_STRUCT@GOTPAGE
Lloh26:
ldr x9, [x9, _TYPE_LANG_STRUCT@GOTPAGEOFF]
Lloh27:
ldr x9, [x9]
cmp x8, x9
b.ne LBB0_16
; %bb.12: lang-struct — iterate its sub-list (+32) at LBB0_13.
; %bb.12: ; in Loop: Header=BB0_4 Depth=1
ldr x27, [x21, #32]
cbz x27, LBB0_3
LBB0_13: ; Parent Loop BB0_4 Depth=1
; => This Inner Loop Header: Depth=2
mov x0, x21
mov x1, x27
mov x2, #0
mov x3, x19
bl _write_func_for_structure
ldr x27, [x27, #16]
cbnz x27, LBB0_13
b LBB0_3
; LBB0_14: ptr_alias option found — x8 = alias target; it must be a
; struct/union/lang-struct, otherwise report error_at_line (l_.str.7).
LBB0_14: ; in Loop: Header=BB0_4 Depth=1
ldrsw x8, [x27]
ldr x9, [x8, #8]
Lloh28:
adrp x10, _TYPE_STRUCT@GOTPAGE
Lloh29:
ldr x10, [x10, _TYPE_STRUCT@GOTPAGEOFF]
Lloh30:
ldr x10, [x10]
Lloh31:
adrp x11, _TYPE_UNION@GOTPAGE
Lloh32:
ldr x11, [x11, _TYPE_UNION@GOTPAGEOFF]
Lloh33:
ldr x11, [x11]
Lloh34:
adrp x12, _TYPE_LANG_STRUCT@GOTPAGE
Lloh35:
ldr x12, [x12, _TYPE_LANG_STRUCT@GOTPAGEOFF]
Lloh36:
ldr x12, [x12]
cmp x9, x10
ccmp x9, x11, #4, ne
ccmp x9, x12, #4, ne
b.ne LBB0_17
; %bb.15: valid alias — emit "#define gt_%sx_%s gt_%sx_%s" (l_.str.6);
; all four variadic args go on the stack in two stp pairs.
; %bb.15: ; in Loop: Header=BB0_4 Depth=1
ldr w0, [x26]
ldr x9, [x21, #24]
ldr x8, [x8, #24]
ldr w10, [x19]
stp x10, x8, [sp, #16]
stp x10, x9, [sp]
Lloh37:
adrp x1, l_.str.6@PAGE
Lloh38:
add x1, x1, l_.str.6@PAGEOFF
bl _oprintf
b LBB0_3
; LBB0_16: defined, non-lang-struct — write the marker function directly.
LBB0_16: ; in Loop: Header=BB0_4 Depth=1
mov x0, x21
mov x1, x21
mov x2, #0
mov x3, x19
bl _write_func_for_structure
b LBB0_3
LBB0_17: ; in Loop: Header=BB0_4 Depth=1
add x0, x21, #40
Lloh39:
adrp x1, l_.str.7@PAGE
Lloh40:
add x1, x1, l_.str.7@PAGEOFF
bl _error_at_line
b LBB0_3
; LBB0_18: second pass over `param_structs` (x20).
LBB0_18:
cbz x20, LBB0_28
; %bb.19:
Lloh41:
adrp x27, _GC_POINTED_TO@GOTPAGE
Lloh42:
ldr x27, [x27, _GC_POINTED_TO@GOTPAGEOFF]
Lloh43:
adrp x21, l_.str.10@PAGE
Lloh44:
add x21, x21, l_.str.10@PAGEOFF
Lloh45:
adrp x22, l_.str.11@PAGE
Lloh46:
add x22, x22, l_.str.11@PAGEOFF
Lloh47:
adrp x28, _TYPE_LANG_STRUCT@GOTPAGE
Lloh48:
ldr x28, [x28, _TYPE_LANG_STRUCT@GOTPAGEOFF]
Lloh49:
adrp x23, l_.str.9@PAGE
Lloh50:
add x23, x23, l_.str.9@PAGEOFF
b LBB0_22
; LBB0_20: parameterized structure used but not defined — warn on stderr.
LBB0_20: ; in Loop: Header=BB0_22 Depth=1
Lloh51:
adrp x8, _stderr@GOTPAGE
Lloh52:
ldr x8, [x8, _stderr@GOTPAGEOFF]
Lloh53:
ldr w0, [x8]
ldr x2, [x20, #24]
mov x1, x23
bl _fprintf
; LBB0_21: advance along the second list (+16).
LBB0_21: ; in Loop: Header=BB0_22 Depth=1
ldr x20, [x20, #16]
cbz x20, LBB0_28
; LBB0_22: loop header — only nodes whose kind is GC_POINTED_TO;
; x25/x24 then hold the node's fields at +56/+64.
LBB0_22: ; =>This Loop Header: Depth=1
; Child Loop BB0_26 Depth 2
ldr x8, [x20]
ldr x9, [x27]
cmp x8, x9
b.ne LBB0_21
; %bb.23: emit "extern void gt_%s_" + mangled typename + " (void *);".
; %bb.23: ; in Loop: Header=BB0_22 Depth=1
ldp x25, x24, [x20, #56]
ldr w0, [x26]
ldr w8, [x19]
str x8, [sp]
mov x1, x21
bl _oprintf
ldr w0, [x26]
mov x1, x20
bl _output_mangled_typename
ldr w0, [x26]
mov x1, x22
bl _oprintf
ldr x8, [x25, #40]
cbz x8, LBB0_20
; %bb.24: ; in Loop: Header=BB0_22 Depth=1
ldr x8, [x25, #8]
ldr x9, [x28]
cmp x8, x9
b.ne LBB0_27
; %bb.25: lang-struct — iterate its sub-list (+32) at LBB0_26.
; %bb.25: ; in Loop: Header=BB0_22 Depth=1
ldr x25, [x25, #32]
cbz x25, LBB0_21
LBB0_26: ; Parent Loop BB0_22 Depth=1
; => This Inner Loop Header: Depth=2
mov x0, x20
mov x1, x25
mov x2, x24
mov x3, x19
bl _write_func_for_structure
ldr x25, [x25, #16]
cbnz x25, LBB0_26
b LBB0_21
LBB0_27: ; in Loop: Header=BB0_22 Depth=1
mov x0, x20
mov x1, x25
mov x2, x24
mov x3, x19
bl _write_func_for_structure
b LBB0_21
; LBB0_28: epilogue.
LBB0_28:
ldp x29, x30, [sp, #112] ; 16-byte Folded Reload
ldp x20, x19, [sp, #96] ; 16-byte Folded Reload
ldp x22, x21, [sp, #80] ; 16-byte Folded Reload
ldp x24, x23, [sp, #64] ; 16-byte Folded Reload
ldp x26, x25, [sp, #48] ; 16-byte Folded Reload
ldp x28, x27, [sp, #32] ; 16-byte Folded Reload
add sp, sp, #128
ret
.loh AdrpAdd Lloh2, Lloh3
.loh AdrpLdrGot Lloh0, Lloh1
.loh AdrpAdd Lloh12, Lloh13
.loh AdrpAdd Lloh10, Lloh11
.loh AdrpAdd Lloh8, Lloh9
.loh AdrpLdrGot Lloh6, Lloh7
.loh AdrpLdrGot Lloh4, Lloh5
.loh AdrpAdd Lloh17, Lloh18
.loh AdrpLdrGotLdr Lloh14, Lloh15, Lloh16
.loh AdrpAdd Lloh21, Lloh22
.loh AdrpAdd Lloh19, Lloh20
.loh AdrpAdd Lloh23, Lloh24
.loh AdrpLdrGotLdr Lloh25, Lloh26, Lloh27
.loh AdrpLdrGotLdr Lloh34, Lloh35, Lloh36
.loh AdrpLdrGotLdr Lloh31, Lloh32, Lloh33
.loh AdrpLdrGotLdr Lloh28, Lloh29, Lloh30
.loh AdrpAdd Lloh37, Lloh38
.loh AdrpAdd Lloh39, Lloh40
.loh AdrpAdd Lloh49, Lloh50
.loh AdrpLdrGot Lloh47, Lloh48
.loh AdrpAdd Lloh45, Lloh46
.loh AdrpAdd Lloh43, Lloh44
.loh AdrpLdrGot Lloh41, Lloh42
.loh AdrpLdrGotLdr Lloh51, Lloh52, Lloh53
.cfi_endproc
; -- End function
.comm _header_file,4,2 ; @header_file
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "\n/* %s*/\n"
.comm _GC_POINTED_TO,8,3 ; @GC_POINTED_TO
.comm _GC_MAYBE_POINTED_TO,8,3 ; @GC_MAYBE_POINTED_TO
l_.str.1: ; @.str.1
.asciz "#define gt_%s_"
l_.str.2: ; @.str.2
.asciz "(X) do { \\\n"
l_.str.3: ; @.str.3
.asciz " if (X != NULL) gt_%sx_%s (X);\\\n"
l_.str.4: ; @.str.4
.asciz " } while (0)\n"
l_.str.5: ; @.str.5
.asciz "ptr_alias"
.comm _TYPE_STRUCT,8,3 ; @TYPE_STRUCT
.comm _TYPE_UNION,8,3 ; @TYPE_UNION
.comm _TYPE_LANG_STRUCT,8,3 ; @TYPE_LANG_STRUCT
l_.str.6: ; @.str.6
.asciz "#define gt_%sx_%s gt_%sx_%s\n"
l_.str.7: ; @.str.7
.asciz "structure alias is not a structure"
l_.str.8: ; @.str.8
.asciz "extern void gt_%sx_%s (void *);\n"
.comm _stderr,4,2 ; @stderr
l_.str.9: ; @.str.9
.asciz "warning: structure `%s' used but not defined\n"
l_.str.10: ; @.str.10
.asciz "extern void gt_%s_"
l_.str.11: ; @.str.11
.asciz " (void *);\n"
.no_dead_strip _write_types
.subsections_via_symbols
| AnghaBench/freebsd/contrib/gcc/extr_gengtype.c_write_types.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
## ---------------------------------------------------------------------------
## HPDF_Binary_SetValue(obj /* rdi */, value /* rsi */, len /* rdx */)
## clang-generated x86-64 (SysV AMD64, Mach-O) output — comments only.
## Replaces the byte payload of a binary object:
##   len > HPDF_LIMIT_MAX_STRING_LEN  -> tail-call
##        HPDF_SetError(*(obj+16), HPDF_BINARY_LENGTH_ERR, 0)
##   otherwise free the old buffer (if any), allocate len bytes with
##   HPDF_GetMem(*(obj+20), len), copy via HPDF_MemCpy, store len at obj+0
##   and return HPDF_OK; on allocation failure tail-call
##   HPDF_Error_GetCode(*(obj+16)).
## Offsets used: obj+0 length, obj+8 data ptr, obj+16 error handle,
## obj+20 memory-manager handle (inferred from the loads below).
## rbx = obj, r15 = value, r14 = len.
## ---------------------------------------------------------------------------
.globl _HPDF_Binary_SetValue ## -- Begin function HPDF_Binary_SetValue
.p2align 4, 0x90
_HPDF_Binary_SetValue: ## @HPDF_Binary_SetValue
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdi, %rbx
movq _HPDF_LIMIT_MAX_STRING_LEN@GOTPCREL(%rip), %rax
cmpq %rdx, (%rax)
jge LBB0_1
## %bb.5: length limit exceeded — tail-call HPDF_SetError(err, code, 0).
## %bb.5:
movl 16(%rbx), %edi
movq _HPDF_BINARY_LENGTH_ERR@GOTPCREL(%rip), %rax
movl (%rax), %esi
xorl %edx, %edx
addq $8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
jmp _HPDF_SetError ## TAILCALL
## LBB0_1: length OK — drop any previous buffer before reallocating.
LBB0_1:
movq %rdx, %r14
movq %rsi, %r15
movq 8(%rbx), %rsi
testq %rsi, %rsi
je LBB0_3
## %bb.2:
movl 20(%rbx), %edi
callq _HPDF_FreeMem
movq $0, (%rbx)
## LBB0_3: allocate, copy, store the new length, return HPDF_OK.
LBB0_3:
movl 20(%rbx), %edi
movq %r14, %rsi
callq _HPDF_GetMem
movq %rax, 8(%rbx)
testq %rax, %rax
je LBB0_6
## %bb.4:
movq %rax, %rdi
movq %r15, %rsi
movq %r14, %rdx
callq _HPDF_MemCpy
movq %r14, (%rbx)
movq _HPDF_OK@GOTPCREL(%rip), %rax
movl (%rax), %eax
addq $8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
## LBB0_6: allocation failed — tail-call HPDF_Error_GetCode(err).
LBB0_6:
movl 16(%rbx), %edi
addq $8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
jmp _HPDF_Error_GetCode ## TAILCALL
.cfi_endproc
## -- End function
.comm _HPDF_LIMIT_MAX_STRING_LEN,8,3 ## @HPDF_LIMIT_MAX_STRING_LEN
.comm _HPDF_BINARY_LENGTH_ERR,4,2 ## @HPDF_BINARY_LENGTH_ERR
.comm _HPDF_OK,4,2 ## @HPDF_OK
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
; ---------------------------------------------------------------------------
; HPDF_Binary_SetValue(obj /* x0 */, value /* x1 */, len /* x2 */)
; clang-generated AArch64 (AAPCS64, Mach-O) output — comments only.
; Same logic as the x86 rendering: reject len > HPDF_LIMIT_MAX_STRING_LEN
; via a tail call to HPDF_SetError; otherwise free/alloc/copy and return
; HPDF_OK, or tail-call HPDF_Error_GetCode on allocation failure.
; x19 = obj, x21 = value, x20 = len. Offsets: obj+0 length, obj+8 data ptr,
; obj+16 error handle, obj+20 memory-manager handle (inferred).
; ---------------------------------------------------------------------------
.globl _HPDF_Binary_SetValue ; -- Begin function HPDF_Binary_SetValue
.p2align 2
_HPDF_Binary_SetValue: ; @HPDF_Binary_SetValue
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x19, x0
Lloh0:
adrp x8, _HPDF_LIMIT_MAX_STRING_LEN@GOTPAGE
Lloh1:
ldr x8, [x8, _HPDF_LIMIT_MAX_STRING_LEN@GOTPAGEOFF]
Lloh2:
ldr x8, [x8]
cmp x8, x2
b.ge LBB0_2
; %bb.1: length limit exceeded — tail-call HPDF_SetError(err, code, 0).
; %bb.1:
ldr w0, [x19, #16]
Lloh3:
adrp x8, _HPDF_BINARY_LENGTH_ERR@GOTPAGE
Lloh4:
ldr x8, [x8, _HPDF_BINARY_LENGTH_ERR@GOTPAGEOFF]
Lloh5:
ldr w1, [x8]
mov w2, #0
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
b _HPDF_SetError
; LBB0_2: length OK — drop any previous buffer before reallocating.
LBB0_2:
mov x20, x2
mov x21, x1
ldr x1, [x19, #8]
cbz x1, LBB0_4
; %bb.3:
ldr w0, [x19, #20]
bl _HPDF_FreeMem
str xzr, [x19]
; LBB0_4: allocate, copy, store the new length, return HPDF_OK.
LBB0_4:
ldr w0, [x19, #20]
mov x1, x20
bl _HPDF_GetMem
str x0, [x19, #8]
cbz x0, LBB0_6
; %bb.5:
mov x1, x21
mov x2, x20
bl _HPDF_MemCpy
str x20, [x19]
Lloh6:
adrp x8, _HPDF_OK@GOTPAGE
Lloh7:
ldr x8, [x8, _HPDF_OK@GOTPAGEOFF]
Lloh8:
ldr w0, [x8]
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
; LBB0_6: allocation failed — tail-call HPDF_Error_GetCode(err).
LBB0_6:
ldr w0, [x19, #16]
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
b _HPDF_Error_GetCode
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGotLdr Lloh6, Lloh7, Lloh8
.cfi_endproc
; -- End function
.comm _HPDF_LIMIT_MAX_STRING_LEN,8,3 ; @HPDF_LIMIT_MAX_STRING_LEN
.comm _HPDF_BINARY_LENGTH_ERR,4,2 ; @HPDF_BINARY_LENGTH_ERR
.comm _HPDF_OK,4,2 ; @HPDF_OK
.subsections_via_symbols
| AnghaBench/poco/PDF/src/extr_hpdf_binary.c_HPDF_Binary_SetValue.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
## ---------------------------------------------------------------------------
## crypto_shash_show(seq /* rdi */, alg /* rsi */)
## clang-generated x86-64 (SysV AMD64, Mach-O) output — comments only.
## Emits three seq_printf lines: a fixed "type : shash" header,
## "blocksize : %u" from *(alg), and "digestsize : %u" from
## *__crypto_shash_alg(alg); the last call is a tail call.
## rbx = seq, r14 = alg, r15 = __crypto_shash_alg(alg).
## ---------------------------------------------------------------------------
.p2align 4, 0x90 ## -- Begin function crypto_shash_show
_crypto_shash_show: ## @crypto_shash_show
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rsi, %r14
movq %rdi, %rbx
movq %rsi, %rdi
callq ___crypto_shash_alg
movq %rax, %r15
leaq L_.str(%rip), %rsi
movq %rbx, %rdi
xorl %eax, %eax
callq _seq_printf
movl (%r14), %edx
leaq L_.str.1(%rip), %rsi
movq %rbx, %rdi
xorl %eax, %eax
callq _seq_printf
movl (%r15), %edx
leaq L_.str.2(%rip), %rsi
movq %rbx, %rdi
xorl %eax, %eax
addq $8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
jmp _seq_printf ## TAILCALL
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "type : shash\n"
L_.str.1: ## @.str.1
.asciz "blocksize : %u\n"
L_.str.2: ## @.str.2
.asciz "digestsize : %u\n"
.no_dead_strip _crypto_shash_show
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
; ---------------------------------------------------------------------------
; crypto_shash_show(seq /* x0 */, alg /* x1 */)
; clang-generated AArch64 (AAPCS64, Mach-O) output — comments only.
; Emits three seq_printf lines: "type : shash", "blocksize : %u" from
; *(alg), and "digestsize : %u" from *__crypto_shash_alg(alg). Variadic
; integer args are spilled to [sp] per the Darwin AArch64 varargs ABI.
; x20 = seq, x19 = alg, x21 = __crypto_shash_alg(alg).
; ---------------------------------------------------------------------------
.p2align 2 ; -- Begin function crypto_shash_show
_crypto_shash_show: ; @crypto_shash_show
.cfi_startproc
; %bb.0:
sub sp, sp, #64
.cfi_def_cfa_offset 64
stp x22, x21, [sp, #16] ; 16-byte Folded Spill
stp x20, x19, [sp, #32] ; 16-byte Folded Spill
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x19, x1
mov x20, x0
mov x0, x1
bl ___crypto_shash_alg
mov x21, x0
Lloh0:
adrp x1, l_.str@PAGE
Lloh1:
add x1, x1, l_.str@PAGEOFF
mov x0, x20
bl _seq_printf
ldr w8, [x19]
str x8, [sp]
Lloh2:
adrp x1, l_.str.1@PAGE
Lloh3:
add x1, x1, l_.str.1@PAGEOFF
mov x0, x20
bl _seq_printf
ldr w8, [x21]
str x8, [sp]
Lloh4:
adrp x1, l_.str.2@PAGE
Lloh5:
add x1, x1, l_.str.2@PAGEOFF
mov x0, x20
bl _seq_printf
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #64
ret
.loh AdrpAdd Lloh4, Lloh5
.loh AdrpAdd Lloh2, Lloh3
.loh AdrpAdd Lloh0, Lloh1
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "type : shash\n"
l_.str.1: ; @.str.1
.asciz "blocksize : %u\n"
l_.str.2: ; @.str.2
.asciz "digestsize : %u\n"
.no_dead_strip _crypto_shash_show
.subsections_via_symbols
| AnghaBench/linux/crypto/extr_shash.c_crypto_shash_show.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
## ---------------------------------------------------------------------------
## alloc_qpn(qpt /* rdi */, type /* esi */)
## clang-generated x86-64 (SysV AMD64, Mach-O) output — comments only.
## Fast path: type == IB_QPT_SMI reserves qpn 0, type == IB_QPT_GSI reserves
## qpn 1, via test_and_set_bit on map page 0; returns -ENOMEM when the map
## page cannot be allocated, -EBUSY when the bit was already set.
## Slow path: compute a starting qpn (last+1, wrapped below QPN_MAX and
## never < 2), then scan bitmap pages with test_and_set_bit /
## find_next_offset, advancing (and wrapping) across QPNMAP_ENTRIES pages;
## on success record the qpn at *(qpt) and return it, else return -ENOMEM.
## Register roles:
##   r14 = qpt            table base: +0 last qpn, +4 page index, +8 map array
##   r12 = candidate qpn  r15 = current map entry pointer (8-byte entries)
##   r13 = &BITS_PER_PAGE ebx = bit offset within the current page
##   -44(%rbp) = pages scanned so far, -48(%rbp) = scan limit
## NOTE(review): semantics inferred from offsets/callee names; matches the
## shape of the Linux ipath/qib alloc_qpn — confirm against the original C.
## ---------------------------------------------------------------------------
.p2align 4, 0x90 ## -- Begin function alloc_qpn
_alloc_qpn: ## @alloc_qpn
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdi, %r14
movq _IB_QPT_SMI@GOTPCREL(%rip), %rax
xorl %r12d, %r12d
cmpl %esi, (%rax)
je LBB0_2
## %bb.1:
movq _IB_QPT_GSI@GOTPCREL(%rip), %rax
movl $1, %r12d
cmpl %esi, (%rax)
jne LBB0_7
## LBB0_2: SMI/GSI fast path — r12 = reserved qpn (0 or 1); ensure map
## page 0 exists (unlikely() guards), then claim the bit.
LBB0_2:
movq 8(%r14), %rbx
xorl %edi, %edi
cmpl $0, 4(%rbx)
sete %dil
callq _unlikely
testq %rax, %rax
je LBB0_4
## %bb.3:
movq %r14, %rdi
movq %rbx, %rsi
callq _get_map_page
xorl %edi, %edi
cmpl $0, 4(%rbx)
sete %dil
callq _unlikely
testq %rax, %rax
je LBB0_4
## LBB0_19: shared failure exit — return -ENOMEM.
LBB0_19:
movq _ENOMEM@GOTPCREL(%rip), %rax
jmp LBB0_20
LBB0_4:
movl 4(%rbx), %esi
movl %r12d, %edi
callq _test_and_set_bit
testl %eax, %eax
je LBB0_5
## %bb.6: bit already set — return -EBUSY.
## %bb.6:
movq _EBUSY@GOTPCREL(%rip), %rax
## LBB0_20: r12 = -(*errno_sym); fall through to the common return.
LBB0_20:
xorl %r12d, %r12d
subl (%rax), %r12d
jmp LBB0_21
## LBB0_7: slow path — r12 = starting qpn = min-wrapped last+1 (>= 2);
## r15 = &map[qpn / BITS_PER_PAGE]; ebx = qpn & BITS_PER_PAGE_MASK,
## with the scan limit at -48(%rbp) adjusted when ebx starts at 0.
LBB0_7:
movl (%r14), %eax
incl %eax
movq _QPN_MAX@GOTPCREL(%rip), %rcx
cmpl (%rcx), %eax
movl 4(%r14), %esi
movl $2, %r12d
cmovll %eax, %r12d
movq _BITS_PER_PAGE@GOTPCREL(%rip), %r13
movl %r12d, %eax
cltd
idivl (%r13)
movslq %eax, %r15
shlq $3, %r15
addq 8(%r14), %r15
movq _BITS_PER_PAGE_MASK@GOTPCREL(%rip), %rax
xorl %ecx, %ecx
movl (%rax), %ebx
andl %r12d, %ebx
sete %cl
subl %ecx, %esi
movl %esi, -48(%rbp) ## 4-byte Spill
movl $0, -44(%rbp) ## 4-byte Folded Spill
## LBB0_8: outer loop — one iteration per map page; ensure the page is
## allocated (unlikely() guards), -ENOMEM if it cannot be.
LBB0_8: ## =>This Loop Header: Depth=1
## Child Loop BB0_11 Depth 2
xorl %edi, %edi
cmpl $0, 4(%r15)
sete %dil
callq _unlikely
testq %rax, %rax
je LBB0_10
## %bb.9: ## in Loop: Header=BB0_8 Depth=1
movq %r14, %rdi
movq %r15, %rsi
callq _get_map_page
xorl %edi, %edi
cmpl $0, 4(%r15)
sete %dil
callq _unlikely
testq %rax, %rax
jne LBB0_19
## LBB0_10: page available — if atomic_read() says bits remain (likely),
## run the inner claim loop.
LBB0_10: ## in Loop: Header=BB0_8 Depth=1
movq %r15, %rdi
callq _atomic_read
movl %eax, %edi
callq _likely
testq %rax, %rax
je LBB0_14
.p2align 4, 0x90
## LBB0_11: inner loop — try to claim bit ebx; on failure advance to the
## next free offset and recompute the qpn until page/QPN_MAX bounds hit.
LBB0_11: ## Parent Loop BB0_8 Depth=1
## => This Inner Loop Header: Depth=2
movl 4(%r15), %esi
movl %ebx, %edi
callq _test_and_set_bit
testl %eax, %eax
je LBB0_22
## %bb.12: ## in Loop: Header=BB0_11 Depth=2
movq %r15, %rdi
movl %ebx, %esi
callq _find_next_offset
movl %eax, %ebx
movq %r14, %rdi
movq %r15, %rsi
movl %eax, %edx
callq _mk_qpn
cmpl (%r13), %ebx
jge LBB0_14
## %bb.13: ## in Loop: Header=BB0_11 Depth=2
movl %eax, %r12d
movq _QPN_MAX@GOTPCREL(%rip), %rax
cmpl (%rax), %r12d
jl LBB0_11
## LBB0_14: page exhausted — advance to the next page (wrapping to the
## start of the map once past the saved page pointer), or grow the page
## index until QPNMAP_ENTRIES is reached (-ENOMEM then).
LBB0_14: ## in Loop: Header=BB0_8 Depth=1
movl -44(%rbp), %eax ## 4-byte Reload
cmpl -48(%rbp), %eax ## 4-byte Folded Reload
jge LBB0_15
## %bb.17: ## in Loop: Header=BB0_8 Depth=1
movq 8(%r14), %rax
movslq 4(%r14), %rcx
leaq (%rax,%rcx,8), %rcx
xorl %ebx, %ebx
cmpq %rcx, %r15
leaq 8(%r15), %r15
setae %bl
cmovaeq %rax, %r15
addl %ebx, %ebx
jmp LBB0_18
.p2align 4, 0x90
LBB0_15: ## in Loop: Header=BB0_8 Depth=1
movslq 4(%r14), %r15
movq _QPNMAP_ENTRIES@GOTPCREL(%rip), %rax
cmpl (%rax), %r15d
je LBB0_19
## %bb.16: ## in Loop: Header=BB0_8 Depth=1
leal 1(%r15), %eax
movl %eax, 4(%r14)
shlq $3, %r15
addq 8(%r14), %r15
xorl %ebx, %ebx
## LBB0_18: count the page, recompute candidate qpn, loop.
LBB0_18: ## in Loop: Header=BB0_8 Depth=1
incl -44(%rbp) ## 4-byte Folded Spill
movq %r14, %rdi
movq %r15, %rsi
movl %ebx, %edx
callq _mk_qpn
movl %eax, %r12d
jmp LBB0_8
## LBB0_22: slow-path success — release the page refcount, record the qpn
## at *(qpt), return it.
LBB0_22:
movq %r15, %rdi
callq _atomic_dec
movl %r12d, (%r14)
jmp LBB0_21
## LBB0_5: fast-path success — release the page refcount, return 0/1.
LBB0_5:
movq %rbx, %rdi
callq _atomic_dec
## LBB0_21: common return — eax = r12d (qpn or negative errno).
LBB0_21:
movl %r12d, %eax
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _IB_QPT_SMI,4,2 ## @IB_QPT_SMI
.comm _IB_QPT_GSI,4,2 ## @IB_QPT_GSI
.comm _ENOMEM,4,2 ## @ENOMEM
.comm _EBUSY,4,2 ## @EBUSY
.comm _QPN_MAX,4,2 ## @QPN_MAX
.comm _BITS_PER_PAGE_MASK,4,2 ## @BITS_PER_PAGE_MASK
.comm _BITS_PER_PAGE,4,2 ## @BITS_PER_PAGE
.comm _QPNMAP_ENTRIES,4,2 ## @QPNMAP_ENTRIES
.no_dead_strip _alloc_qpn
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
; ---------------------------------------------------------------------------
; alloc_qpn -- clang-generated AArch64 (Darwin) code; dataset artifact, do
; not hand-edit the instruction stream.  Contract reconstructed from the
; instructions below:
;   x0 (saved in x19) : qpn table {w[0], w[1], page-array pointer at +8}
;   w1                : QP type
; IB_QPT_SMI -> fixed qpn 0, IB_QPT_GSI -> fixed qpn 1 (each claims that bit
; in the first map page); any other type scans the bitmap pages for a free
; qpn, calling get_map_page for missing pages and growing the table up to
; QPNMAP_ENTRIES pages.  Returns the qpn in w0, or -ENOMEM / -EBUSY.
; Register roles: x19=table, w20=candidate qpn / result, x21=current page,
; w22=bit offset in page, w23=pages scanned, x24=&QPN_MAX,
; x25=&BITS_PER_PAGE, w26=scan limit, x27=&QPNMAP_ENTRIES.
; NOTE(review): field meanings are inferred from the code -- confirm
; against the extracted C source before relying on them.
; ---------------------------------------------------------------------------
.p2align 2 ; -- Begin function alloc_qpn
_alloc_qpn: ; @alloc_qpn
.cfi_startproc
; %bb.0:
; Prologue: save every callee-saved pair used (x19-x28, fp/lr), 96 bytes.
stp x28, x27, [sp, #-96]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 96
stp x26, x25, [sp, #16] ; 16-byte Folded Spill
stp x24, x23, [sp, #32] ; 16-byte Folded Spill
stp x22, x21, [sp, #48] ; 16-byte Folded Spill
stp x20, x19, [sp, #64] ; 16-byte Folded Spill
stp x29, x30, [sp, #80] ; 16-byte Folded Spill
add x29, sp, #80
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
mov x19, x0
; type == IB_QPT_SMI?  -> fixed qpn 0
Lloh0:
adrp x8, _IB_QPT_SMI@GOTPAGE
Lloh1:
ldr x8, [x8, _IB_QPT_SMI@GOTPAGEOFF]
Lloh2:
ldr w8, [x8]
cmp w8, w1
b.ne LBB0_2
; %bb.1:
mov w20, #0
b LBB0_4
LBB0_2:
; type == IB_QPT_GSI?  -> fixed qpn 1; any other type takes the scan path.
Lloh3:
adrp x8, _IB_QPT_GSI@GOTPAGE
Lloh4:
ldr x8, [x8, _IB_QPT_GSI@GOTPAGEOFF]
Lloh5:
ldr w8, [x8]
cmp w8, w1
b.ne LBB0_10
; %bb.3:
mov w20, #1
LBB0_4:
; Special-QP path: ensure the first map page exists, then claim bit w20.
ldr x21, [x19, #8]
ldr w8, [x21, #4]
cmp w8, #0
cset w0, eq
bl _unlikely
cbz x0, LBB0_7
; %bb.5:
mov x0, x19
mov x1, x21
bl _get_map_page
ldr w8, [x21, #4]
cmp w8, #0
cset w0, eq
bl _unlikely
cbz x0, LBB0_7
LBB0_6:
; Page still missing after get_map_page -> return -ENOMEM.
Lloh6:
adrp x8, _ENOMEM@GOTPAGE
Lloh7:
ldr x8, [x8, _ENOMEM@GOTPAGEOFF]
b LBB0_9
LBB0_7:
ldr w1, [x21, #4]
mov x0, x20
bl _test_and_set_bit
cbz w0, LBB0_23
; %bb.8:
; Bit was already set: the special qpn is in use -> return -EBUSY.
Lloh8:
adrp x8, _EBUSY@GOTPAGE
Lloh9:
ldr x8, [x8, _EBUSY@GOTPAGEOFF]
LBB0_9:
; Negate the errno value loaded through x8 and return it.
ldr w8, [x8]
neg w20, w8
b LBB0_24
LBB0_10:
; General path: candidate qpn = (table->w[0]+1 < QPN_MAX) ? w[0]+1 : 2;
; then split it into page (qpn / BITS_PER_PAGE) and offset (qpn & MASK).
mov w23, #0
ldp w8, w9, [x19]
add w10, w8, #1
Lloh10:
adrp x24, _QPN_MAX@GOTPAGE
Lloh11:
ldr x24, [x24, _QPN_MAX@GOTPAGEOFF]
ldr w11, [x24]
cmp w10, w11
mov w10, #2
csinc w20, w10, w8, ge
Lloh12:
adrp x8, _BITS_PER_PAGE_MASK@GOTPAGE
Lloh13:
ldr x8, [x8, _BITS_PER_PAGE_MASK@GOTPAGEOFF]
Lloh14:
ldr w8, [x8]
ands w22, w20, w8
ldr x8, [x19, #8]
Lloh15:
adrp x25, _BITS_PER_PAGE@GOTPAGE
Lloh16:
ldr x25, [x25, _BITS_PER_PAGE@GOTPAGEOFF]
ldr w10, [x25]
sdiv w10, w20, w10
add x21, x8, w10, sxtw #3
cset w8, eq
sub w26, w9, w8
Lloh17:
adrp x27, _QPNMAP_ENTRIES@GOTPAGE
Lloh18:
ldr x27, [x27, _QPNMAP_ENTRIES@GOTPAGEOFF]
LBB0_11: ; =>This Loop Header: Depth=1
; Child Loop BB0_14 Depth 2
; Outer loop: one iteration per map page tried; allocate the page first
; if it is missing (-ENOMEM if allocation fails).
ldr w8, [x21, #4]
cmp w8, #0
cset w0, eq
bl _unlikely
cbz x0, LBB0_13
; %bb.12: ; in Loop: Header=BB0_11 Depth=1
mov x0, x19
mov x1, x21
bl _get_map_page
ldr w8, [x21, #4]
cmp w8, #0
cset w0, eq
bl _unlikely
cbnz x0, LBB0_6
LBB0_13: ; in Loop: Header=BB0_11 Depth=1
; Skip the bit scan entirely when atomic_read(page) reports no room.
mov x0, x21
bl _atomic_read
bl _likely
cbz x0, LBB0_17
LBB0_14: ; Parent Loop BB0_11 Depth=1
; => This Inner Loop Header: Depth=2
; Inner loop: try to claim bit w22; on collision advance to the next
; free offset while offset < BITS_PER_PAGE and mk_qpn(...) < QPN_MAX.
ldr w1, [x21, #4]
mov x0, x22
bl _test_and_set_bit
cbz w0, LBB0_22
; %bb.15: ; in Loop: Header=BB0_14 Depth=2
mov x0, x21
mov x1, x22
bl _find_next_offset
mov x22, x0
mov x0, x19
mov x1, x21
mov x2, x22
bl _mk_qpn
ldr w8, [x25]
ldr w9, [x24]
cmp w22, w8
ccmp w0, w9, #0, lt
b.ge LBB0_17
; %bb.16: ; in Loop: Header=BB0_14 Depth=2
mov x20, x0
b LBB0_14
LBB0_17: ; in Loop: Header=BB0_11 Depth=1
; Page exhausted: advance to the next page (wrapping to the first once
; past the last allocated one) while fewer than w26 pages scanned.
cmp w23, w26
b.ge LBB0_19
; %bb.18: ; in Loop: Header=BB0_11 Depth=1
ldr x8, [x19, #8]
ldrsw x9, [x19, #4]
add x9, x8, x9, lsl #3
add x10, x21, #8
cmp x21, x9
csel x21, x10, x8, lo
cset w8, hs
lsl w22, w8, #1
b LBB0_21
LBB0_19: ; in Loop: Header=BB0_11 Depth=1
; Grow the table by one page; fail with -ENOMEM once QPNMAP_ENTRIES
; pages already exist.
ldrsw x8, [x19, #4]
ldr w9, [x27]
cmp w8, w9
b.eq LBB0_6
; %bb.20: ; in Loop: Header=BB0_11 Depth=1
mov w22, #0
ldr x9, [x19, #8]
add w10, w8, #1
str w10, [x19, #4]
add x21, x9, x8, lsl #3
LBB0_21: ; in Loop: Header=BB0_11 Depth=1
; Recompute the candidate qpn for the new page/offset and loop again.
add w23, w23, #1
mov x0, x19
mov x1, x21
mov x2, x22
bl _mk_qpn
mov x20, x0
b LBB0_11
LBB0_22:
; Claimed a scanned bit: drop the page's atomic counter and record the
; qpn in table->w[0] before returning it.
mov x0, x21
bl _atomic_dec
str w20, [x19]
b LBB0_24
LBB0_23:
; Claimed the special (SMI/GSI) bit; w20 already holds 0 or 1.
mov x0, x21
bl _atomic_dec
LBB0_24:
; Epilogue: result in w0, restore callee-saved registers.
mov x0, x20
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
ldp x26, x25, [sp, #16] ; 16-byte Folded Reload
ldp x28, x27, [sp], #96 ; 16-byte Folded Reload
ret
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGot Lloh6, Lloh7
.loh AdrpLdrGot Lloh8, Lloh9
.loh AdrpLdrGot Lloh17, Lloh18
.loh AdrpLdrGot Lloh15, Lloh16
.loh AdrpLdrGotLdr Lloh12, Lloh13, Lloh14
.loh AdrpLdrGot Lloh10, Lloh11
.cfi_endproc
; -- End function
.comm _IB_QPT_SMI,4,2 ; @IB_QPT_SMI
.comm _IB_QPT_GSI,4,2 ; @IB_QPT_GSI
.comm _ENOMEM,4,2 ; @ENOMEM
.comm _EBUSY,4,2 ; @EBUSY
.comm _QPN_MAX,4,2 ; @QPN_MAX
.comm _BITS_PER_PAGE_MASK,4,2 ; @BITS_PER_PAGE_MASK
.comm _BITS_PER_PAGE,4,2 ; @BITS_PER_PAGE
.comm _QPNMAP_ENTRIES,4,2 ; @QPNMAP_ENTRIES
.no_dead_strip _alloc_qpn
.subsections_via_symbols
| AnghaBench/fastsocket/kernel/drivers/infiniband/hw/ipath/extr_ipath_qp.c_alloc_qpn.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
## ---------------------------------------------------------------------------
## mga_driver_irq_preinstall -- clang-generated x86-64 (Darwin) code.
## Disables all MGA interrupts (MGA_WRITE(MGA_IEN, 0)) and then clears any
## pending ones (MGA_WRITE(MGA_ICLEAR, ~0)); the second write is emitted as
## a tail call, so its return value becomes this function's return value.
## ---------------------------------------------------------------------------
.globl _mga_driver_irq_preinstall ## -- Begin function mga_driver_irq_preinstall
.p2align 4, 0x90
_mga_driver_irq_preinstall: ## @mga_driver_irq_preinstall
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
## MGA_WRITE(MGA_IEN, 0): mask every interrupt source.
movq _MGA_IEN@GOTPCREL(%rip), %rax
movl (%rax), %edi
xorl %esi, %esi
callq _MGA_WRITE
## MGA_WRITE(MGA_ICLEAR, ~0): ack/clear all pending interrupts (tail call).
movq _MGA_ICLEAR@GOTPCREL(%rip), %rax
movl (%rax), %edi
movl $-1, %esi
popq %rbp
jmp _MGA_WRITE ## TAILCALL
.cfi_endproc
## -- End function
.comm _MGA_IEN,4,2 ## @MGA_IEN
.comm _MGA_ICLEAR,4,2 ## @MGA_ICLEAR
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
; ---------------------------------------------------------------------------
; mga_driver_irq_preinstall -- clang-generated AArch64 (Darwin) code.
; Disables all MGA interrupts (MGA_WRITE(MGA_IEN, 0)) and then clears any
; pending ones (MGA_WRITE(MGA_ICLEAR, ~0)); the second write is a tail
; branch, so its return value becomes this function's return value.
; ---------------------------------------------------------------------------
.globl _mga_driver_irq_preinstall ; -- Begin function mga_driver_irq_preinstall
.p2align 2
_mga_driver_irq_preinstall: ; @mga_driver_irq_preinstall
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
; MGA_WRITE(MGA_IEN, 0): mask every interrupt source.
Lloh0:
adrp x8, _MGA_IEN@GOTPAGE
Lloh1:
ldr x8, [x8, _MGA_IEN@GOTPAGEOFF]
Lloh2:
ldr w0, [x8]
mov w1, #0
bl _MGA_WRITE
; MGA_WRITE(MGA_ICLEAR, ~0): ack/clear all pending interrupts (tail call).
Lloh3:
adrp x8, _MGA_ICLEAR@GOTPAGE
Lloh4:
ldr x8, [x8, _MGA_ICLEAR@GOTPAGEOFF]
Lloh5:
ldr w0, [x8]
mov w1, #-1
ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
b _MGA_WRITE
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _MGA_IEN,4,2 ; @MGA_IEN
.comm _MGA_ICLEAR,4,2 ; @MGA_ICLEAR
.subsections_via_symbols
| AnghaBench/linux/drivers/gpu/drm/mga/extr_mga_irq.c_mga_driver_irq_preinstall.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
## ---------------------------------------------------------------------------
## af9005_fe_read_status -- clang-generated x86-64 (Darwin) code.
## Args: rdi = frontend struct, rsi = status word pointer.
## Returns -ENODEV if the first qword of the frontend is NULL; otherwise
## zeroes *status and reads three lock bitfields via
## af9005_read_register_bits, ORing FE_HAS_SIGNAL, FE_HAS_CARRIER, and
## (FE_HAS_SYNC|FE_HAS_VITERBI|FE_HAS_LOCK) into *status as each lock is
## seen.  Optionally drives af9005_led_control, then reads the
## "strong signal detected" bitfield and, if it changed, logs via deb_info
## and caches the new value.  Returns 0 or the first read error.
## Register roles: r14=status ptr, r15=state (fe+8), ebx=return code,
## -32(%rbp)=bitfield output slot passed by address in r8.
## ---------------------------------------------------------------------------
.p2align 4, 0x90 ## -- Begin function af9005_fe_read_status
_af9005_fe_read_status: ## @af9005_fe_read_status
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
## Guard: NULL first member -> -ENODEV.
cmpq $0, (%rdi)
je LBB0_1
## %bb.2:
## Stage 1: AGC lock -> FE_HAS_SIGNAL.
movq %rsi, %r14
movq 8(%rdi), %r15
movl $0, (%rsi)
movl 8(%r15), %edi
movq _xd_p_agc_lock@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq _agc_lock_pos@GOTPCREL(%rip), %rax
movl (%rax), %edx
movq _agc_lock_len@GOTPCREL(%rip), %rax
movl (%rax), %ecx
leaq -32(%rbp), %r8
callq _af9005_read_register_bits
movl %eax, %ebx
testl %eax, %eax
jne LBB0_16
## %bb.3:
cmpq $0, -32(%rbp)
je LBB0_5
## %bb.4:
movq _FE_HAS_SIGNAL@GOTPCREL(%rip), %rax
movl (%rax), %eax
orl %eax, (%r14)
LBB0_5:
## Stage 2: TPS decoder lock -> FE_HAS_CARRIER.
movl 8(%r15), %edi
movq _xd_p_fd_tpsd_lock@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq _fd_tpsd_lock_pos@GOTPCREL(%rip), %rax
movl (%rax), %edx
movq _fd_tpsd_lock_len@GOTPCREL(%rip), %rax
movl (%rax), %ecx
leaq -32(%rbp), %r8
callq _af9005_read_register_bits
movl %eax, %ebx
testl %eax, %eax
jne LBB0_16
## %bb.6:
cmpq $0, -32(%rbp)
je LBB0_8
## %bb.7:
movq _FE_HAS_CARRIER@GOTPCREL(%rip), %rax
movl (%rax), %eax
orl %eax, (%r14)
LBB0_8:
## Stage 3: MPEG2 sync-byte lock -> FE_HAS_SYNC|FE_HAS_VITERBI|FE_HAS_LOCK.
## Note pos is reused as len here (edx = ecx), matching the original call.
movl 8(%r15), %edi
movq _xd_r_mp2if_sync_byte_locked@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq _mp2if_sync_byte_locked_pos@GOTPCREL(%rip), %rax
movl (%rax), %ecx
leaq -32(%rbp), %r8
movl %ecx, %edx
callq _af9005_read_register_bits
movl %eax, %ebx
testl %eax, %eax
jne LBB0_16
## %bb.9:
cmpq $0, -32(%rbp)
je LBB0_11
## %bb.10:
movq _FE_HAS_SYNC@GOTPCREL(%rip), %rax
movq _FE_HAS_VITERBI@GOTPCREL(%rip), %rcx
movl (%rcx), %ecx
orl (%rax), %ecx
movq _FE_HAS_LOCK@GOTPCREL(%rip), %rax
orl (%rax), %ecx
orl %ecx, (%r14)
LBB0_11:
## If state+16 is non-NULL, drive the LED from (*status & FE_HAS_LOCK).
cmpq $0, 16(%r15)
je LBB0_13
## %bb.12:
movl 8(%r15), %edi
movq _FE_HAS_LOCK@GOTPCREL(%rip), %rax
movl (%rax), %esi
andl (%r14), %esi
callq _af9005_led_control
LBB0_13:
## Stage 4: strong-signal bitfield; cache a changed value with a trace.
movl 8(%r15), %edi
movq _xd_p_reg_strong_sginal_detected@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq _reg_strong_sginal_detected_pos@GOTPCREL(%rip), %rax
movl (%rax), %edx
movq _reg_strong_sginal_detected_len@GOTPCREL(%rip), %rax
movl (%rax), %ecx
leaq -32(%rbp), %r8
callq _af9005_read_register_bits
movl %eax, %ebx
testl %eax, %eax
jne LBB0_16
## %bb.14:
movq -32(%rbp), %rsi
xorl %ebx, %ebx
cmpq (%r15), %rsi
je LBB0_16
## %bb.15:
leaq L_.str(%rip), %rdi
callq _deb_info
movq -32(%rbp), %rax
movq %rax, (%r15)
jmp LBB0_16
LBB0_1:
## No device attached: return -ENODEV.
movq _ENODEV@GOTPCREL(%rip), %rax
xorl %ebx, %ebx
subl (%rax), %ebx
LBB0_16:
## Common exit: return code accumulated in ebx.
movl %ebx, %eax
addq $8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _ENODEV,4,2 ## @ENODEV
.comm _xd_p_agc_lock,4,2 ## @xd_p_agc_lock
.comm _agc_lock_pos,4,2 ## @agc_lock_pos
.comm _agc_lock_len,4,2 ## @agc_lock_len
.comm _FE_HAS_SIGNAL,4,2 ## @FE_HAS_SIGNAL
.comm _xd_p_fd_tpsd_lock,4,2 ## @xd_p_fd_tpsd_lock
.comm _fd_tpsd_lock_pos,4,2 ## @fd_tpsd_lock_pos
.comm _fd_tpsd_lock_len,4,2 ## @fd_tpsd_lock_len
.comm _FE_HAS_CARRIER,4,2 ## @FE_HAS_CARRIER
.comm _xd_r_mp2if_sync_byte_locked,4,2 ## @xd_r_mp2if_sync_byte_locked
.comm _mp2if_sync_byte_locked_pos,4,2 ## @mp2if_sync_byte_locked_pos
.comm _FE_HAS_SYNC,4,2 ## @FE_HAS_SYNC
.comm _FE_HAS_VITERBI,4,2 ## @FE_HAS_VITERBI
.comm _FE_HAS_LOCK,4,2 ## @FE_HAS_LOCK
.comm _xd_p_reg_strong_sginal_detected,4,2 ## @xd_p_reg_strong_sginal_detected
.comm _reg_strong_sginal_detected_pos,4,2 ## @reg_strong_sginal_detected_pos
.comm _reg_strong_sginal_detected_len,4,2 ## @reg_strong_sginal_detected_len
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "adjust for strong signal %d\n"
.no_dead_strip _af9005_fe_read_status
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
; ---------------------------------------------------------------------------
; af9005_fe_read_status -- clang-generated AArch64 (Darwin) code; same
; logic as the x86-64 build above it in this dataset row.
; Args: x0 = frontend struct, x1 = status word pointer.
; Returns -ENODEV if the first qword of the frontend is NULL; otherwise
; zeroes *status, reads AGC / TPS / MP2IF lock bitfields via
; af9005_read_register_bits (ORing the matching FE_HAS_* flags into
; *status), optionally drives af9005_led_control, then reads the
; "strong signal detected" bitfield and caches a changed value with a
; deb_info trace.  Returns 0 or the first read error.
; Register roles: x19=status ptr, x20=state (fe+8), [sp,#8]=bitfield
; output slot passed by address in x4.
; ---------------------------------------------------------------------------
.p2align 2 ; -- Begin function af9005_fe_read_status
_af9005_fe_read_status: ; @af9005_fe_read_status
.cfi_startproc
; %bb.0:
sub sp, sp, #48
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
; Guard: NULL first member -> -ENODEV.
ldr x8, [x0]
cbz x8, LBB0_15
; %bb.1:
; Stage 1: AGC lock -> FE_HAS_SIGNAL.
mov x19, x1
ldr x20, [x0, #8]
str wzr, [x1]
ldr w0, [x20, #8]
Lloh0:
adrp x8, _xd_p_agc_lock@GOTPAGE
Lloh1:
ldr x8, [x8, _xd_p_agc_lock@GOTPAGEOFF]
Lloh2:
ldr w1, [x8]
Lloh3:
adrp x8, _agc_lock_pos@GOTPAGE
Lloh4:
ldr x8, [x8, _agc_lock_pos@GOTPAGEOFF]
Lloh5:
ldr w2, [x8]
Lloh6:
adrp x8, _agc_lock_len@GOTPAGE
Lloh7:
ldr x8, [x8, _agc_lock_len@GOTPAGEOFF]
Lloh8:
ldr w3, [x8]
add x4, sp, #8
bl _af9005_read_register_bits
cbnz w0, LBB0_16
; %bb.2:
ldr x8, [sp, #8]
cbz x8, LBB0_4
; %bb.3:
Lloh9:
adrp x8, _FE_HAS_SIGNAL@GOTPAGE
Lloh10:
ldr x8, [x8, _FE_HAS_SIGNAL@GOTPAGEOFF]
Lloh11:
ldr w8, [x8]
ldr w9, [x19]
orr w8, w9, w8
str w8, [x19]
LBB0_4:
; Stage 2: TPS decoder lock -> FE_HAS_CARRIER.
ldr w0, [x20, #8]
Lloh12:
adrp x8, _xd_p_fd_tpsd_lock@GOTPAGE
Lloh13:
ldr x8, [x8, _xd_p_fd_tpsd_lock@GOTPAGEOFF]
Lloh14:
ldr w1, [x8]
Lloh15:
adrp x8, _fd_tpsd_lock_pos@GOTPAGE
Lloh16:
ldr x8, [x8, _fd_tpsd_lock_pos@GOTPAGEOFF]
Lloh17:
ldr w2, [x8]
Lloh18:
adrp x8, _fd_tpsd_lock_len@GOTPAGE
Lloh19:
ldr x8, [x8, _fd_tpsd_lock_len@GOTPAGEOFF]
Lloh20:
ldr w3, [x8]
add x4, sp, #8
bl _af9005_read_register_bits
cbnz w0, LBB0_16
; %bb.5:
ldr x8, [sp, #8]
cbz x8, LBB0_7
; %bb.6:
Lloh21:
adrp x8, _FE_HAS_CARRIER@GOTPAGE
Lloh22:
ldr x8, [x8, _FE_HAS_CARRIER@GOTPAGEOFF]
Lloh23:
ldr w8, [x8]
ldr w9, [x19]
orr w8, w9, w8
str w8, [x19]
LBB0_7:
; Stage 3: MPEG2 sync-byte lock -> FE_HAS_SYNC|FE_HAS_VITERBI|FE_HAS_LOCK.
; Note pos is reused as len here (x3 = x2), matching the original call.
ldr w0, [x20, #8]
Lloh24:
adrp x8, _xd_r_mp2if_sync_byte_locked@GOTPAGE
Lloh25:
ldr x8, [x8, _xd_r_mp2if_sync_byte_locked@GOTPAGEOFF]
Lloh26:
ldr w1, [x8]
Lloh27:
adrp x8, _mp2if_sync_byte_locked_pos@GOTPAGE
Lloh28:
ldr x8, [x8, _mp2if_sync_byte_locked_pos@GOTPAGEOFF]
Lloh29:
ldr w2, [x8]
add x4, sp, #8
mov x3, x2
bl _af9005_read_register_bits
cbnz w0, LBB0_16
; %bb.8:
ldr x9, [sp, #8]
Lloh30:
adrp x8, _FE_HAS_LOCK@GOTPAGE
Lloh31:
ldr x8, [x8, _FE_HAS_LOCK@GOTPAGEOFF]
cbz x9, LBB0_10
; %bb.9:
Lloh32:
adrp x9, _FE_HAS_SYNC@GOTPAGE
Lloh33:
ldr x9, [x9, _FE_HAS_SYNC@GOTPAGEOFF]
Lloh34:
ldr w9, [x9]
Lloh35:
adrp x10, _FE_HAS_VITERBI@GOTPAGE
Lloh36:
ldr x10, [x10, _FE_HAS_VITERBI@GOTPAGEOFF]
Lloh37:
ldr w10, [x10]
orr w9, w10, w9
ldr w10, [x8]
orr w9, w9, w10
ldr w10, [x19]
orr w9, w9, w10
str w9, [x19]
LBB0_10:
; If state+16 is non-NULL, drive the LED from (*status & FE_HAS_LOCK).
ldr x9, [x20, #16]
cbz x9, LBB0_12
; %bb.11:
ldr w0, [x20, #8]
ldr w9, [x19]
ldr w8, [x8]
and w1, w8, w9
bl _af9005_led_control
LBB0_12:
; Stage 4: strong-signal bitfield; cache a changed value with a trace.
ldr w0, [x20, #8]
Lloh38:
adrp x8, _xd_p_reg_strong_sginal_detected@GOTPAGE
Lloh39:
ldr x8, [x8, _xd_p_reg_strong_sginal_detected@GOTPAGEOFF]
Lloh40:
ldr w1, [x8]
Lloh41:
adrp x8, _reg_strong_sginal_detected_pos@GOTPAGE
Lloh42:
ldr x8, [x8, _reg_strong_sginal_detected_pos@GOTPAGEOFF]
Lloh43:
ldr w2, [x8]
Lloh44:
adrp x8, _reg_strong_sginal_detected_len@GOTPAGE
Lloh45:
ldr x8, [x8, _reg_strong_sginal_detected_len@GOTPAGEOFF]
Lloh46:
ldr w3, [x8]
add x4, sp, #8
bl _af9005_read_register_bits
cbnz w0, LBB0_16
; %bb.13:
ldr x1, [sp, #8]
ldr x8, [x20]
cmp x1, x8
b.ne LBB0_17
; %bb.14:
mov w0, #0
b LBB0_16
LBB0_15:
; No device attached: return -ENODEV.
Lloh47:
adrp x8, _ENODEV@GOTPAGE
Lloh48:
ldr x8, [x8, _ENODEV@GOTPAGEOFF]
Lloh49:
ldr w8, [x8]
neg w0, w8
LBB0_16:
; Common exit: return code already in w0.
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #48
ret
LBB0_17:
; Strong-signal value changed: trace it and store the new value.
Lloh50:
adrp x0, l_.str@PAGE
Lloh51:
add x0, x0, l_.str@PAGEOFF
bl _deb_info
mov w0, #0
ldr x8, [sp, #8]
str x8, [x20]
b LBB0_16
.loh AdrpLdrGotLdr Lloh6, Lloh7, Lloh8
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGotLdr Lloh9, Lloh10, Lloh11
.loh AdrpLdrGotLdr Lloh18, Lloh19, Lloh20
.loh AdrpLdrGotLdr Lloh15, Lloh16, Lloh17
.loh AdrpLdrGotLdr Lloh12, Lloh13, Lloh14
.loh AdrpLdrGotLdr Lloh21, Lloh22, Lloh23
.loh AdrpLdrGotLdr Lloh27, Lloh28, Lloh29
.loh AdrpLdrGotLdr Lloh24, Lloh25, Lloh26
.loh AdrpLdrGot Lloh30, Lloh31
.loh AdrpLdrGotLdr Lloh35, Lloh36, Lloh37
.loh AdrpLdrGotLdr Lloh32, Lloh33, Lloh34
.loh AdrpLdrGotLdr Lloh44, Lloh45, Lloh46
.loh AdrpLdrGotLdr Lloh41, Lloh42, Lloh43
.loh AdrpLdrGotLdr Lloh38, Lloh39, Lloh40
.loh AdrpLdrGotLdr Lloh47, Lloh48, Lloh49
.loh AdrpAdd Lloh50, Lloh51
.cfi_endproc
; -- End function
.comm _ENODEV,4,2 ; @ENODEV
.comm _xd_p_agc_lock,4,2 ; @xd_p_agc_lock
.comm _agc_lock_pos,4,2 ; @agc_lock_pos
.comm _agc_lock_len,4,2 ; @agc_lock_len
.comm _FE_HAS_SIGNAL,4,2 ; @FE_HAS_SIGNAL
.comm _xd_p_fd_tpsd_lock,4,2 ; @xd_p_fd_tpsd_lock
.comm _fd_tpsd_lock_pos,4,2 ; @fd_tpsd_lock_pos
.comm _fd_tpsd_lock_len,4,2 ; @fd_tpsd_lock_len
.comm _FE_HAS_CARRIER,4,2 ; @FE_HAS_CARRIER
.comm _xd_r_mp2if_sync_byte_locked,4,2 ; @xd_r_mp2if_sync_byte_locked
.comm _mp2if_sync_byte_locked_pos,4,2 ; @mp2if_sync_byte_locked_pos
.comm _FE_HAS_SYNC,4,2 ; @FE_HAS_SYNC
.comm _FE_HAS_VITERBI,4,2 ; @FE_HAS_VITERBI
.comm _FE_HAS_LOCK,4,2 ; @FE_HAS_LOCK
.comm _xd_p_reg_strong_sginal_detected,4,2 ; @xd_p_reg_strong_sginal_detected
.comm _reg_strong_sginal_detected_pos,4,2 ; @reg_strong_sginal_detected_pos
.comm _reg_strong_sginal_detected_len,4,2 ; @reg_strong_sginal_detected_len
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "adjust for strong signal %d\n"
.no_dead_strip _af9005_fe_read_status
.subsections_via_symbols
| AnghaBench/fastsocket/kernel/drivers/media/dvb/dvb-usb/extr_af9005-fe.c_af9005_fe_read_status.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
## ---------------------------------------------------------------------------
## entcpy(dst=%rdi, src=%rsi, n=%edx) -- clang auto-vectorized forward copy
## (memcpy-like).  Dispatch: n==0 -> done; n<8 or (dst-src)<32 -> scalar
## path; otherwise a 128-byte unrolled SSE loop, a 32-byte SSE remainder
## loop, an 8-byte word loop, then a byte loop for n%8 and an 8x-unrolled
## byte loop for the rest.
## NOTE(review): the (dst-src) >= 32 guard means the vector path assumes
## the regions do not overlap within a vector width -- forward copy only.
## ---------------------------------------------------------------------------
.globl _entcpy ## -- Begin function entcpy
.p2align 4, 0x90
_entcpy: ## @entcpy
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
.cfi_offset %rbx, -24
## kill: def $edx killed $edx def $rdx
## n == 0: nothing to copy.
testl %edx, %edx
je LBB0_16
## %bb.1:
## Vector path requires n >= 8 and dst-src >= 32.
leal -1(%rdx), %eax
cmpl $7, %eax
jb LBB0_2
## %bb.3:
movq %rdi, %rcx
subq %rsi, %rcx
cmpq $32, %rcx
jb LBB0_2
## %bb.4:
leaq 1(%rax), %r8
movabsq $8589934560, %r9 ## imm = 0x1FFFFFFE0
cmpl $31, %eax
jae LBB0_17
## %bb.5:
xorl %r10d, %r10d
jmp LBB0_6
LBB0_17:
## r10 = n rounded down to a multiple of 32 (vector-copied byte count).
movq %r8, %r10
andq %r9, %r10
leaq -32(%r10), %rax
movq %rax, %rcx
shrq $5, %rcx
incq %rcx
movl %ecx, %r11d
andl $3, %r11d
cmpq $96, %rax
jae LBB0_19
## %bb.18:
xorl %eax, %eax
jmp LBB0_21
LBB0_19:
andq $-4, %rcx
xorl %eax, %eax
.p2align 4, 0x90
LBB0_20: ## =>This Inner Loop Header: Depth=1
## Main loop: 128 bytes per iteration via eight unaligned 16-byte moves.
movups (%rsi,%rax), %xmm0
movups 16(%rsi,%rax), %xmm1
movups %xmm0, (%rdi,%rax)
movups %xmm1, 16(%rdi,%rax)
movups 32(%rsi,%rax), %xmm0
movups 48(%rsi,%rax), %xmm1
movups %xmm0, 32(%rdi,%rax)
movups %xmm1, 48(%rdi,%rax)
movups 64(%rsi,%rax), %xmm0
movups 80(%rsi,%rax), %xmm1
movups %xmm0, 64(%rdi,%rax)
movups %xmm1, 80(%rdi,%rax)
movups 96(%rsi,%rax), %xmm0
movups 112(%rsi,%rax), %xmm1
movups %xmm0, 96(%rdi,%rax)
movups %xmm1, 112(%rdi,%rax)
subq $-128, %rax
addq $-4, %rcx
jne LBB0_20
LBB0_21:
testq %r11, %r11
je LBB0_24
## %bb.22:
## Remainder loop: 32 bytes per iteration (r11 = leftover 32-byte chunks).
leaq (%rdi,%rax), %rcx
addq $16, %rcx
addq %rsi, %rax
addq $16, %rax
shlq $5, %r11
xorl %ebx, %ebx
.p2align 4, 0x90
LBB0_23: ## =>This Inner Loop Header: Depth=1
movups -16(%rax,%rbx), %xmm0
movups (%rax,%rbx), %xmm1
movups %xmm0, -16(%rcx,%rbx)
movups %xmm1, (%rcx,%rbx)
addq $32, %rbx
cmpq %rbx, %r11
jne LBB0_23
LBB0_24:
cmpq %r10, %r8
je LBB0_16
## %bb.25:
testb $24, %r8b
je LBB0_26
LBB0_6:
## Word loop: copy 8 bytes at a time up to n rounded down to 8.
addq $24, %r9
andq %r8, %r9
subl %r9d, %edx
leaq (%rsi,%r9), %rax
leaq (%rdi,%r9), %rcx
.p2align 4, 0x90
LBB0_7: ## =>This Inner Loop Header: Depth=1
movq (%rsi,%r10), %rbx
movq %rbx, (%rdi,%r10)
addq $8, %r10
cmpq %r10, %r9
jne LBB0_7
## %bb.8:
cmpq %r9, %r8
jne LBB0_9
jmp LBB0_16
LBB0_26:
addq %r10, %rdi
addq %r10, %rsi
subl %r10d, %edx
LBB0_2:
## Scalar entry: rax = src cursor, rcx = dst cursor, edx = bytes left.
movq %rsi, %rax
movq %rdi, %rcx
LBB0_9:
leal -1(%rdx), %r8d
testb $7, %dl
je LBB0_13
## %bb.10:
## Byte loop for n % 8 bytes.
movl %edx, %esi
andl $7, %esi
xorl %edi, %edi
.p2align 4, 0x90
LBB0_11: ## =>This Inner Loop Header: Depth=1
movzbl (%rax,%rdi), %ebx
movb %bl, (%rcx,%rdi)
incq %rdi
cmpl %edi, %esi
jne LBB0_11
## %bb.12:
subl %edi, %edx
addq %rdi, %rax
addq %rdi, %rcx
LBB0_13:
cmpl $7, %r8d
jb LBB0_16
## %bb.14:
## 8x-unrolled byte loop for the remaining multiple-of-8 count.
movl %edx, %edx
xorl %esi, %esi
.p2align 4, 0x90
LBB0_15: ## =>This Inner Loop Header: Depth=1
movzbl (%rax,%rsi), %ebx
movb %bl, (%rcx,%rsi)
movzbl 1(%rax,%rsi), %ebx
movb %bl, 1(%rcx,%rsi)
movzbl 2(%rax,%rsi), %ebx
movb %bl, 2(%rcx,%rsi)
movzbl 3(%rax,%rsi), %ebx
movb %bl, 3(%rcx,%rsi)
movzbl 4(%rax,%rsi), %ebx
movb %bl, 4(%rcx,%rsi)
movzbl 5(%rax,%rsi), %ebx
movb %bl, 5(%rcx,%rsi)
movzbl 6(%rax,%rsi), %ebx
movb %bl, 6(%rcx,%rsi)
movzbl 7(%rax,%rsi), %ebx
movb %bl, 7(%rcx,%rsi)
addq $8, %rsi
cmpl %esi, %edx
jne LBB0_15
LBB0_16:
popq %rbx
popq %rbp
retq
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
; ---------------------------------------------------------------------------
; entcpy(dst=x0, src=x1, n=w2) -- clang auto-vectorized forward copy
; (memcpy-like), AArch64 build of the x86-64 function above.
; Dispatch: n==0 -> done; n<8 or (dst-src)<64 -> scalar byte loop;
; otherwise a 64-byte NEON loop (ldp/stp of q registers), an 8-byte word
; loop, then the scalar byte loop for the tail.
; NOTE(review): the (dst-src) >= 64 guard means the vector path assumes
; the regions do not overlap within a vector width -- forward copy only.
; ---------------------------------------------------------------------------
.globl _entcpy ; -- Begin function entcpy
.p2align 2
_entcpy: ; @entcpy
.cfi_startproc
; %bb.0:
; n == 0: nothing to copy (leaf function, no frame needed).
cbz w2, LBB0_15
; %bb.1:
sub w9, w2, #1
cmp w9, #7
b.lo LBB0_5
; %bb.2:
sub x8, x0, x1
cmp x8, #64
b.lo LBB0_5
; %bb.3:
add x8, x9, #1
cmp w9, #63
b.hs LBB0_6
; %bb.4:
mov x11, #0
b LBB0_10
LBB0_5:
; Scalar entry: x9 = src cursor, x10 = dst cursor.
mov x9, x1
mov x10, x0
b LBB0_14
LBB0_6:
; Main loop: 64 bytes per iteration via two ldp/stp q-register pairs.
; x11 = n rounded down to a multiple of 64.
and x11, x8, #0x1ffffffc0
add x9, x1, #32
add x10, x0, #32
mov x12, x11
LBB0_7: ; =>This Inner Loop Header: Depth=1
ldp q0, q1, [x9, #-32]
ldp q2, q3, [x9], #64
stp q0, q1, [x10, #-32]
stp q2, q3, [x10], #64
subs x12, x12, #64
b.ne LBB0_7
; %bb.8:
cmp x8, x11
b.eq LBB0_15
; %bb.9:
tst x8, #0x38
b.eq LBB0_13
LBB0_10:
; Word loop: 8 bytes per iteration up to n rounded down to 8.
and x12, x8, #0x1fffffff8
sub w2, w2, w12
add x9, x1, x12
add x10, x0, x12
add x13, x0, x11
add x14, x1, x11
sub x11, x11, x12
LBB0_11: ; =>This Inner Loop Header: Depth=1
ldr d0, [x14], #8
str d0, [x13], #8
adds x11, x11, #8
b.ne LBB0_11
; %bb.12:
cmp x8, x12
b.ne LBB0_14
b LBB0_15
LBB0_13:
add x10, x0, x11
add x9, x1, x11
sub w2, w2, w11
LBB0_14: ; =>This Inner Loop Header: Depth=1
; Byte loop: copies the remaining w2 bytes one at a time.
ldrb w8, [x9], #1
strb w8, [x10], #1
subs w2, w2, #1
b.ne LBB0_14
LBB0_15:
ret
.cfi_endproc
; -- End function
.subsections_via_symbols
| the_stack_data/94358.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
## ---------------------------------------------------------------------------
## status_message_clear(c=%rdi) -- clang-generated x86-64 (Darwin) code
## (tmux status.c).  If the message string at c+16 is NULL, returns
## immediately.  Otherwise frees it, NULLs the slot, and -- when the field
## at c+8 is also NULL -- clears the TTY_NOCURSOR|TTY_FREEZE bits in the
## flags word at c+4.  Finally ORs CLIENT_ALLREDRAWFLAGS into the flags at
## c+0 and tail-calls status_pop_screen(c).
## ---------------------------------------------------------------------------
.globl _status_message_clear ## -- Begin function status_message_clear
.p2align 4, 0x90
_status_message_clear: ## @status_message_clear
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
movq %rdi, %rbx
movq 16(%rdi), %rdi
testq %rdi, %rdi
je LBB0_4
## %bb.1:
## Free and clear the message pointer.
callq _free
movq $0, 16(%rbx)
cmpq $0, 8(%rbx)
jne LBB0_3
## %bb.2:
## No prompt active: unfreeze the tty (clear TTY_NOCURSOR|TTY_FREEZE).
movq _TTY_NOCURSOR@GOTPCREL(%rip), %rax
movq _TTY_FREEZE@GOTPCREL(%rip), %rcx
movl (%rcx), %ecx
orl (%rax), %ecx
notl %ecx
andl %ecx, 4(%rbx)
LBB0_3:
## Request a full redraw and pop the status screen (tail call).
movq _CLIENT_ALLREDRAWFLAGS@GOTPCREL(%rip), %rax
movl (%rax), %eax
orl %eax, (%rbx)
movq %rbx, %rdi
addq $8, %rsp
popq %rbx
popq %rbp
jmp _status_pop_screen ## TAILCALL
LBB0_4:
## Nothing to clear.
addq $8, %rsp
popq %rbx
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _TTY_NOCURSOR,4,2 ## @TTY_NOCURSOR
.comm _TTY_FREEZE,4,2 ## @TTY_FREEZE
.comm _CLIENT_ALLREDRAWFLAGS,4,2 ## @CLIENT_ALLREDRAWFLAGS
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
; ---------------------------------------------------------------------------
; status_message_clear(c=x0) -- clang-generated AArch64 (Darwin) code,
; same logic as the x86-64 build above.  If the message string at c+16
; is NULL, returns immediately.  Otherwise frees it, NULLs the slot, and
; -- when the field at c+8 is also NULL -- clears the
; TTY_NOCURSOR|TTY_FREEZE bits in the flags word at c+4.  Finally ORs
; CLIENT_ALLREDRAWFLAGS into c+0 and tail-branches to status_pop_screen(c).
; ---------------------------------------------------------------------------
.globl _status_message_clear ; -- Begin function status_message_clear
.p2align 2
_status_message_clear: ; @status_message_clear
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x0
ldr x0, [x0, #16]
cbz x0, LBB0_4
; %bb.1:
; Free and clear the message pointer.
bl _free
str xzr, [x19, #16]
ldr x8, [x19, #8]
cbnz x8, LBB0_3
; %bb.2:
; No prompt active: unfreeze the tty (clear TTY_NOCURSOR|TTY_FREEZE).
Lloh0:
adrp x8, _TTY_NOCURSOR@GOTPAGE
Lloh1:
ldr x8, [x8, _TTY_NOCURSOR@GOTPAGEOFF]
Lloh2:
adrp x9, _TTY_FREEZE@GOTPAGE
Lloh3:
ldr x9, [x9, _TTY_FREEZE@GOTPAGEOFF]
Lloh4:
ldr w8, [x8]
Lloh5:
ldr w9, [x9]
orr w8, w9, w8
ldr w9, [x19, #4]
bic w8, w9, w8
str w8, [x19, #4]
LBB0_3:
; Request a full redraw and pop the status screen (tail branch).
Lloh6:
adrp x8, _CLIENT_ALLREDRAWFLAGS@GOTPAGE
Lloh7:
ldr x8, [x8, _CLIENT_ALLREDRAWFLAGS@GOTPAGEOFF]
Lloh8:
ldr w8, [x8]
ldr w9, [x19]
orr w8, w9, w8
str w8, [x19]
mov x0, x19
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
b _status_pop_screen
LBB0_4:
; Nothing to clear.
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.loh AdrpLdrGotLdr Lloh2, Lloh3, Lloh5
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh4
.loh AdrpLdrGotLdr Lloh6, Lloh7, Lloh8
.cfi_endproc
; -- End function
.comm _TTY_NOCURSOR,4,2 ; @TTY_NOCURSOR
.comm _TTY_FREEZE,4,2 ; @TTY_FREEZE
.comm _CLIENT_ALLREDRAWFLAGS,4,2 ; @CLIENT_ALLREDRAWFLAGS
.subsections_via_symbols
| AnghaBench/tmux/extr_status.c_status_message_clear.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
## ---------------------------------------------------------------------------
## free_global_msglist(void) -- clang-generated x86-64 (Darwin) code.
## Frees the list hanging off the global: calls
## free_msglist(*(*msg_list)) and then stores NULL through *msg_list.
## msg_list itself is a pointer-sized global accessed via the GOT.
## ---------------------------------------------------------------------------
.globl _free_global_msglist ## -- Begin function free_global_msglist
.p2align 4, 0x90
_free_global_msglist: ## @free_global_msglist
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
## rbx = &msg_list (kept live across the call).
movq _msg_list@GOTPCREL(%rip), %rbx
movq (%rbx), %rax
movq (%rax), %rdi
callq _free_msglist
## Re-read msg_list after the call and clear the list head.
movq (%rbx), %rax
movq $0, (%rax)
addq $8, %rsp
popq %rbx
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _msg_list,8,3 ## @msg_list
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
; ---------------------------------------------------------------------------
; free_global_msglist(void) -- clang-generated AArch64 (Darwin) code,
; same logic as the x86-64 build above: frees the list hanging off the
; global (free_msglist(*(*msg_list))) and stores NULL through *msg_list.
; ---------------------------------------------------------------------------
.globl _free_global_msglist ; -- Begin function free_global_msglist
.p2align 2
_free_global_msglist: ; @free_global_msglist
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
; x19 = &msg_list (kept live across the call).
Lloh0:
adrp x19, _msg_list@GOTPAGE
Lloh1:
ldr x19, [x19, _msg_list@GOTPAGEOFF]
ldr x8, [x19]
ldr x0, [x8]
bl _free_msglist
; Re-read msg_list after the call and clear the list head.
ldr x8, [x19]
str xzr, [x8]
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.loh AdrpLdrGot Lloh0, Lloh1
.cfi_endproc
; -- End function
.comm _msg_list,8,3 ; @msg_list
.subsections_via_symbols
| AnghaBench/macvim/src/extr_ex_eval.c_free_global_msglist.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
## ---------------------------------------------------------------------------
## rdtgroup_add_file(parent=%rdi, rft=%rsi) -- clang-generated x86-64
## (Darwin) code.  Creates a kernfs file via __kernfs_create_file using
## GLOBAL_ROOT_UID/GID and fields of rft (name at +8, mode/flags at +0/+4,
## rft itself as private data); the two zeroed stack qwords are the
## trailing NULL arguments.  On IS_ERR(kn) returns PTR_ERR(kn) (tail
## call); otherwise calls rdtgroup_kn_set_ugid(kn) and, if that fails,
## kernfs_remove(kn) before returning the error.  Returns 0 on success.
## ---------------------------------------------------------------------------
.p2align 4, 0x90 ## -- Begin function rdtgroup_add_file
_rdtgroup_add_file: ## @rdtgroup_add_file
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
subq $40, %rsp
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
## Marshal __kernfs_create_file arguments (register + stack).
movl 8(%rsi), %r9d
movl (%rsi), %ebx
movl 4(%rsi), %edx
movq _GLOBAL_ROOT_UID@GOTPCREL(%rip), %rcx
movl (%rcx), %ecx
movq _GLOBAL_ROOT_GID@GOTPCREL(%rip), %rax
movl (%rax), %r8d
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movq %rsi, 8(%rsp)
movl %ebx, (%rsp)
xorl %r14d, %r14d
movl %r9d, %esi
xorl %r9d, %r9d
callq ___kernfs_create_file
movq %rax, %rbx
movq %rax, %rdi
callq _IS_ERR
testq %rax, %rax
je LBB0_1
## %bb.4:
## Creation failed: return PTR_ERR(kn) via tail call.
movq %rbx, %rdi
addq $40, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
jmp _PTR_ERR ## TAILCALL
LBB0_1:
movq %rbx, %rdi
callq _rdtgroup_kn_set_ugid
testl %eax, %eax
je LBB0_3
## %bb.2:
## ugid setup failed: tear the file down and return the error.
movl %eax, %r15d
movq %rbx, %rdi
callq _kernfs_remove
movl %r15d, %r14d
LBB0_3:
## r14d holds 0 (success) or the propagated error.
movl %r14d, %eax
addq $40, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _GLOBAL_ROOT_UID,4,2 ## @GLOBAL_ROOT_UID
.comm _GLOBAL_ROOT_GID,4,2 ## @GLOBAL_ROOT_GID
.no_dead_strip _rdtgroup_add_file
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
; ---------------------------------------------------------------------------
; rdtgroup_add_file(parent=x0, rft=x1) -- clang-generated AArch64 (Darwin)
; code, same logic as the x86-64 build above: creates a kernfs file via
; __kernfs_create_file with GLOBAL_ROOT_UID/GID and rft's fields; on
; IS_ERR(kn) tail-branches to PTR_ERR(kn); otherwise calls
; rdtgroup_kn_set_ugid(kn), tearing the file down with kernfs_remove on
; failure.  Returns 0 on success or the propagated error.
; ---------------------------------------------------------------------------
.p2align 2 ; -- Begin function rdtgroup_add_file
_rdtgroup_add_file: ; @rdtgroup_add_file
.cfi_startproc
; %bb.0:
sub sp, sp, #48
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
; Marshal __kernfs_create_file arguments (x1..x7 + two zeroed stack slots).
mov x7, x1
ldr w1, [x1, #8]
Lloh0:
adrp x8, _GLOBAL_ROOT_UID@GOTPAGE
Lloh1:
ldr x8, [x8, _GLOBAL_ROOT_UID@GOTPAGEOFF]
Lloh2:
ldr w3, [x8]
Lloh3:
adrp x8, _GLOBAL_ROOT_GID@GOTPAGE
Lloh4:
ldr x8, [x8, _GLOBAL_ROOT_GID@GOTPAGEOFF]
Lloh5:
ldr w4, [x8]
ldp w6, w2, [x7]
stp xzr, xzr, [sp]
mov w5, #0
bl ___kernfs_create_file
mov x19, x0
bl _IS_ERR
cbz x0, LBB0_2
; %bb.1:
; Creation failed: return PTR_ERR(kn) via tail branch.
mov x0, x19
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #48
b _PTR_ERR
LBB0_2:
mov x0, x19
bl _rdtgroup_kn_set_ugid
mov x20, x0
cbz w0, LBB0_4
; %bb.3:
; ugid setup failed: tear the file down; error is preserved in x20.
mov x0, x19
bl _kernfs_remove
LBB0_4:
; Return 0 (success) or the propagated error.
mov x0, x20
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #48
ret
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _GLOBAL_ROOT_UID,4,2 ; @GLOBAL_ROOT_UID
.comm _GLOBAL_ROOT_GID,4,2 ; @GLOBAL_ROOT_GID
.no_dead_strip _rdtgroup_add_file
.subsections_via_symbols
| AnghaBench/linux/arch/x86/kernel/cpu/resctrl/extr_rdtgroup.c_rdtgroup_add_file.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
## ---------------------------------------------------------------------------
## main -- clang-generated x86-64 (Darwin) code.  Prints the fixed string
## L_str via puts (printf folded to puts by the compiler) and returns 0.
## ---------------------------------------------------------------------------
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
leaq L_str(%rip), %rdi
callq _puts
xorl %eax, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_str: ## @str
.asciz "Computer Science \nComputer Graphics \nDatabase Management welcome 'to'\nArtificialIntelligence?\tRobotics \n \t\t\"learning is fun\" \n\n'SoftwareEngineering' \n\t\"Net Centric Computing\" \nWhat are your favorite Programming Languages? \nE:\\Java\\C Programming\\Python "
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
; ---------------------------------------------------------------------------
; main -- clang-generated AArch64 (Darwin) code.  Prints the fixed string
; l_str via puts (printf folded to puts by the compiler) and returns 0.
; ---------------------------------------------------------------------------
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh0:
adrp x0, l_str@PAGE
Lloh1:
add x0, x0, l_str@PAGEOFF
bl _puts
mov w0, #0
ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
ret
.loh AdrpAdd Lloh0, Lloh1
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_str: ; @str
.asciz "Computer Science \nComputer Graphics \nDatabase Management welcome 'to'\nArtificialIntelligence?\tRobotics \n \t\t\"learning is fun\" \n\n'SoftwareEngineering' \n\t\"Net Centric Computing\" \nWhat are your favorite Programming Languages? \nE:\\Java\\C Programming\\Python "
.subsections_via_symbols
| the_stack_data/40762586.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
## ---------------------------------------------------------------------------
## is_tic_within_range(ccw=%rdi, head=%esi, len=%edx) -- clang-generated
## x86-64 (Darwin) code.  If ccw_is_tic(ccw) is false, returns 0;
## otherwise tail-calls is_cpa_within_range(*(int *)ccw, head, len).
## Callee-saved rbx/r14/r15 preserve the arguments across the first call.
## ---------------------------------------------------------------------------
.p2align 4, 0x90 ## -- Begin function is_tic_within_range
_is_tic_within_range: ## @is_tic_within_range
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %edx, %r14d
movl %esi, %r15d
movq %rdi, %rbx
callq _ccw_is_tic
testl %eax, %eax
je LBB0_1
## %bb.2:
## It is a TIC: delegate to is_cpa_within_range (tail call).
movl (%rbx), %edi
movl %r15d, %esi
movl %r14d, %edx
addq $8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
jmp _is_cpa_within_range ## TAILCALL
LBB0_1:
## Not a TIC: return 0.
xorl %eax, %eax
addq $8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.no_dead_strip _is_tic_within_range
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
; ---------------------------------------------------------------------------
; is_tic_within_range(ccw=x0, head=x1, len=x2) -- clang-generated AArch64
; (Darwin) code, same logic as the x86-64 build above.  If ccw_is_tic(ccw)
; is false, returns (w0 left as ccw_is_tic's zero result); otherwise
; tail-branches to is_cpa_within_range(*(int *)ccw, head, len).
; Callee-saved x19/x20/x21 preserve the arguments across the first call.
; ---------------------------------------------------------------------------
.p2align 2 ; -- Begin function is_tic_within_range
_is_tic_within_range: ; @is_tic_within_range
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x19, x2
mov x20, x1
mov x21, x0
bl _ccw_is_tic
cbz w0, LBB0_2
; %bb.1:
; It is a TIC: delegate to is_cpa_within_range (tail branch).
ldr w0, [x21]
mov x1, x20
mov x2, x19
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
b _is_cpa_within_range
LBB0_2:
; Not a TIC: w0 is already 0 from ccw_is_tic.
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
.no_dead_strip _is_tic_within_range
.subsections_via_symbols
| AnghaBench/linux/drivers/s390/cio/extr_vfio_ccw_cp.c_is_tic_within_range.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function xcopy_pt_undepend_remotedev
_xcopy_pt_undepend_remotedev: ## @xcopy_pt_undepend_remotedev
.cfi_startproc
## %bb.0:
## Picks one of two pointer fields of *rdi (offset 8 or 16) depending on whether
## the first field equals the global XCOL_SOURCE_RECV_OP, logs it via pr_debug,
## then tail-calls target_undepend_item on it.
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax ## alignment padding for the pr_debug call
.cfi_offset %rbx, -24
movq (%rdi), %rax
movq _XCOL_SOURCE_RECV_OP@GOTPCREL(%rip), %rcx ## external global, via GOT
xorl %edx, %edx
cmpq (%rcx), %rax
sete %dl ## dl = (first field == XCOL_SOURCE_RECV_OP)
movq 8(%rdi,%rdx,8), %rbx ## rbx = field at offset 8 (or 16 when equal)
leaq L_.str(%rip), %rdi
movq %rbx, %rsi ## same pointer passed for both %p slots of the format
movq %rbx, %rdx
callq _pr_debug
movq %rbx, %rdi
addq $8, %rsp
popq %rbx
popq %rbp
jmp _target_undepend_item ## TAILCALL
.cfi_endproc
## -- End function
.comm _XCOL_SOURCE_RECV_OP,8,3 ## @XCOL_SOURCE_RECV_OP
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "Calling configfs_undepend_item for remote_dev: %p remote_dev->dev_group: %p\n"
.no_dead_strip _xcopy_pt_undepend_remotedev
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function xcopy_pt_undepend_remotedev
_xcopy_pt_undepend_remotedev: ; @xcopy_pt_undepend_remotedev
.cfi_startproc
; %bb.0:
; AArch64 rendition: select field at offset 8 or 16 of *x0 depending on whether
; the first field equals global XCOL_SOURCE_RECV_OP, pr_debug it, then
; tail-call target_undepend_item on the selected pointer.
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
ldr x8, [x0]
Lloh0:
adrp x9, _XCOL_SOURCE_RECV_OP@GOTPAGE
Lloh1:
ldr x9, [x9, _XCOL_SOURCE_RECV_OP@GOTPAGEOFF]
Lloh2:
ldr x9, [x9]
mov w10, #8
mov w11, #16
cmp x8, x9
csel x8, x11, x10, eq ; offset = equal ? 16 : 8 (branchless select)
ldr x19, [x0, x8]
Lloh3:
adrp x0, l_.str@PAGE
Lloh4:
add x0, x0, l_.str@PAGEOFF
mov x1, x19 ; same pointer for both %p slots of the format
mov x2, x19
bl _pr_debug
mov x0, x19
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
b _target_undepend_item
.loh AdrpAdd Lloh3, Lloh4
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _XCOL_SOURCE_RECV_OP,8,3 ; @XCOL_SOURCE_RECV_OP
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "Calling configfs_undepend_item for remote_dev: %p remote_dev->dev_group: %p\n"
.no_dead_strip _xcopy_pt_undepend_remotedev
.subsections_via_symbols
| AnghaBench/linux/drivers/target/extr_target_core_xcopy.c_xcopy_pt_undepend_remotedev.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
## int main(void): nothing to do. Leaf function with no locals and no calls,
## so no frame is needed; just produce the return value.
xorl %eax, %eax ## return 0 (xor is the canonical zeroing idiom)
retq
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
; int main(void): leaf with no frame or spills — just return 0.
mov w0, wzr ; w0 = 0 via the zero register
ret
.cfi_endproc
; -- End function
.subsections_via_symbols
| the_stack_data/73383.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function blake2s_clear_lastnode
_blake2s_clear_lastnode: ## @blake2s_clear_lastnode
.cfi_startproc
## %bb.0:
## Clears a 32-bit field at offset 4 of the struct pointed to by the first
## member of *rdi (i.e. S->...->lastnode-style flag = 0), then returns 0.
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq (%rdi), %rax ## rax = *(void **)rdi
movl $0, 4(%rax) ## *(int *)(rax + 4) = 0
xorl %eax, %eax ## return 0
popq %rbp
retq
.cfi_endproc
## -- End function
.no_dead_strip _blake2s_clear_lastnode
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function blake2s_clear_lastnode
_blake2s_clear_lastnode: ; @blake2s_clear_lastnode
.cfi_startproc
; %bb.0:
; Clears the 32-bit field at offset 4 of the struct pointed to by *x0,
; then returns 0. Leaf function: no frame needed.
ldr x8, [x0] ; x8 = *(void **)x0
str wzr, [x8, #4] ; *(int *)(x8 + 4) = 0
mov w0, #0 ; return 0
ret
.cfi_endproc
; -- End function
.no_dead_strip _blake2s_clear_lastnode
.subsections_via_symbols
| AnghaBench/freebsd/sys/contrib/libb2/extr_blake2s.c_blake2s_clear_lastnode.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _fz_append_rune ## -- Begin function fz_append_rune
.p2align 4, 0x90
_fz_append_rune: ## @fz_append_rune
.cfi_startproc
## %bb.0:
## fz_append_rune(ctx, buf, rune): UTF-8-encode `rune` into a small stack
## scratch area, grow the buffer via fz_ensure_buffer if len + n would exceed
## the capacity field (+4), memcpy the bytes in, bump the length field (+0),
## and zero the field at +8. Stack canary guards the on-stack scratch buffer.
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
subq $24, %rsp ## scratch byte buffer at -42(%rbp) + canary slot
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rsi, %rbx ## rbx = buf
movq %rdi, %r15 ## r15 = ctx
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -32(%rbp) ## store stack canary
leaq -42(%rbp), %rdi
movl %edx, %esi
callq _fz_runetochar ## n = fz_runetochar(scratch, rune)
movl %eax, %r14d
movl (%rbx), %edi ## edi = buf->len (field +0)
movl %eax, %edx
addl %edi, %edx
cmpl 4(%rbx), %edx ## len + n vs capacity field (+4)
jle LBB0_2
## %bb.1:
movq %r15, %rdi
movq %rbx, %rsi
callq _fz_ensure_buffer ## grow; reload len afterwards
movl (%rbx), %edi
LBB0_2:
## Destination = len + field(+16); copy the n encoded bytes from scratch.
## NOTE(review): pointer math is done in 32-bit ints here — consistent with
## the int-typed fields this benchmark source uses; confirm against the caller.
addl 16(%rbx), %edi
leaq -42(%rbp), %rsi
movl %r14d, %edx
callq _memcpy
addl %r14d, (%rbx) ## buf->len += n
movq $0, 8(%rbx) ## zero the field at +8
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
cmpq -32(%rbp), %rax ## canary check
jne LBB0_4
## %bb.3:
addq $24, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
LBB0_4:
callq ___stack_chk_fail ## canary smashed — abort
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _fz_append_rune ; -- Begin function fz_append_rune
.p2align 2
_fz_append_rune: ; @fz_append_rune
.cfi_startproc
; %bb.0:
; AArch64 rendition of fz_append_rune(ctx, buf, rune): encode the rune into a
; stack scratch area at sp+14, grow via fz_ensure_buffer when len + n exceeds
; the capacity field, memcpy the bytes, bump the length field, zero field +8.
; Stack canary (___stack_chk_guard) protects the scratch buffer.
sub sp, sp, #80
.cfi_def_cfa_offset 80
stp x22, x21, [sp, #32] ; 16-byte Folded Spill
stp x20, x19, [sp, #48] ; 16-byte Folded Spill
stp x29, x30, [sp, #64] ; 16-byte Folded Spill
add x29, sp, #64
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x19, x1 ; x19 = buf
mov x21, x0 ; x21 = ctx
Lloh0:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh1:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh2:
ldr x8, [x8]
str x8, [sp, #24] ; store stack canary
add x0, sp, #14 ; scratch encode buffer
mov x1, x2
bl _fz_runetochar ; n = fz_runetochar(scratch, rune)
mov x20, x0
ldp w8, w9, [x19] ; w8 = len (field +0), w9 = capacity (field +4)
add w2, w8, w0
cmp w2, w9
b.le LBB0_2
; %bb.1:
mov x0, x21
mov x1, x19
bl _fz_ensure_buffer ; grow; reload len afterwards
ldr w8, [x19]
LBB0_2:
ldr w9, [x19, #16]
add w0, w8, w9 ; destination = len + field(+16)
add x1, sp, #14
mov x2, x20
bl _memcpy
ldr w8, [x19]
add w8, w8, w20 ; buf->len += n
str w8, [x19]
str xzr, [x19, #8] ; zero the field at +8
ldr x8, [sp, #24]
Lloh3:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh4:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh5:
ldr x9, [x9]
cmp x9, x8 ; canary check
b.ne LBB0_4
; %bb.3:
ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
ldp x22, x21, [sp, #32] ; 16-byte Folded Reload
add sp, sp, #80
ret
LBB0_4:
bl ___stack_chk_fail ; canary smashed — abort
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.cfi_endproc
; -- End function
.subsections_via_symbols
| AnghaBench/sumatrapdf/mupdf/source/fitz/extr_buffer.c_fz_append_rune.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _error_exit ## -- Begin function error_exit
.p2align 4, 0x90
_error_exit: ## @error_exit
.cfi_startproc
## %bb.0:
## Variadic fatal-error routine: prints a "<g_input_filename>:<g_line_number>"
## prefix (format L_.str.58) to stderr, then the caller's printf-style message
## via vfprintf, a trailing newline, closes every non-NULL g_* FILE global,
## and calls exit(1). Never returns.
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
subq $208, %rsp ## register save area + va_list for the variadic args
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq %rdi, %r14 ## r14 = format string (first fixed arg)
## Spill the remaining integer argument registers into the register save area.
movq %rsi, -216(%rbp)
movq %rdx, -208(%rbp)
movq %rcx, -200(%rbp)
movq %r8, -192(%rbp)
movq %r9, -184(%rbp)
testb %al, %al ## SysV variadic: al = number of vector regs used
je LBB0_14
## %bb.13:
## Spill the SSE argument registers only when the caller passed FP varargs.
movaps %xmm0, -176(%rbp)
movaps %xmm1, -160(%rbp)
movaps %xmm2, -144(%rbp)
movaps %xmm3, -128(%rbp)
movaps %xmm4, -112(%rbp)
movaps %xmm5, -96(%rbp)
movaps %xmm6, -80(%rbp)
movaps %xmm7, -64(%rbp)
LBB0_14:
movq ___stderrp@GOTPCREL(%rip), %rbx
movq (%rbx), %rdi
movl _g_line_number(%rip), %ecx
leaq L_.str.58(%rip), %rsi
leaq _g_input_filename(%rip), %rdx
xorl %eax, %eax ## no vector args to fprintf
callq _fprintf
## Build the va_list in place: gp_offset=8, fp_offset=48 packed in one qword,
## then overflow_arg_area (rbp+16) and reg_save_area (rbp-224).
movabsq $206158430216, %rax ## imm = 0x3000000008
movq %rax, -48(%rbp)
leaq 16(%rbp), %rax
movq %rax, -40(%rbp)
leaq -224(%rbp), %rax
movq %rax, -32(%rbp)
movq (%rbx), %rdi
leaq -48(%rbp), %rdx
movq %r14, %rsi
callq _vfprintf
movq (%rbx), %rsi
movl $10, %edi ## '\n'
callq _fputc
## Best-effort cleanup: close each global output/input FILE if it was opened.
movq _g_prototype_file(%rip), %rdi
testq %rdi, %rdi
je LBB0_2
## %bb.1:
callq _fclose
LBB0_2:
movq _g_table_file(%rip), %rdi
testq %rdi, %rdi
je LBB0_4
## %bb.3:
callq _fclose
LBB0_4:
movq _g_ops_ac_file(%rip), %rdi
testq %rdi, %rdi
je LBB0_6
## %bb.5:
callq _fclose
LBB0_6:
movq _g_ops_dm_file(%rip), %rdi
testq %rdi, %rdi
je LBB0_8
## %bb.7:
callq _fclose
LBB0_8:
movq _g_ops_nz_file(%rip), %rdi
testq %rdi, %rdi
je LBB0_10
## %bb.9:
callq _fclose
LBB0_10:
movq _g_input_file(%rip), %rdi
testq %rdi, %rdi
je LBB0_12
## %bb.11:
callq _fclose
LBB0_12:
movl $1, %edi
callq _exit ## noreturn
.cfi_endproc
## -- End function
.globl _perror_exit ## -- Begin function perror_exit
.p2align 4, 0x90
_perror_exit: ## @perror_exit
.cfi_startproc
## %bb.0:
## Variadic fatal-error routine, errno flavor: vfprintf the caller's message to
## stderr, then perror(L_.str.1), close every non-NULL g_* FILE global, exit(1).
## Never returns. Leaf-ish frame: format kept in caller-scratch r10 (no call
## intervenes before it is consumed).
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
subq $208, %rsp ## register save area + va_list
movq %rdi, %r10 ## r10 = format string
movq %rsi, -200(%rbp)
movq %rdx, -192(%rbp)
movq %rcx, -184(%rbp)
movq %r8, -176(%rbp)
movq %r9, -168(%rbp)
testb %al, %al ## al = number of vector regs used (SysV variadic)
je LBB1_14
## %bb.13:
movaps %xmm0, -160(%rbp)
movaps %xmm1, -144(%rbp)
movaps %xmm2, -128(%rbp)
movaps %xmm3, -112(%rbp)
movaps %xmm4, -96(%rbp)
movaps %xmm5, -80(%rbp)
movaps %xmm6, -64(%rbp)
movaps %xmm7, -48(%rbp)
LBB1_14:
## Build va_list: reg_save_area (rbp-208), overflow (rbp+16),
## gp_offset=8 / fp_offset=48 packed as 0x30_00000008.
leaq -208(%rbp), %rax
movq %rax, -16(%rbp)
leaq 16(%rbp), %rax
movq %rax, -24(%rbp)
movabsq $206158430216, %rax ## imm = 0x3000000008
movq %rax, -32(%rbp)
movq ___stderrp@GOTPCREL(%rip), %rax
movq (%rax), %rdi
leaq -32(%rbp), %rdx
movq %r10, %rsi
callq _vfprintf
leaq L_.str.1(%rip), %rdi
callq _perror ## append strerror(errno)
## Same cleanup cascade as error_exit: close whatever g_* files are open.
movq _g_prototype_file(%rip), %rdi
testq %rdi, %rdi
je LBB1_2
## %bb.1:
callq _fclose
LBB1_2:
movq _g_table_file(%rip), %rdi
testq %rdi, %rdi
je LBB1_4
## %bb.3:
callq _fclose
LBB1_4:
movq _g_ops_ac_file(%rip), %rdi
testq %rdi, %rdi
je LBB1_6
## %bb.5:
callq _fclose
LBB1_6:
movq _g_ops_dm_file(%rip), %rdi
testq %rdi, %rdi
je LBB1_8
## %bb.7:
callq _fclose
LBB1_8:
movq _g_ops_nz_file(%rip), %rdi
testq %rdi, %rdi
je LBB1_10
## %bb.9:
callq _fclose
LBB1_10:
movq _g_input_file(%rip), %rdi
testq %rdi, %rdi
je LBB1_12
## %bb.11:
callq _fclose
LBB1_12:
movl $1, %edi
callq _exit ## noreturn
.cfi_endproc
## -- End function
.globl _check_strsncpy ## -- Begin function check_strsncpy
.p2align 4, 0x90
_check_strsncpy: ## @check_strsncpy
.cfi_startproc
## %bb.0:
## Copy src (rsi) to dst (rdi) up to maxlen (edx) bytes, stopping at NUL or
## space; NUL-terminate dst and return the copied length in eax. If the token
## is longer than maxlen, bail out through error_exit (never returns).
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movslq %edx, %rcx ## rcx = maxlen, sign-extended
xorl %eax, %eax ## rax = index
.p2align 4, 0x90
LBB2_1: ## =>This Inner Loop Header: Depth=1
movzbl (%rsi,%rax), %edx
testb $-33, %dl ## (c & 0xDF) == 0 matches '\0' or ' ' — end of token
je LBB2_4
## %bb.2: ## in Loop: Header=BB2_1 Depth=1
movb %dl, (%rdi,%rax)
incq %rax
cmpq %rcx, %rax
jle LBB2_1
## %bb.3:
## Fell off the end: token exceeded maxlen -> fatal error.
leaq L_.str.60(%rip), %rdi
xorl %eax, %eax ## variadic call, no vector args
callq _error_exit
LBB2_4:
movb $0, (%rdi,%rax) ## NUL-terminate destination
## kill: def $eax killed $eax killed $rax
popq %rbp
retq ## return copied length
.cfi_endproc
## -- End function
.globl _check_strcncpy ## -- Begin function check_strcncpy
.p2align 4, 0x90
_check_strcncpy: ## @check_strcncpy
.cfi_startproc
## %bb.0:
## Like check_strsncpy but with an explicit delimiter: copy src (rsi) to dst
## (rdi), stopping at NUL or the delimiter byte (dl), up to maxlen (ecx) bytes.
## NUL-terminates dst and returns copied length; overlong token -> error_exit.
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movslq %ecx, %r8 ## r8 = maxlen
xorl %eax, %eax ## rax = index
.p2align 4, 0x90
LBB3_1: ## =>This Inner Loop Header: Depth=1
movzbl (%rsi,%rax), %ecx
testb %cl, %cl ## end of string?
je LBB3_5
## %bb.2: ## in Loop: Header=BB3_1 Depth=1
cmpb %dl, %cl ## hit the delimiter?
je LBB3_5
## %bb.3: ## in Loop: Header=BB3_1 Depth=1
movb %cl, (%rdi,%rax)
incq %rax
cmpq %r8, %rax
jle LBB3_1
## %bb.4:
## Token exceeded maxlen -> fatal error (never returns).
leaq L_.str.60(%rip), %rdi
xorl %eax, %eax
callq _error_exit
LBB3_5:
movb $0, (%rdi,%rax) ## NUL-terminate destination
## kill: def $eax killed $eax killed $rax
popq %rbp
retq ## return copied length
.cfi_endproc
## -- End function
.globl _check_atoi ## -- Begin function check_atoi
.p2align 4, 0x90
_check_atoi: ## @check_atoi
.cfi_startproc
## %bb.0:
## Parse an unsigned decimal number at str (rdi); store the value through the
## out-pointer (rsi) and return the number of characters consumed. The digit
## run must end in NUL or space, otherwise error_exit is called with the
## offending character (never returns).
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movb (%rdi), %cl
leal -48(%rcx), %eax ## c - '0'
xorl %r8d, %r8d ## accumulated value = 0
cmpb $9, %al ## unsigned compare: is c a digit?
ja LBB4_1
## %bb.2:
xorl %r8d, %r8d
movq %rdi, %rax ## rax = scan cursor
.p2align 4, 0x90
LBB4_3: ## =>This Inner Loop Header: Depth=1
## value = value*10 + (c - '0'), built from two LEAs: (v*5)*2 + c - 48.
movzbl %cl, %ecx
leal (%r8,%r8,4), %edx
leal (%rcx,%rdx,2), %r8d
addl $-48, %r8d
movzbl 1(%rax), %ecx
incq %rax
leal -48(%rcx), %edx
cmpb $10, %dl
jb LBB4_3 ## next char still a digit
jmp LBB4_4
LBB4_1:
movq %rdi, %rax ## no digits at all: cursor stays at start
LBB4_4:
testb $-33, %cl ## terminator must be '\0' or ' ' ((c & 0xDF) == 0)
jne LBB4_6
## %bb.5:
movl %r8d, (%rsi) ## *out = value
subl %edi, %eax ## chars consumed = cursor - start
## kill: def $eax killed $eax killed $rax
popq %rbp
retq
LBB4_6:
## Bad terminator: report the character and abort.
movsbl %cl, %esi
leaq L_.str.61(%rip), %rdi
xorl %eax, %eax
callq _error_exit
.cfi_endproc
## -- End function
.globl _skip_spaces ## -- Begin function skip_spaces
.p2align 4, 0x90
_skip_spaces: ## @skip_spaces
.cfi_startproc
## %bb.0:
## Count the run of leading ' ' characters at str (rdi); return the count in
## eax. (Only the count is returned — the advanced pointer is discarded.)
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movl $-1, %eax ## pre-decremented so the first increment yields 0
.p2align 4, 0x90
LBB5_1: ## =>This Inner Loop Header: Depth=1
incl %eax
cmpb $32, (%rdi) ## ' '
leaq 1(%rdi), %rdi
je LBB5_1
## %bb.2:
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _num_bits ## -- Begin function num_bits
.p2align 4, 0x90
_num_bits: ## @num_bits
.cfi_startproc
## %bb.0:
## SWAR population count: returns the number of set bits in the low 16 bits of
## edi (masks 0x5555/0x3333/0x707 are 16-bit-wide variants of the classic
## popcount constants). Pure leaf function; result in eax.
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
## Pairwise: x = (x & 0x5555) + ((x >> 1) & 0x5555)
movl %edi, %eax
shrl %eax
andl $21845, %eax ## imm = 0x5555
andl $21845, %edi ## imm = 0x5555
addl %eax, %edi
## Nibble: x = (x & 0x3333) + ((x >> 2) & 0x3333)
movl %edi, %eax
shrl $2, %eax
andl $13107, %eax ## imm = 0x3333
andl $13107, %edi ## imm = 0x3333
addl %eax, %edi
## Byte: x = (x & 0x707) + ((x >> 4) & 0x707)
movl %edi, %eax
shrl $4, %eax
andl $1799, %eax ## imm = 0x707
andl $1799, %edi ## imm = 0x707
addl %eax, %edi
## Final fold: low byte + high byte
movzbl %dil, %eax
shrl $8, %edi
addl %edi, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _atoh ## -- Begin function atoh
.p2align 4, 0x90
_atoh: ## @atoh
.cfi_startproc
## %bb.0:
## Parse a hexadecimal number at str (rdi); accepts digits '0'-'9' and
## lowercase 'a'-'f' only, stops at the first other character. Result in eax.
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
xorl %eax, %eax ## accumulator = 0
jmp LBB7_1
.p2align 4, 0x90
LBB7_3: ## in Loop: Header=BB7_1 Depth=1
## acc = acc*16 + c + bias (bias = -'0' for digits, -'a'+10 for a-f)
shll $4, %eax
addl %edx, %eax
addl %ecx, %eax
incq %rdi
LBB7_1: ## =>This Inner Loop Header: Depth=1
movsbl (%rdi), %ecx
leal -48(%rcx), %esi ## c - '0'
movl $-48, %edx
cmpb $10, %sil
jb LBB7_3 ## decimal digit
## %bb.2: ## in Loop: Header=BB7_1 Depth=1
leal -97(%rcx), %esi ## c - 'a'
movl $-87, %edx ## bias so 'a' -> 10
cmpb $5, %sil
jbe LBB7_3 ## 'a'..'f'
## %bb.4:
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _fgetline ## -- Begin function fgetline
.p2align 4, 0x90
_fgetline: ## @fgetline
.cfi_startproc
## %bb.0:
## Read one line via fgets into buf (rdi, size esi, FILE in rdx per the call
## below). Returns -1 on EOF; otherwise strips a leading CR (by shifting the
## buffer left), trims trailing CR/LF, NUL-terminates, bumps g_line_number,
## and returns the trimmed length.
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movl %esi, %ebx ## ebx = buffer size
movq %rdi, %r14 ## r14 = buf
callq _fgets
testq %rax, %rax
je LBB8_1 ## EOF/err -> return -1
## %bb.2:
cmpb $13, (%r14) ## leading '\r'?
jne LBB8_4
## %bb.3:
## Shift the buffer one byte left to drop the leading CR.
## NOTE(review): memcpy on overlapping ranges — works here in practice but
## memmove would be the strictly-correct call; matches the original C source.
leaq 1(%r14), %rsi
decl %ebx
movslq %ebx, %rdx
movq %r14, %rdi
callq _memcpy
LBB8_4:
movq %r14, %rdi
callq _strlen
testl %eax, %eax
je LBB8_9
## %bb.5:
## Walk backwards from the last character, dropping CR/LF.
movslq %eax, %rcx
addq %r14, %rcx
decq %rcx
jmp LBB8_6
.p2align 4, 0x90
LBB8_8: ## in Loop: Header=BB8_6 Depth=1
decq %rcx
decl %eax
je LBB8_9
LBB8_6: ## =>This Inner Loop Header: Depth=1
movzbl (%rcx), %edx
cmpb $13, %dl ## '\r'
je LBB8_8
## %bb.7: ## in Loop: Header=BB8_6 Depth=1
cmpb $10, %dl ## '\n'
je LBB8_8
jmp LBB8_10
LBB8_9:
xorl %eax, %eax ## whole line was CR/LF (or empty)
LBB8_10:
movslq %eax, %rcx
movb $0, (%r14,%rcx) ## terminate at trimmed length
incl _g_line_number(%rip)
jmp LBB8_11
LBB8_1:
movl $-1, %eax ## EOF
LBB8_11:
## kill: def $eax killed $eax killed $rax
popq %rbx
popq %r14
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _get_oper_cycles ## -- Begin function get_oper_cycles
.p2align 4, 0x90
_get_oper_cycles: ## @get_oper_cycles
.cfi_startproc
## %bb.0:
## Compute effective-address cycle counts for an opcode record (rdi), EA mode
## (esi) and CPU/operand index (edx). Returns 0 when the mode byte at
## offset 60+edx is '.'; otherwise adds the base cycle count (byte at
## offset 64+edx) to an entry looked up in one of several cycle tables,
## chosen by strcmp'ing the opcode name against string constants
## (L_.str.62 .. L_.str.74 — defined outside this chunk; presumably
## "moves"/"clr"/shift/jmp/jsr/lea/pea-style names, TODO confirm).
## The column within each table comes from g_size_select_table indexed by the
## size byte at offset 30.
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movslq %edx, %r12 ## r12 = index arg
xorl %eax, %eax
cmpb $46, 60(%rdi,%r12) ## '.' marker -> this slot unused, return 0
je LBB9_31
## %bb.1:
movl %esi, %r14d ## r14 = EA mode
movq %rdi, %rbx ## rbx = opcode record (name string at offset 0)
movzbl 30(%rdi), %eax ## size byte
leaq _g_size_select_table(%rip), %rcx
movslq (%rcx,%rax,4), %r15 ## r15 = table column for this size
cmpl $1, %edx
jg LBB9_29 ## index > 1: generic EA table
## %bb.2:
jne LBB9_8 ## index 0 path
## %bb.3:
## index == 1: check for the two specially-tabled opcodes (.str.62/.str.63).
leaq L_.str.62(%rip), %rsi
movq %rbx, %rdi
callq _strcmp
testl %eax, %eax
je LBB9_4
## %bb.6:
leaq L_.str.63(%rip), %rsi
movq %rbx, %rdi
callq _strcmp
testl %eax, %eax
jne LBB9_21
## %bb.7:
## Matched .str.63: use g_clr_cycle_table[mode].
movzbl 64(%rbx,%r12), %eax
movslq %r14d, %rcx
leaq (%rcx,%rcx,2), %rcx
leaq _g_clr_cycle_table(%rip), %rdx
leaq (%rdx,%rcx,4), %rcx
jmp LBB9_30
LBB9_8:
## index == 0: only the mode-12 special cases matter here.
cmpl $12, %r14d
jne LBB9_21
## %bb.9:
testl %edx, %edx
jne LBB9_21
## %bb.10:
## Chain of strcmp checks (.str.64/.66/.67/.68/.69/.70, most also requiring
## the string at offset 31 to equal .str.65) selecting the +432 table row.
leaq L_.str.64(%rip), %rsi
movq %rbx, %rdi
callq _strcmp
testl %eax, %eax
jne LBB9_12
## %bb.11:
leaq 31(%rbx), %rdi
leaq L_.str.65(%rip), %rsi
callq _strcmp
testl %eax, %eax
je LBB9_20
LBB9_12:
leaq L_.str.66(%rip), %rsi
movq %rbx, %rdi
callq _strcmp
testl %eax, %eax
je LBB9_20
## %bb.13:
leaq L_.str.67(%rip), %rsi
movq %rbx, %rdi
callq _strcmp
testl %eax, %eax
jne LBB9_15
## %bb.14:
leaq 31(%rbx), %rdi
leaq L_.str.65(%rip), %rsi
callq _strcmp
testl %eax, %eax
je LBB9_20
LBB9_15:
leaq L_.str.68(%rip), %rsi
movq %rbx, %rdi
callq _strcmp
testl %eax, %eax
jne LBB9_17
## %bb.16:
leaq 31(%rbx), %rdi
leaq L_.str.65(%rip), %rsi
callq _strcmp
testl %eax, %eax
je LBB9_20
LBB9_17:
leaq L_.str.69(%rip), %rsi
movq %rbx, %rdi
callq _strcmp
testl %eax, %eax
jne LBB9_19
## %bb.18:
leaq 31(%rbx), %rdi
leaq L_.str.65(%rip), %rsi
callq _strcmp
testl %eax, %eax
je LBB9_20
LBB9_19:
leaq L_.str.70(%rip), %rsi
movq %rbx, %rdi
callq _strcmp
testl %eax, %eax
je LBB9_20
LBB9_21:
## Flow-control opcodes with their own one-dimensional tables
## (.str.71 -> jmp, .str.72 -> jsr, .str.73 -> lea, .str.74 -> pea tables).
leaq L_.str.71(%rip), %rsi
movq %rbx, %rdi
callq _strcmp
testl %eax, %eax
je LBB9_22
## %bb.23:
leaq L_.str.72(%rip), %rsi
movq %rbx, %rdi
callq _strcmp
testl %eax, %eax
je LBB9_24
## %bb.25:
leaq L_.str.73(%rip), %rsi
movq %rbx, %rdi
callq _strcmp
testl %eax, %eax
je LBB9_26
## %bb.27:
leaq L_.str.74(%rip), %rsi
movq %rbx, %rdi
callq _strcmp
testl %eax, %eax
je LBB9_28
LBB9_29:
## Default: g_ea_cycle_table[mode][index][size-column] + base cycles.
movzbl 64(%rbx,%r12), %eax
movslq %r14d, %rcx
leaq (%rcx,%rcx,8), %rcx
leaq _g_ea_cycle_table(%rip), %rdx
leaq (%rdx,%rcx,4), %rcx
leaq (%r12,%r12,2), %rdx
leaq (%rcx,%rdx,4), %rcx
LBB9_30:
addl (%rcx,%r15,4), %eax ## add table entry for the size column
LBB9_31:
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
LBB9_20:
## Matched one of the mode-12 special cases: +432 row of the EA table, +2.
movzbl 64(%rbx,%r12), %eax
leaq (%r12,%r12,2), %rcx
leaq _g_ea_cycle_table(%rip), %rdx
leaq (%rdx,%rcx,4), %rcx
movl 432(%rcx,%r15,4), %ecx
addl %ecx, %eax
addl $2, %eax
jmp LBB9_31
LBB9_4:
## Matched .str.62: use g_moves_cycle_table[mode].
movzbl 64(%rbx,%r12), %eax
movslq %r14d, %rcx
leaq (%rcx,%rcx,2), %rcx
leaq _g_moves_cycle_table(%rip), %rdx
leaq (%rdx,%rcx,4), %rcx
jmp LBB9_30
LBB9_22:
movzbl 64(%rbx,%r12), %eax
movslq %r14d, %rcx
leaq _g_jmp_cycle_table(%rip), %rdx
addl (%rdx,%rcx,4), %eax
jmp LBB9_31
LBB9_24:
movzbl 64(%rbx,%r12), %eax
movslq %r14d, %rcx
leaq _g_jsr_cycle_table(%rip), %rdx
addl (%rdx,%rcx,4), %eax
jmp LBB9_31
LBB9_26:
movzbl 64(%rbx,%r12), %eax
movslq %r14d, %rcx
leaq _g_lea_cycle_table(%rip), %rdx
addl (%rdx,%rcx,4), %eax
jmp LBB9_31
LBB9_28:
movzbl 64(%rbx,%r12), %eax
movslq %r14d, %rcx
leaq _g_pea_cycle_table(%rip), %rdx
addl (%rdx,%rcx,4), %eax
jmp LBB9_31
.cfi_endproc
## -- End function
.globl _find_opcode ## -- Begin function find_opcode
.p2align 4, 0x90
_find_opcode: ## @find_opcode
.cfi_startproc
## %bb.0:
## Linear search of g_opcode_input_table (68-byte entries) for the entry whose
## four key fields all match the arguments: name at entry+0 (rdi), byte at
## entry+30 (esi), string at entry+31 (rdx), string at entry+35 (rcx).
## Returns a pointer to the matching entry. NOTE(review): no end-of-table
## check is visible here — the loop relies on a match always existing.
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax ## alignment for the strcmp calls
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rcx, -48(%rbp) ## 8-byte Spill
movq %rdx, %r15
movl %esi, %r12d
movq %rdi, %r13
## rbx tracks entry+35; the other fields are addressed as rbx-35/-5/-4.
movq _g_opcode_input_table@GOTPCREL(%rip), %rbx
addq $35, %rbx
jmp LBB10_1
.p2align 4, 0x90
LBB10_5: ## in Loop: Header=BB10_1 Depth=1
addq $68, %rbx ## advance to the next 68-byte entry
LBB10_1: ## =>This Inner Loop Header: Depth=1
leaq -35(%rbx), %r14 ## r14 = entry base (name field)
movq %r13, %rdi
movq %r14, %rsi
callq _strcmp
testl %eax, %eax
jne LBB10_5
## %bb.2: ## in Loop: Header=BB10_1 Depth=1
movzbl -5(%rbx), %eax ## byte field at entry+30
cmpl %r12d, %eax
jne LBB10_5
## %bb.3: ## in Loop: Header=BB10_1 Depth=1
leaq -4(%rbx), %rsi ## string field at entry+31
movq %r15, %rdi
callq _strcmp
testl %eax, %eax
jne LBB10_5
## %bb.4: ## in Loop: Header=BB10_1 Depth=1
movq -48(%rbp), %rdi ## 8-byte Reload
movq %rbx, %rsi ## string field at entry+35
callq _strcmp
testl %eax, %eax
jne LBB10_5
## %bb.6:
movq %r14, %rax ## return pointer to the matched entry
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _find_illegal_opcode ## -- Begin function find_illegal_opcode
.p2align 4, 0x90
_find_illegal_opcode: ## @find_illegal_opcode
.cfi_startproc
## %bb.0:
## Scan g_opcode_input_table (68-byte entries) for the entry whose name equals
## the constant L_.str.75 (defined outside this chunk); return a pointer to it.
## NOTE(review): like find_opcode, the loop assumes the entry exists.
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq _g_opcode_input_table@GOTPCREL(%rip), %rbx
leaq L_.str.75(%rip), %r14
.p2align 4, 0x90
LBB11_1: ## =>This Inner Loop Header: Depth=1
movq %rbx, %rdi
movq %r14, %rsi
callq _strcmp
addq $68, %rbx ## advance past the entry just tested
testl %eax, %eax
jne LBB11_1
## %bb.2:
addq $-68, %rbx ## step back to the entry that matched
movq %rbx, %rax
popq %rbx
popq %r14
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _extract_opcode_info ## -- Begin function extract_opcode_info
.p2align 4, 0x90
_extract_opcode_info: ## @extract_opcode_info
.cfi_startproc
## %bb.0:
## Parse one source line of the form "<marker>name, value, spec, extra)" into
## its out-parameters: name (rsi, comma-terminated, max ~30 chars), int value
## (rdx, via atoi), spec (rcx, comma-terminated, max ~4 chars), extra (r8,
## ')'-terminated, max ~5 chars). Returns 1 on full success, 0 when the
## marker (L_.str.76) or a separator is missing; an over-long field aborts via
## error_exit(L_.str.60). The copy loops are fully unrolled by the compiler,
## which accounts for the long repetitive blocks below.
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $24, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %r8, %rbx ## rbx = out: extra string
movq %rcx, %r15 ## r15 = out: spec string
movq %rdx, -56(%rbp) ## 8-byte Spill (out: int value)
movq %rsi, %r14 ## r14 = out: name string
leaq L_.str.76(%rip), %rsi
callq _strstr ## locate the marker in the input line
xorl %r13d, %r13d ## default return value 0
testq %rax, %rax
je LBB12_100
## %bb.1:
## Unrolled copy of the name field, starting 12 bytes past the marker,
## stopping at NUL or ','. Each step: check terminator, store byte, advance.
movq %rax, %r12
movb 12(%rax), %cl
movq %r14, %rax
testb %cl, %cl
je LBB12_21
## %bb.2:
movq %r14, %rax
cmpb $44, %cl
je LBB12_21
## %bb.3:
leaq 1(%r14), %rax
movb %cl, (%r14)
movb 13(%r12), %cl
testb %cl, %cl
je LBB12_21
## %bb.4:
cmpb $44, %cl
je LBB12_21
## %bb.5:
leaq 2(%r14), %rax
movb %cl, 1(%r14)
movb 14(%r12), %cl
testb %cl, %cl
je LBB12_21
## %bb.6:
cmpb $44, %cl
je LBB12_21
## %bb.7:
leaq 3(%r14), %rax
movb %cl, 2(%r14)
movb 15(%r12), %cl
testb %cl, %cl
je LBB12_21
## %bb.8:
cmpb $44, %cl
je LBB12_21
## %bb.9:
leaq 4(%r14), %rax
movb %cl, 3(%r14)
movb 16(%r12), %cl
testb %cl, %cl
je LBB12_21
## %bb.10:
cmpb $44, %cl
je LBB12_21
## %bb.11:
leaq 5(%r14), %rax
movb %cl, 4(%r14)
movb 17(%r12), %cl
testb %cl, %cl
je LBB12_21
## %bb.12:
cmpb $44, %cl
je LBB12_21
## %bb.13:
leaq 6(%r14), %rax
movb %cl, 5(%r14)
movb 18(%r12), %cl
testb %cl, %cl
je LBB12_21
## %bb.14:
cmpb $44, %cl
je LBB12_21
## %bb.15:
leaq 7(%r14), %rax
movb %cl, 6(%r14)
movb 19(%r12), %cl
testb %cl, %cl
je LBB12_21
## %bb.16:
cmpb $44, %cl
je LBB12_21
## %bb.17:
leaq 8(%r14), %rax
movb %cl, 7(%r14)
movb 20(%r12), %cl
testb %cl, %cl
je LBB12_21
## %bb.18:
cmpb $44, %cl
je LBB12_21
## %bb.19:
## From here on, rbx is temporarily parked in rdx around the unrolled steps.
movq %rbx, %rdx
leaq 9(%r14), %rax
movb %cl, 8(%r14)
movb 21(%r12), %cl
testb %cl, %cl
je LBB12_20
## %bb.38:
cmpb $44, %cl
movq %rdx, %rbx
je LBB12_21
## %bb.39:
leaq 10(%r14), %rax
movb %cl, 9(%r14)
movb 22(%r12), %cl
testb %cl, %cl
je LBB12_20
## %bb.40:
cmpb $44, %cl
movq %rdx, %rbx
je LBB12_21
## %bb.41:
leaq 11(%r14), %rax
movb %cl, 10(%r14)
movb 23(%r12), %cl
testb %cl, %cl
je LBB12_20
## %bb.42:
cmpb $44, %cl
movq %rdx, %rbx
je LBB12_21
## %bb.43:
leaq 12(%r14), %rax
movb %cl, 11(%r14)
movb 24(%r12), %cl
testb %cl, %cl
je LBB12_20
## %bb.44:
cmpb $44, %cl
movq %rdx, %rbx
je LBB12_21
## %bb.45:
leaq 13(%r14), %rax
movb %cl, 12(%r14)
movb 25(%r12), %cl
testb %cl, %cl
je LBB12_20
## %bb.46:
cmpb $44, %cl
movq %rdx, %rbx
je LBB12_21
## %bb.47:
leaq 14(%r14), %rax
movb %cl, 13(%r14)
movb 26(%r12), %cl
testb %cl, %cl
je LBB12_20
## %bb.48:
cmpb $44, %cl
movq %rdx, %rbx
je LBB12_21
## %bb.49:
leaq 15(%r14), %rax
movb %cl, 14(%r14)
movb 27(%r12), %cl
testb %cl, %cl
je LBB12_20
## %bb.50:
cmpb $44, %cl
movq %rdx, %rbx
je LBB12_21
## %bb.51:
leaq 16(%r14), %rax
movb %cl, 15(%r14)
movb 28(%r12), %cl
testb %cl, %cl
je LBB12_20
## %bb.52:
cmpb $44, %cl
movq %rdx, %rbx
je LBB12_21
## %bb.53:
leaq 17(%r14), %rax
movb %cl, 16(%r14)
movb 29(%r12), %cl
testb %cl, %cl
je LBB12_20
## %bb.54:
cmpb $44, %cl
movq %rdx, %rbx
je LBB12_21
## %bb.55:
leaq 18(%r14), %rax
movb %cl, 17(%r14)
movb 30(%r12), %cl
testb %cl, %cl
je LBB12_20
## %bb.56:
cmpb $44, %cl
movq %rdx, %rbx
je LBB12_21
## %bb.57:
leaq 19(%r14), %rax
movb %cl, 18(%r14)
movb 31(%r12), %cl
testb %cl, %cl
je LBB12_20
## %bb.58:
cmpb $44, %cl
movq %rdx, %rbx
je LBB12_21
## %bb.59:
leaq 20(%r14), %rax
movb %cl, 19(%r14)
movb 32(%r12), %cl
testb %cl, %cl
je LBB12_20
## %bb.60:
cmpb $44, %cl
movq %rdx, %rbx
je LBB12_21
## %bb.61:
leaq 21(%r14), %rax
movb %cl, 20(%r14)
movb 33(%r12), %cl
testb %cl, %cl
je LBB12_20
## %bb.62:
cmpb $44, %cl
movq %rdx, %rbx
je LBB12_21
## %bb.63:
leaq 22(%r14), %rax
movb %cl, 21(%r14)
movb 34(%r12), %cl
testb %cl, %cl
je LBB12_20
## %bb.64:
cmpb $44, %cl
movq %rdx, %rbx
je LBB12_21
## %bb.65:
leaq 23(%r14), %rax
movb %cl, 22(%r14)
movb 35(%r12), %cl
testb %cl, %cl
je LBB12_20
## %bb.66:
cmpb $44, %cl
movq %rdx, %rbx
je LBB12_21
## %bb.67:
leaq 24(%r14), %rax
movb %cl, 23(%r14)
movb 36(%r12), %cl
testb %cl, %cl
je LBB12_20
## %bb.68:
cmpb $44, %cl
movq %rdx, %rbx
je LBB12_21
## %bb.69:
leaq 25(%r14), %rax
movb %cl, 24(%r14)
movb 37(%r12), %cl
testb %cl, %cl
je LBB12_20
## %bb.70:
cmpb $44, %cl
movq %rdx, %rbx
je LBB12_21
## %bb.71:
leaq 26(%r14), %rax
movb %cl, 25(%r14)
movb 38(%r12), %cl
testb %cl, %cl
je LBB12_20
## %bb.72:
cmpb $44, %cl
movq %rdx, %rbx
je LBB12_21
## %bb.73:
leaq 27(%r14), %rax
movb %cl, 26(%r14)
movb 39(%r12), %cl
testb %cl, %cl
je LBB12_20
## %bb.74:
cmpb $44, %cl
movq %rdx, %rbx
je LBB12_21
## %bb.75:
leaq 28(%r14), %rax
movb %cl, 27(%r14)
movb 40(%r12), %cl
testb %cl, %cl
je LBB12_20
## %bb.76:
cmpb $44, %cl
movq %rdx, %rbx
je LBB12_21
## %bb.77:
leaq 29(%r14), %rax
movb %cl, 28(%r14)
movb 41(%r12), %cl
testb %cl, %cl
je LBB12_20
## %bb.78:
cmpb $44, %cl
movq %rdx, %rbx
je LBB12_21
## %bb.79:
leaq 30(%r14), %rax
movb %cl, 29(%r14)
movb 42(%r12), %cl
testb %cl, %cl
je LBB12_20
## %bb.80:
cmpb $44, %cl
jne LBB12_81 ## name field too long -> error_exit
LBB12_20:
movq %rdx, %rbx
LBB12_21:
## Name copied. Terminate it and require the stopping character to be ','.
addq $12, %r12
movb $0, (%rax)
subl %r14d, %eax ## length of the name just copied
cltq
cmpb $44, (%r12,%rax)
jne LBB12_100 ## not a comma -> parse failure, return 0
## %bb.22:
## Skip spaces after the comma (count accumulated in the high half of rax
## via the 0x100000000 increment trick), then atoi the value field.
movq %rbx, -48(%rbp) ## 8-byte Spill
addq %rax, %r12
movabsq $-4294967296, %rbx ## imm = 0xFFFFFFFF00000000
movabsq $4294967296, %r14 ## imm = 0x100000000
incq %r12
movq %rbx, %rax
movq %r12, %rcx
.p2align 4, 0x90
LBB12_23: ## =>This Inner Loop Header: Depth=1
addq %r14, %rax
cmpb $32, (%rcx)
leaq 1(%rcx), %rcx
je LBB12_23
## %bb.24:
sarq $32, %rax ## rax = number of spaces skipped
addq %rax, %r12
movq %r12, %rdi
callq _atoi
movq -56(%rbp), %rcx ## 8-byte Reload
movl %eax, (%rcx) ## *value_out = atoi(field)
movq %r12, %rdi
movl $44, %esi
callq _strchr ## find the comma after the value
testq %rax, %rax
je LBB12_100 ## missing -> return 0
## %bb.25:
incq %rax
movq %rbx, %rdx
movq %rax, %rcx
.p2align 4, 0x90
LBB12_26: ## =>This Inner Loop Header: Depth=1
addq %r14, %rdx
cmpb $32, (%rcx)
leaq 1(%rcx), %rcx
je LBB12_26
## %bb.27:
## Unrolled copy of the (max 4-char) spec field, stopping at NUL or ','.
sarq $32, %rdx
leaq (%rax,%rdx), %rcx
movb (%rax,%rdx), %dl
movq %r15, %rax
testb %dl, %dl
je LBB12_83
## %bb.28:
movq %r15, %rax
cmpb $44, %dl
je LBB12_83
## %bb.29:
leaq 1(%r15), %rax
movb %dl, (%r15)
movb 1(%rcx), %dl
testb %dl, %dl
je LBB12_83
## %bb.30:
cmpb $44, %dl
je LBB12_83
## %bb.31:
leaq 2(%r15), %rax
movb %dl, 1(%r15)
movb 2(%rcx), %dl
testb %dl, %dl
je LBB12_83
## %bb.32:
cmpb $44, %dl
je LBB12_83
## %bb.33:
leaq 3(%r15), %rax
movb %dl, 2(%r15)
movb 3(%rcx), %dl
testb %dl, %dl
je LBB12_83
## %bb.34:
cmpb $44, %dl
je LBB12_83
## %bb.35:
leaq 4(%r15), %rax
movb %dl, 3(%r15)
movb 4(%rcx), %dl
testb %dl, %dl
je LBB12_83
## %bb.36:
cmpb $44, %dl
jne LBB12_37 ## spec field too long -> error_exit
LBB12_83:
## Spec copied. Terminate, require ',', skip spaces, then copy the final
## (max 5-char) field up to ')'.
movb $0, (%rax)
subl %r15d, %eax
cltq
cmpb $44, (%rcx,%rax)
jne LBB12_100
## %bb.84:
addq %rax, %rcx
incq %rcx
movq %rcx, %rax
.p2align 4, 0x90
LBB12_85: ## =>This Inner Loop Header: Depth=1
addq %r14, %rbx
cmpb $32, (%rax)
leaq 1(%rax), %rax
je LBB12_85
## %bb.86:
sarq $32, %rbx
leaq (%rcx,%rbx), %rax
movb (%rcx,%rbx), %dl
movq -48(%rbp), %rsi ## 8-byte Reload (out: extra string)
movq %rsi, %rcx
testb %dl, %dl
je LBB12_99
## %bb.87:
movq %rsi, %rcx
cmpb $41, %dl ## ')'
je LBB12_99
## %bb.88:
leaq 1(%rsi), %rcx
movb %dl, (%rsi)
movb 1(%rax), %dl
testb %dl, %dl
je LBB12_99
## %bb.89:
cmpb $41, %dl
je LBB12_99
## %bb.90:
leaq 2(%rsi), %rcx
movb %dl, 1(%rsi)
movb 2(%rax), %dl
testb %dl, %dl
je LBB12_99
## %bb.91:
cmpb $41, %dl
je LBB12_99
## %bb.92:
leaq 3(%rsi), %rcx
movb %dl, 2(%rsi)
movb 3(%rax), %dl
testb %dl, %dl
je LBB12_99
## %bb.93:
cmpb $41, %dl
je LBB12_99
## %bb.94:
leaq 4(%rsi), %rcx
movb %dl, 3(%rsi)
movb 4(%rax), %dl
testb %dl, %dl
je LBB12_99
## %bb.95:
cmpb $41, %dl
je LBB12_99
## %bb.96:
leaq 5(%rsi), %rcx
movb %dl, 4(%rsi)
movb 5(%rax), %dl
testb %dl, %dl
je LBB12_99
## %bb.97:
cmpb $41, %dl
jne LBB12_98 ## final field too long -> error_exit
LBB12_99:
## Success iff the final field was terminated by ')'.
movb $0, (%rcx)
subl %esi, %ecx
movslq %ecx, %rcx
xorl %r13d, %r13d
cmpb $41, (%rax,%rcx)
sete %r13b ## return 1 only when ')' closed the field
LBB12_100:
movl %r13d, %eax
addq $24, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB12_37:
movb %dl, (%rax)
LBB12_82:
## Shared fatal-error exit for any over-long field (never returns).
leaq L_.str.60(%rip), %rdi
xorl %eax, %eax
callq _error_exit
LBB12_98:
movb %dl, (%rcx)
jmp LBB12_82
LBB12_81:
movb %cl, (%rax)
jmp LBB12_82
.cfi_endproc
## -- End function
.globl _add_replace_string ## -- Begin function add_replace_string
.p2align 4, 0x90
_add_replace_string: ## @add_replace_string
.cfi_startproc
## %bb.0:
## Append a (search, replace) string pair to a table embedded in *rdi:
## entries are 402 bytes (search at +0, replace at +201), the count lives at
## offset 12060, capacity is 30. On overflow, aborts via error_exit(.str.78).
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movslq 12060(%rdi), %rax ## current entry count
cmpq $30, %rax ## table full?
jge LBB13_1
## %bb.2:
movq %rdx, %r14 ## r14 = replace string
movq %rdi, %rbx ## rbx = table base
imulq $402, %rax, %rdi ## imm = 0x192 -- entry stride
addq %rbx, %rdi
callq _strcpy ## copy search string into entry+0
movslq 12060(%rbx), %rax
leal 1(%rax), %ecx
movl %ecx, 12060(%rbx) ## count++
imulq $402, %rax, %rax ## imm = 0x192
leaq (%rbx,%rax), %rdi
addq $201, %rdi ## entry+201 = replace slot
movq %r14, %rsi
popq %rbx
popq %r14
popq %rbp
jmp _strcpy ## TAILCALL -- copy replace string, return its result
LBB13_1:
leaq L_.str.78(%rip), %rdi
xorl %eax, %eax
callq _error_exit ## table overflow, never returns
.cfi_endproc
## -- End function
.globl _write_body ## -- Begin function write_body
.p2align 4, 0x90
_write_body: ## @write_body
.cfi_startproc
## %bb.0:
## Emit a block of body lines to the output FILE (rdi). rsi points at an array
## of 201-byte lines with its count at offset 60300; rdx points at the
## replace-string table filled by add_replace_string (count at 12060, 402-byte
## entries: search at +0, replacement at +201). Each line containing the
## marker string L_.str.79 must match at least one search string, which is
## spliced out and replaced in a 256-byte stack buffer; otherwise
## error_exit(.str.80) aborts. Finishes with a 2-byte fwrite of L_.str.82.
## A stack canary protects the two on-stack line buffers.
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $472, %rsp ## imm = 0x1D8 -- two line buffers + spills
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdx, -480(%rbp) ## 8-byte Spill (replace table)
movq %rdi, -488(%rbp) ## 8-byte Spill (output FILE)
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -48(%rbp) ## store stack canary
cmpl $0, 60300(%rsi) ## any lines at all?
jle LBB14_10
## %bb.1:
movq %rsi, %r12 ## r12 = line array
leaq -256(%rbp), %r15 ## r15 = working copy of the current line
xorl %ebx, %ebx ## rbx = line index
movq %rsi, -496(%rbp) ## 8-byte Spill
jmp LBB14_2
.p2align 4, 0x90
LBB14_8: ## in Loop: Header=BB14_2 Depth=1
## Inner loop done: a marker line with zero substitutions is fatal.
cmpl $0, -468(%rbp) ## 4-byte Folded Reload
movq -496(%rbp), %r12 ## 8-byte Reload
movq -504(%rbp), %rbx ## 8-byte Reload
je LBB14_12
LBB14_9: ## in Loop: Header=BB14_2 Depth=1
## Print the (possibly rewritten) line and advance to the next one.
movq -488(%rbp), %rdi ## 8-byte Reload
leaq L_.str.81(%rip), %rsi
movq %r15, %rdx
xorl %eax, %eax
callq _fprintf
incq %rbx
movslq 60300(%r12), %rax
cmpq %rax, %rbx
jge LBB14_10
LBB14_2: ## =>This Loop Header: Depth=1
## Child Loop BB14_5 Depth 2
## Copy line[rbx] (201-byte stride) into the working buffer.
imulq $201, %rbx, %rsi
addq %r12, %rsi
movl $201, %edx
movq %r15, %rdi
callq ___strcpy_chk
movq %r15, %rdi
leaq L_.str.79(%rip), %rsi
callq _strstr ## does this line contain the substitution marker?
testq %rax, %rax
je LBB14_9
## %bb.3: ## in Loop: Header=BB14_2 Depth=1
movq -480(%rbp), %rax ## 8-byte Reload
movl 12060(%rax), %r14d ## number of replace-table entries
testl %r14d, %r14d
jle LBB14_12 ## marker present but table empty -> fatal
## %bb.4: ## in Loop: Header=BB14_2 Depth=1
movq %rbx, -504(%rbp) ## 8-byte Spill
xorl %r12d, %r12d ## r12 = table index
movq -480(%rbp), %rbx ## 8-byte Reload -- rbx walks table entries
movl $0, -468(%rbp) ## 4-byte Folded Spill -- "did any substitution" flag
jmp LBB14_5
.p2align 4, 0x90
LBB14_7: ## in Loop: Header=BB14_5 Depth=2
incq %r12
movslq %r14d, %rax
addq $402, %rbx ## imm = 0x192 -- next table entry
cmpq %rax, %r12
movq %r13, %r15
jge LBB14_8
LBB14_5: ## Parent Loop BB14_2 Depth=1
## => This Inner Loop Header: Depth=2
## Splice: if line contains entry->search, cut it out and insert
## entry->replacement at the match position (tail saved in -464 buffer).
movq %r15, %r13
movq %r15, %rdi
movq %rbx, %rsi
callq _strstr
testq %rax, %rax
je LBB14_7
## %bb.6: ## in Loop: Header=BB14_5 Depth=2
movq %rax, %r15 ## r15 = match position
movq %rbx, %rdi
callq _strlen
movq %r15, %rsi
addq %rax, %rsi ## text following the matched search string
movl $201, %edx
leaq -464(%rbp), %r14
movq %r14, %rdi
callq ___strcpy_chk ## save the tail
leaq 201(%rbx), %rsi ## entry->replacement
movq %r15, %rdi
callq _strcpy ## overwrite match with replacement
movq %r15, %rdi
movq %r14, %rsi
callq _strcat ## re-append the saved tail
movq -480(%rbp), %rax ## 8-byte Reload
movl 12060(%rax), %r14d
movl $1, -468(%rbp) ## 4-byte Folded Spill -- substitution happened
jmp LBB14_7
LBB14_10:
## All lines emitted (or there were none): write the 2-byte trailer.
leaq L_.str.82(%rip), %rdi
movl $2, %esi
movl $1, %edx
movq -488(%rbp), %rcx ## 8-byte Reload
callq _fwrite
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
cmpq -48(%rbp), %rax ## canary check
jne LBB14_13
## %bb.11:
addq $472, %rsp ## imm = 0x1D8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB14_12:
leaq L_.str.80(%rip), %rdi
xorl %eax, %eax
callq _error_exit ## marker line with no substitution, never returns
LBB14_13:
callq ___stack_chk_fail ## canary smashed — abort
.cfi_endproc
## -- End function
##-----------------------------------------------------------------------
## _get_base_name — build a handler base name string from an opcode
## record by concatenating sprintf pieces.
## ABI: SysV AMD64 (macOS).
## In:  rdi = destination buffer (r14)
##      rsi = opcode record (rbx): byte at +30 is a numeric field
##            (appended if nonzero), strings at +31 and +35 are appended
##            unless they equal L_.str.85 -- presumably a "none"/"."
##            sentinel; confirm against the string table.
## Out: rax = last sprintf return when the final piece is appended via
##      the tail call, otherwise unspecified.
##-----------------------------------------------------------------------
.globl _get_base_name ## -- Begin function get_base_name
.p2align 4, 0x90
_get_base_name: ## @get_base_name
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rsi, %rbx
movq %rdi, %r14
## sprintf(dst, L_.str.83, record) — initial piece.
leaq L_.str.83(%rip), %rsi
movq %rbx, %rdx
xorl %eax, %eax ## variadic: no vector args
callq _sprintf
## If record[30] != 0, append sprintf(dst+strlen(dst), L_.str.84, record[30]).
movzbl 30(%rbx), %r15d
testl %r15d, %r15d
je LBB15_2
## %bb.1:
movq %r14, %rdi
callq _strlen
leaq (%rax,%r14), %rdi
leaq L_.str.84(%rip), %rsi
movl %r15d, %edx
xorl %eax, %eax
callq _sprintf
LBB15_2:
## If strcmp(record+31, L_.str.85) != 0, append that string too.
leaq 31(%rbx), %r15
leaq L_.str.85(%rip), %rsi
movq %r15, %rdi
callq _strcmp
testl %eax, %eax
je LBB15_4
## %bb.3:
movq %r14, %rdi
callq _strlen
leaq (%rax,%r14), %rdi
leaq L_.str.86(%rip), %rsi
movq %r15, %rdx
xorl %eax, %eax
callq _sprintf
LBB15_4:
## Same check for the string at record+35; last append is a tail call.
addq $35, %rbx
leaq L_.str.85(%rip), %rsi
movq %rbx, %rdi
callq _strcmp
testl %eax, %eax
je LBB15_5
## %bb.6:
movq %r14, %rdi
callq _strlen
addq %rax, %r14
leaq L_.str.86(%rip), %rsi
movq %r14, %rdi
movq %rbx, %rdx
xorl %eax, %eax
addq $8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
jmp _sprintf ## TAILCALL
LBB15_5:
addq $8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
##-----------------------------------------------------------------------
## _write_prototype — fprintf(file, L_.str.87, name).
## ABI: SysV AMD64 (macOS).
## In:  rdi = FILE*, rsi = name string (moved to rdx as the format arg).
## Tail-calls fprintf with al = 0 (no vector args for the variadic call).
##-----------------------------------------------------------------------
.globl _write_prototype ## -- Begin function write_prototype
.p2align 4, 0x90
_write_prototype: ## @write_prototype
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq %rsi, %rdx
leaq L_.str.87(%rip), %rsi
xorl %eax, %eax
popq %rbp
jmp _fprintf ## TAILCALL
.cfi_endproc
## -- End function
##-----------------------------------------------------------------------
## _write_function_name — fprintf(file, L_.str.88, name).
## ABI: SysV AMD64 (macOS).
## In:  rdi = FILE*, rsi = name string (moved to rdx as the format arg).
## Identical shape to _write_prototype, differing only in the format
## string; tail-calls fprintf with al = 0.
##-----------------------------------------------------------------------
.globl _write_function_name ## -- Begin function write_function_name
.p2align 4, 0x90
_write_function_name: ## @write_function_name
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq %rsi, %rdx
leaq L_.str.88(%rip), %rsi
xorl %eax, %eax
popq %rbp
jmp _fprintf ## TAILCALL
.cfi_endproc
## -- End function
##-----------------------------------------------------------------------
## _add_opcode_output_table_entry — append a 68-byte opcode record to
## the global output table.
## ABI: SysV AMD64 (macOS).
## In:  rdi = source record (68 bytes: 4x16-byte chunks + 4 bytes at +64)
##      rsi = name string, strcpy_chk'd (max 30) to the entry's start.
## Aborts via error_exit(L_.str.89) when the table already holds 3001
## entries. Entry stride is 68 bytes (computed as idx*64 + idx*4).
## After the copy, the 16-bit field at entry+42 is bit-counted and the
## count stored as a byte at entry+40 -- the SWAR mask sequence
## (0x5555 / 0x3333 / 0x707, then add the high byte) computes the
## number of set bits in that 16-bit word.
##-----------------------------------------------------------------------
.globl _add_opcode_output_table_entry ## -- Begin function add_opcode_output_table_entry
.p2align 4, 0x90
_add_opcode_output_table_entry: ## @add_opcode_output_table_entry
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
## Bounds check: table capacity is 3001 entries.
movslq _g_opcode_output_table_length(%rip), %rax
cmpq $3001, %rax ## imm = 0xBB9
jge LBB18_2
## %bb.1:
leal 1(%rax), %ecx
movl %ecx, _g_opcode_output_table_length(%rip)
## rbx = byte offset of the new entry: idx*68 = (idx<<6) + idx*4.
movq %rax, %rcx
shlq $6, %rcx
leaq (%rcx,%rax,4), %rbx
movq _g_opcode_output_table@GOTPCREL(%rip), %r14
leaq (%r14,%rbx), %rax
## Copy the full 68-byte record (4 bytes at +64, then 4 unaligned
## 16-byte chunks).
movl 64(%rdi), %ecx
movl %ecx, 64(%r14,%rbx)
movups (%rdi), %xmm0
movups 16(%rdi), %xmm1
movups 32(%rdi), %xmm2
movups 48(%rdi), %xmm3
movups %xmm3, 48(%r14,%rbx)
movups %xmm2, 32(%r14,%rbx)
movups %xmm1, 16(%r14,%rbx)
movups %xmm0, (%r14,%rbx)
## Overwrite the name field with the caller's string (max 30 bytes).
movl $30, %edx
movq %rax, %rdi
callq ___strcpy_chk
## SWAR popcount of the 16-bit word at entry+42; result -> entry+40.
movzwl 42(%r14,%rbx), %eax
movl %eax, %ecx
shrl %ecx
andl $21845, %ecx ## imm = 0x5555
andl $21845, %eax ## imm = 0x5555
addl %ecx, %eax
movl %eax, %ecx
shrl $2, %ecx
andl $13107, %ecx ## imm = 0x3333
andl $13107, %eax ## imm = 0x3333
addl %ecx, %eax
movl %eax, %ecx
shrl $4, %ecx
andl $1799, %ecx ## imm = 0x707
andl $1799, %eax ## imm = 0x707
addl %ecx, %eax
movl %eax, %ecx
shrl $8, %ecx
addl %eax, %ecx
movb %cl, 40(%r14,%rbx)
popq %rbx
popq %r14
popq %rbp
retq
LBB18_2:
## Fatal: table overflow.
leaq L_.str.89(%rip), %rdi
xorl %eax, %eax
callq _error_exit
.cfi_endproc
## -- End function
##-----------------------------------------------------------------------
## _print_opcode_output_table — sort the global output table and print
## every entry to the given stream.
## ABI: SysV AMD64 (macOS).
## In:  rdi = output FILE* (held in r14).
## Sorts with qsort(table, length, 68, compare_nof_true_bits), then per
## entry prints: fprintf(f, L_.str.90, name, word@42, word@44), then
## three fprintf(f, L_.str.91, byte@64/65/66) separated by 2-byte
## fwrite(L_.str.92) and terminated by 4-byte fwrite(L_.str.93).
## The loop body matches _write_table_entry below.
##-----------------------------------------------------------------------
.globl _print_opcode_output_table ## -- Begin function print_opcode_output_table
.p2align 4, 0x90
_print_opcode_output_table: ## @print_opcode_output_table
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdi, %r14
## qsort(g_opcode_output_table, length, 68, compare_nof_true_bits).
movslq _g_opcode_output_table_length(%rip), %rsi
movq _g_opcode_output_table@GOTPCREL(%rip), %rbx
leaq _compare_nof_true_bits(%rip), %rcx
movl $68, %edx
movq %rbx, %rdi
callq _qsort
cmpl $0, _g_opcode_output_table_length(%rip)
jle LBB19_3
## %bb.1:
leaq L_.str.91(%rip), %r12
leaq L_.str.92(%rip), %r13
xorl %r15d, %r15d ## r15 = entry index; rbx walks entries (stride 68)
.p2align 4, 0x90
LBB19_2: ## =>This Inner Loop Header: Depth=1
## Header line: fprintf(f, fmt90, entry, word@42, word@44).
movzwl 42(%rbx), %ecx
movzwl 44(%rbx), %r8d
movq %r14, %rdi
leaq L_.str.90(%rip), %rsi
movq %rbx, %rdx
xorl %eax, %eax
callq _fprintf
## Three cycle-count bytes at +64/+65/+66, separated by str.92.
movzbl 64(%rbx), %edx
movq %r14, %rdi
movq %r12, %rsi
xorl %eax, %eax
callq _fprintf
movl $2, %esi
movl $1, %edx
movq %r13, %rdi
movq %r14, %rcx
callq _fwrite
movzbl 65(%rbx), %edx
movq %r14, %rdi
movq %r12, %rsi
xorl %eax, %eax
callq _fprintf
movl $2, %esi
movl $1, %edx
movq %r13, %rdi
movq %r14, %rcx
callq _fwrite
movzbl 66(%rbx), %edx
movq %r14, %rdi
movq %r12, %rsi
xorl %eax, %eax
callq _fprintf
## Entry terminator: fwrite(str.93, 4, 1, f).
movl $4, %esi
movl $1, %edx
leaq L_.str.93(%rip), %rdi
movq %r14, %rcx
callq _fwrite
incq %r15
movslq _g_opcode_output_table_length(%rip), %rax
addq $68, %rbx
cmpq %rax, %r15
jl LBB19_2
LBB19_3:
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
##-----------------------------------------------------------------------
## _compare_nof_true_bits — qsort comparator for 68-byte table entries.
## ABI: SysV AMD64 (macOS). In: rdi, rsi = entry pointers. Out: eax.
## Compares, in order: the bit-count byte at +40, then the 16-bit word
## at +42, then the 16-bit word at +44; returns the first nonzero
## difference (a - b) as a signed int. When the first or second field
## differs, the jump to LBB20_3 subtracts the values already loaded
## into eax/ecx for that field.
##-----------------------------------------------------------------------
.p2align 4, 0x90 ## -- Begin function compare_nof_true_bits
_compare_nof_true_bits: ## @compare_nof_true_bits
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movzbl 40(%rdi), %eax
movzbl 40(%rsi), %ecx
cmpb %cl, %al
jne LBB20_3
## %bb.1:
movzwl 42(%rdi), %eax
movzwl 42(%rsi), %ecx
cmpw %cx, %ax
jne LBB20_3
## %bb.2:
movzwl 44(%rdi), %eax
movzwl 44(%rsi), %ecx
LBB20_3:
subl %ecx, %eax ## return a_field - b_field
popq %rbp
retq
.cfi_endproc
## -- End function
##-----------------------------------------------------------------------
## _write_table_entry — print one 68-byte opcode table entry.
## ABI: SysV AMD64 (macOS).
## In:  rdi = output FILE* (r14), rsi = entry pointer (rbx).
## Output format is identical to one iteration of the loop in
## _print_opcode_output_table: fprintf(fmt90, entry, word@42, word@44),
## then byte@64/65/66 via fmt91, separated by 2-byte fwrite(str.92),
## terminated by a tail-called 4-byte fwrite(str.93).
##-----------------------------------------------------------------------
.globl _write_table_entry ## -- Begin function write_table_entry
.p2align 4, 0x90
_write_table_entry: ## @write_table_entry
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rsi, %rbx
movq %rdi, %r14
movzwl 42(%rsi), %ecx
movzwl 44(%rsi), %r8d
leaq L_.str.90(%rip), %rsi
movq %rbx, %rdx
xorl %eax, %eax ## variadic: no vector args
callq _fprintf
movzbl 64(%rbx), %edx
leaq L_.str.91(%rip), %r15
movq %r14, %rdi
movq %r15, %rsi
xorl %eax, %eax
callq _fprintf
leaq L_.str.92(%rip), %r12
movl $2, %esi
movl $1, %edx
movq %r12, %rdi
movq %r14, %rcx
callq _fwrite
movzbl 65(%rbx), %edx
movq %r14, %rdi
movq %r15, %rsi
xorl %eax, %eax
callq _fprintf
movl $2, %esi
movl $1, %edx
movq %r12, %rdi
movq %r14, %rcx
callq _fwrite
movzbl 66(%rbx), %edx
movq %r14, %rdi
movq %r15, %rsi
xorl %eax, %eax
callq _fprintf
## Final fwrite(str.93, 4, 1, f) done as a tail call.
leaq L_.str.93(%rip), %rdi
movl $4, %esi
movl $1, %edx
movq %r14, %rcx
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
jmp _fwrite ## TAILCALL
.cfi_endproc
## -- End function
##-----------------------------------------------------------------------
## _set_opcode_struct — clone an opcode record and fill in per-CPU cycle
## counts and effective-address info.
## ABI: SysV AMD64 (macOS).
## In:  rdi = source record (68 bytes), rsi = destination (rbx),
##      edx = ea mode index (r14d) into g_ea_info_table (24-byte rows).
## Behavior visible here:
##  - copies the full 68-byte record rdi -> rsi
##  - get_oper_cycles(dst, mode, 0/1/2) -> bytes at dst+64/65/66
##    (presumably three CPU-type cycle counts -- confirm in C source)
##  - if mode != 0 and the string at dst+35 equals L_.str.85, rewrites
##    dst+35 via __sprintf_chk(L_.str.94, g_ea_info_table[mode].field0)
##  - ORs g_ea_info_table[mode] words at +16 / +20 into dst+42 / dst+44
##    (the bit-mask fields used by the table comparator/printer above).
##-----------------------------------------------------------------------
.globl _set_opcode_struct ## -- Begin function set_opcode_struct
.p2align 4, 0x90
_set_opcode_struct: ## @set_opcode_struct
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %edx, %r14d
movq %rsi, %rbx
## 68-byte structure copy (4 bytes at +64 plus 4x16 unaligned chunks).
movl 64(%rdi), %eax
movl %eax, 64(%rsi)
movups (%rdi), %xmm0
movups 16(%rdi), %xmm1
movups 32(%rdi), %xmm2
movups 48(%rdi), %xmm3
movups %xmm3, 48(%rsi)
movups %xmm2, 32(%rsi)
movups %xmm1, 16(%rsi)
movups %xmm0, (%rsi)
## Three cycle-count lookups: get_oper_cycles(dst, mode, 0|1|2).
movq %rsi, %rdi
movl %edx, %esi
xorl %edx, %edx
callq _get_oper_cycles
movb %al, 64(%rbx)
movq %rbx, %rdi
movl %r14d, %esi
movl $1, %edx
callq _get_oper_cycles
movb %al, 65(%rbx)
movq %rbx, %rdi
movl %r14d, %esi
movl $2, %edx
callq _get_oper_cycles
movb %al, 66(%rbx)
## If mode != 0 and dst+35 matches the sentinel string, substitute the
## ea-specific size string from g_ea_info_table[mode] (row stride 24).
leaq 35(%rbx), %r15
leaq L_.str.85(%rip), %rsi
movq %r15, %rdi
callq _strcmp
movslq %r14d, %r12
testl %r14d, %r14d
je LBB22_3
## %bb.1:
testl %eax, %eax
jne LBB22_3
## %bb.2:
leaq (%r12,%r12,2), %rax ## r12*3, scaled by 8 below = mode*24
leaq _g_ea_info_table(%rip), %rcx
movq (%rcx,%rax,8), %r8
leaq L_.str.94(%rip), %rcx
movl $5, %edx
movq %r15, %rdi
xorl %esi, %esi
xorl %eax, %eax
callq ___sprintf_chk
LBB22_3:
## Merge the ea mode's bit masks into the record's mask words.
leaq (%r12,%r12,2), %rax
leaq _g_ea_info_table(%rip), %rcx
movzwl 16(%rcx,%rax,8), %edx
orw %dx, 42(%rbx)
movzwl 20(%rcx,%rax,8), %eax
orw %ax, 44(%rbx)
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
##-----------------------------------------------------------------------
## _generate_opcode_handler — generate one opcode handler function:
## register it in the output table, emit its prototype and name, install
## the ea-specific text substitutions, and write the body.
## ABI: SysV AMD64 (macOS).
## In (inferred from spills/uses):
##   rdi = output FILE* (-264), rsi = body struct (-280),
##   rdx = replace-pair struct (r12, counter at +12060, 402-byte pairs),
##   rcx = source opcode record (r14), r8d = ea mode (r13d).
## Flow:
##   - malloc(68) + set_opcode_struct builds the concrete record
##   - get_base_name -> local buffer; fprintf(g_prototype_file, fmt87)
##   - inline copy of add_opcode_output_table_entry (same 68-byte copy,
##     30-byte name strcpy_chk, SWAR popcount of word@42 -> byte@40,
##     capacity 3001 else error_exit(str.89))
##   - fprintf(out, fmt88, name)
##   - if mode != 0: append six substitution pairs to the replace struct
##     (counter capped at 30, else error_exit(str.78)). The embedded
##     immediates decode to ASCII "EA_AY_8"/"EA_AY_16"/"EA_AY_32" and
##     "EA_AX_8"/"EA_AX_16"/"EA_AX_32"-style keys with replacement text
##     built from g_ea_info_table[mode] via __sprintf_chk -- this is the
##     m68kmake-style EA macro expansion; confirm exact strings in the
##     cstring section.
##   - write_body, g_num_functions++, free(record).
## Stack canary checked on exit.
##-----------------------------------------------------------------------
.globl _generate_opcode_handler ## -- Begin function generate_opcode_handler
.p2align 4, 0x90
_generate_opcode_handler: ## @generate_opcode_handler
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $248, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %r8d, %r13d
movq %rcx, %r14
movq %rdx, %r12
movq %rsi, -280(%rbp) ## 8-byte Spill
movq %rdi, -264(%rbp) ## 8-byte Spill
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -48(%rbp)
## Build the concrete opcode record on the heap.
movl $68, %edi
callq _malloc
movq %rax, %rbx
movq %r14, %rdi
movq %rax, %rsi
movl %r13d, %edx
callq _set_opcode_struct
## Base name -> -256(%rbp); prototype line to g_prototype_file.
leaq -256(%rbp), %r14
movq %r14, %rdi
movq %rbx, %rsi
callq _get_base_name
movq _g_prototype_file(%rip), %rdi
leaq L_.str.87(%rip), %rsi
movq %r14, %rdx
xorl %eax, %eax
callq _fprintf
## Inline add_opcode_output_table_entry (capacity check, 68-byte copy,
## name strcpy_chk, SWAR popcount of mask word@42 -> byte@40).
movslq _g_opcode_output_table_length(%rip), %rax
cmpq $3001, %rax ## imm = 0xBB9
jge LBB23_11
## %bb.1:
leal 1(%rax), %ecx
movl %ecx, _g_opcode_output_table_length(%rip)
movq %rax, %rcx
shlq $6, %rcx
leaq (%rcx,%rax,4), %r15 ## r15 = idx*68 byte offset
movq _g_opcode_output_table@GOTPCREL(%rip), %r14
leaq (%r14,%r15), %rdi
movl 64(%rbx), %eax
movl %eax, 64(%r14,%r15)
movups (%rbx), %xmm0
movups 16(%rbx), %xmm1
movups 32(%rbx), %xmm2
movq %rbx, -272(%rbp) ## 8-byte Spill
movups 48(%rbx), %xmm3
movups %xmm3, 48(%r14,%r15)
movups %xmm2, 32(%r14,%r15)
movups %xmm1, 16(%r14,%r15)
movups %xmm0, (%r14,%r15)
leaq -256(%rbp), %rsi
movl $30, %edx
callq ___strcpy_chk
movzwl 42(%r14,%r15), %eax
movl %eax, %ecx
shrl %ecx
andl $21845, %ecx ## imm = 0x5555
andl $21845, %eax ## imm = 0x5555
addl %ecx, %eax
movl %eax, %ecx
shrl $2, %ecx
andl $13107, %ecx ## imm = 0x3333
andl $13107, %eax ## imm = 0x3333
addl %ecx, %eax
movl %eax, %ecx
shrl $4, %ecx
andl $1799, %ecx ## imm = 0x707
andl $1799, %eax ## imm = 0x707
addl %ecx, %eax
movl %eax, %ecx
shrl $8, %ecx
addl %eax, %ecx
movb %cl, 40(%r14,%r15)
## fprintf(out, fmt88, base_name).
leaq L_.str.88(%rip), %rsi
movq -264(%rbp), %rbx ## 8-byte Reload
movq %rbx, %rdi
leaq -256(%rbp), %rdx
xorl %eax, %eax
callq _fprintf
## Mode 0 has no EA substitutions.
testl %r13d, %r13d
je LBB23_9
## %bb.2:
## r13 -> &g_ea_info_table[mode].field1 (row stride 24); r8 is the
## ea format argument for each replacement __sprintf_chk below.
movslq %r13d, %rax
leaq (%rax,%rax,2), %r14
leaq _g_ea_info_table(%rip), %r15
movq 8(%r15,%r14,8), %r8
leaq L_.str.95(%rip), %rcx
leaq -256(%rbp), %rdi
movl $201, %edx
xorl %esi, %esi
xorl %eax, %eax
callq ___sprintf_chk
## Pair 1: key immediates decode to ASCII "EA_AY_8".
movslq 12060(%r12), %rax
cmpq $30, %rax
jge LBB23_12
## %bb.3:
leaq (%r15,%r14,8), %r13
addq $8, %r13
imulq $402, %rax, %rax ## imm = 0x192
movups L_.str.96(%rip), %xmm0
movups %xmm0, (%r12,%rax)
movabsq $15867435649286469, %rcx ## imm = 0x385F59415F4145 ("EA_AY_8")
movq %rcx, 13(%r12,%rax)
movslq 12060(%r12), %rax
leal 1(%rax), %ecx
movl %ecx, 12060(%r12)
imulq $402, %rax, %rax ## imm = 0x192
leaq (%r12,%rax), %rdi
addq $201, %rdi
leaq -256(%rbp), %r14
movq %r14, %rsi
callq _strcpy
## Pair 2: key ".._AY_16".
movq (%r13), %r8
leaq L_.str.97(%rip), %rcx
movl $201, %edx
movq %r14, %rdi
xorl %esi, %esi
xorl %eax, %eax
callq ___sprintf_chk
movslq 12060(%r12), %rax
cmpq $30, %rax
jge LBB23_12
## %bb.4:
imulq $402, %rax, %rax ## imm = 0x192
movups L_.str.98(%rip), %xmm0
movups %xmm0, (%r12,%rax)
movabsq $15253934331486017, %rcx ## imm = 0x36315F59415F41 ("A_AY_16")
movq %rcx, 14(%r12,%rax)
movslq 12060(%r12), %rax
leal 1(%rax), %ecx
movl %ecx, 12060(%r12)
imulq $402, %rax, %rax ## imm = 0x192
leaq (%r12,%rax), %rdi
addq $201, %rdi
leaq -256(%rbp), %r14
movq %r14, %rsi
callq _strcpy
## Pair 3: key ".._AY_32".
movq (%r13), %r8
leaq L_.str.99(%rip), %rcx
movl $201, %edx
movq %r14, %rdi
xorl %esi, %esi
xorl %eax, %eax
callq ___sprintf_chk
movslq 12060(%r12), %rax
cmpq $30, %rax
jge LBB23_12
## %bb.5:
imulq $402, %rax, %rax ## imm = 0x192
movups L_.str.100(%rip), %xmm0
movups %xmm0, (%r12,%rax)
movabsq $14130233447898945, %rcx ## imm = 0x32335F59415F41 ("A_AY_32")
movq %rcx, 14(%r12,%rax)
movslq 12060(%r12), %rax
leal 1(%rax), %ecx
movl %ecx, 12060(%r12)
imulq $402, %rax, %rax ## imm = 0x192
leaq (%r12,%rax), %rdi
addq $201, %rdi
leaq -256(%rbp), %r14
movq %r14, %rsi
callq _strcpy
## Pair 4: key "..ER_AY_8"-style.
movq (%r13), %r8
leaq L_.str.101(%rip), %rcx
movl $201, %edx
movq %r14, %rdi
xorl %esi, %esi
xorl %eax, %eax
callq ___sprintf_chk
movslq 12060(%r12), %rax
cmpq $30, %rax
jge LBB23_12
## %bb.6:
imulq $402, %rax, %rax ## imm = 0x192
movups L_.str.102(%rip), %xmm0
movups %xmm0, (%r12,%rax)
movabsq $15867435649290821, %rcx ## imm = 0x385F59415F5245 ("ER_AY_8")
movq %rcx, 15(%r12,%rax)
movslq 12060(%r12), %rax
leal 1(%rax), %ecx
movl %ecx, 12060(%r12)
imulq $402, %rax, %rax ## imm = 0x192
leaq (%r12,%rax), %rdi
addq $201, %rdi
leaq -256(%rbp), %r14
movq %r14, %rsi
callq _strcpy
## Pair 5.
movq (%r13), %r8
leaq L_.str.103(%rip), %rcx
movl $201, %edx
movq %r14, %rdi
xorl %esi, %esi
xorl %eax, %eax
callq ___sprintf_chk
movslq 12060(%r12), %rax
cmpq $30, %rax
jge LBB23_12
## %bb.7:
imulq $402, %rax, %rax ## imm = 0x192
movups L_.str.104(%rip), %xmm0
movups %xmm0, (%r12,%rax)
movabsq $15253934331486034, %rcx ## imm = 0x36315F59415F52 ("R_AY_16")
movq %rcx, 16(%r12,%rax)
movslq 12060(%r12), %rax
leal 1(%rax), %ecx
movl %ecx, 12060(%r12)
imulq $402, %rax, %rax ## imm = 0x192
leaq (%r12,%rax), %rdi
addq $201, %rdi
leaq -256(%rbp), %r14
movq %r14, %rsi
callq _strcpy
## Pair 6.
movq (%r13), %r8
leaq L_.str.105(%rip), %rcx
movl $201, %edx
movq %r14, %rdi
xorl %esi, %esi
xorl %eax, %eax
callq ___sprintf_chk
movslq 12060(%r12), %rax
cmpq $30, %rax
jge LBB23_12
## %bb.8:
imulq $402, %rax, %rax ## imm = 0x192
movups L_.str.106(%rip), %xmm0
movups %xmm0, (%r12,%rax)
movabsq $14130233447898962, %rcx ## imm = 0x32335F59415F52 ("R_AY_32")
movq %rcx, 16(%r12,%rax)
movslq 12060(%r12), %rax
leal 1(%rax), %ecx
movl %ecx, 12060(%r12)
imulq $402, %rax, %rax ## imm = 0x192
leaq (%r12,%rax), %rdi
addq $201, %rdi
leaq -256(%rbp), %rsi
callq _strcpy
movq -264(%rbp), %rbx ## 8-byte Reload
LBB23_9:
## Emit the body with substitutions applied, bump the function count,
## free the heap record.
movq %rbx, %rdi
movq -280(%rbp), %rsi ## 8-byte Reload
movq %r12, %rdx
callq _write_body
incl _g_num_functions(%rip)
movq -272(%rbp), %rdi ## 8-byte Reload
callq _free
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
cmpq -48(%rbp), %rax
jne LBB23_13
## %bb.10:
addq $248, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB23_12:
## Fatal: replace-pair table overflow (capacity 30).
leaq L_.str.78(%rip), %rdi
xorl %eax, %eax
callq _error_exit
LBB23_11:
## Fatal: output table overflow (capacity 3001).
leaq L_.str.89(%rip), %rdi
xorl %eax, %eax
callq _error_exit
LBB23_13:
callq ___stack_chk_fail
.cfi_endproc
## -- End function
##-----------------------------------------------------------------------
## _generate_opcode_ea_variants — generate one handler per allowed
## effective-address mode for a single opcode record.
## ABI: SysV AMD64 (macOS).
## In:  rdi = FILE* (r15), rsi = body struct (r12),
##      rdx = replace struct (r13, counter at +12060),
##      rcx = opcode record (r14).
## If strcmp(record+46, L_.str.107) == 0 the opcode has no ea field and
## a single handler is generated with mode 0 (tail call at LBB24_24).
## Otherwise each byte of the ea-allowed string at record+46..55 gates
## one mode: 'A'(+46)->1, '+'(+47)->2 (and 3 when record[30]==8),
## '-'(+48)->4 (and 5 when record[30]==8), 'D'(+49)->6, 'X'(+50)->7,
## 'W'(+51)->8, 'L'(+52)->9, 'd'(+53)->10, 'x'(+54)->11, 'I'(+55)->12.
## The replace-pair counter is restored to its entry value (ebx) before
## every generate_opcode_handler call so each variant starts from the
## same substitution set.
##-----------------------------------------------------------------------
.globl _generate_opcode_ea_variants ## -- Begin function generate_opcode_ea_variants
.p2align 4, 0x90
_generate_opcode_ea_variants: ## @generate_opcode_ea_variants
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rcx, %r14
movq %rdx, %r13
movq %rsi, %r12
movq %rdi, %r15
## Remember the replace-pair count on entry; restored between variants.
movl 12060(%rdx), %eax
movl %eax, -44(%rbp) ## 4-byte Spill
leaq 46(%rcx), %rbx
leaq L_.str.107(%rip), %rsi
movq %rbx, %rdi
callq _strcmp
testl %eax, %eax
je LBB24_24
## %bb.1:
## 'A' at +46 -> mode 1 (address register direct, presumably).
cmpb $65, (%rbx)
jne LBB24_3
## %bb.2:
movq %r15, %rdi
movq %r12, %rsi
movq %r13, %rdx
movq %r14, %rcx
movl $1, %r8d
callq _generate_opcode_handler
LBB24_3:
movl -44(%rbp), %ebx ## 4-byte Reload
movl %ebx, 12060(%r13)
## '+' at +47 -> mode 2; mode 3 additionally when record[30] == 8.
cmpb $43, 47(%r14)
jne LBB24_6
## %bb.4:
movq %r15, %rdi
movq %r12, %rsi
movq %r13, %rdx
movq %r14, %rcx
movl $2, %r8d
callq _generate_opcode_handler
movl %ebx, 12060(%r13)
cmpb $8, 30(%r14)
jne LBB24_6
## %bb.5:
movq %r15, %rdi
movq %r12, %rsi
movq %r13, %rdx
movq %r14, %rcx
movl $3, %r8d
callq _generate_opcode_handler
LBB24_6:
movl %ebx, 12060(%r13)
## '-' at +48 -> mode 4; mode 5 additionally when record[30] == 8.
cmpb $45, 48(%r14)
jne LBB24_9
## %bb.7:
movq %r15, %rdi
movq %r12, %rsi
movq %r13, %rdx
movq %r14, %rcx
movl $4, %r8d
callq _generate_opcode_handler
movl %ebx, 12060(%r13)
cmpb $8, 30(%r14)
jne LBB24_9
## %bb.8:
movq %r15, %rdi
movq %r12, %rsi
movq %r13, %rdx
movq %r14, %rcx
movl $5, %r8d
callq _generate_opcode_handler
LBB24_9:
movl %ebx, 12060(%r13)
## 'D' at +49 -> mode 6.
cmpb $68, 49(%r14)
jne LBB24_11
## %bb.10:
movq %r15, %rdi
movq %r12, %rsi
movq %r13, %rdx
movq %r14, %rcx
movl $6, %r8d
callq _generate_opcode_handler
LBB24_11:
movl %ebx, 12060(%r13)
## 'X' at +50 -> mode 7.
cmpb $88, 50(%r14)
jne LBB24_13
## %bb.12:
movq %r15, %rdi
movq %r12, %rsi
movq %r13, %rdx
movq %r14, %rcx
movl $7, %r8d
callq _generate_opcode_handler
LBB24_13:
movl %ebx, 12060(%r13)
## 'W' at +51 -> mode 8.
cmpb $87, 51(%r14)
jne LBB24_15
## %bb.14:
movq %r15, %rdi
movq %r12, %rsi
movq %r13, %rdx
movq %r14, %rcx
movl $8, %r8d
callq _generate_opcode_handler
LBB24_15:
movl %ebx, 12060(%r13)
## 'L' at +52 -> mode 9.
cmpb $76, 52(%r14)
jne LBB24_17
## %bb.16:
movq %r15, %rdi
movq %r12, %rsi
movq %r13, %rdx
movq %r14, %rcx
movl $9, %r8d
callq _generate_opcode_handler
LBB24_17:
movl %ebx, 12060(%r13)
## 'd' at +53 -> mode 10.
cmpb $100, 53(%r14)
jne LBB24_19
## %bb.18:
movq %r15, %rdi
movq %r12, %rsi
movq %r13, %rdx
movq %r14, %rcx
movl $10, %r8d
callq _generate_opcode_handler
LBB24_19:
movl %ebx, 12060(%r13)
## 'x' at +54 -> mode 11.
cmpb $120, 54(%r14)
jne LBB24_21
## %bb.20:
movq %r15, %rdi
movq %r12, %rsi
movq %r13, %rdx
movq %r14, %rcx
movl $11, %r8d
callq _generate_opcode_handler
LBB24_21:
movl %ebx, 12060(%r13)
## 'I' at +55 -> mode 12.
cmpb $73, 55(%r14)
jne LBB24_23
## %bb.22:
movq %r15, %rdi
movq %r12, %rsi
movq %r13, %rdx
movq %r14, %rcx
movl $12, %r8d
callq _generate_opcode_handler
LBB24_23:
movl %ebx, 12060(%r13)
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB24_24:
## No ea field: single handler with mode 0 (tail call).
movq %r15, %rdi
movq %r12, %rsi
movq %r13, %rdx
movq %r14, %rcx
xorl %r8d, %r8d
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp _generate_opcode_handler ## TAILCALL
.cfi_endproc
## -- End function
##-----------------------------------------------------------------------
## _generate_opcode_cc_variants — generate the condition-code variants
## of an opcode: clone the record, then for each of 14 entries in
## g_cc_table (row stride 16, iterated 0..224) install cc-specific text
## substitutions and delegate to generate_opcode_ea_variants.
## ABI: SysV AMD64 (macOS).
## In (inferred): rdi = FILE* (-144), rsi = body struct (-152),
##   rdx = replace struct (r13), rcx = source record (r15 initially),
##   r8d = offset within the record name where the cc mnemonic is
##   spliced in via strcpy (r14d -> -136(%rbp)).
## Per iteration:
##   - two __sprintf_chk strings from g_cc_table[i].field40
##     (fmt L_.str.108 / L_.str.109)
##   - append two replace pairs; the first key's immediates decode to
##     ASCII "M68KMAKE_CC" ("M68KMAKE" + "_CC"), the second comes from
##     L_.str.111; counter capped at 30 else error_exit(str.78)
##   - strcpy g_cc_table[i].field32 into the clone's name at the given
##     splice offset
##   - rewrite bits 8..11 of the clone's word@44 with a rolling value
##     (starts 0x200, +0x100 each iteration, masked with 0xF0FF)
##   - call generate_opcode_ea_variants, then restore the replace count.
## The clone has byte@43 OR'ed with 0x0F up front. Clone freed at end;
## stack canary checked.
##-----------------------------------------------------------------------
.globl _generate_opcode_cc_variants ## -- Begin function generate_opcode_cc_variants
.p2align 4, 0x90
_generate_opcode_cc_variants: ## @generate_opcode_cc_variants
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $120, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %r8d, %r14d
movq %rcx, %r15
movq %rdx, %r13
movq %rsi, -152(%rbp) ## 8-byte Spill
movq %rdi, -144(%rbp) ## 8-byte Spill
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -48(%rbp)
## Remember the replace-pair count to restore after each variant.
movl 12060(%rdx), %eax
movl %eax, -116(%rbp) ## 4-byte Spill
## Heap clone of the 68-byte record; set low 4 bits of byte@43.
movl $68, %edi
callq _malloc
movups (%r15), %xmm0
movups 16(%r15), %xmm1
movups 32(%r15), %xmm2
movups 48(%r15), %xmm3
movups %xmm2, 32(%rax)
movups %xmm0, (%rax)
movups %xmm1, 16(%rax)
movups %xmm3, 48(%rax)
movl 64(%r15), %ecx
movl %ecx, 64(%rax)
orb $15, 43(%rax)
## -136(%rbp) = &clone[splice_offset] for the cc-name strcpy below.
movslq %r14d, %rcx
movq %rax, -128(%rbp) ## 8-byte Spill
addq %rax, %rcx
movq %rcx, -136(%rbp) ## 8-byte Spill
movw $512, %bx ## imm = 0x200 (rolling cc bits for word@44)
leaq _g_cc_table(%rip), %r12
leaq -80(%rbp), %rax
leaq -112(%rbp), %r15
xorl %r14d, %r14d ## r14 = byte offset into g_cc_table (stride 16)
.p2align 4, 0x90
LBB25_1: ## =>This Inner Loop Header: Depth=1
movl %ebx, -120(%rbp) ## 4-byte Spill
## Replacement strings from g_cc_table[i].field40.
movq 40(%r14,%r12), %r8
movl $20, %edx
movq %rax, %rbx
movq %rax, %rdi
xorl %esi, %esi
leaq L_.str.108(%rip), %rcx
xorl %eax, %eax
callq ___sprintf_chk
movq 40(%r14,%r12), %r8
movl $20, %edx
movq %r15, %rdi
xorl %esi, %esi
leaq L_.str.109(%rip), %rcx
xorl %eax, %eax
callq ___sprintf_chk
## Append pair 1; key immediates decode to "M68KMAKE" + "_CC".
movslq 12060(%r13), %rax
cmpq $30, %rax
jge LBB25_6
## %bb.2: ## in Loop: Header=BB25_1 Depth=1
imulq $402, %rax, %rax ## imm = 0x192
movabsq $4993156412100589133, %rcx ## imm = 0x454B414D4B38364D ("M68KMAKE")
movq %rcx, (%r13,%rax)
movl $4408159, 8(%r13,%rax) ## imm = 0x43435F ("_CC")
movslq 12060(%r13), %rax
leal 1(%rax), %ecx
movl %ecx, 12060(%r13)
imulq $402, %rax, %rax ## imm = 0x192
leaq (%rax,%r13), %rdi
addq $201, %rdi
movq %rbx, %rsi
callq _strcpy
## Append pair 2 (key from L_.str.111).
movslq 12060(%r13), %rax
cmpq $30, %rax
jge LBB25_6
## %bb.3: ## in Loop: Header=BB25_1 Depth=1
imulq $402, %rax, %rax ## imm = 0x192
movups L_.str.111(%rip), %xmm0
movups %xmm0, (%r13,%rax)
movslq 12060(%r13), %rax
leal 1(%rax), %ecx
movl %ecx, 12060(%r13)
imulq $402, %rax, %rax ## imm = 0x192
leaq (%rax,%r13), %rdi
addq $201, %rdi
movq %r15, %rsi
callq _strcpy
## Splice the cc mnemonic (g_cc_table[i].field32) into the clone name.
leaq _g_cc_table(%rip), %rax
movq 32(%r14,%rax), %rsi
movq -136(%rbp), %rdi ## 8-byte Reload
callq _strcpy
## word@44 = (word@44 & 0xF0FF) | rolling_cc_bits.
movq -128(%rbp), %rcx ## 8-byte Reload
movzwl 44(%rcx), %eax
andl $-3841, %eax ## imm = 0xF0FF
movl -120(%rbp), %ebx ## 4-byte Reload
orl %ebx, %eax
movw %ax, 44(%rcx)
## Generate all ea variants for this cc; restore replace count after.
movq -144(%rbp), %rdi ## 8-byte Reload
movq -152(%rbp), %rsi ## 8-byte Reload
movq %r13, %rdx
callq _generate_opcode_ea_variants
movl -116(%rbp), %eax ## 4-byte Reload
movl %eax, 12060(%r13)
addl $256, %ebx ## imm = 0x100
addq $16, %r14
cmpq $224, %r14
leaq -80(%rbp), %rax
leaq _g_cc_table(%rip), %r12
jne LBB25_1
## %bb.4:
movq -128(%rbp), %rdi ## 8-byte Reload
callq _free
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
cmpq -48(%rbp), %rax
jne LBB25_7
## %bb.5:
addq $120, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB25_6:
## Fatal: replace-pair table overflow (capacity 30).
leaq L_.str.78(%rip), %rdi
xorl %eax, %eax
callq _error_exit
LBB25_7:
callq ___stack_chk_fail
.cfi_endproc
## -- End function
##-----------------------------------------------------------------------
## _process_opcode_handlers — main driver: scan g_input_file for handler
## definitions, look each one up in g_opcode_input_table, pick an output
## stream, and dispatch generation.
## ABI: SysV AMD64 (macOS). No arguments (reads globals).
## Heap buffers: 12064 bytes (-896, replace-pair struct, counter at
## +12060) and 60304 bytes (-888, body struct, line counter at +60300,
## 201-byte lines).
## Outer loop, per handler:
##   - read lines until one contains the marker L_.str.76; stripping
##     leading CR and trailing CR/LF via memcpy / truncation, counting
##     g_line_number; an 80-byte memcmp match against L_.str.112 ends
##     the table -> free both buffers and return
##   - read body lines into the body struct until a line starting with
##     '}' (ASCII 125), max index 299 else error_exit(str.114)
##   - extract_opcode_info(line, name, &num, size_str, ea_str); zero
##     result -> error_exit(str.116)
##   - linear search of g_opcode_input_table (stride 68; name at +0,
##     byte field at +30, string at +31, string at +35) for a record
##     matching name/num/size/ea
##   - output stream selection based on the first character of the
##     handler name: starts from g_ops_ac_file, moves to g_ops_dm_file
##     when char > 'c', to g_ops_nz_file when char >= 'n' -- an
##     alphabetical a-c / d-m / n-z split; sticky across iterations
##     via the -920(%rbp) slot
##   - dispatch: names equal to L_.str.118 or .119 ->
##     generate_opcode_cc_variants(r8=1); L_.str.120 -> r8=2;
##     L_.str.121 -> r8=4; anything else ->
##     generate_opcode_ea_variants.
## error_exit targets: str.113 (EOF before marker), str.115 (EOF in
## body), str.114 (body too long), str.116 (bad opcode info).
## Stack canary checked on exit.
##-----------------------------------------------------------------------
.globl _process_opcode_handlers ## -- Begin function process_opcode_handlers
.p2align 4, 0x90
_process_opcode_handlers: ## @process_opcode_handlers
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $904, %rsp ## imm = 0x388
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -48(%rbp)
## r13 = 0xFFFFFFFF00000000: used to decrement the length kept in the
## high 32 bits of rcx inside the CR/LF-stripping loops.
movabsq $-4294967296, %r13 ## imm = 0xFFFFFFFF00000000
movq _g_input_file(%rip), %r15
movl $12064, %edi ## imm = 0x2F20
callq _malloc
movq %rax, -896(%rbp) ## 8-byte Spill (replace-pair struct)
movl $60304, %edi ## imm = 0xEB90
callq _malloc
movq _g_ops_ac_file(%rip), %rcx
movq %rcx, -920(%rbp) ## 8-byte Spill (current output stream)
movq %rax, -888(%rbp) ## 8-byte Spill (body struct)
decq %rax
movq %rax, -936(%rbp) ## 8-byte Spill (body base - 1, for indexing)
leaq L_.str.76(%rip), %rbx
leaq -256(%rbp), %rdi
leaq -464(%rbp), %r12 ## r12 = extracted opcode name buffer
movq _g_opcode_input_table@GOTPCREL(%rip), %rax
addq $35, %rax
movq %rax, -928(%rbp) ## 8-byte Spill (&table[0]+35, search cursor base)
movq %r15, -912(%rbp) ## 8-byte Spill
.p2align 4, 0x90
LBB26_1: ## =>This Loop Header: Depth=1
## Child Loop BB26_2 Depth 2
## Child Loop BB26_11 Depth 3
## Child Loop BB26_18 Depth 2
## Child Loop BB26_23 Depth 3
## Child Loop BB26_34 Depth 2
movb $0, -256(%rbp)
movq %rbx, %rsi
movq %rdi, %r14
jmp LBB26_2
.p2align 4, 0x90
LBB26_14: ## in Loop: Header=BB26_2 Depth=2
movb $0, -256(%rbp)
incl _g_line_number(%rip)
LBB26_16: ## in Loop: Header=BB26_2 Depth=2
leaq -256(%rbp), %r14
movq %r14, %rdi
movq %rbx, %rsi
LBB26_2: ## Parent Loop BB26_1 Depth=1
## => This Loop Header: Depth=2
## Child Loop BB26_11 Depth 3
## Scan for the handler-start marker (L_.str.76) in the current line.
callq _strstr
testq %rax, %rax
jne LBB26_17
## %bb.3: ## in Loop: Header=BB26_2 Depth=2
## 80-byte prefix match against L_.str.112 = end-of-table sentinel.
movl $80, %edx
movq %r14, %rdi
leaq L_.str.112(%rip), %rsi
callq _memcmp
testl %eax, %eax
je LBB26_4
## %bb.6: ## in Loop: Header=BB26_2 Depth=2
movq %r14, %rdi
movl $200, %esi
movq %r15, %rdx
callq _fgets
testq %rax, %rax
je LBB26_56
## %bb.7: ## in Loop: Header=BB26_2 Depth=2
## Strip a leading CR by shifting the buffer left one byte.
cmpb $13, -256(%rbp)
leaq -256(%rbp), %rdi
jne LBB26_9
## %bb.8: ## in Loop: Header=BB26_2 Depth=2
movl $199, %edx
leaq -255(%rbp), %rsi
callq _memcpy
leaq -256(%rbp), %rdi
LBB26_9: ## in Loop: Header=BB26_2 Depth=2
callq _strlen
testl %eax, %eax
je LBB26_14
## %bb.10: ## in Loop: Header=BB26_2 Depth=2
## rcx high half = length (decremented via r13); rax = scan index.
movq %rax, %rcx
shlq $32, %rcx
cltq
jmp LBB26_11
.p2align 4, 0x90
LBB26_13: ## in Loop: Header=BB26_11 Depth=3
addq %r13, %rcx
decq %rax
testl %eax, %eax
je LBB26_14
LBB26_11: ## Parent Loop BB26_1 Depth=1
## Parent Loop BB26_2 Depth=2
## => This Inner Loop Header: Depth=3
## Trim trailing CR/LF bytes from the end of the line.
movzbl -257(%rbp,%rax), %edx
cmpb $13, %dl
je LBB26_13
## %bb.12: ## in Loop: Header=BB26_11 Depth=3
cmpb $10, %dl
je LBB26_13
## %bb.15: ## in Loop: Header=BB26_2 Depth=2
sarq $32, %rcx
movb $0, -256(%rbp,%rcx)
incl _g_line_number(%rip)
testl $-2147483648, %eax ## imm = 0x80000000
je LBB26_16
jmp LBB26_56
.p2align 4, 0x90
LBB26_17: ## in Loop: Header=BB26_1 Depth=1
## Marker found: reset body line count and read the function body.
movq -888(%rbp), %rax ## 8-byte Reload
movl $0, 60300(%rax)
xorl %eax, %eax
.p2align 4, 0x90
LBB26_18: ## Parent Loop BB26_1 Depth=1
## => This Loop Header: Depth=2
## Child Loop BB26_23 Depth 3
cltq
imulq $201, %rax, %rbx
movq -888(%rbp), %rax ## 8-byte Reload
leaq (%rax,%rbx), %r14
movq %r14, %rdi
movl $200, %esi
movq %r15, %rdx
callq _fgets
testq %rax, %rax
je LBB26_57
## %bb.19: ## in Loop: Header=BB26_18 Depth=2
## Same CR/LF stripping as above, but in the body struct's line slot.
cmpb $13, (%r14)
jne LBB26_21
## %bb.20: ## in Loop: Header=BB26_18 Depth=2
movq -888(%rbp), %rax ## 8-byte Reload
leaq (%rax,%rbx), %rsi
incq %rsi
movl $199, %edx
movq %r14, %rdi
callq _memcpy
LBB26_21: ## in Loop: Header=BB26_18 Depth=2
movq %r14, %rdi
callq _strlen
testl %eax, %eax
je LBB26_26
## %bb.22: ## in Loop: Header=BB26_18 Depth=2
movq %rax, %rcx
shlq $32, %rcx
cltq
addq -936(%rbp), %rbx ## 8-byte Folded Reload
jmp LBB26_23
.p2align 4, 0x90
LBB26_25: ## in Loop: Header=BB26_23 Depth=3
addq %r13, %rcx
decq %rax
testl %eax, %eax
je LBB26_26
LBB26_23: ## Parent Loop BB26_1 Depth=1
## Parent Loop BB26_18 Depth=2
## => This Inner Loop Header: Depth=3
movzbl (%rbx,%rax), %edx
cmpb $13, %dl
je LBB26_25
## %bb.24: ## in Loop: Header=BB26_23 Depth=3
cmpb $10, %dl
je LBB26_25
## %bb.27: ## in Loop: Header=BB26_18 Depth=2
sarq $32, %rcx
movb $0, (%rcx,%r14)
incl _g_line_number(%rip)
testl $-2147483648, %eax ## imm = 0x80000000
je LBB26_28
jmp LBB26_57
.p2align 4, 0x90
LBB26_26: ## in Loop: Header=BB26_18 Depth=2
movb $0, (%r14)
incl _g_line_number(%rip)
LBB26_28: ## in Loop: Header=BB26_18 Depth=2
## Count the line; a line starting with '}' ends the body.
movq -888(%rbp), %rsi ## 8-byte Reload
movslq 60300(%rsi), %rcx
imulq $201, %rcx, %rdx
leal 1(%rcx), %eax
cmpb $125, (%rsi,%rdx)
movl %eax, 60300(%rsi)
je LBB26_29
## %bb.31: ## in Loop: Header=BB26_18 Depth=2
## Body length cap: 300 lines.
cmpl $299, %ecx ## imm = 0x12B
jle LBB26_18
jmp LBB26_32
.p2align 4, 0x90
LBB26_29: ## in Loop: Header=BB26_1 Depth=1
incl _g_num_primitives(%rip)
## Parse the header line into name / number / size / ea strings.
leaq -256(%rbp), %rdi
movq %r12, %rsi
leaq -900(%rbp), %rdx
leaq -672(%rbp), %rcx
leaq -880(%rbp), %r8
callq _extract_opcode_info
testl %eax, %eax
je LBB26_30
## %bb.33: ## in Loop: Header=BB26_1 Depth=1
movl -900(%rbp), %r15d
movq -928(%rbp), %rbx ## 8-byte Reload (table cursor = &entry+35)
jmp LBB26_34
.p2align 4, 0x90
LBB26_38: ## in Loop: Header=BB26_34 Depth=2
addq $68, %rbx ## next table entry (stride 68)
LBB26_34: ## Parent Loop BB26_1 Depth=1
## => This Inner Loop Header: Depth=2
## Match name (entry+0), byte field (entry+30), size string
## (entry+31), ea string (entry+35) against the parsed values.
## NOTE(review): no visible end-of-table bound on this search --
## presumably the table is guaranteed to contain a match; confirm.
leaq -35(%rbx), %r14
movq %r12, %rdi
movq %r14, %rsi
callq _strcmp
testl %eax, %eax
jne LBB26_38
## %bb.35: ## in Loop: Header=BB26_34 Depth=2
movzbl -5(%rbx), %eax
cmpl %eax, %r15d
jne LBB26_38
## %bb.36: ## in Loop: Header=BB26_34 Depth=2
leaq -4(%rbx), %rsi
leaq -672(%rbp), %rdi
callq _strcmp
testl %eax, %eax
jne LBB26_38
## %bb.37: ## in Loop: Header=BB26_34 Depth=2
leaq -880(%rbp), %rdi
movq %rbx, %rsi
callq _strcmp
testl %eax, %eax
jne LBB26_38
## %bb.39: ## in Loop: Header=BB26_1 Depth=1
## Pick the output stream by the first character of the handler name
## (sticky a-c / d-m / n-z alphabetical split).
movb -464(%rbp), %cl
movq _g_ops_dm_file(%rip), %rax
movq -920(%rbp), %rbx ## 8-byte Reload
cmpq _g_ops_ac_file(%rip), %rbx
jne LBB26_42
## %bb.40: ## in Loop: Header=BB26_1 Depth=1
cmpb $99, %cl
jle LBB26_42
## %bb.41: ## in Loop: Header=BB26_1 Depth=1
movq %rax, %rbx
movq -912(%rbp), %r15 ## 8-byte Reload
movq -896(%rbp), %rcx ## 8-byte Reload
jmp LBB26_45
.p2align 4, 0x90
LBB26_42: ## in Loop: Header=BB26_1 Depth=1
movq %rbx, %rdx
cmpb $110, %cl
movq -912(%rbp), %r15 ## 8-byte Reload
movq -896(%rbp), %rcx ## 8-byte Reload
jl LBB26_44
## %bb.43: ## in Loop: Header=BB26_1 Depth=1
movq _g_ops_nz_file(%rip), %rdx
LBB26_44: ## in Loop: Header=BB26_1 Depth=1
cmpq %rax, %rbx
cmoveq %rdx, %rbx
LBB26_45: ## in Loop: Header=BB26_1 Depth=1
## Reset the replace-pair count and dispatch by handler-name class.
movl $0, 12060(%rcx)
movq %r14, %rdi
leaq L_.str.118(%rip), %rsi
callq _strcmp
testl %eax, %eax
movq %rbx, -920(%rbp) ## 8-byte Spill
je LBB26_47
## %bb.46: ## in Loop: Header=BB26_1 Depth=1
movq %r14, %rdi
leaq L_.str.119(%rip), %rsi
callq _strcmp
testl %eax, %eax
je LBB26_47
## %bb.50: ## in Loop: Header=BB26_1 Depth=1
movq %r14, %rdi
leaq L_.str.120(%rip), %rsi
callq _strcmp
testl %eax, %eax
je LBB26_51
## %bb.52: ## in Loop: Header=BB26_1 Depth=1
movq %r14, %rdi
leaq L_.str.121(%rip), %rsi
callq _strcmp
movq %rbx, %rdi
testl %eax, %eax
je LBB26_53
## %bb.54: ## in Loop: Header=BB26_1 Depth=1
## Default: plain ea-variant generation.
movq -888(%rbp), %rsi ## 8-byte Reload
movq -896(%rbp), %rdx ## 8-byte Reload
movq %r14, %rcx
callq _generate_opcode_ea_variants
jmp LBB26_49
.p2align 4, 0x90
LBB26_47: ## in Loop: Header=BB26_1 Depth=1
## cc-variant generation with splice offset 1 (str.118 / str.119).
movq %rbx, %rdi
movq -888(%rbp), %rsi ## 8-byte Reload
movq -896(%rbp), %rdx ## 8-byte Reload
movq %r14, %rcx
movl $1, %r8d
LBB26_48: ## in Loop: Header=BB26_1 Depth=1
callq _generate_opcode_cc_variants
LBB26_49: ## in Loop: Header=BB26_1 Depth=1
leaq L_.str.76(%rip), %rbx
leaq -256(%rbp), %rdi
jmp LBB26_1
LBB26_51: ## in Loop: Header=BB26_1 Depth=1
## cc-variant generation with splice offset 2 (str.120).
movq %rbx, %rdi
movq -888(%rbp), %rsi ## 8-byte Reload
movq -896(%rbp), %rdx ## 8-byte Reload
movq %r14, %rcx
movl $2, %r8d
jmp LBB26_48
LBB26_53: ## in Loop: Header=BB26_1 Depth=1
## cc-variant generation with splice offset 4 (str.121).
movq -888(%rbp), %rsi ## 8-byte Reload
movq -896(%rbp), %rdx ## 8-byte Reload
movq %r14, %rcx
movl $4, %r8d
jmp LBB26_48
LBB26_4:
## End-of-table sentinel matched: release buffers and return.
movq -896(%rbp), %rdi ## 8-byte Reload
callq _free
movq -888(%rbp), %rdi ## 8-byte Reload
callq _free
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
cmpq -48(%rbp), %rax
jne LBB26_55
## %bb.5:
addq $904, %rsp ## imm = 0x388
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB26_57:
## Fatal: EOF while reading a handler body.
leaq L_.str.115(%rip), %rdi
xorl %eax, %eax
callq _error_exit
LBB26_32:
## Fatal: handler body exceeds 300 lines.
leaq L_.str.114(%rip), %rdi
xorl %eax, %eax
callq _error_exit
LBB26_56:
## Fatal: EOF while scanning for the start marker.
leaq L_.str.113(%rip), %rdi
xorl %eax, %eax
callq _error_exit
LBB26_30:
## Fatal: malformed opcode info line.
leaq L_.str.116(%rip), %rdi
xorl %eax, %eax
callq _error_exit
LBB26_55:
callq ___stack_chk_fail
.cfi_endproc
## -- End function
## 16-byte literal pool for _populate_table (SSE comparisons/stores).
## LCPI27_0 + LCPI27_1 together hold the ASCII marker
## "M68KMAKE_TABLE_START" split across two 16-byte chunks;
## LCPI27_2/LCPI27_4 are 8-byte runs of '.' and '1' characters;
## LCPI27_3/LCPI27_5 are descending single-bit masks 0x8000..0x100 and
## 0x80..0x1 (a full 16-bit test pattern as two vectors of 8 shorts).
.section __TEXT,__literal16,16byte_literals
.p2align 4 ## -- Begin function populate_table
## "AKE_TABLE_START\0" — tail of the marker string (bytes 5..20).
LCPI27_0:
.byte 65 ## 0x41
.byte 75 ## 0x4b
.byte 69 ## 0x45
.byte 95 ## 0x5f
.byte 84 ## 0x54
.byte 65 ## 0x41
.byte 66 ## 0x42
.byte 76 ## 0x4c
.byte 69 ## 0x45
.byte 95 ## 0x5f
.byte 83 ## 0x53
.byte 84 ## 0x54
.byte 65 ## 0x41
.byte 82 ## 0x52
.byte 84 ## 0x54
.byte 0 ## 0x0
## "M68KMAKE_TABLE_S" — head of the marker string (bytes 0..15).
LCPI27_1:
.byte 77 ## 0x4d
.byte 54 ## 0x36
.byte 56 ## 0x38
.byte 75 ## 0x4b
.byte 77 ## 0x4d
.byte 65 ## 0x41
.byte 75 ## 0x4b
.byte 69 ## 0x45
.byte 95 ## 0x5f
.byte 84 ## 0x54
.byte 65 ## 0x41
.byte 66 ## 0x42
.byte 76 ## 0x4c
.byte 69 ## 0x45
.byte 95 ## 0x5f
.byte 83 ## 0x53
## Eight '.' characters (low half; high half unused padding).
LCPI27_2:
.byte 46 ## 0x2e
.byte 46 ## 0x2e
.byte 46 ## 0x2e
.byte 46 ## 0x2e
.byte 46 ## 0x2e
.byte 46 ## 0x2e
.byte 46 ## 0x2e
.byte 46 ## 0x2e
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
## Bit masks 0x8000 down to 0x100 (high byte of a 16-bit mask set).
LCPI27_3:
.short 32768 ## 0x8000
.short 16384 ## 0x4000
.short 8192 ## 0x2000
.short 4096 ## 0x1000
.short 2048 ## 0x800
.short 1024 ## 0x400
.short 512 ## 0x200
.short 256 ## 0x100
## Eight '1' characters (low half; high half unused padding).
LCPI27_4:
.byte 49 ## 0x31
.byte 49 ## 0x31
.byte 49 ## 0x31
.byte 49 ## 0x31
.byte 49 ## 0x31
.byte 49 ## 0x31
.byte 49 ## 0x31
.byte 49 ## 0x31
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
## Bit masks 0x80 down to 0x1 (low byte of a 16-bit mask set).
LCPI27_5:
.short 128 ## 0x80
.short 64 ## 0x40
.short 32 ## 0x20
.short 16 ## 0x10
.short 8 ## 0x8
.short 4 ## 0x4
.short 2 ## 0x2
.short 1 ## 0x1
	.section	__TEXT,__text,regular,pure_instructions
## ---------------------------------------------------------------------------
## void populate_table(void)   -- compiler-generated (clang -O2, SysV AMD64)
##
## Reads the opcode description table from the global FILE* _g_input_file:
##   1. Loop BB27_2: fgets(buf, 200, g_input_file) into the 200-byte stack
##      buffer at -288(%rbp), stripping a leading '\r' (memcpy shift) and
##      trailing '\r'/'\n' bytes, bumping _g_line_number per line, until the
##      line equals the 21-byte sentinel "M68KMAKE_TABLE_START" (checked with
##      two overlapping 16-byte SSE compares against LCPI27_0/LCPI27_1).
##   2. Loop BB27_13: parse each subsequent line into consecutive 68-byte
##      records at _g_opcode_input_table (%r15, advanced by 68 per row).
##      Fields are space-separated; copy loops stop on a byte c with
##      (c & 0xDF) == 0, i.e. c == '\0' or c == ' '.  NOTE(review): the field
##      offsets written below (0, 30, 31, 35, 42, 44, 46, 57-66) presumably
##      match the row struct of the generator's C source -- confirm there.
##   3. A line whose first 80 bytes memcmp-equal L_.str.112 ends the table:
##      the current record's first byte is zeroed and the function returns.
## EOF at any fgets -> error_exit(L_.str.123); malformed fields ->
## error_exit(L_.str.60 / L_.str.61).  Clobbers per SysV; callee-saved regs
## are preserved; __stack_chk_guard protects the frame.
##
## Register tricks used throughout: %r13 = 0xFFFFFFFF00000000 (-1 in the
## high dword) and %rbx = 0x0000000100000000 (+1 in the high dword) let one
## addq per iteration keep a loop counter in the high 32 bits of a pointer-
## walking register; `sarq $32` extracts the signed count afterwards.
## ---------------------------------------------------------------------------
	.globl	_populate_table                 ## -- Begin function populate_table
	.p2align	4, 0x90
_populate_table:                        ## @populate_table
	.cfi_startproc
## %bb.0:
	pushq	%rbp
	.cfi_def_cfa_offset 16
	.cfi_offset %rbp, -16
	movq	%rsp, %rbp
	.cfi_def_cfa_register %rbp
	pushq	%r15
	pushq	%r14
	pushq	%r13
	pushq	%r12
	pushq	%rbx
	subq	$280, %rsp                      ## imm = 0x118
	.cfi_offset %rbx, -56
	.cfi_offset %r12, -48
	.cfi_offset %r13, -40
	.cfi_offset %r14, -32
	.cfi_offset %r15, -24
	movq	___stack_chk_guard@GOTPCREL(%rip), %rax
	movq	(%rax), %rax
	movq	%rax, -48(%rbp)                 ## stack canary
	movb	$0, -288(%rbp)                  ## buf[0] = 0 so the first sentinel compare fails
	movdqa	-288(%rbp), %xmm0               ## buf[0..15]
	movdqu	-283(%rbp), %xmm1               ## buf[5..20] (overlapping load)
	pxor	LCPI27_0(%rip), %xmm1           ## xor with "AKE_TABLE_START\0"
	pxor	LCPI27_1(%rip), %xmm0           ## xor with "M68KMAKE_TABLE_S"
	movabsq	$-4294967296, %r13              ## imm = 0xFFFFFFFF00000000 (high-dword -1)
	por	%xmm1, %xmm0
	ptest	%xmm0, %xmm0                    ## zero <=> buf == "M68KMAKE_TABLE_START"
	je	LBB27_11
## %bb.1:
	## materialize the same sentinel chunks in registers for reuse inside the loop
	movabsq	$6007596621766939743, %rax      ## imm = 0x535F454C4241545F ("_TABLE_S")
	movabsq	$4993156412100589133, %rcx      ## imm = 0x454B414D4B38364D ("M68KMAKE")
	movabsq	$23734338584796997, %rdx        ## imm = 0x54524154535F45 ("E_START\0")
	movabsq	$5495026325976861505, %rsi      ## imm = 0x4C4241545F454B41 ("AKE_TABL")
	leaq	-287(%rbp), %r14                ## r14 = buf + 1 (for CR-strip memcpy)
	leaq	-288(%rbp), %r15                ## r15 = buf
	movq	%rcx, %xmm1
	movq	%rax, %xmm0
	punpcklqdq	%xmm0, %xmm1            ## xmm1 = xmm1[0],xmm0[0]
	movdqa	%xmm1, -320(%rbp)               ## 16-byte Spill
	movq	%rsi, %xmm1
	movq	%rdx, %xmm0
	punpcklqdq	%xmm0, %xmm1            ## xmm1 = xmm1[0],xmm0[0]
	movdqa	%xmm1, -304(%rbp)               ## 16-byte Spill
	jmp	LBB27_2
	.p2align	4, 0x90
LBB27_10:                               ## in Loop: Header=BB27_2 Depth=1
	movb	$0, -288(%rbp)                  ## line was empty after trimming
	incl	_g_line_number(%rip)
LBB27_22:                               ## in Loop: Header=BB27_2 Depth=1
	## compare trimmed line against "M68KMAKE_TABLE_START"
	movdqu	-283(%rbp), %xmm0
	pxor	-304(%rbp), %xmm0               ## 16-byte Folded Reload
	movdqa	-288(%rbp), %xmm1
	pxor	-320(%rbp), %xmm1               ## 16-byte Folded Reload
	por	%xmm0, %xmm1
	ptest	%xmm1, %xmm1
	je	LBB27_11                        ## sentinel found -> start parsing rows
LBB27_2:                                ## =>This Loop Header: Depth=1
                                        ## Child Loop BB27_7 Depth 2
	## read next line: fgets(buf, 200, g_input_file)
	movq	_g_input_file(%rip), %rdx
	movq	%r15, %rdi
	movl	$200, %esi
	callq	_fgets
	testq	%rax, %rax
	je	LBB27_208                       ## EOF before sentinel -> error
## %bb.3:                               ## in Loop: Header=BB27_2 Depth=1
	cmpb	$13, -288(%rbp)                 ## leading '\r'?
	jne	LBB27_5
## %bb.4:                               ## in Loop: Header=BB27_2 Depth=1
	## shift the line left one byte to drop the CR
	movl	$199, %edx
	movq	%r15, %rdi
	movq	%r14, %rsi
	callq	_memcpy
LBB27_5:                                ## in Loop: Header=BB27_2 Depth=1
	movq	%r15, %rdi
	callq	_strlen
	testl	%eax, %eax
	je	LBB27_10                        ## empty line
## %bb.6:                               ## in Loop: Header=BB27_2 Depth=1
	movq	%rax, %rcx
	shlq	$32, %rcx                       ## high dword of rcx = trimmed length
	cltq
	jmp	LBB27_7
	.p2align	4, 0x90
LBB27_9:                                ## in Loop: Header=BB27_7 Depth=2
	addq	%r13, %rcx                      ## length-- (high dword)
	decq	%rax
	testl	%eax, %eax
	je	LBB27_10
LBB27_7:                                ## Parent Loop BB27_2 Depth=1
                                        ## => This Inner Loop Header: Depth=2
	## strip trailing '\r'/'\n' bytes, scanning backwards from buf[len-1]
	movzbl	-289(%rbp,%rax), %edx
	cmpb	$13, %dl
	je	LBB27_9
## %bb.8:                               ## in Loop: Header=BB27_7 Depth=2
	cmpb	$10, %dl
	je	LBB27_9
## %bb.21:                              ## in Loop: Header=BB27_2 Depth=1
	sarq	$32, %rcx                       ## rcx = trimmed length
	movb	$0, -288(%rbp,%rcx)             ## NUL-terminate
	incl	_g_line_number(%rip)
	testl	$-2147483648, %eax              ## imm = 0x80000000; defensive sign check
	je	LBB27_22
LBB27_208:
	## fgets returned NULL (or index went negative): fatal input error
	leaq	L_.str.123(%rip), %rdi
	xorl	%eax, %eax                      ## variadic call: 0 vector args
	callq	_error_exit
LBB27_11:
	## sentinel seen -- read the first table row
	movq	_g_input_file(%rip), %rdx
	leaq	-288(%rbp), %r12                ## r12 = buf (line buffer)
	movq	%r12, %rdi
	movl	$200, %esi
	callq	_fgets
	testq	%rax, %rax
	je	LBB27_208
## %bb.12:
	movabsq	$4294967296, %rbx               ## imm = 0x100000000 (high-dword +1)
	movq	_g_opcode_input_table@GOTPCREL(%rip), %r15 ## r15 -> current 68-byte record
	jmp	LBB27_13
	.p2align	4, 0x90
LBB27_20:                               ## in Loop: Header=BB27_13 Depth=1
	movb	$0, -288(%rbp)                  ## blank line: skip
	incl	_g_line_number(%rip)
LBB27_207:                              ## in Loop: Header=BB27_13 Depth=1
	addq	$68, %r15                       ## advance to next 68-byte record
	movq	_g_input_file(%rip), %rdx
	movq	%r12, %rdi
	movl	$200, %esi
	callq	_fgets
	testq	%rax, %rax
	je	LBB27_208
LBB27_13:                               ## =>This Loop Header: Depth=1
                                        ## Child Loop BB27_17 Depth 2
                                        ## Child Loop BB27_28 Depth 2
                                        ## Child Loop BB27_33 Depth 2
                                        ## Child Loop BB27_96 Depth 2
                                        ## Child Loop BB27_101 Depth 2
                                        ## Child Loop BB27_106 Depth 2
                                        ## Child Loop BB27_111 Depth 2
                                        ## Child Loop BB27_146 Depth 2
                                        ## Child Loop BB27_170 Depth 2
                                        ## Child Loop BB27_172 Depth 2
                                        ## Child Loop BB27_174 Depth 2
                                        ## Child Loop BB27_176 Depth 2
                                        ## Child Loop BB27_178 Depth 2
                                        ## Child Loop BB27_184 Depth 2
                                        ## Child Loop BB27_189 Depth 2
                                        ## Child Loop BB27_194 Depth 2
                                        ## Child Loop BB27_199 Depth 2
                                        ## Child Loop BB27_203 Depth 2
	## per-row parse loop: trim line, detect end marker, split fields
	cmpb	$13, -288(%rbp)                 ## leading '\r'?
	jne	LBB27_15
## %bb.14:                              ## in Loop: Header=BB27_13 Depth=1
	movl	$199, %edx
	movq	%r12, %rdi
	leaq	-287(%rbp), %rsi
	callq	_memcpy
LBB27_15:                               ## in Loop: Header=BB27_13 Depth=1
	movq	%r12, %rdi
	callq	_strlen
	testl	%eax, %eax
	je	LBB27_20
## %bb.16:                              ## in Loop: Header=BB27_13 Depth=1
	movq	%rax, %rcx
	shlq	$32, %rcx
	cltq
	jmp	LBB27_17
	.p2align	4, 0x90
LBB27_19:                               ## in Loop: Header=BB27_17 Depth=2
	addq	%r13, %rcx
	decq	%rax
	testl	%eax, %eax
	je	LBB27_20
LBB27_17:                               ## Parent Loop BB27_13 Depth=1
                                        ## => This Inner Loop Header: Depth=2
	## strip trailing CR/LF (same scheme as BB27_7)
	movzbl	-289(%rbp,%rax), %edx
	cmpb	$13, %dl
	je	LBB27_19
## %bb.18:                              ## in Loop: Header=BB27_17 Depth=2
	cmpb	$10, %dl
	je	LBB27_19
## %bb.23:                              ## in Loop: Header=BB27_13 Depth=1
	sarq	$32, %rcx
	movb	$0, -288(%rbp,%rcx)
	incl	_g_line_number(%rip)
	testl	$-2147483648, %eax              ## imm = 0x80000000
	jne	LBB27_208
## %bb.24:                              ## in Loop: Header=BB27_13 Depth=1
	movb	-288(%rbp), %r14b
	testb	%r14b, %r14b
	je	LBB27_207                       ## empty line: next record slot unused? no -- skip row
## %bb.25:                              ## in Loop: Header=BB27_13 Depth=1
	## end-of-table marker? memcmp(buf, L_.str.112, 80)
	movl	$80, %edx
	movq	%r12, %rdi
	leaq	L_.str.112(%rip), %rsi
	callq	_memcmp
	testl	%eax, %eax
	je	LBB27_209
## %bb.26:                              ## in Loop: Header=BB27_13 Depth=1
	movq	%r12, %rax
	cmpb	$32, %r14b                      ## leading space?
	jne	LBB27_29
## %bb.27:                              ## in Loop: Header=BB27_13 Depth=1
	movq	%r12, %rcx
	.p2align	4, 0x90
LBB27_28:                               ## Parent Loop BB27_13 Depth=1
                                        ## => This Inner Loop Header: Depth=2
	## skip leading spaces
	leaq	1(%rcx), %rax
	cmpb	$32, 1(%rcx)
	movq	%rax, %rcx
	je	LBB27_28
LBB27_29:                               ## in Loop: Header=BB27_13 Depth=1
	subl	%r12d, %eax
	movslq	%eax, %rcx
	leaq	(%rcx,%rbp), %rax
	addq	$-288, %rax                     ## imm = 0xFEE0; rax -> first non-space char
	movb	-288(%rbp,%rcx), %dl
	movq	%r15, %rcx
	## (c & 0xDF) == 0  <=>  c is '\0' or ' ' : end of field
	testb	$-33, %dl
	je	LBB27_32
## %bb.30:                              ## in Loop: Header=BB27_13 Depth=1
	## unrolled copy of field 1 (up to 30 chars) into record[0..29]
	movb	%dl, (%r15)
	movb	1(%rax), %cl
	testb	$-33, %cl
	jne	LBB27_36
## %bb.31:                              ## in Loop: Header=BB27_13 Depth=1
	leaq	1(%r15), %rcx
	jmp	LBB27_32
LBB27_36:                               ## in Loop: Header=BB27_13 Depth=1
	movb	%cl, 1(%r15)
	movb	2(%rax), %cl
	testb	$-33, %cl
	jne	LBB27_38
## %bb.37:                              ## in Loop: Header=BB27_13 Depth=1
	leaq	2(%r15), %rcx
	jmp	LBB27_32
LBB27_38:                               ## in Loop: Header=BB27_13 Depth=1
	movb	%cl, 2(%r15)
	movb	3(%rax), %cl
	testb	$-33, %cl
	jne	LBB27_40
## %bb.39:                              ## in Loop: Header=BB27_13 Depth=1
	leaq	3(%r15), %rcx
	jmp	LBB27_32
LBB27_40:                               ## in Loop: Header=BB27_13 Depth=1
	movb	%cl, 3(%r15)
	movb	4(%rax), %cl
	testb	$-33, %cl
	jne	LBB27_42
## %bb.41:                              ## in Loop: Header=BB27_13 Depth=1
	leaq	4(%r15), %rcx
	jmp	LBB27_32
LBB27_42:                               ## in Loop: Header=BB27_13 Depth=1
	movb	%cl, 4(%r15)
	movb	5(%rax), %cl
	testb	$-33, %cl
	jne	LBB27_44
## %bb.43:                              ## in Loop: Header=BB27_13 Depth=1
	leaq	5(%r15), %rcx
	jmp	LBB27_32
LBB27_44:                               ## in Loop: Header=BB27_13 Depth=1
	movb	%cl, 5(%r15)
	movb	6(%rax), %cl
	testb	$-33, %cl
	jne	LBB27_46
## %bb.45:                              ## in Loop: Header=BB27_13 Depth=1
	leaq	6(%r15), %rcx
	jmp	LBB27_32
LBB27_46:                               ## in Loop: Header=BB27_13 Depth=1
	movb	%cl, 6(%r15)
	movb	7(%rax), %cl
	testb	$-33, %cl
	jne	LBB27_48
## %bb.47:                              ## in Loop: Header=BB27_13 Depth=1
	leaq	7(%r15), %rcx
	jmp	LBB27_32
LBB27_48:                               ## in Loop: Header=BB27_13 Depth=1
	movb	%cl, 7(%r15)
	movb	8(%rax), %cl
	testb	$-33, %cl
	jne	LBB27_50
## %bb.49:                              ## in Loop: Header=BB27_13 Depth=1
	leaq	8(%r15), %rcx
	jmp	LBB27_32
LBB27_50:                               ## in Loop: Header=BB27_13 Depth=1
	movb	%cl, 8(%r15)
	movb	9(%rax), %cl
	testb	$-33, %cl
	jne	LBB27_52
## %bb.51:                              ## in Loop: Header=BB27_13 Depth=1
	leaq	9(%r15), %rcx
	jmp	LBB27_32
LBB27_52:                               ## in Loop: Header=BB27_13 Depth=1
	movb	%cl, 9(%r15)
	movb	10(%rax), %cl
	testb	$-33, %cl
	jne	LBB27_54
## %bb.53:                              ## in Loop: Header=BB27_13 Depth=1
	leaq	10(%r15), %rcx
	jmp	LBB27_32
LBB27_54:                               ## in Loop: Header=BB27_13 Depth=1
	movb	%cl, 10(%r15)
	movb	11(%rax), %cl
	testb	$-33, %cl
	jne	LBB27_56
## %bb.55:                              ## in Loop: Header=BB27_13 Depth=1
	leaq	11(%r15), %rcx
	jmp	LBB27_32
LBB27_56:                               ## in Loop: Header=BB27_13 Depth=1
	movb	%cl, 11(%r15)
	movb	12(%rax), %cl
	testb	$-33, %cl
	jne	LBB27_58
## %bb.57:                              ## in Loop: Header=BB27_13 Depth=1
	leaq	12(%r15), %rcx
	jmp	LBB27_32
LBB27_58:                               ## in Loop: Header=BB27_13 Depth=1
	movb	%cl, 12(%r15)
	movb	13(%rax), %cl
	testb	$-33, %cl
	jne	LBB27_60
## %bb.59:                              ## in Loop: Header=BB27_13 Depth=1
	leaq	13(%r15), %rcx
	jmp	LBB27_32
LBB27_60:                               ## in Loop: Header=BB27_13 Depth=1
	movb	%cl, 13(%r15)
	movb	14(%rax), %cl
	testb	$-33, %cl
	jne	LBB27_62
## %bb.61:                              ## in Loop: Header=BB27_13 Depth=1
	leaq	14(%r15), %rcx
	jmp	LBB27_32
LBB27_62:                               ## in Loop: Header=BB27_13 Depth=1
	movb	%cl, 14(%r15)
	movb	15(%rax), %cl
	testb	$-33, %cl
	jne	LBB27_64
## %bb.63:                              ## in Loop: Header=BB27_13 Depth=1
	leaq	15(%r15), %rcx
	jmp	LBB27_32
LBB27_64:                               ## in Loop: Header=BB27_13 Depth=1
	movb	%cl, 15(%r15)
	movb	16(%rax), %cl
	testb	$-33, %cl
	jne	LBB27_66
## %bb.65:                              ## in Loop: Header=BB27_13 Depth=1
	leaq	16(%r15), %rcx
	jmp	LBB27_32
LBB27_66:                               ## in Loop: Header=BB27_13 Depth=1
	movb	%cl, 16(%r15)
	movb	17(%rax), %cl
	testb	$-33, %cl
	jne	LBB27_68
## %bb.67:                              ## in Loop: Header=BB27_13 Depth=1
	leaq	17(%r15), %rcx
	jmp	LBB27_32
LBB27_68:                               ## in Loop: Header=BB27_13 Depth=1
	movb	%cl, 17(%r15)
	movb	18(%rax), %cl
	testb	$-33, %cl
	jne	LBB27_70
## %bb.69:                              ## in Loop: Header=BB27_13 Depth=1
	leaq	18(%r15), %rcx
	jmp	LBB27_32
LBB27_70:                               ## in Loop: Header=BB27_13 Depth=1
	movb	%cl, 18(%r15)
	movb	19(%rax), %cl
	testb	$-33, %cl
	jne	LBB27_72
## %bb.71:                              ## in Loop: Header=BB27_13 Depth=1
	leaq	19(%r15), %rcx
	jmp	LBB27_32
LBB27_72:                               ## in Loop: Header=BB27_13 Depth=1
	movb	%cl, 19(%r15)
	movb	20(%rax), %cl
	testb	$-33, %cl
	jne	LBB27_74
## %bb.73:                              ## in Loop: Header=BB27_13 Depth=1
	leaq	20(%r15), %rcx
	jmp	LBB27_32
LBB27_74:                               ## in Loop: Header=BB27_13 Depth=1
	movb	%cl, 20(%r15)
	movb	21(%rax), %cl
	testb	$-33, %cl
	jne	LBB27_76
## %bb.75:                              ## in Loop: Header=BB27_13 Depth=1
	leaq	21(%r15), %rcx
	jmp	LBB27_32
LBB27_76:                               ## in Loop: Header=BB27_13 Depth=1
	movb	%cl, 21(%r15)
	movb	22(%rax), %cl
	testb	$-33, %cl
	jne	LBB27_78
## %bb.77:                              ## in Loop: Header=BB27_13 Depth=1
	leaq	22(%r15), %rcx
	jmp	LBB27_32
LBB27_78:                               ## in Loop: Header=BB27_13 Depth=1
	movb	%cl, 22(%r15)
	movb	23(%rax), %cl
	testb	$-33, %cl
	jne	LBB27_80
## %bb.79:                              ## in Loop: Header=BB27_13 Depth=1
	leaq	23(%r15), %rcx
	jmp	LBB27_32
LBB27_80:                               ## in Loop: Header=BB27_13 Depth=1
	movb	%cl, 23(%r15)
	movb	24(%rax), %cl
	testb	$-33, %cl
	jne	LBB27_82
## %bb.81:                              ## in Loop: Header=BB27_13 Depth=1
	leaq	24(%r15), %rcx
	jmp	LBB27_32
LBB27_82:                               ## in Loop: Header=BB27_13 Depth=1
	movb	%cl, 24(%r15)
	movb	25(%rax), %cl
	testb	$-33, %cl
	jne	LBB27_84
## %bb.83:                              ## in Loop: Header=BB27_13 Depth=1
	leaq	25(%r15), %rcx
	jmp	LBB27_32
LBB27_84:                               ## in Loop: Header=BB27_13 Depth=1
	movb	%cl, 25(%r15)
	movb	26(%rax), %cl
	testb	$-33, %cl
	jne	LBB27_86
## %bb.85:                              ## in Loop: Header=BB27_13 Depth=1
	leaq	26(%r15), %rcx
	jmp	LBB27_32
LBB27_86:                               ## in Loop: Header=BB27_13 Depth=1
	movb	%cl, 26(%r15)
	movb	27(%rax), %cl
	testb	$-33, %cl
	jne	LBB27_88
## %bb.87:                              ## in Loop: Header=BB27_13 Depth=1
	leaq	27(%r15), %rcx
	jmp	LBB27_32
LBB27_88:                               ## in Loop: Header=BB27_13 Depth=1
	movb	%cl, 27(%r15)
	movb	28(%rax), %cl
	testb	$-33, %cl
	jne	LBB27_90
## %bb.89:                              ## in Loop: Header=BB27_13 Depth=1
	leaq	28(%r15), %rcx
	jmp	LBB27_32
LBB27_90:                               ## in Loop: Header=BB27_13 Depth=1
	movb	%cl, 28(%r15)
	movb	29(%rax), %dl
	testb	$-33, %dl
	jne	LBB27_92
## %bb.91:                              ## in Loop: Header=BB27_13 Depth=1
	leaq	29(%r15), %rcx
	jmp	LBB27_32
LBB27_92:                               ## in Loop: Header=BB27_13 Depth=1
	leaq	30(%r15), %rcx
	movb	%dl, 29(%r15)
	movb	30(%rax), %dl
	testb	$-33, %dl
	jne	LBB27_93                        ## 31st char: field too long -> error
LBB27_32:                               ## in Loop: Header=BB27_13 Depth=1
	movb	$0, (%rcx)                      ## NUL-terminate field 1
	subl	%r15d, %ecx                     ## ecx = chars copied
	movslq	%ecx, %rcx
	addq	%rcx, %rax                      ## rax -> delimiter after field 1
	movq	%r13, %rdx
	movq	%rax, %rcx
	.p2align	4, 0x90
LBB27_33:                               ## Parent Loop BB27_13 Depth=1
                                        ## => This Inner Loop Header: Depth=2
	## skip inter-field spaces (count kept in high dword of rdx)
	addq	%rbx, %rdx
	cmpb	$32, (%rcx)
	leaq	1(%rcx), %rcx
	je	LBB27_33
## %bb.34:                              ## in Loop: Header=BB27_13 Depth=1
	sarq	$32, %rdx
	leaq	(%rax,%rdx), %rcx
	movb	(%rax,%rdx), %dl
	leal	-48(%rdx), %esi
	xorl	%eax, %eax
	cmpb	$9, %sil                        ## is first char a digit?
	ja	LBB27_35
## %bb.95:                              ## in Loop: Header=BB27_13 Depth=1
	xorl	%eax, %eax
	movq	%rcx, %rsi
	.p2align	4, 0x90
LBB27_96:                               ## Parent Loop BB27_13 Depth=1
                                        ## => This Inner Loop Header: Depth=2
	## decimal accumulate: eax = eax*10 + (c - '0')
	movzbl	%dl, %edx
	leal	(%rax,%rax,4), %eax
	leal	(%rdx,%rax,2), %eax
	addl	$-48, %eax
	movzbl	1(%rsi), %edx
	incq	%rsi
	leal	-48(%rdx), %edi
	cmpb	$10, %dil
	jb	LBB27_96
	jmp	LBB27_97
LBB27_35:                               ## in Loop: Header=BB27_13 Depth=1
	movq	%rcx, %rsi
LBB27_97:                               ## in Loop: Header=BB27_13 Depth=1
	testb	$-33, %dl                       ## number must end at NUL/space
	jne	LBB27_98                        ## else: invalid character -> error
## %bb.100:                             ## in Loop: Header=BB27_13 Depth=1
	subl	%ecx, %esi
	movslq	%esi, %rdx
	addq	%rcx, %rdx
	movb	%al, 30(%r15)                   ## record[30] = parsed decimal value
	movq	%r13, %rsi
	movq	%rdx, %rax
	.p2align	4, 0x90
LBB27_101:                              ## Parent Loop BB27_13 Depth=1
                                        ## => This Inner Loop Header: Depth=2
	## skip spaces before next field
	addq	%rbx, %rsi
	cmpb	$32, (%rax)
	leaq	1(%rax), %rax
	je	LBB27_101
## %bb.102:                             ## in Loop: Header=BB27_13 Depth=1
	sarq	$32, %rsi
	leaq	(%rdx,%rsi), %rax
	leaq	31(%r15), %rdi                  ## field 3 -> record[31..34] (max 4 chars)
	movb	(%rdx,%rsi), %cl
	movq	%rdi, %rdx
	testb	$-33, %cl
	je	LBB27_105
## %bb.103:                             ## in Loop: Header=BB27_13 Depth=1
	movb	%cl, 31(%r15)
	movb	1(%rax), %cl
	testb	$-33, %cl
	jne	LBB27_131
## %bb.104:                             ## in Loop: Header=BB27_13 Depth=1
	leaq	32(%r15), %rdx
	jmp	LBB27_105
LBB27_131:                              ## in Loop: Header=BB27_13 Depth=1
	movb	%cl, 32(%r15)
	movb	2(%rax), %cl
	testb	$-33, %cl
	jne	LBB27_133
## %bb.132:                             ## in Loop: Header=BB27_13 Depth=1
	leaq	33(%r15), %rdx
	jmp	LBB27_105
LBB27_133:                              ## in Loop: Header=BB27_13 Depth=1
	movb	%cl, 33(%r15)
	movb	3(%rax), %cl
	testb	$-33, %cl
	jne	LBB27_135
## %bb.134:                             ## in Loop: Header=BB27_13 Depth=1
	leaq	34(%r15), %rdx
	jmp	LBB27_105
LBB27_135:                              ## in Loop: Header=BB27_13 Depth=1
	leaq	35(%r15), %rdx
	movb	%cl, 34(%r15)
	movb	4(%rax), %cl
	testb	$-33, %cl
	jne	LBB27_136                       ## field too long -> error
LBB27_105:                              ## in Loop: Header=BB27_13 Depth=1
	movb	$0, (%rdx)                      ## NUL-terminate field 3
	subl	%edi, %edx
	movslq	%edx, %rcx
	addq	%rcx, %rax
	movq	%r13, %rsi
	movq	%rax, %rcx
	.p2align	4, 0x90
LBB27_106:                              ## Parent Loop BB27_13 Depth=1
                                        ## => This Inner Loop Header: Depth=2
	addq	%rbx, %rsi
	cmpb	$32, (%rcx)
	leaq	1(%rcx), %rcx
	je	LBB27_106
## %bb.107:                             ## in Loop: Header=BB27_13 Depth=1
	sarq	$32, %rsi
	leaq	(%rax,%rsi), %rcx
	leaq	35(%r15), %rdi                  ## field 4 -> record[35..39] (max 5 chars)
	movb	(%rax,%rsi), %dl
	movq	%rdi, %rax
	testb	$-33, %dl
	je	LBB27_110
## %bb.108:                             ## in Loop: Header=BB27_13 Depth=1
	movb	%dl, 35(%r15)
	movb	1(%rcx), %al
	testb	$-33, %al
	jne	LBB27_137
## %bb.109:                             ## in Loop: Header=BB27_13 Depth=1
	leaq	36(%r15), %rax
	jmp	LBB27_110
LBB27_137:                              ## in Loop: Header=BB27_13 Depth=1
	movb	%al, 36(%r15)
	movb	2(%rcx), %al
	testb	$-33, %al
	jne	LBB27_139
## %bb.138:                             ## in Loop: Header=BB27_13 Depth=1
	leaq	37(%r15), %rax
	jmp	LBB27_110
LBB27_139:                              ## in Loop: Header=BB27_13 Depth=1
	movb	%al, 37(%r15)
	movb	3(%rcx), %al
	testb	$-33, %al
	jne	LBB27_141
## %bb.140:                             ## in Loop: Header=BB27_13 Depth=1
	leaq	38(%r15), %rax
	jmp	LBB27_110
LBB27_141:                              ## in Loop: Header=BB27_13 Depth=1
	movb	%al, 38(%r15)
	movb	4(%rcx), %dl
	testb	$-33, %dl
	jne	LBB27_143
## %bb.142:                             ## in Loop: Header=BB27_13 Depth=1
	leaq	39(%r15), %rax
	jmp	LBB27_110
LBB27_143:                              ## in Loop: Header=BB27_13 Depth=1
	leaq	40(%r15), %rax
	movb	%dl, 39(%r15)
	movb	5(%rcx), %dl
	testb	$-33, %dl
	jne	LBB27_144                       ## field too long -> error
LBB27_110:                              ## in Loop: Header=BB27_13 Depth=1
	movb	$0, (%rax)                      ## NUL-terminate field 4
	subl	%edi, %eax
	cltq
	addq	%rax, %rcx
	movq	%r13, %rdx
	movq	%rcx, %rax
	.p2align	4, 0x90
LBB27_111:                              ## Parent Loop BB27_13 Depth=1
                                        ## => This Inner Loop Header: Depth=2
	addq	%rbx, %rdx
	cmpb	$32, (%rax)
	leaq	1(%rax), %rax
	je	LBB27_111
## %bb.112:                             ## in Loop: Header=BB27_13 Depth=1
	sarq	$32, %rdx
	leaq	(%rcx,%rdx), %rax
	movb	(%rcx,%rdx), %dl
	leaq	-80(%rbp), %rcx                 ## field 5 -> 18-byte local at -80(%rbp)
	testb	$-33, %dl
	je	LBB27_145
## %bb.113:                             ## in Loop: Header=BB27_13 Depth=1
	movb	%dl, -80(%rbp)
	movb	1(%rax), %dl
	leaq	-79(%rbp), %rcx
	testb	$-33, %dl
	je	LBB27_145
## %bb.114:                             ## in Loop: Header=BB27_13 Depth=1
	movb	%dl, -79(%rbp)
	movb	2(%rax), %dl
	leaq	-78(%rbp), %rcx
	testb	$-33, %dl
	je	LBB27_145
## %bb.115:                             ## in Loop: Header=BB27_13 Depth=1
	movb	%dl, -78(%rbp)
	movb	3(%rax), %dl
	leaq	-77(%rbp), %rcx
	testb	$-33, %dl
	je	LBB27_145
## %bb.116:                             ## in Loop: Header=BB27_13 Depth=1
	movb	%dl, -77(%rbp)
	movb	4(%rax), %dl
	leaq	-76(%rbp), %rcx
	testb	$-33, %dl
	je	LBB27_145
## %bb.117:                             ## in Loop: Header=BB27_13 Depth=1
	movb	%dl, -76(%rbp)
	movb	5(%rax), %dl
	leaq	-75(%rbp), %rcx
	testb	$-33, %dl
	je	LBB27_145
## %bb.118:                             ## in Loop: Header=BB27_13 Depth=1
	movb	%dl, -75(%rbp)
	movb	6(%rax), %dl
	leaq	-74(%rbp), %rcx
	testb	$-33, %dl
	je	LBB27_145
## %bb.119:                             ## in Loop: Header=BB27_13 Depth=1
	movb	%dl, -74(%rbp)
	movb	7(%rax), %dl
	leaq	-73(%rbp), %rcx
	testb	$-33, %dl
	je	LBB27_145
## %bb.120:                             ## in Loop: Header=BB27_13 Depth=1
	movb	%dl, -73(%rbp)
	movb	8(%rax), %dl
	leaq	-72(%rbp), %rcx
	testb	$-33, %dl
	je	LBB27_145
## %bb.121:                             ## in Loop: Header=BB27_13 Depth=1
	movb	%dl, -72(%rbp)
	movb	9(%rax), %dl
	leaq	-71(%rbp), %rcx
	testb	$-33, %dl
	je	LBB27_145
## %bb.122:                             ## in Loop: Header=BB27_13 Depth=1
	movb	%dl, -71(%rbp)
	movb	10(%rax), %dl
	leaq	-70(%rbp), %rcx
	testb	$-33, %dl
	je	LBB27_145
## %bb.123:                             ## in Loop: Header=BB27_13 Depth=1
	movb	%dl, -70(%rbp)
	movb	11(%rax), %dl
	leaq	-69(%rbp), %rcx
	testb	$-33, %dl
	je	LBB27_145
## %bb.124:                             ## in Loop: Header=BB27_13 Depth=1
	movb	%dl, -69(%rbp)
	movb	12(%rax), %dl
	leaq	-68(%rbp), %rcx
	testb	$-33, %dl
	je	LBB27_145
## %bb.125:                             ## in Loop: Header=BB27_13 Depth=1
	movb	%dl, -68(%rbp)
	movb	13(%rax), %dl
	leaq	-67(%rbp), %rcx
	testb	$-33, %dl
	je	LBB27_145
## %bb.126:                             ## in Loop: Header=BB27_13 Depth=1
	movb	%dl, -67(%rbp)
	movb	14(%rax), %dl
	leaq	-66(%rbp), %rcx
	testb	$-33, %dl
	je	LBB27_145
## %bb.127:                             ## in Loop: Header=BB27_13 Depth=1
	movb	%dl, -66(%rbp)
	movb	15(%rax), %dl
	leaq	-65(%rbp), %rcx
	testb	$-33, %dl
	je	LBB27_145
## %bb.128:                             ## in Loop: Header=BB27_13 Depth=1
	movb	%dl, -65(%rbp)
	movb	16(%rax), %dl
	leaq	-64(%rbp), %rcx
	testb	$-33, %dl
	je	LBB27_145
## %bb.129:                             ## in Loop: Header=BB27_13 Depth=1
	movb	%dl, -64(%rbp)
	movb	17(%rax), %dl
	leaq	-63(%rbp), %rcx
	testb	$-33, %dl
	jne	LBB27_130                       ## field too long -> error
LBB27_145:                              ## in Loop: Header=BB27_13 Depth=1
	movb	$0, (%rcx)                      ## NUL-terminate local field
	leaq	-80(%rbp), %rdx
	subl	%edx, %ecx
	movslq	%ecx, %rcx
	addq	%rcx, %rax
	movq	%r13, %rsi
	movq	%rax, %rcx
	.p2align	4, 0x90
LBB27_146:                              ## Parent Loop BB27_13 Depth=1
                                        ## => This Inner Loop Header: Depth=2
	addq	%rbx, %rsi
	cmpb	$32, (%rcx)
	leaq	1(%rcx), %rcx
	je	LBB27_146
## %bb.147:                             ## in Loop: Header=BB27_13 Depth=1
	sarq	$32, %rsi
	leaq	(%rax,%rsi), %rcx
	leaq	46(%r15), %rdi                  ## field 6 -> record[46..56] (max 11 chars)
	movb	(%rax,%rsi), %dl
	movq	%rdi, %rax
	testb	$-33, %dl
	je	LBB27_169
## %bb.148:                             ## in Loop: Header=BB27_13 Depth=1
	movb	%dl, 46(%r15)
	movb	1(%rcx), %al
	testb	$-33, %al
	jne	LBB27_150
## %bb.149:                             ## in Loop: Header=BB27_13 Depth=1
	leaq	47(%r15), %rax
	jmp	LBB27_169
LBB27_150:                              ## in Loop: Header=BB27_13 Depth=1
	movb	%al, 47(%r15)
	movb	2(%rcx), %al
	testb	$-33, %al
	jne	LBB27_152
## %bb.151:                             ## in Loop: Header=BB27_13 Depth=1
	leaq	48(%r15), %rax
	jmp	LBB27_169
LBB27_152:                              ## in Loop: Header=BB27_13 Depth=1
	movb	%al, 48(%r15)
	movb	3(%rcx), %al
	testb	$-33, %al
	jne	LBB27_154
## %bb.153:                             ## in Loop: Header=BB27_13 Depth=1
	leaq	49(%r15), %rax
	jmp	LBB27_169
LBB27_154:                              ## in Loop: Header=BB27_13 Depth=1
	movb	%al, 49(%r15)
	movb	4(%rcx), %al
	testb	$-33, %al
	jne	LBB27_156
## %bb.155:                             ## in Loop: Header=BB27_13 Depth=1
	leaq	50(%r15), %rax
	jmp	LBB27_169
LBB27_156:                              ## in Loop: Header=BB27_13 Depth=1
	movb	%al, 50(%r15)
	movb	5(%rcx), %al
	testb	$-33, %al
	jne	LBB27_158
## %bb.157:                             ## in Loop: Header=BB27_13 Depth=1
	leaq	51(%r15), %rax
	jmp	LBB27_169
LBB27_158:                              ## in Loop: Header=BB27_13 Depth=1
	movb	%al, 51(%r15)
	movb	6(%rcx), %al
	testb	$-33, %al
	jne	LBB27_160
## %bb.159:                             ## in Loop: Header=BB27_13 Depth=1
	leaq	52(%r15), %rax
	jmp	LBB27_169
LBB27_160:                              ## in Loop: Header=BB27_13 Depth=1
	movb	%al, 52(%r15)
	movb	7(%rcx), %al
	testb	$-33, %al
	jne	LBB27_162
## %bb.161:                             ## in Loop: Header=BB27_13 Depth=1
	leaq	53(%r15), %rax
	jmp	LBB27_169
LBB27_162:                              ## in Loop: Header=BB27_13 Depth=1
	movb	%al, 53(%r15)
	movb	8(%rcx), %al
	testb	$-33, %al
	jne	LBB27_164
## %bb.163:                             ## in Loop: Header=BB27_13 Depth=1
	leaq	54(%r15), %rax
	jmp	LBB27_169
LBB27_164:                              ## in Loop: Header=BB27_13 Depth=1
	movb	%al, 54(%r15)
	movb	9(%rcx), %al
	testb	$-33, %al
	jne	LBB27_166
## %bb.165:                             ## in Loop: Header=BB27_13 Depth=1
	leaq	55(%r15), %rax
	jmp	LBB27_169
LBB27_166:                              ## in Loop: Header=BB27_13 Depth=1
	movb	%al, 55(%r15)
	movb	10(%rcx), %dl
	testb	$-33, %dl
	jne	LBB27_168
## %bb.167:                             ## in Loop: Header=BB27_13 Depth=1
	leaq	56(%r15), %rax
	jmp	LBB27_169
LBB27_168:                              ## in Loop: Header=BB27_13 Depth=1
	leaq	57(%r15), %rax
	movb	%dl, 56(%r15)
	movb	11(%rcx), %dl
	testb	$-33, %dl
	jne	LBB27_144                       ## field too long -> error
LBB27_169:                              ## in Loop: Header=BB27_13 Depth=1
	movb	$0, (%rax)                      ## NUL-terminate field 6
	subl	%edi, %eax
	cltq
	addq	%rax, %rcx
	movq	%r13, %rdx
	movq	%rcx, %rax
	.p2align	4, 0x90
LBB27_170:                              ## Parent Loop BB27_13 Depth=1
                                        ## => This Inner Loop Header: Depth=2
	addq	%rbx, %rdx
	cmpb	$32, (%rax)
	leaq	1(%rax), %rax
	je	LBB27_170
## %bb.171:                             ## in Loop: Header=BB27_13 Depth=1
	## next three fields are single characters -> record[57], [58], [59]
	sarq	$32, %rdx
	leaq	(%rcx,%rdx), %rax
	incq	%rax
	movb	(%rcx,%rdx), %cl
	movb	%cl, 57(%r15)
	movq	%r13, %rdx
	movq	%rax, %rcx
	.p2align	4, 0x90
LBB27_172:                              ## Parent Loop BB27_13 Depth=1
                                        ## => This Inner Loop Header: Depth=2
	addq	%rbx, %rdx
	cmpb	$32, (%rcx)
	leaq	1(%rcx), %rcx
	je	LBB27_172
## %bb.173:                             ## in Loop: Header=BB27_13 Depth=1
	sarq	$32, %rdx
	leaq	(%rax,%rdx), %rcx
	incq	%rcx
	movb	(%rax,%rdx), %al
	movb	%al, 58(%r15)
	movq	%r13, %rdx
	movq	%rcx, %rax
	.p2align	4, 0x90
LBB27_174:                              ## Parent Loop BB27_13 Depth=1
                                        ## => This Inner Loop Header: Depth=2
	addq	%rbx, %rdx
	cmpb	$32, (%rax)
	leaq	1(%rax), %rax
	je	LBB27_174
## %bb.175:                             ## in Loop: Header=BB27_13 Depth=1
	sarq	$32, %rdx
	leaq	(%rcx,%rdx), %rax
	incq	%rax
	movb	(%rcx,%rdx), %cl
	movb	%cl, 59(%r15)
	movq	%r13, %rcx
	movq	%rax, %rdx
	.p2align	4, 0x90
LBB27_176:                              ## Parent Loop BB27_13 Depth=1
                                        ## => This Inner Loop Header: Depth=2
	addq	%rbx, %rcx
	cmpb	$32, (%rdx)
	leaq	1(%rdx), %rdx
	je	LBB27_176
## %bb.177:                             ## in Loop: Header=BB27_13 Depth=1
	sarq	$32, %rcx
	addq	%rcx, %rax
	movq	%r13, %rdx
	movq	%rax, %rcx
	.p2align	4, 0x90
LBB27_178:                              ## Parent Loop BB27_13 Depth=1
                                        ## => This Inner Loop Header: Depth=2
	addq	%rbx, %rdx
	cmpb	$32, (%rcx)
	leaq	1(%rcx), %rcx
	je	LBB27_178
## %bb.179:                             ## in Loop: Header=BB27_13 Depth=1
	## field: '.' means "none" -> record[60]='.', record[64]=0;
	## otherwise record[60]='0' and record[64] = parsed decimal
	sarq	$32, %rdx
	leaq	(%rax,%rdx), %rcx
	cmpb	$46, (%rax,%rdx)
	jne	LBB27_181
## %bb.180:                             ## in Loop: Header=BB27_13 Depth=1
	movb	$46, 60(%r15)
	movb	$0, 64(%r15)
	incq	%rcx
	jmp	LBB27_188
LBB27_181:                              ## in Loop: Header=BB27_13 Depth=1
	movb	$48, 60(%r15)
	movb	(%rcx), %al
	leal	-48(%rax), %esi
	xorl	%edx, %edx
	cmpb	$9, %sil
	ja	LBB27_182
## %bb.183:                             ## in Loop: Header=BB27_13 Depth=1
	xorl	%edx, %edx
	movq	%rcx, %rsi
	.p2align	4, 0x90
LBB27_184:                              ## Parent Loop BB27_13 Depth=1
                                        ## => This Inner Loop Header: Depth=2
	## edx = edx*10 + (c - '0')
	movzbl	%al, %eax
	leal	(%rdx,%rdx,4), %edx
	leal	(%rax,%rdx,2), %edx
	addl	$-48, %edx
	movzbl	1(%rsi), %eax
	incq	%rsi
	leal	-48(%rax), %edi
	cmpb	$10, %dil
	jb	LBB27_184
	jmp	LBB27_185
LBB27_182:                              ## in Loop: Header=BB27_13 Depth=1
	movq	%rcx, %rsi
LBB27_185:                              ## in Loop: Header=BB27_13 Depth=1
	testb	$-33, %al
	jne	LBB27_186                       ## junk after number -> error
## %bb.187:                             ## in Loop: Header=BB27_13 Depth=1
	subl	%ecx, %esi
	movslq	%esi, %rax
	addq	%rax, %rcx
	movb	%dl, 64(%r15)
LBB27_188:                              ## in Loop: Header=BB27_13 Depth=1
	movq	%r13, %rax
	movq	%rcx, %rdx
	.p2align	4, 0x90
LBB27_189:                              ## Parent Loop BB27_13 Depth=1
                                        ## => This Inner Loop Header: Depth=2
	addq	%rbx, %rax
	cmpb	$32, (%rdx)
	leaq	1(%rdx), %rdx
	je	LBB27_189
## %bb.190:                             ## in Loop: Header=BB27_13 Depth=1
	## same '.'-or-number scheme -> record[61] ('.' or '1'), record[65]
	sarq	$32, %rax
	leaq	(%rcx,%rax), %rdx
	cmpb	$46, (%rcx,%rax)
	jne	LBB27_191
## %bb.197:                             ## in Loop: Header=BB27_13 Depth=1
	movb	$46, 61(%r15)
	movb	$0, 65(%r15)
	incq	%rdx
	movq	%rdx, %rsi
	jmp	LBB27_198
LBB27_191:                              ## in Loop: Header=BB27_13 Depth=1
	movb	$49, 61(%r15)
	movb	(%rdx), %al
	leal	-48(%rax), %esi
	xorl	%ecx, %ecx
	cmpb	$9, %sil
	ja	LBB27_192
## %bb.193:                             ## in Loop: Header=BB27_13 Depth=1
	xorl	%ecx, %ecx
	movq	%rdx, %rsi
	.p2align	4, 0x90
LBB27_194:                              ## Parent Loop BB27_13 Depth=1
                                        ## => This Inner Loop Header: Depth=2
	movzbl	%al, %eax
	leal	(%rcx,%rcx,4), %ecx
	leal	(%rax,%rcx,2), %ecx
	addl	$-48, %ecx
	movzbl	1(%rsi), %eax
	incq	%rsi
	leal	-48(%rax), %edi
	cmpb	$10, %dil
	jb	LBB27_194
	jmp	LBB27_195
LBB27_192:                              ## in Loop: Header=BB27_13 Depth=1
	movq	%rdx, %rsi
LBB27_195:                              ## in Loop: Header=BB27_13 Depth=1
	testb	$-33, %al
	jne	LBB27_186
## %bb.196:                             ## in Loop: Header=BB27_13 Depth=1
	subl	%edx, %esi
	movslq	%esi, %rsi
	addq	%rdx, %rsi
	movb	%cl, 65(%r15)
LBB27_198:                              ## in Loop: Header=BB27_13 Depth=1
	xorl	%edi, %edi
	movq	%r13, %rax
	movq	%rsi, %rcx
	.p2align	4, 0x90
LBB27_199:                              ## Parent Loop BB27_13 Depth=1
                                        ## => This Inner Loop Header: Depth=2
	movl	%edi, %edx
	addq	%rbx, %rax
	cmpb	$32, (%rcx)
	leaq	1(%rcx), %rcx
	leal	1(%rdx), %edi
	je	LBB27_199
## %bb.200:                             ## in Loop: Header=BB27_13 Depth=1
	## last field -> record[62] ('.' or '2'), record[66]
	sarq	$32, %rax
	cmpb	$46, (%rsi,%rax)
	jne	LBB27_201
## %bb.205:                             ## in Loop: Header=BB27_13 Depth=1
	movb	$46, 62(%r15)
	xorl	%ecx, %ecx
	jmp	LBB27_206
LBB27_201:                              ## in Loop: Header=BB27_13 Depth=1
	movb	$50, 62(%r15)
	movb	(%rsi,%rax), %al
	leal	-48(%rax), %edi
	xorl	%ecx, %ecx
	cmpb	$9, %dil
	ja	LBB27_204
## %bb.202:                             ## in Loop: Header=BB27_13 Depth=1
	movslq	%edx, %rcx
	leaq	(%rsi,%rcx), %rdx
	incq	%rdx
	xorl	%ecx, %ecx
	.p2align	4, 0x90
LBB27_203:                              ## Parent Loop BB27_13 Depth=1
                                        ## => This Inner Loop Header: Depth=2
	movzbl	%al, %eax
	leal	(%rcx,%rcx,4), %ecx
	leal	(%rax,%rcx,2), %ecx
	addl	$-48, %ecx
	movzbl	(%rdx), %eax
	leal	-48(%rax), %esi
	incq	%rdx
	cmpb	$10, %sil
	jb	LBB27_203
LBB27_204:                              ## in Loop: Header=BB27_13 Depth=1
	testb	$-33, %al
	jne	LBB27_186
LBB27_206:                              ## in Loop: Header=BB27_13 Depth=1
	movb	%cl, 66(%r15)
	## SIMD: from the 16-char flag string saved at -80(%rbp), build two
	## 16-bit bitmasks (bit 15 = first char, LCPI27_3/LCPI27_5 weights):
	##   record[42..43] = OR of weights where char != '.'
	##   record[44..45] = OR of weights where char == '1'
	movq	-80(%rbp), %xmm0                ## xmm0 = mem[0],zero
	movdqa	%xmm0, %xmm1
	movdqa	LCPI27_2(%rip), %xmm4           ## xmm4 = <46,46,46,46,46,46,46,46,u,u,u,u,u,u,u,u>
	pcmpeqb	%xmm4, %xmm1
	pmovsxbw	%xmm1, %xmm1
	movdqa	LCPI27_3(%rip), %xmm2           ## xmm2 = [32768,16384,8192,4096,2048,1024,512,256]
	pandn	%xmm2, %xmm1
	movdqa	LCPI27_4(%rip), %xmm5           ## xmm5 = <49,49,49,49,49,49,49,49,u,u,u,u,u,u,u,u>
	pcmpeqb	%xmm5, %xmm0
	pmovsxbw	%xmm0, %xmm0
	pand	%xmm2, %xmm0
	movq	-72(%rbp), %xmm2                ## xmm2 = mem[0],zero
	movdqa	%xmm2, %xmm3
	pcmpeqb	%xmm4, %xmm3
	pmovzxbw	%xmm3, %xmm3            ## xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
	movdqa	LCPI27_5(%rip), %xmm4           ## xmm4 = [128,64,32,16,8,4,2,1]
	pandn	%xmm4, %xmm3
	por	%xmm1, %xmm3
	pcmpeqb	%xmm5, %xmm2
	pmovzxbw	%xmm2, %xmm1            ## xmm1 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
	pand	%xmm4, %xmm1
	por	%xmm0, %xmm1
	pshufd	$238, %xmm1, %xmm0              ## xmm0 = xmm1[2,3,2,3]
	por	%xmm1, %xmm0
	pshufd	$85, %xmm0, %xmm1               ## xmm1 = xmm0[1,1,1,1]
	por	%xmm0, %xmm1
	movdqa	%xmm1, %xmm0
	psrld	$16, %xmm0
	pshufd	$238, %xmm3, %xmm2              ## xmm2 = xmm3[2,3,2,3]
	por	%xmm3, %xmm2
	pshufd	$85, %xmm2, %xmm3               ## xmm3 = xmm2[1,1,1,1]
	por	%xmm2, %xmm3
	movdqa	%xmm3, %xmm2
	psrld	$16, %xmm2
	por	%xmm3, %xmm2
	pextrw	$0, %xmm2, 42(%r15)             ## record[42..43] = "!= '.'" mask
	por	%xmm1, %xmm0
	pextrw	$0, %xmm0, 44(%r15)             ## record[44..45] = "== '1'" mask
	jmp	LBB27_207
LBB27_209:
	## end-of-table marker: zero-terminate the record array and return
	movb	$0, (%r15)
	movq	___stack_chk_guard@GOTPCREL(%rip), %rax
	movq	(%rax), %rax
	cmpq	-48(%rbp), %rax                 ## canary intact?
	jne	LBB27_211
## %bb.210:
	addq	$280, %rsp                      ## imm = 0x118
	popq	%rbx
	popq	%r12
	popq	%r13
	popq	%r14
	popq	%r15
	popq	%rbp
	retq
LBB27_186:
	## bad character inside a numeric field: error_exit(fmt, (int)char)
	movsbl	%al, %esi
	jmp	LBB27_99
LBB27_98:
	movsbl	%dl, %esi
LBB27_99:
	leaq	L_.str.61(%rip), %rdi
	xorl	%eax, %eax
	callq	_error_exit
LBB27_211:
	callq	___stack_chk_fail
LBB27_136:
	## field overflow paths: store the overflowing byte, then error_exit
	movb	%cl, (%rdx)
	jmp	LBB27_94
LBB27_144:
	movb	%dl, (%rax)
LBB27_94:
	leaq	L_.str.60(%rip), %rdi
	xorl	%eax, %eax
	callq	_error_exit
LBB27_93:
	movb	%dl, (%rcx)
	jmp	LBB27_94
LBB27_130:
	movb	%dl, -63(%rbp)
	jmp	LBB27_94
	.cfi_endproc
                                        ## -- End function
## ---------------------------------------------------------------------------
## void read_insert(char *buf)   -- compiler-generated (clang -O2, SysV AMD64)
##
## In:  rdi = destination buffer (capacity at least 4800 bytes, per the
##      4798/4799 guards below -- TODO confirm against the C source).
## Reads lines from the global FILE* _g_input_file with fgets(line, 200, ...),
## stripping a leading '\r' and trailing '\r'/'\n', bumping _g_line_number
## per line.  Leading blank lines are discarded (loop BB28_1 repeats while
## the trimmed length is 0).  Each kept line is appended to buf followed by
## "\n\0" (movw $10 stores 0x000A).  Reading stops when a line strcmp-equals
## L_.str.112 (the same section-terminator string _populate_table checks);
## the buffer is then NUL-terminated.  NOTE(review): r15/r12 remember the
## start of the most recent run of empty lines, so trailing blank lines
## before the terminator appear to be trimmed -- confirm.
## Errors: total length reaching 4799 -> error_exit(L_.str.124);
##         EOF before the terminator -> error_exit(L_.str.125).
## ---------------------------------------------------------------------------
	.globl	_read_insert                    ## -- Begin function read_insert
	.p2align	4, 0x90
_read_insert:                           ## @read_insert
	.cfi_startproc
## %bb.0:
	pushq	%rbp
	.cfi_def_cfa_offset 16
	.cfi_offset %rbp, -16
	movq	%rsp, %rbp
	.cfi_def_cfa_register %rbp
	pushq	%r15
	pushq	%r14
	pushq	%r13
	pushq	%r12
	pushq	%rbx
	pushq	%rax
	.cfi_offset %rbx, -56
	.cfi_offset %r12, -48
	.cfi_offset %r13, -40
	.cfi_offset %r14, -32
	.cfi_offset %r15, -24
	movq	%rdi, %rbx                      ## rbx = buf
	leaq	1(%rdi), %r14                   ## r14 = buf + 1 (CR-strip memcpy source)
	leaq	-1(%rdi), %r15                  ## r15 = buf - 1 (backward-scan base)
	jmp	LBB28_1
	.p2align	4, 0x90
LBB28_9:                                ## in Loop: Header=BB28_1 Depth=1
	xorl	%eax, %eax
LBB28_10:                               ## in Loop: Header=BB28_1 Depth=1
	movslq	%eax, %rcx
	movb	$0, (%rbx,%rcx)                 ## NUL-terminate trimmed line
	incl	_g_line_number(%rip)
	testl	%ecx, %ecx
	jne	LBB28_11                        ## non-empty line found: keep it
LBB28_1:                                ## =>This Loop Header: Depth=1
                                        ## Child Loop BB28_6 Depth 2
	## skip blank lines: fgets(buf, 200, g_input_file)
	movq	_g_input_file(%rip), %rdx
	movq	%rbx, %rdi
	movl	$200, %esi
	callq	_fgets
	testq	%rax, %rax
	je	LBB28_28                        ## EOF -> error
## %bb.2:                               ## in Loop: Header=BB28_1 Depth=1
	cmpb	$13, (%rbx)                     ## leading '\r'?
	jne	LBB28_4
## %bb.3:                               ## in Loop: Header=BB28_1 Depth=1
	movl	$199, %edx
	movq	%rbx, %rdi
	movq	%r14, %rsi
	callq	_memcpy
LBB28_4:                                ## in Loop: Header=BB28_1 Depth=1
	movq	%rbx, %rdi
	callq	_strlen
	testl	%eax, %eax
	je	LBB28_9
## %bb.5:                               ## in Loop: Header=BB28_1 Depth=1
	movslq	%eax, %rcx
	addq	%r15, %rcx                      ## rcx -> last char of line
	jmp	LBB28_6
	.p2align	4, 0x90
LBB28_8:                                ## in Loop: Header=BB28_6 Depth=2
	decq	%rcx
	decl	%eax
	je	LBB28_9
LBB28_6:                                ## Parent Loop BB28_1 Depth=1
                                        ## => This Inner Loop Header: Depth=2
	## strip trailing '\r'/'\n'
	movzbl	(%rcx), %edx
	cmpb	$13, %dl
	je	LBB28_8
## %bb.7:                               ## in Loop: Header=BB28_6 Depth=2
	cmpb	$10, %dl
	je	LBB28_8
	jmp	LBB28_10
LBB28_11:
	js	LBB28_28                        ## negative length: treat as input error
## %bb.12:
	movl	%eax, %r13d                     ## r13 = running total length
	movw	$10, (%rbx,%r13)                ## append "\n\0" after first line
	cmpl	$4798, %eax                     ## imm = 0x12BE; capacity guard
	ja	LBB28_26
## %bb.13:
	movabsq	$-4294967296, %r14              ## imm = 0xFFFFFFFF00000000 (high-dword -1)
	xorl	%r15d, %r15d                    ## r15 = start of trailing blank run (0 = none)
	movq	%rbx, -48(%rbp)                 ## 8-byte Spill (buf)
	.p2align	4, 0x90
LBB28_14:                               ## =>This Loop Header: Depth=1
                                        ## Child Loop BB28_19 Depth 2
	## append next line at buf + r13 + 1
	leaq	(%rbx,%r13), %r12
	incq	%r12
	movq	_g_input_file(%rip), %rdx
	movq	%r12, %rdi
	movl	$200, %esi
	callq	_fgets
	testq	%rax, %rax
	je	LBB28_28
## %bb.15:                              ## in Loop: Header=BB28_14 Depth=1
	cmpb	$13, (%r12)                     ## leading '\r'?
	jne	LBB28_17
## %bb.16:                              ## in Loop: Header=BB28_14 Depth=1
	leaq	1(%r12), %rsi
	movl	$199, %edx
	movq	%r12, %rdi
	callq	_memcpy
LBB28_17:                               ## in Loop: Header=BB28_14 Depth=1
	movq	%r12, %rdi
	callq	_strlen
	movq	%rax, %rbx                      ## rbx = line length
	testl	%ebx, %ebx
	je	LBB28_22
## %bb.18:                              ## in Loop: Header=BB28_14 Depth=1
	movq	%rbx, %rax
	shlq	$32, %rax                       ## high dword = trimmed length
	movslq	%ebx, %rcx
	addq	%r13, %rcx
	addq	-48(%rbp), %rcx                 ## 8-byte Folded Reload; rcx -> last char
	jmp	LBB28_19
	.p2align	4, 0x90
LBB28_21:                               ## in Loop: Header=BB28_19 Depth=2
	addq	%r14, %rax                      ## trimmed length--
	decq	%rcx
	decl	%ebx
	je	LBB28_22
LBB28_19:                               ## Parent Loop BB28_14 Depth=1
                                        ## => This Inner Loop Header: Depth=2
	## strip trailing '\r'/'\n'
	movzbl	(%rcx), %edx
	cmpb	$13, %dl
	je	LBB28_21
## %bb.20:                              ## in Loop: Header=BB28_19 Depth=2
	cmpb	$10, %dl
	je	LBB28_21
## %bb.23:                              ## in Loop: Header=BB28_14 Depth=1
	sarq	$32, %rax
	movb	$0, (%r12,%rax)                 ## NUL-terminate appended line
	incl	_g_line_number(%rip)
	testl	%ebx, %ebx
	jns	LBB28_24
	jmp	LBB28_28
	.p2align	4, 0x90
LBB28_22:                               ## in Loop: Header=BB28_14 Depth=1
	## line empty after trimming
	movb	$0, (%r12)
	incl	_g_line_number(%rip)
	xorl	%ebx, %ebx
LBB28_24:                               ## in Loop: Header=BB28_14 Depth=1
	## terminator line?
	movq	%r12, %rdi
	leaq	L_.str.112(%rip), %rsi
	callq	_strcmp
	testl	%eax, %eax
	je	LBB28_27
## %bb.25:                              ## in Loop: Header=BB28_14 Depth=1
	## keep: account for the '\n'; track start of blank-line run in r12/r15
	incq	%r13
	testq	%r15, %r15
	cmovneq	%r15, %r12                      ## extend existing blank run
	testl	%ebx, %ebx
	movl	$0, %eax
	cmovneq	%rax, %r12                      ## non-empty line resets the run
	movl	%ebx, %eax
	addq	%rax, %r13                      ## total += line length
	movq	-48(%rbp), %rbx                 ## 8-byte Reload (buf)
	movw	$10, (%rbx,%r13)                ## append "\n\0"
	movq	%r12, %r15
	cmpq	$4799, %r13                     ## imm = 0x12BF; capacity guard
	jl	LBB28_14
LBB28_26:
	leaq	L_.str.124(%rip), %rdi
	xorl	%eax, %eax
	callq	_error_exit
LBB28_27:
	## terminator found: truncate at start of trailing blank run (if any)
	testq	%r15, %r15
	cmovneq	%r15, %r12
	movb	$0, (%r12)
	addq	$8, %rsp
	popq	%rbx
	popq	%r12
	popq	%r13
	popq	%r14
	popq	%r15
	popq	%rbp
	retq
LBB28_28:
	leaq	L_.str.125(%rip), %rdi
	xorl	%eax, %eax
	callq	_error_exit
	.cfi_endproc
                                        ## -- End function
	.section	__TEXT,__literal16,16byte_literals
	.p2align	4                               ## -- Begin function main
## Literal pool for main: 16-byte chunks of section-marker strings, used in
## pairs of overlapping SSE compares (same scheme as LCPI27_0/1).
## NOTE(review): the full strings start before these chunks; presumably
## "...PROTOTYPE_HEADER", "M68KMAKE_TABLE_HEADER", "..._HANDLER_HEADER" and
## the matching *_FOOTER markers -- confirm against the generator's C source.
## LCPI29_0 = "ROTOTYPE_HEADER\0"
LCPI29_0:
	.byte	82                              ## 0x52
	.byte	79                              ## 0x4f
	.byte	84                              ## 0x54
	.byte	79                              ## 0x4f
	.byte	84                              ## 0x54
	.byte	89                              ## 0x59
	.byte	80                              ## 0x50
	.byte	69                              ## 0x45
	.byte	95                              ## 0x5f
	.byte	72                              ## 0x48
	.byte	69                              ## 0x45
	.byte	65                              ## 0x41
	.byte	68                              ## 0x44
	.byte	69                              ## 0x45
	.byte	82                              ## 0x52
	.byte	0                               ## 0x0
## LCPI29_1 = "KE_TABLE_HEADER\0"
LCPI29_1:
	.byte	75                              ## 0x4b
	.byte	69                              ## 0x45
	.byte	95                              ## 0x5f
	.byte	84                              ## 0x54
	.byte	65                              ## 0x41
	.byte	66                              ## 0x42
	.byte	76                              ## 0x4c
	.byte	69                              ## 0x45
	.byte	95                              ## 0x5f
	.byte	72                              ## 0x48
	.byte	69                              ## 0x45
	.byte	65                              ## 0x41
	.byte	68                              ## 0x44
	.byte	69                              ## 0x45
	.byte	82                              ## 0x52
	.byte	0                               ## 0x0
## LCPI29_2 = "M68KMAKE_TABLE_H"
LCPI29_2:
	.byte	77                              ## 0x4d
	.byte	54                              ## 0x36
	.byte	56                              ## 0x38
	.byte	75                              ## 0x4b
	.byte	77                              ## 0x4d
	.byte	65                              ## 0x41
	.byte	75                              ## 0x4b
	.byte	69                              ## 0x45
	.byte	95                              ## 0x5f
	.byte	84                              ## 0x54
	.byte	65                              ## 0x41
	.byte	66                              ## 0x42
	.byte	76                              ## 0x4c
	.byte	69                              ## 0x45
	.byte	95                              ## 0x5f
	.byte	72                              ## 0x48
## LCPI29_3 = "_HANDLER_HEADER\0"
LCPI29_3:
	.byte	95                              ## 0x5f
	.byte	72                              ## 0x48
	.byte	65                              ## 0x41
	.byte	78                              ## 0x4e
	.byte	68                              ## 0x44
	.byte	76                              ## 0x4c
	.byte	69                              ## 0x45
	.byte	82                              ## 0x52
	.byte	95                              ## 0x5f
	.byte	72                              ## 0x48
	.byte	69                              ## 0x45
	.byte	65                              ## 0x41
	.byte	68                              ## 0x44
	.byte	69                              ## 0x45
	.byte	82                              ## 0x52
	.byte	0                               ## 0x0
## LCPI29_4 = "ROTOTYPE_FOOTER\0"
LCPI29_4:
	.byte	82                              ## 0x52
	.byte	79                              ## 0x4f
	.byte	84                              ## 0x54
	.byte	79                              ## 0x4f
	.byte	84                              ## 0x54
	.byte	89                              ## 0x59
	.byte	80                              ## 0x50
	.byte	69                              ## 0x45
	.byte	95                              ## 0x5f
	.byte	70                              ## 0x46
	.byte	79                              ## 0x4f
	.byte	79                              ## 0x4f
	.byte	84                              ## 0x54
	.byte	69                              ## 0x45
	.byte	82                              ## 0x52
	.byte	0                               ## 0x0
## LCPI29_5 = "KE_TABLE_FOOTER\0"
LCPI29_5:
	.byte	75                              ## 0x4b
	.byte	69                              ## 0x45
	.byte	95                              ## 0x5f
	.byte	84                              ## 0x54
	.byte	65                              ## 0x41
	.byte	66                              ## 0x42
	.byte	76                              ## 0x4c
	.byte	69                              ## 0x45
	.byte	95                              ## 0x5f
	.byte	70                              ## 0x46
	.byte	79                              ## 0x4f
	.byte	79                              ## 0x4f
	.byte	84                              ## 0x54
	.byte	69                              ## 0x45
	.byte	82                              ## 0x52
	.byte	0                               ## 0x0
## LCPI29_6 = "M68KMAKE_TABLE_F"
LCPI29_6:
	.byte	77                              ## 0x4d
	.byte	54                              ## 0x36
	.byte	56                              ## 0x38
	.byte	75                              ## 0x4b
	.byte	77                              ## 0x4d
	.byte	65                              ## 0x41
	.byte	75                              ## 0x4b
	.byte	69                              ## 0x45
	.byte	95                              ## 0x5f
	.byte	84                              ## 0x54
	.byte	65                              ## 0x41
	.byte	66                              ## 0x42
	.byte	76                              ## 0x4c
	.byte	69                              ## 0x45
	.byte	95                              ## 0x5f
	.byte	70                              ## 0x46
LCPI29_7:
.byte 95 ## 0x5f
.byte 72 ## 0x48
.byte 65 ## 0x41
.byte 78 ## 0x4e
.byte 68 ## 0x44
.byte 76 ## 0x4c
.byte 69 ## 0x45
.byte 82 ## 0x52
.byte 95 ## 0x5f
.byte 70 ## 0x46
.byte 79 ## 0x4f
.byte 79 ## 0x4f
.byte 84 ## 0x54
.byte 69 ## 0x45
.byte 82 ## 0x52
.byte 0 ## 0x0
LCPI29_8:
.byte 79 ## 0x4f
.byte 68 ## 0x44
.byte 89 ## 0x59
.byte 0 ## 0x0
.byte 0 ## 0x0
.byte 0 ## 0x0
.byte 0 ## 0x0
.byte 0 ## 0x0
.byte 0 ## 0x0
.byte 0 ## 0x0
.byte 0 ## 0x0
.byte 0 ## 0x0
.byte 0 ## 0x0
.byte 0 ## 0x0
.byte 0 ## 0x0
.byte 0 ## 0x0
LCPI29_9:
.byte 77 ## 0x4d
.byte 54 ## 0x36
.byte 56 ## 0x38
.byte 75 ## 0x4b
.byte 77 ## 0x4d
.byte 65 ## 0x41
.byte 75 ## 0x4b
.byte 69 ## 0x45
.byte 95 ## 0x5f
.byte 84 ## 0x54
.byte 65 ## 0x41
.byte 66 ## 0x42
.byte 76 ## 0x4c
.byte 69 ## 0x45
.byte 95 ## 0x5f
.byte 66 ## 0x42
LCPI29_10:
.byte 68 ## 0x44
.byte 69 ## 0x45
.byte 95 ## 0x5f
.byte 72 ## 0x48
.byte 65 ## 0x41
.byte 78 ## 0x4e
.byte 68 ## 0x44
.byte 76 ## 0x4c
.byte 69 ## 0x45
.byte 82 ## 0x52
.byte 95 ## 0x5f
.byte 66 ## 0x42
.byte 79 ## 0x4f
.byte 68 ## 0x44
.byte 89 ## 0x59
.byte 0 ## 0x0
.section __TEXT,__text,regular,pure_instructions
.globl _main
.p2align 4, 0x90
## int main(int argc, char **argv)  -- SysV AMD64 ABI (edi=argc, rsi=argv)
## Driver for the code generator: prints a banner, takes an optional output
## directory (argv[1]) and input filename (argv[2]), opens five output files
## plus the input file, then scans the input line-by-line for M68KMAKE_*
## section markers (SSE 16-byte compares against the LCPI29_* pool) and
## routes each section to the appropriate file/processing routine.
## Locals (~22 KB, probed via ___chkstk_darwin) include:
##   -1072(%rbp)   1024-byte output path buffer
##   -2096(%rbp)   1024-byte sprintf'd full filename
##   -2304(%rbp)   200-byte input line buffer
##   -7312/-12320/-17328/-22336(%rbp)  section-text buffers for read_insert
##   -48(%rbp)     __stack_chk_guard cookie
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
## Probe and allocate 22360 bytes of locals (keeps 16-byte alignment).
movl $22360, %eax ## imm = 0x5758
callq ____chkstk_darwin
subq %rax, %rsp
popq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rsi, %r14 ## r14 = argv
movl %edi, %ebx ## ebx = argc
## Install the stack-protector cookie at -48(%rbp).
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -48(%rbp)
## Zero the 1024-byte output-path buffer, then print the version banner.
leaq -1072(%rbp), %rdi
movl $1024, %esi ## imm = 0x400
callq ___bzero
movq _g_version(%rip), %rsi
leaq L_.str.126(%rip), %rdi
xorl %eax, %eax ## variadic call: 0 vector args in AL
callq _printf
leaq L_str(%rip), %rdi
callq _puts
cmpl $2, %ebx
jl LBB29_8 ## no argv[1]: keep empty output path
## %bb.1: argv[1] = output directory; copy (bounds-checked) into the buffer.
movq 8(%r14), %rsi
leaq -1072(%rbp), %r15
movl $1024, %edx ## imm = 0x400
movq %r15, %rdi
callq ___strcpy_chk
movq %r15, %rdi
.p2align 4, 0x90
LBB29_3: ## =>This Inner Loop Header: Depth=1
## Replace every '\' (92) in the path with '/' (47).
movl $92, %esi
callq _strchr
testq %rax, %rax
je LBB29_4
## %bb.2: ## in Loop: Header=BB29_3 Depth=1
movb $47, (%rax)
movq %rax, %rdi
jmp LBB29_3
LBB29_4:
## If the path does not already end in '/', append L_.str.128.
movq %r15, %rdi
callq _strlen
cmpb $47, -1(%rax,%r15)
je LBB29_6
## %bb.5:
leaq L_.str.128(%rip), %rsi
leaq -1072(%rbp), %rdi
movl $1024, %edx ## imm = 0x400
callq ___strcat_chk
LBB29_6:
cmpl $3, %ebx
jl LBB29_8
## %bb.7: argv[2] overrides the default input filename.
movq 16(%r14), %rsi
leaq _g_input_filename(%rip), %rdi
movl $1024, %edx ## imm = 0x400
callq ___strcpy_chk
LBB29_8:
## Build "<outdir><name>" with __sprintf_chk and fopen each output file
## (mode L_.str.131); a NULL result jumps to its perror_exit stub.
leaq L_.str.129(%rip), %rcx
leaq L_.str.130(%rip), %r9
leaq -2096(%rbp), %rbx
leaq -1072(%rbp), %r8
movl $1024, %edx ## imm = 0x400
movq %rbx, %rdi
xorl %esi, %esi
xorl %eax, %eax
callq ___sprintf_chk
leaq L_.str.131(%rip), %rsi
movq %rbx, %rdi
callq _fopen
movq %rax, _g_prototype_file(%rip)
testq %rax, %rax
je LBB29_9
## %bb.11:
leaq L_.str.129(%rip), %rcx
leaq L_.str.133(%rip), %r9
leaq -2096(%rbp), %rbx
leaq -1072(%rbp), %r8
movl $1024, %edx ## imm = 0x400
movq %rbx, %rdi
xorl %esi, %esi
xorl %eax, %eax
callq ___sprintf_chk
leaq L_.str.131(%rip), %rsi
movq %rbx, %rdi
callq _fopen
movq %rax, _g_table_file(%rip)
testq %rax, %rax
je LBB29_12
## %bb.13:
leaq L_.str.129(%rip), %rcx
leaq L_.str.135(%rip), %r9
leaq -2096(%rbp), %rbx
leaq -1072(%rbp), %r8
movl $1024, %edx ## imm = 0x400
movq %rbx, %rdi
xorl %esi, %esi
xorl %eax, %eax
callq ___sprintf_chk
leaq L_.str.131(%rip), %rsi
movq %rbx, %rdi
callq _fopen
movq %rax, _g_ops_ac_file(%rip)
testq %rax, %rax
je LBB29_14
## %bb.15:
leaq L_.str.129(%rip), %rcx
leaq L_.str.137(%rip), %r9
leaq -2096(%rbp), %rbx
leaq -1072(%rbp), %r8
movl $1024, %edx ## imm = 0x400
movq %rbx, %rdi
xorl %esi, %esi
xorl %eax, %eax
callq ___sprintf_chk
leaq L_.str.131(%rip), %rsi
movq %rbx, %rdi
callq _fopen
movq %rax, _g_ops_dm_file(%rip)
testq %rax, %rax
je LBB29_16
## %bb.17:
leaq L_.str.129(%rip), %rcx
leaq L_.str.139(%rip), %r9
leaq -2096(%rbp), %rbx
leaq -1072(%rbp), %r8
movl $1024, %edx ## imm = 0x400
movq %rbx, %rdi
xorl %esi, %esi
xorl %eax, %eax
callq ___sprintf_chk
leaq L_.str.131(%rip), %rsi
movq %rbx, %rdi
callq _fopen
movq %rax, _g_ops_nz_file(%rip)
testq %rax, %rax
je LBB29_18
## %bb.19: open the input file (mode L_.str.141).
leaq _g_input_filename(%rip), %rdi
leaq L_.str.141(%rip), %rsi
callq _fopen
movq %rax, _g_input_file(%rip)
testq %rax, %rax
je LBB29_89
## %bb.20:
## Skip-leading-lines phase: read lines until one matches marker
## L_.str.112 (memcmp over 80 bytes).  r13 holds the constant
## 0xFFFFFFFF00000000 used to decrement the packed high-word counter
## in the CR/LF-stripping loops below.
movq %rax, %rbx
movabsq $-4294967296, %r13 ## imm = 0xFFFFFFFF00000000
movb $0, -2304(%rbp)
leaq L_.str.112(%rip), %rsi
leaq -2304(%rbp), %rdi
movl $80, %edx
callq _memcmp
testl %eax, %eax
je LBB29_31
## %bb.21:
leaq -2303(%rbp), %r14
leaq -2304(%rbp), %r12
leaq L_.str.112(%rip), %r15
jmp LBB29_22
.p2align 4, 0x90
LBB29_30: ## in Loop: Header=BB29_22 Depth=1
movb $0, -2304(%rbp)
incl _g_line_number(%rip)
LBB29_42: ## in Loop: Header=BB29_22 Depth=1
movl $80, %edx
movq %r12, %rdi
movq %r15, %rsi
callq _memcmp
movq _g_input_file(%rip), %rbx
testl %eax, %eax
je LBB29_31
LBB29_22: ## =>This Loop Header: Depth=1
## Child Loop BB29_27 Depth 2
## fgets up to 200 bytes; EOF before the marker is a fatal error.
movq %r12, %rdi
movl $200, %esi
movq %rbx, %rdx
callq _fgets
testq %rax, %rax
je LBB29_76
## %bb.23: ## in Loop: Header=BB29_22 Depth=1
## A leading '\r' (13): shift the line left by one byte.
cmpb $13, -2304(%rbp)
jne LBB29_25
## %bb.24: ## in Loop: Header=BB29_22 Depth=1
movl $199, %edx
movq %r12, %rdi
movq %r14, %rsi
callq _memcpy
LBB29_25: ## in Loop: Header=BB29_22 Depth=1
movq %r12, %rdi
callq _strlen
testl %eax, %eax
je LBB29_30
## %bb.26: ## in Loop: Header=BB29_22 Depth=1
## rcx keeps the length in its high 32 bits (NUL-store index after
## sarq $32); rax walks backwards over trailing CR/LF bytes.
movq %rax, %rcx
shlq $32, %rcx
cltq
jmp LBB29_27
.p2align 4, 0x90
LBB29_29: ## in Loop: Header=BB29_27 Depth=2
addq %r13, %rcx ## decrement the packed high-word length
decq %rax
testl %eax, %eax
je LBB29_30
LBB29_27: ## Parent Loop BB29_22 Depth=1
## => This Inner Loop Header: Depth=2
movzbl -2305(%rbp,%rax), %edx
cmpb $13, %dl
je LBB29_29
## %bb.28: ## in Loop: Header=BB29_27 Depth=2
cmpb $10, %dl
je LBB29_29
## %bb.41: ## in Loop: Header=BB29_22 Depth=1
## Terminate the line after the last non-CR/LF character.
sarq $32, %rcx
movb $0, -2304(%rbp,%rcx)
incl _g_line_number(%rip)
testl $-2147483648, %eax ## imm = 0x80000000
je LBB29_42
jmp LBB29_76
LBB29_31:
## Marker found: read the first real line and set up the SSE marker
## constants.  -22400(%rbp) spill = "M68KMAKE_PROTOTY",
## -22384(%rbp) spill = "M68KMAKE_OPCODE_".
leaq -2304(%rbp), %rdi
movl $200, %esi
movq %rbx, %rdx
callq _fgets
testq %rax, %rax
je LBB29_76
## %bb.32:
movabsq $6864968314218631007, %rax ## imm = 0x5F45444F43504F5F
movabsq $4993156412100589133, %rdx ## imm = 0x454B414D4B38364D
movabsq $6436856990945923167, %rcx ## imm = 0x59544F544F52505F
leaq -2304(%rbp), %rbx
movq %rdx, %xmm1
movq %rcx, %xmm0
movdqa %xmm1, %xmm2
punpcklqdq %xmm0, %xmm2 ## xmm2 = xmm2[0],xmm0[0]
movdqa %xmm2, -22400(%rbp) ## 16-byte Spill
leaq -7312(%rbp), %r15
movq %rax, %xmm0
punpcklqdq %xmm0, %xmm1 ## xmm1 = xmm1[0],xmm0[0]
movdqa %xmm1, -22384(%rbp) ## 16-byte Spill
## Section-seen flags, all initially 0:
##   -22340 = PROTOTYPE_HEADER   -22360 = PROTOTYPE_FOOTER
##   r14d   = TABLE_HEADER       -22356 = TABLE_FOOTER
##   r12d   = OPCODE_HANDLER_HEADER   -22348 = OPCODE_HANDLER_FOOTER
##   -22344 = TABLE_BODY         -22352 = OPCODE_HANDLER_BODY
movl $0, -22352(%rbp) ## 4-byte Folded Spill
movl $0, -22344(%rbp) ## 4-byte Folded Spill
movl $0, -22348(%rbp) ## 4-byte Folded Spill
xorl %r12d, %r12d
movl $0, -22356(%rbp) ## 4-byte Folded Spill
xorl %r14d, %r14d
movl $0, -22360(%rbp) ## 4-byte Folded Spill
movl $0, -22340(%rbp) ## 4-byte Folded Spill
jmp LBB29_33
LBB29_74: ## in Loop: Header=BB29_33 Depth=1
callq _process_opcode_handlers
movl $1, -22352(%rbp) ## 4-byte Folded Spill
.p2align 4, 0x90
LBB29_75: ## in Loop: Header=BB29_33 Depth=1
## Main loop: read the next input line.
movq _g_input_file(%rip), %rdx
movq %rbx, %rdi
movl $200, %esi
callq _fgets
testq %rax, %rax
je LBB29_76
LBB29_33: ## =>This Loop Header: Depth=1
## Child Loop BB29_37 Depth 2
## Same CR-shift / trailing-CR-LF-strip logic as BB29_22..30 above.
cmpb $13, -2304(%rbp)
jne LBB29_35
## %bb.34: ## in Loop: Header=BB29_33 Depth=1
movl $199, %edx
movq %rbx, %rdi
leaq -2303(%rbp), %rsi
callq _memcpy
LBB29_35: ## in Loop: Header=BB29_33 Depth=1
movq %rbx, %rdi
callq _strlen
testl %eax, %eax
je LBB29_40
## %bb.36: ## in Loop: Header=BB29_33 Depth=1
movq %rax, %rcx
shlq $32, %rcx
cltq
jmp LBB29_37
.p2align 4, 0x90
LBB29_39: ## in Loop: Header=BB29_37 Depth=2
addq %r13, %rcx
decq %rax
testl %eax, %eax
je LBB29_40
LBB29_37: ## Parent Loop BB29_33 Depth=1
## => This Inner Loop Header: Depth=2
movzbl -2305(%rbp,%rax), %edx
cmpb $13, %dl
je LBB29_39
## %bb.38: ## in Loop: Header=BB29_37 Depth=2
cmpb $10, %dl
je LBB29_39
## %bb.43: ## in Loop: Header=BB29_33 Depth=1
sarq $32, %rcx
movb $0, -2304(%rbp,%rcx)
incl _g_line_number(%rip)
testl $-2147483648, %eax ## imm = 0x80000000
je LBB29_44
jmp LBB29_76
.p2align 4, 0x90
LBB29_40: ## in Loop: Header=BB29_33 Depth=1
movb $0, -2304(%rbp)
incl _g_line_number(%rip)
LBB29_44: ## in Loop: Header=BB29_33 Depth=1
## Marker dispatch: each pair of pxor/por/ptest tests the line against
## one full marker string (head 16 bytes OR'd with overlapping tail).
## Line == "M68KMAKE_PROTOTYPE_HEADER"?
movdqu -2294(%rbp), %xmm0
pxor LCPI29_0(%rip), %xmm0
movdqa -2304(%rbp), %xmm1
pxor -22400(%rbp), %xmm1 ## 16-byte Folded Reload
por %xmm0, %xmm1
ptest %xmm1, %xmm1
je LBB29_45
## %bb.47: ## in Loop: Header=BB29_33 Depth=1
## Line == "M68KMAKE_TABLE_HEADER"?
movdqu -2298(%rbp), %xmm0
pxor LCPI29_1(%rip), %xmm0
movdqa -2304(%rbp), %xmm1
pxor LCPI29_2(%rip), %xmm1
por %xmm0, %xmm1
ptest %xmm1, %xmm1
je LBB29_48
## %bb.50: ## in Loop: Header=BB29_33 Depth=1
## Line == "M68KMAKE_OPCODE_HANDLER_HEADER"?
movdqu -2289(%rbp), %xmm0
pxor LCPI29_3(%rip), %xmm0
movdqa -2304(%rbp), %xmm1
pxor -22384(%rbp), %xmm1 ## 16-byte Folded Reload
por %xmm0, %xmm1
ptest %xmm1, %xmm1
je LBB29_51
## %bb.53: ## in Loop: Header=BB29_33 Depth=1
## Line == "M68KMAKE_PROTOTYPE_FOOTER"?
movdqu -2294(%rbp), %xmm0
pxor LCPI29_4(%rip), %xmm0
movdqa -2304(%rbp), %xmm1
pxor -22400(%rbp), %xmm1 ## 16-byte Folded Reload
por %xmm0, %xmm1
ptest %xmm1, %xmm1
je LBB29_54
## %bb.56: ## in Loop: Header=BB29_33 Depth=1
## Line == "M68KMAKE_TABLE_FOOTER"?
movdqu -2298(%rbp), %xmm0
pxor LCPI29_5(%rip), %xmm0
movdqa -2304(%rbp), %xmm1
pxor LCPI29_6(%rip), %xmm1
por %xmm0, %xmm1
ptest %xmm1, %xmm1
je LBB29_57
## %bb.59: ## in Loop: Header=BB29_33 Depth=1
## Line == "M68KMAKE_OPCODE_HANDLER_FOOTER"?
movdqu -2289(%rbp), %xmm0
pxor LCPI29_7(%rip), %xmm0
movdqa -2304(%rbp), %xmm1
pxor -22384(%rbp), %xmm1 ## 16-byte Folded Reload
por %xmm0, %xmm1
ptest %xmm1, %xmm1
je LBB29_60
## %bb.62: ## in Loop: Header=BB29_33 Depth=1
## Line == "M68KMAKE_TABLE_BODY"?
movd -2288(%rbp), %xmm0 ## xmm0 = mem[0],zero,zero,zero
pxor LCPI29_8(%rip), %xmm0
movdqa -2304(%rbp), %xmm1
pxor LCPI29_9(%rip), %xmm1
por %xmm0, %xmm1
ptest %xmm1, %xmm1
je LBB29_63
## %bb.68: ## in Loop: Header=BB29_33 Depth=1
## Line == "M68KMAKE_OPCODE_HANDLER_BODY"?
movdqu -2291(%rbp), %xmm0
pxor LCPI29_10(%rip), %xmm0
movdqa -2304(%rbp), %xmm1
pxor -22384(%rbp), %xmm1 ## 16-byte Folded Reload
por %xmm0, %xmm1
ptest %xmm1, %xmm1
jne LBB29_77
## %bb.69: ## in Loop: Header=BB29_33 Depth=1
## OPCODE_HANDLER_BODY: all earlier sections must have been seen
## exactly once; then process the opcode handlers.
cmpl $0, -22340(%rbp) ## 4-byte Folded Reload
je LBB29_100
## %bb.70: ## in Loop: Header=BB29_33 Depth=1
testl %r14d, %r14d
je LBB29_101
## %bb.71: ## in Loop: Header=BB29_33 Depth=1
testl %r12d, %r12d
je LBB29_102
## %bb.72: ## in Loop: Header=BB29_33 Depth=1
cmpl $0, -22344(%rbp) ## 4-byte Folded Reload
je LBB29_103
## %bb.73: ## in Loop: Header=BB29_33 Depth=1
cmpl $0, -22352(%rbp) ## 4-byte Folded Reload
je LBB29_74
jmp LBB29_104
.p2align 4, 0x90
LBB29_45: ## in Loop: Header=BB29_33 Depth=1
## PROTOTYPE_HEADER: read the section and fprintf it to the prototype file.
cmpl $0, -22340(%rbp) ## 4-byte Folded Reload
jne LBB29_90 ## duplicate marker -> fatal
## %bb.46: ## in Loop: Header=BB29_33 Depth=1
movq %r15, %rdi
callq _read_insert
movq _g_prototype_file(%rip), %rdi
leaq L_.str.146(%rip), %rsi
movq %r15, %rdx
xorl %eax, %eax
callq _fprintf
movl $1, -22340(%rbp) ## 4-byte Folded Spill
jmp LBB29_75
.p2align 4, 0x90
LBB29_48: ## in Loop: Header=BB29_33 Depth=1
## TABLE_HEADER: read the section and fputs it to the table file.
testl %r14d, %r14d
jne LBB29_91
## %bb.49: ## in Loop: Header=BB29_33 Depth=1
movq %r15, %rdi
callq _read_insert
movq _g_table_file(%rip), %rsi
movq %r15, %rdi
callq _fputs
movl $1, %r14d
jmp LBB29_75
LBB29_51: ## in Loop: Header=BB29_33 Depth=1
## OPCODE_HANDLER_HEADER: fprintf the section to all three ops files.
## (r14 is temporarily parked in r12 so r14 can hold the format string.)
testl %r12d, %r12d
jne LBB29_92
## %bb.52: ## in Loop: Header=BB29_33 Depth=1
movq %r15, %rdi
callq _read_insert
movq _g_ops_ac_file(%rip), %rdi
movl %r14d, %r12d
leaq L_.str.146(%rip), %r14
movq %r14, %rsi
movq %r15, %rdx
xorl %eax, %eax
callq _fprintf
movq _g_ops_dm_file(%rip), %rdi
movq %r14, %rsi
movq %r15, %rdx
xorl %eax, %eax
callq _fprintf
movq _g_ops_nz_file(%rip), %rdi
movq %r14, %rsi
movl %r12d, %r14d
movq %r15, %rdx
xorl %eax, %eax
callq _fprintf
movl $1, %r12d
jmp LBB29_75
LBB29_54: ## in Loop: Header=BB29_33 Depth=1
## PROTOTYPE_FOOTER: buffered at -12320(%rbp), written at BB29_86.
cmpl $0, -22360(%rbp) ## 4-byte Folded Reload
jne LBB29_93
## %bb.55: ## in Loop: Header=BB29_33 Depth=1
leaq -12320(%rbp), %rdi
callq _read_insert
movl $1, -22360(%rbp) ## 4-byte Folded Spill
jmp LBB29_75
LBB29_57: ## in Loop: Header=BB29_33 Depth=1
## TABLE_FOOTER: buffered at -17328(%rbp).
cmpl $0, -22356(%rbp) ## 4-byte Folded Reload
jne LBB29_94
## %bb.58: ## in Loop: Header=BB29_33 Depth=1
leaq -17328(%rbp), %rdi
callq _read_insert
movl $1, -22356(%rbp) ## 4-byte Folded Spill
jmp LBB29_75
LBB29_60: ## in Loop: Header=BB29_33 Depth=1
## OPCODE_HANDLER_FOOTER: buffered at -22336(%rbp).
cmpl $0, -22348(%rbp) ## 4-byte Folded Reload
jne LBB29_95
## %bb.61: ## in Loop: Header=BB29_33 Depth=1
leaq -22336(%rbp), %rdi
callq _read_insert
movl $1, -22348(%rbp) ## 4-byte Folded Spill
jmp LBB29_75
LBB29_63: ## in Loop: Header=BB29_33 Depth=1
## TABLE_BODY: requires the three headers first, then populate_table().
cmpl $0, -22340(%rbp) ## 4-byte Folded Reload
je LBB29_96
## %bb.64: ## in Loop: Header=BB29_33 Depth=1
testl %r14d, %r14d
je LBB29_97
## %bb.65: ## in Loop: Header=BB29_33 Depth=1
testl %r12d, %r12d
je LBB29_98
## %bb.66: ## in Loop: Header=BB29_33 Depth=1
cmpl $0, -22344(%rbp) ## 4-byte Folded Reload
jne LBB29_99
## %bb.67: ## in Loop: Header=BB29_33 Depth=1
callq _populate_table
movl $1, -22344(%rbp) ## 4-byte Folded Spill
jmp LBB29_75
LBB29_77:
## Not a known section marker: only "M68KMAKE_END" is acceptable here
## (8-byte "M68KMAKE" + overlapping 8-byte "AKE_END\0" compare).
movabsq $4993156412100589133, %rcx ## imm = 0x454B414D4B38364D
xorq -2304(%rbp), %rcx
movabsq $19226358274411329, %rax ## imm = 0x444E455F454B41
xorq -2299(%rbp), %rax
orq %rcx, %rax
jne LBB29_88
## %bb.78:
## M68KMAKE_END: verify every section was seen, in fixed order.
cmpl $0, -22340(%rbp) ## 4-byte Folded Reload
je LBB29_105
## %bb.79:
cmpl $0, -22360(%rbp) ## 4-byte Folded Reload
je LBB29_106
## %bb.80:
testl %r14d, %r14d
je LBB29_107
## %bb.81:
cmpl $0, -22356(%rbp) ## 4-byte Folded Reload
je LBB29_108
## %bb.82:
cmpl $0, -22344(%rbp) ## 4-byte Folded Reload
je LBB29_109
## %bb.83:
testl %r12d, %r12d
je LBB29_110
## %bb.84:
cmpl $0, -22348(%rbp) ## 4-byte Folded Reload
je LBB29_111
## %bb.85:
cmpl $0, -22352(%rbp) ## 4-byte Folded Reload
je LBB29_112
## %bb.86:
## Emit the opcode table and the buffered footers, close all six files,
## print the summary, then run the stack-protector check and return 0.
movq _g_table_file(%rip), %rdi
callq _print_opcode_output_table
movq _g_prototype_file(%rip), %rdi
leaq L_.str.146(%rip), %rbx
leaq -12320(%rbp), %rdx
movq %rbx, %rsi
xorl %eax, %eax
callq _fprintf
movq _g_table_file(%rip), %rdi
leaq -17328(%rbp), %rdx
movq %rbx, %rsi
xorl %eax, %eax
callq _fprintf
movq _g_ops_ac_file(%rip), %rdi
leaq -22336(%rbp), %r14
movq %rbx, %rsi
movq %r14, %rdx
xorl %eax, %eax
callq _fprintf
movq _g_ops_dm_file(%rip), %rdi
movq %rbx, %rsi
movq %r14, %rdx
xorl %eax, %eax
callq _fprintf
movq _g_ops_nz_file(%rip), %rdi
movq %rbx, %rsi
movq %r14, %rdx
xorl %eax, %eax
callq _fprintf
movq _g_prototype_file(%rip), %rdi
callq _fclose
movq _g_table_file(%rip), %rdi
callq _fclose
movq _g_ops_ac_file(%rip), %rdi
callq _fclose
movq _g_ops_dm_file(%rip), %rdi
callq _fclose
movq _g_ops_nz_file(%rip), %rdi
callq _fclose
movq _g_input_file(%rip), %rdi
callq _fclose
movl _g_num_functions(%rip), %esi
movl _g_num_primitives(%rip), %edx
leaq L_.str.178(%rip), %rdi
xorl %eax, %eax
callq _printf
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
cmpq -48(%rbp), %rax
jne LBB29_113
## %bb.87:
xorl %eax, %eax ## return 0
addq $22360, %rsp ## imm = 0x5758
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
## Fatal-error stubs: each loads a message and calls the noreturn
## _error_exit / _perror_exit helpers.
LBB29_76:
leaq L_.str.143(%rip), %rdi
xorl %eax, %eax
callq _error_exit
LBB29_90:
leaq L_.str.145(%rip), %rdi
xorl %eax, %eax
callq _error_exit
LBB29_91:
leaq L_.str.148(%rip), %rdi
xorl %eax, %eax
callq _error_exit
LBB29_92:
leaq L_.str.150(%rip), %rdi
xorl %eax, %eax
callq _error_exit
LBB29_93:
leaq L_.str.152(%rip), %rdi
xorl %eax, %eax
callq _error_exit
LBB29_94:
leaq L_.str.154(%rip), %rdi
xorl %eax, %eax
callq _error_exit
LBB29_104:
leaq L_.str.167(%rip), %rdi
xorl %eax, %eax
callq _error_exit
LBB29_95:
leaq L_.str.156(%rip), %rdi
xorl %eax, %eax
callq _error_exit
LBB29_99:
leaq L_.str.161(%rip), %rdi
xorl %eax, %eax
callq _error_exit
LBB29_100:
leaq L_.str.163(%rip), %rdi
xorl %eax, %eax
callq _error_exit
LBB29_101:
leaq L_.str.164(%rip), %rdi
xorl %eax, %eax
callq _error_exit
LBB29_102:
leaq L_.str.165(%rip), %rdi
xorl %eax, %eax
callq _error_exit
LBB29_103:
leaq L_.str.166(%rip), %rdi
xorl %eax, %eax
callq _error_exit
LBB29_96:
leaq L_.str.158(%rip), %rdi
xorl %eax, %eax
callq _error_exit
LBB29_97:
leaq L_.str.159(%rip), %rdi
xorl %eax, %eax
callq _error_exit
LBB29_98:
leaq L_.str.160(%rip), %rdi
xorl %eax, %eax
callq _error_exit
LBB29_9:
leaq L_.str.132(%rip), %rdi
jmp LBB29_10
LBB29_12:
leaq L_.str.134(%rip), %rdi
jmp LBB29_10
LBB29_14:
leaq L_.str.136(%rip), %rdi
jmp LBB29_10
LBB29_16:
leaq L_.str.138(%rip), %rdi
jmp LBB29_10
LBB29_18:
leaq L_.str.140(%rip), %rdi
LBB29_10:
leaq -2096(%rbp), %rsi
xorl %eax, %eax
callq _perror_exit
LBB29_89:
leaq L_.str.142(%rip), %rdi
leaq _g_input_filename(%rip), %rsi
xorl %eax, %eax
callq _perror_exit
LBB29_88:
leaq L_.str.177(%rip), %rdi
leaq -2304(%rbp), %rsi
xorl %eax, %eax
callq _error_exit
LBB29_105:
leaq L_.str.169(%rip), %rdi
xorl %eax, %eax
callq _error_exit
LBB29_106:
leaq L_.str.170(%rip), %rdi
xorl %eax, %eax
callq _error_exit
LBB29_107:
leaq L_.str.171(%rip), %rdi
xorl %eax, %eax
callq _error_exit
LBB29_108:
leaq L_.str.172(%rip), %rdi
xorl %eax, %eax
callq _error_exit
LBB29_109:
leaq L_.str.173(%rip), %rdi
xorl %eax, %eax
callq _error_exit
LBB29_110:
leaq L_.str.174(%rip), %rdi
xorl %eax, %eax
callq _error_exit
LBB29_111:
leaq L_.str.175(%rip), %rdi
xorl %eax, %eax
callq _error_exit
LBB29_112:
leaq L_.str.176(%rip), %rdi
xorl %eax, %eax
callq _error_exit
LBB29_113:
callq ___stack_chk_fail
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
## Version string printed in the banner (via L_.str.126 in _main).
.asciz "3.3"
.section __DATA,__data
.globl _g_version ## @g_version
.p2align 3
## const char *g_version = "3.3";
_g_version:
.quad L_.str
.globl _g_input_filename ## @g_input_filename
.p2align 4
## char g_input_filename[1024] = "m68k_in.c";
## Default input filename; _main overwrites it from argv[2] when given
## (via ___strcpy_chk with a 1024-byte bound).
## The original emission was a single .asciz padded with 1014 explicit
## "\000" escapes, which arrived split across physical lines (invalid
## assembler input).  Rebuilt equivalently: .asciz emits the 9 characters
## plus a terminating NUL, and .space zero-fills the rest of the buffer
## (9 + 1 + 1014 = 1024 bytes, identical image).
_g_input_filename:
.asciz "m68k_in.c"
.space 1014
## Zero-initialized FILE* handles opened in _main (input + 5 outputs).
.globl _g_input_file ## @g_input_file
.zerofill __DATA,__common,_g_input_file,8,3
.globl _g_prototype_file ## @g_prototype_file
.zerofill __DATA,__common,_g_prototype_file,8,3
.globl _g_table_file ## @g_table_file
.zerofill __DATA,__common,_g_table_file,8,3
.globl _g_ops_ac_file ## @g_ops_ac_file
.zerofill __DATA,__common,_g_ops_ac_file,8,3
.globl _g_ops_dm_file ## @g_ops_dm_file
.zerofill __DATA,__common,_g_ops_dm_file,8,3
.globl _g_ops_nz_file ## @g_ops_nz_file
.zerofill __DATA,__common,_g_ops_nz_file,8,3
## Counters reported in _main's final printf (L_.str.178).
.globl _g_num_functions ## @g_num_functions
.zerofill __DATA,__common,_g_num_functions,4,2
.globl _g_num_primitives ## @g_num_primitives
.zerofill __DATA,__common,_g_num_primitives,4,2
.globl _g_line_number ## @g_line_number
.p2align 2
## int g_line_number = 1;  incremented per input line; used in diagnostics
## (see L_.str.58 "In %s, near or on line %d:").
_g_line_number:
.long 1 ## 0x1
.globl _g_opcode_output_table_length ## @g_opcode_output_table_length
.zerofill __DATA,__common,_g_opcode_output_table_length,4,2
.section __TEXT,__cstring,cstring_literals
## Effective-address mode strings referenced pairwise by _g_ea_info_table:
## a lowercase spec token (e.g. "ai") and its uppercase name suffix
## (e.g. "AY_AI").  L_.str.1 is the empty string used for the first entry.
L_.str.1: ## @.str.1
.space 1
L_.str.2: ## @.str.2
.asciz "ai"
L_.str.3: ## @.str.3
.asciz "AY_AI"
L_.str.4: ## @.str.4
.asciz "pi"
L_.str.5: ## @.str.5
.asciz "AY_PI"
L_.str.6: ## @.str.6
.asciz "pi7"
L_.str.7: ## @.str.7
.asciz "A7_PI"
L_.str.8: ## @.str.8
.asciz "pd"
L_.str.9: ## @.str.9
.asciz "AY_PD"
L_.str.10: ## @.str.10
.asciz "pd7"
L_.str.11: ## @.str.11
.asciz "A7_PD"
L_.str.12: ## @.str.12
.asciz "di"
L_.str.13: ## @.str.13
.asciz "AY_DI"
L_.str.14: ## @.str.14
.asciz "ix"
L_.str.15: ## @.str.15
.asciz "AY_IX"
L_.str.16: ## @.str.16
.asciz "aw"
L_.str.17: ## @.str.17
.asciz "AW"
L_.str.18: ## @.str.18
.asciz "al"
L_.str.19: ## @.str.19
.asciz "AL"
L_.str.20: ## @.str.20
.asciz "pcdi"
L_.str.21: ## @.str.21
.asciz "PCDI"
L_.str.22: ## @.str.22
.asciz "pcix"
L_.str.23: ## @.str.23
.asciz "PCIX"
L_.str.24: ## @.str.24
.asciz "i"
L_.str.25: ## @.str.25
.asciz "I"
.section __DATA,__data
.globl _g_ea_info_table ## @g_ea_info_table
.p2align 4
## 13 entries of 24 bytes each: two C-string pointers followed by two
## 32-bit values.  The strings are the lowercase spec token and the
## uppercase name suffix for one effective-address mode; the two integers
## are presumably the EA-field mask/match bit patterns for that mode
## (e.g. 56/16 = 0x38/0x10) -- NOTE(review): confirm against the C source.
_g_ea_info_table:
.quad L_.str.1 ## entry 0: "", "" (no EA)
.quad L_.str.1
.long 0 ## 0x0
.long 0 ## 0x0
.quad L_.str.2 ## "ai" / "AY_AI"
.quad L_.str.3
.long 56 ## 0x38
.long 16 ## 0x10
.quad L_.str.4 ## "pi" / "AY_PI"
.quad L_.str.5
.long 56 ## 0x38
.long 24 ## 0x18
.quad L_.str.6 ## "pi7" / "A7_PI"
.quad L_.str.7
.long 63 ## 0x3f
.long 31 ## 0x1f
.quad L_.str.8 ## "pd" / "AY_PD"
.quad L_.str.9
.long 56 ## 0x38
.long 32 ## 0x20
.quad L_.str.10 ## "pd7" / "A7_PD"
.quad L_.str.11
.long 63 ## 0x3f
.long 39 ## 0x27
.quad L_.str.12 ## "di" / "AY_DI"
.quad L_.str.13
.long 56 ## 0x38
.long 40 ## 0x28
.quad L_.str.14 ## "ix" / "AY_IX"
.quad L_.str.15
.long 56 ## 0x38
.long 48 ## 0x30
.quad L_.str.16 ## "aw" / "AW"
.quad L_.str.17
.long 63 ## 0x3f
.long 56 ## 0x38
.quad L_.str.18 ## "al" / "AL"
.quad L_.str.19
.long 63 ## 0x3f
.long 57 ## 0x39
.quad L_.str.20 ## "pcdi" / "PCDI"
.quad L_.str.21
.long 63 ## 0x3f
.long 58 ## 0x3a
.quad L_.str.22 ## "pcix" / "PCIX"
.quad L_.str.23
.long 63 ## 0x3f
.long 59 ## 0x3b
.quad L_.str.24 ## "i" / "I"
.quad L_.str.25
.long 63 ## 0x3f
.long 60 ## 0x3c
.section __TEXT,__cstring,cstring_literals
## Condition-code mnemonic strings, in lowercase/uppercase pairs, in the
## standard M68000 condition order (t, f, hi, ls, cc, cs, ne, eq, vc, vs,
## pl, mi, ge, lt, gt, le).  Referenced by _g_cc_table below.
L_.str.26: ## @.str.26
.asciz "t"
L_.str.27: ## @.str.27
.asciz "T"
L_.str.28: ## @.str.28
.asciz "f"
L_.str.29: ## @.str.29
.asciz "F"
L_.str.30: ## @.str.30
.asciz "hi"
L_.str.31: ## @.str.31
.asciz "HI"
L_.str.32: ## @.str.32
.asciz "ls"
L_.str.33: ## @.str.33
.asciz "LS"
L_.str.34: ## @.str.34
.asciz "cc"
L_.str.35: ## @.str.35
.asciz "CC"
L_.str.36: ## @.str.36
.asciz "cs"
L_.str.37: ## @.str.37
.asciz "CS"
L_.str.38: ## @.str.38
.asciz "ne"
L_.str.39: ## @.str.39
.asciz "NE"
L_.str.40: ## @.str.40
.asciz "eq"
L_.str.41: ## @.str.41
.asciz "EQ"
L_.str.42: ## @.str.42
.asciz "vc"
L_.str.43: ## @.str.43
.asciz "VC"
L_.str.44: ## @.str.44
.asciz "vs"
L_.str.45: ## @.str.45
.asciz "VS"
L_.str.46: ## @.str.46
.asciz "pl"
L_.str.47: ## @.str.47
.asciz "PL"
L_.str.48: ## @.str.48
.asciz "mi"
L_.str.49: ## @.str.49
.asciz "MI"
L_.str.50: ## @.str.50
.asciz "ge"
L_.str.51: ## @.str.51
.asciz "GE"
L_.str.52: ## @.str.52
.asciz "lt"
L_.str.53: ## @.str.53
.asciz "LT"
L_.str.54: ## @.str.54
.asciz "gt"
L_.str.55: ## @.str.55
.asciz "GT"
L_.str.56: ## @.str.56
.asciz "le"
L_.str.57: ## @.str.57
.asciz "LE"
.section __DATA,__data
.globl _g_cc_table ## @g_cc_table
.p2align 4
## 16 pairs of string pointers {lowercase, UPPERCASE}, indexed by the
## 4-bit M68000 condition-code field (0 = t ... 15 = le).
_g_cc_table:
.quad L_.str.26
.quad L_.str.27
.quad L_.str.28
.quad L_.str.29
.quad L_.str.30
.quad L_.str.31
.quad L_.str.32
.quad L_.str.33
.quad L_.str.34
.quad L_.str.35
.quad L_.str.36
.quad L_.str.37
.quad L_.str.38
.quad L_.str.39
.quad L_.str.40
.quad L_.str.41
.quad L_.str.42
.quad L_.str.43
.quad L_.str.44
.quad L_.str.45
.quad L_.str.46
.quad L_.str.47
.quad L_.str.48
.quad L_.str.49
.quad L_.str.50
.quad L_.str.51
.quad L_.str.52
.quad L_.str.53
.quad L_.str.54
.quad L_.str.55
.quad L_.str.56
.quad L_.str.57
.globl _g_size_select_table ## @g_size_select_table
.p2align 4
## 33-entry int table indexed directly by operand size: [8] = 1,
## [16] = 1, [32] = 2, all other indices 0.  Presumably maps an operand
## size in bits to a cycle/variant selector -- NOTE(review): confirm
## against the C source.
_g_size_select_table:
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 1 ## 0x1  [8]
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 1 ## 0x1  [16]
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 2 ## 0x2  [32]
.globl _g_ea_cycle_table ## @g_ea_cycle_table
.p2align 4
## 117 ints = 13 EA modes x 3 x 3: per-mode groups of three triples of
## cycle counts.  The leading .space 36 is the all-zero first mode (no EA).
## Presumably indexed [ea_mode][cpu_type][size selector] -- NOTE(review):
## confirm exact index meaning against the C source.
_g_ea_cycle_table:
.space 36
.long 0 ## 0x0
.long 4 ## 0x4
.long 8 ## 0x8
.long 0 ## 0x0
.long 4 ## 0x4
.long 8 ## 0x8
.long 0 ## 0x0
.long 4 ## 0x4
.long 4 ## 0x4
.long 0 ## 0x0
.long 4 ## 0x4
.long 8 ## 0x8
.long 0 ## 0x0
.long 4 ## 0x4
.long 8 ## 0x8
.long 0 ## 0x0
.long 4 ## 0x4
.long 4 ## 0x4
.long 0 ## 0x0
.long 4 ## 0x4
.long 8 ## 0x8
.long 0 ## 0x0
.long 4 ## 0x4
.long 8 ## 0x8
.long 0 ## 0x0
.long 4 ## 0x4
.long 4 ## 0x4
.long 0 ## 0x0
.long 6 ## 0x6
.long 10 ## 0xa
.long 0 ## 0x0
.long 6 ## 0x6
.long 10 ## 0xa
.long 0 ## 0x0
.long 5 ## 0x5
.long 5 ## 0x5
.long 0 ## 0x0
.long 6 ## 0x6
.long 10 ## 0xa
.long 0 ## 0x0
.long 6 ## 0x6
.long 10 ## 0xa
.long 0 ## 0x0
.long 5 ## 0x5
.long 5 ## 0x5
.long 0 ## 0x0
.long 8 ## 0x8
.long 12 ## 0xc
.long 0 ## 0x0
.long 8 ## 0x8
.long 12 ## 0xc
.long 0 ## 0x0
.long 5 ## 0x5
.long 5 ## 0x5
.long 0 ## 0x0
.long 10 ## 0xa
.long 14 ## 0xe
.long 0 ## 0x0
.long 10 ## 0xa
.long 14 ## 0xe
.long 0 ## 0x0
.long 7 ## 0x7
.long 7 ## 0x7
.long 0 ## 0x0
.long 8 ## 0x8
.long 12 ## 0xc
.long 0 ## 0x0
.long 8 ## 0x8
.long 12 ## 0xc
.long 0 ## 0x0
.long 4 ## 0x4
.long 4 ## 0x4
.long 0 ## 0x0
.long 12 ## 0xc
.long 16 ## 0x10
.long 0 ## 0x0
.long 12 ## 0xc
.long 16 ## 0x10
.long 0 ## 0x0
.long 4 ## 0x4
.long 4 ## 0x4
.long 0 ## 0x0
.long 8 ## 0x8
.long 12 ## 0xc
.long 0 ## 0x0
.long 8 ## 0x8
.long 12 ## 0xc
.long 0 ## 0x0
.long 5 ## 0x5
.long 5 ## 0x5
.long 0 ## 0x0
.long 10 ## 0xa
.long 14 ## 0xe
.long 0 ## 0x0
.long 10 ## 0xa
.long 14 ## 0xe
.long 0 ## 0x0
.long 7 ## 0x7
.long 7 ## 0x7
.long 0 ## 0x0
.long 4 ## 0x4
.long 8 ## 0x8
.long 0 ## 0x0
.long 4 ## 0x4
.long 8 ## 0x8
.long 0 ## 0x0
.long 2 ## 0x2
.long 4 ## 0x4
## Four 13-entry int tables of extra cycle counts, one entry per EA mode
## (same mode order as _g_ea_info_table), for the jmp, jsr, lea and pea
## instructions respectively.
.globl _g_jmp_cycle_table ## @g_jmp_cycle_table
.p2align 4
_g_jmp_cycle_table:
.long 0 ## 0x0
.long 4 ## 0x4
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 6 ## 0x6
.long 10 ## 0xa
.long 6 ## 0x6
.long 8 ## 0x8
.long 6 ## 0x6
.long 10 ## 0xa
.long 0 ## 0x0
.globl _g_jsr_cycle_table ## @g_jsr_cycle_table
.p2align 4
_g_jsr_cycle_table:
.long 0 ## 0x0
.long 4 ## 0x4
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 6 ## 0x6
.long 10 ## 0xa
.long 6 ## 0x6
.long 8 ## 0x8
.long 6 ## 0x6
.long 10 ## 0xa
.long 0 ## 0x0
.globl _g_lea_cycle_table ## @g_lea_cycle_table
.p2align 4
_g_lea_cycle_table:
.long 0 ## 0x0
.long 4 ## 0x4
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 8 ## 0x8
.long 12 ## 0xc
.long 8 ## 0x8
.long 12 ## 0xc
.long 8 ## 0x8
.long 12 ## 0xc
.long 0 ## 0x0
.globl _g_pea_cycle_table ## @g_pea_cycle_table
.p2align 4
_g_pea_cycle_table:
.long 0 ## 0x0
.long 6 ## 0x6
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 10 ## 0xa
.long 14 ## 0xe
.long 10 ## 0xa
.long 14 ## 0xe
.long 10 ## 0xa
.long 14 ## 0xe
.long 0 ## 0x0
## Two 13x3 int tables (13 EA modes x 3 size selectors) of extra cycle
## counts for the moves and clr instructions.  The .space 12 rows are
## all-zero triples for modes with no extra cost (see L_.str.62/.63).
.globl _g_moves_cycle_table ## @g_moves_cycle_table
.p2align 4
_g_moves_cycle_table:
.space 12
.long 0 ## 0x0
.long 4 ## 0x4
.long 6 ## 0x6
.long 0 ## 0x0
.long 4 ## 0x4
.long 6 ## 0x6
.long 0 ## 0x0
.long 4 ## 0x4
.long 6 ## 0x6
.long 0 ## 0x0
.long 6 ## 0x6
.long 12 ## 0xc
.long 0 ## 0x0
.long 6 ## 0x6
.long 12 ## 0xc
.long 0 ## 0x0
.long 12 ## 0xc
.long 16 ## 0x10
.long 0 ## 0x0
.long 16 ## 0x10
.long 20 ## 0x14
.long 0 ## 0x0
.long 12 ## 0xc
.long 16 ## 0x10
.long 0 ## 0x0
.long 16 ## 0x10
.long 20 ## 0x14
.space 12
.space 12
.space 12
.globl _g_clr_cycle_table ## @g_clr_cycle_table
.p2align 4
_g_clr_cycle_table:
.space 12
.long 0 ## 0x0
.long 4 ## 0x4
.long 6 ## 0x6
.long 0 ## 0x0
.long 4 ## 0x4
.long 6 ## 0x6
.long 0 ## 0x0
.long 4 ## 0x4
.long 6 ## 0x6
.long 0 ## 0x0
.long 6 ## 0x6
.long 8 ## 0x8
.long 0 ## 0x0
.long 6 ## 0x6
.long 8 ## 0x8
.long 0 ## 0x0
.long 8 ## 0x8
.long 10 ## 0xa
.long 0 ## 0x0
.long 10 ## 0xa
.long 14 ## 0xe
.long 0 ## 0x0
.long 8 ## 0x8
.long 10 ## 0xa
.long 0 ## 0x0
.long 10 ## 0xa
.long 14 ## 0xe
.space 12
.space 12
.space 12
## String-literal pool for the x86_64 half of the generator: error and
## format strings, opcode-name keys, M68KMAKE_* section markers, and the
## output file names.  The two interleaved .comm lines reserve the
## zero-initialized opcode input/output tables in common storage.
.section __TEXT,__cstring,cstring_literals
L_.str.58: ## @.str.58
.asciz "In %s, near or on line %d:\n\t"
L_.str.60: ## @.str.60
.asciz "Field too long"
L_.str.61: ## @.str.61
.asciz "Malformed integer value (%c)"
L_.str.62: ## @.str.62
.asciz "moves"
L_.str.63: ## @.str.63
.asciz "clr"
L_.str.64: ## @.str.64
.asciz "add"
L_.str.65: ## @.str.65
.asciz "er"
L_.str.66: ## @.str.66
.asciz "adda"
L_.str.67: ## @.str.67
.asciz "and"
L_.str.68: ## @.str.68
.asciz "or"
L_.str.69: ## @.str.69
.asciz "sub"
L_.str.70: ## @.str.70
.asciz "suba"
L_.str.71: ## @.str.71
.asciz "jmp"
L_.str.72: ## @.str.72
.asciz "jsr"
L_.str.73: ## @.str.73
.asciz "lea"
L_.str.74: ## @.str.74
.asciz "pea"
## 68000 bytes of zero-initialized common storage for the opcode input table.
.comm _g_opcode_input_table,68000,4 ## @g_opcode_input_table
L_.str.75: ## @.str.75
.asciz "illegal"
L_.str.76: ## @.str.76
.asciz "M68KMAKE_OP"
L_.str.78: ## @.str.78
.asciz "overflow in replace structure"
L_.str.79: ## @.str.79
.asciz "M68KMAKE"
L_.str.80: ## @.str.80
.asciz "Unknown M68KMAKE directive"
L_.str.81: ## @.str.81
.asciz "%s\n"
L_.str.82: ## @.str.82
.asciz "\n\n"
L_.str.83: ## @.str.83
.asciz "m68k_op_%s"
L_.str.84: ## @.str.84
.asciz "_%d"
L_.str.85: ## @.str.85
.asciz "."
L_.str.86: ## @.str.86
.asciz "_%s"
L_.str.87: ## @.str.87
.asciz "void %s(void);\n"
L_.str.88: ## @.str.88
.asciz "void %s(void)\n"
L_.str.89: ## @.str.89
.asciz "Opcode output table overflow"
## 204000 bytes of zero-initialized common storage for the opcode output table.
.comm _g_opcode_output_table,204000,4 ## @g_opcode_output_table
L_.str.90: ## @.str.90
.asciz "\t{%-28s, 0x%04x, 0x%04x, {"
L_.str.91: ## @.str.91
.asciz "%3d"
L_.str.92: ## @.str.92
.asciz ", "
L_.str.93: ## @.str.93
.asciz "}},\n"
L_.str.94: ## @.str.94
.asciz "%s"
L_.str.95: ## @.str.95
.asciz "EA_%s_8()"
L_.str.96: ## @.str.96
.asciz "M68KMAKE_GET_EA_AY_8"
L_.str.97: ## @.str.97
.asciz "EA_%s_16()"
L_.str.98: ## @.str.98
.asciz "M68KMAKE_GET_EA_AY_16"
L_.str.99: ## @.str.99
.asciz "EA_%s_32()"
L_.str.100: ## @.str.100
.asciz "M68KMAKE_GET_EA_AY_32"
L_.str.101: ## @.str.101
.asciz "OPER_%s_8()"
L_.str.102: ## @.str.102
.asciz "M68KMAKE_GET_OPER_AY_8"
L_.str.103: ## @.str.103
.asciz "OPER_%s_16()"
L_.str.104: ## @.str.104
.asciz "M68KMAKE_GET_OPER_AY_16"
L_.str.105: ## @.str.105
.asciz "OPER_%s_32()"
L_.str.106: ## @.str.106
.asciz "M68KMAKE_GET_OPER_AY_32"
L_.str.107: ## @.str.107
.asciz ".........."
L_.str.108: ## @.str.108
.asciz "COND_%s()"
L_.str.109: ## @.str.109
.asciz "COND_NOT_%s()"
L_.str.110: ## @.str.110
.asciz "M68KMAKE_CC"
L_.str.111: ## @.str.111
.asciz "M68KMAKE_NOT_CC"
L_.str.112: ## @.str.112
.asciz "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
L_.str.113: ## @.str.113
.asciz "Premature end of file when getting function name"
L_.str.114: ## @.str.114
.asciz "Function too long"
L_.str.115: ## @.str.115
.asciz "Premature end of file when getting function body"
L_.str.116: ## @.str.116
.asciz "Invalid M68KMAKE_OP format"
L_.str.118: ## @.str.118
.asciz "bcc"
L_.str.119: ## @.str.119
.asciz "scc"
L_.str.120: ## @.str.120
.asciz "dbcc"
L_.str.121: ## @.str.121
.asciz "trapcc"
L_.str.122: ## @.str.122
.asciz "M68KMAKE_TABLE_START"
L_.str.123: ## @.str.123
.asciz "Premature EOF while reading table"
L_.str.124: ## @.str.124
.asciz "Buffer overflow reading inserts"
L_.str.125: ## @.str.125
.asciz "Premature EOF while reading inserts"
L_.str.126: ## @.str.126
.asciz "\n\t\tMusashi v%s 68000, 68010, 68EC020, 68020 emulator\n"
L_.str.128: ## @.str.128
.asciz "/"
L_.str.129: ## @.str.129
.asciz "%s%s"
L_.str.130: ## @.str.130
.asciz "m68kops.h"
L_.str.131: ## @.str.131
.asciz "wt"
L_.str.132: ## @.str.132
.asciz "Unable to create prototype file (%s)\n"
L_.str.133: ## @.str.133
.asciz "m68kops.c"
L_.str.134: ## @.str.134
.asciz "Unable to create table file (%s)\n"
L_.str.135: ## @.str.135
.asciz "m68kopac.c"
L_.str.136: ## @.str.136
.asciz "Unable to create ops ac file (%s)\n"
L_.str.137: ## @.str.137
.asciz "m68kopdm.c"
L_.str.138: ## @.str.138
.asciz "Unable to create ops dm file (%s)\n"
L_.str.139: ## @.str.139
.asciz "m68kopnz.c"
L_.str.140: ## @.str.140
.asciz "Unable to create ops nz file (%s)\n"
L_.str.141: ## @.str.141
.asciz "rt"
L_.str.142: ## @.str.142
.asciz "can't open %s for input"
L_.str.143: ## @.str.143
.asciz "Premature EOF while reading input file"
L_.str.144: ## @.str.144
.asciz "M68KMAKE_PROTOTYPE_HEADER"
L_.str.145: ## @.str.145
.asciz "Duplicate prototype header"
L_.str.146: ## @.str.146
.asciz "%s\n\n"
L_.str.147: ## @.str.147
.asciz "M68KMAKE_TABLE_HEADER"
L_.str.148: ## @.str.148
.asciz "Duplicate table header"
L_.str.149: ## @.str.149
.asciz "M68KMAKE_OPCODE_HANDLER_HEADER"
L_.str.150: ## @.str.150
.asciz "Duplicate opcode handler header"
L_.str.151: ## @.str.151
.asciz "M68KMAKE_PROTOTYPE_FOOTER"
L_.str.152: ## @.str.152
.asciz "Duplicate prototype footer"
L_.str.153: ## @.str.153
.asciz "M68KMAKE_TABLE_FOOTER"
L_.str.154: ## @.str.154
.asciz "Duplicate table footer"
L_.str.155: ## @.str.155
.asciz "M68KMAKE_OPCODE_HANDLER_FOOTER"
L_.str.156: ## @.str.156
.asciz "Duplicate opcode handler footer"
L_.str.157: ## @.str.157
.asciz "M68KMAKE_TABLE_BODY"
L_.str.158: ## @.str.158
.asciz "Table body encountered before prototype header"
L_.str.159: ## @.str.159
.asciz "Table body encountered before table header"
L_.str.160: ## @.str.160
.asciz "Table body encountered before opcode handler header"
L_.str.161: ## @.str.161
.asciz "Duplicate table body"
L_.str.162: ## @.str.162
.asciz "M68KMAKE_OPCODE_HANDLER_BODY"
L_.str.163: ## @.str.163
.asciz "Opcode handlers encountered before prototype header"
L_.str.164: ## @.str.164
.asciz "Opcode handlers encountered before table header"
L_.str.165: ## @.str.165
.asciz "Opcode handlers encountered before opcode handler header"
L_.str.166: ## @.str.166
.asciz "Opcode handlers encountered before table body"
L_.str.167: ## @.str.167
.asciz "Duplicate opcode handler section"
L_.str.168: ## @.str.168
.asciz "M68KMAKE_END"
L_.str.169: ## @.str.169
.asciz "Missing prototype header"
L_.str.170: ## @.str.170
.asciz "Missing prototype footer"
L_.str.171: ## @.str.171
.asciz "Missing table header"
L_.str.172: ## @.str.172
.asciz "Missing table footer"
L_.str.173: ## @.str.173
.asciz "Missing table body"
L_.str.174: ## @.str.174
.asciz "Missing opcode handler header"
L_.str.175: ## @.str.175
.asciz "Missing opcode handler footer"
L_.str.176: ## @.str.176
.asciz "Missing opcode handler body"
L_.str.177: ## @.str.177
.asciz "Unknown section identifier: %s"
L_.str.178: ## @.str.178
.asciz "Generated %d opcode handlers from %d primitives\n"
L_str: ## @str
.asciz "\t\tCopyright 1998-2000 Karl Stenerud (karl@mame.net)\n"
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
; void error_exit(const char *fmt, ...)  -- never returns.
; Prints "In %s, near or on line %d:\n\t" to stderr using the globals
; g_input_filename and g_line_number, then vfprintf's the caller's
; format/varargs, writes a trailing '\n', fcloses each of the six global
; FILE* handles that is non-NULL, and calls exit(1).
; AAPCS64 (Darwin): x0 = fmt; varargs are spilled by the caller above
; the frame, so x29+16 is used as the va_list pointer.
.globl _error_exit ; -- Begin function error_exit
.p2align 2
_error_exit: ; @error_exit
.cfi_startproc
; %bb.0:
sub sp, sp, #64
.cfi_def_cfa_offset 64
stp x20, x19, [sp, #32] ; 16-byte Folded Spill
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x0 ; x19 = caller's format string
Lloh0:
adrp x20, ___stderrp@GOTPAGE
Lloh1:
ldr x20, [x20, ___stderrp@GOTPAGEOFF] ; x20 = &stderr (reused below)
ldr x0, [x20]
Lloh2:
adrp x8, _g_line_number@PAGE
Lloh3:
ldr w8, [x8, _g_line_number@PAGEOFF]
Lloh4:
adrp x9, _g_input_filename@PAGE
Lloh5:
add x9, x9, _g_input_filename@PAGEOFF
stp x9, x8, [sp] ; stack varargs: (filename, line number)
Lloh6:
adrp x1, l_.str.58@PAGE
Lloh7:
add x1, x1, l_.str.58@PAGEOFF ; "In %s, near or on line %d:\n\t"
bl _fprintf
add x8, x29, #16 ; va_list = first caller-spilled vararg slot
str x8, [sp, #24]
ldr x0, [x20]
add x2, x29, #16
mov x1, x19
bl _vfprintf ; print the caller-supplied message
ldr x1, [x20]
mov w0, #10 ; '\n'
bl _fputc
; Close every global file handle that was opened (NULL checks below).
Lloh8:
adrp x8, _g_prototype_file@PAGE
Lloh9:
ldr x0, [x8, _g_prototype_file@PAGEOFF]
cbz x0, LBB0_2
; %bb.1:
bl _fclose
LBB0_2:
Lloh10:
adrp x8, _g_table_file@PAGE
Lloh11:
ldr x0, [x8, _g_table_file@PAGEOFF]
cbz x0, LBB0_4
; %bb.3:
bl _fclose
LBB0_4:
Lloh12:
adrp x8, _g_ops_ac_file@PAGE
Lloh13:
ldr x0, [x8, _g_ops_ac_file@PAGEOFF]
cbz x0, LBB0_6
; %bb.5:
bl _fclose
LBB0_6:
Lloh14:
adrp x8, _g_ops_dm_file@PAGE
Lloh15:
ldr x0, [x8, _g_ops_dm_file@PAGEOFF]
cbz x0, LBB0_8
; %bb.7:
bl _fclose
LBB0_8:
Lloh16:
adrp x8, _g_ops_nz_file@PAGE
Lloh17:
ldr x0, [x8, _g_ops_nz_file@PAGEOFF]
cbz x0, LBB0_10
; %bb.9:
bl _fclose
LBB0_10:
Lloh18:
adrp x8, _g_input_file@PAGE
Lloh19:
ldr x0, [x8, _g_input_file@PAGEOFF]
cbz x0, LBB0_12
; %bb.11:
bl _fclose
LBB0_12:
mov w0, #1
bl _exit ; no epilogue needed: exit() does not return
.loh AdrpLdr Lloh8, Lloh9
.loh AdrpAdd Lloh6, Lloh7
.loh AdrpAdd Lloh4, Lloh5
.loh AdrpLdr Lloh2, Lloh3
.loh AdrpLdrGot Lloh0, Lloh1
.loh AdrpLdr Lloh10, Lloh11
.loh AdrpLdr Lloh12, Lloh13
.loh AdrpLdr Lloh14, Lloh15
.loh AdrpLdr Lloh16, Lloh17
.loh AdrpLdr Lloh18, Lloh19
.cfi_endproc
; -- End function
; void perror_exit(const char *fmt, ...)  -- never returns.
; vfprintf's the caller's format/varargs to stderr, calls perror() with
; l_.str.1 (a literal defined outside this chunk -- presumably an empty
; or short prefix; confirm against the full file), fcloses each of the
; six global FILE* handles that is non-NULL, and calls exit(1).
.globl _perror_exit ; -- Begin function perror_exit
.p2align 2
_perror_exit: ; @perror_exit
.cfi_startproc
; %bb.0:
sub sp, sp, #32
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
mov x1, x0 ; x1 = format string for vfprintf
add x8, x29, #16 ; va_list = first caller-spilled vararg slot
str x8, [sp, #8]
Lloh20:
adrp x8, ___stderrp@GOTPAGE
Lloh21:
ldr x8, [x8, ___stderrp@GOTPAGEOFF]
Lloh22:
ldr x0, [x8]
add x2, x29, #16
bl _vfprintf
Lloh23:
adrp x0, l_.str.1@PAGE
Lloh24:
add x0, x0, l_.str.1@PAGEOFF
bl _perror ; append strerror(errno) text
; Close every global file handle that was opened (NULL checks below).
Lloh25:
adrp x8, _g_prototype_file@PAGE
Lloh26:
ldr x0, [x8, _g_prototype_file@PAGEOFF]
cbz x0, LBB1_2
; %bb.1:
bl _fclose
LBB1_2:
Lloh27:
adrp x8, _g_table_file@PAGE
Lloh28:
ldr x0, [x8, _g_table_file@PAGEOFF]
cbz x0, LBB1_4
; %bb.3:
bl _fclose
LBB1_4:
Lloh29:
adrp x8, _g_ops_ac_file@PAGE
Lloh30:
ldr x0, [x8, _g_ops_ac_file@PAGEOFF]
cbz x0, LBB1_6
; %bb.5:
bl _fclose
LBB1_6:
Lloh31:
adrp x8, _g_ops_dm_file@PAGE
Lloh32:
ldr x0, [x8, _g_ops_dm_file@PAGEOFF]
cbz x0, LBB1_8
; %bb.7:
bl _fclose
LBB1_8:
Lloh33:
adrp x8, _g_ops_nz_file@PAGE
Lloh34:
ldr x0, [x8, _g_ops_nz_file@PAGEOFF]
cbz x0, LBB1_10
; %bb.9:
bl _fclose
LBB1_10:
Lloh35:
adrp x8, _g_input_file@PAGE
Lloh36:
ldr x0, [x8, _g_input_file@PAGEOFF]
cbz x0, LBB1_12
; %bb.11:
bl _fclose
LBB1_12:
mov w0, #1
bl _exit ; no epilogue needed: exit() does not return
.loh AdrpLdr Lloh25, Lloh26
.loh AdrpAdd Lloh23, Lloh24
.loh AdrpLdrGotLdr Lloh20, Lloh21, Lloh22
.loh AdrpLdr Lloh27, Lloh28
.loh AdrpLdr Lloh29, Lloh30
.loh AdrpLdr Lloh31, Lloh32
.loh AdrpLdr Lloh33, Lloh34
.loh AdrpLdr Lloh35, Lloh36
.cfi_endproc
; -- End function
; int check_strsncpy(char *dst, const char *src, int maxlength)
; Copies src to dst up to (but not including) the first NUL or space:
; (c | 0x20) == 32 matches exactly c == 0 and c == ' '.  NUL-terminates
; dst and returns the number of characters copied.  If the copy index
; would exceed maxlength, calls error_exit("Field too long") instead
; (error_exit never returns).
.globl _check_strsncpy ; -- Begin function check_strsncpy
.p2align 2
_check_strsncpy: ; @check_strsncpy
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
; kill: def $w2 killed $w2 def $x2
mov x8, x0 ; x8 = dst
mov x0, #0 ; x0 = copy index (also the return value)
sxtw x9, w2 ; x9 = maxlength, sign-extended
LBB2_1: ; =>This Inner Loop Header: Depth=1
ldrb w10, [x1, x0]
orr w11, w10, #0x20
cmp w11, #32 ; terminator? (NUL or ' ')
b.eq LBB2_4
; %bb.2: ; in Loop: Header=BB2_1 Depth=1
strb w10, [x8, x0]
add x0, x0, #1
cmp x0, x9
b.le LBB2_1 ; keep copying while index <= maxlength
; %bb.3: ; field overran the limit -- fatal
Lloh37:
adrp x0, l_.str.60@PAGE
Lloh38:
add x0, x0, l_.str.60@PAGEOFF ; "Field too long"
bl _error_exit
LBB2_4:
strb wzr, [x8, x0] ; NUL-terminate dst
; kill: def $w0 killed $w0 killed $x0
ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
ret
.loh AdrpAdd Lloh37, Lloh38
.cfi_endproc
; -- End function
; int check_strcncpy(char *dst, const char *src, char delim, int maxlength)
; Copies src to dst up to (but not including) the first NUL or the given
; delimiter character (ccmp folds the two tests into one branch).
; NUL-terminates dst and returns the number of characters copied; calls
; error_exit("Field too long") if the index would exceed maxlength.
.globl _check_strcncpy ; -- Begin function check_strcncpy
.p2align 2
_check_strcncpy: ; @check_strcncpy
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
; kill: def $w3 killed $w3 def $x3
mov x8, x0 ; x8 = dst
mov x0, #0 ; x0 = copy index (also the return value)
and w9, w2, #0xff ; w9 = delimiter as unsigned byte
sxtw x10, w3 ; x10 = maxlength, sign-extended
LBB3_1: ; =>This Inner Loop Header: Depth=1
ldrb w11, [x1, x0]
cmp w11, #0
ccmp w11, w9, #4, ne ; Z set if c == 0 or c == delim
b.eq LBB3_4
; %bb.2: ; in Loop: Header=BB3_1 Depth=1
strb w11, [x8, x0]
add x0, x0, #1
cmp x0, x10
b.le LBB3_1 ; keep copying while index <= maxlength
; %bb.3: ; field overran the limit -- fatal
Lloh39:
adrp x0, l_.str.60@PAGE
Lloh40:
add x0, x0, l_.str.60@PAGEOFF ; "Field too long"
bl _error_exit
LBB3_4:
strb wzr, [x8, x0] ; NUL-terminate dst
; kill: def $w0 killed $w0 killed $x0
ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
ret
.loh AdrpAdd Lloh39, Lloh40
.cfi_endproc
; -- End function
; int check_atoi(const char *str, int *result)
; Parses a run of decimal digits from str.  If the character after the
; digits is NUL or ' ' ((c | 0x20) == 32), stores the value through
; result and returns the number of characters consumed.  Any other
; terminator is fatal: error_exit("Malformed integer value (%c)") with
; the offending character as a stack vararg.
.globl _check_atoi ; -- Begin function check_atoi
.p2align 2
_check_atoi: ; @check_atoi
.cfi_startproc
; %bb.0:
sub sp, sp, #32
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
ldrb w8, [x0]
sub w9, w8, #48
cmp w9, #9 ; first char a digit '0'..'9'?
b.hi LBB4_3
; %bb.1:
mov w9, #0 ; w9 = accumulated value
mov w11, #10
mov x12, x8 ; x12 = previous char (digit being folded in)
mov x10, x0 ; x10 = scan cursor
LBB4_2: ; =>This Inner Loop Header: Depth=1
ldrb w8, [x10, #1]! ; pre-increment load of the next char
mul w9, w9, w11
add w9, w9, w12, uxtb
sub w9, w9, #48 ; value = value*10 + (digit - '0')
sub w13, w8, #48
mov x12, x8
cmp w13, #10
b.lo LBB4_2 ; continue while next char is a digit
b LBB4_4
LBB4_3:
mov w9, #0 ; no digits: value 0, cursor at start
mov x10, x0
LBB4_4:
orr w11, w8, #0x20
cmp w11, #32 ; terminator must be NUL or ' '
b.ne LBB4_6
; %bb.5:
str w9, [x1] ; *result = parsed value
sub w0, w10, w0 ; return chars consumed
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #32
ret
LBB4_6: ; bad terminator -- fatal
sxtb x8, w8
str x8, [sp] ; stack vararg: the offending char
Lloh41:
adrp x0, l_.str.61@PAGE
Lloh42:
add x0, x0, l_.str.61@PAGEOFF ; "Malformed integer value (%c)"
bl _error_exit
.loh AdrpAdd Lloh41, Lloh42
.cfi_endproc
; -- End function
; int skip_spaces(const char *str)
; Returns the number of leading ' ' characters in str.  Leaf function:
; the counter starts at -1 and is incremented once per iteration,
; including the final non-space character, which yields the space count.
.globl _skip_spaces ; -- Begin function skip_spaces
.p2align 2
_skip_spaces: ; @skip_spaces
.cfi_startproc
; %bb.0:
mov x8, x0 ; x8 = scan cursor
mov w0, #-1 ; counter (pre-decremented; first pass makes it 0)
LBB5_1: ; =>This Inner Loop Header: Depth=1
ldrb w9, [x8], #1 ; post-increment load
add w0, w0, #1
cmp w9, #32
b.eq LBB5_1
; %bb.2:
ret
.cfi_endproc
; -- End function
; int num_bits(int value)
; Branch-free SWAR population count.  The first pair-summing mask is
; 0x5555 (16 bits wide, not 0x55555555), so the result is only the bit
; count of the low 16 bits -- consistent with 16-bit m68k opcode masks;
; confirm callers never pass wider values.
.globl _num_bits ; -- Begin function num_bits
.p2align 2
_num_bits: ; @num_bits
.cfi_startproc
; %bb.0:
mov w8, #21845 ; 0x5555: pair mask for the low 16 bits
and w9, w8, w0, lsr #1
and w8, w0, w8
add w8, w9, w8 ; 2-bit partial sums
lsr w9, w8, #2
and w9, w9, #0x33333333
and w8, w8, #0x33333333
add w8, w9, w8 ; 4-bit partial sums
lsr w9, w8, #4
and w9, w9, #0xffffff0f
and w8, w8, #0xf0f0f0f
add w8, w9, w8 ; 8-bit partial sums
and w9, w8, #0xf
add w0, w9, w8, lsr #8 ; fold the two byte counts together
ret
.cfi_endproc
; -- End function
; int atoh(const char *str)
; Parses a hexadecimal number using digits '0'-'9' and lowercase
; 'a'-'f' only (uppercase stops the scan).  Returns the accumulated
; value; stops at the first character outside both ranges.
.globl _atoh ; -- Begin function atoh
.p2align 2
_atoh: ; @atoh
.cfi_startproc
; %bb.0:
mov w8, #0 ; w8 = accumulated value
b LBB7_3
LBB7_1: ; in Loop: Header=BB7_3 Depth=1
mov w10, #-48 ; digit case: bias = -'0'
LBB7_2: ; in Loop: Header=BB7_3 Depth=1
add w8, w10, w8, lsl #4 ; value = value*16 + bias
add w8, w8, w9 ; ... + raw char code
add x0, x0, #1
LBB7_3: ; =>This Inner Loop Header: Depth=1
ldrsb w9, [x0]
sub w10, w9, #48
cmp w10, #10 ; '0'..'9'?
b.lo LBB7_1
; %bb.4: ; in Loop: Header=BB7_3 Depth=1
sub w10, w9, #97
cmp w10, #5 ; 'a'..'f'?
b.hi LBB7_6
; %bb.5: ; in Loop: Header=BB7_3 Depth=1
mov w10, #-87 ; hex-letter case: bias = -'a' + 10
b LBB7_2
LBB7_6:
mov x0, x8 ; return the value
ret
.cfi_endproc
; -- End function
; int fgetline(char *buffer, int length, FILE *file)
; Reads one line with fgets.  Returns -1 at EOF/error.  Otherwise:
; if the first character is '\r' the buffer is shifted left by one
; (memcpy of length-1 bytes), trailing '\r'/'\n' characters are
; stripped, the line is NUL-terminated, the global g_line_number is
; incremented, and the stripped length is returned.
.globl _fgetline ; -- Begin function fgetline
.p2align 2
_fgetline: ; @fgetline
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x20, x1 ; x20 = length
mov x19, x0 ; x19 = buffer
bl _fgets ; fgets(buffer, length, file) -- args pass through
cbz x0, LBB8_8 ; NULL => EOF/error
; %bb.1:
ldrb w8, [x19]
cmp w8, #13 ; leading '\r'?
b.ne LBB8_3
; %bb.2: ; drop it by shifting the buffer left one byte
add x1, x19, #1
sub w8, w20, #1
sxtw x2, w8
mov x0, x19
bl _memcpy
LBB8_3:
mov x0, x19
bl _strlen
cbz w0, LBB8_7 ; empty line: nothing to strip
; %bb.4:
add x8, x19, w0, sxtw
sub x8, x8, #1 ; x8 = last character
LBB8_5: ; =>This Inner Loop Header: Depth=1
ldrb w9, [x8]
cmp w9, #13
ccmp w9, #10, #4, ne ; '\r' or '\n'?
b.ne LBB8_7
; %bb.6: ; in Loop: Header=BB8_5 Depth=1
sub x8, x8, #1
sub w0, w0, #1 ; shrink length while trailing CR/LF
cbnz w0, LBB8_5
LBB8_7:
strb wzr, [x19, w0, sxtw] ; NUL-terminate at the new length
adrp x8, _g_line_number@PAGE
ldr w9, [x8, _g_line_number@PAGEOFF]
add w9, w9, #1 ; ++g_line_number
str w9, [x8, _g_line_number@PAGEOFF]
b LBB8_9
LBB8_8:
mov w0, #-1 ; EOF sentinel
LBB8_9:
; kill: def $w0 killed $w0 killed $x0
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
; int get_oper_cycles(op(x0), ea_mode(w1), operand_index(w2))
; Computes the cycle cost of one operand of an opcode record.
; Register roles: x20 = record base (its name string starts at offset 0),
; x19/w19 = ea mode, x21 = operand index (0 or 1).
; Returns 0 when the spec byte at [op + 60 + index] is '.'.
; Otherwise the size byte at [op + 30] selects a column via
; g_size_select_table, and the result is the per-operand base-cycles
; byte at [op + 64 + index] plus a table lookup chosen by opcode name:
;   "moves" -> g_moves_cycle_table; "clr" -> g_clr_cycle_table (index 1);
;   add/and/or/sub with spec "er" at op+31, adda, suba (index 0,
;   ea mode 12) -> g_ea_cycle_table row + 432 with +2 extra;
;   "jmp"/"jsr"/"lea"/"pea" -> their 1-D mode tables;
;   anything else -> g_ea_cycle_table[ea_mode][index][size].
; Exact row strides (36 and 12 bytes) are visible in the madd/smaddl math.
.globl _get_oper_cycles ; -- Begin function get_oper_cycles
.p2align 2
_get_oper_cycles: ; @get_oper_cycles
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
; kill: def $w2 killed $w2 def $x2
sxtw x21, w2
add x8, x0, x21
ldrb w8, [x8, #60] ; spec byte for this operand
cmp w8, #46 ; '.' means no operand: cost 0
b.ne LBB9_2
; %bb.1:
mov w0, #0
b LBB9_29
LBB9_2:
mov x19, x1
mov x20, x0
ldrb w8, [x0, #30] ; size character of the opcode
Lloh43:
adrp x9, _g_size_select_table@PAGE
Lloh44:
add x9, x9, _g_size_select_table@PAGEOFF
ldrsw x22, [x9, x8, lsl #2] ; x22 = size column (0/1/2)
cmp w2, #1
b.gt LBB9_23 ; index > 1: default table
; %bb.3:
b.ne LBB9_7 ; index 0: add/sub family checks
; %bb.4: ; index 1: "moves" / "clr" special cases
Lloh45:
adrp x1, l_.str.62@PAGE
Lloh46:
add x1, x1, l_.str.62@PAGEOFF ; "moves"
mov x0, x20
bl _strcmp
cbz w0, LBB9_25
; %bb.5:
Lloh47:
adrp x1, l_.str.63@PAGE
Lloh48:
add x1, x1, l_.str.63@PAGEOFF ; "clr"
mov x0, x20
bl _strcmp
cbnz w0, LBB9_19
; %bb.6:
add x8, x20, x21
Lloh49:
adrp x9, _g_clr_cycle_table@PAGE
Lloh50:
add x9, x9, _g_clr_cycle_table@PAGEOFF
b LBB9_26
LBB9_7:
cmp w19, #12 ; add/sub family only applies at ea mode 12
b.ne LBB9_19
; %bb.8:
cbnz w2, LBB9_19 ; ... and only for operand index 0
; %bb.9:
Lloh51:
adrp x1, l_.str.64@PAGE
Lloh52:
add x1, x1, l_.str.64@PAGEOFF ; "add"
mov x0, x20
bl _strcmp
cbnz w0, LBB9_11
; %bb.10:
add x0, x20, #31 ; spec string at record offset 31
Lloh53:
adrp x1, l_.str.65@PAGE
Lloh54:
add x1, x1, l_.str.65@PAGEOFF ; "er"
bl _strcmp
cbz w0, LBB9_24
LBB9_11:
Lloh55:
adrp x1, l_.str.66@PAGE
Lloh56:
add x1, x1, l_.str.66@PAGEOFF ; "adda"
mov x0, x20
bl _strcmp
cbz w0, LBB9_24
; %bb.12:
Lloh57:
adrp x1, l_.str.67@PAGE
Lloh58:
add x1, x1, l_.str.67@PAGEOFF ; "and"
mov x0, x20
bl _strcmp
cbnz w0, LBB9_14
; %bb.13:
add x0, x20, #31
Lloh59:
adrp x1, l_.str.65@PAGE
Lloh60:
add x1, x1, l_.str.65@PAGEOFF ; "er"
bl _strcmp
cbz w0, LBB9_24
LBB9_14:
Lloh61:
adrp x1, l_.str.68@PAGE
Lloh62:
add x1, x1, l_.str.68@PAGEOFF ; "or"
mov x0, x20
bl _strcmp
cbnz w0, LBB9_16
; %bb.15:
add x0, x20, #31
Lloh63:
adrp x1, l_.str.65@PAGE
Lloh64:
add x1, x1, l_.str.65@PAGEOFF ; "er"
bl _strcmp
cbz w0, LBB9_24
LBB9_16:
Lloh65:
adrp x1, l_.str.69@PAGE
Lloh66:
add x1, x1, l_.str.69@PAGEOFF ; "sub"
mov x0, x20
bl _strcmp
cbnz w0, LBB9_18
; %bb.17:
add x0, x20, #31
Lloh67:
adrp x1, l_.str.65@PAGE
Lloh68:
add x1, x1, l_.str.65@PAGEOFF ; "er"
bl _strcmp
cbz w0, LBB9_24
LBB9_18:
Lloh69:
adrp x1, l_.str.70@PAGE
Lloh70:
add x1, x1, l_.str.70@PAGEOFF ; "suba"
mov x0, x20
bl _strcmp
cbz w0, LBB9_24
LBB9_19: ; jmp/jsr/lea/pea name checks
Lloh71:
adrp x1, l_.str.71@PAGE
Lloh72:
add x1, x1, l_.str.71@PAGEOFF ; "jmp"
mov x0, x20
bl _strcmp
cbz w0, LBB9_30
; %bb.20:
Lloh73:
adrp x1, l_.str.72@PAGE
Lloh74:
add x1, x1, l_.str.72@PAGEOFF ; "jsr"
mov x0, x20
bl _strcmp
cbz w0, LBB9_31
; %bb.21:
Lloh75:
adrp x1, l_.str.73@PAGE
Lloh76:
add x1, x1, l_.str.73@PAGEOFF ; "lea"
mov x0, x20
bl _strcmp
cbz w0, LBB9_32
; %bb.22:
Lloh77:
adrp x1, l_.str.74@PAGE
Lloh78:
add x1, x1, l_.str.74@PAGEOFF ; "pea"
mov x0, x20
bl _strcmp
cbz w0, LBB9_33
LBB9_23: ; default: g_ea_cycle_table[ea_mode][index][size]
add x8, x20, x21
ldrb w8, [x8, #64] ; per-operand base cycles
Lloh79:
adrp x9, _g_ea_cycle_table@PAGE
Lloh80:
add x9, x9, _g_ea_cycle_table@PAGEOFF
mov w10, #36 ; 36-byte row per ea mode
smaddl x9, w19, w10, x9
mov w10, #12 ; 12-byte sub-row per operand index
madd x9, x21, x10, x9
b LBB9_27
LBB9_24: ; add/sub family: fixed row (+432) with +2 extra
add x8, x20, x21
ldrb w8, [x8, #64]
Lloh81:
adrp x9, _g_ea_cycle_table@PAGE
Lloh82:
add x9, x9, _g_ea_cycle_table@PAGEOFF
mov w10, #12
madd x9, x21, x10, x9
add x9, x9, x22, lsl #2
ldr w9, [x9, #432]
add w8, w8, w9
add w0, w8, #2
b LBB9_29
LBB9_25: ; "moves" table lookup
add x8, x20, x21
Lloh83:
adrp x9, _g_moves_cycle_table@PAGE
Lloh84:
add x9, x9, _g_moves_cycle_table@PAGEOFF
LBB9_26: ; shared: row = table + ea_mode*12
ldrb w8, [x8, #64]
mov w10, #12
smaddl x9, w19, w10, x9
LBB9_27:
ldr w9, [x9, x22, lsl #2] ; pick the size column
LBB9_28:
add w0, w9, w8 ; table cycles + base cycles
LBB9_29:
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
LBB9_30: ; jmp: 1-D table indexed by ea mode
add x8, x20, x21
ldrb w8, [x8, #64]
Lloh85:
adrp x9, _g_jmp_cycle_table@PAGE
Lloh86:
add x9, x9, _g_jmp_cycle_table@PAGEOFF
ldr w9, [x9, w19, sxtw #2]
b LBB9_28
LBB9_31: ; jsr: 1-D table indexed by ea mode
add x8, x20, x21
ldrb w8, [x8, #64]
Lloh87:
adrp x9, _g_jsr_cycle_table@PAGE
Lloh88:
add x9, x9, _g_jsr_cycle_table@PAGEOFF
ldr w9, [x9, w19, sxtw #2]
b LBB9_28
LBB9_32: ; lea: 1-D table indexed by ea mode
add x8, x20, x21
ldrb w8, [x8, #64]
Lloh89:
adrp x9, _g_lea_cycle_table@PAGE
Lloh90:
add x9, x9, _g_lea_cycle_table@PAGEOFF
ldr w9, [x9, w19, sxtw #2]
b LBB9_28
LBB9_33: ; pea: 1-D table indexed by ea mode
add x8, x20, x21
ldrb w8, [x8, #64]
Lloh91:
adrp x9, _g_pea_cycle_table@PAGE
Lloh92:
add x9, x9, _g_pea_cycle_table@PAGEOFF
ldr w9, [x9, w19, sxtw #2]
b LBB9_28
.loh AdrpAdd Lloh43, Lloh44
.loh AdrpAdd Lloh45, Lloh46
.loh AdrpAdd Lloh47, Lloh48
.loh AdrpAdd Lloh49, Lloh50
.loh AdrpAdd Lloh51, Lloh52
.loh AdrpAdd Lloh53, Lloh54
.loh AdrpAdd Lloh55, Lloh56
.loh AdrpAdd Lloh57, Lloh58
.loh AdrpAdd Lloh59, Lloh60
.loh AdrpAdd Lloh61, Lloh62
.loh AdrpAdd Lloh63, Lloh64
.loh AdrpAdd Lloh65, Lloh66
.loh AdrpAdd Lloh67, Lloh68
.loh AdrpAdd Lloh69, Lloh70
.loh AdrpAdd Lloh71, Lloh72
.loh AdrpAdd Lloh73, Lloh74
.loh AdrpAdd Lloh75, Lloh76
.loh AdrpAdd Lloh77, Lloh78
.loh AdrpAdd Lloh79, Lloh80
.loh AdrpAdd Lloh81, Lloh82
.loh AdrpAdd Lloh83, Lloh84
.loh AdrpAdd Lloh85, Lloh86
.loh AdrpAdd Lloh87, Lloh88
.loh AdrpAdd Lloh89, Lloh90
.loh AdrpAdd Lloh91, Lloh92
.cfi_endproc
; -- End function
; record *find_opcode(name(x0), size(w1), spec_proc(x2), spec_ea(x3))
; Linear search over _g_opcode_input_table: records are 68 bytes, with
; name at +0, a size byte at +30, spec_proc string at +31, and spec_ea
; string at +35 (x23 walks the table at the +35 offset; x24 = record
; base).  Returns the first record whose four fields all match.
; NOTE(review): the loop has no end-of-table bound -- it relies on a
; matching record existing; confirm callers guarantee that.
.globl _find_opcode ; -- Begin function find_opcode
.p2align 2
_find_opcode: ; @find_opcode
.cfi_startproc
; %bb.0:
stp x24, x23, [sp, #-64]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 64
stp x22, x21, [sp, #16] ; 16-byte Folded Spill
stp x20, x19, [sp, #32] ; 16-byte Folded Spill
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
mov x19, x3 ; x19 = wanted spec_ea
mov x20, x2 ; x20 = wanted spec_proc
mov x21, x1 ; x21 = wanted size
mov x22, x0 ; x22 = wanted name
Lloh93:
adrp x8, _g_opcode_input_table@GOTPAGE
Lloh94:
ldr x8, [x8, _g_opcode_input_table@GOTPAGEOFF]
add x23, x8, #35 ; cursor at the spec_ea field of record 0
b LBB10_2
LBB10_1: ; in Loop: Header=BB10_2 Depth=1
add x23, x23, #68 ; next 68-byte record
LBB10_2: ; =>This Inner Loop Header: Depth=1
sub x24, x23, #35 ; x24 = record base (name field)
mov x0, x22
mov x1, x24
bl _strcmp ; name match?
cbnz w0, LBB10_1
; %bb.3: ; in Loop: Header=BB10_2 Depth=1
ldurb w8, [x23, #-5] ; size byte at record +30
cmp w8, w21
b.ne LBB10_1
; %bb.4: ; in Loop: Header=BB10_2 Depth=1
sub x1, x23, #4 ; spec_proc string at record +31
mov x0, x20
bl _strcmp
cbnz w0, LBB10_1
; %bb.5: ; in Loop: Header=BB10_2 Depth=1
mov x0, x19
mov x1, x23 ; spec_ea string at record +35
bl _strcmp
cbnz w0, LBB10_1
; %bb.6: ; all four fields matched
mov x0, x24
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
ldp x24, x23, [sp], #64 ; 16-byte Folded Reload
ret
.loh AdrpLdrGot Lloh93, Lloh94
.cfi_endproc
; -- End function
; record *find_illegal_opcode(void)
; Scans _g_opcode_input_table (68-byte records, name at offset 0) for
; the record named "illegal" and returns its address.  Like
; find_opcode, the loop assumes the record exists (no table bound).
.globl _find_illegal_opcode ; -- Begin function find_illegal_opcode
.p2align 2
_find_illegal_opcode: ; @find_illegal_opcode
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
Lloh95:
adrp x19, _g_opcode_input_table@GOTPAGE
Lloh96:
ldr x19, [x19, _g_opcode_input_table@GOTPAGEOFF] ; x19 = table cursor
Lloh97:
adrp x20, l_.str.75@PAGE
Lloh98:
add x20, x20, l_.str.75@PAGEOFF ; "illegal"
LBB11_1: ; =>This Inner Loop Header: Depth=1
mov x0, x19
mov x1, x20
bl _strcmp
add x19, x19, #68 ; advance before the test (undone on match)
cbnz w0, LBB11_1
; %bb.2:
sub x0, x19, #68 ; back up to the matching record
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.loh AdrpAdd Lloh97, Lloh98
.loh AdrpLdrGot Lloh95, Lloh96
.cfi_endproc
; -- End function
; int extract_opcode_info(src(x0), name(x1), size(x2), spec_proc(x3), spec_ea(x4))
; Parses a "M68KMAKE_OP(name, size, spec_proc, spec_ea)" line.
; Locates "M68KMAKE_OP" with strstr, then copies the name field
; (up to 30 chars, terminated by ',' or NUL -- the copy loop is fully
; unrolled by the compiler, one cbz/cmp-44 pair per character), skips
; spaces, parses the size with atoi into *x21, finds the next ',' with
; strchr, skips spaces, copies spec_proc (up to 4 chars, ','-terminated)
; and spec_ea (up to 5 chars, ')'-terminated, char 41).
; Returns 1 when the final delimiter is ')', 0 on any malformed input;
; a field that fills its buffer without hitting its delimiter falls
; through to LBB12_96 and error_exit("Field too long").
; Register roles: x22 = name buf, x21 = size out, x20 = spec_proc buf,
; x19 = spec_ea buf; x9 tracks the current write cursor.
.globl _extract_opcode_info ; -- Begin function extract_opcode_info
.p2align 2
_extract_opcode_info: ; @extract_opcode_info
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x19, x4
mov x20, x3
mov x21, x2
mov x22, x1
Lloh99:
adrp x1, l_.str.76@PAGE
Lloh100:
add x1, x1, l_.str.76@PAGEOFF ; "M68KMAKE_OP"
bl _strstr
cbz x0, LBB12_95 ; marker absent: return 0 (w0 already 0)
; %bb.1: ; unrolled copy of the name field (chars at src+12..src+42)
mov x8, x0
ldrb w10, [x8, #12]!
mov x9, x22
cbz w10, LBB12_63
; %bb.2:
mov x9, x22
cmp w10, #44
b.eq LBB12_63
; %bb.3:
mov x9, x22
strb w10, [x9], #1
ldrb w10, [x0, #13]
cbz w10, LBB12_63
; %bb.4:
cmp w10, #44
b.eq LBB12_63
; %bb.5:
add x9, x22, #2
strb w10, [x22, #1]
ldrb w10, [x0, #14]
cbz w10, LBB12_63
; %bb.6:
cmp w10, #44
b.eq LBB12_63
; %bb.7:
add x9, x22, #3
strb w10, [x22, #2]
ldrb w10, [x0, #15]
cbz w10, LBB12_63
; %bb.8:
cmp w10, #44
b.eq LBB12_63
; %bb.9:
add x9, x22, #4
strb w10, [x22, #3]
ldrb w10, [x0, #16]
cbz w10, LBB12_63
; %bb.10:
cmp w10, #44
b.eq LBB12_63
; %bb.11:
add x9, x22, #5
strb w10, [x22, #4]
ldrb w10, [x0, #17]
cbz w10, LBB12_63
; %bb.12:
cmp w10, #44
b.eq LBB12_63
; %bb.13:
add x9, x22, #6
strb w10, [x22, #5]
ldrb w10, [x0, #18]
cbz w10, LBB12_63
; %bb.14:
cmp w10, #44
b.eq LBB12_63
; %bb.15:
add x9, x22, #7
strb w10, [x22, #6]
ldrb w10, [x0, #19]
cbz w10, LBB12_63
; %bb.16:
cmp w10, #44
b.eq LBB12_63
; %bb.17:
add x9, x22, #8
strb w10, [x22, #7]
ldrb w10, [x0, #20]
cbz w10, LBB12_63
; %bb.18:
cmp w10, #44
b.eq LBB12_63
; %bb.19:
add x9, x22, #9
strb w10, [x22, #8]
ldrb w10, [x0, #21]
cbz w10, LBB12_63
; %bb.20:
cmp w10, #44
b.eq LBB12_63
; %bb.21:
add x9, x22, #10
strb w10, [x22, #9]
ldrb w10, [x0, #22]
cbz w10, LBB12_63
; %bb.22:
cmp w10, #44
b.eq LBB12_63
; %bb.23:
add x9, x22, #11
strb w10, [x22, #10]
ldrb w10, [x0, #23]
cbz w10, LBB12_63
; %bb.24:
cmp w10, #44
b.eq LBB12_63
; %bb.25:
add x9, x22, #12
strb w10, [x22, #11]
ldrb w10, [x0, #24]
cbz w10, LBB12_63
; %bb.26:
cmp w10, #44
b.eq LBB12_63
; %bb.27:
add x9, x22, #13
strb w10, [x22, #12]
ldrb w10, [x0, #25]
cbz w10, LBB12_63
; %bb.28:
cmp w10, #44
b.eq LBB12_63
; %bb.29:
add x9, x22, #14
strb w10, [x22, #13]
ldrb w10, [x0, #26]
cbz w10, LBB12_63
; %bb.30:
cmp w10, #44
b.eq LBB12_63
; %bb.31:
add x9, x22, #15
strb w10, [x22, #14]
ldrb w10, [x0, #27]
cbz w10, LBB12_63
; %bb.32:
cmp w10, #44
b.eq LBB12_63
; %bb.33:
add x9, x22, #16
strb w10, [x22, #15]
ldrb w10, [x0, #28]
cbz w10, LBB12_63
; %bb.34:
cmp w10, #44
b.eq LBB12_63
; %bb.35:
add x9, x22, #17
strb w10, [x22, #16]
ldrb w10, [x0, #29]
cbz w10, LBB12_63
; %bb.36:
cmp w10, #44
b.eq LBB12_63
; %bb.37:
add x9, x22, #18
strb w10, [x22, #17]
ldrb w10, [x0, #30]
cbz w10, LBB12_63
; %bb.38:
cmp w10, #44
b.eq LBB12_63
; %bb.39:
add x9, x22, #19
strb w10, [x22, #18]
ldrb w10, [x0, #31]
cbz w10, LBB12_63
; %bb.40:
cmp w10, #44
b.eq LBB12_63
; %bb.41:
add x9, x22, #20
strb w10, [x22, #19]
ldrb w10, [x0, #32]
cbz w10, LBB12_63
; %bb.42:
cmp w10, #44
b.eq LBB12_63
; %bb.43:
add x9, x22, #21
strb w10, [x22, #20]
ldrb w10, [x0, #33]
cbz w10, LBB12_63
; %bb.44:
cmp w10, #44
b.eq LBB12_63
; %bb.45:
add x9, x22, #22
strb w10, [x22, #21]
ldrb w10, [x0, #34]
cbz w10, LBB12_63
; %bb.46:
cmp w10, #44
b.eq LBB12_63
; %bb.47:
add x9, x22, #23
strb w10, [x22, #22]
ldrb w10, [x0, #35]
cbz w10, LBB12_63
; %bb.48:
cmp w10, #44
b.eq LBB12_63
; %bb.49:
add x9, x22, #24
strb w10, [x22, #23]
ldrb w10, [x0, #36]
cbz w10, LBB12_63
; %bb.50:
cmp w10, #44
b.eq LBB12_63
; %bb.51:
add x9, x22, #25
strb w10, [x22, #24]
ldrb w10, [x0, #37]
cbz w10, LBB12_63
; %bb.52:
cmp w10, #44
b.eq LBB12_63
; %bb.53:
add x9, x22, #26
strb w10, [x22, #25]
ldrb w10, [x0, #38]
cbz w10, LBB12_63
; %bb.54:
cmp w10, #44
b.eq LBB12_63
; %bb.55:
add x9, x22, #27
strb w10, [x22, #26]
ldrb w10, [x0, #39]
cbz w10, LBB12_63
; %bb.56:
cmp w10, #44
b.eq LBB12_63
; %bb.57:
add x9, x22, #28
strb w10, [x22, #27]
ldrb w10, [x0, #40]
cbz w10, LBB12_63
; %bb.58:
cmp w10, #44
b.eq LBB12_63
; %bb.59:
add x9, x22, #29
strb w10, [x22, #28]
ldrb w10, [x0, #41]
cbz w10, LBB12_63
; %bb.60:
cmp w10, #44
b.eq LBB12_63
; %bb.61:
add x9, x22, #30
strb w10, [x22, #29]
ldrb w10, [x0, #42]
cbz w10, LBB12_63
; %bb.62: ; 31st char with no delimiter yet => overflow
cmp w10, #44
b.ne LBB12_96
LBB12_63: ; name finished -- must be sitting on a ','
mov w0, #0
strb wzr, [x9]
sub w9, w9, w22
add x8, x8, w9, sxtw
ldrb w9, [x8]
cmp w9, #44
b.ne LBB12_95
; %bb.64: ; skip spaces before the size field
add x8, x8, #1
mov x9, #-4294967296
mov x10, #4294967296
mov x11, x8
LBB12_65: ; =>This Inner Loop Header: Depth=1
ldrb w12, [x11], #1
add x9, x9, x10
cmp w12, #32
b.eq LBB12_65
; %bb.66:
add x22, x8, x9, asr #32
mov x0, x22
bl _atoi ; parse the size field
str w0, [x21]
mov x0, x22
mov w1, #44
bl _strchr ; advance to the ',' after the size
cbz x0, LBB12_95
; %bb.67: ; skip spaces before spec_proc
add x8, x0, #1
mov x9, #-4294967296
mov x10, #4294967296
mov x11, x8
LBB12_68: ; =>This Inner Loop Header: Depth=1
ldrb w12, [x11], #1
add x9, x9, x10
cmp w12, #32
b.eq LBB12_68
; %bb.69: ; unrolled copy of spec_proc (up to 4 chars, ','-terminated)
add x8, x8, x9, asr #32
ldrb w10, [x8]
mov x9, x20
cbz w10, LBB12_79
; %bb.70:
mov x9, x20
cmp w10, #44
b.eq LBB12_79
; %bb.71:
mov x9, x20
strb w10, [x9], #1
ldrb w10, [x8, #1]
cbz w10, LBB12_79
; %bb.72:
cmp w10, #44
b.eq LBB12_79
; %bb.73:
add x9, x20, #2
strb w10, [x20, #1]
ldrb w10, [x8, #2]
cbz w10, LBB12_79
; %bb.74:
cmp w10, #44
b.eq LBB12_79
; %bb.75:
add x9, x20, #3
strb w10, [x20, #2]
ldrb w10, [x8, #3]
cbz w10, LBB12_79
; %bb.76:
cmp w10, #44
b.eq LBB12_79
; %bb.77:
add x9, x20, #4
strb w10, [x20, #3]
ldrb w10, [x8, #4]
cbz w10, LBB12_79
; %bb.78: ; 5th char with no delimiter => overflow
cmp w10, #44
b.ne LBB12_96
LBB12_79: ; spec_proc finished -- must be sitting on a ','
mov w0, #0
strb wzr, [x9]
sub w9, w9, w20
add x8, x8, w9, sxtw
ldrb w9, [x8]
cmp w9, #44
b.ne LBB12_95
; %bb.80: ; skip spaces before spec_ea
add x8, x8, #1
mov x9, #-4294967296
mov x10, #4294967296
mov x11, x8
LBB12_81: ; =>This Inner Loop Header: Depth=1
ldrb w12, [x11], #1
add x9, x9, x10
cmp w12, #32
b.eq LBB12_81
; %bb.82: ; unrolled copy of spec_ea (up to 5 chars, ')'-terminated)
add x8, x8, x9, asr #32
ldrb w10, [x8]
mov x9, x19
cbz w10, LBB12_94
; %bb.83:
mov x9, x19
cmp w10, #41
b.eq LBB12_94
; %bb.84:
mov x9, x19
strb w10, [x9], #1
ldrb w10, [x8, #1]
cbz w10, LBB12_94
; %bb.85:
cmp w10, #41
b.eq LBB12_94
; %bb.86:
add x9, x19, #2
strb w10, [x19, #1]
ldrb w10, [x8, #2]
cbz w10, LBB12_94
; %bb.87:
cmp w10, #41
b.eq LBB12_94
; %bb.88:
add x9, x19, #3
strb w10, [x19, #2]
ldrb w10, [x8, #3]
cbz w10, LBB12_94
; %bb.89:
cmp w10, #41
b.eq LBB12_94
; %bb.90:
add x9, x19, #4
strb w10, [x19, #3]
ldrb w10, [x8, #4]
cbz w10, LBB12_94
; %bb.91:
cmp w10, #41
b.eq LBB12_94
; %bb.92:
add x9, x19, #5
strb w10, [x19, #4]
ldrb w10, [x8, #5]
cbz w10, LBB12_94
; %bb.93: ; 6th char with no ')' => overflow
cmp w10, #41
b.ne LBB12_96
LBB12_94: ; success iff the terminator was ')'
strb wzr, [x9]
sub w9, w9, w19
ldrb w8, [x8, w9, sxtw]
cmp w8, #41
cset w0, eq
LBB12_95:
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
LBB12_96: ; field overflow -- fatal
strb w10, [x9]
Lloh101:
adrp x0, l_.str.60@PAGE
Lloh102:
add x0, x0, l_.str.60@PAGEOFF ; "Field too long"
bl _error_exit
.loh AdrpAdd Lloh99, Lloh100
.loh AdrpAdd Lloh101, Lloh102
.cfi_endproc
; -- End function
; void add_replace_string(table(x0), search_str(x1), replace_str(x2))
; Appends a (search, replace) string pair to a replace structure:
; entries are 402 bytes (search at +0, replace at +201); the entry
; count is an int at offset 12060.  Calls
; error_exit("overflow in replace structure") once 30 entries exist.
; The final strcpy is reached by tail call (epilogue before "b _strcpy").
.globl _add_replace_string ; -- Begin function add_replace_string
.p2align 2
_add_replace_string: ; @add_replace_string
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
ldrsw x8, [x0, #12060] ; current entry count
cmp w8, #30 ; capacity check
b.ge LBB13_2
; %bb.1:
mov x19, x2
mov x20, x0
mov w21, #402 ; sizeof one (search, replace) entry
madd x0, x8, x21, x0
bl _strcpy ; copy search string into entry +0
ldrsw x8, [x20, #12060]
add w9, w8, #1 ; bump the entry count
str w9, [x20, #12060]
madd x8, x8, x21, x20
add x0, x8, #201 ; replace string lives at entry +201
mov x1, x19
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
b _strcpy ; tail-call the second copy
LBB13_2: ; table full -- fatal
Lloh103:
adrp x0, l_.str.78@PAGE
Lloh104:
add x0, x0, l_.str.78@PAGEOFF ; "overflow in replace structure"
bl _error_exit
.loh AdrpAdd Lloh103, Lloh104
.cfi_endproc
; -- End function
.globl _write_body ; -- Begin function write_body
.p2align 2
_write_body: ; @write_body
;-----------------------------------------------------------------------
; write_body(file, body, replace_table)
; Emits each line of a stored function body to 'file', applying string
; substitutions from 'replace_table' to lines that contain the marker
; substring l_.str.79.
; Layouts evident from the code:
;   body:  201-byte lines starting at +0, signed line count at +60300
;   table: 402-byte entries (find at +0, replace at +201), signed
;          count at +12060 (same layout used by add_replace_string)
; A marker line with an empty table, or one where no find string
; matches, is a fatal error (l_.str.80). Ends by fwrite-ing the 2-byte
; literal l_.str.82 (presumably a closing "}\n" - TODO confirm).
; In:  x0 = FILE*, x1 = body buffer, x2 = replacement table
;-----------------------------------------------------------------------
.cfi_startproc
; %bb.0:
stp x28, x27, [sp, #-96]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 96
stp x26, x25, [sp, #16] ; 16-byte Folded Spill
stp x24, x23, [sp, #32] ; 16-byte Folded Spill
stp x22, x21, [sp, #48] ; 16-byte Folded Spill
stp x20, x19, [sp, #64] ; 16-byte Folded Spill
stp x29, x30, [sp, #80] ; 16-byte Folded Spill
add x29, sp, #80
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
sub sp, sp, #432 ; locals: line buf at sp+215, tail buf at sp+14
mov x19, x0 ; x19 = FILE*
Lloh105:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh106:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh107:
ldr x8, [x8]
stur x8, [x29, #-96] ; stash stack canary
mov w8, #60300
add x26, x1, x8 ; x26 = &body->line_count
ldr w8, [x26]
cmp w8, #1
b.lt LBB14_10 ; no lines -> just write trailer
; %bb.1:
mov x20, x2 ; x20 = replacement table
mov x21, x1 ; x21 = body lines base
mov x27, #0 ; x27 = line index i
b LBB14_4
LBB14_2: ; in Loop: Header=BB14_4 Depth=1
; marker line scanned against whole table; w23 != 0 iff any replacement hit
cbz w23, LBB14_12 ; marker present but nothing replaced -> error
LBB14_3: ; in Loop: Header=BB14_4 Depth=1
; print the (possibly rewritten) line: fprintf(file, l_.str.81, buf)
add x8, sp, #215
str x8, [sp] ; vararg slot = &buf (Apple arm64 varargs go on stack)
mov x0, x19
Lloh108:
adrp x1, l_.str.81@PAGE
Lloh109:
add x1, x1, l_.str.81@PAGEOFF
bl _fprintf
add x27, x27, #1 ; i++
ldrsw x8, [x26]
cmp x27, x8
b.ge LBB14_10 ; all lines done
LBB14_4: ; =>This Loop Header: Depth=1
; Child Loop BB14_8 Depth 2
; copy line i into the 201-byte local buffer
mov w8, #201
madd x1, x27, x8, x21 ; x1 = &body->line[i]
add x0, sp, #215
mov w2, #201
bl ___strcpy_chk
add x0, sp, #215
Lloh110:
adrp x1, l_.str.79@PAGE
Lloh111:
add x1, x1, l_.str.79@PAGEOFF
bl _strstr ; does the line contain the substitution marker?
cbz x0, LBB14_3 ; no marker -> print as-is
; %bb.5: ; in Loop: Header=BB14_4 Depth=1
ldr w22, [x20, #12060] ; w22 = table entry count
cmp w22, #1
b.lt LBB14_12 ; marker but empty table -> error
; %bb.6: ; in Loop: Header=BB14_4 Depth=1
mov x28, #0 ; x28 = table index j
mov w23, #0 ; w23 = "replaced something" flag
mov x24, x20 ; x24 = &table[j]
b LBB14_8
LBB14_7: ; in Loop: Header=BB14_8 Depth=2
add x28, x28, #1
add x24, x24, #402 ; next 402-byte entry
cmp x28, w22, sxtw
b.ge LBB14_2 ; table exhausted
LBB14_8: ; Parent Loop BB14_4 Depth=1
; => This Inner Loop Header: Depth=2
add x0, sp, #215
mov x1, x24
bl _strstr ; search line for entry->find
cbz x0, LBB14_7 ; not present -> next entry
; %bb.9: ; in Loop: Header=BB14_8 Depth=2
; splice: save tail after match, overwrite match with replace, reattach tail
mov x25, x0 ; x25 = match position in buf
mov x0, x24
bl _strlen ; length of find string
add x1, x25, x0 ; x1 = text following the match
add x0, sp, #14
mov w2, #201
bl ___strcpy_chk ; tail -> sp+14
add x1, x24, #201 ; x1 = entry->replace
mov x0, x25
bl _strcpy ; overwrite match with replacement
add x1, sp, #14
bl _strcat ; append saved tail (x0 = strcpy's return = match)
ldr w22, [x20, #12060] ; reload count (table may have grown? hedged)
mov w23, #1 ; mark that a replacement happened
b LBB14_7
LBB14_10:
; write the fixed 2-byte trailer l_.str.82 and verify the canary
Lloh112:
adrp x0, l_.str.82@PAGE
Lloh113:
add x0, x0, l_.str.82@PAGEOFF
mov w1, #2
mov w2, #1
mov x3, x19
bl _fwrite
ldur x8, [x29, #-96]
Lloh114:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh115:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh116:
ldr x9, [x9]
cmp x9, x8
b.ne LBB14_13 ; canary mismatch -> abort
; %bb.11:
add sp, sp, #432
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
ldp x26, x25, [sp, #16] ; 16-byte Folded Reload
ldp x28, x27, [sp], #96 ; 16-byte Folded Reload
ret
LBB14_12:
; fatal: marker line but no usable replacement (l_.str.80)
Lloh117:
adrp x0, l_.str.80@PAGE
Lloh118:
add x0, x0, l_.str.80@PAGEOFF
bl _error_exit
LBB14_13:
bl ___stack_chk_fail
.loh AdrpLdrGotLdr Lloh105, Lloh106, Lloh107
.loh AdrpAdd Lloh108, Lloh109
.loh AdrpAdd Lloh110, Lloh111
.loh AdrpLdrGotLdr Lloh114, Lloh115, Lloh116
.loh AdrpAdd Lloh112, Lloh113
.loh AdrpAdd Lloh117, Lloh118
.cfi_endproc
; -- End function
.globl _get_base_name ; -- Begin function get_base_name
.p2align 2
_get_base_name: ; @get_base_name
;-----------------------------------------------------------------------
; get_base_name(out, op)
; Builds the base function name for an opcode struct into 'out' via a
; sequence of sprintf calls:
;   1. sprintf(out, l_.str.83, op)            - base portion
;   2. if op byte at +30 is nonzero, append it with l_.str.84
;      (looks like a one-character size code - TODO confirm)
;   3. if the string at op+31 differs from l_.str.85, append it with
;      l_.str.86; likewise for the string at op+35
; In:  x0 = output buffer, x1 = opcode struct
;-----------------------------------------------------------------------
.cfi_startproc
; %bb.0:
sub sp, sp, #64
.cfi_def_cfa_offset 64
stp x22, x21, [sp, #16] ; 16-byte Folded Spill
stp x20, x19, [sp, #32] ; 16-byte Folded Spill
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x20, x1 ; x20 = opcode struct
mov x19, x0 ; x19 = output buffer
str x1, [sp] ; vararg = op (stack-passed on Apple arm64)
Lloh119:
adrp x1, l_.str.83@PAGE
Lloh120:
add x1, x1, l_.str.83@PAGEOFF
bl _sprintf ; sprintf(out, str.83, op)
ldrb w21, [x20, #30] ; w21 = byte field at +30 (zero-extended)
cbz w21, LBB15_2 ; absent -> skip
; %bb.1:
mov x0, x19
bl _strlen
add x0, x19, x0 ; x0 = out + strlen(out): append position
str x21, [sp] ; vararg = the byte value
Lloh121:
adrp x1, l_.str.84@PAGE
Lloh122:
add x1, x1, l_.str.84@PAGEOFF
bl _sprintf
LBB15_2:
add x21, x20, #31 ; x21 = string field at op+31
Lloh123:
adrp x1, l_.str.85@PAGE
Lloh124:
add x1, x1, l_.str.85@PAGEOFF
mov x0, x21
bl _strcmp ; equal to the sentinel string str.85?
cbz w0, LBB15_4 ; yes -> nothing to append
; %bb.3:
mov x0, x19
bl _strlen
add x0, x19, x0 ; append position
str x21, [sp]
Lloh125:
adrp x1, l_.str.86@PAGE
Lloh126:
add x1, x1, l_.str.86@PAGEOFF
bl _sprintf
LBB15_4:
add x20, x20, #35 ; x20 = string field at op+35
Lloh127:
adrp x1, l_.str.85@PAGE
Lloh128:
add x1, x1, l_.str.85@PAGEOFF
mov x0, x20
bl _strcmp
cbz w0, LBB15_6
; %bb.5:
mov x0, x19
bl _strlen
add x0, x19, x0
str x20, [sp]
Lloh129:
adrp x1, l_.str.86@PAGE
Lloh130:
add x1, x1, l_.str.86@PAGEOFF
bl _sprintf
LBB15_6:
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #64
ret
.loh AdrpAdd Lloh119, Lloh120
.loh AdrpAdd Lloh121, Lloh122
.loh AdrpAdd Lloh123, Lloh124
.loh AdrpAdd Lloh125, Lloh126
.loh AdrpAdd Lloh127, Lloh128
.loh AdrpAdd Lloh129, Lloh130
.cfi_endproc
; -- End function
.globl _write_prototype ; -- Begin function write_prototype
.p2align 2
_write_prototype: ; @write_prototype
;-----------------------------------------------------------------------
; write_prototype(file, name)
; Thin wrapper: fprintf(file, l_.str.87, name).
; In:  x0 = FILE*, x1 = name string (forwarded as the vararg)
;-----------------------------------------------------------------------
.cfi_startproc
; %bb.0:
sub sp, sp, #32
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
str x1, [sp] ; vararg = name (stack-passed on Apple arm64)
Lloh131:
adrp x1, l_.str.87@PAGE
Lloh132:
add x1, x1, l_.str.87@PAGEOFF
bl _fprintf ; x0 (FILE*) passes through unchanged
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #32
ret
.loh AdrpAdd Lloh131, Lloh132
.cfi_endproc
; -- End function
.globl _write_function_name ; -- Begin function write_function_name
.p2align 2
_write_function_name: ; @write_function_name
;-----------------------------------------------------------------------
; write_function_name(file, name)
; Thin wrapper: fprintf(file, l_.str.88, name).
; Identical shape to write_prototype, different format string.
; In:  x0 = FILE*, x1 = name string (forwarded as the vararg)
;-----------------------------------------------------------------------
.cfi_startproc
; %bb.0:
sub sp, sp, #32
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
str x1, [sp] ; vararg = name (stack-passed on Apple arm64)
Lloh133:
adrp x1, l_.str.88@PAGE
Lloh134:
add x1, x1, l_.str.88@PAGEOFF
bl _fprintf ; x0 (FILE*) passes through unchanged
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #32
ret
.loh AdrpAdd Lloh133, Lloh134
.cfi_endproc
; -- End function
.globl _add_opcode_output_table_entry ; -- Begin function add_opcode_output_table_entry
.p2align 2
_add_opcode_output_table_entry: ; @add_opcode_output_table_entry
;-----------------------------------------------------------------------
; add_opcode_output_table_entry(op, name)
; Appends a 68-byte opcode record to g_opcode_output_table (capacity
; 3001; overflow -> error_exit(l_.str.89)):
;   - copies the 68-byte struct from x0 into the new slot
;   - overwrites the slot's first field with 'name' (__strcpy_chk,
;     bound 30), so the name lives at offset 0
;   - stores popcount(halfword at +42) into the byte at +40, computed
;     with the classic parallel bit-count (SWAR) sequence below; the
;     same byte is later used as the primary qsort key in
;     compare_nof_true_bits
; In:  x0 = source opcode struct, x1 = name string
;-----------------------------------------------------------------------
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
adrp x9, _g_opcode_output_table_length@PAGE
ldrsw x8, [x9, _g_opcode_output_table_length@PAGEOFF] ; x8 = length
cmp w8, #3001 ; table full?
b.ge LBB18_2
; %bb.1:
add w10, w8, #1
str w10, [x9, _g_opcode_output_table_length@PAGEOFF] ; length++
Lloh135:
adrp x9, _g_opcode_output_table@GOTPAGE
Lloh136:
ldr x9, [x9, _g_opcode_output_table@GOTPAGEOFF]
mov w10, #68 ; entry size
madd x19, x8, x10, x9 ; x19 = &table[old length]
; copy the 68-byte struct (4 x 16-byte q loads + trailing word)
ldr q0, [x0]
str q0, [x19]
ldp q1, q0, [x0, #32]
ldr w8, [x0, #64]
ldr q2, [x0, #16]
str w8, [x19, #64]
stp q1, q0, [x19, #32]
str q2, [x19, #16]
mov x0, x19
mov w2, #30
bl ___strcpy_chk ; slot name = arg-x1 string (x1 untouched since entry)
; popcount of the halfword at +42 via SWAR bit-counting
ldrh w8, [x19, #42]
lsr w9, w8, #1
and w9, w9, #0x55555555
and w8, w8, #0x55555555
add w8, w9, w8 ; pairwise 1-bit sums
lsr w9, w8, #2
and w9, w9, #0x33333333
and w8, w8, #0x33333333
add w8, w9, w8 ; 2-bit sums
lsr w9, w8, #4
and w9, w9, #0xffffff0f
and w8, w8, #0xf0f0f0f
add w8, w9, w8 ; 4-bit sums
add w8, w8, w8, lsr #8 ; fold bytes; low byte = popcount
strb w8, [x19, #40] ; store bit count
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
LBB18_2:
; overflow: too many output-table entries (l_.str.89)
Lloh137:
adrp x0, l_.str.89@PAGE
Lloh138:
add x0, x0, l_.str.89@PAGEOFF
bl _error_exit
.loh AdrpLdrGot Lloh135, Lloh136
.loh AdrpAdd Lloh137, Lloh138
.cfi_endproc
; -- End function
.globl _print_opcode_output_table ; -- Begin function print_opcode_output_table
.p2align 2
_print_opcode_output_table: ; @print_opcode_output_table
;-----------------------------------------------------------------------
; print_opcode_output_table(file)
; Sorts g_opcode_output_table (68-byte entries) with qsort using
; _compare_nof_true_bits, then prints every entry:
;   fprintf(file, l_.str.90, entry, mask@42, mask@44)
;   fprintf(file, l_.str.91, byte@64)  + 2-byte separator l_.str.92
;   fprintf(file, l_.str.91, byte@65)  + 2-byte separator l_.str.92
;   fprintf(file, l_.str.91, byte@66)  + 4-byte terminator l_.str.93
; (bytes 64..66 look like the three cycle counts filled in by
; set_opcode_struct; the separators are presumably ", " / "},\n" -
; TODO confirm against the string table.)
; In:  x0 = FILE*
;-----------------------------------------------------------------------
.cfi_startproc
; %bb.0:
sub sp, sp, #112
.cfi_def_cfa_offset 112
stp x26, x25, [sp, #32] ; 16-byte Folded Spill
stp x24, x23, [sp, #48] ; 16-byte Folded Spill
stp x22, x21, [sp, #64] ; 16-byte Folded Spill
stp x20, x19, [sp, #80] ; 16-byte Folded Spill
stp x29, x30, [sp, #96] ; 16-byte Folded Spill
add x29, sp, #96
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
mov x19, x0 ; x19 = FILE*
adrp x24, _g_opcode_output_table_length@PAGE
ldrsw x1, [x24, _g_opcode_output_table_length@PAGEOFF] ; nmemb
Lloh139:
adrp x0, _g_opcode_output_table@GOTPAGE
Lloh140:
ldr x0, [x0, _g_opcode_output_table@GOTPAGEOFF] ; base
Lloh141:
adrp x3, _compare_nof_true_bits@PAGE
Lloh142:
add x3, x3, _compare_nof_true_bits@PAGEOFF ; comparator
mov w2, #68 ; entry size
bl _qsort
ldr w8, [x24, _g_opcode_output_table_length@PAGEOFF]
cmp w8, #1
b.lt LBB19_3 ; empty table -> nothing to print
; %bb.1:
; hoist loop-invariant addresses: table cursor + the four strings
mov x25, #0 ; x25 = entry index
Lloh143:
adrp x26, _g_opcode_output_table@GOTPAGE
Lloh144:
ldr x26, [x26, _g_opcode_output_table@GOTPAGEOFF] ; x26 = &table[i]
Lloh145:
adrp x20, l_.str.90@PAGE
Lloh146:
add x20, x20, l_.str.90@PAGEOFF
Lloh147:
adrp x21, l_.str.91@PAGE
Lloh148:
add x21, x21, l_.str.91@PAGEOFF
Lloh149:
adrp x22, l_.str.92@PAGE
Lloh150:
add x22, x22, l_.str.92@PAGEOFF
Lloh151:
adrp x23, l_.str.93@PAGE
Lloh152:
add x23, x23, l_.str.93@PAGEOFF
LBB19_2: ; =>This Inner Loop Header: Depth=1
; row header: fprintf(file, str.90, entry, mask42, mask44)
ldrh w8, [x26, #42]
ldrh w9, [x26, #44]
stp x8, x9, [sp, #8] ; varargs 2 and 3
str x26, [sp] ; vararg 1 = entry pointer
mov x0, x19
mov x1, x20
bl _fprintf
; first cycle byte + separator
ldrb w8, [x26, #64]
str x8, [sp]
mov x0, x19
mov x1, x21
bl _fprintf
mov x0, x22
mov w1, #2
mov w2, #1
mov x3, x19
bl _fwrite ; fwrite(str.92, 2, 1, file)
; second cycle byte + separator
ldrb w8, [x26, #65]
str x8, [sp]
mov x0, x19
mov x1, x21
bl _fprintf
mov x0, x22
mov w1, #2
mov w2, #1
mov x3, x19
bl _fwrite
; third cycle byte + 4-byte row terminator
ldrb w8, [x26, #66]
str x8, [sp]
mov x0, x19
mov x1, x21
bl _fprintf
mov x0, x23
mov w1, #4
mov w2, #1
mov x3, x19
bl _fwrite ; fwrite(str.93, 4, 1, file)
add x25, x25, #1
ldrsw x8, [x24, _g_opcode_output_table_length@PAGEOFF] ; re-read length
add x26, x26, #68 ; next entry
cmp x25, x8
b.lt LBB19_2
LBB19_3:
ldp x29, x30, [sp, #96] ; 16-byte Folded Reload
ldp x20, x19, [sp, #80] ; 16-byte Folded Reload
ldp x22, x21, [sp, #64] ; 16-byte Folded Reload
ldp x24, x23, [sp, #48] ; 16-byte Folded Reload
ldp x26, x25, [sp, #32] ; 16-byte Folded Reload
add sp, sp, #112
ret
.loh AdrpAdd Lloh141, Lloh142
.loh AdrpLdrGot Lloh139, Lloh140
.loh AdrpAdd Lloh151, Lloh152
.loh AdrpAdd Lloh149, Lloh150
.loh AdrpAdd Lloh147, Lloh148
.loh AdrpAdd Lloh145, Lloh146
.loh AdrpLdrGot Lloh143, Lloh144
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function compare_nof_true_bits
_compare_nof_true_bits: ; @compare_nof_true_bits
;-----------------------------------------------------------------------
; int compare_nof_true_bits(const void *a, const void *b)
; qsort comparator for 68-byte opcode records. Sort keys, in order:
;   1. byte at +40  (the popcount stored by add_opcode_output_table_entry)
;   2. halfword at +42
;   3. halfword at +44
; Returns the first nonzero difference (keys compared as zero-extended
; unsigned values; the subtraction cannot overflow for 8/16-bit keys).
;-----------------------------------------------------------------------
.cfi_startproc
; %bb.0:
mov x8, x0 ; keep 'a' aside; w0 becomes the result
ldrb w9, [x0, #40]
ldrb w10, [x1, #40]
subs w0, w9, w10 ; key 1: popcount bytes
b.ne LBB20_3 ; differ -> done (difference already in w0)
; %bb.1:
ldrh w9, [x8, #42]
ldrh w10, [x1, #42]
subs w0, w9, w10 ; key 2: mask at +42
b.ne LBB20_3
; %bb.2:
ldrh w8, [x8, #44]
ldrh w9, [x1, #44]
sub w0, w8, w9 ; key 3: mask at +44 (final tiebreak)
LBB20_3:
ret
.cfi_endproc
; -- End function
.globl _write_table_entry ; -- Begin function write_table_entry
.p2align 2
_write_table_entry: ; @write_table_entry
;-----------------------------------------------------------------------
; write_table_entry(file, entry)
; Prints one 68-byte opcode record — the same per-entry output as the
; loop body of print_opcode_output_table (str.90 header, three str.91
; cycle bytes separated by 2-byte str.92, terminated by 4-byte str.93).
; The final fwrite is a tail call.
; In:  x0 = FILE*, x1 = entry pointer
;-----------------------------------------------------------------------
.cfi_startproc
; %bb.0:
sub sp, sp, #80
.cfi_def_cfa_offset 80
stp x22, x21, [sp, #32] ; 16-byte Folded Spill
stp x20, x19, [sp, #48] ; 16-byte Folded Spill
stp x29, x30, [sp, #64] ; 16-byte Folded Spill
add x29, sp, #64
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x20, x1 ; x20 = entry
mov x19, x0 ; x19 = FILE*
; header: fprintf(file, str.90, entry, mask@42, mask@44)
ldrh w8, [x1, #42]
ldrh w9, [x1, #44]
stp x8, x9, [sp, #8] ; varargs 2 and 3
str x1, [sp] ; vararg 1
Lloh153:
adrp x1, l_.str.90@PAGE
Lloh154:
add x1, x1, l_.str.90@PAGEOFF
bl _fprintf
; first cycle byte + separator
ldrb w8, [x20, #64]
str x8, [sp]
Lloh155:
adrp x21, l_.str.91@PAGE
Lloh156:
add x21, x21, l_.str.91@PAGEOFF
mov x0, x19
mov x1, x21
bl _fprintf
Lloh157:
adrp x22, l_.str.92@PAGE
Lloh158:
add x22, x22, l_.str.92@PAGEOFF
mov x0, x22
mov w1, #2
mov w2, #1
mov x3, x19
bl _fwrite ; fwrite(str.92, 2, 1, file)
; second cycle byte + separator
ldrb w8, [x20, #65]
str x8, [sp]
mov x0, x19
mov x1, x21
bl _fprintf
mov x0, x22
mov w1, #2
mov w2, #1
mov x3, x19
bl _fwrite
; third cycle byte + 4-byte terminator (tail call)
ldrb w8, [x20, #66]
str x8, [sp]
mov x0, x19
mov x1, x21
bl _fprintf
Lloh159:
adrp x0, l_.str.93@PAGE
Lloh160:
add x0, x0, l_.str.93@PAGEOFF
mov w1, #4
mov w2, #1
mov x3, x19
ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
ldp x22, x21, [sp, #32] ; 16-byte Folded Reload
add sp, sp, #80
b _fwrite ; tail call: fwrite(str.93, 4, 1, file)
.loh AdrpAdd Lloh159, Lloh160
.loh AdrpAdd Lloh157, Lloh158
.loh AdrpAdd Lloh155, Lloh156
.loh AdrpAdd Lloh153, Lloh154
.cfi_endproc
; -- End function
.globl _set_opcode_struct ; -- Begin function set_opcode_struct
.p2align 2
_set_opcode_struct: ; @set_opcode_struct
;-----------------------------------------------------------------------
; set_opcode_struct(src, dst, ea)
; Specializes an opcode record for one effective-address mode:
;   - copies the 68-byte struct src -> dst
;   - fills cycle bytes dst[64..66] with get_oper_cycles(dst, ea, 0/1/2)
;   - if ea != 0 and the string at dst+35 equals the sentinel l_.str.85,
;     rewrites dst+35 via __sprintf_chk(fmt l_.str.94, bound 5) from the
;     first 8-byte field of g_ea_info_table[ea] (24-byte entries)
;   - ORs g_ea_info_table[ea] halfwords at +16 / +20 into the dst mask
;     halfwords at +42 / +44
; In:  x0 = src struct, x1 = dst struct, x2 = ea index (signed)
;-----------------------------------------------------------------------
.cfi_startproc
; %bb.0:
sub sp, sp, #80
.cfi_def_cfa_offset 80
stp x24, x23, [sp, #16] ; 16-byte Folded Spill
stp x22, x21, [sp, #32] ; 16-byte Folded Spill
stp x20, x19, [sp, #48] ; 16-byte Folded Spill
stp x29, x30, [sp, #64] ; 16-byte Folded Spill
add x29, sp, #64
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
mov x21, x2 ; x21 = ea index
mov x19, x1 ; x19 = dst
; 68-byte struct copy via q registers + trailing word
ldr q0, [x0]
str q0, [x1]
ldp q0, q1, [x0, #16]
ldr q2, [x0, #48]
ldr w8, [x0, #64]
str w8, [x1, #64]
stp q1, q2, [x1, #32]
str q0, [x1, #16]
; three cycle counts: get_oper_cycles(dst, ea, n) for n = 0, 1, 2
mov x0, x1
mov x1, x2
mov w2, #0
bl _get_oper_cycles
strb w0, [x19, #64]
mov x0, x19
mov x1, x21
mov w2, #1
bl _get_oper_cycles
strb w0, [x19, #65]
mov x0, x19
mov x1, x21
mov w2, #2
bl _get_oper_cycles
strb w0, [x19, #66]
add x20, x19, #35 ; x20 = string field at dst+35
Lloh161:
adrp x1, l_.str.85@PAGE
Lloh162:
add x1, x1, l_.str.85@PAGEOFF
mov x0, x20
bl _strcmp ; w0 == 0 iff field matches the sentinel
sxtw x22, w21 ; x22 = ea as 64-bit index
Lloh163:
adrp x23, _g_ea_info_table@PAGE
Lloh164:
add x23, x23, _g_ea_info_table@PAGEOFF
cbz w21, LBB22_3 ; ea 0 -> no rewrite
; %bb.1:
cbnz w0, LBB22_3 ; field not the sentinel -> no rewrite
; %bb.2:
mov w8, #24 ; ea_info entry stride
mul x8, x22, x8
ldr x8, [x23, x8] ; first 8-byte field of entry[ea]
str x8, [sp] ; vararg
Lloh165:
adrp x3, l_.str.94@PAGE
Lloh166:
add x3, x3, l_.str.94@PAGEOFF
mov x0, x20
mov w1, #0
mov w2, #5 ; destination bound (field is 4 chars + NUL)
bl ___sprintf_chk
LBB22_3:
; merge the ea entry's two mask halfwords into the dst masks
mov w8, #24
madd x8, x22, x8, x23 ; x8 = &g_ea_info_table[ea]
ldrh w9, [x8, #16]
ldrh w10, [x19, #42]
orr w9, w10, w9
strh w9, [x19, #42] ; dst mask42 |= entry mask
ldrh w8, [x8, #20]
ldrh w9, [x19, #44]
orr w8, w9, w8
strh w8, [x19, #44] ; dst mask44 |= entry mask
ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
ldp x22, x21, [sp, #32] ; 16-byte Folded Reload
ldp x24, x23, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #80
ret
.loh AdrpAdd Lloh163, Lloh164
.loh AdrpAdd Lloh161, Lloh162
.loh AdrpAdd Lloh165, Lloh166
.cfi_endproc
; -- End function
.globl _generate_opcode_handler ; -- Begin function generate_opcode_handler
.p2align 2
_generate_opcode_handler: ; @generate_opcode_handler
;-----------------------------------------------------------------------
; generate_opcode_handler(file, body, replace_table, op, ea)
; Emits one specialized opcode handler:
;   1. malloc(68) a working copy; set_opcode_struct(op, copy, ea)
;   2. get_base_name(name_buf at sp+15, copy)
;   3. fprintf(g_prototype_file, l_.str.87, name)
;   4. append the copy (with 'name') to g_opcode_output_table, inlined
;      copy of add_opcode_output_table_entry incl. the SWAR popcount of
;      the halfword at +42 stored to +40; overflow -> error (l_.str.89)
;   5. fprintf(file, l_.str.88, name)   - handler header
;   6. if ea != 0: install six (find, replace) pairs into replace_table
;      (402-byte entries; count at +12060, cap 30 -> error l_.str.78).
;      Each find string is a short literal (l_.str.96/.98/.100/.102/
;      .104/.106, copied with q + overlapping ldur/stur stores); each
;      replace string is __sprintf_chk'd (fmt l_.str.95/.97/.99/.101/
;      .103/.105) from the field at g_ea_info_table[ea]+8 (24-byte
;      entries; the ldr with writeback advances x23 to +8 once).
;   7. write_body(file, body, replace_table); g_num_functions++;
;      free the working copy; stack-canary check.
; In:  x0 = FILE*, x1 = body buffer, x2 = replacement table,
;      x3 = source opcode struct, x4 = ea index (signed 32-bit)
;-----------------------------------------------------------------------
.cfi_startproc
; %bb.0:
sub sp, sp, #304
.cfi_def_cfa_offset 304
stp x26, x25, [sp, #224] ; 16-byte Folded Spill
stp x24, x23, [sp, #240] ; 16-byte Folded Spill
stp x22, x21, [sp, #256] ; 16-byte Folded Spill
stp x20, x19, [sp, #272] ; 16-byte Folded Spill
stp x29, x30, [sp, #288] ; 16-byte Folded Spill
add x29, sp, #288
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
mov x23, x4 ; x23 = ea index
mov x24, x3 ; x24 = source opcode struct
mov x19, x2 ; x19 = replacement table
mov x20, x1 ; x20 = body buffer
mov x22, x0 ; x22 = FILE*
Lloh167:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh168:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh169:
ldr x8, [x8]
stur x8, [x29, #-72] ; stash stack canary
mov w0, #68
bl _malloc ; working copy of the opcode struct
mov x21, x0 ; x21 = copy (freed at the end)
mov x0, x24
mov x1, x21
mov x2, x23
bl _set_opcode_struct ; specialize copy for this ea mode
add x25, sp, #15 ; x25 = name buffer
add x0, sp, #15
mov x1, x21
bl _get_base_name ; name_buf = handler base name
Lloh170:
adrp x8, _g_prototype_file@PAGE
Lloh171:
ldr x0, [x8, _g_prototype_file@PAGEOFF]
str x25, [sp] ; vararg = name
Lloh172:
adrp x1, l_.str.87@PAGE
Lloh173:
add x1, x1, l_.str.87@PAGEOFF
bl _fprintf ; prototype into g_prototype_file
; --- inlined add_opcode_output_table_entry(copy, name) ---
adrp x9, _g_opcode_output_table_length@PAGE
ldrsw x8, [x9, _g_opcode_output_table_length@PAGEOFF]
cmp w8, #3001 ; output table full?
b.ge LBB23_12
; %bb.1:
add w10, w8, #1
str w10, [x9, _g_opcode_output_table_length@PAGEOFF] ; length++
Lloh174:
adrp x9, _g_opcode_output_table@GOTPAGE
Lloh175:
ldr x9, [x9, _g_opcode_output_table@GOTPAGEOFF]
mov w10, #68
madd x24, x8, x10, x9 ; x24 = new slot
ldp q0, q1, [x21, #32] ; 68-byte struct copy
stp q0, q1, [x24, #32]
ldr w8, [x21, #64]
str w8, [x24, #64]
ldp q1, q0, [x21]
stp q1, q0, [x24]
add x1, sp, #15
mov x0, x24
mov w2, #30
bl ___strcpy_chk ; slot name = name_buf
; SWAR popcount of the halfword at +42, stored to byte +40
ldrh w8, [x24, #42]
lsr w9, w8, #1
and w9, w9, #0x55555555
and w8, w8, #0x55555555
add w8, w9, w8
lsr w9, w8, #2
and w9, w9, #0x33333333
and w8, w8, #0x33333333
add w8, w9, w8
lsr w9, w8, #4
and w9, w9, #0xffffff0f
and w8, w8, #0xf0f0f0f
add w8, w9, w8
add w8, w8, w8, lsr #8
strb w8, [x24, #40]
; --- end inlined table append ---
str x25, [sp] ; vararg = name
Lloh176:
adrp x1, l_.str.88@PAGE
Lloh177:
add x1, x1, l_.str.88@PAGEOFF
mov x0, x22
bl _fprintf ; handler header into output file
cbz w23, LBB23_9 ; ea 0 -> no ea-specific replacements
; %bb.2:
; x23 = &g_ea_info_table[ea] + 8 (note pre-index writeback below)
Lloh178:
adrp x8, _g_ea_info_table@PAGE
Lloh179:
add x8, x8, _g_ea_info_table@PAGEOFF
mov w9, #24 ; ea_info entry stride
smaddl x23, w23, w9, x8
ldr x8, [x23, #8]! ; x8 = entry field at +8; x23 += 8
str x8, [sp] ; vararg for the format below
; pair 1: replace text = sprintf(str.95, field)
Lloh180:
adrp x3, l_.str.95@PAGE
Lloh181:
add x3, x3, l_.str.95@PAGEOFF
add x0, sp, #15
mov w1, #0
mov w2, #201
bl ___sprintf_chk
ldrsw x8, [x19, #12060] ; replace-table count
cmp w8, #30
b.ge LBB23_11 ; table full -> error
; %bb.3:
; store find literal l_.str.96 into the new entry (21-byte copy via
; one q store plus an overlapping 8-byte store at +13)
mov w9, #402 ; replace-table entry stride
madd x8, x8, x9, x19 ; x8 = &table[count].find
Lloh182:
adrp x10, l_.str.96@PAGE
Lloh183:
add x10, x10, l_.str.96@PAGEOFF
ldr q0, [x10]
ldur x10, [x10, #13]
stur x10, [x8, #13]
str q0, [x8]
ldrsw x8, [x19, #12060]
add w10, w8, #1
str w10, [x19, #12060] ; count++
madd x8, x8, x9, x19
add x0, x8, #201 ; entry.replace slot
add x1, sp, #15
bl _strcpy ; replace = formatted buffer
; pair 2: fmt str.97 / find str.98 (same pattern as pair 1)
ldr x8, [x23]
str x8, [sp]
Lloh184:
adrp x3, l_.str.97@PAGE
Lloh185:
add x3, x3, l_.str.97@PAGEOFF
add x0, sp, #15
mov w1, #0
mov w2, #201
bl ___sprintf_chk
ldrsw x8, [x19, #12060]
cmp w8, #30
b.ge LBB23_11
; %bb.4:
mov w9, #402
madd x8, x8, x9, x19
Lloh186:
adrp x10, l_.str.98@PAGE
Lloh187:
add x10, x10, l_.str.98@PAGEOFF
ldr q0, [x10]
ldur x10, [x10, #14]
stur x10, [x8, #14]
str q0, [x8]
ldrsw x8, [x19, #12060]
add w10, w8, #1
str w10, [x19, #12060]
madd x8, x8, x9, x19
add x0, x8, #201
add x1, sp, #15
bl _strcpy
; pair 3: fmt str.99 / find str.100
ldr x8, [x23]
str x8, [sp]
Lloh188:
adrp x3, l_.str.99@PAGE
Lloh189:
add x3, x3, l_.str.99@PAGEOFF
add x0, sp, #15
mov w1, #0
mov w2, #201
bl ___sprintf_chk
ldrsw x8, [x19, #12060]
cmp w8, #30
b.ge LBB23_11
; %bb.5:
mov w9, #402
madd x8, x8, x9, x19
Lloh190:
adrp x10, l_.str.100@PAGE
Lloh191:
add x10, x10, l_.str.100@PAGEOFF
ldr q0, [x10]
ldur x10, [x10, #14]
stur x10, [x8, #14]
str q0, [x8]
ldrsw x8, [x19, #12060]
add w10, w8, #1
str w10, [x19, #12060]
madd x8, x8, x9, x19
add x0, x8, #201
add x1, sp, #15
bl _strcpy
; pair 4: fmt str.101 / find str.102
ldr x8, [x23]
str x8, [sp]
Lloh192:
adrp x3, l_.str.101@PAGE
Lloh193:
add x3, x3, l_.str.101@PAGEOFF
add x0, sp, #15
mov w1, #0
mov w2, #201
bl ___sprintf_chk
ldrsw x8, [x19, #12060]
cmp w8, #30
b.ge LBB23_11
; %bb.6:
mov w9, #402
madd x8, x8, x9, x19
Lloh194:
adrp x10, l_.str.102@PAGE
Lloh195:
add x10, x10, l_.str.102@PAGEOFF
ldr q0, [x10]
ldur x10, [x10, #15]
stur x10, [x8, #15]
str q0, [x8]
ldrsw x8, [x19, #12060]
add w10, w8, #1
str w10, [x19, #12060]
madd x8, x8, x9, x19
add x0, x8, #201
add x1, sp, #15
bl _strcpy
; pair 5: fmt str.103 / find str.104
ldr x8, [x23]
str x8, [sp]
Lloh196:
adrp x3, l_.str.103@PAGE
Lloh197:
add x3, x3, l_.str.103@PAGEOFF
add x0, sp, #15
mov w1, #0
mov w2, #201
bl ___sprintf_chk
ldrsw x8, [x19, #12060]
cmp w8, #30
b.ge LBB23_11
; %bb.7:
mov w9, #402
madd x8, x8, x9, x19
Lloh198:
adrp x10, l_.str.104@PAGE
Lloh199:
add x10, x10, l_.str.104@PAGEOFF
ldr q0, [x10]
ldr x10, [x10, #16]
str x10, [x8, #16]
str q0, [x8]
ldrsw x8, [x19, #12060]
add w10, w8, #1
str w10, [x19, #12060]
madd x8, x8, x9, x19
add x0, x8, #201
add x1, sp, #15
bl _strcpy
; pair 6: fmt str.105 / find str.106
ldr x8, [x23]
str x8, [sp]
Lloh200:
adrp x3, l_.str.105@PAGE
Lloh201:
add x3, x3, l_.str.105@PAGEOFF
add x0, sp, #15
mov w1, #0
mov w2, #201
bl ___sprintf_chk
ldrsw x8, [x19, #12060]
cmp w8, #30
b.ge LBB23_11
; %bb.8:
mov w9, #402
madd x8, x8, x9, x19
Lloh202:
adrp x10, l_.str.106@PAGE
Lloh203:
add x10, x10, l_.str.106@PAGEOFF
ldr q0, [x10]
ldr x10, [x10, #16]
str x10, [x8, #16]
str q0, [x8]
ldrsw x8, [x19, #12060]
add w10, w8, #1
str w10, [x19, #12060]
madd x8, x8, x9, x19
add x0, x8, #201
add x1, sp, #15
bl _strcpy
LBB23_9:
; emit the body with all replacements applied, then clean up
mov x0, x22
mov x1, x20
mov x2, x19
bl _write_body
adrp x8, _g_num_functions@PAGE
ldr w9, [x8, _g_num_functions@PAGEOFF]
add w9, w9, #1
str w9, [x8, _g_num_functions@PAGEOFF] ; g_num_functions++
mov x0, x21
bl _free ; release the working copy
ldur x8, [x29, #-72]
Lloh204:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh205:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh206:
ldr x9, [x9]
cmp x9, x8
b.ne LBB23_13 ; canary mismatch -> abort
; %bb.10:
ldp x29, x30, [sp, #288] ; 16-byte Folded Reload
ldp x20, x19, [sp, #272] ; 16-byte Folded Reload
ldp x22, x21, [sp, #256] ; 16-byte Folded Reload
ldp x24, x23, [sp, #240] ; 16-byte Folded Reload
ldp x26, x25, [sp, #224] ; 16-byte Folded Reload
add sp, sp, #304
ret
LBB23_11:
; replacement table overflow (shared message with add_replace_string)
Lloh207:
adrp x0, l_.str.78@PAGE
Lloh208:
add x0, x0, l_.str.78@PAGEOFF
bl _error_exit
LBB23_12:
; opcode output table overflow
Lloh209:
adrp x0, l_.str.89@PAGE
Lloh210:
add x0, x0, l_.str.89@PAGEOFF
bl _error_exit
LBB23_13:
bl ___stack_chk_fail
.loh AdrpAdd Lloh172, Lloh173
.loh AdrpLdr Lloh170, Lloh171
.loh AdrpLdrGotLdr Lloh167, Lloh168, Lloh169
.loh AdrpAdd Lloh176, Lloh177
.loh AdrpLdrGot Lloh174, Lloh175
.loh AdrpAdd Lloh180, Lloh181
.loh AdrpAdd Lloh178, Lloh179
.loh AdrpAdd Lloh184, Lloh185
.loh AdrpAdd Lloh182, Lloh183
.loh AdrpAdd Lloh188, Lloh189
.loh AdrpAdd Lloh186, Lloh187
.loh AdrpAdd Lloh192, Lloh193
.loh AdrpAdd Lloh190, Lloh191
.loh AdrpAdd Lloh196, Lloh197
.loh AdrpAdd Lloh194, Lloh195
.loh AdrpAdd Lloh200, Lloh201
.loh AdrpAdd Lloh198, Lloh199
.loh AdrpAdd Lloh202, Lloh203
.loh AdrpLdrGotLdr Lloh204, Lloh205, Lloh206
.loh AdrpAdd Lloh207, Lloh208
.loh AdrpAdd Lloh209, Lloh210
.cfi_endproc
; -- End function
.globl _generate_opcode_ea_variants ; -- Begin function generate_opcode_ea_variants
.p2align 2
_generate_opcode_ea_variants: ; @generate_opcode_ea_variants
;-----------------------------------------------------------------------
; generate_opcode_ea_variants(file, body, replace_table, op)
; Fans out one opcode into handlers for every allowed addressing mode.
; If the string at op+46 equals l_.str.107, a single handler with
; ea = 0 is generated (tail call). Otherwise the bytes at op+46..55
; are tested against fixed characters (looks like an ea allow-list:
; 'A','+','-','D','X','W','L','d','x','I' - TODO confirm) and a
; handler with ea = 1..12 is generated for each match; ea 3 and ea 5
; additionally require op byte +30 == 8. The replacement-table count
; (at +12060) is restored to its entry value (w24) around every call
; so ea-specific replacements never leak between variants.
; In:  x0 = FILE*, x1 = body buffer, x2 = replacement table,
;      x3 = opcode struct
;-----------------------------------------------------------------------
.cfi_startproc
; %bb.0:
stp x24, x23, [sp, #-64]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 64
stp x22, x21, [sp, #16] ; 16-byte Folded Spill
stp x20, x19, [sp, #32] ; 16-byte Folded Spill
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
mov x20, x3 ; x20 = opcode struct
mov x19, x2 ; x19 = replacement table
mov x21, x1 ; x21 = body buffer
mov x22, x0 ; x22 = FILE*
ldr w24, [x2, #12060] ; w24 = table count to restore after each variant
add x23, x3, #46 ; x23 = ea allow string at op+46
Lloh211:
adrp x1, l_.str.107@PAGE
Lloh212:
add x1, x1, l_.str.107@PAGEOFF
mov x0, x23
bl _strcmp
cbz w0, LBB24_24 ; sentinel -> single ea-0 handler
; %bb.1:
ldrb w8, [x23] ; op[46]
cmp w8, #65 ; 'A'
b.ne LBB24_3
; %bb.2:
mov x0, x22
mov x1, x21
mov x2, x19
mov x3, x20
mov w4, #1 ; ea 1
bl _generate_opcode_handler
LBB24_3:
str w24, [x19, #12060] ; reset replacement table
ldrb w8, [x20, #47] ; op[47]
cmp w8, #43 ; '+'
b.ne LBB24_6
; %bb.4:
mov x0, x22
mov x1, x21
mov x2, x19
mov x3, x20
mov w4, #2 ; ea 2
bl _generate_opcode_handler
str w24, [x19, #12060]
ldrb w8, [x20, #30] ; size field gates the extra variant
cmp w8, #8
b.ne LBB24_6
; %bb.5:
mov x0, x22
mov x1, x21
mov x2, x19
mov x3, x20
mov w4, #3 ; ea 3 (only when op[30] == 8)
bl _generate_opcode_handler
LBB24_6:
str w24, [x19, #12060]
ldrb w8, [x20, #48] ; op[48]
cmp w8, #45 ; '-'
b.ne LBB24_9
; %bb.7:
mov x0, x22
mov x1, x21
mov x2, x19
mov x3, x20
mov w4, #4 ; ea 4
bl _generate_opcode_handler
str w24, [x19, #12060]
ldrb w8, [x20, #30]
cmp w8, #8
b.ne LBB24_9
; %bb.8:
mov x0, x22
mov x1, x21
mov x2, x19
mov x3, x20
mov w4, #5 ; ea 5 (only when op[30] == 8)
bl _generate_opcode_handler
LBB24_9:
str w24, [x19, #12060]
ldrb w8, [x20, #49] ; op[49]
cmp w8, #68 ; 'D'
b.ne LBB24_11
; %bb.10:
mov x0, x22
mov x1, x21
mov x2, x19
mov x3, x20
mov w4, #6 ; ea 6
bl _generate_opcode_handler
LBB24_11:
str w24, [x19, #12060]
ldrb w8, [x20, #50] ; op[50]
cmp w8, #88 ; 'X'
b.ne LBB24_13
; %bb.12:
mov x0, x22
mov x1, x21
mov x2, x19
mov x3, x20
mov w4, #7 ; ea 7
bl _generate_opcode_handler
LBB24_13:
str w24, [x19, #12060]
ldrb w8, [x20, #51] ; op[51]
cmp w8, #87 ; 'W'
b.ne LBB24_15
; %bb.14:
mov x0, x22
mov x1, x21
mov x2, x19
mov x3, x20
mov w4, #8 ; ea 8
bl _generate_opcode_handler
LBB24_15:
str w24, [x19, #12060]
ldrb w8, [x20, #52] ; op[52]
cmp w8, #76 ; 'L'
b.ne LBB24_17
; %bb.16:
mov x0, x22
mov x1, x21
mov x2, x19
mov x3, x20
mov w4, #9 ; ea 9
bl _generate_opcode_handler
LBB24_17:
str w24, [x19, #12060]
ldrb w8, [x20, #53] ; op[53]
cmp w8, #100 ; 'd'
b.ne LBB24_19
; %bb.18:
mov x0, x22
mov x1, x21
mov x2, x19
mov x3, x20
mov w4, #10 ; ea 10
bl _generate_opcode_handler
LBB24_19:
str w24, [x19, #12060]
ldrb w8, [x20, #54] ; op[54]
cmp w8, #120 ; 'x'
b.ne LBB24_21
; %bb.20:
mov x0, x22
mov x1, x21
mov x2, x19
mov x3, x20
mov w4, #11 ; ea 11
bl _generate_opcode_handler
LBB24_21:
str w24, [x19, #12060]
ldrb w8, [x20, #55] ; op[55]
cmp w8, #73 ; 'I'
b.ne LBB24_23
; %bb.22:
mov x0, x22
mov x1, x21
mov x2, x19
mov x3, x20
mov w4, #12 ; ea 12
bl _generate_opcode_handler
LBB24_23:
str w24, [x19, #12060] ; final reset before returning
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
ldp x24, x23, [sp], #64 ; 16-byte Folded Reload
ret
LBB24_24:
; sentinel path: exactly one handler with ea = 0 (tail call)
mov x0, x22
mov x1, x21
mov x2, x19
mov x3, x20
mov w4, #0
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
ldp x24, x23, [sp], #64 ; 16-byte Folded Reload
b _generate_opcode_handler
.loh AdrpAdd Lloh211, Lloh212
.cfi_endproc
; -- End function
.globl _generate_opcode_cc_variants ; -- Begin function generate_opcode_cc_variants
.p2align 2
_generate_opcode_cc_variants: ; @generate_opcode_cc_variants
;-----------------------------------------------------------------------
; generate_opcode_cc_variants(file, body, replace_table, op, name_offset)
; Fans one conditional opcode out across condition codes 2..15 using
; g_cc_table (16-byte entries; field at +0 = mnemonic string, field at
; +8 = a second string used in the substitutions - TODO confirm):
;   - malloc(68) a working copy of op; OR 0xf00 into its mask at +42
;   - x23 = copy + name_offset: position in the name where the cc
;     mnemonic is written each iteration (strcpy)
;   - per cc: build two replacement texts via __sprintf_chk (fmts
;     l_.str.108 / l_.str.109, bound 20) and install two table pairs:
;       find #1 = 8 bytes of l_.str.110 + the literal word 0x0043435F
;                 (bytes "_CC\0"), find #2 = 16 bytes of l_.str.111
;     (capacity 30 at table+12060, overflow -> error l_.str.78)
;   - set bits 8..11 of the copy's mask at +44 to the cc index, then
;     call generate_opcode_ea_variants; restore the saved table count
;   - free the copy; stack-canary check
; In:  x0 = FILE*, x1 = body, x2 = replacement table, x3 = op,
;      x4 = signed byte offset of the cc position in the name
;-----------------------------------------------------------------------
.cfi_startproc
; %bb.0:
sub sp, sp, #160
.cfi_def_cfa_offset 160
stp x28, x27, [sp, #64] ; 16-byte Folded Spill
stp x26, x25, [sp, #80] ; 16-byte Folded Spill
stp x24, x23, [sp, #96] ; 16-byte Folded Spill
stp x22, x21, [sp, #112] ; 16-byte Folded Spill
stp x20, x19, [sp, #128] ; 16-byte Folded Spill
stp x29, x30, [sp, #144] ; 16-byte Folded Spill
add x29, sp, #144
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
mov x23, x4 ; x23 = name offset (becomes cc write position below)
mov x25, x3 ; x25 = source opcode struct
mov x19, x2 ; x19 = replacement table
mov x20, x1 ; x20 = body buffer
mov x21, x0 ; x21 = FILE*
Lloh213:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh214:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh215:
ldr x8, [x8]
str x8, [sp, #56] ; stash stack canary
ldr w26, [x2, #12060] ; w26 = table count to restore per iteration
mov w0, #68
bl _malloc ; working copy of the opcode struct
mov x22, x0 ; x22 = copy (freed at the end)
; 68-byte struct copy, with mask@42 |= 0xf00 folded in
ldp q0, q1, [x25, #32]
stp q0, q1, [x0, #32]
ldr w8, [x25, #64]
str w8, [x0, #64]
ldrh w8, [x0, #42]
orr w8, w8, #0xf00
add x23, x0, w23, sxtw ; x23 = &copy->name[name_offset]
Lloh216:
adrp x27, _g_cc_table@PAGE+40
Lloh217:
add x27, x27, _g_cc_table@PAGEOFF+40 ; x27 = &g_cc_table[2] + 8
mov w28, #2 ; w28 = cc index, starts at 2
ldp q1, q0, [x25]
stp q1, q0, [x0]
strh w8, [x0, #42] ; store updated mask
mov w24, #402 ; replace-table entry stride
Lloh218:
adrp x25, l_.str.111@PAGE
Lloh219:
add x25, x25, l_.str.111@PAGEOFF ; x25 = 16-byte find literal #2
LBB25_1: ; =>This Inner Loop Header: Depth=1
; two replacement texts from the cc entry's +8 field
ldr x8, [x27] ; field at g_cc_table[cc]+8
str x8, [sp] ; vararg
add x0, sp, #36
mov w1, #0
mov w2, #20
Lloh220:
adrp x3, l_.str.108@PAGE
Lloh221:
add x3, x3, l_.str.108@PAGEOFF
bl ___sprintf_chk ; text #1 -> sp+36
ldr x8, [x27]
str x8, [sp]
add x0, sp, #16
mov w1, #0
mov w2, #20
Lloh222:
adrp x3, l_.str.109@PAGE
Lloh223:
add x3, x3, l_.str.109@PAGEOFF
bl ___sprintf_chk ; text #2 -> sp+16
ldrsw x8, [x19, #12060]
cmp w8, #30
b.ge LBB25_6 ; replacement table full -> error
; %bb.2: ; in Loop: Header=BB25_1 Depth=1
; pair 1: find = 8 bytes of str.110 + word 0x0043435F ("_CC\0")
madd x8, x8, x24, x19 ; x8 = &table[count].find
Lloh224:
adrp x9, l_.str.110@PAGE
Lloh225:
add x9, x9, l_.str.110@PAGEOFF
Lloh226:
ldr x9, [x9]
str x9, [x8]
mov w9, #17247 ; 0x435F = "_C"
movk w9, #67, lsl #16 ; 0x0043435F = "_CC\0"
str w9, [x8, #8]
ldrsw x8, [x19, #12060]
add w9, w8, #1
str w9, [x19, #12060] ; count++
madd x8, x8, x24, x19
add x0, x8, #201 ; entry.replace slot
add x1, sp, #36
bl _strcpy ; replace #1
ldrsw x8, [x19, #12060]
cmp w8, #30
b.ge LBB25_6
; %bb.3: ; in Loop: Header=BB25_1 Depth=1
; pair 2: find = 16 bytes of str.111
mul x8, x8, x24
ldr q0, [x25]
str q0, [x19, x8]
ldrsw x8, [x19, #12060]
add w9, w8, #1
str w9, [x19, #12060] ; count++
madd x8, x8, x24, x19
add x0, x8, #201
add x1, sp, #16
bl _strcpy ; replace #2
; patch the cc mnemonic (cc entry field at +0) into the name
ldur x1, [x27, #-8]
mov x0, x23
bl _strcpy
; mask@44 bits 8..11 = cc index
ldrh w8, [x22, #44]
and w8, w8, #0xfffff0ff
orr w8, w8, w28, lsl #8
strh w8, [x22, #44]
mov x0, x21
mov x1, x20
mov x2, x19
mov x3, x22
bl _generate_opcode_ea_variants ; emit all ea variants for this cc
str w26, [x19, #12060] ; reset replacement table
add x28, x28, #1 ; next cc
add x27, x27, #16 ; next 16-byte cc entry
cmp x28, #16
b.ne LBB25_1 ; cc = 2..15
; %bb.4:
mov x0, x22
bl _free ; release the working copy
ldr x8, [sp, #56]
Lloh227:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh228:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh229:
ldr x9, [x9]
cmp x9, x8
b.ne LBB25_7 ; canary mismatch -> abort
; %bb.5:
ldp x29, x30, [sp, #144] ; 16-byte Folded Reload
ldp x20, x19, [sp, #128] ; 16-byte Folded Reload
ldp x22, x21, [sp, #112] ; 16-byte Folded Reload
ldp x24, x23, [sp, #96] ; 16-byte Folded Reload
ldp x26, x25, [sp, #80] ; 16-byte Folded Reload
ldp x28, x27, [sp, #64] ; 16-byte Folded Reload
add sp, sp, #160
ret
LBB25_6:
; replacement table overflow (shared message l_.str.78)
Lloh230:
adrp x0, l_.str.78@PAGE
Lloh231:
add x0, x0, l_.str.78@PAGEOFF
bl _error_exit
LBB25_7:
bl ___stack_chk_fail
.loh AdrpAdd Lloh218, Lloh219
.loh AdrpAdd Lloh216, Lloh217
.loh AdrpLdrGotLdr Lloh213, Lloh214, Lloh215
.loh AdrpAdd Lloh222, Lloh223
.loh AdrpAdd Lloh220, Lloh221
.loh AdrpAddLdr Lloh224, Lloh225, Lloh226
.loh AdrpLdrGotLdr Lloh227, Lloh228, Lloh229
.loh AdrpAdd Lloh230, Lloh231
.cfi_endproc
; -- End function
.globl _process_opcode_handlers ; -- Begin function process_opcode_handlers
.p2align 2
_process_opcode_handlers: ; @process_opcode_handlers
.cfi_startproc
; %bb.0:
stp x28, x27, [sp, #-96]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 96
stp x26, x25, [sp, #16] ; 16-byte Folded Spill
stp x24, x23, [sp, #32] ; 16-byte Folded Spill
stp x22, x21, [sp, #48] ; 16-byte Folded Spill
stp x20, x19, [sp, #64] ; 16-byte Folded Spill
stp x29, x30, [sp, #80] ; 16-byte Folded Spill
add x29, sp, #80
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
sub sp, sp, #864
Lloh232:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh233:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh234:
ldr x8, [x8]
stur x8, [x29, #-96]
Lloh235:
adrp x8, _g_input_file@PAGE
Lloh236:
ldr x8, [x8, _g_input_file@PAGEOFF]
str x8, [sp, #24] ; 8-byte Folded Spill
mov w0, #12064
bl _malloc
mov x19, x0
mov w0, #60304
bl _malloc
mov x21, x0
Lloh237:
adrp x8, _g_ops_ac_file@PAGE
Lloh238:
ldr x10, [x8, _g_ops_ac_file@PAGEOFF]
add x9, sp, #640
add x24, x9, #1
mov w8, #60300
add x26, x0, x8
sub x25, x9, #1
sub x8, x0, #1
stp x10, x8, [sp, #8] ; 16-byte Folded Spill
Lloh239:
adrp x27, l_.str.76@PAGE
Lloh240:
add x27, x27, l_.str.76@PAGEOFF
adrp x23, _g_line_number@PAGE
mov x22, #-4294967296
mov w20, #201
str x19, [sp] ; 8-byte Folded Spill
LBB26_1: ; =>This Loop Header: Depth=1
; Child Loop BB26_3 Depth 2
; Child Loop BB26_10 Depth 3
; Child Loop BB26_14 Depth 2
; Child Loop BB26_19 Depth 3
; Child Loop BB26_28 Depth 2
strb wzr, [sp, #640]
b LBB26_3
LBB26_2: ; in Loop: Header=BB26_3 Depth=2
strb wzr, [sp, #640]
ldr w8, [x23, _g_line_number@PAGEOFF]
add w8, w8, #1
str w8, [x23, _g_line_number@PAGEOFF]
LBB26_3: ; Parent Loop BB26_1 Depth=1
; => This Loop Header: Depth=2
; Child Loop BB26_10 Depth 3
add x0, sp, #640
mov x1, x27
bl _strstr
cbnz x0, LBB26_13
; %bb.4: ; in Loop: Header=BB26_3 Depth=2
add x0, sp, #640
Lloh241:
adrp x1, l_.str.112@PAGE
Lloh242:
add x1, x1, l_.str.112@PAGEOFF
mov w2, #80
bl _memcmp
cbz w0, LBB26_45
; %bb.5: ; in Loop: Header=BB26_3 Depth=2
add x0, sp, #640
mov w1, #200
ldr x2, [sp, #24] ; 8-byte Folded Reload
bl _fgets
cbz x0, LBB26_49
; %bb.6: ; in Loop: Header=BB26_3 Depth=2
ldrb w8, [sp, #640]
cmp w8, #13
b.ne LBB26_8
; %bb.7: ; in Loop: Header=BB26_3 Depth=2
ldr q0, [x24]
str q0, [sp, #640]
ldp q0, q1, [x24, #16]
ldp q2, q3, [x24, #48]
stp q2, q3, [sp, #688]
stp q0, q1, [sp, #656]
ldp q0, q1, [x24, #80]
ldp q2, q3, [x24, #112]
stp q2, q3, [sp, #752]
stp q0, q1, [sp, #720]
ldp q0, q1, [x24, #144]
ldr q2, [x24, #176]
ldur x8, [x24, #191]
add x9, sp, #576
stur x8, [x9, #255]
stp q1, q2, [sp, #800]
str q0, [sp, #784]
LBB26_8: ; in Loop: Header=BB26_3 Depth=2
add x0, sp, #640
bl _strlen
cbz w0, LBB26_2
; %bb.9: ; in Loop: Header=BB26_3 Depth=2
lsl x9, x0, #32
sxtw x8, w0
LBB26_10: ; Parent Loop BB26_1 Depth=1
; Parent Loop BB26_3 Depth=2
; => This Inner Loop Header: Depth=3
ldrb w10, [x25, x8]
cmp w10, #13
ccmp w10, #10, #4, ne
b.ne LBB26_12
; %bb.11: ; in Loop: Header=BB26_10 Depth=3
add x9, x9, x22
sub x8, x8, #1
cbnz w8, LBB26_10
b LBB26_2
LBB26_12: ; in Loop: Header=BB26_3 Depth=2
asr x9, x9, #32
add x10, sp, #640
strb wzr, [x10, x9]
ldr w9, [x23, _g_line_number@PAGEOFF]
add w9, w9, #1
str w9, [x23, _g_line_number@PAGEOFF]
tbz w8, #31, LBB26_3
b LBB26_49
LBB26_13: ; in Loop: Header=BB26_1 Depth=1
mov w19, #0
str wzr, [x26]
LBB26_14: ; Parent Loop BB26_1 Depth=1
; => This Loop Header: Depth=2
; Child Loop BB26_19 Depth 3
smaddl x27, w19, w20, x21
mov x0, x27
mov w1, #200
ldr x2, [sp, #24] ; 8-byte Folded Reload
bl _fgets
cbz x0, LBB26_47
; %bb.15: ; in Loop: Header=BB26_14 Depth=2
sxtw x28, w19
ldrb w8, [x27]
cmp w8, #13
b.ne LBB26_17
; %bb.16: ; in Loop: Header=BB26_14 Depth=2
madd x8, x28, x20, x21
ldur q0, [x8, #1]
str q0, [x27]
ldur q0, [x8, #17]
ldur q1, [x8, #33]
ldur q2, [x8, #49]
ldur q3, [x8, #65]
stp q2, q3, [x27, #48]
stp q0, q1, [x27, #16]
ldur q0, [x8, #81]
ldur q1, [x8, #97]
ldur q2, [x8, #113]
ldur q3, [x8, #129]
stp q2, q3, [x27, #112]
stp q0, q1, [x27, #80]
ldur q0, [x8, #145]
ldur q1, [x8, #161]
ldur q2, [x8, #177]
ldr x8, [x8, #192]
stur x8, [x27, #191]
stp q1, q2, [x27, #160]
str q0, [x27, #144]
LBB26_17: ; in Loop: Header=BB26_14 Depth=2
mov x0, x27
bl _strlen
cbz w0, LBB26_21
; %bb.18: ; in Loop: Header=BB26_14 Depth=2
lsl x9, x0, #32
sxtw x8, w0
ldr x10, [sp, #16] ; 8-byte Folded Reload
madd x10, x28, x20, x10
LBB26_19: ; Parent Loop BB26_1 Depth=1
; Parent Loop BB26_14 Depth=2
; => This Inner Loop Header: Depth=3
ldrb w11, [x10, x8]
cmp w11, #13
ccmp w11, #10, #4, ne
b.ne LBB26_22
; %bb.20: ; in Loop: Header=BB26_19 Depth=3
add x9, x9, x22
sub x8, x8, #1
cbnz w8, LBB26_19
LBB26_21: ; in Loop: Header=BB26_14 Depth=2
strb wzr, [x27]
ldr w8, [x23, _g_line_number@PAGEOFF]
add w8, w8, #1
str w8, [x23, _g_line_number@PAGEOFF]
b LBB26_23
LBB26_22: ; in Loop: Header=BB26_14 Depth=2
asr x9, x9, #32
madd x10, x28, x20, x21
strb wzr, [x10, x9]
ldr w9, [x23, _g_line_number@PAGEOFF]
add w9, w9, #1
str w9, [x23, _g_line_number@PAGEOFF]
tbnz w8, #31, LBB26_47
LBB26_23: ; in Loop: Header=BB26_14 Depth=2
ldrsw x8, [x26]
mul x9, x8, x20
ldrb w9, [x21, x9]
add w19, w8, #1
str w19, [x26]
cmp w9, #125
b.eq LBB26_25
; %bb.24: ; in Loop: Header=BB26_14 Depth=2
cmp w8, #299
b.le LBB26_14
b LBB26_48
LBB26_25: ; in Loop: Header=BB26_1 Depth=1
adrp x9, _g_num_primitives@PAGE
ldr w8, [x9, _g_num_primitives@PAGEOFF]
add w8, w8, #1
str w8, [x9, _g_num_primitives@PAGEOFF]
add x0, sp, #640
add x1, sp, #439
add x2, sp, #32
add x3, sp, #238
add x4, sp, #37
bl _extract_opcode_info
cbz w0, LBB26_50
; %bb.26: ; in Loop: Header=BB26_1 Depth=1
Lloh243:
adrp x8, _g_opcode_input_table@GOTPAGE
Lloh244:
ldr x8, [x8, _g_opcode_input_table@GOTPAGEOFF]
add x27, x8, #35
ldr w19, [sp, #32]
b LBB26_28
LBB26_27: ; in Loop: Header=BB26_28 Depth=2
add x27, x27, #68
LBB26_28: ; Parent Loop BB26_1 Depth=1
; => This Inner Loop Header: Depth=2
sub x28, x27, #35
add x0, sp, #439
mov x1, x28
bl _strcmp
cbnz w0, LBB26_27
; %bb.29: ; in Loop: Header=BB26_28 Depth=2
ldurb w8, [x27, #-5]
cmp w19, w8
b.ne LBB26_27
; %bb.30: ; in Loop: Header=BB26_28 Depth=2
sub x1, x27, #4
add x0, sp, #238
bl _strcmp
cbnz w0, LBB26_27
; %bb.31: ; in Loop: Header=BB26_28 Depth=2
add x0, sp, #37
mov x1, x27
bl _strcmp
cbnz w0, LBB26_27
; %bb.32: ; in Loop: Header=BB26_1 Depth=1
Lloh245:
adrp x8, _g_ops_ac_file@PAGE
Lloh246:
ldr x9, [x8, _g_ops_ac_file@PAGEOFF]
ldrsb w8, [sp, #439]
Lloh247:
adrp x10, _g_ops_dm_file@PAGE
Lloh248:
ldr x27, [x10, _g_ops_dm_file@PAGEOFF]
ldr x10, [sp, #8] ; 8-byte Folded Reload
cmp x10, x9
mov x9, x10
b.ne LBB26_34
; %bb.33: ; in Loop: Header=BB26_1 Depth=1
cmp w8, #99
b.gt LBB26_35
LBB26_34: ; in Loop: Header=BB26_1 Depth=1
cmp w8, #109
ccmp x9, x27, #0, gt
Lloh249:
adrp x8, _g_ops_nz_file@PAGE
Lloh250:
ldr x8, [x8, _g_ops_nz_file@PAGEOFF]
csel x27, x8, x9, eq
LBB26_35: ; in Loop: Header=BB26_1 Depth=1
ldr x19, [sp] ; 8-byte Folded Reload
str wzr, [x19, #12060]
mov x0, x28
Lloh251:
adrp x1, l_.str.118@PAGE
Lloh252:
add x1, x1, l_.str.118@PAGEOFF
bl _strcmp
cbz w0, LBB26_40
; %bb.36: ; in Loop: Header=BB26_1 Depth=1
mov x0, x28
Lloh253:
adrp x1, l_.str.119@PAGE
Lloh254:
add x1, x1, l_.str.119@PAGEOFF
bl _strcmp
cbz w0, LBB26_40
; %bb.37: ; in Loop: Header=BB26_1 Depth=1
mov x0, x28
Lloh255:
adrp x1, l_.str.120@PAGE
Lloh256:
add x1, x1, l_.str.120@PAGEOFF
bl _strcmp
cbz w0, LBB26_43
; %bb.38: ; in Loop: Header=BB26_1 Depth=1
mov x0, x28
Lloh257:
adrp x1, l_.str.121@PAGE
Lloh258:
add x1, x1, l_.str.121@PAGEOFF
bl _strcmp
cbz w0, LBB26_44
; %bb.39: ; in Loop: Header=BB26_1 Depth=1
mov x0, x27
mov x1, x21
mov x2, x19
mov x3, x28
bl _generate_opcode_ea_variants
b LBB26_42
LBB26_40: ; in Loop: Header=BB26_1 Depth=1
mov x0, x27
mov x1, x21
mov x2, x19
mov x3, x28
mov w4, #1
LBB26_41: ; in Loop: Header=BB26_1 Depth=1
bl _generate_opcode_cc_variants
LBB26_42: ; in Loop: Header=BB26_1 Depth=1
str x27, [sp, #8] ; 8-byte Folded Spill
Lloh259:
adrp x27, l_.str.76@PAGE
Lloh260:
add x27, x27, l_.str.76@PAGEOFF
b LBB26_1
LBB26_43: ; in Loop: Header=BB26_1 Depth=1
mov x0, x27
mov x1, x21
mov x2, x19
mov x3, x28
mov w4, #2
b LBB26_41
LBB26_44: ; in Loop: Header=BB26_1 Depth=1
mov x0, x27
mov x1, x21
mov x2, x19
mov x3, x28
mov w4, #4
b LBB26_41
LBB26_45:
mov x0, x19
bl _free
mov x0, x21
bl _free
ldur x8, [x29, #-96]
Lloh261:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh262:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh263:
ldr x9, [x9]
cmp x9, x8
b.ne LBB26_51
; %bb.46:
add sp, sp, #864
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
ldp x26, x25, [sp, #16] ; 16-byte Folded Reload
ldp x28, x27, [sp], #96 ; 16-byte Folded Reload
ret
LBB26_47:
Lloh264:
adrp x0, l_.str.115@PAGE
Lloh265:
add x0, x0, l_.str.115@PAGEOFF
bl _error_exit
LBB26_48:
Lloh266:
adrp x0, l_.str.114@PAGE
Lloh267:
add x0, x0, l_.str.114@PAGEOFF
bl _error_exit
LBB26_49:
Lloh268:
adrp x0, l_.str.113@PAGE
Lloh269:
add x0, x0, l_.str.113@PAGEOFF
bl _error_exit
LBB26_50:
Lloh270:
adrp x0, l_.str.116@PAGE
Lloh271:
add x0, x0, l_.str.116@PAGEOFF
bl _error_exit
LBB26_51:
bl ___stack_chk_fail
.loh AdrpAdd Lloh239, Lloh240
.loh AdrpLdr Lloh237, Lloh238
.loh AdrpLdr Lloh235, Lloh236
.loh AdrpLdrGotLdr Lloh232, Lloh233, Lloh234
.loh AdrpAdd Lloh241, Lloh242
.loh AdrpLdrGot Lloh243, Lloh244
.loh AdrpLdr Lloh247, Lloh248
.loh AdrpLdr Lloh245, Lloh246
.loh AdrpLdr Lloh249, Lloh250
.loh AdrpAdd Lloh251, Lloh252
.loh AdrpAdd Lloh253, Lloh254
.loh AdrpAdd Lloh255, Lloh256
.loh AdrpAdd Lloh257, Lloh258
.loh AdrpAdd Lloh259, Lloh260
.loh AdrpLdrGotLdr Lloh261, Lloh262, Lloh263
.loh AdrpAdd Lloh264, Lloh265
.loh AdrpAdd Lloh266, Lloh267
.loh AdrpAdd Lloh268, Lloh269
.loh AdrpAdd Lloh270, Lloh271
.cfi_endproc
; -- End function
.section __TEXT,__literal16,16byte_literals
.p2align 4 ; -- Begin function populate_table
; Constant pool for _populate_table: two 16-byte vectors of 16-bit
; power-of-two lane weights.  lCPI27_0 holds bits 15..8 (0x8000 down to
; 0x0100) and lCPI27_1 holds bits 7..0 (0x0080 down to 0x0001).  Both are
; loaded via adrp/ldr with lCPI27_0@PAGEOFF / lCPI27_1@PAGEOFF, spilled to
; the stack, and later ANDed with byte-compare (cmeq) masks, then
; horizontally OR-reduced to pack per-byte match results into the two
; 16-bit halfwords stored at [x28, #42] and [x28, #44].
lCPI27_0: ; weights for the high byte of the packed mask (bit 15 .. bit 8)
.short 32768 ; 0x8000
.short 16384 ; 0x4000
.short 8192 ; 0x2000
.short 4096 ; 0x1000
.short 2048 ; 0x800
.short 1024 ; 0x400
.short 512 ; 0x200
.short 256 ; 0x100
lCPI27_1: ; weights for the low byte of the packed mask (bit 7 .. bit 0)
.short 128 ; 0x80
.short 64 ; 0x40
.short 32 ; 0x20
.short 16 ; 0x10
.short 8 ; 0x8
.short 4 ; 0x4
.short 2 ; 0x2
.short 1 ; 0x1
.section __TEXT,__text,regular,pure_instructions
.globl _populate_table
.p2align 2
_populate_table: ; @populate_table
.cfi_startproc
; %bb.0:
stp d9, d8, [sp, #-112]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 112
stp x28, x27, [sp, #16] ; 16-byte Folded Spill
stp x26, x25, [sp, #32] ; 16-byte Folded Spill
stp x24, x23, [sp, #48] ; 16-byte Folded Spill
stp x22, x21, [sp, #64] ; 16-byte Folded Spill
stp x20, x19, [sp, #80] ; 16-byte Folded Spill
stp x29, x30, [sp, #96] ; 16-byte Folded Spill
add x29, sp, #96
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
.cfi_offset b8, -104
.cfi_offset b9, -112
sub sp, sp, #432
add x20, sp, #176
Lloh272:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh273:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh274:
ldr x8, [x8]
stur x8, [x29, #-120]
mov x19, #21599
movk x19, #16961, lsl #16
movk x19, #17740, lsl #32
movk x19, #21343, lsl #48
mov x23, #13901
movk x23, #19256, lsl #16
movk x23, #16717, lsl #32
movk x23, #17739, lsl #48
mov x24, #24389
movk x24, #21587, lsl #16
movk x24, #21057, lsl #32
movk x24, #84, lsl #48
strb wzr, [sp, #176]
ldp x8, x9, [sp, #176]
eor x8, x8, x23
eor x9, x9, x19
ldur x10, [sp, #189]
eor x10, x10, x24
orr x8, x8, x9
adrp x21, _g_input_file@PAGE
adrp x22, _g_line_number@PAGE
orr x8, x8, x10
cbz x8, LBB27_13
; %bb.1:
add x8, sp, #176
add x25, x8, #1
add x20, sp, #176
sub x26, x8, #1
mov x27, #-4294967296
b LBB27_4
LBB27_2: ; in Loop: Header=BB27_4 Depth=1
strb wzr, [sp, #176]
ldr w8, [x22, _g_line_number@PAGEOFF]
add w8, w8, #1
str w8, [x22, _g_line_number@PAGEOFF]
LBB27_3: ; in Loop: Header=BB27_4 Depth=1
ldp x8, x9, [sp, #176]
eor x8, x8, x23
eor x9, x9, x19
ldur x10, [sp, #189]
eor x10, x10, x24
orr x8, x8, x9
orr x8, x8, x10
cbz x8, LBB27_13
LBB27_4: ; =>This Loop Header: Depth=1
; Child Loop BB27_9 Depth 2
ldr x2, [x21, _g_input_file@PAGEOFF]
add x0, sp, #176
mov w1, #200
bl _fgets
cbz x0, LBB27_12
; %bb.5: ; in Loop: Header=BB27_4 Depth=1
ldrb w8, [sp, #176]
cmp w8, #13
b.ne LBB27_7
; %bb.6: ; in Loop: Header=BB27_4 Depth=1
ldr q0, [x25]
str q0, [sp, #176]
ldp q0, q1, [x25, #16]
ldp q2, q3, [x25, #48]
stp q2, q3, [sp, #224]
stp q0, q1, [sp, #192]
ldp q0, q1, [x25, #80]
ldp q2, q3, [x25, #112]
stp q2, q3, [sp, #288]
stp q0, q1, [sp, #256]
ldp q0, q1, [x25, #144]
ldr q2, [x25, #176]
ldur x8, [x25, #191]
stur x8, [x20, #191]
stp q1, q2, [sp, #336]
str q0, [sp, #320]
LBB27_7: ; in Loop: Header=BB27_4 Depth=1
add x0, sp, #176
bl _strlen
cbz w0, LBB27_2
; %bb.8: ; in Loop: Header=BB27_4 Depth=1
lsl x9, x0, #32
sxtw x8, w0
LBB27_9: ; Parent Loop BB27_4 Depth=1
; => This Inner Loop Header: Depth=2
ldrb w10, [x26, x8]
cmp w10, #13
ccmp w10, #10, #4, ne
b.ne LBB27_11
; %bb.10: ; in Loop: Header=BB27_9 Depth=2
add x9, x9, x27
sub x8, x8, #1
cbnz w8, LBB27_9
b LBB27_2
LBB27_11: ; in Loop: Header=BB27_4 Depth=1
asr x9, x9, #32
strb wzr, [x20, x9]
ldr w9, [x22, _g_line_number@PAGEOFF]
add w9, w9, #1
str w9, [x22, _g_line_number@PAGEOFF]
tbz w8, #31, LBB27_3
LBB27_12:
Lloh275:
adrp x0, l_.str.123@PAGE
Lloh276:
add x0, x0, l_.str.123@PAGEOFF
bl _error_exit
LBB27_13:
ldr x2, [x21, _g_input_file@PAGEOFF]
add x0, sp, #176
mov w1, #200
bl _fgets
cbz x0, LBB27_12
; %bb.14:
add x9, sp, #176
orr x24, x9, #0x1
sub x10, x29, #144
orr x11, x10, #0x1
orr x8, x10, #0x2
stp x8, x11, [sp, #128] ; 16-byte Folded Spill
orr x11, x10, #0x3
orr x8, x10, #0x4
stp x8, x11, [sp, #112] ; 16-byte Folded Spill
mov w8, #5
orr x8, x10, x8
str x8, [sp, #104] ; 8-byte Folded Spill
orr x11, x10, #0x6
orr x8, x10, #0x7
stp x8, x11, [sp, #88] ; 16-byte Folded Spill
add x11, x10, #8
add x8, x10, #9
stp x8, x11, [sp, #72] ; 16-byte Folded Spill
add x11, x10, #10
add x8, x10, #11
stp x8, x11, [sp, #56] ; 16-byte Folded Spill
add x11, x10, #12
add x8, x10, #13
stp x8, x11, [sp, #40] ; 16-byte Folded Spill
add x11, x10, #14
add x8, x10, #15
stp x8, x11, [sp, #24] ; 16-byte Folded Spill
add x11, x10, #16
add x8, x10, #17
stp x8, x11, [sp, #8] ; 16-byte Folded Spill
sub x27, x9, #1
Lloh277:
adrp x28, _g_opcode_input_table@GOTPAGE
Lloh278:
ldr x28, [x28, _g_opcode_input_table@GOTPAGEOFF]
mov x26, #-4294967296
mov x19, #4294967296
mov w25, #10
movi.8b v8, #46
Lloh279:
adrp x8, lCPI27_0@PAGE
Lloh280:
ldr q0, [x8, lCPI27_0@PAGEOFF]
str q0, [sp, #160] ; 16-byte Folded Spill
movi.8b v9, #49
Lloh281:
adrp x8, lCPI27_1@PAGE
Lloh282:
ldr q0, [x8, lCPI27_1@PAGEOFF]
str q0, [sp, #144] ; 16-byte Folded Spill
b LBB27_17
LBB27_15: ; in Loop: Header=BB27_17 Depth=1
strb wzr, [sp, #176]
ldr w8, [x22, _g_line_number@PAGEOFF]
add w8, w8, #1
str w8, [x22, _g_line_number@PAGEOFF]
LBB27_16: ; in Loop: Header=BB27_17 Depth=1
add x28, x28, #68
ldr x2, [x21, _g_input_file@PAGEOFF]
add x0, sp, #176
mov w1, #200
bl _fgets
cbz x0, LBB27_12
LBB27_17: ; =>This Loop Header: Depth=1
; Child Loop BB27_21 Depth 2
; Child Loop BB27_27 Depth 2
; Child Loop BB27_88 Depth 2
; Child Loop BB27_91 Depth 2
; Child Loop BB27_95 Depth 2
; Child Loop BB27_105 Depth 2
; Child Loop BB27_117 Depth 2
; Child Loop BB27_138 Depth 2
; Child Loop BB27_162 Depth 2
; Child Loop BB27_164 Depth 2
; Child Loop BB27_166 Depth 2
; Child Loop BB27_168 Depth 2
; Child Loop BB27_170 Depth 2
; Child Loop BB27_175 Depth 2
; Child Loop BB27_180 Depth 2
; Child Loop BB27_185 Depth 2
; Child Loop BB27_190 Depth 2
; Child Loop BB27_195 Depth 2
ldrb w8, [sp, #176]
cmp w8, #13
b.ne LBB27_19
; %bb.18: ; in Loop: Header=BB27_17 Depth=1
ldr q0, [x24]
str q0, [sp, #176]
ldp q0, q1, [x24, #16]
ldp q2, q3, [x24, #48]
stp q2, q3, [sp, #224]
stp q0, q1, [sp, #192]
ldp q0, q1, [x24, #80]
ldp q2, q3, [x24, #112]
stp q2, q3, [sp, #288]
stp q0, q1, [sp, #256]
ldp q0, q1, [x24, #144]
ldr q2, [x24, #176]
ldur x8, [x24, #191]
stur x8, [x20, #191]
stp q1, q2, [sp, #336]
str q0, [sp, #320]
LBB27_19: ; in Loop: Header=BB27_17 Depth=1
add x0, sp, #176
bl _strlen
cbz w0, LBB27_15
; %bb.20: ; in Loop: Header=BB27_17 Depth=1
lsl x9, x0, #32
sxtw x8, w0
LBB27_21: ; Parent Loop BB27_17 Depth=1
; => This Inner Loop Header: Depth=2
ldrb w10, [x27, x8]
cmp w10, #13
ccmp w10, #10, #4, ne
b.ne LBB27_23
; %bb.22: ; in Loop: Header=BB27_21 Depth=2
add x9, x9, x26
sub x8, x8, #1
cbnz w8, LBB27_21
b LBB27_15
LBB27_23: ; in Loop: Header=BB27_17 Depth=1
asr x9, x9, #32
add x10, sp, #176
strb wzr, [x10, x9]
ldr w9, [x22, _g_line_number@PAGEOFF]
add w9, w9, #1
str w9, [x22, _g_line_number@PAGEOFF]
tbnz w8, #31, LBB27_12
; %bb.24: ; in Loop: Header=BB27_17 Depth=1
ldrb w23, [sp, #176]
cbz w23, LBB27_16
; %bb.25: ; in Loop: Header=BB27_17 Depth=1
add x0, sp, #176
Lloh283:
adrp x1, l_.str.112@PAGE
Lloh284:
add x1, x1, l_.str.112@PAGEOFF
mov w2, #80
bl _memcmp
cbz w0, LBB27_199
; %bb.26: ; in Loop: Header=BB27_17 Depth=1
add x8, sp, #176
cmp w23, #32
b.ne LBB27_28
LBB27_27: ; Parent Loop BB27_17 Depth=1
; => This Inner Loop Header: Depth=2
ldrb w9, [x8, #1]!
cmp w9, #32
b.eq LBB27_27
LBB27_28: ; in Loop: Header=BB27_17 Depth=1
add x9, sp, #176
sub w8, w8, w9
add x8, x9, w8, sxtw
ldrb w10, [x8]
orr w11, w10, #0x20
mov x9, x28
cmp w11, #32
b.eq LBB27_87
; %bb.29: ; in Loop: Header=BB27_17 Depth=1
mov x9, x28
strb w10, [x9], #1
ldrb w10, [x8, #1]
orr w11, w10, #0x20
cmp w11, #32
b.eq LBB27_87
; %bb.30: ; in Loop: Header=BB27_17 Depth=1
strb w10, [x28, #1]
ldrb w9, [x8, #2]
orr w10, w9, #0x20
cmp w10, #32
b.ne LBB27_32
; %bb.31: ; in Loop: Header=BB27_17 Depth=1
add x9, x28, #2
b LBB27_87
LBB27_32: ; in Loop: Header=BB27_17 Depth=1
strb w9, [x28, #2]
ldrb w9, [x8, #3]
orr w10, w9, #0x20
cmp w10, #32
b.ne LBB27_34
; %bb.33: ; in Loop: Header=BB27_17 Depth=1
add x9, x28, #3
b LBB27_87
LBB27_34: ; in Loop: Header=BB27_17 Depth=1
strb w9, [x28, #3]
ldrb w9, [x8, #4]
orr w10, w9, #0x20
cmp w10, #32
b.ne LBB27_36
; %bb.35: ; in Loop: Header=BB27_17 Depth=1
add x9, x28, #4
b LBB27_87
LBB27_36: ; in Loop: Header=BB27_17 Depth=1
strb w9, [x28, #4]
ldrb w9, [x8, #5]
orr w10, w9, #0x20
cmp w10, #32
b.ne LBB27_38
; %bb.37: ; in Loop: Header=BB27_17 Depth=1
add x9, x28, #5
b LBB27_87
LBB27_38: ; in Loop: Header=BB27_17 Depth=1
strb w9, [x28, #5]
ldrb w9, [x8, #6]
orr w10, w9, #0x20
cmp w10, #32
b.ne LBB27_40
; %bb.39: ; in Loop: Header=BB27_17 Depth=1
add x9, x28, #6
b LBB27_87
LBB27_40: ; in Loop: Header=BB27_17 Depth=1
strb w9, [x28, #6]
ldrb w9, [x8, #7]
orr w10, w9, #0x20
cmp w10, #32
b.ne LBB27_42
; %bb.41: ; in Loop: Header=BB27_17 Depth=1
add x9, x28, #7
b LBB27_87
LBB27_42: ; in Loop: Header=BB27_17 Depth=1
strb w9, [x28, #7]
ldrb w9, [x8, #8]
orr w10, w9, #0x20
cmp w10, #32
b.ne LBB27_44
; %bb.43: ; in Loop: Header=BB27_17 Depth=1
add x9, x28, #8
b LBB27_87
LBB27_44: ; in Loop: Header=BB27_17 Depth=1
strb w9, [x28, #8]
ldrb w9, [x8, #9]
orr w10, w9, #0x20
cmp w10, #32
b.ne LBB27_46
; %bb.45: ; in Loop: Header=BB27_17 Depth=1
add x9, x28, #9
b LBB27_87
LBB27_46: ; in Loop: Header=BB27_17 Depth=1
strb w9, [x28, #9]
ldrb w9, [x8, #10]
orr w10, w9, #0x20
cmp w10, #32
b.ne LBB27_48
; %bb.47: ; in Loop: Header=BB27_17 Depth=1
add x9, x28, #10
b LBB27_87
LBB27_48: ; in Loop: Header=BB27_17 Depth=1
strb w9, [x28, #10]
ldrb w9, [x8, #11]
orr w10, w9, #0x20
cmp w10, #32
b.ne LBB27_50
; %bb.49: ; in Loop: Header=BB27_17 Depth=1
add x9, x28, #11
b LBB27_87
LBB27_50: ; in Loop: Header=BB27_17 Depth=1
strb w9, [x28, #11]
ldrb w9, [x8, #12]
orr w10, w9, #0x20
cmp w10, #32
b.ne LBB27_52
; %bb.51: ; in Loop: Header=BB27_17 Depth=1
add x9, x28, #12
b LBB27_87
LBB27_52: ; in Loop: Header=BB27_17 Depth=1
strb w9, [x28, #12]
ldrb w9, [x8, #13]
orr w10, w9, #0x20
cmp w10, #32
b.ne LBB27_54
; %bb.53: ; in Loop: Header=BB27_17 Depth=1
add x9, x28, #13
b LBB27_87
LBB27_54: ; in Loop: Header=BB27_17 Depth=1
strb w9, [x28, #13]
ldrb w9, [x8, #14]
orr w10, w9, #0x20
cmp w10, #32
b.ne LBB27_56
; %bb.55: ; in Loop: Header=BB27_17 Depth=1
add x9, x28, #14
b LBB27_87
LBB27_56: ; in Loop: Header=BB27_17 Depth=1
strb w9, [x28, #14]
ldrb w9, [x8, #15]
orr w10, w9, #0x20
cmp w10, #32
b.ne LBB27_58
; %bb.57: ; in Loop: Header=BB27_17 Depth=1
add x9, x28, #15
b LBB27_87
LBB27_58: ; in Loop: Header=BB27_17 Depth=1
strb w9, [x28, #15]
ldrb w9, [x8, #16]
orr w10, w9, #0x20
cmp w10, #32
b.ne LBB27_60
; %bb.59: ; in Loop: Header=BB27_17 Depth=1
add x9, x28, #16
b LBB27_87
LBB27_60: ; in Loop: Header=BB27_17 Depth=1
strb w9, [x28, #16]
ldrb w9, [x8, #17]
orr w10, w9, #0x20
cmp w10, #32
b.ne LBB27_62
; %bb.61: ; in Loop: Header=BB27_17 Depth=1
add x9, x28, #17
b LBB27_87
LBB27_62: ; in Loop: Header=BB27_17 Depth=1
strb w9, [x28, #17]
ldrb w9, [x8, #18]
orr w10, w9, #0x20
cmp w10, #32
b.ne LBB27_64
; %bb.63: ; in Loop: Header=BB27_17 Depth=1
add x9, x28, #18
b LBB27_87
LBB27_64: ; in Loop: Header=BB27_17 Depth=1
strb w9, [x28, #18]
ldrb w9, [x8, #19]
orr w10, w9, #0x20
cmp w10, #32
b.ne LBB27_66
; %bb.65: ; in Loop: Header=BB27_17 Depth=1
add x9, x28, #19
b LBB27_87
LBB27_66: ; in Loop: Header=BB27_17 Depth=1
strb w9, [x28, #19]
ldrb w9, [x8, #20]
orr w10, w9, #0x20
cmp w10, #32
b.ne LBB27_68
; %bb.67: ; in Loop: Header=BB27_17 Depth=1
add x9, x28, #20
b LBB27_87
LBB27_68: ; in Loop: Header=BB27_17 Depth=1
strb w9, [x28, #20]
ldrb w9, [x8, #21]
orr w10, w9, #0x20
cmp w10, #32
b.ne LBB27_70
; %bb.69: ; in Loop: Header=BB27_17 Depth=1
add x9, x28, #21
b LBB27_87
LBB27_70: ; in Loop: Header=BB27_17 Depth=1
strb w9, [x28, #21]
ldrb w9, [x8, #22]
orr w10, w9, #0x20
cmp w10, #32
b.ne LBB27_72
; %bb.71: ; in Loop: Header=BB27_17 Depth=1
add x9, x28, #22
b LBB27_87
LBB27_72: ; in Loop: Header=BB27_17 Depth=1
strb w9, [x28, #22]
ldrb w9, [x8, #23]
orr w10, w9, #0x20
cmp w10, #32
b.ne LBB27_74
; %bb.73: ; in Loop: Header=BB27_17 Depth=1
add x9, x28, #23
b LBB27_87
LBB27_74: ; in Loop: Header=BB27_17 Depth=1
strb w9, [x28, #23]
ldrb w9, [x8, #24]
orr w10, w9, #0x20
cmp w10, #32
b.ne LBB27_76
; %bb.75: ; in Loop: Header=BB27_17 Depth=1
add x9, x28, #24
b LBB27_87
LBB27_76: ; in Loop: Header=BB27_17 Depth=1
strb w9, [x28, #24]
ldrb w9, [x8, #25]
orr w10, w9, #0x20
cmp w10, #32
b.ne LBB27_78
; %bb.77: ; in Loop: Header=BB27_17 Depth=1
add x9, x28, #25
b LBB27_87
LBB27_78: ; in Loop: Header=BB27_17 Depth=1
strb w9, [x28, #25]
ldrb w9, [x8, #26]
orr w10, w9, #0x20
cmp w10, #32
b.ne LBB27_80
; %bb.79: ; in Loop: Header=BB27_17 Depth=1
add x9, x28, #26
b LBB27_87
LBB27_80: ; in Loop: Header=BB27_17 Depth=1
strb w9, [x28, #26]
ldrb w9, [x8, #27]
orr w10, w9, #0x20
cmp w10, #32
b.ne LBB27_82
; %bb.81: ; in Loop: Header=BB27_17 Depth=1
add x9, x28, #27
b LBB27_87
LBB27_82: ; in Loop: Header=BB27_17 Depth=1
strb w9, [x28, #27]
ldrb w9, [x8, #28]
orr w10, w9, #0x20
cmp w10, #32
b.ne LBB27_84
; %bb.83: ; in Loop: Header=BB27_17 Depth=1
add x9, x28, #28
b LBB27_87
LBB27_84: ; in Loop: Header=BB27_17 Depth=1
strb w9, [x28, #28]
ldrb w10, [x8, #29]
orr w9, w10, #0x20
cmp w9, #32
b.ne LBB27_86
; %bb.85: ; in Loop: Header=BB27_17 Depth=1
add x9, x28, #29
b LBB27_87
LBB27_86: ; in Loop: Header=BB27_17 Depth=1
add x9, x28, #30
strb w10, [x28, #29]
ldrb w10, [x8, #30]
orr w11, w10, #0x20
cmp w11, #32
b.ne LBB27_207
LBB27_87: ; in Loop: Header=BB27_17 Depth=1
strb wzr, [x9]
sub w9, w9, w28
add x8, x8, w9, sxtw
mov x9, #-4294967296
mov x10, x8
LBB27_88: ; Parent Loop BB27_17 Depth=1
; => This Inner Loop Header: Depth=2
ldrb w11, [x10], #1
add x9, x9, x19
cmp w11, #32
b.eq LBB27_88
; %bb.89: ; in Loop: Header=BB27_17 Depth=1
add x8, x8, x9, asr #32
ldrb w10, [x8]
sub w9, w10, #48
cmp w9, #9
b.hi LBB27_92
; %bb.90: ; in Loop: Header=BB27_17 Depth=1
mov w9, #0
mov x12, x10
mov x11, x8
LBB27_91: ; Parent Loop BB27_17 Depth=1
; => This Inner Loop Header: Depth=2
ldrb w10, [x11, #1]!
mul w9, w9, w25
add w9, w9, w12, uxtb
sub w9, w9, #48
sub w13, w10, #48
mov x12, x10
cmp w13, #10
b.lo LBB27_91
b LBB27_93
LBB27_92: ; in Loop: Header=BB27_17 Depth=1
mov w9, #0
mov x11, x8
LBB27_93: ; in Loop: Header=BB27_17 Depth=1
orr w12, w10, #0x20
cmp w12, #32
b.ne LBB27_202
; %bb.94: ; in Loop: Header=BB27_17 Depth=1
sub w10, w11, w8
add x8, x8, w10, sxtw
strb w9, [x28, #30]
mov x9, #-4294967296
mov x10, x8
LBB27_95: ; Parent Loop BB27_17 Depth=1
; => This Inner Loop Header: Depth=2
ldrb w11, [x10], #1
add x9, x9, x19
cmp w11, #32
b.eq LBB27_95
; %bb.96: ; in Loop: Header=BB27_17 Depth=1
add x8, x8, x9, asr #32
add x9, x28, #31
ldrb w11, [x8]
orr w12, w11, #0x20
mov x10, x9
cmp w12, #32
b.eq LBB27_104
; %bb.97: ; in Loop: Header=BB27_17 Depth=1
strb w11, [x28, #31]
ldrb w10, [x8, #1]
orr w11, w10, #0x20
cmp w11, #32
b.ne LBB27_99
; %bb.98: ; in Loop: Header=BB27_17 Depth=1
add x10, x28, #32
b LBB27_104
LBB27_99: ; in Loop: Header=BB27_17 Depth=1
strb w10, [x28, #32]
ldrb w10, [x8, #2]
orr w11, w10, #0x20
cmp w11, #32
b.ne LBB27_101
; %bb.100: ; in Loop: Header=BB27_17 Depth=1
add x10, x28, #33
b LBB27_104
LBB27_101: ; in Loop: Header=BB27_17 Depth=1
strb w10, [x28, #33]
ldrb w11, [x8, #3]
orr w10, w11, #0x20
cmp w10, #32
b.ne LBB27_103
; %bb.102: ; in Loop: Header=BB27_17 Depth=1
add x10, x28, #34
b LBB27_104
LBB27_103: ; in Loop: Header=BB27_17 Depth=1
add x10, x28, #35
strb w11, [x28, #34]
ldrb w11, [x8, #4]
orr w12, w11, #0x20
cmp w12, #32
b.ne LBB27_205
LBB27_104: ; in Loop: Header=BB27_17 Depth=1
strb wzr, [x10]
sub w9, w10, w9
add x8, x8, w9, sxtw
mov x9, #-4294967296
mov x10, x8
LBB27_105: ; Parent Loop BB27_17 Depth=1
; => This Inner Loop Header: Depth=2
ldrb w11, [x10], #1
add x9, x9, x19
cmp w11, #32
b.eq LBB27_105
; %bb.106: ; in Loop: Header=BB27_17 Depth=1
add x8, x8, x9, asr #32
add x9, x28, #35
ldrb w11, [x8]
orr w12, w11, #0x20
mov x10, x9
cmp w12, #32
b.eq LBB27_116
; %bb.107: ; in Loop: Header=BB27_17 Depth=1
strb w11, [x28, #35]
ldrb w10, [x8, #1]
orr w11, w10, #0x20
cmp w11, #32
b.ne LBB27_109
; %bb.108: ; in Loop: Header=BB27_17 Depth=1
add x10, x28, #36
b LBB27_116
LBB27_109: ; in Loop: Header=BB27_17 Depth=1
strb w10, [x28, #36]
ldrb w10, [x8, #2]
orr w11, w10, #0x20
cmp w11, #32
b.ne LBB27_111
; %bb.110: ; in Loop: Header=BB27_17 Depth=1
add x10, x28, #37
b LBB27_116
LBB27_111: ; in Loop: Header=BB27_17 Depth=1
strb w10, [x28, #37]
ldrb w10, [x8, #3]
orr w11, w10, #0x20
cmp w11, #32
b.ne LBB27_113
; %bb.112: ; in Loop: Header=BB27_17 Depth=1
add x10, x28, #38
b LBB27_116
LBB27_113: ; in Loop: Header=BB27_17 Depth=1
strb w10, [x28, #38]
ldrb w11, [x8, #4]
orr w10, w11, #0x20
cmp w10, #32
b.ne LBB27_115
; %bb.114: ; in Loop: Header=BB27_17 Depth=1
add x10, x28, #39
b LBB27_116
LBB27_115: ; in Loop: Header=BB27_17 Depth=1
add x10, x28, #40
strb w11, [x28, #39]
ldrb w11, [x8, #5]
orr w12, w11, #0x20
cmp w12, #32
b.ne LBB27_205
LBB27_116: ; in Loop: Header=BB27_17 Depth=1
strb wzr, [x10]
sub w9, w10, w9
add x8, x8, w9, sxtw
mov x9, #-4294967296
mov x10, x8
LBB27_117: ; Parent Loop BB27_17 Depth=1
; => This Inner Loop Header: Depth=2
ldrb w11, [x10], #1
add x9, x9, x19
cmp w11, #32
b.eq LBB27_117
; %bb.118: ; in Loop: Header=BB27_17 Depth=1
add x8, x8, x9, asr #32
ldrb w9, [x8]
orr w10, w9, #0x20
cmp w10, #32
b.ne LBB27_120
; %bb.119: ; in Loop: Header=BB27_17 Depth=1
sub x9, x29, #144
b LBB27_137
LBB27_120: ; in Loop: Header=BB27_17 Depth=1
sturb w9, [x29, #-144]
ldrb w10, [x8, #1]
orr w11, w10, #0x20
ldr x9, [sp, #136] ; 8-byte Folded Reload
cmp w11, #32
b.eq LBB27_137
; %bb.121: ; in Loop: Header=BB27_17 Depth=1
sturb w10, [x29, #-143]
ldrb w10, [x8, #2]
orr w11, w10, #0x20
ldr x9, [sp, #128] ; 8-byte Folded Reload
cmp w11, #32
b.eq LBB27_137
; %bb.122: ; in Loop: Header=BB27_17 Depth=1
sturb w10, [x29, #-142]
ldrb w10, [x8, #3]
orr w11, w10, #0x20
ldr x9, [sp, #120] ; 8-byte Folded Reload
cmp w11, #32
b.eq LBB27_137
; %bb.123: ; in Loop: Header=BB27_17 Depth=1
sturb w10, [x29, #-141]
ldrb w10, [x8, #4]
orr w11, w10, #0x20
ldr x9, [sp, #112] ; 8-byte Folded Reload
cmp w11, #32
b.eq LBB27_137
; %bb.124: ; in Loop: Header=BB27_17 Depth=1
sturb w10, [x29, #-140]
ldrb w10, [x8, #5]
orr w11, w10, #0x20
ldr x9, [sp, #104] ; 8-byte Folded Reload
cmp w11, #32
b.eq LBB27_137
; %bb.125: ; in Loop: Header=BB27_17 Depth=1
sturb w10, [x29, #-139]
ldrb w10, [x8, #6]
orr w11, w10, #0x20
ldr x9, [sp, #96] ; 8-byte Folded Reload
cmp w11, #32
b.eq LBB27_137
; %bb.126: ; in Loop: Header=BB27_17 Depth=1
sturb w10, [x29, #-138]
ldrb w10, [x8, #7]
orr w11, w10, #0x20
ldr x9, [sp, #88] ; 8-byte Folded Reload
cmp w11, #32
b.eq LBB27_137
; %bb.127: ; in Loop: Header=BB27_17 Depth=1
sturb w10, [x29, #-137]
ldrb w10, [x8, #8]
orr w11, w10, #0x20
ldr x9, [sp, #80] ; 8-byte Folded Reload
cmp w11, #32
b.eq LBB27_137
; %bb.128: ; in Loop: Header=BB27_17 Depth=1
sturb w10, [x29, #-136]
ldrb w10, [x8, #9]
orr w11, w10, #0x20
ldr x9, [sp, #72] ; 8-byte Folded Reload
cmp w11, #32
b.eq LBB27_137
; %bb.129: ; in Loop: Header=BB27_17 Depth=1
sturb w10, [x29, #-135]
ldrb w10, [x8, #10]
orr w11, w10, #0x20
ldr x9, [sp, #64] ; 8-byte Folded Reload
cmp w11, #32
b.eq LBB27_137
; %bb.130: ; in Loop: Header=BB27_17 Depth=1
sturb w10, [x29, #-134]
ldrb w10, [x8, #11]
orr w11, w10, #0x20
ldr x9, [sp, #56] ; 8-byte Folded Reload
cmp w11, #32
b.eq LBB27_137
; %bb.131: ; in Loop: Header=BB27_17 Depth=1
sturb w10, [x29, #-133]
ldrb w10, [x8, #12]
orr w11, w10, #0x20
ldr x9, [sp, #48] ; 8-byte Folded Reload
cmp w11, #32
b.eq LBB27_137
; %bb.132: ; in Loop: Header=BB27_17 Depth=1
sturb w10, [x29, #-132]
ldrb w10, [x8, #13]
orr w11, w10, #0x20
ldr x9, [sp, #40] ; 8-byte Folded Reload
cmp w11, #32
b.eq LBB27_137
; %bb.133: ; in Loop: Header=BB27_17 Depth=1
sturb w10, [x29, #-131]
ldrb w10, [x8, #14]
orr w11, w10, #0x20
ldr x9, [sp, #32] ; 8-byte Folded Reload
cmp w11, #32
b.eq LBB27_137
; %bb.134: ; in Loop: Header=BB27_17 Depth=1
sturb w10, [x29, #-130]
ldrb w10, [x8, #15]
orr w11, w10, #0x20
ldr x9, [sp, #24] ; 8-byte Folded Reload
cmp w11, #32
b.eq LBB27_137
; %bb.135: ; in Loop: Header=BB27_17 Depth=1
sturb w10, [x29, #-129]
ldrb w10, [x8, #16]
orr w11, w10, #0x20
ldr x9, [sp, #16] ; 8-byte Folded Reload
cmp w11, #32
b.eq LBB27_137
; %bb.136: ; in Loop: Header=BB27_17 Depth=1
sturb w10, [x29, #-128]
ldrb w10, [x8, #17]
orr w11, w10, #0x20
ldr x9, [sp, #8] ; 8-byte Folded Reload
cmp w11, #32
b.ne LBB27_208
LBB27_137: ; in Loop: Header=BB27_17 Depth=1
strb wzr, [x9]
sub x10, x29, #144
sub w9, w9, w10
add x8, x8, w9, sxtw
mov x9, #-4294967296
mov x10, x8
LBB27_138: ; Parent Loop BB27_17 Depth=1
; => This Inner Loop Header: Depth=2
ldrb w11, [x10], #1
add x9, x9, x19
cmp w11, #32
b.eq LBB27_138
; %bb.139: ; in Loop: Header=BB27_17 Depth=1
add x8, x8, x9, asr #32
add x9, x28, #46
ldrb w11, [x8]
orr w12, w11, #0x20
mov x10, x9
cmp w12, #32
b.eq LBB27_161
; %bb.140: ; in Loop: Header=BB27_17 Depth=1
strb w11, [x28, #46]
ldrb w10, [x8, #1]
orr w11, w10, #0x20
cmp w11, #32
b.ne LBB27_142
; %bb.141: ; in Loop: Header=BB27_17 Depth=1
add x10, x28, #47
b LBB27_161
LBB27_142: ; in Loop: Header=BB27_17 Depth=1
strb w10, [x28, #47]
ldrb w10, [x8, #2]
orr w11, w10, #0x20
cmp w11, #32
b.ne LBB27_144
; %bb.143: ; in Loop: Header=BB27_17 Depth=1
add x10, x28, #48
b LBB27_161
LBB27_144: ; in Loop: Header=BB27_17 Depth=1
strb w10, [x28, #48]
ldrb w10, [x8, #3]
orr w11, w10, #0x20
cmp w11, #32
b.ne LBB27_146
; %bb.145: ; in Loop: Header=BB27_17 Depth=1
add x10, x28, #49
b LBB27_161
LBB27_146: ; in Loop: Header=BB27_17 Depth=1
strb w10, [x28, #49]
ldrb w10, [x8, #4]
orr w11, w10, #0x20
cmp w11, #32
b.ne LBB27_148
; %bb.147: ; in Loop: Header=BB27_17 Depth=1
add x10, x28, #50
b LBB27_161
LBB27_148: ; in Loop: Header=BB27_17 Depth=1
strb w10, [x28, #50]
ldrb w10, [x8, #5]
orr w11, w10, #0x20
cmp w11, #32
b.ne LBB27_150
; %bb.149: ; in Loop: Header=BB27_17 Depth=1
add x10, x28, #51
b LBB27_161
LBB27_150: ; in Loop: Header=BB27_17 Depth=1
strb w10, [x28, #51]
ldrb w10, [x8, #6]
orr w11, w10, #0x20
cmp w11, #32
b.ne LBB27_152
; %bb.151: ; in Loop: Header=BB27_17 Depth=1
add x10, x28, #52
b LBB27_161
LBB27_152: ; in Loop: Header=BB27_17 Depth=1
strb w10, [x28, #52]
ldrb w10, [x8, #7]
orr w11, w10, #0x20
cmp w11, #32
b.ne LBB27_154
; %bb.153: ; in Loop: Header=BB27_17 Depth=1
add x10, x28, #53
b LBB27_161
LBB27_154: ; in Loop: Header=BB27_17 Depth=1
strb w10, [x28, #53]
ldrb w10, [x8, #8]
orr w11, w10, #0x20
cmp w11, #32
b.ne LBB27_156
; %bb.155: ; in Loop: Header=BB27_17 Depth=1
add x10, x28, #54
b LBB27_161
LBB27_156: ; in Loop: Header=BB27_17 Depth=1
strb w10, [x28, #54]
ldrb w10, [x8, #9]
orr w11, w10, #0x20
cmp w11, #32
b.ne LBB27_158
; %bb.157: ; in Loop: Header=BB27_17 Depth=1
add x10, x28, #55
b LBB27_161
LBB27_158: ; in Loop: Header=BB27_17 Depth=1
strb w10, [x28, #55]
ldrb w11, [x8, #10]
orr w10, w11, #0x20
cmp w10, #32
b.ne LBB27_160
; %bb.159: ; in Loop: Header=BB27_17 Depth=1
add x10, x28, #56
b LBB27_161
LBB27_160: ; in Loop: Header=BB27_17 Depth=1
add x10, x28, #57
strb w11, [x28, #56]
ldrb w11, [x8, #11]
orr w12, w11, #0x20
cmp w12, #32
b.ne LBB27_205
LBB27_161: ; in Loop: Header=BB27_17 Depth=1
strb wzr, [x10]
sub w9, w10, w9
add x8, x8, w9, sxtw
mov x9, #-4294967296
mov x10, x8
LBB27_162: ; Parent Loop BB27_17 Depth=1
; => This Inner Loop Header: Depth=2
ldrb w11, [x10], #1
add x9, x9, x19
cmp w11, #32
b.eq LBB27_162
; %bb.163: ; in Loop: Header=BB27_17 Depth=1
add x8, x8, x9, asr #32
ldrb w9, [x8], #1
strb w9, [x28, #57]
mov x9, #-4294967296
mov x10, x8
LBB27_164: ; Parent Loop BB27_17 Depth=1
; => This Inner Loop Header: Depth=2
ldrb w11, [x10], #1
add x9, x9, x19
cmp w11, #32
b.eq LBB27_164
; %bb.165: ; in Loop: Header=BB27_17 Depth=1
add x8, x8, x9, asr #32
ldrb w9, [x8], #1
strb w9, [x28, #58]
mov x9, #-4294967296
mov x10, x8
LBB27_166: ; Parent Loop BB27_17 Depth=1
; => This Inner Loop Header: Depth=2
ldrb w11, [x10], #1
add x9, x9, x19
cmp w11, #32
b.eq LBB27_166
; %bb.167: ; in Loop: Header=BB27_17 Depth=1
add x8, x8, x9, asr #32
ldrb w9, [x8], #1
strb w9, [x28, #59]
mov x9, #-4294967296
mov x10, x8
LBB27_168: ; Parent Loop BB27_17 Depth=1
; => This Inner Loop Header: Depth=2
ldrb w11, [x10], #1
add x9, x9, x19
cmp w11, #32
b.eq LBB27_168
; %bb.169: ; in Loop: Header=BB27_17 Depth=1
add x8, x8, x9, asr #32
mov x9, #-4294967296
mov x10, x8
LBB27_170: ; Parent Loop BB27_17 Depth=1
; => This Inner Loop Header: Depth=2
ldrb w11, [x10], #1
add x9, x9, x19
cmp w11, #32
b.eq LBB27_170
; %bb.171: ; in Loop: Header=BB27_17 Depth=1
add x9, x8, x9, asr #32
ldrb w8, [x9]
cmp w8, #46
b.ne LBB27_173
; %bb.172: ; in Loop: Header=BB27_17 Depth=1
strb w8, [x28, #60]
strb wzr, [x28, #64]
add x8, x9, #1
b LBB27_179
LBB27_173: ; in Loop: Header=BB27_17 Depth=1
mov w8, #48
strb w8, [x28, #60]
ldrb w8, [x9]
sub w10, w8, #48
cmp w10, #9
b.hi LBB27_176
; %bb.174: ; in Loop: Header=BB27_17 Depth=1
mov w10, #0
mov x12, x8
mov x11, x9
LBB27_175: ; Parent Loop BB27_17 Depth=1
; => This Inner Loop Header: Depth=2
ldrb w8, [x11, #1]!
mul w10, w10, w25
add w10, w10, w12, uxtb
sub w10, w10, #48
sub w13, w8, #48
mov x12, x8
cmp w13, #10
b.lo LBB27_175
b LBB27_177
LBB27_176: ; in Loop: Header=BB27_17 Depth=1
mov w10, #0
mov x11, x9
LBB27_177: ; in Loop: Header=BB27_17 Depth=1
orr w12, w8, #0x20
cmp w12, #32
b.ne LBB27_201
; %bb.178: ; in Loop: Header=BB27_17 Depth=1
sub w8, w11, w9
add x8, x9, w8, sxtw
strb w10, [x28, #64]
LBB27_179: ; in Loop: Header=BB27_17 Depth=1
mov x9, #-4294967296
mov x10, x8
LBB27_180: ; Parent Loop BB27_17 Depth=1
; => This Inner Loop Header: Depth=2
ldrb w11, [x10], #1
add x9, x9, x19
cmp w11, #32
b.eq LBB27_180
; %bb.181: ; in Loop: Header=BB27_17 Depth=1
add x9, x8, x9, asr #32
ldrb w8, [x9]
cmp w8, #46
b.ne LBB27_183
; %bb.182: ; in Loop: Header=BB27_17 Depth=1
strb w8, [x28, #61]
strb wzr, [x28, #65]
add x9, x9, #1
b LBB27_189
LBB27_183: ; in Loop: Header=BB27_17 Depth=1
mov w8, #49
strb w8, [x28, #61]
ldrb w8, [x9]
sub w10, w8, #48
cmp w10, #9
b.hi LBB27_186
; %bb.184: ; in Loop: Header=BB27_17 Depth=1
mov w10, #0
mov x12, x8
mov x11, x9
LBB27_185: ; Parent Loop BB27_17 Depth=1
; => This Inner Loop Header: Depth=2
ldrb w8, [x11, #1]!
mul w10, w10, w25
add w10, w10, w12, uxtb
sub w10, w10, #48
sub w13, w8, #48
mov x12, x8
cmp w13, #10
b.lo LBB27_185
b LBB27_187
LBB27_186: ; in Loop: Header=BB27_17 Depth=1
mov w10, #0
mov x11, x9
LBB27_187: ; in Loop: Header=BB27_17 Depth=1
orr w12, w8, #0x20
cmp w12, #32
b.ne LBB27_201
; %bb.188: ; in Loop: Header=BB27_17 Depth=1
sub w8, w11, w9
add x9, x9, w8, sxtw
strb w10, [x28, #65]
LBB27_189: ; in Loop: Header=BB27_17 Depth=1
mov w10, #0
mov x8, #-4294967296
mov x12, x9
LBB27_190: ; Parent Loop BB27_17 Depth=1
; => This Inner Loop Header: Depth=2
mov x11, x10
ldrb w13, [x12], #1
add x8, x8, x19
add w10, w10, #1
cmp w13, #32
b.eq LBB27_190
; %bb.191: ; in Loop: Header=BB27_17 Depth=1
asr x8, x8, #32
ldrb w10, [x9, x8]
cmp w10, #46
b.ne LBB27_193
; %bb.192: ; in Loop: Header=BB27_17 Depth=1
mov w10, #0
mov w8, #46
strb w8, [x28, #62]
b LBB27_198
LBB27_193: ; in Loop: Header=BB27_17 Depth=1
mov w10, #50
strb w10, [x28, #62]
ldrb w8, [x9, x8]
sub w10, w8, #48
cmp w10, #9
b.hi LBB27_196
; %bb.194: ; in Loop: Header=BB27_17 Depth=1
mov w10, #0
add x9, x9, w11, sxtw
add x9, x9, #1
LBB27_195: ; Parent Loop BB27_17 Depth=1
; => This Inner Loop Header: Depth=2
mul w10, w10, w25
add w8, w10, w8, uxtb
sub w10, w8, #48
ldrb w8, [x9], #1
sub w11, w8, #48
cmp w11, #10
b.lo LBB27_195
b LBB27_197
LBB27_196: ; in Loop: Header=BB27_17 Depth=1
mov w10, #0
LBB27_197: ; in Loop: Header=BB27_17 Depth=1
orr w9, w8, #0x20
cmp w9, #32
b.ne LBB27_201
LBB27_198: ; in Loop: Header=BB27_17 Depth=1
strb w10, [x28, #66]
ldp d0, d1, [x29, #-144]
cmeq.8b v2, v0, v8
sshll.8h v2, v2, #0
ldp q4, q3, [sp, #144] ; 32-byte Folded Reload
bic.16b v2, v3, v2
cmeq.8b v0, v0, v9
sshll.8h v0, v0, #0
and.16b v0, v0, v3
cmeq.8b v3, v1, v8
ushll.8h v3, v3, #0
bic.16b v3, v4, v3
orr.16b v2, v2, v3
cmeq.8b v1, v1, v9
ushll.8h v1, v1, #0
and.16b v1, v1, v4
orr.16b v0, v0, v1
ext.16b v1, v0, v0, #8
orr.8b v0, v0, v1
umov.h w8, v0[1]
umov.h w9, v0[0]
orr w8, w9, w8
umov.h w9, v0[2]
orr w8, w8, w9
umov.h w9, v0[3]
ext.16b v0, v2, v2, #8
orr.8b v0, v2, v0
umov.h w10, v0[1]
orr w8, w8, w9
umov.h w9, v0[0]
orr w9, w9, w10
umov.h w10, v0[2]
orr w9, w9, w10
umov.h w10, v0[3]
orr w9, w9, w10
strh w9, [x28, #42]
strh w8, [x28, #44]
b LBB27_16
LBB27_199:
strb wzr, [x28]
ldur x8, [x29, #-120]
Lloh285:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh286:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh287:
ldr x9, [x9]
cmp x9, x8
b.ne LBB27_204
; %bb.200:
add sp, sp, #432
ldp x29, x30, [sp, #96] ; 16-byte Folded Reload
ldp x20, x19, [sp, #80] ; 16-byte Folded Reload
ldp x22, x21, [sp, #64] ; 16-byte Folded Reload
ldp x24, x23, [sp, #48] ; 16-byte Folded Reload
ldp x26, x25, [sp, #32] ; 16-byte Folded Reload
ldp x28, x27, [sp, #16] ; 16-byte Folded Reload
ldp d9, d8, [sp], #112 ; 16-byte Folded Reload
ret
LBB27_201:
sxtb x8, w8
b LBB27_203
LBB27_202:
sxtb x8, w10
LBB27_203:
str x8, [sp]
Lloh288:
adrp x0, l_.str.61@PAGE
Lloh289:
add x0, x0, l_.str.61@PAGEOFF
bl _error_exit
LBB27_204:
bl ___stack_chk_fail
LBB27_205:
strb w11, [x10]
LBB27_206:
Lloh290:
adrp x0, l_.str.60@PAGE
Lloh291:
add x0, x0, l_.str.60@PAGEOFF
bl _error_exit
LBB27_207:
strb w10, [x9]
b LBB27_206
LBB27_208:
sturb w10, [x29, #-127]
b LBB27_206
.loh AdrpLdrGotLdr Lloh272, Lloh273, Lloh274
.loh AdrpAdd Lloh275, Lloh276
.loh AdrpLdr Lloh281, Lloh282
.loh AdrpAdrp Lloh279, Lloh281
.loh AdrpLdr Lloh279, Lloh280
.loh AdrpLdrGot Lloh277, Lloh278
.loh AdrpAdd Lloh283, Lloh284
.loh AdrpLdrGotLdr Lloh285, Lloh286, Lloh287
.loh AdrpAdd Lloh288, Lloh289
.loh AdrpAdd Lloh290, Lloh291
.cfi_endproc
; -- End function
.globl _read_insert ; -- Begin function read_insert
.p2align 2
_read_insert: ; @read_insert
.cfi_startproc
; %bb.0:
stp x28, x27, [sp, #-96]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 96
stp x26, x25, [sp, #16] ; 16-byte Folded Spill
stp x24, x23, [sp, #32] ; 16-byte Folded Spill
stp x22, x21, [sp, #48] ; 16-byte Folded Spill
stp x20, x19, [sp, #64] ; 16-byte Folded Spill
stp x29, x30, [sp, #80] ; 16-byte Folded Spill
add x29, sp, #80
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
mov x19, x0
add x20, x0, #1
sub x21, x0, #1
adrp x23, _g_input_file@PAGE
adrp x24, _g_line_number@PAGE
b LBB28_2
LBB28_1: ; in Loop: Header=BB28_2 Depth=1
strb wzr, [x19, w0, sxtw]
ldr w8, [x24, _g_line_number@PAGEOFF]
add w8, w8, #1
str w8, [x24, _g_line_number@PAGEOFF]
cbnz w0, LBB28_9
LBB28_2: ; =>This Loop Header: Depth=1
; Child Loop BB28_7 Depth 2
ldr x2, [x23, _g_input_file@PAGEOFF]
mov x0, x19
mov w1, #200
bl _fgets
cbz x0, LBB28_25
; %bb.3: ; in Loop: Header=BB28_2 Depth=1
ldrb w8, [x19]
cmp w8, #13
b.ne LBB28_5
; %bb.4: ; in Loop: Header=BB28_2 Depth=1
ldr q0, [x20]
str q0, [x19]
ldp q0, q1, [x20, #16]
ldp q2, q3, [x20, #48]
stp q2, q3, [x19, #48]
stp q0, q1, [x19, #16]
ldp q0, q1, [x20, #80]
ldp q2, q3, [x20, #112]
stp q2, q3, [x19, #112]
stp q0, q1, [x19, #80]
ldp q0, q1, [x20, #144]
ldr q2, [x20, #176]
ldur x8, [x20, #191]
stur x8, [x19, #191]
stp q1, q2, [x19, #160]
str q0, [x19, #144]
LBB28_5: ; in Loop: Header=BB28_2 Depth=1
mov x0, x19
bl _strlen
cbz w0, LBB28_1
; %bb.6: ; in Loop: Header=BB28_2 Depth=1
add x8, x21, w0, sxtw
LBB28_7: ; Parent Loop BB28_2 Depth=1
; => This Inner Loop Header: Depth=2
ldrb w9, [x8]
cmp w9, #13
ccmp w9, #10, #4, ne
b.ne LBB28_1
; %bb.8: ; in Loop: Header=BB28_7 Depth=2
sub x8, x8, #1
sub w0, w0, #1
cbnz w0, LBB28_7
b LBB28_1
LBB28_9:
tbnz w0, #31, LBB28_25
; %bb.10:
mov w27, w0
mov w8, #10
strh w8, [x19, x27]
mov w8, #4798
cmp w0, w8
b.hi LBB28_23
; %bb.11:
mov x26, #0
mov x28, #-4294967296
Lloh292:
adrp x20, l_.str.112@PAGE
Lloh293:
add x20, x20, l_.str.112@PAGEOFF
LBB28_12: ; =>This Loop Header: Depth=1
; Child Loop BB28_17 Depth 2
add x25, x27, #1
add x21, x19, x25
ldr x2, [x23, _g_input_file@PAGEOFF]
mov x0, x21
mov w1, #200
bl _fgets
cbz x0, LBB28_25
; %bb.13: ; in Loop: Header=BB28_12 Depth=1
ldrb w8, [x21]
cmp w8, #13
b.ne LBB28_15
; %bb.14: ; in Loop: Header=BB28_12 Depth=1
ldur q0, [x21, #145]
ldur q1, [x21, #161]
stp q0, q1, [x21, #144]
ldur q0, [x21, #177]
str q0, [x21, #176]
ldr x8, [x21, #192]
stur x8, [x21, #191]
ldur q0, [x21, #81]
ldur q1, [x21, #97]
stp q0, q1, [x21, #80]
ldur q0, [x21, #113]
ldur q1, [x21, #129]
stp q0, q1, [x21, #112]
ldur q0, [x21, #17]
ldur q1, [x21, #33]
stp q0, q1, [x21, #16]
ldur q0, [x21, #49]
ldur q1, [x21, #65]
stp q0, q1, [x21, #48]
ldur q0, [x21, #1]
str q0, [x21]
LBB28_15: ; in Loop: Header=BB28_12 Depth=1
mov x0, x21
bl _strlen
mov x22, x0
cbz w22, LBB28_19
; %bb.16: ; in Loop: Header=BB28_12 Depth=1
lsl x8, x22, #32
add x9, x27, w22, sxtw
add x9, x19, x9
LBB28_17: ; Parent Loop BB28_12 Depth=1
; => This Inner Loop Header: Depth=2
ldrb w10, [x9]
cmp w10, #13
ccmp w10, #10, #4, ne
b.ne LBB28_20
; %bb.18: ; in Loop: Header=BB28_17 Depth=2
add x8, x8, x28
sub x9, x9, #1
sub w22, w22, #1
cbnz w22, LBB28_17
LBB28_19: ; in Loop: Header=BB28_12 Depth=1
mov w22, #0
strb wzr, [x21]
ldr w8, [x24, _g_line_number@PAGEOFF]
add w8, w8, #1
str w8, [x24, _g_line_number@PAGEOFF]
b LBB28_21
LBB28_20: ; in Loop: Header=BB28_12 Depth=1
asr x8, x8, #32
strb wzr, [x21, x8]
ldr w8, [x24, _g_line_number@PAGEOFF]
add w8, w8, #1
str w8, [x24, _g_line_number@PAGEOFF]
tbnz w22, #31, LBB28_25
LBB28_21: ; in Loop: Header=BB28_12 Depth=1
mov x0, x21
mov x1, x20
bl _strcmp
cmp x26, #0
csel x8, x21, x26, eq
cbz w0, LBB28_24
; %bb.22: ; in Loop: Header=BB28_12 Depth=1
cmp w22, #0
csel x26, x8, xzr, eq
add x27, x25, w22, uxtw
mov w8, #10
strh w8, [x19, x27]
mov w8, #4799
cmp x27, x8
b.lt LBB28_12
LBB28_23:
Lloh294:
adrp x0, l_.str.124@PAGE
Lloh295:
add x0, x0, l_.str.124@PAGEOFF
bl _error_exit
LBB28_24:
strb wzr, [x8]
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
ldp x26, x25, [sp, #16] ; 16-byte Folded Reload
ldp x28, x27, [sp], #96 ; 16-byte Folded Reload
ret
LBB28_25:
Lloh296:
adrp x0, l_.str.125@PAGE
Lloh297:
add x0, x0, l_.str.125@PAGEOFF
bl _error_exit
.loh AdrpAdd Lloh292, Lloh293
.loh AdrpAdd Lloh294, Lloh295
.loh AdrpAdd Lloh296, Lloh297
.cfi_endproc
; -- End function
;-----------------------------------------------------------------------
; _main — compiler-generated (clang, Darwin AArch64, AAPCS64).
; C-equivalent (inferred): int main(int argc, char **argv)
; Driver of a code generator (input defaults to "m68k_in.c"):
;   1. argv[1] (optional) = output directory: '\' -> '/', trailing '/'
;      appended if missing; argv[2] (optional) overrides
;      g_input_filename.
;   2. Opens five output files (g_prototype_file, g_table_file,
;      g_ops_ac_file, g_ops_dm_file, g_ops_nz_file) and the input file;
;      perror_exit on any failure.
;   3. Skips input lines until the marker line l_.str.112 is seen
;      (memcmp of 80 bytes).
;   4. Main parse loop: each line is compared — via inlined 64-bit
;      little-endian packed-byte compares built with mov/movk — against
;      section-header strings.  x24 packs the bytes "M68KMAKE" and the
;      other immediates extend it to names such as
;      "M68KMAKE_PROTOTYPE_HEADER"; each recognized section triggers
;      read_insert / populate_table / process_opcode_handlers and sets
;      a seen-flag spilled on the stack.  Duplicate or missing sections
;      are fatal (error_exit with a specific message).
;   5. Footer: prints the opcode table, writes the footer inserts to
;      each output file, fclose()s all six streams, printf()s the
;      g_num_functions / g_num_primitives counts, verifies the stack
;      guard, and returns 0.
; NOTE(review): the exact text of the l_.str.* messages/markers lives
; outside this chunk — confirm against the cstring section.
;-----------------------------------------------------------------------
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
; Prologue: save callee-saved pairs, then probe and allocate a large
; (~22 KB) frame of line/insert buffers via ___chkstk_darwin.
stp x28, x27, [sp, #-96]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 96
stp x26, x25, [sp, #16] ; 16-byte Folded Spill
stp x24, x23, [sp, #32] ; 16-byte Folded Spill
stp x22, x21, [sp, #48] ; 16-byte Folded Spill
stp x20, x19, [sp, #64] ; 16-byte Folded Spill
stp x29, x30, [sp, #80] ; 16-byte Folded Spill
add x29, sp, #80
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
mov w9, #22336
Lloh298:
adrp x16, ___chkstk_darwin@GOTPAGE
Lloh299:
ldr x16, [x16, ___chkstk_darwin@GOTPAGEOFF]
blr x16
sub sp, sp, #5, lsl #12 ; =20480
sub sp, sp, #1856
; x19 = argv, x20 = argc; install the stack-protector canary.
mov x19, x1
mov x20, x0
Lloh300:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh301:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh302:
ldr x8, [x8]
stur x8, [x29, #-104]
; Zero the 1024-byte output-directory buffer (sp+21288, kept in x21).
add x21, sp, #5, lsl #12 ; =20480
add x21, x21, #808
add x0, sp, #5, lsl #12 ; =20480
add x0, x0, #808
mov w1, #1024
bl _bzero
; Banner: printf(l_.str.126, g_version); puts(l_str).
Lloh303:
adrp x8, _g_version@PAGE
Lloh304:
ldr x8, [x8, _g_version@PAGEOFF]
str x8, [sp]
Lloh305:
adrp x0, l_.str.126@PAGE
Lloh306:
add x0, x0, l_.str.126@PAGEOFF
bl _printf
Lloh307:
adrp x0, l_str@PAGE
Lloh308:
add x0, x0, l_str@PAGEOFF
bl _puts
cmp w20, #2
b.lt LBB29_8
; %bb.1:
; argv[1]: copy into the directory buffer (checked strcpy, 1024 max).
ldr x1, [x19, #8]
add x22, sp, #5, lsl #12 ; =20480
add x22, x22, #808
add x0, sp, #5, lsl #12 ; =20480
add x0, x0, #808
mov w2, #1024
bl ___strcpy_chk
add x0, sp, #5, lsl #12 ; =20480
add x0, x0, #808
mov w1, #92
bl _strchr
cbz x0, LBB29_4
; %bb.2:
; Replace every '\\' (92) with '/' (47) in the directory path.
mov w23, #47
LBB29_3: ; =>This Inner Loop Header: Depth=1
strb w23, [x0]
mov w1, #92
bl _strchr
cbnz x0, LBB29_3
LBB29_4:
; Append l_.str.128 (path separator) unless the path already ends in '/'.
add x0, sp, #5, lsl #12 ; =20480
add x0, x0, #808
bl _strlen
add x8, x0, x22
ldurb w8, [x8, #-1]
cmp w8, #47
b.eq LBB29_6
; %bb.5:
Lloh309:
adrp x1, l_.str.128@PAGE
Lloh310:
add x1, x1, l_.str.128@PAGEOFF
add x0, sp, #5, lsl #12 ; =20480
add x0, x0, #808
mov w2, #1024
bl ___strcat_chk
LBB29_6:
; argv[2] (if present) overrides g_input_filename.
cmp w20, #3
b.lt LBB29_8
; %bb.7:
ldr x1, [x19, #16]
Lloh311:
adrp x0, _g_input_filename@PAGE
Lloh312:
add x0, x0, _g_input_filename@PAGEOFF
mov w2, #1024
bl ___strcpy_chk
LBB29_8:
; Open g_prototype_file: sprintf(path, fmt.129, dir, l_.str.130);
; fopen(path, l_.str.131); perror_exit(l_.str.132) on failure.
Lloh313:
adrp x8, l_.str.130@PAGE
Lloh314:
add x8, x8, l_.str.130@PAGEOFF
Lloh315:
adrp x3, l_.str.129@PAGE
Lloh316:
add x3, x3, l_.str.129@PAGEOFF
stp x21, x8, [sp]
add x19, sp, #4, lsl #12 ; =16384
add x19, x19, #3880
add x0, sp, #4, lsl #12 ; =16384
add x0, x0, #3880
mov w1, #0
mov w2, #1024
bl ___sprintf_chk
Lloh317:
adrp x1, l_.str.131@PAGE
Lloh318:
add x1, x1, l_.str.131@PAGEOFF
add x0, sp, #4, lsl #12 ; =16384
add x0, x0, #3880
bl _fopen
adrp x8, _g_prototype_file@PAGE
str x0, [x8, _g_prototype_file@PAGEOFF]
cbz x0, LBB29_95
; %bb.9:
; Open g_table_file (name l_.str.133) the same way.
Lloh319:
adrp x8, l_.str.133@PAGE
Lloh320:
add x8, x8, l_.str.133@PAGEOFF
add x19, sp, #5, lsl #12 ; =20480
add x19, x19, #808
Lloh321:
adrp x3, l_.str.129@PAGE
Lloh322:
add x3, x3, l_.str.129@PAGEOFF
stp x19, x8, [sp]
add x20, sp, #4, lsl #12 ; =16384
add x20, x20, #3880
add x0, sp, #4, lsl #12 ; =16384
add x0, x0, #3880
mov w1, #0
mov w2, #1024
bl ___sprintf_chk
Lloh323:
adrp x1, l_.str.131@PAGE
Lloh324:
add x1, x1, l_.str.131@PAGEOFF
add x0, sp, #4, lsl #12 ; =16384
add x0, x0, #3880
bl _fopen
adrp x8, _g_table_file@PAGE
str x0, [x8, _g_table_file@PAGEOFF]
cbz x0, LBB29_96
; %bb.10:
; Open g_ops_ac_file (name l_.str.135).
Lloh325:
adrp x8, l_.str.135@PAGE
Lloh326:
add x8, x8, l_.str.135@PAGEOFF
Lloh327:
adrp x3, l_.str.129@PAGE
Lloh328:
add x3, x3, l_.str.129@PAGEOFF
stp x19, x8, [sp]
add x19, sp, #4, lsl #12 ; =16384
add x19, x19, #3880
add x0, sp, #4, lsl #12 ; =16384
add x0, x0, #3880
mov w1, #0
mov w2, #1024
bl ___sprintf_chk
Lloh329:
adrp x1, l_.str.131@PAGE
Lloh330:
add x1, x1, l_.str.131@PAGEOFF
add x0, sp, #4, lsl #12 ; =16384
add x0, x0, #3880
bl _fopen
adrp x8, _g_ops_ac_file@PAGE
str x0, [x8, _g_ops_ac_file@PAGEOFF]
cbz x0, LBB29_97
; %bb.11:
; Open g_ops_dm_file (name l_.str.137).
Lloh331:
adrp x8, l_.str.137@PAGE
Lloh332:
add x8, x8, l_.str.137@PAGEOFF
add x19, sp, #5, lsl #12 ; =20480
add x19, x19, #808
Lloh333:
adrp x3, l_.str.129@PAGE
Lloh334:
add x3, x3, l_.str.129@PAGEOFF
stp x19, x8, [sp]
add x20, sp, #4, lsl #12 ; =16384
add x20, x20, #3880
add x0, sp, #4, lsl #12 ; =16384
add x0, x0, #3880
mov w1, #0
mov w2, #1024
bl ___sprintf_chk
Lloh335:
adrp x1, l_.str.131@PAGE
Lloh336:
add x1, x1, l_.str.131@PAGEOFF
add x0, sp, #4, lsl #12 ; =16384
add x0, x0, #3880
bl _fopen
adrp x8, _g_ops_dm_file@PAGE
str x0, [x8, _g_ops_dm_file@PAGEOFF]
cbz x0, LBB29_98
; %bb.12:
; Open g_ops_nz_file (name l_.str.139).
Lloh337:
adrp x8, l_.str.139@PAGE
Lloh338:
add x8, x8, l_.str.139@PAGEOFF
Lloh339:
adrp x3, l_.str.129@PAGE
Lloh340:
add x3, x3, l_.str.129@PAGEOFF
stp x19, x8, [sp]
add x19, sp, #4, lsl #12 ; =16384
add x19, x19, #3880
add x0, sp, #4, lsl #12 ; =16384
add x0, x0, #3880
mov w1, #0
mov w2, #1024
bl ___sprintf_chk
Lloh341:
adrp x1, l_.str.131@PAGE
Lloh342:
add x1, x1, l_.str.131@PAGEOFF
add x0, sp, #4, lsl #12 ; =16384
add x0, x0, #3880
bl _fopen
adrp x8, _g_ops_nz_file@PAGE
str x0, [x8, _g_ops_nz_file@PAGEOFF]
cbz x0, LBB29_99
; %bb.13:
; Open the input file: fopen(g_input_filename, l_.str.141).
Lloh343:
adrp x19, _g_input_filename@PAGE
Lloh344:
add x19, x19, _g_input_filename@PAGEOFF
Lloh345:
adrp x1, l_.str.141@PAGE
Lloh346:
add x1, x1, l_.str.141@PAGEOFF
mov x0, x19
bl _fopen
adrp x8, _g_input_file@PAGE
str x0, [x8, _g_input_file@PAGEOFF]
cbz x0, LBB29_100
; %bb.14:
; x27 = line buffer (sp+20048).  Fast path: empty buffer already equals
; the marker (memcmp of 80 bytes against l_.str.112)?
mov x20, x0
add x27, sp, #4, lsl #12 ; =16384
add x27, x27, #3664
strb wzr, [x27]
Lloh347:
adrp x1, l_.str.112@PAGE
Lloh348:
add x1, x1, l_.str.112@PAGEOFF
add x21, sp, #4, lsl #12 ; =16384
add x21, x21, #3664
add x0, sp, #4, lsl #12 ; =16384
add x0, x0, #3664
mov w2, #80
bl _memcmp
adrp x28, _g_line_number@PAGE
cbz w0, LBB29_26
; %bb.15:
; Skip-to-marker loop setup: x22 = buf+1 (CR shift), x23 = buf-1
; (backward trim base), x19 = marker string.
add x22, x21, #1
sub x23, x21, #1
mov x24, #-4294967296
Lloh349:
adrp x19, l_.str.112@PAGE
Lloh350:
add x19, x19, l_.str.112@PAGEOFF
b LBB29_18
LBB29_16: ; in Loop: Header=BB29_18 Depth=1
; Blank line: clear buffer, count the line.
strb wzr, [x27]
ldr w8, [x28, _g_line_number@PAGEOFF]
add w8, w8, #1
str w8, [x28, _g_line_number@PAGEOFF]
LBB29_17: ; in Loop: Header=BB29_18 Depth=1
; Compare trimmed line with the marker; fall out of the loop on match.
add x0, sp, #4, lsl #12 ; =16384
add x0, x0, #3664
mov x1, x19
mov w2, #80
bl _memcmp
Lloh351:
adrp x8, _g_input_file@PAGE
Lloh352:
ldr x20, [x8, _g_input_file@PAGEOFF]
cbz w0, LBB29_26
LBB29_18: ; =>This Loop Header: Depth=1
; Child Loop BB29_23 Depth 2
add x0, sp, #4, lsl #12 ; =16384
add x0, x0, #3664
mov w1, #200
mov x2, x20
bl _fgets
cbz x0, LBB29_79
; %bb.19: ; in Loop: Header=BB29_18 Depth=1
; Drop a leading '\r' by shifting the 200-byte buffer left one byte.
ldrb w8, [x27]
cmp w8, #13
b.ne LBB29_21
; %bb.20: ; in Loop: Header=BB29_18 Depth=1
ldr q0, [x22]
str q0, [x27]
ldp q0, q1, [x22, #16]
ldp q2, q3, [x22, #48]
stp q2, q3, [x27, #48]
stp q0, q1, [x27, #16]
ldp q0, q1, [x22, #80]
ldp q2, q3, [x22, #112]
stp q2, q3, [x27, #112]
stp q0, q1, [x27, #80]
ldp q0, q1, [x22, #144]
ldr q2, [x22, #176]
ldur x8, [x22, #191]
stur x8, [x27, #191]
stp q1, q2, [x27, #160]
str q0, [x27, #144]
LBB29_21: ; in Loop: Header=BB29_18 Depth=1
add x0, sp, #4, lsl #12 ; =16384
add x0, x0, #3664
bl _strlen
cbz w0, LBB29_16
; %bb.22: ; in Loop: Header=BB29_18 Depth=1
; Backward scan trimming trailing CR/LF (length shadowed in x9<<32).
lsl x9, x0, #32
sxtw x8, w0
LBB29_23: ; Parent Loop BB29_18 Depth=1
; => This Inner Loop Header: Depth=2
ldrb w10, [x23, x8]
cmp w10, #13
ccmp w10, #10, #4, ne
b.ne LBB29_25
; %bb.24: ; in Loop: Header=BB29_23 Depth=2
add x9, x9, x24
sub x8, x8, #1
cbnz w8, LBB29_23
b LBB29_16
LBB29_25: ; in Loop: Header=BB29_18 Depth=1
; NUL-terminate at the trimmed length, count the line.
asr x9, x9, #32
strb wzr, [x21, x9]
ldr w9, [x28, _g_line_number@PAGEOFF]
add w9, w9, #1
str w9, [x28, _g_line_number@PAGEOFF]
tbz w8, #31, LBB29_17
b LBB29_79
LBB29_26:
; Marker found — read the first content line of the body.
add x0, sp, #4, lsl #12 ; =16384
add x0, x0, #3664
mov w1, #200
mov x2, x20
bl _fgets
cbz x0, LBB29_79
; %bb.27:
; Initialize section-seen flags (stack spill slots) and the packed
; string constants: x24 = bytes "M68KMAKE", x25 = bytes "_PROTOTY",
; w19 = 'R' — pieces of the section-header names compared below.
stp xzr, xzr, [sp, #24] ; 16-byte Folded Spill
str wzr, [sp, #20] ; 4-byte Folded Spill
str wzr, [sp, #40] ; 4-byte Folded Spill
mov w23, #0
mov w26, #0
mov x25, #20575
movk x25, #20306, lsl #16
movk x25, #20308, lsl #32
movk x25, #22868, lsl #48
mov x24, #13901
movk x24, #19256, lsl #16
movk x24, #16717, lsl #32
movk x24, #17739, lsl #48
add x8, sp, #4, lsl #12 ; =16384
add x8, x8, #3664
orr x22, x8, #0x1
sub x21, x8, #1
mov w19, #82
mov x20, #-4294967296
b LBB29_30
LBB29_28: ; in Loop: Header=BB29_30 Depth=1
; Opcode-handler section body: parse it, mark it seen.
bl _process_opcode_handlers
mov w8, #1
str w8, [sp, #24] ; 4-byte Folded Spill
LBB29_29: ; in Loop: Header=BB29_30 Depth=1
; Read the next input line.
Lloh353:
adrp x8, _g_input_file@PAGE
Lloh354:
ldr x2, [x8, _g_input_file@PAGEOFF]
add x0, sp, #4, lsl #12 ; =16384
add x0, x0, #3664
mov w1, #200
bl _fgets
cbz x0, LBB29_79
LBB29_30: ; =>This Loop Header: Depth=1
; Child Loop BB29_34 Depth 2
; Normalize the line: drop leading '\r' (vector shift), trim trailing
; CR/LF, NUL-terminate, count it — same pattern as the skip loop above.
ldrb w8, [x27]
cmp w8, #13
b.ne LBB29_32
; %bb.31: ; in Loop: Header=BB29_30 Depth=1
ldr q0, [x22]
str q0, [x27]
ldp q0, q1, [x22, #16]
ldp q2, q3, [x22, #48]
stp q2, q3, [x27, #48]
stp q0, q1, [x27, #16]
ldp q0, q1, [x22, #80]
ldp q2, q3, [x22, #112]
stp q2, q3, [x27, #112]
stp q0, q1, [x27, #80]
ldp q0, q1, [x22, #144]
ldr q2, [x22, #176]
ldur x8, [x22, #191]
stur x8, [x27, #191]
stp q1, q2, [x27, #160]
str q0, [x27, #144]
LBB29_32: ; in Loop: Header=BB29_30 Depth=1
add x0, sp, #4, lsl #12 ; =16384
add x0, x0, #3664
bl _strlen
cbz w0, LBB29_36
; %bb.33: ; in Loop: Header=BB29_30 Depth=1
lsl x9, x0, #32
sxtw x8, w0
LBB29_34: ; Parent Loop BB29_30 Depth=1
; => This Inner Loop Header: Depth=2
ldrb w10, [x21, x8]
cmp w10, #13
ccmp w10, #10, #4, ne
b.ne LBB29_37
; %bb.35: ; in Loop: Header=BB29_34 Depth=2
add x9, x9, x20
sub x8, x8, #1
cbnz w8, LBB29_34
LBB29_36: ; in Loop: Header=BB29_30 Depth=1
strb wzr, [x27]
ldr w8, [x28, _g_line_number@PAGEOFF]
add w8, w8, #1
str w8, [x28, _g_line_number@PAGEOFF]
b LBB29_38
LBB29_37: ; in Loop: Header=BB29_30 Depth=1
asr x9, x9, #32
add x10, sp, #4, lsl #12 ; =16384
add x10, x10, #3664
strb wzr, [x10, x9]
ldr w9, [x28, _g_line_number@PAGEOFF]
add w9, w9, #1
str w9, [x28, _g_line_number@PAGEOFF]
tbnz w8, #31, LBB29_79
LBB29_38: ; in Loop: Header=BB29_30 Depth=1
; Section dispatch: each test XORs 26 bytes of the line against packed
; ASCII constants ("M68KMAKE_…" header names) and ORs the results;
; zero means exact match.  First candidate uses x25="_PROTOTY" and
; "PE_HEADER" pieces.
ldp x8, x9, [x27]
eor x8, x8, x24
eor x9, x9, x25
ldr x10, [x27, #16]
mov x11, #17744
movk x11, #18527, lsl #16
movk x11, #16709, lsl #32
movk x11, #17732, lsl #48
eor x10, x10, x11
ldrh w11, [x27, #24]
eor x11, x11, x19
orr x8, x8, x9
orr x9, x10, x11
orr x8, x8, x9
cbz x8, LBB29_51
; %bb.39: ; in Loop: Header=BB29_30 Depth=1
; Second candidate header.
ldp x8, x9, [x27]
eor x8, x8, x24
mov x10, #21599
movk x10, #16961, lsl #16
movk x10, #17740, lsl #32
movk x10, #18527, lsl #48
eor x9, x9, x10
ldur x10, [x27, #14]
mov x12, #18527
movk x12, #16709, lsl #16
movk x12, #17732, lsl #32
movk x12, #82, lsl #48
eor x10, x10, x12
orr x8, x8, x9
orr x8, x8, x10
cbz x8, LBB29_53
; %bb.40: ; in Loop: Header=BB29_30 Depth=1
; Third candidate header.
ldp x8, x9, [x27]
eor x8, x8, x24
mov x10, #20319
movk x10, #17232, lsl #16
movk x10, #17487, lsl #32
movk x10, #24389, lsl #48
eor x9, x9, x10
ldr x10, [x27, #16]
mov x11, #16712
movk x11, #17486, lsl #16
movk x11, #17740, lsl #32
movk x11, #24402, lsl #48
eor x10, x10, x11
ldur x11, [x27, #23]
eor x11, x11, x12
orr x8, x8, x9
orr x9, x10, x11
orr x8, x8, x9
mov x12, #18015
movk x12, #20303, lsl #16
movk x12, #17748, lsl #32
movk x12, #82, lsl #48
cbz x8, LBB29_55
; %bb.41: ; in Loop: Header=BB29_30 Depth=1
; Fourth candidate header.
ldp x8, x9, [x27]
eor x8, x8, x24
eor x9, x9, x25
ldr x10, [x27, #16]
mov x11, #17744
movk x11, #18015, lsl #16
movk x11, #20303, lsl #32
movk x11, #17748, lsl #48
eor x10, x10, x11
ldrh w11, [x27, #24]
eor x11, x11, x19
orr x8, x8, x9
orr x9, x10, x11
orr x8, x8, x9
cbz x8, LBB29_57
; %bb.42: ; in Loop: Header=BB29_30 Depth=1
; Fifth candidate header.
ldp x8, x9, [x27]
eor x8, x8, x24
mov x10, #21599
movk x10, #16961, lsl #16
movk x10, #17740, lsl #32
movk x10, #18015, lsl #48
eor x9, x9, x10
ldur x10, [x27, #14]
eor x10, x10, x12
orr x8, x8, x9
orr x8, x8, x10
cbz x8, LBB29_59
; %bb.43: ; in Loop: Header=BB29_30 Depth=1
; Sixth candidate header.
ldp x8, x9, [x27]
eor x8, x8, x24
mov x10, #20319
movk x10, #17232, lsl #16
movk x10, #17487, lsl #32
movk x10, #24389, lsl #48
eor x9, x9, x10
ldr x10, [x27, #16]
mov x11, #16712
movk x11, #17486, lsl #16
movk x11, #17740, lsl #32
movk x11, #24402, lsl #48
eor x10, x10, x11
ldur x11, [x27, #23]
eor x11, x11, x12
orr x8, x8, x9
orr x9, x10, x11
orr x8, x8, x9
cbz x8, LBB29_61
; %bb.44: ; in Loop: Header=BB29_30 Depth=1
; Seventh candidate header (shorter, 20 bytes).
ldp x8, x9, [x27]
eor x8, x8, x24
mov x10, #21599
movk x10, #16961, lsl #16
movk x10, #17740, lsl #32
movk x10, #16991, lsl #48
eor x9, x9, x10
ldr w10, [x27, #16]
mov w11, #17487
movk w11, #89, lsl #16
eor x10, x10, x11
orr x8, x8, x9
orr x8, x8, x10
cbz x8, LBB29_63
; %bb.45: ; in Loop: Header=BB29_30 Depth=1
; Eighth candidate header; match falls through to the opcode-handler
; section checks at %bb.46.
ldp x8, x9, [x27]
eor x8, x8, x24
mov x10, #20319
movk x10, #17232, lsl #16
movk x10, #17487, lsl #32
movk x10, #24389, lsl #48
eor x9, x9, x10
ldr x10, [x27, #16]
mov x11, #16712
movk x11, #17486, lsl #16
movk x11, #17740, lsl #32
movk x11, #24402, lsl #48
eor x10, x10, x11
ldur x11, [x27, #21]
mov x12, #21061
movk x12, #16991, lsl #16
movk x12, #17487, lsl #32
movk x12, #89, lsl #48
eor x11, x11, x12
orr x8, x8, x9
orr x9, x10, x11
orr x8, x8, x9
cbnz x8, LBB29_68
; %bb.46: ; in Loop: Header=BB29_30 Depth=1
; Opcode-handler section: all prerequisite sections must be seen
; (w26, w23, [sp,#40], [sp,#28]) and it must not repeat ([sp,#24]).
cbz w26, LBB29_88
; %bb.47: ; in Loop: Header=BB29_30 Depth=1
cbz w23, LBB29_89
; %bb.48: ; in Loop: Header=BB29_30 Depth=1
ldr w8, [sp, #40] ; 4-byte Folded Reload
cbz w8, LBB29_90
; %bb.49: ; in Loop: Header=BB29_30 Depth=1
ldr w8, [sp, #28] ; 4-byte Folded Reload
cbz w8, LBB29_91
; %bb.50: ; in Loop: Header=BB29_30 Depth=1
ldr w8, [sp, #24] ; 4-byte Folded Reload
cbz w8, LBB29_28
b LBB29_85
LBB29_51: ; in Loop: Header=BB29_30 Depth=1
; Prototype-header section: read_insert into its buffer and fprintf it
; to g_prototype_file.  Duplicate => error (BB29_80); w26 = seen flag.
cbnz w26, LBB29_80
; %bb.52: ; in Loop: Header=BB29_30 Depth=1
add x0, sp, #3, lsl #12 ; =12288
add x0, x0, #2759
bl _read_insert
Lloh355:
adrp x8, _g_prototype_file@PAGE
Lloh356:
ldr x0, [x8, _g_prototype_file@PAGEOFF]
add x8, sp, #3, lsl #12 ; =12288
add x8, x8, #2759
str x8, [sp]
Lloh357:
adrp x1, l_.str.146@PAGE
Lloh358:
add x1, x1, l_.str.146@PAGEOFF
bl _fprintf
mov w26, #1
b LBB29_29
LBB29_53: ; in Loop: Header=BB29_30 Depth=1
; Table-header section: read_insert, then fputs to g_table_file.
cbnz w23, LBB29_81
; %bb.54: ; in Loop: Header=BB29_30 Depth=1
add x0, sp, #3, lsl #12 ; =12288
add x0, x0, #2759
bl _read_insert
Lloh359:
adrp x8, _g_table_file@PAGE
Lloh360:
ldr x1, [x8, _g_table_file@PAGEOFF]
add x0, sp, #3, lsl #12 ; =12288
add x0, x0, #2759
bl _fputs
mov w23, #1
b LBB29_29
LBB29_55: ; in Loop: Header=BB29_30 Depth=1
; Ops-header section: one insert written to all three ops files
; (ac/dm/nz).  x25/w19 are temporarily repurposed and restored.
ldr w8, [sp, #40] ; 4-byte Folded Reload
cbnz w8, LBB29_82
; %bb.56: ; in Loop: Header=BB29_30 Depth=1
add x0, sp, #3, lsl #12 ; =12288
add x0, x0, #2759
bl _read_insert
Lloh361:
adrp x8, _g_ops_ac_file@PAGE
Lloh362:
ldr x0, [x8, _g_ops_ac_file@PAGEOFF]
mov x25, x23
add x23, sp, #3, lsl #12 ; =12288
add x23, x23, #2759
str x23, [sp]
Lloh363:
adrp x19, l_.str.146@PAGE
Lloh364:
add x19, x19, l_.str.146@PAGEOFF
mov x1, x19
bl _fprintf
Lloh365:
adrp x8, _g_ops_dm_file@PAGE
Lloh366:
ldr x0, [x8, _g_ops_dm_file@PAGEOFF]
str x23, [sp]
mov x1, x19
bl _fprintf
Lloh367:
adrp x8, _g_ops_nz_file@PAGE
Lloh368:
ldr x0, [x8, _g_ops_nz_file@PAGEOFF]
str x23, [sp]
mov x23, x25
mov x25, #20575
movk x25, #20306, lsl #16
movk x25, #20308, lsl #32
movk x25, #22868, lsl #48
mov x1, x19
mov w19, #82
bl _fprintf
mov w8, #1
str w8, [sp, #40] ; 4-byte Folded Spill
b LBB29_29
LBB29_57: ; in Loop: Header=BB29_30 Depth=1
; Prototype-footer section: read_insert into its own buffer (sp+10046);
; written out later at %bb.77.
ldr w8, [sp, #36] ; 4-byte Folded Reload
cbnz w8, LBB29_83
; %bb.58: ; in Loop: Header=BB29_30 Depth=1
add x0, sp, #2, lsl #12 ; =8192
add x0, x0, #1854
bl _read_insert
mov w8, #1
str w8, [sp, #36] ; 4-byte Folded Spill
b LBB29_29
LBB29_59: ; in Loop: Header=BB29_30 Depth=1
; Table-footer section buffer (sp+5045).
ldr w8, [sp, #32] ; 4-byte Folded Reload
cbnz w8, LBB29_84
; %bb.60: ; in Loop: Header=BB29_30 Depth=1
add x0, sp, #1, lsl #12 ; =4096
add x0, x0, #949
bl _read_insert
mov w8, #1
str w8, [sp, #32] ; 4-byte Folded Spill
b LBB29_29
LBB29_61: ; in Loop: Header=BB29_30 Depth=1
; Ops-footer section buffer (sp+44).
ldr w8, [sp, #20] ; 4-byte Folded Reload
cbnz w8, LBB29_86
; %bb.62: ; in Loop: Header=BB29_30 Depth=1
add x0, sp, #44
bl _read_insert
mov w8, #1
str w8, [sp, #20] ; 4-byte Folded Spill
b LBB29_29
LBB29_63: ; in Loop: Header=BB29_30 Depth=1
; Table-body section: requires earlier sections, then populate_table.
cbz w26, LBB29_92
; %bb.64: ; in Loop: Header=BB29_30 Depth=1
cbz w23, LBB29_93
; %bb.65: ; in Loop: Header=BB29_30 Depth=1
ldr w8, [sp, #40] ; 4-byte Folded Reload
cbz w8, LBB29_94
; %bb.66: ; in Loop: Header=BB29_30 Depth=1
ldr w8, [sp, #28] ; 4-byte Folded Reload
cbnz w8, LBB29_87
; %bb.67: ; in Loop: Header=BB29_30 Depth=1
bl _populate_table
mov w8, #1
str w8, [sp, #28] ; 4-byte Folded Spill
b LBB29_29
LBB29_68:
; No section header matched: check for the end-of-input marker
; (13-byte compare); anything else is a fatal unknown line (BB29_101).
ldr x8, [x27]
eor x8, x8, x24
ldur x9, [x27, #5]
mov x10, #19265
movk x10, #24389, lsl #16
movk x10, #20037, lsl #32
movk x10, #68, lsl #48
eor x9, x9, x10
orr x8, x8, x9
cbnz x8, LBB29_101
; %bb.69:
; End marker: verify every section was seen (specific error for each
; missing one), then emit outputs.
cbz w26, LBB29_102
; %bb.70:
ldr w8, [sp, #36] ; 4-byte Folded Reload
cbz w8, LBB29_103
; %bb.71:
cbz w23, LBB29_104
; %bb.72:
ldr w8, [sp, #32] ; 4-byte Folded Reload
cbz w8, LBB29_105
; %bb.73:
ldr w8, [sp, #28] ; 4-byte Folded Reload
cbz w8, LBB29_106
; %bb.74:
ldr w8, [sp, #40] ; 4-byte Folded Reload
cbz w8, LBB29_107
; %bb.75:
ldr w8, [sp, #20] ; 4-byte Folded Reload
cbz w8, LBB29_108
; %bb.76:
ldr w8, [sp, #24] ; 4-byte Folded Reload
cbz w8, LBB29_109
; %bb.77:
; Emit: opcode table, footer inserts to each output file, close all
; six streams, print function/primitive counts.
adrp x22, _g_table_file@PAGE
ldr x0, [x22, _g_table_file@PAGEOFF]
bl _print_opcode_output_table
adrp x21, _g_prototype_file@PAGE
ldr x0, [x21, _g_prototype_file@PAGEOFF]
add x8, sp, #2, lsl #12 ; =8192
add x8, x8, #1854
str x8, [sp]
Lloh369:
adrp x19, l_.str.146@PAGE
Lloh370:
add x19, x19, l_.str.146@PAGEOFF
mov x1, x19
bl _fprintf
ldr x0, [x22, _g_table_file@PAGEOFF]
add x8, sp, #1, lsl #12 ; =4096
add x8, x8, #949
str x8, [sp]
mov x1, x19
bl _fprintf
adrp x23, _g_ops_ac_file@PAGE
ldr x0, [x23, _g_ops_ac_file@PAGEOFF]
add x20, sp, #44
str x20, [sp]
mov x1, x19
bl _fprintf
adrp x24, _g_ops_dm_file@PAGE
ldr x0, [x24, _g_ops_dm_file@PAGEOFF]
str x20, [sp]
mov x1, x19
bl _fprintf
adrp x25, _g_ops_nz_file@PAGE
ldr x0, [x25, _g_ops_nz_file@PAGEOFF]
str x20, [sp]
mov x1, x19
bl _fprintf
ldr x0, [x21, _g_prototype_file@PAGEOFF]
bl _fclose
ldr x0, [x22, _g_table_file@PAGEOFF]
bl _fclose
ldr x0, [x23, _g_ops_ac_file@PAGEOFF]
bl _fclose
ldr x0, [x24, _g_ops_dm_file@PAGEOFF]
bl _fclose
ldr x0, [x25, _g_ops_nz_file@PAGEOFF]
bl _fclose
Lloh371:
adrp x8, _g_input_file@PAGE
Lloh372:
ldr x0, [x8, _g_input_file@PAGEOFF]
bl _fclose
Lloh373:
adrp x8, _g_num_functions@PAGE
Lloh374:
ldr w8, [x8, _g_num_functions@PAGEOFF]
Lloh375:
adrp x9, _g_num_primitives@PAGE
Lloh376:
ldr w9, [x9, _g_num_primitives@PAGEOFF]
stp x8, x9, [sp]
Lloh377:
adrp x0, l_.str.178@PAGE
Lloh378:
add x0, x0, l_.str.178@PAGEOFF
bl _printf
; Stack-protector check, then return 0.
ldur x8, [x29, #-104]
Lloh379:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh380:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh381:
ldr x9, [x9]
cmp x9, x8
b.ne LBB29_110
; %bb.78:
mov w0, #0
add sp, sp, #5, lsl #12 ; =20480
add sp, sp, #1856
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
ldp x26, x25, [sp, #16] ; 16-byte Folded Reload
ldp x28, x27, [sp], #96 ; 16-byte Folded Reload
ret
; Fatal-error tails: each loads a distinct message and calls
; error_exit / perror_exit (both noreturn here — no fallthrough code).
LBB29_79:
Lloh382:
adrp x0, l_.str.143@PAGE
Lloh383:
add x0, x0, l_.str.143@PAGEOFF
bl _error_exit
LBB29_80:
Lloh384:
adrp x0, l_.str.145@PAGE
Lloh385:
add x0, x0, l_.str.145@PAGEOFF
bl _error_exit
LBB29_81:
Lloh386:
adrp x0, l_.str.148@PAGE
Lloh387:
add x0, x0, l_.str.148@PAGEOFF
bl _error_exit
LBB29_82:
Lloh388:
adrp x0, l_.str.150@PAGE
Lloh389:
add x0, x0, l_.str.150@PAGEOFF
bl _error_exit
LBB29_83:
Lloh390:
adrp x0, l_.str.152@PAGE
Lloh391:
add x0, x0, l_.str.152@PAGEOFF
bl _error_exit
LBB29_84:
Lloh392:
adrp x0, l_.str.154@PAGE
Lloh393:
add x0, x0, l_.str.154@PAGEOFF
bl _error_exit
LBB29_85:
Lloh394:
adrp x0, l_.str.167@PAGE
Lloh395:
add x0, x0, l_.str.167@PAGEOFF
bl _error_exit
LBB29_86:
Lloh396:
adrp x0, l_.str.156@PAGE
Lloh397:
add x0, x0, l_.str.156@PAGEOFF
bl _error_exit
LBB29_87:
Lloh398:
adrp x0, l_.str.161@PAGE
Lloh399:
add x0, x0, l_.str.161@PAGEOFF
bl _error_exit
LBB29_88:
Lloh400:
adrp x0, l_.str.163@PAGE
Lloh401:
add x0, x0, l_.str.163@PAGEOFF
bl _error_exit
LBB29_89:
Lloh402:
adrp x0, l_.str.164@PAGE
Lloh403:
add x0, x0, l_.str.164@PAGEOFF
bl _error_exit
LBB29_90:
Lloh404:
adrp x0, l_.str.165@PAGE
Lloh405:
add x0, x0, l_.str.165@PAGEOFF
bl _error_exit
LBB29_91:
Lloh406:
adrp x0, l_.str.166@PAGE
Lloh407:
add x0, x0, l_.str.166@PAGEOFF
bl _error_exit
LBB29_92:
Lloh408:
adrp x0, l_.str.158@PAGE
Lloh409:
add x0, x0, l_.str.158@PAGEOFF
bl _error_exit
LBB29_93:
Lloh410:
adrp x0, l_.str.159@PAGE
Lloh411:
add x0, x0, l_.str.159@PAGEOFF
bl _error_exit
LBB29_94:
Lloh412:
adrp x0, l_.str.160@PAGE
Lloh413:
add x0, x0, l_.str.160@PAGEOFF
bl _error_exit
LBB29_95:
str x19, [sp]
Lloh414:
adrp x0, l_.str.132@PAGE
Lloh415:
add x0, x0, l_.str.132@PAGEOFF
bl _perror_exit
LBB29_96:
str x20, [sp]
Lloh416:
adrp x0, l_.str.134@PAGE
Lloh417:
add x0, x0, l_.str.134@PAGEOFF
bl _perror_exit
LBB29_97:
str x19, [sp]
Lloh418:
adrp x0, l_.str.136@PAGE
Lloh419:
add x0, x0, l_.str.136@PAGEOFF
bl _perror_exit
LBB29_98:
str x20, [sp]
Lloh420:
adrp x0, l_.str.138@PAGE
Lloh421:
add x0, x0, l_.str.138@PAGEOFF
bl _perror_exit
LBB29_99:
str x19, [sp]
Lloh422:
adrp x0, l_.str.140@PAGE
Lloh423:
add x0, x0, l_.str.140@PAGEOFF
bl _perror_exit
LBB29_100:
str x19, [sp]
Lloh424:
adrp x0, l_.str.142@PAGE
Lloh425:
add x0, x0, l_.str.142@PAGEOFF
bl _perror_exit
LBB29_101:
; Unknown line: report it (line buffer as printf argument).
add x8, sp, #4, lsl #12 ; =16384
add x8, x8, #3664
str x8, [sp]
Lloh426:
adrp x0, l_.str.177@PAGE
Lloh427:
add x0, x0, l_.str.177@PAGEOFF
bl _error_exit
LBB29_102:
Lloh428:
adrp x0, l_.str.169@PAGE
Lloh429:
add x0, x0, l_.str.169@PAGEOFF
bl _error_exit
LBB29_103:
Lloh430:
adrp x0, l_.str.170@PAGE
Lloh431:
add x0, x0, l_.str.170@PAGEOFF
bl _error_exit
LBB29_104:
Lloh432:
adrp x0, l_.str.171@PAGE
Lloh433:
add x0, x0, l_.str.171@PAGEOFF
bl _error_exit
LBB29_105:
Lloh434:
adrp x0, l_.str.172@PAGE
Lloh435:
add x0, x0, l_.str.172@PAGEOFF
bl _error_exit
LBB29_106:
Lloh436:
adrp x0, l_.str.173@PAGE
Lloh437:
add x0, x0, l_.str.173@PAGEOFF
bl _error_exit
LBB29_107:
Lloh438:
adrp x0, l_.str.174@PAGE
Lloh439:
add x0, x0, l_.str.174@PAGEOFF
bl _error_exit
LBB29_108:
Lloh440:
adrp x0, l_.str.175@PAGE
Lloh441:
add x0, x0, l_.str.175@PAGEOFF
bl _error_exit
LBB29_109:
Lloh442:
adrp x0, l_.str.176@PAGE
Lloh443:
add x0, x0, l_.str.176@PAGEOFF
bl _error_exit
LBB29_110:
; Stack-smashing detected.
bl ___stack_chk_fail
; Mach-O linker optimization hints pairing the adrp/add/ldr sequences
; above so the linker may fuse them.
.loh AdrpAdd Lloh307, Lloh308
.loh AdrpAdd Lloh305, Lloh306
.loh AdrpLdr Lloh303, Lloh304
.loh AdrpLdrGotLdr Lloh300, Lloh301, Lloh302
.loh AdrpLdrGot Lloh298, Lloh299
.loh AdrpAdd Lloh309, Lloh310
.loh AdrpAdd Lloh311, Lloh312
.loh AdrpAdd Lloh317, Lloh318
.loh AdrpAdd Lloh315, Lloh316
.loh AdrpAdd Lloh313, Lloh314
.loh AdrpAdd Lloh323, Lloh324
.loh AdrpAdd Lloh321, Lloh322
.loh AdrpAdd Lloh319, Lloh320
.loh AdrpAdd Lloh329, Lloh330
.loh AdrpAdd Lloh327, Lloh328
.loh AdrpAdd Lloh325, Lloh326
.loh AdrpAdd Lloh335, Lloh336
.loh AdrpAdd Lloh333, Lloh334
.loh AdrpAdd Lloh331, Lloh332
.loh AdrpAdd Lloh341, Lloh342
.loh AdrpAdd Lloh339, Lloh340
.loh AdrpAdd Lloh337, Lloh338
.loh AdrpAdd Lloh345, Lloh346
.loh AdrpAdd Lloh343, Lloh344
.loh AdrpAdd Lloh347, Lloh348
.loh AdrpAdd Lloh349, Lloh350
.loh AdrpLdr Lloh351, Lloh352
.loh AdrpLdr Lloh353, Lloh354
.loh AdrpAdd Lloh357, Lloh358
.loh AdrpLdr Lloh355, Lloh356
.loh AdrpLdr Lloh359, Lloh360
.loh AdrpLdr Lloh367, Lloh368
.loh AdrpLdr Lloh365, Lloh366
.loh AdrpAdd Lloh363, Lloh364
.loh AdrpLdr Lloh361, Lloh362
.loh AdrpLdrGotLdr Lloh379, Lloh380, Lloh381
.loh AdrpAdd Lloh377, Lloh378
.loh AdrpLdr Lloh375, Lloh376
.loh AdrpLdr Lloh373, Lloh374
.loh AdrpLdr Lloh371, Lloh372
.loh AdrpAdd Lloh369, Lloh370
.loh AdrpAdd Lloh382, Lloh383
.loh AdrpAdd Lloh384, Lloh385
.loh AdrpAdd Lloh386, Lloh387
.loh AdrpAdd Lloh388, Lloh389
.loh AdrpAdd Lloh390, Lloh391
.loh AdrpAdd Lloh392, Lloh393
.loh AdrpAdd Lloh394, Lloh395
.loh AdrpAdd Lloh396, Lloh397
.loh AdrpAdd Lloh398, Lloh399
.loh AdrpAdd Lloh400, Lloh401
.loh AdrpAdd Lloh402, Lloh403
.loh AdrpAdd Lloh404, Lloh405
.loh AdrpAdd Lloh406, Lloh407
.loh AdrpAdd Lloh408, Lloh409
.loh AdrpAdd Lloh410, Lloh411
.loh AdrpAdd Lloh412, Lloh413
.loh AdrpAdd Lloh414, Lloh415
.loh AdrpAdd Lloh416, Lloh417
.loh AdrpAdd Lloh418, Lloh419
.loh AdrpAdd Lloh420, Lloh421
.loh AdrpAdd Lloh422, Lloh423
.loh AdrpAdd Lloh424, Lloh425
.loh AdrpAdd Lloh426, Lloh427
.loh AdrpAdd Lloh428, Lloh429
.loh AdrpAdd Lloh430, Lloh431
.loh AdrpAdd Lloh432, Lloh433
.loh AdrpAdd Lloh434, Lloh435
.loh AdrpAdd Lloh436, Lloh437
.loh AdrpAdd Lloh438, Lloh439
.loh AdrpAdd Lloh440, Lloh441
.loh AdrpAdd Lloh442, Lloh443
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "3.3"
.section __DATA,__data
.globl _g_version ; @g_version
.p2align 3
_g_version:
.quad l_.str
.globl _g_input_filename ; @g_input_filename
_g_input_filename:
.asciz "m68k_in.c\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\00
0\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\00
0\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"
.globl _g_input_file ; @g_input_file
.zerofill __DATA,__common,_g_input_file,8,3
.globl _g_prototype_file ; @g_prototype_file
.zerofill __DATA,__common,_g_prototype_file,8,3
.globl _g_table_file ; @g_table_file
.zerofill __DATA,__common,_g_table_file,8,3
.globl _g_ops_ac_file ; @g_ops_ac_file
.zerofill __DATA,__common,_g_ops_ac_file,8,3
.globl _g_ops_dm_file ; @g_ops_dm_file
.zerofill __DATA,__common,_g_ops_dm_file,8,3
.globl _g_ops_nz_file ; @g_ops_nz_file
.zerofill __DATA,__common,_g_ops_nz_file,8,3
.globl _g_num_functions ; @g_num_functions
.zerofill __DATA,__common,_g_num_functions,4,2
.globl _g_num_primitives ; @g_num_primitives
.zerofill __DATA,__common,_g_num_primitives,4,2
.globl _g_line_number ; @g_line_number
.p2align 2
_g_line_number:
.long 1 ; 0x1
.globl _g_opcode_output_table_length ; @g_opcode_output_table_length
.zerofill __DATA,__common,_g_opcode_output_table_length,4,2
.section __TEXT,__cstring,cstring_literals
l_.str.1: ; @.str.1
.space 1
l_.str.2: ; @.str.2
.asciz "ai"
l_.str.3: ; @.str.3
.asciz "AY_AI"
l_.str.4: ; @.str.4
.asciz "pi"
l_.str.5: ; @.str.5
.asciz "AY_PI"
l_.str.6: ; @.str.6
.asciz "pi7"
l_.str.7: ; @.str.7
.asciz "A7_PI"
l_.str.8: ; @.str.8
.asciz "pd"
l_.str.9: ; @.str.9
.asciz "AY_PD"
l_.str.10: ; @.str.10
.asciz "pd7"
l_.str.11: ; @.str.11
.asciz "A7_PD"
l_.str.12: ; @.str.12
.asciz "di"
l_.str.13: ; @.str.13
.asciz "AY_DI"
l_.str.14: ; @.str.14
.asciz "ix"
l_.str.15: ; @.str.15
.asciz "AY_IX"
l_.str.16: ; @.str.16
.asciz "aw"
l_.str.17: ; @.str.17
.asciz "AW"
l_.str.18: ; @.str.18
.asciz "al"
l_.str.19: ; @.str.19
.asciz "AL"
l_.str.20: ; @.str.20
.asciz "pcdi"
l_.str.21: ; @.str.21
.asciz "PCDI"
l_.str.22: ; @.str.22
.asciz "pcix"
l_.str.23: ; @.str.23
.asciz "PCIX"
l_.str.24: ; @.str.24
.asciz "i"
l_.str.25: ; @.str.25
.asciz "I"
.section __DATA,__data
.globl _g_ea_info_table ; @g_ea_info_table
.p2align 3
_g_ea_info_table:
.quad l_.str.1
.quad l_.str.1
.long 0 ; 0x0
.long 0 ; 0x0
.quad l_.str.2
.quad l_.str.3
.long 56 ; 0x38
.long 16 ; 0x10
.quad l_.str.4
.quad l_.str.5
.long 56 ; 0x38
.long 24 ; 0x18
.quad l_.str.6
.quad l_.str.7
.long 63 ; 0x3f
.long 31 ; 0x1f
.quad l_.str.8
.quad l_.str.9
.long 56 ; 0x38
.long 32 ; 0x20
.quad l_.str.10
.quad l_.str.11
.long 63 ; 0x3f
.long 39 ; 0x27
.quad l_.str.12
.quad l_.str.13
.long 56 ; 0x38
.long 40 ; 0x28
.quad l_.str.14
.quad l_.str.15
.long 56 ; 0x38
.long 48 ; 0x30
.quad l_.str.16
.quad l_.str.17
.long 63 ; 0x3f
.long 56 ; 0x38
.quad l_.str.18
.quad l_.str.19
.long 63 ; 0x3f
.long 57 ; 0x39
.quad l_.str.20
.quad l_.str.21
.long 63 ; 0x3f
.long 58 ; 0x3a
.quad l_.str.22
.quad l_.str.23
.long 63 ; 0x3f
.long 59 ; 0x3b
.quad l_.str.24
.quad l_.str.25
.long 63 ; 0x3f
.long 60 ; 0x3c
.section __TEXT,__cstring,cstring_literals
l_.str.26: ; @.str.26
.asciz "t"
l_.str.27: ; @.str.27
.asciz "T"
l_.str.28: ; @.str.28
.asciz "f"
l_.str.29: ; @.str.29
.asciz "F"
l_.str.30: ; @.str.30
.asciz "hi"
l_.str.31: ; @.str.31
.asciz "HI"
l_.str.32: ; @.str.32
.asciz "ls"
l_.str.33: ; @.str.33
.asciz "LS"
l_.str.34: ; @.str.34
.asciz "cc"
l_.str.35: ; @.str.35
.asciz "CC"
l_.str.36: ; @.str.36
.asciz "cs"
l_.str.37: ; @.str.37
.asciz "CS"
l_.str.38: ; @.str.38
.asciz "ne"
l_.str.39: ; @.str.39
.asciz "NE"
l_.str.40: ; @.str.40
.asciz "eq"
l_.str.41: ; @.str.41
.asciz "EQ"
l_.str.42: ; @.str.42
.asciz "vc"
l_.str.43: ; @.str.43
.asciz "VC"
l_.str.44: ; @.str.44
.asciz "vs"
l_.str.45: ; @.str.45
.asciz "VS"
l_.str.46: ; @.str.46
.asciz "pl"
l_.str.47: ; @.str.47
.asciz "PL"
l_.str.48: ; @.str.48
.asciz "mi"
l_.str.49: ; @.str.49
.asciz "MI"
l_.str.50: ; @.str.50
.asciz "ge"
l_.str.51: ; @.str.51
.asciz "GE"
l_.str.52: ; @.str.52
.asciz "lt"
l_.str.53: ; @.str.53
.asciz "LT"
l_.str.54: ; @.str.54
.asciz "gt"
l_.str.55: ; @.str.55
.asciz "GT"
l_.str.56: ; @.str.56
.asciz "le"
l_.str.57: ; @.str.57
.asciz "LE"
.section __DATA,__data
.globl _g_cc_table ; @g_cc_table
.p2align 3
_g_cc_table:
.quad l_.str.26
.quad l_.str.27
.quad l_.str.28
.quad l_.str.29
.quad l_.str.30
.quad l_.str.31
.quad l_.str.32
.quad l_.str.33
.quad l_.str.34
.quad l_.str.35
.quad l_.str.36
.quad l_.str.37
.quad l_.str.38
.quad l_.str.39
.quad l_.str.40
.quad l_.str.41
.quad l_.str.42
.quad l_.str.43
.quad l_.str.44
.quad l_.str.45
.quad l_.str.46
.quad l_.str.47
.quad l_.str.48
.quad l_.str.49
.quad l_.str.50
.quad l_.str.51
.quad l_.str.52
.quad l_.str.53
.quad l_.str.54
.quad l_.str.55
.quad l_.str.56
.quad l_.str.57
.globl _g_size_select_table ; @g_size_select_table
.p2align 2
_g_size_select_table:
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 1 ; 0x1
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 1 ; 0x1
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 2 ; 0x2
.globl _g_ea_cycle_table ; @g_ea_cycle_table
.p2align 2
_g_ea_cycle_table:
.space 36
.long 0 ; 0x0
.long 4 ; 0x4
.long 8 ; 0x8
.long 0 ; 0x0
.long 4 ; 0x4
.long 8 ; 0x8
.long 0 ; 0x0
.long 4 ; 0x4
.long 4 ; 0x4
.long 0 ; 0x0
.long 4 ; 0x4
.long 8 ; 0x8
.long 0 ; 0x0
.long 4 ; 0x4
.long 8 ; 0x8
.long 0 ; 0x0
.long 4 ; 0x4
.long 4 ; 0x4
.long 0 ; 0x0
.long 4 ; 0x4
.long 8 ; 0x8
.long 0 ; 0x0
.long 4 ; 0x4
.long 8 ; 0x8
.long 0 ; 0x0
.long 4 ; 0x4
.long 4 ; 0x4
.long 0 ; 0x0
.long 6 ; 0x6
.long 10 ; 0xa
.long 0 ; 0x0
.long 6 ; 0x6
.long 10 ; 0xa
.long 0 ; 0x0
.long 5 ; 0x5
.long 5 ; 0x5
.long 0 ; 0x0
.long 6 ; 0x6
.long 10 ; 0xa
.long 0 ; 0x0
.long 6 ; 0x6
.long 10 ; 0xa
.long 0 ; 0x0
.long 5 ; 0x5
.long 5 ; 0x5
.long 0 ; 0x0
.long 8 ; 0x8
.long 12 ; 0xc
.long 0 ; 0x0
.long 8 ; 0x8
.long 12 ; 0xc
.long 0 ; 0x0
.long 5 ; 0x5
.long 5 ; 0x5
.long 0 ; 0x0
.long 10 ; 0xa
.long 14 ; 0xe
.long 0 ; 0x0
.long 10 ; 0xa
.long 14 ; 0xe
.long 0 ; 0x0
.long 7 ; 0x7
.long 7 ; 0x7
.long 0 ; 0x0
.long 8 ; 0x8
.long 12 ; 0xc
.long 0 ; 0x0
.long 8 ; 0x8
.long 12 ; 0xc
.long 0 ; 0x0
.long 4 ; 0x4
.long 4 ; 0x4
.long 0 ; 0x0
.long 12 ; 0xc
.long 16 ; 0x10
.long 0 ; 0x0
.long 12 ; 0xc
.long 16 ; 0x10
.long 0 ; 0x0
.long 4 ; 0x4
.long 4 ; 0x4
.long 0 ; 0x0
.long 8 ; 0x8
.long 12 ; 0xc
.long 0 ; 0x0
.long 8 ; 0x8
.long 12 ; 0xc
.long 0 ; 0x0
.long 5 ; 0x5
.long 5 ; 0x5
.long 0 ; 0x0
.long 10 ; 0xa
.long 14 ; 0xe
.long 0 ; 0x0
.long 10 ; 0xa
.long 14 ; 0xe
.long 0 ; 0x0
.long 7 ; 0x7
.long 7 ; 0x7
.long 0 ; 0x0
.long 4 ; 0x4
.long 8 ; 0x8
.long 0 ; 0x0
.long 4 ; 0x4
.long 8 ; 0x8
.long 0 ; 0x0
.long 2 ; 0x2
.long 4 ; 0x4
.globl _g_jmp_cycle_table ; @g_jmp_cycle_table
.p2align 2
_g_jmp_cycle_table:
.long 0 ; 0x0
.long 4 ; 0x4
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 6 ; 0x6
.long 10 ; 0xa
.long 6 ; 0x6
.long 8 ; 0x8
.long 6 ; 0x6
.long 10 ; 0xa
.long 0 ; 0x0
.globl _g_jsr_cycle_table ; @g_jsr_cycle_table
.p2align 2
_g_jsr_cycle_table:
.long 0 ; 0x0
.long 4 ; 0x4
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 6 ; 0x6
.long 10 ; 0xa
.long 6 ; 0x6
.long 8 ; 0x8
.long 6 ; 0x6
.long 10 ; 0xa
.long 0 ; 0x0
.globl _g_lea_cycle_table ; @g_lea_cycle_table
.p2align 2
_g_lea_cycle_table:
.long 0 ; 0x0
.long 4 ; 0x4
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 8 ; 0x8
.long 12 ; 0xc
.long 8 ; 0x8
.long 12 ; 0xc
.long 8 ; 0x8
.long 12 ; 0xc
.long 0 ; 0x0
.globl _g_pea_cycle_table ; @g_pea_cycle_table
.p2align 2
_g_pea_cycle_table:
.long 0 ; 0x0
.long 6 ; 0x6
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 10 ; 0xa
.long 14 ; 0xe
.long 10 ; 0xa
.long 14 ; 0xe
.long 10 ; 0xa
.long 14 ; 0xe
.long 0 ; 0x0
.globl _g_moves_cycle_table ; @g_moves_cycle_table
.p2align 2
_g_moves_cycle_table:
.space 12
.long 0 ; 0x0
.long 4 ; 0x4
.long 6 ; 0x6
.long 0 ; 0x0
.long 4 ; 0x4
.long 6 ; 0x6
.long 0 ; 0x0
.long 4 ; 0x4
.long 6 ; 0x6
.long 0 ; 0x0
.long 6 ; 0x6
.long 12 ; 0xc
.long 0 ; 0x0
.long 6 ; 0x6
.long 12 ; 0xc
.long 0 ; 0x0
.long 12 ; 0xc
.long 16 ; 0x10
.long 0 ; 0x0
.long 16 ; 0x10
.long 20 ; 0x14
.long 0 ; 0x0
.long 12 ; 0xc
.long 16 ; 0x10
.long 0 ; 0x0
.long 16 ; 0x10
.long 20 ; 0x14
.space 12
.space 12
.space 12
.globl _g_clr_cycle_table ; @g_clr_cycle_table
.p2align 2
_g_clr_cycle_table:
.space 12
.long 0 ; 0x0
.long 4 ; 0x4
.long 6 ; 0x6
.long 0 ; 0x0
.long 4 ; 0x4
.long 6 ; 0x6
.long 0 ; 0x0
.long 4 ; 0x4
.long 6 ; 0x6
.long 0 ; 0x0
.long 6 ; 0x6
.long 8 ; 0x8
.long 0 ; 0x0
.long 6 ; 0x6
.long 8 ; 0x8
.long 0 ; 0x0
.long 8 ; 0x8
.long 10 ; 0xa
.long 0 ; 0x0
.long 10 ; 0xa
.long 14 ; 0xe
.long 0 ; 0x0
.long 8 ; 0x8
.long 10 ; 0xa
.long 0 ; 0x0
.long 10 ; 0xa
.long 14 ; 0xe
.space 12
.space 12
.space 12
.section __TEXT,__cstring,cstring_literals
l_.str.58: ; @.str.58
.asciz "In %s, near or on line %d:\n\t"
l_.str.60: ; @.str.60
.asciz "Field too long"
l_.str.61: ; @.str.61
.asciz "Malformed integer value (%c)"
l_.str.62: ; @.str.62
.asciz "moves"
l_.str.63: ; @.str.63
.asciz "clr"
l_.str.64: ; @.str.64
.asciz "add"
l_.str.65: ; @.str.65
.asciz "er"
l_.str.66: ; @.str.66
.asciz "adda"
l_.str.67: ; @.str.67
.asciz "and"
l_.str.68: ; @.str.68
.asciz "or"
l_.str.69: ; @.str.69
.asciz "sub"
l_.str.70: ; @.str.70
.asciz "suba"
l_.str.71: ; @.str.71
.asciz "jmp"
l_.str.72: ; @.str.72
.asciz "jsr"
l_.str.73: ; @.str.73
.asciz "lea"
l_.str.74: ; @.str.74
.asciz "pea"
.comm _g_opcode_input_table,68000,1 ; @g_opcode_input_table
l_.str.75: ; @.str.75
.asciz "illegal"
l_.str.76: ; @.str.76
.asciz "M68KMAKE_OP"
l_.str.78: ; @.str.78
.asciz "overflow in replace structure"
l_.str.79: ; @.str.79
.asciz "M68KMAKE"
l_.str.80: ; @.str.80
.asciz "Unknown M68KMAKE directive"
l_.str.81: ; @.str.81
.asciz "%s\n"
l_.str.82: ; @.str.82
.asciz "\n\n"
l_.str.83: ; @.str.83
.asciz "m68k_op_%s"
l_.str.84: ; @.str.84
.asciz "_%d"
l_.str.85: ; @.str.85
.asciz "."
l_.str.86: ; @.str.86
.asciz "_%s"
l_.str.87: ; @.str.87
.asciz "void %s(void);\n"
l_.str.88: ; @.str.88
.asciz "void %s(void)\n"
l_.str.89: ; @.str.89
.asciz "Opcode output table overflow"
.comm _g_opcode_output_table,204000,1 ; @g_opcode_output_table
l_.str.90: ; @.str.90
.asciz "\t{%-28s, 0x%04x, 0x%04x, {"
l_.str.91: ; @.str.91
.asciz "%3d"
l_.str.92: ; @.str.92
.asciz ", "
l_.str.93: ; @.str.93
.asciz "}},\n"
l_.str.94: ; @.str.94
.asciz "%s"
l_.str.95: ; @.str.95
.asciz "EA_%s_8()"
l_.str.96: ; @.str.96
.asciz "M68KMAKE_GET_EA_AY_8"
l_.str.97: ; @.str.97
.asciz "EA_%s_16()"
l_.str.98: ; @.str.98
.asciz "M68KMAKE_GET_EA_AY_16"
l_.str.99: ; @.str.99
.asciz "EA_%s_32()"
l_.str.100: ; @.str.100
.asciz "M68KMAKE_GET_EA_AY_32"
l_.str.101: ; @.str.101
.asciz "OPER_%s_8()"
l_.str.102: ; @.str.102
.asciz "M68KMAKE_GET_OPER_AY_8"
l_.str.103: ; @.str.103
.asciz "OPER_%s_16()"
l_.str.104: ; @.str.104
.asciz "M68KMAKE_GET_OPER_AY_16"
l_.str.105: ; @.str.105
.asciz "OPER_%s_32()"
l_.str.106: ; @.str.106
.asciz "M68KMAKE_GET_OPER_AY_32"
l_.str.107: ; @.str.107
.asciz ".........."
l_.str.108: ; @.str.108
.asciz "COND_%s()"
l_.str.109: ; @.str.109
.asciz "COND_NOT_%s()"
l_.str.110: ; @.str.110
.asciz "M68KMAKE_CC"
l_.str.111: ; @.str.111
.asciz "M68KMAKE_NOT_CC"
l_.str.112: ; @.str.112
.asciz "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
l_.str.113: ; @.str.113
.asciz "Premature end of file when getting function name"
l_.str.114: ; @.str.114
.asciz "Function too long"
l_.str.115: ; @.str.115
.asciz "Premature end of file when getting function body"
l_.str.116: ; @.str.116
.asciz "Invalid M68KMAKE_OP format"
l_.str.118: ; @.str.118
.asciz "bcc"
l_.str.119: ; @.str.119
.asciz "scc"
l_.str.120: ; @.str.120
.asciz "dbcc"
l_.str.121: ; @.str.121
.asciz "trapcc"
l_.str.122: ; @.str.122
.asciz "M68KMAKE_TABLE_START"
l_.str.123: ; @.str.123
.asciz "Premature EOF while reading table"
l_.str.124: ; @.str.124
.asciz "Buffer overflow reading inserts"
l_.str.125: ; @.str.125
.asciz "Premature EOF while reading inserts"
l_.str.126: ; @.str.126
.asciz "\n\t\tMusashi v%s 68000, 68010, 68EC020, 68020 emulator\n"
l_.str.128: ; @.str.128
.asciz "/"
l_.str.129: ; @.str.129
.asciz "%s%s"
l_.str.130: ; @.str.130
.asciz "m68kops.h"
l_.str.131: ; @.str.131
.asciz "wt"
l_.str.132: ; @.str.132
.asciz "Unable to create prototype file (%s)\n"
l_.str.133: ; @.str.133
.asciz "m68kops.c"
l_.str.134: ; @.str.134
.asciz "Unable to create table file (%s)\n"
l_.str.135: ; @.str.135
.asciz "m68kopac.c"
l_.str.136: ; @.str.136
.asciz "Unable to create ops ac file (%s)\n"
l_.str.137: ; @.str.137
.asciz "m68kopdm.c"
l_.str.138: ; @.str.138
.asciz "Unable to create ops dm file (%s)\n"
l_.str.139: ; @.str.139
.asciz "m68kopnz.c"
l_.str.140: ; @.str.140
.asciz "Unable to create ops nz file (%s)\n"
l_.str.141: ; @.str.141
.asciz "rt"
l_.str.142: ; @.str.142
.asciz "can't open %s for input"
l_.str.143: ; @.str.143
.asciz "Premature EOF while reading input file"
l_.str.144: ; @.str.144
.asciz "M68KMAKE_PROTOTYPE_HEADER"
l_.str.145: ; @.str.145
.asciz "Duplicate prototype header"
l_.str.146: ; @.str.146
.asciz "%s\n\n"
l_.str.147: ; @.str.147
.asciz "M68KMAKE_TABLE_HEADER"
l_.str.148: ; @.str.148
.asciz "Duplicate table header"
l_.str.149: ; @.str.149
.asciz "M68KMAKE_OPCODE_HANDLER_HEADER"
l_.str.150: ; @.str.150
.asciz "Duplicate opcode handler header"
l_.str.151: ; @.str.151
.asciz "M68KMAKE_PROTOTYPE_FOOTER"
l_.str.152: ; @.str.152
.asciz "Duplicate prototype footer"
l_.str.153: ; @.str.153
.asciz "M68KMAKE_TABLE_FOOTER"
l_.str.154: ; @.str.154
.asciz "Duplicate table footer"
l_.str.155: ; @.str.155
.asciz "M68KMAKE_OPCODE_HANDLER_FOOTER"
l_.str.156: ; @.str.156
.asciz "Duplicate opcode handler footer"
l_.str.157: ; @.str.157
.asciz "M68KMAKE_TABLE_BODY"
l_.str.158: ; @.str.158
.asciz "Table body encountered before prototype header"
l_.str.159: ; @.str.159
.asciz "Table body encountered before table header"
l_.str.160: ; @.str.160
.asciz "Table body encountered before opcode handler header"
l_.str.161: ; @.str.161
.asciz "Duplicate table body"
l_.str.162: ; @.str.162
.asciz "M68KMAKE_OPCODE_HANDLER_BODY"
l_.str.163: ; @.str.163
.asciz "Opcode handlers encountered before prototype header"
l_.str.164: ; @.str.164
.asciz "Opcode handlers encountered before table header"
l_.str.165: ; @.str.165
.asciz "Opcode handlers encountered before opcode handler header"
l_.str.166: ; @.str.166
.asciz "Opcode handlers encountered before table body"
l_.str.167: ; @.str.167
.asciz "Duplicate opcode handler section"
l_.str.168: ; @.str.168
.asciz "M68KMAKE_END"
l_.str.169: ; @.str.169
.asciz "Missing prototype header"
l_.str.170: ; @.str.170
.asciz "Missing prototype footer"
l_.str.171: ; @.str.171
.asciz "Missing table header"
l_.str.172: ; @.str.172
.asciz "Missing table footer"
l_.str.173: ; @.str.173
.asciz "Missing table body"
l_.str.174: ; @.str.174
.asciz "Missing opcode handler header"
l_.str.175: ; @.str.175
.asciz "Missing opcode handler footer"
l_.str.176: ; @.str.176
.asciz "Missing opcode handler body"
l_.str.177: ; @.str.177
.asciz "Unknown section identifier: %s"
l_.str.178: ; @.str.178
.asciz "Generated %d opcode handlers from %d primitives\n"
l_str: ; @str
.asciz "\t\tCopyright 1998-2000 Karl Stenerud (karl@mame.net)\n"
.subsections_via_symbols
| the_stack_data/15465.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function test_uint64_compare_func
_test_uint64_compare_func: ## @test_uint64_compare_func
## Unit test (x86-64, SysV AMD64 ABI, compiler-generated).
## Writes the dwords 256 and 257 into two adjacent stack slots (only the
## low 32 bits of each "uint64" key are initialized here) and checks that
## _uint64_compare_func returns 0 / -1 / 1 for the equal / less / greater
## pairings, passing each boolean outcome to _assert_se.
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14 ## callee-saved: will hold &b across calls
pushq %rbx ## callee-saved: will hold &a across calls
subq $16, %rsp ## locals for the two test values; keeps rsp 16-aligned
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movl $256, -24(%rbp) ## imm = 0x100 -- test value a
movl $257, -20(%rbp) ## imm = 0x101 -- test value b
leaq -24(%rbp), %rbx ## rbx = &a
movq %rbx, %rdi
movq %rbx, %rsi
callq _uint64_compare_func ## compare(a, a)
xorl %edi, %edi
testl %eax, %eax
sete %dil ## edi = (result == 0)
callq _assert_se
leaq -20(%rbp), %r14 ## r14 = &b
movq %rbx, %rdi
movq %r14, %rsi
callq _uint64_compare_func ## compare(a, b)
xorl %edi, %edi
cmpl $-1, %eax
sete %dil ## edi = (result == -1)
callq _assert_se
movq %r14, %rdi
movq %rbx, %rsi
callq _uint64_compare_func ## compare(b, a)
xorl %edi, %edi
cmpl $1, %eax
sete %dil ## edi = (result == 1)
callq _assert_se
addq $16, %rsp
popq %rbx
popq %r14
popq %rbp
retq
.cfi_endproc
## -- End function
.no_dead_strip _test_uint64_compare_func
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function test_uint64_compare_func
_test_uint64_compare_func: ; @test_uint64_compare_func
; Unit test (AArch64, AAPCS64, compiler-generated) -- same logic as the
; x86-64 build: stores the dwords 256 and 257 into two stack slots and
; verifies _uint64_compare_func returns 0 / -1 / 1 for the equal / less /
; greater pairings, feeding each boolean result to _assert_se.
.cfi_startproc
; %bb.0:
sub sp, sp, #32
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
mov w8, #256
stur w8, [x29, #-4] ; test value a at fp-4
mov w8, #257
str w8, [sp, #8] ; test value b at sp+8
sub x0, x29, #4
sub x1, x29, #4
bl _uint64_compare_func ; compare(a, a)
cmp w0, #0
cset w0, eq ; w0 = (result == 0)
bl _assert_se
sub x0, x29, #4
add x1, sp, #8
bl _uint64_compare_func ; compare(a, b)
cmn w0, #1
cset w0, eq ; w0 = (result == -1)
bl _assert_se
add x0, sp, #8
sub x1, x29, #4
bl _uint64_compare_func ; compare(b, a)
cmp w0, #1
cset w0, eq ; w0 = (result == 1)
bl _assert_se
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #32
ret
.cfi_endproc
; -- End function
.no_dead_strip _test_uint64_compare_func
.subsections_via_symbols
| AnghaBench/systemd/src/test/extr_test-hashmap.c_test_uint64_compare_func.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _secure_send_create_rpc_query ## -- Begin function secure_send_create_rpc_query
.p2align 4, 0x90
_secure_send_create_rpc_query: ## @secure_send_create_rpc_query
## Thin wrapper (x86-64, SysV ABI): forwards the first argument in rdi
## untouched, forces the second argument to zero, and tail-calls the
## internal __secure_send_create_rpc_query so its return value is
## returned directly to our caller.
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
xorl %esi, %esi ## arg2 = 0
popq %rbp ## tear down frame before the tail jump
jmp __secure_send_create_rpc_query ## TAILCALL
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _secure_send_create_rpc_query ; -- Begin function secure_send_create_rpc_query
.p2align 2
_secure_send_create_rpc_query: ; @secure_send_create_rpc_query
; Thin wrapper (AArch64, AAPCS64): forwards x0 unchanged, forces the
; second argument (w1) to zero, and tail-calls the internal
; __secure_send_create_rpc_query -- no frame needed.
.cfi_startproc
; %bb.0:
mov w1, #0 ; arg2 = 0
b __secure_send_create_rpc_query ; tail call
.cfi_endproc
; -- End function
.subsections_via_symbols
| AnghaBench/kphp-kdb/rpc-proxy/extr_rpc-proxy-secure-send.c_secure_send_create_rpc_query.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function slic_handle_frame_error
_slic_handle_frame_error: ## @slic_handle_frame_error
## Per-frame error statistics accounting (x86-64, SysV ABI, compiler-generated).
## In:  rdi = adapter struct; qword at offset 0 is the board model, compared
##            against SLIC_MODEL_OASIS. Every _SLIC_INC_STATS_COUNTER call
##            receives rdi+8 as its first argument -- presumably the stats
##            block lives at offset 8 (TODO confirm against struct layout).
##      rsi = frame descriptor; its first qword points at the raw little-endian
##            status words.
## OASIS path (fall-through from %bb.0): dword[4] and dword[0] of the status
## area are converted with _le32_to_cpu and tested bit-by-bit against the
## SLIC_VRHSTAT_* / SLIC_VRHSTATB_* masks, bumping one named counter per set bit.
## Non-OASIS path (LBB0_29): a single status dword is decoded with the
## SLIC_VGBSTAT_* shift/mask values and compared against per-error codes.
## Both paths finish at LBB0_59, which unconditionally counts the frame in
## rx_errors via a tail call.
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax ## 8-byte pad keeps rsp 16-aligned at call sites
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdi, %r14
movq (%rdi), %rax ## rax = adapter model word
addq $8, %r14 ## r14 = stats handle (adapter + 8), live across all calls
movq _SLIC_MODEL_OASIS@GOTPCREL(%rip), %rcx
movq (%rsi), %rbx ## rbx = pointer to raw status words
cmpq (%rcx), %rax
jne LBB0_29 ## not OASIS -> VGBSTAT decode
## %bb.1:
movl 4(%rbx), %edi
callq _le32_to_cpu
movl %eax, %r15d ## r15d = host-order status word 1 (tested vs VRHSTAT_* masks)
movl (%rbx), %edi
callq _le32_to_cpu
movl %eax, %ebx ## ebx = host-order status word 0 (tested vs VRHSTATB_* masks)
movq _SLIC_VRHSTATB_TPCSUM@GOTPCREL(%rip), %rax
testl %ebx, (%rax)
je LBB0_3
## %bb.2:
movq _rx_tpcsum@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq %r14, %rdi
callq _SLIC_INC_STATS_COUNTER
LBB0_3:
movq _SLIC_VRHSTAT_TPOFLO@GOTPCREL(%rip), %rax
testl %r15d, (%rax)
je LBB0_5
## %bb.4:
movq _rx_tpoflow@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq %r14, %rdi
callq _SLIC_INC_STATS_COUNTER
LBB0_5:
movq _SLIC_VRHSTATB_TPHLEN@GOTPCREL(%rip), %rax
testl %ebx, (%rax)
je LBB0_7
## %bb.6:
movq _rx_tphlen@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq %r14, %rdi
callq _SLIC_INC_STATS_COUNTER
LBB0_7:
movq _SLIC_VRHSTATB_IPCSUM@GOTPCREL(%rip), %rax
testl %ebx, (%rax)
je LBB0_9
## %bb.8:
movq _rx_ipcsum@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq %r14, %rdi
callq _SLIC_INC_STATS_COUNTER
LBB0_9:
movq _SLIC_VRHSTATB_IPLERR@GOTPCREL(%rip), %rax
testl %ebx, (%rax)
je LBB0_11
## %bb.10:
movq _rx_iplen@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq %r14, %rdi
callq _SLIC_INC_STATS_COUNTER
LBB0_11:
movq _SLIC_VRHSTATB_IPHERR@GOTPCREL(%rip), %rax
testl %ebx, (%rax)
je LBB0_13
## %bb.12:
movq _rx_iphlen@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq %r14, %rdi
callq _SLIC_INC_STATS_COUNTER
LBB0_13:
movq _SLIC_VRHSTATB_RCVE@GOTPCREL(%rip), %rax
testl %ebx, (%rax)
je LBB0_15
## %bb.14:
movq _rx_early@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq %r14, %rdi
callq _SLIC_INC_STATS_COUNTER
LBB0_15:
movq _SLIC_VRHSTATB_BUFF@GOTPCREL(%rip), %rax
testl %ebx, (%rax)
je LBB0_17
## %bb.16:
movq _rx_buffoflow@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq %r14, %rdi
callq _SLIC_INC_STATS_COUNTER
LBB0_17:
movq _SLIC_VRHSTATB_CODE@GOTPCREL(%rip), %rax
testl %ebx, (%rax)
je LBB0_19
## %bb.18:
movq _rx_lcode@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq %r14, %rdi
callq _SLIC_INC_STATS_COUNTER
LBB0_19:
movq _SLIC_VRHSTATB_DRBL@GOTPCREL(%rip), %rax
testl %ebx, (%rax)
je LBB0_21
## %bb.20:
movq _rx_drbl@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq %r14, %rdi
callq _SLIC_INC_STATS_COUNTER
LBB0_21:
movq _SLIC_VRHSTATB_CRC@GOTPCREL(%rip), %rax
testl %ebx, (%rax)
je LBB0_23
## %bb.22:
movq _rx_crc@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq %r14, %rdi
callq _SLIC_INC_STATS_COUNTER
LBB0_23:
movq _SLIC_VRHSTAT_802OE@GOTPCREL(%rip), %rax
testl %r15d, (%rax)
je LBB0_25
## %bb.24:
movq _rx_oflow802@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq %r14, %rdi
callq _SLIC_INC_STATS_COUNTER
LBB0_25:
movq _SLIC_VRHSTATB_802UE@GOTPCREL(%rip), %rax
testl %ebx, (%rax)
je LBB0_27
## %bb.26:
movq _rx_uflow802@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq %r14, %rdi
callq _SLIC_INC_STATS_COUNTER
LBB0_27:
movq _SLIC_VRHSTATB_CARRE@GOTPCREL(%rip), %rax
testl %ebx, (%rax)
je LBB0_59
## %bb.28:
movq _tx_carrier@GOTPCREL(%rip), %rax
jmp LBB0_58 ## reuse the shared counter-bump tail below
LBB0_29: ## non-OASIS model: decode single VGBSTAT status word
movl (%rbx), %edi
callq _le32_to_cpu
movl %eax, %r15d ## r15d = host-order VGBSTAT word
movq _SLIC_VGBSTAT_XPERR@GOTPCREL(%rip), %rax
testl %r15d, (%rax)
je LBB0_36
## %bb.30:
movq _SLIC_VGBSTAT_XERRSHFT@GOTPCREL(%rip), %rax
movb (%rax), %cl
movl %r15d, %ebx
sarl %cl, %ebx ## ebx = shifted-down transport error code
movq _SLIC_VGBSTAT_XCSERR@GOTPCREL(%rip), %rax
cmpl (%rax), %ebx
jne LBB0_32
## %bb.31:
movq _rx_tpcsum@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq %r14, %rdi
callq _SLIC_INC_STATS_COUNTER
LBB0_32:
movq _SLIC_VGBSTAT_XUFLOW@GOTPCREL(%rip), %rax
cmpl (%rax), %ebx
jne LBB0_34
## %bb.33:
movq _rx_tpoflow@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq %r14, %rdi
callq _SLIC_INC_STATS_COUNTER
LBB0_34:
movq _SLIC_VGBSTAT_XHLEN@GOTPCREL(%rip), %rax
cmpl (%rax), %ebx
jne LBB0_36
## %bb.35:
movq _rx_tphlen@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq %r14, %rdi
callq _SLIC_INC_STATS_COUNTER
LBB0_36:
movq _SLIC_VGBSTAT_NETERR@GOTPCREL(%rip), %rax
testl %r15d, (%rax)
je LBB0_43
## %bb.37:
movq _SLIC_VGBSTAT_NERRSHFT@GOTPCREL(%rip), %rax
movb (%rax), %cl
movl %r15d, %ebx
sarl %cl, %ebx
movq _SLIC_VGBSTAT_NERRMSK@GOTPCREL(%rip), %rax
andl (%rax), %ebx ## ebx = shifted+masked network error code
movq _SLIC_VGBSTAT_NCSERR@GOTPCREL(%rip), %rax
cmpl (%rax), %ebx
jne LBB0_39
## %bb.38:
movq _rx_ipcsum@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq %r14, %rdi
callq _SLIC_INC_STATS_COUNTER
LBB0_39:
movq _SLIC_VGBSTAT_NUFLOW@GOTPCREL(%rip), %rax
cmpl (%rax), %ebx
jne LBB0_41
## %bb.40:
movq _rx_iplen@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq %r14, %rdi
callq _SLIC_INC_STATS_COUNTER
LBB0_41:
movq _SLIC_VGBSTAT_NHLEN@GOTPCREL(%rip), %rax
cmpl (%rax), %ebx
jne LBB0_43
## %bb.42:
movq _rx_iphlen@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq %r14, %rdi
callq _SLIC_INC_STATS_COUNTER
LBB0_43:
movq _SLIC_VGBSTAT_LNKERR@GOTPCREL(%rip), %rax
testl %r15d, (%rax)
je LBB0_59
## %bb.44:
movq _SLIC_VGBSTAT_LERRMSK@GOTPCREL(%rip), %rax
movl (%rax), %ebx
andl %r15d, %ebx ## ebx = masked link error code
movq _SLIC_VGBSTAT_LDEARLY@GOTPCREL(%rip), %rax
cmpl (%rax), %ebx
jne LBB0_46
## %bb.45:
movq _rx_early@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq %r14, %rdi
callq _SLIC_INC_STATS_COUNTER
LBB0_46:
movq _SLIC_VGBSTAT_LBOFLO@GOTPCREL(%rip), %rax
cmpl (%rax), %ebx
jne LBB0_48
## %bb.47:
movq _rx_buffoflow@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq %r14, %rdi
callq _SLIC_INC_STATS_COUNTER
LBB0_48:
movq _SLIC_VGBSTAT_LCODERR@GOTPCREL(%rip), %rax
cmpl (%rax), %ebx
jne LBB0_50
## %bb.49:
movq _rx_lcode@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq %r14, %rdi
callq _SLIC_INC_STATS_COUNTER
LBB0_50:
movq _SLIC_VGBSTAT_LDBLNBL@GOTPCREL(%rip), %rax
cmpl (%rax), %ebx
jne LBB0_52
## %bb.51:
movq _rx_drbl@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq %r14, %rdi
callq _SLIC_INC_STATS_COUNTER
LBB0_52:
movq _SLIC_VGBSTAT_LCRCERR@GOTPCREL(%rip), %rax
cmpl (%rax), %ebx
jne LBB0_54
## %bb.53:
movq _rx_crc@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq %r14, %rdi
callq _SLIC_INC_STATS_COUNTER
LBB0_54:
movq _SLIC_VGBSTAT_LOFLO@GOTPCREL(%rip), %rax
cmpl (%rax), %ebx
jne LBB0_56
## %bb.55:
movq _rx_oflow802@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq %r14, %rdi
callq _SLIC_INC_STATS_COUNTER
LBB0_56:
movq _SLIC_VGBSTAT_LUFLO@GOTPCREL(%rip), %rax
cmpl (%rax), %ebx
jne LBB0_59
## %bb.57:
movq _rx_uflow802@GOTPCREL(%rip), %rax
LBB0_58: ## shared tail: bump the counter whose id dword is at (%rax)
movl (%rax), %esi
movq %r14, %rdi
callq _SLIC_INC_STATS_COUNTER
LBB0_59: ## always count the frame in rx_errors, via epilogue + tail call
movq _rx_errors@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq %r14, %rdi
addq $8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
jmp _SLIC_INC_STATS_COUNTER ## TAILCALL
.cfi_endproc
## -- End function
.comm _SLIC_MODEL_OASIS,8,3 ## @SLIC_MODEL_OASIS
.comm _SLIC_VRHSTATB_TPCSUM,4,2 ## @SLIC_VRHSTATB_TPCSUM
.comm _rx_tpcsum,4,2 ## @rx_tpcsum
.comm _SLIC_VRHSTAT_TPOFLO,4,2 ## @SLIC_VRHSTAT_TPOFLO
.comm _rx_tpoflow,4,2 ## @rx_tpoflow
.comm _SLIC_VRHSTATB_TPHLEN,4,2 ## @SLIC_VRHSTATB_TPHLEN
.comm _rx_tphlen,4,2 ## @rx_tphlen
.comm _SLIC_VRHSTATB_IPCSUM,4,2 ## @SLIC_VRHSTATB_IPCSUM
.comm _rx_ipcsum,4,2 ## @rx_ipcsum
.comm _SLIC_VRHSTATB_IPLERR,4,2 ## @SLIC_VRHSTATB_IPLERR
.comm _rx_iplen,4,2 ## @rx_iplen
.comm _SLIC_VRHSTATB_IPHERR,4,2 ## @SLIC_VRHSTATB_IPHERR
.comm _rx_iphlen,4,2 ## @rx_iphlen
.comm _SLIC_VRHSTATB_RCVE,4,2 ## @SLIC_VRHSTATB_RCVE
.comm _rx_early,4,2 ## @rx_early
.comm _SLIC_VRHSTATB_BUFF,4,2 ## @SLIC_VRHSTATB_BUFF
.comm _rx_buffoflow,4,2 ## @rx_buffoflow
.comm _SLIC_VRHSTATB_CODE,4,2 ## @SLIC_VRHSTATB_CODE
.comm _rx_lcode,4,2 ## @rx_lcode
.comm _SLIC_VRHSTATB_DRBL,4,2 ## @SLIC_VRHSTATB_DRBL
.comm _rx_drbl,4,2 ## @rx_drbl
.comm _SLIC_VRHSTATB_CRC,4,2 ## @SLIC_VRHSTATB_CRC
.comm _rx_crc,4,2 ## @rx_crc
.comm _SLIC_VRHSTAT_802OE,4,2 ## @SLIC_VRHSTAT_802OE
.comm _rx_oflow802,4,2 ## @rx_oflow802
.comm _SLIC_VRHSTATB_802UE,4,2 ## @SLIC_VRHSTATB_802UE
.comm _rx_uflow802,4,2 ## @rx_uflow802
.comm _SLIC_VRHSTATB_CARRE,4,2 ## @SLIC_VRHSTATB_CARRE
.comm _tx_carrier,4,2 ## @tx_carrier
.comm _SLIC_VGBSTAT_XPERR,4,2 ## @SLIC_VGBSTAT_XPERR
.comm _SLIC_VGBSTAT_XERRSHFT,4,2 ## @SLIC_VGBSTAT_XERRSHFT
.comm _SLIC_VGBSTAT_XCSERR,4,2 ## @SLIC_VGBSTAT_XCSERR
.comm _SLIC_VGBSTAT_XUFLOW,4,2 ## @SLIC_VGBSTAT_XUFLOW
.comm _SLIC_VGBSTAT_XHLEN,4,2 ## @SLIC_VGBSTAT_XHLEN
.comm _SLIC_VGBSTAT_NETERR,4,2 ## @SLIC_VGBSTAT_NETERR
.comm _SLIC_VGBSTAT_NERRSHFT,4,2 ## @SLIC_VGBSTAT_NERRSHFT
.comm _SLIC_VGBSTAT_NERRMSK,4,2 ## @SLIC_VGBSTAT_NERRMSK
.comm _SLIC_VGBSTAT_NCSERR,4,2 ## @SLIC_VGBSTAT_NCSERR
.comm _SLIC_VGBSTAT_NUFLOW,4,2 ## @SLIC_VGBSTAT_NUFLOW
.comm _SLIC_VGBSTAT_NHLEN,4,2 ## @SLIC_VGBSTAT_NHLEN
.comm _SLIC_VGBSTAT_LNKERR,4,2 ## @SLIC_VGBSTAT_LNKERR
.comm _SLIC_VGBSTAT_LERRMSK,4,2 ## @SLIC_VGBSTAT_LERRMSK
.comm _SLIC_VGBSTAT_LDEARLY,4,2 ## @SLIC_VGBSTAT_LDEARLY
.comm _SLIC_VGBSTAT_LBOFLO,4,2 ## @SLIC_VGBSTAT_LBOFLO
.comm _SLIC_VGBSTAT_LCODERR,4,2 ## @SLIC_VGBSTAT_LCODERR
.comm _SLIC_VGBSTAT_LDBLNBL,4,2 ## @SLIC_VGBSTAT_LDBLNBL
.comm _SLIC_VGBSTAT_LCRCERR,4,2 ## @SLIC_VGBSTAT_LCRCERR
.comm _SLIC_VGBSTAT_LOFLO,4,2 ## @SLIC_VGBSTAT_LOFLO
.comm _SLIC_VGBSTAT_LUFLO,4,2 ## @SLIC_VGBSTAT_LUFLO
.comm _rx_errors,4,2 ## @rx_errors
.no_dead_strip _slic_handle_frame_error
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function slic_handle_frame_error
_slic_handle_frame_error: ; @slic_handle_frame_error
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x19, x0
ldr x8, [x19], #8
Lloh0:
adrp x9, _SLIC_MODEL_OASIS@GOTPAGE
Lloh1:
ldr x9, [x9, _SLIC_MODEL_OASIS@GOTPAGEOFF]
Lloh2:
ldr x9, [x9]
ldr x20, [x1]
cmp x8, x9
b.ne LBB0_29
; %bb.1:
ldr w0, [x20, #4]
bl _le32_to_cpu
mov x21, x0
ldr w0, [x20]
bl _le32_to_cpu
mov x20, x0
Lloh3:
adrp x8, _SLIC_VRHSTATB_TPCSUM@GOTPAGE
Lloh4:
ldr x8, [x8, _SLIC_VRHSTATB_TPCSUM@GOTPAGEOFF]
Lloh5:
ldr w8, [x8]
tst w8, w0
b.eq LBB0_3
; %bb.2:
Lloh6:
adrp x8, _rx_tpcsum@GOTPAGE
Lloh7:
ldr x8, [x8, _rx_tpcsum@GOTPAGEOFF]
Lloh8:
ldr w1, [x8]
mov x0, x19
bl _SLIC_INC_STATS_COUNTER
LBB0_3:
Lloh9:
adrp x8, _SLIC_VRHSTAT_TPOFLO@GOTPAGE
Lloh10:
ldr x8, [x8, _SLIC_VRHSTAT_TPOFLO@GOTPAGEOFF]
Lloh11:
ldr w8, [x8]
tst w8, w21
b.eq LBB0_5
; %bb.4:
Lloh12:
adrp x8, _rx_tpoflow@GOTPAGE
Lloh13:
ldr x8, [x8, _rx_tpoflow@GOTPAGEOFF]
Lloh14:
ldr w1, [x8]
mov x0, x19
bl _SLIC_INC_STATS_COUNTER
LBB0_5:
Lloh15:
adrp x8, _SLIC_VRHSTATB_TPHLEN@GOTPAGE
Lloh16:
ldr x8, [x8, _SLIC_VRHSTATB_TPHLEN@GOTPAGEOFF]
Lloh17:
ldr w8, [x8]
tst w8, w20
b.eq LBB0_7
; %bb.6:
Lloh18:
adrp x8, _rx_tphlen@GOTPAGE
Lloh19:
ldr x8, [x8, _rx_tphlen@GOTPAGEOFF]
Lloh20:
ldr w1, [x8]
mov x0, x19
bl _SLIC_INC_STATS_COUNTER
LBB0_7:
Lloh21:
adrp x8, _SLIC_VRHSTATB_IPCSUM@GOTPAGE
Lloh22:
ldr x8, [x8, _SLIC_VRHSTATB_IPCSUM@GOTPAGEOFF]
Lloh23:
ldr w8, [x8]
tst w8, w20
b.eq LBB0_9
; %bb.8:
Lloh24:
adrp x8, _rx_ipcsum@GOTPAGE
Lloh25:
ldr x8, [x8, _rx_ipcsum@GOTPAGEOFF]
Lloh26:
ldr w1, [x8]
mov x0, x19
bl _SLIC_INC_STATS_COUNTER
LBB0_9:
Lloh27:
adrp x8, _SLIC_VRHSTATB_IPLERR@GOTPAGE
Lloh28:
ldr x8, [x8, _SLIC_VRHSTATB_IPLERR@GOTPAGEOFF]
Lloh29:
ldr w8, [x8]
tst w8, w20
b.eq LBB0_11
; %bb.10:
Lloh30:
adrp x8, _rx_iplen@GOTPAGE
Lloh31:
ldr x8, [x8, _rx_iplen@GOTPAGEOFF]
Lloh32:
ldr w1, [x8]
mov x0, x19
bl _SLIC_INC_STATS_COUNTER
LBB0_11:
Lloh33:
adrp x8, _SLIC_VRHSTATB_IPHERR@GOTPAGE
Lloh34:
ldr x8, [x8, _SLIC_VRHSTATB_IPHERR@GOTPAGEOFF]
Lloh35:
ldr w8, [x8]
tst w8, w20
b.eq LBB0_13
; %bb.12:
Lloh36:
adrp x8, _rx_iphlen@GOTPAGE
Lloh37:
ldr x8, [x8, _rx_iphlen@GOTPAGEOFF]
Lloh38:
ldr w1, [x8]
mov x0, x19
bl _SLIC_INC_STATS_COUNTER
LBB0_13:
Lloh39:
adrp x8, _SLIC_VRHSTATB_RCVE@GOTPAGE
Lloh40:
ldr x8, [x8, _SLIC_VRHSTATB_RCVE@GOTPAGEOFF]
Lloh41:
ldr w8, [x8]
tst w8, w20
b.eq LBB0_15
; %bb.14:
Lloh42:
adrp x8, _rx_early@GOTPAGE
Lloh43:
ldr x8, [x8, _rx_early@GOTPAGEOFF]
Lloh44:
ldr w1, [x8]
mov x0, x19
bl _SLIC_INC_STATS_COUNTER
LBB0_15:
Lloh45:
adrp x8, _SLIC_VRHSTATB_BUFF@GOTPAGE
Lloh46:
ldr x8, [x8, _SLIC_VRHSTATB_BUFF@GOTPAGEOFF]
Lloh47:
ldr w8, [x8]
tst w8, w20
b.eq LBB0_17
; %bb.16:
Lloh48:
adrp x8, _rx_buffoflow@GOTPAGE
Lloh49:
ldr x8, [x8, _rx_buffoflow@GOTPAGEOFF]
Lloh50:
ldr w1, [x8]
mov x0, x19
bl _SLIC_INC_STATS_COUNTER
LBB0_17:
Lloh51:
adrp x8, _SLIC_VRHSTATB_CODE@GOTPAGE
Lloh52:
ldr x8, [x8, _SLIC_VRHSTATB_CODE@GOTPAGEOFF]
Lloh53:
ldr w8, [x8]
tst w8, w20
b.eq LBB0_19
; %bb.18:
Lloh54:
adrp x8, _rx_lcode@GOTPAGE
Lloh55:
ldr x8, [x8, _rx_lcode@GOTPAGEOFF]
Lloh56:
ldr w1, [x8]
mov x0, x19
bl _SLIC_INC_STATS_COUNTER
LBB0_19:
Lloh57:
adrp x8, _SLIC_VRHSTATB_DRBL@GOTPAGE
Lloh58:
ldr x8, [x8, _SLIC_VRHSTATB_DRBL@GOTPAGEOFF]
Lloh59:
ldr w8, [x8]
tst w8, w20
b.eq LBB0_21
; %bb.20:
Lloh60:
adrp x8, _rx_drbl@GOTPAGE
Lloh61:
ldr x8, [x8, _rx_drbl@GOTPAGEOFF]
Lloh62:
ldr w1, [x8]
mov x0, x19
bl _SLIC_INC_STATS_COUNTER
LBB0_21:
Lloh63:
adrp x8, _SLIC_VRHSTATB_CRC@GOTPAGE
Lloh64:
ldr x8, [x8, _SLIC_VRHSTATB_CRC@GOTPAGEOFF]
Lloh65:
ldr w8, [x8]
tst w8, w20
b.eq LBB0_23
; %bb.22:
Lloh66:
adrp x8, _rx_crc@GOTPAGE
Lloh67:
ldr x8, [x8, _rx_crc@GOTPAGEOFF]
Lloh68:
ldr w1, [x8]
mov x0, x19
bl _SLIC_INC_STATS_COUNTER
LBB0_23:
Lloh69:
adrp x8, _SLIC_VRHSTAT_802OE@GOTPAGE
Lloh70:
ldr x8, [x8, _SLIC_VRHSTAT_802OE@GOTPAGEOFF]
Lloh71:
ldr w8, [x8]
tst w8, w21
b.eq LBB0_25
; %bb.24:
Lloh72:
adrp x8, _rx_oflow802@GOTPAGE
Lloh73:
ldr x8, [x8, _rx_oflow802@GOTPAGEOFF]
Lloh74:
ldr w1, [x8]
mov x0, x19
bl _SLIC_INC_STATS_COUNTER
LBB0_25:
Lloh75:
adrp x8, _SLIC_VRHSTATB_802UE@GOTPAGE
Lloh76:
ldr x8, [x8, _SLIC_VRHSTATB_802UE@GOTPAGEOFF]
Lloh77:
ldr w8, [x8]
tst w8, w20
b.eq LBB0_27
; %bb.26:
Lloh78:
adrp x8, _rx_uflow802@GOTPAGE
Lloh79:
ldr x8, [x8, _rx_uflow802@GOTPAGEOFF]
Lloh80:
ldr w1, [x8]
mov x0, x19
bl _SLIC_INC_STATS_COUNTER
LBB0_27:
Lloh81:
adrp x8, _SLIC_VRHSTATB_CARRE@GOTPAGE
Lloh82:
ldr x8, [x8, _SLIC_VRHSTATB_CARRE@GOTPAGEOFF]
Lloh83:
ldr w8, [x8]
tst w8, w20
b.eq LBB0_59
; %bb.28:
Lloh84:
adrp x8, _tx_carrier@GOTPAGE
Lloh85:
ldr x8, [x8, _tx_carrier@GOTPAGEOFF]
b LBB0_58
LBB0_29:
ldr w0, [x20]
bl _le32_to_cpu
mov x20, x0
Lloh86:
adrp x8, _SLIC_VGBSTAT_XPERR@GOTPAGE
Lloh87:
ldr x8, [x8, _SLIC_VGBSTAT_XPERR@GOTPAGEOFF]
Lloh88:
ldr w8, [x8]
tst w8, w0
b.eq LBB0_36
; %bb.30:
Lloh89:
adrp x8, _SLIC_VGBSTAT_XERRSHFT@GOTPAGE
Lloh90:
ldr x8, [x8, _SLIC_VGBSTAT_XERRSHFT@GOTPAGEOFF]
Lloh91:
ldr w8, [x8]
asr w21, w20, w8
Lloh92:
adrp x8, _SLIC_VGBSTAT_XCSERR@GOTPAGE
Lloh93:
ldr x8, [x8, _SLIC_VGBSTAT_XCSERR@GOTPAGEOFF]
Lloh94:
ldr w8, [x8]
cmp w21, w8
b.ne LBB0_32
; %bb.31:
Lloh95:
adrp x8, _rx_tpcsum@GOTPAGE
Lloh96:
ldr x8, [x8, _rx_tpcsum@GOTPAGEOFF]
Lloh97:
ldr w1, [x8]
mov x0, x19
bl _SLIC_INC_STATS_COUNTER
LBB0_32:
Lloh98:
adrp x8, _SLIC_VGBSTAT_XUFLOW@GOTPAGE
Lloh99:
ldr x8, [x8, _SLIC_VGBSTAT_XUFLOW@GOTPAGEOFF]
Lloh100:
ldr w8, [x8]
cmp w21, w8
b.ne LBB0_34
; %bb.33:
Lloh101:
adrp x8, _rx_tpoflow@GOTPAGE
Lloh102:
ldr x8, [x8, _rx_tpoflow@GOTPAGEOFF]
Lloh103:
ldr w1, [x8]
mov x0, x19
bl _SLIC_INC_STATS_COUNTER
LBB0_34:
Lloh104:
adrp x8, _SLIC_VGBSTAT_XHLEN@GOTPAGE
Lloh105:
ldr x8, [x8, _SLIC_VGBSTAT_XHLEN@GOTPAGEOFF]
Lloh106:
ldr w8, [x8]
cmp w21, w8
b.ne LBB0_36
; %bb.35:
Lloh107:
adrp x8, _rx_tphlen@GOTPAGE
Lloh108:
ldr x8, [x8, _rx_tphlen@GOTPAGEOFF]
Lloh109:
ldr w1, [x8]
mov x0, x19
bl _SLIC_INC_STATS_COUNTER
LBB0_36:
Lloh110:
adrp x8, _SLIC_VGBSTAT_NETERR@GOTPAGE
Lloh111:
ldr x8, [x8, _SLIC_VGBSTAT_NETERR@GOTPAGEOFF]
Lloh112:
ldr w8, [x8]
tst w8, w20
b.eq LBB0_43
; %bb.37:
Lloh113:
adrp x8, _SLIC_VGBSTAT_NERRSHFT@GOTPAGE
Lloh114:
ldr x8, [x8, _SLIC_VGBSTAT_NERRSHFT@GOTPAGEOFF]
Lloh115:
ldr w8, [x8]
asr w8, w20, w8
Lloh116:
adrp x9, _SLIC_VGBSTAT_NERRMSK@GOTPAGE
Lloh117:
ldr x9, [x9, _SLIC_VGBSTAT_NERRMSK@GOTPAGEOFF]
Lloh118:
ldr w9, [x9]
and w21, w8, w9
Lloh119:
adrp x8, _SLIC_VGBSTAT_NCSERR@GOTPAGE
Lloh120:
ldr x8, [x8, _SLIC_VGBSTAT_NCSERR@GOTPAGEOFF]
Lloh121:
ldr w8, [x8]
cmp w21, w8
b.ne LBB0_39
; %bb.38:
Lloh122:
adrp x8, _rx_ipcsum@GOTPAGE
Lloh123:
ldr x8, [x8, _rx_ipcsum@GOTPAGEOFF]
Lloh124:
ldr w1, [x8]
mov x0, x19
bl _SLIC_INC_STATS_COUNTER
LBB0_39:
Lloh125:
adrp x8, _SLIC_VGBSTAT_NUFLOW@GOTPAGE
Lloh126:
ldr x8, [x8, _SLIC_VGBSTAT_NUFLOW@GOTPAGEOFF]
Lloh127:
ldr w8, [x8]
cmp w21, w8
b.ne LBB0_41
; %bb.40:
Lloh128:
adrp x8, _rx_iplen@GOTPAGE
Lloh129:
ldr x8, [x8, _rx_iplen@GOTPAGEOFF]
Lloh130:
ldr w1, [x8]
mov x0, x19
bl _SLIC_INC_STATS_COUNTER
LBB0_41:
Lloh131:
adrp x8, _SLIC_VGBSTAT_NHLEN@GOTPAGE
Lloh132:
ldr x8, [x8, _SLIC_VGBSTAT_NHLEN@GOTPAGEOFF]
Lloh133:
ldr w8, [x8]
cmp w21, w8
b.ne LBB0_43
; %bb.42:
Lloh134:
adrp x8, _rx_iphlen@GOTPAGE
Lloh135:
ldr x8, [x8, _rx_iphlen@GOTPAGEOFF]
Lloh136:
ldr w1, [x8]
mov x0, x19
bl _SLIC_INC_STATS_COUNTER
LBB0_43:
Lloh137:
adrp x8, _SLIC_VGBSTAT_LNKERR@GOTPAGE
Lloh138:
ldr x8, [x8, _SLIC_VGBSTAT_LNKERR@GOTPAGEOFF]
Lloh139:
ldr w8, [x8]
tst w8, w20
b.eq LBB0_59
; %bb.44:
Lloh140:
adrp x8, _SLIC_VGBSTAT_LERRMSK@GOTPAGE
Lloh141:
ldr x8, [x8, _SLIC_VGBSTAT_LERRMSK@GOTPAGEOFF]
Lloh142:
ldr w8, [x8]
and w20, w8, w20
Lloh143:
adrp x8, _SLIC_VGBSTAT_LDEARLY@GOTPAGE
Lloh144:
ldr x8, [x8, _SLIC_VGBSTAT_LDEARLY@GOTPAGEOFF]
Lloh145:
ldr w8, [x8]
cmp w20, w8
b.ne LBB0_46
; %bb.45:
Lloh146:
adrp x8, _rx_early@GOTPAGE
Lloh147:
ldr x8, [x8, _rx_early@GOTPAGEOFF]
Lloh148:
ldr w1, [x8]
mov x0, x19
bl _SLIC_INC_STATS_COUNTER
LBB0_46:
Lloh149:
adrp x8, _SLIC_VGBSTAT_LBOFLO@GOTPAGE
Lloh150:
ldr x8, [x8, _SLIC_VGBSTAT_LBOFLO@GOTPAGEOFF]
Lloh151:
ldr w8, [x8]
cmp w20, w8
b.ne LBB0_48
; %bb.47:
Lloh152:
adrp x8, _rx_buffoflow@GOTPAGE
Lloh153:
ldr x8, [x8, _rx_buffoflow@GOTPAGEOFF]
Lloh154:
ldr w1, [x8]
mov x0, x19
bl _SLIC_INC_STATS_COUNTER
LBB0_48:
Lloh155:
adrp x8, _SLIC_VGBSTAT_LCODERR@GOTPAGE
Lloh156:
ldr x8, [x8, _SLIC_VGBSTAT_LCODERR@GOTPAGEOFF]
Lloh157:
ldr w8, [x8]
cmp w20, w8
b.ne LBB0_50
; %bb.49:
Lloh158:
adrp x8, _rx_lcode@GOTPAGE
Lloh159:
ldr x8, [x8, _rx_lcode@GOTPAGEOFF]
Lloh160:
ldr w1, [x8]
mov x0, x19
bl _SLIC_INC_STATS_COUNTER
LBB0_50:
Lloh161:
adrp x8, _SLIC_VGBSTAT_LDBLNBL@GOTPAGE
Lloh162:
ldr x8, [x8, _SLIC_VGBSTAT_LDBLNBL@GOTPAGEOFF]
Lloh163:
ldr w8, [x8]
cmp w20, w8
b.ne LBB0_52
; %bb.51:
Lloh164:
adrp x8, _rx_drbl@GOTPAGE
Lloh165:
ldr x8, [x8, _rx_drbl@GOTPAGEOFF]
Lloh166:
ldr w1, [x8]
mov x0, x19
bl _SLIC_INC_STATS_COUNTER
LBB0_52:
Lloh167:
adrp x8, _SLIC_VGBSTAT_LCRCERR@GOTPAGE
Lloh168:
ldr x8, [x8, _SLIC_VGBSTAT_LCRCERR@GOTPAGEOFF]
Lloh169:
ldr w8, [x8]
cmp w20, w8
b.ne LBB0_54
; %bb.53:
Lloh170:
adrp x8, _rx_crc@GOTPAGE
Lloh171:
ldr x8, [x8, _rx_crc@GOTPAGEOFF]
Lloh172:
ldr w1, [x8]
mov x0, x19
bl _SLIC_INC_STATS_COUNTER
LBB0_54:
Lloh173:
adrp x8, _SLIC_VGBSTAT_LOFLO@GOTPAGE
Lloh174:
ldr x8, [x8, _SLIC_VGBSTAT_LOFLO@GOTPAGEOFF]
Lloh175:
ldr w8, [x8]
cmp w20, w8
b.ne LBB0_56
; %bb.55:
Lloh176:
adrp x8, _rx_oflow802@GOTPAGE
Lloh177:
ldr x8, [x8, _rx_oflow802@GOTPAGEOFF]
Lloh178:
ldr w1, [x8]
mov x0, x19
bl _SLIC_INC_STATS_COUNTER
LBB0_56:
Lloh179:
adrp x8, _SLIC_VGBSTAT_LUFLO@GOTPAGE
Lloh180:
ldr x8, [x8, _SLIC_VGBSTAT_LUFLO@GOTPAGEOFF]
Lloh181:
ldr w8, [x8]
cmp w20, w8
b.ne LBB0_59
; %bb.57:
Lloh182:
adrp x8, _rx_uflow802@GOTPAGE
Lloh183:
ldr x8, [x8, _rx_uflow802@GOTPAGEOFF]
LBB0_58:
ldr w1, [x8]
mov x0, x19
bl _SLIC_INC_STATS_COUNTER
LBB0_59:
Lloh184:
adrp x8, _rx_errors@GOTPAGE
Lloh185:
ldr x8, [x8, _rx_errors@GOTPAGEOFF]
Lloh186:
ldr w1, [x8]
mov x0, x19
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
b _SLIC_INC_STATS_COUNTER
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGotLdr Lloh6, Lloh7, Lloh8
.loh AdrpLdrGotLdr Lloh9, Lloh10, Lloh11
.loh AdrpLdrGotLdr Lloh12, Lloh13, Lloh14
.loh AdrpLdrGotLdr Lloh15, Lloh16, Lloh17
.loh AdrpLdrGotLdr Lloh18, Lloh19, Lloh20
.loh AdrpLdrGotLdr Lloh21, Lloh22, Lloh23
.loh AdrpLdrGotLdr Lloh24, Lloh25, Lloh26
.loh AdrpLdrGotLdr Lloh27, Lloh28, Lloh29
.loh AdrpLdrGotLdr Lloh30, Lloh31, Lloh32
.loh AdrpLdrGotLdr Lloh33, Lloh34, Lloh35
.loh AdrpLdrGotLdr Lloh36, Lloh37, Lloh38
.loh AdrpLdrGotLdr Lloh39, Lloh40, Lloh41
.loh AdrpLdrGotLdr Lloh42, Lloh43, Lloh44
.loh AdrpLdrGotLdr Lloh45, Lloh46, Lloh47
.loh AdrpLdrGotLdr Lloh48, Lloh49, Lloh50
.loh AdrpLdrGotLdr Lloh51, Lloh52, Lloh53
.loh AdrpLdrGotLdr Lloh54, Lloh55, Lloh56
.loh AdrpLdrGotLdr Lloh57, Lloh58, Lloh59
.loh AdrpLdrGotLdr Lloh60, Lloh61, Lloh62
.loh AdrpLdrGotLdr Lloh63, Lloh64, Lloh65
.loh AdrpLdrGotLdr Lloh66, Lloh67, Lloh68
.loh AdrpLdrGotLdr Lloh69, Lloh70, Lloh71
.loh AdrpLdrGotLdr Lloh72, Lloh73, Lloh74
.loh AdrpLdrGotLdr Lloh75, Lloh76, Lloh77
.loh AdrpLdrGotLdr Lloh78, Lloh79, Lloh80
.loh AdrpLdrGotLdr Lloh81, Lloh82, Lloh83
.loh AdrpLdrGot Lloh84, Lloh85
.loh AdrpLdrGotLdr Lloh86, Lloh87, Lloh88
.loh AdrpLdrGotLdr Lloh92, Lloh93, Lloh94
.loh AdrpLdrGotLdr Lloh89, Lloh90, Lloh91
.loh AdrpLdrGotLdr Lloh95, Lloh96, Lloh97
.loh AdrpLdrGotLdr Lloh98, Lloh99, Lloh100
.loh AdrpLdrGotLdr Lloh101, Lloh102, Lloh103
.loh AdrpLdrGotLdr Lloh104, Lloh105, Lloh106
.loh AdrpLdrGotLdr Lloh107, Lloh108, Lloh109
.loh AdrpLdrGotLdr Lloh110, Lloh111, Lloh112
.loh AdrpLdrGotLdr Lloh119, Lloh120, Lloh121
.loh AdrpLdrGotLdr Lloh116, Lloh117, Lloh118
.loh AdrpLdrGotLdr Lloh113, Lloh114, Lloh115
.loh AdrpLdrGotLdr Lloh122, Lloh123, Lloh124
.loh AdrpLdrGotLdr Lloh125, Lloh126, Lloh127
.loh AdrpLdrGotLdr Lloh128, Lloh129, Lloh130
.loh AdrpLdrGotLdr Lloh131, Lloh132, Lloh133
.loh AdrpLdrGotLdr Lloh134, Lloh135, Lloh136
.loh AdrpLdrGotLdr Lloh137, Lloh138, Lloh139
.loh AdrpLdrGotLdr Lloh143, Lloh144, Lloh145
.loh AdrpLdrGotLdr Lloh140, Lloh141, Lloh142
.loh AdrpLdrGotLdr Lloh146, Lloh147, Lloh148
.loh AdrpLdrGotLdr Lloh149, Lloh150, Lloh151
.loh AdrpLdrGotLdr Lloh152, Lloh153, Lloh154
.loh AdrpLdrGotLdr Lloh155, Lloh156, Lloh157
.loh AdrpLdrGotLdr Lloh158, Lloh159, Lloh160
.loh AdrpLdrGotLdr Lloh161, Lloh162, Lloh163
.loh AdrpLdrGotLdr Lloh164, Lloh165, Lloh166
.loh AdrpLdrGotLdr Lloh167, Lloh168, Lloh169
.loh AdrpLdrGotLdr Lloh170, Lloh171, Lloh172
.loh AdrpLdrGotLdr Lloh173, Lloh174, Lloh175
.loh AdrpLdrGotLdr Lloh176, Lloh177, Lloh178
.loh AdrpLdrGotLdr Lloh179, Lloh180, Lloh181
.loh AdrpLdrGot Lloh182, Lloh183
.loh AdrpLdrGotLdr Lloh184, Lloh185, Lloh186
.cfi_endproc
; -- End function
.comm _SLIC_MODEL_OASIS,8,3 ; @SLIC_MODEL_OASIS
.comm _SLIC_VRHSTATB_TPCSUM,4,2 ; @SLIC_VRHSTATB_TPCSUM
.comm _rx_tpcsum,4,2 ; @rx_tpcsum
.comm _SLIC_VRHSTAT_TPOFLO,4,2 ; @SLIC_VRHSTAT_TPOFLO
.comm _rx_tpoflow,4,2 ; @rx_tpoflow
.comm _SLIC_VRHSTATB_TPHLEN,4,2 ; @SLIC_VRHSTATB_TPHLEN
.comm _rx_tphlen,4,2 ; @rx_tphlen
.comm _SLIC_VRHSTATB_IPCSUM,4,2 ; @SLIC_VRHSTATB_IPCSUM
.comm _rx_ipcsum,4,2 ; @rx_ipcsum
.comm _SLIC_VRHSTATB_IPLERR,4,2 ; @SLIC_VRHSTATB_IPLERR
.comm _rx_iplen,4,2 ; @rx_iplen
.comm _SLIC_VRHSTATB_IPHERR,4,2 ; @SLIC_VRHSTATB_IPHERR
.comm _rx_iphlen,4,2 ; @rx_iphlen
.comm _SLIC_VRHSTATB_RCVE,4,2 ; @SLIC_VRHSTATB_RCVE
.comm _rx_early,4,2 ; @rx_early
.comm _SLIC_VRHSTATB_BUFF,4,2 ; @SLIC_VRHSTATB_BUFF
.comm _rx_buffoflow,4,2 ; @rx_buffoflow
.comm _SLIC_VRHSTATB_CODE,4,2 ; @SLIC_VRHSTATB_CODE
.comm _rx_lcode,4,2 ; @rx_lcode
.comm _SLIC_VRHSTATB_DRBL,4,2 ; @SLIC_VRHSTATB_DRBL
.comm _rx_drbl,4,2 ; @rx_drbl
.comm _SLIC_VRHSTATB_CRC,4,2 ; @SLIC_VRHSTATB_CRC
.comm _rx_crc,4,2 ; @rx_crc
.comm _SLIC_VRHSTAT_802OE,4,2 ; @SLIC_VRHSTAT_802OE
.comm _rx_oflow802,4,2 ; @rx_oflow802
.comm _SLIC_VRHSTATB_802UE,4,2 ; @SLIC_VRHSTATB_802UE
.comm _rx_uflow802,4,2 ; @rx_uflow802
.comm _SLIC_VRHSTATB_CARRE,4,2 ; @SLIC_VRHSTATB_CARRE
.comm _tx_carrier,4,2 ; @tx_carrier
.comm _SLIC_VGBSTAT_XPERR,4,2 ; @SLIC_VGBSTAT_XPERR
.comm _SLIC_VGBSTAT_XERRSHFT,4,2 ; @SLIC_VGBSTAT_XERRSHFT
.comm _SLIC_VGBSTAT_XCSERR,4,2 ; @SLIC_VGBSTAT_XCSERR
.comm _SLIC_VGBSTAT_XUFLOW,4,2 ; @SLIC_VGBSTAT_XUFLOW
.comm _SLIC_VGBSTAT_XHLEN,4,2 ; @SLIC_VGBSTAT_XHLEN
.comm _SLIC_VGBSTAT_NETERR,4,2 ; @SLIC_VGBSTAT_NETERR
.comm _SLIC_VGBSTAT_NERRSHFT,4,2 ; @SLIC_VGBSTAT_NERRSHFT
.comm _SLIC_VGBSTAT_NERRMSK,4,2 ; @SLIC_VGBSTAT_NERRMSK
.comm _SLIC_VGBSTAT_NCSERR,4,2 ; @SLIC_VGBSTAT_NCSERR
.comm _SLIC_VGBSTAT_NUFLOW,4,2 ; @SLIC_VGBSTAT_NUFLOW
.comm _SLIC_VGBSTAT_NHLEN,4,2 ; @SLIC_VGBSTAT_NHLEN
.comm _SLIC_VGBSTAT_LNKERR,4,2 ; @SLIC_VGBSTAT_LNKERR
.comm _SLIC_VGBSTAT_LERRMSK,4,2 ; @SLIC_VGBSTAT_LERRMSK
.comm _SLIC_VGBSTAT_LDEARLY,4,2 ; @SLIC_VGBSTAT_LDEARLY
.comm _SLIC_VGBSTAT_LBOFLO,4,2 ; @SLIC_VGBSTAT_LBOFLO
.comm _SLIC_VGBSTAT_LCODERR,4,2 ; @SLIC_VGBSTAT_LCODERR
.comm _SLIC_VGBSTAT_LDBLNBL,4,2 ; @SLIC_VGBSTAT_LDBLNBL
.comm _SLIC_VGBSTAT_LCRCERR,4,2 ; @SLIC_VGBSTAT_LCRCERR
.comm _SLIC_VGBSTAT_LOFLO,4,2 ; @SLIC_VGBSTAT_LOFLO
.comm _SLIC_VGBSTAT_LUFLO,4,2 ; @SLIC_VGBSTAT_LUFLO
.comm _rx_errors,4,2 ; @rx_errors
.no_dead_strip _slic_handle_frame_error
.subsections_via_symbols
| AnghaBench/linux/drivers/net/ethernet/alacritech/extr_slicoss.c_slic_handle_frame_error.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function luaB_load
_luaB_load: ## @luaB_load
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $16, %rsp
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdi, %rbx
leaq -40(%rbp), %rdx
movl $1, %esi
callq _lua_tolstring
movq %rax, %r12
leaq L_.str(%rip), %rdx
movq %rbx, %rdi
movl $3, %esi
callq _luaL_optstring
movq %rax, %r15
movq %rbx, %rdi
movl $4, %esi
callq _lua_isnone
movl %eax, %r14d
testq %r12, %r12
je LBB0_2
## %bb.1:
movq %rbx, %rdi
movl $2, %esi
movq %r12, %rdx
callq _luaL_optstring
movq -40(%rbp), %rdx
movq %rbx, %rdi
movq %r12, %rsi
movq %rax, %rcx
movq %r15, %r8
callq _luaL_loadbufferx
jmp LBB0_3
LBB0_2:
leaq L_.str.1(%rip), %rdx
movq %rbx, %rdi
movl $2, %esi
callq _luaL_optstring
movq %rax, %r12
movq _LUA_TFUNCTION@GOTPCREL(%rip), %rax
movl (%rax), %edx
movq %rbx, %rdi
movl $1, %esi
callq _luaL_checktype
movq _RESERVEDSLOT@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq %rbx, %rdi
callq _lua_settop
movq _generic_reader@GOTPCREL(%rip), %rax
movl (%rax), %esi
movq %rbx, %rdi
xorl %edx, %edx
movq %r12, %rcx
movq %r15, %r8
callq _lua_load
LBB0_3:
xorl %edx, %edx
testl %r14d, %r14d
sete %dl
shll $2, %edx
movq %rbx, %rdi
movl %eax, %esi
callq _load_aux
addq $16, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "bt"
L_.str.1: ## @.str.1
.asciz "=(load)"
.comm _LUA_TFUNCTION,4,2 ## @LUA_TFUNCTION
.comm _RESERVEDSLOT,4,2 ## @RESERVEDSLOT
.comm _generic_reader,4,2 ## @generic_reader
.no_dead_strip _luaB_load
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function luaB_load
_luaB_load: ; @luaB_load
.cfi_startproc
; %bb.0:
sub sp, sp, #64
.cfi_def_cfa_offset 64
stp x22, x21, [sp, #16] ; 16-byte Folded Spill
stp x20, x19, [sp, #32] ; 16-byte Folded Spill
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x19, x0
add x2, sp, #8
mov w1, #1
bl _lua_tolstring
mov x22, x0
Lloh0:
adrp x2, l_.str@PAGE
Lloh1:
add x2, x2, l_.str@PAGEOFF
mov x0, x19
mov w1, #3
bl _luaL_optstring
mov x21, x0
mov x0, x19
mov w1, #4
bl _lua_isnone
mov x20, x0
cbz x22, LBB0_2
; %bb.1:
mov x0, x19
mov w1, #2
mov x2, x22
bl _luaL_optstring
mov x3, x0
ldr x2, [sp, #8]
mov x0, x19
mov x1, x22
mov x4, x21
bl _luaL_loadbufferx
b LBB0_3
LBB0_2:
Lloh2:
adrp x2, l_.str.1@PAGE
Lloh3:
add x2, x2, l_.str.1@PAGEOFF
mov x0, x19
mov w1, #2
bl _luaL_optstring
mov x22, x0
Lloh4:
adrp x8, _LUA_TFUNCTION@GOTPAGE
Lloh5:
ldr x8, [x8, _LUA_TFUNCTION@GOTPAGEOFF]
Lloh6:
ldr w2, [x8]
mov x0, x19
mov w1, #1
bl _luaL_checktype
Lloh7:
adrp x8, _RESERVEDSLOT@GOTPAGE
Lloh8:
ldr x8, [x8, _RESERVEDSLOT@GOTPAGEOFF]
Lloh9:
ldr w1, [x8]
mov x0, x19
bl _lua_settop
Lloh10:
adrp x8, _generic_reader@GOTPAGE
Lloh11:
ldr x8, [x8, _generic_reader@GOTPAGEOFF]
Lloh12:
ldr w1, [x8]
mov x0, x19
mov x2, #0
mov x3, x22
mov x4, x21
bl _lua_load
LBB0_3:
mov x1, x0
cmp w20, #0
cset w8, eq
lsl w2, w8, #2
mov x0, x19
bl _load_aux
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #64
ret
.loh AdrpAdd Lloh0, Lloh1
.loh AdrpLdrGotLdr Lloh10, Lloh11, Lloh12
.loh AdrpLdrGotLdr Lloh7, Lloh8, Lloh9
.loh AdrpLdrGotLdr Lloh4, Lloh5, Lloh6
.loh AdrpAdd Lloh2, Lloh3
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "bt"
l_.str.1: ; @.str.1
.asciz "=(load)"
.comm _LUA_TFUNCTION,4,2 ; @LUA_TFUNCTION
.comm _RESERVEDSLOT,4,2 ; @RESERVEDSLOT
.comm _generic_reader,4,2 ; @generic_reader
.no_dead_strip _luaB_load
.subsections_via_symbols
| AnghaBench/skynet/3rd/lua/extr_lbaselib.c_luaB_load.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _trace_seq_path ## -- Begin function trace_seq_path
.p2align 4, 0x90
_trace_seq_path: ## @trace_seq_path
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
xorl %r14d, %r14d
cmpl $0, (%rdi)
jne LBB0_6
## %bb.1:
movq %rsi, %r15
movq %rdi, %rbx
movl 4(%rdi), %r13d
callq ___trace_seq_init
movq %rbx, %rdi
callq _TRACE_SEQ_BUF_LEFT
testl %eax, %eax
jle LBB0_5
## %bb.2:
leaq 4(%rbx), %r12
leaq L_.str(%rip), %rdx
movq %r12, %rdi
movq %r15, %rsi
callq _seq_buf_path
movq %r12, %rdi
callq _seq_buf_has_overflowed
movl %eax, %edi
callq _unlikely
testq %rax, %rax
je LBB0_3
## %bb.4:
movl %r13d, (%r12)
LBB0_5:
movl $1, (%rbx)
jmp LBB0_6
LBB0_3:
movl $1, %r14d
LBB0_6:
movl %r14d, %eax
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "\n"
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _trace_seq_path ; -- Begin function trace_seq_path
.p2align 2
_trace_seq_path: ; @trace_seq_path
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
ldr w8, [x0]
cbz w8, LBB0_2
; %bb.1:
mov w0, #0
b LBB0_7
LBB0_2:
mov x21, x1
mov x19, x0
mov x20, x0
ldr w22, [x20, #4]!
bl ___trace_seq_init
mov x0, x19
bl _TRACE_SEQ_BUF_LEFT
cmp w0, #1
b.lt LBB0_5
; %bb.3:
Lloh0:
adrp x2, l_.str@PAGE
Lloh1:
add x2, x2, l_.str@PAGEOFF
mov x0, x20
mov x1, x21
bl _seq_buf_path
mov x0, x20
bl _seq_buf_has_overflowed
bl _unlikely
cbz x0, LBB0_6
; %bb.4:
str w22, [x20]
LBB0_5:
mov w0, #0
mov w8, #1
str w8, [x19]
b LBB0_7
LBB0_6:
mov w0, #1
LBB0_7:
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
.loh AdrpAdd Lloh0, Lloh1
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "\n"
.subsections_via_symbols
| AnghaBench/linux/kernel/trace/extr_trace_seq.c_trace_seq_path.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function dwc2_pci_remove
_dwc2_pci_remove: ## @dwc2_pci_remove
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq %rdi, %r14
callq _pci_get_drvdata
movq %rax, %rbx
movl 4(%rax), %edi
callq _platform_device_unregister
movl (%rbx), %edi
callq _usb_phy_generic_unregister
movq %r14, %rdi
xorl %esi, %esi
popq %rbx
popq %r14
popq %rbp
jmp _pci_set_drvdata ## TAILCALL
.cfi_endproc
## -- End function
.no_dead_strip _dwc2_pci_remove
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function dwc2_pci_remove
_dwc2_pci_remove: ; @dwc2_pci_remove
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x0
bl _pci_get_drvdata
mov x20, x0
ldr w0, [x0, #4]
bl _platform_device_unregister
ldr w0, [x20]
bl _usb_phy_generic_unregister
mov x0, x19
mov x1, #0
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
b _pci_set_drvdata
.cfi_endproc
; -- End function
.no_dead_strip _dwc2_pci_remove
.subsections_via_symbols
| AnghaBench/linux/drivers/usb/dwc2/extr_pci.c_dwc2_pci_remove.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $1240, %rsp ## imm = 0x4D8
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -48(%rbp)
leaq L_.str(%rip), %rdi
xorl %r13d, %r13d
xorl %eax, %eax
callq _printf
leaq L_.str.1(%rip), %r14
leaq -1264(%rbp), %rsi
movq %r14, %rdi
xorl %eax, %eax
callq _scanf
leaq L_.str.2(%rip), %rdi
xorl %eax, %eax
callq _printf
leaq -1252(%rbp), %rsi
movq %r14, %rdi
xorl %eax, %eax
callq _scanf
leaq L_.str.3(%rip), %rdi
xorl %eax, %eax
callq _printf
leaq -1256(%rbp), %rsi
movq %r14, %rdi
xorl %eax, %eax
callq _scanf
movl -1256(%rbp), %ebx
leaq L_str(%rip), %rdi
callq _puts
cmpl $0, -1252(%rbp)
jle LBB0_38
## %bb.1:
movl %ebx, -1260(%rbp) ## 4-byte Spill
leaq -448(%rbp), %rbx
leaq L_.str.1(%rip), %r15
xorl %r12d, %r12d
.p2align 4, 0x90
LBB0_2: ## =>This Inner Loop Header: Depth=1
movq %r15, %rdi
movq %rbx, %rsi
xorl %eax, %eax
callq _scanf
movl (%rbx), %eax
testl %eax, %eax
js LBB0_7
## %bb.3: ## in Loop: Header=BB0_2 Depth=1
cmpl -1264(%rbp), %eax
jge LBB0_7
## %bb.4: ## in Loop: Header=BB0_2 Depth=1
incq %r12
movslq -1252(%rbp), %rax
addq $4, %rbx
cmpq %rax, %r12
jl LBB0_2
## %bb.5:
movl %eax, %eax
testl %eax, %eax
jle LBB0_6
## %bb.8:
movl -1256(%rbp), %edi
movl %eax, %r15d
andl $-8, %r15d
leaq -8(%r15), %r10
movq %r10, -1280(%rbp) ## 8-byte Spill
shrq $3, %r10
incq %r10
leaq -1(%rax), %r11
movq %r10, %rcx
andq $-2, %rcx
movq %rcx, -1272(%rbp) ## 8-byte Spill
movl %eax, %r12d
andl $3, %r12d
movl %eax, %r9d
andl $-4, %r9d
xorl %r8d, %r8d
xorl %r13d, %r13d
jmp LBB0_9
.p2align 4, 0x90
LBB0_35: ## in Loop: Header=BB0_9 Depth=1
movslq %r14d, %rcx
movl -448(%rbp,%rcx,4), %edi
addl -848(%rbp,%rcx,4), %r13d
movl %edi, -1248(%rbp,%r8,4)
movl $99999, -448(%rbp,%rcx,4) ## imm = 0x1869F
incq %r8
cmpq %rax, %r8
je LBB0_36
LBB0_9: ## =>This Loop Header: Depth=1
## Child Loop BB0_14 Depth 2
## Child Loop BB0_18 Depth 2
## Child Loop BB0_20 Depth 2
## Child Loop BB0_32 Depth 2
cmpl $8, %eax
jae LBB0_11
## %bb.10: ## in Loop: Header=BB0_9 Depth=1
xorl %edx, %edx
jmp LBB0_18
.p2align 4, 0x90
LBB0_11: ## in Loop: Header=BB0_9 Depth=1
movd %edi, %xmm0
pshufd $0, %xmm0, %xmm0 ## xmm0 = xmm0[0,0,0,0]
cmpq $0, -1280(%rbp) ## 8-byte Folded Reload
je LBB0_12
## %bb.13: ## in Loop: Header=BB0_9 Depth=1
movq -1272(%rbp), %rbx ## 8-byte Reload
xorl %edx, %edx
.p2align 4, 0x90
LBB0_14: ## Parent Loop BB0_9 Depth=1
## => This Inner Loop Header: Depth=2
movdqa %xmm0, %xmm1
psubd -448(%rbp,%rdx,4), %xmm1
movdqa %xmm0, %xmm2
psubd -432(%rbp,%rdx,4), %xmm2
pabsd %xmm1, %xmm1
pabsd %xmm2, %xmm2
movdqa %xmm1, -848(%rbp,%rdx,4)
movdqa %xmm2, -832(%rbp,%rdx,4)
movdqa %xmm0, %xmm1
psubd -416(%rbp,%rdx,4), %xmm1
movdqa %xmm0, %xmm2
psubd -400(%rbp,%rdx,4), %xmm2
pabsd %xmm1, %xmm1
pabsd %xmm2, %xmm2
movdqa %xmm1, -816(%rbp,%rdx,4)
movdqa %xmm2, -800(%rbp,%rdx,4)
addq $16, %rdx
addq $-2, %rbx
jne LBB0_14
## %bb.15: ## in Loop: Header=BB0_9 Depth=1
testb $1, %r10b
je LBB0_17
LBB0_16: ## in Loop: Header=BB0_9 Depth=1
movdqa %xmm0, %xmm1
psubd -448(%rbp,%rdx,4), %xmm1
movdqa -432(%rbp,%rdx,4), %xmm2
psubd %xmm2, %xmm0
pabsd %xmm1, %xmm1
pabsd %xmm0, %xmm0
movdqa %xmm1, -848(%rbp,%rdx,4)
movdqa %xmm0, -832(%rbp,%rdx,4)
LBB0_17: ## in Loop: Header=BB0_9 Depth=1
movq %r15, %rdx
cmpq %rax, %r15
je LBB0_19
.p2align 4, 0x90
LBB0_18: ## Parent Loop BB0_9 Depth=1
## => This Inner Loop Header: Depth=2
movl %edi, %ecx
subl -448(%rbp,%rdx,4), %ecx
movl %ecx, %ebx
negl %ebx
cmovsl %ecx, %ebx
movl %ebx, -848(%rbp,%rdx,4)
incq %rdx
cmpq %rdx, %rax
jne LBB0_18
LBB0_19: ## in Loop: Header=BB0_9 Depth=1
xorl %ecx, %ecx
xorl %ebx, %ebx
cmpq $3, %r11
jae LBB0_20
LBB0_30: ## in Loop: Header=BB0_9 Depth=1
movl %ebx, %r14d
testq %r12, %r12
je LBB0_35
## %bb.31: ## in Loop: Header=BB0_9 Depth=1
movq %r12, %rdx
jmp LBB0_32
.p2align 4, 0x90
LBB0_34: ## in Loop: Header=BB0_32 Depth=2
incq %rcx
movl %r14d, %ebx
decq %rdx
je LBB0_35
LBB0_32: ## Parent Loop BB0_9 Depth=1
## => This Inner Loop Header: Depth=2
movl -848(%rbp,%rcx,4), %esi
movslq %ebx, %rdi
movl %ecx, %r14d
cmpl -848(%rbp,%rdi,4), %esi
jl LBB0_34
## %bb.33: ## in Loop: Header=BB0_32 Depth=2
movl %ebx, %r14d
jmp LBB0_34
.p2align 4, 0x90
LBB0_28: ## in Loop: Header=BB0_20 Depth=2
movl %edx, %ebx
LBB0_29: ## in Loop: Header=BB0_20 Depth=2
addq $4, %rcx
cmpq %r9, %rcx
je LBB0_30
LBB0_20: ## Parent Loop BB0_9 Depth=1
## => This Inner Loop Header: Depth=2
movslq %ebx, %rdi
movl -848(%rbp,%rcx,4), %esi
movl %ecx, %edx
cmpl -848(%rbp,%rdi,4), %esi
jl LBB0_22
## %bb.21: ## in Loop: Header=BB0_20 Depth=2
movl %ebx, %edx
LBB0_22: ## in Loop: Header=BB0_20 Depth=2
movl -844(%rbp,%rcx,4), %esi
movslq %edx, %rdi
cmpl -848(%rbp,%rdi,4), %esi
jge LBB0_24
## %bb.23: ## in Loop: Header=BB0_20 Depth=2
leal 1(%rcx), %edx
LBB0_24: ## in Loop: Header=BB0_20 Depth=2
movl -840(%rbp,%rcx,4), %esi
movslq %edx, %rdi
cmpl -848(%rbp,%rdi,4), %esi
jge LBB0_26
## %bb.25: ## in Loop: Header=BB0_20 Depth=2
leal 2(%rcx), %edx
LBB0_26: ## in Loop: Header=BB0_20 Depth=2
movl -836(%rbp,%rcx,4), %esi
movslq %edx, %rdi
cmpl -848(%rbp,%rdi,4), %esi
jge LBB0_28
## %bb.27: ## in Loop: Header=BB0_20 Depth=2
leal 3(%rcx), %ebx
jmp LBB0_29
LBB0_12: ## in Loop: Header=BB0_9 Depth=1
xorl %edx, %edx
testb $1, %r10b
jne LBB0_16
jmp LBB0_17
LBB0_7:
leaq L_.str.5(%rip), %rdi
xorl %eax, %eax
callq _printf
jmp LBB0_42
LBB0_36:
movl %edi, -1256(%rbp)
jmp LBB0_37
LBB0_6:
xorl %r13d, %r13d
LBB0_37:
movl -1260(%rbp), %ebx ## 4-byte Reload
LBB0_38:
leaq L_.str.6(%rip), %rdi
movl %ebx, %esi
xorl %eax, %eax
callq _printf
movl -1252(%rbp), %eax
testl %eax, %eax
jle LBB0_41
## %bb.39:
leaq L_.str.8(%rip), %r14
leaq L_.str.7(%rip), %r15
xorl %ebx, %ebx
.p2align 4, 0x90
LBB0_40: ## =>This Inner Loop Header: Depth=1
decl %eax
cmpq %rax, %rbx
movl -1248(%rbp,%rbx,4), %esi
movq %r15, %rdi
cmoveq %r14, %rdi
xorl %eax, %eax
callq _printf
incq %rbx
movslq -1252(%rbp), %rax
cmpq %rax, %rbx
jl LBB0_40
LBB0_41:
leaq L_.str.9(%rip), %rdi
movl %r13d, %esi
xorl %eax, %eax
callq _printf
LBB0_42:
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
cmpq -48(%rbp), %rax
jne LBB0_44
## %bb.43:
xorl %eax, %eax
addq $1240, %rsp ## imm = 0x4D8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB0_44:
callq ___stack_chk_fail
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "Enter the number of cylinders (0 to n-1): n = "
L_.str.1: ## @.str.1
.asciz "%d"
L_.str.2: ## @.str.2
.asciz "Enter the number of requested tracks: "
L_.str.3: ## @.str.3
.asciz "Enter the current location of pointer head: "
L_.str.5: ## @.str.5
.asciz "INVALID INPUT!!! ABORTING!!"
L_.str.6: ## @.str.6
.asciz "The requests are processed in the following order: %d -> "
L_.str.7: ## @.str.7
.asciz "%d -> "
L_.str.8: ## @.str.8
.asciz "%d \n"
L_.str.9: ## @.str.9
.asciz "The total seek distance is: %d \n"
L_str: ## @str
.asciz "Enter the requested tracks in FIFO orider: "
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
stp x24, x23, [sp, #-64]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 64
stp x22, x21, [sp, #16] ; 16-byte Folded Spill
stp x20, x19, [sp, #32] ; 16-byte Folded Spill
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
sub sp, sp, #1232
Lloh0:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh1:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh2:
ldr x8, [x8]
stur x8, [x29, #-56]
Lloh3:
adrp x0, l_.str@PAGE
Lloh4:
add x0, x0, l_.str@PAGEOFF
bl _printf
add x8, sp, #16
str x8, [sp]
Lloh5:
adrp x19, l_.str.1@PAGE
Lloh6:
add x19, x19, l_.str.1@PAGEOFF
mov x0, x19
bl _scanf
Lloh7:
adrp x0, l_.str.2@PAGE
Lloh8:
add x0, x0, l_.str.2@PAGEOFF
bl _printf
add x8, sp, #12
str x8, [sp]
mov x0, x19
bl _scanf
Lloh9:
adrp x0, l_.str.3@PAGE
Lloh10:
add x0, x0, l_.str.3@PAGEOFF
bl _printf
add x8, sp, #20
str x8, [sp]
mov x0, x19
bl _scanf
ldr w20, [sp, #20]
Lloh11:
adrp x0, l_str@PAGE
Lloh12:
add x0, x0, l_str@PAGEOFF
bl _puts
ldr w8, [sp, #12]
cmp w8, #1
b.lt LBB0_18
; %bb.1:
mov x21, #0
add x22, sp, #824
Lloh13:
adrp x19, l_.str.1@PAGE
Lloh14:
add x19, x19, l_.str.1@PAGEOFF
LBB0_2: ; =>This Inner Loop Header: Depth=1
str x22, [sp]
mov x0, x19
bl _scanf
ldr w8, [x22]
tbnz w8, #31, LBB0_23
; %bb.3: ; in Loop: Header=BB0_2 Depth=1
ldr w9, [sp, #16]
cmp w8, w9
b.ge LBB0_23
; %bb.4: ; in Loop: Header=BB0_2 Depth=1
add x21, x21, #1
ldrsw x8, [sp, #12]
add x22, x22, #4
cmp x21, x8
b.lt LBB0_2
; %bb.5:
and x8, x8, #0xffffffff
cmp w8, #1
b.lt LBB0_18
; %bb.6:
mov x9, #0
mov w19, #0
ldr w17, [sp, #20]
and x10, x8, #0xfffffff0
add x11, sp, #424
add x12, x11, #32
add x13, sp, #824
add x14, x13, #32
add x15, sp, #24
mov w16, #34463
movk w16, #1, lsl #16
LBB0_7: ; =>This Loop Header: Depth=1
; Child Loop BB0_10 Depth 2
; Child Loop BB0_13 Depth 2
; Child Loop BB0_15 Depth 2
cmp w8, #16
b.hs LBB0_9
; %bb.8: ; in Loop: Header=BB0_7 Depth=1
mov x2, #0
b LBB0_12
LBB0_9: ; in Loop: Header=BB0_7 Depth=1
dup.4s v0, w17
mov x0, x14
mov x1, x12
mov x2, x10
LBB0_10: ; Parent Loop BB0_7 Depth=1
; => This Inner Loop Header: Depth=2
ldp q1, q2, [x0, #-32]
ldp q3, q4, [x0], #64
sub.4s v1, v0, v1
sub.4s v2, v0, v2
sub.4s v3, v0, v3
sub.4s v4, v0, v4
abs.4s v1, v1
abs.4s v2, v2
abs.4s v3, v3
abs.4s v4, v4
stp q1, q2, [x1, #-32]
stp q3, q4, [x1], #64
subs x2, x2, #16
b.ne LBB0_10
; %bb.11: ; in Loop: Header=BB0_7 Depth=1
mov x2, x10
cmp x10, x8
b.eq LBB0_14
LBB0_12: ; in Loop: Header=BB0_7 Depth=1
lsl x1, x2, #2
add x0, x11, x1
add x1, x13, x1
sub x2, x8, x2
LBB0_13: ; Parent Loop BB0_7 Depth=1
; => This Inner Loop Header: Depth=2
ldr w3, [x1], #4
subs w3, w17, w3
cneg w3, w3, mi
str w3, [x0], #4
subs x2, x2, #1
b.ne LBB0_13
LBB0_14: ; in Loop: Header=BB0_7 Depth=1
mov x0, #0
mov w17, #0
LBB0_15: ; Parent Loop BB0_7 Depth=1
; => This Inner Loop Header: Depth=2
ldr w1, [x11, x0, lsl #2]
ldr w2, [x11, w17, sxtw #2]
cmp w1, w2
csel w17, w0, w17, lt
add x0, x0, #1
cmp x8, x0
b.ne LBB0_15
; %bb.16: ; in Loop: Header=BB0_7 Depth=1
sbfiz x0, x17, #2, #32
ldr w17, [x13, x0]
str w17, [x15, x9, lsl #2]
ldr w1, [x11, x0]
add w19, w1, w19
str w16, [x13, x0]
add x9, x9, #1
cmp x9, x8
b.ne LBB0_7
; %bb.17:
str w17, [sp, #20]
b LBB0_19
LBB0_18:
mov w19, #0
LBB0_19:
str x20, [sp]
Lloh15:
adrp x0, l_.str.6@PAGE
Lloh16:
add x0, x0, l_.str.6@PAGEOFF
bl _printf
ldr w8, [sp, #12]
cmp w8, #1
b.lt LBB0_22
; %bb.20:
mov x20, #0
Lloh17:
adrp x21, l_.str.7@PAGE
Lloh18:
add x21, x21, l_.str.7@PAGEOFF
add x22, sp, #24
Lloh19:
adrp x23, l_.str.8@PAGE
Lloh20:
add x23, x23, l_.str.8@PAGEOFF
LBB0_21: ; =>This Inner Loop Header: Depth=1
sub w8, w8, #1
cmp x20, x8
csel x0, x23, x21, eq
ldr w8, [x22, x20, lsl #2]
str x8, [sp]
bl _printf
add x20, x20, #1
ldrsw x8, [sp, #12]
cmp x20, x8
b.lt LBB0_21
LBB0_22:
str x19, [sp]
Lloh21:
adrp x0, l_.str.9@PAGE
Lloh22:
add x0, x0, l_.str.9@PAGEOFF
b LBB0_24
LBB0_23:
Lloh23:
adrp x0, l_.str.5@PAGE
Lloh24:
add x0, x0, l_.str.5@PAGEOFF
LBB0_24:
bl _printf
ldur x8, [x29, #-56]
Lloh25:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh26:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh27:
ldr x9, [x9]
cmp x9, x8
b.ne LBB0_26
; %bb.25:
mov w0, #0
add sp, sp, #1232
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
ldp x24, x23, [sp], #64 ; 16-byte Folded Reload
ret
LBB0_26:
bl ___stack_chk_fail
.loh AdrpAdd Lloh11, Lloh12
.loh AdrpAdd Lloh9, Lloh10
.loh AdrpAdd Lloh7, Lloh8
.loh AdrpAdd Lloh5, Lloh6
.loh AdrpAdd Lloh3, Lloh4
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpAdd Lloh13, Lloh14
.loh AdrpAdd Lloh15, Lloh16
.loh AdrpAdd Lloh19, Lloh20
.loh AdrpAdd Lloh17, Lloh18
.loh AdrpAdd Lloh21, Lloh22
.loh AdrpAdd Lloh23, Lloh24
.loh AdrpLdrGotLdr Lloh25, Lloh26, Lloh27
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "Enter the number of cylinders (0 to n-1): n = "
l_.str.1: ; @.str.1
.asciz "%d"
l_.str.2: ; @.str.2
.asciz "Enter the number of requested tracks: "
l_.str.3: ; @.str.3
.asciz "Enter the current location of pointer head: "
l_.str.5: ; @.str.5
.asciz "INVALID INPUT!!! ABORTING!!"
l_.str.6: ; @.str.6
.asciz "The requests are processed in the following order: %d -> "
l_.str.7: ; @.str.7
.asciz "%d -> "
l_.str.8: ; @.str.8
.asciz "%d \n"
l_.str.9: ; @.str.9
.asciz "The total seek distance is: %d \n"
l_str: ; @str
.asciz "Enter the requested tracks in FIFO orider: "
.subsections_via_symbols
| the_stack_data/126231.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _thr_func ## -- Begin function thr_func
.p2align 4, 0x90
_thr_func: ## @thr_func
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
leaq L_.str(%rip), %rbx
movq %rbx, %rdi
movl $1, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $2, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $3, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $4, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $5, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $6, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $7, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $8, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $9, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $10, %esi
xorl %eax, %eax
callq _printf
xorl %eax, %eax
addq $8, %rsp
popq %rbx
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
leaq _thr_func(%rip), %rdx
leaq -16(%rbp), %rdi
xorl %esi, %esi
xorl %ecx, %ecx
callq _pthread_create
leaq L_.str.1(%rip), %rbx
movq %rbx, %rdi
movl $1, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $2, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $3, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $4, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $5, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $6, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $7, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $8, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $9, %esi
xorl %eax, %eax
callq _printf
movq %rbx, %rdi
movl $10, %esi
xorl %eax, %eax
callq _printf
movl $1, %edi
callq _sleep
xorl %eax, %eax
addq $8, %rsp
popq %rbx
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "thread: %d\n"
L_.str.1: ## @.str.1
.asciz "main: %d\n"
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _thr_func ; -- Begin function thr_func
.p2align 2
_thr_func: ; @thr_func
.cfi_startproc
; %bb.0:
sub sp, sp, #48
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov w8, #1
str x8, [sp]
Lloh0:
adrp x19, l_.str@PAGE
Lloh1:
add x19, x19, l_.str@PAGEOFF
mov x0, x19
bl _printf
mov w8, #2
str x8, [sp]
mov x0, x19
bl _printf
mov w8, #3
str x8, [sp]
mov x0, x19
bl _printf
mov w8, #4
str x8, [sp]
mov x0, x19
bl _printf
mov w8, #5
str x8, [sp]
mov x0, x19
bl _printf
mov w8, #6
str x8, [sp]
mov x0, x19
bl _printf
mov w8, #7
str x8, [sp]
mov x0, x19
bl _printf
mov w8, #8
str x8, [sp]
mov x0, x19
bl _printf
mov w8, #9
str x8, [sp]
mov x0, x19
bl _printf
mov w8, #10
str x8, [sp]
mov x0, x19
bl _printf
mov x0, #0
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #48
ret
.loh AdrpAdd Lloh0, Lloh1
.cfi_endproc
; -- End function
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
sub sp, sp, #48
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
Lloh2:
adrp x2, _thr_func@PAGE
Lloh3:
add x2, x2, _thr_func@PAGEOFF
add x0, sp, #8
mov x1, #0
mov x3, #0
bl _pthread_create
mov w8, #1
str x8, [sp]
Lloh4:
adrp x19, l_.str.1@PAGE
Lloh5:
add x19, x19, l_.str.1@PAGEOFF
mov x0, x19
bl _printf
mov w8, #2
str x8, [sp]
mov x0, x19
bl _printf
mov w8, #3
str x8, [sp]
mov x0, x19
bl _printf
mov w8, #4
str x8, [sp]
mov x0, x19
bl _printf
mov w8, #5
str x8, [sp]
mov x0, x19
bl _printf
mov w8, #6
str x8, [sp]
mov x0, x19
bl _printf
mov w8, #7
str x8, [sp]
mov x0, x19
bl _printf
mov w8, #8
str x8, [sp]
mov x0, x19
bl _printf
mov w8, #9
str x8, [sp]
mov x0, x19
bl _printf
mov w8, #10
str x8, [sp]
mov x0, x19
bl _printf
mov w0, #1
bl _sleep
mov w0, #0
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #48
ret
.loh AdrpAdd Lloh4, Lloh5
.loh AdrpAdd Lloh2, Lloh3
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "thread: %d\n"
l_.str.1: ; @.str.1
.asciz "main: %d\n"
.subsections_via_symbols
| the_stack_data/12637177.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function check_object_files
_check_object_files: ## @check_object_files
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
.cfi_offset %rbx, -24
movq %rdi, %rbx
movl 4(%rdi), %edi
callq _git_path_exists
movl %eax, %edi
callq _cl_assert
movl (%rbx), %edi
callq _git_path_exists
movl %eax, %edi
addq $8, %rsp
popq %rbx
popq %rbp
jmp _cl_assert ## TAILCALL
.cfi_endproc
## -- End function
.no_dead_strip _check_object_files
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function check_object_files
_check_object_files: ; @check_object_files
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x0
ldr w0, [x0, #4]
bl _git_path_exists
bl _cl_assert
ldr w0, [x19]
bl _git_path_exists
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
b _cl_assert
.cfi_endproc
; -- End function
.no_dead_strip _check_object_files
.subsections_via_symbols
| AnghaBench/libgit2/tests/object/raw/extr_write.c_check_object_files.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function simple_hash_strcmp
_simple_hash_strcmp: ## @simple_hash_strcmp
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movb (%rdi), %r8b
testb %r8b, %r8b
je LBB0_1
## %bb.2:
incq %rdi
movl $-2128831035, %ecx ## imm = 0x811C9DC5
xorl %eax, %eax
jmp LBB0_3
.p2align 4, 0x90
LBB0_5: ## in Loop: Header=BB0_3 Depth=1
imull $16777619, %ecx, %ecx ## imm = 0x1000193
xorl %r8d, %ecx
movzbl (%rdi), %r8d
incq %rdi
testb %r8b, %r8b
je LBB0_6
LBB0_3: ## =>This Inner Loop Header: Depth=1
movzbl %r8b, %r8d
testl %eax, %eax
jne LBB0_5
## %bb.4: ## in Loop: Header=BB0_3 Depth=1
movsbl (%rsi), %r9d
incq %rsi
movl %r8d, %eax
subl %r9d, %eax
jmp LBB0_5
LBB0_1:
xorl %eax, %eax
movl $-2128831035, %ecx ## imm = 0x811C9DC5
LBB0_6:
movl %ecx, (%rdx)
popq %rbp
retq
.cfi_endproc
## -- End function
.no_dead_strip _simple_hash_strcmp
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function simple_hash_strcmp
_simple_hash_strcmp: ; @simple_hash_strcmp
.cfi_startproc
; %bb.0:
mov w9, #40389
movk w9, #33052, lsl #16
ldrb w10, [x0]
cbz w10, LBB0_6
; %bb.1:
mov x8, x0
mov w0, #0
add x8, x8, #1
mov w11, #403
movk w11, #256, lsl #16
b LBB0_3
LBB0_2: ; in Loop: Header=BB0_3 Depth=1
mul w9, w9, w11
eor w9, w9, w10
ldrb w10, [x8], #1
cbz w10, LBB0_5
LBB0_3: ; =>This Inner Loop Header: Depth=1
cbnz w0, LBB0_2
; %bb.4: ; in Loop: Header=BB0_3 Depth=1
ldrsb w12, [x1], #1
sub w0, w10, w12
b LBB0_2
LBB0_5:
str w9, [x2]
ret
LBB0_6:
mov w0, #0
str w9, [x2]
ret
.cfi_endproc
; -- End function
.no_dead_strip _simple_hash_strcmp
.subsections_via_symbols
| AnghaBench/netdata/collectors/proc.plugin/extr_....daemon..libnetdatainlined.h_simple_hash_strcmp.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _saa7134_i2c_unregister ## -- Begin function saa7134_i2c_unregister
.p2align 4, 0x90
_saa7134_i2c_unregister: ## @saa7134_i2c_unregister
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
callq _i2c_del_adapter
xorl %eax, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _saa7134_i2c_unregister ; -- Begin function saa7134_i2c_unregister
.p2align 2
_saa7134_i2c_unregister: ; @saa7134_i2c_unregister
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
bl _i2c_del_adapter
mov w0, #0
ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
.subsections_via_symbols
| AnghaBench/fastsocket/kernel/drivers/media/video/saa7134/extr_saa7134-i2c.c_saa7134_i2c_unregister.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function cxl_read_afu_descriptor
_cxl_read_afu_descriptor: ## @cxl_read_afu_descriptor
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq %rdi, %rbx
callq _AFUD_READ_INFO
movl %eax, %r14d
movl %eax, %edi
callq _AFUD_NUM_INTS_PER_PROC
movl %eax, 52(%rbx)
movl %r14d, %edi
callq _AFUD_NUM_PROCS
movl %eax, 48(%rbx)
movl %r14d, %edi
callq _AFUD_NUM_CRS
movl %eax, 44(%rbx)
movl %r14d, %edi
callq _AFUD_AFU_DIRECTED
testq %rax, %rax
je LBB0_2
## %bb.1:
movq _CXL_MODE_DIRECTED@GOTPCREL(%rip), %rax
movl (%rax), %eax
orl %eax, 40(%rbx)
LBB0_2:
movl %r14d, %edi
callq _AFUD_DEDICATED_PROCESS
testq %rax, %rax
je LBB0_4
## %bb.3:
movq _CXL_MODE_DEDICATED@GOTPCREL(%rip), %rax
movl (%rax), %eax
orl %eax, 40(%rbx)
LBB0_4:
movl %r14d, %edi
callq _AFUD_TIME_SLICED
testq %rax, %rax
je LBB0_6
## %bb.5:
movq _CXL_MODE_TIME_SLICED@GOTPCREL(%rip), %rax
movl (%rax), %eax
orl %eax, 40(%rbx)
LBB0_6:
movq %rbx, %rdi
callq _AFUD_READ_PPPSA
movl %eax, %r14d
movl %eax, %edi
callq _AFUD_PPPSA_LEN
shll $12, %eax
movl %eax, (%rbx)
movl %r14d, %edi
callq _AFUD_PPPSA_PSA
movl %eax, 36(%rbx)
movl %r14d, %edi
callq _AFUD_PPPSA_PP
movl %eax, 32(%rbx)
testl %eax, %eax
je LBB0_8
## %bb.7:
movq %rbx, %rdi
callq _AFUD_READ_PPPSA_OFF
movq 24(%rbx), %rcx
movl %eax, (%rcx)
LBB0_8:
movq %rbx, %rdi
callq _AFUD_READ_CR
movl %eax, %edi
callq _AFUD_CR_LEN
shll $8, %eax
movl %eax, 4(%rbx)
movq %rbx, %rdi
callq _AFUD_READ_CR_OFF
movl %eax, 20(%rbx)
movq %rbx, %rdi
callq _AFUD_READ_EB
movl %eax, %edi
callq _AFUD_EB_LEN
shll $12, %eax
movl %eax, 8(%rbx)
movq %rbx, %rdi
callq _AFUD_READ_EB_OFF
movl %eax, 16(%rbx)
movl %eax, %edi
xorl %esi, %esi
movl $11, %edx
callq _EXTRACT_PPC_BITS
testq %rax, %rax
je LBB0_10
## %bb.9:
leaq 12(%rbx), %r14
movl 16(%rbx), %edx
leaq L_.str(%rip), %rsi
movq %r14, %rdi
callq _dev_warn
leaq L_.str.1(%rip), %rsi
movq %r14, %rdi
callq _dev_info
movl $0, 8(%rbx)
LBB0_10:
xorl %eax, %eax
popq %rbx
popq %r14
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _CXL_MODE_DIRECTED,4,2 ## @CXL_MODE_DIRECTED
.comm _CXL_MODE_DEDICATED,4,2 ## @CXL_MODE_DEDICATED
.comm _CXL_MODE_TIME_SLICED,4,2 ## @CXL_MODE_TIME_SLICED
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "Invalid AFU error buffer offset %Lx\n"
L_.str.1: ## @.str.1
.asciz "Ignoring AFU error buffer in the descriptor\n"
.no_dead_strip _cxl_read_afu_descriptor
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function cxl_read_afu_descriptor
_cxl_read_afu_descriptor: ; @cxl_read_afu_descriptor
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x0
bl _AFUD_READ_INFO
mov x20, x0
bl _AFUD_NUM_INTS_PER_PROC
str w0, [x19, #52]
mov x0, x20
bl _AFUD_NUM_PROCS
str w0, [x19, #48]
mov x0, x20
bl _AFUD_NUM_CRS
str w0, [x19, #44]
mov x0, x20
bl _AFUD_AFU_DIRECTED
cbz x0, LBB0_2
; %bb.1:
Lloh0:
adrp x8, _CXL_MODE_DIRECTED@GOTPAGE
Lloh1:
ldr x8, [x8, _CXL_MODE_DIRECTED@GOTPAGEOFF]
Lloh2:
ldr w8, [x8]
ldr w9, [x19, #40]
orr w8, w9, w8
str w8, [x19, #40]
LBB0_2:
mov x0, x20
bl _AFUD_DEDICATED_PROCESS
cbz x0, LBB0_4
; %bb.3:
Lloh3:
adrp x8, _CXL_MODE_DEDICATED@GOTPAGE
Lloh4:
ldr x8, [x8, _CXL_MODE_DEDICATED@GOTPAGEOFF]
Lloh5:
ldr w8, [x8]
ldr w9, [x19, #40]
orr w8, w9, w8
str w8, [x19, #40]
LBB0_4:
mov x0, x20
bl _AFUD_TIME_SLICED
cbz x0, LBB0_6
; %bb.5:
Lloh6:
adrp x8, _CXL_MODE_TIME_SLICED@GOTPAGE
Lloh7:
ldr x8, [x8, _CXL_MODE_TIME_SLICED@GOTPAGEOFF]
Lloh8:
ldr w8, [x8]
ldr w9, [x19, #40]
orr w8, w9, w8
str w8, [x19, #40]
LBB0_6:
mov x0, x19
bl _AFUD_READ_PPPSA
mov x20, x0
bl _AFUD_PPPSA_LEN
lsl w8, w0, #12
str w8, [x19]
mov x0, x20
bl _AFUD_PPPSA_PSA
str w0, [x19, #36]
mov x0, x20
bl _AFUD_PPPSA_PP
str w0, [x19, #32]
cbz w0, LBB0_8
; %bb.7:
mov x0, x19
bl _AFUD_READ_PPPSA_OFF
ldr x8, [x19, #24]
str w0, [x8]
LBB0_8:
mov x0, x19
bl _AFUD_READ_CR
bl _AFUD_CR_LEN
lsl w8, w0, #8
str w8, [x19, #4]
mov x0, x19
bl _AFUD_READ_CR_OFF
str w0, [x19, #20]
mov x0, x19
bl _AFUD_READ_EB
bl _AFUD_EB_LEN
lsl w8, w0, #12
str w8, [x19, #8]
mov x0, x19
bl _AFUD_READ_EB_OFF
str w0, [x19, #16]
mov w1, #0
mov w2, #11
bl _EXTRACT_PPC_BITS
cbz x0, LBB0_10
; %bb.9:
add x20, x19, #12
ldr w2, [x19, #16]
Lloh9:
adrp x1, l_.str@PAGE
Lloh10:
add x1, x1, l_.str@PAGEOFF
mov x0, x20
bl _dev_warn
Lloh11:
adrp x1, l_.str.1@PAGE
Lloh12:
add x1, x1, l_.str.1@PAGEOFF
mov x0, x20
bl _dev_info
str wzr, [x19, #8]
LBB0_10:
mov w0, #0
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGotLdr Lloh6, Lloh7, Lloh8
.loh AdrpAdd Lloh11, Lloh12
.loh AdrpAdd Lloh9, Lloh10
.cfi_endproc
; -- End function
.comm _CXL_MODE_DIRECTED,4,2 ; @CXL_MODE_DIRECTED
.comm _CXL_MODE_DEDICATED,4,2 ; @CXL_MODE_DEDICATED
.comm _CXL_MODE_TIME_SLICED,4,2 ; @CXL_MODE_TIME_SLICED
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "Invalid AFU error buffer offset %Lx\n"
l_.str.1: ; @.str.1
.asciz "Ignoring AFU error buffer in the descriptor\n"
.no_dead_strip _cxl_read_afu_descriptor
.subsections_via_symbols
| AnghaBench/linux/drivers/misc/cxl/extr_pci.c_cxl_read_afu_descriptor.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _sEnInterrupts ## -- Begin function sEnInterrupts
.p2align 4, 0x90
_sEnInterrupts: ## @sEnInterrupts
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %esi, %r14d
movq %rdi, %rbx
movq _RXINT_EN@GOTPCREL(%rip), %rax
movq _SRCINT_EN@GOTPCREL(%rip), %rcx
movl (%rcx), %ecx
orl (%rax), %ecx
movq _MCINT_EN@GOTPCREL(%rip), %rax
orl (%rax), %ecx
andl %esi, %ecx
movq (%rdi), %rdi
orl %ecx, 8(%rdi)
movq __INDX_ADDR@GOTPCREL(%rip), %r12
movl (%r12), %r15d
callq _le32dec
movq %rbx, %rdi
movl %r15d, %esi
movl %eax, %edx
callq _rp_writech4
movq _TXINT_EN@GOTPCREL(%rip), %rax
movl (%rax), %eax
andl %r14d, %eax
movq 8(%rbx), %rdi
orl %eax, 8(%rdi)
movl (%r12), %r15d
callq _le32dec
movq %rbx, %rdi
movl %r15d, %esi
movl %eax, %edx
callq _rp_writech4
movq _CHANINT_EN@GOTPCREL(%rip), %rax
testl %r14d, (%rax)
je LBB0_1
## %bb.2:
movq __INT_MASK@GOTPCREL(%rip), %r14
movl (%r14), %esi
movq %rbx, %rdi
callq _rp_readch1
movq _rp_sBitMapSetTbl@GOTPCREL(%rip), %rcx
movq (%rcx), %rcx
movq 16(%rbx), %rdx
orl (%rcx,%rdx,4), %eax
movl (%r14), %esi
movq %rbx, %rdi
movl %eax, %edx
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
jmp _rp_writech1 ## TAILCALL
LBB0_1:
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _RXINT_EN,4,2 ## @RXINT_EN
.comm _SRCINT_EN,4,2 ## @SRCINT_EN
.comm _MCINT_EN,4,2 ## @MCINT_EN
.comm __INDX_ADDR,4,2 ## @_INDX_ADDR
.comm _TXINT_EN,4,2 ## @TXINT_EN
.comm _CHANINT_EN,4,2 ## @CHANINT_EN
.comm __INT_MASK,4,2 ## @_INT_MASK
.comm _rp_sBitMapSetTbl,8,3 ## @rp_sBitMapSetTbl
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _sEnInterrupts ; -- Begin function sEnInterrupts
.p2align 2
_sEnInterrupts: ; @sEnInterrupts
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x20, x1
mov x19, x0
Lloh0:
adrp x8, _RXINT_EN@GOTPAGE
Lloh1:
ldr x8, [x8, _RXINT_EN@GOTPAGEOFF]
Lloh2:
ldr w8, [x8]
Lloh3:
adrp x9, _SRCINT_EN@GOTPAGE
Lloh4:
ldr x9, [x9, _SRCINT_EN@GOTPAGEOFF]
Lloh5:
ldr w9, [x9]
orr w8, w9, w8
Lloh6:
adrp x9, _MCINT_EN@GOTPAGE
Lloh7:
ldr x9, [x9, _MCINT_EN@GOTPAGEOFF]
Lloh8:
ldr w9, [x9]
orr w8, w8, w9
and w8, w8, w1
ldr x0, [x0]
ldr w9, [x0, #8]
orr w8, w9, w8
str w8, [x0, #8]
Lloh9:
adrp x22, __INDX_ADDR@GOTPAGE
Lloh10:
ldr x22, [x22, __INDX_ADDR@GOTPAGEOFF]
ldr w21, [x22]
bl _le32dec
mov x2, x0
mov x0, x19
mov x1, x21
bl _rp_writech4
Lloh11:
adrp x8, _TXINT_EN@GOTPAGE
Lloh12:
ldr x8, [x8, _TXINT_EN@GOTPAGEOFF]
Lloh13:
ldr w8, [x8]
and w8, w8, w20
ldr x0, [x19, #8]
ldr w9, [x0, #8]
orr w8, w9, w8
str w8, [x0, #8]
ldr w21, [x22]
bl _le32dec
mov x2, x0
mov x0, x19
mov x1, x21
bl _rp_writech4
Lloh14:
adrp x8, _CHANINT_EN@GOTPAGE
Lloh15:
ldr x8, [x8, _CHANINT_EN@GOTPAGEOFF]
Lloh16:
ldr w8, [x8]
tst w8, w20
b.eq LBB0_2
; %bb.1:
Lloh17:
adrp x20, __INT_MASK@GOTPAGE
Lloh18:
ldr x20, [x20, __INT_MASK@GOTPAGEOFF]
ldr w1, [x20]
mov x0, x19
bl _rp_readch1
Lloh19:
adrp x8, _rp_sBitMapSetTbl@GOTPAGE
Lloh20:
ldr x8, [x8, _rp_sBitMapSetTbl@GOTPAGEOFF]
Lloh21:
ldr x8, [x8]
ldr x9, [x19, #16]
ldr w8, [x8, x9, lsl #2]
orr w2, w8, w0
ldr w1, [x20]
mov x0, x19
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
b _rp_writech1
LBB0_2:
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
.loh AdrpLdrGotLdr Lloh14, Lloh15, Lloh16
.loh AdrpLdrGotLdr Lloh11, Lloh12, Lloh13
.loh AdrpLdrGot Lloh9, Lloh10
.loh AdrpLdrGotLdr Lloh6, Lloh7, Lloh8
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGotLdr Lloh19, Lloh20, Lloh21
.loh AdrpLdrGot Lloh17, Lloh18
.cfi_endproc
; -- End function
.comm _RXINT_EN,4,2 ; @RXINT_EN
.comm _SRCINT_EN,4,2 ; @SRCINT_EN
.comm _MCINT_EN,4,2 ; @MCINT_EN
.comm __INDX_ADDR,4,2 ; @_INDX_ADDR
.comm _TXINT_EN,4,2 ; @TXINT_EN
.comm _CHANINT_EN,4,2 ; @CHANINT_EN
.comm __INT_MASK,4,2 ; @_INT_MASK
.comm _rp_sBitMapSetTbl,8,3 ; @rp_sBitMapSetTbl
.subsections_via_symbols
| AnghaBench/freebsd/sys/dev/rp/extr_rp.c_sEnInterrupts.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function cfq_should_idle
_cfq_should_idle: ## @cfq_should_idle
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rsi, %r15
movq %rdi, %r14
movq %rsi, %rdi
callq _cfqq_prio
movl %eax, %ebx
movq (%r15), %r12
xorl %edi, %edi
testq %r12, %r12
sete %dil
callq _BUG_ON
xorl %edi, %edi
cmpl $0, (%r12)
sete %dil
callq _BUG_ON
movq _IDLE_WORKLOAD@GOTPCREL(%rip), %rcx
xorl %eax, %eax
cmpl (%rcx), %ebx
je LBB0_9
## %bb.1:
cmpq $0, (%r14)
je LBB0_9
## %bb.2:
movq %r15, %rdi
callq _cfq_cfqq_idle_window
testq %rax, %rax
je LBB0_5
## %bb.3:
movl 16(%r14), %edi
callq _blk_queue_nonrot
movq %rax, %rcx
movl $1, %eax
testq %rcx, %rcx
je LBB0_9
## %bb.4:
cmpq $0, 8(%r14)
je LBB0_9
LBB0_5:
movl (%r12), %ecx
cmpl $1, %ecx
jne LBB0_8
## %bb.6:
movq %r15, %rdi
callq _cfq_cfqq_sync
movq %rax, %rcx
movl $1, %eax
testq %rcx, %rcx
jne LBB0_9
## %bb.7:
movl (%r12), %ecx
LBB0_8:
leaq L_.str(%rip), %rdx
movq %r14, %rdi
movq %r15, %rsi
callq _cfq_log_cfqq
xorl %eax, %eax
LBB0_9:
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _IDLE_WORKLOAD,4,2 ## @IDLE_WORKLOAD
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "Not idling. st->count:%d"
.no_dead_strip _cfq_should_idle
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function cfq_should_idle
_cfq_should_idle: ; @cfq_should_idle
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x19, x1
mov x20, x0
mov x0, x1
bl _cfqq_prio
mov x21, x0
ldr x22, [x19]
cmp x22, #0
cset w0, eq
bl _BUG_ON
ldr w8, [x22]
cmp w8, #0
cset w0, eq
bl _BUG_ON
Lloh0:
adrp x8, _IDLE_WORKLOAD@GOTPAGE
Lloh1:
ldr x8, [x8, _IDLE_WORKLOAD@GOTPAGEOFF]
Lloh2:
ldr w8, [x8]
cmp w21, w8
b.eq LBB0_10
; %bb.1:
ldr x8, [x20]
cbz x8, LBB0_10
; %bb.2:
mov x0, x19
bl _cfq_cfqq_idle_window
cbz x0, LBB0_5
; %bb.3:
ldr w0, [x20, #16]
bl _blk_queue_nonrot
cbz x0, LBB0_7
; %bb.4:
ldr x8, [x20, #8]
cbz x8, LBB0_7
LBB0_5:
ldr w3, [x22]
cmp w3, #1
b.ne LBB0_9
; %bb.6:
mov x0, x19
bl _cfq_cfqq_sync
cbz x0, LBB0_8
LBB0_7:
mov w0, #1
b LBB0_11
LBB0_8:
ldr w3, [x22]
LBB0_9:
Lloh3:
adrp x2, l_.str@PAGE
Lloh4:
add x2, x2, l_.str@PAGEOFF
mov x0, x20
mov x1, x19
bl _cfq_log_cfqq
LBB0_10:
mov w0, #0
LBB0_11:
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpAdd Lloh3, Lloh4
.cfi_endproc
; -- End function
.comm _IDLE_WORKLOAD,4,2 ; @IDLE_WORKLOAD
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "Not idling. st->count:%d"
.no_dead_strip _cfq_should_idle
.subsections_via_symbols
| AnghaBench/fastsocket/kernel/block/extr_cfq-iosched.c_cfq_should_idle.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
xorl %eax, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
mov w0, #0
ret
.cfi_endproc
; -- End function
.subsections_via_symbols
| the_stack_data/31387828.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _pmap_bios16_leave ## -- Begin function pmap_bios16_leave
.p2align 4, 0x90
_pmap_bios16_leave: ## @pmap_bios16_leave
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq _pmap_methods_ptr@GOTPCREL(%rip), %rax
movq (%rax), %rax
popq %rbp
jmpq *(%rax) ## TAILCALL
.cfi_endproc
## -- End function
.comm _pmap_methods_ptr,8,3 ## @pmap_methods_ptr
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _pmap_bios16_leave ; -- Begin function pmap_bios16_leave
.p2align 2
_pmap_bios16_leave: ; @pmap_bios16_leave
.cfi_startproc
; %bb.0:
Lloh0:
adrp x8, _pmap_methods_ptr@GOTPAGE
Lloh1:
ldr x8, [x8, _pmap_methods_ptr@GOTPAGEOFF]
Lloh2:
ldr x8, [x8]
ldr x1, [x8]
br x1
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _pmap_methods_ptr,8,3 ; @pmap_methods_ptr
.subsections_via_symbols
| AnghaBench/freebsd/sys/i386/i386/extr_pmap_base.c_pmap_bios16_leave.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _zip_deflate ## -- Begin function zip_deflate
.p2align 4, 0x90
_zip_deflate: ## @zip_deflate
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdx, %r15
movq %rsi, %r13
movq %rdi, %r14
leaq 24(%rsi), %rbx
movq %rbx, %rdx
callq _prepare_zip_command
movq (%r14), %rdi
leaq 12(%r15), %rsi
callq _atomic64_add
movq %rbx, %rdi
movq %r15, %rsi
callq _zip_load_instr
movl %eax, %r12d
leaq 8(%r15), %rdi
callq _atomic64_inc
movq %r13, %rdi
callq _zip_poll_result
leaq 4(%r15), %rdi
callq _atomic64_inc
movslq 16(%r13), %rsi
movl %esi, 28(%r14)
cmpq $129, %rsi
je LBB0_4
## %bb.1:
cmpl $131, %esi
je LBB0_7
## %bb.2:
cmpl $130, %esi
jne LBB0_9
## %bb.3:
leaq L_.str(%rip), %rdi
jmp LBB0_8
LBB0_4:
leaq L_.str.1(%rip), %rdi
xorl %eax, %eax
callq _zip_dbg
movq %r15, %rdi
movl %r12d, %esi
callq _zip_update_cmd_bufs
movslq 8(%r14), %rsi
leal -128(%rsi), %eax
cmpl $6, %eax
ja LBB0_11
## %bb.5:
leaq LJTI0_0(%rip), %rcx
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
LBB0_6:
leaq L_.str.5(%rip), %rdi
movl $128, %esi
jmp LBB0_13
LBB0_7:
leaq L_.str.2(%rip), %rdi
LBB0_8:
xorl %eax, %eax
callq _zip_dbg
jmp LBB0_10
LBB0_9:
leaq L_.str.3(%rip), %rdi
xorl %eax, %eax
callq _zip_err
LBB0_10:
movq _ZIP_ERROR@GOTPCREL(%rip), %rax
movl (%rax), %eax
jmp LBB0_20
LBB0_11:
leaq L_.str.8(%rip), %rdi
xorl %eax, %eax
callq _zip_err
jmp LBB0_17
LBB0_12:
leaq L_.str.4(%rip), %rdi
movl $132, %esi
LBB0_13:
xorl %eax, %eax
callq _zip_dbg
movl 12(%r13), %eax
jmp LBB0_16
LBB0_14:
leaq L_.str.7(%rip), %rdi
movl $133, %esi
xorl %eax, %eax
callq _zip_dbg
jmp LBB0_17
LBB0_15:
leaq L_.str.6(%rip), %rdi
movl $134, %esi
xorl %eax, %eax
callq _zip_dbg
movl 8(%r13), %eax
LBB0_16:
movl %eax, 24(%r14)
LBB0_17:
movq (%r13), %rdi
movq %r15, %rsi
callq _atomic64_add
movq 16(%r14), %rsi
movq (%r13), %rdx
cmpq %rdx, %rsi
jge LBB0_19
## %bb.18:
leaq L_.str.9(%rip), %rdi
xorl %eax, %eax
callq _zip_err
xorl %edx, %edx
LBB0_19:
movq %rdx, 16(%r14)
xorl %eax, %eax
LBB0_20:
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
.p2align 2, 0x90
.data_region jt32
.set L0_0_set_6, LBB0_6-LJTI0_0
.set L0_0_set_11, LBB0_11-LJTI0_0
.set L0_0_set_12, LBB0_12-LJTI0_0
.set L0_0_set_14, LBB0_14-LJTI0_0
.set L0_0_set_15, LBB0_15-LJTI0_0
LJTI0_0:
.long L0_0_set_6
.long L0_0_set_11
.long L0_0_set_11
.long L0_0_set_11
.long L0_0_set_12
.long L0_0_set_14
.long L0_0_set_15
.end_data_region
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "Zip instruction not yet completed"
.comm _ZIP_ERROR,4,2 ## @ZIP_ERROR
L_.str.1: ## @.str.1
.asciz "Zip instruction completed successfully"
L_.str.2: ## @.str.2
.asciz "Output Truncate error"
L_.str.3: ## @.str.3
.asciz "Zip instruction failed. Code:%d"
L_.str.4: ## @.str.4
.asciz "RAW Format: %d "
L_.str.5: ## @.str.5
.asciz "ZLIB Format: %d "
L_.str.6: ## @.str.6
.asciz "GZIP Format: %d "
L_.str.7: ## @.str.7
.asciz "LZS Format: %d "
L_.str.8: ## @.str.8
.asciz "Unknown Format:%d\n"
L_.str.9: ## @.str.9
.asciz "output_len (%d) < total bytes written(%d)\n"
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _zip_deflate ; -- Begin function zip_deflate
.p2align 2
_zip_deflate: ; @zip_deflate
.cfi_startproc
; %bb.0:
sub sp, sp, #64
.cfi_def_cfa_offset 64
stp x22, x21, [sp, #16] ; 16-byte Folded Spill
stp x20, x19, [sp, #32] ; 16-byte Folded Spill
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x21, x2
mov x20, x1
mov x19, x0
add x22, x1, #24
mov x2, x22
bl _prepare_zip_command
ldr x0, [x19]
add x1, x21, #12
bl _atomic64_add
mov x0, x22
mov x1, x21
bl _zip_load_instr
mov x22, x0
add x0, x21, #8
bl _atomic64_inc
mov x0, x20
bl _zip_poll_result
add x0, x21, #4
bl _atomic64_inc
ldr w8, [x20, #16]
str w8, [x19, #28]
cmp w8, #129
b.eq LBB0_4
; %bb.1:
cmp w8, #131
b.eq LBB0_7
; %bb.2:
cmp w8, #130
b.ne LBB0_9
; %bb.3:
Lloh0:
adrp x0, l_.str@PAGE
Lloh1:
add x0, x0, l_.str@PAGEOFF
b LBB0_8
LBB0_4:
Lloh2:
adrp x0, l_.str.1@PAGE
Lloh3:
add x0, x0, l_.str.1@PAGEOFF
bl _zip_dbg
mov x0, x21
mov x1, x22
bl _zip_update_cmd_bufs
ldrsw x1, [x19, #8]
sub w8, w1, #128
cmp w8, #6
b.hi LBB0_11
; %bb.5:
Lloh4:
adrp x9, lJTI0_0@PAGE
Lloh5:
add x9, x9, lJTI0_0@PAGEOFF
adr x10, LBB0_6
ldrb w11, [x9, x8]
add x10, x10, x11, lsl #2
br x10
LBB0_6:
mov w8, #128
str x8, [sp]
Lloh6:
adrp x0, l_.str.5@PAGE
Lloh7:
add x0, x0, l_.str.5@PAGEOFF
b LBB0_13
LBB0_7:
Lloh8:
adrp x0, l_.str.2@PAGE
Lloh9:
add x0, x0, l_.str.2@PAGEOFF
LBB0_8:
bl _zip_dbg
b LBB0_10
LBB0_9:
sxtw x1, w8
Lloh10:
adrp x0, l_.str.3@PAGE
Lloh11:
add x0, x0, l_.str.3@PAGEOFF
bl _zip_err
LBB0_10:
Lloh12:
adrp x8, _ZIP_ERROR@GOTPAGE
Lloh13:
ldr x8, [x8, _ZIP_ERROR@GOTPAGEOFF]
Lloh14:
ldr w0, [x8]
b LBB0_20
LBB0_11:
Lloh15:
adrp x0, l_.str.8@PAGE
Lloh16:
add x0, x0, l_.str.8@PAGEOFF
bl _zip_err
b LBB0_17
LBB0_12:
mov w8, #132
str x8, [sp]
Lloh17:
adrp x0, l_.str.4@PAGE
Lloh18:
add x0, x0, l_.str.4@PAGEOFF
LBB0_13:
bl _zip_dbg
ldr w8, [x20, #12]
b LBB0_16
LBB0_14:
mov w8, #133
str x8, [sp]
Lloh19:
adrp x0, l_.str.7@PAGE
Lloh20:
add x0, x0, l_.str.7@PAGEOFF
bl _zip_dbg
b LBB0_17
LBB0_15:
mov w8, #134
str x8, [sp]
Lloh21:
adrp x0, l_.str.6@PAGE
Lloh22:
add x0, x0, l_.str.6@PAGEOFF
bl _zip_dbg
ldr w8, [x20, #8]
LBB0_16:
str w8, [x19, #24]
LBB0_17:
ldr x0, [x20]
mov x1, x21
bl _atomic64_add
ldr x1, [x19, #16]
ldr x8, [x20]
cmp x1, x8
b.ge LBB0_19
; %bb.18:
str x8, [sp]
Lloh23:
adrp x0, l_.str.9@PAGE
Lloh24:
add x0, x0, l_.str.9@PAGEOFF
bl _zip_err
mov x8, #0
LBB0_19:
mov w0, #0
str x8, [x19, #16]
LBB0_20:
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #64
ret
.loh AdrpAdd Lloh0, Lloh1
.loh AdrpAdd Lloh2, Lloh3
.loh AdrpAdd Lloh4, Lloh5
.loh AdrpAdd Lloh6, Lloh7
.loh AdrpAdd Lloh8, Lloh9
.loh AdrpAdd Lloh10, Lloh11
.loh AdrpLdrGotLdr Lloh12, Lloh13, Lloh14
.loh AdrpAdd Lloh15, Lloh16
.loh AdrpAdd Lloh17, Lloh18
.loh AdrpAdd Lloh19, Lloh20
.loh AdrpAdd Lloh21, Lloh22
.loh AdrpAdd Lloh23, Lloh24
.cfi_endproc
.section __TEXT,__const
lJTI0_0:
.byte (LBB0_6-LBB0_6)>>2
.byte (LBB0_11-LBB0_6)>>2
.byte (LBB0_11-LBB0_6)>>2
.byte (LBB0_11-LBB0_6)>>2
.byte (LBB0_12-LBB0_6)>>2
.byte (LBB0_14-LBB0_6)>>2
.byte (LBB0_15-LBB0_6)>>2
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "Zip instruction not yet completed"
.comm _ZIP_ERROR,4,2 ; @ZIP_ERROR
l_.str.1: ; @.str.1
.asciz "Zip instruction completed successfully"
l_.str.2: ; @.str.2
.asciz "Output Truncate error"
l_.str.3: ; @.str.3
.asciz "Zip instruction failed. Code:%d"
l_.str.4: ; @.str.4
.asciz "RAW Format: %d "
l_.str.5: ; @.str.5
.asciz "ZLIB Format: %d "
l_.str.6: ; @.str.6
.asciz "GZIP Format: %d "
l_.str.7: ; @.str.7
.asciz "LZS Format: %d "
l_.str.8: ; @.str.8
.asciz "Unknown Format:%d\n"
l_.str.9: ; @.str.9
.asciz "output_len (%d) < total bytes written(%d)\n"
.subsections_via_symbols
| AnghaBench/linux/drivers/crypto/cavium/zip/extr_zip_deflate.c_zip_deflate.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function cs2000_wait_pll_lock
_cs2000_wait_pll_lock: ## @cs2000_wait_pll_lock
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdi, %r15
callq _priv_to_dev
movq %rax, %r14
movl $256, %ebx ## imm = 0x100
movq _DEVICE_CTRL@GOTPCREL(%rip), %r12
movq _PLL_UNLOCK@GOTPCREL(%rip), %r13
.p2align 4, 0x90
LBB0_1: ## =>This Inner Loop Header: Depth=1
movl (%r12), %esi
movq %r15, %rdi
callq _cs2000_read
testl %eax, %eax
js LBB0_6
## %bb.2: ## in Loop: Header=BB0_1 Depth=1
testl %eax, (%r13)
je LBB0_3
## %bb.4: ## in Loop: Header=BB0_1 Depth=1
movl $1, %edi
callq _udelay
decl %ebx
jne LBB0_1
## %bb.5:
leaq L_.str(%rip), %rsi
movq %r14, %rdi
callq _dev_err
movq _ETIMEDOUT@GOTPCREL(%rip), %rcx
xorl %eax, %eax
subl (%rcx), %eax
jmp LBB0_6
LBB0_3:
xorl %eax, %eax
LBB0_6:
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _DEVICE_CTRL,4,2 ## @DEVICE_CTRL
.comm _PLL_UNLOCK,4,2 ## @PLL_UNLOCK
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "pll lock failed\n"
.comm _ETIMEDOUT,4,2 ## @ETIMEDOUT
.no_dead_strip _cs2000_wait_pll_lock
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function cs2000_wait_pll_lock
_cs2000_wait_pll_lock: ; @cs2000_wait_pll_lock
.cfi_startproc
; %bb.0:
stp x24, x23, [sp, #-64]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 64
stp x22, x21, [sp, #16] ; 16-byte Folded Spill
stp x20, x19, [sp, #32] ; 16-byte Folded Spill
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
mov x19, x0
bl _priv_to_dev
mov x20, x0
mov w21, #256
Lloh0:
adrp x22, _DEVICE_CTRL@GOTPAGE
Lloh1:
ldr x22, [x22, _DEVICE_CTRL@GOTPAGEOFF]
Lloh2:
adrp x23, _PLL_UNLOCK@GOTPAGE
Lloh3:
ldr x23, [x23, _PLL_UNLOCK@GOTPAGEOFF]
LBB0_1: ; =>This Inner Loop Header: Depth=1
ldr w1, [x22]
mov x0, x19
bl _cs2000_read
tbnz w0, #31, LBB0_6
; %bb.2: ; in Loop: Header=BB0_1 Depth=1
ldr w8, [x23]
tst w8, w0
b.eq LBB0_5
; %bb.3: ; in Loop: Header=BB0_1 Depth=1
mov w0, #1
bl _udelay
subs w21, w21, #1
b.ne LBB0_1
; %bb.4:
Lloh4:
adrp x1, l_.str@PAGE
Lloh5:
add x1, x1, l_.str@PAGEOFF
mov x0, x20
bl _dev_err
Lloh6:
adrp x8, _ETIMEDOUT@GOTPAGE
Lloh7:
ldr x8, [x8, _ETIMEDOUT@GOTPAGEOFF]
Lloh8:
ldr w8, [x8]
neg w0, w8
b LBB0_6
LBB0_5:
mov w0, #0
LBB0_6:
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
ldp x24, x23, [sp], #64 ; 16-byte Folded Reload
ret
.loh AdrpLdrGot Lloh2, Lloh3
.loh AdrpLdrGot Lloh0, Lloh1
.loh AdrpLdrGotLdr Lloh6, Lloh7, Lloh8
.loh AdrpAdd Lloh4, Lloh5
.cfi_endproc
; -- End function
.comm _DEVICE_CTRL,4,2 ; @DEVICE_CTRL
.comm _PLL_UNLOCK,4,2 ; @PLL_UNLOCK
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "pll lock failed\n"
.comm _ETIMEDOUT,4,2 ; @ETIMEDOUT
.no_dead_strip _cs2000_wait_pll_lock
.subsections_via_symbols
| AnghaBench/linux/drivers/clk/extr_clk-cs2000-cp.c_cs2000_wait_pll_lock.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function Hi16
_Hi16: ## @Hi16
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movl %edi, %eax
sarl $16, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.no_dead_strip _Hi16
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function Hi16
_Hi16: ; @Hi16
.cfi_startproc
; %bb.0:
asr w0, w0, #16
ret
.cfi_endproc
; -- End function
.no_dead_strip _Hi16
.subsections_via_symbols
| AnghaBench/linux/net/wireless/extr_lib80211_crypt_tkip.c_Hi16.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function hdsp_set_rpm_disconnect
_hdsp_set_rpm_disconnect: ## @hdsp_set_rpm_disconnect
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq _HDSP_RPM_Disconnect@GOTPCREL(%rip), %rax
movl (%rax), %edx
testl %esi, %esi
je LBB0_2
## %bb.1:
orl (%rdi), %edx
jmp LBB0_3
LBB0_2:
notl %edx
andl (%rdi), %edx
LBB0_3:
movl %edx, (%rdi)
movq _HDSP_controlRegister@GOTPCREL(%rip), %rax
movl (%rax), %esi
callq _hdsp_write
xorl %eax, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _HDSP_RPM_Disconnect,4,2 ## @HDSP_RPM_Disconnect
.comm _HDSP_controlRegister,4,2 ## @HDSP_controlRegister
.no_dead_strip _hdsp_set_rpm_disconnect
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function hdsp_set_rpm_disconnect
_hdsp_set_rpm_disconnect: ; @hdsp_set_rpm_disconnect
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh0:
adrp x8, _HDSP_RPM_Disconnect@GOTPAGE
Lloh1:
ldr x8, [x8, _HDSP_RPM_Disconnect@GOTPAGEOFF]
Lloh2:
ldr w8, [x8]
cbz w1, LBB0_2
; %bb.1:
ldr w9, [x0]
orr w2, w9, w8
b LBB0_3
LBB0_2:
ldr w9, [x0]
bic w2, w9, w8
LBB0_3:
str w2, [x0]
Lloh3:
adrp x8, _HDSP_controlRegister@GOTPAGE
Lloh4:
ldr x8, [x8, _HDSP_controlRegister@GOTPAGEOFF]
Lloh5:
ldr w1, [x8]
bl _hdsp_write
mov w0, #0
ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
ret
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpLdrGotLdr Lloh3, Lloh4, Lloh5
.cfi_endproc
; -- End function
.comm _HDSP_RPM_Disconnect,4,2 ; @HDSP_RPM_Disconnect
.comm _HDSP_controlRegister,4,2 ; @HDSP_controlRegister
.no_dead_strip _hdsp_set_rpm_disconnect
.subsections_via_symbols
| AnghaBench/linux/sound/pci/rme9652/extr_hdsp.c_hdsp_set_rpm_disconnect.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function sm_state_to_str
_sm_state_to_str: ## @sm_state_to_str
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movl %edi, %ebx
movq _sm_state_strings@GOTPCREL(%rip), %r14
movq (%r14), %rdi
callq _ARRAY_SIZE
cmpw %bx, %ax
jbe LBB0_1
## %bb.2:
movq (%r14), %rax
movzwl %bx, %ecx
movq (%rax,%rcx,8), %rax
jmp LBB0_3
LBB0_1:
leaq L_.str(%rip), %rax
LBB0_3:
popq %rbx
popq %r14
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _sm_state_strings,8,3 ## @sm_state_strings
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "Unknown state"
.no_dead_strip _sm_state_to_str
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function sm_state_to_str
_sm_state_to_str: ; @sm_state_to_str
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x0
Lloh0:
adrp x20, _sm_state_strings@GOTPAGE
Lloh1:
ldr x20, [x20, _sm_state_strings@GOTPAGEOFF]
ldr x0, [x20]
bl _ARRAY_SIZE
cmp w0, w19
b.ls LBB0_2
; %bb.1:
ldr x8, [x20]
ldr x0, [x8, w19, uxtw #3]
b LBB0_3
LBB0_2:
Lloh2:
adrp x0, l_.str@PAGE
Lloh3:
add x0, x0, l_.str@PAGEOFF
LBB0_3:
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.loh AdrpLdrGot Lloh0, Lloh1
.loh AdrpAdd Lloh2, Lloh3
.cfi_endproc
; -- End function
.comm _sm_state_strings,8,3 ; @sm_state_strings
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "Unknown state"
.no_dead_strip _sm_state_to_str
.subsections_via_symbols
| AnghaBench/linux/drivers/net/phy/extr_sfp.c_sm_state_to_str.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _nfs3svc_encode_voidres ## -- Begin function nfs3svc_encode_voidres
.p2align 4, 0x90
_nfs3svc_encode_voidres: ## @nfs3svc_encode_voidres
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
popq %rbp
jmp _xdr_ressize_check ## TAILCALL
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _nfs3svc_encode_voidres ; -- Begin function nfs3svc_encode_voidres
.p2align 2
_nfs3svc_encode_voidres: ; @nfs3svc_encode_voidres
.cfi_startproc
; %bb.0:
b _xdr_ressize_check
.cfi_endproc
; -- End function
.subsections_via_symbols
| AnghaBench/fastsocket/kernel/fs/nfsd/extr_nfs3xdr.c_nfs3svc_encode_voidres.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _reaper ## -- Begin function reaper
.p2align 4, 0x90
_reaper: ## @reaper
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq _WNOHANG@GOTPCREL(%rip), %r14
movl (%r14), %esi
leaq -28(%rbp), %rdi
xorl %edx, %edx
callq _wait3
testq %rax, %rax
jle LBB0_3
## %bb.1:
movq _children@GOTPCREL(%rip), %r15
leaq -28(%rbp), %rbx
.p2align 4, 0x90
LBB0_2: ## =>This Inner Loop Header: Depth=1
decl (%r15)
movl (%r14), %esi
movq %rbx, %rdi
xorl %edx, %edx
callq _wait3
testq %rax, %rax
jg LBB0_2
LBB0_3:
addq $8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _WNOHANG,4,2 ## @WNOHANG
.comm _children,4,2 ## @children
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _reaper ; -- Begin function reaper
.p2align 2
_reaper: ; @reaper
.cfi_startproc
; %bb.0:
sub sp, sp, #48
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
Lloh0:
adrp x19, _WNOHANG@GOTPAGE
Lloh1:
ldr x19, [x19, _WNOHANG@GOTPAGEOFF]
ldr w1, [x19]
add x0, sp, #12
mov x2, #0
bl _wait3
cmp x0, #1
b.lt LBB0_3
; %bb.1:
Lloh2:
adrp x20, _children@GOTPAGE
Lloh3:
ldr x20, [x20, _children@GOTPAGEOFF]
LBB0_2: ; =>This Inner Loop Header: Depth=1
ldr w8, [x20]
sub w8, w8, #1
str w8, [x20]
ldr w1, [x19]
add x0, sp, #12
mov x2, #0
bl _wait3
cmp x0, #0
b.gt LBB0_2
LBB0_3:
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #48
ret
.loh AdrpLdrGot Lloh0, Lloh1
.loh AdrpLdrGot Lloh2, Lloh3
.cfi_endproc
; -- End function
.comm _WNOHANG,4,2 ; @WNOHANG
.comm _children,4,2 ; @children
.subsections_via_symbols
| AnghaBench/freebsd/usr.sbin/ypbind/extr_ypbind.c_reaper.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function nfsd_proc_null
_nfsd_proc_null: ## @nfsd_proc_null
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq _nfs_ok@GOTPCREL(%rip), %rax
movl (%rax), %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.comm _nfs_ok,4,2 ## @nfs_ok
.no_dead_strip _nfsd_proc_null
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function nfsd_proc_null
_nfsd_proc_null: ; @nfsd_proc_null
.cfi_startproc
; %bb.0:
Lloh0:
adrp x8, _nfs_ok@GOTPAGE
Lloh1:
ldr x8, [x8, _nfs_ok@GOTPAGEOFF]
Lloh2:
ldr w0, [x8]
ret
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.cfi_endproc
; -- End function
.comm _nfs_ok,4,2 ; @nfs_ok
.no_dead_strip _nfsd_proc_null
.subsections_via_symbols
| AnghaBench/fastsocket/kernel/fs/nfsd/extr_nfsproc.c_nfsd_proc_null.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _InitSeqStack ## -- Begin function InitSeqStack
.p2align 4, 0x90
_InitSeqStack: ## @InitSeqStack
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movl $-1, 12(%rdi)
movq $0, (%rdi)
movw $0, 8(%rdi)
movl $1, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _isSeqStackEmpty ## -- Begin function isSeqStackEmpty
.p2align 4, 0x90
_isSeqStackEmpty: ## @isSeqStackEmpty
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movabsq $-4294967297, %rcx ## imm = 0xFFFFFFFEFFFFFFFF
xorl %eax, %eax
cmpq %rcx, %rsi
seta %al
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _Push ## -- Begin function Push
.p2align 4, 0x90
_Push: ## @Push
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movslq 12(%rdi), %rcx
xorl %eax, %eax
cmpq $9, %rcx
je LBB2_2
## %bb.1:
movb %sil, 1(%rcx,%rdi)
incl 12(%rdi)
movl $1, %eax
LBB2_2:
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _Pop ## -- Begin function Pop
.p2align 4, 0x90
_Pop: ## @Pop
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movslq 12(%rdi), %rax
cmpq $-1, %rax
je LBB3_1
## %bb.2:
movb (%rdi,%rax), %al
movb %al, (%rsi)
decl 12(%rdi)
movl $1, %eax
popq %rbp
retq
LBB3_1:
xorl %eax, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _matchBrackets ## -- Begin function matchBrackets
.p2align 4, 0x90
_matchBrackets: ## @matchBrackets
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
subq $16, %rsp
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movabsq $-4294967296, %r10 ## imm = 0xFFFFFFFF00000000
movl $-1, -20(%rbp)
movq $0, -32(%rbp)
movw $0, -24(%rbp)
testl %esi, %esi
jle LBB4_8
## %bb.1:
movl %esi, %edx
movl $-1, %ecx
xorl %esi, %esi
leaq -32(%rbp), %r11
leaq L_str.5(%rip), %r9
leaq L_str.4(%rip), %r8
## implicit-def: $r14b
jmp LBB4_2
LBB4_16: ## in Loop: Header=BB4_2 Depth=1
movb $40, %r14b
.p2align 4, 0x90
LBB4_7: ## in Loop: Header=BB4_2 Depth=1
incq %rsi
movl %eax, %ecx
cmpq %rsi, %rdx
je LBB4_8
LBB4_2: ## =>This Inner Loop Header: Depth=1
movzbl (%rdi,%rsi), %ebx
cmpb $40, %bl
je LBB4_5
## %bb.3: ## in Loop: Header=BB4_2 Depth=1
cmpb $123, %bl
je LBB4_5
## %bb.4: ## in Loop: Header=BB4_2 Depth=1
cmpb $91, %bl
jne LBB4_10
LBB4_5: ## in Loop: Header=BB4_2 Depth=1
movl $9, %eax
cmpl $9, %ecx
je LBB4_7
## %bb.6: ## in Loop: Header=BB4_2 Depth=1
movslq %ecx, %rax
movb %bl, 1(%rax,%r11)
movl -20(%rbp), %eax
incl %eax
movl %eax, -20(%rbp)
jmp LBB4_7
.p2align 4, 0x90
LBB4_10: ## in Loop: Header=BB4_2 Depth=1
movq -24(%rbp), %rcx
cmpq %r10, %rcx
jae LBB4_29
## %bb.11: ## in Loop: Header=BB4_2 Depth=1
movq %rcx, %rax
shrq $32, %rax
cmpl $-1, %eax
je LBB4_12
## %bb.13: ## in Loop: Header=BB4_2 Depth=1
sarq $32, %rcx
movzbl -32(%rbp,%rcx), %r14d
decl %eax
movl %eax, -20(%rbp)
cmpb $41, %bl
je LBB4_15
jmp LBB4_17
LBB4_12: ## in Loop: Header=BB4_2 Depth=1
movl $-1, %eax
cmpb $41, %bl
jne LBB4_17
LBB4_15: ## in Loop: Header=BB4_2 Depth=1
cmpb $40, %r14b
je LBB4_16
LBB4_17: ## in Loop: Header=BB4_2 Depth=1
cmpb $93, %bl
jne LBB4_20
## %bb.18: ## in Loop: Header=BB4_2 Depth=1
cmpb $91, %r14b
jne LBB4_20
## %bb.19: ## in Loop: Header=BB4_2 Depth=1
movb $91, %r14b
jmp LBB4_7
LBB4_20: ## in Loop: Header=BB4_2 Depth=1
cmpb $91, %bl
jne LBB4_23
## %bb.21: ## in Loop: Header=BB4_2 Depth=1
cmpb $93, %r14b
jne LBB4_23
## %bb.22: ## in Loop: Header=BB4_2 Depth=1
movb $93, %r14b
jmp LBB4_7
LBB4_23: ## in Loop: Header=BB4_2 Depth=1
cmpb $125, %bl
jne LBB4_26
## %bb.24: ## in Loop: Header=BB4_2 Depth=1
cmpb $123, %r14b
jne LBB4_26
## %bb.25: ## in Loop: Header=BB4_2 Depth=1
movb $123, %r14b
jmp LBB4_7
LBB4_26: ## in Loop: Header=BB4_2 Depth=1
cmpb $123, %bl
jne LBB4_28
## %bb.27: ## in Loop: Header=BB4_2 Depth=1
cmpb $125, %r14b
movb $125, %r14b
je LBB4_7
LBB4_28:
movq %r8, %r9
jmp LBB4_29
LBB4_8:
movl $1, %eax
cmpq %r10, -24(%rbp)
jae LBB4_30
## %bb.9:
leaq L_str(%rip), %r9
LBB4_29:
movq %r9, %rdi
callq _puts
xorl %eax, %eax
LBB4_30:
addq $16, %rsp
popq %rbx
popq %r14
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _isLeftBrackets ## -- Begin function isLeftBrackets
.p2align 4, 0x90
_isLeftBrackets: ## @isLeftBrackets
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movl $1, %eax
cmpb $40, %dil
je LBB5_4
## %bb.1:
cmpb $91, %dil
je LBB5_4
## %bb.2:
cmpb $123, %dil
je LBB5_4
## %bb.3:
xorl %eax, %eax
LBB5_4:
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _isMatch ## -- Begin function isMatch
.p2align 4, 0x90
_isMatch: ## @isMatch
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movl $1, %eax
cmpb $40, %dil
jne LBB6_2
## %bb.1:
cmpb $41, %sil
jne LBB6_2
LBB6_11:
popq %rbp
retq
LBB6_2:
cmpb $41, %dil
jne LBB6_4
## %bb.3:
cmpb $40, %sil
je LBB6_11
LBB6_4:
cmpb $91, %dil
jne LBB6_6
## %bb.5:
cmpb $93, %sil
je LBB6_11
LBB6_6:
cmpb $93, %dil
jne LBB6_8
## %bb.7:
cmpb $91, %sil
je LBB6_11
LBB6_8:
cmpb $123, %dil
jne LBB6_10
## %bb.9:
cmpb $125, %sil
je LBB6_11
LBB6_10:
xorb $125, %dil
xorb $123, %sil
xorl %eax, %eax
orb %dil, %sil
sete %al
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
subq $32, %rsp
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -8(%rbp)
movabsq $6727015725152107387, %rax ## imm = 0x5D5B292928287B7B
movq %rax, -24(%rbp)
movb $125, -16(%rbp)
leaq -24(%rbp), %rdi
movl $9, %esi
callq _matchBrackets
testl %eax, %eax
je LBB7_2
## %bb.1:
leaq L_str.6(%rip), %rdi
callq _puts
LBB7_2:
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
cmpq -8(%rbp), %rax
jne LBB7_4
## %bb.3:
xorl %eax, %eax
addq $32, %rsp
popq %rbp
retq
LBB7_4:
callq ___stack_chk_fail
.cfi_endproc
## -- End function
.section __TEXT,__const
l___const.main.arr6: ## @__const.main.arr6
.ascii "{{(())[]}"
.section __TEXT,__cstring,cstring_literals
L_str: ## @str
.asciz "\345\214\271\351\205\215\345\244\261\350\264\245\357\274\214\345\267\246\346\213\254\345\217\267\345\215\225\350\272\253..."
L_str.4: ## @str.4
.asciz "\345\214\271\351\205\215\345\244\261\350\264\245..."
L_str.5: ## @str.5
.asciz "\345\214\271\351\205\215\345\244\261\350\264\245\357\274\214\345\217\263\346\213\254\345\217\267\345\215\225\350\272\253..."
L_str.6: ## @str.6
.asciz "\346\213\254\345\217\267\345\214\271\351\205\215\357\274\201"
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _InitSeqStack ; -- Begin function InitSeqStack
.p2align 2
_InitSeqStack: ; @InitSeqStack
.cfi_startproc
; %bb.0:
mov w8, #-1
str w8, [x0, #12]
str xzr, [x0]
strh wzr, [x0, #8]
mov w0, #1
ret
.cfi_endproc
; -- End function
.globl _isSeqStackEmpty ; -- Begin function isSeqStackEmpty
.p2align 2
_isSeqStackEmpty: ; @isSeqStackEmpty
.cfi_startproc
; %bb.0:
mov x8, #-4294967297
cmp x1, x8
cset w0, hi
ret
.cfi_endproc
; -- End function
.globl _Push ; -- Begin function Push
.p2align 2
_Push: ; @Push
.cfi_startproc
; %bb.0:
ldrsw x8, [x0, #12]
cmp w8, #9
b.ne LBB2_2
; %bb.1:
mov w0, #0
ret
LBB2_2:
add x8, x8, x0
strb w1, [x8, #1]
ldr w8, [x0, #12]
add w8, w8, #1
str w8, [x0, #12]
mov w0, #1
ret
.cfi_endproc
; -- End function
.globl _Pop ; -- Begin function Pop
.p2align 2
_Pop: ; @Pop
.cfi_startproc
; %bb.0:
ldrsw x8, [x0, #12]
cmn w8, #1
b.eq LBB3_2
; %bb.1:
ldrb w8, [x0, x8]
strb w8, [x1]
ldr w8, [x0, #12]
sub w8, w8, #1
str w8, [x0, #12]
mov w0, #1
ret
LBB3_2:
mov w0, #0
ret
.cfi_endproc
; -- End function
.globl _matchBrackets ; -- Begin function matchBrackets
.p2align 2
_matchBrackets: ; @matchBrackets
.cfi_startproc
; %bb.0:
sub sp, sp, #48
.cfi_def_cfa_offset 48
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh0:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh1:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh2:
ldr x8, [x8]
stur x8, [x29, #-8]
mov w8, #-1
str w8, [sp, #20]
str xzr, [sp, #8]
strh wzr, [sp, #16]
cmp w1, #1
b.lt LBB4_21
; %bb.1:
mov w9, w1
mov w13, #-1
add x11, sp, #8
Lloh3:
adrp x10, l_str.5@PAGE
Lloh4:
add x10, x10, l_str.5@PAGEOFF
mov x12, #-4294967297
; implicit-def: $w14
Lloh5:
adrp x8, l_str.4@PAGE
Lloh6:
add x8, x8, l_str.4@PAGEOFF
b LBB4_3
LBB4_2: ; in Loop: Header=BB4_3 Depth=1
add x0, x0, #1
subs x9, x9, #1
b.eq LBB4_21
LBB4_3: ; =>This Inner Loop Header: Depth=1
ldrb w15, [x0]
cmp w15, #40
b.eq LBB4_6
; %bb.4: ; in Loop: Header=BB4_3 Depth=1
cmp w15, #123
b.eq LBB4_6
; %bb.5: ; in Loop: Header=BB4_3 Depth=1
cmp w15, #91
b.ne LBB4_8
LBB4_6: ; in Loop: Header=BB4_3 Depth=1
cmp w13, #9
b.eq LBB4_2
; %bb.7: ; in Loop: Header=BB4_3 Depth=1
add x13, x11, w13, sxtw
strb w15, [x13, #1]
ldr w13, [sp, #20]
add w13, w13, #1
str w13, [sp, #20]
b LBB4_2
LBB4_8: ; in Loop: Header=BB4_3 Depth=1
ldr x13, [sp, #16]
cmp x13, x12
b.hi LBB4_24
; %bb.9: ; in Loop: Header=BB4_3 Depth=1
lsr x16, x13, #32
cmn w16, #1
b.eq LBB4_11
; %bb.10: ; in Loop: Header=BB4_3 Depth=1
asr x13, x13, #32
ldrb w14, [x11, x13]
sub w13, w16, #1
str w13, [sp, #20]
cmp w15, #41
b.eq LBB4_12
b LBB4_13
LBB4_11: ; in Loop: Header=BB4_3 Depth=1
mov w13, #-1
cmp w15, #41
b.ne LBB4_13
LBB4_12: ; in Loop: Header=BB4_3 Depth=1
cmp w14, #40
b.eq LBB4_2
LBB4_13: ; in Loop: Header=BB4_3 Depth=1
cmp w15, #93
b.ne LBB4_15
; %bb.14: ; in Loop: Header=BB4_3 Depth=1
cmp w14, #91
b.eq LBB4_2
LBB4_15: ; in Loop: Header=BB4_3 Depth=1
cmp w15, #91
b.ne LBB4_17
; %bb.16: ; in Loop: Header=BB4_3 Depth=1
cmp w14, #93
b.eq LBB4_2
LBB4_17: ; in Loop: Header=BB4_3 Depth=1
cmp w15, #125
b.ne LBB4_19
; %bb.18: ; in Loop: Header=BB4_3 Depth=1
cmp w14, #123
b.eq LBB4_2
LBB4_19: ; in Loop: Header=BB4_3 Depth=1
cmp w15, #123
b.ne LBB4_25
; %bb.20: ; in Loop: Header=BB4_3 Depth=1
cmp w14, #125
b.eq LBB4_2
b LBB4_25
LBB4_21:
ldr x8, [sp, #16]
mov x9, #-4294967297
cmp x8, x9
b.hi LBB4_23
; %bb.22:
Lloh7:
adrp x8, l_str@PAGE
Lloh8:
add x8, x8, l_str@PAGEOFF
b LBB4_25
LBB4_23:
mov w0, #1
b LBB4_26
LBB4_24:
mov x8, x10
LBB4_25:
mov x0, x8
bl _puts
mov w0, #0
LBB4_26:
ldur x8, [x29, #-8]
Lloh9:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh10:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh11:
ldr x9, [x9]
cmp x9, x8
b.ne LBB4_28
; %bb.27:
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
add sp, sp, #48
ret
LBB4_28:
bl ___stack_chk_fail
.loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
.loh AdrpAdd Lloh5, Lloh6
.loh AdrpAdd Lloh3, Lloh4
.loh AdrpAdd Lloh7, Lloh8
.loh AdrpLdrGotLdr Lloh9, Lloh10, Lloh11
.cfi_endproc
; -- End function
.globl _isLeftBrackets ; -- Begin function isLeftBrackets
.p2align 2
_isLeftBrackets: ; @isLeftBrackets
.cfi_startproc
; %bb.0:
mov w8, #1
cmp w0, #123
csel w9, w8, wzr, eq
cmp w0, #91
csel w9, w8, w9, eq
cmp w0, #40
csel w0, w8, w9, eq
ret
.cfi_endproc
; -- End function
.globl _isMatch ; -- Begin function isMatch
.p2align 2
_isMatch: ; @isMatch
.cfi_startproc
; %bb.0:
cmp w0, #40
b.ne LBB6_3
; %bb.1:
cmp w1, #41
b.ne LBB6_3
; %bb.2:
mov w0, #1
ret
LBB6_3:
cmp w0, #41
b.ne LBB6_6
; %bb.4:
cmp w1, #40
b.ne LBB6_6
; %bb.5:
mov w0, #1
ret
LBB6_6:
cmp w0, #91
b.ne LBB6_9
; %bb.7:
cmp w1, #93
b.ne LBB6_9
; %bb.8:
mov w0, #1
ret
LBB6_9:
cmp w0, #93
b.ne LBB6_12
; %bb.10:
cmp w1, #91
b.ne LBB6_12
; %bb.11:
mov w0, #1
ret
LBB6_12:
cmp w0, #123
b.ne LBB6_15
; %bb.13:
cmp w1, #125
b.ne LBB6_15
; %bb.14:
mov w0, #1
ret
LBB6_15:
cmp w0, #125
mov w8, #123
ccmp w1, w8, #0, eq
cset w0, eq
ret
.cfi_endproc
; -- End function
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
sub sp, sp, #48
.cfi_def_cfa_offset 48
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh12:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh13:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh14:
ldr x8, [x8]
stur x8, [x29, #-8]
Lloh15:
adrp x8, l___const.main.arr6@PAGE
Lloh16:
add x8, x8, l___const.main.arr6@PAGEOFF
Lloh17:
ldr x8, [x8]
str x8, [sp, #8]
mov w8, #125
strb w8, [sp, #16]
add x0, sp, #8
mov w1, #9
bl _matchBrackets
cbz w0, LBB7_2
; %bb.1:
Lloh18:
adrp x0, l_str.6@PAGE
Lloh19:
add x0, x0, l_str.6@PAGEOFF
bl _puts
LBB7_2:
ldur x8, [x29, #-8]
Lloh20:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh21:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh22:
ldr x9, [x9]
cmp x9, x8
b.ne LBB7_4
; %bb.3:
mov w0, #0
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
add sp, sp, #48
ret
LBB7_4:
bl ___stack_chk_fail
.loh AdrpAddLdr Lloh15, Lloh16, Lloh17
.loh AdrpLdrGotLdr Lloh12, Lloh13, Lloh14
.loh AdrpAdd Lloh18, Lloh19
.loh AdrpLdrGotLdr Lloh20, Lloh21, Lloh22
.cfi_endproc
; -- End function
.section __TEXT,__const
l___const.main.arr6: ; @__const.main.arr6
.ascii "{{(())[]}"
.section __TEXT,__cstring,cstring_literals
l_str: ; @str
.asciz "\345\214\271\351\205\215\345\244\261\350\264\245\357\274\214\345\267\246\346\213\254\345\217\267\345\215\225\350\272\253..."
l_str.4: ; @str.4
.asciz "\345\214\271\351\205\215\345\244\261\350\264\245..."
l_str.5: ; @str.5
.asciz "\345\214\271\351\205\215\345\244\261\350\264\245\357\274\214\345\217\263\346\213\254\345\217\267\345\215\225\350\272\253..."
l_str.6: ; @str.6
.asciz "\346\213\254\345\217\267\345\214\271\351\205\215\357\274\201"
.subsections_via_symbols
| the_stack_data/90761854.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ## -- Begin function main
.p2align 4, 0x90
_main: ## @main
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
subq $16, %rsp
leaq L_.str(%rip), %rdi
xorl %eax, %eax
callq _printf
leaq L_.str.1(%rip), %rdi
leaq -4(%rbp), %rsi
xorl %eax, %eax
callq _scanf
movl -4(%rbp), %esi
leaq L_.str.2(%rip), %rdi
xorl %eax, %eax
callq _printf
xorl %eax, %eax
addq $16, %rsp
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "Ingrese el valor de a: "
L_.str.1: ## @.str.1
.asciz "%i"
L_.str.2: ## @.str.2
.asciz "El valor es: %i"
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
sub sp, sp, #32
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh0:
adrp x0, l_.str@PAGE
Lloh1:
add x0, x0, l_.str@PAGEOFF
bl _printf
sub x8, x29, #4
str x8, [sp]
Lloh2:
adrp x0, l_.str.1@PAGE
Lloh3:
add x0, x0, l_.str.1@PAGEOFF
bl _scanf
ldur w8, [x29, #-4]
str x8, [sp]
Lloh4:
adrp x0, l_.str.2@PAGE
Lloh5:
add x0, x0, l_.str.2@PAGEOFF
bl _printf
mov w0, #0
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #32
ret
.loh AdrpAdd Lloh4, Lloh5
.loh AdrpAdd Lloh2, Lloh3
.loh AdrpAdd Lloh0, Lloh1
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "Ingrese el valor de a: "
l_.str.1: ; @.str.1
.asciz "%i"
l_.str.2: ; @.str.2
.asciz "El valor es: %i"
.subsections_via_symbols
| the_stack_data/237642286.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 4, 0x90 ## -- Begin function xo_is_line_buffered
_xo_is_line_buffered: ## @xo_is_line_buffered
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
callq _fileno
movl %eax, %edi
callq _isatty
xorl %ecx, %ecx
testq %rax, %rax
setne %cl
movl %ecx, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.no_dead_strip _xo_is_line_buffered
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.p2align 2 ; -- Begin function xo_is_line_buffered
_xo_is_line_buffered: ; @xo_is_line_buffered
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
bl _fileno
bl _isatty
cmp x0, #0
cset w0, ne
ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
.no_dead_strip _xo_is_line_buffered
.subsections_via_symbols
| AnghaBench/freebsd/contrib/libxo/libxo/extr_libxo.c_xo_is_line_buffered.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _j2day ## -- Begin function j2day
.p2align 4, 0x90
_j2day: ## @j2day
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
## kill: def $edi killed $edi def $rdi
movl %edi, %eax
incl %eax
cltq
imulq $-1840700269, %rax, %rax ## imm = 0x92492493
shrq $32, %rax
addl %edi, %eax
incl %eax
movl %eax, %ecx
shrl $31, %ecx
sarl $2, %eax
addl %ecx, %eax
leal (,%rax,8), %ecx
subl %ecx, %eax
leal (%rdi,%rax), %ecx
incl %ecx
testl %ecx, %ecx
leal 8(%rdi,%rax), %eax
cmovnsl %ecx, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _j2day ; -- Begin function j2day
.p2align 2
_j2day: ; @j2day
.cfi_startproc
; %bb.0:
add w8, w0, #1
mov w9, #9363
movk w9, #37449, lsl #16
smull x9, w8, w9
lsr x9, x9, #32
add w9, w9, w8
asr w10, w9, #2
add w9, w10, w9, lsr #31
sub w9, w9, w9, lsl #3
add w8, w8, w9
add w9, w8, #7
cmp w8, #0
csel w0, w9, w8, lt
ret
.cfi_endproc
; -- End function
.subsections_via_symbols
| AnghaBench/postgres/src/backend/utils/adt/extr_datetime.c_j2day.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _lj_debug_slotname ## -- Begin function lj_debug_slotname
.p2align 4, 0x90
_lj_debug_slotname: ## @lj_debug_slotname
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $24, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rcx, %r14
movq %rdx, %r15
movq %rsi, %rbx
movq %rdi, %r13
callq _proto_bcpos
movq %r13, %rdi
movl %eax, %esi
movq %r15, -48(%rbp) ## 8-byte Spill
movq %r15, %rdx
callq _debug_varname
testq %rax, %rax
je LBB0_1
LBB0_23:
movq %rax, (%r14)
leaq L_.str(%rip), %r15
jmp LBB0_22
LBB0_1:
movq %r14, -56(%rbp) ## 8-byte Spill
.p2align 4, 0x90
LBB0_3: ## =>This Inner Loop Header: Depth=1
addq $-4, %rbx
movq %r13, %rdi
callq _proto_bc
cmpq %rax, %rbx
jbe LBB0_8
## %bb.4: ## in Loop: Header=BB0_3 Depth=1
movl (%rbx), %r14d
movl %r14d, %edi
callq _bc_op
movslq %eax, %r15
movl %r14d, %edi
callq _bc_a
movq %rax, %r12
movq %r15, %rdi
callq _bcmode_a
movq _BCMbase@GOTPCREL(%rip), %rcx
cmpq (%rcx), %rax
jne LBB0_9
## %bb.5: ## in Loop: Header=BB0_3 Depth=1
cmpq %r12, -48(%rbp) ## 8-byte Folded Reload
jl LBB0_3
## %bb.6: ## in Loop: Header=BB0_3 Depth=1
movq _BC_KNIL@GOTPCREL(%rip), %rax
cmpq %r15, (%rax)
jne LBB0_8
## %bb.7: ## in Loop: Header=BB0_3 Depth=1
movl %r14d, %edi
callq _bc_d
cmpq %rax, -48(%rbp) ## 8-byte Folded Reload
jg LBB0_3
jmp LBB0_8
.p2align 4, 0x90
LBB0_9: ## in Loop: Header=BB0_3 Depth=1
movq %r15, %rdi
callq _bcmode_a
movq _BCMdst@GOTPCREL(%rip), %rcx
cmpq (%rcx), %rax
jne LBB0_3
## %bb.10: ## in Loop: Header=BB0_3 Depth=1
cmpq -48(%rbp), %r12 ## 8-byte Folded Reload
jne LBB0_3
## %bb.11: ## in Loop: Header=BB0_3 Depth=1
movl %r14d, %edi
callq _bc_op
cmpl $130, %eax
jne LBB0_12
## %bb.2: ## in Loop: Header=BB0_3 Depth=1
movl %r14d, %edi
callq _bc_d
movq %rax, %r14
movq %r13, %rdi
movq %rbx, %rsi
callq _proto_bcpos
movq %r13, %rdi
movl %eax, %esi
movq %r14, -48(%rbp) ## 8-byte Spill
movq %r14, %rdx
callq _debug_varname
testq %rax, %rax
movq -56(%rbp), %r14 ## 8-byte Reload
je LBB0_3
jmp LBB0_23
LBB0_12:
cmpl $128, %eax
je LBB0_21
## %bb.13:
cmpl $129, %eax
je LBB0_16
## %bb.14:
cmpl $131, %eax
jne LBB0_8
## %bb.15:
movl %r14d, %edi
callq _bc_d
notl %eax
movq %r13, %rdi
movl %eax, %esi
callq _proto_kgc
movl %eax, %edi
callq _gco2str
movl %eax, %edi
callq _strdata
movq -56(%rbp), %rcx ## 8-byte Reload
movq %rax, (%rcx)
leaq L_.str.1(%rip), %r15
jmp LBB0_22
LBB0_8:
xorl %r15d, %r15d
LBB0_22:
movq %r15, %rax
addq $24, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB0_21:
movl %r14d, %edi
callq _bc_d
movq %r13, %rdi
movq %rax, %rsi
callq _lj_debug_uvname
movq -56(%rbp), %rcx ## 8-byte Reload
movq %rax, (%rcx)
leaq L_.str.4(%rip), %r15
jmp LBB0_22
LBB0_16:
movl %r14d, %edi
callq _bc_c
notl %eax
movq %r13, %rdi
movl %eax, %esi
callq _proto_kgc
movl %eax, %edi
callq _gco2str
movl %eax, %edi
callq _strdata
movq -56(%rbp), %rcx ## 8-byte Reload
movq %rax, (%rcx)
movq %r13, %rdi
callq _proto_bc
leaq L_.str.3(%rip), %r15
cmpq %rax, %rbx
jbe LBB0_22
## %bb.17:
movl -4(%rbx), %ebx
movl %ebx, %edi
callq _bc_op
cmpl $130, %eax
jne LBB0_22
## %bb.18:
movl %ebx, %edi
callq _bc_a
movq _LJ_FR2@GOTPCREL(%rip), %rcx
movq (%rcx), %rcx
movq -48(%rbp), %rdx ## 8-byte Reload
addq %rdx, %rcx
incq %rcx
cmpq %rcx, %rax
jne LBB0_22
## %bb.19:
movl %ebx, %edi
callq _bc_d
movq %rax, %rbx
movl %r14d, %edi
callq _bc_b
cmpq %rax, %rbx
jne LBB0_22
## %bb.20:
leaq L_.str.2(%rip), %r15
jmp LBB0_22
.cfi_endproc
## -- End function
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "local"
.comm _BCMbase,8,3 ## @BCMbase
.comm _BC_KNIL,8,3 ## @BC_KNIL
.comm _BCMdst,8,3 ## @BCMdst
L_.str.1: ## @.str.1
.asciz "global"
.comm _LJ_FR2,8,3 ## @LJ_FR2
L_.str.2: ## @.str.2
.asciz "method"
L_.str.3: ## @.str.3
.asciz "field"
L_.str.4: ## @.str.4
.asciz "upvalue"
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _lj_debug_slotname ; -- Begin function lj_debug_slotname
.p2align 2
_lj_debug_slotname: ; @lj_debug_slotname
.cfi_startproc
; %bb.0:
stp x28, x27, [sp, #-96]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 96
stp x26, x25, [sp, #16] ; 16-byte Folded Spill
stp x24, x23, [sp, #32] ; 16-byte Folded Spill
stp x22, x21, [sp, #48] ; 16-byte Folded Spill
stp x20, x19, [sp, #64] ; 16-byte Folded Spill
stp x29, x30, [sp, #80] ; 16-byte Folded Spill
add x29, sp, #80
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
mov x19, x3
mov x20, x2
mov x22, x1
mov x21, x0
bl _proto_bcpos
mov x1, x0
mov x0, x21
mov x2, x20
bl _debug_varname
cbz x0, LBB0_2
LBB0_1:
str x0, [x19]
Lloh0:
adrp x0, l_.str@PAGE
Lloh1:
add x0, x0, l_.str@PAGEOFF
b LBB0_16
LBB0_2:
Lloh2:
adrp x26, _BCMbase@GOTPAGE
Lloh3:
ldr x26, [x26, _BCMbase@GOTPAGEOFF]
Lloh4:
adrp x27, _BC_KNIL@GOTPAGE
Lloh5:
ldr x27, [x27, _BC_KNIL@GOTPAGEOFF]
Lloh6:
adrp x28, _BCMdst@GOTPAGE
Lloh7:
ldr x28, [x28, _BCMdst@GOTPAGEOFF]
b LBB0_4
LBB0_3: ; in Loop: Header=BB0_4 Depth=1
mov x0, x24
bl _bcmode_a
ldr x8, [x28]
cmp x0, x8
ccmp x25, x20, #0, eq
b.eq LBB0_9
LBB0_4: ; =>This Inner Loop Header: Depth=1
sub x22, x22, #4
mov x0, x21
bl _proto_bc
cmp x22, x0
b.ls LBB0_15
; %bb.5: ; in Loop: Header=BB0_4 Depth=1
ldr w23, [x22]
mov x0, x23
bl _bc_op
; kill: def $w0 killed $w0 def $x0
sxtw x24, w0
mov x0, x23
bl _bc_a
mov x25, x0
mov x0, x24
bl _bcmode_a
ldr x8, [x26]
cmp x0, x8
b.ne LBB0_3
; %bb.6: ; in Loop: Header=BB0_4 Depth=1
cmp x20, x25
b.lt LBB0_4
; %bb.7: ; in Loop: Header=BB0_4 Depth=1
ldr x8, [x27]
cmp x8, x24
b.ne LBB0_15
; %bb.8: ; in Loop: Header=BB0_4 Depth=1
mov x0, x23
bl _bc_d
cmp x20, x0
b.gt LBB0_4
b LBB0_15
LBB0_9: ; in Loop: Header=BB0_4 Depth=1
mov x0, x23
bl _bc_op
cmp w0, #130
b.ne LBB0_11
; %bb.10: ; in Loop: Header=BB0_4 Depth=1
mov x0, x23
bl _bc_d
mov x20, x0
mov x0, x21
mov x1, x22
bl _proto_bcpos
mov x1, x0
mov x0, x21
mov x2, x20
bl _debug_varname
cbz x0, LBB0_4
b LBB0_1
LBB0_11:
cmp w0, #128
b.eq LBB0_17
; %bb.12:
cmp w0, #129
b.eq LBB0_18
; %bb.13:
cmp w0, #131
b.ne LBB0_15
; %bb.14:
mov x0, x23
bl _bc_d
mvn w1, w0
mov x0, x21
bl _proto_kgc
bl _gco2str
bl _strdata
str x0, [x19]
Lloh8:
adrp x0, l_.str.1@PAGE
Lloh9:
add x0, x0, l_.str.1@PAGEOFF
b LBB0_16
LBB0_15:
mov x0, #0
LBB0_16:
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
ldp x26, x25, [sp, #16] ; 16-byte Folded Reload
ldp x28, x27, [sp], #96 ; 16-byte Folded Reload
ret
LBB0_17:
mov x0, x23
bl _bc_d
mov x1, x0
mov x0, x21
bl _lj_debug_uvname
str x0, [x19]
Lloh10:
adrp x0, l_.str.4@PAGE
Lloh11:
add x0, x0, l_.str.4@PAGEOFF
b LBB0_16
LBB0_18:
mov x0, x23
bl _bc_c
mvn w1, w0
mov x0, x21
bl _proto_kgc
bl _gco2str
bl _strdata
str x0, [x19]
mov x0, x21
bl _proto_bc
cmp x22, x0
b.ls LBB0_23
; %bb.19:
ldur w19, [x22, #-4]
mov x0, x19
bl _bc_op
cmp w0, #130
b.ne LBB0_23
; %bb.20:
mov x0, x19
bl _bc_a
Lloh12:
adrp x8, _LJ_FR2@GOTPAGE
Lloh13:
ldr x8, [x8, _LJ_FR2@GOTPAGEOFF]
Lloh14:
ldr x8, [x8]
add x8, x20, x8
add x8, x8, #1
cmp x0, x8
b.ne LBB0_23
; %bb.21:
mov x0, x19
bl _bc_d
mov x19, x0
mov x0, x23
bl _bc_b
cmp x19, x0
b.ne LBB0_23
; %bb.22:
Lloh15:
adrp x0, l_.str.2@PAGE
Lloh16:
add x0, x0, l_.str.2@PAGEOFF
b LBB0_16
LBB0_23:
Lloh17:
adrp x0, l_.str.3@PAGE
Lloh18:
add x0, x0, l_.str.3@PAGEOFF
b LBB0_16
.loh AdrpAdd Lloh0, Lloh1
.loh AdrpLdrGot Lloh6, Lloh7
.loh AdrpLdrGot Lloh4, Lloh5
.loh AdrpLdrGot Lloh2, Lloh3
.loh AdrpAdd Lloh8, Lloh9
.loh AdrpAdd Lloh10, Lloh11
.loh AdrpLdrGotLdr Lloh12, Lloh13, Lloh14
.loh AdrpAdd Lloh15, Lloh16
.loh AdrpAdd Lloh17, Lloh18
.cfi_endproc
; -- End function
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "local"
.comm _BCMbase,8,3 ; @BCMbase
.comm _BC_KNIL,8,3 ; @BC_KNIL
.comm _BCMdst,8,3 ; @BCMdst
l_.str.1: ; @.str.1
.asciz "global"
.comm _LJ_FR2,8,3 ; @LJ_FR2
l_.str.2: ; @.str.2
.asciz "method"
l_.str.3: ; @.str.3
.asciz "field"
l_.str.4: ; @.str.4
.asciz "upvalue"
.subsections_via_symbols
| AnghaBench/xLua/build/luajit-2.1.0b3/src/extr_lj_debug.c_lj_debug_slotname.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _stbi_failure_reason ## -- Begin function stbi_failure_reason
.p2align 4, 0x90
_stbi_failure_reason: ## @stbi_failure_reason
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq _failure_reason(%rip), %rax
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _stbi_image_free ## -- Begin function stbi_image_free
.p2align 4, 0x90
_stbi_image_free: ## @stbi_image_free
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
popq %rbp
jmp _free ## TAILCALL
.cfi_endproc
## -- End function
.globl _stbi_register_loader ## -- Begin function stbi_register_loader
.p2align 4, 0x90
_stbi_register_loader: ## @stbi_register_loader
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
xorl %ecx, %ecx
movq _loaders@GOTPCREL(%rip), %r8
xorl %edx, %edx
movl $1, %eax
.p2align 4, 0x90
LBB2_1: ## =>This Inner Loop Header: Depth=1
movq (%r8,%rdx,8), %rsi
cmpq %rdi, %rsi
je LBB2_10
## %bb.2: ## in Loop: Header=BB2_1 Depth=1
testq %rsi, %rsi
je LBB2_3
## %bb.6: ## in Loop: Header=BB2_1 Depth=1
movq 8(%r8,%rdx,8), %rsi
cmpq %rdi, %rsi
je LBB2_10
## %bb.7: ## in Loop: Header=BB2_1 Depth=1
testq %rsi, %rsi
je LBB2_4
## %bb.8: ## in Loop: Header=BB2_1 Depth=1
addq $2, %rdx
addq $16, %rcx
cmpq $32, %rdx
jne LBB2_1
## %bb.9:
xorl %eax, %eax
LBB2_10:
popq %rbp
retq
LBB2_3:
addq _loaders@GOTPCREL(%rip), %rcx
jmp LBB2_5
LBB2_4:
incq %rdx
addq %r8, %rcx
addq $8, %rcx
LBB2_5:
movq %rdi, (%rcx)
incl %edx
movl %edx, _max_loaders(%rip)
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _stbi_load ## -- Begin function stbi_load
.p2align 4, 0x90
_stbi_load: ## @stbi_load
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %r8d, %r14d
movq %rcx, %r15
movq %rdx, %r12
movq %rsi, %r13
leaq L_.str(%rip), %rsi
callq _fopen
testq %rax, %rax
je LBB3_1
## %bb.3:
movq %rax, %rbx
movq %rax, %rdi
movq %r13, %rsi
movq %r12, %rdx
movq %r15, %rcx
movl %r14d, %r8d
callq _stbi_load_from_file
movq %rax, %r14
movq %rbx, %rdi
callq _fclose
movq %r14, %rax
jmp LBB3_2
LBB3_1:
leaq L_.str.1(%rip), %rax
movq %rax, _failure_reason(%rip)
xorl %eax, %eax
LBB3_2:
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _stbi_load_from_file ## -- Begin function stbi_load_from_file
.p2align 4, 0x90
_stbi_load_from_file: ## @stbi_load_from_file
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
movl $14120, %eax ## imm = 0x3728
callq ____chkstk_darwin
subq %rax, %rsp
popq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %r8d, -44(%rbp) ## 4-byte Spill
movq %rcx, %r12
movq %rdx, -56(%rbp) ## 8-byte Spill
movq %rsi, %r13
movq %rdi, %rbx
callq _ftell
movq %rax, %r14
movq %rbx, %rdi
callq _fgetc
cmpl $-1, %eax
je LBB4_6
## %bb.1:
cmpb $-1, %al
jne LBB4_6
## %bb.2:
xorl %r15d, %r15d
.p2align 4, 0x90
LBB4_4: ## =>This Inner Loop Header: Depth=1
movq %rbx, %rdi
callq _fgetc
cmpl $-1, %eax
cmovel %r15d, %eax
cmpb $-1, %al
jne LBB4_5
## %bb.3: ## in Loop: Header=BB4_4 Depth=1
testq %rbx, %rbx
jne LBB4_4
jmp LBB4_6
LBB4_5:
cmpb $-40, %al
jne LBB4_6
## %bb.25:
movslq %r14d, %rsi
movq %rbx, %rdi
xorl %edx, %edx
callq _fseek
movq %rbx, -14136(%rbp)
leaq -14152(%rbp), %rdi
movq %r13, %rsi
movq -56(%rbp), %rdx ## 8-byte Reload
movq %r12, %rcx
movl -44(%rbp), %r8d ## 4-byte Reload
callq _load_jpeg_image
jmp LBB4_24
LBB4_6:
leaq L_.str.22(%rip), %rax
movq %rax, _failure_reason(%rip)
movslq %r14d, %rsi
movq %rbx, %rdi
xorl %edx, %edx
callq _fseek
movq %rbx, %rdi
callq _stbi_png_test_file
testl %eax, %eax
je LBB4_8
## %bb.7:
movq %rbx, -14136(%rbp)
leaq -14152(%rbp), %rdi
movq %r13, %rsi
movq -56(%rbp), %rdx ## 8-byte Reload
movq %r12, %rcx
movl -44(%rbp), %r8d ## 4-byte Reload
callq _do_png
jmp LBB4_24
LBB4_8:
movq %rbx, %rdi
callq _ftell
movq %rax, %r14
movq %rbx, -14136(%rbp)
leaq -14152(%rbp), %rdi
callq _bmp_test
movl %eax, %r15d
movslq %r14d, %rsi
movq %rbx, %rdi
xorl %edx, %edx
callq _fseek
testl %r15d, %r15d
je LBB4_10
## %bb.9:
movq %rbx, -14136(%rbp)
leaq -14152(%rbp), %rdi
movq %r13, %rsi
movq -56(%rbp), %rdx ## 8-byte Reload
movq %r12, %rcx
movl -44(%rbp), %r8d ## 4-byte Reload
callq _bmp_load
jmp LBB4_24
LBB4_10:
movq %rbx, %rdi
callq _ftell
movq %rax, %r14
movq %rbx, -14136(%rbp)
leaq -14152(%rbp), %rdi
callq _get32
movl %eax, %r15d
movslq %r14d, %rsi
movq %rbx, %rdi
xorl %edx, %edx
callq _fseek
cmpl $943870035, %r15d ## imm = 0x38425053
jne LBB4_12
## %bb.11:
movq %rbx, -14136(%rbp)
leaq -14152(%rbp), %rdi
movq %r13, %rsi
movq -56(%rbp), %rdx ## 8-byte Reload
movq %r12, %rcx
movl -44(%rbp), %r8d ## 4-byte Reload
callq _psd_load
LBB4_24:
addq $14120, %rsp ## imm = 0x3728
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB4_12:
movq %rbx, %rdi
callq _stbi_hdr_test_file
testl %eax, %eax
je LBB4_13
## %bb.15:
movq %r12, %r14
movq %rbx, -14136(%rbp)
leaq -14152(%rbp), %rdi
movq %r13, %rsi
movq -56(%rbp), %r15 ## 8-byte Reload
movq %r15, %rdx
movq %r12, %rcx
movl -44(%rbp), %ebx ## 4-byte Reload
movl %ebx, %r8d
callq _hdr_load
movl (%r13), %esi
movl (%r15), %edx
testl %ebx, %ebx
jne LBB4_17
## %bb.16:
movl (%r14), %ebx
LBB4_17:
movq %rax, %rdi
movl %ebx, %ecx
addq $14120, %rsp ## imm = 0x3728
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp _hdr_to_ldr ## TAILCALL
LBB4_13:
cmpl $0, _max_loaders(%rip)
jle LBB4_21
## %bb.14:
xorl %r14d, %r14d
movq _loaders@GOTPCREL(%rip), %r15
.p2align 4, 0x90
LBB4_19: ## =>This Inner Loop Header: Depth=1
movq (%r15,%r14,8), %rax
movq %rbx, %rdi
callq *16(%rax)
testl %eax, %eax
jne LBB4_20
## %bb.18: ## in Loop: Header=BB4_19 Depth=1
incq %r14
movslq _max_loaders(%rip), %rax
cmpq %rax, %r14
jl LBB4_19
LBB4_21:
movq %rbx, %rdi
callq _stbi_tga_test_file
testl %eax, %eax
je LBB4_23
## %bb.22:
movq %rbx, -14136(%rbp)
leaq -14152(%rbp), %rdi
movq %r13, %rsi
movq -56(%rbp), %rdx ## 8-byte Reload
movq %r12, %rcx
movl -44(%rbp), %r8d ## 4-byte Reload
callq _tga_load
jmp LBB4_24
LBB4_20:
movq (%r15,%r14,8), %rax
movq 24(%rax), %rax
movq %rbx, %rdi
movq %r13, %rsi
movq -56(%rbp), %rdx ## 8-byte Reload
movq %r12, %rcx
movl -44(%rbp), %r8d ## 4-byte Reload
addq $14120, %rsp ## imm = 0x3728
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmpq *%rax ## TAILCALL
LBB4_23:
leaq L_.str.2(%rip), %rax
movq %rax, _failure_reason(%rip)
xorl %eax, %eax
jmp LBB4_24
.cfi_endproc
## -- End function
.globl _stbi_jpeg_test_file ## -- Begin function stbi_jpeg_test_file
.p2align 4, 0x90
_stbi_jpeg_test_file: ## @stbi_jpeg_test_file
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdi, %rbx
callq _ftell
movq %rax, %r14
movq %rbx, %rdi
callq _fgetc
cmpl $-1, %eax
je LBB5_6
## %bb.1:
cmpb $-1, %al
jne LBB5_6
## %bb.2:
xorl %r15d, %r15d
.p2align 4, 0x90
LBB5_4: ## =>This Inner Loop Header: Depth=1
movq %rbx, %rdi
callq _fgetc
cmpl $-1, %eax
cmovel %r15d, %eax
cmpb $-1, %al
jne LBB5_5
## %bb.3: ## in Loop: Header=BB5_4 Depth=1
testq %rbx, %rbx
jne LBB5_4
jmp LBB5_6
LBB5_5:
cmpb $-40, %al
jne LBB5_6
## %bb.7:
movl $1, %r15d
jmp LBB5_8
LBB5_6:
leaq L_.str.22(%rip), %rax
movq %rax, _failure_reason(%rip)
xorl %r15d, %r15d
LBB5_8:
movslq %r14d, %rsi
movq %rbx, %rdi
xorl %edx, %edx
callq _fseek
movl %r15d, %eax
addq $8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _stbi_jpeg_load_from_file ## -- Begin function stbi_jpeg_load_from_file
.p2align 4, 0x90
_stbi_jpeg_load_from_file: ## @stbi_jpeg_load_from_file
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movl $14096, %eax ## imm = 0x3710
callq ____chkstk_darwin
subq %rax, %rsp
movq %rdi, -14080(%rbp)
leaq -14096(%rbp), %rdi
callq _load_jpeg_image
addq $14096, %rsp ## imm = 0x3710
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _stbi_png_test_file ## -- Begin function stbi_png_test_file
.p2align 4, 0x90
_stbi_png_test_file: ## @stbi_png_test_file
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdi, %r15
callq _ftell
movq %rax, %r14
testq %r15, %r15
je LBB7_9
## %bb.1:
movq %r15, %rdi
callq _fgetc
cmpl $137, %eax
jne LBB7_9
## %bb.2:
movq %r15, %rdi
callq _fgetc
cmpl $80, %eax
jne LBB7_9
## %bb.3:
movq %r15, %rdi
callq _fgetc
cmpl $78, %eax
jne LBB7_9
## %bb.4:
movq %r15, %rdi
callq _fgetc
cmpl $71, %eax
jne LBB7_9
## %bb.5:
movq %r15, %rdi
callq _fgetc
cmpl $13, %eax
jne LBB7_9
## %bb.6:
movq %r15, %rdi
callq _fgetc
cmpl $10, %eax
jne LBB7_9
## %bb.7:
movq %r15, %rdi
callq _fgetc
cmpl $26, %eax
jne LBB7_9
## %bb.8:
movq %r15, %rdi
callq _fgetc
movl $1, %ebx
cmpl $10, %eax
je LBB7_10
LBB7_9:
leaq L_.str.68(%rip), %rax
movq %rax, _failure_reason(%rip)
xorl %ebx, %ebx
LBB7_10:
movslq %r14d, %rsi
movq %r15, %rdi
xorl %edx, %edx
callq _fseek
movl %ebx, %eax
addq $8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _stbi_png_load_from_file ## -- Begin function stbi_png_load_from_file
.p2align 4, 0x90
_stbi_png_load_from_file: ## @stbi_png_load_from_file
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
subq $64, %rsp
movq %rdi, -48(%rbp)
leaq -64(%rbp), %rdi
callq _do_png
addq $64, %rsp
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _stbi_bmp_test_file ## -- Begin function stbi_bmp_test_file
.p2align 4, 0x90
_stbi_bmp_test_file: ## @stbi_bmp_test_file
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
subq $40, %rsp
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdi, %rbx
callq _ftell
movq %rax, %r14
movq %rbx, -48(%rbp)
leaq -64(%rbp), %rdi
callq _bmp_test
movl %eax, %r15d
movslq %r14d, %rsi
movq %rbx, %rdi
xorl %edx, %edx
callq _fseek
movl %r15d, %eax
addq $40, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _stbi_bmp_load_from_file ## -- Begin function stbi_bmp_load_from_file
.p2align 4, 0x90
_stbi_bmp_load_from_file: ## @stbi_bmp_load_from_file
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
subq $48, %rsp
movq %rdi, -24(%rbp)
leaq -40(%rbp), %rdi
callq _bmp_load
addq $48, %rsp
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _stbi_psd_test_file ## -- Begin function stbi_psd_test_file
.p2align 4, 0x90
_stbi_psd_test_file: ## @stbi_psd_test_file
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
subq $40, %rsp
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdi, %r15
callq _ftell
movq %rax, %r14
movq %r15, -48(%rbp)
leaq -64(%rbp), %rdi
callq _get32
xorl %ebx, %ebx
cmpl $943870035, %eax ## imm = 0x38425053
sete %bl
movslq %r14d, %rsi
movq %r15, %rdi
xorl %edx, %edx
callq _fseek
movl %ebx, %eax
addq $40, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _stbi_psd_load_from_file ## -- Begin function stbi_psd_load_from_file
.p2align 4, 0x90
_stbi_psd_load_from_file: ## @stbi_psd_load_from_file
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
subq $48, %rsp
movq %rdi, -24(%rbp)
leaq -40(%rbp), %rdi
callq _psd_load
addq $48, %rsp
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _stbi_hdr_test_file ## -- Begin function stbi_hdr_test_file
.p2align 4, 0x90
_stbi_hdr_test_file: ## @stbi_hdr_test_file
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdi, %rbx
callq _ftell
movq %rax, %r14
xorl %r15d, %r15d
testq %rbx, %rbx
je LBB13_12
## %bb.1:
movq %rbx, %rdi
callq _fgetc
cmpl $35, %eax
jne LBB13_12
## %bb.2:
movq %rbx, %rdi
callq _fgetc
cmpl $63, %eax
jne LBB13_12
## %bb.3:
movq %rbx, %rdi
callq _fgetc
cmpl $82, %eax
jne LBB13_12
## %bb.4:
movq %rbx, %rdi
callq _fgetc
cmpl $65, %eax
jne LBB13_12
## %bb.5:
movq %rbx, %rdi
callq _fgetc
cmpl $68, %eax
jne LBB13_12
## %bb.6:
movq %rbx, %rdi
callq _fgetc
cmpl $73, %eax
jne LBB13_12
## %bb.7:
movq %rbx, %rdi
callq _fgetc
cmpl $65, %eax
jne LBB13_12
## %bb.8:
movq %rbx, %rdi
callq _fgetc
cmpl $78, %eax
jne LBB13_12
## %bb.9:
movq %rbx, %rdi
callq _fgetc
cmpl $67, %eax
jne LBB13_12
## %bb.10:
movq %rbx, %rdi
callq _fgetc
cmpl $69, %eax
jne LBB13_12
## %bb.11:
movq %rbx, %rdi
callq _fgetc
xorl %r15d, %r15d
cmpl $10, %eax
sete %r15b
LBB13_12:
movslq %r14d, %rsi
movq %rbx, %rdi
xorl %edx, %edx
callq _fseek
movl %r15d, %eax
addq $8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _stbi_hdr_load_from_file ## -- Begin function stbi_hdr_load_from_file
.p2align 4, 0x90
_stbi_hdr_load_from_file: ## @stbi_hdr_load_from_file
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
subq $48, %rsp
movq %rdi, -24(%rbp)
leaq -40(%rbp), %rdi
callq _hdr_load
addq $48, %rsp
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__literal16,16byte_literals
.p2align 4 ## -- Begin function hdr_to_ldr
LCPI15_0:
.long 0x437f0000 ## float 255
.long 0x437f0000 ## float 255
.long 0x437f0000 ## float 255
.long 0x437f0000 ## float 255
LCPI15_1:
.long 0x3f000000 ## float 0.5
.long 0x3f000000 ## float 0.5
.long 0x3f000000 ## float 0.5
.long 0x3f000000 ## float 0.5
LCPI15_2:
.byte 0 ## 0x0
.byte 4 ## 0x4
.byte 8 ## 0x8
.byte 12 ## 0xc
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.section __TEXT,__literal4,4byte_literals
.p2align 2
LCPI15_3:
.long 0x437f0000 ## float 255
LCPI15_4:
.long 0x3f000000 ## float 0.5
.section __TEXT,__text,regular,pure_instructions
.p2align 4, 0x90
_hdr_to_ldr: ## @hdr_to_ldr
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $184, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
## kill: def $ecx killed $ecx def $rcx
movl %esi, %r13d
movq %rdi, %r12
imull %edx, %r13d
movl %r13d, %eax
movq %rcx, -72(%rbp) ## 8-byte Spill
imull %ecx, %eax
movslq %eax, %rdi
callq _malloc
movq %rax, -64(%rbp) ## 8-byte Spill
testq %rax, %rax
je LBB15_1
## %bb.2:
testl %r13d, %r13d
jle LBB15_15
## %bb.3:
movq -72(%rbp), %rcx ## 8-byte Reload
movl %ecx, %eax
andl $1, %eax
leal (%rcx,%rax), %esi
decl %esi
movss _h2l_scale_i(%rip), %xmm2 ## xmm2 = mem[0],zero,zero,zero
movss _h2l_gamma_i(%rip), %xmm0 ## xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm3
movslq %ecx, %rax
movl %r13d, %ecx
movq %rcx, -144(%rbp) ## 8-byte Spill
movl %esi, %r15d
movl %r15d, %ecx
andl $-4, %ecx
movq %rcx, -120(%rbp) ## 8-byte Spill
movaps %xmm2, %xmm4
shufps $0, %xmm2, %xmm4 ## xmm4 = xmm4[0,0],xmm2[0,0]
movq %rax, -88(%rbp) ## 8-byte Spill
leaq (,%rax,4), %rax
movq %rax, -136(%rbp) ## 8-byte Spill
xorl %edi, %edi
movss LCPI15_3(%rip), %xmm5 ## xmm5 = mem[0],zero,zero,zero
movss LCPI15_4(%rip), %xmm6 ## xmm6 = mem[0],zero,zero,zero
movq %r12, %r14
movq -64(%rbp), %r13 ## 8-byte Reload
movl %esi, -52(%rbp) ## 4-byte Spill
movaps %xmm2, -176(%rbp) ## 16-byte Spill
movaps %xmm4, -160(%rbp) ## 16-byte Spill
movq %r12, -128(%rbp) ## 8-byte Spill
movsd %xmm3, -48(%rbp) ## 8-byte Spill
jmp LBB15_4
.p2align 4, 0x90
LBB15_14: ## in Loop: Header=BB15_4 Depth=1
incq %rdi
addq -88(%rbp), %r13 ## 8-byte Folded Reload
addq -136(%rbp), %r14 ## 8-byte Folded Reload
cmpq -144(%rbp), %rdi ## 8-byte Folded Reload
je LBB15_15
LBB15_4: ## =>This Loop Header: Depth=1
## Child Loop BB15_8 Depth 2
## Child Loop BB15_10 Depth 2
movl $0, %eax
testl %esi, %esi
jle LBB15_12
## %bb.5: ## in Loop: Header=BB15_4 Depth=1
cmpl $4, %esi
movq %rdi, -80(%rbp) ## 8-byte Spill
jae LBB15_7
## %bb.6: ## in Loop: Header=BB15_4 Depth=1
xorl %ebx, %ebx
jmp LBB15_10
.p2align 4, 0x90
LBB15_7: ## in Loop: Header=BB15_4 Depth=1
xorl %r12d, %r12d
movq -120(%rbp), %rbx ## 8-byte Reload
.p2align 4, 0x90
LBB15_8: ## Parent Loop BB15_4 Depth=1
## => This Inner Loop Header: Depth=2
movups (%r14,%r12,4), %xmm0
mulps %xmm4, %xmm0
cvtps2pd %xmm0, %xmm1
movaps %xmm1, -112(%rbp) ## 16-byte Spill
movhlps %xmm0, %xmm0 ## xmm0 = xmm0[1,1]
cvtps2pd %xmm0, %xmm0
movaps %xmm0, -192(%rbp) ## 16-byte Spill
movaps %xmm1, %xmm0
movsd -48(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
callq _pow
movaps %xmm0, -224(%rbp) ## 16-byte Spill
movaps -112(%rbp), %xmm0 ## 16-byte Reload
movhlps %xmm0, %xmm0 ## xmm0 = xmm0[1,1]
movsd -48(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
callq _pow
movaps %xmm0, -112(%rbp) ## 16-byte Spill
movaps -192(%rbp), %xmm0 ## 16-byte Reload
movsd -48(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
callq _pow
movaps %xmm0, -208(%rbp) ## 16-byte Spill
movapd -192(%rbp), %xmm0 ## 16-byte Reload
unpckhpd %xmm0, %xmm0 ## xmm0 = xmm0[1,1]
movsd -48(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
callq _pow
movaps LCPI15_0(%rip), %xmm2 ## xmm2 = [2.55E+2,2.55E+2,2.55E+2,2.55E+2]
movaps -160(%rbp), %xmm4 ## 16-byte Reload
movapd -208(%rbp), %xmm1 ## 16-byte Reload
unpcklpd %xmm0, %xmm1 ## xmm1 = xmm1[0],xmm0[0]
movapd -224(%rbp), %xmm0 ## 16-byte Reload
unpcklpd -112(%rbp), %xmm0 ## 16-byte Folded Reload
## xmm0 = xmm0[0],mem[0]
cvtpd2ps %xmm0, %xmm0
cvtpd2ps %xmm1, %xmm1
unpcklpd %xmm1, %xmm0 ## xmm0 = xmm0[0],xmm1[0]
mulps %xmm2, %xmm0
addps LCPI15_1(%rip), %xmm0
xorpd %xmm1, %xmm1
maxps %xmm0, %xmm1
movaps %xmm2, %xmm0
minps %xmm1, %xmm0
cvttps2dq %xmm0, %xmm0
pshufb LCPI15_2(%rip), %xmm0 ## xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
movd %xmm0, (%r13,%r12)
addq $4, %r12
cmpq %r12, %rbx
jne LBB15_8
## %bb.9: ## in Loop: Header=BB15_4 Depth=1
movq %rbx, %rcx
movl -52(%rbp), %esi ## 4-byte Reload
movl %esi, %eax
cmpq %r15, %rbx
movq -128(%rbp), %r12 ## 8-byte Reload
movaps -176(%rbp), %xmm2 ## 16-byte Reload
movsd -48(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
movq -80(%rbp), %rdi ## 8-byte Reload
movss LCPI15_3(%rip), %xmm5 ## xmm5 = mem[0],zero,zero,zero
movss LCPI15_4(%rip), %xmm6 ## xmm6 = mem[0],zero,zero,zero
je LBB15_12
.p2align 4, 0x90
LBB15_10: ## Parent Loop BB15_4 Depth=1
## => This Inner Loop Header: Depth=2
movss (%r14,%rbx,4), %xmm0 ## xmm0 = mem[0],zero,zero,zero
mulss %xmm2, %xmm0
cvtss2sd %xmm0, %xmm0
movsd -48(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
callq _pow
movss LCPI15_4(%rip), %xmm6 ## xmm6 = mem[0],zero,zero,zero
movss LCPI15_3(%rip), %xmm5 ## xmm5 = mem[0],zero,zero,zero
movaps -176(%rbp), %xmm2 ## 16-byte Reload
cvtsd2ss %xmm0, %xmm0
mulss %xmm5, %xmm0
addss %xmm6, %xmm0
xorps %xmm1, %xmm1
maxss %xmm0, %xmm1
movaps %xmm5, %xmm0
minss %xmm1, %xmm0
cvttss2si %xmm0, %eax
movb %al, (%r13,%rbx)
incq %rbx
cmpq %rbx, %r15
jne LBB15_10
## %bb.11: ## in Loop: Header=BB15_4 Depth=1
movl -52(%rbp), %esi ## 4-byte Reload
movl %esi, %eax
movsd -48(%rbp), %xmm3 ## 8-byte Reload
## xmm3 = mem[0],zero
movaps -160(%rbp), %xmm4 ## 16-byte Reload
movq -80(%rbp), %rdi ## 8-byte Reload
LBB15_12: ## in Loop: Header=BB15_4 Depth=1
cmpl -72(%rbp), %eax ## 4-byte Folded Reload
jge LBB15_14
## %bb.13: ## in Loop: Header=BB15_4 Depth=1
movq %rdi, %rcx
imulq -88(%rbp), %rcx ## 8-byte Folded Reload
cltq
addq %rcx, %rax
movss (%r12,%rax,4), %xmm0 ## xmm0 = mem[0],zero,zero,zero
mulss %xmm5, %xmm0
addss %xmm6, %xmm0
xorps %xmm1, %xmm1
maxss %xmm0, %xmm1
movaps %xmm5, %xmm0
minss %xmm1, %xmm0
cvttss2si %xmm0, %ecx
movq -64(%rbp), %rdx ## 8-byte Reload
movb %cl, (%rdx,%rax)
jmp LBB15_14
LBB15_15:
movq %r12, %rdi
callq _free
jmp LBB15_16
LBB15_1:
movq %r12, %rdi
callq _free
leaq L_.str.5(%rip), %rax
movq %rax, _failure_reason(%rip)
LBB15_16:
movq -64(%rbp), %rax ## 8-byte Reload
addq $184, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _stbi_tga_test_file ## -- Begin function stbi_tga_test_file
.p2align 4, 0x90
_stbi_tga_test_file: ## @stbi_tga_test_file
## int stbi_tga_test_file(FILE *f)
## Probes whether the stream at the current position looks like a TGA image,
## without permanently moving the file position:
##   1. save the current offset with ftell,
##   2. wrap the FILE* in an on-stack stbi context (FILE* stored at -48(%rbp))
##      and run tga_test on it,
##   3. restore the offset with fseek(f, pos, 0 /* SEEK_SET */),
##   4. return tga_test's result in eax.
## NOTE(review): the ftell result is narrowed to 32 bits (movslq %r14d, %rsi)
## before the fseek, so offsets past 2 GiB would be truncated — presumably the
## C source stores the position in an int; confirm against the caller.
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
subq $40, %rsp
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdi, %rbx ## rbx = f (callee-saved; live across the calls below)
callq _ftell
movq %rax, %r14 ## r14 = saved file position
movq %rbx, -48(%rbp) ## context.img_file = f
leaq -64(%rbp), %rdi ## rdi = &context
callq _tga_test
movl %eax, %r15d ## r15d = test result (preserved across fseek)
movslq %r14d, %rsi ## rsi = saved position (sign-extended from 32 bits)
movq %rbx, %rdi
xorl %edx, %edx ## whence = 0 (SEEK_SET)
callq _fseek
movl %r15d, %eax ## return the tga_test result
addq $40, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _stbi_tga_load_from_file ## -- Begin function stbi_tga_load_from_file
.p2align 4, 0x90
_stbi_tga_load_from_file: ## @stbi_tga_load_from_file
## stbi_tga_load_from_file(FILE *f, int *x, int *y, int *comp, int req_comp)
## Thin wrapper: builds an on-stack stbi context around the FILE*
## (stored at -24(%rbp)) and tail-delegates to tga_load. The remaining
## arguments (rsi, rdx, rcx, r8) are already in the registers tga_load
## expects, so only rdi is replaced with the context pointer.
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
subq $48, %rsp
movq %rdi, -24(%rbp) ## context.img_file = f
leaq -40(%rbp), %rdi ## rdi = &context; x/y/comp/req_comp pass through
callq _tga_load
addq $48, %rsp
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _stbi_load_from_memory ## -- Begin function stbi_load_from_memory
.p2align 4, 0x90
_stbi_load_from_memory: ## @stbi_load_from_memory
## stbi_load_from_memory(const uint8_t *buf, int len, int *x, int *y,
##                       int *comp, int req_comp)
## Format-dispatch loader: sniffs the buffer's magic bytes and forwards to
## the matching decoder (JPEG, PNG, BMP, PSD, HDR, user-registered loaders,
## TGA fallback). Register roles below:
##   rbx  = buf            r14d = len            r15 = buf + len (end ptr)
##   r13  = y (int*)       r12d = req_comp
##   rdx  = x (int*) and r8 = comp (int*) — spilled to -48/-56(%rbp) on the
##   non-JPEG/PNG paths so they survive the probe calls.
## A ~14 KB stack frame holds the decoder context at -14160(%rbp); the
## context layout written before each decoder call is
##   {buffer_start=0?, img=buf, img_end=buf+len} at -14144/-14136/-14128.
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
## Probe and allocate the large (~14 KB) frame page by page (Darwin chkstk).
movl $14120, %eax ## imm = 0x3728
callq ____chkstk_darwin
subq %rax, %rsp
popq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %r9d, %r12d ## r12d = req_comp
movq %rcx, %r13 ## r13 = y
movl %esi, %r14d ## r14d = len
movq %rdi, %rbx ## rbx = buf
movslq %esi, %rax
leaq (%rdi,%rax), %r15 ## r15 = buf + len
testl %eax, %eax
jle LBB18_23 ## empty buffer: fall through to non-JPEG/PNG path
## %bb.1:
## JPEG sniff: first byte must be 0xFF (start of an SOI marker).
movb (%rbx), %sil
cmpb $-1, %sil ## 0xFF?
je LBB18_2
LBB18_21:
## Not a JPEG. Record a failure string (L_.str.22 — presumably "not jpeg")
## and fall through to the PNG signature test on the first byte in sil.
leaq L_.str.22(%rip), %rcx
movq %rcx, _failure_reason(%rip)
cmpl $1, %r14d ## need at least 2 bytes for the PNG check
je LBB18_23
## %bb.22:
## PNG signature: 89 50 4E 47 0D 0A 1A 0A ("\x89PNG\r\n\x1a\n"),
## each byte guarded by a length check.
cmpb $-119, %sil ## 0x89?
jne LBB18_23
## %bb.7:
cmpl $3, %r14d
jb LBB18_23
## %bb.8:
cmpb $80, 1(%rbx) ## 'P'
jne LBB18_23
## %bb.9:
cmpl $4, %r14d
jb LBB18_23
## %bb.10:
cmpb $78, 2(%rbx) ## 'N'
jne LBB18_23
## %bb.11:
cmpl $5, %r14d
jb LBB18_23
## %bb.12:
cmpb $71, 3(%rbx) ## 'G'
jne LBB18_23
## %bb.13:
cmpl $6, %r14d
jb LBB18_23
## %bb.14:
cmpb $13, 4(%rbx) ## '\r'
jne LBB18_23
## %bb.15:
cmpl $7, %r14d
jb LBB18_23
## %bb.16:
cmpb $10, 5(%rbx) ## '\n'
jne LBB18_23
## %bb.17:
cmpl $8, %r14d
jb LBB18_23
## %bb.18:
cmpb $26, 6(%rbx) ## 0x1A
jne LBB18_23
## %bb.19:
cmpb $10, 7(%rbx) ## '\n'
jne LBB18_23
## %bb.20:
## Full PNG signature matched: build the context and decode via do_png.
movq $0, -14144(%rbp)
movq %rbx, -14136(%rbp)
movq %r15, -14128(%rbp)
leaq -14160(%rbp), %rdi
movq %rdx, %rsi ## x
movq %r13, %rdx ## y
movq %r8, %rcx ## comp
movl %r12d, %r8d ## req_comp
callq _do_png
jmp LBB18_39
LBB18_23:
## Neither JPEG nor PNG. Spill the remaining pointer args, set a generic
## failure string (L_.str.68), then try the other formats in turn.
movq %rdx, -48(%rbp) ## 8-byte Spill
movq %r8, -56(%rbp) ## 8-byte Spill
leaq L_.str.68(%rip), %rax
movq %rax, _failure_reason(%rip)
movq $0, -14144(%rbp)
movq %rbx, -14136(%rbp)
movq %r15, -14128(%rbp)
leaq -14160(%rbp), %rdi
callq _bmp_test
testl %eax, %eax
je LBB18_25
## %bb.24:
## BMP recognized: rebuild the context (bmp_test consumed bytes) and load.
movq $0, -14144(%rbp)
movq %rbx, -14136(%rbp)
movq %r15, -14128(%rbp)
leaq -14160(%rbp), %rdi
movq -48(%rbp), %rsi ## 8-byte Reload
movq %r13, %rdx
movq -56(%rbp), %rcx ## 8-byte Reload
movl %r12d, %r8d
callq _bmp_load
jmp LBB18_39
LBB18_25:
## PSD sniff: first big-endian u32 must be 0x38425053 ('8BPS').
movq $0, -14144(%rbp)
movq %rbx, -14136(%rbp)
movq %r15, -14128(%rbp)
leaq -14160(%rbp), %rdi
callq _get32
cmpl $943870035, %eax ## imm = 0x38425053
jne LBB18_27
## %bb.26:
movq $0, -14144(%rbp)
movq %rbx, -14136(%rbp)
movq %r15, -14128(%rbp)
leaq -14160(%rbp), %rdi
movq -48(%rbp), %rsi ## 8-byte Reload
movq %r13, %rdx
movq -56(%rbp), %rcx ## 8-byte Reload
movl %r12d, %r8d
callq _psd_load
jmp LBB18_39
LBB18_2:
## JPEG continuation: skip any run of 0xFF fill bytes; the first non-0xFF
## byte must be 0xD8 (SOI) for this to be a JPEG stream.
leaq 1(%rbx), %rcx
.p2align 4, 0x90
LBB18_3: ## =>This Inner Loop Header: Depth=1
cmpq %r15, %rcx
jae LBB18_21 ## ran off the end: not a JPEG
## %bb.4: ## in Loop: Header=BB18_3 Depth=1
movzbl (%rcx), %eax
incq %rcx
cmpb $-1, %al ## still 0xFF fill? keep scanning
je LBB18_3
## %bb.5:
cmpb $-40, %al ## 0xD8 (SOI)?
jne LBB18_21
## %bb.6:
movq $0, -14144(%rbp)
movq %rbx, -14136(%rbp)
movq %r15, -14128(%rbp)
leaq -14160(%rbp), %rdi
movq %rdx, %rsi
movq %r13, %rdx
movq %r8, %rcx
movl %r12d, %r8d
callq _load_jpeg_image
jmp LBB18_39
LBB18_27:
## HDR sniff via stbi_hdr_test_memory (checks the "#?RADIANCE\n" header).
movq %rbx, %rdi
movl %r14d, %esi
callq _stbi_hdr_test_memory
testl %eax, %eax
je LBB18_28
## %bb.30:
## HDR path: hdr_load returns float data, which is converted to 8-bit via
## hdr_to_ldr(result, *x, *y, comp) as a tail call below.
movq $0, -14144(%rbp)
movq %rbx, -14136(%rbp)
movq %r15, -14128(%rbp)
leaq -14160(%rbp), %rdi
movq -48(%rbp), %r14 ## 8-byte Reload
movq %r14, %rsi ## x
movq %r13, %rbx ## keep y pointer for the post-load reads
movq %r13, %rdx
movq -56(%rbp), %r13 ## 8-byte Reload
movq %r13, %rcx ## comp
movl %r12d, %r8d ## req_comp
callq _hdr_load
movl (%r14), %esi ## width actually decoded
movl (%rbx), %edx ## height actually decoded
testl %r12d, %r12d ## req_comp == 0 means "use file's comp"
jne LBB18_32
## %bb.31:
movl (%r13), %r12d ## comp from the file
LBB18_32:
movq %rax, %rdi
movl %r12d, %ecx
addq $14120, %rsp ## imm = 0x3728
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp _hdr_to_ldr ## TAILCALL
LBB18_28:
## User-registered loaders: iterate the global _loaders table (size in
## _max_loaders). Entry layout: [0] = test(buf, len), [8] = load(...).
movl %r12d, -60(%rbp) ## 4-byte Spill
cmpl $0, _max_loaders(%rip)
jle LBB18_36
## %bb.29:
xorl %r12d, %r12d ## r12 = loader index
.p2align 4, 0x90
LBB18_34: ## =>This Inner Loop Header: Depth=1
movq _loaders@GOTPCREL(%rip), %rax
movq (%rax,%r12,8), %rax
movq %rbx, %rdi
movl %r14d, %esi
callq *(%rax) ## loaders[i]->test(buf, len)
testl %eax, %eax
jne LBB18_35
## %bb.33: ## in Loop: Header=BB18_34 Depth=1
incq %r12
movslq _max_loaders(%rip), %rax
cmpq %rax, %r12
jl LBB18_34
LBB18_36:
## Final fallback: TGA (has no magic number, so it is tried last).
movq $0, -14144(%rbp)
movq %rbx, -14136(%rbp)
movq %r15, -14128(%rbp)
leaq -14160(%rbp), %rdi
callq _tga_test
testl %eax, %eax
je LBB18_38
## %bb.37:
movq $0, -14144(%rbp)
movq %rbx, -14136(%rbp)
movq %r15, -14128(%rbp)
leaq -14160(%rbp), %rdi
movq -48(%rbp), %rsi ## 8-byte Reload
movq %r13, %rdx
movq -56(%rbp), %rcx ## 8-byte Reload
movl -60(%rbp), %r8d ## 4-byte Reload
callq _tga_load
jmp LBB18_39
LBB18_35:
## A registered loader recognized the data: tail-call its load function
## loaders[i]->load(buf, len, x, y, comp, req_comp).
movq _loaders@GOTPCREL(%rip), %rax
movq (%rax,%r12,8), %rax
movq 8(%rax), %rax
movq %rbx, %rdi
movl %r14d, %esi
movq -48(%rbp), %rdx ## 8-byte Reload
movq %r13, %rcx
movq -56(%rbp), %r8 ## 8-byte Reload
movl -60(%rbp), %r9d ## 4-byte Reload
addq $14120, %rsp ## imm = 0x3728
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmpq *%rax ## TAILCALL
LBB18_38:
## Nothing matched: set failure string (L_.str.2 — presumably
## "unknown image type") and return NULL.
leaq L_.str.2(%rip), %rax
movq %rax, _failure_reason(%rip)
xorl %eax, %eax
LBB18_39:
addq $14120, %rsp ## imm = 0x3728
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _stbi_jpeg_test_memory ## -- Begin function stbi_jpeg_test_memory
.p2align 4, 0x90
_stbi_jpeg_test_memory: ## @stbi_jpeg_test_memory
## int stbi_jpeg_test_memory(const uint8_t *buf, int len)
## Returns 1 if the buffer begins with a JPEG SOI marker: a 0xFF byte,
## followed (after any run of additional 0xFF fill bytes) by 0xD8.
## On any mismatch, sets _failure_reason to L_.str.22 and returns 0.
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
testl %esi, %esi
jle LBB19_6 ## empty buffer: fail
## %bb.1:
cmpb $-1, (%rdi) ## first byte must be 0xFF
je LBB19_2
LBB19_6:
## Failure path: record the reason string and return 0.
leaq L_.str.22(%rip), %rax
movq %rax, _failure_reason(%rip)
xorl %eax, %eax
popq %rbp
retq
LBB19_2:
movslq %esi, %rax
addq %rdi, %rax ## rax = buf + len (end pointer)
incq %rdi ## start scanning after the first 0xFF
.p2align 4, 0x90
LBB19_3: ## =>This Inner Loop Header: Depth=1
cmpq %rax, %rdi
jae LBB19_6 ## ran off the end without finding 0xD8
## %bb.4: ## in Loop: Header=BB19_3 Depth=1
movzbl (%rdi), %ecx
incq %rdi
cmpb $-1, %cl ## skip 0xFF fill bytes
je LBB19_3
## %bb.5:
cmpb $-40, %cl ## 0xD8 (SOI)?
jne LBB19_6
## %bb.7:
movl $1, %eax ## looks like a JPEG
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _stbi_jpeg_load_from_memory ## -- Begin function stbi_jpeg_load_from_memory
.p2align 4, 0x90
_stbi_jpeg_load_from_memory: ## @stbi_jpeg_load_from_memory
## stbi_jpeg_load_from_memory(buf, len, x, y, comp, req_comp)
## Wrapper: builds the ~14 KB on-stack JPEG context
## {0, buf, buf+len} at -14080/-14072/-14064(%rbp), shifts the remaining
## arguments down one register slot, and calls load_jpeg_image.
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
## Probe/allocate the large frame page by page (Darwin chkstk).
movl $14096, %eax ## imm = 0x3710
callq ____chkstk_darwin
subq %rax, %rsp
movq $0, -14080(%rbp)
movq %rdi, -14072(%rbp) ## context.img = buf
movslq %esi, %rax
addq %rdi, %rax
movq %rax, -14064(%rbp) ## context.img_end = buf + len
leaq -14096(%rbp), %rdi ## rdi = &context
movq %rdx, %rsi ## x
movq %rcx, %rdx ## y
movq %r8, %rcx ## comp
movl %r9d, %r8d ## req_comp
callq _load_jpeg_image
addq $14096, %rsp ## imm = 0x3710
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _stbi_png_test_memory ## -- Begin function stbi_png_test_memory
.p2align 4, 0x90
_stbi_png_test_memory: ## @stbi_png_test_memory
## int stbi_png_test_memory(const uint8_t *buf, int len)
## Returns 1 if the buffer begins with the 8-byte PNG signature
## 89 50 4E 47 0D 0A 1A 0A ("\x89PNG\r\n\x1a\n"), with a length guard
## before each byte. On mismatch sets _failure_reason to L_.str.68 and
## returns 0.
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
testl %esi, %esi
jle LBB21_16 ## empty buffer
## %bb.1:
cmpb $-119, (%rdi) ## 0x89
jne LBB21_16
## %bb.2:
cmpl $1, %esi ## need a second byte
je LBB21_16
## %bb.3:
cmpb $80, 1(%rdi) ## 'P'
jne LBB21_16
## %bb.4:
cmpl $3, %esi
jl LBB21_16
## %bb.5:
cmpb $78, 2(%rdi) ## 'N'
jne LBB21_16
## %bb.6:
cmpl $4, %esi
jl LBB21_16
## %bb.7:
cmpb $71, 3(%rdi) ## 'G'
jne LBB21_16
## %bb.8:
cmpl $5, %esi
jl LBB21_16
## %bb.9:
cmpb $13, 4(%rdi) ## '\r'
jne LBB21_16
## %bb.10:
cmpl $6, %esi
jl LBB21_16
## %bb.11:
cmpb $10, 5(%rdi) ## '\n'
jne LBB21_16
## %bb.12:
cmpl $7, %esi
jl LBB21_16
## %bb.13:
cmpb $26, 6(%rdi) ## 0x1A
jne LBB21_16
## %bb.14:
cmpl $8, %esi
jl LBB21_16
## %bb.15:
movl $1, %eax ## speculative success return value
cmpb $10, 7(%rdi) ## final '\n'
je LBB21_17
LBB21_16:
leaq L_.str.68(%rip), %rax
movq %rax, _failure_reason(%rip)
xorl %eax, %eax
LBB21_17:
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _stbi_png_load_from_memory ## -- Begin function stbi_png_load_from_memory
.p2align 4, 0x90
_stbi_png_load_from_memory: ## @stbi_png_load_from_memory
## stbi_png_load_from_memory(buf, len, x, y, comp, req_comp)
## Wrapper: builds an on-stack context {0, buf, buf+len} at
## -48/-40/-32(%rbp), shifts the remaining args down one register slot,
## and calls do_png.
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
subq $64, %rsp
movq $0, -48(%rbp)
movq %rdi, -40(%rbp) ## context.img = buf
movslq %esi, %rax
addq %rdi, %rax
movq %rax, -32(%rbp) ## context.img_end = buf + len
leaq -64(%rbp), %rdi ## rdi = &context
movq %rdx, %rsi ## x
movq %rcx, %rdx ## y
movq %r8, %rcx ## comp
movl %r9d, %r8d ## req_comp
callq _do_png
addq $64, %rsp
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _stbi_bmp_test_memory ## -- Begin function stbi_bmp_test_memory
.p2align 4, 0x90
_stbi_bmp_test_memory: ## @stbi_bmp_test_memory
## int stbi_bmp_test_memory(const uint8_t *buf, int len)
## Wrapper: wraps the buffer in an on-stack context {0, buf, buf+len}
## and delegates the actual signature check to bmp_test.
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
subq $48, %rsp
movq $0, -24(%rbp)
movq %rdi, -16(%rbp) ## context.img = buf
movslq %esi, %rax
addq %rdi, %rax
movq %rax, -8(%rbp) ## context.img_end = buf + len
leaq -40(%rbp), %rdi ## rdi = &context
callq _bmp_test
addq $48, %rsp
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _stbi_bmp_load_from_memory ## -- Begin function stbi_bmp_load_from_memory
.p2align 4, 0x90
_stbi_bmp_load_from_memory: ## @stbi_bmp_load_from_memory
## stbi_bmp_load_from_memory(buf, len, x, y, comp, req_comp)
## Wrapper: builds context {0, buf, buf+len}, shifts the remaining args
## down one register slot, and calls bmp_load.
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
subq $48, %rsp
movq $0, -24(%rbp)
movq %rdi, -16(%rbp) ## context.img = buf
movslq %esi, %rax
addq %rdi, %rax
movq %rax, -8(%rbp) ## context.img_end = buf + len
leaq -40(%rbp), %rdi ## rdi = &context
movq %rdx, %rsi ## x
movq %rcx, %rdx ## y
movq %r8, %rcx ## comp
movl %r9d, %r8d ## req_comp
callq _bmp_load
addq $48, %rsp
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _stbi_psd_test_memory ## -- Begin function stbi_psd_test_memory
.p2align 4, 0x90
_stbi_psd_test_memory: ## @stbi_psd_test_memory
## int stbi_psd_test_memory(const uint8_t *buf, int len)
## Returns 1 iff the first big-endian u32 of the buffer (read via get32
## on an on-stack context) equals 0x38425053, the '8BPS' PSD signature.
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
subq $48, %rsp
movq $0, -24(%rbp)
movq %rdi, -16(%rbp) ## context.img = buf
movslq %esi, %rax
addq %rdi, %rax
movq %rax, -8(%rbp) ## context.img_end = buf + len
leaq -40(%rbp), %rdi ## rdi = &context
callq _get32
xorl %ecx, %ecx
cmpl $943870035, %eax ## imm = 0x38425053 ('8BPS')
sete %cl
movl %ecx, %eax ## 1 if signature matched, else 0
addq $48, %rsp
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _stbi_psd_load_from_memory ## -- Begin function stbi_psd_load_from_memory
.p2align 4, 0x90
_stbi_psd_load_from_memory: ## @stbi_psd_load_from_memory
## stbi_psd_load_from_memory(buf, len, x, y, comp, req_comp)
## Wrapper: builds context {0, buf, buf+len}, shifts the remaining args
## down one register slot, and calls psd_load.
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
subq $48, %rsp
movq $0, -24(%rbp)
movq %rdi, -16(%rbp) ## context.img = buf
movslq %esi, %rax
addq %rdi, %rax
movq %rax, -8(%rbp) ## context.img_end = buf + len
leaq -40(%rbp), %rdi ## rdi = &context
movq %rdx, %rsi ## x
movq %rcx, %rdx ## y
movq %r8, %rcx ## comp
movl %r9d, %r8d ## req_comp
callq _psd_load
addq $48, %rsp
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _stbi_hdr_test_memory ## -- Begin function stbi_hdr_test_memory
.p2align 4, 0x90
_stbi_hdr_test_memory: ## @stbi_hdr_test_memory
## int stbi_hdr_test_memory(const uint8_t *buf, int len)
## Returns 1 iff the buffer begins with the 11-byte Radiance HDR header
## "#?RADIANCE\n", with a length guard before each byte. Leaf function;
## unlike the other test routines it does not set _failure_reason.
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
xorl %eax, %eax ## default return = 0
testl %esi, %esi
jle LBB27_22
## %bb.1:
cmpb $35, (%rdi) ## '#'
jne LBB27_22
## %bb.2:
cmpl $1, %esi
je LBB27_22
## %bb.3:
cmpb $63, 1(%rdi) ## '?'
jne LBB27_22
## %bb.4:
cmpl $3, %esi
jl LBB27_22
## %bb.5:
cmpb $82, 2(%rdi) ## 'R'
jne LBB27_22
## %bb.6:
cmpl $4, %esi
jl LBB27_22
## %bb.7:
cmpb $65, 3(%rdi) ## 'A'
jne LBB27_22
## %bb.8:
cmpl $5, %esi
jl LBB27_22
## %bb.9:
cmpb $68, 4(%rdi) ## 'D'
jne LBB27_22
## %bb.10:
cmpl $6, %esi
jl LBB27_22
## %bb.11:
cmpb $73, 5(%rdi) ## 'I'
jne LBB27_22
## %bb.12:
cmpl $7, %esi
jl LBB27_22
## %bb.13:
cmpb $65, 6(%rdi) ## 'A'
jne LBB27_22
## %bb.14:
cmpl $8, %esi
jl LBB27_22
## %bb.15:
cmpb $78, 7(%rdi) ## 'N'
jne LBB27_22
## %bb.16:
cmpl $9, %esi
jl LBB27_22
## %bb.17:
cmpb $67, 8(%rdi) ## 'C'
jne LBB27_22
## %bb.18:
cmpl $10, %esi
jl LBB27_22
## %bb.19:
cmpb $69, 9(%rdi) ## 'E'
jne LBB27_22
## %bb.20:
cmpl $11, %esi
jl LBB27_22
## %bb.21:
xorl %eax, %eax
cmpb $10, 10(%rdi) ## trailing '\n'
sete %al
LBB27_22:
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _stbi_hdr_load_from_memory ## -- Begin function stbi_hdr_load_from_memory
.p2align 4, 0x90
_stbi_hdr_load_from_memory: ## @stbi_hdr_load_from_memory
## stbi_hdr_load_from_memory(buf, len, x, y, comp, req_comp)
## Wrapper: builds context {0, buf, buf+len}, shifts the remaining args
## down one register slot, and calls hdr_load.
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
subq $48, %rsp
movq $0, -24(%rbp)
movq %rdi, -16(%rbp) ## context.img = buf
movslq %esi, %rax
addq %rdi, %rax
movq %rax, -8(%rbp) ## context.img_end = buf + len
leaq -40(%rbp), %rdi ## rdi = &context
movq %rdx, %rsi ## x
movq %rcx, %rdx ## y
movq %r8, %rcx ## comp
movl %r9d, %r8d ## req_comp
callq _hdr_load
addq $48, %rsp
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _stbi_tga_test_memory ## -- Begin function stbi_tga_test_memory
.p2align 4, 0x90
_stbi_tga_test_memory: ## @stbi_tga_test_memory
## int stbi_tga_test_memory(const uint8_t *buf, int len)
## Wrapper: wraps the buffer in an on-stack context {0, buf, buf+len}
## and delegates the actual header check to tga_test.
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
subq $48, %rsp
movq $0, -24(%rbp)
movq %rdi, -16(%rbp) ## context.img = buf
movslq %esi, %rax
addq %rdi, %rax
movq %rax, -8(%rbp) ## context.img_end = buf + len
leaq -40(%rbp), %rdi ## rdi = &context
callq _tga_test
addq $48, %rsp
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _stbi_tga_load_from_memory ## -- Begin function stbi_tga_load_from_memory
.p2align 4, 0x90
_stbi_tga_load_from_memory: ## @stbi_tga_load_from_memory
## stbi_tga_load_from_memory(buf, len, x, y, comp, req_comp)
## Wrapper: builds context {0, buf, buf+len}, shifts the remaining args
## down one register slot, and calls tga_load.
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
subq $48, %rsp
movq $0, -24(%rbp)
movq %rdi, -16(%rbp) ## context.img = buf
movslq %esi, %rax
addq %rdi, %rax
movq %rax, -8(%rbp) ## context.img_end = buf + len
leaq -40(%rbp), %rdi ## rdi = &context
movq %rdx, %rsi ## x
movq %rcx, %rdx ## y
movq %r8, %rcx ## comp
movl %r9d, %r8d ## req_comp
callq _tga_load
addq $48, %rsp
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _stbi_loadf ## -- Begin function stbi_loadf
.p2align 4, 0x90
_stbi_loadf: ## @stbi_loadf
## float *stbi_loadf(const char *filename, int *x, int *y, int *comp,
##                   int req_comp)
## Opens the file (mode string L_.str — presumably "rb"); if fopen fails,
## sets _failure_reason to L_.str.1 and returns NULL. Otherwise:
##   - if stbi_hdr_test_file says it is a Radiance HDR, decode floats
##     directly with hdr_load on an on-stack context;
##   - otherwise decode 8-bit with stbi_load_from_file and convert the
##     result with ldr_to_hdr(data, *x, *y, comp) — where comp is *comp
##     when req_comp == 0, else req_comp;
##   - on 8-bit decode failure set _failure_reason to L_.str.2.
## The file is closed on every path that opened it.
## Register roles: r13 = x, r12 = y, rbx = comp then result, r14d = req_comp,
## r15 = FILE*.
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $40, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %r8d, %r14d ## r14d = req_comp
movq %rcx, %rbx ## rbx = comp
movq %rdx, %r12 ## r12 = y
movq %rsi, %r13 ## r13 = x
leaq L_.str(%rip), %rsi ## fopen mode string
callq _fopen
testq %rax, %rax
je LBB31_1
## %bb.2:
movq %rax, %r15 ## r15 = FILE*
movq %rax, %rdi
callq _stbi_hdr_test_file
testl %eax, %eax
je LBB31_4
## %bb.3:
## HDR file: decode floats directly via a FILE-backed context.
movq %r15, -64(%rbp) ## context.img_file = FILE*
leaq -80(%rbp), %rdi ## rdi = &context
movq %r13, %rsi
movq %r12, %rdx
movq %rbx, %rcx
movl %r14d, %r8d
callq _hdr_load
movq %rax, %rbx ## rbx = result
jmp LBB31_9
LBB31_1:
## fopen failed: record the reason (L_.str.1) and return NULL.
leaq L_.str.1(%rip), %rax
movq %rax, _failure_reason(%rip)
xorl %ebx, %ebx
jmp LBB31_10
LBB31_4:
## Not HDR: load as 8-bit, then up-convert to float.
movq %r15, %rdi
movq %r13, %rsi
movq %r12, %rdx
movq %rbx, %rcx
movl %r14d, %r8d
callq _stbi_load_from_file
testq %rax, %rax
je LBB31_8
## %bb.5:
movl (%r13), %esi ## width
movl (%r12), %edx ## height
testl %r14d, %r14d ## req_comp == 0 -> use file's comp
jne LBB31_7
## %bb.6:
movl (%rbx), %r14d
LBB31_7:
movq %rax, %rdi
movl %r14d, %ecx
callq _ldr_to_hdr
movq %rax, %rbx
jmp LBB31_9
LBB31_8:
## 8-bit decode failed: record the reason (L_.str.2), return NULL.
leaq L_.str.2(%rip), %rax
movq %rax, _failure_reason(%rip)
xorl %ebx, %ebx
LBB31_9:
movq %r15, %rdi
callq _fclose
LBB31_10:
movq %rbx, %rax ## return the float image (or NULL)
addq $40, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _stbi_loadf_from_file ## -- Begin function stbi_loadf_from_file
.p2align 4, 0x90
_stbi_loadf_from_file: ## @stbi_loadf_from_file
## float *stbi_loadf_from_file(FILE *f, int *x, int *y, int *comp,
##                             int req_comp)
## Same logic as stbi_loadf but on an already-open FILE*, which it does
## NOT close: HDR files decode floats directly via hdr_load; everything
## else decodes 8-bit via stbi_load_from_file and tail-calls
## ldr_to_hdr(data, *x, *y, comp). On 8-bit decode failure, sets
## _failure_reason to L_.str.2 and returns NULL.
## Register roles: rbx = f, r13 = x, r12 = y, r14 = comp, r15d = req_comp.
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $40, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %r8d, %r15d ## r15d = req_comp
movq %rcx, %r14 ## r14 = comp
movq %rdx, %r12 ## r12 = y
movq %rsi, %r13 ## r13 = x
movq %rdi, %rbx ## rbx = f
callq _stbi_hdr_test_file
testl %eax, %eax
je LBB32_2
## %bb.1:
## HDR stream: decode floats via a FILE-backed on-stack context.
movq %rbx, -64(%rbp) ## context.img_file = f
leaq -80(%rbp), %rdi ## rdi = &context
movq %r13, %rsi
movq %r12, %rdx
movq %r14, %rcx
movl %r15d, %r8d
callq _hdr_load
LBB32_7:
addq $40, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB32_2:
## Not HDR: 8-bit load, then convert to float.
movq %rbx, %rdi
movq %r13, %rsi
movq %r12, %rdx
movq %r14, %rcx
movl %r15d, %r8d
callq _stbi_load_from_file
testq %rax, %rax
je LBB32_6
## %bb.3:
movl (%r13), %esi ## width
movl (%r12), %edx ## height
testl %r15d, %r15d ## req_comp == 0 -> use file's comp
jne LBB32_5
## %bb.4:
movl (%r14), %r15d
LBB32_5:
movq %rax, %rdi
movl %r15d, %ecx
addq $40, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp _ldr_to_hdr ## TAILCALL
LBB32_6:
## 8-bit decode failed: record the reason (L_.str.2), return NULL.
leaq L_.str.2(%rip), %rax
movq %rax, _failure_reason(%rip)
xorl %eax, %eax
jmp LBB32_7
.cfi_endproc
## -- End function
.section __TEXT,__literal16,16byte_literals
.p2align 4 ## -- Begin function ldr_to_hdr
LCPI33_0: ## <4 x float> {255.0, 255.0, 255.0, 255.0} — divisor for u8 -> [0,1]
.long 0x437f0000 ## float 255
.long 0x437f0000 ## float 255
.long 0x437f0000 ## float 255
.long 0x437f0000 ## float 255
.section __TEXT,__literal4,4byte_literals
.p2align 2
LCPI33_1: ## scalar float 255.0 for the non-vector tail
.long 0x437f0000 ## float 255
.section __TEXT,__text,regular,pure_instructions
.p2align 4, 0x90
_ldr_to_hdr: ## @ldr_to_hdr
## float *ldr_to_hdr(uint8_t *data, int x, int y, int comp)
## Converts an 8-bit image to float: mallocs x*y*comp floats, and for each
## pixel applies  out = pow(in/255, l2h_gamma) * l2h_scale  to the color
## channels; when comp is even, the last channel (alpha) is converted as
## just in/255 (no gamma, no scale). The number of gamma-corrected channels
## per pixel is  n = comp + (comp & 1) - 1  (all channels for odd comp).
## Frees the INPUT buffer on every path, including malloc failure, where
## it also sets _failure_reason (L_.str.5 — presumably out-of-memory) and
## returns NULL.
## Key registers/slots: r12 = input data, r13 = output cursor, r14 = input
## row... per-pixel cursor; -64(%rbp) = output base, -48(%rbp) = (double)
## l2h_gamma, -80(%rbp) = comp, -88(%rbp) = comp (sign-extended), r15 = n.
## NOTE(review): the size computation x*y*comp is done in 32-bit
## multiplies, so very large images could overflow before the malloc.
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $184, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
## kill: def $ecx killed $ecx def $rcx
movl %esi, %r13d
movq %rdi, %r12 ## r12 = input data
imull %edx, %r13d ## r13d = x*y (pixel count)
movl %r13d, %eax
movq %rcx, -80(%rbp) ## 8-byte Spill
imull %ecx, %eax ## eax = x*y*comp (total samples)
movslq %eax, %rdi
shlq $2, %rdi ## byte size = samples * sizeof(float)
callq _malloc
testq %rax, %rax
je LBB33_1
## %bb.2:
movq %rax, -64(%rbp) ## 8-byte Spill   (output base pointer)
testl %r13d, %r13d
jle LBB33_16 ## zero pixels: nothing to convert
## %bb.3:
## Loop setup: n = comp + (comp&1) - 1 gamma channels; precompute
## (double)l2h_gamma, broadcast l2h_scale, and the 4-wide trip count.
movq -80(%rbp), %rcx ## 8-byte Reload
movl %ecx, %eax
andl $1, %eax ## comp & 1
leal (%rcx,%rax), %edx
decl %edx ## edx = n = comp + (comp&1) - 1
movss _l2h_gamma(%rip), %xmm0 ## xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0 ## (double)l2h_gamma, spilled to -48(%rbp)
movss _l2h_scale(%rip), %xmm1 ## xmm1 = mem[0],zero,zero,zero
movaps %xmm1, -192(%rbp) ## 16-byte Spill  (scalar l2h_scale)
movslq %ecx, %rax
movl %r13d, %ecx
movq %rcx, -144(%rbp) ## 8-byte Spill  (pixel count)
movl %edx, %r15d ## r15 = n (loop bound for scalar tail)
movl %r15d, %ecx
andl $-4, %ecx ## vectorized channel count (n & ~3)
movq %rcx, -120(%rbp) ## 8-byte Spill
shufps $0, %xmm1, %xmm1 ## xmm1 = xmm1[0,0,0,0]  (broadcast l2h_scale)
movaps %xmm1, -176(%rbp) ## 16-byte Spill
movq %rax, -88(%rbp) ## 8-byte Spill   (comp, input stride in bytes)
leaq (,%rax,4), %rax
movq %rax, -136(%rbp) ## 8-byte Spill  (comp*4, output stride in bytes)
xorl %esi, %esi ## rsi = pixel index i
movss LCPI33_1(%rip), %xmm2 ## xmm2 = mem[0],zero,zero,zero  (255.0f)
movaps LCPI33_0(%rip), %xmm3 ## xmm3 = [2.55E+2,2.55E+2,2.55E+2,2.55E+2]
movq %r12, %r14 ## r14 = per-pixel input cursor
movq -64(%rbp), %r13 ## 8-byte Reload  (r13 = per-pixel output cursor)
movl %edx, -52(%rbp) ## 4-byte Spill   (n)
movq %r12, -72(%rbp) ## 8-byte Spill   (input base)
movsd %xmm0, -48(%rbp) ## 8-byte Spill ((double)l2h_gamma)
jmp LBB33_4
.p2align 4, 0x90
LBB33_15: ## in Loop: Header=BB33_4 Depth=1
## Advance both cursors one pixel; loop until all pixels converted.
incq %rsi
addq -136(%rbp), %r13 ## 8-byte Folded Reload
addq -88(%rbp), %r14 ## 8-byte Folded Reload
cmpq -144(%rbp), %rsi ## 8-byte Folded Reload
je LBB33_16
LBB33_4: ## =>This Loop Header: Depth=1
## Per-pixel: vectorized 4-channel gamma loop, scalar remainder, then
## the optional linear alpha channel.
## Child Loop BB33_8 Depth 2
## Child Loop BB33_11 Depth 2
movl $0, %eax
testl %edx, %edx
jle LBB33_13 ## n <= 0: only the alpha channel (if any)
## %bb.5: ## in Loop: Header=BB33_4 Depth=1
cmpl $4, %edx
jae LBB33_7 ## at least 4 channels: take the SIMD path
## %bb.6: ## in Loop: Header=BB33_4 Depth=1
xorl %ebx, %ebx
jmp LBB33_10
.p2align 4, 0x90
LBB33_7: ## in Loop: Header=BB33_4 Depth=1
movq %rsi, -128(%rbp) ## 8-byte Spill  (pixel index, clobbered by pow)
xorl %r12d, %r12d ## r12 = channel index (SIMD loop)
movq -120(%rbp), %rbx ## 8-byte Reload  (n & ~3)
.p2align 4, 0x90
LBB33_8: ## Parent Loop BB33_4 Depth=1
## => This Inner Loop Header: Depth=2
## Load 4 u8 channels, normalize to [0,1], then 4 scalar pow() calls
## (pow clobbers all xmm regs, hence the spill/reload choreography).
pmovzxbd (%r14,%r12), %xmm0 ## xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
cvtdq2ps %xmm0, %xmm0
divps %xmm3, %xmm0 ## v = in / 255
cvtps2pd %xmm0, %xmm1
movaps %xmm1, -112(%rbp) ## 16-byte Spill
movhlps %xmm0, %xmm0 ## xmm0 = xmm0[1,1]
cvtps2pd %xmm0, %xmm0
movaps %xmm0, -160(%rbp) ## 16-byte Spill
movaps %xmm1, %xmm0
movsd -48(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
callq _pow ## pow(v0, gamma)
movaps %xmm0, -224(%rbp) ## 16-byte Spill
movaps -112(%rbp), %xmm0 ## 16-byte Reload
movhlps %xmm0, %xmm0 ## xmm0 = xmm0[1,1]
movsd -48(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
callq _pow ## pow(v1, gamma)
movaps %xmm0, -112(%rbp) ## 16-byte Spill
movaps -160(%rbp), %xmm0 ## 16-byte Reload
movsd -48(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
callq _pow ## pow(v2, gamma)
movaps %xmm0, -208(%rbp) ## 16-byte Spill
movapd -160(%rbp), %xmm0 ## 16-byte Reload
unpckhpd %xmm0, %xmm0 ## xmm0 = xmm0[1,1]
movsd -48(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
callq _pow ## pow(v3, gamma)
## Repack the four doubles into one <4 x float>, scale, and store.
movaps LCPI33_0(%rip), %xmm3 ## xmm3 = [2.55E+2,2.55E+2,2.55E+2,2.55E+2]
movapd -224(%rbp), %xmm1 ## 16-byte Reload
unpcklpd -112(%rbp), %xmm1 ## 16-byte Folded Reload
## xmm1 = xmm1[0],mem[0]
cvtpd2ps %xmm1, %xmm1
movapd -208(%rbp), %xmm2 ## 16-byte Reload
unpcklpd %xmm0, %xmm2 ## xmm2 = xmm2[0],xmm0[0]
cvtpd2ps %xmm2, %xmm0
unpcklpd %xmm0, %xmm1 ## xmm1 = xmm1[0],xmm0[0]
mulps -176(%rbp), %xmm1 ## 16-byte Folded Reload   (* l2h_scale)
movups %xmm1, (%r13,%r12,4)
addq $4, %r12
cmpq %r12, %rbx
jne LBB33_8
## %bb.9: ## in Loop: Header=BB33_4 Depth=1
## SIMD loop done: restore clobbered values; skip scalar tail if n%4==0.
movq %rbx, %rcx
movl -52(%rbp), %edx ## 4-byte Reload
movl %edx, %eax
cmpq %r15, %rbx
movq -72(%rbp), %r12 ## 8-byte Reload
movsd -48(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movq -128(%rbp), %rsi ## 8-byte Reload
movss LCPI33_1(%rip), %xmm2 ## xmm2 = mem[0],zero,zero,zero
je LBB33_13
LBB33_10: ## in Loop: Header=BB33_4 Depth=1
movq %rsi, %r12 ## stash pixel index (rsi clobbered by pow)
.p2align 4, 0x90
LBB33_11: ## Parent Loop BB33_4 Depth=1
## => This Inner Loop Header: Depth=2
## Scalar tail: out[c] = pow(in[c]/255, gamma) * l2h_scale.
movzbl (%r14,%rbx), %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
divss %xmm2, %xmm0
cvtss2sd %xmm0, %xmm0
movsd -48(%rbp), %xmm1 ## 8-byte Reload
## xmm1 = mem[0],zero
callq _pow
movss LCPI33_1(%rip), %xmm2 ## xmm2 = mem[0],zero,zero,zero
cvtsd2ss %xmm0, %xmm0
mulss -192(%rbp), %xmm0 ## 16-byte Folded Reload   (* l2h_scale)
movss %xmm0, (%r13,%rbx,4)
incq %rbx
cmpq %rbx, %r15
jne LBB33_11
## %bb.12: ## in Loop: Header=BB33_4 Depth=1
movl -52(%rbp), %edx ## 4-byte Reload
movl %edx, %eax
movsd -48(%rbp), %xmm0 ## 8-byte Reload
## xmm0 = mem[0],zero
movq %r12, %rsi
movaps LCPI33_0(%rip), %xmm3 ## xmm3 = [2.55E+2,2.55E+2,2.55E+2,2.55E+2]
movq -72(%rbp), %r12 ## 8-byte Reload
LBB33_13: ## in Loop: Header=BB33_4 Depth=1
## Even comp: convert the final (alpha) channel linearly, in/255 only.
cmpl -80(%rbp), %eax ## 4-byte Folded Reload   (n < comp ?)
jge LBB33_15
## %bb.14: ## in Loop: Header=BB33_4 Depth=1
movq %rsi, %rcx
imulq -88(%rbp), %rcx ## 8-byte Folded Reload  (pixel*comp)
cltq
addq %rcx, %rax ## sample index = pixel*comp + n
movzbl (%r12,%rax), %ecx
movaps %xmm0, %xmm1 ## keep gamma live around the convert
xorps %xmm0, %xmm0
cvtsi2ss %ecx, %xmm0
divss %xmm2, %xmm0 ## alpha / 255
movq -64(%rbp), %rcx ## 8-byte Reload
movss %xmm0, (%rcx,%rax,4)
movaps %xmm1, %xmm0
jmp LBB33_15
LBB33_16:
## Success: free the input image and return the float buffer.
movq %r12, %rdi
callq _free
movq -64(%rbp), %rax ## 8-byte Reload
jmp LBB33_17
LBB33_1:
## malloc failed: free the input, record the reason (L_.str.5), NULL.
movq %r12, %rdi
callq _free
leaq L_.str.5(%rip), %rax
movq %rax, _failure_reason(%rip)
xorl %eax, %eax
LBB33_17:
addq $184, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _stbi_loadf_from_memory ## -- Begin function stbi_loadf_from_memory
.p2align 4, 0x90
_stbi_loadf_from_memory: ## @stbi_loadf_from_memory
## float *stbi_loadf_from_memory(const uint8_t *buf, int len, int *x,
##                               int *y, int *comp, int req_comp)
## HDR-aware float loader for memory buffers: if stbi_hdr_test_memory
## matches, decode floats directly via hdr_load on an on-stack context;
## otherwise decode 8-bit via stbi_load_from_memory and tail-call
## ldr_to_hdr(data, *x, *y, comp) — comp is *comp when req_comp == 0,
## else req_comp. On 8-bit decode failure, sets _failure_reason to
## L_.str.2 and returns NULL.
## Register roles: rbx = buf, r14d = len, r13 = x, r12 = y, r15 = comp,
## req_comp spilled to -44(%rbp).
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $56, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %r9d, -44(%rbp) ## 4-byte Spill  (req_comp)
movq %r8, %r15 ## r15 = comp
movq %rcx, %r12 ## r12 = y
movq %rdx, %r13 ## r13 = x
movl %esi, %r14d ## r14d = len
movq %rdi, %rbx ## rbx = buf
callq _stbi_hdr_test_memory
testl %eax, %eax
je LBB34_2
## %bb.1:
## HDR buffer: decode floats via context {0, buf, buf+len}.
movq $0, -72(%rbp)
movq %rbx, -64(%rbp)
movslq %r14d, %rax
addq %rbx, %rax
movq %rax, -56(%rbp)
leaq -88(%rbp), %rdi ## rdi = &context
movq %r13, %rsi
movq %r12, %rdx
movq %r15, %rcx
movl -44(%rbp), %r8d ## 4-byte Reload
callq _hdr_load
LBB34_7:
addq $56, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB34_2:
## Not HDR: 8-bit load, then convert to float.
movq %rbx, %rdi
movl %r14d, %esi
movq %r13, %rdx
movq %r12, %rcx
movq %r15, %r8
movl -44(%rbp), %ebx ## 4-byte Reload  (rbx now = req_comp)
movl %ebx, %r9d
callq _stbi_load_from_memory
testq %rax, %rax
je LBB34_6
## %bb.3:
movl (%r13), %esi ## width
movl (%r12), %edx ## height
testl %ebx, %ebx ## req_comp == 0 -> use file's comp
jne LBB34_5
## %bb.4:
movl (%r15), %ebx
LBB34_5:
movq %rax, %rdi
movl %ebx, %ecx
addq $56, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp _ldr_to_hdr ## TAILCALL
LBB34_6:
## 8-bit decode failed: record the reason (L_.str.2), return NULL.
leaq L_.str.2(%rip), %rax
movq %rax, _failure_reason(%rip)
xorl %eax, %eax
jmp LBB34_7
.cfi_endproc
## -- End function
.globl _stbi_is_hdr_from_memory ## -- Begin function stbi_is_hdr_from_memory
.p2align 4, 0x90
_stbi_is_hdr_from_memory: ## @stbi_is_hdr_from_memory
## int stbi_is_hdr_from_memory(const uint8_t *buf, int len)
## Pure alias: arguments already match, so tail-call the HDR sniffer.
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
popq %rbp
jmp _stbi_hdr_test_memory ## TAILCALL
.cfi_endproc
## -- End function
.globl _stbi_is_hdr ## -- Begin function stbi_is_hdr
.p2align 4, 0x90
_stbi_is_hdr: ## @stbi_is_hdr
## int stbi_is_hdr(const char *filename)
## fopen(filename, L_.str /* presumably "rb" */); if that fails return 0
## (no failure_reason is set here). Otherwise run stbi_hdr_test_file,
## close the file, and return the test result.
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
leaq L_.str(%rip), %rsi ## fopen mode string
callq _fopen
testq %rax, %rax
je LBB36_2
## %bb.1:
movq %rax, %rbx ## rbx = FILE* (survives the calls)
movq %rax, %rdi
callq _stbi_hdr_test_file
movl %eax, %r14d ## r14d = result (survives fclose)
movq %rbx, %rdi
callq _fclose
movl %r14d, %eax
jmp LBB36_3
LBB36_2:
xorl %eax, %eax ## fopen failed: not an HDR
LBB36_3:
popq %rbx
popq %r14
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _stbi_is_hdr_from_file ## -- Begin function stbi_is_hdr_from_file
.p2align 4, 0x90
_stbi_is_hdr_from_file: ## @stbi_is_hdr_from_file
## int stbi_is_hdr_from_file(FILE *f)
## Pure alias: argument already matches, so tail-call the FILE sniffer.
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
popq %rbp
jmp _stbi_hdr_test_file ## TAILCALL
.cfi_endproc
## -- End function
.section __TEXT,__literal4,4byte_literals
.p2align 2 ## -- Begin function stbi_hdr_to_ldr_gamma
LCPI38_0: ## float 1.0 — numerator for the reciprocal below
.long 0x3f800000 ## float 1
.section __TEXT,__text,regular,pure_instructions
.globl _stbi_hdr_to_ldr_gamma
.p2align 4, 0x90
_stbi_hdr_to_ldr_gamma: ## @stbi_hdr_to_ldr_gamma
## void stbi_hdr_to_ldr_gamma(float gamma)
## Stores the reciprocal: _h2l_gamma_i = 1.0f / gamma. The inverse is
## cached so the HDR->LDR conversion can multiply instead of divide.
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movss LCPI38_0(%rip), %xmm1 ## xmm1 = mem[0],zero,zero,zero
divss %xmm0, %xmm1 ## 1.0f / gamma
movss %xmm1, _h2l_gamma_i(%rip)
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__literal4,4byte_literals
.p2align 2 ## -- Begin function stbi_hdr_to_ldr_scale
LCPI39_0: ## float 1.0 — numerator for the reciprocal below
.long 0x3f800000 ## float 1
.section __TEXT,__text,regular,pure_instructions
.globl _stbi_hdr_to_ldr_scale
.p2align 4, 0x90
_stbi_hdr_to_ldr_scale: ## @stbi_hdr_to_ldr_scale
## void stbi_hdr_to_ldr_scale(float scale)
## Stores the reciprocal: _h2l_scale_i = 1.0f / scale. The inverse is
## cached so the HDR->LDR conversion can multiply instead of divide.
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movss LCPI39_0(%rip), %xmm1 ## xmm1 = mem[0],zero,zero,zero
divss %xmm0, %xmm1 ## 1.0f / scale
movss %xmm1, _h2l_scale_i(%rip)
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _stbi_ldr_to_hdr_gamma ## -- Begin function stbi_ldr_to_hdr_gamma
.p2align 4, 0x90
_stbi_ldr_to_hdr_gamma: ## @stbi_ldr_to_hdr_gamma
## void stbi_ldr_to_hdr_gamma(float gamma)
## Stores gamma directly into the global _l2h_gamma used by ldr_to_hdr.
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movss %xmm0, _l2h_gamma(%rip)
popq %rbp
retq
.cfi_endproc
## -- End function
.globl _stbi_ldr_to_hdr_scale ## -- Begin function stbi_ldr_to_hdr_scale
.p2align 4, 0x90
_stbi_ldr_to_hdr_scale: ## @stbi_ldr_to_hdr_scale
## void stbi_ldr_to_hdr_scale(float scale)
## Stores scale directly into the global _l2h_scale used by ldr_to_hdr.
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movss %xmm0, _l2h_scale(%rip)
popq %rbp
retq
.cfi_endproc
## -- End function
.p2align 4, 0x90 ## -- Begin function load_jpeg_image
_load_jpeg_image: ## @load_jpeg_image
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $392, %rsp ## imm = 0x188
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -48(%rbp)
movl %r8d, -300(%rbp) ## 4-byte Spill
cmpl $5, %r8d
jb LBB42_3
## %bb.1:
leaq L_.str.6(%rip), %rax
LBB42_2:
movq %rax, _failure_reason(%rip)
jmp LBB42_226
LBB42_3:
movq %rdx, %r13
movq %rdi, %r12
movq %rcx, -344(%rbp) ## 8-byte Spill
movq %rsi, -328(%rbp) ## 8-byte Spill
movl $0, 8(%rdi)
movl $0, 14084(%rdi)
xorl %esi, %esi
callq _decode_jpeg_header
testl %eax, %eax
je LBB42_144
## %bb.4:
movb 14056(%r12), %al
cmpb $-1, %al
je LBB42_6
## %bb.5:
movb $-1, 14056(%r12)
jmp LBB42_11
LBB42_6:
movq 16(%r12), %rdi
testq %rdi, %rdi
je LBB42_8
## %bb.7:
callq _fgetc
xorl %ecx, %ecx
cmpl $-1, %eax
cmovnel %eax, %ecx
jmp LBB42_10
LBB42_8:
movq 24(%r12), %rcx
movb $-1, %al
cmpq 32(%r12), %rcx
jae LBB42_11
## %bb.9:
leaq 1(%rcx), %rax
movq %rax, 24(%r12)
movzbl (%rcx), %ecx
LBB42_10:
movb $-1, %al
cmpb $-1, %cl
je LBB42_122
LBB42_11:
leaq 13760(%r12), %rcx
movq %rcx, -368(%rbp) ## 8-byte Spill
LBB42_12: ## =>This Loop Header: Depth=1
## Child Loop BB42_13 Depth 2
## Child Loop BB42_34 Depth 3
## Child Loop BB42_44 Depth 4
## Child Loop BB42_80 Depth 3
## Child Loop BB42_82 Depth 4
## Child Loop BB42_84 Depth 5
## Child Loop BB42_87 Depth 6
## Child Loop BB42_90 Depth 7
## Child Loop BB42_104 Depth 3
## Child Loop BB42_107 Depth 4
## Child Loop BB42_59 Depth 3
xorl %r14d, %r14d
LBB42_13: ## Parent Loop BB42_12 Depth=1
## => This Loop Header: Depth=2
## Child Loop BB42_34 Depth 3
## Child Loop BB42_44 Depth 4
## Child Loop BB42_80 Depth 3
## Child Loop BB42_82 Depth 4
## Child Loop BB42_84 Depth 5
## Child Loop BB42_87 Depth 6
## Child Loop BB42_90 Depth 7
## Child Loop BB42_104 Depth 3
## Child Loop BB42_107 Depth 4
## Child Loop BB42_59 Depth 3
cmpb $-38, %al
je LBB42_19
## %bb.14: ## in Loop: Header=BB42_13 Depth=2
cmpb $-39, %al
je LBB42_129
## %bb.15: ## in Loop: Header=BB42_13 Depth=2
movzbl %al, %esi
movq %r12, %rdi
callq _process_marker
testl %eax, %eax
je LBB42_144
LBB42_16: ## in Loop: Header=BB42_13 Depth=2
movb 14056(%r12), %al
cmpb $-1, %al
jne LBB42_121
## %bb.17: ## in Loop: Header=BB42_13 Depth=2
movq 16(%r12), %rdi
testq %rdi, %rdi
movl $0, %r14d
je LBB42_54
## %bb.18: ## in Loop: Header=BB42_13 Depth=2
callq _fgetc
movl %eax, %ecx
cmpl $-1, %eax
cmovel %r14d, %ecx
jmp LBB42_56
.p2align 4, 0x90
LBB42_19: ## in Loop: Header=BB42_13 Depth=2
movq 16(%r12), %rdi
testq %rdi, %rdi
je LBB42_23
## %bb.20: ## in Loop: Header=BB42_13 Depth=2
callq _fgetc
movl %eax, %ebx
cmpl $-1, %eax
cmovel %r14d, %ebx
movq 16(%r12), %rdi
testq %rdi, %rdi
je LBB42_25
## %bb.21: ## in Loop: Header=BB42_13 Depth=2
callq _fgetc
cmpl $-1, %eax
cmovel %r14d, %eax
movq 16(%r12), %rdi
shll $8, %ebx
addl %eax, %ebx
testq %rdi, %rdi
je LBB42_53
## %bb.22: ## in Loop: Header=BB42_13 Depth=2
callq _fgetc
## kill: def $eax killed $eax def $rax
cmpl $-1, %eax
cmovel %r14d, %eax
jmp LBB42_30
LBB42_23: ## in Loop: Header=BB42_13 Depth=2
movq 24(%r12), %rax
movq 32(%r12), %rcx
xorl %ebx, %ebx
cmpq %rcx, %rax
jae LBB42_26
## %bb.24: ## in Loop: Header=BB42_13 Depth=2
leaq 1(%rax), %rdx
movq %rdx, 24(%r12)
movzbl (%rax), %ebx
movq %rdx, %rax
jmp LBB42_26
LBB42_25: ## in Loop: Header=BB42_13 Depth=2
movq 24(%r12), %rax
movq 32(%r12), %rcx
LBB42_26: ## in Loop: Header=BB42_13 Depth=2
xorl %edx, %edx
cmpq %rcx, %rax
jae LBB42_28
## %bb.27: ## in Loop: Header=BB42_13 Depth=2
leaq 1(%rax), %rsi
movq %rsi, 24(%r12)
movzbl (%rax), %edx
movq %rsi, %rax
LBB42_28: ## in Loop: Header=BB42_13 Depth=2
shll $8, %ebx
orl %edx, %ebx
cmpq %rcx, %rax
jae LBB42_143
LBB42_29: ## in Loop: Header=BB42_13 Depth=2
leaq 1(%rax), %rcx
movq %rcx, 24(%r12)
movzbl (%rax), %eax
LBB42_30: ## in Loop: Header=BB42_13 Depth=2
movl %eax, 14064(%r12)
leal -5(%rax), %ecx
cmpl $-4, %ecx
jb LBB42_140
## %bb.31: ## in Loop: Header=BB42_13 Depth=2
cmpl 8(%r12), %eax
jg LBB42_140
## %bb.32: ## in Loop: Header=BB42_13 Depth=2
addl %eax, %eax
addl $6, %eax
cmpl %eax, %ebx
jne LBB42_142
## %bb.33: ## in Loop: Header=BB42_13 Depth=2
xorl %r14d, %r14d
LBB42_34: ## Parent Loop BB42_12 Depth=1
## Parent Loop BB42_13 Depth=2
## => This Loop Header: Depth=3
## Child Loop BB42_44 Depth 4
movq 16(%r12), %rdi
testq %rdi, %rdi
je LBB42_37
## %bb.35: ## in Loop: Header=BB42_34 Depth=3
callq _fgetc
movl %eax, %ebx
cmpl $-1, %eax
movl $0, %r15d
cmovel %r15d, %ebx
movq 16(%r12), %rdi
testq %rdi, %rdi
je LBB42_39
## %bb.36: ## in Loop: Header=BB42_34 Depth=3
callq _fgetc
cmpl $-1, %eax
cmovel %r15d, %eax
jmp LBB42_42
.p2align 4, 0x90
LBB42_37: ## in Loop: Header=BB42_34 Depth=3
movq 24(%r12), %rcx
movq 32(%r12), %rdx
xorl %ebx, %ebx
cmpq %rdx, %rcx
jae LBB42_40
## %bb.38: ## in Loop: Header=BB42_34 Depth=3
leaq 1(%rcx), %rax
movq %rax, 24(%r12)
movzbl (%rcx), %ebx
movq %rax, %rcx
jmp LBB42_40
LBB42_39: ## in Loop: Header=BB42_34 Depth=3
movq 24(%r12), %rcx
movq 32(%r12), %rdx
LBB42_40: ## in Loop: Header=BB42_34 Depth=3
xorl %eax, %eax
cmpq %rdx, %rcx
jae LBB42_42
## %bb.41: ## in Loop: Header=BB42_34 Depth=3
leaq 1(%rcx), %rax
movq %rax, 24(%r12)
movzbl (%rcx), %eax
LBB42_42: ## in Loop: Header=BB42_34 Depth=3
movl 8(%r12), %edx
testl %edx, %edx
jle LBB42_46
## %bb.43: ## in Loop: Header=BB42_34 Depth=3
movq -368(%rbp), %rsi ## 8-byte Reload
xorl %ecx, %ecx
.p2align 4, 0x90
LBB42_44: ## Parent Loop BB42_12 Depth=1
## Parent Loop BB42_13 Depth=2
## Parent Loop BB42_34 Depth=3
## => This Inner Loop Header: Depth=4
cmpl %ebx, (%rsi)
je LBB42_47
## %bb.45: ## in Loop: Header=BB42_44 Depth=4
incq %rcx
addq $72, %rsi
cmpq %rcx, %rdx
jne LBB42_44
jmp LBB42_144
.p2align 4, 0x90
LBB42_46: ## in Loop: Header=BB42_34 Depth=3
xorl %ecx, %ecx
LBB42_47: ## in Loop: Header=BB42_34 Depth=3
cmpl %edx, %ecx
je LBB42_144
## %bb.48: ## in Loop: Header=BB42_34 Depth=3
movl %eax, %esi
sarl $4, %esi
movl %ecx, %edx
leaq (%rdx,%rdx,8), %rdx
movl %esi, 13776(%r12,%rdx,8)
cmpl $63, %eax
jg LBB42_127
## %bb.49: ## in Loop: Header=BB42_34 Depth=3
andl $15, %eax
movl %eax, 13780(%r12,%rdx,8)
cmpl $3, %eax
ja LBB42_128
## %bb.50: ## in Loop: Header=BB42_34 Depth=3
movl %ecx, 14068(%r12,%r14,4)
incq %r14
movslq 14064(%r12), %rax
cmpq %rax, %r14
jl LBB42_34
## %bb.51: ## in Loop: Header=BB42_13 Depth=2
movq 16(%r12), %rdi
testq %rdi, %rdi
je LBB42_63
## %bb.52: ## in Loop: Header=BB42_13 Depth=2
callq _fgetc
cmpl $-1, %eax
jne LBB42_65
jmp LBB42_66
LBB42_53: ## in Loop: Header=BB42_13 Depth=2
movq 24(%r12), %rax
movq 32(%r12), %rcx
cmpq %rcx, %rax
jb LBB42_29
jmp LBB42_143
LBB42_54: ## in Loop: Header=BB42_13 Depth=2
movq 24(%r12), %rcx
movb $-1, %al
cmpq 32(%r12), %rcx
jae LBB42_13
## %bb.55: ## in Loop: Header=BB42_13 Depth=2
leaq 1(%rcx), %rax
movq %rax, 24(%r12)
movzbl (%rcx), %ecx
LBB42_56: ## in Loop: Header=BB42_13 Depth=2
movb $-1, %al
cmpb $-1, %cl
jne LBB42_13
jmp LBB42_59
.p2align 4, 0x90
LBB42_57: ## in Loop: Header=BB42_59 Depth=3
callq _fgetc
cmpl $-1, %eax
cmovel %r14d, %eax
cmpb $-1, %al
jne LBB42_13
LBB42_59: ## Parent Loop BB42_12 Depth=1
## Parent Loop BB42_13 Depth=2
## => This Inner Loop Header: Depth=3
movq 16(%r12), %rdi
testq %rdi, %rdi
jne LBB42_57
## %bb.60: ## in Loop: Header=BB42_59 Depth=3
movq 24(%r12), %rax
cmpq 32(%r12), %rax
jae LBB42_62
## %bb.61: ## in Loop: Header=BB42_59 Depth=3
leaq 1(%rax), %rcx
movq %rcx, 24(%r12)
movzbl (%rax), %eax
cmpb $-1, %al
je LBB42_59
jmp LBB42_13
LBB42_62: ## in Loop: Header=BB42_13 Depth=2
xorl %eax, %eax
jmp LBB42_13
LBB42_63: ## in Loop: Header=BB42_13 Depth=2
movq 24(%r12), %rax
cmpq 32(%r12), %rax
jae LBB42_66
## %bb.64: ## in Loop: Header=BB42_13 Depth=2
leaq 1(%rax), %rcx
movq %rcx, 24(%r12)
movzbl (%rax), %eax
LBB42_65: ## in Loop: Header=BB42_13 Depth=2
testl %eax, %eax
jne LBB42_225
LBB42_66: ## in Loop: Header=BB42_13 Depth=2
movq 16(%r12), %rdi
testq %rdi, %rdi
je LBB42_69
## %bb.67: ## in Loop: Header=BB42_13 Depth=2
callq _fgetc
movq 16(%r12), %rdi
testq %rdi, %rdi
je LBB42_71
## %bb.68: ## in Loop: Header=BB42_13 Depth=2
callq _fgetc
cmpl $-1, %eax
jne LBB42_74
jmp LBB42_75
LBB42_69: ## in Loop: Header=BB42_13 Depth=2
movq 24(%r12), %rax
movq 32(%r12), %rcx
cmpq %rcx, %rax
jae LBB42_72
## %bb.70: ## in Loop: Header=BB42_13 Depth=2
incq %rax
movq %rax, 24(%r12)
jmp LBB42_72
LBB42_71: ## in Loop: Header=BB42_13 Depth=2
movq 24(%r12), %rax
movq 32(%r12), %rcx
LBB42_72: ## in Loop: Header=BB42_13 Depth=2
cmpq %rcx, %rax
jae LBB42_75
## %bb.73: ## in Loop: Header=BB42_13 Depth=2
leaq 1(%rax), %rcx
movq %rcx, 24(%r12)
movzbl (%rax), %eax
LBB42_74: ## in Loop: Header=BB42_13 Depth=2
testl %eax, %eax
jne LBB42_225
LBB42_75: ## in Loop: Header=BB42_13 Depth=2
movq $0, 14048(%r12)
movl $0, 14060(%r12)
movl $0, 13928(%r12)
movl $0, 13856(%r12)
movl $0, 13784(%r12)
movb $-1, 14056(%r12)
movl 14064(%r12), %ecx
movl 14084(%r12), %eax
testl %eax, %eax
movl $2147483647, %edx ## imm = 0x7FFFFFFF
cmovel %edx, %eax
movl %eax, 14088(%r12)
cmpl $1, %ecx
jne LBB42_78
## %bb.76: ## in Loop: Header=BB42_13 Depth=2
movq %r13, -288(%rbp) ## 8-byte Spill
movslq 14068(%r12), %rax
movq %rax, -296(%rbp) ## 8-byte Spill
leaq (%rax,%rax,8), %rax
movl 13792(%r12,%rax,8), %ecx
testl %ecx, %ecx
jle LBB42_115
## %bb.77: ## in Loop: Header=BB42_13 Depth=2
addl $7, %ecx
sarl $3, %ecx
movl 13788(%r12,%rax,8), %edx
leal 7(%rdx), %esi
sarl $3, %esi
leaq (%r12,%rax,8), %r15
addq $13776, %r15 ## imm = 0x35D0
cmpl $2, %esi
movl $1, %eax
cmovll %eax, %esi
cmpl $2, %ecx
movl $1, %eax
cmovgel %ecx, %eax
movl %eax, -352(%rbp) ## 4-byte Spill
shlq $3, %rsi
movq %rsi, -336(%rbp) ## 8-byte Spill
movb $1, %al
xorl %r14d, %r14d
xorl %esi, %esi
movl %ecx, -320(%rbp) ## 4-byte Spill
movq %rdx, -360(%rbp) ## 8-byte Spill
jmp LBB42_104
LBB42_78: ## in Loop: Header=BB42_13 Depth=2
movl 13748(%r12), %edx
testl %edx, %edx
jle LBB42_16
## %bb.79: ## in Loop: Header=BB42_13 Depth=2
movl 13744(%r12), %ecx
movl $0, -312(%rbp) ## 4-byte Folded Spill
LBB42_80: ## Parent Loop BB42_12 Depth=1
## Parent Loop BB42_13 Depth=2
## => This Loop Header: Depth=3
## Child Loop BB42_82 Depth 4
## Child Loop BB42_84 Depth 5
## Child Loop BB42_87 Depth 6
## Child Loop BB42_90 Depth 7
testl %ecx, %ecx
jle LBB42_102
## %bb.81: ## in Loop: Header=BB42_80 Depth=3
movl $0, -276(%rbp) ## 4-byte Folded Spill
movq %r13, -288(%rbp) ## 8-byte Spill
LBB42_82: ## Parent Loop BB42_12 Depth=1
## Parent Loop BB42_13 Depth=2
## Parent Loop BB42_80 Depth=3
## => This Loop Header: Depth=4
## Child Loop BB42_84 Depth 5
## Child Loop BB42_87 Depth 6
## Child Loop BB42_90 Depth 7
movl 14064(%r12), %ecx
testl %ecx, %ecx
jle LBB42_95
## %bb.83: ## in Loop: Header=BB42_82 Depth=4
xorl %edx, %edx
LBB42_84: ## Parent Loop BB42_12 Depth=1
## Parent Loop BB42_13 Depth=2
## Parent Loop BB42_80 Depth=3
## Parent Loop BB42_82 Depth=4
## => This Loop Header: Depth=5
## Child Loop BB42_87 Depth 6
## Child Loop BB42_90 Depth 7
movslq 14068(%r12,%rdx,4), %rax
movq %rax, -336(%rbp) ## 8-byte Spill
leaq (%rax,%rax,8), %rax
movl 13768(%r12,%rax,8), %r14d
testl %r14d, %r14d
jle LBB42_93
## %bb.85: ## in Loop: Header=BB42_84 Depth=5
movq %rdx, -320(%rbp) ## 8-byte Spill
leaq (%r12,%rax,8), %r13
addq $13768, %r13 ## imm = 0x35C8
movl 13764(%r12,%rax,8), %r15d
movl $0, -296(%rbp) ## 4-byte Folded Spill
jmp LBB42_87
LBB42_86: ## in Loop: Header=BB42_87 Depth=6
movl -296(%rbp), %eax ## 4-byte Reload
incl %eax
movl %eax, -296(%rbp) ## 4-byte Spill
cmpl %r14d, %eax
jge LBB42_92
LBB42_87: ## Parent Loop BB42_12 Depth=1
## Parent Loop BB42_13 Depth=2
## Parent Loop BB42_80 Depth=3
## Parent Loop BB42_82 Depth=4
## Parent Loop BB42_84 Depth=5
## => This Loop Header: Depth=6
## Child Loop BB42_90 Depth 7
testl %r15d, %r15d
jle LBB42_86
## %bb.88: ## in Loop: Header=BB42_87 Depth=6
movslq 8(%r13), %rax
imulq $1680, %rax, %rax ## imm = 0x690
leaq (%r12,%rax), %rdx
addq $40, %rdx
movslq 12(%r13), %rax
imulq $1680, %rax, %rax ## imm = 0x690
leaq (%r12,%rax), %rcx
addq $6760, %rcx ## imm = 0x1A68
movq %r12, %rdi
leaq -272(%rbp), %rsi
movq -336(%rbp), %r8 ## 8-byte Reload
## kill: def $r8d killed $r8d killed $r8
callq _decode_block
xorl %ecx, %ecx
testl %eax, %eax
je LBB42_114
## %bb.89: ## in Loop: Header=BB42_87 Depth=6
movl $1, %ebx
LBB42_90: ## Parent Loop BB42_12 Depth=1
## Parent Loop BB42_13 Depth=2
## Parent Loop BB42_80 Depth=3
## Parent Loop BB42_82 Depth=4
## Parent Loop BB42_84 Depth=5
## Parent Loop BB42_87 Depth=6
## => This Inner Loop Header: Depth=7
imull -312(%rbp), %r14d ## 4-byte Folded Reload
addl -296(%rbp), %r14d ## 4-byte Folded Reload
imull -276(%rbp), %r15d ## 4-byte Folded Reload
addl %ebx, %r15d
leal -8(,%r15,8), %eax
movl 28(%r13), %esi
imull %esi, %r14d
shll $3, %r14d
movslq %r14d, %rcx
addq 40(%r13), %rcx
movslq %eax, %rdi
addq %rcx, %rdi
movslq 4(%r13), %rax
shlq $6, %rax
leaq (%r12,%rax), %rcx
addq $13480, %rcx ## imm = 0x34A8
leaq -272(%rbp), %rdx
callq _idct_block
movl -4(%r13), %r15d
movl (%r13), %r14d
cmpl %r15d, %ebx
jge LBB42_86
## %bb.91: ## in Loop: Header=BB42_90 Depth=7
movslq 8(%r13), %rax
imulq $1680, %rax, %rax ## imm = 0x690
leaq (%r12,%rax), %rdx
addq $40, %rdx
movslq 12(%r13), %rax
imulq $1680, %rax, %rax ## imm = 0x690
leaq (%r12,%rax), %rcx
addq $6760, %rcx ## imm = 0x1A68
movq %r12, %rdi
leaq -272(%rbp), %rsi
movq -336(%rbp), %r8 ## 8-byte Reload
## kill: def $r8d killed $r8d killed $r8
callq _decode_block
incl %ebx
testl %eax, %eax
jne LBB42_90
jmp LBB42_113
LBB42_92: ## in Loop: Header=BB42_84 Depth=5
movl 14064(%r12), %ecx
movq -288(%rbp), %r13 ## 8-byte Reload
movq -320(%rbp), %rdx ## 8-byte Reload
LBB42_93: ## in Loop: Header=BB42_84 Depth=5
incq %rdx
movslq %ecx, %rax
cmpq %rax, %rdx
jl LBB42_84
## %bb.94: ## in Loop: Header=BB42_82 Depth=4
movl 14088(%r12), %eax
LBB42_95: ## in Loop: Header=BB42_82 Depth=4
leal -1(%rax), %ecx
movl %ecx, 14088(%r12)
cmpl $1, %eax
movl %ecx, %eax
jg LBB42_100
## %bb.96: ## in Loop: Header=BB42_82 Depth=4
cmpl $23, 14052(%r12)
jg LBB42_98
## %bb.97: ## in Loop: Header=BB42_82 Depth=4
movq %r12, %rdi
callq _grow_buffer_unsafe
LBB42_98: ## in Loop: Header=BB42_82 Depth=4
movb 14056(%r12), %al
andb $-8, %al
cmpb $-48, %al
jne LBB42_120
## %bb.99: ## in Loop: Header=BB42_82 Depth=4
movq $0, 14048(%r12)
movl $0, 14060(%r12)
movl $0, 13928(%r12)
movl $0, 13856(%r12)
movl $0, 13784(%r12)
movb $-1, 14056(%r12)
movl 14084(%r12), %eax
testl %eax, %eax
movl $2147483647, %ecx ## imm = 0x7FFFFFFF
cmovel %ecx, %eax
movl %eax, 14088(%r12)
LBB42_100: ## in Loop: Header=BB42_82 Depth=4
movl -276(%rbp), %edx ## 4-byte Reload
incl %edx
movl 13744(%r12), %ecx
movl %edx, -276(%rbp) ## 4-byte Spill
cmpl %ecx, %edx
jl LBB42_82
## %bb.101: ## in Loop: Header=BB42_80 Depth=3
movl 13748(%r12), %edx
LBB42_102: ## in Loop: Header=BB42_80 Depth=3
movl -312(%rbp), %esi ## 4-byte Reload
incl %esi
movl %esi, -312(%rbp) ## 4-byte Spill
cmpl %edx, %esi
jl LBB42_80
jmp LBB42_16
LBB42_103: ## in Loop: Header=BB42_104 Depth=3
movl -312(%rbp), %esi ## 4-byte Reload
incl %esi
movl -320(%rbp), %ecx ## 4-byte Reload
cmpl %ecx, %esi
setl %al
addl $8, %r14d
cmpl -352(%rbp), %esi ## 4-byte Folded Reload
movq -360(%rbp), %rdx ## 8-byte Reload
je LBB42_115
LBB42_104: ## Parent Loop BB42_12 Depth=1
## Parent Loop BB42_13 Depth=2
## => This Loop Header: Depth=3
## Child Loop BB42_107 Depth 4
movl %esi, -312(%rbp) ## 4-byte Spill
movb %al, -276(%rbp) ## 1-byte Spill
testl %edx, %edx
jle LBB42_103
## %bb.105: ## in Loop: Header=BB42_104 Depth=3
xorl %ebx, %ebx
jmp LBB42_107
LBB42_106: ## in Loop: Header=BB42_107 Depth=4
addq $8, %rbx
cmpq %rbx, -336(%rbp) ## 8-byte Folded Reload
je LBB42_103
LBB42_107: ## Parent Loop BB42_12 Depth=1
## Parent Loop BB42_13 Depth=2
## Parent Loop BB42_104 Depth=3
## => This Inner Loop Header: Depth=4
movslq (%r15), %rax
imulq $1680, %rax, %rax ## imm = 0x690
leaq (%r12,%rax), %rdx
addq $40, %rdx
movslq 4(%r15), %rax
imulq $1680, %rax, %rax ## imm = 0x690
leaq (%r12,%rax), %rcx
addq $6760, %rcx ## imm = 0x1A68
movq %r12, %rdi
leaq -272(%rbp), %r13
movq %r13, %rsi
movq -296(%rbp), %r8 ## 8-byte Reload
## kill: def $r8d killed $r8d killed $r8
callq _decode_block
testl %eax, %eax
je LBB42_116
## %bb.108: ## in Loop: Header=BB42_107 Depth=4
movl 20(%r15), %esi
movl %r14d, %eax
imull %esi, %eax
movslq %eax, %rdi
addq 32(%r15), %rdi
addq %rbx, %rdi
movslq -4(%r15), %rax
shlq $6, %rax
leaq (%r12,%rax), %rcx
addq $13480, %rcx ## imm = 0x34A8
movq %r13, %rdx
callq _idct_block
movl 14088(%r12), %eax
leal -1(%rax), %ecx
movl %ecx, 14088(%r12)
cmpl $1, %eax
jg LBB42_106
## %bb.109: ## in Loop: Header=BB42_107 Depth=4
cmpl $23, 14052(%r12)
jg LBB42_111
## %bb.110: ## in Loop: Header=BB42_107 Depth=4
movq %r12, %rdi
callq _grow_buffer_unsafe
LBB42_111: ## in Loop: Header=BB42_107 Depth=4
movzbl 14056(%r12), %eax
andb $-8, %al
cmpb $-48, %al
jne LBB42_117
## %bb.112: ## in Loop: Header=BB42_107 Depth=4
movq $0, 14048(%r12)
movl $0, 14060(%r12)
movl $0, 13928(%r12)
movl $0, 13856(%r12)
movl $0, 13784(%r12)
movb $-1, 14056(%r12)
movl 14084(%r12), %eax
testl %eax, %eax
movl $2147483647, %ecx ## imm = 0x7FFFFFFF
cmovel %ecx, %eax
movl %eax, 14088(%r12)
jmp LBB42_106
LBB42_113: ## in Loop: Header=BB42_13 Depth=2
xorl %ecx, %ecx
LBB42_114: ## in Loop: Header=BB42_13 Depth=2
movq -288(%rbp), %r13 ## 8-byte Reload
jmp LBB42_119
LBB42_115: ## in Loop: Header=BB42_13 Depth=2
movq -288(%rbp), %r13 ## 8-byte Reload
jmp LBB42_16
LBB42_116: ## in Loop: Header=BB42_13 Depth=2
xorl %ecx, %ecx
jmp LBB42_118
LBB42_117: ## in Loop: Header=BB42_13 Depth=2
movl $1, %ecx
LBB42_118: ## in Loop: Header=BB42_13 Depth=2
testb $1, -276(%rbp) ## 1-byte Folded Reload
movq -288(%rbp), %r13 ## 8-byte Reload
je LBB42_16
LBB42_119: ## in Loop: Header=BB42_13 Depth=2
testl %ecx, %ecx
jne LBB42_16
jmp LBB42_144
LBB42_120: ## in Loop: Header=BB42_13 Depth=2
movl $1, %ecx
jmp LBB42_114
LBB42_121: ## in Loop: Header=BB42_12 Depth=1
movb $-1, 14056(%r12)
jmp LBB42_12
LBB42_122:
xorl %ebx, %ebx
jmp LBB42_124
.p2align 4, 0x90
LBB42_123: ## in Loop: Header=BB42_124 Depth=1
callq _fgetc
cmpl $-1, %eax
cmovel %ebx, %eax
cmpb $-1, %al
jne LBB42_11
LBB42_124: ## =>This Inner Loop Header: Depth=1
movq 16(%r12), %rdi
testq %rdi, %rdi
jne LBB42_123
## %bb.125: ## in Loop: Header=BB42_124 Depth=1
movq 24(%r12), %rax
cmpq 32(%r12), %rax
jae LBB42_138
## %bb.126: ## in Loop: Header=BB42_124 Depth=1
leaq 1(%rax), %rcx
movq %rcx, 24(%r12)
movzbl (%rax), %eax
cmpb $-1, %al
je LBB42_124
jmp LBB42_11
LBB42_140:
leaq L_.str.7(%rip), %rax
jmp LBB42_141
LBB42_127:
leaq L_.str.9(%rip), %rax
jmp LBB42_141
LBB42_128:
leaq L_.str.10(%rip), %rax
jmp LBB42_141
LBB42_129:
movq %r13, -288(%rbp) ## 8-byte Spill
movl -300(%rbp), %eax ## 4-byte Reload
testl %eax, %eax
movl 8(%r12), %ecx
cmovel %ecx, %eax
movl %eax, -300(%rbp) ## 4-byte Spill
cmpl $3, %eax
movl $1, %eax
cmovgel %ecx, %eax
cmpl $3, %ecx
movl %ecx, -296(%rbp) ## 4-byte Spill
cmovnel %ecx, %eax
movl %eax, -360(%rbp) ## 4-byte Spill
testl %eax, %eax
jle LBB42_151
## %bb.130:
movl -360(%rbp), %eax ## 4-byte Reload
movl (%r12), %r13d
leal 3(%r13), %ecx
movq %rcx, -368(%rbp) ## 8-byte Spill
decl %r13d
leaq -232(%rbp), %rbx
shlq $3, %rax
leaq (%rax,%rax,8), %r15
xorl %r14d, %r14d
jmp LBB42_134
LBB42_131: ## in Loop: Header=BB42_134 Depth=1
cmpl $2, %ecx
leaq _resample_row_generic(%rip), %rax
leaq _resample_row_hv_2(%rip), %rdx
cmoveq %rdx, %rax
cmpl $1, %ecx
leaq _resample_row_h_2(%rip), %rcx
LBB42_132: ## in Loop: Header=BB42_134 Depth=1
cmoveq %rcx, %rax
LBB42_133: ## in Loop: Header=BB42_134 Depth=1
movq %rax, -40(%rbx)
addq $48, %rbx
addq $72, %r14
cmpq %r14, %r15
je LBB42_151
LBB42_134: ## =>This Inner Loop Header: Depth=1
movq -368(%rbp), %rdi ## 8-byte Reload
callq _malloc
movq %rax, 13824(%r12,%r14)
testq %rax, %rax
je LBB42_197
## %bb.135: ## in Loop: Header=BB42_134 Depth=1
movl 13736(%r12), %eax
cltd
idivl 13764(%r12,%r14)
movl %eax, %esi
movl %eax, -16(%rbx)
movl 13740(%r12), %eax
cltd
idivl 13768(%r12,%r14)
movl %eax, %ecx
movl %eax, -12(%rbx)
sarl %eax
movl %eax, -4(%rbx)
leal (%rsi,%r13), %eax
xorl %edx, %edx
divl %esi
movl %eax, -8(%rbx)
movl $0, (%rbx)
movq 13808(%r12,%r14), %rax
movq %rax, -24(%rbx)
movq %rax, -32(%rbx)
cmpl $2, %esi
je LBB42_131
## %bb.136: ## in Loop: Header=BB42_134 Depth=1
leaq _resample_row_generic(%rip), %rax
cmpl $1, %esi
jne LBB42_133
## %bb.137: ## in Loop: Header=BB42_134 Depth=1
cmpl $2, %ecx
leaq _resample_row_generic(%rip), %rax
leaq _resample_row_v_2(%rip), %rdx
cmoveq %rdx, %rax
cmpl $1, %ecx
leaq _resample_row_1(%rip), %rcx
jmp LBB42_132
LBB42_138:
xorl %eax, %eax
jmp LBB42_11
LBB42_142:
leaq L_.str.8(%rip), %rax
LBB42_141:
movq %rax, _failure_reason(%rip)
cmpl $0, 8(%r12)
jg LBB42_145
LBB42_226:
xorl %r15d, %r15d
LBB42_227:
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
cmpq -48(%rbp), %rax
jne LBB42_229
## %bb.228:
movq %r15, %rax
addq $392, %rsp ## imm = 0x188
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB42_143:
movl $0, 14064(%r12)
leaq L_.str.7(%rip), %rax
movq %rax, _failure_reason(%rip)
LBB42_144:
cmpl $0, 8(%r12)
jle LBB42_226
LBB42_145:
leaq 13824(%r12), %rbx
xorl %r15d, %r15d
xorl %r14d, %r14d
jmp LBB42_147
.p2align 4, 0x90
LBB42_146: ## in Loop: Header=BB42_147 Depth=1
incq %r14
movslq 8(%r12), %rax
addq $72, %rbx
cmpq %rax, %r14
jge LBB42_227
LBB42_147: ## =>This Inner Loop Header: Depth=1
cmpq $0, -16(%rbx)
je LBB42_149
## %bb.148: ## in Loop: Header=BB42_147 Depth=1
movq -8(%rbx), %rdi
callq _free
movq $0, -16(%rbx)
LBB42_149: ## in Loop: Header=BB42_147 Depth=1
movq (%rbx), %rdi
testq %rdi, %rdi
je LBB42_146
## %bb.150: ## in Loop: Header=BB42_147 Depth=1
callq _free
movq $0, (%rbx)
jmp LBB42_146
LBB42_151:
movl (%r12), %edi
movl 4(%r12), %ebx
movq %rdi, -320(%rbp) ## 8-byte Spill
## kill: def $edi killed $edi killed $rdi def $rdi
movl -300(%rbp), %r14d ## 4-byte Reload
imull %r14d, %edi
imull %ebx, %edi
incl %edi
callq _malloc
testq %rax, %rax
je LBB42_205
## %bb.152:
testl %ebx, %ebx
movq %rax, -352(%rbp) ## 8-byte Spill
je LBB42_213
## %bb.153:
movslq %r14d, %rcx
movl %r14d, %ebx
movl -360(%rbp), %edx ## 4-byte Reload
movq %rdx, -368(%rbp) ## 8-byte Spill
leaq 13792(%r12), %rdx
movq %rdx, -424(%rbp) ## 8-byte Spill
leaq 48(%rax), %rdx
movq %rdx, -384(%rbp) ## 8-byte Spill
movq %rcx, -416(%rbp) ## 8-byte Spill
leaq (%rcx,%rcx,2), %rcx
movq %rcx, -408(%rbp) ## 8-byte Spill
addq $3, %rax
movq %rax, -392(%rbp) ## 8-byte Spill
xorl %eax, %eax
movq %rax, -312(%rbp) ## 8-byte Spill
xorl %ecx, %ecx
movq -320(%rbp), %r10 ## 8-byte Reload
movq %r12, -376(%rbp) ## 8-byte Spill
movq %rbx, -400(%rbp) ## 8-byte Spill
jmp LBB42_156
LBB42_154: ## in Loop: Header=BB42_156 Depth=1
xorl %r10d, %r10d
LBB42_155: ## in Loop: Header=BB42_156 Depth=1
movl -276(%rbp), %ecx ## 4-byte Reload
incl %ecx
movl -300(%rbp), %r14d ## 4-byte Reload
movq -312(%rbp), %rax ## 8-byte Reload
addl %r14d, %eax
movq %rax, -312(%rbp) ## 8-byte Spill
cmpl 4(%r12), %ecx
jae LBB42_204
LBB42_156: ## =>This Loop Header: Depth=1
## Child Loop BB42_159 Depth 2
## Child Loop BB42_187 Depth 2
## Child Loop BB42_195 Depth 2
## Child Loop BB42_181 Depth 2
## Child Loop BB42_184 Depth 2
## Child Loop BB42_166 Depth 2
movl %ecx, -276(%rbp) ## 4-byte Spill
movl %ecx, %eax
imull %r14d, %eax
movq %r10, -320(%rbp) ## 8-byte Spill
imull %r10d, %eax
movq %rax, -296(%rbp) ## 8-byte Spill
cmpl $0, -360(%rbp) ## 4-byte Folded Reload
jle LBB42_162
## %bb.157: ## in Loop: Header=BB42_156 Depth=1
movq -424(%rbp), %r13 ## 8-byte Reload
leaq -232(%rbp), %rbx
xorl %r14d, %r14d
jmp LBB42_159
.p2align 4, 0x90
LBB42_158: ## in Loop: Header=BB42_159 Depth=2
incq %r14
addq $48, %rbx
addq $72, %r13
cmpq %r14, -368(%rbp) ## 8-byte Folded Reload
je LBB42_162
LBB42_159: ## Parent Loop BB42_156 Depth=1
## => This Inner Loop Header: Depth=2
movl -4(%rbx), %r12d
movl -16(%rbx), %r8d
movl -12(%rbx), %r15d
movl %r15d, %eax
sarl %eax
xorl %ecx, %ecx
xorl %edx, %edx
cmpl %eax, %r12d
setl %cl
setge %dl
movq 32(%r13), %rdi
movq -32(%rbx,%rdx,8), %rsi
movq -32(%rbx,%rcx,8), %rdx
movl -8(%rbx), %ecx
callq *-40(%rbx)
movq %rax, -80(%rbp,%r14,8)
incl %r12d
movl %r12d, -4(%rbx)
cmpl %r15d, %r12d
jl LBB42_158
## %bb.160: ## in Loop: Header=BB42_159 Depth=2
movl $0, -4(%rbx)
movq -24(%rbx), %rax
movq %rax, -32(%rbx)
movl (%rbx), %ecx
incl %ecx
movl %ecx, (%rbx)
cmpl (%r13), %ecx
jge LBB42_158
## %bb.161: ## in Loop: Header=BB42_159 Depth=2
movslq 4(%r13), %rcx
addq %rcx, %rax
movq %rax, -24(%rbx)
jmp LBB42_158
LBB42_162: ## in Loop: Header=BB42_156 Depth=1
movq -296(%rbp), %rdi ## 8-byte Reload
addq -352(%rbp), %rdi ## 8-byte Folded Reload
movq -80(%rbp), %rsi
movl -300(%rbp), %eax ## 4-byte Reload
cmpl $3, %eax
jl LBB42_168
## %bb.163: ## in Loop: Header=BB42_156 Depth=1
movq -376(%rbp), %r12 ## 8-byte Reload
cmpl $3, 8(%r12)
jne LBB42_171
## %bb.164: ## in Loop: Header=BB42_156 Depth=1
movl (%r12), %r10d
testl %r10d, %r10d
jle LBB42_177
## %bb.165: ## in Loop: Header=BB42_156 Depth=1
movq -72(%rbp), %rax
movq %rax, -296(%rbp) ## 8-byte Spill
movq -64(%rbp), %rax
movq %rax, -336(%rbp) ## 8-byte Spill
movq -320(%rbp), %r9 ## 8-byte Reload
imull -312(%rbp), %r9d ## 4-byte Folded Reload
addq -392(%rbp), %r9 ## 8-byte Folded Reload
xorl %edi, %edi
movq -400(%rbp), %r8 ## 8-byte Reload
.p2align 4, 0x90
LBB42_166: ## Parent Loop BB42_156 Depth=1
## => This Inner Loop Header: Depth=2
movzbl (%rsi,%rdi), %ebx
shll $16, %ebx
movq -336(%rbp), %rax ## 8-byte Reload
movzbl (%rax,%rdi), %ecx
addl $-128, %ecx
movq -296(%rbp), %rax ## 8-byte Reload
movzbl (%rax,%rdi), %edx
addl $-128, %edx
imull $91881, %ecx, %eax ## imm = 0x166E9
leal (%rax,%rbx), %r14d
addl $32768, %r14d ## imm = 0x8000
imull $-46802, %ecx, %ecx ## imm = 0xFFFF492E
leal (%rcx,%rbx), %r11d
addl $32768, %r11d ## imm = 0x8000
imull $-22554, %edx, %ecx ## imm = 0xA7E6
addl %r11d, %ecx
imull $116130, %edx, %edx ## imm = 0x1C5A2
leal (%rdx,%rbx), %r15d
addl $32768, %r15d ## imm = 0x8000
movl %r14d, %r13d
shrl $16, %r13d
movl %ecx, %r12d
shrl $16, %r12d
movl %r15d, %r11d
shrl $16, %r11d
testl %r14d, %r14d
setns %dl
negb %dl
testl %ecx, %ecx
setns %bl
negb %bl
testl %r15d, %r15d
setns %al
negb %al
cmpl $16777216, %r14d ## imm = 0x1000000
movzbl %dl, %edx
cmovbl %r13d, %edx
cmpl $16777216, %ecx ## imm = 0x1000000
movzbl %bl, %ecx
cmovbl %r12d, %ecx
cmpl $16777216, %r15d ## imm = 0x1000000
movb %dl, -3(%r9)
movb %cl, -2(%r9)
movzbl %al, %eax
cmovbl %r11d, %eax
movb %al, -1(%r9)
movb $-1, (%r9)
incq %rdi
addq %r8, %r9
cmpq %rdi, %r10
jne LBB42_166
## %bb.167: ## in Loop: Header=BB42_156 Depth=1
## kill: def $r10d killed $r10d killed $r10 def $r10
movq -376(%rbp), %r12 ## 8-byte Reload
jmp LBB42_178
LBB42_168: ## in Loop: Header=BB42_156 Depth=1
movq -376(%rbp), %r12 ## 8-byte Reload
movl (%r12), %r14d
cmpl $1, %eax
jne LBB42_174
## %bb.169: ## in Loop: Header=BB42_156 Depth=1
testl %r14d, %r14d
movq -344(%rbp), %r15 ## 8-byte Reload
movq -288(%rbp), %r11 ## 8-byte Reload
movq -328(%rbp), %r13 ## 8-byte Reload
je LBB42_154
## %bb.170: ## in Loop: Header=BB42_156 Depth=1
movq %r11, %rbx
movq %r14, %rdx
callq _memcpy
movq %rbx, %r11
movl %r14d, %r10d
jmp LBB42_155
LBB42_171: ## in Loop: Header=BB42_156 Depth=1
movl (%r12), %r10d
testq %r10, %r10
movq -344(%rbp), %r15 ## 8-byte Reload
movq -288(%rbp), %r11 ## 8-byte Reload
movq -416(%rbp), %rax ## 8-byte Reload
movq -408(%rbp), %r14 ## 8-byte Reload
je LBB42_179
## %bb.172: ## in Loop: Header=BB42_156 Depth=1
leaq -1(%r10), %rcx
movl %r10d, %r8d
andl $3, %r8d
cmpq $3, %rcx
jae LBB42_180
## %bb.173: ## in Loop: Header=BB42_156 Depth=1
xorl %ecx, %ecx
jmp LBB42_182
LBB42_174: ## in Loop: Header=BB42_156 Depth=1
testl %r14d, %r14d
movq -344(%rbp), %r15 ## 8-byte Reload
movq -288(%rbp), %r11 ## 8-byte Reload
movq -328(%rbp), %r13 ## 8-byte Reload
pcmpeqd %xmm2, %xmm2
je LBB42_154
## %bb.175: ## in Loop: Header=BB42_156 Depth=1
cmpl $16, %r14d
jae LBB42_185
## %bb.176: ## in Loop: Header=BB42_156 Depth=1
xorl %r8d, %r8d
jmp LBB42_194
LBB42_177: ## in Loop: Header=BB42_156 Depth=1
## kill: def $r10d killed $r10d killed $r10 def $r10
LBB42_178: ## in Loop: Header=BB42_156 Depth=1
movq -344(%rbp), %r15 ## 8-byte Reload
movq -288(%rbp), %r11 ## 8-byte Reload
movq -328(%rbp), %r13 ## 8-byte Reload
jmp LBB42_155
LBB42_179: ## in Loop: Header=BB42_156 Depth=1
xorl %r10d, %r10d
movq -328(%rbp), %r13 ## 8-byte Reload
jmp LBB42_155
LBB42_180: ## in Loop: Header=BB42_156 Depth=1
movl %r10d, %r9d
andl $-4, %r9d
xorl %ecx, %ecx
movq %rdi, %rdx
.p2align 4, 0x90
LBB42_181: ## Parent Loop BB42_156 Depth=1
## => This Inner Loop Header: Depth=2
movzbl (%rsi,%rcx), %ebx
movb %bl, 2(%rdx)
movb %bl, 1(%rdx)
movb %bl, (%rdx)
movb $-1, 3(%rdx)
leaq (%rdx,%rax), %rdi
movzbl 1(%rsi,%rcx), %ebx
movb %bl, 2(%rdx,%rax)
movb %bl, 1(%rdx,%rax)
movb %bl, (%rdx,%rax)
movb $-1, 3(%rdx,%rax)
addq %rax, %rdi
movzbl 2(%rsi,%rcx), %ebx
movb %bl, 2(%rdx,%rax,2)
movb %bl, 1(%rdx,%rax,2)
movb %bl, (%rdx,%rax,2)
movb $-1, 3(%rdx,%rax,2)
addq %rax, %rdi
movzbl 3(%rsi,%rcx), %ebx
movb %bl, 2(%rdx,%r14)
movb %bl, 1(%rdx,%r14)
movb %bl, (%rdx,%r14)
movb $-1, 3(%rdx,%r14)
addq %rax, %rdi
addq $4, %rcx
movq %rdi, %rdx
cmpq %rcx, %r9
jne LBB42_181
LBB42_182: ## in Loop: Header=BB42_156 Depth=1
testq %r8, %r8
je LBB42_188
## %bb.183: ## in Loop: Header=BB42_156 Depth=1
addq %rcx, %rsi
addq $3, %rdi
xorl %ecx, %ecx
movq -328(%rbp), %r13 ## 8-byte Reload
.p2align 4, 0x90
LBB42_184: ## Parent Loop BB42_156 Depth=1
## => This Inner Loop Header: Depth=2
movzbl (%rsi,%rcx), %edx
movb %dl, -1(%rdi)
movb %dl, -2(%rdi)
movb %dl, -3(%rdi)
movb $-1, (%rdi)
incq %rcx
addq %rax, %rdi
cmpq %rcx, %r8
jne LBB42_184
jmp LBB42_155
LBB42_185: ## in Loop: Header=BB42_156 Depth=1
movq %r11, %r9
movl %r14d, %r8d
andl $-16, %r8d
leaq -16(%r8), %rdx
movq %rdx, %r10
shrq $4, %r10
incq %r10
testq %rdx, %rdx
je LBB42_189
## %bb.186: ## in Loop: Header=BB42_156 Depth=1
movq %r10, %rcx
andq $-2, %rcx
movq -320(%rbp), %rax ## 8-byte Reload
imull -312(%rbp), %eax ## 4-byte Folded Reload
addq -384(%rbp), %rax ## 8-byte Folded Reload
xorl %edx, %edx
LBB42_187: ## Parent Loop BB42_156 Depth=1
## => This Inner Loop Header: Depth=2
movq (%rsi,%rdx), %xmm0 ## xmm0 = mem[0],zero
movq 8(%rsi,%rdx), %xmm1 ## xmm1 = mem[0],zero
punpcklbw %xmm2, %xmm0 ## xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
movdqu %xmm0, -48(%rax,%rdx,2)
punpcklbw %xmm2, %xmm1 ## xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
movdqu %xmm1, -32(%rax,%rdx,2)
movq 16(%rsi,%rdx), %xmm0 ## xmm0 = mem[0],zero
movq 24(%rsi,%rdx), %xmm1 ## xmm1 = mem[0],zero
punpcklbw %xmm2, %xmm0 ## xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
movdqu %xmm0, -16(%rax,%rdx,2)
punpcklbw %xmm2, %xmm1 ## xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
movdqu %xmm1, (%rax,%rdx,2)
addq $32, %rdx
addq $-2, %rcx
jne LBB42_187
jmp LBB42_190
LBB42_188: ## in Loop: Header=BB42_156 Depth=1
movq -328(%rbp), %r13 ## 8-byte Reload
jmp LBB42_155
LBB42_189: ## in Loop: Header=BB42_156 Depth=1
xorl %edx, %edx
LBB42_190: ## in Loop: Header=BB42_156 Depth=1
testb $1, %r10b
movq %r9, %r11
je LBB42_192
## %bb.191: ## in Loop: Header=BB42_156 Depth=1
leaq (%rdx,%rdx), %rcx
orq $16, %rcx
movq (%rsi,%rdx), %xmm0 ## xmm0 = mem[0],zero
movq 8(%rsi,%rdx), %xmm1 ## xmm1 = mem[0],zero
punpcklbw %xmm2, %xmm0 ## xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
movdqu %xmm0, (%rdi,%rdx,2)
punpcklbw %xmm2, %xmm1 ## xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
movdqu %xmm1, (%rdi,%rcx)
LBB42_192: ## in Loop: Header=BB42_156 Depth=1
cmpq %r14, %r8
je LBB42_196
## %bb.193: ## in Loop: Header=BB42_156 Depth=1
leaq (%rdi,%r8,2), %rdi
LBB42_194: ## in Loop: Header=BB42_156 Depth=1
addq %r8, %rsi
movq %r14, %rcx
subq %r8, %rcx
xorl %eax, %eax
.p2align 4, 0x90
LBB42_195: ## Parent Loop BB42_156 Depth=1
## => This Inner Loop Header: Depth=2
movzbl (%rsi,%rax), %edx
movb %dl, (%rdi,%rax,2)
movb $-1, 1(%rdi,%rax,2)
incq %rax
cmpq %rax, %rcx
jne LBB42_195
LBB42_196: ## in Loop: Header=BB42_156 Depth=1
movl %r14d, %r10d
jmp LBB42_155
LBB42_197:
cmpl $0, -296(%rbp) ## 4-byte Folded Reload
jle LBB42_212
## %bb.198:
leaq 13824(%r12), %rbx
xorl %r14d, %r14d
jmp LBB42_200
LBB42_199: ## in Loop: Header=BB42_200 Depth=1
incq %r14
movslq 8(%r12), %rax
addq $72, %rbx
cmpq %rax, %r14
jge LBB42_212
LBB42_200: ## =>This Inner Loop Header: Depth=1
cmpq $0, -16(%rbx)
je LBB42_202
## %bb.201: ## in Loop: Header=BB42_200 Depth=1
movq -8(%rbx), %rdi
callq _free
movq $0, -16(%rbx)
LBB42_202: ## in Loop: Header=BB42_200 Depth=1
movq (%rbx), %rdi
testq %rdi, %rdi
je LBB42_199
## %bb.203: ## in Loop: Header=BB42_200 Depth=1
callq _free
movq $0, (%rbx)
jmp LBB42_199
LBB42_204:
movl 8(%r12), %eax
jmp LBB42_214
LBB42_205:
cmpl $0, -296(%rbp) ## 4-byte Folded Reload
jle LBB42_212
## %bb.206:
leaq 13824(%r12), %rbx
xorl %r14d, %r14d
jmp LBB42_208
LBB42_207: ## in Loop: Header=BB42_208 Depth=1
incq %r14
movslq 8(%r12), %rax
addq $72, %rbx
cmpq %rax, %r14
jge LBB42_212
LBB42_208: ## =>This Inner Loop Header: Depth=1
cmpq $0, -16(%rbx)
je LBB42_210
## %bb.209: ## in Loop: Header=BB42_208 Depth=1
movq -8(%rbx), %rdi
callq _free
movq $0, -16(%rbx)
LBB42_210: ## in Loop: Header=BB42_208 Depth=1
movq (%rbx), %rdi
testq %rdi, %rdi
je LBB42_207
## %bb.211: ## in Loop: Header=BB42_208 Depth=1
callq _free
movq $0, (%rbx)
jmp LBB42_207
LBB42_212:
leaq L_.str.5(%rip), %rax
jmp LBB42_2
LBB42_213:
movq -344(%rbp), %r15 ## 8-byte Reload
movq -288(%rbp), %r11 ## 8-byte Reload
movq -328(%rbp), %r13 ## 8-byte Reload
movq -320(%rbp), %r10 ## 8-byte Reload
movl -296(%rbp), %eax ## 4-byte Reload
LBB42_214:
testl %eax, %eax
jle LBB42_222
## %bb.215:
leaq 13824(%r12), %rbx
xorl %r14d, %r14d
jmp LBB42_217
LBB42_216: ## in Loop: Header=BB42_217 Depth=1
incq %r14
movslq 8(%r12), %rax
addq $72, %rbx
cmpq %rax, %r14
jge LBB42_221
LBB42_217: ## =>This Inner Loop Header: Depth=1
cmpq $0, -16(%rbx)
je LBB42_219
## %bb.218: ## in Loop: Header=BB42_217 Depth=1
movq -8(%rbx), %rdi
callq _free
movq $0, -16(%rbx)
LBB42_219: ## in Loop: Header=BB42_217 Depth=1
movq (%rbx), %rdi
testq %rdi, %rdi
je LBB42_216
## %bb.220: ## in Loop: Header=BB42_217 Depth=1
callq _free
movq $0, (%rbx)
jmp LBB42_216
LBB42_221:
movl (%r12), %r10d
movq -328(%rbp), %r13 ## 8-byte Reload
movq -288(%rbp), %r11 ## 8-byte Reload
LBB42_222:
movl %r10d, (%r13)
movl 4(%r12), %eax
movl %eax, (%r11)
testq %r15, %r15
je LBB42_224
## %bb.223:
movl 8(%r12), %eax
movl %eax, (%r15)
LBB42_224:
movq -352(%rbp), %r15 ## 8-byte Reload
jmp LBB42_227
LBB42_225:
leaq L_.str.11(%rip), %rax
jmp LBB42_141
LBB42_229:
callq ___stack_chk_fail
.cfi_endproc
## -- End function
.globl _stbi_jpeg_load ## -- Begin function stbi_jpeg_load
.p2align 4, 0x90
_stbi_jpeg_load: ## @stbi_jpeg_load
## C-equivalent (inferred from register use):
##   void *stbi_jpeg_load(const char *filename, int *x, int *y, int *comp, int req_comp)
## ABI: SysV AMD64. In: rdi=filename, rsi=x, rdx=y, rcx=comp, r8d=req_comp.
## Out: rax = result of _load_jpeg_image, or NULL if fopen fails.
## Opens the file, decodes via _load_jpeg_image using a large (~14 KB)
## stack-allocated decoder context, then closes the file.
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
## Standard __chkstk_darwin probe sequence for the 14104-byte frame:
## probes each page before rsp is lowered past it.
movl $14104, %eax ## imm = 0x3718
callq ____chkstk_darwin
subq %rax, %rsp
popq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
## Stash the out-parameters in callee-saved regs across the fopen call.
movl %r8d, %r14d ## r14d = req_comp
movq %rcx, %r15 ## r15 = comp
movq %rdx, %r12 ## r12 = y
movq %rsi, %r13 ## r13 = x
leaq L_.str(%rip), %rsi ## fopen mode string (defined outside this chunk; presumably "rb" -- TODO confirm)
callq _fopen ## rdi still holds filename
testq %rax, %rax
je LBB43_1 ## fopen failed -> return NULL
## %bb.2:
movq %rax, %rbx ## keep FILE* for fclose
movq %rax, -14120(%rbp) ## ctx+16 = FILE* (same slot _decode_jpeg_header reads as 16(ctx))
leaq -14136(%rbp), %rdi ## rdi = &ctx (base of the stack decoder context)
movq %r13, %rsi
movq %r12, %rdx
movq %r15, %rcx
movl %r14d, %r8d
callq _load_jpeg_image
movq %rax, %r14 ## save result across fclose
movq %rbx, %rdi
callq _fclose
jmp LBB43_3
LBB43_1:
xorl %r14d, %r14d ## NULL result when the file cannot be opened
LBB43_3:
movq %r14, %rax
addq $14104, %rsp ## imm = 0x3718
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.p2align 4, 0x90 ## -- Begin function decode_jpeg_header
_decode_jpeg_header: ## @decode_jpeg_header
## C-equivalent (inferred from register use):
##   int decode_jpeg_header(jpeg *j, int scan)
## ABI: SysV AMD64. In: rdi = decoder context, esi = scan mode.
## Out: eax = 1 on success, 0 on failure (failure stores a message
##       pointer into _failure_reason).
## Context fields used here (offsets grounded in this code):
##   16(ctx)    FILE*          -- when non-null, bytes come via _fgetc
##   24(ctx)    memory cursor  -- when FILE* is null, bytes come from the
##   32(ctx)    memory end        in-memory range [cursor, end)
##   14056(ctx) one-byte marker cache; 0xFF means "no marker buffered"
## Logic: require the SOI marker (0xFF 0xD8) first. If scan == 1, stop
## there and report success. Otherwise read markers until one matching
## 0xC0/0xC1 (SOF0/SOF1, baseline frame headers) appears, then delegate
## to _process_frame_header; all other markers go to _process_marker.
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %esi, %r15d ## r15d = scan mode
movq %rdi, %rbx ## rbx = context
movb $-1, 14056(%rdi) ## reset marker cache to 0xFF ("none buffered")
movq 16(%rdi), %rdi ## FILE*; null selects the memory-buffer path
testq %rdi, %rdi
je LBB44_2
## %bb.1:
## stdio path: read the first byte.
callq _fgetc
xorl %ecx, %ecx
cmpl $-1, %eax
cmovnel %eax, %ecx ## map EOF (-1) to byte 0
cmpb $-1, %cl ## first byte must be 0xFF
jne LBB44_15
jmp LBB44_4
LBB44_2:
## memory path: read the first byte from [cursor, end).
movq 24(%rbx), %rax
cmpq 32(%rbx), %rax
jae LBB44_15 ## buffer already exhausted -> not a JPEG
## %bb.3:
leaq 1(%rax), %rcx
movq %rcx, 24(%rbx) ## advance cursor
movzbl (%rax), %ecx
cmpb $-1, %cl ## first byte must be 0xFF
jne LBB44_15
LBB44_4:
xorl %r14d, %r14d
jmp LBB44_6
.p2align 4, 0x90
LBB44_5: ## in Loop: Header=BB44_6 Depth=1
callq _fgetc
cmpl $-1, %eax
cmovel %r14d, %eax ## map EOF to 0
cmpb $-1, %al
jne LBB44_9 ## non-0xFF byte = the marker code
LBB44_6: ## =>This Inner Loop Header: Depth=1
## Skip any run of 0xFF fill bytes preceding the marker code.
movq 16(%rbx), %rdi
testq %rdi, %rdi
jne LBB44_5
## %bb.7: ## in Loop: Header=BB44_6 Depth=1
movq 24(%rbx), %rax
cmpq 32(%rbx), %rax
jae LBB44_15 ## ran out of input before a marker code
## %bb.8: ## in Loop: Header=BB44_6 Depth=1
leaq 1(%rax), %rcx
movq %rcx, 24(%rbx)
movzbl (%rax), %eax
cmpb $-1, %al
je LBB44_6
LBB44_9:
cmpb $-40, %al ## 0xD8: the first marker must be SOI
jne LBB44_15
## %bb.10:
movl $1, %r14d
cmpl $1, %r15d ## scan mode 1: caller only wanted the SOI check
je LBB44_16
## %bb.11:
movb 14056(%rbx), %al ## take a buffered marker if one exists
cmpb $-1, %al
je LBB44_17 ## none buffered: fetch one from the stream
## %bb.12:
movb $-1, 14056(%rbx) ## consume the buffered marker
LBB44_13:
## Marker dispatch: al holds the marker code.
movzbl %al, %esi
movl %esi, %eax
andl $-2, %eax
cmpl $192, %eax ## 0xC0/0xC1 (SOF0/SOF1) -> frame header
jne LBB44_22
LBB44_14:
movq %rbx, %rdi
callq _process_frame_header
xorl %r14d, %r14d
testl %eax, %eax
setne %r14b ## return (process_frame_header(...) != 0)
jmp LBB44_16
LBB44_15:
leaq L_.str.22(%rip), %rax ## error message (string defined outside this chunk)
movq %rax, _failure_reason(%rip)
xorl %r14d, %r14d ## return 0
LBB44_16:
## Common epilogue: result in r14d.
movl %r14d, %eax
addq $8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
LBB44_17:
## No buffered marker: read the next marker byte from the input.
movq 16(%rbx), %rdi
testq %rdi, %rdi
je LBB44_19
## %bb.18:
callq _fgetc
xorl %ecx, %ecx
cmpl $-1, %eax
cmovnel %eax, %ecx ## map EOF to byte 0
jmp LBB44_21
LBB44_19:
movq 24(%rbx), %rax
movl $255, %esi
cmpq 32(%rbx), %rax
jae LBB44_22 ## end of memory buffer: treat as marker 0xFF
## %bb.20:
leaq 1(%rax), %rcx
movq %rcx, 24(%rbx)
movzbl (%rax), %ecx
LBB44_21:
movl $255, %esi
cmpb $-1, %cl ## 0xFF begins a marker sequence
je LBB44_52
LBB44_22:
xorl %r14d, %r14d
.p2align 4, 0x90
LBB44_23: ## =>This Loop Header: Depth=1
## Child Loop BB44_32 Depth 2
## Child Loop BB44_46 Depth 2
## Child Loop BB44_41 Depth 2
## Main marker loop: handle the current (non-SOF) marker, then scan
## forward for the next marker until a SOF0/SOF1 is found.
movq %rbx, %rdi
callq _process_marker
testl %eax, %eax
je LBB44_16 ## process_marker failed -> return 0 (r14d is 0 here)
## %bb.24: ## in Loop: Header=BB44_23 Depth=1
movb 14056(%rbx), %al ## prefer a marker buffered by process_marker
cmpb $-1, %al
je LBB44_27
LBB44_25: ## in Loop: Header=BB44_23 Depth=1
movb $-1, 14056(%rbx) ## consume the buffered marker
LBB44_26: ## in Loop: Header=BB44_23 Depth=1
## Same dispatch as LBB44_13: SOF0/SOF1 exits to the frame header.
movzbl %al, %esi
movl %esi, %eax
andl $-2, %eax
cmpl $192, %eax
jne LBB44_23
jmp LBB44_14
LBB44_27: ## in Loop: Header=BB44_23 Depth=1
movq 16(%rbx), %rdi
testq %rdi, %rdi
je LBB44_29
## %bb.28: ## in Loop: Header=BB44_23 Depth=1
callq _fgetc
cmpl $-1, %eax
cmovel %r14d, %eax ## map EOF to 0
cmpb $-1, %al
jne LBB44_32
jmp LBB44_41
LBB44_29: ## in Loop: Header=BB44_23 Depth=1
movq 24(%rbx), %rax
cmpq 32(%rbx), %rax
jae LBB44_32
## %bb.30: ## in Loop: Header=BB44_23 Depth=1
leaq 1(%rax), %rcx
movq %rcx, 24(%rbx)
movzbl (%rax), %eax
cmpb $-1, %al
jne LBB44_32
jmp LBB44_41
.p2align 4, 0x90
LBB44_31: ## in Loop: Header=BB44_32 Depth=2
callq _fgetc
cmpl $-1, %eax
cmovel %r14d, %eax
cmpb $-1, %al
je LBB44_46
LBB44_32: ## Parent Loop BB44_23 Depth=1
## => This Inner Loop Header: Depth=2
## Scan-for-marker loop: advance until a 0xFF is seen, failing with an
## EOF error if the input runs dry first.
movq 16(%rbx), %rdi
testq %rdi, %rdi
je LBB44_34
## %bb.33: ## in Loop: Header=BB44_32 Depth=2
callq _feof
testl %eax, %eax
je LBB44_35
jmp LBB44_51 ## stdio EOF while seeking a marker
.p2align 4, 0x90
LBB44_34: ## in Loop: Header=BB44_32 Depth=2
movq 24(%rbx), %rcx
xorl %eax, %eax
cmpq 32(%rbx), %rcx
setae %al
testl %eax, %eax
jne LBB44_51 ## memory buffer exhausted while seeking a marker
LBB44_35: ## in Loop: Header=BB44_32 Depth=2
movzbl 14056(%rbx), %eax ## a marker may have been buffered meanwhile
cmpb $-1, %al
jne LBB44_25
## %bb.36: ## in Loop: Header=BB44_32 Depth=2
movq 16(%rbx), %rdi
testq %rdi, %rdi
jne LBB44_31
## %bb.37: ## in Loop: Header=BB44_32 Depth=2
movq 24(%rbx), %rax
cmpq 32(%rbx), %rax
jae LBB44_32
## %bb.38: ## in Loop: Header=BB44_32 Depth=2
leaq 1(%rax), %rcx
movq %rcx, 24(%rbx)
movzbl (%rax), %eax
cmpb $-1, %al
jne LBB44_32
jmp LBB44_46
.p2align 4, 0x90
LBB44_39: ## in Loop: Header=BB44_41 Depth=2
callq _fgetc
cmpl $-1, %eax
cmovel %r14d, %eax
cmpb $-1, %al
jne LBB44_26 ## marker code found -> dispatch
LBB44_41: ## Parent Loop BB44_23 Depth=1
## => This Inner Loop Header: Depth=2
## Already saw a 0xFF: skip fill bytes until the marker code byte.
movq 16(%rbx), %rdi
testq %rdi, %rdi
jne LBB44_39
## %bb.42: ## in Loop: Header=BB44_41 Depth=2
movq 24(%rbx), %rax
cmpq 32(%rbx), %rax
jae LBB44_50
## %bb.43: ## in Loop: Header=BB44_41 Depth=2
leaq 1(%rax), %rcx
movq %rcx, 24(%rbx)
movzbl (%rax), %eax
cmpb $-1, %al
je LBB44_41
jmp LBB44_26
.p2align 4, 0x90
LBB44_44: ## in Loop: Header=BB44_46 Depth=2
callq _fgetc
cmpl $-1, %eax
cmovel %r14d, %eax
cmpb $-1, %al
jne LBB44_26
LBB44_46: ## Parent Loop BB44_23 Depth=1
## => This Inner Loop Header: Depth=2
## Same fill-byte-skipping loop, entered from the scan loop above.
movq 16(%rbx), %rdi
testq %rdi, %rdi
jne LBB44_44
## %bb.47: ## in Loop: Header=BB44_46 Depth=2
movq 24(%rbx), %rax
cmpq 32(%rbx), %rax
jae LBB44_50
## %bb.48: ## in Loop: Header=BB44_46 Depth=2
leaq 1(%rax), %rcx
movq %rcx, 24(%rbx)
movzbl (%rax), %eax
cmpb $-1, %al
je LBB44_46
jmp LBB44_26
LBB44_50: ## in Loop: Header=BB44_23 Depth=1
xorl %eax, %eax ## buffer exhausted: dispatch marker 0x00
jmp LBB44_26
LBB44_51:
leaq L_.str.23(%rip), %rax ## EOF-while-scanning error message
movq %rax, _failure_reason(%rip)
jmp LBB44_16 ## r14d == 0 -> return failure
LBB44_52:
## Saw 0xFF right after SOI: skip fill bytes, then dispatch the marker.
xorl %r14d, %r14d
jmp LBB44_54
LBB44_53: ## in Loop: Header=BB44_54 Depth=1
callq _fgetc
cmpl $-1, %eax
cmovel %r14d, %eax
cmpb $-1, %al
jne LBB44_13
LBB44_54: ## =>This Inner Loop Header: Depth=1
movq 16(%rbx), %rdi
testq %rdi, %rdi
jne LBB44_53
## %bb.55: ## in Loop: Header=BB44_54 Depth=1
movq 24(%rbx), %rax
cmpq 32(%rbx), %rax
jae LBB44_57
## %bb.56: ## in Loop: Header=BB44_54 Depth=1
leaq 1(%rax), %rcx
movq %rcx, 24(%rbx)
movzbl (%rax), %eax
cmpb $-1, %al
je LBB44_54
jmp LBB44_13
LBB44_57:
## Memory buffer exhausted before a marker code: enter the main loop
## with a zero marker state.
xorl %esi, %esi
xorl %r14d, %r14d
jmp LBB44_23
.cfi_endproc
## -- End function
.globl _stbi_zlib_decode_malloc_guesssize ## -- Begin function stbi_zlib_decode_malloc_guesssize
.p2align 4, 0x90
_stbi_zlib_decode_malloc_guesssize: ## @stbi_zlib_decode_malloc_guesssize
## C-equivalent (inferred from register use):
##   char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len,
##                                           int initial_size, int *outlen)
## ABI: SysV AMD64. In: rdi=buffer, esi=len, edx=initial_size, rcx=outlen.
## Out: rax = malloc'd (possibly realloc-grown by _do_zlib) decoded data,
##       or NULL on allocation/decode failure. If outlen is non-null it
##       receives the decoded byte count.
## Builds a zlib-stream struct on the stack at rbp-4136 and calls
## _do_zlib with expandable-output and parse-header flags both set.
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
## Four pushes keep rsp 16-aligned; probe then allocate the 4112-byte frame.
movl $4112, %eax ## imm = 0x1010
callq ____chkstk_darwin
subq %rax, %rsp
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rcx, %r14 ## r14 = outlen
movl %edx, %r15d ## r15d = initial_size
movl %esi, %r12d ## r12d = len
movq %rdi, %rbx ## rbx = buffer
## Install the stack canary at rbp-40.
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -40(%rbp)
movslq %edx, %rdi
callq _malloc ## initial output buffer of initial_size bytes
testq %rax, %rax
je LBB45_6 ## allocation failed -> return NULL
## %bb.1:
## Fill the z-stream struct (base rbp-4136; field offsets match the
## loads in _do_zlib): +0 zbuffer, +8 zbuffer_end.
movq %rbx, -4136(%rbp) ## z.zbuffer = buffer
movslq %r12d, %rcx
addq %rbx, %rcx
movq %rcx, -4128(%rbp) ## z.zbuffer_end = buffer + len
leaq -4136(%rbp), %rdi
movq %rax, %rsi ## output buffer
movl %r15d, %edx ## output capacity
movl $1, %ecx ## expandable output (stored at z+48 by _do_zlib)
movl $1, %r8d ## parse the zlib header
callq _do_zlib
testl %eax, %eax
je LBB45_5 ## decode failed -> free and return NULL
## %bb.2:
testq %r14, %r14
je LBB45_3 ## outlen may be NULL
## %bb.4:
## *outlen = z.zout - z.zout_start (z+24 at rbp-4112, z+32 at rbp-4104).
movl -4112(%rbp), %ecx
movq -4104(%rbp), %rax
subl %eax, %ecx
movl %ecx, (%r14)
jmp LBB45_7
LBB45_5:
movq -4104(%rbp), %rdi ## free z.zout_start (the possibly-realloc'd buffer)
callq _free
LBB45_6:
xorl %eax, %eax
LBB45_7:
## Verify the stack canary before returning.
movq ___stack_chk_guard@GOTPCREL(%rip), %rcx
movq (%rcx), %rcx
cmpq -40(%rbp), %rcx
jne LBB45_9
## %bb.8:
addq $4112, %rsp ## imm = 0x1010
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
LBB45_3:
movq -4104(%rbp), %rax ## return z.zout_start without reporting a length
jmp LBB45_7
LBB45_9:
callq ___stack_chk_fail
.cfi_endproc
## -- End function
.section __TEXT,__literal16,16byte_literals
.p2align 4 ## -- Begin function do_zlib
## 16-byte splat constants used by _do_zlib to initialize the DEFLATE
## fixed-Huffman code-length tables _default_length/_default_distance
## (the movaps stores near LBB46_41); the length pattern 8/9/7/8 and
## distance length 5 match the fixed codes of RFC 1951 section 3.2.6.
LCPI46_0:
.space 16,8 ## sixteen 8s: code length for literals 0-143
LCPI46_1:
.space 16,9 ## sixteen 9s: code length for literals 144-255
LCPI46_2:
.space 16,7 ## sixteen 7s: code length for symbols 256-279
LCPI46_3:
.space 16,5 ## sixteen 5s: all 32 distance-code lengths
LCPI46_4:
.space 16 ## sixteen zero bytes (not referenced in the visible chunk)
.section __TEXT,__text,regular,pure_instructions
.p2align 4, 0x90
_do_zlib: ## @do_zlib
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $2584, %rsp ## imm = 0xA18
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdi, %r13
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -48(%rbp)
movq %rsi, 32(%rdi)
movq %rsi, 24(%rdi)
movslq %edx, %rax
addq %rsi, %rax
movq %rax, 40(%rdi)
movl %ecx, 48(%rdi)
testl %r8d, %r8d
je LBB46_9
## %bb.1:
movq (%r13), %rdx
movq 8(%r13), %rsi
xorl %ecx, %ecx
cmpq %rsi, %rdx
jae LBB46_3
## %bb.2:
leaq 1(%rdx), %rdi
movq %rdi, (%r13)
movzbl (%rdx), %eax
movq %rdi, %rdx
cmpq %rsi, %rdx
jb LBB46_4
jmp LBB46_5
LBB46_3:
xorl %eax, %eax
cmpq %rsi, %rdx
jae LBB46_5
LBB46_4:
leaq 1(%rdx), %rcx
movq %rcx, (%r13)
movzbl (%rdx), %ecx
LBB46_5:
movl %eax, %edx
shll $8, %edx
orl %ecx, %edx
imull $31711, %edx, %edx ## imm = 0x7BDF
movzwl %dx, %edx
cmpl $2114, %edx ## imm = 0x842
jbe LBB46_7
## %bb.6:
leaq L_.str.34(%rip), %rax
jmp LBB46_172
LBB46_7:
testb $32, %cl
jne LBB46_169
## %bb.8:
andl $15, %eax
cmpl $8, %eax
jne LBB46_171
LBB46_9:
leaq 16(%r13), %r12
movq $0, 16(%r13)
leaq 20(%r13), %r9
leaq 52(%r13), %rax
movq %rax, -2608(%rbp) ## 8-byte Spill
leaq 2072(%r13), %rax
movq %rax, -2600(%rbp) ## 8-byte Spill
xorl %ebx, %ebx
xorl %ecx, %ecx
movq %r9, -2584(%rbp) ## 8-byte Spill
testl %ecx, %ecx
jle LBB46_18
LBB46_10:
movl %ecx, %eax
LBB46_11:
movl %ebx, %esi
shrl %esi
movl %esi, (%r9)
leal -1(%rax), %edx
movl %edx, (%r12)
cmpl $1, %edx
movq %rbx, -2616(%rbp) ## 8-byte Spill
ja LBB46_22
## %bb.12:
addl $-9, %eax
jmp LBB46_14
.p2align 4, 0x90
LBB46_13: ## in Loop: Header=BB46_14 Depth=1
movl %edx, %ecx
shll %cl, %edi
orl %edi, %esi
movl %esi, (%r9)
addl $16, %eax
movl %eax, (%r12)
movl %edx, %eax
cmpl $17, %edx
jge LBB46_21
LBB46_14: ## =>This Inner Loop Header: Depth=1
leal 8(%rax), %edx
movl %esi, %edi
movl %edx, %ecx
shrl %cl, %edi
testl %edi, %edi
jne LBB46_192
## %bb.15: ## in Loop: Header=BB46_14 Depth=1
movq (%r13), %rcx
xorl %edi, %edi
cmpq 8(%r13), %rcx
jae LBB46_13
## %bb.16: ## in Loop: Header=BB46_14 Depth=1
leaq 1(%rcx), %rdi
movq %rdi, (%r13)
movzbl (%rcx), %edi
jmp LBB46_13
.p2align 4, 0x90
LBB46_21:
addl $8, %edx
LBB46_22:
movl %esi, %r15d
shrl $2, %r15d
movl %r15d, (%r9)
leal -2(%rdx), %eax
movl %eax, (%r12)
movl $0, -2564(%rbp) ## 4-byte Folded Spill
andl $3, %esi
leaq _compute_huffman_codes.length_dezigzag(%rip), %r10
je LBB46_31
## %bb.23:
cmpl $1, %esi
je LBB46_40
## %bb.24:
cmpl $3, %esi
je LBB46_174
## %bb.25:
cmpl $4, %eax
ja LBB46_45
## %bb.26:
addl $-10, %edx
jmp LBB46_28
.p2align 4, 0x90
LBB46_27: ## in Loop: Header=BB46_28 Depth=1
movl %eax, %ecx
shll %cl, %esi
orl %esi, %r15d
movl %r15d, (%r9)
addl $16, %edx
movl %edx, (%r12)
movl %eax, %edx
cmpl $17, %eax
jge LBB46_44
LBB46_28: ## =>This Inner Loop Header: Depth=1
leal 8(%rdx), %eax
movl %r15d, %esi
movl %eax, %ecx
shrl %cl, %esi
testl %esi, %esi
jne LBB46_200
## %bb.29: ## in Loop: Header=BB46_28 Depth=1
movq (%r13), %rcx
xorl %esi, %esi
cmpq 8(%r13), %rcx
jae LBB46_27
## %bb.30: ## in Loop: Header=BB46_28 Depth=1
leaq 1(%rcx), %rsi
movq %rsi, (%r13)
movzbl (%rcx), %esi
jmp LBB46_27
.p2align 4, 0x90
LBB46_31:
movl %eax, %ecx
andl $7, %ecx
je LBB46_33
## %bb.32:
## kill: def $cl killed $cl killed $ecx
shrl %cl, %r15d
movl %r15d, (%r9)
andl $-8, %eax
movl %eax, (%r12)
LBB46_33:
xorl %esi, %esi
testl %eax, %eax
je LBB46_37
.p2align 4, 0x90
LBB46_34: ## =>This Inner Loop Header: Depth=1
movl %eax, %ecx
movb %r15b, -2072(%rbp,%rsi)
incq %rsi
shrl $8, %r15d
leal -8(%rcx), %eax
cmpl $8, %ecx
ja LBB46_34
## %bb.35:
movl %r15d, (%r9)
movl %eax, (%r12)
testl %eax, %eax
jne LBB46_204
## %bb.36:
testl $-4, %esi
jne LBB46_148
LBB46_37:
movq (%r13), %rdx
movq 8(%r13), %rax
movl %esi, %ecx
testb $1, %sil
je LBB46_147
## %bb.38:
cmpq %rax, %rdx
jae LBB46_145
## %bb.39:
leaq 1(%rdx), %r8
movq %r8, (%r13)
movb (%rdx), %dil
movq %r8, %rdx
jmp LBB46_146
.p2align 4, 0x90
LBB46_40:
cmpb $0, _default_distance+31(%rip)
jne LBB46_42
## %bb.41:
movaps LCPI46_0(%rip), %xmm0 ## xmm0 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
movaps %xmm0, _default_length+128(%rip)
movaps %xmm0, _default_length+112(%rip)
movaps %xmm0, _default_length+96(%rip)
movaps %xmm0, _default_length+80(%rip)
movaps %xmm0, _default_length+64(%rip)
movaps %xmm0, _default_length+48(%rip)
movaps %xmm0, _default_length+32(%rip)
movaps %xmm0, _default_length+16(%rip)
movaps %xmm0, _default_length(%rip)
movaps LCPI46_1(%rip), %xmm0 ## xmm0 = [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9]
movaps %xmm0, _default_length+144(%rip)
movaps %xmm0, _default_length+160(%rip)
movaps %xmm0, _default_length+176(%rip)
movaps %xmm0, _default_length+192(%rip)
movaps %xmm0, _default_length+208(%rip)
movaps %xmm0, _default_length+224(%rip)
movaps %xmm0, _default_length+240(%rip)
movaps LCPI46_2(%rip), %xmm0 ## xmm0 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
movaps %xmm0, _default_length+256(%rip)
movabsq $506381209866536711, %rax ## imm = 0x707070707070707
movq %rax, _default_length+272(%rip)
movabsq $578721382704613384, %rax ## imm = 0x808080808080808
movq %rax, _default_length+280(%rip)
movaps LCPI46_3(%rip), %xmm0 ## xmm0 = [5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5]
movaps %xmm0, _default_distance+16(%rip)
movaps %xmm0, _default_distance(%rip)
LBB46_42:
movq -2608(%rbp), %rdi ## 8-byte Reload
leaq _default_length(%rip), %rsi
movl $288, %edx ## imm = 0x120
callq _zbuild_huffman
testl %eax, %eax
je LBB46_174
## %bb.43:
movq -2600(%rbp), %rdi ## 8-byte Reload
leaq _default_distance(%rip), %rsi
movl $32, %edx
callq _zbuild_huffman
testl %eax, %eax
jne LBB46_101
jmp LBB46_174
LBB46_44:
addl $8, %eax
LBB46_45:
movl %r15d, %r14d
shrl $5, %r14d
movl %r14d, (%r9)
leal -5(%rax), %edx
movl %edx, (%r12)
cmpl $4, %edx
ja LBB46_52
## %bb.46:
addl $-13, %eax
jmp LBB46_48
.p2align 4, 0x90
LBB46_47: ## in Loop: Header=BB46_48 Depth=1
movl %edx, %ecx
shll %cl, %esi
orl %esi, %r14d
movl %r14d, (%r9)
addl $16, %eax
movl %eax, (%r12)
movl %edx, %eax
cmpl $17, %edx
jge LBB46_51
LBB46_48: ## =>This Inner Loop Header: Depth=1
leal 8(%rax), %edx
movl %r14d, %esi
movl %edx, %ecx
shrl %cl, %esi
testl %esi, %esi
jne LBB46_201
## %bb.49: ## in Loop: Header=BB46_48 Depth=1
movq (%r13), %rcx
xorl %esi, %esi
cmpq 8(%r13), %rcx
jae LBB46_47
## %bb.50: ## in Loop: Header=BB46_48 Depth=1
leaq 1(%rcx), %rsi
movq %rsi, (%r13)
movzbl (%rcx), %esi
jmp LBB46_47
LBB46_51:
addl $8, %edx
LBB46_52:
movl %r14d, %eax
shrl $5, %eax
movl %eax, (%r9)
leal -5(%rdx), %ecx
movl %ecx, (%r12)
cmpl $3, %ecx
ja LBB46_59
## %bb.53:
addl $-13, %edx
jmp LBB46_55
.p2align 4, 0x90
LBB46_54: ## in Loop: Header=BB46_55 Depth=1
shll %cl, %esi
orl %esi, %eax
movl %eax, (%r9)
addl $16, %edx
movl %edx, (%r12)
movl %ecx, %edx
cmpl $17, %ecx
jge LBB46_58
LBB46_55: ## =>This Inner Loop Header: Depth=1
leal 8(%rdx), %ecx
movl %eax, %esi
shrl %cl, %esi
testl %esi, %esi
jne LBB46_202
## %bb.56: ## in Loop: Header=BB46_55 Depth=1
movq (%r13), %rdi
xorl %esi, %esi
cmpq 8(%r13), %rdi
jae LBB46_54
## %bb.57: ## in Loop: Header=BB46_55 Depth=1
leaq 1(%rdi), %rsi
movq %rsi, (%r13)
movzbl (%rdi), %esi
jmp LBB46_54
LBB46_58:
addl $8, %ecx
LBB46_59:
andl $31, %r15d
addl $257, %r15d ## imm = 0x101
andl $31, %r14d
incl %r14d
movl %eax, %r8d
andl $15, %r8d
shrl $4, %eax
movl %eax, (%r9)
addl $-4, %ecx
movl %ecx, (%r12)
addl $4, %r8d
xorps %xmm0, %xmm0
movaps %xmm0, -2560(%rbp)
movl $0, -2545(%rbp)
xorl %esi, %esi
jmp LBB46_61
.p2align 4, 0x90
LBB46_60: ## in Loop: Header=BB46_61 Depth=1
movl %eax, %ecx
shrl $3, %ecx
movl %ecx, (%r9)
addl $-3, %edi
movl %edi, (%r12)
andb $7, %al
movzbl (%rsi,%r10), %edx
movb %al, -2560(%rbp,%rdx)
incq %rsi
movl %ecx, %eax
movl %edi, %ecx
cmpq %r8, %rsi
je LBB46_67
LBB46_61: ## =>This Loop Header: Depth=1
## Child Loop BB46_64 Depth 2
cmpl $2, %ecx
jbe LBB46_64
## %bb.62: ## in Loop: Header=BB46_61 Depth=1
movl %ecx, %edi
jmp LBB46_60
.p2align 4, 0x90
LBB46_63: ## in Loop: Header=BB46_64 Depth=2
shll %cl, %edi
orl %edi, %eax
movl %eax, (%r9)
leal 8(%rcx), %edi
movl %edi, (%r12)
cmpl $17, %ecx
movl %edi, %ecx
jge LBB46_60
LBB46_64: ## Parent Loop BB46_61 Depth=1
## => This Inner Loop Header: Depth=2
movl %eax, %edx
shrl %cl, %edx
testl %edx, %edx
jne LBB46_178
## %bb.65: ## in Loop: Header=BB46_64 Depth=2
movq (%r13), %rdx
xorl %edi, %edi
cmpq 8(%r13), %rdx
jae LBB46_63
## %bb.66: ## in Loop: Header=BB46_64 Depth=2
leaq 1(%rdx), %rdi
movq %rdi, (%r13)
movzbl (%rdx), %edi
jmp LBB46_63
LBB46_67:
leaq -2072(%rbp), %rdi
leaq -2560(%rbp), %rsi
movl $19, %edx
callq _zbuild_huffman
testl %eax, %eax
je LBB46_174
## %bb.68:
leal (%r14,%r15), %ebx
xorl %edx, %edx
movq %r14, -2592(%rbp) ## 8-byte Spill
movl %ebx, -2568(%rbp) ## 4-byte Spill
jmp LBB46_70
.p2align 4, 0x90
LBB46_69: ## in Loop: Header=BB46_70 Depth=1
movq -2576(%rbp), %rdx ## 8-byte Reload
movslq %edx, %rcx
incl %edx
movb %al, -2528(%rbp,%rcx)
cmpl %ebx, %edx
jge LBB46_98
LBB46_70: ## =>This Loop Header: Depth=1
## Child Loop BB46_95 Depth 2
## Child Loop BB46_78 Depth 2
## Child Loop BB46_86 Depth 2
movq %rdx, -2576(%rbp) ## 8-byte Spill
movq %r13, %rdi
leaq -2072(%rbp), %rsi
callq _zhuffman_decode
cmpl $19, %eax
jae LBB46_199
## %bb.71: ## in Loop: Header=BB46_70 Depth=1
cmpl $15, %eax
jbe LBB46_69
## %bb.72: ## in Loop: Header=BB46_70 Depth=1
cmpl $16, %eax
je LBB46_81
## %bb.73: ## in Loop: Header=BB46_70 Depth=1
cmpl $17, %eax
movq -2584(%rbp), %rdi ## 8-byte Reload
jne LBB46_89
## %bb.74: ## in Loop: Header=BB46_70 Depth=1
movl (%r12), %ecx
movl (%rdi), %eax
cmpl $2, %ecx
jle LBB46_78
## %bb.75: ## in Loop: Header=BB46_70 Depth=1
movl %ecx, %edx
LBB46_76: ## in Loop: Header=BB46_70 Depth=1
movl %eax, %r14d
andl $7, %r14d
shrl $3, %eax
movl %eax, (%rdi)
addl $-3, %edx
movl %edx, (%r12)
leal 3(%r14), %esi
movq -2576(%rbp), %rbx ## 8-byte Reload
movslq %ebx, %rax
leaq (%rax,%rbp), %rdi
addq $-2528, %rdi ## imm = 0xF620
callq ___bzero
jmp LBB46_84
.p2align 4, 0x90
LBB46_77: ## in Loop: Header=BB46_78 Depth=2
shll %cl, %edx
orl %edx, %eax
movl %eax, (%rdi)
leal 8(%rcx), %edx
movl %edx, (%r12)
cmpl $17, %ecx
movl %edx, %ecx
jge LBB46_76
LBB46_78: ## Parent Loop BB46_70 Depth=1
## => This Inner Loop Header: Depth=2
movl %eax, %edx
shrl %cl, %edx
testl %edx, %edx
jne LBB46_189
## %bb.79: ## in Loop: Header=BB46_78 Depth=2
movq (%r13), %rsi
xorl %edx, %edx
cmpq 8(%r13), %rsi
jae LBB46_77
## %bb.80: ## in Loop: Header=BB46_78 Depth=2
leaq 1(%rsi), %rdx
movq %rdx, (%r13)
movzbl (%rsi), %edx
jmp LBB46_77
LBB46_81: ## in Loop: Header=BB46_70 Depth=1
movl (%r12), %ecx
movq -2584(%rbp), %rdi ## 8-byte Reload
movl (%rdi), %eax
cmpl $1, %ecx
jle LBB46_86
## %bb.82: ## in Loop: Header=BB46_70 Depth=1
movl %ecx, %edx
LBB46_83: ## in Loop: Header=BB46_70 Depth=1
movl %eax, %r14d
andl $3, %r14d
shrl $2, %eax
movl %eax, (%rdi)
addl $-2, %edx
movl %edx, (%r12)
leal 3(%r14), %edx
movq -2576(%rbp), %rbx ## 8-byte Reload
movslq %ebx, %rax
leaq (%rax,%rbp), %rdi
addq $-2528, %rdi ## imm = 0xF620
movzbl -2529(%rbp,%rax), %esi
callq _memset
LBB46_84: ## in Loop: Header=BB46_70 Depth=1
movq %rbx, %rdx
leal (%r14,%rbx), %edx
addl $3, %edx
jmp LBB46_93
.p2align 4, 0x90
LBB46_85: ## in Loop: Header=BB46_86 Depth=2
shll %cl, %edx
orl %edx, %eax
movl %eax, (%rdi)
leal 8(%rcx), %edx
movl %edx, (%r12)
cmpl $17, %ecx
movl %edx, %ecx
jge LBB46_83
LBB46_86: ## Parent Loop BB46_70 Depth=1
## => This Inner Loop Header: Depth=2
movl %eax, %edx
shrl %cl, %edx
testl %edx, %edx
jne LBB46_190
## %bb.87: ## in Loop: Header=BB46_86 Depth=2
movq (%r13), %rsi
xorl %edx, %edx
cmpq 8(%r13), %rsi
jae LBB46_85
## %bb.88: ## in Loop: Header=BB46_86 Depth=2
leaq 1(%rsi), %rdx
movq %rdx, (%r13)
movzbl (%rsi), %edx
jmp LBB46_85
LBB46_89: ## in Loop: Header=BB46_70 Depth=1
cmpl $18, %eax
jne LBB46_203
## %bb.90: ## in Loop: Header=BB46_70 Depth=1
movl (%r12), %ecx
movl (%rdi), %eax
cmpl $6, %ecx
jle LBB46_95
## %bb.91: ## in Loop: Header=BB46_70 Depth=1
movl %ecx, %edx
LBB46_92: ## in Loop: Header=BB46_70 Depth=1
movl %eax, %r14d
andl $127, %r14d
shrl $7, %eax
movl %eax, (%rdi)
addl $-7, %edx
movl %edx, (%r12)
leal 11(%r14), %esi
movslq -2576(%rbp), %rbx ## 4-byte Folded Reload
leaq (%rbx,%rbp), %rdi
addq $-2528, %rdi ## imm = 0xF620
callq ___bzero
leal (%r14,%rbx), %edx
addl $11, %edx
LBB46_93: ## in Loop: Header=BB46_70 Depth=1
movl -2568(%rbp), %ebx ## 4-byte Reload
movq -2592(%rbp), %r14 ## 8-byte Reload
cmpl %ebx, %edx
jl LBB46_70
jmp LBB46_98
.p2align 4, 0x90
LBB46_94: ## in Loop: Header=BB46_95 Depth=2
shll %cl, %edx
orl %edx, %eax
movl %eax, (%rdi)
leal 8(%rcx), %edx
movl %edx, (%r12)
cmpl $17, %ecx
movl %edx, %ecx
jge LBB46_92
LBB46_95: ## Parent Loop BB46_70 Depth=1
## => This Inner Loop Header: Depth=2
movl %eax, %edx
shrl %cl, %edx
testl %edx, %edx
jne LBB46_191
## %bb.96: ## in Loop: Header=BB46_95 Depth=2
movq (%r13), %rsi
xorl %edx, %edx
cmpq 8(%r13), %rsi
jae LBB46_94
## %bb.97: ## in Loop: Header=BB46_95 Depth=2
leaq 1(%rsi), %rdx
movq %rdx, (%r13)
movzbl (%rsi), %edx
jmp LBB46_94
LBB46_98:
jne LBB46_193
## %bb.99:
movq -2608(%rbp), %rdi ## 8-byte Reload
leaq -2528(%rbp), %rsi
movl %r15d, %edx
callq _zbuild_huffman
testl %eax, %eax
je LBB46_174
## %bb.100:
movl %r15d, %eax
leaq (%rax,%rbp), %rsi
addq $-2528, %rsi ## imm = 0xF620
movq -2600(%rbp), %rdi ## 8-byte Reload
movl %r14d, %edx
callq _zbuild_huffman
testl %eax, %eax
je LBB46_173
LBB46_101:
movq %r12, -2576(%rbp) ## 8-byte Spill
jmp LBB46_103
.p2align 4, 0x90
LBB46_102: ## in Loop: Header=BB46_103 Depth=1
leaq 1(%r14), %rax
movq %rax, 24(%r13)
movb %r15b, (%r14)
LBB46_103: ## =>This Loop Header: Depth=1
## Child Loop BB46_138 Depth 2
## Child Loop BB46_142 Depth 2
## Child Loop BB46_125 Depth 2
## Child Loop BB46_132 Depth 2
## Child Loop BB46_136 Depth 2
## Child Loop BB46_108 Depth 2
movq %r13, %rdi
movq -2608(%rbp), %rsi ## 8-byte Reload
callq _zhuffman_decode
movl %eax, %r15d
cmpl $255, %eax
jg LBB46_111
## %bb.104: ## in Loop: Header=BB46_103 Depth=1
testl %r15d, %r15d
js LBB46_170
## %bb.105: ## in Loop: Header=BB46_103 Depth=1
movq 24(%r13), %r14
movq 40(%r13), %rax
cmpq %rax, %r14
jb LBB46_102
## %bb.106: ## in Loop: Header=BB46_103 Depth=1
cmpl $0, 48(%r13)
je LBB46_196
## %bb.107: ## in Loop: Header=BB46_103 Depth=1
movq 32(%r13), %rdi
subq %rdi, %r14
subl %edi, %eax
.p2align 4, 0x90
LBB46_108: ## Parent Loop BB46_103 Depth=1
## => This Inner Loop Header: Depth=2
movl %eax, %ecx
leal (%rcx,%rcx), %eax
cmpl %r14d, %ecx
jle LBB46_108
## %bb.109: ## in Loop: Header=BB46_103 Depth=1
movslq %ecx, %r12
movq %r12, %rsi
callq _realloc
testq %rax, %rax
je LBB46_197
## %bb.110: ## in Loop: Header=BB46_103 Depth=1
movq %rax, 32(%r13)
movslq %r14d, %r14
addq %rax, %r14
addq %rax, %r12
movq %r12, 40(%r13)
jmp LBB46_102
.p2align 4, 0x90
LBB46_111: ## in Loop: Header=BB46_103 Depth=1
cmpl $256, %r15d ## imm = 0x100
movq -2576(%rbp), %r12 ## 8-byte Reload
movq -2584(%rbp), %r9 ## 8-byte Reload
je LBB46_157
## %bb.112: ## in Loop: Header=BB46_103 Depth=1
addl $-257, %r15d ## imm = 0xFEFF
leaq _length_base(%rip), %rax
movl (%rax,%r15,4), %ebx
leaq -28(%r15), %rax
cmpq $-20, %rax
jb LBB46_116
## %bb.113: ## in Loop: Header=BB46_103 Depth=1
leaq _length_extra(%rip), %rax
movl (%rax,%r15,4), %eax
movl (%r12), %ecx
movl (%r9), %edx
cmpl %eax, %ecx
jl LBB46_138
## %bb.114: ## in Loop: Header=BB46_103 Depth=1
movl %ecx, %esi
LBB46_115: ## in Loop: Header=BB46_103 Depth=1
movl $-1, %edi
movl %eax, %ecx
shll %cl, %edi
notl %edi
andl %edx, %edi
shrl %cl, %edx
movl %edx, (%r9)
subl %eax, %esi
movl %esi, (%r12)
addl %edi, %ebx
LBB46_116: ## in Loop: Header=BB46_103 Depth=1
movq %r13, %rdi
movq -2600(%rbp), %rsi ## 8-byte Reload
callq _zhuffman_decode
testl %eax, %eax
js LBB46_170
## %bb.117: ## in Loop: Header=BB46_103 Depth=1
movl %eax, %eax
leaq _dist_base(%rip), %rcx
movl (%rcx,%rax,4), %r8d
leaq -30(%rax), %rcx
cmpq $-26, %rcx
jb LBB46_121
## %bb.118: ## in Loop: Header=BB46_103 Depth=1
movq %rbx, %r9
leaq _dist_extra(%rip), %rcx
movl (%rcx,%rax,4), %eax
movl (%r12), %ecx
movq -2584(%rbp), %rbx ## 8-byte Reload
movl (%rbx), %esi
cmpl %eax, %ecx
jl LBB46_142
## %bb.119: ## in Loop: Header=BB46_103 Depth=1
movl %ecx, %edi
LBB46_120: ## in Loop: Header=BB46_103 Depth=1
movl $-1, %edx
movl %eax, %ecx
shll %cl, %edx
notl %edx
andl %esi, %edx
shrl %cl, %esi
movl %esi, (%rbx)
subl %eax, %edi
movl %edi, (%r12)
addl %edx, %r8d
movq %r9, %rbx
LBB46_121: ## in Loop: Header=BB46_103 Depth=1
movq 24(%r13), %rcx
movq 32(%r13), %rdi
movq %rcx, %r14
subq %rdi, %r14
movslq %r8d, %r12
cmpq %r12, %r14
jl LBB46_177
## %bb.122: ## in Loop: Header=BB46_103 Depth=1
movslq %ebx, %rdx
addq %rcx, %rdx
movq 40(%r13), %rax
cmpq %rax, %rdx
jbe LBB46_128
## %bb.123: ## in Loop: Header=BB46_103 Depth=1
cmpl $0, 48(%r13)
je LBB46_196
## %bb.124: ## in Loop: Header=BB46_103 Depth=1
subl %edi, %eax
leal (%rbx,%r14), %ecx
.p2align 4, 0x90
LBB46_125: ## Parent Loop BB46_103 Depth=1
## => This Inner Loop Header: Depth=2
movl %eax, %edx
leal (%rdx,%rdx), %eax
cmpl %edx, %ecx
jg LBB46_125
## %bb.126: ## in Loop: Header=BB46_103 Depth=1
movslq %edx, %r15
movq %r15, %rsi
callq _realloc
testq %rax, %rax
je LBB46_197
## %bb.127: ## in Loop: Header=BB46_103 Depth=1
movq %rax, 32(%r13)
movslq %r14d, %rcx
addq %rax, %rcx
movq %rcx, 24(%r13)
addq %rax, %r15
movq %r15, 40(%r13)
LBB46_128: ## in Loop: Header=BB46_103 Depth=1
testl %ebx, %ebx
je LBB46_103
## %bb.129: ## in Loop: Header=BB46_103 Depth=1
movq %r12, %rdx
negq %rdx
leaq 1(%rcx), %rsi
movl %ebx, %r9d
movb (%rcx,%rdx), %dl
movq %rsi, 24(%r13)
movb %dl, (%rcx)
decl %r9d
je LBB46_103
## %bb.130: ## in Loop: Header=BB46_103 Depth=1
subq %r12, %rcx
leal -2(%rbx), %r8d
testb $3, %r9b
je LBB46_134
## %bb.131: ## in Loop: Header=BB46_103 Depth=1
decb %bl
movzbl %bl, %r10d
andl $3, %r10d
xorl %edi, %edi
.p2align 4, 0x90
LBB46_132: ## Parent Loop BB46_103 Depth=1
## => This Inner Loop Header: Depth=2
movq 24(%r13), %rdx
movzbl 1(%rcx,%rdi), %eax
leaq 1(%rdx), %rsi
movq %rsi, 24(%r13)
movb %al, (%rdx)
incq %rdi
cmpl %edi, %r10d
jne LBB46_132
## %bb.133: ## in Loop: Header=BB46_103 Depth=1
subl %edi, %r9d
addq %rdi, %rcx
LBB46_134: ## in Loop: Header=BB46_103 Depth=1
cmpl $3, %r8d
jb LBB46_103
## %bb.135: ## in Loop: Header=BB46_103 Depth=1
movl %r9d, %r8d
xorl %edx, %edx
.p2align 4, 0x90
LBB46_136: ## Parent Loop BB46_103 Depth=1
## => This Inner Loop Header: Depth=2
movq 24(%r13), %rsi
movzbl 1(%rcx,%rdx), %eax
leaq 1(%rsi), %rdi
movq %rdi, 24(%r13)
movb %al, (%rsi)
movq 24(%r13), %rax
movzbl 2(%rcx,%rdx), %ebx
leaq 1(%rax), %rsi
movq %rsi, 24(%r13)
movb %bl, (%rax)
movq 24(%r13), %rax
movzbl 3(%rcx,%rdx), %ebx
leaq 1(%rax), %rsi
movq %rsi, 24(%r13)
movb %bl, (%rax)
movq 24(%r13), %rax
movzbl 4(%rcx,%rdx), %ebx
leaq 1(%rax), %rsi
movq %rsi, 24(%r13)
movb %bl, (%rax)
addq $4, %rdx
cmpl %edx, %r8d
jne LBB46_136
jmp LBB46_103
.p2align 4, 0x90
LBB46_137: ## in Loop: Header=BB46_138 Depth=2
shll %cl, %esi
orl %esi, %edx
movl %edx, (%r9)
leal 8(%rcx), %esi
movl %esi, (%r12)
cmpl $17, %ecx
movl %esi, %ecx
jge LBB46_115
LBB46_138: ## Parent Loop BB46_103 Depth=1
## => This Inner Loop Header: Depth=2
movl %edx, %esi
shrl %cl, %esi
testl %esi, %esi
jne LBB46_185
## %bb.139: ## in Loop: Header=BB46_138 Depth=2
movq (%r13), %rdi
xorl %esi, %esi
cmpq 8(%r13), %rdi
jae LBB46_137
## %bb.140: ## in Loop: Header=BB46_138 Depth=2
leaq 1(%rdi), %rsi
movq %rsi, (%r13)
movzbl (%rdi), %esi
jmp LBB46_137
.p2align 4, 0x90
LBB46_141: ## in Loop: Header=BB46_142 Depth=2
shll %cl, %edi
orl %edi, %esi
movl %esi, (%rbx)
leal 8(%rcx), %edi
movl %edi, (%r12)
cmpl $17, %ecx
movl %edi, %ecx
jge LBB46_120
LBB46_142: ## Parent Loop BB46_103 Depth=1
## => This Inner Loop Header: Depth=2
movl %esi, %edx
shrl %cl, %edx
testl %edx, %edx
jne LBB46_187
## %bb.143: ## in Loop: Header=BB46_142 Depth=2
movq (%r13), %rdx
xorl %edi, %edi
cmpq 8(%r13), %rdx
jae LBB46_141
## %bb.144: ## in Loop: Header=BB46_142 Depth=2
leaq 1(%rdx), %rdi
movq %rdi, (%r13)
movzbl (%rdx), %edi
jmp LBB46_141
LBB46_145:
xorl %edi, %edi
LBB46_146:
movb %dil, -2072(%rbp,%rcx)
incq %rcx
LBB46_147:
cmpq $3, %rsi
jne LBB46_162
LBB46_148:
movzwl -2072(%rbp), %r15d
movzwl -2070(%rbp), %eax
movl %r15d, %ecx
xorl $65535, %ecx ## imm = 0xFFFF
cmpl %ecx, %eax
jne LBB46_186
## %bb.149:
movq (%r13), %rsi
leaq (%rsi,%r15), %rax
cmpq 8(%r13), %rax
ja LBB46_188
## %bb.150:
movq 24(%r13), %rbx
movq 40(%r13), %rax
leaq (%rbx,%r15), %rcx
cmpq %rax, %rcx
jbe LBB46_156
## %bb.151:
cmpl $0, 48(%r13)
je LBB46_196
## %bb.152:
movq 32(%r13), %rdi
subq %rdi, %rbx
subl %edi, %eax
leal (%r15,%rbx), %ecx
.p2align 4, 0x90
LBB46_153: ## =>This Inner Loop Header: Depth=1
movl %eax, %edx
leal (%rdx,%rdx), %eax
cmpl %edx, %ecx
jg LBB46_153
## %bb.154:
movslq %edx, %r14
movq %r14, %rsi
callq _realloc
testq %rax, %rax
je LBB46_197
## %bb.155:
movq %rax, 32(%r13)
movslq %ebx, %rbx
addq %rax, %rbx
movq %rbx, 24(%r13)
addq %rax, %r14
movq %r14, 40(%r13)
movq (%r13), %rsi
LBB46_156:
movq %rbx, %rdi
movq %r15, %rdx
callq _memcpy
addq %r15, (%r13)
addq %r15, 24(%r13)
movq -2584(%rbp), %r9 ## 8-byte Reload
LBB46_157:
movq _stbi_png_partial@GOTPCREL(%rip), %rax
cmpl $0, (%rax)
je LBB46_167
## %bb.158:
movq 24(%r13), %rax
subq 32(%r13), %rax
movl $1, -2564(%rbp) ## 4-byte Folded Spill
cmpq $65536, %rax ## imm = 0x10000
movq -2616(%rbp), %rax ## 8-byte Reload
jg LBB46_174
## %bb.159:
andl $1, %eax
je LBB46_168
jmp LBB46_174
.p2align 4, 0x90
LBB46_160: ## in Loop: Header=BB46_162 Depth=1
leaq 1(%rdx), %rdi
movq %rdi, (%r13)
movzbl (%rdx), %esi
movq %rdi, %rdx
LBB46_161: ## in Loop: Header=BB46_162 Depth=1
movb %sil, -2071(%rbp,%rcx)
addq $2, %rcx
cmpq $4, %rcx
je LBB46_148
LBB46_162: ## =>This Inner Loop Header: Depth=1
cmpq %rax, %rdx
jae LBB46_164
## %bb.163: ## in Loop: Header=BB46_162 Depth=1
leaq 1(%rdx), %rdi
movq %rdi, (%r13)
movzbl (%rdx), %esi
movq %rdi, %rdx
jmp LBB46_165
.p2align 4, 0x90
LBB46_164: ## in Loop: Header=BB46_162 Depth=1
xorl %esi, %esi
LBB46_165: ## in Loop: Header=BB46_162 Depth=1
movb %sil, -2072(%rbp,%rcx)
cmpq %rax, %rdx
jb LBB46_160
## %bb.166: ## in Loop: Header=BB46_162 Depth=1
xorl %esi, %esi
jmp LBB46_161
LBB46_167:
testb $1, -2616(%rbp) ## 1-byte Folded Reload
jne LBB46_198
LBB46_168:
movl (%r12), %ecx
movl (%r9), %ebx
testl %ecx, %ecx
jg LBB46_10
jmp LBB46_18
.p2align 4, 0x90
LBB46_17: ## in Loop: Header=BB46_18 Depth=1
shll %cl, %eax
orl %eax, %ebx
movl %ebx, (%r9)
leal 8(%rcx), %eax
movl %eax, (%r12)
cmpl $17, %ecx
movl %eax, %ecx
jge LBB46_11
LBB46_18: ## =>This Inner Loop Header: Depth=1
movl %ebx, %eax
shrl %cl, %eax
testl %eax, %eax
jne LBB46_195
## %bb.19: ## in Loop: Header=BB46_18 Depth=1
movq (%r13), %rdx
xorl %eax, %eax
cmpq 8(%r13), %rdx
jae LBB46_17
## %bb.20: ## in Loop: Header=BB46_18 Depth=1
leaq 1(%rdx), %rax
movq %rax, (%r13)
movzbl (%rdx), %eax
jmp LBB46_17
LBB46_169:
leaq L_.str.35(%rip), %rax
jmp LBB46_172
LBB46_170:
leaq L_.str.12(%rip), %rax
movq %rax, _failure_reason(%rip)
jmp LBB46_174
LBB46_171:
leaq L_.str.36(%rip), %rax
LBB46_172:
movq %rax, _failure_reason(%rip)
LBB46_173:
movl $0, -2564(%rbp) ## 4-byte Folded Spill
LBB46_174:
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
cmpq -48(%rbp), %rax
jne LBB46_205
## %bb.175:
movl -2564(%rbp), %eax ## 4-byte Reload
LBB46_176:
addq $2584, %rsp ## imm = 0xA18
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB46_196:
leaq L_.str.41(%rip), %rax
movq %rax, _failure_reason(%rip)
jmp LBB46_174
LBB46_197:
leaq L_.str.5(%rip), %rax
movq %rax, _failure_reason(%rip)
jmp LBB46_174
LBB46_177:
leaq L_.str.48(%rip), %rax
movq %rax, _failure_reason(%rip)
jmp LBB46_174
LBB46_178:
callq _do_zlib.cold.8
LBB46_179:
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
cmpq -48(%rbp), %rax
jne LBB46_205
## %bb.180:
xorl %eax, %eax
jmp LBB46_176
LBB46_185:
callq _do_zlib.cold.3
jmp LBB46_179
LBB46_186:
leaq L_.str.39(%rip), %rax
movq %rax, _failure_reason(%rip)
jmp LBB46_174
LBB46_187:
callq _do_zlib.cold.2
jmp LBB46_179
LBB46_188:
leaq L_.str.40(%rip), %rax
movq %rax, _failure_reason(%rip)
jmp LBB46_174
LBB46_189:
callq _do_zlib.cold.6
jmp LBB46_179
LBB46_190:
callq _do_zlib.cold.5
jmp LBB46_179
LBB46_191:
callq _do_zlib.cold.7
jmp LBB46_179
LBB46_192:
callq _do_zlib.cold.13
jmp LBB46_179
LBB46_193:
leaq L_.str.43(%rip), %rax
movq %rax, _failure_reason(%rip)
jmp LBB46_174
LBB46_195:
callq _do_zlib.cold.14
jmp LBB46_179
LBB46_198:
movl $1, -2564(%rbp) ## 4-byte Folded Spill
jmp LBB46_174
LBB46_199:
callq _do_zlib.cold.1
jmp LBB46_179
LBB46_200:
callq _do_zlib.cold.11
jmp LBB46_179
LBB46_201:
callq _do_zlib.cold.10
jmp LBB46_179
LBB46_202:
callq _do_zlib.cold.9
jmp LBB46_179
LBB46_203:
callq _do_zlib.cold.4
jmp LBB46_179
LBB46_204:
callq _do_zlib.cold.12
jmp LBB46_179
LBB46_205:
callq ___stack_chk_fail
.cfi_endproc
## -- End function
.globl _stbi_zlib_decode_malloc ## -- Begin function stbi_zlib_decode_malloc
.p2align 4, 0x90
##-----------------------------------------------------------------------
## stbi_zlib_decode_malloc(const char *buffer, int len, int *outlen)
## ABI: SysV AMD64 (Darwin). In: rdi = compressed input, esi = input
##   length, rdx = optional int* receiving the decoded size (may be NULL).
## Out: rax = heap buffer holding the decoded bytes, or NULL on failure.
## Strategy: build a decoder-state struct on the stack whose first two
## fields are [input, input+len], malloc a 16 KiB initial output, then
## delegate to do_zlib(&state, out, 16384, 1 /*growable*/, 1 /*zlib hdr*/).
##-----------------------------------------------------------------------
_stbi_zlib_decode_malloc: ## @stbi_zlib_decode_malloc
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
## Darwin stack-probe sequence: probe guard pages, then reserve the
## 4104-byte frame.
movl $4104, %eax ## imm = 0x1008
callq ____chkstk_darwin
subq %rax, %rsp
popq %rax
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdx, %r14 ## r14 = outlen (may be NULL)
movl %esi, %r15d ## r15d = input length
movq %rdi, %rbx ## rbx = input buffer
## Install the stack-smashing canary at -32(%rbp); verified before return.
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -32(%rbp)
movl $16384, %edi ## imm = 0x4000
callq _malloc ## initial 16 KiB output buffer
testq %rax, %rax
je LBB47_6 ## malloc failed -> return NULL
## %bb.1:
## Decoder state at -4128(%rbp): input cursor, then input end.
movq %rbx, -4128(%rbp)
movslq %r15d, %rcx
addq %rbx, %rcx
movq %rcx, -4120(%rbp) ## input end = buffer + len
leaq -4128(%rbp), %rdi
movq %rax, %rsi
movl $16384, %edx ## imm = 0x4000
movl $1, %ecx ## growable output: do_zlib may realloc it
movl $1, %r8d ## expect a zlib header (cf. noheader variant)
callq _do_zlib
testl %eax, %eax
je LBB47_5 ## decode failed -> free buffer, return NULL
## %bb.2:
testq %r14, %r14
je LBB47_3 ## caller passed NULL outlen
## %bb.4:
## *outlen = zout - zout_start; these state fields are written back by
## do_zlib (presumably output cursor and output base -- TODO confirm).
movl -4104(%rbp), %ecx
movq -4096(%rbp), %rax
subl %eax, %ecx
movl %ecx, (%r14)
jmp LBB47_7
LBB47_5:
movq -4096(%rbp), %rdi
callq _free ## release the (possibly realloc'd) output on failure
LBB47_6:
xorl %eax, %eax ## return NULL
LBB47_7:
## Stack-canary check; mismatch aborts via __stack_chk_fail.
movq ___stack_chk_guard@GOTPCREL(%rip), %rcx
movq (%rcx), %rcx
cmpq -32(%rbp), %rcx
jne LBB47_9
## %bb.8:
addq $4104, %rsp ## imm = 0x1008
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
LBB47_3:
movq -4096(%rbp), %rax ## NULL outlen: just return the buffer
jmp LBB47_7
LBB47_9:
callq ___stack_chk_fail
.cfi_endproc
## -- End function
.globl _stbi_zlib_decode_buffer ## -- Begin function stbi_zlib_decode_buffer
.p2align 4, 0x90
##-----------------------------------------------------------------------
## stbi_zlib_decode_buffer(char *obuffer, int olen,
##                         const char *ibuffer, int ilen)
## ABI: SysV AMD64 (Darwin). Decodes into a caller-supplied fixed-size
## buffer (do_zlib arg ecx = 0: output must not be realloc'd) and expects
## a zlib header (r8d = 1).
## Out: eax = number of bytes produced, or -1 on decode failure.
##-----------------------------------------------------------------------
_stbi_zlib_decode_buffer: ## @stbi_zlib_decode_buffer
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
## Darwin stack probe for the 4104-byte frame.
movl $4104, %eax ## imm = 0x1008
callq ____chkstk_darwin
subq %rax, %rsp
popq %rax
.cfi_offset %rbx, -24
movl %esi, %eax ## eax = olen
movq %rdi, %rsi ## rsi = obuffer (becomes do_zlib arg 2)
## Stack canary at -16(%rbp).
movq ___stack_chk_guard@GOTPCREL(%rip), %rdi
movq (%rdi), %rdi
movq %rdi, -16(%rbp)
## Decoder state at -4112(%rbp): input cursor, then input end.
movq %rdx, -4112(%rbp)
movslq %ecx, %rcx
addq %rdx, %rcx
movq %rcx, -4104(%rbp)
leaq -4112(%rbp), %rdi
xorl %ebx, %ebx
movl %eax, %edx
xorl %ecx, %ecx ## fixed-size output: no realloc growth
movl $1, %r8d ## parse the zlib header
callq _do_zlib
## Bytes written = zout - zout_start (state fields updated by do_zlib).
movl -4088(%rbp), %ecx
subl -4080(%rbp), %ecx
## Branchless error flag: ebx = (do_zlib returned 0) ? -1 : 0 via sbb.
cmpl $1, %eax
sbbl %ebx, %ebx
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
cmpq -16(%rbp), %rax
jne LBB48_2
## %bb.1:
orl %ecx, %ebx ## -1 dominates on failure, else the byte count
movl %ebx, %eax
addq $4104, %rsp ## imm = 0x1008
popq %rbx
popq %rbp
retq
LBB48_2:
callq ___stack_chk_fail
.cfi_endproc
## -- End function
.globl _stbi_zlib_decode_noheader_malloc ## -- Begin function stbi_zlib_decode_noheader_malloc
.p2align 4, 0x90
##-----------------------------------------------------------------------
## stbi_zlib_decode_noheader_malloc(const char *buffer, int len,
##                                  int *outlen)
## Same contract as stbi_zlib_decode_malloc, except do_zlib is called
## with r8d = 0: the input is a raw deflate stream without a zlib header.
## In: rdi = input, esi = length, rdx = optional int* for decoded size.
## Out: rax = malloc'd decoded buffer, or NULL on failure.
##-----------------------------------------------------------------------
_stbi_zlib_decode_noheader_malloc: ## @stbi_zlib_decode_noheader_malloc
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
## Darwin stack probe for the 4104-byte frame.
movl $4104, %eax ## imm = 0x1008
callq ____chkstk_darwin
subq %rax, %rsp
popq %rax
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdx, %r14 ## r14 = outlen (may be NULL)
movl %esi, %r15d ## r15d = input length
movq %rdi, %rbx ## rbx = input buffer
## Stack canary at -32(%rbp).
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -32(%rbp)
movl $16384, %edi ## imm = 0x4000
callq _malloc ## initial 16 KiB output buffer
testq %rax, %rax
je LBB49_6 ## malloc failed -> return NULL
## %bb.1:
## Decoder state at -4128(%rbp): input cursor, then input end.
movq %rbx, -4128(%rbp)
movslq %r15d, %rcx
addq %rbx, %rcx
movq %rcx, -4120(%rbp) ## input end = buffer + len
leaq -4128(%rbp), %rdi
movq %rax, %rsi
movl $16384, %edx ## imm = 0x4000
movl $1, %ecx ## growable output: do_zlib may realloc it
xorl %r8d, %r8d ## no zlib header: raw deflate stream
callq _do_zlib
testl %eax, %eax
je LBB49_5 ## decode failed -> free buffer, return NULL
## %bb.2:
testq %r14, %r14
je LBB49_3 ## caller passed NULL outlen
## %bb.4:
## *outlen = zout - zout_start (state fields written back by do_zlib).
movl -4104(%rbp), %ecx
movq -4096(%rbp), %rax
subl %eax, %ecx
movl %ecx, (%r14)
jmp LBB49_7
LBB49_5:
movq -4096(%rbp), %rdi
callq _free ## failure: release the output buffer
LBB49_6:
xorl %eax, %eax ## return NULL
LBB49_7:
## Stack-canary check before unwinding the frame.
movq ___stack_chk_guard@GOTPCREL(%rip), %rcx
movq (%rcx), %rcx
cmpq -32(%rbp), %rcx
jne LBB49_9
## %bb.8:
addq $4104, %rsp ## imm = 0x1008
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
LBB49_3:
movq -4096(%rbp), %rax ## NULL outlen: just return the buffer
jmp LBB49_7
LBB49_9:
callq ___stack_chk_fail
.cfi_endproc
## -- End function
.globl _stbi_zlib_decode_noheader_buffer ## -- Begin function stbi_zlib_decode_noheader_buffer
.p2align 4, 0x90
##-----------------------------------------------------------------------
## stbi_zlib_decode_noheader_buffer(char *obuffer, int olen,
##                                  const char *ibuffer, int ilen)
## Same contract as stbi_zlib_decode_buffer, except do_zlib is called
## with r8d = 0: raw deflate input with no zlib header. Output goes to a
## caller-supplied fixed-size buffer (ecx = 0: no realloc growth).
## Out: eax = number of bytes produced, or -1 on decode failure.
##-----------------------------------------------------------------------
_stbi_zlib_decode_noheader_buffer: ## @stbi_zlib_decode_noheader_buffer
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax
## Darwin stack probe for the 4104-byte frame.
movl $4104, %eax ## imm = 0x1008
callq ____chkstk_darwin
subq %rax, %rsp
popq %rax
.cfi_offset %rbx, -24
movl %esi, %eax ## eax = olen
movq %rdi, %rsi ## rsi = obuffer (becomes do_zlib arg 2)
## Stack canary at -16(%rbp).
movq ___stack_chk_guard@GOTPCREL(%rip), %rdi
movq (%rdi), %rdi
movq %rdi, -16(%rbp)
## Decoder state at -4112(%rbp): input cursor, then input end.
movq %rdx, -4112(%rbp)
movslq %ecx, %rcx
addq %rdx, %rcx
movq %rcx, -4104(%rbp)
leaq -4112(%rbp), %rdi
xorl %ebx, %ebx
movl %eax, %edx
xorl %ecx, %ecx ## fixed-size output: no realloc growth
xorl %r8d, %r8d ## no zlib header: raw deflate stream
callq _do_zlib
## Bytes written = zout - zout_start (state fields updated by do_zlib).
movl -4088(%rbp), %ecx
subl -4080(%rbp), %ecx
## Branchless error flag: ebx = (do_zlib returned 0) ? -1 : 0 via sbb.
cmpl $1, %eax
sbbl %ebx, %ebx
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
cmpq -16(%rbp), %rax
jne LBB50_2
## %bb.1:
orl %ecx, %ebx ## -1 dominates on failure, else the byte count
movl %ebx, %eax
addq $4104, %rsp ## imm = 0x1008
popq %rbx
popq %rbp
retq
LBB50_2:
callq ___stack_chk_fail
.cfi_endproc
## -- End function
.p2align 4, 0x90 ## -- Begin function do_png
##-----------------------------------------------------------------------
## do_png -- run the PNG parser and hand back the finished pixel buffer.
## ABI: SysV AMD64 (Darwin). In: rdi = png context, rsi = int *x,
##   rdx = int *y, rcx = int *comp (may be NULL), r8d = req_comp
##   (0 = keep native; values >= 5 are rejected).
## Out: rax = heap image buffer, or NULL with _failure_reason set.
## Context offsets used here (inferred from the accesses below):
##   +0 width, +4 height, +8 native comp, +12 output comp,
##   +40/+48/+56 parser-owned heap pointers (+56 holds the image).
##-----------------------------------------------------------------------
_do_png: ## @do_png
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
## Zero the three owned pointers (+40, +48, +56) so later cleanup is safe.
xorps %xmm0, %xmm0
movups %xmm0, 40(%rdi)
movq $0, 56(%rdi)
cmpl $5, %r8d
jb LBB51_2 ## valid req_comp is 0..4 (unsigned compare)
## %bb.1:
leaq L_.str.6(%rip), %rax ## bad component-count request
movq %rax, _failure_reason(%rip)
xorl %r14d, %r14d ## result = NULL
jmp LBB51_10
LBB51_2:
movl %r8d, %r12d ## r12d = req_comp
movq %rcx, %r15 ## r15 = comp out-param (may be NULL)
movq %rsi, %r13 ## r13 = x out-param
movq %rdi, %rbx ## rbx = context
movq %rdx, -48(%rbp) ## 8-byte Spill
xorl %r14d, %r14d
xorl %esi, %esi
movl %r8d, %edx
callq _parse_png_file ## parse_png_file(ctx, 0, req_comp)
testl %eax, %eax
je LBB51_9 ## parse failed: free owned buffers, return NULL
## %bb.3:
movq 56(%rbx), %r14 ## take ownership of the decoded image
movq $0, 56(%rbx)
testl %r12d, %r12d
je LBB51_7 ## req_comp == 0: keep the native format
## %bb.4:
movl 12(%rbx), %esi ## esi = components actually produced
cmpl %r12d, %esi
je LBB51_7 ## already in the requested format
## %bb.5:
movl (%rbx), %ecx ## width
movl 4(%rbx), %r8d ## height
movq %r14, %rdi
movl %r12d, %edx
callq _convert_format ## convert_format(img, comp, req_comp, w, h)
movq %rax, %r14
movl %r12d, 12(%rbx) ## record the requested component count
testq %rax, %rax
je LBB51_6 ## conversion failed -> return NULL
LBB51_7:
movl (%rbx), %eax
movl %eax, (%r13) ## *x = width
movl 4(%rbx), %eax
movq -48(%rbp), %rcx ## 8-byte Reload
movl %eax, (%rcx) ## *y = height
testq %r15, %r15
je LBB51_9
## %bb.8:
movl 8(%rbx), %eax
movl %eax, (%r15) ## *comp = native component count
LBB51_9:
## Free any remaining parser-owned buffers; each field is re-zeroed so
## the teardown is idempotent. free(NULL) is a no-op.
movq 56(%rbx), %rdi
callq _free
movq $0, 56(%rbx)
movq 48(%rbx), %rdi
callq _free
movq $0, 48(%rbx)
movq 40(%rbx), %rdi
callq _free
movq $0, 40(%rbx)
LBB51_10:
movq %r14, %rax ## return the image (or NULL)
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB51_6:
xorl %r14d, %r14d ## convert_format returned NULL
jmp LBB51_10
.cfi_endproc
## -- End function
.globl _stbi_png_load ## -- Begin function stbi_png_load
.p2align 4, 0x90
##-----------------------------------------------------------------------
## stbi_png_load -- load a PNG from a named file.
## ABI: SysV AMD64 (Darwin). In: rdi = filename, rsi = int *x,
##   rdx = int *y, rcx = int *comp, r8d = req_comp.
## Out: rax = decoded image buffer, or NULL if fopen or do_png failed.
## The FILE* is always fclose'd when the open succeeded.
##-----------------------------------------------------------------------
_stbi_png_load: ## @stbi_png_load
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $72, %rsp ## frame for the stack-allocated png context
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %r8d, %r14d ## r14d = req_comp
movq %rcx, %r15 ## r15 = comp out-param
movq %rdx, %r12 ## r12 = y out-param
movq %rsi, %r13 ## r13 = x out-param
leaq L_.str(%rip), %rsi ## fopen mode string (presumably "rb" -- see string table)
callq _fopen
testq %rax, %rax
je LBB52_1 ## open failed -> return NULL
## %bb.2:
movq %rax, %rbx ## keep FILE* for fclose
movq %rax, -88(%rbp) ## context+16 = FILE* (context base is -104(%rbp))
leaq -104(%rbp), %rdi
movq %r13, %rsi
movq %r12, %rdx
movq %r15, %rcx
movl %r14d, %r8d
callq _do_png ## do_png(&ctx, x, y, comp, req_comp)
movq %rax, %r14 ## preserve the result across fclose
movq %rbx, %rdi
callq _fclose
jmp LBB52_3
LBB52_1:
xorl %r14d, %r14d ## fopen failed -> NULL
LBB52_3:
movq %r14, %rax
addq $72, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__literal16,16byte_literals
.p2align 4 ## -- Begin function parse_png_file
## 16-byte SIMD constants for parse_png_file. The consuming code lies
## outside this chunk, so the exact role is inferred -- TODO confirm.
## LCPI53_0: eight 16-bit lanes of 0x00FF -- presumably a low-byte mask.
LCPI53_0:
.short 255 ## 0xff
.short 255 ## 0xff
.short 255 ## 0xff
.short 255 ## 0xff
.short 255 ## 0xff
.short 255 ## 0xff
.short 255 ## 0xff
.short 255 ## 0xff
## LCPI53_1: byte-shuffle control selecting the even-indexed bytes
## (0,2,4,...,14) of a 128-bit lane; the trailing .space bytes are
## zero-valued padding up to 16 bytes.
LCPI53_1:
.byte 0 ## 0x0
.byte 2 ## 0x2
.byte 4 ## 0x4
.byte 6 ## 0x6
.byte 8 ## 0x8
.byte 10 ## 0xa
.byte 12 ## 0xc
.byte 14 ## 0xe
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.section __TEXT,__text,regular,pure_instructions
.p2align 4, 0x90
_parse_png_file: ## @parse_png_file
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
movl $5336, %eax ## imm = 0x14D8
callq ____chkstk_darwin
subq %rax, %rsp
popq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %edx, %r12d
movl %esi, -5192(%rbp) ## 4-byte Spill
movq %rdi, %r13
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -48(%rbp)
xorl %r14d, %r14d
leaq _check_png_header.png_sig(%rip), %r15
xorl %ebx, %ebx
.p2align 4, 0x90
LBB53_1: ## =>This Inner Loop Header: Depth=1
movq 16(%r13), %rdi
testq %rdi, %rdi
je LBB53_3
## %bb.2: ## in Loop: Header=BB53_1 Depth=1
callq _fgetc
cmpl $-1, %eax
cmovel %r14d, %eax
jmp LBB53_5
.p2align 4, 0x90
LBB53_3: ## in Loop: Header=BB53_1 Depth=1
movq 24(%r13), %rcx
xorl %eax, %eax
cmpq 32(%r13), %rcx
jae LBB53_5
## %bb.4: ## in Loop: Header=BB53_1 Depth=1
leaq 1(%rcx), %rax
movq %rax, 24(%r13)
movzbl (%rcx), %eax
.p2align 4, 0x90
LBB53_5: ## in Loop: Header=BB53_1 Depth=1
movzbl (%rbx,%r15), %ecx
cmpl %ecx, %eax
jne LBB53_10
## %bb.6: ## in Loop: Header=BB53_1 Depth=1
incq %rbx
cmpq $8, %rbx
jne LBB53_1
## %bb.7:
movl $1, %eax
cmpl $1, -5192(%rbp) ## 4-byte Folded Reload
je LBB53_13
## %bb.8:
movl %r12d, -5180(%rbp) ## 4-byte Spill
movq %r13, %rdi
callq _get32
movl %eax, %r12d
movq %r13, %rdi
callq _get32
cmpl $1229472850, %eax ## imm = 0x49484452
jne LBB53_15
## %bb.9:
movb $1, %cl
xorl %r14d, %r14d
movl $1229472850, %eax ## imm = 0x49484452
movl $0, -5172(%rbp) ## 4-byte Folded Spill
xorl %r15d, %r15d
xorl %edx, %edx
movq %rdx, -5208(%rbp) ## 8-byte Spill
xorl %edx, %edx
movq %rdx, -5272(%rbp) ## 8-byte Spill
xorl %edx, %edx
movq %rdx, -5224(%rbp) ## 8-byte Spill
movl $0, -5264(%rbp) ## 4-byte Folded Spill
jmp LBB53_18
LBB53_10:
leaq L_.str.68(%rip), %rax
LBB53_11:
movq %rax, _failure_reason(%rip)
LBB53_12:
xorl %eax, %eax
LBB53_13:
movq ___stack_chk_guard@GOTPCREL(%rip), %rcx
movq (%rcx), %rcx
cmpq -48(%rbp), %rcx
jne LBB53_215
LBB53_14:
addq $5336, %rsp ## imm = 0x14D8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB53_15:
leaq L_.str.51(%rip), %rax
jmp LBB53_11
LBB53_16: ## in Loop: Header=BB53_18 Depth=1
movb $4, %al
movl %eax, -5172(%rbp) ## 4-byte Spill
.p2align 4, 0x90
LBB53_17: ## in Loop: Header=BB53_18 Depth=1
movq %r13, %rdi
callq _get32
movq %r13, %rdi
callq _get32
movl %eax, %r12d
movq %r13, %rdi
callq _get32
xorl %ecx, %ecx
LBB53_18: ## =>This Loop Header: Depth=1
## Child Loop BB53_39 Depth 2
## Child Loop BB53_83 Depth 2
## Child Loop BB53_58 Depth 2
## Child Loop BB53_47 Depth 2
movl %r12d, %r12d
cmpl $1347179588, %eax ## imm = 0x504C5444
jg LBB53_28
## %bb.19: ## in Loop: Header=BB53_18 Depth=1
cmpl $1229209940, %eax ## imm = 0x49444154
je LBB53_42
## %bb.20: ## in Loop: Header=BB53_18 Depth=1
cmpl $1229472850, %eax ## imm = 0x49484452
jne LBB53_72
## %bb.21: ## in Loop: Header=BB53_18 Depth=1
testb $1, %cl
je LBB53_142
## %bb.22: ## in Loop: Header=BB53_18 Depth=1
cmpl $13, %r12d
jne LBB53_146
## %bb.23: ## in Loop: Header=BB53_18 Depth=1
movq %r13, %rdi
callq _get32
movl %eax, (%r13)
cmpl $16777217, %eax ## imm = 0x1000001
jae LBB53_115
## %bb.24: ## in Loop: Header=BB53_18 Depth=1
movq %r13, %rdi
callq _get32
movl %eax, 4(%r13)
cmpl $16777217, %eax ## imm = 0x1000001
jae LBB53_115
## %bb.25: ## in Loop: Header=BB53_18 Depth=1
movq 16(%r13), %rdi
testq %rdi, %rdi
je LBB53_92
## %bb.26: ## in Loop: Header=BB53_18 Depth=1
callq _fgetc
cmpl $-1, %eax
je LBB53_117
## %bb.27: ## in Loop: Header=BB53_18 Depth=1
cmpl $8, %eax
je LBB53_94
jmp LBB53_117
.p2align 4, 0x90
LBB53_28: ## in Loop: Header=BB53_18 Depth=1
cmpl $1347179589, %eax ## imm = 0x504C5445
je LBB53_52
## %bb.29: ## in Loop: Header=BB53_18 Depth=1
cmpl $1951551059, %eax ## imm = 0x74524E53
jne LBB53_73
## %bb.30: ## in Loop: Header=BB53_18 Depth=1
cmpq $0, 40(%r13)
jne LBB53_143
## %bb.31: ## in Loop: Header=BB53_18 Depth=1
cmpb $0, -5172(%rbp) ## 1-byte Folded Reload
je LBB53_77
## %bb.32: ## in Loop: Header=BB53_18 Depth=1
cmpl $2, -5192(%rbp) ## 4-byte Folded Reload
je LBB53_153
## %bb.33: ## in Loop: Header=BB53_18 Depth=1
movq -5224(%rbp), %rax ## 8-byte Reload
testl %eax, %eax
je LBB53_154
## %bb.34: ## in Loop: Header=BB53_18 Depth=1
cmpl %r12d, %eax
jb LBB53_148
## %bb.35: ## in Loop: Header=BB53_18 Depth=1
testq %r12, %r12
je LBB53_16
## %bb.36: ## in Loop: Header=BB53_18 Depth=1
shlq $2, %r12
xorl %ebx, %ebx
jmp LBB53_39
.p2align 4, 0x90
LBB53_37: ## in Loop: Header=BB53_39 Depth=2
callq _fgetc
cmpl $-1, %eax
cmovel %r14d, %eax
LBB53_38: ## in Loop: Header=BB53_39 Depth=2
movl %ebx, %ecx
andl $-4, %ecx
movb %al, -5165(%rbp,%rcx)
addq $4, %rbx
cmpq %rbx, %r12
je LBB53_16
LBB53_39: ## Parent Loop BB53_18 Depth=1
## => This Inner Loop Header: Depth=2
movq 16(%r13), %rdi
testq %rdi, %rdi
jne LBB53_37
## %bb.40: ## in Loop: Header=BB53_39 Depth=2
movq 24(%r13), %rcx
xorl %eax, %eax
cmpq 32(%r13), %rcx
jae LBB53_38
## %bb.41: ## in Loop: Header=BB53_39 Depth=2
leaq 1(%rcx), %rax
movq %rax, 24(%r13)
movzbl (%rcx), %eax
jmp LBB53_38
LBB53_42: ## in Loop: Header=BB53_18 Depth=1
movl -5172(%rbp), %ecx ## 4-byte Reload
testb %cl, %cl
movq -5224(%rbp), %rax ## 8-byte Reload
je LBB53_44
## %bb.43: ## in Loop: Header=BB53_18 Depth=1
testl %eax, %eax
je LBB53_144
LBB53_44: ## in Loop: Header=BB53_18 Depth=1
cmpl $2, -5192(%rbp) ## 4-byte Folded Reload
je LBB53_147
## %bb.45: ## in Loop: Header=BB53_18 Depth=1
movq -5208(%rbp), %rax ## 8-byte Reload
leal (%r12,%rax), %r14d
movq -5272(%rbp), %rcx ## 8-byte Reload
cmpl %ecx, %r14d
jbe LBB53_50
## %bb.46: ## in Loop: Header=BB53_18 Depth=1
cmpl $4097, %r12d ## imm = 0x1001
movl $4096, %eax ## imm = 0x1000
cmovael %r12d, %eax
testl %ecx, %ecx
cmovnel %ecx, %eax
.p2align 4, 0x90
LBB53_47: ## Parent Loop BB53_18 Depth=1
## => This Inner Loop Header: Depth=2
movl %eax, %ecx
leal (%rcx,%rcx), %eax
cmpl %ecx, %r14d
ja LBB53_47
## %bb.48: ## in Loop: Header=BB53_18 Depth=1
movq 40(%r13), %rdi
movq %rcx, -5272(%rbp) ## 8-byte Spill
movl %ecx, %esi
callq _realloc
testq %rax, %rax
je LBB53_203
## %bb.49: ## in Loop: Header=BB53_18 Depth=1
movq %rax, 40(%r13)
LBB53_50: ## in Loop: Header=BB53_18 Depth=1
movq 16(%r13), %rcx
movl -5208(%rbp), %edi ## 4-byte Reload
addq 40(%r13), %rdi
testq %rcx, %rcx
je LBB53_91
## %bb.51: ## in Loop: Header=BB53_18 Depth=1
movl $1, %esi
movq %r12, %rdx
callq _fread
movl %r14d, %ecx
movq %rcx, -5208(%rbp) ## 8-byte Spill
cmpq %r12, %rax
movl $0, %r14d
je LBB53_17
jmp LBB53_155
LBB53_52: ## in Loop: Header=BB53_18 Depth=1
cmpl $769, %r12d ## imm = 0x301
jae LBB53_116
## %bb.53: ## in Loop: Header=BB53_18 Depth=1
movzwl %r12w, %eax
imull $43691, %eax, %ecx ## imm = 0xAAAB
shrl $17, %ecx
leal (%rcx,%rcx,2), %eax
cmpl %r12d, %eax
jne LBB53_116
## %bb.54: ## in Loop: Header=BB53_18 Depth=1
cmpw $3, %r12w
movq %rcx, -5224(%rbp) ## 8-byte Spill
jb LBB53_17
## %bb.55: ## in Loop: Header=BB53_18 Depth=1
movq %r15, -5256(%rbp) ## 8-byte Spill
cmpl $2, %ecx
movl $1, %eax
cmovael %ecx, %eax
shlq $2, %rax
movq %rax, -5280(%rbp) ## 8-byte Spill
xorl %r15d, %r15d
xorl %r14d, %r14d
jmp LBB53_58
.p2align 4, 0x90
LBB53_56: ## in Loop: Header=BB53_58 Depth=2
callq _fgetc
cmpl $-1, %eax
cmovel %ebx, %eax
movq %r15, %rcx
LBB53_57: ## in Loop: Header=BB53_58 Depth=2
movq %rcx, %rdx
orq $2, %rdx
movb %al, -5168(%rbp,%rdx)
orq $3, %rcx
movb $-1, -5168(%rbp,%rcx)
incq %r14
addq $4, %r15
cmpq %r15, -5280(%rbp) ## 8-byte Folded Reload
je LBB53_76
LBB53_58: ## Parent Loop BB53_18 Depth=1
## => This Inner Loop Header: Depth=2
movq 16(%r13), %rdi
testq %rdi, %rdi
je LBB53_61
## %bb.59: ## in Loop: Header=BB53_58 Depth=2
callq _fgetc
cmpl $-1, %eax
movq 16(%r13), %rdi
movl $0, %r12d
cmovel %r12d, %eax
movb %al, -5168(%rbp,%r15)
testq %rdi, %rdi
je LBB53_63
## %bb.60: ## in Loop: Header=BB53_58 Depth=2
callq _fgetc
cmpl $-1, %eax
movq 16(%r13), %rdi
movl $0, %ebx
cmovel %r12d, %eax
movb %al, -5167(%rbp,%r15)
movq %r15, %rcx
testq %rdi, %rdi
jne LBB53_56
jmp LBB53_70
.p2align 4, 0x90
LBB53_61: ## in Loop: Header=BB53_58 Depth=2
movq 24(%r13), %rax
cmpq 32(%r13), %rax
jae LBB53_64
## %bb.62: ## in Loop: Header=BB53_58 Depth=2
leaq 1(%rax), %rcx
movq %rcx, 24(%r13)
movzbl (%rax), %eax
jmp LBB53_65
.p2align 4, 0x90
LBB53_63: ## in Loop: Header=BB53_58 Depth=2
leaq (,%r14,4), %rcx
jmp LBB53_66
LBB53_64: ## in Loop: Header=BB53_58 Depth=2
xorl %eax, %eax
LBB53_65: ## in Loop: Header=BB53_58 Depth=2
movb %al, -5168(%rbp,%r15)
movq %r15, %rcx
LBB53_66: ## in Loop: Header=BB53_58 Depth=2
movq 24(%r13), %rax
cmpq 32(%r13), %rax
jae LBB53_68
## %bb.67: ## in Loop: Header=BB53_58 Depth=2
leaq 1(%rax), %rdx
movq %rdx, 24(%r13)
movzbl (%rax), %eax
jmp LBB53_69
.p2align 4, 0x90
LBB53_68: ## in Loop: Header=BB53_58 Depth=2
xorl %eax, %eax
LBB53_69: ## in Loop: Header=BB53_58 Depth=2
movq %rcx, %rdx
orq $1, %rdx
movb %al, -5168(%rbp,%rdx)
LBB53_70: ## in Loop: Header=BB53_58 Depth=2
movq 24(%r13), %rdx
xorl %eax, %eax
cmpq 32(%r13), %rdx
jae LBB53_57
## %bb.71: ## in Loop: Header=BB53_58 Depth=2
leaq 1(%rdx), %rax
movq %rax, 24(%r13)
movzbl (%rdx), %eax
jmp LBB53_57
LBB53_72: ## in Loop: Header=BB53_18 Depth=1
cmpl $1229278788, %eax ## imm = 0x49454E44
je LBB53_119
LBB53_73: ## in Loop: Header=BB53_18 Depth=1
testl $536870912, %eax ## imm = 0x20000000
je LBB53_145
## %bb.74: ## in Loop: Header=BB53_18 Depth=1
movq 16(%r13), %rdi
testq %rdi, %rdi
je LBB53_90
## %bb.75: ## in Loop: Header=BB53_18 Depth=1
movslq %r12d, %rsi
movl $1, %edx
callq _fseek
jmp LBB53_17
LBB53_76: ## in Loop: Header=BB53_18 Depth=1
movq -5256(%rbp), %r15 ## 8-byte Reload
xorl %r14d, %r14d
jmp LBB53_17
LBB53_77: ## in Loop: Header=BB53_18 Depth=1
movl 8(%r13), %eax
testb $1, %al
je LBB53_156
## %bb.78: ## in Loop: Header=BB53_18 Depth=1
leal (%rax,%rax), %ecx
cmpl %r12d, %ecx
jne LBB53_148
## %bb.79: ## in Loop: Header=BB53_18 Depth=1
movb $1, %r15b
testl %eax, %eax
jle LBB53_112
## %bb.80: ## in Loop: Header=BB53_18 Depth=1
xorl %ebx, %ebx
jmp LBB53_83
.p2align 4, 0x90
LBB53_81: ## in Loop: Header=BB53_83 Depth=2
callq _fgetc
cmpl $-1, %eax
cmovel %r14d, %eax
LBB53_82: ## in Loop: Header=BB53_83 Depth=2
movb %al, -5175(%rbp,%rbx)
incq %rbx
movslq 8(%r13), %rax
cmpq %rax, %rbx
jge LBB53_112
LBB53_83: ## Parent Loop BB53_18 Depth=1
## => This Inner Loop Header: Depth=2
movq 16(%r13), %rdi
testq %rdi, %rdi
je LBB53_86
## %bb.84: ## in Loop: Header=BB53_83 Depth=2
callq _fgetc
movq 16(%r13), %rdi
testq %rdi, %rdi
jne LBB53_81
## %bb.85: ## in Loop: Header=BB53_83 Depth=2
movq 24(%r13), %rcx
movq 32(%r13), %rdx
jmp LBB53_88
.p2align 4, 0x90
LBB53_86: ## in Loop: Header=BB53_83 Depth=2
movq 24(%r13), %rcx
movq 32(%r13), %rdx
cmpq %rdx, %rcx
jae LBB53_88
## %bb.87: ## in Loop: Header=BB53_83 Depth=2
incq %rcx
movq %rcx, 24(%r13)
LBB53_88: ## in Loop: Header=BB53_83 Depth=2
xorl %eax, %eax
cmpq %rdx, %rcx
jae LBB53_82
## %bb.89: ## in Loop: Header=BB53_83 Depth=2
leaq 1(%rcx), %rax
movq %rax, 24(%r13)
movzbl (%rcx), %eax
jmp LBB53_82
LBB53_90: ## in Loop: Header=BB53_18 Depth=1
movslq %r12d, %rax
addq %rax, 24(%r13)
jmp LBB53_17
LBB53_91: ## in Loop: Header=BB53_18 Depth=1
movq 24(%r13), %rsi
movq %r12, %rdx
callq _memcpy
addq %r12, 24(%r13)
movl %r14d, %eax
movq %rax, -5208(%rbp) ## 8-byte Spill
xorl %r14d, %r14d
jmp LBB53_17
LBB53_92: ## in Loop: Header=BB53_18 Depth=1
movq 24(%r13), %rax
cmpq 32(%r13), %rax
jae LBB53_117
## %bb.93: ## in Loop: Header=BB53_18 Depth=1
leaq 1(%rax), %rcx
movq %rcx, 24(%r13)
movzbl (%rax), %eax
cmpl $8, %eax
jne LBB53_117
LBB53_94: ## in Loop: Header=BB53_18 Depth=1
movq 16(%r13), %rdi
testq %rdi, %rdi
je LBB53_97
## %bb.95: ## in Loop: Header=BB53_18 Depth=1
callq _fgetc
movl %eax, %r12d
cmpl $-1, %eax
jne LBB53_99
## %bb.96: ## in Loop: Header=BB53_18 Depth=1
xorl %r12d, %r12d
jmp LBB53_103
LBB53_97: ## in Loop: Header=BB53_18 Depth=1
movq 24(%r13), %rax
xorl %r12d, %r12d
cmpq 32(%r13), %rax
jae LBB53_103
## %bb.98: ## in Loop: Header=BB53_18 Depth=1
leaq 1(%rax), %rcx
movq %rcx, 24(%r13)
movzbl (%rax), %r12d
LBB53_99: ## in Loop: Header=BB53_18 Depth=1
cmpl $7, %r12d
jge LBB53_152
## %bb.100: ## in Loop: Header=BB53_18 Depth=1
cmpl $3, %r12d
jne LBB53_102
## %bb.101: ## in Loop: Header=BB53_18 Depth=1
movb $3, %al
movl %eax, -5172(%rbp) ## 4-byte Spill
movl $3, %r12d
jmp LBB53_103
LBB53_102: ## in Loop: Header=BB53_18 Depth=1
testb $1, %r12b
jne LBB53_152
LBB53_103: ## in Loop: Header=BB53_18 Depth=1
movq %r13, %rdi
callq _get8
testl %eax, %eax
jne LBB53_149
## %bb.104: ## in Loop: Header=BB53_18 Depth=1
movq %r13, %rdi
callq _get8
testl %eax, %eax
jne LBB53_150
## %bb.105: ## in Loop: Header=BB53_18 Depth=1
movq %r13, %rdi
callq _get8
cmpl $2, %eax
jge LBB53_151
## %bb.106: ## in Loop: Header=BB53_18 Depth=1
movl (%r13), %esi
testl %esi, %esi
je LBB53_118
## %bb.107: ## in Loop: Header=BB53_18 Depth=1
movl 4(%r13), %ecx
testl %ecx, %ecx
je LBB53_118
## %bb.108: ## in Loop: Header=BB53_18 Depth=1
movl %eax, -5264(%rbp) ## 4-byte Spill
cmpb $0, -5172(%rbp) ## 1-byte Folded Reload
je LBB53_110
## %bb.109: ## in Loop: Header=BB53_18 Depth=1
movl $1, 8(%r13)
movl $1073741824, %eax ## imm = 0x40000000
xorl %edx, %edx
divl %esi
shrl $2, %eax
cmpl %ecx, %eax
jae LBB53_17
jmp LBB53_115
LBB53_110: ## in Loop: Header=BB53_18 Depth=1
movl %r12d, %eax
andl $2, %eax
shrl $2, %r12d
andl $1, %r12d
leal (%r12,%rax), %edi
incl %edi
movl %edi, 8(%r13)
movl $1073741824, %eax ## imm = 0x40000000
xorl %edx, %edx
divl %esi
xorl %edx, %edx
divl %edi
cmpl %ecx, %eax
jb LBB53_115
## %bb.111: ## in Loop: Header=BB53_18 Depth=1
cmpl $2, -5192(%rbp) ## 4-byte Folded Reload
je LBB53_157
LBB53_112: ## in Loop: Header=BB53_18 Depth=1
movl $0, -5172(%rbp) ## 4-byte Folded Spill
jmp LBB53_17
LBB53_115:
leaq L_.str.33(%rip), %rax
jmp LBB53_11
LBB53_116:
leaq L_.str.60(%rip), %rax
jmp LBB53_11
LBB53_117:
leaq L_.str.54(%rip), %rax
jmp LBB53_11
LBB53_118:
leaq L_.str.59(%rip), %rax
jmp LBB53_11
LBB53_119:
movq %r15, -5256(%rbp) ## 8-byte Spill
movl $1, %eax
cmpl $0, -5192(%rbp) ## 4-byte Folded Reload
jne LBB53_13
## %bb.120:
movq 40(%r13), %rbx
testq %rbx, %rbx
je LBB53_158
## %bb.121:
movl $16384, %edi ## imm = 0x4000
callq _malloc
testq %rax, %rax
je LBB53_160
## %bb.122:
movq %rbx, -4144(%rbp)
movslq -5208(%rbp), %rcx ## 4-byte Folded Reload
addq %rbx, %rcx
movq %rcx, -4136(%rbp)
leaq -4144(%rbp), %rdi
movq %rax, %rsi
movl $16384, %edx ## imm = 0x4000
movl $1, %ecx
movl $1, %r8d
callq _do_zlib
testl %eax, %eax
je LBB53_159
## %bb.123:
movl -4120(%rbp), %edx
movq -4112(%rbp), %rcx
movq %rcx, 48(%r13)
xorl %eax, %eax
testq %rcx, %rcx
je LBB53_13
## %bb.124:
subl %ecx, %edx
movl %edx, -5212(%rbp) ## 4-byte Spill
movq 40(%r13), %rdi
callq _free
movq $0, 40(%r13)
movl 8(%r13), %eax
leal 1(%rax), %edx
cmpb $0, -5256(%rbp) ## 1-byte Folded Reload
cmovnel %edx, %eax
cmpb $0, -5172(%rbp) ## 1-byte Folded Reload
movl %edx, %ecx
cmovnel %eax, %ecx
movl -5180(%rbp), %esi ## 4-byte Reload
cmpl %esi, %edx
cmovnel %eax, %ecx
cmpl $3, %esi
cmovel %eax, %ecx
movl %ecx, 12(%r13)
movq 48(%r13), %rsi
cmpl $0, -5264(%rbp) ## 4-byte Folded Reload
je LBB53_162
## %bb.125:
movq _stbi_png_partial@GOTPCREL(%rip), %rax
movl (%rax), %edx
movl %edx, -5292(%rbp) ## 4-byte Spill
movl $0, (%rax)
movl %ecx, %ebx
movl (%r13), %r15d
movl 4(%r13), %r14d
movl %r15d, %edi
imull %ecx, %edi
imull %r14d, %edi
movq %rsi, -5288(%rbp) ## 8-byte Spill
callq _malloc
movq -5288(%rbp), %rsi ## 8-byte Reload
movq %rax, -5248(%rbp) ## 8-byte Spill
movl %ebx, -5196(%rbp) ## 4-byte Spill
movslq %ebx, %rax
movq %rax, -5192(%rbp) ## 8-byte Spill
addq %rax, %rax
movq %rax, -5336(%rbp) ## 8-byte Spill
xorl %r8d, %r8d
leaq l___const.create_png_image.xorig(%rip), %r9
leaq l___const.create_png_image.xspc(%rip), %r10
leaq l___const.create_png_image.yorig(%rip), %r11
LBB53_126: ## =>This Loop Header: Depth=1
## Child Loop BB53_132 Depth 2
## Child Loop BB53_136 Depth 3
movslq (%r9,%r8,4), %rax
movslq (%r10,%r8,4), %r12
movq %rax, -5320(%rbp) ## 8-byte Spill
movl %eax, %edi
notl %edi
addl %r15d, %edi
addl %r12d, %edi
movl %edi, %eax
xorl %edx, %edx
divl %r12d
movl %eax, %r15d
movl (%r11,%r8,4), %ecx
leaq l___const.create_png_image.yspc(%rip), %rax
movl (%rax,%r8,4), %ebx
movl %ecx, -5236(%rbp) ## 4-byte Spill
notl %ecx
addl %r14d, %ecx
addl %ebx, %ecx
movl %ecx, %eax
xorl %edx, %edx
divl %ebx
movq %r12, -5312(%rbp) ## 8-byte Spill
cmpl %edi, %r12d
ja LBB53_140
## %bb.127: ## in Loop: Header=BB53_126 Depth=1
cmpl %ecx, %ebx
movl -5196(%rbp), %ecx ## 4-byte Reload
ja LBB53_140
## %bb.128: ## in Loop: Header=BB53_126 Depth=1
movl %eax, %r14d
movl %ebx, -5232(%rbp) ## 4-byte Spill
movq %r8, -5344(%rbp) ## 8-byte Spill
movq %r13, %rdi
movq %rsi, -5288(%rbp) ## 8-byte Spill
movl -5212(%rbp), %edx ## 4-byte Reload
movl %r15d, %r8d
movl %eax, %r9d
callq _create_png_image_raw
testl %eax, %eax
je LBB53_173
## %bb.129: ## in Loop: Header=BB53_126 Depth=1
movl %r14d, -5228(%rbp) ## 4-byte Spill
testl %r14d, %r14d
movq -5192(%rbp), %rcx ## 8-byte Reload
jle LBB53_139
## %bb.130: ## in Loop: Header=BB53_126 Depth=1
movslq %r15d, %rax
movq %rax, -5360(%rbp) ## 8-byte Spill
movl -5228(%rbp), %eax ## 4-byte Reload
movq %rax, -5368(%rbp) ## 8-byte Spill
movl %r15d, %eax
movq %rax, -5352(%rbp) ## 8-byte Spill
## kill: def $eax killed $eax killed $rax def $rax
andl $-2, %eax
movq %rax, -5224(%rbp) ## 8-byte Spill
movq -5320(%rbp), %rdx ## 8-byte Reload
movq -5312(%rbp), %rsi ## 8-byte Reload
leaq (%rsi,%rdx), %rbx
imulq %rcx, %rbx
movl -5196(%rbp), %eax ## 4-byte Reload
movl %eax, %edi
imull -5236(%rbp), %edi ## 4-byte Folded Reload
imull -5232(%rbp), %eax ## 4-byte Folded Reload
movl %eax, -5304(%rbp) ## 4-byte Spill
movq -5336(%rbp), %rax ## 8-byte Reload
imulq %rsi, %rax
movq %rax, -5272(%rbp) ## 8-byte Spill
movq %rcx, %rax
imulq %rdx, %rax
xorl %esi, %esi
movq %rax, -5208(%rbp) ## 8-byte Spill
movl %r15d, -5296(%rbp) ## 4-byte Spill
movq %rbx, -5264(%rbp) ## 8-byte Spill
jmp LBB53_132
LBB53_131: ## in Loop: Header=BB53_132 Depth=2
incq %rsi
addl -5304(%rbp), %edi ## 4-byte Folded Reload
cmpq -5368(%rbp), %rsi ## 8-byte Folded Reload
je LBB53_139
LBB53_132: ## Parent Loop BB53_126 Depth=1
## => This Loop Header: Depth=2
## Child Loop BB53_136 Depth 3
testl %r15d, %r15d
jle LBB53_131
## %bb.133: ## in Loop: Header=BB53_132 Depth=2
movl (%r13), %ebx
movq %rsi, -5328(%rbp) ## 8-byte Spill
movq %rsi, %rcx
imulq -5360(%rbp), %rcx ## 8-byte Folded Reload
movq 56(%r13), %r14
cmpl $1, %r15d
movl %edi, -5240(%rbp) ## 4-byte Spill
movl %ebx, -5300(%rbp) ## 4-byte Spill
jne LBB53_135
## %bb.134: ## in Loop: Header=BB53_132 Depth=2
xorl %r12d, %r12d
jmp LBB53_137
LBB53_135: ## in Loop: Header=BB53_132 Depth=2
movl %ebx, %r15d
imull %edi, %r15d
addq -5248(%rbp), %r15 ## 8-byte Folded Reload
xorl %r12d, %r12d
movq %rcx, -5280(%rbp) ## 8-byte Spill
.p2align 4, 0x90
LBB53_136: ## Parent Loop BB53_126 Depth=1
## Parent Loop BB53_132 Depth=2
## => This Inner Loop Header: Depth=3
leaq (%r15,%rax), %rdi
leaq (%r12,%rcx), %rsi
movq -5192(%rbp), %rbx ## 8-byte Reload
imulq %rbx, %rsi
addq %r14, %rsi
movq %rbx, %rdx
callq _memcpy
movq %r12, %rsi
orq $1, %rsi
movq -5264(%rbp), %rax ## 8-byte Reload
leaq (%r15,%rax), %rdi
addq -5280(%rbp), %rsi ## 8-byte Folded Reload
imulq %rbx, %rsi
addq %r14, %rsi
movq %rbx, %rdx
callq _memcpy
movq -5280(%rbp), %rcx ## 8-byte Reload
movq -5208(%rbp), %rax ## 8-byte Reload
addq $2, %r12
addq -5272(%rbp), %r15 ## 8-byte Folded Reload
cmpq -5224(%rbp), %r12 ## 8-byte Folded Reload
jne LBB53_136
LBB53_137: ## in Loop: Header=BB53_132 Depth=2
testb $1, -5352(%rbp) ## 1-byte Folded Reload
movq -5192(%rbp), %rdx ## 8-byte Reload
movl -5296(%rbp), %r15d ## 4-byte Reload
movl -5240(%rbp), %edi ## 4-byte Reload
movq -5328(%rbp), %rsi ## 8-byte Reload
je LBB53_131
## %bb.138: ## in Loop: Header=BB53_132 Depth=2
movl -5232(%rbp), %eax ## 4-byte Reload
## kill: def $eax killed $eax def $rax
imull %esi, %eax
addl -5236(%rbp), %eax ## 4-byte Folded Reload
imull -5196(%rbp), %eax ## 4-byte Folded Reload
imull -5300(%rbp), %eax ## 4-byte Folded Reload
addq -5248(%rbp), %rax ## 8-byte Folded Reload
movq %r12, %rdi
imulq -5312(%rbp), %rdi ## 8-byte Folded Reload
addq -5320(%rbp), %rdi ## 8-byte Folded Reload
imulq %rdx, %rdi
addq %rax, %rdi
addq %r12, %rcx
imulq %rdx, %rcx
addq %rcx, %r14
movq %r14, %rsi
callq _memcpy
movq -5328(%rbp), %rsi ## 8-byte Reload
movq -5208(%rbp), %rax ## 8-byte Reload
movl -5240(%rbp), %edi ## 4-byte Reload
jmp LBB53_131
LBB53_139: ## in Loop: Header=BB53_126 Depth=1
movq 56(%r13), %rdi
callq _free
imull -5196(%rbp), %r15d ## 4-byte Folded Reload
incl %r15d
movl -5228(%rbp), %eax ## 4-byte Reload
imull %r15d, %eax
cltq
movq -5288(%rbp), %rsi ## 8-byte Reload
addq %rax, %rsi
subl %eax, -5212(%rbp) ## 4-byte Folded Spill
movq -5344(%rbp), %r8 ## 8-byte Reload
leaq l___const.create_png_image.xorig(%rip), %r9
leaq l___const.create_png_image.xspc(%rip), %r10
leaq l___const.create_png_image.yorig(%rip), %r11
LBB53_140: ## in Loop: Header=BB53_126 Depth=1
incq %r8
cmpq $7, %r8
je LBB53_161
## %bb.141: ## in Loop: Header=BB53_126 Depth=1
movl (%r13), %r15d
movl 4(%r13), %r14d
jmp LBB53_126
LBB53_142:
leaq L_.str.52(%rip), %rax
jmp LBB53_11
LBB53_143:
leaq L_.str.61(%rip), %rax
jmp LBB53_11
LBB53_144:
leaq L_.str.65(%rip), %rax
jmp LBB53_11
LBB53_145:
leaq _parse_png_file.invalid_chunk(%rip), %rcx
bswapl %eax
movl %eax, _parse_png_file.invalid_chunk(%rip)
movq %rcx, _failure_reason(%rip)
jmp LBB53_12
LBB53_146:
leaq L_.str.53(%rip), %rax
jmp LBB53_11
LBB53_147:
movzbl %cl, %eax
movl %eax, 8(%r13)
movl $1, %eax
jmp LBB53_13
LBB53_148:
leaq L_.str.63(%rip), %rax
jmp LBB53_11
LBB53_149:
leaq L_.str.56(%rip), %rax
jmp LBB53_11
LBB53_150:
leaq L_.str.57(%rip), %rax
jmp LBB53_11
LBB53_151:
leaq L_.str.58(%rip), %rax
jmp LBB53_11
LBB53_152:
leaq L_.str.55(%rip), %rax
jmp LBB53_11
LBB53_153:
movl $4, 8(%r13)
movl $1, %eax
jmp LBB53_13
LBB53_154:
leaq L_.str.62(%rip), %rax
jmp LBB53_11
LBB53_155:
leaq L_.str.66(%rip), %rax
jmp LBB53_11
LBB53_156:
leaq L_.str.64(%rip), %rax
jmp LBB53_11
LBB53_157:
movl $1, %eax
jmp LBB53_13
LBB53_158:
leaq L_.str.67(%rip), %rax
jmp LBB53_11
LBB53_159:
movq -4112(%rbp), %rdi
callq _free
LBB53_160:
movq $0, 48(%r13)
jmp LBB53_12
LBB53_161:
movq -5248(%rbp), %rax ## 8-byte Reload
movq %rax, 56(%r13)
movq _stbi_png_partial@GOTPCREL(%rip), %rax
movl -5292(%rbp), %ecx ## 4-byte Reload
movl %ecx, (%rax)
movl -5180(%rbp), %edi ## 4-byte Reload
jmp LBB53_163
LBB53_162:
movl (%r13), %r8d
movl 4(%r13), %r9d
movq %r13, %rdi
movl -5212(%rbp), %edx ## 4-byte Reload
callq _create_png_image_raw
testl %eax, %eax
movl -5180(%rbp), %edi ## 4-byte Reload
je LBB53_12
LBB53_163:
cmpb $0, -5256(%rbp) ## 1-byte Folded Reload
je LBB53_197
## %bb.164:
movl 4(%r13), %eax
movl 12(%r13), %edx
imull (%r13), %eax
movq 56(%r13), %rcx
cmpl $2, %edx
je LBB53_174
## %bb.165:
cmpl $4, %edx
jne LBB53_213
## %bb.166:
testl %eax, %eax
je LBB53_197
## %bb.167:
movb -5175(%rbp), %dl
movb -5174(%rbp), %bl
movb -5173(%rbp), %sil
cmpl $1, %eax
jne LBB53_178
LBB53_168:
testb $1, %al
movl -5180(%rbp), %edi ## 4-byte Reload
je LBB53_197
## %bb.169:
cmpb %dl, (%rcx)
jne LBB53_197
## %bb.170:
cmpb %bl, 1(%rcx)
jne LBB53_197
## %bb.171:
cmpb %sil, 2(%rcx)
jne LBB53_197
## %bb.172:
movb $0, 3(%rcx)
jmp LBB53_197
LBB53_173:
movq -5248(%rbp), %rdi ## 8-byte Reload
callq _free
jmp LBB53_12
LBB53_174:
testl %eax, %eax
je LBB53_197
## %bb.175:
movb -5175(%rbp), %dl
leal -1(%rax), %ebx
xorl %r11d, %r11d
cmpl $8, %ebx
jb LBB53_195
## %bb.176:
leaq 1(%rbx), %r11
movzbl %dl, %r8d
cmpl $16, %ebx
jae LBB53_188
## %bb.177:
xorl %r9d, %r9d
jmp LBB53_192
LBB53_178:
movl %eax, %edi
andl $-2, %edi
jmp LBB53_180
LBB53_179: ## in Loop: Header=BB53_180 Depth=1
addq $8, %rcx
addl $-2, %edi
je LBB53_168
LBB53_180: ## =>This Inner Loop Header: Depth=1
cmpb %dl, (%rcx)
jne LBB53_184
## %bb.181: ## in Loop: Header=BB53_180 Depth=1
cmpb %bl, 1(%rcx)
jne LBB53_184
## %bb.182: ## in Loop: Header=BB53_180 Depth=1
cmpb %sil, 2(%rcx)
jne LBB53_184
## %bb.183: ## in Loop: Header=BB53_180 Depth=1
movb $0, 3(%rcx)
LBB53_184: ## in Loop: Header=BB53_180 Depth=1
cmpb %dl, 4(%rcx)
jne LBB53_179
## %bb.185: ## in Loop: Header=BB53_180 Depth=1
cmpb %bl, 5(%rcx)
jne LBB53_179
## %bb.186: ## in Loop: Header=BB53_180 Depth=1
cmpb %sil, 6(%rcx)
jne LBB53_179
## %bb.187: ## in Loop: Header=BB53_180 Depth=1
movb $0, 7(%rcx)
jmp LBB53_179
LBB53_188:
movl %r11d, %esi
andl $15, %esi
testq %rsi, %rsi
movl $16, %r10d
cmovneq %rsi, %r10
movq %r11, %r9
subq %r10, %r9
movd %r8d, %xmm0
pxor %xmm1, %xmm1
pshufb %xmm1, %xmm0
movq %rbx, %rdi
subq %r10, %rdi
movq $-1, %rsi
movdqa LCPI53_0(%rip), %xmm1 ## xmm1 = [255,255,255,255,255,255,255,255]
pcmpeqd %xmm2, %xmm2
LBB53_189: ## =>This Inner Loop Header: Depth=1
movdqu 2(%rcx,%rsi,2), %xmm3
movdqu 18(%rcx,%rsi,2), %xmm4
pand %xmm1, %xmm4
pand %xmm1, %xmm3
packuswb %xmm4, %xmm3
pcmpeqb %xmm0, %xmm3
pxor %xmm2, %xmm3
pextrb $0, %xmm3, 3(%rcx,%rsi,2)
pextrb $1, %xmm3, 5(%rcx,%rsi,2)
pextrb $2, %xmm3, 7(%rcx,%rsi,2)
pextrb $3, %xmm3, 9(%rcx,%rsi,2)
pextrb $4, %xmm3, 11(%rcx,%rsi,2)
pextrb $5, %xmm3, 13(%rcx,%rsi,2)
pextrb $6, %xmm3, 15(%rcx,%rsi,2)
pextrb $7, %xmm3, 17(%rcx,%rsi,2)
pextrb $8, %xmm3, 19(%rcx,%rsi,2)
pextrb $9, %xmm3, 21(%rcx,%rsi,2)
pextrb $10, %xmm3, 23(%rcx,%rsi,2)
pextrb $11, %xmm3, 25(%rcx,%rsi,2)
pextrb $12, %xmm3, 27(%rcx,%rsi,2)
pextrb $13, %xmm3, 29(%rcx,%rsi,2)
pextrb $14, %xmm3, 31(%rcx,%rsi,2)
pextrb $15, %xmm3, 33(%rcx,%rsi,2)
addq $16, %rsi
cmpq %rsi, %rdi
jne LBB53_189
## %bb.190:
cmpl $8, %r10d
ja LBB53_192
## %bb.191:
leaq (%rcx,%r9,2), %rcx
movl %r9d, %r11d
jmp LBB53_195
LBB53_192:
movl %r11d, %esi
andl $7, %esi
testq %rsi, %rsi
movl $8, %edi
cmovneq %rsi, %rdi
subq %rdi, %r11
leaq (%rcx,%r11,2), %rsi
movd %r8d, %xmm0
punpcklbw %xmm0, %xmm0 ## xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
pshuflw $0, %xmm0, %xmm0 ## xmm0 = xmm0[0,0,0,0,4,5,6,7]
leaq (%rcx,%r9,2), %rcx
addq $15, %rcx
subq %rdi, %rbx
subq %r9, %rbx
movq $-1, %rdi
movdqa LCPI53_1(%rip), %xmm1 ## xmm1 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
pcmpeqd %xmm2, %xmm2
LBB53_193: ## =>This Inner Loop Header: Depth=1
movdqu -13(%rcx,%rdi,2), %xmm3
pshufb %xmm1, %xmm3
pcmpeqb %xmm0, %xmm3
pxor %xmm2, %xmm3
pextrb $0, %xmm3, -12(%rcx,%rdi,2)
pextrb $1, %xmm3, -10(%rcx,%rdi,2)
pextrb $2, %xmm3, -8(%rcx,%rdi,2)
pextrb $3, %xmm3, -6(%rcx,%rdi,2)
pextrb $4, %xmm3, -4(%rcx,%rdi,2)
pextrb $5, %xmm3, -2(%rcx,%rdi,2)
pextrb $6, %xmm3, (%rcx,%rdi,2)
pextrb $7, %xmm3, 2(%rcx,%rdi,2)
addq $8, %rdi
cmpq %rdi, %rbx
jne LBB53_193
## %bb.194:
movq %rsi, %rcx
LBB53_195:
subl %r11d, %eax
xorl %esi, %esi
movl -5180(%rbp), %edi ## 4-byte Reload
LBB53_196: ## =>This Inner Loop Header: Depth=1
cmpb %dl, (%rcx,%rsi,2)
setne %bl
negb %bl
movb %bl, 1(%rcx,%rsi,2)
incq %rsi
cmpl %esi, %eax
jne LBB53_196
LBB53_197:
movl -5172(%rbp), %eax ## 4-byte Reload
testb %al, %al
je LBB53_212
## %bb.198:
movzbl %al, %ebx
movl %ebx, 8(%r13)
cmpl $3, %edi
cmovgel %edi, %ebx
movl %ebx, 12(%r13)
movl 4(%r13), %r12d
imull (%r13), %r12d
movq 56(%r13), %r14
movl %r12d, %edi
imull %ebx, %edi
callq _malloc
testq %rax, %rax
je LBB53_203
## %bb.199:
movq %rax, %r15
cmpl $3, %ebx
jne LBB53_204
## %bb.200:
testl %r12d, %r12d
je LBB53_211
## %bb.201:
movl %r12d, %eax
cmpl $1, %r12d
jne LBB53_207
## %bb.202:
xorl %edx, %edx
movq %r15, %rcx
jmp LBB53_209
LBB53_203:
leaq L_.str.5(%rip), %rax
jmp LBB53_11
LBB53_204:
testl %r12d, %r12d
je LBB53_211
## %bb.205:
movl %r12d, %eax
xorl %ecx, %ecx
LBB53_206: ## =>This Inner Loop Header: Depth=1
movzbl (%r14,%rcx), %edx
movzbl -5168(%rbp,%rdx,4), %ebx
movb %bl, (%r15,%rcx,4)
movzbl -5167(%rbp,%rdx,4), %ebx
movb %bl, 1(%r15,%rcx,4)
movzbl -5166(%rbp,%rdx,4), %ebx
movb %bl, 2(%r15,%rcx,4)
movzbl -5165(%rbp,%rdx,4), %edx
movb %dl, 3(%r15,%rcx,4)
incq %rcx
cmpq %rcx, %rax
jne LBB53_206
jmp LBB53_211
LBB53_207:
movl %eax, %esi
andl $-2, %esi
xorl %edx, %edx
movq %r15, %rcx
LBB53_208: ## =>This Inner Loop Header: Depth=1
movzbl (%r14,%rdx), %edi
movzbl -5168(%rbp,%rdi,4), %ebx
movb %bl, (%rcx)
movzbl -5167(%rbp,%rdi,4), %ebx
movb %bl, 1(%rcx)
movzbl -5166(%rbp,%rdi,4), %ebx
movb %bl, 2(%rcx)
movzbl 1(%r14,%rdx), %edi
movzbl -5168(%rbp,%rdi,4), %ebx
movb %bl, 3(%rcx)
movzbl -5167(%rbp,%rdi,4), %ebx
movb %bl, 4(%rcx)
movzbl -5166(%rbp,%rdi,4), %ebx
movb %bl, 5(%rcx)
addq $6, %rcx
addq $2, %rdx
cmpq %rdx, %rsi
jne LBB53_208
LBB53_209:
testb $1, %al
je LBB53_211
## %bb.210:
movzbl (%r14,%rdx), %eax
movb -5168(%rbp,%rax,4), %dl
movb %dl, (%rcx)
movb -5167(%rbp,%rax,4), %dl
movb %dl, 1(%rcx)
movb -5166(%rbp,%rax,4), %al
movb %al, 2(%rcx)
LBB53_211:
movq %r14, %rdi
callq _free
movq %r15, 56(%r13)
LBB53_212:
movq 48(%r13), %rdi
callq _free
movq $0, 48(%r13)
movl $1, %eax
jmp LBB53_13
LBB53_213:
callq _parse_png_file.cold.1
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
cmpq -48(%rbp), %rax
jne LBB53_215
## %bb.214:
xorl %eax, %eax
jmp LBB53_14
LBB53_215:
callq ___stack_chk_fail
.cfi_endproc
## -- End function
.globl _stbi_png_info ## -- Begin function stbi_png_info
.p2align 4, 0x90
_stbi_png_info: ## @stbi_png_info
## Probes a PNG file for its header info without fully decoding it.
## SysV AMD64 args (from register usage below):
##   rdi = filename (forwarded to fopen), rsi/rdx/rcx = three optional
##   output int pointers (presumably x / y / comp in stb_image style --
##   TODO confirm against the header or callers).
## Returns eax = 1 on success, 0 if fopen or parse_png_file fails.
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $72, %rsp ## locals: a reader/context struct based at -104(%rbp)
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rcx, %r14 ## r14 = out-ptr #3 (callee-saved across the calls below)
movq %rdx, %r15 ## r15 = out-ptr #2
movq %rsi, %r12 ## r12 = out-ptr #1
leaq L_.str(%rip), %rsi ## fopen mode string (contents not visible in this chunk)
callq _fopen
testq %rax, %rax
je LBB54_1 ## fopen failed -> return 0
## %bb.2:
movq %rax, %rbx ## rbx = FILE*, kept live for the fclose below
movq %rax, -88(%rbp) ## FILE* stored at context offset 16 (same slot bmp_test reads as FILE*)
leaq -104(%rbp), %rdi ## arg0 = &context
xorl %r13d, %r13d ## r13 = result, default 0 (failure)
movl $2, %esi ## arg1 = 2 (scan/mode flag -- presumably "info only"; TODO confirm)
xorl %edx, %edx ## arg2 = 0
callq _parse_png_file
testl %eax, %eax
je LBB54_9 ## parse failed -> skip stores, still close the file
## %bb.3:
testq %r12, %r12
je LBB54_5 ## out-ptr #1 may be NULL
## %bb.4:
movl -104(%rbp), %eax ## context field +0, filled in by parse_png_file
movl %eax, (%r12)
LBB54_5:
testq %r15, %r15
je LBB54_7 ## out-ptr #2 may be NULL
## %bb.6:
movl -100(%rbp), %eax ## context field +4
movl %eax, (%r15)
LBB54_7:
movl $1, %r13d ## success
testq %r14, %r14
je LBB54_9 ## out-ptr #3 may be NULL
## %bb.8:
movl -96(%rbp), %eax ## context field +8
movl %eax, (%r14)
LBB54_9:
movq %rbx, %rdi
callq _fclose ## file is always closed on the success/parse-fail paths
jmp LBB54_10
LBB54_1: ## fopen returned NULL
xorl %r13d, %r13d
LBB54_10:
movl %r13d, %eax ## eax = result (0 or 1)
addq $72, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.p2align 4, 0x90 ## -- Begin function bmp_test
_bmp_test: ## @bmp_test
## Sniffs whether the stream looks like a BMP file.
## rdi = reader context. Layout used here: +16 = FILE* (NULL means
## memory-buffer mode), +24 = current buffer pointer, +32 = buffer end.
## Consumes: 'B'(66) 'M'(77) magic, one 32-bit LE word, four single
## bytes, one 32-bit LE word, then a final 32-bit LE word (the
## info-header size) which must be 12, 40, 56 or 108.
## Returns eax = 1 when it matches, 0 otherwise.
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq %rdi, %r14 ## r14 = context (preserved across libc calls)
movq 16(%rdi), %rdi ## FILE*; NULL selects memory-buffer mode
testq %rdi, %rdi
je LBB55_2
## %bb.1: ## file mode: read first magic byte
callq _fgetc
cmpl $-1, %eax
jne LBB55_4
LBB55_26: ## EOF / mismatch: not a BMP
xorl %ebx, %ebx
jmp LBB55_28
LBB55_2: ## buffer mode: byte at cursor, bounds-checked against +32
movq 24(%r14), %rax
xorl %ebx, %ebx
cmpq 32(%r14), %rax
jae LBB55_28
## %bb.3:
leaq 1(%rax), %rcx
movq %rcx, 24(%r14) ## advance cursor
movzbl (%rax), %eax
LBB55_4:
xorl %ebx, %ebx
cmpl $66, %eax ## first byte must be 'B'
jne LBB55_28
## %bb.5: ## read second magic byte (file or buffer path)
movq 16(%r14), %rdi
testq %rdi, %rdi
je LBB55_7
## %bb.6:
callq _fgetc
cmpl $-1, %eax
je LBB55_28
## %bb.9:
cmpl $77, %eax ## second byte must be 'M'
je LBB55_10
jmp LBB55_28
LBB55_7:
movq 24(%r14), %rax
cmpq 32(%r14), %rax
jae LBB55_28
## %bb.8:
leaq 1(%rax), %rcx
movq %rcx, 24(%r14)
movzbl (%rax), %eax
cmpl $77, %eax
jne LBB55_28
LBB55_10:
movq %r14, %rdi
callq _get32le ## 32-bit LE word after the magic (value discarded)
movq 16(%r14), %rdi ## next: skip four single bytes, each via fgetc or cursor bump
testq %rdi, %rdi
je LBB55_11
## %bb.13:
callq _fgetc
movq 16(%r14), %rdi
testq %rdi, %rdi
je LBB55_14
## %bb.17:
callq _fgetc
movq 16(%r14), %rdi
testq %rdi, %rdi
je LBB55_18
## %bb.20:
callq _fgetc
movq 16(%r14), %rdi
testq %rdi, %rdi
je LBB55_21
## %bb.29:
callq _fgetc
jmp LBB55_24
LBB55_11: ## buffer-mode skips: advance cursor if not past end
movq 24(%r14), %rax
movq 32(%r14), %rcx
cmpq %rcx, %rax
jae LBB55_15
## %bb.12:
incq %rax
movq %rax, 24(%r14)
jmp LBB55_15
LBB55_14:
movq 24(%r14), %rax
movq 32(%r14), %rcx
LBB55_15:
cmpq %rcx, %rax
jae LBB55_18
## %bb.16:
incq %rax
movq %rax, 24(%r14)
LBB55_18:
movq 24(%r14), %rax
movq 32(%r14), %rcx
cmpq %rcx, %rax
jae LBB55_22
## %bb.19:
incq %rax
movq %rax, 24(%r14)
LBB55_22:
cmpq %rcx, %rax
jae LBB55_24
LBB55_23:
incq %rax
movq %rax, 24(%r14)
LBB55_24:
movq %r14, %rdi
callq _get32le ## 32-bit LE word (value discarded)
movq %r14, %rdi
callq _get32le ## 32-bit LE word: info-header size, tested below
movl $1, %ebx ## assume match
cmpl $56, %eax
ja LBB55_25 ## > 56: only 108 can still match
## %bb.27:
movl %eax, %ecx
movabsq $72058693549559808, %rdx ## imm = 0x100010000001000 -- bitmask with bits 12, 40 and 56 set
btq %rcx, %rdx
jb LBB55_28 ## header size in {12, 40, 56} -> return 1
LBB55_25:
cmpl $108, %eax ## size 108 is also accepted
jne LBB55_26
LBB55_28:
movl %ebx, %eax ## eax = 0 or 1
popq %rbx
popq %r14
popq %rbp
retq
LBB55_21: ## buffer-mode path for the fourth skipped byte
movq 24(%r14), %rax
movq 32(%r14), %rcx
cmpq %rcx, %rax
jb LBB55_23
jmp LBB55_24
.cfi_endproc
## -- End function
.globl _stbi_bmp_load ## -- Begin function stbi_bmp_load
.p2align 4, 0x90
_stbi_bmp_load: ## @stbi_bmp_load
## File-path front end for bmp_load: opens the file, builds a stack
## reader context, delegates, then closes the file.
## SysV AMD64 args: rdi = filename (to fopen), rsi/rdx/rcx = forwarded
## pointer args, r8d = forwarded int arg (presumably x/y/comp/req_comp
## in stb_image style -- TODO confirm).
## Returns rax = bmp_load's result, or NULL if fopen fails.
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $40, %rsp ## locals: reader context based at -80(%rbp)
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %r8d, %r14d ## stash the four forwarded args in callee-saved regs
movq %rcx, %r15
movq %rdx, %r12
movq %rsi, %r13
leaq L_.str(%rip), %rsi ## fopen mode string (contents not visible in this chunk)
callq _fopen
testq %rax, %rax
je LBB56_1 ## fopen failed -> return NULL
## %bb.2:
movq %rax, %rbx ## rbx = FILE*, kept live for fclose
movq %rax, -64(%rbp) ## FILE* at context offset 16 (the slot bmp_test reads as FILE*)
leaq -80(%rbp), %rdi ## arg0 = &context
movq %r13, %rsi ## restore forwarded args
movq %r12, %rdx
movq %r15, %rcx
movl %r14d, %r8d
callq _bmp_load
movq %rax, %r14 ## r14 = decoded result pointer
movq %rbx, %rdi
callq _fclose ## always close before returning
jmp LBB56_3
LBB56_1:
xorl %r14d, %r14d ## fopen failed: result = NULL
LBB56_3:
movq %r14, %rax
addq $40, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__literal16,16byte_literals
.p2align 4 ## -- Begin function bmp_load
## 16-byte SIMD constant for bmp_load: the lane-index vector <0,1,2,3>,
## loaded as xmm0 (see "movdqa LCPI57_0(%rip)" in bmp_load) to seed
## per-lane offsets in its vectorized loop.
LCPI57_0:
.long 0 ## 0x0
.long 1 ## 0x1
.long 2 ## 0x2
.long 3 ## 0x3
.section __TEXT,__text,regular,pure_instructions
.p2align 4, 0x90
_bmp_load: ## @bmp_load
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $1256, %rsp ## imm = 0x4E8
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %r8d, %ebx
movq %rcx, %r12
movq %rdx, %r14
movq %rsi, %r13
movq %rdi, %r15
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -48(%rbp)
movq 16(%rdi), %rdi
testq %rdi, %rdi
je LBB57_3
## %bb.1:
callq _fgetc
cmpl $-1, %eax
je LBB57_15
## %bb.2:
cmpl $66, %eax
je LBB57_5
jmp LBB57_15
LBB57_3:
movq 24(%r15), %rax
cmpq 32(%r15), %rax
jae LBB57_15
## %bb.4:
leaq 1(%rax), %rcx
movq %rcx, 24(%r15)
movzbl (%rax), %eax
cmpl $66, %eax
jne LBB57_15
LBB57_5:
leaq 16(%r15), %rax
movq %rax, -1080(%rbp) ## 8-byte Spill
movq (%rax), %rdi
testq %rdi, %rdi
je LBB57_8
## %bb.6:
callq _fgetc
cmpl $-1, %eax
je LBB57_15
## %bb.7:
cmpl $77, %eax
jne LBB57_15
jmp LBB57_10
LBB57_8:
movq 24(%r15), %rax
cmpq 32(%r15), %rax
jae LBB57_15
## %bb.9:
leaq 1(%rax), %rcx
movq %rcx, 24(%r15)
movzbl (%rax), %eax
cmpl $77, %eax
jne LBB57_15
LBB57_10:
movq %r15, %rdi
callq _get32le
movq 16(%r15), %rdi
testq %rdi, %rdi
movl %ebx, -1212(%rbp) ## 4-byte Spill
je LBB57_21
## %bb.11:
callq _fgetc
movq -1080(%rbp), %rax ## 8-byte Reload
movq (%rax), %rdi
testq %rdi, %rdi
je LBB57_23
## %bb.12:
callq _fgetc
movq -1080(%rbp), %rax ## 8-byte Reload
movq (%rax), %rdi
testq %rdi, %rdi
je LBB57_26
## %bb.13:
callq _fgetc
movq -1080(%rbp), %rax ## 8-byte Reload
movq (%rax), %rdi
testq %rdi, %rdi
je LBB57_77
## %bb.14:
callq _fgetc
jmp LBB57_30
LBB57_15:
leaq L_.str.74(%rip), %rax
LBB57_16:
movq %rax, _failure_reason(%rip)
LBB57_17:
xorl %edi, %edi
LBB57_18:
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
cmpq -48(%rbp), %rax
jne LBB57_309
## %bb.19:
movq %rdi, %rax
LBB57_20:
addq $1256, %rsp ## imm = 0x4E8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB57_21:
movq 24(%r15), %rax
movq 32(%r15), %rcx
cmpq %rcx, %rax
jae LBB57_24
## %bb.22:
incq %rax
movq %rax, 24(%r15)
jmp LBB57_24
LBB57_23:
movq 24(%r15), %rax
movq 32(%r15), %rcx
LBB57_24:
cmpq %rcx, %rax
jae LBB57_26
## %bb.25:
incq %rax
movq %rax, 24(%r15)
LBB57_26:
movq 24(%r15), %rax
movq 32(%r15), %rcx
cmpq %rcx, %rax
jae LBB57_28
## %bb.27:
incq %rax
movq %rax, 24(%r15)
LBB57_28:
cmpq %rcx, %rax
jae LBB57_30
LBB57_29:
incq %rax
movq %rax, 24(%r15)
LBB57_30:
movq %r15, %rdi
callq _get32le
## kill: def $eax killed $eax def $rax
movq %rax, -1112(%rbp) ## 8-byte Spill
movq %r15, %rdi
callq _get32le
cmpl $56, %eax
ja LBB57_32
## %bb.31:
movl %eax, %edx
movabsq $72058693549559808, %rcx ## imm = 0x100010000001000
btq %rdx, %rcx
jb LBB57_33
LBB57_32:
cmpl $108, %eax
jne LBB57_40
LBB57_33:
leaq L_.str.76(%rip), %rcx
movq %rcx, _failure_reason(%rip)
cmpl $12, %eax
movl %eax, -1136(%rbp) ## 4-byte Spill
movq %r14, -1288(%rbp) ## 8-byte Spill
movq %r13, -1280(%rbp) ## 8-byte Spill
jne LBB57_39
## %bb.34:
movq -1080(%rbp), %rax ## 8-byte Reload
movq (%rax), %rdi
testq %rdi, %rdi
je LBB57_41
## %bb.35:
callq _fgetc
movl %eax, %r14d
xorl %ebx, %ebx
cmpl $-1, %eax
cmovel %ebx, %r14d
movq -1080(%rbp), %rax ## 8-byte Reload
movq (%rax), %rdi
testq %rdi, %rdi
je LBB57_43
## %bb.36:
callq _fgetc
movl %eax, %ecx
shll $8, %ecx
cmpl $-1, %eax
cmovel %ebx, %ecx
movq 16(%r15), %rdi
addl %r14d, %ecx
movl %ecx, (%r15)
testq %rdi, %rdi
je LBB57_47
## %bb.37:
callq _fgetc
movl %eax, %r14d
xorl %ebx, %ebx
cmpl $-1, %eax
cmovel %ebx, %r14d
movq -1080(%rbp), %rax ## 8-byte Reload
movq (%rax), %rdi
testq %rdi, %rdi
je LBB57_88
## %bb.38:
callq _fgetc
cmpl $-1, %eax
cmovnel %eax, %ebx
jmp LBB57_51
LBB57_39:
movq %r15, %rdi
callq _get32le
movl %eax, (%r15)
movq %r15, %rdi
callq _get32le
movl %eax, %ebx
jmp LBB57_52
LBB57_40:
leaq L_.str.75(%rip), %rax
jmp LBB57_16
LBB57_41:
movq 24(%r15), %rax
movq 32(%r15), %rcx
xorl %r14d, %r14d
cmpq %rcx, %rax
jae LBB57_44
## %bb.42:
leaq 1(%rax), %rdx
movq %rdx, 24(%r15)
movzbl (%rax), %r14d
movq %rdx, %rax
jmp LBB57_44
LBB57_43:
movq 24(%r15), %rax
movq 32(%r15), %rcx
LBB57_44:
xorl %edx, %edx
cmpq %rcx, %rax
jae LBB57_46
## %bb.45:
leaq 1(%rax), %rcx
movq %rcx, 24(%r15)
movzbl (%rax), %edx
LBB57_46:
shll $8, %edx
addl %r14d, %edx
movl %edx, (%r15)
LBB57_47:
movq 24(%r15), %rax
movq 32(%r15), %rcx
xorl %r14d, %r14d
cmpq %rcx, %rax
jae LBB57_49
## %bb.48:
leaq 1(%rax), %rdx
movq %rdx, 24(%r15)
movzbl (%rax), %r14d
movq %rdx, %rax
LBB57_49:
xorl %ebx, %ebx
cmpq %rcx, %rax
jae LBB57_51
## %bb.50:
leaq 1(%rax), %rcx
movq %rcx, 24(%r15)
movzbl (%rax), %ebx
LBB57_51:
shll $8, %ebx
addl %r14d, %ebx
LBB57_52:
movl %ebx, 4(%r15)
movq 16(%r15), %rdi
testq %rdi, %rdi
je LBB57_55
## %bb.53:
callq _fgetc
movl %eax, %r14d
xorl %ebx, %ebx
cmpl $-1, %eax
cmovel %ebx, %r14d
movq -1080(%rbp), %rdx ## 8-byte Reload
movq (%rdx), %rdi
testq %rdi, %rdi
je LBB57_58
## %bb.54:
callq _fgetc
movq -1080(%rbp), %rdx ## 8-byte Reload
cmpl $-1, %eax
cmovnel %eax, %ebx
jmp LBB57_61
LBB57_55:
movq 24(%r15), %rax
movq 32(%r15), %rcx
xorl %r14d, %r14d
cmpq %rcx, %rax
jae LBB57_57
## %bb.56:
leaq 1(%rax), %rdx
movq %rdx, 24(%r15)
movzbl (%rax), %r14d
movq %rdx, %rax
LBB57_57:
movq -1080(%rbp), %rdx ## 8-byte Reload
jmp LBB57_59
LBB57_58:
movq 24(%r15), %rax
movq 32(%r15), %rcx
LBB57_59:
xorl %ebx, %ebx
cmpq %rcx, %rax
jae LBB57_61
## %bb.60:
leaq 1(%rax), %rcx
movq %rcx, 24(%r15)
movzbl (%rax), %ebx
LBB57_61:
shll $8, %ebx
addl %r14d, %ebx
cmpl $1, %ebx
jne LBB57_17
## %bb.62:
movq (%rdx), %rdi
testq %rdi, %rdi
je LBB57_65
## %bb.63:
callq _fgetc
movl %eax, %r14d
xorl %eax, %eax
cmpl $-1, %r14d
cmovel %eax, %r14d
movq -1080(%rbp), %rax ## 8-byte Reload
movq (%rax), %rdi
testq %rdi, %rdi
je LBB57_67
## %bb.64:
callq _fgetc
xorl %edx, %edx
cmpl $-1, %eax
cmovnel %eax, %edx
jmp LBB57_70
LBB57_65:
movq 24(%r15), %rax
movq 32(%r15), %rcx
xorl %r14d, %r14d
cmpq %rcx, %rax
jae LBB57_68
## %bb.66:
leaq 1(%rax), %rdx
movq %rdx, 24(%r15)
movzbl (%rax), %r14d
movq %rdx, %rax
jmp LBB57_68
LBB57_67:
movq 24(%r15), %rax
movq 32(%r15), %rcx
LBB57_68:
xorl %edx, %edx
cmpq %rcx, %rax
jae LBB57_70
## %bb.69:
leaq 1(%rax), %rcx
movq %rcx, 24(%r15)
movzbl (%rax), %edx
LBB57_70:
shll $8, %edx
addl %r14d, %edx
cmpl $1, %edx
jne LBB57_72
## %bb.71:
leaq L_.str.77(%rip), %rax
jmp LBB57_16
LBB57_72:
movl 4(%r15), %r13d
movl %r13d, %eax
negl %eax
cmovsl %r13d, %eax
movl %eax, 4(%r15)
cmpl $12, -1136(%rbp) ## 4-byte Folded Reload
movl %edx, -1104(%rbp) ## 4-byte Spill
jne LBB57_75
## %bb.73:
movq %r12, -1264(%rbp) ## 8-byte Spill
xorl %ebx, %ebx
cmpl $23, %edx
jg LBB57_78
## %bb.74:
movq -1112(%rbp), %rax ## 8-byte Reload
addl $-38, %eax
cltq
imulq $1431655766, %rax, %r12 ## imm = 0x55555556
movq %r12, %rax
shrq $63, %rax
shrq $32, %r12
addl %eax, %r12d
xorl %ebx, %ebx
jmp LBB57_79
LBB57_75:
movq %r15, %rdi
callq _get32le
movl %eax, %r14d
leal -1(%r14), %eax
cmpl $1, %eax
ja LBB57_80
## %bb.76:
leaq L_.str.78(%rip), %rax
jmp LBB57_16
LBB57_77:
movq 24(%r15), %rax
movq 32(%r15), %rcx
cmpq %rcx, %rax
jb LBB57_29
jmp LBB57_30
LBB57_78:
xorl %r12d, %r12d
LBB57_79:
movl $0, -1152(%rbp) ## 4-byte Folded Spill
xorl %r14d, %r14d
movl $0, -1120(%rbp) ## 4-byte Folded Spill
jmp LBB57_94
LBB57_80:
movq %r15, %rdi
callq _get32le
movq %r15, %rdi
callq _get32le
movq %r15, %rdi
callq _get32le
movq %r15, %rdi
callq _get32le
movq %r15, %rdi
callq _get32le
movl -1136(%rbp), %ecx ## 4-byte Reload
movl %ecx, %eax
andl $-17, %eax
cmpl $40, %eax
movq %r12, -1264(%rbp) ## 8-byte Spill
jne LBB57_89
## %bb.81:
cmpl $56, %ecx
jne LBB57_83
## %bb.82:
movq %r15, %rdi
callq _get32le
movq %r15, %rdi
callq _get32le
movq %r15, %rdi
callq _get32le
movq %r15, %rdi
callq _get32le
movl -1136(%rbp), %ecx ## 4-byte Reload
LBB57_83:
movl -1104(%rbp), %eax ## 4-byte Reload
cmpl $32, %eax
je LBB57_85
## %bb.84:
xorl %ebx, %ebx
movl $0, -1152(%rbp) ## 4-byte Folded Spill
movl $0, -1144(%rbp) ## 4-byte Folded Spill
movl $0, -1120(%rbp) ## 4-byte Folded Spill
cmpl $16, %eax
jne LBB57_91
LBB57_85:
xorl %r12d, %r12d
cmpl $3, %r14d
je LBB57_303
## %bb.86:
testl %r14d, %r14d
jne LBB57_17
## %bb.87:
cmpl $32, %eax
movl $255, %eax
movl $31, %ecx
cmovel %eax, %ecx
movl %ecx, -1152(%rbp) ## 4-byte Spill
movl $65280, %eax ## imm = 0xFF00
movl $992, %r14d ## imm = 0x3E0
cmovel %eax, %r14d
movl $16711680, %eax ## imm = 0xFF0000
movl $31744, %ecx ## imm = 0x7C00
cmovel %eax, %ecx
movl %ecx, -1120(%rbp) ## 4-byte Spill
movl $0, %ebx
setne %bl
shll $24, %ebx
addl $-16777216, %ebx ## imm = 0xFF000000
xorl %r12d, %r12d
jmp LBB57_94
LBB57_88:
movq 24(%r15), %rax
movq 32(%r15), %rcx
jmp LBB57_49
LBB57_89:
cmpl $108, %ecx
jne LBB57_307
## %bb.90:
movq %r15, %rdi
callq _get32le
movl %eax, -1120(%rbp) ## 4-byte Spill
movq %r15, %rdi
callq _get32le
movl %eax, -1144(%rbp) ## 4-byte Spill
movq %r15, %rdi
callq _get32le
movl %eax, -1152(%rbp) ## 4-byte Spill
movq %r15, %rdi
callq _get32le
movl %eax, %ebx
movq %r15, %rdi
callq _get32le
movq %r15, %rdi
callq _get32le
movq %r15, %rdi
callq _get32le
movq %r15, %rdi
callq _get32le
movq %r15, %rdi
callq _get32le
movq %r15, %rdi
callq _get32le
movq %r15, %rdi
callq _get32le
movq %r15, %rdi
callq _get32le
movq %r15, %rdi
callq _get32le
movq %r15, %rdi
callq _get32le
movq %r15, %rdi
callq _get32le
movq %r15, %rdi
callq _get32le
movq %r15, %rdi
callq _get32le
movl -1104(%rbp), %eax ## 4-byte Reload
movl -1136(%rbp), %ecx ## 4-byte Reload
LBB57_91:
xorl %r12d, %r12d
cmpl $15, %eax
jg LBB57_93
## %bb.92:
movq -1112(%rbp), %r12 ## 8-byte Reload
## kill: def $r12d killed $r12d killed $r12 def $r12
subl %ecx, %r12d
addl $-14, %r12d
sarl $2, %r12d
LBB57_93:
movl -1144(%rbp), %r14d ## 4-byte Reload
LBB57_94:
movl %ebx, -1192(%rbp) ## 4-byte Spill
cmpl $1, %ebx
movl $4, %ecx
sbbl $0, %ecx
movl %ecx, 8(%r15)
movl -1212(%rbp), %eax ## 4-byte Reload
cmpl $3, %eax
cmovgel %eax, %ecx
movl (%r15), %edi
movl %ecx, -1092(%rbp) ## 4-byte Spill
imull %ecx, %edi
imull 4(%r15), %edi
callq _malloc
testq %rax, %rax
je LBB57_120
## %bb.95:
movq %rax, %rdx
movl -1104(%rbp), %r8d ## 4-byte Reload
cmpl $15, %r8d
movq %rax, -1088(%rbp) ## 8-byte Spill
jg LBB57_121
## %bb.96:
testl %r12d, %r12d
movq -1080(%rbp), %rbx ## 8-byte Reload
je LBB57_123
## %bb.97:
cmpl $256, %r12d ## imm = 0x100
jg LBB57_123
## %bb.98:
movl %r13d, -1156(%rbp) ## 4-byte Spill
testl %r12d, %r12d
jle LBB57_124
## %bb.99:
movl %r12d, %r13d
xorl %r14d, %r14d
jmp LBB57_102
LBB57_100: ## in Loop: Header=BB57_102 Depth=1
callq _fgetc
movq -1088(%rbp), %rdx ## 8-byte Reload
movl -1104(%rbp), %r8d ## 4-byte Reload
movq -1080(%rbp), %rbx ## 8-byte Reload
LBB57_101: ## in Loop: Header=BB57_102 Depth=1
movb $-1, -1069(%rbp,%r14,4)
incq %r14
cmpq %r14, %r13
je LBB57_124
LBB57_102: ## =>This Inner Loop Header: Depth=1
movq (%rbx), %rdi
testq %rdi, %rdi
je LBB57_106
## %bb.103: ## in Loop: Header=BB57_102 Depth=1
callq _fgetc
movq -1080(%rbp), %rbx ## 8-byte Reload
cmpl $-1, %eax
movq (%rbx), %rdi
movl $0, %ecx
cmovel %ecx, %eax
movb %al, -1070(%rbp,%r14,4)
testq %rdi, %rdi
je LBB57_110
## %bb.104: ## in Loop: Header=BB57_102 Depth=1
callq _fgetc
movq -1080(%rbp), %rbx ## 8-byte Reload
cmpl $-1, %eax
movq (%rbx), %rdi
movl $0, %ecx
cmovel %ecx, %eax
movb %al, -1071(%rbp,%r14,4)
testq %rdi, %rdi
movl -1104(%rbp), %r8d ## 4-byte Reload
movq -1088(%rbp), %rdx ## 8-byte Reload
je LBB57_114
## %bb.105: ## in Loop: Header=BB57_102 Depth=1
callq _fgetc
movq -1088(%rbp), %rdx ## 8-byte Reload
movl -1104(%rbp), %r8d ## 4-byte Reload
movq -1080(%rbp), %rbx ## 8-byte Reload
cmpl $-1, %eax
movl $0, %ecx
cmovel %ecx, %eax
jmp LBB57_116
LBB57_106: ## in Loop: Header=BB57_102 Depth=1
movq 24(%r15), %rax
cmpq 32(%r15), %rax
jae LBB57_108
## %bb.107: ## in Loop: Header=BB57_102 Depth=1
leaq 1(%rax), %rcx
movq %rcx, 24(%r15)
movzbl (%rax), %eax
jmp LBB57_109
LBB57_108: ## in Loop: Header=BB57_102 Depth=1
xorl %eax, %eax
LBB57_109: ## in Loop: Header=BB57_102 Depth=1
movb %al, -1070(%rbp,%r14,4)
LBB57_110: ## in Loop: Header=BB57_102 Depth=1
movq 24(%r15), %rax
cmpq 32(%r15), %rax
jae LBB57_112
## %bb.111: ## in Loop: Header=BB57_102 Depth=1
leaq 1(%rax), %rcx
movq %rcx, 24(%r15)
movzbl (%rax), %eax
jmp LBB57_113
LBB57_112: ## in Loop: Header=BB57_102 Depth=1
xorl %eax, %eax
LBB57_113: ## in Loop: Header=BB57_102 Depth=1
movl -1104(%rbp), %r8d ## 4-byte Reload
movq -1088(%rbp), %rdx ## 8-byte Reload
movb %al, -1071(%rbp,%r14,4)
LBB57_114: ## in Loop: Header=BB57_102 Depth=1
movq 24(%r15), %rcx
xorl %eax, %eax
cmpq 32(%r15), %rcx
jae LBB57_116
## %bb.115: ## in Loop: Header=BB57_102 Depth=1
leaq 1(%rcx), %rax
movq %rax, 24(%r15)
movzbl (%rcx), %eax
LBB57_116: ## in Loop: Header=BB57_102 Depth=1
movb %al, -1072(%rbp,%r14,4)
cmpl $12, -1136(%rbp) ## 4-byte Folded Reload
je LBB57_101
## %bb.117: ## in Loop: Header=BB57_102 Depth=1
movq (%rbx), %rdi
testq %rdi, %rdi
jne LBB57_100
## %bb.118: ## in Loop: Header=BB57_102 Depth=1
movq 24(%r15), %rax
cmpq 32(%r15), %rax
jae LBB57_101
## %bb.119: ## in Loop: Header=BB57_102 Depth=1
incq %rax
movq %rax, 24(%r15)
jmp LBB57_101
LBB57_120:
leaq L_.str.5(%rip), %rax
jmp LBB57_16
LBB57_121:
movq -1112(%rbp), %rcx ## 8-byte Reload
subl -1136(%rbp), %ecx ## 4-byte Folded Reload
addl $-14, %ecx
movq -1080(%rbp), %rax ## 8-byte Reload
movq (%rax), %rdi
movslq %ecx, %rsi
testq %rdi, %rdi
je LBB57_126
## %bb.122:
movl $1, %edx
callq _fseek
movl -1104(%rbp), %r8d ## 4-byte Reload
jmp LBB57_127
LBB57_123:
movq %rdx, %rdi
callq _free
leaq L_.str.80(%rip), %rax
jmp LBB57_16
LBB57_124:
xorl %eax, %eax
movl -1136(%rbp), %esi ## 4-byte Reload
cmpl $12, %esi
sete %al
movq -1112(%rbp), %rcx ## 8-byte Reload
subl %esi, %ecx
orl $-4, %eax
imull %eax, %r12d
leal (%rcx,%r12), %eax
addl $-14, %eax
movq (%rbx), %rdi
movslq %eax, %rsi
testq %rdi, %rdi
je LBB57_240
## %bb.125:
movl $1, %edx
callq _fseek
movq -1088(%rbp), %rdx ## 8-byte Reload
movl -1104(%rbp), %r8d ## 4-byte Reload
movq -1080(%rbp), %rbx ## 8-byte Reload
jmp LBB57_241
LBB57_126:
addq %rsi, 24(%r15)
LBB57_127:
movl -1192(%rbp), %eax ## 4-byte Reload
cmpl $32, %r8d
movl %r14d, -1144(%rbp) ## 4-byte Spill
je LBB57_131
## %bb.128:
cmpl $24, %r8d
movq -1080(%rbp), %r10 ## 8-byte Reload
je LBB57_141
## %bb.129:
movl %r8d, %edx
xorl %r14d, %r14d
cmpl $16, %r8d
movl -1152(%rbp), %ecx ## 4-byte Reload
jne LBB57_136
## %bb.130:
movl (%r15), %r14d
addl %r14d, %r14d
andl $2, %r14d
jmp LBB57_136
LBB57_131:
xorl %r14d, %r14d
movl -1152(%rbp), %ecx ## 4-byte Reload
cmpl $255, %ecx
movq -1080(%rbp), %r10 ## 8-byte Reload
jne LBB57_136
## %bb.132:
cmpl $65280, -1144(%rbp) ## 4-byte Folded Reload
## imm = 0xFF00
jne LBB57_136
## %bb.133:
cmpl $-16777216, -1120(%rbp) ## 4-byte Folded Reload
## imm = 0xFF000000
jne LBB57_136
## %bb.134:
cmpl $-16777216, %eax ## imm = 0xFF000000
jne LBB57_136
## %bb.135:
movl %r13d, -1156(%rbp) ## 4-byte Spill
movb $1, %al
movl %eax, -1176(%rbp) ## 4-byte Spill
jmp LBB57_142
LBB57_136:
cmpl $0, -1120(%rbp) ## 4-byte Folded Reload
je LBB57_140
## %bb.137:
cmpl $0, -1144(%rbp) ## 4-byte Folded Reload
je LBB57_140
## %bb.138:
testl %ecx, %ecx
je LBB57_140
## %bb.139:
movl %r13d, -1156(%rbp) ## 4-byte Spill
movl -1120(%rbp), %ebx ## 4-byte Reload
movl %ebx, %edi
movl %ecx, %r13d
callq _high_bit
addl $-7, %eax
movl %eax, -1184(%rbp) ## 4-byte Spill
movl %ebx, %edi
callq _bitcount
movl %eax, %r12d
movl -1144(%rbp), %edi ## 4-byte Reload
callq _high_bit
addl $-7, %eax
movl %eax, -1168(%rbp) ## 4-byte Spill
movl %r13d, %edi
callq _high_bit
addl $-7, %eax
movl %eax, -1164(%rbp) ## 4-byte Spill
movl -1192(%rbp), %edi ## 4-byte Reload
callq _high_bit
movq -1080(%rbp), %r10 ## 8-byte Reload
addl $-7, %eax
movl %eax, -1160(%rbp) ## 4-byte Spill
movb $1, %al
movl %eax, -1172(%rbp) ## 4-byte Spill
movl $0, -1176(%rbp) ## 4-byte Folded Spill
jmp LBB57_143
LBB57_140:
leaq L_.str.82(%rip), %rax
jmp LBB57_16
LBB57_141:
movl %r13d, -1156(%rbp) ## 4-byte Spill
movl (%r15), %r14d
andl $3, %r14d
movl $0, -1176(%rbp) ## 4-byte Folded Spill
LBB57_142:
movl $0, -1172(%rbp) ## 4-byte Folded Spill
movl $0, -1184(%rbp) ## 4-byte Folded Spill
movl $0, -1168(%rbp) ## 4-byte Folded Spill
movl $0, -1164(%rbp) ## 4-byte Folded Spill
movl $0, -1160(%rbp) ## 4-byte Folded Spill
xorl %r12d, %r12d
LBB57_143:
movl 4(%r15), %edx
testl %edx, %edx
jle LBB57_270
## %bb.144:
movl -1184(%rbp), %eax ## 4-byte Reload
negl %eax
movl %eax, -1252(%rbp) ## 4-byte Spill
movl -1168(%rbp), %eax ## 4-byte Reload
negl %eax
movl %eax, -1248(%rbp) ## 4-byte Spill
movl -1164(%rbp), %eax ## 4-byte Reload
negl %eax
movl %eax, -1244(%rbp) ## 4-byte Spill
movl -1160(%rbp), %eax ## 4-byte Reload
negl %eax
movl %eax, -1240(%rbp) ## 4-byte Spill
movl %r14d, %eax
movq %rax, -1272(%rbp) ## 8-byte Spill
leal (%r12,%r12), %eax
cmpl $9, %eax
movl $8, %ecx
cmovgel %eax, %ecx
xorl %edx, %edx
cmpl $8, %eax
setl %dl
movq %rdx, %rax
movq %rdx, -1200(%rbp) ## 8-byte Spill
leal (%rdx,%r12,2), %eax
subl %eax, %ecx
movl %ecx, -1208(%rbp) ## 4-byte Spill
movd %r12d, %xmm0
pshufd $0, %xmm0, %xmm10 ## xmm10 = xmm0[0,0,0,0]
movdqa LCPI57_0(%rip), %xmm0 ## xmm0 = [0,1,2,3]
pmulld %xmm10, %xmm0
paddd %xmm10, %xmm0
movdqa %xmm0, -1232(%rbp) ## 16-byte Spill
pslld $2, %xmm10
xorl %r13d, %r13d
xorl %ecx, %ecx
movdqa %xmm10, -1136(%rbp) ## 16-byte Spill
jmp LBB57_147
LBB57_145: ## in Loop: Header=BB57_147 Depth=1
movq -1272(%rbp), %rax ## 8-byte Reload
addq %rax, 24(%r15)
LBB57_146: ## in Loop: Header=BB57_147 Depth=1
movl -1236(%rbp), %ecx ## 4-byte Reload
incl %ecx
movl 4(%r15), %edx
cmpl %edx, %ecx
jge LBB57_270
LBB57_147: ## =>This Loop Header: Depth=1
## Child Loop BB57_217 Depth 2
## Child Loop BB57_151 Depth 2
## Child Loop BB57_166 Depth 3
## Child Loop BB57_171 Depth 3
## Child Loop BB57_177 Depth 3
## Child Loop BB57_182 Depth 3
## Child Loop BB57_188 Depth 3
## Child Loop BB57_193 Depth 3
## Child Loop BB57_200 Depth 3
## Child Loop BB57_211 Depth 3
movl (%r15), %eax
cmpb $0, -1172(%rbp) ## 1-byte Folded Reload
movl %ecx, -1236(%rbp) ## 4-byte Spill
je LBB57_214
## %bb.148: ## in Loop: Header=BB57_147 Depth=1
testl %eax, %eax
jle LBB57_238
## %bb.149: ## in Loop: Header=BB57_147 Depth=1
xorl %edx, %edx
movl %r13d, %ebx
movq -1088(%rbp), %r11 ## 8-byte Reload
jmp LBB57_151
.p2align 4, 0x90
LBB57_150: ## in Loop: Header=BB57_151 Depth=2
movl -1112(%rbp), %edx ## 4-byte Reload
incl %edx
movl %r13d, %ebx
cmpl (%r15), %edx
jge LBB57_238
LBB57_151: ## Parent Loop BB57_147 Depth=1
## => This Loop Header: Depth=2
## Child Loop BB57_166 Depth 3
## Child Loop BB57_171 Depth 3
## Child Loop BB57_177 Depth 3
## Child Loop BB57_182 Depth 3
## Child Loop BB57_188 Depth 3
## Child Loop BB57_193 Depth 3
## Child Loop BB57_200 Depth 3
## Child Loop BB57_211 Depth 3
cmpl $16, -1104(%rbp) ## 4-byte Folded Reload
movl %edx, -1112(%rbp) ## 4-byte Spill
jne LBB57_155
## %bb.152: ## in Loop: Header=BB57_151 Depth=2
movq (%r10), %rdi
testq %rdi, %rdi
movl -1144(%rbp), %r14d ## 4-byte Reload
je LBB57_156
## %bb.153: ## in Loop: Header=BB57_151 Depth=2
callq _fgetc
movq -1080(%rbp), %r10 ## 8-byte Reload
movl %eax, %r13d
cmpl $-1, %eax
movl $0, %eax
cmovel %eax, %r13d
movq (%r10), %rdi
testq %rdi, %rdi
je LBB57_158
## %bb.154: ## in Loop: Header=BB57_151 Depth=2
callq _fgetc
movq -1080(%rbp), %r10 ## 8-byte Reload
movl %eax, %edi
cmpl $-1, %eax
movl $0, %eax
cmovel %eax, %edi
movq -1088(%rbp), %r11 ## 8-byte Reload
movdqa -1136(%rbp), %xmm10 ## 16-byte Reload
jmp LBB57_161
.p2align 4, 0x90
LBB57_155: ## in Loop: Header=BB57_151 Depth=2
movq %r15, %rdi
callq _get32le
movdqa -1136(%rbp), %xmm10 ## 16-byte Reload
movq -1088(%rbp), %r11 ## 8-byte Reload
movq -1080(%rbp), %r10 ## 8-byte Reload
movl %eax, %edi
movl -1144(%rbp), %r14d ## 4-byte Reload
jmp LBB57_162
LBB57_156: ## in Loop: Header=BB57_151 Depth=2
movq 24(%r15), %rax
movq 32(%r15), %rcx
xorl %r13d, %r13d
cmpq %rcx, %rax
jae LBB57_159
## %bb.157: ## in Loop: Header=BB57_151 Depth=2
leaq 1(%rax), %rdx
movq %rdx, 24(%r15)
movzbl (%rax), %r13d
movq %rdx, %rax
jmp LBB57_159
LBB57_158: ## in Loop: Header=BB57_151 Depth=2
movq 24(%r15), %rax
movq 32(%r15), %rcx
movq -1088(%rbp), %r11 ## 8-byte Reload
movdqa -1136(%rbp), %xmm10 ## 16-byte Reload
LBB57_159: ## in Loop: Header=BB57_151 Depth=2
xorl %edi, %edi
cmpq %rcx, %rax
jae LBB57_161
## %bb.160: ## in Loop: Header=BB57_151 Depth=2
leaq 1(%rax), %rcx
movq %rcx, 24(%r15)
movzbl (%rax), %edi
LBB57_161: ## in Loop: Header=BB57_151 Depth=2
shll $8, %edi
addl %r13d, %edi
LBB57_162: ## in Loop: Header=BB57_151 Depth=2
movl %edi, %esi
andl -1120(%rbp), %esi ## 4-byte Folded Reload
movl %esi, %eax
movl -1252(%rbp), %ecx ## 4-byte Reload
## kill: def $cl killed $cl killed $ecx
shll %cl, %eax
movl -1184(%rbp), %ecx ## 4-byte Reload
sarl %cl, %esi
testl %ecx, %ecx
cmovsl %eax, %esi
cmpl $7, %r12d
jg LBB57_172
## %bb.163: ## in Loop: Header=BB57_151 Depth=2
movl -1208(%rbp), %eax ## 4-byte Reload
xorl %edx, %edx
divl %r12d
## kill: def $eax killed $eax def $rax
movq -1200(%rbp), %rcx ## 8-byte Reload
leal (%rax,%rcx), %r8d
incl %r8d
movl %r12d, %ecx
movl %esi, %eax
cmpl $8, %r8d
jb LBB57_171
## %bb.164: ## in Loop: Header=BB57_151 Depth=2
movl %ebx, %r9d
movl %r8d, %ecx
andl $-8, %ecx
movd %esi, %xmm1
pshufd $0, %xmm1, %xmm9 ## xmm9 = xmm1[0,0,0,0]
leal -8(%rcx), %eax
movl %eax, %ebx
shrl $3, %ebx
incl %ebx
testl %eax, %eax
je LBB57_203
## %bb.165: ## in Loop: Header=BB57_151 Depth=2
movl %ebx, %edx
andl $-2, %edx
xorl %eax, %eax
pxor %xmm2, %xmm2
movdqa -1232(%rbp), %xmm3 ## 16-byte Reload
.p2align 4, 0x90
LBB57_166: ## Parent Loop BB57_147 Depth=1
## Parent Loop BB57_151 Depth=2
## => This Inner Loop Header: Depth=3
pshuflw $254, %xmm3, %xmm4 ## xmm4 = xmm3[2,3,3,3,4,5,6,7]
pshufd $238, %xmm3, %xmm5 ## xmm5 = xmm3[2,3,2,3]
pshuflw $84, %xmm3, %xmm8 ## xmm8 = xmm3[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm7
psrad %xmm4, %xmm7
pshuflw $254, %xmm5, %xmm4 ## xmm4 = xmm5[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm0
psrad %xmm4, %xmm0
movdqa %xmm9, %xmm4
psrad %xmm8, %xmm4
paddd %xmm10, %xmm3
pshuflw $84, %xmm5, %xmm6 ## xmm6 = xmm5[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm5
psrad %xmm6, %xmm5
pblendw $15, %xmm7, %xmm0 ## xmm0 = xmm7[0,1,2,3],xmm0[4,5,6,7]
pblendw $15, %xmm4, %xmm5 ## xmm5 = xmm4[0,1,2,3],xmm5[4,5,6,7]
pblendw $204, %xmm0, %xmm5 ## xmm5 = xmm5[0,1],xmm0[2,3],xmm5[4,5],xmm0[6,7]
paddd %xmm1, %xmm5
pshuflw $254, %xmm3, %xmm0 ## xmm0 = xmm3[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm1
psrad %xmm0, %xmm1
pshufd $238, %xmm3, %xmm0 ## xmm0 = xmm3[2,3,2,3]
pshuflw $254, %xmm0, %xmm4 ## xmm4 = xmm0[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm6
psrad %xmm4, %xmm6
pblendw $15, %xmm1, %xmm6 ## xmm6 = xmm1[0,1,2,3],xmm6[4,5,6,7]
pshuflw $84, %xmm3, %xmm1 ## xmm1 = xmm3[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm7
psrad %xmm1, %xmm7
pshuflw $84, %xmm0, %xmm0 ## xmm0 = xmm0[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm4
psrad %xmm0, %xmm4
pblendw $15, %xmm7, %xmm4 ## xmm4 = xmm7[0,1,2,3],xmm4[4,5,6,7]
pblendw $204, %xmm6, %xmm4 ## xmm4 = xmm4[0,1],xmm6[2,3],xmm4[4,5],xmm6[6,7]
paddd %xmm2, %xmm4
paddd %xmm10, %xmm3
pshuflw $254, %xmm3, %xmm0 ## xmm0 = xmm3[2,3,3,3,4,5,6,7]
pshufd $238, %xmm3, %xmm1 ## xmm1 = xmm3[2,3,2,3]
pshuflw $84, %xmm3, %xmm2 ## xmm2 = xmm3[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm6
psrad %xmm0, %xmm6
pshuflw $254, %xmm1, %xmm0 ## xmm0 = xmm1[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm7
psrad %xmm0, %xmm7
movdqa %xmm9, %xmm0
psrad %xmm2, %xmm0
paddd %xmm10, %xmm3
pshuflw $84, %xmm1, %xmm2 ## xmm2 = xmm1[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm1
psrad %xmm2, %xmm1
pblendw $15, %xmm6, %xmm7 ## xmm7 = xmm6[0,1,2,3],xmm7[4,5,6,7]
pblendw $15, %xmm0, %xmm1 ## xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
pblendw $204, %xmm7, %xmm1 ## xmm1 = xmm1[0,1],xmm7[2,3],xmm1[4,5],xmm7[6,7]
paddd %xmm5, %xmm1
pshuflw $254, %xmm3, %xmm0 ## xmm0 = xmm3[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm2
psrad %xmm0, %xmm2
pshufd $238, %xmm3, %xmm0 ## xmm0 = xmm3[2,3,2,3]
pshuflw $254, %xmm0, %xmm5 ## xmm5 = xmm0[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm6
psrad %xmm5, %xmm6
pblendw $15, %xmm2, %xmm6 ## xmm6 = xmm2[0,1,2,3],xmm6[4,5,6,7]
pshuflw $84, %xmm3, %xmm2 ## xmm2 = xmm3[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm5
psrad %xmm2, %xmm5
pshuflw $84, %xmm0, %xmm0 ## xmm0 = xmm0[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm2
psrad %xmm0, %xmm2
pblendw $15, %xmm5, %xmm2 ## xmm2 = xmm5[0,1,2,3],xmm2[4,5,6,7]
pblendw $204, %xmm6, %xmm2 ## xmm2 = xmm2[0,1],xmm6[2,3],xmm2[4,5],xmm6[6,7]
paddd %xmm4, %xmm2
paddd %xmm10, %xmm3
addl $2, %eax
cmpl %edx, %eax
jne LBB57_166
## %bb.167: ## in Loop: Header=BB57_151 Depth=2
testb $1, %bl
je LBB57_169
LBB57_168: ## in Loop: Header=BB57_151 Depth=2
movdqa %xmm3, %xmm0
pshuflw $254, %xmm3, %xmm4 ## xmm4 = xmm3[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm5
psrad %xmm4, %xmm5
pshufd $238, %xmm3, %xmm4 ## xmm4 = xmm3[2,3,2,3]
pshuflw $254, %xmm4, %xmm6 ## xmm6 = xmm4[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm7
psrad %xmm6, %xmm7
paddd %xmm10, %xmm0
pshuflw $84, %xmm3, %xmm3 ## xmm3 = xmm3[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm6
psrad %xmm3, %xmm6
pblendw $15, %xmm5, %xmm7 ## xmm7 = xmm5[0,1,2,3],xmm7[4,5,6,7]
pshuflw $84, %xmm4, %xmm3 ## xmm3 = xmm4[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm4
psrad %xmm3, %xmm4
pblendw $15, %xmm6, %xmm4 ## xmm4 = xmm6[0,1,2,3],xmm4[4,5,6,7]
pshuflw $254, %xmm0, %xmm3 ## xmm3 = xmm0[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm5
psrad %xmm3, %xmm5
pshufd $238, %xmm0, %xmm3 ## xmm3 = xmm0[2,3,2,3]
pshuflw $254, %xmm3, %xmm8 ## xmm8 = xmm3[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm6
psrad %xmm8, %xmm6
pblendw $204, %xmm7, %xmm4 ## xmm4 = xmm4[0,1],xmm7[2,3],xmm4[4,5],xmm7[6,7]
pshuflw $84, %xmm0, %xmm0 ## xmm0 = xmm0[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm7
psrad %xmm0, %xmm7
paddd %xmm4, %xmm1
pshuflw $84, %xmm3, %xmm0 ## xmm0 = xmm3[0,1,1,1,4,5,6,7]
psrad %xmm0, %xmm9
pblendw $15, %xmm5, %xmm6 ## xmm6 = xmm5[0,1,2,3],xmm6[4,5,6,7]
pblendw $15, %xmm7, %xmm9 ## xmm9 = xmm7[0,1,2,3],xmm9[4,5,6,7]
pblendw $204, %xmm6, %xmm9 ## xmm9 = xmm9[0,1],xmm6[2,3],xmm9[4,5],xmm6[6,7]
paddd %xmm9, %xmm2
LBB57_169: ## in Loop: Header=BB57_151 Depth=2
paddd %xmm2, %xmm1
pshufd $238, %xmm1, %xmm0 ## xmm0 = xmm1[2,3,2,3]
paddd %xmm1, %xmm0
pshufd $85, %xmm0, %xmm1 ## xmm1 = xmm0[1,1,1,1]
paddd %xmm0, %xmm1
movd %xmm1, %eax
cmpl %ecx, %r8d
movl %r9d, %ebx
je LBB57_173
## %bb.170: ## in Loop: Header=BB57_151 Depth=2
imull %r12d, %ecx
addl %r12d, %ecx
.p2align 4, 0x90
LBB57_171: ## Parent Loop BB57_147 Depth=1
## Parent Loop BB57_151 Depth=2
## => This Inner Loop Header: Depth=3
movl %esi, %edx
sarl %cl, %edx
addl %edx, %eax
addl %r12d, %ecx
cmpl $8, %ecx
jl LBB57_171
jmp LBB57_173
.p2align 4, 0x90
LBB57_172: ## in Loop: Header=BB57_151 Depth=2
movl %esi, %eax
LBB57_173: ## in Loop: Header=BB57_151 Depth=2
movl %edi, %esi
andl %r14d, %esi
movl %esi, %edx
movl -1248(%rbp), %ecx ## 4-byte Reload
## kill: def $cl killed $cl killed $ecx
shll %cl, %edx
movl -1168(%rbp), %ecx ## 4-byte Reload
sarl %cl, %esi
testl %ecx, %ecx
cmovsl %edx, %esi
movslq %ebx, %r8
movb %al, (%r11,%r8)
cmpl $7, %r12d
jg LBB57_183
## %bb.174: ## in Loop: Header=BB57_151 Depth=2
movl -1208(%rbp), %eax ## 4-byte Reload
xorl %edx, %edx
divl %r12d
## kill: def $eax killed $eax def $rax
movq -1200(%rbp), %rcx ## 8-byte Reload
leal (%rax,%rcx), %r9d
incl %r9d
movl %r12d, %ecx
movl %esi, %eax
cmpl $8, %r9d
jb LBB57_182
## %bb.175: ## in Loop: Header=BB57_151 Depth=2
movl %ebx, %r13d
movl %r9d, %ecx
andl $-8, %ecx
movd %esi, %xmm1
pshufd $0, %xmm1, %xmm9 ## xmm9 = xmm1[0,0,0,0]
leal -8(%rcx), %eax
movl %eax, %ebx
shrl $3, %ebx
incl %ebx
testl %eax, %eax
je LBB57_204
## %bb.176: ## in Loop: Header=BB57_151 Depth=2
movl %ebx, %edx
andl $-2, %edx
xorl %eax, %eax
pxor %xmm2, %xmm2
movdqa -1232(%rbp), %xmm3 ## 16-byte Reload
.p2align 4, 0x90
LBB57_177: ## Parent Loop BB57_147 Depth=1
## Parent Loop BB57_151 Depth=2
## => This Inner Loop Header: Depth=3
pshuflw $254, %xmm3, %xmm4 ## xmm4 = xmm3[2,3,3,3,4,5,6,7]
pshufd $238, %xmm3, %xmm5 ## xmm5 = xmm3[2,3,2,3]
pshuflw $84, %xmm3, %xmm8 ## xmm8 = xmm3[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm7
psrad %xmm4, %xmm7
pshuflw $254, %xmm5, %xmm4 ## xmm4 = xmm5[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm0
psrad %xmm4, %xmm0
movdqa %xmm9, %xmm4
psrad %xmm8, %xmm4
paddd %xmm10, %xmm3
pshuflw $84, %xmm5, %xmm6 ## xmm6 = xmm5[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm5
psrad %xmm6, %xmm5
pblendw $15, %xmm7, %xmm0 ## xmm0 = xmm7[0,1,2,3],xmm0[4,5,6,7]
pblendw $15, %xmm4, %xmm5 ## xmm5 = xmm4[0,1,2,3],xmm5[4,5,6,7]
pblendw $204, %xmm0, %xmm5 ## xmm5 = xmm5[0,1],xmm0[2,3],xmm5[4,5],xmm0[6,7]
paddd %xmm1, %xmm5
pshuflw $254, %xmm3, %xmm0 ## xmm0 = xmm3[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm1
psrad %xmm0, %xmm1
pshufd $238, %xmm3, %xmm0 ## xmm0 = xmm3[2,3,2,3]
pshuflw $254, %xmm0, %xmm4 ## xmm4 = xmm0[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm6
psrad %xmm4, %xmm6
pblendw $15, %xmm1, %xmm6 ## xmm6 = xmm1[0,1,2,3],xmm6[4,5,6,7]
pshuflw $84, %xmm3, %xmm1 ## xmm1 = xmm3[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm7
psrad %xmm1, %xmm7
pshuflw $84, %xmm0, %xmm0 ## xmm0 = xmm0[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm4
psrad %xmm0, %xmm4
pblendw $15, %xmm7, %xmm4 ## xmm4 = xmm7[0,1,2,3],xmm4[4,5,6,7]
pblendw $204, %xmm6, %xmm4 ## xmm4 = xmm4[0,1],xmm6[2,3],xmm4[4,5],xmm6[6,7]
paddd %xmm2, %xmm4
paddd %xmm10, %xmm3
pshuflw $254, %xmm3, %xmm0 ## xmm0 = xmm3[2,3,3,3,4,5,6,7]
pshufd $238, %xmm3, %xmm1 ## xmm1 = xmm3[2,3,2,3]
pshuflw $84, %xmm3, %xmm2 ## xmm2 = xmm3[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm6
psrad %xmm0, %xmm6
pshuflw $254, %xmm1, %xmm0 ## xmm0 = xmm1[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm7
psrad %xmm0, %xmm7
movdqa %xmm9, %xmm0
psrad %xmm2, %xmm0
paddd %xmm10, %xmm3
pshuflw $84, %xmm1, %xmm2 ## xmm2 = xmm1[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm1
psrad %xmm2, %xmm1
pblendw $15, %xmm6, %xmm7 ## xmm7 = xmm6[0,1,2,3],xmm7[4,5,6,7]
pblendw $15, %xmm0, %xmm1 ## xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
pblendw $204, %xmm7, %xmm1 ## xmm1 = xmm1[0,1],xmm7[2,3],xmm1[4,5],xmm7[6,7]
paddd %xmm5, %xmm1
pshuflw $254, %xmm3, %xmm0 ## xmm0 = xmm3[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm2
psrad %xmm0, %xmm2
pshufd $238, %xmm3, %xmm0 ## xmm0 = xmm3[2,3,2,3]
pshuflw $254, %xmm0, %xmm5 ## xmm5 = xmm0[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm6
psrad %xmm5, %xmm6
pblendw $15, %xmm2, %xmm6 ## xmm6 = xmm2[0,1,2,3],xmm6[4,5,6,7]
pshuflw $84, %xmm3, %xmm2 ## xmm2 = xmm3[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm5
psrad %xmm2, %xmm5
pshuflw $84, %xmm0, %xmm0 ## xmm0 = xmm0[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm2
psrad %xmm0, %xmm2
pblendw $15, %xmm5, %xmm2 ## xmm2 = xmm5[0,1,2,3],xmm2[4,5,6,7]
pblendw $204, %xmm6, %xmm2 ## xmm2 = xmm2[0,1],xmm6[2,3],xmm2[4,5],xmm6[6,7]
paddd %xmm4, %xmm2
paddd %xmm10, %xmm3
addl $2, %eax
cmpl %edx, %eax
jne LBB57_177
## %bb.178: ## in Loop: Header=BB57_151 Depth=2
testb $1, %bl
je LBB57_180
LBB57_179: ## in Loop: Header=BB57_151 Depth=2
movdqa %xmm3, %xmm0
pshuflw $254, %xmm3, %xmm4 ## xmm4 = xmm3[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm5
psrad %xmm4, %xmm5
pshufd $238, %xmm3, %xmm4 ## xmm4 = xmm3[2,3,2,3]
pshuflw $254, %xmm4, %xmm6 ## xmm6 = xmm4[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm7
psrad %xmm6, %xmm7
paddd %xmm10, %xmm0
pshuflw $84, %xmm3, %xmm3 ## xmm3 = xmm3[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm6
psrad %xmm3, %xmm6
pblendw $15, %xmm5, %xmm7 ## xmm7 = xmm5[0,1,2,3],xmm7[4,5,6,7]
pshuflw $84, %xmm4, %xmm3 ## xmm3 = xmm4[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm4
psrad %xmm3, %xmm4
pblendw $15, %xmm6, %xmm4 ## xmm4 = xmm6[0,1,2,3],xmm4[4,5,6,7]
pshuflw $254, %xmm0, %xmm3 ## xmm3 = xmm0[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm5
psrad %xmm3, %xmm5
pshufd $238, %xmm0, %xmm3 ## xmm3 = xmm0[2,3,2,3]
pshuflw $254, %xmm3, %xmm8 ## xmm8 = xmm3[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm6
psrad %xmm8, %xmm6
pblendw $204, %xmm7, %xmm4 ## xmm4 = xmm4[0,1],xmm7[2,3],xmm4[4,5],xmm7[6,7]
pshuflw $84, %xmm0, %xmm0 ## xmm0 = xmm0[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm7
psrad %xmm0, %xmm7
paddd %xmm4, %xmm1
pshuflw $84, %xmm3, %xmm0 ## xmm0 = xmm3[0,1,1,1,4,5,6,7]
psrad %xmm0, %xmm9
pblendw $15, %xmm5, %xmm6 ## xmm6 = xmm5[0,1,2,3],xmm6[4,5,6,7]
pblendw $15, %xmm7, %xmm9 ## xmm9 = xmm7[0,1,2,3],xmm9[4,5,6,7]
pblendw $204, %xmm6, %xmm9 ## xmm9 = xmm9[0,1],xmm6[2,3],xmm9[4,5],xmm6[6,7]
paddd %xmm9, %xmm2
LBB57_180: ## in Loop: Header=BB57_151 Depth=2
paddd %xmm2, %xmm1
pshufd $238, %xmm1, %xmm0 ## xmm0 = xmm1[2,3,2,3]
paddd %xmm1, %xmm0
pshufd $85, %xmm0, %xmm1 ## xmm1 = xmm0[1,1,1,1]
paddd %xmm0, %xmm1
movd %xmm1, %eax
cmpl %ecx, %r9d
movl %r13d, %ebx
je LBB57_184
## %bb.181: ## in Loop: Header=BB57_151 Depth=2
imull %r12d, %ecx
addl %r12d, %ecx
.p2align 4, 0x90
LBB57_182: ## Parent Loop BB57_147 Depth=1
## Parent Loop BB57_151 Depth=2
## => This Inner Loop Header: Depth=3
movl %esi, %edx
sarl %cl, %edx
addl %edx, %eax
addl %r12d, %ecx
cmpl $8, %ecx
jl LBB57_182
jmp LBB57_184
.p2align 4, 0x90
LBB57_183: ## in Loop: Header=BB57_151 Depth=2
movl %esi, %eax
LBB57_184: ## in Loop: Header=BB57_151 Depth=2
movl %edi, %esi
andl -1152(%rbp), %esi ## 4-byte Folded Reload
movl %esi, %edx
movl -1244(%rbp), %ecx ## 4-byte Reload
## kill: def $cl killed $cl killed $ecx
shll %cl, %edx
movl -1164(%rbp), %ecx ## 4-byte Reload
sarl %cl, %esi
testl %ecx, %ecx
cmovsl %edx, %esi
movb %al, 1(%r11,%r8)
cmpl $7, %r12d
jg LBB57_194
## %bb.185: ## in Loop: Header=BB57_151 Depth=2
movl -1208(%rbp), %eax ## 4-byte Reload
xorl %edx, %edx
divl %r12d
## kill: def $eax killed $eax def $rax
movq -1200(%rbp), %rcx ## 8-byte Reload
leal (%rax,%rcx), %r9d
incl %r9d
movl %r12d, %ecx
movl %esi, %eax
cmpl $8, %r9d
jb LBB57_193
## %bb.186: ## in Loop: Header=BB57_151 Depth=2
movl %ebx, %r13d
movl %r9d, %ecx
andl $-8, %ecx
movd %esi, %xmm1
pshufd $0, %xmm1, %xmm9 ## xmm9 = xmm1[0,0,0,0]
leal -8(%rcx), %eax
movl %eax, %ebx
shrl $3, %ebx
incl %ebx
testl %eax, %eax
je LBB57_205
## %bb.187: ## in Loop: Header=BB57_151 Depth=2
movl %ebx, %edx
andl $-2, %edx
xorl %eax, %eax
pxor %xmm2, %xmm2
movdqa -1232(%rbp), %xmm3 ## 16-byte Reload
.p2align 4, 0x90
LBB57_188: ## Parent Loop BB57_147 Depth=1
## Parent Loop BB57_151 Depth=2
## => This Inner Loop Header: Depth=3
pshuflw $254, %xmm3, %xmm4 ## xmm4 = xmm3[2,3,3,3,4,5,6,7]
pshufd $238, %xmm3, %xmm5 ## xmm5 = xmm3[2,3,2,3]
pshuflw $84, %xmm3, %xmm8 ## xmm8 = xmm3[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm7
psrad %xmm4, %xmm7
pshuflw $254, %xmm5, %xmm4 ## xmm4 = xmm5[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm0
psrad %xmm4, %xmm0
movdqa %xmm9, %xmm4
psrad %xmm8, %xmm4
paddd %xmm10, %xmm3
pshuflw $84, %xmm5, %xmm6 ## xmm6 = xmm5[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm5
psrad %xmm6, %xmm5
pblendw $15, %xmm7, %xmm0 ## xmm0 = xmm7[0,1,2,3],xmm0[4,5,6,7]
pblendw $15, %xmm4, %xmm5 ## xmm5 = xmm4[0,1,2,3],xmm5[4,5,6,7]
pblendw $204, %xmm0, %xmm5 ## xmm5 = xmm5[0,1],xmm0[2,3],xmm5[4,5],xmm0[6,7]
paddd %xmm1, %xmm5
pshuflw $254, %xmm3, %xmm0 ## xmm0 = xmm3[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm1
psrad %xmm0, %xmm1
pshufd $238, %xmm3, %xmm0 ## xmm0 = xmm3[2,3,2,3]
pshuflw $254, %xmm0, %xmm4 ## xmm4 = xmm0[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm6
psrad %xmm4, %xmm6
pblendw $15, %xmm1, %xmm6 ## xmm6 = xmm1[0,1,2,3],xmm6[4,5,6,7]
pshuflw $84, %xmm3, %xmm1 ## xmm1 = xmm3[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm7
psrad %xmm1, %xmm7
pshuflw $84, %xmm0, %xmm0 ## xmm0 = xmm0[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm4
psrad %xmm0, %xmm4
pblendw $15, %xmm7, %xmm4 ## xmm4 = xmm7[0,1,2,3],xmm4[4,5,6,7]
pblendw $204, %xmm6, %xmm4 ## xmm4 = xmm4[0,1],xmm6[2,3],xmm4[4,5],xmm6[6,7]
paddd %xmm2, %xmm4
paddd %xmm10, %xmm3
pshuflw $254, %xmm3, %xmm0 ## xmm0 = xmm3[2,3,3,3,4,5,6,7]
pshufd $238, %xmm3, %xmm1 ## xmm1 = xmm3[2,3,2,3]
pshuflw $84, %xmm3, %xmm2 ## xmm2 = xmm3[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm6
psrad %xmm0, %xmm6
pshuflw $254, %xmm1, %xmm0 ## xmm0 = xmm1[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm7
psrad %xmm0, %xmm7
movdqa %xmm9, %xmm0
psrad %xmm2, %xmm0
paddd %xmm10, %xmm3
pshuflw $84, %xmm1, %xmm2 ## xmm2 = xmm1[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm1
psrad %xmm2, %xmm1
pblendw $15, %xmm6, %xmm7 ## xmm7 = xmm6[0,1,2,3],xmm7[4,5,6,7]
pblendw $15, %xmm0, %xmm1 ## xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
pblendw $204, %xmm7, %xmm1 ## xmm1 = xmm1[0,1],xmm7[2,3],xmm1[4,5],xmm7[6,7]
paddd %xmm5, %xmm1
pshuflw $254, %xmm3, %xmm0 ## xmm0 = xmm3[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm2
psrad %xmm0, %xmm2
pshufd $238, %xmm3, %xmm0 ## xmm0 = xmm3[2,3,2,3]
pshuflw $254, %xmm0, %xmm5 ## xmm5 = xmm0[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm6
psrad %xmm5, %xmm6
pblendw $15, %xmm2, %xmm6 ## xmm6 = xmm2[0,1,2,3],xmm6[4,5,6,7]
pshuflw $84, %xmm3, %xmm2 ## xmm2 = xmm3[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm5
psrad %xmm2, %xmm5
pshuflw $84, %xmm0, %xmm0 ## xmm0 = xmm0[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm2
psrad %xmm0, %xmm2
pblendw $15, %xmm5, %xmm2 ## xmm2 = xmm5[0,1,2,3],xmm2[4,5,6,7]
pblendw $204, %xmm6, %xmm2 ## xmm2 = xmm2[0,1],xmm6[2,3],xmm2[4,5],xmm6[6,7]
paddd %xmm4, %xmm2
paddd %xmm10, %xmm3
addl $2, %eax
cmpl %edx, %eax
jne LBB57_188
## %bb.189: ## in Loop: Header=BB57_151 Depth=2
testb $1, %bl
je LBB57_191
LBB57_190: ## in Loop: Header=BB57_151 Depth=2
movdqa %xmm3, %xmm0
pshuflw $254, %xmm3, %xmm4 ## xmm4 = xmm3[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm5
psrad %xmm4, %xmm5
pshufd $238, %xmm3, %xmm4 ## xmm4 = xmm3[2,3,2,3]
pshuflw $254, %xmm4, %xmm6 ## xmm6 = xmm4[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm7
psrad %xmm6, %xmm7
paddd %xmm10, %xmm0
pshuflw $84, %xmm3, %xmm3 ## xmm3 = xmm3[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm6
psrad %xmm3, %xmm6
pblendw $15, %xmm5, %xmm7 ## xmm7 = xmm5[0,1,2,3],xmm7[4,5,6,7]
pshuflw $84, %xmm4, %xmm3 ## xmm3 = xmm4[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm4
psrad %xmm3, %xmm4
pblendw $15, %xmm6, %xmm4 ## xmm4 = xmm6[0,1,2,3],xmm4[4,5,6,7]
pshuflw $254, %xmm0, %xmm3 ## xmm3 = xmm0[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm5
psrad %xmm3, %xmm5
pshufd $238, %xmm0, %xmm3 ## xmm3 = xmm0[2,3,2,3]
pshuflw $254, %xmm3, %xmm8 ## xmm8 = xmm3[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm6
psrad %xmm8, %xmm6
pblendw $204, %xmm7, %xmm4 ## xmm4 = xmm4[0,1],xmm7[2,3],xmm4[4,5],xmm7[6,7]
pshuflw $84, %xmm0, %xmm0 ## xmm0 = xmm0[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm7
psrad %xmm0, %xmm7
paddd %xmm4, %xmm1
pshuflw $84, %xmm3, %xmm0 ## xmm0 = xmm3[0,1,1,1,4,5,6,7]
psrad %xmm0, %xmm9
pblendw $15, %xmm5, %xmm6 ## xmm6 = xmm5[0,1,2,3],xmm6[4,5,6,7]
pblendw $15, %xmm7, %xmm9 ## xmm9 = xmm7[0,1,2,3],xmm9[4,5,6,7]
pblendw $204, %xmm6, %xmm9 ## xmm9 = xmm9[0,1],xmm6[2,3],xmm9[4,5],xmm6[6,7]
paddd %xmm9, %xmm2
LBB57_191: ## in Loop: Header=BB57_151 Depth=2
paddd %xmm2, %xmm1
pshufd $238, %xmm1, %xmm0 ## xmm0 = xmm1[2,3,2,3]
paddd %xmm1, %xmm0
pshufd $85, %xmm0, %xmm1 ## xmm1 = xmm0[1,1,1,1]
paddd %xmm0, %xmm1
movd %xmm1, %eax
cmpl %ecx, %r9d
movl %r13d, %ebx
je LBB57_195
## %bb.192: ## in Loop: Header=BB57_151 Depth=2
imull %r12d, %ecx
addl %r12d, %ecx
.p2align 4, 0x90
LBB57_193: ## Parent Loop BB57_147 Depth=1
## Parent Loop BB57_151 Depth=2
## => This Inner Loop Header: Depth=3
movl %esi, %edx
sarl %cl, %edx
addl %edx, %eax
addl %r12d, %ecx
cmpl $8, %ecx
jl LBB57_193
jmp LBB57_195
.p2align 4, 0x90
LBB57_194: ## in Loop: Header=BB57_151 Depth=2
movl %esi, %eax
LBB57_195: ## in Loop: Header=BB57_151 Depth=2
leaq 3(%r8), %r13
movb %al, 2(%r11,%r8)
movl -1192(%rbp), %eax ## 4-byte Reload
testl %eax, %eax
je LBB57_201
## %bb.196: ## in Loop: Header=BB57_151 Depth=2
andl %eax, %edi
movl %edi, %eax
movl -1240(%rbp), %ecx ## 4-byte Reload
## kill: def $cl killed $cl killed $ecx
shll %cl, %eax
movl -1160(%rbp), %ecx ## 4-byte Reload
sarl %cl, %edi
testl %ecx, %ecx
cmovsl %eax, %edi
cmpl $7, %r12d
jg LBB57_202
## %bb.197: ## in Loop: Header=BB57_151 Depth=2
movl -1208(%rbp), %eax ## 4-byte Reload
xorl %edx, %edx
divl %r12d
## kill: def $eax killed $eax def $rax
movq -1200(%rbp), %rcx ## 8-byte Reload
addl %ecx, %eax
incl %eax
movl %r12d, %ecx
movl %edi, %edx
cmpl $8, %eax
jb LBB57_211
## %bb.198: ## in Loop: Header=BB57_151 Depth=2
movl %ebx, %r8d
movl %eax, %ecx
andl $-8, %ecx
movd %edi, %xmm1
pshufd $0, %xmm1, %xmm9 ## xmm9 = xmm1[0,0,0,0]
leal -8(%rcx), %esi
movl %esi, %edx
shrl $3, %edx
incl %edx
testl %esi, %esi
je LBB57_206
## %bb.199: ## in Loop: Header=BB57_151 Depth=2
movl %edx, %esi
andl $-2, %esi
xorl %ebx, %ebx
pxor %xmm2, %xmm2
movdqa -1232(%rbp), %xmm3 ## 16-byte Reload
.p2align 4, 0x90
LBB57_200: ## Parent Loop BB57_147 Depth=1
## Parent Loop BB57_151 Depth=2
## => This Inner Loop Header: Depth=3
pshuflw $254, %xmm3, %xmm4 ## xmm4 = xmm3[2,3,3,3,4,5,6,7]
pshufd $238, %xmm3, %xmm5 ## xmm5 = xmm3[2,3,2,3]
pshuflw $84, %xmm3, %xmm8 ## xmm8 = xmm3[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm7
psrad %xmm4, %xmm7
pshuflw $254, %xmm5, %xmm4 ## xmm4 = xmm5[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm0
psrad %xmm4, %xmm0
movdqa %xmm9, %xmm4
psrad %xmm8, %xmm4
paddd %xmm10, %xmm3
pshuflw $84, %xmm5, %xmm6 ## xmm6 = xmm5[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm5
psrad %xmm6, %xmm5
pblendw $15, %xmm7, %xmm0 ## xmm0 = xmm7[0,1,2,3],xmm0[4,5,6,7]
pblendw $15, %xmm4, %xmm5 ## xmm5 = xmm4[0,1,2,3],xmm5[4,5,6,7]
pblendw $204, %xmm0, %xmm5 ## xmm5 = xmm5[0,1],xmm0[2,3],xmm5[4,5],xmm0[6,7]
paddd %xmm1, %xmm5
pshuflw $254, %xmm3, %xmm0 ## xmm0 = xmm3[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm1
psrad %xmm0, %xmm1
pshufd $238, %xmm3, %xmm0 ## xmm0 = xmm3[2,3,2,3]
pshuflw $254, %xmm0, %xmm4 ## xmm4 = xmm0[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm6
psrad %xmm4, %xmm6
pblendw $15, %xmm1, %xmm6 ## xmm6 = xmm1[0,1,2,3],xmm6[4,5,6,7]
pshuflw $84, %xmm3, %xmm1 ## xmm1 = xmm3[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm7
psrad %xmm1, %xmm7
pshuflw $84, %xmm0, %xmm0 ## xmm0 = xmm0[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm4
psrad %xmm0, %xmm4
pblendw $15, %xmm7, %xmm4 ## xmm4 = xmm7[0,1,2,3],xmm4[4,5,6,7]
pblendw $204, %xmm6, %xmm4 ## xmm4 = xmm4[0,1],xmm6[2,3],xmm4[4,5],xmm6[6,7]
paddd %xmm2, %xmm4
paddd %xmm10, %xmm3
pshuflw $254, %xmm3, %xmm0 ## xmm0 = xmm3[2,3,3,3,4,5,6,7]
pshufd $238, %xmm3, %xmm1 ## xmm1 = xmm3[2,3,2,3]
pshuflw $84, %xmm3, %xmm2 ## xmm2 = xmm3[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm6
psrad %xmm0, %xmm6
pshuflw $254, %xmm1, %xmm0 ## xmm0 = xmm1[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm7
psrad %xmm0, %xmm7
movdqa %xmm9, %xmm0
psrad %xmm2, %xmm0
paddd %xmm10, %xmm3
pshuflw $84, %xmm1, %xmm2 ## xmm2 = xmm1[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm1
psrad %xmm2, %xmm1
pblendw $15, %xmm6, %xmm7 ## xmm7 = xmm6[0,1,2,3],xmm7[4,5,6,7]
pblendw $15, %xmm0, %xmm1 ## xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
pblendw $204, %xmm7, %xmm1 ## xmm1 = xmm1[0,1],xmm7[2,3],xmm1[4,5],xmm7[6,7]
paddd %xmm5, %xmm1
pshuflw $254, %xmm3, %xmm0 ## xmm0 = xmm3[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm2
psrad %xmm0, %xmm2
pshufd $238, %xmm3, %xmm0 ## xmm0 = xmm3[2,3,2,3]
pshuflw $254, %xmm0, %xmm5 ## xmm5 = xmm0[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm6
psrad %xmm5, %xmm6
pblendw $15, %xmm2, %xmm6 ## xmm6 = xmm2[0,1,2,3],xmm6[4,5,6,7]
pshuflw $84, %xmm3, %xmm2 ## xmm2 = xmm3[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm5
psrad %xmm2, %xmm5
pshuflw $84, %xmm0, %xmm0 ## xmm0 = xmm0[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm2
psrad %xmm0, %xmm2
pblendw $15, %xmm5, %xmm2 ## xmm2 = xmm5[0,1,2,3],xmm2[4,5,6,7]
pblendw $204, %xmm6, %xmm2 ## xmm2 = xmm2[0,1],xmm6[2,3],xmm2[4,5],xmm6[6,7]
paddd %xmm4, %xmm2
paddd %xmm10, %xmm3
addl $2, %ebx
cmpl %esi, %ebx
jne LBB57_200
jmp LBB57_207
LBB57_201: ## in Loop: Header=BB57_151 Depth=2
movl $255, %edx
jmp LBB57_212
LBB57_202: ## in Loop: Header=BB57_151 Depth=2
movl %edi, %edx
jmp LBB57_212
LBB57_203: ## in Loop: Header=BB57_151 Depth=2
pxor %xmm2, %xmm2
movdqa -1232(%rbp), %xmm3 ## 16-byte Reload
testb $1, %bl
jne LBB57_168
jmp LBB57_169
LBB57_204: ## in Loop: Header=BB57_151 Depth=2
pxor %xmm2, %xmm2
movdqa -1232(%rbp), %xmm3 ## 16-byte Reload
testb $1, %bl
jne LBB57_179
jmp LBB57_180
LBB57_205: ## in Loop: Header=BB57_151 Depth=2
pxor %xmm2, %xmm2
movdqa -1232(%rbp), %xmm3 ## 16-byte Reload
testb $1, %bl
jne LBB57_190
jmp LBB57_191
LBB57_206: ## in Loop: Header=BB57_151 Depth=2
pxor %xmm2, %xmm2
movdqa -1232(%rbp), %xmm3 ## 16-byte Reload
LBB57_207: ## in Loop: Header=BB57_151 Depth=2
testb $1, %dl
movl %r8d, %ebx
je LBB57_209
## %bb.208: ## in Loop: Header=BB57_151 Depth=2
movdqa %xmm3, %xmm0
pshuflw $254, %xmm3, %xmm4 ## xmm4 = xmm3[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm5
psrad %xmm4, %xmm5
pshufd $238, %xmm3, %xmm4 ## xmm4 = xmm3[2,3,2,3]
pshuflw $254, %xmm4, %xmm6 ## xmm6 = xmm4[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm7
psrad %xmm6, %xmm7
paddd %xmm10, %xmm0
pshuflw $84, %xmm3, %xmm3 ## xmm3 = xmm3[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm6
psrad %xmm3, %xmm6
pblendw $15, %xmm5, %xmm7 ## xmm7 = xmm5[0,1,2,3],xmm7[4,5,6,7]
pshuflw $84, %xmm4, %xmm3 ## xmm3 = xmm4[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm4
psrad %xmm3, %xmm4
pblendw $15, %xmm6, %xmm4 ## xmm4 = xmm6[0,1,2,3],xmm4[4,5,6,7]
pshuflw $254, %xmm0, %xmm3 ## xmm3 = xmm0[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm5
psrad %xmm3, %xmm5
pshufd $238, %xmm0, %xmm3 ## xmm3 = xmm0[2,3,2,3]
pshuflw $254, %xmm3, %xmm8 ## xmm8 = xmm3[2,3,3,3,4,5,6,7]
movdqa %xmm9, %xmm6
psrad %xmm8, %xmm6
pblendw $204, %xmm7, %xmm4 ## xmm4 = xmm4[0,1],xmm7[2,3],xmm4[4,5],xmm7[6,7]
pshuflw $84, %xmm0, %xmm0 ## xmm0 = xmm0[0,1,1,1,4,5,6,7]
movdqa %xmm9, %xmm7
psrad %xmm0, %xmm7
paddd %xmm4, %xmm1
pshuflw $84, %xmm3, %xmm0 ## xmm0 = xmm3[0,1,1,1,4,5,6,7]
psrad %xmm0, %xmm9
pblendw $15, %xmm5, %xmm6 ## xmm6 = xmm5[0,1,2,3],xmm6[4,5,6,7]
pblendw $15, %xmm7, %xmm9 ## xmm9 = xmm7[0,1,2,3],xmm9[4,5,6,7]
pblendw $204, %xmm6, %xmm9 ## xmm9 = xmm9[0,1],xmm6[2,3],xmm9[4,5],xmm6[6,7]
paddd %xmm9, %xmm2
LBB57_209: ## in Loop: Header=BB57_151 Depth=2
paddd %xmm2, %xmm1
pshufd $238, %xmm1, %xmm0 ## xmm0 = xmm1[2,3,2,3]
paddd %xmm1, %xmm0
pshufd $85, %xmm0, %xmm1 ## xmm1 = xmm0[1,1,1,1]
paddd %xmm0, %xmm1
movd %xmm1, %edx
cmpl %ecx, %eax
je LBB57_212
## %bb.210: ## in Loop: Header=BB57_151 Depth=2
imull %r12d, %ecx
addl %r12d, %ecx
.p2align 4, 0x90
LBB57_211: ## Parent Loop BB57_147 Depth=1
## Parent Loop BB57_151 Depth=2
## => This Inner Loop Header: Depth=3
movl %edi, %eax
sarl %cl, %eax
addl %eax, %edx
addl %r12d, %ecx
cmpl $8, %ecx
jl LBB57_211
LBB57_212: ## in Loop: Header=BB57_151 Depth=2
cmpl $4, -1092(%rbp) ## 4-byte Folded Reload
jne LBB57_150
## %bb.213: ## in Loop: Header=BB57_151 Depth=2
addl $4, %ebx
movb %dl, (%r11,%r13)
movl %ebx, %r13d
jmp LBB57_150
LBB57_214: ## in Loop: Header=BB57_147 Depth=1
testl %eax, %eax
jle LBB57_238
## %bb.215: ## in Loop: Header=BB57_147 Depth=1
xorl %ebx, %ebx
movl %r13d, %r14d
movq -1088(%rbp), %rdx ## 8-byte Reload
jmp LBB57_217
.p2align 4, 0x90
LBB57_216: ## in Loop: Header=BB57_217 Depth=2
incl %ebx
movl %r13d, %r14d
cmpl (%r15), %ebx
jge LBB57_238
LBB57_217: ## Parent Loop BB57_147 Depth=1
## => This Inner Loop Header: Depth=2
movq (%r10), %rdi
movslq %r14d, %r13
testq %rdi, %rdi
je LBB57_221
## %bb.218: ## in Loop: Header=BB57_217 Depth=2
callq _fgetc
movq -1088(%rbp), %rdx ## 8-byte Reload
movq -1080(%rbp), %r10 ## 8-byte Reload
cmpl $-1, %eax
movq (%r10), %rdi
movl $0, %ecx
cmovel %ecx, %eax
movb %al, 2(%r13,%rdx)
testq %rdi, %rdi
je LBB57_225
## %bb.219: ## in Loop: Header=BB57_217 Depth=2
callq _fgetc
movq -1088(%rbp), %rdx ## 8-byte Reload
movq -1080(%rbp), %r10 ## 8-byte Reload
cmpl $-1, %eax
movq (%r10), %rdi
movl $0, %ecx
cmovel %ecx, %eax
movb %al, 1(%r13,%rdx)
testq %rdi, %rdi
movdqa -1136(%rbp), %xmm10 ## 16-byte Reload
je LBB57_229
## %bb.220: ## in Loop: Header=BB57_217 Depth=2
callq _fgetc
movdqa -1136(%rbp), %xmm10 ## 16-byte Reload
movq -1088(%rbp), %rdx ## 8-byte Reload
movq -1080(%rbp), %r10 ## 8-byte Reload
cmpl $-1, %eax
movl $0, %ecx
cmovel %ecx, %eax
jmp LBB57_231
LBB57_221: ## in Loop: Header=BB57_217 Depth=2
movq 24(%r15), %rax
cmpq 32(%r15), %rax
jae LBB57_223
## %bb.222: ## in Loop: Header=BB57_217 Depth=2
leaq 1(%rax), %rcx
movq %rcx, 24(%r15)
movzbl (%rax), %eax
jmp LBB57_224
LBB57_223: ## in Loop: Header=BB57_217 Depth=2
xorl %eax, %eax
LBB57_224: ## in Loop: Header=BB57_217 Depth=2
movb %al, 2(%r13,%rdx)
LBB57_225: ## in Loop: Header=BB57_217 Depth=2
movq 24(%r15), %rax
cmpq 32(%r15), %rax
jae LBB57_227
## %bb.226: ## in Loop: Header=BB57_217 Depth=2
leaq 1(%rax), %rcx
movq %rcx, 24(%r15)
movzbl (%rax), %eax
jmp LBB57_228
LBB57_227: ## in Loop: Header=BB57_217 Depth=2
xorl %eax, %eax
LBB57_228: ## in Loop: Header=BB57_217 Depth=2
movdqa -1136(%rbp), %xmm10 ## 16-byte Reload
movb %al, 1(%r13,%rdx)
LBB57_229: ## in Loop: Header=BB57_217 Depth=2
movq 24(%r15), %rcx
xorl %eax, %eax
cmpq 32(%r15), %rcx
jae LBB57_231
## %bb.230: ## in Loop: Header=BB57_217 Depth=2
leaq 1(%rcx), %rax
movq %rax, 24(%r15)
movzbl (%rcx), %eax
LBB57_231: ## in Loop: Header=BB57_217 Depth=2
movb %al, (%rdx,%r13)
addq $3, %r13
movl $255, %eax
cmpb $0, -1176(%rbp) ## 1-byte Folded Reload
je LBB57_236
## %bb.232: ## in Loop: Header=BB57_217 Depth=2
movq (%r10), %rdi
testq %rdi, %rdi
je LBB57_234
## %bb.233: ## in Loop: Header=BB57_217 Depth=2
callq _fgetc
movdqa -1136(%rbp), %xmm10 ## 16-byte Reload
movq -1088(%rbp), %rdx ## 8-byte Reload
movq -1080(%rbp), %r10 ## 8-byte Reload
cmpl $-1, %eax
movl $0, %ecx
cmovel %ecx, %eax
jmp LBB57_236
LBB57_234: ## in Loop: Header=BB57_217 Depth=2
movq 24(%r15), %rcx
xorl %eax, %eax
cmpq 32(%r15), %rcx
jae LBB57_236
## %bb.235: ## in Loop: Header=BB57_217 Depth=2
leaq 1(%rcx), %rax
movq %rax, 24(%r15)
movzbl (%rcx), %eax
.p2align 4, 0x90
LBB57_236: ## in Loop: Header=BB57_217 Depth=2
cmpl $4, -1092(%rbp) ## 4-byte Folded Reload
jne LBB57_216
## %bb.237: ## in Loop: Header=BB57_217 Depth=2
addl $4, %r14d
movb %al, (%rdx,%r13)
movl %r14d, %r13d
jmp LBB57_216
LBB57_238: ## in Loop: Header=BB57_147 Depth=1
movq (%r10), %rdi
testq %rdi, %rdi
je LBB57_145
## %bb.239: ## in Loop: Header=BB57_147 Depth=1
movq -1272(%rbp), %rsi ## 8-byte Reload
movl $1, %edx
callq _fseek
movdqa -1136(%rbp), %xmm10 ## 16-byte Reload
movq -1080(%rbp), %r10 ## 8-byte Reload
jmp LBB57_146
LBB57_240:
addq %rsi, 24(%r15)
LBB57_241:
cmpl $4, %r8d
jne LBB57_243
## %bb.242:
movl (%r15), %eax
leal 1(%rax), %ecx
shrl %ecx
jmp LBB57_245
LBB57_243:
cmpl $8, %r8d
jne LBB57_306
## %bb.244:
movl (%r15), %eax
movl %eax, %ecx
LBB57_245:
movl 4(%r15), %edx
testl %edx, %edx
jle LBB57_270
## %bb.246:
negl %ecx
andl $3, %ecx
movq %rcx, -1112(%rbp) ## 8-byte Spill
xorl %r10d, %r10d
xorl %r13d, %r13d
movl $0, -1136(%rbp) ## 4-byte Folded Spill
LBB57_247: ## =>This Loop Header: Depth=1
## Child Loop BB57_250 Depth 2
testl %eax, %eax
movl -1104(%rbp), %r8d ## 4-byte Reload
movl -1092(%rbp), %r9d ## 4-byte Reload
jle LBB57_265
## %bb.248: ## in Loop: Header=BB57_247 Depth=1
xorl %r14d, %r14d
movq -1088(%rbp), %rsi ## 8-byte Reload
jmp LBB57_250
.p2align 4, 0x90
LBB57_249: ## in Loop: Header=BB57_250 Depth=2
addl $2, %r14d
cmpl (%r15), %r14d
jge LBB57_265
LBB57_250: ## Parent Loop BB57_247 Depth=1
## => This Inner Loop Header: Depth=2
movq (%rbx), %rdi
testq %rdi, %rdi
je LBB57_252
## %bb.251: ## in Loop: Header=BB57_250 Depth=2
callq _fgetc
xorl %r10d, %r10d
movl -1092(%rbp), %r9d ## 4-byte Reload
movq -1088(%rbp), %rsi ## 8-byte Reload
movl -1104(%rbp), %r8d ## 4-byte Reload
movl %eax, %ecx
cmpl $-1, %eax
cmovel %r10d, %ecx
jmp LBB57_254
LBB57_252: ## in Loop: Header=BB57_250 Depth=2
movq 24(%r15), %rax
xorl %ecx, %ecx
cmpq 32(%r15), %rax
jae LBB57_254
## %bb.253: ## in Loop: Header=BB57_250 Depth=2
leaq 1(%rax), %rcx
movq %rcx, 24(%r15)
movzbl (%rax), %ecx
.p2align 4, 0x90
LBB57_254: ## in Loop: Header=BB57_250 Depth=2
movl %ecx, %eax
andl $15, %eax
movl %ecx, %edx
sarl $4, %edx
cmpl $4, %r8d
cmovnel %ecx, %edx
cmovnel %r10d, %eax
movslq %r13d, %rcx
leaq 3(%rcx), %r12
movslq %edx, %rdx
movzbl -1072(%rbp,%rdx,4), %ebx
movb %bl, (%rsi,%rcx)
movzbl -1071(%rbp,%rdx,4), %ebx
movb %bl, 1(%rsi,%rcx)
movzbl -1070(%rbp,%rdx,4), %edx
movb %dl, 2(%rsi,%rcx)
cmpl $4, %r9d
jne LBB57_256
## %bb.255: ## in Loop: Header=BB57_250 Depth=2
addl $4, %r13d
movb $-1, (%rsi,%r12)
movl %r13d, %r12d
LBB57_256: ## in Loop: Header=BB57_250 Depth=2
leal 1(%r14), %ecx
cmpl (%r15), %ecx
movq -1080(%rbp), %rbx ## 8-byte Reload
je LBB57_264
## %bb.257: ## in Loop: Header=BB57_250 Depth=2
cmpl $8, %r8d
jne LBB57_262
## %bb.258: ## in Loop: Header=BB57_250 Depth=2
movq (%rbx), %rdi
testq %rdi, %rdi
je LBB57_260
## %bb.259: ## in Loop: Header=BB57_250 Depth=2
callq _fgetc
xorl %r10d, %r10d
movl -1092(%rbp), %r9d ## 4-byte Reload
movq -1088(%rbp), %rsi ## 8-byte Reload
movl -1104(%rbp), %r8d ## 4-byte Reload
movq -1080(%rbp), %rbx ## 8-byte Reload
cmpl $-1, %eax
cmovel %r10d, %eax
jmp LBB57_262
LBB57_260: ## in Loop: Header=BB57_250 Depth=2
movq 24(%r15), %rcx
xorl %eax, %eax
cmpq 32(%r15), %rcx
jae LBB57_262
## %bb.261: ## in Loop: Header=BB57_250 Depth=2
leaq 1(%rcx), %rax
movq %rax, 24(%r15)
movzbl (%rcx), %eax
.p2align 4, 0x90
LBB57_262: ## in Loop: Header=BB57_250 Depth=2
movslq %r12d, %rcx
leaq 3(%rcx), %r13
cltq
movzbl -1072(%rbp,%rax,4), %edx
movb %dl, (%rsi,%rcx)
movzbl -1071(%rbp,%rax,4), %edx
movb %dl, 1(%rsi,%rcx)
movzbl -1070(%rbp,%rax,4), %eax
movb %al, 2(%rsi,%rcx)
cmpl $4, %r9d
jne LBB57_249
## %bb.263: ## in Loop: Header=BB57_250 Depth=2
addl $4, %r12d
movb $-1, (%rsi,%r13)
movl %r12d, %r13d
jmp LBB57_249
LBB57_264: ## in Loop: Header=BB57_247 Depth=1
movl %r12d, %r13d
LBB57_265: ## in Loop: Header=BB57_247 Depth=1
movq (%rbx), %rdi
testq %rdi, %rdi
je LBB57_267
## %bb.266: ## in Loop: Header=BB57_247 Depth=1
movq -1112(%rbp), %rsi ## 8-byte Reload
movl $1, %edx
callq _fseek
xorl %r10d, %r10d
movq -1080(%rbp), %rbx ## 8-byte Reload
jmp LBB57_268
LBB57_267: ## in Loop: Header=BB57_247 Depth=1
movq -1112(%rbp), %rax ## 8-byte Reload
addq %rax, 24(%r15)
LBB57_268: ## in Loop: Header=BB57_247 Depth=1
movl -1136(%rbp), %eax ## 4-byte Reload
incl %eax
movl 4(%r15), %edx
cmpl %edx, %eax
jge LBB57_270
## %bb.269: ## in Loop: Header=BB57_247 Depth=1
movl %eax, -1136(%rbp) ## 4-byte Spill
movl (%r15), %eax
jmp LBB57_247
LBB57_270:
movq %rdx, -1112(%rbp) ## 8-byte Spill
cmpl $0, -1156(%rbp) ## 4-byte Folded Reload
jle LBB57_297
## %bb.271:
cmpl $2, -1112(%rbp) ## 4-byte Folded Reload
jl LBB57_297
## %bb.272:
movq -1112(%rbp), %rdx ## 8-byte Reload
movl %edx, %ebx
shrl %ebx
movl (%r15), %ecx
imull -1092(%rbp), %ecx ## 4-byte Folded Reload
movq -1088(%rbp), %rax ## 8-byte Reload
leaq (%rax,%rcx), %rsi
movq %rsi, -1200(%rbp) ## 8-byte Spill
movl %ecx, %esi
andl $-32, %esi
movq %rsi, -1120(%rbp) ## 8-byte Spill
addq $-32, %rsi
movq %rsi, -1192(%rbp) ## 8-byte Spill
movq %rsi, %rdi
shrq $5, %rdi
incq %rdi
movq %rdi, %rsi
movq %rdi, -1152(%rbp) ## 8-byte Spill
andq $-2, %rdi
movq %rdi, -1144(%rbp) ## 8-byte Spill
movl %ecx, %r12d
andl $-8, %r12d
movq %rcx, %rdi
negq %rdi
movq %rdi, -1208(%rbp) ## 8-byte Spill
leaq 48(%rax), %rdi
movq %rdi, -1184(%rbp) ## 8-byte Spill
leal -1(%rdx), %r14d
imull %ecx, %r14d
incq %rax
movq %rax, -1104(%rbp) ## 8-byte Spill
xorl %r9d, %r9d
xorl %r10d, %r10d
movq %rbx, -1080(%rbp) ## 8-byte Spill
jmp LBB57_274
LBB57_273: ## in Loop: Header=BB57_274 Depth=1
incq %r10
addl %ecx, %r9d
subl %ecx, %r14d
cmpq %rbx, %r10
je LBB57_297
LBB57_274: ## =>This Loop Header: Depth=1
## Child Loop BB57_288 Depth 2
## Child Loop BB57_295 Depth 2
## Child Loop BB57_283 Depth 2
testl %ecx, %ecx
jle LBB57_273
## %bb.275: ## in Loop: Header=BB57_274 Depth=1
movq %r12, %rbx
movl %r14d, %r11d
movl %r9d, %r13d
movl %ecx, %eax
imull %r10d, %eax
movq -1088(%rbp), %rsi ## 8-byte Reload
leaq (%rsi,%rax), %r12
movl %r10d, %edx
notl %edx
addl -1112(%rbp), %edx ## 4-byte Folded Reload
movl %ecx, %r8d
imull %edx, %r8d
addq %rsi, %r8
cmpl $8, %ecx
movq %r12, -1136(%rbp) ## 8-byte Spill
jb LBB57_278
## %bb.276: ## in Loop: Header=BB57_274 Depth=1
imull %ecx, %edx
movq -1200(%rbp), %rsi ## 8-byte Reload
leaq (%rsi,%rdx), %rdi
cmpq %rdi, %r12
jae LBB57_284
## %bb.277: ## in Loop: Header=BB57_274 Depth=1
addq %rsi, %rax
addq -1088(%rbp), %rdx ## 8-byte Folded Reload
cmpq %rax, %rdx
jae LBB57_284
LBB57_278: ## in Loop: Header=BB57_274 Depth=1
xorl %eax, %eax
movq %rbx, %r12
movq -1080(%rbp), %rbx ## 8-byte Reload
LBB57_279: ## in Loop: Header=BB57_274 Depth=1
movq %rax, %rdi
testb $1, %cl
je LBB57_281
## %bb.280: ## in Loop: Header=BB57_274 Depth=1
movq -1136(%rbp), %rdi ## 8-byte Reload
movb (%rdi,%rax), %dl
movb (%r8,%rax), %bl
movb %bl, (%rdi,%rax)
movq -1080(%rbp), %rbx ## 8-byte Reload
movb %dl, (%r8,%rax)
movq %rax, %rdi
orq $1, %rdi
LBB57_281: ## in Loop: Header=BB57_274 Depth=1
notq %rax
cmpq -1208(%rbp), %rax ## 8-byte Folded Reload
je LBB57_273
## %bb.282: ## in Loop: Header=BB57_274 Depth=1
movq -1104(%rbp), %rax ## 8-byte Reload
addq %rax, %r11
addq %rax, %r13
.p2align 4, 0x90
LBB57_283: ## Parent Loop BB57_274 Depth=1
## => This Inner Loop Header: Depth=2
movzbl -1(%r13,%rdi), %eax
movzbl -1(%r11,%rdi), %edx
movb %dl, -1(%r13,%rdi)
movb %al, -1(%r11,%rdi)
movzbl (%r13,%rdi), %eax
movzbl (%r11,%rdi), %edx
movb %dl, (%r13,%rdi)
movb %al, (%r11,%rdi)
addq $2, %rdi
cmpq %rdi, %rcx
jne LBB57_283
jmp LBB57_273
LBB57_284: ## in Loop: Header=BB57_274 Depth=1
cmpl $32, %ecx
jae LBB57_286
## %bb.285: ## in Loop: Header=BB57_274 Depth=1
xorl %edx, %edx
movq %rbx, %r12
jmp LBB57_294
LBB57_286: ## in Loop: Header=BB57_274 Depth=1
cmpq $0, -1192(%rbp) ## 8-byte Folded Reload
je LBB57_289
## %bb.287: ## in Loop: Header=BB57_274 Depth=1
movq -1184(%rbp), %rax ## 8-byte Reload
leaq (%rax,%r13), %rdi
addq %r11, %rax
movq -1144(%rbp), %rdx ## 8-byte Reload
xorl %r12d, %r12d
LBB57_288: ## Parent Loop BB57_274 Depth=1
## => This Inner Loop Header: Depth=2
movups -48(%rdi,%r12), %xmm0
movups -32(%rdi,%r12), %xmm1
movups -48(%rax,%r12), %xmm2
movups -32(%rax,%r12), %xmm3
movups %xmm2, -48(%rdi,%r12)
movups %xmm3, -32(%rdi,%r12)
movups %xmm0, -48(%rax,%r12)
movups %xmm1, -32(%rax,%r12)
movdqu -16(%rdi,%r12), %xmm0
movdqu (%rdi,%r12), %xmm1
movdqu -16(%rax,%r12), %xmm2
movdqu (%rax,%r12), %xmm3
movdqu %xmm2, -16(%rdi,%r12)
movdqu %xmm3, (%rdi,%r12)
movdqu %xmm0, -16(%rax,%r12)
movdqu %xmm1, (%rax,%r12)
addq $64, %r12
addq $-2, %rdx
jne LBB57_288
jmp LBB57_290
LBB57_289: ## in Loop: Header=BB57_274 Depth=1
xorl %r12d, %r12d
LBB57_290: ## in Loop: Header=BB57_274 Depth=1
testb $1, -1152(%rbp) ## 1-byte Folded Reload
je LBB57_292
## %bb.291: ## in Loop: Header=BB57_274 Depth=1
movq -1136(%rbp), %rax ## 8-byte Reload
movdqu (%rax,%r12), %xmm0
movdqu 16(%rax,%r12), %xmm1
movdqu (%r8,%r12), %xmm2
movdqu 16(%r8,%r12), %xmm3
movdqu %xmm2, (%rax,%r12)
movdqu %xmm3, 16(%rax,%r12)
movdqu %xmm0, (%r8,%r12)
movdqu %xmm1, 16(%r8,%r12)
LBB57_292: ## in Loop: Header=BB57_274 Depth=1
cmpq %rcx, -1120(%rbp) ## 8-byte Folded Reload
movq %rbx, %r12
movq -1080(%rbp), %rbx ## 8-byte Reload
je LBB57_273
## %bb.293: ## in Loop: Header=BB57_274 Depth=1
movq -1120(%rbp), %rax ## 8-byte Reload
movq %rax, %rdx
testb $24, %cl
je LBB57_279
LBB57_294: ## in Loop: Header=BB57_274 Depth=1
movq -1088(%rbp), %rsi ## 8-byte Reload
leaq (%rsi,%r13), %rax
leaq (%rsi,%r11), %rdi
.p2align 4, 0x90
LBB57_295: ## Parent Loop BB57_274 Depth=1
## => This Inner Loop Header: Depth=2
movq (%rax,%rdx), %rbx
movq (%rdi,%rdx), %rsi
movq %rsi, (%rax,%rdx)
movq %rbx, (%rdi,%rdx)
addq $8, %rdx
cmpq %rdx, %r12
jne LBB57_295
## %bb.296: ## in Loop: Header=BB57_274 Depth=1
movq %r12, %rax
cmpq %rcx, %r12
movq -1080(%rbp), %rbx ## 8-byte Reload
je LBB57_273
jmp LBB57_279
LBB57_297:
movl -1212(%rbp), %edx ## 4-byte Reload
testl %edx, %edx
je LBB57_300
## %bb.298:
movl -1092(%rbp), %esi ## 4-byte Reload
cmpl %edx, %esi
movq -1288(%rbp), %r14 ## 8-byte Reload
movq -1280(%rbp), %rbx ## 8-byte Reload
movq -1088(%rbp), %rdi ## 8-byte Reload
je LBB57_301
## %bb.299:
movl (%r15), %ecx
movq -1112(%rbp), %r8 ## 8-byte Reload
## kill: def $r8d killed $r8d killed $r8
callq _convert_format
movq %rax, %rdi
testq %rax, %rax
jne LBB57_301
jmp LBB57_17
LBB57_300:
movq -1288(%rbp), %r14 ## 8-byte Reload
movq -1280(%rbp), %rbx ## 8-byte Reload
movq -1088(%rbp), %rdi ## 8-byte Reload
LBB57_301:
movl (%r15), %eax
movl %eax, (%rbx)
movl 4(%r15), %eax
movl %eax, (%r14)
movq -1264(%rbp), %rcx ## 8-byte Reload
testq %rcx, %rcx
je LBB57_18
## %bb.302:
movl -1092(%rbp), %eax ## 4-byte Reload
movl %eax, (%rcx)
jmp LBB57_18
LBB57_303:
movq %r15, %rdi
callq _get32le
movl %eax, %ebx
movq %r15, %rdi
callq _get32le
movl %eax, %r14d
movq %r15, %rdi
callq _get32le
movl %ebx, -1120(%rbp) ## 4-byte Spill
cmpl %r14d, %ebx
jne LBB57_305
## %bb.304:
cmpl %eax, %r14d
je LBB57_17
LBB57_305:
movl %eax, -1152(%rbp) ## 4-byte Spill
xorl %ebx, %ebx
jmp LBB57_94
LBB57_306:
movq %rdx, %rdi
callq _free
leaq L_.str.81(%rip), %rax
jmp LBB57_16
LBB57_307:
callq _bmp_load.cold.1
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
cmpq -48(%rbp), %rax
jne LBB57_309
## %bb.308:
xorl %eax, %eax
jmp LBB57_20
LBB57_309:
callq ___stack_chk_fail
.cfi_endproc
## -- End function
.p2align 4, 0x90 ## -- Begin function tga_test
## -----------------------------------------------------------------------
## int tga_test(context *s)
## Sniffs the start of the stream for a plausible TGA header; returns 1
## (%eax) when every checked field is valid, 0 otherwise.
##
## Inferred context layout (NOTE(review): confirm against the struct
## definition elsewhere in the project):
##   16(%rbx) = FILE*          -- when non-null, bytes come from fgetc
##   24(%rbx) = memory cursor  -- otherwise bytes come from this buffer
##   32(%rbx) = memory end
## A read past EOF / end-of-buffer yields 0.
## -----------------------------------------------------------------------
_tga_test: ## @tga_test
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdi, %rbx ## rbx = s, live across every libc call
movq 16(%rdi), %rdi
testq %rdi, %rdi
je LBB58_1
## %bb.5:
## FILE path: header byte 0 is read and discarded.
callq _fgetc
movq 16(%rbx), %rdi
testq %rdi, %rdi
je LBB58_3
## %bb.6:
## Header byte 1 via fgetc; EOF (-1) maps to 0.
callq _fgetc
xorl %ecx, %ecx
cmpl $-1, %eax
cmovnel %eax, %ecx
jmp LBB58_7
LBB58_1:
## Memory path: skip header byte 0 by bumping the cursor.
movq 24(%rbx), %rax
cmpq 32(%rbx), %rax
jae LBB58_3
## %bb.2:
incq %rax
movq %rax, 24(%rbx)
LBB58_3:
## Memory path: load header byte 1, or fall through with 0 at exhaustion.
movq 24(%rbx), %rax
cmpq 32(%rbx), %rax
jae LBB58_10
## %bb.4:
leaq 1(%rax), %rcx
movq %rcx, 24(%rbx)
movzbl (%rax), %ecx
LBB58_7:
## Byte 1 (presumably the TGA color-map type -- confirm vs. C source)
## must be 0 or 1: any bit other than bit 0 rejects the file.
xorl %r14d, %r14d ## r14 = result, default 0 (failure)
testb $-2, %cl
jne LBB58_66
## %bb.8:
movq 16(%rbx), %rdi
testq %rdi, %rdi
je LBB58_10
## %bb.9:
## Header byte 2 via fgetc; EOF maps to 0.
callq _fgetc
xorl %ecx, %ecx
cmpl $-1, %eax
cmovnel %eax, %ecx
jmp LBB58_12
LBB58_10:
movq 24(%rbx), %rax
xorl %r14d, %r14d
cmpq 32(%rbx), %rax
jae LBB58_66
## %bb.11:
leaq 1(%rax), %rcx
movq %rcx, 24(%rbx)
movzbl (%rax), %ecx
LBB58_12:
## Byte 2 (image type) must be in {1,2,3,9,10,11}: tested as a bit
## lookup in mask 0xE0E indexed by the byte value.
xorl %r14d, %r14d
cmpb $11, %cl
ja LBB58_66
## %bb.13:
movzbl %cl, %eax
movl $3598, %ecx ## imm = 0xE0E
btq %rax, %rcx
jae LBB58_66
## %bb.14:
## Skip the next 9 header bytes (presumably the 5-byte color-map spec
## plus the 2+2-byte x/y origin fields -- TODO confirm). FILE path reads
## and discards each via fgetc.
movq 16(%rbx), %rdi
testq %rdi, %rdi
je LBB58_15
## %bb.17:
callq _fgetc
movq 16(%rbx), %rdi
testq %rdi, %rdi
je LBB58_18
## %bb.21:
callq _fgetc
movq 16(%rbx), %rdi
testq %rdi, %rdi
je LBB58_22
## %bb.24:
callq _fgetc
movq 16(%rbx), %rdi
testq %rdi, %rdi
je LBB58_25
## %bb.28:
callq _fgetc
movq 16(%rbx), %rdi
testq %rdi, %rdi
je LBB58_29
## %bb.31:
callq _fgetc
movq 16(%rbx), %rdi
testq %rdi, %rdi
je LBB58_32
## %bb.34:
callq _fgetc
movq 16(%rbx), %rdi
testq %rdi, %rdi
je LBB58_35
## %bb.38:
callq _fgetc
movq 16(%rbx), %rdi
testq %rdi, %rdi
je LBB58_39
## %bb.41:
callq _fgetc
movq 16(%rbx), %rdi
testq %rdi, %rdi
je LBB58_42
## %bb.45:
callq _fgetc
movq 16(%rbx), %rdi
testq %rdi, %rdi
je LBB58_46
## %bb.48:
## First 16-bit dimension, FILE path: r15 = first byte, r12 = second
## byte; combined below at LBB58_52.
callq _fgetc
movl %eax, %r15d
xorl %r12d, %r12d
cmpl $-1, %eax
cmovel %r12d, %r15d
movq 16(%rbx), %rdi
testq %rdi, %rdi
je LBB58_49
## %bb.67:
callq _fgetc
cmpl $-1, %eax
cmovnel %eax, %r12d
jmp LBB58_52
LBB58_15:
## Memory-path equivalents of the 9 skipped bytes: cursor bumps only,
## clamped at the buffer end.
movq 24(%rbx), %rax
movq 32(%rbx), %rcx
cmpq %rcx, %rax
jae LBB58_19
## %bb.16:
incq %rax
movq %rax, 24(%rbx)
LBB58_19:
cmpq %rcx, %rax
jae LBB58_22
LBB58_20:
incq %rax
movq %rax, 24(%rbx)
LBB58_22:
movq 24(%rbx), %rax
movq 32(%rbx), %rcx
cmpq %rcx, %rax
jae LBB58_26
## %bb.23:
incq %rax
movq %rax, 24(%rbx)
LBB58_26:
cmpq %rcx, %rax
jae LBB58_29
LBB58_27:
incq %rax
movq %rax, 24(%rbx)
LBB58_29:
movq 24(%rbx), %rax
cmpq 32(%rbx), %rax
jae LBB58_32
## %bb.30:
incq %rax
movq %rax, 24(%rbx)
LBB58_32:
movq 24(%rbx), %rax
movq 32(%rbx), %rcx
cmpq %rcx, %rax
jae LBB58_36
## %bb.33:
incq %rax
movq %rax, 24(%rbx)
LBB58_36:
cmpq %rcx, %rax
jae LBB58_39
LBB58_37:
incq %rax
movq %rax, 24(%rbx)
LBB58_39:
movq 24(%rbx), %rax
movq 32(%rbx), %rcx
cmpq %rcx, %rax
jae LBB58_43
## %bb.40:
incq %rax
movq %rax, 24(%rbx)
LBB58_43:
cmpq %rcx, %rax
jae LBB58_46
LBB58_44:
incq %rax
movq %rax, 24(%rbx)
LBB58_46:
## First 16-bit dimension, memory path: r15/r12 = the two bytes.
movq 24(%rbx), %rax
movq 32(%rbx), %rcx
xorl %r15d, %r15d
cmpq %rcx, %rax
jae LBB58_50
## %bb.47:
leaq 1(%rax), %rdx
movq %rdx, 24(%rbx)
movzbl (%rax), %r15d
movq %rdx, %rax
LBB58_50:
xorl %r12d, %r12d
cmpq %rcx, %rax
jae LBB58_52
## %bb.51:
leaq 1(%rax), %rcx
movq %rcx, 24(%rbx)
movzbl (%rax), %r12d
LBB58_52:
## Combine: (first byte << 8) + second byte.  A zero result (or one
## with the sign bit set) rejects the file.
shll $8, %r15d
addl %r12d, %r15d
jle LBB58_66
## %bb.53:
movq 16(%rbx), %rdi
testq %rdi, %rdi
je LBB58_54
## %bb.56:
## Second 16-bit dimension, FILE path (same byte pairing as above).
callq _fgetc
movl %eax, %r15d
xorl %r12d, %r12d
cmpl $-1, %eax
cmovel %r12d, %r15d
movq 16(%rbx), %rdi
testq %rdi, %rdi
je LBB58_57
## %bb.68:
callq _fgetc
cmpl $-1, %eax
cmovnel %eax, %r12d
jmp LBB58_60
LBB58_18:
movq 24(%rbx), %rax
movq 32(%rbx), %rcx
cmpq %rcx, %rax
jb LBB58_20
jmp LBB58_22
LBB58_54:
## Second 16-bit dimension, memory path.
movq 24(%rbx), %rax
movq 32(%rbx), %rcx
xorl %r15d, %r15d
cmpq %rcx, %rax
jae LBB58_58
## %bb.55:
leaq 1(%rax), %rdx
movq %rdx, 24(%rbx)
movzbl (%rax), %r15d
movq %rdx, %rax
jmp LBB58_58
LBB58_57:
movq 24(%rbx), %rax
movq 32(%rbx), %rcx
LBB58_58:
xorl %r12d, %r12d
cmpq %rcx, %rax
jae LBB58_60
## %bb.59:
leaq 1(%rax), %rcx
movq %rcx, 24(%rbx)
movzbl (%rax), %r12d
LBB58_60:
## Second dimension must also be positive.
shll $8, %r15d
addl %r12d, %r15d
jle LBB58_66
## %bb.61:
## Final header byte: bits-per-pixel.
movq 16(%rbx), %rdi
testq %rdi, %rdi
je LBB58_63
## %bb.62:
callq _fgetc
xorl %ecx, %ecx
cmpl $-1, %eax
cmovnel %eax, %ecx
jmp LBB58_65
LBB58_63:
movq 24(%rbx), %rax
xorl %ecx, %ecx
cmpq 32(%rbx), %rax
jae LBB58_65
## %bb.64:
leaq 1(%rax), %rcx
movq %rcx, 24(%rbx)
movzbl (%rax), %ecx
LBB58_65:
## Accept only bpp in {8,16,24,32}: (bpp - 8) & ~24 == 0.
addl $-8, %ecx
xorl %r14d, %r14d
testl $-25, %ecx ## -25 == ~24: clears exactly bits 3 and 4
sete %r14b
LBB58_66:
movl %r14d, %eax ## return 0 (not TGA) or 1 (looks like TGA)
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
LBB58_25:
## Out-of-line memory-path cursor bumps for the skip sequence above.
movq 24(%rbx), %rax
movq 32(%rbx), %rcx
cmpq %rcx, %rax
jb LBB58_27
jmp LBB58_29
LBB58_35:
movq 24(%rbx), %rax
movq 32(%rbx), %rcx
cmpq %rcx, %rax
jb LBB58_37
jmp LBB58_39
LBB58_42:
movq 24(%rbx), %rax
movq 32(%rbx), %rcx
cmpq %rcx, %rax
jb LBB58_44
jmp LBB58_46
LBB58_49:
movq 24(%rbx), %rax
movq 32(%rbx), %rcx
jmp LBB58_50
.cfi_endproc
## -- End function
.globl _stbi_tga_load ## -- Begin function stbi_tga_load
## -----------------------------------------------------------------------
## stbi_tga_load(filename, x, y, comp, req_comp)
## Convenience wrapper: fopen the file, build a stream context on the
## stack with the FILE* stored at context offset 16, delegate to
## tga_load, then fclose.  Returns tga_load's result in %rax, or NULL
## when the file cannot be opened.  (Argument meanings inferred from
## register use here and in tga_load -- confirm against the C header.)
## -----------------------------------------------------------------------
.p2align 4, 0x90
_stbi_tga_load: ## @stbi_tga_load
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $40, %rsp ## room for the on-stack context at -80(%rbp)
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
## Stash the caller's arguments in callee-saved registers across fopen.
movl %r8d, %r14d ## r14d = req_comp
movq %rcx, %r15 ## r15 = comp
movq %rdx, %r12 ## r12 = y
movq %rsi, %r13 ## r13 = x
leaq L_.str(%rip), %rsi ## fopen mode string
callq _fopen ## rdi still holds filename
testq %rax, %rax
je LBB59_1
## %bb.2:
movq %rax, %rbx ## rbx = FILE*, kept live for fclose
movq %rax, -64(%rbp) ## context+16 = FILE* (the stream source)
leaq -80(%rbp), %rdi ## rdi = &context
movq %r13, %rsi
movq %r12, %rdx
movq %r15, %rcx
movl %r14d, %r8d
callq _tga_load
movq %rax, %r14 ## preserve the pixel-buffer result across fclose
movq %rbx, %rdi
callq _fclose
jmp LBB59_3
LBB59_1:
xorl %r14d, %r14d ## fopen failed: return NULL
LBB59_3:
movq %r14, %rax
addq $40, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.p2align 4, 0x90 ## -- Begin function tga_load
_tga_load: ## @tga_load
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $152, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
## kill: def $r8d killed $r8d def $r8
movq %r8, -152(%rbp) ## 8-byte Spill
movq %rcx, %r8
movq %rdi, %r13
leaq 16(%rdi), %rax
movq %rax, -72(%rbp) ## 8-byte Spill
movq 16(%rdi), %rdi
testq %rdi, %rdi
je LBB60_19
## %bb.1:
movq %rsi, -56(%rbp) ## 8-byte Spill
movq %rdx, -48(%rbp) ## 8-byte Spill
movq %r8, -64(%rbp) ## 8-byte Spill
callq _fgetc
xorl %ebx, %ebx
cmpl $-1, %eax
movq -72(%rbp), %rcx ## 8-byte Reload
movq (%rcx), %rdi
movzbl %al, %r14d
cmovel %ebx, %r14d
testq %rdi, %rdi
je LBB60_21
## %bb.2:
callq _fgetc
cmpl $-1, %eax
movq -72(%rbp), %rcx ## 8-byte Reload
movq (%rcx), %rdi
cmovnel %eax, %ebx
testq %rdi, %rdi
je LBB60_25
## %bb.3:
callq _fgetc
movl %eax, %ecx
xorl %r15d, %r15d
cmpl $-1, %eax
cmovel %r15d, %ecx
movq -72(%rbp), %rax ## 8-byte Reload
movq (%rax), %rdi
movl %ecx, -124(%rbp) ## 4-byte Spill
movzbl %cl, %r9d
testq %rdi, %rdi
je LBB60_77
## %bb.4:
movq %r9, -80(%rbp) ## 8-byte Spill
callq _fgetc
cmpl $-1, %eax
cmovnel %eax, %r15d
movq -72(%rbp), %rax ## 8-byte Reload
movq (%rax), %rdi
testq %rdi, %rdi
movl %ebx, -144(%rbp) ## 4-byte Spill
movl %r14d, -120(%rbp) ## 4-byte Spill
je LBB60_155
## %bb.5:
callq _fgetc
movl %eax, %r10d
shll $8, %r10d
xorl %r14d, %r14d
cmpl $-1, %eax
movq -72(%rbp), %rax ## 8-byte Reload
movq (%rax), %rdi
cmovel %r14d, %r10d
addl %r15d, %r10d
testq %rdi, %rdi
je LBB60_156
## %bb.6:
movl %r10d, -104(%rbp) ## 4-byte Spill
callq _fgetc
cmpl $-1, %eax
cmovnel %eax, %r14d
movq -72(%rbp), %rax ## 8-byte Reload
movq (%rax), %rdi
testq %rdi, %rdi
je LBB60_157
## %bb.7:
callq _fgetc
movl %eax, %r15d
shll $8, %r15d
xorl %ebx, %ebx
cmpl $-1, %eax
movq -72(%rbp), %rax ## 8-byte Reload
movq (%rax), %rdi
cmovel %ebx, %r15d
addl %r14d, %r15d
testq %rdi, %rdi
je LBB60_158
## %bb.8:
callq _fgetc
cmpl $-1, %eax
movq -72(%rbp), %rcx ## 8-byte Reload
movq (%rcx), %rdi
movzbl %al, %r11d
cmovel %ebx, %r11d
testq %rdi, %rdi
je LBB60_159
## %bb.9:
movq %r11, -96(%rbp) ## 8-byte Spill
callq _fgetc
movq -72(%rbp), %rax ## 8-byte Reload
movq (%rax), %rdi
testq %rdi, %rdi
je LBB60_160
## %bb.10:
callq _fgetc
movq -72(%rbp), %rax ## 8-byte Reload
movq (%rax), %rdi
testq %rdi, %rdi
je LBB60_161
## %bb.11:
callq _fgetc
movq -72(%rbp), %rax ## 8-byte Reload
movq (%rax), %rdi
testq %rdi, %rdi
je LBB60_162
## %bb.12:
callq _fgetc
movq -72(%rbp), %rax ## 8-byte Reload
movq (%rax), %rdi
testq %rdi, %rdi
je LBB60_163
## %bb.13:
callq _fgetc
movl %eax, %r12d
xorl %r14d, %r14d
cmpl $-1, %eax
cmovel %r14d, %r12d
movq -72(%rbp), %rax ## 8-byte Reload
movq (%rax), %rdi
testq %rdi, %rdi
je LBB60_164
## %bb.14:
callq _fgetc
movl %eax, %ebx
shll $8, %ebx
cmpl $-1, %eax
movq -72(%rbp), %rax ## 8-byte Reload
movq (%rax), %rdi
cmovel %r14d, %ebx
addl %r12d, %ebx
testq %rdi, %rdi
je LBB60_165
## %bb.15:
callq _fgetc
movl %eax, %r12d
xorl %r14d, %r14d
cmpl $-1, %eax
cmovel %r14d, %r12d
movq -72(%rbp), %rax ## 8-byte Reload
movq (%rax), %rdi
testq %rdi, %rdi
je LBB60_166
## %bb.16:
callq _fgetc
movl %eax, %ecx
shll $8, %ecx
cmpl $-1, %eax
movq -72(%rbp), %rax ## 8-byte Reload
movq (%rax), %rdi
cmovel %r14d, %ecx
movl %ecx, %r14d
addl %r12d, %r14d
testq %rdi, %rdi
je LBB60_167
## %bb.17:
callq _fgetc
movl %eax, %r12d
xorl %eax, %eax
cmpl $-1, %r12d
cmovel %eax, %r12d
movq -72(%rbp), %rax ## 8-byte Reload
movq (%rax), %rdi
movzbl %r12b, %eax
movq %rax, -136(%rbp) ## 8-byte Spill
testq %rdi, %rdi
je LBB60_168
## %bb.18:
callq _fgetc
cmpl $-1, %eax
movl $0, %edi
cmovnel %eax, %edi
movq -64(%rbp), %r8 ## 8-byte Reload
movq -48(%rbp), %rdx ## 8-byte Reload
movq -56(%rbp), %rsi ## 8-byte Reload
movq -80(%rbp), %r9 ## 8-byte Reload
movl -104(%rbp), %r10d ## 4-byte Reload
movq -96(%rbp), %r11 ## 8-byte Reload
jmp LBB60_64
LBB60_19:
movq 24(%r13), %rax
xorl %r14d, %r14d
cmpq 32(%r13), %rax
jae LBB60_22
## %bb.20:
leaq 1(%rax), %rcx
movq %rcx, 24(%r13)
movzbl (%rax), %r14d
jmp LBB60_22
LBB60_21:
movq -64(%rbp), %r8 ## 8-byte Reload
movq -48(%rbp), %rdx ## 8-byte Reload
movq -56(%rbp), %rsi ## 8-byte Reload
LBB60_22:
movq 24(%r13), %rax
cmpq 32(%r13), %rax
jae LBB60_24
## %bb.23:
leaq 1(%rax), %rcx
movq %rcx, 24(%r13)
movb (%rax), %bl
jmp LBB60_26
LBB60_24:
xorl %ebx, %ebx
jmp LBB60_26
LBB60_25:
movq -64(%rbp), %r8 ## 8-byte Reload
movq -48(%rbp), %rdx ## 8-byte Reload
movq -56(%rbp), %rsi ## 8-byte Reload
LBB60_26:
movq 24(%r13), %rax
xorl %r9d, %r9d
cmpq 32(%r13), %rax
jae LBB60_28
## %bb.27:
leaq 1(%rax), %rcx
movq %rcx, 24(%r13)
movzbl (%rax), %r9d
movl %r9d, %eax
movl %r9d, -124(%rbp) ## 4-byte Spill
jmp LBB60_29
LBB60_28:
movl $0, -124(%rbp) ## 4-byte Folded Spill
LBB60_29:
movq 24(%r13), %rax
movq 32(%r13), %rcx
xorl %r15d, %r15d
cmpq %rcx, %rax
movl %ebx, -144(%rbp) ## 4-byte Spill
movl %r14d, -120(%rbp) ## 4-byte Spill
jae LBB60_31
## %bb.30:
leaq 1(%rax), %rdi
movq %rdi, 24(%r13)
movzbl (%rax), %r15d
movq %rdi, %rax
LBB60_31:
xorl %r10d, %r10d
cmpq %rcx, %rax
jae LBB60_33
## %bb.32:
leaq 1(%rax), %rcx
movq %rcx, 24(%r13)
movzbl (%rax), %r10d
LBB60_33:
shll $8, %r10d
addl %r15d, %r10d
LBB60_34:
movq 24(%r13), %rax
movq 32(%r13), %rcx
xorl %r14d, %r14d
cmpq %rcx, %rax
jae LBB60_36
## %bb.35:
leaq 1(%rax), %rdi
movq %rdi, 24(%r13)
movzbl (%rax), %r14d
movq %rdi, %rax
LBB60_36:
xorl %r15d, %r15d
cmpq %rcx, %rax
jae LBB60_38
## %bb.37:
leaq 1(%rax), %rcx
movq %rcx, 24(%r13)
movzbl (%rax), %r15d
LBB60_38:
shll $8, %r15d
addl %r14d, %r15d
LBB60_39:
movq 24(%r13), %rax
xorl %r11d, %r11d
cmpq 32(%r13), %rax
jae LBB60_41
## %bb.40:
leaq 1(%rax), %rcx
movq %rcx, 24(%r13)
movzbl (%rax), %r11d
LBB60_41:
movq 24(%r13), %rax
movq 32(%r13), %rcx
cmpq %rcx, %rax
jae LBB60_43
## %bb.42:
incq %rax
movq %rax, 24(%r13)
LBB60_43:
cmpq %rcx, %rax
jae LBB60_45
LBB60_44:
incq %rax
movq %rax, 24(%r13)
LBB60_45:
movq 24(%r13), %rax
movq 32(%r13), %rcx
cmpq %rcx, %rax
jae LBB60_47
## %bb.46:
incq %rax
movq %rax, 24(%r13)
LBB60_47:
cmpq %rcx, %rax
jae LBB60_49
LBB60_48:
incq %rax
movq %rax, 24(%r13)
LBB60_49:
movq 24(%r13), %rcx
movq 32(%r13), %rax
xorl %r12d, %r12d
cmpq %rax, %rcx
jae LBB60_51
## %bb.50:
leaq 1(%rcx), %rdi
movq %rdi, 24(%r13)
movzbl (%rcx), %r12d
movq %rdi, %rcx
LBB60_51:
xorl %ebx, %ebx
cmpq %rax, %rcx
jae LBB60_53
## %bb.52:
leaq 1(%rcx), %rax
movq %rax, 24(%r13)
movzbl (%rcx), %ebx
LBB60_53:
shll $8, %ebx
addl %r12d, %ebx
LBB60_54:
movq 24(%r13), %rcx
movq 32(%r13), %rax
xorl %r12d, %r12d
cmpq %rax, %rcx
jae LBB60_56
## %bb.55:
leaq 1(%rcx), %rdi
movq %rdi, 24(%r13)
movzbl (%rcx), %r12d
movq %rdi, %rcx
LBB60_56:
xorl %r14d, %r14d
cmpq %rax, %rcx
jae LBB60_58
## %bb.57:
leaq 1(%rcx), %rax
movq %rax, 24(%r13)
movzbl (%rcx), %r14d
shll $8, %r14d
LBB60_58:
addl %r12d, %r14d
LBB60_59:
movq 24(%r13), %rax
xorl %ecx, %ecx
cmpq 32(%r13), %rax
jae LBB60_61
## %bb.60:
leaq 1(%rax), %rcx
movq %rcx, 24(%r13)
movzbl (%rax), %ecx
LBB60_61:
movq %rcx, -136(%rbp) ## 8-byte Spill
movq %rcx, %rax
movl %eax, %r12d
LBB60_62:
movq 24(%r13), %rax
xorl %edi, %edi
cmpq 32(%r13), %rax
jae LBB60_64
## %bb.63:
leaq 1(%rax), %rcx
movq %rcx, 24(%r13)
movzbl (%rax), %edi
LBB60_64:
leal -8(%r9), %ecx
testb $-8, -124(%rbp) ## 1-byte Folded Reload
cmovel %r9d, %ecx
xorl %eax, %eax
testl %ebx, %ebx
jle LBB60_154
## %bb.65:
testl %r14d, %r14d
jle LBB60_154
## %bb.66:
testl %ecx, %ecx
jle LBB60_154
## %bb.67:
cmpl $3, %ecx
jg LBB60_154
## %bb.68:
addb $-8, %r12b
rolb $5, %r12b
cmpb $3, %r12b
ja LBB60_71
## %bb.69:
movl %edi, -176(%rbp) ## 4-byte Spill
movl %r10d, %r12d
movl %r15d, -156(%rbp) ## 4-byte Spill
cmpb $0, -144(%rbp) ## 1-byte Folded Reload
movq %r11, -96(%rbp) ## 8-byte Spill
movq -136(%rbp), %rcx ## 8-byte Reload
cmovnel %r11d, %ecx
movl %ebx, (%rsi)
movl %r14d, (%rdx)
movq -152(%rbp), %r15 ## 8-byte Reload
leal -5(%r15), %eax
movq %rcx, -136(%rbp) ## 8-byte Spill
## kill: def $ecx killed $ecx killed $rcx
shrl $3, %ecx
cmpl $-4, %eax
cmovbl %ecx, %r15d
movl %ecx, -184(%rbp) ## 4-byte Spill
movl %ecx, (%r8)
movl %r14d, -180(%rbp) ## 4-byte Spill
movl %ebx, -172(%rbp) ## 4-byte Spill
imull %ebx, %r14d
movq %r15, -152(%rbp) ## 8-byte Spill
movl %r15d, %eax
imull %r14d, %eax
movslq %eax, %rdi
callq _malloc
movq -72(%rbp), %rbx ## 8-byte Reload
movq %rax, -64(%rbp) ## 8-byte Spill
movq (%rbx), %rdi
testq %rdi, %rdi
je LBB60_72
## %bb.70:
movl -120(%rbp), %esi ## 4-byte Reload
movl $1, %edx
callq _fseek
movq -72(%rbp), %rbx ## 8-byte Reload
jmp LBB60_73
LBB60_71:
xorl %eax, %eax
jmp LBB60_154
LBB60_72:
movl -120(%rbp), %eax ## 4-byte Reload
addq %rax, 24(%r13)
LBB60_73:
cmpb $0, -144(%rbp) ## 1-byte Folded Reload
movq %r13, -120(%rbp) ## 8-byte Spill
je LBB60_76
## %bb.74:
movq (%rbx), %rdi
movslq %r12d, %rsi
testq %rdi, %rdi
je LBB60_78
## %bb.75:
movl $1, %edx
callq _fseek
movq (%rbx), %r12
jmp LBB60_79
LBB60_76:
xorl %eax, %eax
movq %rax, -168(%rbp) ## 8-byte Spill
jmp LBB60_83
LBB60_77:
movq -64(%rbp), %r8 ## 8-byte Reload
movq -48(%rbp), %rdx ## 8-byte Reload
movq -56(%rbp), %rsi ## 8-byte Reload
jmp LBB60_29
LBB60_78:
addq %rsi, 24(%r13)
xorl %r12d, %r12d
LBB60_79:
movq -96(%rbp), %rcx ## 8-byte Reload
imull -156(%rbp), %ecx ## 4-byte Folded Reload
leal 7(%rcx), %eax
testl %ecx, %ecx
cmovnsl %ecx, %eax
sarl $3, %eax
movslq %eax, %rbx
movq %rbx, %rdi
callq _malloc
testq %r12, %r12
movq %rax, -168(%rbp) ## 8-byte Spill
je LBB60_81
## %bb.80:
movl $1, %esi
movq %rax, %rdi
movq %rbx, %rdx
movq %r12, %rcx
callq _fread
jmp LBB60_82
LBB60_81:
movq 24(%r13), %r12
movq %rax, %rdi
movq %r12, %rsi
movq %rbx, %rdx
callq _memcpy
addq %rbx, %r12
movq %r12, 24(%r13)
LBB60_82:
movq -72(%rbp), %rbx ## 8-byte Reload
LBB60_83:
testl %r14d, %r14d
movq -64(%rbp), %rdi ## 8-byte Reload
jle LBB60_125
## %bb.84:
movq -136(%rbp), %rax ## 8-byte Reload
leal -8(%rax), %ecx
roll $29, %ecx
movl %ecx, -188(%rbp) ## 4-byte Spill
leal -1(%rax), %r15d
shrl $3, %r15d
incl %r15d
movl %r14d, %eax
movq %rax, -104(%rbp) ## 8-byte Spill
movb $1, %al
xorl %r13d, %r13d
movl $0, -96(%rbp) ## 4-byte Folded Spill
xorl %r12d, %r12d
## implicit-def: $cl
## kill: killed $cl
## implicit-def: $cl
## kill: killed $cl
## implicit-def: $r14b
## implicit-def: $cl
## kill: killed $cl
jmp LBB60_89
.p2align 4, 0x90
LBB60_85: ## in Loop: Header=BB60_89 Depth=1
movq %r13, %rcx
leal (,%r13,4), %eax
movb -48(%rbp), %dl ## 1-byte Reload
movb %dl, (%rdi,%rax)
movb -56(%rbp), %dl ## 1-byte Reload
movb %dl, 1(%rdi,%rax)
movb %r14b, 2(%rdi,%rax)
leal 3(,%r13,4), %ecx
LBB60_86: ## in Loop: Header=BB60_89 Depth=1
movb -80(%rbp), %al ## 1-byte Reload
## kill: def $al killed $al def $eax
LBB60_87: ## in Loop: Header=BB60_89 Depth=1
movb %al, (%rdi,%rcx)
LBB60_88: ## in Loop: Header=BB60_89 Depth=1
decl %r12d
movq %r13, %rcx
incq %rcx
xorl %eax, %eax
movq %rcx, %r13
cmpq -104(%rbp), %rcx ## 8-byte Folded Reload
je LBB60_125
LBB60_89: ## =>This Loop Header: Depth=1
## Child Loop BB60_105 Depth 2
testb $-8, -124(%rbp) ## 1-byte Folded Reload
je LBB60_98
## %bb.90: ## in Loop: Header=BB60_89 Depth=1
testl %r12d, %r12d
je LBB60_93
## %bb.91: ## in Loop: Header=BB60_89 Depth=1
xorl %ecx, %ecx
cmpl $0, -96(%rbp) ## 4-byte Folded Reload
setne %sil
sete %dl
orb %al, %dl
movl $1, -96(%rbp) ## 4-byte Folded Spill
testb $1, %dl
je LBB60_120
## %bb.92: ## in Loop: Header=BB60_89 Depth=1
movb %sil, %cl
movl %ecx, -96(%rbp) ## 4-byte Spill
jmp LBB60_98
.p2align 4, 0x90
LBB60_93: ## in Loop: Header=BB60_89 Depth=1
movq (%rbx), %rdi
testq %rdi, %rdi
je LBB60_95
## %bb.94: ## in Loop: Header=BB60_89 Depth=1
callq _fgetc
movq -72(%rbp), %rbx ## 8-byte Reload
movl %eax, %edx
cmpl $-1, %eax
movl $0, %eax
cmovel %eax, %edx
jmp LBB60_97
LBB60_95: ## in Loop: Header=BB60_89 Depth=1
movq -120(%rbp), %rcx ## 8-byte Reload
movq 24(%rcx), %rax
xorl %edx, %edx
cmpq 32(%rcx), %rax
jae LBB60_97
## %bb.96: ## in Loop: Header=BB60_89 Depth=1
leaq 1(%rax), %rcx
movq -120(%rbp), %rdx ## 8-byte Reload
movq %rcx, 24(%rdx)
movzbl (%rax), %edx
.p2align 4, 0x90
LBB60_97: ## in Loop: Header=BB60_89 Depth=1
movl %edx, %r12d
andl $127, %r12d
incl %r12d
shrl $7, %edx
andl $1, %edx
movl %edx, -96(%rbp) ## 4-byte Spill
LBB60_98: ## in Loop: Header=BB60_89 Depth=1
cmpb $0, -144(%rbp) ## 1-byte Folded Reload
movb %r14b, -105(%rbp) ## 1-byte Spill
je LBB60_101
## %bb.99: ## in Loop: Header=BB60_89 Depth=1
movq (%rbx), %rdi
testq %rdi, %rdi
je LBB60_108
## %bb.100: ## in Loop: Header=BB60_89 Depth=1
callq _fgetc
movq -72(%rbp), %rbx ## 8-byte Reload
cmpl $-1, %eax
movl $0, %ecx
cmovel %ecx, %eax
jmp LBB60_110
.p2align 4, 0x90
LBB60_101: ## in Loop: Header=BB60_89 Depth=1
cmpl $0, -136(%rbp) ## 4-byte Folded Reload
je LBB60_112
## %bb.102: ## in Loop: Header=BB60_89 Depth=1
xorl %r14d, %r14d
jmp LBB60_105
.p2align 4, 0x90
LBB60_103: ## in Loop: Header=BB60_105 Depth=2
callq _fgetc
movq -72(%rbp), %rbx ## 8-byte Reload
cmpl $-1, %eax
movl $0, %ecx
cmovel %ecx, %eax
LBB60_104: ## in Loop: Header=BB60_105 Depth=2
movb %al, -84(%rbp,%r14)
incq %r14
cmpq %r14, %r15
je LBB60_112
LBB60_105: ## Parent Loop BB60_89 Depth=1
## => This Inner Loop Header: Depth=2
movq (%rbx), %rdi
testq %rdi, %rdi
jne LBB60_103
## %bb.106: ## in Loop: Header=BB60_105 Depth=2
movq -120(%rbp), %rdx ## 8-byte Reload
movq 24(%rdx), %rcx
xorl %eax, %eax
cmpq 32(%rdx), %rcx
jae LBB60_104
## %bb.107: ## in Loop: Header=BB60_105 Depth=2
leaq 1(%rcx), %rax
movq -120(%rbp), %rdx ## 8-byte Reload
movq %rax, 24(%rdx)
movzbl (%rcx), %eax
jmp LBB60_104
LBB60_108: ## in Loop: Header=BB60_89 Depth=1
movq -120(%rbp), %rdx ## 8-byte Reload
movq 24(%rdx), %rcx
xorl %eax, %eax
cmpq 32(%rdx), %rcx
jae LBB60_110
## %bb.109: ## in Loop: Header=BB60_89 Depth=1
leaq 1(%rcx), %rax
movq -120(%rbp), %rdx ## 8-byte Reload
movq %rax, 24(%rdx)
movzbl (%rcx), %eax
.p2align 4, 0x90
LBB60_110: ## in Loop: Header=BB60_89 Depth=1
cmpl $0, -136(%rbp) ## 4-byte Folded Reload
je LBB60_112
## %bb.111: ## in Loop: Header=BB60_89 Depth=1
movzbl %al, %esi
cmpl %esi, -156(%rbp) ## 4-byte Folded Reload
movl $0, %eax
cmovlel %eax, %esi
imull -184(%rbp), %esi ## 4-byte Folded Reload
addq -168(%rbp), %rsi ## 8-byte Folded Reload
leaq -84(%rbp), %rdi
movq %r15, %rdx
callq _memcpy
movq -72(%rbp), %rbx ## 8-byte Reload
LBB60_112: ## in Loop: Header=BB60_89 Depth=1
movl -188(%rbp), %eax ## 4-byte Reload
cmpl $3, %eax
ja LBB60_115
## %bb.113: ## in Loop: Header=BB60_89 Depth=1
movl %eax, %eax
leaq LJTI60_0(%rip), %rcx
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
movq -64(%rbp), %rdi ## 8-byte Reload
jmpq *%rax
LBB60_114: ## in Loop: Header=BB60_89 Depth=1
movb $-1, -80(%rbp) ## 1-byte Folded Spill
movb -84(%rbp), %r14b
jmp LBB60_117
LBB60_115: ## in Loop: Header=BB60_89 Depth=1
movq -64(%rbp), %rdi ## 8-byte Reload
movb -105(%rbp), %r14b ## 1-byte Reload
jmp LBB60_120
LBB60_116: ## in Loop: Header=BB60_89 Depth=1
movb -84(%rbp), %r14b
movb -83(%rbp), %al
movb %al, -80(%rbp) ## 1-byte Spill
LBB60_117: ## in Loop: Header=BB60_89 Depth=1
movb %r14b, -56(%rbp) ## 1-byte Spill
movb %r14b, -48(%rbp) ## 1-byte Spill
jmp LBB60_120
LBB60_118: ## in Loop: Header=BB60_89 Depth=1
movb -82(%rbp), %al
movb %al, -48(%rbp) ## 1-byte Spill
movb -84(%rbp), %r14b
movb -83(%rbp), %al
movb %al, -56(%rbp) ## 1-byte Spill
movb $-1, -80(%rbp) ## 1-byte Folded Spill
jmp LBB60_120
LBB60_119: ## in Loop: Header=BB60_89 Depth=1
movb -82(%rbp), %al
movb %al, -48(%rbp) ## 1-byte Spill
movb -84(%rbp), %r14b
movb -83(%rbp), %al
movb %al, -56(%rbp) ## 1-byte Spill
movb -81(%rbp), %al
movb %al, -80(%rbp) ## 1-byte Spill
.p2align 4, 0x90
LBB60_120: ## in Loop: Header=BB60_89 Depth=1
movq -152(%rbp), %rax ## 8-byte Reload
decl %eax
cmpl $3, %eax
ja LBB60_88
## %bb.121: ## in Loop: Header=BB60_89 Depth=1
leaq LJTI60_1(%rip), %rcx
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
LBB60_122: ## in Loop: Header=BB60_89 Depth=1
movzbl -48(%rbp), %eax ## 1-byte Folded Reload
movzbl -56(%rbp), %ecx ## 1-byte Folded Reload
movzbl %r14b, %edx
imull $77, %eax, %esi
imull $150, %ecx, %ecx
leal (%rdx,%rdx,8), %eax
leal (%rax,%rax,2), %eax
addl %edx, %eax
addl %edx, %eax
addl %ecx, %eax
addl %esi, %eax
shrl $8, %eax
movq %r13, %rcx
jmp LBB60_87
.p2align 4, 0x90
LBB60_123: ## in Loop: Header=BB60_89 Depth=1
movzbl -48(%rbp), %eax ## 1-byte Folded Reload
movzbl -56(%rbp), %ecx ## 1-byte Folded Reload
movzbl %r14b, %edx
imull $77, %eax, %esi
imull $150, %ecx, %ecx
leal (%rdx,%rdx,8), %eax
leal (%rax,%rax,2), %eax
addl %edx, %eax
addl %edx, %eax
addl %ecx, %eax
addl %esi, %eax
movq %r13, %rcx
movb %ah, (%rdi,%rcx,2)
leaq 1(,%r13), %rcx
addq %r13, %rcx
jmp LBB60_86
.p2align 4, 0x90
LBB60_124: ## in Loop: Header=BB60_89 Depth=1
movq %r13, %rcx
leal (%r13,%r13,2), %eax
movb -48(%rbp), %dl ## 1-byte Reload
movb %dl, (%rdi,%rax)
movb -56(%rbp), %dl ## 1-byte Reload
movb %dl, 1(%rdi,%rax)
leal 2(%r13,%r13,2), %ecx
movl %r14d, %eax
jmp LBB60_87
LBB60_125:
testb $32, -176(%rbp) ## 1-byte Folded Reload
movq -152(%rbp), %rsi ## 8-byte Reload
movl -180(%rbp), %r11d ## 4-byte Reload
jne LBB60_151
## %bb.126:
testl %r11d, %r11d
jle LBB60_151
## %bb.127:
movl %esi, %r14d
movl -172(%rbp), %eax ## 4-byte Reload
imull %eax, %r14d
decl %r11d
imull %r11d, %esi
imull %eax, %esi
shrl %r11d
leal -1(%r14), %r13d
movq -64(%rbp), %rax ## 8-byte Reload
leaq (%rax,%r13), %rcx
incq %rcx
movq %rcx, -72(%rbp) ## 8-byte Spill
leaq 1(%r13), %rdx
movq %rdx, %rcx
andq $-32, %rcx
leaq -32(%rcx), %rdi
movq %rdi, -152(%rbp) ## 8-byte Spill
movq %rdi, %rbx
shrq $5, %rbx
incq %rbx
movq %rbx, %rdi
movq %rbx, -96(%rbp) ## 8-byte Spill
andq $-2, %rbx
movq %rbx, -136(%rbp) ## 8-byte Spill
movl %r14d, %edi
movq %rcx, -48(%rbp) ## 8-byte Spill
subl %ecx, %edi
movl %edi, -80(%rbp) ## 4-byte Spill
movabsq $8589934560, %rdi ## imm = 0x1FFFFFFE0
orq $24, %rdi
movq %rdx, %rcx
movq %rdx, -120(%rbp) ## 8-byte Spill
andq %rdx, %rdi
movl %r14d, %ecx
movq %rdi, -56(%rbp) ## 8-byte Spill
subl %edi, %ecx
movl %ecx, -124(%rbp) ## 4-byte Spill
addq $48, %rax
movq %rax, -144(%rbp) ## 8-byte Spill
xorl %edi, %edi
xorl %r12d, %r12d
jmp LBB60_129
.p2align 4, 0x90
LBB60_128: ## in Loop: Header=BB60_129 Depth=1
leal 1(%r12), %eax
subl %r14d, %esi
addl %r14d, %edi
cmpl %r11d, %r12d
movl %eax, %r12d
je LBB60_151
LBB60_129: ## =>This Loop Header: Depth=1
## Child Loop BB60_139 Depth 2
## Child Loop BB60_146 Depth 2
## Child Loop BB60_150 Depth 2
testl %r14d, %r14d
jle LBB60_128
## %bb.130: ## in Loop: Header=BB60_129 Depth=1
movslq %edi, %r10
movslq %esi, %r15
cmpl $7, %r13d
jb LBB60_131
## %bb.132: ## in Loop: Header=BB60_129 Depth=1
movq -64(%rbp), %rax ## 8-byte Reload
leaq (%rax,%r10), %r8
leaq (%rax,%r15), %r9
movq -72(%rbp), %rax ## 8-byte Reload
leaq (%rax,%r15), %rcx
cmpq %rcx, %r8
jae LBB60_135
## %bb.133: ## in Loop: Header=BB60_129 Depth=1
leaq (%rax,%r10), %rcx
cmpq %rcx, %r9
jae LBB60_135
LBB60_131: ## in Loop: Header=BB60_129 Depth=1
movl %r14d, %edx
LBB60_149: ## in Loop: Header=BB60_129 Depth=1
incl %edx
movq -64(%rbp), %rcx ## 8-byte Reload
.p2align 4, 0x90
LBB60_150: ## Parent Loop BB60_129 Depth=1
## => This Inner Loop Header: Depth=2
movzbl (%rcx,%r10), %eax
movzbl (%rcx,%r15), %ebx
movb %bl, (%rcx,%r10)
movb %al, (%rcx,%r15)
incq %rcx
decl %edx
cmpl $1, %edx
ja LBB60_150
jmp LBB60_128
LBB60_135: ## in Loop: Header=BB60_129 Depth=1
cmpl $31, %r13d
jae LBB60_137
## %bb.136: ## in Loop: Header=BB60_129 Depth=1
xorl %edx, %edx
jmp LBB60_145
LBB60_137: ## in Loop: Header=BB60_129 Depth=1
movq %r13, %rcx
movq %r14, %rbx
movl %r11d, %eax
cmpq $0, -152(%rbp) ## 8-byte Folded Reload
je LBB60_140
## %bb.138: ## in Loop: Header=BB60_129 Depth=1
movq -144(%rbp), %rdx ## 8-byte Reload
leaq (%rdx,%r15), %r11
addq %r10, %rdx
movq -136(%rbp), %r13 ## 8-byte Reload
xorl %r14d, %r14d
.p2align 4, 0x90
LBB60_139: ## Parent Loop BB60_129 Depth=1
## => This Inner Loop Header: Depth=2
movups -48(%rdx,%r14), %xmm0
movups -32(%rdx,%r14), %xmm1
movups -48(%r11,%r14), %xmm2
movups -32(%r11,%r14), %xmm3
movups %xmm2, -48(%rdx,%r14)
movups %xmm3, -32(%rdx,%r14)
movups %xmm0, -48(%r11,%r14)
movups %xmm1, -32(%r11,%r14)
movups -16(%rdx,%r14), %xmm0
movups (%rdx,%r14), %xmm1
movups -16(%r11,%r14), %xmm2
movups (%r11,%r14), %xmm3
movups %xmm2, -16(%rdx,%r14)
movups %xmm3, (%rdx,%r14)
movups %xmm0, -16(%r11,%r14)
movups %xmm1, (%r11,%r14)
addq $64, %r14
addq $-2, %r13
jne LBB60_139
jmp LBB60_141
LBB60_140: ## in Loop: Header=BB60_129 Depth=1
xorl %r14d, %r14d
LBB60_141: ## in Loop: Header=BB60_129 Depth=1
testb $1, -96(%rbp) ## 1-byte Folded Reload
movq %rcx, %r13
je LBB60_143
## %bb.142: ## in Loop: Header=BB60_129 Depth=1
leaq (%r14,%r15), %rcx
addq %r10, %r14
movq -64(%rbp), %rdx ## 8-byte Reload
movups (%rdx,%r14), %xmm0
movups 16(%rdx,%r14), %xmm1
movups (%rdx,%rcx), %xmm2
movups 16(%rdx,%rcx), %xmm3
movups %xmm2, (%rdx,%r14)
movups %xmm3, 16(%rdx,%r14)
movups %xmm0, (%rdx,%rcx)
movups %xmm1, 16(%rdx,%rcx)
LBB60_143: ## in Loop: Header=BB60_129 Depth=1
movq -48(%rbp), %rcx ## 8-byte Reload
cmpq %rcx, -120(%rbp) ## 8-byte Folded Reload
movl %eax, %r11d
movq %rbx, %r14
je LBB60_128
## %bb.144: ## in Loop: Header=BB60_129 Depth=1
movq -48(%rbp), %rdx ## 8-byte Reload
testb $24, -120(%rbp) ## 1-byte Folded Reload
je LBB60_148
LBB60_145: ## in Loop: Header=BB60_129 Depth=1
movq -56(%rbp), %rbx ## 8-byte Reload
addq %rbx, %r10
addq %rbx, %r15
.p2align 4, 0x90
LBB60_146: ## Parent Loop BB60_129 Depth=1
## => This Inner Loop Header: Depth=2
movq (%r8,%rdx), %rcx
movq (%r9,%rdx), %rax
movq %rax, (%r8,%rdx)
movq %rcx, (%r9,%rdx)
addq $8, %rdx
cmpq %rdx, %rbx
jne LBB60_146
## %bb.147: ## in Loop: Header=BB60_129 Depth=1
movl -124(%rbp), %edx ## 4-byte Reload
cmpq %rbx, -120(%rbp) ## 8-byte Folded Reload
je LBB60_128
jmp LBB60_149
LBB60_148: ## in Loop: Header=BB60_129 Depth=1
movq -48(%rbp), %rax ## 8-byte Reload
addq %rax, %r15
addq %rax, %r10
movl -80(%rbp), %edx ## 4-byte Reload
jmp LBB60_149
LBB60_151:
movq -168(%rbp), %rdi ## 8-byte Reload
testq %rdi, %rdi
je LBB60_153
## %bb.152:
callq _free
LBB60_153:
movq -64(%rbp), %rax ## 8-byte Reload
LBB60_154:
addq $152, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB60_155:
movq 24(%r13), %rax
movq 32(%r13), %rcx
movq -64(%rbp), %r8 ## 8-byte Reload
movq -48(%rbp), %rdx ## 8-byte Reload
movq -56(%rbp), %rsi ## 8-byte Reload
movq -80(%rbp), %r9 ## 8-byte Reload
jmp LBB60_31
LBB60_156:
movq -64(%rbp), %r8 ## 8-byte Reload
movq -48(%rbp), %rdx ## 8-byte Reload
movq -56(%rbp), %rsi ## 8-byte Reload
movq -80(%rbp), %r9 ## 8-byte Reload
jmp LBB60_34
LBB60_157:
movq 24(%r13), %rax
movq 32(%r13), %rcx
movq -64(%rbp), %r8 ## 8-byte Reload
movq -48(%rbp), %rdx ## 8-byte Reload
movq -56(%rbp), %rsi ## 8-byte Reload
movq -80(%rbp), %r9 ## 8-byte Reload
movl -104(%rbp), %r10d ## 4-byte Reload
jmp LBB60_36
LBB60_158:
movq -64(%rbp), %r8 ## 8-byte Reload
movq -48(%rbp), %rdx ## 8-byte Reload
movq -56(%rbp), %rsi ## 8-byte Reload
movq -80(%rbp), %r9 ## 8-byte Reload
movl -104(%rbp), %r10d ## 4-byte Reload
jmp LBB60_39
LBB60_159:
movq -64(%rbp), %r8 ## 8-byte Reload
movq -48(%rbp), %rdx ## 8-byte Reload
movq -56(%rbp), %rsi ## 8-byte Reload
movq -80(%rbp), %r9 ## 8-byte Reload
movl -104(%rbp), %r10d ## 4-byte Reload
jmp LBB60_41
LBB60_160:
movq 24(%r13), %rax
movq 32(%r13), %rcx
movq -64(%rbp), %r8 ## 8-byte Reload
movq -48(%rbp), %rdx ## 8-byte Reload
movq -56(%rbp), %rsi ## 8-byte Reload
movq -80(%rbp), %r9 ## 8-byte Reload
movl -104(%rbp), %r10d ## 4-byte Reload
movq -96(%rbp), %r11 ## 8-byte Reload
cmpq %rcx, %rax
jb LBB60_44
jmp LBB60_45
LBB60_161:
movq -64(%rbp), %r8 ## 8-byte Reload
movq -48(%rbp), %rdx ## 8-byte Reload
movq -56(%rbp), %rsi ## 8-byte Reload
movq -80(%rbp), %r9 ## 8-byte Reload
movl -104(%rbp), %r10d ## 4-byte Reload
movq -96(%rbp), %r11 ## 8-byte Reload
jmp LBB60_45
LBB60_162:
movq 24(%r13), %rax
movq 32(%r13), %rcx
movq -64(%rbp), %r8 ## 8-byte Reload
movq -48(%rbp), %rdx ## 8-byte Reload
movq -56(%rbp), %rsi ## 8-byte Reload
movq -80(%rbp), %r9 ## 8-byte Reload
movl -104(%rbp), %r10d ## 4-byte Reload
movq -96(%rbp), %r11 ## 8-byte Reload
cmpq %rcx, %rax
jb LBB60_48
jmp LBB60_49
LBB60_163:
movq -64(%rbp), %r8 ## 8-byte Reload
movq -48(%rbp), %rdx ## 8-byte Reload
movq -56(%rbp), %rsi ## 8-byte Reload
movq -80(%rbp), %r9 ## 8-byte Reload
movl -104(%rbp), %r10d ## 4-byte Reload
movq -96(%rbp), %r11 ## 8-byte Reload
jmp LBB60_49
LBB60_164:
movq 24(%r13), %rcx
movq 32(%r13), %rax
movq -64(%rbp), %r8 ## 8-byte Reload
movq -48(%rbp), %rdx ## 8-byte Reload
movq -56(%rbp), %rsi ## 8-byte Reload
movq -80(%rbp), %r9 ## 8-byte Reload
movl -104(%rbp), %r10d ## 4-byte Reload
movq -96(%rbp), %r11 ## 8-byte Reload
jmp LBB60_51
LBB60_165:
movq -64(%rbp), %r8 ## 8-byte Reload
movq -48(%rbp), %rdx ## 8-byte Reload
movq -56(%rbp), %rsi ## 8-byte Reload
movq -80(%rbp), %r9 ## 8-byte Reload
movl -104(%rbp), %r10d ## 4-byte Reload
movq -96(%rbp), %r11 ## 8-byte Reload
jmp LBB60_54
LBB60_166:
movq 24(%r13), %rcx
movq 32(%r13), %rax
movq -64(%rbp), %r8 ## 8-byte Reload
movq -48(%rbp), %rdx ## 8-byte Reload
movq -56(%rbp), %rsi ## 8-byte Reload
movq -80(%rbp), %r9 ## 8-byte Reload
movl -104(%rbp), %r10d ## 4-byte Reload
movq -96(%rbp), %r11 ## 8-byte Reload
jmp LBB60_56
LBB60_167:
movq -64(%rbp), %r8 ## 8-byte Reload
movq -48(%rbp), %rdx ## 8-byte Reload
movq -56(%rbp), %rsi ## 8-byte Reload
movq -80(%rbp), %r9 ## 8-byte Reload
movl -104(%rbp), %r10d ## 4-byte Reload
movq -96(%rbp), %r11 ## 8-byte Reload
jmp LBB60_59
LBB60_168:
movq -64(%rbp), %r8 ## 8-byte Reload
movq -48(%rbp), %rdx ## 8-byte Reload
movq -56(%rbp), %rsi ## 8-byte Reload
movq -80(%rbp), %r9 ## 8-byte Reload
movl -104(%rbp), %r10d ## 4-byte Reload
movq -96(%rbp), %r11 ## 8-byte Reload
jmp LBB60_62
.cfi_endproc
.p2align 2, 0x90
.data_region jt32
.set L60_0_set_114, LBB60_114-LJTI60_0
.set L60_0_set_116, LBB60_116-LJTI60_0
.set L60_0_set_118, LBB60_118-LJTI60_0
.set L60_0_set_119, LBB60_119-LJTI60_0
LJTI60_0:
.long L60_0_set_114
.long L60_0_set_116
.long L60_0_set_118
.long L60_0_set_119
.set L60_1_set_122, LBB60_122-LJTI60_1
.set L60_1_set_123, LBB60_123-LJTI60_1
.set L60_1_set_124, LBB60_124-LJTI60_1
.set L60_1_set_85, LBB60_85-LJTI60_1
LJTI60_1:
.long L60_1_set_122
.long L60_1_set_123
.long L60_1_set_124
.long L60_1_set_85
.end_data_region
## -- End function
## -----------------------------------------------------------------------
## stbi_psd_load — file-path front end for _psd_load.
## ABI:  System V AMD64 (macOS, leading-underscore symbols).
## In:   rdi = filename (C string), rsi = out ptr #1, rdx = out ptr #2,
##       rcx = out ptr #3, r8d = req_comp — all four forwarded unchanged
##       to _psd_load.
## Out:  rax = buffer returned by _psd_load, or NULL if _fopen failed.
## Opens the file with _fopen(filename, L_.str); the mode string's text
## is not visible in this chunk — presumably "rb", confirm at L_.str.
## Builds a stbi-style context on the stack at rbp-80 and stores the
## FILE* into its +16 slot (rbp-64); _psd_load tests that slot to choose
## its stdio read path. The context's memory-cursor fields (+24/+32) are
## left uninitialized here — NOTE(review): _psd_load only reads those
## when the FILE* slot is NULL, so that appears safe; confirm against
## the C source.
## -----------------------------------------------------------------------
.globl _stbi_psd_load ## -- Begin function stbi_psd_load
.p2align 4, 0x90
_stbi_psd_load: ## @stbi_psd_load
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $40, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
## Stash the four forwarded arguments in callee-saved registers so they
## survive the _fopen call (arg registers are caller-saved).
movl %r8d, %r14d
movq %rcx, %r15
movq %rdx, %r12
movq %rsi, %r13
leaq L_.str(%rip), %rsi
callq _fopen ## rdi still holds the caller's filename
testq %rax, %rax
je LBB61_1
## %bb.2:
movq %rax, %rbx ## keep FILE* for the _fclose below
movq %rax, -64(%rbp) ## context+16 = FILE*
leaq -80(%rbp), %rdi ## rdi = &context (base of the stack block)
movq %r13, %rsi
movq %r12, %rdx
movq %r15, %rcx
movl %r14d, %r8d
callq _psd_load
movq %rax, %r14 ## result survives the _fclose call
movq %rbx, %rdi
callq _fclose
jmp LBB61_3
LBB61_1:
## fopen failed: return NULL (r14 doubles as the result register).
xorl %r14d, %r14d
LBB61_3:
movq %r14, %rax
addq $40, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
## -----------------------------------------------------------------------
## psd_load — decode a Photoshop (PSD) image from a stbi-style context.
## ABI:  System V AMD64 (macOS).
## In:   rdi = context ptr: +16 = FILE* (stdio path); +24/+32 =
##              memory-buffer read cursor / end (used only when the FILE*
##              slot is NULL)
##       rsi, rdx, rcx = int out-pointers, r8d = req_comp.
## Out:  rax = malloc'd RGBA (4 bytes/pixel, interleaved) buffer, possibly
##       run through _convert_format when req_comp is neither 0 nor 4;
##       NULL on any failure, with _failure_reason set to a static string.
## NOTE(review): from the stores at LBB62_155..158, the second _get32
## dimension (-80(%rbp)) goes to *rsi, the first (-76(%rbp)) to *rdx, and
## the channel count to *rcx — i.e. rsi/rdx/rcx look like x/y/comp in the
## usual stbi signature. Confirm against the C prototype.
## -----------------------------------------------------------------------
.p2align 4, 0x90 ## -- Begin function psd_load
_psd_load: ## @psd_load
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $120, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %r8d, %r15d ## r15d = req_comp
movq %rcx, %r14
movq %rdx, %r12
movq %rsi, %r13
movq %rbx, %rbx ## NOTE(review): placeholder comment removed — see next line
movq %rdi, %rbx ## rbx = context; tested/advanced throughout
callq _get32
## 0x38425053 reads as ASCII "8BPS" — the PSD file signature
## (_get32 evidently yields it big-endian for this compare to work).
cmpl $943870035, %eax ## imm = 0x38425053
jne LBB62_1
## %bb.3:
## Read the 16-bit big-endian version field, one byte at a time. This
## dual-path byte read recurs throughout the function: when 16(%rbx)
## (FILE*) is non-null, _fgetc is used (EOF maps to 0 via cmov); else a
## byte is taken from the cursor at 24(%rbx), bounded by 32(%rbx), with
## 0 returned at end of buffer.
movq 16(%rbx), %rdi
testq %rdi, %rdi
movq %r14, -136(%rbp) ## 8-byte Spill
movl %r15d, -100(%rbp) ## 4-byte Spill
je LBB62_4
## %bb.6:
callq _fgetc
movl %eax, %r14d
xorl %r15d, %r15d
cmpl $-1, %eax
cmovel %r15d, %r14d
movq 16(%rbx), %rdi
testq %rdi, %rdi
je LBB62_7
## %bb.162:
callq _fgetc
cmpl $-1, %eax
cmovnel %eax, %r15d
jmp LBB62_10
LBB62_1:
## Bad signature: record the reason string and return NULL.
leaq L_.str.83(%rip), %rax
movq %rax, _failure_reason(%rip)
jmp LBB62_160
LBB62_4:
movq 24(%rbx), %rax
movq 32(%rbx), %rcx
xorl %r14d, %r14d
cmpq %rcx, %rax
jae LBB62_8
## %bb.5:
leaq 1(%rax), %rdx
movq %rdx, 24(%rbx)
movzbl (%rax), %r14d
movq %rdx, %rax
jmp LBB62_8
LBB62_7:
movq 24(%rbx), %rax
movq 32(%rbx), %rcx
LBB62_8:
xorl %r15d, %r15d
cmpq %rcx, %rax
jae LBB62_10
## %bb.9:
leaq 1(%rax), %rcx
movq %rcx, 24(%rbx)
movzbl (%rax), %r15d
LBB62_10:
## version = (hi << 8) | lo; only version 1 is accepted.
shll $8, %r14d
addl %r15d, %r14d
cmpl $1, %r14d
jne LBB62_11
## %bb.12:
## Skip the 6 reserved header bytes: _fseek(f, 6, SEEK_CUR) on the stdio
## path, or a plain cursor advance on the memory path (LBB62_13).
movq 16(%rbx), %rdi
testq %rdi, %rdi
je LBB62_13
## %bb.14:
movl $6, %esi
movl $1, %edx
callq _fseek
movq 16(%rbx), %rdi
testq %rdi, %rdi
je LBB62_15
## %bb.17:
callq _fgetc
movl %eax, %r15d
xorl %r14d, %r14d
cmpl $-1, %eax
cmovel %r14d, %r15d
movq 16(%rbx), %rdi
testq %rdi, %rdi
je LBB62_18
## %bb.163:
callq _fgetc
cmpl $-1, %eax
cmovnel %eax, %r14d
jmp LBB62_21
LBB62_11:
## version != 1
leaq L_.str.84(%rip), %rax
movq %rax, _failure_reason(%rip)
jmp LBB62_160
LBB62_13:
addq $6, 24(%rbx)
LBB62_15:
movq 24(%rbx), %rax
movq 32(%rbx), %rcx
xorl %r15d, %r15d
cmpq %rcx, %rax
jae LBB62_19
## %bb.16:
leaq 1(%rax), %rdx
movq %rdx, 24(%rbx)
movzbl (%rax), %r15d
movq %rdx, %rax
jmp LBB62_19
LBB62_18:
movq 24(%rbx), %rax
movq 32(%rbx), %rcx
LBB62_19:
xorl %r14d, %r14d
cmpq %rcx, %rax
jae LBB62_21
## %bb.20:
leaq 1(%rax), %rcx
movq %rcx, 24(%rbx)
movzbl (%rax), %r14d
LBB62_21:
## channelCount = (hi << 8) | lo; must be < 17 (at most 16 channels).
shll $8, %r15d
addl %r14d, %r15d
cmpl $17, %r15d
jb LBB62_23
## %bb.22:
leaq L_.str.85(%rip), %rax
movq %rax, _failure_reason(%rip)
jmp LBB62_160
LBB62_23:
## Save channelCount (-52), then read the two 32-bit BE dimensions:
## first _get32 -> -76 (stored to *rdx at exit), second -> -80 (*rsi).
movl %r15d, -52(%rbp) ## 4-byte Spill
movq %rbx, %rdi
callq _get32
movl %eax, %r14d
movq %rbx, %rdi
callq _get32
movl %eax, -80(%rbp) ## 4-byte Spill
movq 16(%rbx), %rdi
testq %rdi, %rdi
movl %r14d, -76(%rbp) ## 4-byte Spill
je LBB62_24
## %bb.26:
callq _fgetc
movl %eax, %r15d
xorl %r14d, %r14d
cmpl $-1, %eax
cmovel %r14d, %r15d
movq 16(%rbx), %rdi
testq %rdi, %rdi
je LBB62_27
## %bb.164:
callq _fgetc
cmpl $-1, %eax
cmovnel %eax, %r14d
jmp LBB62_30
LBB62_24:
movq 24(%rbx), %rax
movq 32(%rbx), %rcx
xorl %r15d, %r15d
cmpq %rcx, %rax
jae LBB62_28
## %bb.25:
leaq 1(%rax), %rdx
movq %rdx, 24(%rbx)
movzbl (%rax), %r15d
movq %rdx, %rax
jmp LBB62_28
LBB62_27:
movq 24(%rbx), %rax
movq 32(%rbx), %rcx
LBB62_28:
xorl %r14d, %r14d
cmpq %rcx, %rax
jae LBB62_30
## %bb.29:
leaq 1(%rax), %rcx
movq %rcx, 24(%rbx)
movzbl (%rax), %r14d
LBB62_30:
## bit depth = (hi << 8) | lo; only 8 bits per channel is supported.
shll $8, %r15d
addl %r14d, %r15d
cmpl $8, %r15d
jne LBB62_31
## %bb.32:
movq 16(%rbx), %rdi
testq %rdi, %rdi
je LBB62_33
## %bb.35:
callq _fgetc
movl %eax, %r15d
xorl %r14d, %r14d
cmpl $-1, %eax
cmovel %r14d, %r15d
movq 16(%rbx), %rdi
testq %rdi, %rdi
je LBB62_36
## %bb.165:
callq _fgetc
cmpl $-1, %eax
cmovnel %eax, %r14d
jmp LBB62_39
LBB62_31:
## depth != 8
leaq L_.str.86(%rip), %rax
movq %rax, _failure_reason(%rip)
jmp LBB62_160
LBB62_33:
movq 24(%rbx), %rax
movq 32(%rbx), %rcx
xorl %r15d, %r15d
cmpq %rcx, %rax
jae LBB62_37
## %bb.34:
leaq 1(%rax), %rdx
movq %rdx, 24(%rbx)
movzbl (%rax), %r15d
movq %rdx, %rax
jmp LBB62_37
LBB62_36:
movq 24(%rbx), %rax
movq 32(%rbx), %rcx
LBB62_37:
xorl %r14d, %r14d
cmpq %rcx, %rax
jae LBB62_39
## %bb.38:
leaq 1(%rax), %rcx
movq %rcx, 24(%rbx)
movzbl (%rax), %r14d
LBB62_39:
## color mode = (hi << 8) | lo; 3 (presumably RGB — confirm against the
## PSD spec) is required.
shll $8, %r15d
addl %r14d, %r15d
cmpl $3, %r15d
jne LBB62_40
## %bb.41:
## Skip three length-prefixed sections in a row (each: 32-bit BE length
## from _get32, then _fseek(len, SEEK_CUR) or a cursor advance). In the
## PSD layout these are the color-mode data, image resources, and
## layer/mask blocks — NOTE(review): section names inferred, confirm.
movq %rbx, %rdi
callq _get32
movq 16(%rbx), %rdi
testq %rdi, %rdi
je LBB62_43
## %bb.42:
movslq %eax, %rsi
movl $1, %edx
callq _fseek
jmp LBB62_44
LBB62_40:
## color mode != 3
leaq L_.str.87(%rip), %rax
movq %rax, _failure_reason(%rip)
jmp LBB62_160
LBB62_43:
cltq
addq %rax, 24(%rbx)
LBB62_44:
movl -52(%rbp), %r15d ## 4-byte Reload
movq %rbx, %rdi
callq _get32
movq 16(%rbx), %rdi
testq %rdi, %rdi
je LBB62_46
## %bb.45:
movslq %eax, %rsi
movl $1, %edx
callq _fseek
jmp LBB62_47
LBB62_46:
cltq
addq %rax, 24(%rbx)
LBB62_47:
movq %rbx, %rdi
callq _get32
movq 16(%rbx), %rdi
testq %rdi, %rdi
movq %r12, -120(%rbp) ## 8-byte Spill
je LBB62_48
## %bb.49:
movslq %eax, %rsi
movl $1, %edx
callq _fseek
movq 16(%rbx), %rdi
testq %rdi, %rdi
je LBB62_50
## %bb.52:
callq _fgetc
movl %eax, %r12d
xorl %r14d, %r14d
cmpl $-1, %eax
cmovel %r14d, %r12d
movq 16(%rbx), %rdi
testq %rdi, %rdi
je LBB62_53
## %bb.166:
callq _fgetc
cmpl $-1, %eax
cmovnel %eax, %r14d
jmp LBB62_56
LBB62_48:
cltq
addq %rax, 24(%rbx)
LBB62_50:
movq 24(%rbx), %rax
movq 32(%rbx), %rcx
xorl %r12d, %r12d
cmpq %rcx, %rax
jae LBB62_54
## %bb.51:
leaq 1(%rax), %rdx
movq %rdx, 24(%rbx)
movzbl (%rax), %r12d
movq %rdx, %rax
jmp LBB62_54
LBB62_53:
movq 24(%rbx), %rax
movq 32(%rbx), %rcx
LBB62_54:
xorl %r14d, %r14d
cmpq %rcx, %rax
jae LBB62_56
## %bb.55:
leaq 1(%rax), %rcx
movq %rcx, 24(%rbx)
movzbl (%rax), %r14d
LBB62_56:
## compression = (hi << 8) | lo; anything >= 2 is rejected. 0 selects
## the raw byte-copy path (LBB62_101 on), nonzero the RLE path (%bb.61).
shll $8, %r12d
addl %r14d, %r12d
cmpl $2, %r12d
jl LBB62_58
## %bb.57:
leaq L_.str.36(%rip), %rax
movq %rax, _failure_reason(%rip)
jmp LBB62_160
LBB62_58:
## pixelCount = dim1 * dim2 (-48); output = _malloc(4 * pixelCount),
## i.e. 4 channel-interleaved bytes per pixel.
movl -76(%rbp), %r14d ## 4-byte Reload
movl %r14d, %eax
imull -80(%rbp), %eax ## 4-byte Folded Reload
movq %rax, -48(%rbp) ## 8-byte Spill
leal (,%rax,4), %eax
movslq %eax, %rdi
callq _malloc
testq %rax, %rax
je LBB62_59
## %bb.60:
movq %rax, -88(%rbp) ## 8-byte Spill
testl %r12d, %r12d
movq %rax, -64(%rbp) ## 8-byte Spill
movq %r13, -112(%rbp) ## 8-byte Spill
je LBB62_101
## %bb.61:
## RLE path: first skip the per-row RLE length table,
## channelCount * dim1 * 2 bytes.
movl %r15d, %eax
imull %r14d, %eax
addl %eax, %eax
movq 16(%rbx), %rdi
testq %rdi, %rdi
je LBB62_63
## %bb.62:
movslq %eax, %rsi
movl $1, %edx
callq _fseek
jmp LBB62_64
LBB62_59:
## malloc failed
leaq L_.str.5(%rip), %rax
movq %rax, _failure_reason(%rip)
jmp LBB62_160
LBB62_101:
## Raw path, channel 0: copy pixelCount bytes from the stream into
## out[0], out[4], out[8], ... (stride 4). Channels 1..3 are handled
## from LBB62_108 onward.
movq -48(%rbp), %rax ## 8-byte Reload
testl %eax, %eax
jle LBB62_167
## %bb.102:
movl %eax, %r15d
xorl %r14d, %r14d
xorl %r12d, %r12d
movq -88(%rbp), %r13 ## 8-byte Reload
jmp LBB62_103
LBB62_104: ## in Loop: Header=BB62_103 Depth=1
callq _fgetc
cmpl $-1, %eax
cmovel %r14d, %eax
LBB62_107: ## in Loop: Header=BB62_103 Depth=1
movb %al, (%r13,%r12,4)
incq %r12
cmpl %r12d, %r15d
je LBB62_108
LBB62_103: ## =>This Inner Loop Header: Depth=1
movq 16(%rbx), %rdi
testq %rdi, %rdi
jne LBB62_104
## %bb.105: ## in Loop: Header=BB62_103 Depth=1
movq 24(%rbx), %rcx
xorl %eax, %eax
cmpq 32(%rbx), %rcx
jae LBB62_107
## %bb.106: ## in Loop: Header=BB62_103 Depth=1
leaq 1(%rcx), %rax
movq %rax, 24(%rbx)
movzbl (%rcx), %eax
jmp LBB62_107
LBB62_63:
cltq
addq %rax, 24(%rbx)
LBB62_64:
## RLE decode setup; the outer loop (LBB62_65) walks the 4 output
## channels with -128(%rbp) as the channel index.
movq -48(%rbp), %rax ## 8-byte Reload
movl %r15d, %ecx
movq %rcx, -144(%rbp) ## 8-byte Spill
leal -1(%rax), %ecx
movl %ecx, -96(%rbp) ## 4-byte Spill
movl %eax, %ecx
andl $-8, %ecx
movl %ecx, -92(%rbp) ## 4-byte Spill
## kill: def $eax killed $eax killed $rax def $rax
andl $7, %eax
movq %rax, -152(%rbp) ## 8-byte Spill
xorl %r15d, %r15d
xorl %ecx, %ecx
jmp LBB62_65
LBB62_100: ## in Loop: Header=BB62_65 Depth=1
movq -128(%rbp), %rcx ## 8-byte Reload
incq %rcx
cmpq $4, %rcx
je LBB62_154
LBB62_65: ## =>This Loop Header: Depth=1
## Child Loop BB62_68 Depth 2
## Child Loop BB62_71 Depth 2
## Child Loop BB62_74 Depth 2
## Child Loop BB62_94 Depth 3
## Child Loop BB62_97 Depth 3
## Child Loop BB62_82 Depth 3
movq -64(%rbp), %rax ## 8-byte Reload
leaq (%rax,%rcx), %r13
movq %rcx, -128(%rbp) ## 8-byte Spill
cmpq -144(%rbp), %rcx ## 8-byte Folded Reload
jae LBB62_66
## %bb.72: ## in Loop: Header=BB62_65 Depth=1
movq -48(%rbp), %r14 ## 8-byte Reload
testl %r14d, %r14d
jle LBB62_100
## %bb.73: ## in Loop: Header=BB62_65 Depth=1
xorl %eax, %eax
movq %rax, -72(%rbp) ## 8-byte Spill
jmp LBB62_74
LBB62_98: ## in Loop: Header=BB62_74 Depth=2
movq -48(%rbp), %r14 ## 8-byte Reload
.p2align 4, 0x90
LBB62_99: ## in Loop: Header=BB62_74 Depth=2
cmpl %r14d, -72(%rbp) ## 4-byte Folded Reload
jge LBB62_100
LBB62_74: ## Parent Loop BB62_65 Depth=1
## => This Loop Header: Depth=2
## Child Loop BB62_94 Depth 3
## Child Loop BB62_97 Depth 3
## Child Loop BB62_82 Depth 3
## PackBits-style run decode. Length byte n: n == 128 is a no-op;
## n < 128 copies n+1 literal bytes (LBB62_82); n > 128 replicates the
## next byte 257 - n times (LBB62_87: r12 ^ 255 then +2 == 257 - n).
movq 16(%rbx), %rdi
testq %rdi, %rdi
je LBB62_77
## %bb.75: ## in Loop: Header=BB62_74 Depth=2
callq _fgetc
movl %eax, %r12d
cmpl $-1, %eax
je LBB62_76
## %bb.79: ## in Loop: Header=BB62_74 Depth=2
cmpl $128, %r12d
je LBB62_99
jmp LBB62_80
.p2align 4, 0x90
LBB62_77: ## in Loop: Header=BB62_74 Depth=2
movq 24(%rbx), %rax
cmpq 32(%rbx), %rax
jae LBB62_76
## %bb.78: ## in Loop: Header=BB62_74 Depth=2
leaq 1(%rax), %rcx
movq %rcx, 24(%rbx)
movzbl (%rax), %r12d
cmpl $128, %r12d
je LBB62_99
LBB62_80: ## in Loop: Header=BB62_74 Depth=2
jge LBB62_87
## %bb.81: ## in Loop: Header=BB62_74 Depth=2
incl %r12d
movq -72(%rbp), %rax ## 8-byte Reload
leal (%r12,%rax), %eax
movq %rax, -72(%rbp) ## 8-byte Spill
jne LBB62_82
jmp LBB62_99
.p2align 4, 0x90
LBB62_76: ## in Loop: Header=BB62_74 Depth=2
movq -72(%rbp), %rax ## 8-byte Reload
incl %eax
movq %rax, -72(%rbp) ## 8-byte Spill
movl $1, %r12d
jmp LBB62_82
.p2align 4, 0x90
LBB62_83: ## in Loop: Header=BB62_82 Depth=3
callq _fgetc
cmpl $-1, %eax
cmovel %r15d, %eax
LBB62_86: ## in Loop: Header=BB62_82 Depth=3
movb %al, (%r13)
addq $4, %r13
decl %r12d
je LBB62_99
LBB62_82: ## Parent Loop BB62_65 Depth=1
## Parent Loop BB62_74 Depth=2
## => This Inner Loop Header: Depth=3
## Literal run: copy r12d bytes from the stream, stride 4 in output.
movq 16(%rbx), %rdi
testq %rdi, %rdi
jne LBB62_83
## %bb.84: ## in Loop: Header=BB62_82 Depth=3
movq 24(%rbx), %rcx
xorl %eax, %eax
cmpq 32(%rbx), %rcx
jae LBB62_86
## %bb.85: ## in Loop: Header=BB62_82 Depth=3
leaq 1(%rcx), %rax
movq %rax, 24(%rbx)
movzbl (%rcx), %eax
jmp LBB62_86
LBB62_87: ## in Loop: Header=BB62_74 Depth=2
## Replicate run: r14d = 257 - n copies of the next stream byte,
## written stride-4 (peeled remainder loop BB62_94, then unrolled-by-8
## loop BB62_97).
xorl $255, %r12d
leal 2(%r12), %r14d
movq 16(%rbx), %rdi
testq %rdi, %rdi
je LBB62_89
## %bb.88: ## in Loop: Header=BB62_74 Depth=2
callq _fgetc
cmpl $-1, %eax
cmovel %r15d, %eax
jmp LBB62_91
LBB62_89: ## in Loop: Header=BB62_74 Depth=2
movq 24(%rbx), %rcx
xorl %eax, %eax
cmpq 32(%rbx), %rcx
jae LBB62_91
## %bb.90: ## in Loop: Header=BB62_74 Depth=2
leaq 1(%rcx), %rax
movq %rax, 24(%rbx)
movzbl (%rcx), %eax
LBB62_91: ## in Loop: Header=BB62_74 Depth=2
movq -72(%rbp), %rcx ## 8-byte Reload
addl %r14d, %ecx
movq %rcx, -72(%rbp) ## 8-byte Spill
testl %r14d, %r14d
je LBB62_98
## %bb.92: ## in Loop: Header=BB62_74 Depth=2
leal 1(%r12), %ecx
testb $7, %r14b
je LBB62_96
## %bb.93: ## in Loop: Header=BB62_74 Depth=2
addb $2, %r12b
movzbl %r12b, %edx
andl $7, %edx
xorl %esi, %esi
.p2align 4, 0x90
LBB62_94: ## Parent Loop BB62_65 Depth=1
## Parent Loop BB62_74 Depth=2
## => This Inner Loop Header: Depth=3
movb %al, (%r13)
addq $4, %r13
incq %rsi
cmpl %esi, %edx
jne LBB62_94
## %bb.95: ## in Loop: Header=BB62_74 Depth=2
subl %esi, %r14d
LBB62_96: ## in Loop: Header=BB62_74 Depth=2
cmpl $7, %ecx
jb LBB62_98
.p2align 4, 0x90
LBB62_97: ## Parent Loop BB62_65 Depth=1
## Parent Loop BB62_74 Depth=2
## => This Inner Loop Header: Depth=3
movb %al, (%r13)
movb %al, 4(%r13)
movb %al, 8(%r13)
movb %al, 12(%r13)
movb %al, 16(%r13)
movb %al, 20(%r13)
movb %al, 24(%r13)
movb %al, 28(%r13)
addq $32, %r13
addl $-8, %r14d
jne LBB62_97
jmp LBB62_98
LBB62_66: ## in Loop: Header=BB62_65 Depth=1
## Channel index >= channelCount: no data in the file for it, so fill
## with a constant. sete/negb makes al = 0xFF when the index is 3 (the
## alpha channel gets 255) and 0x00 otherwise; written stride-4 via an
## unrolled-by-8 loop plus remainder loop.
movq -48(%rbp), %rdx ## 8-byte Reload
testl %edx, %edx
jle LBB62_100
## %bb.67: ## in Loop: Header=BB62_65 Depth=1
cmpq $3, -128(%rbp) ## 8-byte Folded Reload
sete %al
negb %al
movl -92(%rbp), %ecx ## 4-byte Reload
cmpl $7, -96(%rbp) ## 4-byte Folded Reload
jb LBB62_69
.p2align 4, 0x90
LBB62_68: ## Parent Loop BB62_65 Depth=1
## => This Inner Loop Header: Depth=2
movb %al, (%r13)
movb %al, 4(%r13)
movb %al, 8(%r13)
movb %al, 12(%r13)
movb %al, 16(%r13)
movb %al, 20(%r13)
movb %al, 24(%r13)
movb %al, 28(%r13)
addq $32, %r13
addl $-8, %ecx
jne LBB62_68
LBB62_69: ## in Loop: Header=BB62_65 Depth=1
testb $7, %dl
je LBB62_100
## %bb.70: ## in Loop: Header=BB62_65 Depth=1
xorl %ecx, %ecx
.p2align 4, 0x90
LBB62_71: ## Parent Loop BB62_65 Depth=1
## => This Inner Loop Header: Depth=2
movb %al, (%r13,%rcx,4)
incq %rcx
cmpl %ecx, -152(%rbp) ## 4-byte Folded Reload
jne LBB62_71
jmp LBB62_100
LBB62_108:
## Raw path, channel 1 (out+1, stride 4): read from the stream when the
## channelCount test passes, else fall through to the zero-fill at
## LBB62_116. Channels 2 and 3 follow the same pattern (thresholds 2
## and 3), with channel 3's fill value being 0xFF (alpha = opaque).
movq -64(%rbp), %rax ## 8-byte Reload
leaq 1(%rax), %r14
cmpl $0, -52(%rbp) ## 4-byte Folded Reload
jle LBB62_116
## %bb.109:
cmpl $0, -48(%rbp) ## 4-byte Folded Reload
jle LBB62_123
## %bb.110:
xorl %r12d, %r12d
xorl %r13d, %r13d
jmp LBB62_111
LBB62_112: ## in Loop: Header=BB62_111 Depth=1
callq _fgetc
cmpl $-1, %eax
cmovel %r12d, %eax
LBB62_115: ## in Loop: Header=BB62_111 Depth=1
movb %al, (%r14,%r13,4)
incq %r13
cmpl %r13d, %r15d
je LBB62_123
LBB62_111: ## =>This Inner Loop Header: Depth=1
movq 16(%rbx), %rdi
testq %rdi, %rdi
jne LBB62_112
## %bb.113: ## in Loop: Header=BB62_111 Depth=1
movq 24(%rbx), %rcx
xorl %eax, %eax
cmpq 32(%rbx), %rcx
jae LBB62_115
## %bb.114: ## in Loop: Header=BB62_111 Depth=1
leaq 1(%rcx), %rax
movq %rax, 24(%rbx)
movzbl (%rcx), %eax
jmp LBB62_115
LBB62_167:
testl %r15d, %r15d
jne LBB62_123
jmp LBB62_154
LBB62_116:
## Zero-fill channel 1 (unrolled by 8 + remainder).
cmpl $0, -48(%rbp) ## 4-byte Folded Reload
jle LBB62_154
## %bb.117:
movq -48(%rbp), %rcx ## 8-byte Reload
leal -1(%rcx), %eax
cmpl $7, %eax
jb LBB62_120
## %bb.118:
movl %ecx, %eax
andl $-8, %eax
LBB62_119: ## =>This Inner Loop Header: Depth=1
movb $0, (%r14)
movb $0, 4(%r14)
movb $0, 8(%r14)
movb $0, 12(%r14)
movb $0, 16(%r14)
movb $0, 20(%r14)
movb $0, 24(%r14)
movb $0, 28(%r14)
addq $32, %r14
addl $-8, %eax
jne LBB62_119
LBB62_120:
testb $7, -48(%rbp) ## 1-byte Folded Reload
je LBB62_123
## %bb.121:
movq -48(%rbp), %rax ## 8-byte Reload
## kill: def $eax killed $eax killed $rax def $rax
andl $7, %eax
xorl %ecx, %ecx
LBB62_122: ## =>This Inner Loop Header: Depth=1
movb $0, (%r14,%rcx,4)
incq %rcx
cmpl %ecx, %eax
jne LBB62_122
LBB62_123:
## Channel 2 (out+2): read when channelCount >= 2, else zero-fill.
movq -64(%rbp), %rax ## 8-byte Reload
leaq 2(%rax), %r14
cmpl $2, -52(%rbp) ## 4-byte Folded Reload
jge LBB62_124
## %bb.131:
cmpl $0, -48(%rbp) ## 4-byte Folded Reload
jle LBB62_154
## %bb.132:
movq -48(%rbp), %rcx ## 8-byte Reload
leal -1(%rcx), %eax
cmpl $7, %eax
jb LBB62_135
## %bb.133:
movl %ecx, %eax
andl $-8, %eax
LBB62_134: ## =>This Inner Loop Header: Depth=1
movb $0, (%r14)
movb $0, 4(%r14)
movb $0, 8(%r14)
movb $0, 12(%r14)
movb $0, 16(%r14)
movb $0, 20(%r14)
movb $0, 24(%r14)
movb $0, 28(%r14)
addq $32, %r14
addl $-8, %eax
jne LBB62_134
LBB62_135:
testb $7, -48(%rbp) ## 1-byte Folded Reload
je LBB62_138
## %bb.136:
movq -48(%rbp), %rax ## 8-byte Reload
## kill: def $eax killed $eax killed $rax def $rax
andl $7, %eax
xorl %ecx, %ecx
LBB62_137: ## =>This Inner Loop Header: Depth=1
movb $0, (%r14,%rcx,4)
incq %rcx
cmpl %ecx, %eax
jne LBB62_137
LBB62_138:
## Channel 3 (out+3, alpha): read when channelCount >= 3, else fill
## with 0xFF (movb $-1) so missing alpha means fully opaque.
addq $3, -64(%rbp) ## 8-byte Folded Spill
cmpl $3, -52(%rbp) ## 4-byte Folded Reload
jge LBB62_139
## %bb.146:
cmpl $0, -48(%rbp) ## 4-byte Folded Reload
jle LBB62_154
## %bb.147:
movq -48(%rbp), %rdx ## 8-byte Reload
leal -1(%rdx), %eax
cmpl $7, %eax
jb LBB62_151
## %bb.148:
movl %edx, %eax
andl $-8, %eax
movq -64(%rbp), %rcx ## 8-byte Reload
LBB62_149: ## =>This Inner Loop Header: Depth=1
movb $-1, (%rcx)
movb $-1, 4(%rcx)
movb $-1, 8(%rcx)
movb $-1, 12(%rcx)
movb $-1, 16(%rcx)
movb $-1, 20(%rcx)
movb $-1, 24(%rcx)
movb $-1, 28(%rcx)
addq $32, %rcx
addl $-8, %eax
jne LBB62_149
## %bb.150:
movq %rcx, -64(%rbp) ## 8-byte Spill
LBB62_151:
testb $7, %dl
je LBB62_154
## %bb.152:
movq -48(%rbp), %rax ## 8-byte Reload
andl $7, %eax
movq %rax, -48(%rbp) ## 8-byte Spill
xorl %eax, %eax
movq -64(%rbp), %rcx ## 8-byte Reload
LBB62_153: ## =>This Inner Loop Header: Depth=1
movb $-1, (%rcx,%rax,4)
incq %rax
cmpl %eax, -48(%rbp) ## 4-byte Folded Reload
jne LBB62_153
jmp LBB62_154
LBB62_124:
## Stream-read channel 2.
cmpl $0, -48(%rbp) ## 4-byte Folded Reload
jle LBB62_154
## %bb.125:
movl -48(%rbp), %r15d ## 4-byte Reload
xorl %r12d, %r12d
xorl %r13d, %r13d
jmp LBB62_126
LBB62_127: ## in Loop: Header=BB62_126 Depth=1
callq _fgetc
cmpl $-1, %eax
cmovel %r12d, %eax
LBB62_130: ## in Loop: Header=BB62_126 Depth=1
movb %al, (%r14,%r13,4)
incq %r13
cmpl %r13d, %r15d
je LBB62_138
LBB62_126: ## =>This Inner Loop Header: Depth=1
movq 16(%rbx), %rdi
testq %rdi, %rdi
jne LBB62_127
## %bb.128: ## in Loop: Header=BB62_126 Depth=1
movq 24(%rbx), %rcx
xorl %eax, %eax
cmpq 32(%rbx), %rcx
jae LBB62_130
## %bb.129: ## in Loop: Header=BB62_126 Depth=1
leaq 1(%rcx), %rax
movq %rax, 24(%rbx)
movzbl (%rcx), %eax
jmp LBB62_130
LBB62_139:
## Stream-read channel 3.
cmpl $0, -48(%rbp) ## 4-byte Folded Reload
jle LBB62_154
## %bb.140:
movl -48(%rbp), %r14d ## 4-byte Reload
xorl %r15d, %r15d
xorl %r12d, %r12d
jmp LBB62_141
LBB62_142: ## in Loop: Header=BB62_141 Depth=1
callq _fgetc
cmpl $-1, %eax
cmovel %r15d, %eax
LBB62_145: ## in Loop: Header=BB62_141 Depth=1
movq -64(%rbp), %rcx ## 8-byte Reload
movb %al, (%rcx,%r12,4)
incq %r12
cmpl %r12d, %r14d
je LBB62_154
LBB62_141: ## =>This Inner Loop Header: Depth=1
movq 16(%rbx), %rdi
testq %rdi, %rdi
jne LBB62_142
## %bb.143: ## in Loop: Header=BB62_141 Depth=1
movq 24(%rbx), %rcx
xorl %eax, %eax
cmpq 32(%rbx), %rcx
jae LBB62_145
## %bb.144: ## in Loop: Header=BB62_141 Depth=1
leaq 1(%rcx), %rax
movq %rax, 24(%rbx)
movzbl (%rcx), %eax
jmp LBB62_145
LBB62_154:
## Decode done. testl $-5 (mask 0xFFFFFFFB = ~4) is zero iff req_comp
## is 0 or 4 — those skip conversion; otherwise call
## _convert_format(data, 4, req_comp, dim2, dim1).
movl -100(%rbp), %edx ## 4-byte Reload
testl $-5, %edx
je LBB62_155
## %bb.159:
movq -88(%rbp), %rdi ## 8-byte Reload
movl $4, %esi
movl -80(%rbp), %ecx ## 4-byte Reload
movl -76(%rbp), %ebx ## 4-byte Reload
movl %ebx, %r8d
callq _convert_format
testq %rax, %rax
movq -112(%rbp), %rdi ## 8-byte Reload
movq -120(%rbp), %rcx ## 8-byte Reload
movq -136(%rbp), %rdx ## 8-byte Reload
movl -52(%rbp), %esi ## 4-byte Reload
jne LBB62_156
LBB62_160:
## Error (or failed conversion): return NULL.
xorl %eax, %eax
LBB62_161:
addq $120, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB62_155:
movq -112(%rbp), %rdi ## 8-byte Reload
movq -120(%rbp), %rcx ## 8-byte Reload
movq -136(%rbp), %rdx ## 8-byte Reload
movl -52(%rbp), %esi ## 4-byte Reload
movl -76(%rbp), %ebx ## 4-byte Reload
movq -88(%rbp), %rax ## 8-byte Reload
LBB62_156:
## Success: write the out-parameters (the channelCount store is guarded
## by a NULL check on its pointer) and return the buffer in rax.
testq %rdx, %rdx
je LBB62_158
## %bb.157:
movl %esi, (%rdx)
LBB62_158:
movl %ebx, (%rcx)
movl -80(%rbp), %ecx ## 4-byte Reload
movl %ecx, (%rdi)
jmp LBB62_161
.cfi_endproc
## -- End function
.section __TEXT,__literal16,16byte_literals
.p2align 4 ## -- Begin function hdr_load
LCPI63_0:
.byte 51 ## 0x33
.byte 50 ## 0x32
.byte 45 ## 0x2d
.byte 98 ## 0x62
.byte 105 ## 0x69
.byte 116 ## 0x74
.byte 95 ## 0x5f
.byte 114 ## 0x72
.byte 108 ## 0x6c
.byte 101 ## 0x65
.byte 95 ## 0x5f
.byte 114 ## 0x72
.byte 103 ## 0x67
.byte 98 ## 0x62
.byte 101 ## 0x65
.byte 0 ## 0x0
LCPI63_1:
.byte 70 ## 0x46
.byte 79 ## 0x4f
.byte 82 ## 0x52
.byte 77 ## 0x4d
.byte 65 ## 0x41
.byte 84 ## 0x54
.byte 61 ## 0x3d
.byte 51 ## 0x33
.byte 50 ## 0x32
.byte 45 ## 0x2d
.byte 98 ## 0x62
.byte 105 ## 0x69
.byte 116 ## 0x74
.byte 95 ## 0x5f
.byte 114 ## 0x72
.byte 108 ## 0x6c
.section __TEXT,__literal8,8byte_literals
.p2align 3
LCPI63_2:
.quad 0x3ff0000000000000 ## double 1
.section __TEXT,__literal4,4byte_literals
.p2align 2
LCPI63_3:
.long 0x40400000 ## float 3
.section __TEXT,__text,regular,pure_instructions
.p2align 4, 0x90
_hdr_load: ## @hdr_load
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $1176, %rsp ## imm = 0x498
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %r8d, %r15d
movq %rcx, %r14
movq %rdx, %r12
movq %rsi, %rbx
movq %rdi, %r13
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -48(%rbp)
leaq -1072(%rbp), %rsi
callq _hdr_gettoken
movabsq $5638868765947084579, %rax ## imm = 0x4E41494441523F23
xorq -1072(%rbp), %rax
movabsq $19495776774865985, %rcx ## imm = 0x45434E41494441
xorq -1069(%rbp), %rcx
orq %rax, %rcx
je LBB63_5
## %bb.1:
leaq L_.str.90(%rip), %rax
jmp LBB63_2
LBB63_5:
movq %rbx, -1112(%rbp) ## 8-byte Spill
movq %r12, -1088(%rbp) ## 8-byte Spill
leaq -1072(%rbp), %r12
movq %r13, %rdi
movq %r12, %rsi
callq _hdr_gettoken
cmpb $0, -1072(%rbp)
je LBB63_14
## %bb.6:
movq %r14, -1128(%rbp) ## 8-byte Spill
movl %r15d, -1104(%rbp) ## 4-byte Spill
xorl %r15d, %r15d
movl $1, %ebx
leaq -1072(%rbp), %r12
.p2align 4, 0x90
LBB63_7: ## =>This Inner Loop Header: Depth=1
movdqu -1065(%rbp), %xmm0
pxor LCPI63_0(%rip), %xmm0
movdqa -1072(%rbp), %xmm1
pxor LCPI63_1(%rip), %xmm1
por %xmm0, %xmm1
ptest %xmm1, %xmm1
cmovel %ebx, %r15d
movq %r13, %rdi
movq %r12, %rsi
callq _hdr_gettoken
cmpb $0, -1072(%rbp)
jne LBB63_7
## %bb.8:
testl %r15d, %r15d
je LBB63_15
## %bb.9:
leaq -1072(%rbp), %rsi
movq %r13, %rdi
callq _hdr_gettoken
movl $22829, %eax ## imm = 0x592D
xorl -1072(%rbp), %eax
movzbl -1070(%rbp), %ecx
xorl $32, %ecx
orw %ax, %cx
jne LBB63_13
## %bb.10:
leaq -1069(%rbp), %rdi
movq %rdi, -1136(%rbp)
leaq -1136(%rbp), %rsi
movl $10, %edx
callq _strtol
movq %rax, %rbx
movq -1136(%rbp), %r12
decq %r12
.p2align 4, 0x90
LBB63_11: ## =>This Inner Loop Header: Depth=1
cmpb $32, 1(%r12)
leaq 1(%r12), %r12
je LBB63_11
## %bb.12:
leaq L_.str.95(%rip), %rsi
movl $3, %edx
movq %r12, %rdi
callq _strncmp
testl %eax, %eax
je LBB63_16
LBB63_13:
leaq L_.str.94(%rip), %rax
jmp LBB63_2
LBB63_14:
movq %r12, -1136(%rbp)
LBB63_15:
leaq L_.str.92(%rip), %rax
LBB63_2:
movq %rax, _failure_reason(%rip)
xorl %eax, %eax
LBB63_3:
movq ___stack_chk_guard@GOTPCREL(%rip), %rcx
movq (%rcx), %rcx
cmpq -48(%rbp), %rcx
jne LBB63_78
## %bb.4:
addq $1176, %rsp ## imm = 0x498
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB63_16:
addq $3, %r12
movq %r12, -1136(%rbp)
movq %r12, %rdi
xorl %esi, %esi
movl $10, %edx
callq _strtol
movq %rax, %r15
movq -1112(%rbp), %rax ## 8-byte Reload
movl %r15d, (%rax)
movq -1088(%rbp), %rax ## 8-byte Reload
movl %ebx, (%rax)
movq -1128(%rbp), %rax ## 8-byte Reload
movl $3, (%rax)
movl -1104(%rbp), %edx ## 4-byte Reload
testl %edx, %edx
movl $3, %eax
cmovnel %edx, %eax
movq %rax, -1128(%rbp) ## 8-byte Spill
## kill: def $eax killed $eax killed $rax
imull %r15d, %eax
movl %eax, -1148(%rbp) ## 4-byte Spill
movq %rbx, -1160(%rbp) ## 8-byte Spill
imull %ebx, %eax
movslq %eax, %rdi
shlq $2, %rdi
callq _malloc
movq %rax, -1120(%rbp) ## 8-byte Spill
movq %r15, -1104(%rbp) ## 8-byte Spill
leal -32768(%r15), %eax
xorl %ebx, %ebx
cmpl $-32760, %eax ## imm = 0x8008
jb LBB63_80
## %bb.17:
xorl %r15d, %r15d
movq -1160(%rbp), %rcx ## 8-byte Reload
testl %ecx, %ecx
jle LBB63_74
## %bb.18:
movq -1104(%rbp), %rax ## 8-byte Reload
leal (,%rax,4), %edx
movq %rdx, -1168(%rbp) ## 8-byte Spill
movq -1128(%rbp), %rbx ## 8-byte Reload
movslq %ebx, %r12
cltq
movl %ecx, %ecx
movq %rcx, -1184(%rbp) ## 8-byte Spill
movl %eax, %ecx
movq %rcx, -1176(%rbp) ## 8-byte Spill
imulq %r12, %rax
shlq $2, %rax
movq %rax, -1192(%rbp) ## 8-byte Spill
shlq $2, %r12
movq -1120(%rbp), %rax ## 8-byte Reload
movq %rax, -1144(%rbp) ## 8-byte Spill
xorl %ecx, %ecx
xorl %r14d, %r14d
movq %r12, -1208(%rbp) ## 8-byte Spill
LBB63_19: ## =>This Loop Header: Depth=1
## Child Loop BB63_42 Depth 2
## Child Loop BB63_45 Depth 3
## Child Loop BB63_56 Depth 4
## Child Loop BB63_67 Depth 4
## Child Loop BB63_72 Depth 2
movq %rcx, -1200(%rbp) ## 8-byte Spill
movq 16(%r13), %rdi
testq %rdi, %rdi
je LBB63_23
## %bb.20: ## in Loop: Header=BB63_19 Depth=1
callq _fgetc
cmpl $-1, %eax
cmovel %r15d, %eax
movq 16(%r13), %rdi
testq %rdi, %rdi
je LBB63_25
## %bb.21: ## in Loop: Header=BB63_19 Depth=1
movl %eax, -1088(%rbp) ## 4-byte Spill
callq _fgetc
movl %eax, %esi
cmpl $-1, %eax
cmovel %r15d, %esi
movq 16(%r13), %rdi
testq %rdi, %rdi
je LBB63_27
## %bb.22: ## in Loop: Header=BB63_19 Depth=1
movq %rbx, %r15
movl %esi, %ebx
callq _fgetc
movl %ebx, %esi
movq %r15, %rbx
xorl %ecx, %ecx
movl %eax, %r15d
cmpl $-1, %eax
cmovel %ecx, %r15d
movl -1088(%rbp), %eax ## 4-byte Reload
jmp LBB63_30
LBB63_23: ## in Loop: Header=BB63_19 Depth=1
movq 24(%r13), %rdx
xorl %eax, %eax
cmpq 32(%r13), %rdx
jae LBB63_25
## %bb.24: ## in Loop: Header=BB63_19 Depth=1
leaq 1(%rdx), %rcx
movq %rcx, 24(%r13)
movzbl (%rdx), %eax
LBB63_25: ## in Loop: Header=BB63_19 Depth=1
movq 24(%r13), %rdx
xorl %esi, %esi
cmpq 32(%r13), %rdx
jae LBB63_28
## %bb.26: ## in Loop: Header=BB63_19 Depth=1
leaq 1(%rdx), %rcx
movq %rcx, 24(%r13)
movzbl (%rdx), %esi
jmp LBB63_28
LBB63_27: ## in Loop: Header=BB63_19 Depth=1
movl -1088(%rbp), %eax ## 4-byte Reload
LBB63_28: ## in Loop: Header=BB63_19 Depth=1
movq 24(%r13), %rdx
xorl %r15d, %r15d
cmpq 32(%r13), %rdx
jae LBB63_30
## %bb.29: ## in Loop: Header=BB63_19 Depth=1
leaq 1(%rdx), %rcx
movq %rcx, 24(%r13)
movzbl (%rdx), %r15d
LBB63_30: ## in Loop: Header=BB63_19 Depth=1
cmpl $2, %eax
jne LBB63_76
## %bb.31: ## in Loop: Header=BB63_19 Depth=1
cmpl $2, %esi
jne LBB63_76
## %bb.32: ## in Loop: Header=BB63_19 Depth=1
movl %r15d, %ecx
andl $128, %ecx
jne LBB63_76
## %bb.33: ## in Loop: Header=BB63_19 Depth=1
shll $8, %r15d
movq 16(%r13), %rdi
testq %rdi, %rdi
je LBB63_35
## %bb.34: ## in Loop: Header=BB63_19 Depth=1
callq _fgetc
cmpl $-1, %eax
movl $0, %ecx
cmovel %ecx, %eax
jmp LBB63_37
LBB63_35: ## in Loop: Header=BB63_19 Depth=1
movq 24(%r13), %rcx
xorl %eax, %eax
cmpq 32(%r13), %rcx
jae LBB63_37
## %bb.36: ## in Loop: Header=BB63_19 Depth=1
leaq 1(%rcx), %rax
movq %rax, 24(%r13)
movzbl (%rcx), %eax
LBB63_37: ## in Loop: Header=BB63_19 Depth=1
orl %r15d, %eax
cmpl -1104(%rbp), %eax ## 4-byte Folded Reload
jne LBB63_77
## %bb.38: ## in Loop: Header=BB63_19 Depth=1
testq %r14, %r14
jne LBB63_40
## %bb.39: ## in Loop: Header=BB63_19 Depth=1
movq -1168(%rbp), %rdi ## 8-byte Reload
callq _malloc
movq %rax, %r14
LBB63_40: ## in Loop: Header=BB63_19 Depth=1
xorl %eax, %eax
movq %rax, -1112(%rbp) ## 8-byte Spill
movq -1104(%rbp), %rbx ## 8-byte Reload
jmp LBB63_42
.p2align 4, 0x90
LBB63_41: ## in Loop: Header=BB63_42 Depth=2
movq -1112(%rbp), %rax ## 8-byte Reload
incl %eax
movq %rax, -1112(%rbp) ## 8-byte Spill
cmpl $4, %eax
je LBB63_71
LBB63_42: ## Parent Loop BB63_19 Depth=1
## => This Loop Header: Depth=2
## Child Loop BB63_45 Depth 3
## Child Loop BB63_56 Depth 4
## Child Loop BB63_67 Depth 4
xorl %r8d, %r8d
jmp LBB63_45
.p2align 4, 0x90
LBB63_65: ## in Loop: Header=BB63_45 Depth=3
movq -1088(%rbp), %r8 ## 8-byte Reload
LBB63_44: ## in Loop: Header=BB63_45 Depth=3
cmpl %ebx, %r8d
jge LBB63_41
LBB63_45: ## Parent Loop BB63_19 Depth=1
## Parent Loop BB63_42 Depth=2
## => This Loop Header: Depth=3
## Child Loop BB63_56 Depth 4
## Child Loop BB63_67 Depth 4
movq 16(%r13), %rdi
testq %rdi, %rdi
je LBB63_47
## %bb.46: ## in Loop: Header=BB63_45 Depth=3
movq %r8, -1088(%rbp) ## 8-byte Spill
callq _fgetc
movl %eax, %r15d
cmpl $-1, %eax
movl $0, %eax
cmovel %eax, %r15d
jmp LBB63_49
.p2align 4, 0x90
LBB63_47: ## in Loop: Header=BB63_45 Depth=3
movq 24(%r13), %rax
cmpq 32(%r13), %rax
jae LBB63_44
## %bb.48: ## in Loop: Header=BB63_45 Depth=3
movq %r8, -1088(%rbp) ## 8-byte Spill
leaq 1(%rax), %rcx
movq %rcx, 24(%r13)
movzbl (%rax), %r15d
LBB63_49: ## in Loop: Header=BB63_45 Depth=3
movzbl %r15b, %r12d
cmpl $128, %r12d
jbe LBB63_52
## %bb.50: ## in Loop: Header=BB63_45 Depth=3
movq 16(%r13), %rdi
testq %rdi, %rdi
je LBB63_59
## %bb.51: ## in Loop: Header=BB63_45 Depth=3
callq _fgetc
cmpl $-1, %eax
movl $0, %ecx
cmovel %ecx, %eax
jmp LBB63_61
.p2align 4, 0x90
LBB63_52: ## in Loop: Header=BB63_45 Depth=3
testl %r12d, %r12d
je LBB63_65
## %bb.53: ## in Loop: Header=BB63_45 Depth=3
movq -1112(%rbp), %rax ## 8-byte Reload
movq -1088(%rbp), %rcx ## 8-byte Reload
leal (%rax,%rcx,4), %ebx
negl %r12d
xorl %r15d, %r15d
jmp LBB63_56
.p2align 4, 0x90
LBB63_54: ## in Loop: Header=BB63_56 Depth=4
callq _fgetc
cmpl $-1, %eax
movl $0, %ecx
cmovel %ecx, %eax
LBB63_55: ## in Loop: Header=BB63_56 Depth=4
movslq %ebx, %rbx
movb %al, (%r14,%rbx)
addl $4, %ebx
decl %r15d
cmpl %r15d, %r12d
je LBB63_43
LBB63_56: ## Parent Loop BB63_19 Depth=1
## Parent Loop BB63_42 Depth=2
## Parent Loop BB63_45 Depth=3
## => This Inner Loop Header: Depth=4
movq 16(%r13), %rdi
testq %rdi, %rdi
jne LBB63_54
## %bb.57: ## in Loop: Header=BB63_56 Depth=4
movq 24(%r13), %rcx
xorl %eax, %eax
cmpq 32(%r13), %rcx
jae LBB63_55
## %bb.58: ## in Loop: Header=BB63_56 Depth=4
leaq 1(%rcx), %rax
movq %rax, 24(%r13)
movzbl (%rcx), %eax
jmp LBB63_55
.p2align 4, 0x90
LBB63_43: ## in Loop: Header=BB63_45 Depth=3
movq -1088(%rbp), %r8 ## 8-byte Reload
subl %r15d, %r8d
movq -1104(%rbp), %rbx ## 8-byte Reload
jmp LBB63_44
LBB63_59: ## in Loop: Header=BB63_45 Depth=3
movq 24(%r13), %rcx
xorl %eax, %eax
cmpq 32(%r13), %rcx
jae LBB63_61
## %bb.60: ## in Loop: Header=BB63_45 Depth=3
leaq 1(%rcx), %rax
movq %rax, 24(%r13)
movzbl (%rcx), %eax
.p2align 4, 0x90
LBB63_61: ## in Loop: Header=BB63_45 Depth=3
xorl $128, %r12d
je LBB63_65
## %bb.62: ## in Loop: Header=BB63_45 Depth=3
movq -1088(%rbp), %rsi ## 8-byte Reload
movl %esi, %ecx
andl $1, %r15d
cmpl $1, %r12d
jne LBB63_66
## %bb.63: ## in Loop: Header=BB63_45 Depth=3
movq %rsi, %r8
testl %r15d, %r15d
jne LBB63_69
jmp LBB63_70
LBB63_66: ## in Loop: Header=BB63_45 Depth=3
movq -1112(%rbp), %rdx ## 8-byte Reload
movq %rsi, %r8
leal (%rdx,%rsi,4), %edx
movl %r15d, %esi
subl %r12d, %esi
xorl %edi, %edi
.p2align 4, 0x90
LBB63_67: ## Parent Loop BB63_19 Depth=1
## Parent Loop BB63_42 Depth=2
## Parent Loop BB63_45 Depth=3
## => This Inner Loop Header: Depth=4
movslq %edx, %rdx
movb %al, (%r14,%rdx)
leal 4(%rdx), %ebx
movslq %ebx, %rbx
movb %al, (%r14,%rbx)
addl $8, %edx
addq $-2, %rdi
cmpl %edi, %esi
jne LBB63_67
## %bb.68: ## in Loop: Header=BB63_45 Depth=3
subq %rdi, %rcx
movq -1104(%rbp), %rbx ## 8-byte Reload
testl %r15d, %r15d
je LBB63_70
LBB63_69: ## in Loop: Header=BB63_45 Depth=3
movq -1112(%rbp), %rdx ## 8-byte Reload
leal (%rdx,%rcx,4), %ecx
movslq %ecx, %rcx
movb %al, (%r14,%rcx)
LBB63_70: ## in Loop: Header=BB63_45 Depth=3
addl %r12d, %r8d
jmp LBB63_44
LBB63_71: ## in Loop: Header=BB63_19 Depth=1
movq -1176(%rbp), %r12 ## 8-byte Reload
movq %r14, %r15
movq -1144(%rbp), %rbx ## 8-byte Reload
.p2align 4, 0x90
LBB63_72: ## Parent Loop BB63_19 Depth=1
## => This Inner Loop Header: Depth=2
movq %rbx, %rdi
movq %r15, %rsi
movq -1128(%rbp), %rdx ## 8-byte Reload
## kill: def $edx killed $edx killed $rdx
callq _hdr_convert
addq -1208(%rbp), %rbx ## 8-byte Folded Reload
addq $4, %r15
decq %r12
jne LBB63_72
## %bb.73: ## in Loop: Header=BB63_19 Depth=1
movq -1200(%rbp), %rcx ## 8-byte Reload
incq %rcx
movq -1144(%rbp), %rax ## 8-byte Reload
addq -1192(%rbp), %rax ## 8-byte Folded Reload
movq %rax, -1144(%rbp) ## 8-byte Spill
cmpq -1184(%rbp), %rcx ## 8-byte Folded Reload
movq -1128(%rbp), %rbx ## 8-byte Reload
movl $0, %r15d
jne LBB63_19
jmp LBB63_75
LBB63_74:
xorl %r14d, %r14d
LBB63_75:
movq %r14, %rdi
callq _free
jmp LBB63_81
LBB63_76:
movb %al, -1092(%rbp)
movb %sil, -1091(%rbp)
movb %r15b, -1090(%rbp)
movq %r13, %rdi
callq _get8
movb %al, -1089(%rbp)
leaq -1092(%rbp), %rsi
movq -1120(%rbp), %rdi ## 8-byte Reload
movl %ebx, %edx
callq _hdr_convert
movq %r14, %rdi
callq _free
xorl %ebx, %ebx
movl $1, %r14d
jmp LBB63_83
LBB63_77:
movq -1120(%rbp), %rdi ## 8-byte Reload
callq _free
movq %r14, %rdi
callq _free
leaq L_.str.96(%rip), %rax
jmp LBB63_2
LBB63_78:
callq ___stack_chk_fail
LBB63_79:
incl %ebx
LBB63_80:
movl $0, %r14d
cmpl -1160(%rbp), %ebx ## 4-byte Folded Reload
jl LBB63_82
LBB63_81:
movq -1120(%rbp), %rax ## 8-byte Reload
jmp LBB63_3
LBB63_82:
cmpl -1104(%rbp), %r14d ## 4-byte Folded Reload
jge LBB63_79
LBB63_83:
movq 16(%r13), %rcx
testq %rcx, %rcx
movq %rbx, -1088(%rbp) ## 8-byte Spill
je LBB63_85
## %bb.84:
leaq -1076(%rbp), %rdi
movl $1, %esi
movl $4, %edx
callq _fread
movb -1073(%rbp), %al
jmp LBB63_86
LBB63_85:
movq 24(%r13), %rcx
movl (%rcx), %eax
movl %eax, -1076(%rbp)
addq $4, %rcx
movq %rcx, 24(%r13)
shrl $24, %eax
LBB63_86:
movl -1148(%rbp), %ecx ## 4-byte Reload
imull %ebx, %ecx
movslq %ecx, %rcx
movq -1120(%rbp), %rdx ## 8-byte Reload
leaq (%rdx,%rcx,4), %r15
movl %r14d, %ecx
movq -1128(%rbp), %rbx ## 8-byte Reload
imull %ebx, %ecx
movslq %ecx, %r12
testb %al, %al
je LBB63_91
## %bb.87:
movzbl %al, %edi
addl $-136, %edi
movsd LCPI63_2(%rip), %xmm0 ## xmm0 = mem[0],zero
callq _ldexp
cvtsd2ss %xmm0, %xmm0
movzbl -1076(%rbp), %eax
cmpl $2, %ebx
jg LBB63_95
## %bb.88:
movzbl -1075(%rbp), %ecx
addl %eax, %ecx
movzbl -1074(%rbp), %eax
addl %ecx, %eax
xorps %xmm1, %xmm1
cvtsi2ss %eax, %xmm1
mulss %xmm1, %xmm0
divss LCPI63_3(%rip), %xmm0
movss %xmm0, (%r15,%r12,4)
cmpl $4, %ebx
je LBB63_96
LBB63_89:
cmpl $2, %ebx
jne LBB63_99
## %bb.90:
movl $1065353216, 4(%r15,%r12,4) ## imm = 0x3F800000
jmp LBB63_99
LBB63_91:
leal -1(%rbx), %eax
cmpl $3, %eax
ja LBB63_99
## %bb.92:
leaq LJTI63_0(%rip), %rcx
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
LBB63_93:
movl $1065353216, 4(%r15,%r12,4) ## imm = 0x3F800000
LBB63_94:
movl $0, (%r15,%r12,4)
jmp LBB63_99
LBB63_95:
xorps %xmm1, %xmm1
cvtsi2ss %eax, %xmm1
mulss %xmm0, %xmm1
movss %xmm1, (%r15,%r12,4)
movzbl -1075(%rbp), %eax
xorps %xmm1, %xmm1
cvtsi2ss %eax, %xmm1
movzbl -1074(%rbp), %eax
xorps %xmm2, %xmm2
cvtsi2ss %eax, %xmm2
mulss %xmm0, %xmm1
movss %xmm1, 4(%r15,%r12,4)
mulss %xmm0, %xmm2
movss %xmm2, 8(%r15,%r12,4)
cmpl $4, %ebx
jne LBB63_89
LBB63_96:
movl $1065353216, 12(%r15,%r12,4) ## imm = 0x3F800000
jmp LBB63_99
LBB63_97:
movl $1065353216, 12(%r15,%r12,4) ## imm = 0x3F800000
LBB63_98:
movl $0, 8(%r15,%r12,4)
movq $0, (%r15,%r12,4)
LBB63_99:
incl %r14d
movq -1088(%rbp), %rbx ## 8-byte Reload
jmp LBB63_82
.cfi_endproc
.p2align 2, 0x90
.data_region jt32
.set L63_0_set_94, LBB63_94-LJTI63_0
.set L63_0_set_93, LBB63_93-LJTI63_0
.set L63_0_set_98, LBB63_98-LJTI63_0
.set L63_0_set_97, LBB63_97-LJTI63_0
LJTI63_0:
.long L63_0_set_94
.long L63_0_set_93
.long L63_0_set_98
.long L63_0_set_97
.end_data_region
## -- End function
## -----------------------------------------------------------------------------
## int stbi_write_bmp(char *filename, int x, int y, int comp, void *data)
## SysV AMD64.  Thin wrapper: computes the BMP row padding and total file size,
## then delegates to the variadic _outfile helper, pushing every BMP header
## field as a vararg described by format string L_.str.3 (presumably a
## field-width descriptor string -- TODO confirm against the C source).
## In:  rdi = filename, esi = x (width), edx = y (height),
##      ecx = comp (bytes per pixel), r8 = pixel data.
## Out: eax = _outfile's result (nonzero iff the file could be opened).
## -----------------------------------------------------------------------------
.globl _stbi_write_bmp ## -- Begin function stbi_write_bmp
.p2align 4, 0x90
_stbi_write_bmp: ## @stbi_write_bmp
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
pushq %rax ## alignment slot; reclaimed by the addq $184 below
.cfi_offset %rbx, -24
movq %r8, %r10 ## r10 = data (pushed last -> first stack arg of _outfile)
movl %ecx, %r9d ## r9d = comp (6th register arg of _outfile)
## kill: def $edx killed $edx def $rdx
movl %esi, %ecx ## ecx = x (4th register arg of _outfile)
movl %esi, %r11d
andl $3, %r11d ## r11d = x & 3 == (-3*x) & 3: pad bytes so each row is 4-aligned
leal (%rcx,%rcx,2), %esi ## esi = 3*x (24bpp row payload bytes)
addl %r11d, %esi ## + pad = padded row size
imull %edx, %esi ## * y = pixel data size
addl $54, %esi ## + 14-byte file header + 40-byte info header = file size
subq $8, %rsp ## pad so the 21 pushes below leave rsp 16-aligned at the call
leaq L_.str.3(%rip), %rbx ## rbx = header format string for _outfile
movl %edx, %r8d ## r8d = y (5th register arg of _outfile)
xorl %eax, %eax ## al = 0: no vector args for the variadic call (SysV ABI)
## Header fields pushed right-to-left as varargs; the fixed stack args
## (data, flag, pad, fmt) are pushed last so they sit lowest.
pushq $0
pushq $0
pushq $0
pushq $0
pushq $0
pushq $0
pushq $24 ## bits per pixel
pushq $1 ## color planes
pushq %rdx ## height
pushq %rcx ## width
pushq $40 ## BITMAPINFOHEADER size
pushq $54 ## offset from file start to pixel data
pushq $0
pushq $0
pushq %rsi ## total file size
pushq $77 ## 'M'
pushq $66 ## 'B' (BMP magic, written first)
pushq %rbx ## fmt string consumed by _outfile/_writefv
pushq %r11 ## per-row pad byte count (read at 32(%rbp) in _outfile)
pushq $0 ## flag read at 24(%rbp) in _outfile (no alpha byte emitted)
pushq %r10 ## pixel data (read at 16(%rbp) in _outfile)
callq _outfile
addq $184, %rsp ## 21*8 pushes + subq $8 + the pushq %rax slot
popq %rbx
popq %rbp
retq
.cfi_endproc
## -- End function
## -----------------------------------------------------------------------------
## static int outfile(filename, ?, ?, x, y, comp, data, flag, pad, fmt, ...)
## Variadic image writer shared by the BMP/TGA wrappers (SysV AMD64).
## In (regs):  rdi = filename, ecx = x (pixels per row), r8d = y (row count),
##             r9d = comp (bytes per pixel).  esi/edx arrive from the wrappers
##             holding scratch values and are never read here -- presumably
##             unused parameters in the C source (TODO confirm).
## In (stack): 16(%rbp) = pixel data, 24(%rbp) = alpha/expand flag,
##             32(%rbp) = per-row pad byte count, 40(%rbp) = header format
##             string; varargs from 48(%rbp) are the header field values.
## Out: eax = 1 if fopen succeeded (file written and closed), 0 otherwise.
## Behavior visible below: writes the header via _writefv, then emits rows
## starting from the last row of `data` and stepping backwards (bottom-up,
## matching BMP scanline order), converting pixels to BGR byte order and,
## for comp==4 with flag==0, compositing alpha over a (255,0,255) background.
## -----------------------------------------------------------------------------
.p2align 4, 0x90 ## -- Begin function outfile
_outfile: ## @outfile
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $280, %rsp ## imm = 0x118
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
## kill: def $r9d killed $r9d def $r9
movq %r9, -104(%rbp) ## 8-byte Spill: comp
movl %r8d, %r14d ## r14d = y (row count)
movl %ecx, -84(%rbp) ## 4-byte Spill: x
testb %al, %al
je LBB65_23
## %bb.22:
## Variadic prologue: al > 0 means vector registers carry arguments, so
## spill xmm0-7 into the register save area for va_arg.
movaps %xmm0, -272(%rbp)
movaps %xmm1, -256(%rbp)
movaps %xmm2, -240(%rbp)
movaps %xmm3, -224(%rbp)
movaps %xmm4, -208(%rbp)
movaps %xmm5, -192(%rbp)
movaps %xmm6, -176(%rbp)
movaps %xmm7, -160(%rbp)
LBB65_23:
movq ___stack_chk_guard@GOTPCREL(%rip), %rax ## stack-protector canary
movq (%rax), %rax
movq %rax, -48(%rbp)
leaq L_.str.97(%rip), %rsi ## fopen mode string (presumably "wb" -- TODO confirm)
callq _fopen
movq %rax, %rbx ## rbx = FILE* (0 on failure)
testq %rax, %rax
je LBB65_18
## %bb.1:
## Build a va_list at -80(%rbp) for _writefv:
##   gp_offset=48, fp_offset=48 (all six GP arg regs already consumed,
##   so va_arg reads start in the overflow area), overflow = 48(%rbp),
##   reg_save_area = -320(%rbp).
movq 40(%rbp), %rsi ## rsi = header format string
leaq -320(%rbp), %rax
movq %rax, -64(%rbp) ## va_list.reg_save_area
leaq 48(%rbp), %rax
movq %rax, -72(%rbp) ## va_list.overflow_arg_area
movabsq $206158430256, %rax ## imm = 0x3000000030: {gp_offset=48, fp_offset=48}
movq %rax, -80(%rbp)
leaq -80(%rbp), %rdx
movq %rbx, %rdi
callq _writefv ## write the file header from the varargs
movl $0, -108(%rbp) ## 4-byte zero buffer used as the row-pad source
testl %r14d, %r14d
je LBB65_17 ## y == 0: nothing to emit
## %bb.2:
## Loop-invariant setup:
##   r13d = comp-1 (alpha byte offset), r12 = comp (per-pixel step),
##   -120 = x, -96 = data + (y-1)*x*comp (last row -- rows are emitted
##   bottom-up), -128 = -x*comp (row step, walking back toward row 0).
movq -104(%rbp), %rax ## 8-byte Reload: comp
leal -1(%rax), %r13d
movslq %r13d, %rcx
movq %rcx, -144(%rbp) ## 8-byte Spill: comp-1
movslq %eax, %r12
movslq %r14d, %rax
movslq -84(%rbp), %rcx ## 4-byte Folded Reload: x
movl %ecx, %edx
movq %rdx, -120(%rbp) ## 8-byte Spill: x
leaq -1(%rax), %rdx
imulq %r12, %rcx ## rcx = x*comp = row stride
imulq %rcx, %rdx
addq 16(%rbp), %rdx
movq %rdx, -96(%rbp) ## 8-byte Spill: pointer to last row
negq %rcx
movq %rcx, -128(%rbp) ## 8-byte Spill: -row stride
jmp LBB65_3
.p2align 4, 0x90
LBB65_16: ## in Loop: Header=BB65_3 Depth=1
## End of row: fwrite(&zero, pad, 1, f) emits the 0-3 pad bytes, then
## step the row pointer back one stride and fall into the next row.
movl $1, %edx
leaq -108(%rbp), %rdi
movl 32(%rbp), %esi ## pad byte count
movq %rbx, %rcx
callq _fwrite
movq -96(%rbp), %rax ## 8-byte Reload
addq -128(%rbp), %rax ## 8-byte Folded Reload: row ptr -= stride
movq %rax, -96(%rbp) ## 8-byte Spill
movq -136(%rbp), %rax ## 8-byte Reload: remaining rows
testl %eax, %eax
je LBB65_17
LBB65_3: ## =>This Loop Header: Depth=1
## Child Loop BB65_5 Depth 2
decq %rax
movq %rax, -136(%rbp) ## 8-byte Spill: remaining rows after this one
cmpl $0, -84(%rbp) ## 4-byte Folded Reload
jle LBB65_16 ## x <= 0: row is padding only
## %bb.4: ## in Loop: Header=BB65_3 Depth=1
movq -96(%rbp), %r14 ## 8-byte Reload: r14 = current pixel pointer
movq -120(%rbp), %r15 ## 8-byte Reload: r15 = pixels left in row
jmp LBB65_5
.p2align 4, 0x90
LBB65_15: ## in Loop: Header=BB65_5 Depth=2
addq %r12, %r14 ## next pixel (advance by comp)
decq %r15
je LBB65_16
LBB65_5: ## Parent Loop BB65_3 Depth=1
## => This Inner Loop Header: Depth=2
cmpl $2, %r13d
jb LBB65_20 ## comp-1 < 2 (unsigned): comp is 1 or 2 -> grayscale path
## %bb.6: ## in Loop: Header=BB65_5 Depth=2
cmpl $3, -104(%rbp) ## 4-byte Folded Reload
je LBB65_10 ## comp == 3: straight RGB -> BGR
## %bb.7: ## in Loop: Header=BB65_5 Depth=2
cmpl $4, -104(%rbp) ## 4-byte Folded Reload
jne LBB65_13
## %bb.8: ## in Loop: Header=BB65_5 Depth=2
cmpl $0, 24(%rbp)
je LBB65_9 ## comp == 4 and flag == 0: composite alpha away
LBB65_10: ## in Loop: Header=BB65_5 Depth=2
## RGB -> BGR reorder: write bytes 2,1,0 via _writef.
movzbl 2(%r14), %edx
movzbl 1(%r14), %ecx
movzbl (%r14), %r8d
LBB65_11: ## in Loop: Header=BB65_5 Depth=2
movq %rbx, %rdi
jmp LBB65_12
.p2align 4, 0x90
LBB65_20: ## in Loop: Header=BB65_5 Depth=2
## Grayscale: replicate the single byte into all three channel args.
movzbl (%r14), %ecx
movq %rbx, %rdi
movl %ecx, %edx
movl %ecx, %r8d
LBB65_12: ## in Loop: Header=BB65_5 Depth=2
## Channel bytes travel in edx/ecx/r8d (arg slots 3-5); rsi (slot 2) is
## not re-set here -- presumably dead in _writef's signature (TODO confirm).
xorl %eax, %eax ## al = 0: variadic call, no vector args
callq _writef
LBB65_13: ## in Loop: Header=BB65_5 Depth=2
cmpl $0, 24(%rbp)
jle LBB65_15
## %bb.14: ## in Loop: Header=BB65_5 Depth=2
## flag > 0: also emit the trailing alpha byte (pixel[comp-1]) verbatim.
movq -144(%rbp), %rax ## 8-byte Reload: comp-1
leaq (%r14,%rax), %rdi
movl $1, %esi
movl $1, %edx
movq %rbx, %rcx
callq _fwrite
jmp LBB65_15
LBB65_9: ## in Loop: Header=BB65_5 Depth=2
## Composite RGBA over background (255,0,255):
##   out = bg + (px-bg)*a/255, using the x*32897 >> 23 ~= x/255 trick.
movzbl 3(%r14), %eax ## a = alpha
movzbl (%r14), %ecx
xorl $255, %ecx
imull %eax, %ecx
imull $32897, %ecx, %ecx ## imm = 0x8081
shrl $23, %ecx
movl $255, %r8d
subl %ecx, %r8d ## r8 = 255 - (255-p0)*a/255 (channel 0 over 255)
movzbl 1(%r14), %ecx
imull %eax, %ecx
imull $32897, %ecx, %ecx ## imm = 0x8081
shrl $23, %ecx ## ecx = p1*a/255 (channel 1 over 0)
movzbl 2(%r14), %edx
xorl $255, %edx
imull %eax, %edx
imull $32897, %edx, %eax ## imm = 0x8081
shrl $23, %eax
movl $255, %edx
subl %eax, %edx ## edx = 255 - (255-p2)*a/255 (channel 2 over 255)
jmp LBB65_11
LBB65_17:
movq %rbx, %rdi
callq _fclose
LBB65_18:
## Return 1 iff fopen succeeded; verify the stack canary first.
xorl %eax, %eax
testq %rbx, %rbx
setne %cl
movq ___stack_chk_guard@GOTPCREL(%rip), %rdx
movq (%rdx), %rdx
cmpq -48(%rbp), %rdx
jne LBB65_21
## %bb.19:
movb %cl, %al
addq $280, %rsp ## imm = 0x118
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB65_21:
callq ___stack_chk_fail ## canary mismatch: abort, no return
.cfi_endproc
## -- End function
## -----------------------------------------------------------------------------
## int stbi_write_tga(char *filename, int x, int y, int comp, void *data)
## SysV AMD64.  Wrapper that emits an uncompressed true-color TGA (image
## type 2) through the variadic _outfile helper, with header layout driven
## by format string L_.str.4.
## In:  rdi = filename, esi = x, edx = y, ecx = comp, r8 = data.
## Out: eax = _outfile's result.
## has_alpha = !(comp & 1) -- even component counts (2, 4) carry alpha.
## -----------------------------------------------------------------------------
.globl _stbi_write_tga ## -- Begin function stbi_write_tga
.p2align 4, 0x90
_stbi_write_tga: ## @stbi_write_tga
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq %r8, %r10 ## r10 = data (first stack arg of _outfile)
movl %ecx, %r9d ## r9d = comp (6th register arg)
## kill: def $edx killed $edx def $rdx
movl %esi, %ecx ## ecx = x (4th register arg)
movl %r9d, %esi
notl %esi
andl $1, %esi ## esi = has_alpha = ~comp & 1
leal (,%rsi,8), %r11d ## r11d = 8*has_alpha: alpha bits in the image descriptor
leal 24(,%rsi,8), %ebx ## ebx = 24 or 32: bits per pixel
leaq L_.str.4(%rip), %r14 ## r14 = header format string
movl %edx, %r8d ## r8d = y (5th register arg)
xorl %eax, %eax ## al = 0: no vector args for the variadic call
## 16 pushes (128 bytes) keep rsp 16-aligned at the call.  Right-to-left:
## varargs descriptor-alpha-bits, bpp, height, width, five zeros (origin /
## colormap spec), image type 2, colormap type 0, id length 0; then the
## fixed stack args fmt, pad(0), flag(has_alpha), data.
pushq %r11 ## image descriptor: alpha channel depth
pushq %rbx ## bits per pixel
pushq %rdx ## height
pushq %rcx ## width
pushq $0
pushq $0
pushq $0
pushq $0
pushq $0
pushq $2 ## image type 2: uncompressed true-color
pushq $0 ## colormap type: none
pushq $0 ## image ID length: 0
pushq %r14 ## fmt string consumed by _outfile/_writefv
pushq $0 ## per-row pad count (TGA rows are unpadded)
pushq %rsi ## flag: emit trailing alpha byte per pixel when set
pushq %r10 ## pixel data
callq _outfile
addq $128, %rsp
popq %rbx
popq %r14
popq %rbp
retq
.cfi_endproc
## -- End function
## -----------------------------------------------------------------------------
## resample_row_1: identity 1x1 resampler.
## Returns its second argument unchanged (presumably the "in_near" scanline
## per the stb resampler convention -- TODO confirm); all other arguments
## are ignored.  In: rsi = input row.  Out: rax = same pointer.
## -----------------------------------------------------------------------------
.p2align 4, 0x90 ## -- Begin function resample_row_1
_resample_row_1: ## @resample_row_1
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq %rsi, %rax ## return the input row pointer -- no resampling needed
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__literal16,16byte_literals
.p2align 4 ## -- Begin function resample_row_v_2
## SIMD constants for the vertical 2x resampler below.
LCPI68_0: ## eight words of 3: weight applied to the near row
.short 3 ## 0x3
.short 3 ## 0x3
.short 3 ## 0x3
.short 3 ## 0x3
.short 3 ## 0x3
.short 3 ## 0x3
.short 3 ## 0x3
.short 3 ## 0x3
LCPI68_1: ## eight words of 2: rounding bias added before the >>2
.short 2 ## 0x2
.short 2 ## 0x2
.short 2 ## 0x2
.short 2 ## 0x2
.short 2 ## 0x2
.short 2 ## 0x2
.short 2 ## 0x2
.short 2 ## 0x2
LCPI68_2: ## mask keeping the low byte of each 16-bit lane (for packuswb)
.byte 255 ## 0xff
.byte 0 ## 0x0
.byte 255 ## 0xff
.byte 0 ## 0x0
.byte 255 ## 0xff
.byte 0 ## 0x0
.byte 255 ## 0xff
.byte 0 ## 0x0
.byte 255 ## 0xff
.byte 0 ## 0x0
.byte 255 ## 0xff
.byte 0 ## 0x0
.byte 255 ## 0xff
.byte 0 ## 0x0
.byte 255 ## 0xff
.byte 0 ## 0x0
LCPI68_3: ## pshufb pattern: gather the low bytes of 8 words into 8 bytes
.byte 0 ## 0x0
.byte 2 ## 0x2
.byte 4 ## 0x4
.byte 6 ## 0x6
.byte 8 ## 0x8
.byte 10 ## 0xa
.byte 12 ## 0xc
.byte 14 ## 0xe
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.section __TEXT,__text,regular,pure_instructions
.p2align 4, 0x90
## -----------------------------------------------------------------------------
## resample_row_v_2: vertical 2x upsample.
##   out[i] = (3*in_near[i] + in_far[i] + 2) >> 2   for i in [0, w)
## In:  rdi = out, rsi = in_near, rdx = in_far, ecx = w (byte count);
##      any further args are unused here.  Out: rax = out.
## Dispatch: scalar for w < 8 or when out lies within 16 bytes of either
## input (16-byte stores could clobber unread input bytes); otherwise an
## 8-wide SSE4.1 loop, and for w >= 16 a 2x-unrolled 16-wide loop, each
## falling back through narrower paths for the remainder.
## -----------------------------------------------------------------------------
_resample_row_v_2: ## @resample_row_v_2
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq %rdi, %rax ## return value = out
testl %ecx, %ecx
jle LBB68_22 ## w <= 0: nothing to do
## %bb.1:
movl %ecx, %r8d ## r8 = w (zero-extended)
cmpl $8, %ecx
jae LBB68_3 ## w >= 8: consider the vector paths
## %bb.2:
xorl %r11d, %r11d ## r11 = next index (scalar start)
LBB68_18:
## Scalar remainder: if the remaining count is odd, peel one element so
## the main loop below can process two per iteration.
movq %r11, %rcx
notq %rcx ## rcx = -(r11+1); rcx + w == 0 iff exactly one element left
testb $1, %r8b
je LBB68_20
## %bb.19:
movzbl (%rsi,%r11), %edi
leal (%rdi,%rdi,2), %r9d ## 3*in_near[i]
movzbl (%rdx,%r11), %edi
addl %r9d, %edi
addl $2, %edi
shrl $2, %edi
movb %dil, (%rax,%r11) ## out[i] = (3*near + far + 2) >> 2
orq $1, %r11
LBB68_20:
addq %r8, %rcx
je LBB68_22 ## nothing (more) to do
.p2align 4, 0x90
LBB68_21: ## =>This Inner Loop Header: Depth=1
## Two elements per iteration.
movzbl (%rsi,%r11), %ecx
leal (%rcx,%rcx,2), %ecx
movzbl (%rdx,%r11), %edi
addl %edi, %ecx
addl $2, %ecx
shrl $2, %ecx
movb %cl, (%rax,%r11)
movzbl 1(%rsi,%r11), %ecx
leal (%rcx,%rcx,2), %ecx
movzbl 1(%rdx,%r11), %edi
addl %edi, %ecx
addl $2, %ecx
shrl $2, %ecx
movb %cl, 1(%rax,%r11)
addq $2, %r11
cmpq %r11, %r8
jne LBB68_21
LBB68_22:
popq %rbp
retq
LBB68_3:
## Overlap guards: the vector stores write 8/16 bytes at a time, so out
## must be at least 16 bytes past each input or we fall back to scalar.
movq %rax, %r9
subq %rsi, %r9
xorl %r11d, %r11d
cmpq $16, %r9
jb LBB68_18 ## out too close to in_near
## %bb.4:
movq %rax, %rdi
subq %rdx, %rdi
cmpq $16, %rdi
jb LBB68_18 ## out too close to in_far
## %bb.5:
cmpl $16, %ecx
jae LBB68_7 ## w >= 16: use the wide unrolled path first
## %bb.6:
xorl %r11d, %r11d
LBB68_15:
## 8-wide loop over [r11, w & ~7).
movq %r11, %rcx
movl %r8d, %r11d
andl $-8, %r11d
movdqa LCPI68_0(%rip), %xmm0 ## xmm0 = [3,3,3,3,3,3,3,3]
movdqa LCPI68_1(%rip), %xmm1 ## xmm1 = [2,2,2,2,2,2,2,2]
movdqa LCPI68_3(%rip), %xmm2 ## xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
.p2align 4, 0x90
LBB68_16: ## =>This Inner Loop Header: Depth=1
## 8 pixels: widen to words, 3*near + far + 2, >>2, narrow, store 8 bytes.
pmovzxbw (%rsi,%rcx), %xmm3 ## xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
pmovzxbw (%rdx,%rcx), %xmm4 ## xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
pmullw %xmm0, %xmm3
paddw %xmm3, %xmm4
paddw %xmm1, %xmm4
psrlw $2, %xmm4
pshufb %xmm2, %xmm4
movq %xmm4, (%rax,%rcx)
addq $8, %rcx
cmpq %rcx, %r11
jne LBB68_16
## %bb.17:
cmpq %r8, %r11
jne LBB68_18 ## 1-7 elements left: scalar cleanup
jmp LBB68_22
LBB68_7:
## 16-wide path: r11 = w & ~15; r9 = number of 16-byte blocks;
## r10 = r9 & ~1 pairs processed 32 bytes per iteration.
movl %r8d, %r11d
andl $-16, %r11d
leaq -16(%r11), %rcx
movq %rcx, %r9
shrq $4, %r9
incq %r9
testq %rcx, %rcx
je LBB68_8 ## exactly one 16-byte block
## %bb.9:
movq %r9, %r10
andq $-2, %r10
xorl %ecx, %ecx
movdqa LCPI68_0(%rip), %xmm0 ## xmm0 = [3,3,3,3,3,3,3,3]
movdqa LCPI68_1(%rip), %xmm1 ## xmm1 = [2,2,2,2,2,2,2,2]
movdqa LCPI68_2(%rip), %xmm2 ## xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
.p2align 4, 0x90
LBB68_10: ## =>This Inner Loop Header: Depth=1
## 32 pixels per iteration: two independent 16-byte groups, each widened
## into word lanes, blended 3:1 with rounding, masked and repacked.
pmovzxbw (%rsi,%rcx), %xmm3 ## xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
pmovzxbw 8(%rsi,%rcx), %xmm4 ## xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
pmullw %xmm0, %xmm4
pmullw %xmm0, %xmm3
pmovzxbw 8(%rdx,%rcx), %xmm5 ## xmm5 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
pmovzxbw (%rdx,%rcx), %xmm6 ## xmm6 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
paddw %xmm1, %xmm5
paddw %xmm4, %xmm5
paddw %xmm1, %xmm6
paddw %xmm3, %xmm6
psrlw $2, %xmm6
psrlw $2, %xmm5
pand %xmm2, %xmm5
pand %xmm2, %xmm6
packuswb %xmm5, %xmm6
movdqu %xmm6, (%rax,%rcx)
pmovzxbw 16(%rsi,%rcx), %xmm3 ## xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
pmovzxbw 24(%rsi,%rcx), %xmm4 ## xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
pmullw %xmm0, %xmm4
pmullw %xmm0, %xmm3
pmovzxbw 24(%rdx,%rcx), %xmm5 ## xmm5 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
pmovzxbw 16(%rdx,%rcx), %xmm6 ## xmm6 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
paddw %xmm1, %xmm5
paddw %xmm4, %xmm5
paddw %xmm1, %xmm6
paddw %xmm3, %xmm6
psrlw $2, %xmm6
psrlw $2, %xmm5
pand %xmm2, %xmm5
pand %xmm2, %xmm6
packuswb %xmm5, %xmm6
movdqu %xmm6, 16(%rax,%rcx)
addq $32, %rcx
addq $-2, %r10
jne LBB68_10
## %bb.11:
testb $1, %r9b
je LBB68_13 ## even block count: done with the 16-wide stage
LBB68_12:
## Trailing single 16-pixel block.
pmovzxbw (%rsi,%rcx), %xmm0 ## xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
pmovzxbw 8(%rsi,%rcx), %xmm1 ## xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
movdqa LCPI68_0(%rip), %xmm2 ## xmm2 = [3,3,3,3,3,3,3,3]
pmullw %xmm2, %xmm1
pmullw %xmm2, %xmm0
pmovzxbw 8(%rdx,%rcx), %xmm2 ## xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
pmovzxbw (%rdx,%rcx), %xmm3 ## xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
movdqa LCPI68_1(%rip), %xmm4 ## xmm4 = [2,2,2,2,2,2,2,2]
paddw %xmm4, %xmm2
paddw %xmm1, %xmm2
paddw %xmm4, %xmm3
paddw %xmm0, %xmm3
psrlw $2, %xmm3
psrlw $2, %xmm2
movdqa LCPI68_2(%rip), %xmm0 ## xmm0 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
pand %xmm0, %xmm2
pand %xmm0, %xmm3
packuswb %xmm2, %xmm3
movdqu %xmm3, (%rax,%rcx)
LBB68_13:
cmpq %r8, %r11
je LBB68_22 ## w was a multiple of 16: done
## %bb.14:
testb $8, %r8b
je LBB68_18 ## < 8 left: scalar cleanup
jmp LBB68_15 ## >= 8 left: one 8-wide pass first
LBB68_8:
xorl %ecx, %ecx
testb $1, %r9b
jne LBB68_12
jmp LBB68_13
.cfi_endproc
## -- End function
.section __TEXT,__literal16,16byte_literals
.p2align 4 ## -- Begin function resample_row_h_2
## SIMD constants for the horizontal 2x resampler below.
LCPI69_0: ## word 3 in each dword lane (multiplier for pmaddwd)
.short 3 ## 0x3
.space 2
.short 3 ## 0x3
.space 2
.short 3 ## 0x3
.space 2
.short 3 ## 0x3
.space 2
LCPI69_1: ## four dwords of 2: rounding bias before the >>2
.long 2 ## 0x2
.long 2 ## 0x2
.long 2 ## 0x2
.long 2 ## 0x2
LCPI69_2: ## pshufb pattern: low byte of each dword lane
.byte 0 ## 0x0
.byte 4 ## 0x4
.byte 8 ## 0x8
.byte 12 ## 0xc
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.section __TEXT,__text,regular,pure_instructions
.p2align 4, 0x90
## -----------------------------------------------------------------------------
## resample_row_h_2: horizontal 2x upsample with a 3:1 filter.
##   out[0]    = in[0]
##   out[1]    = (3*in[0] + in[1] + 2) >> 2
##   out[2i]   = (3*in[i] + in[i-1] + 2) >> 2    (1 <= i <= w-2)
##   out[2i+1] = (3*in[i] + in[i+1] + 2) >> 2
##   out[2w-2] = (3*in[w-1] + in[w-2] + 2) >> 2
##   out[2w-1] = in[w-1]
## Special case w == 1: out[0] = out[1] = in[0].
## In:  rdi = out, rsi = in (near row), ecx = w; the third argument (rdx)
##      is clobbered immediately and never read -- unused here.
## Out: rax = out.  SSE4.1 inner path with a pointer-overlap guard.
## -----------------------------------------------------------------------------
_resample_row_h_2: ## @resample_row_h_2
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %rbx
.cfi_offset %rbx, -24
## kill: def $ecx killed $ecx def $rcx
movq %rdi, %rax ## return value = out
movb (%rsi), %dl
cmpl $1, %ecx
jne LBB69_2
## %bb.1:
## w == 1: duplicate the single input byte.
movb %dl, 1(%rax)
movb %dl, (%rax)
jmp LBB69_12
LBB69_2:
## First output pair.
movb %dl, (%rax) ## out[0] = in[0]
movzbl (%rsi), %edx
leal (%rdx,%rdx,2), %edx
movzbl 1(%rsi), %edi
addl %edi, %edx
addl $2, %edx
shrl $2, %edx
movb %dl, 1(%rax) ## out[1] = (3*in[0] + in[1] + 2) >> 2
leal -1(%rcx), %r8d ## r8d = w-1 (last input index)
movl $2, %edx ## edx = output index of the tail pair (w == 2 case)
cmpl $3, %ecx
jl LBB69_11 ## w == 2: no interior pixels
## %bb.3:
## Interior i in [1, w-2]; r11 = w-1, loop runs while index < r11.
movl %r8d, %r11d
leaq -1(%r11), %r9
movl $1, %ebx ## ebx = first interior index
cmpq $8, %r9
jb LBB69_9 ## fewer than 8 interior pixels: scalar
## %bb.4:
## Overlap guard: vector path stores 16 bytes at out+2i+2; make sure the
## store window cannot overwrite input bytes still to be read.
leaq 2(%rax), %r10
leaq (%r11,%rsi), %rdi
incq %rdi
cmpq %rdi, %r10
jae LBB69_6
## %bb.5:
leaq (%rax,%r11,2), %rdi
cmpq %rsi, %rdi
ja LBB69_9
LBB69_6:
## 8 interior inputs -> 16 output bytes per iteration.
movq %r9, %r10
andq $-8, %r10
leaq 1(%r10), %rbx ## scalar loop resumes at this index
xorl %edi, %edi
movdqa LCPI69_0(%rip), %xmm8 ## xmm8 = <3,u,3,u,3,u,3,u>
movdqa LCPI69_1(%rip), %xmm1 ## xmm1 = [2,2,2,2]
movdqa LCPI69_2(%rip), %xmm2 ## xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
.p2align 4, 0x90
LBB69_7: ## =>This Inner Loop Header: Depth=1
## xmm4/xmm5 = 3*in[i] (+2) for two groups of 4; combine with in[i-1]
## (left neighbors) and in[i+1] (right neighbors), >>2, narrow each dword
## lane to a byte, then interleave left/right results into out[2i..].
pmovzxbd 1(%rsi,%rdi), %xmm4 ## xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
pmovzxbd 5(%rsi,%rdi), %xmm5 ## xmm5 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
pmaddwd %xmm8, %xmm5
pmaddwd %xmm8, %xmm4
paddd %xmm1, %xmm4
paddd %xmm1, %xmm5
pmovzxbd (%rsi,%rdi), %xmm3 ## xmm3 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
pmovzxbd 4(%rsi,%rdi), %xmm6 ## xmm6 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
paddd %xmm5, %xmm6
paddd %xmm4, %xmm3
psrld $2, %xmm3
psrld $2, %xmm6
pshufb %xmm2, %xmm6
pshufb %xmm2, %xmm3
pmovzxbd 2(%rsi,%rdi), %xmm7 ## xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
pmovzxbd 6(%rsi,%rdi), %xmm0 ## xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
punpckldq %xmm6, %xmm3 ## xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1]
paddd %xmm5, %xmm0
paddd %xmm4, %xmm7
psrld $2, %xmm7
psrld $2, %xmm0
pshufb %xmm2, %xmm0
pshufb %xmm2, %xmm7
punpckldq %xmm0, %xmm7 ## xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1]
punpcklbw %xmm7, %xmm3 ## xmm3 = xmm3[0],xmm7[0],xmm3[1],xmm7[1],xmm3[2],xmm7[2],xmm3[3],xmm7[3],xmm3[4],xmm7[4],xmm3[5],xmm7[5],xmm3[6],xmm7[6],xmm3[7],xmm7[7]
movdqu %xmm3, 2(%rax,%rdi,2)
addq $8, %rdi
cmpq %rdi, %r10
jne LBB69_7
## %bb.8:
cmpq %r10, %r9
je LBB69_10 ## all interior pixels vectorized
.p2align 4, 0x90
LBB69_9: ## =>This Inner Loop Header: Depth=1
## Scalar interior: out[2i] and out[2i+1] from in[i] and its neighbors.
movzbl (%rsi,%rbx), %edi
leal (%rdi,%rdi,2), %edi ## 3*in[i]
movzbl -1(%rsi,%rbx), %edx
addl %edi, %edx
addl $2, %edx
shrl $2, %edx
movb %dl, (%rax,%rbx,2) ## out[2i] = (3*in[i] + in[i-1] + 2) >> 2
movzbl 1(%rsi,%rbx), %edx
addl %edi, %edx
addl $2, %edx
shrl $2, %edx
movb %dl, 1(%rax,%rbx,2) ## out[2i+1] = (3*in[i] + in[i+1] + 2) >> 2
leaq 1(%rbx), %rdx
movq %rdx, %rbx
cmpq %rdx, %r11
jne LBB69_9
LBB69_10:
leal (%r8,%r8), %edx ## edx = 2*(w-1): index of the final output pair
LBB69_11:
## Final pair from the last input pixel.
movslq %ecx, %rcx
movzbl -2(%rcx,%rsi), %ecx ## in[w-2]
leal (%rcx,%rcx,2), %ecx
movslq %r8d, %rdi
movzbl (%rsi,%rdi), %ebx ## in[w-1]
addl %ebx, %ecx
addl $2, %ecx
shrl $2, %ecx
movl %edx, %edx
movb %cl, (%rax,%rdx) ## out[2w-2] = (3*in[w-1] + in[w-2] + 2) >> 2
movb (%rsi,%rdi), %cl
orq $1, %rdx
movb %cl, (%rax,%rdx) ## out[2w-1] = in[w-1]
LBB69_12:
popq %rbx
popq %rbp
retq
.cfi_endproc
## -- End function
.section __TEXT,__literal16,16byte_literals
.p2align 4 ## -- Begin function resample_row_hv_2
## SIMD constants for the combined horizontal+vertical 2x resampler below.
LCPI70_0: ## word 3 in each dword lane (multiplier for pmaddwd/pmulld)
.short 3 ## 0x3
.space 2
.short 3 ## 0x3
.space 2
.short 3 ## 0x3
.space 2
.short 3 ## 0x3
.space 2
LCPI70_1: ## four dwords of 8: rounding bias before the >>4
.long 8 ## 0x8
.long 8 ## 0x8
.long 8 ## 0x8
.long 8 ## 0x8
LCPI70_2: ## pshufb pattern: low byte of each dword lane
.byte 0 ## 0x0
.byte 4 ## 0x4
.byte 8 ## 0x8
.byte 12 ## 0xc
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.section __TEXT,__text,regular,pure_instructions
.p2align 4, 0x90
## -----------------------------------------------------------------------------
## resample_row_hv_2: 2x upsample in both directions.
## Let t[i] = 3*in_near[i] + in_far[i] (vertical blend, kept unscaled).
##   out[0]      = (t[0] + 2) >> 2
##   out[2i-1]   = (3*t[i-1] + t[i] + 8) >> 4      (1 <= i <= w-1)
##   out[2i]     = (3*t[i] + t[i-1] + 8) >> 4
##   out[2w-1]   = (t[w-1] + 2) >> 2
## Special case w == 1: out[0] = out[1] = (3*near[0] + far[0] + 2) >> 2.
## In:  rdi = out, rsi = in_near, rdx = in_far, ecx = w.  Out: rax = out.
## SSE4.1 inner path (pmulld) carrying t across iterations via palignr,
## guarded by pointer-overlap checks against both input rows.
## -----------------------------------------------------------------------------
_resample_row_hv_2: ## @resample_row_hv_2
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdi, %rax ## return value = out
movzbl (%rsi), %edi
cmpl $1, %ecx
jne LBB70_2
## %bb.1:
## w == 1: single vertical blend, duplicated.
movzwl %di, %ecx
leal (%rcx,%rcx,2), %ecx
movzbl (%rdx), %edx
addl %edx, %ecx
addl $2, %ecx
shrl $2, %ecx
movb %cl, 1(%rax)
movb %cl, (%rax)
jmp LBB70_12
LBB70_2:
## out[0] = (t[0] + 2) >> 2, and keep r10d = t[0] as t_prev.
movzwl %di, %edi
leal (%rdi,%rdi,2), %r10d ## 3*near[0]
movzbl (%rdx), %edi
leal (%rdi,%r10), %ebx
addl $2, %ebx
shrl $2, %ebx
movb %bl, (%rax) ## out[0]
cmpl $2, %ecx
jl LBB70_11 ## w == 1 handled above; defensive: just write out[2w-1]
## %bb.3:
addl %edi, %r10d ## r10d = t[0] = 3*near[0] + far[0]
movl %ecx, %r8d
leaq -1(%r8), %rdi ## rdi = w-1 interior steps
movl $1, %r9d ## r9 = first index i
cmpq $8, %rdi
jb LBB70_9 ## fewer than 8 steps: scalar
## %bb.4:
## Overlap guards: out[1..2w-2] stores must not intersect either input row.
leaq 1(%rax), %rbx
leaq (%rax,%r8,2), %r11
decq %r11
leaq 1(%rsi), %r14
leaq (%rsi,%r8), %r15
leaq 1(%rdx), %r12
leaq (%rdx,%r8), %r13
cmpq %r15, %rbx
setb %r15b
cmpq %r11, %r14
setb %r14b
cmpq %r13, %rbx
setb %bl
cmpq %r11, %r12
setb %r11b
testb %r14b, %r15b
jne LBB70_9 ## out range overlaps in_near
## %bb.5:
andb %r11b, %bl
jne LBB70_9 ## out range overlaps in_far
## %bb.6:
## Vector setup: xmm3 carries the previous iteration's t values so each
## loop body can form (t_prev, t_cur) pairs with palignr.
movq %rdi, %r11
andq $-8, %r11
leaq 1(%r11), %r9 ## scalar loop resumes here
movd %r10d, %xmm0
pshufd $0, %xmm0, %xmm3 ## xmm3 = xmm0[0,0,0,0]
xorl %r10d, %r10d
movdqa LCPI70_0(%rip), %xmm0 ## xmm0 = <3,u,3,u,3,u,3,u>
movdqa LCPI70_1(%rip), %xmm8 ## xmm8 = [8,8,8,8]
movdqa LCPI70_2(%rip), %xmm9 ## xmm9 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
.p2align 4, 0x90
LBB70_7: ## =>This Inner Loop Header: Depth=1
## Compute t for 8 new pixels (two dword groups xmm5/xmm3), shift in the
## previous t via palignr, then emit the interleaved (3*t_prev+t+8)>>4 and
## (3*t+t_prev+8)>>4 byte pairs -- 16 output bytes per iteration.
pmovzxbd 5(%rsi,%r10), %xmm6 ## xmm6 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
pmovzxbd 1(%rsi,%r10), %xmm4 ## xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
pmaddwd %xmm0, %xmm4
pmaddwd %xmm0, %xmm6
pmovzxbd 1(%rdx,%r10), %xmm5 ## xmm5 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
paddd %xmm4, %xmm5
movdqa %xmm5, %xmm4
palignr $12, %xmm3, %xmm4 ## xmm4 = xmm3[12,13,14,15],xmm4[0,1,2,3,4,5,6,7,8,9,10,11]
pmovzxbd 5(%rdx,%r10), %xmm3 ## xmm3 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
paddd %xmm6, %xmm3
movdqa %xmm3, %xmm6
palignr $12, %xmm5, %xmm6 ## xmm6 = xmm5[12,13,14,15],xmm6[0,1,2,3,4,5,6,7,8,9,10,11]
movdqa %xmm6, %xmm7
pmulld %xmm0, %xmm7
movdqa %xmm4, %xmm1
pmulld %xmm0, %xmm1
movdqa %xmm3, %xmm2
paddd %xmm8, %xmm2
paddd %xmm7, %xmm2
movdqa %xmm5, %xmm7
paddd %xmm8, %xmm7
paddd %xmm1, %xmm7
psrld $4, %xmm7
psrld $4, %xmm2
pshufb %xmm9, %xmm2
pshufb %xmm9, %xmm7
punpckldq %xmm2, %xmm7 ## xmm7 = xmm7[0],xmm2[0],xmm7[1],xmm2[1]
movdqa %xmm3, %xmm1
pmaddwd %xmm0, %xmm1
pmaddwd %xmm0, %xmm5
paddd %xmm8, %xmm6
paddd %xmm1, %xmm6
paddd %xmm8, %xmm4
paddd %xmm5, %xmm4
psrld $4, %xmm4
psrld $4, %xmm6
pshufb %xmm9, %xmm6
pshufb %xmm9, %xmm4
punpckldq %xmm6, %xmm4 ## xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1]
punpcklbw %xmm4, %xmm7 ## xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3],xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7]
movdqu %xmm7, 1(%rax,%r10,2)
addq $8, %r10
cmpq %r10, %r11
jne LBB70_7
## %bb.8:
## Recover the last t as scalar t_prev for the remainder loop.
pextrd $3, %xmm3, %r10d
movl %r10d, %ebx
cmpq %r11, %rdi
je LBB70_10 ## no scalar remainder
.p2align 4, 0x90
LBB70_9: ## =>This Inner Loop Header: Depth=1
## Scalar step i: t_cur = 3*near[i] + far[i]; emit out[2i-1], out[2i].
movzbl (%rsi,%r9), %ebx
leal (%rbx,%rbx,2), %edi ## 3*near[i]
movzbl (%rdx,%r9), %ebx
addl %edi, %ebx ## ebx = t_cur
leal (%r10,%r10,2), %edi ## 3*t_prev
addl %ebx, %edi
addl $8, %edi
shrl $4, %edi
movb %dil, -1(%rax,%r9,2) ## out[2i-1] = (3*t_prev + t_cur + 8) >> 4
leal (%rbx,%rbx,2), %edi ## 3*t_cur
addl %r10d, %edi
addl $8, %edi
shrl $4, %edi
movb %dil, (%rax,%r9,2) ## out[2i] = (3*t_cur + t_prev + 8) >> 4
incq %r9
movl %ebx, %r10d ## t_prev = t_cur
cmpq %r9, %r8
jne LBB70_9
LBB70_10:
## ebx = t[w-1]; final sample is its plain vertical blend.
addl $2, %ebx
shrl $2, %ebx
LBB70_11:
movslq %ecx, %rcx
movb %bl, -1(%rax,%rcx,2) ## out[2w-1] = (t[w-1] + 2) >> 2
LBB70_12:
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.p2align 4, 0x90 ## -- Begin function resample_row_generic
##---------------------------------------------------------------------------
## resample_row_generic -- nearest-neighbour row upsampler.
## ABI: System V AMD64 (macOS Mach-O).
## In:  rdi = output row pointer (spilled to -56(%rbp), returned in rax)
##      rsi = input row of bytes (byte i is read as (%rsi,%rdx) below)
##      rdx = third C argument; its incoming value is discarded (rdx is
##            zeroed at LBB71_2 setup and reused as the loop index) --
##            NOTE(review): looks like an unused "far row" parameter,
##            confirm against the C source
##      ecx = number of input samples (outer trip count, r14)
##      r8d = replication factor / output group size (r13)
## Out: rax = the original output pointer.
## Effect: for each i in [0, ecx): fills r8d consecutive output bytes at
##         out + i*r8d with the single byte in[i].  The byte fill is done
##         via a 64-bytes-per-iteration pshufb-broadcast loop (LBB71_13),
##         a 32-byte odd-count tail (LBB71_15), an 8-byte pshuflw loop
##         (LBB71_18), then 4x-unrolled (LBB71_24) and single-byte
##         (LBB71_22) scalar cleanup.  An overlap check (bb.5-bb.7) falls
##         back to the scalar path when out and in may alias.
##---------------------------------------------------------------------------
_resample_row_generic: ## @resample_row_generic
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdi, -56(%rbp) ## 8-byte Spill
testl %ecx, %ecx
jle LBB71_26 ## zero/negative sample count: nothing to do
## %bb.1:
## Precompute per-row loop bounds from the replication factor r8d:
##   -64(%rbp) = r9 & ~31  (bytes covered by the 32-byte vector loop)
##   -80(%rbp) = number of 32-byte chunks, -72(%rbp) = same rounded to even
##   r11 = r9 & ~7 (8-byte tail bound), r12 = r9 & 3 (scalar remainder)
movslq %r8d, %r13
movl %ecx, %r14d
movl %r13d, %r9d
movl %r9d, %eax
andl $-32, %eax
movq %rax, -64(%rbp) ## 8-byte Spill
addq $-32, %rax
movq %rax, -88(%rbp) ## 8-byte Spill
movq %rax, %rcx
shrq $5, %rcx
incq %rcx
movq %rcx, %rax
movq %rcx, -80(%rbp) ## 8-byte Spill
andq $-2, %rcx
movq %rcx, -72(%rbp) ## 8-byte Spill
movl %r9d, %r11d
andl $-8, %r11d
movl %r9d, %r12d
andl $3, %r12d
movq -56(%rbp), %r15 ## 8-byte Reload
leaq 48(%r15), %r10
xorl %edx, %edx
pxor %xmm0, %xmm0
movq %r12, -48(%rbp) ## 8-byte Spill
jmp LBB71_2
.p2align 4, 0x90
LBB71_25: ## in Loop: Header=BB71_2 Depth=1
## Advance to next input sample: out cursors step by r13 bytes.
incq %rdx
addq %r13, %r10
addq %r13, %r15
cmpq %r14, %rdx
je LBB71_26
LBB71_2: ## =>This Loop Header: Depth=1
## Child Loop BB71_13 Depth 2
## Child Loop BB71_18 Depth 2
## Child Loop BB71_22 Depth 2
## Child Loop BB71_24 Depth 2
## Outer loop: rdx = sample index, r15 = out + rdx*r13.
testl %r8d, %r8d
jle LBB71_25
## %bb.3: ## in Loop: Header=BB71_2 Depth=1
cmpl $8, %r8d
jae LBB71_5
## %bb.4: ## in Loop: Header=BB71_2 Depth=1
xorl %ebx, %ebx
jmp LBB71_20 ## fewer than 8 bytes to fill: scalar only
.p2align 4, 0x90
LBB71_5: ## in Loop: Header=BB71_2 Depth=1
## Overlap check: does [out+rdx*r13, +r9) intersect in+rdx(+1)?
movq %r14, %r12
movq %rdx, %r14
imulq %r13, %r14
movq -56(%rbp), %rax ## 8-byte Reload
leaq (%rax,%r14), %rcx
leaq (%rsi,%rdx), %rdi
incq %rdi
cmpq %rdi, %rcx
jae LBB71_8
## %bb.6: ## in Loop: Header=BB71_2 Depth=1
leaq (%r14,%r9), %rcx
addq %rax, %rcx
leaq (%rsi,%rdx), %rdi
cmpq %rcx, %rdi
jae LBB71_8
## %bb.7: ## in Loop: Header=BB71_2 Depth=1
## Possible alias: take the scalar path from offset 0.
xorl %ebx, %ebx
movq %r12, %r14
movq -48(%rbp), %r12 ## 8-byte Reload
jmp LBB71_20
LBB71_8: ## in Loop: Header=BB71_2 Depth=1
cmpl $32, %r8d
jae LBB71_10
## %bb.9: ## in Loop: Header=BB71_2 Depth=1
xorl %edi, %edi
movq %r12, %r14
movq -48(%rbp), %r12 ## 8-byte Reload
jmp LBB71_18
LBB71_10: ## in Loop: Header=BB71_2 Depth=1
cmpq $0, -88(%rbp) ## 8-byte Folded Reload
je LBB71_11
## %bb.12: ## in Loop: Header=BB71_2 Depth=1
## Broadcast in[rdx] into all 16 lanes of xmm1 (pshufb with zero mask).
movzbl (%rsi,%rdx), %ecx
movd %ecx, %xmm1
pshufb %xmm0, %xmm1
movq -72(%rbp), %rdi ## 8-byte Reload
xorl %ebx, %ebx
.p2align 4, 0x90
LBB71_13: ## Parent Loop BB71_2 Depth=1
## => This Inner Loop Header: Depth=2
## Main fill loop: 64 bytes per iteration (2 x 32-byte chunks).
movdqu %xmm1, -48(%r10,%rbx)
movdqu %xmm1, -32(%r10,%rbx)
movdqu %xmm1, -16(%r10,%rbx)
movdqu %xmm1, (%r10,%rbx)
addq $64, %rbx
addq $-2, %rdi
jne LBB71_13
## %bb.14: ## in Loop: Header=BB71_2 Depth=1
testb $1, -80(%rbp) ## 1-byte Folded Reload
je LBB71_16
LBB71_15: ## in Loop: Header=BB71_2 Depth=1
## Odd 32-byte chunk left over from the 64-byte loop.
movzbl (%rsi,%rdx), %ecx
movd %ecx, %xmm1
pshufb %xmm0, %xmm1
addq %r14, %rbx
movdqu %xmm1, (%rax,%rbx)
movdqu %xmm1, 16(%rax,%rbx)
LBB71_16: ## in Loop: Header=BB71_2 Depth=1
cmpq %r9, -64(%rbp) ## 8-byte Folded Reload
movq %r12, %r14
movq -48(%rbp), %r12 ## 8-byte Reload
je LBB71_25
## %bb.17: ## in Loop: Header=BB71_2 Depth=1
movq -64(%rbp), %rbx ## 8-byte Reload
movq %rbx, %rdi
testb $24, %r9b
je LBB71_20
.p2align 4, 0x90
LBB71_18: ## Parent Loop BB71_2 Depth=1
## => This Inner Loop Header: Depth=2
## 8-byte tail: broadcast the byte into the low qword and store it.
movzbl (%rsi,%rdx), %ecx
movd %ecx, %xmm1
punpcklbw %xmm1, %xmm1 ## xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
pshuflw $0, %xmm1, %xmm1 ## xmm1 = xmm1[0,0,0,0,4,5,6,7]
movq %xmm1, (%r15,%rdi)
addq $8, %rdi
cmpq %rdi, %r11
jne LBB71_18
## %bb.19: ## in Loop: Header=BB71_2 Depth=1
movq %r11, %rbx
cmpq %r9, %r11
je LBB71_25
.p2align 4, 0x90
LBB71_20: ## in Loop: Header=BB71_2 Depth=1
## Scalar cleanup: rbx = bytes already written, rdi = r9 - rbx - 1.
movq %rbx, %rdi
notq %rdi
addq %r9, %rdi
testq %r12, %r12
je LBB71_23
## %bb.21: ## in Loop: Header=BB71_2 Depth=1
movq %r12, %rcx
.p2align 4, 0x90
LBB71_22: ## Parent Loop BB71_2 Depth=1
## => This Inner Loop Header: Depth=2
## One byte at a time until the remainder count (r12 = r9 & 3) is used up.
movzbl (%rsi,%rdx), %eax
movb %al, (%r15,%rbx)
incq %rbx
decq %rcx
jne LBB71_22
LBB71_23: ## in Loop: Header=BB71_2 Depth=1
cmpq $3, %rdi
jb LBB71_25
.p2align 4, 0x90
LBB71_24: ## Parent Loop BB71_2 Depth=1
## => This Inner Loop Header: Depth=2
## 4x-unrolled scalar fill for the remaining multiple-of-4 span.
movzbl (%rsi,%rdx), %eax
movb %al, (%r15,%rbx)
movzbl (%rsi,%rdx), %eax
movb %al, 1(%r15,%rbx)
movzbl (%rsi,%rdx), %eax
movb %al, 2(%r15,%rbx)
movzbl (%rsi,%rdx), %eax
movb %al, 3(%r15,%rbx)
addq $4, %rbx
cmpq %rbx, %r9
jne LBB71_24
jmp LBB71_25
LBB71_11: ## in Loop: Header=BB71_2 Depth=1
xorl %ebx, %ebx
testb $1, -80(%rbp) ## 1-byte Folded Reload
jne LBB71_15
jmp LBB71_16
LBB71_26:
## Return the output pointer passed in rdi.
movq -56(%rbp), %rax ## 8-byte Reload
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.p2align 4, 0x90 ## -- Begin function process_marker
##---------------------------------------------------------------------------
## process_marker -- handle one JPEG-style marker segment.
## ABI: System V AMD64.  Stack-protector guarded (___stack_chk_guard).
## In:  rdi = decoder context (kept in r13); esi = marker byte.
## Out: eax = 1 on success, 0 on failure (with _failure_reason set to a
##      static string for the error paths).
## Context offsets observed in this function:
##   +16  FILE* -- when non-NULL, bytes come from fgetc (EOF reads as 0);
##        when NULL, bytes come from the memory buffer [+24 cur, +32 end)
##   +40 / +6760  per-table Huffman build area (1680 bytes per table; the
##        decoded symbol bytes land at table+1064 / table+7784)
##   +13480 64-byte-per-table area written through _dezigzag ordering
##        (presumably quantization tables -- confirm against C source)
##   +14084 16-bit value stored by the 0xDD case (restart-interval-like)
## Dispatch: markers 194..221 (0xC2..0xDD) go through jump table LJTI72_0
## on (esi - 194); 255 and 254 (0xFE/COM) and the 0xE0-0xEF family are
## skipped by reading a 2-byte big-endian length and seeking/advancing
## length-2 bytes.  Unrecognized markers fail with a static error string.
##---------------------------------------------------------------------------
_process_marker: ## @process_marker
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $88, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
## kill: def $esi killed $esi def $rsi
movq %rdi, %r13
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -48(%rbp) ## stack canary
leal -194(%rsi), %eax
cmpl $27, %eax
ja LBB72_3 ## marker outside 0xC2..0xDD: not in the jump table
## %bb.1:
leaq LJTI72_0(%rip), %rcx
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
LBB72_2:
## Marker 0xC2: unsupported -- fail with static message L_.str.16.
leaq L_.str.16(%rip), %rax
jmp LBB72_95
LBB72_3:
cmpl $255, %esi
jne LBB72_5
## %bb.4:
## Marker 0xFF is invalid here -- fail with L_.str.15.
leaq L_.str.15(%rip), %rax
jmp LBB72_95
LBB72_5:
cmpl $254, %esi
je LBB72_17
## %bb.6:
## 0xE0-0xEF (APPn-style) and 0xFE (comment): skippable segments.
andl $-16, %esi
cmpl $224, %esi
jne LBB72_90
LBB72_17:
## Skip segment: read 16-bit big-endian length, then advance length-2.
movq 16(%r13), %rdi
testq %rdi, %rdi
je LBB72_87
## %bb.18:
callq _fgetc
movl %eax, %r14d
xorl %ebx, %ebx
cmpl $-1, %eax
cmovel %ebx, %r14d ## EOF reads as 0
movq 16(%r13), %rdi
testq %rdi, %rdi
je LBB72_96
## %bb.19:
callq _fgetc
cmpl $-1, %eax
cmovnel %eax, %ebx
movq 16(%r13), %rdi
shll $8, %r14d
leal (%r14,%rbx), %eax
addl $-2, %eax ## length field includes its own 2 bytes
testq %rdi, %rdi
je LBB72_100
## %bb.20:
movslq %eax, %rsi
movl $1, %edx ## SEEK_CUR
callq _fseek
jmp LBB72_105
LBB72_8:
## Jump-table case for marker 0xC4 (Huffman-table segment).
movq 16(%r13), %rdi
testq %rdi, %rdi
je LBB72_21
## %bb.9:
callq _fgetc
movl %eax, %r14d
xorl %ebx, %ebx
cmpl $-1, %eax
cmovel %ebx, %r14d
movq 16(%r13), %rdi
testq %rdi, %rdi
je LBB72_27
## %bb.10:
callq _fgetc
cmpl $-1, %eax
cmovnel %eax, %ebx
jmp LBB72_30
LBB72_11:
## Jump-table case for marker 0xDB (table data read through _dezigzag).
movq 16(%r13), %rdi
testq %rdi, %rdi
je LBB72_23
## %bb.12:
callq _fgetc
movl %eax, %r14d
xorl %ebx, %ebx
cmpl $-1, %eax
cmovel %ebx, %r14d
movq 16(%r13), %rdi
testq %rdi, %rdi
je LBB72_58
## %bb.13:
callq _fgetc
cmpl $-1, %eax
cmovnel %eax, %ebx
jmp LBB72_61
LBB72_14:
## Jump-table case for marker 0xDD (fixed-length 4 payload -> +14084).
movq 16(%r13), %rdi
testq %rdi, %rdi
je LBB72_25
## %bb.15:
callq _fgetc
movl %eax, %r14d
xorl %ebx, %ebx
cmpl $-1, %eax
cmovel %ebx, %r14d
movq 16(%r13), %rdi
testq %rdi, %rdi
je LBB72_79
## %bb.16:
callq _fgetc
cmpl $-1, %eax
cmovnel %eax, %ebx
jmp LBB72_82
LBB72_21:
## Memory-buffer variants of the byte reads above (bounded by +32).
movq 24(%r13), %rax
movq 32(%r13), %rcx
xorl %r14d, %r14d
cmpq %rcx, %rax
jae LBB72_28
## %bb.22:
leaq 1(%rax), %rdx
movq %rdx, 24(%r13)
movzbl (%rax), %r14d
movq %rdx, %rax
jmp LBB72_28
LBB72_23:
movq 24(%r13), %rax
movq 32(%r13), %rcx
xorl %r14d, %r14d
cmpq %rcx, %rax
jae LBB72_59
## %bb.24:
leaq 1(%rax), %rdx
movq %rdx, 24(%r13)
movzbl (%rax), %r14d
movq %rdx, %rax
jmp LBB72_59
LBB72_25:
movq 24(%r13), %rax
movq 32(%r13), %rcx
xorl %r14d, %r14d
cmpq %rcx, %rax
jae LBB72_80
## %bb.26:
leaq 1(%rax), %rdx
movq %rdx, 24(%r13)
movzbl (%rax), %r14d
movq %rdx, %rax
jmp LBB72_80
LBB72_27:
movq 24(%r13), %rax
movq 32(%r13), %rcx
LBB72_28:
xorl %ebx, %ebx
cmpq %rcx, %rax
jae LBB72_30
## %bb.29:
leaq 1(%rax), %rcx
movq %rcx, 24(%r13)
movzbl (%rax), %ebx
LBB72_30:
## ebx = segment length - 2 (bytes of table payload remaining).
shll $8, %r14d
addl %r14d, %ebx
addl $-2, %ebx
jmp LBB72_32
.p2align 4, 0x90
LBB72_31: ## in Loop: Header=BB72_32 Depth=1
movl -120(%rbp), %ebx ## 4-byte Reload
subl %r14d, %ebx ## consume the r14 symbol bytes just copied
LBB72_32: ## =>This Loop Header: Depth=1
## Child Loop BB72_42 Depth 2
## Child Loop BB72_54 Depth 2
## One Huffman table per iteration until the payload is exhausted.
testl %ebx, %ebx
jle LBB72_57
## %bb.33: ## in Loop: Header=BB72_32 Depth=1
## Read the table header byte: high nibble = class, low nibble = id.
movq 16(%r13), %rdi
testq %rdi, %rdi
je LBB72_35
## %bb.34: ## in Loop: Header=BB72_32 Depth=1
callq _fgetc
movl %eax, %ecx
cmpl $-1, %eax
movl $0, %eax
cmovel %eax, %ecx
jmp LBB72_37
.p2align 4, 0x90
LBB72_35: ## in Loop: Header=BB72_32 Depth=1
movq 24(%r13), %rax
xorl %ecx, %ecx
cmpq 32(%r13), %rax
jae LBB72_37
## %bb.36: ## in Loop: Header=BB72_32 Depth=1
leaq 1(%rax), %rcx
movq %rcx, 24(%r13)
movzbl (%rax), %ecx
.p2align 4, 0x90
LBB72_37: ## in Loop: Header=BB72_32 Depth=1
cmpl $31, %ecx
jg LBB72_89 ## class > 1 or id out of range: corrupt header
## %bb.38: ## in Loop: Header=BB72_32 Depth=1
movl %ecx, %r15d
andl $15, %r15d
cmpl $3, %r15d
ja LBB72_89 ## table id must be 0..3
## %bb.39: ## in Loop: Header=BB72_32 Depth=1
movl %ecx, -124(%rbp) ## 4-byte Spill
xorl %r12d, %r12d
xorl %r14d, %r14d
jmp LBB72_42
.p2align 4, 0x90
LBB72_40: ## in Loop: Header=BB72_42 Depth=2
callq _fgetc
cmpl $-1, %eax
movl $0, %ecx
cmovel %ecx, %eax
LBB72_41: ## in Loop: Header=BB72_42 Depth=2
## Store per-length code count; r14 accumulates total symbol count.
movl %eax, -112(%rbp,%r12,4)
addl %eax, %r14d
incq %r12
cmpq $16, %r12
je LBB72_45
LBB72_42: ## Parent Loop BB72_32 Depth=1
## => This Inner Loop Header: Depth=2
## Read the 16 code-length counts into the local array at -112(%rbp).
movq 16(%r13), %rdi
testq %rdi, %rdi
jne LBB72_40
## %bb.43: ## in Loop: Header=BB72_42 Depth=2
movq 24(%r13), %rcx
xorl %eax, %eax
cmpq 32(%r13), %rcx
jae LBB72_41
## %bb.44: ## in Loop: Header=BB72_42 Depth=2
leaq 1(%rcx), %rax
movq %rax, 24(%r13)
movzbl (%rcx), %eax
jmp LBB72_41
.p2align 4, 0x90
LBB72_45: ## in Loop: Header=BB72_32 Depth=1
## Pick the table slot: class 0 tables live at ctx+40, class 1 at
## ctx+6760, each 1680 bytes, indexed by the low-nibble table id.
movl %ebx, -120(%rbp) ## 4-byte Spill
movl %r15d, %eax
cmpl $15, -124(%rbp) ## 4-byte Folded Reload
ja LBB72_48
## %bb.46: ## in Loop: Header=BB72_32 Depth=1
imulq $1680, %rax, %rbx ## imm = 0x690
leaq (%rbx,%r13), %rdi
addq $40, %rdi
leaq -112(%rbp), %rsi
callq _build_huffman
testl %eax, %eax
je LBB72_90 ## build_huffman already set failure_reason
## %bb.47: ## in Loop: Header=BB72_32 Depth=1
leaq (%rbx,%r13), %r15
addq $1064, %r15 ## imm = 0x428
jmp LBB72_50
.p2align 4, 0x90
LBB72_48: ## in Loop: Header=BB72_32 Depth=1
imulq $1680, %rax, %rbx ## imm = 0x690
leaq (%rbx,%r13), %rdi
addq $6760, %rdi ## imm = 0x1A68
leaq -112(%rbp), %rsi
callq _build_huffman
testl %eax, %eax
je LBB72_90
## %bb.49: ## in Loop: Header=BB72_32 Depth=1
leaq (%rbx,%r13), %r15
addq $7784, %r15 ## imm = 0x1E68
LBB72_50: ## in Loop: Header=BB72_32 Depth=1
## Account for the 17 header bytes (1 id + 16 counts) just consumed.
addl $-17, -120(%rbp) ## 4-byte Folded Spill
testl %r14d, %r14d
jle LBB72_31
## %bb.51: ## in Loop: Header=BB72_32 Depth=1
movl %r14d, %r12d
xorl %ebx, %ebx
jmp LBB72_54
.p2align 4, 0x90
LBB72_52: ## in Loop: Header=BB72_54 Depth=2
callq _fgetc
cmpl $-1, %eax
movl $0, %ecx
cmovel %ecx, %eax
LBB72_53: ## in Loop: Header=BB72_54 Depth=2
movb %al, (%r15,%rbx) ## copy symbol byte into the table
incq %rbx
cmpq %rbx, %r12
je LBB72_31
LBB72_54: ## Parent Loop BB72_32 Depth=1
## => This Inner Loop Header: Depth=2
## Copy the r14 symbol values that follow the counts.
movq 16(%r13), %rdi
testq %rdi, %rdi
jne LBB72_52
## %bb.55: ## in Loop: Header=BB72_54 Depth=2
movq 24(%r13), %rcx
xorl %eax, %eax
cmpq 32(%r13), %rcx
jae LBB72_53
## %bb.56: ## in Loop: Header=BB72_54 Depth=2
leaq 1(%rcx), %rax
movq %rax, 24(%r13)
movzbl (%rcx), %eax
jmp LBB72_53
LBB72_57:
## Success iff the payload was consumed exactly (remaining == 0).
testl %ebx, %ebx
sete %al
jmp LBB72_106
LBB72_58:
movq 24(%r13), %rax
movq 32(%r13), %rcx
LBB72_59:
xorl %ebx, %ebx
cmpq %rcx, %rax
jae LBB72_61
## %bb.60:
leaq 1(%rax), %rcx
movq %rcx, 24(%r13)
movzbl (%rax), %ebx
LBB72_61:
## 0xDB path: ebx = payload length (length field minus its 2 bytes).
shll $8, %r14d
addl %r14d, %ebx
addl $-2, %ebx
testl %ebx, %ebx
jle LBB72_77
## %bb.62:
xorl %r15d, %r15d
leaq _dezigzag(%rip), %r12
jmp LBB72_64
.p2align 4, 0x90
LBB72_63: ## in Loop: Header=BB72_64 Depth=1
## Each table consumes 65 bytes (1 header + 64 entries).
movq -120(%rbp), %rcx ## 8-byte Reload
leal -65(%rcx), %eax
cmpl $65, %ecx
movl %eax, %ebx
jle LBB72_78
LBB72_64: ## =>This Loop Header: Depth=1
## Child Loop BB72_74 Depth 2
## Read the table header byte (precision nibble + table id nibble).
movq 16(%r13), %rdi
testq %rdi, %rdi
je LBB72_67
## %bb.65: ## in Loop: Header=BB72_64 Depth=1
callq _fgetc
cmpl $-1, %eax
jne LBB72_69
## %bb.66: ## in Loop: Header=BB72_64 Depth=1
xorl %eax, %eax
jmp LBB72_71
.p2align 4, 0x90
LBB72_67: ## in Loop: Header=BB72_64 Depth=1
movq 24(%r13), %rcx
xorl %eax, %eax
cmpq 32(%r13), %rcx
jae LBB72_71
## %bb.68: ## in Loop: Header=BB72_64 Depth=1
leaq 1(%rcx), %rax
movq %rax, 24(%r13)
movzbl (%rcx), %eax
LBB72_69: ## in Loop: Header=BB72_64 Depth=1
cmpl $16, %eax
jae LBB72_93 ## high nibble must be 0 (8-bit entries)
## %bb.70: ## in Loop: Header=BB72_64 Depth=1
andl $15, %eax
cmpl $3, %eax
ja LBB72_94 ## table id must be 0..3
LBB72_71: ## in Loop: Header=BB72_64 Depth=1
movq %rbx, -120(%rbp) ## 8-byte Spill
movl %eax, %ebx ## ebx = table id
xorl %r14d, %r14d
jmp LBB72_74
.p2align 4, 0x90
LBB72_72: ## in Loop: Header=BB72_74 Depth=2
callq _fgetc
cmpl $-1, %eax
cmovel %r15d, %eax
LBB72_73: ## in Loop: Header=BB72_74 Depth=2
## Store entry k at ctx + 13480 + id*64 + dezigzag[k].
movzbl (%r14,%r12), %ecx
movq %rbx, %rdx
shlq $6, %rdx
addq %r13, %rdx
movb %al, 13480(%rcx,%rdx)
incq %r14
cmpq $64, %r14
je LBB72_63
LBB72_74: ## Parent Loop BB72_64 Depth=1
## => This Inner Loop Header: Depth=2
movq 16(%r13), %rdi
testq %rdi, %rdi
jne LBB72_72
## %bb.75: ## in Loop: Header=BB72_74 Depth=2
movq 24(%r13), %rcx
xorl %eax, %eax
cmpq 32(%r13), %rcx
jae LBB72_73
## %bb.76: ## in Loop: Header=BB72_74 Depth=2
leaq 1(%rcx), %rax
movq %rax, 24(%r13)
movzbl (%rcx), %eax
jmp LBB72_73
LBB72_77:
movl %ebx, %eax
LBB72_78:
## Success iff the payload length came out exactly zero.
testl %eax, %eax
sete %al
jmp LBB72_106
LBB72_79:
movq 24(%r13), %rax
movq 32(%r13), %rcx
LBB72_80:
xorl %ebx, %ebx
cmpq %rcx, %rax
jae LBB72_82
## %bb.81:
leaq 1(%rax), %rcx
movq %rcx, 24(%r13)
movzbl (%rax), %ebx
LBB72_82:
## 0xDD path: segment length must be exactly 4.
shll $8, %r14d
addl %ebx, %r14d
cmpl $4, %r14d
jne LBB72_86
## %bb.83:
movq 16(%r13), %rdi
testq %rdi, %rdi
je LBB72_91
## %bb.84:
callq _fgetc
movl %eax, %r14d
xorl %ebx, %ebx
cmpl $-1, %eax
cmovel %ebx, %r14d
movq 16(%r13), %rdi
testq %rdi, %rdi
je LBB72_101
## %bb.85:
callq _fgetc
cmpl $-1, %eax
cmovnel %eax, %ebx
jmp LBB72_104
LBB72_86:
leaq L_.str.17(%rip), %rax
jmp LBB72_95
LBB72_87:
movq 24(%r13), %rax
movq 32(%r13), %rcx
xorl %r14d, %r14d
cmpq %rcx, %rax
jae LBB72_97
## %bb.88:
leaq 1(%rax), %rdx
movq %rdx, 24(%r13)
movzbl (%rax), %r14d
movq %rdx, %rax
jmp LBB72_97
LBB72_89:
leaq L_.str.20(%rip), %rax
movq %rax, _failure_reason(%rip)
LBB72_90:
xorl %eax, %eax
jmp LBB72_106
LBB72_91:
movq 24(%r13), %rax
movq 32(%r13), %rcx
xorl %r14d, %r14d
cmpq %rcx, %rax
jae LBB72_102
## %bb.92:
leaq 1(%rax), %rdx
movq %rdx, 24(%r13)
movzbl (%rax), %r14d
movq %rdx, %rax
jmp LBB72_102
LBB72_93:
leaq L_.str.18(%rip), %rax
jmp LBB72_95
LBB72_94:
leaq L_.str.19(%rip), %rax
LBB72_95:
## Common failure exit: publish the message and return 0.
movq %rax, _failure_reason(%rip)
xorl %eax, %eax
jmp LBB72_106
LBB72_96:
movq 24(%r13), %rax
movq 32(%r13), %rcx
LBB72_97:
xorl %edx, %edx
cmpq %rcx, %rax
jae LBB72_99
## %bb.98:
leaq 1(%rax), %rcx
movq %rcx, 24(%r13)
movzbl (%rax), %edx
LBB72_99:
shll $8, %r14d
leal (%r14,%rdx), %eax
addl $-2, %eax
LBB72_100:
## Memory-buffer skip: just advance the cursor by length-2 bytes.
cltq
addq %rax, 24(%r13)
jmp LBB72_105
LBB72_101:
movq 24(%r13), %rax
movq 32(%r13), %rcx
LBB72_102:
xorl %ebx, %ebx
cmpq %rcx, %rax
jae LBB72_104
## %bb.103:
leaq 1(%rax), %rcx
movq %rcx, 24(%r13)
movzbl (%rax), %ebx
LBB72_104:
## Store the 16-bit big-endian payload at ctx+14084.
shll $8, %r14d
addl %ebx, %r14d
movl %r14d, 14084(%r13)
LBB72_105:
movb $1, %al ## success
LBB72_106:
## Stack-canary check before returning al (zero-extended).
movq ___stack_chk_guard@GOTPCREL(%rip), %rcx
movq (%rcx), %rcx
cmpq -48(%rbp), %rcx
jne LBB72_108
## %bb.107:
movzbl %al, %eax
addq $88, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB72_108:
callq ___stack_chk_fail
.cfi_endproc
.p2align 2, 0x90
.data_region jt32
## Jump table for markers 0xC2..0xDD (index = marker - 194).
.set L72_0_set_2, LBB72_2-LJTI72_0
.set L72_0_set_5, LBB72_5-LJTI72_0
.set L72_0_set_8, LBB72_8-LJTI72_0
.set L72_0_set_11, LBB72_11-LJTI72_0
.set L72_0_set_14, LBB72_14-LJTI72_0
LJTI72_0:
.long L72_0_set_2
.long L72_0_set_5
.long L72_0_set_8
.long L72_0_set_5
.long L72_0_set_5
.long L72_0_set_5
.long L72_0_set_5
.long L72_0_set_5
.long L72_0_set_5
.long L72_0_set_5
.long L72_0_set_5
.long L72_0_set_5
.long L72_0_set_5
.long L72_0_set_5
.long L72_0_set_5
.long L72_0_set_5
.long L72_0_set_5
.long L72_0_set_5
.long L72_0_set_5
.long L72_0_set_5
.long L72_0_set_5
.long L72_0_set_5
.long L72_0_set_5
.long L72_0_set_5
.long L72_0_set_5
.long L72_0_set_11
.long L72_0_set_5
.long L72_0_set_14
.end_data_region
## -- End function
.p2align 4, 0x90 ## -- Begin function get8
##---------------------------------------------------------------------------
## get8 -- read one byte from the decoder's input.
## ABI: System V AMD64.  Leaf apart from the fgetc call.
## In:  rdi = decoder context.
## Out: eax = next byte (0 on EOF / end of buffer).
## If ctx+16 holds a FILE*, the byte comes from fgetc (EOF -> 0).
## Otherwise the byte comes from the memory cursor at ctx+24, bounded by
## the end pointer at ctx+32; the cursor is advanced on success.
##---------------------------------------------------------------------------
_get8: ## @get8
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq 16(%rdi), %rax
testq %rax, %rax
je LBB73_2
## %bb.1:
## FILE* path: fgetc, mapping EOF (-1) to 0.
movq %rax, %rdi
callq _fgetc
movl %eax, %ecx
xorl %eax, %eax
cmpl $-1, %ecx
cmovnel %ecx, %eax
popq %rbp
retq
LBB73_2:
## Memory-buffer path: return 0 once cursor reaches the end pointer.
movq 24(%rdi), %rcx
xorl %eax, %eax
cmpq 32(%rdi), %rcx
jae LBB73_4
## %bb.3:
leaq 1(%rcx), %rax
movq %rax, 24(%rdi)
movzbl (%rcx), %eax
LBB73_4:
popq %rbp
retq
.cfi_endproc
## -- End function
.p2align 4, 0x90 ## -- Begin function decode_block
##---------------------------------------------------------------------------
## decode_block -- entropy-decode one 64-coefficient block.
## ABI: System V AMD64.
## In:  rdi = decoder context (r13)
##      rsi = output block of 64 int16 coefficients (r15), zeroed here
##      rdx = Huffman table for the first (DC) symbol
##      rcx = Huffman table for the remaining (AC) symbols (spilled)
##      r8d = component index; the running DC predictor lives at
##            ctx + 13784 + index*72
## Out: eax = 1 on success; 0 with _failure_reason = L_.str.12 when
##      _decode reports a bad code (negative return).
## Flow: decode DC size symbol, receive+extend that many bits (the
## extend step uses _bmask and the sign-bit test at bb.4), add to the
## predictor and store at out[0]; then loop decoding AC run/size pairs:
## size 0 with run 15 (raw symbol 0xF0) skips 16 zeros, size 0 otherwise
## ends the block, and other symbols receive+extend and store through
## _dezigzag ordering.  Bit state: ctx+14048 = bit buffer,
## ctx+14052 = valid-bit count, refilled via _grow_buffer_unsafe.
##---------------------------------------------------------------------------
_decode_block: ## @decode_block
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %r8d, %r12d
movq %rcx, -48(%rbp) ## 8-byte Spill
movq %rsi, %r15
movq %rdi, %r13
movq %rdx, %rsi
callq _decode ## DC size symbol
testl %eax, %eax
js LBB74_18 ## negative => bad huffman code
## %bb.1:
## Zero all 128 bytes (64 x int16) of the output block.
xorps %xmm0, %xmm0
movups %xmm0, 112(%r15)
movups %xmm0, 96(%r15)
movups %xmm0, 80(%r15)
movups %xmm0, 64(%r15)
movups %xmm0, 48(%r15)
movups %xmm0, 32(%r15)
movups %xmm0, 16(%r15)
movups %xmm0, (%r15)
je LBB74_5 ## size 0: DC diff is 0
## %bb.2:
movl %eax, %ebx
movl 14052(%r13), %eax
cmpl %ebx, %eax
jge LBB74_4
## %bb.3:
## Not enough bits buffered for the receive step: refill.
movq %r13, %rdi
callq _grow_buffer_unsafe
movl 14052(%r13), %eax
LBB74_4:
## Receive ebx bits, then extend: if the top received bit is 0 the
## value is negative and (-1 << ebx) + 1 is added (sign extension).
movl 14048(%r13), %edx
subl %ebx, %eax
movl %eax, %ecx
shrl %cl, %edx
movl %ebx, %ecx
leaq _bmask(%rip), %rsi
andl (%rsi,%rcx,4), %edx
leal -1(%rbx), %ecx
movl %edx, %esi
## kill: def $cl killed $cl killed $ecx
shrl %cl, %esi
movl $-1, %edi
movl %ebx, %ecx
shll %cl, %edi
movl %eax, 14052(%r13)
incl %edi
xorl %eax, %eax
testl %esi, %esi
cmovel %edi, %eax
addl %edx, %eax
jmp LBB74_6
LBB74_5:
xorl %eax, %eax
LBB74_6:
## Update the per-component DC predictor and store out[0].
movslq %r12d, %rcx
leaq (%rcx,%rcx,8), %rcx
addl 13784(%r13,%rcx,8), %eax
movl %eax, 13784(%r13,%rcx,8)
movw %ax, (%r15)
movl $1, %r14d ## k = 1: first AC coefficient
jmp LBB74_9
.p2align 4, 0x90
LBB74_7: ## in Loop: Header=BB74_9 Depth=1
xorl %edx, %edx
LBB74_8: ## in Loop: Header=BB74_9 Depth=1
## Store the extended AC value at the de-zigzagged position.
addl %eax, %edx
movslq %ebx, %rax
incl %ebx
leaq _dezigzag(%rip), %rcx
movzbl (%rax,%rcx), %eax
movw %dx, (%r15,%rax,2)
movl %ebx, %r14d
cmpl $64, %r14d
jge LBB74_20
LBB74_9: ## =>This Inner Loop Header: Depth=1
## Decode next AC run/size symbol.
movq %r13, %rdi
movq -48(%rbp), %rsi ## 8-byte Reload
callq _decode
testl %eax, %eax
js LBB74_18
## %bb.10: ## in Loop: Header=BB74_9 Depth=1
movl %eax, %ebx
movl %eax, %r12d
andl $15, %r12d ## size nibble
je LBB74_15
## %bb.11: ## in Loop: Header=BB74_9 Depth=1
shrl $4, %ebx ## run nibble (zero-run before this coeff)
movl 14052(%r13), %ecx
cmpl %r12d, %ecx
jge LBB74_13
## %bb.12: ## in Loop: Header=BB74_9 Depth=1
movq %r13, %rdi
callq _grow_buffer_unsafe
movl 14052(%r13), %ecx
LBB74_13: ## in Loop: Header=BB74_9 Depth=1
## Receive r12 bits and extend, as in the DC path.
addl %r14d, %ebx
movl 14048(%r13), %eax
subl %r12d, %ecx
shrl %cl, %eax
movl %r12d, %edx
leaq _bmask(%rip), %rsi
andl (%rsi,%rdx,4), %eax
movl %ecx, 14052(%r13)
leal -1(%r12), %ecx
movl %eax, %esi
## kill: def $cl killed $cl killed $ecx
shrl %cl, %esi
movl $-1, %edx
movl %r12d, %ecx
shll %cl, %edx
testl %esi, %esi
jne LBB74_7
## %bb.14: ## in Loop: Header=BB74_9 Depth=1
orl $1, %edx ## (-1 << size) + 1 correction for negative values
jmp LBB74_8
.p2align 4, 0x90
LBB74_15: ## in Loop: Header=BB74_9 Depth=1
## Size 0: symbol 0xF0 means "skip 16 zeros"; anything else = EOB.
cmpl $240, %ebx
jne LBB74_20
## %bb.16: ## in Loop: Header=BB74_9 Depth=1
addl $16, %r14d
cmpl $64, %r14d
jl LBB74_9
LBB74_20:
movl $1, %eax
jmp LBB74_19
LBB74_18:
leaq L_.str.12(%rip), %rax
movq %rax, _failure_reason(%rip)
xorl %eax, %eax
LBB74_19:
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.p2align 4, 0x90 ## -- Begin function idct_block
##---------------------------------------------------------------------------
## idct_block -- 8x8 inverse DCT with dequantization and byte clamping.
## ABI: System V AMD64.  Stack-protector guarded; uses a 64-int scratch
## array at -304(%rbp)..-48(%rbp).
## In:  rdi = output byte pointer (spilled to -312)
##      esi = output row stride in bytes (spilled to -324)
##      rdx = 64 int16 input coefficients (spilled to -336)
##      rcx = 64-byte per-coefficient multiplier table (spilled to -320);
##            each coefficient is scaled by the matching byte before the
##            transform (presumably a quant table -- confirm vs C source)
## Out: nothing returned; writes an 8x8 block of bytes at out + row*stride.
## Pass 1 (LBB75_1): one column per iteration.  If all AC terms of the
## column are zero (LBB75_14) the DC value*4 is broadcast to the whole
## column; otherwise the fixed-point butterfly (constants 2217, -7567,
## 3135, 4816, -3685, -10497, 8410, 6149, -1597, 12586, -8034, 1223) is
## applied with +512 rounding and an arithmetic >>10.
## Pass 2 (LBB75_11): one row per iteration from the scratch array, same
## butterfly with +65536 rounding and >>17, then each result is biased
## by +128 and clamped branchlessly to [0,255] (the sar/movsbl/setge
## pattern: in-range values take (x>>17)+128, out-of-range map to 0 or
## 255 by sign) before the bytes are stored.
##---------------------------------------------------------------------------
_idct_block: ## @idct_block
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $296, %rsp ## imm = 0x128
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rcx, -320(%rbp) ## 8-byte Spill
movl %esi, -324(%rbp) ## 4-byte Spill
movq %rdi, -312(%rbp) ## 8-byte Spill
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -48(%rbp) ## stack canary
xorl %r14d, %r14d
movq %rdx, -336(%rbp) ## 8-byte Spill
.p2align 4, 0x90
LBB75_1: ## =>This Inner Loop Header: Depth=1
## Column pass, r14 = column index 0..7.  Check rows 1..7 for all-zero.
movswl 16(%rdx,%r14,2), %r8d
movzwl 32(%rdx,%r14,2), %eax
movl %r8d, %ebx
orw %ax, %bx
jne LBB75_8
## %bb.2: ## in Loop: Header=BB75_1 Depth=1
cmpw $0, 48(%rdx,%r14,2)
jne LBB75_7
## %bb.3: ## in Loop: Header=BB75_1 Depth=1
cmpw $0, 64(%rdx,%r14,2)
jne LBB75_7
## %bb.4: ## in Loop: Header=BB75_1 Depth=1
cmpw $0, 80(%rdx,%r14,2)
jne LBB75_7
## %bb.5: ## in Loop: Header=BB75_1 Depth=1
cmpw $0, 96(%rdx,%r14,2)
jne LBB75_7
## %bb.6: ## in Loop: Header=BB75_1 Depth=1
cmpw $0, 112(%rdx,%r14,2)
je LBB75_14 ## AC all zero: DC-only shortcut
.p2align 4, 0x90
LBB75_7: ## in Loop: Header=BB75_1 Depth=1
xorl %eax, %eax
LBB75_8: ## in Loop: Header=BB75_1 Depth=1
## Full fixed-point column butterfly; each input coefficient is first
## multiplied by its byte from the -320 table (dequantization).
cwtl
movq -320(%rbp), %rcx ## 8-byte Reload
movzbl 16(%rcx,%r14), %r10d
imull %eax, %r10d
movswl 96(%rdx,%r14,2), %r9d
movzbl 48(%rcx,%r14), %eax
imull %r9d, %eax
leal (%rax,%r10), %ebx
imull $2217, %ebx, %ebx ## imm = 0x8A9
imull $-7567, %eax, %r9d ## imm = 0xE271
addl %ebx, %r9d
imull $3135, %r10d, %r10d ## imm = 0xC3F
addl %ebx, %r10d
movswl (%rdx,%r14,2), %eax
movzbl (%rcx,%r14), %r11d
imull %eax, %r11d
movswl 64(%rdx,%r14,2), %ebx
movzbl 32(%rcx,%r14), %eax
imull %ebx, %eax
leal (%rax,%r11), %ebx
shll $12, %ebx
subl %eax, %r11d
shll $12, %r11d
leal (%rbx,%r10), %r15d
addl $512, %r15d ## imm = 0x200
## kill: def $ebx killed $ebx killed $rbx def $rbx
subl %r10d, %ebx
leal (%r11,%r9), %r10d
addl $512, %r10d ## imm = 0x200
## kill: def $r11d killed $r11d killed $r11 def $r11
subl %r9d, %r11d
movswl 112(%rdx,%r14,2), %r9d
movzbl 56(%rcx,%r14), %r12d
imull %r9d, %r12d
movswl 80(%rdx,%r14,2), %r9d
movzbl 40(%rcx,%r14), %eax
imull %r9d, %eax
movswl 48(%rdx,%r14,2), %r9d
movzbl 24(%rcx,%r14), %r13d
imull %r9d, %r13d
movzbl 8(%rcx,%r14), %r9d
imull %r8d, %r9d
leal (%r12,%r13), %r8d
leal (%r9,%rax), %esi
leal (%r9,%r12), %edi
leal (%rax,%r13), %edx
leal (%rsi,%r8), %ecx
imull $4816, %ecx, %ecx ## imm = 0x12D0
imull $-3685, %edi, %edi ## imm = 0xF19B
addl %ecx, %edi
imull $-10497, %edx, %edx ## imm = 0xD6FF
addl %ecx, %edx
imull $8410, %eax, %eax ## imm = 0x20DA
imull $6149, %r9d, %ecx ## imm = 0x1805
imull $-1597, %esi, %esi ## imm = 0xF9C3
addl %esi, %ecx
addl %esi, %eax
imull $12586, %r13d, %esi ## imm = 0x312A
imull $-8034, %r8d, %r8d ## imm = 0xE09E
addl %r8d, %esi
addl %edx, %esi
addl %edx, %eax
imull $1223, %r12d, %edx ## imm = 0x4C7
addl %r8d, %edx
addl %edi, %ecx
addl %edi, %edx
## Round (+512) and descale (>>10) into the 8x8 scratch array; the
## eight rows of the column land 32 bytes (8 ints) apart.
leal (%rcx,%r15), %edi
sarl $10, %edi
movl %edi, -304(%rbp,%r14,4)
subl %ecx, %r15d
sarl $10, %r15d
movl %r15d, -80(%rbp,%r14,4)
leal (%rsi,%r10), %ecx
sarl $10, %ecx
movl %ecx, -272(%rbp,%r14,4)
subl %esi, %r10d
sarl $10, %r10d
movl %r10d, -112(%rbp,%r14,4)
leal (%r11,%rax), %ecx
addl $512, %ecx ## imm = 0x200
sarl $10, %ecx
movl %ecx, -240(%rbp,%r14,4)
leal 512(%r11), %ecx
subl %eax, %ecx
sarl $10, %ecx
movl %ecx, -144(%rbp,%r14,4)
leal (%rbx,%rdx), %eax
addl $512, %eax ## imm = 0x200
sarl $10, %eax
movl %eax, -208(%rbp,%r14,4)
leal 512(%rbx), %eax
subl %edx, %eax
movq -336(%rbp), %rdx ## 8-byte Reload
sarl $10, %eax
movl %eax, -176(%rbp,%r14,4)
LBB75_9: ## in Loop: Header=BB75_1 Depth=1
incq %r14
cmpl $8, %r14d
jne LBB75_1
jmp LBB75_10
LBB75_14: ## in Loop: Header=BB75_1 Depth=1
## DC-only column: broadcast dequantized DC * 4 to all eight rows.
movswl (%rdx,%r14,2), %eax
movq -320(%rbp), %rcx ## 8-byte Reload
movzbl (%rcx,%r14), %ebx
imull %eax, %ebx
shll $2, %ebx
movl %ebx, -80(%rbp,%r14,4)
movl %ebx, -112(%rbp,%r14,4)
movl %ebx, -144(%rbp,%r14,4)
movl %ebx, -176(%rbp,%r14,4)
movl %ebx, -208(%rbp,%r14,4)
movl %ebx, -240(%rbp,%r14,4)
movl %ebx, -272(%rbp,%r14,4)
movl %ebx, -304(%rbp,%r14,4)
jmp LBB75_9
LBB75_10:
## Row pass setup: -320 now caches the sign-extended stride; rax walks
## the output (out+7, one stride per row); r14 steps 32 bytes per row.
movslq -324(%rbp), %rax ## 4-byte Folded Reload
movq %rax, -320(%rbp) ## 8-byte Spill
movq -312(%rbp), %rax ## 8-byte Reload
addq $7, %rax
xorl %r14d, %r14d
.p2align 4, 0x90
LBB75_11: ## =>This Inner Loop Header: Depth=1
movq %rax, -312(%rbp) ## 8-byte Spill
## Same butterfly as pass 1, fed from the scratch row.
movl -296(%rbp,%r14), %eax
movl -280(%rbp,%r14), %edx
leal (%rdx,%rax), %esi
imull $2217, %esi, %esi ## imm = 0x8A9
imull $-7567, %edx, %edx ## imm = 0xE271
addl %esi, %edx
imull $3135, %eax, %eax ## imm = 0xC3F
addl %esi, %eax
movl -288(%rbp,%r14), %esi
movl -304(%rbp,%r14), %r10d
movl -300(%rbp,%r14), %r11d
leal (%rsi,%r10), %r9d
shll $12, %r9d
subl %esi, %r10d
shll $12, %r10d
leal (%r9,%rax), %esi
addl $65536, %esi ## imm = 0x10000
## kill: def $r9d killed $r9d killed $r9 def $r9
subl %eax, %r9d
leal (%r10,%rdx), %r15d
addl $65536, %r15d ## imm = 0x10000
## kill: def $r10d killed $r10d killed $r10 def $r10
subl %edx, %r10d
movl -276(%rbp,%r14), %edi
movl -284(%rbp,%r14), %edx
movl -292(%rbp,%r14), %ebx
leal (%rbx,%rdi), %ecx
leal (%r11,%rdx), %eax
leal (%r11,%rdi), %r12d
leal (%rbx,%rdx), %r13d
leal (%rax,%rcx), %r8d
imull $4816, %r8d, %r8d ## imm = 0x12D0
imull $-3685, %r12d, %r12d ## imm = 0xF19B
addl %r8d, %r12d
imull $-10497, %r13d, %r13d ## imm = 0xD6FF
addl %r8d, %r13d
imull $8410, %edx, %r8d ## imm = 0x20DA
imull $6149, %r11d, %edx ## imm = 0x1805
imull $-1597, %eax, %eax ## imm = 0xF9C3
addl %eax, %edx
addl %eax, %r8d
imull $12586, %ebx, %ebx ## imm = 0x312A
imull $-8034, %ecx, %eax ## imm = 0xE09E
addl %eax, %ebx
addl %r13d, %ebx
addl %r13d, %r8d
imull $1223, %edi, %r11d ## imm = 0x4C7
addl %r12d, %edx
addl %eax, %r11d
addl %r12d, %r11d
## Eight outputs per row: descale (>>17), bias +128, branchless clamp
## to [0,255], store as bytes at out-7..out (rax points at column 7).
leal (%rdx,%rsi), %eax
movl %eax, %ecx
sarl $17, %ecx
cmpl $-16777216, %eax ## imm = 0xFF000000
movsbl %cl, %edi
setge %al
negb %al
cmpl %ecx, %edi
leal -128(%rdi), %ecx
movzbl %al, %eax
movzbl %cl, %ecx
cmovnel %eax, %ecx
movq -312(%rbp), %rax ## 8-byte Reload
movb %cl, -7(%rax)
subl %edx, %esi
movl %esi, %eax
sarl $17, %eax
movsbl %al, %ecx
cmpl $-16777216, %esi ## imm = 0xFF000000
leal -128(%rcx), %esi
setge %dl
negb %dl
cmpl %eax, %ecx
movzbl %dl, %eax
movzbl %sil, %esi
cmovnel %eax, %esi
leal (%rbx,%r15), %eax
movl %eax, %edx
sarl $17, %edx
cmpl $-16777216, %eax ## imm = 0xFF000000
movsbl %dl, %eax
setge %cl
negb %cl
cmpl %edx, %eax
leal -128(%rax), %eax
movzbl %cl, %ecx
movzbl %al, %eax
cmovnel %ecx, %eax
movq -312(%rbp), %rcx ## 8-byte Reload
movb %sil, (%rcx)
movq -312(%rbp), %rcx ## 8-byte Reload
movb %al, -6(%rcx)
subl %ebx, %r15d
movl %r15d, %eax
sarl $17, %eax
cmpl $-16777216, %r15d ## imm = 0xFF000000
movsbl %al, %ecx
setge %dl
negb %dl
cmpl %eax, %ecx
leal -128(%rcx), %eax
movzbl %dl, %ecx
movzbl %al, %eax
cmovnel %ecx, %eax
movq -312(%rbp), %rcx ## 8-byte Reload
movb %al, -1(%rcx)
leal (%r10,%r8), %eax
addl $65536, %eax ## imm = 0x10000
movl %eax, %ecx
sarl $17, %ecx
cmpl $-16777216, %eax ## imm = 0xFF000000
movsbl %cl, %eax
setge %dl
negb %dl
cmpl %ecx, %eax
leal -128(%rax), %eax
movzbl %dl, %ecx
movzbl %al, %eax
cmovnel %ecx, %eax
leal 65536(%r10), %ecx
subl %r8d, %ecx
movl %ecx, %edx
sarl $17, %edx
cmpl $-16777216, %ecx ## imm = 0xFF000000
movsbl %dl, %ecx
setge %bl
negb %bl
cmpl %edx, %ecx
leal -128(%rcx), %ecx
movzbl %bl, %edx
movzbl %cl, %ecx
cmovnel %edx, %ecx
movq -312(%rbp), %rdx ## 8-byte Reload
movb %al, -5(%rdx)
movq -312(%rbp), %rax ## 8-byte Reload
movb %cl, -2(%rax)
leal (%r9,%r11), %eax
addl $65536, %eax ## imm = 0x10000
movl %eax, %ecx
sarl $17, %ecx
cmpl $-16777216, %eax ## imm = 0xFF000000
movsbl %cl, %eax
setge %dl
negb %dl
cmpl %ecx, %eax
leal -128(%rax), %eax
movzbl %dl, %ecx
movzbl %al, %eax
cmovnel %ecx, %eax
movq -312(%rbp), %rcx ## 8-byte Reload
movb %al, -4(%rcx)
leal 65536(%r9), %eax
subl %r11d, %eax
movl %eax, %ecx
sarl $17, %ecx
cmpl $-16777216, %eax ## imm = 0xFF000000
movsbl %cl, %eax
setge %dl
negb %dl
cmpl %ecx, %eax
leal -128(%rax), %eax
movzbl %dl, %ecx
movzbl %al, %eax
cmovnel %ecx, %eax
movq -312(%rbp), %rcx ## 8-byte Reload
movb %al, -3(%rcx)
## Advance to the next row: +32 bytes of scratch, +stride of output.
movq -312(%rbp), %rax ## 8-byte Reload
addq $32, %r14
addq -320(%rbp), %rax ## 8-byte Folded Reload
cmpl $256, %r14d ## imm = 0x100
jne LBB75_11
## %bb.12:
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
cmpq -48(%rbp), %rax
jne LBB75_15
## %bb.13:
addq $296, %rsp ## imm = 0x128
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB75_15:
callq ___stack_chk_fail
.cfi_endproc
## -- End function
.p2align 4, 0x90 ## -- Begin function grow_buffer_unsafe
##---------------------------------------------------------------------------
## grow_buffer_unsafe -- refill the entropy-decoder bit buffer.
## ABI: System V AMD64.
## In:  rdi = decoder context (rbx).
## Context fields: +14048 bit buffer, +14052 valid-bit count,
##   +14056 pending marker byte, +14060 "marker seen" flag,
##   +16 FILE* / +24,+32 memory buffer (same input scheme as get8).
## Loop: while the bit count is <= 16, shift the buffer left 8 and OR in
## one more input byte (count += 8); exits once a pre-add count >= 17 is
## observed, i.e. leaves at least 24 valid bits.
## Byte-stuffing: a 0xFF followed by 0x00 yields a literal 0xFF data
## byte (LBB76_13 with eax==0 -> LBB76_4).  A 0xFF followed by anything
## else is a marker: the second byte is stored at +14056, +14060 is set
## to 1, and the function returns (LBB76_14).  Once +14060 is set, all
## further refills feed zero bits (the jne LBB76_4 with r14d == 0).
##---------------------------------------------------------------------------
_grow_buffer_unsafe: ## @grow_buffer_unsafe
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq %rdi, %rbx
jmp LBB76_1
LBB76_13: ## in Loop: Header=BB76_1 Depth=1
## Byte after 0xFF is in eax: 0 => stuffed data byte 0xFF, else marker.
movl $255, %r14d
testl %eax, %eax
jne LBB76_14
.p2align 4, 0x90
LBB76_4: ## in Loop: Header=BB76_1 Depth=1
## Shift in 8 more bits (r14d); stop once the old count was >= 17.
movl 14048(%rbx), %eax
movl 14052(%rbx), %ecx
shll $8, %eax
orl %r14d, %eax
movl %eax, 14048(%rbx)
leal 8(%rcx), %eax
movl %eax, 14052(%rbx)
cmpl $17, %ecx
jge LBB76_15
LBB76_1: ## =>This Inner Loop Header: Depth=1
xorl %r14d, %r14d
cmpl $0, 14060(%rbx)
jne LBB76_4 ## marker already hit: pad with zero bytes
## %bb.2: ## in Loop: Header=BB76_1 Depth=1
movq 16(%rbx), %rdi
testq %rdi, %rdi
je LBB76_5
## %bb.3: ## in Loop: Header=BB76_1 Depth=1
callq _fgetc
cmpl $-1, %eax
jne LBB76_7
jmp LBB76_4 ## EOF: feed a zero byte
LBB76_5: ## in Loop: Header=BB76_1 Depth=1
movq 24(%rbx), %rax
cmpq 32(%rbx), %rax
jae LBB76_4 ## end of memory buffer: feed a zero byte
## %bb.6: ## in Loop: Header=BB76_1 Depth=1
leaq 1(%rax), %rcx
movq %rcx, 24(%rbx)
movzbl (%rax), %eax
LBB76_7: ## in Loop: Header=BB76_1 Depth=1
movl %eax, %r14d
cmpl $255, %eax
jne LBB76_4 ## ordinary byte
## %bb.8: ## in Loop: Header=BB76_1 Depth=1
## Got 0xFF: read the next byte to disambiguate stuffing vs marker.
movq 16(%rbx), %rdi
testq %rdi, %rdi
je LBB76_11
## %bb.9: ## in Loop: Header=BB76_1 Depth=1
callq _fgetc
cmpl $-1, %eax
jne LBB76_13
## %bb.10: ## in Loop: Header=BB76_1 Depth=1
movl $255, %r14d
jmp LBB76_4 ## EOF after 0xFF: treat as data
LBB76_11: ## in Loop: Header=BB76_1 Depth=1
movq 24(%rbx), %rax
movl $255, %r14d
cmpq 32(%rbx), %rax
jae LBB76_4
## %bb.12: ## in Loop: Header=BB76_1 Depth=1
leaq 1(%rax), %rcx
movq %rcx, 24(%rbx)
movzbl (%rax), %eax
jmp LBB76_13
LBB76_14:
## Marker: remember its byte and stop supplying real input bits.
movb %al, 14056(%rbx)
movl $1, 14060(%rbx)
LBB76_15:
popq %rbx
popq %r14
popq %rbp
retq
.cfi_endproc
## -- End function
.p2align 4, 0x90 ## -- Begin function decode
##---------------------------------------------------------------------------
## decode -- decode one Huffman symbol from the bit buffer.
## ABI: System V AMD64.
## In:  rdi = decoder context (r14); rsi = Huffman table (rbx).
## Out: eax = symbol value, or -1 on failure (callers test with js),
##      or 0 after the mismatch diagnostic path (_decode.cold.1).
## Table layout used here (byte offsets from rsi):
##   +0    512-entry fast-lookup table indexed by the top 9 buffered
##         bits; entry 255 means "not resolvable in 9 bits"
##   +512  int16 code[] (per-symbol code, verified on the slow path)
##   +1024 u8 values[]  (decoded symbol value)
##   +1280 u8 size[]    (code length in bits per symbol)
##   +1580 (dword index 395) per-length maxcode[], scanned linearly
##   +1612 per-length delta[] (symbol-index bias)
## Bit state: ctx+14048 buffer, ctx+14052 count; refilled via
## _grow_buffer_unsafe when fewer than 16 bits are available.
##---------------------------------------------------------------------------
_decode: ## @decode
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq %rsi, %rbx
movq %rdi, %r14
movl 14052(%rdi), %r9d
cmpl $15, %r9d
jg LBB77_2
## %bb.1:
## Fewer than 16 valid bits: refill the buffer first.
movq %r14, %rdi
callq _grow_buffer_unsafe
movl 14052(%r14), %r9d
LBB77_2:
## Fast path: index the 512-entry table with the top 9 buffered bits.
movl 14048(%r14), %r8d
leal -9(%r9), %ecx
movq %r8, %rax
## kill: def $cl killed $cl killed $ecx
shrq %cl, %rax
andl $511, %eax ## imm = 0x1FF
movzbl (%rbx,%rax), %ecx
cmpq $255, %rcx
je LBB77_5 ## 255 sentinel: code longer than 9 bits
## %bb.3:
## Consume size[sym] bits and return values[sym]; fail if the buffer
## holds fewer bits than the code needs.
movzbl 1280(%rbx,%rcx), %edx
movl $-1, %eax
subl %edx, %r9d
jl LBB77_13
## %bb.4:
movl %r9d, 14052(%r14)
movzbl 1024(%rbx,%rcx), %eax
jmp LBB77_13
LBB77_5:
## Slow path: left-align the top 16 buffered bits into eax...
movb $16, %cl
subb %r9b, %cl
movl %r8d, %eax
shll %cl, %eax
leal -16(%r9), %ecx
movl %r8d, %edx
shrl %cl, %edx
cmpl $16, %r9d
cmovll %eax, %edx
movzwl %dx, %eax
## ...then scan maxcode[] (dword index 395 onward) for the first
## length whose maxcode exceeds the aligned code; rdx tracks length+1.
movl $9, %edx
movl $395, %edi ## imm = 0x18B
.p2align 4, 0x90
LBB77_6: ## =>This Inner Loop Header: Depth=1
movq %rdi, %rsi
incq %rdx
incq %rdi
cmpl (%rbx,%rsi,4), %eax
jae LBB77_6
## %bb.7:
cmpl $403, %edi ## imm = 0x193
jne LBB77_9
## %bb.8:
## Scanned past the longest length: invalid code; drop 16 bits.
movl %ecx, 14052(%r14)
movl $-1, %eax
jmp LBB77_13
LBB77_9:
movl $-1, %eax
cmpl %edx, %r9d
jl LBB77_13 ## not enough buffered bits for this code length
## %bb.10:
## Convert the code to a symbol index: (bits >> (count-len)) & bmask[len]
## + delta[len]; verify against code[], then consume and return value.
movl %r9d, %eax
subq %rdx, %rax
movl %r8d, %esi
movl %eax, %ecx
shrl %cl, %esi
movl %edx, %ecx
leaq _bmask(%rip), %rdi
andl (%rdi,%rcx,4), %esi
addl 1612(%rbx,%rcx,4), %esi
movslq %esi, %rdx
movzbl 1280(%rbx,%rdx), %esi
subl %esi, %r9d
movl %r9d, %ecx
shrl %cl, %r8d
andl (%rdi,%rsi,4), %r8d
movzwl 512(%rbx,%rdx,2), %ecx
cmpl %ecx, %r8d
jne LBB77_11
## %bb.12:
movl %eax, 14052(%r14)
movzbl 1024(%rbx,%rdx), %eax
LBB77_13:
popq %rbx
popq %r14
popq %rbp
retq
LBB77_11:
## Internal consistency failure: code bytes disagree with code[].
callq _decode.cold.1
xorl %eax, %eax
jmp LBB77_13
.cfi_endproc
## -- End function
##-----------------------------------------------------------------------
## _build_huffman — compiler-generated (clang, AT&T syntax, SysV AMD64).
## In:    %rdi = pointer to a huffman-table state struct
##        %rsi = pointer to 16 32-bit per-code-length counts
## Out:   %eax = 1 on success, 0 on failure (failure also stores a
##        message pointer into _failure_reason)
## Clobb: caller-saved regs; callee-saved regs are pushed/popped below.
## NOTE(review): struct offsets (+0 fast table, +512 code[], +1280 size[],
## +1540/+1612 per-bit-length tables) look like stb_image's JPEG
## build_huffman — confirm against the original C source before relying
## on these offsets elsewhere.
##-----------------------------------------------------------------------
.p2align 4, 0x90 ## -- Begin function build_huffman
_build_huffman: ## @build_huffman
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
## r15 = state struct; r8 = &state[1280] (per-symbol code-size array).
movq %rdi, %r15
leaq 1280(%rdi), %r8
xorl %ecx, %ecx
xorl %edx, %edx
jmp LBB78_1
.p2align 4, 0x90
LBB78_5: ## in Loop: Header=BB78_1 Depth=1
incq %rcx
cmpq $16, %rcx
je LBB78_6
## Outer loop over the 16 code lengths: for each length L with a
## positive count, append that many (L+1) bytes to the size array.
## rcx = length index, rdx = running symbol count.
LBB78_1: ## =>This Loop Header: Depth=1
## Child Loop BB78_3 Depth 2
cmpl $0, (%rsi,%rcx,4)
jle LBB78_5
## %bb.2: ## in Loop: Header=BB78_1 Depth=1
leal 1(%rcx), %edi
movslq %edx, %rbx
addq %r8, %rbx
xorl %eax, %eax
.p2align 4, 0x90
LBB78_3: ## Parent Loop BB78_1 Depth=1
## => This Inner Loop Header: Depth=2
movb %dil, (%rbx,%rax)
incq %rax
cmpl (%rsi,%rcx,4), %eax
jl LBB78_3
## %bb.4: ## in Loop: Header=BB78_1 Depth=1
addl %eax, %edx
jmp LBB78_5
LBB78_6:
## Terminate the size array with 0, then assign canonical codes.
movslq %edx, %rax
movb $0, 1280(%r15,%rax)
leaq 512(%r15), %r8
leaq 1281(%r15), %r12
xorl %esi, %esi
movl $1, %eax
xorl %ebx, %ebx
jmp LBB78_7
.p2align 4, 0x90
LBB78_12: ## in Loop: Header=BB78_7 Depth=1
## Store the left-justified max code for this bit length
## (code << (16 - len)), then double the code counter for len+1.
movb $16, %cl
subb %al, %cl
movl %esi, %edx
shll %cl, %edx
movl %edx, 1540(%r15,%rax,4)
addl %esi, %esi
incq %rax
cmpq $17, %rax
je LBB78_13
## Loop over bit lengths 1..16: rax = current length, esi = next code,
## ebx = symbol index into the size array.
LBB78_7: ## =>This Loop Header: Depth=1
## Child Loop BB78_9 Depth 2
movl %ebx, %ecx
subl %esi, %ecx
movl %ecx, 1612(%r15,%rax,4)
movslq %ebx, %rcx
movzbl 1280(%r15,%rcx), %edi
cmpq %rdi, %rax
jne LBB78_12
## %bb.8: ## in Loop: Header=BB78_7 Depth=1
leaq (%r8,%rcx,2), %rdi
addq %r12, %rcx
.p2align 4, 0x90
## Inner loop: assign consecutive 16-bit codes to every symbol whose
## size equals the current bit length.
LBB78_9: ## Parent Loop BB78_7 Depth=1
## => This Inner Loop Header: Depth=2
## kill: def $esi killed $esi killed $rsi def $rsi
movw %si, (%rdi)
incl %esi
movzbl (%rcx), %edx
addq $2, %rdi
incl %ebx
incq %rcx
cmpq %rdx, %rax
je LBB78_9
## %bb.10: ## in Loop: Header=BB78_7 Depth=1
## Sanity check: last assigned code must fit in the current bit length
## (code - 1 < (1 << len)); otherwise the table is corrupt.
movl $1, %edx
movl %eax, %ecx
shll %cl, %edx
leal -1(%rsi), %ecx
cmpl %edx, %ecx
jl LBB78_12
## %bb.11:
## Bad code lengths: record failure message and return 0.
leaq L_.str.21(%rip), %rax
movq %rax, _failure_reason(%rip)
xorl %r14d, %r14d
jmp LBB78_25
LBB78_13:
## Mark sentinel (-1 at +1608) and fill the 512-byte fast-lookup
## table with 255 ("no fast entry"), then populate fast entries for
## every symbol with code length <= 9.
movl $-1, 1608(%r15)
movl $512, %edx ## imm = 0x200
movq %r15, %rdi
movl $255, %esi
callq _memset
movl $1, %r14d
testl %ebx, %ebx
jle LBB78_25
## %bb.14:
## ebx = total symbol count; loop below is unrolled by two, with a
## trailing odd iteration handled at LBB78_22/23/24.
movl %ebx, %r13d
cmpl $1, %ebx
movq %r13, -48(%rbp) ## 8-byte Spill
jne LBB78_16
## %bb.15:
xorl %ebx, %ebx
LBB78_22:
testb $1, -48(%rbp) ## 1-byte Folded Reload
je LBB78_25
## %bb.23:
movzbl 1280(%r15,%rbx), %eax
cmpq $9, %rax
ja LBB78_25
## %bb.24:
## Fast-table fill for the final (odd) symbol: memset the 2^(9-len)
## slots starting at code << (9 - len) with the symbol index.
movb $9, %cl
subb %al, %cl
movl $1, %r14d
movl $1, %edx
shll %cl, %edx
movzwl 512(%r15,%rbx,2), %edi
shlq %cl, %rdi
addq %r15, %rdi
movl %ebx, %esi
callq _memset
LBB78_25:
## Common epilogue; r14d holds the 0/1 result.
movl %r14d, %eax
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB78_16:
## kill: def $r13d killed $r13d killed $r13 def $r13
andl $-2, %r13d
xorl %ebx, %ebx
jmp LBB78_17
.p2align 4, 0x90
LBB78_21: ## in Loop: Header=BB78_17 Depth=1
addq $2, %rbx
addq $2, %r12
cmpq %rbx, %r13
je LBB78_22
## Unrolled-by-two fast-table fill over all symbols (even index here,
## odd index at LBB78_19/20).
LBB78_17: ## =>This Inner Loop Header: Depth=1
movzbl -1(%r12), %eax
cmpq $9, %rax
ja LBB78_19
## %bb.18: ## in Loop: Header=BB78_17 Depth=1
movb $9, %cl
subb %al, %cl
movl $1, %edx
shll %cl, %edx
movzwl 512(%r15,%rbx,2), %edi
shlq %cl, %rdi
addq %r15, %rdi
movl %ebx, %esi
callq _memset
LBB78_19: ## in Loop: Header=BB78_17 Depth=1
movzbl (%r12), %eax
cmpq $9, %rax
ja LBB78_21
## %bb.20: ## in Loop: Header=BB78_17 Depth=1
movb $9, %cl
subb %al, %cl
movl $1, %edx
shll %cl, %edx
movzwl 514(%r15,%rbx,2), %edi
shlq %cl, %rdi
leal 1(%rbx), %esi
addq %r15, %rdi
callq _memset
jmp LBB78_21
.cfi_endproc
## -- End function
## Constant pool for _process_frame_header: vector of four 1s used to
## seed the SSE max-reduction over per-component sampling factors.
.section __TEXT,__literal16,16byte_literals
.p2align 4 ## -- Begin function process_frame_header
LCPI79_0:
.long 1 ## 0x1
.long 1 ## 0x1
.long 1 ## 0x1
.long 1 ## 0x1
.section __TEXT,__text,regular,pure_instructions
##-----------------------------------------------------------------------
## _process_frame_header — compiler-generated (clang, AT&T, SysV AMD64).
## In:    %rdi = decoder context. Input is read either through a FILE*
##        at +16 (via fgetc) or from an in-memory buffer whose cursor is
##        at +24 and end pointer at +32 — both paths appear throughout.
## Out:   %eax = nonzero on success, 0 on failure; on failure a message
##        pointer is stored into _failure_reason.
## NOTE(review): the sequence (16-bit length >= 11, precision byte == 8,
## 16-bit height then width, component count 1 or 3, per-component
## id/sampling/quant-table bytes, max-sampling reduction, MCU sizing,
## per-component malloc) matches a JPEG SOF parser in the style of
## stb_image — confirm against the original C source.
##-----------------------------------------------------------------------
.p2align 4, 0x90
_process_frame_header: ## @process_frame_header
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $56, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
## Read a big-endian 16-bit value (segment length) into r13d, one byte
## at a time; each byte comes from fgetc(FILE*) or the memory cursor.
movq %rdi, %r14
movq 16(%rdi), %rdi
testq %rdi, %rdi
je LBB79_3
## %bb.1:
callq _fgetc
movl %eax, %r13d
xorl %ebx, %ebx
cmpl $-1, %eax
cmovel %ebx, %r13d
movq 16(%r14), %rdi
testq %rdi, %rdi
je LBB79_5
## %bb.2:
callq _fgetc
cmpl $-1, %eax
cmovnel %eax, %ebx
jmp LBB79_8
LBB79_3:
movq 24(%r14), %rax
movq 32(%r14), %rcx
xorl %r13d, %r13d
cmpq %rcx, %rax
jae LBB79_6
## %bb.4:
leaq 1(%rax), %rdx
movq %rdx, 24(%r14)
movzbl (%rax), %r13d
movq %rdx, %rax
jmp LBB79_6
LBB79_5:
movq 24(%r14), %rax
movq 32(%r14), %rcx
LBB79_6:
xorl %ebx, %ebx
cmpq %rcx, %rax
jae LBB79_8
## %bb.7:
leaq 1(%rax), %rcx
movq %rcx, 24(%r14)
movzbl (%rax), %ebx
LBB79_8:
## r13d = (hi << 8) + lo; the segment length must be at least 11.
shll $8, %r13d
addl %ebx, %r13d
leaq L_.str.24(%rip), %r12
cmpl $11, %r13d
jge LBB79_10
## %bb.9:
movq %r12, %r15
jmp LBB79_92
LBB79_10:
## Next byte must equal 8 (presumably sample precision — verify).
movq 16(%r14), %rdi
testq %rdi, %rdi
je LBB79_16
## %bb.11:
callq _fgetc
cmpl $-1, %eax
je LBB79_19
## %bb.12:
cmpl $8, %eax
jne LBB79_19
jmp LBB79_13
LBB79_16:
movq 24(%r14), %rax
cmpq 32(%r14), %rax
jae LBB79_19
## %bb.17:
leaq 1(%rax), %rcx
movq %rcx, 24(%r14)
movzbl (%rax), %eax
cmpl $8, %eax
jne LBB79_19
LBB79_13:
## Read the next big-endian 16-bit value into r15d (stored at +4;
## presumably image height — verify against the struct definition).
movq 16(%r14), %rdi
testq %rdi, %rdi
je LBB79_21
## %bb.14:
callq _fgetc
movl %eax, %r15d
xorl %ebx, %ebx
cmpl $-1, %eax
cmovel %ebx, %r15d
movq 16(%r14), %rdi
testq %rdi, %rdi
je LBB79_23
## %bb.15:
callq _fgetc
cmpl $-1, %eax
cmovnel %eax, %ebx
jmp LBB79_26
LBB79_19:
leaq L_.str.25(%rip), %r15
LBB79_92:
## Common failure exit: publish the message in r15 and return 0 (ebx).
movq %r15, _failure_reason(%rip)
xorl %ebx, %ebx
LBB79_93:
movl %ebx, %eax
addq $56, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB79_21:
movq 24(%r14), %rax
movq 32(%r14), %rcx
xorl %r15d, %r15d
cmpq %rcx, %rax
jae LBB79_24
## %bb.22:
leaq 1(%rax), %rdx
movq %rdx, 24(%r14)
movzbl (%rax), %r15d
movq %rdx, %rax
jmp LBB79_24
LBB79_23:
movq 24(%r14), %rax
movq 32(%r14), %rcx
LBB79_24:
xorl %ebx, %ebx
cmpq %rcx, %rax
jae LBB79_26
## %bb.25:
leaq 1(%rax), %rcx
movq %rcx, 24(%r14)
movzbl (%rax), %ebx
LBB79_26:
## Store the 16-bit value at +4; it must be nonzero.
shll $8, %r15d
addl %ebx, %r15d
movl %r15d, 4(%r14)
je LBB79_30
## %bb.27:
## Read the next big-endian 16-bit value (stored at +0; presumably
## image width — verify); must also be nonzero.
movq 16(%r14), %rdi
testq %rdi, %rdi
je LBB79_31
## %bb.28:
callq _fgetc
movl %eax, %r15d
xorl %ebx, %ebx
cmpl $-1, %eax
cmovel %ebx, %r15d
movq 16(%r14), %rdi
testq %rdi, %rdi
je LBB79_33
## %bb.29:
callq _fgetc
cmpl $-1, %eax
cmovnel %eax, %ebx
jmp LBB79_36
LBB79_30:
leaq L_.str.26(%rip), %r15
jmp LBB79_92
LBB79_31:
movq 24(%r14), %rax
movq 32(%r14), %rcx
xorl %r15d, %r15d
cmpq %rcx, %rax
jae LBB79_34
## %bb.32:
leaq 1(%rax), %rdx
movq %rdx, 24(%r14)
movzbl (%rax), %r15d
movq %rdx, %rax
jmp LBB79_34
LBB79_33:
movq 24(%r14), %rax
movq 32(%r14), %rcx
LBB79_34:
xorl %ebx, %ebx
cmpq %rcx, %rax
jae LBB79_36
## %bb.35:
leaq 1(%rax), %rcx
movq %rcx, 24(%r14)
movzbl (%rax), %ebx
LBB79_36:
shll $8, %r15d
addl %ebx, %r15d
movl %r15d, (%r14)
je LBB79_39
## %bb.37:
## Read the component count byte; (count & ~2) must equal 1, i.e.
## count is 1 or 3.
movq 16(%r14), %rdi
leaq L_.str.28(%rip), %r15
testq %rdi, %rdi
je LBB79_40
## %bb.38:
callq _fgetc
xorl %ecx, %ecx
cmpl $-1, %eax
cmovnel %eax, %ecx
jmp LBB79_42
LBB79_39:
leaq L_.str.27(%rip), %r15
jmp LBB79_92
LBB79_40:
movq 24(%r14), %rax
cmpq 32(%r14), %rax
jae LBB79_92
## %bb.41:
leaq 1(%rax), %rcx
movq %rcx, 24(%r14)
movzbl (%rax), %ecx
LBB79_42:
movl %ecx, %eax
andl $-3, %eax
cmpl $1, %eax
jne LBB79_92
## %bb.43:
## Store component count at +8, then zero selected per-component
## fields (stride 72 bytes per component, unrolled by two).
movl %ecx, 8(%r14)
movl %ecx, %eax
cmpl $1, %ecx
jne LBB79_45
## %bb.44:
xorl %edx, %edx
jmp LBB79_47
LBB79_45:
movl %eax, %esi
andl $-2, %esi
leaq 13896(%r14), %rdi
xorl %edx, %edx
.p2align 4, 0x90
LBB79_46: ## =>This Inner Loop Header: Depth=1
movq $0, -88(%rdi)
movq $0, -72(%rdi)
movq $0, -16(%rdi)
movq $0, (%rdi)
addq $2, %rdx
addq $144, %rdi
cmpq %rdx, %rsi
jne LBB79_46
LBB79_47:
testb $1, %al
je LBB79_49
## %bb.48:
leaq (%rdx,%rdx,8), %rax
movq $0, 13808(%r14,%rax,8)
movq $0, 13824(%r14,%rax,8)
LBB79_49:
## Segment length must equal 3*ncomp + 8.
leal (%rcx,%rcx,2), %eax
addl $8, %eax
movq %r12, %r15
cmpl %eax, %r13d
jne LBB79_92
## %bb.50:
testl %ecx, %ecx
jle LBB79_73
## %bb.51:
## Per-component loop: read id byte, sampling byte (high nibble and
## low nibble, both checked), and quant-table byte (must be <= 3).
## rbx walks the component array at +13772 with stride 72.
leaq 13772(%r14), %rbx
leaq L_.str.30(%rip), %r15
xorl %r13d, %r13d
jmp LBB79_54
LBB79_52: ## in Loop: Header=BB79_54 Depth=1
movl $0, (%rbx)
LBB79_53: ## in Loop: Header=BB79_54 Depth=1
movslq 8(%r14), %rcx
addq $72, %rbx
movq %r12, %r13
cmpq %rcx, %r12
jge LBB79_73
LBB79_54: ## =>This Inner Loop Header: Depth=1
movq 16(%r14), %rdi
testq %rdi, %rdi
je LBB79_56
## %bb.55: ## in Loop: Header=BB79_54 Depth=1
callq _fgetc
cmpl $-1, %eax
movl $0, %ecx
cmovel %ecx, %eax
jmp LBB79_58
LBB79_56: ## in Loop: Header=BB79_54 Depth=1
movq 24(%r14), %rcx
xorl %eax, %eax
cmpq 32(%r14), %rcx
jae LBB79_58
## %bb.57: ## in Loop: Header=BB79_54 Depth=1
leaq 1(%rcx), %rax
movq %rax, 24(%r14)
movzbl (%rcx), %eax
LBB79_58: ## in Loop: Header=BB79_54 Depth=1
## Component id must equal the loop index or index+1.
movl %eax, -12(%rbx)
leaq 1(%r13), %r12
movl %eax, %eax
cmpq %rax, %r12
je LBB79_60
## %bb.59: ## in Loop: Header=BB79_54 Depth=1
cmpq %rax, %r13
jne LBB79_78
LBB79_60: ## in Loop: Header=BB79_54 Depth=1
movq 16(%r14), %rdi
testq %rdi, %rdi
je LBB79_62
## %bb.61: ## in Loop: Header=BB79_54 Depth=1
callq _fgetc
cmpl $-1, %eax
movl $0, %ecx
cmovel %ecx, %eax
jmp LBB79_64
LBB79_62: ## in Loop: Header=BB79_54 Depth=1
movq 24(%r14), %rax
cmpq 32(%r14), %rax
jae LBB79_91
## %bb.63: ## in Loop: Header=BB79_54 Depth=1
leaq 1(%rax), %rcx
movq %rcx, 24(%r14)
movzbl (%rax), %eax
LBB79_64: ## in Loop: Header=BB79_54 Depth=1
## Sampling byte: high nibble in 1..4 (16 <= byte <= 79) stored at
## -8(%rbx); low nibble in 1..4 stored at -4(%rbx).
movl %eax, %ecx
sarl $4, %ecx
movl %ecx, -8(%rbx)
cmpl $16, %eax
jb LBB79_92
## %bb.65: ## in Loop: Header=BB79_54 Depth=1
cmpl $79, %eax
jg LBB79_92
## %bb.66: ## in Loop: Header=BB79_54 Depth=1
andl $15, %eax
movl %eax, -4(%rbx)
addl $-5, %eax
cmpl $-4, %eax
jb LBB79_79
## %bb.67: ## in Loop: Header=BB79_54 Depth=1
movq 16(%r14), %rdi
testq %rdi, %rdi
je LBB79_69
## %bb.68: ## in Loop: Header=BB79_54 Depth=1
callq _fgetc
cmpl $-1, %eax
jne LBB79_71
jmp LBB79_52
LBB79_69: ## in Loop: Header=BB79_54 Depth=1
movq 24(%r14), %rax
cmpq 32(%r14), %rax
jae LBB79_52
## %bb.70: ## in Loop: Header=BB79_54 Depth=1
leaq 1(%rax), %rcx
movq %rcx, 24(%r14)
movzbl (%rax), %eax
LBB79_71: ## in Loop: Header=BB79_54 Depth=1
movl %eax, (%rbx)
cmpl $3, %eax
jle LBB79_53
## %bb.72:
leaq L_.str.32(%rip), %r15
jmp LBB79_92
LBB79_73:
## Overflow guard: (0x40000000 / width) / ncomp must be >= height.
movl (%r14), %esi
movl 4(%r14), %edi
movl $1073741824, %eax ## imm = 0x40000000
xorl %edx, %edx
movq %rsi, -72(%rbp) ## 8-byte Spill
divl %esi
xorl %edx, %edx
divl %ecx
movq %rdi, -64(%rbp) ## 8-byte Spill
cmpl %edi, %eax
jae LBB79_75
## %bb.74:
leaq L_.str.33(%rip), %r15
jmp LBB79_92
LBB79_75:
## Compute the max horizontal (r9d) and vertical (r11d) sampling
## factors over all components; vectorized with pmaxsd when >= 8.
movl $1, %ebx
movl %ecx, %eax
movq %rax, -48(%rbp) ## 8-byte Spill
movl $1, %r9d
movl $1, %r11d
testl %ecx, %ecx
jle LBB79_85
## %bb.76:
cmpl $8, %ecx
jae LBB79_80
## %bb.77:
movl $1, %r11d
xorl %eax, %eax
movl $1, %r9d
jmp LBB79_83
LBB79_78:
leaq L_.str.29(%rip), %r15
jmp LBB79_92
LBB79_79:
leaq L_.str.31(%rip), %r15
jmp LBB79_92
LBB79_80:
movq -48(%rbp), %rdx ## 8-byte Reload
movl %edx, %eax
andl $-8, %eax
leaq (,%rdx,8), %rdx
andq $-64, %rdx
leaq (%rdx,%rdx,8), %rdx
movdqa LCPI79_0(%rip), %xmm0 ## xmm0 = [1,1,1,1]
xorl %esi, %esi
movdqa %xmm0, %xmm1
movdqa %xmm0, %xmm2
movdqa %xmm0, %xmm3
LBB79_81: ## =>This Inner Loop Header: Depth=1
## Gather 8 components' h/v fields (stride 72) and fold max into
## xmm accumulators.
movd 13764(%r14,%rsi), %xmm4 ## xmm4 = mem[0],zero,zero,zero
pinsrd $1, 13836(%r14,%rsi), %xmm4
pinsrd $2, 13908(%r14,%rsi), %xmm4
pinsrd $3, 13980(%r14,%rsi), %xmm4
movd 14052(%r14,%rsi), %xmm5 ## xmm5 = mem[0],zero,zero,zero
pinsrd $1, 14124(%r14,%rsi), %xmm5
pinsrd $2, 14196(%r14,%rsi), %xmm5
pinsrd $3, 14268(%r14,%rsi), %xmm5
pmaxsd %xmm4, %xmm2
movd 13768(%r14,%rsi), %xmm4 ## xmm4 = mem[0],zero,zero,zero
pinsrd $1, 13840(%r14,%rsi), %xmm4
pinsrd $2, 13912(%r14,%rsi), %xmm4
pinsrd $3, 13984(%r14,%rsi), %xmm4
pmaxsd %xmm5, %xmm3
movd 14056(%r14,%rsi), %xmm5 ## xmm5 = mem[0],zero,zero,zero
pinsrd $1, 14128(%r14,%rsi), %xmm5
pinsrd $2, 14200(%r14,%rsi), %xmm5
pinsrd $3, 14272(%r14,%rsi), %xmm5
pmaxsd %xmm4, %xmm0
pmaxsd %xmm5, %xmm1
addq $576, %rsi ## imm = 0x240
cmpq %rsi, %rdx
jne LBB79_81
## %bb.82:
## Horizontal max-reduce the vector accumulators into r9d / r11d.
pmaxsd %xmm3, %xmm2
pshufd $238, %xmm2, %xmm3 ## xmm3 = xmm2[2,3,2,3]
pmaxsd %xmm2, %xmm3
pshufd $85, %xmm3, %xmm2 ## xmm2 = xmm3[1,1,1,1]
pmaxsd %xmm3, %xmm2
movd %xmm2, %r9d
pmaxsd %xmm1, %xmm0
pshufd $238, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,2,3]
pmaxsd %xmm0, %xmm1
pshufd $85, %xmm1, %xmm0 ## xmm0 = xmm1[1,1,1,1]
pmaxsd %xmm1, %xmm0
movd %xmm0, %r11d
cmpq -48(%rbp), %rax ## 8-byte Folded Reload
je LBB79_85
LBB79_83:
## Scalar tail of the max reduction.
leaq (%rax,%rax,8), %rdx
leaq (%r14,%rdx,8), %rdx
addq $13768, %rdx ## imm = 0x35C8
movq -48(%rbp), %rsi ## 8-byte Reload
subq %rax, %rsi
LBB79_84: ## =>This Inner Loop Header: Depth=1
movl -4(%rdx), %eax
movl (%rdx), %edi
cmpl %r9d, %eax
cmovgl %eax, %r9d
cmpl %r11d, %edi
cmovgl %edi, %r11d
addq $72, %rdx
decq %rsi
jne LBB79_84
LBB79_85:
## MCU geometry: store max h/v (+13736/+13740), MCU pixel size
## 8*h / 8*v (+13752/+13756), and MCU counts = ceil(dim / mcu_size)
## (+13744/+13748).
leal (,%r9,8), %r8d
leal (,%r11,8), %esi
movq -72(%rbp), %rax ## 8-byte Reload
leal (%rax,%r9,8), %eax
decl %eax
xorl %edx, %edx
divl %r8d
movl %eax, %r10d
movq -64(%rbp), %rax ## 8-byte Reload
leal (%rax,%r11,8), %eax
decl %eax
xorl %edx, %edx
divl %esi
movl %r9d, 13736(%r14)
movl %r11d, 13740(%r14)
movl %r8d, 13752(%r14)
movl %esi, 13756(%r14)
movl %r10d, 13744(%r14)
movl %eax, 13748(%r14)
testl %ecx, %ecx
jle LBB79_93
## %bb.86:
## Per-component allocation loop: compute per-component dimensions
## and malloc a buffer (size | 15, then 16-byte aligned pointer at
## +64, raw pointer at +72). On malloc failure, free prior buffers.
movl %eax, %edi
leal -1(%r9), %eax
movl %eax, -56(%rbp) ## 4-byte Spill
leal -1(%r11), %eax
movl %eax, -52(%rbp) ## 4-byte Spill
shll $3, %r10d
shll $3, %edi
addq $13744, %r14 ## imm = 0x35B0
negq -48(%rbp) ## 8-byte Folded Spill
movl $1, %r12d
movq %r9, -88(%rbp) ## 8-byte Spill
movq %r11, -80(%rbp) ## 8-byte Spill
LBB79_87: ## =>This Inner Loop Header: Depth=1
movl 20(%r14), %esi
movl 24(%r14), %ecx
movl %esi, %eax
imull -72(%rbp), %eax ## 4-byte Folded Reload
addl -56(%rbp), %eax ## 4-byte Folded Reload
xorl %edx, %edx
divl %r9d
movl %eax, 44(%r14)
movl %ecx, %eax
imull -64(%rbp), %eax ## 4-byte Folded Reload
addl -52(%rbp), %eax ## 4-byte Folded Reload
xorl %edx, %edx
divl %r11d
movl %eax, 48(%r14)
movl %r10d, %r13d
imull %r10d, %esi
movl %esi, 52(%r14)
movl %edi, %r15d
imull %edi, %ecx
movl %ecx, 56(%r14)
imull %esi, %ecx
orl $15, %ecx
movslq %ecx, %rdi
callq _malloc
movq %rax, 72(%r14)
testq %rax, %rax
je LBB79_89
## %bb.88: ## in Loop: Header=BB79_87 Depth=1
## Align the raw pointer up to 16 and store it at +64.
addq $15, %rax
andq $-16, %rax
movq %rax, 64(%r14)
movq $0, 80(%r14)
addq $72, %r14
movq -48(%rbp), %rax ## 8-byte Reload
addq %r12, %rax
incq %rax
incq %r12
cmpq $1, %rax
movl %r13d, %r10d
movq -88(%rbp), %r9 ## 8-byte Reload
movl %r15d, %edi
movq -80(%rbp), %r11 ## 8-byte Reload
jne LBB79_87
jmp LBB79_93
LBB79_89:
## malloc failed: unwind by freeing the buffers already allocated.
leaq L_.str.5(%rip), %r15
cmpl $1, %r12d
je LBB79_92
LBB79_90: ## =>This Inner Loop Header: Depth=1
movq (%r14), %rdi
callq _free
movq $0, -8(%r14)
decq %r12
addq $-72, %r14
cmpq $1, %r12
jg LBB79_90
jmp LBB79_92
LBB79_91:
movl $0, -8(%rbx)
jmp LBB79_92
.cfi_endproc
## -- End function
## Constant pool for _zbuild_huffman: per-bit-length count limits
## (1 << len) used by the vectorized over-subscription check.
.section __TEXT,__literal16,16byte_literals
.p2align 4 ## -- Begin function zbuild_huffman
LCPI80_0:
.long 64 ## 0x40
.long 128 ## 0x80
.long 256 ## 0x100
.long 512 ## 0x200
LCPI80_1:
.long 4 ## 0x4
.long 8 ## 0x8
.long 16 ## 0x10
.long 32 ## 0x20
LCPI80_2:
.long 4096 ## 0x1000
.long 8192 ## 0x2000
.long 16384 ## 0x4000
.long 32768 ## 0x8000
.section __TEXT,__text,regular,pure_instructions
##-----------------------------------------------------------------------
## _zbuild_huffman — compiler-generated (clang, AT&T, SysV AMD64).
## In:    %rdi = huffman table struct (fast table at +0, firstcode at
##        +1024, maxcode at +1060, firstsymbol at +1124, size at +1156,
##        value at +1444 — offsets inferred from the code, verify)
##        %rsi = array of per-symbol code lengths (bytes)
##        %edx = number of symbols
## Out:   %eax = 1 on success, 0 on failure (message in _failure_reason
##        or via _zbuild_huffman.cold.1).
## Uses a stack canary (___stack_chk_guard) around a 16-entry local
## count array at -192(%rbp).
## NOTE(review): this matches stb_image's zlib zbuild_huffman; the fast
## table holds 512 16-bit entries indexed by 9 low bits of the
## bit-reversed code — confirm against the original C source.
##-----------------------------------------------------------------------
.p2align 4, 0x90
_zbuild_huffman: ## @zbuild_huffman
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $160, %rsp
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %edx, %r15d
movq %rsi, %r14
movq %rdi, %r12
## Install stack canary; verified before every return.
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -40(%rbp)
## Zero the 17-entry local sizes[] count array, then fill the 1024-byte
## fast table with 0xFF ("no entry").
pxor %xmm0, %xmm0
movdqa %xmm0, -144(%rbp)
movdqa %xmm0, -160(%rbp)
movdqa %xmm0, -176(%rbp)
movdqa %xmm0, -192(%rbp)
movl $0, -128(%rbp)
movl $1024, %edx ## imm = 0x400
movl $255, %esi
callq _memset
testl %r15d, %r15d
jle LBB80_1
## %bb.2:
## Histogram pass: count symbols per code length (unrolled by 4 with
## a scalar remainder loop).
movl %r15d, %edx
leaq -1(%rdx), %rcx
movl %edx, %eax
andl $3, %eax
cmpq $3, %rcx
jae LBB80_4
## %bb.3:
xorl %ecx, %ecx
jmp LBB80_6
LBB80_1:
movl $0, -192(%rbp)
xorl %r8d, %r8d
jmp LBB80_10
LBB80_4:
andl $-4, %edx
xorl %ecx, %ecx
.p2align 4, 0x90
LBB80_5: ## =>This Inner Loop Header: Depth=1
movzbl (%r14,%rcx), %esi
incl -192(%rbp,%rsi,4)
movzbl 1(%r14,%rcx), %esi
incl -192(%rbp,%rsi,4)
movzbl 2(%r14,%rcx), %esi
incl -192(%rbp,%rsi,4)
movzbl 3(%r14,%rcx), %esi
incl -192(%rbp,%rsi,4)
addq $4, %rcx
cmpq %rcx, %rdx
jne LBB80_5
LBB80_6:
testq %rax, %rax
je LBB80_9
## %bb.7:
addq %r14, %rcx
xorl %edx, %edx
.p2align 4, 0x90
LBB80_8: ## =>This Inner Loop Header: Depth=1
movzbl (%rcx,%rdx), %esi
incl -192(%rbp,%rsi,4)
incq %rdx
cmpq %rdx, %rax
jne LBB80_8
LBB80_9:
## sizes[0] is ignored (set to 0); sizes[1] must be <= 2.
movl -188(%rbp), %r8d
movl $0, -192(%rbp)
cmpl $2, %r8d
jg LBB80_23
LBB80_10:
## Vectorized over-subscription check: sizes[len] must not exceed
## (1 << len) for every length; any violation jumps to the cold path.
movdqu -184(%rbp), %xmm0
movdqu -168(%rbp), %xmm1
pcmpgtd LCPI80_0(%rip), %xmm1
pcmpgtd LCPI80_1(%rip), %xmm0
packssdw %xmm1, %xmm0
pmovmskb %xmm0, %eax
testl %eax, %eax
jne LBB80_23
## %bb.11:
cmpl $1024, -152(%rbp) ## imm = 0x400
jg LBB80_23
## %bb.12:
cmpl $2048, -148(%rbp) ## imm = 0x800
jg LBB80_23
## %bb.13:
movdqa -144(%rbp), %xmm0
pcmpgtd LCPI80_2(%rip), %xmm0
movmskps %xmm0, %eax
testb $1, %al
jne LBB80_23
## %bb.14:
movl %eax, %ecx
andb $2, %cl
shrb %cl
jne LBB80_23
## %bb.15:
movl %eax, %ecx
andb $4, %cl
shrb $2, %cl
jne LBB80_23
## %bb.16:
shrb $3, %al
jne LBB80_23
## %bb.17:
## Canonical code assignment: walk lengths 1..16 building
## firstcode/firstsymbol/maxcode (left-justified); an over-full
## length falls through to the "bad codelengths" failure.
movl $0, -108(%rbp)
movw $0, 1026(%r12)
movw $0, 1126(%r12)
cmpl $2, %r8d
jg LBB80_22
## %bb.18:
xorl %esi, %esi
movl $15, %eax
xorl %edi, %edi
movl %r8d, %edx
.p2align 4, 0x90
LBB80_19: ## =>This Inner Loop Header: Depth=1
movl %r8d, %ebx
movl %eax, %ecx
shll %cl, %ebx
movl %ebx, 1060(%r12,%rsi,4)
cmpq $14, %rsi
je LBB80_25
## %bb.20: ## in Loop: Header=BB80_19 Depth=1
addl %edx, %edi
leal (%r8,%r8), %ecx
movl -184(%rbp,%rsi,4), %edx
movl %ecx, -104(%rbp,%rsi,4)
movw %cx, 1028(%r12,%rsi,2)
movw %di, 1128(%r12,%rsi,2)
leal 2(%rsi), %ecx
movl $1, %ebx
## kill: def $cl killed $cl killed $ecx
shll %cl, %ebx
leal (%rdx,%r8,2), %r8d
decl %eax
incq %rsi
testl %edx, %edx
je LBB80_19
## %bb.21: ## in Loop: Header=BB80_19 Depth=1
cmpl %ebx, %r8d
jle LBB80_19
LBB80_22:
## Too many codes for some length: record failure and return 0.
leaq L_.str.43(%rip), %rax
movq %rax, _failure_reason(%rip)
xorl %eax, %eax
jmp LBB80_35
LBB80_25:
## Sentinel maxcode entry, then populate size/value arrays and the
## fast table for each symbol.
movl $65536, 1120(%r12) ## imm = 0x10000
testl %r15d, %r15d
jle LBB80_34
## %bb.26:
movl %r15d, %r8d
xorl %esi, %esi
jmp LBB80_27
.p2align 4, 0x90
LBB80_32: ## in Loop: Header=BB80_27 Depth=1
leal 1(%r9), %ecx
movl %ecx, -112(%rbp,%rax,4)
LBB80_33: ## in Loop: Header=BB80_27 Depth=1
incq %rsi
cmpq %r8, %rsi
je LBB80_34
LBB80_27: ## =>This Loop Header: Depth=1
## Child Loop BB80_31 Depth 2
movzbl (%r14,%rsi), %eax
testl %eax, %eax
je LBB80_33
## %bb.28: ## in Loop: Header=BB80_27 Depth=1
## c = next_code[len] - firstcode[len] + firstsymbol[len];
## record the symbol's size and value at index c.
movslq -112(%rbp,%rax,4), %r9
movzwl 1024(%r12,%rax,2), %ecx
movq %r9, %rdi
subq %rcx, %rdi
movzwl 1124(%r12,%rax,2), %edx
addq %rdi, %rdx
movb %al, 1156(%r12,%rdx)
movw %si, 1444(%r12,%rdx,2)
cmpb $9, %al
ja LBB80_32
## %bb.29: ## in Loop: Header=BB80_27 Depth=1
## Bit-reverse the 16-bit code (swap bytes, nibbles, pairs, bits),
## then shift to keep the low `len` bits; skip if >= 512.
movl %r9d, %ecx
rolw $8, %cx
movl %ecx, %edi
andl $3855, %edi ## imm = 0xF0F
shll $4, %edi
shrl $4, %ecx
andl $3855, %ecx ## imm = 0xF0F
orl %edi, %ecx
movl %ecx, %edi
andl $13107, %edi ## imm = 0x3333
shrl $2, %ecx
andl $13107, %ecx ## imm = 0x3333
leal (%rcx,%rdi,4), %ecx
movl %ecx, %edi
andl $21845, %edi ## imm = 0x5555
shrl %ecx
andl $21845, %ecx ## imm = 0x5555
leal (%rcx,%rdi,2), %ebx
movb $16, %cl
subb %al, %cl
shrl %cl, %ebx
cmpl $511, %ebx ## imm = 0x1FF
ja LBB80_32
## %bb.30: ## in Loop: Header=BB80_27 Depth=1
## Fill every fast-table slot whose low `len` bits match the
## reversed code (stride 1 << len through the 512 entries).
movl $1, %edi
movl %eax, %ecx
shll %cl, %edi
movl %ebx, %ecx
movslq %edi, %rbx
.p2align 4, 0x90
LBB80_31: ## Parent Loop BB80_27 Depth=1
## => This Inner Loop Header: Depth=2
movw %dx, (%r12,%rcx,2)
addq %rbx, %rcx
cmpq $512, %rcx ## imm = 0x200
jl LBB80_31
jmp LBB80_32
LBB80_34:
movl $1, %eax
LBB80_35:
## Verify the stack canary before returning eax.
movq ___stack_chk_guard@GOTPCREL(%rip), %rcx
movq (%rcx), %rcx
cmpq -40(%rbp), %rcx
jne LBB80_37
LBB80_36:
addq $160, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
LBB80_23:
## Cold path for corrupt length counts; still checks the canary.
callq _zbuild_huffman.cold.1
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
cmpq -40(%rbp), %rax
jne LBB80_37
## %bb.24:
xorl %eax, %eax
jmp LBB80_36
LBB80_37:
callq ___stack_chk_fail
.cfi_endproc
## -- End function
##-----------------------------------------------------------------------
## _zhuffman_decode — compiler-generated (clang, AT&T, SysV AMD64).
## In:    %rdi = bit-stream state (data cursor at +0, data end at +8,
##        bit count at +16, code buffer at +20)
##        %rsi = huffman table built by _zbuild_huffman (fast table at
##        +0, maxcode at +1096, firstcode at +1024, firstsymbol at
##        +1124, size at +1156, value at +1444)
## Out:   %eax = decoded symbol; 0 or -1 on error paths (cold handlers).
## NOTE(review): matches stb_image's zlib zhuffman_decode — fast path
## via a 512-entry table on 9 bits, slow path by bit-reversing 16 bits
## and scanning maxcode — confirm against the original C source.
##-----------------------------------------------------------------------
.p2align 4, 0x90 ## -- Begin function zhuffman_decode
_zhuffman_decode: ## @zhuffman_decode
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
## Refill loop: while fewer than 16 bits buffered, shift in one input
## byte at a time (zero byte when past end-of-data).
movl 16(%rdi), %r8d
cmpl $16, %r8d
jge LBB81_1
## %bb.2:
movl 20(%rdi), %r9d
movl %r8d, %ecx
jmp LBB81_3
.p2align 4, 0x90
LBB81_8: ## in Loop: Header=BB81_3 Depth=1
shll %cl, %eax
orl %eax, %r9d
movl %r9d, 20(%rdi)
leal 8(%rcx), %r8d
movl %r8d, 16(%rdi)
cmpl $17, %ecx
movl %r8d, %ecx
jge LBB81_9
LBB81_3: ## =>This Inner Loop Header: Depth=1
## Guard: bits above the current count must be zero, else the stream
## state is corrupt (cold handler).
movl %r9d, %eax
shrl %cl, %eax
testl %eax, %eax
jne LBB81_4
## %bb.6: ## in Loop: Header=BB81_3 Depth=1
movq (%rdi), %rdx
xorl %eax, %eax
cmpq 8(%rdi), %rdx
jae LBB81_8
## %bb.7: ## in Loop: Header=BB81_3 Depth=1
leaq 1(%rdx), %rax
movq %rax, (%rdi)
movzbl (%rdx), %eax
jmp LBB81_8
LBB81_1:
movl 20(%rdi), %r9d
LBB81_9:
## Fast path: low 9 bits index the 512-entry table; 0xFFFF means
## "not in fast table" and falls through to the slow path.
movl %r9d, %eax
andl $511, %eax ## imm = 0x1FF
movzwl (%rsi,%rax,2), %eax
cmpq $65535, %rax ## imm = 0xFFFF
je LBB81_13
## %bb.10:
movzbl 1156(%rsi,%rax), %r10d
jmp LBB81_11
LBB81_13:
## Slow path: bit-reverse 16 bits of the buffer (byte swap, nibble
## swap, pair swap, bit swap) into r11d.
movl %r9d, %eax
rolw $8, %ax
movl %eax, %ecx
andl $3855, %ecx ## imm = 0xF0F
shll $4, %ecx
shrl $4, %eax
andl $3855, %eax ## imm = 0xF0F
orl %ecx, %eax
movl %eax, %ecx
andl $13107, %ecx ## imm = 0x3333
shrl $2, %eax
andl $13107, %eax ## imm = 0x3333
leal (%rax,%rcx,4), %eax
movl %eax, %ecx
andl $21845, %ecx ## imm = 0x5555
shrl %eax
andl $21845, %eax ## imm = 0x5555
leal (%rax,%rcx,2), %r11d
xorl %edx, %edx
.p2align 4, 0x90
## Scan maxcode[10..16] for the first entry exceeding the reversed
## bits; rdx counts lengths beyond 9.
LBB81_14: ## =>This Inner Loop Header: Depth=1
movq %rdx, %rax
incq %rdx
cmpl %r11d, 1096(%rsi,%rax,4)
jle LBB81_14
## %bb.15:
## Length 16 exhausted without a match: return -1.
movl $-1, %eax
cmpl $7, %edx
je LBB81_12
## %bb.16:
## len = 9 + scan offset; recover the symbol index:
## (bits >> (16 - len)) - firstcode[len] + firstsymbol[len],
## then validate that size[index] == len (cold handler if not).
leaq 9(%rdx), %r10
movl %r10d, %eax
movb $7, %cl
subb %dl, %cl
shrl %cl, %r11d
movzwl 1024(%rsi,%rax,2), %ecx
subl %ecx, %r11d
movzwl 1124(%rsi,%rax,2), %ecx
movslq %r11d, %rax
addq %rcx, %rax
movzbl 1156(%rsi,%rax), %ecx
subl %ecx, %edx
cmpl $-9, %edx
jne LBB81_17
LBB81_11:
## Consume `len` (r10d) bits from the buffer and return value[index].
movl %r10d, %ecx
shrl %cl, %r9d
movl %r9d, 20(%rdi)
subl %r10d, %r8d
movl %r8d, 16(%rdi)
movzwl 1444(%rsi,%rax,2), %eax
LBB81_12:
popq %rbp
retq
LBB81_4:
callq _zhuffman_decode.cold.2
LBB81_5:
xorl %eax, %eax
popq %rbp
retq
LBB81_17:
callq _zhuffman_decode.cold.1
jmp LBB81_5
.cfi_endproc
## -- End function
.section __TEXT,__literal16,16byte_literals
.p2align 4 ## -- Begin function convert_format
LCPI82_0:
.byte 1 ## 0x1
.byte 5 ## 0x5
.byte 9 ## 0x9
.byte 13 ## 0xd
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
LCPI82_1:
.byte 2 ## 0x2
.byte 128 ## 0x80
.byte 128 ## 0x80
.byte 128 ## 0x80
.byte 6 ## 0x6
.byte 128 ## 0x80
.byte 128 ## 0x80
.byte 128 ## 0x80
.byte 10 ## 0xa
.byte 128 ## 0x80
.byte 128 ## 0x80
.byte 128 ## 0x80
.byte 14 ## 0xe
.byte 128 ## 0x80
.byte 128 ## 0x80
.byte 128 ## 0x80
LCPI82_2:
.byte 1 ## 0x1
.byte 128 ## 0x80
.byte 0 ## 0x0
.byte 128 ## 0x80
.byte 5 ## 0x5
.byte 128 ## 0x80
.byte 4 ## 0x4
.byte 128 ## 0x80
.byte 9 ## 0x9
.byte 128 ## 0x80
.byte 8 ## 0x8
.byte 128 ## 0x80
.byte 13 ## 0xd
.byte 128 ## 0x80
.byte 12 ## 0xc
.byte 128 ## 0x80
LCPI82_3:
.short 150 ## 0x96
.short 77 ## 0x4d
.short 150 ## 0x96
.short 77 ## 0x4d
.short 150 ## 0x96
.short 77 ## 0x4d
.short 150 ## 0x96
.short 77 ## 0x4d
LCPI82_4:
.short 29 ## 0x1d
.space 2
.short 29 ## 0x1d
.space 2
.short 29 ## 0x1d
.space 2
.short 29 ## 0x1d
.space 2
LCPI82_5:
.byte 3 ## 0x3
.byte 7 ## 0x7
.byte 11 ## 0xb
.byte 15 ## 0xf
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
LCPI82_6:
.short 29 ## 0x1d
.short 0 ## 0x0
.short 29 ## 0x1d
.short 0 ## 0x0
.short 29 ## 0x1d
.short 0 ## 0x0
.short 29 ## 0x1d
.short 0 ## 0x0
LCPI82_7:
.byte 1 ## 0x1
.byte 128 ## 0x80
.byte 128 ## 0x80
.byte 128 ## 0x80
.byte 5 ## 0x5
.byte 128 ## 0x80
.byte 128 ## 0x80
.byte 128 ## 0x80
.byte 9 ## 0x9
.byte 128 ## 0x80
.byte 128 ## 0x80
.byte 128 ## 0x80
.byte 13 ## 0xd
.byte 128 ## 0x80
.byte 128 ## 0x80
.byte 128 ## 0x80
LCPI82_8:
.byte 2 ## 0x2
.byte 5 ## 0x5
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
LCPI82_9:
.byte 12 ## 0xc
.byte 15 ## 0xf
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
LCPI82_10:
.byte 0 ## 0x0
.byte 128 ## 0x80
.byte 128 ## 0x80
.byte 128 ## 0x80
.byte 3 ## 0x3
.byte 128 ## 0x80
.byte 128 ## 0x80
.byte 128 ## 0x80
.byte 6 ## 0x6
.byte 128 ## 0x80
.byte 128 ## 0x80
.byte 128 ## 0x80
.byte 9 ## 0x9
.byte 128 ## 0x80
.byte 128 ## 0x80
.byte 128 ## 0x80
LCPI82_11:
.byte 2 ## 0x2
.byte 128 ## 0x80
.byte 128 ## 0x80
.byte 128 ## 0x80
.byte 5 ## 0x5
.byte 128 ## 0x80
.byte 128 ## 0x80
.byte 128 ## 0x80
.byte 8 ## 0x8
.byte 128 ## 0x80
.byte 128 ## 0x80
.byte 128 ## 0x80
.byte 11 ## 0xb
.byte 128 ## 0x80
.byte 128 ## 0x80
.byte 128 ## 0x80
LCPI82_12:
.byte 0 ## 0x0
.byte 128 ## 0x80
.byte 3 ## 0x3
.byte 128 ## 0x80
.byte 6 ## 0x6
.byte 128 ## 0x80
.byte 9 ## 0x9
.byte 128 ## 0x80
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
LCPI82_13:
.byte 1 ## 0x1
.byte 128 ## 0x80
.byte 0 ## 0x0
.byte 128 ## 0x80
.byte 4 ## 0x4
.byte 128 ## 0x80
.byte 3 ## 0x3
.byte 128 ## 0x80
.byte 7 ## 0x7
.byte 128 ## 0x80
.byte 6 ## 0x6
.byte 128 ## 0x80
.byte 10 ## 0xa
.byte 128 ## 0x80
.byte 9 ## 0x9
.byte 128 ## 0x80
LCPI82_14:
.byte 0 ## 0x0
.byte 3 ## 0x3
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
LCPI82_15:
.byte 10 ## 0xa
.byte 13 ## 0xd
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
LCPI82_16:
.byte 6 ## 0x6
.byte 128 ## 0x80
.byte 128 ## 0x80
.byte 128 ## 0x80
.byte 9 ## 0x9
.byte 128 ## 0x80
.byte 128 ## 0x80
.byte 128 ## 0x80
.byte 12 ## 0xc
.byte 128 ## 0x80
.byte 128 ## 0x80
.byte 128 ## 0x80
.byte 15 ## 0xf
.byte 128 ## 0x80
.byte 128 ## 0x80
.byte 128 ## 0x80
LCPI82_17:
.byte 0 ## 0x0
.byte 128 ## 0x80
.byte 3 ## 0x3
.byte 128 ## 0x80
.byte 6 ## 0x6
.byte 128 ## 0x80
.byte 9 ## 0x9
.byte 128 ## 0x80
.space 1
.byte 128 ## 0x80
.space 1
.byte 128 ## 0x80
.space 1
.byte 128 ## 0x80
.space 1
.byte 128 ## 0x80
LCPI82_18:
.byte 5 ## 0x5
.byte 128 ## 0x80
.byte 4 ## 0x4
.byte 128 ## 0x80
.byte 8 ## 0x8
.byte 128 ## 0x80
.byte 7 ## 0x7
.byte 128 ## 0x80
.byte 11 ## 0xb
.byte 128 ## 0x80
.byte 10 ## 0xa
.byte 128 ## 0x80
.byte 14 ## 0xe
.byte 128 ## 0x80
.byte 13 ## 0xd
.byte 128 ## 0x80
LCPI82_19:
.short 255 ## 0xff
.short 255 ## 0xff
.short 255 ## 0xff
.short 255 ## 0xff
.short 255 ## 0xff
.short 255 ## 0xff
.short 255 ## 0xff
.short 255 ## 0xff
LCPI82_20:
.byte 0 ## 0x0
.byte 2 ## 0x2
.byte 4 ## 0x4
.byte 6 ## 0x6
.byte 8 ## 0x8
.byte 10 ## 0xa
.byte 12 ## 0xc
.byte 14 ## 0xe
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.space 1
.section __TEXT,__text,regular,pure_instructions
.p2align 4, 0x90
_convert_format: ## @convert_format
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $264, %rsp ## imm = 0x108
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
## kill: def $ecx killed $ecx def $rcx
movq %rcx, -56(%rbp) ## 8-byte Spill
## kill: def $esi killed $esi def $rsi
movq %rdi, %r13
movq %rsi, %rax
movq %rsi, -104(%rbp) ## 8-byte Spill
cmpl %esi, %edx
je LBB82_120
## %bb.1:
movl %edx, %ebx
leal -5(%rbx), %eax
cmpl $-5, %eax
jbe LBB82_124
## %bb.2:
movl %r8d, %r12d
movq -56(%rbp), %rax ## 8-byte Reload
## kill: def $eax killed $eax killed $rax
imull %ebx, %eax
movl %eax, -112(%rbp) ## 4-byte Spill
movl %eax, %edi
imull %r8d, %edi
callq _malloc
testq %rax, %rax
je LBB82_122
## %bb.3:
movq %rax, %rdi
testl %r12d, %r12d
jle LBB82_119
## %bb.4:
movq -104(%rbp), %rax ## 8-byte Reload
leal (%rbx,%rax,8), %esi
cmpl $35, %esi
ja LBB82_123
## %bb.5:
movabsq $60500352000, %rax ## imm = 0xE161A1C00
btq %rsi, %rax
jae LBB82_123
## %bb.6:
movq -56(%rbp), %rdx ## 8-byte Reload
leal -1(%rdx), %r14d
movl %r14d, %r9d
leaq 1(%r9), %rcx
movq %rcx, %r10
andq $-16, %r10
leaq -16(%r10), %rax
movq %rax, -256(%rbp) ## 8-byte Spill
movq %rax, %r11
shrq $4, %r11
incq %r11
movabsq $8589934576, %r15 ## imm = 0x1FFFFFFF0
orq $8, %r15
andq %rcx, %r15
movl %r14d, %eax
subl %r15d, %eax
movl %eax, -60(%rbp) ## 4-byte Spill
movl %ecx, %eax
andl $15, %eax
testq %rax, %rax
movl $16, %edx
cmovneq %rax, %rdx
movq %rcx, %r8
movq %rdx, %rax
movq %rdx, -120(%rbp) ## 8-byte Spill
subq %rdx, %r8
movl %r14d, %eax
subl %r8d, %eax
movl %eax, -76(%rbp) ## 4-byte Spill
movq %rcx, %rax
movq %rcx, -72(%rbp) ## 8-byte Spill
andl $7, %eax
testq %rax, %rax
movl $8, %ecx
cmovneq %rax, %rcx
movl %r12d, %eax
movq %rax, -272(%rbp) ## 8-byte Spill
negq %rcx
movq %r9, -160(%rbp) ## 8-byte Spill
addq %r9, %rcx
incq %rcx
movl %r14d, %eax
subl %ecx, %eax
movl %eax, -84(%rbp) ## 4-byte Spill
movl %r14d, -44(%rbp) ## 4-byte Spill
subl %r10d, %r14d
movl %r14d, -80(%rbp) ## 4-byte Spill
movq %r11, %rax
movq %r11, -248(%rbp) ## 8-byte Spill
andq $-2, %r11
movq %r11, -216(%rbp) ## 8-byte Spill
movq -56(%rbp), %rdx ## 8-byte Reload
movl %edx, %eax
imull -104(%rbp), %eax ## 4-byte Folded Reload
movl %eax, -108(%rbp) ## 4-byte Spill
movl %edx, %r14d
andl $3, %r14d
pcmpeqd %xmm12, %xmm12
movdqa LCPI82_20(%rip), %xmm8 ## xmm8 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
movdqa LCPI82_19(%rip), %xmm11 ## xmm11 = [255,255,255,255,255,255,255,255]
movdqa LCPI82_8(%rip), %xmm3 ## xmm3 = <2,5,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
movdqa LCPI82_10(%rip), %xmm5 ## xmm5 = [0,128,128,128,3,128,128,128,6,128,128,128,9,128,128,128]
movdqa LCPI82_11(%rip), %xmm4 ## xmm4 = [2,128,128,128,5,128,128,128,8,128,128,128,11,128,128,128]
movdqa LCPI82_12(%rip), %xmm9 ## xmm9 = <0,128,3,128,6,128,9,128,u,u,u,u,u,u,u,u>
movdqa LCPI82_3(%rip), %xmm14 ## xmm14 = [150,77,150,77,150,77,150,77]
movdqa LCPI82_13(%rip), %xmm15 ## xmm15 = [1,128,0,128,4,128,3,128,7,128,6,128,10,128,9,128]
movdqa LCPI82_4(%rip), %xmm10 ## xmm10 = <29,u,29,u,29,u,29,u>
movdqa LCPI82_0(%rip), %xmm13 ## xmm13 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
leaq (%r15,%r15), %rax
movq %rax, -144(%rbp) ## 8-byte Spill
leaq (,%r15,4), %rax
movq %rax, -240(%rbp) ## 8-byte Spill
leaq (,%r8,4), %rax
movq %rax, -184(%rbp) ## 8-byte Spill
leaq (,%rcx,4), %rax
movq %rax, -232(%rbp) ## 8-byte Spill
leaq (%r15,%r15,2), %rax
movq %rax, -136(%rbp) ## 8-byte Spill
leaq (%r10,%r10,2), %rax
movq %rax, -168(%rbp) ## 8-byte Spill
leal -2(%rdx), %eax
movl %eax, -64(%rbp) ## 4-byte Spill
movq %r8, -152(%rbp) ## 8-byte Spill
leaq (%r8,%r8), %rax
movq %rax, -176(%rbp) ## 8-byte Spill
movq %rcx, -128(%rbp) ## 8-byte Spill
leaq (%rcx,%rcx), %rax
movq %rax, -224(%rbp) ## 8-byte Spill
movq %r10, -96(%rbp) ## 8-byte Spill
leaq (%r10,%r10), %rax
movq %rax, -192(%rbp) ## 8-byte Spill
leaq (%r14,%r14,2), %rax
movq %rax, -264(%rbp) ## 8-byte Spill
leaq 48(%rdi), %rax
movq %rax, -208(%rbp) ## 8-byte Spill
leaq 24(%r13), %rax
movq %rax, -200(%rbp) ## 8-byte Spill
xorl %r9d, %r9d
xorl %r8d, %r8d
xorl %r12d, %r12d
movdqa LCPI82_7(%rip), %xmm6 ## xmm6 = [1,128,128,128,5,128,128,128,9,128,128,128,13,128,128,128]
movdqa LCPI82_2(%rip), %xmm1 ## xmm1 = [1,128,0,128,5,128,4,128,9,128,8,128,13,128,12,128]
movdqa LCPI82_5(%rip), %xmm7 ## xmm7 = <3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u>
movq %rdi, -288(%rbp) ## 8-byte Spill
movq %rbx, -296(%rbp) ## 8-byte Spill
movq %rsi, -280(%rbp) ## 8-byte Spill
jmp LBB82_8
.p2align 4, 0x90
LBB82_7: ## in Loop: Header=BB82_8 Depth=1
incq %r12
addl -108(%rbp), %r8d ## 4-byte Folded Reload
addl -112(%rbp), %r9d ## 4-byte Folded Reload
cmpq -272(%rbp), %r12 ## 8-byte Folded Reload
movq -296(%rbp), %rbx ## 8-byte Reload
movq -288(%rbp), %rdi ## 8-byte Reload
movq -280(%rbp), %rsi ## 8-byte Reload
je LBB82_119
LBB82_8: ## =>This Loop Header: Depth=1
## Child Loop BB82_64 Depth 2
## Child Loop BB82_67 Depth 2
## Child Loop BB82_109 Depth 2
## Child Loop BB82_113 Depth 2
## Child Loop BB82_117 Depth 2
## Child Loop BB82_85 Depth 2
## Child Loop BB82_50 Depth 2
## Child Loop BB82_53 Depth 2
## Child Loop BB82_102 Depth 2
## Child Loop BB82_45 Depth 2
## Child Loop BB82_107 Depth 2
## Child Loop BB82_82 Depth 2
## Child Loop BB82_36 Depth 2
## Child Loop BB82_91 Depth 2
## Child Loop BB82_93 Depth 2
## Child Loop BB82_97 Depth 2
## Child Loop BB82_100 Depth 2
## Child Loop BB82_21 Depth 2
## Child Loop BB82_25 Depth 2
## Child Loop BB82_16 Depth 2
## Child Loop BB82_88 Depth 2
## Child Loop BB82_70 Depth 2
## Child Loop BB82_76 Depth 2
## Child Loop BB82_79 Depth 2
movl %r8d, %r8d
movl %r12d, %eax
imull -56(%rbp), %eax ## 4-byte Folded Reload
movl %eax, %r10d
imull -104(%rbp), %r10d ## 4-byte Folded Reload
addq %r13, %r10
imull %ebx, %eax
addq %rdi, %rax
leal -10(%rsi), %edx
cmpl $24, %edx
ja LBB82_26
## %bb.9: ## in Loop: Header=BB82_8 Depth=1
movl %r9d, %ecx
leaq (%r8,%r13), %rsi
leaq (%rdi,%rcx), %r11
leaq LJTI82_0(%rip), %rdi
movslq (%rdi,%rdx,4), %rdx
addq %rdi, %rdx
jmpq *%rdx
LBB82_10: ## in Loop: Header=BB82_8 Depth=1
cmpl $0, -44(%rbp) ## 4-byte Folded Reload
js LBB82_7
## %bb.11: ## in Loop: Header=BB82_8 Depth=1
movl -44(%rbp), %edx ## 4-byte Reload
cmpl $15, %edx
jae LBB82_68
## %bb.12: ## in Loop: Header=BB82_8 Depth=1
movl %edx, %ecx
jmp LBB82_75
LBB82_13: ## in Loop: Header=BB82_8 Depth=1
cmpl $0, -44(%rbp) ## 4-byte Folded Reload
js LBB82_7
## %bb.14: ## in Loop: Header=BB82_8 Depth=1
testb $3, -56(%rbp) ## 1-byte Folded Reload
je LBB82_86
## %bb.15: ## in Loop: Header=BB82_8 Depth=1
xorl %edx, %edx
movl -44(%rbp), %edi ## 4-byte Reload
.p2align 4, 0x90
LBB82_16: ## Parent Loop BB82_8 Depth=1
## => This Inner Loop Header: Depth=2
movzbl (%rsi,%rdx), %ecx
movb %cl, 2(%rax)
movb %cl, 1(%rax)
movb %cl, (%rax)
addq $3, %rax
incq %rdx
cmpl %edx, %r14d
jne LBB82_16
## %bb.17: ## in Loop: Header=BB82_8 Depth=1
movq -160(%rbp), %rcx ## 8-byte Reload
## kill: def $ecx killed $ecx killed $rcx
subl %edx, %ecx
addq %rdx, %rsi
movq %rsi, %r10
cmpl $3, %edi
jb LBB82_7
jmp LBB82_87
LBB82_18: ## in Loop: Header=BB82_8 Depth=1
cmpl $0, -44(%rbp) ## 4-byte Folded Reload
js LBB82_7
## %bb.19: ## in Loop: Header=BB82_8 Depth=1
movl -44(%rbp), %edi ## 4-byte Reload
movl %edi, %ecx
testb $3, -56(%rbp) ## 1-byte Folded Reload
je LBB82_23
## %bb.20: ## in Loop: Header=BB82_8 Depth=1
xorl %edx, %edx
xorl %eax, %eax
.p2align 4, 0x90
LBB82_21: ## Parent Loop BB82_8 Depth=1
## => This Inner Loop Header: Depth=2
movzbl (%rsi,%rax), %ecx
movb %cl, 2(%r11,%rax,4)
movb %cl, 1(%r11,%rax,4)
movb %cl, (%r11,%rax,4)
movb $-1, 3(%r11,%rax,4)
incq %rax
addq $-4, %rdx
cmpl %eax, %r14d
jne LBB82_21
## %bb.22: ## in Loop: Header=BB82_8 Depth=1
movq -160(%rbp), %rcx ## 8-byte Reload
## kill: def $ecx killed $ecx killed $rcx
subl %eax, %ecx
subq %rdx, %r11
addq %rax, %rsi
movq %r11, %rax
movq %rsi, %r10
LBB82_23: ## in Loop: Header=BB82_8 Depth=1
cmpl $3, %edi
jb LBB82_7
## %bb.24: ## in Loop: Header=BB82_8 Depth=1
addl $4, %ecx
xorl %edi, %edi
.p2align 4, 0x90
LBB82_25: ## Parent Loop BB82_8 Depth=1
## => This Inner Loop Header: Depth=2
movzbl (%r10,%rdi), %edx
movb %dl, 2(%rax,%rdi,4)
movb %dl, 1(%rax,%rdi,4)
movb %dl, (%rax,%rdi,4)
movb $-1, 3(%rax,%rdi,4)
movzbl 1(%r10,%rdi), %edx
movb %dl, 6(%rax,%rdi,4)
movb %dl, 5(%rax,%rdi,4)
movb %dl, 4(%rax,%rdi,4)
movb $-1, 7(%rax,%rdi,4)
movzbl 2(%r10,%rdi), %edx
movb %dl, 10(%rax,%rdi,4)
movb %dl, 9(%rax,%rdi,4)
movb %dl, 8(%rax,%rdi,4)
movb $-1, 11(%rax,%rdi,4)
movzbl 3(%r10,%rdi), %edx
movb %dl, 14(%rax,%rdi,4)
movb %dl, 13(%rax,%rdi,4)
movb %dl, 12(%rax,%rdi,4)
movb $-1, 15(%rax,%rdi,4)
addl $-4, %ecx
addq $4, %rdi
cmpl $3, %ecx
jg LBB82_25
jmp LBB82_7
LBB82_26: ## in Loop: Header=BB82_8 Depth=1
cmpl $0, -44(%rbp) ## 4-byte Folded Reload
js LBB82_7
## %bb.27: ## in Loop: Header=BB82_8 Depth=1
testb $1, -56(%rbp) ## 1-byte Folded Reload
jne LBB82_77
## %bb.28: ## in Loop: Header=BB82_8 Depth=1
movl -44(%rbp), %edx ## 4-byte Reload
movl %edx, %ecx
testl %edx, %edx
jne LBB82_78
jmp LBB82_7
LBB82_29: ## in Loop: Header=BB82_8 Depth=1
cmpl $0, -44(%rbp) ## 4-byte Folded Reload
js LBB82_7
## %bb.30: ## in Loop: Header=BB82_8 Depth=1
movl -44(%rbp), %ecx ## 4-byte Reload
cmpl $8, %ecx
jb LBB82_99
## %bb.31: ## in Loop: Header=BB82_8 Depth=1
cmpl $16, %ecx
movq -152(%rbp), %rdx ## 8-byte Reload
jae LBB82_92
## %bb.32: ## in Loop: Header=BB82_8 Depth=1
xorl %ecx, %ecx
jmp LBB82_96
LBB82_33: ## in Loop: Header=BB82_8 Depth=1
cmpl $0, -44(%rbp) ## 4-byte Folded Reload
js LBB82_7
## %bb.34: ## in Loop: Header=BB82_8 Depth=1
testb $3, -56(%rbp) ## 1-byte Folded Reload
je LBB82_89
## %bb.35: ## in Loop: Header=BB82_8 Depth=1
xorl %eax, %eax
movl -44(%rbp), %edx ## 4-byte Reload
movl %edx, %edi
movq -264(%rbp), %rsi ## 8-byte Reload
.p2align 4, 0x90
LBB82_36: ## Parent Loop BB82_8 Depth=1
## => This Inner Loop Header: Depth=2
movzbl (%r10), %ecx
movb %cl, 2(%r11,%rax)
movb %cl, 1(%r11,%rax)
movb %cl, (%r11,%rax)
addq $2, %r10
decl %edi
addq $3, %rax
cmpl %eax, %esi
jne LBB82_36
## %bb.37: ## in Loop: Header=BB82_8 Depth=1
addq %rax, %r11
movq %r11, %rax
cmpl $3, %edx
jb LBB82_7
jmp LBB82_90
LBB82_38: ## in Loop: Header=BB82_8 Depth=1
cmpl $0, -44(%rbp) ## 4-byte Folded Reload
js LBB82_7
## %bb.39: ## in Loop: Header=BB82_8 Depth=1
testb $1, -56(%rbp) ## 1-byte Folded Reload
jne LBB82_80
## %bb.40: ## in Loop: Header=BB82_8 Depth=1
movl -44(%rbp), %edx ## 4-byte Reload
movl %edx, %ecx
testl %edx, %edx
jne LBB82_81
jmp LBB82_7
LBB82_41: ## in Loop: Header=BB82_8 Depth=1
cmpl $0, -44(%rbp) ## 4-byte Folded Reload
js LBB82_7
## %bb.42: ## in Loop: Header=BB82_8 Depth=1
movl -44(%rbp), %ecx ## 4-byte Reload
cmpl $7, %ecx
jb LBB82_106
## %bb.43: ## in Loop: Header=BB82_8 Depth=1
xorl %edi, %edi
cmpl $15, %ecx
jae LBB82_101
LBB82_44: ## in Loop: Header=BB82_8 Depth=1
addq %r15, %rax
addq -136(%rbp), %r10 ## 8-byte Folded Reload
leaq (%rdi,%rdi,2), %rcx
addq %r8, %rcx
addq %r13, %rcx
movdqa %xmm4, %xmm6
movdqa LCPI82_9(%rip), %xmm7 ## xmm7 = <12,15,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
movdqa %xmm5, %xmm8
.p2align 4, 0x90
LBB82_45: ## Parent Loop BB82_8 Depth=1
## => This Inner Loop Header: Depth=2
movdqu (%rcx), %xmm1
movq 16(%rcx), %xmm0 ## xmm0 = mem[0],zero
movdqa %xmm0, %xmm2
pshufb %xmm3, %xmm2
movdqa %xmm3, %xmm5
movdqa %xmm1, %xmm3
pshufb %xmm7, %xmm3
punpcklwd %xmm2, %xmm3 ## xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
movdqa %xmm0, %xmm2
palignr $13, %xmm1, %xmm2 ## xmm2 = xmm1[13,14,15],xmm2[0,1,2,3,4,5,6,7,8,9,10,11,12]
palignr $14, %xmm1, %xmm0 ## xmm0 = xmm1[14,15],xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
pshufb %xmm8, %xmm0
movdqa %xmm1, %xmm4
pshufb %xmm6, %xmm4
pshufb %xmm9, %xmm2
pmovzxbw %xmm3, %xmm3 ## xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
punpcklwd %xmm3, %xmm2 ## xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
movdqa LCPI82_13(%rip), %xmm15 ## xmm15 = [1,128,0,128,4,128,3,128,7,128,6,128,10,128,9,128]
movdqa %xmm5, %xmm3
pmaddwd %xmm14, %xmm2
pshufb %xmm15, %xmm1
pmaddwd %xmm14, %xmm1
pmaddwd %xmm10, %xmm4
paddd %xmm1, %xmm4
pmaddwd %xmm10, %xmm0
paddd %xmm2, %xmm0
pshufb %xmm13, %xmm4
pshufb %xmm13, %xmm0
punpckldq %xmm0, %xmm4 ## xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
movq %xmm4, (%r11,%rdi)
addq $8, %rdi
addq $24, %rcx
cmpq %rdi, %r15
jne LBB82_45
## %bb.46: ## in Loop: Header=BB82_8 Depth=1
movl -60(%rbp), %ecx ## 4-byte Reload
cmpq %r15, -72(%rbp) ## 8-byte Folded Reload
movdqa %xmm6, %xmm4
movdqa LCPI82_7(%rip), %xmm6 ## xmm6 = [1,128,128,128,5,128,128,128,9,128,128,128,13,128,128,128]
movdqa LCPI82_2(%rip), %xmm1 ## xmm1 = [1,128,0,128,5,128,4,128,9,128,8,128,13,128,12,128]
movdqa %xmm8, %xmm5
movdqa LCPI82_20(%rip), %xmm8 ## xmm8 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
movdqa LCPI82_5(%rip), %xmm7 ## xmm7 = <3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u>
je LBB82_7
jmp LBB82_106
LBB82_47: ## in Loop: Header=BB82_8 Depth=1
cmpl $0, -44(%rbp) ## 4-byte Folded Reload
js LBB82_7
## %bb.48: ## in Loop: Header=BB82_8 Depth=1
movl -44(%rbp), %ecx ## 4-byte Reload
cmpl $7, %ecx
jb LBB82_52
## %bb.49: ## in Loop: Header=BB82_8 Depth=1
addq -144(%rbp), %rax ## 8-byte Folded Reload
addq -136(%rbp), %r10 ## 8-byte Folded Reload
xorl %ecx, %ecx
movdqa %xmm4, %xmm6
movdqa LCPI82_9(%rip), %xmm7 ## xmm7 = <12,15,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
.p2align 4, 0x90
LBB82_50: ## Parent Loop BB82_8 Depth=1
## => This Inner Loop Header: Depth=2
movdqu (%rsi), %xmm1
movq 16(%rsi), %xmm0 ## xmm0 = mem[0],zero
movdqa %xmm0, %xmm2
pshufb %xmm3, %xmm2
movdqa %xmm3, %xmm8
movdqa %xmm1, %xmm3
pshufb %xmm7, %xmm3
punpcklwd %xmm2, %xmm3 ## xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
movdqa %xmm0, %xmm2
palignr $13, %xmm1, %xmm2 ## xmm2 = xmm1[13,14,15],xmm2[0,1,2,3,4,5,6,7,8,9,10,11,12]
palignr $14, %xmm1, %xmm0 ## xmm0 = xmm1[14,15],xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
pshufb %xmm5, %xmm0
movdqa %xmm1, %xmm4
pshufb %xmm6, %xmm4
pshufb %xmm9, %xmm2
pmovzxbw %xmm3, %xmm3 ## xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
punpcklwd %xmm3, %xmm2 ## xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
movdqa LCPI82_13(%rip), %xmm15 ## xmm15 = [1,128,0,128,4,128,3,128,7,128,6,128,10,128,9,128]
movdqa %xmm8, %xmm3
pmaddwd %xmm14, %xmm2
pshufb %xmm15, %xmm1
pmaddwd %xmm14, %xmm1
pmaddwd %xmm10, %xmm4
paddd %xmm1, %xmm4
pmaddwd %xmm10, %xmm0
paddd %xmm2, %xmm0
pshufb %xmm13, %xmm4
pshufb %xmm13, %xmm0
punpckldq %xmm0, %xmm4 ## xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
punpcklbw %xmm12, %xmm4 ## xmm4 = xmm4[0],xmm12[0],xmm4[1],xmm12[1],xmm4[2],xmm12[2],xmm4[3],xmm12[3],xmm4[4],xmm12[4],xmm4[5],xmm12[5],xmm4[6],xmm12[6],xmm4[7],xmm12[7]
movdqu %xmm4, (%r11,%rcx,2)
addq $8, %rcx
addq $24, %rsi
cmpq %rcx, %r15
jne LBB82_50
## %bb.51: ## in Loop: Header=BB82_8 Depth=1
movl -60(%rbp), %ecx ## 4-byte Reload
cmpq %r15, -72(%rbp) ## 8-byte Folded Reload
movdqa %xmm6, %xmm4
movdqa LCPI82_7(%rip), %xmm6 ## xmm6 = [1,128,128,128,5,128,128,128,9,128,128,128,13,128,128,128]
movdqa LCPI82_2(%rip), %xmm1 ## xmm1 = [1,128,0,128,5,128,4,128,9,128,8,128,13,128,12,128]
movdqa LCPI82_20(%rip), %xmm8 ## xmm8 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
movdqa LCPI82_5(%rip), %xmm7 ## xmm7 = <3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u>
je LBB82_7
LBB82_52: ## in Loop: Header=BB82_8 Depth=1
incl %ecx
.p2align 4, 0x90
LBB82_53: ## Parent Loop BB82_8 Depth=1
## => This Inner Loop Header: Depth=2
movzbl (%r10), %edx
movzbl 1(%r10), %edi
movzbl 2(%r10), %ebx
imull $77, %edx, %edx
imull $150, %edi, %edi
addl %edx, %edi
leal (%rbx,%rbx,8), %edx
leal (%rdx,%rdx,2), %edx
addl %ebx, %edx
addl %ebx, %edx
addl %edi, %edx
movb %dh, (%rax)
movb $-1, 1(%rax)
decl %ecx
addq $3, %r10
addq $2, %rax
testl %ecx, %ecx
jg LBB82_53
jmp LBB82_7
LBB82_54: ## in Loop: Header=BB82_8 Depth=1
cmpl $0, -44(%rbp) ## 4-byte Folded Reload
js LBB82_7
## %bb.55: ## in Loop: Header=BB82_8 Depth=1
testb $1, -56(%rbp) ## 1-byte Folded Reload
jne LBB82_83
## %bb.56: ## in Loop: Header=BB82_8 Depth=1
movl -44(%rbp), %edx ## 4-byte Reload
movl %edx, %ecx
testl %edx, %edx
jne LBB82_84
jmp LBB82_7
LBB82_57: ## in Loop: Header=BB82_8 Depth=1
cmpl $0, -44(%rbp) ## 4-byte Folded Reload
js LBB82_7
## %bb.58: ## in Loop: Header=BB82_8 Depth=1
movl -44(%rbp), %ecx ## 4-byte Reload
cmpl $8, %ecx
jb LBB82_116
## %bb.59: ## in Loop: Header=BB82_8 Depth=1
cmpl $16, %ecx
movq -152(%rbp), %rdx ## 8-byte Reload
jae LBB82_108
## %bb.60: ## in Loop: Header=BB82_8 Depth=1
xorl %ecx, %ecx
jmp LBB82_112
LBB82_61: ## in Loop: Header=BB82_8 Depth=1
cmpl $0, -44(%rbp) ## 4-byte Folded Reload
js LBB82_7
## %bb.62: ## in Loop: Header=BB82_8 Depth=1
movl -44(%rbp), %ecx ## 4-byte Reload
cmpl $7, %ecx
jb LBB82_66
## %bb.63: ## in Loop: Header=BB82_8 Depth=1
movdqa %xmm8, %xmm11
movdqa %xmm4, %xmm12
movdqa %xmm5, %xmm9
addq -144(%rbp), %rax ## 8-byte Folded Reload
addq -240(%rbp), %r10 ## 8-byte Folded Reload
xorl %ecx, %ecx
movdqa %xmm7, %xmm8
movdqa %xmm1, %xmm7
movdqa LCPI82_1(%rip), %xmm1 ## xmm1 = [2,128,128,128,6,128,128,128,10,128,128,128,14,128,128,128]
.p2align 4, 0x90
LBB82_64: ## Parent Loop BB82_8 Depth=1
## => This Inner Loop Header: Depth=2
movdqu (%rsi,%rcx,4), %xmm0
movdqu 16(%rsi,%rcx,4), %xmm3
movdqa %xmm0, %xmm2
pshufb %xmm1, %xmm2
movdqa %xmm3, %xmm4
pshufb %xmm1, %xmm4
movdqa %xmm3, %xmm5
pshufb %xmm7, %xmm5
pmaddwd %xmm14, %xmm5
movdqa %xmm0, %xmm6
pshufb %xmm7, %xmm6
pmaddwd %xmm14, %xmm6
pmaddwd %xmm10, %xmm4
paddd %xmm5, %xmm4
pmaddwd %xmm10, %xmm2
paddd %xmm6, %xmm2
pshufb %xmm13, %xmm4
pshufb %xmm13, %xmm2
punpckldq %xmm4, %xmm2 ## xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
pshufb %xmm8, %xmm3
pshufb %xmm8, %xmm0
punpckldq %xmm3, %xmm0 ## xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
punpcklbw %xmm0, %xmm2 ## xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
movdqu %xmm2, (%r11,%rcx,2)
addq $8, %rcx
cmpq %rcx, %r15
jne LBB82_64
## %bb.65: ## in Loop: Header=BB82_8 Depth=1
movl -60(%rbp), %ecx ## 4-byte Reload
cmpq %r15, -72(%rbp) ## 8-byte Folded Reload
movdqa %xmm9, %xmm5
movdqa %xmm12, %xmm4
movdqa LCPI82_12(%rip), %xmm9 ## xmm9 = <0,128,3,128,6,128,9,128,u,u,u,u,u,u,u,u>
movdqa LCPI82_13(%rip), %xmm15 ## xmm15 = [1,128,0,128,4,128,3,128,7,128,6,128,10,128,9,128]
movdqa LCPI82_7(%rip), %xmm6 ## xmm6 = [1,128,128,128,5,128,128,128,9,128,128,128,13,128,128,128]
movdqa %xmm7, %xmm1
movdqa %xmm8, %xmm7
pcmpeqd %xmm12, %xmm12
movdqa %xmm11, %xmm8
movdqa LCPI82_19(%rip), %xmm11 ## xmm11 = [255,255,255,255,255,255,255,255]
movdqa LCPI82_8(%rip), %xmm3 ## xmm3 = <2,5,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
je LBB82_7
LBB82_66: ## in Loop: Header=BB82_8 Depth=1
incl %ecx
xorl %edi, %edi
.p2align 4, 0x90
LBB82_67: ## Parent Loop BB82_8 Depth=1
## => This Inner Loop Header: Depth=2
movzbl (%r10,%rdi,2), %edx
movzbl 1(%r10,%rdi,2), %esi
movzbl 2(%r10,%rdi,2), %ebx
imull $77, %edx, %edx
imull $150, %esi, %esi
addl %edx, %esi
leal (%rbx,%rbx,8), %edx
leal (%rdx,%rdx,2), %edx
addl %ebx, %edx
addl %ebx, %edx
addl %esi, %edx
movb %dh, (%rax,%rdi)
movzbl 3(%r10,%rdi,2), %edx
movb %dl, 1(%rax,%rdi)
decl %ecx
addq $2, %rdi
testl %ecx, %ecx
jg LBB82_67
jmp LBB82_7
LBB82_68: ## in Loop: Header=BB82_8 Depth=1
cmpq $0, -256(%rbp) ## 8-byte Folded Reload
je LBB82_118
## %bb.69: ## in Loop: Header=BB82_8 Depth=1
addq -208(%rbp), %rcx ## 8-byte Folded Reload
movq -200(%rbp), %rdx ## 8-byte Reload
leaq (%rdx,%r8), %rbx
movq -216(%rbp), %rdx ## 8-byte Reload
xorl %edi, %edi
.p2align 4, 0x90
LBB82_70: ## Parent Loop BB82_8 Depth=1
## => This Inner Loop Header: Depth=2
movq -24(%rbx,%rdi), %xmm0 ## xmm0 = mem[0],zero
movq -16(%rbx,%rdi), %xmm1 ## xmm1 = mem[0],zero
punpcklbw %xmm12, %xmm0 ## xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1],xmm0[2],xmm12[2],xmm0[3],xmm12[3],xmm0[4],xmm12[4],xmm0[5],xmm12[5],xmm0[6],xmm12[6],xmm0[7],xmm12[7]
movdqu %xmm0, -48(%rcx,%rdi,2)
punpcklbw %xmm12, %xmm1 ## xmm1 = xmm1[0],xmm12[0],xmm1[1],xmm12[1],xmm1[2],xmm12[2],xmm1[3],xmm12[3],xmm1[4],xmm12[4],xmm1[5],xmm12[5],xmm1[6],xmm12[6],xmm1[7],xmm12[7]
movdqu %xmm1, -32(%rcx,%rdi,2)
movq -8(%rbx,%rdi), %xmm0 ## xmm0 = mem[0],zero
movq (%rbx,%rdi), %xmm1 ## xmm1 = mem[0],zero
punpcklbw %xmm12, %xmm0 ## xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1],xmm0[2],xmm12[2],xmm0[3],xmm12[3],xmm0[4],xmm12[4],xmm0[5],xmm12[5],xmm0[6],xmm12[6],xmm0[7],xmm12[7]
movdqu %xmm0, -16(%rcx,%rdi,2)
punpcklbw %xmm12, %xmm1 ## xmm1 = xmm1[0],xmm12[0],xmm1[1],xmm12[1],xmm1[2],xmm12[2],xmm1[3],xmm12[3],xmm1[4],xmm12[4],xmm1[5],xmm12[5],xmm1[6],xmm12[6],xmm1[7],xmm12[7]
movdqu %xmm1, (%rcx,%rdi,2)
addq $32, %rdi
addq $-2, %rdx
jne LBB82_70
## %bb.71: ## in Loop: Header=BB82_8 Depth=1
testb $1, -248(%rbp) ## 1-byte Folded Reload
je LBB82_73
LBB82_72: ## in Loop: Header=BB82_8 Depth=1
leaq (%rdi,%rdi), %rcx
orq $16, %rcx
movq (%r10,%rdi), %xmm0 ## xmm0 = mem[0],zero
movq 8(%r10,%rdi), %xmm1 ## xmm1 = mem[0],zero
punpcklbw %xmm12, %xmm0 ## xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1],xmm0[2],xmm12[2],xmm0[3],xmm12[3],xmm0[4],xmm12[4],xmm0[5],xmm12[5],xmm0[6],xmm12[6],xmm0[7],xmm12[7]
movdqu %xmm0, (%rax,%rdi,2)
punpcklbw %xmm12, %xmm1 ## xmm1 = xmm1[0],xmm12[0],xmm1[1],xmm12[1],xmm1[2],xmm12[2],xmm1[3],xmm12[3],xmm1[4],xmm12[4],xmm1[5],xmm12[5],xmm1[6],xmm12[6],xmm1[7],xmm12[7]
movdqu %xmm1, (%rax,%rcx)
LBB82_73: ## in Loop: Header=BB82_8 Depth=1
movq -96(%rbp), %rcx ## 8-byte Reload
cmpq %rcx, -72(%rbp) ## 8-byte Folded Reload
movdqa LCPI82_2(%rip), %xmm1 ## xmm1 = [1,128,0,128,5,128,4,128,9,128,8,128,13,128,12,128]
je LBB82_7
## %bb.74: ## in Loop: Header=BB82_8 Depth=1
addq -192(%rbp), %rax ## 8-byte Folded Reload
addq -96(%rbp), %r10 ## 8-byte Folded Reload
movl -80(%rbp), %ecx ## 4-byte Reload
LBB82_75: ## in Loop: Header=BB82_8 Depth=1
incl %ecx
xorl %edx, %edx
.p2align 4, 0x90
LBB82_76: ## Parent Loop BB82_8 Depth=1
## => This Inner Loop Header: Depth=2
movzbl (%r10,%rdx), %ebx
movb %bl, (%rax,%rdx,2)
movb $-1, 1(%rax,%rdx,2)
decl %ecx
incq %rdx
testl %ecx, %ecx
jg LBB82_76
jmp LBB82_7
LBB82_77: ## in Loop: Header=BB82_8 Depth=1
movb (%r10), %cl
movb %cl, (%rax)
movb 1(%r10), %cl
movb %cl, 1(%rax)
movb 2(%r10), %cl
movb %cl, 2(%rax)
addq $4, %r10
addq $3, %rax
movl -64(%rbp), %ecx ## 4-byte Reload
movl -44(%rbp), %edx ## 4-byte Reload
testl %edx, %edx
je LBB82_7
LBB82_78: ## in Loop: Header=BB82_8 Depth=1
addl $2, %ecx
.p2align 4, 0x90
LBB82_79: ## Parent Loop BB82_8 Depth=1
## => This Inner Loop Header: Depth=2
movzbl (%r10), %edx
movb %dl, (%rax)
movzbl 1(%r10), %edx
movb %dl, 1(%rax)
movzbl 2(%r10), %edx
movb %dl, 2(%rax)
movzbl 4(%r10), %edx
movb %dl, 3(%rax)
movzbl 5(%r10), %edx
movb %dl, 4(%rax)
movzbl 6(%r10), %edx
movb %dl, 5(%rax)
addl $-2, %ecx
addq $8, %r10
addq $6, %rax
cmpl $1, %ecx
jg LBB82_79
jmp LBB82_7
LBB82_80: ## in Loop: Header=BB82_8 Depth=1
movb (%r10), %cl
movb %cl, 2(%rax)
movb %cl, 1(%rax)
movb %cl, (%rax)
movb 1(%r10), %cl
movb %cl, 3(%rax)
addq $2, %r10
addq $4, %rax
movl -64(%rbp), %ecx ## 4-byte Reload
movl -44(%rbp), %edx ## 4-byte Reload
testl %edx, %edx
je LBB82_7
LBB82_81: ## in Loop: Header=BB82_8 Depth=1
addl $2, %ecx
xorl %edx, %edx
.p2align 4, 0x90
LBB82_82: ## Parent Loop BB82_8 Depth=1
## => This Inner Loop Header: Depth=2
movzbl (%r10,%rdx), %ebx
movb %bl, 2(%rax,%rdx,2)
movb %bl, 1(%rax,%rdx,2)
movb %bl, (%rax,%rdx,2)
movzbl 1(%r10,%rdx), %ebx
movb %bl, 3(%rax,%rdx,2)
movzbl 2(%r10,%rdx), %ebx
movb %bl, 6(%rax,%rdx,2)
movb %bl, 5(%rax,%rdx,2)
movb %bl, 4(%rax,%rdx,2)
movzbl 3(%r10,%rdx), %ebx
movb %bl, 7(%rax,%rdx,2)
addl $-2, %ecx
addq $4, %rdx
cmpl $1, %ecx
jg LBB82_82
jmp LBB82_7
LBB82_83: ## in Loop: Header=BB82_8 Depth=1
movb (%r10), %cl
movb %cl, (%rax)
movb 1(%r10), %cl
movb %cl, 1(%rax)
movb 2(%r10), %cl
movb %cl, 2(%rax)
movb $-1, 3(%rax)
addq $3, %r10
addq $4, %rax
movl -64(%rbp), %ecx ## 4-byte Reload
movl -44(%rbp), %edx ## 4-byte Reload
testl %edx, %edx
je LBB82_7
LBB82_84: ## in Loop: Header=BB82_8 Depth=1
addl $2, %ecx
.p2align 4, 0x90
LBB82_85: ## Parent Loop BB82_8 Depth=1
## => This Inner Loop Header: Depth=2
movzbl (%r10), %edx
movb %dl, (%rax)
movzbl 1(%r10), %edx
movb %dl, 1(%rax)
movzbl 2(%r10), %edx
movb %dl, 2(%rax)
movb $-1, 3(%rax)
movzbl 3(%r10), %edx
movb %dl, 4(%rax)
movzbl 4(%r10), %edx
movb %dl, 5(%rax)
movzbl 5(%r10), %edx
movb %dl, 6(%rax)
movb $-1, 7(%rax)
addl $-2, %ecx
addq $6, %r10
addq $8, %rax
cmpl $1, %ecx
jg LBB82_85
jmp LBB82_7
LBB82_86: ## in Loop: Header=BB82_8 Depth=1
movl -44(%rbp), %edi ## 4-byte Reload
movl %edi, %ecx
cmpl $3, %edi
jb LBB82_7
LBB82_87: ## in Loop: Header=BB82_8 Depth=1
addl $4, %ecx
addq $11, %rax
.p2align 4, 0x90
LBB82_88: ## Parent Loop BB82_8 Depth=1
## => This Inner Loop Header: Depth=2
movzbl (%r10), %edx
movb %dl, -9(%rax)
movb %dl, -10(%rax)
movb %dl, -11(%rax)
movzbl 1(%r10), %edx
movb %dl, -6(%rax)
movb %dl, -7(%rax)
movb %dl, -8(%rax)
movzbl 2(%r10), %edx
movb %dl, -3(%rax)
movb %dl, -4(%rax)
movb %dl, -5(%rax)
movzbl 3(%r10), %edx
movb %dl, (%rax)
movb %dl, -1(%rax)
movb %dl, -2(%rax)
addl $-4, %ecx
addq $12, %rax
addq $4, %r10
cmpl $3, %ecx
jg LBB82_88
jmp LBB82_7
LBB82_89: ## in Loop: Header=BB82_8 Depth=1
movl -44(%rbp), %edx ## 4-byte Reload
movl %edx, %edi
cmpl $3, %edx
jb LBB82_7
LBB82_90: ## in Loop: Header=BB82_8 Depth=1
addl $4, %edi
addq $11, %rax
.p2align 4, 0x90
LBB82_91: ## Parent Loop BB82_8 Depth=1
## => This Inner Loop Header: Depth=2
movzbl (%r10), %ecx
movb %cl, -9(%rax)
movb %cl, -10(%rax)
movb %cl, -11(%rax)
movzbl 2(%r10), %ecx
movb %cl, -6(%rax)
movb %cl, -7(%rax)
movb %cl, -8(%rax)
movzbl 4(%r10), %ecx
movb %cl, -3(%rax)
movb %cl, -4(%rax)
movb %cl, -5(%rax)
movzbl 6(%r10), %ecx
movb %cl, (%rax)
movb %cl, -1(%rax)
movb %cl, -2(%rax)
addl $-4, %edi
addq $12, %rax
addq $8, %r10
cmpl $3, %edi
jg LBB82_91
jmp LBB82_7
LBB82_92: ## in Loop: Header=BB82_8 Depth=1
xorl %ecx, %ecx
.p2align 4, 0x90
LBB82_93: ## Parent Loop BB82_8 Depth=1
## => This Inner Loop Header: Depth=2
movdqu (%rsi,%rcx,2), %xmm0
movdqu 16(%rsi,%rcx,2), %xmm1
pand %xmm11, %xmm1
pand %xmm11, %xmm0
packuswb %xmm1, %xmm0
movdqu %xmm0, (%r11,%rcx)
addq $16, %rcx
cmpq %rcx, %rdx
jne LBB82_93
## %bb.94: ## in Loop: Header=BB82_8 Depth=1
movq %rdx, %rcx
cmpl $8, -120(%rbp) ## 4-byte Folded Reload
movdqa LCPI82_2(%rip), %xmm1 ## xmm1 = [1,128,0,128,5,128,4,128,9,128,8,128,13,128,12,128]
ja LBB82_96
## %bb.95: ## in Loop: Header=BB82_8 Depth=1
addq -176(%rbp), %r10 ## 8-byte Folded Reload
addq %rdx, %rax
movl -76(%rbp), %ecx ## 4-byte Reload
jmp LBB82_99
LBB82_96: ## in Loop: Header=BB82_8 Depth=1
movq -128(%rbp), %rdx ## 8-byte Reload
addq %rdx, %rax
addq -224(%rbp), %r10 ## 8-byte Folded Reload
.p2align 4, 0x90
LBB82_97: ## Parent Loop BB82_8 Depth=1
## => This Inner Loop Header: Depth=2
movdqu (%rsi,%rcx,2), %xmm0
pshufb %xmm8, %xmm0
movq %xmm0, (%r11,%rcx)
addq $8, %rcx
cmpq %rcx, %rdx
jne LBB82_97
## %bb.98: ## in Loop: Header=BB82_8 Depth=1
movl -84(%rbp), %ecx ## 4-byte Reload
LBB82_99: ## in Loop: Header=BB82_8 Depth=1
incl %ecx
xorl %edx, %edx
.p2align 4, 0x90
LBB82_100: ## Parent Loop BB82_8 Depth=1
## => This Inner Loop Header: Depth=2
movzbl (%r10,%rdx,2), %ebx
movb %bl, (%rax,%rdx)
decl %ecx
incq %rdx
testl %ecx, %ecx
jg LBB82_100
jmp LBB82_7
LBB82_101: ## in Loop: Header=BB82_8 Depth=1
movq -96(%rbp), %rcx ## 8-byte Reload
movdqa LCPI82_14(%rip), %xmm13 ## xmm13 = <0,3,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
movdqa %xmm3, %xmm8
movdqa LCPI82_17(%rip), %xmm3 ## xmm3 = <0,128,3,128,6,128,9,128,u,128,u,128,u,128,u,128>
movdqa %xmm15, %xmm7
movdqa %xmm4, %xmm12
movdqa LCPI82_6(%rip), %xmm11 ## xmm11 = [29,0,29,0,29,0,29,0]
movdqa LCPI82_9(%rip), %xmm5 ## xmm5 = <12,15,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
.p2align 4, 0x90
LBB82_102: ## Parent Loop BB82_8 Depth=1
## => This Inner Loop Header: Depth=2
movdqu (%rsi), %xmm9
movdqu 16(%rsi), %xmm15
movdqu 32(%rsi), %xmm10
movdqa %xmm15, %xmm0
pshufb %xmm8, %xmm0
movdqa %xmm9, %xmm1
pshufb %xmm5, %xmm1
punpcklwd %xmm0, %xmm1 ## xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
movdqa %xmm10, %xmm0
palignr $8, %xmm15, %xmm0 ## xmm0 = xmm15[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
movdqa %xmm10, %xmm2
pshufb %xmm13, %xmm2
movdqa %xmm15, %xmm4
pshufb LCPI82_15(%rip), %xmm4 ## xmm4 = xmm4[10,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
punpcklwd %xmm2, %xmm4 ## xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
movdqa %xmm15, %xmm5
palignr $13, %xmm9, %xmm5 ## xmm5 = xmm9[13,14,15],xmm5[0,1,2,3,4,5,6,7,8,9,10,11,12]
pshufb %xmm3, %xmm5
pmovzxbw %xmm1, %xmm1 ## xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
punpcklwd %xmm1, %xmm5 ## xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3]
movdqa %xmm10, %xmm1
palignr $9, %xmm15, %xmm1 ## xmm1 = xmm15[9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8]
palignr $14, %xmm9, %xmm15 ## xmm15 = xmm9[14,15],xmm15[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
pshufb %xmm3, %xmm0
pshufb %xmm3, %xmm1
punpcklwd %xmm0, %xmm1 ## xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
movdqa %xmm9, %xmm2
pshufb %xmm12, %xmm2
pshufb %xmm7, %xmm9
pmaddwd %xmm14, %xmm9
pmaddwd %xmm11, %xmm2
paddd %xmm9, %xmm2
pshufb LCPI82_10(%rip), %xmm15 ## xmm15 = xmm15[0],zero,zero,zero,xmm15[3],zero,zero,zero,xmm15[6],zero,zero,zero,xmm15[9],zero,zero,zero
pmaddwd %xmm14, %xmm5
pmaddwd %xmm11, %xmm15
paddd %xmm5, %xmm15
movdqa LCPI82_9(%rip), %xmm5 ## xmm5 = <12,15,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
pmovzxbd %xmm4, %xmm0 ## xmm0 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero
pmaddwd %xmm14, %xmm1
pmaddwd %xmm11, %xmm0
paddd %xmm1, %xmm0
movdqa %xmm10, %xmm1
pshufb LCPI82_16(%rip), %xmm1 ## xmm1 = xmm1[6],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[15],zero,zero,zero
pshufb LCPI82_18(%rip), %xmm10 ## xmm10 = xmm10[5],zero,xmm10[4],zero,xmm10[8],zero,xmm10[7],zero,xmm10[11],zero,xmm10[10],zero,xmm10[14],zero,xmm10[13],zero
pmaddwd %xmm14, %xmm10
pmaddwd LCPI82_4(%rip), %xmm1
paddd %xmm10, %xmm1
pshufb %xmm6, %xmm2
pshufb %xmm6, %xmm15
packusdw %xmm15, %xmm2
pshufb %xmm6, %xmm0
pshufb %xmm6, %xmm1
packusdw %xmm1, %xmm0
packuswb %xmm0, %xmm2
movdqu %xmm2, (%r11,%rdi)
addq $16, %rdi
addq $48, %rsi
cmpq %rdi, %rcx
jne LBB82_102
## %bb.103: ## in Loop: Header=BB82_8 Depth=1
cmpq %rcx, -72(%rbp) ## 8-byte Folded Reload
movdqa LCPI82_20(%rip), %xmm0 ## xmm0 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
movdqa LCPI82_19(%rip), %xmm11 ## xmm11 = [255,255,255,255,255,255,255,255]
movdqa %xmm8, %xmm3
movdqa LCPI82_10(%rip), %xmm5 ## xmm5 = [0,128,128,128,3,128,128,128,6,128,128,128,9,128,128,128]
movdqa %xmm12, %xmm4
movdqa LCPI82_12(%rip), %xmm9 ## xmm9 = <0,128,3,128,6,128,9,128,u,u,u,u,u,u,u,u>
movdqa %xmm7, %xmm15
movdqa LCPI82_0(%rip), %xmm13 ## xmm13 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
movdqa LCPI82_4(%rip), %xmm10 ## xmm10 = <29,u,29,u,29,u,29,u>
movdqa %xmm0, %xmm8
movdqa LCPI82_2(%rip), %xmm1 ## xmm1 = [1,128,0,128,5,128,4,128,9,128,8,128,13,128,12,128]
movdqa LCPI82_5(%rip), %xmm7 ## xmm7 = <3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u>
pcmpeqd %xmm12, %xmm12
je LBB82_7
## %bb.104: ## in Loop: Header=BB82_8 Depth=1
movq -96(%rbp), %rcx ## 8-byte Reload
movq %rcx, %rdi
testb $8, -72(%rbp) ## 1-byte Folded Reload
jne LBB82_44
## %bb.105: ## in Loop: Header=BB82_8 Depth=1
addq -168(%rbp), %r10 ## 8-byte Folded Reload
addq %rcx, %rax
movl -80(%rbp), %ecx ## 4-byte Reload
LBB82_106: ## in Loop: Header=BB82_8 Depth=1
incl %ecx
.p2align 4, 0x90
LBB82_107: ## Parent Loop BB82_8 Depth=1
## => This Inner Loop Header: Depth=2
movzbl (%r10), %edx
movzbl 1(%r10), %edi
movzbl 2(%r10), %ebx
imull $77, %edx, %edx
imull $150, %edi, %edi
addl %edx, %edi
leal (%rbx,%rbx,8), %edx
leal (%rdx,%rdx,2), %edx
addl %ebx, %edx
addl %ebx, %edx
addl %edi, %edx
movb %dh, (%rax)
incq %rax
decl %ecx
addq $3, %r10
testl %ecx, %ecx
jg LBB82_107
jmp LBB82_7
LBB82_108: ## in Loop: Header=BB82_8 Depth=1
movdqa %xmm10, %xmm8
xorl %edi, %edi
movdqa LCPI82_6(%rip), %xmm9 ## xmm9 = [29,0,29,0,29,0,29,0]
movdqa LCPI82_1(%rip), %xmm12 ## xmm12 = [2,128,128,128,6,128,128,128,10,128,128,128,14,128,128,128]
movdqa LCPI82_7(%rip), %xmm11 ## xmm11 = [1,128,128,128,5,128,128,128,9,128,128,128,13,128,128,128]
movdqa LCPI82_2(%rip), %xmm10 ## xmm10 = [1,128,0,128,5,128,4,128,9,128,8,128,13,128,12,128]
.p2align 4, 0x90
LBB82_109: ## Parent Loop BB82_8 Depth=1
## => This Inner Loop Header: Depth=2
movdqu (%rsi,%rdi,4), %xmm2
movdqu 16(%rsi,%rdi,4), %xmm3
movdqu 32(%rsi,%rdi,4), %xmm7
movdqu 48(%rsi,%rdi,4), %xmm1
movdqa %xmm2, %xmm4
pshufb %xmm12, %xmm4
movdqa %xmm3, %xmm6
pshufb %xmm12, %xmm6
movdqa %xmm7, %xmm5
pshufb %xmm12, %xmm5
movdqa %xmm1, %xmm0
pshufb %xmm12, %xmm0
pshufb %xmm10, %xmm1
pmaddwd %xmm14, %xmm1
pshufb %xmm10, %xmm7
pmaddwd %xmm14, %xmm7
pshufb %xmm10, %xmm3
pmaddwd %xmm14, %xmm3
pshufb %xmm10, %xmm2
pmaddwd %xmm14, %xmm2
pmaddwd %xmm8, %xmm0
paddd %xmm1, %xmm0
pmaddwd %xmm9, %xmm5
paddd %xmm7, %xmm5
pmaddwd %xmm9, %xmm6
paddd %xmm3, %xmm6
pmaddwd %xmm9, %xmm4
paddd %xmm2, %xmm4
pshufb %xmm11, %xmm0
pshufb %xmm11, %xmm5
packusdw %xmm0, %xmm5
pshufb %xmm11, %xmm6
pshufb %xmm11, %xmm4
packusdw %xmm6, %xmm4
packuswb %xmm5, %xmm4
movdqu %xmm4, (%r11,%rdi)
addq $16, %rdi
cmpq %rdi, %rdx
jne LBB82_109
## %bb.110: ## in Loop: Header=BB82_8 Depth=1
movq %rdx, %rcx
cmpl $8, -120(%rbp) ## 4-byte Folded Reload
movdqa LCPI82_10(%rip), %xmm5 ## xmm5 = [0,128,128,128,3,128,128,128,6,128,128,128,9,128,128,128]
movdqa LCPI82_11(%rip), %xmm4 ## xmm4 = [2,128,128,128,5,128,128,128,8,128,128,128,11,128,128,128]
movdqa LCPI82_12(%rip), %xmm9 ## xmm9 = <0,128,3,128,6,128,9,128,u,u,u,u,u,u,u,u>
movdqa %xmm8, %xmm10
pcmpeqd %xmm12, %xmm12
movdqa LCPI82_20(%rip), %xmm8 ## xmm8 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
movdqa LCPI82_19(%rip), %xmm11 ## xmm11 = [255,255,255,255,255,255,255,255]
ja LBB82_112
## %bb.111: ## in Loop: Header=BB82_8 Depth=1
addq -184(%rbp), %r10 ## 8-byte Folded Reload
addq %rdx, %rax
movl -76(%rbp), %ecx ## 4-byte Reload
movdqa LCPI82_13(%rip), %xmm15 ## xmm15 = [1,128,0,128,4,128,3,128,7,128,6,128,10,128,9,128]
movdqa LCPI82_7(%rip), %xmm6 ## xmm6 = [1,128,128,128,5,128,128,128,9,128,128,128,13,128,128,128]
movdqa LCPI82_2(%rip), %xmm1 ## xmm1 = [1,128,0,128,5,128,4,128,9,128,8,128,13,128,12,128]
jmp LBB82_115
LBB82_112: ## in Loop: Header=BB82_8 Depth=1
movq -128(%rbp), %rdx ## 8-byte Reload
addq %rdx, %rax
addq -232(%rbp), %r10 ## 8-byte Folded Reload
movdqa LCPI82_1(%rip), %xmm6 ## xmm6 = [2,128,128,128,6,128,128,128,10,128,128,128,14,128,128,128]
movdqa LCPI82_2(%rip), %xmm7 ## xmm7 = [1,128,0,128,5,128,4,128,9,128,8,128,13,128,12,128]
.p2align 4, 0x90
LBB82_113: ## Parent Loop BB82_8 Depth=1
## => This Inner Loop Header: Depth=2
movdqu (%rsi,%rcx,4), %xmm0
movdqu 16(%rsi,%rcx,4), %xmm1
movdqa %xmm0, %xmm2
pshufb %xmm6, %xmm2
movdqa %xmm1, %xmm3
pshufb %xmm6, %xmm3
pshufb %xmm7, %xmm1
pmaddwd %xmm14, %xmm1
pshufb %xmm7, %xmm0
pmaddwd %xmm14, %xmm0
pmaddwd %xmm10, %xmm3
paddd %xmm1, %xmm3
pmaddwd %xmm10, %xmm2
paddd %xmm0, %xmm2
pshufb %xmm13, %xmm3
pshufb %xmm13, %xmm2
punpckldq %xmm3, %xmm2 ## xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
movq %xmm2, (%r11,%rcx)
addq $8, %rcx
cmpq %rcx, %rdx
jne LBB82_113
## %bb.114: ## in Loop: Header=BB82_8 Depth=1
movl -84(%rbp), %ecx ## 4-byte Reload
movdqa LCPI82_13(%rip), %xmm15 ## xmm15 = [1,128,0,128,4,128,3,128,7,128,6,128,10,128,9,128]
movdqa LCPI82_7(%rip), %xmm6 ## xmm6 = [1,128,128,128,5,128,128,128,9,128,128,128,13,128,128,128]
movdqa %xmm7, %xmm1
LBB82_115: ## in Loop: Header=BB82_8 Depth=1
movdqa LCPI82_5(%rip), %xmm7 ## xmm7 = <3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u>
movdqa LCPI82_8(%rip), %xmm3 ## xmm3 = <2,5,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
LBB82_116: ## in Loop: Header=BB82_8 Depth=1
incl %ecx
xorl %edi, %edi
.p2align 4, 0x90
LBB82_117: ## Parent Loop BB82_8 Depth=1
## => This Inner Loop Header: Depth=2
movzbl (%r10,%rdi,4), %edx
movzbl 1(%r10,%rdi,4), %ebx
movzbl 2(%r10,%rdi,4), %esi
imull $77, %edx, %edx
imull $150, %ebx, %ebx
addl %edx, %ebx
leal (%rsi,%rsi,8), %edx
leal (%rdx,%rdx,2), %edx
addl %esi, %edx
addl %esi, %edx
addl %ebx, %edx
movb %dh, (%rax,%rdi)
decl %ecx
incq %rdi
testl %ecx, %ecx
jg LBB82_117
jmp LBB82_7
LBB82_118: ## in Loop: Header=BB82_8 Depth=1
xorl %edi, %edi
testb $1, -248(%rbp) ## 1-byte Folded Reload
jne LBB82_72
jmp LBB82_73
LBB82_119:
movq %rdi, %rbx
movq %r13, %rdi
callq _free
movq %rbx, %r13
jmp LBB82_120
LBB82_122:
movq %r13, %rdi
callq _free
leaq L_.str.5(%rip), %rax
movq %rax, _failure_reason(%rip)
xorl %r13d, %r13d
LBB82_120:
movq %r13, %rax
LBB82_121:
addq $264, %rsp ## imm = 0x108
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB82_123:
callq _convert_format.cold.2
jmp LBB82_125
LBB82_124:
callq _convert_format.cold.1
LBB82_125:
xorl %eax, %eax
jmp LBB82_121
.cfi_endproc
.p2align 2, 0x90
.data_region jt32
## Jump table for the 25-way switch inside the LBB82_8 loop (the function is
## presumably _convert_format — its cold-path helpers _convert_format.cold.*
## are called from this function; confirm against the C source).
## The dispatcher (see LBB82_8/%bb.9 above) computes edx = selector - 10 and
## branches to LBB82_26 when edx > 24, so table entry k corresponds to
## selector value 10 + k. The selector in %esi looks like a pixel/format
## code — TODO confirm. Entries are signed 32-bit offsets relative to the
## table base LJTI82_0; the dispatcher sign-extends the loaded offset and
## adds the table base back before the indirect jump (jmpq *%rdx).
.set L82_0_set_10, LBB82_10-LJTI82_0
.set L82_0_set_13, LBB82_13-LJTI82_0
.set L82_0_set_18, LBB82_18-LJTI82_0
.set L82_0_set_26, LBB82_26-LJTI82_0
.set L82_0_set_29, LBB82_29-LJTI82_0
.set L82_0_set_33, LBB82_33-LJTI82_0
.set L82_0_set_38, LBB82_38-LJTI82_0
.set L82_0_set_41, LBB82_41-LJTI82_0
.set L82_0_set_47, LBB82_47-LJTI82_0
.set L82_0_set_54, LBB82_54-LJTI82_0
.set L82_0_set_57, LBB82_57-LJTI82_0
.set L82_0_set_61, LBB82_61-LJTI82_0
LJTI82_0:
.long L82_0_set_10 ## case 10
.long L82_0_set_13 ## case 11
.long L82_0_set_18 ## case 12
.long L82_0_set_26 ## case 13 (shared/default handler LBB82_26)
.long L82_0_set_26 ## case 14 (shared/default handler)
.long L82_0_set_26 ## case 15 (shared/default handler)
.long L82_0_set_26 ## case 16 (shared/default handler)
.long L82_0_set_29 ## case 17
.long L82_0_set_26 ## case 18 (shared/default handler)
.long L82_0_set_33 ## case 19
.long L82_0_set_38 ## case 20
.long L82_0_set_26 ## case 21 (shared/default handler)
.long L82_0_set_26 ## case 22 (shared/default handler)
.long L82_0_set_26 ## case 23 (shared/default handler)
.long L82_0_set_26 ## case 24 (shared/default handler)
.long L82_0_set_41 ## case 25
.long L82_0_set_47 ## case 26
.long L82_0_set_26 ## case 27 (shared/default handler)
.long L82_0_set_54 ## case 28
.long L82_0_set_26 ## case 29 (shared/default handler)
.long L82_0_set_26 ## case 30 (shared/default handler)
.long L82_0_set_26 ## case 31 (shared/default handler)
.long L82_0_set_26 ## case 32 (shared/default handler)
.long L82_0_set_57 ## case 33
.long L82_0_set_61 ## case 34
.end_data_region
## -- End function
	.p2align	4, 0x90                         ## -- Begin function get32
## -----------------------------------------------------------------------
## _get32 -- read a 32-bit big-endian value from a decode context.
## ABI:  SysV AMD64 (Darwin). In: rdi = context pointer. Out: eax = value.
## Context layout as used by this code (offsets observed below):
##   16(ctx) = FILE*        (non-null => bytes come from fgetc)
##   24(ctx) = buffer cursor, 32(ctx) = buffer end (memory-source mode)
##   NOTE(review): presumably an stb_image-style context -- confirm layout
##   against the struct definition elsewhere in this translation unit.
## For each of the four bytes: if the FILE* is non-null, fgetc is called
## and EOF (-1) is mapped to 0; otherwise a byte is taken from the memory
## buffer, with reads past the end yielding 0 (cursor not advanced).
## Result = b0<<24 | b1<<16 | b2<<8 | b3.
## Byte registers: r15d=b0, r12d=b1, ebx=b2, r13d=b3.
## -----------------------------------------------------------------------
_get32:                                 ## @get32
	.cfi_startproc
## %bb.0:
	pushq	%rbp
	.cfi_def_cfa_offset 16
	.cfi_offset %rbp, -16
	movq	%rsp, %rbp
	.cfi_def_cfa_register %rbp
	pushq	%r15
	pushq	%r14
	pushq	%r13
	pushq	%r12
	pushq	%rbx
	pushq	%rax                            ## keeps rsp 16-byte aligned for the fgetc calls
	.cfi_offset %rbx, -56
	.cfi_offset %r12, -48
	.cfi_offset %r13, -40
	.cfi_offset %r14, -32
	.cfi_offset %r15, -24
	movq	%rdi, %r14                      ## r14 = ctx (callee-saved, live across fgetc)
	movq	16(%rdi), %rdi                  ## rdi = ctx->FILE*
	testq	%rdi, %rdi
	je	LBB83_1                         ## null => byte 0 from memory buffer
## %bb.3:                               ## --- byte 0 from file ---
	callq	_fgetc
	movl	%eax, %r15d
	xorl	%r12d, %r12d                    ## r12 = 0 (pre-zero b1; also 0-source for cmove)
	cmpl	$-1, %eax
	cmovel	%r12d, %r15d                    ## EOF => b0 = 0
	movq	16(%r14), %rdi                  ## reload FILE* (fgetc may alias/modify the ctx)
	testq	%rdi, %rdi
	je	LBB83_4
## %bb.7:                               ## --- byte 1 from file ---
	callq	_fgetc
	cmpl	$-1, %eax
	cmovnel	%eax, %r12d                     ## b1 = byte unless EOF (r12 already 0)
	movq	16(%r14), %rdi
	testq	%rdi, %rdi
	je	LBB83_8
## %bb.10:                              ## --- byte 2 from file ---
	callq	_fgetc
	movl	%eax, %ebx
	xorl	%r13d, %r13d                    ## pre-zero b3
	cmpl	$-1, %eax
	cmovel	%r13d, %ebx                     ## EOF => b2 = 0
	movq	16(%r14), %rdi
	testq	%rdi, %rdi
	je	LBB83_11
## %bb.15:                              ## --- byte 3 from file ---
	callq	_fgetc
	cmpl	$-1, %eax
	cmovnel	%eax, %r13d                     ## b3 = byte unless EOF
	jmp	LBB83_14                        ## all four bytes read; combine
LBB83_1:                                ## byte 0 from memory buffer
	movq	24(%r14), %rax                  ## rax = cursor
	movq	32(%r14), %rcx                  ## rcx = end
	xorl	%r15d, %r15d                    ## b0 = 0 if past the end
	cmpq	%rcx, %rax
	jae	LBB83_5
## %bb.2:
	leaq	1(%rax), %rdx
	movq	%rdx, 24(%r14)                  ## advance cursor
	movzbl	(%rax), %r15d                   ## b0 = *cursor
	movq	%rdx, %rax                      ## keep rax = current cursor for byte 1
	jmp	LBB83_5
LBB83_4:                                ## byte 1 falls back to buffer (FILE* became null)
	movq	24(%r14), %rax
	movq	32(%r14), %rcx
LBB83_5:                                ## byte 1 from memory buffer (rax=cursor, rcx=end)
	xorl	%r12d, %r12d                    ## b1 = 0 if past the end
	cmpq	%rcx, %rax
	jae	LBB83_8
## %bb.6:
	leaq	1(%rax), %rcx
	movq	%rcx, 24(%r14)
	movzbl	(%rax), %r12d                   ## b1 = *cursor
LBB83_8:                                ## byte 2 from memory buffer
	movq	24(%r14), %rax
	movq	32(%r14), %rcx
	xorl	%ebx, %ebx                      ## b2 = 0 if past the end
	cmpq	%rcx, %rax
	jae	LBB83_12
## %bb.9:
	leaq	1(%rax), %rdx
	movq	%rdx, 24(%r14)
	movzbl	(%rax), %ebx                    ## b2 = *cursor
	movq	%rdx, %rax
	jmp	LBB83_12
LBB83_11:                               ## byte 3 falls back to buffer
	movq	24(%r14), %rax
	movq	32(%r14), %rcx
LBB83_12:                               ## byte 3 from memory buffer
	xorl	%r13d, %r13d                    ## b3 = 0 if past the end
	cmpq	%rcx, %rax
	jae	LBB83_14
## %bb.13:
	leaq	1(%rax), %rcx
	movq	%rcx, 24(%r14)
	movzbl	(%rax), %r13d                   ## b3 = *cursor
LBB83_14:                               ## combine: eax = b0<<24 | b1<<16 | b2<<8 | b3
	shll	$8, %r15d                       ## r15 = b0<<8
	addl	%r12d, %r15d                    ## r15 = (b0<<8) + b1
	shll	$16, %r15d                      ## r15 = b0<<24 | b1<<16
	shll	$8, %ebx                        ## ebx = b2<<8
	addl	%r15d, %ebx
	addl	%r13d, %ebx                     ## ebx = full big-endian 32-bit value
	movl	%ebx, %eax                      ## return value
	addq	$8, %rsp
	popq	%rbx
	popq	%r12
	popq	%r13
	popq	%r14
	popq	%r15
	popq	%rbp
	retq
	.cfi_endproc
                                        ## -- End function
	.section	__TEXT,__literal16,16byte_literals
	.p2align	4                               ## -- Begin function create_png_image_raw
## 16-byte SIMD constants used by _create_png_image_raw below.
LCPI84_0:                               ## 16 bytes of 0x7f (splat 127)
	.space	16,127
LCPI84_1:                               ## 16 bytes of 0xff (splat 255 / all-ones bytes)
	.space	16,255
.section __TEXT,__text,regular,pure_instructions
.p2align 4, 0x90
_create_png_image_raw: ## @create_png_image_raw
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $488, %rsp ## imm = 0x1E8
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %edx, %ebx
movq %rsi, %r15
movq %rdi, -232(%rbp) ## 8-byte Spill
movl 8(%rdi), %r13d
leal 1(%r13), %eax
cmpl %ecx, %r13d
je LBB84_2
## %bb.1:
cmpl %ecx, %eax
jne LBB84_439
LBB84_2:
movl %eax, -284(%rbp) ## 4-byte Spill
movl %r8d, -48(%rbp) ## 4-byte Spill
movl %ecx, -44(%rbp) ## 4-byte Spill
imull %ecx, %r8d
movq _stbi_png_partial@GOTPCREL(%rip), %rax
movl (%rax), %r12d
testl %r12d, %r12d
movl $1, %r14d
cmovel %r9d, %r14d
movl %r8d, -180(%rbp) ## 4-byte Spill
movl %r8d, %edi
imull %r14d, %edi
callq _malloc
movq %rax, %r11
movq -232(%rbp), %rax ## 8-byte Reload
movq %r11, 56(%rax)
testq %r11, %r11
je LBB84_432
## %bb.3:
testl %r12d, %r12d
movl -48(%rbp), %eax ## 4-byte Reload
jne LBB84_7
## %bb.4:
movq -232(%rbp), %rcx ## 8-byte Reload
cmpl %eax, (%rcx)
jne LBB84_7
## %bb.5:
movq -232(%rbp), %rax ## 8-byte Reload
cmpl %r14d, 4(%rax)
jne LBB84_7
## %bb.6:
movl %r13d, %eax
imull -48(%rbp), %eax ## 4-byte Folded Reload
incl %eax
imull %r14d, %eax
cmpl %ebx, %eax
jne LBB84_437
LBB84_7:
movl $1, %eax
testl %r14d, %r14d
je LBB84_435
## %bb.8:
movb (%r15), %al
leaq L_.str.71(%rip), %rcx
cmpb $4, %al
ja LBB84_433
## %bb.9:
movslq %r13d, %r12
movl -180(%rbp), %r10d ## 4-byte Reload
movl -44(%rbp), %r8d ## 4-byte Reload
movslq %r8d, %r9
movq %r9, %rbx
subq %r12, %rbx
leaq 3(%r9), %rdx
movq %rdx, %rdi
movq %rdx, -440(%rbp) ## 8-byte Spill
subq %r10, %rdx
movq %rdx, -432(%rbp) ## 8-byte Spill
movq %rbx, %rdx
subq %r10, %rdx
movq %rdx, -456(%rbp) ## 8-byte Spill
negq %r10
movq %r10, -376(%rbp) ## 8-byte Spill
decl -48(%rbp) ## 4-byte Folded Spill
movl %r14d, %edx
movq %rdx, -504(%rbp) ## 8-byte Spill
movl %r8d, %r10d
movl %r13d, %edi
andl $-32, %edi
movq %rdi, -88(%rbp) ## 8-byte Spill
addq $-32, %rdi
movq %rdi, -168(%rbp) ## 8-byte Spill
movq %rdi, %rcx
shrq $5, %rcx
incq %rcx
movl %r10d, %edi
andl $-32, %edi
movq %rdi, -96(%rbp) ## 8-byte Spill
addq $-32, %rdi
movq %rdi, -176(%rbp) ## 8-byte Spill
movq %rdi, %rsi
shrq $5, %rsi
incq %rsi
movabsq $1152921504606846972, %rdx ## imm = 0xFFFFFFFFFFFFFFC
orq $2, %rdx
movq %rcx, %rdi
andq %rdx, %rdi
movq %rdi, -192(%rbp) ## 8-byte Spill
movl %r13d, %edi
andl $-8, %edi
movq %rdi, -112(%rbp) ## 8-byte Spill
movq %r13, %rdi
negq %rdi
movq %rdi, -248(%rbp) ## 8-byte Spill
movl %r13d, %edi
andl $-16, %edi
movq %rdi, -360(%rbp) ## 8-byte Spill
movl %r13d, %edi
andl $3, %edi
movq %rdi, -280(%rbp) ## 8-byte Spill
movl %ecx, %r14d
andl $3, %r14d
movq %rcx, %rdi
movq %rcx, -208(%rbp) ## 8-byte Spill
andq $-4, %rcx
movq %rcx, -464(%rbp) ## 8-byte Spill
andq %rsi, %rdx
movq %rdx, -200(%rbp) ## 8-byte Spill
movl %r10d, %ecx
andl $-8, %ecx
movq %rcx, -152(%rbp) ## 8-byte Spill
movq %r10, %rcx
negq %rcx
movq %rcx, -256(%rbp) ## 8-byte Spill
movl %r10d, %ecx
andl $-16, %ecx
movq %rcx, -368(%rbp) ## 8-byte Spill
movq %r10, -104(%rbp) ## 8-byte Spill
## kill: def $r10d killed $r10d killed $r10 def $r10
andl $3, %r10d
movq %r10, -384(%rbp) ## 8-byte Spill
movl %esi, %edx
andl $3, %edx
movq %rsi, %rcx
movq %rsi, -216(%rbp) ## 8-byte Spill
andq $-4, %rsi
movq %rsi, -472(%rbp) ## 8-byte Spill
movq %r14, -480(%rbp) ## 8-byte Spill
shlq $5, %r14
movq %r14, -512(%rbp) ## 8-byte Spill
leaq 16(%r9), %rcx
movq %rcx, -352(%rbp) ## 8-byte Spill
subq %r12, %rcx
movq %rcx, -448(%rbp) ## 8-byte Spill
movq %rdx, -488(%rbp) ## 8-byte Spill
shlq $5, %rdx
movq %rdx, -520(%rbp) ## 8-byte Spill
pcmpeqd %xmm15, %xmm15
movdqa LCPI84_0(%rip), %xmm7 ## xmm7 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
leaq 4(%r12), %rcx
movq %rcx, -424(%rbp) ## 8-byte Spill
movq %r12, -72(%rbp) ## 8-byte Spill
leaq 17(%r12), %rcx
movq %rcx, -344(%rbp) ## 8-byte Spill
movq %rbx, -160(%rbp) ## 8-byte Spill
leaq 48(%rbx), %rcx
movq %rcx, -264(%rbp) ## 8-byte Spill
xorl %r14d, %r14d
leaq LJTI84_0(%rip), %rdx
movq %r13, -56(%rbp) ## 8-byte Spill
movq %r9, -64(%rbp) ## 8-byte Spill
.p2align 4, 0x90
LBB84_10: ## =>This Loop Header: Depth=1
## Child Loop BB84_17 Depth 2
## Child Loop BB84_390 Depth 2
## Child Loop BB84_400 Depth 3
## Child Loop BB84_406 Depth 3
## Child Loop BB84_414 Depth 3
## Child Loop BB84_361 Depth 2
## Child Loop BB84_371 Depth 3
## Child Loop BB84_380 Depth 3
## Child Loop BB84_386 Depth 3
## Child Loop BB84_334 Depth 2
## Child Loop BB84_348 Depth 3
## Child Loop BB84_345 Depth 3
## Child Loop BB84_356 Depth 3
## Child Loop BB84_309 Depth 2
## Child Loop BB84_318 Depth 3
## Child Loop BB84_322 Depth 3
## Child Loop BB84_330 Depth 3
## Child Loop BB84_278 Depth 2
## Child Loop BB84_288 Depth 3
## Child Loop BB84_294 Depth 3
## Child Loop BB84_300 Depth 3
## Child Loop BB84_304 Depth 3
## Child Loop BB84_249 Depth 2
## Child Loop BB84_259 Depth 3
## Child Loop BB84_265 Depth 3
## Child Loop BB84_273 Depth 3
## Child Loop BB84_56 Depth 2
## Child Loop BB84_65 Depth 3
## Child Loop BB84_68 Depth 3
## Child Loop BB84_72 Depth 3
## Child Loop BB84_76 Depth 3
## Child Loop BB84_78 Depth 3
## Child Loop BB84_220 Depth 2
## Child Loop BB84_230 Depth 3
## Child Loop BB84_236 Depth 3
## Child Loop BB84_243 Depth 3
## Child Loop BB84_192 Depth 2
## Child Loop BB84_202 Depth 3
## Child Loop BB84_208 Depth 3
## Child Loop BB84_215 Depth 3
## Child Loop BB84_165 Depth 2
## Child Loop BB84_176 Depth 3
## Child Loop BB84_180 Depth 3
## Child Loop BB84_187 Depth 3
## Child Loop BB84_140 Depth 2
## Child Loop BB84_150 Depth 3
## Child Loop BB84_153 Depth 3
## Child Loop BB84_161 Depth 3
## Child Loop BB84_111 Depth 2
## Child Loop BB84_121 Depth 3
## Child Loop BB84_126 Depth 3
## Child Loop BB84_132 Depth 3
## Child Loop BB84_135 Depth 3
## Child Loop BB84_83 Depth 2
## Child Loop BB84_93 Depth 3
## Child Loop BB84_99 Depth 3
## Child Loop BB84_106 Depth 3
## Child Loop BB84_27 Depth 2
## Child Loop BB84_36 Depth 3
## Child Loop BB84_39 Depth 3
## Child Loop BB84_43 Depth 3
## Child Loop BB84_47 Depth 3
## Child Loop BB84_49 Depth 3
testq %r14, %r14
jne LBB84_12
## %bb.11: ## in Loop: Header=BB84_10 Depth=1
movzbl %al, %eax
leaq _first_row_filter(%rip), %rcx
movb (%rax,%rcx), %al
LBB84_12: ## in Loop: Header=BB84_10 Depth=1
leaq 1(%r15), %rbx
movzbl %al, %esi
testl %r13d, %r13d
jle LBB84_21
## %bb.13: ## in Loop: Header=BB84_10 Depth=1
movq -376(%rbp), %rcx ## 8-byte Reload
leaq (%r11,%rcx), %r10
xorl %edi, %edi
jmp LBB84_17
.p2align 4, 0x90
LBB84_14: ## in Loop: Header=BB84_17 Depth=2
movzbl (%rbx,%rdi), %ecx
LBB84_15: ## in Loop: Header=BB84_17 Depth=2
movb %cl, (%r11,%rdi)
LBB84_16: ## in Loop: Header=BB84_17 Depth=2
incq %rdi
cmpq %rdi, %r13
je LBB84_21
LBB84_17: ## Parent Loop BB84_10 Depth=1
## => This Inner Loop Header: Depth=2
cmpb $6, %al
ja LBB84_16
## %bb.18: ## in Loop: Header=BB84_17 Depth=2
movslq (%rdx,%rsi,4), %rcx
addq %rdx, %rcx
jmpq *%rcx
LBB84_19: ## in Loop: Header=BB84_17 Depth=2
movzbl (%r10,%rdi), %ecx
addb (%rbx,%rdi), %cl
jmp LBB84_15
LBB84_20: ## in Loop: Header=BB84_17 Depth=2
movzbl (%r10,%rdi), %ecx
shrb %cl
addb (%rbx,%rdi), %cl
jmp LBB84_15
.p2align 4, 0x90
LBB84_21: ## in Loop: Header=BB84_10 Depth=1
movq -376(%rbp), %rcx ## 8-byte Reload
leaq (%r11,%rcx), %rdi
cmpl %r8d, %r13d
movq %r14, -528(%rbp) ## 8-byte Spill
jne LBB84_50
## %bb.22: ## in Loop: Header=BB84_10 Depth=1
movq -72(%rbp), %rcx ## 8-byte Reload
leaq (%rbx,%rcx), %r8
cmpb $6, %al
ja LBB84_79
## %bb.23: ## in Loop: Header=BB84_10 Depth=1
movq %rcx, %rdx
leaq (%r11,%r9), %r10
leaq (%rdi,%r9), %r12
leaq LJTI84_2(%rip), %rcx
movslq (%rcx,%rsi,4), %rax
addq %rcx, %rax
jmpq *%rax
LBB84_24: ## in Loop: Header=BB84_10 Depth=1
movl -48(%rbp), %r14d ## 4-byte Reload
testl %r14d, %r14d
je LBB84_79
## %bb.25: ## in Loop: Header=BB84_10 Depth=1
movq %rbx, %r13
leaq (%r11,%r9), %rax
movq %rax, -144(%rbp) ## 8-byte Spill
movq %rdx, %r9
addq %rdx, %r13
addq -352(%rbp), %r11 ## 8-byte Folded Reload
addq -344(%rbp), %r15 ## 8-byte Folded Reload
movq %r15, %r12
xorl %edi, %edi
movq %r8, %r15
jmp LBB84_27
.p2align 4, 0x90
LBB84_26: ## in Loop: Header=BB84_27 Depth=2
addq %r9, %r15
addq %r9, %r10
incq %rdi
addq %r9, %r11
addq %r9, %r12
decl %r14d
je LBB84_429
LBB84_27: ## Parent Loop BB84_10 Depth=1
## => This Loop Header: Depth=2
## Child Loop BB84_36 Depth 3
## Child Loop BB84_39 Depth 3
## Child Loop BB84_43 Depth 3
## Child Loop BB84_47 Depth 3
## Child Loop BB84_49 Depth 3
cmpl $0, -56(%rbp) ## 4-byte Folded Reload
movq -104(%rbp), %rsi ## 8-byte Reload
movq -384(%rbp), %r8 ## 8-byte Reload
movq -520(%rbp), %rbx ## 8-byte Reload
jle LBB84_26
## %bb.28: ## in Loop: Header=BB84_27 Depth=2
cmpl $8, -44(%rbp) ## 4-byte Folded Reload
jb LBB84_32
## %bb.29: ## in Loop: Header=BB84_27 Depth=2
movq %rdi, %rcx
imulq %r9, %rcx
leaq (%rcx,%r13), %rax
addq -144(%rbp), %rcx ## 8-byte Folded Reload
subq %rax, %rcx
movl $0, %eax
cmpq $32, %rcx
jb LBB84_45
## %bb.30: ## in Loop: Header=BB84_27 Depth=2
cmpl $32, -44(%rbp) ## 4-byte Folded Reload
jae LBB84_33
## %bb.31: ## in Loop: Header=BB84_27 Depth=2
xorl %ecx, %ecx
jmp LBB84_42
LBB84_32: ## in Loop: Header=BB84_27 Depth=2
xorl %eax, %eax
jmp LBB84_45
LBB84_33: ## in Loop: Header=BB84_27 Depth=2
cmpq $96, -176(%rbp) ## 8-byte Folded Reload
jae LBB84_35
## %bb.34: ## in Loop: Header=BB84_27 Depth=2
xorl %eax, %eax
jmp LBB84_37
LBB84_35: ## in Loop: Header=BB84_27 Depth=2
movq -472(%rbp), %rcx ## 8-byte Reload
xorl %eax, %eax
.p2align 4, 0x90
LBB84_36: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_27 Depth=2
## => This Inner Loop Header: Depth=3
movups (%r15,%rax), %xmm0
movups 16(%r15,%rax), %xmm1
movups %xmm0, (%r10,%rax)
movups %xmm1, 16(%r10,%rax)
movups 32(%r15,%rax), %xmm0
movups 48(%r15,%rax), %xmm1
movups %xmm0, 32(%r10,%rax)
movups %xmm1, 48(%r10,%rax)
movups 64(%r15,%rax), %xmm0
movups 80(%r15,%rax), %xmm1
movups %xmm0, 64(%r10,%rax)
movups %xmm1, 80(%r10,%rax)
movdqu 96(%r15,%rax), %xmm0
movdqu 112(%r15,%rax), %xmm1
movdqu %xmm0, 96(%r10,%rax)
movdqu %xmm1, 112(%r10,%rax)
subq $-128, %rax
addq $-4, %rcx
jne LBB84_36
LBB84_37: ## in Loop: Header=BB84_27 Depth=2
cmpq $0, -488(%rbp) ## 8-byte Folded Reload
je LBB84_40
## %bb.38: ## in Loop: Header=BB84_27 Depth=2
leaq (%r11,%rax), %rcx
addq %r12, %rax
xorl %edx, %edx
.p2align 4, 0x90
LBB84_39: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_27 Depth=2
## => This Inner Loop Header: Depth=3
movdqu -16(%rax,%rdx), %xmm0
movdqu (%rax,%rdx), %xmm1
movdqu %xmm0, -16(%rcx,%rdx)
movdqu %xmm1, (%rcx,%rdx)
addq $32, %rdx
cmpq %rdx, %rbx
jne LBB84_39
LBB84_40: ## in Loop: Header=BB84_27 Depth=2
cmpq %rsi, -96(%rbp) ## 8-byte Folded Reload
je LBB84_26
## %bb.41: ## in Loop: Header=BB84_27 Depth=2
movq -96(%rbp), %rax ## 8-byte Reload
movq %rax, %rcx
testb $24, %sil
je LBB84_45
LBB84_42: ## in Loop: Header=BB84_27 Depth=2
movq -152(%rbp), %rdx ## 8-byte Reload
.p2align 4, 0x90
LBB84_43: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_27 Depth=2
## => This Inner Loop Header: Depth=3
movq (%r15,%rcx), %rax
movq %rax, (%r10,%rcx)
addq $8, %rcx
cmpq %rcx, %rdx
jne LBB84_43
## %bb.44: ## in Loop: Header=BB84_27 Depth=2
movq %rdx, %rax
cmpq %rsi, %rdx
je LBB84_26
LBB84_45: ## in Loop: Header=BB84_27 Depth=2
movq %rax, %rcx
notq %rcx
addq %rsi, %rcx
testb $3, %sil
je LBB84_48
## %bb.46: ## in Loop: Header=BB84_27 Depth=2
movq %r8, %rdx
.p2align 4, 0x90
LBB84_47: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_27 Depth=2
## => This Inner Loop Header: Depth=3
movzbl (%r15,%rax), %ebx
movb %bl, (%r10,%rax)
incq %rax
decq %rdx
jne LBB84_47
LBB84_48: ## in Loop: Header=BB84_27 Depth=2
cmpq $3, %rcx
movq -72(%rbp), %r9 ## 8-byte Reload
jb LBB84_26
.p2align 4, 0x90
LBB84_49: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_27 Depth=2
## => This Inner Loop Header: Depth=3
movzbl (%r15,%rax), %ecx
movb %cl, (%r10,%rax)
movzbl 1(%r15,%rax), %ecx
movb %cl, 1(%r10,%rax)
movzbl 2(%r15,%rax), %ecx
movb %cl, 2(%r10,%rax)
movzbl 3(%r15,%rax), %ecx
movb %cl, 3(%r10,%rax)
addq $4, %rax
cmpq %rax, %rsi
jne LBB84_49
jmp LBB84_26
.p2align 4, 0x90
LBB84_50: ## in Loop: Header=BB84_10 Depth=1
movq -72(%rbp), %rdx ## 8-byte Reload
movb $-1, (%r11,%rdx)
cmpl %r8d, -284(%rbp) ## 4-byte Folded Reload
jne LBB84_438
## %bb.51: ## in Loop: Header=BB84_10 Depth=1
leaq (%rbx,%rdx), %r12
cmpb $6, %al
ja LBB84_428
## %bb.52: ## in Loop: Header=BB84_10 Depth=1
leaq (%rdi,%r9), %r10
leaq (%r11,%r9), %r8
leaq LJTI84_1(%rip), %rcx
movslq (%rcx,%rsi,4), %rax
addq %rcx, %rax
jmpq *%rax
LBB84_53: ## in Loop: Header=BB84_10 Depth=1
movl -48(%rbp), %r10d ## 4-byte Reload
testl %r10d, %r10d
je LBB84_428
## %bb.54: ## in Loop: Header=BB84_10 Depth=1
leaq (%r11,%r9), %rax
movq %rax, -144(%rbp) ## 8-byte Spill
addq %rdx, %rbx
movq %rbx, %r9
addq -352(%rbp), %r11 ## 8-byte Folded Reload
addq -344(%rbp), %r15 ## 8-byte Folded Reload
movq %r15, %r14
xorl %r13d, %r13d
movq %r12, %r15
jmp LBB84_56
.p2align 4, 0x90
LBB84_55: ## in Loop: Header=BB84_56 Depth=2
movb $-1, (%r8,%rdx)
addq %rdx, %r15
movq -64(%rbp), %rax ## 8-byte Reload
addq %rax, %r8
incq %r13
addq %rax, %r11
addq %rdx, %r14
decl %r10d
je LBB84_429
LBB84_56: ## Parent Loop BB84_10 Depth=1
## => This Loop Header: Depth=2
## Child Loop BB84_65 Depth 3
## Child Loop BB84_68 Depth 3
## Child Loop BB84_72 Depth 3
## Child Loop BB84_76 Depth 3
## Child Loop BB84_78 Depth 3
movq -56(%rbp), %rdi ## 8-byte Reload
testl %edi, %edi
movq -280(%rbp), %r12 ## 8-byte Reload
movq -512(%rbp), %rsi ## 8-byte Reload
jle LBB84_55
## %bb.57: ## in Loop: Header=BB84_56 Depth=2
cmpl $8, %edi
jb LBB84_61
## %bb.58: ## in Loop: Header=BB84_56 Depth=2
movq %r13, %rax
imulq %rdx, %rax
addq %r9, %rax
movq %r13, %rcx
imulq -64(%rbp), %rcx ## 8-byte Folded Reload
addq -144(%rbp), %rcx ## 8-byte Folded Reload
subq %rax, %rcx
movl $0, %eax
cmpq $32, %rcx
jb LBB84_74
## %bb.59: ## in Loop: Header=BB84_56 Depth=2
cmpl $32, %edi
jae LBB84_62
## %bb.60: ## in Loop: Header=BB84_56 Depth=2
xorl %ecx, %ecx
jmp LBB84_71
LBB84_61: ## in Loop: Header=BB84_56 Depth=2
xorl %eax, %eax
jmp LBB84_74
LBB84_62: ## in Loop: Header=BB84_56 Depth=2
cmpq $96, -168(%rbp) ## 8-byte Folded Reload
jae LBB84_64
## %bb.63: ## in Loop: Header=BB84_56 Depth=2
xorl %eax, %eax
movq -88(%rbp), %rdx ## 8-byte Reload
jmp LBB84_66
LBB84_64: ## in Loop: Header=BB84_56 Depth=2
movq -464(%rbp), %rcx ## 8-byte Reload
xorl %eax, %eax
movq -88(%rbp), %rdx ## 8-byte Reload
.p2align 4, 0x90
LBB84_65: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_56 Depth=2
## => This Inner Loop Header: Depth=3
movups (%r15,%rax), %xmm0
movups 16(%r15,%rax), %xmm1
movups %xmm0, (%r8,%rax)
movups %xmm1, 16(%r8,%rax)
movups 32(%r15,%rax), %xmm0
movups 48(%r15,%rax), %xmm1
movups %xmm0, 32(%r8,%rax)
movups %xmm1, 48(%r8,%rax)
movups 64(%r15,%rax), %xmm0
movups 80(%r15,%rax), %xmm1
movups %xmm0, 64(%r8,%rax)
movups %xmm1, 80(%r8,%rax)
movdqu 96(%r15,%rax), %xmm0
movdqu 112(%r15,%rax), %xmm1
movdqu %xmm0, 96(%r8,%rax)
movdqu %xmm1, 112(%r8,%rax)
subq $-128, %rax
addq $-4, %rcx
jne LBB84_65
LBB84_66: ## in Loop: Header=BB84_56 Depth=2
cmpq $0, -480(%rbp) ## 8-byte Folded Reload
je LBB84_69
## %bb.67: ## in Loop: Header=BB84_56 Depth=2
leaq (%r11,%rax), %rcx
addq %r14, %rax
xorl %ebx, %ebx
.p2align 4, 0x90
LBB84_68: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_56 Depth=2
## => This Inner Loop Header: Depth=3
movdqu -16(%rax,%rbx), %xmm0
movdqu (%rax,%rbx), %xmm1
movdqu %xmm0, -16(%rcx,%rbx)
movdqu %xmm1, (%rcx,%rbx)
addq $32, %rbx
cmpq %rbx, %rsi
jne LBB84_68
LBB84_69: ## in Loop: Header=BB84_56 Depth=2
cmpq %rdi, %rdx
movq -72(%rbp), %rdx ## 8-byte Reload
je LBB84_55
## %bb.70: ## in Loop: Header=BB84_56 Depth=2
movq -88(%rbp), %rax ## 8-byte Reload
movq %rax, %rcx
testb $24, %dil
je LBB84_74
LBB84_71: ## in Loop: Header=BB84_56 Depth=2
movq -112(%rbp), %rbx ## 8-byte Reload
.p2align 4, 0x90
LBB84_72: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_56 Depth=2
## => This Inner Loop Header: Depth=3
movq (%r15,%rcx), %rax
movq %rax, (%r8,%rcx)
addq $8, %rcx
cmpq %rcx, %rbx
jne LBB84_72
## %bb.73: ## in Loop: Header=BB84_56 Depth=2
movq %rbx, %rax
cmpq %rdi, %rbx
je LBB84_55
LBB84_74: ## in Loop: Header=BB84_56 Depth=2
movq %rax, %rcx
notq %rcx
addq %rdi, %rcx
testb $3, %dil
je LBB84_77
## %bb.75: ## in Loop: Header=BB84_56 Depth=2
movq %r12, %rbx
.p2align 4, 0x90
LBB84_76: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_56 Depth=2
## => This Inner Loop Header: Depth=3
movzbl (%r15,%rax), %edx
movb %dl, (%r8,%rax)
incq %rax
decq %rbx
jne LBB84_76
LBB84_77: ## in Loop: Header=BB84_56 Depth=2
cmpq $3, %rcx
movq -72(%rbp), %rdx ## 8-byte Reload
jb LBB84_55
.p2align 4, 0x90
LBB84_78: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_56 Depth=2
## => This Inner Loop Header: Depth=3
movzbl (%r15,%rax), %ecx
movb %cl, (%r8,%rax)
movzbl 1(%r15,%rax), %ecx
movb %cl, 1(%r8,%rax)
movzbl 2(%r15,%rax), %ecx
movb %cl, 2(%r8,%rax)
movzbl 3(%r15,%rax), %ecx
movb %cl, 3(%r8,%rax)
addq $4, %rax
cmpq %rax, %rdi
jne LBB84_78
jmp LBB84_55
LBB84_80: ## in Loop: Header=BB84_10 Depth=1
movl -48(%rbp), %r14d ## 4-byte Reload
testl %r14d, %r14d
je LBB84_79
## %bb.81: ## in Loop: Header=BB84_10 Depth=1
movq %rbx, %r12
addq %r11, %r9
addq %rdx, %r12
movq -264(%rbp), %rax ## 8-byte Reload
leaq (%r11,%rax), %rsi
addq -160(%rbp), %r11 ## 8-byte Folded Reload
xorl %edi, %edi
movq %r8, %r15
movq %rdx, %r8
jmp LBB84_83
.p2align 4, 0x90
LBB84_82: ## in Loop: Header=BB84_83 Depth=2
addq %r8, %r15
addq %r8, %r10
incq %rdi
addq %r8, %rsi
addq %r8, %r11
decl %r14d
je LBB84_429
LBB84_83: ## Parent Loop BB84_10 Depth=1
## => This Loop Header: Depth=2
## Child Loop BB84_93 Depth 3
## Child Loop BB84_99 Depth 3
## Child Loop BB84_106 Depth 3
movq -56(%rbp), %rdx ## 8-byte Reload
testl %edx, %edx
movq -104(%rbp), %rbx ## 8-byte Reload
jle LBB84_82
## %bb.84: ## in Loop: Header=BB84_83 Depth=2
cmpl $8, -44(%rbp) ## 4-byte Folded Reload
jae LBB84_86
LBB84_102: ## in Loop: Header=BB84_83 Depth=2
xorl %eax, %eax
LBB84_103: ## in Loop: Header=BB84_83 Depth=2
movq %rax, %rcx
testb $1, %bl
je LBB84_105
## %bb.104: ## in Loop: Header=BB84_83 Depth=2
movq %rax, %rcx
subq %r8, %rcx
movb (%r10,%rcx), %cl
addb (%r15,%rax), %cl
movb %cl, (%r10,%rax)
movq %rax, %rcx
orq $1, %rcx
LBB84_105: ## in Loop: Header=BB84_83 Depth=2
notq %rax
cmpq -256(%rbp), %rax ## 8-byte Folded Reload
je LBB84_82
.p2align 4, 0x90
LBB84_106: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_83 Depth=2
## => This Inner Loop Header: Depth=3
movzbl (%r11,%rcx), %eax
addb (%r15,%rcx), %al
movb %al, (%r10,%rcx)
movzbl 1(%r11,%rcx), %eax
addb 1(%r15,%rcx), %al
movb %al, 1(%r10,%rcx)
addq $2, %rcx
cmpq %rcx, %rbx
jne LBB84_106
jmp LBB84_82
.p2align 4, 0x90
LBB84_86: ## in Loop: Header=BB84_83 Depth=2
movq %rdi, %rax
imulq %r8, %rax
leaq (%r9,%rax), %rcx
addq %r12, %rax
subq %rax, %rcx
cmpq $32, %rcx
jb LBB84_102
## %bb.87: ## in Loop: Header=BB84_83 Depth=2
jb LBB84_102
## %bb.88: ## in Loop: Header=BB84_83 Depth=2
movl $0, %eax
cmpl $32, %edx
jb LBB84_103
## %bb.89: ## in Loop: Header=BB84_83 Depth=2
cmpl $32, -44(%rbp) ## 4-byte Folded Reload
jae LBB84_91
## %bb.90: ## in Loop: Header=BB84_83 Depth=2
xorl %ecx, %ecx
jmp LBB84_98
LBB84_91: ## in Loop: Header=BB84_83 Depth=2
cmpq $0, -176(%rbp) ## 8-byte Folded Reload
je LBB84_107
## %bb.92: ## in Loop: Header=BB84_83 Depth=2
movq -200(%rbp), %rcx ## 8-byte Reload
xorl %eax, %eax
.p2align 4, 0x90
LBB84_93: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_83 Depth=2
## => This Inner Loop Header: Depth=3
movdqu (%r15,%rax), %xmm0
movdqu 16(%r15,%rax), %xmm1
movdqu -48(%rsi,%rax), %xmm2
paddb %xmm0, %xmm2
movdqu -32(%rsi,%rax), %xmm0
paddb %xmm1, %xmm0
movdqu %xmm2, (%r10,%rax)
movdqu %xmm0, 16(%r10,%rax)
movdqu 32(%r15,%rax), %xmm0
movdqu 48(%r15,%rax), %xmm1
movdqu -16(%rsi,%rax), %xmm2
paddb %xmm0, %xmm2
movdqu (%rsi,%rax), %xmm0
paddb %xmm1, %xmm0
movdqu %xmm2, 32(%r10,%rax)
movdqu %xmm0, 48(%r10,%rax)
addq $64, %rax
addq $-2, %rcx
jne LBB84_93
## %bb.94: ## in Loop: Header=BB84_83 Depth=2
testb $1, -216(%rbp) ## 1-byte Folded Reload
je LBB84_96
LBB84_95: ## in Loop: Header=BB84_83 Depth=2
movdqu (%r15,%rax), %xmm0
movdqu 16(%r15,%rax), %xmm1
movq %rax, %rcx
subq %r8, %rcx
movdqu (%r10,%rcx), %xmm2
paddb %xmm0, %xmm2
movdqu 16(%r10,%rcx), %xmm0
paddb %xmm1, %xmm0
movdqu %xmm2, (%r10,%rax)
movdqu %xmm0, 16(%r10,%rax)
LBB84_96: ## in Loop: Header=BB84_83 Depth=2
cmpq %rbx, -96(%rbp) ## 8-byte Folded Reload
je LBB84_82
## %bb.97: ## in Loop: Header=BB84_83 Depth=2
movq -96(%rbp), %rax ## 8-byte Reload
movq %rax, %rcx
testb $24, %bl
je LBB84_103
LBB84_98: ## in Loop: Header=BB84_83 Depth=2
movq -152(%rbp), %rdx ## 8-byte Reload
.p2align 4, 0x90
LBB84_99: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_83 Depth=2
## => This Inner Loop Header: Depth=3
movq (%r15,%rcx), %xmm0 ## xmm0 = mem[0],zero
movq (%r11,%rcx), %xmm1 ## xmm1 = mem[0],zero
paddb %xmm0, %xmm1
movq %xmm1, (%r10,%rcx)
addq $8, %rcx
cmpq %rcx, %rdx
jne LBB84_99
## %bb.100: ## in Loop: Header=BB84_83 Depth=2
movq %rdx, %rax
cmpq %rbx, %rdx
je LBB84_82
jmp LBB84_103
LBB84_107: ## in Loop: Header=BB84_83 Depth=2
xorl %eax, %eax
testb $1, -216(%rbp) ## 1-byte Folded Reload
jne LBB84_95
jmp LBB84_96
LBB84_108: ## in Loop: Header=BB84_10 Depth=1
movl -48(%rbp), %r9d ## 4-byte Reload
testl %r9d, %r9d
je LBB84_79
## %bb.109: ## in Loop: Header=BB84_10 Depth=1
movq -64(%rbp), %rax ## 8-byte Reload
addq %rax, %r11
addq %rdx, %rbx
movq %rbx, -80(%rbp) ## 8-byte Spill
addq %rax, %rdi
movq %rdi, -120(%rbp) ## 8-byte Spill
xorl %r14d, %r14d
movq %r8, %r15
movq %rdx, %rbx
jmp LBB84_111
.p2align 4, 0x90
LBB84_110: ## in Loop: Header=BB84_111 Depth=2
addq %rbx, %r15
addq %rbx, %r10
addq %rbx, %r12
incq %r14
decl %r9d
je LBB84_429
LBB84_111: ## Parent Loop BB84_10 Depth=1
## => This Loop Header: Depth=2
## Child Loop BB84_121 Depth 3
## Child Loop BB84_126 Depth 3
## Child Loop BB84_132 Depth 3
## Child Loop BB84_135 Depth 3
cmpl $0, -56(%rbp) ## 4-byte Folded Reload
movq -104(%rbp), %r8 ## 8-byte Reload
movq -384(%rbp), %r13 ## 8-byte Reload
jle LBB84_110
## %bb.112: ## in Loop: Header=BB84_111 Depth=2
cmpl $8, -44(%rbp) ## 4-byte Folded Reload
jae LBB84_114
LBB84_129: ## in Loop: Header=BB84_111 Depth=2
xorl %edi, %edi
LBB84_130: ## in Loop: Header=BB84_111 Depth=2
movq %rbx, %rax
movq %r8, %rbx
movq %rdi, %r8
notq %r8
addq %rbx, %r8
testq %r13, %r13
je LBB84_134
## %bb.131: ## in Loop: Header=BB84_111 Depth=2
leaq (%r10,%rdi), %rax
leaq (%r12,%rdi), %rdx
leaq (%r15,%rdi), %rsi
xorl %ebx, %ebx
.p2align 4, 0x90
LBB84_132: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_111 Depth=2
## => This Inner Loop Header: Depth=3
movzbl (%rdx,%rbx), %ecx
addb (%rsi,%rbx), %cl
movb %cl, (%rax,%rbx)
incq %rbx
cmpq %rbx, %r13
jne LBB84_132
## %bb.133: ## in Loop: Header=BB84_111 Depth=2
addq %rbx, %rdi
movq -72(%rbp), %rax ## 8-byte Reload
LBB84_134: ## in Loop: Header=BB84_111 Depth=2
cmpq $3, %r8
movq -104(%rbp), %rcx ## 8-byte Reload
movq %rax, %rbx
jb LBB84_110
.p2align 4, 0x90
LBB84_135: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_111 Depth=2
## => This Inner Loop Header: Depth=3
movzbl (%r12,%rdi), %eax
addb (%r15,%rdi), %al
movb %al, (%r10,%rdi)
movzbl 1(%r12,%rdi), %eax
addb 1(%r15,%rdi), %al
movb %al, 1(%r10,%rdi)
movzbl 2(%r12,%rdi), %eax
addb 2(%r15,%rdi), %al
movb %al, 2(%r10,%rdi)
movzbl 3(%r12,%rdi), %eax
addb 3(%r15,%rdi), %al
movb %al, 3(%r10,%rdi)
addq $4, %rdi
cmpq %rdi, %rcx
jne LBB84_135
jmp LBB84_110
.p2align 4, 0x90
LBB84_114: ## in Loop: Header=BB84_111 Depth=2
movq %r14, %rcx
imulq %rbx, %rcx
leaq (%r11,%rcx), %rax
movq -80(%rbp), %rdx ## 8-byte Reload
addq %rcx, %rdx
addq -120(%rbp), %rcx ## 8-byte Folded Reload
movq %rax, %rsi
subq %rdx, %rsi
subq %rcx, %rax
cmpq $32, %rsi
jb LBB84_129
## %bb.115: ## in Loop: Header=BB84_111 Depth=2
movq -152(%rbp), %rdx ## 8-byte Reload
jb LBB84_129
## %bb.116: ## in Loop: Header=BB84_111 Depth=2
movl $0, %edi
cmpq $32, %rax
jb LBB84_130
## %bb.117: ## in Loop: Header=BB84_111 Depth=2
cmpl $32, -44(%rbp) ## 4-byte Folded Reload
jae LBB84_119
## %bb.118: ## in Loop: Header=BB84_111 Depth=2
xorl %eax, %eax
jmp LBB84_126
LBB84_119: ## in Loop: Header=BB84_111 Depth=2
cmpq $0, -176(%rbp) ## 8-byte Folded Reload
je LBB84_136
## %bb.120: ## in Loop: Header=BB84_111 Depth=2
movq -200(%rbp), %rcx ## 8-byte Reload
xorl %edi, %edi
movq -96(%rbp), %rax ## 8-byte Reload
.p2align 4, 0x90
LBB84_121: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_111 Depth=2
## => This Inner Loop Header: Depth=3
movdqu (%r15,%rdi), %xmm0
movdqu 16(%r15,%rdi), %xmm1
movdqu (%r12,%rdi), %xmm2
paddb %xmm0, %xmm2
movdqu 16(%r12,%rdi), %xmm0
paddb %xmm1, %xmm0
movdqu %xmm2, (%r10,%rdi)
movdqu %xmm0, 16(%r10,%rdi)
movdqu 32(%r15,%rdi), %xmm0
movdqu 48(%r15,%rdi), %xmm1
movdqu 32(%r12,%rdi), %xmm2
paddb %xmm0, %xmm2
movdqu 48(%r12,%rdi), %xmm0
paddb %xmm1, %xmm0
movdqu %xmm2, 32(%r10,%rdi)
movdqu %xmm0, 48(%r10,%rdi)
addq $64, %rdi
addq $-2, %rcx
jne LBB84_121
## %bb.122: ## in Loop: Header=BB84_111 Depth=2
testb $1, -216(%rbp) ## 1-byte Folded Reload
je LBB84_124
LBB84_123: ## in Loop: Header=BB84_111 Depth=2
movdqu (%r15,%rdi), %xmm0
movdqu 16(%r15,%rdi), %xmm1
movdqu (%r12,%rdi), %xmm2
paddb %xmm0, %xmm2
movdqu 16(%r12,%rdi), %xmm0
paddb %xmm1, %xmm0
movdqu %xmm2, (%r10,%rdi)
movdqu %xmm0, 16(%r10,%rdi)
LBB84_124: ## in Loop: Header=BB84_111 Depth=2
cmpq %r8, %rax
je LBB84_110
## %bb.125: ## in Loop: Header=BB84_111 Depth=2
movq -96(%rbp), %rdi ## 8-byte Reload
movq %rdi, %rax
testb $24, %r8b
je LBB84_130
.p2align 4, 0x90
LBB84_126: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_111 Depth=2
## => This Inner Loop Header: Depth=3
movq (%r15,%rax), %xmm0 ## xmm0 = mem[0],zero
movq (%r12,%rax), %xmm1 ## xmm1 = mem[0],zero
paddb %xmm0, %xmm1
movq %xmm1, (%r10,%rax)
addq $8, %rax
cmpq %rax, %rdx
jne LBB84_126
## %bb.127: ## in Loop: Header=BB84_111 Depth=2
movq %rdx, %rdi
cmpq %r8, %rdx
je LBB84_110
jmp LBB84_130
LBB84_136: ## in Loop: Header=BB84_111 Depth=2
xorl %edi, %edi
movq -96(%rbp), %rax ## 8-byte Reload
testb $1, -216(%rbp) ## 1-byte Folded Reload
jne LBB84_123
jmp LBB84_124
LBB84_137: ## in Loop: Header=BB84_10 Depth=1
movl -48(%rbp), %r9d ## 4-byte Reload
testl %r9d, %r9d
je LBB84_79
## %bb.138: ## in Loop: Header=BB84_10 Depth=1
movq -64(%rbp), %rax ## 8-byte Reload
leaq (%r11,%rax), %rcx
movq %rcx, -144(%rbp) ## 8-byte Spill
addq %rdx, %rbx
movq %rbx, -80(%rbp) ## 8-byte Spill
addq %rax, %rdi
movq %rdi, -120(%rbp) ## 8-byte Spill
movq -448(%rbp), %rax ## 8-byte Reload
leaq (%r11,%rax), %r13
addq -160(%rbp), %r11 ## 8-byte Folded Reload
xorl %r14d, %r14d
movq %r8, %r15
movq %rdx, %r8
jmp LBB84_140
.p2align 4, 0x90
LBB84_139: ## in Loop: Header=BB84_140 Depth=2
addq %r8, %r15
addq %r8, %r10
addq %r8, %r12
incq %r14
addq %r8, %r13
addq %r8, %r11
decl %r9d
je LBB84_429
LBB84_140: ## Parent Loop BB84_10 Depth=1
## => This Loop Header: Depth=2
## Child Loop BB84_150 Depth 3
## Child Loop BB84_153 Depth 3
## Child Loop BB84_161 Depth 3
cmpl $0, -56(%rbp) ## 4-byte Folded Reload
movq -104(%rbp), %rbx ## 8-byte Reload
jle LBB84_139
## %bb.141: ## in Loop: Header=BB84_140 Depth=2
cmpl $8, -44(%rbp) ## 4-byte Folded Reload
jae LBB84_143
LBB84_157: ## in Loop: Header=BB84_140 Depth=2
xorl %edi, %edi
LBB84_158: ## in Loop: Header=BB84_140 Depth=2
movq %rdi, %rsi
testb $1, %bl
je LBB84_160
## %bb.159: ## in Loop: Header=BB84_140 Depth=2
movzbl (%r12,%rdi), %eax
movq %rdi, %rcx
subq %r8, %rcx
movzbl (%r10,%rcx), %ecx
addl %eax, %ecx
shrl %ecx
addb (%r15,%rdi), %cl
movb %cl, (%r10,%rdi)
movq %rdi, %rsi
orq $1, %rsi
LBB84_160: ## in Loop: Header=BB84_140 Depth=2
notq %rdi
cmpq -256(%rbp), %rdi ## 8-byte Folded Reload
je LBB84_139
.p2align 4, 0x90
LBB84_161: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_140 Depth=2
## => This Inner Loop Header: Depth=3
movzbl (%r12,%rsi), %eax
movzbl (%r11,%rsi), %ecx
addl %eax, %ecx
shrl %ecx
addb (%r15,%rsi), %cl
movb %cl, (%r10,%rsi)
movzbl 1(%r12,%rsi), %eax
movzbl 1(%r11,%rsi), %ecx
addl %eax, %ecx
shrl %ecx
addb 1(%r15,%rsi), %cl
movb %cl, 1(%r10,%rsi)
addq $2, %rsi
cmpq %rsi, %rbx
jne LBB84_161
jmp LBB84_139
.p2align 4, 0x90
LBB84_143: ## in Loop: Header=BB84_140 Depth=2
movq %r14, %rcx
imulq %r8, %rcx
movq -144(%rbp), %rax ## 8-byte Reload
addq %rcx, %rax
movq -80(%rbp), %rdx ## 8-byte Reload
addq %rcx, %rdx
addq -120(%rbp), %rcx ## 8-byte Folded Reload
movq %rax, %rsi
subq %rdx, %rsi
subq %rcx, %rax
cmpq $32, %rsi
jb LBB84_157
## %bb.144: ## in Loop: Header=BB84_140 Depth=2
movq -152(%rbp), %rcx ## 8-byte Reload
jb LBB84_157
## %bb.145: ## in Loop: Header=BB84_140 Depth=2
cmpq $32, %rax
jb LBB84_157
## %bb.146: ## in Loop: Header=BB84_140 Depth=2
movl $0, %edi
cmpl $32, -56(%rbp) ## 4-byte Folded Reload
jb LBB84_158
## %bb.147: ## in Loop: Header=BB84_140 Depth=2
cmpl $32, -44(%rbp) ## 4-byte Folded Reload
jae LBB84_149
## %bb.148: ## in Loop: Header=BB84_140 Depth=2
xorl %eax, %eax
jmp LBB84_153
LBB84_149: ## in Loop: Header=BB84_140 Depth=2
xorl %esi, %esi
movq -96(%rbp), %rax ## 8-byte Reload
.p2align 4, 0x90
LBB84_150: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_140 Depth=2
## => This Inner Loop Header: Depth=3
pmovzxbw 8(%r12,%rsi), %xmm0 ## xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
pmovzxbw (%r12,%rsi), %xmm1 ## xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
pmovzxbw 24(%r12,%rsi), %xmm2 ## xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
pmovzxbw 16(%r12,%rsi), %xmm3 ## xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
pmovzxbw -8(%r13,%rsi), %xmm4 ## xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
movdqu (%r15,%rsi), %xmm5
paddw %xmm0, %xmm4
pmovzxbw -16(%r13,%rsi), %xmm0 ## xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
movdqu 16(%r15,%rsi), %xmm6
paddw %xmm1, %xmm0
pmovzxbw 8(%r13,%rsi), %xmm1 ## xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
paddw %xmm2, %xmm1
pmovzxbw (%r13,%rsi), %xmm2 ## xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
paddw %xmm3, %xmm2
psrlw $1, %xmm4
psrlw $1, %xmm0
packuswb %xmm4, %xmm0
paddb %xmm5, %xmm0
psrlw $1, %xmm1
psrlw $1, %xmm2
packuswb %xmm1, %xmm2
paddb %xmm6, %xmm2
movdqu %xmm0, (%r10,%rsi)
movdqu %xmm2, 16(%r10,%rsi)
addq $32, %rsi
cmpq %rsi, %rax
jne LBB84_150
## %bb.151: ## in Loop: Header=BB84_140 Depth=2
cmpq %rbx, %rax
je LBB84_139
## %bb.152: ## in Loop: Header=BB84_140 Depth=2
movq -96(%rbp), %rdi ## 8-byte Reload
movq %rdi, %rax
testb $24, %bl
je LBB84_158
.p2align 4, 0x90
LBB84_153: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_140 Depth=2
## => This Inner Loop Header: Depth=3
pmovzxbw (%r12,%rax), %xmm0 ## xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
pmovzxbw (%r11,%rax), %xmm1 ## xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
movq (%r15,%rax), %xmm2 ## xmm2 = mem[0],zero
paddw %xmm0, %xmm1
psrlw $1, %xmm1
packuswb %xmm1, %xmm1
paddb %xmm2, %xmm1
movq %xmm1, (%r10,%rax)
addq $8, %rax
cmpq %rax, %rcx
jne LBB84_153
## %bb.154: ## in Loop: Header=BB84_140 Depth=2
movq %rcx, %rdi
cmpq %rbx, %rcx
je LBB84_139
jmp LBB84_158
LBB84_162: ## in Loop: Header=BB84_10 Depth=1
movl -48(%rbp), %eax ## 4-byte Reload
testl %eax, %eax
je LBB84_79
## %bb.163: ## in Loop: Header=BB84_10 Depth=1
leaq (%r11,%r9), %rcx
movq %rcx, -272(%rbp) ## 8-byte Spill
addq %rdx, %rbx
movq %rbx, -80(%rbp) ## 8-byte Spill
movq -160(%rbp), %rcx ## 8-byte Reload
leaq (%rcx,%rdi), %rdx
movq %rdx, -496(%rbp) ## 8-byte Spill
addq %r9, %rdi
movq %rdi, -120(%rbp) ## 8-byte Spill
movq -456(%rbp), %rdx ## 8-byte Reload
leaq (%r11,%rdx), %r14
addq %rcx, %r11
xorl %ecx, %ecx
movq %rcx, -224(%rbp) ## 8-byte Spill
movq %r8, %r15
jmp LBB84_165
.p2align 4, 0x90
LBB84_164: ## in Loop: Header=BB84_165 Depth=2
movq -72(%rbp), %rax ## 8-byte Reload
addq %rax, %r15
addq %rax, %r10
addq %rax, %r12
incq -224(%rbp) ## 8-byte Folded Spill
addq %rax, %r14
addq %rax, %r11
movl -240(%rbp), %eax ## 4-byte Reload
decl %eax
je LBB84_429
LBB84_165: ## Parent Loop BB84_10 Depth=1
## => This Loop Header: Depth=2
## Child Loop BB84_176 Depth 3
## Child Loop BB84_180 Depth 3
## Child Loop BB84_187 Depth 3
movl %eax, -240(%rbp) ## 4-byte Spill
cmpl $0, -56(%rbp) ## 4-byte Folded Reload
jle LBB84_164
## %bb.166: ## in Loop: Header=BB84_165 Depth=2
cmpl $8, -44(%rbp) ## 4-byte Folded Reload
jae LBB84_168
## %bb.167: ## in Loop: Header=BB84_165 Depth=2
xorl %edi, %edi
jmp LBB84_187
.p2align 4, 0x90
LBB84_168: ## in Loop: Header=BB84_165 Depth=2
movq -224(%rbp), %rdx ## 8-byte Reload
imulq -72(%rbp), %rdx ## 8-byte Folded Reload
movq -272(%rbp), %rax ## 8-byte Reload
addq %rdx, %rax
movq -80(%rbp), %rcx ## 8-byte Reload
addq %rdx, %rcx
movq -120(%rbp), %rsi ## 8-byte Reload
addq %rdx, %rsi
addq -496(%rbp), %rdx ## 8-byte Folded Reload
movq %rax, %rdi
subq %rcx, %rdi
movq %rax, %rcx
subq %rsi, %rcx
subq %rdx, %rax
cmpq $16, %rdi
jb LBB84_184
## %bb.169: ## in Loop: Header=BB84_165 Depth=2
jb LBB84_185
## %bb.170: ## in Loop: Header=BB84_165 Depth=2
cmpl $16, -56(%rbp) ## 4-byte Folded Reload
jb LBB84_183
## %bb.171: ## in Loop: Header=BB84_165 Depth=2
cmpq $16, %rcx
jb LBB84_182
## %bb.172: ## in Loop: Header=BB84_165 Depth=2
movl $0, %edi
cmpq $16, %rax
jb LBB84_187
## %bb.173: ## in Loop: Header=BB84_165 Depth=2
cmpl $16, -44(%rbp) ## 4-byte Folded Reload
jae LBB84_175
## %bb.174: ## in Loop: Header=BB84_165 Depth=2
xorl %esi, %esi
jmp LBB84_179
LBB84_175: ## in Loop: Header=BB84_165 Depth=2
xorl %r8d, %r8d
movq -368(%rbp), %rax ## 8-byte Reload
.p2align 4, 0x90
LBB84_176: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_165 Depth=2
## => This Inner Loop Header: Depth=3
movdqu (%r11,%r8), %xmm10
pshufd $238, %xmm10, %xmm0 ## xmm0 = xmm10[2,3,2,3]
pmovzxbd %xmm0, %xmm5 ## xmm5 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
pmovzxbd %xmm10, %xmm14 ## xmm14 = xmm10[0],zero,zero,zero,xmm10[1],zero,zero,zero,xmm10[2],zero,zero,zero,xmm10[3],zero,zero,zero
pshufd $85, %xmm10, %xmm0 ## xmm0 = xmm10[1,1,1,1]
pmovzxbd %xmm0, %xmm6 ## xmm6 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
movdqu (%r12,%r8), %xmm1
pshufd $238, %xmm1, %xmm0 ## xmm0 = xmm1[2,3,2,3]
pmovzxbd %xmm0, %xmm13 ## xmm13 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
pmovzxbd %xmm1, %xmm9 ## xmm9 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
pshufd $85, %xmm1, %xmm0 ## xmm0 = xmm1[1,1,1,1]
movdqa %xmm1, %xmm3
movdqa %xmm1, -144(%rbp) ## 16-byte Spill
pmovzxbd %xmm0, %xmm7 ## xmm7 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
movdqu (%r14,%r8), %xmm4
pshufd $85, %xmm4, %xmm0 ## xmm0 = xmm4[1,1,1,1]
pmovzxbd %xmm0, %xmm12 ## xmm12 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
pmovzxbd %xmm4, %xmm8 ## xmm8 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero
pshufd $238, %xmm4, %xmm0 ## xmm0 = xmm4[2,3,2,3]
movdqa %xmm4, -336(%rbp) ## 16-byte Spill
pmovzxbd %xmm0, %xmm11 ## xmm11 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
movdqa %xmm7, %xmm1
paddd %xmm6, %xmm1
movdqa %xmm9, %xmm15
paddd %xmm14, %xmm15
movdqa %xmm13, %xmm2
paddd %xmm5, %xmm2
psubd %xmm11, %xmm2
psubd %xmm8, %xmm15
psubd %xmm12, %xmm1
movdqa %xmm1, %xmm0
psubd %xmm6, %xmm0
movdqa %xmm0, -320(%rbp) ## 16-byte Spill
movdqa %xmm15, %xmm0
psubd %xmm14, %xmm0
movdqa %xmm0, -416(%rbp) ## 16-byte Spill
movdqa %xmm2, %xmm6
psubd %xmm5, %xmm6
pshufd $255, %xmm10, %xmm5 ## xmm5 = xmm10[3,3,3,3]
pmovzxbd %xmm5, %xmm5 ## xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
pshufd $255, %xmm3, %xmm3 ## xmm3 = xmm3[3,3,3,3]
pmovzxbd %xmm3, %xmm0 ## xmm0 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
pshufd $255, %xmm4, %xmm4 ## xmm4 = xmm4[3,3,3,3]
pmovzxbd %xmm4, %xmm14 ## xmm14 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero
movdqa %xmm0, %xmm3
paddd %xmm5, %xmm3
psubd %xmm14, %xmm3
movdqa %xmm3, %xmm4
psubd %xmm5, %xmm4
movdqa %xmm1, %xmm5
psubd %xmm7, %xmm5
movdqa %xmm5, -304(%rbp) ## 16-byte Spill
movdqa %xmm15, %xmm5
psubd %xmm9, %xmm5
movdqa %xmm2, %xmm9
psubd %xmm13, %xmm9
movdqa %xmm3, %xmm7
psubd %xmm0, %xmm7
psubd %xmm12, %xmm1
psubd %xmm8, %xmm15
psubd %xmm11, %xmm2
pabsd %xmm4, %xmm13
pabsd %xmm6, %xmm12
pabsd %xmm7, %xmm0
pabsd %xmm9, %xmm11
psubd %xmm14, %xmm3
pabsd %xmm3, %xmm6
pabsd %xmm2, %xmm3
movdqa %xmm12, %xmm2
pminud %xmm3, %xmm2
movdqa %xmm13, %xmm9
pminud %xmm6, %xmm9
movdqa %xmm0, -400(%rbp) ## 16-byte Spill
pminud %xmm0, %xmm6
pcmpeqd %xmm0, %xmm6
pxor LCPI84_1(%rip), %xmm6
pminud %xmm11, %xmm3
pcmpeqd %xmm11, %xmm3
pxor LCPI84_1(%rip), %xmm3
packssdw %xmm6, %xmm3
pabsd -416(%rbp), %xmm6 ## 16-byte Folded Reload
pabsd -320(%rbp), %xmm7 ## 16-byte Folded Reload
pabsd %xmm5, %xmm14
pabsd -304(%rbp), %xmm4 ## 16-byte Folded Reload
pabsd %xmm15, %xmm0
pabsd %xmm1, %xmm5
movdqa %xmm7, %xmm15
pminud %xmm5, %xmm15
movdqa %xmm6, %xmm1
pminud %xmm0, %xmm1
pminud %xmm14, %xmm0
pcmpeqd %xmm14, %xmm0
pxor LCPI84_1(%rip), %xmm0
pminud %xmm4, %xmm5
pcmpeqd %xmm4, %xmm5
pxor LCPI84_1(%rip), %xmm5
packssdw %xmm5, %xmm0
packsswb %xmm3, %xmm0
movdqa -144(%rbp), %xmm8 ## 16-byte Reload
pblendvb %xmm0, -336(%rbp), %xmm8 ## 16-byte Folded Reload
movdqa %xmm7, %xmm0
pminud %xmm4, %xmm0
movdqa %xmm6, %xmm3
pminud %xmm14, %xmm3
movdqa %xmm12, %xmm4
pminud %xmm11, %xmm4
movdqa %xmm13, %xmm5
pminud -400(%rbp), %xmm5 ## 16-byte Folded Reload
pcmpeqd %xmm7, %xmm0
pcmpeqd %xmm7, %xmm15
pxor LCPI84_1(%rip), %xmm0
pxor LCPI84_1(%rip), %xmm15
por %xmm0, %xmm15
pcmpeqd %xmm6, %xmm3
pcmpeqd %xmm6, %xmm1
pxor LCPI84_1(%rip), %xmm3
pxor LCPI84_1(%rip), %xmm1
por %xmm3, %xmm1
packssdw %xmm15, %xmm1
pcmpeqd %xmm15, %xmm15
pcmpeqd %xmm12, %xmm4
pcmpeqd %xmm12, %xmm2
pxor %xmm15, %xmm4
pxor %xmm15, %xmm2
por %xmm4, %xmm2
pcmpeqd %xmm13, %xmm5
pcmpeqd %xmm13, %xmm9
pxor %xmm15, %xmm5
pxor %xmm15, %xmm9
por %xmm5, %xmm9
packssdw %xmm9, %xmm2
packsswb %xmm2, %xmm1
movdqa %xmm1, %xmm0
pblendvb %xmm0, %xmm8, %xmm10
movdqu (%r15,%r8), %xmm0
paddb %xmm0, %xmm10
movdqu %xmm10, (%r10,%r8)
addq $16, %r8
cmpq %r8, %rax
jne LBB84_176
## %bb.177: ## in Loop: Header=BB84_165 Depth=2
cmpq -104(%rbp), %rax ## 8-byte Folded Reload
movdqa LCPI84_0(%rip), %xmm7 ## xmm7 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
je LBB84_164
## %bb.178: ## in Loop: Header=BB84_165 Depth=2
movq -368(%rbp), %rdi ## 8-byte Reload
movq %rdi, %rsi
testb $8, -104(%rbp) ## 1-byte Folded Reload
je LBB84_187
LBB84_179: ## in Loop: Header=BB84_165 Depth=2
movq -152(%rbp), %rax ## 8-byte Reload
.p2align 4, 0x90
LBB84_180: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_165 Depth=2
## => This Inner Loop Header: Depth=3
movq (%r15,%rsi), %xmm8 ## xmm8 = mem[0],zero
movq (%r11,%rsi), %xmm2 ## xmm2 = mem[0],zero
pmovzxbd %xmm2, %xmm0 ## xmm0 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
pshufd $85, %xmm2, %xmm1 ## xmm1 = xmm2[1,1,1,1]
pmovzxbd %xmm1, %xmm1 ## xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
movq (%r12,%rsi), %xmm10 ## xmm10 = mem[0],zero
pmovzxbd %xmm10, %xmm12 ## xmm12 = xmm10[0],zero,zero,zero,xmm10[1],zero,zero,zero,xmm10[2],zero,zero,zero,xmm10[3],zero,zero,zero
pshufd $85, %xmm10, %xmm5 ## xmm5 = xmm10[1,1,1,1]
pmovzxbd %xmm5, %xmm4 ## xmm4 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
movq (%r14,%rsi), %xmm9 ## xmm9 = mem[0],zero
pshufd $85, %xmm9, %xmm5 ## xmm5 = xmm9[1,1,1,1]
pmovzxbd %xmm5, %xmm13 ## xmm13 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
pmovzxbd %xmm9, %xmm11 ## xmm11 = xmm9[0],zero,zero,zero,xmm9[1],zero,zero,zero,xmm9[2],zero,zero,zero,xmm9[3],zero,zero,zero
movdqa %xmm4, %xmm6
paddd %xmm1, %xmm6
movdqa %xmm12, %xmm3
paddd %xmm0, %xmm3
psubd %xmm11, %xmm3
psubd %xmm13, %xmm6
movdqa %xmm6, %xmm7
psubd %xmm1, %xmm7
movdqa %xmm3, %xmm1
psubd %xmm0, %xmm1
pabsd %xmm1, %xmm5
pabsd %xmm7, %xmm1
movdqa %xmm6, %xmm0
psubd %xmm4, %xmm0
movdqa %xmm3, %xmm4
psubd %xmm12, %xmm4
pabsd %xmm4, %xmm14
pabsd %xmm0, %xmm12
psubd %xmm13, %xmm6
psubd %xmm11, %xmm3
pabsd %xmm3, %xmm0
pabsd %xmm6, %xmm7
movdqa %xmm1, %xmm3
pminud %xmm12, %xmm3
pcmpeqd %xmm1, %xmm3
pxor %xmm15, %xmm3
movdqa %xmm5, %xmm6
pminud %xmm14, %xmm6
pcmpeqd %xmm5, %xmm6
pxor %xmm15, %xmm6
movdqa %xmm1, %xmm4
pminud %xmm7, %xmm4
pcmpeqd %xmm1, %xmm4
pxor %xmm15, %xmm4
por %xmm3, %xmm4
movdqa %xmm5, %xmm1
pminud %xmm0, %xmm1
pcmpeqd %xmm5, %xmm1
pxor %xmm15, %xmm1
por %xmm6, %xmm1
packssdw %xmm4, %xmm1
packsswb %xmm1, %xmm1
pminud %xmm14, %xmm0
pcmpeqd %xmm14, %xmm0
pxor %xmm15, %xmm0
pminud %xmm12, %xmm7
pcmpeqd %xmm12, %xmm7
pxor %xmm15, %xmm7
packssdw %xmm7, %xmm0
packsswb %xmm0, %xmm0
pblendvb %xmm0, %xmm9, %xmm10
movdqa %xmm1, %xmm0
pblendvb %xmm0, %xmm10, %xmm2
paddb %xmm8, %xmm2
movq %xmm2, (%r10,%rsi)
addq $8, %rsi
cmpq %rsi, %rax
jne LBB84_180
## %bb.181: ## in Loop: Header=BB84_165 Depth=2
movq %rax, %rdi
cmpq -104(%rbp), %rax ## 8-byte Folded Reload
movdqa LCPI84_0(%rip), %xmm7 ## xmm7 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
jne LBB84_187
jmp LBB84_164
LBB84_182: ## in Loop: Header=BB84_165 Depth=2
xorl %edi, %edi
jmp LBB84_187
LBB84_183: ## in Loop: Header=BB84_165 Depth=2
xorl %edi, %edi
jmp LBB84_187
LBB84_184: ## in Loop: Header=BB84_165 Depth=2
xorl %edi, %edi
jmp LBB84_187
LBB84_185: ## in Loop: Header=BB84_165 Depth=2
xorl %edi, %edi
jmp LBB84_187
.p2align 4, 0x90
LBB84_186: ## in Loop: Header=BB84_187 Depth=3
cmpl %r9d, %ebx
cmoval %ecx, %esi
cmpl %r13d, %ebx
cmoval %ecx, %esi
addb (%r15,%rdi), %sil
movb %sil, (%r10,%rdi)
incq %rdi
cmpq %rdi, -104(%rbp) ## 8-byte Folded Reload
je LBB84_164
LBB84_187: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_165 Depth=2
## => This Inner Loop Header: Depth=3
movzbl (%r11,%rdi), %esi
movzbl (%r12,%rdi), %r8d
movzbl (%r14,%rdi), %ecx
leal (%r8,%rsi), %eax
subl %ecx, %eax
movl %eax, %edx
subl %esi, %edx
movl %edx, %ebx
negl %ebx
cmovsl %edx, %ebx
movl %eax, %edx
subl %r8d, %edx
movl %edx, %r13d
negl %r13d
cmovsl %edx, %r13d
subl %ecx, %eax
movl %eax, %r9d
negl %r9d
cmovsl %eax, %r9d
cmpl %r9d, %r13d
ja LBB84_186
## %bb.188: ## in Loop: Header=BB84_187 Depth=3
movl %r8d, %ecx
jmp LBB84_186
LBB84_189: ## in Loop: Header=BB84_10 Depth=1
movl -48(%rbp), %r13d ## 4-byte Reload
testl %r13d, %r13d
je LBB84_79
## %bb.190: ## in Loop: Header=BB84_10 Depth=1
addq %r11, %r9
addq %rdx, %rbx
movq %rbx, %r12
movq -264(%rbp), %rax ## 8-byte Reload
leaq (%r11,%rax), %rsi
addq -160(%rbp), %r11 ## 8-byte Folded Reload
xorl %r14d, %r14d
movq %r8, %r15
movq %rdx, %r8
jmp LBB84_192
.p2align 4, 0x90
LBB84_191: ## in Loop: Header=BB84_192 Depth=2
addq %r8, %r15
addq %r8, %r10
incq %r14
addq %r8, %rsi
addq %r8, %r11
decl %r13d
je LBB84_429
LBB84_192: ## Parent Loop BB84_10 Depth=1
## => This Loop Header: Depth=2
## Child Loop BB84_202 Depth 3
## Child Loop BB84_208 Depth 3
## Child Loop BB84_215 Depth 3
movq -56(%rbp), %rdx ## 8-byte Reload
testl %edx, %edx
movq -104(%rbp), %rbx ## 8-byte Reload
jle LBB84_191
## %bb.193: ## in Loop: Header=BB84_192 Depth=2
cmpl $8, -44(%rbp) ## 4-byte Folded Reload
jae LBB84_195
LBB84_211: ## in Loop: Header=BB84_192 Depth=2
xorl %eax, %eax
LBB84_212: ## in Loop: Header=BB84_192 Depth=2
movq %rax, %rcx
testb $1, %bl
je LBB84_214
## %bb.213: ## in Loop: Header=BB84_192 Depth=2
movq %rax, %rcx
subq %r8, %rcx
movb (%r10,%rcx), %cl
shrb %cl
addb (%r15,%rax), %cl
movb %cl, (%r10,%rax)
movq %rax, %rcx
orq $1, %rcx
LBB84_214: ## in Loop: Header=BB84_192 Depth=2
notq %rax
cmpq -256(%rbp), %rax ## 8-byte Folded Reload
je LBB84_191
.p2align 4, 0x90
LBB84_215: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_192 Depth=2
## => This Inner Loop Header: Depth=3
movzbl (%r11,%rcx), %eax
shrb %al
addb (%r15,%rcx), %al
movb %al, (%r10,%rcx)
movzbl 1(%r11,%rcx), %eax
shrb %al
addb 1(%r15,%rcx), %al
movb %al, 1(%r10,%rcx)
addq $2, %rcx
cmpq %rcx, %rbx
jne LBB84_215
jmp LBB84_191
.p2align 4, 0x90
LBB84_195: ## in Loop: Header=BB84_192 Depth=2
movq %r14, %rax
imulq %r8, %rax
leaq (%r9,%rax), %rcx
addq %r12, %rax
subq %rax, %rcx
cmpq $32, %rcx
jb LBB84_211
## %bb.196: ## in Loop: Header=BB84_192 Depth=2
jb LBB84_211
## %bb.197: ## in Loop: Header=BB84_192 Depth=2
movl $0, %eax
cmpl $32, %edx
jb LBB84_212
## %bb.198: ## in Loop: Header=BB84_192 Depth=2
cmpl $32, -44(%rbp) ## 4-byte Folded Reload
jae LBB84_200
## %bb.199: ## in Loop: Header=BB84_192 Depth=2
xorl %ecx, %ecx
jmp LBB84_207
LBB84_200: ## in Loop: Header=BB84_192 Depth=2
cmpq $0, -176(%rbp) ## 8-byte Folded Reload
je LBB84_216
## %bb.201: ## in Loop: Header=BB84_192 Depth=2
movq -200(%rbp), %rdi ## 8-byte Reload
xorl %eax, %eax
.p2align 4, 0x90
LBB84_202: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_192 Depth=2
## => This Inner Loop Header: Depth=3
movdqu (%r15,%rax), %xmm0
movdqu 16(%r15,%rax), %xmm1
movdqu -48(%rsi,%rax), %xmm2
movdqu -32(%rsi,%rax), %xmm3
psrlw $1, %xmm2
pand %xmm7, %xmm2
paddb %xmm0, %xmm2
psrlw $1, %xmm3
pand %xmm7, %xmm3
paddb %xmm1, %xmm3
movdqu %xmm2, (%r10,%rax)
movdqu %xmm3, 16(%r10,%rax)
movdqu 32(%r15,%rax), %xmm0
movdqu 48(%r15,%rax), %xmm1
movdqu -16(%rsi,%rax), %xmm2
movdqu (%rsi,%rax), %xmm3
psrlw $1, %xmm2
pand %xmm7, %xmm2
paddb %xmm0, %xmm2
psrlw $1, %xmm3
pand %xmm7, %xmm3
paddb %xmm1, %xmm3
movdqu %xmm2, 32(%r10,%rax)
movdqu %xmm3, 48(%r10,%rax)
addq $64, %rax
addq $-2, %rdi
jne LBB84_202
## %bb.203: ## in Loop: Header=BB84_192 Depth=2
testb $1, -216(%rbp) ## 1-byte Folded Reload
je LBB84_205
LBB84_204: ## in Loop: Header=BB84_192 Depth=2
movdqu (%r15,%rax), %xmm0
movdqu 16(%r15,%rax), %xmm1
movq %rax, %rcx
subq %r8, %rcx
movdqu (%r10,%rcx), %xmm2
movdqu 16(%r10,%rcx), %xmm3
psrlw $1, %xmm2
pand %xmm7, %xmm2
paddb %xmm0, %xmm2
psrlw $1, %xmm3
pand %xmm7, %xmm3
paddb %xmm1, %xmm3
movdqu %xmm2, (%r10,%rax)
movdqu %xmm3, 16(%r10,%rax)
LBB84_205: ## in Loop: Header=BB84_192 Depth=2
cmpq %rbx, -96(%rbp) ## 8-byte Folded Reload
je LBB84_191
## %bb.206: ## in Loop: Header=BB84_192 Depth=2
movq -96(%rbp), %rax ## 8-byte Reload
movq %rax, %rcx
testb $24, %bl
je LBB84_212
LBB84_207: ## in Loop: Header=BB84_192 Depth=2
movq -152(%rbp), %rdx ## 8-byte Reload
.p2align 4, 0x90
LBB84_208: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_192 Depth=2
## => This Inner Loop Header: Depth=3
movq (%r15,%rcx), %xmm0 ## xmm0 = mem[0],zero
movq (%r11,%rcx), %xmm1 ## xmm1 = mem[0],zero
psrlw $1, %xmm1
pand %xmm7, %xmm1
paddb %xmm0, %xmm1
movq %xmm1, (%r10,%rcx)
addq $8, %rcx
cmpq %rcx, %rdx
jne LBB84_208
## %bb.209: ## in Loop: Header=BB84_192 Depth=2
movq %rdx, %rax
cmpq %rbx, %rdx
je LBB84_191
jmp LBB84_212
LBB84_216: ## in Loop: Header=BB84_192 Depth=2
xorl %eax, %eax
testb $1, -216(%rbp) ## 1-byte Folded Reload
jne LBB84_204
jmp LBB84_205
LBB84_217: ## in Loop: Header=BB84_10 Depth=1
movl -48(%rbp), %r14d ## 4-byte Reload
testl %r14d, %r14d
je LBB84_79
## %bb.218: ## in Loop: Header=BB84_10 Depth=1
movq %rbx, %r12
addq %r11, %r9
addq %rdx, %r12
movq -264(%rbp), %rax ## 8-byte Reload
leaq (%r11,%rax), %rsi
addq -160(%rbp), %r11 ## 8-byte Folded Reload
xorl %edi, %edi
movq %r8, %r15
movq %rdx, %r8
jmp LBB84_220
.p2align 4, 0x90
LBB84_219: ## in Loop: Header=BB84_220 Depth=2
addq %r8, %r15
addq %r8, %r10
incq %rdi
addq %r8, %rsi
addq %r8, %r11
decl %r14d
je LBB84_429
LBB84_220: ## Parent Loop BB84_10 Depth=1
## => This Loop Header: Depth=2
## Child Loop BB84_230 Depth 3
## Child Loop BB84_236 Depth 3
## Child Loop BB84_243 Depth 3
movq -56(%rbp), %rdx ## 8-byte Reload
testl %edx, %edx
movq -104(%rbp), %rbx ## 8-byte Reload
jle LBB84_219
## %bb.221: ## in Loop: Header=BB84_220 Depth=2
cmpl $8, -44(%rbp) ## 4-byte Folded Reload
jae LBB84_223
LBB84_239: ## in Loop: Header=BB84_220 Depth=2
xorl %eax, %eax
LBB84_240: ## in Loop: Header=BB84_220 Depth=2
movq %rax, %rcx
testb $1, %bl
je LBB84_242
## %bb.241: ## in Loop: Header=BB84_220 Depth=2
movq %rax, %rcx
subq %r8, %rcx
movb (%r10,%rcx), %cl
addb (%r15,%rax), %cl
movb %cl, (%r10,%rax)
movq %rax, %rcx
orq $1, %rcx
LBB84_242: ## in Loop: Header=BB84_220 Depth=2
notq %rax
cmpq -256(%rbp), %rax ## 8-byte Folded Reload
je LBB84_219
.p2align 4, 0x90
LBB84_243: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_220 Depth=2
## => This Inner Loop Header: Depth=3
movzbl (%r11,%rcx), %eax
addb (%r15,%rcx), %al
movb %al, (%r10,%rcx)
movzbl 1(%r11,%rcx), %eax
addb 1(%r15,%rcx), %al
movb %al, 1(%r10,%rcx)
addq $2, %rcx
cmpq %rcx, %rbx
jne LBB84_243
jmp LBB84_219
.p2align 4, 0x90
LBB84_223: ## in Loop: Header=BB84_220 Depth=2
movq %rdi, %rax
imulq %r8, %rax
leaq (%r9,%rax), %rcx
addq %r12, %rax
subq %rax, %rcx
cmpq $32, %rcx
jb LBB84_239
## %bb.224: ## in Loop: Header=BB84_220 Depth=2
jb LBB84_239
## %bb.225: ## in Loop: Header=BB84_220 Depth=2
movl $0, %eax
cmpl $32, %edx
jb LBB84_240
## %bb.226: ## in Loop: Header=BB84_220 Depth=2
cmpl $32, -44(%rbp) ## 4-byte Folded Reload
jae LBB84_228
## %bb.227: ## in Loop: Header=BB84_220 Depth=2
xorl %ecx, %ecx
jmp LBB84_235
LBB84_228: ## in Loop: Header=BB84_220 Depth=2
cmpq $0, -176(%rbp) ## 8-byte Folded Reload
je LBB84_244
## %bb.229: ## in Loop: Header=BB84_220 Depth=2
movq -200(%rbp), %rcx ## 8-byte Reload
xorl %eax, %eax
.p2align 4, 0x90
LBB84_230: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_220 Depth=2
## => This Inner Loop Header: Depth=3
movdqu (%r15,%rax), %xmm0
movdqu 16(%r15,%rax), %xmm1
movdqu -48(%rsi,%rax), %xmm2
paddb %xmm0, %xmm2
movdqu -32(%rsi,%rax), %xmm0
paddb %xmm1, %xmm0
movdqu %xmm2, (%r10,%rax)
movdqu %xmm0, 16(%r10,%rax)
movdqu 32(%r15,%rax), %xmm0
movdqu 48(%r15,%rax), %xmm1
movdqu -16(%rsi,%rax), %xmm2
paddb %xmm0, %xmm2
movdqu (%rsi,%rax), %xmm0
paddb %xmm1, %xmm0
movdqu %xmm2, 32(%r10,%rax)
movdqu %xmm0, 48(%r10,%rax)
addq $64, %rax
addq $-2, %rcx
jne LBB84_230
## %bb.231: ## in Loop: Header=BB84_220 Depth=2
testb $1, -216(%rbp) ## 1-byte Folded Reload
je LBB84_233
LBB84_232: ## in Loop: Header=BB84_220 Depth=2
movdqu (%r15,%rax), %xmm0
movdqu 16(%r15,%rax), %xmm1
movq %rax, %rcx
subq %r8, %rcx
movdqu (%r10,%rcx), %xmm2
paddb %xmm0, %xmm2
movdqu 16(%r10,%rcx), %xmm0
paddb %xmm1, %xmm0
movdqu %xmm2, (%r10,%rax)
movdqu %xmm0, 16(%r10,%rax)
LBB84_233: ## in Loop: Header=BB84_220 Depth=2
cmpq %rbx, -96(%rbp) ## 8-byte Folded Reload
je LBB84_219
## %bb.234: ## in Loop: Header=BB84_220 Depth=2
movq -96(%rbp), %rax ## 8-byte Reload
movq %rax, %rcx
testb $24, %bl
je LBB84_240
LBB84_235: ## in Loop: Header=BB84_220 Depth=2
movq -152(%rbp), %rdx ## 8-byte Reload
.p2align 4, 0x90
LBB84_236: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_220 Depth=2
## => This Inner Loop Header: Depth=3
movq (%r15,%rcx), %xmm0 ## xmm0 = mem[0],zero
movq (%r11,%rcx), %xmm1 ## xmm1 = mem[0],zero
paddb %xmm0, %xmm1
movq %xmm1, (%r10,%rcx)
addq $8, %rcx
cmpq %rcx, %rdx
jne LBB84_236
## %bb.237: ## in Loop: Header=BB84_220 Depth=2
movq %rdx, %rax
cmpq %rbx, %rdx
je LBB84_219
jmp LBB84_240
LBB84_244: ## in Loop: Header=BB84_220 Depth=2
xorl %eax, %eax
testb $1, -216(%rbp) ## 1-byte Folded Reload
jne LBB84_232
jmp LBB84_233
.p2align 4, 0x90
LBB84_79: ## in Loop: Header=BB84_10 Depth=1
movq %r8, %r15
jmp LBB84_429
LBB84_246: ## in Loop: Header=BB84_10 Depth=1
movq %rbx, -80(%rbp) ## 8-byte Spill
movl -48(%rbp), %r14d ## 4-byte Reload
testl %r14d, %r14d
je LBB84_428
## %bb.247: ## in Loop: Header=BB84_10 Depth=1
addq %r11, %r9
addq %rdx, -80(%rbp) ## 8-byte Folded Spill
leaq 48(%r11), %rdx
leaq 1(%r11), %r13
xorl %r10d, %r10d
movq %r12, %r15
jmp LBB84_249
.p2align 4, 0x90
LBB84_248: ## in Loop: Header=BB84_249 Depth=2
movq -72(%rbp), %rax ## 8-byte Reload
movb $-1, (%r8,%rax)
addq %rax, %r15
movq -64(%rbp), %rax ## 8-byte Reload
addq %rax, %r8
incq %r10
addq %rax, %rdx
addq %rax, %r11
addq %rax, %r13
decl %r14d
je LBB84_429
LBB84_249: ## Parent Loop BB84_10 Depth=1
## => This Loop Header: Depth=2
## Child Loop BB84_259 Depth 3
## Child Loop BB84_265 Depth 3
## Child Loop BB84_273 Depth 3
movq -56(%rbp), %r12 ## 8-byte Reload
testl %r12d, %r12d
jle LBB84_248
## %bb.250: ## in Loop: Header=BB84_249 Depth=2
cmpl $8, %r12d
jae LBB84_252
LBB84_268: ## in Loop: Header=BB84_249 Depth=2
xorl %esi, %esi
LBB84_269: ## in Loop: Header=BB84_249 Depth=2
movq %rsi, %rax
testb $1, %r12b
je LBB84_271
## %bb.270: ## in Loop: Header=BB84_249 Depth=2
movq %rsi, %rax
subq -64(%rbp), %rax ## 8-byte Folded Reload
movb (%r8,%rax), %al
addb (%r15,%rsi), %al
movb %al, (%r8,%rsi)
movq %rsi, %rax
orq $1, %rax
LBB84_271: ## in Loop: Header=BB84_249 Depth=2
notq %rsi
cmpq -248(%rbp), %rsi ## 8-byte Folded Reload
je LBB84_248
## %bb.272: ## in Loop: Header=BB84_249 Depth=2
movq %r12, %rcx
subq %rax, %rcx
leaq (%rax,%r13), %rsi
incq %rax
.p2align 4, 0x90
LBB84_273: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_249 Depth=2
## => This Inner Loop Header: Depth=3
movzbl -1(%rsi), %ebx
addb -1(%r15,%rax), %bl
movb %bl, -1(%r8,%rax)
movzbl (%rsi), %ebx
addb (%r15,%rax), %bl
movb %bl, (%r8,%rax)
addq $2, %rsi
addq $2, %rax
addq $-2, %rcx
jne LBB84_273
jmp LBB84_248
.p2align 4, 0x90
LBB84_252: ## in Loop: Header=BB84_249 Depth=2
movq %r10, %rax
imulq -64(%rbp), %rax ## 8-byte Folded Reload
addq %r9, %rax
movq %r10, %rcx
imulq -72(%rbp), %rcx ## 8-byte Folded Reload
addq -80(%rbp), %rcx ## 8-byte Folded Reload
subq %rcx, %rax
cmpq $32, %rax
jb LBB84_268
## %bb.253: ## in Loop: Header=BB84_249 Depth=2
jb LBB84_268
## %bb.254: ## in Loop: Header=BB84_249 Depth=2
movl $0, %esi
cmpl $32, -44(%rbp) ## 4-byte Folded Reload
jb LBB84_269
## %bb.255: ## in Loop: Header=BB84_249 Depth=2
cmpl $32, %r12d
jae LBB84_257
## %bb.256: ## in Loop: Header=BB84_249 Depth=2
xorl %edi, %edi
jmp LBB84_264
LBB84_257: ## in Loop: Header=BB84_249 Depth=2
cmpq $0, -168(%rbp) ## 8-byte Folded Reload
je LBB84_274
## %bb.258: ## in Loop: Header=BB84_249 Depth=2
movq -64(%rbp), %rax ## 8-byte Reload
leaq (%rdx,%rax), %rsi
movq -192(%rbp), %rcx ## 8-byte Reload
xorl %eax, %eax
.p2align 4, 0x90
LBB84_259: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_249 Depth=2
## => This Inner Loop Header: Depth=3
movdqu (%r15,%rax), %xmm0
movdqu 16(%r15,%rax), %xmm1
movdqu -48(%rdx,%rax), %xmm2
paddb %xmm0, %xmm2
movdqu -32(%rdx,%rax), %xmm0
paddb %xmm1, %xmm0
movdqu %xmm2, -48(%rsi,%rax)
movdqu %xmm0, -32(%rsi,%rax)
movdqu 32(%r15,%rax), %xmm0
movdqu 48(%r15,%rax), %xmm1
movdqu -16(%rdx,%rax), %xmm2
paddb %xmm0, %xmm2
movdqu (%rdx,%rax), %xmm0
paddb %xmm1, %xmm0
movdqu %xmm2, -16(%rsi,%rax)
movdqu %xmm0, (%rsi,%rax)
addq $64, %rax
addq $-2, %rcx
jne LBB84_259
## %bb.260: ## in Loop: Header=BB84_249 Depth=2
testb $1, -208(%rbp) ## 1-byte Folded Reload
je LBB84_262
LBB84_261: ## in Loop: Header=BB84_249 Depth=2
movdqu (%r15,%rax), %xmm0
movdqu 16(%r15,%rax), %xmm1
movq %rax, %rcx
subq -64(%rbp), %rcx ## 8-byte Folded Reload
movdqu (%r8,%rcx), %xmm2
paddb %xmm0, %xmm2
movdqu 16(%r8,%rcx), %xmm0
paddb %xmm1, %xmm0
movdqu %xmm2, (%r8,%rax)
movdqu %xmm0, 16(%r8,%rax)
LBB84_262: ## in Loop: Header=BB84_249 Depth=2
cmpq %r12, -88(%rbp) ## 8-byte Folded Reload
je LBB84_248
## %bb.263: ## in Loop: Header=BB84_249 Depth=2
movq -88(%rbp), %rsi ## 8-byte Reload
movq %rsi, %rdi
testb $24, %r12b
je LBB84_269
LBB84_264: ## in Loop: Header=BB84_249 Depth=2
leaq (%r15,%rdi), %rax
leaq (%r11,%rdi), %rcx
movq -64(%rbp), %rsi ## 8-byte Reload
addq %rcx, %rsi
movq -112(%rbp), %rbx ## 8-byte Reload
subq %rdi, %rbx
xorl %edi, %edi
.p2align 4, 0x90
LBB84_265: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_249 Depth=2
## => This Inner Loop Header: Depth=3
movq (%rax,%rdi), %xmm0 ## xmm0 = mem[0],zero
movq (%rcx,%rdi), %xmm1 ## xmm1 = mem[0],zero
paddb %xmm0, %xmm1
movq %xmm1, (%rsi,%rdi)
addq $8, %rdi
cmpq %rdi, %rbx
jne LBB84_265
## %bb.266: ## in Loop: Header=BB84_249 Depth=2
movq -112(%rbp), %rax ## 8-byte Reload
movq %rax, %rsi
cmpq %r12, %rax
je LBB84_248
jmp LBB84_269
LBB84_274: ## in Loop: Header=BB84_249 Depth=2
xorl %eax, %eax
testb $1, -208(%rbp) ## 1-byte Folded Reload
jne LBB84_261
jmp LBB84_262
LBB84_275: ## in Loop: Header=BB84_10 Depth=1
movl -48(%rbp), %r9d ## 4-byte Reload
testl %r9d, %r9d
je LBB84_428
## %bb.276: ## in Loop: Header=BB84_10 Depth=1
movq -64(%rbp), %rax ## 8-byte Reload
leaq (%r11,%rax), %rcx
movq %rcx, -320(%rbp) ## 8-byte Spill
addq %rdx, %rbx
movq %rbx, -80(%rbp) ## 8-byte Spill
addq %rax, %rdi
movq %rdi, -120(%rbp) ## 8-byte Spill
movq -440(%rbp), %rax ## 8-byte Reload
leaq (%r11,%rax), %r13
addq -432(%rbp), %r11 ## 8-byte Folded Reload
addq -424(%rbp), %r15 ## 8-byte Folded Reload
movq %r15, %r14
movq %r12, %r15
xorl %r12d, %r12d
jmp LBB84_278
.p2align 4, 0x90
LBB84_277: ## in Loop: Header=BB84_278 Depth=2
movq -72(%rbp), %rax ## 8-byte Reload
movb $-1, (%r8,%rax)
addq %rax, %r15
movq -64(%rbp), %rcx ## 8-byte Reload
addq %rcx, %r8
addq %rcx, %r10
incq %r12
addq %rcx, %r13
addq %rcx, %r11
addq %rax, %r14
decl %r9d
je LBB84_429
LBB84_278: ## Parent Loop BB84_10 Depth=1
## => This Loop Header: Depth=2
## Child Loop BB84_288 Depth 3
## Child Loop BB84_294 Depth 3
## Child Loop BB84_300 Depth 3
## Child Loop BB84_304 Depth 3
cmpl $0, -56(%rbp) ## 4-byte Folded Reload
jle LBB84_277
## %bb.279: ## in Loop: Header=BB84_278 Depth=2
cmpl $8, -56(%rbp) ## 4-byte Folded Reload
jae LBB84_281
LBB84_297: ## in Loop: Header=BB84_278 Depth=2
xorl %esi, %esi
LBB84_298: ## in Loop: Header=BB84_278 Depth=2
movq %rsi, %rdi
notq %rdi
addq -56(%rbp), %rdi ## 8-byte Folded Reload
cmpq $0, -280(%rbp) ## 8-byte Folded Reload
je LBB84_302
## %bb.299: ## in Loop: Header=BB84_278 Depth=2
movl %r9d, -336(%rbp) ## 4-byte Spill
movq %r13, -144(%rbp) ## 8-byte Spill
movq %rsi, %rax
negq %rax
leaq (%r8,%rsi), %rcx
leaq (%r10,%rsi), %rdx
addq %r15, %rsi
xorl %ebx, %ebx
movq -280(%rbp), %r9 ## 8-byte Reload
.p2align 4, 0x90
LBB84_300: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_278 Depth=2
## => This Inner Loop Header: Depth=3
movzbl (%rdx,%rbx), %r13d
addb (%rsi,%rbx), %r13b
movb %r13b, (%rcx,%rbx)
incq %rbx
decq %rax
cmpq %rbx, %r9
jne LBB84_300
## %bb.301: ## in Loop: Header=BB84_278 Depth=2
negq %rax
movq %rax, %rsi
movq -144(%rbp), %r13 ## 8-byte Reload
movl -336(%rbp), %r9d ## 4-byte Reload
LBB84_302: ## in Loop: Header=BB84_278 Depth=2
cmpq $3, %rdi
jb LBB84_277
## %bb.303: ## in Loop: Header=BB84_278 Depth=2
movq -56(%rbp), %rcx ## 8-byte Reload
subq %rsi, %rcx
leaq (%rsi,%r13), %rdi
leaq (%r11,%rsi), %rax
addq %r14, %rsi
xorl %edx, %edx
.p2align 4, 0x90
LBB84_304: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_278 Depth=2
## => This Inner Loop Header: Depth=3
movzbl -3(%rax,%rdx), %ebx
addb -3(%rsi,%rdx), %bl
movb %bl, -3(%rdi,%rdx)
movzbl -2(%rax,%rdx), %ebx
addb -2(%rsi,%rdx), %bl
movb %bl, -2(%rdi,%rdx)
movzbl -1(%rax,%rdx), %ebx
addb -1(%rsi,%rdx), %bl
movb %bl, -1(%rdi,%rdx)
movzbl (%rax,%rdx), %ebx
addb (%rsi,%rdx), %bl
movb %bl, (%rdi,%rdx)
addq $4, %rdx
cmpq %rdx, %rcx
jne LBB84_304
jmp LBB84_277
.p2align 4, 0x90
LBB84_281: ## in Loop: Header=BB84_278 Depth=2
movq %r12, %rcx
imulq -64(%rbp), %rcx ## 8-byte Folded Reload
movq -320(%rbp), %rax ## 8-byte Reload
addq %rcx, %rax
movq %r12, %rdx
imulq -72(%rbp), %rdx ## 8-byte Folded Reload
addq -80(%rbp), %rdx ## 8-byte Folded Reload
addq -120(%rbp), %rcx ## 8-byte Folded Reload
movq %rax, %rsi
subq %rdx, %rsi
subq %rcx, %rax
cmpq $32, %rsi
jb LBB84_297
## %bb.282: ## in Loop: Header=BB84_278 Depth=2
jb LBB84_297
## %bb.283: ## in Loop: Header=BB84_278 Depth=2
movl $0, %esi
cmpq $32, %rax
jb LBB84_298
## %bb.284: ## in Loop: Header=BB84_278 Depth=2
cmpl $32, -56(%rbp) ## 4-byte Folded Reload
jae LBB84_286
## %bb.285: ## in Loop: Header=BB84_278 Depth=2
xorl %eax, %eax
jmp LBB84_293
LBB84_286: ## in Loop: Header=BB84_278 Depth=2
cmpq $0, -168(%rbp) ## 8-byte Folded Reload
je LBB84_305
## %bb.287: ## in Loop: Header=BB84_278 Depth=2
movq -192(%rbp), %rcx ## 8-byte Reload
xorl %esi, %esi
movq -88(%rbp), %rax ## 8-byte Reload
.p2align 4, 0x90
LBB84_288: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_278 Depth=2
## => This Inner Loop Header: Depth=3
movdqu (%r15,%rsi), %xmm0
movdqu 16(%r15,%rsi), %xmm1
movdqu (%r10,%rsi), %xmm2
paddb %xmm0, %xmm2
movdqu 16(%r10,%rsi), %xmm0
paddb %xmm1, %xmm0
movdqu %xmm2, (%r8,%rsi)
movdqu %xmm0, 16(%r8,%rsi)
movdqu 32(%r15,%rsi), %xmm0
movdqu 48(%r15,%rsi), %xmm1
movdqu 32(%r10,%rsi), %xmm2
paddb %xmm0, %xmm2
movdqu 48(%r10,%rsi), %xmm0
paddb %xmm1, %xmm0
movdqu %xmm2, 32(%r8,%rsi)
movdqu %xmm0, 48(%r8,%rsi)
addq $64, %rsi
addq $-2, %rcx
jne LBB84_288
## %bb.289: ## in Loop: Header=BB84_278 Depth=2
testb $1, -208(%rbp) ## 1-byte Folded Reload
je LBB84_291
LBB84_290: ## in Loop: Header=BB84_278 Depth=2
movdqu (%r15,%rsi), %xmm0
movdqu 16(%r15,%rsi), %xmm1
movdqu (%r10,%rsi), %xmm2
paddb %xmm0, %xmm2
movdqu 16(%r10,%rsi), %xmm0
paddb %xmm1, %xmm0
movdqu %xmm2, (%r8,%rsi)
movdqu %xmm0, 16(%r8,%rsi)
LBB84_291: ## in Loop: Header=BB84_278 Depth=2
cmpq -56(%rbp), %rax ## 8-byte Folded Reload
je LBB84_277
## %bb.292: ## in Loop: Header=BB84_278 Depth=2
movq -88(%rbp), %rsi ## 8-byte Reload
movq %rsi, %rax
testb $24, -56(%rbp) ## 1-byte Folded Reload
je LBB84_298
LBB84_293: ## in Loop: Header=BB84_278 Depth=2
movq -112(%rbp), %rcx ## 8-byte Reload
.p2align 4, 0x90
LBB84_294: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_278 Depth=2
## => This Inner Loop Header: Depth=3
movq (%r15,%rax), %xmm0 ## xmm0 = mem[0],zero
movq (%r10,%rax), %xmm1 ## xmm1 = mem[0],zero
paddb %xmm0, %xmm1
movq %xmm1, (%r8,%rax)
addq $8, %rax
cmpq %rax, %rcx
jne LBB84_294
## %bb.295: ## in Loop: Header=BB84_278 Depth=2
movq %rcx, %rsi
cmpq -56(%rbp), %rcx ## 8-byte Folded Reload
je LBB84_277
jmp LBB84_298
LBB84_305: ## in Loop: Header=BB84_278 Depth=2
xorl %esi, %esi
movq -88(%rbp), %rax ## 8-byte Reload
testb $1, -208(%rbp) ## 1-byte Folded Reload
jne LBB84_290
jmp LBB84_291
LBB84_306: ## in Loop: Header=BB84_10 Depth=1
movq %r12, %r15
movl -48(%rbp), %r14d ## 4-byte Reload
testl %r14d, %r14d
je LBB84_429
## %bb.307: ## in Loop: Header=BB84_10 Depth=1
movq %rbx, %r12
movq %r9, %rax
leaq (%r11,%r9), %rcx
movq %rcx, -144(%rbp) ## 8-byte Spill
addq %rdx, %r12
addq %r9, %rdi
movq %rdi, %r9
leaq 16(%r11), %rsi
xorl %r13d, %r13d
jmp LBB84_309
.p2align 4, 0x90
LBB84_308: ## in Loop: Header=BB84_309 Depth=2
movq -72(%rbp), %rax ## 8-byte Reload
movb $-1, (%r8,%rax)
addq %rax, %r15
movq -64(%rbp), %rax ## 8-byte Reload
addq %rax, %r8
addq %rax, %r10
incq %r13
addq %rax, %rsi
addq %rax, %r11
decl %r14d
je LBB84_429
LBB84_309: ## Parent Loop BB84_10 Depth=1
## => This Loop Header: Depth=2
## Child Loop BB84_318 Depth 3
## Child Loop BB84_322 Depth 3
## Child Loop BB84_330 Depth 3
movq -56(%rbp), %rbx ## 8-byte Reload
testl %ebx, %ebx
jle LBB84_308
## %bb.310: ## in Loop: Header=BB84_309 Depth=2
cmpl $8, %ebx
jae LBB84_312
LBB84_326: ## in Loop: Header=BB84_309 Depth=2
xorl %edi, %edi
LBB84_327: ## in Loop: Header=BB84_309 Depth=2
movq %rdi, %rax
testb $1, %bl
je LBB84_329
## %bb.328: ## in Loop: Header=BB84_309 Depth=2
movzbl (%r10,%rdi), %eax
movq %rdi, %rcx
subq -64(%rbp), %rcx ## 8-byte Folded Reload
movzbl (%r8,%rcx), %ecx
addl %eax, %ecx
shrl %ecx
addb (%r15,%rdi), %cl
movb %cl, (%r8,%rdi)
movq %rdi, %rax
orq $1, %rax
LBB84_329: ## in Loop: Header=BB84_309 Depth=2
notq %rdi
cmpq -248(%rbp), %rdi ## 8-byte Folded Reload
je LBB84_308
.p2align 4, 0x90
LBB84_330: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_309 Depth=2
## => This Inner Loop Header: Depth=3
movzbl (%r10,%rax), %ecx
movzbl (%r11,%rax), %edx
addl %ecx, %edx
shrl %edx
addb (%r15,%rax), %dl
movb %dl, (%r8,%rax)
movzbl 1(%r10,%rax), %ecx
movzbl 1(%r11,%rax), %edx
addl %ecx, %edx
shrl %edx
addb 1(%r15,%rax), %dl
movb %dl, 1(%r8,%rax)
addq $2, %rax
cmpq %rax, %rbx
jne LBB84_330
jmp LBB84_308
.p2align 4, 0x90
LBB84_312: ## in Loop: Header=BB84_309 Depth=2
movq %r13, %rcx
imulq -64(%rbp), %rcx ## 8-byte Folded Reload
movq -144(%rbp), %rax ## 8-byte Reload
addq %rcx, %rax
movq %r13, %rdx
imulq -72(%rbp), %rdx ## 8-byte Folded Reload
addq %r12, %rdx
addq %r9, %rcx
movq %rax, %rdi
subq %rdx, %rdi
subq %rcx, %rax
cmpq $32, %rdi
jb LBB84_326
## %bb.313: ## in Loop: Header=BB84_309 Depth=2
jb LBB84_326
## %bb.314: ## in Loop: Header=BB84_309 Depth=2
cmpq $32, %rax
jb LBB84_326
## %bb.315: ## in Loop: Header=BB84_309 Depth=2
movl $0, %edi
cmpl $32, -44(%rbp) ## 4-byte Folded Reload
jb LBB84_327
## %bb.316: ## in Loop: Header=BB84_309 Depth=2
xorl %eax, %eax
cmpl $32, %ebx
jb LBB84_321
## %bb.317: ## in Loop: Header=BB84_309 Depth=2
movq -88(%rbp), %rcx ## 8-byte Reload
.p2align 4, 0x90
LBB84_318: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_309 Depth=2
## => This Inner Loop Header: Depth=3
pmovzxbw 8(%r10,%rax), %xmm0 ## xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
pmovzxbw (%r10,%rax), %xmm1 ## xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
pmovzxbw 24(%r10,%rax), %xmm2 ## xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
pmovzxbw 16(%r10,%rax), %xmm3 ## xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
pmovzxbw -8(%rsi,%rax), %xmm4 ## xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
movdqu (%r15,%rax), %xmm5
paddw %xmm0, %xmm4
pmovzxbw -16(%rsi,%rax), %xmm0 ## xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
movdqu 16(%r15,%rax), %xmm6
paddw %xmm1, %xmm0
pmovzxbw 8(%rsi,%rax), %xmm1 ## xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
paddw %xmm2, %xmm1
pmovzxbw (%rsi,%rax), %xmm2 ## xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
paddw %xmm3, %xmm2
psrlw $1, %xmm4
psrlw $1, %xmm0
packuswb %xmm4, %xmm0
paddb %xmm5, %xmm0
psrlw $1, %xmm1
psrlw $1, %xmm2
packuswb %xmm1, %xmm2
paddb %xmm6, %xmm2
movdqu %xmm0, (%r8,%rax)
movdqu %xmm2, 16(%r8,%rax)
addq $32, %rax
cmpq %rax, %rcx
jne LBB84_318
## %bb.319: ## in Loop: Header=BB84_309 Depth=2
cmpq %rbx, %rcx
je LBB84_308
## %bb.320: ## in Loop: Header=BB84_309 Depth=2
movq -88(%rbp), %rdi ## 8-byte Reload
movq %rdi, %rax
testb $24, %bl
je LBB84_327
LBB84_321: ## in Loop: Header=BB84_309 Depth=2
movq -112(%rbp), %rcx ## 8-byte Reload
.p2align 4, 0x90
LBB84_322: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_309 Depth=2
## => This Inner Loop Header: Depth=3
pmovzxbw (%r10,%rax), %xmm0 ## xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
pmovzxbw (%r11,%rax), %xmm1 ## xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
movq (%r15,%rax), %xmm2 ## xmm2 = mem[0],zero
paddw %xmm0, %xmm1
psrlw $1, %xmm1
packuswb %xmm1, %xmm1
paddb %xmm2, %xmm1
movq %xmm1, (%r8,%rax)
addq $8, %rax
cmpq %rax, %rcx
jne LBB84_322
## %bb.323: ## in Loop: Header=BB84_309 Depth=2
movq %rcx, %rdi
cmpq %rbx, %rcx
je LBB84_308
jmp LBB84_327
LBB84_331: ## in Loop: Header=BB84_10 Depth=1
movl -48(%rbp), %eax ## 4-byte Reload
testl %eax, %eax
je LBB84_428
## %bb.332: ## in Loop: Header=BB84_10 Depth=1
leaq (%r11,%r9), %rcx
movq %rcx, -224(%rbp) ## 8-byte Spill
addq %rdx, %rbx
movq %rbx, -80(%rbp) ## 8-byte Spill
leaq (%rdi,%r9), %rcx
movq %rcx, -272(%rbp) ## 8-byte Spill
movq %rdi, -120(%rbp) ## 8-byte Spill
movq %r12, %r15
movq %rdi, %r12
xorl %ecx, %ecx
movq %rcx, -240(%rbp) ## 8-byte Spill
jmp LBB84_334
.p2align 4, 0x90
LBB84_333: ## in Loop: Header=BB84_334 Depth=2
movq -72(%rbp), %rax ## 8-byte Reload
movb $-1, (%r8,%rax)
addq %rax, %r15
movq -64(%rbp), %rax ## 8-byte Reload
addq %rax, %r8
addq %rax, %r10
incq -240(%rbp) ## 8-byte Folded Spill
addq %rax, %r12
addq %rax, %r11
movl -400(%rbp), %eax ## 4-byte Reload
decl %eax
je LBB84_429
LBB84_334: ## Parent Loop BB84_10 Depth=1
## => This Loop Header: Depth=2
## Child Loop BB84_348 Depth 3
## Child Loop BB84_345 Depth 3
## Child Loop BB84_356 Depth 3
movl %eax, -400(%rbp) ## 4-byte Spill
cmpl $0, -56(%rbp) ## 4-byte Folded Reload
jle LBB84_333
## %bb.335: ## in Loop: Header=BB84_334 Depth=2
cmpl $8, -56(%rbp) ## 4-byte Folded Reload
jae LBB84_337
## %bb.336: ## in Loop: Header=BB84_334 Depth=2
xorl %edi, %edi
jmp LBB84_356
.p2align 4, 0x90
LBB84_337: ## in Loop: Header=BB84_334 Depth=2
movq -240(%rbp), %rcx ## 8-byte Reload
movq %rcx, %rdx
imulq -64(%rbp), %rdx ## 8-byte Folded Reload
movq -224(%rbp), %rax ## 8-byte Reload
addq %rdx, %rax
imulq -72(%rbp), %rcx ## 8-byte Folded Reload
addq -80(%rbp), %rcx ## 8-byte Folded Reload
movq -272(%rbp), %rsi ## 8-byte Reload
addq %rdx, %rsi
addq -120(%rbp), %rdx ## 8-byte Folded Reload
movq %rax, %rdi
subq %rcx, %rdi
movq %rax, %rcx
subq %rsi, %rcx
subq %rdx, %rax
cmpq $16, %rdi
jb LBB84_353
## %bb.338: ## in Loop: Header=BB84_334 Depth=2
jb LBB84_354
## %bb.339: ## in Loop: Header=BB84_334 Depth=2
cmpl $16, -44(%rbp) ## 4-byte Folded Reload
jb LBB84_352
## %bb.340: ## in Loop: Header=BB84_334 Depth=2
cmpq $16, %rcx
jb LBB84_351
## %bb.341: ## in Loop: Header=BB84_334 Depth=2
movl $0, %edi
cmpq $16, %rax
jb LBB84_356
## %bb.342: ## in Loop: Header=BB84_334 Depth=2
cmpl $16, -56(%rbp) ## 4-byte Folded Reload
jae LBB84_347
## %bb.343: ## in Loop: Header=BB84_334 Depth=2
xorl %esi, %esi
LBB84_344: ## in Loop: Header=BB84_334 Depth=2
movq -112(%rbp), %rax ## 8-byte Reload
.p2align 4, 0x90
LBB84_345: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_334 Depth=2
## => This Inner Loop Header: Depth=3
movq (%r15,%rsi), %xmm8 ## xmm8 = mem[0],zero
movq (%r11,%rsi), %xmm2 ## xmm2 = mem[0],zero
pmovzxbd %xmm2, %xmm0 ## xmm0 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
pshufd $85, %xmm2, %xmm1 ## xmm1 = xmm2[1,1,1,1]
pmovzxbd %xmm1, %xmm1 ## xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
movq (%r10,%rsi), %xmm10 ## xmm10 = mem[0],zero
pmovzxbd %xmm10, %xmm12 ## xmm12 = xmm10[0],zero,zero,zero,xmm10[1],zero,zero,zero,xmm10[2],zero,zero,zero,xmm10[3],zero,zero,zero
pshufd $85, %xmm10, %xmm5 ## xmm5 = xmm10[1,1,1,1]
pmovzxbd %xmm5, %xmm4 ## xmm4 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
movq (%r12,%rsi), %xmm9 ## xmm9 = mem[0],zero
pshufd $85, %xmm9, %xmm5 ## xmm5 = xmm9[1,1,1,1]
pmovzxbd %xmm5, %xmm13 ## xmm13 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
pmovzxbd %xmm9, %xmm11 ## xmm11 = xmm9[0],zero,zero,zero,xmm9[1],zero,zero,zero,xmm9[2],zero,zero,zero,xmm9[3],zero,zero,zero
movdqa %xmm4, %xmm6
paddd %xmm1, %xmm6
movdqa %xmm12, %xmm3
paddd %xmm0, %xmm3
psubd %xmm11, %xmm3
psubd %xmm13, %xmm6
movdqa %xmm6, %xmm7
psubd %xmm1, %xmm7
movdqa %xmm3, %xmm1
psubd %xmm0, %xmm1
pabsd %xmm1, %xmm5
pabsd %xmm7, %xmm1
movdqa %xmm6, %xmm0
psubd %xmm4, %xmm0
movdqa %xmm3, %xmm4
psubd %xmm12, %xmm4
pabsd %xmm4, %xmm14
pabsd %xmm0, %xmm12
psubd %xmm13, %xmm6
psubd %xmm11, %xmm3
pabsd %xmm3, %xmm0
pabsd %xmm6, %xmm7
movdqa %xmm1, %xmm3
pminud %xmm12, %xmm3
pcmpeqd %xmm1, %xmm3
pxor %xmm15, %xmm3
movdqa %xmm5, %xmm6
pminud %xmm14, %xmm6
pcmpeqd %xmm5, %xmm6
pxor %xmm15, %xmm6
movdqa %xmm1, %xmm4
pminud %xmm7, %xmm4
pcmpeqd %xmm1, %xmm4
pxor %xmm15, %xmm4
por %xmm3, %xmm4
movdqa %xmm5, %xmm1
pminud %xmm0, %xmm1
pcmpeqd %xmm5, %xmm1
pxor %xmm15, %xmm1
por %xmm6, %xmm1
packssdw %xmm4, %xmm1
packsswb %xmm1, %xmm1
pminud %xmm14, %xmm0
pcmpeqd %xmm14, %xmm0
pxor %xmm15, %xmm0
pminud %xmm12, %xmm7
pcmpeqd %xmm12, %xmm7
pxor %xmm15, %xmm7
packssdw %xmm7, %xmm0
packsswb %xmm0, %xmm0
pblendvb %xmm0, %xmm9, %xmm10
movdqa %xmm1, %xmm0
pblendvb %xmm0, %xmm10, %xmm2
paddb %xmm8, %xmm2
movq %xmm2, (%r8,%rsi)
addq $8, %rsi
cmpq %rsi, %rax
jne LBB84_345
## %bb.346: ## in Loop: Header=BB84_334 Depth=2
movq %rax, %rdi
cmpq -56(%rbp), %rax ## 8-byte Folded Reload
movdqa LCPI84_0(%rip), %xmm7 ## xmm7 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
jne LBB84_356
jmp LBB84_333
LBB84_347: ## in Loop: Header=BB84_334 Depth=2
xorl %esi, %esi
movq -64(%rbp), %rcx ## 8-byte Reload
movq -360(%rbp), %rdx ## 8-byte Reload
.p2align 4, 0x90
LBB84_348: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_334 Depth=2
## => This Inner Loop Header: Depth=3
movq %rsi, %rax
subq %rcx, %rax
movdqu (%r8,%rax), %xmm9
pshufd $238, %xmm9, %xmm0 ## xmm0 = xmm9[2,3,2,3]
pmovzxbd %xmm0, %xmm15 ## xmm15 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
pmovzxbd %xmm9, %xmm5 ## xmm5 = xmm9[0],zero,zero,zero,xmm9[1],zero,zero,zero,xmm9[2],zero,zero,zero,xmm9[3],zero,zero,zero
pshufd $85, %xmm9, %xmm0 ## xmm0 = xmm9[1,1,1,1]
pmovzxbd %xmm0, %xmm6 ## xmm6 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
movdqu (%r10,%rsi), %xmm1
pshufd $238, %xmm1, %xmm0 ## xmm0 = xmm1[2,3,2,3]
pmovzxbd %xmm0, %xmm13 ## xmm13 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
pmovzxbd %xmm1, %xmm8 ## xmm8 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
pshufd $85, %xmm1, %xmm0 ## xmm0 = xmm1[1,1,1,1]
movdqa %xmm1, -144(%rbp) ## 16-byte Spill
pmovzxbd %xmm0, %xmm4 ## xmm4 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
movdqu (%r10,%rax), %xmm3
pshufd $85, %xmm3, %xmm0 ## xmm0 = xmm3[1,1,1,1]
pmovzxbd %xmm0, %xmm12 ## xmm12 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
pmovzxbd %xmm3, %xmm10 ## xmm10 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
pshufd $238, %xmm3, %xmm0 ## xmm0 = xmm3[2,3,2,3]
movdqa %xmm3, -336(%rbp) ## 16-byte Spill
pmovzxbd %xmm0, %xmm0 ## xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
movdqa %xmm0, -304(%rbp) ## 16-byte Spill
movdqa %xmm4, %xmm14
paddd %xmm6, %xmm14
movdqa %xmm8, %xmm11
paddd %xmm5, %xmm11
movdqa %xmm13, %xmm7
paddd %xmm15, %xmm7
psubd %xmm0, %xmm7
psubd %xmm10, %xmm11
psubd %xmm12, %xmm14
movdqa %xmm14, %xmm0
psubd %xmm6, %xmm0
movdqa %xmm0, -320(%rbp) ## 16-byte Spill
movdqa %xmm11, %xmm0
psubd %xmm5, %xmm0
movdqa %xmm0, -416(%rbp) ## 16-byte Spill
movdqa %xmm7, %xmm2
psubd %xmm15, %xmm2
pshufd $255, %xmm9, %xmm6 ## xmm6 = xmm9[3,3,3,3]
pmovzxbd %xmm6, %xmm6 ## xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero,xmm6[2],zero,zero,zero,xmm6[3],zero,zero,zero
pshufd $255, %xmm1, %xmm5 ## xmm5 = xmm1[3,3,3,3]
pmovzxbd %xmm5, %xmm1 ## xmm1 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
pshufd $255, %xmm3, %xmm3 ## xmm3 = xmm3[3,3,3,3]
pmovzxbd %xmm3, %xmm3 ## xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
movdqa %xmm1, %xmm5
paddd %xmm6, %xmm5
psubd %xmm3, %xmm5
movdqa %xmm5, %xmm0
psubd %xmm6, %xmm0
movdqa %xmm14, %xmm6
psubd %xmm4, %xmm6
movdqa %xmm6, %xmm4
movdqa %xmm11, %xmm15
psubd %xmm8, %xmm15
movdqa %xmm7, %xmm8
psubd %xmm13, %xmm8
movdqa %xmm5, %xmm6
psubd %xmm1, %xmm6
psubd %xmm12, %xmm14
psubd %xmm10, %xmm11
psubd -304(%rbp), %xmm7 ## 16-byte Folded Reload
pabsd %xmm0, %xmm13
pabsd %xmm2, %xmm12
pabsd %xmm6, %xmm0
pabsd %xmm8, %xmm10
psubd %xmm3, %xmm5
pabsd %xmm5, %xmm8
pabsd %xmm7, %xmm3
movdqa %xmm12, %xmm2
pminud %xmm3, %xmm2
movdqa %xmm13, %xmm7
pminud %xmm8, %xmm7
movdqa %xmm0, -304(%rbp) ## 16-byte Spill
pminud %xmm0, %xmm8
pcmpeqd %xmm0, %xmm8
pxor LCPI84_1(%rip), %xmm8
pminud %xmm10, %xmm3
pcmpeqd %xmm10, %xmm3
pxor LCPI84_1(%rip), %xmm3
packssdw %xmm8, %xmm3
pabsd -416(%rbp), %xmm5 ## 16-byte Folded Reload
pabsd -320(%rbp), %xmm6 ## 16-byte Folded Reload
pabsd %xmm15, %xmm8
pabsd %xmm4, %xmm15
pabsd %xmm11, %xmm0
pabsd %xmm14, %xmm11
movdqa %xmm6, %xmm14
pminud %xmm11, %xmm14
movdqa %xmm5, %xmm1
pminud %xmm0, %xmm1
pminud %xmm8, %xmm0
pcmpeqd %xmm8, %xmm0
pxor LCPI84_1(%rip), %xmm0
pminud %xmm15, %xmm11
pcmpeqd %xmm15, %xmm11
pxor LCPI84_1(%rip), %xmm11
packssdw %xmm11, %xmm0
packsswb %xmm3, %xmm0
movdqa -144(%rbp), %xmm4 ## 16-byte Reload
pblendvb %xmm0, -336(%rbp), %xmm4 ## 16-byte Folded Reload
movdqa %xmm6, %xmm0
pminud %xmm15, %xmm0
pcmpeqd %xmm15, %xmm15
movdqa %xmm5, %xmm3
pminud %xmm8, %xmm3
movdqa %xmm12, %xmm8
pminud %xmm10, %xmm8
movdqa %xmm13, %xmm11
pminud -304(%rbp), %xmm11 ## 16-byte Folded Reload
pcmpeqd %xmm6, %xmm0
pcmpeqd %xmm6, %xmm14
pxor %xmm15, %xmm0
pxor %xmm15, %xmm14
por %xmm0, %xmm14
pcmpeqd %xmm5, %xmm3
pcmpeqd %xmm5, %xmm1
pxor %xmm15, %xmm3
pxor %xmm15, %xmm1
por %xmm3, %xmm1
packssdw %xmm14, %xmm1
pcmpeqd %xmm12, %xmm8
pcmpeqd %xmm12, %xmm2
pxor %xmm15, %xmm8
pxor %xmm15, %xmm2
por %xmm8, %xmm2
pcmpeqd %xmm13, %xmm11
pcmpeqd %xmm13, %xmm7
pxor %xmm15, %xmm11
pxor %xmm15, %xmm7
por %xmm11, %xmm7
packssdw %xmm7, %xmm2
packsswb %xmm2, %xmm1
movdqa %xmm1, %xmm0
pblendvb %xmm0, %xmm4, %xmm9
movdqu (%r15,%rsi), %xmm0
paddb %xmm0, %xmm9
movdqu %xmm9, (%r8,%rsi)
addq $16, %rsi
cmpq %rdx, %rsi
jne LBB84_348
## %bb.349: ## in Loop: Header=BB84_334 Depth=2
cmpq -56(%rbp), %rdx ## 8-byte Folded Reload
movdqa LCPI84_0(%rip), %xmm7 ## xmm7 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
je LBB84_333
## %bb.350: ## in Loop: Header=BB84_334 Depth=2
movq -360(%rbp), %rdi ## 8-byte Reload
movq %rdi, %rsi
testb $8, -56(%rbp) ## 1-byte Folded Reload
je LBB84_356
jmp LBB84_344
LBB84_351: ## in Loop: Header=BB84_334 Depth=2
xorl %edi, %edi
jmp LBB84_356
LBB84_352: ## in Loop: Header=BB84_334 Depth=2
xorl %edi, %edi
jmp LBB84_356
LBB84_353: ## in Loop: Header=BB84_334 Depth=2
xorl %edi, %edi
jmp LBB84_356
LBB84_354: ## in Loop: Header=BB84_334 Depth=2
xorl %edi, %edi
jmp LBB84_356
.p2align 4, 0x90
LBB84_355: ## in Loop: Header=BB84_356 Depth=3
cmpl %r14d, %edx
cmoval %ecx, %esi
cmpl %r9d, %edx
cmoval %ecx, %esi
addb (%r15,%rdi), %sil
movb %sil, (%r8,%rdi)
incq %rdi
cmpq %rdi, -56(%rbp) ## 8-byte Folded Reload
je LBB84_333
LBB84_356: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_334 Depth=2
## => This Inner Loop Header: Depth=3
movzbl (%r11,%rdi), %esi
movzbl (%r10,%rdi), %r13d
movzbl (%r12,%rdi), %ecx
leal (%rsi,%r13), %eax
subl %ecx, %eax
movl %eax, %ebx
subl %esi, %ebx
movl %ebx, %edx
negl %edx
cmovsl %ebx, %edx
movl %eax, %ebx
subl %r13d, %ebx
movl %ebx, %r9d
negl %r9d
cmovsl %ebx, %r9d
subl %ecx, %eax
movl %eax, %r14d
negl %r14d
cmovsl %eax, %r14d
cmpl %r14d, %r9d
ja LBB84_355
## %bb.357: ## in Loop: Header=BB84_356 Depth=3
movl %r13d, %ecx
jmp LBB84_355
LBB84_358: ## in Loop: Header=BB84_10 Depth=1
movl -48(%rbp), %r13d ## 4-byte Reload
testl %r13d, %r13d
je LBB84_428
## %bb.359: ## in Loop: Header=BB84_10 Depth=1
leaq (%r11,%r9), %rax
movq %rax, -144(%rbp) ## 8-byte Spill
addq %rdx, %rbx
movq %rbx, -80(%rbp) ## 8-byte Spill
leaq 48(%r11), %rdx
leaq 1(%r11), %r14
xorl %r10d, %r10d
movq %r12, %r15
jmp LBB84_361
.p2align 4, 0x90
LBB84_360: ## in Loop: Header=BB84_361 Depth=2
movq -72(%rbp), %rax ## 8-byte Reload
movb $-1, (%r8,%rax)
addq %rax, %r15
addq %r9, %r8
incq %r10
addq %r9, %rdx
addq %r9, %r11
addq %r9, %r14
decl %r13d
je LBB84_429
LBB84_361: ## Parent Loop BB84_10 Depth=1
## => This Loop Header: Depth=2
## Child Loop BB84_371 Depth 3
## Child Loop BB84_380 Depth 3
## Child Loop BB84_386 Depth 3
movq -56(%rbp), %r12 ## 8-byte Reload
testl %r12d, %r12d
movq -64(%rbp), %r9 ## 8-byte Reload
jle LBB84_360
## %bb.362: ## in Loop: Header=BB84_361 Depth=2
cmpl $8, %r12d
jae LBB84_364
LBB84_363: ## in Loop: Header=BB84_361 Depth=2
xorl %esi, %esi
LBB84_382: ## in Loop: Header=BB84_361 Depth=2
movq %rsi, %rax
testb $1, %r12b
je LBB84_384
## %bb.383: ## in Loop: Header=BB84_361 Depth=2
movq %rsi, %rax
subq %r9, %rax
movb (%r8,%rax), %al
shrb %al
addb (%r15,%rsi), %al
movb %al, (%r8,%rsi)
movq %rsi, %rax
orq $1, %rax
LBB84_384: ## in Loop: Header=BB84_361 Depth=2
notq %rsi
cmpq -248(%rbp), %rsi ## 8-byte Folded Reload
je LBB84_360
## %bb.385: ## in Loop: Header=BB84_361 Depth=2
movq %r12, %rcx
subq %rax, %rcx
leaq (%r14,%rax), %rsi
incq %rax
.p2align 4, 0x90
LBB84_386: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_361 Depth=2
## => This Inner Loop Header: Depth=3
movzbl -1(%rsi), %ebx
shrb %bl
addb -1(%r15,%rax), %bl
movb %bl, -1(%r8,%rax)
movzbl (%rsi), %ebx
shrb %bl
addb (%r15,%rax), %bl
movb %bl, (%r8,%rax)
addq $2, %rsi
addq $2, %rax
addq $-2, %rcx
jne LBB84_386
jmp LBB84_360
.p2align 4, 0x90
LBB84_364: ## in Loop: Header=BB84_361 Depth=2
movq %r10, %rax
imulq %r9, %rax
addq -144(%rbp), %rax ## 8-byte Folded Reload
movq %r10, %rcx
imulq -72(%rbp), %rcx ## 8-byte Folded Reload
addq -80(%rbp), %rcx ## 8-byte Folded Reload
subq %rcx, %rax
cmpq $32, %rax
jb LBB84_363
## %bb.365: ## in Loop: Header=BB84_361 Depth=2
jb LBB84_363
## %bb.366: ## in Loop: Header=BB84_361 Depth=2
movl $0, %esi
cmpl $32, -44(%rbp) ## 4-byte Folded Reload
jb LBB84_382
## %bb.367: ## in Loop: Header=BB84_361 Depth=2
cmpl $32, %r12d
jae LBB84_369
## %bb.368: ## in Loop: Header=BB84_361 Depth=2
xorl %edi, %edi
jmp LBB84_379
LBB84_369: ## in Loop: Header=BB84_361 Depth=2
cmpq $0, -168(%rbp) ## 8-byte Folded Reload
je LBB84_374
## %bb.370: ## in Loop: Header=BB84_361 Depth=2
leaq (%rdx,%r9), %rsi
movq -192(%rbp), %r9 ## 8-byte Reload
xorl %eax, %eax
.p2align 4, 0x90
LBB84_371: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_361 Depth=2
## => This Inner Loop Header: Depth=3
movdqu (%r15,%rax), %xmm0
movdqu 16(%r15,%rax), %xmm1
movdqu -48(%rdx,%rax), %xmm2
movdqu -32(%rdx,%rax), %xmm3
psrlw $1, %xmm2
pand %xmm7, %xmm2
paddb %xmm0, %xmm2
psrlw $1, %xmm3
pand %xmm7, %xmm3
paddb %xmm1, %xmm3
movdqu %xmm2, -48(%rsi,%rax)
movdqu %xmm3, -32(%rsi,%rax)
movdqu 32(%r15,%rax), %xmm0
movdqu 48(%r15,%rax), %xmm1
movdqu -16(%rdx,%rax), %xmm2
movdqu (%rdx,%rax), %xmm3
psrlw $1, %xmm2
pand %xmm7, %xmm2
paddb %xmm0, %xmm2
psrlw $1, %xmm3
pand %xmm7, %xmm3
paddb %xmm1, %xmm3
movdqu %xmm2, -16(%rsi,%rax)
movdqu %xmm3, (%rsi,%rax)
addq $64, %rax
addq $-2, %r9
jne LBB84_371
jmp LBB84_375
LBB84_374: ## in Loop: Header=BB84_361 Depth=2
xorl %eax, %eax
LBB84_375: ## in Loop: Header=BB84_361 Depth=2
testb $1, -208(%rbp) ## 1-byte Folded Reload
movq -64(%rbp), %r9 ## 8-byte Reload
je LBB84_377
## %bb.376: ## in Loop: Header=BB84_361 Depth=2
movdqu (%r15,%rax), %xmm0
movdqu 16(%r15,%rax), %xmm1
movq %rax, %rcx
subq %r9, %rcx
movdqu (%r8,%rcx), %xmm2
movdqu 16(%r8,%rcx), %xmm3
psrlw $1, %xmm2
pand %xmm7, %xmm2
paddb %xmm0, %xmm2
psrlw $1, %xmm3
pand %xmm7, %xmm3
paddb %xmm1, %xmm3
movdqu %xmm2, (%r8,%rax)
movdqu %xmm3, 16(%r8,%rax)
LBB84_377: ## in Loop: Header=BB84_361 Depth=2
cmpq %r12, -88(%rbp) ## 8-byte Folded Reload
je LBB84_360
## %bb.378: ## in Loop: Header=BB84_361 Depth=2
movq -88(%rbp), %rsi ## 8-byte Reload
movq %rsi, %rdi
testb $24, %r12b
je LBB84_382
LBB84_379: ## in Loop: Header=BB84_361 Depth=2
leaq (%r15,%rdi), %rax
leaq (%r11,%rdi), %rcx
leaq (%rcx,%r9), %rsi
movq -112(%rbp), %rbx ## 8-byte Reload
subq %rdi, %rbx
xorl %edi, %edi
.p2align 4, 0x90
LBB84_380: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_361 Depth=2
## => This Inner Loop Header: Depth=3
movq (%rax,%rdi), %xmm0 ## xmm0 = mem[0],zero
movq (%rcx,%rdi), %xmm1 ## xmm1 = mem[0],zero
psrlw $1, %xmm1
pand %xmm7, %xmm1
paddb %xmm0, %xmm1
movq %xmm1, (%rsi,%rdi)
addq $8, %rdi
cmpq %rdi, %rbx
jne LBB84_380
## %bb.381: ## in Loop: Header=BB84_361 Depth=2
movq -112(%rbp), %rax ## 8-byte Reload
movq %rax, %rsi
cmpq %r12, %rax
je LBB84_360
jmp LBB84_382
LBB84_387: ## in Loop: Header=BB84_10 Depth=1
movq %rbx, -80(%rbp) ## 8-byte Spill
movl -48(%rbp), %r14d ## 4-byte Reload
testl %r14d, %r14d
je LBB84_428
## %bb.388: ## in Loop: Header=BB84_10 Depth=1
addq %r11, %r9
addq %rdx, -80(%rbp) ## 8-byte Folded Spill
leaq 48(%r11), %rdx
leaq 1(%r11), %r13
xorl %r10d, %r10d
movq %r12, %r15
jmp LBB84_390
.p2align 4, 0x90
LBB84_389: ## in Loop: Header=BB84_390 Depth=2
movq -72(%rbp), %rax ## 8-byte Reload
movb $-1, (%r8,%rax)
addq %rax, %r15
movq -64(%rbp), %rax ## 8-byte Reload
addq %rax, %r8
incq %r10
addq %rax, %rdx
addq %rax, %r11
addq %rax, %r13
decl %r14d
je LBB84_429
LBB84_390: ## Parent Loop BB84_10 Depth=1
## => This Loop Header: Depth=2
## Child Loop BB84_400 Depth 3
## Child Loop BB84_406 Depth 3
## Child Loop BB84_414 Depth 3
movq -56(%rbp), %r12 ## 8-byte Reload
testl %r12d, %r12d
jle LBB84_389
## %bb.391: ## in Loop: Header=BB84_390 Depth=2
cmpl $8, %r12d
jae LBB84_393
LBB84_409: ## in Loop: Header=BB84_390 Depth=2
xorl %esi, %esi
LBB84_410: ## in Loop: Header=BB84_390 Depth=2
movq %rsi, %rax
testb $1, %r12b
je LBB84_412
## %bb.411: ## in Loop: Header=BB84_390 Depth=2
movq %rsi, %rax
subq -64(%rbp), %rax ## 8-byte Folded Reload
movb (%r8,%rax), %al
addb (%r15,%rsi), %al
movb %al, (%r8,%rsi)
movq %rsi, %rax
orq $1, %rax
LBB84_412: ## in Loop: Header=BB84_390 Depth=2
notq %rsi
cmpq -248(%rbp), %rsi ## 8-byte Folded Reload
je LBB84_389
## %bb.413: ## in Loop: Header=BB84_390 Depth=2
movq %r12, %rcx
subq %rax, %rcx
leaq (%rax,%r13), %rsi
incq %rax
.p2align 4, 0x90
LBB84_414: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_390 Depth=2
## => This Inner Loop Header: Depth=3
movzbl -1(%rsi), %ebx
addb -1(%r15,%rax), %bl
movb %bl, -1(%r8,%rax)
movzbl (%rsi), %ebx
addb (%r15,%rax), %bl
movb %bl, (%r8,%rax)
addq $2, %rsi
addq $2, %rax
addq $-2, %rcx
jne LBB84_414
jmp LBB84_389
.p2align 4, 0x90
LBB84_393: ## in Loop: Header=BB84_390 Depth=2
movq %r10, %rax
imulq -64(%rbp), %rax ## 8-byte Folded Reload
addq %r9, %rax
movq %r10, %rcx
imulq -72(%rbp), %rcx ## 8-byte Folded Reload
addq -80(%rbp), %rcx ## 8-byte Folded Reload
subq %rcx, %rax
cmpq $32, %rax
jb LBB84_409
## %bb.394: ## in Loop: Header=BB84_390 Depth=2
jb LBB84_409
## %bb.395: ## in Loop: Header=BB84_390 Depth=2
movl $0, %esi
cmpl $32, -44(%rbp) ## 4-byte Folded Reload
jb LBB84_410
## %bb.396: ## in Loop: Header=BB84_390 Depth=2
cmpl $32, %r12d
jae LBB84_398
## %bb.397: ## in Loop: Header=BB84_390 Depth=2
xorl %edi, %edi
jmp LBB84_405
LBB84_398: ## in Loop: Header=BB84_390 Depth=2
cmpq $0, -168(%rbp) ## 8-byte Folded Reload
je LBB84_415
## %bb.399: ## in Loop: Header=BB84_390 Depth=2
movq -64(%rbp), %rax ## 8-byte Reload
leaq (%rdx,%rax), %rsi
movq -192(%rbp), %rcx ## 8-byte Reload
xorl %eax, %eax
.p2align 4, 0x90
LBB84_400: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_390 Depth=2
## => This Inner Loop Header: Depth=3
movdqu (%r15,%rax), %xmm0
movdqu 16(%r15,%rax), %xmm1
movdqu -48(%rdx,%rax), %xmm2
paddb %xmm0, %xmm2
movdqu -32(%rdx,%rax), %xmm0
paddb %xmm1, %xmm0
movdqu %xmm2, -48(%rsi,%rax)
movdqu %xmm0, -32(%rsi,%rax)
movdqu 32(%r15,%rax), %xmm0
movdqu 48(%r15,%rax), %xmm1
movdqu -16(%rdx,%rax), %xmm2
paddb %xmm0, %xmm2
movdqu (%rdx,%rax), %xmm0
paddb %xmm1, %xmm0
movdqu %xmm2, -16(%rsi,%rax)
movdqu %xmm0, (%rsi,%rax)
addq $64, %rax
addq $-2, %rcx
jne LBB84_400
## %bb.401: ## in Loop: Header=BB84_390 Depth=2
testb $1, -208(%rbp) ## 1-byte Folded Reload
je LBB84_403
LBB84_402: ## in Loop: Header=BB84_390 Depth=2
movdqu (%r15,%rax), %xmm0
movdqu 16(%r15,%rax), %xmm1
movq %rax, %rcx
subq -64(%rbp), %rcx ## 8-byte Folded Reload
movdqu (%r8,%rcx), %xmm2
paddb %xmm0, %xmm2
movdqu 16(%r8,%rcx), %xmm0
paddb %xmm1, %xmm0
movdqu %xmm2, (%r8,%rax)
movdqu %xmm0, 16(%r8,%rax)
LBB84_403: ## in Loop: Header=BB84_390 Depth=2
cmpq %r12, -88(%rbp) ## 8-byte Folded Reload
je LBB84_389
## %bb.404: ## in Loop: Header=BB84_390 Depth=2
movq -88(%rbp), %rsi ## 8-byte Reload
movq %rsi, %rdi
testb $24, %r12b
je LBB84_410
LBB84_405: ## in Loop: Header=BB84_390 Depth=2
leaq (%r15,%rdi), %rax
leaq (%r11,%rdi), %rcx
movq -64(%rbp), %rsi ## 8-byte Reload
addq %rcx, %rsi
movq -112(%rbp), %rbx ## 8-byte Reload
subq %rdi, %rbx
xorl %edi, %edi
.p2align 4, 0x90
LBB84_406: ## Parent Loop BB84_10 Depth=1
## Parent Loop BB84_390 Depth=2
## => This Inner Loop Header: Depth=3
movq (%rax,%rdi), %xmm0 ## xmm0 = mem[0],zero
movq (%rcx,%rdi), %xmm1 ## xmm1 = mem[0],zero
paddb %xmm0, %xmm1
movq %xmm1, (%rsi,%rdi)
addq $8, %rdi
cmpq %rdi, %rbx
jne LBB84_406
## %bb.407: ## in Loop: Header=BB84_390 Depth=2
movq -112(%rbp), %rax ## 8-byte Reload
movq %rax, %rsi
cmpq %r12, %rax
je LBB84_389
jmp LBB84_410
LBB84_415: ## in Loop: Header=BB84_390 Depth=2
xorl %eax, %eax
testb $1, -208(%rbp) ## 1-byte Folded Reload
jne LBB84_402
jmp LBB84_403
.p2align 4, 0x90
LBB84_428: ## in Loop: Header=BB84_10 Depth=1
movq %r12, %r15
LBB84_429: ## in Loop: Header=BB84_10 Depth=1
movq -528(%rbp), %r14 ## 8-byte Reload
incq %r14
cmpq -504(%rbp), %r14 ## 8-byte Folded Reload
je LBB84_436
## %bb.430: ## in Loop: Header=BB84_10 Depth=1
movl -180(%rbp), %eax ## 4-byte Reload
movl %eax, %r11d
imull %r14d, %r11d
movq -232(%rbp), %rax ## 8-byte Reload
addq 56(%rax), %r11
movb (%r15), %al
cmpb $4, %al
movl -44(%rbp), %r8d ## 4-byte Reload
movq -56(%rbp), %r13 ## 8-byte Reload
movq -64(%rbp), %r9 ## 8-byte Reload
leaq LJTI84_0(%rip), %rdx
jbe LBB84_10
## %bb.431:
leaq L_.str.71(%rip), %rcx
jmp LBB84_433
LBB84_432:
leaq L_.str.5(%rip), %rcx
LBB84_433:
movq %rcx, _failure_reason(%rip)
LBB84_434:
xorl %eax, %eax
LBB84_435:
addq $488, %rsp ## imm = 0x1E8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
LBB84_436:
movl $1, %eax
jmp LBB84_435
LBB84_437:
leaq L_.str.70(%rip), %rcx
jmp LBB84_433
LBB84_438:
callq _create_png_image_raw.cold.1
jmp LBB84_434
LBB84_439:
callq _create_png_image_raw.cold.2
jmp LBB84_434
.cfi_endproc
.p2align 2, 0x90
.data_region jt32
.set L84_0_set_14, LBB84_14-LJTI84_0
.set L84_0_set_19, LBB84_19-LJTI84_0
.set L84_0_set_20, LBB84_20-LJTI84_0
LJTI84_0:
.long L84_0_set_14
.long L84_0_set_14
.long L84_0_set_19
.long L84_0_set_20
.long L84_0_set_19
.long L84_0_set_14
.long L84_0_set_14
.set L84_1_set_53, LBB84_53-LJTI84_1
.set L84_1_set_246, LBB84_246-LJTI84_1
.set L84_1_set_275, LBB84_275-LJTI84_1
.set L84_1_set_306, LBB84_306-LJTI84_1
.set L84_1_set_331, LBB84_331-LJTI84_1
.set L84_1_set_358, LBB84_358-LJTI84_1
.set L84_1_set_387, LBB84_387-LJTI84_1
LJTI84_1:
.long L84_1_set_53
.long L84_1_set_246
.long L84_1_set_275
.long L84_1_set_306
.long L84_1_set_331
.long L84_1_set_358
.long L84_1_set_387
.set L84_2_set_24, LBB84_24-LJTI84_2
.set L84_2_set_80, LBB84_80-LJTI84_2
.set L84_2_set_108, LBB84_108-LJTI84_2
.set L84_2_set_137, LBB84_137-LJTI84_2
.set L84_2_set_162, LBB84_162-LJTI84_2
.set L84_2_set_189, LBB84_189-LJTI84_2
.set L84_2_set_217, LBB84_217-LJTI84_2
LJTI84_2:
.long L84_2_set_24
.long L84_2_set_80
.long L84_2_set_108
.long L84_2_set_137
.long L84_2_set_162
.long L84_2_set_189
.long L84_2_set_217
.end_data_region
## -- End function
.p2align 4, 0x90 ## -- Begin function get32le
_get32le: ## @get32le
.cfi_startproc
## ------------------------------------------------------------------
## Reads four consecutive bytes from the context pointed to by %rdi
## and returns them in %eax assembled little-endian:
##     b0 | b1<<8 | b2<<16 | b3<<24
## Byte source selection: the field at 16(ctx) is tested before each
## byte while on the stream path; non-null means it is passed to
## fgetc(), so it is presumably a FILE* (TODO confirm against the
## context struct), and an EOF (-1) result yields 0 for that byte.
## When 16(ctx) is null the byte comes from a memory buffer where
## 24(ctx) is the current read pointer and 32(ctx) the end pointer;
## at or past the end the byte is 0 and the pointer is not advanced.
## Once the buffer path is taken, the remaining bytes stay on it.
## Byte registers: r14 = b0, r13 = b1, r15 = b2, rbx = b3
## (all zero-extended 8-bit values).
## ------------------------------------------------------------------
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax ## 8-byte pad keeps %rsp 16-aligned at the fgetc call sites
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rdi, %r12 ## r12 = ctx, preserved across the fgetc calls
movq 16(%rdi), %rdi
testq %rdi, %rdi
je LBB85_1 ## 16(ctx) == 0: take the memory-buffer path
## %bb.3:
callq _fgetc ## byte 0 from stream
movl %eax, %r14d
xorl %r13d, %r13d
cmpl $-1, %eax
cmovel %r13d, %r14d ## EOF -> b0 = 0
movq 16(%r12), %rdi
testq %rdi, %rdi
je LBB85_4
## %bb.7:
callq _fgetc ## byte 1 from stream
cmpl $-1, %eax
cmovnel %eax, %r13d ## r13 (b1) stays 0 on EOF
movq 16(%r12), %rdi
testq %rdi, %rdi
je LBB85_8
## %bb.10:
callq _fgetc ## byte 2 from stream
movl %eax, %r15d
xorl %ebx, %ebx
cmpl $-1, %eax
cmovel %ebx, %r15d ## EOF -> b2 = 0
movq 16(%r12), %rdi
testq %rdi, %rdi
je LBB85_11
## %bb.15:
callq _fgetc ## byte 3 from stream
cmpl $-1, %eax
cmovnel %eax, %ebx ## rbx (b3) stays 0 on EOF
jmp LBB85_14
LBB85_1: ## buffer path, byte 0
movq 24(%r12), %rax ## rax = cur
movq 32(%r12), %rcx ## rcx = end
xorl %r14d, %r14d
cmpq %rcx, %rax
jae LBB85_5 ## cur >= end: b0 = 0, do not advance
## %bb.2:
leaq 1(%rax), %rdx
movq %rdx, 24(%r12) ## advance cur
movzbl (%rax), %r14d ## b0 = *cur
movq %rdx, %rax
jmp LBB85_5
LBB85_4: ## stream went null before byte 1: load buffer pointers
movq 24(%r12), %rax
movq 32(%r12), %rcx
LBB85_5: ## buffer path, byte 1 (rax = cur, rcx = end)
xorl %r13d, %r13d
cmpq %rcx, %rax
jae LBB85_8
## %bb.6:
leaq 1(%rax), %rcx
movq %rcx, 24(%r12)
movzbl (%rax), %r13d ## b1 = *cur
LBB85_8: ## buffer path, byte 2 (pointers reloaded from ctx)
movq 24(%r12), %rax
movq 32(%r12), %rcx
xorl %r15d, %r15d
cmpq %rcx, %rax
jae LBB85_12
## %bb.9:
leaq 1(%rax), %rdx
movq %rdx, 24(%r12)
movzbl (%rax), %r15d ## b2 = *cur
movq %rdx, %rax
jmp LBB85_12
LBB85_11: ## stream went null before byte 3: load buffer pointers
movq 24(%r12), %rax
movq 32(%r12), %rcx
LBB85_12: ## buffer path, byte 3
xorl %ebx, %ebx
cmpq %rcx, %rax
jae LBB85_14
## %bb.13:
leaq 1(%rax), %rcx
movq %rcx, 24(%r12)
movzbl (%rax), %ebx ## b3 = *cur
LBB85_14: ## assemble the little-endian result
shll $8, %r13d
addl %r14d, %r13d ## r13 = b0 | b1<<8
shll $8, %ebx
addl %r15d, %ebx ## rbx = b2 | b3<<8
shll $16, %ebx
addl %r13d, %ebx ## rbx = b0 | b1<<8 | b2<<16 | b3<<24
movl %ebx, %eax
addq $8, %rsp ## drop the alignment pad
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
.p2align 4, 0x90 ## -- Begin function high_bit
_high_bit: ## @high_bit
.cfi_startproc
## ------------------------------------------------------------------
## Returns in %eax the index of the highest set bit of %edi
## (floor(log2) of the unsigned input), or -1 when the input is 0.
## The nonzero path is branchless: a binary search over widths
## 16/8/4/2 that narrows the value with cmov while accumulating the
## bit index, then a final cmp/sbb folds in the last bit.
## Leaf function: frame pointer only, no locals; clobbers
## ecx, edx, esi, edi and flags.
## ------------------------------------------------------------------
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
testl %edi, %edi
je LBB86_1 ## input == 0 -> return -1
## %bb.2:
movl %edi, %eax
shrl $16, %eax
xorl %ecx, %ecx
cmpl $65536, %edi ## imm = 0x10000
setae %cl ## cl = 1 iff some bit >= 16 is set
cmovbl %edi, %eax ## keep the half containing the top bit
shll $4, %ecx ## ecx = 16 or 0 (index bits found so far)
leal 8(%rcx), %edx ## candidate index if the next test passes
movl %eax, %esi
shrl $8, %esi
cmpl $256, %eax ## imm = 0x100
cmovbl %eax, %esi ## narrow to the byte with the top bit
cmovbl %ecx, %edx ## ...and drop the +8 if the high byte was empty
leal 4(%rdx), %ecx
movl %esi, %edi
shrl $4, %edi
cmpl $16, %esi
cmovbl %esi, %edi ## narrow to the nibble with the top bit
cmovbl %edx, %ecx ## ...and drop the +4 if not taken
leal 2(%rcx), %eax
movl %edi, %edx
shrl $2, %edx
cmpl $4, %edi
cmovbl %edi, %edx ## narrow to the 2-bit group with the top bit
cmovbl %ecx, %eax ## ...and drop the +2 if not taken
cmpl $2, %edx
sbbl $-1, %eax ## eax += 1 when the remaining value is >= 2
popq %rbp
retq
LBB86_1:
movl $-1, %eax ## no bits set
popq %rbp
retq
.cfi_endproc
## -- End function
## -----------------------------------------------------------------------
## int bitcount(unsigned int n)
## ABI:  SysV AMD64 (macOS).  In: %edi = n.  Out: %eax = popcount(n).
## Classic SWAR population count: pairwise sums of 1-, 2-, then 4-bit
## fields, followed by byte-folding shifts.  The final movzbl keeps only
## the low byte — the total (at most 32) fits in 8 bits.
## Clobbers: ecx, edi, flags.
## -----------------------------------------------------------------------
.p2align 4, 0x90 ## -- Begin function bitcount
_bitcount: ## @bitcount
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
## x = (n & 0x55555555) + ((n >> 1) & 0x55555555)   — 2-bit field sums.
movl %edi, %eax
andl $1431655765, %eax ## imm = 0x55555555
shrl %edi
andl $1431655765, %edi ## imm = 0x55555555
addl %eax, %edi
## x = (x & 0x33333333) + ((x >> 2) & 0x33333333)   — 4-bit field sums.
movl %edi, %eax
andl $858993459, %eax ## imm = 0x33333333
shrl $2, %edi
andl $858993459, %edi ## imm = 0x33333333
addl %eax, %edi
## x = (x + (x >> 4)) & 0x0F0F0F0F                  — per-byte sums.
movl %edi, %eax
shrl $4, %eax
addl %edi, %eax
andl $252645135, %eax ## imm = 0xF0F0F0F
## Fold the four byte sums together; the total lands in the low byte.
movl %eax, %ecx
shrl $8, %ecx
addl %eax, %ecx
movl %ecx, %eax
shrl $16, %eax
addl %ecx, %eax
movzbl %al, %eax
popq %rbp
retq
.cfi_endproc
## -- End function
## -----------------------------------------------------------------------
## char *hdr_gettoken(stream *s, char *buffer)
## Reads characters from the stream in %rdi into the buffer in %rsi until
## a newline (10) or end-of-input, NUL-terminates the buffer, and returns
## it in %rax.  At most 1023 characters are stored; when the line is
## longer, the remainder of the line is consumed and discarded.
## Stream layout as used here: +16 = FILE* (NULL for in-memory streams),
## +24 = current read pointer, +32 = end pointer.
## Register roles: r15 = stream, r14 = buffer, r12 = output index,
## bl = current character.
## NOTE(review): buffer is presumed to be at least 1024 bytes — the code
## stores up to index 1023; confirm against callers.
## -----------------------------------------------------------------------
.p2align 4, 0x90 ## -- Begin function hdr_gettoken
_hdr_gettoken: ## @hdr_gettoken
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq %rsi, %r14
movq %rdi, %r15
## Fetch the first character: fgetc() when a FILE* is present (EOF maps
## to 0), otherwise read from the memory buffer if not exhausted.
movq 16(%rdi), %rdi
testq %rdi, %rdi
je LBB88_2
## %bb.1:
callq _fgetc
xorl %ebx, %ebx
cmpl $-1, %eax
cmovnel %eax, %ebx
jmp LBB88_4
LBB88_2:
## Memory-stream path: take *cur if cur < end, advancing cur.
movq 24(%r15), %rax
xorl %ebx, %ebx
cmpq 32(%r15), %rax
jae LBB88_4
## %bb.3:
leaq 1(%rax), %rcx
movq %rcx, 24(%r15)
movzbl (%rax), %ebx
LBB88_4:
xorl %r13d, %r13d
xorl %r12d, %r12d
jmp LBB88_5
.p2align 4, 0x90
LBB88_23: ## in Loop: Header=BB88_5 Depth=1
callq _fgetc
movl %eax, %ebx
cmpl $-1, %eax
cmovel %r13d, %ebx
LBB88_24: ## in Loop: Header=BB88_5 Depth=1
incq %r12
## Main copy loop: store chars until newline, EOF, or index 1022.
LBB88_5: ## =>This Inner Loop Header: Depth=1
movq 16(%r15), %rdi
testq %rdi, %rdi
je LBB88_7
## %bb.6: ## in Loop: Header=BB88_5 Depth=1
callq _feof
testl %eax, %eax
je LBB88_9
jmp LBB88_21
.p2align 4, 0x90
LBB88_7: ## in Loop: Header=BB88_5 Depth=1
## Memory stream is "at EOF" when cur >= end.
movq 24(%r15), %rcx
xorl %eax, %eax
cmpq 32(%r15), %rcx
setae %al
testl %eax, %eax
jne LBB88_21
LBB88_9: ## in Loop: Header=BB88_5 Depth=1
## Stop (without storing) on newline.
movzbl %bl, %eax
cmpl $10, %eax
je LBB88_21
## %bb.10: ## in Loop: Header=BB88_5 Depth=1
movb %bl, (%r14,%r12)
cmpq $1022, %r12 ## imm = 0x3FE
je LBB88_11
## %bb.22: ## in Loop: Header=BB88_5 Depth=1
movq 16(%r15), %rdi
testq %rdi, %rdi
jne LBB88_23
## %bb.25: ## in Loop: Header=BB88_5 Depth=1
movq 24(%r15), %rax
xorl %ebx, %ebx
cmpq 32(%r15), %rax
jae LBB88_24
## %bb.26: ## in Loop: Header=BB88_5 Depth=1
leaq 1(%rax), %rcx
movq %rcx, 24(%r15)
movzbl (%rax), %ebx
incq %r12
jmp LBB88_5
LBB88_11:
## Buffer full at index 1022: terminator will go at 1023; discard the
## rest of the line below.
xorl %ebx, %ebx
movl $1023, %r12d ## imm = 0x3FF
jmp LBB88_12
.p2align 4, 0x90
LBB88_17: ## in Loop: Header=BB88_12 Depth=1
callq _fgetc
cmpl $-1, %eax
cmovel %ebx, %eax
LBB88_20: ## in Loop: Header=BB88_12 Depth=1
cmpl $10, %eax
je LBB88_21
## Drain loop: consume characters until newline or end-of-input.
LBB88_12: ## =>This Inner Loop Header: Depth=1
movq 16(%r15), %rdi
testq %rdi, %rdi
je LBB88_14
## %bb.13: ## in Loop: Header=BB88_12 Depth=1
callq _feof
testl %eax, %eax
je LBB88_16
jmp LBB88_21
.p2align 4, 0x90
LBB88_14: ## in Loop: Header=BB88_12 Depth=1
movq 24(%r15), %rcx
xorl %eax, %eax
cmpq 32(%r15), %rcx
setae %al
testl %eax, %eax
jne LBB88_21
LBB88_16: ## in Loop: Header=BB88_12 Depth=1
movq 16(%r15), %rdi
testq %rdi, %rdi
jne LBB88_17
## %bb.18: ## in Loop: Header=BB88_12 Depth=1
movq 24(%r15), %rcx
xorl %eax, %eax
cmpq 32(%r15), %rcx
jae LBB88_20
## %bb.19: ## in Loop: Header=BB88_12 Depth=1
leaq 1(%rcx), %rax
movq %rax, 24(%r15)
movzbl (%rcx), %eax
jmp LBB88_20
LBB88_21:
## NUL-terminate at the final index and return the buffer.
movb $0, (%r14,%r12)
movq %r14, %rax
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## -- End function
## Literal pool for hdr_convert: 1.0 (double, ldexp mantissa) and 3.0f
## (luminance averaging divisor).
.section __TEXT,__literal8,8byte_literals
.p2align 3 ## -- Begin function hdr_convert
LCPI89_0:
.quad 0x3ff0000000000000 ## double 1
.section __TEXT,__literal4,4byte_literals
.p2align 2
LCPI89_1:
.long 0x40400000 ## float 3
.section __TEXT,__text,regular,pure_instructions
## -----------------------------------------------------------------------
## void hdr_convert(float *output, unsigned char *input, int req_comp)
## Decodes one RGBE-encoded HDR pixel (4 bytes at %rsi: r, g, b, shared
## exponent) into req_comp (%edx) float components at %rdi.
## Nonzero exponent e: scale = ldexp(1.0, e - 136), i.e. 2^(e-136);
##   req_comp <= 2: output[0] = (r+g+b) * scale / 3 (luminance),
##                  and output[1] = 1.0f when req_comp == 2;
##   req_comp >= 3: output[0..2] = {r,g,b} * scale,
##                  and output[3] = 1.0f when req_comp == 4.
## Zero exponent: output components are zero-filled via the jump table
## below (alpha set to 1.0f for req_comp 2 and 4).
## Register roles: r14 = output, rbx = input, r15d = req_comp.
## -----------------------------------------------------------------------
.p2align 4, 0x90
_hdr_convert: ## @hdr_convert
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %edx, %r15d
movq %rdi, %r14
movzbl 3(%rsi), %edi
testl %edi, %edi
je LBB89_8
## %bb.1:
## xmm0 = (float)ldexp(1.0, exponent - 136)  — shared per-pixel scale.
movq %rsi, %rbx
addl $-136, %edi
movsd LCPI89_0(%rip), %xmm0 ## xmm0 = mem[0],zero
callq _ldexp
cvtsd2ss %xmm0, %xmm0
movzbl (%rbx), %eax
cmpl $2, %r15d
jg LBB89_3
## %bb.2:
## 1/2-component output: luminance = (r + g + b) * scale / 3.
movzbl 1(%rbx), %ecx
addl %eax, %ecx
movzbl 2(%rbx), %eax
addl %ecx, %eax
cvtsi2ss %eax, %xmm1
mulss %xmm1, %xmm0
divss LCPI89_1(%rip), %xmm0
movss %xmm0, (%r14)
cmpl $4, %r15d
je LBB89_7
LBB89_5:
cmpl $2, %r15d
jne LBB89_14
## %bb.6:
## req_comp == 2: alpha channel = 1.0f.
movl $1065353216, 4(%r14) ## imm = 0x3F800000
jmp LBB89_14
LBB89_8:
## Zero exponent: dispatch on req_comp (1..4) to zero-fill the output;
## any other value falls through with output untouched.
decl %r15d
cmpl $3, %r15d
ja LBB89_14
## %bb.9:
leaq LJTI89_0(%rip), %rax
movslq (%rax,%r15,4), %rcx
addq %rax, %rcx
jmpq *%rcx
LBB89_12:
## req_comp == 2: alpha = 1.0f, then fall through to zero luminance.
movl $1065353216, 4(%r14) ## imm = 0x3F800000
LBB89_13:
movl $0, (%r14)
jmp LBB89_14
LBB89_3:
## 3/4-component output: r, g, b each scaled by xmm0.
cvtsi2ss %eax, %xmm1
mulss %xmm0, %xmm1
movss %xmm1, (%r14)
movzbl 1(%rbx), %eax
xorps %xmm1, %xmm1
cvtsi2ss %eax, %xmm1
mulss %xmm0, %xmm1
movss %xmm1, 4(%r14)
movzbl 2(%rbx), %eax
xorps %xmm1, %xmm1
cvtsi2ss %eax, %xmm1
mulss %xmm0, %xmm1
movss %xmm1, 8(%r14)
cmpl $4, %r15d
jne LBB89_5
LBB89_7:
## req_comp == 4: alpha channel = 1.0f.
movl $1065353216, 12(%r14) ## imm = 0x3F800000
jmp LBB89_14
LBB89_10:
## req_comp == 4, zero exponent: alpha = 1.0f, then zero r/g/b below.
movl $1065353216, 12(%r14) ## imm = 0x3F800000
LBB89_11:
## Zero the first three float components (12 bytes).
movl $0, 8(%r14)
movq $0, (%r14)
LBB89_14:
addq $8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## Jump table for the zero-exponent path, indexed by req_comp - 1.
.p2align 2, 0x90
.data_region jt32
.set L89_0_set_13, LBB89_13-LJTI89_0
.set L89_0_set_12, LBB89_12-LJTI89_0
.set L89_0_set_11, LBB89_11-LJTI89_0
.set L89_0_set_10, LBB89_10-LJTI89_0
LJTI89_0:
.long L89_0_set_13
.long L89_0_set_12
.long L89_0_set_11
.long L89_0_set_10
.end_data_region
## -- End function
## -----------------------------------------------------------------------
## void writefv(FILE *f, const char *fmt, va_list v)
## Writes binary data to f driven by fmt, one fwrite(ptr, 1, 1, f) per
## byte.  Dispatch is via the jump table LJTI90_0, indexed by ch - 32:
##   ' '  -> no output, advance to next format char
##   '1'  -> write the low byte of the next int argument
##   '2'  -> write the next int as 2 bytes, least-significant first
##   '4'  -> write the next int as 4 bytes, least-significant first
##   else -> writefv.cold.1 (assertion failure, does not return)
## The va_arg fetches are inlined SysV va_list sequences: gp_offset at
## (%r14); while gp_offset <= 40 the argument comes from the register
## save area at 16(%r14) + gp_offset (then gp_offset += 8), otherwise
## from the overflow area pointer at 8(%r14) (advanced by 8).
## Register roles: r15 = f, rbx = fmt cursor, r14 = va_list,
## r13 = jump-table base, r12/eax = current argument value.
## -----------------------------------------------------------------------
.p2align 4, 0x90 ## -- Begin function writefv
_writefv: ## @writefv
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movb (%rsi), %al
testb %al, %al
je LBB90_19
## %bb.1:
movq %rdx, %r14
movq %rsi, %rbx
movq %rdi, %r15
incq %rbx
leaq LJTI90_0(%rip), %r13
jmp LBB90_2
LBB90_6: ## in Loop: Header=BB90_2 Depth=1
## '1', stack path: take the argument from the overflow area.
movq 8(%r14), %rax
leaq 8(%rax), %rcx
movq %rcx, 8(%r14)
LBB90_7: ## in Loop: Header=BB90_2 Depth=1
## Write one byte via a stack temp.
movzbl (%rax), %eax
movb %al, -41(%rbp)
movl $1, %esi
movl $1, %edx
leaq -41(%rbp), %rdi
LBB90_8: ## in Loop: Header=BB90_2 Depth=1
## Shared tail: fwrite(ptr, 1, 1, f) for the final byte of each case.
movq %r15, %rcx
callq _fwrite
LBB90_9: ## in Loop: Header=BB90_2 Depth=1
movzbl (%rbx), %eax
incq %rbx
testb %al, %al
je LBB90_19
LBB90_2: ## =>This Inner Loop Header: Depth=1
## Dispatch on format char: index = ch - 32, bounds-checked to 0..20.
movsbl %al, %eax
addl $-32, %eax
cmpl $20, %eax
ja LBB90_18
## %bb.3: ## in Loop: Header=BB90_2 Depth=1
movslq (%r13,%rax,4), %rax
addq %r13, %rax
jmpq *%rax
LBB90_4: ## in Loop: Header=BB90_2 Depth=1
## '1': fetch next int argument (register save area while gp_offset <= 40).
movl (%r14), %ecx
cmpq $40, %rcx
ja LBB90_6
## %bb.5: ## in Loop: Header=BB90_2 Depth=1
movq %rcx, %rax
addq 16(%r14), %rax
addl $8, %ecx
movl %ecx, (%r14)
jmp LBB90_7
.p2align 4, 0x90
LBB90_10: ## in Loop: Header=BB90_2 Depth=1
## '2': fetch next int argument.
movl (%r14), %ecx
cmpq $40, %rcx
ja LBB90_12
## %bb.11: ## in Loop: Header=BB90_2 Depth=1
movq %rcx, %rax
addq 16(%r14), %rax
addl $8, %ecx
movl %ecx, (%r14)
jmp LBB90_13
.p2align 4, 0x90
LBB90_14: ## in Loop: Header=BB90_2 Depth=1
## '4': fetch next int argument.
movl (%r14), %ecx
cmpq $40, %rcx
ja LBB90_16
## %bb.15: ## in Loop: Header=BB90_2 Depth=1
movq %rcx, %rax
addq 16(%r14), %rax
addl $8, %ecx
movl %ecx, (%r14)
jmp LBB90_17
LBB90_12: ## in Loop: Header=BB90_2 Depth=1
movq 8(%r14), %rax
leaq 8(%rax), %rcx
movq %rcx, 8(%r14)
LBB90_13: ## in Loop: Header=BB90_2 Depth=1
## '2': write low byte, then byte 1 (via %ah), little-endian order.
movl (%rax), %eax
movb %al, -42(%rbp)
movl %eax, %r12d
movl $1, %esi
movl $1, %edx
leaq -42(%rbp), %rdi
movq %r15, %rcx
callq _fwrite
movl %r12d, %eax
movb %ah, -43(%rbp)
movl $1, %esi
movl $1, %edx
leaq -43(%rbp), %rdi
jmp LBB90_8
LBB90_16: ## in Loop: Header=BB90_2 Depth=1
movq 8(%r14), %rax
leaq 8(%rax), %rcx
movq %rcx, 8(%r14)
LBB90_17: ## in Loop: Header=BB90_2 Depth=1
## '4': write bytes 0..3 of the value, least-significant first.
movl (%rax), %r12d
movb %r12b, -44(%rbp)
movl $1, %esi
movl $1, %edx
leaq -44(%rbp), %rdi
movq %r15, %rcx
callq _fwrite
movl %r12d, %eax
movb %ah, -45(%rbp)
movl $1, %esi
movl $1, %edx
leaq -45(%rbp), %rdi
movq %r15, %rcx
callq _fwrite
movl %r12d, %eax
shrl $16, %eax
movb %al, -46(%rbp)
movl $1, %esi
movl $1, %edx
leaq -46(%rbp), %rdi
movq %r15, %rcx
callq _fwrite
shrl $24, %r12d
movb %r12b, -47(%rbp)
movl $1, %esi
movl $1, %edx
leaq -47(%rbp), %rdi
jmp LBB90_8
LBB90_18:
## Unrecognized format character: assertion trampoline (does not return).
callq _writefv.cold.1
LBB90_19:
addq $8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
## Jump table, indexed by format char - 32 (' ' .. '4').
.p2align 2, 0x90
.data_region jt32
.set L90_0_set_9, LBB90_9-LJTI90_0
.set L90_0_set_18, LBB90_18-LJTI90_0
.set L90_0_set_4, LBB90_4-LJTI90_0
.set L90_0_set_10, LBB90_10-LJTI90_0
.set L90_0_set_14, LBB90_14-LJTI90_0
LJTI90_0:
.long L90_0_set_9
.long L90_0_set_18
.long L90_0_set_18
.long L90_0_set_18
.long L90_0_set_18
.long L90_0_set_18
.long L90_0_set_18
.long L90_0_set_18
.long L90_0_set_18
.long L90_0_set_18
.long L90_0_set_18
.long L90_0_set_18
.long L90_0_set_18
.long L90_0_set_18
.long L90_0_set_18
.long L90_0_set_18
.long L90_0_set_18
.long L90_0_set_4
.long L90_0_set_10
.long L90_0_set_18
.long L90_0_set_14
.end_data_region
## -- End function
## -----------------------------------------------------------------------
## void writef(FILE *f, const char *fmt, ...)
## Variadic front-end for writefv: spills the incoming register arguments
## into a register save area on the stack, materializes a SysV va_list,
## and calls writefv(f, "111", &va).
## NOTE(review): the incoming fmt argument (%rsi) is not forwarded — the
## constant string "111" (L_.str.98) is passed instead, presumably
## constant-propagated by the compiler from the call sites; verify.
## va_list initializer 0x3000000010 encodes gp_offset = 16 (rdi/rsi are
## the two named args) and fp_offset = 48.
## A __stack_chk_guard cookie protects the on-stack save area.
## -----------------------------------------------------------------------
.p2align 4, 0x90 ## -- Begin function writef
_writef: ## @writef
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
subq $208, %rsp
## Spill integer register args 3..6 into the register save area.
movq %rdx, -192(%rbp)
movq %rcx, -184(%rbp)
movq %r8, -176(%rbp)
movq %r9, -168(%rbp)
## al = number of vector args (SysV variadic convention); spill xmm0-7
## only when the caller passed any.
testb %al, %al
je LBB91_4
## %bb.3:
movaps %xmm0, -160(%rbp)
movaps %xmm1, -144(%rbp)
movaps %xmm2, -128(%rbp)
movaps %xmm3, -112(%rbp)
movaps %xmm4, -96(%rbp)
movaps %xmm5, -80(%rbp)
movaps %xmm6, -64(%rbp)
movaps %xmm7, -48(%rbp)
LBB91_4:
## Install the stack-protector cookie.
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
movq %rax, -8(%rbp)
## Build the va_list at -32(%rbp): reg_save_area, overflow_arg_area,
## then {gp_offset, fp_offset} packed in one 64-bit store.
leaq -208(%rbp), %rax
movq %rax, -16(%rbp)
leaq 16(%rbp), %rax
movq %rax, -24(%rbp)
movabsq $206158430224, %rax ## imm = 0x3000000010
movq %rax, -32(%rbp)
leaq L_.str.98(%rip), %rsi
leaq -32(%rbp), %rdx
callq _writefv
## Verify the cookie before returning.
movq ___stack_chk_guard@GOTPCREL(%rip), %rax
movq (%rax), %rax
cmpq -8(%rbp), %rax
jne LBB91_2
## %bb.1:
addq $208, %rsp
popq %rbp
retq
LBB91_2:
callq ___stack_chk_fail
.cfi_endproc
## -- End function
## -----------------------------------------------------------------------
## Cold assertion-failure trampolines outlined from do_zlib's inlined
## helpers.  Each stub loads the arguments for
##   __assert_rtn(function, file, line, failed-expression)
## into rdi / rsi / edx / rcx and calls it; __assert_rtn does not return,
## so no epilogue follows the call.  The file operand L_.str.13 is
## "247019479.c" (see the cstring section later in this file).
## -----------------------------------------------------------------------
## compute_huffman_codes: assert "c >= 0 && c < 19" (line 2121).
.p2align 4, 0x90 ## -- Begin function do_zlib.cold.1
_do_zlib.cold.1: ## @do_zlib.cold.1
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
leaq L___func__.compute_huffman_codes(%rip), %rdi
leaq L_.str.13(%rip), %rsi
leaq L_.str.45(%rip), %rcx
movl $2121, %edx ## imm = 0x849
callq ___assert_rtn
.cfi_endproc
## -- End function
## fill_bits: assert "z->code_buffer < (1U << z->num_bits)" (line 1997).
.p2align 4, 0x90 ## -- Begin function do_zlib.cold.2
_do_zlib.cold.2: ## @do_zlib.cold.2
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
leaq L___func__.fill_bits(%rip), %rdi
leaq L_.str.13(%rip), %rsi
leaq L_.str.37(%rip), %rcx
movl $1997, %edx ## imm = 0x7CD
callq ___assert_rtn
.cfi_endproc
## -- End function
## fill_bits assert (same as cold.2; one outlined copy per inline site).
.p2align 4, 0x90 ## -- Begin function do_zlib.cold.3
_do_zlib.cold.3: ## @do_zlib.cold.3
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
leaq L___func__.fill_bits(%rip), %rdi
leaq L_.str.13(%rip), %rsi
leaq L_.str.37(%rip), %rcx
movl $1997, %edx ## imm = 0x7CD
callq ___assert_rtn
.cfi_endproc
## -- End function
## compute_huffman_codes: assert "c == 18" (line 2133).
.p2align 4, 0x90 ## -- Begin function do_zlib.cold.4
_do_zlib.cold.4: ## @do_zlib.cold.4
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
leaq L___func__.compute_huffman_codes(%rip), %rdi
leaq L_.str.13(%rip), %rsi
leaq L_.str.46(%rip), %rcx
movl $2133, %edx ## imm = 0x855
callq ___assert_rtn
.cfi_endproc
## -- End function
## fill_bits assert (copy).
.p2align 4, 0x90 ## -- Begin function do_zlib.cold.5
_do_zlib.cold.5: ## @do_zlib.cold.5
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
leaq L___func__.fill_bits(%rip), %rdi
leaq L_.str.13(%rip), %rsi
leaq L_.str.37(%rip), %rcx
movl $1997, %edx ## imm = 0x7CD
callq ___assert_rtn
.cfi_endproc
## -- End function
## fill_bits assert (copy).
.p2align 4, 0x90 ## -- Begin function do_zlib.cold.6
_do_zlib.cold.6: ## @do_zlib.cold.6
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
leaq L___func__.fill_bits(%rip), %rdi
leaq L_.str.13(%rip), %rsi
leaq L_.str.37(%rip), %rcx
movl $1997, %edx ## imm = 0x7CD
callq ___assert_rtn
.cfi_endproc
## -- End function
## fill_bits assert (copy).
.p2align 4, 0x90 ## -- Begin function do_zlib.cold.7
_do_zlib.cold.7: ## @do_zlib.cold.7
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
leaq L___func__.fill_bits(%rip), %rdi
leaq L_.str.13(%rip), %rsi
leaq L_.str.37(%rip), %rcx
movl $1997, %edx ## imm = 0x7CD
callq ___assert_rtn
.cfi_endproc
## -- End function
## fill_bits assert (copy).
.p2align 4, 0x90 ## -- Begin function do_zlib.cold.8
_do_zlib.cold.8: ## @do_zlib.cold.8
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
leaq L___func__.fill_bits(%rip), %rdi
leaq L_.str.13(%rip), %rsi
leaq L_.str.37(%rip), %rcx
movl $1997, %edx ## imm = 0x7CD
callq ___assert_rtn
.cfi_endproc
## -- End function
## fill_bits assert (copy).
.p2align 4, 0x90 ## -- Begin function do_zlib.cold.9
_do_zlib.cold.9: ## @do_zlib.cold.9
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
leaq L___func__.fill_bits(%rip), %rdi
leaq L_.str.13(%rip), %rsi
leaq L_.str.37(%rip), %rcx
movl $1997, %edx ## imm = 0x7CD
callq ___assert_rtn
.cfi_endproc
## -- End function
## fill_bits assert (copy).
.p2align 4, 0x90 ## -- Begin function do_zlib.cold.10
_do_zlib.cold.10: ## @do_zlib.cold.10
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
leaq L___func__.fill_bits(%rip), %rdi
leaq L_.str.13(%rip), %rsi
leaq L_.str.37(%rip), %rcx
movl $1997, %edx ## imm = 0x7CD
callq ___assert_rtn
.cfi_endproc
## -- End function
## fill_bits assert (copy).
.p2align 4, 0x90 ## -- Begin function do_zlib.cold.11
_do_zlib.cold.11: ## @do_zlib.cold.11
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
leaq L___func__.fill_bits(%rip), %rdi
leaq L_.str.13(%rip), %rsi
leaq L_.str.37(%rip), %rcx
movl $1997, %edx ## imm = 0x7CD
callq ___assert_rtn
.cfi_endproc
## -- End function
## parse_uncompressed_block: assert "a->num_bits == 0" (line 2158).
.p2align 4, 0x90 ## -- Begin function do_zlib.cold.12
_do_zlib.cold.12: ## @do_zlib.cold.12
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
leaq L___func__.parse_uncompressed_block(%rip), %rdi
leaq L_.str.13(%rip), %rsi
leaq L_.str.38(%rip), %rcx
movl $2158, %edx ## imm = 0x86E
callq ___assert_rtn
.cfi_endproc
## -- End function
## fill_bits assert (copy).
.p2align 4, 0x90 ## -- Begin function do_zlib.cold.13
_do_zlib.cold.13: ## @do_zlib.cold.13
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
leaq L___func__.fill_bits(%rip), %rdi
leaq L_.str.13(%rip), %rsi
leaq L_.str.37(%rip), %rcx
movl $1997, %edx ## imm = 0x7CD
callq ___assert_rtn
.cfi_endproc
## -- End function
## fill_bits assert (copy).
.p2align 4, 0x90 ## -- Begin function do_zlib.cold.14
_do_zlib.cold.14: ## @do_zlib.cold.14
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
leaq L___func__.fill_bits(%rip), %rdi
leaq L_.str.13(%rip), %rsi
leaq L_.str.37(%rip), %rcx
movl $1997, %edx ## imm = 0x7CD
callq ___assert_rtn
.cfi_endproc
## -- End function
## -----------------------------------------------------------------------
## Cold assertion-failure trampolines for the PNG/BMP/JPEG/format-convert
## paths.  Same shape as the do_zlib stubs above: load
## (function, file, line, expression) into rdi/rsi/edx/rcx and call
## __assert_rtn, which does not return.
## -----------------------------------------------------------------------
## compute_transparency: assert "out_n == 2 || out_n == 4" (line 2491).
.p2align 4, 0x90 ## -- Begin function parse_png_file.cold.1
_parse_png_file.cold.1: ## @parse_png_file.cold.1
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
leaq L___func__.compute_transparency(%rip), %rdi
leaq L_.str.13(%rip), %rsi
leaq L_.str.73(%rip), %rcx
movl $2491, %edx ## imm = 0x9BB
callq ___assert_rtn
.cfi_endproc
## -- End function
## bmp_load: assert "hsz == 108" (line 2933).
.p2align 4, 0x90 ## -- Begin function bmp_load.cold.1
_bmp_load.cold.1: ## @bmp_load.cold.1
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
leaq L___func__.bmp_load(%rip), %rdi
leaq L_.str.13(%rip), %rsi
leaq L_.str.79(%rip), %rcx
movl $2933, %edx ## imm = 0xB75
callq ___assert_rtn
.cfi_endproc
## -- End function
## decode: huffman code round-trip assert (L_.str.14, line 1027).
.p2align 4, 0x90 ## -- Begin function decode.cold.1
_decode.cold.1: ## @decode.cold.1
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
leaq L___func__.decode(%rip), %rdi
leaq L_.str.13(%rip), %rsi
leaq L_.str.14(%rip), %rcx
movl $1027, %edx ## imm = 0x403
callq ___assert_rtn
.cfi_endproc
## -- End function
## zbuild_huffman: assert "sizes[i] <= (1 << i)" (line 1935).
.p2align 4, 0x90 ## -- Begin function zbuild_huffman.cold.1
_zbuild_huffman.cold.1: ## @zbuild_huffman.cold.1
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
leaq L___func__.zbuild_huffman(%rip), %rdi
leaq L_.str.13(%rip), %rsi
leaq L_.str.42(%rip), %rcx
movl $1935, %edx ## imm = 0x78F
callq ___assert_rtn
.cfi_endproc
## -- End function
## zhuffman_decode: assert "z->size[b] == s" (line 2034).
.p2align 4, 0x90 ## -- Begin function zhuffman_decode.cold.1
_zhuffman_decode.cold.1: ## @zhuffman_decode.cold.1
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
leaq L___func__.zhuffman_decode(%rip), %rdi
leaq L_.str.13(%rip), %rsi
leaq L_.str.47(%rip), %rcx
movl $2034, %edx ## imm = 0x7F2
callq ___assert_rtn
.cfi_endproc
## -- End function
## fill_bits (inlined into zhuffman_decode): assert at line 1997.
.p2align 4, 0x90 ## -- Begin function zhuffman_decode.cold.2
_zhuffman_decode.cold.2: ## @zhuffman_decode.cold.2
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
leaq L___func__.fill_bits(%rip), %rdi
leaq L_.str.13(%rip), %rsi
leaq L_.str.37(%rip), %rcx
movl $1997, %edx ## imm = 0x7CD
callq ___assert_rtn
.cfi_endproc
## -- End function
## convert_format: assert "req_comp >= 1 && req_comp <= 4" (line 760).
.p2align 4, 0x90 ## -- Begin function convert_format.cold.1
_convert_format.cold.1: ## @convert_format.cold.1
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
leaq L___func__.convert_format(%rip), %rdi
leaq L_.str.13(%rip), %rsi
leaq L_.str.49(%rip), %rcx
movl $760, %edx ## imm = 0x2F8
callq ___assert_rtn
.cfi_endproc
## -- End function
## convert_format: assert "0" — unreachable switch default (line 789).
.p2align 4, 0x90 ## -- Begin function convert_format.cold.2
_convert_format.cold.2: ## @convert_format.cold.2
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
leaq L___func__.convert_format(%rip), %rdi
leaq L_.str.13(%rip), %rsi
leaq L_.str.50(%rip), %rcx
movl $789, %edx ## imm = 0x315
callq ___assert_rtn
.cfi_endproc
## -- End function
## create_png_image_raw: assert "img_n+1 == out_n" (line 2422).
.p2align 4, 0x90 ## -- Begin function create_png_image_raw.cold.1
_create_png_image_raw.cold.1: ## @create_png_image_raw.cold.1
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
leaq L___func__.create_png_image_raw(%rip), %rdi
leaq L_.str.13(%rip), %rsi
leaq L_.str.72(%rip), %rcx
movl $2422, %edx ## imm = 0x976
callq ___assert_rtn
.cfi_endproc
## -- End function
## create_png_image_raw: assert "out_n == s->img_n || out_n == s->img_n+1"
## (line 2372).
.p2align 4, 0x90 ## -- Begin function create_png_image_raw.cold.2
_create_png_image_raw.cold.2: ## @create_png_image_raw.cold.2
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
leaq L___func__.create_png_image_raw(%rip), %rdi
leaq L_.str.13(%rip), %rsi
leaq L_.str.69(%rip), %rcx
movl $2372, %edx ## imm = 0x944
callq ___assert_rtn
.cfi_endproc
## -- End function
## writefv: assert "0" — unrecognized format character (line 3812).
.p2align 4, 0x90 ## -- Begin function writefv.cold.1
_writefv.cold.1: ## @writefv.cold.1
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
leaq L___func__.writefv(%rip), %rdi
leaq L_.str.13(%rip), %rsi
leaq L_.str.50(%rip), %rcx
movl $3812, %edx ## imm = 0xEE4
callq ___assert_rtn
.cfi_endproc
## -- End function
.zerofill __DATA,__bss,_failure_reason,8,3 ## @failure_reason
.comm _loaders,256,4 ## @loaders
.zerofill __DATA,__bss,_max_loaders,4,2 ## @max_loaders
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "rb"
L_.str.1: ## @.str.1
.asciz "can't fopen"
L_.str.2: ## @.str.2
.asciz "unknown image type"
.section __DATA,__data
.p2align 2 ## @h2l_gamma_i
_h2l_gamma_i:
.long 0x3ee8ba2e ## float 0.454545438
.p2align 2 ## @h2l_scale_i
_h2l_scale_i:
.long 0x3f800000 ## float 1
.p2align 2 ## @l2h_gamma
_l2h_gamma:
.long 0x400ccccd ## float 2.20000005
.p2align 2 ## @l2h_scale
_l2h_scale:
.long 0x3f800000 ## float 1
.section __TEXT,__cstring,cstring_literals
L_.str.3: ## @.str.3
.asciz "11 4 22 44 44 22 444444"
L_.str.4: ## @.str.4
.asciz "111 221 2222 11"
.comm _stbi_png_partial,4,2 ## @stbi_png_partial
L_.str.5: ## @.str.5
.asciz "outofmem"
L_.str.6: ## @.str.6
.asciz "bad req_comp"
L_.str.7: ## @.str.7
.asciz "bad SOS component count"
L_.str.8: ## @.str.8
.asciz "bad SOS len"
L_.str.9: ## @.str.9
.asciz "bad DC huff"
L_.str.10: ## @.str.10
.asciz "bad AC huff"
L_.str.11: ## @.str.11
.asciz "bad SOS"
L_.str.12: ## @.str.12
.asciz "bad huffman code"
.section __TEXT,__const
.p2align 4 ## @dezigzag
_dezigzag:
.ascii "\000\001\b\020\t\002\003\n\021\030 \031\022\013\004\005\f\023\032!(0)\"\033\024\r\006\007\016\025\034#*1892+$\035\026\017\027\036%,3:;4-&\037'.5<=6/7>????????????????"
.p2align 4 ## @bmask
_bmask:
.long 0 ## 0x0
.long 1 ## 0x1
.long 3 ## 0x3
.long 7 ## 0x7
.long 15 ## 0xf
.long 31 ## 0x1f
.long 63 ## 0x3f
.long 127 ## 0x7f
.long 255 ## 0xff
.long 511 ## 0x1ff
.long 1023 ## 0x3ff
.long 2047 ## 0x7ff
.long 4095 ## 0xfff
.long 8191 ## 0x1fff
.long 16383 ## 0x3fff
.long 32767 ## 0x7fff
.long 65535 ## 0xffff
.section __TEXT,__cstring,cstring_literals
L___func__.decode: ## @__func__.decode
.asciz "decode"
L_.str.13: ## @.str.13
.asciz "247019479.c"
L_.str.14: ## @.str.14
.asciz "(((j->code_buffer) >> (j->code_bits - h->size[c])) & bmask[h->size[c]]) == h->code[c]"
L_.str.15: ## @.str.15
.asciz "expected marker"
L_.str.16: ## @.str.16
.asciz "progressive jpeg"
L_.str.17: ## @.str.17
.asciz "bad DRI len"
L_.str.18: ## @.str.18
.asciz "bad DQT type"
L_.str.19: ## @.str.19
.asciz "bad DQT table"
L_.str.20: ## @.str.20
.asciz "bad DHT header"
L_.str.21: ## @.str.21
.asciz "bad code lengths"
L_.str.22: ## @.str.22
.asciz "no SOI"
L_.str.23: ## @.str.23
.asciz "no SOF"
L_.str.24: ## @.str.24
.asciz "bad SOF len"
L_.str.25: ## @.str.25
.asciz "only 8-bit"
L_.str.26: ## @.str.26
.asciz "no header height"
L_.str.27: ## @.str.27
.asciz "0 width"
L_.str.28: ## @.str.28
.asciz "bad component count"
L_.str.29: ## @.str.29
.asciz "bad component ID"
L_.str.30: ## @.str.30
.asciz "bad H"
L_.str.31: ## @.str.31
.asciz "bad V"
L_.str.32: ## @.str.32
.asciz "bad TQ"
L_.str.33: ## @.str.33
.asciz "too large"
.zerofill __DATA,__bss,_default_distance,32,4 ## @default_distance
.zerofill __DATA,__bss,_default_length,288,4 ## @default_length
L_.str.34: ## @.str.34
.asciz "bad zlib header"
L_.str.35: ## @.str.35
.asciz "no preset dict"
L_.str.36: ## @.str.36
.asciz "bad compression"
L___func__.fill_bits: ## @__func__.fill_bits
.asciz "fill_bits"
L_.str.37: ## @.str.37
.asciz "z->code_buffer < (1U << z->num_bits)"
L___func__.parse_uncompressed_block: ## @__func__.parse_uncompressed_block
.asciz "parse_uncompressed_block"
L_.str.38: ## @.str.38
.asciz "a->num_bits == 0"
L_.str.39: ## @.str.39
.asciz "zlib corrupt"
L_.str.40: ## @.str.40
.asciz "read past buffer"
L_.str.41: ## @.str.41
.asciz "output buffer limit"
L___func__.zbuild_huffman: ## @__func__.zbuild_huffman
.asciz "zbuild_huffman"
L_.str.42: ## @.str.42
.asciz "sizes[i] <= (1 << i)"
L_.str.43: ## @.str.43
.asciz "bad codelengths"
.section __TEXT,__const
.p2align 4 ## @compute_huffman_codes.length_dezigzag
_compute_huffman_codes.length_dezigzag:
.ascii "\020\021\022\000\b\007\t\006\n\005\013\004\f\003\r\002\016\001\017"
.section __TEXT,__cstring,cstring_literals
L___func__.compute_huffman_codes: ## @__func__.compute_huffman_codes
.asciz "compute_huffman_codes"
L_.str.45: ## @.str.45
.asciz "c >= 0 && c < 19"
L_.str.46: ## @.str.46
.asciz "c == 18"
L___func__.zhuffman_decode: ## @__func__.zhuffman_decode
.asciz "zhuffman_decode"
L_.str.47: ## @.str.47
.asciz "z->size[b] == s"
.section __TEXT,__const
.p2align 4 ## @length_base
_length_base:
.long 3 ## 0x3
.long 4 ## 0x4
.long 5 ## 0x5
.long 6 ## 0x6
.long 7 ## 0x7
.long 8 ## 0x8
.long 9 ## 0x9
.long 10 ## 0xa
.long 11 ## 0xb
.long 13 ## 0xd
.long 15 ## 0xf
.long 17 ## 0x11
.long 19 ## 0x13
.long 23 ## 0x17
.long 27 ## 0x1b
.long 31 ## 0x1f
.long 35 ## 0x23
.long 43 ## 0x2b
.long 51 ## 0x33
.long 59 ## 0x3b
.long 67 ## 0x43
.long 83 ## 0x53
.long 99 ## 0x63
.long 115 ## 0x73
.long 131 ## 0x83
.long 163 ## 0xa3
.long 195 ## 0xc3
.long 227 ## 0xe3
.long 258 ## 0x102
.long 0 ## 0x0
.long 0 ## 0x0
.p2align 4 ## @length_extra
_length_extra:
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 1 ## 0x1
.long 1 ## 0x1
.long 1 ## 0x1
.long 1 ## 0x1
.long 2 ## 0x2
.long 2 ## 0x2
.long 2 ## 0x2
.long 2 ## 0x2
.long 3 ## 0x3
.long 3 ## 0x3
.long 3 ## 0x3
.long 3 ## 0x3
.long 4 ## 0x4
.long 4 ## 0x4
.long 4 ## 0x4
.long 4 ## 0x4
.long 5 ## 0x5
.long 5 ## 0x5
.long 5 ## 0x5
.long 5 ## 0x5
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.p2align 4 ## @dist_base
_dist_base:
.long 1 ## 0x1
.long 2 ## 0x2
.long 3 ## 0x3
.long 4 ## 0x4
.long 5 ## 0x5
.long 7 ## 0x7
.long 9 ## 0x9
.long 13 ## 0xd
.long 17 ## 0x11
.long 25 ## 0x19
.long 33 ## 0x21
.long 49 ## 0x31
.long 65 ## 0x41
.long 97 ## 0x61
.long 129 ## 0x81
.long 193 ## 0xc1
.long 257 ## 0x101
.long 385 ## 0x181
.long 513 ## 0x201
.long 769 ## 0x301
.long 1025 ## 0x401
.long 1537 ## 0x601
.long 2049 ## 0x801
.long 3073 ## 0xc01
.long 4097 ## 0x1001
.long 6145 ## 0x1801
.long 8193 ## 0x2001
.long 12289 ## 0x3001
.long 16385 ## 0x4001
.long 24577 ## 0x6001
.long 0 ## 0x0
.long 0 ## 0x0
.p2align 4 ## @dist_extra
_dist_extra:
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 0 ## 0x0
.long 1 ## 0x1
.long 1 ## 0x1
.long 2 ## 0x2
.long 2 ## 0x2
.long 3 ## 0x3
.long 3 ## 0x3
.long 4 ## 0x4
.long 4 ## 0x4
.long 5 ## 0x5
.long 5 ## 0x5
.long 6 ## 0x6
.long 6 ## 0x6
.long 7 ## 0x7
.long 7 ## 0x7
.long 8 ## 0x8
.long 8 ## 0x8
.long 9 ## 0x9
.long 9 ## 0x9
.long 10 ## 0xa
.long 10 ## 0xa
.long 11 ## 0xb
.long 11 ## 0xb
.long 12 ## 0xc
.long 12 ## 0xc
.long 13 ## 0xd
.long 13 ## 0xd
.long 0 ## 0x0
.long 0 ## 0x0
.section __TEXT,__cstring,cstring_literals
L_.str.48: ## @.str.48
.asciz "bad dist"
L___func__.convert_format: ## @__func__.convert_format
.asciz "convert_format"
L_.str.49: ## @.str.49
.asciz "req_comp >= 1 && req_comp <= 4"
L_.str.50: ## @.str.50
.asciz "0"
L_.str.51: ## @.str.51
.asciz "first not IHDR"
L_.str.52: ## @.str.52
.asciz "multiple IHDR"
L_.str.53: ## @.str.53
.asciz "bad IHDR len"
L_.str.54: ## @.str.54
.asciz "8bit only"
L_.str.55: ## @.str.55
.asciz "bad ctype"
L_.str.56: ## @.str.56
.asciz "bad comp method"
L_.str.57: ## @.str.57
.asciz "bad filter method"
L_.str.58: ## @.str.58
.asciz "bad interlace method"
L_.str.59: ## @.str.59
.asciz "0-pixel image"
L_.str.60: ## @.str.60
.asciz "invalid PLTE"
L_.str.61: ## @.str.61
.asciz "tRNS after IDAT"
L_.str.62: ## @.str.62
.asciz "tRNS before PLTE"
L_.str.63: ## @.str.63
.asciz "bad tRNS len"
L_.str.64: ## @.str.64
.asciz "tRNS with alpha"
L_.str.65: ## @.str.65
.asciz "no PLTE"
L_.str.66: ## @.str.66
.asciz "outofdata"
L_.str.67: ## @.str.67
.asciz "no IDAT"
.section __DATA,__data
.p2align 4 ## @parse_png_file.invalid_chunk
_parse_png_file.invalid_chunk:
.asciz "XXXX chunk not known"
.section __TEXT,__const
_check_png_header.png_sig: ## @check_png_header.png_sig
.ascii "\211PNG\r\n\032\n"
.section __TEXT,__cstring,cstring_literals
L_.str.68: ## @.str.68
.asciz "bad png sig"
.section __TEXT,__const
.p2align 4 ## @__const.create_png_image.xorig
l___const.create_png_image.xorig:
.long 0 ## 0x0
.long 4 ## 0x4
.long 0 ## 0x0
.long 2 ## 0x2
.long 0 ## 0x0
.long 1 ## 0x1
.long 0 ## 0x0
.p2align 4 ## @__const.create_png_image.yorig
l___const.create_png_image.yorig:
.long 0 ## 0x0
.long 0 ## 0x0
.long 4 ## 0x4
.long 0 ## 0x0
.long 2 ## 0x2
.long 0 ## 0x0
.long 1 ## 0x1
.p2align 4 ## @__const.create_png_image.xspc
l___const.create_png_image.xspc:
.long 8 ## 0x8
.long 8 ## 0x8
.long 4 ## 0x4
.long 4 ## 0x4
.long 2 ## 0x2
.long 2 ## 0x2
.long 1 ## 0x1
.p2align 4 ## @__const.create_png_image.yspc
l___const.create_png_image.yspc:
.long 8 ## 0x8
.long 8 ## 0x8
.long 8 ## 0x8
.long 4 ## 0x4
.long 4 ## 0x4
.long 2 ## 0x2
.long 2 ## 0x2
.section __TEXT,__cstring,cstring_literals
L___func__.create_png_image_raw: ## @__func__.create_png_image_raw
.asciz "create_png_image_raw"
L_.str.69: ## @.str.69
.asciz "out_n == s->img_n || out_n == s->img_n+1"
L_.str.70: ## @.str.70
.asciz "not enough pixels"
L_.str.71: ## @.str.71
.asciz "invalid filter"
.section __TEXT,__const
_first_row_filter: ## @first_row_filter
.ascii "\000\001\000\005\006"
.section __TEXT,__cstring,cstring_literals
L_.str.72: ## @.str.72
.asciz "img_n+1 == out_n"
L___func__.compute_transparency: ## @__func__.compute_transparency
.asciz "compute_transparency"
L_.str.73: ## @.str.73
.asciz "out_n == 2 || out_n == 4"
L_.str.74: ## @.str.74
.asciz "not BMP"
L_.str.75: ## @.str.75
.asciz "unknown BMP"
L_.str.76: ## @.str.76
.asciz "bad BMP"
L_.str.77: ## @.str.77
.asciz "monochrome"
L_.str.78: ## @.str.78
.asciz "BMP RLE"
L___func__.bmp_load: ## @__func__.bmp_load
.asciz "bmp_load"
L_.str.79: ## @.str.79
.asciz "hsz == 108"
L_.str.80: ## @.str.80
.asciz "invalid"
L_.str.81: ## @.str.81
.asciz "bad bpp"
L_.str.82: ## @.str.82
.asciz "bad masks"
L_.str.83: ## @.str.83
.asciz "not PSD"
L_.str.84: ## @.str.84
.asciz "wrong version"
L_.str.85: ## @.str.85
.asciz "wrong channel count"
L_.str.86: ## @.str.86
.asciz "unsupported bit depth"
L_.str.87: ## @.str.87
.asciz "wrong color format"
L_.str.89: ## @.str.89
.asciz "#?RADIANCE"
L_.str.90: ## @.str.90
.asciz "not HDR"
L_.str.91: ## @.str.91
.asciz "FORMAT=32-bit_rle_rgbe"
L_.str.92: ## @.str.92
.asciz "unsupported format"
L_.str.93: ## @.str.93
.asciz "-Y "
L_.str.94: ## @.str.94
.asciz "unsupported data layout"
L_.str.95: ## @.str.95
.asciz "+X "
L_.str.96: ## @.str.96
.asciz "invalid decoded scanline length"
L_.str.97: ## @.str.97
.asciz "wb"
L___func__.writefv: ## @__func__.writefv
.asciz "writefv"
L_.str.98: ## @.str.98
.asciz "111"
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
; -----------------------------------------------------------------------
; char *stbi_failure_reason(void)   [AArch64, AAPCS64]
; Returns the pointer stored in the global _failure_reason (set by the
; loaders on error) in x0.  The adrp/ldr pair is a standard PC-relative
; global load; the .loh directive lets the linker fuse it.
; -----------------------------------------------------------------------
.globl _stbi_failure_reason ; -- Begin function stbi_failure_reason
.p2align 2
_stbi_failure_reason: ; @stbi_failure_reason
.cfi_startproc
; %bb.0:
Lloh0:
adrp x8, _failure_reason@PAGE
Lloh1:
ldr x0, [x8, _failure_reason@PAGEOFF]
ret
.loh AdrpLdr Lloh0, Lloh1
.cfi_endproc
; -- End function
; -----------------------------------------------------------------------
; void stbi_image_free(void *retval_from_stbi_load)
; Thin wrapper over free(): the pointer is already in x0, so this is a
; direct tail call (b, not bl — free's ret returns to our caller).
; -----------------------------------------------------------------------
.globl _stbi_image_free ; -- Begin function stbi_image_free
.p2align 2
_stbi_image_free: ; @stbi_image_free
.cfi_startproc
; %bb.0:
b _free
.cfi_endproc
; -- End function
; -----------------------------------------------------------------------
; int stbi_register_loader(stbi_loader *loader)
; Linear scan of the 32-entry pointer table _loaders (base in x9,
; index in x8, 8-byte stride):
;   - entry == loader      -> already registered, return 1
;   - entry == NULL        -> store loader there, set _max_loaders to
;                             index+1, return 1
;   - all 32 slots in use  -> return 0
; In: x0 = loader.  Out: w0 = 1 on success, 0 when the table is full.
; -----------------------------------------------------------------------
.globl _stbi_register_loader ; -- Begin function stbi_register_loader
.p2align 2
_stbi_register_loader: ; @stbi_register_loader
.cfi_startproc
; %bb.0:
mov x8, #0
Lloh2:
adrp x9, _loaders@GOTPAGE
Lloh3:
ldr x9, [x9, _loaders@GOTPAGEOFF]
LBB2_1: ; =>This Inner Loop Header: Depth=1
ldr x10, [x9, x8, lsl #3]
cmp x10, x0
b.eq LBB2_5
; %bb.2: ; in Loop: Header=BB2_1 Depth=1
cbz x10, LBB2_6
; %bb.3: ; in Loop: Header=BB2_1 Depth=1
add x8, x8, #1
cmp x8, #32
b.ne LBB2_1
; %bb.4:
; Table full: report failure.
mov w0, #0
ret
LBB2_5:
; Loader already present: treat as success.
mov w0, #1
ret
LBB2_6:
; First free slot: register the loader and bump _max_loaders.
str x0, [x9, x8, lsl #3]
add w8, w8, #1
adrp x9, _max_loaders@PAGE
str w8, [x9, _max_loaders@PAGEOFF]
mov w0, #1
ret
.loh AdrpLdrGot Lloh2, Lloh3
.cfi_endproc
; -- End function
; stbi_uc *stbi_load(char const *filename, int *x, int *y, int *comp, int req_comp)
; fopen(filename, "wb"-style literal l_.str -- presumably "rb"; the string body
; is outside this chunk, so confirm against the cstring section) then delegates
; to stbi_load_from_file and fclose()s. On fopen failure, records l_.str.1 as
; the failure reason and returns NULL (x0 is still 0 from fopen).
; x19..x23 hold req_comp, comp, y, x, FILE* across the calls (callee-saved).
.globl _stbi_load ; -- Begin function stbi_load
.p2align 2
_stbi_load: ; @stbi_load
.cfi_startproc
; %bb.0:
stp x24, x23, [sp, #-64]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 64
stp x22, x21, [sp, #16] ; 16-byte Folded Spill
stp x20, x19, [sp, #32] ; 16-byte Folded Spill
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
mov x19, x4 ; save req_comp
mov x20, x3 ; save comp
mov x21, x2 ; save y
mov x22, x1 ; save x
Lloh4:
adrp x1, l_.str@PAGE ; fopen mode string
Lloh5:
add x1, x1, l_.str@PAGEOFF
bl _fopen ; x0 = filename (still in x0)
cbz x0, LBB3_2 ; NULL -> could not open
; %bb.1:
mov x23, x0 ; x23 = FILE*
mov x1, x22
mov x2, x21
mov x3, x20
mov x4, x19
bl _stbi_load_from_file
mov x19, x0 ; keep result across fclose
mov x0, x23
bl _fclose
mov x0, x19 ; return loaded image
b LBB3_3
LBB3_2:
Lloh6:
adrp x8, l_.str.1@PAGE ; failure_reason = l_.str.1 ("can't open" message)
Lloh7:
add x8, x8, l_.str.1@PAGEOFF
adrp x9, _failure_reason@PAGE
str x8, [x9, _failure_reason@PAGEOFF]
LBB3_3:
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
ldp x24, x23, [sp], #64 ; 16-byte Folded Reload
ret
.loh AdrpAdd Lloh4, Lloh5
.loh AdrpAdd Lloh6, Lloh7
.cfi_endproc
; -- End function
; stbi_uc *stbi_load_from_file(FILE *f, int *x, int *y, int *comp, int req_comp)
; Format dispatcher. Builds a ~14 KB stbi context on the stack (hence the
; __chkstk_darwin probe for the large allocation), sniffs the stream, and
; dispatches to the matching decoder, restoring the file position with
; ftell/fseek between probes:
;   JPEG (0xFF ... 0xD8 SOI)  -> load_jpeg_image
;   PNG  (stbi_png_test_file) -> do_png
;   BMP  (bmp_test)           -> bmp_load
;   PSD  (get32 == "8BPS")    -> psd_load
;   HDR  (stbi_hdr_test_file) -> hdr_load, then TAIL CALL to hdr_to_ldr
;   registered custom loaders -> tail branch through loader vtable (+24)
;   TGA  (stbi_tga_test_file) -> tga_load
;   otherwise -> failure_reason = l_.str.2, return NULL
; Register roles: x23=FILE*, x22=x, x21=y, x20=comp, x19=req_comp,
; x24=saved ftell position, x26=&failure_reason page (set in LBB4_6).
.globl _stbi_load_from_file ; -- Begin function stbi_load_from_file
.p2align 2
_stbi_load_from_file: ; @stbi_load_from_file
.cfi_startproc
; %bb.0:
stp x28, x27, [sp, #-96]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 96
stp x26, x25, [sp, #16] ; 16-byte Folded Spill
stp x24, x23, [sp, #32] ; 16-byte Folded Spill
stp x22, x21, [sp, #48] ; 16-byte Folded Spill
stp x20, x19, [sp, #64] ; 16-byte Folded Spill
stp x29, x30, [sp, #80] ; 16-byte Folded Spill
add x29, sp, #80
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
mov w9, #14096 ; bytes to probe for the big stack frame
Lloh8:
adrp x16, ___chkstk_darwin@GOTPAGE
Lloh9:
ldr x16, [x16, ___chkstk_darwin@GOTPAGEOFF]
blr x16 ; touch guard pages before moving sp
sub sp, sp, #3, lsl #12 ; =12288
sub sp, sp, #1808 ; total local area = 14096 bytes
mov x19, x4 ; req_comp
mov x20, x3 ; comp
mov x21, x2 ; y
mov x22, x1 ; x
mov x23, x0 ; FILE*
bl _ftell ; remember position for rewinds
mov x24, x0
mov x0, x23
bl _fgetc ; first byte of JPEG sniff
cmn w0, #1 ; EOF?
b.eq LBB4_6
; %bb.1:
mvn w8, w0
tst w8, #0xff ; first byte must be 0xFF
b.ne LBB4_6
LBB4_2: ; =>This Inner Loop Header: Depth=1
mov x0, x23
bl _fgetc ; skip 0xFF fill bytes until a marker byte
cmn w0, #1
csel w8, wzr, w0, eq ; treat EOF as 0
and w8, w8, #0xff
cmp w8, #255
b.ne LBB4_4
; %bb.3: ; in Loop: Header=BB4_2 Depth=1
cbnz x23, LBB4_2 ; NOTE(review): loop guard tests the FILE* (always
b LBB4_6 ; non-null here), mirroring the C source's at-eof check
LBB4_4:
cmp w8, #216 ; 0xD8 = JPEG SOI marker
b.ne LBB4_6
; %bb.5:
sxtw x1, w24 ; rewind to saved position
mov x0, x23
mov w2, #0 ; SEEK_SET
bl _fseek
str x23, [sp, #16] ; context.file = f
mov x0, sp ; x0 = &context
mov x1, x22
mov x2, x21
mov x3, x20
mov x4, x19
bl _load_jpeg_image
b LBB4_12
LBB4_6: ; not JPEG: record tentative failure, try PNG
Lloh10:
adrp x8, l_.str.22@PAGE
Lloh11:
add x8, x8, l_.str.22@PAGEOFF
adrp x26, _failure_reason@PAGE ; x26 kept live for LBB4_24
str x8, [x26, _failure_reason@PAGEOFF]
sxtw x1, w24
mov x0, x23
mov w2, #0
bl _fseek ; rewind before next probe
mov x0, x23
bl _stbi_png_test_file
cbz w0, LBB4_8
; %bb.7:
str x23, [sp, #16]
mov x0, sp
mov x1, x22
mov x2, x21
mov x3, x20
mov x4, x19
bl _do_png
b LBB4_12
LBB4_8: ; try BMP
mov x0, x23
bl _ftell
mov x24, x0
str x23, [sp, #16]
mov x0, sp
bl _bmp_test
mov x25, x0
sxtw x1, w24
mov x0, x23
mov w2, #0
bl _fseek
cbz w25, LBB4_10
; %bb.9:
str x23, [sp, #16]
mov x0, sp
mov x1, x22
mov x2, x21
mov x3, x20
mov x4, x19
bl _bmp_load
b LBB4_12
LBB4_10: ; try PSD: read big-endian u32 magic
mov x0, x23
bl _ftell
mov x24, x0
str x23, [sp, #16]
mov x0, sp
bl _get32
mov x25, x0
sxtw x1, w24
mov x0, x23
mov w2, #0
bl _fseek
mov w8, #20563 ; 0x38425053 = "8BPS" (PSD magic)
movk w8, #14402, lsl #16
cmp w25, w8
b.ne LBB4_13
; %bb.11:
str x23, [sp, #16]
mov x0, sp
mov x1, x22
mov x2, x21
mov x3, x20
mov x4, x19
bl _psd_load
LBB4_12: ; common epilogue: result already in x0
add sp, sp, #3, lsl #12 ; =12288
add sp, sp, #1808
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
ldp x26, x25, [sp, #16] ; 16-byte Folded Reload
ldp x28, x27, [sp], #96 ; 16-byte Folded Reload
ret
LBB4_13: ; try HDR
mov x0, x23
bl _stbi_hdr_test_file
cbz w0, LBB4_17
; %bb.14:
str x23, [sp, #16]
mov x0, sp
mov x1, x22
mov x2, x21
mov x3, x20
mov x4, x19
bl _hdr_load ; returns float* HDR data
ldr w1, [x22] ; width actually decoded
ldr w2, [x21] ; height actually decoded
cbnz w19, LBB4_16 ; req_comp == 0 -> use *comp instead
; %bb.15:
ldr w19, [x20]
LBB4_16:
mov x3, x19 ; comp for conversion
add sp, sp, #3, lsl #12 ; =12288
add sp, sp, #1808
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
ldp x26, x25, [sp, #16] ; 16-byte Folded Reload
ldp x28, x27, [sp], #96 ; 16-byte Folded Reload
b _hdr_to_ldr ; tail call: hdr_to_ldr(data, w, h, comp)
LBB4_17: ; try each registered custom loader
adrp x24, _max_loaders@PAGE
ldr w8, [x24, _max_loaders@PAGEOFF]
cmp w8, #1
b.lt LBB4_21 ; none registered
; %bb.18:
mov x25, #0 ; loader index
Lloh12:
adrp x27, _loaders@GOTPAGE
Lloh13:
ldr x27, [x27, _loaders@GOTPAGEOFF]
LBB4_19: ; =>This Inner Loop Header: Depth=1
ldr x8, [x27, x25, lsl #3] ; loaders[i]
ldr x8, [x8, #16] ; ->test_file callback (offset 16)
mov x0, x23
blr x8
cbnz w0, LBB4_23 ; loader recognized the stream
; %bb.20: ; in Loop: Header=BB4_19 Depth=1
add x25, x25, #1
ldrsw x8, [x24, _max_loaders@PAGEOFF]
cmp x25, x8
b.lt LBB4_19
LBB4_21: ; try TGA (no signature; heuristic test last)
mov x0, x23
bl _stbi_tga_test_file
cbz w0, LBB4_24
; %bb.22:
str x23, [sp, #16]
mov x0, sp
mov x1, x22
mov x2, x21
mov x3, x20
mov x4, x19
bl _tga_load
b LBB4_12
LBB4_23: ; tail call into the matched loader's load_from_file
ldr x8, [x27, x25, lsl #3]
ldr x5, [x8, #24] ; ->load_from_file callback (offset 24)
mov x0, x23
mov x1, x22
mov x2, x21
mov x3, x20
mov x4, x19
add sp, sp, #3, lsl #12 ; =12288
add sp, sp, #1808
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
ldp x26, x25, [sp, #16] ; 16-byte Folded Reload
ldp x28, x27, [sp], #96 ; 16-byte Folded Reload
br x5 ; frame fully torn down; callee returns to our caller
LBB4_24: ; no format matched
mov x0, #0 ; return NULL
Lloh14:
adrp x8, l_.str.2@PAGE ; failure_reason = l_.str.2
Lloh15:
add x8, x8, l_.str.2@PAGEOFF
str x8, [x26, _failure_reason@PAGEOFF] ; x26 set at LBB4_6
b LBB4_12
.loh AdrpLdrGot Lloh8, Lloh9
.loh AdrpAdd Lloh10, Lloh11
.loh AdrpLdrGot Lloh12, Lloh13
.loh AdrpAdd Lloh14, Lloh15
.cfi_endproc
; -- End function
; int stbi_jpeg_test_file(FILE *f)
; Non-destructive JPEG sniff: saves the position with ftell, requires the
; stream to start 0xFF, then skips any run of 0xFF bytes and accepts only if
; the next byte is 0xD8 (SOI marker). Always restores the position with fseek
; before returning. Returns 1 on match, 0 otherwise (and records l_.str.22 as
; the failure reason on mismatch).
; x19=FILE*, x20=saved position, w21=result.
.globl _stbi_jpeg_test_file ; -- Begin function stbi_jpeg_test_file
.p2align 2
_stbi_jpeg_test_file: ; @stbi_jpeg_test_file
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x19, x0
bl _ftell ; remember where we started
mov x20, x0
mov x0, x19
bl _fgetc
cmn w0, #1 ; EOF -> fail
b.eq LBB5_6
; %bb.1:
mvn w8, w0
tst w8, #0xff ; first byte must be 0xFF
b.ne LBB5_6
LBB5_2: ; =>This Inner Loop Header: Depth=1
mov x0, x19
bl _fgetc ; consume 0xFF fill bytes
cmn w0, #1
csel w8, wzr, w0, eq ; EOF reads as 0
and w8, w8, #0xff
cmp w8, #255
b.ne LBB5_4
; %bb.3: ; in Loop: Header=BB5_2 Depth=1
cbnz x19, LBB5_2 ; NOTE(review): guard is the FILE* (non-null here)
b LBB5_6
LBB5_4:
cmp w8, #216 ; 0xD8 = SOI
b.ne LBB5_6
; %bb.5:
mov w21, #1 ; match
b LBB5_7
LBB5_6:
mov w21, #0 ; no match; record reason
Lloh16:
adrp x8, l_.str.22@PAGE
Lloh17:
add x8, x8, l_.str.22@PAGEOFF
adrp x9, _failure_reason@PAGE
str x8, [x9, _failure_reason@PAGEOFF]
LBB5_7:
sxtw x1, w20 ; restore stream position
mov x0, x19
mov w2, #0 ; SEEK_SET
bl _fseek
mov x0, x21 ; return sniff result
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
.loh AdrpAdd Lloh16, Lloh17
.cfi_endproc
; -- End function
; stbi_uc *stbi_jpeg_load_from_file(FILE *f, int *x, int *y, int *comp, int req_comp)
; Builds the ~14 KB stbi context on the stack (chkstk-probed), stores the
; FILE* into it at offset 16, and forwards all remaining arguments (still in
; x1-x4) to load_jpeg_image with x0 = &context.
.globl _stbi_jpeg_load_from_file ; -- Begin function stbi_jpeg_load_from_file
.p2align 2
_stbi_jpeg_load_from_file: ; @stbi_jpeg_load_from_file
.cfi_startproc
; %bb.0:
stp x28, x27, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w27, -24
.cfi_offset w28, -32
mov w9, #14096 ; context size to probe
Lloh18:
adrp x16, ___chkstk_darwin@GOTPAGE
Lloh19:
ldr x16, [x16, ___chkstk_darwin@GOTPAGEOFF]
blr x16
sub sp, sp, #3, lsl #12 ; =12288
sub sp, sp, #1808 ; total = 14096 bytes of locals
str x0, [sp, #16] ; context.file = f
mov x0, sp ; x0 = &context; x1-x4 pass through
bl _load_jpeg_image
add sp, sp, #3, lsl #12 ; =12288
add sp, sp, #1808
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x28, x27, [sp], #32 ; 16-byte Folded Reload
ret
.loh AdrpLdrGot Lloh18, Lloh19
.cfi_endproc
; -- End function
; int stbi_png_test_file(FILE *f)
; Checks the 8-byte PNG signature 137 80 78 71 13 10 26 10
; ("\x89PNG\r\n\x1a\n") byte-by-byte with fgetc, restores the saved file
; position, and returns 1 on match / 0 otherwise (recording l_.str.68 as the
; failure reason on mismatch). A NULL FILE* fails immediately.
; x19=FILE*, x20=saved position, w21=result.
.globl _stbi_png_test_file ; -- Begin function stbi_png_test_file
.p2align 2
_stbi_png_test_file: ; @stbi_png_test_file
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x19, x0
bl _ftell
mov x20, x0
cbz x19, LBB7_10 ; NULL stream -> fail
; %bb.1:
mov x0, x19
bl _fgetc
cmp w0, #137 ; 0x89
b.ne LBB7_10
; %bb.2:
mov x0, x19
bl _fgetc
cmp w0, #80 ; 'P'
b.ne LBB7_10
; %bb.3:
mov x0, x19
bl _fgetc
cmp w0, #78 ; 'N'
b.ne LBB7_10
; %bb.4:
mov x0, x19
bl _fgetc
cmp w0, #71 ; 'G'
b.ne LBB7_10
; %bb.5:
mov x0, x19
bl _fgetc
cmp w0, #13 ; '\r'
b.ne LBB7_10
; %bb.6:
mov x0, x19
bl _fgetc
cmp w0, #10 ; '\n'
b.ne LBB7_10
; %bb.7:
mov x0, x19
bl _fgetc
cmp w0, #26 ; 0x1A (DOS EOF)
b.ne LBB7_10
; %bb.8:
mov x0, x19
bl _fgetc
cmp w0, #10 ; '\n'
b.ne LBB7_10
; %bb.9:
mov w21, #1 ; full signature matched
b LBB7_11
LBB7_10:
mov w21, #0
Lloh20:
adrp x8, l_.str.68@PAGE ; failure_reason = l_.str.68
Lloh21:
add x8, x8, l_.str.68@PAGEOFF
adrp x9, _failure_reason@PAGE
str x8, [x9, _failure_reason@PAGEOFF]
LBB7_11:
sxtw x1, w20 ; restore position
mov x0, x19
mov w2, #0 ; SEEK_SET
bl _fseek
mov x0, x21
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
.loh AdrpAdd Lloh20, Lloh21
.cfi_endproc
; -- End function
; stbi_uc *stbi_png_load_from_file(FILE *f, int *x, int *y, int *comp, int req_comp)
; Small context wrapper: stores the FILE* into a stack context at offset 16
; and forwards to do_png with x0 = &context (x1-x4 pass through unchanged).
.globl _stbi_png_load_from_file ; -- Begin function stbi_png_load_from_file
.p2align 2
_stbi_png_load_from_file: ; @stbi_png_load_from_file
.cfi_startproc
; %bb.0:
sub sp, sp, #80
.cfi_def_cfa_offset 80
stp x29, x30, [sp, #64] ; 16-byte Folded Spill
add x29, sp, #64
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
str x0, [sp, #16] ; context.file = f
mov x0, sp
bl _do_png
ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
add sp, sp, #80
ret
.cfi_endproc
; -- End function
; int stbi_bmp_test_file(FILE *f)
; Wraps bmp_test: saves the file position, builds a small stack context with
; the FILE* at offset 16 (sp+24 = ctx+16 for ctx at sp+8), runs the test,
; restores the position with fseek, and returns the test result.
; x19=FILE*, x20=saved position, x21=result.
.globl _stbi_bmp_test_file ; -- Begin function stbi_bmp_test_file
.p2align 2
_stbi_bmp_test_file: ; @stbi_bmp_test_file
.cfi_startproc
; %bb.0:
sub sp, sp, #96
.cfi_def_cfa_offset 96
stp x22, x21, [sp, #48] ; 16-byte Folded Spill
stp x20, x19, [sp, #64] ; 16-byte Folded Spill
stp x29, x30, [sp, #80] ; 16-byte Folded Spill
add x29, sp, #80
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x19, x0
bl _ftell
mov x20, x0
str x19, [sp, #24] ; context.file = f
add x0, sp, #8 ; x0 = &context
bl _bmp_test
mov x21, x0 ; keep result across fseek
sxtw x1, w20
mov x0, x19
mov w2, #0 ; SEEK_SET
bl _fseek
mov x0, x21
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
add sp, sp, #96
ret
.cfi_endproc
; -- End function
; stbi_uc *stbi_bmp_load_from_file(FILE *f, int *x, int *y, int *comp, int req_comp)
; Context wrapper: FILE* goes into the stack context (offset 16), then
; bmp_load(&context, x, y, comp, req_comp) with x1-x4 passing through.
.globl _stbi_bmp_load_from_file ; -- Begin function stbi_bmp_load_from_file
.p2align 2
_stbi_bmp_load_from_file: ; @stbi_bmp_load_from_file
.cfi_startproc
; %bb.0:
sub sp, sp, #64
.cfi_def_cfa_offset 64
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
str x0, [sp, #24] ; context.file = f
add x0, sp, #8 ; x0 = &context
bl _bmp_load
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
add sp, sp, #64
ret
.cfi_endproc
; -- End function
; int stbi_psd_test_file(FILE *f)
; Reads a big-endian u32 via get32 on a stack context and compares it against
; 0x38425053 ("8BPS", the PSD signature). Restores the saved file position
; and returns 1 on match, 0 otherwise.
; x19=FILE*, x20=saved position, w21=result.
.globl _stbi_psd_test_file ; -- Begin function stbi_psd_test_file
.p2align 2
_stbi_psd_test_file: ; @stbi_psd_test_file
.cfi_startproc
; %bb.0:
sub sp, sp, #96
.cfi_def_cfa_offset 96
stp x22, x21, [sp, #48] ; 16-byte Folded Spill
stp x20, x19, [sp, #64] ; 16-byte Folded Spill
stp x29, x30, [sp, #80] ; 16-byte Folded Spill
add x29, sp, #80
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x19, x0
bl _ftell
mov x20, x0
str x19, [sp, #24] ; context.file = f
add x0, sp, #8 ; x0 = &context
bl _get32
mov w8, #20563 ; 0x5053 ("PS")
movk w8, #14402, lsl #16 ; | 0x3842 << 16 -> 0x38425053 = "8BPS"
cmp w0, w8
cset w21, eq
sxtw x1, w20 ; restore position
mov x0, x19
mov w2, #0 ; SEEK_SET
bl _fseek
mov x0, x21
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
add sp, sp, #96
ret
.cfi_endproc
; -- End function
; stbi_uc *stbi_psd_load_from_file(FILE *f, int *x, int *y, int *comp, int req_comp)
; Context wrapper: FILE* goes into the stack context (offset 16), then
; psd_load(&context, x, y, comp, req_comp) with x1-x4 passing through.
.globl _stbi_psd_load_from_file ; -- Begin function stbi_psd_load_from_file
.p2align 2
_stbi_psd_load_from_file: ; @stbi_psd_load_from_file
.cfi_startproc
; %bb.0:
sub sp, sp, #64
.cfi_def_cfa_offset 64
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
str x0, [sp, #24] ; context.file = f
add x0, sp, #8 ; x0 = &context
bl _psd_load
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
add sp, sp, #64
ret
.cfi_endproc
; -- End function
; int stbi_hdr_test_file(FILE *f)
; Checks the Radiance HDR signature "#?RADIANCE\n" (bytes 35 63 82 65 68 73
; 65 78 67 69 10) byte-by-byte with fgetc, restores the saved file position,
; and returns 1 on match / 0 otherwise. A NULL FILE* fails immediately.
; Unlike the PNG/JPEG testers, this one does not set failure_reason.
; x19=FILE*, x20=saved position, w21=result.
.globl _stbi_hdr_test_file ; -- Begin function stbi_hdr_test_file
.p2align 2
_stbi_hdr_test_file: ; @stbi_hdr_test_file
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x19, x0
bl _ftell
mov x20, x0
cbz x19, LBB13_12 ; NULL stream -> fail
; %bb.1:
mov x0, x19
bl _fgetc
cmp w0, #35 ; '#'
b.ne LBB13_12
; %bb.2:
mov x0, x19
bl _fgetc
cmp w0, #63 ; '?'
b.ne LBB13_12
; %bb.3:
mov x0, x19
bl _fgetc
cmp w0, #82 ; 'R'
b.ne LBB13_12
; %bb.4:
mov x0, x19
bl _fgetc
cmp w0, #65 ; 'A'
b.ne LBB13_12
; %bb.5:
mov x0, x19
bl _fgetc
cmp w0, #68 ; 'D'
b.ne LBB13_12
; %bb.6:
mov x0, x19
bl _fgetc
cmp w0, #73 ; 'I'
b.ne LBB13_12
; %bb.7:
mov x0, x19
bl _fgetc
cmp w0, #65 ; 'A'
b.ne LBB13_12
; %bb.8:
mov x0, x19
bl _fgetc
cmp w0, #78 ; 'N'
b.ne LBB13_12
; %bb.9:
mov x0, x19
bl _fgetc
cmp w0, #67 ; 'C'
b.ne LBB13_12
; %bb.10:
mov x0, x19
bl _fgetc
cmp w0, #69 ; 'E'
b.ne LBB13_12
; %bb.11:
mov x0, x19
bl _fgetc
cmp w0, #10 ; '\n' terminates the signature line
cset w21, eq
b LBB13_13
LBB13_12:
mov w21, #0
LBB13_13:
sxtw x1, w20 ; restore position
mov x0, x19
mov w2, #0 ; SEEK_SET
bl _fseek
mov x0, x21
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
; float *stbi_hdr_load_from_file(FILE *f, int *x, int *y, int *comp, int req_comp)
; Context wrapper: FILE* goes into the stack context (offset 16), then
; hdr_load(&context, x, y, comp, req_comp) with x1-x4 passing through.
; Note: returns raw float HDR data; no hdr_to_ldr conversion here.
.globl _stbi_hdr_load_from_file ; -- Begin function stbi_hdr_load_from_file
.p2align 2
_stbi_hdr_load_from_file: ; @stbi_hdr_load_from_file
.cfi_startproc
; %bb.0:
sub sp, sp, #64
.cfi_def_cfa_offset 64
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
str x0, [sp, #24] ; context.file = f
add x0, sp, #8 ; x0 = &context
bl _hdr_load
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
add sp, sp, #64
ret
.cfi_endproc
; -- End function
.section __TEXT,__literal16,16byte_literals
.p2align 4 ; -- Begin function hdr_to_ldr
; TBL shuffle masks for hdr_to_ldr's vectorized int32 -> uint8 narrowing.
; Each index selects byte 0 (little-endian low byte) of consecutive 32-bit
; lanes across the source register group.
; lCPI15_0: gathers low bytes of 16 int32 lanes (a 4-register tbl source).
lCPI15_0:
.byte 0 ; 0x0
.byte 4 ; 0x4
.byte 8 ; 0x8
.byte 12 ; 0xc
.byte 16 ; 0x10
.byte 20 ; 0x14
.byte 24 ; 0x18
.byte 28 ; 0x1c
.byte 32 ; 0x20
.byte 36 ; 0x24
.byte 40 ; 0x28
.byte 44 ; 0x2c
.byte 48 ; 0x30
.byte 52 ; 0x34
.byte 56 ; 0x38
.byte 60 ; 0x3c
; lCPI15_1: gathers low bytes of 8 int32 lanes (2-register tbl source);
; 0xFF indices are out of range, so tbl writes 0 into those result bytes.
lCPI15_1:
.byte 0 ; 0x0
.byte 4 ; 0x4
.byte 8 ; 0x8
.byte 12 ; 0xc
.byte 16 ; 0x10
.byte 20 ; 0x14
.byte 24 ; 0x18
.byte 28 ; 0x1c
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 255 ; 0xff
.section __TEXT,__text,regular,pure_instructions
.p2align 2
; stbi_uc *hdr_to_ldr(float *data, int x, int y, int comp)  -- local helper
; Converts linear float HDR pixels to 8-bit LDR: out = clamp(pow(in *
; h2l_scale_i, h2l_gamma_i) * 255 + 0.5, 0, 255) for the color components of
; each pixel; the remaining component (alpha when comp is even: the per-pixel
; gamma count is comp + (comp&1) - 1) is scaled linearly without pow.
; Frees the input buffer in all paths. On malloc failure records l_.str.5 and
; returns NULL. The inner component loops have 16-wide (LBB15_14) and 8-wide
; (LBB15_11) SIMD versions that batch the scalar pow() calls and narrow the
; results with tbl using the lCPI15_* masks, plus a scalar tail (LBB15_17).
; Key registers: x28=output buffer, x23=pixel index, x10=pixel count,
; x25=comp (stride), w24=number of gamma-corrected components, s9=0.5,
; w26=255.0f bits, d8=gamma exponent, q2/v4=scale splat; q-spills in the
; frame keep vectors alive across the pow() calls (which clobber v0-v7).
_hdr_to_ldr: ; @hdr_to_ldr
.cfi_startproc
; %bb.0:
stp d11, d10, [sp, #-128]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 128
stp d9, d8, [sp, #16] ; 16-byte Folded Spill
stp x28, x27, [sp, #32] ; 16-byte Folded Spill
stp x26, x25, [sp, #48] ; 16-byte Folded Spill
stp x24, x23, [sp, #64] ; 16-byte Folded Spill
stp x22, x21, [sp, #80] ; 16-byte Folded Spill
stp x20, x19, [sp, #96] ; 16-byte Folded Spill
stp x29, x30, [sp, #112] ; 16-byte Folded Spill
add x29, sp, #112
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
.cfi_offset b8, -104
.cfi_offset b9, -112
.cfi_offset b10, -120
.cfi_offset b11, -128
sub sp, sp, #400 ; vector spill area
; kill: def $w3 killed $w3 def $x3
mov x19, x0 ; x19 = input float buffer
mul w20, w2, w1 ; w20 = x * y (pixel count)
str x3, [sp, #40] ; 8-byte Folded Spill -- comp
mul w8, w20, w3 ; total output bytes = pixels * comp
sxtw x0, w8
bl _malloc
mov x28, x0 ; x28 = output buffer
cbz x0, LBB15_22 ; allocation failed
; %bb.1:
str x19, [sp, #16] ; 8-byte Folded Spill -- keep input ptr for free()
cmp w20, #1
b.lt LBB15_21 ; zero pixels: nothing to convert
; %bb.2: ; hoist loop-invariant setup
mov x10, x20 ; x10 = pixel count
mov x23, #0 ; x23 = pixel index
ldr x9, [sp, #40] ; 8-byte Folded Reload
and w8, w9, #0x1
add w8, w9, w8
sub w24, w8, #1 ; w24 = comp + (comp&1) - 1 = #gamma'd components
Lloh22:
adrp x8, _h2l_scale_i@PAGE
Lloh23:
ldr s2, [x8, _h2l_scale_i@PAGEOFF] ; s2 = scale
Lloh24:
adrp x8, _h2l_gamma_i@PAGE
Lloh25:
ldr s0, [x8, _h2l_gamma_i@PAGEOFF]
fcvt d8, s0 ; d8 = gamma as double (pow exponent)
sxtw x25, w9 ; x25 = comp (output row stride per pixel)
and x8, x24, #0xfffffff0 ; 16-wide vector trip count
str x8, [sp, #8] ; 8-byte Folded Spill
dup.4s v4, v2[0] ; v4 = scale splat
and x27, x24, #0xfffffff8 ; 8-wide vector trip count
sbfiz x8, x9, #2, #32 ; comp * 4 = input stride in bytes
str x8, [sp, #32] ; 8-byte Folded Spill
mov w26, #1132396544 ; 0x437F0000 = 255.0f
fmov s9, #0.50000000 ; rounding bias
movi d10, #0000000000000000 ; 0.0f for clamping
dup.4s v0, w26 ; 255.0f splat
str q0, [sp, #208] ; 16-byte Folded Spill
Lloh26:
adrp x8, lCPI15_1@PAGE ; 8-lane narrowing tbl mask
Lloh27:
ldr q0, [x8, lCPI15_1@PAGEOFF]
str q0, [sp, #64] ; 16-byte Folded Spill
Lloh28:
adrp x8, lCPI15_0@PAGE ; 16-lane narrowing tbl mask
Lloh29:
ldr q0, [x8, lCPI15_0@PAGEOFF]
str q0, [sp, #48] ; 16-byte Folded Spill
ldr x22, [sp, #16] ; 8-byte Folded Reload -- x22 = input cursor
mov x21, x28 ; x21 = output cursor
str x20, [sp, #24] ; 8-byte Folded Spill
stur q2, [x29, #-144] ; 16-byte Folded Spill -- scale (scalar path)
str q4, [sp, #224] ; 16-byte Folded Spill -- scale splat
str x28, [sp] ; 8-byte Folded Spill
b LBB15_4
LBB15_3: ; in Loop: Header=BB15_4 Depth=1 -- advance to next pixel
add x23, x23, #1
add x21, x21, x25 ; out += comp
ldr x8, [sp, #32] ; 8-byte Folded Reload
add x22, x22, x8 ; in += comp floats
cmp x23, x10
b.eq LBB15_21 ; all pixels done
LBB15_4: ; =>This Loop Header: Depth=1 -- per-pixel loop
; Child Loop BB15_14 Depth 2
; Child Loop BB15_11 Depth 2
; Child Loop BB15_17 Depth 2
cmp w24, #1
b.lt LBB15_7 ; no gamma'd components
; %bb.5: ; in Loop: Header=BB15_4 Depth=1
cmp w24, #8
b.hs LBB15_8 ; enough lanes for SIMD
; %bb.6: ; in Loop: Header=BB15_4 Depth=1
mov x20, #0
b LBB15_17 ; scalar-only path
LBB15_7: ; in Loop: Header=BB15_4 Depth=1
mov w8, #0
b LBB15_19
LBB15_8: ; in Loop: Header=BB15_4 Depth=1
cmp w24, #16
b.hs LBB15_13 ; 16-wide path available
; %bb.9: ; in Loop: Header=BB15_4 Depth=1
mov x19, #0
LBB15_10: ; in Loop: Header=BB15_4 Depth=1 -- enter 8-wide loop
lsl x20, x19, #2
LBB15_11: ; Parent Loop BB15_4 Depth=1
; => This Inner Loop Header: Depth=2
; 8 floats: scale, widen to double, 8 scalar pow() calls,
; narrow back, fma(+0.5)*255, clamp [0,255], cvt to int,
; tbl-pack low bytes, store 8 output bytes.
add x8, x22, x20
ldp q1, q0, [x8]
fmul.4s v1, v1, v4
fmul.4s v0, v0, v2[0]
fcvtl2 v2.2d, v0.4s
fcvtl v0.2d, v0.2s
stp q0, q2, [x29, #-176] ; 32-byte Folded Spill
fcvtl2 v0.2d, v1.4s
stur q0, [x29, #-192] ; 16-byte Folded Spill
fcvtl v0.2d, v1.2s
stur q0, [x29, #-224] ; 16-byte Folded Spill
; kill: def $d0 killed $d0 killed $q0
fmov d1, d8 ; exponent = gamma
bl _pow
stur q0, [x29, #-208] ; 16-byte Folded Spill
ldur q0, [x29, #-224] ; 16-byte Folded Reload
mov d0, v0[1]
fmov d1, d8
bl _pow
stur q0, [x29, #-224] ; 16-byte Folded Spill
ldur q0, [x29, #-192] ; 16-byte Folded Reload
; kill: def $d0 killed $d0 killed $q0
fmov d1, d8
bl _pow
stur q0, [x29, #-240] ; 16-byte Folded Spill
ldur q0, [x29, #-192] ; 16-byte Folded Reload
mov d0, v0[1]
fmov d1, d8
bl _pow
str q0, [sp, #256] ; 16-byte Folded Spill
ldur q0, [x29, #-176] ; 16-byte Folded Reload
; kill: def $d0 killed $d0 killed $q0
fmov d1, d8
bl _pow
stur q0, [x29, #-192] ; 16-byte Folded Spill
ldur q0, [x29, #-176] ; 16-byte Folded Reload
mov d0, v0[1]
fmov d1, d8
bl _pow
stur q0, [x29, #-176] ; 16-byte Folded Spill
ldur q0, [x29, #-160] ; 16-byte Folded Reload
; kill: def $d0 killed $d0 killed $q0
fmov d1, d8
bl _pow
str q0, [sp, #240] ; 16-byte Folded Spill
ldur q0, [x29, #-160] ; 16-byte Folded Reload
mov d0, v0[1]
fmov d1, d8
bl _pow
; recombine the 8 pow() results and finish the pipeline
movi.2d v5, #0000000000000000
ldp q3, q4, [sp, #208] ; 32-byte Folded Reload
; kill: def $d0 killed $d0 def $q0
ldp q2, q6, [x29, #-240] ; 32-byte Folded Reload
ldp q7, q1, [sp, #240] ; 32-byte Folded Reload
mov.d v2[1], v1[0]
ldur q1, [x29, #-208] ; 16-byte Folded Reload
mov.d v1[1], v6[0]
mov.d v7[1], v0[0]
ldp q0, q6, [x29, #-192] ; 32-byte Folded Reload
mov.d v0[1], v6[0]
fcvtn v0.2s, v0.2d
fcvtn2 v0.4s, v7.2d
fcvtn v1.2s, v1.2d
fcvtn2 v1.4s, v2.2d
movi.4s v2, #63, lsl #24 ; 0.5f splat
fmla.4s v2, v3, v1 ; v * 255 + 0.5
movi.4s v1, #63, lsl #24
fmla.4s v1, v3, v0
fmax.4s v0, v1, v5 ; clamp low at 0
fmax.4s v1, v2, v5
fmin.4s v1, v1, v3 ; clamp high at 255
fmin.4s v0, v0, v3
fcvtzs.4s v3, v0
fcvtzs.4s v2, v1
ldr q0, [sp, #64] ; 16-byte Folded Reload -- lCPI15_1 mask
tbl.16b v0, { v2, v3 }, v0 ; pack 8 low bytes
ldur q2, [x29, #-144] ; 16-byte Folded Reload
str d0, [x21, x19] ; store 8 LDR bytes
add x19, x19, #8
add x20, x20, #32
cmp x27, x19
b.ne LBB15_11
; %bb.12: ; in Loop: Header=BB15_4 Depth=1
mov x20, x27
mov x8, x24
cmp x27, x24
ldr x10, [sp, #24] ; 8-byte Folded Reload
b.ne LBB15_17 ; scalar tail for leftover lanes
b LBB15_19
LBB15_13: ; in Loop: Header=BB15_4 Depth=1 -- 16-wide loop
ldr x28, [sp, #8] ; 8-byte Folded Reload -- x28 = lanes remaining
mov x20, x22
mov x19, x21
LBB15_14: ; Parent Loop BB15_4 Depth=1
; => This Inner Loop Header: Depth=2
; same pipeline as LBB15_11 but 16 floats / 16 pow() calls
; per iteration, packed with the 4-register tbl (lCPI15_0).
ldp q1, q0, [x20, #32]
ldp q3, q2, [x20]
fmul.4s v3, v3, v4
fmul.4s v2, v2, v4
fmul.4s v1, v1, v4
fmul.4s v0, v0, v4
fcvtl2 v4.2d, v0.4s
fcvtl v0.2d, v0.2s
stp q0, q4, [x29, #-176] ; 32-byte Folded Spill
fcvtl2 v4.2d, v1.4s
fcvtl v0.2d, v1.2s
stp q0, q4, [x29, #-208] ; 32-byte Folded Spill
fcvtl2 v4.2d, v2.4s
fcvtl v0.2d, v2.2s
stp q0, q4, [x29, #-240] ; 32-byte Folded Spill
fcvtl2 v0.2d, v3.4s
str q0, [sp, #256] ; 16-byte Folded Spill
fcvtl v0.2d, v3.2s
str q0, [sp, #192] ; 16-byte Folded Spill
; kill: def $d0 killed $d0 killed $q0
fmov d1, d8
bl _pow
str q0, [sp, #240] ; 16-byte Folded Spill
ldr q0, [sp, #192] ; 16-byte Folded Reload
mov d0, v0[1]
fmov d1, d8
bl _pow
str q0, [sp, #192] ; 16-byte Folded Spill
ldr q0, [sp, #256] ; 16-byte Folded Reload
; kill: def $d0 killed $d0 killed $q0
fmov d1, d8
bl _pow
str q0, [sp, #176] ; 16-byte Folded Spill
ldr q0, [sp, #256] ; 16-byte Folded Reload
mov d0, v0[1]
fmov d1, d8
bl _pow
str q0, [sp, #160] ; 16-byte Folded Spill
ldur q0, [x29, #-240] ; 16-byte Folded Reload
; kill: def $d0 killed $d0 killed $q0
fmov d1, d8
bl _pow
str q0, [sp, #256] ; 16-byte Folded Spill
ldur q0, [x29, #-240] ; 16-byte Folded Reload
mov d0, v0[1]
fmov d1, d8
bl _pow
stur q0, [x29, #-240] ; 16-byte Folded Spill
ldur q0, [x29, #-224] ; 16-byte Folded Reload
; kill: def $d0 killed $d0 killed $q0
fmov d1, d8
bl _pow
str q0, [sp, #144] ; 16-byte Folded Spill
ldur q0, [x29, #-224] ; 16-byte Folded Reload
mov d0, v0[1]
fmov d1, d8
bl _pow
stur q0, [x29, #-224] ; 16-byte Folded Spill
ldur q0, [x29, #-208] ; 16-byte Folded Reload
; kill: def $d0 killed $d0 killed $q0
fmov d1, d8
bl _pow
str q0, [sp, #128] ; 16-byte Folded Spill
ldur q0, [x29, #-208] ; 16-byte Folded Reload
mov d0, v0[1]
fmov d1, d8
bl _pow
stur q0, [x29, #-208] ; 16-byte Folded Spill
ldur q0, [x29, #-192] ; 16-byte Folded Reload
; kill: def $d0 killed $d0 killed $q0
fmov d1, d8
bl _pow
str q0, [sp, #112] ; 16-byte Folded Spill
ldur q0, [x29, #-192] ; 16-byte Folded Reload
mov d0, v0[1]
fmov d1, d8
bl _pow
stur q0, [x29, #-192] ; 16-byte Folded Spill
ldur q0, [x29, #-176] ; 16-byte Folded Reload
; kill: def $d0 killed $d0 killed $q0
fmov d1, d8
bl _pow
str q0, [sp, #96] ; 16-byte Folded Spill
ldur q0, [x29, #-176] ; 16-byte Folded Reload
mov d0, v0[1]
fmov d1, d8
bl _pow
stur q0, [x29, #-176] ; 16-byte Folded Spill
ldur q0, [x29, #-160] ; 16-byte Folded Reload
; kill: def $d0 killed $d0 killed $q0
fmov d1, d8
bl _pow
str q0, [sp, #80] ; 16-byte Folded Spill
ldur q0, [x29, #-160] ; 16-byte Folded Reload
mov d0, v0[1]
fmov d1, d8
bl _pow
; recombine the 16 pow() results, fma/clamp/convert, pack
movi.2d v6, #0000000000000000
; kill: def $d0 killed $d0 def $q0
ldp q18, q17, [sp, #80] ; 32-byte Folded Reload
mov.d v18[1], v0[0]
ldur q0, [x29, #-176] ; 16-byte Folded Reload
mov.d v17[1], v0[0]
ldp q16, q7, [sp, #112] ; 32-byte Folded Reload
ldur q0, [x29, #-192] ; 16-byte Folded Reload
mov.d v16[1], v0[0]
ldur q0, [x29, #-208] ; 16-byte Folded Reload
mov.d v7[1], v0[0]
ldp q0, q2, [sp, #160] ; 32-byte Folded Reload
mov.d v2[1], v0[0]
ldr q1, [sp, #240] ; 16-byte Folded Reload
ldp q0, q5, [sp, #192] ; 32-byte Folded Reload
mov.d v1[1], v0[0]
ldr q4, [sp, #144] ; 16-byte Folded Reload
ldp q3, q0, [x29, #-240] ; 32-byte Folded Reload
mov.d v4[1], v0[0]
ldr q0, [sp, #256] ; 16-byte Folded Reload
mov.d v0[1], v3[0]
fcvtn v0.2s, v0.2d
fcvtn2 v0.4s, v4.2d
fcvtn v1.2s, v1.2d
fcvtn2 v1.4s, v2.2d
fcvtn v2.2s, v7.2d
fcvtn2 v2.4s, v16.2d
fcvtn v3.2s, v17.2d
fcvtn2 v3.4s, v18.2d
movi.4s v4, #63, lsl #24 ; 0.5f splat
fmla.4s v4, v5, v3
movi.4s v3, #63, lsl #24
fmla.4s v3, v5, v2
movi.4s v2, #63, lsl #24
fmla.4s v2, v5, v1
movi.4s v1, #63, lsl #24
fmla.4s v1, v5, v0
fmax.4s v0, v1, v6 ; clamp low at 0
fmax.4s v1, v2, v6
fmax.4s v2, v3, v6
fmax.4s v3, v4, v6
fmin.4s v3, v3, v5 ; clamp high at 255
fmin.4s v2, v2, v5
fmin.4s v1, v1, v5
fmin.4s v0, v0, v5
fcvtzs.4s v5, v0
fcvtzs.4s v4, v1
fcvtzs.4s v6, v2
fcvtzs.4s v7, v3
ldr q0, [sp, #48] ; 16-byte Folded Reload -- lCPI15_0 mask
tbl.16b v0, { v4, v5, v6, v7 }, v0 ; pack 16 low bytes
ldr q4, [sp, #224] ; 16-byte Folded Reload
str q0, [x19], #16 ; store 16 LDR bytes
add x20, x20, #64
subs x28, x28, #16
b.ne LBB15_14
; %bb.15: ; in Loop: Header=BB15_4 Depth=1
mov x8, x24
ldp x28, x9, [sp] ; 16-byte Folded Reload
cmp x9, x24
ldr x10, [sp, #24] ; 8-byte Folded Reload
ldur q2, [x29, #-144] ; 16-byte Folded Reload
b.eq LBB15_19 ; no remainder lanes
; %bb.16: ; in Loop: Header=BB15_4 Depth=1
ldr x20, [sp, #8] ; 8-byte Folded Reload
mov x19, x20
tbnz w24, #3, LBB15_10 ; >=8 left: take the 8-wide loop first
LBB15_17: ; Parent Loop BB15_4 Depth=1
; => This Inner Loop Header: Depth=2
; scalar tail: one component at a time
ldr s0, [x22, x20, lsl #2]
fmul s0, s0, s2 ; * scale
fcvt d0, s0
fmov d1, d8 ; exponent = gamma
bl _pow
ldur q2, [x29, #-144] ; 16-byte Folded Reload
fcvt s0, d0
fmov s1, w26 ; 255.0f
fmadd s0, s0, s1, s9 ; * 255 + 0.5
fmax s0, s0, s10 ; clamp low at 0
fmin s0, s0, s1 ; clamp high at 255
fcvtzs w8, s0
strb w8, [x21, x20]
add x20, x20, #1
cmp x24, x20
b.ne LBB15_17
; %bb.18: ; in Loop: Header=BB15_4 Depth=1
mov x8, x24
ldr x10, [sp, #24] ; 8-byte Folded Reload
ldr q4, [sp, #224] ; 16-byte Folded Reload
LBB15_19: ; in Loop: Header=BB15_4 Depth=1
ldr x9, [sp, #40] ; 8-byte Folded Reload
cmp w8, w9 ; any non-gamma'd component left?
b.ge LBB15_3
; %bb.20: ; in Loop: Header=BB15_4 Depth=1
; final component (presumably alpha): linear scale, no pow
mul x9, x23, x25
add x8, x9, w8, sxtw
ldr x9, [sp, #16] ; 8-byte Folded Reload
ldr s0, [x9, x8, lsl #2]
fmov s1, w26 ; 255.0f
fmadd s0, s0, s1, s9 ; * 255 + 0.5
fmax s0, s0, s10
fmin s0, s0, s1
fcvtzs w9, s0
strb w9, [x28, x8]
b LBB15_3
LBB15_21: ; success: free input, return output
ldr x0, [sp, #16] ; 8-byte Folded Reload
bl _free
b LBB15_23
LBB15_22: ; malloc failed: free input, set reason, return NULL (x28=0)
mov x0, x19
bl _free
Lloh30:
adrp x8, l_.str.5@PAGE
Lloh31:
add x8, x8, l_.str.5@PAGEOFF
adrp x9, _failure_reason@PAGE
str x8, [x9, _failure_reason@PAGEOFF]
LBB15_23:
mov x0, x28 ; return output buffer (or NULL)
add sp, sp, #400
ldp x29, x30, [sp, #112] ; 16-byte Folded Reload
ldp x20, x19, [sp, #96] ; 16-byte Folded Reload
ldp x22, x21, [sp, #80] ; 16-byte Folded Reload
ldp x24, x23, [sp, #64] ; 16-byte Folded Reload
ldp x26, x25, [sp, #48] ; 16-byte Folded Reload
ldp x28, x27, [sp, #32] ; 16-byte Folded Reload
ldp d9, d8, [sp, #16] ; 16-byte Folded Reload
ldp d11, d10, [sp], #128 ; 16-byte Folded Reload
ret
.loh AdrpLdr Lloh28, Lloh29
.loh AdrpAdrp Lloh26, Lloh28
.loh AdrpLdr Lloh26, Lloh27
.loh AdrpLdr Lloh24, Lloh25
.loh AdrpAdrp Lloh22, Lloh24
.loh AdrpLdr Lloh22, Lloh23
.loh AdrpAdd Lloh30, Lloh31
.cfi_endproc
; -- End function
; int stbi_tga_test_file(FILE *f)
; Wraps tga_test: saves the file position, builds a stack context with the
; FILE* at offset 16, runs the test, restores the position, returns result.
; x19=FILE*, x20=saved position, x21=result.
.globl _stbi_tga_test_file ; -- Begin function stbi_tga_test_file
.p2align 2
_stbi_tga_test_file: ; @stbi_tga_test_file
.cfi_startproc
; %bb.0:
sub sp, sp, #96
.cfi_def_cfa_offset 96
stp x22, x21, [sp, #48] ; 16-byte Folded Spill
stp x20, x19, [sp, #64] ; 16-byte Folded Spill
stp x29, x30, [sp, #80] ; 16-byte Folded Spill
add x29, sp, #80
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x19, x0
bl _ftell
mov x20, x0
str x19, [sp, #24] ; context.file = f
add x0, sp, #8 ; x0 = &context
bl _tga_test
mov x21, x0 ; keep result across fseek
sxtw x1, w20
mov x0, x19
mov w2, #0 ; SEEK_SET
bl _fseek
mov x0, x21
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
add sp, sp, #96
ret
.cfi_endproc
; -- End function
; stbi_uc *stbi_tga_load_from_file(FILE *f, int *x, int *y, int *comp, int req_comp)
; Context wrapper: FILE* goes into the stack context (offset 16), then
; tga_load(&context, x, y, comp, req_comp) with x1-x4 passing through.
.globl _stbi_tga_load_from_file ; -- Begin function stbi_tga_load_from_file
.p2align 2
_stbi_tga_load_from_file: ; @stbi_tga_load_from_file
.cfi_startproc
; %bb.0:
sub sp, sp, #64
.cfi_def_cfa_offset 64
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
str x0, [sp, #24] ; context.file = f
add x0, sp, #8 ; x0 = &context
bl _tga_load
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
add sp, sp, #64
ret
.cfi_endproc
; -- End function
; stbi_uc *stbi_load_from_memory(stbi_uc const *buffer, int len, int *x,
;                                int *y, int *comp, int req_comp)
; Memory-buffer twin of stbi_load_from_file. The stack context stores
; {NULL file, img_buffer=buffer, img_buffer_end=buffer+len} via
; stp xzr, x23 / str x25 before every decoder call (each decoder advances
; the cursor, so the context is rebuilt per probe). Dispatch order:
;   JPEG (0xFF fill bytes then 0xD8 read directly from the buffer)
;   PNG  (8-byte signature compared in-buffer, with length guards)
;   BMP  (bmp_test)   PSD (get32 == "8BPS")
;   HDR  (stbi_hdr_test_memory) -> hdr_load then TAIL CALL hdr_to_ldr
;   registered loaders (test_memory at vtable offset 0, load at offset 8,
;                       invoked by tail branch after full frame teardown)
;   TGA  (tga_test), else failure_reason = l_.str.2 and NULL.
; Registers: x23=buffer, x24=len, x25=buffer+len, x22=x, x21=y, x20=comp,
; x19=req_comp, x11=&failure_reason page (early blocks only).
.globl _stbi_load_from_memory ; -- Begin function stbi_load_from_memory
.p2align 2
_stbi_load_from_memory: ; @stbi_load_from_memory
.cfi_startproc
; %bb.0:
stp x28, x27, [sp, #-96]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 96
stp x26, x25, [sp, #16] ; 16-byte Folded Spill
stp x24, x23, [sp, #32] ; 16-byte Folded Spill
stp x22, x21, [sp, #48] ; 16-byte Folded Spill
stp x20, x19, [sp, #64] ; 16-byte Folded Spill
stp x29, x30, [sp, #80] ; 16-byte Folded Spill
add x29, sp, #80
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
mov w9, #14112 ; probe the large context frame
Lloh32:
adrp x16, ___chkstk_darwin@GOTPAGE
Lloh33:
ldr x16, [x16, ___chkstk_darwin@GOTPAGEOFF]
blr x16
sub sp, sp, #3, lsl #12 ; =12288
sub sp, sp, #1824 ; total = 14112 bytes of locals
mov x19, x5 ; req_comp
mov x20, x4 ; comp
mov x21, x3 ; y
mov x22, x2 ; x
mov x24, x1 ; len
mov x23, x0 ; buffer
add x25, x0, w1, sxtw ; x25 = buffer + len (end pointer)
adrp x11, _failure_reason@PAGE
cmp w1, #1
b.lt LBB18_23 ; empty buffer: skip JPEG/PNG sniffs
; %bb.1: ; JPEG sniff straight from memory
ldrb w8, [x23]
cmp w8, #255 ; first byte must be 0xFF
b.ne LBB18_7
; %bb.2:
add x9, x23, #1
LBB18_3: ; =>This Inner Loop Header: Depth=1
cmp x9, x25 ; ran off the end?
b.hs LBB18_7
; %bb.4: ; in Loop: Header=BB18_3 Depth=1
ldrb w10, [x9], #1 ; skip 0xFF fill bytes
cmp w10, #255
b.eq LBB18_3
; %bb.5:
cmp w10, #216 ; 0xD8 = SOI
b.ne LBB18_7
; %bb.6:
stp xzr, x23, [sp, #24] ; ctx: file=NULL, img_buffer=buffer
str x25, [sp, #40] ; ctx: img_buffer_end
add x0, sp, #8 ; x0 = &context
mov x1, x22
mov x2, x21
mov x3, x20
mov x4, x19
bl _load_jpeg_image
b LBB18_27
LBB18_7: ; not JPEG: tentative reason, then in-buffer PNG signature
Lloh34:
adrp x9, l_.str.22@PAGE
Lloh35:
add x9, x9, l_.str.22@PAGEOFF
str x9, [x11, _failure_reason@PAGEOFF]
cmp w24, #1 ; each byte check is guarded by a length check
b.eq LBB18_23
; %bb.8:
cmp w8, #137 ; 0x89
b.ne LBB18_23
; %bb.9:
cmp w24, #3
b.lo LBB18_23
; %bb.10:
ldrb w8, [x23, #1]
cmp w8, #80 ; 'P'
b.ne LBB18_23
; %bb.11:
cmp w24, #4
b.lo LBB18_23
; %bb.12:
ldrb w8, [x23, #2]
cmp w8, #78 ; 'N'
b.ne LBB18_23
; %bb.13:
cmp w24, #5
b.lo LBB18_23
; %bb.14:
ldrb w8, [x23, #3]
cmp w8, #71 ; 'G'
b.ne LBB18_23
; %bb.15:
cmp w24, #6
b.lo LBB18_23
; %bb.16:
ldrb w8, [x23, #4]
cmp w8, #13 ; '\r'
b.ne LBB18_23
; %bb.17:
cmp w24, #7
b.lo LBB18_23
; %bb.18:
ldrb w8, [x23, #5]
cmp w8, #10 ; '\n'
b.ne LBB18_23
; %bb.19:
cmp w24, #8
b.lo LBB18_23
; %bb.20:
ldrb w8, [x23, #6]
cmp w8, #26 ; 0x1A
b.ne LBB18_23
; %bb.21:
ldrb w8, [x23, #7]
cmp w8, #10 ; '\n'
b.ne LBB18_23
; %bb.22: ; full PNG signature matched
stp xzr, x23, [sp, #24] ; rebuild context
str x25, [sp, #40]
add x0, sp, #8
mov x1, x22
mov x2, x21
mov x3, x20
mov x4, x19
bl _do_png
b LBB18_27
LBB18_23: ; not PNG: record reason, try BMP
Lloh36:
adrp x8, l_.str.68@PAGE
Lloh37:
add x8, x8, l_.str.68@PAGEOFF
str x8, [x11, _failure_reason@PAGEOFF]
stp xzr, x23, [sp, #24] ; fresh context (bmp_test consumes bytes)
str x25, [sp, #40]
add x0, sp, #8
bl _bmp_test
cbz w0, LBB18_25
; %bb.24:
stp xzr, x23, [sp, #24] ; rebuild context for the real load
str x25, [sp, #40]
add x0, sp, #8
mov x1, x22
mov x2, x21
mov x3, x20
mov x4, x19
bl _bmp_load
b LBB18_27
LBB18_25: ; try PSD magic
stp xzr, x23, [sp, #24]
str x25, [sp, #40]
add x0, sp, #8
bl _get32
mov w8, #20563 ; 0x38425053 = "8BPS"
movk w8, #14402, lsl #16
cmp w0, w8
b.ne LBB18_28
; %bb.26:
stp xzr, x23, [sp, #24] ; rebuild context
str x25, [sp, #40]
add x0, sp, #8
mov x1, x22
mov x2, x21
mov x3, x20
mov x4, x19
bl _psd_load
LBB18_27: ; common epilogue: result already in x0
add sp, sp, #3, lsl #12 ; =12288
add sp, sp, #1824
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
ldp x26, x25, [sp, #16] ; 16-byte Folded Reload
ldp x28, x27, [sp], #96 ; 16-byte Folded Reload
ret
LBB18_28: ; try HDR
mov x0, x23
mov x1, x24
bl _stbi_hdr_test_memory
cbz w0, LBB18_32
; %bb.29:
stp xzr, x23, [sp, #24]
str x25, [sp, #40]
add x0, sp, #8
mov x1, x22
mov x2, x21
mov x3, x20
mov x4, x19
bl _hdr_load ; returns float* HDR data
ldr w1, [x22] ; decoded width
ldr w2, [x21] ; decoded height
cbnz w19, LBB18_31 ; req_comp == 0 -> use *comp
; %bb.30:
ldr w19, [x20]
LBB18_31:
mov x3, x19
add sp, sp, #3, lsl #12 ; =12288
add sp, sp, #1824
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
ldp x26, x25, [sp, #16] ; 16-byte Folded Reload
ldp x28, x27, [sp], #96 ; 16-byte Folded Reload
b _hdr_to_ldr ; tail call: hdr_to_ldr(data, w, h, comp)
LBB18_32: ; try registered custom loaders
adrp x27, _max_loaders@PAGE
ldr w8, [x27, _max_loaders@PAGEOFF]
cmp w8, #1
b.lt LBB18_36 ; none registered
; %bb.33:
mov x28, #0 ; loader index
Lloh38:
adrp x26, _loaders@GOTPAGE
Lloh39:
ldr x26, [x26, _loaders@GOTPAGEOFF]
LBB18_34: ; =>This Inner Loop Header: Depth=1
ldr x8, [x26, x28, lsl #3] ; loaders[i]
ldr x8, [x8] ; ->test_memory callback (offset 0)
mov x0, x23
mov x1, x24
blr x8
cbnz w0, LBB18_38 ; loader recognized the buffer
; %bb.35: ; in Loop: Header=BB18_34 Depth=1
add x28, x28, #1
ldrsw x8, [x27, _max_loaders@PAGEOFF]
cmp x28, x8
b.lt LBB18_34
LBB18_36: ; try TGA last (heuristic, no signature)
stp xzr, x23, [sp, #24]
str x25, [sp, #40]
add x0, sp, #8
bl _tga_test
cbz w0, LBB18_39
; %bb.37:
stp xzr, x23, [sp, #24] ; rebuild context
str x25, [sp, #40]
add x0, sp, #8
mov x1, x22
mov x2, x21
mov x3, x20
mov x4, x19
bl _tga_load
b LBB18_27
LBB18_38: ; tail call into the matched loader's load_from_memory
ldr x8, [x26, x28, lsl #3]
ldr x6, [x8, #8] ; ->load_from_memory callback (offset 8)
mov x0, x23
mov x1, x24
mov x2, x22
mov x3, x21
mov x4, x20
mov x5, x19
add sp, sp, #3, lsl #12 ; =12288
add sp, sp, #1824
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
ldp x26, x25, [sp, #16] ; 16-byte Folded Reload
ldp x28, x27, [sp], #96 ; 16-byte Folded Reload
br x6 ; frame fully torn down; callee returns to our caller
LBB18_39: ; no format matched
mov x0, #0 ; return NULL
Lloh40:
adrp x8, l_.str.2@PAGE ; failure_reason = l_.str.2
Lloh41:
add x8, x8, l_.str.2@PAGEOFF
adrp x9, _failure_reason@PAGE
str x8, [x9, _failure_reason@PAGEOFF]
b LBB18_27
.loh AdrpLdrGot Lloh32, Lloh33
.loh AdrpAdd Lloh34, Lloh35
.loh AdrpAdd Lloh36, Lloh37
.loh AdrpLdrGot Lloh38, Lloh39
.loh AdrpAdd Lloh40, Lloh41
.cfi_endproc
; -- End function
; int stbi_jpeg_test_memory(const unsigned char *buffer /*x0*/, int len /*w1*/)
; NOTE(review): compiler-generated code; comments added for review only.
; Returns 1 in w0 if the data begins with a JPEG SOI marker: one 0xFF byte,
; optionally more 0xFF fill bytes, then 0xD8.  On any failure returns 0 and
; stores the l_.str.22 message pointer into the global failure_reason.
.globl _stbi_jpeg_test_memory ; -- Begin function stbi_jpeg_test_memory
.p2align 2
_stbi_jpeg_test_memory: ; @stbi_jpeg_test_memory
.cfi_startproc
; %bb.0:
cmp w1, #1
b.lt LBB19_2 ; len < 1 -> fail
; %bb.1:
ldrb w8, [x0]
cmp w8, #255 ; first byte must be 0xFF
b.eq LBB19_3
LBB19_2: ; shared failure path: return 0, record reason
mov w0, #0
Lloh42:
adrp x8, l_.str.22@PAGE
Lloh43:
add x8, x8, l_.str.22@PAGEOFF
adrp x9, _failure_reason@PAGE
str x8, [x9, _failure_reason@PAGEOFF]
ret
LBB19_3:
add x8, x0, w1, sxtw ; x8 = buffer end (one past last byte)
add x9, x0, #1 ; x9 = cursor, just past the initial 0xFF
LBB19_4: ; =>This Inner Loop Header: Depth=1
cmp x9, x8
b.hs LBB19_2 ; ran off the end without finding a marker -> fail
; %bb.5: ; in Loop: Header=BB19_4 Depth=1
ldrb w10, [x9], #1 ; post-increment load of next byte
cmp w10, #255
b.eq LBB19_4 ; 0xFF fill bytes are skipped
; %bb.6:
cmp w10, #216 ; first non-0xFF byte must be 0xD8 (SOI)
b.ne LBB19_2
; %bb.7:
mov w0, #1
ret
.loh AdrpAdd Lloh42, Lloh43
.cfi_endproc
; -- End function
; stbi_jpeg_load_from_memory(buffer, len, x, y, comp, req_comp)
; NOTE(review): compiler-generated code; comments added for review only.
; Builds a 14096-byte decoder context on the stack (allocation probed via
; ___chkstk_darwin), stores a zero word plus the [buffer, buffer+len) range
; into it (presumably FILE* = NULL and the memory-buffer bounds, per stbi
; convention — confirm against the C source), then calls load_jpeg_image
; with the context as the first argument and the user args shifted down.
.globl _stbi_jpeg_load_from_memory ; -- Begin function stbi_jpeg_load_from_memory
.p2align 2
_stbi_jpeg_load_from_memory: ; @stbi_jpeg_load_from_memory
.cfi_startproc
; %bb.0:
stp x28, x27, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w27, -24
.cfi_offset w28, -32
mov w9, #14096 ; bytes of locals to probe
Lloh44:
adrp x16, ___chkstk_darwin@GOTPAGE
Lloh45:
ldr x16, [x16, ___chkstk_darwin@GOTPAGEOFF]
blr x16 ; stack probe for the large frame
sub sp, sp, #3, lsl #12 ; =12288
sub sp, sp, #1808 ; 12288 + 1808 = 14096 total
stp xzr, x0, [sp, #16] ; ctx: 0, then buffer start
add x8, x0, w1, sxtw
str x8, [sp, #32] ; ctx: buffer end = buffer + len
mov x0, sp ; x0 = context
mov x1, x2 ; shift x/y/comp/req_comp down one arg slot
mov x2, x3
mov x3, x4
mov x4, x5
bl _load_jpeg_image
add sp, sp, #3, lsl #12 ; =12288
add sp, sp, #1808
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x28, x27, [sp], #32 ; 16-byte Folded Reload
ret
.loh AdrpLdrGot Lloh44, Lloh45
.cfi_endproc
; -- End function
; int stbi_png_test_memory(const unsigned char *buffer /*x0*/, int len /*w1*/)
; NOTE(review): compiler-generated code; comments added for review only.
; Checks the 8-byte PNG signature 137 'P' 'N' 'G' 13 10 26 10, guarding the
; length before each byte read.  Returns 1 on match; otherwise returns 0 and
; stores the l_.str.68 message pointer into failure_reason.
; The early checks fuse the byte test and length test with CCMP: e.g.
; "ccmp w1, #1, #4, eq" compares len only when the byte matched, and forces
; the Z flag (nzcv=#4) when it did not, so a single branch handles both
; "wrong byte" and "too short".
.globl _stbi_png_test_memory ; -- Begin function stbi_png_test_memory
.p2align 2
_stbi_png_test_memory: ; @stbi_png_test_memory
.cfi_startproc
; %bb.0:
cmp w1, #0
b.le LBB21_14 ; empty input -> fail
; %bb.1:
ldrb w8, [x0]
cmp w8, #137 ; signature byte 0: 0x89
ccmp w1, #1, #4, eq
b.eq LBB21_14 ; byte wrong, or len == 1 -> fail
; %bb.2:
ldrb w8, [x0, #1]
cmp w8, #80 ; byte 1: 'P'
ccmp w1, #3, #8, eq
b.lt LBB21_14 ; byte wrong, or len < 3 -> fail
; %bb.3:
ldrb w8, [x0, #2]
cmp w8, #78 ; byte 2: 'N'
ccmp w1, #4, #8, eq
b.lt LBB21_14 ; byte wrong, or len < 4 -> fail
; %bb.4:
ldrb w8, [x0, #3]
cmp w8, #71 ; byte 3: 'G'
b.ne LBB21_14
; %bb.5:
cmp w1, #5
b.lt LBB21_14
; %bb.6:
ldrb w8, [x0, #4]
cmp w8, #13 ; byte 4: CR
b.ne LBB21_14
; %bb.7:
cmp w1, #6
b.lt LBB21_14
; %bb.8:
ldrb w8, [x0, #5]
cmp w8, #10 ; byte 5: LF
b.ne LBB21_14
; %bb.9:
cmp w1, #7
b.lt LBB21_14
; %bb.10:
ldrb w8, [x0, #6]
cmp w8, #26 ; byte 6: SUB (Ctrl-Z)
b.ne LBB21_14
; %bb.11:
cmp w1, #8
b.lt LBB21_14
; %bb.12:
ldrb w8, [x0, #7]
cmp w8, #10 ; byte 7: LF
b.ne LBB21_14
; %bb.13:
mov w0, #1 ; full signature matched
ret
LBB21_14: ; shared failure path
mov w0, #0
Lloh46:
adrp x8, l_.str.68@PAGE
Lloh47:
add x8, x8, l_.str.68@PAGEOFF
adrp x9, _failure_reason@PAGE
str x8, [x9, _failure_reason@PAGEOFF]
ret
.loh AdrpAdd Lloh46, Lloh47
.cfi_endproc
; -- End function
; stbi_png_load_from_memory(buffer, len, x, y, comp, req_comp)
; NOTE(review): compiler-generated code; comments added for review only.
; Builds a small stack context pointing at [buffer, buffer+len) and forwards
; to do_png with the remaining arguments shifted down one slot.
.globl _stbi_png_load_from_memory ; -- Begin function stbi_png_load_from_memory
.p2align 2
_stbi_png_load_from_memory: ; @stbi_png_load_from_memory
.cfi_startproc
; %bb.0:
sub sp, sp, #80
.cfi_def_cfa_offset 80
stp x29, x30, [sp, #64] ; 16-byte Folded Spill
add x29, sp, #64
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
stp xzr, x0, [sp, #16] ; ctx: 0, buffer start
add x8, x0, w1, sxtw
str x8, [sp, #32] ; ctx: buffer end
mov x0, sp ; x0 = context
mov x1, x2
mov x2, x3
mov x3, x4
mov x4, x5
bl _do_png
ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
add sp, sp, #80
ret
.cfi_endproc
; -- End function
; int stbi_bmp_test_memory(const unsigned char *buffer /*x0*/, int len /*w1*/)
; NOTE(review): compiler-generated code; comments added for review only.
; Wraps the [buffer, buffer+len) range in a stack context and returns the
; result of bmp_test on it.
.globl _stbi_bmp_test_memory ; -- Begin function stbi_bmp_test_memory
.p2align 2
_stbi_bmp_test_memory: ; @stbi_bmp_test_memory
.cfi_startproc
; %bb.0:
sub sp, sp, #64
.cfi_def_cfa_offset 64
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
stp xzr, x0, [sp, #24] ; ctx: 0, buffer start
add x8, x0, w1, sxtw
str x8, [sp, #40] ; ctx: buffer end
add x0, sp, #8 ; x0 = context
bl _bmp_test
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
add sp, sp, #64
ret
.cfi_endproc
; -- End function
; stbi_bmp_load_from_memory(buffer, len, x, y, comp, req_comp)
; NOTE(review): compiler-generated code; comments added for review only.
; Wraps the memory range in a stack context and forwards to bmp_load with
; the remaining arguments shifted down one slot.
.globl _stbi_bmp_load_from_memory ; -- Begin function stbi_bmp_load_from_memory
.p2align 2
_stbi_bmp_load_from_memory: ; @stbi_bmp_load_from_memory
.cfi_startproc
; %bb.0:
sub sp, sp, #64
.cfi_def_cfa_offset 64
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
stp xzr, x0, [sp, #24] ; ctx: 0, buffer start
add x8, x0, w1, sxtw
str x8, [sp, #40] ; ctx: buffer end
add x0, sp, #8 ; x0 = context
mov x1, x2
mov x2, x3
mov x3, x4
mov x4, x5
bl _bmp_load
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
add sp, sp, #64
ret
.cfi_endproc
; -- End function
; int stbi_psd_test_memory(const unsigned char *buffer /*x0*/, int len /*w1*/)
; NOTE(review): compiler-generated code; comments added for review only.
; Reads the first 32-bit word via get32 and compares it against 0x38425053
; ("8BPS", the Photoshop file signature); returns 1 on match, 0 otherwise.
.globl _stbi_psd_test_memory ; -- Begin function stbi_psd_test_memory
.p2align 2
_stbi_psd_test_memory: ; @stbi_psd_test_memory
.cfi_startproc
; %bb.0:
sub sp, sp, #64
.cfi_def_cfa_offset 64
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
stp xzr, x0, [sp, #24] ; ctx: 0, buffer start
add x8, x0, w1, sxtw
str x8, [sp, #40] ; ctx: buffer end
add x0, sp, #8 ; x0 = context
bl _get32
mov w8, #20563 ; 0x5053 ("PS")
movk w8, #14402, lsl #16 ; w8 = 0x38425053 = "8BPS"
cmp w0, w8
cset w0, eq ; 1 if signature matches, else 0
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
add sp, sp, #64
ret
.cfi_endproc
; -- End function
; stbi_psd_load_from_memory(buffer, len, x, y, comp, req_comp)
; NOTE(review): compiler-generated code; comments added for review only.
; Wraps the memory range in a stack context and forwards to psd_load with
; the remaining arguments shifted down one slot.
.globl _stbi_psd_load_from_memory ; -- Begin function stbi_psd_load_from_memory
.p2align 2
_stbi_psd_load_from_memory: ; @stbi_psd_load_from_memory
.cfi_startproc
; %bb.0:
sub sp, sp, #64
.cfi_def_cfa_offset 64
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
stp xzr, x0, [sp, #24] ; ctx: 0, buffer start
add x8, x0, w1, sxtw
str x8, [sp, #40] ; ctx: buffer end
add x0, sp, #8 ; x0 = context
mov x1, x2
mov x2, x3
mov x3, x4
mov x4, x5
bl _psd_load
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
add sp, sp, #64
ret
.cfi_endproc
; -- End function
; int stbi_hdr_test_memory(const unsigned char *buffer /*x0*/, int len /*w1*/)
; NOTE(review): compiler-generated code; comments added for review only.
; Byte-by-byte check for the Radiance HDR signature "#?RADIANCE" followed by
; a newline (bytes 35 63 82 65 68 73 65 78 67 69 10), with a length guard
; before each read.  Returns 1 on a full match, 0 otherwise.  The first three
; checks fuse byte test + length test using CCMP (see stbi_png_test_memory).
.globl _stbi_hdr_test_memory ; -- Begin function stbi_hdr_test_memory
.p2align 2
_stbi_hdr_test_memory: ; @stbi_hdr_test_memory
.cfi_startproc
; %bb.0:
cmp w1, #0
b.le LBB27_4 ; empty input -> fail
; %bb.1:
mov x8, x0 ; keep base pointer; w0 is reused for the result
ldrb w9, [x0]
cmp w9, #35 ; byte 0: '#'
ccmp w1, #1, #4, eq
b.eq LBB27_4 ; wrong byte, or len == 1 -> fail
; %bb.2:
ldrb w9, [x8, #1]
cmp w9, #63 ; byte 1: '?'
ccmp w1, #3, #8, eq
b.lt LBB27_4 ; wrong byte, or len < 3 -> fail
; %bb.3:
ldrb w9, [x8, #2]
cmp w9, #82 ; byte 2: 'R'
ccmp w1, #4, #8, eq
b.ge LBB27_6 ; byte matched and len >= 4 -> keep going
LBB27_4: ; failure: return 0
mov w0, #0
LBB27_5: ; shared return (w0 already holds the result)
ret
LBB27_6:
mov w0, #0 ; preload failure result before each test
ldrb w9, [x8, #3]
cmp w9, #65 ; byte 3: 'A'
b.ne LBB27_5
; %bb.7:
cmp w1, #5
b.lt LBB27_5
; %bb.8:
mov w0, #0
ldrb w9, [x8, #4]
cmp w9, #68 ; byte 4: 'D'
b.ne LBB27_5
; %bb.9:
cmp w1, #6
b.lt LBB27_5
; %bb.10:
mov w0, #0
ldrb w9, [x8, #5]
cmp w9, #73 ; byte 5: 'I'
b.ne LBB27_5
; %bb.11:
cmp w1, #7
b.lt LBB27_5
; %bb.12:
mov w0, #0
ldrb w9, [x8, #6]
cmp w9, #65 ; byte 6: 'A'
b.ne LBB27_5
; %bb.13:
cmp w1, #8
b.lt LBB27_5
; %bb.14:
mov w0, #0
ldrb w9, [x8, #7]
cmp w9, #78 ; byte 7: 'N'
b.ne LBB27_5
; %bb.15:
cmp w1, #9
b.lt LBB27_5
; %bb.16:
mov w0, #0
ldrb w9, [x8, #8]
cmp w9, #67 ; byte 8: 'C'
b.ne LBB27_5
; %bb.17:
cmp w1, #10
b.lt LBB27_5
; %bb.18:
mov w0, #0
ldrb w9, [x8, #9]
cmp w9, #69 ; byte 9: 'E'
b.ne LBB27_5
; %bb.19:
cmp w1, #11
b.lt LBB27_5
; %bb.20:
ldrb w8, [x8, #10]
cmp w8, #10 ; byte 10: '\n'
cset w0, eq
ret
.cfi_endproc
; -- End function
; stbi_hdr_load_from_memory(buffer, len, x, y, comp, req_comp)
; NOTE(review): compiler-generated code; comments added for review only.
; Wraps the memory range in a stack context and forwards to hdr_load with
; the remaining arguments shifted down one slot.
.globl _stbi_hdr_load_from_memory ; -- Begin function stbi_hdr_load_from_memory
.p2align 2
_stbi_hdr_load_from_memory: ; @stbi_hdr_load_from_memory
.cfi_startproc
; %bb.0:
sub sp, sp, #64
.cfi_def_cfa_offset 64
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
stp xzr, x0, [sp, #24] ; ctx: 0, buffer start
add x8, x0, w1, sxtw
str x8, [sp, #40] ; ctx: buffer end
add x0, sp, #8 ; x0 = context
mov x1, x2
mov x2, x3
mov x3, x4
mov x4, x5
bl _hdr_load
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
add sp, sp, #64
ret
.cfi_endproc
; -- End function
; int stbi_tga_test_memory(const unsigned char *buffer /*x0*/, int len /*w1*/)
; NOTE(review): compiler-generated code; comments added for review only.
; Wraps the memory range in a stack context and returns tga_test's result.
.globl _stbi_tga_test_memory ; -- Begin function stbi_tga_test_memory
.p2align 2
_stbi_tga_test_memory: ; @stbi_tga_test_memory
.cfi_startproc
; %bb.0:
sub sp, sp, #64
.cfi_def_cfa_offset 64
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
stp xzr, x0, [sp, #24] ; ctx: 0, buffer start
add x8, x0, w1, sxtw
str x8, [sp, #40] ; ctx: buffer end
add x0, sp, #8 ; x0 = context
bl _tga_test
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
add sp, sp, #64
ret
.cfi_endproc
; -- End function
; stbi_tga_load_from_memory(buffer, len, x, y, comp, req_comp)
; NOTE(review): compiler-generated code; comments added for review only.
; Wraps the memory range in a stack context and forwards to tga_load with
; the remaining arguments shifted down one slot.
.globl _stbi_tga_load_from_memory ; -- Begin function stbi_tga_load_from_memory
.p2align 2
_stbi_tga_load_from_memory: ; @stbi_tga_load_from_memory
.cfi_startproc
; %bb.0:
sub sp, sp, #64
.cfi_def_cfa_offset 64
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
stp xzr, x0, [sp, #24] ; ctx: 0, buffer start
add x8, x0, w1, sxtw
str x8, [sp, #40] ; ctx: buffer end
add x0, sp, #8 ; x0 = context
mov x1, x2
mov x2, x3
mov x3, x4
mov x4, x5
bl _tga_load
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
add sp, sp, #64
ret
.cfi_endproc
; -- End function
; float *stbi_loadf(filename /*x0*/, x /*x1*/, y /*x2*/, comp /*x3*/, req_comp /*x4*/)
; NOTE(review): compiler-generated code; comments added for review only.
; Opens the file (mode string l_.str — presumably "rb"; confirm), then:
;   - if stbi_hdr_test_file says it is a Radiance HDR, decodes via hdr_load;
;   - otherwise decodes via stbi_load_from_file and converts the 8-bit result
;     to float with ldr_to_hdr (w/h read back through the x/y out-pointers,
;     channel count = req_comp, or *comp when req_comp == 0).
; On fopen failure sets failure_reason = l_.str.1 and returns NULL; on decode
; failure sets failure_reason = l_.str.2 and returns NULL.  The FILE* is
; closed on every path that opened it.
.globl _stbi_loadf ; -- Begin function stbi_loadf
.p2align 2
_stbi_loadf: ; @stbi_loadf
.cfi_startproc
; %bb.0:
sub sp, sp, #112
.cfi_def_cfa_offset 112
stp x24, x23, [sp, #48] ; 16-byte Folded Spill
stp x22, x21, [sp, #64] ; 16-byte Folded Spill
stp x20, x19, [sp, #80] ; 16-byte Folded Spill
stp x29, x30, [sp, #96] ; 16-byte Folded Spill
add x29, sp, #96
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
mov x20, x4 ; x20 = req_comp
mov x21, x3 ; x21 = comp out-pointer
mov x22, x2 ; x22 = y out-pointer
mov x23, x1 ; x23 = x out-pointer
Lloh48:
adrp x1, l_.str@PAGE
Lloh49:
add x1, x1, l_.str@PAGEOFF ; fopen mode string
bl _fopen
cbz x0, LBB31_3 ; NULL -> "can't open file"
; %bb.1:
mov x19, x0 ; x19 = FILE*
bl _stbi_hdr_test_file
cbz w0, LBB31_4 ; not HDR -> LDR path
; %bb.2: ; HDR path: build a stack context holding the FILE*
str x19, [sp, #24]
add x0, sp, #8
mov x1, x23
mov x2, x22
mov x3, x21
mov x4, x20
bl _hdr_load
mov x20, x0 ; x20 = result
b LBB31_9
LBB31_3: ; fopen failed
mov x20, #0
Lloh50:
adrp x8, l_.str.1@PAGE
Lloh51:
add x8, x8, l_.str.1@PAGEOFF
adrp x9, _failure_reason@PAGE
str x8, [x9, _failure_reason@PAGEOFF]
b LBB31_10
LBB31_4: ; LDR path: decode 8-bit, then convert to float
mov x0, x19
mov x1, x23
mov x2, x22
mov x3, x21
mov x4, x20
bl _stbi_load_from_file
cbz x0, LBB31_8 ; decode failed
; %bb.5:
ldr w1, [x23] ; w1 = *x (width)
ldr w2, [x22] ; w2 = *y (height)
cbnz w20, LBB31_7
; %bb.6:
ldr w20, [x21] ; req_comp == 0: use *comp instead
LBB31_7:
mov x3, x20 ; channel count for the conversion
bl _ldr_to_hdr
mov x20, x0 ; x20 = result
b LBB31_9
LBB31_8: ; stbi_load_from_file returned NULL
mov x20, #0
Lloh52:
adrp x8, l_.str.2@PAGE
Lloh53:
add x8, x8, l_.str.2@PAGEOFF
adrp x9, _failure_reason@PAGE
str x8, [x9, _failure_reason@PAGEOFF]
LBB31_9: ; close the file on all paths that opened it
mov x0, x19
bl _fclose
LBB31_10:
mov x0, x20 ; return saved result
ldp x29, x30, [sp, #96] ; 16-byte Folded Reload
ldp x20, x19, [sp, #80] ; 16-byte Folded Reload
ldp x22, x21, [sp, #64] ; 16-byte Folded Reload
ldp x24, x23, [sp, #48] ; 16-byte Folded Reload
add sp, sp, #112
ret
.loh AdrpAdd Lloh48, Lloh49
.loh AdrpAdd Lloh50, Lloh51
.loh AdrpAdd Lloh52, Lloh53
.cfi_endproc
; -- End function
; float *stbi_loadf_from_file(FILE *f /*x0*/, x, y, comp, req_comp)
; NOTE(review): compiler-generated code; comments added for review only.
; Same as stbi_loadf but operates on an already-open FILE* (does not close
; it): HDR files go through hdr_load; anything else goes through
; stbi_load_from_file and then tail-calls ldr_to_hdr for the float
; conversion.  Sets failure_reason = l_.str.2 and returns NULL on decode
; failure.
.globl _stbi_loadf_from_file ; -- Begin function stbi_loadf_from_file
.p2align 2
_stbi_loadf_from_file: ; @stbi_loadf_from_file
.cfi_startproc
; %bb.0:
sub sp, sp, #112
.cfi_def_cfa_offset 112
stp x24, x23, [sp, #48] ; 16-byte Folded Spill
stp x22, x21, [sp, #64] ; 16-byte Folded Spill
stp x20, x19, [sp, #80] ; 16-byte Folded Spill
stp x29, x30, [sp, #96] ; 16-byte Folded Spill
add x29, sp, #96
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
mov x19, x4 ; x19 = req_comp
mov x20, x3 ; x20 = comp out-pointer
mov x21, x2 ; x21 = y out-pointer
mov x22, x1 ; x22 = x out-pointer
mov x23, x0 ; x23 = FILE*
bl _stbi_hdr_test_file
cbz w0, LBB32_3 ; not HDR -> LDR path
; %bb.1: ; HDR path: stack context wrapping the FILE*
str x23, [sp, #24]
add x0, sp, #8
mov x1, x22
mov x2, x21
mov x3, x20
mov x4, x19
bl _hdr_load
LBB32_2: ; shared epilogue, result already in x0
ldp x29, x30, [sp, #96] ; 16-byte Folded Reload
ldp x20, x19, [sp, #80] ; 16-byte Folded Reload
ldp x22, x21, [sp, #64] ; 16-byte Folded Reload
ldp x24, x23, [sp, #48] ; 16-byte Folded Reload
add sp, sp, #112
ret
LBB32_3: ; LDR path
mov x0, x23
mov x1, x22
mov x2, x21
mov x3, x20
mov x4, x19
bl _stbi_load_from_file
cbz x0, LBB32_7 ; decode failed
; %bb.4:
ldr w1, [x22] ; w1 = *x
ldr w2, [x21] ; w2 = *y
cbnz w19, LBB32_6
; %bb.5:
ldr w19, [x20] ; req_comp == 0: use *comp
LBB32_6:
mov x3, x19
ldp x29, x30, [sp, #96] ; 16-byte Folded Reload
ldp x20, x19, [sp, #80] ; 16-byte Folded Reload
ldp x22, x21, [sp, #64] ; 16-byte Folded Reload
ldp x24, x23, [sp, #48] ; 16-byte Folded Reload
add sp, sp, #112
b _ldr_to_hdr ; tail call: ldr_to_hdr returns directly to our caller
LBB32_7:
Lloh54:
adrp x8, l_.str.2@PAGE
Lloh55:
add x8, x8, l_.str.2@PAGEOFF
adrp x9, _failure_reason@PAGE
str x8, [x9, _failure_reason@PAGEOFF]
b LBB32_2 ; x0 is already NULL here
.loh AdrpAdd Lloh54, Lloh55
.cfi_endproc
; -- End function
; Constant pool for _ldr_to_hdr: four TBL shuffle masks that widen bytes
; 0..15 of a 16-byte vector into four u32 lanes each (out-of-range index
; 0xff makes TBL write 0, i.e. a free zero-extension u8 -> u32).
.section __TEXT,__literal16,16byte_literals
.p2align 4 ; -- Begin function ldr_to_hdr
lCPI33_0: ; bytes 0..3 -> u32 lanes
.byte 0 ; 0x0
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 1 ; 0x1
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 2 ; 0x2
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 3 ; 0x3
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 255 ; 0xff
lCPI33_1: ; bytes 4..7 -> u32 lanes
.byte 4 ; 0x4
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 5 ; 0x5
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 6 ; 0x6
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 7 ; 0x7
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 255 ; 0xff
lCPI33_2: ; bytes 8..11 -> u32 lanes
.byte 8 ; 0x8
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 9 ; 0x9
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 10 ; 0xa
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 11 ; 0xb
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 255 ; 0xff
lCPI33_3: ; bytes 12..15 -> u32 lanes
.byte 12 ; 0xc
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 13 ; 0xd
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 14 ; 0xe
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 15 ; 0xf
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 255 ; 0xff
.section __TEXT,__text,regular,pure_instructions
; static float *ldr_to_hdr(stbi_uc *data /*x0*/, int x /*w1*/, int y /*w2*/, int comp /*w3*/)
; NOTE(review): compiler-generated code; comments added for review only.
; Converts an 8-bit LDR image to a freshly malloc'd float buffer of
; x*y*comp floats.  Per pixel, the first n = comp + (comp&1) - 1 channels
; (i.e. all channels when comp is odd, all but the last when comp is even —
; the even case treats the last channel as alpha) get
;     out = l2h_scale * pow(in / 255.0, l2h_gamma)
; and the remaining alpha channel (if any) gets the linear out = in / 255.0.
; The input buffer is free()d on every path.  On malloc failure the input is
; freed, failure_reason is set to l_.str.5, and NULL is returned.
; The per-channel inner loop is auto-vectorized: a 16-wide NEON path
; (LBB33_14), an 8-wide path (LBB33_11), and a scalar tail (LBB33_17); each
; vector lane still round-trips through scalar calls to pow(), so most
; q-registers are spilled/reloaded around every call.  The exact spill-slot
; schedule is load-bearing — do not reorder.
; Register roles in the main loop: x22 = src row ptr, x21 = dst row ptr,
; x23 = pixel index, x10 = pixel count (x*y), w24 = n (channels to gamma-
; correct), x25 = comp (src stride), d8 = (double)l2h_gamma, w26 = 255.0f
; bit pattern (0x437f0000), x28 = output buffer.
.p2align 2
_ldr_to_hdr: ; @ldr_to_hdr
.cfi_startproc
; %bb.0:
stp d9, d8, [sp, #-112]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 112
stp x28, x27, [sp, #16] ; 16-byte Folded Spill
stp x26, x25, [sp, #32] ; 16-byte Folded Spill
stp x24, x23, [sp, #48] ; 16-byte Folded Spill
stp x22, x21, [sp, #64] ; 16-byte Folded Spill
stp x20, x19, [sp, #80] ; 16-byte Folded Spill
stp x29, x30, [sp, #96] ; 16-byte Folded Spill
add x29, sp, #96
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
.cfi_offset b8, -104
.cfi_offset b9, -112
sub sp, sp, #432 ; spill area for vector temporaries
; kill: def $w3 killed $w3 def $x3
mov x19, x0 ; x19 = input data
mul w20, w2, w1 ; w20 = x*y = pixel count
str x3, [sp, #40] ; 8-byte Folded Spill
mul w8, w20, w3 ; total element count = x*y*comp
sbfiz x0, x8, #2, #32 ; byte size = count * sizeof(float)
bl _malloc
mov x28, x0 ; x28 = output buffer
cbz x0, LBB33_22 ; allocation failed
; %bb.1:
str x19, [sp, #16] ; 8-byte Folded Spill
cmp w20, #1
b.lt LBB33_21 ; zero pixels: nothing to convert
; %bb.2: ; hoist loop-invariant setup
mov x10, x20 ; x10 = pixel count
mov x23, #0 ; x23 = pixel index
ldr x9, [sp, #40] ; 8-byte Folded Reload
and w8, w9, #0x1
add w8, w9, w8
sub w24, w8, #1 ; w24 = n = comp + (comp&1) - 1
Lloh56:
adrp x8, _l2h_gamma@PAGE
Lloh57:
ldr s0, [x8, _l2h_gamma@PAGEOFF]
fcvt d8, s0 ; d8 = (double)l2h_gamma, for pow()
Lloh58:
adrp x8, _l2h_scale@PAGE
Lloh59:
ldr s0, [x8, _l2h_scale@PAGEOFF]
stur q0, [x29, #-128] ; 16-byte Folded Spill
sxtw x25, w9 ; x25 = comp (source row stride per pixel)
and x8, x24, #0xfffffff0
str x8, [sp, #8] ; 8-byte Folded Spill
dup.4s v0, v0[0] ; splat l2h_scale across 4 lanes
str q0, [sp, #208] ; 16-byte Folded Spill
and x27, x24, #0xfffffff8 ; n rounded down to a multiple of 8
sbfiz x8, x9, #2, #32 ; dst stride in bytes = comp * 4
str x8, [sp, #32] ; 8-byte Folded Spill
mov w26, #1132396544 ; 0x437f0000 = 255.0f
Lloh60:
adrp x8, lCPI33_0@PAGE
Lloh61:
ldr q2, [x8, lCPI33_0@PAGEOFF]
Lloh62:
adrp x8, lCPI33_1@PAGE
Lloh63:
ldr q3, [x8, lCPI33_1@PAGEOFF]
dup.4s v4, w26 ; v4 = {255.0f x4} (as integer bits, used after ucvtf)
Lloh64:
adrp x8, lCPI33_2@PAGE
Lloh65:
ldr q5, [x8, lCPI33_2@PAGEOFF]
Lloh66:
adrp x8, lCPI33_3@PAGE
Lloh67:
ldr q6, [x8, lCPI33_3@PAGEOFF]
ldr x22, [sp, #16] ; 8-byte Folded Reload
mov x21, x28 ; x21 = dst cursor
str x20, [sp, #24] ; 8-byte Folded Spill
stp q3, q2, [sp, #240] ; 32-byte Folded Spill
str q4, [sp, #224] ; 16-byte Folded Spill
stp q6, q5, [sp, #48] ; 32-byte Folded Spill
str x28, [sp] ; 8-byte Folded Spill
b LBB33_4
LBB33_3: ; in Loop: Header=BB33_4 Depth=1
add x23, x23, #1 ; next pixel
ldr x8, [sp, #32] ; 8-byte Folded Reload
add x21, x21, x8 ; advance dst by comp floats
add x22, x22, x25 ; advance src by comp bytes
cmp x23, x10
b.eq LBB33_21 ; all pixels done
LBB33_4: ; =>This Loop Header: Depth=1
; Child Loop BB33_14 Depth 2
; Child Loop BB33_11 Depth 2
; Child Loop BB33_17 Depth 2
cmp w24, #1
b.lt LBB33_7 ; n == 0: only the alpha copy below
; %bb.5: ; in Loop: Header=BB33_4 Depth=1
cmp w24, #8
b.hs LBB33_8 ; enough channels for a vector path
; %bb.6: ; in Loop: Header=BB33_4 Depth=1
mov x20, #0
b LBB33_17 ; scalar path from channel 0
LBB33_7: ; in Loop: Header=BB33_4 Depth=1
mov w8, #0 ; k = 0 for the alpha test
b LBB33_19
LBB33_8: ; in Loop: Header=BB33_4 Depth=1
cmp w24, #16
b.hs LBB33_13 ; 16-wide path
; %bb.9: ; in Loop: Header=BB33_4 Depth=1
mov x19, #0
LBB33_10: ; in Loop: Header=BB33_4 Depth=1
lsl x20, x19, #2 ; dst byte offset = k * 4
LBB33_11: ; Parent Loop BB33_4 Depth=1
; => This Inner Loop Header: Depth=2
; 8-wide step: load 8 bytes, widen to 2x4 u32 via TBL, convert to
; float, divide by 255, then call pow() on each of the 8 doubles,
; repacking and scaling by l2h_scale at the end.
ldr d0, [x22, x19]
tbl.16b v1, { v0 }, v2
tbl.16b v0, { v0 }, v3
ucvtf.4s v0, v0
ucvtf.4s v1, v1
fdiv.4s v1, v1, v4
fdiv.4s v0, v0, v4
fcvtl2 v2.2d, v0.4s
fcvtl v0.2d, v0.2s
stp q0, q2, [x29, #-160] ; 32-byte Folded Spill
fcvtl2 v0.2d, v1.4s
stur q0, [x29, #-176] ; 16-byte Folded Spill
fcvtl v0.2d, v1.2s
stur q0, [x29, #-208] ; 16-byte Folded Spill
; kill: def $d0 killed $d0 killed $q0
fmov d1, d8
bl _pow
stur q0, [x29, #-192] ; 16-byte Folded Spill
ldur q0, [x29, #-208] ; 16-byte Folded Reload
mov d0, v0[1]
fmov d1, d8
bl _pow
stur q0, [x29, #-208] ; 16-byte Folded Spill
ldur q0, [x29, #-176] ; 16-byte Folded Reload
; kill: def $d0 killed $d0 killed $q0
fmov d1, d8
bl _pow
stur q0, [x29, #-224] ; 16-byte Folded Spill
ldur q0, [x29, #-176] ; 16-byte Folded Reload
mov d0, v0[1]
fmov d1, d8
bl _pow
stur q0, [x29, #-240] ; 16-byte Folded Spill
ldur q0, [x29, #-160] ; 16-byte Folded Reload
; kill: def $d0 killed $d0 killed $q0
fmov d1, d8
bl _pow
stur q0, [x29, #-176] ; 16-byte Folded Spill
ldur q0, [x29, #-160] ; 16-byte Folded Reload
mov d0, v0[1]
fmov d1, d8
bl _pow
stur q0, [x29, #-160] ; 16-byte Folded Spill
ldur q0, [x29, #-144] ; 16-byte Folded Reload
; kill: def $d0 killed $d0 killed $q0
fmov d1, d8
bl _pow
stur q0, [x29, #-256] ; 16-byte Folded Spill
ldur q0, [x29, #-144] ; 16-byte Folded Reload
mov d0, v0[1]
fmov d1, d8
bl _pow
ldp q4, q3, [sp, #224] ; 32-byte Folded Reload
; kill: def $d0 killed $d0 def $q0
ldp q1, q2, [x29, #-240] ; 32-byte Folded Reload
mov.d v2[1], v1[0]
ldp q5, q1, [x29, #-208] ; 32-byte Folded Reload
mov.d v1[1], v5[0]
ldur q6, [x29, #-256] ; 16-byte Folded Reload
mov.d v6[1], v0[0]
ldp q0, q5, [x29, #-176] ; 32-byte Folded Reload
mov.d v0[1], v5[0]
fcvtn v0.2s, v0.2d ; narrow doubles back to floats
fcvtn2 v0.4s, v6.2d
fcvtn v1.2s, v1.2d
fcvtn2 v1.4s, v2.2d
ldr q2, [sp, #208] ; 16-byte Folded Reload
fmul.4s v1, v2, v1 ; * l2h_scale
ldur q2, [x29, #-128] ; 16-byte Folded Reload
fmul.4s v0, v0, v2[0]
ldr q2, [sp, #256] ; 16-byte Folded Reload
add x8, x21, x20
stp q1, q0, [x8] ; store 8 output floats
add x19, x19, #8
add x20, x20, #32
cmp x27, x19
b.ne LBB33_11
; %bb.12: ; in Loop: Header=BB33_4 Depth=1
mov x20, x27
mov x8, x24
cmp x27, x24
ldr x10, [sp, #24] ; 8-byte Folded Reload
ldp q6, q5, [sp, #48] ; 32-byte Folded Reload
b.ne LBB33_17 ; leftover channels -> scalar tail
b LBB33_19
LBB33_13: ; in Loop: Header=BB33_4 Depth=1
ldr x28, [sp, #8] ; 8-byte Folded Reload
mov x20, x22
mov x19, x21
LBB33_14: ; Parent Loop BB33_4 Depth=1
; => This Inner Loop Header: Depth=2
; 16-wide step: same as the 8-wide step but expands a full 16-byte
; vector into 4x4 u32 lanes (shuffle tables v2/v3/v5/v6) and makes
; 16 pow() calls per iteration.
ldr q0, [x20], #16
tbl.16b v1, { v0 }, v2
tbl.16b v2, { v0 }, v3
tbl.16b v3, { v0 }, v5
tbl.16b v0, { v0 }, v6
ucvtf.4s v0, v0
ucvtf.4s v3, v3
ucvtf.4s v2, v2
ucvtf.4s v1, v1
fdiv.4s v1, v1, v4
fdiv.4s v2, v2, v4
fdiv.4s v3, v3, v4
fdiv.4s v0, v0, v4
fcvtl2 v4.2d, v0.4s
fcvtl v0.2d, v0.2s
stp q0, q4, [x29, #-160] ; 32-byte Folded Spill
fcvtl2 v4.2d, v3.4s
fcvtl v0.2d, v3.2s
stp q0, q4, [x29, #-192] ; 32-byte Folded Spill
fcvtl2 v4.2d, v2.4s
fcvtl v0.2d, v2.2s
stp q0, q4, [x29, #-224] ; 32-byte Folded Spill
fcvtl2 v0.2d, v1.4s
stur q0, [x29, #-240] ; 16-byte Folded Spill
fcvtl v0.2d, v1.2s
str q0, [sp, #192] ; 16-byte Folded Spill
; kill: def $d0 killed $d0 killed $q0
fmov d1, d8
bl _pow
stur q0, [x29, #-256] ; 16-byte Folded Spill
ldr q0, [sp, #192] ; 16-byte Folded Reload
mov d0, v0[1]
fmov d1, d8
bl _pow
str q0, [sp, #192] ; 16-byte Folded Spill
ldur q0, [x29, #-240] ; 16-byte Folded Reload
; kill: def $d0 killed $d0 killed $q0
fmov d1, d8
bl _pow
str q0, [sp, #176] ; 16-byte Folded Spill
ldur q0, [x29, #-240] ; 16-byte Folded Reload
mov d0, v0[1]
fmov d1, d8
bl _pow
str q0, [sp, #160] ; 16-byte Folded Spill
ldur q0, [x29, #-224] ; 16-byte Folded Reload
; kill: def $d0 killed $d0 killed $q0
fmov d1, d8
bl _pow
stur q0, [x29, #-240] ; 16-byte Folded Spill
ldur q0, [x29, #-224] ; 16-byte Folded Reload
mov d0, v0[1]
fmov d1, d8
bl _pow
stur q0, [x29, #-224] ; 16-byte Folded Spill
ldur q0, [x29, #-208] ; 16-byte Folded Reload
; kill: def $d0 killed $d0 killed $q0
fmov d1, d8
bl _pow
str q0, [sp, #144] ; 16-byte Folded Spill
ldur q0, [x29, #-208] ; 16-byte Folded Reload
mov d0, v0[1]
fmov d1, d8
bl _pow
str q0, [sp, #128] ; 16-byte Folded Spill
ldur q0, [x29, #-192] ; 16-byte Folded Reload
; kill: def $d0 killed $d0 killed $q0
fmov d1, d8
bl _pow
stur q0, [x29, #-208] ; 16-byte Folded Spill
ldur q0, [x29, #-192] ; 16-byte Folded Reload
mov d0, v0[1]
fmov d1, d8
bl _pow
stur q0, [x29, #-192] ; 16-byte Folded Spill
ldur q0, [x29, #-176] ; 16-byte Folded Reload
; kill: def $d0 killed $d0 killed $q0
fmov d1, d8
bl _pow
str q0, [sp, #112] ; 16-byte Folded Spill
ldur q0, [x29, #-176] ; 16-byte Folded Reload
mov d0, v0[1]
fmov d1, d8
bl _pow
str q0, [sp, #96] ; 16-byte Folded Spill
ldur q0, [x29, #-160] ; 16-byte Folded Reload
; kill: def $d0 killed $d0 killed $q0
fmov d1, d8
bl _pow
stur q0, [x29, #-176] ; 16-byte Folded Spill
ldur q0, [x29, #-160] ; 16-byte Folded Reload
mov d0, v0[1]
fmov d1, d8
bl _pow
stur q0, [x29, #-160] ; 16-byte Folded Spill
ldur q0, [x29, #-144] ; 16-byte Folded Reload
; kill: def $d0 killed $d0 killed $q0
fmov d1, d8
bl _pow
str q0, [sp, #80] ; 16-byte Folded Spill
ldur q0, [x29, #-144] ; 16-byte Folded Reload
mov d0, v0[1]
fmov d1, d8
bl _pow
ldp q6, q5, [sp, #48] ; 32-byte Folded Reload
; kill: def $d0 killed $d0 def $q0
ldp q1, q7, [sp, #160] ; 32-byte Folded Reload
mov.d v7[1], v1[0]
ldp q3, q2, [x29, #-256] ; 32-byte Folded Reload
ldp q1, q4, [sp, #192] ; 32-byte Folded Reload
mov.d v3[1], v1[0]
ldp q1, q16, [sp, #128] ; 32-byte Folded Reload
mov.d v16[1], v1[0]
ldur q1, [x29, #-224] ; 16-byte Folded Reload
mov.d v2[1], v1[0]
ldp q1, q17, [sp, #96] ; 32-byte Folded Reload
mov.d v17[1], v1[0]
ldp q1, q18, [x29, #-208] ; 32-byte Folded Reload
mov.d v1[1], v18[0]
ldr q19, [sp, #80] ; 16-byte Folded Reload
mov.d v19[1], v0[0]
ldp q0, q18, [x29, #-176] ; 32-byte Folded Reload
mov.d v0[1], v18[0]
fcvtn v0.2s, v0.2d ; narrow all 16 results back to floats
fcvtn2 v0.4s, v19.2d
fcvtn v1.2s, v1.2d
fcvtn2 v1.4s, v17.2d
fcvtn v2.2s, v2.2d
fcvtn2 v2.4s, v16.2d
fcvtn v3.2s, v3.2d
fcvtn2 v3.4s, v7.2d
fmul.4s v3, v4, v3 ; * l2h_scale
fmul.4s v1, v4, v1
fmul.4s v0, v4, v0
stp q1, q0, [x19, #32]
fmul.4s v0, v4, v2
ldr q2, [sp, #256] ; 16-byte Folded Reload
stp q3, q0, [x19], #64 ; store 16 output floats, advance dst
ldp q4, q3, [sp, #224] ; 32-byte Folded Reload
subs x28, x28, #16
b.ne LBB33_14
; %bb.15: ; in Loop: Header=BB33_4 Depth=1
mov x8, x24
ldp x28, x9, [sp] ; 16-byte Folded Reload
cmp x9, x24
ldr x10, [sp, #24] ; 8-byte Folded Reload
b.eq LBB33_19 ; no leftover channels
; %bb.16: ; in Loop: Header=BB33_4 Depth=1
ldr x20, [sp, #8] ; 8-byte Folded Reload
mov x19, x20
tbnz w24, #3, LBB33_10 ; >= 8 left: take the 8-wide path first
LBB33_17: ; Parent Loop BB33_4 Depth=1
; => This Inner Loop Header: Depth=2
; scalar tail: out[k] = l2h_scale * powf(in[k]/255, l2h_gamma)
ldr b0, [x22, x20]
ucvtf s0, s0
fmov s1, w26 ; 255.0f
fdiv s0, s0, s1
fcvt d0, s0
fmov d1, d8
bl _pow
fcvt s0, d0
ldur q1, [x29, #-128] ; 16-byte Folded Reload
fmul s0, s1, s0 ; * l2h_scale
str s0, [x21, x20, lsl #2]
add x20, x20, #1
cmp x24, x20
b.ne LBB33_17
; %bb.18: ; in Loop: Header=BB33_4 Depth=1
mov x8, x24 ; k = n; restore clobbered loop-invariant regs
ldr x10, [sp, #24] ; 8-byte Folded Reload
ldp q3, q2, [sp, #240] ; 32-byte Folded Reload
ldr q4, [sp, #224] ; 16-byte Folded Reload
ldp q6, q5, [sp, #48] ; 32-byte Folded Reload
LBB33_19: ; in Loop: Header=BB33_4 Depth=1
ldr x9, [sp, #40] ; 8-byte Folded Reload
cmp w8, w9
b.ge LBB33_3 ; k >= comp: no alpha channel to copy
; %bb.20: ; in Loop: Header=BB33_4 Depth=1
; alpha channel: straight linear copy, out = in / 255 (no gamma)
mul x9, x23, x25
add x8, x9, w8, sxtw ; element index = pixel*comp + k
ldr x9, [sp, #16] ; 8-byte Folded Reload
ldr b0, [x9, x8]
ucvtf s0, s0
fmov s1, w26
fdiv s0, s0, s1
str s0, [x28, x8, lsl #2]
b LBB33_3
LBB33_21: ; success: free the 8-bit input, return the float buffer
ldr x0, [sp, #16] ; 8-byte Folded Reload
bl _free
b LBB33_23
LBB33_22: ; malloc failed: free the input, record reason, return NULL
mov x0, x19
bl _free
Lloh68:
adrp x8, l_.str.5@PAGE
Lloh69:
add x8, x8, l_.str.5@PAGEOFF
adrp x9, _failure_reason@PAGE
str x8, [x9, _failure_reason@PAGEOFF]
LBB33_23:
mov x0, x28 ; result (buffer or NULL)
add sp, sp, #432
ldp x29, x30, [sp, #96] ; 16-byte Folded Reload
ldp x20, x19, [sp, #80] ; 16-byte Folded Reload
ldp x22, x21, [sp, #64] ; 16-byte Folded Reload
ldp x24, x23, [sp, #48] ; 16-byte Folded Reload
ldp x26, x25, [sp, #32] ; 16-byte Folded Reload
ldp x28, x27, [sp, #16] ; 16-byte Folded Reload
ldp d9, d8, [sp], #112 ; 16-byte Folded Reload
ret
.loh AdrpLdr Lloh66, Lloh67
.loh AdrpAdrp Lloh64, Lloh66
.loh AdrpLdr Lloh64, Lloh65
.loh AdrpAdrp Lloh62, Lloh64
.loh AdrpLdr Lloh62, Lloh63
.loh AdrpAdrp Lloh60, Lloh62
.loh AdrpLdr Lloh60, Lloh61
.loh AdrpLdr Lloh58, Lloh59
.loh AdrpAdrp Lloh56, Lloh58
.loh AdrpLdr Lloh56, Lloh57
.loh AdrpAdd Lloh68, Lloh69
.cfi_endproc
; -- End function
; float *stbi_loadf_from_memory(buffer, len, x, y, comp, req_comp)
; NOTE(review): compiler-generated code; comments added for review only.
; Memory-buffer variant of stbi_loadf: HDR data goes through hdr_load via a
; stack context; anything else through stbi_load_from_memory followed by a
; tail call to ldr_to_hdr (channel count = req_comp, or *comp when
; req_comp == 0).  On decode failure sets failure_reason = l_.str.2 and
; returns NULL.
.globl _stbi_loadf_from_memory ; -- Begin function stbi_loadf_from_memory
.p2align 2
_stbi_loadf_from_memory: ; @stbi_loadf_from_memory
.cfi_startproc
; %bb.0:
sub sp, sp, #112
.cfi_def_cfa_offset 112
stp x24, x23, [sp, #48] ; 16-byte Folded Spill
stp x22, x21, [sp, #64] ; 16-byte Folded Spill
stp x20, x19, [sp, #80] ; 16-byte Folded Spill
stp x29, x30, [sp, #96] ; 16-byte Folded Spill
add x29, sp, #96
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
mov x19, x5 ; x19 = req_comp
mov x20, x4 ; x20 = comp out-pointer
mov x21, x3 ; x21 = y out-pointer
mov x22, x2 ; x22 = x out-pointer
mov x23, x1 ; x23 = len
mov x24, x0 ; x24 = buffer
bl _stbi_hdr_test_memory
cbz w0, LBB34_3 ; not HDR -> LDR path
; %bb.1: ; HDR path: stack context over [buffer, buffer+len)
stp xzr, x24, [sp, #24]
add x8, x24, w23, sxtw
str x8, [sp, #40]
add x0, sp, #8
mov x1, x22
mov x2, x21
mov x3, x20
mov x4, x19
bl _hdr_load
LBB34_2: ; shared epilogue, result already in x0
ldp x29, x30, [sp, #96] ; 16-byte Folded Reload
ldp x20, x19, [sp, #80] ; 16-byte Folded Reload
ldp x22, x21, [sp, #64] ; 16-byte Folded Reload
ldp x24, x23, [sp, #48] ; 16-byte Folded Reload
add sp, sp, #112
ret
LBB34_3: ; LDR path
mov x0, x24
mov x1, x23
mov x2, x22
mov x3, x21
mov x4, x20
mov x5, x19
bl _stbi_load_from_memory
cbz x0, LBB34_7 ; decode failed
; %bb.4:
ldr w1, [x22] ; w1 = *x
ldr w2, [x21] ; w2 = *y
cbnz w19, LBB34_6
; %bb.5:
ldr w19, [x20] ; req_comp == 0: use *comp
LBB34_6:
mov x3, x19
ldp x29, x30, [sp, #96] ; 16-byte Folded Reload
ldp x20, x19, [sp, #80] ; 16-byte Folded Reload
ldp x22, x21, [sp, #64] ; 16-byte Folded Reload
ldp x24, x23, [sp, #48] ; 16-byte Folded Reload
add sp, sp, #112
b _ldr_to_hdr ; tail call
LBB34_7:
Lloh70:
adrp x8, l_.str.2@PAGE
Lloh71:
add x8, x8, l_.str.2@PAGEOFF
adrp x9, _failure_reason@PAGE
str x8, [x9, _failure_reason@PAGEOFF]
b LBB34_2 ; x0 is already NULL here
.loh AdrpAdd Lloh70, Lloh71
.cfi_endproc
; -- End function
; int stbi_is_hdr_from_memory(buffer, len)
; NOTE(review): compiler-generated code; comments added for review only.
; Pure tail call: identical signature and result as stbi_hdr_test_memory.
.globl _stbi_is_hdr_from_memory ; -- Begin function stbi_is_hdr_from_memory
.p2align 2
_stbi_is_hdr_from_memory: ; @stbi_is_hdr_from_memory
.cfi_startproc
; %bb.0:
b _stbi_hdr_test_memory
.cfi_endproc
; -- End function
; int stbi_is_hdr(char const *filename /*x0*/)
; NOTE(review): compiler-generated code; comments added for review only.
; fopen(filename, l_.str) -> stbi_hdr_test_file -> fclose, returning the
; test result.  If fopen returns NULL, that NULL (i.e. 0) is returned
; unchanged as the result.
.globl _stbi_is_hdr ; -- Begin function stbi_is_hdr
.p2align 2
_stbi_is_hdr: ; @stbi_is_hdr
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
Lloh72:
adrp x1, l_.str@PAGE
Lloh73:
add x1, x1, l_.str@PAGEOFF ; fopen mode string
bl _fopen
cbz x0, LBB36_2 ; NULL FILE* -> return 0
; %bb.1:
mov x19, x0 ; x19 = FILE*
bl _stbi_hdr_test_file
mov x20, x0 ; save the test result across fclose
mov x0, x19
bl _fclose
mov x0, x20
LBB36_2:
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.loh AdrpAdd Lloh72, Lloh73
.cfi_endproc
; -- End function
; int stbi_is_hdr_from_file(FILE *f)
; NOTE(review): compiler-generated code; comments added for review only.
; Pure tail call: identical signature and result as stbi_hdr_test_file.
.globl _stbi_is_hdr_from_file ; -- Begin function stbi_is_hdr_from_file
.p2align 2
_stbi_is_hdr_from_file: ; @stbi_is_hdr_from_file
.cfi_startproc
; %bb.0:
b _stbi_hdr_test_file
.cfi_endproc
; -- End function
; void stbi_hdr_to_ldr_gamma(float gamma /*s0*/)
; NOTE(review): compiler-generated code; comments added for review only.
; Stores the reciprocal: h2l_gamma_i = 1.0f / gamma.
.globl _stbi_hdr_to_ldr_gamma ; -- Begin function stbi_hdr_to_ldr_gamma
.p2align 2
_stbi_hdr_to_ldr_gamma: ; @stbi_hdr_to_ldr_gamma
.cfi_startproc
; %bb.0:
fmov s1, #1.00000000
fdiv s0, s1, s0 ; s0 = 1 / gamma
adrp x8, _h2l_gamma_i@PAGE
str s0, [x8, _h2l_gamma_i@PAGEOFF]
ret
.cfi_endproc
; -- End function
; void stbi_hdr_to_ldr_scale(float scale /*s0*/)
; NOTE(review): compiler-generated code; comments added for review only.
; Stores the reciprocal: h2l_scale_i = 1.0f / scale.
.globl _stbi_hdr_to_ldr_scale ; -- Begin function stbi_hdr_to_ldr_scale
.p2align 2
_stbi_hdr_to_ldr_scale: ; @stbi_hdr_to_ldr_scale
.cfi_startproc
; %bb.0:
fmov s1, #1.00000000
fdiv s0, s1, s0 ; s0 = 1 / scale
adrp x8, _h2l_scale_i@PAGE
str s0, [x8, _h2l_scale_i@PAGEOFF]
ret
.cfi_endproc
; -- End function
; void stbi_ldr_to_hdr_gamma(float gamma /*s0*/)
; NOTE(review): compiler-generated code; comments added for review only.
; Stores gamma directly into the l2h_gamma global used by ldr_to_hdr.
.globl _stbi_ldr_to_hdr_gamma ; -- Begin function stbi_ldr_to_hdr_gamma
.p2align 2
_stbi_ldr_to_hdr_gamma: ; @stbi_ldr_to_hdr_gamma
.cfi_startproc
; %bb.0:
adrp x8, _l2h_gamma@PAGE
str s0, [x8, _l2h_gamma@PAGEOFF]
ret
.cfi_endproc
; -- End function
; void stbi_ldr_to_hdr_scale(float scale /*s0*/)
; NOTE(review): compiler-generated code; comments added for review only.
; Stores scale directly into the l2h_scale global used by ldr_to_hdr.
.globl _stbi_ldr_to_hdr_scale ; -- Begin function stbi_ldr_to_hdr_scale
.p2align 2
_stbi_ldr_to_hdr_scale: ; @stbi_ldr_to_hdr_scale
.cfi_startproc
; %bb.0:
adrp x8, _l2h_scale@PAGE
str s0, [x8, _l2h_scale@PAGEOFF]
ret
.cfi_endproc
; -- End function
; -----------------------------------------------------------------------------
; _load_jpeg_image -- core JPEG decode driver.
; NOTE(review): C shape inferred from the call site in _stbi_jpeg_load:
;   x0 = jpeg decoder state (stack-allocated, ~14 KB; FILE* at offset 16,
;        buffered read cursor/end at offsets 24/32, img_n at offset 8),
;   x1/x2/x3 = out-params (width/height/components), w4 = requested comp count.
;   TODO confirm against the C source.
; Flow: validate req_comp < 5, decode header, then loop over markers
; (0xDA = SOS -> decode scan, 0xD9 = EOI -> colour-convert + return,
; anything else -> _process_marker).  On any failure, free per-component
; buffers and return NULL with _failure_reason set.
; Compiler-generated code; review comments only, instructions untouched.
; -----------------------------------------------------------------------------
.p2align 2 ; -- Begin function load_jpeg_image
_load_jpeg_image: ; @load_jpeg_image
.cfi_startproc
; %bb.0:
; Prologue: save x19-x28 + fp/lr (96-byte area), then 432 bytes of locals.
stp x28, x27, [sp, #-96]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 96
stp x26, x25, [sp, #16] ; 16-byte Folded Spill
stp x24, x23, [sp, #32] ; 16-byte Folded Spill
stp x22, x21, [sp, #48] ; 16-byte Folded Spill
stp x20, x19, [sp, #64] ; 16-byte Folded Spill
stp x29, x30, [sp, #80] ; 16-byte Folded Spill
add x29, sp, #80
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
sub sp, sp, #432
; Stack-protector cookie stored at [x29,#-96]; re-checked before return.
Lloh74:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh75:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh76:
ldr x8, [x8]
stur x8, [x29, #-96]
; req_comp (w4) must be 0..4; >=5 -> error l_.str.6, return NULL.
cmp w4, #5
b.lo LBB42_3
; %bb.1:
mov x0, #0
Lloh77:
adrp x8, l_.str.6@PAGE
Lloh78:
add x8, x8, l_.str.6@PAGEOFF
; Shared error exit: x8 = message, x0 = return value (NULL).
LBB42_2:
adrp x9, _failure_reason@PAGE
str x8, [x9, _failure_reason@PAGEOFF]
b LBB42_154
LBB42_3:
; Stash arguments in callee-saved regs: x19 = state, x26/x24/x22 = out
; params, x23 = req_comp.  Clear fields at offsets 8 and 14084, then
; decode the JPEG header (second arg w1 = 0).
mov x23, x4
mov x22, x3
mov x24, x2
mov x26, x1
mov x19, x0
str wzr, [x0, #8]
str wzr, [x0, #14084]
mov w1, #0
bl _decode_jpeg_header
cbz w0, LBB42_146
; %bb.4:
; x25 = &state->marker (byte at offset 14056).  0xFF there means "no
; marker pending"; otherwise consume the cached marker.
mov w8, #14056
add x25, x19, x8
ldrb w1, [x25]
cmp w1, #255
b.ne LBB42_7
; %bb.5:
; Byte-read idiom used throughout: if state->FILE* (offset 16) is non-null
; read via fgetc (EOF mapped to 0), else pull from the in-memory buffer
; cursor/end pair at offsets 24/32.
ldr x0, [x19, #16]
cbz x0, LBB42_8
; %bb.6:
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
b LBB42_10
LBB42_7:
mov w8, #255
strb w8, [x25]
b LBB42_19
LBB42_8:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB42_11
; %bb.9:
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
LBB42_10:
; Skip 0xFF fill bytes until a marker id byte is found.
mvn w8, w8
tst w8, #0xff
b.eq LBB42_14
LBB42_11:
mov w1, #255
b LBB42_19
LBB42_12: ; in Loop: Header=BB42_14 Depth=1
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
LBB42_13: ; in Loop: Header=BB42_14 Depth=1
mvn w9, w8
tst w9, #0xff
b.ne LBB42_17
LBB42_14: ; =>This Inner Loop Header: Depth=1
ldr x0, [x19, #16]
cbnz x0, LBB42_12
; %bb.15: ; in Loop: Header=BB42_14 Depth=1
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB42_18
; %bb.16: ; in Loop: Header=BB42_14 Depth=1
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
b LBB42_13
LBB42_17:
and w1, w8, #0xff
b LBB42_19
LBB42_18:
mov w1, #0
LBB42_19:
; x28 = &state->img_comp[0] (component table at offset 13760, 72 bytes
; per entry).  x27 preloaded with error string l_.str.7.
mov w8, #13760
add x28, x19, x8
Lloh79:
adrp x27, l_.str.7@PAGE
Lloh80:
add x27, x27, l_.str.7@PAGEOFF
; Main marker loop: w1 holds the current marker id.
; 0xDA (218) = SOS (start of scan), 0xD9 (217) = EOI (end of image),
; anything else is delegated to _process_marker.
LBB42_20: ; =>This Loop Header: Depth=1
; Child Loop BB42_42 Depth 2
; Child Loop BB42_52 Depth 3
; Child Loop BB42_91 Depth 2
; Child Loop BB42_93 Depth 3
; Child Loop BB42_95 Depth 4
; Child Loop BB42_99 Depth 5
; Child Loop BB42_102 Depth 6
; Child Loop BB42_116 Depth 2
; Child Loop BB42_119 Depth 3
; Child Loop BB42_65 Depth 2
cmp w1, #218
b.eq LBB42_26
; %bb.21: ; in Loop: Header=BB42_20 Depth=1
cmp w1, #217
b.eq LBB42_135
; %bb.22: ; in Loop: Header=BB42_20 Depth=1
mov x0, x19
bl _process_marker
cbz w0, LBB42_146
LBB42_23: ; in Loop: Header=BB42_20 Depth=1
; Fetch the next marker (cached at *x25, or scan input past fill bytes).
ldrb w1, [x25]
cmp w1, #255
b.ne LBB42_30
; %bb.24: ; in Loop: Header=BB42_20 Depth=1
ldr x0, [x19, #16]
cbz x0, LBB42_60
; %bb.25: ; in Loop: Header=BB42_20 Depth=1
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
b LBB42_62
LBB42_26: ; in Loop: Header=BB42_20 Depth=1
; SOS: read the 16-bit segment length (w21 = hi byte, then lo).
ldr x0, [x19, #16]
cbz x0, LBB42_31
; %bb.27: ; in Loop: Header=BB42_20 Depth=1
bl _fgetc
cmn w0, #1
csel w21, wzr, w0, eq
ldr x0, [x19, #16]
cbz x0, LBB42_34
; %bb.28: ; in Loop: Header=BB42_20 Depth=1
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
ldr x0, [x19, #16]
add w20, w8, w21, lsl #8
cbz x0, LBB42_69
; %bb.29: ; in Loop: Header=BB42_20 Depth=1
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
b LBB42_38
LBB42_30: ; in Loop: Header=BB42_20 Depth=1
mov w8, #255
strb w8, [x25]
b LBB42_20
LBB42_31: ; in Loop: Header=BB42_20 Depth=1
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB42_68
; %bb.32: ; in Loop: Header=BB42_20 Depth=1
add x10, x8, #1
str x10, [x19, #24]
ldrb w21, [x8]
mov x8, x10
cmp x8, x9
b.lo LBB42_35
LBB42_33: ; in Loop: Header=BB42_20 Depth=1
mov w20, #0
b LBB42_36
LBB42_34: ; in Loop: Header=BB42_20 Depth=1
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB42_33
LBB42_35: ; in Loop: Header=BB42_20 Depth=1
add x10, x8, #1
str x10, [x19, #24]
ldrb w20, [x8]
mov x8, x10
LBB42_36: ; in Loop: Header=BB42_20 Depth=1
; w20 = segment length = (hi << 8) | lo.
bfi w20, w21, #8, #24
cmp x8, x9
b.hs LBB42_144
LBB42_37: ; in Loop: Header=BB42_20 Depth=1
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
LBB42_38: ; in Loop: Header=BB42_20 Depth=1
; w8 = scan component count (stored at offset 14064); must be 1..4 and
; not exceed img_n (offset 8); segment length must equal 6 + 2*count.
str w8, [x19, #14064]
sub w9, w8, #5
cmn w9, #4
b.lo LBB42_145
; %bb.39: ; in Loop: Header=BB42_20 Depth=1
ldr w9, [x19, #8]
cmp w8, w9
b.gt LBB42_145
; %bb.40: ; in Loop: Header=BB42_20 Depth=1
lsl w8, w8, #1
add w8, w8, #6
cmp w20, w8
b.ne LBB42_143
; %bb.41: ; in Loop: Header=BB42_20 Depth=1
mov x20, #0
; Per-scan-component loop: read (id, table byte) pairs; match id against
; the component table (stride 72), record Huffman table indices
; (dc = hi nibble -> offset 13776, ac = lo nibble -> offset 13780, <= 3),
; and the component order at offset 14068.
LBB42_42: ; Parent Loop BB42_20 Depth=1
; => This Loop Header: Depth=2
; Child Loop BB42_52 Depth 3
ldr x0, [x19, #16]
cbz x0, LBB42_45
; %bb.43: ; in Loop: Header=BB42_42 Depth=2
bl _fgetc
cmn w0, #1
csel w21, wzr, w0, eq
ldr x0, [x19, #16]
cbz x0, LBB42_48
; %bb.44: ; in Loop: Header=BB42_42 Depth=2
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
b LBB42_50
LBB42_45: ; in Loop: Header=BB42_42 Depth=2
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB42_59
; %bb.46: ; in Loop: Header=BB42_42 Depth=2
add x10, x8, #1
str x10, [x19, #24]
ldrb w21, [x8]
mov x8, x10
cmp x8, x9
b.lo LBB42_49
LBB42_47: ; in Loop: Header=BB42_42 Depth=2
mov w8, #0
b LBB42_50
LBB42_48: ; in Loop: Header=BB42_42 Depth=2
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB42_47
LBB42_49: ; in Loop: Header=BB42_42 Depth=2
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
LBB42_50: ; in Loop: Header=BB42_42 Depth=2
; Linear search of the component table for id == w21.
ldr w10, [x19, #8]
cmp w10, #1
b.lt LBB42_54
; %bb.51: ; in Loop: Header=BB42_42 Depth=2
mov x9, #0
mov x11, x28
LBB42_52: ; Parent Loop BB42_20 Depth=1
; Parent Loop BB42_42 Depth=2
; => This Inner Loop Header: Depth=3
ldr w12, [x11]
cmp w12, w21
b.eq LBB42_55
; %bb.53: ; in Loop: Header=BB42_52 Depth=3
add x9, x9, #1
add x11, x11, #72
cmp x10, x9
b.ne LBB42_52
b LBB42_146
LBB42_54: ; in Loop: Header=BB42_42 Depth=2
mov w9, #0
LBB42_55: ; in Loop: Header=BB42_42 Depth=2
cmp w9, w10
b.eq LBB42_146
; %bb.56: ; in Loop: Header=BB42_42 Depth=2
asr w10, w8, #4
mov w12, #72
umaddl x11, w9, w12, x19
str w10, [x11, #13776]
cmp w8, #63
b.gt LBB42_133
; %bb.57: ; in Loop: Header=BB42_42 Depth=2
mov w10, w9
and w8, w8, #0xf
madd x10, x10, x12, x19
str w8, [x10, #13780]
cmp w8, #3
b.hi LBB42_134
; %bb.58: ; in Loop: Header=BB42_42 Depth=2
mov w21, #72
add x8, x19, x20, lsl #2
str w9, [x8, #14068]
add x20, x20, #1
ldrsw x8, [x19, #14064]
cmp x20, x8
b.lt LBB42_42
b LBB42_71
LBB42_59: ; in Loop: Header=BB42_42 Depth=2
mov w21, #0
cmp x8, x9
b.lo LBB42_49
b LBB42_47
LBB42_60: ; in Loop: Header=BB42_20 Depth=1
ldp x8, x9, [x19, #24]
mov w1, #255
cmp x8, x9
b.hs LBB42_20
; %bb.61: ; in Loop: Header=BB42_20 Depth=1
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
LBB42_62: ; in Loop: Header=BB42_20 Depth=1
mvn w8, w8
mov w1, #255
tst w8, #0xff
b.ne LBB42_20
b LBB42_65
LBB42_63: ; in Loop: Header=BB42_65 Depth=2
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
LBB42_64: ; in Loop: Header=BB42_65 Depth=2
mvn w9, w8
tst w9, #0xff
b.ne LBB42_70
; Skip consecutive 0xFF padding bytes before the next marker id.
LBB42_65: ; Parent Loop BB42_20 Depth=1
; => This Inner Loop Header: Depth=2
ldr x0, [x19, #16]
cbnz x0, LBB42_63
; %bb.66: ; in Loop: Header=BB42_65 Depth=2
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB42_73
; %bb.67: ; in Loop: Header=BB42_65 Depth=2
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
b LBB42_64
LBB42_68: ; in Loop: Header=BB42_20 Depth=1
mov w21, #0
cmp x8, x9
b.lo LBB42_35
b LBB42_33
LBB42_69: ; in Loop: Header=BB42_20 Depth=1
ldp x8, x9, [x19, #24]
cmp x8, x9
b.lo LBB42_37
b LBB42_144
LBB42_70: ; in Loop: Header=BB42_20 Depth=1
and w1, w8, #0xff
b LBB42_20
LBB42_71: ; in Loop: Header=BB42_20 Depth=1
; Consume the three SOS trailer bytes (spectral start/end, successive
; approximation); the first and third must be 0 here or it is a bad SOS.
ldr x0, [x19, #16]
cbz x0, LBB42_74
; %bb.72: ; in Loop: Header=BB42_20 Depth=1
bl _fgetc
cmn w0, #1
b.ne LBB42_76
b LBB42_77
LBB42_73: ; in Loop: Header=BB42_20 Depth=1
mov w1, #0
b LBB42_20
LBB42_74: ; in Loop: Header=BB42_20 Depth=1
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB42_77
; %bb.75: ; in Loop: Header=BB42_20 Depth=1
add x9, x8, #1
str x9, [x19, #24]
ldrb w0, [x8]
LBB42_76: ; in Loop: Header=BB42_20 Depth=1
cbnz w0, LBB42_224
LBB42_77: ; in Loop: Header=BB42_20 Depth=1
ldr x0, [x19, #16]
cbz x0, LBB42_80
; %bb.78: ; in Loop: Header=BB42_20 Depth=1
bl _fgetc
ldr x0, [x19, #16]
cbz x0, LBB42_82
; %bb.79: ; in Loop: Header=BB42_20 Depth=1
bl _fgetc
cmn w0, #1
b.ne LBB42_85
b LBB42_86
LBB42_80: ; in Loop: Header=BB42_20 Depth=1
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB42_83
; %bb.81: ; in Loop: Header=BB42_20 Depth=1
add x8, x8, #1
str x8, [x19, #24]
b LBB42_83
LBB42_82: ; in Loop: Header=BB42_20 Depth=1
ldp x8, x9, [x19, #24]
LBB42_83: ; in Loop: Header=BB42_20 Depth=1
cmp x8, x9
b.hs LBB42_86
; %bb.84: ; in Loop: Header=BB42_20 Depth=1
add x9, x8, #1
str x9, [x19, #24]
ldrb w0, [x8]
LBB42_85: ; in Loop: Header=BB42_20 Depth=1
cbnz w0, LBB42_224
LBB42_86: ; in Loop: Header=BB42_20 Depth=1
; Reset entropy-decoder state: bit buffer (14048/14060), the per-component
; DC predictors (13928/13856/13784), pending marker = 0xFF, and the
; restart interval countdown (14088; INT_MAX when interval at 14084 is 0).
movi.2d v0, #0000000000000000
str d0, [x19, #14048]
str wzr, [x19, #14060]
str wzr, [x19, #13928]
str wzr, [x19, #13856]
str wzr, [x19, #13784]
mov w8, #255
strb w8, [x25]
ldr w8, [x19, #14084]
cmp w8, #0
mov w9, #2147483647
csel w8, w9, w8, eq
str w8, [x19, #14088]
ldr w9, [x19, #14064]
cmp w9, #1
b.ne LBB42_89
; %bb.87: ; in Loop: Header=BB42_20 Depth=1
; Single-component (non-interleaved) scan: iterate blocks of the one
; component; w12/w9 = blocks across/down derived from its pixel extent.
stp x28, x26, [sp, #48] ; 16-byte Folded Spill
stp x24, x22, [sp, #64] ; 16-byte Folded Spill
ldrsw x24, [x19, #14068]
madd x8, x24, x21, x19
ldr w8, [x8, #13792]
cmp w8, #1
b.lt LBB42_127
; %bb.88: ; in Loop: Header=BB42_20 Depth=1
mov w20, #0
mov w11, #0
add w8, w8, #7
asr w12, w8, #3
madd x8, x24, x21, x19
ldr w13, [x8, #13788]
add w9, w13, #7
asr w9, w9, #3
mov w10, #13776
add x10, x8, x10
str x10, [sp, #184] ; 8-byte Folded Spill
mov w10, #13780
add x22, x8, x10
mov w10, #13808
add x10, x8, x10
str x10, [sp, #176] ; 8-byte Folded Spill
mov w10, #13796
add x26, x8, x10
mov w10, #13772
add x21, x8, x10
cmp w9, #1
csinc w8, w9, wzr, gt
cmp w12, #1
str w12, [sp, #96] ; 4-byte Folded Spill
csinc w9, w12, wzr, gt
str w9, [sp, #80] ; 4-byte Folded Spill
lsl x27, x8, #3
mov w8, #1
str w13, [sp, #88] ; 4-byte Folded Spill
b LBB42_116
LBB42_89: ; in Loop: Header=BB42_20 Depth=1
; Interleaved scan: loop over MCU rows (13748) x MCU columns (13744),
; then over scan components, then each component's h x v blocks.
ldr w10, [x19, #13748]
cmp w10, #1
mov w11, #72
b.lt LBB42_23
; %bb.90: ; in Loop: Header=BB42_20 Depth=1
str wzr, [sp, #184] ; 4-byte Folded Spill
ldr w9, [x19, #13744]
LBB42_91: ; Parent Loop BB42_20 Depth=1
; => This Loop Header: Depth=2
; Child Loop BB42_93 Depth 3
; Child Loop BB42_95 Depth 4
; Child Loop BB42_99 Depth 5
; Child Loop BB42_102 Depth 6
cmp w9, #1
b.lt LBB42_114
; %bb.92: ; in Loop: Header=BB42_91 Depth=2
str wzr, [sp, #176] ; 4-byte Folded Spill
stp x24, x22, [sp, #64] ; 16-byte Folded Spill
stp x28, x26, [sp, #48] ; 16-byte Folded Spill
LBB42_93: ; Parent Loop BB42_20 Depth=1
; Parent Loop BB42_91 Depth=2
; => This Loop Header: Depth=3
; Child Loop BB42_95 Depth 4
; Child Loop BB42_99 Depth 5
; Child Loop BB42_102 Depth 6
ldr w9, [x19, #14064]
cmp w9, #1
b.lt LBB42_107
; %bb.94: ; in Loop: Header=BB42_93 Depth=3
mov x10, #0
LBB42_95: ; Parent Loop BB42_20 Depth=1
; Parent Loop BB42_91 Depth=2
; Parent Loop BB42_93 Depth=3
; => This Loop Header: Depth=4
; Child Loop BB42_99 Depth 5
; Child Loop BB42_102 Depth 6
add x8, x19, x10, lsl #2
ldrsw x4, [x8, #14068]
madd x8, x4, x11, x19
ldr w26, [x8, #13768]
cmp w26, #1
b.lt LBB42_105
; %bb.96: ; in Loop: Header=BB42_95 Depth=4
str x10, [sp, #40] ; 8-byte Folded Spill
mov w22, #0
mov w9, #13768
add x8, x8, x9
str x8, [sp, #96] ; 8-byte Folded Spill
madd x8, x4, x11, x19
mov w9, #13764
add x9, x8, x9
str x9, [sp, #88] ; 8-byte Folded Spill
mov w9, #13776
add x9, x8, x9
str x9, [sp, #168] ; 8-byte Folded Spill
mov w9, #13780
add x9, x8, x9
str x9, [sp, #128] ; 8-byte Folded Spill
mov w9, #13808
add x9, x8, x9
str x9, [sp, #80] ; 8-byte Folded Spill
mov w9, #13796
add x20, x8, x9
mov w9, #13772
add x24, x8, x9
ldr w21, [x8, #13764]
b LBB42_99
LBB42_97: ; in Loop: Header=BB42_99 Depth=5
mov w11, #72
mov x4, x28
LBB42_98: ; in Loop: Header=BB42_99 Depth=5
add w22, w22, #1
cmp w22, w26
b.ge LBB42_104
LBB42_99: ; Parent Loop BB42_20 Depth=1
; Parent Loop BB42_91 Depth=2
; Parent Loop BB42_93 Depth=3
; Parent Loop BB42_95 Depth=4
; => This Loop Header: Depth=5
; Child Loop BB42_102 Depth 6
cmp w21, #1
b.lt LBB42_98
; %bb.100: ; in Loop: Header=BB42_99 Depth=5
; decode_block(state, coeffs@sp+192, huff_dc(idx*1680 + 40),
;              huff_ac(idx*1680 + 6760), component) then idct_block
; writes the 8x8 block into the component's pixel buffer (ptr at 13808,
; stride at 13796, dequant table selected via 13772).
ldr x8, [sp, #168] ; 8-byte Folded Reload
ldrsw x8, [x8]
mov w9, #1680
madd x8, x8, x9, x19
add x2, x8, #40
ldr x8, [sp, #128] ; 8-byte Folded Reload
ldrsw x8, [x8]
madd x8, x8, x9, x19
mov w9, #6760
add x3, x8, x9
add x1, sp, #192
mov x0, x19
mov x28, x4
; kill: def $w4 killed $w4 killed $x4
bl _decode_block
cbz w0, LBB42_125
; %bb.101: ; in Loop: Header=BB42_99 Depth=5
mov w27, #1
LBB42_102: ; Parent Loop BB42_20 Depth=1
; Parent Loop BB42_91 Depth=2
; Parent Loop BB42_93 Depth=3
; Parent Loop BB42_95 Depth=4
; Parent Loop BB42_99 Depth=5
; => This Inner Loop Header: Depth=6
ldr w8, [sp, #184] ; 4-byte Folded Reload
madd w8, w26, w8, w22
ldr w9, [sp, #176] ; 4-byte Folded Reload
madd w9, w9, w21, w27
lsl w9, w9, #3
sub w9, w9, #8
ldr x10, [sp, #80] ; 8-byte Folded Reload
ldr x10, [x10]
ldr w1, [x20]
mul w8, w8, w1
lsl w8, w8, #3
add x8, x10, w8, sxtw
add x0, x8, w9, sxtw
ldrsw x8, [x24]
add x8, x19, x8, lsl #6
mov w9, #13480
add x3, x8, x9
add x2, sp, #192
bl _idct_block
ldr x8, [sp, #88] ; 8-byte Folded Reload
ldr w21, [x8]
ldr x8, [sp, #96] ; 8-byte Folded Reload
ldr w26, [x8]
cmp w27, w21
b.ge LBB42_97
; %bb.103: ; in Loop: Header=BB42_102 Depth=6
ldr x8, [sp, #168] ; 8-byte Folded Reload
ldrsw x8, [x8]
mov w9, #1680
madd x8, x8, x9, x19
add x2, x8, #40
ldr x8, [sp, #128] ; 8-byte Folded Reload
ldrsw x8, [x8]
madd x8, x8, x9, x19
mov w9, #6760
add x3, x8, x9
add x1, sp, #192
mov x0, x19
mov x4, x28
bl _decode_block
add w27, w27, #1
cbnz w0, LBB42_102
b LBB42_125
LBB42_104: ; in Loop: Header=BB42_95 Depth=4
ldr w9, [x19, #14064]
Lloh81:
adrp x27, l_.str.7@PAGE
Lloh82:
add x27, x27, l_.str.7@PAGEOFF
ldp x10, x28, [sp, #40] ; 16-byte Folded Reload
LBB42_105: ; in Loop: Header=BB42_95 Depth=4
add x10, x10, #1
cmp x10, w9, sxtw
b.lt LBB42_95
; %bb.106: ; in Loop: Header=BB42_93 Depth=3
ldr w8, [x19, #14088]
ldp x24, x22, [sp, #64] ; 16-byte Folded Reload
ldr x26, [sp, #56] ; 8-byte Folded Reload
LBB42_107: ; in Loop: Header=BB42_93 Depth=3
; Restart-interval bookkeeping: when the countdown hits zero, top up the
; bit buffer, expect an RST marker (0xD0..0xD7, checked via & 0xF8 == 0xD0)
; and reset decoder state; any other marker ends the scan.
subs w8, w8, #1
str w8, [x19, #14088]
b.gt LBB42_112
; %bb.108: ; in Loop: Header=BB42_93 Depth=3
ldr w8, [x19, #14052]
cmp w8, #23
b.gt LBB42_110
; %bb.109: ; in Loop: Header=BB42_93 Depth=3
mov x0, x19
bl _grow_buffer_unsafe
mov w11, #72
LBB42_110: ; in Loop: Header=BB42_93 Depth=3
ldrb w8, [x25]
and w8, w8, #0xf8
cmp w8, #208
b.ne LBB42_132
; %bb.111: ; in Loop: Header=BB42_93 Depth=3
movi.2d v0, #0000000000000000
str d0, [x19, #14048]
str wzr, [x19, #14060]
str wzr, [x19, #13928]
str wzr, [x19, #13856]
str wzr, [x19, #13784]
mov w8, #255
strb w8, [x25]
ldr w8, [x19, #14084]
cmp w8, #0
mov w9, #2147483647
csel w8, w9, w8, eq
str w8, [x19, #14088]
LBB42_112: ; in Loop: Header=BB42_93 Depth=3
ldr w10, [sp, #176] ; 4-byte Folded Reload
add w10, w10, #1
ldr w9, [x19, #13744]
str w10, [sp, #176] ; 4-byte Folded Spill
cmp w10, w9
b.lt LBB42_93
; %bb.113: ; in Loop: Header=BB42_91 Depth=2
ldr w10, [x19, #13748]
LBB42_114: ; in Loop: Header=BB42_91 Depth=2
ldr w12, [sp, #184] ; 4-byte Folded Reload
add w12, w12, #1
str w12, [sp, #184] ; 4-byte Folded Spill
cmp w12, w10
b.lt LBB42_91
b LBB42_23
LBB42_115: ; in Loop: Header=BB42_116 Depth=2
ldr w11, [sp, #168] ; 4-byte Folded Reload
add w11, w11, #1
ldr w8, [sp, #96] ; 4-byte Folded Reload
cmp w11, w8
cset w8, lt
add w20, w20, #8
ldr w9, [sp, #80] ; 4-byte Folded Reload
cmp w11, w9
ldr w13, [sp, #88] ; 4-byte Folded Reload
b.eq LBB42_127
; Non-interleaved scan body: one decode_block + idct_block per 8x8 block,
; same restart handling as the interleaved path.
LBB42_116: ; Parent Loop BB42_20 Depth=1
; => This Loop Header: Depth=2
; Child Loop BB42_119 Depth 3
str w8, [sp, #128] ; 4-byte Folded Spill
str w11, [sp, #168] ; 4-byte Folded Spill
cmp w13, #1
b.lt LBB42_115
; %bb.117: ; in Loop: Header=BB42_116 Depth=2
mov x28, #0
b LBB42_119
LBB42_118: ; in Loop: Header=BB42_119 Depth=3
add x28, x28, #8
cmp x27, x28
b.eq LBB42_115
LBB42_119: ; Parent Loop BB42_20 Depth=1
; Parent Loop BB42_116 Depth=2
; => This Inner Loop Header: Depth=3
ldr x8, [sp, #184] ; 8-byte Folded Reload
ldrsw x8, [x8]
mov w9, #1680
madd x8, x8, x9, x19
add x2, x8, #40
ldrsw x8, [x22]
madd x8, x8, x9, x19
mov w9, #6760
add x3, x8, x9
add x1, sp, #192
mov x0, x19
mov x4, x24
bl _decode_block
cbz w0, LBB42_128
; %bb.120: ; in Loop: Header=BB42_119 Depth=3
ldr x8, [sp, #176] ; 8-byte Folded Reload
ldr x8, [x8]
ldr w1, [x26]
mul w9, w20, w1
add x8, x8, w9, sxtw
add x0, x8, x28
ldrsw x8, [x21]
add x8, x19, x8, lsl #6
mov w9, #13480
add x3, x8, x9
add x2, sp, #192
bl _idct_block
ldr w8, [x19, #14088]
subs w8, w8, #1
str w8, [x19, #14088]
b.gt LBB42_118
; %bb.121: ; in Loop: Header=BB42_119 Depth=3
ldr w8, [x19, #14052]
cmp w8, #23
b.gt LBB42_123
; %bb.122: ; in Loop: Header=BB42_119 Depth=3
mov x0, x19
bl _grow_buffer_unsafe
LBB42_123: ; in Loop: Header=BB42_119 Depth=3
ldrb w8, [x25]
and w8, w8, #0xf8
cmp w8, #208
b.ne LBB42_129
; %bb.124: ; in Loop: Header=BB42_119 Depth=3
movi.2d v0, #0000000000000000
str d0, [x19, #14048]
str wzr, [x19, #14060]
str wzr, [x19, #13928]
str wzr, [x19, #13856]
str wzr, [x19, #13784]
mov w8, #255
strb w8, [x25]
ldr w8, [x19, #14084]
cmp w8, #0
mov w9, #2147483647
csel w8, w9, w8, eq
str w8, [x19, #14088]
b LBB42_118
LBB42_125: ; in Loop: Header=BB42_20 Depth=1
mov w8, #0
LBB42_126: ; in Loop: Header=BB42_20 Depth=1
ldp x24, x22, [sp, #64] ; 16-byte Folded Reload
ldp x28, x26, [sp, #48] ; 16-byte Folded Reload
Lloh83:
adrp x27, l_.str.7@PAGE
Lloh84:
add x27, x27, l_.str.7@PAGEOFF
b LBB42_131
LBB42_127: ; in Loop: Header=BB42_20 Depth=1
ldp x24, x22, [sp, #64] ; 16-byte Folded Reload
ldp x28, x26, [sp, #48] ; 16-byte Folded Reload
Lloh85:
adrp x27, l_.str.7@PAGE
Lloh86:
add x27, x27, l_.str.7@PAGEOFF
b LBB42_23
LBB42_128: ; in Loop: Header=BB42_20 Depth=1
mov w8, #0
b LBB42_130
LBB42_129: ; in Loop: Header=BB42_20 Depth=1
mov w8, #1
LBB42_130: ; in Loop: Header=BB42_20 Depth=1
ldp x24, x22, [sp, #64] ; 16-byte Folded Reload
ldp x28, x26, [sp, #48] ; 16-byte Folded Reload
Lloh87:
adrp x27, l_.str.7@PAGE
Lloh88:
add x27, x27, l_.str.7@PAGEOFF
ldr w9, [sp, #128] ; 4-byte Folded Reload
tbz w9, #0, LBB42_23
LBB42_131: ; in Loop: Header=BB42_20 Depth=1
cbnz w8, LBB42_23
b LBB42_146
LBB42_132: ; in Loop: Header=BB42_20 Depth=1
mov w8, #1
b LBB42_126
; Error exits: DC code > 63 -> l_.str.9; AC table index > 3 -> l_.str.10.
LBB42_133:
Lloh89:
adrp x27, l_.str.9@PAGE
Lloh90:
add x27, x27, l_.str.9@PAGEOFF
b LBB42_145
LBB42_134:
Lloh91:
adrp x27, l_.str.10@PAGE
Lloh92:
add x27, x27, l_.str.10@PAGEOFF
b LBB42_145
LBB42_135:
; EOI reached: decide output component count (req_comp, or img_n when
; req_comp == 0) and set up per-component resamplers + row buffers.
stp x26, x24, [sp, #56] ; 16-byte Folded Spill
ldr w20, [x19, #8]
cmp w23, #0
csel w8, w20, w23, eq
stp x22, x8, [sp, #72] ; 16-byte Folded Spill
cmp w8, #3
ccmp w20, #3, #0, lt
csinc w8, w20, wzr, ne
str x8, [sp, #88] ; 8-byte Folded Spill
cmp w8, #1
b.lt LBB42_156
; %bb.136:
ldr w8, [x19]
add w23, w8, #3
sub w21, w8, #1
add x8, sp, #192
add x22, x8, #24
mov w8, #13764
Lloh93:
adrp x24, _resample_row_generic@PAGE
Lloh94:
add x24, x24, _resample_row_generic@PAGEOFF
Lloh95:
adrp x25, _resample_row_hv_2@PAGE
Lloh96:
add x25, x25, _resample_row_hv_2@PAGEOFF
add x28, x19, x8
Lloh97:
adrp x27, _resample_row_1@PAGE
Lloh98:
add x27, x27, _resample_row_1@PAGEOFF
ldr x26, [sp, #88] ; 8-byte Folded Reload
b LBB42_139
LBB42_137: ; in Loop: Header=BB42_139 Depth=1
; hsub==2: pick hv_2 (if vsub==2) / h_2 (if vsub==1) / generic.
cmp w8, #2
csel x9, x25, x24, eq
cmp w8, #1
Lloh99:
adrp x8, _resample_row_h_2@PAGE
Lloh100:
add x8, x8, _resample_row_h_2@PAGEOFF
csel x10, x8, x9, eq
LBB42_138: ; in Loop: Header=BB42_139 Depth=1
stur x10, [x22, #-24]
add x22, x22, #48
add x28, x28, #72
subs x26, x26, #1
b.eq LBB42_156
; Per-component setup loop: malloc a scanline buffer (width + 3 bytes),
; compute h/v subsampling factors and choose the resample function.
LBB42_139: ; =>This Inner Loop Header: Depth=1
mov x0, x23
bl _malloc
stur x0, [x28, #60]
cbz x0, LBB42_196
; %bb.140: ; in Loop: Header=BB42_139 Depth=1
ldr w8, [x19, #13736]
ldp w9, w10, [x28]
sdiv w9, w8, w9
ldr w8, [x19, #13740]
sdiv w8, w8, w10
stp w9, w8, [x22]
asr w10, w8, #1
add w11, w21, w9
udiv w11, w11, w9
stp w11, w10, [x22, #8]
str wzr, [x22, #16]
ldur x10, [x28, #44]
stp x10, x10, [x22, #-16]
cmp w9, #2
b.eq LBB42_137
; %bb.141: ; in Loop: Header=BB42_139 Depth=1
mov x10, x24
cmp w9, #1
b.ne LBB42_138
; %bb.142: ; in Loop: Header=BB42_139 Depth=1
cmp w8, #2
Lloh101:
adrp x9, _resample_row_v_2@PAGE
Lloh102:
add x9, x9, _resample_row_v_2@PAGEOFF
csel x9, x9, x24, eq
cmp w8, #1
csel x10, x27, x9, eq
b LBB42_138
LBB42_143:
; Bad SOS segment length -> l_.str.8.
Lloh103:
adrp x27, l_.str.8@PAGE
Lloh104:
add x27, x27, l_.str.8@PAGEOFF
b LBB42_145
LBB42_144:
str wzr, [x19, #14064]
Lloh105:
adrp x27, l_.str.7@PAGE
Lloh106:
add x27, x27, l_.str.7@PAGEOFF
LBB42_145:
adrp x8, _failure_reason@PAGE
str x27, [x8, _failure_reason@PAGEOFF]
; Failure cleanup: free each component's two heap buffers
; (component record stride 72, starting around offset 13824) and
; return NULL.
LBB42_146:
ldr w8, [x19, #8]
cmp w8, #1
b.lt LBB42_153
; %bb.147:
mov x20, #0
mov w8, #13824
add x21, x19, x8
b LBB42_149
LBB42_148: ; in Loop: Header=BB42_149 Depth=1
add x20, x20, #1
ldrsw x8, [x19, #8]
add x21, x21, #72
cmp x20, x8
b.ge LBB42_153
LBB42_149: ; =>This Inner Loop Header: Depth=1
ldur x8, [x21, #-16]
cbz x8, LBB42_151
; %bb.150: ; in Loop: Header=BB42_149 Depth=1
ldur x0, [x21, #-8]
bl _free
stur xzr, [x21, #-16]
LBB42_151: ; in Loop: Header=BB42_149 Depth=1
ldr x0, [x21]
cbz x0, LBB42_148
; %bb.152: ; in Loop: Header=BB42_149 Depth=1
bl _free
str xzr, [x21]
b LBB42_148
LBB42_153:
mov x0, #0
; Common return path: verify the stack-protector cookie, restore
; callee-saved registers and return x0.
LBB42_154:
ldur x8, [x29, #-96]
Lloh107:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh108:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh109:
ldr x9, [x9]
cmp x9, x8
b.ne LBB42_225
; %bb.155:
add sp, sp, #432
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
ldp x26, x25, [sp, #16] ; 16-byte Folded Reload
ldp x28, x27, [sp], #96 ; 16-byte Folded Reload
ret
LBB42_156:
; Allocate the final image: width * out_comp * height + 1 bytes.
ldp w24, w21, [x19]
ldr x22, [sp, #80] ; 8-byte Folded Reload
mul w8, w24, w22
orr w9, wzr, #0x1
madd w0, w8, w21, w9
bl _malloc
cbz x0, LBB42_204
; %bb.157:
str x0, [sp, #40] ; 8-byte Folded Spill
cbz w21, LBB42_212
; %bb.158:
mov w26, #0
mov w27, #0
mov x8, x22
sxtw x10, w8
mov w8, #48
ldr x9, [sp, #88] ; 8-byte Folded Reload
umull x20, w9, w8
mov w8, #13792
add x8, x19, x8
str x8, [sp, #48] ; 8-byte Folded Spill
add x11, x0, #64
add x8, x0, #3
stp x8, x10, [sp, #24] ; 16-byte Folded Spill
add x8, x0, #1
stp x11, x8, [sp, #8] ; 16-byte Folded Spill
; v7/v17 = all-0xFF: used below as the constant 255 alpha byte lanes.
movi.2d v7, #0xffffffffffffffff
movi.2d v17, #0xffffffffffffffff
b LBB42_161
LBB42_159: ; in Loop: Header=BB42_161 Depth=1
mov x24, x8
LBB42_160: ; in Loop: Header=BB42_161 Depth=1
add w27, w27, #1
ldr w8, [x19, #4]
add w26, w26, w21
cmp w27, w8
b.hs LBB42_203
; Per-output-row loop (w27 = row index): resample each component's
; scanline, then colour-convert / expand into the output buffer.
LBB42_161: ; =>This Loop Header: Depth=1
; Child Loop BB42_164 Depth 2
; Child Loop BB42_186 Depth 2
; Child Loop BB42_190 Depth 2
; Child Loop BB42_194 Depth 2
; Child Loop BB42_178 Depth 2
; Child Loop BB42_171 Depth 2
str w24, [sp, #176] ; 4-byte Folded Spill
add x8, sp, #96
st1.2d { v16, v17 }, [x8] ; 32-byte Folded Spill
add x8, sp, #128
st1.2d { v6, v7 }, [x8] ; 32-byte Folded Spill
str w27, [sp, #184] ; 4-byte Folded Spill
str w26, [sp, #168] ; 4-byte Folded Spill
ldr x8, [sp, #88] ; 8-byte Folded Reload
cmp w8, #1
b.lt LBB42_167
; %bb.162: ; in Loop: Header=BB42_161 Depth=1
mov x24, #0
sub x26, x29, #128
ldr x27, [sp, #48] ; 8-byte Folded Reload
b LBB42_164
LBB42_163: ; in Loop: Header=BB42_164 Depth=2
add x26, x26, #8
add x24, x24, #48
add x27, x27, #72
cmp x20, x24
b.eq LBB42_167
; Resample loop: call each component's resample function pointer
; (indirect call via blr x8) with (out, in_near, in_far, w, hs).
LBB42_164: ; Parent Loop BB42_161 Depth=1
; => This Inner Loop Header: Depth=2
add x8, sp, #192
add x28, x8, x24
ldr x8, [x28]
ldr x0, [x27, #32]
add x25, x28, #16
add x21, x28, #8
ldp w3, w22, [x28, #32]
ldp w4, w23, [x28, #24]
cmp w22, w23, asr #1
csel x9, x21, x25, lt
ldr x1, [x9]
csel x9, x25, x21, lt
ldr x2, [x9]
blr x8
str x0, [x26]
add w8, w22, #1
str w8, [x28, #36]
cmp w8, w23
b.lt LBB42_163
; %bb.165: ; in Loop: Header=BB42_164 Depth=2
ldr x8, [x25]
str x8, [x21]
ldr w9, [x28, #40]
add w9, w9, #1
stp wzr, w9, [x28, #36]
ldr w10, [x27]
cmp w9, w10
b.ge LBB42_163
; %bb.166: ; in Loop: Header=BB42_164 Depth=2
ldrsw x9, [x27, #4]
add x8, x8, x9
str x8, [x25]
b LBB42_163
LBB42_167: ; in Loop: Header=BB42_161 Depth=1
ldur x1, [x29, #-128]
ldr x21, [sp, #80] ; 8-byte Folded Reload
cmp w21, #3
b.lt LBB42_172
; %bb.168: ; in Loop: Header=BB42_161 Depth=1
ldr w8, [x19, #8]
cmp w8, #3
ldr w27, [sp, #184] ; 4-byte Folded Reload
ldr w11, [sp, #176] ; 4-byte Folded Reload
b.ne LBB42_176
; %bb.169: ; in Loop: Header=BB42_161 Depth=1
; YCbCr -> RGB(A) path.  Fixed-point constants (x65536, rounded):
;   w3 = 91881  ~ 1.40200 (Cr->R)     w4 = -46802 ~ -0.71414 (Cr->G)
;   w5 = -22554 ~ -0.34414 (Cb->G)    w6 = 116130 ~ 1.77200 (Cb->B)
ldr w8, [x19]
cmp w8, #1
ldp x23, x22, [sp, #64] ; 16-byte Folded Reload
ldr x25, [sp, #56] ; 8-byte Folded Reload
ldr w26, [sp, #168] ; 4-byte Folded Reload
mov w3, #26345
movk w3, #1, lsl #16
mov w4, #-46802
mov w5, #-22554
add x9, sp, #128
ld1.2d { v6, v7 }, [x9] ; 32-byte Folded Reload
mov w6, #50594
movk w6, #1, lsl #16
add x9, sp, #96
ld1.2d { v16, v17 }, [x9] ; 32-byte Folded Reload
mov w7, #255
b.lt LBB42_159
; %bb.170: ; in Loop: Header=BB42_161 Depth=1
ldp x9, x10, [x29, #-120]
mul w11, w11, w26
ldr x12, [sp, #16] ; 8-byte Folded Reload
add x11, x12, x11
mov x12, x8
; Per-pixel YCbCr->RGB: 16.16 fixed point with clamping to 0..255,
; alpha forced to 255 (strb w7).
LBB42_171: ; Parent Loop BB42_161 Depth=1
; => This Inner Loop Header: Depth=2
ldrb w13, [x1], #1
mov w14, #32768
bfi w14, w13, #16, #8
ldrb w13, [x10], #1
ldrb w15, [x9], #1
sub w13, w13, #128
sub w15, w15, #128
madd w16, w13, w3, w14
madd w13, w13, w4, w14
madd w13, w15, w5, w13
madd w14, w15, w6, w14
lsr w15, w16, #16
lsr w17, w13, #16
lsr w0, w14, #16
cmp w16, #0
csel w2, wzr, w7, lt
lsr w16, w16, #24
cmp w16, #0
csel w15, w2, w15, ne
cmp w13, #0
csel w16, wzr, w7, lt
lsr w13, w13, #24
cmp w13, #0
csel w13, w16, w17, ne
cmp w14, #0
csel w16, wzr, w7, lt
lsr w14, w14, #24
sturb w15, [x11, #-1]
strb w13, [x11]
cmp w14, #0
csel w13, w16, w0, ne
strb w13, [x11, #1]
strb w7, [x11, #2]
add x11, x11, x21
subs x12, x12, #1
b.ne LBB42_171
b LBB42_159
LBB42_172: ; in Loop: Header=BB42_161 Depth=1
; out_comp 1 or 2: copy / expand the luminance row.
ldr w27, [sp, #184] ; 4-byte Folded Reload
mul w8, w27, w21
ldr w14, [sp, #176] ; 4-byte Folded Reload
mul w8, w8, w14
ldr x9, [sp, #40] ; 8-byte Folded Reload
add x0, x9, x8
ldr w24, [x19]
cmp w21, #1
b.ne LBB42_179
; %bb.173: ; in Loop: Header=BB42_161 Depth=1
ldp x23, x22, [sp, #64] ; 16-byte Folded Reload
ldr x25, [sp, #56] ; 8-byte Folded Reload
ldr w26, [sp, #168] ; 4-byte Folded Reload
cbz w24, LBB42_175
; %bb.174: ; in Loop: Header=BB42_161 Depth=1
mov x2, x24
bl _memcpy
; kill: def $w24 killed $w24 killed $x24
LBB42_175: ; in Loop: Header=BB42_161 Depth=1
add x8, sp, #128
ld1.2d { v6, v7 }, [x8] ; 32-byte Folded Reload
add x8, sp, #96
ld1.2d { v16, v17 }, [x8] ; 32-byte Folded Reload
b LBB42_160
LBB42_176: ; in Loop: Header=BB42_161 Depth=1
; Grayscale source, 3/4 output components: replicate Y into R,G,B and
; store 255 alpha per pixel.
ldr w8, [x19]
ldp x23, x22, [sp, #64] ; 16-byte Folded Reload
ldr x25, [sp, #56] ; 8-byte Folded Reload
ldr w26, [sp, #168] ; 4-byte Folded Reload
add x9, sp, #128
ld1.2d { v6, v7 }, [x9] ; 32-byte Folded Reload
add x9, sp, #96
ld1.2d { v16, v17 }, [x9] ; 32-byte Folded Reload
mov w13, #255
cbz w8, LBB42_182
; %bb.177: ; in Loop: Header=BB42_161 Depth=1
mul w9, w11, w26
ldp x10, x12, [sp, #24] ; 16-byte Folded Reload
add x9, x10, x9
mov x10, x8
LBB42_178: ; Parent Loop BB42_161 Depth=1
; => This Inner Loop Header: Depth=2
ldrb w11, [x1], #1
sturb w11, [x9, #-1]
sturb w11, [x9, #-2]
sturb w11, [x9, #-3]
strb w13, [x9]
add x9, x9, x12
subs x10, x10, #1
b.ne LBB42_178
b LBB42_159
LBB42_179: ; in Loop: Header=BB42_161 Depth=1
; out_comp == 2: expand Y row to (Y, 255) pairs, vectorized below.
ldp x23, x22, [sp, #64] ; 16-byte Folded Reload
ldr x25, [sp, #56] ; 8-byte Folded Reload
ldr w26, [sp, #168] ; 4-byte Folded Reload
add x8, sp, #128
ld1.2d { v6, v7 }, [x8] ; 32-byte Folded Reload
add x8, sp, #96
ld1.2d { v16, v17 }, [x8] ; 32-byte Folded Reload
mov w13, #255
cbz w24, LBB42_160
; %bb.180: ; in Loop: Header=BB42_161 Depth=1
cmp w24, #8
b.hs LBB42_183
; %bb.181: ; in Loop: Header=BB42_161 Depth=1
mov x8, #0
b LBB42_193
LBB42_182: ; in Loop: Header=BB42_161 Depth=1
mov w24, #0
b LBB42_160
LBB42_183: ; in Loop: Header=BB42_161 Depth=1
cmp w24, #64
b.hs LBB42_185
; %bb.184: ; in Loop: Header=BB42_161 Depth=1
mov x8, #0
b LBB42_189
LBB42_185: ; in Loop: Header=BB42_161 Depth=1
and x8, x24, #0xffffffc0
add x9, x1, #32
mul w10, w14, w26
ldr x11, [sp, #8] ; 8-byte Folded Reload
add x11, x11, x10
mov x10, x8
; NEON wide path: 64 pixels/iteration; st2.16b interleaves the Y bytes
; with the all-0xFF lanes of v7 to produce (Y, 255) byte pairs.
LBB42_186: ; Parent Loop BB42_161 Depth=1
; => This Inner Loop Header: Depth=2
ldp q6, q0, [x9, #-32]
mov.16b v1, v7
ldp q2, q4, [x9], #64
sub x12, x11, #64
st2.16b { v6, v7 }, [x12]
mov.16b v3, v7
sub x12, x11, #32
st2.16b { v0, v1 }, [x12]
add x12, x11, #128
st2.16b { v2, v3 }, [x11], #32
mov.16b v5, v7
st2.16b { v4, v5 }, [x11]
mov x11, x12
subs x10, x10, #64
b.ne LBB42_186
; %bb.187: ; in Loop: Header=BB42_161 Depth=1
cmp x8, x24
b.eq LBB42_195
; %bb.188: ; in Loop: Header=BB42_161 Depth=1
tst x24, #0x38
b.eq LBB42_192
LBB42_189: ; in Loop: Header=BB42_161 Depth=1
; 8-pixel NEON tail.
mov x11, x8
and x8, x24, #0xfffffff8
add x0, x0, x8, lsl #1
add x9, x1, x11
mul w10, w14, w26
add x10, x10, x11, lsl #1
ldr x12, [sp, #40] ; 8-byte Folded Reload
add x10, x12, x10
sub x11, x11, x8
LBB42_190: ; Parent Loop BB42_161 Depth=1
; => This Inner Loop Header: Depth=2
ldr d16, [x9], #8
st2.8b { v16, v17 }, [x10], #16
adds x11, x11, #8
b.ne LBB42_190
; %bb.191: ; in Loop: Header=BB42_161 Depth=1
cmp x8, x24
b.ne LBB42_193
b LBB42_195
LBB42_192: ; in Loop: Header=BB42_161 Depth=1
add x0, x0, x8, lsl #1
LBB42_193: ; in Loop: Header=BB42_161 Depth=1
add x9, x1, x8
sub x8, x24, x8
; Scalar tail: one (Y, 255) pair per pixel.
LBB42_194: ; Parent Loop BB42_161 Depth=1
; => This Inner Loop Header: Depth=2
ldrb w10, [x9], #1
strb w10, [x0]
strb w13, [x0, #1]
add x0, x0, #2
subs x8, x8, #1
b.ne LBB42_194
LBB42_195: ; in Loop: Header=BB42_161 Depth=1
; kill: def $w24 killed $w24 killed $x24
b LBB42_160
LBB42_196:
; malloc failure inside the resampler setup: free component buffers.
cmp w20, #1
b.lt LBB42_211
; %bb.197:
mov x20, #0
mov w8, #13824
add x21, x19, x8
b LBB42_199
LBB42_198: ; in Loop: Header=BB42_199 Depth=1
add x20, x20, #1
ldrsw x8, [x19, #8]
add x21, x21, #72
cmp x20, x8
b.ge LBB42_211
LBB42_199: ; =>This Inner Loop Header: Depth=1
ldur x8, [x21, #-16]
cbz x8, LBB42_201
; %bb.200: ; in Loop: Header=BB42_199 Depth=1
ldur x0, [x21, #-8]
bl _free
stur xzr, [x21, #-16]
LBB42_201: ; in Loop: Header=BB42_199 Depth=1
ldr x0, [x21]
cbz x0, LBB42_198
; %bb.202: ; in Loop: Header=BB42_199 Depth=1
bl _free
str xzr, [x21]
b LBB42_198
LBB42_203:
ldr w20, [x19, #8]
b LBB42_213
LBB42_204:
; Output-image malloc failure: same cleanup, then l_.str.5 error.
cmp w20, #1
b.lt LBB42_211
; %bb.205:
mov x20, #0
mov w8, #13824
add x21, x19, x8
b LBB42_207
LBB42_206: ; in Loop: Header=BB42_207 Depth=1
add x20, x20, #1
ldrsw x8, [x19, #8]
add x21, x21, #72
cmp x20, x8
b.ge LBB42_211
LBB42_207: ; =>This Inner Loop Header: Depth=1
ldur x8, [x21, #-16]
cbz x8, LBB42_209
; %bb.208: ; in Loop: Header=BB42_207 Depth=1
ldur x0, [x21, #-8]
bl _free
stur xzr, [x21, #-16]
LBB42_209: ; in Loop: Header=BB42_207 Depth=1
ldr x0, [x21]
cbz x0, LBB42_206
; %bb.210: ; in Loop: Header=BB42_207 Depth=1
bl _free
str xzr, [x21]
b LBB42_206
LBB42_211:
mov x0, #0
Lloh110:
adrp x8, l_.str.5@PAGE
Lloh111:
add x8, x8, l_.str.5@PAGEOFF
b LBB42_2
LBB42_212:
ldp x23, x22, [sp, #64] ; 16-byte Folded Reload
ldr x25, [sp, #56] ; 8-byte Folded Reload
LBB42_213:
; Success path: free intermediate component buffers, write the
; out-params (*x = width, *y = height, *comp if non-null), return image.
cmp w20, #1
b.lt LBB42_221
; %bb.214:
mov x20, #0
mov w8, #13824
add x21, x19, x8
b LBB42_216
LBB42_215: ; in Loop: Header=BB42_216 Depth=1
add x20, x20, #1
ldrsw x8, [x19, #8]
add x21, x21, #72
cmp x20, x8
b.ge LBB42_220
LBB42_216: ; =>This Inner Loop Header: Depth=1
ldur x8, [x21, #-16]
cbz x8, LBB42_218
; %bb.217: ; in Loop: Header=BB42_216 Depth=1
ldur x0, [x21, #-8]
bl _free
stur xzr, [x21, #-16]
LBB42_218: ; in Loop: Header=BB42_216 Depth=1
ldr x0, [x21]
cbz x0, LBB42_215
; %bb.219: ; in Loop: Header=BB42_216 Depth=1
bl _free
str xzr, [x21]
b LBB42_215
LBB42_220:
ldr w24, [x19]
LBB42_221:
str w24, [x25]
ldr w8, [x19, #4]
str w8, [x23]
cbz x22, LBB42_223
; %bb.222:
ldr w8, [x19, #8]
str w8, [x22]
LBB42_223:
ldr x0, [sp, #40] ; 8-byte Folded Reload
b LBB42_154
LBB42_224:
; Non-zero SOS trailer byte -> l_.str.11.
Lloh112:
adrp x27, l_.str.11@PAGE
Lloh113:
add x27, x27, l_.str.11@PAGEOFF
b LBB42_145
LBB42_225:
; Stack-protector cookie mismatch: abort.
bl ___stack_chk_fail
.loh AdrpLdrGotLdr Lloh74, Lloh75, Lloh76
.loh AdrpAdd Lloh77, Lloh78
.loh AdrpAdd Lloh79, Lloh80
.loh AdrpAdd Lloh81, Lloh82
.loh AdrpAdd Lloh83, Lloh84
.loh AdrpAdd Lloh85, Lloh86
.loh AdrpAdd Lloh87, Lloh88
.loh AdrpAdd Lloh89, Lloh90
.loh AdrpAdd Lloh91, Lloh92
.loh AdrpAdd Lloh97, Lloh98
.loh AdrpAdd Lloh95, Lloh96
.loh AdrpAdd Lloh93, Lloh94
.loh AdrpAdd Lloh99, Lloh100
.loh AdrpAdd Lloh101, Lloh102
.loh AdrpAdd Lloh103, Lloh104
.loh AdrpAdd Lloh105, Lloh106
.loh AdrpLdrGotLdr Lloh107, Lloh108, Lloh109
.loh AdrpAdd Lloh110, Lloh111
.loh AdrpAdd Lloh112, Lloh113
.cfi_endproc
; -- End function
; -----------------------------------------------------------------------------
; _stbi_jpeg_load -- file-based JPEG load entry point.
; NOTE(review): C shape inferred from the code below:
;   x0 = filename, x1..x3 = out-params forwarded to _load_jpeg_image,
;   x4 = requested component count.  TODO confirm against the C source.
; Opens the file (mode string l_.str; presumably "rb" -- verify), builds a
; 14096-byte jpeg decoder state on the stack with the FILE* at offset 16,
; runs _load_jpeg_image, closes the file, and returns the pixel buffer
; (NULL when fopen fails).
; -----------------------------------------------------------------------------
.globl _stbi_jpeg_load ; -- Begin function stbi_jpeg_load
.p2align 2
_stbi_jpeg_load: ; @stbi_jpeg_load
.cfi_startproc
; %bb.0:
stp x24, x23, [sp, #-64]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 64
stp x22, x21, [sp, #16] ; 16-byte Folded Spill
stp x20, x19, [sp, #32] ; 16-byte Folded Spill
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
; Large frame: probe the stack via ___chkstk_darwin (size in w9 per the
; Darwin convention), then allocate 12288 + 1808 = 14096 bytes of locals.
mov w9, #14096
Lloh114:
adrp x16, ___chkstk_darwin@GOTPAGE
Lloh115:
ldr x16, [x16, ___chkstk_darwin@GOTPAGEOFF]
blr x16
sub sp, sp, #3, lsl #12 ; =12288
sub sp, sp, #1808
; Preserve the out-params / req_comp across the fopen call.
mov x19, x4
mov x20, x3
mov x21, x2
mov x23, x1
Lloh116:
adrp x1, l_.str@PAGE
Lloh117:
add x1, x1, l_.str@PAGEOFF
bl _fopen
cbz x0, LBB43_2
; %bb.1:
; FILE* stored into the decoder state's file field (sp + 16); the state
; itself lives at sp and is passed as arg0.
mov x22, x0
str x0, [sp, #16]
mov x0, sp
mov x1, x23
mov x2, x21
mov x3, x20
mov x4, x19
bl _load_jpeg_image
mov x19, x0
mov x0, x22
bl _fclose
b LBB43_3
LBB43_2:
; fopen failed: return NULL.
mov x19, #0
LBB43_3:
mov x0, x19
add sp, sp, #3, lsl #12 ; =12288
add sp, sp, #1808
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
ldp x24, x23, [sp], #64 ; 16-byte Folded Reload
ret
.loh AdrpAdd Lloh116, Lloh117
.loh AdrpLdrGot Lloh114, Lloh115
.cfi_endproc
; -- End function
; -----------------------------------------------------------------
; _decode_jpeg_header(j, scan)  (file-local)
; In: x0 (-> x19) = decoder state, x1 (-> x20) = scan mode.
; Byte source is dual: if the FILE* at [j,#16] is non-NULL, bytes
; come from _fgetc (EOF maps to 0); otherwise from a memory buffer
; whose cursor/end pointers are at [j,#24]/[j,#32].
; A one-byte marker cache lives at j+14056 (x21); 0xFF is its
; "empty" sentinel.
; The stream must open with 0xFF then (after any 0xFF fill bytes)
; 0xD8 — the JPEG SOI marker.  If scan == 1 the function returns 1
; right after SOI is validated.  Otherwise it loops: call
; _process_marker, fetch the next marker, and when a marker with
; (m & 0xfe) == 0xC0 appears (0xC0/0xC1 — presumably SOF0/SOF1;
; confirm against the marker table) hand off to
; _process_frame_header and return its 0/1 result.
; Failure: returns 0 with _failure_reason set to l_.str.22 (bad
; SOI path) or l_.str.23 (input exhausted before SOF).
; -----------------------------------------------------------------
.p2align 2 ; -- Begin function decode_jpeg_header
_decode_jpeg_header: ; @decode_jpeg_header
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x20, x1
mov x19, x0
; x21 -> marker cache at j+14056; reset it to the 0xFF sentinel
mov w8, #14056
add x21, x0, x8
mov w8, #255
strb w8, [x21]
; read first byte: FILE* path (fgetc) or memory-buffer path
ldr x0, [x0, #16]
cbz x0, LBB44_2
; %bb.1:
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
b LBB44_4
LBB44_2:
; memory buffer: cursor >= end means out of data
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB44_13
; %bb.3:
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
LBB44_4:
; first byte must be 0xFF
mvn w8, w8
tst w8, #0xff
b.ne LBB44_13
b LBB44_7
LBB44_5: ; in Loop: Header=BB44_7 Depth=1
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
LBB44_6: ; in Loop: Header=BB44_7 Depth=1
and w8, w8, #0xff
cmp w8, #255
b.ne LBB44_10
LBB44_7: ; =>This Inner Loop Header: Depth=1
; skip any run of 0xFF fill bytes
ldr x0, [x19, #16]
cbnz x0, LBB44_5
; %bb.8: ; in Loop: Header=BB44_7 Depth=1
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB44_13
; %bb.9: ; in Loop: Header=BB44_7 Depth=1
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
b LBB44_6
LBB44_10:
; byte after 0xFF must be 0xD8 (SOI)
cmp w8, #216
b.ne LBB44_13
; %bb.11:
cmp w20, #1
b.ne LBB44_16
; %bb.12:
; scan-only mode: SOI seen, report success
mov w0, #1
b LBB44_15
LBB44_13:
; bad/missing SOI: fail with l_.str.22
mov w0, #0
Lloh118:
adrp x8, l_.str.22@PAGE
Lloh119:
add x8, x8, l_.str.22@PAGEOFF
LBB44_14:
adrp x9, _failure_reason@PAGE
str x8, [x9, _failure_reason@PAGEOFF]
LBB44_15:
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
LBB44_16:
; fetch next marker, honoring the cache at [x21]
ldrb w1, [x21]
cmp w1, #255
b.ne LBB44_19
; %bb.17:
ldr x0, [x19, #16]
cbz x0, LBB44_23
; %bb.18:
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
b LBB44_25
LBB44_19:
; consume the cached marker and reset the cache
mov w8, #255
strb w8, [x21]
LBB44_20:
; (m & 0xfe) == 0xC0 -> frame header marker
and w8, w1, #0xfe
cmp w8, #192
b.ne LBB44_22
LBB44_21:
mov x0, x19
bl _process_frame_header
cmp w0, #0
cset w0, ne
b LBB44_15
LBB44_22:
mov w20, #255
b LBB44_37
LBB44_23:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB44_33
; %bb.24:
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
LBB44_25:
mvn w8, w8
tst w8, #0xff
b.eq LBB44_29
; %bb.26:
mov w1, #255
mov w20, #255
b LBB44_37
LBB44_27: ; in Loop: Header=BB44_29 Depth=1
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
LBB44_28: ; in Loop: Header=BB44_29 Depth=1
mvn w9, w8
tst w9, #0xff
b.ne LBB44_32
LBB44_29: ; =>This Inner Loop Header: Depth=1
; skip 0xFF padding before the marker byte
ldr x0, [x19, #16]
cbnz x0, LBB44_27
; %bb.30: ; in Loop: Header=BB44_29 Depth=1
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB44_34
; %bb.31: ; in Loop: Header=BB44_29 Depth=1
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
b LBB44_28
LBB44_32:
and w1, w8, #0xff
b LBB44_20
LBB44_33:
mov w1, #255
mov w20, #255
b LBB44_37
LBB44_34:
mov w1, #0
mov w20, #255
b LBB44_37
LBB44_35: ; in Loop: Header=BB44_37 Depth=1
strb w20, [x21]
LBB44_36: ; in Loop: Header=BB44_37 Depth=1
and w8, w1, #0xfe
cmp w8, #192
b.eq LBB44_21
LBB44_37: ; =>This Loop Header: Depth=1
; Child Loop BB44_46 Depth 2
; Child Loop BB44_60 Depth 2
; Child Loop BB44_55 Depth 2
; main marker loop: process each non-SOF marker, then get the next
mov x0, x19
bl _process_marker
cbz w0, LBB44_15
; %bb.38: ; in Loop: Header=BB44_37 Depth=1
ldrb w1, [x21]
cmp w1, #255
b.ne LBB44_35
; %bb.39: ; in Loop: Header=BB44_37 Depth=1
ldr x0, [x19, #16]
cbz x0, LBB44_41
; %bb.40: ; in Loop: Header=BB44_37 Depth=1
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
b LBB44_43
LBB44_41: ; in Loop: Header=BB44_37 Depth=1
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB44_46
; %bb.42: ; in Loop: Header=BB44_37 Depth=1
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
LBB44_43: ; in Loop: Header=BB44_37 Depth=1
mvn w8, w8
tst w8, #0xff
b.ne LBB44_46
b LBB44_55
LBB44_44: ; in Loop: Header=BB44_46 Depth=2
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
LBB44_45: ; in Loop: Header=BB44_46 Depth=2
mvn w8, w8
tst w8, #0xff
b.eq LBB44_60
LBB44_46: ; Parent Loop BB44_37 Depth=1
; => This Inner Loop Header: Depth=2
; no marker yet: scan forward, but stop at end-of-input (feof /
; buffer exhausted) -> LBB44_65 failure
ldr x0, [x19, #16]
cbz x0, LBB44_48
; %bb.47: ; in Loop: Header=BB44_46 Depth=2
bl _feof
cbz w0, LBB44_49
b LBB44_65
LBB44_48: ; in Loop: Header=BB44_46 Depth=2
ldp x8, x9, [x19, #24]
cmp x8, x9
cset w0, hs
cbnz w0, LBB44_65
LBB44_49: ; in Loop: Header=BB44_46 Depth=2
ldrb w1, [x21]
cmp w1, #255
b.ne LBB44_35
; %bb.50: ; in Loop: Header=BB44_46 Depth=2
ldr x0, [x19, #16]
cbnz x0, LBB44_44
; %bb.51: ; in Loop: Header=BB44_46 Depth=2
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB44_46
; %bb.52: ; in Loop: Header=BB44_46 Depth=2
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
b LBB44_45
LBB44_53: ; in Loop: Header=BB44_55 Depth=2
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
LBB44_54: ; in Loop: Header=BB44_55 Depth=2
mvn w9, w8
tst w9, #0xff
b.ne LBB44_63
LBB44_55: ; Parent Loop BB44_37 Depth=1
; => This Inner Loop Header: Depth=2
; consume 0xFF padding, read the marker byte that follows
ldr x0, [x19, #16]
cbnz x0, LBB44_53
; %bb.56: ; in Loop: Header=BB44_55 Depth=2
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB44_64
; %bb.57: ; in Loop: Header=BB44_55 Depth=2
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
b LBB44_54
LBB44_58: ; in Loop: Header=BB44_60 Depth=2
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
LBB44_59: ; in Loop: Header=BB44_60 Depth=2
mvn w9, w8
tst w9, #0xff
b.ne LBB44_63
LBB44_60: ; Parent Loop BB44_37 Depth=1
; => This Inner Loop Header: Depth=2
ldr x0, [x19, #16]
cbnz x0, LBB44_58
; %bb.61: ; in Loop: Header=BB44_60 Depth=2
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB44_64
; %bb.62: ; in Loop: Header=BB44_60 Depth=2
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
b LBB44_59
LBB44_63: ; in Loop: Header=BB44_37 Depth=1
and w1, w8, #0xff
b LBB44_36
LBB44_64: ; in Loop: Header=BB44_37 Depth=1
mov w1, #0
b LBB44_36
LBB44_65:
; ran out of input before a frame header: fail with l_.str.23
mov w0, #0
Lloh120:
adrp x8, l_.str.23@PAGE
Lloh121:
add x8, x8, l_.str.23@PAGEOFF
b LBB44_14
.loh AdrpAdd Lloh118, Lloh119
.loh AdrpAdd Lloh120, Lloh121
.cfi_endproc
; -- End function
; -----------------------------------------------------------------
; _stbi_zlib_decode_malloc_guesssize(buffer, len, initial_size, outlen)
; In: x0 = compressed data, w1 = its length, w2 = initial output
; allocation, x3 = optional int* receiving the decompressed size.
; malloc(initial_size) for the output, build a zbuf on the stack
; over [buffer, buffer+len), and run
;   _do_zlib(zbuf, out, initial_size, expandable=1, parse_header=1).
; On success: if outlen != NULL, *outlen = zout - zout_start;
; returns zout_start (the output buffer, possibly realloc'd by
; _do_zlib).  On failure: frees the output and returns NULL.
; Stack-protector (__stack_chk_guard) wraps the whole body.
; -----------------------------------------------------------------
.globl _stbi_zlib_decode_malloc_guesssize ; -- Begin function stbi_zlib_decode_malloc_guesssize
.p2align 2
_stbi_zlib_decode_malloc_guesssize: ; @stbi_zlib_decode_malloc_guesssize
.cfi_startproc
; %bb.0:
stp x28, x27, [sp, #-64]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 64
stp x22, x21, [sp, #16] ; 16-byte Folded Spill
stp x20, x19, [sp, #32] ; 16-byte Folded Spill
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w27, -56
.cfi_offset w28, -64
; probe 4112 (= 4096 + 16) bytes for the zbuf frame
mov w9, #4112
Lloh122:
adrp x16, ___chkstk_darwin@GOTPAGE
Lloh123:
ldr x16, [x16, ___chkstk_darwin@GOTPAGEOFF]
blr x16
sub sp, sp, #1, lsl #12 ; =4096
sub sp, sp, #16
; stash args across the calls
mov x19, x3
mov x20, x2
mov x22, x1
mov x21, x0
; stack canary -> [x29, #-56]
Lloh124:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh125:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh126:
ldr x8, [x8]
stur x8, [x29, #-56]
sxtw x0, w20
bl _malloc
cbz x0, LBB45_6
; %bb.1:
; zbuf at sp+8: input cursor = buffer, input end = buffer + len
mov x1, x0
add x8, x21, w22, sxtw
stp x21, x8, [sp, #8]
add x0, sp, #8
mov x2, x20
mov w3, #1
mov w4, #1
bl _do_zlib
cbz w0, LBB45_4
; %bb.2:
cbz x19, LBB45_5
; %bb.3:
; *outlen = zout (zbuf+24 = sp+32) - zout_start (zbuf+32 = sp+40)
ldr w8, [sp, #32]
ldr x0, [sp, #40]
sub w8, w8, w0
str w8, [x19]
b LBB45_6
LBB45_4:
; decode failed: release the output buffer, return NULL
ldr x0, [sp, #40]
bl _free
mov x0, #0
b LBB45_6
LBB45_5:
ldr x0, [sp, #40]
LBB45_6:
ldur x8, [x29, #-56]
Lloh127:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh128:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh129:
ldr x9, [x9]
cmp x9, x8
b.ne LBB45_8
; %bb.7:
add sp, sp, #1, lsl #12 ; =4096
add sp, sp, #16
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
ldp x28, x27, [sp], #64 ; 16-byte Folded Reload
ret
LBB45_8:
bl ___stack_chk_fail
.loh AdrpLdrGotLdr Lloh124, Lloh125, Lloh126
.loh AdrpLdrGot Lloh122, Lloh123
.loh AdrpLdrGotLdr Lloh127, Lloh128, Lloh129
.cfi_endproc
; -- End function
; -----------------------------------------------------------------
; _do_zlib(a, obuf, olen, expandable, parse_header)  (file-local)
; DEFLATE/zlib decompressor core.  zbuf layout (x19 = a):
;   [a]      input cursor      [a,#8]   input end
;   [a,#16]  num_bits          [a,#20]  code_buffer (bit reservoir)
;   [a,#24]  zout              [a,#32]  zout_start
;   [a,#40]  zout_end          [a,#48]  expandable flag
;   a+52     length huffman    a+2072   distance huffman
; Locals: sp+24 = 19 code-length sizes, sp+49 = symbol-length
; array, sp+504 = code-length huffman.
; If parse_header (w4) != 0: validates the 2-byte zlib header —
; (CMF<<8 | FLG) must be divisible by 31 (done via a multiplicative
; inverse, constant 0xBDEF7BDF), FLG bit 5 (preset dictionary) must
; be clear, and CM (CMF & 0xf) must be 8 (deflate); otherwise fails
; with l_.str.34 / l_.str.35 / l_.str.36 respectively.
; Then iterates deflate blocks (1 final bit + 2-bit type):
;   type 0: stored block — byte-align, read LEN/~LEN, memcpy;
;   type 1: fixed huffman — _default_length/_default_distance
;           tables, materialized lazily on first use;
;   type 2: dynamic huffman — HLIT/HDIST/HCLEN then code-length
;           symbols (16/17/18 repeats, _compute_huffman_codes.
;           length_dezigzag order), built via _zbuild_huffman;
;   type 3: invalid (fails).
; Symbols from _zhuffman_decode: <256 literal byte, 256 end of
; block, 257..285 length (via _length_base/_length_extra) followed
; by a distance code (via _dist_base/_dist_extra), copied from
; earlier output.  When expandable, the output buffer grows by
; doubling realloc; otherwise overflow fails (l_.str.41).
; Returns 1 on success, 0 on failure (with _failure_reason set).
; Malformed bit-stream states trap via the _do_zlib.cold.* stubs.
; Stack-protector guarded (canary at [x29, #-96]).
; NOTE(review): the _stbi_png_partial check at LBB46_45 appears to
; allow returning after >64 KB of output mid-stream — confirm
; against the original C before relying on it.
; -----------------------------------------------------------------
.p2align 2 ; -- Begin function do_zlib
_do_zlib: ; @do_zlib
.cfi_startproc
; %bb.0:
stp x28, x27, [sp, #-96]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 96
stp x26, x25, [sp, #16] ; 16-byte Folded Spill
stp x24, x23, [sp, #32] ; 16-byte Folded Spill
stp x22, x21, [sp, #48] ; 16-byte Folded Spill
stp x20, x19, [sp, #64] ; 16-byte Folded Spill
stp x29, x30, [sp, #80] ; 16-byte Folded Spill
add x29, sp, #80
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
sub sp, sp, #2544
mov x19, x0
Lloh130:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh131:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh132:
ldr x8, [x8]
stur x8, [x29, #-96]
; zout = zout_start = obuf; zout_end = obuf + olen; expandable = w3
stp x1, x1, [x0, #24]
add x8, x1, w2, sxtw
str x8, [x0, #40]
str w3, [x0, #48]
cbz w4, LBB46_10
; %bb.1:
; --- zlib header: read CMF (w8) then FLG (w9) ---
ldp x9, x10, [x19]
cmp x9, x10
b.hs LBB46_4
; %bb.2:
add x11, x9, #1
str x11, [x19]
ldrb w8, [x9]
mov x9, x11
cmp x9, x10
b.lo LBB46_5
LBB46_3:
mov w9, #0
b LBB46_6
LBB46_4:
mov w8, #0
cmp x9, x10
b.hs LBB46_3
LBB46_5:
add x10, x9, #1
str x10, [x19]
ldrb w9, [x9]
LBB46_6:
; divisibility-by-31 check of (CMF<<8 | FLG) via mult. inverse
mov x10, x9
bfi w10, w8, #8, #8
mov w11, #31711
movk w11, #48623, lsl #16
mul w10, w10, w11
mov w11, #4228
movk w11, #2114, lsl #16
cmp w10, w11
b.ls LBB46_8
; %bb.7:
Lloh133:
adrp x8, l_.str.34@PAGE
Lloh134:
add x8, x8, l_.str.34@PAGEOFF
b LBB46_169
LBB46_8:
; FLG bit 5 set => preset dictionary, unsupported
tbnz w9, #5, LBB46_166
; %bb.9:
; CM must be 8 (deflate)
and w8, w8, #0xf
cmp w8, #8
b.ne LBB46_168
LBB46_10:
; --- init: num_bits = 0, code_buffer = 0; cache table pointers ---
mov w27, #0
mov w8, #0
mov x28, x19
str wzr, [x28, #16]!
mov x23, x28
str wzr, [x23, #4]!
add x20, x28, #36
add x9, x28, #2056
str x9, [sp, #16] ; 8-byte Folded Spill
add x22, sp, #504
add x15, sp, #24
add x21, sp, #49
Lloh135:
adrp x16, _compute_huffman_codes.length_dezigzag@PAGE
Lloh136:
add x16, x16, _compute_huffman_codes.length_dezigzag@PAGEOFF
cmp w8, #0
b.le LBB46_163
LBB46_11:
mov x10, x8
LBB46_12:
; --- per-block: consume the 1-bit "final" flag (kept in w27&1) ---
lsr w9, w27, #1
str w9, [x23]
sub w11, w10, #1
str w11, [x28]
cmp w11, #1
b.hi LBB46_20
; %bb.13:
sub w8, w10, #9
b LBB46_16
LBB46_14: ; in Loop: Header=BB46_16 Depth=1
add x12, x11, #1
str x12, [x19]
ldrb w11, [x11]
LBB46_15: ; in Loop: Header=BB46_16 Depth=1
lsl w10, w11, w10
orr w9, w10, w9
str w9, [x23]
add w10, w8, #16
str w10, [x28]
add w8, w8, #8
cmp w8, #17
b.ge LBB46_19
LBB46_16: ; =>This Inner Loop Header: Depth=1
; refill the bit reservoir one input byte at a time
add w10, w8, #8
lsr w11, w9, w10
cbnz w11, LBB46_186
; %bb.17: ; in Loop: Header=BB46_16 Depth=1
ldp x11, x12, [x19]
cmp x11, x12
b.lo LBB46_14
; %bb.18: ; in Loop: Header=BB46_16 Depth=1
mov w11, #0
b LBB46_15
LBB46_19:
add w11, w8, #8
LBB46_20:
; --- consume the 2-bit block type (w9) ---
lsr w8, w9, #2
str w8, [x23]
sub w10, w11, #2
str w10, [x28]
and w9, w9, #0x3
cbz w9, LBB46_30
; %bb.21:
cmp w9, #1
b.eq LBB46_47
; %bb.22:
; type 3 is invalid
cmp w9, #3
b.eq LBB46_177
; %bb.23:
cmp w10, #4
b.hi LBB46_57
; %bb.24:
sub w9, w11, #10
b LBB46_27
LBB46_25: ; in Loop: Header=BB46_27 Depth=1
add x12, x11, #1
str x12, [x19]
ldrb w11, [x11]
LBB46_26: ; in Loop: Header=BB46_27 Depth=1
lsl w10, w11, w10
orr w8, w10, w8
str w8, [x23]
add w10, w9, #16
str w10, [x28]
add w9, w9, #8
cmp w9, #17
b.ge LBB46_56
LBB46_27: ; =>This Inner Loop Header: Depth=1
add w10, w9, #8
lsr w11, w8, w10
cbnz w11, LBB46_190
; %bb.28: ; in Loop: Header=BB46_27 Depth=1
ldp x11, x12, [x19]
cmp x11, x12
b.lo LBB46_25
; %bb.29: ; in Loop: Header=BB46_27 Depth=1
mov w11, #0
b LBB46_26
LBB46_30:
; --- type 0: stored block.  Discard bits to the byte boundary ---
ands w9, w10, #0x7
b.eq LBB46_32
; %bb.31:
lsr w8, w8, w9
str w8, [x23]
and w10, w10, #0xfffffff8
str w10, [x28]
LBB46_32:
; spill remaining reservoir bytes into the 4-byte header buffer
mov x9, #0
cbz w10, LBB46_51
LBB46_33: ; =>This Inner Loop Header: Depth=1
strb w8, [x22, x9]
add x9, x9, #1
lsr w8, w8, #8
subs w10, w10, #8
b.hi LBB46_33
; %bb.34:
str w8, [x23]
str w10, [x28]
cbnz w10, LBB46_194
; %bb.35:
tst x9, #0xfffffffc
b.eq LBB46_51
LBB46_36:
; stored block: LEN must equal ~NLEN, then LEN bytes must remain
ldrh w24, [sp, #504]
ldrh w8, [sp, #506]
eor w9, w24, #0xffff
cmp w8, w9
b.ne LBB46_180
; %bb.37:
ldp x1, x8, [x19]
add x9, x1, x24
cmp x9, x8
b.hi LBB46_182
; %bb.38:
ldr x0, [x19, #24]
ldr x9, [x19, #40]
add x8, x0, x24
cmp x8, x9
b.ls LBB46_44
; %bb.39:
; not enough room: grow (doubling) if expandable, else fail
ldr w8, [x19, #48]
cbz w8, LBB46_170
; %bb.40:
ldr x8, [x19, #32]
sub w9, w9, w8
sub x21, x0, x8
add w10, w24, w21
LBB46_41: ; =>This Inner Loop Header: Depth=1
mov x11, x9
lsl w9, w9, #1
cmp w10, w11
b.gt LBB46_41
; %bb.42:
sxtw x25, w11
mov x0, x8
mov x1, x25
bl _realloc
cbz x0, LBB46_171
; %bb.43:
mov x8, x0
add x0, x0, w21, sxtw
stp x0, x8, [x19, #24]
add x8, x8, x25
str x8, [x19, #40]
ldr x1, [x19]
add x21, sp, #49
LBB46_44:
; copy the stored bytes and advance both cursors
mov x2, x24
bl _memcpy
ldr x8, [x19]
add x8, x8, x24
str x8, [x19]
ldr x8, [x19, #24]
add x8, x8, x24
str x8, [x19, #24]
LBB46_45:
; end of a block: under stbi_png_partial, stop early once >64 KB
; has been produced; otherwise continue unless the final bit is set
Lloh137:
adrp x8, _stbi_png_partial@GOTPAGE
Lloh138:
ldr x8, [x8, _stbi_png_partial@GOTPAGEOFF]
Lloh139:
ldr w8, [x8]
cbz w8, LBB46_159
; %bb.46:
ldp x8, x9, [x19, #24]
sub x8, x8, x9
and w9, w27, #0x1
cmp x8, #16, lsl #12 ; =65536
ccmp w9, #0, #0, le
add x15, sp, #24
Lloh140:
adrp x16, _compute_huffman_codes.length_dezigzag@PAGE
Lloh141:
add x16, x16, _compute_huffman_codes.length_dezigzag@PAGEOFF
b.eq LBB46_160
b LBB46_178
LBB46_47:
; --- type 1: fixed tables.  Materialize the default code-length
; arrays once (guarded by the last byte of _default_distance) ---
adrp x8, _default_distance@PAGE+31
ldrb w8, [x8, _default_distance@PAGEOFF+31]
cbnz w8, LBB46_49
; %bb.48:
Lloh142:
adrp x8, _default_length@PAGE
Lloh143:
add x8, x8, _default_length@PAGEOFF
movi.16b v1, #8
stp q1, q1, [x8, #96]
stp q1, q1, [x8, #64]
stp q1, q1, [x8, #32]
stp q1, q1, [x8]
movi.16b v0, #9
stp q1, q0, [x8, #128]
stp q0, q0, [x8, #160]
stp q0, q0, [x8, #192]
stp q0, q0, [x8, #224]
mov x10, #506381209866536711
stp x10, x10, [x8, #256]
mov x9, #578721382704613384
stp x10, x9, [x8, #272]
Lloh144:
adrp x8, _default_distance@PAGE
Lloh145:
add x8, x8, _default_distance@PAGEOFF
movi.16b v0, #5
stp q0, q0, [x8]
LBB46_49:
; build fixed length (288) and distance (32) huffman tables
mov x0, x20
Lloh146:
adrp x1, _default_length@PAGE
Lloh147:
add x1, x1, _default_length@PAGEOFF
mov w2, #288
bl _zbuild_huffman
cbz w0, LBB46_174
; %bb.50:
ldr x0, [sp, #16] ; 8-byte Folded Reload
Lloh148:
adrp x1, _default_distance@PAGE
Lloh149:
add x1, x1, _default_distance@PAGEOFF
mov w2, #32
bl _zbuild_huffman
cbnz w0, LBB46_120
b LBB46_174
LBB46_51:
; finish reading the 4-byte LEN/NLEN header from raw input
and x9, x9, #0xffffffff
ldp x10, x8, [x19]
b LBB46_54
LBB46_52: ; in Loop: Header=BB46_54 Depth=1
add x12, x10, #1
str x12, [x19]
ldrb w11, [x10]
mov x10, x12
LBB46_53: ; in Loop: Header=BB46_54 Depth=1
strb w11, [x22, x9]
add x11, x9, #1
mov x9, x11
cmp x11, #4
b.eq LBB46_36
LBB46_54: ; =>This Inner Loop Header: Depth=1
cmp x10, x8
b.lo LBB46_52
; %bb.55: ; in Loop: Header=BB46_54 Depth=1
mov w11, #0
b LBB46_53
LBB46_56:
add w10, w9, #8
LBB46_57:
; --- type 2: dynamic tables.  Read 5-bit HLIT and HDIST fields ---
lsr w9, w8, #5
str w9, [x23]
sub w11, w10, #5
str w11, [x28]
cmp w11, #4
b.hi LBB46_65
; %bb.58:
sub w10, w10, #13
b LBB46_61
LBB46_59: ; in Loop: Header=BB46_61 Depth=1
add x13, x12, #1
str x13, [x19]
ldrb w12, [x12]
LBB46_60: ; in Loop: Header=BB46_61 Depth=1
lsl w11, w12, w11
orr w9, w11, w9
str w9, [x23]
add w11, w10, #16
str w11, [x28]
add w10, w10, #8
cmp w10, #17
b.ge LBB46_64
LBB46_61: ; =>This Inner Loop Header: Depth=1
add w11, w10, #8
lsr w12, w9, w11
cbnz w12, LBB46_191
; %bb.62: ; in Loop: Header=BB46_61 Depth=1
ldp x12, x13, [x19]
cmp x12, x13
b.lo LBB46_59
; %bb.63: ; in Loop: Header=BB46_61 Depth=1
mov w12, #0
b LBB46_60
LBB46_64:
add w11, w10, #8
LBB46_65:
lsr w12, w9, #5
str w12, [x23]
sub w13, w11, #5
str w13, [x28]
cmp w13, #3
b.hi LBB46_73
; %bb.66:
sub w10, w11, #13
b LBB46_69
LBB46_67: ; in Loop: Header=BB46_69 Depth=1
add x14, x13, #1
str x14, [x19]
ldrb w13, [x13]
LBB46_68: ; in Loop: Header=BB46_69 Depth=1
lsl w11, w13, w11
orr w12, w11, w12
str w12, [x23]
add w11, w10, #16
str w11, [x28]
add w10, w10, #8
cmp w10, #17
b.ge LBB46_72
LBB46_69: ; =>This Inner Loop Header: Depth=1
add w11, w10, #8
lsr w13, w12, w11
cbnz w13, LBB46_192
; %bb.70: ; in Loop: Header=BB46_69 Depth=1
ldp x13, x14, [x19]
cmp x13, x14
b.lo LBB46_67
; %bb.71: ; in Loop: Header=BB46_69 Depth=1
mov w13, #0
b LBB46_68
LBB46_72:
add w13, w10, #8
LBB46_73:
; w24 = HLIT+257 literal/length codes, w25 = HDIST+1 distance
; codes, w8 = HCLEN+4 code-length codes; zero the 19 size slots
mov x10, #0
lsr w11, w12, #4
str w11, [x23]
and w8, w8, #0x1f
add w24, w8, #257
and w8, w9, #0x1f
add w25, w8, #1
sub w9, w13, #4
str w9, [x28]
and w8, w12, #0xf
stp xzr, xzr, [sp, #24]
add w8, w8, #4
stur wzr, [sp, #39]
b LBB46_75
LBB46_74: ; in Loop: Header=BB46_75 Depth=1
; store each 3-bit size at the dezigzag'd position
and w13, w11, #0x7
lsr w11, w11, #3
str w11, [x23]
sub w9, w12, #3
ldrb w12, [x16, x10]
str w9, [x28]
strb w13, [x15, x12]
add x10, x10, #1
cmp x10, x8
b.eq LBB46_82
LBB46_75: ; =>This Loop Header: Depth=1
; Child Loop BB46_79 Depth 2
cmp w9, #2
b.ls LBB46_79
; %bb.76: ; in Loop: Header=BB46_75 Depth=1
mov x12, x9
b LBB46_74
LBB46_77: ; in Loop: Header=BB46_79 Depth=2
add x13, x12, #1
str x13, [x19]
ldrb w12, [x12]
LBB46_78: ; in Loop: Header=BB46_79 Depth=2
lsl w12, w12, w9
orr w11, w12, w11
str w11, [x23]
add w12, w9, #8
str w12, [x28]
cmp w9, #17
mov x9, x12
b.ge LBB46_74
LBB46_79: ; Parent Loop BB46_75 Depth=1
; => This Inner Loop Header: Depth=2
lsr w12, w11, w9
cbnz w12, LBB46_176
; %bb.80: ; in Loop: Header=BB46_79 Depth=2
ldp x12, x13, [x19]
cmp x12, x13
b.lo LBB46_77
; %bb.81: ; in Loop: Header=BB46_79 Depth=2
mov w12, #0
b LBB46_78
LBB46_82:
; build the code-length huffman, then decode HLIT+HDIST symbol
; lengths into the array at sp+49
add x0, sp, #504
add x1, sp, #24
mov w2, #19
bl _zbuild_huffman
cbz w0, LBB46_177
; %bb.83:
mov x8, x24
mov w24, #0
stp w8, w25, [sp, #8] ; 8-byte Folded Spill
add w25, w25, w8
b LBB46_85
LBB46_84: ; in Loop: Header=BB46_85 Depth=1
strb w0, [x21, w24, sxtw]
add w24, w24, #1
cmp w24, w25
b.ge LBB46_116
LBB46_85: ; =>This Loop Header: Depth=1
; Child Loop BB46_113 Depth 2
; Child Loop BB46_94 Depth 2
; Child Loop BB46_102 Depth 2
; symbols 0..15 are literal lengths; 16 repeats the previous
; length 3+2bits times; 17 writes 3+3bits zeros; 18 writes
; 11+7bits zeros
add x1, sp, #504
mov x0, x19
bl _zhuffman_decode
cmp w0, #19
b.hs LBB46_189
; %bb.86: ; in Loop: Header=BB46_85 Depth=1
cmp w0, #15
b.ls LBB46_84
; %bb.87: ; in Loop: Header=BB46_85 Depth=1
cmp w0, #16
b.eq LBB46_97
; %bb.88: ; in Loop: Header=BB46_85 Depth=1
cmp w0, #17
b.ne LBB46_105
; %bb.89: ; in Loop: Header=BB46_85 Depth=1
ldr w9, [x28]
ldr w8, [x23]
cmp w9, #2
b.le LBB46_94
; %bb.90: ; in Loop: Header=BB46_85 Depth=1
mov x10, x9
LBB46_91: ; in Loop: Header=BB46_85 Depth=1
and w9, w8, #0x7
lsr w8, w8, #3
str w8, [x23]
sub w8, w10, #3
str w8, [x28]
add w26, w9, #3
b LBB46_109
LBB46_92: ; in Loop: Header=BB46_94 Depth=2
add x11, x10, #1
str x11, [x19]
ldrb w10, [x10]
LBB46_93: ; in Loop: Header=BB46_94 Depth=2
lsl w10, w10, w9
orr w8, w10, w8
str w8, [x23]
add w10, w9, #8
str w10, [x28]
cmp w9, #17
mov x9, x10
b.ge LBB46_91
LBB46_94: ; Parent Loop BB46_85 Depth=1
; => This Inner Loop Header: Depth=2
lsr w10, w8, w9
cbnz w10, LBB46_183
; %bb.95: ; in Loop: Header=BB46_94 Depth=2
ldp x10, x11, [x19]
cmp x10, x11
b.lo LBB46_92
; %bb.96: ; in Loop: Header=BB46_94 Depth=2
mov w10, #0
b LBB46_93
LBB46_97: ; in Loop: Header=BB46_85 Depth=1
ldr w9, [x28]
ldr w8, [x23]
cmp w9, #1
b.le LBB46_102
; %bb.98: ; in Loop: Header=BB46_85 Depth=1
mov x10, x9
LBB46_99: ; in Loop: Header=BB46_85 Depth=1
; symbol 16: memset 3+2bits copies of the previous length
and w9, w8, #0x3
lsr w8, w8, #2
str w8, [x23]
sub w8, w10, #2
str w8, [x28]
add w26, w9, #3
add x0, x21, w24, sxtw
ldurb w1, [x0, #-1]
mov x2, x26
bl _memset
b LBB46_110
LBB46_100: ; in Loop: Header=BB46_102 Depth=2
add x11, x10, #1
str x11, [x19]
ldrb w10, [x10]
LBB46_101: ; in Loop: Header=BB46_102 Depth=2
lsl w10, w10, w9
orr w8, w10, w8
str w8, [x23]
add w10, w9, #8
str w10, [x28]
cmp w9, #17
mov x9, x10
b.ge LBB46_99
LBB46_102: ; Parent Loop BB46_85 Depth=1
; => This Inner Loop Header: Depth=2
lsr w10, w8, w9
cbnz w10, LBB46_184
; %bb.103: ; in Loop: Header=BB46_102 Depth=2
ldp x10, x11, [x19]
cmp x10, x11
b.lo LBB46_100
; %bb.104: ; in Loop: Header=BB46_102 Depth=2
mov w10, #0
b LBB46_101
LBB46_105: ; in Loop: Header=BB46_85 Depth=1
cmp w0, #18
b.ne LBB46_193
; %bb.106: ; in Loop: Header=BB46_85 Depth=1
ldr w9, [x28]
ldr w8, [x23]
cmp w9, #6
b.le LBB46_113
; %bb.107: ; in Loop: Header=BB46_85 Depth=1
mov x10, x9
LBB46_108: ; in Loop: Header=BB46_85 Depth=1
; symbol 18: bzero 11+7bits entries
and w9, w8, #0x7f
lsr w8, w8, #7
str w8, [x23]
sub w8, w10, #7
str w8, [x28]
add w26, w9, #11
LBB46_109: ; in Loop: Header=BB46_85 Depth=1
add x0, x21, w24, sxtw
mov x1, x26
bl _bzero
LBB46_110: ; in Loop: Header=BB46_85 Depth=1
add w24, w26, w24
cmp w24, w25
b.lt LBB46_85
b LBB46_116
LBB46_111: ; in Loop: Header=BB46_113 Depth=2
add x11, x10, #1
str x11, [x19]
ldrb w10, [x10]
LBB46_112: ; in Loop: Header=BB46_113 Depth=2
lsl w10, w10, w9
orr w8, w10, w8
str w8, [x23]
add w10, w9, #8
str w10, [x28]
cmp w9, #17
mov x9, x10
b.ge LBB46_108
LBB46_113: ; Parent Loop BB46_85 Depth=1
; => This Inner Loop Header: Depth=2
lsr w10, w8, w9
cbnz w10, LBB46_185
; %bb.114: ; in Loop: Header=BB46_113 Depth=2
ldp x10, x11, [x19]
cmp x10, x11
b.lo LBB46_111
; %bb.115: ; in Loop: Header=BB46_113 Depth=2
mov w10, #0
b LBB46_112
LBB46_116:
; decoded count must land exactly on HLIT+HDIST (flags from the
; cmp above); then build the two block tables
b.ne LBB46_187
; %bb.117:
add x1, sp, #49
mov x0, x20
ldr w24, [sp, #8] ; 4-byte Folded Reload
mov x2, x24
bl _zbuild_huffman
ldr w2, [sp, #12] ; 4-byte Folded Reload
cbz w0, LBB46_177
; %bb.118:
add x1, x21, w24, uxtw
ldr x0, [sp, #16] ; 8-byte Folded Reload
bl _zbuild_huffman
cbnz w0, LBB46_120
b LBB46_174
LBB46_119: ; in Loop: Header=BB46_120 Depth=1
; emit a literal byte at *zout++
add x9, x8, #1
str x9, [x19, #24]
strb w24, [x8]
LBB46_120: ; =>This Loop Header: Depth=1
; Child Loop BB46_151 Depth 2
; Child Loop BB46_156 Depth 2
; Child Loop BB46_142 Depth 2
; Child Loop BB46_148 Depth 2
; Child Loop BB46_125 Depth 2
; --- main decode loop: next literal/length symbol ---
mov x0, x19
mov x1, x20
bl _zhuffman_decode
mov x24, x0
cmp w0, #255
b.gt LBB46_128
; %bb.121: ; in Loop: Header=BB46_120 Depth=1
tbnz w24, #31, LBB46_167
; %bb.122: ; in Loop: Header=BB46_120 Depth=1
ldr x8, [x19, #24]
ldr x9, [x19, #40]
cmp x8, x9
b.lo LBB46_119
; %bb.123: ; in Loop: Header=BB46_120 Depth=1
; output full: grow by doubling realloc if expandable
ldr w10, [x19, #48]
cbz w10, LBB46_170
; %bb.124: ; in Loop: Header=BB46_120 Depth=1
ldr x0, [x19, #32]
sub x21, x8, x0
sub w8, w9, w0
LBB46_125: ; Parent Loop BB46_120 Depth=1
; => This Inner Loop Header: Depth=2
mov x9, x8
lsl w8, w8, #1
cmp w9, w21
b.le LBB46_125
; %bb.126: ; in Loop: Header=BB46_120 Depth=1
sxtw x25, w9
mov x1, x25
bl _realloc
cbz x0, LBB46_171
; %bb.127: ; in Loop: Header=BB46_120 Depth=1
add x8, x0, w21, sxtw
add x9, x0, x25
stp x0, x9, [x19, #32]
add x21, sp, #49
b LBB46_119
LBB46_128: ; in Loop: Header=BB46_120 Depth=1
; 256 = end of block
cmp w24, #256
b.eq LBB46_45
; %bb.129: ; in Loop: Header=BB46_120 Depth=1
; length code: base from _length_base, extra bits from
; _length_extra (only for codes in the extra-bit range)
sub w8, w24, #257
Lloh150:
adrp x9, _length_base@PAGE
Lloh151:
add x9, x9, _length_base@PAGEOFF
ldr w25, [x9, w8, uxtw #2]
sub x9, x8, #28
cmn x9, #20
b.lo LBB46_133
; %bb.130: ; in Loop: Header=BB46_120 Depth=1
Lloh152:
adrp x9, _length_extra@PAGE
Lloh153:
add x9, x9, _length_extra@PAGEOFF
ldr w8, [x9, x8, lsl #2]
ldr w10, [x28]
ldr w9, [x23]
cmp w10, w8
b.lt LBB46_151
; %bb.131: ; in Loop: Header=BB46_120 Depth=1
mov x11, x10
LBB46_132: ; in Loop: Header=BB46_120 Depth=1
mov w10, #-1
lsl w10, w10, w8
bic w10, w9, w10
lsr w9, w9, w8
str w9, [x23]
sub w8, w11, w8
str w8, [x28]
add w25, w10, w25
LBB46_133: ; in Loop: Header=BB46_120 Depth=1
; distance code via the distance huffman table
mov x0, x19
ldr x1, [sp, #16] ; 8-byte Folded Reload
bl _zhuffman_decode
tbnz w0, #31, LBB46_167
; %bb.134: ; in Loop: Header=BB46_120 Depth=1
mov w9, w0
Lloh154:
adrp x8, _dist_base@PAGE
Lloh155:
add x8, x8, _dist_base@PAGEOFF
ldr w8, [x8, w0, uxtw #2]
sub x10, x9, #30
cmn x10, #26
b.lo LBB46_138
; %bb.135: ; in Loop: Header=BB46_120 Depth=1
Lloh156:
adrp x10, _dist_extra@PAGE
Lloh157:
add x10, x10, _dist_extra@PAGEOFF
ldr w9, [x10, x9, lsl #2]
ldr w11, [x28]
ldr w10, [x23]
cmp w11, w9
b.lt LBB46_156
; %bb.136: ; in Loop: Header=BB46_120 Depth=1
mov x12, x11
LBB46_137: ; in Loop: Header=BB46_120 Depth=1
mov w11, #-1
lsl w11, w11, w9
bic w11, w10, w11
lsr w10, w10, w9
str w10, [x23]
sub w9, w12, w9
str w9, [x28]
add w8, w11, w8
LBB46_138: ; in Loop: Header=BB46_120 Depth=1
; distance (x26) must not reach before zout_start
ldp x9, x0, [x19, #24]
sxtw x26, w8
sub x21, x9, x0
cmp x21, x26
b.lt LBB46_172
; %bb.139: ; in Loop: Header=BB46_120 Depth=1
add x10, x9, w25, sxtw
ldr x8, [x19, #40]
cmp x10, x8
b.ls LBB46_145
; %bb.140: ; in Loop: Header=BB46_120 Depth=1
ldr w9, [x19, #48]
cbz w9, LBB46_170
; %bb.141: ; in Loop: Header=BB46_120 Depth=1
sub w8, w8, w0
add w9, w25, w21
LBB46_142: ; Parent Loop BB46_120 Depth=1
; => This Inner Loop Header: Depth=2
mov x10, x8
lsl w8, w8, #1
cmp w9, w10
b.gt LBB46_142
; %bb.143: ; in Loop: Header=BB46_120 Depth=1
sxtw x24, w10
mov x1, x24
bl _realloc
cbz x0, LBB46_171
; %bb.144: ; in Loop: Header=BB46_120 Depth=1
add x9, x0, w21, sxtw
stp x9, x0, [x19, #24]
add x8, x0, x24
str x8, [x19, #40]
LBB46_145: ; in Loop: Header=BB46_120 Depth=1
; copy w25 bytes from zout - dist, byte-at-a-time (overlap-safe)
add x21, sp, #49
cbz w25, LBB46_120
; %bb.146: ; in Loop: Header=BB46_120 Depth=1
sub x8, x9, x26
ldrb w8, [x8]
add x10, x9, #1
str x10, [x19, #24]
strb w8, [x9]
subs w8, w25, #1
b.eq LBB46_120
; %bb.147: ; in Loop: Header=BB46_120 Depth=1
neg x10, x26
add x9, x9, x10
add x9, x9, #1
LBB46_148: ; Parent Loop BB46_120 Depth=1
; => This Inner Loop Header: Depth=2
ldr x10, [x19, #24]
ldrb w11, [x9], #1
add x12, x10, #1
str x12, [x19, #24]
strb w11, [x10]
subs w8, w8, #1
b.ne LBB46_148
b LBB46_120
LBB46_149: ; in Loop: Header=BB46_151 Depth=2
add x12, x11, #1
str x12, [x19]
ldrb w11, [x11]
LBB46_150: ; in Loop: Header=BB46_151 Depth=2
lsl w11, w11, w10
orr w9, w11, w9
str w9, [x23]
add w11, w10, #8
str w11, [x28]
cmp w10, #17
mov x10, x11
b.ge LBB46_132
LBB46_151: ; Parent Loop BB46_120 Depth=1
; => This Inner Loop Header: Depth=2
lsr w11, w9, w10
cbnz w11, LBB46_179
; %bb.152: ; in Loop: Header=BB46_151 Depth=2
ldp x11, x12, [x19]
cmp x11, x12
b.lo LBB46_149
; %bb.153: ; in Loop: Header=BB46_151 Depth=2
mov w11, #0
b LBB46_150
LBB46_154: ; in Loop: Header=BB46_156 Depth=2
add x13, x12, #1
str x13, [x19]
ldrb w12, [x12]
LBB46_155: ; in Loop: Header=BB46_156 Depth=2
lsl w12, w12, w11
orr w10, w12, w10
str w10, [x23]
add w12, w11, #8
str w12, [x28]
cmp w11, #17
mov x11, x12
b.ge LBB46_137
LBB46_156: ; Parent Loop BB46_120 Depth=1
; => This Inner Loop Header: Depth=2
lsr w12, w10, w11
cbnz w12, LBB46_181
; %bb.157: ; in Loop: Header=BB46_156 Depth=2
ldp x12, x13, [x19]
cmp x12, x13
b.lo LBB46_154
; %bb.158: ; in Loop: Header=BB46_156 Depth=2
mov w12, #0
b LBB46_155
LBB46_159:
; non-partial mode: done once the final-block bit (w27 bit 0) set
add x15, sp, #24
Lloh158:
adrp x16, _compute_huffman_codes.length_dezigzag@PAGE
Lloh159:
add x16, x16, _compute_huffman_codes.length_dezigzag@PAGEOFF
tbnz w27, #0, LBB46_178
LBB46_160:
ldr w8, [x28]
ldr w27, [x23]
cmp w8, #0
b.gt LBB46_11
b LBB46_163
LBB46_161: ; in Loop: Header=BB46_163 Depth=1
add x10, x9, #1
str x10, [x19]
ldrb w9, [x9]
LBB46_162: ; in Loop: Header=BB46_163 Depth=1
lsl w9, w9, w8
orr w27, w9, w27
str w27, [x23]
add w10, w8, #8
str w10, [x28]
cmp w8, #17
mov x8, x10
b.ge LBB46_12
LBB46_163: ; =>This Inner Loop Header: Depth=1
lsr w9, w27, w8
cbnz w9, LBB46_188
; %bb.164: ; in Loop: Header=BB46_163 Depth=1
ldp x9, x10, [x19]
cmp x9, x10
b.lo LBB46_161
; %bb.165: ; in Loop: Header=BB46_163 Depth=1
mov w9, #0
b LBB46_162
LBB46_166:
; zlib header declares a preset dictionary -> unsupported
Lloh160:
adrp x8, l_.str.35@PAGE
Lloh161:
add x8, x8, l_.str.35@PAGEOFF
b LBB46_169
LBB46_167:
; huffman decode returned a negative symbol -> corrupt stream
Lloh162:
adrp x9, l_.str.12@PAGE
Lloh163:
add x9, x9, l_.str.12@PAGEOFF
b LBB46_173
LBB46_168:
; compression method != 8 (deflate)
Lloh164:
adrp x8, l_.str.36@PAGE
Lloh165:
add x8, x8, l_.str.36@PAGEOFF
LBB46_169:
mov w0, #0
adrp x9, _failure_reason@PAGE
str x8, [x9, _failure_reason@PAGEOFF]
b LBB46_174
LBB46_170:
; output buffer full and not expandable
Lloh166:
adrp x9, l_.str.41@PAGE
Lloh167:
add x9, x9, l_.str.41@PAGEOFF
b LBB46_173
LBB46_171:
; realloc failed
Lloh168:
adrp x9, l_.str.5@PAGE
Lloh169:
add x9, x9, l_.str.5@PAGEOFF
b LBB46_173
LBB46_172:
; back-reference distance reaches before the start of output
Lloh170:
adrp x9, l_.str.48@PAGE
Lloh171:
add x9, x9, l_.str.48@PAGEOFF
LBB46_173:
mov w0, #0
adrp x8, _failure_reason@PAGE
str x9, [x8, _failure_reason@PAGEOFF]
LBB46_174:
; common epilogue: verify stack canary, restore, return w0
ldur x8, [x29, #-96]
Lloh172:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh173:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh174:
ldr x9, [x9]
cmp x9, x8
b.ne LBB46_195
; %bb.175:
add sp, sp, #2544
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
ldp x26, x25, [sp, #16] ; 16-byte Folded Reload
ldp x28, x27, [sp], #96 ; 16-byte Folded Reload
ret
LBB46_176:
bl _do_zlib.cold.8
LBB46_177:
mov w0, #0
b LBB46_174
LBB46_178:
mov w0, #1
b LBB46_174
LBB46_179:
bl _do_zlib.cold.3
LBB46_180:
; stored block: LEN != ~NLEN
Lloh175:
adrp x9, l_.str.39@PAGE
Lloh176:
add x9, x9, l_.str.39@PAGEOFF
b LBB46_173
LBB46_181:
bl _do_zlib.cold.2
LBB46_182:
; stored block longer than the remaining input
Lloh177:
adrp x9, l_.str.40@PAGE
Lloh178:
add x9, x9, l_.str.40@PAGEOFF
b LBB46_173
LBB46_183:
bl _do_zlib.cold.6
LBB46_184:
bl _do_zlib.cold.5
LBB46_185:
bl _do_zlib.cold.7
LBB46_186:
bl _do_zlib.cold.13
LBB46_187:
; code-length decoding overshot HLIT+HDIST
Lloh179:
adrp x8, l_.str.43@PAGE
Lloh180:
add x8, x8, l_.str.43@PAGEOFF
adrp x9, _failure_reason@PAGE
str x8, [x9, _failure_reason@PAGEOFF]
mov w0, #0
b LBB46_174
LBB46_188:
bl _do_zlib.cold.14
LBB46_189:
bl _do_zlib.cold.1
LBB46_190:
bl _do_zlib.cold.11
LBB46_191:
bl _do_zlib.cold.10
LBB46_192:
bl _do_zlib.cold.9
LBB46_193:
bl _do_zlib.cold.4
LBB46_194:
bl _do_zlib.cold.12
LBB46_195:
bl ___stack_chk_fail
.loh AdrpLdrGotLdr Lloh130, Lloh131, Lloh132
.loh AdrpAdd Lloh133, Lloh134
.loh AdrpAdd Lloh135, Lloh136
.loh AdrpLdrGotLdr Lloh137, Lloh138, Lloh139
.loh AdrpAdd Lloh140, Lloh141
.loh AdrpAdd Lloh144, Lloh145
.loh AdrpAdd Lloh142, Lloh143
.loh AdrpAdd Lloh146, Lloh147
.loh AdrpAdd Lloh148, Lloh149
.loh AdrpAdd Lloh150, Lloh151
.loh AdrpAdd Lloh152, Lloh153
.loh AdrpAdd Lloh154, Lloh155
.loh AdrpAdd Lloh156, Lloh157
.loh AdrpAdd Lloh158, Lloh159
.loh AdrpAdd Lloh160, Lloh161
.loh AdrpAdd Lloh162, Lloh163
.loh AdrpAdd Lloh164, Lloh165
.loh AdrpAdd Lloh166, Lloh167
.loh AdrpAdd Lloh168, Lloh169
.loh AdrpAdd Lloh170, Lloh171
.loh AdrpLdrGotLdr Lloh172, Lloh173, Lloh174
.loh AdrpAdd Lloh175, Lloh176
.loh AdrpAdd Lloh177, Lloh178
.loh AdrpAdd Lloh179, Lloh180
.cfi_endproc
; -- End function
; -----------------------------------------------------------------
; _stbi_zlib_decode_malloc(buffer, len, outlen)
; Same flow as _stbi_zlib_decode_malloc_guesssize with a fixed
; 16384-byte initial output allocation:
;   _do_zlib(zbuf, out, 16384, expandable=1, parse_header=1)
; On success: *outlen = zout - zout_start (if outlen != NULL) and
; returns zout_start.  On failure: frees the buffer, returns NULL.
; -----------------------------------------------------------------
.globl _stbi_zlib_decode_malloc ; -- Begin function stbi_zlib_decode_malloc
.p2align 2
_stbi_zlib_decode_malloc: ; @stbi_zlib_decode_malloc
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
; probe 4112 bytes for the zbuf frame
mov w9, #4112
Lloh181:
adrp x16, ___chkstk_darwin@GOTPAGE
Lloh182:
ldr x16, [x16, ___chkstk_darwin@GOTPAGEOFF]
blr x16
sub sp, sp, #1, lsl #12 ; =4096
sub sp, sp, #16
mov x19, x2
mov x21, x1
mov x20, x0
; stack canary -> [x29, #-40]
Lloh183:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh184:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh185:
ldr x8, [x8]
stur x8, [x29, #-40]
mov w0, #16384
bl _malloc
cbz x0, LBB47_6
; %bb.1:
; zbuf at sp+8 over [buffer, buffer+len)
mov x1, x0
add x8, x20, w21, sxtw
stp x20, x8, [sp, #8]
add x0, sp, #8
mov w2, #16384
mov w3, #1
mov w4, #1
bl _do_zlib
cbz w0, LBB47_4
; %bb.2:
cbz x19, LBB47_5
; %bb.3:
; *outlen = zout (sp+32) - zout_start (sp+40)
ldr w8, [sp, #32]
ldr x0, [sp, #40]
sub w8, w8, w0
str w8, [x19]
b LBB47_6
LBB47_4:
; decode failed: free and return NULL
ldr x0, [sp, #40]
bl _free
mov x0, #0
b LBB47_6
LBB47_5:
ldr x0, [sp, #40]
LBB47_6:
ldur x8, [x29, #-40]
Lloh186:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh187:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh188:
ldr x9, [x9]
cmp x9, x8
b.ne LBB47_8
; %bb.7:
add sp, sp, #1, lsl #12 ; =4096
add sp, sp, #16
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
LBB47_8:
bl ___stack_chk_fail
.loh AdrpLdrGotLdr Lloh183, Lloh184, Lloh185
.loh AdrpLdrGot Lloh181, Lloh182
.loh AdrpLdrGotLdr Lloh186, Lloh187, Lloh188
.cfi_endproc
; -- End function
; -----------------------------------------------------------------
; _stbi_zlib_decode_buffer(obuffer, olen, ibuffer, ilen)
; Decompresses into a caller-provided, fixed-size buffer:
;   _do_zlib(zbuf over [ibuffer, ibuffer+ilen), obuffer, olen,
;            expandable=0, parse_header=1)
; Returns the number of bytes written (zout - zout_start) on
; success, -1 on failure (csinv selects -1 when _do_zlib
; returned 0).  Stack-protector guarded.
; -----------------------------------------------------------------
.globl _stbi_zlib_decode_buffer ; -- Begin function stbi_zlib_decode_buffer
.p2align 2
_stbi_zlib_decode_buffer: ; @stbi_zlib_decode_buffer
.cfi_startproc
; %bb.0:
stp x28, x27, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w27, -24
.cfi_offset w28, -32
; probe 4112 bytes for the zbuf frame
mov w9, #4112
Lloh189:
adrp x16, ___chkstk_darwin@GOTPAGE
Lloh190:
ldr x16, [x16, ___chkstk_darwin@GOTPAGEOFF]
blr x16
sub sp, sp, #1, lsl #12 ; =4096
sub sp, sp, #16
; shuffle args into _do_zlib positions (x1 = obuffer, x2 = olen)
mov x8, x1
mov x1, x0
; stack canary -> [x29, #-24]
Lloh191:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh192:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh193:
ldr x9, [x9]
stur x9, [x29, #-24]
; zbuf at sp+8: input cursor = ibuffer, end = ibuffer + ilen
add x9, x2, w3, sxtw
stp x2, x9, [sp, #8]
add x0, sp, #8
mov x2, x8
mov w3, #0
mov w4, #1
bl _do_zlib
; result: zout (sp+32) - zout_start (sp+40) on success, else -1
ldr w8, [sp, #32]
ldr w9, [sp, #40]
sub w8, w8, w9
cmp w0, #0
csinv w0, w8, wzr, ne
ldur x8, [x29, #-24]
Lloh194:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh195:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh196:
ldr x9, [x9]
cmp x9, x8
b.ne LBB48_2
; %bb.1:
add sp, sp, #1, lsl #12 ; =4096
add sp, sp, #16
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x28, x27, [sp], #32 ; 16-byte Folded Reload
ret
LBB48_2:
bl ___stack_chk_fail
.loh AdrpLdrGotLdr Lloh194, Lloh195, Lloh196
.loh AdrpLdrGotLdr Lloh191, Lloh192, Lloh193
.loh AdrpLdrGot Lloh189, Lloh190
.cfi_endproc
; -- End function
;-----------------------------------------------------------------------
; char *stbi_zlib_decode_noheader_malloc(const char *buffer, int len,
;                                        int *outlen)          (inferred)
; In: x0=buffer, w1=len, x2=outlen (may be NULL).
; Allocates a 16384-byte initial output buffer, builds a stack context
; {buffer, buffer+len}, then calls _do_zlib(ctx, obuf, 16384, 1, 0).
; The trailing flags are presumably (expandable=1, parse_header=0) --
; confirm against _do_zlib.
; Out: x0 = ctx[+32] (the malloc'd decoded data); when outlen != NULL,
;      *outlen = ctx[+24] - ctx[+32].  Returns NULL on malloc failure,
;      or on inflate failure (freeing the partial output buffer).
;-----------------------------------------------------------------------
	.globl	_stbi_zlib_decode_noheader_malloc ; -- Begin function stbi_zlib_decode_noheader_malloc
	.p2align	2
_stbi_zlib_decode_noheader_malloc:      ; @stbi_zlib_decode_noheader_malloc
	.cfi_startproc
; %bb.0:
	stp	x22, x21, [sp, #-48]!           ; 16-byte Folded Spill
	.cfi_def_cfa_offset 48
	stp	x20, x19, [sp, #16]             ; 16-byte Folded Spill
	stp	x29, x30, [sp, #32]             ; 16-byte Folded Spill
	add	x29, sp, #32
	.cfi_def_cfa w29, 16
	.cfi_offset w30, -8
	.cfi_offset w29, -16
	.cfi_offset w19, -24
	.cfi_offset w20, -32
	.cfi_offset w21, -40
	.cfi_offset w22, -48
; Probe 4112 bytes of stack before moving sp below it.
	mov	w9, #4112
Lloh197:
	adrp	x16, ___chkstk_darwin@GOTPAGE
Lloh198:
	ldr	x16, [x16, ___chkstk_darwin@GOTPAGEOFF]
	blr	x16
	sub	sp, sp, #1, lsl #12             ; =4096
	sub	sp, sp, #16
; x19 = outlen, x21 = len, x20 = buffer (callee-saved, live across calls)
	mov	x19, x2
	mov	x21, x1
	mov	x20, x0
Lloh199:
	adrp	x8, ___stack_chk_guard@GOTPAGE
Lloh200:
	ldr	x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh201:
	ldr	x8, [x8]
	stur	x8, [x29, #-40]                 ; stash stack-guard cookie
	mov	w0, #16384
	bl	_malloc
	cbz	x0, LBB49_6                     ; malloc failed -> return NULL (x0 is 0)
; %bb.1:
	mov	x1, x0
; ctx at sp+8: {input cursor = buffer, input end = buffer + (sxtw)len}
	add	x8, x20, w21, sxtw
	stp	x20, x8, [sp, #8]
	add	x0, sp, #8
	mov	w2, #16384
	mov	w3, #1
	mov	w4, #0
	bl	_do_zlib
	cbz	w0, LBB49_4                     ; inflate failed
; %bb.2:
	cbz	x19, LBB49_5                    ; caller passed no outlen
; %bb.3:
; *outlen = ctx[+24] - ctx[+32]; x0 = ctx[+32] is the returned buffer
	ldr	w8, [sp, #32]
	ldr	x0, [sp, #40]
	sub	w8, w8, w0
	str	w8, [x19]
	b	LBB49_6
LBB49_4:
; Failure: free the (possibly reallocated) output buffer, return NULL.
	ldr	x0, [sp, #40]
	bl	_free
	mov	x0, #0
	b	LBB49_6
LBB49_5:
	ldr	x0, [sp, #40]                   ; return buffer without reporting length
LBB49_6:
; Common epilogue with stack-guard verification.
	ldur	x8, [x29, #-40]
Lloh202:
	adrp	x9, ___stack_chk_guard@GOTPAGE
Lloh203:
	ldr	x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh204:
	ldr	x9, [x9]
	cmp	x9, x8
	b.ne	LBB49_8
; %bb.7:
	add	sp, sp, #1, lsl #12             ; =4096
	add	sp, sp, #16
	ldp	x29, x30, [sp, #32]             ; 16-byte Folded Reload
	ldp	x20, x19, [sp, #16]             ; 16-byte Folded Reload
	ldp	x22, x21, [sp], #48             ; 16-byte Folded Reload
	ret
LBB49_8:
	bl	___stack_chk_fail
	.loh AdrpLdrGotLdr	Lloh199, Lloh200, Lloh201
	.loh AdrpLdrGot	Lloh197, Lloh198
	.loh AdrpLdrGotLdr	Lloh202, Lloh203, Lloh204
;-----------------------------------------------------------------------
; int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen,
;                                      const char *ibuffer, int ilen) (inferred)
; In: x0=obuffer, w1=olen, x2=ibuffer, w3=ilen.
; Same shape as _stbi_zlib_decode_buffer but calls
; _do_zlib(ctx, obuffer, olen, 0, 0) -- presumably the no-zlib-header
; variant (parse_header=0); confirm against _do_zlib.
; Out: w0 = ctx[+24] - ctx[+32] (bytes produced) on success, else -1.
;-----------------------------------------------------------------------
	.globl	_stbi_zlib_decode_noheader_buffer ; -- Begin function stbi_zlib_decode_noheader_buffer
	.p2align	2
_stbi_zlib_decode_noheader_buffer:      ; @stbi_zlib_decode_noheader_buffer
	.cfi_startproc
; %bb.0:
	stp	x28, x27, [sp, #-32]!           ; 16-byte Folded Spill
	.cfi_def_cfa_offset 32
	stp	x29, x30, [sp, #16]             ; 16-byte Folded Spill
	add	x29, sp, #16
	.cfi_def_cfa w29, 16
	.cfi_offset w30, -8
	.cfi_offset w29, -16
	.cfi_offset w27, -24
	.cfi_offset w28, -32
; Probe 4112 bytes of stack before moving sp below it.
	mov	w9, #4112
Lloh205:
	adrp	x16, ___chkstk_darwin@GOTPAGE
Lloh206:
	ldr	x16, [x16, ___chkstk_darwin@GOTPAGEOFF]
	blr	x16
	sub	sp, sp, #1, lsl #12             ; =4096
	sub	sp, sp, #16
; Shuffle args: x8 <- olen, x1 <- obuffer.
	mov	x8, x1
	mov	x1, x0
Lloh207:
	adrp	x9, ___stack_chk_guard@GOTPAGE
Lloh208:
	ldr	x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh209:
	ldr	x9, [x9]
	stur	x9, [x29, #-24]                 ; stash stack-guard cookie
; ctx at sp+8: {input cursor = ibuffer, input end = ibuffer + (sxtw)ilen}
	add	x9, x2, w3, sxtw
	stp	x2, x9, [sp, #8]
	add	x0, sp, #8
	mov	x2, x8
	mov	w3, #0
	mov	w4, #0
	bl	_do_zlib
; w8 = bytes produced; w0 = success ? w8 : -1
	ldr	w8, [sp, #32]
	ldr	w9, [sp, #40]
	sub	w8, w8, w9
	cmp	w0, #0
	csinv	w0, w8, wzr, ne
; Verify the stack-guard cookie, then tear down the frame.
	ldur	x8, [x29, #-24]
Lloh210:
	adrp	x9, ___stack_chk_guard@GOTPAGE
Lloh211:
	ldr	x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh212:
	ldr	x9, [x9]
	cmp	x9, x8
	b.ne	LBB50_2
; %bb.1:
	add	sp, sp, #1, lsl #12             ; =4096
	add	sp, sp, #16
	ldp	x29, x30, [sp, #16]             ; 16-byte Folded Reload
	ldp	x28, x27, [sp], #32             ; 16-byte Folded Reload
	ret
LBB50_2:
	bl	___stack_chk_fail
	.loh AdrpLdrGotLdr	Lloh210, Lloh211, Lloh212
	.loh AdrpLdrGotLdr	Lloh207, Lloh208, Lloh209
	.loh AdrpLdrGot	Lloh205, Lloh206
;-----------------------------------------------------------------------
; static uchar *do_png(png *p, int *x, int *y, int *comp, int req_comp) (inferred)
; In: x0=p, x1=x, x2=y, x3=comp (may be NULL), w4=req_comp.
; Zeroes three pointer fields of *p (+40, +48, +56); rejects req_comp >= 5
; (sets _failure_reason = l_.str.6, returns NULL).  Otherwise runs
; _parse_png_file(p, 0, req_comp); on success takes the decoded image from
; p[+56] (and zeroes that slot), optionally calls _convert_format when
; req_comp != 0 and differs from p[+12], writes *x=p[+0], *y=p[+4] and
; *comp=p[+8] (if comp != NULL), frees p[+56]/p[+48]/p[+40] and zeroes
; them, then returns the image pointer.
; NOTE(review): when _convert_format returns NULL the cleanup frees at
; LBB51_9 are skipped (direct jump to LBB51_10) -- matches the visible
; control flow; confirm this is the intended ownership model.
;-----------------------------------------------------------------------
	.p2align	2                               ; -- Begin function do_png
_do_png:                                ; @do_png
	.cfi_startproc
; %bb.0:
	stp	x24, x23, [sp, #-64]!           ; 16-byte Folded Spill
	.cfi_def_cfa_offset 64
	stp	x22, x21, [sp, #16]             ; 16-byte Folded Spill
	stp	x20, x19, [sp, #32]             ; 16-byte Folded Spill
	stp	x29, x30, [sp, #48]             ; 16-byte Folded Spill
	add	x29, sp, #48
	.cfi_def_cfa w29, 16
	.cfi_offset w30, -8
	.cfi_offset w29, -16
	.cfi_offset w19, -24
	.cfi_offset w20, -32
	.cfi_offset w21, -40
	.cfi_offset w22, -48
	.cfi_offset w23, -56
	.cfi_offset w24, -64
; p[+40] = p[+48] = p[+56] = NULL
	stp	xzr, xzr, [x0, #40]
	str	xzr, [x0, #56]
	cmp	w4, #5
	b.lo	LBB51_2                         ; req_comp in 0..4 is valid
; %bb.1:
; Invalid req_comp: failure_reason = l_.str.6, result = NULL.
	mov	x20, #0
Lloh213:
	adrp	x8, l_.str.6@PAGE
Lloh214:
	add	x8, x8, l_.str.6@PAGEOFF
	adrp	x9, _failure_reason@PAGE
	str	x8, [x9, _failure_reason@PAGEOFF]
	b	LBB51_10
LBB51_2:
; x24=req_comp, x21=comp, x22=y, x23=x, x19=p (preserved across calls)
	mov	x24, x4
	mov	x21, x3
	mov	x22, x2
	mov	x23, x1
	mov	x19, x0
	mov	w1, #0
	mov	x2, x4
	bl	_parse_png_file
	cbz	w0, LBB51_8                     ; parse failed -> NULL result
; %bb.3:
; Take ownership of the decoded image: x20 = p[+56], p[+56] = NULL.
	ldr	x20, [x19, #56]
	str	xzr, [x19, #56]
	cbz	w24, LBB51_6                    ; req_comp == 0: keep native format
; %bb.4:
	ldr	w1, [x19, #12]
	cmp	w1, w24
	b.eq	LBB51_6                         ; already in requested format
; %bb.5:
; x20 = convert_format(x20, p[+12], req_comp, p[+0], p[+4]); p[+12] = req_comp
	ldp	w3, w4, [x19]
	mov	x0, x20
	mov	x2, x24
	bl	_convert_format
	mov	x20, x0
	str	w24, [x19, #12]
	cbz	x0, LBB51_10                    ; conversion failed -> return NULL
LBB51_6:
; Report dimensions: *x = p[+0], *y = p[+4]
	ldr	w8, [x19]
	str	w8, [x23]
	ldr	w8, [x19, #4]
	str	w8, [x22]
	cbz	x21, LBB51_9                    ; comp pointer is optional
; %bb.7:
	ldr	w8, [x19, #8]
	str	w8, [x21]
	b	LBB51_9
LBB51_8:
	mov	x20, #0
LBB51_9:
; Free and zero the scratch buffers at p[+56], p[+48], p[+40].
	ldr	x0, [x19, #56]
	bl	_free
	str	xzr, [x19, #56]
	ldr	x0, [x19, #48]
	bl	_free
	str	xzr, [x19, #48]
	ldr	x0, [x19, #40]
	bl	_free
	str	xzr, [x19, #40]
LBB51_10:
	mov	x0, x20                         ; return image (or NULL)
	ldp	x29, x30, [sp, #48]             ; 16-byte Folded Reload
	ldp	x20, x19, [sp, #32]             ; 16-byte Folded Reload
	ldp	x22, x21, [sp, #16]             ; 16-byte Folded Reload
	ldp	x24, x23, [sp], #64             ; 16-byte Folded Reload
	ret
	.loh AdrpAdd	Lloh213, Lloh214
	.cfi_endproc
                                        ; -- End function
;-----------------------------------------------------------------------
; uchar *stbi_png_load(const char *file, int *x, int *y,
;                      int *comp, int req_comp)                 (inferred)
; In: x0=file, x1=x, x2=y, x3=comp, w4=req_comp.
; fopen(file, l_.str) -- mode string presumably "rb"; on failure returns
; NULL.  Otherwise builds a local png context at sp with its FILE* at
; [sp,#16], calls _do_png(&ctx, x, y, comp, req_comp), fclose's the
; stream and returns do_png's result.
;-----------------------------------------------------------------------
	.globl	_stbi_png_load                  ; -- Begin function stbi_png_load
	.p2align	2
_stbi_png_load:                         ; @stbi_png_load
	.cfi_startproc
; %bb.0:
	sub	sp, sp, #128
	.cfi_def_cfa_offset 128
	stp	x24, x23, [sp, #64]             ; 16-byte Folded Spill
	stp	x22, x21, [sp, #80]             ; 16-byte Folded Spill
	stp	x20, x19, [sp, #96]             ; 16-byte Folded Spill
	stp	x29, x30, [sp, #112]            ; 16-byte Folded Spill
	add	x29, sp, #112
	.cfi_def_cfa w29, 16
	.cfi_offset w30, -8
	.cfi_offset w29, -16
	.cfi_offset w19, -24
	.cfi_offset w20, -32
	.cfi_offset w21, -40
	.cfi_offset w22, -48
	.cfi_offset w23, -56
	.cfi_offset w24, -64
; Preserve req_comp/comp/y/x across the fopen call.
	mov	x19, x4
	mov	x20, x3
	mov	x21, x2
	mov	x23, x1
Lloh215:
	adrp	x1, l_.str@PAGE
Lloh216:
	add	x1, x1, l_.str@PAGEOFF
	bl	_fopen
	cbz	x0, LBB52_2                     ; fopen failed -> return NULL
; %bb.1:
; Local png context at sp; ctx[+16] = FILE*.
	mov	x22, x0
	str	x0, [sp, #16]
	mov	x0, sp
	mov	x1, x23
	mov	x2, x21
	mov	x3, x20
	mov	x4, x19
	bl	_do_png
	mov	x19, x0                         ; keep result across fclose
	mov	x0, x22
	bl	_fclose
	b	LBB52_3
LBB52_2:
	mov	x19, #0
LBB52_3:
	mov	x0, x19
	ldp	x29, x30, [sp, #112]            ; 16-byte Folded Reload
	ldp	x20, x19, [sp, #96]             ; 16-byte Folded Reload
	ldp	x22, x21, [sp, #80]             ; 16-byte Folded Reload
	ldp	x24, x23, [sp, #64]             ; 16-byte Folded Reload
	add	sp, sp, #128
	ret
	.loh AdrpAdd	Lloh215, Lloh216
	.cfi_endproc
                                        ; -- End function
;-----------------------------------------------------------------------
; static int parse_png_file(png *s, int scan, int req_comp)     (inferred)
; In: x0=s (x19), w1=scan mode (x21), w2=req_comp (saved to [sp,#68]).
; The context reads bytes either via fgetc on the FILE* at s[+16], or,
; when that is NULL, from an in-memory cursor/end pair at s[+24]/s[+32].
; Flow: verify the 8-byte PNG signature (check_png_header.png_sig); if
; scan==1 return 1 right after the signature.  Then iterate chunks as
; (len=get32, type=get32): the first chunk must be IHDR.  Handled types
; (constants are the big-endian 4CC values):
;   IHDR 0x49484452  - width/height (each < 2^24), bit depth must be 8,
;                      color type <= 7 (type 3 => paletted), compression
;                      and filter must be 0, interlace < 2 (spilled to
;                      [sp,#80]); computes s->img_n (s[+8]) and rejects
;                      images too large for a 2^30 byte budget.
;   PLTE 0x504C5445  - len must be a multiple of 3, <= 768; fills the
;                      256 x 4-byte palette at sp+96 with alpha 255 (w27).
;   tRNS 0x74524E53  - paletted: writes alpha bytes into palette entries;
;                      otherwise stores the transparent-color bytes at
;                      sp+93 and sets the has-trans spill [sp,#76].
;   IDAT 0x49444154  - appends chunk payload to s[+40] (idata), growing
;                      the buffer by doubling from max(4096,len) via
;                      realloc; w20=write offset, w28=current capacity.
;   IEND 0x49454E44  - if scan!=0 return 1; else inflate idata via
;                      _do_zlib(ctx@sp+1120, obuf, 16384, 1, 1) into
;                      s[+48], free idata, pick output components s[+12],
;                      then either _create_png_image_raw directly or, when
;                      interlaced, run 7 passes over the xorig/xspc/
;                      yorig/yspc tables (presumably Adam7 -- confirm)
;                      assembling the final image; apply the tRNS color
;                      (2-channel path is NEON-vectorized), expand the
;                      palette to 3- or 4-byte pixels if paletted, and
;                      return 1 with the image in s[+56].
;   other            - critical chunks (type bit 29 clear, i.e. uppercase
;                      first letter) fail with the 4CC formatted into
;                      _parse_png_file.invalid_chunk; ancillary chunks are
;                      skipped via fseek or cursor advance.
; Errors set _failure_reason to one of l_.str.33/.51-.68 (exact texts
; live elsewhere in the file) and return 0 through LBB53_11/LBB53_12.
; Register roles in the main loop: x19=s, w21=scan, w23=chunk length,
; w8=chunk type, w9=first-chunk flag, w24=pal_img_n (0/3/4), w26=palette
; length (later reused as decompressed size), x25=sp+96 palette base,
; w22=0x504C5444 dispatch split constant, w27=255.
; Frame: ~5 KiB probed via ___chkstk_darwin, protected by a stack guard.
;-----------------------------------------------------------------------
	.p2align	2                               ; -- Begin function parse_png_file
_parse_png_file:                        ; @parse_png_file
	.cfi_startproc
; %bb.0:
	stp	x28, x27, [sp, #-96]!           ; 16-byte Folded Spill
	.cfi_def_cfa_offset 96
	stp	x26, x25, [sp, #16]             ; 16-byte Folded Spill
	stp	x24, x23, [sp, #32]             ; 16-byte Folded Spill
	stp	x22, x21, [sp, #48]             ; 16-byte Folded Spill
	stp	x20, x19, [sp, #64]             ; 16-byte Folded Spill
	stp	x29, x30, [sp, #80]             ; 16-byte Folded Spill
	add	x29, sp, #80
	.cfi_def_cfa w29, 16
	.cfi_offset w30, -8
	.cfi_offset w29, -16
	.cfi_offset w19, -24
	.cfi_offset w20, -32
	.cfi_offset w21, -40
	.cfi_offset w22, -48
	.cfi_offset w23, -56
	.cfi_offset w24, -64
	.cfi_offset w25, -72
	.cfi_offset w26, -80
	.cfi_offset w27, -88
	.cfi_offset w28, -96
; Probe 5232 bytes of stack before moving sp below it.
	mov	w9, #5232
Lloh217:
	adrp	x16, ___chkstk_darwin@GOTPAGE
Lloh218:
	ldr	x16, [x16, ___chkstk_darwin@GOTPAGEOFF]
	blr	x16
	sub	sp, sp, #1, lsl #12             ; =4096
	sub	sp, sp, #1136
; x24=req_comp, x21=scan, x19=s, x20=signature index
	mov	x24, x2
	mov	x21, x1
	mov	x19, x0
	mov	x20, #0
Lloh219:
	adrp	x8, ___stack_chk_guard@GOTPAGE
Lloh220:
	ldr	x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh221:
	ldr	x8, [x8]
	stur	x8, [x29, #-96]                 ; stash stack-guard cookie
Lloh222:
	adrp	x22, _check_png_header.png_sig@PAGE
Lloh223:
	add	x22, x22, _check_png_header.png_sig@PAGEOFF
; Signature loop: compare 8 input bytes against png_sig.
LBB53_1:                                ; =>This Inner Loop Header: Depth=1
	ldr	x0, [x19, #16]
	cbz	x0, LBB53_3                     ; no FILE* -> read from memory cursor
; %bb.2:                                ; in Loop: Header=BB53_1 Depth=1
	bl	_fgetc
	cmn	w0, #1
	csel	w8, wzr, w0, eq                 ; EOF reads as 0
	b	LBB53_6
LBB53_3:                                ; in Loop: Header=BB53_1 Depth=1
	ldp	x8, x9, [x19, #24]
	cmp	x8, x9
	b.hs	LBB53_5                         ; cursor at end -> 0
; %bb.4:                                ; in Loop: Header=BB53_1 Depth=1
	add	x9, x8, #1
	str	x9, [x19, #24]
	ldrb	w8, [x8]
	b	LBB53_6
LBB53_5:                                ; in Loop: Header=BB53_1 Depth=1
	mov	w8, #0
LBB53_6:                                ; in Loop: Header=BB53_1 Depth=1
	ldrb	w9, [x22, x20]
	cmp	w8, w9
	b.ne	LBB53_10                        ; signature mismatch
; %bb.7:                                ; in Loop: Header=BB53_1 Depth=1
	add	x20, x20, #1
	cmp	x20, #8
	b.ne	LBB53_1
; %bb.8:
	cmp	w21, #1
	b.ne	LBB53_14                        ; scan != 1: continue parsing chunks
LBB53_9:
; Success: return 1.
	mov	w0, #1
	b	LBB53_12
LBB53_10:
; Bad signature: failure_reason = l_.str.68, return 0.
	mov	w0, #0
Lloh224:
	adrp	x8, l_.str.68@PAGE
Lloh225:
	add	x8, x8, l_.str.68@PAGEOFF
LBB53_11:
; Common error exit: _failure_reason = x8 (w0 already holds 0).
	adrp	x9, _failure_reason@PAGE
	str	x8, [x9, _failure_reason@PAGEOFF]
LBB53_12:
; Common return path: verify stack guard, unwind frame, return w0.
	ldur	x8, [x29, #-96]
Lloh226:
	adrp	x9, ___stack_chk_guard@GOTPAGE
Lloh227:
	ldr	x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh228:
	ldr	x9, [x9]
	cmp	x9, x8
	b.ne	LBB53_202
; %bb.13:
	add	sp, sp, #1, lsl #12             ; =4096
	add	sp, sp, #1136
	ldp	x29, x30, [sp, #80]             ; 16-byte Folded Reload
	ldp	x20, x19, [sp, #64]             ; 16-byte Folded Reload
	ldp	x22, x21, [sp, #48]             ; 16-byte Folded Reload
	ldp	x24, x23, [sp, #32]             ; 16-byte Folded Reload
	ldp	x26, x25, [sp, #16]             ; 16-byte Folded Reload
	ldp	x28, x27, [sp], #96             ; 16-byte Folded Reload
	ret
LBB53_14:
; First chunk: len=get32 (x23), type=get32 must be 'IHDR' (0x49484452).
	mov	w20, #17490
	movk	w20, #18760, lsl #16            ; w20 = 0x49484452 'IHDR'
	mov	x0, x19
	bl	_get32
	mov	x23, x0
	mov	x0, x19
	bl	_get32
	cmp	w0, w20
	b.ne	LBB53_16
; %bb.15:
; Initialize chunk-loop state (see register-role comment in the header).
	str	w24, [sp, #68]                  ; 4-byte Folded Spill
	mov	w24, #0
	stp	wzr, wzr, [sp, #76]             ; 8-byte Folded Spill
	mov	w20, #0
	mov	w28, #0
	mov	w26, #0
	mov	w22, #21572
	movk	w22, #20556, lsl #16            ; w22 = 0x504C5444 = 'PLTE'-1 (dispatch split)
	add	x25, sp, #96
	mov	w27, #255
	mov	w8, #17490
	movk	w8, #18760, lsl #16             ; enter loop with type = 'IHDR'
	mov	w9, #1                          ; first-chunk flag
	b	LBB53_20
LBB53_16:
; First chunk not IHDR: failure_reason = l_.str.51.
	mov	w0, #0
Lloh229:
	adrp	x8, l_.str.51@PAGE
Lloh230:
	add	x8, x8, l_.str.51@PAGEOFF
	b	LBB53_11
LBB53_17:                               ; in Loop: Header=BB53_20 Depth=1
	ldr	w24, [sp, #72]                  ; 4-byte Folded Reload
LBB53_18:                               ; in Loop: Header=BB53_20 Depth=1
	mov	w22, #21572
	movk	w22, #20556, lsl #16            ; restore dispatch split constant
LBB53_19:                               ; in Loop: Header=BB53_20 Depth=1
; Advance to next chunk: skip CRC (get32), read next len and type.
	mov	x0, x19
	bl	_get32
	mov	x0, x19
	bl	_get32
	mov	x23, x0
	mov	x0, x19
	bl	_get32
	mov	x8, x0
	mov	w9, #0                          ; no longer the first chunk
LBB53_20:                               ; =>This Loop Header: Depth=1
                                        ;     Child Loop BB53_41 Depth 2
                                        ;     Child Loop BB53_86 Depth 2
                                        ;     Child Loop BB53_61 Depth 2
                                        ;     Child Loop BB53_50 Depth 2
; Chunk dispatch: w23=length (zero-extended), w8=type, w9=first flag.
	mov	w23, w23
	cmp	w8, w22
	b.gt	LBB53_30                        ; types above 0x504C5444: PLTE/tRNS side
; %bb.21:                               ; in Loop: Header=BB53_20 Depth=1
	mov	w10, #16724
	movk	w10, #18756, lsl #16            ; 0x49444154 'IDAT'
	cmp	w8, w10
	b.eq	LBB53_45
; %bb.22:                               ; in Loop: Header=BB53_20 Depth=1
	mov	w10, #17490
	movk	w10, #18760, lsl #16            ; 0x49484452 'IHDR'
	cmp	w8, w10
	b.ne	LBB53_76
; %bb.23:                               ; in Loop: Header=BB53_20 Depth=1
; IHDR: only legal as the first chunk.
	tbz	w9, #0, LBB53_148
; %bb.24:                               ; in Loop: Header=BB53_20 Depth=1
	cmp	w23, #13
	b.ne	LBB53_149                       ; IHDR payload must be 13 bytes
; %bb.25:                               ; in Loop: Header=BB53_20 Depth=1
; Width -> s[+0]; must be < 0x1000001 (i.e. <= 2^24).
	mov	x0, x19
	bl	_get32
	str	w0, [x19]
	mov	w23, #1
	movk	w23, #256, lsl #16              ; w23 = 0x1000001
	cmp	w0, w23
	b.hs	LBB53_121
; %bb.26:                               ; in Loop: Header=BB53_20 Depth=1
; Height -> s[+4]; same bound.
	mov	x0, x19
	bl	_get32
	str	w0, [x19, #4]
	cmp	w0, w23
	b.hs	LBB53_121
; %bb.27:                               ; in Loop: Header=BB53_20 Depth=1
; Bit depth byte must be 8.
	ldr	x0, [x19, #16]
	cbz	x0, LBB53_97
; %bb.28:                               ; in Loop: Header=BB53_20 Depth=1
	bl	_fgetc
	cmn	w0, #1
	b.eq	LBB53_123
; %bb.29:                               ; in Loop: Header=BB53_20 Depth=1
	cmp	w0, #8
	b.eq	LBB53_99
	b	LBB53_123
LBB53_30:                               ; in Loop: Header=BB53_20 Depth=1
	mov	w9, #21573
	movk	w9, #20556, lsl #16             ; 0x504C5445 'PLTE'
	cmp	w8, w9
	b.eq	LBB53_55
; %bb.31:                               ; in Loop: Header=BB53_20 Depth=1
	mov	w9, #20051
	movk	w9, #29778, lsl #16             ; 0x74524E53 'tRNS'
	cmp	w8, w9
	b.ne	LBB53_77
; %bb.32:                               ; in Loop: Header=BB53_20 Depth=1
; tRNS: must come before any IDAT.
	ldr	x8, [x19, #40]
	cbnz	x8, LBB53_145
; %bb.33:                               ; in Loop: Header=BB53_20 Depth=1
	cbz	w24, LBB53_80                   ; not paletted -> transparent-color form
; %bb.34:                               ; in Loop: Header=BB53_20 Depth=1
	cmp	w21, #2
	b.eq	LBB53_156                       ; info scan: record 4 channels, done
; %bb.35:                               ; in Loop: Header=BB53_20 Depth=1
	cbz	w26, LBB53_157                  ; tRNS before PLTE
; %bb.36:                               ; in Loop: Header=BB53_20 Depth=1
	cmp	w26, w23
	b.lo	LBB53_151                       ; more alphas than palette entries
; %bb.37:                               ; in Loop: Header=BB53_20 Depth=1
	cbz	w23, LBB53_119
; %bb.38:                               ; in Loop: Header=BB53_20 Depth=1
; Write w23 alpha bytes into palette entries (slot +3 of each 4-byte entry).
	mov	x22, #0
	lsl	x23, x23, #2
	b	LBB53_41
LBB53_39:                               ; in Loop: Header=BB53_41 Depth=2
	bl	_fgetc
	cmn	w0, #1
	csel	w8, wzr, w0, eq
LBB53_40:                               ; in Loop: Header=BB53_41 Depth=2
	and	x9, x22, #0xfffffffc
	add	x9, x25, x9
	strb	w8, [x9, #3]                    ; palette[i].alpha = byte
	add	x22, x22, #4
	cmp	x23, x22
	b.eq	LBB53_96
LBB53_41:                               ; Parent Loop BB53_20 Depth=1
                                        ; =>  This Inner Loop Header: Depth=2
	ldr	x0, [x19, #16]
	cbnz	x0, LBB53_39
; %bb.42:                               ; in Loop: Header=BB53_41 Depth=2
	ldp	x8, x9, [x19, #24]
	cmp	x8, x9
	b.hs	LBB53_44
; %bb.43:                               ; in Loop: Header=BB53_41 Depth=2
	add	x9, x8, #1
	str	x9, [x19, #24]
	ldrb	w8, [x8]
	b	LBB53_40
LBB53_44:                               ; in Loop: Header=BB53_41 Depth=2
	mov	w8, #0
	b	LBB53_40
LBB53_45:                               ; in Loop: Header=BB53_20 Depth=1
; IDAT: paletted image requires a palette first.
	cbz	w24, LBB53_47
; %bb.46:                               ; in Loop: Header=BB53_20 Depth=1
	cbz	w26, LBB53_147
LBB53_47:                               ; in Loop: Header=BB53_20 Depth=1
	cmp	w21, #2
	b.eq	LBB53_150                       ; info scan: done at first IDAT
; %bb.48:                               ; in Loop: Header=BB53_20 Depth=1
; Grow idata if needed: need = len + offset; capacity doubles from
; max(4096, len) until it covers 'need'.
	add	w22, w23, w20
	cmp	w22, w28
	b.ls	LBB53_53
; %bb.49:                               ; in Loop: Header=BB53_20 Depth=1
	cmp	w23, #1, lsl #12                ; =4096
	mov	w8, #4096
	csel	w8, w23, w8, hi
	cmp	w28, #0
	csel	w8, w8, w28, eq
LBB53_50:                               ; Parent Loop BB53_20 Depth=1
                                        ; =>  This Inner Loop Header: Depth=2
	mov	x28, x8
	lsl	w8, w8, #1
	cmp	w22, w28
	b.hi	LBB53_50
; %bb.51:                               ; in Loop: Header=BB53_20 Depth=1
	ldr	x0, [x19, #40]
	mov	w1, w28
	bl	_realloc
	cbz	x0, LBB53_196                   ; out of memory
; %bb.52:                               ; in Loop: Header=BB53_20 Depth=1
	str	x0, [x19, #40]
LBB53_53:                               ; in Loop: Header=BB53_20 Depth=1
; Copy the payload to idata+offset: fread from FILE*, else memcpy.
	ldr	x3, [x19, #16]
	ldr	x8, [x19, #40]
	add	x0, x8, w20, uxtw
	cbz	x3, LBB53_95
; %bb.54:                               ; in Loop: Header=BB53_20 Depth=1
	mov	w1, #1
	mov	x2, x23
	bl	_fread
	mov	x20, x22                        ; new idata offset
	cmp	x0, x23
	mov	w22, #21572
	movk	w22, #20556, lsl #16
	b.eq	LBB53_19
	b	LBB53_158                       ; short read
LBB53_55:                               ; in Loop: Header=BB53_20 Depth=1
; PLTE: length must be < 769 and divisible by 3; w26 = entry count.
	cmp	w23, #769
	b.hs	LBB53_122
; %bb.56:                               ; in Loop: Header=BB53_20 Depth=1
	and	x8, x23, #0xffff
	mov	w9, #43691
	movk	w9, #43690, lsl #16
	mul	x8, x8, x9
	lsr	x26, x8, #33                    ; w26 = len / 3 (reciprocal multiply)
	add	w8, w26, w26, lsl #1
	cmp	w8, w23
	b.ne	LBB53_122                       ; len % 3 != 0
; %bb.57:                               ; in Loop: Header=BB53_20 Depth=1
	and	w8, w23, #0xffff
	cmp	w8, #3
	b.lo	LBB53_19                        ; empty-ish palette: skip
; %bb.58:                               ; in Loop: Header=BB53_20 Depth=1
; Fill palette entries {r,g,b,255} at sp+96; w26 entries.
	str	w24, [sp, #72]                  ; 4-byte Folded Spill
	mov	x23, #0
	mov	x24, #0
	cmp	w26, #1
	csinc	w8, w26, wzr, hi
	lsl	x22, x8, #2
	b	LBB53_61
LBB53_59:                               ; in Loop: Header=BB53_61 Depth=2
	add	x10, x9, #1
	str	x10, [x19, #24]
	ldrb	w9, [x9]
LBB53_60:                               ; in Loop: Header=BB53_61 Depth=2
	orr	x10, x8, #0x2
	strb	w9, [x25, x10]                  ; entry.b
	orr	x8, x8, #0x3
	strb	w27, [x25, x8]                  ; entry.a = 255
	add	x24, x24, #1
	add	x23, x23, #4
	cmp	x22, x23
	b.eq	LBB53_17
LBB53_61:                               ; Parent Loop BB53_20 Depth=1
                                        ; =>  This Inner Loop Header: Depth=2
	ldr	x0, [x19, #16]
	cbz	x0, LBB53_65
; %bb.62:                               ; in Loop: Header=BB53_61 Depth=2
	bl	_fgetc
	mov	x8, x0
	ldr	x0, [x19, #16]
	cmn	w8, #1
	csel	w8, wzr, w8, eq
	strb	w8, [x25, x23]                  ; entry.r
	cbz	x0, LBB53_67
; %bb.63:                               ; in Loop: Header=BB53_61 Depth=2
	bl	_fgetc
	mov	x8, x0
	ldr	x0, [x19, #16]
	cmn	w8, #1
	csel	w8, wzr, w8, eq
	add	x9, x25, x23
	strb	w8, [x9, #1]                    ; entry.g
	mov	x8, x23
	cbz	x0, LBB53_74
; %bb.64:                               ; in Loop: Header=BB53_61 Depth=2
	bl	_fgetc
	cmn	w0, #1
	csel	w9, wzr, w0, eq
	mov	x8, x23
	b	LBB53_60
LBB53_65:                               ; in Loop: Header=BB53_61 Depth=2
	ldp	x8, x9, [x19, #24]
	cmp	x8, x9
	b.hs	LBB53_68
; %bb.66:                               ; in Loop: Header=BB53_61 Depth=2
	add	x9, x8, #1
	str	x9, [x19, #24]
	ldrb	w8, [x8]
	b	LBB53_69
LBB53_67:                               ; in Loop: Header=BB53_61 Depth=2
	lsl	x8, x24, #2
	b	LBB53_70
LBB53_68:                               ; in Loop: Header=BB53_61 Depth=2
	mov	w8, #0
LBB53_69:                               ; in Loop: Header=BB53_61 Depth=2
	strb	w8, [x25, x23]
	mov	x8, x23
LBB53_70:                               ; in Loop: Header=BB53_61 Depth=2
	ldp	x9, x10, [x19, #24]
	cmp	x9, x10
	b.hs	LBB53_72
; %bb.71:                               ; in Loop: Header=BB53_61 Depth=2
	add	x10, x9, #1
	str	x10, [x19, #24]
	ldrb	w9, [x9]
	b	LBB53_73
LBB53_72:                               ; in Loop: Header=BB53_61 Depth=2
	mov	w9, #0
LBB53_73:                               ; in Loop: Header=BB53_61 Depth=2
	orr	x10, x8, #0x1
	strb	w9, [x25, x10]
LBB53_74:                               ; in Loop: Header=BB53_61 Depth=2
	ldp	x9, x10, [x19, #24]
	cmp	x9, x10
	b.lo	LBB53_59
; %bb.75:                               ; in Loop: Header=BB53_61 Depth=2
	mov	w9, #0
	b	LBB53_60
LBB53_76:                               ; in Loop: Header=BB53_20 Depth=1
	mov	w9, #20036
	movk	w9, #18757, lsl #16             ; 0x49454E44 'IEND'
	cmp	w8, w9
	b.eq	LBB53_125
LBB53_77:                               ; in Loop: Header=BB53_20 Depth=1
; Unknown chunk: bit 29 clear => critical (uppercase 4CC) => error;
; otherwise skip 'len' bytes of payload.
	tbz	w8, #29, LBB53_146
; %bb.78:                               ; in Loop: Header=BB53_20 Depth=1
	ldr	x0, [x19, #16]
	cbz	x0, LBB53_94
; %bb.79:                               ; in Loop: Header=BB53_20 Depth=1
	sxtw	x1, w23
	mov	w2, #1
	bl	_fseek
	b	LBB53_19
LBB53_80:                               ; in Loop: Header=BB53_20 Depth=1
; tRNS, non-paletted: img_n (s[+8]) must be odd (no alpha channel) and
; the payload length must be img_n*2.
	ldr	w8, [x19, #8]
	tbz	w8, #0, LBB53_159
; %bb.81:                               ; in Loop: Header=BB53_20 Depth=1
	cmp	w23, w8, lsl #1
	b.ne	LBB53_151
; %bb.82:                               ; in Loop: Header=BB53_20 Depth=1
	cmp	w8, #1
	b.lt	LBB53_120
; %bb.83:                               ; in Loop: Header=BB53_20 Depth=1
; Read img_n 16-bit values, keeping the low byte of each at sp+93.
	mov	x22, #0
	add	x23, sp, #93
	b	LBB53_86
LBB53_84:                               ; in Loop: Header=BB53_86 Depth=2
	add	x9, x8, #1
	str	x9, [x19, #24]
	ldrb	w8, [x8]
LBB53_85:                               ; in Loop: Header=BB53_86 Depth=2
	strb	w8, [x23, x22]
	add	x22, x22, #1
	ldrsw	x8, [x19, #8]
	cmp	x22, x8
	b.ge	LBB53_118
LBB53_86:                               ; Parent Loop BB53_20 Depth=1
                                        ; =>  This Inner Loop Header: Depth=2
	ldr	x0, [x19, #16]
	cbz	x0, LBB53_89
; %bb.87:                               ; in Loop: Header=BB53_86 Depth=2
	bl	_fgetc
	ldr	x0, [x19, #16]
	cbz	x0, LBB53_92
; %bb.88:                               ; in Loop: Header=BB53_86 Depth=2
	bl	_fgetc
	cmn	w0, #1
	csel	w8, wzr, w0, eq
	b	LBB53_85
LBB53_89:                               ; in Loop: Header=BB53_86 Depth=2
	ldp	x8, x9, [x19, #24]
	cmp	x8, x9
	b.hs	LBB53_91
; %bb.90:                               ; in Loop: Header=BB53_86 Depth=2
	add	x8, x8, #1
	str	x8, [x19, #24]
LBB53_91:                               ; in Loop: Header=BB53_86 Depth=2
	cmp	x8, x9
	b.lo	LBB53_84
	b	LBB53_93
LBB53_92:                               ; in Loop: Header=BB53_86 Depth=2
	ldp	x8, x9, [x19, #24]
	cmp	x8, x9
	b.lo	LBB53_84
LBB53_93:                               ; in Loop: Header=BB53_86 Depth=2
	mov	w8, #0
	b	LBB53_85
LBB53_94:                               ; in Loop: Header=BB53_20 Depth=1
; Memory source: skip chunk by advancing the cursor.
	ldr	x8, [x19, #24]
	add	x8, x8, w23, sxtw
	str	x8, [x19, #24]
	b	LBB53_19
LBB53_95:                               ; in Loop: Header=BB53_20 Depth=1
; Memory source: memcpy payload into idata and advance cursor.
	ldr	x1, [x19, #24]
	mov	x2, x23
	bl	_memcpy
	ldr	x8, [x19, #24]
	add	x8, x8, x23
	str	x8, [x19, #24]
	mov	x20, x22
	b	LBB53_18
LBB53_96:                               ; in Loop: Header=BB53_20 Depth=1
	mov	w24, #4                         ; paletted + alpha: pal_img_n = 4
	b	LBB53_18
LBB53_97:                               ; in Loop: Header=BB53_20 Depth=1
	ldp	x8, x9, [x19, #24]
	cmp	x8, x9
	b.hs	LBB53_123
; %bb.98:                               ; in Loop: Header=BB53_20 Depth=1
	add	x9, x8, #1
	str	x9, [x19, #24]
	ldrb	w0, [x8]
	cmp	w0, #8
	b.ne	LBB53_123                       ; only 8-bit depth supported
LBB53_99:                               ; in Loop: Header=BB53_20 Depth=1
; Color type byte -> w23; must be <= 7; 3 => paletted (pal_img_n = 3);
; other odd values are invalid.
	ldr	x0, [x19, #16]
	cbz	x0, LBB53_102
; %bb.100:                              ; in Loop: Header=BB53_20 Depth=1
	bl	_fgetc
	mov	x23, x0
	cmn	w0, #1
	b.eq	LBB53_106
; %bb.101:                              ; in Loop: Header=BB53_20 Depth=1
	cmp	w23, #7
	b.lt	LBB53_104
	b	LBB53_155
LBB53_102:                              ; in Loop: Header=BB53_20 Depth=1
	ldp	x8, x9, [x19, #24]
	cmp	x8, x9
	b.hs	LBB53_106
; %bb.103:                              ; in Loop: Header=BB53_20 Depth=1
	add	x9, x8, #1
	str	x9, [x19, #24]
	ldrb	w23, [x8]
	cmp	w23, #7
	b.ge	LBB53_155
LBB53_104:                              ; in Loop: Header=BB53_20 Depth=1
	cmp	w23, #3
	b.ne	LBB53_107
; %bb.105:                              ; in Loop: Header=BB53_20 Depth=1
	mov	w24, #3
	b	LBB53_108
LBB53_106:                              ; in Loop: Header=BB53_20 Depth=1
	mov	w23, #0
	b	LBB53_108
LBB53_107:                              ; in Loop: Header=BB53_20 Depth=1
	tbnz	w23, #0, LBB53_155
LBB53_108:                              ; in Loop: Header=BB53_20 Depth=1
; Compression (must be 0), filter (must be 0), interlace (< 2).
	mov	x0, x19
	bl	_get8
	cbnz	w0, LBB53_152
; %bb.109:                              ; in Loop: Header=BB53_20 Depth=1
	mov	x0, x19
	bl	_get8
	cbnz	w0, LBB53_153
; %bb.110:                              ; in Loop: Header=BB53_20 Depth=1
	mov	x0, x19
	bl	_get8
	cmp	w0, #2
	b.ge	LBB53_154
; %bb.111:                              ; in Loop: Header=BB53_20 Depth=1
	str	w0, [sp, #80]                   ; 4-byte Folded Spill (interlace flag)
	ldr	w9, [x19]
	cbz	w9, LBB53_124                   ; zero-sized image
; %bb.112:                              ; in Loop: Header=BB53_20 Depth=1
	ldr	w8, [x19, #4]
	cbz	w8, LBB53_124
; %bb.113:                              ; in Loop: Header=BB53_20 Depth=1
; Size budget: require width*height*components <= 2^30.
	mov	w10, #1073741824
	udiv	w9, w10, w9
	cbz	w24, LBB53_115
; %bb.114:                              ; in Loop: Header=BB53_20 Depth=1
	mov	w10, #1
	str	w10, [x19, #8]                  ; paletted: img_n = 1
	cmp	w8, w9, lsr #2
	b.ls	LBB53_19
	b	LBB53_121
LBB53_115:                              ; in Loop: Header=BB53_20 Depth=1
; img_n = 1 + (color&2) + ((color>>2)&1), stored at s[+8].
	and	w10, w23, #0x2
	bfxil	w10, w23, #2, #1
	add	w10, w10, #1
	str	w10, [x19, #8]
	udiv	w9, w9, w10
	cmp	w9, w8
	b.lo	LBB53_121
; %bb.116:                              ; in Loop: Header=BB53_20 Depth=1
	cmp	w21, #2
	b.eq	LBB53_9                         ; info scan: header is enough
; %bb.117:                              ; in Loop: Header=BB53_20 Depth=1
	mov	w24, #0
	b	LBB53_19
LBB53_118:                              ; in Loop: Header=BB53_20 Depth=1
	mov	w24, #0
	mov	w8, #1
	str	w8, [sp, #76]                   ; 4-byte Folded Spill (has-trans = 1)
	b	LBB53_18
LBB53_119:                              ; in Loop: Header=BB53_20 Depth=1
	mov	w24, #4
	b	LBB53_19
LBB53_120:                              ; in Loop: Header=BB53_20 Depth=1
	mov	w24, #0
	mov	w8, #1
	str	w8, [sp, #76]                   ; 4-byte Folded Spill (has-trans = 1)
	b	LBB53_19
; --- error exits: w0 = 0, failure_reason = the given string ---
LBB53_121:
	mov	w0, #0
Lloh231:
	adrp	x8, l_.str.33@PAGE
Lloh232:
	add	x8, x8, l_.str.33@PAGEOFF
	b	LBB53_11
LBB53_122:
	mov	w0, #0
Lloh233:
	adrp	x8, l_.str.60@PAGE
Lloh234:
	add	x8, x8, l_.str.60@PAGEOFF
	b	LBB53_11
LBB53_123:
	mov	w0, #0
Lloh235:
	adrp	x8, l_.str.54@PAGE
Lloh236:
	add	x8, x8, l_.str.54@PAGEOFF
	b	LBB53_11
LBB53_124:
	mov	w0, #0
Lloh237:
	adrp	x8, l_.str.59@PAGE
Lloh238:
	add	x8, x8, l_.str.59@PAGEOFF
	b	LBB53_11
LBB53_125:
; IEND: scan != 0 means we only wanted the header -- report success.
	cbnz	w21, LBB53_9
; %bb.126:
	ldr	x21, [x19, #40]
	cbz	x21, LBB53_160                  ; no IDAT data seen
; %bb.127:
; Inflate s->idata (x21, w20 bytes) through _do_zlib into s[+48].
	mov	w0, #16384
	bl	_malloc
	cbz	x0, LBB53_162
; %bb.128:
	mov	x1, x0
	add	x8, x21, w20, sxtw
	add	x20, sp, #1120
	stp	x21, x8, [x20]
	add	x0, sp, #1120
	mov	w2, #16384
	mov	w3, #1
	mov	w4, #1
	bl	_do_zlib
	cbz	w0, LBB53_161
; %bb.129:
	ldr	w8, [x20, #24]
	ldr	x9, [x20, #32]
	str	x9, [x19, #48]                  ; s->expanded = inflated data
	cbz	x9, LBB53_163
; %bb.130:
	sub	w26, w8, w9                     ; w26 = decompressed byte count
	ldr	x0, [x19, #40]
	bl	_free
	str	xzr, [x19, #40]
; Choose output components s[+12]: img_n, +1 when a req_comp==img_n+1 /
; has-trans combination (the ccmp chain) asks for an alpha channel.
	ldr	w8, [x19, #8]
	add	w9, w8, #1
	cmp	w24, #0
	ldr	w20, [sp, #68]                  ; 4-byte Folded Reload
	ccmp	w9, w20, #0, eq
	ccmp	w20, #3, #4, eq
	ldr	w9, [sp, #76]                   ; 4-byte Folded Reload
	ccmp	w9, #0, #0, eq
	cinc	w3, w8, ne
	str	w3, [x19, #12]
	ldr	x1, [x19, #48]
	ldr	w8, [sp, #80]                   ; 4-byte Folded Reload
	cbz	w8, LBB53_165                   ; not interlaced: single raw pass
; %bb.131:
; Interlaced: save+clear _stbi_png_partial, allocate the final w*h*out_n
; buffer, then run 7 passes (presumably Adam7 -- see xorig/xspc/yorig/yspc
; tables) copying each pass's rows into place.
Lloh239:
	adrp	x8, _stbi_png_partial@GOTPAGE
Lloh240:
	ldr	x8, [x8, _stbi_png_partial@GOTPAGEOFF]
	ldr	w9, [x8]
	str	w9, [sp, #4]                    ; 4-byte Folded Spill
	str	wzr, [x8]
	ldp	w21, w20, [x19]
	mul	w8, w21, w3
	mul	w0, w8, w20
	str	x3, [sp, #16]                   ; 8-byte Folded Spill
	mov	x22, x1
	bl	_malloc
	mov	x1, x22
	ldr	x3, [sp, #16]                   ; 8-byte Folded Reload
	str	x0, [sp, #8]                    ; 8-byte Folded Spill
	mov	x10, #0
	sxtw	x25, w3
Lloh241:
	adrp	x11, l___const.create_png_image.xorig@PAGE
Lloh242:
	add	x11, x11, l___const.create_png_image.xorig@PAGEOFF
Lloh243:
	adrp	x12, l___const.create_png_image.xspc@PAGE
Lloh244:
	add	x12, x12, l___const.create_png_image.xspc@PAGEOFF
	str	w24, [sp, #72]                  ; 4-byte Folded Spill
LBB53_132:                              ; =>This Loop Header: Depth=1
                                        ;     Child Loop BB53_138 Depth 2
                                        ;       Child Loop BB53_140 Depth 3
; Pass x10: sub-image is ceil((w - xorig)/xspc) x ceil((h - yorig)/yspc).
	lsl	x8, x10, #2
	ldrsw	x23, [x11, x8]
	ldrsw	x22, [x12, x8]
	mvn	w9, w23
	add	w9, w21, w9
	add	w9, w9, w22
	cmp	w22, w9
	b.hi	LBB53_143                       ; pass has zero columns
; %bb.133:                              ; in Loop: Header=BB53_132 Depth=1
Lloh245:
	adrp	x13, l___const.create_png_image.yorig@PAGE
Lloh246:
	add	x13, x13, l___const.create_png_image.yorig@PAGEOFF
	ldr	w28, [x13, x8]
Lloh247:
	adrp	x13, l___const.create_png_image.yspc@PAGE
Lloh248:
	add	x13, x13, l___const.create_png_image.yspc@PAGEOFF
	ldr	w24, [x13, x8]
	mvn	w8, w28
	add	w8, w20, w8
	add	w8, w8, w24
	cmp	w24, w8
	b.hi	LBB53_142                       ; pass has zero rows
; %bb.134:                              ; in Loop: Header=BB53_132 Depth=1
	stp	x10, x1, [sp, #24]              ; 16-byte Folded Spill
	udiv	w27, w9, w22                    ; pass width
	udiv	w21, w8, w24                    ; pass height
	mov	x0, x19
	str	w26, [sp, #44]                  ; 4-byte Folded Spill
	mov	x2, x26
                                        ; kill: def $w3 killed $w3 killed $x3
	mov	x4, x27
	mov	x5, x21
	bl	_create_png_image_raw
	cbz	w0, LBB53_176                   ; pass decode failed
; %bb.135:                              ; in Loop: Header=BB53_132 Depth=1
	cmp	w21, #1
	b.lt	LBB53_141
; %bb.136:                              ; in Loop: Header=BB53_132 Depth=1
; Scatter the pass's pixels into the final image at (xorig + i*xspc,
; yorig + j*yspc), one memcpy of out_n bytes per pixel.
	mov	x20, #0
	mov	x8, x27
	sxtw	x9, w8
	ldr	x8, [sp, #8]                    ; 8-byte Folded Reload
	madd	x8, x25, x23, x8
	stp	x8, x9, [sp, #48]               ; 16-byte Folded Spill
	ldr	x8, [sp, #16]                   ; 8-byte Folded Reload
	mul	w23, w8, w28
	mul	w8, w8, w24
	str	w8, [sp, #64]                   ; 4-byte Folded Spill
	mul	x22, x25, x22
	str	x21, [sp, #80]                  ; 8-byte Folded Spill
	b	LBB53_138
LBB53_137:                              ; in Loop: Header=BB53_138 Depth=2
	add	x20, x20, #1
	ldr	w8, [sp, #64]                   ; 4-byte Folded Reload
	add	w23, w23, w8
	ldr	x21, [sp, #80]                  ; 8-byte Folded Reload
	cmp	x20, x21
	b.eq	LBB53_141
LBB53_138:                              ; Parent Loop BB53_132 Depth=1
                                        ; =>  This Loop Header: Depth=2
                                        ;       Child Loop BB53_140 Depth 3
	cmp	w27, #1
	b.lt	LBB53_137
; %bb.139:                              ; in Loop: Header=BB53_138 Depth=2
	mov	x21, #0
	ldr	w8, [x19]
	ldr	x26, [x19, #56]
	ldr	x9, [sp, #56]                   ; 8-byte Folded Reload
	mul	x24, x20, x9
	mul	w8, w8, w23
	ldr	x9, [sp, #48]                   ; 8-byte Folded Reload
	add	x28, x9, x8
LBB53_140:                              ; Parent Loop BB53_132 Depth=1
                                        ;     Parent Loop BB53_138 Depth=2
                                        ; =>    This Inner Loop Header: Depth=3
	add	x8, x21, x24
	madd	x1, x8, x25, x26
	mov	x0, x28
	mov	x2, x25
	bl	_memcpy
	add	x21, x21, #1
	add	x28, x28, x22
	cmp	x21, x27
	b.ne	LBB53_140
	b	LBB53_137
LBB53_141:                              ; in Loop: Header=BB53_132 Depth=1
; Done with this pass: free the pass buffer, advance raw pointer/length.
	ldr	x0, [x19, #56]
	bl	_free
	ldp	x3, x10, [sp, #16]              ; 16-byte Folded Reload
	mul	w8, w27, w3
	mul	w8, w21, w8
	add	w8, w21, w8
	ldr	w26, [sp, #44]                  ; 4-byte Folded Reload
	sub	w26, w26, w8
	ldr	x1, [sp, #32]                   ; 8-byte Folded Reload
	add	x1, x1, w8, sxtw
	ldr	w24, [sp, #72]                  ; 4-byte Folded Reload
Lloh249:
	adrp	x11, l___const.create_png_image.xorig@PAGE
Lloh250:
	add	x11, x11, l___const.create_png_image.xorig@PAGEOFF
Lloh251:
	adrp	x12, l___const.create_png_image.xspc@PAGE
Lloh252:
	add	x12, x12, l___const.create_png_image.xspc@PAGEOFF
	b	LBB53_143
LBB53_142:                              ; in Loop: Header=BB53_132 Depth=1
	ldr	w24, [sp, #72]                  ; 4-byte Folded Reload
LBB53_143:                              ; in Loop: Header=BB53_132 Depth=1
	add	x10, x10, #1
	cmp	x10, #7
	b.eq	LBB53_164                       ; all 7 passes done
; %bb.144:                              ; in Loop: Header=BB53_132 Depth=1
	ldp	w21, w20, [x19]
	b	LBB53_132
LBB53_145:
	mov	w0, #0
Lloh253:
	adrp	x8, l_.str.61@PAGE
Lloh254:
	add	x8, x8, l_.str.61@PAGEOFF
	b	LBB53_11
LBB53_146:
; Critical unknown chunk: byte-swap the 4CC into invalid_chunk and use
; that buffer itself as the failure reason.
	mov	w0, #0
Lloh255:
	adrp	x9, _parse_png_file.invalid_chunk@PAGE
Lloh256:
	add	x9, x9, _parse_png_file.invalid_chunk@PAGEOFF
	rev	w8, w8
	str	w8, [x9]
	adrp	x8, _failure_reason@PAGE
	str	x9, [x8, _failure_reason@PAGEOFF]
	b	LBB53_12
LBB53_147:
	mov	w0, #0
Lloh257:
	adrp	x8, l_.str.65@PAGE
Lloh258:
	add	x8, x8, l_.str.65@PAGEOFF
	b	LBB53_11
LBB53_148:
	mov	w0, #0
Lloh259:
	adrp	x8, l_.str.52@PAGE
Lloh260:
	add	x8, x8, l_.str.52@PAGEOFF
	b	LBB53_11
LBB53_149:
	mov	w0, #0
Lloh261:
	adrp	x8, l_.str.53@PAGE
Lloh262:
	add	x8, x8, l_.str.53@PAGEOFF
	b	LBB53_11
LBB53_150:
; Info scan reached IDAT: record component count and succeed.
	str	w24, [x19, #8]
	mov	w0, #1
	b	LBB53_12
LBB53_151:
	mov	w0, #0
Lloh263:
	adrp	x8, l_.str.63@PAGE
Lloh264:
	add	x8, x8, l_.str.63@PAGEOFF
	b	LBB53_11
LBB53_152:
	mov	w0, #0
Lloh265:
	adrp	x8, l_.str.56@PAGE
Lloh266:
	add	x8, x8, l_.str.56@PAGEOFF
	b	LBB53_11
LBB53_153:
	mov	w0, #0
Lloh267:
	adrp	x8, l_.str.57@PAGE
Lloh268:
	add	x8, x8, l_.str.57@PAGEOFF
	b	LBB53_11
LBB53_154:
	mov	w0, #0
Lloh269:
	adrp	x8, l_.str.58@PAGE
Lloh270:
	add	x8, x8, l_.str.58@PAGEOFF
	b	LBB53_11
LBB53_155:
	mov	w0, #0
Lloh271:
	adrp	x8, l_.str.55@PAGE
Lloh272:
	add	x8, x8, l_.str.55@PAGEOFF
	b	LBB53_11
LBB53_156:
; Info scan saw paletted tRNS: report 4 channels and succeed.
	mov	w8, #4
	str	w8, [x19, #8]
	mov	w0, #1
	b	LBB53_12
LBB53_157:
	mov	w0, #0
Lloh273:
	adrp	x8, l_.str.62@PAGE
Lloh274:
	add	x8, x8, l_.str.62@PAGEOFF
	b	LBB53_11
LBB53_158:
	mov	w0, #0
Lloh275:
	adrp	x8, l_.str.66@PAGE
Lloh276:
	add	x8, x8, l_.str.66@PAGEOFF
	b	LBB53_11
LBB53_159:
	mov	w0, #0
Lloh277:
	adrp	x8, l_.str.64@PAGE
Lloh278:
	add	x8, x8, l_.str.64@PAGEOFF
	b	LBB53_11
LBB53_160:
	mov	w0, #0
Lloh279:
	adrp	x8, l_.str.67@PAGE
Lloh280:
	add	x8, x8, l_.str.67@PAGEOFF
	b	LBB53_11
LBB53_161:
; Inflate failed: free whatever do_zlib produced, clear s->expanded.
	ldr	x0, [x20, #32]
	bl	_free
LBB53_162:
	mov	w0, #0
	str	xzr, [x19, #48]
	b	LBB53_12
LBB53_163:
	mov	w0, #0
	b	LBB53_12
LBB53_164:
; Interlace done: publish final buffer, restore _stbi_png_partial.
	ldr	x8, [sp, #8]                    ; 8-byte Folded Reload
	str	x8, [x19, #56]
Lloh281:
	adrp	x8, _stbi_png_partial@GOTPAGE
Lloh282:
	ldr	x8, [x8, _stbi_png_partial@GOTPAGEOFF]
	ldr	w9, [sp, #4]                    ; 4-byte Folded Reload
Lloh283:
	str	w9, [x8]
	ldr	w20, [sp, #68]                  ; 4-byte Folded Reload
	b	LBB53_166
LBB53_165:
; Non-interlaced: decode the whole image in one raw pass.
	ldp	w4, w5, [x19]
	mov	x0, x19
	mov	x2, x26
                                        ; kill: def $w3 killed $w3 killed $x3
	bl	_create_png_image_raw
	cbz	w0, LBB53_12
LBB53_166:
; Apply tRNS transparent color if one was recorded ([sp,#76]).
	ldr	w8, [sp, #76]                   ; 4-byte Folded Reload
	cbz	w8, LBB53_190
; %bb.167:
	ldr	w9, [x19, #12]
	ldp	w8, w10, [x19]
	mul	w8, w10, w8                     ; w8 = pixel count
	ldr	x10, [x19, #56]
	cmp	w9, #2
	b.eq	LBB53_177                       ; 2-channel (gray+alpha) path
; %bb.168:
	cmp	w9, #4
	b.ne	LBB53_203                       ; out_n must be 2 or 4 here
; %bb.169:
; 4-channel path: zero alpha where RGB equals the key at sp+93..95.
	cbz	w8, LBB53_190
; %bb.170:
	ldrb	w9, [sp, #93]
	ldrb	w11, [sp, #94]
	add	x10, x10, #3
	ldrb	w12, [sp, #95]
	b	LBB53_172
LBB53_171:                              ; in Loop: Header=BB53_172 Depth=1
	add	x10, x10, #4
	subs	w8, w8, #1
	b.eq	LBB53_190
LBB53_172:                              ; =>This Inner Loop Header: Depth=1
	ldurb	w13, [x10, #-3]
	cmp	w13, w9
	b.ne	LBB53_171
; %bb.173:                              ; in Loop: Header=BB53_172 Depth=1
	ldurb	w13, [x10, #-2]
	cmp	w13, w11
	b.ne	LBB53_171
; %bb.174:                              ; in Loop: Header=BB53_172 Depth=1
	ldurb	w13, [x10, #-1]
	cmp	w13, w12
	b.ne	LBB53_171
; %bb.175:                              ; in Loop: Header=BB53_172 Depth=1
	strb	wzr, [x10]                      ; alpha = 0 on key match
	b	LBB53_171
LBB53_176:
; Interlace pass failed: free the final buffer and fail.
	ldr	x0, [sp, #8]                    ; 8-byte Folded Reload
	bl	_free
	mov	w0, #0
	b	LBB53_12
LBB53_177:
; 2-channel path: alpha = (gray != key) ? 255 : 0, NEON-vectorized
; (16-wide then 8-wide ld2/cmeq blocks, scalar tail at LBB53_189).
	cbz	w8, LBB53_190
; %bb.178:
	ldrb	w9, [sp, #93]
	sub	w11, w8, #1
	cmp	w11, #8
	b.hs	LBB53_180
; %bb.179:
	mov	w14, #0
	mov	x13, x10
	b	LBB53_188
LBB53_180:
	add	x13, x11, #1
	cmp	w11, #16
	b.hs	LBB53_182
; %bb.181:
	mov	x12, #0
	b	LBB53_186
LBB53_182:
; 16-pixels-per-iteration vector loop.
	ands	x12, x13, #0xf
	mov	w14, #16
	csel	x14, x14, x12, eq
	sub	x12, x13, x14
	dup.16b	v0, w9
	add	x15, x10, #15
	mov	x16, x13
LBB53_183:                              ; =>This Inner Loop Header: Depth=1
	sub	x17, x15, #15
	ld2.16b	{ v1, v2 }, [x17]
	cmeq.16b	v1, v1, v0
	mvn.16b	v1, v1
	sub	x17, x15, #14
	sub	x0, x15, #12
	sub	x1, x15, #10
	sub	x2, x15, #8
	st1.b	{ v1 }[0], [x17]
	st1.b	{ v1 }[1], [x0]
	sub	x17, x15, #6
	sub	x0, x15, #4
	st1.b	{ v1 }[2], [x1]
	st1.b	{ v1 }[3], [x2]
	sub	x1, x15, #2
	add	x2, x15, #2
	st1.b	{ v1 }[4], [x17]
	st1.b	{ v1 }[5], [x0]
	add	x17, x15, #4
	add	x0, x15, #6
	st1.b	{ v1 }[6], [x1]
	st1.b	{ v1 }[7], [x15]
	add	x1, x15, #8
	add	x3, x15, #10
	st1.b	{ v1 }[8], [x2]
	st1.b	{ v1 }[9], [x17]
	add	x17, x15, #12
	add	x2, x15, #14
	st1.b	{ v1 }[10], [x0]
	st1.b	{ v1 }[11], [x1]
	st1.b	{ v1 }[12], [x3]
	st1.b	{ v1 }[13], [x17]
	st1.b	{ v1 }[14], [x2]
	add	x17, x15, #16
	st1.b	{ v1 }[15], [x17]
	sub	x16, x16, #16
	add	x15, x15, #32
	cmp	x14, x16
	b.ne	LBB53_183
; %bb.184:
	cmp	x14, #8
	b.hi	LBB53_186
; %bb.185:
	add	x13, x10, x12, lsl #1
	mov	x14, x12
	b	LBB53_188
LBB53_186:
; 8-pixels-per-iteration vector loop for the remainder.
	ands	x14, x13, #0x7
	mov	w15, #8
	csel	x15, x15, x14, eq
	sub	x14, x13, x15
	dup.8b	v0, w9
	add	x13, x10, x14, lsl #1
	mvn	x11, x11
	add	x15, x15, x12
	add	x11, x11, x15
	add	x10, x10, x12, lsl #1
	add	x10, x10, #7
LBB53_187:                              ; =>This Inner Loop Header: Depth=1
	sub	x12, x10, #7
	ld2.8b	{ v1, v2 }, [x12]
	cmeq.8b	v1, v1, v0
	mvn.8b	v1, v1
	sub	x12, x10, #6
	sub	x15, x10, #4
	sub	x16, x10, #2
	st1.b	{ v1 }[0], [x12]
	st1.b	{ v1 }[1], [x15]
	add	x12, x10, #2
	add	x15, x10, #4
	st1.b	{ v1 }[2], [x16]
	st1.b	{ v1 }[3], [x10]
	add	x16, x10, #6
	add	x17, x10, #8
	st1.b	{ v1 }[4], [x12]
	st1.b	{ v1 }[5], [x15]
	st1.b	{ v1 }[6], [x16]
	st1.b	{ v1 }[7], [x17]
	add	x10, x10, #16
	adds	x11, x11, #8
	b.ne	LBB53_187
LBB53_188:
	sub	w8, w8, w14
	ldr	w20, [sp, #68]                  ; 4-byte Folded Reload
LBB53_189:                              ; =>This Inner Loop Header: Depth=1
; Scalar tail: alpha byte = 0xFF when gray != key, else 0x00.
	ldrb	w10, [x13]
	cmp	w10, w9
	csetm	w10, ne
	strb	w10, [x13, #1]
	add	x13, x13, #2
	subs	w8, w8, #1
	b.ne	LBB53_189
LBB53_190:
; Palette expansion: if pal_img_n (w24) != 0, expand 1-byte indices into
; 3- or 4-byte pixels from the palette at sp+96.
	cbz	w24, LBB53_201
; %bb.191:
	cmp	w20, #2
	csel	w23, w20, w24, gt               ; out_n = req_comp > 2 ? req_comp : pal_img_n
	stp	w24, w23, [x19, #8]
	ldp	w8, w9, [x19]
	mul	w22, w9, w8                     ; pixel count
	ldr	x20, [x19, #56]
	mul	w0, w22, w23
	bl	_malloc
	cbz	x0, LBB53_196
; %bb.192:
	mov	x21, x0
	cmp	w23, #3
	b.ne	LBB53_197
; %bb.193:
; 3-byte expansion: copy r,g,b per index.
	cbz	w22, LBB53_200
; %bb.194:
	add	x8, sp, #96
	mov	x9, x20
	mov	x10, x21
LBB53_195:                              ; =>This Inner Loop Header: Depth=1
	ldrb	w11, [x9], #1
	add	x11, x8, x11, lsl #2
	ldrh	w12, [x11]
	strh	w12, [x10]
	ldrb	w11, [x11, #2]
	strb	w11, [x10, #2]
	add	x10, x10, #3
	subs	x22, x22, #1
	b.ne	LBB53_195
	b	LBB53_200
LBB53_196:
; Out of memory: failure_reason = l_.str.5.
Lloh284:
	adrp	x8, l_.str.5@PAGE
Lloh285:
	add	x8, x8, l_.str.5@PAGEOFF
	b	LBB53_11
LBB53_197:
; 4-byte expansion: copy whole r,g,b,a word per index.
	cbz	w22, LBB53_200
; %bb.198:
	add	x8, sp, #96
	mov	x9, x20
	mov	x10, x21
LBB53_199:                              ; =>This Inner Loop Header: Depth=1
	ldrb	w11, [x9], #1
	ldr	w11, [x8, x11, lsl #2]
	str	w11, [x10], #4
	subs	x22, x22, #1
	b.ne	LBB53_199
LBB53_200:
; Replace the index image with the expanded one.
	mov	x0, x20
	bl	_free
	str	x21, [x19, #56]
LBB53_201:
	ldr	x0, [x19, #48]
	bl	_free
	str	xzr, [x19, #48]
	b	LBB53_9
LBB53_202:
	bl	___stack_chk_fail
LBB53_203:
; Unreachable out_n (not 2/4) -- compiler-generated trap helper.
	bl	_parse_png_file.cold.1
	.loh AdrpAdd	Lloh222, Lloh223
	.loh AdrpLdrGotLdr	Lloh219, Lloh220, Lloh221
	.loh AdrpLdrGot	Lloh217, Lloh218
	.loh AdrpAdd	Lloh224, Lloh225
	.loh AdrpLdrGotLdr	Lloh226, Lloh227, Lloh228
	.loh AdrpAdd	Lloh229, Lloh230
	.loh AdrpAdd	Lloh231, Lloh232
	.loh AdrpAdd	Lloh233, Lloh234
	.loh AdrpAdd	Lloh235, Lloh236
	.loh AdrpAdd	Lloh237, Lloh238
	.loh AdrpAdd	Lloh243, Lloh244
	.loh AdrpAdd	Lloh241, Lloh242
	.loh AdrpLdrGot	Lloh239, Lloh240
	.loh AdrpAdd	Lloh247, Lloh248
	.loh AdrpAdd	Lloh245, Lloh246
	.loh AdrpAdd	Lloh251, Lloh252
	.loh AdrpAdd	Lloh249, Lloh250
	.loh AdrpAdd	Lloh253, Lloh254
	.loh AdrpAdd	Lloh255, Lloh256
	.loh AdrpAdd	Lloh257, Lloh258
	.loh AdrpAdd	Lloh259, Lloh260
	.loh AdrpAdd	Lloh261, Lloh262
	.loh AdrpAdd	Lloh263, Lloh264
	.loh AdrpAdd	Lloh265, Lloh266
	.loh AdrpAdd	Lloh267, Lloh268
	.loh AdrpAdd	Lloh269, Lloh270
	.loh AdrpAdd	Lloh271, Lloh272
	.loh AdrpAdd	Lloh273, Lloh274
	.loh AdrpAdd	Lloh275, Lloh276
	.loh AdrpAdd	Lloh277, Lloh278
	.loh AdrpAdd	Lloh279, Lloh280
	.loh AdrpLdrGotStr	Lloh281, Lloh282, Lloh283
	.loh AdrpAdd	Lloh284, Lloh285
	.cfi_endproc
                                        ; -- End function
; =============================================================================
; _stbi_png_info -- C shape (inferred from register use):
;   int stbi_png_info(const char *filename, int *x, int *y, int *comp)
; Opens `filename` with fopen (mode string l_.str, defined elsewhere in this
; file -- presumably "rb"), runs parse_png_file on a stack context, and copies
; the parsed values at ctx+0/+4/+8 out through any non-NULL out-pointers.
; Returns 1 on success, 0 if fopen or parse_png_file fails.  The FILE* is
; closed on every path that opened it.  ABI: AAPCS64 (Apple arm64).
; =============================================================================
	.globl	_stbi_png_info                  ; -- Begin function stbi_png_info
	.p2align	2
_stbi_png_info:                         ; @stbi_png_info
	.cfi_startproc
; %bb.0:
	sub	sp, sp, #112                    ; locals (ctx at sp) + reg-save area
	.cfi_def_cfa_offset 112
	stp	x22, x21, [sp, #64]             ; 16-byte Folded Spill
	stp	x20, x19, [sp, #80]             ; 16-byte Folded Spill
	stp	x29, x30, [sp, #96]             ; 16-byte Folded Spill
	add	x29, sp, #96
	.cfi_def_cfa w29, 16
	.cfi_offset w30, -8
	.cfi_offset w29, -16
	.cfi_offset w19, -24
	.cfi_offset w20, -32
	.cfi_offset w21, -40
	.cfi_offset w22, -48
	mov	x20, x3                         ; x20 = comp out-pointer
	mov	x21, x2                         ; x21 = y out-pointer
	mov	x22, x1                         ; x22 = x out-pointer
Lloh286:
	adrp	x1, l_.str@PAGE
Lloh287:
	add	x1, x1, l_.str@PAGEOFF          ; x1 = fopen mode string
	bl	_fopen                          ; x0 = fopen(filename, mode)
	cbz	x0, LBB54_9                     ; open failed -> return 0, nothing to close
; %bb.1:
	mov	x19, x0                         ; x19 = FILE*, kept live for fclose
	str	x0, [sp, #16]                   ; ctx+16 = FILE* backing the stream
	mov	x0, sp                          ; x0 = &ctx (context struct on stack)
	mov	w1, #2                          ; scan mode 2 -- info/header-only scan; TODO confirm vs parse_png_file
	mov	w2, #0
	bl	_parse_png_file
	cbz	w0, LBB54_10                    ; parse failed -> result 0 (still fclose)
; %bb.2:                                ; success: copy ctx+0/+4/+8 to out-pointers
	cbz	x22, LBB54_4                    ; x == NULL -> skip
; %bb.3:
	ldr	w8, [sp]
	str	w8, [x22]                       ; *x = ctx+0 (width, per out-param naming)
LBB54_4:
	cbz	x21, LBB54_6                    ; y == NULL -> skip
; %bb.5:
	ldr	w8, [sp, #4]
	str	w8, [x21]                       ; *y = ctx+4 (height)
LBB54_6:
	cbz	x20, LBB54_8                    ; comp == NULL -> skip
; %bb.7:
	ldr	w8, [sp, #8]
	str	w8, [x20]                       ; *comp = ctx+8 (components)
LBB54_8:
	mov	w20, #1                         ; result = 1 (success)
	b	LBB54_11
LBB54_9:
	mov	w20, #0                         ; result = 0 (fopen failed)
	b	LBB54_12                        ; skip fclose: no FILE* was opened
LBB54_10:
	mov	w20, #0                         ; result = 0 (not a usable PNG)
LBB54_11:
	mov	x0, x19
	bl	_fclose                         ; close on both success and parse-failure
LBB54_12:
	mov	x0, x20                         ; return result in x0
	ldp	x29, x30, [sp, #96]             ; 16-byte Folded Reload
	ldp	x20, x19, [sp, #80]             ; 16-byte Folded Reload
	ldp	x22, x21, [sp, #64]             ; 16-byte Folded Reload
	add	sp, sp, #112
	ret
	.loh AdrpAdd	Lloh286, Lloh287
	.cfi_endproc
                                        ; -- End function
; =============================================================================
; _bmp_test -- C shape (inferred): int bmp_test(context *s)   (x0 = s)
; Probes whether the stream looks like a BMP file.  The context has two
; backings, chosen per read: a FILE* at s+16 (read with fgetc), or -- when
; that is NULL -- an in-memory cursor at s+24 bounded by the end pointer at
; s+32.  Sequence: expect magic "BM" (0x42 0x4D), read the 32-bit LE file
; size (get32le, discarded), skip 4 reserved bytes, read the pixel-data
; offset (discarded), then read the info-header size.  Returns 1 when the
; header size is 12, 40, 56 (checked with one bitmask) or 108; else 0.
; =============================================================================
	.p2align	2                               ; -- Begin function bmp_test
_bmp_test:                              ; @bmp_test
	.cfi_startproc
; %bb.0:
	stp	x20, x19, [sp, #-32]!           ; 16-byte Folded Spill
	.cfi_def_cfa_offset 32
	stp	x29, x30, [sp, #16]             ; 16-byte Folded Spill
	add	x29, sp, #16
	.cfi_def_cfa w29, 16
	.cfi_offset w30, -8
	.cfi_offset w29, -16
	.cfi_offset w19, -24
	.cfi_offset w20, -32
	mov	x19, x0                         ; x19 = context
	ldr	x0, [x0, #16]                   ; FILE* backing (NULL => memory buffer)
	cbz	x0, LBB55_3
; %bb.1:                                ; file path: read first magic byte
	bl	_fgetc
	cmn	w0, #1                          ; EOF?
	b.eq	LBB55_27
; %bb.2:
	cmp	w0, #66                         ; 'B'
	b.eq	LBB55_5
	b	LBB55_27
LBB55_3:                                ; memory path: read first magic byte
	ldp	x8, x9, [x19, #24]              ; x8 = cursor, x9 = end
	cmp	x8, x9
	b.hs	LBB55_27                        ; buffer exhausted
; %bb.4:
	add	x9, x8, #1
	str	x9, [x19, #24]                  ; advance cursor
	ldrb	w0, [x8]
	cmp	w0, #66                         ; 'B'
	b.ne	LBB55_27
LBB55_5:                                ; second magic byte
	ldr	x0, [x19, #16]
	cbz	x0, LBB55_8
; %bb.6:
	bl	_fgetc
	cmn	w0, #1                          ; EOF?
	b.eq	LBB55_27
; %bb.7:
	cmp	w0, #77                         ; 'M'
	b.eq	LBB55_10
	b	LBB55_27
LBB55_8:
	ldp	x8, x9, [x19, #24]
	cmp	x8, x9
	b.hs	LBB55_27
; %bb.9:
	add	x9, x8, #1
	str	x9, [x19, #24]
	ldrb	w0, [x8]
	cmp	w0, #77                         ; 'M'
	b.ne	LBB55_27
LBB55_10:
	mov	x0, x19
	bl	_get32le                        ; 32-bit LE file size (value unused)
	ldr	x0, [x19, #16]
	cbz	x0, LBB55_15
; %bb.11:                               ; file path: skip 4 reserved bytes
	bl	_fgetc
	ldr	x0, [x19, #16]
	cbz	x0, LBB55_17                    ; backing may switch per byte
; %bb.12:
	bl	_fgetc
	ldr	x0, [x19, #16]
	cbz	x0, LBB55_20
; %bb.13:
	bl	_fgetc
	ldr	x0, [x19, #16]
	cbz	x0, LBB55_29
; %bb.14:
	bl	_fgetc
	b	LBB55_24
LBB55_15:                               ; memory path: skip byte 1 of 4
	ldp	x8, x9, [x19, #24]
	cmp	x8, x9
	b.hs	LBB55_18
; %bb.16:
	add	x8, x8, #1
	str	x8, [x19, #24]
	b	LBB55_18
LBB55_17:
	ldp	x8, x9, [x19, #24]
LBB55_18:                               ; skip byte 2 of 4
	cmp	x8, x9
	b.hs	LBB55_20
; %bb.19:
	add	x8, x8, #1
	str	x8, [x19, #24]
LBB55_20:                               ; skip byte 3 of 4
	ldp	x8, x9, [x19, #24]
	cmp	x8, x9
	b.hs	LBB55_22
; %bb.21:
	add	x8, x8, #1
	str	x8, [x19, #24]
LBB55_22:                               ; skip byte 4 of 4
	cmp	x8, x9
	b.hs	LBB55_24
LBB55_23:
	add	x8, x8, #1
	str	x8, [x19, #24]
LBB55_24:
	mov	x0, x19
	bl	_get32le                        ; pixel-data offset (value unused)
	mov	x0, x19
	bl	_get32le                        ; info-header size
	mov	x8, x0
	mov	w0, #1                          ; provisional result = 1
	cmp	w8, #56
	b.hi	LBB55_26                        ; >56: only 108 can still match
; %bb.25:
	mov	w9, w8
	mov	w10, #1
	lsl	x9, x10, x9                     ; x9 = 1 << header_size
	mov	x10, #4096                      ; bit 12
	movk	x10, #256, lsl #32              ; | bit 40
	movk	x10, #256, lsl #48              ; | bit 56 => mask 0x0100010000001000
	tst	x9, x10
	b.ne	LBB55_28                        ; header size in {12, 40, 56} -> accept
LBB55_26:
	cmp	w8, #108                        ; 108-byte header also accepted
	b.eq	LBB55_28
LBB55_27:
	mov	w0, #0                          ; not a BMP
LBB55_28:
	ldp	x29, x30, [sp, #16]             ; 16-byte Folded Reload
	ldp	x20, x19, [sp], #32             ; 16-byte Folded Reload
	ret
LBB55_29:                               ; memory path reached after 3 fgetc's: skip byte 4
	ldp	x8, x9, [x19, #24]
	cmp	x8, x9
	b.lo	LBB55_23
	b	LBB55_24
	.cfi_endproc
                                        ; -- End function
; =============================================================================
; _stbi_bmp_load -- C shape (inferred from register use):
;   void *stbi_bmp_load(const char *filename, int *x, int *y, int *comp,
;                       int req_comp)
; Convenience wrapper: fopen(filename, l_.str mode), build a stream context
; at sp+8 with the FILE* stored at ctx+16, delegate the real work to
; bmp_load, then fclose.  Returns bmp_load's pixel buffer, or NULL (0) when
; the file could not be opened.
; =============================================================================
	.globl	_stbi_bmp_load                  ; -- Begin function stbi_bmp_load
	.p2align	2
_stbi_bmp_load:                         ; @stbi_bmp_load
	.cfi_startproc
; %bb.0:
	sub	sp, sp, #112                    ; ctx at sp+8 + reg-save area
	.cfi_def_cfa_offset 112
	stp	x24, x23, [sp, #48]             ; 16-byte Folded Spill
	stp	x22, x21, [sp, #64]             ; 16-byte Folded Spill
	stp	x20, x19, [sp, #80]             ; 16-byte Folded Spill
	stp	x29, x30, [sp, #96]             ; 16-byte Folded Spill
	add	x29, sp, #96
	.cfi_def_cfa w29, 16
	.cfi_offset w30, -8
	.cfi_offset w29, -16
	.cfi_offset w19, -24
	.cfi_offset w20, -32
	.cfi_offset w21, -40
	.cfi_offset w22, -48
	.cfi_offset w23, -56
	.cfi_offset w24, -64
	mov	x19, x4                         ; x19 = req_comp
	mov	x20, x3                         ; x20 = comp out-pointer
	mov	x21, x2                         ; x21 = y out-pointer
	mov	x23, x1                         ; x23 = x out-pointer
Lloh288:
	adrp	x1, l_.str@PAGE
Lloh289:
	add	x1, x1, l_.str@PAGEOFF          ; x1 = fopen mode string
	bl	_fopen                          ; x0 = fopen(filename, mode)
	cbz	x0, LBB56_2                     ; open failed -> return NULL
; %bb.1:
	mov	x22, x0                         ; x22 = FILE*, kept live for fclose
	str	x0, [sp, #24]                   ; ctx+16 = FILE* (ctx starts at sp+8)
	add	x0, sp, #8                      ; x0 = &ctx
	mov	x1, x23                         ; forward x
	mov	x2, x21                         ; forward y
	mov	x3, x20                         ; forward comp
	mov	x4, x19                         ; forward req_comp
	bl	_bmp_load
	mov	x19, x0                         ; x19 = decoded pixel buffer (or NULL)
	mov	x0, x22
	bl	_fclose                         ; always close the opened file
	b	LBB56_3
LBB56_2:
	mov	x19, #0                         ; NULL result on fopen failure
LBB56_3:
	mov	x0, x19                         ; return pixel buffer / NULL
	ldp	x29, x30, [sp, #96]             ; 16-byte Folded Reload
	ldp	x20, x19, [sp, #80]             ; 16-byte Folded Reload
	ldp	x22, x21, [sp, #64]             ; 16-byte Folded Reload
	ldp	x24, x23, [sp, #48]             ; 16-byte Folded Reload
	add	sp, sp, #112
	ret
	.loh AdrpAdd	Lloh288, Lloh289
	.cfi_endproc
                                        ; -- End function
.section __TEXT,__literal16,16byte_literals
.p2align 4 ; -- Begin function bmp_load
lCPI57_0:
.long 0 ; 0x0
.long 1 ; 0x1
.long 2 ; 0x2
.long 3 ; 0x3
.section __TEXT,__text,regular,pure_instructions
.p2align 2
_bmp_load: ; @bmp_load
.cfi_startproc
; %bb.0:
stp x28, x27, [sp, #-96]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 96
stp x26, x25, [sp, #16] ; 16-byte Folded Spill
stp x24, x23, [sp, #32] ; 16-byte Folded Spill
stp x22, x21, [sp, #48] ; 16-byte Folded Spill
stp x20, x19, [sp, #64] ; 16-byte Folded Spill
stp x29, x30, [sp, #80] ; 16-byte Folded Spill
add x29, sp, #80
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
sub sp, sp, #1200
mov x25, x4
mov x27, x3
mov x28, x2
mov x23, x1
mov x21, x0
Lloh290:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh291:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh292:
ldr x8, [x8]
stur x8, [x29, #-96]
mov x22, x0
ldr x0, [x22, #16]!
cbz x0, LBB57_3
; %bb.1:
bl _fgetc
cmn w0, #1
b.eq LBB57_15
; %bb.2:
cmp w0, #66
b.eq LBB57_5
b LBB57_15
LBB57_3:
ldp x8, x9, [x21, #24]
cmp x8, x9
b.hs LBB57_15
; %bb.4:
add x9, x8, #1
str x9, [x21, #24]
ldrb w0, [x8]
cmp w0, #66
b.ne LBB57_15
LBB57_5:
ldr x0, [x22]
cbz x0, LBB57_8
; %bb.6:
bl _fgetc
cmn w0, #1
b.eq LBB57_15
; %bb.7:
cmp w0, #77
b.ne LBB57_15
b LBB57_10
LBB57_8:
ldp x8, x9, [x21, #24]
cmp x8, x9
b.hs LBB57_15
; %bb.9:
add x9, x8, #1
str x9, [x21, #24]
ldrb w0, [x8]
cmp w0, #77
b.ne LBB57_15
LBB57_10:
mov x0, x21
bl _get32le
ldr x0, [x21, #16]
cbz x0, LBB57_19
; %bb.11:
bl _fgetc
ldr x0, [x22]
cbz x0, LBB57_21
; %bb.12:
bl _fgetc
ldr x0, [x22]
cbz x0, LBB57_24
; %bb.13:
bl _fgetc
ldr x0, [x22]
cbz x0, LBB57_42
; %bb.14:
bl _fgetc
b LBB57_28
LBB57_15:
mov x24, #0
Lloh293:
adrp x8, l_.str.74@PAGE
Lloh294:
add x8, x8, l_.str.74@PAGEOFF
LBB57_16:
adrp x9, _failure_reason@PAGE
str x8, [x9, _failure_reason@PAGEOFF]
LBB57_17:
ldur x8, [x29, #-96]
Lloh295:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh296:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh297:
ldr x9, [x9]
cmp x9, x8
b.ne LBB57_287
; %bb.18:
mov x0, x24
add sp, sp, #1200
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
ldp x26, x25, [sp, #16] ; 16-byte Folded Reload
ldp x28, x27, [sp], #96 ; 16-byte Folded Reload
ret
LBB57_19:
ldp x8, x9, [x21, #24]
cmp x8, x9
b.hs LBB57_22
; %bb.20:
add x8, x8, #1
str x8, [x21, #24]
b LBB57_22
LBB57_21:
ldp x8, x9, [x21, #24]
LBB57_22:
cmp x8, x9
b.hs LBB57_24
; %bb.23:
add x8, x8, #1
str x8, [x21, #24]
LBB57_24:
ldp x8, x9, [x21, #24]
cmp x8, x9
b.hs LBB57_26
; %bb.25:
add x8, x8, #1
str x8, [x21, #24]
LBB57_26:
cmp x8, x9
b.hs LBB57_28
LBB57_27:
add x8, x8, #1
str x8, [x21, #24]
LBB57_28:
mov x0, x21
bl _get32le
mov x24, x0
mov x0, x21
bl _get32le
mov x19, x0
cmp w0, #56
b.hi LBB57_30
; %bb.29:
mov w8, w19
mov w9, #1
lsl x8, x9, x8
mov x9, #4096
movk x9, #256, lsl #32
movk x9, #256, lsl #48
tst x8, x9
b.ne LBB57_31
LBB57_30:
cmp w19, #108
b.ne LBB57_38
LBB57_31:
Lloh298:
adrp x8, l_.str.76@PAGE
Lloh299:
add x8, x8, l_.str.76@PAGEOFF
adrp x26, _failure_reason@PAGE
str x8, [x26, _failure_reason@PAGEOFF]
cmp w19, #12
b.ne LBB57_37
; %bb.32:
ldr x0, [x22]
cbz x0, LBB57_39
; %bb.33:
bl _fgetc
cmn w0, #1
csel w20, wzr, w0, eq
ldr x0, [x22]
cbz x0, LBB57_41
; %bb.34:
bl _fgetc
mov x8, x0
ldr x0, [x21, #16]
lsl w9, w8, #8
cmn w8, #1
csel w8, wzr, w9, eq
add w8, w8, w20
str w8, [x21]
cbz x0, LBB57_48
; %bb.35:
bl _fgetc
cmn w0, #1
csel w20, wzr, w0, eq
ldr x0, [x22]
cbz x0, LBB57_90
; %bb.36:
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
add w0, w20, w8, lsl #8
b LBB57_53
LBB57_37:
mov x0, x21
bl _get32le
str w0, [x21]
mov x0, x21
bl _get32le
b LBB57_53
LBB57_38:
mov x24, #0
Lloh300:
adrp x8, l_.str.75@PAGE
Lloh301:
add x8, x8, l_.str.75@PAGEOFF
b LBB57_16
LBB57_39:
ldp x8, x9, [x21, #24]
cmp x8, x9
b.hs LBB57_43
; %bb.40:
add x10, x8, #1
str x10, [x21, #24]
ldrb w20, [x8]
mov x8, x10
b LBB57_44
LBB57_41:
ldp x8, x9, [x21, #24]
b LBB57_44
LBB57_42:
ldp x8, x9, [x21, #24]
cmp x8, x9
b.lo LBB57_27
b LBB57_28
LBB57_43:
mov w20, #0
LBB57_44:
cmp x8, x9
b.hs LBB57_46
; %bb.45:
add x9, x8, #1
str x9, [x21, #24]
ldrb w8, [x8]
b LBB57_47
LBB57_46:
mov w8, #0
LBB57_47:
add w8, w20, w8, lsl #8
str w8, [x21]
LBB57_48:
ldp x8, x9, [x21, #24]
cmp x8, x9
b.hs LBB57_51
; %bb.49:
add x10, x8, #1
str x10, [x21, #24]
ldrb w20, [x8]
mov x8, x10
cmp x8, x9
b.hs LBB57_52
LBB57_50:
add x9, x8, #1
str x9, [x21, #24]
ldrb w8, [x8]
add w0, w20, w8, lsl #8
b LBB57_53
LBB57_51:
mov w20, #0
cmp x8, x9
b.lo LBB57_50
LBB57_52:
mov w8, #0
add w0, w20, wzr, lsl #8
LBB57_53:
str w0, [x21, #4]
ldr x0, [x21, #16]
cbz x0, LBB57_56
; %bb.54:
bl _fgetc
cmn w0, #1
csel w20, wzr, w0, eq
ldr x0, [x22]
cbz x0, LBB57_58
; %bb.55:
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
b LBB57_63
LBB57_56:
ldp x8, x9, [x21, #24]
cmp x8, x9
b.hs LBB57_59
; %bb.57:
add x10, x8, #1
str x10, [x21, #24]
ldrb w20, [x8]
mov x8, x10
b LBB57_60
LBB57_58:
ldp x8, x9, [x21, #24]
b LBB57_60
LBB57_59:
mov w20, #0
LBB57_60:
cmp x8, x9
b.hs LBB57_62
; %bb.61:
add x9, x8, #1
str x9, [x21, #24]
ldrb w8, [x8]
b LBB57_63
LBB57_62:
mov w8, #0
LBB57_63:
add w8, w20, w8, lsl #8
cmp w8, #1
b.ne LBB57_285
; %bb.64:
ldr x0, [x22]
cbz x0, LBB57_67
; %bb.65:
bl _fgetc
cmn w0, #1
csel w20, wzr, w0, eq
ldr x0, [x22]
cbz x0, LBB57_69
; %bb.66:
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
b LBB57_74
LBB57_67:
ldp x8, x9, [x21, #24]
cmp x8, x9
b.hs LBB57_70
; %bb.68:
add x10, x8, #1
str x10, [x21, #24]
ldrb w20, [x8]
mov x8, x10
b LBB57_71
LBB57_69:
ldp x8, x9, [x21, #24]
b LBB57_71
LBB57_70:
mov w20, #0
LBB57_71:
cmp x8, x9
b.hs LBB57_73
; %bb.72:
add x9, x8, #1
str x9, [x21, #24]
ldrb w8, [x8]
b LBB57_74
LBB57_73:
mov w8, #0
LBB57_74:
add w8, w20, w8, lsl #8
str w8, [sp, #124] ; 4-byte Folded Spill
cmp w8, #1
b.ne LBB57_76
; %bb.75:
mov x24, #0
Lloh302:
adrp x8, l_.str.77@PAGE
Lloh303:
add x8, x8, l_.str.77@PAGEOFF
str x8, [x26, _failure_reason@PAGEOFF]
b LBB57_17
LBB57_76:
str w24, [sp, #128] ; 4-byte Folded Spill
ldr w24, [x21, #4]
cmp w24, #0
cneg w8, w24, mi
str w8, [x21, #4]
cmp w19, #12
b.ne LBB57_79
; %bb.77:
mov x20, x26
ldr w8, [sp, #124] ; 4-byte Folded Reload
cmp w8, #23
b.gt LBB57_81
; %bb.78:
mov w26, #0
str xzr, [sp, #112] ; 8-byte Folded Spill
str wzr, [sp, #120] ; 4-byte Folded Spill
ldr w8, [sp, #128] ; 4-byte Folded Reload
sub w8, w8, #38
mov w9, #21846
movk w9, #21845, lsl #16
smull x8, w8, w9
lsr x9, x8, #63
lsr x8, x8, #32
add w8, w8, w9
b LBB57_96
LBB57_79:
mov x0, x21
bl _get32le
sub w8, w0, #1
cmp w8, #1
b.hi LBB57_82
; %bb.80:
mov x24, #0
Lloh304:
adrp x8, l_.str.78@PAGE
Lloh305:
add x8, x8, l_.str.78@PAGEOFF
str x8, [x26, _failure_reason@PAGEOFF]
b LBB57_17
LBB57_81:
str wzr, [sp, #104] ; 4-byte Folded Spill
mov w26, #0
str xzr, [sp, #112] ; 8-byte Folded Spill
str wzr, [sp, #120] ; 4-byte Folded Spill
b LBB57_97
LBB57_82:
mov x20, x0
mov x0, x21
bl _get32le
mov x0, x21
bl _get32le
mov x0, x21
bl _get32le
mov x0, x21
bl _get32le
mov x0, x21
bl _get32le
and w8, w19, #0xffffffef
cmp w8, #40
b.ne LBB57_91
; %bb.83:
cmp w19, #56
b.ne LBB57_85
; %bb.84:
mov x0, x21
bl _get32le
mov x0, x21
bl _get32le
mov x0, x21
bl _get32le
mov x0, x21
bl _get32le
LBB57_85:
ldr w8, [sp, #124] ; 4-byte Folded Reload
cmp w8, #32
b.eq LBB57_87
; %bb.86:
cmp w8, #16
b.ne LBB57_93
LBB57_87:
cmp w20, #3
b.eq LBB57_283
; %bb.88:
cbnz w20, LBB57_285
; %bb.89:
mov x20, x26
str wzr, [sp, #104] ; 4-byte Folded Spill
mov w8, #-16777216
ldr w9, [sp, #124] ; 4-byte Folded Reload
cmp w9, #32
csel w26, w8, wzr, eq
mov w8, #31
mov w9, #255
csel w10, w9, w8, eq
mov w8, #992
mov w9, #65280
csel w8, w9, w8, eq
str w8, [sp, #120] ; 4-byte Folded Spill
mov w8, #31744
mov w9, #16711680
csel w8, w9, w8, eq
stp w10, w8, [sp, #112] ; 8-byte Folded Spill
b LBB57_97
LBB57_90:
ldp x8, x9, [x21, #24]
cmp x8, x9
b.lo LBB57_50
b LBB57_52
LBB57_91:
cmp w19, #108
b.ne LBB57_288
; %bb.92:
mov x20, x26
mov x0, x21
bl _get32le
str w0, [sp, #116] ; 4-byte Folded Spill
mov x0, x21
bl _get32le
str w0, [sp, #120] ; 4-byte Folded Spill
mov x0, x21
bl _get32le
str w0, [sp, #112] ; 4-byte Folded Spill
mov x0, x21
bl _get32le
mov x26, x0
mov x0, x21
bl _get32le
mov x0, x21
bl _get32le
mov x0, x21
bl _get32le
mov x0, x21
bl _get32le
mov x0, x21
bl _get32le
mov x0, x21
bl _get32le
mov x0, x21
bl _get32le
mov x0, x21
bl _get32le
mov x0, x21
bl _get32le
mov x0, x21
bl _get32le
mov x0, x21
bl _get32le
mov x0, x21
bl _get32le
mov x0, x21
bl _get32le
b LBB57_94
LBB57_93:
mov x20, x26
mov w26, #0
str xzr, [sp, #112] ; 8-byte Folded Spill
str wzr, [sp, #120] ; 4-byte Folded Spill
LBB57_94:
ldr w8, [sp, #124] ; 4-byte Folded Reload
cmp w8, #15
b.gt LBB57_129
; %bb.95:
ldr w8, [sp, #128] ; 4-byte Folded Reload
sub w8, w8, w19
sub w8, w8, #14
asr w8, w8, #2
LBB57_96:
str w8, [sp, #104] ; 4-byte Folded Spill
LBB57_97:
str w24, [sp, #36] ; 4-byte Folded Spill
str w26, [sp, #108] ; 4-byte Folded Spill
cmp w26, #0
mov w8, #3
cinc w8, w8, ne
str w8, [x21, #8]
cmp w25, #2
csel w10, w25, w8, gt
ldp w8, w9, [x21]
str w10, [sp, #156] ; 4-byte Folded Spill
mul w8, w8, w10
mul w0, w8, w9
bl _malloc
mov x24, x0
cbz x0, LBB57_123
; %bb.98:
ldr w8, [sp, #124] ; 4-byte Folded Reload
cmp w8, #15
mov x26, x20
b.gt LBB57_124
; %bb.99:
ldr w8, [sp, #104] ; 4-byte Folded Reload
cbz w8, LBB57_126
; %bb.100:
cmp w8, #256
b.gt LBB57_126
; %bb.101:
mov x20, x26
str w25, [sp, #20] ; 4-byte Folded Spill
str x23, [sp, #24] ; 8-byte Folded Spill
cmp w8, #1
b.lt LBB57_127
; %bb.102:
ldr w8, [sp, #104] ; 4-byte Folded Reload
mov w23, w8
add x8, sp, #160
add x25, x8, #1
mov w26, #255
b LBB57_105
LBB57_103: ; in Loop: Header=BB57_105 Depth=1
bl _fgetc
LBB57_104: ; in Loop: Header=BB57_105 Depth=1
strb w26, [x25, #2]
add x25, x25, #4
subs x23, x23, #1
b.eq LBB57_127
LBB57_105: ; =>This Inner Loop Header: Depth=1
ldr x0, [x22]
cbz x0, LBB57_109
; %bb.106: ; in Loop: Header=BB57_105 Depth=1
bl _fgetc
mov x8, x0
ldr x0, [x22]
cmn w8, #1
csel w8, wzr, w8, eq
strb w8, [x25, #1]
cbz x0, LBB57_112
; %bb.107: ; in Loop: Header=BB57_105 Depth=1
bl _fgetc
mov x8, x0
ldr x0, [x22]
cmn w8, #1
csel w8, wzr, w8, eq
strb w8, [x25]
cbz x0, LBB57_116
; %bb.108: ; in Loop: Header=BB57_105 Depth=1
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
b LBB57_119
LBB57_109: ; in Loop: Header=BB57_105 Depth=1
ldp x8, x9, [x21, #24]
cmp x8, x9
b.hs LBB57_111
; %bb.110: ; in Loop: Header=BB57_105 Depth=1
add x9, x8, #1
str x9, [x21, #24]
ldrb w8, [x8]
strb w8, [x25, #1]
b LBB57_112
LBB57_111: ; in Loop: Header=BB57_105 Depth=1
mov w8, #0
strb wzr, [x25, #1]
LBB57_112: ; in Loop: Header=BB57_105 Depth=1
ldp x8, x9, [x21, #24]
cmp x8, x9
b.hs LBB57_114
; %bb.113: ; in Loop: Header=BB57_105 Depth=1
add x9, x8, #1
str x9, [x21, #24]
ldrb w8, [x8]
b LBB57_115
LBB57_114: ; in Loop: Header=BB57_105 Depth=1
mov w8, #0
LBB57_115: ; in Loop: Header=BB57_105 Depth=1
strb w8, [x25]
LBB57_116: ; in Loop: Header=BB57_105 Depth=1
ldp x8, x9, [x21, #24]
cmp x8, x9
b.hs LBB57_118
; %bb.117: ; in Loop: Header=BB57_105 Depth=1
add x9, x8, #1
str x9, [x21, #24]
ldrb w8, [x8]
b LBB57_119
LBB57_118: ; in Loop: Header=BB57_105 Depth=1
mov w8, #0
LBB57_119: ; in Loop: Header=BB57_105 Depth=1
sturb w8, [x25, #-1]
cmp w19, #12
b.eq LBB57_104
; %bb.120: ; in Loop: Header=BB57_105 Depth=1
ldr x0, [x22]
cbnz x0, LBB57_103
; %bb.121: ; in Loop: Header=BB57_105 Depth=1
ldp x8, x9, [x21, #24]
cmp x8, x9
b.hs LBB57_104
; %bb.122: ; in Loop: Header=BB57_105 Depth=1
add x8, x8, #1
str x8, [x21, #24]
b LBB57_104
LBB57_123:
Lloh306:
adrp x8, l_.str.5@PAGE
Lloh307:
add x8, x8, l_.str.5@PAGEOFF
str x8, [x20, _failure_reason@PAGEOFF]
b LBB57_17
LBB57_124:
ldr w8, [sp, #128] ; 4-byte Folded Reload
sub w8, w8, w19
sub w8, w8, #14
ldr x0, [x22]
cbz x0, LBB57_130
; %bb.125:
sxtw x1, w8
mov w2, #1
bl _fseek
b LBB57_131
LBB57_126:
mov x0, x24
bl _free
mov x24, #0
Lloh308:
adrp x8, l_.str.80@PAGE
Lloh309:
add x8, x8, l_.str.80@PAGEOFF
str x8, [x26, _failure_reason@PAGEOFF]
b LBB57_17
LBB57_127:
ldr w8, [sp, #128] ; 4-byte Folded Reload
sub w8, w8, w19
cmp w19, #12
mov w9, #-4
cinc w9, w9, eq
ldr w10, [sp, #104] ; 4-byte Folded Reload
madd w8, w10, w9, w8
sub w8, w8, #14
ldr x0, [x22]
cbz x0, LBB57_227
; %bb.128:
sxtw x1, w8
mov w2, #1
bl _fseek
b LBB57_228
LBB57_129:
str wzr, [sp, #104] ; 4-byte Folded Spill
b LBB57_97
LBB57_130:
ldr x9, [x21, #24]
add x8, x9, w8, sxtw
str x8, [x21, #24]
LBB57_131:
ldr w10, [sp, #112] ; 4-byte Folded Reload
ldr w8, [sp, #124] ; 4-byte Folded Reload
cmp w8, #32
b.eq LBB57_135
; %bb.132:
cmp w8, #24
b.eq LBB57_146
; %bb.133:
cmp w8, #16
b.ne LBB57_140
; %bb.134:
ldr w8, [x21]
ubfiz w20, w8, #1, #1
b LBB57_141
LBB57_135:
cmp w10, #255
b.ne LBB57_140
; %bb.136:
mov w8, #65280
ldr w9, [sp, #120] ; 4-byte Folded Reload
cmp w9, w8
b.ne LBB57_140
; %bb.137:
mov w8, #-16777216
ldr w9, [sp, #116] ; 4-byte Folded Reload
cmp w9, w8
b.ne LBB57_140
; %bb.138:
ldr w9, [sp, #108] ; 4-byte Folded Reload
cmp w9, w8
b.ne LBB57_140
; %bb.139:
str w25, [sp, #20] ; 4-byte Folded Spill
str x23, [sp, #24] ; 8-byte Folded Spill
stp x28, x27, [sp] ; 16-byte Folded Spill
str wzr, [sp, #48] ; 4-byte Folded Spill
mov w20, #0
mov w27, #0
mov w28, #0
str wzr, [sp, #60] ; 4-byte Folded Spill
mov w19, #0
mov w8, #1
stp w8, wzr, [sp, #100] ; 8-byte Folded Spill
b LBB57_147
LBB57_140:
mov w20, #0
LBB57_141:
ldr w8, [sp, #116] ; 4-byte Folded Reload
cbz w8, LBB57_145
; %bb.142:
ldr w8, [sp, #120] ; 4-byte Folded Reload
cbz w8, LBB57_145
; %bb.143:
cbz w10, LBB57_145
; %bb.144:
str w25, [sp, #20] ; 4-byte Folded Spill
str x23, [sp, #24] ; 8-byte Folded Spill
stp x28, x27, [sp] ; 16-byte Folded Spill
mov x23, x10
ldr w19, [sp, #116] ; 4-byte Folded Reload
mov x0, x19
bl _high_bit
sub w8, w0, #7
str w8, [sp, #104] ; 4-byte Folded Spill
mov x0, x19
bl _bitcount
mov x19, x0
ldr w0, [sp, #120] ; 4-byte Folded Reload
bl _high_bit
sub w27, w0, #7
mov x0, x23
bl _high_bit
sub w28, w0, #7
ldr w0, [sp, #108] ; 4-byte Folded Reload
bl _high_bit
str wzr, [sp, #100] ; 4-byte Folded Spill
sub w8, w0, #7
str w8, [sp, #60] ; 4-byte Folded Spill
mov w8, #1
str w8, [sp, #48] ; 4-byte Folded Spill
b LBB57_147
LBB57_145:
mov x24, #0
Lloh310:
adrp x8, l_.str.82@PAGE
Lloh311:
add x8, x8, l_.str.82@PAGEOFF
str x8, [x26, _failure_reason@PAGEOFF]
b LBB57_17
LBB57_146:
str w25, [sp, #20] ; 4-byte Folded Spill
str x23, [sp, #24] ; 8-byte Folded Spill
stp x28, x27, [sp] ; 16-byte Folded Spill
stp wzr, wzr, [sp, #100] ; 8-byte Folded Spill
str wzr, [sp, #48] ; 4-byte Folded Spill
mov w27, #0
mov w28, #0
str wzr, [sp, #60] ; 4-byte Folded Spill
mov w19, #0
ldr w8, [x21]
and w20, w8, #0x3
LBB57_147:
ldr w4, [x21, #4]
cmp w4, #1
b.lt LBB57_259
; %bb.148:
mov w25, #0
mov w11, #0
ldr w8, [sp, #104] ; 4-byte Folded Reload
neg w8, w8
str w8, [sp, #96] ; 4-byte Folded Spill
neg w8, w27
str w8, [sp, #92] ; 4-byte Folded Spill
lsl w8, w19, #1
mov w9, #8
cmp w8, #8
csel w8, w8, w9, gt
cset w9, lt
mov x10, x9
bfi w10, w19, #1, #31
sub w8, w8, w10
dup.4s v1, w19
Lloh312:
adrp x10, lCPI57_0@PAGE
Lloh313:
ldr q0, [x10, lCPI57_0@PAGEOFF]
mla.4s v1, v1, v0
str q1, [sp, #64] ; 16-byte Folded Spill
lsl w10, w19, #2
dup.4s v18, w10
neg w10, w28
str w10, [sp, #88] ; 4-byte Folded Spill
udiv w8, w8, w19
add w8, w8, w9
ldr w9, [sp, #60] ; 4-byte Folded Reload
neg w9, w9
str w9, [sp, #56] ; 4-byte Folded Spill
mov w9, w20
str x9, [sp, #40] ; 8-byte Folded Spill
add w23, w8, #1
str q18, [sp, #128] ; 16-byte Folded Spill
b LBB57_151
LBB57_149: ; in Loop: Header=BB57_151 Depth=1
ldr x8, [x21, #24]
ldr x9, [sp, #40] ; 8-byte Folded Reload
add x8, x8, x9
str x8, [x21, #24]
LBB57_150: ; in Loop: Header=BB57_151 Depth=1
ldr w11, [sp, #52] ; 4-byte Folded Reload
add w11, w11, #1
ldr w4, [x21, #4]
cmp w11, w4
b.ge LBB57_259
LBB57_151: ; =>This Loop Header: Depth=1
; Child Loop BB57_201 Depth 2
; Child Loop BB57_155 Depth 2
; Child Loop BB57_168 Depth 3
; Child Loop BB57_170 Depth 3
; Child Loop BB57_175 Depth 3
; Child Loop BB57_177 Depth 3
; Child Loop BB57_182 Depth 3
; Child Loop BB57_184 Depth 3
; Child Loop BB57_190 Depth 3
; Child Loop BB57_192 Depth 3
ldr w8, [x21]
ldr w9, [sp, #48] ; 4-byte Folded Reload
str w11, [sp, #52] ; 4-byte Folded Spill
cmp w8, #1
tbz w9, #0, LBB57_198
; %bb.152: ; in Loop: Header=BB57_151 Depth=1
b.lt LBB57_225
; %bb.153: ; in Loop: Header=BB57_151 Depth=1
mov w20, #0
mov x26, x25
b LBB57_155
LBB57_154: ; in Loop: Header=BB57_155 Depth=2
add w20, w20, #1
ldr w8, [x21]
mov x26, x25
cmp w20, w8
b.ge LBB57_225
LBB57_155: ; Parent Loop BB57_151 Depth=1
; => This Loop Header: Depth=2
; Child Loop BB57_168 Depth 3
; Child Loop BB57_170 Depth 3
; Child Loop BB57_175 Depth 3
; Child Loop BB57_177 Depth 3
; Child Loop BB57_182 Depth 3
; Child Loop BB57_184 Depth 3
; Child Loop BB57_190 Depth 3
; Child Loop BB57_192 Depth 3
ldr w8, [sp, #124] ; 4-byte Folded Reload
cmp w8, #16
b.ne LBB57_159
; %bb.156: ; in Loop: Header=BB57_155 Depth=2
ldr x0, [x22]
cbz x0, LBB57_160
; %bb.157: ; in Loop: Header=BB57_155 Depth=2
bl _fgetc
cmn w0, #1
csel w25, wzr, w0, eq
ldr x0, [x22]
cbz x0, LBB57_163
; %bb.158: ; in Loop: Header=BB57_155 Depth=2
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
ldr q18, [sp, #128] ; 16-byte Folded Reload
add w0, w25, w8, lsl #8
b LBB57_165
LBB57_159: ; in Loop: Header=BB57_155 Depth=2
mov x0, x21
bl _get32le
ldr q18, [sp, #128] ; 16-byte Folded Reload
b LBB57_165
LBB57_160: ; in Loop: Header=BB57_155 Depth=2
ldp x8, x9, [x21, #24]
cmp x8, x9
b.hs LBB57_197
; %bb.161: ; in Loop: Header=BB57_155 Depth=2
add x10, x8, #1
str x10, [x21, #24]
ldrb w25, [x8]
mov x8, x10
cmp x8, x9
b.lo LBB57_164
LBB57_162: ; in Loop: Header=BB57_155 Depth=2
mov w8, #0
add w0, w25, wzr, lsl #8
b LBB57_165
LBB57_163: ; in Loop: Header=BB57_155 Depth=2
ldp x8, x9, [x21, #24]
ldr q18, [sp, #128] ; 16-byte Folded Reload
cmp x8, x9
b.hs LBB57_162
LBB57_164: ; in Loop: Header=BB57_155 Depth=2
add x9, x8, #1
str x9, [x21, #24]
ldrb w8, [x8]
add w0, w25, w8, lsl #8
LBB57_165: ; in Loop: Header=BB57_155 Depth=2
ldr w8, [sp, #116] ; 4-byte Folded Reload
and w8, w0, w8
ldr w9, [sp, #96] ; 4-byte Folded Reload
lsl w9, w8, w9
ldr w10, [sp, #104] ; 4-byte Folded Reload
asr w8, w8, w10
cmp w10, #0
csel w8, w9, w8, lt
cmp w19, #7
b.gt LBB57_171
; %bb.166: ; in Loop: Header=BB57_155 Depth=2
mov x9, x19
mov x11, x8
cmp w23, #16
ldr w13, [sp, #156] ; 4-byte Folded Reload
b.lo LBB57_170
; %bb.167: ; in Loop: Header=BB57_155 Depth=2
mov w11, #0
and w10, w23, #0xfffffff0
madd w9, w10, w19, w19
movi.2d v0, #0000000000000000
mov.s v0[0], w8
dup.4s v2, w8
movi.2d v1, #0000000000000000
ldr q5, [sp, #64] ; 16-byte Folded Reload
movi.2d v3, #0000000000000000
movi.2d v4, #0000000000000000
LBB57_168: ; Parent Loop BB57_151 Depth=1
; Parent Loop BB57_155 Depth=2
; => This Inner Loop Header: Depth=3
add.4s v6, v5, v18
add.4s v7, v6, v18
add.4s v16, v7, v18
neg.4s v5, v5
sshl.4s v5, v2, v5
neg.4s v6, v6
sshl.4s v6, v2, v6
neg.4s v7, v7
sshl.4s v7, v2, v7
neg.4s v17, v16
sshl.4s v17, v2, v17
add.4s v0, v5, v0
add.4s v1, v6, v1
add.4s v3, v7, v3
add.4s v4, v17, v4
add w11, w11, #16
add.4s v5, v16, v18
cmp w11, w10
b.ne LBB57_168
; %bb.169: ; in Loop: Header=BB57_155 Depth=2
add.4s v0, v1, v0
add.4s v0, v3, v0
add.4s v0, v4, v0
addv.4s s0, v0
fmov w11, s0
cmp w23, w10
b.eq LBB57_172
LBB57_170: ; Parent Loop BB57_151 Depth=1
; Parent Loop BB57_155 Depth=2
; => This Inner Loop Header: Depth=3
asr w10, w8, w9
add w11, w10, w11
add w9, w9, w19
cmp w9, #8
b.lt LBB57_170
b LBB57_172
LBB57_171: ; in Loop: Header=BB57_155 Depth=2
mov x11, x8
ldr w13, [sp, #156] ; 4-byte Folded Reload
LBB57_172: ; in Loop: Header=BB57_155 Depth=2
sxtw x8, w26
strb w11, [x24, x8]
ldr w9, [sp, #120] ; 4-byte Folded Reload
and w9, w0, w9
ldr w10, [sp, #92] ; 4-byte Folded Reload
lsl w10, w9, w10
asr w9, w9, w27
cmp w27, #0
csel w9, w10, w9, lt
cmp w19, #7
b.gt LBB57_178
; %bb.173: ; in Loop: Header=BB57_155 Depth=2
mov x10, x19
mov x12, x9
cmp w23, #16
b.lo LBB57_177
; %bb.174: ; in Loop: Header=BB57_155 Depth=2
mov w12, #0
and w11, w23, #0xfffffff0
madd w10, w11, w19, w19
movi.2d v0, #0000000000000000
mov.s v0[0], w9
dup.4s v2, w9
movi.2d v1, #0000000000000000
ldr q5, [sp, #64] ; 16-byte Folded Reload
movi.2d v3, #0000000000000000
movi.2d v4, #0000000000000000
LBB57_175: ; Parent Loop BB57_151 Depth=1
; Parent Loop BB57_155 Depth=2
; => This Inner Loop Header: Depth=3
add.4s v6, v5, v18
add.4s v7, v6, v18
add.4s v16, v7, v18
neg.4s v5, v5
sshl.4s v5, v2, v5
neg.4s v6, v6
sshl.4s v6, v2, v6
neg.4s v7, v7
sshl.4s v7, v2, v7
neg.4s v17, v16
sshl.4s v17, v2, v17
add.4s v0, v5, v0
add.4s v1, v6, v1
add.4s v3, v7, v3
add.4s v4, v17, v4
add w12, w12, #16
add.4s v5, v16, v18
cmp w12, w11
b.ne LBB57_175
; %bb.176: ; in Loop: Header=BB57_155 Depth=2
add.4s v0, v1, v0
add.4s v0, v3, v0
add.4s v0, v4, v0
addv.4s s0, v0
fmov w12, s0
cmp w23, w11
b.eq LBB57_179
LBB57_177: ; Parent Loop BB57_151 Depth=1
; Parent Loop BB57_155 Depth=2
; => This Inner Loop Header: Depth=3
asr w11, w9, w10
add w12, w11, w12
add w10, w10, w19
cmp w10, #8
b.lt LBB57_177
b LBB57_179
LBB57_178: ; in Loop: Header=BB57_155 Depth=2
mov x12, x9
LBB57_179: ; in Loop: Header=BB57_155 Depth=2
add x9, x8, #1
strb w12, [x24, x9]
ldr w9, [sp, #112] ; 4-byte Folded Reload
and w9, w0, w9
ldr w10, [sp, #88] ; 4-byte Folded Reload
lsl w10, w9, w10
asr w9, w9, w28
cmp w28, #0
csel w9, w10, w9, lt
cmp w19, #7
b.gt LBB57_185
; %bb.180: ; in Loop: Header=BB57_155 Depth=2
mov x10, x19
mov x12, x9
cmp w23, #16
b.lo LBB57_184
; %bb.181: ; in Loop: Header=BB57_155 Depth=2
mov w12, #0
and w11, w23, #0xfffffff0
madd w10, w11, w19, w19
movi.2d v0, #0000000000000000
mov.s v0[0], w9
dup.4s v2, w9
movi.2d v1, #0000000000000000
ldr q5, [sp, #64] ; 16-byte Folded Reload
movi.2d v3, #0000000000000000
movi.2d v4, #0000000000000000
LBB57_182: ; Parent Loop BB57_151 Depth=1
; Parent Loop BB57_155 Depth=2
; => This Inner Loop Header: Depth=3
add.4s v6, v5, v18
add.4s v7, v6, v18
add.4s v16, v7, v18
neg.4s v5, v5
sshl.4s v5, v2, v5
neg.4s v6, v6
sshl.4s v6, v2, v6
neg.4s v7, v7
sshl.4s v7, v2, v7
neg.4s v17, v16
sshl.4s v17, v2, v17
add.4s v0, v5, v0
add.4s v1, v6, v1
add.4s v3, v7, v3
add.4s v4, v17, v4
add w12, w12, #16
add.4s v5, v16, v18
cmp w12, w11
b.ne LBB57_182
; %bb.183: ; in Loop: Header=BB57_155 Depth=2
add.4s v0, v1, v0
add.4s v0, v3, v0
add.4s v0, v4, v0
addv.4s s0, v0
fmov w12, s0
cmp w23, w11
b.eq LBB57_186
LBB57_184: ; Parent Loop BB57_151 Depth=1
; Parent Loop BB57_155 Depth=2
; => This Inner Loop Header: Depth=3
asr w11, w9, w10
add w12, w11, w12
add w10, w10, w19
cmp w10, #8
b.lt LBB57_184
b LBB57_186
LBB57_185: ; in Loop: Header=BB57_155 Depth=2
mov x12, x9
LBB57_186: ; in Loop: Header=BB57_155 Depth=2
add x9, x8, #2
strb w12, [x24, x9]
ldr w9, [sp, #108] ; 4-byte Folded Reload
cbz w9, LBB57_193
; %bb.187: ; in Loop: Header=BB57_155 Depth=2
and w9, w0, w9
ldp w10, w11, [sp, #56] ; 8-byte Folded Reload
lsl w10, w9, w10
asr w9, w9, w11
cmp w11, #0
csel w9, w10, w9, lt
cmp w19, #7
b.gt LBB57_194
; %bb.188: ; in Loop: Header=BB57_155 Depth=2
mov x10, x19
mov x12, x9
cmp w23, #16
b.lo LBB57_192
; %bb.189: ; in Loop: Header=BB57_155 Depth=2
mov w12, #0
and w11, w23, #0xfffffff0
madd w10, w11, w19, w19
movi.2d v0, #0000000000000000
mov.s v0[0], w9
dup.4s v2, w9
movi.2d v1, #0000000000000000
ldr q5, [sp, #64] ; 16-byte Folded Reload
movi.2d v3, #0000000000000000
movi.2d v4, #0000000000000000
LBB57_190: ; Parent Loop BB57_151 Depth=1
; Parent Loop BB57_155 Depth=2
; => This Inner Loop Header: Depth=3
add.4s v6, v5, v18
add.4s v7, v6, v18
add.4s v16, v7, v18
neg.4s v5, v5
sshl.4s v5, v2, v5
neg.4s v6, v6
sshl.4s v6, v2, v6
neg.4s v7, v7
sshl.4s v7, v2, v7
neg.4s v17, v16
sshl.4s v17, v2, v17
add.4s v0, v5, v0
add.4s v1, v6, v1
add.4s v3, v7, v3
add.4s v4, v17, v4
add w12, w12, #16
add.4s v5, v16, v18
cmp w12, w11
b.ne LBB57_190
; %bb.191: ; in Loop: Header=BB57_155 Depth=2
add.4s v0, v1, v0
add.4s v0, v3, v0
add.4s v0, v4, v0
addv.4s s0, v0
fmov w12, s0
cmp w23, w11
b.eq LBB57_195
LBB57_192: ; Parent Loop BB57_151 Depth=1
; Parent Loop BB57_155 Depth=2
; => This Inner Loop Header: Depth=3
asr w11, w9, w10
add w12, w11, w12
add w10, w10, w19
cmp w10, #8
b.lt LBB57_192
b LBB57_195
LBB57_193: ; in Loop: Header=BB57_155 Depth=2
mov w12, #255
b LBB57_195
LBB57_194: ; in Loop: Header=BB57_155 Depth=2
mov x12, x9
LBB57_195: ; in Loop: Header=BB57_155 Depth=2
add x25, x8, #3
cmp w13, #4
b.ne LBB57_154
; %bb.196: ; in Loop: Header=BB57_155 Depth=2
strb w12, [x24, x25]
add w8, w26, #4
mov x25, x8
b LBB57_154
LBB57_197: ; in Loop: Header=BB57_155 Depth=2
mov w25, #0
cmp x8, x9
b.lo LBB57_164
b LBB57_162
LBB57_198: ; in Loop: Header=BB57_151 Depth=1
b.lt LBB57_225
; %bb.199: ; in Loop: Header=BB57_151 Depth=1
mov w20, #0
mov x26, x25
b LBB57_201
LBB57_200: ; in Loop: Header=BB57_201 Depth=2
add w20, w20, #1
ldr w8, [x21]
mov x26, x25
cmp w20, w8
b.ge LBB57_225
LBB57_201: ; Parent Loop BB57_151 Depth=1
; => This Inner Loop Header: Depth=2
ldr x0, [x22]
cbz x0, LBB57_205
; %bb.202: ; in Loop: Header=BB57_201 Depth=2
bl _fgetc
mov x8, x0
ldr x0, [x22]
cmn w8, #1
csel w8, wzr, w8, eq
mov x9, x26
sxtw x9, w9
add x25, x9, x24
strb w8, [x25, #2]
cbz x0, LBB57_209
; %bb.203: ; in Loop: Header=BB57_201 Depth=2
bl _fgetc
mov x8, x0
ldr x0, [x22]
cmn w8, #1
csel w8, wzr, w8, eq
strb w8, [x25, #1]
cbz x0, LBB57_213
; %bb.204: ; in Loop: Header=BB57_201 Depth=2
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
b LBB57_216
LBB57_205: ; in Loop: Header=BB57_201 Depth=2
ldp x8, x9, [x21, #24]
cmp x8, x9
b.hs LBB57_207
; %bb.206: ; in Loop: Header=BB57_201 Depth=2
add x9, x8, #1
str x9, [x21, #24]
ldrb w8, [x8]
b LBB57_208
LBB57_207: ; in Loop: Header=BB57_201 Depth=2
mov w8, #0
LBB57_208: ; in Loop: Header=BB57_201 Depth=2
add x9, x24, w26, sxtw
strb w8, [x9, #2]
LBB57_209: ; in Loop: Header=BB57_201 Depth=2
ldp x8, x9, [x21, #24]
cmp x8, x9
b.hs LBB57_211
; %bb.210: ; in Loop: Header=BB57_201 Depth=2
add x9, x8, #1
str x9, [x21, #24]
ldrb w8, [x8]
b LBB57_212
LBB57_211: ; in Loop: Header=BB57_201 Depth=2
mov w8, #0
LBB57_212: ; in Loop: Header=BB57_201 Depth=2
add x9, x24, w26, sxtw
strb w8, [x9, #1]
LBB57_213: ; in Loop: Header=BB57_201 Depth=2
ldp x8, x9, [x21, #24]
cmp x8, x9
b.hs LBB57_215
; %bb.214: ; in Loop: Header=BB57_201 Depth=2
add x9, x8, #1
str x9, [x21, #24]
ldrb w8, [x8]
b LBB57_216
LBB57_215: ; in Loop: Header=BB57_201 Depth=2
mov w8, #0
LBB57_216: ; in Loop: Header=BB57_201 Depth=2
sxtw x25, w26
strb w8, [x24, x25]
ldr w8, [sp, #100] ; 4-byte Folded Reload
cbz w8, LBB57_219
; %bb.217: ; in Loop: Header=BB57_201 Depth=2
ldr x0, [x22]
cbz x0, LBB57_222
; %bb.218: ; in Loop: Header=BB57_201 Depth=2
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
b LBB57_220
LBB57_219: ; in Loop: Header=BB57_201 Depth=2
mov w8, #255
LBB57_220: ; in Loop: Header=BB57_201 Depth=2
ldr w9, [sp, #156] ; 4-byte Folded Reload
ldr q18, [sp, #128] ; 16-byte Folded Reload
add x25, x25, #3
cmp w9, #4
b.ne LBB57_200
; %bb.221: ; in Loop: Header=BB57_201 Depth=2
strb w8, [x24, x25]
add w8, w26, #4
mov x25, x8
b LBB57_200
LBB57_222: ; in Loop: Header=BB57_201 Depth=2
ldp x8, x9, [x21, #24]
cmp x8, x9
b.hs LBB57_224
; %bb.223: ; in Loop: Header=BB57_201 Depth=2
add x9, x8, #1
str x9, [x21, #24]
ldrb w8, [x8]
b LBB57_220
LBB57_224: ; in Loop: Header=BB57_201 Depth=2
mov w8, #0
b LBB57_220
LBB57_225: ; in Loop: Header=BB57_151 Depth=1
ldr x0, [x22]
cbz x0, LBB57_149
; %bb.226: ; in Loop: Header=BB57_151 Depth=1
ldr x1, [sp, #40] ; 8-byte Folded Reload
mov w2, #1
bl _fseek
ldr q18, [sp, #128] ; 16-byte Folded Reload
b LBB57_150
LBB57_227:
ldr x9, [x21, #24]
add x8, x9, w8, sxtw
str x8, [x21, #24]
LBB57_228:
ldr w14, [sp, #156] ; 4-byte Folded Reload
ldr w8, [sp, #124] ; 4-byte Folded Reload
cmp w8, #4
b.ne LBB57_230
; %bb.229:
stp x28, x27, [sp] ; 16-byte Folded Spill
ldr w8, [x21]
add w9, w8, #1
lsr w9, w9, #1
b LBB57_232
LBB57_230:
cmp w8, #8
b.ne LBB57_286
; %bb.231:
stp x28, x27, [sp] ; 16-byte Folded Spill
ldr w8, [x21]
mov x9, x8
LBB57_232:
ldr w4, [x21, #4]
cmp w4, #1
b.lt LBB57_259
; %bb.233:
mov w27, #0
mov w20, #0
neg w9, w9
and w9, w9, #0x3
str x9, [sp, #128] ; 8-byte Folded Spill
add x23, sp, #160
mov w25, #255
cmp w8, #1
b.ge LBB57_236
b LBB57_235
LBB57_234:
ldr w8, [x21]
mov x27, x28
ldr w14, [sp, #156] ; 4-byte Folded Reload
cmp w8, #1
b.ge LBB57_236
LBB57_235:
mov x28, x27
b LBB57_255
LBB57_236:
mov w26, #0
b LBB57_238
LBB57_237: ; in Loop: Header=BB57_238 Depth=1
add w26, w26, #2
ldr w8, [x21]
mov x27, x28
cmp w26, w8
b.ge LBB57_255
LBB57_238: ; =>This Inner Loop Header: Depth=1
ldr x0, [x22]
cbz x0, LBB57_240
; %bb.239: ; in Loop: Header=BB57_238 Depth=1
bl _fgetc
ldr w14, [sp, #156] ; 4-byte Folded Reload
cmn w0, #1
csel w8, wzr, w0, eq
ldr w13, [sp, #124] ; 4-byte Folded Reload
b LBB57_243
LBB57_240: ; in Loop: Header=BB57_238 Depth=1
ldp x8, x9, [x21, #24]
cmp x8, x9
ldr w13, [sp, #124] ; 4-byte Folded Reload
b.hs LBB57_242
; %bb.241: ; in Loop: Header=BB57_238 Depth=1
add x9, x8, #1
str x9, [x21, #24]
ldrb w8, [x8]
b LBB57_243
LBB57_242: ; in Loop: Header=BB57_238 Depth=1
mov w8, #0
LBB57_243: ; in Loop: Header=BB57_238 Depth=1
and w9, w8, #0xf
asr w10, w8, #4
cmp w13, #4
csel w10, w10, w8, eq
csel w8, w9, wzr, eq
add x9, x23, w10, sxtw #2
ldrb w10, [x9]
sxtw x11, w27
add x12, x24, x11
strb w10, [x12]
ldurh w9, [x9, #1]
sturh w9, [x12, #1]
add x9, x11, #3
mov x19, x9
cmp w14, #4
b.ne LBB57_245
; %bb.244: ; in Loop: Header=BB57_238 Depth=1
add w19, w27, #4
strb w25, [x24, x9]
LBB57_245: ; in Loop: Header=BB57_238 Depth=1
add w9, w26, #1
ldr w10, [x21]
cmp w9, w10
b.eq LBB57_254
; %bb.246: ; in Loop: Header=BB57_238 Depth=1
cmp w13, #8
b.ne LBB57_252
; %bb.247: ; in Loop: Header=BB57_238 Depth=1
ldr x0, [x22]
cbz x0, LBB57_249
; %bb.248: ; in Loop: Header=BB57_238 Depth=1
bl _fgetc
ldr w14, [sp, #156] ; 4-byte Folded Reload
cmn w0, #1
csel w8, wzr, w0, eq
b LBB57_252
LBB57_249: ; in Loop: Header=BB57_238 Depth=1
ldp x8, x9, [x21, #24]
cmp x8, x9
b.hs LBB57_251
; %bb.250: ; in Loop: Header=BB57_238 Depth=1
add x9, x8, #1
str x9, [x21, #24]
ldrb w8, [x8]
b LBB57_252
LBB57_251: ; in Loop: Header=BB57_238 Depth=1
mov w8, #0
LBB57_252: ; in Loop: Header=BB57_238 Depth=1
add x8, x23, w8, sxtw #2
ldrb w9, [x8]
sxtw x10, w19
add x11, x24, x10
strb w9, [x11]
ldurh w8, [x8, #1]
sturh w8, [x11, #1]
add x28, x10, #3
cmp w14, #4
b.ne LBB57_237
; %bb.253: ; in Loop: Header=BB57_238 Depth=1
strb w25, [x24, x28]
add w8, w19, #4
mov x28, x8
b LBB57_237
LBB57_254:
mov x28, x19
LBB57_255:
ldr x0, [x22]
cbz x0, LBB57_257
; %bb.256:
ldr x1, [sp, #128] ; 8-byte Folded Reload
mov w2, #1
bl _fseek
b LBB57_258
LBB57_257:
ldr x8, [x21, #24]
ldr x9, [sp, #128] ; 8-byte Folded Reload
add x8, x8, x9
str x8, [x21, #24]
LBB57_258:
add w20, w20, #1
ldr w4, [x21, #4]
cmp w20, w4
b.lt LBB57_234
LBB57_259:
ldr w8, [sp, #36] ; 4-byte Folded Reload
cmp w8, #1
ldp x20, x19, [sp] ; 16-byte Folded Reload
ldr x22, [sp, #24] ; 8-byte Folded Reload
ldr w2, [sp, #20] ; 4-byte Folded Reload
b.lt LBB57_278
; %bb.260:
cmp w4, #2
b.lt LBB57_278
; %bb.261:
mov w0, #0
mov x8, #0
lsr w9, w4, #1
ldr w10, [x21]
ldr w11, [sp, #156] ; 4-byte Folded Reload
mul w10, w10, w11
add x11, x24, x10
and x12, x10, #0xffffffe0
and x13, x10, #0x18
and x14, x10, #0xfffffff8
add x15, x24, #16
sub w16, w4, #1
mul w17, w10, w16
neg x16, x14
b LBB57_263
LBB57_262: ; in Loop: Header=BB57_263 Depth=1
add x8, x8, #1
add w0, w0, w10
sub w17, w17, w10
cmp x8, x9
b.eq LBB57_278
LBB57_263: ; =>This Loop Header: Depth=1
; Child Loop BB57_272 Depth 2
; Child Loop BB57_276 Depth 2
; Child Loop BB57_270 Depth 2
mov w17, w17
mov w0, w0
cmp w10, #1
b.lt LBB57_262
; %bb.264: ; in Loop: Header=BB57_263 Depth=1
cmp w10, #8
b.lo LBB57_268
; %bb.265: ; in Loop: Header=BB57_263 Depth=1
mul w1, w10, w8
add x6, x24, x1
add x1, x11, x1
mvn w3, w8
add w3, w4, w3
mul w3, w10, w3
add x5, x24, x3
add x3, x11, x3
cmp x6, x3
ccmp x5, x1, #2, lo
b.lo LBB57_268
; %bb.266: ; in Loop: Header=BB57_263 Depth=1
cmp w10, #32
b.hs LBB57_271
; %bb.267: ; in Loop: Header=BB57_263 Depth=1
mov x5, #0
b LBB57_275
LBB57_268: ; in Loop: Header=BB57_263 Depth=1
mov x3, #0
LBB57_269: ; in Loop: Header=BB57_263 Depth=1
add x1, x3, x17
add x1, x24, x1
add x5, x3, x0
add x7, x24, x5
sub x3, x10, x3
LBB57_270: ; Parent Loop BB57_263 Depth=1
; => This Inner Loop Header: Depth=2
ldrb w5, [x7]
ldrb w6, [x1]
strb w6, [x7], #1
strb w5, [x1], #1
subs x3, x3, #1
b.ne LBB57_270
b LBB57_262
LBB57_271: ; in Loop: Header=BB57_263 Depth=1
add x1, x15, x0
add x5, x15, x17
mov x3, x12
LBB57_272: ; Parent Loop BB57_263 Depth=1
; => This Inner Loop Header: Depth=2
ldp q0, q1, [x1, #-16]
ldp q2, q3, [x5, #-16]
stp q2, q3, [x1, #-16]
stp q0, q1, [x5, #-16]
add x1, x1, #32
add x5, x5, #32
subs x3, x3, #32
b.ne LBB57_272
; %bb.273: ; in Loop: Header=BB57_263 Depth=1
cmp x12, x10
b.eq LBB57_262
; %bb.274: ; in Loop: Header=BB57_263 Depth=1
mov x5, x12
mov x3, x12
cbz x13, LBB57_269
LBB57_275: ; in Loop: Header=BB57_263 Depth=1
add x1, x5, x0
add x1, x24, x1
add x3, x5, x17
add x6, x24, x3
add x3, x16, x5
LBB57_276: ; Parent Loop BB57_263 Depth=1
; => This Inner Loop Header: Depth=2
ldr d0, [x1]
ldr d1, [x6]
str d1, [x1], #8
str d0, [x6], #8
adds x3, x3, #8
b.ne LBB57_276
; %bb.277: ; in Loop: Header=BB57_263 Depth=1
mov x3, x14
cmp x14, x10
b.eq LBB57_262
b LBB57_269
LBB57_278:
cbz w2, LBB57_281
; %bb.279:
ldr w1, [sp, #156] ; 4-byte Folded Reload
cmp w1, w2
b.eq LBB57_281
; %bb.280:
ldr w3, [x21]
mov x0, x24
bl _convert_format
mov x24, x0
cbz x0, LBB57_17
LBB57_281:
ldr w8, [x21]
str w8, [x22]
ldr w8, [x21, #4]
str w8, [x20]
cbz x19, LBB57_17
; %bb.282:
ldr w8, [sp, #156] ; 4-byte Folded Reload
str w8, [x19]
b LBB57_17
LBB57_283:
mov x20, x26
mov x0, x21
bl _get32le
mov x26, x0
mov x0, x21
bl _get32le
str w0, [sp, #120] ; 4-byte Folded Spill
mov x0, x21
bl _get32le
ldr w8, [sp, #120] ; 4-byte Folded Reload
mov w9, #0
stp w0, w26, [sp, #112] ; 8-byte Folded Spill
cmp w26, w8
str w9, [sp, #104] ; 4-byte Folded Spill
mov x26, x9
b.ne LBB57_97
; %bb.284:
cmp w8, w0
b.ne LBB57_97
LBB57_285:
mov x24, #0
b LBB57_17
LBB57_286:
mov x0, x24
bl _free
mov x24, #0
Lloh314:
adrp x8, l_.str.81@PAGE
Lloh315:
add x8, x8, l_.str.81@PAGEOFF
str x8, [x20, _failure_reason@PAGEOFF]
b LBB57_17
LBB57_287:
bl ___stack_chk_fail
LBB57_288:
bl _bmp_load.cold.1
.loh AdrpLdrGotLdr Lloh290, Lloh291, Lloh292
.loh AdrpAdd Lloh293, Lloh294
.loh AdrpLdrGotLdr Lloh295, Lloh296, Lloh297
.loh AdrpAdd Lloh298, Lloh299
.loh AdrpAdd Lloh300, Lloh301
.loh AdrpAdd Lloh302, Lloh303
.loh AdrpAdd Lloh304, Lloh305
.loh AdrpAdd Lloh306, Lloh307
.loh AdrpAdd Lloh308, Lloh309
.loh AdrpAdd Lloh310, Lloh311
.loh AdrpLdr Lloh312, Lloh313
.loh AdrpAdd Lloh314, Lloh315
.cfi_endproc
; -- End function
;-----------------------------------------------------------------------
; _tga_test -- probe whether the input stream looks like a TGA image.
; In:   x0 = decoder context. [x0, #16] holds a FILE*; when it is NULL
;       the input is an in-memory buffer whose current/end pointers sit
;       at [x0, #24] / [x0, #32] (the `ldp ..., [x19, #24]` pairs load
;       exactly those two fields). Each header byte is fetched either
;       via fgetc() or by bumping the memory cursor; cursor-at-end
;       reads yield 0.
; Out:  w0 = 1 if the header passes every check, 0 otherwise.
; Checks (offsets match the 18-byte Truevision TGA header; field names
; per the TGA spec -- NOTE(review): confirm against the C source):
;   byte 0   (ID length)       read and discarded
;   byte 1   (color-map type)  must be 0 or 1          -> tst #0xfe
;   byte 2   (image type)      must be in {1,2,3,9,10,11}
;                              -> (1 << type) & 0xE0E (decimal 3598)
;   bytes 3..11                skipped (color-map spec + x/y origin)
;   bytes 12..13 and 14..15    two 16-bit values, each must be >= 1
;                              (combined as first<<8 | second here --
;                              NOTE(review): that is a big-endian
;                              combine; verify intent vs the spec's
;                              little-endian fields)
;   byte 16  (bits per pixel)  must be 8/16/24/32
;                              -> ((bpp-8) & ~0x18) == 0
; Callee-saved x19 (context) and x20 (16-bit high byte) are spilled.
;-----------------------------------------------------------------------
.p2align 2 ; -- Begin function tga_test
_tga_test: ; @tga_test
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x0
ldr x0, [x0, #16]
cbz x0, LBB58_3 ; NULL FILE* -> memory-buffer path
; %bb.1:
bl _fgetc ; byte 0 (ID length): discarded
ldr x0, [x19, #16]
cbz x0, LBB58_5
; %bb.2:
bl _fgetc ; byte 1 (color-map type)
cmn w0, #1
csel w8, wzr, w0, eq ; EOF (-1) is mapped to 0
tst w8, #0xfe ; reject unless 0 or 1
b.ne LBB58_60
b LBB58_7
LBB58_3:
; memory path: skip one byte if the cursor is below the end pointer
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB58_5
; %bb.4:
add x8, x8, #1
str x8, [x19, #24]
LBB58_5:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB58_9
; %bb.6:
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8] ; byte 1 from the buffer
tst w8, #0xfe
b.ne LBB58_60
LBB58_7:
ldr x0, [x19, #16]
cbz x0, LBB58_9
; %bb.8:
bl _fgetc ; byte 2 (image type)
cmn w0, #1
csel w8, wzr, w0, eq
b LBB58_11
LBB58_9:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB58_60 ; buffer exhausted -> not a TGA
; %bb.10:
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
LBB58_11:
; image-type membership test: type <= 11 AND bit `type` set in 0xE0E,
; i.e. type in {1,2,3,9,10,11}
and w8, w8, #0xff
cmp w8, #11
mov w9, #1
lsl w8, w9, w8
mov w9, #3598
and w8, w8, w9
ccmp w8, #0, #4, ls
b.eq LBB58_60
; %bb.12:
; FILE* path: 9 discarded reads (bytes 3..11), falling through to the
; matching memory-path skip block whenever the FILE* is NULL
ldr x0, [x19, #16]
cbz x0, LBB58_24
; %bb.13:
bl _fgetc
ldr x0, [x19, #16]
cbz x0, LBB58_51
; %bb.14:
bl _fgetc
ldr x0, [x19, #16]
cbz x0, LBB58_28
; %bb.15:
bl _fgetc
ldr x0, [x19, #16]
cbz x0, LBB58_65
; %bb.16:
bl _fgetc
ldr x0, [x19, #16]
cbz x0, LBB58_32
; %bb.17:
bl _fgetc
ldr x0, [x19, #16]
cbz x0, LBB58_34
; %bb.18:
bl _fgetc
ldr x0, [x19, #16]
cbz x0, LBB58_68
; %bb.19:
bl _fgetc
ldr x0, [x19, #16]
cbz x0, LBB58_38
; %bb.20:
bl _fgetc
ldr x0, [x19, #16]
cbz x0, LBB58_69
; %bb.21:
bl _fgetc
ldr x0, [x19, #16]
cbz x0, LBB58_42
; %bb.22:
bl _fgetc ; byte 12 -> w20 (becomes the high byte below)
cmn w0, #1
csel w20, wzr, w0, eq
ldr x0, [x19, #16]
cbz x0, LBB58_70
; %bb.23:
bl _fgetc ; byte 13 -> w8 (low byte)
cmn w0, #1
csel w8, wzr, w0, eq
b LBB58_47
LBB58_24:
; memory path: skip bytes 3..11 with bounds-checked cursor bumps
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB58_26
; %bb.25:
add x8, x8, #1
str x8, [x19, #24]
LBB58_26:
cmp x8, x9
b.hs LBB58_28
LBB58_27:
add x8, x8, #1
str x8, [x19, #24]
LBB58_28:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB58_30
; %bb.29:
add x8, x8, #1
str x8, [x19, #24]
LBB58_30:
cmp x8, x9
b.hs LBB58_32
LBB58_31:
add x8, x8, #1
str x8, [x19, #24]
LBB58_32:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB58_34
; %bb.33:
add x8, x8, #1
str x8, [x19, #24]
LBB58_34:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB58_36
; %bb.35:
add x8, x8, #1
str x8, [x19, #24]
LBB58_36:
cmp x8, x9
b.hs LBB58_38
LBB58_37:
add x8, x8, #1
str x8, [x19, #24]
LBB58_38:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB58_40
; %bb.39:
add x8, x8, #1
str x8, [x19, #24]
LBB58_40:
cmp x8, x9
b.hs LBB58_42
LBB58_41:
add x8, x8, #1
str x8, [x19, #24]
LBB58_42:
; memory path: bytes 12..13 -> w20 (high), w8 (low)
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB58_45
; %bb.43:
add x10, x8, #1
str x10, [x19, #24]
ldrb w20, [x8]
mov x8, x10
cmp x8, x9
b.hs LBB58_46
LBB58_44:
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
b LBB58_47
LBB58_45:
mov w20, #0
cmp x8, x9
b.lo LBB58_44
LBB58_46:
mov w8, #0
LBB58_47:
; first 16-bit field (bytes 12..13) must be >= 1 (signed compare)
add w8, w8, w20, lsl #8
cmp w8, #1
b.lt LBB58_60
; %bb.48:
ldr x0, [x19, #16]
cbz x0, LBB58_52
; %bb.49:
bl _fgetc ; byte 14 -> w20 (high byte of second 16-bit field)
cmn w0, #1
csel w20, wzr, w0, eq
ldr x0, [x19, #16]
cbz x0, LBB58_55
; %bb.50:
bl _fgetc ; byte 15 -> w8 (low byte)
cmn w0, #1
csel w8, wzr, w0, eq
b LBB58_57
LBB58_51:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.lo LBB58_27
b LBB58_28
LBB58_52:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB58_64
; %bb.53:
add x10, x8, #1
str x10, [x19, #24]
ldrb w20, [x8]
mov x8, x10
cmp x8, x9
b.lo LBB58_56
LBB58_54:
mov w8, #0
b LBB58_57
LBB58_55:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB58_54
LBB58_56:
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
LBB58_57:
; second 16-bit field (bytes 14..15) must be >= 1
add w8, w8, w20, lsl #8
cmp w8, #1
b.lt LBB58_60
; %bb.58:
ldr x0, [x19, #16]
cbz x0, LBB58_62
; %bb.59:
bl _fgetc ; byte 16 (bits per pixel)
cmn w0, #1
csel w8, wzr, w0, eq
b LBB58_67
LBB58_60:
; common failure exit: return 0
mov w0, #0
LBB58_61:
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
LBB58_62:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB58_66
; %bb.63:
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
b LBB58_67
LBB58_64:
mov w20, #0
cmp x8, x9
b.lo LBB58_56
b LBB58_54
LBB58_65:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.lo LBB58_31
b LBB58_32
LBB58_66:
mov w8, #0
LBB58_67:
; accept iff bpp in {8,16,24,32}: (bpp-8) may only have bits 3..4 set
sub w8, w8, #8
tst w8, #0xffffffe7
cset w0, eq
b LBB58_61
LBB58_68:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.lo LBB58_37
b LBB58_38
LBB58_69:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.lo LBB58_41
b LBB58_42
LBB58_70:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.lo LBB58_44
b LBB58_46
.cfi_endproc
; -- End function
;-----------------------------------------------------------------------
; _stbi_tga_load -- file-path front end for the TGA decoder.
; C-shape (inferred from register use -- NOTE(review): confirm):
;   stbi_uc *stbi_tga_load(const char *file, int *x, int *y,
;                          int *comp, int req_comp)
; In:   x0 = filename, x1..x3 = out-params, x4 = req_comp.
; Out:  x0 = decoded pixel buffer from _tga_load, or NULL if fopen
;       failed (the file IS closed on the success path via fclose).
; Builds a decoder context in the local frame: the context base passed
; to _tga_load is sp+8, and the FILE* is stored at sp+24 -- i.e. at
; context offset #16, matching the [ctx, #16] FILE* slot the decoder
; and _tga_test read. Do not reorder the frame stores.
;-----------------------------------------------------------------------
.globl _stbi_tga_load ; -- Begin function stbi_tga_load
.p2align 2
_stbi_tga_load: ; @stbi_tga_load
.cfi_startproc
; %bb.0:
sub sp, sp, #112
.cfi_def_cfa_offset 112
stp x24, x23, [sp, #48] ; 16-byte Folded Spill
stp x22, x21, [sp, #64] ; 16-byte Folded Spill
stp x20, x19, [sp, #80] ; 16-byte Folded Spill
stp x29, x30, [sp, #96] ; 16-byte Folded Spill
add x29, sp, #96
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
; stash the four trailing args in callee-saved regs across fopen
mov x19, x4
mov x20, x3
mov x21, x2
mov x23, x1
Lloh316:
adrp x1, l_.str@PAGE ; fopen mode string (defined elsewhere in file)
Lloh317:
add x1, x1, l_.str@PAGEOFF
bl _fopen
cbz x0, LBB59_2 ; fopen failed -> return NULL
; %bb.1:
mov x22, x0
str x0, [sp, #24] ; ctx->file (ctx base is sp+8, so offset #16)
add x0, sp, #8 ; ctx base
mov x1, x23
mov x2, x21
mov x3, x20
mov x4, x19
bl _tga_load
mov x19, x0 ; keep result across fclose
mov x0, x22
bl _fclose
b LBB59_3
LBB59_2:
mov x19, #0
LBB59_3:
mov x0, x19
ldp x29, x30, [sp, #96] ; 16-byte Folded Reload
ldp x20, x19, [sp, #80] ; 16-byte Folded Reload
ldp x22, x21, [sp, #64] ; 16-byte Folded Reload
ldp x24, x23, [sp, #48] ; 16-byte Folded Reload
add sp, sp, #112
ret
.loh AdrpAdd Lloh316, Lloh317
.cfi_endproc
; -- End function
;-----------------------------------------------------------------------
; _tga_load -- decode a Truevision TGA image (raw or RLE) to pixels.
; C-shape (inferred from register use -- NOTE(review): confirm):
;   stbi_uc *tga_load(stbi *s, int *x, int *y, int *comp, int req_comp)
; In:   x0 = context: FILE* at [x0,#16] (NULL => in-memory buffer with
;       current/end pointers at [x0,#24]/[x0,#32]); x1/x2/x3 = width/
;       height/components out-params; w4 = requested component count.
; Out:  x0 = malloc'd pixel buffer (caller frees), or NULL when the
;       header is rejected.
; Structure:
;   1. Header parse: one fgetc chain (%bb.1..%bb.18) for the FILE*
;      path, mirrored by a bounds-checked memory-cursor ladder
;      (LBB60_19..LBB60_81). Both converge at LBB60_82.
;   2. Validation + allocation (LBB60_82..LBB60_90).
;   3. Optional color-map (palette) load (LBB60_91..LBB60_98).
;   4. Per-pixel decode loop with RLE handling (LBB60_100..LBB60_143),
;      driven by the two jump tables lJTI60_0 / lJTI60_1 below.
;   5. Vertical flip unless the descriptor's top-left-origin bit is
;      set (LBB60_144..LBB60_164), then cleanup (LBB60_165..).
; Key spill slots while decoding (all relative to sp):
;   #88 req_comp (later reused as RLE-repeat flag)  #84 image type byte
;       (later type&0xf8 = "is RLE")                #80 color-map type
;   #72 palette-start LE16 (later width*height)     #44 palette length
;   #4  width      #28 height      #8 descriptor byte
;   #68 output components           #12 source components (bpp/8)
;   #64 effective bits per pixel    #16 palette buffer pointer
; NOTE(review): field naming above follows the TGA spec by offset;
; confirm against the original C source.
;-----------------------------------------------------------------------
.p2align 2 ; -- Begin function tga_load
_tga_load: ; @tga_load
.cfi_startproc
; %bb.0:
sub sp, sp, #192
.cfi_def_cfa_offset 192
stp x28, x27, [sp, #96] ; 16-byte Folded Spill
stp x26, x25, [sp, #112] ; 16-byte Folded Spill
stp x24, x23, [sp, #128] ; 16-byte Folded Spill
stp x22, x21, [sp, #144] ; 16-byte Folded Spill
stp x20, x19, [sp, #160] ; 16-byte Folded Spill
stp x29, x30, [sp, #176] ; 16-byte Folded Spill
add x29, sp, #176
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
mov x19, x0 ; x19 = context base (memory-cursor access)
mov x25, x0
ldr x0, [x25, #16]! ; x25 = &ctx->file (pre-index writeback); x0 = FILE*
cbz x0, LBB60_19 ; no FILE* -> memory-buffer header ladder
; %bb.1:
; ---- FILE* header parse: 18 bytes via fgetc, EOF (-1) mapped to 0.
; Any fgetc that discovers the FILE* slot became NULL bails into the
; matching LBB60_2x..6x memory-path resume block.
str w4, [sp, #88] ; 4-byte Folded Spill
mov x21, x3
mov x27, x1
mov x24, x2
bl _fgetc ; byte 0: ID-field length -> w28
mov x8, x0
ldr x0, [x25]
and w9, w8, #0xff
cmn w8, #1
csel w28, wzr, w9, eq
cbz x0, LBB60_21
; %bb.2:
bl _fgetc ; byte 1: color-map type -> [sp,#80]
mov x8, x0
ldr x0, [x25]
cmn w8, #1
csel w8, wzr, w8, eq
str w8, [sp, #80] ; 4-byte Folded Spill
cbz x0, LBB60_26
; %bb.3:
bl _fgetc ; byte 2: image type -> w20/w11
cmn w0, #1
csel w11, wzr, w0, eq
ldr x0, [x25]
and w20, w11, #0xff
cbz x0, LBB60_34
; %bb.4:
str w11, [sp, #84] ; 4-byte Folded Spill
bl _fgetc ; bytes 3-4: color-map first-entry index (LE16)
cmn w0, #1
csel w22, wzr, w0, eq
ldr x0, [x25]
cbz x0, LBB60_35
; %bb.5:
bl _fgetc
mov x8, x0
ldr x0, [x25]
lsl w9, w8, #8
cmn w8, #1
csel w8, wzr, w9, eq
add w8, w8, w22
str x8, [sp, #72] ; 8-byte Folded Spill
cbz x0, LBB60_41
; %bb.6:
bl _fgetc ; bytes 5-6: color-map length (LE16) -> [sp,#44]
cmn w0, #1
csel w22, wzr, w0, eq
ldr x0, [x25]
cbz x0, LBB60_42
; %bb.7:
bl _fgetc
mov x8, x0
ldr x0, [x25]
lsl w9, w8, #8
cmn w8, #1
csel w8, wzr, w9, eq
add w8, w8, w22
str w8, [sp, #44] ; 4-byte Folded Spill
cbz x0, LBB60_60
; %bb.8:
bl _fgetc ; byte 7: color-map entry size (bits) -> w22
mov x8, x0
ldr x0, [x25]
and w9, w8, #0xff
cmn w8, #1
csel w22, wzr, w9, eq
cbz x0, LBB60_61
; %bb.9:
bl _fgetc ; bytes 8-11: x/y origin, discarded
ldr x0, [x25]
cbz x0, LBB60_62
; %bb.10:
bl _fgetc
ldr x0, [x25]
cbz x0, LBB60_63
; %bb.11:
bl _fgetc
ldr x0, [x25]
cbz x0, LBB60_64
; %bb.12:
bl _fgetc
ldr x0, [x25]
cbz x0, LBB60_65
; %bb.13:
bl _fgetc ; bytes 12-13: width (LE16) -> [sp,#4]
cmn w0, #1
csel w23, wzr, w0, eq
ldr x0, [x25]
cbz x0, LBB60_66
; %bb.14:
bl _fgetc
mov x8, x0
ldr x0, [x25]
lsl w9, w8, #8
cmn w8, #1
csel w8, wzr, w9, eq
add w8, w8, w23
str w8, [sp, #4] ; 4-byte Folded Spill
cbz x0, LBB60_72
; %bb.15:
bl _fgetc ; bytes 14-15: height (LE16) -> [sp,#28]
cmn w0, #1
csel w23, wzr, w0, eq
ldr x0, [x25]
cbz x0, LBB60_73
; %bb.16:
bl _fgetc
mov x8, x0
ldr x0, [x25]
lsl w9, w8, #8
cmn w8, #1
csel w8, wzr, w9, eq
add w8, w8, w23
str w8, [sp, #28] ; 4-byte Folded Spill
cbz x0, LBB60_168
; %bb.17:
bl _fgetc ; byte 16: bits per pixel -> w26/w23
cmn w0, #1
csel w26, wzr, w0, eq
ldr x0, [x25]
and w23, w26, #0xff
cbz x0, LBB60_169
; %bb.18:
bl _fgetc ; byte 17: image descriptor -> [sp,#8]
cmn w0, #1
csel w8, wzr, w0, eq
str w8, [sp, #8] ; 4-byte Folded Spill
; canonicalize registers to the layout LBB60_82 expects
mov x2, x24
mov x1, x27
mov x3, x21
ldp w11, w4, [sp, #84] ; 8-byte Folded Reload
mov x21, x28
ldr x27, [sp, #72] ; 8-byte Folded Reload
ldr w28, [sp, #4] ; 4-byte Folded Reload
b LBB60_82
LBB60_19:
; ---- memory-buffer header parse: same 18 bytes, read by bumping the
; cursor at [x19,#24] against the end pointer at [x19,#32]; a short
; buffer yields zero bytes. The LBB60_21/26/34/35/41/42/60..66/72/73
; blocks re-establish the common register layout when the FILE* path
; falls over to this one mid-header.
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB60_22
; %bb.20:
add x9, x8, #1
str x9, [x19, #24]
ldrb w21, [x8]
b LBB60_23
LBB60_21:
mov x2, x24
mov x1, x27
mov x3, x21
ldr w4, [sp, #88] ; 4-byte Folded Reload
mov x21, x28
b LBB60_23
LBB60_22:
mov w21, #0
LBB60_23:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB60_25
; %bb.24:
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
str w8, [sp, #80] ; 4-byte Folded Spill
b LBB60_27
LBB60_25:
str wzr, [sp, #80] ; 4-byte Folded Spill
b LBB60_27
LBB60_26:
mov x2, x24
mov x1, x27
mov x3, x21
ldr w4, [sp, #88] ; 4-byte Folded Reload
mov x21, x28
LBB60_27:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB60_29
; %bb.28:
add x9, x8, #1
str x9, [x19, #24]
ldrb w20, [x8]
mov x11, x20
b LBB60_30
LBB60_29:
mov w20, #0
mov w11, #0
LBB60_30:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB60_32
; %bb.31:
add x10, x8, #1
str x10, [x19, #24]
ldrb w22, [x8]
mov x8, x10
cmp x8, x9
b.lo LBB60_33
b LBB60_36
LBB60_32:
mov w22, #0
cmp x8, x9
b.hs LBB60_36
LBB60_33:
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
add w27, w22, w8, lsl #8
b LBB60_37
LBB60_34:
mov x2, x24
mov x1, x27
mov x3, x21
ldr w4, [sp, #88] ; 4-byte Folded Reload
mov x21, x28
b LBB60_30
LBB60_35:
ldp x8, x9, [x19, #24]
mov x2, x24
mov x1, x27
mov x3, x21
ldp w11, w4, [sp, #84] ; 8-byte Folded Reload
mov x21, x28
cmp x8, x9
b.lo LBB60_33
LBB60_36:
mov w8, #0
add w27, w22, wzr, lsl #8
LBB60_37:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB60_39
; %bb.38:
add x10, x8, #1
str x10, [x19, #24]
ldrb w22, [x8]
mov x8, x10
cmp x8, x9
b.lo LBB60_40
b LBB60_43
LBB60_39:
mov w22, #0
cmp x8, x9
b.hs LBB60_43
LBB60_40:
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
b LBB60_44
LBB60_41:
mov x2, x24
mov x1, x27
mov x3, x21
ldp w11, w4, [sp, #84] ; 8-byte Folded Reload
mov x21, x28
ldr x27, [sp, #72] ; 8-byte Folded Reload
b LBB60_37
LBB60_42:
ldp x8, x9, [x19, #24]
mov x2, x24
mov x1, x27
mov x3, x21
ldp w11, w4, [sp, #84] ; 8-byte Folded Reload
mov x21, x28
ldr x27, [sp, #72] ; 8-byte Folded Reload
cmp x8, x9
b.lo LBB60_40
LBB60_43:
mov w8, #0
LBB60_44:
add w8, w22, w8, lsl #8
str w8, [sp, #44] ; 4-byte Folded Spill
LBB60_45:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB60_47
; %bb.46:
add x9, x8, #1
str x9, [x19, #24]
ldrb w22, [x8]
b LBB60_48
LBB60_47:
mov w22, #0
LBB60_48:
; skip the 4 origin bytes (8..11) in the memory buffer
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB60_50
; %bb.49:
add x8, x8, #1
str x8, [x19, #24]
LBB60_50:
cmp x8, x9
b.hs LBB60_52
LBB60_51:
add x8, x8, #1
str x8, [x19, #24]
LBB60_52:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB60_54
; %bb.53:
add x8, x8, #1
str x8, [x19, #24]
LBB60_54:
cmp x8, x9
b.hs LBB60_56
LBB60_55:
add x8, x8, #1
str x8, [x19, #24]
LBB60_56:
ldp x9, x8, [x19, #24]
cmp x9, x8
b.hs LBB60_58
; %bb.57:
add x10, x9, #1
str x10, [x19, #24]
ldrb w23, [x9]
mov x9, x10
cmp x9, x8
b.lo LBB60_59
b LBB60_67
LBB60_58:
mov w23, #0
cmp x9, x8
b.hs LBB60_67
LBB60_59:
add x8, x9, #1
str x8, [x19, #24]
ldrb w8, [x9]
add w28, w23, w8, lsl #8
b LBB60_68
LBB60_60:
mov x2, x24
mov x1, x27
mov x3, x21
ldp w11, w4, [sp, #84] ; 8-byte Folded Reload
mov x21, x28
ldr x27, [sp, #72] ; 8-byte Folded Reload
b LBB60_45
LBB60_61:
mov x2, x24
mov x1, x27
mov x3, x21
ldp w11, w4, [sp, #84] ; 8-byte Folded Reload
mov x21, x28
ldr x27, [sp, #72] ; 8-byte Folded Reload
b LBB60_48
LBB60_62:
ldp x8, x9, [x19, #24]
mov x2, x24
mov x1, x27
mov x3, x21
ldp w11, w4, [sp, #84] ; 8-byte Folded Reload
mov x21, x28
ldr x27, [sp, #72] ; 8-byte Folded Reload
cmp x8, x9
b.lo LBB60_51
b LBB60_52
LBB60_63:
mov x2, x24
mov x1, x27
mov x3, x21
ldp w11, w4, [sp, #84] ; 8-byte Folded Reload
mov x21, x28
ldr x27, [sp, #72] ; 8-byte Folded Reload
b LBB60_52
LBB60_64:
ldp x8, x9, [x19, #24]
mov x2, x24
mov x1, x27
mov x3, x21
ldp w11, w4, [sp, #84] ; 8-byte Folded Reload
mov x21, x28
ldr x27, [sp, #72] ; 8-byte Folded Reload
cmp x8, x9
b.lo LBB60_55
b LBB60_56
LBB60_65:
mov x2, x24
mov x1, x27
mov x3, x21
ldp w11, w4, [sp, #84] ; 8-byte Folded Reload
mov x21, x28
ldr x27, [sp, #72] ; 8-byte Folded Reload
b LBB60_56
LBB60_66:
ldp x9, x8, [x19, #24]
mov x2, x24
mov x1, x27
mov x3, x21
ldp w11, w4, [sp, #84] ; 8-byte Folded Reload
mov x21, x28
ldr x27, [sp, #72] ; 8-byte Folded Reload
cmp x9, x8
b.lo LBB60_59
LBB60_67:
mov w8, #0
add w28, w23, wzr, lsl #8
LBB60_68:
ldp x9, x8, [x19, #24]
cmp x9, x8
b.hs LBB60_70
; %bb.69:
add x10, x9, #1
str x10, [x19, #24]
ldrb w23, [x9]
mov x9, x10
cmp x9, x8
b.lo LBB60_71
b LBB60_74
LBB60_70:
mov w23, #0
cmp x9, x8
b.hs LBB60_74
LBB60_71:
add x8, x9, #1
str x8, [x19, #24]
ldrb w8, [x9]
lsl w8, w8, #8
b LBB60_75
LBB60_72:
mov x2, x24
mov x1, x27
mov x3, x21
ldp w11, w4, [sp, #84] ; 8-byte Folded Reload
mov x21, x28
ldr x27, [sp, #72] ; 8-byte Folded Reload
ldr w28, [sp, #4] ; 4-byte Folded Reload
b LBB60_68
LBB60_73:
ldp x9, x8, [x19, #24]
mov x2, x24
mov x1, x27
mov x3, x21
ldp w11, w4, [sp, #84] ; 8-byte Folded Reload
mov x21, x28
ldr x27, [sp, #72] ; 8-byte Folded Reload
ldr w28, [sp, #4] ; 4-byte Folded Reload
cmp x9, x8
b.lo LBB60_71
LBB60_74:
mov w8, #0
LBB60_75:
add w8, w8, w23
str w8, [sp, #28] ; 4-byte Folded Spill
LBB60_76:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB60_78
; %bb.77:
add x9, x8, #1
str x9, [x19, #24]
ldrb w23, [x8]
mov x26, x23
b LBB60_79
LBB60_78:
mov w23, #0
mov w26, #0
LBB60_79:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB60_81
; %bb.80:
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
str w8, [sp, #8] ; 4-byte Folded Spill
b LBB60_82
LBB60_81:
str wzr, [sp, #8] ; 4-byte Folded Spill
LBB60_82:
; ---- validation. Register/slot layout on entry:
;   x1/x2/x3 = x/y/comp out-params, w4 = req_comp, w21 = ID length,
;   w20/w11 = image type, w22 = palette entry bits, w23/w26 = bpp,
;   w27 = palette-start, w28 = width, [sp,#28] = height.
; w8 = base image type (RLE bit 3 stripped); [sp,#84] = type & 0xf8
; (non-zero => RLE-compressed image, types 9..11).
mov x24, #0
sub w8, w20, #8
ands w9, w11, #0xf8
str w9, [sp, #84] ; 4-byte Folded Spill
csel w8, w20, w8, eq
cmp w28, #1
b.lt LBB60_167 ; width < 1 -> return NULL (x24 = 0)
; %bb.83:
ldr w10, [sp, #28] ; 4-byte Folded Reload
cmp w10, #1
b.lt LBB60_167 ; height < 1
; %bb.84:
cmp w8, #1
b.lt LBB60_167 ; base type < 1
; %bb.85:
cmp w8, #3
b.gt LBB60_167 ; base type > 3
; %bb.86:
; bpp check: rotate (bpp-8) right by 3 within 8 bits; result must be
; 0..3, i.e. bpp in {8,16,24,32}. The rotated value is also bytes/px-1.
sub w8, w26, #8
ubfx w9, w8, #3, #5
bfi w9, w8, #5, #27
and w8, w9, #0xff
cmp w8, #3
b.hi LBB60_89
; %bb.87:
; effective bits/px: palette entry bits when color-mapped, else raw
; bpp. Publish width/height/comp, pick output comps (req_comp if
; non-zero), and malloc(out_comp * width * height).
ldr w8, [sp, #80] ; 4-byte Folded Reload
tst w8, #0xff
csel w9, w23, w22, eq
str w28, [x1]
str w10, [x2]
sub w8, w4, #5
str w9, [sp, #64] ; 4-byte Folded Spill
lsr w9, w9, #3
cmn w8, #4
csel w8, w9, w4, lo ; out_comp = req_comp ? req_comp : bits/8
str w9, [sp, #12] ; 4-byte Folded Spill
str w9, [x3]
mul w9, w10, w28
str w8, [sp, #68] ; 4-byte Folded Spill
str x9, [sp, #72] ; 8-byte Folded Spill
mul w8, w8, w9
sxtw x0, w8
bl _malloc
mov x24, x0
ldr x0, [x25]
cbz x0, LBB60_90
; %bb.88:
; skip the ID field (w21 bytes) after the header
mov w1, w21
mov w2, #1
bl _fseek
b LBB60_91
LBB60_89:
mov x24, #0
b LBB60_167
LBB60_90:
ldr x8, [x19, #24]
add x8, x8, w21, uxtw
str x8, [x19, #24]
LBB60_91:
; ---- optional color map: skip palette-start bytes, then load
; palette_len * palette_bits / 8 bytes into a malloc'd buffer
; ([sp,#16]; left NULL when the image is not color-mapped).
ldr w8, [sp, #80] ; 4-byte Folded Reload
tst w8, #0xff
str w28, [sp, #4] ; 4-byte Folded Spill
b.eq LBB60_94
; %bb.92:
ldr x0, [x25]
cbz x0, LBB60_95
; %bb.93:
sxtw x1, w27
mov w2, #1
bl _fseek
ldr x21, [x25]
b LBB60_96
LBB60_94:
str xzr, [sp, #16] ; 8-byte Folded Spill
b LBB60_99
LBB60_95:
mov x21, #0
ldr x8, [x19, #24]
add x8, x8, w27, sxtw
str x8, [x19, #24]
LBB60_96:
ldr w8, [sp, #44] ; 4-byte Folded Reload
mul w8, w22, w8
add w9, w8, #7
cmp w8, #0
csel w8, w9, w8, lt
sbfx x22, x8, #3, #29 ; bytes = round-toward-zero of bits/8
mov x0, x22
bl _malloc
str x0, [sp, #16] ; 8-byte Folded Spill
cbz x21, LBB60_98
; %bb.97:
mov w1, #1
mov x2, x22
mov x3, x21
bl _fread
b LBB60_99
LBB60_98:
ldr x21, [x19, #24]
mov x1, x21
mov x2, x22
bl _memcpy
add x8, x21, x22
str x8, [x19, #24]
LBB60_99:
ldr x8, [sp, #72] ; 8-byte Folded Reload
cmp w8, #1
b.lt LBB60_144 ; zero pixels -> straight to post-processing
; %bb.100:
; ---- decode loop setup. Per-iteration roles:
;   x22 = pixel index, w21 = remaining RLE run count, [sp,#88] = RLE
;   "repeat previous pixel" flag, w8 = first-iteration marker,
;   w23/w27/w28 = current B/G/R bytes, w26 = alpha (or palette index),
;   [sp,#56] = bytes/px - 1 (selects a lJTI60_0 fetch case),
;   [sp,#32] = bytes to read per pixel, x29-84 = raw pixel scratch.
mov x22, #0
str wzr, [sp, #88] ; 4-byte Folded Spill
mov w21, #0
ldr w9, [sp, #64] ; 4-byte Folded Reload
sub w8, w9, #8
ror w8, w8, #3
stp x24, x8, [sp, #48] ; 16-byte Folded Spill
sub w8, w9, #1
lsr w8, w8, #3
add w8, w8, #1
str x8, [sp, #32] ; 8-byte Folded Spill
mov w8, #1
; implicit-def: $w28
; implicit-def: $w27
; implicit-def: $w23
; implicit-def: $w26
b LBB60_105
LBB60_101: ; in Loop: Header=BB60_105 Depth=1
; out_comp == 4: store R,G,B then fall into the alpha store
lsl w8, w22, #2
add x8, x24, x8
strb w28, [x8]
strb w27, [x8, #1]
strb w23, [x8, #2]
mov w8, #3
bfi w8, w22, #2, #30
LBB60_102: ; in Loop: Header=BB60_105 Depth=1
mov x9, x26
LBB60_103: ; in Loop: Header=BB60_105 Depth=1
strb w9, [x24, x8]
LBB60_104: ; in Loop: Header=BB60_105 Depth=1
mov w8, #0
sub w21, w21, #1
add x22, x22, #1
ldr x9, [sp, #72] ; 8-byte Folded Reload
cmp x22, x9
b.eq LBB60_144 ; all width*height pixels written
LBB60_105: ; =>This Loop Header: Depth=1
; Child Loop BB60_122 Depth 2
; ---- RLE packet handling: when run exhausted, read a packet header
; byte; count = (b & 0x7f) + 1, bit 7 = "repeat one pixel count times"
ldr w9, [sp, #84] ; 4-byte Folded Reload
cbz w9, LBB60_115 ; not RLE -> always read a fresh pixel
; %bb.106: ; in Loop: Header=BB60_105 Depth=1
cbz w21, LBB60_109
; %bb.107: ; in Loop: Header=BB60_105 Depth=1
ldr w9, [sp, #88] ; 4-byte Folded Reload
cmp w9, #0
cset w9, ne
str w9, [sp, #88] ; 4-byte Folded Spill
cset w9, eq
orr w8, w9, w8
tbnz w8, #0, LBB60_115
; %bb.108: ; in Loop: Header=BB60_105 Depth=1
mov w8, #1
str w8, [sp, #88] ; 4-byte Folded Spill
b LBB60_139 ; repeat run: reuse previous pixel bytes
LBB60_109: ; in Loop: Header=BB60_105 Depth=1
ldr x0, [x25]
cbz x0, LBB60_111
; %bb.110: ; in Loop: Header=BB60_105 Depth=1
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
b LBB60_114
LBB60_111: ; in Loop: Header=BB60_105 Depth=1
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB60_113
; %bb.112: ; in Loop: Header=BB60_105 Depth=1
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
b LBB60_114
LBB60_113: ; in Loop: Header=BB60_105 Depth=1
mov w8, #0
LBB60_114: ; in Loop: Header=BB60_105 Depth=1
and w9, w8, #0x7f
add w21, w9, #1
ubfx w8, w8, #7, #1
str w8, [sp, #88] ; 4-byte Folded Spill
LBB60_115: ; in Loop: Header=BB60_105 Depth=1
; ---- fetch one raw pixel. Color-mapped image: read one index byte
; and copy that palette entry into the scratch buffer; otherwise read
; bytes/px bytes into the scratch buffer at x29-84 (loop BB60_122).
ldr w8, [sp, #80] ; 4-byte Folded Reload
tst w8, #0xff
b.eq LBB60_118
; %bb.116: ; in Loop: Header=BB60_105 Depth=1
ldr x0, [x25]
cbz x0, LBB60_126
; %bb.117: ; in Loop: Header=BB60_105 Depth=1
mov x24, x27 ; preserve prev G/alpha across fgetc/memcpy
mov x20, x26
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
ldr w9, [sp, #64] ; 4-byte Folded Reload
cbnz w9, LBB60_129
b LBB60_130
LBB60_118: ; in Loop: Header=BB60_105 Depth=1
mov x24, x27
mov x20, x26
ldr w8, [sp, #64] ; 4-byte Folded Reload
cbz w8, LBB60_130
; %bb.119: ; in Loop: Header=BB60_105 Depth=1
sub x26, x29, #84 ; scratch write cursor
ldr x27, [sp, #32] ; 8-byte Folded Reload
b LBB60_122
LBB60_120: ; in Loop: Header=BB60_122 Depth=2
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
LBB60_121: ; in Loop: Header=BB60_122 Depth=2
strb w8, [x26], #1
subs x27, x27, #1
b.eq LBB60_130
LBB60_122: ; Parent Loop BB60_105 Depth=1
; => This Inner Loop Header: Depth=2
ldr x0, [x25]
cbnz x0, LBB60_120
; %bb.123: ; in Loop: Header=BB60_122 Depth=2
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB60_125
; %bb.124: ; in Loop: Header=BB60_122 Depth=2
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
b LBB60_121
LBB60_125: ; in Loop: Header=BB60_122 Depth=2
mov w8, #0
b LBB60_121
LBB60_126: ; in Loop: Header=BB60_105 Depth=1
mov x24, x27
mov x20, x26
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB60_128
; %bb.127: ; in Loop: Header=BB60_105 Depth=1
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
ldr w9, [sp, #64] ; 4-byte Folded Reload
cbnz w9, LBB60_129
b LBB60_130
LBB60_128: ; in Loop: Header=BB60_105 Depth=1
mov w8, #0
ldr w9, [sp, #64] ; 4-byte Folded Reload
cbz w9, LBB60_130
LBB60_129: ; in Loop: Header=BB60_105 Depth=1
; palette lookup: clamp index against palette length, copy
; src_comp bytes of entry into scratch
and w8, w8, #0xff
ldr w9, [sp, #44] ; 4-byte Folded Reload
cmp w9, w8
csel w8, w8, wzr, gt
ldr w9, [sp, #12] ; 4-byte Folded Reload
mul w8, w8, w9
ldr x9, [sp, #16] ; 8-byte Folded Reload
add x1, x9, x8
sub x0, x29, #84
ldr x2, [sp, #32] ; 8-byte Folded Reload
bl _memcpy
LBB60_130: ; in Loop: Header=BB60_105 Depth=1
; ---- unpack scratch into channel registers via lJTI60_0, indexed by
; bytes/px-1: case 0 grey, 1 grey+alpha, 2 BGR, 3 BGRA (w23=B, w27=G,
; w28=R, w26=A; alpha forced to 255 when absent)
ldr x10, [sp, #56] ; 8-byte Folded Reload
cmp w10, #3
b.hi LBB60_133
; %bb.131: ; in Loop: Header=BB60_105 Depth=1
Lloh318:
adrp x11, lJTI60_0@PAGE
Lloh319:
add x11, x11, lJTI60_0@PAGEOFF
adr x8, LBB60_132
ldrb w9, [x11, x10]
add x8, x8, x9, lsl #2
br x8
LBB60_132: ; in Loop: Header=BB60_105 Depth=1
mov w26, #255
ldurb w23, [x29, #-84]
b LBB60_135
LBB60_133: ; in Loop: Header=BB60_105 Depth=1
mov x26, x20 ; out-of-range: keep previous pixel's channels
mov x27, x24
b LBB60_138
LBB60_134: ; in Loop: Header=BB60_105 Depth=1
ldurb w23, [x29, #-84]
ldurb w26, [x29, #-83]
LBB60_135: ; in Loop: Header=BB60_105 Depth=1
mov x27, x23 ; grey: replicate the single sample into G and R
mov x28, x23
b LBB60_138
LBB60_136: ; in Loop: Header=BB60_105 Depth=1
ldurb w28, [x29, #-82]
ldurb w27, [x29, #-83]
mov w26, #255
ldurb w23, [x29, #-84]
b LBB60_138
LBB60_137: ; in Loop: Header=BB60_105 Depth=1
ldurb w28, [x29, #-82]
ldurb w27, [x29, #-83]
ldurb w23, [x29, #-84]
ldurb w26, [x29, #-81]
LBB60_138: ; in Loop: Header=BB60_105 Depth=1
ldr x24, [sp, #48] ; 8-byte Folded Reload
LBB60_139: ; in Loop: Header=BB60_105 Depth=1
; ---- store via lJTI60_1, indexed by out_comp-1: grey, grey+alpha,
; RGB, RGBA. Grey = (77*R + 150*G + 29*B) >> 8 (ITU-style weights).
ldr w8, [sp, #68] ; 4-byte Folded Reload
sub w8, w8, #1
cmp w8, #3
b.hi LBB60_104
; %bb.140: ; in Loop: Header=BB60_105 Depth=1
Lloh320:
adrp x11, lJTI60_1@PAGE
Lloh321:
add x11, x11, lJTI60_1@PAGEOFF
adr x9, LBB60_101
ldrb w10, [x11, x8]
add x9, x9, x10, lsl #2
br x9
LBB60_141: ; in Loop: Header=BB60_105 Depth=1
and w8, w28, #0xff
and w9, w27, #0xff
and w10, w23, #0xff
mov w11, #29
mul w10, w10, w11
mov w11, #150
madd w9, w9, w11, w10
mov w10, #77
madd w8, w8, w10, w9
lsr w9, w8, #8
mov x8, x22
b LBB60_103
LBB60_142: ; in Loop: Header=BB60_105 Depth=1
and w8, w28, #0xff
and w9, w27, #0xff
and w10, w23, #0xff
mov w11, #29
mul w10, w10, w11
mov w11, #150
madd w9, w9, w11, w10
mov w10, #77
madd w8, w8, w10, w9
lsr w8, w8, #8
lsl x9, x22, #1
strb w8, [x24, x9]
mov w8, #1
bfi x8, x22, #1, #63
b LBB60_102
LBB60_143: ; in Loop: Header=BB60_105 Depth=1
add w8, w22, w22, lsl #1
add x9, x24, x8
strb w28, [x9]
strb w27, [x9, #1]
add w8, w8, #2
mov x9, x23
b LBB60_103
LBB60_144:
; ---- vertical flip: descriptor bit 5 set = top-left origin, already
; in output order; otherwise swap row i with row (height-1-i) for the
; top half (NEON 32-byte chunks, then 8-byte, then byte remainder).
ldr w8, [sp, #8] ; 4-byte Folded Reload
tbnz w8, #5, LBB60_165
; %bb.145:
ldr w8, [sp, #28] ; 4-byte Folded Reload
subs w10, w8, #1
b.lt LBB60_165
; %bb.146:
mov w3, #0
mov w9, #0
ldr w12, [sp, #4] ; 4-byte Folded Reload
ldr w11, [sp, #68] ; 4-byte Folded Reload
mul w8, w11, w12 ; w8 = row stride (out_comp * width)
mul w11, w11, w10
mul w4, w11, w12 ; offset of the last row
lsr w10, w10, #1 ; rows to swap = (height-1)/2 + 1
sub w11, w8, #1
add x12, x11, #1
add x13, x24, x12
and x14, x12, #0x1ffffffe0
sub w15, w8, w14
and x16, x12, #0x18
and x17, x12, #0x1fffffff8
sub w0, w8, w17
add x1, x24, #16
neg x2, x17
b LBB60_148
LBB60_147: ; in Loop: Header=BB60_148 Depth=1
add w5, w9, #1
sub w4, w4, w8
add w3, w3, w8
cmp w9, w10
mov x9, x5
b.eq LBB60_165
LBB60_148: ; =>This Loop Header: Depth=1
; Child Loop BB60_156 Depth 2
; Child Loop BB60_160 Depth 2
; Child Loop BB60_164 Depth 2
sxtw x3, w3 ; x3 = top-row byte offset, x4 = bottom-row offset
sxtw x4, w4
cmp w8, #1
b.lt LBB60_147
; %bb.149: ; in Loop: Header=BB60_148 Depth=1
cmp w11, #7
b.hs LBB60_151
; %bb.150: ; in Loop: Header=BB60_148 Depth=1
mov x5, x3
mov x6, x4
mov x7, x8
b LBB60_163
LBB60_151: ; in Loop: Header=BB60_148 Depth=1
; overlap check before using the vectorized swap
add x5, x24, x3
add x6, x13, x4
cmp x5, x6
b.hs LBB60_153
; %bb.152: ; in Loop: Header=BB60_148 Depth=1
add x5, x13, x3
add x6, x24, x4
cmp x6, x5
mov x5, x3
mov x6, x4
mov x7, x8
b.lo LBB60_163
LBB60_153: ; in Loop: Header=BB60_148 Depth=1
cmp w11, #31
b.hs LBB60_155
; %bb.154: ; in Loop: Header=BB60_148 Depth=1
mov x20, #0
b LBB60_159
LBB60_155: ; in Loop: Header=BB60_148 Depth=1
add x5, x1, x3
add x6, x1, x4
mov x7, x14
LBB60_156: ; Parent Loop BB60_148 Depth=1
; => This Inner Loop Header: Depth=2
; swap 32 bytes of top and bottom rows per iteration
ldp q0, q1, [x5, #-16]
ldp q2, q3, [x6, #-16]
stp q2, q3, [x5, #-16]
stp q0, q1, [x6, #-16]
add x5, x5, #32
add x6, x6, #32
subs x7, x7, #32
b.ne LBB60_156
; %bb.157: ; in Loop: Header=BB60_148 Depth=1
cmp x12, x14
b.eq LBB60_147
; %bb.158: ; in Loop: Header=BB60_148 Depth=1
mov x20, x14
cbz x16, LBB60_162
LBB60_159: ; in Loop: Header=BB60_148 Depth=1
add x5, x17, x3
add x6, x17, x4
add x7, x2, x20
add x19, x20, x3
add x19, x24, x19
add x20, x20, x4
add x20, x24, x20
LBB60_160: ; Parent Loop BB60_148 Depth=1
; => This Inner Loop Header: Depth=2
; 8-byte swap tail
ldr d0, [x19]
ldr d1, [x20]
str d1, [x19], #8
str d0, [x20], #8
adds x7, x7, #8
b.ne LBB60_160
; %bb.161: ; in Loop: Header=BB60_148 Depth=1
mov x7, x0
cmp x12, x17
b.eq LBB60_147
b LBB60_163
LBB60_162: ; in Loop: Header=BB60_148 Depth=1
add x6, x14, x4
add x5, x14, x3
mov x7, x15
LBB60_163: ; in Loop: Header=BB60_148 Depth=1
add x5, x24, x5
add x6, x24, x6
add w7, w7, #1
LBB60_164: ; Parent Loop BB60_148 Depth=1
; => This Inner Loop Header: Depth=2
; scalar byte-swap remainder (note: x19 is reused as scratch here;
; the context pointer is not needed again before the epilogue)
ldrb w19, [x5]
ldrb w20, [x6]
strb w20, [x5], #1
strb w19, [x6], #1
sub w7, w7, #1
cmp w7, #1
b.hi LBB60_164
b LBB60_147
LBB60_165:
; free the palette buffer (NULL-checked) and return the pixel buffer
ldr x0, [sp, #16] ; 8-byte Folded Reload
cbz x0, LBB60_167
; %bb.166:
bl _free
LBB60_167:
mov x0, x24
ldp x29, x30, [sp, #176] ; 16-byte Folded Reload
ldp x20, x19, [sp, #160] ; 16-byte Folded Reload
ldp x22, x21, [sp, #144] ; 16-byte Folded Reload
ldp x24, x23, [sp, #128] ; 16-byte Folded Reload
ldp x26, x25, [sp, #112] ; 16-byte Folded Reload
ldp x28, x27, [sp, #96] ; 16-byte Folded Reload
add sp, sp, #192
ret
LBB60_168:
mov x2, x24
mov x1, x27
mov x3, x21
ldp w11, w4, [sp, #84] ; 8-byte Folded Reload
mov x21, x28
ldr x27, [sp, #72] ; 8-byte Folded Reload
ldr w28, [sp, #4] ; 4-byte Folded Reload
b LBB60_76
LBB60_169:
mov x2, x24
mov x1, x27
mov x3, x21
ldp w11, w4, [sp, #84] ; 8-byte Folded Reload
mov x21, x28
ldr x27, [sp, #72] ; 8-byte Folded Reload
ldr w28, [sp, #4] ; 4-byte Folded Reload
b LBB60_79
.loh AdrpAdd Lloh318, Lloh319
.loh AdrpAdd Lloh320, Lloh321
.cfi_endproc
.section __TEXT,__const
; jump table for pixel fetch, indexed by bytes-per-pixel - 1
lJTI60_0:
.byte (LBB60_132-LBB60_132)>>2
.byte (LBB60_134-LBB60_132)>>2
.byte (LBB60_136-LBB60_132)>>2
.byte (LBB60_137-LBB60_132)>>2
; jump table for pixel store, indexed by output components - 1
lJTI60_1:
.byte (LBB60_141-LBB60_101)>>2
.byte (LBB60_142-LBB60_101)>>2
.byte (LBB60_143-LBB60_101)>>2
.byte (LBB60_101-LBB60_101)>>2
; -- End function
.section __TEXT,__text,regular,pure_instructions
.globl _stbi_psd_load ; -- Begin function stbi_psd_load
.p2align 2
;-----------------------------------------------------------------------
; _stbi_psd_load(filename, x, y, comp, req_comp)
; Target: AArch64 Mach-O (Apple), AAPCS64.
; In:   x0 = filename (passed straight to fopen), x1..x4 = out-params /
;       req_comp forwarded unchanged to _psd_load.
; Out:  x0 = result of _psd_load, or NULL when fopen fails.
; Builds a small read-context on the stack: context base is sp+8 and the
; FILE* is stored at sp+24, i.e. context offset 16 -- consistent with
; _psd_load loading the FILE* from [ctx, #16]. (Exact context layout is
; assumed from these offsets -- TODO confirm against the C struct.)
;-----------------------------------------------------------------------
_stbi_psd_load: ; @stbi_psd_load
.cfi_startproc
; %bb.0:
sub sp, sp, #112
.cfi_def_cfa_offset 112
stp x24, x23, [sp, #48] ; 16-byte Folded Spill
stp x22, x21, [sp, #64] ; 16-byte Folded Spill
stp x20, x19, [sp, #80] ; 16-byte Folded Spill
stp x29, x30, [sp, #96] ; 16-byte Folded Spill
add x29, sp, #96
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
; Save the four trailing arguments in callee-saved regs across the calls.
mov x19, x4
mov x20, x3
mov x21, x2
mov x23, x1
Lloh322:
adrp x1, l_.str@PAGE
Lloh323:
add x1, x1, l_.str@PAGEOFF
; fopen(filename, l_.str) -- l_.str is presumably the open-mode string
; ("rb" or similar); contents not visible in this chunk.
bl _fopen
cbz x0, LBB61_2
; %bb.1:
mov x22, x0 ; keep FILE* for the fclose after decoding
str x0, [sp, #24] ; FILE* at context offset 16 (context starts at sp+8)
add x0, sp, #8 ; x0 = &context
mov x1, x23
mov x2, x21
mov x3, x20
mov x4, x19
bl _psd_load
mov x19, x0 ; preserve decoded-image pointer across fclose
mov x0, x22
bl _fclose
b LBB61_3
LBB61_2:
; fopen failed: return NULL.
mov x19, #0
LBB61_3:
mov x0, x19
ldp x29, x30, [sp, #96] ; 16-byte Folded Reload
ldp x20, x19, [sp, #80] ; 16-byte Folded Reload
ldp x22, x21, [sp, #64] ; 16-byte Folded Reload
ldp x24, x23, [sp, #48] ; 16-byte Folded Reload
add sp, sp, #112
ret
.loh AdrpAdd Lloh322, Lloh323
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function psd_load
;-----------------------------------------------------------------------
; _psd_load(ctx, x, y, comp, req_comp)
; Target: AArch64 Mach-O (Apple), AAPCS64.
; In:   x0 = read context (FILE* at [ctx,#16]; when that is NULL an
;       in-memory cursor/limit pair at [ctx,#24]/[ctx,#32] is used --
;       this dual read path repeats throughout the function),
;       x1 = out width ptr, x2 = out height ptr, x3 = out channel-count
;       ptr, x4 = req_comp.
; Out:  x0 = malloc'd RGBA (4 bytes/pixel) image, converted via
;       _convert_format when req_comp requires it; NULL on error with
;       _failure_reason set to a static string.
; Structure: validate the PSD header, then decode either raw or
; RLE-compressed channel planes into an interleaved 4-channel buffer.
; Register roles (set in %bb.0): x23=ctx, x20=x-out, x24=y-out,
; x21=comp-out, x22=req_comp; later x28=height, x25=width,
; w27=channel count, w19=compression flag.
;-----------------------------------------------------------------------
_psd_load: ; @psd_load
.cfi_startproc
; %bb.0:
sub sp, sp, #160
.cfi_def_cfa_offset 160
stp x28, x27, [sp, #64] ; 16-byte Folded Spill
stp x26, x25, [sp, #80] ; 16-byte Folded Spill
stp x24, x23, [sp, #96] ; 16-byte Folded Spill
stp x22, x21, [sp, #112] ; 16-byte Folded Spill
stp x20, x19, [sp, #128] ; 16-byte Folded Spill
stp x29, x30, [sp, #144] ; 16-byte Folded Spill
add x29, sp, #144
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
mov x22, x4
mov x21, x3
mov x24, x2
mov x20, x1
mov x23, x0
; Signature: get32 must yield 0x38425053 ("8BPS", big-endian PSD magic).
bl _get32
mov w8, #20563
movk w8, #14402, lsl #16
cmp w0, w8
b.ne LBB62_4
; %bb.1:
; Read a 16-bit big-endian version field one byte at a time
; (fgetc path when FILE* present, memory-cursor path otherwise).
ldr x0, [x23, #16]
cbz x0, LBB62_5
; %bb.2:
bl _fgetc
cmn w0, #1
csel w19, wzr, w0, eq
ldr x0, [x23, #16]
cbz x0, LBB62_7
; %bb.3:
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
b LBB62_11
LBB62_4:
; Bad signature -> fail with static message l_.str.83.
mov x26, #0
Lloh324:
adrp x8, l_.str.83@PAGE
Lloh325:
add x8, x8, l_.str.83@PAGEOFF
b LBB62_74
LBB62_5:
ldp x8, x9, [x23, #24]
cmp x8, x9
b.hs LBB62_9
; %bb.6:
add x10, x8, #1
str x10, [x23, #24]
ldrb w19, [x8]
mov x8, x10
cmp x8, x9
b.lo LBB62_8
b LBB62_10
LBB62_7:
ldp x8, x9, [x23, #24]
cmp x8, x9
b.hs LBB62_10
LBB62_8:
add x9, x8, #1
str x9, [x23, #24]
ldrb w8, [x8]
b LBB62_11
LBB62_9:
mov w19, #0
cmp x8, x9
b.lo LBB62_8
LBB62_10:
mov w8, #0
LBB62_11:
; version = (hi << 8) | lo; must be 1.
add w8, w8, w19, lsl #8
cmp w8, #1
b.ne LBB62_16
; %bb.12:
; Skip 6 reserved bytes, then read the 16-bit channel count.
ldr x0, [x23, #16]
cbz x0, LBB62_17
; %bb.13:
mov w1, #6
mov w2, #1
bl _fseek
ldr x0, [x23, #16]
cbz x0, LBB62_18
; %bb.14:
bl _fgetc
cmn w0, #1
csel w19, wzr, w0, eq
ldr x0, [x23, #16]
cbz x0, LBB62_22
; %bb.15:
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
b LBB62_24
LBB62_16:
; Wrong version -> fail with l_.str.84.
mov x26, #0
Lloh326:
adrp x8, l_.str.84@PAGE
Lloh327:
add x8, x8, l_.str.84@PAGEOFF
b LBB62_74
LBB62_17:
ldr x8, [x23, #24]
add x8, x8, #6
str x8, [x23, #24]
LBB62_18:
ldp x8, x9, [x23, #24]
cmp x8, x9
b.hs LBB62_20
; %bb.19:
add x10, x8, #1
str x10, [x23, #24]
ldrb w19, [x8]
mov x8, x10
cmp x8, x9
b.lo LBB62_21
b LBB62_23
LBB62_20:
mov w19, #0
cmp x8, x9
b.hs LBB62_23
LBB62_21:
add x9, x8, #1
str x9, [x23, #24]
ldrb w8, [x8]
b LBB62_24
LBB62_22:
ldp x8, x9, [x23, #24]
cmp x8, x9
b.lo LBB62_21
LBB62_23:
mov w8, #0
LBB62_24:
; w27 = channel count; must be <= 16.
add w27, w8, w19, lsl #8
cmp w27, #17
b.lo LBB62_26
; %bb.25:
mov x26, #0
Lloh328:
adrp x8, l_.str.85@PAGE
Lloh329:
add x8, x8, l_.str.85@PAGEOFF
b LBB62_74
LBB62_26:
; x28 = height, x25 = width (two big-endian 32-bit reads).
mov x0, x23
bl _get32
mov x28, x0
mov x0, x23
bl _get32
mov x25, x0
; 16-bit depth field: must be 8 (bits per channel).
ldr x0, [x23, #16]
cbz x0, LBB62_29
; %bb.27:
bl _fgetc
cmn w0, #1
csel w19, wzr, w0, eq
ldr x0, [x23, #16]
cbz x0, LBB62_31
; %bb.28:
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
b LBB62_35
LBB62_29:
ldp x8, x9, [x23, #24]
cmp x8, x9
b.hs LBB62_33
; %bb.30:
add x10, x8, #1
str x10, [x23, #24]
ldrb w19, [x8]
mov x8, x10
cmp x8, x9
b.lo LBB62_32
b LBB62_34
LBB62_31:
ldp x8, x9, [x23, #24]
cmp x8, x9
b.hs LBB62_34
LBB62_32:
add x9, x8, #1
str x9, [x23, #24]
ldrb w8, [x8]
b LBB62_35
LBB62_33:
mov w19, #0
cmp x8, x9
b.lo LBB62_32
LBB62_34:
mov w8, #0
LBB62_35:
add w8, w8, w19, lsl #8
cmp w8, #8
b.ne LBB62_39
; %bb.36:
; 16-bit color-mode field: must be 3 (presumably RGB -- per PSD spec).
ldr x0, [x23, #16]
cbz x0, LBB62_40
; %bb.37:
bl _fgetc
cmn w0, #1
csel w19, wzr, w0, eq
ldr x0, [x23, #16]
cbz x0, LBB62_42
; %bb.38:
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
b LBB62_47
LBB62_39:
; Unsupported bit depth -> fail with l_.str.86.
mov x26, #0
Lloh330:
adrp x8, l_.str.86@PAGE
Lloh331:
add x8, x8, l_.str.86@PAGEOFF
b LBB62_74
LBB62_40:
ldp x8, x9, [x23, #24]
cmp x8, x9
b.hs LBB62_43
; %bb.41:
add x10, x8, #1
str x10, [x23, #24]
ldrb w19, [x8]
mov x8, x10
b LBB62_44
LBB62_42:
ldp x8, x9, [x23, #24]
b LBB62_44
LBB62_43:
mov w19, #0
LBB62_44:
cmp x8, x9
b.hs LBB62_46
; %bb.45:
add x9, x8, #1
str x9, [x23, #24]
ldrb w8, [x8]
b LBB62_47
LBB62_46:
mov w8, #0
LBB62_47:
add w8, w8, w19, lsl #8
cmp w8, #3
b.ne LBB62_50
; %bb.48:
; Skip three consecutive length-prefixed sections (each: 32-bit
; big-endian length, then seek/advance by that many bytes).
mov x0, x23
bl _get32
; kill: def $w0 killed $w0 def $x0
ldr x8, [x23, #16]
cbz x8, LBB62_51
; %bb.49:
sxtw x1, w0
mov x0, x8
mov w2, #1
bl _fseek
b LBB62_52
LBB62_50:
; Unsupported color mode -> fail with l_.str.87.
mov x26, #0
Lloh332:
adrp x8, l_.str.87@PAGE
Lloh333:
add x8, x8, l_.str.87@PAGEOFF
b LBB62_74
LBB62_51:
ldr x8, [x23, #24]
add x8, x8, w0, sxtw
str x8, [x23, #24]
LBB62_52:
mov x0, x23
bl _get32
; kill: def $w0 killed $w0 def $x0
ldr x8, [x23, #16]
cbz x8, LBB62_54
; %bb.53:
sxtw x1, w0
mov x0, x8
mov w2, #1
bl _fseek
b LBB62_55
LBB62_54:
ldr x8, [x23, #24]
add x8, x8, w0, sxtw
str x8, [x23, #24]
LBB62_55:
mov x0, x23
bl _get32
; kill: def $w0 killed $w0 def $x0
ldr x8, [x23, #16]
cbz x8, LBB62_59
; %bb.56:
sxtw x1, w0
mov x0, x8
mov w2, #1
bl _fseek
ldr x0, [x23, #16]
cbz x0, LBB62_60
; %bb.57:
; 16-bit compression flag: 0 = raw, 1 = RLE; anything >= 2 is an error.
bl _fgetc
cmn w0, #1
csel w19, wzr, w0, eq
ldr x0, [x23, #16]
cbz x0, LBB62_63
; %bb.58:
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
b LBB62_67
LBB62_59:
ldr x8, [x23, #24]
add x8, x8, w0, sxtw
str x8, [x23, #24]
LBB62_60:
ldp x8, x9, [x23, #24]
cmp x8, x9
b.hs LBB62_62
; %bb.61:
add x10, x8, #1
str x10, [x23, #24]
ldrb w19, [x8]
mov x8, x10
b LBB62_64
LBB62_62:
mov w19, #0
b LBB62_64
LBB62_63:
ldp x8, x9, [x23, #24]
LBB62_64:
cmp x8, x9
b.hs LBB62_66
; %bb.65:
add x9, x8, #1
str x9, [x23, #24]
ldrb w8, [x8]
b LBB62_67
LBB62_66:
mov w8, #0
LBB62_67:
add w19, w8, w19, lsl #8
cmp w19, #2
b.lt LBB62_69
; %bb.68:
; Unknown compression -> fail with l_.str.36.
mov x26, #0
Lloh334:
adrp x8, l_.str.36@PAGE
Lloh335:
add x8, x8, l_.str.36@PAGEOFF
b LBB62_74
LBB62_69:
; x26 = malloc(4 * height * width): output is always 4 interleaved
; channels at this stage.
mul w8, w28, w25
lsl w8, w8, #2
sxtw x0, w8
bl _malloc
mov x26, x0
cbz x0, LBB62_73
; %bb.70:
; Spill w/h, out-ptrs and channel count; w25 becomes pixel count.
stp w25, w28, [sp] ; 8-byte Folded Spill
mul w25, w25, w28
stp x24, x20, [sp, #8] ; 16-byte Folded Spill
str x27, [sp, #56] ; 8-byte Folded Spill
cbz w19, LBB62_76
; %bb.71:
; RLE path: skip the per-row byte-count table
; (channels * height 16-bit entries = w27*w28*2 bytes).
mul w8, w27, w28
lsl w8, w8, #1
ldr x0, [x23, #16]
cbz x0, LBB62_84
; %bb.72:
sxtw x1, w8
mov w2, #1
bl _fseek
b LBB62_85
LBB62_73:
; malloc failed -> fail with l_.str.5 (out-of-memory message).
Lloh336:
adrp x8, l_.str.5@PAGE
Lloh337:
add x8, x8, l_.str.5@PAGEOFF
LBB62_74:
; Common error exit: record reason, return x26 (NULL here).
adrp x9, _failure_reason@PAGE
str x8, [x9, _failure_reason@PAGEOFF]
LBB62_75:
mov x0, x26
ldp x29, x30, [sp, #144] ; 16-byte Folded Reload
ldp x20, x19, [sp, #128] ; 16-byte Folded Reload
ldp x22, x21, [sp, #112] ; 16-byte Folded Reload
ldp x24, x23, [sp, #96] ; 16-byte Folded Reload
ldp x26, x25, [sp, #80] ; 16-byte Folded Reload
ldp x28, x27, [sp, #64] ; 16-byte Folded Reload
add sp, sp, #160
ret
LBB62_76:
; ---- Raw (uncompressed) path: channel 0 ----
; Read w25 bytes into every 4th output byte starting at x26.
cmp w25, #1
b.lt LBB62_136
; %bb.77:
mov x19, x25
mov x20, x26
b LBB62_80
LBB62_78: ; in Loop: Header=BB62_80 Depth=1
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
LBB62_79: ; in Loop: Header=BB62_80 Depth=1
strb w8, [x20], #4
subs w19, w19, #1
b.eq LBB62_127
LBB62_80: ; =>This Inner Loop Header: Depth=1
ldr x0, [x23, #16]
cbnz x0, LBB62_78
; %bb.81: ; in Loop: Header=BB62_80 Depth=1
ldp x8, x9, [x23, #24]
cmp x8, x9
b.hs LBB62_83
; %bb.82: ; in Loop: Header=BB62_80 Depth=1
add x9, x8, #1
str x9, [x23, #24]
ldrb w8, [x8]
b LBB62_79
LBB62_83: ; in Loop: Header=BB62_80 Depth=1
mov w8, #0
b LBB62_79
LBB62_84:
ldr x9, [x23, #24]
add x8, x9, w8, sxtw
str x8, [x23, #24]
LBB62_85:
; ---- RLE path setup: precompute vectorized-fill bounds for the
; "channel missing" case (fill count rounded to multiple of 4).
mov x24, #0
sub w8, w25, #1
str x8, [sp, #48] ; 8-byte Folded Spill
add x8, x8, #1
str x8, [sp, #32] ; 8-byte Folded Spill
and x8, x8, #0x1fffffffc
str x8, [sp, #40] ; 8-byte Folded Spill
lsl x8, x8, #2
str x8, [sp, #24] ; 8-byte Folded Spill
add x28, x26, #8
b LBB62_87
LBB62_86: ; in Loop: Header=BB62_87 Depth=1
; Next output channel (x24 = channel index 0..3).
add x24, x24, #1
add x28, x28, #1
cmp x24, #4
b.eq LBB62_178
LBB62_87: ; =>This Loop Header: Depth=1
; Child Loop BB62_123 Depth 2
; Child Loop BB62_126 Depth 2
; Child Loop BB62_95 Depth 2
; Child Loop BB62_118 Depth 3
; Child Loop BB62_121 Depth 3
; Child Loop BB62_105 Depth 3
; If channel x24 exists in the file, RLE-decode it; otherwise fill
; with 0 (or 0xFF via csetm for the alpha channel, x24 == 3).
add x19, x26, x24
ldr x8, [sp, #56] ; 8-byte Folded Reload
cmp x24, x8
b.hs LBB62_90
; %bb.88: ; in Loop: Header=BB62_87 Depth=1
cmp w25, #1
b.lt LBB62_86
; %bb.89: ; in Loop: Header=BB62_87 Depth=1
mov w20, #0
b LBB62_95
LBB62_90: ; in Loop: Header=BB62_87 Depth=1
cmp w25, #1
b.lt LBB62_86
; %bb.91: ; in Loop: Header=BB62_87 Depth=1
cmp x24, #3
csetm w8, eq
ldr x9, [sp, #48] ; 8-byte Folded Reload
cmp w9, #3
b.hs LBB62_122
; %bb.92: ; in Loop: Header=BB62_87 Depth=1
mov w9, #0
b LBB62_125
LBB62_93: ; in Loop: Header=BB62_95 Depth=2
mov x19, x10
LBB62_94: ; in Loop: Header=BB62_95 Depth=2
; w20 = pixels decoded so far in this channel.
cmp w20, w25
b.ge LBB62_86
LBB62_95: ; Parent Loop BB62_87 Depth=1
; => This Loop Header: Depth=2
; Child Loop BB62_118 Depth 3
; Child Loop BB62_121 Depth 3
; Child Loop BB62_105 Depth 3
; Read one PackBits control byte w27:
;   128      -> no-op; < 128 -> literal run of w27+1 bytes;
;   > 128    -> replicate next byte (257 - w27) times.
ldr x0, [x23, #16]
cbz x0, LBB62_98
; %bb.96: ; in Loop: Header=BB62_95 Depth=2
bl _fgetc
mov x27, x0
cmn w0, #1
b.eq LBB62_102
; %bb.97: ; in Loop: Header=BB62_95 Depth=2
cmp w27, #128
b.eq LBB62_94
b LBB62_100
LBB62_98: ; in Loop: Header=BB62_95 Depth=2
ldp x8, x9, [x23, #24]
cmp x8, x9
b.hs LBB62_102
; %bb.99: ; in Loop: Header=BB62_95 Depth=2
add x9, x8, #1
str x9, [x23, #24]
ldrb w27, [x8]
cmp w27, #128
b.eq LBB62_94
LBB62_100: ; in Loop: Header=BB62_95 Depth=2
b.ge LBB62_109
; %bb.101: ; in Loop: Header=BB62_95 Depth=2
; Literal run: copy w27+1 raw bytes into every 4th output byte.
adds w27, w27, #1
add w20, w27, w20
b.lo LBB62_105
b LBB62_94
LBB62_102: ; in Loop: Header=BB62_95 Depth=2
; EOF while reading control byte: treat as a single zero byte.
add w20, w20, #1
mov w27, #1
b LBB62_105
LBB62_103: ; in Loop: Header=BB62_105 Depth=3
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
LBB62_104: ; in Loop: Header=BB62_105 Depth=3
strb w8, [x19], #4
subs w27, w27, #1
b.eq LBB62_94
LBB62_105: ; Parent Loop BB62_87 Depth=1
; Parent Loop BB62_95 Depth=2
; => This Inner Loop Header: Depth=3
ldr x0, [x23, #16]
cbnz x0, LBB62_103
; %bb.106: ; in Loop: Header=BB62_105 Depth=3
ldp x8, x9, [x23, #24]
cmp x8, x9
b.hs LBB62_108
; %bb.107: ; in Loop: Header=BB62_105 Depth=3
add x9, x8, #1
str x9, [x23, #24]
ldrb w8, [x8]
b LBB62_104
LBB62_108: ; in Loop: Header=BB62_105 Depth=3
mov w8, #0
b LBB62_104
LBB62_109: ; in Loop: Header=BB62_95 Depth=2
; Replicate run: read one value byte, then store it
; (257 - control) times, vectorized 4-at-a-time when long enough.
ldr x0, [x23, #16]
cbz x0, LBB62_111
; %bb.110: ; in Loop: Header=BB62_95 Depth=2
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
b LBB62_114
LBB62_111: ; in Loop: Header=BB62_95 Depth=2
ldp x8, x9, [x23, #24]
cmp x8, x9
b.hs LBB62_113
; %bb.112: ; in Loop: Header=BB62_95 Depth=2
add x9, x8, #1
str x9, [x23, #24]
ldrb w8, [x8]
b LBB62_114
LBB62_113: ; in Loop: Header=BB62_95 Depth=2
mov w8, #0
LBB62_114: ; in Loop: Header=BB62_95 Depth=2
eor w10, w27, #0xff
add w9, w10, #2
add w20, w9, w20
cbz w9, LBB62_94
; %bb.115: ; in Loop: Header=BB62_95 Depth=2
add w10, w10, #1
cmp w10, #3
b.hs LBB62_117
; %bb.116: ; in Loop: Header=BB62_95 Depth=2
mov x10, x19
b LBB62_120
LBB62_117: ; in Loop: Header=BB62_95 Depth=2
; Unrolled x4 store of the replicated byte (stride 4).
add x11, x10, #1
and x12, x11, #0x1fffffffc
add x10, x19, x12, lsl #2
sub w9, w9, w12
add x13, x19, #8
mov x14, x12
LBB62_118: ; Parent Loop BB62_87 Depth=1
; Parent Loop BB62_95 Depth=2
; => This Inner Loop Header: Depth=3
sturb w8, [x13, #-8]
sturb w8, [x13, #-4]
strb w8, [x13]
strb w8, [x13, #4]
add x13, x13, #16
subs x14, x14, #4
b.ne LBB62_118
; %bb.119: ; in Loop: Header=BB62_95 Depth=2
cmp x11, x12
b.eq LBB62_93
LBB62_120: ; in Loop: Header=BB62_95 Depth=2
mov x19, x10
LBB62_121: ; Parent Loop BB62_87 Depth=1
; Parent Loop BB62_95 Depth=2
; => This Inner Loop Header: Depth=3
; Scalar tail of the replicate run.
strb w8, [x19], #4
subs w9, w9, #1
b.ne LBB62_121
b LBB62_94
LBB62_122: ; in Loop: Header=BB62_87 Depth=1
; Missing-channel fill (w8 = 0, or 0xFF for alpha), unrolled x4.
ldr x9, [sp, #24] ; 8-byte Folded Reload
add x19, x19, x9
ldr x9, [sp, #40] ; 8-byte Folded Reload
mov x10, x28
LBB62_123: ; Parent Loop BB62_87 Depth=1
; => This Inner Loop Header: Depth=2
sturb w8, [x10, #-8]
sturb w8, [x10, #-4]
strb w8, [x10]
strb w8, [x10, #4]
add x10, x10, #16
subs x9, x9, #4
b.ne LBB62_123
; %bb.124: ; in Loop: Header=BB62_87 Depth=1
ldp x10, x11, [sp, #32] ; 16-byte Folded Reload
mov x9, x11
cmp x10, x11
b.eq LBB62_86
LBB62_125: ; in Loop: Header=BB62_87 Depth=1
sub w9, w25, w9
LBB62_126: ; Parent Loop BB62_87 Depth=1
; => This Inner Loop Header: Depth=2
strb w8, [x19], #4
subs w9, w9, #1
b.ne LBB62_126
b LBB62_86
LBB62_127:
; ---- Raw path: channel 1 (green), output offset +1.
; Fill with zero instead when the file has fewer channels.
add x19, x26, #1
cmp w27, #1
b.lt LBB62_137
; %bb.128:
cmp w25, #1
b.lt LBB62_145
; %bb.129:
mov x20, x25
b LBB62_132
LBB62_130: ; in Loop: Header=BB62_132 Depth=1
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
LBB62_131: ; in Loop: Header=BB62_132 Depth=1
strb w8, [x19], #4
subs w20, w20, #1
b.eq LBB62_145
LBB62_132: ; =>This Inner Loop Header: Depth=1
ldr x0, [x23, #16]
cbnz x0, LBB62_130
; %bb.133: ; in Loop: Header=BB62_132 Depth=1
ldp x8, x9, [x23, #24]
cmp x8, x9
b.hs LBB62_135
; %bb.134: ; in Loop: Header=BB62_132 Depth=1
add x9, x8, #1
str x9, [x23, #24]
ldrb w8, [x8]
b LBB62_131
LBB62_135: ; in Loop: Header=BB62_132 Depth=1
mov w8, #0
b LBB62_131
LBB62_136:
cbnz w27, LBB62_145
b LBB62_178
LBB62_137:
; Zero-fill channel 1 (unrolled x4 then scalar tail).
subs w8, w25, #1
b.lt LBB62_178
; %bb.138:
cmp w8, #3
b.hs LBB62_140
; %bb.139:
mov w8, #0
b LBB62_143
LBB62_140:
add x9, x8, #1
and x8, x9, #0x1fffffffc
add x19, x19, x8, lsl #2
add x10, x26, #9
mov x11, x8
LBB62_141: ; =>This Inner Loop Header: Depth=1
sturb wzr, [x10, #-8]
sturb wzr, [x10, #-4]
strb wzr, [x10]
strb wzr, [x10, #4]
add x10, x10, #16
subs x11, x11, #4
b.ne LBB62_141
; %bb.142:
cmp x9, x8
b.eq LBB62_145
LBB62_143:
sub w8, w25, w8
LBB62_144: ; =>This Inner Loop Header: Depth=1
strb wzr, [x19], #4
subs w8, w8, #1
b.ne LBB62_144
LBB62_145:
; ---- Raw path: channel 2 (blue), output offset +2.
add x19, x26, #2
ldr x8, [sp, #56] ; 8-byte Folded Reload
cmp w8, #1
b.gt LBB62_149
; %bb.146:
subs w8, w25, #1
b.lt LBB62_178
; %bb.147:
cmp w8, #3
b.hs LBB62_157
; %bb.148:
mov w8, #0
ldr x24, [sp, #56] ; 8-byte Folded Reload
b LBB62_160
LBB62_149:
cmp w25, #1
b.lt LBB62_178
; %bb.150:
mov x20, x25
ldr x24, [sp, #56] ; 8-byte Folded Reload
b LBB62_153
LBB62_151: ; in Loop: Header=BB62_153 Depth=1
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
LBB62_152: ; in Loop: Header=BB62_153 Depth=1
strb w8, [x19], #4
subs w20, w20, #1
b.eq LBB62_162
LBB62_153: ; =>This Inner Loop Header: Depth=1
ldr x0, [x23, #16]
cbnz x0, LBB62_151
; %bb.154: ; in Loop: Header=BB62_153 Depth=1
ldp x8, x9, [x23, #24]
cmp x8, x9
b.hs LBB62_156
; %bb.155: ; in Loop: Header=BB62_153 Depth=1
add x9, x8, #1
str x9, [x23, #24]
ldrb w8, [x8]
b LBB62_152
LBB62_156: ; in Loop: Header=BB62_153 Depth=1
mov w8, #0
b LBB62_152
LBB62_157:
add x9, x8, #1
and x8, x9, #0x1fffffffc
add x19, x19, x8, lsl #2
add x10, x26, #10
mov x11, x8
ldr x24, [sp, #56] ; 8-byte Folded Reload
LBB62_158: ; =>This Inner Loop Header: Depth=1
sturb wzr, [x10, #-8]
sturb wzr, [x10, #-4]
strb wzr, [x10]
strb wzr, [x10, #4]
add x10, x10, #16
subs x11, x11, #4
b.ne LBB62_158
; %bb.159:
cmp x9, x8
b.eq LBB62_162
LBB62_160:
sub w8, w25, w8
LBB62_161: ; =>This Inner Loop Header: Depth=1
strb wzr, [x19], #4
subs w8, w8, #1
b.ne LBB62_161
LBB62_162:
; ---- Raw path: channel 3 (alpha), output offset +3.
; Fill with 0xFF (opaque) when the file has no alpha channel.
add x19, x26, #3
cmp w24, #3
b.ge LBB62_166
; %bb.163:
subs w8, w25, #1
b.lt LBB62_178
; %bb.164:
cmp w8, #3
b.hs LBB62_173
; %bb.165:
mov w8, #0
b LBB62_176
LBB62_166:
cmp w25, #1
b.ge LBB62_169
b LBB62_178
LBB62_167: ; in Loop: Header=BB62_169 Depth=1
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
LBB62_168: ; in Loop: Header=BB62_169 Depth=1
strb w8, [x19], #4
subs w25, w25, #1
b.eq LBB62_178
LBB62_169: ; =>This Inner Loop Header: Depth=1
ldr x0, [x23, #16]
cbnz x0, LBB62_167
; %bb.170: ; in Loop: Header=BB62_169 Depth=1
ldp x8, x9, [x23, #24]
cmp x8, x9
b.hs LBB62_172
; %bb.171: ; in Loop: Header=BB62_169 Depth=1
add x9, x8, #1
str x9, [x23, #24]
ldrb w8, [x8]
b LBB62_168
LBB62_172: ; in Loop: Header=BB62_169 Depth=1
mov w8, #0
b LBB62_168
LBB62_173:
add x9, x8, #1
and x8, x9, #0x1fffffffc
add x19, x19, x8, lsl #2
add x10, x26, #15
mov w11, #255
mov x12, x8
LBB62_174: ; =>This Inner Loop Header: Depth=1
sturb w11, [x10, #-12]
sturb w11, [x10, #-8]
sturb w11, [x10, #-4]
strb w11, [x10], #16
subs x12, x12, #4
b.ne LBB62_174
; %bb.175:
cmp x9, x8
b.eq LBB62_178
LBB62_176:
sub w8, w25, w8
mov w9, #255
LBB62_177: ; =>This Inner Loop Header: Depth=1
strb w9, [x19], #4
subs w8, w8, #1
b.ne LBB62_177
LBB62_178:
; ---- Epilogue: convert to req_comp when req_comp is not 0 or 4
; (tst with ~4 mask), then write the out-params and return.
tst w22, #0xfffffffb
b.eq LBB62_180
; %bb.179:
mov x0, x26
mov w1, #4
mov x2, x22
ldp w20, w19, [sp] ; 8-byte Folded Reload
mov x3, x20
mov x4, x19
bl _convert_format
mov x26, x0
ldp x9, x8, [sp, #8] ; 16-byte Folded Reload
ldr x10, [sp, #56] ; 8-byte Folded Reload
cbnz x0, LBB62_181
b LBB62_75
LBB62_180:
ldp x9, x8, [sp, #8] ; 16-byte Folded Reload
ldr x10, [sp, #56] ; 8-byte Folded Reload
ldp w20, w19, [sp] ; 8-byte Folded Reload
LBB62_181:
; *comp = channel count (only when comp ptr non-NULL), *y = height,
; *x = width.
cbz x21, LBB62_183
; %bb.182:
str w10, [x21]
LBB62_183:
str w19, [x9]
str w20, [x8]
b LBB62_75
.loh AdrpAdd Lloh324, Lloh325
.loh AdrpAdd Lloh326, Lloh327
.loh AdrpAdd Lloh328, Lloh329
.loh AdrpAdd Lloh330, Lloh331
.loh AdrpAdd Lloh332, Lloh333
.loh AdrpAdd Lloh334, Lloh335
.loh AdrpAdd Lloh336, Lloh337
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function hdr_load
;-----------------------------------------------------------------------
; _hdr_load(ctx, x, y, comp, req_comp)
; Target: AArch64 Mach-O (Apple), AAPCS64.
; In:   x0 = read context (same dual FILE*/memory-cursor layout as
;       _psd_load: FILE* at [ctx,#16], cursor/limit at [ctx,#24]),
;       x1 = out width, x2 = out height, x3 = out comp, x4 = req_comp.
; Out:  x0 = malloc'd float image (w*h*comp floats), NULL on error with
;       _failure_reason set. Protected by __stack_chk_guard.
; Structure: verify the header magic via _hdr_gettoken, scan header
; lines for the required FORMAT line, parse the resolution line
; ("-Y <h> +X <w>" -- inferred from the strtol/strncmp sequence; the
; literal l_.str.95 is not visible here), then decode either
; RLE-compressed scanlines (component-planar, _hdr_convert per pixel)
; or the flat fallback path at LBB63_82+ that reads 4-byte RGBE
; pixels directly.
;-----------------------------------------------------------------------
_hdr_load: ; @hdr_load
.cfi_startproc
; %bb.0:
stp x28, x27, [sp, #-96]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 96
stp x26, x25, [sp, #16] ; 16-byte Folded Spill
stp x24, x23, [sp, #32] ; 16-byte Folded Spill
stp x22, x21, [sp, #48] ; 16-byte Folded Spill
stp x20, x19, [sp, #64] ; 16-byte Folded Spill
stp x29, x30, [sp, #80] ; 16-byte Folded Spill
add x29, sp, #80
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
sub sp, sp, #1120
mov x20, x4
mov x23, x3
mov x24, x2
mov x25, x1
mov x19, x0
; Stack canary for the large on-stack token buffer at sp+80.
Lloh338:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh339:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh340:
ldr x8, [x8]
stur x8, [x29, #-96]
; First token must match an 11-byte magic; the xor constants decode to
; ASCII "#?RADIANCE" + NUL (standard Radiance HDR signature).
add x1, sp, #80
bl _hdr_gettoken
ldr x8, [sp, #80]
mov x9, #16163
movk x9, #16722, lsl #16
movk x9, #18756, lsl #32
movk x9, #20033, lsl #48
eor x8, x8, x9
ldur x9, [sp, #83]
mov x10, #17473
movk x10, #16713, lsl #16
movk x10, #17230, lsl #32
movk x10, #69, lsl #48
eor x9, x9, x10
orr x8, x8, x9
cbz x8, LBB63_2
; %bb.1:
; Not an HDR file -> fail with l_.str.90.
mov x20, #0
Lloh341:
adrp x8, l_.str.90@PAGE
Lloh342:
add x8, x8, l_.str.90@PAGEOFF
b LBB63_13
LBB63_2:
add x21, sp, #80
add x1, sp, #80
mov x0, x19
bl _hdr_gettoken
ldrb w8, [sp, #80]
cbz w8, LBB63_11
; %bb.3:
; Scan header lines until the blank line; w21 counts matches of a
; 23-byte constant line (the xor constants decode to
; "FORMAT=32-bit_rle_rgbe").
mov w21, #0
mov x22, #20294
movk x22, #19794, lsl #16
movk x22, #21569, lsl #32
movk x22, #13117, lsl #48
mov x26, #11570
movk x26, #26978, lsl #16
movk x26, #24436, lsl #32
movk x26, #27762, lsl #48
mov x27, #25964
movk x27, #29279, lsl #16
movk x27, #25191, lsl #32
movk x27, #101, lsl #48
LBB63_4: ; =>This Inner Loop Header: Depth=1
ldp x8, x9, [sp, #80]
eor x8, x8, x22
eor x9, x9, x26
ldur x10, [sp, #95]
eor x10, x10, x27
orr x8, x8, x9
orr x8, x8, x10
cmp x8, #0
csinc w21, w21, wzr, ne
add x1, sp, #80
mov x0, x19
bl _hdr_gettoken
ldrb w8, [sp, #80]
cbnz w8, LBB63_4
; %bb.5:
cbz w21, LBB63_12
; %bb.6:
; Resolution line: must start with a 3-byte prefix (eor 22829 / 0x20
; checks -- presumably "-Y ", per the Radiance format; confirm against
; the C source), then height via strtol, then skip spaces and require
; the 3-byte l_.str.95 prefix (presumably "+X ") before the width.
add x21, sp, #80
add x1, sp, #80
mov x0, x19
bl _hdr_gettoken
ldrh w8, [sp, #80]
mov w9, #22829
eor w8, w8, w9
ldrb w9, [sp, #82]
eor w9, w9, #0x20
orr w8, w8, w9
cbnz w8, LBB63_10
; %bb.7:
add x0, x21, #3
str x0, [sp, #72]
add x1, sp, #72
mov w2, #10
bl _strtol
mov x26, x0
ldr x8, [sp, #72]
sub x22, x8, #1
LBB63_8: ; =>This Inner Loop Header: Depth=1
; Skip ASCII spaces after the height number.
ldrb w8, [x22, #1]!
cmp w8, #32
b.eq LBB63_8
; %bb.9:
Lloh343:
adrp x1, l_.str.95@PAGE
Lloh344:
add x1, x1, l_.str.95@PAGEOFF
mov w21, #3
mov x0, x22
mov w2, #3
bl _strncmp
cbz w0, LBB63_16
LBB63_10:
; Unsupported resolution line -> fail with l_.str.94.
mov x20, #0
Lloh345:
adrp x8, l_.str.94@PAGE
Lloh346:
add x8, x8, l_.str.94@PAGEOFF
b LBB63_13
LBB63_11:
str x21, [sp, #72]
LBB63_12:
; Missing/unmatched FORMAT line -> fail with l_.str.92.
mov x20, #0
Lloh347:
adrp x8, l_.str.92@PAGE
Lloh348:
add x8, x8, l_.str.92@PAGEOFF
LBB63_13:
adrp x9, _failure_reason@PAGE
str x8, [x9, _failure_reason@PAGEOFF]
LBB63_14:
; Common exit: verify the stack canary, return x20.
ldur x8, [x29, #-96]
Lloh349:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh350:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh351:
ldr x9, [x9]
cmp x9, x8
b.ne LBB63_81
; %bb.15:
mov x0, x20
add sp, sp, #1120
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
ldp x26, x25, [sp, #16] ; 16-byte Folded Reload
ldp x28, x27, [sp], #96 ; 16-byte Folded Reload
ret
LBB63_16:
; Parse width; publish *x (w22=width), *y (w26=height), *comp (w21=3);
; w23 = output components (req_comp, or 3 when req_comp == 0);
; allocate w23*width*height floats.
add x0, x22, #3
str x0, [sp, #72]
mov x1, #0
mov w2, #10
bl _strtol
mov x22, x0
str w22, [x25]
str w26, [x24]
str w21, [x23]
cmp w20, #0
csel w23, w21, w20, eq
mul w27, w23, w22
mul w8, w27, w26
sbfiz x0, x8, #2, #32
bl _malloc
mov x20, x0
; RLE scanlines are only valid for widths in [8, 32768); otherwise fall
; through to the flat (non-RLE) read path at LBB63_82.
sub w8, w22, #8, lsl #12 ; =32768
mov w9, #-32760
cmp w8, w9
b.hs LBB63_18
; %bb.17:
mov w21, #0
b LBB63_82
LBB63_18:
cmp w26, #1
b.lt LBB63_77
; %bb.19:
; Per-scanline RLE decode. x24 = scanline scratch buffer (w*4 bytes,
; lazily allocated); x9 = row index; spills hold loop-invariant sizes.
mov x9, #0
mov x24, #0
lsl w10, w22, #2
and x11, x26, #0xffffffff
and x8, x22, #0xffffffff
stp x8, x11, [sp, #32] ; 16-byte Folded Spill
smull x8, w22, w23
lsl x8, x8, #2
stp x10, x8, [sp, #16] ; 16-byte Folded Spill
sbfiz x21, x23, #2, #32
str x20, [sp, #56] ; 8-byte Folded Spill
str x26, [sp, #8] ; 8-byte Folded Spill
str w27, [sp, #4] ; 4-byte Folded Spill
LBB63_20: ; =>This Loop Header: Depth=1
; Child Loop BB63_44 Depth 2
; Child Loop BB63_47 Depth 3
; Child Loop BB63_58 Depth 4
; Child Loop BB63_70 Depth 4
; Child Loop BB63_73 Depth 4
; Child Loop BB63_75 Depth 2
; Read the 4-byte scanline header: must be 02 02 <len-hi> <len-lo>
; with len == width, else bail to the not-RLE (LBB63_79) or
; corrupt (LBB63_80) paths.
str x9, [sp, #48] ; 8-byte Folded Spill
ldr x0, [x19, #16]
cbz x0, LBB63_24
; %bb.21: ; in Loop: Header=BB63_20 Depth=1
bl _fgetc
cmn w0, #1
csel w25, wzr, w0, eq
ldr x0, [x19, #16]
cbz x0, LBB63_27
; %bb.22: ; in Loop: Header=BB63_20 Depth=1
bl _fgetc
cmn w0, #1
csel w26, wzr, w0, eq
ldr x0, [x19, #16]
cbz x0, LBB63_30
; %bb.23: ; in Loop: Header=BB63_20 Depth=1
bl _fgetc
cmn w0, #1
csel w27, wzr, w0, eq
b LBB63_33
LBB63_24: ; in Loop: Header=BB63_20 Depth=1
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB63_26
; %bb.25: ; in Loop: Header=BB63_20 Depth=1
add x9, x8, #1
str x9, [x19, #24]
ldrb w25, [x8]
b LBB63_27
LBB63_26: ; in Loop: Header=BB63_20 Depth=1
mov w25, #0
LBB63_27: ; in Loop: Header=BB63_20 Depth=1
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB63_29
; %bb.28: ; in Loop: Header=BB63_20 Depth=1
add x9, x8, #1
str x9, [x19, #24]
ldrb w26, [x8]
b LBB63_30
LBB63_29: ; in Loop: Header=BB63_20 Depth=1
mov w26, #0
LBB63_30: ; in Loop: Header=BB63_20 Depth=1
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB63_32
; %bb.31: ; in Loop: Header=BB63_20 Depth=1
add x9, x8, #1
str x9, [x19, #24]
ldrb w27, [x8]
b LBB63_33
LBB63_32: ; in Loop: Header=BB63_20 Depth=1
mov w27, #0
LBB63_33: ; in Loop: Header=BB63_20 Depth=1
and w8, w27, #0x80
cmp w25, #2
ccmp w26, #2, #0, eq
ccmp w8, #0, #0, eq
b.ne LBB63_79
; %bb.34: ; in Loop: Header=BB63_20 Depth=1
ldr x0, [x19, #16]
cbz x0, LBB63_36
; %bb.35: ; in Loop: Header=BB63_20 Depth=1
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
b LBB63_39
LBB63_36: ; in Loop: Header=BB63_20 Depth=1
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB63_38
; %bb.37: ; in Loop: Header=BB63_20 Depth=1
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
b LBB63_39
LBB63_38: ; in Loop: Header=BB63_20 Depth=1
mov w8, #0
LBB63_39: ; in Loop: Header=BB63_20 Depth=1
lsl w9, w27, #8
orr w8, w8, w9
cmp w8, w22
b.ne LBB63_80
; %bb.40: ; in Loop: Header=BB63_20 Depth=1
; Lazily allocate the w*4-byte scanline buffer (planar RGBE).
cbnz x24, LBB63_42
; %bb.41: ; in Loop: Header=BB63_20 Depth=1
ldr x0, [sp, #16] ; 8-byte Folded Reload
bl _malloc
mov x24, x0
LBB63_42: ; in Loop: Header=BB63_20 Depth=1
mov w26, #0
b LBB63_44
LBB63_43: ; in Loop: Header=BB63_44 Depth=2
add w26, w26, #1
cmp w26, #4
b.eq LBB63_74
LBB63_44: ; Parent Loop BB63_20 Depth=1
; => This Loop Header: Depth=2
; Child Loop BB63_47 Depth 3
; Child Loop BB63_58 Depth 4
; Child Loop BB63_70 Depth 4
; Child Loop BB63_73 Depth 4
; w26 = RGBE component index (0..3); each component is RLE-coded
; separately; bytes land at x24[4*pixel + component].
mov w27, #0
b LBB63_47
LBB63_45: ; in Loop: Header=BB63_47 Depth=3
add w27, w27, w9
LBB63_46: ; in Loop: Header=BB63_47 Depth=3
cmp w27, w22
b.ge LBB63_43
LBB63_47: ; Parent Loop BB63_20 Depth=1
; Parent Loop BB63_44 Depth=2
; => This Loop Header: Depth=3
; Control byte w25: > 128 -> replicate next byte (w25 - 128)
; times; <= 128 -> literal run of w25 raw bytes (0 = no-op).
ldr x0, [x19, #16]
cbz x0, LBB63_49
; %bb.48: ; in Loop: Header=BB63_47 Depth=3
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
b LBB63_51
LBB63_49: ; in Loop: Header=BB63_47 Depth=3
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB63_46
; %bb.50: ; in Loop: Header=BB63_47 Depth=3
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
LBB63_51: ; in Loop: Header=BB63_47 Depth=3
and w25, w8, #0xff
cmp w25, #128
b.ls LBB63_54
; %bb.52: ; in Loop: Header=BB63_47 Depth=3
ldr x0, [x19, #16]
cbz x0, LBB63_62
; %bb.53: ; in Loop: Header=BB63_47 Depth=3
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
eor w9, w25, #0x80
cbnz w9, LBB63_65
b LBB63_46
LBB63_54: ; in Loop: Header=BB63_47 Depth=3
cbz w25, LBB63_46
; %bb.55: ; in Loop: Header=BB63_47 Depth=3
; Literal run: copy w25 bytes at stride 4.
add w28, w26, w27, lsl #2
b LBB63_58
LBB63_56: ; in Loop: Header=BB63_58 Depth=4
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
LBB63_57: ; in Loop: Header=BB63_58 Depth=4
strb w8, [x24, w28, sxtw]
add w28, w28, #4
add w27, w27, #1
subs w25, w25, #1
b.eq LBB63_46
LBB63_58: ; Parent Loop BB63_20 Depth=1
; Parent Loop BB63_44 Depth=2
; Parent Loop BB63_47 Depth=3
; => This Inner Loop Header: Depth=4
ldr x0, [x19, #16]
cbnz x0, LBB63_56
; %bb.59: ; in Loop: Header=BB63_58 Depth=4
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB63_61
; %bb.60: ; in Loop: Header=BB63_58 Depth=4
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
b LBB63_57
LBB63_61: ; in Loop: Header=BB63_58 Depth=4
mov w8, #0
b LBB63_57
LBB63_62: ; in Loop: Header=BB63_47 Depth=3
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB63_64
; %bb.63: ; in Loop: Header=BB63_47 Depth=3
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
eor w9, w25, #0x80
cbnz w9, LBB63_65
b LBB63_46
LBB63_64: ; in Loop: Header=BB63_47 Depth=3
mov w8, #0
eor w9, w25, #0x80
cbz w9, LBB63_46
LBB63_65: ; in Loop: Header=BB63_47 Depth=3
; Replicate run of w9 = w25 - 128 copies of w8 at stride 4,
; unrolled x4 when the run is long enough and no 32-bit index
; overflow would occur.
mov w10, w27
sub w12, w9, #1
cmp w12, #3
b.hs LBB63_67
; %bb.66: ; in Loop: Header=BB63_47 Depth=3
mov w11, #0
b LBB63_72
LBB63_67: ; in Loop: Header=BB63_47 Depth=3
mov w11, #0
lsl w13, w27, #2
lsl x15, x12, #2
tst x15, #0xffffffff00000000
cset w14, ne
add w16, w26, w13
add w15, w16, w15
cmp w15, w16
b.lt LBB63_72
; %bb.68: ; in Loop: Header=BB63_47 Depth=3
tbnz w14, #0, LBB63_72
; %bb.69: ; in Loop: Header=BB63_47 Depth=3
add x12, x12, #1
and x11, x12, #0x1fffffffc
add x10, x11, x10
add w13, w26, w13
mov x14, x11
LBB63_70: ; Parent Loop BB63_20 Depth=1
; Parent Loop BB63_44 Depth=2
; Parent Loop BB63_47 Depth=3
; => This Inner Loop Header: Depth=4
add w15, w13, #4
add w16, w13, #8
add w17, w13, #12
strb w8, [x24, w13, sxtw]
strb w8, [x24, w15, sxtw]
strb w8, [x24, w16, sxtw]
strb w8, [x24, w17, sxtw]
add w13, w13, #16
subs x14, x14, #4
b.ne LBB63_70
; %bb.71: ; in Loop: Header=BB63_47 Depth=3
cmp x12, x11
b.eq LBB63_45
LBB63_72: ; in Loop: Header=BB63_47 Depth=3
add w10, w26, w10, lsl #2
sub w11, w9, w11
LBB63_73: ; Parent Loop BB63_20 Depth=1
; Parent Loop BB63_44 Depth=2
; Parent Loop BB63_47 Depth=3
; => This Inner Loop Header: Depth=4
strb w8, [x24, w10, sxtw]
add w10, w10, #4
subs w11, w11, #1
b.ne LBB63_73
b LBB63_45
LBB63_74: ; in Loop: Header=BB63_20 Depth=1
; Scanline decoded: convert each RGBE pixel to float(s) in the
; output row (x27 walks output, stride x21 = comp*4 bytes).
ldr x25, [sp, #32] ; 8-byte Folded Reload
mov x26, x24
ldr x27, [sp, #56] ; 8-byte Folded Reload
LBB63_75: ; Parent Loop BB63_20 Depth=1
; => This Inner Loop Header: Depth=2
mov x0, x27
mov x1, x26
mov x2, x23
bl _hdr_convert
add x27, x27, x21
add x26, x26, #4
subs x25, x25, #1
b.ne LBB63_75
; %bb.76: ; in Loop: Header=BB63_20 Depth=1
; Advance output pointer by one row; loop until all rows done.
ldp x9, x10, [sp, #48] ; 16-byte Folded Reload
add x9, x9, #1
ldr x8, [sp, #24] ; 8-byte Folded Reload
add x10, x10, x8
str x10, [sp, #56] ; 8-byte Folded Spill
ldr x8, [sp, #40] ; 8-byte Folded Reload
cmp x9, x8
b.ne LBB63_20
b LBB63_78
LBB63_77:
mov x24, #0
LBB63_78:
; Success: free the scanline scratch buffer and exit via the
; canary check; x20 still holds the output image.
mov x0, x24
bl _free
b LBB63_14
LBB63_79:
; Scanline header was not the RLE marker: treat the 4 bytes already
; read as the first flat RGBE pixel and fall into the flat path.
strb w25, [sp, #64]
strb w26, [sp, #65]
strb w27, [sp, #66]
mov x0, x19
bl _get8
strb w0, [sp, #67]
add x1, sp, #64
mov x0, x20
mov x2, x23
bl _hdr_convert
mov x0, x24
bl _free
mov w21, #0
mov w24, #1
ldr x26, [sp, #8] ; 8-byte Folded Reload
ldr w27, [sp, #4] ; 4-byte Folded Reload
b LBB63_84
LBB63_80:
; Declared RLE length != width -> corrupt file, free and fail
; with l_.str.96.
mov x0, x20
bl _free
mov x0, x24
bl _free
mov x20, #0
Lloh352:
adrp x8, l_.str.96@PAGE
Lloh353:
add x8, x8, l_.str.96@PAGEOFF
b LBB63_13
LBB63_81:
bl ___stack_chk_fail
LBB63_82:
; ---- Flat (non-RLE) path: nested rows (w21) x columns (w24) loop
; reading one 4-byte RGBE pixel per iteration.
mov w24, #0
cmp w21, w26
b.ge LBB63_14
LBB63_83:
cmp w24, w22
b.ge LBB63_86
LBB63_84:
ldr x3, [x19, #16]
cbz x3, LBB63_87
; %bb.85:
add x0, sp, #68
mov w1, #1
mov w2, #4
bl _fread
ldrb w8, [sp, #71]
b LBB63_88
LBB63_86:
add w21, w21, #1
b LBB63_82
LBB63_87:
ldr x8, [x19, #24]
ldr w9, [x8], #4
str w9, [sp, #68]
str x8, [x19, #24]
lsr w8, w9, #24
LBB63_88:
; x25 = &out[(row*w + col) * comp]; w8 = exponent byte E.
mul w9, w27, w21
add x9, x20, w9, sxtw #2
mul w10, w24, w23
add x25, x9, w10, sxtw #2
cbz w8, LBB63_93
; %bb.89:
; Non-zero exponent: scale = ldexp(1.0, E - 136); grayscale
; (comp <= 2) averages R+G+B, otherwise store 3 scaled floats.
sub w0, w8, #136
fmov d0, #1.00000000
bl _ldexp
fcvt s0, d0
ldrb w8, [sp, #68]
cmp w23, #2
b.gt LBB63_96
; %bb.90:
ldrb w9, [sp, #69]
ldrb w10, [sp, #70]
add w8, w9, w8
add w8, w8, w10
scvtf s1, w8
fmul s0, s0, s1
fmov s1, #3.00000000
fdiv s0, s0, s1
str s0, [x25]
cmp w23, #4
b.eq LBB63_97
LBB63_91:
cmp w23, #2
b.ne LBB63_101
; %bb.92:
; comp == 2: alpha slot = 1.0f (0x3F800000).
mov w8, #1065353216
str w8, [x25, #4]
add w24, w24, #1
b LBB63_83
LBB63_93:
; Zero exponent: pixel is black; jump table on comp (1..4) decides
; how many zero/one components to store.
sub w8, w23, #1
cmp w8, #3
b.hi LBB63_101
; %bb.94:
Lloh354:
adrp x9, lJTI63_0@PAGE
Lloh355:
add x9, x9, lJTI63_0@PAGEOFF
adr x10, LBB63_95
ldrb w11, [x9, x8]
add x10, x10, x11, lsl #2
br x10
LBB63_95:
mov w8, #1065353216
str w8, [x25, #4]
b LBB63_100
LBB63_96:
ucvtf s1, w8
fmul s1, s0, s1
ldr b2, [sp, #69]
ucvtf s2, s2
fmul s2, s0, s2
stp s1, s2, [x25]
ldr b1, [sp, #70]
ucvtf s1, s1
fmul s0, s0, s1
str s0, [x25, #8]
cmp w23, #4
b.ne LBB63_91
LBB63_97:
; comp == 4: alpha = 1.0f.
mov w8, #1065353216
str w8, [x25, #12]
add w24, w24, #1
b LBB63_83
LBB63_98:
mov w8, #1065353216
str w8, [x25, #12]
LBB63_99:
stp wzr, wzr, [x25, #4]
LBB63_100:
str wzr, [x25]
LBB63_101:
add w24, w24, #1
b LBB63_83
.loh AdrpLdrGotLdr Lloh338, Lloh339, Lloh340
.loh AdrpAdd Lloh341, Lloh342
.loh AdrpAdd Lloh343, Lloh344
.loh AdrpAdd Lloh345, Lloh346
.loh AdrpAdd Lloh347, Lloh348
.loh AdrpLdrGotLdr Lloh349, Lloh350, Lloh351
.loh AdrpAdd Lloh352, Lloh353
.loh AdrpAdd Lloh354, Lloh355
.cfi_endproc
.section __TEXT,__const
; Jump table for the zero-exponent store above, indexed by comp-1;
; entries are (target - LBB63_95) >> 2 instruction offsets.
lJTI63_0:
.byte (LBB63_100-LBB63_95)>>2
.byte (LBB63_95-LBB63_95)>>2
.byte (LBB63_99-LBB63_95)>>2
.byte (LBB63_98-LBB63_95)>>2
; -- End function
.section __TEXT,__text,regular,pure_instructions
.globl _stbi_write_bmp ; -- Begin function stbi_write_bmp
.p2align 2
; int stbi_write_bmp(filename=x0, w=w1, h=w2, comp=w3, data=x4)
; Packs the fixed BMP header fields plus a format string (l_.str.3) into a
; stack-based varargs area and forwards everything to _outfile, which does
; the actual file I/O. w7 = 0 selects "no alpha byte" in _outfile.
_stbi_write_bmp: ; @stbi_write_bmp
.cfi_startproc
; %bb.0:
sub sp, sp, #176
.cfi_def_cfa_offset 176
stp x29, x30, [sp, #160] ; 16-byte Folded Spill
add x29, sp, #160
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
mov x6, x4 ; arg7 of _outfile = data
mov x5, x3 ; arg6 of _outfile = comp
; kill: def $w2 killed $w2 def $x2
; kill: def $w1 killed $w1 def $x1
and w8, w1, #0x3 ; w8 = row pad bytes = width & 3 (== (-3*width) & 3)
add w9, w1, w1, lsl #1 ; w9 = 3*width
add w9, w9, w8 ; w9 = bytes per padded row
mul w9, w9, w2
add w9, w9, #54 ; w9 = file size = rows*height + 54-byte header
movi.2d v0, #0000000000000000
; Zero-fill the reserved/zero header fields in the varargs area.
stur q0, [sp, #136]
stur q0, [sp, #120]
stur q0, [sp, #104]
mov w10, #24 ; 24 bits per pixel
mov w11, #1 ; 1 color plane
stp x11, x10, [sp, #88]
stp x1, x2, [sp, #72] ; width, height
mov w10, #40 ; BITMAPINFOHEADER size
mov w11, #54 ; pixel-data offset (14 + 40)
stp x11, x10, [sp, #56]
mov w10, #66 ; ASCII 'B'
Lloh356:
adrp x11, l_.str.3@PAGE
Lloh357:
add x11, x11, l_.str.3@PAGEOFF
stp x11, x10, [sp, #8] ; header format string + 'B'
mov w10, #77 ; ASCII 'M'
stur q0, [sp, #40]
str w8, [sp] ; stacked arg: scanline pad count
stp x10, x9, [sp, #24] ; 'M', file size
mov x3, x1 ; arg4 = width
mov x4, x2 ; arg5 = height
mov w7, #0 ; arg8 = 0: do not emit alpha bytes
bl _outfile
ldp x29, x30, [sp, #160] ; 16-byte Folded Reload
add sp, sp, #176
ret
.loh AdrpAdd Lloh356, Lloh357
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function outfile
; Shared BMP/TGA writer. Opens filename (x0) with mode string l_.str.97,
; writes the formatted header via _writefv using the caller's stacked
; varargs ([x29,#24] = format, [x29,#32] = va area), then walks the image
; bottom-up writing pixel rows, closes the file, and returns (FILE* != 0).
; Saved register args: x19 = w7 alpha flag, x24 = x6 data pointer,
; x20 = w5 comp, x25 = w4 y (rows), [sp,#56] = w3 x (columns).
; [x29,#16] is a stacked arg used as the per-row pad byte count
; (NOTE(review): inferred from the fwrite of zeros below — confirm).
_outfile: ; @outfile
.cfi_startproc
; %bb.0:
sub sp, sp, #176
.cfi_def_cfa_offset 176
stp x28, x27, [sp, #80] ; 16-byte Folded Spill
stp x26, x25, [sp, #96] ; 16-byte Folded Spill
stp x24, x23, [sp, #112] ; 16-byte Folded Spill
stp x22, x21, [sp, #128] ; 16-byte Folded Spill
stp x20, x19, [sp, #144] ; 16-byte Folded Spill
stp x29, x30, [sp, #160] ; 16-byte Folded Spill
add x29, sp, #160
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
mov x19, x7 ; x19 = write-alpha flag
mov x24, x6 ; x24 = pixel data
mov x20, x5 ; x20 = comp (bytes per pixel)
mov x25, x4 ; x25 = y (row count)
str x3, [sp, #56] ; 8-byte Folded Spill ; x (column count)
Lloh358:
adrp x1, l_.str.97@PAGE
Lloh359:
add x1, x1, l_.str.97@PAGEOFF
bl _fopen
mov x22, x0 ; x22 = FILE* (kept for the final NULL test)
cbz x0, LBB65_18 ; fopen failed -> return 0
; %bb.1:
; Emit the header: _writefv(f, format=[x29,#24], va_list at [x29,#32]).
ldr x1, [x29, #24]
add x8, x29, #32
str x8, [sp, #64]
add x2, x29, #32
mov x0, x22
bl _writefv
str wzr, [sp, #76] ; 4-byte zero buffer used as the pad source
cbz w25, LBB65_17 ; y == 0: header only
; %bb.2:
; Precompute the bottom-up walk:
;   x28 = data + (y-1)*comp*x (start of last row)
;   [sp,#40] = -comp*x (per-row step), [sp,#48] = pad byte count
;   x27 = comp-1 (offset of the last channel byte), w21 = comp-1
ldr w11, [x29, #16]
sxtw x26, w20
sub x27, x26, #1
sxtw x25, w25
ldr x10, [sp, #56] ; 8-byte Folded Reload
sxtw x8, w10
mov w9, w10
str x9, [sp, #32] ; 8-byte Folded Spill
sub x9, x25, #1
mul x9, x9, x26
madd x28, x9, x8, x24
smnegl x8, w20, w10
stp x8, x11, [sp, #40] ; 16-byte Folded Spill
sub w21, w20, #1
b LBB65_4
LBB65_3: ; in Loop: Header=BB65_4 Depth=1
; End of row: fwrite(&zero, pad, 1, f) then step x28 back one row.
add x0, sp, #76
ldr x1, [sp, #48] ; 8-byte Folded Reload
mov w2, #1
mov x3, x22
bl _fwrite
ldr x8, [sp, #40] ; 8-byte Folded Reload
add x28, x28, x8
cbz w25, LBB65_17
LBB65_4: ; =>This Loop Header: Depth=1
; Child Loop BB65_7 Depth 2
sub x25, x25, #1
ldr x8, [sp, #56] ; 8-byte Folded Reload
cmp w8, #1
b.lt LBB65_3 ; empty row
; %bb.5: ; in Loop: Header=BB65_4 Depth=1
mov x24, x28 ; x24 = current pixel
ldr x23, [sp, #32] ; 8-byte Folded Reload ; x23 = columns remaining
b LBB65_7
LBB65_6: ; in Loop: Header=BB65_7 Depth=2
add x24, x24, x26 ; advance one pixel (comp bytes)
subs x23, x23, #1
b.eq LBB65_3
LBB65_7: ; Parent Loop BB65_4 Depth=1
; => This Inner Loop Header: Depth=2
cmp w21, #2
b.lo LBB65_12 ; comp < 3: monochrome path
; %bb.8: ; in Loop: Header=BB65_7 Depth=2
cmp w20, #3
b.eq LBB65_11
; %bb.9: ; in Loop: Header=BB65_7 Depth=2
cmp w20, #4
b.ne LBB65_14
; %bb.10: ; in Loop: Header=BB65_7 Depth=2
cbz w19, LBB65_16 ; comp==4, alpha flag off: blend path
LBB65_11: ; in Loop: Header=BB65_7 Depth=2
; RGB: push the three channel bytes as varargs for _writef.
ldrb w8, [x24, #2]
ldrb w9, [x24, #1]
ldrb w10, [x24]
stp x9, x10, [sp, #8]
b LBB65_13
LBB65_12: ; in Loop: Header=BB65_7 Depth=2
; Monochrome: replicate the single byte into all three slots.
ldrb w8, [x24]
stp x8, x8, [sp, #8]
LBB65_13: ; in Loop: Header=BB65_7 Depth=2
str x8, [sp]
mov x0, x22
bl _writef
LBB65_14: ; in Loop: Header=BB65_7 Depth=2
cmp w19, #1
b.lt LBB65_6
; %bb.15: ; in Loop: Header=BB65_7 Depth=2
; Alpha flag set: write the last channel byte raw.
add x0, x24, x27
mov w1, #1
mov w2, #1
mov x3, x22
bl _fwrite
b LBB65_6
LBB65_16: ; in Loop: Header=BB65_7 Depth=2
; comp==4 without alpha output: fold alpha into the color channels.
; (v*32897)>>23 approximates v/255 (2^23/255 ~= 32896.06).
ldrb w8, [x24, #3] ; w8 = alpha
ldrb w9, [x24]
eor w9, w9, #0xff
mul w9, w9, w8
mov w12, #32897
mul w9, w9, w12
mov w13, #255
sub w9, w13, w9, lsr #23 ; ch0 = 255 - (255-c0)*a/255
ldrb w10, [x24, #1]
mul w10, w10, w8
mul w10, w10, w12
lsr w10, w10, #23 ; ch1 = c1*a/255
ldrb w11, [x24, #2]
eor w11, w11, #0xff
mul w8, w11, w8
mul w8, w8, w12
sub w8, w13, w8, lsr #23 ; ch2 = 255 - (255-c2)*a/255
stp x10, x9, [sp, #8]
b LBB65_13
LBB65_17:
mov x0, x22
bl _fclose
LBB65_18:
; Return 1 iff fopen succeeded.
cmp x22, #0
cset w0, ne
ldp x29, x30, [sp, #160] ; 16-byte Folded Reload
ldp x20, x19, [sp, #144] ; 16-byte Folded Reload
ldp x22, x21, [sp, #128] ; 16-byte Folded Reload
ldp x24, x23, [sp, #112] ; 16-byte Folded Reload
ldp x26, x25, [sp, #96] ; 16-byte Folded Reload
ldp x28, x27, [sp, #80] ; 16-byte Folded Reload
add sp, sp, #176
ret
.loh AdrpAdd Lloh358, Lloh359
.cfi_endproc
; -- End function
.globl _stbi_write_tga ; -- Begin function stbi_write_tga
.p2align 2
; int stbi_write_tga(filename=x0, w=w1, h=w2, comp=w3, data=x4)
; Builds the TGA header fields in a stack varargs area (format string
; l_.str.4) and forwards to _outfile. w7 = (~comp)&1 is 1 for even comp
; (2 or 4), i.e. the image carries an alpha channel; it doubles as the
; alpha flag argument to _outfile.
_stbi_write_tga: ; @stbi_write_tga
.cfi_startproc
; %bb.0:
sub sp, sp, #128
.cfi_def_cfa_offset 128
stp x29, x30, [sp, #112] ; 16-byte Folded Spill
add x29, sp, #112
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
mov x6, x4 ; arg7 = data
mov x5, x3 ; arg6 = comp
; kill: def $w2 killed $w2 def $x2
; kill: def $w1 killed $w1 def $x1
mvn w8, w3
and w7, w8, #0x1 ; w7 = has_alpha = (~comp) & 1
lsl w8, w7, #3 ; w8 = alpha bit count (0 or 8)
add w9, w8, #24 ; w9 = bits per pixel (24 or 32)
stp xzr, x1, [sp, #72]
movi.2d v0, #0000000000000000
; Zero-fill the remaining header fields.
stur q0, [sp, #56]
stur q0, [sp, #40]
mov w10, #2 ; presumably TGA image type 2 (uncompressed true-color) -- confirm
str x10, [sp, #32]
Lloh360:
adrp x10, l_.str.4@PAGE
Lloh361:
add x10, x10, l_.str.4@PAGEOFF
str q0, [sp, #16]
str x10, [sp, #8] ; header format string
str wzr, [sp] ; stacked arg: pad count = 0
stp x9, x8, [sp, #96] ; bpp, alpha bits
str x2, [sp, #88] ; height
mov x3, x1 ; arg4 = width
mov x4, x2 ; arg5 = height
bl _outfile
ldp x29, x30, [sp, #112] ; 16-byte Folded Reload
add sp, sp, #128
ret
.loh AdrpAdd Lloh360, Lloh361
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function resample_row_1
; 1:1 "resampler": no work to do — return the near-row pointer (arg 2)
; unchanged. out (x0), in_far (x2) and w (w3) are ignored.
_resample_row_1: ; @resample_row_1
.cfi_startproc
; %bb.0:
orr x0, xzr, x1 ; x0 = x1 (ORR-with-XZR canonical form of the MOV alias)
ret
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function resample_row_v_2
; Vertical 2x chroma upsample: out[i] = (3*near[i] + far[i] + 2) >> 2.
; x0 = out, x1 = in_near, x2 = in_far, w3 = width (bytes).
; Three paths: a 64-lane NEON loop (LBB68_8), an 8-lane NEON loop
; (LBB68_12), and a scalar byte tail (LBB68_15). The vector paths are
; only taken when out is at least 64 bytes away from both inputs
; (aliasing guard) and width >= 8.
_resample_row_v_2: ; @resample_row_v_2
.cfi_startproc
; %bb.0:
cmp w3, #1
b.lt LBB68_16 ; width <= 0: nothing to do
; %bb.1:
mov w8, w3 ; x8 = zero-extended width
cmp w3, #8
b.hs LBB68_3
; %bb.2:
mov x9, #0
b LBB68_14 ; short row: scalar only
LBB68_3:
; Aliasing guards: require out at least 64 bytes past each input.
mov x9, #0
sub x10, x0, x1
cmp x10, #64
b.lo LBB68_14
; %bb.4:
sub x10, x0, x2
cmp x10, #64
b.lo LBB68_14
; %bb.5:
cmp w3, #64
b.hs LBB68_7
; %bb.6:
mov x9, #0
b LBB68_11
LBB68_7:
; 64-bytes-per-iteration NEON loop over x9 = width & ~63 lanes.
and x9, x8, #0xffffffc0
add x10, x0, #32
add x11, x1, #32
add x12, x2, #32
movi.16b v0, #3 ; multiplier 3 (16-lane)
movi.8b v1, #3 ; multiplier 3 (8-lane)
movi.8h v2, #2 ; rounding bias +2
mov x13, x9
LBB68_8: ; =>This Inner Loop Header: Depth=1
; Widen far to 16 bits, accumulate 3*near, add 2, narrow with >>2.
ldp q3, q4, [x11, #-32]
ldp q5, q6, [x11], #64
ldp q7, q16, [x12, #-32]
ldp q17, q18, [x12], #64
ushll2.8h v19, v7, #0
ushll.8h v7, v7, #0
ushll2.8h v20, v16, #0
ushll.8h v16, v16, #0
ushll2.8h v21, v17, #0
ushll.8h v17, v17, #0
ushll2.8h v22, v18, #0
ushll.8h v18, v18, #0
umlal.8h v7, v3, v1
umlal2.8h v19, v3, v0
umlal.8h v16, v4, v1
umlal2.8h v20, v4, v0
umlal.8h v17, v5, v1
umlal2.8h v21, v5, v0
umlal.8h v18, v6, v1
umlal2.8h v22, v6, v0
add.8h v3, v19, v2
add.8h v4, v7, v2
add.8h v5, v20, v2
add.8h v6, v16, v2
add.8h v7, v21, v2
add.8h v16, v17, v2
add.8h v17, v22, v2
add.8h v18, v18, v2
shrn.8b v4, v4, #2
shrn2.16b v4, v3, #2
shrn.8b v3, v6, #2
shrn2.16b v3, v5, #2
shrn.8b v5, v16, #2
shrn2.16b v5, v7, #2
shrn.8b v6, v18, #2
shrn2.16b v6, v17, #2
stp q4, q3, [x10, #-32]
stp q5, q6, [x10], #64
subs x13, x13, #64
b.ne LBB68_8
; %bb.9:
cmp x9, x8
b.eq LBB68_16 ; width was a multiple of 64: done
; %bb.10:
tst x8, #0x38
b.eq LBB68_14 ; remainder < 8: straight to scalar tail
LBB68_11:
; 8-bytes-per-iteration NEON loop over the next width & ~7 lanes.
mov x13, x9
and x9, x8, #0xfffffff8
add x10, x1, x13
add x11, x2, x13
add x12, x0, x13
sub x13, x13, x9
movi.8b v0, #3
movi.8h v1, #2
LBB68_12: ; =>This Inner Loop Header: Depth=1
ldr d2, [x10], #8
ldr d3, [x11], #8
ushll.8h v3, v3, #0
umlal.8h v3, v2, v0
add.8h v2, v3, v1
shrn.8b v2, v2, #2
str d2, [x12], #8
adds x13, x13, #8
b.ne LBB68_12
; %bb.13:
cmp x9, x8
b.eq LBB68_16
LBB68_14:
; Scalar tail from offset x9 to the end of the row.
add x10, x0, x9
add x11, x2, x9
add x12, x1, x9
sub x8, x8, x9
LBB68_15: ; =>This Inner Loop Header: Depth=1
ldrb w9, [x12], #1
add w9, w9, w9, lsl #1 ; 3 * near
ldrb w13, [x11], #1
add w9, w13, w9 ; + far
add w9, w9, #2 ; + rounding
lsr w9, w9, #2 ; / 4
strb w9, [x10], #1
subs x8, x8, #1
b.ne LBB68_15
LBB68_16:
ret
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function resample_row_h_2
; Horizontal 2x chroma upsample. x0 = out, x1 = in_near, w3 = width.
; Only in_near is read (in_far/x2 is untouched). For interior i:
;   out[2i]   = (3*in[i] + in[i-1] + 2) >> 2
;   out[2i+1] = (3*in[i] + in[i+1] + 2) >> 2
; Edges replicate: out[0] = in[0], out[2w-1] = in[w-1]. NEON paths at
; LBB69_15 (8 lanes) and LBB69_18 (16 lanes) are guarded by overlap
; checks between out and in (LBB69_4/5).
_resample_row_h_2: ; @resample_row_h_2
.cfi_startproc
; %bb.0:
ldrb w9, [x1]
subs w8, w3, #1 ; w8 = width - 1
b.ne LBB69_2
; %bb.1:
; width == 1: duplicate the single input byte.
strb w9, [x0, #1]
strb w9, [x0]
ret
LBB69_2:
strb w9, [x0] ; out[0] = in[0]
ldrb w9, [x1]
add w9, w9, w9, lsl #1
ldrb w10, [x1, #1]
add w9, w10, w9
add w9, w9, #2
lsr w9, w9, #2
strb w9, [x0, #1] ; out[1] = (3*in[0] + in[1] + 2) >> 2
cmp w3, #3
b.lt LBB69_10
; %bb.3:
sub x9, x8, #1 ; x9 = interior element count
cmp x9, #8
b.lo LBB69_6
; %bb.4:
; Overlap guards between out[2..] and in[..w].
add x10, x0, #2
add x11, x8, x1
add x11, x11, #1
cmp x10, x11
b.hs LBB69_12
; %bb.5:
add x10, x0, x8, lsl #1
cmp x10, x1
b.ls LBB69_12
LBB69_6:
mov w11, #1
LBB69_7:
; Scalar interior loop starting at input index x11.
sub x9, x8, x11
add x10, x1, x11
add x11, x0, x11, lsl #1
add x11, x11, #1
LBB69_8: ; =>This Inner Loop Header: Depth=1
ldrb w12, [x10]
add w12, w12, w12, lsl #1 ; 3*in[i]
add w12, w12, #2
ldurb w13, [x10, #-1]
add w13, w12, w13
lsr w13, w13, #2
sturb w13, [x11, #-1] ; out[2i] from the left neighbor
ldrb w13, [x10, #1]!
add w12, w12, w13
lsr w12, w12, #2
strb w12, [x11], #2 ; out[2i+1] from the right neighbor
subs x9, x9, #1
b.ne LBB69_8
LBB69_9:
lsl w9, w8, #1 ; w9 = 2*(width-1), index of the last two outputs
b LBB69_11
LBB69_10:
mov w9, #2 ; width == 2: tail writes out[2]/out[3]
LBB69_11:
; Final pair: out[2w-2] = (3*in[w-2-? see loads] ...) and
; out[2w-1] = in[w-1] (edge replicate).
add x10, x1, w3, sxtw
ldurb w10, [x10, #-2]
add w10, w10, w10, lsl #1
; kill: def $w8 killed $w8 killed $x8 def $x8
sxtw x8, w8
ldrb w11, [x1, x8]
add w10, w11, w10
add w10, w10, #2
lsr w10, w10, #2
mov w9, w9
strb w10, [x0, x9]
ldrb w8, [x1, x8]
orr x9, x9, #0x1
strb w8, [x0, x9]
ret
LBB69_12:
cmp x9, #16
b.hs LBB69_17
; %bb.13:
mov x10, #0
LBB69_14:
; 8-lane NEON interior loop; st2 interleaves the even/odd outputs.
and x12, x9, #0xfffffffffffffff8
orr x11, x12, #0x1
add x13, x10, x1
add x13, x13, #2
add x14, x0, x10, lsl #1
add x14, x14, #2
sub x10, x10, x12
movi.8b v0, #3
LBB69_15: ; =>This Inner Loop Header: Depth=1
ldur d1, [x13, #-1] ; in[i]
movi.8h v2, #2
umlal.8h v2, v1, v0 ; 3*in[i] + 2
ldur d1, [x13, #-2] ; in[i-1]
uaddw.8h v1, v2, v1
shrn.8b v3, v1, #2 ; even outputs
ldr d1, [x13], #8 ; in[i+1]
uaddw.8h v1, v2, v1
shrn.8b v4, v1, #2 ; odd outputs
st2.8b { v3, v4 }, [x14], #16
adds x10, x10, #8
b.ne LBB69_15
; %bb.16:
cmp x9, x12
b.ne LBB69_7 ; leftover interior elements: scalar loop
b LBB69_9
LBB69_17:
; 16-lane NEON interior loop (same formula, double width).
and x10, x9, #0xfffffffffffffff0
add x11, x1, #2
add x12, x0, #2
movi.16b v0, #3
movi.8b v1, #3
mov x13, x10
LBB69_18: ; =>This Inner Loop Header: Depth=1
ldur q2, [x11, #-1]
movi.8h v3, #2
umlal.8h v3, v2, v1
movi.8h v4, #2
umlal2.8h v4, v2, v0
ldur q2, [x11, #-2]
uaddw2.8h v5, v4, v2
uaddw.8h v2, v3, v2
shrn.8b v6, v2, #2
shrn2.16b v6, v5, #2
ldr q2, [x11], #16
uaddw2.8h v4, v4, v2
uaddw.8h v2, v3, v2
shrn.8b v7, v2, #2
shrn2.16b v7, v4, #2
st2.16b { v6, v7 }, [x12], #32
subs x13, x13, #16
b.ne LBB69_18
; %bb.19:
cmp x9, x10
b.eq LBB69_9
; %bb.20:
tbnz w9, #3, LBB69_14 ; >= 8 left: 8-lane loop next
; %bb.21:
orr x11, x10, #0x1
b LBB69_7 ; < 8 left: finish scalar
.cfi_endproc
; -- End function
.section __TEXT,__literal16,16byte_literals
.p2align 4 ; -- Begin function resample_row_hv_2
; TBL shuffle masks for the vector path of _resample_row_hv_2.
; lCPI70_0..lCPI70_3 expand byte groups 0-3 / 4-7 / 12-15 / 8-11 of a
; 16-byte register into the low byte of each 32-bit lane (0xff lanes
; produce zero). lCPI70_4 gathers byte 0 of every 32-bit lane across a
; 4-register tbl group (strides of 4, 0..60).
lCPI70_0:
.byte 0 ; 0x0
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 1 ; 0x1
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 2 ; 0x2
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 3 ; 0x3
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 255 ; 0xff
lCPI70_1:
.byte 4 ; 0x4
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 5 ; 0x5
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 6 ; 0x6
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 7 ; 0x7
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 255 ; 0xff
lCPI70_2:
.byte 12 ; 0xc
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 13 ; 0xd
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 14 ; 0xe
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 15 ; 0xf
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 255 ; 0xff
lCPI70_3:
.byte 8 ; 0x8
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 9 ; 0x9
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 10 ; 0xa
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 11 ; 0xb
.byte 255 ; 0xff
.byte 255 ; 0xff
.byte 255 ; 0xff
lCPI70_4:
.byte 0 ; 0x0
.byte 4 ; 0x4
.byte 8 ; 0x8
.byte 12 ; 0xc
.byte 16 ; 0x10
.byte 20 ; 0x14
.byte 24 ; 0x18
.byte 28 ; 0x1c
.byte 32 ; 0x20
.byte 36 ; 0x24
.byte 40 ; 0x28
.byte 44 ; 0x2c
.byte 48 ; 0x30
.byte 52 ; 0x34
.byte 56 ; 0x38
.byte 60 ; 0x3c
.section __TEXT,__text,regular,pure_instructions
.p2align 2
; 2x upsample in both directions. x0 = out, x1 = in_near, x2 = in_far,
; w3 = width. With t[i] = 3*near[i] + far[i], interior outputs are
;   out[2i]   = (3*t[i-1] + t[i] + 8) >> 4
;   out[2i+1] = (3*t[i] + t[i+1] + 8) >> 4
; (visible directly in the scalar loop LBB70_11); edges use
; (t + 2) >> 2. The NEON path LBB70_8 computes the same with 32-bit
; lanes, carrying the previous t across iterations in v7.
_resample_row_hv_2: ; @resample_row_hv_2
.cfi_startproc
; %bb.0:
ldrb w8, [x1]
add w8, w8, w8, lsl #1 ; 3*near[0]
cmp w3, #1
b.ne LBB70_2
; %bb.1:
; width == 1: single output pair (t[0] + 2) >> 2.
ldrb w9, [x2]
add w8, w9, w8
add w8, w8, #2
lsr w8, w8, #2
strb w8, [x0, #1]
strb w8, [x0]
ret
LBB70_2:
ldrb w9, [x2]
add w12, w8, w9 ; w12 = t[0]
add w8, w12, #2
lsr w8, w8, #2
strb w8, [x0] ; out[0] = (t[0] + 2) >> 2
cmp w3, #2
b.lt LBB70_13
; %bb.3:
mov w8, w3
sub x10, x8, #1 ; interior count
cmp x10, #16
b.hs LBB70_5
; %bb.4:
mov w9, #1
b LBB70_10
LBB70_5:
; Overlap guards between out and both input rows.
add x9, x0, #1
add x11, x0, x8, lsl #1
sub x13, x11, #1
add x14, x1, #1
add x15, x1, x8
add x11, x2, #1
add x16, x2, x8
cmp x9, x16
ccmp x11, x13, #2, lo
cset w11, lo
cmp x14, x13
ccmp x9, x15, #2, lo
mov w9, #1
b.lo LBB70_10
; %bb.6:
tbnz w11, #0, LBB70_10
; %bb.7:
; Vector setup: v7 seeds the "previous t" lane with t[0] (w12).
and x11, x10, #0xfffffffffffffff0
orr x9, x11, #0x1
dup.4s v7, w12
add x12, x1, #1
add x13, x2, #1
add x14, x0, #1
Lloh362:
adrp x15, lCPI70_0@PAGE
Lloh363:
ldr q0, [x15, lCPI70_0@PAGEOFF]
Lloh364:
adrp x15, lCPI70_1@PAGE
Lloh365:
ldr q1, [x15, lCPI70_1@PAGEOFF]
Lloh366:
adrp x15, lCPI70_2@PAGE
Lloh367:
ldr q2, [x15, lCPI70_2@PAGEOFF]
Lloh368:
adrp x15, lCPI70_3@PAGE
Lloh369:
ldr q3, [x15, lCPI70_3@PAGEOFF]
movi.4s v4, #3 ; multiplier 3
movi.4s v5, #8 ; rounding bias +8
Lloh370:
adrp x15, lCPI70_4@PAGE
Lloh371:
ldr q6, [x15, lCPI70_4@PAGEOFF]
mov x15, x11
LBB70_8: ; =>This Inner Loop Header: Depth=1
; Expand 16 near/far bytes to four 4x32 groups, form t = 3*near + far,
; then combine each t with its shifted-by-one neighbor (ext #12),
; add 8, >> 4, and interleave the two output streams with st2.
ldr q16, [x12], #16
tbl.16b v17, { v16 }, v0
tbl.16b v18, { v16 }, v1
tbl.16b v19, { v16 }, v2
ldr q20, [x13], #16
tbl.16b v16, { v16 }, v3
tbl.16b v21, { v20 }, v3
tbl.16b v22, { v20 }, v1
tbl.16b v23, { v20 }, v0
mla.4s v23, v17, v4
ext.16b v17, v7, v23, #12
tbl.16b v7, { v20 }, v2
mla.4s v22, v18, v4
mla.4s v7, v19, v4
mla.4s v21, v16, v4
ext.16b v16, v22, v21, #12
ext.16b v18, v23, v22, #12
ext.16b v19, v21, v7, #12
mov.16b v20, v7
mov.16b v24, v21
mla.4s v24, v16, v4
mov.16b v25, v22
mla.4s v25, v18, v4
mla.4s v20, v19, v4
mov.16b v26, v23
mla.4s v26, v17, v4
add.4s v26, v26, v5
add.4s v25, v25, v5
add.4s v24, v24, v5
add.4s v20, v20, v5
ushr.4s v30, v20, #4
ushr.4s v29, v24, #4
ushr.4s v28, v25, #4
mla.4s v19, v7, v4
mla.4s v16, v21, v4
ushr.4s v27, v26, #4
mla.4s v18, v22, v4
mla.4s v17, v23, v4
add.4s v17, v17, v5
add.4s v18, v18, v5
add.4s v16, v16, v5
tbl.16b v20, { v27, v28, v29, v30 }, v6
add.4s v19, v19, v5
ushr.4s v25, v19, #4
ushr.4s v24, v16, #4
ushr.4s v23, v18, #4
ushr.4s v22, v17, #4
tbl.16b v21, { v22, v23, v24, v25 }, v6
st2.16b { v20, v21 }, [x14], #32
subs x15, x15, #16
b.ne LBB70_8
; %bb.9:
mov.s w12, v7[3] ; w12 = last t, carried into the scalar loop / tail
mov x13, x12
cmp x10, x11
b.eq LBB70_12
LBB70_10:
; Scalar interior loop from index x9; w12 holds t[i-1], w13 t[i].
add x10, x2, x9
add x11, x1, x9
sub x8, x8, x9
add x9, x0, x9, lsl #1
LBB70_11: ; =>This Inner Loop Header: Depth=1
ldrb w13, [x11], #1
add w13, w13, w13, lsl #1
ldrb w14, [x10], #1
add w15, w12, w12, lsl #1 ; 3*t[i-1]
add w13, w13, w14 ; w13 = t[i]
add w14, w15, w13
add w14, w14, #8
lsr w14, w14, #4
sturb w14, [x9, #-1] ; out[2i] = (3*t[i-1] + t[i] + 8) >> 4
add w14, w13, w13, lsl #1 ; 3*t[i]
add w12, w12, w14
add w12, w12, #8
lsr w12, w12, #4
strb w12, [x9], #2 ; out[2i+1] = (3*t[i] + t[i-1] + 8) >> 4
mov x12, x13
subs x8, x8, #1
b.ne LBB70_11
LBB70_12:
add w8, w13, #2
lsr w8, w8, #2 ; final edge: (t[w-1] + 2) >> 2
LBB70_13:
lsl w9, w3, #1
add x9, x0, w9, sxtw
sturb w8, [x9, #-1] ; out[2w-1]
ret
.loh AdrpLdr Lloh370, Lloh371
.loh AdrpAdrp Lloh368, Lloh370
.loh AdrpLdr Lloh368, Lloh369
.loh AdrpAdrp Lloh366, Lloh368
.loh AdrpLdr Lloh366, Lloh367
.loh AdrpAdrp Lloh364, Lloh366
.loh AdrpLdr Lloh364, Lloh365
.loh AdrpAdrp Lloh362, Lloh364
.loh AdrpLdr Lloh362, Lloh363
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function resample_row_generic
; Nearest-neighbor resampler: for each i < w (w3), replicate the byte
; in_near[i] into hs (w4) consecutive output bytes:
;   out[i*hs + j] = in_near[i], 0 <= j < hs.
; x0 = out, x1 = in_near, w3 = w, w4 = hs; x2 (in_far) is unused.
; Inner replication uses a 64-byte ld1r/stp splat loop (LBB71_11), an
; 8-byte loop (LBB71_15), and a byte tail (LBB71_17), chosen per row
; after an aliasing check (%bb.6).
_resample_row_generic: ; @resample_row_generic
.cfi_startproc
; %bb.0:
; kill: def $w4 killed $w4 def $x4
cmp w3, #1
b.lt LBB71_18 ; w <= 0: nothing to do
; %bb.1:
; Hoisted invariants: x9 = sign-extended hs (row stride), x10 = w,
; x11 = zero-extended hs, x12/x13/x14 = hs rounded to 64/mid bits/8.
mov x8, #0
sxtw x9, w4
mov w10, w3
mov w11, w4
and x12, x11, #0xffffffc0
and x13, x11, #0x38
and x14, x11, #0xfffffff8
add x15, x0, #32
neg x16, x14
mov x17, x0 ; x17 = start of current output run
b LBB71_3
LBB71_2: ; in Loop: Header=BB71_3 Depth=1
add x8, x8, #1
add x15, x15, x9
add x17, x17, x9
cmp x8, x10
b.eq LBB71_18
LBB71_3: ; =>This Loop Header: Depth=1
; Child Loop BB71_11 Depth 2
; Child Loop BB71_15 Depth 2
; Child Loop BB71_17 Depth 2
cmp w4, #1
b.lt LBB71_2 ; hs <= 0: skip
; %bb.4: ; in Loop: Header=BB71_3 Depth=1
cmp w4, #8
b.hs LBB71_6
; %bb.5: ; in Loop: Header=BB71_3 Depth=1
mov x2, #0
b LBB71_17 ; small hs: byte loop
LBB71_6: ; in Loop: Header=BB71_3 Depth=1
; Aliasing check between the output run and in_near[i].
mul x2, x8, x9
add x3, x2, x11
add x3, x0, x3
add x5, x1, x8
add x6, x5, #1
add x2, x0, x2
cmp x2, x6
ccmp x5, x3, #2, lo
b.lo LBB71_9
; %bb.7: ; in Loop: Header=BB71_3 Depth=1
cmp w4, #64
b.hs LBB71_10
; %bb.8: ; in Loop: Header=BB71_3 Depth=1
mov x3, #0
b LBB71_14
LBB71_9: ; in Loop: Header=BB71_3 Depth=1
mov x2, #0
b LBB71_17 ; overlapping: byte loop is always safe
LBB71_10: ; in Loop: Header=BB71_3 Depth=1
; Splat in_near[i] across a q register, store 64 bytes per iteration.
add x2, x1, x8
ld1r.16b { v0 }, [x2]
mov x2, x15
mov x3, x12
LBB71_11: ; Parent Loop BB71_3 Depth=1
; => This Inner Loop Header: Depth=2
stp q0, q0, [x2, #-32]
stp q0, q0, [x2], #64
subs x3, x3, #64
b.ne LBB71_11
; %bb.12: ; in Loop: Header=BB71_3 Depth=1
cmp x12, x11
b.eq LBB71_2
; %bb.13: ; in Loop: Header=BB71_3 Depth=1
mov x3, x12
mov x2, x12
cbz x13, LBB71_17
LBB71_14: ; in Loop: Header=BB71_3 Depth=1
add x2, x17, x3
add x3, x16, x3
LBB71_15: ; Parent Loop BB71_3 Depth=1
; => This Inner Loop Header: Depth=2
; 8-byte splat stores for the 8..63 byte remainder.
add x5, x1, x8
ld1r.8b { v0 }, [x5]
str d0, [x2], #8
adds x3, x3, #8
b.ne LBB71_15
; %bb.16: ; in Loop: Header=BB71_3 Depth=1
mov x2, x14
cmp x14, x11
b.eq LBB71_2
LBB71_17: ; Parent Loop BB71_3 Depth=1
; => This Inner Loop Header: Depth=2
; Byte-at-a-time tail: out_run[x2] = in_near[i].
ldrb w3, [x1, x8]
strb w3, [x17, x2]
add x2, x2, #1
cmp x11, x2
b.ne LBB71_17
b LBB71_2
LBB71_18:
ret
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function process_marker
; int process_marker(state=x0->x19, marker=w1)
; Parses one JPEG marker segment. Bytes come either from the FILE* at
; [x19,#16] (via fgetc, EOF mapped to 0) or, when that is NULL, from the
; in-memory cursor/limit pointer pair at [x19,#24]/[x19,#32].
; Jump table lJTI72_0 covers markers 0xC2..0xDD (w1 - 194):
;   index 0  (0xC2) -> LBB72_2:  fail with reason l_.str.16
;                      (presumably "progressive jpeg" -- string not visible)
;   index 2  (0xC4) -> LBB72_11: DHT, read Huffman tables via _build_huffman
;   index 25 (0xDB) -> LBB72_14: DQT, read quant tables in dezigzag order
;   index 27 (0xDD) -> LBB72_17: DRI, expects a 4-byte segment and stores
;                      the 16-bit restart interval at [x19,#14084]
;   all other entries -> LBB72_5 (fall through to the checks below)
; Outside the table: 0xFF -> fail (l_.str.15); 0xFE (COM) and 0xE0-0xEF
; (APPn) -> read the 2-byte big-endian length and skip length-2 payload
; bytes; anything else -> return 0 with no reason set (LBB72_92).
; Returns 1 on success, 0 on failure (sets _failure_reason).
; Stack-protected via ___stack_chk_guard.
_process_marker: ; @process_marker
.cfi_startproc
; %bb.0:
sub sp, sp, #176
.cfi_def_cfa_offset 176
stp x28, x27, [sp, #80] ; 16-byte Folded Spill
stp x26, x25, [sp, #96] ; 16-byte Folded Spill
stp x24, x23, [sp, #112] ; 16-byte Folded Spill
stp x22, x21, [sp, #128] ; 16-byte Folded Spill
stp x20, x19, [sp, #144] ; 16-byte Folded Spill
stp x29, x30, [sp, #160] ; 16-byte Folded Spill
add x29, sp, #160
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
mov x19, x0
Lloh372:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh373:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh374:
ldr x8, [x8]
str x8, [sp, #72] ; stack canary
sub w8, w1, #194 ; table index = marker - 0xC2
cmp w8, #27
b.hi LBB72_3
; %bb.1:
Lloh375:
adrp x9, lJTI72_0@PAGE
Lloh376:
add x9, x9, lJTI72_0@PAGEOFF
adr x10, LBB72_2
ldrb w11, [x9, x8]
add x10, x10, x11, lsl #2
br x10
LBB72_2:
mov w19, #0
Lloh377:
adrp x8, l_.str.16@PAGE
Lloh378:
add x8, x8, l_.str.16@PAGEOFF
b LBB72_100
LBB72_3:
cmp w1, #255
b.ne LBB72_5
; %bb.4:
; Marker 0xFF is invalid here.
mov w19, #0
Lloh379:
adrp x8, l_.str.15@PAGE
Lloh380:
add x8, x8, l_.str.15@PAGEOFF
b LBB72_100
LBB72_5:
cmp w1, #254
b.eq LBB72_7
; %bb.6:
and w8, w1, #0xfffffff0
cmp w8, #224
b.ne LBB72_92 ; not COM/APPn: return 0
LBB72_7:
; COM / APPn: read big-endian u16 length, skip length-2 bytes.
ldr x0, [x19, #16]
cbz x0, LBB72_88
; %bb.8:
bl _fgetc
cmn w0, #1
csel w20, wzr, w0, eq ; EOF -> 0
ldr x0, [x19, #16]
cbz x0, LBB72_103
; %bb.9:
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
ldr x0, [x19, #16]
add w8, w8, w20, lsl #8 ; length = (hi << 8) | lo
sub w8, w8, #2 ; payload bytes after the length field
cbz x0, LBB72_106
; %bb.10:
sxtw x1, w8
mov w19, #1
mov w2, #1
bl _fseek ; skip the payload in the FILE stream
b LBB72_101
LBB72_11:
; DHT (0xC4): read the 2-byte segment length.
ldr x0, [x19, #16]
cbz x0, LBB72_20
; %bb.12:
bl _fgetc
cmn w0, #1
csel w20, wzr, w0, eq
ldr x0, [x19, #16]
cbz x0, LBB72_29
; %bb.13:
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
b LBB72_31
LBB72_14:
; DQT (0xDB): read the 2-byte segment length.
ldr x0, [x19, #16]
cbz x0, LBB72_23
; %bb.15:
bl _fgetc
cmn w0, #1
csel w20, wzr, w0, eq
ldr x0, [x19, #16]
cbz x0, LBB72_61
; %bb.16:
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
b LBB72_63
LBB72_17:
; DRI (0xDD): read the 2-byte segment length.
ldr x0, [x19, #16]
cbz x0, LBB72_26
; %bb.18:
bl _fgetc
cmn w0, #1
csel w20, wzr, w0, eq
ldr x0, [x19, #16]
cbz x0, LBB72_81
; %bb.19:
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
b LBB72_83
LBB72_20:
; Memory-buffer variants of the 2-byte length read (cursor at
; [x19,#24], limit at [x19,#32]; past-end reads yield 0).
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB72_93
; %bb.21:
add x10, x8, #1
str x10, [x19, #24]
ldrb w20, [x8]
mov x8, x10
cmp x8, x9
b.lo LBB72_30
LBB72_22:
mov w8, #0
b LBB72_31
LBB72_23:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB72_94
; %bb.24:
add x10, x8, #1
str x10, [x19, #24]
ldrb w20, [x8]
mov x8, x10
cmp x8, x9
b.lo LBB72_62
LBB72_25:
mov w8, #0
b LBB72_63
LBB72_26:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB72_95
; %bb.27:
add x10, x8, #1
str x10, [x19, #24]
ldrb w20, [x8]
mov x8, x10
cmp x8, x9
b.lo LBB72_82
LBB72_28:
mov w8, #0
b LBB72_83
LBB72_29:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB72_22
LBB72_30:
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
LBB72_31:
; DHT body: w24 = remaining payload = length - 2. Each table:
; 1 class/id byte, 16 count bytes, then sum-of-counts value bytes.
add w8, w8, w20, lsl #8
sub w24, w8, #2
cmp w24, #1
b.lt LBB72_60
; %bb.32:
; Per-table layout constants: stride 1680 bytes per table slot;
; DC tables at slot+40 (values at +1064), AC at +6760 (values +7784).
add x20, sp, #8
mov w21, #1680
mov w22, #6760
mov w23, #7784
b LBB72_34
LBB72_33: ; in Loop: Header=BB72_34 Depth=1
sub w24, w24, w25
cmp w24, #0
b.le LBB72_60
LBB72_34: ; =>This Loop Header: Depth=1
; Child Loop BB72_43 Depth 2
; Child Loop BB72_56 Depth 2
ldr x0, [x19, #16]
cbz x0, LBB72_36
; %bb.35: ; in Loop: Header=BB72_34 Depth=1
bl _fgetc
cmn w0, #1
csel w27, wzr, w0, eq
b LBB72_39
LBB72_36: ; in Loop: Header=BB72_34 Depth=1
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB72_38
; %bb.37: ; in Loop: Header=BB72_34 Depth=1
add x9, x8, #1
str x9, [x19, #24]
ldrb w27, [x8]
b LBB72_39
LBB72_38: ; in Loop: Header=BB72_34 Depth=1
mov w27, #0
LBB72_39: ; in Loop: Header=BB72_34 Depth=1
; w27 = table class/id byte; id (low nibble) must be <= 3 and the
; whole byte <= 31 (class 0 or 1).
and w26, w27, #0xf
cmp w27, #31
ccmp w26, #3, #2, le
b.hi LBB72_91
; %bb.40: ; in Loop: Header=BB72_34 Depth=1
mov x28, #0
mov w25, #0 ; w25 accumulates the sum of the 16 code counts
b LBB72_43
LBB72_41: ; in Loop: Header=BB72_43 Depth=2
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
LBB72_42: ; in Loop: Header=BB72_43 Depth=2
str w8, [x20, x28] ; sizes[] on the stack at sp+8
add w25, w8, w25
add x28, x28, #4
cmp x28, #64
b.eq LBB72_47
LBB72_43: ; Parent Loop BB72_34 Depth=1
; => This Inner Loop Header: Depth=2
ldr x0, [x19, #16]
cbnz x0, LBB72_41
; %bb.44: ; in Loop: Header=BB72_43 Depth=2
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB72_46
; %bb.45: ; in Loop: Header=BB72_43 Depth=2
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
b LBB72_42
LBB72_46: ; in Loop: Header=BB72_43 Depth=2
mov w8, #0
b LBB72_42
LBB72_47: ; in Loop: Header=BB72_34 Depth=1
madd x8, x26, x21, x19 ; table slot = state + id*1680
cmp w27, #15
b.hi LBB72_50
; %bb.48: ; in Loop: Header=BB72_34 Depth=1
; Class 0 (DC): build from slot+40, values go to slot+1064.
add x0, x8, #40
add x1, sp, #8
bl _build_huffman
cbz w0, LBB72_92
; %bb.49: ; in Loop: Header=BB72_34 Depth=1
madd x8, x26, x21, x19
add x26, x8, #1064
b LBB72_52
LBB72_50: ; in Loop: Header=BB72_34 Depth=1
; Class 1 (AC): build from slot+6760, values go to slot+7784.
add x0, x8, x22
add x1, sp, #8
bl _build_huffman
cbz w0, LBB72_92
; %bb.51: ; in Loop: Header=BB72_34 Depth=1
madd x8, x26, x21, x19
add x26, x8, x23
LBB72_52: ; in Loop: Header=BB72_34 Depth=1
sub w24, w24, #17 ; consumed: 1 id byte + 16 counts
cmp w25, #1
b.lt LBB72_33
; %bb.53: ; in Loop: Header=BB72_34 Depth=1
mov x27, x25 ; copy w25 value bytes into the table
b LBB72_56
LBB72_54: ; in Loop: Header=BB72_56 Depth=2
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
LBB72_55: ; in Loop: Header=BB72_56 Depth=2
strb w8, [x26], #1
subs x27, x27, #1
b.eq LBB72_33
LBB72_56: ; Parent Loop BB72_34 Depth=1
; => This Inner Loop Header: Depth=2
ldr x0, [x19, #16]
cbnz x0, LBB72_54
; %bb.57: ; in Loop: Header=BB72_56 Depth=2
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB72_59
; %bb.58: ; in Loop: Header=BB72_56 Depth=2
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
b LBB72_55
LBB72_59: ; in Loop: Header=BB72_56 Depth=2
mov w8, #0
b LBB72_55
LBB72_60:
; DHT success iff the payload was consumed exactly.
cmp w24, #0
cset w19, eq
b LBB72_101
LBB72_61:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB72_25
LBB72_62:
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
LBB72_63:
; DQT body: w20 = remaining payload. Each table is 1 id byte + 64
; coefficient bytes stored in dezigzag order at state+13480 + t*64.
add w8, w8, w20, lsl #8
sub w20, w8, #2
cmp w20, #1
b.lt LBB72_80
; %bb.64:
Lloh381:
adrp x21, _dezigzag@PAGE
Lloh382:
add x21, x21, _dezigzag@PAGEOFF
mov w22, #13480 ; offset of the dequant tables in the state
b LBB72_66
LBB72_65: ; in Loop: Header=BB72_66 Depth=1
subs w20, w20, #65 ; consumed: 1 id byte + 64 coefficients
b.le LBB72_80
LBB72_66: ; =>This Loop Header: Depth=1
; Child Loop BB72_76 Depth 2
ldr x0, [x19, #16]
cbz x0, LBB72_69
; %bb.67: ; in Loop: Header=BB72_66 Depth=1
bl _fgetc
cmn w0, #1
b.eq LBB72_72
; %bb.68: ; in Loop: Header=BB72_66 Depth=1
cmp w0, #16
b.lo LBB72_71
b LBB72_98 ; precision nibble set -> error l_.str.18
LBB72_69: ; in Loop: Header=BB72_66 Depth=1
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB72_72
; %bb.70: ; in Loop: Header=BB72_66 Depth=1
add x9, x8, #1
str x9, [x19, #24]
ldrb w0, [x8]
cmp w0, #16
b.hs LBB72_98
LBB72_71: ; in Loop: Header=BB72_66 Depth=1
and w8, w0, #0xf ; table id
cmp w8, #3
b.ls LBB72_73
b LBB72_99 ; id > 3 -> error l_.str.19
LBB72_72: ; in Loop: Header=BB72_66 Depth=1
mov w8, #0
LBB72_73: ; in Loop: Header=BB72_66 Depth=1
mov x23, #0
mov w24, w8
b LBB72_76
LBB72_74: ; in Loop: Header=BB72_76 Depth=2
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
LBB72_75: ; in Loop: Header=BB72_76 Depth=2
; dequant[id][dezigzag[k]] = byte (tables at state+13480, 64 apart).
ldrb w9, [x21, x23]
add x10, x19, x24, lsl #6
add x9, x10, x9
strb w8, [x9, x22]
add x23, x23, #1
cmp x23, #64
b.eq LBB72_65
LBB72_76: ; Parent Loop BB72_66 Depth=1
; => This Inner Loop Header: Depth=2
ldr x0, [x19, #16]
cbnz x0, LBB72_74
; %bb.77: ; in Loop: Header=BB72_76 Depth=2
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB72_79
; %bb.78: ; in Loop: Header=BB72_76 Depth=2
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
b LBB72_75
LBB72_79: ; in Loop: Header=BB72_76 Depth=2
mov w8, #0
b LBB72_75
LBB72_80:
; DQT success iff the payload was consumed exactly.
cmp w20, #0
cset w19, eq
b LBB72_101
LBB72_81:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB72_28
LBB72_82:
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
LBB72_83:
; DRI body: segment length must be exactly 4.
add w8, w8, w20, lsl #8
cmp w8, #4
b.ne LBB72_87
; %bb.84:
ldr x0, [x19, #16]
cbz x0, LBB72_96
; %bb.85:
bl _fgetc
cmn w0, #1
csel w20, wzr, w0, eq
ldr x0, [x19, #16]
cbz x0, LBB72_107
; %bb.86:
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
b LBB72_113
LBB72_87:
mov w19, #0
Lloh383:
adrp x8, l_.str.17@PAGE
Lloh384:
add x8, x8, l_.str.17@PAGEOFF
b LBB72_100
LBB72_88:
; COM/APPn memory-buffer length read.
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB72_108
; %bb.89:
add x10, x8, #1
str x10, [x19, #24]
ldrb w20, [x8]
mov x8, x10
cmp x8, x9
b.lo LBB72_104
LBB72_90:
mov w8, #0
b LBB72_105
LBB72_91:
; Bad DHT class/id byte.
Lloh385:
adrp x8, l_.str.20@PAGE
Lloh386:
add x8, x8, l_.str.20@PAGEOFF
adrp x9, _failure_reason@PAGE
str x8, [x9, _failure_reason@PAGEOFF]
LBB72_92:
mov w19, #0
b LBB72_101
LBB72_93:
mov w20, #0
cmp x8, x9
b.lo LBB72_30
b LBB72_22
LBB72_94:
mov w20, #0
cmp x8, x9
b.lo LBB72_62
b LBB72_25
LBB72_95:
mov w20, #0
cmp x8, x9
b.lo LBB72_82
b LBB72_28
LBB72_96:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB72_109
; %bb.97:
add x10, x8, #1
str x10, [x19, #24]
ldrb w20, [x8]
mov x8, x10
b LBB72_110
LBB72_98:
mov w19, #0
Lloh387:
adrp x8, l_.str.18@PAGE
Lloh388:
add x8, x8, l_.str.18@PAGEOFF
b LBB72_100
LBB72_99:
mov w19, #0
Lloh389:
adrp x8, l_.str.19@PAGE
Lloh390:
add x8, x8, l_.str.19@PAGEOFF
LBB72_100:
; Record the failure reason string, then fall into the epilogue.
adrp x9, _failure_reason@PAGE
str x8, [x9, _failure_reason@PAGEOFF]
LBB72_101:
; Stack-canary check and return (result in w19).
ldr x8, [sp, #72]
Lloh391:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh392:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh393:
ldr x9, [x9]
cmp x9, x8
b.ne LBB72_114
; %bb.102:
mov x0, x19
ldp x29, x30, [sp, #160] ; 16-byte Folded Reload
ldp x20, x19, [sp, #144] ; 16-byte Folded Reload
ldp x22, x21, [sp, #128] ; 16-byte Folded Reload
ldp x24, x23, [sp, #112] ; 16-byte Folded Reload
ldp x26, x25, [sp, #96] ; 16-byte Folded Reload
ldp x28, x27, [sp, #80] ; 16-byte Folded Reload
add sp, sp, #176
ret
LBB72_103:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB72_90
LBB72_104:
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
LBB72_105:
bfi w8, w20, #8, #24
sub w8, w8, #2
LBB72_106:
; Skip the payload by bumping the in-memory cursor.
ldr x9, [x19, #24]
add x8, x9, w8, sxtw
str x8, [x19, #24]
mov w19, #1
b LBB72_101
LBB72_107:
ldp x8, x9, [x19, #24]
b LBB72_110
LBB72_108:
mov w20, #0
cmp x8, x9
b.lo LBB72_104
b LBB72_90
LBB72_109:
mov w20, #0
LBB72_110:
cmp x8, x9
b.hs LBB72_112
; %bb.111:
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
b LBB72_113
LBB72_112:
mov w8, #0
LBB72_113:
; Store the 16-bit restart interval at state+14084, return 1.
add w8, w8, w20, lsl #8
str w8, [x19, #14084]
mov w19, #1
b LBB72_101
LBB72_114:
bl ___stack_chk_fail
.loh AdrpLdrGotLdr Lloh372, Lloh373, Lloh374
.loh AdrpAdd Lloh375, Lloh376
.loh AdrpAdd Lloh377, Lloh378
.loh AdrpAdd Lloh379, Lloh380
.loh AdrpAdd Lloh381, Lloh382
.loh AdrpAdd Lloh383, Lloh384
.loh AdrpAdd Lloh385, Lloh386
.loh AdrpAdd Lloh387, Lloh388
.loh AdrpAdd Lloh389, Lloh390
.loh AdrpLdrGotLdr Lloh391, Lloh392, Lloh393
.cfi_endproc
.section __TEXT,__const
; Jump table for markers 0xC2..0xDD, byte entries = (target-LBB72_2)>>2.
lJTI72_0:
.byte (LBB72_2-LBB72_2)>>2
.byte (LBB72_5-LBB72_2)>>2
.byte (LBB72_11-LBB72_2)>>2
.byte (LBB72_5-LBB72_2)>>2
.byte (LBB72_5-LBB72_2)>>2
.byte (LBB72_5-LBB72_2)>>2
.byte (LBB72_5-LBB72_2)>>2
.byte (LBB72_5-LBB72_2)>>2
.byte (LBB72_5-LBB72_2)>>2
.byte (LBB72_5-LBB72_2)>>2
.byte (LBB72_5-LBB72_2)>>2
.byte (LBB72_5-LBB72_2)>>2
.byte (LBB72_5-LBB72_2)>>2
.byte (LBB72_5-LBB72_2)>>2
.byte (LBB72_5-LBB72_2)>>2
.byte (LBB72_5-LBB72_2)>>2
.byte (LBB72_5-LBB72_2)>>2
.byte (LBB72_5-LBB72_2)>>2
.byte (LBB72_5-LBB72_2)>>2
.byte (LBB72_5-LBB72_2)>>2
.byte (LBB72_5-LBB72_2)>>2
.byte (LBB72_5-LBB72_2)>>2
.byte (LBB72_5-LBB72_2)>>2
.byte (LBB72_5-LBB72_2)>>2
.byte (LBB72_5-LBB72_2)>>2
.byte (LBB72_14-LBB72_2)>>2
.byte (LBB72_5-LBB72_2)>>2
.byte (LBB72_17-LBB72_2)>>2
; -- End function
.section __TEXT,__text,regular,pure_instructions
.p2align 2 ; -- Begin function get8
; Reads one byte from the decoder input (x0 = state). If the FILE* at
; [x0,#16] is non-NULL, uses fgetc and maps EOF (-1) to 0; otherwise
; reads from the memory cursor [x0,#24] bounded by [x0,#32], advancing
; the cursor. Past-end reads return 0.
_get8: ; @get8
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
ldr x8, [x0, #16]
cbz x8, LBB73_2
; %bb.1:
; FILE-backed path.
mov x0, x8
bl _fgetc
cmn w0, #1
csel w0, wzr, w0, eq ; EOF -> 0
ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
ret
LBB73_2:
; Memory-buffer path: cursor/limit pair.
ldp x8, x9, [x0, #24]
cmp x8, x9
b.hs LBB73_4
; %bb.3:
add x9, x8, #1
str x9, [x0, #24]
ldrb w0, [x8]
ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
ret
LBB73_4:
mov w0, #0 ; past end of buffer
ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function decode_block
; Decodes one 8x8 JPEG coefficient block.
; x21 = x0 decoder state, x20 = x1 int16 data[64] output, x2 = DC Huffman
; table, x19 = x3 AC Huffman table, w22 = w4 component index.
; Zeroes the 128-byte output, decodes the DC difference (Huffman symbol
; = magnitude bit count, then that many raw bits with sign extension),
; accumulates it into the per-component DC predictor at
; state + comp*72 + 13784, then decodes AC coefficients: each symbol's
; high nibble is the zero run, low nibble the magnitude bits; symbol
; 0xF0 skips 16 zeros; run/value positions map through _dezigzag.
; Returns 1 on success, 0 on a negative _decode result (and sets
; _failure_reason = l_.str.12).
_decode_block: ; @decode_block
.cfi_startproc
; %bb.0:
stp x28, x27, [sp, #-96]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 96
stp x26, x25, [sp, #16] ; 16-byte Folded Spill
stp x24, x23, [sp, #32] ; 16-byte Folded Spill
stp x22, x21, [sp, #48] ; 16-byte Folded Spill
stp x20, x19, [sp, #64] ; 16-byte Folded Spill
stp x29, x30, [sp, #80] ; 16-byte Folded Spill
add x29, sp, #80
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
mov x22, x4
mov x19, x3
mov x20, x1
mov x21, x0
mov x1, x2
bl _decode ; DC symbol = number of magnitude bits
tbnz w0, #31, LBB74_15 ; negative: decode error
; %bb.1:
mov x23, x0
movi.2d v0, #0000000000000000
; Clear all 64 int16 coefficients.
stp q0, q0, [x20, #96]
stp q0, q0, [x20, #64]
stp q0, q0, [x20, #32]
Lloh394:
adrp x24, _bmask@PAGE
Lloh395:
add x24, x24, _bmask@PAGEOFF
stp q0, q0, [x20]
cbz w0, LBB74_5 ; symbol 0: DC difference is 0
; %bb.2:
; extend_receive(w23): pull w23 bits from the bit buffer at
; state+14048 (bit count at +14052), refilling if short.
ldr w8, [x21, #14052]
cmp w8, w23
b.ge LBB74_4
; %bb.3:
mov x0, x21
bl _grow_buffer_unsafe
ldr w8, [x21, #14052]
LBB74_4:
sub w9, w23, #1
ldr w10, [x21, #14048]
sub w8, w8, w23
lsr w10, w10, w8
ldr w11, [x24, w23, uxtw #2] ; bmask[n] isolates the n bits
and w10, w10, w11
str w8, [x21, #14052]
lsr w8, w10, w9
mov w9, #-1
lsl w9, w9, w23
cmp w8, #0
csinc w8, wzr, w9, ne ; top bit clear: negative-range bias
add w8, w8, w10
b LBB74_6
LBB74_5:
mov w8, #0
LBB74_6:
; dc_pred[comp] += diff; data[0] = new predictor.
mov w9, #72
smaddl x9, w22, w9, x21
ldr w10, [x9, #13784]
add w8, w10, w8
str w8, [x9, #13784]
strh w8, [x20]
mov w26, #1 ; w26 = coefficient index k
mov w23, #-1
Lloh396:
adrp x25, _dezigzag@PAGE
Lloh397:
add x25, x25, _dezigzag@PAGEOFF
b LBB74_8
LBB74_7: ; in Loop: Header=BB74_8 Depth=1
; AC coefficient: skip run (w22 >> 4) zeros, extend_receive(w27)
; bits, store at data[dezigzag[k]].
add w9, w26, w22, lsr #4
sub w10, w27, #1
ldr w11, [x21, #14048]
sub w8, w8, w27
lsr w11, w11, w8
ldr w12, [x24, w27, uxtw #2]
and w11, w11, w12
str w8, [x21, #14052]
lsr w8, w11, w10
lsl w10, w23, w27
orr w10, w10, #0x1
cmp w8, #0
csel w8, w10, wzr, eq
add w8, w8, w11
add w26, w9, #1
ldrb w9, [x25, w9, sxtw]
strh w8, [x20, x9, lsl #1]
cmp w26, #64
b.ge LBB74_14
LBB74_8: ; =>This Inner Loop Header: Depth=1
mov x0, x21
mov x1, x19
bl _decode ; AC symbol: (run << 4) | size
tbnz w0, #31, LBB74_15
; %bb.9: ; in Loop: Header=BB74_8 Depth=1
mov x22, x0
ands w27, w0, #0xf ; w27 = magnitude bit count
b.eq LBB74_12 ; size 0: EOB or ZRL
; %bb.10: ; in Loop: Header=BB74_8 Depth=1
ldr w8, [x21, #14052]
cmp w8, w27
b.ge LBB74_7
; %bb.11: ; in Loop: Header=BB74_8 Depth=1
mov x0, x21
bl _grow_buffer_unsafe
ldr w8, [x21, #14052]
b LBB74_7
LBB74_12: ; in Loop: Header=BB74_8 Depth=1
cmp w22, #240
b.ne LBB74_14 ; not ZRL (0xF0): end of block
; %bb.13: ; in Loop: Header=BB74_8 Depth=1
add w26, w26, #16 ; ZRL: skip 16 zero coefficients
cmp w26, #64
b.lt LBB74_8
LBB74_14:
mov w0, #1
b LBB74_16
LBB74_15:
mov w0, #0
Lloh398:
adrp x8, l_.str.12@PAGE
Lloh399:
add x8, x8, l_.str.12@PAGEOFF
adrp x9, _failure_reason@PAGE
str x8, [x9, _failure_reason@PAGEOFF]
LBB74_16:
ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
ldp x24, x23, [sp, #32] ; 16-byte Folded Reload
ldp x26, x25, [sp, #16] ; 16-byte Folded Reload
ldp x28, x27, [sp], #96 ; 16-byte Folded Reload
ret
.loh AdrpAdd Lloh394, Lloh395
.loh AdrpAdd Lloh396, Lloh397
.loh AdrpAdd Lloh398, Lloh399
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function idct_block
; -----------------------------------------------------------------------------
; _idct_block(out in x0, out_stride in x1, coeffs in x2, dequant in x3)
; Fixed-point 8x8 inverse DCT over a 64-entry int workspace on the stack.
; Pass 1 (BB75_1): per column, dequantize (16-bit coeffs at x2, byte
;   dequant factors at x3 — NOTE(review): byte loads `ldurb/ldrb` from the
;   x3 region imply an 8-bit dequant table; confirm) and run the 1-D IDCT
;   with the usual AAN-style integer constants (2217, -7567, 4816, ...).
;   BB75_10 is the all-AC-zero shortcut: replicate DC<<2 down the column.
; Pass 2 (BB75_12): per row, second 1-D IDCT on the workspace, +65536
;   rounding bias, >>17 descale, clamp to [0,255] via the csetm/csel
;   pattern (w7 = -16777217 is the clamp threshold), store bytes to the
;   output row, advance by out_stride (x9, sign-extended from w1).
; Stack-protected: __stack_chk_guard checked before return.
; x0/x1 are spilled to [sp] on entry and reloaded after pass 1.
; -----------------------------------------------------------------------------
_idct_block: ; @idct_block
.cfi_startproc
; %bb.0:
sub sp, sp, #384
.cfi_def_cfa_offset 384
stp x28, x27, [sp, #288] ; 16-byte Folded Spill
stp x26, x25, [sp, #304] ; 16-byte Folded Spill
stp x24, x23, [sp, #320] ; 16-byte Folded Spill
stp x22, x21, [sp, #336] ; 16-byte Folded Spill
stp x20, x19, [sp, #352] ; 16-byte Folded Spill
stp x29, x30, [sp, #368] ; 16-byte Folded Spill
add x29, sp, #368
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
stp x1, x0, [sp] ; 16-byte Folded Spill
mov x8, #0
Lloh400:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh401:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh402:
ldr x9, [x9]
stur x9, [x29, #-96]
; x9 walks the coeff columns, x10 the dequant columns, x11 the workspace.
add x9, x2, #64
add x10, x3, #32
add x11, sp, #16
add x11, x11, #128
; 1-D IDCT constants kept in registers across the column loop.
mov w14, #3135
mov w15, #4816
mov w16, #1223
mov w17, #8410
mov w2, #12586
mov w3, #6149
mov w4, #-3685
mov w5, #-10497
mov w6, #-8034
mov w7, #-1597
LBB75_1: ; =>This Inner Loop Header: Depth=1
; Column pass: test whether all 7 AC coefficients of this column are zero.
add x19, x10, x8
ldursh w20, [x9, #-48]
ldurh w21, [x9, #-32]
orr w22, w20, w21
tst w22, #0xffff
b.ne LBB75_8
; %bb.2: ; in Loop: Header=BB75_1 Depth=1
ldurh w21, [x9, #-16]
cbnz w21, LBB75_7
; %bb.3: ; in Loop: Header=BB75_1 Depth=1
ldrh w21, [x9]
cbnz w21, LBB75_7
; %bb.4: ; in Loop: Header=BB75_1 Depth=1
ldrh w21, [x9, #16]
cbnz w21, LBB75_7
; %bb.5: ; in Loop: Header=BB75_1 Depth=1
ldrh w21, [x9, #32]
cbnz w21, LBB75_7
; %bb.6: ; in Loop: Header=BB75_1 Depth=1
ldrh w21, [x9, #48]
cbz w21, LBB75_10
LBB75_7: ; in Loop: Header=BB75_1 Depth=1
mov w21, #0
LBB75_8: ; in Loop: Header=BB75_1 Depth=1
; Full column IDCT: dequantize each coefficient, then butterfly network.
sxth w21, w21
ldurb w22, [x19, #-16]
mul w21, w22, w21
ldrsh w22, [x9, #32]
ldrb w23, [x19, #16]
mul w22, w23, w22
add w23, w22, w21
mov w12, #2217
mul w23, w23, w12
mov w12, #-7567
madd w22, w22, w12, w23
madd w21, w21, w14, w23
ldursh w23, [x9, #-64]
ldurb w24, [x19, #-32]
mul w23, w24, w23
ldrsh w24, [x9]
ldrb w25, [x19]
mul w24, w25, w24
add w25, w24, w23
lsl w25, w25, #12
sub w23, w23, w24
lsl w23, w23, #12
sub w24, w25, w21
sub w26, w23, w22
ldrsh w27, [x9, #48]
ldrb w28, [x19, #24]
mul w27, w28, w27
ldrsh w28, [x9, #16]
ldrb w30, [x19, #8]
mul w28, w30, w28
ldursh w30, [x9, #-16]
ldurb w12, [x19, #-8]
mul w12, w12, w30
ldurb w19, [x19, #-24]
mul w19, w19, w20
add w20, w12, w27
add w30, w19, w27
add w0, w12, w28
add w1, w19, w28
add w13, w1, w20
mul w13, w13, w15
madd w30, w30, w4, w13
madd w13, w0, w5, w13
mul w0, w20, w6
mul w1, w1, w7
madd w19, w19, w3, w1
madd w12, w12, w2, w0
madd w1, w28, w17, w1
madd w0, w27, w16, w0
; +512 rounding bias, then >>10 descale into the workspace rows.
add w20, w21, w25
add w20, w20, #512
add w21, w22, w23
add w21, w21, #512
add w22, w26, #512
add w23, w24, #512
add w19, w19, w30
add w24, w19, w20
asr w24, w24, #10
sub w19, w20, w19
stur w24, [x11, #-128]
asr w19, w19, #10
str w19, [x11, #96]
add w12, w12, w13
add w19, w12, w21
asr w19, w19, #10
stur w19, [x11, #-96]
sub w12, w21, w12
asr w12, w12, #10
str w12, [x11, #64]
add w12, w1, w13
add w13, w12, w22
asr w13, w13, #10
stur w13, [x11, #-64]
sub w12, w22, w12
asr w12, w12, #10
str w12, [x11, #32]
add w12, w0, w30
add w13, w12, w23
asr w13, w13, #10
stur w13, [x11, #-32]
sub w12, w23, w12
asr w12, w12, #10
str w12, [x11]
LBB75_9: ; in Loop: Header=BB75_1 Depth=1
; Advance to the next column (8 columns total).
add x8, x8, #1
add x9, x9, #2
add x11, x11, #4
cmp w8, #8
b.ne LBB75_1
b LBB75_11
LBB75_10: ; in Loop: Header=BB75_1 Depth=1
; AC-all-zero shortcut: workspace column = dequantized DC << 2 everywhere.
ldursh w20, [x9, #-64]
ldurb w19, [x19, #-32]
mul w19, w20, w19
lsl w19, w19, #2
str w19, [x11, #96]
str w19, [x11, #64]
str w19, [x11, #32]
str w19, [x11]
stur w19, [x11, #-32]
stur w19, [x11, #-64]
stur w19, [x11, #-96]
stur w19, [x11, #-128]
b LBB75_9
LBB75_11:
; Row pass setup: reload out_stride (x9) and out pointer (x10, biased +3
; so row stores use offsets -3..+4).
mov x8, #0
ldp x9, x10, [sp] ; 16-byte Folded Reload
sxtw x9, w9
add x10, x10, #3
add x11, sp, #16
add x12, x11, #16
mov w14, #-7567
mov w15, #3135
mov w16, #4816
mov w17, #1223
mov w0, #8410
mov w1, #12586
mov w2, #6149
mov w3, #-3685
mov w4, #-10497
mov w5, #-8034
mov w6, #-1597
mov w7, #-16777217
LBB75_12: ; =>This Inner Loop Header: Depth=1
; Row pass: same butterflies over one workspace row, then clamp+store.
add x19, x12, x8
ldp w20, w21, [x19, #8]
ldp w23, w22, [x19, #-12]
add w24, w20, w22
add x11, sp, #16
ldr w25, [x11, x8]
mov w11, #2217
mul w24, w24, w11
madd w20, w20, w14, w24
ldp w26, w27, [x19]
add w28, w26, w25
sub w25, w25, w26
madd w22, w22, w15, w24
ldur w24, [x19, #-4]
add w26, w24, w21
add w19, w23, w21
add w30, w23, w27
add w13, w30, w26
add w11, w24, w27
mul w13, w13, w16
madd w19, w19, w3, w13
madd w11, w11, w4, w13
lsl w13, w28, #12
lsl w25, w25, #12
mul w26, w26, w5
mul w28, w30, w6
madd w23, w23, w2, w28
madd w24, w24, w1, w26
madd w27, w27, w0, w28
madd w21, w21, w17, w26
sub w26, w13, w22
sub w28, w25, w20
add w13, w22, w13
add w13, w13, #16, lsl #12 ; =65536
add w22, w23, w19
add w23, w22, w13
add w20, w20, w25
; Clamp-to-byte idiom: >>17 descale; if the value is out of [0,255]
; (detected via the x7 threshold compare + sign-extension compare),
; csetm/csel substitutes the saturated 0x00/0xFF value.
asr w25, w23, #17
cmp w23, w7
csetm w23, gt
cmp w25, w25, sxtb
add w25, w25, #128
csel w23, w23, w25, ne
sub w13, w13, w22
add w20, w20, #16, lsl #12 ; =65536
cmp w13, w7
asr w13, w13, #17
sturb w23, [x10, #-3]
csetm w22, gt
cmp w13, w13, sxtb
add w13, w13, #128
csel w13, w22, w13, ne
add w22, w24, w11
add w23, w22, w20
strb w13, [x10, #4]
asr w13, w23, #17
cmp w23, w7
csetm w23, gt
cmp w13, w13, sxtb
add w13, w13, #128
csel w13, w23, w13, ne
sturb w13, [x10, #-2]
sub w13, w20, w22
cmp w13, w7
asr w13, w13, #17
csetm w20, gt
cmp w13, w13, sxtb
add w13, w13, #128
csel w13, w20, w13, ne
add w20, w28, #16, lsl #12 ; =65536
add w11, w27, w11
add w22, w11, w20
strb w13, [x10, #3]
asr w13, w22, #17
cmp w22, w7
csetm w22, gt
cmp w13, w13, sxtb
add w13, w13, #128
csel w13, w22, w13, ne
sturb w13, [x10, #-1]
sub w11, w20, w11
cmp w11, w7
asr w11, w11, #17
csetm w13, gt
cmp w11, w11, sxtb
add w11, w11, #128
csel w11, w13, w11, ne
add w13, w26, #16, lsl #12 ; =65536
add w19, w21, w19
add w20, w19, w13
strb w11, [x10, #2]
asr w11, w20, #17
cmp w20, w7
csetm w20, gt
cmp w11, w11, sxtb
add w11, w11, #128
csel w11, w20, w11, ne
strb w11, [x10]
sub w11, w13, w19
cmp w11, w7
asr w11, w11, #17
csetm w13, gt
cmp w11, w11, sxtb
add w11, w11, #128
csel w11, w13, w11, ne
strb w11, [x10, #1]
; Next row: workspace advances 32 bytes, output advances one stride.
add x8, x8, #32
add x10, x10, x9
cmp w8, #256
b.ne LBB75_12
; %bb.13:
; Stack-guard verification before returning.
ldur x8, [x29, #-96]
Lloh403:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh404:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh405:
ldr x9, [x9]
cmp x9, x8
b.ne LBB75_15
; %bb.14:
ldp x29, x30, [sp, #368] ; 16-byte Folded Reload
ldp x20, x19, [sp, #352] ; 16-byte Folded Reload
ldp x22, x21, [sp, #336] ; 16-byte Folded Reload
ldp x24, x23, [sp, #320] ; 16-byte Folded Reload
ldp x26, x25, [sp, #304] ; 16-byte Folded Reload
ldp x28, x27, [sp, #288] ; 16-byte Folded Reload
add sp, sp, #384
ret
LBB75_15:
bl ___stack_chk_fail
.loh AdrpLdrGotLdr Lloh400, Lloh401, Lloh402
.loh AdrpLdrGotLdr Lloh403, Lloh404, Lloh405
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function grow_buffer_unsafe
; -----------------------------------------------------------------------------
; _grow_buffer_unsafe(ctx in x0)
; Refills an entropy-decoder bit buffer one byte at a time until at least
; ~24 bits are cached (loop exits once the pre-increment bit count at
; [ctx,#14052] reaches >= 18, i.e. w8 >= 17 before the +8).
; Observed context layout (offsets from x0/x19):
;   #16     FILE*-like stream handle (byte source via _fgetc when non-null)
;   #24/#32 in-memory cursor / end pointer pair (used when #16 is null)
;   #14048  32-bit code buffer   (new byte shifted in low: buf = b | buf<<8)
;   #14052  valid-bit count
;   #14056  byte: saved marker code        -- NOTE(review): inferred; confirm
;   #14060  flag: set once a marker/EOF-like condition is latched
; A 0xFF byte followed by a non-zero byte is treated as a marker: the second
; byte is stored at #14056, the flag at #14060 is set, and refilling stops
; feeding real data (subsequent calls take the LBB76_1 zero-fill path).
; 0xFF 0x00 is the classic JPEG stuffed-byte case and yields a literal 0xFF.
; Clobbers: x0, x8, x9 (x19/x20 saved in prologue). No return value used.
; -----------------------------------------------------------------------------
_grow_buffer_unsafe: ; @grow_buffer_unsafe
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x0
b LBB76_3
LBB76_1: ; in Loop: Header=BB76_3 Depth=1
; No more real input (marker latched, EOF, or cursor at end): feed zero bits.
mov w0, #0
LBB76_2: ; in Loop: Header=BB76_3 Depth=1
; Merge the new byte (w0) into the bit buffer and bump the bit count by 8.
ldr w8, [x19, #14048]
orr w8, w0, w8, lsl #8
str w8, [x19, #14048]
ldr w8, [x19, #14052]
add w9, w8, #8
str w9, [x19, #14052]
cmp w8, #17
b.ge LBB76_16
LBB76_3: ; =>This Inner Loop Header: Depth=1
; If the marker/EOF flag is already set, do not consume more input.
ldr w8, [x19, #14060]
cbnz w8, LBB76_1
; %bb.4: ; in Loop: Header=BB76_3 Depth=1
ldr x0, [x19, #16]
cbz x0, LBB76_7
; %bb.5: ; in Loop: Header=BB76_3 Depth=1
; Stream path: fgetc; EOF (-1) -> zero-fill path.
bl _fgetc
cmn w0, #1
b.eq LBB76_1
; %bb.6: ; in Loop: Header=BB76_3 Depth=1
cmp w0, #255
b.ne LBB76_2
b LBB76_9
LBB76_7: ; in Loop: Header=BB76_3 Depth=1
; Memory path: bounds-check cursor against end pointer.
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB76_1
; %bb.8: ; in Loop: Header=BB76_3 Depth=1
add x9, x8, #1
str x9, [x19, #24]
ldrb w0, [x8]
cmp w0, #255
b.ne LBB76_2
LBB76_9: ; in Loop: Header=BB76_3 Depth=1
; Got 0xFF: read the following byte to classify stuffing vs. marker.
ldr x0, [x19, #16]
cbz x0, LBB76_12
; %bb.10: ; in Loop: Header=BB76_3 Depth=1
bl _fgetc
cmn w0, #1
b.eq LBB76_14
; %bb.11: ; in Loop: Header=BB76_3 Depth=1
cbz w0, LBB76_14
b LBB76_15
LBB76_12: ; in Loop: Header=BB76_3 Depth=1
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB76_14
; %bb.13: ; in Loop: Header=BB76_3 Depth=1
add x9, x8, #1
str x9, [x19, #24]
ldrb w0, [x8]
cbnz w0, LBB76_15
LBB76_14: ; in Loop: Header=BB76_3 Depth=1
; 0xFF 0x00 (stuffed byte) or EOF after 0xFF: deliver a literal 0xFF byte.
mov w0, #255
b LBB76_2
LBB76_15:
; Real marker: stash its code at #14056 and latch the stop flag at #14060.
mov w8, #14056
strb w0, [x19, x8]
mov w8, #1
str w8, [x19, #14060]
LBB76_16:
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function decode
; -----------------------------------------------------------------------------
; _decode(ctx in x0, huffman-table in x1) -> w0
; Huffman-decodes one symbol from the cached bit buffer.
; Fast path: top 9 bits of the bit buffer index a 512-entry acceleration
; table at [x1]; entry 255 means "no fast match". On a fast hit, the code
; size comes from [x1 + sym + 1280] and the value from [x1 + sym + 1024].
; Slow path: compares the left-aligned 16-bit code against per-length
; maxcode entries at [x1+1580+4k]; delta table at +1612, canonical code
; check via 16-bit codes at +512, _bmask for bit extraction.
; Returns -1 on underflow/invalid code; tail-calls _decode.cold.1 when the
; decoded code fails the consistency re-check (diagnostic trap).
; Context offsets as in _grow_buffer_unsafe: #14048 bit buffer, #14052 count.
; NOTE(review): table offsets inferred from the loads visible here — confirm
; against the struct definition in the C source.
; -----------------------------------------------------------------------------
_decode: ; @decode
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x20, x1
mov x19, x0
; Ensure at least 16 bits are buffered before looking up.
ldr w8, [x0, #14052]
cmp w8, #15
b.gt LBB77_2
; %bb.1:
mov x0, x19
bl _grow_buffer_unsafe
ldr w8, [x19, #14052]
LBB77_2:
; Fast table probe with the top 9 buffered bits.
ldr w9, [x19, #14048]
sub w10, w8, #9
lsr x11, x9, x10
and x11, x11, #0x1ff
ldrb w11, [x20, x11]
cmp x11, #255
b.eq LBB77_5
; %bb.3:
; Fast hit: consume size bits, return value byte.
add x9, x20, x11
ldrb w10, [x9, #1280]
subs w8, w8, w10
b.lt LBB77_10
; %bb.4:
str w8, [x19, #14052]
ldrb w0, [x9, #1024]
b LBB77_11
LBB77_5:
; Slow path: left-align the next 16 bits of the buffer into w13.
mov x11, #0
mov w12, #16
sub w12, w12, w8
lsl w13, w9, w12
subs w12, w8, #16
lsr w14, w9, w12
csel w13, w13, w14, lt
and w13, w13, #0xffff
add x14, x20, #1580
LBB77_6: ; =>This Inner Loop Header: Depth=1
; Scan maxcode[] (lengths 10..16 relative to the fast-path cutoff).
ldr w15, [x14, x11, lsl #2]
add x11, x11, #1
cmp w13, w15
b.hs LBB77_6
; %bb.7:
cmp w11, #8
b.ne LBB77_9
; %bb.8:
; Code longer than 16 bits: invalid stream.
str w12, [x19, #14052]
mov w0, #-1
b LBB77_11
LBB77_9:
add x12, x11, #9
cmp w8, w12
b.ge LBB77_12
LBB77_10:
; Not enough buffered bits for the code size: signal failure.
mov w0, #-1
LBB77_11:
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
LBB77_12:
; Convert code to symbol index: (code & bmask[len]) + delta[len].
and x12, x12, #0xffffffff
sub x10, x10, x11
lsr w11, w9, w10
Lloh406:
adrp x13, _bmask@PAGE
Lloh407:
add x13, x13, _bmask@PAGEOFF
lsl x12, x12, #2
ldr w14, [x13, x12]
and w11, w14, w11
add x12, x20, x12
ldr w12, [x12, #1612]
add w12, w11, w12
sxtw x11, w12
add x11, x20, x11
; Re-extract the code at the symbol's recorded size and verify it matches
; the canonical 16-bit code table at +512; mismatch -> diagnostic trap.
ldrb w14, [x11, #1280]
sub w8, w8, w14
lsr w8, w9, w8
ldr w9, [x13, x14, lsl #2]
and w8, w8, w9
add x9, x20, w12, sxtw #1
ldrh w9, [x9, #512]
cmp w8, w9
b.ne LBB77_14
; %bb.13:
str w10, [x19, #14052]
ldrb w0, [x11, #1024]
b LBB77_11
LBB77_14:
bl _decode.cold.1
.loh AdrpAdd Lloh406, Lloh407
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function build_huffman
; -----------------------------------------------------------------------------
; _build_huffman(table in x0, counts in x1) -> x0 (1 = ok, 0 = failure)
; Builds a canonical Huffman table from a 16-entry array of per-length
; symbol counts (w11 = counts[len], 32-bit entries at [x1 + 4*len]).
; Phase 1 (BB78_2/4): expand counts into a per-symbol size list at
;   [table+1280...], terminated with a 0 size byte.
; Phase 2 (BB78_8/9): assign consecutive canonical codes per length;
;   16-bit codes land at [table+512+2*sym], per-length delta at +1612
;   and left-aligned maxcode at +1540 (offsets relative to x14 = table+4k).
;   Overflow of the code space for a length (w16 >= 1<<len) is rejected:
;   returns 0 and stores l_.str.21 into _failure_reason.
; Phase 3 (BB78_12..17): fill the 512-byte fast-lookup table at [table]
;   with 0xFF, then for every symbol whose size <= 9 bits, _memset the
;   2^(9-size) fast slots it covers with the symbol index.
; NOTE(review): offsets inferred from the loads/stores here — confirm
; against the C struct.
; -----------------------------------------------------------------------------
_build_huffman: ; @build_huffman
.cfi_startproc
; %bb.0:
stp x24, x23, [sp, #-64]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 64
stp x22, x21, [sp, #16] ; 16-byte Folded Spill
stp x20, x19, [sp, #32] ; 16-byte Folded Spill
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
mov x19, x0
mov x8, #0
mov w9, #0
add x10, x0, #1280
b LBB78_2
LBB78_1: ; in Loop: Header=BB78_2 Depth=1
add x8, x8, #1
cmp x8, #16
b.eq LBB78_6
LBB78_2: ; =>This Loop Header: Depth=1
; Child Loop BB78_4 Depth 2
; x8 = bit-length index (0..15); skip lengths with no symbols.
ldr w11, [x1, x8, lsl #2]
cmp w11, #1
b.lt LBB78_1
; %bb.3: ; in Loop: Header=BB78_2 Depth=1
mov x11, #0
add w12, w8, #1
add x13, x10, w9, sxtw
LBB78_4: ; Parent Loop BB78_2 Depth=1
; => This Inner Loop Header: Depth=2
; size[sym++] = len+1, counts[len] times.
strb w12, [x13, x11]
ldr w14, [x1, x8, lsl #2]
add x11, x11, #1
cmp w11, w14
b.lt LBB78_4
; %bb.5: ; in Loop: Header=BB78_2 Depth=1
add w9, w9, w11
b LBB78_1
LBB78_6:
; Terminate the size list, then assign canonical codes per length.
mov w11, #0
mov w8, #0
add x9, x19, w9, sxtw
strb wzr, [x9, #1280]
add x9, x19, #1281
mov w10, #1
mov w12, #16
mov w13, #1
b LBB78_8
LBB78_7: ; in Loop: Header=BB78_8 Depth=1
; maxcode[len] = code << (16 - len), code doubles for the next length.
sub w15, w12, w13
lsl w15, w11, w15
str w15, [x14, #1540]
lsl w11, w11, #1
add x13, x13, #1
cmp x13, #17
b.eq LBB78_12
LBB78_8: ; =>This Loop Header: Depth=1
; Child Loop BB78_9 Depth 2
; delta[len] = first-symbol-index - first-code for this length.
sub w15, w8, w11
add x14, x19, x13, lsl #2
str w15, [x14, #1612]
sxtw x15, w8
add x16, x19, x15
ldrb w16, [x16, #1280]
cmp x13, x16
b.ne LBB78_7
LBB78_9: ; Parent Loop BB78_8 Depth=1
; => This Inner Loop Header: Depth=2
; Emit consecutive codes for every symbol of the current length.
; Store target x9 + 2*sym - 769 resolves to table + 512 + 2*sym.
add x8, x9, x15, lsl #1
sub x8, x8, #769
strh w11, [x8]
add w11, w11, #1
add x8, x15, #1
ldrb w16, [x9, x15]
mov x15, x8
cmp x13, x16
b.eq LBB78_9
; %bb.10: ; in Loop: Header=BB78_8 Depth=1
; Reject code-space overflow: last code must fit in len bits.
lsl w15, w10, w13
sub w16, w11, #1
; kill: def $w8 killed $w8 killed $x8 def $x8
cmp w16, w15
b.lt LBB78_7
; %bb.11:
mov w20, #0
Lloh408:
adrp x8, l_.str.21@PAGE
Lloh409:
add x8, x8, l_.str.21@PAGEOFF
adrp x9, _failure_reason@PAGE
str x8, [x9, _failure_reason@PAGEOFF]
b LBB78_18
LBB78_12:
; Phase 3: sentinel maxcode, then fill the 512-byte fast table with 0xFF.
mov w9, #-1
str w9, [x19, #1608]
movi.2d v0, #0xffffffffffffffff
stp q0, q0, [x19]
stp q0, q0, [x19, #32]
stp q0, q0, [x19, #64]
stp q0, q0, [x19, #96]
stp q0, q0, [x19, #128]
stp q0, q0, [x19, #160]
stp q0, q0, [x19, #192]
stp q0, q0, [x19, #224]
stp q0, q0, [x19, #256]
stp q0, q0, [x19, #288]
stp q0, q0, [x19, #320]
stp q0, q0, [x19, #352]
stp q0, q0, [x19, #384]
stp q0, q0, [x19, #416]
stp q0, q0, [x19, #448]
mov w20, #1
stp q0, q0, [x19, #480]
cmp w8, #1
b.lt LBB78_18
; %bb.13:
mov x21, #0
add x22, x19, #1280
mov w23, #9
mov w24, w8
b LBB78_15
LBB78_14: ; in Loop: Header=BB78_15 Depth=1
add x21, x21, #1
cmp x24, x21
b.eq LBB78_17
LBB78_15: ; =>This Inner Loop Header: Depth=1
; Only symbols with size <= 9 bits get fast-table entries.
ldrb w8, [x22, x21]
cmp x8, #9
b.hi LBB78_14
; %bb.16: ; in Loop: Header=BB78_15 Depth=1
; memset(table + (code << (9-size)), sym, 1 << (9-size))
sub x8, x23, x8
lsl w2, w20, w8
add x9, x22, x21, lsl #1
sub x9, x9, #768
ldrh w9, [x9]
lsl x8, x9, x8
add x0, x19, x8
mov x1, x21
bl _memset
b LBB78_14
LBB78_17:
mov w20, #1
LBB78_18:
mov x0, x20
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
ldp x24, x23, [sp], #64 ; 16-byte Folded Reload
ret
.loh AdrpAdd Lloh408, Lloh409
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function process_frame_header
; -----------------------------------------------------------------------------
; _process_frame_header(ctx in x0) -> w0 (1 = ok, 0 = failure)
; Parses a JPEG SOF-style frame header from the context's byte source
; (stream handle at [ctx,#16] via _fgetc, else memory cursor at [ctx,#24]):
;   * 16-bit segment length  (rejected if < 11 -> l_.str.24)
;   * precision byte, must equal 8               -> l_.str.25 otherwise
;   * 16-bit image height  -> [ctx,#4]  (0 rejected -> l_.str.26)
;   * 16-bit image width   -> [ctx]    (0 rejected -> l_.str.27)
;   * component count byte -> [ctx,#8], must be 1 or 3 (the
;     `and #0xfffffffd; cmp #1` trick accepts exactly {1,3}) -> l_.str.28
; Then zeroes per-component slots (72-byte stride, base near ctx+13752),
; reads each component's id / sampling byte (h in bits 7..4, v in bits
; 3..0, both validated 1..4) / quant-table index (<= 3), validates the
; segment length against 8+3*ncomp, checks width*height against a
; 2^30 pixel budget (l_.str.33 on overflow), computes max h/v sampling
; (NEON smax over the per-component pairs), derives MCU dimensions, and
; mallocs one aligned data buffer per component.  On malloc failure the
; already-allocated buffers are freed (BB79_100) and l_.str.5 is reported.
; All failures store a message pointer into _failure_reason and return 0.
; NOTE(review): field offsets and string meanings inferred from this code
; alone — confirm against the C source and string table.
; -----------------------------------------------------------------------------
_process_frame_header: ; @process_frame_header
.cfi_startproc
; %bb.0:
sub sp, sp, #144
.cfi_def_cfa_offset 144
stp d9, d8, [sp, #32] ; 16-byte Folded Spill
stp x28, x27, [sp, #48] ; 16-byte Folded Spill
stp x26, x25, [sp, #64] ; 16-byte Folded Spill
stp x24, x23, [sp, #80] ; 16-byte Folded Spill
stp x22, x21, [sp, #96] ; 16-byte Folded Spill
stp x20, x19, [sp, #112] ; 16-byte Folded Spill
stp x29, x30, [sp, #128] ; 16-byte Folded Spill
add x29, sp, #128
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
.cfi_offset b8, -104
.cfi_offset b9, -112
mov x19, x0
; Read the 16-bit segment length (big-endian, byte at a time).
ldr x0, [x0, #16]
cbz x0, LBB79_3
; %bb.1:
bl _fgetc
cmn w0, #1
csel w20, wzr, w0, eq
ldr x0, [x19, #16]
cbz x0, LBB79_6
; %bb.2:
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
b LBB79_8
LBB79_3:
; Memory-cursor variant of the same two-byte read.
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB79_13
; %bb.4:
add x10, x8, #1
str x10, [x19, #24]
ldrb w20, [x8]
mov x8, x10
cmp x8, x9
b.lo LBB79_7
LBB79_5:
mov w8, #0
b LBB79_8
LBB79_6:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB79_5
LBB79_7:
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
LBB79_8:
; w20 = segment length; must be at least 11 (8 + 3*1).
add w20, w8, w20, lsl #8
cmp w20, #11
b.ge LBB79_10
LBB79_9:
Lloh410:
adrp x20, l_.str.24@PAGE
Lloh411:
add x20, x20, l_.str.24@PAGEOFF
b LBB79_20
LBB79_10:
; Precision byte: only 8-bit samples are supported.
ldr x0, [x19, #16]
cbz x0, LBB79_14
; %bb.11:
bl _fgetc
cmn w0, #1
b.eq LBB79_19
; %bb.12:
cmp w0, #8
b.ne LBB79_19
b LBB79_16
LBB79_13:
mov w20, #0
cmp x8, x9
b.lo LBB79_7
b LBB79_5
LBB79_14:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB79_19
; %bb.15:
add x9, x8, #1
str x9, [x19, #24]
ldrb w0, [x8]
cmp w0, #8
b.ne LBB79_19
LBB79_16:
; 16-bit image height.
ldr x0, [x19, #16]
cbz x0, LBB79_22
; %bb.17:
bl _fgetc
cmn w0, #1
csel w21, wzr, w0, eq
ldr x0, [x19, #16]
cbz x0, LBB79_25
; %bb.18:
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
b LBB79_27
LBB79_19:
Lloh412:
adrp x20, l_.str.25@PAGE
Lloh413:
add x20, x20, l_.str.25@PAGEOFF
LBB79_20:
; Common failure exit: return 0 with *_failure_reason = message (x20).
mov w0, #0
adrp x8, _failure_reason@PAGE
str x20, [x8, _failure_reason@PAGEOFF]
LBB79_21:
ldp x29, x30, [sp, #128] ; 16-byte Folded Reload
ldp x20, x19, [sp, #112] ; 16-byte Folded Reload
ldp x22, x21, [sp, #96] ; 16-byte Folded Reload
ldp x24, x23, [sp, #80] ; 16-byte Folded Reload
ldp x26, x25, [sp, #64] ; 16-byte Folded Reload
ldp x28, x27, [sp, #48] ; 16-byte Folded Reload
ldp d9, d8, [sp, #32] ; 16-byte Folded Reload
add sp, sp, #144
ret
LBB79_22:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB79_35
; %bb.23:
add x10, x8, #1
str x10, [x19, #24]
ldrb w21, [x8]
mov x8, x10
cmp x8, x9
b.lo LBB79_26
LBB79_24:
mov w8, #0
b LBB79_27
LBB79_25:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB79_24
LBB79_26:
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
LBB79_27:
; Height -> [ctx,#4]; zero height is rejected.
adds w8, w8, w21, lsl #8
str w8, [x19, #4]
b.eq LBB79_31
; %bb.28:
; 16-bit image width.
ldr x0, [x19, #16]
cbz x0, LBB79_32
; %bb.29:
bl _fgetc
cmn w0, #1
csel w21, wzr, w0, eq
ldr x0, [x19, #16]
cbz x0, LBB79_36
; %bb.30:
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
b LBB79_38
LBB79_31:
Lloh414:
adrp x20, l_.str.26@PAGE
Lloh415:
add x20, x20, l_.str.26@PAGEOFF
b LBB79_20
LBB79_32:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB79_48
; %bb.33:
add x10, x8, #1
str x10, [x19, #24]
ldrb w21, [x8]
mov x8, x10
cmp x8, x9
b.lo LBB79_37
LBB79_34:
mov w8, #0
b LBB79_38
LBB79_35:
mov w21, #0
cmp x8, x9
b.lo LBB79_26
b LBB79_24
LBB79_36:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB79_34
LBB79_37:
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
LBB79_38:
; Width -> [ctx,#0]; zero width is rejected.
adds w8, w8, w21, lsl #8
str w8, [x19]
b.eq LBB79_41
; %bb.39:
; Component count byte.
ldr x0, [x19, #16]
cbz x0, LBB79_42
; %bb.40:
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
b LBB79_44
LBB79_41:
Lloh416:
adrp x20, l_.str.27@PAGE
Lloh417:
add x20, x20, l_.str.27@PAGEOFF
b LBB79_20
LBB79_42:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB79_47
; %bb.43:
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
LBB79_44:
; (n & ~2) == 1 accepts exactly n == 1 or n == 3 components.
and w9, w8, #0xfffffffd
cmp w9, #1
b.ne LBB79_47
; %bb.45:
str w8, [x19, #8]
mov w9, w8
cmp w8, #4
b.hs LBB79_49
; %bb.46:
mov x10, #0
b LBB79_52
LBB79_47:
Lloh418:
adrp x20, l_.str.28@PAGE
Lloh419:
add x20, x20, l_.str.28@PAGEOFF
b LBB79_20
LBB79_48:
mov w21, #0
cmp x8, x9
b.lo LBB79_37
b LBB79_34
LBB79_49:
; Unrolled-by-4 zeroing of two 8-byte fields per 72-byte component slot.
and x10, x9, #0xfffffffc
mov w11, #14040
add x11, x19, x11
mov x12, x10
LBB79_50: ; =>This Inner Loop Header: Depth=1
stur xzr, [x11, #-232]
stur xzr, [x11, #-160]
stur xzr, [x11, #-88]
stur xzr, [x11, #-16]
stur xzr, [x11, #-216]
stur xzr, [x11, #-144]
stur xzr, [x11, #-72]
str xzr, [x11]
add x11, x11, #288
subs x12, x12, #4
b.ne LBB79_50
; %bb.51:
cmp x10, x9
b.eq LBB79_54
LBB79_52:
; Scalar remainder of the per-component zeroing.
mov w11, #72
madd x11, x10, x11, x19
mov w12, #13824
add x11, x11, x12
sub x9, x9, x10
LBB79_53: ; =>This Inner Loop Header: Depth=1
stur xzr, [x11, #-16]
str xzr, [x11], #72
subs x9, x9, #1
b.ne LBB79_53
LBB79_54:
; Segment length must equal 8 + 3 * ncomp exactly.
add w9, w8, w8, lsl #1
add w9, w9, #8
cmp w20, w9
b.ne LBB79_9
; %bb.55:
cmp w8, #1
b.lt LBB79_78
; %bb.56:
; Per-component read loop: x21 walks 72-byte slots starting at ctx+13772.
mov x26, #0
mov w8, #13772
add x21, x19, x8
Lloh420:
adrp x22, l_.str.29@PAGE
Lloh421:
add x22, x22, l_.str.29@PAGEOFF
Lloh422:
adrp x20, l_.str.30@PAGE
Lloh423:
add x20, x20, l_.str.30@PAGEOFF
Lloh424:
adrp x23, l_.str.31@PAGE
Lloh425:
add x23, x23, l_.str.31@PAGEOFF
Lloh426:
adrp x24, l_.str.32@PAGE
Lloh427:
add x24, x24, l_.str.32@PAGEOFF
b LBB79_59
LBB79_57: ; in Loop: Header=BB79_59 Depth=1
str wzr, [x21]
LBB79_58: ; in Loop: Header=BB79_59 Depth=1
ldrsw x8, [x19, #8]
add x21, x21, #72
mov x26, x25
cmp x25, x8
b.ge LBB79_78
LBB79_59: ; =>This Inner Loop Header: Depth=1
; Component id byte.
ldr x0, [x19, #16]
cbz x0, LBB79_61
; %bb.60: ; in Loop: Header=BB79_59 Depth=1
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
b LBB79_64
LBB79_61: ; in Loop: Header=BB79_59 Depth=1
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB79_63
; %bb.62: ; in Loop: Header=BB79_59 Depth=1
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
b LBB79_64
LBB79_63: ; in Loop: Header=BB79_59 Depth=1
mov w8, #0
LBB79_64: ; in Loop: Header=BB79_59 Depth=1
; Component id must equal its index or index+1 (ccmp pair) -> l_.str.29.
stur w8, [x21, #-12]
add x25, x26, #1
mov w8, w8
cmp x25, x8
ccmp x26, x8, #4, ne
b.ne LBB79_83
; %bb.65: ; in Loop: Header=BB79_59 Depth=1
; Sampling-factor byte: h = bits 7..4, v = bits 3..0.
ldr x0, [x19, #16]
cbz x0, LBB79_67
; %bb.66: ; in Loop: Header=BB79_59 Depth=1
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
b LBB79_69
LBB79_67: ; in Loop: Header=BB79_59 Depth=1
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB79_99
; %bb.68: ; in Loop: Header=BB79_59 Depth=1
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
LBB79_69: ; in Loop: Header=BB79_59 Depth=1
; h in 1..4 enforced by 16 <= byte <= 79 range check -> l_.str.30.
asr w9, w8, #4
stur w9, [x21, #-8]
cmp w8, #16
b.lo LBB79_20
; %bb.70: ; in Loop: Header=BB79_59 Depth=1
cmp w8, #79
b.gt LBB79_20
; %bb.71: ; in Loop: Header=BB79_59 Depth=1
; v in 1..4 enforced via (v-5) unsigned-range trick -> l_.str.31.
and w8, w8, #0xf
stur w8, [x21, #-4]
sub w8, w8, #5
cmn w8, #4
b.lo LBB79_84
; %bb.72: ; in Loop: Header=BB79_59 Depth=1
; Quantization-table index byte, must be <= 3 -> l_.str.32.
ldr x0, [x19, #16]
cbz x0, LBB79_74
; %bb.73: ; in Loop: Header=BB79_59 Depth=1
bl _fgetc
cmn w0, #1
b.ne LBB79_76
b LBB79_57
LBB79_74: ; in Loop: Header=BB79_59 Depth=1
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB79_57
; %bb.75: ; in Loop: Header=BB79_59 Depth=1
add x9, x8, #1
str x9, [x19, #24]
ldrb w0, [x8]
LBB79_76: ; in Loop: Header=BB79_59 Depth=1
str w0, [x21]
cmp w0, #3
b.le LBB79_58
; %bb.77:
mov x20, x24
b LBB79_20
LBB79_78:
; Pixel-budget check: (2^30 / width) / ncomp must still be >= height.
ldr d0, [x19]
fmov w9, s0
mov w10, #1073741824
udiv w10, w10, w9
udiv w11, w10, w8
str q0, [sp, #16] ; 16-byte Folded Spill
mov.s w10, v0[1]
cmp w11, w10
b.hs LBB79_80
; %bb.79:
Lloh428:
adrp x20, l_.str.33@PAGE
Lloh429:
add x20, x20, l_.str.33@PAGEOFF
b LBB79_20
LBB79_80:
cmp w8, #1
b.lt LBB79_85
; %bb.81:
mov w11, w8
cmp w8, #4
b.hs LBB79_86
; %bb.82:
mov x12, #0
movi.2s v3, #1
b LBB79_89
LBB79_83:
mov x20, x22
b LBB79_20
LBB79_84:
mov x20, x23
b LBB79_20
LBB79_85:
movi.2s v3, #1
b LBB79_92
LBB79_86:
; Vectorized (4-way) max of the per-component (h,v) pairs; 72-byte stride.
and x12, x11, #0xfffffffc
mov w13, #13908
add x13, x19, x13
movi.2s v0, #1
mov x14, x12
movi.2s v7, #1
movi.2s v18, #1
movi.2s v19, #1
LBB79_87: ; =>This Inner Loop Header: Depth=1
fmov d4, d19
fmov d3, d18
fmov d2, d7
fmov d1, d0
ldur d5, [x13, #-144]
ldur d6, [x13, #-72]
ldr d16, [x13]
ldr d17, [x13, #72]
smax.2s v0, v5, v0
smax.2s v7, v6, v7
smax.2s v18, v16, v18
smax.2s v19, v17, v19
add x13, x13, #288
subs x14, x14, #4
b.ne LBB79_87
; %bb.88:
smax.2s v0, v17, v4
smax.2s v3, v16, v3
smax.2s v2, v6, v2
smax.2s v1, v5, v1
smax.2s v1, v1, v2
smax.2s v1, v1, v3
smax.2s v3, v1, v0
cmp x12, x11
b.eq LBB79_92
LBB79_89:
; Scalar remainder of the (h,v) max reduction.
mov w13, #72
madd x13, x12, x13, x19
mov w14, #13764
add x13, x13, x14
sub x11, x11, x12
LBB79_90: ; =>This Inner Loop Header: Depth=1
fmov d0, d3
ldr d1, [x13], #72
smax.2s v3, v1, v3
subs x11, x11, #1
b.ne LBB79_90
; %bb.91:
smax.2s v3, v1, v0
LBB79_92:
; Store (h_max, v_max), MCU size = 8*max, MCU counts = ceil(dim / mcu).
str d3, [x19, #13736]
shl.2s v0, v3, #3
str d0, [x19, #13752]
mov.s w11, v0[1]
fmov w12, s0
add w9, w9, w12
sub w9, w9, #1
udiv w9, w9, w12
str w9, [x19, #13744]
add w10, w10, w11
sub w10, w10, #1
udiv w10, w10, w11
str w10, [x19, #13748]
cmp w8, #1
b.lt LBB79_96
; %bb.93:
; Per-component allocation loop: compute padded w2/h2 and malloc data.
mov x20, #0
movi.2d v0, #0xffffffffffffffff
add.2s v8, v3, v0
lsl w23, w9, #3
lsl w24, w10, #3
mov w9, #13744
add x22, x19, x9
mov w9, #72
umull x25, w8, w9
mov w8, #13788
add x26, x19, x8
mov w21, #1
mov.s w27, v3[1]
str q3, [sp] ; 16-byte Folded Spill
LBB79_94: ; =>This Inner Loop Header: Depth=1
add x8, x26, x20
ldur d0, [x8, #-24]
fmov d1, d8
ldr q2, [sp, #16] ; 16-byte Folded Reload
mla.2s v1, v0, v2
mov.s w9, v1[1]
udiv w9, w9, w27
fmov w10, s3
fmov w11, s1
udiv w10, w11, w10
fmov s1, w10
mov.s v1[1], w9
str d1, [x8]
mov.s w8, v0[1]
fmov w9, s0
mul w9, w23, w9
add x28, x19, x20
str w9, [x28, #13796]
mul w8, w24, w8
str w8, [x28, #13800]
; malloc(w2*h2 | 15): +15 slack for the 16-byte alignment fixup below.
mul w8, w8, w9
orr w8, w8, #0xf
sxtw x0, w8
bl _malloc
str x0, [x28, #13816]
cbz x0, LBB79_97
; %bb.95: ; in Loop: Header=BB79_94 Depth=1
; Align the raw pointer up to 16 and store it as the working data pointer.
add x8, x0, #15
and x8, x8, #0xfffffffffffffff0
str x8, [x28, #13808]
str xzr, [x28, #13824]
add x21, x21, #1
add x20, x20, #72
mov w0, #1
cmp x25, x20
ldr q3, [sp] ; 16-byte Folded Reload
b.ne LBB79_94
b LBB79_21
LBB79_96:
mov w0, #1
b LBB79_21
LBB79_97:
; malloc failed; if nothing was allocated yet just report, else clean up.
cmp w21, #1
b.ne LBB79_100
; %bb.98:
Lloh430:
adrp x20, l_.str.5@PAGE
Lloh431:
add x20, x20, l_.str.5@PAGEOFF
b LBB79_20
LBB79_99:
stur wzr, [x21, #-8]
Lloh432:
adrp x20, l_.str.30@PAGE
Lloh433:
add x20, x20, l_.str.30@PAGEOFF
b LBB79_20
LBB79_100:
; Free every previously-allocated component buffer, newest first.
add x19, x22, x20
Lloh434:
adrp x20, l_.str.5@PAGE
Lloh435:
add x20, x20, l_.str.5@PAGEOFF
LBB79_101: ; =>This Inner Loop Header: Depth=1
ldr x0, [x19]
bl _free
stur xzr, [x19, #-8]
sub x21, x21, #1
sub x19, x19, #72
cmp x21, #1
b.gt LBB79_101
b LBB79_20
.loh AdrpAdd Lloh410, Lloh411
.loh AdrpAdd Lloh412, Lloh413
.loh AdrpAdd Lloh414, Lloh415
.loh AdrpAdd Lloh416, Lloh417
.loh AdrpAdd Lloh418, Lloh419
.loh AdrpAdd Lloh426, Lloh427
.loh AdrpAdd Lloh424, Lloh425
.loh AdrpAdd Lloh422, Lloh423
.loh AdrpAdd Lloh420, Lloh421
.loh AdrpAdd Lloh428, Lloh429
.loh AdrpAdd Lloh430, Lloh431
.loh AdrpAdd Lloh432, Lloh433
.loh AdrpAdd Lloh434, Lloh435
.cfi_endproc
; -- End function
.section __TEXT,__literal16,16byte_literals
.p2align 4 ; -- Begin function zbuild_huffman
; Vector constants for the over-subscription check in _zbuild_huffman:
; per-length limits 2^len for lengths 2..9 (lCPI80_1 then lCPI80_0).
lCPI80_0:
.long 64 ; 0x40
.long 128 ; 0x80
.long 256 ; 0x100
.long 512 ; 0x200
lCPI80_1:
.long 4 ; 0x4
.long 8 ; 0x8
.long 16 ; 0x10
.long 32 ; 0x20
.section __TEXT,__text,regular,pure_instructions
.p2align 2
; -----------------------------------------------------------------------------
; _zbuild_huffman(table in x0, sizelist in x1, count in x2) -> w0 (1/0)
; Builds a zlib-style canonical Huffman decode table from a per-symbol
; code-length byte array (sizelist[0..count-1]).
;   * Stack-buffer histogram of lengths (sp+16..), stack-guard protected.
;   * Rejects length-1 over-subscription early, and lengths whose count
;     exceeds 2^len via the NEON compare against lCPI80_0/1 -> trap
;     _zbuild_huffman.cold.1.
;   * Fills the 1024-byte fast table at [table] with 0xFF via _memset,
;     then computes per-length next_code (stack), firstcode at
;     [table+1028+2k], firstsymbol at +100 from that, sentinel 0x10000
;     at [table+1120]; over-subscribed tree -> returns 0 with l_.str.43
;     in _failure_reason.
;   * Second pass (BB80_23): for each symbol, records size at +1156 and
;     symbol id at +1444 (16-bit), and for sizes <= 9 bits fills the
;     bit-reversed fast-table slots (rbit/lsr #16 reversal, stride
;     1 << size) with the packed entry.
; NOTE(review): offsets inferred from the visible loads/stores — confirm
; against the C struct.
; -----------------------------------------------------------------------------
_zbuild_huffman: ; @zbuild_huffman
.cfi_startproc
; %bb.0:
sub sp, sp, #208
.cfi_def_cfa_offset 208
stp x22, x21, [sp, #160] ; 16-byte Folded Spill
stp x20, x19, [sp, #176] ; 16-byte Folded Spill
stp x29, x30, [sp, #192] ; 16-byte Folded Spill
add x29, sp, #192
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x21, x2
mov x19, x1
mov x20, x0
Lloh436:
adrp x8, ___stack_chk_guard@GOTPAGE
Lloh437:
ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
Lloh438:
ldr x8, [x8]
stur x8, [x29, #-40]
; Zero the 16-entry length histogram, 0xFF-fill the 1024-byte fast table.
str wzr, [sp, #80]
movi.2d v0, #0000000000000000
stp q0, q0, [sp, #48]
stp q0, q0, [sp, #16]
mov w1, #255
mov w2, #1024
bl _memset
cmp w21, #0
b.le LBB80_4
; %bb.1:
; Histogram pass: ++sizes[sizelist[i]].
mov w8, w21
add x9, sp, #16
mov x10, x19
LBB80_2: ; =>This Inner Loop Header: Depth=1
ldrb w11, [x10], #1
lsl x11, x11, #2
ldr w12, [x9, x11]
add w12, w12, #1
str w12, [x9, x11]
subs x8, x8, #1
b.ne LBB80_2
; %bb.3:
; sizes[0] = 0; more than two 1-bit codes is immediately invalid.
ldr w8, [sp, #20]
str wzr, [sp, #16]
cmp w8, #2
b.le LBB80_5
b LBB80_29
LBB80_4:
mov w8, #0
str wzr, [sp, #16]
LBB80_5:
; Vector check sizes[len] <= 2^len for len 2..9; any violation traps.
ldur q0, [sp, #40]
ldur q1, [sp, #24]
Lloh439:
adrp x9, lCPI80_0@PAGE
Lloh440:
ldr q2, [x9, lCPI80_0@PAGEOFF]
cmgt.4s v0, v0, v2
Lloh441:
adrp x9, lCPI80_1@PAGE
Lloh442:
ldr q2, [x9, lCPI80_1@PAGEOFF]
cmgt.4s v1, v1, v2
uzp1.8h v0, v1, v0
xtn.8b v0, v0
umov.b w9, v0[1]
and w9, w9, #0x1
umov.b w10, v0[0]
and w10, w10, #0x1
bfi w10, w9, #1, #1
umov.b w9, v0[2]
and w9, w9, #0x1
bfi w10, w9, #2, #1
umov.b w9, v0[3]
and w9, w9, #0x1
bfi w10, w9, #3, #1
umov.b w9, v0[4]
and w9, w9, #0x1
bfi w10, w9, #4, #1
umov.b w9, v0[5]
and w9, w9, #0x1
bfi w10, w9, #5, #1
umov.b w9, v0[6]
and w9, w9, #0x1
orr w9, w10, w9, lsl #6
umov.b w10, v0[7]
orr w9, w9, w10, lsl #7
tst w9, #0xff
b.ne LBB80_29
; %bb.6:
; Scalar checks for the remaining lengths 10..15.
ldr w9, [sp, #56]
cmp w9, #1024
b.gt LBB80_29
; %bb.7:
ldr w9, [sp, #60]
cmp w9, #2048
b.gt LBB80_29
; %bb.8:
ldr w9, [sp, #64]
cmp w9, #1, lsl #12 ; =4096
b.gt LBB80_29
; %bb.9:
ldr w9, [sp, #68]
cmp w9, #2, lsl #12 ; =8192
b.gt LBB80_29
; %bb.10:
ldr w9, [sp, #72]
cmp w9, #4, lsl #12 ; =16384
b.gt LBB80_29
; %bb.11:
ldr w9, [sp, #76]
cmp w9, #8, lsl #12 ; =32768
b.gt LBB80_29
; %bb.12:
; Canonical code assignment per length: maxcode (left-aligned), firstcode,
; firstsymbol, and next_code on the stack for the second pass.
str wzr, [sp, #92]
strh wzr, [x20, #1026]
strh wzr, [x20, #1126]
cmp w8, #2
b.gt LBB80_16
; %bb.13:
mov x9, #0
mov w10, #0
add x11, sp, #88
add x11, x11, #8
add x12, sp, #16
orr x12, x12, #0x8
mov w13, #15
mov w14, #1028
mov w15, #1
mov x16, x8
LBB80_14: ; =>This Inner Loop Header: Depth=1
lsl w17, w8, w13
add x0, x20, x9, lsl #2
str w17, [x0, #1060]
cmp x9, #14
b.eq LBB80_19
; %bb.15: ; in Loop: Header=BB80_14 Depth=1
add w10, w16, w10
lsl w8, w8, #1
lsl x17, x9, #2
ldr w16, [x12, x17]
str w8, [x11, x17]
add x17, x20, x14
strh w8, [x17]
strh w10, [x17, #100]
add w8, w16, w8
add w17, w9, #2
lsl w17, w15, w17
sub w13, w13, #1
add x9, x9, #1
add x14, x14, #2
cmp w16, #0
ccmp w8, w17, #4, ne
b.le LBB80_14
LBB80_16:
; Over-subscribed code lengths: fail with l_.str.43.
mov w0, #0
Lloh443:
adrp x8, l_.str.43@PAGE
Lloh444:
add x8, x8, l_.str.43@PAGEOFF
adrp x9, _failure_reason@PAGE
str x8, [x9, _failure_reason@PAGEOFF]
LBB80_17:
; Stack-guard verified epilogue (shared by all return paths).
ldur x8, [x29, #-40]
Lloh445:
adrp x9, ___stack_chk_guard@GOTPAGE
Lloh446:
ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
Lloh447:
ldr x9, [x9]
cmp x9, x8
b.ne LBB80_30
; %bb.18:
ldp x29, x30, [sp, #192] ; 16-byte Folded Reload
ldp x20, x19, [sp, #176] ; 16-byte Folded Reload
ldp x22, x21, [sp, #160] ; 16-byte Folded Reload
add sp, sp, #208
ret
LBB80_19:
; Sentinel maxcode for length 16, then populate per-symbol tables.
mov w8, #65536
str w8, [x20, #1120]
mov w0, #1
cmp w21, #1
b.lt LBB80_17
; %bb.20:
mov x8, #0
add x9, sp, #88
mov w10, #16
mov w11, w21
b LBB80_23
LBB80_21: ; in Loop: Header=BB80_23 Depth=1
add w13, w13, #1
str w13, [x9, x12, lsl #2]
LBB80_22: ; in Loop: Header=BB80_23 Depth=1
add x8, x8, #1
cmp x8, x11
b.eq LBB80_28
LBB80_23: ; =>This Loop Header: Depth=1
; Child Loop BB80_27 Depth 2
; Skip symbols with zero code length.
ldrb w12, [x19, x8]
cbz w12, LBB80_22
; %bb.24: ; in Loop: Header=BB80_23 Depth=1
; index = next_code[size] - firstcode[size] + firstsymbol[size]
ldrsw x13, [x9, x12, lsl #2]
add x14, x20, x12, lsl #1
ldrh w15, [x14, #1024]
ldrh w14, [x14, #1124]
sub x15, x13, x15
add x14, x15, x14
add x15, x20, x14
strb w12, [x15, #1156]
add x15, x20, x14, lsl #1
strh w8, [x15, #1444]
cmp w12, #9
b.hi LBB80_21
; %bb.25: ; in Loop: Header=BB80_23 Depth=1
; Fast table: bit-reverse the code, then fill every slot that decodes
; to it (stride 1 << size over the 512-entry index space).
rbit w15, w13
lsr w15, w15, #16
sub w16, w10, w12
lsr w15, w15, w16
cmp w15, #511
b.hi LBB80_21
; %bb.26: ; in Loop: Header=BB80_23 Depth=1
lsl w16, w0, w12
sxtw x16, w16
LBB80_27: ; Parent Loop BB80_23 Depth=1
; => This Inner Loop Header: Depth=2
strh w14, [x20, x15, lsl #1]
add x15, x15, x16
cmp x15, #512
b.lt LBB80_27
b LBB80_21
LBB80_28:
mov w0, #1
b LBB80_17
LBB80_29:
bl _zbuild_huffman.cold.1
LBB80_30:
bl ___stack_chk_fail
.loh AdrpLdrGotLdr Lloh436, Lloh437, Lloh438
.loh AdrpLdr Lloh441, Lloh442
.loh AdrpAdrp Lloh439, Lloh441
.loh AdrpLdr Lloh439, Lloh440
.loh AdrpAdd Lloh443, Lloh444
.loh AdrpLdrGotLdr Lloh445, Lloh446, Lloh447
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function zhuffman_decode
; -----------------------------------------------------------------------------
; _zhuffman_decode(zbuf in x0, huffman-table in x1) -> w0 (symbol, or -1)
; Decodes one symbol from a zlib bit stream.
; zbuf layout (from the accesses here): [x0] cursor ptr, [x0,#8] end ptr,
; [x0,#16] num_bits, [x0,#20] 32-bit code_buffer (LSB-first bit order).
; Refill loop (BB81_4): while num_bits < 16, append the next byte at
; position num_bits (zero byte past end-of-input); a stray high bit above
; num_bits means corrupt state -> _zhuffman_decode.cold.2.
; Fast path: low 9 bits index the 16-bit fast table at [x1]; 0xFFFF means
; no fast entry. Slow path bit-reverses the buffer (rbit/lsr #16) and
; scans maxcode at [x1+1096+4k] for lengths 10..16; >16 bits -> -1.
; On success, consumes size bits ([x1+1156+sym]) and returns the symbol
; id from [x1+1444+2*sym]. Inconsistent size -> _zhuffman_decode.cold.1.
; NOTE(review): offsets match the tables built by _zbuild_huffman above.
; -----------------------------------------------------------------------------
_zhuffman_decode: ; @zhuffman_decode
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
ldr w9, [x0, #16]
cmp w9, #16
b.ge LBB81_7
; %bb.1:
ldr w8, [x0, #20]
mov x10, x9
b LBB81_4
LBB81_2: ; in Loop: Header=BB81_4 Depth=1
; Consume one input byte from the memory cursor.
add x11, x9, #1
str x11, [x0]
ldrb w9, [x9]
LBB81_3: ; in Loop: Header=BB81_4 Depth=1
; code_buffer |= byte << num_bits; num_bits += 8.
lsl w9, w9, w10
orr w8, w9, w8
add w9, w10, #8
stp w9, w8, [x0, #16]
cmp w10, #17
mov x10, x9
b.ge LBB81_8
LBB81_4: ; =>This Inner Loop Header: Depth=1
; Bits above num_bits must be clear; otherwise state is corrupt.
lsr w9, w8, w10
cbnz w9, LBB81_16
; %bb.5: ; in Loop: Header=BB81_4 Depth=1
ldp x9, x11, [x0]
cmp x9, x11
b.lo LBB81_2
; %bb.6: ; in Loop: Header=BB81_4 Depth=1
; Past end of input: feed zero bits.
mov w9, #0
b LBB81_3
LBB81_7:
ldr w8, [x0, #20]
LBB81_8:
; Fast probe with the low 9 buffered bits.
and w10, w8, #0x1ff
ldrh w12, [x1, w10, uxtw #1]
mov w10, #65535
cmp x12, x10
b.eq LBB81_10
; %bb.9:
add x10, x1, x12
ldrb w11, [x10, #1156]
b LBB81_15
LBB81_10:
; Slow path: bit-reverse the buffer so codes compare MSB-first.
mov x10, #0
rbit w11, w8
lsr w12, w11, #16
add x11, x1, #1096
LBB81_11: ; =>This Inner Loop Header: Depth=1
ldr w13, [x11, x10, lsl #2]
add x10, x10, #1
cmp w13, w12
b.le LBB81_11
; %bb.12:
cmp w10, #7
b.ne LBB81_14
; %bb.13:
; No length in 10..16 matched: invalid code.
mov w0, #-1
ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
ret
LBB81_14:
; index = (reversed >> (16-size)) - firstcode[size] + firstsymbol[size]
add x11, x10, #9
mov w13, #7
sub w13, w13, w10
lsr w12, w12, w13
add x13, x1, w11, uxtw #1
ldrh w14, [x13, #1024]
sub w12, w12, w14
ldrh w13, [x13, #1124]
add x12, x13, w12, sxtw
add x13, x1, x12
ldrb w13, [x13, #1156]
sub w10, w10, w13
cmn w10, #9
b.ne LBB81_17
LBB81_15:
; Consume size (w11) bits and return the symbol id.
lsr w8, w8, w11
sub w9, w9, w11
stp w9, w8, [x0, #16]
add x8, x1, x12, lsl #1
ldrh w0, [x8, #1444]
ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
ret
LBB81_16:
bl _zhuffman_decode.cold.2
LBB81_17:
bl _zhuffman_decode.cold.1
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function convert_format
_convert_format: ; @convert_format
.cfi_startproc
; %bb.0:
stp d11, d10, [sp, #-128]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 128
stp d9, d8, [sp, #16] ; 16-byte Folded Spill
stp x28, x27, [sp, #32] ; 16-byte Folded Spill
stp x26, x25, [sp, #48] ; 16-byte Folded Spill
stp x24, x23, [sp, #64] ; 16-byte Folded Spill
stp x22, x21, [sp, #80] ; 16-byte Folded Spill
stp x20, x19, [sp, #96] ; 16-byte Folded Spill
stp x29, x30, [sp, #112] ; 16-byte Folded Spill
add x29, sp, #112
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
.cfi_offset b8, -104
.cfi_offset b9, -112
.cfi_offset b10, -120
.cfi_offset b11, -128
sub sp, sp, #400
mov x19, x0
cmp w2, w1
b.eq LBB82_176
; %bb.1:
mov x21, x2
sub w8, w2, #5
cmn w8, #5
b.ls LBB82_177
; %bb.2:
mov x24, x4
mov x20, x3
mov x22, x1
mul w25, w3, w21
mul w0, w25, w4
bl _malloc
cbz x0, LBB82_175
; %bb.3:
mov x23, x0
cmp w24, #1
b.lt LBB82_174
; %bb.4:
add w13, w21, w22, lsl #3
cmp w13, #35
b.hi LBB82_178
; %bb.5:
mov w8, #1
lsl x8, x8, x13
mov x9, #7168
movk x9, #5658, lsl #16
movk x9, #14, lsl #32
tst x8, x9
b.eq LBB82_178
; %bb.6:
mov w15, #0
mov w12, #0
mov x8, #0
sub w9, w20, #1
mov w10, w24
stur x10, [x29, #-128] ; 8-byte Folded Spill
add x10, x9, #1
and x16, x10, #0x1fffffff0
lsl x11, x16, #2
str x11, [sp, #48] ; 8-byte Folded Spill
lsl x17, x16, #1
sub w11, w9, w16
str w11, [sp, #140] ; 4-byte Folded Spill
and x0, x10, #0x1fffffff8
sub w11, w9, w0
stur w11, [x29, #-164] ; 4-byte Folded Spill
lsl x1, x0, #1
lsl x11, x0, #2
stur x11, [x29, #-216] ; 8-byte Folded Spill
ands x11, x10, #0xf
mov w14, #16
csel x11, x14, x11, eq
str x11, [sp, #192] ; 8-byte Folded Spill
sub x11, x10, x11
sub w14, w9, w11
str w14, [sp, #124] ; 4-byte Folded Spill
str x11, [sp, #224] ; 8-byte Folded Spill
lsl x11, x11, #2
str x11, [sp, #112] ; 8-byte Folded Spill
ands x11, x10, #0x7
mov w14, #8
csel x11, x14, x11, eq
sub x2, x10, x11
sub w14, w9, w2
stur w14, [x29, #-196] ; 4-byte Folded Spill
lsl x14, x2, #2
str x14, [sp, #248] ; 8-byte Folded Spill
stur x16, [x29, #-192] ; 8-byte Folded Spill
str x17, [sp, #128] ; 8-byte Folded Spill
add x14, x17, x16
str x14, [sp, #104] ; 8-byte Folded Spill
add x14, x1, x0
stp x14, x1, [x29, #-232] ; 16-byte Folded Spill
and x17, x10, #0x1ffffffe0
ands x14, x10, #0x3f
mov w16, #64
csel x1, x16, x14, eq
ands x14, x10, #0x1f
mov w16, #32
csel x16, x16, x14, eq
mvn x14, x9
add x11, x14, x11
stur x11, [x29, #-208] ; 8-byte Folded Spill
lsl x11, x17, #1
sub w4, w13, #10
movi.2d v1, #0xffffffffffffffff
movi.2d v3, #0xffffffffffffffff
movi.8b v4, #77
movi.8b v5, #150
movi.8b v6, #29
movi.16b v7, #77
movi.16b v16, #150
movi.16b v17, #29
str x11, [sp, #40] ; 8-byte Folded Spill
add x11, x11, x17
str x11, [sp, #32] ; 8-byte Folded Spill
lsl x11, x17, #2
str x11, [sp, #64] ; 8-byte Folded Spill
stur x17, [x29, #-240] ; 8-byte Folded Spill
sub w11, w9, w17
str w11, [sp, #60] ; 4-byte Folded Spill
and x11, x10, #0x18
str x11, [sp, #176] ; 8-byte Folded Spill
str x1, [sp, #216] ; 8-byte Folded Spill
sub x11, x10, x1
lsl x13, x11, #1
str x13, [sp, #168] ; 8-byte Folded Spill
add x13, x13, x11
str x13, [sp, #96] ; 8-byte Folded Spill
stur x11, [x29, #-248] ; 8-byte Folded Spill
sub w11, w9, w11
str w11, [sp, #164] ; 4-byte Folded Spill
lsl x11, x2, #1
str x2, [sp, #256] ; 8-byte Folded Spill
str x11, [sp, #240] ; 8-byte Folded Spill
add x11, x11, x2
str x11, [sp, #232] ; 8-byte Folded Spill
and x11, x10, #0x1ffffffc0
lsl x13, x11, #2
str x13, [sp, #24] ; 8-byte Folded Spill
sub w13, w9, w11
str w13, [sp, #136] ; 4-byte Folded Spill
and x13, x10, #0x38
str x13, [sp, #200] ; 8-byte Folded Spill
lsl x13, x11, #1
stur x11, [x29, #-184] ; 8-byte Folded Spill
add x11, x13, x11
stp x11, x13, [sp, #8] ; 16-byte Folded Spill
stur x10, [x29, #-152] ; 8-byte Folded Spill
str x16, [sp, #184] ; 8-byte Folded Spill
sub x10, x10, x16
sub w11, w9, w10
str w11, [sp, #92] ; 4-byte Folded Spill
lsl x11, x10, #2
str x10, [sp, #208] ; 8-byte Folded Spill
add x10, x10, x10, lsl #1
stp x10, x11, [sp, #72] ; 16-byte Folded Spill
mul w13, w20, w22
stur x0, [x29, #-160] ; 8-byte Folded Spill
neg x10, x0
stur x10, [x29, #-176] ; 8-byte Folded Spill
add x11, x23, #64
add x10, x19, #32
stp x10, x11, [x29, #-144] ; 16-byte Folded Spill
add x27, x23, #96
add x14, x19, #64
add x11, x23, #32
add x10, x23, #128
stp x10, x11, [sp, #144] ; 16-byte Folded Spill
mov w7, #255
mov w5, #77
mov w3, #150
mov w0, #29
Lloh448:
adrp x1, lJTI82_0@PAGE
Lloh449:
add x1, x1, lJTI82_0@PAGEOFF
b LBB82_8
LBB82_7: ; in Loop: Header=BB82_8 Depth=1
add x8, x8, #1
add w12, w26, w13
add w15, w17, w25
ldur x10, [x29, #-128] ; 8-byte Folded Reload
cmp x8, x10
b.eq LBB82_174
LBB82_8: ; =>This Loop Header: Depth=1
; Child Loop BB82_66 Depth 2
; Child Loop BB82_71 Depth 2
; Child Loop BB82_74 Depth 2
; Child Loop BB82_142 Depth 2
; Child Loop BB82_146 Depth 2
; Child Loop BB82_149 Depth 2
; Child Loop BB82_135 Depth 2
; Child Loop BB82_139 Depth 2
; Child Loop BB82_173 Depth 2
; Child Loop BB82_125 Depth 2
; Child Loop BB82_130 Depth 2
; Child Loop BB82_133 Depth 2
; Child Loop BB82_115 Depth 2
; Child Loop BB82_120 Depth 2
; Child Loop BB82_123 Depth 2
; Child Loop BB82_108 Depth 2
; Child Loop BB82_112 Depth 2
; Child Loop BB82_170 Depth 2
; Child Loop BB82_99 Depth 2
; Child Loop BB82_103 Depth 2
; Child Loop BB82_106 Depth 2
; Child Loop BB82_90 Depth 2
; Child Loop BB82_94 Depth 2
; Child Loop BB82_97 Depth 2
; Child Loop BB82_83 Depth 2
; Child Loop BB82_87 Depth 2
; Child Loop BB82_167 Depth 2
; Child Loop BB82_76 Depth 2
; Child Loop BB82_80 Depth 2
; Child Loop BB82_164 Depth 2
; Child Loop BB82_59 Depth 2
; Child Loop BB82_63 Depth 2
; Child Loop BB82_161 Depth 2
; Child Loop BB82_151 Depth 2
; Child Loop BB82_155 Depth 2
; Child Loop BB82_158 Depth 2
mov w26, w12
mov w17, w15
add x16, x14, x26
add x28, x23, x17
mul w11, w8, w20
mul w12, w11, w22
add x15, x19, x12
mul w11, w11, w21
add x6, x23, x11
cmp w4, #24
b.hi LBB82_54
; %bb.9: ; in Loop: Header=BB82_8 Depth=1
ldur x10, [x29, #-136] ; 8-byte Folded Reload
add x2, x10, x17
ldur x10, [x29, #-144] ; 8-byte Folded Reload
add x24, x10, x26
add x11, x27, x17
add x12, x19, x26
adr x10, LBB82_10
ldrb w30, [x1, x4]
add x10, x10, x30, lsl #2
br x10
LBB82_10: ; in Loop: Header=BB82_8 Depth=1
tbnz w9, #31, LBB82_7
; %bb.11: ; in Loop: Header=BB82_8 Depth=1
mov x11, x9
cmp w9, #7
b.lo LBB82_160
; %bb.12: ; in Loop: Header=BB82_8 Depth=1
cmp w9, #63
b.hs LBB82_58
; %bb.13: ; in Loop: Header=BB82_8 Depth=1
mov x16, #0
b LBB82_62
LBB82_14: ; in Loop: Header=BB82_8 Depth=1
tbnz w9, #31, LBB82_7
; %bb.15: ; in Loop: Header=BB82_8 Depth=1
mov x11, x9
cmp w9, #7
b.lo LBB82_73
; %bb.16: ; in Loop: Header=BB82_8 Depth=1
cmp w9, #15
b.hs LBB82_65
; %bb.17: ; in Loop: Header=BB82_8 Depth=1
mov x16, #0
b LBB82_70
LBB82_18: ; in Loop: Header=BB82_8 Depth=1
tbnz w9, #31, LBB82_7
; %bb.19: ; in Loop: Header=BB82_8 Depth=1
mov x12, x9
cmp w9, #7
b.lo LBB82_163
; %bb.20: ; in Loop: Header=BB82_8 Depth=1
cmp w9, #63
b.hs LBB82_75
; %bb.21: ; in Loop: Header=BB82_8 Depth=1
mov x16, #0
b LBB82_79
LBB82_22: ; in Loop: Header=BB82_8 Depth=1
tbnz w9, #31, LBB82_7
; %bb.23: ; in Loop: Header=BB82_8 Depth=1
mov x11, x9
cmp w9, #7
b.lo LBB82_166
; %bb.24: ; in Loop: Header=BB82_8 Depth=1
cmp w9, #63
b.hs LBB82_82
; %bb.25: ; in Loop: Header=BB82_8 Depth=1
mov x16, #0
b LBB82_86
LBB82_26: ; in Loop: Header=BB82_8 Depth=1
tbnz w9, #31, LBB82_7
; %bb.27: ; in Loop: Header=BB82_8 Depth=1
mov x11, x9
cmp w9, #8
b.lo LBB82_96
; %bb.28: ; in Loop: Header=BB82_8 Depth=1
cmp w9, #64
b.hs LBB82_89
; %bb.29: ; in Loop: Header=BB82_8 Depth=1
mov x16, #0
b LBB82_93
LBB82_30: ; in Loop: Header=BB82_8 Depth=1
tbnz w9, #31, LBB82_7
; %bb.31: ; in Loop: Header=BB82_8 Depth=1
mov x12, x9
cmp w9, #8
b.lo LBB82_105
; %bb.32: ; in Loop: Header=BB82_8 Depth=1
cmp w9, #64
b.hs LBB82_98
; %bb.33: ; in Loop: Header=BB82_8 Depth=1
mov x16, #0
b LBB82_102
LBB82_34: ; in Loop: Header=BB82_8 Depth=1
tbnz w9, #31, LBB82_7
; %bb.35: ; in Loop: Header=BB82_8 Depth=1
mov x11, x9
cmp w9, #7
b.lo LBB82_169
; %bb.36: ; in Loop: Header=BB82_8 Depth=1
cmp w9, #31
b.hs LBB82_107
; %bb.37: ; in Loop: Header=BB82_8 Depth=1
mov x16, #0
b LBB82_111
LBB82_38: ; in Loop: Header=BB82_8 Depth=1
tbnz w9, #31, LBB82_7
; %bb.39: ; in Loop: Header=BB82_8 Depth=1
mov x11, x9
cmp w9, #7
b.lo LBB82_122
; %bb.40: ; in Loop: Header=BB82_8 Depth=1
cmp w9, #15
b.hs LBB82_114
; %bb.41: ; in Loop: Header=BB82_8 Depth=1
mov x16, #0
b LBB82_119
LBB82_42: ; in Loop: Header=BB82_8 Depth=1
tbnz w9, #31, LBB82_7
; %bb.43: ; in Loop: Header=BB82_8 Depth=1
mov x11, x9
cmp w9, #7
b.lo LBB82_132
; %bb.44: ; in Loop: Header=BB82_8 Depth=1
cmp w9, #15
b.hs LBB82_124
; %bb.45: ; in Loop: Header=BB82_8 Depth=1
mov x16, #0
b LBB82_129
LBB82_46: ; in Loop: Header=BB82_8 Depth=1
tbnz w9, #31, LBB82_7
; %bb.47: ; in Loop: Header=BB82_8 Depth=1
mov x11, x9
cmp w9, #7
b.lo LBB82_172
; %bb.48: ; in Loop: Header=BB82_8 Depth=1
cmp w9, #31
b.hs LBB82_134
; %bb.49: ; in Loop: Header=BB82_8 Depth=1
mov x16, #0
b LBB82_138
LBB82_50: ; in Loop: Header=BB82_8 Depth=1
tbnz w9, #31, LBB82_7
; %bb.51: ; in Loop: Header=BB82_8 Depth=1
mov x11, x9
cmp w9, #8
b.lo LBB82_148
; %bb.52: ; in Loop: Header=BB82_8 Depth=1
cmp w9, #16
b.hs LBB82_141
; %bb.53: ; in Loop: Header=BB82_8 Depth=1
mov x16, #0
b LBB82_145
LBB82_54: ; in Loop: Header=BB82_8 Depth=1
tbnz w9, #31, LBB82_7
; %bb.55: ; in Loop: Header=BB82_8 Depth=1
mov x11, x9
cmp w9, #8
b.lo LBB82_157
; %bb.56: ; in Loop: Header=BB82_8 Depth=1
cmp w9, #32
b.hs LBB82_150
; %bb.57: ; in Loop: Header=BB82_8 Depth=1
mov x16, #0
b LBB82_154
LBB82_58: ; in Loop: Header=BB82_8 Depth=1
ldur x11, [x29, #-184] ; 8-byte Folded Reload
LBB82_59: ; Parent Loop BB82_8 Depth=1
; => This Inner Loop Header: Depth=2
ldp q18, q20, [x24, #-32]
mov.16b v19, v1
mov.16b v21, v1
ldp q22, q24, [x24], #64
mov.16b v23, v1
mov.16b v25, v1
sub x10, x2, #64
st2.16b { v18, v19 }, [x10]
sub x10, x2, #32
add x12, x2, #128
st2.16b { v20, v21 }, [x10]
st2.16b { v22, v23 }, [x2], #32
st2.16b { v24, v25 }, [x2]
mov x2, x12
subs x11, x11, #64
b.ne LBB82_59
; %bb.60: ; in Loop: Header=BB82_8 Depth=1
ldur x10, [x29, #-152] ; 8-byte Folded Reload
ldur x11, [x29, #-184] ; 8-byte Folded Reload
cmp x10, x11
b.eq LBB82_7
; %bb.61: ; in Loop: Header=BB82_8 Depth=1
ldur x16, [x29, #-184] ; 8-byte Folded Reload
ldr x10, [sp, #200] ; 8-byte Folded Reload
cbz x10, LBB82_159
LBB82_62: ; in Loop: Header=BB82_8 Depth=1
ldur x10, [x29, #-224] ; 8-byte Folded Reload
add x6, x6, x10
ldur x10, [x29, #-160] ; 8-byte Folded Reload
add x15, x15, x10
add x10, x16, x26
add x11, x19, x10
add x10, x17, x16, lsl #1
add x12, x23, x10
ldur x10, [x29, #-176] ; 8-byte Folded Reload
add x16, x10, x16
LBB82_63: ; Parent Loop BB82_8 Depth=1
; => This Inner Loop Header: Depth=2
ldr d2, [x11], #8
st2.8b { v2, v3 }, [x12], #16
adds x16, x16, #8
b.ne LBB82_63
; %bb.64: ; in Loop: Header=BB82_8 Depth=1
ldur w11, [x29, #-164] ; 4-byte Folded Reload
ldp x12, x10, [x29, #-160] ; 16-byte Folded Reload
cmp x10, x12
b.eq LBB82_7
b LBB82_160
LBB82_65: ; in Loop: Header=BB82_8 Depth=1
ldur x11, [x29, #-192] ; 8-byte Folded Reload
LBB82_66: ; Parent Loop BB82_8 Depth=1
; => This Inner Loop Header: Depth=2
ld4.16b { v18, v19, v20, v21 }, [x12], #64
umull.8h v22, v19, v5
umull2.8h v23, v19, v16
umlal2.8h v23, v18, v7
umlal.8h v22, v18, v4
umull2.8h v24, v20, v17
umull.8h v25, v20, v6
addhn.8b v20, v22, v25
addhn2.16b v20, v23, v24
st2.16b { v20, v21 }, [x28], #32
subs x11, x11, #16
b.ne LBB82_66
; %bb.67: ; in Loop: Header=BB82_8 Depth=1
ldur x10, [x29, #-152] ; 8-byte Folded Reload
ldur x11, [x29, #-192] ; 8-byte Folded Reload
cmp x10, x11
b.eq LBB82_7
; %bb.68: ; in Loop: Header=BB82_8 Depth=1
ldur x16, [x29, #-192] ; 8-byte Folded Reload
ldur x10, [x29, #-152] ; 8-byte Folded Reload
tbnz w10, #3, LBB82_70
; %bb.69: ; in Loop: Header=BB82_8 Depth=1
ldr x10, [sp, #48] ; 8-byte Folded Reload
add x15, x15, x10
ldr x10, [sp, #128] ; 8-byte Folded Reload
add x6, x6, x10
ldr w11, [sp, #140] ; 4-byte Folded Reload
b LBB82_73
LBB82_70: ; in Loop: Header=BB82_8 Depth=1
ldur x10, [x29, #-224] ; 8-byte Folded Reload
add x6, x6, x10
ldur x10, [x29, #-216] ; 8-byte Folded Reload
add x15, x15, x10
add x10, x26, x16, lsl #2
add x11, x19, x10
add x10, x17, x16, lsl #1
add x12, x23, x10
ldur x10, [x29, #-176] ; 8-byte Folded Reload
add x16, x10, x16
LBB82_71: ; Parent Loop BB82_8 Depth=1
; => This Inner Loop Header: Depth=2
ld4.8b { v18, v19, v20, v21 }, [x11], #32
umull.8h v22, v19, v5
umlal.8h v22, v18, v4
umull.8h v23, v20, v6
addhn.8b v20, v22, v23
st2.8b { v20, v21 }, [x12], #16
adds x16, x16, #8
b.ne LBB82_71
; %bb.72: ; in Loop: Header=BB82_8 Depth=1
ldur w11, [x29, #-164] ; 4-byte Folded Reload
ldp x12, x10, [x29, #-160] ; 16-byte Folded Reload
cmp x10, x12
b.eq LBB82_7
LBB82_73: ; in Loop: Header=BB82_8 Depth=1
add w11, w11, #1
LBB82_74: ; Parent Loop BB82_8 Depth=1
; => This Inner Loop Header: Depth=2
ldrb w10, [x15]
ldrb w12, [x15, #1]
ldrb w16, [x15, #2]
mul w10, w10, w5
madd w10, w12, w3, w10
madd w10, w16, w0, w10
lsr w10, w10, #8
strb w10, [x6]
ldrb w10, [x15, #3]
strb w10, [x6, #1]
sub w11, w11, #1
add x15, x15, #4
add x6, x6, #2
cmp w11, #0
b.gt LBB82_74
b LBB82_7
LBB82_75: ; in Loop: Header=BB82_8 Depth=1
ldur x12, [x29, #-184] ; 8-byte Folded Reload
LBB82_76: ; Parent Loop BB82_8 Depth=1
; => This Inner Loop Header: Depth=2
ldp q18, q21, [x24, #-32]
mov.16b v19, v18
mov.16b v20, v18
mov.16b v22, v21
mov.16b v23, v21
ldp q24, q27, [x24], #64
sub x10, x11, #96
sub x16, x11, #48
st3.16b { v18, v19, v20 }, [x10]
st3.16b { v21, v22, v23 }, [x16]
mov.16b v25, v24
mov.16b v26, v24
mov.16b v28, v27
mov.16b v29, v27
add x10, x11, #192
st3.16b { v24, v25, v26 }, [x11], #48
st3.16b { v27, v28, v29 }, [x11]
mov x11, x10
subs x12, x12, #64
b.ne LBB82_76
; %bb.77: ; in Loop: Header=BB82_8 Depth=1
ldur x10, [x29, #-152] ; 8-byte Folded Reload
ldur x11, [x29, #-184] ; 8-byte Folded Reload
cmp x10, x11
b.eq LBB82_7
; %bb.78: ; in Loop: Header=BB82_8 Depth=1
ldur x16, [x29, #-184] ; 8-byte Folded Reload
ldr x10, [sp, #200] ; 8-byte Folded Reload
cbz x10, LBB82_162
LBB82_79: ; in Loop: Header=BB82_8 Depth=1
ldur x10, [x29, #-232] ; 8-byte Folded Reload
add x6, x6, x10
ldur x10, [x29, #-160] ; 8-byte Folded Reload
add x15, x15, x10
add x10, x16, x26
add x11, x19, x10
add x10, x16, x16, lsl #1
add x10, x10, x17
add x12, x23, x10
ldur x10, [x29, #-176] ; 8-byte Folded Reload
add x16, x10, x16
LBB82_80: ; Parent Loop BB82_8 Depth=1
; => This Inner Loop Header: Depth=2
ldr d18, [x11], #8
fmov d19, d18
fmov d20, d18
st3.8b { v18, v19, v20 }, [x12], #24
adds x16, x16, #8
b.ne LBB82_80
; %bb.81: ; in Loop: Header=BB82_8 Depth=1
ldur w12, [x29, #-164] ; 4-byte Folded Reload
ldp x11, x10, [x29, #-160] ; 16-byte Folded Reload
cmp x10, x11
b.eq LBB82_7
b LBB82_163
LBB82_82: ; in Loop: Header=BB82_8 Depth=1
ldr x10, [sp, #144] ; 8-byte Folded Reload
add x12, x10, x17
ldur x11, [x29, #-184] ; 8-byte Folded Reload
LBB82_83: ; Parent Loop BB82_8 Depth=1
; => This Inner Loop Header: Depth=2
ldp q18, q22, [x24, #-32]
mov.16b v19, v18
mov.16b v20, v18
mov.16b v21, v1
mov.16b v23, v22
mov.16b v24, v22
mov.16b v25, v1
ldp q26, q8, [x24], #64
mov.16b v27, v26
mov.16b v28, v26
sub x10, x12, #128
sub x16, x12, #64
st4.16b { v18, v19, v20, v21 }, [x10]
st4.16b { v22, v23, v24, v25 }, [x16]
mov.16b v29, v1
mov.16b v9, v8
mov.16b v10, v8
mov.16b v11, v1
add x10, x12, #256
st4.16b { v26, v27, v28, v29 }, [x12], #64
st4.16b { v8, v9, v10, v11 }, [x12]
mov x12, x10
subs x11, x11, #64
b.ne LBB82_83
; %bb.84: ; in Loop: Header=BB82_8 Depth=1
ldur x10, [x29, #-152] ; 8-byte Folded Reload
ldur x11, [x29, #-184] ; 8-byte Folded Reload
cmp x10, x11
b.eq LBB82_7
; %bb.85: ; in Loop: Header=BB82_8 Depth=1
ldur x16, [x29, #-184] ; 8-byte Folded Reload
ldr x10, [sp, #200] ; 8-byte Folded Reload
cbz x10, LBB82_165
LBB82_86: ; in Loop: Header=BB82_8 Depth=1
ldur x10, [x29, #-216] ; 8-byte Folded Reload
add x6, x6, x10
ldur x10, [x29, #-160] ; 8-byte Folded Reload
add x15, x15, x10
add x10, x16, x26
add x11, x19, x10
add x10, x17, x16, lsl #2
add x12, x23, x10
ldur x10, [x29, #-176] ; 8-byte Folded Reload
add x16, x10, x16
LBB82_87: ; Parent Loop BB82_8 Depth=1
; => This Inner Loop Header: Depth=2
ldr d18, [x11], #8
fmov d19, d18
fmov d20, d18
fmov d21, d1
st4.8b { v18, v19, v20, v21 }, [x12], #32
adds x16, x16, #8
b.ne LBB82_87
; %bb.88: ; in Loop: Header=BB82_8 Depth=1
ldur w11, [x29, #-164] ; 4-byte Folded Reload
ldp x12, x10, [x29, #-160] ; 16-byte Folded Reload
cmp x10, x12
b.eq LBB82_7
b LBB82_166
LBB82_89: ; in Loop: Header=BB82_8 Depth=1
ldr x10, [sp, #152] ; 8-byte Folded Reload
add x11, x10, x17
ldur x12, [x29, #-248] ; 8-byte Folded Reload
LBB82_90: ; Parent Loop BB82_8 Depth=1
; => This Inner Loop Header: Depth=2
sub x10, x16, #64
ld2.16b { v18, v19 }, [x10]
sub x10, x16, #32
ld2.16b { v20, v21 }, [x10]
mov x10, x16
ld2.16b { v22, v23 }, [x10], #32
ld2.16b { v24, v25 }, [x10]
stp q18, q20, [x11, #-32]
stp q22, q24, [x11], #64
add x16, x16, #128
subs x12, x12, #64
b.ne LBB82_90
; %bb.91: ; in Loop: Header=BB82_8 Depth=1
ldur x16, [x29, #-248] ; 8-byte Folded Reload
ldr x10, [sp, #216] ; 8-byte Folded Reload
cmp x10, #8
b.hi LBB82_93
; %bb.92: ; in Loop: Header=BB82_8 Depth=1
ldr x10, [sp, #168] ; 8-byte Folded Reload
add x15, x15, x10
ldur x10, [x29, #-248] ; 8-byte Folded Reload
add x6, x6, x10
ldr w11, [sp, #164] ; 4-byte Folded Reload
b LBB82_96
LBB82_93: ; in Loop: Header=BB82_8 Depth=1
ldr x10, [sp, #256] ; 8-byte Folded Reload
add x6, x6, x10
ldr x10, [sp, #240] ; 8-byte Folded Reload
add x15, x15, x10
add x10, x26, x16, lsl #1
add x11, x19, x10
add x10, x16, x17
add x12, x23, x10
ldur x10, [x29, #-208] ; 8-byte Folded Reload
add x16, x10, x16
LBB82_94: ; Parent Loop BB82_8 Depth=1
; => This Inner Loop Header: Depth=2
ld2.8b { v18, v19 }, [x11], #16
str d18, [x12], #8
adds x16, x16, #8
b.ne LBB82_94
; %bb.95: ; in Loop: Header=BB82_8 Depth=1
ldur w11, [x29, #-196] ; 4-byte Folded Reload
LBB82_96: ; in Loop: Header=BB82_8 Depth=1
add w11, w11, #1
LBB82_97: ; Parent Loop BB82_8 Depth=1
; => This Inner Loop Header: Depth=2
ldrb w10, [x15], #2
strb w10, [x6], #1
sub w11, w11, #1
cmp w11, #0
b.gt LBB82_97
b LBB82_7
LBB82_98: ; in Loop: Header=BB82_8 Depth=1
ldur x12, [x29, #-248] ; 8-byte Folded Reload
LBB82_99: ; Parent Loop BB82_8 Depth=1
; => This Inner Loop Header: Depth=2
sub x10, x16, #64
sub x2, x16, #32
ld2.16b { v18, v19 }, [x10]
mov.16b v19, v18
mov.16b v20, v18
ld2.16b { v21, v22 }, [x2]
mov.16b v22, v21
mov.16b v23, v21
mov x10, x16
ld2.16b { v24, v25 }, [x10], #32
ld2.16b { v27, v28 }, [x10]
sub x10, x11, #96
sub x2, x11, #48
st3.16b { v18, v19, v20 }, [x10]
st3.16b { v21, v22, v23 }, [x2]
mov.16b v25, v24
mov.16b v26, v24
mov.16b v28, v27
mov.16b v29, v27
add x10, x11, #192
st3.16b { v24, v25, v26 }, [x11], #48
st3.16b { v27, v28, v29 }, [x11]
add x16, x16, #128
mov x11, x10
subs x12, x12, #64
b.ne LBB82_99
; %bb.100: ; in Loop: Header=BB82_8 Depth=1
ldur x16, [x29, #-248] ; 8-byte Folded Reload
ldr x10, [sp, #216] ; 8-byte Folded Reload
cmp x10, #8
b.hi LBB82_102
; %bb.101: ; in Loop: Header=BB82_8 Depth=1
ldr x10, [sp, #168] ; 8-byte Folded Reload
add x15, x15, x10
ldr x10, [sp, #96] ; 8-byte Folded Reload
add x6, x6, x10
ldr w12, [sp, #164] ; 4-byte Folded Reload
b LBB82_105
LBB82_102: ; in Loop: Header=BB82_8 Depth=1
ldr x10, [sp, #232] ; 8-byte Folded Reload
add x6, x6, x10
ldr x10, [sp, #240] ; 8-byte Folded Reload
add x15, x15, x10
lsl x10, x16, #1
add x11, x10, x26
add x11, x19, x11
add x10, x10, x16
add x10, x10, x17
add x12, x23, x10
ldur x10, [x29, #-208] ; 8-byte Folded Reload
add x16, x10, x16
LBB82_103: ; Parent Loop BB82_8 Depth=1
; => This Inner Loop Header: Depth=2
ld2.8b { v18, v19 }, [x11], #16
fmov d19, d18
fmov d20, d18
st3.8b { v18, v19, v20 }, [x12], #24
adds x16, x16, #8
b.ne LBB82_103
; %bb.104: ; in Loop: Header=BB82_8 Depth=1
ldur w12, [x29, #-196] ; 4-byte Folded Reload
LBB82_105: ; in Loop: Header=BB82_8 Depth=1
add w11, w12, #1
add x12, x6, #2
LBB82_106: ; Parent Loop BB82_8 Depth=1
; => This Inner Loop Header: Depth=2
ldrb w10, [x15], #2
strb w10, [x12]
sturb w10, [x12, #-1]
sturb w10, [x12, #-2]
sub w11, w11, #1
add x12, x12, #3
cmp w11, #0
b.gt LBB82_106
b LBB82_7
LBB82_107: ; in Loop: Header=BB82_8 Depth=1
ldur x11, [x29, #-240] ; 8-byte Folded Reload
LBB82_108: ; Parent Loop BB82_8 Depth=1
; => This Inner Loop Header: Depth=2
sub x10, x24, #32
ld2.16b { v20, v21 }, [x10]
ld2.16b { v24, v25 }, [x24]
mov.16b v18, v20
mov.16b v19, v20
mov.16b v22, v24
mov.16b v23, v24
sub x10, x2, #64
st4.16b { v18, v19, v20, v21 }, [x10]
st4.16b { v22, v23, v24, v25 }, [x2]
add x24, x24, #64
add x2, x2, #128
subs x11, x11, #32
b.ne LBB82_108
; %bb.109: ; in Loop: Header=BB82_8 Depth=1
ldur x10, [x29, #-152] ; 8-byte Folded Reload
ldur x11, [x29, #-240] ; 8-byte Folded Reload
cmp x10, x11
b.eq LBB82_7
; %bb.110: ; in Loop: Header=BB82_8 Depth=1
ldur x16, [x29, #-240] ; 8-byte Folded Reload
ldr x10, [sp, #176] ; 8-byte Folded Reload
cbz x10, LBB82_168
LBB82_111: ; in Loop: Header=BB82_8 Depth=1
ldur x10, [x29, #-216] ; 8-byte Folded Reload
add x6, x6, x10
ldur x10, [x29, #-224] ; 8-byte Folded Reload
add x15, x15, x10
add x10, x26, x16, lsl #1
add x11, x19, x10
add x10, x17, x16, lsl #2
add x12, x23, x10
ldur x10, [x29, #-176] ; 8-byte Folded Reload
add x16, x10, x16
LBB82_112: ; Parent Loop BB82_8 Depth=1
; => This Inner Loop Header: Depth=2
ld2.8b { v20, v21 }, [x11], #16
fmov d18, d20
fmov d19, d20
st4.8b { v18, v19, v20, v21 }, [x12], #32
adds x16, x16, #8
b.ne LBB82_112
; %bb.113: ; in Loop: Header=BB82_8 Depth=1
ldur w11, [x29, #-164] ; 4-byte Folded Reload
ldp x12, x10, [x29, #-160] ; 16-byte Folded Reload
cmp x10, x12
b.eq LBB82_7
b LBB82_169
LBB82_114: ; in Loop: Header=BB82_8 Depth=1
ldur x11, [x29, #-192] ; 8-byte Folded Reload
LBB82_115: ; Parent Loop BB82_8 Depth=1
; => This Inner Loop Header: Depth=2
ld3.16b { v18, v19, v20 }, [x12], #48
umull.8h v21, v19, v5
umull2.8h v22, v19, v16
umlal2.8h v22, v18, v7
umlal.8h v21, v18, v4
umull2.8h v23, v20, v17
umull.8h v18, v20, v6
addhn.8b v18, v21, v18
addhn2.16b v18, v22, v23
str q18, [x28], #16
subs x11, x11, #16
b.ne LBB82_115
; %bb.116: ; in Loop: Header=BB82_8 Depth=1
ldur x10, [x29, #-152] ; 8-byte Folded Reload
ldur x11, [x29, #-192] ; 8-byte Folded Reload
cmp x10, x11
b.eq LBB82_7
; %bb.117: ; in Loop: Header=BB82_8 Depth=1
ldur x16, [x29, #-192] ; 8-byte Folded Reload
ldur x10, [x29, #-152] ; 8-byte Folded Reload
tbnz w10, #3, LBB82_119
; %bb.118: ; in Loop: Header=BB82_8 Depth=1
ldr x10, [sp, #104] ; 8-byte Folded Reload
add x15, x15, x10
ldur x10, [x29, #-192] ; 8-byte Folded Reload
add x6, x6, x10
ldr w11, [sp, #140] ; 4-byte Folded Reload
b LBB82_122
LBB82_119: ; in Loop: Header=BB82_8 Depth=1
ldur x10, [x29, #-160] ; 8-byte Folded Reload
add x6, x6, x10
ldur x10, [x29, #-232] ; 8-byte Folded Reload
add x15, x15, x10
add x10, x16, x16, lsl #1
add x10, x10, x26
add x11, x19, x10
add x10, x16, x17
add x12, x23, x10
ldur x10, [x29, #-176] ; 8-byte Folded Reload
add x16, x10, x16
LBB82_120: ; Parent Loop BB82_8 Depth=1
; => This Inner Loop Header: Depth=2
ld3.8b { v18, v19, v20 }, [x11], #24
umull.8h v21, v19, v5
umlal.8h v21, v18, v4
umull.8h v18, v20, v6
addhn.8b v18, v21, v18
str d18, [x12], #8
adds x16, x16, #8
b.ne LBB82_120
; %bb.121: ; in Loop: Header=BB82_8 Depth=1
ldur w11, [x29, #-164] ; 4-byte Folded Reload
ldp x12, x10, [x29, #-160] ; 16-byte Folded Reload
cmp x10, x12
b.eq LBB82_7
LBB82_122: ; in Loop: Header=BB82_8 Depth=1
add w11, w11, #1
LBB82_123: ; Parent Loop BB82_8 Depth=1
; => This Inner Loop Header: Depth=2
ldrb w10, [x15]
ldrb w12, [x15, #1]
ldrb w16, [x15, #2]
mul w10, w10, w5
madd w10, w12, w3, w10
madd w10, w16, w0, w10
lsr w10, w10, #8
strb w10, [x6], #1
sub w11, w11, #1
add x15, x15, #3
cmp w11, #0
b.gt LBB82_123
b LBB82_7
LBB82_124: ; in Loop: Header=BB82_8 Depth=1
ldur x11, [x29, #-192] ; 8-byte Folded Reload
LBB82_125: ; Parent Loop BB82_8 Depth=1
; => This Inner Loop Header: Depth=2
ld3.16b { v18, v19, v20 }, [x12], #48
umull.8h v21, v19, v5
umull2.8h v22, v19, v16
umlal2.8h v22, v18, v7
umlal.8h v21, v18, v4
umull2.8h v23, v20, v17
umull.8h v18, v20, v6
addhn.8b v0, v21, v18
addhn2.16b v0, v22, v23
st2.16b { v0, v1 }, [x28], #32
subs x11, x11, #16
b.ne LBB82_125
; %bb.126: ; in Loop: Header=BB82_8 Depth=1
ldur x10, [x29, #-152] ; 8-byte Folded Reload
ldur x11, [x29, #-192] ; 8-byte Folded Reload
cmp x10, x11
b.eq LBB82_7
; %bb.127: ; in Loop: Header=BB82_8 Depth=1
ldur x16, [x29, #-192] ; 8-byte Folded Reload
ldur x10, [x29, #-152] ; 8-byte Folded Reload
tbnz w10, #3, LBB82_129
; %bb.128: ; in Loop: Header=BB82_8 Depth=1
ldr x10, [sp, #104] ; 8-byte Folded Reload
add x15, x15, x10
ldr x10, [sp, #128] ; 8-byte Folded Reload
add x6, x6, x10
ldr w11, [sp, #140] ; 4-byte Folded Reload
b LBB82_132
LBB82_129: ; in Loop: Header=BB82_8 Depth=1
ldur x10, [x29, #-224] ; 8-byte Folded Reload
add x6, x6, x10
ldur x10, [x29, #-232] ; 8-byte Folded Reload
add x15, x15, x10
lsl x10, x16, #1
add x11, x10, x16
add x11, x11, x26
add x11, x19, x11
add x10, x10, x17
add x12, x23, x10
ldur x10, [x29, #-176] ; 8-byte Folded Reload
add x16, x10, x16
LBB82_130: ; Parent Loop BB82_8 Depth=1
; => This Inner Loop Header: Depth=2
ld3.8b { v18, v19, v20 }, [x11], #24
umull.8h v21, v19, v5
umlal.8h v21, v18, v4
umull.8h v18, v20, v6
addhn.8b v18, v21, v18
fmov d19, d1
st2.8b { v18, v19 }, [x12], #16
adds x16, x16, #8
b.ne LBB82_130
; %bb.131: ; in Loop: Header=BB82_8 Depth=1
ldur w11, [x29, #-164] ; 4-byte Folded Reload
ldp x12, x10, [x29, #-160] ; 16-byte Folded Reload
cmp x10, x12
b.eq LBB82_7
LBB82_132: ; in Loop: Header=BB82_8 Depth=1
add w11, w11, #1
LBB82_133: ; Parent Loop BB82_8 Depth=1
; => This Inner Loop Header: Depth=2
ldrb w10, [x15]
ldrb w12, [x15, #1]
ldrb w16, [x15, #2]
mul w10, w10, w5
madd w10, w12, w3, w10
madd w10, w16, w0, w10
lsr w10, w10, #8
strb w10, [x6]
strb w7, [x6, #1]
sub w11, w11, #1
add x15, x15, #3
add x6, x6, #2
cmp w11, #0
b.gt LBB82_133
b LBB82_7
LBB82_134: ; in Loop: Header=BB82_8 Depth=1
ldur x11, [x29, #-240] ; 8-byte Folded Reload
LBB82_135: ; Parent Loop BB82_8 Depth=1
; => This Inner Loop Header: Depth=2
mov x10, x12
ld3.16b { v18, v19, v20 }, [x10], #48
mov.16b v21, v1
ld3.16b { v22, v23, v24 }, [x10]
mov.16b v25, v1
sub x10, x2, #64
st4.16b { v18, v19, v20, v21 }, [x10]
st4.16b { v22, v23, v24, v25 }, [x2]
add x12, x12, #96
add x2, x2, #128
subs x11, x11, #32
b.ne LBB82_135
; %bb.136: ; in Loop: Header=BB82_8 Depth=1
ldur x10, [x29, #-152] ; 8-byte Folded Reload
ldur x11, [x29, #-240] ; 8-byte Folded Reload
cmp x10, x11
b.eq LBB82_7
; %bb.137: ; in Loop: Header=BB82_8 Depth=1
ldur x16, [x29, #-240] ; 8-byte Folded Reload
ldr x10, [sp, #176] ; 8-byte Folded Reload
cbz x10, LBB82_171
LBB82_138: ; in Loop: Header=BB82_8 Depth=1
ldur x10, [x29, #-216] ; 8-byte Folded Reload
add x6, x6, x10
ldur x10, [x29, #-232] ; 8-byte Folded Reload
add x15, x15, x10
add x10, x16, x16, lsl #1
add x10, x10, x26
add x11, x19, x10
add x10, x17, x16, lsl #2
add x12, x23, x10
ldur x10, [x29, #-176] ; 8-byte Folded Reload
add x16, x10, x16
LBB82_139: ; Parent Loop BB82_8 Depth=1
; => This Inner Loop Header: Depth=2
ld3.8b { v18, v19, v20 }, [x11], #24
fmov d21, d1
st4.8b { v18, v19, v20, v21 }, [x12], #32
adds x16, x16, #8
b.ne LBB82_139
; %bb.140: ; in Loop: Header=BB82_8 Depth=1
ldur w11, [x29, #-164] ; 4-byte Folded Reload
ldp x12, x10, [x29, #-160] ; 16-byte Folded Reload
cmp x10, x12
b.eq LBB82_7
b LBB82_172
LBB82_141: ; in Loop: Header=BB82_8 Depth=1
ldr x11, [sp, #224] ; 8-byte Folded Reload
LBB82_142: ; Parent Loop BB82_8 Depth=1
; => This Inner Loop Header: Depth=2
ld4.16b { v18, v19, v20, v21 }, [x12], #64
umull.8h v22, v19, v5
umull2.8h v23, v19, v16
umlal2.8h v23, v18, v7
umlal.8h v22, v18, v4
umull2.8h v24, v20, v17
umull.8h v18, v20, v6
addhn.8b v18, v22, v18
addhn2.16b v18, v23, v24
str q18, [x28], #16
subs x11, x11, #16
b.ne LBB82_142
; %bb.143: ; in Loop: Header=BB82_8 Depth=1
ldr x16, [sp, #224] ; 8-byte Folded Reload
ldr x10, [sp, #192] ; 8-byte Folded Reload
cmp x10, #8
b.hi LBB82_145
; %bb.144: ; in Loop: Header=BB82_8 Depth=1
ldr x10, [sp, #112] ; 8-byte Folded Reload
add x15, x15, x10
ldr x10, [sp, #224] ; 8-byte Folded Reload
add x6, x6, x10
ldr w11, [sp, #124] ; 4-byte Folded Reload
b LBB82_148
LBB82_145: ; in Loop: Header=BB82_8 Depth=1
ldr x10, [sp, #256] ; 8-byte Folded Reload
add x6, x6, x10
ldr x10, [sp, #248] ; 8-byte Folded Reload
add x15, x15, x10
add x10, x26, x16, lsl #2
add x11, x19, x10
add x10, x16, x17
add x12, x23, x10
ldur x10, [x29, #-208] ; 8-byte Folded Reload
add x16, x10, x16
LBB82_146: ; Parent Loop BB82_8 Depth=1
; => This Inner Loop Header: Depth=2
ld4.8b { v18, v19, v20, v21 }, [x11], #32
umull.8h v22, v19, v5
umlal.8h v22, v18, v4
umull.8h v18, v20, v6
addhn.8b v18, v22, v18
str d18, [x12], #8
adds x16, x16, #8
b.ne LBB82_146
; %bb.147: ; in Loop: Header=BB82_8 Depth=1
ldur w11, [x29, #-196] ; 4-byte Folded Reload
LBB82_148: ; in Loop: Header=BB82_8 Depth=1
add w11, w11, #1
LBB82_149: ; Parent Loop BB82_8 Depth=1
; => This Inner Loop Header: Depth=2
ldrb w10, [x15]
ldrb w12, [x15, #1]
ldrb w16, [x15, #2]
mul w10, w10, w5
madd w10, w12, w3, w10
madd w10, w16, w0, w10
lsr w10, w10, #8
strb w10, [x6], #1
sub w11, w11, #1
add x15, x15, #4
cmp w11, #0
b.gt LBB82_149
b LBB82_7
LBB82_150: ; in Loop: Header=BB82_8 Depth=1
ldr x11, [sp, #208] ; 8-byte Folded Reload
LBB82_151: ; Parent Loop BB82_8 Depth=1
; => This Inner Loop Header: Depth=2
sub x10, x16, #64
ld4.16b { v18, v19, v20, v21 }, [x10]
ld4.16b { v22, v23, v24, v25 }, [x16]
add x10, x28, #96
st3.16b { v18, v19, v20 }, [x28], #48
st3.16b { v22, v23, v24 }, [x28]
add x16, x16, #128
mov x28, x10
subs x11, x11, #32
b.ne LBB82_151
; %bb.152: ; in Loop: Header=BB82_8 Depth=1
ldr x16, [sp, #208] ; 8-byte Folded Reload
ldr x10, [sp, #184] ; 8-byte Folded Reload
cmp x10, #8
b.hi LBB82_154
; %bb.153: ; in Loop: Header=BB82_8 Depth=1
ldr x10, [sp, #80] ; 8-byte Folded Reload
add x15, x15, x10
ldr x10, [sp, #72] ; 8-byte Folded Reload
add x6, x6, x10
ldr w11, [sp, #92] ; 4-byte Folded Reload
b LBB82_157
LBB82_154: ; in Loop: Header=BB82_8 Depth=1
ldr x10, [sp, #232] ; 8-byte Folded Reload
add x6, x6, x10
ldr x10, [sp, #248] ; 8-byte Folded Reload
add x15, x15, x10
add x10, x26, x16, lsl #2
add x11, x19, x10
add x10, x16, x16, lsl #1
add x10, x10, x17
add x12, x23, x10
ldur x10, [x29, #-208] ; 8-byte Folded Reload
add x16, x10, x16
LBB82_155: ; Parent Loop BB82_8 Depth=1
; => This Inner Loop Header: Depth=2
ld4.8b { v18, v19, v20, v21 }, [x11], #32
st3.8b { v18, v19, v20 }, [x12], #24
adds x16, x16, #8
b.ne LBB82_155
; %bb.156: ; in Loop: Header=BB82_8 Depth=1
ldur w11, [x29, #-196] ; 4-byte Folded Reload
LBB82_157: ; in Loop: Header=BB82_8 Depth=1
add w11, w11, #1
LBB82_158: ; Parent Loop BB82_8 Depth=1
; => This Inner Loop Header: Depth=2
ldrb w10, [x15]
strb w10, [x6]
ldurh w10, [x15, #1]
sturh w10, [x6, #1]
sub w11, w11, #1
add x15, x15, #4
add x6, x6, #3
cmp w11, #0
b.gt LBB82_158
b LBB82_7
LBB82_159: ; in Loop: Header=BB82_8 Depth=1
ldur x10, [x29, #-184] ; 8-byte Folded Reload
add x15, x15, x10
ldr x10, [sp, #16] ; 8-byte Folded Reload
add x6, x6, x10
ldr w11, [sp, #136] ; 4-byte Folded Reload
LBB82_160: ; in Loop: Header=BB82_8 Depth=1
add w11, w11, #1
LBB82_161: ; Parent Loop BB82_8 Depth=1
; => This Inner Loop Header: Depth=2
ldrb w10, [x15], #1
strb w10, [x6]
strb w7, [x6, #1]
sub w11, w11, #1
add x6, x6, #2
cmp w11, #0
b.gt LBB82_161
b LBB82_7
LBB82_162: ; in Loop: Header=BB82_8 Depth=1
ldur x10, [x29, #-184] ; 8-byte Folded Reload
add x15, x15, x10
ldr x10, [sp, #8] ; 8-byte Folded Reload
add x6, x6, x10
ldr w12, [sp, #136] ; 4-byte Folded Reload
LBB82_163: ; in Loop: Header=BB82_8 Depth=1
add w11, w12, #1
add x12, x6, #2
LBB82_164: ; Parent Loop BB82_8 Depth=1
; => This Inner Loop Header: Depth=2
ldrb w10, [x15], #1
strb w10, [x12]
sturb w10, [x12, #-1]
sturb w10, [x12, #-2]
sub w11, w11, #1
add x12, x12, #3
cmp w11, #0
b.gt LBB82_164
b LBB82_7
LBB82_165: ; in Loop: Header=BB82_8 Depth=1
ldur x10, [x29, #-184] ; 8-byte Folded Reload
add x15, x15, x10
ldr x10, [sp, #24] ; 8-byte Folded Reload
add x6, x6, x10
ldr w11, [sp, #136] ; 4-byte Folded Reload
LBB82_166: ; in Loop: Header=BB82_8 Depth=1
add w11, w11, #1
add x12, x6, #3
LBB82_167: ; Parent Loop BB82_8 Depth=1
; => This Inner Loop Header: Depth=2
ldrb w10, [x15], #1
sturb w10, [x12, #-1]
sturb w10, [x12, #-2]
sturb w10, [x12, #-3]
strb w7, [x12], #4
sub w11, w11, #1
cmp w11, #0
b.gt LBB82_167
b LBB82_7
LBB82_168: ; in Loop: Header=BB82_8 Depth=1
ldr x10, [sp, #40] ; 8-byte Folded Reload
add x15, x15, x10
ldr x10, [sp, #64] ; 8-byte Folded Reload
add x6, x6, x10
ldr w11, [sp, #60] ; 4-byte Folded Reload
LBB82_169: ; in Loop: Header=BB82_8 Depth=1
add w11, w11, #1
add x12, x6, #3
LBB82_170: ; Parent Loop BB82_8 Depth=1
; => This Inner Loop Header: Depth=2
ldrb w10, [x15]
sturb w10, [x12, #-1]
sturb w10, [x12, #-2]
sturb w10, [x12, #-3]
ldrb w10, [x15, #1]
strb w10, [x12], #4
sub w11, w11, #1
add x15, x15, #2
cmp w11, #0
b.gt LBB82_170
b LBB82_7
LBB82_171: ; in Loop: Header=BB82_8 Depth=1
ldr x10, [sp, #32] ; 8-byte Folded Reload
add x15, x15, x10
ldr x10, [sp, #64] ; 8-byte Folded Reload
add x6, x6, x10
ldr w11, [sp, #60] ; 4-byte Folded Reload
LBB82_172: ; in Loop: Header=BB82_8 Depth=1
add w11, w11, #1
LBB82_173: ; Parent Loop BB82_8 Depth=1
; => This Inner Loop Header: Depth=2
ldrb w10, [x15]
strb w10, [x6]
ldurh w10, [x15, #1]
sturh w10, [x6, #1]
strb w7, [x6, #3]
sub w11, w11, #1
add x15, x15, #3
add x6, x6, #4
cmp w11, #0
b.gt LBB82_173
b LBB82_7
LBB82_174:
mov x0, x19
bl _free
mov x19, x23
b LBB82_176
LBB82_175:
mov x0, x19
bl _free
mov x19, #0
Lloh450:
adrp x8, l_.str.5@PAGE
Lloh451:
add x8, x8, l_.str.5@PAGEOFF
adrp x9, _failure_reason@PAGE
str x8, [x9, _failure_reason@PAGEOFF]
LBB82_176:
mov x0, x19
add sp, sp, #400
ldp x29, x30, [sp, #112] ; 16-byte Folded Reload
ldp x20, x19, [sp, #96] ; 16-byte Folded Reload
ldp x22, x21, [sp, #80] ; 16-byte Folded Reload
ldp x24, x23, [sp, #64] ; 16-byte Folded Reload
ldp x26, x25, [sp, #48] ; 16-byte Folded Reload
ldp x28, x27, [sp, #32] ; 16-byte Folded Reload
ldp d9, d8, [sp, #16] ; 16-byte Folded Reload
ldp d11, d10, [sp], #128 ; 16-byte Folded Reload
ret
LBB82_177:
bl _convert_format.cold.1
LBB82_178:
bl _convert_format.cold.2
.loh AdrpAdd Lloh448, Lloh449
.loh AdrpAdd Lloh450, Lloh451
.cfi_endproc
	.section	__TEXT,__const
lJTI82_0:
; Dispatch table for a 25-way switch in _convert_format.  Each entry is one
; byte holding (target - LBB82_10) >> 2, i.e. the forward distance from the
; dispatch base LBB82_10 measured in 4-byte A64 instructions.  By analogy
; with the visible lJTI84_0 dispatch sequence in this file
; (ldrb entry; add base, base, entry, lsl #2; br base), the consumer scales
; the byte back up and branches relative to the base.  The many repeated
; LBB82_54 entries appear to be a shared handler for the remaining
; format combinations — NOTE(review): the dispatch code itself is outside
; this chunk; confirm against the full function.
	.byte	(LBB82_10-LBB82_10)>>2
	.byte	(LBB82_18-LBB82_10)>>2
	.byte	(LBB82_22-LBB82_10)>>2
	.byte	(LBB82_54-LBB82_10)>>2
	.byte	(LBB82_54-LBB82_10)>>2
	.byte	(LBB82_54-LBB82_10)>>2
	.byte	(LBB82_54-LBB82_10)>>2
	.byte	(LBB82_26-LBB82_10)>>2
	.byte	(LBB82_54-LBB82_10)>>2
	.byte	(LBB82_30-LBB82_10)>>2
	.byte	(LBB82_34-LBB82_10)>>2
	.byte	(LBB82_54-LBB82_10)>>2
	.byte	(LBB82_54-LBB82_10)>>2
	.byte	(LBB82_54-LBB82_10)>>2
	.byte	(LBB82_54-LBB82_10)>>2
	.byte	(LBB82_38-LBB82_10)>>2
	.byte	(LBB82_42-LBB82_10)>>2
	.byte	(LBB82_54-LBB82_10)>>2
	.byte	(LBB82_46-LBB82_10)>>2
	.byte	(LBB82_54-LBB82_10)>>2
	.byte	(LBB82_54-LBB82_10)>>2
	.byte	(LBB82_54-LBB82_10)>>2
	.byte	(LBB82_54-LBB82_10)>>2
	.byte	(LBB82_50-LBB82_10)>>2
	.byte	(LBB82_14-LBB82_10)>>2
; -- End function
.section __TEXT,__text,regular,pure_instructions
.p2align 2 ; -- Begin function get32
_get32: ; @get32
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x19, x0
ldr x0, [x0, #16]
cbz x0, LBB83_5
; %bb.1:
bl _fgetc
cmn w0, #1
csel w20, wzr, w0, eq
ldr x0, [x19, #16]
cbz x0, LBB83_7
; %bb.2:
bl _fgetc
cmn w0, #1
csel w21, wzr, w0, eq
ldr x0, [x19, #16]
cbz x0, LBB83_11
; %bb.3:
bl _fgetc
cmn w0, #1
csel w22, wzr, w0, eq
ldr x0, [x19, #16]
cbz x0, LBB83_15
; %bb.4:
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
b LBB83_17
LBB83_5:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB83_9
; %bb.6:
add x10, x8, #1
str x10, [x19, #24]
ldrb w20, [x8]
mov x8, x10
cmp x8, x9
b.lo LBB83_8
b LBB83_10
LBB83_7:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB83_10
LBB83_8:
add x9, x8, #1
str x9, [x19, #24]
ldrb w21, [x8]
b LBB83_11
LBB83_9:
mov w20, #0
cmp x8, x9
b.lo LBB83_8
LBB83_10:
mov w21, #0
LBB83_11:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB83_13
; %bb.12:
add x10, x8, #1
str x10, [x19, #24]
ldrb w22, [x8]
mov x8, x10
cmp x8, x9
b.lo LBB83_14
b LBB83_16
LBB83_13:
mov w22, #0
cmp x8, x9
b.hs LBB83_16
LBB83_14:
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
b LBB83_17
LBB83_15:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.lo LBB83_14
LBB83_16:
mov w8, #0
LBB83_17:
add w9, w21, w20, lsl #8
lsl w10, w22, #8
add w9, w10, w9, lsl #16
add w0, w9, w8
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
	.section	__TEXT,__literal16,16byte_literals
	.p2align	4                               ; -- Begin function create_png_image_raw
; The four 16-byte constants below are byte-shuffle control vectors for the
; NEON tbl instruction (a control byte of 0xff is out of range and selects
; zero).  Each mask gathers four consecutive source bytes into the low byte
; of each 32-bit lane, so together they zero-extend 16 packed u8 values into
; four u32x4 vectors for the vectorized Paeth-filter arithmetic in
; _create_png_image_raw (see the tbl.16b / add.4s sequence there):
;   lCPI84_3 -> source bytes 0..3     lCPI84_2 -> source bytes 4..7
;   lCPI84_1 -> source bytes 8..11    lCPI84_0 -> source bytes 12..15
lCPI84_0:                               ; widen bytes 12..15 to u32 lanes
	.byte	12                              ; 0xc
	.byte	255                             ; 0xff
	.byte	255                             ; 0xff
	.byte	255                             ; 0xff
	.byte	13                              ; 0xd
	.byte	255                             ; 0xff
	.byte	255                             ; 0xff
	.byte	255                             ; 0xff
	.byte	14                              ; 0xe
	.byte	255                             ; 0xff
	.byte	255                             ; 0xff
	.byte	255                             ; 0xff
	.byte	15                              ; 0xf
	.byte	255                             ; 0xff
	.byte	255                             ; 0xff
	.byte	255                             ; 0xff
lCPI84_1:                               ; widen bytes 8..11 to u32 lanes
	.byte	8                               ; 0x8
	.byte	255                             ; 0xff
	.byte	255                             ; 0xff
	.byte	255                             ; 0xff
	.byte	9                               ; 0x9
	.byte	255                             ; 0xff
	.byte	255                             ; 0xff
	.byte	255                             ; 0xff
	.byte	10                              ; 0xa
	.byte	255                             ; 0xff
	.byte	255                             ; 0xff
	.byte	255                             ; 0xff
	.byte	11                              ; 0xb
	.byte	255                             ; 0xff
	.byte	255                             ; 0xff
	.byte	255                             ; 0xff
lCPI84_2:                               ; widen bytes 4..7 to u32 lanes
	.byte	4                               ; 0x4
	.byte	255                             ; 0xff
	.byte	255                             ; 0xff
	.byte	255                             ; 0xff
	.byte	5                               ; 0x5
	.byte	255                             ; 0xff
	.byte	255                             ; 0xff
	.byte	255                             ; 0xff
	.byte	6                               ; 0x6
	.byte	255                             ; 0xff
	.byte	255                             ; 0xff
	.byte	255                             ; 0xff
	.byte	7                               ; 0x7
	.byte	255                             ; 0xff
	.byte	255                             ; 0xff
	.byte	255                             ; 0xff
lCPI84_3:                               ; widen bytes 0..3 to u32 lanes
	.byte	0                               ; 0x0
	.byte	255                             ; 0xff
	.byte	255                             ; 0xff
	.byte	255                             ; 0xff
	.byte	1                               ; 0x1
	.byte	255                             ; 0xff
	.byte	255                             ; 0xff
	.byte	255                             ; 0xff
	.byte	2                               ; 0x2
	.byte	255                             ; 0xff
	.byte	255                             ; 0xff
	.byte	255                             ; 0xff
	.byte	3                               ; 0x3
	.byte	255                             ; 0xff
	.byte	255                             ; 0xff
	.byte	255                             ; 0xff
.section __TEXT,__text,regular,pure_instructions
.p2align 2
_create_png_image_raw: ; @create_png_image_raw
.cfi_startproc
; %bb.0:
sub sp, sp, #240
.cfi_def_cfa_offset 240
stp x28, x27, [sp, #144] ; 16-byte Folded Spill
stp x26, x25, [sp, #160] ; 16-byte Folded Spill
stp x24, x23, [sp, #176] ; 16-byte Folded Spill
stp x22, x21, [sp, #192] ; 16-byte Folded Spill
stp x20, x19, [sp, #208] ; 16-byte Folded Spill
stp x29, x30, [sp, #224] ; 16-byte Folded Spill
add x29, sp, #224
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
.cfi_offset w23, -56
.cfi_offset w24, -64
.cfi_offset w25, -72
.cfi_offset w26, -80
.cfi_offset w27, -88
.cfi_offset w28, -96
ldrsw x24, [x0, #8]
add w8, w24, #1
cmp w24, w3
str w8, [sp, #104] ; 4-byte Folded Spill
ccmp w8, w3, #4, ne
b.ne LBB84_289
; %bb.1:
mov x22, x4
mov x19, x3
mov x23, x2
mov x21, x1
mov x25, x0
mul w9, w4, w3
Lloh452:
adrp x8, _stbi_png_partial@GOTPAGE
Lloh453:
ldr x8, [x8, _stbi_png_partial@GOTPAGEOFF]
Lloh454:
ldr w20, [x8]
cmp w20, #0
csinc w8, w5, wzr, eq
stur x9, [x29, #-104] ; 8-byte Folded Spill
stur x8, [x29, #-88] ; 8-byte Folded Spill
mul w0, w9, w8
bl _malloc
str x0, [x25, #56]
cbz x0, LBB84_283
; %bb.2:
cbnz w20, LBB84_5
; %bb.3:
ldr w8, [x25]
cmp w8, w22
b.ne LBB84_5
; %bb.4:
ldr w8, [x25, #4]
ldur x9, [x29, #-88] ; 8-byte Folded Reload
cmp w8, w9
mul w8, w24, w22
madd w8, w9, w8, w9
ccmp w8, w23, #4, eq
b.ne LBB84_287
LBB84_5:
ldur x8, [x29, #-88] ; 8-byte Folded Reload
cbz w8, LBB84_285
; %bb.6:
ldrb w12, [x21]
cmp w12, #4
b.hi LBB84_282
; %bb.7:
mov x8, #0
and x9, x24, #0xffffffff
ldur x15, [x29, #-104] ; 8-byte Folded Reload
neg x10, x15
sxtw x11, w19
sub w13, w22, #1
str w13, [sp, #108] ; 4-byte Folded Spill
mov w13, w19
and x23, x9, #0xffffffc0
and x14, x9, #0x38
str x14, [sp, #40] ; 8-byte Folded Spill
and x16, x9, #0xfffffff8
and x14, x9, #0xfffffff0
str x14, [sp, #16] ; 8-byte Folded Spill
and x1, x13, #0xffffffc0
and x14, x13, #0x38
str x14, [sp, #48] ; 8-byte Folded Spill
and x3, x13, #0xfffffff8
and x14, x13, #0xfffffff0
str x14, [sp, #24] ; 8-byte Folded Spill
add x14, x24, #33
str x14, [sp, #96] ; 8-byte Folded Spill
neg x14, x16
str x14, [sp, #88] ; 8-byte Folded Spill
mov w14, #48
sub x22, x14, x15
neg x30, x24
sub x14, x11, x24
str x14, [sp, #32] ; 8-byte Folded Spill
sub x14, x14, x15
str x14, [sp, #8] ; 8-byte Folded Spill
Lloh455:
adrp x28, lJTI84_0@PAGE
Lloh456:
add x28, x28, lJTI84_0@PAGEOFF
Lloh457:
adrp x14, lCPI84_2@PAGE
Lloh458:
ldr q0, [x14, lCPI84_2@PAGEOFF]
Lloh459:
adrp x14, lCPI84_3@PAGE
Lloh460:
ldr q1, [x14, lCPI84_3@PAGEOFF]
Lloh461:
adrp x14, lCPI84_0@PAGE
Lloh462:
ldr q2, [x14, lCPI84_0@PAGEOFF]
Lloh463:
adrp x14, lCPI84_1@PAGE
Lloh464:
ldr q3, [x14, lCPI84_1@PAGEOFF]
str x25, [sp, #112] ; 8-byte Folded Spill
stur x30, [x29, #-96] ; 8-byte Folded Spill
LBB84_8: ; =>This Loop Header: Depth=1
; Child Loop BB84_15 Depth 2
; Child Loop BB84_262 Depth 2
; Child Loop BB84_271 Depth 3
; Child Loop BB84_275 Depth 3
; Child Loop BB84_278 Depth 3
; Child Loop BB84_242 Depth 2
; Child Loop BB84_251 Depth 3
; Child Loop BB84_255 Depth 3
; Child Loop BB84_258 Depth 3
; Child Loop BB84_225 Depth 2
; Child Loop BB84_235 Depth 3
; Child Loop BB84_232 Depth 3
; Child Loop BB84_238 Depth 3
; Child Loop BB84_206 Depth 2
; Child Loop BB84_216 Depth 3
; Child Loop BB84_219 Depth 3
; Child Loop BB84_221 Depth 3
; Child Loop BB84_187 Depth 2
; Child Loop BB84_196 Depth 3
; Child Loop BB84_199 Depth 3
; Child Loop BB84_202 Depth 3
; Child Loop BB84_167 Depth 2
; Child Loop BB84_176 Depth 3
; Child Loop BB84_180 Depth 3
; Child Loop BB84_183 Depth 3
; Child Loop BB84_43 Depth 2
; Child Loop BB84_49 Depth 3
; Child Loop BB84_52 Depth 3
; Child Loop BB84_54 Depth 3
; Child Loop BB84_149 Depth 2
; Child Loop BB84_158 Depth 3
; Child Loop BB84_161 Depth 3
; Child Loop BB84_163 Depth 3
; Child Loop BB84_131 Depth 2
; Child Loop BB84_140 Depth 3
; Child Loop BB84_143 Depth 3
; Child Loop BB84_145 Depth 3
; Child Loop BB84_114 Depth 2
; Child Loop BB84_122 Depth 3
; Child Loop BB84_125 Depth 3
; Child Loop BB84_127 Depth 3
; Child Loop BB84_95 Depth 2
; Child Loop BB84_105 Depth 3
; Child Loop BB84_108 Depth 3
; Child Loop BB84_110 Depth 3
; Child Loop BB84_76 Depth 2
; Child Loop BB84_85 Depth 3
; Child Loop BB84_88 Depth 3
; Child Loop BB84_91 Depth 3
; Child Loop BB84_58 Depth 2
; Child Loop BB84_67 Depth 3
; Child Loop BB84_70 Depth 3
; Child Loop BB84_72 Depth 3
; Child Loop BB84_25 Depth 2
; Child Loop BB84_31 Depth 3
; Child Loop BB84_34 Depth 3
; Child Loop BB84_36 Depth 3
cbnz x8, LBB84_10
; %bb.9: ; in Loop: Header=BB84_8 Depth=1
and x12, x12, #0xff
Lloh465:
adrp x14, _first_row_filter@PAGE
Lloh466:
add x14, x14, _first_row_filter@PAGEOFF
ldrb w12, [x14, x12]
LBB84_10: ; in Loop: Header=BB84_8 Depth=1
add x25, x21, #1
cmp w24, #1
b.lt LBB84_19
; %bb.11: ; in Loop: Header=BB84_8 Depth=1
mov x15, x9
mov x2, x25
mov x5, x0
b LBB84_15
LBB84_12: ; in Loop: Header=BB84_15 Depth=2
ldrb w14, [x2]
LBB84_13: ; in Loop: Header=BB84_15 Depth=2
strb w14, [x5]
LBB84_14: ; in Loop: Header=BB84_15 Depth=2
add x5, x5, #1
add x2, x2, #1
subs x15, x15, #1
b.eq LBB84_19
LBB84_15: ; Parent Loop BB84_8 Depth=1
; => This Inner Loop Header: Depth=2
cmp w12, #6
b.hi LBB84_14
; %bb.16: ; in Loop: Header=BB84_15 Depth=2
mov w14, w12
adr x17, LBB84_12
ldrb w4, [x28, x14]
add x17, x17, x4, lsl #2
br x17
LBB84_17: ; in Loop: Header=BB84_15 Depth=2
ldrb w14, [x2]
ldrb w17, [x5, x10]
add w14, w17, w14
b LBB84_13
LBB84_18: ; in Loop: Header=BB84_15 Depth=2
ldrb w14, [x2]
ldrb w17, [x5, x10]
add w14, w14, w17, lsr #1
b LBB84_13
LBB84_19: ; in Loop: Header=BB84_8 Depth=1
add x7, x0, x10
cmp w9, w19
b.ne LBB84_37
; %bb.20: ; in Loop: Header=BB84_8 Depth=1
add x20, x25, x24
cmp w12, #6
b.hi LBB84_279
; %bb.21: ; in Loop: Header=BB84_8 Depth=1
add x15, x0, x11
add x2, x7, x11
mov w12, w12
Lloh467:
adrp x4, lJTI84_2@PAGE
Lloh468:
add x4, x4, lJTI84_2@PAGEOFF
adr x14, LBB84_22
ldrh w17, [x4, x12, lsl #1]
add x14, x14, x17, lsl #2
br x14
LBB84_22: ; in Loop: Header=BB84_8 Depth=1
ldr w6, [sp, #108] ; 4-byte Folded Reload
cbz w6, LBB84_279
; %bb.23: ; in Loop: Header=BB84_8 Depth=1
mov x12, #0
add x0, x11, x0
add x2, x24, x25
ldr x14, [sp, #96] ; 8-byte Folded Reload
add x5, x21, x14
mov x21, x20
b LBB84_25
LBB84_24: ; in Loop: Header=BB84_25 Depth=2
add x21, x21, x24
add x15, x15, x24
add x12, x12, #1
add x5, x5, x24
subs w6, w6, #1
b.eq LBB84_280
LBB84_25: ; Parent Loop BB84_8 Depth=1
; => This Loop Header: Depth=2
; Child Loop BB84_31 Depth 3
; Child Loop BB84_34 Depth 3
; Child Loop BB84_36 Depth 3
cmp w24, #1
b.lt LBB84_24
; %bb.26: ; in Loop: Header=BB84_25 Depth=2
mov x14, #0
cmp w19, #8
b.lo LBB84_36
; %bb.27: ; in Loop: Header=BB84_25 Depth=2
mul x17, x12, x24
add x4, x2, x17
add x17, x0, x17
sub x17, x17, x4
cmp x17, #64
b.lo LBB84_36
; %bb.28: ; in Loop: Header=BB84_25 Depth=2
cmp w19, #64
b.hs LBB84_30
; %bb.29: ; in Loop: Header=BB84_25 Depth=2
mov x17, #0
b LBB84_34
LBB84_30: ; in Loop: Header=BB84_25 Depth=2
mov x14, #0
LBB84_31: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_25 Depth=2
; => This Inner Loop Header: Depth=3
add x17, x5, x14
ldp q4, q5, [x17, #-32]
ldp q6, q7, [x17]
add x17, x15, x14
stp q4, q5, [x17]
stp q6, q7, [x17, #32]
add x14, x14, #64
cmp x1, x14
b.ne LBB84_31
; %bb.32: ; in Loop: Header=BB84_25 Depth=2
cmp x1, x13
b.eq LBB84_24
; %bb.33: ; in Loop: Header=BB84_25 Depth=2
mov x17, x1
mov x14, x1
ldr x4, [sp, #48] ; 8-byte Folded Reload
cbz x4, LBB84_36
LBB84_34: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_25 Depth=2
; => This Inner Loop Header: Depth=3
ldr d4, [x21, x17]
str d4, [x15, x17]
add x17, x17, #8
cmp x3, x17
b.ne LBB84_34
; %bb.35: ; in Loop: Header=BB84_25 Depth=2
mov x14, x3
cmp x3, x13
b.eq LBB84_24
LBB84_36: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_25 Depth=2
; => This Inner Loop Header: Depth=3
ldrb w17, [x21, x14]
strb w17, [x15, x14]
add x14, x14, #1
cmp x13, x14
b.ne LBB84_36
b LBB84_24
LBB84_37: ; in Loop: Header=BB84_8 Depth=1
mov w14, #255
strb w14, [x0, x24]
ldr w14, [sp, #104] ; 4-byte Folded Reload
cmp w14, w19
b.ne LBB84_288
; %bb.38: ; in Loop: Header=BB84_8 Depth=1
add x20, x25, x24
cmp w12, #6
b.hi LBB84_279
; %bb.39: ; in Loop: Header=BB84_8 Depth=1
add x2, x7, x11
add x4, x0, x11
mov w12, w12
Lloh469:
adrp x17, lJTI84_1@PAGE
Lloh470:
add x17, x17, lJTI84_1@PAGEOFF
adr x14, LBB84_40
ldrh w15, [x17, x12, lsl #1]
add x14, x14, x15, lsl #2
br x14
LBB84_40: ; in Loop: Header=BB84_8 Depth=1
ldr w6, [sp, #108] ; 4-byte Folded Reload
cbz w6, LBB84_279
; %bb.41: ; in Loop: Header=BB84_8 Depth=1
mov x12, #0
add x0, x11, x0
add x2, x24, x25
ldr x14, [sp, #96] ; 8-byte Folded Reload
add x5, x21, x14
mov x21, x20
b LBB84_43
LBB84_42: ; in Loop: Header=BB84_43 Depth=2
mov w14, #255
strb w14, [x4, x24]
add x21, x21, x24
add x4, x4, x11
add x12, x12, #1
add x5, x5, x24
subs w6, w6, #1
b.eq LBB84_280
LBB84_43: ; Parent Loop BB84_8 Depth=1
; => This Loop Header: Depth=2
; Child Loop BB84_49 Depth 3
; Child Loop BB84_52 Depth 3
; Child Loop BB84_54 Depth 3
cmp w9, #1
b.lt LBB84_42
; %bb.44: ; in Loop: Header=BB84_43 Depth=2
mov x14, #0
cmp w9, #8
b.lo LBB84_54
; %bb.45: ; in Loop: Header=BB84_43 Depth=2
madd x15, x12, x24, x2
madd x17, x12, x11, x0
sub x15, x17, x15
cmp x15, #64
b.lo LBB84_54
; %bb.46: ; in Loop: Header=BB84_43 Depth=2
cmp w24, #64
b.hs LBB84_48
; %bb.47: ; in Loop: Header=BB84_43 Depth=2
mov x15, #0
b LBB84_52
LBB84_48: ; in Loop: Header=BB84_43 Depth=2
mov x14, #0
LBB84_49: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_43 Depth=2
; => This Inner Loop Header: Depth=3
add x15, x5, x14
ldp q4, q5, [x15, #-32]
ldp q6, q7, [x15]
add x15, x4, x14
stp q4, q5, [x15]
stp q6, q7, [x15, #32]
add x14, x14, #64
cmp x23, x14
b.ne LBB84_49
; %bb.50: ; in Loop: Header=BB84_43 Depth=2
cmp x23, x9
b.eq LBB84_42
; %bb.51: ; in Loop: Header=BB84_43 Depth=2
mov x15, x23
mov x14, x23
ldr x17, [sp, #40] ; 8-byte Folded Reload
cbz x17, LBB84_54
LBB84_52: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_43 Depth=2
; => This Inner Loop Header: Depth=3
ldr d4, [x21, x15]
str d4, [x4, x15]
add x15, x15, #8
cmp x16, x15
b.ne LBB84_52
; %bb.53: ; in Loop: Header=BB84_43 Depth=2
mov x14, x16
cmp x16, x9
b.eq LBB84_42
LBB84_54: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_43 Depth=2
; => This Inner Loop Header: Depth=3
ldrb w15, [x21, x14]
strb w15, [x4, x14]
add x14, x14, #1
cmp x9, x14
b.ne LBB84_54
b LBB84_42
LBB84_55: ; in Loop: Header=BB84_8 Depth=1
ldr w7, [sp, #108] ; 4-byte Folded Reload
cbz w7, LBB84_279
; %bb.56: ; in Loop: Header=BB84_8 Depth=1
mov x12, #0
add x2, x11, x0
add x5, x24, x25
ldr x14, [sp, #96] ; 8-byte Folded Reload
add x6, x21, x14
ldr x14, [sp, #32] ; 8-byte Folded Reload
add x0, x0, x14
mov x21, x20
b LBB84_58
LBB84_57: ; in Loop: Header=BB84_58 Depth=2
add x21, x21, x24
add x15, x15, x24
add x12, x12, #1
add x6, x6, x24
add x0, x0, x24
subs w7, w7, #1
b.eq LBB84_280
LBB84_58: ; Parent Loop BB84_8 Depth=1
; => This Loop Header: Depth=2
; Child Loop BB84_67 Depth 3
; Child Loop BB84_70 Depth 3
; Child Loop BB84_72 Depth 3
cmp w9, #1
b.lt LBB84_57
; %bb.59: ; in Loop: Header=BB84_58 Depth=2
cmp w19, #8
b.hs LBB84_61
; %bb.60: ; in Loop: Header=BB84_58 Depth=2
mov x17, #0
b LBB84_72
LBB84_61: ; in Loop: Header=BB84_58 Depth=2
mov x17, #0
mul x14, x12, x24
add x4, x2, x14
add x14, x5, x14
sub x14, x4, x14
cmp x14, #64
b.lo LBB84_72
; %bb.62: ; in Loop: Header=BB84_58 Depth=2
b.lo LBB84_72
; %bb.63: ; in Loop: Header=BB84_58 Depth=2
cmp w9, #64
b.lo LBB84_72
; %bb.64: ; in Loop: Header=BB84_58 Depth=2
cmp w19, #64
b.hs LBB84_66
; %bb.65: ; in Loop: Header=BB84_58 Depth=2
mov x14, #0
b LBB84_70
LBB84_66: ; in Loop: Header=BB84_58 Depth=2
mov x14, #0
add x17, x15, x30
LBB84_67: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_58 Depth=2
; => This Inner Loop Header: Depth=3
add x4, x6, x14
ldp q4, q5, [x4, #-32]
ldp q6, q7, [x4]
ldr q16, [x17, x14]
add x4, x17, x14
ldp q17, q18, [x4, #16]
ldr q19, [x4, #48]
add.16b v4, v16, v4
add.16b v5, v17, v5
add.16b v6, v18, v6
add.16b v7, v19, v7
add x4, x15, x14
stp q4, q5, [x4]
stp q6, q7, [x4, #32]
add x14, x14, #64
cmp x1, x14
b.ne LBB84_67
; %bb.68: ; in Loop: Header=BB84_58 Depth=2
cmp x1, x13
b.eq LBB84_57
; %bb.69: ; in Loop: Header=BB84_58 Depth=2
mov x14, x1
mov x17, x1
ldr x4, [sp, #48] ; 8-byte Folded Reload
cbz x4, LBB84_72
LBB84_70: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_58 Depth=2
; => This Inner Loop Header: Depth=3
ldr d4, [x21, x14]
ldr d5, [x0, x14]
add.8b v4, v5, v4
str d4, [x15, x14]
add x14, x14, #8
cmp x3, x14
b.ne LBB84_70
; %bb.71: ; in Loop: Header=BB84_58 Depth=2
mov x17, x3
cmp x3, x13
b.eq LBB84_57
LBB84_72: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_58 Depth=2
; => This Inner Loop Header: Depth=3
ldrb w14, [x21, x17]
ldrb w4, [x0, x17]
add w14, w4, w14
strb w14, [x15, x17]
add x17, x17, #1
cmp x13, x17
b.ne LBB84_72
b LBB84_57
LBB84_73: ; in Loop: Header=BB84_8 Depth=1
ldr w14, [sp, #108] ; 4-byte Folded Reload
cbz w14, LBB84_279
; %bb.74: ; in Loop: Header=BB84_8 Depth=1
mov x12, #0
add x0, x11, x0
add x5, x24, x25
add x6, x11, x7
ldr x17, [sp, #96] ; 8-byte Folded Reload
add x7, x21, x17
mov x25, x14
mov x21, x20
b LBB84_76
LBB84_75: ; in Loop: Header=BB84_76 Depth=2
add x21, x21, x24
add x15, x15, x24
add x2, x2, x24
add x12, x12, #1
add x7, x7, x24
subs w25, w25, #1
b.eq LBB84_280
LBB84_76: ; Parent Loop BB84_8 Depth=1
; => This Loop Header: Depth=2
; Child Loop BB84_85 Depth 3
; Child Loop BB84_88 Depth 3
; Child Loop BB84_91 Depth 3
cmp w24, #1
b.lt LBB84_75
; %bb.77: ; in Loop: Header=BB84_76 Depth=2
cmp w19, #8
b.hs LBB84_79
; %bb.78: ; in Loop: Header=BB84_76 Depth=2
mov x20, #0
b LBB84_90
LBB84_79: ; in Loop: Header=BB84_76 Depth=2
mov x20, #0
mul x14, x12, x24
add x17, x0, x14
add x4, x5, x14
sub x4, x17, x4
add x14, x6, x14
sub x14, x17, x14
cmp x4, #64
b.lo LBB84_90
; %bb.80: ; in Loop: Header=BB84_76 Depth=2
b.lo LBB84_90
; %bb.81: ; in Loop: Header=BB84_76 Depth=2
cmp x14, #64
b.lo LBB84_90
; %bb.82: ; in Loop: Header=BB84_76 Depth=2
cmp w19, #64
b.hs LBB84_84
; %bb.83: ; in Loop: Header=BB84_76 Depth=2
mov x14, #0
b LBB84_88
LBB84_84: ; in Loop: Header=BB84_76 Depth=2
mov x14, #0
add x17, x15, x10
LBB84_85: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_76 Depth=2
; => This Inner Loop Header: Depth=3
add x4, x7, x14
ldp q4, q5, [x4, #-32]
ldp q6, q7, [x4]
ldr q16, [x17, x14]
add x4, x17, x14
ldp q17, q18, [x4, #16]
ldr q19, [x4, #48]
add.16b v4, v16, v4
add.16b v5, v17, v5
add.16b v6, v18, v6
add.16b v7, v19, v7
add x4, x15, x14
stp q4, q5, [x4]
stp q6, q7, [x4, #32]
add x14, x14, #64
cmp x1, x14
b.ne LBB84_85
; %bb.86: ; in Loop: Header=BB84_76 Depth=2
cmp x1, x13
b.eq LBB84_75
; %bb.87: ; in Loop: Header=BB84_76 Depth=2
mov x14, x1
mov x20, x1
ldr x17, [sp, #48] ; 8-byte Folded Reload
cbz x17, LBB84_90
LBB84_88: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_76 Depth=2
; => This Inner Loop Header: Depth=3
ldr d4, [x21, x14]
ldr d5, [x2, x14]
add.8b v4, v5, v4
str d4, [x15, x14]
add x14, x14, #8
cmp x3, x14
b.ne LBB84_88
; %bb.89: ; in Loop: Header=BB84_76 Depth=2
mov x20, x3
cmp x3, x13
b.eq LBB84_75
LBB84_90: ; in Loop: Header=BB84_76 Depth=2
add x14, x15, x20
LBB84_91: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_76 Depth=2
; => This Inner Loop Header: Depth=3
ldrb w17, [x21, x20]
ldrb w4, [x2, x20]
add w17, w4, w17
strb w17, [x14], #1
add x20, x20, #1
cmp x13, x20
b.ne LBB84_91
b LBB84_75
LBB84_92: ; in Loop: Header=BB84_8 Depth=1
ldr w26, [sp, #108] ; 4-byte Folded Reload
cbz w26, LBB84_279
; %bb.93: ; in Loop: Header=BB84_8 Depth=1
mov x12, #0
add x5, x11, x0
add x6, x24, x25
add x7, x11, x7
ldr x14, [sp, #96] ; 8-byte Folded Reload
add x25, x21, x14
ldr x14, [sp, #32] ; 8-byte Folded Reload
add x0, x0, x14
mov x21, x20
b LBB84_95
LBB84_94: ; in Loop: Header=BB84_95 Depth=2
add x21, x21, x24
add x15, x15, x24
add x2, x2, x24
add x12, x12, #1
add x25, x25, x24
add x0, x0, x24
subs w26, w26, #1
b.eq LBB84_280
LBB84_95: ; Parent Loop BB84_8 Depth=1
; => This Loop Header: Depth=2
; Child Loop BB84_105 Depth 3
; Child Loop BB84_108 Depth 3
; Child Loop BB84_110 Depth 3
cmp w9, #1
b.lt LBB84_94
; %bb.96: ; in Loop: Header=BB84_95 Depth=2
cmp w19, #8
b.hs LBB84_98
; %bb.97: ; in Loop: Header=BB84_95 Depth=2
mov x20, #0
b LBB84_110
LBB84_98: ; in Loop: Header=BB84_95 Depth=2
mov x20, #0
mul x14, x12, x24
add x17, x5, x14
add x4, x6, x14
sub x4, x17, x4
add x14, x7, x14
sub x14, x17, x14
cmp x4, #64
b.lo LBB84_110
; %bb.99: ; in Loop: Header=BB84_95 Depth=2
b.lo LBB84_110
; %bb.100: ; in Loop: Header=BB84_95 Depth=2
cmp x14, #64
b.lo LBB84_110
; %bb.101: ; in Loop: Header=BB84_95 Depth=2
cmp w24, #64
b.lo LBB84_110
; %bb.102: ; in Loop: Header=BB84_95 Depth=2
cmp w19, #64
b.hs LBB84_104
; %bb.103: ; in Loop: Header=BB84_95 Depth=2
mov x14, #0
b LBB84_108
LBB84_104: ; in Loop: Header=BB84_95 Depth=2
mov x14, x25
mov x17, x15
mov x4, x1
LBB84_105: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_95 Depth=2
; => This Inner Loop Header: Depth=3
ldp q4, q5, [x14, #-32]
ldp q6, q7, [x14], #64
add x20, x17, x22
ldp q16, q17, [x20, #-48]
ldp q18, q19, [x20, #-16]
add x20, x17, x30
ldp q20, q21, [x20]
ldp q22, q23, [x20, #32]
uhadd.16b v16, v20, v16
uhadd.16b v17, v21, v17
uhadd.16b v18, v22, v18
uhadd.16b v19, v23, v19
add.16b v4, v4, v16
add.16b v5, v5, v17
add.16b v6, v6, v18
add.16b v7, v7, v19
stp q4, q5, [x17]
stp q6, q7, [x17, #32]
add x17, x17, #64
subs x4, x4, #64
b.ne LBB84_105
; %bb.106: ; in Loop: Header=BB84_95 Depth=2
cmp x1, x13
b.eq LBB84_94
; %bb.107: ; in Loop: Header=BB84_95 Depth=2
mov x14, x1
mov x20, x1
ldr x17, [sp, #48] ; 8-byte Folded Reload
cbz x17, LBB84_110
LBB84_108: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_95 Depth=2
; => This Inner Loop Header: Depth=3
ldr d4, [x21, x14]
ldr d5, [x2, x14]
ldr d6, [x0, x14]
uhadd.8b v5, v6, v5
add.8b v4, v4, v5
str d4, [x15, x14]
add x14, x14, #8
cmp x3, x14
b.ne LBB84_108
; %bb.109: ; in Loop: Header=BB84_95 Depth=2
mov x20, x3
cmp x3, x13
b.eq LBB84_94
LBB84_110: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_95 Depth=2
; => This Inner Loop Header: Depth=3
ldrb w14, [x21, x20]
ldrb w17, [x2, x20]
ldrb w4, [x0, x20]
add w17, w4, w17
add w14, w14, w17, lsr #1
strb w14, [x15, x20]
add x20, x20, #1
cmp x13, x20
b.ne LBB84_110
b LBB84_94
LBB84_111: ; in Loop: Header=BB84_8 Depth=1
ldr w27, [sp, #108] ; 4-byte Folded Reload
cbz w27, LBB84_279
; %bb.112: ; in Loop: Header=BB84_8 Depth=1
mov x6, #0
add x12, x11, x0
str x12, [sp, #80] ; 8-byte Folded Spill
add x12, x24, x25
str x12, [sp, #72] ; 8-byte Folded Spill
add x12, x11, x7
str x12, [sp, #64] ; 8-byte Folded Spill
ldr x12, [sp, #32] ; 8-byte Folded Reload
add x14, x12, x7
str x14, [sp, #56] ; 8-byte Folded Spill
ldr x14, [sp, #8] ; 8-byte Folded Reload
add x26, x0, x14
add x0, x0, x12
mov x21, x20
b LBB84_114
LBB84_113: ; in Loop: Header=BB84_114 Depth=2
add x21, x21, x24
add x15, x15, x24
add x2, x2, x24
add x6, x6, #1
add x26, x26, x24
add x0, x0, x24
subs w27, w27, #1
b.eq LBB84_280
LBB84_114: ; Parent Loop BB84_8 Depth=1
; => This Loop Header: Depth=2
; Child Loop BB84_122 Depth 3
; Child Loop BB84_125 Depth 3
; Child Loop BB84_127 Depth 3
cmp w24, #1
b.lt LBB84_113
; %bb.115: ; in Loop: Header=BB84_114 Depth=2
cmp w19, #8
b.hs LBB84_117
; %bb.116: ; in Loop: Header=BB84_114 Depth=2
mov x14, #0
b LBB84_127
LBB84_117: ; in Loop: Header=BB84_114 Depth=2
mul x14, x6, x24
ldr x12, [sp, #80] ; 8-byte Folded Reload
add x17, x12, x14
ldr x12, [sp, #72] ; 8-byte Folded Reload
add x4, x12, x14
sub x4, x17, x4
ldr x12, [sp, #64] ; 8-byte Folded Reload
add x20, x12, x14
sub x20, x17, x20
ldr x12, [sp, #56] ; 8-byte Folded Reload
add x14, x12, x14
sub x14, x17, x14
cmp x4, #16
ccmp x4, #16, #0, hs
ccmp w9, #16, #0, hs
ccmp x20, #16, #0, hs
ccmp x14, #16, #0, hs
b.hs LBB84_119
; %bb.118: ; in Loop: Header=BB84_114 Depth=2
mov x14, #0
b LBB84_127
LBB84_119: ; in Loop: Header=BB84_114 Depth=2
cmp w19, #16
b.hs LBB84_121
; %bb.120: ; in Loop: Header=BB84_114 Depth=2
mov x20, #0
b LBB84_125
LBB84_121: ; in Loop: Header=BB84_114 Depth=2
mov x20, #0
ldr x12, [sp, #24] ; 8-byte Folded Reload
LBB84_122: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_114 Depth=2
; => This Inner Loop Header: Depth=3
ldr q4, [x21, x20]
ldr q5, [x0, x20]
tbl.16b v6, { v5 }, v2
tbl.16b v7, { v5 }, v3
tbl.16b v16, { v5 }, v0
ldr q17, [x2, x20]
tbl.16b v18, { v5 }, v1
tbl.16b v19, { v17 }, v2
tbl.16b v20, { v17 }, v3
tbl.16b v21, { v17 }, v0
ldr q22, [x26, x20]
tbl.16b v23, { v17 }, v1
tbl.16b v24, { v22 }, v1
tbl.16b v25, { v22 }, v0
tbl.16b v26, { v22 }, v3
tbl.16b v27, { v22 }, v2
add.4s v28, v23, v18
add.4s v29, v21, v16
add.4s v30, v20, v7
add.4s v31, v19, v6
sub.4s v31, v31, v27
sub.4s v30, v30, v26
sub.4s v29, v29, v25
sub.4s v28, v28, v24
sub.4s v18, v28, v18
sub.4s v16, v29, v16
sub.4s v7, v30, v7
sub.4s v6, v31, v6
abs.4s v6, v6
abs.4s v7, v7
abs.4s v16, v16
abs.4s v18, v18
sub.4s v23, v28, v23
sub.4s v21, v29, v21
sub.4s v20, v30, v20
sub.4s v19, v31, v19
abs.4s v19, v19
abs.4s v20, v20
abs.4s v21, v21
abs.4s v23, v23
sub.4s v24, v28, v24
sub.4s v25, v29, v25
sub.4s v26, v30, v26
sub.4s v27, v31, v27
abs.4s v27, v27
abs.4s v26, v26
abs.4s v25, v25
abs.4s v24, v24
cmhi.4s v28, v18, v23
cmhi.4s v29, v16, v21
cmhi.4s v30, v7, v20
cmhi.4s v31, v6, v19
cmhi.4s v18, v18, v24
cmhi.4s v16, v16, v25
cmhi.4s v7, v7, v26
cmhi.4s v6, v6, v27
orr.16b v6, v31, v6
orr.16b v7, v30, v7
uzp1.8h v6, v7, v6
orr.16b v7, v29, v16
orr.16b v16, v28, v18
uzp1.8h v7, v16, v7
uzp1.16b v6, v7, v6
cmhi.4s v7, v19, v27
cmhi.4s v16, v20, v26
uzp1.8h v7, v16, v7
cmhi.4s v16, v21, v25
cmhi.4s v18, v23, v24
uzp1.8h v16, v18, v16
uzp1.16b v7, v16, v7
bsl.16b v7, v22, v17
bit.16b v5, v7, v6
add.16b v4, v5, v4
str q4, [x15, x20]
add x20, x20, #16
cmp x12, x20
b.ne LBB84_122
; %bb.123: ; in Loop: Header=BB84_114 Depth=2
cmp x12, x13
b.eq LBB84_113
; %bb.124: ; in Loop: Header=BB84_114 Depth=2
ldr x14, [sp, #24] ; 8-byte Folded Reload
mov x20, x14
tbz w13, #3, LBB84_127
LBB84_125: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_114 Depth=2
; => This Inner Loop Header: Depth=3
ldr d4, [x21, x20]
ldr d5, [x0, x20]
tbl.16b v6, { v5 }, v0
ldr d7, [x2, x20]
tbl.16b v16, { v5 }, v1
tbl.16b v17, { v7 }, v0
tbl.16b v18, { v7 }, v1
ldr d19, [x26, x20]
tbl.16b v20, { v19 }, v1
tbl.16b v21, { v19 }, v0
add.4s v22, v18, v16
add.4s v23, v17, v6
sub.4s v23, v23, v21
sub.4s v22, v22, v20
sub.4s v16, v22, v16
sub.4s v6, v23, v6
abs.4s v6, v6
abs.4s v16, v16
sub.4s v18, v22, v18
sub.4s v17, v23, v17
abs.4s v17, v17
abs.4s v18, v18
sub.4s v20, v22, v20
sub.4s v21, v23, v21
abs.4s v21, v21
abs.4s v20, v20
cmhi.4s v22, v16, v18
cmhi.4s v23, v6, v17
cmhi.4s v16, v16, v20
cmhi.4s v6, v6, v21
orr.16b v6, v23, v6
orr.16b v16, v22, v16
uzp1.8h v6, v16, v6
xtn.8b v6, v6
cmhi.4s v16, v17, v21
cmhi.4s v17, v18, v20
uzp1.8h v16, v17, v16
xtn.8b v16, v16
bit.8b v7, v19, v16
bit.8b v5, v7, v6
add.8b v4, v5, v4
str d4, [x15, x20]
add x20, x20, #8
cmp x3, x20
b.ne LBB84_125
; %bb.126: ; in Loop: Header=BB84_114 Depth=2
mov x14, x3
cmp x3, x13
b.eq LBB84_113
LBB84_127: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_114 Depth=2
; => This Inner Loop Header: Depth=3
ldrb w17, [x21, x14]
ldrb w4, [x0, x14]
ldrb w20, [x2, x14]
ldrb w5, [x26, x14]
add w25, w20, w4
sub w25, w25, w5
subs w12, w25, w4
cneg w12, w12, mi
subs w7, w25, w20
cneg w7, w7, mi
subs w25, w25, w5
cneg w25, w25, mi
cmp w7, w25
csel w5, w5, w20, hi
cmp w12, w25
ccmp w12, w7, #2, ls
csel w12, w5, w4, hi
add w12, w12, w17
strb w12, [x15, x14]
add x14, x14, #1
cmp x13, x14
b.ne LBB84_127
b LBB84_113
LBB84_128: ; in Loop: Header=BB84_8 Depth=1
ldr w7, [sp, #108] ; 4-byte Folded Reload
cbz w7, LBB84_279
; %bb.129: ; in Loop: Header=BB84_8 Depth=1
mov x12, #0
add x2, x11, x0
add x5, x24, x25
ldr x14, [sp, #96] ; 8-byte Folded Reload
add x6, x21, x14
ldr x14, [sp, #32] ; 8-byte Folded Reload
add x0, x0, x14
mov x21, x20
b LBB84_131
LBB84_130: ; in Loop: Header=BB84_131 Depth=2
add x21, x21, x24
add x15, x15, x24
add x12, x12, #1
add x6, x6, x24
add x0, x0, x24
subs w7, w7, #1
b.eq LBB84_280
LBB84_131: ; Parent Loop BB84_8 Depth=1
; => This Loop Header: Depth=2
; Child Loop BB84_140 Depth 3
; Child Loop BB84_143 Depth 3
; Child Loop BB84_145 Depth 3
cmp w9, #1
b.lt LBB84_130
; %bb.132: ; in Loop: Header=BB84_131 Depth=2
cmp w19, #8
b.hs LBB84_134
; %bb.133: ; in Loop: Header=BB84_131 Depth=2
mov x17, #0
b LBB84_145
LBB84_134: ; in Loop: Header=BB84_131 Depth=2
mov x17, #0
mul x14, x12, x24
add x4, x2, x14
add x14, x5, x14
sub x14, x4, x14
cmp x14, #64
b.lo LBB84_145
; %bb.135: ; in Loop: Header=BB84_131 Depth=2
b.lo LBB84_145
; %bb.136: ; in Loop: Header=BB84_131 Depth=2
cmp w24, #64
b.lo LBB84_145
; %bb.137: ; in Loop: Header=BB84_131 Depth=2
cmp w19, #64
b.hs LBB84_139
; %bb.138: ; in Loop: Header=BB84_131 Depth=2
mov x14, #0
b LBB84_143
LBB84_139: ; in Loop: Header=BB84_131 Depth=2
mov x14, #0
add x17, x15, x30
LBB84_140: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_131 Depth=2
; => This Inner Loop Header: Depth=3
add x4, x6, x14
ldp q4, q5, [x4, #-32]
ldp q6, q7, [x4]
ldr q16, [x17, x14]
add x4, x17, x14
ldp q17, q18, [x4, #16]
ldr q19, [x4, #48]
usra.16b v4, v16, #1
usra.16b v5, v17, #1
usra.16b v6, v18, #1
usra.16b v7, v19, #1
add x4, x15, x14
stp q4, q5, [x4]
stp q6, q7, [x4, #32]
add x14, x14, #64
cmp x1, x14
b.ne LBB84_140
; %bb.141: ; in Loop: Header=BB84_131 Depth=2
cmp x1, x13
b.eq LBB84_130
; %bb.142: ; in Loop: Header=BB84_131 Depth=2
mov x14, x1
mov x17, x1
ldr x4, [sp, #48] ; 8-byte Folded Reload
cbz x4, LBB84_145
LBB84_143: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_131 Depth=2
; => This Inner Loop Header: Depth=3
ldr d4, [x21, x14]
ldr d5, [x0, x14]
usra.8b v4, v5, #1
str d4, [x15, x14]
add x14, x14, #8
cmp x3, x14
b.ne LBB84_143
; %bb.144: ; in Loop: Header=BB84_131 Depth=2
mov x17, x3
cmp x3, x13
b.eq LBB84_130
LBB84_145: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_131 Depth=2
; => This Inner Loop Header: Depth=3
ldrb w14, [x21, x17]
ldrb w4, [x0, x17]
add w14, w14, w4, lsr #1
strb w14, [x15, x17]
add x17, x17, #1
cmp x13, x17
b.ne LBB84_145
b LBB84_130
LBB84_146: ; in Loop: Header=BB84_8 Depth=1
ldr w7, [sp, #108] ; 4-byte Folded Reload
cbz w7, LBB84_279
; %bb.147: ; in Loop: Header=BB84_8 Depth=1
mov x12, #0
add x2, x11, x0
add x5, x24, x25
ldr x14, [sp, #96] ; 8-byte Folded Reload
add x6, x21, x14
ldr x14, [sp, #32] ; 8-byte Folded Reload
add x0, x0, x14
mov x21, x20
b LBB84_149
LBB84_148: ; in Loop: Header=BB84_149 Depth=2
add x21, x21, x24
add x15, x15, x24
add x12, x12, #1
add x6, x6, x24
add x0, x0, x24
subs w7, w7, #1
b.eq LBB84_280
LBB84_149: ; Parent Loop BB84_8 Depth=1
; => This Loop Header: Depth=2
; Child Loop BB84_158 Depth 3
; Child Loop BB84_161 Depth 3
; Child Loop BB84_163 Depth 3
cmp w24, #1
b.lt LBB84_148
; %bb.150: ; in Loop: Header=BB84_149 Depth=2
cmp w19, #8
b.hs LBB84_152
; %bb.151: ; in Loop: Header=BB84_149 Depth=2
mov x17, #0
b LBB84_163
LBB84_152: ; in Loop: Header=BB84_149 Depth=2
mov x17, #0
mul x14, x12, x24
add x4, x2, x14
add x14, x5, x14
sub x14, x4, x14
cmp x14, #64
b.lo LBB84_163
; %bb.153: ; in Loop: Header=BB84_149 Depth=2
b.lo LBB84_163
; %bb.154: ; in Loop: Header=BB84_149 Depth=2
cmp w9, #64
b.lo LBB84_163
; %bb.155: ; in Loop: Header=BB84_149 Depth=2
cmp w19, #64
b.hs LBB84_157
; %bb.156: ; in Loop: Header=BB84_149 Depth=2
mov x14, #0
b LBB84_161
LBB84_157: ; in Loop: Header=BB84_149 Depth=2
mov x14, #0
add x17, x15, x30
LBB84_158: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_149 Depth=2
; => This Inner Loop Header: Depth=3
add x4, x6, x14
ldp q4, q5, [x4, #-32]
ldp q6, q7, [x4]
ldr q16, [x17, x14]
add x4, x17, x14
ldp q17, q18, [x4, #16]
ldr q19, [x4, #48]
add.16b v4, v16, v4
add.16b v5, v17, v5
add.16b v6, v18, v6
add.16b v7, v19, v7
add x4, x15, x14
stp q4, q5, [x4]
stp q6, q7, [x4, #32]
add x14, x14, #64
cmp x1, x14
b.ne LBB84_158
; %bb.159: ; in Loop: Header=BB84_149 Depth=2
cmp x1, x13
b.eq LBB84_148
; %bb.160: ; in Loop: Header=BB84_149 Depth=2
mov x14, x1
mov x17, x1
ldr x4, [sp, #48] ; 8-byte Folded Reload
cbz x4, LBB84_163
LBB84_161: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_149 Depth=2
; => This Inner Loop Header: Depth=3
ldr d4, [x21, x14]
ldr d5, [x0, x14]
add.8b v4, v5, v4
str d4, [x15, x14]
add x14, x14, #8
cmp x3, x14
b.ne LBB84_161
; %bb.162: ; in Loop: Header=BB84_149 Depth=2
mov x17, x3
cmp x3, x13
b.eq LBB84_148
LBB84_163: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_149 Depth=2
; => This Inner Loop Header: Depth=3
ldrb w14, [x21, x17]
ldrb w4, [x0, x17]
add w14, w4, w14
strb w14, [x15, x17]
add x17, x17, #1
cmp x13, x17
b.ne LBB84_163
b LBB84_148
LBB84_164: ; in Loop: Header=BB84_8 Depth=1
ldr w7, [sp, #108] ; 4-byte Folded Reload
cbz w7, LBB84_279
; %bb.165: ; in Loop: Header=BB84_8 Depth=1
mov x12, #0
add x2, x11, x0
add x5, x24, x25
ldr x14, [sp, #96] ; 8-byte Folded Reload
add x6, x21, x14
mov x21, x20
b LBB84_167
LBB84_166: ; in Loop: Header=BB84_167 Depth=2
mov w14, #255
strb w14, [x4, x24]
add x21, x21, x24
add x4, x4, x11
add x12, x12, #1
add x6, x6, x24
add x0, x0, x11
subs w7, w7, #1
b.eq LBB84_280
LBB84_167: ; Parent Loop BB84_8 Depth=1
; => This Loop Header: Depth=2
; Child Loop BB84_176 Depth 3
; Child Loop BB84_180 Depth 3
; Child Loop BB84_183 Depth 3
cmp w24, #1
b.lt LBB84_166
; %bb.168: ; in Loop: Header=BB84_167 Depth=2
cmp w9, #8
b.hs LBB84_170
; %bb.169: ; in Loop: Header=BB84_167 Depth=2
mov x20, #0
b LBB84_182
LBB84_170: ; in Loop: Header=BB84_167 Depth=2
mov x20, #0
madd x14, x12, x11, x2
madd x15, x12, x24, x5
sub x14, x14, x15
cmp x14, #64
b.lo LBB84_182
; %bb.171: ; in Loop: Header=BB84_167 Depth=2
b.lo LBB84_182
; %bb.172: ; in Loop: Header=BB84_167 Depth=2
cmp w19, #64
b.lo LBB84_182
; %bb.173: ; in Loop: Header=BB84_167 Depth=2
cmp w24, #64
b.hs LBB84_175
; %bb.174: ; in Loop: Header=BB84_167 Depth=2
mov x25, #0
b LBB84_179
LBB84_175: ; in Loop: Header=BB84_167 Depth=2
mov x14, #0
LBB84_176: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_167 Depth=2
; => This Inner Loop Header: Depth=3
add x15, x6, x14
ldp q4, q5, [x15, #-32]
ldp q6, q7, [x15]
add x15, x0, x14
ldp q16, q17, [x15]
ldp q18, q19, [x15, #32]
add.16b v4, v16, v4
add.16b v5, v17, v5
add.16b v6, v18, v6
add.16b v7, v19, v7
add x15, x4, x14
stp q4, q5, [x15]
stp q6, q7, [x15, #32]
add x14, x14, #64
cmp x23, x14
b.ne LBB84_176
; %bb.177: ; in Loop: Header=BB84_167 Depth=2
cmp x23, x9
b.eq LBB84_166
; %bb.178: ; in Loop: Header=BB84_167 Depth=2
mov x25, x23
mov x20, x23
ldr x14, [sp, #40] ; 8-byte Folded Reload
cbz x14, LBB84_182
LBB84_179: ; in Loop: Header=BB84_167 Depth=2
mov x14, #0
add x15, x21, x25
add x17, x0, x25
add x20, x0, x11
add x20, x20, x25
ldr x26, [sp, #88] ; 8-byte Folded Reload
add x25, x26, x25
LBB84_180: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_167 Depth=2
; => This Inner Loop Header: Depth=3
ldr d4, [x15, x14]
ldr d5, [x17, x14]
add.8b v4, v5, v4
str d4, [x20, x14]
add x14, x14, #8
cmn x25, x14
b.ne LBB84_180
; %bb.181: ; in Loop: Header=BB84_167 Depth=2
mov x20, x16
cmp x16, x9
b.eq LBB84_166
LBB84_182: ; in Loop: Header=BB84_167 Depth=2
add x14, x0, x20
LBB84_183: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_167 Depth=2
; => This Inner Loop Header: Depth=3
ldrb w15, [x21, x20]
ldrb w17, [x14], #1
add w15, w17, w15
strb w15, [x4, x20]
add x20, x20, #1
cmp x9, x20
b.ne LBB84_183
b LBB84_166
LBB84_184: ; in Loop: Header=BB84_8 Depth=1
ldr w14, [sp, #108] ; 4-byte Folded Reload
cbz w14, LBB84_279
; %bb.185: ; in Loop: Header=BB84_8 Depth=1
mov x12, #0
add x0, x11, x0
add x5, x24, x25
add x6, x11, x7
ldr x15, [sp, #96] ; 8-byte Folded Reload
add x7, x21, x15
mov x25, x14
mov x21, x20
b LBB84_187
LBB84_186: ; in Loop: Header=BB84_187 Depth=2
mov w14, #255
strb w14, [x4, x24]
add x21, x21, x24
add x4, x4, x11
add x2, x2, x11
add x12, x12, #1
add x7, x7, x24
subs w25, w25, #1
b.eq LBB84_280
LBB84_187: ; Parent Loop BB84_8 Depth=1
; => This Loop Header: Depth=2
; Child Loop BB84_196 Depth 3
; Child Loop BB84_199 Depth 3
; Child Loop BB84_202 Depth 3
cmp w9, #1
b.lt LBB84_186
; %bb.188: ; in Loop: Header=BB84_187 Depth=2
cmp w9, #8
b.hs LBB84_190
; %bb.189: ; in Loop: Header=BB84_187 Depth=2
mov x20, #0
b LBB84_201
LBB84_190: ; in Loop: Header=BB84_187 Depth=2
mov x20, #0
mul x14, x12, x11
madd x15, x12, x24, x5
add x17, x0, x14
sub x15, x17, x15
add x14, x6, x14
sub x14, x17, x14
cmp x15, #64
b.lo LBB84_201
; %bb.191: ; in Loop: Header=BB84_187 Depth=2
b.lo LBB84_201
; %bb.192: ; in Loop: Header=BB84_187 Depth=2
cmp x14, #64
b.lo LBB84_201
; %bb.193: ; in Loop: Header=BB84_187 Depth=2
cmp w24, #64
b.hs LBB84_195
; %bb.194: ; in Loop: Header=BB84_187 Depth=2
mov x14, #0
b LBB84_199
LBB84_195: ; in Loop: Header=BB84_187 Depth=2
mov x14, #0
add x15, x4, x10
LBB84_196: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_187 Depth=2
; => This Inner Loop Header: Depth=3
add x17, x7, x14
ldp q4, q5, [x17, #-32]
ldp q6, q7, [x17]
ldr q16, [x15, x14]
add x17, x15, x14
ldp q17, q18, [x17, #16]
ldr q19, [x17, #48]
add.16b v4, v16, v4
add.16b v5, v17, v5
add.16b v6, v18, v6
add.16b v7, v19, v7
add x17, x4, x14
stp q4, q5, [x17]
stp q6, q7, [x17, #32]
add x14, x14, #64
cmp x23, x14
b.ne LBB84_196
; %bb.197: ; in Loop: Header=BB84_187 Depth=2
cmp x23, x9
b.eq LBB84_186
; %bb.198: ; in Loop: Header=BB84_187 Depth=2
mov x14, x23
mov x20, x23
ldr x15, [sp, #40] ; 8-byte Folded Reload
cbz x15, LBB84_201
LBB84_199: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_187 Depth=2
; => This Inner Loop Header: Depth=3
ldr d4, [x21, x14]
ldr d5, [x2, x14]
add.8b v4, v5, v4
str d4, [x4, x14]
add x14, x14, #8
cmp x16, x14
b.ne LBB84_199
; %bb.200: ; in Loop: Header=BB84_187 Depth=2
mov x20, x16
cmp x16, x9
b.eq LBB84_186
LBB84_201: ; in Loop: Header=BB84_187 Depth=2
add x14, x4, x20
LBB84_202: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_187 Depth=2
; => This Inner Loop Header: Depth=3
ldrb w15, [x21, x20]
ldrb w17, [x2, x20]
add w15, w17, w15
strb w15, [x14], #1
add x20, x20, #1
cmp x9, x20
b.ne LBB84_202
b LBB84_186
LBB84_203: ; in Loop: Header=BB84_8 Depth=1
ldr w27, [sp, #108] ; 4-byte Folded Reload
cbz w27, LBB84_279
; %bb.204: ; in Loop: Header=BB84_8 Depth=1
mov x12, #0
add x5, x11, x0
add x6, x24, x25
add x7, x11, x7
ldr x14, [sp, #96] ; 8-byte Folded Reload
add x17, x21, x14
add x26, x0, #32
mov x21, x20
b LBB84_206
LBB84_205: ; in Loop: Header=BB84_206 Depth=2
mov w14, #255
strb w14, [x4, x24]
add x21, x21, x24
add x4, x4, x11
add x2, x2, x11
add x12, x12, #1
add x17, x17, x24
add x26, x26, x11
add x0, x0, x11
subs w27, w27, #1
b.eq LBB84_280
LBB84_206: ; Parent Loop BB84_8 Depth=1
; => This Loop Header: Depth=2
; Child Loop BB84_216 Depth 3
; Child Loop BB84_219 Depth 3
; Child Loop BB84_221 Depth 3
cmp w24, #1
b.lt LBB84_205
; %bb.207: ; in Loop: Header=BB84_206 Depth=2
cmp w9, #8
b.hs LBB84_209
; %bb.208: ; in Loop: Header=BB84_206 Depth=2
mov x20, #0
b LBB84_221
LBB84_209: ; in Loop: Header=BB84_206 Depth=2
mov x20, #0
mul x14, x12, x11
madd x15, x12, x24, x6
add x25, x5, x14
sub x15, x25, x15
add x14, x7, x14
sub x14, x25, x14
cmp x15, #64
b.lo LBB84_221
; %bb.210: ; in Loop: Header=BB84_206 Depth=2
b.lo LBB84_221
; %bb.211: ; in Loop: Header=BB84_206 Depth=2
cmp x14, #64
b.lo LBB84_221
; %bb.212: ; in Loop: Header=BB84_206 Depth=2
cmp w19, #64
b.lo LBB84_221
; %bb.213: ; in Loop: Header=BB84_206 Depth=2
cmp w24, #64
b.hs LBB84_215
; %bb.214: ; in Loop: Header=BB84_206 Depth=2
mov x14, #0
b LBB84_219
LBB84_215: ; in Loop: Header=BB84_206 Depth=2
mov x20, x26
mov x25, x17
mov x15, x4
mov x14, x23
LBB84_216: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_206 Depth=2
; => This Inner Loop Header: Depth=3
ldp q4, q5, [x25, #-32]
ldp q6, q7, [x25], #64
add x30, x15, x22
ldp q16, q17, [x30, #-48]
ldp q18, q19, [x30, #-16]
ldp q20, q21, [x20, #-32]
ldp q22, q23, [x20], #64
uhadd.16b v16, v20, v16
uhadd.16b v17, v21, v17
uhadd.16b v18, v22, v18
uhadd.16b v19, v23, v19
add.16b v4, v4, v16
add.16b v5, v5, v17
add.16b v6, v6, v18
add.16b v7, v7, v19
stp q4, q5, [x15]
stp q6, q7, [x15, #32]
add x15, x15, #64
subs x14, x14, #64
b.ne LBB84_216
; %bb.217: ; in Loop: Header=BB84_206 Depth=2
cmp x23, x9
ldur x30, [x29, #-96] ; 8-byte Folded Reload
b.eq LBB84_205
; %bb.218: ; in Loop: Header=BB84_206 Depth=2
mov x14, x23
mov x20, x23
ldr x15, [sp, #40] ; 8-byte Folded Reload
cbz x15, LBB84_221
LBB84_219: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_206 Depth=2
; => This Inner Loop Header: Depth=3
ldr d4, [x21, x14]
ldr d5, [x2, x14]
ldr d6, [x0, x14]
uhadd.8b v5, v6, v5
add.8b v4, v4, v5
str d4, [x4, x14]
add x14, x14, #8
cmp x16, x14
b.ne LBB84_219
; %bb.220: ; in Loop: Header=BB84_206 Depth=2
mov x20, x16
cmp x16, x9
b.eq LBB84_205
LBB84_221: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_206 Depth=2
; => This Inner Loop Header: Depth=3
ldrb w14, [x21, x20]
ldrb w15, [x2, x20]
ldrb w25, [x0, x20]
add w15, w25, w15
add w14, w14, w15, lsr #1
strb w14, [x4, x20]
add x20, x20, #1
cmp x9, x20
b.ne LBB84_221
b LBB84_205
LBB84_222: ; in Loop: Header=BB84_8 Depth=1
ldr w27, [sp, #108] ; 4-byte Folded Reload
cbz w27, LBB84_279
; %bb.223: ; in Loop: Header=BB84_8 Depth=1
mov x5, #0
add x12, x11, x0
str x12, [sp, #80] ; 8-byte Folded Spill
add x12, x24, x25
str x12, [sp, #72] ; 8-byte Folded Spill
add x12, x11, x7
str x12, [sp, #64] ; 8-byte Folded Spill
mov x26, x7
mov x21, x20
b LBB84_225
LBB84_224: ; in Loop: Header=BB84_225 Depth=2
mov w12, #255
strb w12, [x4, x24]
add x21, x21, x24
add x4, x4, x11
add x2, x2, x11
add x5, x5, #1
add x26, x26, x11
add x0, x0, x11
subs w27, w27, #1
ldur x30, [x29, #-96] ; 8-byte Folded Reload
b.eq LBB84_280
LBB84_225: ; Parent Loop BB84_8 Depth=1
; => This Loop Header: Depth=2
; Child Loop BB84_235 Depth 3
; Child Loop BB84_232 Depth 3
; Child Loop BB84_238 Depth 3
cmp w9, #1
b.lt LBB84_224
; %bb.226: ; in Loop: Header=BB84_225 Depth=2
cmp w9, #8
b.hs LBB84_228
; %bb.227: ; in Loop: Header=BB84_225 Depth=2
mov x14, #0
b LBB84_238
LBB84_228: ; in Loop: Header=BB84_225 Depth=2
mul x14, x5, x11
ldr x12, [sp, #72] ; 8-byte Folded Reload
madd x15, x5, x24, x12
ldr x12, [sp, #80] ; 8-byte Folded Reload
add x17, x12, x14
sub x15, x17, x15
ldr x12, [sp, #64] ; 8-byte Folded Reload
add x20, x12, x14
sub x20, x17, x20
add x14, x14, x7
sub x14, x17, x14
cmp x15, #16
ccmp x15, #16, #0, hs
ccmp w19, #16, #0, hs
ccmp x20, #16, #0, hs
ccmp x14, #16, #0, hs
b.hs LBB84_230
; %bb.229: ; in Loop: Header=BB84_225 Depth=2
mov x14, #0
b LBB84_238
LBB84_230: ; in Loop: Header=BB84_225 Depth=2
cmp w24, #16
b.hs LBB84_234
; %bb.231: ; in Loop: Header=BB84_225 Depth=2
mov x20, #0
LBB84_232: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_225 Depth=2
; => This Inner Loop Header: Depth=3
ldr d4, [x21, x20]
ldr d5, [x0, x20]
tbl.16b v6, { v5 }, v0
ldr d7, [x2, x20]
tbl.16b v16, { v5 }, v1
tbl.16b v17, { v7 }, v0
tbl.16b v18, { v7 }, v1
ldr d19, [x26, x20]
tbl.16b v20, { v19 }, v1
tbl.16b v21, { v19 }, v0
add.4s v22, v18, v16
add.4s v23, v17, v6
sub.4s v23, v23, v21
sub.4s v22, v22, v20
sub.4s v16, v22, v16
sub.4s v6, v23, v6
abs.4s v6, v6
abs.4s v16, v16
sub.4s v18, v22, v18
sub.4s v17, v23, v17
abs.4s v17, v17
abs.4s v18, v18
sub.4s v20, v22, v20
sub.4s v21, v23, v21
abs.4s v21, v21
abs.4s v20, v20
cmhi.4s v22, v16, v18
cmhi.4s v23, v6, v17
cmhi.4s v16, v16, v20
cmhi.4s v6, v6, v21
orr.16b v6, v23, v6
orr.16b v16, v22, v16
uzp1.8h v6, v16, v6
xtn.8b v6, v6
cmhi.4s v16, v17, v21
cmhi.4s v17, v18, v20
uzp1.8h v16, v17, v16
xtn.8b v16, v16
bit.8b v7, v19, v16
bit.8b v5, v7, v6
add.8b v4, v5, v4
str d4, [x4, x20]
add x20, x20, #8
cmp x16, x20
b.ne LBB84_232
; %bb.233: ; in Loop: Header=BB84_225 Depth=2
mov x14, x16
cmp x16, x9
b.ne LBB84_238
b LBB84_224
LBB84_234: ; in Loop: Header=BB84_225 Depth=2
mov x20, #0
ldr x12, [sp, #16] ; 8-byte Folded Reload
LBB84_235: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_225 Depth=2
; => This Inner Loop Header: Depth=3
ldr q4, [x21, x20]
sub x14, x20, x11
ldr q5, [x4, x14]
tbl.16b v6, { v5 }, v2
tbl.16b v7, { v5 }, v3
tbl.16b v16, { v5 }, v0
ldr q17, [x2, x20]
tbl.16b v18, { v5 }, v1
tbl.16b v19, { v17 }, v2
tbl.16b v20, { v17 }, v3
tbl.16b v21, { v17 }, v0
ldr q22, [x2, x14]
tbl.16b v23, { v17 }, v1
tbl.16b v24, { v22 }, v1
tbl.16b v25, { v22 }, v0
tbl.16b v26, { v22 }, v3
tbl.16b v27, { v22 }, v2
add.4s v28, v23, v18
add.4s v29, v21, v16
add.4s v30, v20, v7
add.4s v31, v19, v6
sub.4s v31, v31, v27
sub.4s v30, v30, v26
sub.4s v29, v29, v25
sub.4s v28, v28, v24
sub.4s v18, v28, v18
sub.4s v16, v29, v16
sub.4s v7, v30, v7
sub.4s v6, v31, v6
abs.4s v6, v6
abs.4s v7, v7
abs.4s v16, v16
abs.4s v18, v18
sub.4s v23, v28, v23
sub.4s v21, v29, v21
sub.4s v20, v30, v20
sub.4s v19, v31, v19
abs.4s v19, v19
abs.4s v20, v20
abs.4s v21, v21
abs.4s v23, v23
sub.4s v24, v28, v24
sub.4s v25, v29, v25
sub.4s v26, v30, v26
sub.4s v27, v31, v27
abs.4s v27, v27
abs.4s v26, v26
abs.4s v25, v25
abs.4s v24, v24
cmhi.4s v28, v18, v23
cmhi.4s v29, v16, v21
cmhi.4s v30, v7, v20
cmhi.4s v31, v6, v19
cmhi.4s v18, v18, v24
cmhi.4s v16, v16, v25
cmhi.4s v7, v7, v26
cmhi.4s v6, v6, v27
orr.16b v6, v31, v6
orr.16b v7, v30, v7
uzp1.8h v6, v7, v6
orr.16b v7, v29, v16
orr.16b v16, v28, v18
uzp1.8h v7, v16, v7
uzp1.16b v6, v7, v6
cmhi.4s v7, v19, v27
cmhi.4s v16, v20, v26
uzp1.8h v7, v16, v7
cmhi.4s v16, v21, v25
cmhi.4s v18, v23, v24
uzp1.8h v16, v18, v16
uzp1.16b v7, v16, v7
bsl.16b v7, v22, v17
bit.16b v5, v7, v6
add.16b v4, v5, v4
str q4, [x4, x20]
add x20, x20, #16
cmp x20, x12
b.ne LBB84_235
; %bb.236: ; in Loop: Header=BB84_225 Depth=2
cmp x12, x9
b.eq LBB84_224
; %bb.237: ; in Loop: Header=BB84_225 Depth=2
ldr x14, [sp, #16] ; 8-byte Folded Reload
mov x20, x14
tbnz w9, #3, LBB84_232
LBB84_238: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_225 Depth=2
; => This Inner Loop Header: Depth=3
ldrb w15, [x21, x14]
ldrb w17, [x0, x14]
ldrb w20, [x2, x14]
ldrb w30, [x26, x14]
add w6, w20, w17
sub w6, w6, w30
subs w12, w6, w17
cneg w12, w12, mi
subs w25, w6, w20
cneg w25, w25, mi
subs w6, w6, w30
cneg w6, w6, mi
cmp w25, w6
csel w20, w30, w20, hi
cmp w12, w6
ccmp w12, w25, #2, ls
csel w12, w20, w17, hi
add w12, w12, w15
strb w12, [x4, x14]
add x14, x14, #1
cmp x9, x14
b.ne LBB84_238
b LBB84_224
LBB84_239: ; in Loop: Header=BB84_8 Depth=1
ldr w7, [sp, #108] ; 4-byte Folded Reload
cbz w7, LBB84_279
; %bb.240: ; in Loop: Header=BB84_8 Depth=1
mov x12, #0
add x2, x11, x0
add x5, x24, x25
ldr x14, [sp, #96] ; 8-byte Folded Reload
add x6, x21, x14
mov x21, x20
b LBB84_242
LBB84_241: ; in Loop: Header=BB84_242 Depth=2
mov w14, #255
strb w14, [x4, x24]
add x21, x21, x24
add x4, x4, x11
add x12, x12, #1
add x6, x6, x24
add x0, x0, x11
subs w7, w7, #1
b.eq LBB84_280
LBB84_242: ; Parent Loop BB84_8 Depth=1
; => This Loop Header: Depth=2
; Child Loop BB84_251 Depth 3
; Child Loop BB84_255 Depth 3
; Child Loop BB84_258 Depth 3
cmp w24, #1
b.lt LBB84_241
; %bb.243: ; in Loop: Header=BB84_242 Depth=2
cmp w9, #8
b.hs LBB84_245
; %bb.244: ; in Loop: Header=BB84_242 Depth=2
mov x20, #0
b LBB84_257
LBB84_245: ; in Loop: Header=BB84_242 Depth=2
mov x20, #0
madd x14, x12, x11, x2
madd x15, x12, x24, x5
sub x14, x14, x15
cmp x14, #64
b.lo LBB84_257
; %bb.246: ; in Loop: Header=BB84_242 Depth=2
b.lo LBB84_257
; %bb.247: ; in Loop: Header=BB84_242 Depth=2
cmp w19, #64
b.lo LBB84_257
; %bb.248: ; in Loop: Header=BB84_242 Depth=2
cmp w24, #64
b.hs LBB84_250
; %bb.249: ; in Loop: Header=BB84_242 Depth=2
mov x25, #0
b LBB84_254
LBB84_250: ; in Loop: Header=BB84_242 Depth=2
mov x14, #0
LBB84_251: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_242 Depth=2
; => This Inner Loop Header: Depth=3
add x15, x6, x14
ldp q4, q5, [x15, #-32]
ldp q6, q7, [x15]
add x15, x0, x14
ldp q16, q17, [x15]
ldp q18, q19, [x15, #32]
usra.16b v4, v16, #1
usra.16b v5, v17, #1
usra.16b v6, v18, #1
usra.16b v7, v19, #1
add x15, x4, x14
stp q4, q5, [x15]
stp q6, q7, [x15, #32]
add x14, x14, #64
cmp x23, x14
b.ne LBB84_251
; %bb.252: ; in Loop: Header=BB84_242 Depth=2
cmp x23, x9
b.eq LBB84_241
; %bb.253: ; in Loop: Header=BB84_242 Depth=2
mov x25, x23
mov x20, x23
ldr x14, [sp, #40] ; 8-byte Folded Reload
cbz x14, LBB84_257
LBB84_254: ; in Loop: Header=BB84_242 Depth=2
mov x14, #0
add x15, x21, x25
add x17, x0, x25
add x20, x0, x11
add x20, x20, x25
ldr x26, [sp, #88] ; 8-byte Folded Reload
add x25, x26, x25
LBB84_255: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_242 Depth=2
; => This Inner Loop Header: Depth=3
ldr d4, [x15, x14]
ldr d5, [x17, x14]
usra.8b v4, v5, #1
str d4, [x20, x14]
add x14, x14, #8
cmn x25, x14
b.ne LBB84_255
; %bb.256: ; in Loop: Header=BB84_242 Depth=2
mov x20, x16
cmp x16, x9
b.eq LBB84_241
LBB84_257: ; in Loop: Header=BB84_242 Depth=2
add x14, x0, x20
LBB84_258: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_242 Depth=2
; => This Inner Loop Header: Depth=3
ldrb w15, [x21, x20]
ldrb w17, [x14], #1
add w15, w15, w17, lsr #1
strb w15, [x4, x20]
add x20, x20, #1
cmp x9, x20
b.ne LBB84_258
b LBB84_241
LBB84_259: ; in Loop: Header=BB84_8 Depth=1
ldr w7, [sp, #108] ; 4-byte Folded Reload
cbz w7, LBB84_279
; %bb.260: ; in Loop: Header=BB84_8 Depth=1
mov x12, #0
add x2, x11, x0
add x5, x24, x25
ldr x14, [sp, #96] ; 8-byte Folded Reload
add x6, x21, x14
mov x21, x20
b LBB84_262
LBB84_261: ; in Loop: Header=BB84_262 Depth=2
mov w14, #255
strb w14, [x4, x24]
add x21, x21, x24
add x4, x4, x11
add x12, x12, #1
add x6, x6, x24
add x0, x0, x11
subs w7, w7, #1
b.eq LBB84_280
LBB84_262: ; Parent Loop BB84_8 Depth=1
; => This Loop Header: Depth=2
; Child Loop BB84_271 Depth 3
; Child Loop BB84_275 Depth 3
; Child Loop BB84_278 Depth 3
cmp w9, #1
b.lt LBB84_261
; %bb.263: ; in Loop: Header=BB84_262 Depth=2
cmp w9, #8
b.hs LBB84_265
; %bb.264: ; in Loop: Header=BB84_262 Depth=2
mov x20, #0
b LBB84_277
LBB84_265: ; in Loop: Header=BB84_262 Depth=2
mov x20, #0
madd x14, x12, x11, x2
madd x15, x12, x24, x5
sub x14, x14, x15
cmp x14, #64
b.lo LBB84_277
; %bb.266: ; in Loop: Header=BB84_262 Depth=2
b.lo LBB84_277
; %bb.267: ; in Loop: Header=BB84_262 Depth=2
cmp w19, #64
b.lo LBB84_277
; %bb.268: ; in Loop: Header=BB84_262 Depth=2
cmp w24, #64
b.hs LBB84_270
; %bb.269: ; in Loop: Header=BB84_262 Depth=2
mov x25, #0
b LBB84_274
LBB84_270: ; in Loop: Header=BB84_262 Depth=2
mov x14, #0
LBB84_271: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_262 Depth=2
; => This Inner Loop Header: Depth=3
add x15, x6, x14
ldp q4, q5, [x15, #-32]
ldp q6, q7, [x15]
add x15, x0, x14
ldp q16, q17, [x15]
ldp q18, q19, [x15, #32]
add.16b v4, v16, v4
add.16b v5, v17, v5
add.16b v6, v18, v6
add.16b v7, v19, v7
add x15, x4, x14
stp q4, q5, [x15]
stp q6, q7, [x15, #32]
add x14, x14, #64
cmp x23, x14
b.ne LBB84_271
; %bb.272: ; in Loop: Header=BB84_262 Depth=2
cmp x23, x9
b.eq LBB84_261
; %bb.273: ; in Loop: Header=BB84_262 Depth=2
mov x25, x23
mov x20, x23
ldr x14, [sp, #40] ; 8-byte Folded Reload
cbz x14, LBB84_277
LBB84_274: ; in Loop: Header=BB84_262 Depth=2
mov x14, #0
add x15, x21, x25
add x17, x0, x25
add x20, x0, x11
add x20, x20, x25
ldr x26, [sp, #88] ; 8-byte Folded Reload
add x25, x26, x25
LBB84_275: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_262 Depth=2
; => This Inner Loop Header: Depth=3
ldr d4, [x15, x14]
ldr d5, [x17, x14]
add.8b v4, v5, v4
str d4, [x20, x14]
add x14, x14, #8
cmn x25, x14
b.ne LBB84_275
; %bb.276: ; in Loop: Header=BB84_262 Depth=2
mov x20, x16
cmp x16, x9
b.eq LBB84_261
LBB84_277: ; in Loop: Header=BB84_262 Depth=2
add x14, x0, x20
LBB84_278: ; Parent Loop BB84_8 Depth=1
; Parent Loop BB84_262 Depth=2
; => This Inner Loop Header: Depth=3
ldrb w15, [x21, x20]
ldrb w17, [x14], #1
add w15, w17, w15
strb w15, [x4, x20]
add x20, x20, #1
cmp x9, x20
b.ne LBB84_278
b LBB84_261
LBB84_279: ; in Loop: Header=BB84_8 Depth=1
mov x21, x20
LBB84_280: ; in Loop: Header=BB84_8 Depth=1
add x8, x8, #1
ldur x12, [x29, #-88] ; 8-byte Folded Reload
cmp x8, x12
b.eq LBB84_285
; %bb.281: ; in Loop: Header=BB84_8 Depth=1
ldr x12, [sp, #112] ; 8-byte Folded Reload
ldr x12, [x12, #56]
ldur x14, [x29, #-104] ; 8-byte Folded Reload
mul w14, w14, w8
add x0, x12, x14
ldrb w12, [x21]
cmp w12, #4
b.ls LBB84_8
LBB84_282:
Lloh471:
adrp x9, l_.str.71@PAGE
Lloh472:
add x9, x9, l_.str.71@PAGEOFF
b LBB84_284
LBB84_283:
Lloh473:
adrp x9, l_.str.5@PAGE
Lloh474:
add x9, x9, l_.str.5@PAGEOFF
LBB84_284:
mov w0, #0
adrp x8, _failure_reason@PAGE
str x9, [x8, _failure_reason@PAGEOFF]
b LBB84_286
LBB84_285:
mov w0, #1
LBB84_286:
ldp x29, x30, [sp, #224] ; 16-byte Folded Reload
ldp x20, x19, [sp, #208] ; 16-byte Folded Reload
ldp x22, x21, [sp, #192] ; 16-byte Folded Reload
ldp x24, x23, [sp, #176] ; 16-byte Folded Reload
ldp x26, x25, [sp, #160] ; 16-byte Folded Reload
ldp x28, x27, [sp, #144] ; 16-byte Folded Reload
add sp, sp, #240
ret
LBB84_287:
Lloh475:
adrp x9, l_.str.70@PAGE
Lloh476:
add x9, x9, l_.str.70@PAGEOFF
b LBB84_284
LBB84_288:
bl _create_png_image_raw.cold.1
LBB84_289:
bl _create_png_image_raw.cold.2
.loh AdrpLdrGotLdr Lloh452, Lloh453, Lloh454
.loh AdrpLdr Lloh463, Lloh464
.loh AdrpAdrp Lloh461, Lloh463
.loh AdrpLdr Lloh461, Lloh462
.loh AdrpAdrp Lloh459, Lloh461
.loh AdrpLdr Lloh459, Lloh460
.loh AdrpAdrp Lloh457, Lloh459
.loh AdrpLdr Lloh457, Lloh458
.loh AdrpAdd Lloh455, Lloh456
.loh AdrpAdd Lloh465, Lloh466
.loh AdrpAdd Lloh467, Lloh468
.loh AdrpAdd Lloh469, Lloh470
.loh AdrpAdd Lloh471, Lloh472
.loh AdrpAdd Lloh473, Lloh474
.loh AdrpAdd Lloh475, Lloh476
.cfi_endproc
.section __TEXT,__const
lJTI84_0:
.byte (LBB84_12-LBB84_12)>>2
.byte (LBB84_12-LBB84_12)>>2
.byte (LBB84_17-LBB84_12)>>2
.byte (LBB84_18-LBB84_12)>>2
.byte (LBB84_17-LBB84_12)>>2
.byte (LBB84_12-LBB84_12)>>2
.byte (LBB84_12-LBB84_12)>>2
.p2align 1
lJTI84_1:
.short (LBB84_40-LBB84_40)>>2
.short (LBB84_164-LBB84_40)>>2
.short (LBB84_184-LBB84_40)>>2
.short (LBB84_203-LBB84_40)>>2
.short (LBB84_222-LBB84_40)>>2
.short (LBB84_239-LBB84_40)>>2
.short (LBB84_259-LBB84_40)>>2
.p2align 1
lJTI84_2:
.short (LBB84_22-LBB84_22)>>2
.short (LBB84_55-LBB84_22)>>2
.short (LBB84_73-LBB84_22)>>2
.short (LBB84_92-LBB84_22)>>2
.short (LBB84_111-LBB84_22)>>2
.short (LBB84_128-LBB84_22)>>2
.short (LBB84_146-LBB84_22)>>2
; -- End function
.section __TEXT,__text,regular,pure_instructions
.p2align 2 ; -- Begin function get32le
_get32le: ; @get32le
; ---------------------------------------------------------------------------
; get32le(ctx) -- read four bytes from the context and assemble them as a
; 32-bit little-endian integer.
;   In:   x0 = context pointer.
;         [ctx, #16] = FILE* (non-null => bytes come from fgetc)
;         [ctx, #24] = in-memory read cursor, [ctx, #32] = buffer end
;   Out:  w0 = b0 | b1<<8 | b2<<16 | b3<<24
; A byte that hits EOF (fgetc == -1) or the end of the buffer reads as 0.
; The FILE* is re-checked before every byte, so a path switch mid-value is
; honored.  Register roles: x19 = ctx; w20/w21/w22/w8 = bytes 0..3.
; ---------------------------------------------------------------------------
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x19, x0 ; keep ctx across the fgetc calls
ldr x0, [x0, #16] ; FILE* at ctx+16; null => memory-buffer path
cbz x0, LBB85_5
; %bb.1: ; byte 0 via fgetc
bl _fgetc
cmn w0, #1 ; fgetc() == EOF (-1)?
csel w20, wzr, w0, eq ; EOF reads as 0
ldr x0, [x19, #16]
cbz x0, LBB85_7
; %bb.2: ; byte 1 via fgetc
bl _fgetc
cmn w0, #1
csel w21, wzr, w0, eq
ldr x0, [x19, #16]
cbz x0, LBB85_11
; %bb.3: ; byte 2 via fgetc
bl _fgetc
cmn w0, #1
csel w22, wzr, w0, eq
ldr x0, [x19, #16]
cbz x0, LBB85_15
; %bb.4: ; byte 3 via fgetc
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
b LBB85_17
LBB85_5: ; byte 0 from the memory buffer
ldp x8, x9, [x19, #24] ; x8 = cursor, x9 = end
cmp x8, x9
b.hs LBB85_9 ; cursor at/past end => byte reads as 0
; %bb.6:
add x10, x8, #1
str x10, [x19, #24] ; advance the stored cursor
ldrb w20, [x8]
mov x8, x10
cmp x8, x9 ; room left for byte 1?
b.lo LBB85_8
b LBB85_10
LBB85_7: ; byte 1 from the memory buffer
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB85_10
LBB85_8:
add x9, x8, #1
str x9, [x19, #24]
ldrb w21, [x8]
b LBB85_11
LBB85_9:
mov w20, #0 ; byte 0 exhausted
cmp x8, x9
b.lo LBB85_8
LBB85_10:
mov w21, #0 ; byte 1 exhausted
LBB85_11: ; byte 2 from the memory buffer
ldp x8, x9, [x19, #24]
cmp x8, x9
b.hs LBB85_13
; %bb.12:
add x10, x8, #1
str x10, [x19, #24]
ldrb w22, [x8]
mov x8, x10
cmp x8, x9 ; room left for byte 3?
b.lo LBB85_14
b LBB85_16
LBB85_13:
mov w22, #0 ; byte 2 exhausted
cmp x8, x9
b.hs LBB85_16
LBB85_14: ; byte 3 from the memory buffer
add x9, x8, #1
str x9, [x19, #24]
ldrb w8, [x8]
b LBB85_17
LBB85_15:
ldp x8, x9, [x19, #24]
cmp x8, x9
b.lo LBB85_14
LBB85_16:
mov w8, #0 ; byte 3 exhausted
LBB85_17: ; combine: b0 | b1<<8 | b2<<16 | b3<<24
add w9, w20, w21, lsl #8
add w8, w22, w8, lsl #8
add w0, w9, w8, lsl #16
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function high_bit
_high_bit: ; @high_bit
; ---------------------------------------------------------------------------
; high_bit(w0) -- index of the highest set bit of w0 (floor(log2(w0))),
; or -1 when w0 == 0.  Branchless binary search: each step tests whether
; any bit remains above the current half (16, then 8, 4, 2, 1 bits),
; narrows the value with csel and accumulates the position.
; Register roles: w8 = value being narrowed, w9 = accumulated position.
; ---------------------------------------------------------------------------
.cfi_startproc
; %bb.0:
cbz w0, LBB86_2 ; 0 has no set bit -> return -1
; %bb.1:
lsr w8, w0, #16
cmp w8, #0 ; any bit in the top 16?
csel w8, w8, w0, ne ; keep the half that holds the bit
cset w9, ne ; w9 = 1 if the top half was used
lsl w10, w9, #4 ; w10 = 16*w9 (position so far)
mov w11, #8
bfi w11, w9, #4, #1 ; w11 = 8 + 16*w9 (position if byte step hits)
lsr w9, w8, #8
cmp w8, #255 ; any bit above the low byte?
csel w8, w9, w8, hi
csel w9, w11, w10, hi ; position += 8 if so
orr w10, w9, #0x4
lsr w11, w8, #4
cmp w8, #15 ; any bit above the low nibble?
csel w8, w11, w8, hi
csel w9, w10, w9, hi ; position += 4 if so
orr w10, w9, #0x2
lsr w11, w8, #2
cmp w8, #3 ; any bit above the low 2 bits?
csel w8, w11, w8, hi
csel w9, w10, w9, hi ; position += 2 if so
cmp w8, #1
cinc w0, w9, hi ; position += 1 if bit 1 is the survivor
ret
LBB86_2:
mov w0, #-1
ret
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function bitcount
_bitcount: ; @bitcount
; ---------------------------------------------------------------------------
; bitcount(w0) -- population count of w0 (classic SWAR bit-twiddle).
; Adjacent bit pairs are summed, then 2-bit fields, then nibbles, and the
; byte sums are folded together; the result is masked to its 6 significant
; bits (max count is 32).  In: w0.  Out: w0.  Clobbers: w9, w10, no flags.
; ---------------------------------------------------------------------------
.cfi_startproc
; %bb.0:
lsr w9, w0, #1 ; sum adjacent bit pairs:
and w9, w9, #0x55555555 ; x = (x & 0x5555...) + ((x >> 1) & 0x5555...)
and w10, w0, #0x55555555
add w9, w10, w9
lsr w10, w9, #2 ; sum adjacent 2-bit fields
and w10, w10, #0x33333333
and w9, w9, #0x33333333
add w9, w9, w10
add w9, w9, w9, lsr #4 ; sum nibbles into bytes
and w9, w9, #0xf0f0f0f
add w9, w9, w9, lsr #8 ; fold byte sums together
add w9, w9, w9, lsr #16
and w0, w9, #0x3f ; count fits in 6 bits
ret
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function hdr_gettoken
_hdr_gettoken: ; @hdr_gettoken
; ---------------------------------------------------------------------------
; hdr_gettoken(ctx, buffer) -- read one newline-terminated line from the
; context into `buffer` and return the buffer.
;   In:   x0 = context (FILE* at +16; else memory cursor/end at +24/+32),
;         x1 = destination buffer
;   Out:  x0 = buffer, always NUL-terminated
; Characters are copied until '\n' (10) or end of input.  At most 1023
; characters are stored (index checked against 1022); once the buffer is
; full, the rest of the line is consumed without being stored.
; Register roles: x20 = ctx, x19 = buffer, x21 = write index,
;                 w22 = current character.
; ---------------------------------------------------------------------------
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x19, x1
mov x20, x0
ldr x0, [x0, #16] ; FILE* present?
cbz x0, LBB88_2
; %bb.1: ; first character via fgetc
bl _fgetc
cmn w0, #1 ; fgetc() == EOF?
csel w22, wzr, w0, eq ; EOF reads as 0
b LBB88_5
LBB88_2: ; first character from the memory buffer
ldp x8, x9, [x20, #24] ; x8 = cursor, x9 = end
cmp x8, x9
b.hs LBB88_4
; %bb.3:
add x9, x8, #1
str x9, [x20, #24]
ldrb w22, [x8]
b LBB88_5
LBB88_4:
mov w22, #0 ; buffer exhausted
LBB88_5:
mov x21, #0 ; write index = 0
b LBB88_7
LBB88_6: ; in Loop: Header=BB88_7 Depth=1
bl _fgetc ; next character (file path)
cmn w0, #1
csel w22, wzr, w0, eq
add x21, x21, #1
LBB88_7: ; =>This Inner Loop Header: Depth=1
ldr x0, [x20, #16]
cbz x0, LBB88_9
; %bb.8: ; in Loop: Header=BB88_7 Depth=1
bl _feof ; w0 = at-end flag (file path)
b LBB88_10
LBB88_9: ; in Loop: Header=BB88_7 Depth=1
ldp x8, x9, [x20, #24]
cmp x8, x9
cset w0, hs ; w0 = at-end flag (memory path)
LBB88_10: ; in Loop: Header=BB88_7 Depth=1
and w8, w22, #0xff
cmp w0, #0 ; stop at end of input ...
ccmp w8, #10, #4, eq ; ... or on '\n' (nzcv #4 forces Z when at end)
b.eq LBB88_26
; %bb.11: ; in Loop: Header=BB88_7 Depth=1
strb w22, [x19, x21] ; store the character
cmp x21, #1022 ; last storable slot before the NUL?
b.eq LBB88_16
; %bb.12: ; in Loop: Header=BB88_7 Depth=1
ldr x0, [x20, #16]
cbnz x0, LBB88_6
; %bb.13: ; in Loop: Header=BB88_7 Depth=1
ldp x8, x9, [x20, #24] ; next character from the memory buffer
cmp x8, x9
b.hs LBB88_15
; %bb.14: ; in Loop: Header=BB88_7 Depth=1
add x9, x8, #1
str x9, [x20, #24]
ldrb w22, [x8]
add x21, x21, #1
b LBB88_7
LBB88_15: ; in Loop: Header=BB88_7 Depth=1
mov w22, #0
add x21, x21, #1
b LBB88_7
LBB88_16: ; buffer full: discard the rest of the line
mov w21, #1023 ; NUL goes at index 1023
b LBB88_19
LBB88_17: ; in Loop: Header=BB88_19 Depth=1
bl _fgetc
cmn w0, #1
csel w8, wzr, w0, eq
LBB88_18: ; in Loop: Header=BB88_19 Depth=1
cmp w8, #10 ; reached '\n'?
b.eq LBB88_26
LBB88_19: ; =>This Inner Loop Header: Depth=1
ldr x0, [x20, #16]
cbz x0, LBB88_21
; %bb.20: ; in Loop: Header=BB88_19 Depth=1
bl _feof
cbz w0, LBB88_22
b LBB88_26
LBB88_21: ; in Loop: Header=BB88_19 Depth=1
ldp x8, x9, [x20, #24]
cmp x8, x9
cset w0, hs
cbnz w0, LBB88_26
LBB88_22: ; in Loop: Header=BB88_19 Depth=1
ldr x0, [x20, #16]
cbnz x0, LBB88_17
; %bb.23: ; in Loop: Header=BB88_19 Depth=1
ldp x8, x9, [x20, #24]
cmp x8, x9
b.hs LBB88_25
; %bb.24: ; in Loop: Header=BB88_19 Depth=1
add x9, x8, #1
str x9, [x20, #24]
ldrb w8, [x8]
b LBB88_18
LBB88_25: ; in Loop: Header=BB88_19 Depth=1
mov w8, #0
b LBB88_18
LBB88_26:
strb wzr, [x19, x21] ; NUL-terminate
mov x0, x19 ; return the buffer
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function hdr_convert
_hdr_convert: ; @hdr_convert
; ---------------------------------------------------------------------------
; hdr_convert(output, input, req_comp) -- expand one RGBE-encoded HDR pixel
; into req_comp floats.
;   In:   x0 = float *output, x1 = byte[4] rgbe input, w2 = req_comp
; Non-zero exponent byte input[3]: scale = ldexp(1.0, e - 136);
;   req_comp <= 2 stores the channel average (r+g+b)/3 * scale as
;   luminance, req_comp > 2 stores the three scaled channels, and
;   req_comp 2 / 4 appends alpha = 1.0f.
; Zero exponent: stores zeros (plus alpha = 1.0f where applicable) via the
; jump table lJTI89_0 below, indexed by req_comp-1.
; Register roles: x19 = output, x21 = input, w20 = req_comp.
; ---------------------------------------------------------------------------
.cfi_startproc
; %bb.0:
stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 48
stp x20, x19, [sp, #16] ; 16-byte Folded Spill
stp x29, x30, [sp, #32] ; 16-byte Folded Spill
add x29, sp, #32
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
mov x20, x2
mov x19, x0
ldrb w8, [x1, #3] ; exponent byte
cbz w8, LBB89_5 ; zero exponent -> all-zero pixel
; %bb.1:
mov x21, x1
sub w0, w8, #136 ; e - 136
fmov d0, #1.00000000
bl _ldexp ; d0 = 2^(e-136)
fcvt s0, d0 ; scale as single precision
ldrb w8, [x21] ; red byte
cmp w20, #2
b.gt LBB89_8 ; 3 or 4 components requested
; %bb.2: ; 1 or 2 components: store luminance
ldrb w9, [x21, #1]
ldrb w10, [x21, #2]
add w8, w9, w8 ; r + g + b
add w8, w8, w10
scvtf s1, w8
fmul s0, s0, s1
fmov s1, #3.00000000
fdiv s0, s0, s1 ; (r+g+b)/3 * scale
str s0, [x19]
cmp w20, #4
b.eq LBB89_9
LBB89_3:
cmp w20, #2
b.ne LBB89_13
; %bb.4: ; 2 components: alpha = 1.0f
mov w8, #1065353216 ; bit pattern of 1.0f
str w8, [x19, #4]
b LBB89_13
LBB89_5: ; zero exponent: dispatch on req_comp-1
sub w8, w20, #1
cmp w8, #3
b.hi LBB89_13 ; req_comp outside 1..4: store nothing
; %bb.6:
Lloh477:
adrp x9, lJTI89_0@PAGE
Lloh478:
add x9, x9, lJTI89_0@PAGEOFF
adr x10, LBB89_7
ldrb w11, [x9, x8]
add x10, x10, x11, lsl #2 ; table holds (target-LBB89_7)>>2
br x10
LBB89_7: ; req_comp == 2: alpha = 1.0f, then zero
mov w8, #1065353216
str w8, [x19, #4]
b LBB89_12
LBB89_8: ; 3 or 4 components: scale each channel
ucvtf s1, w8
fmul s1, s0, s1
str s1, [x19] ; r * scale
ldr b1, [x21, #1]
ucvtf s1, s1
fmul s1, s0, s1
str s1, [x19, #4] ; g * scale
ldr b1, [x21, #2]
ucvtf s1, s1
fmul s0, s0, s1
str s0, [x19, #8] ; b * scale
cmp w20, #4
b.ne LBB89_3
LBB89_9: ; 4 components: alpha = 1.0f
mov w8, #1065353216
str w8, [x19, #12]
b LBB89_13
LBB89_10: ; req_comp == 4: alpha = 1.0f, then zeros
mov w8, #1065353216
str w8, [x19, #12]
LBB89_11: ; zero channels 1 and 2
stp wzr, wzr, [x19, #4]
LBB89_12: ; zero channel 0
str wzr, [x19]
LBB89_13:
ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
ret
.loh AdrpAdd Lloh477, Lloh478
.cfi_endproc
.section __TEXT,__const
lJTI89_0: ; zero-exponent dispatch, indexed by req_comp-1
.byte (LBB89_12-LBB89_7)>>2 ; req_comp 1: zero channel 0 only
.byte (LBB89_7-LBB89_7)>>2 ; req_comp 2: zero + alpha = 1.0f
.byte (LBB89_11-LBB89_7)>>2 ; req_comp 3: zero all three channels
.byte (LBB89_10-LBB89_7)>>2 ; req_comp 4: zeros + alpha = 1.0f
; -- End function
.section __TEXT,__text,regular,pure_instructions
.p2align 2 ; -- Begin function writefv
_writefv: ; @writefv
.cfi_startproc
; %bb.0:
sub sp, sp, #64
.cfi_def_cfa_offset 64
stp x22, x21, [sp, #16] ; 16-byte Folded Spill
stp x20, x19, [sp, #32] ; 16-byte Folded Spill
stp x29, x30, [sp, #48] ; 16-byte Folded Spill
add x29, sp, #48
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
.cfi_offset w21, -40
.cfi_offset w22, -48
str x2, [sp]
ldrb w8, [x1]
cbz w8, LBB90_9
; %bb.1:
mov x19, x0
add x20, x1, #1
Lloh479:
adrp x21, lJTI90_0@PAGE
Lloh480:
add x21, x21, lJTI90_0@PAGEOFF
b LBB90_5
LBB90_2: ; in Loop: Header=BB90_5 Depth=1
ldr x8, [sp]
add x9, x8, #8
str x9, [sp]
ldr w22, [x8]
strb w22, [sp, #12]
add x0, sp, #12
mov w1, #1
mov w2, #1
mov x3, x19
bl _fwrite
lsr w8, w22, #8
strb w8, [sp, #13]
add x0, sp, #13
mov w1, #1
mov w2, #1
mov x3, x19
bl _fwrite
lsr w8, w22, #16
strb w8, [sp, #14]
add x0, sp, #14
mov w1, #1
mov w2, #1
mov x3, x19
bl _fwrite
lsr w8, w22, #24
strb w8, [sp, #15]
add x0, sp, #15
LBB90_3: ; in Loop: Header=BB90_5 Depth=1
mov w1, #1
mov w2, #1
mov x3, x19
bl _fwrite
LBB90_4: ; in Loop: Header=BB90_5 Depth=1
ldrb w8, [x20], #1
cbz w8, LBB90_9
LBB90_5: ; =>This Inner Loop Header: Depth=1
sxtb w8, w8
sub w8, w8, #32
cmp w8, #20
b.hi LBB90_10
; %bb.6: ; in Loop: Header=BB90_5 Depth=1
adr x9, LBB90_2
ldrb w10, [x21, x8]
add x9, x9, x10, lsl #2
br x9
LBB90_7: ; in Loop: Header=BB90_5 Depth=1
ldr x8, [sp]
add x9, x8, #8
str x9, [sp]
ldr w8, [x8]
strb w8, [sp, #9]
add x0, sp, #9
b LBB90_3
LBB90_8: ; in Loop: Header=BB90_5 Depth=1
ldr x8, [sp]
add x9, x8, #8
str x9, [sp]
ldr w22, [x8]
strb w22, [sp, #10]
add x0, sp, #10
mov w1, #1
mov w2, #1
mov x3, x19
bl _fwrite
lsr w8, w22, #8
strb w8, [sp, #11]
add x0, sp, #11
b LBB90_3
LBB90_9:
ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
ldp x22, x21, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #64
ret
LBB90_10:
bl _writefv.cold.1
.loh AdrpAdd Lloh479, Lloh480
.cfi_endproc
.section __TEXT,__const
lJTI90_0:
.byte (LBB90_4-LBB90_2)>>2
.byte (LBB90_10-LBB90_2)>>2
.byte (LBB90_10-LBB90_2)>>2
.byte (LBB90_10-LBB90_2)>>2
.byte (LBB90_10-LBB90_2)>>2
.byte (LBB90_10-LBB90_2)>>2
.byte (LBB90_10-LBB90_2)>>2
.byte (LBB90_10-LBB90_2)>>2
.byte (LBB90_10-LBB90_2)>>2
.byte (LBB90_10-LBB90_2)>>2
.byte (LBB90_10-LBB90_2)>>2
.byte (LBB90_10-LBB90_2)>>2
.byte (LBB90_10-LBB90_2)>>2
.byte (LBB90_10-LBB90_2)>>2
.byte (LBB90_10-LBB90_2)>>2
.byte (LBB90_10-LBB90_2)>>2
.byte (LBB90_10-LBB90_2)>>2
.byte (LBB90_7-LBB90_2)>>2
.byte (LBB90_8-LBB90_2)>>2
.byte (LBB90_10-LBB90_2)>>2
.byte (LBB90_2-LBB90_2)>>2
; -- End function
.section __TEXT,__text,regular,pure_instructions
.p2align 2 ; -- Begin function writef
_writef: ; @writef
.cfi_startproc
; %bb.0:
sub sp, sp, #32
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
add x8, x29, #16
str x8, [sp, #8]
Lloh481:
adrp x1, l_.str.98@PAGE
Lloh482:
add x1, x1, l_.str.98@PAGEOFF
add x2, x29, #16
bl _writefv
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #32
ret
.loh AdrpAdd Lloh481, Lloh482
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function do_zlib.cold.1
_do_zlib.cold.1: ; @do_zlib.cold.1
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh483:
adrp x0, l___func__.compute_huffman_codes@PAGE
Lloh484:
add x0, x0, l___func__.compute_huffman_codes@PAGEOFF
Lloh485:
adrp x1, l_.str.13@PAGE
Lloh486:
add x1, x1, l_.str.13@PAGEOFF
Lloh487:
adrp x3, l_.str.45@PAGE
Lloh488:
add x3, x3, l_.str.45@PAGEOFF
mov w2, #2121
bl ___assert_rtn
.loh AdrpAdd Lloh487, Lloh488
.loh AdrpAdd Lloh485, Lloh486
.loh AdrpAdd Lloh483, Lloh484
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function do_zlib.cold.2
_do_zlib.cold.2: ; @do_zlib.cold.2
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh489:
adrp x0, l___func__.fill_bits@PAGE
Lloh490:
add x0, x0, l___func__.fill_bits@PAGEOFF
Lloh491:
adrp x1, l_.str.13@PAGE
Lloh492:
add x1, x1, l_.str.13@PAGEOFF
Lloh493:
adrp x3, l_.str.37@PAGE
Lloh494:
add x3, x3, l_.str.37@PAGEOFF
bl _OUTLINED_FUNCTION_0
.loh AdrpAdd Lloh493, Lloh494
.loh AdrpAdd Lloh491, Lloh492
.loh AdrpAdd Lloh489, Lloh490
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function do_zlib.cold.3
_do_zlib.cold.3: ; @do_zlib.cold.3
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh495:
adrp x0, l___func__.fill_bits@PAGE
Lloh496:
add x0, x0, l___func__.fill_bits@PAGEOFF
Lloh497:
adrp x1, l_.str.13@PAGE
Lloh498:
add x1, x1, l_.str.13@PAGEOFF
Lloh499:
adrp x3, l_.str.37@PAGE
Lloh500:
add x3, x3, l_.str.37@PAGEOFF
bl _OUTLINED_FUNCTION_0
.loh AdrpAdd Lloh499, Lloh500
.loh AdrpAdd Lloh497, Lloh498
.loh AdrpAdd Lloh495, Lloh496
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function do_zlib.cold.4
_do_zlib.cold.4: ; @do_zlib.cold.4
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh501:
adrp x0, l___func__.compute_huffman_codes@PAGE
Lloh502:
add x0, x0, l___func__.compute_huffman_codes@PAGEOFF
Lloh503:
adrp x1, l_.str.13@PAGE
Lloh504:
add x1, x1, l_.str.13@PAGEOFF
Lloh505:
adrp x3, l_.str.46@PAGE
Lloh506:
add x3, x3, l_.str.46@PAGEOFF
mov w2, #2133
bl ___assert_rtn
.loh AdrpAdd Lloh505, Lloh506
.loh AdrpAdd Lloh503, Lloh504
.loh AdrpAdd Lloh501, Lloh502
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function do_zlib.cold.5
_do_zlib.cold.5: ; @do_zlib.cold.5
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh507:
adrp x0, l___func__.fill_bits@PAGE
Lloh508:
add x0, x0, l___func__.fill_bits@PAGEOFF
Lloh509:
adrp x1, l_.str.13@PAGE
Lloh510:
add x1, x1, l_.str.13@PAGEOFF
Lloh511:
adrp x3, l_.str.37@PAGE
Lloh512:
add x3, x3, l_.str.37@PAGEOFF
bl _OUTLINED_FUNCTION_0
.loh AdrpAdd Lloh511, Lloh512
.loh AdrpAdd Lloh509, Lloh510
.loh AdrpAdd Lloh507, Lloh508
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function do_zlib.cold.6
_do_zlib.cold.6: ; @do_zlib.cold.6
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh513:
adrp x0, l___func__.fill_bits@PAGE
Lloh514:
add x0, x0, l___func__.fill_bits@PAGEOFF
Lloh515:
adrp x1, l_.str.13@PAGE
Lloh516:
add x1, x1, l_.str.13@PAGEOFF
Lloh517:
adrp x3, l_.str.37@PAGE
Lloh518:
add x3, x3, l_.str.37@PAGEOFF
bl _OUTLINED_FUNCTION_0
.loh AdrpAdd Lloh517, Lloh518
.loh AdrpAdd Lloh515, Lloh516
.loh AdrpAdd Lloh513, Lloh514
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function do_zlib.cold.7
_do_zlib.cold.7: ; @do_zlib.cold.7
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh519:
adrp x0, l___func__.fill_bits@PAGE
Lloh520:
add x0, x0, l___func__.fill_bits@PAGEOFF
Lloh521:
adrp x1, l_.str.13@PAGE
Lloh522:
add x1, x1, l_.str.13@PAGEOFF
Lloh523:
adrp x3, l_.str.37@PAGE
Lloh524:
add x3, x3, l_.str.37@PAGEOFF
bl _OUTLINED_FUNCTION_0
.loh AdrpAdd Lloh523, Lloh524
.loh AdrpAdd Lloh521, Lloh522
.loh AdrpAdd Lloh519, Lloh520
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function do_zlib.cold.8
_do_zlib.cold.8: ; @do_zlib.cold.8
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh525:
adrp x0, l___func__.fill_bits@PAGE
Lloh526:
add x0, x0, l___func__.fill_bits@PAGEOFF
Lloh527:
adrp x1, l_.str.13@PAGE
Lloh528:
add x1, x1, l_.str.13@PAGEOFF
Lloh529:
adrp x3, l_.str.37@PAGE
Lloh530:
add x3, x3, l_.str.37@PAGEOFF
bl _OUTLINED_FUNCTION_0
.loh AdrpAdd Lloh529, Lloh530
.loh AdrpAdd Lloh527, Lloh528
.loh AdrpAdd Lloh525, Lloh526
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function do_zlib.cold.9
_do_zlib.cold.9: ; @do_zlib.cold.9
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh531:
adrp x0, l___func__.fill_bits@PAGE
Lloh532:
add x0, x0, l___func__.fill_bits@PAGEOFF
Lloh533:
adrp x1, l_.str.13@PAGE
Lloh534:
add x1, x1, l_.str.13@PAGEOFF
Lloh535:
adrp x3, l_.str.37@PAGE
Lloh536:
add x3, x3, l_.str.37@PAGEOFF
bl _OUTLINED_FUNCTION_0
.loh AdrpAdd Lloh535, Lloh536
.loh AdrpAdd Lloh533, Lloh534
.loh AdrpAdd Lloh531, Lloh532
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function do_zlib.cold.10
_do_zlib.cold.10: ; @do_zlib.cold.10
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh537:
adrp x0, l___func__.fill_bits@PAGE
Lloh538:
add x0, x0, l___func__.fill_bits@PAGEOFF
Lloh539:
adrp x1, l_.str.13@PAGE
Lloh540:
add x1, x1, l_.str.13@PAGEOFF
Lloh541:
adrp x3, l_.str.37@PAGE
Lloh542:
add x3, x3, l_.str.37@PAGEOFF
bl _OUTLINED_FUNCTION_0
.loh AdrpAdd Lloh541, Lloh542
.loh AdrpAdd Lloh539, Lloh540
.loh AdrpAdd Lloh537, Lloh538
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function do_zlib.cold.11
_do_zlib.cold.11: ; @do_zlib.cold.11
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh543:
adrp x0, l___func__.fill_bits@PAGE
Lloh544:
add x0, x0, l___func__.fill_bits@PAGEOFF
Lloh545:
adrp x1, l_.str.13@PAGE
Lloh546:
add x1, x1, l_.str.13@PAGEOFF
Lloh547:
adrp x3, l_.str.37@PAGE
Lloh548:
add x3, x3, l_.str.37@PAGEOFF
bl _OUTLINED_FUNCTION_0
.loh AdrpAdd Lloh547, Lloh548
.loh AdrpAdd Lloh545, Lloh546
.loh AdrpAdd Lloh543, Lloh544
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function do_zlib.cold.12
_do_zlib.cold.12: ; @do_zlib.cold.12
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh549:
adrp x0, l___func__.parse_uncompressed_block@PAGE
Lloh550:
add x0, x0, l___func__.parse_uncompressed_block@PAGEOFF
Lloh551:
adrp x1, l_.str.13@PAGE
Lloh552:
add x1, x1, l_.str.13@PAGEOFF
Lloh553:
adrp x3, l_.str.38@PAGE
Lloh554:
add x3, x3, l_.str.38@PAGEOFF
mov w2, #2158
bl ___assert_rtn
.loh AdrpAdd Lloh553, Lloh554
.loh AdrpAdd Lloh551, Lloh552
.loh AdrpAdd Lloh549, Lloh550
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function do_zlib.cold.13
_do_zlib.cold.13: ; @do_zlib.cold.13
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh555:
adrp x0, l___func__.fill_bits@PAGE
Lloh556:
add x0, x0, l___func__.fill_bits@PAGEOFF
Lloh557:
adrp x1, l_.str.13@PAGE
Lloh558:
add x1, x1, l_.str.13@PAGEOFF
Lloh559:
adrp x3, l_.str.37@PAGE
Lloh560:
add x3, x3, l_.str.37@PAGEOFF
bl _OUTLINED_FUNCTION_0
.loh AdrpAdd Lloh559, Lloh560
.loh AdrpAdd Lloh557, Lloh558
.loh AdrpAdd Lloh555, Lloh556
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function do_zlib.cold.14
_do_zlib.cold.14: ; @do_zlib.cold.14
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh561:
adrp x0, l___func__.fill_bits@PAGE
Lloh562:
add x0, x0, l___func__.fill_bits@PAGEOFF
Lloh563:
adrp x1, l_.str.13@PAGE
Lloh564:
add x1, x1, l_.str.13@PAGEOFF
Lloh565:
adrp x3, l_.str.37@PAGE
Lloh566:
add x3, x3, l_.str.37@PAGEOFF
bl _OUTLINED_FUNCTION_0
.loh AdrpAdd Lloh565, Lloh566
.loh AdrpAdd Lloh563, Lloh564
.loh AdrpAdd Lloh561, Lloh562
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function parse_png_file.cold.1
_parse_png_file.cold.1: ; @parse_png_file.cold.1
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh567:
adrp x0, l___func__.compute_transparency@PAGE
Lloh568:
add x0, x0, l___func__.compute_transparency@PAGEOFF
Lloh569:
adrp x1, l_.str.13@PAGE
Lloh570:
add x1, x1, l_.str.13@PAGEOFF
Lloh571:
adrp x3, l_.str.73@PAGE
Lloh572:
add x3, x3, l_.str.73@PAGEOFF
mov w2, #2491
bl ___assert_rtn
.loh AdrpAdd Lloh571, Lloh572
.loh AdrpAdd Lloh569, Lloh570
.loh AdrpAdd Lloh567, Lloh568
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function bmp_load.cold.1
_bmp_load.cold.1: ; @bmp_load.cold.1
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh573:
adrp x0, l___func__.bmp_load@PAGE
Lloh574:
add x0, x0, l___func__.bmp_load@PAGEOFF
Lloh575:
adrp x1, l_.str.13@PAGE
Lloh576:
add x1, x1, l_.str.13@PAGEOFF
Lloh577:
adrp x3, l_.str.79@PAGE
Lloh578:
add x3, x3, l_.str.79@PAGEOFF
mov w2, #2933
bl ___assert_rtn
.loh AdrpAdd Lloh577, Lloh578
.loh AdrpAdd Lloh575, Lloh576
.loh AdrpAdd Lloh573, Lloh574
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function decode.cold.1
_decode.cold.1: ; @decode.cold.1
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh579:
adrp x0, l___func__.decode@PAGE
Lloh580:
add x0, x0, l___func__.decode@PAGEOFF
Lloh581:
adrp x1, l_.str.13@PAGE
Lloh582:
add x1, x1, l_.str.13@PAGEOFF
Lloh583:
adrp x3, l_.str.14@PAGE
Lloh584:
add x3, x3, l_.str.14@PAGEOFF
mov w2, #1027
bl ___assert_rtn
.loh AdrpAdd Lloh583, Lloh584
.loh AdrpAdd Lloh581, Lloh582
.loh AdrpAdd Lloh579, Lloh580
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function zbuild_huffman.cold.1
_zbuild_huffman.cold.1: ; @zbuild_huffman.cold.1
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh585:
adrp x0, l___func__.zbuild_huffman@PAGE
Lloh586:
add x0, x0, l___func__.zbuild_huffman@PAGEOFF
Lloh587:
adrp x1, l_.str.13@PAGE
Lloh588:
add x1, x1, l_.str.13@PAGEOFF
Lloh589:
adrp x3, l_.str.42@PAGE
Lloh590:
add x3, x3, l_.str.42@PAGEOFF
mov w2, #1935
bl ___assert_rtn
.loh AdrpAdd Lloh589, Lloh590
.loh AdrpAdd Lloh587, Lloh588
.loh AdrpAdd Lloh585, Lloh586
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function zhuffman_decode.cold.1
_zhuffman_decode.cold.1: ; @zhuffman_decode.cold.1
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh591:
adrp x0, l___func__.zhuffman_decode@PAGE
Lloh592:
add x0, x0, l___func__.zhuffman_decode@PAGEOFF
Lloh593:
adrp x1, l_.str.13@PAGE
Lloh594:
add x1, x1, l_.str.13@PAGEOFF
Lloh595:
adrp x3, l_.str.47@PAGE
Lloh596:
add x3, x3, l_.str.47@PAGEOFF
mov w2, #2034
bl ___assert_rtn
.loh AdrpAdd Lloh595, Lloh596
.loh AdrpAdd Lloh593, Lloh594
.loh AdrpAdd Lloh591, Lloh592
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function zhuffman_decode.cold.2
_zhuffman_decode.cold.2: ; @zhuffman_decode.cold.2
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh597:
adrp x0, l___func__.fill_bits@PAGE
Lloh598:
add x0, x0, l___func__.fill_bits@PAGEOFF
Lloh599:
adrp x1, l_.str.13@PAGE
Lloh600:
add x1, x1, l_.str.13@PAGEOFF
Lloh601:
adrp x3, l_.str.37@PAGE
Lloh602:
add x3, x3, l_.str.37@PAGEOFF
bl _OUTLINED_FUNCTION_0
.loh AdrpAdd Lloh601, Lloh602
.loh AdrpAdd Lloh599, Lloh600
.loh AdrpAdd Lloh597, Lloh598
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function convert_format.cold.1
_convert_format.cold.1: ; @convert_format.cold.1
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh603:
adrp x0, l___func__.convert_format@PAGE
Lloh604:
add x0, x0, l___func__.convert_format@PAGEOFF
Lloh605:
adrp x1, l_.str.13@PAGE
Lloh606:
add x1, x1, l_.str.13@PAGEOFF
Lloh607:
adrp x3, l_.str.49@PAGE
Lloh608:
add x3, x3, l_.str.49@PAGEOFF
mov w2, #760
bl ___assert_rtn
.loh AdrpAdd Lloh607, Lloh608
.loh AdrpAdd Lloh605, Lloh606
.loh AdrpAdd Lloh603, Lloh604
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function convert_format.cold.2
_convert_format.cold.2: ; @convert_format.cold.2
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh609:
adrp x0, l___func__.convert_format@PAGE
Lloh610:
add x0, x0, l___func__.convert_format@PAGEOFF
Lloh611:
adrp x1, l_.str.13@PAGE
Lloh612:
add x1, x1, l_.str.13@PAGEOFF
Lloh613:
adrp x3, l_.str.50@PAGE
Lloh614:
add x3, x3, l_.str.50@PAGEOFF
mov w2, #789
bl ___assert_rtn
.loh AdrpAdd Lloh613, Lloh614
.loh AdrpAdd Lloh611, Lloh612
.loh AdrpAdd Lloh609, Lloh610
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function create_png_image_raw.cold.1
_create_png_image_raw.cold.1: ; @create_png_image_raw.cold.1
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh615:
adrp x0, l___func__.create_png_image_raw@PAGE
Lloh616:
add x0, x0, l___func__.create_png_image_raw@PAGEOFF
Lloh617:
adrp x1, l_.str.13@PAGE
Lloh618:
add x1, x1, l_.str.13@PAGEOFF
Lloh619:
adrp x3, l_.str.72@PAGE
Lloh620:
add x3, x3, l_.str.72@PAGEOFF
mov w2, #2422
bl ___assert_rtn
.loh AdrpAdd Lloh619, Lloh620
.loh AdrpAdd Lloh617, Lloh618
.loh AdrpAdd Lloh615, Lloh616
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function create_png_image_raw.cold.2
_create_png_image_raw.cold.2: ; @create_png_image_raw.cold.2
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh621:
adrp x0, l___func__.create_png_image_raw@PAGE
Lloh622:
add x0, x0, l___func__.create_png_image_raw@PAGEOFF
Lloh623:
adrp x1, l_.str.13@PAGE
Lloh624:
add x1, x1, l_.str.13@PAGEOFF
Lloh625:
adrp x3, l_.str.69@PAGE
Lloh626:
add x3, x3, l_.str.69@PAGEOFF
mov w2, #2372
bl ___assert_rtn
.loh AdrpAdd Lloh625, Lloh626
.loh AdrpAdd Lloh623, Lloh624
.loh AdrpAdd Lloh621, Lloh622
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function writefv.cold.1
_writefv.cold.1: ; @writefv.cold.1
.cfi_startproc
; %bb.0:
stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 16
mov x29, sp
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
Lloh627:
adrp x0, l___func__.writefv@PAGE
Lloh628:
add x0, x0, l___func__.writefv@PAGEOFF
Lloh629:
adrp x1, l_.str.13@PAGE
Lloh630:
add x1, x1, l_.str.13@PAGEOFF
Lloh631:
adrp x3, l_.str.50@PAGE
Lloh632:
add x3, x3, l_.str.50@PAGEOFF
mov w2, #3812
bl ___assert_rtn
.loh AdrpAdd Lloh631, Lloh632
.loh AdrpAdd Lloh629, Lloh630
.loh AdrpAdd Lloh627, Lloh628
.cfi_endproc
; -- End function
.p2align 2 ; -- Begin function OUTLINED_FUNCTION_0
_OUTLINED_FUNCTION_0: ; @OUTLINED_FUNCTION_0 Thunk
.cfi_startproc
; %bb.0:
mov w2, #1997
b ___assert_rtn
.cfi_endproc
; -- End function
.zerofill __DATA,__bss,_failure_reason,8,3 ; @failure_reason
.comm _loaders,256,3 ; @loaders
.zerofill __DATA,__bss,_max_loaders,4,2 ; @max_loaders
.section __TEXT,__cstring,cstring_literals
l_.str: ; @.str
.asciz "rb"
l_.str.1: ; @.str.1
.asciz "can't fopen"
l_.str.2: ; @.str.2
.asciz "unknown image type"
.section __DATA,__data
.p2align 2 ; @h2l_gamma_i
_h2l_gamma_i:
.long 0x3ee8ba2e ; float 0.454545438
.p2align 2 ; @h2l_scale_i
_h2l_scale_i:
.long 0x3f800000 ; float 1
.p2align 2 ; @l2h_gamma
_l2h_gamma:
.long 0x400ccccd ; float 2.20000005
.p2align 2 ; @l2h_scale
_l2h_scale:
.long 0x3f800000 ; float 1
.section __TEXT,__cstring,cstring_literals
l_.str.3: ; @.str.3
.asciz "11 4 22 44 44 22 444444"
l_.str.4: ; @.str.4
.asciz "111 221 2222 11"
.comm _stbi_png_partial,4,2 ; @stbi_png_partial
l_.str.5: ; @.str.5
.asciz "outofmem"
l_.str.6: ; @.str.6
.asciz "bad req_comp"
l_.str.7: ; @.str.7
.asciz "bad SOS component count"
l_.str.8: ; @.str.8
.asciz "bad SOS len"
l_.str.9: ; @.str.9
.asciz "bad DC huff"
l_.str.10: ; @.str.10
.asciz "bad AC huff"
l_.str.11: ; @.str.11
.asciz "bad SOS"
l_.str.12: ; @.str.12
.asciz "bad huffman code"
.section __TEXT,__const
_dezigzag: ; @dezigzag
.ascii "\000\001\b\020\t\002\003\n\021\030 \031\022\013\004\005\f\023\032!(0)\"\033\024\r\006\007\016\025\034#*1892+$\035\026\017\027\036%,3:;4-&\037'.5<=6/7>????????????????"
.p2align 2 ; @bmask
_bmask:
.long 0 ; 0x0
.long 1 ; 0x1
.long 3 ; 0x3
.long 7 ; 0x7
.long 15 ; 0xf
.long 31 ; 0x1f
.long 63 ; 0x3f
.long 127 ; 0x7f
.long 255 ; 0xff
.long 511 ; 0x1ff
.long 1023 ; 0x3ff
.long 2047 ; 0x7ff
.long 4095 ; 0xfff
.long 8191 ; 0x1fff
.long 16383 ; 0x3fff
.long 32767 ; 0x7fff
.long 65535 ; 0xffff
.section __TEXT,__cstring,cstring_literals
l___func__.decode: ; @__func__.decode
.asciz "decode"
l_.str.13: ; @.str.13
.asciz "247019479.c"
l_.str.14: ; @.str.14
.asciz "(((j->code_buffer) >> (j->code_bits - h->size[c])) & bmask[h->size[c]]) == h->code[c]"
l_.str.15: ; @.str.15
.asciz "expected marker"
l_.str.16: ; @.str.16
.asciz "progressive jpeg"
l_.str.17: ; @.str.17
.asciz "bad DRI len"
l_.str.18: ; @.str.18
.asciz "bad DQT type"
l_.str.19: ; @.str.19
.asciz "bad DQT table"
l_.str.20: ; @.str.20
.asciz "bad DHT header"
l_.str.21: ; @.str.21
.asciz "bad code lengths"
l_.str.22: ; @.str.22
.asciz "no SOI"
l_.str.23: ; @.str.23
.asciz "no SOF"
l_.str.24: ; @.str.24
.asciz "bad SOF len"
l_.str.25: ; @.str.25
.asciz "only 8-bit"
l_.str.26: ; @.str.26
.asciz "no header height"
l_.str.27: ; @.str.27
.asciz "0 width"
l_.str.28: ; @.str.28
.asciz "bad component count"
l_.str.29: ; @.str.29
.asciz "bad component ID"
l_.str.30: ; @.str.30
.asciz "bad H"
l_.str.31: ; @.str.31
.asciz "bad V"
l_.str.32: ; @.str.32
.asciz "bad TQ"
l_.str.33: ; @.str.33
.asciz "too large"
.zerofill __DATA,__bss,_default_distance,32,0 ; @default_distance
.zerofill __DATA,__bss,_default_length,288,0 ; @default_length
l_.str.34: ; @.str.34
.asciz "bad zlib header"
l_.str.35: ; @.str.35
.asciz "no preset dict"
l_.str.36: ; @.str.36
.asciz "bad compression"
l___func__.fill_bits: ; @__func__.fill_bits
.asciz "fill_bits"
l_.str.37: ; @.str.37
.asciz "z->code_buffer < (1U << z->num_bits)"
l___func__.parse_uncompressed_block: ; @__func__.parse_uncompressed_block
.asciz "parse_uncompressed_block"
l_.str.38: ; @.str.38
.asciz "a->num_bits == 0"
l_.str.39: ; @.str.39
.asciz "zlib corrupt"
l_.str.40: ; @.str.40
.asciz "read past buffer"
l_.str.41: ; @.str.41
.asciz "output buffer limit"
l___func__.zbuild_huffman: ; @__func__.zbuild_huffman
.asciz "zbuild_huffman"
l_.str.42: ; @.str.42
.asciz "sizes[i] <= (1 << i)"
l_.str.43: ; @.str.43
.asciz "bad codelengths"
.section __TEXT,__const
_compute_huffman_codes.length_dezigzag: ; @compute_huffman_codes.length_dezigzag
.ascii "\020\021\022\000\b\007\t\006\n\005\013\004\f\003\r\002\016\001\017"
.section __TEXT,__cstring,cstring_literals
l___func__.compute_huffman_codes: ; @__func__.compute_huffman_codes
.asciz "compute_huffman_codes"
l_.str.45: ; @.str.45
.asciz "c >= 0 && c < 19"
l_.str.46: ; @.str.46
.asciz "c == 18"
l___func__.zhuffman_decode: ; @__func__.zhuffman_decode
.asciz "zhuffman_decode"
l_.str.47: ; @.str.47
.asciz "z->size[b] == s"
.section __TEXT,__const
.p2align 2 ; @length_base
_length_base:
.long 3 ; 0x3
.long 4 ; 0x4
.long 5 ; 0x5
.long 6 ; 0x6
.long 7 ; 0x7
.long 8 ; 0x8
.long 9 ; 0x9
.long 10 ; 0xa
.long 11 ; 0xb
.long 13 ; 0xd
.long 15 ; 0xf
.long 17 ; 0x11
.long 19 ; 0x13
.long 23 ; 0x17
.long 27 ; 0x1b
.long 31 ; 0x1f
.long 35 ; 0x23
.long 43 ; 0x2b
.long 51 ; 0x33
.long 59 ; 0x3b
.long 67 ; 0x43
.long 83 ; 0x53
.long 99 ; 0x63
.long 115 ; 0x73
.long 131 ; 0x83
.long 163 ; 0xa3
.long 195 ; 0xc3
.long 227 ; 0xe3
.long 258 ; 0x102
.long 0 ; 0x0
.long 0 ; 0x0
.p2align 2 ; @length_extra
_length_extra:
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 1 ; 0x1
.long 1 ; 0x1
.long 1 ; 0x1
.long 1 ; 0x1
.long 2 ; 0x2
.long 2 ; 0x2
.long 2 ; 0x2
.long 2 ; 0x2
.long 3 ; 0x3
.long 3 ; 0x3
.long 3 ; 0x3
.long 3 ; 0x3
.long 4 ; 0x4
.long 4 ; 0x4
.long 4 ; 0x4
.long 4 ; 0x4
.long 5 ; 0x5
.long 5 ; 0x5
.long 5 ; 0x5
.long 5 ; 0x5
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.p2align 2 ; @dist_base
_dist_base:
.long 1 ; 0x1
.long 2 ; 0x2
.long 3 ; 0x3
.long 4 ; 0x4
.long 5 ; 0x5
.long 7 ; 0x7
.long 9 ; 0x9
.long 13 ; 0xd
.long 17 ; 0x11
.long 25 ; 0x19
.long 33 ; 0x21
.long 49 ; 0x31
.long 65 ; 0x41
.long 97 ; 0x61
.long 129 ; 0x81
.long 193 ; 0xc1
.long 257 ; 0x101
.long 385 ; 0x181
.long 513 ; 0x201
.long 769 ; 0x301
.long 1025 ; 0x401
.long 1537 ; 0x601
.long 2049 ; 0x801
.long 3073 ; 0xc01
.long 4097 ; 0x1001
.long 6145 ; 0x1801
.long 8193 ; 0x2001
.long 12289 ; 0x3001
.long 16385 ; 0x4001
.long 24577 ; 0x6001
.long 0 ; 0x0
.long 0 ; 0x0
.p2align 2 ; @dist_extra
_dist_extra:
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 0 ; 0x0
.long 1 ; 0x1
.long 1 ; 0x1
.long 2 ; 0x2
.long 2 ; 0x2
.long 3 ; 0x3
.long 3 ; 0x3
.long 4 ; 0x4
.long 4 ; 0x4
.long 5 ; 0x5
.long 5 ; 0x5
.long 6 ; 0x6
.long 6 ; 0x6
.long 7 ; 0x7
.long 7 ; 0x7
.long 8 ; 0x8
.long 8 ; 0x8
.long 9 ; 0x9
.long 9 ; 0x9
.long 10 ; 0xa
.long 10 ; 0xa
.long 11 ; 0xb
.long 11 ; 0xb
.long 12 ; 0xc
.long 12 ; 0xc
.long 13 ; 0xd
.long 13 ; 0xd
.long 0 ; 0x0
.long 0 ; 0x0
.section __TEXT,__cstring,cstring_literals
l_.str.48: ; @.str.48
.asciz "bad dist"
l___func__.convert_format: ; @__func__.convert_format
.asciz "convert_format"
l_.str.49: ; @.str.49
.asciz "req_comp >= 1 && req_comp <= 4"
l_.str.50: ; @.str.50
.asciz "0"
l_.str.51: ; @.str.51
.asciz "first not IHDR"
l_.str.52: ; @.str.52
.asciz "multiple IHDR"
l_.str.53: ; @.str.53
.asciz "bad IHDR len"
l_.str.54: ; @.str.54
.asciz "8bit only"
l_.str.55: ; @.str.55
.asciz "bad ctype"
l_.str.56: ; @.str.56
.asciz "bad comp method"
l_.str.57: ; @.str.57
.asciz "bad filter method"
l_.str.58: ; @.str.58
.asciz "bad interlace method"
l_.str.59: ; @.str.59
.asciz "0-pixel image"
l_.str.60: ; @.str.60
.asciz "invalid PLTE"
l_.str.61: ; @.str.61
.asciz "tRNS after IDAT"
l_.str.62: ; @.str.62
.asciz "tRNS before PLTE"
l_.str.63: ; @.str.63
.asciz "bad tRNS len"
l_.str.64: ; @.str.64
.asciz "tRNS with alpha"
l_.str.65: ; @.str.65
.asciz "no PLTE"
l_.str.66: ; @.str.66
.asciz "outofdata"
l_.str.67: ; @.str.67
.asciz "no IDAT"
.section __DATA,__data
_parse_png_file.invalid_chunk: ; @parse_png_file.invalid_chunk
.asciz "XXXX chunk not known"
.section __TEXT,__const
_check_png_header.png_sig: ; @check_png_header.png_sig
.ascii "\211PNG\r\n\032\n"
.section __TEXT,__cstring,cstring_literals
l_.str.68: ; @.str.68
.asciz "bad png sig"
.section __TEXT,__const
.p2align 2 ; @__const.create_png_image.xorig
l___const.create_png_image.xorig:
.long 0 ; 0x0
.long 4 ; 0x4
.long 0 ; 0x0
.long 2 ; 0x2
.long 0 ; 0x0
.long 1 ; 0x1
.long 0 ; 0x0
.p2align 2 ; @__const.create_png_image.yorig
l___const.create_png_image.yorig:
.long 0 ; 0x0
.long 0 ; 0x0
.long 4 ; 0x4
.long 0 ; 0x0
.long 2 ; 0x2
.long 0 ; 0x0
.long 1 ; 0x1
.p2align 2 ; @__const.create_png_image.xspc
l___const.create_png_image.xspc:
.long 8 ; 0x8
.long 8 ; 0x8
.long 4 ; 0x4
.long 4 ; 0x4
.long 2 ; 0x2
.long 2 ; 0x2
.long 1 ; 0x1
.p2align 2 ; @__const.create_png_image.yspc
l___const.create_png_image.yspc:
.long 8 ; 0x8
.long 8 ; 0x8
.long 8 ; 0x8
.long 4 ; 0x4
.long 4 ; 0x4
.long 2 ; 0x2
.long 2 ; 0x2
.section __TEXT,__cstring,cstring_literals
l___func__.create_png_image_raw: ; @__func__.create_png_image_raw
.asciz "create_png_image_raw"
l_.str.69: ; @.str.69
.asciz "out_n == s->img_n || out_n == s->img_n+1"
l_.str.70: ; @.str.70
.asciz "not enough pixels"
l_.str.71: ; @.str.71
.asciz "invalid filter"
.section __TEXT,__const
_first_row_filter: ; @first_row_filter
.ascii "\000\001\000\005\006"
.section __TEXT,__cstring,cstring_literals
l_.str.72: ; @.str.72
.asciz "img_n+1 == out_n"
l___func__.compute_transparency: ; @__func__.compute_transparency
.asciz "compute_transparency"
l_.str.73: ; @.str.73
.asciz "out_n == 2 || out_n == 4"
l_.str.74: ; @.str.74
.asciz "not BMP"
l_.str.75: ; @.str.75
.asciz "unknown BMP"
l_.str.76: ; @.str.76
.asciz "bad BMP"
l_.str.77: ; @.str.77
.asciz "monochrome"
l_.str.78: ; @.str.78
.asciz "BMP RLE"
l___func__.bmp_load: ; @__func__.bmp_load
.asciz "bmp_load"
l_.str.79: ; @.str.79
.asciz "hsz == 108"
l_.str.80: ; @.str.80
.asciz "invalid"
l_.str.81: ; @.str.81
.asciz "bad bpp"
l_.str.82: ; @.str.82
.asciz "bad masks"
l_.str.83: ; @.str.83
.asciz "not PSD"
l_.str.84: ; @.str.84
.asciz "wrong version"
l_.str.85: ; @.str.85
.asciz "wrong channel count"
l_.str.86: ; @.str.86
.asciz "unsupported bit depth"
l_.str.87: ; @.str.87
.asciz "wrong color format"
l_.str.89: ; @.str.89
.asciz "#?RADIANCE"
l_.str.90: ; @.str.90
.asciz "not HDR"
l_.str.91: ; @.str.91
.asciz "FORMAT=32-bit_rle_rgbe"
l_.str.92: ; @.str.92
.asciz "unsupported format"
l_.str.93: ; @.str.93
.asciz "-Y "
l_.str.94: ; @.str.94
.asciz "unsupported data layout"
l_.str.95: ; @.str.95
.asciz "+X "
l_.str.96: ; @.str.96
.asciz "invalid decoded scanline length"
l_.str.97: ; @.str.97
.asciz "wb"
l___func__.writefv: ; @__func__.writefv
.asciz "writefv"
l_.str.98: ; @.str.98
.asciz "111"
.subsections_via_symbols
| the_stack_data/247019479.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _mrdb_state_free ## -- Begin function mrdb_state_free
.p2align 4, 0x90
_mrdb_state_free: ## @mrdb_state_free
.cfi_startproc
## %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r14
pushq %rbx
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
movq %rdi, %rbx
callq _mrb_debug_context_free
movq __mrdb_state@GOTPCREL(%rip), %r14
movq (%r14), %rax
testq %rax, %rax
je LBB0_2
## %bb.1:
movq (%rax), %rsi
movq %rbx, %rdi
callq _mrb_free
movq (%r14), %rsi
movq %rbx, %rdi
callq _mrb_free
movq $0, (%r14)
LBB0_2:
popq %rbx
popq %r14
popq %rbp
retq
.cfi_endproc
## -- End function
.comm __mrdb_state,8,3 ## @_mrdb_state
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.globl _mrdb_state_free ; -- Begin function mrdb_state_free
.p2align 2
_mrdb_state_free: ; @mrdb_state_free
.cfi_startproc
; %bb.0:
stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
.cfi_offset w19, -24
.cfi_offset w20, -32
mov x19, x0
bl _mrb_debug_context_free
Lloh0:
adrp x20, __mrdb_state@GOTPAGE
Lloh1:
ldr x20, [x20, __mrdb_state@GOTPAGEOFF]
ldr x8, [x20]
cbz x8, LBB0_2
; %bb.1:
ldr x1, [x8]
mov x0, x19
bl _mrb_free
ldr x1, [x20]
mov x0, x19
bl _mrb_free
str xzr, [x20]
LBB0_2:
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
ret
.loh AdrpLdrGot Lloh0, Lloh1
.cfi_endproc
; -- End function
.comm __mrdb_state,8,3 ; @_mrdb_state
.subsections_via_symbols
| AnghaBench/h2o/deps/mruby/mrbgems/mruby-bin-debugger/tools/mrdb/extr_mrdb.c_mrdb_state_free.c | anghabench |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.section __DATA,__data
.globl _gDataHigh ## @gDataHigh
.p2align 4
_gDataHigh:
.quad 0x405beb851eb851ec ## double 111.68000000000001
.quad 0x405ba00000000000 ## double 110.5
.quad 0x405b870a3d70a3d7 ## double 110.11
.quad 0x405bc66666666666 ## double 111.09999999999999
.quad 0x405c2ae147ae147b ## double 112.67
.quad 0x405bdae147ae147b ## double 111.42
.quad 0x405c366666666666 ## double 112.84999999999999
.quad 0x405c49999999999a ## double 113.15000000000001
.quad 0x405c7ccccccccccd ## double 113.95
.quad 0x405c600000000000 ## double 113.5
.quad 0x405c9f5c28f5c28f ## double 114.48999999999999
.quad 0x405ca66666666666 ## double 114.59999999999999
.quad 0x405c8ae147ae147b ## double 114.17
.quad 0x405c78f5c28f5c29 ## double 113.89
.quad 0x405c8b851eb851ec ## double 114.18000000000001
.quad 0x405c833333333333 ## double 114.05
.quad 0x405c2eb851eb851f ## double 112.73
.quad 0x405cae147ae147ae ## double 114.72
.quad 0x405cf00000000000 ## double 115.75
.quad 0x405d7ccccccccccd ## double 117.95
.quad 0x405db47ae147ae14 ## double 118.81999999999999
.quad 0x405d9851eb851eb8 ## double 118.38
.quad 0x405d400000000000 ## double 117
.quad 0x405d1b851eb851ec ## double 116.43000000000001
.quad 0x405cf70a3d70a3d7 ## double 115.86
.quad 0x405d1eb851eb851f ## double 116.48
.quad 0x405bf851eb851eb8 ## double 111.88
.quad 0x405bde147ae147ae ## double 111.47
.quad 0x405b828f5c28f5c3 ## double 110.04000000000001
.quad 0x405b4ccccccccccd ## double 109.2
.quad 0x405b69999999999a ## double 109.65000000000001
.quad 0x405b666666666666 ## double 109.59999999999999
.quad 0x405b49999999999a ## double 109.15000000000001
.quad 0x405b566666666666 ## double 109.34999999999999
.quad 0x405b6a3d70a3d70a ## double 109.66
.quad 0x405b18f5c28f5c29 ## double 108.39
.quad 0x405ac51eb851eb85 ## double 107.08
.quad 0x405a733333333333 ## double 105.8
.quad 0x405aa51eb851eb85 ## double 106.58
.quad 0x405abae147ae147b ## double 106.92
.quad 0x405a5e147ae147ae ## double 105.47
.quad 0x405a83d70a3d70a4 ## double 106.06
.quad 0x405a7147ae147ae1 ## double 105.77
.quad 0x405a9c28f5c28f5c ## double 106.44
.quad 0x405ac00000000000 ## double 107
.quad 0x405ac147ae147ae1 ## double 107.02
.quad 0x405aaa3d70a3d70a ## double 106.66
.quad 0x405a69999999999a ## double 105.65000000000001
.quad 0x405a4c28f5c28f5c ## double 105.19
.quad 0x405a0f5c28f5c28f ## double 104.23999999999999
.quad 0x4059d0a3d70a3d71 ## double 103.26000000000001
.quad 0x4059cb851eb851ec ## double 103.18000000000001
.quad 0x405a000000000000 ## double 104
.quad 0x4059cd70a3d70a3d ## double 103.20999999999999
.quad 0x4059cae147ae147b ## double 103.17
.quad 0x405a60a3d70a3d71 ## double 105.51000000000001
.quad 0x405aa33333333333 ## double 106.55
.quad 0x405aa5c28f5c28f6 ## double 106.59
.quad 0x405acf5c28f5c28f ## double 107.23999999999999
.quad 0x405aeae147ae147b ## double 107.67
.quad 0x405ac66666666666 ## double 107.09999999999999
.quad 0x405aa851eb851eb8 ## double 106.63
.quad 0x405a5b851eb851ec ## double 105.43000000000001
.quad 0x405a7eb851eb851f ## double 105.98
.quad 0x405ac00000000000 ## double 107
.quad 0x405ad00000000000 ## double 107.25
.quad 0x405b000000000000 ## double 108
.quad 0x405b033333333333 ## double 108.05
.quad 0x405a8eb851eb851f ## double 106.23
.quad 0x405a78f5c28f5c29 ## double 105.89
.quad 0x405a733333333333 ## double 105.8
.quad 0x405a900000000000 ## double 106.25
.quad 0x405a7f5c28f5c28f ## double 105.98999999999999
.quad 0x405a470a3d70a3d7 ## double 105.11
.quad 0x405a300000000000 ## double 104.75
.quad 0x4059d8f5c28f5c29 ## double 103.39
.quad 0x4059cae147ae147b ## double 103.17
.quad 0x4059c1eb851eb852 ## double 103.03
.quad 0x4059c00000000000 ## double 103
.quad 0x4059c8f5c28f5c29 ## double 103.14
.quad 0x4059cae147ae147b ## double 103.17
.quad 0x4059c00000000000 ## double 103
.quad 0x40594ae147ae147b ## double 101.17
.quad 0x4059600000000000 ## double 101.5
.quad 0x40596ccccccccccd ## double 101.7
.quad 0x4058f9999999999a ## double 99.900000000000005
.quad 0x4057eeb851eb851f ## double 95.730000000000003
.quad 0x4057c9999999999a ## double 95.150000000000005
.quad 0x4057b7ae147ae148 ## double 94.870000000000005
.quad 0x4057fa3d70a3d70a ## double 95.909999999999996
.quad 0x40586a3d70a3d70a ## double 97.659999999999996
.quad 0x4058133333333333 ## double 96.299999999999997
.quad 0x4057f00000000000 ## double 95.75
.quad 0x4057f47ae147ae14 ## double 95.819999999999993
.quad 0x40582ccccccccccd ## double 96.700000000000002
.quad 0x4058333333333333 ## double 96.799999999999997
.quad 0x40583c28f5c28f5c ## double 96.939999999999998
.quad 0x4058300000000000 ## double 96.75
.quad 0x40581ccccccccccd ## double 96.450000000000002
.quad 0x40580eb851eb851f ## double 96.230000000000003
.quad 0x4057e00000000000 ## double 95.5
.quad 0x4057b00000000000 ## double 94.75
.quad 0x4057c70a3d70a3d7 ## double 95.109999999999999
.quad 0x4057c147ae147ae1 ## double 95.019999999999996
.quad 0x4057c1eb851eb852 ## double 95.030000000000001
.quad 0x4057d00000000000 ## double 95.25
.quad 0x4057ce147ae147ae ## double 95.219999999999998
.quad 0x4057dc28f5c28f5c ## double 95.439999999999998
.quad 0x4057f3d70a3d70a4 ## double 95.810000000000002
.quad 0x4057a1eb851eb852 ## double 94.530000000000001
.quad 0x4057947ae147ae14 ## double 94.319999999999993
.quad 0x40579ccccccccccd ## double 94.450000000000002
.quad 0x40577e147ae147ae ## double 93.969999999999998
.quad 0x405781eb851eb852 ## double 94.030000000000001
.quad 0x40579e147ae147ae ## double 94.469999999999998
.quad 0x40578ae147ae147b ## double 94.170000000000001
.quad 0x4057800000000000 ## double 94
.quad 0x4057b47ae147ae14 ## double 94.819999999999993
.quad 0x4057b66666666666 ## double 94.849999999999994
.quad 0x405778f5c28f5c29 ## double 93.89
.quad 0x405709999999999a ## double 92.150000000000005
.quad 0x405717ae147ae148 ## double 92.370000000000005
.quad 0x4057433333333333 ## double 93.049999999999997
.quad 0x405785c28f5c28f6 ## double 94.090000000000003
.quad 0x405800a3d70a3d71 ## double 96.010000000000005
.quad 0x4058770a3d70a3d7 ## double 97.859999999999999
.quad 0x40589c28f5c28f5c ## double 98.439999999999998
.quad 0x4058e00000000000 ## double 99.5
.quad 0x4058d7ae147ae148 ## double 99.370000000000005
.quad 0x4058dd70a3d70a3d ## double 99.459999999999993
.quad 0x4058d00000000000 ## double 99.25
.quad 0x4058e147ae147ae1 ## double 99.519999999999996
.quad 0x4058db851eb851ec ## double 99.430000000000007
.quad 0x4058af5c28f5c28f ## double 98.739999999999994
.quad 0x4058cccccccccccd ## double 99.200000000000002
.quad 0x4058eccccccccccd ## double 99.700000000000002
.quad 0x4058ef5c28f5c28f ## double 99.739999999999994
.quad 0x4059170a3d70a3d7 ## double 100.36
.quad 0x405919999999999a ## double 100.40000000000001
.quad 0x40591c28f5c28f5c ## double 100.44
.quad 0x4058eeb851eb851f ## double 99.730000000000003
.quad 0x4058cb851eb851ec ## double 99.180000000000007
.quad 0x4058deb851eb851f ## double 99.480000000000003
.quad 0x4058dccccccccccd ## double 99.450000000000002
.quad 0x4058aa3d70a3d70a ## double 98.659999999999996
.quad 0x4058751eb851eb85 ## double 97.829999999999998
.quad 0x40587ae147ae147b ## double 97.920000000000001
.quad 0x4058651eb851eb85 ## double 97.579999999999998
.quad 0x40585851eb851eb8 ## double 97.379999999999995
.quad 0x40584eb851eb851f ## double 97.230000000000003
.quad 0x4058366666666666 ## double 96.849999999999994
.quad 0x4058fccccccccccd ## double 99.950000000000002
.quad 0x405939999999999a ## double 100.90000000000001
.quad 0x405935c28f5c28f6 ## double 100.84
.quad 0x4058ec28f5c28f5c ## double 99.689999999999998
.quad 0x4058f9999999999a ## double 99.900000000000005
.quad 0x4058c33333333333 ## double 99.049999999999997
.quad 0x4059151eb851eb85 ## double 100.33
.quad 0x4058e00000000000 ## double 99.5
.quad 0x40587ccccccccccd ## double 97.950000000000002
.quad 0x4058b28f5c28f5c3 ## double 98.790000000000006
.quad 0x405899999999999a ## double 98.400000000000005
.quad 0x40587851eb851eb8 ## double 97.879999999999995
.quad 0x405859999999999a ## double 97.400000000000005
.quad 0x40584eb851eb851f ## double 97.230000000000003
.quad 0x4057f3d70a3d70a4 ## double 95.810000000000002
.quad 0x4057feb851eb851f ## double 95.980000000000003
.quad 0x4058200000000000 ## double 96.5
.quad 0x4058200000000000 ## double 96.5
.quad 0x405818f5c28f5c29 ## double 96.39
.quad 0x40580a3d70a3d70a ## double 96.159999999999996
.quad 0x4057f33333333333 ## double 95.799999999999997
.quad 0x4057ee147ae147ae ## double 95.719999999999998
.quad 0x4057d51eb851eb85 ## double 95.329999999999998
.quad 0x4057b66666666666 ## double 94.849999999999994
.quad 0x4057966666666666 ## double 94.349999999999994
.quad 0x405798f5c28f5c29 ## double 94.39
.quad 0x40579b851eb851ec ## double 94.430000000000007
.quad 0x4057b7ae147ae148 ## double 94.870000000000005
.quad 0x4057a8f5c28f5c29 ## double 94.64
.quad 0x40577ccccccccccd ## double 93.950000000000002
.quad 0x4056fa3d70a3d70a ## double 91.909999999999996
.quad 0x40571eb851eb851f ## double 92.480000000000003
.quad 0x4056ea3d70a3d70a ## double 91.659999999999996
.quad 0x4056e47ae147ae14 ## double 91.569999999999993
.quad 0x40574f5c28f5c28f ## double 93.239999999999994
.quad 0x4057700000000000 ## double 93.75
.quad 0x40576f5c28f5c28f ## double 93.739999999999994
.quad 0x40575b851eb851ec ## double 93.430000000000007
.quad 0x4057733333333333 ## double 93.799999999999997
.quad 0x4057833333333333 ## double 94.049999999999997
.quad 0x4057666666666666 ## double 93.599999999999994
.quad 0x405751eb851eb852 ## double 93.280000000000001
.quad 0x4057528f5c28f5c3 ## double 93.290000000000006
.quad 0x40571b851eb851ec ## double 92.430000000000007
.quad 0x4057166666666666 ## double 92.349999999999994
.quad 0x40574c28f5c28f5c ## double 93.189999999999998
.quad 0x40573ae147ae147b ## double 92.920000000000001
.quad 0x405759999999999a ## double 93.400000000000005
.quad 0x40572f5c28f5c28f ## double 92.739999999999994
.quad 0x405715c28f5c28f6 ## double 92.340000000000003
.quad 0x4056fccccccccccd ## double 91.950000000000002
.quad 0x40572b851eb851ec ## double 92.680000000000007
.quad 0x40572b851eb851ec ## double 92.680000000000007
.quad 0x4057000000000000 ## double 92
.quad 0x4056f8f5c28f5c29 ## double 91.89
.quad 0x4056fb851eb851ec ## double 91.930000000000007
.quad 0x4056ff5c28f5c28f ## double 91.989999999999994
.quad 0x4056fccccccccccd ## double 91.950000000000002
.quad 0x4057000000000000 ## double 92
.quad 0x4056accccccccccd ## double 90.700000000000002
.quad 0x4056beb851eb851f ## double 90.980000000000003
.quad 0x4057028f5c28f5c3 ## double 92.040000000000006
.quad 0x4055fc28f5c28f5c ## double 87.939999999999998
.quad 0x4055e66666666666 ## double 87.599999999999994
.quad 0x40558d70a3d70a3d ## double 86.209999999999993
.quad 0x405539999999999a ## double 84.900000000000006
.quad 0x40552c28f5c28f5c ## double 84.689999999999998
.quad 0x40552851eb851eb8 ## double 84.629999999999995
.quad 0x4055051eb851eb85 ## double 84.079999999999998
.quad 0x4054f1eb851eb852 ## double 83.780000000000001
.quad 0x4054cd70a3d70a3d ## double 83.209999999999993
.quad 0x4054d00000000000 ## double 83.25
.quad 0x405490a3d70a3d71 ## double 82.260000000000005
.quad 0x40549e147ae147ae ## double 82.469999999999999
.quad 0x4054b28f5c28f5c3 ## double 82.790000000000006
.quad 0x4054966666666666 ## double 82.349999999999994
.quad 0x4054aae147ae147b ## double 82.670000000000001
.quad 0x4054aa3d70a3d70a ## double 82.659999999999997
.quad 0x405491eb851eb852 ## double 82.280000000000001
.quad 0x405487ae147ae148 ## double 82.120000000000005
.quad 0x4054d00000000000 ## double 83.25
.quad 0x4054f28f5c28f5c3 ## double 83.790000000000006
.quad 0x405485c28f5c28f6 ## double 82.090000000000003
.quad 0x4054cd70a3d70a3d ## double 83.209999999999993
.quad 0x4054e5c28f5c28f6 ## double 83.590000000000003
.quad 0x4054c00000000000 ## double 83
.quad 0x4054accccccccccd ## double 82.700000000000002
.quad 0x4054a00000000000 ## double 82.5
.quad 0x40544ccccccccccd ## double 81.200000000000002
.quad 0x4054370a3d70a3d7 ## double 80.859999999999999
.quad 0x40541851eb851eb8 ## double 80.379999999999995
.quad 0x40542c28f5c28f5c ## double 80.689999999999998
.quad 0x4054628f5c28f5c3 ## double 81.540000000000006
.quad 0x4054647ae147ae14 ## double 81.569999999999993
.quad 0x4054600000000000 ## double 81.5
.quad 0x40546b851eb851ec ## double 81.680000000000007
.quad 0x40545eb851eb851f ## double 81.480000000000003
.quad 0x40541e147ae147ae ## double 80.469999999999999
.quad 0x4054000000000000 ## double 80
.quad 0x4053e00000000000 ## double 79.5
.quad 0x4053de147ae147ae ## double 79.469999999999999
.quad 0x4053e33333333333 ## double 79.549999999999997
.quad 0x4053f851eb851eb8 ## double 79.879999999999995
.quad 0x4053fe147ae147ae ## double 79.969999999999999
.quad 0x4053e3d70a3d70a4 ## double 79.560000000000002
.quad 0x4053d1eb851eb852 ## double 79.280000000000001
.quad 0x405348f5c28f5c29 ## double 77.14
.quad 0x40535f5c28f5c28f ## double 77.489999999999994
.quad 0x4052f1eb851eb852 ## double 75.780000000000001
.quad 0x4053051eb851eb85 ## double 76.079999999999998
.quad 0x405311eb851eb852 ## double 76.280000000000001
.quad 0x405310a3d70a3d71 ## double 76.260000000000005
.quad 0x40530ccccccccccd ## double 76.200000000000002
.quad 0x40533147ae147ae1 ## double 76.769999999999996
.quad 0x40532eb851eb851f ## double 76.730000000000003
.quad 0x40533b851eb851ec ## double 76.930000000000007
.quad 0x40535851eb851eb8 ## double 77.379999999999995
.quad 0x4053600000000000 ## double 77.5
.quad 0x4053533333333333 ## double 77.299999999999997
.quad 0x4053528f5c28f5c3 ## double 77.290000000000006
.quad 0x4053133333333333 ## double 76.299999999999997
.quad 0x40531a3d70a3d70a ## double 76.409999999999997
.quad 0x405309999999999a ## double 76.150000000000006
.quad 0x4052e00000000000 ## double 75.5
.quad 0x4052f851eb851eb8 ## double 75.879999999999995
.quad 0x405343d70a3d70a4 ## double 77.060000000000002
.quad 0x40529ae147ae147b ## double 74.420000000000001
.quad 0x40527c28f5c28f5c ## double 73.939999999999998
.quad 0x4052933333333333 ## double 74.299999999999997
.quad 0x4052d1eb851eb852 ## double 75.280000000000001
.quad 0x4053133333333333 ## double 76.299999999999997
.quad 0x405329999999999a ## double 76.650000000000006
.quad 0x4053351eb851eb85 ## double 76.829999999999998
.quad 0x405385c28f5c28f6 ## double 78.090000000000003
.quad 0x4053a1eb851eb852 ## double 78.530000000000001
.quad 0x405398f5c28f5c29 ## double 78.39
.quad 0x40539147ae147ae1 ## double 78.269999999999996
.quad 0x40537b851eb851ec ## double 77.930000000000007
.quad 0x4053733333333333 ## double 77.799999999999997
.quad 0x4053266666666666 ## double 76.599999999999994
.quad 0x4053666666666666 ## double 77.599999999999994
.quad 0x40536147ae147ae1 ## double 77.519999999999996
.quad 0x405375c28f5c28f6 ## double 77.840000000000003
.quad 0x405397ae147ae148 ## double 78.370000000000005
.quad 0x4053a3d70a3d70a4 ## double 78.560000000000002
.quad 0x4053933333333333 ## double 78.299999999999997
.quad 0x4053933333333333 ## double 78.299999999999997
.quad 0x4053a1eb851eb852 ## double 78.530000000000001
.quad 0x4053b1eb851eb852 ## double 78.780000000000001
.quad 0x4053766666666666 ## double 77.849999999999994
.quad 0x40537b851eb851ec ## double 77.930000000000007
.quad 0x405385c28f5c28f6 ## double 78.090000000000003
.quad 0x4053866666666666 ## double 78.099999999999994
.quad 0x4053e51eb851eb85 ## double 79.579999999999998
.quad 0x40542f5c28f5c28f ## double 80.739999999999994
.quad 0x4053f9999999999a ## double 79.900000000000006
.quad 0x4054028f5c28f5c3 ## double 80.040000000000006
.quad 0x405423d70a3d70a4 ## double 80.560000000000002
.quad 0x405437ae147ae148 ## double 80.870000000000005
.quad 0x4054351eb851eb85 ## double 80.829999999999998
.quad 0x40543d70a3d70a3d ## double 80.959999999999993
.quad 0x4054370a3d70a3d7 ## double 80.859999999999999
.quad 0x40540b851eb851ec ## double 80.180000000000007
.quad 0x4054028f5c28f5c3 ## double 80.040000000000006
.quad 0x4054533333333333 ## double 81.299999999999997
.quad 0x405415c28f5c28f6 ## double 80.340000000000003
.quad 0x4054400000000000 ## double 81
.quad 0x4054566666666666 ## double 81.349999999999994
.quad 0x40548d70a3d70a3d ## double 82.209999999999993
.quad 0x4054d00000000000 ## double 83.25
.quad 0x4054c9999999999a ## double 83.150000000000006
.quad 0x4054ec28f5c28f5c ## double 83.689999999999998
.quad 0x4054c5c28f5c28f6 ## double 83.090000000000003
.quad 0x4054d51eb851eb85 ## double 83.329999999999998
.quad 0x4054d28f5c28f5c3 ## double 83.290000000000006
.quad 0x4054ce147ae147ae ## double 83.219999999999999
.quad 0x4054d5c28f5c28f6 ## double 83.340000000000003
.quad 0x4054c5c28f5c28f6 ## double 83.090000000000003
.quad 0x4054d28f5c28f5c3 ## double 83.290000000000006
.quad 0x4054af5c28f5c28f ## double 82.739999999999994
.quad 0x4054d3d70a3d70a4 ## double 83.310000000000002
.quad 0x4054e7ae147ae148 ## double 83.620000000000005
.quad 0x405519999999999a ## double 84.400000000000006
.quad 0x4054deb851eb851f ## double 83.480000000000003
.quad 0x4054b9999999999a ## double 82.900000000000006
.quad 0x40549a3d70a3d70a ## double 82.409999999999997
.quad 0x4054a00000000000 ## double 82.5
.quad 0x4054b66666666666 ## double 82.849999999999994
.quad 0x4055000000000000 ## double 84
.quad 0x4054e1eb851eb852 ## double 83.530000000000001
.quad 0x4054bae147ae147b ## double 82.920000000000001
.quad 0x4054a66666666666 ## double 82.599999999999994
.quad 0x405457ae147ae148 ## double 81.370000000000005
.quad 0x405483d70a3d70a4 ## double 82.060000000000002
.quad 0x4054af5c28f5c28f ## double 82.739999999999994
.quad 0x4054f33333333333 ## double 83.799999999999997
.quad 0x4055147ae147ae14 ## double 84.319999999999993
.quad 0x40551ccccccccccd ## double 84.450000000000002
.quad 0x4054f1eb851eb852 ## double 83.780000000000001
.quad 0x4054e5c28f5c28f6 ## double 83.590000000000003
.quad 0x4054e66666666666 ## double 83.599999999999994
.quad 0x4054eccccccccccd ## double 83.700000000000002
.quad 0x4054e33333333333 ## double 83.549999999999997
.quad 0x4054d8f5c28f5c29 ## double 83.39
.quad 0x4054d0a3d70a3d71 ## double 83.260000000000005
.quad 0x4054ee147ae147ae ## double 83.719999999999999
.quad 0x40551147ae147ae1 ## double 84.269999999999996
.quad 0x40552b851eb851ec ## double 84.680000000000007
.quad 0x40553f5c28f5c28f ## double 84.989999999999994
.quad 0x4054f00000000000 ## double 83.75
.quad 0x4054da3d70a3d70a ## double 83.409999999999997
.quad 0x4054ee147ae147ae ## double 83.719999999999999
.quad 0x4054f0a3d70a3d71 ## double 83.760000000000005
.quad 0x4054bf5c28f5c28f ## double 82.989999999999994
.quad 0x405499999999999a ## double 82.400000000000006
.quad 0x4054900000000000 ## double 82.25
.quad 0x40547eb851eb851f ## double 81.980000000000003
.quad 0x4054666666666666 ## double 81.599999999999994
.quad 0x4054328f5c28f5c3 ## double 80.790000000000006
.quad 0x405428f5c28f5c29 ## double 80.64
.quad 0x4054351eb851eb85 ## double 80.829999999999998
.quad 0x40540d70a3d70a3d ## double 80.209999999999993
.quad 0x405433d70a3d70a4 ## double 80.810000000000002
.quad 0x4054233333333333 ## double 80.549999999999997
.quad 0x405438f5c28f5c29 ## double 80.89
.quad 0x405429999999999a ## double 80.650000000000006
.quad 0x405455c28f5c28f6 ## double 81.340000000000003
.quad 0x405469999999999a ## double 81.650000000000006
.quad 0x4054300000000000 ## double 80.75
.quad 0x405441eb851eb852 ## double 81.030000000000001
.quad 0x40543a3d70a3d70a ## double 80.909999999999997
.quad 0x4054600000000000 ## double 81.5
.quad 0x40545ccccccccccd ## double 81.450000000000002
.quad 0x40547a3d70a3d70a ## double 81.909999999999997
.quad 0x40545d70a3d70a3d ## double 81.459999999999993
.quad 0x4054333333333333 ## double 80.799999999999997
.quad 0x405439999999999a ## double 80.900000000000006
.quad 0x40540c28f5c28f5c ## double 80.189999999999998
.quad 0x4053fccccccccccd ## double 79.950000000000002
.quad 0x40543147ae147ae1 ## double 80.769999999999996
.quad 0x405465c28f5c28f6 ## double 81.590000000000003
.quad 0x40548f5c28f5c28f ## double 82.239999999999994
.quad 0x4054800000000000 ## double 82
.quad 0x405473d70a3d70a4 ## double 81.810000000000002
.quad 0x40547147ae147ae1 ## double 81.769999999999996
.quad 0x405469999999999a ## double 81.650000000000006
.quad 0x405467ae147ae148 ## double 81.620000000000005
.quad 0x405489999999999a ## double 82.150000000000006
.quad 0x40547ae147ae147b ## double 81.920000000000001
.quad 0x4054c33333333333 ## double 83.049999999999997
.quad 0x405518f5c28f5c29 ## double 84.39
.quad 0x40552ccccccccccd ## double 84.700000000000002
.quad 0x4054ca3d70a3d70a ## double 83.159999999999997
.quad 0x4054dccccccccccd ## double 83.450000000000002
.quad 0x4054fd70a3d70a3d ## double 83.959999999999993
.quad 0x405533d70a3d70a4 ## double 84.810000000000002
.quad 0x405507ae147ae148 ## double 84.120000000000005
.quad 0x4055100000000000 ## double 84.25
.quad 0x405541eb851eb852 ## double 85.030000000000001
.quad 0x4054b9999999999a ## double 82.900000000000006
.quad 0x4054a00000000000 ## double 82.5
.quad 0x4054a33333333333 ## double 82.549999999999997
.quad 0x40549f5c28f5c28f ## double 82.489999999999994
.quad 0x4054c3d70a3d70a4 ## double 83.060000000000002
.quad 0x4054e47ae147ae14 ## double 83.569999999999993
.quad 0x4055200000000000 ## double 84.5
.quad 0x40550ccccccccccd ## double 84.200000000000002
.quad 0x4054ceb851eb851f ## double 83.230000000000003
.quad 0x4055000000000000 ## double 84
.quad 0x4054c66666666666 ## double 83.099999999999994
.quad 0x4054e66666666666 ## double 83.599999999999994
.quad 0x4055000000000000 ## double 84
.quad 0x4054e851eb851eb8 ## double 83.629999999999995
.quad 0x4054fccccccccccd ## double 83.950000000000002
.quad 0x405539999999999a ## double 84.900000000000006
.quad 0x4055d66666666666 ## double 87.349999999999994
.quad 0x4055c66666666666 ## double 87.099999999999994
.quad 0x40563a3d70a3d70a ## double 88.909999999999997
.quad 0x40567ae147ae147b ## double 89.920000000000001
.quad 0x405675c28f5c28f6 ## double 89.840000000000003
.quad 0x405629999999999a ## double 88.650000000000006
.quad 0x405649999999999a ## double 89.150000000000006
.quad 0x40566f5c28f5c28f ## double 89.739999999999994
.quad 0x405668f5c28f5c29 ## double 89.64
.quad 0x40567c28f5c28f5c ## double 89.939999999999998
.quad 0x40565b851eb851ec ## double 89.430000000000007
.quad 0x4056547ae147ae14 ## double 89.319999999999993
.quad 0x405658f5c28f5c29 ## double 89.39
.quad 0x4056051eb851eb85 ## double 88.079999999999998
.quad 0x4055f7ae147ae148 ## double 87.870000000000005
.quad 0x4056000000000000 ## double 88
.quad 0x4055beb851eb851f ## double 86.980000000000003
.quad 0x4055a51eb851eb85 ## double 86.579999999999998
.quad 0x405579999999999a ## double 85.900000000000006
.quad 0x4055400000000000 ## double 85
.quad 0x405535c28f5c28f6 ## double 84.840000000000003
.quad 0x4055133333333333 ## double 84.299999999999997
.quad 0x4054d51eb851eb85 ## double 83.329999999999998
.quad 0x4054f9999999999a ## double 83.900000000000006
.quad 0x4054f66666666666 ## double 83.849999999999994
.quad 0x4054d33333333333 ## double 83.299999999999997
.quad 0x4054c1eb851eb852 ## double 83.030000000000001
.quad 0x405478f5c28f5c29 ## double 81.89
.quad 0x405489999999999a ## double 82.150000000000006
.quad 0x4054933333333333 ## double 82.299999999999997
.quad 0x40549b851eb851ec ## double 82.430000000000007
.quad 0x4054bb851eb851ec ## double 82.930000000000007
.quad 0x4054f7ae147ae148 ## double 83.870000000000005
.quad 0x4054fccccccccccd ## double 83.950000000000002
.quad 0x4054e70a3d70a3d7 ## double 83.609999999999999
.quad 0x4055000000000000 ## double 84
.quad 0x40550c28f5c28f5c ## double 84.189999999999998
.quad 0x40550ccccccccccd ## double 84.200000000000002
.quad 0x4055266666666666 ## double 84.599999999999994
.quad 0x4054bf5c28f5c28f ## double 82.989999999999994
.quad 0x4054ad70a3d70a3d ## double 82.709999999999993
.quad 0x4054cccccccccccd ## double 83.200000000000002
.quad 0x4054bb851eb851ec ## double 82.930000000000007
.quad 0x4054e147ae147ae1 ## double 83.519999999999996
.quad 0x4054866666666666 ## double 82.099999999999994
.quad 0x405447ae147ae148 ## double 81.120000000000005
.quad 0x40541147ae147ae1 ## double 80.269999999999996
.quad 0x40542b851eb851ec ## double 80.680000000000007
.quad 0x40545e147ae147ae ## double 81.469999999999999
.quad 0x4054266666666666 ## double 80.599999999999994
.quad 0x4054200000000000 ## double 80.5
.quad 0x4054233333333333 ## double 80.549999999999997
.quad 0x4053eeb851eb851f ## double 79.730000000000003
.quad 0x4053a66666666666 ## double 78.599999999999994
.quad 0x40539a3d70a3d70a ## double 78.409999999999997
.quad 0x4053870a3d70a3d7 ## double 78.109999999999999
.quad 0x4053a00000000000 ## double 78.5
.quad 0x40539d70a3d70a3d ## double 78.459999999999993
.quad 0x4053e9999999999a ## double 79.650000000000006
.quad 0x4053feb851eb851f ## double 79.980000000000003
.quad 0x4054200000000000 ## double 80.5
.quad 0x405429999999999a ## double 80.650000000000006
.quad 0x405459999999999a ## double 81.400000000000006
.quad 0x40545f5c28f5c28f ## double 81.489999999999994
.quad 0x4054870a3d70a3d7 ## double 82.109999999999999
.quad 0x40545f5c28f5c28f ## double 81.489999999999994
.quad 0x40546147ae147ae1 ## double 81.519999999999996
.quad 0x40544851eb851eb8 ## double 81.129999999999995
.quad 0x40544c28f5c28f5c ## double 81.189999999999998
.quad 0x405400a3d70a3d71 ## double 80.010000000000005
.quad 0x4054147ae147ae14 ## double 80.319999999999993
.quad 0x4054328f5c28f5c3 ## double 80.790000000000006
.quad 0x4054400000000000 ## double 81
.quad 0x4054700000000000 ## double 81.75
.quad 0x40544147ae147ae1 ## double 81.019999999999996
.quad 0x40545ccccccccccd ## double 81.450000000000002
.quad 0x405499999999999a ## double 82.400000000000006
.quad 0x4054c8f5c28f5c29 ## double 83.14
.quad 0x4054d66666666666 ## double 83.349999999999994
.quad 0x4054d33333333333 ## double 83.299999999999997
.quad 0x4054800000000000 ## double 82
.quad 0x40547c28f5c28f5c ## double 81.939999999999998
.quad 0x40549b851eb851ec ## double 82.430000000000007
.quad 0x4054bc28f5c28f5c ## double 82.939999999999998
.quad 0x4054a51eb851eb85 ## double 82.579999999999998
.quad 0x4054b00000000000 ## double 82.75
.quad 0x405500a3d70a3d71 ## double 84.010000000000005
.quad 0x4054fccccccccccd ## double 83.950000000000002
.quad 0x4054f8f5c28f5c29 ## double 83.89
.quad 0x4054ef5c28f5c28f ## double 83.739999999999994
.quad 0x4054f8f5c28f5c29 ## double 83.89
.quad 0x40550ccccccccccd ## double 84.200000000000002
.quad 0x4054f70a3d70a3d7 ## double 83.859999999999999
.quad 0x4054f66666666666 ## double 83.849999999999994
.quad 0x4054fccccccccccd ## double 83.950000000000002
.quad 0x4054fe147ae147ae ## double 83.969999999999999
.quad 0x40550851eb851eb8 ## double 84.129999999999995
.quad 0x4055166666666666 ## double 84.349999999999994
.quad 0x40552147ae147ae1 ## double 84.519999999999996
.quad 0x40552851eb851eb8 ## double 84.629999999999995
.quad 0x40553ccccccccccd ## double 84.950000000000002
.quad 0x40553d70a3d70a3d ## double 84.959999999999993
.quad 0x4055470a3d70a3d7 ## double 85.109999999999999
.quad 0x4054fc28f5c28f5c ## double 83.939999999999998
.quad 0x4054b00000000000 ## double 82.75
.quad 0x4054aae147ae147b ## double 82.670000000000001
.quad 0x4054700000000000 ## double 81.75
.quad 0x40541f5c28f5c28f ## double 80.489999999999994
.quad 0x4053e147ae147ae1 ## double 79.519999999999996
.quad 0x4053e147ae147ae1 ## double 79.519999999999996
.quad 0x405361eb851eb852 ## double 77.530000000000001
.quad 0x405309999999999a ## double 76.150000000000006
.quad 0x4052be147ae147ae ## double 74.969999999999999
.quad 0x4052d51eb851eb85 ## double 75.329999999999998
.quad 0x4052df5c28f5c28f ## double 75.489999999999994
.quad 0x4052eb851eb851ec ## double 75.680000000000007
.quad 0x4052d33333333333 ## double 75.299999999999997
.quad 0x4052b147ae147ae1 ## double 74.769999999999996
.quad 0x4052d9999999999a ## double 75.400000000000006
.quad 0x40533e147ae147ae ## double 76.969999999999999
.quad 0x40535f5c28f5c28f ## double 77.489999999999994
.quad 0x4053400000000000 ## double 77
.quad 0x40533eb851eb851f ## double 76.980000000000003
.quad 0x40536eb851eb851f ## double 77.730000000000003
.quad 0x4053500000000000 ## double 77.25
.quad 0x4053200000000000 ## double 76.5
.quad 0x4052db851eb851ec ## double 75.430000000000007
.quad 0x4052fb851eb851ec ## double 75.930000000000007
.quad 0x4052c33333333333 ## double 75.049999999999997
.quad 0x4052de147ae147ae ## double 75.469999999999999
.quad 0x4052d9999999999a ## double 75.400000000000006
.quad 0x405305c28f5c28f6 ## double 76.090000000000003
.quad 0x4052f9999999999a ## double 75.900000000000006
.quad 0x4053466666666666 ## double 77.099999999999994
.quad 0x405358f5c28f5c29 ## double 77.39
.quad 0x4053600000000000 ## double 77.5
.quad 0x40535a3d70a3d70a ## double 77.409999999999997
.quad 0x40534f5c28f5c28f ## double 77.239999999999994
.quad 0x40535a3d70a3d70a ## double 77.409999999999997
.quad 0x405301eb851eb852 ## double 76.030000000000001
.quad 0x405320a3d70a3d71 ## double 76.510000000000005
.quad 0x40533ccccccccccd ## double 76.950000000000002
.quad 0x405351eb851eb852 ## double 77.280000000000001
.quad 0x405368f5c28f5c29 ## double 77.64
.quad 0x4053347ae147ae14 ## double 76.819999999999993
.quad 0x40529b851eb851ec ## double 74.430000000000007
.quad 0x40529f5c28f5c28f ## double 74.489999999999994
.quad 0x4052770a3d70a3d7 ## double 73.859999999999999
.quad 0x4052733333333333 ## double 73.799999999999997
.quad 0x40526c28f5c28f5c ## double 73.689999999999998
.quad 0x4052b0a3d70a3d71 ## double 74.760000000000005
.quad 0x4052dd70a3d70a3d ## double 75.459999999999993
.quad 0x4052fae147ae147b ## double 75.920000000000001
.quad 0x4053870a3d70a3d7 ## double 78.109999999999999
.quad 0x40534ccccccccccd ## double 77.200000000000002
.quad 0x40533d70a3d70a3d ## double 76.959999999999993
.quad 0x4053528f5c28f5c3 ## double 77.290000000000006
.quad 0x4053451eb851eb85 ## double 77.079999999999998
.quad 0x4053470a3d70a3d7 ## double 77.109999999999999
.quad 0x40534b851eb851ec ## double 77.180000000000007
.quad 0x40533eb851eb851f ## double 76.980000000000003
.quad 0x4052ee147ae147ae ## double 75.719999999999999
.quad 0x4052accccccccccd ## double 74.700000000000002
.quad 0x4052866666666666 ## double 74.099999999999994
.quad 0x4052f7ae147ae148 ## double 75.870000000000005
.quad 0x40534ccccccccccd ## double 77.200000000000002
.quad 0x4053700000000000 ## double 77.75
.quad 0x4053ea3d70a3d70a ## double 79.659999999999997
.quad 0x40555a3d70a3d70a ## double 85.409999999999997
.quad 0x40557e147ae147ae ## double 85.969999999999999
.quad 0x40558f5c28f5c28f ## double 86.239999999999994
.quad 0x4055e3d70a3d70a4 ## double 87.560000000000002
.quad 0x40561d70a3d70a3d ## double 88.459999999999993
.quad 0x40564ccccccccccd ## double 89.200000000000002
.quad 0x40565851eb851eb8 ## double 89.379999999999995
.quad 0x4056951eb851eb85 ## double 90.329999999999998
.quad 0x4056a7ae147ae148 ## double 90.620000000000005
.quad 0x4056f0a3d70a3d71 ## double 91.760000000000005
.quad 0x4056da3d70a3d70a ## double 91.409999999999996
.quad 0x4056b851eb851eb8 ## double 90.879999999999995
.quad 0x4056c47ae147ae14 ## double 91.069999999999993
.quad 0x4056e851eb851eb8 ## double 91.629999999999995
.quad 0x4056e33333333333 ## double 91.549999999999997
.quad 0x4056cae147ae147b ## double 91.170000000000001
.quad 0x405695c28f5c28f6 ## double 90.340000000000003
.quad 0x405691eb851eb852 ## double 90.280000000000001
.quad 0x405679999999999a ## double 89.900000000000006
.quad 0x4056ab851eb851ec ## double 90.680000000000007
.quad 0x4056d9999999999a ## double 91.400000000000005
.quad 0x40571147ae147ae1 ## double 92.269999999999996
.quad 0x4057028f5c28f5c3 ## double 92.040000000000006
.quad 0x40571a3d70a3d70a ## double 92.409999999999996
.quad 0x4057333333333333 ## double 92.799999999999997
.quad 0x4057400000000000 ## double 93
.quad 0x405723d70a3d70a4 ## double 92.560000000000002
.quad 0x405720a3d70a3d71 ## double 92.510000000000005
.quad 0x40574b851eb851ec ## double 93.180000000000007
.quad 0x40574d70a3d70a3d ## double 93.209999999999993
.quad 0x40576eb851eb851f ## double 93.730000000000003
.quad 0x40575b851eb851ec ## double 93.430000000000007
.quad 0x405730a3d70a3d71 ## double 92.760000000000005
.quad 0x4057333333333333 ## double 92.799999999999997
.quad 0x405745c28f5c28f6 ## double 93.090000000000003
.quad 0x40571c28f5c28f5c ## double 92.439999999999998
.quad 0x4057600000000000 ## double 93.5
.quad 0x4057900000000000 ## double 94.25
.quad 0x4057b0a3d70a3d71 ## double 94.760000000000005
.quad 0x4057be147ae147ae ## double 94.969999999999998
.quad 0x4057aae147ae147b ## double 94.670000000000001
.quad 0x40578147ae147ae1 ## double 94.019999999999996
.quad 0x40577e147ae147ae ## double 93.969999999999998
.quad 0x4057466666666666 ## double 93.099999999999994
.quad 0x4057b51eb851eb85 ## double 94.829999999999998
.quad 0x4057a8f5c28f5c29 ## double 94.64
.quad 0x4057b9999999999a ## double 94.900000000000005
.quad 0x4057af5c28f5c28f ## double 94.739999999999994
.quad 0x40577c28f5c28f5c ## double 93.939999999999998
.quad 0x4057966666666666 ## double 94.349999999999994
.quad 0x4057800000000000 ## double 94
.quad 0x40577ccccccccccd ## double 93.950000000000002
.quad 0x405765c28f5c28f6 ## double 93.590000000000003
.quad 0x40570e147ae147ae ## double 92.219999999999998
.quad 0x405737ae147ae148 ## double 92.870000000000005
.quad 0x405725c28f5c28f6 ## double 92.590000000000003
.quad 0x4057366666666666 ## double 92.849999999999994
.quad 0x4057533333333333 ## double 93.299999999999997
.quad 0x405775c28f5c28f6 ## double 93.840000000000003
.quad 0x4057c9999999999a ## double 95.150000000000005
.quad 0x4057d5c28f5c28f6 ## double 95.340000000000003
.quad 0x4057900000000000 ## double 94.25
.quad 0x40580ccccccccccd ## double 96.200000000000002
.quad 0x4057d1eb851eb852 ## double 95.280000000000001
.quad 0x4057f28f5c28f5c3 ## double 95.790000000000006
.quad 0x405805c28f5c28f6 ## double 96.090000000000003
.quad 0x4058333333333333 ## double 96.799999999999997
.quad 0x40583eb851eb851f ## double 96.980000000000003
.quad 0x4058751eb851eb85 ## double 97.829999999999998
.quad 0x40589ae147ae147b ## double 98.420000000000001
.quad 0x4058c66666666666 ## double 99.099999999999994
.quad 0x4058ba3d70a3d70a ## double 98.909999999999996
.quad 0x4058c00000000000 ## double 99
.quad 0x40589e147ae147ae ## double 98.469999999999998
.quad 0x4058a33333333333 ## double 98.549999999999997
.quad 0x40587e147ae147ae ## double 97.969999999999998
.quad 0x4058800000000000 ## double 98
.quad 0x40587eb851eb851f ## double 97.980000000000003
.quad 0x405849999999999a ## double 97.150000000000005
.quad 0x4058647ae147ae14 ## double 97.569999999999993
.quad 0x4058800000000000 ## double 98
.quad 0x405889999999999a ## double 98.150000000000005
.quad 0x40586ccccccccccd ## double 97.700000000000002
.quad 0x40586ccccccccccd ## double 97.700000000000002
.quad 0x40584ccccccccccd ## double 97.200000000000002
.quad 0x40587f5c28f5c28f ## double 97.989999999999994
.quad 0x4058666666666666 ## double 97.599999999999994
.quad 0x4058566666666666 ## double 97.349999999999994
.quad 0x4058900000000000 ## double 98.25
.quad 0x405879999999999a ## double 97.900000000000005
.quad 0x40586851eb851eb8 ## double 97.629999999999995
.quad 0x405831eb851eb852 ## double 96.780000000000001
.quad 0x4058047ae147ae14 ## double 96.069999999999993
.quad 0x4057e9999999999a ## double 95.650000000000005
.quad 0x40581851eb851eb8 ## double 96.379999999999995
.quad 0x4057d851eb851eb8 ## double 95.379999999999995
.quad 0x4057f28f5c28f5c3 ## double 95.790000000000006
.quad 0x4057d7ae147ae148 ## double 95.370000000000005
.quad 0x4057d9999999999a ## double 95.400000000000005
.quad 0x4057ca3d70a3d70a ## double 95.159999999999996
.quad 0x4057eccccccccccd ## double 95.700000000000002
.quad 0x40582851eb851eb8 ## double 96.629999999999995
.quad 0x4057e00000000000 ## double 95.5
.quad 0x4058000000000000 ## double 96
.quad 0x4057e00000000000 ## double 95.5
.quad 0x4057ce147ae147ae ## double 95.219999999999998
.quad 0x4057933333333333 ## double 94.299999999999997
.quad 0x40577ccccccccccd ## double 93.950000000000002
.quad 0x40576ccccccccccd ## double 93.700000000000002
.quad 0x40576147ae147ae1 ## double 93.519999999999996
.quad 0x40572ccccccccccd ## double 92.700000000000002
.quad 0x4056f9999999999a ## double 91.900000000000005
.quad 0x4056ce147ae147ae ## double 91.219999999999999
.quad 0x4056a66666666666 ## double 90.599999999999994
.quad 0x405679999999999a ## double 89.900000000000006
.quad 0x40568f5c28f5c28f ## double 90.239999999999994
.quad 0x40569147ae147ae1 ## double 90.269999999999996
.quad 0x4056647ae147ae14 ## double 89.569999999999993
.quad 0x405639999999999a ## double 88.900000000000006
.quad 0x40561ccccccccccd ## double 88.450000000000002
.quad 0x405630a3d70a3d71 ## double 88.760000000000005
.quad 0x40564c28f5c28f5c ## double 89.189999999999998
.quad 0x40566eb851eb851f ## double 89.730000000000003
.quad 0x405589999999999a ## double 86.150000000000006
.quad 0x4055500000000000 ## double 85.25
.quad 0x40553eb851eb851f ## double 84.980000000000003
.quad 0x40559eb851eb851f ## double 86.480000000000003
.quad 0x40558ccccccccccd ## double 86.200000000000002
.quad 0x4055cccccccccccd ## double 87.200000000000002
.quad 0x4055fa3d70a3d70a ## double 87.909999999999997
.quad 0x4056066666666666 ## double 88.099999999999994
.quad 0x4056066666666666 ## double 88.099999999999994
.quad 0x405601eb851eb852 ## double 88.030000000000001
.quad 0x4056066666666666 ## double 88.099999999999994
.quad 0x4055beb851eb851f ## double 86.980000000000003
.quad 0x40557eb851eb851f ## double 85.980000000000003
.quad 0x40553eb851eb851f ## double 84.980000000000003
.quad 0x405529999999999a ## double 84.650000000000006
.quad 0x40551c28f5c28f5c ## double 84.439999999999998
.quad 0x40552f5c28f5c28f ## double 84.739999999999994
.quad 0x40551147ae147ae1 ## double 84.269999999999996
.quad 0x40555c28f5c28f5c ## double 85.439999999999998
.quad 0x4055870a3d70a3d7 ## double 86.109999999999999
.quad 0x40559b851eb851ec ## double 86.430000000000007
.quad 0x4055a00000000000 ## double 86.5
.quad 0x4055bd70a3d70a3d ## double 86.959999999999993
.quad 0x4055a00000000000 ## double 86.5
.quad 0x4055b851eb851eb8 ## double 86.879999999999995
.quad 0x4055d1eb851eb852 ## double 87.280000000000001
.quad 0x4055c00000000000 ## double 87
.quad 0x4055b28f5c28f5c3 ## double 86.790000000000006
.quad 0x4055a0a3d70a3d71 ## double 86.510000000000005
.quad 0x40555c28f5c28f5c ## double 85.439999999999998
.quad 0x40552c28f5c28f5c ## double 84.689999999999998
.quad 0x405531eb851eb852 ## double 84.780000000000001
.quad 0x405545c28f5c28f6 ## double 85.090000000000003
.quad 0x40552c28f5c28f5c ## double 84.689999999999998
.quad 0x40553f5c28f5c28f ## double 84.989999999999994
.quad 0x40553ccccccccccd ## double 84.950000000000002
.quad 0x4055428f5c28f5c3 ## double 85.040000000000006
.quad 0x40555147ae147ae1 ## double 85.269999999999996
.quad 0x405549999999999a ## double 85.150000000000006
.quad 0x40555ccccccccccd ## double 85.450000000000002
.quad 0x4055500000000000 ## double 85.25
.quad 0x4055566666666666 ## double 85.349999999999994
.quad 0x40554851eb851eb8 ## double 85.129999999999995
.quad 0x4055228f5c28f5c3 ## double 84.540000000000006
.quad 0x405523d70a3d70a4 ## double 84.560000000000002
.quad 0x4054fc28f5c28f5c ## double 83.939999999999998
.quad 0x4054c33333333333 ## double 83.049999999999997
.quad 0x4054feb851eb851f ## double 83.980000000000003
.quad 0x40553f5c28f5c28f ## double 84.989999999999994
.quad 0x4054fd70a3d70a3d ## double 83.959999999999993
.quad 0x405530a3d70a3d71 ## double 84.760000000000005
.quad 0x40559ae147ae147b ## double 86.420000000000001
.quad 0x4055a9999999999a ## double 86.650000000000006
.quad 0x4055b33333333333 ## double 86.799999999999997
.quad 0x4055d8f5c28f5c29 ## double 87.39
.quad 0x4055d9999999999a ## double 87.400000000000006
.quad 0x4055ce147ae147ae ## double 87.219999999999999
.quad 0x405591eb851eb852 ## double 86.280000000000001
.quad 0x4055933333333333 ## double 86.299999999999997
.quad 0x4055651eb851eb85 ## double 85.579999999999998
.quad 0x405583d70a3d70a4 ## double 86.060000000000002
.quad 0x405599999999999a ## double 86.400000000000006
.quad 0x4055c70a3d70a3d7 ## double 87.109999999999999
.quad 0x40559f5c28f5c28f ## double 86.489999999999994
.quad 0x4055766666666666 ## double 85.849999999999994
.quad 0x40559eb851eb851f ## double 86.480000000000003
.quad 0x40552851eb851eb8 ## double 84.629999999999995
.quad 0x40554f5c28f5c28f ## double 85.239999999999994
.quad 0x405585c28f5c28f6 ## double 86.090000000000003
.quad 0x4055500000000000 ## double 85.25
.quad 0x4055200000000000 ## double 84.5
.quad 0x405540a3d70a3d71 ## double 85.010000000000005
.quad 0x40557c28f5c28f5c ## double 85.939999999999998
.quad 0x4055aeb851eb851f ## double 86.730000000000003
.quad 0x4055e33333333333 ## double 87.549999999999997
.quad 0x40561c28f5c28f5c ## double 88.439999999999998
.quad 0x4056200000000000 ## double 88.5
.quad 0x40561f5c28f5c28f ## double 88.489999999999994
.quad 0x405679999999999a ## double 89.900000000000006
.quad 0x40568eb851eb851f ## double 90.230000000000003
.quad 0x4056bae147ae147b ## double 90.920000000000001
.quad 0x4056b5c28f5c28f6 ## double 90.840000000000003
.quad 0x40568f5c28f5c28f ## double 90.239999999999994
.quad 0x40569b851eb851ec ## double 90.430000000000007
.quad 0x4056b33333333333 ## double 90.799999999999997
.quad 0x4056a3d70a3d70a4 ## double 90.560000000000002
.quad 0x4056bb851eb851ec ## double 90.930000000000007
.quad 0x4056cd70a3d70a3d ## double 91.209999999999993
.quad 0x4056a51eb851eb85 ## double 90.579999999999998
.quad 0x4056b00000000000 ## double 90.75
.quad 0x4056a33333333333 ## double 90.549999999999997
.quad 0x4056a00000000000 ## double 90.5
.quad 0x40563f5c28f5c28f ## double 88.989999999999994
.quad 0x40561f5c28f5c28f ## double 88.489999999999994
.quad 0x4056066666666666 ## double 88.099999999999994
.quad 0x405628f5c28f5c29 ## double 88.64
.quad 0x40561eb851eb851f ## double 88.480000000000003
.quad 0x4056366666666666 ## double 88.849999999999994
.quad 0x4056451eb851eb85 ## double 89.079999999999998
.quad 0x4056366666666666 ## double 88.849999999999994
.quad 0x40563ae147ae147b ## double 88.920000000000001
.quad 0x4055f5c28f5c28f6 ## double 87.840000000000003
.quad 0x40560b851eb851ec ## double 88.180000000000007
.quad 0x4055fccccccccccd ## double 87.950000000000002
.quad 0x40563851eb851eb8 ## double 88.879999999999995
.quad 0x40559f5c28f5c28f ## double 86.489999999999994
.quad 0x405581eb851eb852 ## double 86.030000000000001
.quad 0x4055d66666666666 ## double 87.349999999999994
.quad 0x4055da3d70a3d70a ## double 87.409999999999997
.quad 0x4055d00000000000 ## double 87.25
.quad 0x405609999999999a ## double 88.150000000000006
.quad 0x4056066666666666 ## double 88.099999999999994
.quad 0x405659999999999a ## double 89.400000000000006
.quad 0x4056466666666666 ## double 89.099999999999994
.quad 0x4056700000000000 ## double 89.75
.quad 0x40566f5c28f5c28f ## double 89.739999999999994
.quad 0x40562147ae147ae1 ## double 88.519999999999996
.quad 0x40564ccccccccccd ## double 89.200000000000002
.quad 0x405690a3d70a3d71 ## double 90.260000000000005
.quad 0x4056beb851eb851f ## double 90.980000000000003
.quad 0x4056ff5c28f5c28f ## double 91.989999999999994
.quad 0x4056e3d70a3d70a4 ## double 91.560000000000002
.quad 0x4056e70a3d70a3d7 ## double 91.609999999999999
.quad 0x4056e0a3d70a3d71 ## double 91.510000000000005
.quad 0x4056dae147ae147b ## double 91.420000000000001
.quad 0x40571eb851eb851f ## double 92.480000000000003
.quad 0x4057133333333333 ## double 92.299999999999997
.quad 0x4057166666666666 ## double 92.349999999999994
.quad 0x405785c28f5c28f6 ## double 94.090000000000003
.quad 0x4057770a3d70a3d7 ## double 93.859999999999999
.quad 0x4057828f5c28f5c3 ## double 94.040000000000006
.quad 0x4057970a3d70a3d7 ## double 94.359999999999999
.quad 0x405798f5c28f5c29 ## double 94.39
.quad 0x4057651eb851eb85 ## double 93.579999999999998
.quad 0x4057728f5c28f5c3 ## double 93.790000000000006
.quad 0x405797ae147ae148 ## double 94.370000000000005
.quad 0x4057a33333333333 ## double 94.549999999999997
.quad 0x40572b851eb851ec ## double 92.680000000000007
.quad 0x40570f5c28f5c28f ## double 92.239999999999994
.quad 0x40572ae147ae147b ## double 92.670000000000001
.quad 0x4057670a3d70a3d7 ## double 93.609999999999999
.quad 0x4057500000000000 ## double 93.25
.quad 0x40572851eb851eb8 ## double 92.629999999999995
.quad 0x40571f5c28f5c28f ## double 92.489999999999994
.quad 0x40570a3d70a3d70a ## double 92.159999999999996
.quad 0x4056deb851eb851f ## double 91.480000000000003
.quad 0x40573e147ae147ae ## double 92.969999999999998
.quad 0x40574b851eb851ec ## double 93.180000000000007
.quad 0x4057728f5c28f5c3 ## double 93.790000000000006
.quad 0x40572ccccccccccd ## double 92.700000000000002
.quad 0x40572c28f5c28f5c ## double 92.689999999999998
.quad 0x40575851eb851eb8 ## double 93.379999999999995
.quad 0x40573eb851eb851f ## double 92.980000000000003
.quad 0x4057af5c28f5c28f ## double 94.739999999999994
.quad 0x4057d1eb851eb852 ## double 95.280000000000001
.quad 0x40583851eb851eb8 ## double 96.879999999999995
.quad 0x40583eb851eb851f ## double 96.980000000000003
.quad 0x40583ae147ae147b ## double 96.920000000000001
.quad 0x405838f5c28f5c29 ## double 96.89
.quad 0x4058666666666666 ## double 97.599999999999994
.quad 0x4058500000000000 ## double 97.25
.quad 0x40585851eb851eb8 ## double 97.379999999999995
.quad 0x405850a3d70a3d71 ## double 97.260000000000005
.quad 0x405845c28f5c28f6 ## double 97.090000000000003
.quad 0x40585d70a3d70a3d ## double 97.459999999999993
.quad 0x405860a3d70a3d71 ## double 97.510000000000005
.quad 0x4058a66666666666 ## double 98.599999999999994
.quad 0x4058ceb851eb851f ## double 99.230000000000003
.quad 0x4058f147ae147ae1 ## double 99.769999999999996
.quad 0x4059000000000000 ## double 100
.quad 0x405905c28f5c28f6 ## double 100.09
.quad 0x4059133333333333 ## double 100.3
.quad 0x405913d70a3d70a4 ## double 100.31
.quad 0x4058fe147ae147ae ## double 99.969999999999998
.quad 0x4058dc28f5c28f5c ## double 99.439999999999998
.quad 0x4058cf5c28f5c28f ## double 99.239999999999994
.quad 0x405905c28f5c28f6 ## double 100.09
.quad 0x40591b851eb851ec ## double 100.43000000000001
.quad 0x4059000000000000 ## double 100
.quad 0x4058fc28f5c28f5c ## double 99.939999999999998
.quad 0x4058d51eb851eb85 ## double 99.329999999999998
.quad 0x4058a66666666666 ## double 98.599999999999994
.quad 0x4058dae147ae147b ## double 99.420000000000001
.quad 0x4058eae147ae147b ## double 99.670000000000001
.quad 0x4058f66666666666 ## double 99.849999999999994
.quad 0x40588d70a3d70a3d ## double 98.209999999999993
.quad 0x40588a3d70a3d70a ## double 98.159999999999996
.quad 0x4058828f5c28f5c3 ## double 98.040000000000006
.quad 0x40585c28f5c28f5c ## double 97.439999999999998
.quad 0x4057d66666666666 ## double 95.349999999999994
.quad 0x4057e9999999999a ## double 95.650000000000005
.quad 0x40569d70a3d70a3d ## double 90.459999999999993
.quad 0x4056e0a3d70a3d71 ## double 91.510000000000005
.quad 0x405708f5c28f5c29 ## double 92.14
.quad 0x4057166666666666 ## double 92.349999999999994
.quad 0x40574d70a3d70a3d ## double 93.209999999999993
.quad 0x40575851eb851eb8 ## double 93.379999999999995
.quad 0x40574c28f5c28f5c ## double 93.189999999999998
.quad 0x405745c28f5c28f6 ## double 93.090000000000003
.quad 0x4057433333333333 ## double 93.049999999999997
.quad 0x4057366666666666 ## double 92.849999999999994
.quad 0x4057600000000000 ## double 93.5
.quad 0x40576eb851eb851f ## double 93.730000000000003
.quad 0x4057400000000000 ## double 93
.quad 0x4057333333333333 ## double 92.799999999999997
.quad 0x40575c28f5c28f5c ## double 93.439999999999998
.quad 0x4057600000000000 ## double 93.5
.quad 0x4057500000000000 ## double 93.25
.quad 0x40575851eb851eb8 ## double 93.379999999999995
.quad 0x405777ae147ae148 ## double 93.870000000000005
.quad 0x405787ae147ae148 ## double 94.120000000000005
.quad 0x40577e147ae147ae ## double 93.969999999999998
.quad 0x40574a3d70a3d70a ## double 93.159999999999996
.quad 0x40573e147ae147ae ## double 92.969999999999998
.quad 0x40572f5c28f5c28f ## double 92.739999999999994
.quad 0x40570a3d70a3d70a ## double 92.159999999999996
.quad 0x4056de147ae147ae ## double 91.469999999999998
.quad 0x4056d33333333333 ## double 91.299999999999997
.quad 0x4056dc28f5c28f5c ## double 91.439999999999998
.quad 0x4056dc28f5c28f5c ## double 91.439999999999998
.quad 0x4056deb851eb851f ## double 91.480000000000003
.quad 0x4056d70a3d70a3d7 ## double 91.359999999999999
.quad 0x4056ab851eb851ec ## double 90.680000000000007
.quad 0x405685c28f5c28f6 ## double 90.090000000000003
.quad 0x4056766666666666 ## double 89.849999999999994
.quad 0x40566f5c28f5c28f ## double 89.739999999999994
.quad 0x4056366666666666 ## double 88.849999999999994
.quad 0x4056728f5c28f5c3 ## double 89.790000000000006
.quad 0x4056751eb851eb85 ## double 89.829999999999998
.quad 0x40568b851eb851ec ## double 90.180000000000007
.quad 0x4056833333333333 ## double 90.049999999999997
.quad 0x4056dae147ae147b ## double 91.420000000000001
.quad 0x4056deb851eb851f ## double 91.480000000000003
.quad 0x4056ca3d70a3d70a ## double 91.159999999999997
.quad 0x405681eb851eb852 ## double 90.030000000000001
.quad 0x40569e147ae147ae ## double 90.469999999999999
.quad 0x4056666666666666 ## double 89.599999999999994
.quad 0x405665c28f5c28f6 ## double 89.590000000000003
.quad 0x40563851eb851eb8 ## double 88.879999999999995
.quad 0x4056733333333333 ## double 89.799999999999997
.quad 0x4056b5c28f5c28f6 ## double 90.840000000000003
.quad 0x4056ad70a3d70a3d ## double 90.709999999999993
.quad 0x40568ae147ae147b ## double 90.170000000000001
.quad 0x40567c28f5c28f5c ## double 89.939999999999998
.quad 0x405679999999999a ## double 89.900000000000006
.quad 0x4056466666666666 ## double 89.099999999999994
.quad 0x405628f5c28f5c29 ## double 88.64
.quad 0x405619999999999a ## double 88.400000000000006
.quad 0x405640a3d70a3d71 ## double 89.010000000000005
.quad 0x405655c28f5c28f6 ## double 89.340000000000003
.quad 0x405659999999999a ## double 89.400000000000006
.quad 0x40566b851eb851ec ## double 89.680000000000007
.quad 0x40566ccccccccccd ## double 89.700000000000002
.quad 0x4057a28f5c28f5c3 ## double 94.540000000000006
.quad 0x40573ccccccccccd ## double 92.950000000000002
.quad 0x4057628f5c28f5c3 ## double 93.540000000000006
.quad 0x40574b851eb851ec ## double 93.180000000000007
.quad 0x4057666666666666 ## double 93.599999999999994
.quad 0x405737ae147ae148 ## double 92.870000000000005
.quad 0x4056eccccccccccd ## double 91.700000000000002
.quad 0x4056f0a3d70a3d71 ## double 91.760000000000005
.quad 0x4056fccccccccccd ## double 91.950000000000002
.quad 0x40569ccccccccccd ## double 90.450000000000002
.quad 0x4056a66666666666 ## double 90.599999999999994
.quad 0x4056500000000000 ## double 89.25
.quad 0x40568b851eb851ec ## double 90.180000000000007
.quad 0x405688f5c28f5c29 ## double 90.14
.quad 0x4056be147ae147ae ## double 90.969999999999999
.quad 0x4056f47ae147ae14 ## double 91.819999999999993
.quad 0x405705c28f5c28f6 ## double 92.090000000000003
.quad 0x40570f5c28f5c28f ## double 92.239999999999994
.quad 0x40575e147ae147ae ## double 93.469999999999998
.quad 0x40571b851eb851ec ## double 92.430000000000007
.quad 0x4056fc28f5c28f5c ## double 91.939999999999998
.quad 0x40569a3d70a3d70a ## double 90.409999999999997
.quad 0x40567d70a3d70a3d ## double 89.959999999999993
.quad 0x405640a3d70a3d71 ## double 89.010000000000005
.quad 0x40561ae147ae147b ## double 88.420000000000001
.quad 0x4056400000000000 ## double 89
.quad 0x40567e147ae147ae ## double 89.969999999999999
.quad 0x40566a3d70a3d70a ## double 89.659999999999997
.quad 0x405610a3d70a3d71 ## double 88.260000000000005
.quad 0x4056128f5c28f5c3 ## double 88.290000000000006
.quad 0x4055d33333333333 ## double 87.299999999999997
.quad 0x40557d70a3d70a3d ## double 85.959999999999993
.quad 0x4054870a3d70a3d7 ## double 82.109999999999999
.quad 0x405495c28f5c28f6 ## double 82.340000000000003
.quad 0x4054a1eb851eb852 ## double 82.530000000000001
.quad 0x4054a28f5c28f5c3 ## double 82.540000000000006
.quad 0x4054c1eb851eb852 ## double 83.030000000000001
.quad 0x40552e147ae147ae ## double 84.719999999999999
.quad 0x4054f66666666666 ## double 83.849999999999994
.quad 0x4054d3d70a3d70a4 ## double 83.310000000000002
.quad 0x4054f3d70a3d70a4 ## double 83.810000000000002
.quad 0x4054eb851eb851ec ## double 83.680000000000007
.quad 0x40547eb851eb851f ## double 81.980000000000003
.quad 0x4054700000000000 ## double 81.75
.quad 0x40548c28f5c28f5c ## double 82.189999999999998
.quad 0x4054628f5c28f5c3 ## double 81.540000000000006
.quad 0x4054600000000000 ## double 81.5
.quad 0x40545147ae147ae1 ## double 81.269999999999996
.quad 0x4054347ae147ae14 ## double 80.819999999999993
.quad 0x40542b851eb851ec ## double 80.680000000000007
.quad 0x4054551eb851eb85 ## double 81.329999999999998
.quad 0x4054570a3d70a3d7 ## double 81.359999999999999
.quad 0x40545147ae147ae1 ## double 81.269999999999996
.quad 0x4054a28f5c28f5c3 ## double 82.540000000000006
.quad 0x40549ccccccccccd ## double 82.450000000000002
.quad 0x4054b851eb851eb8 ## double 82.879999999999995
.quad 0x4054eccccccccccd ## double 83.700000000000002
.quad 0x4054ef5c28f5c28f ## double 83.739999999999994
.quad 0x4054bf5c28f5c28f ## double 82.989999999999994
.quad 0x4054b147ae147ae1 ## double 82.769999999999996
.quad 0x4054d0a3d70a3d71 ## double 83.260000000000005
.quad 0x4054db851eb851ec ## double 83.430000000000007
.quad 0x40550147ae147ae1 ## double 84.019999999999996
.quad 0x4054fc28f5c28f5c ## double 83.939999999999998
.quad 0x4055c1eb851eb852 ## double 87.030000000000001
.quad 0x4055c00000000000 ## double 87
.quad 0x4055a51eb851eb85 ## double 86.579999999999998
.quad 0x405551eb851eb852 ## double 85.280000000000001
.quad 0x40554e147ae147ae ## double 85.219999999999999
.quad 0x4055966666666666 ## double 86.349999999999994
.quad 0x4055b00000000000 ## double 86.75
.quad 0x40559ccccccccccd ## double 86.450000000000002
.quad 0x40552c28f5c28f5c ## double 84.689999999999998
.quad 0x405538f5c28f5c29 ## double 84.89
.quad 0x4054ea3d70a3d70a ## double 83.659999999999997
.quad 0x40550ccccccccccd ## double 84.200000000000002
.quad 0x405529999999999a ## double 84.650000000000006
.quad 0x40551851eb851eb8 ## double 84.379999999999995
.quad 0x40550a3d70a3d70a ## double 84.159999999999997
.quad 0x40550b851eb851ec ## double 84.180000000000007
.quad 0x4055328f5c28f5c3 ## double 84.790000000000006
.quad 0x40554f5c28f5c28f ## double 85.239999999999994
.quad 0x4055528f5c28f5c3 ## double 85.290000000000006
.quad 0x40553f5c28f5c28f ## double 84.989999999999994
.quad 0x40553ccccccccccd ## double 84.950000000000002
.quad 0x4055233333333333 ## double 84.549999999999997
.quad 0x4055128f5c28f5c3 ## double 84.290000000000006
.quad 0x40553eb851eb851f ## double 84.980000000000003
.quad 0x405509999999999a ## double 84.150000000000006
.quad 0x4054bae147ae147b ## double 82.920000000000001
.quad 0x4054800000000000 ## double 82
.quad 0x4054ceb851eb851f ## double 83.230000000000003
.quad 0x4055100000000000 ## double 84.25
.quad 0x40553d70a3d70a3d ## double 84.959999999999993
.quad 0x40554f5c28f5c28f ## double 85.239999999999994
.quad 0x4056451eb851eb85 ## double 89.079999999999998
.quad 0x40562b851eb851ec ## double 88.680000000000007
.quad 0x405625c28f5c28f6 ## double 88.590000000000003
.quad 0x40562ccccccccccd ## double 88.700000000000002
.quad 0x4055f9999999999a ## double 87.900000000000006
.quad 0x40557ccccccccccd ## double 85.950000000000002
.quad 0x4055b33333333333 ## double 86.799999999999997
.quad 0x40558b851eb851ec ## double 86.180000000000007
.quad 0x4055cf5c28f5c28f ## double 87.239999999999994
.quad 0x40561e147ae147ae ## double 88.469999999999999
.quad 0x405678f5c28f5c29 ## double 89.89
.quad 0x40567d70a3d70a3d ## double 89.959999999999993
.quad 0x405699999999999a ## double 90.400000000000006
.quad 0x4056866666666666 ## double 90.099999999999994
.quad 0x405650a3d70a3d71 ## double 89.260000000000005
.quad 0x4055eccccccccccd ## double 87.700000000000002
.quad 0x4055b1eb851eb852 ## double 86.780000000000001
.quad 0x4055f3d70a3d70a4 ## double 87.810000000000002
.quad 0x405615c28f5c28f6 ## double 88.340000000000003
.quad 0x4056000000000000 ## double 88
.quad 0x4055e47ae147ae14 ## double 87.569999999999993
.quad 0x40558eb851eb851f ## double 86.230000000000003
.quad 0x40556ae147ae147b ## double 85.670000000000001
.quad 0x40559f5c28f5c28f ## double 86.489999999999994
.quad 0x4055600000000000 ## double 85.5
.quad 0x405551eb851eb852 ## double 85.280000000000001
.quad 0x40556ccccccccccd ## double 85.700000000000002
.quad 0x40557e147ae147ae ## double 85.969999999999999
.quad 0x405583d70a3d70a4 ## double 86.060000000000002
.quad 0x405520a3d70a3d71 ## double 84.510000000000005
.quad 0x405519999999999a ## double 84.400000000000006
.quad 0x405519999999999a ## double 84.400000000000006
.quad 0x4054b66666666666 ## double 82.849999999999994
.quad 0x4054047ae147ae14 ## double 80.069999999999993
.quad 0x4054528f5c28f5c3 ## double 81.290000000000006
.quad 0x4053d00000000000 ## double 79.25
.quad 0x40542f5c28f5c28f ## double 80.739999999999994
.quad 0x40542ccccccccccd ## double 80.700000000000002
.quad 0x4054b9999999999a ## double 82.900000000000006
.quad 0x4054a33333333333 ## double 82.549999999999997
.quad 0x4054deb851eb851f ## double 83.480000000000003
.quad 0x40547e147ae147ae ## double 81.969999999999999
.quad 0x4053e3d70a3d70a4 ## double 79.560000000000002
.quad 0x4053e70a3d70a3d7 ## double 79.609999999999999
.quad 0x40547f5c28f5c28f ## double 81.989999999999994
.quad 0x4054951eb851eb85 ## double 82.329999999999998
.quad 0x4054d00000000000 ## double 83.25
.quad 0x4055000000000000 ## double 84
.quad 0x4054d66666666666 ## double 83.349999999999994
.quad 0x405539999999999a ## double 84.900000000000006
.quad 0x40549f5c28f5c28f ## double 82.489999999999994
.quad 0x4054a00000000000 ## double 82.5
.quad 0x4054e00000000000 ## double 83.5
.quad 0x4054aae147ae147b ## double 82.670000000000001
.quad 0x4053deb851eb851f ## double 79.480000000000003
.quad 0x4053ab851eb851ec ## double 78.680000000000007
.quad 0x4052e851eb851eb8 ## double 75.629999999999995
.quad 0x4053151eb851eb85 ## double 76.329999999999998
.quad 0x40535ccccccccccd ## double 77.450000000000002
.quad 0x40537f5c28f5c28f ## double 77.989999999999994
.quad 0x405371eb851eb852 ## double 77.780000000000001
.quad 0x40536eb851eb851f ## double 77.730000000000003
.quad 0x4053700000000000 ## double 77.75
.quad 0x4053c00000000000 ## double 79
.quad 0x40539e147ae147ae ## double 78.469999999999999
.quad 0x4053a5c28f5c28f6 ## double 78.590000000000003
.quad 0x4053c33333333333 ## double 79.049999999999997
.quad 0x4053d7ae147ae148 ## double 79.370000000000005
.quad 0x4053ff5c28f5c28f ## double 79.989999999999994
.quad 0x4054000000000000 ## double 80
.quad 0x4054033333333333 ## double 80.049999999999997
.quad 0x4053eccccccccccd ## double 79.700000000000002
.quad 0x4053e00000000000 ## double 79.5
.quad 0x40535ccccccccccd ## double 77.450000000000002
.quad 0x4053166666666666 ## double 76.349999999999994
.quad 0x405385c28f5c28f6 ## double 78.090000000000003
.quad 0x4053be147ae147ae ## double 78.969999999999999
.quad 0x40538147ae147ae1 ## double 78.019999999999996
.quad 0x405389999999999a ## double 78.150000000000006
.quad 0x40538ccccccccccd ## double 78.200000000000002
.quad 0x4053b9999999999a ## double 78.900000000000006
.quad 0x40538b851eb851ec ## double 78.180000000000007
.quad 0x4053b7ae147ae148 ## double 78.870000000000005
.quad 0x405395c28f5c28f6 ## double 78.340000000000003
.quad 0x4054533333333333 ## double 81.299999999999997
.quad 0x40542ccccccccccd ## double 80.700000000000002
.quad 0x40541e147ae147ae ## double 80.469999999999999
.quad 0x4053e00000000000 ## double 79.5
.quad 0x4054600000000000 ## double 81.5
.quad 0x4054570a3d70a3d7 ## double 81.359999999999999
.quad 0x40543147ae147ae1 ## double 80.769999999999996
.quad 0x4054766666666666 ## double 81.849999999999994
.quad 0x4054ef5c28f5c28f ## double 83.739999999999994
.quad 0x4055ed70a3d70a3d ## double 87.709999999999993
.quad 0x405629999999999a ## double 88.650000000000006
.quad 0x405625c28f5c28f6 ## double 88.590000000000003
.quad 0x40563ccccccccccd ## double 88.950000000000002
.quad 0x4056028f5c28f5c3 ## double 88.040000000000006
.quad 0x4055c147ae147ae1 ## double 87.019999999999996
.quad 0x40556c28f5c28f5c ## double 85.689999999999998
.quad 0x40558b851eb851ec ## double 86.180000000000007
.quad 0x4055333333333333 ## double 84.799999999999997
.quad 0x405469999999999a ## double 81.650000000000006
.quad 0x4054247ae147ae14 ## double 80.569999999999993
.quad 0x40539d70a3d70a3d ## double 78.459999999999993
.quad 0x40535b851eb851ec ## double 77.430000000000007
.quad 0x4053cb851eb851ec ## double 79.180000000000007
.quad 0x40542ccccccccccd ## double 80.700000000000002
.quad 0x4054400000000000 ## double 81
.quad 0x4054600000000000 ## double 81.5
.quad 0x4054000000000000 ## double 80
.quad 0x405415c28f5c28f6 ## double 80.340000000000003
.quad 0x4053ec28f5c28f5c ## double 79.689999999999998
.quad 0x40547147ae147ae1 ## double 81.769999999999996
.quad 0x4054728f5c28f5c3 ## double 81.790000000000006
.quad 0x4054000000000000 ## double 80
.quad 0x40546c28f5c28f5c ## double 81.689999999999998
.quad 0x40549eb851eb851f ## double 82.480000000000003
.quad 0x4054400000000000 ## double 81
.quad 0x4054551eb851eb85 ## double 81.329999999999998
.quad 0x4054b00000000000 ## double 82.75
.quad 0x40553eb851eb851f ## double 84.980000000000003
.quad 0x4055370a3d70a3d7 ## double 84.859999999999999
.quad 0x4055c1eb851eb852 ## double 87.030000000000001
.quad 0x40565d70a3d70a3d ## double 89.459999999999993
.quad 0x4056033333333333 ## double 88.049999999999997
.quad 0x4056070a3d70a3d7 ## double 88.109999999999999
.quad 0x405589999999999a ## double 86.150000000000006
.quad 0x4055900000000000 ## double 86.25
.quad 0x40554ae147ae147b ## double 85.170000000000001
.quad 0x40553ae147ae147b ## double 84.920000000000001
.quad 0x40546d70a3d70a3d ## double 81.709999999999993
.quad 0x4053dccccccccccd ## double 79.450000000000002
.quad 0x40543f5c28f5c28f ## double 80.989999999999994
.quad 0x405419999999999a ## double 80.400000000000006
.quad 0x40543f5c28f5c28f ## double 80.989999999999994
.quad 0x4054151eb851eb85 ## double 80.329999999999998
.quad 0x4053f9999999999a ## double 79.900000000000006
.quad 0x4053970a3d70a3d7 ## double 78.359999999999999
.quad 0x4053d28f5c28f5c3 ## double 79.290000000000006
.quad 0x4054000000000000 ## double 80
.quad 0x40547851eb851eb8 ## double 81.879999999999995
.quad 0x405480a3d70a3d71 ## double 82.010000000000005
.quad 0x4054f3d70a3d70a4 ## double 83.810000000000002
.quad 0x4054200000000000 ## double 80.5
.quad 0x4053f28f5c28f5c3 ## double 79.790000000000006
.quad 0x4053d9999999999a ## double 79.400000000000006
.quad 0x4053433333333333 ## double 77.049999999999997
.quad 0x4053600000000000 ## double 77.5
.quad 0x4052a7ae147ae148 ## double 74.620000000000005
.quad 0x4052e33333333333 ## double 75.549999999999997
.quad 0x4052be147ae147ae ## double 74.969999999999999
.quad 0x4052eb851eb851ec ## double 75.680000000000007
.quad 0x4052f9999999999a ## double 75.900000000000006
.quad 0x4052900000000000 ## double 74.25
.quad 0x4052400000000000 ## double 73
.quad 0x4050c00000000000 ## double 67
.quad 0x40511eb851eb851f ## double 68.480000000000004
.quad 0x404fe66666666666 ## double 63.799999999999997
.quad 0x404ff5c28f5c28f6 ## double 63.920000000000002
.quad 0x404d3d70a3d70a3d ## double 58.479999999999997
.quad 0x404c59999999999a ## double 56.700000000000003
.quad 0x404d0b851eb851ec ## double 58.090000000000003
.quad 0x404d400000000000 ## double 58.5
.quad 0x404dc147ae147ae1 ## double 59.509999999999998
.quad 0x404e947ae147ae14 ## double 61.159999999999997
.quad 0x404f000000000000 ## double 62
.quad 0x404efd70a3d70a3d ## double 61.979999999999997
.quad 0x404df1eb851eb852 ## double 59.890000000000001
.quad 0x404fbeb851eb851f ## double 63.490000000000002
.quad 0x40503851eb851eb8 ## double 64.879999999999995
.quad 0x404fe28f5c28f5c3 ## double 63.770000000000003
.quad 0x404fa66666666666 ## double 63.299999999999997
.quad 0x404fe00000000000 ## double 63.75
.quad 0x405059999999999a ## double 65.400000000000006
.quad 0x40509eb851eb851f ## double 66.480000000000004
.quad 0x4051b9999999999a ## double 70.900000000000006
.quad 0x40525ccccccccccd ## double 73.450000000000002
.quad 0x40522e147ae147ae ## double 72.719999999999999
.quad 0x40523851eb851eb8 ## double 72.879999999999995
.quad 0x4052833333333333 ## double 74.049999999999997
.quad 0x4053600000000000 ## double 77.5
.quad 0x4052e66666666666 ## double 75.599999999999994
.quad 0x4052beb851eb851f ## double 74.980000000000003
.quad 0x40527f5c28f5c28f ## double 73.989999999999994
.quad 0x4052366666666666 ## double 72.849999999999994
.quad 0x40527f5c28f5c28f ## double 73.989999999999994
.quad 0x4052c00000000000 ## double 75
.quad 0x4053166666666666 ## double 76.349999999999994
.quad 0x4053566666666666 ## double 77.349999999999994
.quad 0x405340a3d70a3d71 ## double 77.010000000000005
.quad 0x40541c28f5c28f5c ## double 80.439999999999998
.quad 0x405438f5c28f5c29 ## double 80.89
.quad 0x40545eb851eb851f ## double 81.480000000000003
.quad 0x40549ccccccccccd ## double 82.450000000000002
.quad 0x4054951eb851eb85 ## double 82.329999999999998
.quad 0x4054800000000000 ## double 82
.quad 0x4054b66666666666 ## double 82.849999999999994
.quad 0x4053feb851eb851f ## double 79.980000000000003
.quad 0x40532d70a3d70a3d ## double 76.709999999999993
.quad 0x4052c147ae147ae1 ## double 75.019999999999996
.quad 0x4052733333333333 ## double 73.799999999999997
.quad 0x405228f5c28f5c29 ## double 72.640000000000001
.quad 0x4052933333333333 ## double 74.299999999999997
.quad 0x4052033333333333 ## double 72.049999999999997
.quad 0x405179999999999a ## double 69.900000000000006
.quad 0x40515ae147ae147b ## double 69.420000000000002
.quad 0x4051166666666666 ## double 68.349999999999994
.quad 0x40510eb851eb851f ## double 68.230000000000004
.quad 0x4051accccccccccd ## double 70.700000000000003
.quad 0x4051e66666666666 ## double 71.599999999999994
.quad 0x40522ccccccccccd ## double 72.700000000000003
.quad 0x4051d9999999999a ## double 71.400000000000006
.quad 0x40515e147ae147ae ## double 69.469999999999999
.quad 0x4051800000000000 ## double 70
.quad 0x4051800000000000 ## double 70
.quad 0x4051800000000000 ## double 70
.quad 0x40520c28f5c28f5c ## double 72.189999999999998
.quad 0x40522ccccccccccd ## double 72.700000000000003
.quad 0x40528ccccccccccd ## double 74.200000000000002
.quad 0x4051e66666666666 ## double 71.599999999999994
.quad 0x4051d8f5c28f5c29 ## double 71.390000000000001
.quad 0x4051c00000000000 ## double 71
.quad 0x4051ef5c28f5c28f ## double 71.739999999999995
.quad 0x4051700000000000 ## double 69.75
.quad 0x4051d00000000000 ## double 71.25
.quad 0x4052100000000000 ## double 72.25
.quad 0x40525eb851eb851f ## double 73.480000000000003
.quad 0x405279999999999a ## double 73.900000000000006
.quad 0x4051ad70a3d70a3d ## double 70.709999999999994
.quad 0x405159999999999a ## double 69.400000000000006
.quad 0x40521a3d70a3d70a ## double 72.409999999999997
.quad 0x405267ae147ae148 ## double 73.620000000000005
.quad 0x405209999999999a ## double 72.150000000000006
.quad 0x4051a5c28f5c28f6 ## double 70.590000000000003
.quad 0x4051dccccccccccd ## double 71.450000000000003
.quad 0x4051df5c28f5c28f ## double 71.489999999999995
.quad 0x4051bf5c28f5c28f ## double 70.989999999999995
.quad 0x405285c28f5c28f6 ## double 74.090000000000003
.quad 0x4052cccccccccccd ## double 75.200000000000002
.quad 0x405339999999999a ## double 76.900000000000006
.quad 0x4053700000000000 ## double 77.75
.quad 0x40533147ae147ae1 ## double 76.769999999999996
.quad 0x405359999999999a ## double 77.400000000000006
.quad 0x4052fccccccccccd ## double 75.950000000000002
.quad 0x4053900000000000 ## double 78.25
.quad 0x4053b33333333333 ## double 78.799999999999997
.quad 0x4053b66666666666 ## double 78.849999999999994
.quad 0x40543ccccccccccd ## double 80.950000000000002
.quad 0x40542d70a3d70a3d ## double 80.709999999999993
.quad 0x4053e9999999999a ## double 79.650000000000006
.quad 0x40543a3d70a3d70a ## double 80.909999999999997
.quad 0x4054833333333333 ## double 82.049999999999997
.quad 0x4054900000000000 ## double 82.25
.quad 0x4054800000000000 ## double 82
.quad 0x4054c70a3d70a3d7 ## double 83.109999999999999
.quad 0x4054f00000000000 ## double 83.75
.quad 0x4055433333333333 ## double 85.049999999999997
.quad 0x405518f5c28f5c29 ## double 84.39
.quad 0x4055400000000000 ## double 85
.quad 0x40555d70a3d70a3d ## double 85.459999999999993
.quad 0x405599999999999a ## double 86.400000000000006
.quad 0x4055800000000000 ## double 86
.quad 0x40559f5c28f5c28f ## double 86.489999999999994
.quad 0x40555eb851eb851f ## double 85.480000000000003
.quad 0x4054928f5c28f5c3 ## double 82.290000000000006
.quad 0x4054200000000000 ## double 80.5
.quad 0x4054c00000000000 ## double 83
.quad 0x4054b33333333333 ## double 82.799999999999997
.quad 0x4053600000000000 ## double 77.5
.quad 0x405477ae147ae148 ## double 81.870000000000005
.quad 0x4054e00000000000 ## double 83.5
.quad 0x4055333333333333 ## double 84.799999999999997
.quad 0x405529999999999a ## double 84.650000000000006
.quad 0x40554eb851eb851f ## double 85.230000000000003
.quad 0x4055466666666666 ## double 85.099999999999994
.quad 0x4055bd70a3d70a3d ## double 86.959999999999993
.quad 0x4055bae147ae147b ## double 86.920000000000001
.quad 0x4056100000000000 ## double 88.25
.quad 0x4056451eb851eb85 ## double 89.079999999999998
.quad 0x40563ccccccccccd ## double 88.950000000000002
.quad 0x405681eb851eb852 ## double 90.030000000000001
.quad 0x4056466666666666 ## double 89.099999999999994
.quad 0x4055cccccccccccd ## double 87.200000000000002
.quad 0x4055b66666666666 ## double 86.849999999999994
.quad 0x4055a66666666666 ## double 86.599999999999994
.quad 0x4055fd70a3d70a3d ## double 87.959999999999993
.quad 0x405620a3d70a3d71 ## double 88.510000000000005
.quad 0x40567ccccccccccd ## double 89.950000000000002
.quad 0x40562147ae147ae1 ## double 88.519999999999996
.quad 0x405625c28f5c28f6 ## double 88.590000000000003
.quad 0x4059500000000000 ## double 101.25
.quad 0x405948f5c28f5c29 ## double 101.14
.quad 0x4059600000000000 ## double 101.5
.quad 0x405980a3d70a3d71 ## double 102.01000000000001
.quad 0x405a000000000000 ## double 104
.quad 0x405a48f5c28f5c29 ## double 105.14
.quad 0x4059f28f5c28f5c3 ## double 103.79000000000001
.quad 0x405a6ccccccccccd ## double 105.7
.quad 0x405aaa3d70a3d70a ## double 106.66
.quad 0x405aaccccccccccd ## double 106.7
.quad 0x405ab1eb851eb852 ## double 106.78
.quad 0x405ab9999999999a ## double 106.90000000000001
.quad 0x405b033333333333 ## double 108.05
.quad 0x405b28f5c28f5c29 ## double 108.64
.quad 0x405adccccccccccd ## double 107.45
.quad 0x405afccccccccccd ## double 107.95
.quad 0x405b29999999999a ## double 108.65000000000001
.quad 0x405b366666666666 ## double 108.84999999999999
.quad 0x405a7e147ae147ae ## double 105.97
.quad 0x405ad5c28f5c28f6 ## double 107.34
.quad 0x405ae00000000000 ## double 107.5
.quad 0x405ac5c28f5c28f6 ## double 107.09
.quad 0x405aa00000000000 ## double 106.5
.quad 0x405aa1eb851eb852 ## double 106.53
.quad 0x4059c66666666666 ## double 103.09999999999999
.quad 0x4058e66666666666 ## double 99.599999999999994
.quad 0x405913d70a3d70a4 ## double 100.31
.quad 0x4058cc28f5c28f5c ## double 99.189999999999998
.quad 0x4058d00000000000 ## double 99.25
.quad 0x4058cccccccccccd ## double 99.200000000000002
.quad 0x4058eccccccccccd ## double 99.700000000000002
.quad 0x4059000000000000 ## double 100
.quad 0x4059733333333333 ## double 101.8
.quad 0x405a333333333333 ## double 104.8
.quad 0x405b49999999999a ## double 109.15000000000001
.quad 0x405b1eb851eb851f ## double 108.48
.quad 0x405af8f5c28f5c29 ## double 107.89
.quad 0x405aec28f5c28f5c ## double 107.69
.quad 0x405a6e147ae147ae ## double 105.72
.quad 0x405ac00000000000 ## double 107
.quad 0x405af47ae147ae14 ## double 107.81999999999999
.quad 0x405b200000000000 ## double 108.5
.quad 0x405b000000000000 ## double 108
.quad 0x405b533333333333 ## double 109.3
.quad 0x405afeb851eb851f ## double 107.98
.quad 0x405a700000000000 ## double 105.75
.quad 0x405b23d70a3d70a4 ## double 108.56
.quad 0x405b8d70a3d70a3d ## double 110.20999999999999
.quad 0x405bac28f5c28f5c ## double 110.69
.quad 0x405b6eb851eb851f ## double 109.73
.quad 0x405bd00000000000 ## double 111.25
.quad 0x405c90a3d70a3d71 ## double 114.26000000000001
.quad 0x405cb9999999999a ## double 114.90000000000001
.quad 0x405e233333333333 ## double 120.55
.quad 0x405db00000000000 ## double 118.75
.quad 0x405deccccccccccd ## double 119.7
.quad 0x405e05c28f5c28f6 ## double 120.09
.quad 0x405e8b851eb851ec ## double 122.18000000000001
.quad 0x405f000000000000 ## double 124
.quad 0x405f98f5c28f5c29 ## double 126.39
.quad 0x405f4ccccccccccd ## double 125.2
.quad 0x405f8c28f5c28f5c ## double 126.19
.quad 0x405f666666666666 ## double 125.59999999999999
.quad 0x405f0e147ae147ae ## double 124.22
.quad 0x405e600000000000 ## double 121.5
.quad 0x405ecf5c28f5c28f ## double 123.23999999999999
.quad 0x405ef851eb851eb8 ## double 123.88
.quad 0x405efb851eb851ec ## double 123.93000000000001
.quad 0x405eeccccccccccd ## double 123.7
.quad 0x405e9eb851eb851f ## double 122.48
.quad 0x405ee00000000000 ## double 123.5
.quad 0x405f000000000000 ## double 124
.quad 0x405f2ccccccccccd ## double 124.7
.quad 0x405ebe147ae147ae ## double 122.97
.quad 0x405e88f5c28f5c29 ## double 122.14
.quad 0x405e5eb851eb851f ## double 121.48
.quad 0x405e933333333333 ## double 122.3
.quad 0x405ecd70a3d70a3d ## double 123.20999999999999
.quad 0x405eaf5c28f5c28f ## double 122.73999999999999
.quad 0x405e400000000000 ## double 121
.quad 0x405e400000000000 ## double 121
.quad 0x405e6ccccccccccd ## double 121.7
.quad 0x405e7eb851eb851f ## double 121.98
.quad 0x405d2e147ae147ae ## double 116.72
.quad 0x405cb66666666666 ## double 114.84999999999999
.quad 0x405cfc28f5c28f5c ## double 115.94
.quad 0x405ca33333333333 ## double 114.55
.quad 0x405c9851eb851eb8 ## double 114.38
.quad 0x405d133333333333 ## double 116.3
.quad 0x405d19999999999a ## double 116.40000000000001
.quad 0x405cd7ae147ae148 ## double 115.37
.quad 0x405cd00000000000 ## double 115.25
.quad 0x405d333333333333 ## double 116.8
.quad 0x405cf33333333333 ## double 115.8
.quad 0x405cc00000000000 ## double 115
.quad 0x405d151eb851eb85 ## double 116.33
.quad 0x405d366666666666 ## double 116.84999999999999
.quad 0x405d400000000000 ## double 117
.quad 0x405cb9999999999a ## double 114.90000000000001
.quad 0x405cb9999999999a ## double 114.90000000000001
.quad 0x405ce3d70a3d70a4 ## double 115.56
.quad 0x405ccccccccccccd ## double 115.2
.quad 0x405cb33333333333 ## double 114.8
.quad 0x405ba5c28f5c28f6 ## double 110.59
.quad 0x405b933333333333 ## double 110.3
.quad 0x405b8ae147ae147b ## double 110.17
.quad 0x405bc7ae147ae148 ## double 111.12
.quad 0x405b700000000000 ## double 109.75
.quad 0x405baccccccccccd ## double 110.7
.quad 0x405c066666666666 ## double 112.09999999999999
.quad 0x405bb66666666666 ## double 110.84999999999999
.quad 0x405b300000000000 ## double 108.75
.quad 0x405aac28f5c28f5c ## double 106.69
.quad 0x405a71eb851eb852 ## double 105.78
.quad 0x4059af5c28f5c28f ## double 102.73999999999999
.quad 0x4059f00000000000 ## double 103.75
.quad 0x405aaccccccccccd ## double 106.7
.quad 0x4059c00000000000 ## double 103
.quad 0x4059b0a3d70a3d71 ## double 102.76000000000001
.quad 0x4059400000000000 ## double 101
.quad 0x4058d9999999999a ## double 99.400000000000005
.quad 0x4058533333333333 ## double 97.299999999999997
.quad 0x4058a00000000000 ## double 98.5
.quad 0x4058c00000000000 ## double 99
.quad 0x40589ccccccccccd ## double 98.450000000000002
.quad 0x4058b851eb851eb8 ## double 98.879999999999995
.quad 0x405867ae147ae148 ## double 97.620000000000005
.quad 0x405779999999999a ## double 93.900000000000005
.quad 0x40575eb851eb851f ## double 93.480000000000003
.quad 0x40572d70a3d70a3d ## double 92.709999999999993
.quad 0x4056e00000000000 ## double 91.5
.quad 0x40579ccccccccccd ## double 94.450000000000002
.quad 0x4058000000000000 ## double 96
.quad 0x4057e28f5c28f5c3 ## double 95.540000000000006
.quad 0x4057600000000000 ## double 93.5
.quad 0x4057f00000000000 ## double 95.75
.quad 0x4058366666666666 ## double 96.849999999999994
.quad 0x4058300000000000 ## double 96.75
.quad 0x4057f70a3d70a3d7 ## double 95.859999999999999
.quad 0x405859999999999a ## double 97.400000000000005
.quad 0x405889999999999a ## double 98.150000000000005
.quad 0x40592b851eb851ec ## double 100.68000000000001
.quad 0x4059b51eb851eb85 ## double 102.83
.quad 0x4059c851eb851eb8 ## double 103.13
.quad 0x4059570a3d70a3d7 ## double 101.36
.quad 0x4059d9999999999a ## double 103.40000000000001
.quad 0x405a79999999999a ## double 105.90000000000001
.quad 0x405acae147ae147b ## double 107.17
.quad 0x405b0ccccccccccd ## double 108.2
.quad 0x405af33333333333 ## double 107.8
.quad 0x405a2ccccccccccd ## double 104.7
.quad 0x405a028f5c28f5c3 ## double 104.04000000000001
.quad 0x405a3e147ae147ae ## double 104.97
.quad 0x405a400000000000 ## double 105
.quad 0x405a5f5c28f5c28f ## double 105.48999999999999
.quad 0x405a8147ae147ae1 ## double 106.02
.quad 0x405aa5c28f5c28f6 ## double 106.59
.quad 0x405ab66666666666 ## double 106.84999999999999
.quad 0x405aa8f5c28f5c29 ## double 106.64
.quad 0x405a3ccccccccccd ## double 104.95
.quad 0x405a228f5c28f5c3 ## double 104.54000000000001
.quad 0x405a866666666666 ## double 106.09999999999999
.quad 0x405ad7ae147ae148 ## double 107.37
.quad 0x405ad7ae147ae148 ## double 107.37
.quad 0x405b85c28f5c28f6 ## double 110.09
.quad 0x405b5e147ae147ae ## double 109.47
.quad 0x405b200000000000 ## double 108.5
.quad 0x405ac00000000000 ## double 107
.quad 0x405a900000000000 ## double 106.25
.quad 0x405a7ccccccccccd ## double 105.95
.quad 0x405a800000000000 ## double 106
.quad 0x405a6ccccccccccd ## double 105.7
.quad 0x405a89999999999a ## double 106.15000000000001
.quad 0x405abccccccccccd ## double 106.95
.quad 0x405a6ccccccccccd ## double 105.7
.quad 0x405a800000000000 ## double 106
.quad 0x405ae66666666666 ## double 107.59999999999999
.quad 0x405b251eb851eb85 ## double 108.58
.quad 0x405b5851eb851eb8 ## double 109.38
.quad 0x405b533333333333 ## double 109.3
.quad 0x405af33333333333 ## double 107.8
.quad 0x405a0851eb851eb8 ## double 104.13
.quad 0x405a59999999999a ## double 105.40000000000001
.quad 0x405abccccccccccd ## double 106.95
.quad 0x405bb9999999999a ## double 110.90000000000001
.quad 0x405c50a3d70a3d71 ## double 113.26000000000001
.quad 0x405c8c28f5c28f5c ## double 114.19
.quad 0x405cd9999999999a ## double 115.40000000000001
.quad 0x405d29999999999a ## double 116.65000000000001
.quad 0x405d3ccccccccccd ## double 116.95
.quad 0x405c8b851eb851ec ## double 114.18000000000001
.quad 0x405c833333333333 ## double 114.05
.quad 0x405cb9999999999a ## double 114.90000000000001
.quad 0x405c9b851eb851ec ## double 114.43000000000001
.quad 0x405caccccccccccd ## double 114.7
.quad 0x405cbf5c28f5c28f ## double 114.98999999999999
.quad 0x405d500000000000 ## double 117.25
.quad 0x405cdc28f5c28f5c ## double 115.44
.quad 0x405cdccccccccccd ## double 115.45
.quad 0x405d051eb851eb85 ## double 116.08
.quad 0x405d9e147ae147ae ## double 118.47
.quad 0x405d733333333333 ## double 117.8
.quad 0x405d88f5c28f5c29 ## double 118.14
.quad 0x405d600000000000 ## double 117.5
.quad 0x405d900000000000 ## double 118.25
.quad 0x405dc00000000000 ## double 119
.quad 0x405d666666666666 ## double 117.59999999999999
.quad 0x405cb00000000000 ## double 114.75
.quad 0x405c666666666666 ## double 113.59999999999999
.quad 0x405c770a3d70a3d7 ## double 113.86
.quad 0x405c866666666666 ## double 114.09999999999999
.quad 0x405d65c28f5c28f6 ## double 117.59
.quad 0x405df9999999999a ## double 119.90000000000001
.quad 0x405de66666666666 ## double 119.59999999999999
.quad 0x405dbccccccccccd ## double 118.95
.quad 0x405deccccccccccd ## double 119.7
.quad 0x405df9999999999a ## double 119.90000000000001
.quad 0x405d6b851eb851ec ## double 117.68000000000001
.quad 0x405d45c28f5c28f6 ## double 117.09
.quad 0x405cf33333333333 ## double 115.8
.quad 0x405c89999999999a ## double 114.15000000000001
.quad 0x405c4b851eb851ec ## double 113.18000000000001
.quad 0x405c89999999999a ## double 114.15000000000001
.quad 0x405db9999999999a ## double 118.90000000000001
.quad 0x405d8b851eb851ec ## double 118.18000000000001
.quad 0x405d700000000000 ## double 117.75
.quad 0x405d500000000000 ## double 117.25
.quad 0x405cf70a3d70a3d7 ## double 115.86
.quad 0x405cc66666666666 ## double 115.09999999999999
.quad 0x405dbccccccccccd ## double 118.95
.quad 0x405da9999999999a ## double 118.65000000000001
.quad 0x405d833333333333 ## double 118.05
.quad 0x405d39999999999a ## double 116.90000000000001
.quad 0x405d2ccccccccccd ## double 116.7
.quad 0x405cb66666666666 ## double 114.84999999999999
.quad 0x405cb00000000000 ## double 114.75
.quad 0x405c833333333333 ## double 114.05
.quad 0x405d19999999999a ## double 116.40000000000001
.quad 0x405cf9999999999a ## double 115.90000000000001
.quad 0x405b800000000000 ## double 110
.quad 0x4058f851eb851eb8 ## double 99.879999999999995
.quad 0x4058f8f5c28f5c29 ## double 99.89
.quad 0x4058600000000000 ## double 97.5
.quad 0x40597c28f5c28f5c ## double 101.94
.quad 0x4058f9999999999a ## double 99.900000000000005
.quad 0x4058af5c28f5c28f ## double 98.739999999999994
.quad 0x4059000000000000 ## double 100
.quad 0x4059000000000000 ## double 100
.quad 0x4057833333333333 ## double 94.049999999999997
.quad 0x4057a9999999999a ## double 94.650000000000005
.quad 0x4058866666666666 ## double 98.099999999999994
.quad 0x405863d70a3d70a4 ## double 97.560000000000002
.quad 0x4058447ae147ae14 ## double 97.069999999999993
.quad 0x40589ccccccccccd ## double 98.450000000000002
.quad 0x4059000000000000 ## double 100
.quad 0x4057ea3d70a3d70a ## double 95.659999999999996
.quad 0x4057bf5c28f5c28f ## double 94.989999999999994
.quad 0x4056c00000000000 ## double 91
.quad 0x4056e66666666666 ## double 91.599999999999994
.quad 0x405701eb851eb852 ## double 92.030000000000001
.quad 0x4057333333333333 ## double 92.799999999999997
.quad 0x405779999999999a ## double 93.900000000000005
.quad 0x405877ae147ae148 ## double 97.870000000000005
.quad 0x4058b9999999999a ## double 98.900000000000005
.quad 0x40589c28f5c28f5c ## double 98.439999999999998
.quad 0x405899999999999a ## double 98.400000000000005
.quad 0x405a40a3d70a3d71 ## double 105.01000000000001
.quad 0x405ad5c28f5c28f6 ## double 107.34
.quad 0x405b100000000000 ## double 108.25
.quad 0x405b19999999999a ## double 108.40000000000001
.quad 0x405a666666666666 ## double 105.59999999999999
.quad 0x405ae9999999999a ## double 107.65000000000001
.quad 0x405a99999999999a ## double 106.40000000000001
.quad 0x4059e28f5c28f5c3 ## double 103.54000000000001
.quad 0x405ab7ae147ae148 ## double 106.87
.quad 0x405a733333333333 ## double 105.8
.quad 0x405ae00000000000 ## double 107.5
.quad 0x405bb9999999999a ## double 110.90000000000001
.quad 0x405be00000000000 ## double 111.5
.quad 0x405ce66666666666 ## double 115.59999999999999
.quad 0x405cf00000000000 ## double 115.75
.quad 0x405da8f5c28f5c29 ## double 118.64
.quad 0x405d0ae147ae147b ## double 116.17
.quad 0x405d5851eb851eb8 ## double 117.38
.quad 0x405cee147ae147ae ## double 115.72
.quad 0x405cbf5c28f5c28f ## double 114.98999999999999
.quad 0x405d8ccccccccccd ## double 118.2
.quad 0x405d666666666666 ## double 117.59999999999999
.quad 0x405cec28f5c28f5c ## double 115.69
.quad 0x405c266666666666 ## double 112.59999999999999
.quad 0x405c7f5c28f5c28f ## double 113.98999999999999
.quad 0x405c95c28f5c28f6 ## double 114.34
.quad 0x405d300000000000 ## double 116.75
.quad 0x405d300000000000 ## double 116.75
.quad 0x405ce00000000000 ## double 115.5
.quad 0x405cd00000000000 ## double 115.25
.quad 0x405bc7ae147ae148 ## double 111.12
.quad 0x405bdc28f5c28f5c ## double 111.44
.quad 0x405b7c28f5c28f5c ## double 109.94
.quad 0x405b7c28f5c28f5c ## double 109.94
.quad 0x405c7c28f5c28f5c ## double 113.94
.quad 0x405b800000000000 ## double 110
.quad 0x4058700000000000 ## double 97.75
.quad 0x4057800000000000 ## double 94
.quad 0x40581c28f5c28f5c ## double 96.439999999999998
.quad 0x4057900000000000 ## double 94.25
.quad 0x4057bc28f5c28f5c ## double 94.939999999999998
.quad 0x4057f00000000000 ## double 95.75
.quad 0x40577c28f5c28f5c ## double 93.939999999999998
.quad 0x4057ac28f5c28f5c ## double 94.689999999999998
.quad 0x4058f00000000000 ## double 99.75
.quad 0x4057c00000000000 ## double 95
.quad 0x4055e00000000000 ## double 87.5
.quad 0x4055bc28f5c28f5c ## double 86.939999999999998
.quad 0x40560c28f5c28f5c ## double 88.189999999999998
.quad 0x405607ae147ae148 ## double 88.120000000000005
.quad 0x405663d70a3d70a4 ## double 89.560000000000002
.quad 0x405657ae147ae148 ## double 89.370000000000005
.quad 0x4055fc28f5c28f5c ## double 87.939999999999998
.quad 0x4056000000000000 ## double 88
.quad 0x40579c28f5c28f5c ## double 94.439999999999998
.quad 0x4056a00000000000 ## double 90.5
.quad 0x4056ac28f5c28f5c ## double 90.689999999999998
.quad 0x40576c28f5c28f5c ## double 93.689999999999998
.quad 0x405827ae147ae148 ## double 96.620000000000005
.quad 0x405807ae147ae148 ## double 96.120000000000005
.quad 0x4058900000000000 ## double 98.25
.quad 0x4058c00000000000 ## double 99
.quad 0x4057fc28f5c28f5c ## double 95.939999999999998
.quad 0x405973d70a3d70a4 ## double 101.81
.quad 0x405a300000000000 ## double 104.75
.quad 0x4058cc28f5c28f5c ## double 99.189999999999998
.quad 0x4058d3d70a3d70a4 ## double 99.310000000000002
.quad 0x4058800000000000 ## double 98
.quad 0x4058fc28f5c28f5c ## double 99.939999999999998
.quad 0x4059300000000000 ## double 100.75
.quad 0x4059800000000000 ## double 102
.quad 0x4059700000000000 ## double 101.75
.quad 0x405933d70a3d70a4 ## double 100.81
.quad 0x405a17ae147ae148 ## double 104.37
.quad 0x4059d00000000000 ## double 103.25
.quad 0x4059a00000000000 ## double 102.5
.quad 0x4058ec28f5c28f5c ## double 99.689999999999998
.quad 0x405903d70a3d70a4 ## double 100.06
.quad 0x4058f7ae147ae148 ## double 99.870000000000005
.quad 0x4058dc28f5c28f5c ## double 99.439999999999998
.quad 0x405897ae147ae148 ## double 98.370000000000005
.quad 0x4059000000000000 ## double 100
.quad 0x4059f00000000000 ## double 103.75
.quad 0x4059e00000000000 ## double 103.5
.quad 0x405997ae147ae148 ## double 102.37
.quad 0x4059a7ae147ae148 ## double 102.62
.quad 0x405993d70a3d70a4 ## double 102.31
.quad 0x4058f7ae147ae148 ## double 99.870000000000005
.quad 0x4058c7ae147ae148 ## double 99.120000000000005
.quad 0x4057700000000000 ## double 93.75
.quad 0x40576c28f5c28f5c ## double 93.689999999999998
.quad 0x405737ae147ae148 ## double 92.870000000000005
.quad 0x4056bc28f5c28f5c ## double 90.939999999999998
.quad 0x405797ae147ae148 ## double 94.370000000000005
.quad 0x4057c00000000000 ## double 95
.quad 0x4058100000000000 ## double 96.25
.quad 0x4058a00000000000 ## double 98.5
.quad 0x4058c7ae147ae148 ## double 99.120000000000005
.quad 0x405c77ae147ae148 ## double 113.87
.quad 0x405c47ae147ae148 ## double 113.12
.quad 0x405b9c28f5c28f5c ## double 110.44
.quad 0x405c97ae147ae148 ## double 114.37
.quad 0x405c3c28f5c28f5c ## double 112.94
.quad 0x405d47ae147ae148 ## double 117.12
.quad 0x405dd7ae147ae148 ## double 119.37
.quad 0x405d07ae147ae148 ## double 116.12
.quad 0x405ca3d70a3d70a4 ## double 114.56
.quad 0x405cb7ae147ae148 ## double 114.87
.quad 0x405dd7ae147ae148 ## double 119.37
.quad 0x405de7ae147ae148 ## double 119.62
.quad 0x405d3c28f5c28f5c ## double 116.94
.quad 0x405db7ae147ae148 ## double 118.87
.quad 0x405e13d70a3d70a4 ## double 120.31
.quad 0x405e77ae147ae148 ## double 121.87
.quad 0x405fa7ae147ae148 ## double 126.62
.quad 0x405f13d70a3d70a4 ## double 124.31
.quad 0x405fac28f5c28f5c ## double 126.69
.quad 0x405fa3d70a3d70a4 ## double 126.56
.quad 0x405f900000000000 ## double 126.25
.quad 0x405f800000000000 ## double 126
.quad 0x405fd00000000000 ## double 127.25
.quad 0x406026147ae147ae ## double 129.19
.quad 0x406019eb851eb852 ## double 128.81
.quad 0x405fb7ae147ae148 ## double 126.87
.quad 0x4060080000000000 ## double 128.25
.quad 0x40609e147ae147ae ## double 132.94
.quad 0x4060d00000000000 ## double 134.5
.quad 0x4060dc28f5c28f5c ## double 134.88
.quad 0x4060a9eb851eb852 ## double 133.31
.quad 0x4060de147ae147ae ## double 134.94
.quad 0x4060c6147ae147ae ## double 134.19
.quad 0x4060700000000000 ## double 131.5
.quad 0x4060c80000000000 ## double 134.25
.quad 0x4060a6147ae147ae ## double 133.19
.quad 0x40603c28f5c28f5c ## double 129.88
.quad 0x405f5c28f5c28f5c ## double 125.44
.quad 0x405ef00000000000 ## double 123.75
.quad 0x405e8c28f5c28f5c ## double 122.19
.quad 0x405e8c28f5c28f5c ## double 122.19
.quad 0x405ed7ae147ae148 ## double 123.37
.quad 0x405eb3d70a3d70a4 ## double 122.81
.quad 0x405ebc28f5c28f5c ## double 122.94
.quad 0x405ea7ae147ae148 ## double 122.62
.quad 0x405eec28f5c28f5c ## double 123.69
.quad 0x405e73d70a3d70a4 ## double 121.81
.quad 0x405e100000000000 ## double 120.25
.quad 0x405e33d70a3d70a4 ## double 120.81
.quad 0x405de7ae147ae148 ## double 119.62
.quad 0x405dc00000000000 ## double 119
.quad 0x405d200000000000 ## double 116.5
.quad 0x405d200000000000 ## double 116.5
.quad 0x405cb7ae147ae148 ## double 114.87
.quad 0x405c1c28f5c28f5c ## double 112.44
.quad 0x405c6c28f5c28f5c ## double 113.69
.quad 0x405c200000000000 ## double 112.5
.quad 0x405be00000000000 ## double 111.5
.quad 0x405c17ae147ae148 ## double 112.37
.quad 0x405c57ae147ae148 ## double 113.37
.quad 0x405ce7ae147ae148 ## double 115.62
.quad 0x405d400000000000 ## double 117
.quad 0x405d73d70a3d70a4 ## double 117.81
.quad 0x405b600000000000 ## double 109.5
.quad 0x405a33d70a3d70a4 ## double 104.81
.quad 0x405af7ae147ae148 ## double 107.87
.quad 0x405a37ae147ae148 ## double 104.87
.quad 0x405a7c28f5c28f5c ## double 105.94
.quad 0x405a77ae147ae148 ## double 105.87
.quad 0x405a000000000000 ## double 104
.quad 0x405a3c28f5c28f5c ## double 104.94
.quad 0x405a43d70a3d70a4 ## double 105.06
.quad 0x405a47ae147ae148 ## double 105.12
.quad 0x405a67ae147ae148 ## double 105.62
.quad 0x405b77ae147ae148 ## double 109.87
.quad 0x405c4c28f5c28f5c ## double 113.19
.quad 0x405c97ae147ae148 ## double 114.37
.quad 0x405ccc28f5c28f5c ## double 115.19
.quad 0x405c33d70a3d70a4 ## double 112.81
.quad 0x405cf00000000000 ## double 115.75
.quad 0x405c67ae147ae148 ## double 113.62
.quad 0x405c93d70a3d70a4 ## double 114.31
.quad 0x405cfc28f5c28f5c ## double 115.94
.quad 0x405e2c28f5c28f5c ## double 120.69
.quad 0x405e17ae147ae148 ## double 120.37
.quad 0x405d6c28f5c28f5c ## double 117.69
.quad 0x405dc00000000000 ## double 119
.quad 0x405e000000000000 ## double 120
.quad 0x405dfc28f5c28f5c ## double 119.94
.quad 0x405e1c28f5c28f5c ## double 120.44
.quad 0x405e300000000000 ## double 120.75
.quad 0x405e93d70a3d70a4 ## double 122.31
.quad 0x405e700000000000 ## double 121.75
.quad 0x405ca7ae147ae148 ## double 114.62
.quad 0x405c8c28f5c28f5c ## double 114.19
.quad 0x405b700000000000 ## double 109.75
.quad 0x405b3c28f5c28f5c ## double 108.94
.quad 0x405bb00000000000 ## double 110.75
.quad 0x405bc00000000000 ## double 111
.quad 0x405acc28f5c28f5c ## double 107.19
.quad 0x405c100000000000 ## double 112.25
.quad 0x405b800000000000 ## double 110
.quad 0x405b800000000000 ## double 110
.quad 0x405b500000000000 ## double 109.25
.quad 0x405ae00000000000 ## double 107.5
.quad 0x405ad3d70a3d70a4 ## double 107.31
.quad 0x405b37ae147ae148 ## double 108.87
.quad 0x405b500000000000 ## double 109.25
.quad 0x405a37ae147ae148 ## double 104.87
.quad 0x405a700000000000 ## double 105.75
.quad 0x405ae00000000000 ## double 107.5
.quad 0x405a6c28f5c28f5c ## double 105.69
.quad 0x405ba7ae147ae148 ## double 110.62
.quad 0x405b97ae147ae148 ## double 110.37
.quad 0x405b67ae147ae148 ## double 109.62
.quad 0x405b4c28f5c28f5c ## double 109.19
.quad 0x405bc7ae147ae148 ## double 111.12
.quad 0x405c67ae147ae148 ## double 113.62
.quad 0x405c4c28f5c28f5c ## double 113.19
.quad 0x405c2c28f5c28f5c ## double 112.69
.quad 0x405c47ae147ae148 ## double 113.12
.quad 0x405c7c28f5c28f5c ## double 113.94
.quad 0x405c400000000000 ## double 113
.quad 0x405acc28f5c28f5c ## double 107.19
.quad 0x405a500000000000 ## double 105.25
.quad 0x405b700000000000 ## double 109.75
.quad 0x405cc7ae147ae148 ## double 115.12
.quad 0x405c100000000000 ## double 112.25
.quad 0x405c300000000000 ## double 112.75
.quad 0x405c93d70a3d70a4 ## double 114.31
.quad 0x405de7ae147ae148 ## double 119.62
.quad 0x405e67ae147ae148 ## double 121.62
.quad 0x405f57ae147ae148 ## double 125.37
.quad 0x405f6c28f5c28f5c ## double 125.69
.quad 0x4060000000000000 ## double 128
.quad 0x405f700000000000 ## double 125.75
.quad 0x405fbc28f5c28f5c ## double 126.94
.quad 0x405e97ae147ae148 ## double 122.37
.quad 0x405ef00000000000 ## double 123.75
.quad 0x405ec00000000000 ## double 123
.quad 0x405e9c28f5c28f5c ## double 122.44
.quad 0x405fc00000000000 ## double 127
.quad 0x4060080000000000 ## double 128.25
.quad 0x405e900000000000 ## double 122.25
.quad 0x405cf7ae147ae148 ## double 115.87
.quad 0x405cd7ae147ae148 ## double 115.37
.quad 0x405cac28f5c28f5c ## double 114.69
.quad 0x405c57ae147ae148 ## double 113.37
.quad 0x405bec28f5c28f5c ## double 111.69
.quad 0x405b73d70a3d70a4 ## double 109.81
.quad 0x405b100000000000 ## double 108.25
.quad 0x405b800000000000 ## double 110
.quad 0x405b400000000000 ## double 109
.quad 0x405b400000000000 ## double 109
.quad 0x405b000000000000 ## double 108
.quad 0x405ab7ae147ae148 ## double 106.87
.quad 0x405ac00000000000 ## double 107
.quad 0x405bc00000000000 ## double 111
.quad 0x405b800000000000 ## double 110
.quad 0x405a5c28f5c28f5c ## double 105.44
.quad 0x405a600000000000 ## double 105.5
.quad 0x405a63d70a3d70a4 ## double 105.56
.quad 0x405aa00000000000 ## double 106.5
.quad 0x405b77ae147ae148 ## double 109.87
.quad 0x405bc00000000000 ## double 111
.quad 0x405ba00000000000 ## double 110.5
.quad 0x405c5c28f5c28f5c ## double 113.44
.quad 0x405cfc28f5c28f5c ## double 115.94
.quad 0x405db7ae147ae148 ## double 118.87
.quad 0x405d53d70a3d70a4 ## double 117.31
.quad 0x405d5c28f5c28f5c ## double 117.44
.quad 0x405d17ae147ae148 ## double 116.37
.quad 0x405ddc28f5c28f5c ## double 119.44
.quad 0x405dc7ae147ae148 ## double 119.12
.quad 0x405df00000000000 ## double 119.75
.quad 0x405dc00000000000 ## double 119
.quad 0x405d37ae147ae148 ## double 116.87
.quad 0x405d9c28f5c28f5c ## double 118.44
.quad 0x405d600000000000 ## double 117.5
.quad 0x405cf3d70a3d70a4 ## double 115.81
.quad 0x405c5c28f5c28f5c ## double 113.44
.quad 0x405c33d70a3d70a4 ## double 112.81
.quad 0x405c8c28f5c28f5c ## double 114.19
.quad 0x405d9c28f5c28f5c ## double 118.44
.quad 0x405dfc28f5c28f5c ## double 119.94
.quad 0x405de00000000000 ## double 119.5
.quad 0x405eb7ae147ae148 ## double 122.87
.quad 0x405ec00000000000 ## double 123
.quad 0x405f300000000000 ## double 124.75
.quad 0x405e800000000000 ## double 122
.quad 0x405df00000000000 ## double 119.75
.quad 0x405ed3d70a3d70a4 ## double 123.31
.quad 0x405e400000000000 ## double 121
.quad 0x405e800000000000 ## double 122
.quad 0x405e47ae147ae148 ## double 121.12
.quad 0x405dd7ae147ae148 ## double 119.37
.quad 0x405d7c28f5c28f5c ## double 117.94
.quad 0x405dbc28f5c28f5c ## double 118.94
.quad 0x405df00000000000 ## double 119.75
.quad 0x405ca00000000000 ## double 114.5
.quad 0x405d000000000000 ## double 116
.quad 0x405b600000000000 ## double 109.5
.quad 0x405ba00000000000 ## double 110.5
.quad 0x405ba00000000000 ## double 110.5
.quad 0x405bb00000000000 ## double 110.75
.quad 0x405b800000000000 ## double 110
.quad 0x405b9c28f5c28f5c ## double 110.44
.quad 0x405b93d70a3d70a4 ## double 110.31
.quad 0x405b87ae147ae148 ## double 110.12
.quad 0x405b9c28f5c28f5c ## double 110.44
.quad 0x405bc3d70a3d70a4 ## double 111.06
.quad 0x405b6c28f5c28f5c ## double 109.69
.quad 0x405afc28f5c28f5c ## double 107.94
.quad 0x405b8c28f5c28f5c ## double 110.19
.quad 0x405c300000000000 ## double 112.75
.quad 0x405bfc28f5c28f5c ## double 111.94
.quad 0x405e87ae147ae148 ## double 122.12
.quad 0x405e400000000000 ## double 121
.quad 0x405dcc28f5c28f5c ## double 119.19
.quad 0x405d200000000000 ## double 116.5
.quad 0x405c37ae147ae148 ## double 112.87
.quad 0x405a93d70a3d70a4 ## double 106.31
.quad 0x405a1c28f5c28f5c ## double 104.44
.quad 0x405a200000000000 ## double 104.5
.quad 0x405a3c28f5c28f5c ## double 104.94
.quad 0x405a800000000000 ## double 106
.quad 0x405a400000000000 ## double 105
.quad 0x405b77ae147ae148 ## double 109.87
.quad 0x405b1c28f5c28f5c ## double 108.44
.quad 0x405a47ae147ae148 ## double 105.12
.quad 0x4058900000000000 ## double 98.25
.quad 0x4057b7ae147ae148 ## double 94.870000000000005
.quad 0x4057c00000000000 ## double 95
.quad 0x405817ae147ae148 ## double 96.370000000000005
.quad 0x4058100000000000 ## double 96.25
.quad 0x4058600000000000 ## double 97.5
.quad 0x405843d70a3d70a4 ## double 97.060000000000002
.quad 0x4057e00000000000 ## double 95.5
.quad 0x40577c28f5c28f5c ## double 93.939999999999998
.quad 0x40573c28f5c28f5c ## double 92.939999999999998
.quad 0x40579c28f5c28f5c ## double 94.439999999999998
.quad 0x4057fc28f5c28f5c ## double 95.939999999999998
.quad 0x405833d70a3d70a4 ## double 96.810000000000002
.quad 0x4058b3d70a3d70a4 ## double 98.810000000000002
.quad 0x4058c00000000000 ## double 99
.quad 0x4057d3d70a3d70a4 ## double 95.310000000000002
.quad 0x4057e3d70a3d70a4 ## double 95.560000000000002
.quad 0x4058000000000000 ## double 96
.quad 0x4057c7ae147ae148 ## double 95.120000000000005
.quad 0x4057a7ae147ae148 ## double 94.620000000000005
.quad 0x4057400000000000 ## double 93
.quad 0x405c43d70a3d70a4 ## double 113.06
.quad 0x405b200000000000 ## double 108.5
.quad 0x405b400000000000 ## double 109
.quad 0x405b37ae147ae148 ## double 108.87
.quad 0x405ad00000000000 ## double 107.25
.quad 0x405bb7ae147ae148 ## double 110.87
.quad 0x405cb7ae147ae148 ## double 114.87
.quad 0x405d2c28f5c28f5c ## double 116.69
.quad 0x405cc3d70a3d70a4 ## double 115.06
.quad 0x405dcc28f5c28f5c ## double 119.19
.quad 0x405eb00000000000 ## double 122.75
.quad 0x405ed00000000000 ## double 123.25
.quad 0x405e000000000000 ## double 120
.quad 0x405e43d70a3d70a4 ## double 121.06
.quad 0x405e900000000000 ## double 122.25
.quad 0x405eb3d70a3d70a4 ## double 122.81
.quad 0x405f300000000000 ## double 124.75
.quad 0x405fb3d70a3d70a4 ## double 126.81
.quad 0x405f400000000000 ## double 125
.quad 0x405ff3d70a3d70a4 ## double 127.81
.quad 0x405fd00000000000 ## double 127.25
.quad 0x40603428f5c28f5c ## double 129.63
.quad 0x40605c28f5c28f5c ## double 130.88
.quad 0x40606e147ae147ae ## double 131.44
.quad 0x40608c28f5c28f5c ## double 132.38
.quad 0x4060e00000000000 ## double 135
.quad 0x4060b1eb851eb852 ## double 133.56
.quad 0x4060f1eb851eb852 ## double 135.56
.quad 0x406136147ae147ae ## double 137.69
.quad 0x4061100000000000 ## double 136.5
.quad 0x4060be147ae147ae ## double 133.94
.quad 0x406096147ae147ae ## double 132.69
.quad 0x4060380000000000 ## double 129.75
.quad 0x405fb00000000000 ## double 126.75
.quad 0x4060100000000000 ## double 128.5
.quad 0x405f800000000000 ## double 126
.quad 0x405f67ae147ae148 ## double 125.62
.quad 0x405f4c28f5c28f5c ## double 125.19
.quad 0x405f77ae147ae148 ## double 125.87
.quad 0x405f03d70a3d70a4 ## double 124.06
.quad 0x405ee7ae147ae148 ## double 123.62
.quad 0x405f37ae147ae148 ## double 124.87
.quad 0x405f17ae147ae148 ## double 124.37
.quad 0x405f37ae147ae148 ## double 124.87
.quad 0x40603428f5c28f5c ## double 129.63
.quad 0x406011eb851eb852 ## double 128.56
.quad 0x405ff7ae147ae148 ## double 127.87
.quad 0x405f200000000000 ## double 124.5
.quad 0x405ef3d70a3d70a4 ## double 123.81
.quad 0x405ef00000000000 ## double 123.75
.quad 0x405ea00000000000 ## double 122.5
.quad 0x405f1c28f5c28f5c ## double 124.44
.quad 0x405fc00000000000 ## double 127
.quad 0x405f000000000000 ## double 124
.quad 0x405e77ae147ae148 ## double 121.87
.quad 0x405ec7ae147ae148 ## double 123.12
.quad 0x405f800000000000 ## double 126
.quad 0x405fac28f5c28f5c ## double 126.69
.quad 0x405fa7ae147ae148 ## double 126.62
.quad 0x406016147ae147ae ## double 128.69
.quad 0x405fa00000000000 ## double 126.5
.quad 0x405f47ae147ae148 ## double 125.12
.quad 0x405f5c28f5c28f5c ## double 125.44
.quad 0x4060100000000000 ## double 128.5
.quad 0x4060380000000000 ## double 129.75
.quad 0x40608428f5c28f5c ## double 132.13
.quad 0x40615c28f5c28f5c ## double 138.88
.quad 0x4061300000000000 ## double 137.5
.quad 0x40614428f5c28f5c ## double 138.13
.quad 0x4061500000000000 ## double 138.5
.quad 0x406166147ae147ae ## double 139.19
.quad 0x406156147ae147ae ## double 138.69
.quad 0x40612e147ae147ae ## double 137.44
.quad 0x4060f00000000000 ## double 135.5
.quad 0x4060b00000000000 ## double 133.5
.quad 0x4060bc28f5c28f5c ## double 133.88
.quad 0x4060880000000000 ## double 132.25
.quad 0x406069eb851eb852 ## double 131.31
.quad 0x4060800000000000 ## double 132
.quad 0x405f53d70a3d70a4 ## double 125.31
.quad 0x405f2c28f5c28f5c ## double 124.69
.quad 0x405f700000000000 ## double 125.75
.quad 0x405f1c28f5c28f5c ## double 124.44
.quad 0x405f0c28f5c28f5c ## double 124.19
.quad 0x405ff00000000000 ## double 127.75
.quad 0x405f700000000000 ## double 125.75
.quad 0x405e5c28f5c28f5c ## double 121.44
.quad 0x405e800000000000 ## double 122
.quad 0x405e600000000000 ## double 121.5
.quad 0x405d93d70a3d70a4 ## double 118.31
.quad 0x405d000000000000 ## double 116
.quad 0x405d200000000000 ## double 116.5
.quad 0x405d300000000000 ## double 116.75
.quad 0x405e37ae147ae148 ## double 120.87
.quad 0x405df7ae147ae148 ## double 119.87
.quad 0x405e6c28f5c28f5c ## double 121.69
.quad 0x405d000000000000 ## double 116
.quad 0x405cd3d70a3d70a4 ## double 115.31
.quad 0x405ca7ae147ae148 ## double 114.62
.quad 0x405c77ae147ae148 ## double 113.87
.quad 0x405d300000000000 ## double 116.75
.quad 0x405d37ae147ae148 ## double 116.87
.quad 0x406d9428f5c28f5c ## double 236.63
.quad 0x406c400000000000 ## double 226
.quad 0x406cd6147ae147ae ## double 230.69
.quad 0x406d2c28f5c28f5c ## double 233.38
.quad 0x406d8c28f5c28f5c ## double 236.38
.quad 0x406dd80000000000 ## double 238.75
.quad 0x406e1428f5c28f5c ## double 240.63
.quad 0x406df428f5c28f5c ## double 239.63
.quad 0x406e700000000000 ## double 243.5
.quad 0x406ec00000000000 ## double 246
.quad 0x406c8e147ae147ae ## double 228.44
.quad 0x406bbc28f5c28f5c ## double 221.88
.quad 0x406bbc28f5c28f5c ## double 221.88
.quad 0x406b300000000000 ## double 217.5
.quad 0x406ac00000000000 ## double 214
.quad 0x406a900000000000 ## double 212.5
.quad 0x406afc28f5c28f5c ## double 215.88
.quad 0x406a880000000000 ## double 212.25
.quad 0x406a600000000000 ## double 211
.quad 0x406a200000000000 ## double 209
.quad 0x406a500000000000 ## double 210.5
.quad 0x406ae80000000000 ## double 215.25
.quad 0x406a580000000000 ## double 210.75
.quad 0x4069d1eb851eb852 ## double 206.56
.quad 0x4068d80000000000 ## double 198.75
.quad 0x40657c28f5c28f5c ## double 171.88
.quad 0x4065380000000000 ## double 169.75
.quad 0x406589eb851eb852 ## double 172.31
.quad 0x4066300000000000 ## double 177.5
.quad 0x4066700000000000 ## double 179.5
.quad 0x4066fc28f5c28f5c ## double 183.88
.quad 0x4067000000000000 ## double 184
.quad 0x4066f428f5c28f5c ## double 183.63
.quad 0x406781eb851eb852 ## double 188.06
.quad 0x406781eb851eb852 ## double 188.06
.quad 0x40677428f5c28f5c ## double 187.63
.quad 0x4067580000000000 ## double 186.75
.quad 0x4066fe147ae147ae ## double 183.94
.quad 0x406636147ae147ae ## double 177.69
.quad 0x4066b00000000000 ## double 181.5
.quad 0x4066a80000000000 ## double 181.25
.quad 0x4066580000000000 ## double 178.75
.quad 0x4065fc28f5c28f5c ## double 175.88
.quad 0x4065a80000000000 ## double 173.25
.quad 0x4065500000000000 ## double 170.5
.quad 0x4064f428f5c28f5c ## double 167.63
.quad 0x4065500000000000 ## double 170.5
.quad 0x4066400000000000 ## double 178
.quad 0x4066380000000000 ## double 177.75
.quad 0x4066800000000000 ## double 180
.quad 0x4066f00000000000 ## double 183.5
.quad 0x4066c9eb851eb852 ## double 182.31
.quad 0x4066ee147ae147ae ## double 183.44
.quad 0x40673e147ae147ae ## double 185.94
.quad 0x4066dc28f5c28f5c ## double 182.88
.quad 0x40671c28f5c28f5c ## double 184.88
.quad 0x4066800000000000 ## double 180
.quad 0x40665e147ae147ae ## double 178.94
.quad 0x4066200000000000 ## double 177
.quad 0x40651e147ae147ae ## double 168.94
.quad 0x40655e147ae147ae ## double 170.94
.quad 0x4065300000000000 ## double 169.5
.quad 0x40657c28f5c28f5c ## double 171.88
.quad 0x4065be147ae147ae ## double 173.94
.quad 0x406676147ae147ae ## double 179.69
.quad 0x4066800000000000 ## double 180
.quad 0x40663e147ae147ae ## double 177.94
.quad 0x4065d80000000000 ## double 174.75
.quad 0x4065d00000000000 ## double 174.5
.quad 0x4065c9eb851eb852 ## double 174.31
.quad 0x4066480000000000 ## double 178.25
.quad 0x4066080000000000 ## double 176.25
.quad 0x406659eb851eb852 ## double 178.81
.quad 0x40651c28f5c28f5c ## double 168.88
.quad 0x4065300000000000 ## double 169.5
.quad 0x40651c28f5c28f5c ## double 168.88
.quad 0x4065600000000000 ## double 171
.quad 0x4066100000000000 ## double 176.5
.quad 0x4066200000000000 ## double 177
.quad 0x406696147ae147ae ## double 180.69
.quad 0x4067100000000000 ## double 184.5
.quad 0x4066fe147ae147ae ## double 183.94
.quad 0x4066a9eb851eb852 ## double 181.31
.quad 0x406759eb851eb852 ## double 186.81
.quad 0x4067500000000000 ## double 186.5
.quad 0x4066d428f5c28f5c ## double 182.63
.quad 0x4067300000000000 ## double 185.5
.quad 0x4068c80000000000 ## double 198.25
.quad 0x4068e80000000000 ## double 199.25
.quad 0x4068100000000000 ## double 192.5
.quad 0x4067300000000000 ## double 185.5
.quad 0x40671e147ae147ae ## double 184.94
.quad 0x40676e147ae147ae ## double 187.44
.quad 0x4067c00000000000 ## double 190
.quad 0x4067ae147ae147ae ## double 189.44
.quad 0x4068000000000000 ## double 192
.quad 0x40680c28f5c28f5c ## double 192.38
.quad 0x4068180000000000 ## double 192.75
.quad 0x4067bc28f5c28f5c ## double 189.88
.quad 0x4067500000000000 ## double 186.5
.quad 0x406766147ae147ae ## double 187.19
.quad 0x40679428f5c28f5c ## double 188.63
.quad 0x40679e147ae147ae ## double 188.94
.quad 0x4067be147ae147ae ## double 189.94
.quad 0x40677e147ae147ae ## double 187.94
.quad 0x40672c28f5c28f5c ## double 185.38
.quad 0x4066e00000000000 ## double 183
.quad 0x40665e147ae147ae ## double 178.94
.quad 0x406586147ae147ae ## double 172.19
.quad 0x40653428f5c28f5c ## double 169.63
.quad 0x4064dc28f5c28f5c ## double 166.88
.quad 0x4064b9eb851eb852 ## double 165.81
.quad 0x4064e00000000000 ## double 167
.quad 0x40650e147ae147ae ## double 168.44
.quad 0x4065400000000000 ## double 170
.quad 0x40653e147ae147ae ## double 169.94
.quad 0x40653c28f5c28f5c ## double 169.88
.quad 0x406506147ae147ae ## double 168.19
.quad 0x4064ac28f5c28f5c ## double 165.38
.quad 0x40652c28f5c28f5c ## double 169.38
.quad 0x4065280000000000 ## double 169.25
.quad 0x4065400000000000 ## double 170
.quad 0x406551eb851eb852 ## double 170.56
.quad 0x406546147ae147ae ## double 170.19
.quad 0x4064de147ae147ae ## double 166.94
.quad 0x4064e6147ae147ae ## double 167.19
.quad 0x4064e00000000000 ## double 167
.quad 0x40640428f5c28f5c ## double 160.13
.quad 0x4063fc28f5c28f5c ## double 159.88
.quad 0x4063f428f5c28f5c ## double 159.63
.quad 0x40642c28f5c28f5c ## double 161.38
.quad 0x4063f6147ae147ae ## double 159.69
.quad 0x4063b00000000000 ## double 157.5
.quad 0x4063fc28f5c28f5c ## double 159.88
.quad 0x4063dc28f5c28f5c ## double 158.88
.quad 0x4063980000000000 ## double 156.75
.quad 0x406306147ae147ae ## double 152.19
.quad 0x4062de147ae147ae ## double 150.94
.quad 0x4062b1eb851eb852 ## double 149.56
.quad 0x4062b80000000000 ## double 149.75
.quad 0x4062be147ae147ae ## double 149.94
.quad 0x4062b6147ae147ae ## double 149.69
.quad 0x4062b9eb851eb852 ## double 149.81
.quad 0x4062b00000000000 ## double 149.5
.quad 0x40625c28f5c28f5c ## double 146.88
.quad 0x406259eb851eb852 ## double 146.81
.quad 0x40620e147ae147ae ## double 144.44
.quad 0x4061ee147ae147ae ## double 143.44
.quad 0x4061f00000000000 ## double 143.5
.quad 0x4061f6147ae147ae ## double 143.69
.quad 0x4061ce147ae147ae ## double 142.44
.quad 0x406179eb851eb852 ## double 139.81
.quad 0x4061300000000000 ## double 137.5
.quad 0x4061580000000000 ## double 138.75
.quad 0x4060800000000000 ## double 132
.quad 0x4060500000000000 ## double 130.5
.quad 0x40609c28f5c28f5c ## double 132.88
.quad 0x405ffc28f5c28f5c ## double 127.94
.quad 0x405ee00000000000 ## double 123.5
.quad 0x405eb7ae147ae148 ## double 122.87
.quad 0x405f000000000000 ## double 124
.quad 0x405ef00000000000 ## double 123.75
.quad 0x405f500000000000 ## double 125.25
.quad 0x405f9c28f5c28f5c ## double 126.44
.quad 0x4060700000000000 ## double 131.5
.quad 0x4060c428f5c28f5c ## double 134.13
.quad 0x406106147ae147ae ## double 136.19
.quad 0x4060b6147ae147ae ## double 133.69
.quad 0x4060de147ae147ae ## double 134.94
.quad 0x4060a00000000000 ## double 133
.quad 0x40605428f5c28f5c ## double 130.63
.quad 0x40601428f5c28f5c ## double 128.63
.quad 0x405fd00000000000 ## double 127.25
.quad 0x4060080000000000 ## double 128.25
.quad 0x40605e147ae147ae ## double 130.94
.quad 0x406056147ae147ae ## double 130.69
.quad 0x40603e147ae147ae ## double 129.94
.quad 0x405fa3d70a3d70a4 ## double 126.56
.quad 0x405ea00000000000 ## double 122.5
.quad 0x405fdc28f5c28f5c ## double 127.44
.quad 0x405f7c28f5c28f5c ## double 125.94
.quad 0x405eb7ae147ae148 ## double 122.87
.quad 0x405eac28f5c28f5c ## double 122.69
.quad 0x405ef00000000000 ## double 123.75
.quad 0x405dbc28f5c28f5c ## double 118.94
.quad 0x405ed7ae147ae148 ## double 123.37
.quad 0x405f73d70a3d70a4 ## double 125.81
.quad 0x4060180000000000 ## double 128.75
.quad 0x4060780000000000 ## double 131.75
.quad 0x40605c28f5c28f5c ## double 130.88
.quad 0x406016147ae147ae ## double 128.69
.quad 0x4060000000000000 ## double 128
.quad 0x406041eb851eb852 ## double 130.06
.quad 0x4060600000000000 ## double 131
.quad 0x4060300000000000 ## double 129.5
.quad 0x405f800000000000 ## double 126
.quad 0x405fd7ae147ae148 ## double 127.37
.quad 0x40600c28f5c28f5c ## double 128.38
.quad 0x406031eb851eb852 ## double 129.56
.quad 0x4060100000000000 ## double 128.5
.quad 0x40604428f5c28f5c ## double 130.13
.quad 0x4060c80000000000 ## double 134.25
.quad 0x40605e147ae147ae ## double 130.94
.quad 0x4060280000000000 ## double 129.25
.quad 0x4060c80000000000 ## double 134.25
.quad 0x4060dc28f5c28f5c ## double 134.88
.quad 0x40614428f5c28f5c ## double 138.13
.quad 0x4060b80000000000 ## double 133.75
.quad 0x40600e147ae147ae ## double 128.44
.quad 0x405fbc28f5c28f5c ## double 126.94
.quad 0x405f700000000000 ## double 125.75
.quad 0x405f7c28f5c28f5c ## double 125.94
.quad 0x4060200000000000 ## double 129
.quad 0x40600e147ae147ae ## double 128.44
.quad 0x4060600000000000 ## double 131
.quad 0x405ec00000000000 ## double 123
.quad 0x405e17ae147ae148 ## double 120.37
.quad 0x405dbc28f5c28f5c ## double 118.94
.quad 0x405e000000000000 ## double 120
.quad 0x405e27ae147ae148 ## double 120.62
.quad 0x405e000000000000 ## double 120
.quad 0x405dd00000000000 ## double 119.25
.quad 0x405da00000000000 ## double 118.5
.quad 0x405ce3d70a3d70a4 ## double 115.56
.quad 0x405ca00000000000 ## double 114.5
.quad 0x405cc3d70a3d70a4 ## double 115.06
.quad 0x405d27ae147ae148 ## double 116.62
.quad 0x405d3c28f5c28f5c ## double 116.94
.quad 0x405d13d70a3d70a4 ## double 116.31
.quad 0x405cb3d70a3d70a4 ## double 114.81
.quad 0x405c5c28f5c28f5c ## double 113.44
.quad 0x405cac28f5c28f5c ## double 114.69
.quad 0x405c07ae147ae148 ## double 112.12
.quad 0x405c13d70a3d70a4 ## double 112.31
.quad 0x405b600000000000 ## double 109.5
.quad 0x405b500000000000 ## double 109.25
.quad 0x405b47ae147ae148 ## double 109.12
.quad 0x405c0c28f5c28f5c ## double 112.19
.quad 0x405bf7ae147ae148 ## double 111.87
.quad 0x405cdc28f5c28f5c ## double 115.44
.quad 0x405d33d70a3d70a4 ## double 116.81
.quad 0x405d83d70a3d70a4 ## double 118.06
.quad 0x405de7ae147ae148 ## double 119.62
.quad 0x405de3d70a3d70a4 ## double 119.56
.quad 0x405e000000000000 ## double 120
.quad 0x405dc00000000000 ## double 119
.quad 0x405d0c28f5c28f5c ## double 116.19
.quad 0x405d27ae147ae148 ## double 116.62
.quad 0x405d6c28f5c28f5c ## double 117.69
.quad 0x405d93d70a3d70a4 ## double 118.31
.quad 0x405e3c28f5c28f5c ## double 120.94
.quad 0x405e67ae147ae148 ## double 121.62
.quad 0x405e600000000000 ## double 121.5
.quad 0x405ec00000000000 ## double 123
.quad 0x405ee7ae147ae148 ## double 123.62
.quad 0x405f0c28f5c28f5c ## double 124.19
.quad 0x405f73d70a3d70a4 ## double 125.81
.quad 0x405fc00000000000 ## double 127
.quad 0x405f700000000000 ## double 125.75
.quad 0x405ff00000000000 ## double 127.75
.quad 0x406029eb851eb852 ## double 129.31
.quad 0x405e900000000000 ## double 122.25
.quad 0x405e000000000000 ## double 120
.quad 0x405eb00000000000 ## double 122.75
.quad 0x405e000000000000 ## double 120
.quad 0x405d700000000000 ## double 117.75
.quad 0x405dbc28f5c28f5c ## double 118.94
.quad 0x405d900000000000 ## double 118.25
.quad 0x405db7ae147ae148 ## double 118.87
.quad 0x405d57ae147ae148 ## double 117.37
.quad 0x405d5c28f5c28f5c ## double 117.44
.quad 0x405d37ae147ae148 ## double 116.87
.quad 0x405da00000000000 ## double 118.5
.quad 0x405d27ae147ae148 ## double 116.62
.quad 0x405d7c28f5c28f5c ## double 117.94
.quad 0x405d67ae147ae148 ## double 117.62
.quad 0x405e000000000000 ## double 120
.quad 0x405da00000000000 ## double 118.5
.quad 0x405bd00000000000 ## double 111.25
.quad 0x405afc28f5c28f5c ## double 107.94
.quad 0x405b800000000000 ## double 110
.quad 0x405b73d70a3d70a4 ## double 109.81
.quad 0x405a900000000000 ## double 106.25
.quad 0x405af3d70a3d70a4 ## double 107.81
.quad 0x405aa00000000000 ## double 106.5
.quad 0x405a67ae147ae148 ## double 105.62
.quad 0x405a73d70a3d70a4 ## double 105.81
.quad 0x405aa00000000000 ## double 106.5
.quad 0x405a97ae147ae148 ## double 106.37
.quad 0x405aac28f5c28f5c ## double 106.69
.quad 0x405a37ae147ae148 ## double 104.87
.quad 0x405a3c28f5c28f5c ## double 104.94
.quad 0x405a37ae147ae148 ## double 104.87
.quad 0x405a600000000000 ## double 105.5
.quad 0x405aa7ae147ae148 ## double 106.62
.quad 0x405aac28f5c28f5c ## double 106.69
.quad 0x4059ec28f5c28f5c ## double 103.69
.quad 0x4059d7ae147ae148 ## double 103.37
.quad 0x4059d00000000000 ## double 103.25
.quad 0x4059c00000000000 ## double 103
.quad 0x405993d70a3d70a4 ## double 102.31
.quad 0x405973d70a3d70a4 ## double 101.81
.quad 0x405957ae147ae148 ## double 101.37
.quad 0x405947ae147ae148 ## double 101.12
.quad 0x405927ae147ae148 ## double 100.62
.quad 0x4058f7ae147ae148 ## double 99.870000000000005
.quad 0x40588c28f5c28f5c ## double 98.189999999999998
.quad 0x4058900000000000 ## double 98.25
.quad 0x4058f3d70a3d70a4 ## double 99.810000000000002
.quad 0x405917ae147ae148 ## double 100.37
.quad 0x4059d3d70a3d70a4 ## double 103.31
.quad 0x4059a00000000000 ## double 102.5
.quad 0x405a23d70a3d70a4 ## double 104.56
.quad 0x405a67ae147ae148 ## double 105.62
.quad 0x405a77ae147ae148 ## double 105.87
.quad 0x405a500000000000 ## double 105.25
.quad 0x4059b7ae147ae148 ## double 102.87
.quad 0x4059fc28f5c28f5c ## double 103.94
.quad 0x4059dc28f5c28f5c ## double 103.44
.quad 0x405a27ae147ae148 ## double 104.62
.quad 0x4059e7ae147ae148 ## double 103.62
.quad 0x4059ec28f5c28f5c ## double 103.69
.quad 0x4059e7ae147ae148 ## double 103.62
.quad 0x4059d00000000000 ## double 103.25
.quad 0x4059d00000000000 ## double 103.25
.quad 0x40599c28f5c28f5c ## double 102.44
.quad 0x4058e7ae147ae148 ## double 99.620000000000005
.quad 0x4058e7ae147ae148 ## double 99.620000000000005
.quad 0x4058e3d70a3d70a4 ## double 99.560000000000002
.quad 0x4058f3d70a3d70a4 ## double 99.810000000000002
.quad 0x4059100000000000 ## double 100.25
.quad 0x4059500000000000 ## double 101.25
.quad 0x4058e3d70a3d70a4 ## double 99.560000000000002
.quad 0x4058d3d70a3d70a4 ## double 99.310000000000002
.quad 0x4058800000000000 ## double 98
.quad 0x405877ae147ae148 ## double 97.870000000000005
.quad 0x4058fc28f5c28f5c ## double 99.939999999999998
.quad 0x4059000000000000 ## double 100
.quad 0x405927ae147ae148 ## double 100.62
.quad 0x4059800000000000 ## double 102
.quad 0x405b17ae147ae148 ## double 108.37
.quad 0x405a4c28f5c28f5c ## double 105.19
.quad 0x405a3c28f5c28f5c ## double 104.94
.quad 0x405a000000000000 ## double 104
.quad 0x40599c28f5c28f5c ## double 102.44
.quad 0x405947ae147ae148 ## double 101.12
.quad 0x405a37ae147ae148 ## double 104.87
.quad 0x405a600000000000 ## double 105.5
.quad 0x405a400000000000 ## double 105
.quad 0x405acc28f5c28f5c ## double 107.19
.quad 0x405ab3d70a3d70a4 ## double 106.81
.quad 0x405a700000000000 ## double 105.75
.quad 0x405a4c28f5c28f5c ## double 105.19
.quad 0x405a600000000000 ## double 105.5
.quad 0x4059ec28f5c28f5c ## double 103.69
.quad 0x4059700000000000 ## double 101.75
.quad 0x405933d70a3d70a4 ## double 100.81
.quad 0x405a07ae147ae148 ## double 104.12
.quad 0x405a300000000000 ## double 104.75
.quad 0x4059bc28f5c28f5c ## double 102.94
.quad 0x4059bc28f5c28f5c ## double 102.94
.quad 0x405a600000000000 ## double 105.5
.quad 0x405a500000000000 ## double 105.25
.quad 0x40597c28f5c28f5c ## double 101.94
.quad 0x4059dc28f5c28f5c ## double 103.44
.quad 0x4059fc28f5c28f5c ## double 103.94
.quad 0x405b67ae147ae148 ## double 109.62
.quad 0x405c100000000000 ## double 112.25
.quad 0x405c600000000000 ## double 113.5
.quad 0x405c37ae147ae148 ## double 112.87
.quad 0x405bd3d70a3d70a4 ## double 111.31
.quad 0x405be7ae147ae148 ## double 111.62
.quad 0x405c0c28f5c28f5c ## double 112.19
.quad 0x405c53d70a3d70a4 ## double 113.31
.quad 0x405b7c28f5c28f5c ## double 109.94
.quad 0x405b77ae147ae148 ## double 109.87
.quad 0x405ae00000000000 ## double 107.5
.quad 0x405a700000000000 ## double 105.75
.quad 0x405a97ae147ae148 ## double 106.37
.quad 0x405a400000000000 ## double 105
.quad 0x4059fc28f5c28f5c ## double 103.94
.quad 0x405a17ae147ae148 ## double 104.37
.quad 0x405a37ae147ae148 ## double 104.87
.quad 0x405983d70a3d70a4 ## double 102.06
.quad 0x4058d7ae147ae148 ## double 99.370000000000005
.quad 0x4058e00000000000 ## double 99.5
.quad 0x4059000000000000 ## double 100
.quad 0x405933d70a3d70a4 ## double 100.81
.quad 0x40591c28f5c28f5c ## double 100.44
.quad 0x4059cc28f5c28f5c ## double 103.19
.quad 0x4059fc28f5c28f5c ## double 103.94
.quad 0x4059b00000000000 ## double 102.75
.quad 0x40598c28f5c28f5c ## double 102.19
.quad 0x4058cc28f5c28f5c ## double 99.189999999999998
.quad 0x4058d7ae147ae148 ## double 99.370000000000005
.quad 0x4059900000000000 ## double 102.25
.quad 0x4058fc28f5c28f5c ## double 99.939999999999998
.quad 0x4058a7ae147ae148 ## double 98.620000000000005
.quad 0x4059b3d70a3d70a4 ## double 102.81
.quad 0x4059f7ae147ae148 ## double 103.87
.quad 0x405a7c28f5c28f5c ## double 105.94
.quad 0x405a57ae147ae148 ## double 105.37
.quad 0x4058700000000000 ## double 97.75
.quad 0x4058b7ae147ae148 ## double 98.870000000000005
.quad 0x4059ac28f5c28f5c ## double 102.69
.quad 0x4059b7ae147ae148 ## double 102.87
.quad 0x405a07ae147ae148 ## double 104.12
.quad 0x405a600000000000 ## double 105.5
.quad 0x405a57ae147ae148 ## double 105.37
.quad 0x405a73d70a3d70a4 ## double 105.81
.quad 0x405ab7ae147ae148 ## double 106.87
.quad 0x405ac7ae147ae148 ## double 107.12
.quad 0x405a500000000000 ## double 105.25
.quad 0x405ad00000000000 ## double 107.25
.quad 0x405a23d70a3d70a4 ## double 104.56
.quad 0x405a9c28f5c28f5c ## double 106.44
.quad 0x405adc28f5c28f5c ## double 107.44
.quad 0x405a47ae147ae148 ## double 105.12
.quad 0x4059cc28f5c28f5c ## double 103.19
.quad 0x4059c00000000000 ## double 103
.quad 0x405a1c28f5c28f5c ## double 104.44
.quad 0x405a400000000000 ## double 105
.quad 0x405a47ae147ae148 ## double 105.12
.quad 0x4058e7ae147ae148 ## double 99.620000000000005
.quad 0x4059100000000000 ## double 100.25
.quad 0x4059500000000000 ## double 101.25
.quad 0x4059200000000000 ## double 100.5
.quad 0x4058cc28f5c28f5c ## double 99.189999999999998
.quad 0x4058800000000000 ## double 98
.quad 0x405897ae147ae148 ## double 98.370000000000005
.quad 0x405903d70a3d70a4 ## double 100.06
.quad 0x405a000000000000 ## double 104
.quad 0x405a13d70a3d70a4 ## double 104.31
.quad 0x405a500000000000 ## double 105.25
.quad 0x405a23d70a3d70a4 ## double 104.56
.quad 0x405a47ae147ae148 ## double 105.12
.quad 0x405a13d70a3d70a4 ## double 104.31
.quad 0x4059b7ae147ae148 ## double 102.87
.quad 0x4059f7ae147ae148 ## double 103.87
.quad 0x405a33d70a3d70a4 ## double 104.81
.quad 0x405a17ae147ae148 ## double 104.37
.quad 0x405ac00000000000 ## double 107
.quad 0x405aa7ae147ae148 ## double 106.62
.quad 0x405afc28f5c28f5c ## double 107.94
.quad 0x405b1c28f5c28f5c ## double 108.44
.quad 0x405b000000000000 ## double 108
.quad 0x405a07ae147ae148 ## double 104.12
.quad 0x405a200000000000 ## double 104.5
.quad 0x405a47ae147ae148 ## double 105.12
.quad 0x405a800000000000 ## double 106
.quad 0x405a67ae147ae148 ## double 105.62
.quad 0x405a53d70a3d70a4 ## double 105.31
.quad 0x405ac7ae147ae148 ## double 107.12
.quad 0x405b500000000000 ## double 109.25
.quad 0x405b5c28f5c28f5c ## double 109.44
.quad 0x405afc28f5c28f5c ## double 107.94
.quad 0x405aa00000000000 ## double 106.5
.quad 0x405a7c28f5c28f5c ## double 105.94
.quad 0x405ac00000000000 ## double 107
.quad 0x405a800000000000 ## double 106
.quad 0x405a27ae147ae148 ## double 104.62
.quad 0x405ab7ae147ae148 ## double 106.87
.quad 0x405b1c28f5c28f5c ## double 108.44
.quad 0x405b1c28f5c28f5c ## double 108.44
.quad 0x405ac00000000000 ## double 107
.quad 0x4059e00000000000 ## double 103.5
.quad 0x405a93d70a3d70a4 ## double 106.31
.quad 0x405a500000000000 ## double 105.25
.quad 0x4059700000000000 ## double 101.75
.quad 0x4058800000000000 ## double 98
.quad 0x4058100000000000 ## double 96.25
.quad 0x40581c28f5c28f5c ## double 96.439999999999998
.quad 0x405857ae147ae148 ## double 97.370000000000005
.quad 0x40580c28f5c28f5c ## double 96.189999999999998
.quad 0x40582c28f5c28f5c ## double 96.689999999999998
.quad 0x4057f00000000000 ## double 95.75
.quad 0x4057e00000000000 ## double 95.5
.quad 0x4057c7ae147ae148 ## double 95.120000000000005
.quad 0x405767ae147ae148 ## double 93.620000000000005
.quad 0x4057100000000000 ## double 92.25
.quad 0x4057200000000000 ## double 92.5
.quad 0x4057000000000000 ## double 92
.quad 0x40573c28f5c28f5c ## double 92.939999999999998
.quad 0x4057700000000000 ## double 93.75
.quad 0x4057100000000000 ## double 92.25
.quad 0x4056d7ae147ae148 ## double 91.370000000000005
.quad 0x4056fc28f5c28f5c ## double 91.939999999999998
.quad 0x4056b00000000000 ## double 90.75
.quad 0x4056600000000000 ## double 89.5
.quad 0x4056b00000000000 ## double 90.75
.quad 0x4056800000000000 ## double 90
.quad 0x4056700000000000 ## double 89.75
.quad 0x405617ae147ae148 ## double 88.370000000000005
.quad 0x4055d7ae147ae148 ## double 87.370000000000005
.quad 0x4055f7ae147ae148 ## double 87.870000000000005
.quad 0x4055e7ae147ae148 ## double 87.620000000000005
.quad 0x4055800000000000 ## double 86
.quad 0x405537ae147ae148 ## double 84.870000000000005
.quad 0x405567ae147ae148 ## double 85.620000000000005
.quad 0x4055a7ae147ae148 ## double 86.620000000000005
.quad 0x4055f7ae147ae148 ## double 87.870000000000005
.quad 0x4056100000000000 ## double 88.25
.quad 0x4056800000000000 ## double 90
.quad 0x4056f7ae147ae148 ## double 91.870000000000005
.quad 0x4066680000000000 ## double 179.25
.quad 0x4065dc28f5c28f5c ## double 174.88
.quad 0x4066000000000000 ## double 176
.quad 0x40662c28f5c28f5c ## double 177.38
.quad 0x4065f00000000000 ## double 175.5
.quad 0x40654c28f5c28f5c ## double 170.38
.quad 0x4065b80000000000 ## double 173.75
.quad 0x4065f428f5c28f5c ## double 175.63
.quad 0x4065e00000000000 ## double 175
.quad 0x40662428f5c28f5c ## double 177.13
.quad 0x4065b00000000000 ## double 173.5
.quad 0x4065280000000000 ## double 169.25
.quad 0x4065280000000000 ## double 169.25
.quad 0x4064dc28f5c28f5c ## double 166.88
.quad 0x4064ec28f5c28f5c ## double 167.38
.quad 0x4064c80000000000 ## double 166.25
.quad 0x40645c28f5c28f5c ## double 162.88
.quad 0x40643428f5c28f5c ## double 161.63
.quad 0x40642428f5c28f5c ## double 161.13
.quad 0x4063e00000000000 ## double 159
.quad 0x4062f80000000000 ## double 151.75
.quad 0x40631c28f5c28f5c ## double 152.88
.quad 0x4063a00000000000 ## double 157
.quad 0x4061f80000000000 ## double 143.75
.quad 0x40618428f5c28f5c ## double 140.13
.quad 0x4061a80000000000 ## double 141.25
.quad 0x40618428f5c28f5c ## double 140.13
.quad 0x4061780000000000 ## double 139.75
.quad 0x40614c28f5c28f5c ## double 138.38
.quad 0x4061600000000000 ## double 139
.quad 0x40611c28f5c28f5c ## double 136.88
.quad 0x4060d80000000000 ## double 134.75
.quad 0x4060dc28f5c28f5c ## double 134.88
.quad 0x4061400000000000 ## double 138
.quad 0x40611c28f5c28f5c ## double 136.88
.quad 0x4060b00000000000 ## double 133.5
.quad 0x40606428f5c28f5c ## double 131.13
.quad 0x4060880000000000 ## double 132.25
.quad 0x40612c28f5c28f5c ## double 137.38
.quad 0x4061380000000000 ## double 137.75
.quad 0x4061700000000000 ## double 139.5
.quad 0x4061b428f5c28f5c ## double 141.63
.quad 0x4061a428f5c28f5c ## double 141.13
.quad 0x40614c28f5c28f5c ## double 138.38
.quad 0x40611c28f5c28f5c ## double 136.88
.quad 0x40612428f5c28f5c ## double 137.13
.quad 0x4061780000000000 ## double 139.75
.quad 0x4061580000000000 ## double 138.75
.quad 0x4061980000000000 ## double 140.75
.quad 0x4061e428f5c28f5c ## double 143.13
.quad 0x40620c28f5c28f5c ## double 144.38
.quad 0x4062480000000000 ## double 146.25
.quad 0x4062680000000000 ## double 147.25
.quad 0x4062b80000000000 ## double 149.75
.quad 0x4062600000000000 ## double 147
.quad 0x40627428f5c28f5c ## double 147.63
.quad 0x4062a00000000000 ## double 149
.quad 0x4062500000000000 ## double 146.5
.quad 0x4062580000000000 ## double 146.75
.quad 0x4062580000000000 ## double 146.75
.quad 0x4062380000000000 ## double 145.75
.quad 0x40625c28f5c28f5c ## double 146.88
.quad 0x4062700000000000 ## double 147.5
.quad 0x4062800000000000 ## double 148
.quad 0x4062000000000000 ## double 144
.quad 0x40619c28f5c28f5c ## double 140.88
.quad 0x4061f00000000000 ## double 143.5
.quad 0x40623c28f5c28f5c ## double 145.88
.quad 0x4062300000000000 ## double 145.5
.quad 0x4062780000000000 ## double 147.75
.quad 0x40628428f5c28f5c ## double 148.13
.quad 0x40625428f5c28f5c ## double 146.63
.quad 0x4062280000000000 ## double 145.25
.quad 0x4062980000000000 ## double 148.75
.quad 0x4062c00000000000 ## double 150
.quad 0x4062b80000000000 ## double 149.75
.quad 0x40633428f5c28f5c ## double 153.63
.quad 0x4063580000000000 ## double 154.75
.quad 0x4063ac28f5c28f5c ## double 157.38
.quad 0x4063bc28f5c28f5c ## double 157.88
.quad 0x4063c80000000000 ## double 158.25
.quad 0x4063980000000000 ## double 156.75
.quad 0x4063480000000000 ## double 154.25
.quad 0x4062f80000000000 ## double 151.75
.quad 0x4063400000000000 ## double 154
.quad 0x4064080000000000 ## double 160.25
.quad 0x4064000000000000 ## double 160
.quad 0x40654428f5c28f5c ## double 170.13
.quad 0x4064f428f5c28f5c ## double 167.63
.quad 0x4064b80000000000 ## double 165.75
.quad 0x4064d80000000000 ## double 166.75
.quad 0x40650c28f5c28f5c ## double 168.38
.quad 0x4064f428f5c28f5c ## double 167.63
.quad 0x4064c80000000000 ## double 166.25
.quad 0x4064800000000000 ## double 164
.quad 0x4064400000000000 ## double 162
.quad 0x4064880000000000 ## double 164.25
.quad 0x4064780000000000 ## double 163.75
.quad 0x40646c28f5c28f5c ## double 163.38
.quad 0x4063e428f5c28f5c ## double 159.13
.quad 0x40634c28f5c28f5c ## double 154.38
.quad 0x40636c28f5c28f5c ## double 155.38
.quad 0x4063700000000000 ## double 155.5
.quad 0x40637428f5c28f5c ## double 155.63
.quad 0x4063cc28f5c28f5c ## double 158.38
.quad 0x4063800000000000 ## double 156
.quad 0x40636c28f5c28f5c ## double 155.38
.quad 0x4063cc28f5c28f5c ## double 158.38
.quad 0x4063f80000000000 ## double 159.75
.quad 0x4063e00000000000 ## double 159
.quad 0x40633428f5c28f5c ## double 153.63
.quad 0x4063280000000000 ## double 153.25
.quad 0x40635c28f5c28f5c ## double 154.88
.quad 0x4063d00000000000 ## double 158.5
.quad 0x4063d428f5c28f5c ## double 158.63
.quad 0x40643c28f5c28f5c ## double 161.88
.quad 0x40640428f5c28f5c ## double 160.13
.quad 0x4063c00000000000 ## double 158
.quad 0x40645428f5c28f5c ## double 162.63
.quad 0x4064800000000000 ## double 164
.quad 0x4064c00000000000 ## double 166
.quad 0x4064700000000000 ## double 163.5
.quad 0x4063f428f5c28f5c ## double 159.63
.quad 0x4064080000000000 ## double 160.25
.quad 0x4064180000000000 ## double 160.75
.quad 0x4063d00000000000 ## double 158.5
.quad 0x4063dc28f5c28f5c ## double 158.88
.quad 0x40635c28f5c28f5c ## double 154.88
.quad 0x4063e00000000000 ## double 159
.quad 0x40635c28f5c28f5c ## double 154.88
.quad 0x4062c00000000000 ## double 150
.quad 0x4062300000000000 ## double 145.5
.quad 0x40613428f5c28f5c ## double 137.63
.quad 0x4060fc28f5c28f5c ## double 135.88
.quad 0x4060f80000000000 ## double 135.75
.quad 0x4060dc28f5c28f5c ## double 134.88
.quad 0x4060d00000000000 ## double 134.5
.quad 0x4060e428f5c28f5c ## double 135.13
.quad 0x4060bc28f5c28f5c ## double 133.88
.quad 0x4060700000000000 ## double 131.5
.quad 0x40602c28f5c28f5c ## double 129.38
.quad 0x40603428f5c28f5c ## double 129.63
.quad 0x40602428f5c28f5c ## double 129.13
.quad 0x405fe00000000000 ## double 127.5
.quad 0x405ff00000000000 ## double 127.75
.quad 0x40600428f5c28f5c ## double 128.13
.quad 0x4060000000000000 ## double 128
.quad 0x4060380000000000 ## double 129.75
.quad 0x40603c28f5c28f5c ## double 129.88
.quad 0x4060580000000000 ## double 130.75
.quad 0x4060ec28f5c28f5c ## double 135.38
.quad 0x4060300000000000 ## double 129.5
.quad 0x40602428f5c28f5c ## double 129.13
.quad 0x4060300000000000 ## double 129.5
.quad 0x4060680000000000 ## double 131.25
.quad 0x4060400000000000 ## double 130
.quad 0x40604428f5c28f5c ## double 130.13
.quad 0x4060000000000000 ## double 128
.quad 0x40602c28f5c28f5c ## double 129.38
.quad 0x4060300000000000 ## double 129.5
.quad 0x4060000000000000 ## double 128
.quad 0x405fc7ae147ae148 ## double 127.12
.quad 0x405f900000000000 ## double 126.25
.quad 0x405f77ae147ae148 ## double 125.87
.quad 0x405f500000000000 ## double 125.25
.quad 0x405fc7ae147ae148 ## double 127.12
.quad 0x405f600000000000 ## double 125.5
.quad 0x405ff7ae147ae148 ## double 127.87
.quad 0x405fb7ae147ae148 ## double 126.87
.quad 0x405fb7ae147ae148 ## double 126.87
.quad 0x405f300000000000 ## double 124.75
.quad 0x405f37ae147ae148 ## double 124.87
.quad 0x405f47ae147ae148 ## double 125.12
.quad 0x405f500000000000 ## double 125.25
.quad 0x405ee7ae147ae148 ## double 123.62
.quad 0x405ed7ae147ae148 ## double 123.37
.quad 0x405eb7ae147ae148 ## double 122.87
.quad 0x405d900000000000 ## double 118.25
.quad 0x405d900000000000 ## double 118.25
.quad 0x405dd7ae147ae148 ## double 119.37
.quad 0x405d700000000000 ## double 117.75
.quad 0x405cf7ae147ae148 ## double 115.87
.quad 0x405ca00000000000 ## double 114.5
.quad 0x405ce7ae147ae148 ## double 115.62
.quad 0x405cd7ae147ae148 ## double 115.37
.quad 0x405cc00000000000 ## double 115
.quad 0x405ce00000000000 ## double 115.5
.quad 0x405cf00000000000 ## double 115.75
.quad 0x405c300000000000 ## double 112.75
.quad 0x405c47ae147ae148 ## double 113.12
.quad 0x405c67ae147ae148 ## double 113.62
.quad 0x405c77ae147ae148 ## double 113.87
.quad 0x405c300000000000 ## double 112.75
.quad 0x405bb00000000000 ## double 110.75
.quad 0x405b97ae147ae148 ## double 110.37
.quad 0x405bd00000000000 ## double 111.25
.quad 0x405be7ae147ae148 ## double 111.62
.quad 0x405be00000000000 ## double 111.5
.quad 0x405c200000000000 ## double 112.5
.quad 0x405c500000000000 ## double 113.25
.quad 0x405c700000000000 ## double 113.75
.quad 0x405c77ae147ae148 ## double 113.87
.quad 0x405c37ae147ae148 ## double 112.87
.quad 0x405b87ae147ae148 ## double 110.12
.quad 0x405b57ae147ae148 ## double 109.37
.quad 0x405b57ae147ae148 ## double 109.37
.quad 0x405b400000000000 ## double 109
.quad 0x405b37ae147ae148 ## double 108.87
.quad 0x405b000000000000 ## double 108
.quad 0x405a600000000000 ## double 105.5
.quad 0x405a07ae147ae148 ## double 104.12
.quad 0x4059f00000000000 ## double 103.75
.quad 0x405727ae147ae148 ## double 92.620000000000005
.quad 0x4057500000000000 ## double 93.25
.quad 0x405757ae147ae148 ## double 93.370000000000005
.quad 0x4057d00000000000 ## double 95.25
.quad 0x4057e7ae147ae148 ## double 95.620000000000005
.quad 0x405877ae147ae148 ## double 97.870000000000005
.quad 0x4057f00000000000 ## double 95.75
.quad 0x4057b7ae147ae148 ## double 94.870000000000005
.quad 0x4058000000000000 ## double 96
.quad 0x405867ae147ae148 ## double 97.620000000000005
.quad 0x4058e00000000000 ## double 99.5
.quad 0x4059100000000000 ## double 100.25
.quad 0x4058d00000000000 ## double 99.25
.quad 0x4058d00000000000 ## double 99.25
.quad 0x405907ae147ae148 ## double 100.12
.quad 0x4059500000000000 ## double 101.25
.quad 0x4059900000000000 ## double 102.25
.quad 0x4058f7ae147ae148 ## double 99.870000000000005
.quad 0x4058e7ae147ae148 ## double 99.620000000000005
.quad 0x405907ae147ae148 ## double 100.12
.quad 0x4058f00000000000 ## double 99.75
.quad 0x405907ae147ae148 ## double 100.12
.quad 0x4059700000000000 ## double 101.75
.quad 0x4059900000000000 ## double 102.25
.quad 0x4059a7ae147ae148 ## double 102.62
.quad 0x4059a00000000000 ## double 102.5
.quad 0x4059b00000000000 ## double 102.75
.quad 0x405a100000000000 ## double 104.25
.quad 0x405a07ae147ae148 ## double 104.12
.quad 0x4059f7ae147ae148 ## double 103.87
.quad 0x405a17ae147ae148 ## double 104.37
.quad 0x4059d00000000000 ## double 103.25
.quad 0x4059800000000000 ## double 102
.quad 0x405a37ae147ae148 ## double 104.87
.quad 0x405a97ae147ae148 ## double 106.37
.quad 0x405aa7ae147ae148 ## double 106.62
.quad 0x405aa00000000000 ## double 106.5
.quad 0x405b100000000000 ## double 108.25
.quad 0x405ae7ae147ae148 ## double 107.62
.quad 0x405b17ae147ae148 ## double 108.37
.quad 0x405b57ae147ae148 ## double 109.37
.quad 0x405b77ae147ae148 ## double 109.87
.quad 0x405ba00000000000 ## double 110.5
.quad 0x405bc00000000000 ## double 111
.quad 0x405c07ae147ae148 ## double 112.12
.quad 0x405c200000000000 ## double 112.5
.quad 0x405be7ae147ae148 ## double 111.62
.quad 0x405b500000000000 ## double 109.25
.quad 0x405b77ae147ae148 ## double 109.87
.quad 0x405b37ae147ae148 ## double 108.87
.quad 0x405b200000000000 ## double 108.5
.quad 0x405b17ae147ae148 ## double 108.37
.quad 0x405af00000000000 ## double 107.75
.quad 0x405a800000000000 ## double 106
.quad 0x405ad00000000000 ## double 107.25
.quad 0x405b200000000000 ## double 108.5
.quad 0x405b400000000000 ## double 109
.quad 0x405b57ae147ae148 ## double 109.37
.quad 0x405b200000000000 ## double 108.5
.quad 0x405b900000000000 ## double 110.25
.quad 0x405b300000000000 ## double 108.75
.quad 0x405b37ae147ae148 ## double 108.87
.quad 0x405b200000000000 ## double 108.5
.quad 0x405ae7ae147ae148 ## double 107.62
.quad 0x405af00000000000 ## double 107.75
.quad 0x405ae7ae147ae148 ## double 107.62
.quad 0x405ac7ae147ae148 ## double 107.12
.quad 0x405ad00000000000 ## double 107.25
.quad 0x405e07ae147ae148 ## double 120.12
.quad 0x405d17ae147ae148 ## double 116.37
.quad 0x405c97ae147ae148 ## double 114.37
.quad 0x405ce7ae147ae148 ## double 115.62
.quad 0x405d97ae147ae148 ## double 118.37
.quad 0x405e37ae147ae148 ## double 120.87
.quad 0x405e07ae147ae148 ## double 120.12
.quad 0x405dd7ae147ae148 ## double 119.37
.quad 0x405df7ae147ae148 ## double 119.87
.quad 0x405e200000000000 ## double 120.5
.quad 0x405d87ae147ae148 ## double 118.12
.quad 0x405c300000000000 ## double 112.75
.quad 0x405c700000000000 ## double 113.75
.quad 0x405c17ae147ae148 ## double 112.37
.quad 0x405c700000000000 ## double 113.75
.quad 0x405c000000000000 ## double 112
.quad 0x405cc7ae147ae148 ## double 115.12
.quad 0x405cf00000000000 ## double 115.75
.quad 0x405d77ae147ae148 ## double 117.87
.quad 0x405e37ae147ae148 ## double 120.87
.quad 0x405f600000000000 ## double 125.5
.quad 0x405f47ae147ae148 ## double 125.12
.quad 0x405e000000000000 ## double 120
.quad 0x405db00000000000 ## double 118.75
.quad 0x405d27ae147ae148 ## double 116.62
.quad 0x405d000000000000 ## double 116
.quad 0x405d77ae147ae148 ## double 117.87
.quad 0x405d37ae147ae148 ## double 116.87
.quad 0x405d600000000000 ## double 117.5
.quad 0x405df7ae147ae148 ## double 119.87
.quad 0x405de00000000000 ## double 119.5
.quad 0x405e500000000000 ## double 121.25
.quad 0x405e700000000000 ## double 121.75
.quad 0x405fd7ae147ae148 ## double 127.37
.quad 0x40601c28f5c28f5c ## double 128.88
.quad 0x40601c28f5c28f5c ## double 128.88
.quad 0x405f600000000000 ## double 125.5
.quad 0x405f87ae147ae148 ## double 126.12
.quad 0x405f67ae147ae148 ## double 125.62
.quad 0x405e27ae147ae148 ## double 120.62
.quad 0x405e37ae147ae148 ## double 120.87
.quad 0x405db7ae147ae148 ## double 118.87
.quad 0x405da00000000000 ## double 118.5
.quad 0x405cc00000000000 ## double 115
.quad 0x405cb7ae147ae148 ## double 114.87
.quad 0x405cf7ae147ae148 ## double 115.87
.quad 0x405c97ae147ae148 ## double 114.37
.quad 0x405ca00000000000 ## double 114.5
.quad 0x405ca7ae147ae148 ## double 114.62
.quad 0x405cc7ae147ae148 ## double 115.12
.quad 0x405c17ae147ae148 ## double 112.37
.quad 0x405b77ae147ae148 ## double 109.87
.quad 0x405b97ae147ae148 ## double 110.37
.quad 0x405b77ae147ae148 ## double 109.87
.quad 0x405b400000000000 ## double 109
.quad 0x405aa7ae147ae148 ## double 106.62
.quad 0x405a900000000000 ## double 106.25
.quad 0x405b000000000000 ## double 108
.quad 0x405b400000000000 ## double 109
.quad 0x405a200000000000 ## double 104.5
.quad 0x4059f7ae147ae148 ## double 103.87
.quad 0x405a07ae147ae148 ## double 104.12
.quad 0x4058100000000000 ## double 96.25
.quad 0x4056500000000000 ## double 89.25
.quad 0x4055f00000000000 ## double 87.75
.quad 0x4055a00000000000 ## double 86.5
.quad 0x4055d00000000000 ## double 87.25
.quad 0x4056200000000000 ## double 88.5
.quad 0x4056500000000000 ## double 89.25
.quad 0x405647ae147ae148 ## double 89.120000000000005
.quad 0x4056500000000000 ## double 89.25
.quad 0x4056300000000000 ## double 88.75
.quad 0x405647ae147ae148 ## double 89.120000000000005
.quad 0x4056d00000000000 ## double 91.25
.quad 0x405707ae147ae148 ## double 92.120000000000005
.quad 0x4056f00000000000 ## double 91.75
.quad 0x4056d7ae147ae148 ## double 91.370000000000005
.quad 0x4057000000000000 ## double 92
.quad 0x405707ae147ae148 ## double 92.120000000000005
.quad 0x4056f7ae147ae148 ## double 91.870000000000005
.quad 0x4056c7ae147ae148 ## double 91.120000000000005
.quad 0x4057000000000000 ## double 92
.quad 0x4056f7ae147ae148 ## double 91.870000000000005
.quad 0x405687ae147ae148 ## double 90.120000000000005
.quad 0x4057000000000000 ## double 92
.quad 0x4057c7ae147ae148 ## double 95.120000000000005
.quad 0x4057d7ae147ae148 ## double 95.370000000000005
.quad 0x4058300000000000 ## double 96.75
.quad 0x4058500000000000 ## double 97.25
.quad 0x405837ae147ae148 ## double 96.870000000000005
.quad 0x4057e00000000000 ## double 95.5
.quad 0x4058200000000000 ## double 96.5
.quad 0x405847ae147ae148 ## double 97.120000000000005
.quad 0x4058500000000000 ## double 97.25
.quad 0x4058400000000000 ## double 97
.quad 0x405887ae147ae148 ## double 98.120000000000005
.quad 0x4058600000000000 ## double 97.5
.quad 0x4058600000000000 ## double 97.5
.quad 0x405857ae147ae148 ## double 97.370000000000005
.quad 0x405827ae147ae148 ## double 96.620000000000005
.quad 0x405817ae147ae148 ## double 96.370000000000005
.quad 0x4057b7ae147ae148 ## double 94.870000000000005
.quad 0x4057d7ae147ae148 ## double 95.370000000000005
.quad 0x4057f7ae147ae148 ## double 95.870000000000005
.quad 0x4058300000000000 ## double 96.75
.quad 0x4058100000000000 ## double 96.25
.quad 0x405857ae147ae148 ## double 97.370000000000005
.quad 0x4058800000000000 ## double 98
.quad 0x4058900000000000 ## double 98.25
.quad 0x4058f00000000000 ## double 99.75
.quad 0x405907ae147ae148 ## double 100.12
.quad 0x405927ae147ae148 ## double 100.62
.quad 0x405997ae147ae148 ## double 102.37
.quad 0x4059600000000000 ## double 101.5
.quad 0x4058f7ae147ae148 ## double 99.870000000000005
.quad 0x405867ae147ae148 ## double 97.620000000000005
.quad 0x4058c7ae147ae148 ## double 99.120000000000005
.quad 0x4058b00000000000 ## double 98.75
.quad 0x405827ae147ae148 ## double 96.620000000000005
.quad 0x405827ae147ae148 ## double 96.620000000000005
.quad 0x4058a00000000000 ## double 98.5
.quad 0x4058d00000000000 ## double 99.25
.quad 0x4058c00000000000 ## double 99
.quad 0x4058a00000000000 ## double 98.5
.quad 0x405897ae147ae148 ## double 98.370000000000005
.quad 0x4058900000000000 ## double 98.25
.quad 0x4058400000000000 ## double 97
.quad 0x405787ae147ae148 ## double 94.120000000000005
.quad 0x4057900000000000 ## double 94.25
.quad 0x4057600000000000 ## double 93.5
.quad 0x405737ae147ae148 ## double 92.870000000000005
.quad 0x4056c7ae147ae148 ## double 91.120000000000005
.quad 0x405787ae147ae148 ## double 94.120000000000005
.quad 0x4057d7ae147ae148 ## double 95.370000000000005
.quad 0x4057c00000000000 ## double 95
.quad 0x4057a7ae147ae148 ## double 94.620000000000005
.quad 0x4057d00000000000 ## double 95.25
.quad 0x4057e7ae147ae148 ## double 95.620000000000005
.quad 0x4057d00000000000 ## double 95.25
.quad 0x4057d7ae147ae148 ## double 95.370000000000005
.quad 0x4057a00000000000 ## double 94.5
.quad 0x405777ae147ae148 ## double 93.870000000000005
.quad 0x4057b00000000000 ## double 94.75
.quad 0x405797ae147ae148 ## double 94.370000000000005
.quad 0x4058200000000000 ## double 96.5
.quad 0x405887ae147ae148 ## double 98.120000000000005
.quad 0x405837ae147ae148 ## double 96.870000000000005
.quad 0x4057900000000000 ## double 94.25
.quad 0x4057300000000000 ## double 92.75
.quad 0x4057e7ae147ae148 ## double 95.620000000000005
.quad 0x4058b00000000000 ## double 98.75
.quad 0x4058a00000000000 ## double 98.5
.quad 0x405897ae147ae148 ## double 98.370000000000005
.quad 0x4059000000000000 ## double 100
.quad 0x4059500000000000 ## double 101.25
.quad 0x4059a7ae147ae148 ## double 102.62
.quad 0x4059c00000000000 ## double 103
.quad 0x405a200000000000 ## double 104.5
.quad 0x4059f00000000000 ## double 103.75
.quad 0x4059e00000000000 ## double 103.5
.quad 0x4059a7ae147ae148 ## double 102.62
.quad 0x4059f7ae147ae148 ## double 103.87
.quad 0x405a57ae147ae148 ## double 105.37
.quad 0x405aa7ae147ae148 ## double 106.62
.quad 0x405b200000000000 ## double 108.5
.quad 0x405b100000000000 ## double 108.25
.quad 0x405bd7ae147ae148 ## double 111.37
.quad 0x405c800000000000 ## double 114
.quad 0x405ca7ae147ae148 ## double 114.62
.quad 0x405c300000000000 ## double 112.75
.quad 0x405c07ae147ae148 ## double 112.12
.quad 0x405bb7ae147ae148 ## double 110.87
.quad 0x405b67ae147ae148 ## double 109.62
.quad 0x405b97ae147ae148 ## double 110.37
.quad 0x405b97ae147ae148 ## double 110.37
.quad 0x405b800000000000 ## double 110
.quad 0x405b57ae147ae148 ## double 109.37
.quad 0x405b700000000000 ## double 109.75
.quad 0x405b600000000000 ## double 109.5
.quad 0x405bf00000000000 ## double 111.75
.quad 0x405b87ae147ae148 ## double 110.12
.quad 0x405bd00000000000 ## double 111.25
.quad 0x405bd00000000000 ## double 111.25
.quad 0x405bb7ae147ae148 ## double 110.87
.quad 0x405b67ae147ae148 ## double 109.62
.quad 0x405b900000000000 ## double 110.25
.quad 0x405af7ae147ae148 ## double 107.87
.quad 0x405a37ae147ae148 ## double 104.87
.quad 0x405a17ae147ae148 ## double 104.37
.quad 0x405a600000000000 ## double 105.5
.quad 0x405b27ae147ae148 ## double 108.62
.quad 0x405ae00000000000 ## double 107.5
.quad 0x405a07ae147ae148 ## double 104.12
.quad 0x405a100000000000 ## double 104.25
.quad 0x4059800000000000 ## double 102
.quad 0x4059400000000000 ## double 101
.quad 0x4059500000000000 ## double 101.25
.quad 0x405927ae147ae148 ## double 100.62
.quad 0x4058b7ae147ae148 ## double 98.870000000000005
.quad 0x4058a7ae147ae148 ## double 98.620000000000005
.quad 0x405877ae147ae148 ## double 97.870000000000005
.quad 0x405887ae147ae148 ## double 98.120000000000005
.quad 0x405887ae147ae148 ## double 98.120000000000005
.quad 0x405827ae147ae148 ## double 96.620000000000005
.quad 0x4058ac28f5c28f5c ## double 98.689999999999998
.quad 0x4058d00000000000 ## double 99.25
.quad 0x4058d7ae147ae148 ## double 99.370000000000005
.quad 0x4058c7ae147ae148 ## double 99.120000000000005
.quad 0x4058a7ae147ae148 ## double 98.620000000000005
.quad 0x4058800000000000 ## double 98
.quad 0x4057a7ae147ae148 ## double 94.620000000000005
.quad 0x405767ae147ae148 ## double 93.620000000000005
.quad 0x4057800000000000 ## double 94
.quad 0x405757ae147ae148 ## double 93.370000000000005
.quad 0x4057100000000000 ## double 92.25
.quad 0x4056b00000000000 ## double 90.75
.quad 0x4056900000000000 ## double 90.25
.quad 0x4056b00000000000 ## double 90.75
.quad 0x4056d7ae147ae148 ## double 91.370000000000005
.quad 0x4057300000000000 ## double 92.75
.quad 0x4057600000000000 ## double 93.5
.quad 0x4057d7ae147ae148 ## double 95.370000000000005
.quad 0x4057c00000000000 ## double 95
.quad 0x405767ae147ae148 ## double 93.620000000000005
.quad 0x4057f00000000000 ## double 95.75
.quad 0x4058600000000000 ## double 97.5
.quad 0x405877ae147ae148 ## double 97.870000000000005
.quad 0x405877ae147ae148 ## double 97.870000000000005
.quad 0x405867ae147ae148 ## double 97.620000000000005
.quad 0x4057d7ae147ae148 ## double 95.370000000000005
.quad 0x4057700000000000 ## double 93.75
.quad 0x405807ae147ae148 ## double 96.120000000000005
.quad 0x4057e7ae147ae148 ## double 95.620000000000005
.quad 0x4057f7ae147ae148 ## double 95.870000000000005
.quad 0x4057d7ae147ae148 ## double 95.370000000000005
.quad 0x4057e7ae147ae148 ## double 95.620000000000005
.quad 0x4057c7ae147ae148 ## double 95.120000000000005
.quad 0x4057c00000000000 ## double 95
.quad 0x405797ae147ae148 ## double 94.370000000000005
.quad 0x4057d00000000000 ## double 95.25
.quad 0x4057a7ae147ae148 ## double 94.620000000000005
.quad 0x405807ae147ae148 ## double 96.120000000000005
.quad 0x4057a7ae147ae148 ## double 94.620000000000005
.quad 0x405757ae147ae148 ## double 93.370000000000005
.quad 0x4057900000000000 ## double 94.25
.quad 0x4058100000000000 ## double 96.25
.quad 0x405837ae147ae148 ## double 96.870000000000005
.quad 0x4057f00000000000 ## double 95.75
.quad 0x405777ae147ae148 ## double 93.870000000000005
.quad 0x405777ae147ae148 ## double 93.870000000000005
.quad 0x4057000000000000 ## double 92
.quad 0x405697ae147ae148 ## double 90.370000000000005
.quad 0x4056100000000000 ## double 88.25
.quad 0x4056200000000000 ## double 88.5
.quad 0x4056200000000000 ## double 88.5
.quad 0x4055d7ae147ae148 ## double 87.370000000000005
.quad 0x4055e7ae147ae148 ## double 87.620000000000005
.quad 0x4055b7ae147ae148 ## double 86.870000000000005
.quad 0x4055800000000000 ## double 86
.quad 0x405547ae147ae148 ## double 85.120000000000005
.quad 0x405527ae147ae148 ## double 84.620000000000005
.quad 0x405507ae147ae148 ## double 84.120000000000005
.quad 0x4054e7ae147ae148 ## double 83.620000000000005
.quad 0x4054e00000000000 ## double 83.5
.quad 0x4054e00000000000 ## double 83.5
.quad 0x4054d7ae147ae148 ## double 83.370000000000005
.quad 0x4055300000000000 ## double 84.75
.quad 0x405537ae147ae148 ## double 84.870000000000005
.quad 0x405547ae147ae148 ## double 85.120000000000005
.quad 0x405517ae147ae148 ## double 84.370000000000005
.quad 0x4054d7ae147ae148 ## double 83.370000000000005
.quad 0x4054a7ae147ae148 ## double 82.620000000000005
.quad 0x4055000000000000 ## double 84
.quad 0x4055000000000000 ## double 84
.quad 0x4054e7ae147ae148 ## double 83.620000000000005
.quad 0x4054e7ae147ae148 ## double 83.620000000000005
.quad 0x4054900000000000 ## double 82.25
.quad 0x4054b00000000000 ## double 82.75
.quad 0x4054c00000000000 ## double 83
.quad 0x405467ae147ae148 ## double 81.620000000000005
.quad 0x405457ae147ae148 ## double 81.370000000000005
.quad 0x4054000000000000 ## double 80
.quad 0x405437ae147ae148 ## double 80.870000000000005
.quad 0x4054500000000000 ## double 81.25
.quad 0x4054000000000000 ## double 80
.quad 0x405357ae147ae148 ## double 77.370000000000005
.quad 0x405317ae147ae148 ## double 76.370000000000005
.quad 0x4052d00000000000 ## double 75.25
.quad 0x4052d7ae147ae148 ## double 75.370000000000005
.quad 0x4052d7ae147ae148 ## double 75.370000000000005
.quad 0x4052f00000000000 ## double 75.75
.quad 0x4052a00000000000 ## double 74.5
.quad 0x4052c7ae147ae148 ## double 75.120000000000005
.quad 0x4052f00000000000 ## double 75.75
.quad 0x4053100000000000 ## double 76.25
.quad 0x4052f7ae147ae148 ## double 75.870000000000005
.quad 0x4052e7ae147ae148 ## double 75.620000000000005
.quad 0x4052d7ae147ae148 ## double 75.370000000000005
.quad 0x4052f00000000000 ## double 75.75
.quad 0x4052d00000000000 ## double 75.25
.quad 0x4052b7ae147ae148 ## double 74.870000000000005
.quad 0x405297ae147ae148 ## double 74.370000000000005
.quad 0x4052b7ae147ae148 ## double 74.870000000000005
.quad 0x4052d00000000000 ## double 75.25
.quad 0x405297ae147ae148 ## double 74.370000000000005
.quad 0x405277ae147ae148 ## double 73.870000000000005
.quad 0x405237ae147ae148 ## double 72.870000000000005
.quad 0x4052400000000000 ## double 73
.quad 0x405247ae147ae148 ## double 73.120000000000005
.quad 0x405257ae147ae148 ## double 73.370000000000005
.quad 0x405247ae147ae148 ## double 73.120000000000005
.quad 0x4052b7ae147ae148 ## double 74.870000000000005
.quad 0x4052a00000000000 ## double 74.5
.quad 0x4053200000000000 ## double 76.5
.quad 0x405367ae147ae148 ## double 77.620000000000005
.quad 0x405377ae147ae148 ## double 77.870000000000005
.quad 0x4053800000000000 ## double 78
.quad 0x4053800000000000 ## double 78
.quad 0x405337ae147ae148 ## double 76.870000000000005
.quad 0x405317ae147ae148 ## double 76.370000000000005
.quad 0x405347ae147ae148 ## double 77.120000000000005
.quad 0x4053500000000000 ## double 77.25
.quad 0x4052f7ae147ae148 ## double 75.870000000000005
.quad 0x4052f00000000000 ## double 75.75
.quad 0x4052d00000000000 ## double 75.25
.quad 0x4052a7ae147ae148 ## double 74.620000000000005
.quad 0x405277ae147ae148 ## double 73.870000000000005
.quad 0x4052a00000000000 ## double 74.5
.quad 0x405297ae147ae148 ## double 74.370000000000005
.quad 0x405297ae147ae148 ## double 74.370000000000005
.quad 0x4052a00000000000 ## double 74.5
.quad 0x4052900000000000 ## double 74.25
.quad 0x4052a7ae147ae148 ## double 74.620000000000005
.quad 0x405267ae147ae148 ## double 73.620000000000005
.quad 0x4051e7ae147ae148 ## double 71.620000000000005
.quad 0x4051e7ae147ae148 ## double 71.620000000000005
.quad 0x4051b7ae147ae148 ## double 70.870000000000005
.quad 0x4051c00000000000 ## double 71
.quad 0x4051a7ae147ae148 ## double 70.620000000000005
.quad 0x4051c00000000000 ## double 71
.quad 0x4051d7ae147ae148 ## double 71.370000000000005
.quad 0x4051e00000000000 ## double 71.5
.quad 0x4051e00000000000 ## double 71.5
.quad 0x4051d7ae147ae148 ## double 71.370000000000005
.quad 0x4052000000000000 ## double 72
.quad 0x405217ae147ae148 ## double 72.370000000000005
.quad 0x4051d7ae147ae148 ## double 71.370000000000005
.quad 0x4051b00000000000 ## double 70.75
.quad 0x4051d00000000000 ## double 71.25
.quad 0x4051c00000000000 ## double 71
.quad 0x4051e7ae147ae148 ## double 71.620000000000005
.quad 0x4051b7ae147ae148 ## double 70.870000000000005
.quad 0x4051800000000000 ## double 70
.quad 0x405237ae147ae148 ## double 72.870000000000005
.quad 0x405287ae147ae148 ## double 74.120000000000005
.quad 0x405257ae147ae148 ## double 73.370000000000005
.quad 0x4052700000000000 ## double 73.75
.quad 0x405247ae147ae148 ## double 73.120000000000005
.quad 0x405277ae147ae148 ## double 73.870000000000005
.quad 0x4052400000000000 ## double 73
.quad 0x4052400000000000 ## double 73
.quad 0x405267ae147ae148 ## double 73.620000000000005
.quad 0x405297ae147ae148 ## double 74.370000000000005
.quad 0x4052600000000000 ## double 73.5
.quad 0x4052100000000000 ## double 72.25
.quad 0x4052600000000000 ## double 73.5
.quad 0x4052a7ae147ae148 ## double 74.620000000000005
.quad 0x4052d00000000000 ## double 75.25
.quad 0x4052d00000000000 ## double 75.25
.quad 0x4052e7ae147ae148 ## double 75.620000000000005
.quad 0x405317ae147ae148 ## double 76.370000000000005
.quad 0x4052a7ae147ae148 ## double 74.620000000000005
.quad 0x4052a00000000000 ## double 74.5
.quad 0x405297ae147ae148 ## double 74.370000000000005
.quad 0x4052a7ae147ae148 ## double 74.620000000000005
.quad 0x4052d7ae147ae148 ## double 75.370000000000005
.quad 0x405307ae147ae148 ## double 76.120000000000005
.quad 0x4052f00000000000 ## double 75.75
.quad 0x4052a00000000000 ## double 74.5
.quad 0x4052700000000000 ## double 73.75
.quad 0x405247ae147ae148 ## double 73.120000000000005
.quad 0x405287ae147ae148 ## double 74.120000000000005
.quad 0x4052600000000000 ## double 73.5
.quad 0x405217ae147ae148 ## double 72.370000000000005
.quad 0x4051f7ae147ae148 ## double 71.870000000000005
.quad 0x4051f00000000000 ## double 71.75
.quad 0x4051600000000000 ## double 69.5
.quad 0x405147ae147ae148 ## double 69.120000000000005
.quad 0x4051600000000000 ## double 69.5
.quad 0x4051600000000000 ## double 69.5
.quad 0x405187ae147ae148 ## double 70.120000000000005
.quad 0x405177ae147ae148 ## double 69.870000000000005
.quad 0x405177ae147ae148 ## double 69.870000000000005
.quad 0x405157ae147ae148 ## double 69.370000000000005
.quad 0x405147ae147ae148 ## double 69.120000000000005
.quad 0x4051600000000000 ## double 69.5
.quad 0x4051900000000000 ## double 70.25
.quad 0x4051d7ae147ae148 ## double 71.370000000000005
.quad 0x4051a00000000000 ## double 70.5
.quad 0x4051d00000000000 ## double 71.25
.quad 0x4051d7ae147ae148 ## double 71.370000000000005
.quad 0x4051900000000000 ## double 70.25
.quad 0x405187ae147ae148 ## double 70.120000000000005
.quad 0x405167ae147ae148 ## double 69.620000000000005
.quad 0x405107ae147ae148 ## double 68.120000000000005
.quad 0x4051000000000000 ## double 68
.quad 0x405107ae147ae148 ## double 68.120000000000005
.quad 0x4051100000000000 ## double 68.25
.quad 0x4050e7ae147ae148 ## double 67.620000000000005
.quad 0x4051000000000000 ## double 68
.quad 0x4051100000000000 ## double 68.25
.quad 0x4051600000000000 ## double 69.5
.quad 0x4051700000000000 ## double 69.75
.quad 0x4051800000000000 ## double 70
.quad 0x405187ae147ae148 ## double 70.120000000000005
.quad 0x4051700000000000 ## double 69.75
.quad 0x4050d7ae147ae148 ## double 67.370000000000005
.quad 0x4051100000000000 ## double 68.25
.quad 0x405117ae147ae148 ## double 68.370000000000005
.quad 0x405127ae147ae148 ## double 68.620000000000005
.quad 0x4050b7ae147ae148 ## double 66.870000000000005
.quad 0x405067ae147ae148 ## double 65.620000000000005
.quad 0x4050300000000000 ## double 64.75
.quad 0x4050200000000000 ## double 64.5
.quad 0x405047ae147ae148 ## double 65.120000000000005
.quad 0x405027ae147ae148 ## double 64.620000000000005
.quad 0x4050400000000000 ## double 65
.quad 0x4050200000000000 ## double 64.5
.quad 0x404fb0a3d70a3d71 ## double 63.380000000000003
.quad 0x404f90a3d70a3d71 ## double 63.130000000000003
.quad 0x404fc00000000000 ## double 63.5
.quad 0x404f90a3d70a3d71 ## double 63.130000000000003
.quad 0x404fb0a3d70a3d71 ## double 63.380000000000003
.quad 0x404f90a3d70a3d71 ## double 63.130000000000003
.quad 0x404f600000000000 ## double 62.75
.quad 0x404f800000000000 ## double 63
.quad 0x404f400000000000 ## double 62.5
.quad 0x404f90a3d70a3d71 ## double 63.130000000000003
.quad 0x404f30a3d70a3d71 ## double 62.380000000000003
.quad 0x404f400000000000 ## double 62.5
.quad 0x404f50a3d70a3d71 ## double 62.630000000000003
.quad 0x404c200000000000 ## double 56.25
.quad 0x404c70a3d70a3d71 ## double 56.880000000000003
.quad 0x404cd0a3d70a3d71 ## double 57.630000000000003
.quad 0x404cf0a3d70a3d71 ## double 57.880000000000003
.quad 0x404d400000000000 ## double 58.5
.quad 0x404c600000000000 ## double 56.75
.quad 0x404c10a3d70a3d71 ## double 56.130000000000003
.quad 0x404c90a3d70a3d71 ## double 57.130000000000003
.quad 0x404cf0a3d70a3d71 ## double 57.880000000000003
.quad 0x404cb0a3d70a3d71 ## double 57.380000000000003
.quad 0x404c800000000000 ## double 57
.quad 0x404cf0a3d70a3d71 ## double 57.880000000000003
.quad 0x404d90a3d70a3d71 ## double 59.130000000000003
.quad 0x404e400000000000 ## double 60.5
.quad 0x404ec00000000000 ## double 61.5
.quad 0x404ef0a3d70a3d71 ## double 61.880000000000003
.quad 0x404ed0a3d70a3d71 ## double 61.630000000000003
.quad 0x404e800000000000 ## double 61
.quad 0x404f200000000000 ## double 62.25
.quad 0x404f10a3d70a3d71 ## double 62.130000000000003
.quad 0x404f10a3d70a3d71 ## double 62.130000000000003
.quad 0x404f200000000000 ## double 62.25
.quad 0x404f90a3d70a3d71 ## double 63.130000000000003
.quad 0x404fc00000000000 ## double 63.5
.quad 0x4050300000000000 ## double 64.75
.quad 0x4050400000000000 ## double 65
.quad 0x405007ae147ae148 ## double 64.120000000000005
.quad 0x404f70a3d70a3d71 ## double 62.880000000000003
.quad 0x404f10a3d70a3d71 ## double 62.130000000000003
.quad 0x404f800000000000 ## double 63
.quad 0x404fa00000000000 ## double 63.25
.quad 0x404fc00000000000 ## double 63.5
.quad 0x404ef0a3d70a3d71 ## double 61.880000000000003
.quad 0x404fc00000000000 ## double 63.5
.quad 0x404fd0a3d70a3d71 ## double 63.630000000000003
.quad 0x404fe00000000000 ## double 63.75
.quad 0x405007ae147ae148 ## double 64.120000000000005
.quad 0x405007ae147ae148 ## double 64.120000000000005
.quad 0x405007ae147ae148 ## double 64.120000000000005
.quad 0x404fb0a3d70a3d71 ## double 63.380000000000003
.quad 0x404f400000000000 ## double 62.5
.quad 0x404f30a3d70a3d71 ## double 62.380000000000003
.quad 0x404f10a3d70a3d71 ## double 62.130000000000003
.quad 0x404ef0a3d70a3d71 ## double 61.880000000000003
.quad 0x404ec00000000000 ## double 61.5
.quad 0x404d50a3d70a3d71 ## double 58.630000000000003
.quad 0x404cf0a3d70a3d71 ## double 57.880000000000003
.quad 0x404ce00000000000 ## double 57.75
.quad 0x404d200000000000 ## double 58.25
.quad 0x404d200000000000 ## double 58.25
.quad 0x404cf0a3d70a3d71 ## double 57.880000000000003
.quad 0x404c90a3d70a3d71 ## double 57.130000000000003
.quad 0x404cc00000000000 ## double 57.5
.quad 0x404d200000000000 ## double 58.25
.quad 0x404d50a3d70a3d71 ## double 58.630000000000003
.quad 0x404d30a3d70a3d71 ## double 58.380000000000003
.quad 0x404cf0a3d70a3d71 ## double 57.880000000000003
.quad 0x404d200000000000 ## double 58.25
.quad 0x404da00000000000 ## double 59.25
.quad 0x404da00000000000 ## double 59.25
.quad 0x404dc00000000000 ## double 59.5
.quad 0x404d50a3d70a3d71 ## double 58.630000000000003
.quad 0x404ac00000000000 ## double 53.5
.quad 0x404ae00000000000 ## double 53.75
.quad 0x404b10a3d70a3d71 ## double 54.130000000000003
.quad 0x404b400000000000 ## double 54.5
.quad 0x404af0a3d70a3d71 ## double 53.880000000000003
.quad 0x404ab0a3d70a3d71 ## double 53.380000000000003
.quad 0x404af0a3d70a3d71 ## double 53.880000000000003
.quad 0x404ab0a3d70a3d71 ## double 53.380000000000003
.quad 0x404a90a3d70a3d71 ## double 53.130000000000003
.quad 0x404aa00000000000 ## double 53.25
.quad 0x404ac66666666666 ## double 53.549999999999997
.quad 0x404b10a3d70a3d71 ## double 54.130000000000003
.quad 0x404ac00000000000 ## double 53.5
.quad 0x404b50a3d70a3d71 ## double 54.630000000000003
.quad 0x404b800000000000 ## double 55
.quad 0x404b30a3d70a3d71 ## double 54.380000000000003
.quad 0x404b50a3d70a3d71 ## double 54.630000000000003
.quad 0x404c400000000000 ## double 56.5
.quad 0x404c90a3d70a3d71 ## double 57.130000000000003
.quad 0x404cf0a3d70a3d71 ## double 57.880000000000003
.quad 0x404d50a3d70a3d71 ## double 58.630000000000003
.quad 0x404d70a3d70a3d71 ## double 58.880000000000003
.quad 0x404d200000000000 ## double 58.25
.quad 0x404d200000000000 ## double 58.25
.quad 0x404d200000000000 ## double 58.25
.quad 0x404d30a3d70a3d71 ## double 58.380000000000003
.quad 0x404cf0a3d70a3d71 ## double 57.880000000000003
.quad 0x404c200000000000 ## double 56.25
.quad 0x404c200000000000 ## double 56.25
.quad 0x404be00000000000 ## double 55.75
.quad 0x404b70a3d70a3d71 ## double 54.880000000000003
.quad 0x404a800000000000 ## double 53
.quad 0x404aa00000000000 ## double 53.25
.quad 0x404ab0a3d70a3d71 ## double 53.380000000000003
.quad 0x404ac00000000000 ## double 53.5
.quad 0x404af0a3d70a3d71 ## double 53.880000000000003
.quad 0x404aa00000000000 ## double 53.25
.quad 0x404ab0a3d70a3d71 ## double 53.380000000000003
.quad 0x404ab0a3d70a3d71 ## double 53.380000000000003
.quad 0x404b200000000000 ## double 54.25
.quad 0x404af0a3d70a3d71 ## double 53.880000000000003
.quad 0x404a70a3d70a3d71 ## double 52.880000000000003
.quad 0x404b800000000000 ## double 55
.quad 0x404ba00000000000 ## double 55.25
.quad 0x404b50a3d70a3d71 ## double 54.630000000000003
.quad 0x404b50a3d70a3d71 ## double 54.630000000000003
.quad 0x404ab0a3d70a3d71 ## double 53.380000000000003
.quad 0x404ad0a3d70a3d71 ## double 53.630000000000003
.quad 0x404b200000000000 ## double 54.25
.quad 0x404b600000000000 ## double 54.75
.quad 0x404b50a3d70a3d71 ## double 54.630000000000003
.quad 0x404bd0a3d70a3d71 ## double 55.630000000000003
.quad 0x404c70a3d70a3d71 ## double 56.880000000000003
.quad 0x404c400000000000 ## double 56.5
.quad 0x404cb0a3d70a3d71 ## double 57.380000000000003
.quad 0x404cf0a3d70a3d71 ## double 57.880000000000003
.quad 0x404d10a3d70a3d71 ## double 58.130000000000003
.quad 0x404cc00000000000 ## double 57.5
.quad 0x404d600000000000 ## double 58.75
.quad 0x404db0a3d70a3d71 ## double 59.380000000000003
.quad 0x404d600000000000 ## double 58.75
.quad 0x404bd0a3d70a3d71 ## double 55.630000000000003
.quad 0x404bf0a3d70a3d71 ## double 55.880000000000003
.quad 0x404c800000000000 ## double 57
.quad 0x404cb0a3d70a3d71 ## double 57.380000000000003
.quad 0x404d400000000000 ## double 58.5
.quad 0x404dc00000000000 ## double 59.5
.quad 0x404d800000000000 ## double 59
.quad 0x404d70a3d70a3d71 ## double 58.880000000000003
.quad 0x404db0a3d70a3d71 ## double 59.380000000000003
.quad 0x404dc00000000000 ## double 59.5
.quad 0x404db0a3d70a3d71 ## double 59.380000000000003
.quad 0x404e000000000000 ## double 60
.quad 0x404df0a3d70a3d71 ## double 59.880000000000003
.quad 0x404d90a3d70a3d71 ## double 59.130000000000003
.quad 0x404ce00000000000 ## double 57.75
.quad 0x404cb0a3d70a3d71 ## double 57.380000000000003
.quad 0x404d30a3d70a3d71 ## double 58.380000000000003
.quad 0x404d600000000000 ## double 58.75
.quad 0x404db0a3d70a3d71 ## double 59.380000000000003
.quad 0x404da00000000000 ## double 59.25
.quad 0x404db0a3d70a3d71 ## double 59.380000000000003
.quad 0x404da00000000000 ## double 59.25
.quad 0x404d90a3d70a3d71 ## double 59.130000000000003
.quad 0x404db0a3d70a3d71 ## double 59.380000000000003
.quad 0x404df0a3d70a3d71 ## double 59.880000000000003
.quad 0x404cd0a3d70a3d71 ## double 57.630000000000003
.quad 0x404cd0a3d70a3d71 ## double 57.630000000000003
.quad 0x404cb0a3d70a3d71 ## double 57.380000000000003
.quad 0x404ce00000000000 ## double 57.75
.quad 0x404bc00000000000 ## double 55.5
.quad 0x404b10a3d70a3d71 ## double 54.130000000000003
.quad 0x404b10a3d70a3d71 ## double 54.130000000000003
.quad 0x404b200000000000 ## double 54.25
.quad 0x404b30a3d70a3d71 ## double 54.380000000000003
.quad 0x404b000000000000 ## double 54
.quad 0x404b10a3d70a3d71 ## double 54.130000000000003
.quad 0x404b600000000000 ## double 54.75
.quad 0x404b90a3d70a3d71 ## double 55.130000000000003
.quad 0x404c10a3d70a3d71 ## double 56.130000000000003
.quad 0x404c000000000000 ## double 56
.quad 0x404ba00000000000 ## double 55.25
.quad 0x404aa00000000000 ## double 53.25
.quad 0x404a400000000000 ## double 52.5
.quad 0x404a50a3d70a3d71 ## double 52.630000000000003
.quad 0x404a90a3d70a3d71 ## double 53.130000000000003
.quad 0x404b200000000000 ## double 54.25
.quad 0x404a800000000000 ## double 53
.quad 0x4049d0a3d70a3d71 ## double 51.630000000000003
.quad 0x404a400000000000 ## double 52.5
.quad 0x404a30a3d70a3d71 ## double 52.380000000000003
.quad 0x404910a3d70a3d71 ## double 50.130000000000003
.quad 0x404950a3d70a3d71 ## double 50.630000000000003
.quad 0x4049600000000000 ## double 50.75
.quad 0x404910a3d70a3d71 ## double 50.130000000000003
.quad 0x4049400000000000 ## double 50.5
.quad 0x404990a3d70a3d71 ## double 51.130000000000003
.quad 0x404990a3d70a3d71 ## double 51.130000000000003
.quad 0x4048000000000000 ## double 48
.quad 0x404750a3d70a3d71 ## double 46.630000000000003
.quad 0x404730a3d70a3d71 ## double 46.380000000000003
.quad 0x404750a3d70a3d71 ## double 46.630000000000003
.quad 0x4047200000000000 ## double 46.25
.quad 0x4046800000000000 ## double 45
.quad 0x4046e00000000000 ## double 45.75
.quad 0x4046a00000000000 ## double 45.25
.quad 0x4045d0a3d70a3d71 ## double 43.630000000000003
.quad 0x404650a3d70a3d71 ## double 44.630000000000003
.quad 0x404690a3d70a3d71 ## double 45.130000000000003
.quad 0x404670a3d70a3d71 ## double 44.880000000000003
.quad 0x4045a00000000000 ## double 43.25
.quad 0x4045f0a3d70a3d71 ## double 43.880000000000003
.quad 0x404630a3d70a3d71 ## double 44.380000000000003
.quad 0x404630a3d70a3d71 ## double 44.380000000000003
.quad 0x404630a3d70a3d71 ## double 44.380000000000003
.quad 0x404610a3d70a3d71 ## double 44.130000000000003
.quad 0x404630a3d70a3d71 ## double 44.380000000000003
.quad 0x4046400000000000 ## double 44.5
.quad 0x4046600000000000 ## double 44.75
.quad 0x4045f0a3d70a3d71 ## double 43.880000000000003
.quad 0x404510a3d70a3d71 ## double 42.130000000000003
.quad 0x404510a3d70a3d71 ## double 42.130000000000003
.quad 0x404550a3d70a3d71 ## double 42.630000000000003
.quad 0x4044f0a3d70a3d71 ## double 41.880000000000003
.quad 0x4045000000000000 ## double 42
.quad 0x404530a3d70a3d71 ## double 42.380000000000003
.quad 0x4045400000000000 ## double 42.5
.quad 0x404550a3d70a3d71 ## double 42.630000000000003
.quad 0x4045b0a3d70a3d71 ## double 43.380000000000003
.quad 0x4045b0a3d70a3d71 ## double 43.380000000000003
.quad 0x4046000000000000 ## double 44
.quad 0x4045f0a3d70a3d71 ## double 43.880000000000003
.quad 0x4045f0a3d70a3d71 ## double 43.880000000000003
.quad 0x404630a3d70a3d71 ## double 44.380000000000003
.quad 0x404650a3d70a3d71 ## double 44.630000000000003
.quad 0x404650a3d70a3d71 ## double 44.630000000000003
.quad 0x4046b0a3d70a3d71 ## double 45.380000000000003
.quad 0x4047200000000000 ## double 46.25
.quad 0x404730a3d70a3d71 ## double 46.380000000000003
.quad 0x404730a3d70a3d71 ## double 46.380000000000003
.quad 0x404730a3d70a3d71 ## double 46.380000000000003
.quad 0x4046f0a3d70a3d71 ## double 45.880000000000003
.quad 0x4046600000000000 ## double 44.75
.quad 0x404610a3d70a3d71 ## double 44.130000000000003
.quad 0x4046600000000000 ## double 44.75
.quad 0x404610a3d70a3d71 ## double 44.130000000000003
.quad 0x404650a3d70a3d71 ## double 44.630000000000003
.quad 0x4046600000000000 ## double 44.75
.quad 0x4045a00000000000 ## double 43.25
.quad 0x4045a00000000000 ## double 43.25
.quad 0x4045c00000000000 ## double 43.5
.quad 0x4045800000000000 ## double 43
.quad 0x4044e00000000000 ## double 41.75
.quad 0x4045000000000000 ## double 42
.quad 0x4045400000000000 ## double 42.5
.quad 0x4045c00000000000 ## double 43.5
.quad 0x4045b0a3d70a3d71 ## double 43.380000000000003
.quad 0x4045e00000000000 ## double 43.75
.quad 0x4045e00000000000 ## double 43.75
.quad 0x4046600000000000 ## double 44.75
.quad 0x404630a3d70a3d71 ## double 44.380000000000003
.quad 0x404610a3d70a3d71 ## double 44.130000000000003
.quad 0x4046600000000000 ## double 44.75
.quad 0x4046400000000000 ## double 44.5
.quad 0x4046400000000000 ## double 44.5
.quad 0x404730a3d70a3d71 ## double 46.380000000000003
.quad 0x4046f0a3d70a3d71 ## double 45.880000000000003
.quad 0x4045800000000000 ## double 43
.quad 0x4045d0a3d70a3d71 ## double 43.630000000000003
.quad 0x4046600000000000 ## double 44.75
.quad 0x4045f0a3d70a3d71 ## double 43.880000000000003
.quad 0x4045e00000000000 ## double 43.75
.quad 0x4047000000000000 ## double 46
.quad 0x4047c00000000000 ## double 47.5
.quad 0x4047f0a3d70a3d71 ## double 47.880000000000003
.quad 0x4047f0a3d70a3d71 ## double 47.880000000000003
.quad 0x4048400000000000 ## double 48.5
.quad 0x4048200000000000 ## double 48.25
.quad 0x4047800000000000 ## double 47
.quad 0x4047d0a3d70a3d71 ## double 47.630000000000003
.quad 0x4047800000000000 ## double 47
.quad 0x404810a3d70a3d71 ## double 48.130000000000003
.quad 0x404850a3d70a3d71 ## double 48.630000000000003
.quad 0x4048e00000000000 ## double 49.75
.quad 0x4048f0a3d70a3d71 ## double 49.880000000000003
.quad 0x4049400000000000 ## double 50.5
.quad 0x4049600000000000 ## double 50.75
.quad 0x4048e00000000000 ## double 49.75
.quad 0x404890a3d70a3d71 ## double 49.130000000000003
.quad 0x4048b0a3d70a3d71 ## double 49.380000000000003
.quad 0x4048d0a3d70a3d71 ## double 49.630000000000003
.quad 0x404910a3d70a3d71 ## double 50.130000000000003
.quad 0x4048d0a3d70a3d71 ## double 49.630000000000003
.quad 0x4049200000000000 ## double 50.25
.quad 0x404930a3d70a3d71 ## double 50.380000000000003
.quad 0x4049f0a3d70a3d71 ## double 51.880000000000003
.quad 0x404a400000000000 ## double 52.5
.quad 0x404a800000000000 ## double 53
.quad 0x404a600000000000 ## double 52.75
.quad 0x404ab0a3d70a3d71 ## double 53.380000000000003
.quad 0x404a400000000000 ## double 52.5
.quad 0x404b000000000000 ## double 54
.quad 0x404b30a3d70a3d71 ## double 54.380000000000003
.quad 0x404b30a3d70a3d71 ## double 54.380000000000003
.quad 0x404b000000000000 ## double 54
.quad 0x404ab0a3d70a3d71 ## double 53.380000000000003
.quad 0x404a90a3d70a3d71 ## double 53.130000000000003
.quad 0x404b000000000000 ## double 54
.quad 0x404a600000000000 ## double 52.75
.quad 0x404970a3d70a3d71 ## double 50.880000000000003
.quad 0x4048f0a3d70a3d71 ## double 49.880000000000003
.quad 0x4048a00000000000 ## double 49.25
.quad 0x4048c00000000000 ## double 49.5
.quad 0x404910a3d70a3d71 ## double 50.130000000000003
.quad 0x4048800000000000 ## double 49
.quad 0x4048200000000000 ## double 48.25
.quad 0x404810a3d70a3d71 ## double 48.130000000000003
.quad 0x4047e00000000000 ## double 47.75
.quad 0x404850a3d70a3d71 ## double 48.630000000000003
.quad 0x404870a3d70a3d71 ## double 48.880000000000003
.quad 0x404890a3d70a3d71 ## double 49.130000000000003
.quad 0x404850a3d70a3d71 ## double 48.630000000000003
.quad 0x404850a3d70a3d71 ## double 48.630000000000003
.quad 0x4048c00000000000 ## double 49.5
.quad 0x4048f0a3d70a3d71 ## double 49.880000000000003
.quad 0x4048b0a3d70a3d71 ## double 49.380000000000003
.quad 0x4048b0a3d70a3d71 ## double 49.380000000000003
.quad 0x404910a3d70a3d71 ## double 50.130000000000003
.quad 0x4049600000000000 ## double 50.75
.quad 0x4048600000000000 ## double 48.75
.quad 0x4048a00000000000 ## double 49.25
.quad 0x404830a3d70a3d71 ## double 48.380000000000003
.quad 0x4048800000000000 ## double 49
.quad 0x4049200000000000 ## double 50.25
.quad 0x404950a3d70a3d71 ## double 50.630000000000003
.quad 0x404950a3d70a3d71 ## double 50.630000000000003
.quad 0x404890a3d70a3d71 ## double 49.130000000000003
.quad 0x4048a00000000000 ## double 49.25
.quad 0x4048d0a3d70a3d71 ## double 49.630000000000003
.quad 0x404990a3d70a3d71 ## double 51.130000000000003
.quad 0x4049f0a3d70a3d71 ## double 51.880000000000003
.quad 0x404a30a3d70a3d71 ## double 52.380000000000003
.quad 0x404a200000000000 ## double 52.25
.quad 0x404a50a3d70a3d71 ## double 52.630000000000003
.quad 0x404a800000000000 ## double 53
.quad 0x404a90a3d70a3d71 ## double 53.130000000000003
.quad 0x404a10a3d70a3d71 ## double 52.130000000000003
.quad 0x4049c00000000000 ## double 51.5
.quad 0x404a000000000000 ## double 52
.quad 0x404a600000000000 ## double 52.75
.quad 0x4049d0a3d70a3d71 ## double 51.630000000000003
.quad 0x404970a3d70a3d71 ## double 50.880000000000003
.quad 0x404ae00000000000 ## double 53.75
.quad 0x404b200000000000 ## double 54.25
.quad 0x404ae00000000000 ## double 53.75
.quad 0x404b400000000000 ## double 54.5
.quad 0x404b600000000000 ## double 54.75
.quad 0x404bd0a3d70a3d71 ## double 55.630000000000003
.quad 0x404bb0a3d70a3d71 ## double 55.380000000000003
.quad 0x404c200000000000 ## double 56.25
.quad 0x404be00000000000 ## double 55.75
.quad 0x404bf0a3d70a3d71 ## double 55.880000000000003
.quad 0x404c90a3d70a3d71 ## double 57.130000000000003
.quad 0x404c600000000000 ## double 56.75
.quad 0x404c200000000000 ## double 56.25
.quad 0x404bf0a3d70a3d71 ## double 55.880000000000003
.quad 0x404ba00000000000 ## double 55.25
.quad 0x404bc00000000000 ## double 55.5
.quad 0x404b50a3d70a3d71 ## double 54.630000000000003
.quad 0x404bb0a3d70a3d71 ## double 55.380000000000003
.quad 0x404b70a3d70a3d71 ## double 54.880000000000003
.quad 0x404af0a3d70a3d71 ## double 53.880000000000003
.quad 0x404a000000000000 ## double 52
.quad 0x4049e00000000000 ## double 51.75
.quad 0x4049b0a3d70a3d71 ## double 51.380000000000003
.quad 0x4049600000000000 ## double 50.75
.quad 0x4049a00000000000 ## double 51.25
.quad 0x4049600000000000 ## double 50.75
.quad 0x404970a3d70a3d71 ## double 50.880000000000003
.quad 0x4049d0a3d70a3d71 ## double 51.630000000000003
.quad 0x4049f0a3d70a3d71 ## double 51.880000000000003
.quad 0x404af0a3d70a3d71 ## double 53.880000000000003
.quad 0x404b400000000000 ## double 54.5
.quad 0x404a90a3d70a3d71 ## double 53.130000000000003
.quad 0x404a50a3d70a3d71 ## double 52.630000000000003
.quad 0x4049f0a3d70a3d71 ## double 51.880000000000003
.quad 0x404a600000000000 ## double 52.75
.quad 0x404a800000000000 ## double 53
.quad 0x404a50a3d70a3d71 ## double 52.630000000000003
.quad 0x4049d0a3d70a3d71 ## double 51.630000000000003
.quad 0x404970a3d70a3d71 ## double 50.880000000000003
.quad 0x4049000000000000 ## double 50
.quad 0x404aa00000000000 ## double 53.25
.quad 0x4048a00000000000 ## double 49.25
.quad 0x4048600000000000 ## double 48.75
.quad 0x4047800000000000 ## double 47
.quad 0x4048200000000000 ## double 48.25
.quad 0x404970a3d70a3d71 ## double 50.880000000000003
.quad 0x404910a3d70a3d71 ## double 50.130000000000003
.quad 0x4048800000000000 ## double 49
.quad 0x4048c00000000000 ## double 49.5
.quad 0x4048800000000000 ## double 49
.quad 0x4048f0a3d70a3d71 ## double 49.880000000000003
.quad 0x404830a3d70a3d71 ## double 48.380000000000003
.quad 0x404770a3d70a3d71 ## double 46.880000000000003
.quad 0x404850a3d70a3d71 ## double 48.630000000000003
.quad 0x404890a3d70a3d71 ## double 49.130000000000003
.quad 0x4049000000000000 ## double 50
.quad 0x4049f0a3d70a3d71 ## double 51.880000000000003
.quad 0x4049a00000000000 ## double 51.25
.quad 0x404970a3d70a3d71 ## double 50.880000000000003
.quad 0x404990a3d70a3d71 ## double 51.130000000000003
.quad 0x404a800000000000 ## double 53
.quad 0x404a800000000000 ## double 53
.quad 0x404a50a3d70a3d71 ## double 52.630000000000003
.quad 0x404a000000000000 ## double 52
.quad 0x4049d0a3d70a3d71 ## double 51.630000000000003
.quad 0x404aa00000000000 ## double 53.25
.quad 0x404ab0a3d70a3d71 ## double 53.380000000000003
.quad 0x404b800000000000 ## double 55
.quad 0x404df0a3d70a3d71 ## double 59.880000000000003
.quad 0x404fb0a3d70a3d71 ## double 63.380000000000003
.quad 0x404f70a3d70a3d71 ## double 62.880000000000003
.quad 0x404f30a3d70a3d71 ## double 62.380000000000003
.quad 0x4050100000000000 ## double 64.25
.quad 0x4050700000000000 ## double 65.75
.quad 0x4050d7ae147ae148 ## double 67.370000000000005
.quad 0x4050e7ae147ae148 ## double 67.620000000000005
.quad 0x4051000000000000 ## double 68
.quad 0x4051100000000000 ## double 68.25
.quad 0x405137ae147ae148 ## double 68.870000000000005
.quad 0x405127ae147ae148 ## double 68.620000000000005
.quad 0x4050900000000000 ## double 66.25
.quad 0x4050800000000000 ## double 66
.quad 0x4050600000000000 ## double 65.5
.quad 0x404ff0a3d70a3d71 ## double 63.880000000000003
.quad 0x404f50a3d70a3d71 ## double 62.630000000000003
.quad 0x404ff0a3d70a3d71 ## double 63.880000000000003
.quad 0x405017ae147ae148 ## double 64.370000000000005
.quad 0x4050400000000000 ## double 65
.quad 0x4050600000000000 ## double 65.5
.quad 0x405047ae147ae148 ## double 65.120000000000005
.quad 0x405057ae147ae148 ## double 65.370000000000005
.quad 0x4050700000000000 ## double 65.75
.quad 0x4051000000000000 ## double 68
.quad 0x4050e7ae147ae148 ## double 67.620000000000005
.quad 0x4050a00000000000 ## double 66.5
.quad 0x4050d00000000000 ## double 67.25
.quad 0x405167ae147ae148 ## double 69.620000000000005
.quad 0x405177ae147ae148 ## double 69.870000000000005
.quad 0x4051400000000000 ## double 69
.quad 0x4050d7ae147ae148 ## double 67.370000000000005
.quad 0x4050e7ae147ae148 ## double 67.620000000000005
.quad 0x4050c7ae147ae148 ## double 67.120000000000005
.quad 0x4050d00000000000 ## double 67.25
.quad 0x4051300000000000 ## double 68.75
.quad 0x405157ae147ae148 ## double 69.370000000000005
.quad 0x4051400000000000 ## double 69
.quad 0x405147ae147ae148 ## double 69.120000000000005
.quad 0x405167ae147ae148 ## double 69.620000000000005
.quad 0x4051900000000000 ## double 70.25
.quad 0x405237ae147ae148 ## double 72.870000000000005
.quad 0x405287ae147ae148 ## double 74.120000000000005
.quad 0x4053b7ae147ae148 ## double 78.870000000000005
.quad 0x4054000000000000 ## double 80
.quad 0x4053c00000000000 ## double 79
.quad 0x4053f7ae147ae148 ## double 79.870000000000005
.quad 0x4053e00000000000 ## double 79.5
.quad 0x4054000000000000 ## double 80
.quad 0x405417ae147ae148 ## double 80.370000000000005
.quad 0x4053b7ae147ae148 ## double 78.870000000000005
.quad 0x4054200000000000 ## double 80.5
.quad 0x405447ae147ae148 ## double 81.120000000000005
.quad 0x405457ae147ae148 ## double 81.370000000000005
.quad 0x4054b00000000000 ## double 82.75
.quad 0x4054b7ae147ae148 ## double 82.870000000000005
.quad 0x4054c7ae147ae148 ## double 83.120000000000005
.quad 0x4054f00000000000 ## double 83.75
.quad 0x4054f00000000000 ## double 83.75
.quad 0x405527ae147ae148 ## double 84.620000000000005
.quad 0x4055200000000000 ## double 84.5
.quad 0x4055000000000000 ## double 84
.quad 0x4054f00000000000 ## double 83.75
.quad 0x4055200000000000 ## double 84.5
.quad 0x4055c00000000000 ## double 87
.quad 0x4056300000000000 ## double 88.75
.quad 0x4056100000000000 ## double 88.25
.quad 0x4055e00000000000 ## double 87.5
.quad 0x4055b7ae147ae148 ## double 86.870000000000005
.quad 0x405597ae147ae148 ## double 86.370000000000005
.quad 0x4055a7ae147ae148 ## double 86.620000000000005
.quad 0x4055f00000000000 ## double 87.75
.quad 0x4055e7ae147ae148 ## double 87.620000000000005
.quad 0x4055e7ae147ae148 ## double 87.620000000000005
.quad 0x4055e00000000000 ## double 87.5
.quad 0x4055d7ae147ae148 ## double 87.370000000000005
.quad 0x4055d7ae147ae148 ## double 87.370000000000005
.quad 0x4055c7ae147ae148 ## double 87.120000000000005
.quad 0x4055c7ae147ae148 ## double 87.120000000000005
.quad 0x4055900000000000 ## double 86.25
.quad 0x4055a00000000000 ## double 86.5
.quad 0x4055b7ae147ae148 ## double 86.870000000000005
.quad 0x4056200000000000 ## double 88.5
.quad 0x405627ae147ae148 ## double 88.620000000000005
.quad 0x405647ae147ae148 ## double 89.120000000000005
.quad 0x4056300000000000 ## double 88.75
.quad 0x405647ae147ae148 ## double 89.120000000000005
.quad 0x4056400000000000 ## double 89
.quad 0x405627ae147ae148 ## double 88.620000000000005
.quad 0x4056300000000000 ## double 88.75
.quad 0x405627ae147ae148 ## double 88.620000000000005
.quad 0x405687ae147ae148 ## double 90.120000000000005
.quad 0x4057400000000000 ## double 93
.quad 0x4057b7ae147ae148 ## double 94.870000000000005
.quad 0x4057d00000000000 ## double 95.25
.quad 0x4057c7ae147ae148 ## double 95.120000000000005
.quad 0x4057e00000000000 ## double 95.5
.quad 0x4057b7ae147ae148 ## double 94.870000000000005
.quad 0x4057800000000000 ## double 94
.quad 0x4057500000000000 ## double 93.25
.quad 0x405767ae147ae148 ## double 93.620000000000005
.quad 0x4057500000000000 ## double 93.25
.quad 0x4057800000000000 ## double 94
.quad 0x4057900000000000 ## double 94.25
.quad 0x405757ae147ae148 ## double 93.370000000000005
.quad 0x405857ae147ae148 ## double 97.370000000000005
.quad 0x405917ae147ae148 ## double 100.37
.quad 0x4058d7ae147ae148 ## double 99.370000000000005
.quad 0x405897ae147ae148 ## double 98.370000000000005
.quad 0x4058b7ae147ae148 ## double 98.870000000000005
.quad 0x405877ae147ae148 ## double 97.870000000000005
.quad 0x405897ae147ae148 ## double 98.370000000000005
.quad 0x405877ae147ae148 ## double 97.870000000000005
.quad 0x4058b00000000000 ## double 98.75
.quad 0x4058900000000000 ## double 98.25
.quad 0x4058900000000000 ## double 98.25
.quad 0x4058a00000000000 ## double 98.5
.quad 0x4058a7ae147ae148 ## double 98.620000000000005
.quad 0x4058a7ae147ae148 ## double 98.620000000000005
.quad 0x405857ae147ae148 ## double 97.370000000000005
.quad 0x4058900000000000 ## double 98.25
.quad 0x405877ae147ae148 ## double 97.870000000000005
.quad 0x405817ae147ae148 ## double 96.370000000000005
.quad 0x4057d7ae147ae148 ## double 95.370000000000005
.quad 0x4057e00000000000 ## double 95.5
.quad 0x4057f7ae147ae148 ## double 95.870000000000005
.quad 0x405777ae147ae148 ## double 93.870000000000005
.quad 0x4057800000000000 ## double 94
.quad 0x405787ae147ae148 ## double 94.120000000000005
.quad 0x405777ae147ae148 ## double 93.870000000000005
.quad 0x405707ae147ae148 ## double 92.120000000000005
.quad 0x4056c7ae147ae148 ## double 91.120000000000005
.quad 0x4056c00000000000 ## double 91
.quad 0x4056d00000000000 ## double 91.25
.quad 0x4056b00000000000 ## double 90.75
.quad 0x4056700000000000 ## double 89.75
.quad 0x4056b00000000000 ## double 90.75
.quad 0x4056c00000000000 ## double 91
.quad 0x4056d00000000000 ## double 91.25
.quad 0x4057000000000000 ## double 92
.quad 0x4056e00000000000 ## double 91.5
.quad 0x4056c00000000000 ## double 91
.quad 0x4056e00000000000 ## double 91.5
.quad 0x4057100000000000 ## double 92.25
.quad 0x4056e7ae147ae148 ## double 91.620000000000005
.quad 0x4057300000000000 ## double 92.75
.quad 0x4057200000000000 ## double 92.5
.quad 0x4056f7ae147ae148 ## double 91.870000000000005
.quad 0x405707ae147ae148 ## double 92.120000000000005
.quad 0x4057100000000000 ## double 92.25
.quad 0x405767ae147ae148 ## double 93.620000000000005
.quad 0x405777ae147ae148 ## double 93.870000000000005
.quad 0x405797ae147ae148 ## double 94.370000000000005
.quad 0x4057900000000000 ## double 94.25
.quad 0x405787ae147ae148 ## double 94.120000000000005
.quad 0x405787ae147ae148 ## double 94.120000000000005
.quad 0x4057800000000000 ## double 94
.quad 0x4057400000000000 ## double 93
.quad 0x405707ae147ae148 ## double 92.120000000000005
.quad 0x4056b00000000000 ## double 90.75
.quad 0x405637ae147ae148 ## double 88.870000000000005
.quad 0x4056200000000000 ## double 88.5
.quad 0x4056300000000000 ## double 88.75
.quad 0x4056500000000000 ## double 89.25
.quad 0x4056a7ae147ae148 ## double 90.620000000000005
.quad 0x405687ae147ae148 ## double 90.120000000000005
.quad 0x4056900000000000 ## double 90.25
.quad 0x4056700000000000 ## double 89.75
.quad 0x405627ae147ae148 ## double 88.620000000000005
.quad 0x405637ae147ae148 ## double 88.870000000000005
.quad 0x4056700000000000 ## double 89.75
.quad 0x4055e00000000000 ## double 87.5
.quad 0x405587ae147ae148 ## double 86.120000000000005
.quad 0x405577ae147ae148 ## double 85.870000000000005
.quad 0x4055400000000000 ## double 85
.quad 0x405537ae147ae148 ## double 84.870000000000005
.quad 0x4054c7ae147ae148 ## double 83.120000000000005
.quad 0x405497ae147ae148 ## double 82.370000000000005
.quad 0x4054e00000000000 ## double 83.5
.quad 0x4054e00000000000 ## double 83.5
.quad 0x405537ae147ae148 ## double 84.870000000000005
.quad 0x4054f7ae147ae148 ## double 83.870000000000005
.quad 0x405517ae147ae148 ## double 84.370000000000005
.quad 0x405557ae147ae148 ## double 85.370000000000005
.quad 0x4055800000000000 ## double 86
.quad 0x4055800000000000 ## double 86
.quad 0x4055b00000000000 ## double 86.75
.quad 0x4055a7ae147ae148 ## double 86.620000000000005
.quad 0x4055c7ae147ae148 ## double 87.120000000000005
.quad 0x4056200000000000 ## double 88.5
.quad 0x405637ae147ae148 ## double 88.870000000000005
.quad 0x405667ae147ae148 ## double 89.620000000000005
.quad 0x4056900000000000 ## double 90.25
.quad 0x405647ae147ae148 ## double 89.120000000000005
.quad 0x405617ae147ae148 ## double 88.370000000000005
.quad 0x405617ae147ae148 ## double 88.370000000000005
.quad 0x4055d7ae147ae148 ## double 87.370000000000005
.quad 0x4055d00000000000 ## double 87.25
.quad 0x405607ae147ae148 ## double 88.120000000000005
.quad 0x405667ae147ae148 ## double 89.620000000000005
.quad 0x405627ae147ae148 ## double 88.620000000000005
.quad 0x4055f00000000000 ## double 87.75
.quad 0x405627ae147ae148 ## double 88.620000000000005
.quad 0x405617ae147ae148 ## double 88.370000000000005
.quad 0x405627ae147ae148 ## double 88.620000000000005
.quad 0x4056500000000000 ## double 89.25
.quad 0x4056900000000000 ## double 90.25
.quad 0x405697ae147ae148 ## double 90.370000000000005
.quad 0x4056a7ae147ae148 ## double 90.620000000000005
.quad 0x4056b00000000000 ## double 90.75
.quad 0x4056c7ae147ae148 ## double 91.120000000000005
.quad 0x4056c7ae147ae148 ## double 91.120000000000005
.quad 0x4057500000000000 ## double 93.25
.quad 0x405717ae147ae148 ## double 92.370000000000005
.quad 0x4056b7ae147ae148 ## double 90.870000000000005
.quad 0x405677ae147ae148 ## double 89.870000000000005
.quad 0x405697ae147ae148 ## double 90.370000000000005
.quad 0x4056b7ae147ae148 ## double 90.870000000000005
.quad 0x405707ae147ae148 ## double 92.120000000000005
.quad 0x4057100000000000 ## double 92.25
.quad 0x4056d7ae147ae148 ## double 91.370000000000005
.quad 0x4057000000000000 ## double 92
.quad 0x4057200000000000 ## double 92.5
.quad 0x405787ae147ae148 ## double 94.120000000000005
.quad 0x4057800000000000 ## double 94
.quad 0x4057800000000000 ## double 94
.quad 0x4057900000000000 ## double 94.25
.quad 0x4057f7ae147ae148 ## double 95.870000000000005
.quad 0x4057e7ae147ae148 ## double 95.620000000000005
.quad 0x4057e00000000000 ## double 95.5
.quad 0x405817ae147ae148 ## double 96.370000000000005
.quad 0x405887ae147ae148 ## double 98.120000000000005
.quad 0x405847ae147ae148 ## double 97.120000000000005
.quad 0x4057f00000000000 ## double 95.75
.quad 0x4057200000000000 ## double 92.5
.quad 0x4056b7ae147ae148 ## double 90.870000000000005
.quad 0x4057200000000000 ## double 92.5
.quad 0x4057900000000000 ## double 94.25
.quad 0x4057c00000000000 ## double 95
.quad 0x4057b00000000000 ## double 94.75
.quad 0x405747ae147ae148 ## double 93.120000000000005
.quad 0x4056c7ae147ae148 ## double 91.120000000000005
.quad 0x4056a7ae147ae148 ## double 90.620000000000005
.quad 0x4056c7ae147ae148 ## double 91.120000000000005
.quad 0x4056b00000000000 ## double 90.75
.quad 0x405667ae147ae148 ## double 89.620000000000005
.quad 0x405637ae147ae148 ## double 88.870000000000005
.quad 0x4056800000000000 ## double 90
.quad 0x4056200000000000 ## double 88.5
.quad 0x4055900000000000 ## double 86.25
.quad 0x405577ae147ae148 ## double 85.870000000000005
.quad 0x4055b7ae147ae148 ## double 86.870000000000005
.quad 0x4055c00000000000 ## double 87
.quad 0x4056000000000000 ## double 88
.quad 0x405637ae147ae148 ## double 88.870000000000005
.quad 0x4056500000000000 ## double 89.25
.quad 0x4055f7ae147ae148 ## double 87.870000000000005
.quad 0x4055a00000000000 ## double 86.5
.quad 0x4056700000000000 ## double 89.75
.quad 0x4056900000000000 ## double 90.25
.quad 0x4056c7ae147ae148 ## double 91.120000000000005
.quad 0x4056e00000000000 ## double 91.5
.quad 0x405707ae147ae148 ## double 92.120000000000005
.quad 0x405717ae147ae148 ## double 92.370000000000005
.quad 0x405777ae147ae148 ## double 93.870000000000005
.quad 0x405847ae147ae148 ## double 97.120000000000005
.quad 0x405887ae147ae148 ## double 98.120000000000005
.quad 0x4057e7ae147ae148 ## double 95.620000000000005
.quad 0x4058000000000000 ## double 96
.quad 0x4058300000000000 ## double 96.75
.quad 0x405857ae147ae148 ## double 97.370000000000005
.quad 0x4058300000000000 ## double 96.75
.quad 0x4058700000000000 ## double 97.75
.quad 0x405907ae147ae148 ## double 100.12
.quad 0x405907ae147ae148 ## double 100.12
.quad 0x4058c7ae147ae148 ## double 99.120000000000005
.quad 0x405927ae147ae148 ## double 100.62
.quad 0x4059300000000000 ## double 100.75
.quad 0x405957ae147ae148 ## double 101.37
.quad 0x4059000000000000 ## double 100
.quad 0x4058500000000000 ## double 97.25
.quad 0x4058700000000000 ## double 97.75
.quad 0x4058700000000000 ## double 97.75
.quad 0x4058d7ae147ae148 ## double 99.370000000000005
.quad 0x405917ae147ae148 ## double 100.37
.quad 0x4059100000000000 ## double 100.25
.quad 0x4058d7ae147ae148 ## double 99.370000000000005
.quad 0x4058a7ae147ae148 ## double 98.620000000000005
.quad 0x4058a7ae147ae148 ## double 98.620000000000005
.quad 0x4058c7ae147ae148 ## double 99.120000000000005
.quad 0x4058b00000000000 ## double 98.75
.quad 0x4059200000000000 ## double 100.5
.quad 0x4059400000000000 ## double 101
.quad 0x405917ae147ae148 ## double 100.37
.quad 0x4059500000000000 ## double 101.25
.quad 0x4059f00000000000 ## double 103.75
.quad 0x405a300000000000 ## double 104.75
.quad 0x405967ae147ae148 ## double 101.62
.quad 0x4059200000000000 ## double 100.5
.quad 0x4058f7ae147ae148 ## double 99.870000000000005
.quad 0x4058900000000000 ## double 98.25
.quad 0x4058d7ae147ae148 ## double 99.370000000000005
.quad 0x4058d7ae147ae148 ## double 99.370000000000005
.quad 0x4058e7ae147ae148 ## double 99.620000000000005
.quad 0x4059400000000000 ## double 101
.quad 0x4059a00000000000 ## double 102.5
.quad 0x4059f00000000000 ## double 103.75
.quad 0x405a100000000000 ## double 104.25
.quad 0x405a57ae147ae148 ## double 105.37
.quad 0x405a700000000000 ## double 105.75
.quad 0x405a700000000000 ## double 105.75
.quad 0x405a77ae147ae148 ## double 105.87
.quad 0x405a27ae147ae148 ## double 104.62
.quad 0x405a47ae147ae148 ## double 105.12
.quad 0x405a97ae147ae148 ## double 106.37
.quad 0x405a600000000000 ## double 105.5
.quad 0x405a37ae147ae148 ## double 104.87
.quad 0x405a17ae147ae148 ## double 104.37
.quad 0x4059f00000000000 ## double 103.75
.quad 0x405a000000000000 ## double 104
.quad 0x405957ae147ae148 ## double 101.37
.quad 0x4059500000000000 ## double 101.25
.quad 0x405947ae147ae148 ## double 101.12
.quad 0x4058e7ae147ae148 ## double 99.620000000000005
.quad 0x4058c7ae147ae148 ## double 99.120000000000005
.quad 0x4058d00000000000 ## double 99.25
.quad 0x4058b7ae147ae148 ## double 98.870000000000005
.quad 0x405847ae147ae148 ## double 97.120000000000005
.quad 0x4057f7ae147ae148 ## double 95.870000000000005
.quad 0x4057d00000000000 ## double 95.25
.quad 0x4057e7ae147ae148 ## double 95.620000000000005
.quad 0x4057e7ae147ae148 ## double 95.620000000000005
.quad 0x405807ae147ae148 ## double 96.120000000000005
.quad 0x405817ae147ae148 ## double 96.370000000000005
.quad 0x405817ae147ae148 ## double 96.370000000000005
.quad 0x4058100000000000 ## double 96.25
.quad 0x4057e7ae147ae148 ## double 95.620000000000005
.quad 0x4058a7ae147ae148 ## double 98.620000000000005
.quad 0x4058c00000000000 ## double 99
.quad 0x4058d7ae147ae148 ## double 99.370000000000005
.quad 0x4058f7ae147ae148 ## double 99.870000000000005
.quad 0x4058e00000000000 ## double 99.5
.quad 0x4058e7ae147ae148 ## double 99.620000000000005
.quad 0x4058f00000000000 ## double 99.75
.quad 0x4059200000000000 ## double 100.5
.quad 0x4059300000000000 ## double 100.75
.quad 0x4059500000000000 ## double 101.25
.quad 0x405967ae147ae148 ## double 101.62
.quad 0x4059700000000000 ## double 101.75
.quad 0x405987ae147ae148 ## double 102.12
.quad 0x4059800000000000 ## double 102
.quad 0x405957ae147ae148 ## double 101.37
.quad 0x4059400000000000 ## double 101
.quad 0x405967ae147ae148 ## double 101.62
.quad 0x405957ae147ae148 ## double 101.37
.quad 0x4059c00000000000 ## double 103
.quad 0x405987ae147ae148 ## double 102.12
.quad 0x405937ae147ae148 ## double 100.87
.quad 0x405897ae147ae148 ## double 98.370000000000005
.quad 0x405837ae147ae148 ## double 96.870000000000005
.quad 0x4058e00000000000 ## double 99.5
.quad 0x4058f7ae147ae148 ## double 99.870000000000005
.quad 0x4058f00000000000 ## double 99.75
.quad 0x4058c00000000000 ## double 99
.quad 0x405917ae147ae148 ## double 100.37
.quad 0x405937ae147ae148 ## double 100.87
.quad 0x4059200000000000 ## double 100.5
.quad 0x4058d00000000000 ## double 99.25
.quad 0x4058b7ae147ae148 ## double 98.870000000000005
.quad 0x4058d7ae147ae148 ## double 99.370000000000005
.quad 0x4058a7ae147ae148 ## double 98.620000000000005
.quad 0x4058700000000000 ## double 97.75
.quad 0x4058b00000000000 ## double 98.75
.quad 0x4058d7ae147ae148 ## double 99.370000000000005
.quad 0x4058e7ae147ae148 ## double 99.620000000000005
.quad 0x4058e7ae147ae148 ## double 99.620000000000005
.quad 0x4059000000000000 ## double 100
.quad 0x4058d00000000000 ## double 99.25
.quad 0x4059500000000000 ## double 101.25
.quad 0x4059400000000000 ## double 101
.quad 0x4059000000000000 ## double 100
.quad 0x405967ae147ae148 ## double 101.62
.quad 0x405957ae147ae148 ## double 101.37
.quad 0x4059800000000000 ## double 102
.quad 0x4059e00000000000 ## double 103.5
.quad 0x4059b00000000000 ## double 102.75
.quad 0x4059c00000000000 ## double 103
.quad 0x4059e7ae147ae148 ## double 103.62
.quad 0x405a600000000000 ## double 105.5
.quad 0x405a800000000000 ## double 106
.quad 0x405ad7ae147ae148 ## double 107.37
.quad 0x405aa00000000000 ## double 106.5
.quad 0x405a47ae147ae148 ## double 105.12
.quad 0x405a27ae147ae148 ## double 104.62
.quad 0x405a57ae147ae148 ## double 105.37
.quad 0x405a47ae147ae148 ## double 105.12
.quad 0x405a27ae147ae148 ## double 104.62
.quad 0x405a27ae147ae148 ## double 104.62
.quad 0x405a100000000000 ## double 104.25
.quad 0x405a100000000000 ## double 104.25
.quad 0x405a27ae147ae148 ## double 104.62
.quad 0x405a100000000000 ## double 104.25
.quad 0x405a47ae147ae148 ## double 105.12
.quad 0x405a900000000000 ## double 106.25
.quad 0x405a800000000000 ## double 106
.quad 0x405a800000000000 ## double 106
.quad 0x405a77ae147ae148 ## double 105.87
.quad 0x405a000000000000 ## double 104
.quad 0x4059e00000000000 ## double 103.5
.quad 0x4059e7ae147ae148 ## double 103.62
.quad 0x405aa00000000000 ## double 106.5
.quad 0x405a800000000000 ## double 106
.quad 0x405a200000000000 ## double 104.5
.quad 0x405a600000000000 ## double 105.5
.quad 0x405af7ae147ae148 ## double 107.87
.quad 0x405b100000000000 ## double 108.25
.quad 0x405b47ae147ae148 ## double 109.12
.quad 0x405b37ae147ae148 ## double 108.87
.quad 0x405b47ae147ae148 ## double 109.12
.quad 0x405b57ae147ae148 ## double 109.37
.quad 0x405b900000000000 ## double 110.25
.quad 0x405bb00000000000 ## double 110.75
.quad 0x405bb00000000000 ## double 110.75
.quad 0x405b500000000000 ## double 109.25
.quad 0x405b17ae147ae148 ## double 108.37
.quad 0x405bc00000000000 ## double 111
.quad 0x405bf7ae147ae148 ## double 111.87
.quad 0x405be7ae147ae148 ## double 111.62
.quad 0x405c57ae147ae148 ## double 113.37
.quad 0x405c600000000000 ## double 113.5
.quad 0x405ca00000000000 ## double 114.5
.quad 0x405cb00000000000 ## double 114.75
.quad 0x405c900000000000 ## double 114.25
.quad 0x405c600000000000 ## double 113.5
.quad 0x405c900000000000 ## double 114.25
.quad 0x405c97ae147ae148 ## double 114.37
.quad 0x405ca7ae147ae148 ## double 114.62
.quad 0x405ca7ae147ae148 ## double 114.62
.quad 0x405c67ae147ae148 ## double 113.62
.quad 0x405c27ae147ae148 ## double 112.62
.quad 0x405cc00000000000 ## double 115
.quad 0x405cd7ae147ae148 ## double 115.37
.quad 0x405dc00000000000 ## double 119
.quad 0x40600428f5c28f5c ## double 128.13
.quad 0x405ff00000000000 ## double 127.75
.quad 0x4060580000000000 ## double 130.75
.quad 0x40603428f5c28f5c ## double 129.63
.quad 0x4060280000000000 ## double 129.25
.quad 0x4060500000000000 ## double 130.5
.quad 0x4060b428f5c28f5c ## double 133.63
.quad 0x4060b80000000000 ## double 133.75
.quad 0x4060dc28f5c28f5c ## double 134.88
.quad 0x4060e00000000000 ## double 135
.quad 0x4060900000000000 ## double 132.5
.quad 0x4060700000000000 ## double 131.5
.quad 0x4060700000000000 ## double 131.5
.quad 0x40605c28f5c28f5c ## double 130.88
.quad 0x4060a00000000000 ## double 133
.quad 0x4060e428f5c28f5c ## double 135.13
.quad 0x4061200000000000 ## double 137
.quad 0x4061480000000000 ## double 138.25
.quad 0x4061580000000000 ## double 138.75
.quad 0x4061780000000000 ## double 139.75
.quad 0x40613c28f5c28f5c ## double 137.88
.quad 0x4061080000000000 ## double 136.25
.quad 0x4060e00000000000 ## double 135
.quad 0x4060bc28f5c28f5c ## double 133.88
.quad 0x4060900000000000 ## double 132.5
.quad 0x40603c28f5c28f5c ## double 129.88
.quad 0x4060600000000000 ## double 131
.quad 0x4060400000000000 ## double 130
.quad 0x4060100000000000 ## double 128.5
.quad 0x405fe7ae147ae148 ## double 127.62
.quad 0x405fe7ae147ae148 ## double 127.62
.quad 0x405fc7ae147ae148 ## double 127.12
.quad 0x405fc00000000000 ## double 127
.quad 0x405f57ae147ae148 ## double 125.37
.quad 0x405f400000000000 ## double 125
.quad 0x405ea7ae147ae148 ## double 122.62
.quad 0x405e67ae147ae148 ## double 121.62
.quad 0x405e07ae147ae148 ## double 120.12
.quad 0x405de7ae147ae148 ## double 119.62
.quad 0x405e07ae147ae148 ## double 120.12
.quad 0x405d900000000000 ## double 118.25
.quad 0x405d07ae147ae148 ## double 116.12
.quad 0x405b500000000000 ## double 109.25
.quad 0x405af00000000000 ## double 107.75
.quad 0x405ac7ae147ae148 ## double 107.12
.quad 0x405b100000000000 ## double 108.25
.quad 0x405b17ae147ae148 ## double 108.37
.quad 0x405bb00000000000 ## double 110.75
.quad 0x405b97ae147ae148 ## double 110.37
.quad 0x405bf7ae147ae148 ## double 111.87
.quad 0x405c400000000000 ## double 113
.quad 0x405c77ae147ae148 ## double 113.87
.quad 0x405c700000000000 ## double 113.75
.quad 0x405c600000000000 ## double 113.5
.quad 0x405c77ae147ae148 ## double 113.87
.quad 0x405c77ae147ae148 ## double 113.87
.quad 0x405c900000000000 ## double 114.25
.quad 0x405c900000000000 ## double 114.25
.quad 0x405c97ae147ae148 ## double 114.37
.quad 0x405ca7ae147ae148 ## double 114.62
.quad 0x405c800000000000 ## double 114
.quad 0x405c600000000000 ## double 113.5
.quad 0x405bf00000000000 ## double 111.75
.quad 0x405c200000000000 ## double 112.5
.quad 0x405ca7ae147ae148 ## double 114.62
.quad 0x405ca00000000000 ## double 114.5
.quad 0x405c47ae147ae148 ## double 113.12
.quad 0x405c700000000000 ## double 113.75
.quad 0x405c37ae147ae148 ## double 112.87
.quad 0x405d000000000000 ## double 116
.quad 0x405cb7ae147ae148 ## double 114.87
.quad 0x405cc00000000000 ## double 115
.quad 0x405ca00000000000 ## double 114.5
.quad 0x405ca00000000000 ## double 114.5
.quad 0x405c37ae147ae148 ## double 112.87
.quad 0x405c77ae147ae148 ## double 113.87
.quad 0x405c87ae147ae148 ## double 114.12
.quad 0x405c900000000000 ## double 114.25
.quad 0x405c900000000000 ## double 114.25
.quad 0x405c87ae147ae148 ## double 114.12
.quad 0x405cc7ae147ae148 ## double 115.12
.quad 0x405cb7ae147ae148 ## double 114.87
.quad 0x405c87ae147ae148 ## double 114.12
.quad 0x405c67ae147ae148 ## double 113.62
.quad 0x405c800000000000 ## double 114
.quad 0x405c600000000000 ## double 113.5
.quad 0x405c500000000000 ## double 113.25
.quad 0x405b900000000000 ## double 110.25
.quad 0x405ae7ae147ae148 ## double 107.62
.quad 0x405b100000000000 ## double 108.25
.quad 0x405b400000000000 ## double 109
.quad 0x405b07ae147ae148 ## double 108.12
.quad 0x405b27ae147ae148 ## double 108.62
.quad 0x405ad7ae147ae148 ## double 107.37
.quad 0x405aa7ae147ae148 ## double 106.62
.quad 0x405ab7ae147ae148 ## double 106.87
.quad 0x405af7ae147ae148 ## double 107.87
.quad 0x405ae00000000000 ## double 107.5
.quad 0x405b500000000000 ## double 109.25
.quad 0x405b400000000000 ## double 109
.quad 0x405b100000000000 ## double 108.25
.quad 0x405b300000000000 ## double 108.75
.quad 0x405b400000000000 ## double 109
.quad 0x405a57ae147ae148 ## double 105.37
.quad 0x405967ae147ae148 ## double 101.62
.quad 0x405917ae147ae148 ## double 100.37
.quad 0x405937ae147ae148 ## double 100.87
.quad 0x4059800000000000 ## double 102
.quad 0x4059f7ae147ae148 ## double 103.87
.quad 0x405aa7ae147ae148 ## double 106.62
.quad 0x405b27ae147ae148 ## double 108.62
.quad 0x405b600000000000 ## double 109.5
.quad 0x405b400000000000 ## double 109
.quad 0x405b100000000000 ## double 108.25
.quad 0x405b400000000000 ## double 109
.quad 0x405b97ae147ae148 ## double 110.37
.quad 0x405b600000000000 ## double 109.5
.quad 0x405ac7ae147ae148 ## double 107.12
.quad 0x405aa7ae147ae148 ## double 106.62
.quad 0x405ab00000000000 ## double 106.75
.quad 0x405aa00000000000 ## double 106.5
.quad 0x405a900000000000 ## double 106.25
.quad 0x405af7ae147ae148 ## double 107.87
.quad 0x405b400000000000 ## double 109
.quad 0x405b500000000000 ## double 109.25
.quad 0x405ae7ae147ae148 ## double 107.62
.quad 0x405a700000000000 ## double 105.75
.quad 0x405a400000000000 ## double 105
.quad 0x405af00000000000 ## double 107.75
.quad 0x405b17ae147ae148 ## double 108.37
.quad 0x405af00000000000 ## double 107.75
.quad 0x405ae7ae147ae148 ## double 107.62
.quad 0x405a600000000000 ## double 105.5
.quad 0x4059e00000000000 ## double 103.5
.quad 0x4059f7ae147ae148 ## double 103.87
.quad 0x4059a7ae147ae148 ## double 102.62
.quad 0x405977ae147ae148 ## double 101.87
.quad 0x405a07ae147ae148 ## double 104.12
.quad 0x405a17ae147ae148 ## double 104.37
.quad 0x405a07ae147ae148 ## double 104.12
.quad 0x405a17ae147ae148 ## double 104.37
.quad 0x4059400000000000 ## double 101
.quad 0x405897ae147ae148 ## double 98.370000000000005
.quad 0x4059400000000000 ## double 101
.quad 0x4059700000000000 ## double 101.75
.quad 0x4059b00000000000 ## double 102.75
.quad 0x4059d7ae147ae148 ## double 103.37
.quad 0x405a700000000000 ## double 105.75
.quad 0x405a500000000000 ## double 105.25
.quad 0x405a47ae147ae148 ## double 105.12
.quad 0x405a000000000000 ## double 104
.quad 0x4059e7ae147ae148 ## double 103.62
.quad 0x4059f7ae147ae148 ## double 103.87
.quad 0x4059f7ae147ae148 ## double 103.87
.quad 0x405a400000000000 ## double 105
.quad 0x405a17ae147ae148 ## double 104.37
.quad 0x405b600000000000 ## double 109.5
.quad 0x405bb00000000000 ## double 110.75
.quad 0x405c17ae147ae148 ## double 112.37
.quad 0x405c37ae147ae148 ## double 112.87
.quad 0x405c17ae147ae148 ## double 112.37
.quad 0x405c300000000000 ## double 112.75
.quad 0x405ca7ae147ae148 ## double 114.62
.quad 0x405ce00000000000 ## double 115.5
.quad 0x405d000000000000 ## double 116
.quad 0x405d700000000000 ## double 117.75
.quad 0x405e100000000000 ## double 120.25
.quad 0x405e07ae147ae148 ## double 120.12
.quad 0x405e67ae147ae148 ## double 121.62
.quad 0x405ea00000000000 ## double 122.5
.quad 0x405ec7ae147ae148 ## double 123.12
.quad 0x405e87ae147ae148 ## double 122.12
.quad 0x405e17ae147ae148 ## double 120.37
.quad 0x405dd00000000000 ## double 119.25
.quad 0x405df00000000000 ## double 119.75
.quad 0x405dd00000000000 ## double 119.25
.quad 0x405da00000000000 ## double 118.5
.quad 0x405d67ae147ae148 ## double 117.62
.quad 0x405db00000000000 ## double 118.75
.quad 0x405db00000000000 ## double 118.75
.quad 0x405d87ae147ae148 ## double 118.12
.quad 0x405d97ae147ae148 ## double 118.37
.quad 0x405d87ae147ae148 ## double 118.12
.quad 0x405d800000000000 ## double 118
.quad 0x405d500000000000 ## double 117.25
.quad 0x405de00000000000 ## double 119.5
.quad 0x405dc7ae147ae148 ## double 119.12
.quad 0x405dc7ae147ae148 ## double 119.12
.quad 0x405dc7ae147ae148 ## double 119.12
.quad 0x405e300000000000 ## double 120.75
.quad 0x405e500000000000 ## double 121.25
.quad 0x405e77ae147ae148 ## double 121.87
.quad 0x405e87ae147ae148 ## double 122.12
.quad 0x405e37ae147ae148 ## double 120.87
.quad 0x405e100000000000 ## double 120.25
.quad 0x405e37ae147ae148 ## double 120.87
.quad 0x405e600000000000 ## double 121.5
.quad 0x405e900000000000 ## double 122.25
.quad 0x405e77ae147ae148 ## double 121.87
.quad 0x405e37ae147ae148 ## double 120.87
.quad 0x405e300000000000 ## double 120.75
.quad 0x405e37ae147ae148 ## double 120.87
.quad 0x405e87ae147ae148 ## double 122.12
.quad 0x405df7ae147ae148 ## double 119.87
.quad 0x405d800000000000 ## double 118
.quad 0x405db7ae147ae148 ## double 118.87
.quad 0x405db00000000000 ## double 118.75
.quad 0x405d700000000000 ## double 117.75
.quad 0x405d57ae147ae148 ## double 117.37
.quad 0x405d100000000000 ## double 116.25
.quad 0x405d100000000000 ## double 116.25
.quad 0x405cf00000000000 ## double 115.75
.quad 0x405ce7ae147ae148 ## double 115.62
.quad 0x405ce00000000000 ## double 115.5
.quad 0x405ca00000000000 ## double 114.5
.quad 0x405c37ae147ae148 ## double 112.87
.quad 0x405bd00000000000 ## double 111.25
.quad 0x405bc00000000000 ## double 111
.quad 0x405bd7ae147ae148 ## double 111.37
.quad 0x405ba7ae147ae148 ## double 110.62
.quad 0x405b87ae147ae148 ## double 110.12
.quad 0x405b47ae147ae148 ## double 109.12
.quad 0x405b67ae147ae148 ## double 109.62
.quad 0x405b500000000000 ## double 109.25
.quad 0x405b400000000000 ## double 109
.quad 0x405b800000000000 ## double 110
.quad 0x405b87ae147ae148 ## double 110.12
.quad 0x405b97ae147ae148 ## double 110.37
.quad 0x405b600000000000 ## double 109.5
.quad 0x405b600000000000 ## double 109.5
.quad 0x405b900000000000 ## double 110.25
.quad 0x405bb7ae147ae148 ## double 110.87
.quad 0x405bd00000000000 ## double 111.25
.quad 0x405bd00000000000 ## double 111.25
.quad 0x405b000000000000 ## double 108
.quad 0x405aa00000000000 ## double 106.5
.quad 0x405a97ae147ae148 ## double 106.37
.quad 0x405a77ae147ae148 ## double 105.87
.quad 0x405a97ae147ae148 ## double 106.37
.quad 0x405ab7ae147ae148 ## double 106.87
.quad 0x405af00000000000 ## double 107.75
.quad 0x405ab00000000000 ## double 106.75
.quad 0x405a77ae147ae148 ## double 105.87
.quad 0x405ab00000000000 ## double 106.75
.quad 0x405ab7ae147ae148 ## double 106.87
.quad 0x405a77ae147ae148 ## double 105.87
.quad 0x405a300000000000 ## double 104.75
.quad 0x405a700000000000 ## double 105.75
.quad 0x405a87ae147ae148 ## double 106.12
.quad 0x405ae00000000000 ## double 107.5
.quad 0x405b200000000000 ## double 108.5
.quad 0x405b57ae147ae148 ## double 109.37
.quad 0x405b47ae147ae148 ## double 109.12
.quad 0x405b300000000000 ## double 108.75
.quad 0x405ad7ae147ae148 ## double 107.37
.quad 0x405ac00000000000 ## double 107
.quad 0x405b100000000000 ## double 108.25
.quad 0x405af00000000000 ## double 107.75
.quad 0x405ad7ae147ae148 ## double 107.37
.quad 0x405ad7ae147ae148 ## double 107.37
.quad 0x405a900000000000 ## double 106.25
.quad 0x405a700000000000 ## double 105.75
.quad 0x405a77ae147ae148 ## double 105.87
.quad 0x405a57ae147ae148 ## double 105.37
.quad 0x405a17ae147ae148 ## double 104.37
.quad 0x405a27ae147ae148 ## double 104.62
.quad 0x405a600000000000 ## double 105.5
.quad 0x405a07ae147ae148 ## double 104.12
.quad 0x4059f7ae147ae148 ## double 103.87
.quad 0x405a600000000000 ## double 105.5
.quad 0x405a47ae147ae148 ## double 105.12
.quad 0x4059c7ae147ae148 ## double 103.12
.quad 0x405a200000000000 ## double 104.5
.quad 0x405a27ae147ae148 ## double 104.62
.quad 0x405a100000000000 ## double 104.25
.quad 0x4059d00000000000 ## double 103.25
.quad 0x405997ae147ae148 ## double 102.37
.quad 0x4059d00000000000 ## double 103.25
.quad 0x4059f7ae147ae148 ## double 103.87
.quad 0x4059f00000000000 ## double 103.75
.quad 0x405957ae147ae148 ## double 101.37
.quad 0x4058f7ae147ae148 ## double 99.870000000000005
.quad 0x4058b00000000000 ## double 98.75
.quad 0x4058f00000000000 ## double 99.75
.quad 0x4058a7ae147ae148 ## double 98.620000000000005
.quad 0x405867ae147ae148 ## double 97.620000000000005
.quad 0x4058700000000000 ## double 97.75
.quad 0x4058500000000000 ## double 97.25
.quad 0x405877ae147ae148 ## double 97.870000000000005
.quad 0x405877ae147ae148 ## double 97.870000000000005
.quad 0x4058900000000000 ## double 98.25
.quad 0x4058c7ae147ae148 ## double 99.120000000000005
.quad 0x4058f7ae147ae148 ## double 99.870000000000005
.quad 0x4058e7ae147ae148 ## double 99.620000000000005
.quad 0x405937ae147ae148 ## double 100.87
.quad 0x405917ae147ae148 ## double 100.37
.quad 0x405897ae147ae148 ## double 98.370000000000005
.quad 0x4058d00000000000 ## double 99.25
.quad 0x405917ae147ae148 ## double 100.37
.quad 0x4059000000000000 ## double 100
.quad 0x4059500000000000 ## double 101.25
.quad 0x405937ae147ae148 ## double 100.87
.quad 0x405967ae147ae148 ## double 101.62
.quad 0x405917ae147ae148 ## double 100.37
.quad 0x4058e00000000000 ## double 99.5
.quad 0x405887ae147ae148 ## double 98.120000000000005
.quad 0x4057a7ae147ae148 ## double 94.620000000000005
.quad 0x4057c7ae147ae148 ## double 95.120000000000005
.quad 0x4057d7ae147ae148 ## double 95.370000000000005
.quad 0x4057e00000000000 ## double 95.5
.quad 0x4057f00000000000 ## double 95.75
.quad 0x4057d7ae147ae148 ## double 95.370000000000005
.quad 0x4057c7ae147ae148 ## double 95.120000000000005
.quad 0x4057d00000000000 ## double 95.25
.quad 0x4057d00000000000 ## double 95.25
.quad 0x4057d00000000000 ## double 95.25
.quad 0x4057e00000000000 ## double 95.5
.quad 0x405857ae147ae148 ## double 97.370000000000005
.quad 0x4058400000000000 ## double 97
.quad 0x4058300000000000 ## double 96.75
.quad 0x405887ae147ae148 ## double 98.120000000000005
.quad 0x4058d00000000000 ## double 99.25
.quad 0x4058d00000000000 ## double 99.25
.quad 0x405977ae147ae148 ## double 101.87
.quad 0x4058e00000000000 ## double 99.5
.quad 0x405887ae147ae148 ## double 98.120000000000005
.quad 0x405887ae147ae148 ## double 98.120000000000005
.quad 0x4058a7ae147ae148 ## double 98.620000000000005
.quad 0x4058d7ae147ae148 ## double 99.370000000000005
.quad 0x4059300000000000 ## double 100.75
.quad 0x4059300000000000 ## double 100.75
.quad 0x4059200000000000 ## double 100.5
.quad 0x4058f00000000000 ## double 99.75
.quad 0x4058d7ae147ae148 ## double 99.370000000000005
.quad 0x4058f00000000000 ## double 99.75
.quad 0x4058900000000000 ## double 98.25
.quad 0x4058a00000000000 ## double 98.5
.quad 0x4058d00000000000 ## double 99.25
.quad 0x4058b7ae147ae148 ## double 98.870000000000005
.quad 0x4058a7ae147ae148 ## double 98.620000000000005
.quad 0x4058600000000000 ## double 97.5
.quad 0x4058800000000000 ## double 98
.quad 0x4058700000000000 ## double 97.75
.quad 0x405887ae147ae148 ## double 98.120000000000005
.quad 0x4058b7ae147ae148 ## double 98.870000000000005
.quad 0x4058e7ae147ae148 ## double 99.620000000000005
.quad 0x405917ae147ae148 ## double 100.37
.quad 0x405957ae147ae148 ## double 101.37
.quad 0x4059300000000000 ## double 100.75
.quad 0x405937ae147ae148 ## double 100.87
.quad 0x4059700000000000 ## double 101.75
.quad 0x4059b00000000000 ## double 102.75
.quad 0x405a200000000000 ## double 104.5
.quad 0x405a300000000000 ## double 104.75
.quad 0x405a27ae147ae148 ## double 104.62
.quad 0x405a57ae147ae148 ## double 105.37
.quad 0x4059c00000000000 ## double 103
.quad 0x4059b7ae147ae148 ## double 102.87
.quad 0x405af00000000000 ## double 107.75
.quad 0x405b000000000000 ## double 108
.quad 0x405af00000000000 ## double 107.75
.quad 0x405b100000000000 ## double 108.25
.quad 0x405b57ae147ae148 ## double 109.37
.quad 0x405b500000000000 ## double 109.25
.quad 0x405b07ae147ae148 ## double 108.12
.quad 0x405b47ae147ae148 ## double 109.12
.quad 0x405b100000000000 ## double 108.25
.quad 0x405b100000000000 ## double 108.25
.quad 0x405b700000000000 ## double 109.75
.quad 0x405b57ae147ae148 ## double 109.37
.quad 0x405bd00000000000 ## double 111.25
.quad 0x405c200000000000 ## double 112.5
.quad 0x405d700000000000 ## double 117.75
.quad 0x405d77ae147ae148 ## double 117.87
.quad 0x405d37ae147ae148 ## double 116.87
.quad 0x405d500000000000 ## double 117.25
.quad 0x405d17ae147ae148 ## double 116.37
.quad 0x405d200000000000 ## double 116.5
.quad 0x405d07ae147ae148 ## double 116.12
.quad 0x405d100000000000 ## double 116.25
.quad 0x405cf7ae147ae148 ## double 115.87
.quad 0x405d600000000000 ## double 117.5
.quad 0x405d500000000000 ## double 117.25
.quad 0x405d17ae147ae148 ## double 116.37
.quad 0x405d17ae147ae148 ## double 116.37
.quad 0x405d400000000000 ## double 117
.quad 0x405d67ae147ae148 ## double 117.62
.quad 0x405db00000000000 ## double 118.75
.quad 0x405d900000000000 ## double 118.25
.quad 0x405d67ae147ae148 ## double 117.62
.quad 0x405d600000000000 ## double 117.5
.quad 0x405d97ae147ae148 ## double 118.37
.quad 0x405d87ae147ae148 ## double 118.12
.quad 0x405d200000000000 ## double 116.5
.quad 0x405d07ae147ae148 ## double 116.12
.quad 0x405c87ae147ae148 ## double 114.12
.quad 0x405c77ae147ae148 ## double 113.87
.quad 0x405cd7ae147ae148 ## double 115.37
.quad 0x405ce00000000000 ## double 115.5
.quad 0x405cf00000000000 ## double 115.75
.quad 0x405d17ae147ae148 ## double 116.37
.quad 0x405d200000000000 ## double 116.5
.quad 0x405d200000000000 ## double 116.5
.quad 0x405d900000000000 ## double 118.25
.quad 0x405d900000000000 ## double 118.25
.quad 0x405dc7ae147ae148 ## double 119.12
.quad 0x405dd7ae147ae148 ## double 119.37
.quad 0x405d67ae147ae148 ## double 117.62
.quad 0x405cf00000000000 ## double 115.75
.quad 0x405c800000000000 ## double 114
.quad 0x405cc00000000000 ## double 115
.quad 0x405d200000000000 ## double 116.5
.quad 0x405cd00000000000 ## double 115.25
.quad 0x405c900000000000 ## double 114.25
.quad 0x405c400000000000 ## double 113
.quad 0x405c27ae147ae148 ## double 112.62
.quad 0x405c97ae147ae148 ## double 114.37
.quad 0x405c87ae147ae148 ## double 114.12
.quad 0x405c97ae147ae148 ## double 114.37
.quad 0x405ca7ae147ae148 ## double 114.62
.quad 0x405ce00000000000 ## double 115.5
.quad 0x405d300000000000 ## double 116.75
.quad 0x405d200000000000 ## double 116.5
.quad 0x405cc00000000000 ## double 115
.quad 0x405cd00000000000 ## double 115.25
.quad 0x405cc7ae147ae148 ## double 115.12
.quad 0x405cf7ae147ae148 ## double 115.87
.quad 0x405cc00000000000 ## double 115
.quad 0x405c300000000000 ## double 112.75
.quad 0x405c17ae147ae148 ## double 112.37
.quad 0x405c300000000000 ## double 112.75
.quad 0x405c200000000000 ## double 112.5
.quad 0x405c07ae147ae148 ## double 112.12
.quad 0x405c500000000000 ## double 113.25
.quad 0x405cc7ae147ae148 ## double 115.12
.quad 0x405ca00000000000 ## double 114.5
.quad 0x405c300000000000 ## double 112.75
.quad 0x405bc7ae147ae148 ## double 111.12
.quad 0x405b77ae147ae148 ## double 109.87
.quad 0x405b97ae147ae148 ## double 110.37
.quad 0x405bd00000000000 ## double 111.25
.quad 0x405ba00000000000 ## double 110.5
.quad 0x405b67ae147ae148 ## double 109.62
.quad 0x405ba00000000000 ## double 110.5
.quad 0x405be00000000000 ## double 111.5
.quad 0x405be7ae147ae148 ## double 111.62
.quad 0x405b400000000000 ## double 109
.quad 0x405b600000000000 ## double 109.5
.quad 0x405b67ae147ae148 ## double 109.62
.quad 0x405b97ae147ae148 ## double 110.37
.quad 0x405b700000000000 ## double 109.75
.quad 0x405b87ae147ae148 ## double 110.12
.quad 0x405bb7ae147ae148 ## double 110.87
.quad 0x405bb00000000000 ## double 110.75
.quad 0x405b87ae147ae148 ## double 110.12
.quad 0x405b700000000000 ## double 109.75
.quad 0x405b800000000000 ## double 110
.quad 0x405b900000000000 ## double 110.25
.quad 0x405b87ae147ae148 ## double 110.12
.quad 0x405b77ae147ae148 ## double 109.87
.quad 0x405bf7ae147ae148 ## double 111.87
.quad 0x405c200000000000 ## double 112.5
.quad 0x405c200000000000 ## double 112.5
.quad 0x405bf7ae147ae148 ## double 111.87
.quad 0x405c47ae147ae148 ## double 113.12
.quad 0x405c37ae147ae148 ## double 112.87
.quad 0x405bd00000000000 ## double 111.25
.quad 0x405b77ae147ae148 ## double 109.87
.quad 0x405b900000000000 ## double 110.25
.quad 0x405b87ae147ae148 ## double 110.12
.quad 0x405b700000000000 ## double 109.75
.quad 0x405bd7ae147ae148 ## double 111.37
.quad 0x405bc7ae147ae148 ## double 111.12
.quad 0x405c200000000000 ## double 112.5
.quad 0x405c700000000000 ## double 113.75
.quad 0x405c67ae147ae148 ## double 113.62
.quad 0x405ca7ae147ae148 ## double 114.62
.quad 0x405cf00000000000 ## double 115.75
.quad 0x405cb00000000000 ## double 114.75
.quad 0x405cc7ae147ae148 ## double 115.12
.quad 0x405c97ae147ae148 ## double 114.37
.quad 0x405c600000000000 ## double 113.5
.quad 0x405c37ae147ae148 ## double 112.87
.quad 0x405c47ae147ae148 ## double 113.12
.quad 0x405c47ae147ae148 ## double 113.12
.quad 0x405bf00000000000 ## double 111.75
.quad 0x405c000000000000 ## double 112
.quad 0x405bc00000000000 ## double 111
.quad 0x405c100000000000 ## double 112.25
.quad 0x405bd7ae147ae148 ## double 111.37
.quad 0x405b900000000000 ## double 110.25
.quad 0x405b900000000000 ## double 110.25
.quad 0x405b100000000000 ## double 108.25
.quad 0x405b000000000000 ## double 108
.quad 0x405b200000000000 ## double 108.5
.quad 0x405b600000000000 ## double 109.5
.quad 0x405b600000000000 ## double 109.5
.quad 0x405b700000000000 ## double 109.75
.quad 0x405b97ae147ae148 ## double 110.37
.quad 0x405bb7ae147ae148 ## double 110.87
.quad 0x405b97ae147ae148 ## double 110.37
.quad 0x405b900000000000 ## double 110.25
.quad 0x405b57ae147ae148 ## double 109.37
.quad 0x405ba00000000000 ## double 110.5
.quad 0x405bd7ae147ae148 ## double 111.37
.quad 0x405d200000000000 ## double 116.5
.quad 0x405db00000000000 ## double 118.75
.quad 0x405db00000000000 ## double 118.75
.quad 0x405dd7ae147ae148 ## double 119.37
.quad 0x405de7ae147ae148 ## double 119.62
.quad 0x405da00000000000 ## double 118.5
.quad 0x405df7ae147ae148 ## double 119.87
.quad 0x405e27ae147ae148 ## double 120.62
.quad 0x405e47ae147ae148 ## double 121.12
.quad 0x405e67ae147ae148 ## double 121.62
.quad 0x405e400000000000 ## double 121
.quad 0x405e37ae147ae148 ## double 120.87
.quad 0x405e97ae147ae148 ## double 122.37
.quad 0x405e900000000000 ## double 122.25
.quad 0x405e77ae147ae148 ## double 121.87
.quad 0x405ee00000000000 ## double 123.5
.quad 0x405ee7ae147ae148 ## double 123.62
.quad 0x405f600000000000 ## double 125.5
.quad 0x405f800000000000 ## double 126
.quad 0x405fa7ae147ae148 ## double 126.62
.quad 0x405fb7ae147ae148 ## double 126.87
.quad 0x405fc7ae147ae148 ## double 127.12
.quad 0x405fb00000000000 ## double 126.75
.quad 0x405f900000000000 ## double 126.25
.quad 0x405fa7ae147ae148 ## double 126.62
.quad 0x405fe00000000000 ## double 127.5
.quad 0x40600c28f5c28f5c ## double 128.38
.quad 0x40601428f5c28f5c ## double 128.63
.quad 0x405ff7ae147ae148 ## double 127.87
.quad 0x40601c28f5c28f5c ## double 128.88
.quad 0x40603c28f5c28f5c ## double 129.88
.quad 0x40605c28f5c28f5c ## double 130.88
.quad 0x40605c28f5c28f5c ## double 130.88
.quad 0x4060280000000000 ## double 129.25
.quad 0x405ff7ae147ae148 ## double 127.87
.quad 0x405fc00000000000 ## double 127
.quad 0x405f17ae147ae148 ## double 124.37
.quad 0x405f27ae147ae148 ## double 124.62
.quad 0x405f07ae147ae148 ## double 124.12
.quad 0x405f17ae147ae148 ## double 124.37
.quad 0x405f57ae147ae148 ## double 125.37
.quad 0x405f400000000000 ## double 125
.quad 0x405f200000000000 ## double 124.5
.quad 0x405f47ae147ae148 ## double 125.12
.quad 0x405ef00000000000 ## double 123.75
.quad 0x405f07ae147ae148 ## double 124.12
.quad 0x405e900000000000 ## double 122.25
.quad 0x405ea00000000000 ## double 122.5
.quad 0x405e97ae147ae148 ## double 122.37
.quad 0x405eb00000000000 ## double 122.75
.quad 0x405ec7ae147ae148 ## double 123.12
.quad 0x405ea7ae147ae148 ## double 122.62
.quad 0x405e900000000000 ## double 122.25
.quad 0x405ee7ae147ae148 ## double 123.62
.quad 0x405f000000000000 ## double 124
.quad 0x405ec7ae147ae148 ## double 123.12
.quad 0x405ef7ae147ae148 ## double 123.87
.quad 0x405ee00000000000 ## double 123.5
.quad 0x405ed00000000000 ## double 123.25
.quad 0x405ee7ae147ae148 ## double 123.62
.quad 0x405f07ae147ae148 ## double 124.12
.quad 0x405ed00000000000 ## double 123.25
.quad 0x405e77ae147ae148 ## double 121.87
.quad 0x405e67ae147ae148 ## double 121.62
.quad 0x405e700000000000 ## double 121.75
.quad 0x405e300000000000 ## double 120.75
.quad 0x405e77ae147ae148 ## double 121.87
.quad 0x405e300000000000 ## double 120.75
.quad 0x405e57ae147ae148 ## double 121.37
.quad 0x405eb00000000000 ## double 122.75
.quad 0x405e87ae147ae148 ## double 122.12
.quad 0x405e67ae147ae148 ## double 121.62
.quad 0x405dd7ae147ae148 ## double 119.37
.quad 0x405db00000000000 ## double 118.75
.quad 0x405df00000000000 ## double 119.75
.quad 0x405df00000000000 ## double 119.75
.quad 0x405df00000000000 ## double 119.75
.quad 0x405d300000000000 ## double 116.75
.quad 0x405da00000000000 ## double 118.5
.quad 0x405d500000000000 ## double 117.25
.quad 0x405ce7ae147ae148 ## double 115.62
.quad 0x405d07ae147ae148 ## double 116.12
.quad 0x405d000000000000 ## double 116
.quad 0x405d600000000000 ## double 117.5
.quad 0x405d87ae147ae148 ## double 118.12
.quad 0x405d600000000000 ## double 117.5
.quad 0x405dd00000000000 ## double 119.25
.quad 0x405e17ae147ae148 ## double 120.37
.quad 0x405e000000000000 ## double 120
.quad 0x405e27ae147ae148 ## double 120.62
.quad 0x405e07ae147ae148 ## double 120.12
.quad 0x405e57ae147ae148 ## double 121.37
.quad 0x405eb7ae147ae148 ## double 122.87
.quad 0x405ef7ae147ae148 ## double 123.87
.quad 0x405f17ae147ae148 ## double 124.37
.quad 0x405eb7ae147ae148 ## double 122.87
.quad 0x405e500000000000 ## double 121.25
.quad 0x405e500000000000 ## double 121.25
.quad 0x405ea00000000000 ## double 122.5
.quad 0x405ee00000000000 ## double 123.5
.quad 0x405f27ae147ae148 ## double 124.62
.quad 0x405f27ae147ae148 ## double 124.62
.quad 0x405f37ae147ae148 ## double 124.87
.quad 0x405f300000000000 ## double 124.75
.quad 0x405f17ae147ae148 ## double 124.37
.quad 0x405e77ae147ae148 ## double 121.87
.quad 0x405e100000000000 ## double 120.25
.quad 0x405df00000000000 ## double 119.75
.quad 0x405da00000000000 ## double 118.5
.quad 0x405db00000000000 ## double 118.75
.quad 0x405d200000000000 ## double 116.5
.quad 0x405d100000000000 ## double 116.25
.quad 0x405c87ae147ae148 ## double 114.12
.quad 0x405c97ae147ae148 ## double 114.37
.quad 0x405cd7ae147ae148 ## double 115.37
.quad 0x405cd00000000000 ## double 115.25
.quad 0x405d200000000000 ## double 116.5
.quad 0x405ce00000000000 ## double 115.5
.quad 0x405c87ae147ae148 ## double 114.12
.quad 0x405c67ae147ae148 ## double 113.62
.quad 0x405c67ae147ae148 ## double 113.62
.quad 0x405c57ae147ae148 ## double 113.37
.quad 0x405c77ae147ae148 ## double 113.87
.quad 0x405c67ae147ae148 ## double 113.62
.quad 0x405c57ae147ae148 ## double 113.37
.quad 0x405c87ae147ae148 ## double 114.12
.quad 0x405cc00000000000 ## double 115
.quad 0x405cd7ae147ae148 ## double 115.37
.quad 0x405cf7ae147ae148 ## double 115.87
.quad 0x405cc00000000000 ## double 115
.quad 0x405cd7ae147ae148 ## double 115.37
.quad 0x405cd00000000000 ## double 115.25
.quad 0x405c47ae147ae148 ## double 113.12
.quad 0x405c500000000000 ## double 113.25
.quad 0x405c77ae147ae148 ## double 113.87
.quad 0x405cb00000000000 ## double 114.75
.quad 0x405bc7ae147ae148 ## double 111.12
.quad 0x405c600000000000 ## double 113.5
.quad 0x405c700000000000 ## double 113.75
.quad 0x405c800000000000 ## double 114
.quad 0x405c300000000000 ## double 112.75
.quad 0x405c07ae147ae148 ## double 112.12
.quad 0x405c37ae147ae148 ## double 112.87
.quad 0x405bf00000000000 ## double 111.75
.quad 0x405c47ae147ae148 ## double 113.12
.quad 0x405cc00000000000 ## double 115
.quad 0x405ce00000000000 ## double 115.5
.quad 0x405d17ae147ae148 ## double 116.37
.quad 0x405d300000000000 ## double 116.75
.quad 0x405d77ae147ae148 ## double 117.87
.quad 0x405db00000000000 ## double 118.75
.quad 0x405de7ae147ae148 ## double 119.62
.quad 0x405e67ae147ae148 ## double 121.62
.quad 0x405ee7ae147ae148 ## double 123.62
.quad 0x405f17ae147ae148 ## double 124.37
.quad 0x405f100000000000 ## double 124.25
.quad 0x405f600000000000 ## double 125.5
.quad 0x405fa00000000000 ## double 126.5
.quad 0x405fb7ae147ae148 ## double 126.87
.quad 0x405fa00000000000 ## double 126.5
.quad 0x405f900000000000 ## double 126.25
.quad 0x405ef7ae147ae148 ## double 123.87
.quad 0x405ec7ae147ae148 ## double 123.12
.quad 0x405ed7ae147ae148 ## double 123.37
.quad 0x405e97ae147ae148 ## double 122.37
.quad 0x405e67ae147ae148 ## double 121.62
.quad 0x405ee7ae147ae148 ## double 123.62
.quad 0x405f300000000000 ## double 124.75
.quad 0x405f57ae147ae148 ## double 125.37
.quad 0x405f900000000000 ## double 126.25
.quad 0x405fa00000000000 ## double 126.5
.quad 0x405f700000000000 ## double 125.75
.quad 0x405f67ae147ae148 ## double 125.62
.quad 0x405fd7ae147ae148 ## double 127.37
.quad 0x405fd7ae147ae148 ## double 127.37
.quad 0x4060080000000000 ## double 128.25
.quad 0x405ff7ae147ae148 ## double 127.87
.quad 0x4060300000000000 ## double 129.5
.quad 0x4060280000000000 ## double 129.25
.quad 0x405ff00000000000 ## double 127.75
.quad 0x405ff00000000000 ## double 127.75
.quad 0x40600c28f5c28f5c ## double 128.38
.quad 0x405ff00000000000 ## double 127.75
.quad 0x405f900000000000 ## double 126.25
.quad 0x405f900000000000 ## double 126.25
.quad 0x405f77ae147ae148 ## double 125.87
.quad 0x405ee7ae147ae148 ## double 123.62
.quad 0x405e17ae147ae148 ## double 120.37
.quad 0x405d67ae147ae148 ## double 117.62
.quad 0x405d600000000000 ## double 117.5
.quad 0x405d97ae147ae148 ## double 118.37
.quad 0x405de7ae147ae148 ## double 119.62
.quad 0x405df00000000000 ## double 119.75
.quad 0x405d57ae147ae148 ## double 117.37
.quad 0x405d57ae147ae148 ## double 117.37
.quad 0x405d47ae147ae148 ## double 117.12
.quad 0x405d37ae147ae148 ## double 116.87
.quad 0x405cd00000000000 ## double 115.25
.quad 0x405c97ae147ae148 ## double 114.37
.quad 0x405c77ae147ae148 ## double 113.87
.quad 0x405c67ae147ae148 ## double 113.62
.quad 0x405ca00000000000 ## double 114.5
.quad 0x405c400000000000 ## double 113
.quad 0x405b17ae147ae148 ## double 108.37
.quad 0x405b500000000000 ## double 109.25
.quad 0x405b700000000000 ## double 109.75
.quad 0x405b600000000000 ## double 109.5
.quad 0x405b77ae147ae148 ## double 109.87
.quad 0x405bb00000000000 ## double 110.75
.quad 0x405ba7ae147ae148 ## double 110.62
.quad 0x405ba7ae147ae148 ## double 110.62
.quad 0x405c300000000000 ## double 112.75
.quad 0x405bf00000000000 ## double 111.75
.quad 0x405b97ae147ae148 ## double 110.37
.quad 0x405b57ae147ae148 ## double 109.37
.quad 0x405b77ae147ae148 ## double 109.87
.quad 0x405be00000000000 ## double 111.5
.quad 0x405bb7ae147ae148 ## double 110.87
.quad 0x405bf7ae147ae148 ## double 111.87
.quad 0x405be7ae147ae148 ## double 111.62
.quad 0x405cb00000000000 ## double 114.75
.quad 0x405cc7ae147ae148 ## double 115.12
.quad 0x405c97ae147ae148 ## double 114.37
.quad 0x405c700000000000 ## double 113.75
.quad 0x405c600000000000 ## double 113.5
.quad 0x405ca00000000000 ## double 114.5
.quad 0x405cb00000000000 ## double 114.75
.quad 0x405ca7ae147ae148 ## double 114.62
.quad 0x405c900000000000 ## double 114.25
.quad 0x405c87ae147ae148 ## double 114.12
.quad 0x405c77ae147ae148 ## double 113.87
.quad 0x405d000000000000 ## double 116
.quad 0x405ca00000000000 ## double 114.5
.quad 0x405c97ae147ae148 ## double 114.37
.quad 0x405cf00000000000 ## double 115.75
.quad 0x405d200000000000 ## double 116.5
.quad 0x405c87ae147ae148 ## double 114.12
.quad 0x405c67ae147ae148 ## double 113.62
.quad 0x405c27ae147ae148 ## double 112.62
.quad 0x405b900000000000 ## double 110.25
.quad 0x405b700000000000 ## double 109.75
.quad 0x405b100000000000 ## double 108.25
.quad 0x405b400000000000 ## double 109
.quad 0x405b200000000000 ## double 108.5
.quad 0x405aa7ae147ae148 ## double 106.62
.quad 0x405b300000000000 ## double 108.75
.quad 0x405b17ae147ae148 ## double 108.37
.quad 0x405b97ae147ae148 ## double 110.37
.quad 0x405ba00000000000 ## double 110.5
.quad 0x405c57ae147ae148 ## double 113.37
.quad 0x405c900000000000 ## double 114.25
.quad 0x405c800000000000 ## double 114
.quad 0x405ca7ae147ae148 ## double 114.62
.quad 0x405ce00000000000 ## double 115.5
.quad 0x405cd7ae147ae148 ## double 115.37
.quad 0x405cd7ae147ae148 ## double 115.37
.quad 0x405cf00000000000 ## double 115.75
.quad 0x405ce00000000000 ## double 115.5
.quad 0x405d200000000000 ## double 116.5
.quad 0x405d700000000000 ## double 117.75
.quad 0x405da7ae147ae148 ## double 118.62
.quad 0x405d600000000000 ## double 117.5
.quad 0x405d600000000000 ## double 117.5
.quad 0x405d500000000000 ## double 117.25
.quad 0x405d97ae147ae148 ## double 118.37
.quad 0x405d97ae147ae148 ## double 118.37
.quad 0x405d700000000000 ## double 117.75
.quad 0x405cf00000000000 ## double 115.75
.quad 0x405d77ae147ae148 ## double 117.87
.quad 0x405d300000000000 ## double 116.75
.quad 0x405cf7ae147ae148 ## double 115.87
.quad 0x405ce7ae147ae148 ## double 115.62
.quad 0x405c57ae147ae148 ## double 113.37
.quad 0x405c700000000000 ## double 113.75
.quad 0x405c900000000000 ## double 114.25
.quad 0x405c700000000000 ## double 113.75
.quad 0x405c37ae147ae148 ## double 112.87
.quad 0x405c17ae147ae148 ## double 112.37
.quad 0x405bf00000000000 ## double 111.75
.quad 0x405b500000000000 ## double 109.25
.quad 0x405b17ae147ae148 ## double 108.37
.quad 0x405b600000000000 ## double 109.5
.quad 0x405b500000000000 ## double 109.25
.quad 0x405bf00000000000 ## double 111.75
.quad 0x405ba7ae147ae148 ## double 110.62
.quad 0x405c600000000000 ## double 113.5
.quad 0x405c97ae147ae148 ## double 114.37
.quad 0x405ca00000000000 ## double 114.5
.quad 0x405cb00000000000 ## double 114.75
.quad 0x405c600000000000 ## double 113.5
.quad 0x405c500000000000 ## double 113.25
.quad 0x405bf00000000000 ## double 111.75
.quad 0x405c000000000000 ## double 112
.quad 0x405bd7ae147ae148 ## double 111.37
.quad 0x405ce00000000000 ## double 115.5
.quad 0x405df00000000000 ## double 119.75
.quad 0x405e100000000000 ## double 120.25
.quad 0x405d57ae147ae148 ## double 117.37
.quad 0x405d900000000000 ## double 118.25
.quad 0x405d700000000000 ## double 117.75
.quad 0x405d800000000000 ## double 118
.quad 0x405ef00000000000 ## double 123.75
.quad 0x405f000000000000 ## double 124
.quad 0x405ed00000000000 ## double 123.25
.quad 0x405ee00000000000 ## double 123.5
.quad 0x405e500000000000 ## double 121.25
.quad 0x405d17ae147ae148 ## double 116.37
.quad 0x405d700000000000 ## double 117.75
.quad 0x405d27ae147ae148 ## double 116.62
.quad 0x405d67ae147ae148 ## double 117.62
.quad 0x405df7ae147ae148 ## double 119.87
.quad 0x405e17ae147ae148 ## double 120.37
.quad 0x405d97ae147ae148 ## double 118.37
.quad 0x405de00000000000 ## double 119.5
.quad 0x405d900000000000 ## double 118.25
.quad 0x405de00000000000 ## double 119.5
.quad 0x405dd00000000000 ## double 119.25
.quad 0x405d400000000000 ## double 117
.quad 0x405cf00000000000 ## double 115.75
.quad 0x405be7ae147ae148 ## double 111.62
.quad 0x405ca00000000000 ## double 114.5
.quad 0x405cb00000000000 ## double 114.75
.quad 0x405be00000000000 ## double 111.5
.quad 0x405b600000000000 ## double 109.5
.quad 0x405af00000000000 ## double 107.75
.quad 0x405bb7ae147ae148 ## double 110.87
.quad 0x405c07ae147ae148 ## double 112.12
.quad 0x405c37ae147ae148 ## double 112.87
.quad 0x405c17ae147ae148 ## double 112.37
.quad 0x405d400000000000 ## double 117
.quad 0x405db7ae147ae148 ## double 118.87
.quad 0x405de00000000000 ## double 119.5
.quad 0x405d700000000000 ## double 117.75
.quad 0x405d77ae147ae148 ## double 117.87
.quad 0x405db7ae147ae148 ## double 118.87
.quad 0x405de00000000000 ## double 119.5
.quad 0x405dd00000000000 ## double 119.25
.quad 0x405ed00000000000 ## double 123.25
.quad 0x405ea7ae147ae148 ## double 122.62
.quad 0x405ee00000000000 ## double 123.5
.quad 0x405e07ae147ae148 ## double 120.12
.quad 0x405de00000000000 ## double 119.5
.quad 0x405df00000000000 ## double 119.75
.quad 0x405f200000000000 ## double 124.5
.quad 0x405f27ae147ae148 ## double 124.62
.quad 0x405ea00000000000 ## double 122.5
.quad 0x405ec00000000000 ## double 123
.quad 0x405f37ae147ae148 ## double 124.87
.quad 0x405f400000000000 ## double 125
.quad 0x405ec00000000000 ## double 123
.quad 0x405e000000000000 ## double 120
.quad 0x405da7ae147ae148 ## double 118.62
.quad 0x405d400000000000 ## double 117
.quad 0x405eb00000000000 ## double 122.75
.quad 0x405ea00000000000 ## double 122.5
.quad 0x405f600000000000 ## double 125.5
.quad 0x405e800000000000 ## double 122
.quad 0x4061400000000000 ## double 138
.quad 0x4061a80000000000 ## double 141.25
.quad 0x4062380000000000 ## double 145.75
.quad 0x40626c28f5c28f5c ## double 147.38
.quad 0x4062f00000000000 ## double 151.5
.quad 0x4062e80000000000 ## double 151.25
.quad 0x4062f428f5c28f5c ## double 151.63
.quad 0x40631c28f5c28f5c ## double 152.88
.quad 0x4063200000000000 ## double 153
.quad 0x40638c28f5c28f5c ## double 156.38
.quad 0x4063a80000000000 ## double 157.25
.quad 0x40639c28f5c28f5c ## double 156.88
.quad 0x4063580000000000 ## double 154.75
.quad 0x4062fc28f5c28f5c ## double 151.88
.quad 0x4063380000000000 ## double 153.75
.quad 0x4063c80000000000 ## double 158.25
.quad 0x40639428f5c28f5c ## double 156.63
.quad 0x4063c428f5c28f5c ## double 158.13
.quad 0x4063800000000000 ## double 156
.quad 0x4063780000000000 ## double 155.75
.quad 0x4063900000000000 ## double 156.5
.quad 0x4063d00000000000 ## double 158.5
.quad 0x4063d428f5c28f5c ## double 158.63
.quad 0x4063fc28f5c28f5c ## double 159.88
.quad 0x4064480000000000 ## double 162.25
.quad 0x4064580000000000 ## double 162.75
.quad 0x40642c28f5c28f5c ## double 161.38
.quad 0x4063cc28f5c28f5c ## double 158.38
.quad 0x4063c428f5c28f5c ## double 158.13
.quad 0x4064100000000000 ## double 160.5
.quad 0x4064580000000000 ## double 162.75
.quad 0x4064700000000000 ## double 163.5
.quad 0x40648428f5c28f5c ## double 164.13
.quad 0x40652c28f5c28f5c ## double 169.38
.quad 0x4065300000000000 ## double 169.5
.quad 0x4065000000000000 ## double 168
.quad 0x40650c28f5c28f5c ## double 168.38
.quad 0x4065300000000000 ## double 169.5
.quad 0x4065dc28f5c28f5c ## double 174.88
.quad 0x4065e428f5c28f5c ## double 175.13
.quad 0x4065fc28f5c28f5c ## double 175.88
.quad 0x4065e80000000000 ## double 175.25
.quad 0x4065a80000000000 ## double 173.25
.quad 0x4065bc28f5c28f5c ## double 173.88
.quad 0x4065d80000000000 ## double 174.75
.quad 0x4065e00000000000 ## double 175
.quad 0x4065c00000000000 ## double 174
.quad 0x40658428f5c28f5c ## double 172.13
.quad 0x40653428f5c28f5c ## double 169.63
.quad 0x4064d80000000000 ## double 166.75
.quad 0x4064b428f5c28f5c ## double 165.63
.quad 0x4064680000000000 ## double 163.25
.quad 0x4064200000000000 ## double 161
.quad 0x4064000000000000 ## double 160
.quad 0x40641c28f5c28f5c ## double 160.88
.quad 0x4064600000000000 ## double 163
.quad 0x4064500000000000 ## double 162.5
.quad 0x4064380000000000 ## double 161.75
.quad 0x40645c28f5c28f5c ## double 162.88
.quad 0x40645428f5c28f5c ## double 162.63
.quad 0x4064300000000000 ## double 161.5
.quad 0x4064480000000000 ## double 162.25
.quad 0x40645c28f5c28f5c ## double 162.88
.quad 0x40649428f5c28f5c ## double 164.63
.quad 0x4064bc28f5c28f5c ## double 165.88
.quad 0x4065100000000000 ## double 168.5
.quad 0x4065180000000000 ## double 168.75
.quad 0x4065300000000000 ## double 169.5
.quad 0x40653c28f5c28f5c ## double 169.88
.quad 0x40653c28f5c28f5c ## double 169.88
.quad 0x4064e80000000000 ## double 167.25
.quad 0x4064e80000000000 ## double 167.25
.quad 0x4064dc28f5c28f5c ## double 166.88
.quad 0x4064c00000000000 ## double 166
.quad 0x4064b428f5c28f5c ## double 165.63
.quad 0x4064a428f5c28f5c ## double 165.13
.quad 0x40649428f5c28f5c ## double 164.63
.quad 0x4064b80000000000 ## double 165.75
.quad 0x4064d00000000000 ## double 166.5
.quad 0x4064f80000000000 ## double 167.75
.quad 0x40651428f5c28f5c ## double 168.63
.quad 0x4064f428f5c28f5c ## double 167.63
.quad 0x4064a00000000000 ## double 165
.quad 0x4064700000000000 ## double 163.5
.quad 0x40644428f5c28f5c ## double 162.13
.quad 0x4064400000000000 ## double 162
.quad 0x4064500000000000 ## double 162.5
.quad 0x4064480000000000 ## double 162.25
.quad 0x40640428f5c28f5c ## double 160.13
.quad 0x4063ec28f5c28f5c ## double 159.38
.quad 0x4063e428f5c28f5c ## double 159.13
.quad 0x4064300000000000 ## double 161.5
.quad 0x40643428f5c28f5c ## double 161.63
.quad 0x40643c28f5c28f5c ## double 161.88
.quad 0x4064280000000000 ## double 161.25
.quad 0x40642c28f5c28f5c ## double 161.38
.quad 0x40640c28f5c28f5c ## double 160.38
.quad 0x4063f80000000000 ## double 159.75
.quad 0x40642c28f5c28f5c ## double 161.38
.quad 0x40647c28f5c28f5c ## double 163.88
.quad 0x4064580000000000 ## double 162.75
.quad 0x40644c28f5c28f5c ## double 162.38
.quad 0x4064180000000000 ## double 160.75
.quad 0x4063e80000000000 ## double 159.25
.quad 0x4063d80000000000 ## double 158.75
.quad 0x4063d428f5c28f5c ## double 158.63
.quad 0x4064380000000000 ## double 161.75
.quad 0x4064280000000000 ## double 161.25
.quad 0x4064ac28f5c28f5c ## double 165.38
.quad 0x4064ec28f5c28f5c ## double 167.38
.quad 0x4064e00000000000 ## double 167
.quad 0x4064c428f5c28f5c ## double 166.13
.quad 0x4064ec28f5c28f5c ## double 167.38
.quad 0x4064bc28f5c28f5c ## double 165.88
.quad 0x4064d80000000000 ## double 166.75
.quad 0x4064f428f5c28f5c ## double 167.63
.quad 0x4064f428f5c28f5c ## double 167.63
.quad 0x4064600000000000 ## double 163
.quad 0x40644c28f5c28f5c ## double 162.38
.quad 0x4064300000000000 ## double 161.5
.quad 0x4063f428f5c28f5c ## double 159.63
.quad 0x40639428f5c28f5c ## double 156.63
.quad 0x4063800000000000 ## double 156
.quad 0x40634428f5c28f5c ## double 154.13
.quad 0x4063900000000000 ## double 156.5
.quad 0x4063a00000000000 ## double 157
.quad 0x4063c00000000000 ## double 158
.quad 0x4062d428f5c28f5c ## double 150.63
.quad 0x4063080000000000 ## double 152.25
.quad 0x4062fc28f5c28f5c ## double 151.88
.quad 0x4062dc28f5c28f5c ## double 150.88
.quad 0x4062c80000000000 ## double 150.25
.quad 0x40623428f5c28f5c ## double 145.63
.quad 0x4062700000000000 ## double 147.5
.quad 0x4062880000000000 ## double 148.25
.quad 0x4062c428f5c28f5c ## double 150.13
.quad 0x4062cc28f5c28f5c ## double 150.38
.quad 0x4062d80000000000 ## double 150.75
.quad 0x4062ec28f5c28f5c ## double 151.38
.quad 0x4063000000000000 ## double 152
.quad 0x40632c28f5c28f5c ## double 153.38
.quad 0x4063180000000000 ## double 152.75
.quad 0x40634428f5c28f5c ## double 154.13
.quad 0x40639428f5c28f5c ## double 156.63
.quad 0x4063900000000000 ## double 156.5
.quad 0x40631c28f5c28f5c ## double 152.88
.quad 0x4062c80000000000 ## double 150.25
.quad 0x4062a80000000000 ## double 149.25
.quad 0x40626c28f5c28f5c ## double 147.38
.quad 0x4062800000000000 ## double 148
.quad 0x40626428f5c28f5c ## double 147.13
.quad 0x4062200000000000 ## double 145
.quad 0x4062400000000000 ## double 146
.quad 0x40622428f5c28f5c ## double 145.13
.quad 0x40620c28f5c28f5c ## double 144.38
.quad 0x4061dc28f5c28f5c ## double 142.88
.quad 0x40615c28f5c28f5c ## double 138.88
.quad 0x4061900000000000 ## double 140.5
.quad 0x4061bc28f5c28f5c ## double 141.88
.quad 0x4061ac28f5c28f5c ## double 141.38
.quad 0x40615c28f5c28f5c ## double 138.88
.quad 0x4061800000000000 ## double 140
.quad 0x40619428f5c28f5c ## double 140.63
.quad 0x4061b428f5c28f5c ## double 141.63
.quad 0x4061f00000000000 ## double 143.5
.quad 0x4062080000000000 ## double 144.25
.quad 0x40620428f5c28f5c ## double 144.13
.quad 0x4061780000000000 ## double 139.75
.quad 0x40617428f5c28f5c ## double 139.63
.quad 0x4061700000000000 ## double 139.5
.quad 0x40615428f5c28f5c ## double 138.63
.quad 0x4060d80000000000 ## double 134.75
.quad 0x4060e00000000000 ## double 135
.quad 0x4060d00000000000 ## double 134.5
.quad 0x4060f00000000000 ## double 135.5
.quad 0x4060fc28f5c28f5c ## double 135.88
.quad 0x40612428f5c28f5c ## double 137.13
.quad 0x4061300000000000 ## double 137.5
.quad 0x4060e428f5c28f5c ## double 135.13
.quad 0x4060cc28f5c28f5c ## double 134.38
.quad 0x4060900000000000 ## double 132.5
.quad 0x40602428f5c28f5c ## double 129.13
.quad 0x40605c28f5c28f5c ## double 130.88
.quad 0x4060480000000000 ## double 130.25
.quad 0x4060380000000000 ## double 129.75
.quad 0x405ff7ae147ae148 ## double 127.87
.quad 0x40603c28f5c28f5c ## double 129.88
.quad 0x4060000000000000 ## double 128
.quad 0x405f37ae147ae148 ## double 124.87
.quad 0x405fd00000000000 ## double 127.25
.quad 0x405f500000000000 ## double 125.25
.quad 0x405e47ae147ae148 ## double 121.12
.quad 0x405e57ae147ae148 ## double 121.37
.quad 0x405db00000000000 ## double 118.75
.quad 0x405db00000000000 ## double 118.75
.quad 0x405e47ae147ae148 ## double 121.12
.quad 0x405ea7ae147ae148 ## double 122.62
.quad 0x405ed7ae147ae148 ## double 123.37
.quad 0x405f000000000000 ## double 124
.quad 0x405f17ae147ae148 ## double 124.37
.quad 0x405f000000000000 ## double 124
.quad 0x405ea00000000000 ## double 122.5
.quad 0x405e77ae147ae148 ## double 121.87
.quad 0x405e67ae147ae148 ## double 121.62
.quad 0x405e800000000000 ## double 122
.quad 0x405e900000000000 ## double 122.25
.quad 0x405ea00000000000 ## double 122.5
.quad 0x405ea00000000000 ## double 122.5
.quad 0x405f400000000000 ## double 125
.quad 0x405f77ae147ae148 ## double 125.87
.quad 0x405fc00000000000 ## double 127
.quad 0x40600428f5c28f5c ## double 128.13
.quad 0x4060100000000000 ## double 128.5
.quad 0x405fe7ae147ae148 ## double 127.62
.quad 0x4060080000000000 ## double 128.25
.quad 0x40602c28f5c28f5c ## double 129.38
.quad 0x4060200000000000 ## double 129
.quad 0x40600428f5c28f5c ## double 128.13
.quad 0x4060180000000000 ## double 128.75
.quad 0x4060100000000000 ## double 128.5
.quad 0x4060280000000000 ## double 129.25
.quad 0x40604428f5c28f5c ## double 130.13
.quad 0x40603c28f5c28f5c ## double 129.88
.quad 0x405fd7ae147ae148 ## double 127.37
.quad 0x405fd00000000000 ## double 127.25
.quad 0x405ff7ae147ae148 ## double 127.87
.quad 0x405fe00000000000 ## double 127.5
.quad 0x405f500000000000 ## double 125.25
.quad 0x405f57ae147ae148 ## double 125.37
.quad 0x405ef7ae147ae148 ## double 123.87
.quad 0x405ec00000000000 ## double 123
.quad 0x405ec7ae147ae148 ## double 123.12
.quad 0x405ee7ae147ae148 ## double 123.62
.quad 0x405e600000000000 ## double 121.5
.quad 0x405ec00000000000 ## double 123
.quad 0x405f17ae147ae148 ## double 124.37
.quad 0x405f100000000000 ## double 124.25
.quad 0x405ed00000000000 ## double 123.25
.quad 0x405e77ae147ae148 ## double 121.87
.quad 0x405e800000000000 ## double 122
.quad 0x405f300000000000 ## double 124.75
.quad 0x405fa00000000000 ## double 126.5
.quad 0x405f97ae147ae148 ## double 126.37
.quad 0x405ef00000000000 ## double 123.75
.quad 0x405ec00000000000 ## double 123
.quad 0x405e700000000000 ## double 121.75
.quad 0x405e97ae147ae148 ## double 122.37
.quad 0x405e87ae147ae148 ## double 122.12
.quad 0x405e900000000000 ## double 122.25
.quad 0x405ea00000000000 ## double 122.5
.quad 0x405e600000000000 ## double 121.5
.quad 0x405e57ae147ae148 ## double 121.37
.quad 0x405e500000000000 ## double 121.25
.quad 0x405e97ae147ae148 ## double 122.37
.quad 0x405ed7ae147ae148 ## double 123.37
.quad 0x405ec00000000000 ## double 123
.quad 0x405ec00000000000 ## double 123
.quad 0x405f500000000000 ## double 125.25
.quad 0x405f200000000000 ## double 124.5
.quad 0x405ff7ae147ae148 ## double 127.87
.quad 0x405fe00000000000 ## double 127.5
.quad 0x4060b428f5c28f5c ## double 133.63
.quad 0x4060b428f5c28f5c ## double 133.63
.quad 0x4060a80000000000 ## double 133.25
.quad 0x4060ac28f5c28f5c ## double 133.38
.quad 0x4060f00000000000 ## double 135.5
.quad 0x4060fc28f5c28f5c ## double 135.88
.quad 0x4060e80000000000 ## double 135.25
.quad 0x40611428f5c28f5c ## double 136.63
.quad 0x40613428f5c28f5c ## double 137.63
.quad 0x40616c28f5c28f5c ## double 139.38
.quad 0x40616c28f5c28f5c ## double 139.38
.quad 0x40615428f5c28f5c ## double 138.63
.quad 0x4061480000000000 ## double 138.25
.quad 0x4061600000000000 ## double 139
.quad 0x40617428f5c28f5c ## double 139.63
.quad 0x4061500000000000 ## double 138.5
.quad 0x4061500000000000 ## double 138.5
.quad 0x40617c28f5c28f5c ## double 139.88
.quad 0x4061dc28f5c28f5c ## double 142.88
.quad 0x4062100000000000 ## double 144.5
.quad 0x4062300000000000 ## double 145.5
.quad 0x4061fc28f5c28f5c ## double 143.88
.quad 0x4061a00000000000 ## double 141
.quad 0x4061700000000000 ## double 139.5
.quad 0x40610c28f5c28f5c ## double 136.38
.quad 0x40615c28f5c28f5c ## double 138.88
.quad 0x4061800000000000 ## double 140
.quad 0x40618c28f5c28f5c ## double 140.38
.quad 0x4061c00000000000 ## double 142
.quad 0x4061a80000000000 ## double 141.25
.quad 0x4061800000000000 ## double 140
.quad 0x40614428f5c28f5c ## double 138.13
.quad 0x4061900000000000 ## double 140.5
.quad 0x4061700000000000 ## double 139.5
.quad 0x4061380000000000 ## double 137.75
.quad 0x4060f80000000000 ## double 135.75
.quad 0x4060d00000000000 ## double 134.5
.quad 0x4060e80000000000 ## double 135.25
.quad 0x4060cc28f5c28f5c ## double 134.38
.quad 0x4060980000000000 ## double 132.75
.quad 0x40607428f5c28f5c ## double 131.63
.quad 0x40607428f5c28f5c ## double 131.63
.quad 0x40607c28f5c28f5c ## double 131.88
.quad 0x4060700000000000 ## double 131.5
.quad 0x40607c28f5c28f5c ## double 131.88
.quad 0x40607c28f5c28f5c ## double 131.88
.quad 0x4060ac28f5c28f5c ## double 133.38
.quad 0x4060a80000000000 ## double 133.25
.quad 0x4060a80000000000 ## double 133.25
.quad 0x4060980000000000 ## double 132.75
.quad 0x4060dc28f5c28f5c ## double 134.88
.quad 0x4060e00000000000 ## double 135
.quad 0x4060b428f5c28f5c ## double 133.63
.quad 0x4060c00000000000 ## double 134
.quad 0x4060bc28f5c28f5c ## double 133.88
.quad 0x40608c28f5c28f5c ## double 132.38
.quad 0x4060b00000000000 ## double 133.5
.quad 0x4060c00000000000 ## double 134
.quad 0x40610428f5c28f5c ## double 136.13
.quad 0x40615428f5c28f5c ## double 138.63
.quad 0x4061ac28f5c28f5c ## double 141.38
.quad 0x40623428f5c28f5c ## double 145.63
.quad 0x40623c28f5c28f5c ## double 145.88
.quad 0x4062180000000000 ## double 144.75
.quad 0x40621428f5c28f5c ## double 144.63
.quad 0x40628428f5c28f5c ## double 148.13
.quad 0x4062c428f5c28f5c ## double 150.13
.quad 0x4062c428f5c28f5c ## double 150.13
.quad 0x40629428f5c28f5c ## double 148.63
.quad 0x4062700000000000 ## double 147.5
.quad 0x40626c28f5c28f5c ## double 147.38
.quad 0x40627428f5c28f5c ## double 147.63
.quad 0x4062900000000000 ## double 148.5
.quad 0x40627c28f5c28f5c ## double 147.88
.quad 0x4062600000000000 ## double 147
.quad 0x4062700000000000 ## double 147.5
.quad 0x4062600000000000 ## double 147
.quad 0x4062600000000000 ## double 147
.quad 0x4062980000000000 ## double 148.75
.quad 0x4062bc28f5c28f5c ## double 149.88
.quad 0x4062bc28f5c28f5c ## double 149.88
.quad 0x4062a428f5c28f5c ## double 149.13
.quad 0x4062b00000000000 ## double 149.5
.quad 0x4062980000000000 ## double 148.75
.quad 0x4062a00000000000 ## double 149
.quad 0x4062e00000000000 ## double 151
.quad 0x4062dc28f5c28f5c ## double 150.88
.quad 0x40631c28f5c28f5c ## double 152.88
.quad 0x4063180000000000 ## double 152.75
.quad 0x4063300000000000 ## double 153.5
.quad 0x40635428f5c28f5c ## double 154.63
.quad 0x40633428f5c28f5c ## double 153.63
.quad 0x4063100000000000 ## double 152.5
.quad 0x4062600000000000 ## double 147
.quad 0x4062480000000000 ## double 146.25
.quad 0x40622428f5c28f5c ## double 145.13
.quad 0x40625428f5c28f5c ## double 146.63
.quad 0x40624c28f5c28f5c ## double 146.38
.quad 0x4062280000000000 ## double 145.25
.quad 0x40623428f5c28f5c ## double 145.63
.quad 0x40628428f5c28f5c ## double 148.13
.quad 0x4062a428f5c28f5c ## double 149.13
.quad 0x4062cc28f5c28f5c ## double 150.38
.quad 0x4062f00000000000 ## double 151.5
.quad 0x4062c80000000000 ## double 150.25
.quad 0x4062d00000000000 ## double 150.5
.quad 0x4063300000000000 ## double 153.5
.quad 0x40637c28f5c28f5c ## double 155.88
.quad 0x40637c28f5c28f5c ## double 155.88
.quad 0x4063a80000000000 ## double 157.25
.quad 0x4063b00000000000 ## double 157.5
.quad 0x4063f80000000000 ## double 159.75
.quad 0x40643c28f5c28f5c ## double 161.88
.quad 0x40642c28f5c28f5c ## double 161.38
.quad 0x40640c28f5c28f5c ## double 160.38
.quad 0x4063d00000000000 ## double 158.5
.quad 0x4063680000000000 ## double 155.25
.quad 0x40636428f5c28f5c ## double 155.13
.quad 0x40635c28f5c28f5c ## double 154.88
.quad 0x40635c28f5c28f5c ## double 154.88
.quad 0x40637428f5c28f5c ## double 155.63
.quad 0x4063680000000000 ## double 155.25
.quad 0x40633428f5c28f5c ## double 153.63
.quad 0x40631428f5c28f5c ## double 152.63
.quad 0x40630c28f5c28f5c ## double 152.38
.quad 0x4062f00000000000 ## double 151.5
.quad 0x40631428f5c28f5c ## double 152.63
.quad 0x4063000000000000 ## double 152
.quad 0x4062c80000000000 ## double 150.25
.quad 0x4062c428f5c28f5c ## double 150.13
.quad 0x40631c28f5c28f5c ## double 152.88
.quad 0x4062e428f5c28f5c ## double 151.13
.quad 0x4063000000000000 ## double 152
.quad 0x40632428f5c28f5c ## double 153.13
.quad 0x4062e00000000000 ## double 151
.quad 0x40627c28f5c28f5c ## double 147.88
.quad 0x4062980000000000 ## double 148.75
.quad 0x4062c00000000000 ## double 150
.quad 0x4062dc28f5c28f5c ## double 150.88
.quad 0x4063180000000000 ## double 152.75
.quad 0x40630c28f5c28f5c ## double 152.38
.quad 0x4063300000000000 ## double 153.5
.quad 0x4062e00000000000 ## double 151
.quad 0x4062f00000000000 ## double 151.5
.quad 0x4062dc28f5c28f5c ## double 150.88
.quad 0x40631428f5c28f5c ## double 152.63
.quad 0x40630428f5c28f5c ## double 152.13
.quad 0x4062900000000000 ## double 148.5
.quad 0x4062680000000000 ## double 147.25
.quad 0x4062a428f5c28f5c ## double 149.13
.quad 0x40628c28f5c28f5c ## double 148.38
.quad 0x40632428f5c28f5c ## double 153.13
.quad 0x4063100000000000 ## double 152.5
.quad 0x4063980000000000 ## double 156.75
.quad 0x4063c80000000000 ## double 158.25
.quad 0x4063e428f5c28f5c ## double 159.13
.quad 0x4063d428f5c28f5c ## double 158.63
.quad 0x4064000000000000 ## double 160
.quad 0x4064200000000000 ## double 161
.quad 0x4063e428f5c28f5c ## double 159.13
.quad 0x40640c28f5c28f5c ## double 160.38
.quad 0x4063f80000000000 ## double 159.75
.quad 0x4063bc28f5c28f5c ## double 157.88
.quad 0x40637c28f5c28f5c ## double 155.88
.quad 0x4063a80000000000 ## double 157.25
.quad 0x4063c80000000000 ## double 158.25
.quad 0x4063bc28f5c28f5c ## double 157.88
.quad 0x4063a80000000000 ## double 157.25
.quad 0x4063800000000000 ## double 156
.quad 0x4063600000000000 ## double 155
.quad 0x4063700000000000 ## double 155.5
.quad 0x40635428f5c28f5c ## double 154.63
.quad 0x4063300000000000 ## double 153.5
.quad 0x4062e428f5c28f5c ## double 151.13
.quad 0x40633428f5c28f5c ## double 153.63
.quad 0x4062f80000000000 ## double 151.75
.quad 0x4062e80000000000 ## double 151.25
.quad 0x4062d00000000000 ## double 150.5
.quad 0x4062800000000000 ## double 148
.quad 0x4062c00000000000 ## double 150
.quad 0x4062cc28f5c28f5c ## double 150.38
.quad 0x4062c00000000000 ## double 150
.quad 0x4063880000000000 ## double 156.25
.quad 0x4063900000000000 ## double 156.5
.quad 0x4063180000000000 ## double 152.75
.quad 0x4062bc28f5c28f5c ## double 149.88
.quad 0x4062b428f5c28f5c ## double 149.63
.quad 0x4062e00000000000 ## double 151
.quad 0x4062cc28f5c28f5c ## double 150.38
.quad 0x4063800000000000 ## double 156
.quad 0x4063900000000000 ## double 156.5
.quad 0x40635c28f5c28f5c ## double 154.88
.quad 0x40634c28f5c28f5c ## double 154.38
.quad 0x40635c28f5c28f5c ## double 154.88
.quad 0x4063d80000000000 ## double 158.75
.quad 0x4063d00000000000 ## double 158.5
.quad 0x40637428f5c28f5c ## double 155.63
.quad 0x4063380000000000 ## double 153.75
.quad 0x40635428f5c28f5c ## double 154.63
.quad 0x40636428f5c28f5c ## double 155.13
.quad 0x40639428f5c28f5c ## double 156.63
.quad 0x4063400000000000 ## double 154
.quad 0x40633c28f5c28f5c ## double 153.88
.quad 0x4063500000000000 ## double 154.5
.quad 0x4063300000000000 ## double 153.5
.quad 0x4062f00000000000 ## double 151.5
.quad 0x4062ac28f5c28f5c ## double 149.38
.quad 0x4062ac28f5c28f5c ## double 149.38
.quad 0x4062800000000000 ## double 148
.quad 0x40621c28f5c28f5c ## double 144.88
.quad 0x4061b80000000000 ## double 141.75
.quad 0x4061dc28f5c28f5c ## double 142.88
.quad 0x4061bc28f5c28f5c ## double 141.88
.quad 0x40615428f5c28f5c ## double 138.63
.quad 0x40617c28f5c28f5c ## double 139.88
.quad 0x4061980000000000 ## double 140.75
.quad 0x4061900000000000 ## double 140.5
.quad 0x4061780000000000 ## double 139.75
.quad 0x4061700000000000 ## double 139.5
.quad 0x4061900000000000 ## double 140.5
.quad 0x4061880000000000 ## double 140.25
.quad 0x40616428f5c28f5c ## double 139.13
.quad 0x4061680000000000 ## double 139.25
.quad 0x40615428f5c28f5c ## double 138.63
.quad 0x40614c28f5c28f5c ## double 138.38
.quad 0x4061200000000000 ## double 137
.quad 0x4060f80000000000 ## double 135.75
.quad 0x40610c28f5c28f5c ## double 136.38
.quad 0x4060dc28f5c28f5c ## double 134.88
.quad 0x4060a00000000000 ## double 133
.quad 0x4060980000000000 ## double 132.75
.quad 0x4060a80000000000 ## double 133.25
.quad 0x4060c00000000000 ## double 134
.quad 0x40609428f5c28f5c ## double 132.63
.quad 0x40606428f5c28f5c ## double 131.13
.quad 0x4060580000000000 ## double 130.75
.quad 0x40606c28f5c28f5c ## double 131.38
.quad 0x40606c28f5c28f5c ## double 131.38
.quad 0x40602c28f5c28f5c ## double 129.38
.quad 0x40601c28f5c28f5c ## double 128.88
.quad 0x4060680000000000 ## double 131.25
.quad 0x40605c28f5c28f5c ## double 130.88
.quad 0x40604428f5c28f5c ## double 130.13
.quad 0x405ff7ae147ae148 ## double 127.87
.quad 0x4060200000000000 ## double 129
.quad 0x4060400000000000 ## double 130
.quad 0x4060300000000000 ## double 129.5
.quad 0x4060200000000000 ## double 129
.quad 0x40600428f5c28f5c ## double 128.13
.quad 0x405f900000000000 ## double 126.25
.quad 0x405f300000000000 ## double 124.75
.quad 0x405f57ae147ae148 ## double 125.37
.quad 0x405f300000000000 ## double 124.75
.quad 0x405f400000000000 ## double 125
.quad 0x405f17ae147ae148 ## double 124.37
.quad 0x405f500000000000 ## double 125.25
.quad 0x405fc00000000000 ## double 127
.quad 0x405fa7ae147ae148 ## double 126.62
.quad 0x405f100000000000 ## double 124.25
.quad 0x405ef7ae147ae148 ## double 123.87
.quad 0x405fc00000000000 ## double 127
.quad 0x4060000000000000 ## double 128
.quad 0x40600c28f5c28f5c ## double 128.38
.quad 0x4060000000000000 ## double 128
.quad 0x4060000000000000 ## double 128
.quad 0x4060000000000000 ## double 128
.quad 0x4060100000000000 ## double 128.5
.quad 0x4060100000000000 ## double 128.5
.quad 0x4060000000000000 ## double 128
.quad 0x4060100000000000 ## double 128.5
.quad 0x4060100000000000 ## double 128.5
.quad 0x4060300000000000 ## double 129.5
.quad 0x4060500000000000 ## double 130.5
.quad 0x4060380000000000 ## double 129.75
.quad 0x40601428f5c28f5c ## double 128.63
.quad 0x405fd00000000000 ## double 127.25
.quad 0x405fe00000000000 ## double 127.5
.quad 0x405fe7ae147ae148 ## double 127.62
.quad 0x4060000000000000 ## double 128
.quad 0x40601428f5c28f5c ## double 128.63
.quad 0x40601c28f5c28f5c ## double 128.88
.quad 0x4060080000000000 ## double 128.25
.quad 0x405fc7ae147ae148 ## double 127.12
.quad 0x40600c28f5c28f5c ## double 128.38
.quad 0x40600c28f5c28f5c ## double 128.38
.quad 0x405ff00000000000 ## double 127.75
.quad 0x405f97ae147ae148 ## double 126.37
.quad 0x405f97ae147ae148 ## double 126.37
.quad 0x405fd00000000000 ## double 127.25
.quad 0x40602428f5c28f5c ## double 129.13
.quad 0x405fe00000000000 ## double 127.5
.quad 0x405fd7ae147ae148 ## double 127.37
.quad 0x40601428f5c28f5c ## double 128.63
.quad 0x40601c28f5c28f5c ## double 128.88
.quad 0x4060380000000000 ## double 129.75
.quad 0x4060780000000000 ## double 131.75
.quad 0x4060700000000000 ## double 131.5
.quad 0x4060880000000000 ## double 132.25
.quad 0x4060980000000000 ## double 132.75
.quad 0x40607c28f5c28f5c ## double 131.88
.quad 0x4060580000000000 ## double 130.75
.quad 0x4060800000000000 ## double 132
.quad 0x4060900000000000 ## double 132.5
.quad 0x4060480000000000 ## double 130.25
.quad 0x40603c28f5c28f5c ## double 129.88
.quad 0x40607c28f5c28f5c ## double 131.88
.quad 0x40603c28f5c28f5c ## double 129.88
.quad 0x4060400000000000 ## double 130
.quad 0x4060400000000000 ## double 130
.quad 0x4060480000000000 ## double 130.25
.quad 0x4060180000000000 ## double 128.75
.quad 0x405fb00000000000 ## double 126.75
.quad 0x405f27ae147ae148 ## double 124.62
.quad 0x405f07ae147ae148 ## double 124.12
.quad 0x405ed7ae147ae148 ## double 123.37
.quad 0x405ed00000000000 ## double 123.25
.quad 0x405f200000000000 ## double 124.5
.quad 0x405f37ae147ae148 ## double 124.87
.quad 0x405f27ae147ae148 ## double 124.62
.quad 0x405f57ae147ae148 ## double 125.37
.quad 0x405f400000000000 ## double 125
.quad 0x405f200000000000 ## double 124.5
.quad 0x405f100000000000 ## double 124.25
.quad 0x405ed00000000000 ## double 123.25
.quad 0x405ed7ae147ae148 ## double 123.37
.quad 0x405e47ae147ae148 ## double 121.12
.quad 0x405df00000000000 ## double 119.75
.quad 0x405df00000000000 ## double 119.75
.quad 0x405e37ae147ae148 ## double 120.87
.quad 0x405e67ae147ae148 ## double 121.62
.quad 0x405e200000000000 ## double 120.5
.quad 0x405e47ae147ae148 ## double 121.12
.quad 0x405e100000000000 ## double 120.25
.quad 0x405f87ae147ae148 ## double 126.12
.quad 0x4060080000000000 ## double 128.25
.quad 0x405ff7ae147ae148 ## double 127.87
.quad 0x40603c28f5c28f5c ## double 129.88
.quad 0x4060380000000000 ## double 129.75
.quad 0x40605428f5c28f5c ## double 130.63
.quad 0x40604428f5c28f5c ## double 130.13
.quad 0x4060300000000000 ## double 129.5
.quad 0x4060400000000000 ## double 130
.quad 0x4060300000000000 ## double 129.5
.quad 0x4060480000000000 ## double 130.25
.quad 0x4060780000000000 ## double 131.75
.quad 0x40607428f5c28f5c ## double 131.63
.quad 0x40608c28f5c28f5c ## double 132.38
.quad 0x4060980000000000 ## double 132.75
.quad 0x4060a80000000000 ## double 133.25
.quad 0x4060a80000000000 ## double 133.25
.quad 0x4060680000000000 ## double 131.25
.quad 0x4060300000000000 ## double 129.5
.quad 0x4060280000000000 ## double 129.25
.quad 0x40605c28f5c28f5c ## double 130.88
.quad 0x40605428f5c28f5c ## double 130.63
.quad 0x40605c28f5c28f5c ## double 130.88
.quad 0x4060080000000000 ## double 128.25
.quad 0x405fd00000000000 ## double 127.25
.quad 0x405f900000000000 ## double 126.25
.quad 0x405f67ae147ae148 ## double 125.62
.quad 0x405f67ae147ae148 ## double 125.62
.quad 0x405fb7ae147ae148 ## double 126.87
.quad 0x405fb7ae147ae148 ## double 126.87
.quad 0x405fb00000000000 ## double 126.75
.quad 0x405fc7ae147ae148 ## double 127.12
.quad 0x4060380000000000 ## double 129.75
.quad 0x4060200000000000 ## double 129
.quad 0x40602428f5c28f5c ## double 129.13
.quad 0x4060280000000000 ## double 129.25
.quad 0x4060000000000000 ## double 128
.quad 0x40600428f5c28f5c ## double 128.13
.quad 0x4060400000000000 ## double 130
.quad 0x40604428f5c28f5c ## double 130.13
.quad 0x40604c28f5c28f5c ## double 130.38
.quad 0x4060380000000000 ## double 129.75
.quad 0x40600428f5c28f5c ## double 128.13
.quad 0x405fe7ae147ae148 ## double 127.62
.quad 0x405fa7ae147ae148 ## double 126.62
.quad 0x405f57ae147ae148 ## double 125.37
.quad 0x405fc00000000000 ## double 127
.quad 0x405fd00000000000 ## double 127.25
.quad 0x405fe7ae147ae148 ## double 127.62
.quad 0x4060300000000000 ## double 129.5
.quad 0x4060080000000000 ## double 128.25
.quad 0x405fc7ae147ae148 ## double 127.12
.quad 0x405fe7ae147ae148 ## double 127.62
.quad 0x405fa7ae147ae148 ## double 126.62
.quad 0x405f67ae147ae148 ## double 125.62
.quad 0x4060000000000000 ## double 128
.quad 0x4060180000000000 ## double 128.75
.quad 0x4060400000000000 ## double 130
.quad 0x40605428f5c28f5c ## double 130.63
.quad 0x40606428f5c28f5c ## double 131.13
.quad 0x40603c28f5c28f5c ## double 129.88
.quad 0x4060400000000000 ## double 130
.quad 0x40604c28f5c28f5c ## double 130.38
.quad 0x4060800000000000 ## double 132
.quad 0x4060900000000000 ## double 132.5
.quad 0x4060780000000000 ## double 131.75
.quad 0x4060600000000000 ## double 131
.quad 0x4060ac28f5c28f5c ## double 133.38
.quad 0x4060f00000000000 ## double 135.5
.quad 0x4060fc28f5c28f5c ## double 135.88
.quad 0x40610c28f5c28f5c ## double 136.38
.quad 0x4061180000000000 ## double 136.75
.quad 0x4060d00000000000 ## double 134.5
.quad 0x4060e00000000000 ## double 135
.quad 0x4060f00000000000 ## double 135.5
.quad 0x4060bc28f5c28f5c ## double 133.88
.quad 0x4060c80000000000 ## double 134.25
.quad 0x4060d00000000000 ## double 134.5
.quad 0x4060dc28f5c28f5c ## double 134.88
.quad 0x4060880000000000 ## double 132.25
.quad 0x4060980000000000 ## double 132.75
.quad 0x4060d428f5c28f5c ## double 134.63
.quad 0x4060d428f5c28f5c ## double 134.63
.quad 0x4060a00000000000 ## double 133
.quad 0x4061280000000000 ## double 137.25
.quad 0x40613c28f5c28f5c ## double 137.88
.quad 0x40613428f5c28f5c ## double 137.63
.quad 0x40611428f5c28f5c ## double 136.63
.quad 0x4061480000000000 ## double 138.25
.quad 0x4061480000000000 ## double 138.25
.quad 0x40611428f5c28f5c ## double 136.63
.quad 0x40611c28f5c28f5c ## double 136.88
.quad 0x40613428f5c28f5c ## double 137.63
.quad 0x4061200000000000 ## double 137
.quad 0x4060e80000000000 ## double 135.25
.quad 0x4060c428f5c28f5c ## double 134.13
.quad 0x4060d00000000000 ## double 134.5
.quad 0x40609428f5c28f5c ## double 132.63
.quad 0x40605428f5c28f5c ## double 130.63
.quad 0x4060100000000000 ## double 128.5
.quad 0x405f400000000000 ## double 125
.quad 0x405f27ae147ae148 ## double 124.62
.quad 0x405f600000000000 ## double 125.5
.quad 0x405f67ae147ae148 ## double 125.62
.quad 0x405f500000000000 ## double 125.25
.quad 0x405f000000000000 ## double 124
.quad 0x405ef00000000000 ## double 123.75
.quad 0x405e300000000000 ## double 120.75
.quad 0x405e200000000000 ## double 120.5
.quad 0x405e37ae147ae148 ## double 120.87
.quad 0x405e17ae147ae148 ## double 120.37
.quad 0x405e900000000000 ## double 122.25
.quad 0x405ea7ae147ae148 ## double 122.62
.quad 0x405ef7ae147ae148 ## double 123.87
.quad 0x405f000000000000 ## double 124
.quad 0x405f100000000000 ## double 124.25
.quad 0x405f100000000000 ## double 124.25
.quad 0x405f200000000000 ## double 124.5
.quad 0x405ef00000000000 ## double 123.75
.quad 0x405ef7ae147ae148 ## double 123.87
.quad 0x405f200000000000 ## double 124.5
.quad 0x405ed00000000000 ## double 123.25
.quad 0x405dc00000000000 ## double 119
.quad 0x405e100000000000 ## double 120.25
.quad 0x405de7ae147ae148 ## double 119.62
.quad 0x405dd7ae147ae148 ## double 119.37
.quad 0x405dc7ae147ae148 ## double 119.12
.quad 0x405d900000000000 ## double 118.25
.quad 0x405dc7ae147ae148 ## double 119.12
.quad 0x405de00000000000 ## double 119.5
.quad 0x405e100000000000 ## double 120.25
.quad 0x405e400000000000 ## double 121
.quad 0x405e57ae147ae148 ## double 121.37
.quad 0x405e900000000000 ## double 122.25
.quad 0x405ec7ae147ae148 ## double 123.12
.quad 0x405f37ae147ae148 ## double 124.87
.quad 0x405f47ae147ae148 ## double 125.12
.quad 0x405ed00000000000 ## double 123.25
.quad 0x405ea7ae147ae148 ## double 122.62
.quad 0x405e57ae147ae148 ## double 121.37
.quad 0x405e57ae147ae148 ## double 121.37
.quad 0x405e67ae147ae148 ## double 121.62
.quad 0x405ec7ae147ae148 ## double 123.12
.quad 0x405ed00000000000 ## double 123.25
.quad 0x405ef00000000000 ## double 123.75
.quad 0x405f100000000000 ## double 124.25
.quad 0x405f000000000000 ## double 124
.quad 0x405f600000000000 ## double 125.5
.quad 0x405f700000000000 ## double 125.75
.quad 0x405fc7ae147ae148 ## double 127.12
.quad 0x405ff00000000000 ## double 127.75
.quad 0x405fb7ae147ae148 ## double 126.87
.quad 0x405fa00000000000 ## double 126.5
.quad 0x405fb7ae147ae148 ## double 126.87
.quad 0x405fb00000000000 ## double 126.75
.quad 0x405fc7ae147ae148 ## double 127.12
.quad 0x405f37ae147ae148 ## double 124.87
.quad 0x405f47ae147ae148 ## double 125.12
.quad 0x405fb00000000000 ## double 126.75
.quad 0x405fd7ae147ae148 ## double 127.37
.quad 0x405fd7ae147ae148 ## double 127.37
.quad 0x405fa00000000000 ## double 126.5
.quad 0x405ff00000000000 ## double 127.75
.quad 0x405f900000000000 ## double 126.25
.quad 0x405ec00000000000 ## double 123
.quad 0x405eb00000000000 ## double 122.75
.quad 0x405ed7ae147ae148 ## double 123.37
.quad 0x405e900000000000 ## double 122.25
.quad 0x405e87ae147ae148 ## double 122.12
.quad 0x405e700000000000 ## double 121.75
.quad 0x405e900000000000 ## double 122.25
.quad 0x405e400000000000 ## double 121
.quad 0x405e97ae147ae148 ## double 122.37
.quad 0x405e900000000000 ## double 122.25
.quad 0x405ea00000000000 ## double 122.5
.quad 0x405ee7ae147ae148 ## double 123.62
.quad 0x405ef00000000000 ## double 123.75
.quad 0x405f87ae147ae148 ## double 126.12
.quad 0x405f800000000000 ## double 126
.quad 0x405f800000000000 ## double 126
.quad 0x405f300000000000 ## double 124.75
.quad 0x405f47ae147ae148 ## double 125.12
.quad 0x405fe00000000000 ## double 127.5
.quad 0x405f900000000000 ## double 126.25
.quad 0x40600c28f5c28f5c ## double 128.38
.quad 0x405ff7ae147ae148 ## double 127.87
.quad 0x4060100000000000 ## double 128.5
.quad 0x405ff00000000000 ## double 127.75
.quad 0x405f87ae147ae148 ## double 126.12
.quad 0x405ed7ae147ae148 ## double 123.37
.quad 0x405f200000000000 ## double 124.5
.quad 0x405ed7ae147ae148 ## double 123.37
.quad 0x405ef7ae147ae148 ## double 123.87
.quad 0x405f100000000000 ## double 124.25
.quad 0x405ea7ae147ae148 ## double 122.62
.quad 0x405ec7ae147ae148 ## double 123.12
.quad 0x405f07ae147ae148 ## double 124.12
.quad 0x405f37ae147ae148 ## double 124.87
.quad 0x405f800000000000 ## double 126
.quad 0x405f700000000000 ## double 125.75
.quad 0x405f300000000000 ## double 124.75
.quad 0x405f67ae147ae148 ## double 125.62
.quad 0x405f700000000000 ## double 125.75
.quad 0x405f97ae147ae148 ## double 126.37
.quad 0x405fb00000000000 ## double 126.75
.quad 0x405ee00000000000 ## double 123.5
.quad 0x405ef7ae147ae148 ## double 123.87
.quad 0x405ee7ae147ae148 ## double 123.62
.quad 0x405ee00000000000 ## double 123.5
.quad 0x405ed00000000000 ## double 123.25
.quad 0x405eb00000000000 ## double 122.75
.quad 0x405f37ae147ae148 ## double 124.87
.quad 0x405ed00000000000 ## double 123.25
.quad 0x405e400000000000 ## double 121
.quad 0x405eb00000000000 ## double 122.75
.quad 0x405f000000000000 ## double 124
.quad 0x405e500000000000 ## double 121.25
.quad 0x405cc7ae147ae148 ## double 115.12
.quad 0x405c500000000000 ## double 113.25
.quad 0x405bb7ae147ae148 ## double 110.87
.quad 0x405b57ae147ae148 ## double 109.37
.quad 0x405b67ae147ae148 ## double 109.62
.quad 0x405b300000000000 ## double 108.75
.quad 0x405af00000000000 ## double 107.75
.quad 0x405ac00000000000 ## double 107
.quad 0x405a97ae147ae148 ## double 106.37
.quad 0x405ad00000000000 ## double 107.25
.quad 0x405ad7ae147ae148 ## double 107.37
.quad 0x405b07ae147ae148 ## double 108.12
.quad 0x405b17ae147ae148 ## double 108.37
.quad 0x405ad00000000000 ## double 107.25
.quad 0x405a700000000000 ## double 105.75
.quad 0x405a67ae147ae148 ## double 105.62
.quad 0x405ac7ae147ae148 ## double 107.12
.quad 0x405ad7ae147ae148 ## double 107.37
.quad 0x405ae00000000000 ## double 107.5
.quad 0x405a800000000000 ## double 106
.quad 0x405ae00000000000 ## double 107.5
.quad 0x405ad7ae147ae148 ## double 107.37
.quad 0x405a700000000000 ## double 105.75
.quad 0x405aa00000000000 ## double 106.5
.quad 0x405a67ae147ae148 ## double 105.62
.quad 0x405a200000000000 ## double 104.5
.quad 0x405a600000000000 ## double 105.5
.quad 0x405a900000000000 ## double 106.25
.quad 0x405aa00000000000 ## double 106.5
.quad 0x405b000000000000 ## double 108
.quad 0x4059a00000000000 ## double 102.5
.quad 0x4059f00000000000 ## double 103.75
.quad 0x405977ae147ae148 ## double 101.87
.quad 0x405947ae147ae148 ## double 101.12
.quad 0x405a07ae147ae148 ## double 104.12
.quad 0x405a600000000000 ## double 105.5
.quad 0x405a37ae147ae148 ## double 104.87
.quad 0x405a600000000000 ## double 105.5
.quad 0x405a800000000000 ## double 106
.quad 0x405a900000000000 ## double 106.25
.quad 0x405a800000000000 ## double 106
.quad 0x405ae7ae147ae148 ## double 107.62
.quad 0x405b500000000000 ## double 109.25
.quad 0x405b27ae147ae148 ## double 108.62
.quad 0x405b300000000000 ## double 108.75
.quad 0x405b200000000000 ## double 108.5
.quad 0x405ae7ae147ae148 ## double 107.62
.quad 0x405af7ae147ae148 ## double 107.87
.quad 0x405b07ae147ae148 ## double 108.12
.quad 0x405b400000000000 ## double 109
.quad 0x405b200000000000 ## double 108.5
.quad 0x405b700000000000 ## double 109.75
.quad 0x405bb7ae147ae148 ## double 110.87
.quad 0x405bf00000000000 ## double 111.75
.quad 0x405c37ae147ae148 ## double 112.87
.quad 0x405c500000000000 ## double 113.25
.quad 0x405c100000000000 ## double 112.25
.quad 0x405c07ae147ae148 ## double 112.12
.quad 0x405c57ae147ae148 ## double 113.37
.quad 0x405cb7ae147ae148 ## double 114.87
.quad 0x405c900000000000 ## double 114.25
.quad 0x405c57ae147ae148 ## double 113.37
.quad 0x405cb7ae147ae148 ## double 114.87
.quad 0x405d17ae147ae148 ## double 116.37
.quad 0x405d500000000000 ## double 117.25
.quad 0x405d200000000000 ## double 116.5
.quad 0x405ca7ae147ae148 ## double 114.62
.quad 0x405c900000000000 ## double 114.25
.quad 0x405c600000000000 ## double 113.5
.quad 0x405c100000000000 ## double 112.25
.quad 0x405be7ae147ae148 ## double 111.62
.quad 0x405bd7ae147ae148 ## double 111.37
.quad 0x405bc7ae147ae148 ## double 111.12
.quad 0x405c100000000000 ## double 112.25
.quad 0x405c600000000000 ## double 113.5
.quad 0x405c37ae147ae148 ## double 112.87
.quad 0x405c600000000000 ## double 113.5
.quad 0x405c300000000000 ## double 112.75
.quad 0x405b97ae147ae148 ## double 110.37
.quad 0x405bb00000000000 ## double 110.75
.quad 0x405b800000000000 ## double 110
.quad 0x405b700000000000 ## double 109.75
.quad 0x405bd7ae147ae148 ## double 111.37
.quad 0x405be00000000000 ## double 111.5
.quad 0x405c200000000000 ## double 112.5
.quad 0x405ca7ae147ae148 ## double 114.62
.quad 0x405ca7ae147ae148 ## double 114.62
.quad 0x405cf00000000000 ## double 115.75
.quad 0x405cd7ae147ae148 ## double 115.37
.quad 0x405c77ae147ae148 ## double 113.87
.quad 0x405c500000000000 ## double 113.25
.quad 0x405c400000000000 ## double 113
.quad 0x405c800000000000 ## double 114
.quad 0x405ce00000000000 ## double 115.5
.quad 0x405cd7ae147ae148 ## double 115.37
.quad 0x405c800000000000 ## double 114
.quad 0x405c900000000000 ## double 114.25
.quad 0x405c200000000000 ## double 112.5
.quad 0x405be00000000000 ## double 111.5
.quad 0x405c000000000000 ## double 112
.quad 0x405ba00000000000 ## double 110.5
.quad 0x405b27ae147ae148 ## double 108.62
.quad 0x405b500000000000 ## double 109.25
.quad 0x405b100000000000 ## double 108.25
.quad 0x405be7ae147ae148 ## double 111.62
.quad 0x405be00000000000 ## double 111.5
.quad 0x405c37ae147ae148 ## double 112.87
.quad 0x405be7ae147ae148 ## double 111.62
.quad 0x405bf00000000000 ## double 111.75
.quad 0x405c200000000000 ## double 112.5
.quad 0x405c600000000000 ## double 113.5
.quad 0x405be00000000000 ## double 111.5
.quad 0x405b500000000000 ## double 109.25
.quad 0x405bb7ae147ae148 ## double 110.87
.quad 0x405b77ae147ae148 ## double 109.87
.quad 0x405ba00000000000 ## double 110.5
.quad 0x405b97ae147ae148 ## double 110.37
.quad 0x405bd7ae147ae148 ## double 111.37
.quad 0x405bc00000000000 ## double 111
.quad 0x405b900000000000 ## double 110.25
.quad 0x405ba00000000000 ## double 110.5
.quad 0x405b87ae147ae148 ## double 110.12
.quad 0x405bb7ae147ae148 ## double 110.87
.quad 0x405bb00000000000 ## double 110.75
.quad 0x405bb7ae147ae148 ## double 110.87
.quad 0x405c97ae147ae148 ## double 114.37
.quad 0x405ca00000000000 ## double 114.5
.quad 0x405ca7ae147ae148 ## double 114.62
.quad 0x405cb00000000000 ## double 114.75
.quad 0x405cc00000000000 ## double 115
.quad 0x405cc00000000000 ## double 115
.quad 0x405cf7ae147ae148 ## double 115.87
.quad 0x405d700000000000 ## double 117.75
.quad 0x405d500000000000 ## double 117.25
.quad 0x405d47ae147ae148 ## double 117.12
.quad 0x405dc7ae147ae148 ## double 119.12
.quad 0x405e300000000000 ## double 120.75
.quad 0x405e77ae147ae148 ## double 121.87
.quad 0x405e400000000000 ## double 121
.quad 0x405e200000000000 ## double 120.5
.quad 0x405e900000000000 ## double 122.25
.quad 0x405ec00000000000 ## double 123
.quad 0x405eb00000000000 ## double 122.75
.quad 0x405ef7ae147ae148 ## double 123.87
.quad 0x405f000000000000 ## double 124
.quad 0x405f200000000000 ## double 124.5
.quad 0x405f400000000000 ## double 125
.quad 0x405f000000000000 ## double 124
.quad 0x405ed7ae147ae148 ## double 123.37
.quad 0x405ed7ae147ae148 ## double 123.37
.quad 0x405f37ae147ae148 ## double 124.87
.quad 0x405f27ae147ae148 ## double 124.62
.quad 0x405f300000000000 ## double 124.75
.quad 0x405ef00000000000 ## double 123.75
.quad 0x405f07ae147ae148 ## double 124.12
.quad 0x405ee7ae147ae148 ## double 123.62
.quad 0x405ea7ae147ae148 ## double 122.62
.quad 0x405e97ae147ae148 ## double 122.37
.quad 0x405e37ae147ae148 ## double 120.87
.quad 0x405e67ae147ae148 ## double 121.62
.quad 0x405ec7ae147ae148 ## double 123.12
.quad 0x405f17ae147ae148 ## double 124.37
.quad 0x405f200000000000 ## double 124.5
.quad 0x405e97ae147ae148 ## double 122.37
.quad 0x405df7ae147ae148 ## double 119.87
.quad 0x405dd7ae147ae148 ## double 119.37
.quad 0x405dd00000000000 ## double 119.25
.quad 0x405dc7ae147ae148 ## double 119.12
.quad 0x405dd00000000000 ## double 119.25
.quad 0x405de00000000000 ## double 119.5
.quad 0x405e900000000000 ## double 122.25
.quad 0x405e700000000000 ## double 121.75
.quad 0x405e400000000000 ## double 121
.quad 0x405e700000000000 ## double 121.75
.quad 0x405ed7ae147ae148 ## double 123.37
.quad 0x405f67ae147ae148 ## double 125.62
.quad 0x405f600000000000 ## double 125.5
.quad 0x405ef00000000000 ## double 123.75
.quad 0x405f200000000000 ## double 124.5
.quad 0x405f77ae147ae148 ## double 125.87
.quad 0x405fe7ae147ae148 ## double 127.62
.quad 0x405ff00000000000 ## double 127.75
.quad 0x405fd00000000000 ## double 127.25
.quad 0x405f700000000000 ## double 125.75
.quad 0x405ee7ae147ae148 ## double 123.62
.quad 0x405eb7ae147ae148 ## double 122.87
.quad 0x405ed7ae147ae148 ## double 123.37
.quad 0x405ed00000000000 ## double 123.25
.quad 0x405fa00000000000 ## double 126.5
.quad 0x405fc00000000000 ## double 127
.quad 0x405fd00000000000 ## double 127.25
.quad 0x4060200000000000 ## double 129
.quad 0x4060400000000000 ## double 130
.quad 0x40602c28f5c28f5c ## double 129.38
.quad 0x40601c28f5c28f5c ## double 128.88
.quad 0x4060380000000000 ## double 129.75
.quad 0x40601428f5c28f5c ## double 128.63
.quad 0x4060380000000000 ## double 129.75
.quad 0x4060500000000000 ## double 130.5
.quad 0x4060580000000000 ## double 130.75
.quad 0x40608428f5c28f5c ## double 132.13
.quad 0x40609428f5c28f5c ## double 132.63
.quad 0x4060b80000000000 ## double 133.75
.quad 0x4060bc28f5c28f5c ## double 133.88
.quad 0x4060ac28f5c28f5c ## double 133.38
.quad 0x4060c00000000000 ## double 134
.quad 0x4060c80000000000 ## double 134.25
.quad 0x4060900000000000 ## double 132.5
.quad 0x4060880000000000 ## double 132.25
.quad 0x4060680000000000 ## double 131.25
.quad 0x40604428f5c28f5c ## double 130.13
.quad 0x4060100000000000 ## double 128.5
.quad 0x40600428f5c28f5c ## double 128.13
.quad 0x4060300000000000 ## double 129.5
.quad 0x40602428f5c28f5c ## double 129.13
.quad 0x40600c28f5c28f5c ## double 128.38
.quad 0x4060300000000000 ## double 129.5
.quad 0x405fa7ae147ae148 ## double 126.62
.quad 0x405f300000000000 ## double 124.75
.quad 0x405f07ae147ae148 ## double 124.12
.quad 0x405f200000000000 ## double 124.5
.quad 0x405f37ae147ae148 ## double 124.87
.quad 0x405f100000000000 ## double 124.25
.quad 0x405ed00000000000 ## double 123.25
.quad 0x405ee00000000000 ## double 123.5
.quad 0x405e800000000000 ## double 122
.quad 0x405ed7ae147ae148 ## double 123.37
.quad 0x405eb7ae147ae148 ## double 122.87
.quad 0x405ee00000000000 ## double 123.5
.quad 0x405eb7ae147ae148 ## double 122.87
.quad 0x405e77ae147ae148 ## double 121.87
.quad 0x405df00000000000 ## double 119.75
.quad 0x405de7ae147ae148 ## double 119.62
.quad 0x405de00000000000 ## double 119.5
.quad 0x405dd00000000000 ## double 119.25
.quad 0x405dd00000000000 ## double 119.25
.quad 0x405d900000000000 ## double 118.25
.quad 0x405d37ae147ae148 ## double 116.87
.quad 0x405dc00000000000 ## double 119
.quad 0x405e37ae147ae148 ## double 120.87
.quad 0x405ed00000000000 ## double 123.25
.quad 0x405ea00000000000 ## double 122.5
.quad 0x405ef00000000000 ## double 123.75
.quad 0x405ee00000000000 ## double 123.5
.quad 0x405e800000000000 ## double 122
.quad 0x405e600000000000 ## double 121.5
.quad 0x405db7ae147ae148 ## double 118.87
.quad 0x405dc7ae147ae148 ## double 119.12
.quad 0x405dc00000000000 ## double 119
.quad 0x405de00000000000 ## double 119.5
.quad 0x405df7ae147ae148 ## double 119.87
.quad 0x405e07ae147ae148 ## double 120.12
.quad 0x405df7ae147ae148 ## double 119.87
.quad 0x405e57ae147ae148 ## double 121.37
.quad 0x405e600000000000 ## double 121.5
.quad 0x405e57ae147ae148 ## double 121.37
.quad 0x405e800000000000 ## double 122
.quad 0x405f07ae147ae148 ## double 124.12
.quad 0x405fa00000000000 ## double 126.5
.quad 0x405fc00000000000 ## double 127
.quad 0x405f900000000000 ## double 126.25
.quad 0x405f600000000000 ## double 125.5
.quad 0x405f900000000000 ## double 126.25
.quad 0x405f57ae147ae148 ## double 125.37
.quad 0x405e600000000000 ## double 121.5
.quad 0x405e200000000000 ## double 120.5
.quad 0x405e700000000000 ## double 121.75
.quad 0x405f000000000000 ## double 124
.quad 0x405e700000000000 ## double 121.75
.quad 0x405ee00000000000 ## double 123.5
.quad 0x405ed7ae147ae148 ## double 123.37
.quad 0x405e600000000000 ## double 121.5
.quad 0x405eb00000000000 ## double 122.75
.quad 0x405e700000000000 ## double 121.75
.quad 0x405de7ae147ae148 ## double 119.62
.quad 0x405e500000000000 ## double 121.25
.quad 0x405e400000000000 ## double 121
.quad 0x405e37ae147ae148 ## double 120.87
.quad 0x405e57ae147ae148 ## double 121.37
.quad 0x405eb00000000000 ## double 122.75
.quad 0x405ef7ae147ae148 ## double 123.87
.quad 0x405ef00000000000 ## double 123.75
.quad 0x405f200000000000 ## double 124.5
.quad 0x405ee7ae147ae148 ## double 123.62
.quad 0x405eb00000000000 ## double 122.75
.quad 0x405ea00000000000 ## double 122.5
.quad 0x405eb7ae147ae148 ## double 122.87
.quad 0x405e400000000000 ## double 121
.quad 0x405df00000000000 ## double 119.75
.quad 0x405d500000000000 ## double 117.25
.quad 0x405ca7ae147ae148 ## double 114.62
.quad 0x405ca00000000000 ## double 114.5
.quad 0x405cb00000000000 ## double 114.75
.quad 0x405d100000000000 ## double 116.25
.quad 0x405d07ae147ae148 ## double 116.12
.quad 0x405cb7ae147ae148 ## double 114.87
.quad 0x405c77ae147ae148 ## double 113.87
.quad 0x405bf7ae147ae148 ## double 111.87
.quad 0x405c37ae147ae148 ## double 112.87
.quad 0x405cc00000000000 ## double 115
.quad 0x405cf7ae147ae148 ## double 115.87
.quad 0x405ce7ae147ae148 ## double 115.62
.quad 0x405cc00000000000 ## double 115
.quad 0x405c37ae147ae148 ## double 112.87
.quad 0x405bf00000000000 ## double 111.75
.quad 0x405c200000000000 ## double 112.5
.quad 0x405cb00000000000 ## double 114.75
.quad 0x405cc00000000000 ## double 115
.quad 0x405cd00000000000 ## double 115.25
.quad 0x405d27ae147ae148 ## double 116.62
.quad 0x405d000000000000 ## double 116
.quad 0x405d400000000000 ## double 117
.quad 0x405d67ae147ae148 ## double 117.62
.quad 0x405d900000000000 ## double 118.25
.quad 0x405da00000000000 ## double 118.5
.quad 0x405cf7ae147ae148 ## double 115.87
.quad 0x405d37ae147ae148 ## double 116.87
.quad 0x405cf7ae147ae148 ## double 115.87
.quad 0x405d47ae147ae148 ## double 117.12
.quad 0x405d67ae147ae148 ## double 117.62
.quad 0x405d600000000000 ## double 117.5
.quad 0x405d800000000000 ## double 118
.quad 0x405d500000000000 ## double 117.25
.quad 0x405d47ae147ae148 ## double 117.12
.quad 0x405d77ae147ae148 ## double 117.87
.quad 0x405ce00000000000 ## double 115.5
.quad 0x405cd00000000000 ## double 115.25
.quad 0x405c07ae147ae148 ## double 112.12
.quad 0x405c27ae147ae148 ## double 112.62
.quad 0x405ba00000000000 ## double 110.5
.quad 0x405b800000000000 ## double 110
.quad 0x405b400000000000 ## double 109
.quad 0x405ad7ae147ae148 ## double 107.37
.quad 0x405ab00000000000 ## double 106.75
.quad 0x405a000000000000 ## double 104
.quad 0x4059d7ae147ae148 ## double 103.37
.quad 0x4059b00000000000 ## double 102.75
.quad 0x4059d00000000000 ## double 103.25
.quad 0x4059a00000000000 ## double 102.5
.quad 0x405a37ae147ae148 ## double 104.87
.quad 0x405a200000000000 ## double 104.5
.quad 0x405a000000000000 ## double 104
.quad 0x4059b7ae147ae148 ## double 102.87
.quad 0x4059d00000000000 ## double 103.25
.quad 0x4059c7ae147ae148 ## double 103.12
.quad 0x4059b7ae147ae148 ## double 102.87
.quad 0x405947ae147ae148 ## double 101.12
.quad 0x405947ae147ae148 ## double 101.12
.quad 0x4059000000000000 ## double 100
.quad 0x4058d00000000000 ## double 99.25
.quad 0x4059300000000000 ## double 100.75
.quad 0x4059400000000000 ## double 101
.quad 0x405947ae147ae148 ## double 101.12
.quad 0x4059400000000000 ## double 101
.quad 0x4059a7ae147ae148 ## double 102.62
.quad 0x405997ae147ae148 ## double 102.37
.quad 0x4059d00000000000 ## double 103.25
.quad 0x4059d7ae147ae148 ## double 103.37
.quad 0x4059900000000000 ## double 102.25
.quad 0x4059c00000000000 ## double 103
.quad 0x4059c00000000000 ## double 103
.quad 0x405977ae147ae148 ## double 101.87
.quad 0x4059200000000000 ## double 100.5
.quad 0x4059500000000000 ## double 101.25
.quad 0x4059600000000000 ## double 101.5
.quad 0x4058b00000000000 ## double 98.75
.quad 0x4058e00000000000 ## double 99.5
.quad 0x4058e00000000000 ## double 99.5
.quad 0x4058c00000000000 ## double 99
.quad 0x4059100000000000 ## double 100.25
.quad 0x4059200000000000 ## double 100.5
.quad 0x4058e00000000000 ## double 99.5
.quad 0x4058700000000000 ## double 97.75
.quad 0x405837ae147ae148 ## double 96.870000000000005
.quad 0x4057e00000000000 ## double 95.5
.quad 0x405857ae147ae148 ## double 97.370000000000005
.quad 0x405897ae147ae148 ## double 98.370000000000005
.quad 0x4058600000000000 ## double 97.5
.quad 0x4058000000000000 ## double 96
.quad 0x4058500000000000 ## double 97.25
.quad 0x4058d00000000000 ## double 99.25
.quad 0x4058b7ae147ae148 ## double 98.870000000000005
.quad 0x4058a00000000000 ## double 98.5
.quad 0x4058800000000000 ## double 98
.quad 0x405807ae147ae148 ## double 96.120000000000005
.quad 0x4058100000000000 ## double 96.25
.quad 0x405787ae147ae148 ## double 94.120000000000005
.quad 0x4058600000000000 ## double 97.5
.quad 0x4058b00000000000 ## double 98.75
.quad 0x4059100000000000 ## double 100.25
.quad 0x405917ae147ae148 ## double 100.37
.quad 0x4059300000000000 ## double 100.75
.quad 0x4058f7ae147ae148 ## double 99.870000000000005
.quad 0x4058d00000000000 ## double 99.25
.quad 0x405907ae147ae148 ## double 100.12
.quad 0x405897ae147ae148 ## double 98.370000000000005
.quad 0x4058d00000000000 ## double 99.25
.quad 0x405897ae147ae148 ## double 98.370000000000005
.quad 0x4058a00000000000 ## double 98.5
.quad 0x4058300000000000 ## double 96.75
.quad 0x4057e7ae147ae148 ## double 95.620000000000005
.quad 0x4058400000000000 ## double 97
.quad 0x405847ae147ae148 ## double 97.120000000000005
.quad 0x4058800000000000 ## double 98
.quad 0x4058500000000000 ## double 97.25
.quad 0x405817ae147ae148 ## double 96.370000000000005
.quad 0x405837ae147ae148 ## double 96.870000000000005
.quad 0x4057a00000000000 ## double 94.5
.quad 0x405817ae147ae148 ## double 96.370000000000005
.quad 0x4058000000000000 ## double 96
.quad 0x4057a7ae147ae148 ## double 94.620000000000005
.quad 0x4057a7ae147ae148 ## double 94.620000000000005
.quad 0x4056d00000000000 ## double 91.25
.quad 0x4056e00000000000 ## double 91.5
.quad 0x4058100000000000 ## double 96.25
.quad 0x4057b00000000000 ## double 94.75
.quad 0x4057c7ae147ae148 ## double 95.120000000000005
.quad 0x4058000000000000 ## double 96
.quad 0x4058600000000000 ## double 97.5
.quad 0x405817ae147ae148 ## double 96.370000000000005
.quad 0x4057700000000000 ## double 93.75
.quad 0x405657ae147ae148 ## double 89.370000000000005
.quad 0x4056500000000000 ## double 89.25
.quad 0x4056100000000000 ## double 88.25
.quad 0x4055a00000000000 ## double 86.5
.quad 0x4054f00000000000 ## double 83.75
.quad 0x4054e7ae147ae148 ## double 83.620000000000005
.quad 0x4054b00000000000 ## double 82.75
.quad 0x405487ae147ae148 ## double 82.120000000000005
.quad 0x4054c00000000000 ## double 83
.quad 0x405517ae147ae148 ## double 84.370000000000005
.quad 0x4055200000000000 ## double 84.5
.quad 0x4055200000000000 ## double 84.5
.quad 0x4054c7ae147ae148 ## double 83.120000000000005
.quad 0x4054b00000000000 ## double 82.75
.quad 0x405557ae147ae148 ## double 85.370000000000005
.quad 0x405557ae147ae148 ## double 85.370000000000005
.quad 0x4055a00000000000 ## double 86.5
.quad 0x405577ae147ae148 ## double 85.870000000000005
.quad 0x4054f00000000000 ## double 83.75
.quad 0x4055400000000000 ## double 85
.quad 0x405557ae147ae148 ## double 85.370000000000005
.quad 0x4055600000000000 ## double 85.5
.quad 0x4054c00000000000 ## double 83
.quad 0x4054600000000000 ## double 81.5
.quad 0x4054200000000000 ## double 80.5
.quad 0x4054600000000000 ## double 81.5
.quad 0x405497ae147ae148 ## double 82.370000000000005
.quad 0x405487ae147ae148 ## double 82.120000000000005
.quad 0x405477ae147ae148 ## double 81.870000000000005
.quad 0x4055300000000000 ## double 84.75
.quad 0x4055300000000000 ## double 84.75
.quad 0x4055200000000000 ## double 84.5
.quad 0x4054e00000000000 ## double 83.5
.quad 0x4054c00000000000 ## double 83
.quad 0x405417ae147ae148 ## double 80.370000000000005
.quad 0x405477ae147ae148 ## double 81.870000000000005
.quad 0x405507ae147ae148 ## double 84.120000000000005
.quad 0x405517ae147ae148 ## double 84.370000000000005
.quad 0x4055200000000000 ## double 84.5
.quad 0x405447ae147ae148 ## double 81.120000000000005
.quad 0x405417ae147ae148 ## double 80.370000000000005
.quad 0x4053c00000000000 ## double 79
.quad 0x405307ae147ae148 ## double 76.120000000000005
.quad 0x4052a7ae147ae148 ## double 74.620000000000005
.quad 0x4052a7ae147ae148 ## double 74.620000000000005
.quad 0x4052b7ae147ae148 ## double 74.870000000000005
.quad 0x405317ae147ae148 ## double 76.370000000000005
.quad 0x4053500000000000 ## double 77.25
.quad 0x4053400000000000 ## double 77
.quad 0x4053200000000000 ## double 76.5
.quad 0x405327ae147ae148 ## double 76.620000000000005
.quad 0x405367ae147ae148 ## double 77.620000000000005
.quad 0x4053300000000000 ## double 76.75
.quad 0x4052b00000000000 ## double 74.75
.quad 0x4052b00000000000 ## double 74.75
.quad 0x4052c00000000000 ## double 75
.quad 0x405267ae147ae148 ## double 73.620000000000005
.quad 0x4052800000000000 ## double 74
.quad 0x4052400000000000 ## double 73
.quad 0x405207ae147ae148 ## double 72.120000000000005
.quad 0x4052500000000000 ## double 73.25
.quad 0x405267ae147ae148 ## double 73.620000000000005
.quad 0x4052300000000000 ## double 72.75
.quad 0x4052800000000000 ## double 74
.quad 0x4052000000000000 ## double 72
.quad 0x4051c00000000000 ## double 71
.quad 0x4051c00000000000 ## double 71
.quad 0x4051800000000000 ## double 70
.quad 0x4051500000000000 ## double 69.25
.quad 0x4051a00000000000 ## double 70.5
.quad 0x405187ae147ae148 ## double 70.120000000000005
.quad 0x4051500000000000 ## double 69.25
.quad 0x4051600000000000 ## double 69.5
.quad 0x405127ae147ae148 ## double 68.620000000000005
.quad 0x4050b00000000000 ## double 66.75
.quad 0x4050e7ae147ae148 ## double 67.620000000000005
.quad 0x405087ae147ae148 ## double 66.120000000000005
.quad 0x404fd0a3d70a3d71 ## double 63.630000000000003
.quad 0x404f800000000000 ## double 63
.quad 0x404f70a3d70a3d71 ## double 62.880000000000003
.quad 0x404f90a3d70a3d71 ## double 63.130000000000003
.quad 0x404fe00000000000 ## double 63.75
.quad 0x404fa00000000000 ## double 63.25
.quad 0x4050100000000000 ## double 64.25
.quad 0x4050300000000000 ## double 64.75
.quad 0x405087ae147ae148 ## double 66.120000000000005
.quad 0x4050d00000000000 ## double 67.25
.quad 0x4050c7ae147ae148 ## double 67.120000000000005
.quad 0x4050900000000000 ## double 66.25
.quad 0x405067ae147ae148 ## double 65.620000000000005
.quad 0x4050800000000000 ## double 66
.quad 0x4050900000000000 ## double 66.25
.quad 0x4050a7ae147ae148 ## double 66.620000000000005
.quad 0x4050c00000000000 ## double 67
.quad 0x4050d7ae147ae148 ## double 67.370000000000005
.quad 0x405127ae147ae148 ## double 68.620000000000005
.quad 0x4050e7ae147ae148 ## double 67.620000000000005
.quad 0x4050a7ae147ae148 ## double 66.620000000000005
.quad 0x4050f00000000000 ## double 67.75
.quad 0x4050e00000000000 ## double 67.5
.quad 0x4050b7ae147ae148 ## double 66.870000000000005
.quad 0x4050400000000000 ## double 65
.quad 0x4050b7ae147ae148 ## double 66.870000000000005
.quad 0x404f400000000000 ## double 62.5
.quad 0x404ed0a3d70a3d71 ## double 61.630000000000003
.quad 0x404ea00000000000 ## double 61.25
.quad 0x404e600000000000 ## double 60.75
.quad 0x404e50a3d70a3d71 ## double 60.630000000000003
.quad 0x404e70a3d70a3d71 ## double 60.880000000000003
.quad 0x404ec00000000000 ## double 61.5
.quad 0x404eb0a3d70a3d71 ## double 61.380000000000003
.quad 0x404ec00000000000 ## double 61.5
.quad 0x404e50a3d70a3d71 ## double 60.630000000000003
.quad 0x404ea00000000000 ## double 61.25
.quad 0x404ec00000000000 ## double 61.5
.quad 0x404df0a3d70a3d71 ## double 59.880000000000003
.quad 0x404dd0a3d70a3d71 ## double 59.630000000000003
.quad 0x404d50a3d70a3d71 ## double 58.630000000000003
.quad 0x404da00000000000 ## double 59.25
.quad 0x404e000000000000 ## double 60
.quad 0x404de00000000000 ## double 59.75
.quad 0x404dc00000000000 ## double 59.5
.quad 0x404e10a3d70a3d71 ## double 60.130000000000003
.quad 0x404d800000000000 ## double 59
.quad 0x404d50a3d70a3d71 ## double 58.630000000000003
.quad 0x404df0a3d70a3d71 ## double 59.880000000000003
.quad 0x404e50a3d70a3d71 ## double 60.630000000000003
.quad 0x404ed0a3d70a3d71 ## double 61.630000000000003
.quad 0x404f10a3d70a3d71 ## double 62.130000000000003
.quad 0x404f10a3d70a3d71 ## double 62.130000000000003
.quad 0x404ef0a3d70a3d71 ## double 61.880000000000003
.quad 0x404f10a3d70a3d71 ## double 62.130000000000003
.quad 0x404f30a3d70a3d71 ## double 62.380000000000003
.quad 0x404f30a3d70a3d71 ## double 62.380000000000003
.quad 0x404f50a3d70a3d71 ## double 62.630000000000003
.quad 0x404f50a3d70a3d71 ## double 62.630000000000003
.quad 0x404f800000000000 ## double 63
.quad 0x404f800000000000 ## double 63
.quad 0x404fa00000000000 ## double 63.25
.quad 0x404fa00000000000 ## double 63.25
.quad 0x404fe00000000000 ## double 63.75
.quad 0x4050000000000000 ## double 64
.quad 0x4050100000000000 ## double 64.25
.quad 0x405027ae147ae148 ## double 64.620000000000005
.quad 0x4050300000000000 ## double 64.75
.quad 0x405037ae147ae148 ## double 64.870000000000005
.quad 0x405047ae147ae148 ## double 65.120000000000005
.quad 0x405047ae147ae148 ## double 65.120000000000005
.quad 0x4050500000000000 ## double 65.25
.quad 0x4050500000000000 ## double 65.25
.quad 0x4050300000000000 ## double 64.75
.quad 0x4050300000000000 ## double 64.75
.quad 0x405027ae147ae148 ## double 64.620000000000005
.quad 0x4050500000000000 ## double 65.25
.quad 0x4050700000000000 ## double 65.75
.quad 0x405087ae147ae148 ## double 66.120000000000005
.quad 0x4050700000000000 ## double 65.75
.quad 0x4050300000000000 ## double 64.75
.quad 0x404ff0a3d70a3d71 ## double 63.880000000000003
.quad 0x404ff0a3d70a3d71 ## double 63.880000000000003
.quad 0x405047ae147ae148 ## double 65.120000000000005
.quad 0x405037ae147ae148 ## double 64.870000000000005
.quad 0x405027ae147ae148 ## double 64.620000000000005
.quad 0x4050000000000000 ## double 64
.quad 0x404f600000000000 ## double 62.75
.quad 0x404f400000000000 ## double 62.5
.quad 0x404f400000000000 ## double 62.5
.quad 0x404f10a3d70a3d71 ## double 62.130000000000003
.quad 0x404ef0a3d70a3d71 ## double 61.880000000000003
.quad 0x404ee00000000000 ## double 61.75
.quad 0x404ef0a3d70a3d71 ## double 61.880000000000003
.quad 0x404e90a3d70a3d71 ## double 61.130000000000003
.quad 0x404e50a3d70a3d71 ## double 60.630000000000003
.quad 0x404e400000000000 ## double 60.5
.quad 0x404e200000000000 ## double 60.25
.quad 0x404de00000000000 ## double 59.75
.quad 0x404e10a3d70a3d71 ## double 60.130000000000003
.quad 0x404db0a3d70a3d71 ## double 59.380000000000003
.quad 0x404dd0a3d70a3d71 ## double 59.630000000000003
.quad 0x404dc00000000000 ## double 59.5
.quad 0x404d000000000000 ## double 58
.quad 0x404d30a3d70a3d71 ## double 58.380000000000003
.quad 0x404d10a3d70a3d71 ## double 58.130000000000003
.quad 0x404d70a3d70a3d71 ## double 58.880000000000003
.quad 0x404d70a3d70a3d71 ## double 58.880000000000003
.quad 0x404d10a3d70a3d71 ## double 58.130000000000003
.quad 0x404db0a3d70a3d71 ## double 59.380000000000003
.quad 0x404dd0a3d70a3d71 ## double 59.630000000000003
.quad 0x404dc00000000000 ## double 59.5
.quad 0x404da00000000000 ## double 59.25
.quad 0x404db0a3d70a3d71 ## double 59.380000000000003
.quad 0x404e000000000000 ## double 60
.quad 0x404e70a3d70a3d71 ## double 60.880000000000003
.quad 0x404f30a3d70a3d71 ## double 62.380000000000003
.quad 0x404f600000000000 ## double 62.75
.quad 0x404ef0a3d70a3d71 ## double 61.880000000000003
.quad 0x404f400000000000 ## double 62.5
.quad 0x404f10a3d70a3d71 ## double 62.130000000000003
.quad 0x404eb0a3d70a3d71 ## double 61.380000000000003
.quad 0x404f70a3d70a3d71 ## double 62.880000000000003
.quad 0x404f800000000000 ## double 63
.quad 0x404f800000000000 ## double 63
.quad 0x404fa00000000000 ## double 63.25
.quad 0x404f30a3d70a3d71 ## double 62.380000000000003
.quad 0x404ee00000000000 ## double 61.75
.quad 0x404f000000000000 ## double 62
.quad 0x404f30a3d70a3d71 ## double 62.380000000000003
.quad 0x404ee00000000000 ## double 61.75
.quad 0x404f800000000000 ## double 63
.quad 0x4050000000000000 ## double 64
.quad 0x404fc00000000000 ## double 63.5
.quad 0x404fa00000000000 ## double 63.25
.quad 0x404fc00000000000 ## double 63.5
.quad 0x404fb0a3d70a3d71 ## double 63.380000000000003
.quad 0x4050300000000000 ## double 64.75
.quad 0x4050200000000000 ## double 64.5
.quad 0x404f600000000000 ## double 62.75
.quad 0x404f400000000000 ## double 62.5
.quad 0x404f200000000000 ## double 62.25
.quad 0x404f50a3d70a3d71 ## double 62.630000000000003
.quad 0x404f800000000000 ## double 63
.quad 0x404f000000000000 ## double 62
.quad 0x404f10a3d70a3d71 ## double 62.130000000000003
.quad 0x404f10a3d70a3d71 ## double 62.130000000000003
.quad 0x404e10a3d70a3d71 ## double 60.130000000000003
.quad 0x404d70a3d70a3d71 ## double 58.880000000000003
.quad 0x404d200000000000 ## double 58.25
.quad 0x404d30a3d70a3d71 ## double 58.380000000000003
.quad 0x404d600000000000 ## double 58.75
.quad 0x404c600000000000 ## double 56.75
.quad 0x404ca00000000000 ## double 57.25
.quad 0x404c800000000000 ## double 57
.quad 0x404cf0a3d70a3d71 ## double 57.880000000000003
.quad 0x404d30a3d70a3d71 ## double 58.380000000000003
.quad 0x404cb0a3d70a3d71 ## double 57.380000000000003
.quad 0x404cd0a3d70a3d71 ## double 57.630000000000003
.quad 0x404c800000000000 ## double 57
.quad 0x404cb0a3d70a3d71 ## double 57.380000000000003
.quad 0x404c800000000000 ## double 57
.quad 0x404c70a3d70a3d71 ## double 56.880000000000003
.quad 0x404ca00000000000 ## double 57.25
.quad 0x404c70a3d70a3d71 ## double 56.880000000000003
.quad 0x404c800000000000 ## double 57
.quad 0x404be00000000000 ## double 55.75
.quad 0x404b400000000000 ## double 54.5
.quad 0x404af0a3d70a3d71 ## double 53.880000000000003
.quad 0x404af0a3d70a3d71 ## double 53.880000000000003
.quad 0x404b600000000000 ## double 54.75
.quad 0x404b600000000000 ## double 54.75
.quad 0x404b000000000000 ## double 54
.quad 0x404ae00000000000 ## double 53.75
.quad 0x404bb0a3d70a3d71 ## double 55.380000000000003
.quad 0x404b90a3d70a3d71 ## double 55.130000000000003
.quad 0x404b000000000000 ## double 54
.quad 0x404ae00000000000 ## double 53.75
.quad 0x404b50a3d70a3d71 ## double 54.630000000000003
.quad 0x404b400000000000 ## double 54.5
.quad 0x404b400000000000 ## double 54.5
.quad 0x404a90a3d70a3d71 ## double 53.130000000000003
.quad 0x404a30a3d70a3d71 ## double 52.380000000000003
.quad 0x404970a3d70a3d71 ## double 50.880000000000003
.quad 0x404970a3d70a3d71 ## double 50.880000000000003
.quad 0x404930a3d70a3d71 ## double 50.380000000000003
.quad 0x404990a3d70a3d71 ## double 51.130000000000003
.quad 0x4049b0a3d70a3d71 ## double 51.380000000000003
.quad 0x4049a00000000000 ## double 51.25
.quad 0x404a50a3d70a3d71 ## double 52.630000000000003
.quad 0x404a90a3d70a3d71 ## double 53.130000000000003
.quad 0x404a50a3d70a3d71 ## double 52.630000000000003
.quad 0x404a800000000000 ## double 53
.quad 0x4049e00000000000 ## double 51.75
.quad 0x4049e00000000000 ## double 51.75
.quad 0x404a600000000000 ## double 52.75
.quad 0x404a30a3d70a3d71 ## double 52.380000000000003
.quad 0x404a800000000000 ## double 53
.quad 0x404a90a3d70a3d71 ## double 53.130000000000003
.quad 0x4049e00000000000 ## double 51.75
.quad 0x4048e00000000000 ## double 49.75
.quad 0x404970a3d70a3d71 ## double 50.880000000000003
.quad 0x4049a00000000000 ## double 51.25
.quad 0x404970a3d70a3d71 ## double 50.880000000000003
.quad 0x404990a3d70a3d71 ## double 51.130000000000003
.quad 0x4049d0a3d70a3d71 ## double 51.630000000000003
.quad 0x404a200000000000 ## double 52.25
.quad 0x4049f0a3d70a3d71 ## double 51.880000000000003
.quad 0x4049d0a3d70a3d71 ## double 51.630000000000003
.quad 0x404a30a3d70a3d71 ## double 52.380000000000003
.quad 0x404a70a3d70a3d71 ## double 52.880000000000003
.quad 0x404ab0a3d70a3d71 ## double 53.380000000000003
.quad 0x404b90a3d70a3d71 ## double 55.130000000000003
.quad 0x404c000000000000 ## double 56
.quad 0x404c200000000000 ## double 56.25
.quad 0x404bc00000000000 ## double 55.5
.quad 0x404b800000000000 ## double 55
.quad 0x404b90a3d70a3d71 ## double 55.130000000000003
.quad 0x404bc00000000000 ## double 55.5
.quad 0x404b90a3d70a3d71 ## double 55.130000000000003
.quad 0x404b30a3d70a3d71 ## double 54.380000000000003
.quad 0x404b400000000000 ## double 54.5
.quad 0x404b70a3d70a3d71 ## double 54.880000000000003
.quad 0x404b800000000000 ## double 55
.quad 0x404b200000000000 ## double 54.25
.quad 0x404ba00000000000 ## double 55.25
.quad 0x404b50a3d70a3d71 ## double 54.630000000000003
.quad 0x404b800000000000 ## double 55
.quad 0x404b90a3d70a3d71 ## double 55.130000000000003
.quad 0x404b400000000000 ## double 54.5
.quad 0x404b90a3d70a3d71 ## double 55.130000000000003
.quad 0x404bb0a3d70a3d71 ## double 55.380000000000003
.quad 0x404c400000000000 ## double 56.5
.quad 0x404c600000000000 ## double 56.75
.quad 0x404c10a3d70a3d71 ## double 56.130000000000003
.quad 0x404b800000000000 ## double 55
.quad 0x404b400000000000 ## double 54.5
.quad 0x404b10a3d70a3d71 ## double 54.130000000000003
.quad 0x404b70a3d70a3d71 ## double 54.880000000000003
.quad 0x404c10a3d70a3d71 ## double 56.130000000000003
.quad 0x404c200000000000 ## double 56.25
.quad 0x404c10a3d70a3d71 ## double 56.130000000000003
.quad 0x404c30a3d70a3d71 ## double 56.380000000000003
.quad 0x404c10a3d70a3d71 ## double 56.130000000000003
.quad 0x404c30a3d70a3d71 ## double 56.380000000000003
.quad 0x404c10a3d70a3d71 ## double 56.130000000000003
.quad 0x404c50a3d70a3d71 ## double 56.630000000000003
.quad 0x404c50a3d70a3d71 ## double 56.630000000000003
.quad 0x404cb0a3d70a3d71 ## double 57.380000000000003
.quad 0x404cc00000000000 ## double 57.5
.quad 0x404ca00000000000 ## double 57.25
.quad 0x404c90a3d70a3d71 ## double 57.130000000000003
.quad 0x404d200000000000 ## double 58.25
.quad 0x404d600000000000 ## double 58.75
.quad 0x404d50a3d70a3d71 ## double 58.630000000000003
.quad 0x404d600000000000 ## double 58.75
.quad 0x404d70a3d70a3d71 ## double 58.880000000000003
.quad 0x404d10a3d70a3d71 ## double 58.130000000000003
.quad 0x404ca00000000000 ## double 57.25
.quad 0x404cc00000000000 ## double 57.5
.quad 0x404d600000000000 ## double 58.75
.quad 0x404d200000000000 ## double 58.25
.quad 0x404c600000000000 ## double 56.75
.quad 0x404c50a3d70a3d71 ## double 56.630000000000003
.quad 0x404c50a3d70a3d71 ## double 56.630000000000003
.quad 0x404be00000000000 ## double 55.75
.quad 0x404bc00000000000 ## double 55.5
.quad 0x404bd0a3d70a3d71 ## double 55.630000000000003
.quad 0x404b90a3d70a3d71 ## double 55.130000000000003
.quad 0x404ba00000000000 ## double 55.25
.quad 0x404bf0a3d70a3d71 ## double 55.880000000000003
.quad 0x404bd0a3d70a3d71 ## double 55.630000000000003
.quad 0x404bf0a3d70a3d71 ## double 55.880000000000003
.quad 0x404c50a3d70a3d71 ## double 56.630000000000003
.quad 0x404c70a3d70a3d71 ## double 56.880000000000003
.quad 0x404c70a3d70a3d71 ## double 56.880000000000003
.quad 0x404c70a3d70a3d71 ## double 56.880000000000003
.quad 0x404c200000000000 ## double 56.25
.quad 0x404c400000000000 ## double 56.5
.quad 0x404c70a3d70a3d71 ## double 56.880000000000003
.quad 0x404c90a3d70a3d71 ## double 57.130000000000003
.quad 0x404cb0a3d70a3d71 ## double 57.380000000000003
.quad 0x404c90a3d70a3d71 ## double 57.130000000000003
.quad 0x404ca00000000000 ## double 57.25
.quad 0x404cf0a3d70a3d71 ## double 57.880000000000003
.quad 0x404d000000000000 ## double 58
.quad 0x404d90a3d70a3d71 ## double 59.130000000000003
.quad 0x404dd0a3d70a3d71 ## double 59.630000000000003
.quad 0x404e10a3d70a3d71 ## double 60.130000000000003
.quad 0x404dc00000000000 ## double 59.5
.quad 0x404dd0a3d70a3d71 ## double 59.630000000000003
.quad 0x404d200000000000 ## double 58.25
.quad 0x404d000000000000 ## double 58
.quad 0x404d400000000000 ## double 58.5
.quad 0x404d70a3d70a3d71 ## double 58.880000000000003
.quad 0x404de00000000000 ## double 59.75
.quad 0x404e30a3d70a3d71 ## double 60.380000000000003
.quad 0x404dd0a3d70a3d71 ## double 59.630000000000003
.quad 0x404dc00000000000 ## double 59.5
.quad 0x404d200000000000 ## double 58.25
.quad 0x404d10a3d70a3d71 ## double 58.130000000000003
.quad 0x404d70a3d70a3d71 ## double 58.880000000000003
.quad 0x404d400000000000 ## double 58.5
.quad 0x404d90a3d70a3d71 ## double 59.130000000000003
.quad 0x404de00000000000 ## double 59.75
.quad 0x404e400000000000 ## double 60.5
.quad 0x404e600000000000 ## double 60.75
.quad 0x404d90a3d70a3d71 ## double 59.130000000000003
.quad 0x404d400000000000 ## double 58.5
.quad 0x404d10a3d70a3d71 ## double 58.130000000000003
.quad 0x404c30a3d70a3d71 ## double 56.380000000000003
.quad 0x404bf0a3d70a3d71 ## double 55.880000000000003
.quad 0x404c30a3d70a3d71 ## double 56.380000000000003
.quad 0x404c50a3d70a3d71 ## double 56.630000000000003
.quad 0x404c50a3d70a3d71 ## double 56.630000000000003
.quad 0x404c70a3d70a3d71 ## double 56.880000000000003
.quad 0x404c70a3d70a3d71 ## double 56.880000000000003
.quad 0x404c90a3d70a3d71 ## double 57.130000000000003
.quad 0x404ca00000000000 ## double 57.25
.quad 0x404c70a3d70a3d71 ## double 56.880000000000003
.quad 0x404cd0a3d70a3d71 ## double 57.630000000000003
.quad 0x404cf0a3d70a3d71 ## double 57.880000000000003
.quad 0x404cd0a3d70a3d71 ## double 57.630000000000003
.quad 0x404d10a3d70a3d71 ## double 58.130000000000003
.quad 0x404d200000000000 ## double 58.25
.quad 0x404d30a3d70a3d71 ## double 58.380000000000003
.quad 0x404db0a3d70a3d71 ## double 59.380000000000003
.quad 0x404de00000000000 ## double 59.75
.quad 0x404e200000000000 ## double 60.25
.quad 0x404ea00000000000 ## double 61.25
.quad 0x404eb0a3d70a3d71 ## double 61.380000000000003
.quad 0x404e90a3d70a3d71 ## double 61.130000000000003
.quad 0x404eb0a3d70a3d71 ## double 61.380000000000003
.quad 0x404e800000000000 ## double 61
.quad 0x404eb0a3d70a3d71 ## double 61.380000000000003
.quad 0x404ee00000000000 ## double 61.75
.quad 0x404e200000000000 ## double 60.25
.quad 0x404df0a3d70a3d71 ## double 59.880000000000003
.quad 0x404e000000000000 ## double 60
.quad 0x404e600000000000 ## double 60.75
.quad 0x404fa00000000000 ## double 63.25
.quad 0x404ee00000000000 ## double 61.75
.quad 0x404e400000000000 ## double 60.5
.quad 0x404e50a3d70a3d71 ## double 60.630000000000003
.quad 0x404e800000000000 ## double 61
.quad 0x404f30a3d70a3d71 ## double 62.380000000000003
.quad 0x404f600000000000 ## double 62.75
.quad 0x404fb0a3d70a3d71 ## double 63.380000000000003
.quad 0x404f50a3d70a3d71 ## double 62.630000000000003
.quad 0x404f10a3d70a3d71 ## double 62.130000000000003
.quad 0x404fb0a3d70a3d71 ## double 63.380000000000003
.quad 0x4050100000000000 ## double 64.25
.quad 0x405007ae147ae148 ## double 64.120000000000005
.quad 0x404fa00000000000 ## double 63.25
.quad 0x404fb0a3d70a3d71 ## double 63.380000000000003
.quad 0x405007ae147ae148 ## double 64.120000000000005
.quad 0x4050300000000000 ## double 64.75
.quad 0x4050500000000000 ## double 65.25
.quad 0x4050500000000000 ## double 65.25
.quad 0x4050300000000000 ## double 64.75
.quad 0x405027ae147ae148 ## double 64.620000000000005
.quad 0x405007ae147ae148 ## double 64.120000000000005
.quad 0x404f600000000000 ## double 62.75
.quad 0x404fb0a3d70a3d71 ## double 63.380000000000003
.quad 0x404fa00000000000 ## double 63.25
.quad 0x404f30a3d70a3d71 ## double 62.380000000000003
.quad 0x404f30a3d70a3d71 ## double 62.380000000000003
.quad 0x404f70a3d70a3d71 ## double 62.880000000000003
.quad 0x404fe00000000000 ## double 63.75
.quad 0x4050100000000000 ## double 64.25
.quad 0x4050200000000000 ## double 64.5
.quad 0x405037ae147ae148 ## double 64.870000000000005
.quad 0x4050100000000000 ## double 64.25
.quad 0x404f50a3d70a3d71 ## double 62.630000000000003
.quad 0x404f30a3d70a3d71 ## double 62.380000000000003
.quad 0x404ed0a3d70a3d71 ## double 61.630000000000003
.quad 0x404f200000000000 ## double 62.25
.quad 0x404f30a3d70a3d71 ## double 62.380000000000003
.quad 0x404f400000000000 ## double 62.5
.quad 0x404f10a3d70a3d71 ## double 62.130000000000003
.quad 0x404f600000000000 ## double 62.75
.quad 0x404fa00000000000 ## double 63.25
.quad 0x404fd0a3d70a3d71 ## double 63.630000000000003
.quad 0x405017ae147ae148 ## double 64.370000000000005
.quad 0x4050300000000000 ## double 64.75
.quad 0x4050200000000000 ## double 64.5
.quad 0x4050300000000000 ## double 64.75
.quad 0x4050100000000000 ## double 64.25
.quad 0x405017ae147ae148 ## double 64.370000000000005
.quad 0x4050600000000000 ## double 65.5
.quad 0x4050600000000000 ## double 65.5
.quad 0x405067ae147ae148 ## double 65.620000000000005
.quad 0x405077ae147ae148 ## double 65.870000000000005
.quad 0x4050600000000000 ## double 65.5
.quad 0x405067ae147ae148 ## double 65.620000000000005
.quad 0x405057ae147ae148 ## double 65.370000000000005
.quad 0x405067ae147ae148 ## double 65.620000000000005
.quad 0x4050c7ae147ae148 ## double 67.120000000000005
.quad 0x4050d00000000000 ## double 67.25
.quad 0x4050c00000000000 ## double 67
.quad 0x4050b00000000000 ## double 66.75
.quad 0x4050e00000000000 ## double 67.5
.quad 0x4050b00000000000 ## double 66.75
.quad 0x4051000000000000 ## double 68
.quad 0x4051000000000000 ## double 68
.quad 0x4051700000000000 ## double 69.75
.quad 0x405187ae147ae148 ## double 70.120000000000005
.quad 0x4051e00000000000 ## double 71.5
.quad 0x4051b00000000000 ## double 70.75
.quad 0x4051500000000000 ## double 69.25
.quad 0x405147ae147ae148 ## double 69.120000000000005
.quad 0x4051500000000000 ## double 69.25
.quad 0x4051600000000000 ## double 69.5
.quad 0x4051400000000000 ## double 69
.quad 0x4051300000000000 ## double 68.75
.quad 0x405107ae147ae148 ## double 68.120000000000005
.quad 0x4050d00000000000 ## double 67.25
.quad 0x405037ae147ae148 ## double 64.870000000000005
.quad 0x405067ae147ae148 ## double 65.620000000000005
.quad 0x4050800000000000 ## double 66
.quad 0x4050900000000000 ## double 66.25
.quad 0x405097ae147ae148 ## double 66.370000000000005
.quad 0x4050800000000000 ## double 66
.quad 0x4050200000000000 ## double 64.5
.quad 0x4050a00000000000 ## double 66.5
.quad 0x4050b00000000000 ## double 66.75
.quad 0x4050d00000000000 ## double 67.25
.quad 0x405137ae147ae148 ## double 68.870000000000005
.quad 0x4051600000000000 ## double 69.5
.quad 0x4051400000000000 ## double 69
.quad 0x4051000000000000 ## double 68
.quad 0x4051000000000000 ## double 68
.quad 0x405137ae147ae148 ## double 68.870000000000005
.quad 0x4051700000000000 ## double 69.75
.quad 0x4051a7ae147ae148 ## double 70.620000000000005
.quad 0x405197ae147ae148 ## double 70.370000000000005
.quad 0x4051e7ae147ae148 ## double 71.620000000000005
.quad 0x4052100000000000 ## double 72.25
.quad 0x4052300000000000 ## double 72.75
.quad 0x405227ae147ae148 ## double 72.620000000000005
.quad 0x4051f00000000000 ## double 71.75
.quad 0x4052100000000000 ## double 72.25
.quad 0x4051e7ae147ae148 ## double 71.620000000000005
.quad 0x4051c7ae147ae148 ## double 71.120000000000005
.quad 0x4050f00000000000 ## double 67.75
.quad 0x4050d00000000000 ## double 67.25
.quad 0x4050c7ae147ae148 ## double 67.120000000000005
.quad 0x405107ae147ae148 ## double 68.120000000000005
.quad 0x4051900000000000 ## double 70.25
.quad 0x4050e7ae147ae148 ## double 67.620000000000005
.quad 0x4050d00000000000 ## double 67.25
.quad 0x4050900000000000 ## double 66.25
.quad 0x4050c7ae147ae148 ## double 67.120000000000005
.quad 0x4050b00000000000 ## double 66.75
.quad 0x4050a00000000000 ## double 66.5
.quad 0x4050a7ae147ae148 ## double 66.620000000000005
.quad 0x4050f00000000000 ## double 67.75
.quad 0x405107ae147ae148 ## double 68.120000000000005
.quad 0x405137ae147ae148 ## double 68.870000000000005
.quad 0x4051200000000000 ## double 68.5
.quad 0x405157ae147ae148 ## double 69.370000000000005
.quad 0x4051c7ae147ae148 ## double 71.120000000000005
.quad 0x405197ae147ae148 ## double 70.370000000000005
.quad 0x4051b7ae147ae148 ## double 70.870000000000005
.quad 0x4051b00000000000 ## double 70.75
.quad 0x4051e00000000000 ## double 71.5
.quad 0x4051b7ae147ae148 ## double 70.870000000000005
.quad 0x405197ae147ae148 ## double 70.370000000000005
.quad 0x4051100000000000 ## double 68.25
.quad 0x4051100000000000 ## double 68.25
.quad 0x4050c00000000000 ## double 67
.quad 0x405057ae147ae148 ## double 65.370000000000005
.quad 0x405067ae147ae148 ## double 65.620000000000005
.quad 0x4050200000000000 ## double 64.5
.quad 0x4050100000000000 ## double 64.25
.quad 0x405077ae147ae148 ## double 65.870000000000005
.quad 0x4050f7ae147ae148 ## double 67.870000000000005
.quad 0x4051000000000000 ## double 68
.quad 0x4051400000000000 ## double 69
.quad 0x405117ae147ae148 ## double 68.370000000000005
.quad 0x4050e00000000000 ## double 67.5
.quad 0x4050d7ae147ae148 ## double 67.370000000000005
.quad 0x4050f7ae147ae148 ## double 67.870000000000005
.quad 0x405097ae147ae148 ## double 66.370000000000005
.quad 0x4050900000000000 ## double 66.25
.quad 0x4050a00000000000 ## double 66.5
.quad 0x405097ae147ae148 ## double 66.370000000000005
.quad 0x4050900000000000 ## double 66.25
.quad 0x4050900000000000 ## double 66.25
.quad 0x4050a7ae147ae148 ## double 66.620000000000005
.quad 0x4050d00000000000 ## double 67.25
.quad 0x405107ae147ae148 ## double 68.120000000000005
.quad 0x4050e00000000000 ## double 67.5
.quad 0x4050a00000000000 ## double 66.5
.quad 0x405067ae147ae148 ## double 65.620000000000005
.quad 0x4050800000000000 ## double 66
.quad 0x4050c00000000000 ## double 67
.quad 0x4050f00000000000 ## double 67.75
.quad 0x4050e00000000000 ## double 67.5
.quad 0x405137ae147ae148 ## double 68.870000000000005
.quad 0x4050f00000000000 ## double 67.75
.quad 0x4050f7ae147ae148 ## double 67.870000000000005
.quad 0x4050c00000000000 ## double 67
.quad 0x4051000000000000 ## double 68
.quad 0x405147ae147ae148 ## double 69.120000000000005
.quad 0x405147ae147ae148 ## double 69.120000000000005
.quad 0x4050e00000000000 ## double 67.5
.quad 0x4051000000000000 ## double 68
.quad 0x4050b00000000000 ## double 66.75
.quad 0x4050900000000000 ## double 66.25
.quad 0x4050700000000000 ## double 65.75
.quad 0x405067ae147ae148 ## double 65.620000000000005
.quad 0x405057ae147ae148 ## double 65.370000000000005
.quad 0x4050400000000000 ## double 65
.quad 0x405057ae147ae148 ## double 65.370000000000005
.quad 0x405057ae147ae148 ## double 65.370000000000005
.quad 0x4050800000000000 ## double 66
.quad 0x4050600000000000 ## double 65.5
.quad 0x4050500000000000 ## double 65.25
.quad 0x405027ae147ae148 ## double 64.620000000000005
.quad 0x4050400000000000 ## double 65
.quad 0x405057ae147ae148 ## double 65.370000000000005
.quad 0x405087ae147ae148 ## double 66.120000000000005
.quad 0x4050800000000000 ## double 66
.quad 0x4050700000000000 ## double 65.75
.quad 0x405067ae147ae148 ## double 65.620000000000005
.quad 0x405027ae147ae148 ## double 64.620000000000005
.quad 0x4050500000000000 ## double 65.25
.quad 0x4050400000000000 ## double 65
.quad 0x404f70a3d70a3d71 ## double 62.880000000000003
.quad 0x404f30a3d70a3d71 ## double 62.380000000000003
.quad 0x404f70a3d70a3d71 ## double 62.880000000000003
.quad 0x404f200000000000 ## double 62.25
.quad 0x404f200000000000 ## double 62.25
.quad 0x404ec00000000000 ## double 61.5
.quad 0x404e800000000000 ## double 61
.quad 0x404dd0a3d70a3d71 ## double 59.630000000000003
.quad 0x404dc00000000000 ## double 59.5
.quad 0x404e000000000000 ## double 60
.quad 0x404e200000000000 ## double 60.25
.quad 0x404e200000000000 ## double 60.25
.quad 0x404db0a3d70a3d71 ## double 59.380000000000003
.quad 0x404da00000000000 ## double 59.25
.quad 0x404d30a3d70a3d71 ## double 58.380000000000003
.quad 0x404de00000000000 ## double 59.75
.quad 0x404de00000000000 ## double 59.75
.quad 0x404e30a3d70a3d71 ## double 60.380000000000003
.quad 0x404e30a3d70a3d71 ## double 60.380000000000003
.quad 0x404d90a3d70a3d71 ## double 59.130000000000003
.quad 0x404d30a3d70a3d71 ## double 58.380000000000003
.quad 0x404d30a3d70a3d71 ## double 58.380000000000003
.quad 0x404ce00000000000 ## double 57.75
.quad 0x404cc00000000000 ## double 57.5
.quad 0x404cf0a3d70a3d71 ## double 57.880000000000003
.quad 0x404d30a3d70a3d71 ## double 58.380000000000003
.quad 0x404d10a3d70a3d71 ## double 58.130000000000003
.quad 0x404c90a3d70a3d71 ## double 57.130000000000003
.quad 0x404cb0a3d70a3d71 ## double 57.380000000000003
.quad 0x404bc00000000000 ## double 55.5
.quad 0x404c10a3d70a3d71 ## double 56.130000000000003
.quad 0x404c50a3d70a3d71 ## double 56.630000000000003
.quad 0x404bd0a3d70a3d71 ## double 55.630000000000003
.quad 0x404b800000000000 ## double 55
.quad 0x404b10a3d70a3d71 ## double 54.130000000000003
.quad 0x404a70a3d70a3d71 ## double 52.880000000000003
.quad 0x404ac00000000000 ## double 53.5
.quad 0x404ae00000000000 ## double 53.75
.quad 0x404a800000000000 ## double 53
.quad 0x404aa00000000000 ## double 53.25
.quad 0x404ac00000000000 ## double 53.5
.quad 0x404aa00000000000 ## double 53.25
.quad 0x404a800000000000 ## double 53
.quad 0x404ae00000000000 ## double 53.75
.quad 0x404b30a3d70a3d71 ## double 54.380000000000003
.quad 0x404be00000000000 ## double 55.75
.quad 0x404be00000000000 ## double 55.75
.quad 0x404be00000000000 ## double 55.75
.quad 0x404b30a3d70a3d71 ## double 54.380000000000003
.quad 0x404b70a3d70a3d71 ## double 54.880000000000003
.quad 0x404b90a3d70a3d71 ## double 55.130000000000003
.quad 0x404b90a3d70a3d71 ## double 55.130000000000003
.quad 0x404bb0a3d70a3d71 ## double 55.380000000000003
.quad 0x404bb0a3d70a3d71 ## double 55.380000000000003
.quad 0x404b200000000000 ## double 54.25
.quad 0x404b30a3d70a3d71 ## double 54.380000000000003
.quad 0x404b400000000000 ## double 54.5
.quad 0x4049d0a3d70a3d71 ## double 51.630000000000003
.quad 0x4049e00000000000 ## double 51.75
.quad 0x4049d0a3d70a3d71 ## double 51.630000000000003
.quad 0x404a70a3d70a3d71 ## double 52.880000000000003
.quad 0x404ac00000000000 ## double 53.5
.quad 0x404ae00000000000 ## double 53.75
.quad 0x404b800000000000 ## double 55
.quad 0x404ba00000000000 ## double 55.25
.quad 0x404b600000000000 ## double 54.75
.quad 0x404be00000000000 ## double 55.75
.quad 0x404b70a3d70a3d71 ## double 54.880000000000003
.quad 0x404bf0a3d70a3d71 ## double 55.880000000000003
.quad 0x404c000000000000 ## double 56
.quad 0x404be00000000000 ## double 55.75
.quad 0x404bf0a3d70a3d71 ## double 55.880000000000003
.quad 0x404c30a3d70a3d71 ## double 56.380000000000003
.quad 0x404cc00000000000 ## double 57.5
.quad 0x404bf0a3d70a3d71 ## double 55.880000000000003
.quad 0x404b90a3d70a3d71 ## double 55.130000000000003
.quad 0x404c30a3d70a3d71 ## double 56.380000000000003
.quad 0x404d10a3d70a3d71 ## double 58.130000000000003
.quad 0x404da00000000000 ## double 59.25
.quad 0x404de00000000000 ## double 59.75
.quad 0x404e200000000000 ## double 60.25
.quad 0x404e30a3d70a3d71 ## double 60.380000000000003
.quad 0x404e600000000000 ## double 60.75
.quad 0x404ea00000000000 ## double 61.25
.quad 0x404f10a3d70a3d71 ## double 62.130000000000003
.quad 0x404f200000000000 ## double 62.25
.quad 0x404f200000000000 ## double 62.25
.quad 0x404ef0a3d70a3d71 ## double 61.880000000000003
.quad 0x404f200000000000 ## double 62.25
.quad 0x404f90a3d70a3d71 ## double 63.130000000000003
.quad 0x404fb0a3d70a3d71 ## double 63.380000000000003
.quad 0x404fc00000000000 ## double 63.5
.quad 0x404fe00000000000 ## double 63.75
.quad 0x4050000000000000 ## double 64
.quad 0x405047ae147ae148 ## double 65.120000000000005
.quad 0x405057ae147ae148 ## double 65.370000000000005
.quad 0x405077ae147ae148 ## double 65.870000000000005
.quad 0x4050900000000000 ## double 66.25
.quad 0x4050c7ae147ae148 ## double 67.120000000000005
.quad 0x4050f00000000000 ## double 67.75
.quad 0x4050f00000000000 ## double 67.75
.quad 0x4051000000000000 ## double 68
.quad 0x4051900000000000 ## double 70.25
.quad 0x4051a00000000000 ## double 70.5
.quad 0x405137ae147ae148 ## double 68.870000000000005
.quad 0x4050e00000000000 ## double 67.5
.quad 0x4050e7ae147ae148 ## double 67.620000000000005
.quad 0x4051100000000000 ## double 68.25
.quad 0x4051400000000000 ## double 69
.quad 0x405167ae147ae148 ## double 69.620000000000005
.quad 0x405167ae147ae148 ## double 69.620000000000005
.quad 0x4051600000000000 ## double 69.5
.quad 0x4051c7ae147ae148 ## double 71.120000000000005
.quad 0x4051a7ae147ae148 ## double 70.620000000000005
.quad 0x4051d7ae147ae148 ## double 71.370000000000005
.quad 0x4052000000000000 ## double 72
.quad 0x4051b00000000000 ## double 70.75
.quad 0x4051e00000000000 ## double 71.5
.quad 0x4051d00000000000 ## double 71.25
.quad 0x405177ae147ae148 ## double 69.870000000000005
.quad 0x405177ae147ae148 ## double 69.870000000000005
.quad 0x4051700000000000 ## double 69.75
.quad 0x405197ae147ae148 ## double 70.370000000000005
.quad 0x4051b7ae147ae148 ## double 70.870000000000005
.quad 0x4051900000000000 ## double 70.25
.quad 0x4050d00000000000 ## double 67.25
.quad 0x4051100000000000 ## double 68.25
.quad 0x4050f00000000000 ## double 67.75
.quad 0x4051000000000000 ## double 68
.quad 0x4050e00000000000 ## double 67.5
.quad 0x404fb0a3d70a3d71 ## double 63.380000000000003
.quad 0x404fe00000000000 ## double 63.75
.quad 0x404fc00000000000 ## double 63.5
.quad 0x4050200000000000 ## double 64.5
.quad 0x405027ae147ae148 ## double 64.620000000000005
.quad 0x405027ae147ae148 ## double 64.620000000000005
.quad 0x4050400000000000 ## double 65
.quad 0x405047ae147ae148 ## double 65.120000000000005
.quad 0x405037ae147ae148 ## double 64.870000000000005
.quad 0x4050500000000000 ## double 65.25
.quad 0x4050700000000000 ## double 65.75
.quad 0x405027ae147ae148 ## double 64.620000000000005
.quad 0x4050500000000000 ## double 65.25
.quad 0x405077ae147ae148 ## double 65.870000000000005
.quad 0x405087ae147ae148 ## double 66.120000000000005
.quad 0x4050500000000000 ## double 65.25
.quad 0x4050700000000000 ## double 65.75
.quad 0x4050b00000000000 ## double 66.75
.quad 0x4050a7ae147ae148 ## double 66.620000000000005
.quad 0x4050e7ae147ae148 ## double 67.620000000000005
.quad 0x4050d00000000000 ## double 67.25
.quad 0x405097ae147ae148 ## double 66.370000000000005
.quad 0x405057ae147ae148 ## double 65.370000000000005
.quad 0x405047ae147ae148 ## double 65.120000000000005
.quad 0x4050600000000000 ## double 65.5
.quad 0x4050900000000000 ## double 66.25
.quad 0x4050c00000000000 ## double 67
.quad 0x4050500000000000 ## double 65.25
.quad 0x405027ae147ae148 ## double 64.620000000000005
.quad 0x404f200000000000 ## double 62.25
.quad 0x404ef0a3d70a3d71 ## double 61.880000000000003
.quad 0x404f400000000000 ## double 62.5
.quad 0x404f30a3d70a3d71 ## double 62.380000000000003
.quad 0x404f30a3d70a3d71 ## double 62.380000000000003
.quad 0x404fa00000000000 ## double 63.25
.quad 0x404f800000000000 ## double 63
.quad 0x404f600000000000 ## double 62.75
.quad 0x404f70a3d70a3d71 ## double 62.880000000000003
.quad 0x404f400000000000 ## double 62.5
.quad 0x404f10a3d70a3d71 ## double 62.130000000000003
.quad 0x404f000000000000 ## double 62
.quad 0x404f90a3d70a3d71 ## double 63.130000000000003
.quad 0x404fa00000000000 ## double 63.25
.quad 0x404fe00000000000 ## double 63.75
.quad 0x404fd0a3d70a3d71 ## double 63.630000000000003
.quad 0x405007ae147ae148 ## double 64.120000000000005
.quad 0x4050000000000000 ## double 64
.quad 0x404f70a3d70a3d71 ## double 62.880000000000003
.quad 0x404f30a3d70a3d71 ## double 62.380000000000003
.quad 0x404f70a3d70a3d71 ## double 62.880000000000003
.quad 0x404f90a3d70a3d71 ## double 63.130000000000003
.quad 0x404fb0a3d70a3d71 ## double 63.380000000000003
.quad 0x405017ae147ae148 ## double 64.370000000000005
.quad 0x404f800000000000 ## double 63
.quad 0x404fe00000000000 ## double 63.75
.quad 0x4050200000000000 ## double 64.5
.quad 0x4050700000000000 ## double 65.75
.quad 0x405077ae147ae148 ## double 65.870000000000005
.quad 0x4050c7ae147ae148 ## double 67.120000000000005
.quad 0x4050e7ae147ae148 ## double 67.620000000000005
.quad 0x4050f00000000000 ## double 67.75
.quad 0x4050d7ae147ae148 ## double 67.370000000000005
.quad 0x4051400000000000 ## double 69
.quad 0x4051600000000000 ## double 69.5
.quad 0x405137ae147ae148 ## double 68.870000000000005
.quad 0x4051400000000000 ## double 69
.quad 0x4051300000000000 ## double 68.75
.quad 0x4050e7ae147ae148 ## double 67.620000000000005
.quad 0x4051200000000000 ## double 68.5
.quad 0x4051300000000000 ## double 68.75
.quad 0x4051600000000000 ## double 69.5
.quad 0x4051500000000000 ## double 69.25
.quad 0x405177ae147ae148 ## double 69.870000000000005
.quad 0x4051c00000000000 ## double 71
.quad 0x4051d7ae147ae148 ## double 71.370000000000005
.quad 0x405127ae147ae148 ## double 68.620000000000005
.quad 0x4051200000000000 ## double 68.5
.quad 0x4051500000000000 ## double 69.25
.quad 0x405117ae147ae148 ## double 68.370000000000005
.quad 0x4050b7ae147ae148 ## double 66.870000000000005
.quad 0x4050a7ae147ae148 ## double 66.620000000000005
.quad 0x4050f00000000000 ## double 67.75
.quad 0x4050f7ae147ae148 ## double 67.870000000000005
.quad 0x4051000000000000 ## double 68
.quad 0x4051100000000000 ## double 68.25
.quad 0x4051200000000000 ## double 68.5
.quad 0x405177ae147ae148 ## double 69.870000000000005
.quad 0x4051800000000000 ## double 70
.quad 0x4051800000000000 ## double 70
.quad 0x4051900000000000 ## double 70.25
.quad 0x405187ae147ae148 ## double 70.120000000000005
.quad 0x4051a7ae147ae148 ## double 70.620000000000005
.quad 0x4051a00000000000 ## double 70.5
.quad 0x4051b00000000000 ## double 70.75
.quad 0x4051c7ae147ae148 ## double 71.120000000000005
.quad 0x4051e7ae147ae148 ## double 71.620000000000005
.quad 0x4051f7ae147ae148 ## double 71.870000000000005
.quad 0x4051f7ae147ae148 ## double 71.870000000000005
.quad 0x4052200000000000 ## double 72.5
.quad 0x405217ae147ae148 ## double 72.370000000000005
.quad 0x405197ae147ae148 ## double 70.370000000000005
.quad 0x405197ae147ae148 ## double 70.370000000000005
.quad 0x4051a00000000000 ## double 70.5
.quad 0x4051500000000000 ## double 69.25
.quad 0x405197ae147ae148 ## double 70.370000000000005
.quad 0x4051c7ae147ae148 ## double 71.120000000000005
.quad 0x405157ae147ae148 ## double 69.370000000000005
.quad 0x4051600000000000 ## double 69.5
.quad 0x4051900000000000 ## double 70.25
.quad 0x4051800000000000 ## double 70
.quad 0x4051800000000000 ## double 70
.quad 0x4051700000000000 ## double 69.75
.quad 0x4051600000000000 ## double 69.5
.quad 0x405157ae147ae148 ## double 69.370000000000005
.quad 0x405167ae147ae148 ## double 69.620000000000005
.quad 0x405167ae147ae148 ## double 69.620000000000005
.quad 0x405157ae147ae148 ## double 69.370000000000005
.quad 0x405187ae147ae148 ## double 70.120000000000005
.quad 0x4051800000000000 ## double 70
.quad 0x4051600000000000 ## double 69.5
.quad 0x405197ae147ae148 ## double 70.370000000000005
.quad 0x4051a7ae147ae148 ## double 70.620000000000005
.quad 0x4051800000000000 ## double 70
.quad 0x4051e00000000000 ## double 71.5
.quad 0x405217ae147ae148 ## double 72.370000000000005
.quad 0x405267ae147ae148 ## double 73.620000000000005
.quad 0x4052800000000000 ## double 74
.quad 0x4053300000000000 ## double 76.75
.quad 0x405217ae147ae148 ## double 72.370000000000005
.quad 0x4052200000000000 ## double 72.5
.quad 0x405267ae147ae148 ## double 73.620000000000005
.quad 0x405287ae147ae148 ## double 74.120000000000005
.quad 0x405287ae147ae148 ## double 74.120000000000005
.quad 0x4052a00000000000 ## double 74.5
.quad 0x4052900000000000 ## double 74.25
.quad 0x4052a00000000000 ## double 74.5
.quad 0x4052900000000000 ## double 74.25
.quad 0x4052a00000000000 ## double 74.5
.quad 0x4052700000000000 ## double 73.75
.quad 0x4052600000000000 ## double 73.5
.quad 0x4052800000000000 ## double 74
.quad 0x4052c00000000000 ## double 75
.quad 0x4052d00000000000 ## double 75.25
.quad 0x4053800000000000 ## double 78
.quad 0x4053a00000000000 ## double 78.5
.quad 0x405367ae147ae148 ## double 77.620000000000005
.quad 0x405377ae147ae148 ## double 77.870000000000005
.quad 0x4053900000000000 ## double 78.25
.quad 0x4053900000000000 ## double 78.25
.quad 0x4053800000000000 ## double 78
.quad 0x405317ae147ae148 ## double 76.370000000000005
.quad 0x405317ae147ae148 ## double 76.370000000000005
.quad 0x40730c0000000000 ## double 304.75
.quad 0x407326147ae147ae ## double 306.38
.quad 0x4073340000000000 ## double 307.25
.quad 0x407336147ae147ae ## double 307.38
.quad 0x4073380000000000 ## double 307.5
.quad 0x4073680000000000 ## double 310.5
.quad 0x4073680000000000 ## double 310.5
.quad 0x40734e147ae147ae ## double 308.88
.quad 0x40735c0000000000 ## double 309.75
.quad 0x4073580000000000 ## double 309.5
.quad 0x4073300000000000 ## double 307
.quad 0x4073600000000000 ## double 310
.quad 0x40735c0000000000 ## double 309.75
.quad 0x4073500000000000 ## double 309
.quad 0x407362147ae147ae ## double 310.13
.quad 0x40738c0000000000 ## double 312.75
.quad 0x4073840000000000 ## double 312.25
.quad 0x40735e147ae147ae ## double 309.88
.quad 0x4073d80000000000 ## double 317.5
.quad 0x4074180000000000 ## double 321.5
.quad 0x4073e80000000000 ## double 318.5
.quad 0x4073d80000000000 ## double 317.5
.quad 0x4073a40000000000 ## double 314.25
.quad 0x4073680000000000 ## double 310.5
.quad 0x4073800000000000 ## double 312
.quad 0x4073840000000000 ## double 312.25
.quad 0x40738e147ae147ae ## double 312.88
.quad 0x4073640000000000 ## double 310.25
.quad 0x4073540000000000 ## double 309.25
.quad 0x40738e147ae147ae ## double 312.88
.quad 0x4073a00000000000 ## double 314
.quad 0x4073980000000000 ## double 313.5
.quad 0x407396147ae147ae ## double 313.38
.quad 0x4073a40000000000 ## double 314.25
.quad 0x4074180000000000 ## double 321.5
.quad 0x4074200000000000 ## double 322
.quad 0x4073fc0000000000 ## double 319.75
.quad 0x407406147ae147ae ## double 320.38
.quad 0x40740c0000000000 ## double 320.75
.quad 0x407416147ae147ae ## double 321.38
.quad 0x4073fc0000000000 ## double 319.75
.quad 0x4073b80000000000 ## double 315.5
.quad 0x4073ea147ae147ae ## double 318.63
.quad 0x4073f6147ae147ae ## double 319.38
.quad 0x4073fe147ae147ae ## double 319.88
.quad 0x40740c0000000000 ## double 320.75
.quad 0x4073a80000000000 ## double 314.5
.quad 0x4073d00000000000 ## double 317
.quad 0x4073d80000000000 ## double 317.5
.quad 0x4073c00000000000 ## double 316
.quad 0x40737a147ae147ae ## double 311.63
.quad 0x4073840000000000 ## double 312.25
.quad 0x4073540000000000 ## double 309.25
.quad 0x40734a147ae147ae ## double 308.63
.quad 0x40735c0000000000 ## double 309.75
.quad 0x40737c0000000000 ## double 311.75
.quad 0x4073600000000000 ## double 310
.quad 0x40735e147ae147ae ## double 309.88
.quad 0x4073580000000000 ## double 309.5
.quad 0x40733c0000000000 ## double 307.75
.quad 0x4073280000000000 ## double 306.5
.quad 0x40733c0000000000 ## double 307.75
.quad 0x4072d40000000000 ## double 301.25
.quad 0x4072d80000000000 ## double 301.5
.quad 0x4072d00000000000 ## double 301
.quad 0x4073000000000000 ## double 304
.quad 0x40730c0000000000 ## double 304.75
.quad 0x4072f80000000000 ## double 303.5
.quad 0x4073180000000000 ## double 305.5
.quad 0x40734a147ae147ae ## double 308.63
.quad 0x407346147ae147ae ## double 308.38
.quad 0x4073400000000000 ## double 308
.quad 0x4073480000000000 ## double 308.5
.quad 0x4073440000000000 ## double 308.25
.quad 0x4073600000000000 ## double 310
.quad 0x4073300000000000 ## double 307
.quad 0x4072de147ae147ae ## double 301.88
.quad 0x4072ca147ae147ae ## double 300.63
.quad 0x4072bc0000000000 ## double 299.75
.quad 0x4073280000000000 ## double 306.5
.quad 0x40732a147ae147ae ## double 306.63
.quad 0x4073700000000000 ## double 311
.quad 0x4073700000000000 ## double 311
.quad 0x4073880000000000 ## double 312.5
.quad 0x4073a80000000000 ## double 314.5
.quad 0x4073940000000000 ## double 313.25
.quad 0x4073980000000000 ## double 313.5
.quad 0x4073800000000000 ## double 312
.quad 0x4073740000000000 ## double 311.25
.quad 0x4073900000000000 ## double 313
.quad 0x4073800000000000 ## double 312
.quad 0x40736c0000000000 ## double 310.75
.quad 0x4073800000000000 ## double 312
.quad 0x4073800000000000 ## double 312
.quad 0x4073aa147ae147ae ## double 314.63
.quad 0x4073c40000000000 ## double 316.25
.quad 0x40739e147ae147ae ## double 313.88
.quad 0x4073800000000000 ## double 312
.quad 0x4073380000000000 ## double 307.5
.quad 0x407346147ae147ae ## double 308.38
.quad 0x40731c0000000000 ## double 305.75
.quad 0x4073300000000000 ## double 307
.quad 0x4073500000000000 ## double 309
.quad 0x40734c0000000000 ## double 308.75
.quad 0x4073100000000000 ## double 305
.quad 0x4073280000000000 ## double 306.5
.quad 0x4073400000000000 ## double 308
.quad 0x4073280000000000 ## double 306.5
.quad 0x4073600000000000 ## double 310
.quad 0x4072700000000000 ## double 295
.quad 0x4071ce147ae147ae ## double 284.88
.quad 0x4071d80000000000 ## double 285.5
.quad 0x4071b40000000000 ## double 283.25
.quad 0x4070fe147ae147ae ## double 271.88
.quad 0x407136147ae147ae ## double 275.38
.quad 0x4071300000000000 ## double 275
.quad 0x4071480000000000 ## double 276.5
.quad 0x4071680000000000 ## double 278.5
.quad 0x40716c0000000000 ## double 278.75
.quad 0x4071480000000000 ## double 276.5
.quad 0x4071640000000000 ## double 278.25
.quad 0x4071780000000000 ## double 279.5
.quad 0x407166147ae147ae ## double 278.38
.quad 0x40712a147ae147ae ## double 274.63
.quad 0x4071280000000000 ## double 274.5
.quad 0x4070e00000000000 ## double 270
.quad 0x4070b80000000000 ## double 267.5
.quad 0x4071000000000000 ## double 272
.quad 0x4070fc0000000000 ## double 271.75
.quad 0x4070e80000000000 ## double 270.5
.quad 0x4070c00000000000 ## double 268
.quad 0x4070940000000000 ## double 265.25
.quad 0x4070880000000000 ## double 264.5
.quad 0x4070640000000000 ## double 262.25
.quad 0x4070540000000000 ## double 261.25
.quad 0x4070640000000000 ## double 262.25
.quad 0x407046147ae147ae ## double 260.38
.quad 0x4070880000000000 ## double 264.5
.quad 0x4070980000000000 ## double 265.5
.quad 0x4070ac0000000000 ## double 266.75
.quad 0x4070a00000000000 ## double 266
.quad 0x4070740000000000 ## double 263.25
.quad 0x4070c40000000000 ## double 268.25
.quad 0x4070f80000000000 ## double 271.5
.quad 0x40710c0000000000 ## double 272.75
.quad 0x4071480000000000 ## double 276.5
.quad 0x4071080000000000 ## double 272.5
.quad 0x4071100000000000 ## double 273
.quad 0x40711e147ae147ae ## double 273.88
.quad 0x407132147ae147ae ## double 275.13
.quad 0x4071580000000000 ## double 277.5
.quad 0x4071640000000000 ## double 278.25
.quad 0x4071680000000000 ## double 278.5
.quad 0x4071580000000000 ## double 277.5
.quad 0x40718c0000000000 ## double 280.75
.quad 0x40717c0000000000 ## double 279.75
.quad 0x40717c0000000000 ## double 279.75
.quad 0x4071e00000000000 ## double 286
.quad 0x4072080000000000 ## double 288.5
.quad 0x4072580000000000 ## double 293.5
.quad 0x4072280000000000 ## double 290.5
.quad 0x4071b80000000000 ## double 283.5
.quad 0x4071b00000000000 ## double 283
.quad 0x4071980000000000 ## double 281.5
.quad 0x4071a00000000000 ## double 282
.quad 0x4071a00000000000 ## double 282
.quad 0x4071980000000000 ## double 281.5
.quad 0x4071880000000000 ## double 280.5
.quad 0x4071680000000000 ## double 278.5
.quad 0x4071700000000000 ## double 279
.quad 0x4071a00000000000 ## double 282
.quad 0x4071a80000000000 ## double 282.5
.quad 0x4071980000000000 ## double 281.5
.quad 0x4071c80000000000 ## double 284.5
.quad 0x4071dc0000000000 ## double 285.75
.quad 0x4072080000000000 ## double 288.5
.quad 0x4072340000000000 ## double 291.25
.quad 0x40723e147ae147ae ## double 291.88
.quad 0x4072680000000000 ## double 294.5
.quad 0x407286147ae147ae ## double 296.38
.quad 0x4072d6147ae147ae ## double 301.38
.quad 0x4072ce147ae147ae ## double 300.88
.quad 0x4072f6147ae147ae ## double 303.38
.quad 0x4073080000000000 ## double 304.5
.quad 0x4072e80000000000 ## double 302.5
.quad 0x4072f00000000000 ## double 303
.quad 0x4072d00000000000 ## double 301
.quad 0x40726e147ae147ae ## double 294.88
.quad 0x4072600000000000 ## double 294
.quad 0x4072700000000000 ## double 295
.quad 0x4072840000000000 ## double 296.25
.quad 0x407296147ae147ae ## double 297.38
.quad 0x4072b80000000000 ## double 299.5
.quad 0x4072c00000000000 ## double 300
.quad 0x4072cc0000000000 ## double 300.75
.quad 0x40729a147ae147ae ## double 297.63
.quad 0x40727c0000000000 ## double 295.75
.quad 0x4072bc0000000000 ## double 299.75
.quad 0x4072b6147ae147ae ## double 299.38
.quad 0x4072a80000000000 ## double 298.5
.quad 0x4072380000000000 ## double 291.5
.quad 0x4072380000000000 ## double 291.5
.quad 0x40722c0000000000 ## double 290.75
.quad 0x4072340000000000 ## double 291.25
.quad 0x4072540000000000 ## double 293.25
.quad 0x40721c0000000000 ## double 289.75
.quad 0x4072280000000000 ## double 290.5
.quad 0x40721a147ae147ae ## double 289.63
.quad 0x4072700000000000 ## double 295
.quad 0x4072500000000000 ## double 293
.quad 0x40719c0000000000 ## double 281.75
.quad 0x4071980000000000 ## double 281.5
.quad 0x4071780000000000 ## double 279.5
.quad 0x4071440000000000 ## double 276.25
.quad 0x40714c0000000000 ## double 276.75
.quad 0x4071100000000000 ## double 273
.quad 0x4070f00000000000 ## double 271
.quad 0x4070e40000000000 ## double 270.25
.quad 0x4071000000000000 ## double 272
.quad 0x4070dc0000000000 ## double 269.75
.quad 0x4070ae147ae147ae ## double 266.88
.quad 0x4070e2147ae147ae ## double 270.13
.quad 0x4070bc0000000000 ## double 267.75
.quad 0x4070380000000000 ## double 259.5
.quad 0x40703c0000000000 ## double 259.75
.quad 0x40704c0000000000 ## double 260.75
.quad 0x40704e147ae147ae ## double 260.88
.quad 0x40704a147ae147ae ## double 260.63
.quad 0x4070300000000000 ## double 259
.quad 0x4070380000000000 ## double 259.5
.quad 0x4070180000000000 ## double 257.5
.quad 0x4070440000000000 ## double 260.25
.quad 0x4070640000000000 ## double 262.25
.quad 0x40705e147ae147ae ## double 261.88
.quad 0x4070600000000000 ## double 262
.quad 0x4070680000000000 ## double 262.5
.quad 0x4070a80000000000 ## double 266.5
.quad 0x4070b00000000000 ## double 267
.quad 0x4070c40000000000 ## double 268.25
.quad 0x4070dc0000000000 ## double 269.75
.quad 0x4070e00000000000 ## double 270
.quad 0x4070ea147ae147ae ## double 270.63
.quad 0x4071080000000000 ## double 272.5
.quad 0x4071440000000000 ## double 276.25
.quad 0x4071240000000000 ## double 274.25
.quad 0x4070f6147ae147ae ## double 271.38
.quad 0x4070e00000000000 ## double 270
.quad 0x4070dc0000000000 ## double 269.75
.quad 0x4070aa147ae147ae ## double 266.63
.quad 0x4070c80000000000 ## double 268.5
.quad 0x4070a00000000000 ## double 266
.quad 0x40704c0000000000 ## double 260.75
.quad 0x4070380000000000 ## double 259.5
.quad 0x407066147ae147ae ## double 262.38
.quad 0x40705c0000000000 ## double 261.75
.quad 0x40705e147ae147ae ## double 261.88
.quad 0x4070800000000000 ## double 264
.quad 0x4070680000000000 ## double 262.5
.quad 0x4070a00000000000 ## double 266
.quad 0x4070ac0000000000 ## double 266.75
.quad 0x40708a147ae147ae ## double 264.63
.quad 0x4070a40000000000 ## double 266.25
.quad 0x4070ba147ae147ae ## double 267.63
.quad 0x4070c80000000000 ## double 268.5
.quad 0x4070a40000000000 ## double 266.25
.quad 0x4070a40000000000 ## double 266.25
.quad 0x4070940000000000 ## double 265.25
.quad 0x40704e147ae147ae ## double 260.88
.quad 0x407046147ae147ae ## double 260.38
.quad 0x4070780000000000 ## double 263.5
.quad 0x4070880000000000 ## double 264.5
.quad 0x4070540000000000 ## double 261.25
.quad 0x4070a40000000000 ## double 266.25
.quad 0x4070ba147ae147ae ## double 267.63
.quad 0x4070cc0000000000 ## double 268.75
.quad 0x4070a00000000000 ## double 266
.quad 0x40705c0000000000 ## double 261.75
.quad 0x4070900000000000 ## double 265
.quad 0x4070800000000000 ## double 264
.quad 0x4070240000000000 ## double 258.25
.quad 0x406ff00000000000 ## double 255.5
.quad 0x40701e147ae147ae ## double 257.88
.quad 0x406fc00000000000 ## double 254
.quad 0x406fa00000000000 ## double 253
.quad 0x406fb428f5c28f5c ## double 253.63
.quad 0x406e800000000000 ## double 244
.quad 0x406df00000000000 ## double 239.5
.quad 0x406e400000000000 ## double 242
.quad 0x406e100000000000 ## double 240.5
.quad 0x406e300000000000 ## double 241.5
.quad 0x406e600000000000 ## double 243
.quad 0x406e800000000000 ## double 244
.quad 0x406e300000000000 ## double 241.5
.quad 0x406dcc28f5c28f5c ## double 238.38
.quad 0x406d880000000000 ## double 236.25
.quad 0x406db428f5c28f5c ## double 237.63
.quad 0x406dcc28f5c28f5c ## double 238.38
.quad 0x406e000000000000 ## double 240
.quad 0x406dfc28f5c28f5c ## double 239.88
.quad 0x406de00000000000 ## double 239
.quad 0x406e180000000000 ## double 240.75
.quad 0x406e180000000000 ## double 240.75
.quad 0x406e200000000000 ## double 241
.quad 0x406e8c28f5c28f5c ## double 244.38
.quad 0x406e880000000000 ## double 244.25
.quad 0x406e700000000000 ## double 243.5
.quad 0x406e4428f5c28f5c ## double 242.13
.quad 0x406e600000000000 ## double 243
.quad 0x406e780000000000 ## double 243.75
.quad 0x406eb80000000000 ## double 245.75
.quad 0x406eec28f5c28f5c ## double 247.38
.quad 0x406ed80000000000 ## double 246.75
.quad 0x406e980000000000 ## double 244.75
.quad 0x406e880000000000 ## double 244.25
.quad 0x406f380000000000 ## double 249.75
.quad 0x406f880000000000 ## double 252.25
.quad 0x406f900000000000 ## double 252.5
.quad 0x406fb80000000000 ## double 253.75
.quad 0x407012147ae147ae ## double 257.13
.quad 0x4070180000000000 ## double 257.5
.quad 0x4070000000000000 ## double 256
.quad 0x4070040000000000 ## double 256.25
.quad 0x406ff428f5c28f5c ## double 255.63
.quad 0x407006147ae147ae ## double 256.38
.quad 0x4070040000000000 ## double 256.25
.quad 0x4070200000000000 ## double 258
.quad 0x40701c0000000000 ## double 257.75
.quad 0x4070280000000000 ## double 258.5
.quad 0x4070440000000000 ## double 260.25
.quad 0x407042147ae147ae ## double 260.13
.quad 0x40705e147ae147ae ## double 261.88
.quad 0x40704c0000000000 ## double 260.75
.quad 0x40702c0000000000 ## double 258.75
.quad 0x4070480000000000 ## double 260.5
.quad 0x4070a40000000000 ## double 266.25
.quad 0x4070a6147ae147ae ## double 266.38
.quad 0x4070ae147ae147ae ## double 266.88
.quad 0x4070ae147ae147ae ## double 266.88
.quad 0x40709c0000000000 ## double 265.75
.quad 0x4070aa147ae147ae ## double 266.63
.quad 0x4070b6147ae147ae ## double 267.38
.quad 0x4070b00000000000 ## double 267
.quad 0x4070a40000000000 ## double 266.25
.quad 0x4070b40000000000 ## double 267.25
.quad 0x4070d40000000000 ## double 269.25
.quad 0x4070dc0000000000 ## double 269.75
.quad 0x4070d40000000000 ## double 269.25
.quad 0x4070c00000000000 ## double 268
.quad 0x4070bc0000000000 ## double 267.75
.quad 0x4070c40000000000 ## double 268.25
.quad 0x4070c2147ae147ae ## double 268.13
.quad 0x4070dc0000000000 ## double 269.75
.quad 0x4070ce147ae147ae ## double 268.88
.quad 0x4070b00000000000 ## double 267
.quad 0x4070fc0000000000 ## double 271.75
.quad 0x4070ec0000000000 ## double 270.75
.quad 0x4071100000000000 ## double 273
.quad 0x4071340000000000 ## double 275.25
.quad 0x4071180000000000 ## double 273.5
.quad 0x4070f40000000000 ## double 271.25
.quad 0x4070ea147ae147ae ## double 270.63
.quad 0x4070e40000000000 ## double 270.25
.quad 0x4070c00000000000 ## double 268
.quad 0x4070ac0000000000 ## double 266.75
.quad 0x407086147ae147ae ## double 264.38
.quad 0x40708c0000000000 ## double 264.75
.quad 0x4070a00000000000 ## double 266
.quad 0x4070ba147ae147ae ## double 267.63
.quad 0x4070a6147ae147ae ## double 266.38
.quad 0x4070880000000000 ## double 264.5
.quad 0x40709e147ae147ae ## double 265.88
.quad 0x40709e147ae147ae ## double 265.88
.quad 0x40706c0000000000 ## double 262.75
.quad 0x4070580000000000 ## double 261.5
.quad 0x407066147ae147ae ## double 262.38
.quad 0x40708c0000000000 ## double 264.75
.quad 0x4070880000000000 ## double 264.5
.quad 0x4070840000000000 ## double 264.25
.quad 0x4070900000000000 ## double 265
.quad 0x4070b2147ae147ae ## double 267.13
.quad 0x4070ce147ae147ae ## double 268.88
.quad 0x4070d40000000000 ## double 269.25
.quad 0x4070d40000000000 ## double 269.25
.quad 0x4070a40000000000 ## double 266.25
.quad 0x4070500000000000 ## double 261
.quad 0x40704e147ae147ae ## double 260.88
.quad 0x4070340000000000 ## double 259.25
.quad 0x4070580000000000 ## double 261.5
.quad 0x4070700000000000 ## double 263
.quad 0x4070400000000000 ## double 260
.quad 0x40704c0000000000 ## double 260.75
.quad 0x4070380000000000 ## double 259.5
.quad 0x406f980000000000 ## double 252.75
.quad 0x406f8c28f5c28f5c ## double 252.38
.quad 0x406fa80000000000 ## double 253.25
.quad 0x406fa00000000000 ## double 253
.quad 0x406f580000000000 ## double 250.75
.quad 0x406fac28f5c28f5c ## double 253.38
.quad 0x40701a147ae147ae ## double 257.63
.quad 0x4070340000000000 ## double 259.25
.quad 0x407042147ae147ae ## double 260.13
.quad 0x4070540000000000 ## double 261.25
.quad 0x4070500000000000 ## double 261
.quad 0x4070180000000000 ## double 257.5
.quad 0x40703a147ae147ae ## double 259.63
.quad 0x4070380000000000 ## double 259.5
.quad 0x40702a147ae147ae ## double 258.63
.quad 0x4070180000000000 ## double 257.5
.quad 0x4070340000000000 ## double 259.25
.quad 0x40702c0000000000 ## double 258.75
.quad 0x40702c0000000000 ## double 258.75
.quad 0x4070280000000000 ## double 258.5
.quad 0x4070000000000000 ## double 256
.quad 0x40701a147ae147ae ## double 257.63
.quad 0x4070200000000000 ## double 258
.quad 0x407026147ae147ae ## double 258.38
.quad 0x40702c0000000000 ## double 258.75
.quad 0x4070340000000000 ## double 259.25
.quad 0x4070600000000000 ## double 262
.quad 0x4070680000000000 ## double 262.5
.quad 0x4070540000000000 ## double 261.25
.quad 0x40704a147ae147ae ## double 260.63
.quad 0x4070480000000000 ## double 260.5
.quad 0x40703c0000000000 ## double 259.75
.quad 0x4070400000000000 ## double 260
.quad 0x4070280000000000 ## double 258.5
.quad 0x40702a147ae147ae ## double 258.63
.quad 0x4070300000000000 ## double 259
.quad 0x4070300000000000 ## double 259
.quad 0x4070200000000000 ## double 258
.quad 0x4070680000000000 ## double 262.5
.quad 0x40707e147ae147ae ## double 263.88
.quad 0x4070840000000000 ## double 264.25
.quad 0x407076147ae147ae ## double 263.38
.quad 0x407072147ae147ae ## double 263.13
.quad 0x4070940000000000 ## double 265.25
.quad 0x4070bc0000000000 ## double 267.75
.quad 0x4070c80000000000 ## double 268.5
.quad 0x4070ca147ae147ae ## double 268.63
.quad 0x4070c80000000000 ## double 268.5
.quad 0x4070d00000000000 ## double 269
.quad 0x4070cc0000000000 ## double 268.75
.quad 0x4070ee147ae147ae ## double 270.88
.quad 0x4070d80000000000 ## double 269.5
.quad 0x4070be147ae147ae ## double 267.88
.quad 0x4070cc0000000000 ## double 268.75
.quad 0x4070e6147ae147ae ## double 270.38
.quad 0x4071000000000000 ## double 272
.quad 0x4070f80000000000 ## double 271.5
.quad 0x4070d6147ae147ae ## double 269.38
.quad 0x4070f40000000000 ## double 271.25
.quad 0x4070e2147ae147ae ## double 270.13
.quad 0x4070d40000000000 ## double 269.25
.quad 0x4070d6147ae147ae ## double 269.38
.quad 0x4070b6147ae147ae ## double 267.38
.quad 0x4070cc0000000000 ## double 268.75
.quad 0x4070c40000000000 ## double 268.25
.quad 0x4070a40000000000 ## double 266.25
.quad 0x4070a80000000000 ## double 266.5
.quad 0x4070da147ae147ae ## double 269.63
.quad 0x4070d00000000000 ## double 269
.quad 0x4070e80000000000 ## double 270.5
.quad 0x4070da147ae147ae ## double 269.63
.quad 0x4070ec0000000000 ## double 270.75
.quad 0x4070c2147ae147ae ## double 268.13
.quad 0x4070c80000000000 ## double 268.5
.quad 0x4070f80000000000 ## double 271.5
.quad 0x4071140000000000 ## double 273.25
.quad 0x4071240000000000 ## double 274.25
.quad 0x407126147ae147ae ## double 274.38
.quad 0x4071140000000000 ## double 273.25
.quad 0x40712c0000000000 ## double 274.75
.quad 0x4071380000000000 ## double 275.5
.quad 0x4071240000000000 ## double 274.25
.quad 0x4070ec0000000000 ## double 270.75
.quad 0x40705a147ae147ae ## double 261.63
.quad 0x4070580000000000 ## double 261.5
.quad 0x40703e147ae147ae ## double 259.88
.quad 0x4070640000000000 ## double 262.25
.quad 0x4070580000000000 ## double 261.5
.quad 0x40705c0000000000 ## double 261.75
.quad 0x40707a147ae147ae ## double 263.63
.quad 0x40707e147ae147ae ## double 263.88
.quad 0x4070a6147ae147ae ## double 266.38
.quad 0x40709c0000000000 ## double 265.75
.quad 0x4070ce147ae147ae ## double 268.88
.quad 0x4070b40000000000 ## double 267.25
.quad 0x4070a80000000000 ## double 266.5
.quad 0x4070640000000000 ## double 262.25
.quad 0x407046147ae147ae ## double 260.38
.quad 0x4070540000000000 ## double 261.25
.quad 0x4070440000000000 ## double 260.25
.quad 0x40702c0000000000 ## double 258.75
.quad 0x407026147ae147ae ## double 258.38
.quad 0x40703e147ae147ae ## double 259.88
.quad 0x4070280000000000 ## double 258.5
.quad 0x406fb80000000000 ## double 253.75
.quad 0x406f880000000000 ## double 252.25
.quad 0x406fb00000000000 ## double 253.5
.quad 0x406fb80000000000 ## double 253.75
.quad 0x406fa00000000000 ## double 253
.quad 0x406f800000000000 ## double 252
.quad 0x406f580000000000 ## double 250.75
.quad 0x406f0c28f5c28f5c ## double 248.38
.quad 0x406f200000000000 ## double 249
.quad 0x406ed428f5c28f5c ## double 246.63
.quad 0x406f280000000000 ## double 249.25
.quad 0x406f2428f5c28f5c ## double 249.13
.quad 0x406f0c28f5c28f5c ## double 248.38
.quad 0x406f0428f5c28f5c ## double 248.13
.quad 0x406f3428f5c28f5c ## double 249.63
.quad 0x406f600000000000 ## double 251
.quad 0x406fd00000000000 ## double 254.5
.quad 0x4070080000000000 ## double 256.5
.quad 0x406fec28f5c28f5c ## double 255.38
.quad 0x406fe428f5c28f5c ## double 255.13
.quad 0x406fc80000000000 ## double 254.25
.quad 0x406f9c28f5c28f5c ## double 252.88
.quad 0x406ff00000000000 ## double 255.5
.quad 0x4070140000000000 ## double 257.25
.quad 0x4070100000000000 ## double 257
.quad 0x4070400000000000 ## double 260
.quad 0x4070600000000000 ## double 262
.quad 0x40707c0000000000 ## double 263.75
.quad 0x4070600000000000 ## double 262
.quad 0x4070400000000000 ## double 260
.quad 0x40703c0000000000 ## double 259.75
.quad 0x40705a147ae147ae ## double 261.63
.quad 0x4070600000000000 ## double 262
.quad 0x4070480000000000 ## double 260.5
.quad 0x40703a147ae147ae ## double 259.63
.quad 0x4070740000000000 ## double 263.25
.quad 0x4070e80000000000 ## double 270.5
.quad 0x4070f80000000000 ## double 271.5
.quad 0x40710e147ae147ae ## double 272.88
.quad 0x40710a147ae147ae ## double 272.63
.quad 0x4071080000000000 ## double 272.5
.quad 0x4071380000000000 ## double 275.5
.quad 0x4071600000000000 ## double 278
.quad 0x4071740000000000 ## double 279.25
.quad 0x4071540000000000 ## double 277.25
.quad 0x40714e147ae147ae ## double 276.88
.quad 0x4071380000000000 ## double 275.5
.quad 0x40714c0000000000 ## double 276.75
.quad 0x407162147ae147ae ## double 278.13
.quad 0x4071680000000000 ## double 278.5
.quad 0x4071680000000000 ## double 278.5
.quad 0x4071880000000000 ## double 280.5
.quad 0x4071940000000000 ## double 281.25
.quad 0x4071780000000000 ## double 279.5
.quad 0x40718c0000000000 ## double 280.75
.quad 0x4071aa147ae147ae ## double 282.63
.quad 0x4071da147ae147ae ## double 285.63
.quad 0x4071d80000000000 ## double 285.5
.quad 0x4071c00000000000 ## double 284
.quad 0x4071cc0000000000 ## double 284.75
.quad 0x4071c2147ae147ae ## double 284.13
.quad 0x4071da147ae147ae ## double 285.63
.quad 0x4071e2147ae147ae ## double 286.13
.quad 0x4071d00000000000 ## double 285
.quad 0x4071c6147ae147ae ## double 284.38
.quad 0x4071c80000000000 ## double 284.5
.quad 0x407182147ae147ae ## double 280.13
.quad 0x4071940000000000 ## double 281.25
.quad 0x4071880000000000 ## double 280.5
.quad 0x4071700000000000 ## double 279
.quad 0x4071680000000000 ## double 278.5
.quad 0x4071580000000000 ## double 277.5
.quad 0x40715c0000000000 ## double 277.75
.quad 0x4071400000000000 ## double 276
.quad 0x407142147ae147ae ## double 276.13
.quad 0x4071400000000000 ## double 276
.quad 0x40714a147ae147ae ## double 276.63
.quad 0x4071500000000000 ## double 277
.quad 0x4070ee147ae147ae ## double 270.88
.quad 0x4070fe147ae147ae ## double 271.88
.quad 0x4071200000000000 ## double 274
.quad 0x4071280000000000 ## double 274.5
.quad 0x4071240000000000 ## double 274.25
.quad 0x4070da147ae147ae ## double 269.63
.quad 0x4070e00000000000 ## double 270
.quad 0x4070e40000000000 ## double 270.25
.quad 0x407102147ae147ae ## double 272.13
.quad 0x4070e80000000000 ## double 270.5
.quad 0x4071040000000000 ## double 272.25
.quad 0x4070fc0000000000 ## double 271.75
.quad 0x40714c0000000000 ## double 276.75
.quad 0x40715e147ae147ae ## double 277.88
.quad 0x4071340000000000 ## double 275.25
.quad 0x4071240000000000 ## double 274.25
.quad 0x4070d80000000000 ## double 269.5
.quad 0x4070d80000000000 ## double 269.5
.quad 0x4071100000000000 ## double 273
.quad 0x4071400000000000 ## double 276
.quad 0x4071380000000000 ## double 275.5
.quad 0x4071700000000000 ## double 279
.quad 0x40717a147ae147ae ## double 279.63
.quad 0x40713c0000000000 ## double 275.75
.quad 0x4071100000000000 ## double 273
.quad 0x4070f00000000000 ## double 271
.quad 0x4070f40000000000 ## double 271.25
.quad 0x4070d00000000000 ## double 269
.quad 0x4071000000000000 ## double 272
.quad 0x4071100000000000 ## double 273
.quad 0x407122147ae147ae ## double 274.13
.quad 0x4071480000000000 ## double 276.5
.quad 0x40712c0000000000 ## double 274.75
.quad 0x4071540000000000 ## double 277.25
.quad 0x4071880000000000 ## double 280.5
.quad 0x4071a00000000000 ## double 282
.quad 0x4071880000000000 ## double 280.5
.quad 0x4071700000000000 ## double 279
.quad 0x407182147ae147ae ## double 280.13
.quad 0x4071600000000000 ## double 278
.quad 0x4070fc0000000000 ## double 271.75
.quad 0x407106147ae147ae ## double 272.38
.quad 0x4070e6147ae147ae ## double 270.38
.quad 0x4070a40000000000 ## double 266.25
.quad 0x4070d40000000000 ## double 269.25
.quad 0x4070ce147ae147ae ## double 268.88
.quad 0x4070e80000000000 ## double 270.5
.quad 0x4070e80000000000 ## double 270.5
.quad 0x4070f40000000000 ## double 271.25
.quad 0x40710a147ae147ae ## double 272.63
.quad 0x407122147ae147ae ## double 274.13
.quad 0x4071100000000000 ## double 273
.quad 0x40711c0000000000 ## double 273.75
.quad 0x4071280000000000 ## double 274.5
.quad 0x4071000000000000 ## double 272
.quad 0x40711e147ae147ae ## double 273.88
.quad 0x40711e147ae147ae ## double 273.88
.quad 0x4070fa147ae147ae ## double 271.63
.quad 0x4071200000000000 ## double 274
.quad 0x4071380000000000 ## double 275.5
.quad 0x4071080000000000 ## double 272.5
.quad 0x4070fc0000000000 ## double 271.75
.quad 0x4071100000000000 ## double 273
.quad 0x4070ec0000000000 ## double 270.75
.quad 0x4070f6147ae147ae ## double 271.38
.quad 0x4070b80000000000 ## double 267.5
.quad 0x4070a40000000000 ## double 266.25
.quad 0x4070900000000000 ## double 265
.quad 0x4070880000000000 ## double 264.5
.quad 0x40708e147ae147ae ## double 264.88
.quad 0x40705e147ae147ae ## double 261.88
.quad 0x40704a147ae147ae ## double 260.63
.quad 0x40705c0000000000 ## double 261.75
.quad 0x4070bc0000000000 ## double 267.75
.quad 0x4070d6147ae147ae ## double 269.38
.quad 0x4070ec0000000000 ## double 270.75
.quad 0x4071080000000000 ## double 272.5
.quad 0x4071000000000000 ## double 272
.quad 0x4070c6147ae147ae ## double 268.38
.quad 0x4070a00000000000 ## double 266
.quad 0x4070780000000000 ## double 263.5
.quad 0x40703c0000000000 ## double 259.75
.quad 0x4070140000000000 ## double 257.25
.quad 0x4070800000000000 ## double 264
.quad 0x40708c0000000000 ## double 264.75
.quad 0x4070980000000000 ## double 265.5
.quad 0x4070940000000000 ## double 265.25
.quad 0x407096147ae147ae ## double 265.38
.quad 0x4070f80000000000 ## double 271.5
.quad 0x4071140000000000 ## double 273.25
.quad 0x407172147ae147ae ## double 279.13
.quad 0x40716c0000000000 ## double 278.75
.quad 0x40719a147ae147ae ## double 281.63
.quad 0x40719e147ae147ae ## double 281.88
.quad 0x407186147ae147ae ## double 280.38
.quad 0x4071900000000000 ## double 281
.quad 0x4071900000000000 ## double 281
.quad 0x4071b40000000000 ## double 283.25
.quad 0x4071b80000000000 ## double 283.5
.quad 0x4071d00000000000 ## double 285
.quad 0x4071f80000000000 ## double 287.5
.quad 0x4071f80000000000 ## double 287.5
.quad 0x4071d80000000000 ## double 285.5
.quad 0x4071e2147ae147ae ## double 286.13
.quad 0x4072080000000000 ## double 288.5
.quad 0x4071fc0000000000 ## double 287.75
.quad 0x4071ec0000000000 ## double 286.75
.quad 0x4071da147ae147ae ## double 285.63
.quad 0x4071c00000000000 ## double 284
.quad 0x4071780000000000 ## double 279.5
.quad 0x4071680000000000 ## double 278.5
.quad 0x407182147ae147ae ## double 280.13
.quad 0x40717a147ae147ae ## double 279.63
.quad 0x4071700000000000 ## double 279
.quad 0x407182147ae147ae ## double 280.13
.quad 0x407182147ae147ae ## double 280.13
.quad 0x4071680000000000 ## double 278.5
.quad 0x4071600000000000 ## double 278
.quad 0x4071540000000000 ## double 277.25
.quad 0x40712c0000000000 ## double 274.75
.quad 0x40710c0000000000 ## double 272.75
.quad 0x4070e6147ae147ae ## double 270.38
.quad 0x4071000000000000 ## double 272
.quad 0x40710c0000000000 ## double 272.75
.quad 0x40711c0000000000 ## double 273.75
.quad 0x4071180000000000 ## double 273.5
.quad 0x407132147ae147ae ## double 275.13
.quad 0x40717a147ae147ae ## double 279.63
.quad 0x4071880000000000 ## double 280.5
.quad 0x40718c0000000000 ## double 280.75
.quad 0x4071800000000000 ## double 280
.quad 0x40716e147ae147ae ## double 278.88
.quad 0x4071600000000000 ## double 278
.quad 0x40717c0000000000 ## double 279.75
.quad 0x4071680000000000 ## double 278.5
.quad 0x4071340000000000 ## double 275.25
.quad 0x4071340000000000 ## double 275.25
.quad 0x40713a147ae147ae ## double 275.63
.quad 0x4071580000000000 ## double 277.5
.quad 0x40713e147ae147ae ## double 275.88
.quad 0x40710e147ae147ae ## double 272.88
.quad 0x4071180000000000 ## double 273.5
.quad 0x40711c0000000000 ## double 273.75
.quad 0x407112147ae147ae ## double 273.13
.quad 0x4071380000000000 ## double 275.5
.quad 0x4071380000000000 ## double 275.5
.quad 0x40711e147ae147ae ## double 273.88
.quad 0x4071080000000000 ## double 272.5
.quad 0x4071080000000000 ## double 272.5
.quad 0x40710c0000000000 ## double 272.75
.quad 0x4071380000000000 ## double 275.5
.quad 0x407146147ae147ae ## double 276.38
.quad 0x4071780000000000 ## double 279.5
.quad 0x407186147ae147ae ## double 280.38
.quad 0x407186147ae147ae ## double 280.38
.quad 0x4071800000000000 ## double 280
.quad 0x4071700000000000 ## double 279
.quad 0x407176147ae147ae ## double 279.38
.quad 0x4071600000000000 ## double 278
.quad 0x4071540000000000 ## double 277.25
.quad 0x4071400000000000 ## double 276
.quad 0x4071600000000000 ## double 278
.quad 0x40716e147ae147ae ## double 278.88
.quad 0x4071400000000000 ## double 276
.quad 0x4071400000000000 ## double 276
.quad 0x40714c0000000000 ## double 276.75
.quad 0x4071340000000000 ## double 275.25
.quad 0x4071000000000000 ## double 272
.quad 0x40710e147ae147ae ## double 272.88
.quad 0x4070e80000000000 ## double 270.5
.quad 0x4070e00000000000 ## double 270
.quad 0x4070d00000000000 ## double 269
.quad 0x4070700000000000 ## double 263
.quad 0x4070480000000000 ## double 260.5
.quad 0x40704c0000000000 ## double 260.75
.quad 0x40701e147ae147ae ## double 257.88
.quad 0x406ff80000000000 ## double 255.75
.quad 0x406fe428f5c28f5c ## double 255.13
.quad 0x406fd428f5c28f5c ## double 254.63
.quad 0x406fb428f5c28f5c ## double 253.63
.quad 0x4070040000000000 ## double 256.25
.quad 0x4070280000000000 ## double 258.5
.quad 0x4070240000000000 ## double 258.25
.quad 0x4070080000000000 ## double 256.5
.quad 0x4070140000000000 ## double 257.25
.quad 0x406fc00000000000 ## double 254
.quad 0x406f980000000000 ## double 252.75
.quad 0x406f700000000000 ## double 251.5
.quad 0x406fa00000000000 ## double 253
.quad 0x4070180000000000 ## double 257.5
.quad 0x40701a147ae147ae ## double 257.63
.quad 0x406fe80000000000 ## double 255.25
.quad 0x406fc80000000000 ## double 254.25
.quad 0x406fb80000000000 ## double 253.75
.quad 0x406fe00000000000 ## double 255
.quad 0x4070100000000000 ## double 257
.quad 0x4070140000000000 ## double 257.25
.quad 0x4070380000000000 ## double 259.5
.quad 0x4070240000000000 ## double 258.25
.quad 0x406f680000000000 ## double 251.25
.quad 0x406f400000000000 ## double 250
.quad 0x406f700000000000 ## double 251.5
.quad 0x406f9c28f5c28f5c ## double 252.88
.quad 0x406f980000000000 ## double 252.75
.quad 0x406fcc28f5c28f5c ## double 254.38
.quad 0x40700c0000000000 ## double 256.75
.quad 0x4070000000000000 ## double 256
.quad 0x40703c0000000000 ## double 259.75
.quad 0x4070340000000000 ## double 259.25
.quad 0x4070400000000000 ## double 260
.quad 0x4070640000000000 ## double 262.25
.quad 0x4070600000000000 ## double 262
.quad 0x40705c0000000000 ## double 261.75
.quad 0x4070340000000000 ## double 259.25
.quad 0x4070180000000000 ## double 257.5
.quad 0x40706e147ae147ae ## double 262.88
.quad 0x40707c0000000000 ## double 263.75
.quad 0x4070ec0000000000 ## double 270.75
.quad 0x4070d80000000000 ## double 269.5
.quad 0x4070d40000000000 ## double 269.25
.quad 0x4070fc0000000000 ## double 271.75
.quad 0x4071140000000000 ## double 273.25
.quad 0x4071000000000000 ## double 272
.quad 0x4070b40000000000 ## double 267.25
.quad 0x40707c0000000000 ## double 263.75
.quad 0x40706c0000000000 ## double 262.75
.quad 0x40704c0000000000 ## double 260.75
.quad 0x40705c0000000000 ## double 261.75
.quad 0x4070480000000000 ## double 260.5
.quad 0x4070680000000000 ## double 262.5
.quad 0x407092147ae147ae ## double 265.13
.quad 0x4070600000000000 ## double 262
.quad 0x4070440000000000 ## double 260.25
.quad 0x4070340000000000 ## double 259.25
.quad 0x40703a147ae147ae ## double 259.63
.quad 0x4070680000000000 ## double 262.5
.quad 0x40706c0000000000 ## double 262.75
.quad 0x4070540000000000 ## double 261.25
.quad 0x40709e147ae147ae ## double 265.88
.quad 0x40709e147ae147ae ## double 265.88
.quad 0x4070640000000000 ## double 262.25
.quad 0x4070680000000000 ## double 262.5
.quad 0x407042147ae147ae ## double 260.13
.quad 0x4070180000000000 ## double 257.5
.quad 0x4070340000000000 ## double 259.25
.quad 0x407046147ae147ae ## double 260.38
.quad 0x40704c0000000000 ## double 260.75
.quad 0x4070340000000000 ## double 259.25
.quad 0x407006147ae147ae ## double 256.38
.quad 0x4070580000000000 ## double 261.5
.quad 0x4070680000000000 ## double 262.5
.quad 0x407072147ae147ae ## double 263.13
.quad 0x4070600000000000 ## double 262
.quad 0x4070880000000000 ## double 264.5
.quad 0x4070640000000000 ## double 262.25
.quad 0x4070140000000000 ## double 257.25
.quad 0x406fe80000000000 ## double 255.25
.quad 0x406fe00000000000 ## double 255
.quad 0x4070200000000000 ## double 258
.quad 0x4070480000000000 ## double 260.5
.quad 0x4070200000000000 ## double 258
.quad 0x40701c0000000000 ## double 257.75
.quad 0x4070140000000000 ## double 257.25
.quad 0x4070740000000000 ## double 263.25
.quad 0x4070880000000000 ## double 264.5
.quad 0x4070540000000000 ## double 261.25
.quad 0x4070340000000000 ## double 259.25
.quad 0x40703c0000000000 ## double 259.75
.quad 0x4070100000000000 ## double 257
.quad 0x406f880000000000 ## double 252.25
.quad 0x406fe00000000000 ## double 255
.quad 0x4070000000000000 ## double 256
.quad 0x406ff00000000000 ## double 255.5
.quad 0x406f600000000000 ## double 251
.quad 0x406f900000000000 ## double 252.5
.quad 0x406fa00000000000 ## double 253
.quad 0x406f400000000000 ## double 250
.quad 0x406e580000000000 ## double 242.75
.quad 0x406e5c28f5c28f5c ## double 242.88
.quad 0x406e280000000000 ## double 241.25
.quad 0x406dcc28f5c28f5c ## double 238.38
.quad 0x406d880000000000 ## double 236.25
.quad 0x406cf80000000000 ## double 231.75
.quad 0x406cd80000000000 ## double 230.75
.quad 0x406ce00000000000 ## double 231
.quad 0x406cdc28f5c28f5c ## double 230.88
.quad 0x406cc00000000000 ## double 230
.quad 0x406c500000000000 ## double 226.5
.quad 0x406c280000000000 ## double 225.25
.quad 0x406bf00000000000 ## double 223.5
.quad 0x406c1428f5c28f5c ## double 224.63
.quad 0x406bf00000000000 ## double 223.5
.quad 0x406bf00000000000 ## double 223.5
.quad 0x406b980000000000 ## double 220.75
.quad 0x406b400000000000 ## double 218
.quad 0x406b7c28f5c28f5c ## double 219.88
.quad 0x406bb00000000000 ## double 221.5
.quad 0x406b980000000000 ## double 220.75
.quad 0x406b7c28f5c28f5c ## double 219.88
.quad 0x406b1c28f5c28f5c ## double 216.88
.quad 0x406af80000000000 ## double 215.75
.quad 0x406b380000000000 ## double 217.75
.quad 0x406b400000000000 ## double 218
.quad 0x406afc28f5c28f5c ## double 215.88
.quad 0x406b200000000000 ## double 217
.quad 0x406b9428f5c28f5c ## double 220.63
.quad 0x406b8428f5c28f5c ## double 220.13
.quad 0x406b7428f5c28f5c ## double 219.63
.quad 0x406c080000000000 ## double 224.25
.quad 0x406c680000000000 ## double 227.25
.quad 0x406c500000000000 ## double 226.5
.quad 0x406c400000000000 ## double 226
.quad 0x406c280000000000 ## double 225.25
.quad 0x406bdc28f5c28f5c ## double 222.88
.quad 0x406bd80000000000 ## double 222.75
.quad 0x406bc00000000000 ## double 222
.quad 0x406b980000000000 ## double 220.75
.quad 0x406c380000000000 ## double 225.75
.quad 0x406c280000000000 ## double 225.25
.quad 0x406be80000000000 ## double 223.25
.quad 0x406c200000000000 ## double 225
.quad 0x406c180000000000 ## double 224.75
.quad 0x406b7c28f5c28f5c ## double 219.88
.quad 0x406b1c28f5c28f5c ## double 216.88
.quad 0x406b3428f5c28f5c ## double 217.63
.quad 0x406b400000000000 ## double 218
.quad 0x406b200000000000 ## double 217
.quad 0x406ac80000000000 ## double 214.25
.quad 0x406a980000000000 ## double 212.75
.quad 0x406a980000000000 ## double 212.75
.quad 0x406ab00000000000 ## double 213.5
.quad 0x406ac80000000000 ## double 214.25
.quad 0x406af00000000000 ## double 215.5
.quad 0x406a780000000000 ## double 211.75
.quad 0x406ad80000000000 ## double 214.75
.quad 0x406af80000000000 ## double 215.75
.quad 0x406ab80000000000 ## double 213.75
.quad 0x406ad80000000000 ## double 214.75
.quad 0x406a7c28f5c28f5c ## double 211.88
.quad 0x406a500000000000 ## double 210.5
.quad 0x406a900000000000 ## double 212.5
.quad 0x406a400000000000 ## double 210
.quad 0x406aa00000000000 ## double 213
.quad 0x406a500000000000 ## double 210.5
.quad 0x4069e00000000000 ## double 207
.quad 0x406a280000000000 ## double 209.25
.quad 0x4069d80000000000 ## double 206.75
.quad 0x40694c28f5c28f5c ## double 202.38
.quad 0x4069700000000000 ## double 203.5
.quad 0x4068700000000000 ## double 195.5
.quad 0x4067980000000000 ## double 188.75
.quad 0x4067b80000000000 ## double 189.75
.quad 0x4067e00000000000 ## double 191
.quad 0x4068600000000000 ## double 195
.quad 0x4068880000000000 ## double 196.25
.quad 0x4068200000000000 ## double 193
.quad 0x4068280000000000 ## double 193.25
.quad 0x4067980000000000 ## double 188.75
.quad 0x4067c80000000000 ## double 190.25
.quad 0x4067c00000000000 ## double 190
.quad 0x4067500000000000 ## double 186.5
.quad 0x40669428f5c28f5c ## double 180.63
.quad 0x4066880000000000 ## double 180.25
.quad 0x4066800000000000 ## double 180
.quad 0x4066e00000000000 ## double 183
.quad 0x4066d80000000000 ## double 182.75
.quad 0x4066980000000000 ## double 180.75
.quad 0x40673c28f5c28f5c ## double 185.88
.quad 0x4066f00000000000 ## double 183.5
.quad 0x4066c00000000000 ## double 182
.quad 0x4066f80000000000 ## double 183.75
.quad 0x4066d80000000000 ## double 182.75
.quad 0x4067500000000000 ## double 186.5
.quad 0x4067700000000000 ## double 187.5
.quad 0x4067500000000000 ## double 186.5
.quad 0x4066a00000000000 ## double 181
.quad 0x4066b80000000000 ## double 181.75
.quad 0x4066c00000000000 ## double 182
.quad 0x4066880000000000 ## double 180.25
.quad 0x4066580000000000 ## double 178.75
.quad 0x40664428f5c28f5c ## double 178.13
.quad 0x4066c80000000000 ## double 182.25
.quad 0x4066f80000000000 ## double 183.75
.quad 0x4066c80000000000 ## double 182.25
.quad 0x4066a00000000000 ## double 181
.quad 0x4066c80000000000 ## double 182.25
.quad 0x40672c28f5c28f5c ## double 185.38
.quad 0x4066f00000000000 ## double 183.5
.quad 0x4066f80000000000 ## double 183.75
.quad 0x4067380000000000 ## double 185.75
.quad 0x4067480000000000 ## double 186.25
.quad 0x4067980000000000 ## double 188.75
.quad 0x4067c80000000000 ## double 190.25
.quad 0x4067d80000000000 ## double 190.75
.quad 0x4068680000000000 ## double 195.25
.quad 0x40685c28f5c28f5c ## double 194.88
.quad 0x4068400000000000 ## double 194
.quad 0x4068080000000000 ## double 192.25
.quad 0x4068500000000000 ## double 194.5
.quad 0x4068500000000000 ## double 194.5
.quad 0x4068d80000000000 ## double 198.75
.quad 0x4068e80000000000 ## double 199.25
.quad 0x4069700000000000 ## double 203.5
.quad 0x40693c28f5c28f5c ## double 201.88
.quad 0x4069a80000000000 ## double 205.25
.quad 0x4069f80000000000 ## double 207.75
.quad 0x406a300000000000 ## double 209.5
.quad 0x406a000000000000 ## double 208
.quad 0x406a000000000000 ## double 208
.quad 0x406a280000000000 ## double 209.25
.quad 0x406a400000000000 ## double 210
.quad 0x4069d00000000000 ## double 206.5
.quad 0x4069e00000000000 ## double 207
.quad 0x406a000000000000 ## double 208
.quad 0x4069f00000000000 ## double 207.5
.quad 0x406a580000000000 ## double 210.75
.quad 0x406a600000000000 ## double 211
.quad 0x406a700000000000 ## double 211.5
.quad 0x406a500000000000 ## double 210.5
.quad 0x406aa80000000000 ## double 213.25
.quad 0x406aec28f5c28f5c ## double 215.38
.quad 0x406aa00000000000 ## double 213
.quad 0x406a480000000000 ## double 210.25
.quad 0x406a0c28f5c28f5c ## double 208.38
.quad 0x4069b00000000000 ## double 205.5
.quad 0x406a200000000000 ## double 209
.quad 0x406a100000000000 ## double 208.5
.quad 0x406a180000000000 ## double 208.75
.quad 0x406a780000000000 ## double 211.75
.quad 0x406aa80000000000 ## double 213.25
.quad 0x406a9c28f5c28f5c ## double 212.88
.quad 0x406ae00000000000 ## double 215
.quad 0x406b3428f5c28f5c ## double 217.63
.quad 0x406b400000000000 ## double 218
.quad 0x406b580000000000 ## double 218.75
.quad 0x406b780000000000 ## double 219.75
.quad 0x406b680000000000 ## double 219.25
.quad 0x406af80000000000 ## double 215.75
.quad 0x406ae00000000000 ## double 215
.quad 0x406b100000000000 ## double 216.5
.quad 0x406b600000000000 ## double 219
.quad 0x406b6c28f5c28f5c ## double 219.38
.quad 0x406b000000000000 ## double 216
.quad 0x406b100000000000 ## double 216.5
.quad 0x406b800000000000 ## double 220
.quad 0x406b680000000000 ## double 219.25
.quad 0x406b680000000000 ## double 219.25
.quad 0x406c4c28f5c28f5c ## double 226.38
.quad 0x406c6c28f5c28f5c ## double 227.38
.quad 0x406bf00000000000 ## double 223.5
.quad 0x406b580000000000 ## double 218.75
.quad 0x406ad00000000000 ## double 214.5
.quad 0x406a700000000000 ## double 211.5
.quad 0x406a900000000000 ## double 212.5
.quad 0x406adc28f5c28f5c ## double 214.88
.quad 0x406ae00000000000 ## double 215
.quad 0x406a580000000000 ## double 210.75
.quad 0x406a900000000000 ## double 212.5
.quad 0x406a400000000000 ## double 210
.quad 0x4069800000000000 ## double 204
.quad 0x4069e00000000000 ## double 207
.quad 0x406a400000000000 ## double 210
.quad 0x406a200000000000 ## double 209
.quad 0x406a780000000000 ## double 211.75
.quad 0x406af80000000000 ## double 215.75
.quad 0x406ae80000000000 ## double 215.25
.quad 0x406a9c28f5c28f5c ## double 212.88
.quad 0x406b2c28f5c28f5c ## double 217.38
.quad 0x406a980000000000 ## double 212.75
.quad 0x406a800000000000 ## double 212
.quad 0x406a300000000000 ## double 209.5
.quad 0x4069c80000000000 ## double 206.25
.quad 0x406a900000000000 ## double 212.5
.quad 0x406a500000000000 ## double 210.5
.quad 0x4069300000000000 ## double 201.5
.quad 0x4068cc28f5c28f5c ## double 198.38
.quad 0x4069100000000000 ## double 200.5
.quad 0x4069880000000000 ## double 204.25
.quad 0x4069f80000000000 ## double 207.75
.quad 0x4069f00000000000 ## double 207.5
.quad 0x406a100000000000 ## double 208.5
.quad 0x406a500000000000 ## double 210.5
.quad 0x406a800000000000 ## double 212
.quad 0x406a500000000000 ## double 210.5
.quad 0x4069c80000000000 ## double 206.25
.quad 0x406a400000000000 ## double 210
.quad 0x406b0c28f5c28f5c ## double 216.38
.quad 0x406ad80000000000 ## double 214.75
.quad 0x406b6c28f5c28f5c ## double 219.38
.quad 0x406b580000000000 ## double 218.75
.quad 0x406b480000000000 ## double 218.25
.quad 0x406b200000000000 ## double 217
.quad 0x406b000000000000 ## double 216
.quad 0x406ba00000000000 ## double 221
.quad 0x406b900000000000 ## double 220.5
.quad 0x406b8428f5c28f5c ## double 220.13
.quad 0x406b680000000000 ## double 219.25
.quad 0x406b880000000000 ## double 220.25
.quad 0x406c180000000000 ## double 224.75
.quad 0x406b800000000000 ## double 220
.quad 0x406af80000000000 ## double 215.75
.quad 0x406ac00000000000 ## double 214
.quad 0x406a900000000000 ## double 212.5
.quad 0x406a380000000000 ## double 209.75
.quad 0x406b380000000000 ## double 217.75
.quad 0x406b980000000000 ## double 220.75
.quad 0x406b5c28f5c28f5c ## double 218.88
.quad 0x406b380000000000 ## double 217.75
.quad 0x406b980000000000 ## double 220.75
.quad 0x406ba00000000000 ## double 221
.quad 0x406b480000000000 ## double 218.25
.quad 0x406a500000000000 ## double 210.5
.quad 0x40693c28f5c28f5c ## double 201.88
.quad 0x40693c28f5c28f5c ## double 201.88
.quad 0x4068b80000000000 ## double 197.75
.quad 0x4069400000000000 ## double 202
.quad 0x4068fc28f5c28f5c ## double 199.88
.quad 0x4068580000000000 ## double 194.75
.quad 0x4068200000000000 ## double 193
.quad 0x4067a00000000000 ## double 189
.quad 0x4067f00000000000 ## double 191.5
.quad 0x4067800000000000 ## double 188
.quad 0x4067800000000000 ## double 188
.quad 0x40645c28f5c28f5c ## double 162.88
.quad 0x40649428f5c28f5c ## double 164.63
.quad 0x4064a80000000000 ## double 165.25
.quad 0x4064700000000000 ## double 163.5
.quad 0x4064480000000000 ## double 162.25
.quad 0x4064000000000000 ## double 160
.quad 0x4064180000000000 ## double 160.75
.quad 0x4064900000000000 ## double 164.5
.quad 0x4065400000000000 ## double 170
.quad 0x4065500000000000 ## double 170.5
.quad 0x4065900000000000 ## double 172.5
.quad 0x4065a00000000000 ## double 173
.quad 0x4065100000000000 ## double 168.5
.quad 0x4064e428f5c28f5c ## double 167.13
.quad 0x4064d00000000000 ## double 166.5
.quad 0x4065480000000000 ## double 170.25
.quad 0x4065680000000000 ## double 171.25
.quad 0x40656c28f5c28f5c ## double 171.38
.quad 0x40651c28f5c28f5c ## double 168.88
.quad 0x4064880000000000 ## double 164.25
.quad 0x4064c00000000000 ## double 166
.quad 0x4065180000000000 ## double 168.75
.quad 0x4064f00000000000 ## double 167.5
.quad 0x4065400000000000 ## double 170
.quad 0x4065400000000000 ## double 170
.quad 0x4065600000000000 ## double 171
.quad 0x4065a80000000000 ## double 173.25
.quad 0x40657428f5c28f5c ## double 171.63
.quad 0x4065080000000000 ## double 168.25
.quad 0x4065500000000000 ## double 170.5
.quad 0x4065a00000000000 ## double 173
.quad 0x4065f428f5c28f5c ## double 175.63
.quad 0x4065c00000000000 ## double 174
.quad 0x4065100000000000 ## double 168.5
.quad 0x4064b80000000000 ## double 165.75
.quad 0x4065700000000000 ## double 171.5
.quad 0x4065980000000000 ## double 172.75
.quad 0x4065500000000000 ## double 170.5
.quad 0x4065f00000000000 ## double 175.5
.quad 0x4066580000000000 ## double 178.75
.quad 0x4066b00000000000 ## double 181.5
.quad 0x4066480000000000 ## double 178.25
.quad 0x4065d00000000000 ## double 174.5
.quad 0x4065e80000000000 ## double 175.25
.quad 0x4065a00000000000 ## double 173
.quad 0x40657c28f5c28f5c ## double 171.88
.quad 0x4065d80000000000 ## double 174.75
.quad 0x4066300000000000 ## double 177.5
.quad 0x4067000000000000 ## double 184
.quad 0x4067780000000000 ## double 187.75
.quad 0x4067480000000000 ## double 186.25
.quad 0x4067bc28f5c28f5c ## double 189.88
.quad 0x4067f80000000000 ## double 191.75
.quad 0x4067e00000000000 ## double 191
.quad 0x4067e80000000000 ## double 191.25
.quad 0x4069000000000000 ## double 200
.quad 0x4068800000000000 ## double 196
.quad 0x4067e80000000000 ## double 191.25
.quad 0x4068300000000000 ## double 193.5
.quad 0x4068480000000000 ## double 194.25
.quad 0x4068a00000000000 ## double 197
.quad 0x4067f80000000000 ## double 191.75
.quad 0x4066a80000000000 ## double 181.25
.quad 0x4066f80000000000 ## double 183.75
.quad 0x4066a80000000000 ## double 181.25
.quad 0x4067300000000000 ## double 185.5
.quad 0x4068700000000000 ## double 195.5
.quad 0x4068100000000000 ## double 192.5
.quad 0x4067580000000000 ## double 186.75
.quad 0x4066b00000000000 ## double 181.5
.quad 0x4066a00000000000 ## double 181
.quad 0x4066c00000000000 ## double 182
.quad 0x4067000000000000 ## double 184
.quad 0x40666c28f5c28f5c ## double 179.38
.quad 0x4066700000000000 ## double 179.5
.quad 0x4065800000000000 ## double 172
.quad 0x4064900000000000 ## double 164.5
.quad 0x4064980000000000 ## double 164.75
.quad 0x4063f00000000000 ## double 159.5
.quad 0x4064100000000000 ## double 160.5
.quad 0x4064700000000000 ## double 163.5
.quad 0x4064380000000000 ## double 161.75
.quad 0x4063f00000000000 ## double 159.5
.quad 0x4064d80000000000 ## double 166.75
.quad 0x4064800000000000 ## double 164
.quad 0x4065600000000000 ## double 171
.quad 0x4065000000000000 ## double 168
.quad 0x4065e00000000000 ## double 175
.quad 0x4065b00000000000 ## double 173.5
.quad 0x4065b80000000000 ## double 173.75
.quad 0x4064d00000000000 ## double 166.5
.quad 0x4065100000000000 ## double 168.5
.quad 0x4063f80000000000 ## double 159.75
.quad 0x4063a80000000000 ## double 157.25
.quad 0x4064a00000000000 ## double 165
.quad 0x4065880000000000 ## double 172.25
.quad 0x4065e80000000000 ## double 175.25
.quad 0x4066500000000000 ## double 178.5
.quad 0x4067300000000000 ## double 185.5
.quad 0x4066f00000000000 ## double 183.5
.quad 0x4066800000000000 ## double 180
.quad 0x4068100000000000 ## double 192.5
.quad 0x4068300000000000 ## double 193.5
.quad 0x4068000000000000 ## double 192
.quad 0x4068600000000000 ## double 195
.quad 0x4068480000000000 ## double 194.25
.quad 0x4068900000000000 ## double 196.5
.quad 0x4068680000000000 ## double 195.25
.quad 0x4068900000000000 ## double 196.5
.quad 0x4069000000000000 ## double 200
.quad 0x4069100000000000 ## double 200.5
.quad 0x4068980000000000 ## double 196.75
.quad 0x4069200000000000 ## double 201
.quad 0x4069500000000000 ## double 202.5
.quad 0x4069400000000000 ## double 202
.quad 0x4069780000000000 ## double 203.75
.quad 0x406a380000000000 ## double 209.75
.quad 0x406a580000000000 ## double 210.75
.quad 0x406ae00000000000 ## double 215
.quad 0x406ae00000000000 ## double 215
.quad 0x406a780000000000 ## double 211.75
.quad 0x4069dc28f5c28f5c ## double 206.88
.quad 0x4069700000000000 ## double 203.5
.quad 0x4069780000000000 ## double 203.75
.quad 0x4069f00000000000 ## double 207.5
.quad 0x4069f00000000000 ## double 207.5
.quad 0x4069700000000000 ## double 203.5
.quad 0x406ad00000000000 ## double 214.5
.quad 0x406bd80000000000 ## double 222.75
.quad 0x406bd80000000000 ## double 222.75
.quad 0x406bfc28f5c28f5c ## double 223.88
.quad 0x406b7c28f5c28f5c ## double 219.88
.quad 0x406b800000000000 ## double 220
.quad 0x406bb00000000000 ## double 221.5
.quad 0x406b380000000000 ## double 217.75
.quad 0x406b000000000000 ## double 216
.quad 0x406ba00000000000 ## double 221
.quad 0x406ae00000000000 ## double 215
.quad 0x4069180000000000 ## double 200.75
.quad 0x4069600000000000 ## double 203
.quad 0x4069600000000000 ## double 203
.quad 0x4069480000000000 ## double 202.25
.quad 0x406a2428f5c28f5c ## double 209.13
.quad 0x406aa80000000000 ## double 213.25
.quad 0x406ab00000000000 ## double 213.5
.quad 0x406ab00000000000 ## double 213.5
.quad 0x406ac00000000000 ## double 214
.quad 0x406ae00000000000 ## double 215
.quad 0x406b900000000000 ## double 220.5
.quad 0x406bb00000000000 ## double 221.5
.quad 0x406af80000000000 ## double 215.75
.quad 0x406ab80000000000 ## double 213.75
.quad 0x406b400000000000 ## double 218
.quad 0x406b500000000000 ## double 218.5
.quad 0x406b700000000000 ## double 219.5
.quad 0x406bc00000000000 ## double 222
.quad 0x406c580000000000 ## double 226.75
.quad 0x406c880000000000 ## double 228.25
.quad 0x406c400000000000 ## double 226
.quad 0x406ca00000000000 ## double 229
.quad 0x406cd00000000000 ## double 230.5
.quad 0x406d000000000000 ## double 232
.quad 0x406c780000000000 ## double 227.75
.quad 0x406bd00000000000 ## double 222.5
.quad 0x406bc00000000000 ## double 222
.quad 0x406b600000000000 ## double 219
.quad 0x406ae00000000000 ## double 215
.quad 0x406ad80000000000 ## double 214.75
.quad 0x406b3c28f5c28f5c ## double 217.88
.quad 0x406b700000000000 ## double 219.5
.quad 0x406b580000000000 ## double 218.75
.quad 0x406b180000000000 ## double 216.75
.quad 0x406b880000000000 ## double 220.25
.quad 0x406bb00000000000 ## double 221.5
.quad 0x406b980000000000 ## double 220.75
.quad 0x406b780000000000 ## double 219.75
.quad 0x406bf80000000000 ## double 223.75
.quad 0x406bf00000000000 ## double 223.5
.quad 0x406c100000000000 ## double 224.5
.quad 0x406c000000000000 ## double 224
.quad 0x406cd00000000000 ## double 230.5
.quad 0x406cd80000000000 ## double 230.75
.quad 0x406c900000000000 ## double 228.5
.quad 0x406c900000000000 ## double 228.5
.quad 0x406c100000000000 ## double 224.5
.quad 0x406c980000000000 ## double 228.75
.quad 0x406d000000000000 ## double 232
.quad 0x406cf80000000000 ## double 231.75
.quad 0x406ca00000000000 ## double 229
.quad 0x406c200000000000 ## double 225
.quad 0x406c580000000000 ## double 226.75
.quad 0x406bd00000000000 ## double 222.5
.quad 0x406c780000000000 ## double 227.75
.quad 0x406ccc28f5c28f5c ## double 230.38
.quad 0x406d100000000000 ## double 232.5
.quad 0x406d380000000000 ## double 233.75
.quad 0x406da00000000000 ## double 237
.quad 0x406d700000000000 ## double 235.5
.quad 0x406d500000000000 ## double 234.5
.quad 0x406d300000000000 ## double 233.5
.quad 0x406d000000000000 ## double 232
.quad 0x406d500000000000 ## double 234.5
.quad 0x406d600000000000 ## double 235
.quad 0x406d300000000000 ## double 233.5
.quad 0x406d900000000000 ## double 236.5
.quad 0x406e500000000000 ## double 242.5
.quad 0x406df00000000000 ## double 239.5
.quad 0x406d680000000000 ## double 235.25
.quad 0x406d900000000000 ## double 236.5
.quad 0x406e080000000000 ## double 240.25
.quad 0x406e500000000000 ## double 242.5
.quad 0x406f200000000000 ## double 249
.quad 0x406f580000000000 ## double 250.75
.quad 0x406ef80000000000 ## double 247.75
.quad 0x406e900000000000 ## double 244.5
.quad 0x406ef80000000000 ## double 247.75
.quad 0x406ed80000000000 ## double 246.75
.quad 0x406ef00000000000 ## double 247.5
.quad 0x406f580000000000 ## double 250.75
.quad 0x406f500000000000 ## double 250.5
.quad 0x406f700000000000 ## double 251.5
.quad 0x406fc00000000000 ## double 254
.quad 0x406f700000000000 ## double 251.5
.quad 0x406ed00000000000 ## double 246.5
.quad 0x406e680000000000 ## double 243.25
.quad 0x406e980000000000 ## double 244.75
.quad 0x406e900000000000 ## double 244.5
.quad 0x406e400000000000 ## double 242
.quad 0x406d980000000000 ## double 236.75
.quad 0x406dc80000000000 ## double 238.25
.quad 0x406dc00000000000 ## double 238
.quad 0x406e000000000000 ## double 240
.quad 0x406db80000000000 ## double 237.75
.quad 0x406db00000000000 ## double 237.5
.quad 0x406da00000000000 ## double 237
.quad 0x406dc00000000000 ## double 238
.quad 0x406d700000000000 ## double 235.5
.quad 0x406dc00000000000 ## double 238
.quad 0x406d300000000000 ## double 233.5
.quad 0x406cb00000000000 ## double 229.5
.quad 0x406d000000000000 ## double 232
.quad 0x406cc00000000000 ## double 230
.quad 0x406d300000000000 ## double 233.5
.quad 0x406da80000000000 ## double 237.25
.quad 0x406da80000000000 ## double 237.25
.quad 0x406dd00000000000 ## double 238.5
.quad 0x406df00000000000 ## double 239.5
.quad 0x406e180000000000 ## double 240.75
.quad 0x406e900000000000 ## double 244.5
.quad 0x406f380000000000 ## double 249.75
.quad 0x406f100000000000 ## double 248.5
.quad 0x406e880000000000 ## double 244.25
.quad 0x406f000000000000 ## double 248
.quad 0x406f180000000000 ## double 248.75
.quad 0x406f080000000000 ## double 248.25
.quad 0x406f400000000000 ## double 250
.quad 0x406f380000000000 ## double 249.75
.quad 0x406ea00000000000 ## double 245
.quad 0x406f600000000000 ## double 251
.quad 0x406f700000000000 ## double 251.5
.quad 0x406e900000000000 ## double 244.5
.quad 0x406e480000000000 ## double 242.25
.quad 0x406eb80000000000 ## double 245.75
.quad 0x406e000000000000 ## double 240
.quad 0x406d600000000000 ## double 235
.quad 0x406c880000000000 ## double 228.25
.quad 0x406cd80000000000 ## double 230.75
.quad 0x406cd80000000000 ## double 230.75
.quad 0x406dc00000000000 ## double 238
.quad 0x406eb80000000000 ## double 245.75
.quad 0x406ee00000000000 ## double 247
.quad 0x406f300000000000 ## double 249.5
.quad 0x406fe00000000000 ## double 255
.quad 0x4070040000000000 ## double 256.25
.quad 0x406f100000000000 ## double 248.5
.quad 0x406de00000000000 ## double 239
.quad 0x406e800000000000 ## double 244
.quad 0x406f600000000000 ## double 251
.quad 0x406fc00000000000 ## double 254
.quad 0x406f300000000000 ## double 249.5
.quad 0x406e900000000000 ## double 244.5
.quad 0x406f700000000000 ## double 251.5
.quad 0x406fe00000000000 ## double 255
.quad 0x406fa00000000000 ## double 253
.quad 0x4070780000000000 ## double 263.5
.quad 0x4070a00000000000 ## double 266
.quad 0x4070b00000000000 ## double 267
.quad 0x40704c0000000000 ## double 260.75
.quad 0x40701c0000000000 ## double 257.75
.quad 0x4070480000000000 ## double 260.5
.quad 0x40708c0000000000 ## double 264.75
.quad 0x4070c00000000000 ## double 268
.quad 0x4070f00000000000 ## double 271
.quad 0x4070ec0000000000 ## double 270.75
.quad 0x4070c40000000000 ## double 268.25
.quad 0x4070d80000000000 ## double 269.5
.quad 0x4071440000000000 ## double 276.25
.quad 0x40715c0000000000 ## double 277.75
.quad 0x40712c0000000000 ## double 274.75
.quad 0x4071ca147ae147ae ## double 284.63
.quad 0x4072040000000000 ## double 288.25
.quad 0x4071e00000000000 ## double 286
.quad 0x4072200000000000 ## double 290
.quad 0x4072200000000000 ## double 290
.quad 0x4071cc0000000000 ## double 284.75
.quad 0x4071a00000000000 ## double 282
.quad 0x4071dc0000000000 ## double 285.75
.quad 0x4071780000000000 ## double 279.5
.quad 0x40718c0000000000 ## double 280.75
.quad 0x4071700000000000 ## double 279
.quad 0x4071d00000000000 ## double 285
.quad 0x4071cc0000000000 ## double 284.75
.quad 0x40719c0000000000 ## double 281.75
.quad 0x4071cc0000000000 ## double 284.75
.quad 0x40720c0000000000 ## double 288.75
.quad 0x40723c0000000000 ## double 291.75
.quad 0x4072380000000000 ## double 291.5
.quad 0x40720c0000000000 ## double 288.75
.quad 0x40721c0000000000 ## double 289.75
.quad 0x4072040000000000 ## double 288.25
.quad 0x4072700000000000 ## double 295
.quad 0x4072480000000000 ## double 292.5
.quad 0x4071d00000000000 ## double 285
.quad 0x4071a00000000000 ## double 282
.quad 0x4071840000000000 ## double 280.25
.quad 0x4071c80000000000 ## double 284.5
.quad 0x4071bc0000000000 ## double 283.75
.quad 0x4071c80000000000 ## double 284.5
.quad 0x40705c0000000000 ## double 261.75
.quad 0x4070780000000000 ## double 263.5
.quad 0x4070600000000000 ## double 262
.quad 0x406f700000000000 ## double 251.5
.quad 0x406fc00000000000 ## double 254
.quad 0x4070180000000000 ## double 257.5
.quad 0x40701c0000000000 ## double 257.75
.quad 0x4070500000000000 ## double 261
.quad 0x4070880000000000 ## double 264.5
.quad 0x4070b80000000000 ## double 267.5
.quad 0x4070500000000000 ## double 261
.quad 0x40704c0000000000 ## double 260.75
.quad 0x4070880000000000 ## double 264.5
.quad 0x4070fc0000000000 ## double 271.75
.quad 0x4070ec0000000000 ## double 270.75
.quad 0x4070500000000000 ## double 261
.quad 0x4071d2147ae147ae ## double 285.13
.quad 0x4072a00000000000 ## double 298
.quad 0x4072600000000000 ## double 294
.quad 0x4072480000000000 ## double 292.5
.quad 0x4072380000000000 ## double 291.5
.quad 0x4072800000000000 ## double 296
.quad 0x4072a40000000000 ## double 298.25
.quad 0x4072ac0000000000 ## double 298.75
.quad 0x4072a00000000000 ## double 298
.quad 0x4072c80000000000 ## double 300.5
.quad 0x4072f00000000000 ## double 303
.quad 0x4073040000000000 ## double 304.25
.quad 0x4073100000000000 ## double 305
.quad 0x4073280000000000 ## double 306.5
.quad 0x4073080000000000 ## double 304.5
.quad 0x4072bc0000000000 ## double 299.75
.quad 0x4072c40000000000 ## double 300.25
.quad 0x4072c00000000000 ## double 300
.quad 0x4072cc0000000000 ## double 300.75
.quad 0x4072d40000000000 ## double 301.25
.quad 0x4072e00000000000 ## double 302
.quad 0x40730c0000000000 ## double 304.75
.quad 0x4072f80000000000 ## double 303.5
.quad 0x4072f80000000000 ## double 303.5
.quad 0x4072e00000000000 ## double 302
.quad 0x4073180000000000 ## double 305.5
.quad 0x40732c0000000000 ## double 306.75
.quad 0x4073200000000000 ## double 306
.quad 0x40736c0000000000 ## double 310.75
.quad 0x40738c0000000000 ## double 312.75
.quad 0x4073800000000000 ## double 312
.quad 0x4073a00000000000 ## double 314
.quad 0x4073ac0000000000 ## double 314.75
.quad 0x4074380000000000 ## double 323.5
.quad 0x40740c0000000000 ## double 320.75
.quad 0x40740c0000000000 ## double 320.75
.quad 0x4074240000000000 ## double 322.25
.quad 0x4074280000000000 ## double 322.5
.quad 0x4073980000000000 ## double 313.5
.quad 0x4073c80000000000 ## double 316.5
.quad 0x4073e80000000000 ## double 318.5
.quad 0x4073f40000000000 ## double 319.25
.quad 0x4073c00000000000 ## double 316
.quad 0x4073f80000000000 ## double 319.5
.quad 0x4073f00000000000 ## double 319
.quad 0x4073c80000000000 ## double 316.5
.quad 0x4073f00000000000 ## double 319
.quad 0x4074000000000000 ## double 320
.quad 0x4073a00000000000 ## double 314
.quad 0x4073180000000000 ## double 305.5
.quad 0x4072e00000000000 ## double 302
.quad 0x4072d00000000000 ## double 301
.quad 0x4073000000000000 ## double 304
.quad 0x4073a00000000000 ## double 314
.quad 0x4073f80000000000 ## double 319.5
.quad 0x4073fc0000000000 ## double 319.75
.quad 0x4073a80000000000 ## double 314.5
.quad 0x4073b80000000000 ## double 315.5
.quad 0x4073a2147ae147ae ## double 314.13
.quad 0x4074000000000000 ## double 320
.quad 0x4073f00000000000 ## double 319
.quad 0x4074240000000000 ## double 322.25
.quad 0x4073e00000000000 ## double 318
.quad 0x4073d00000000000 ## double 317
.quad 0x4073fc0000000000 ## double 319.75
.quad 0x4074880000000000 ## double 328.5
.quad 0x4074d80000000000 ## double 333.5
.quad 0x4074a00000000000 ## double 330
.quad 0x4074700000000000 ## double 327
.quad 0x4074540000000000 ## double 325.25
.quad 0x4074c80000000000 ## double 332.5
.quad 0x4073fc0000000000 ## double 319.75
.quad 0x4073d00000000000 ## double 317
.quad 0x40736c0000000000 ## double 310.75
.quad 0x4073a00000000000 ## double 314
.quad 0x4073fc0000000000 ## double 319.75
.quad 0x4074400000000000 ## double 324
.quad 0x4074880000000000 ## double 328.5
.quad 0x4079800000000000 ## double 408
.quad 0x4079080000000000 ## double 400.5
.quad 0x4078c80000000000 ## double 396.5
.quad 0x40789c0000000000 ## double 393.75
.quad 0x40782c0000000000 ## double 386.75
.quad 0x4078b00000000000 ## double 395
.quad 0x4079000000000000 ## double 400
.quad 0x4079200000000000 ## double 402
.quad 0x4079200000000000 ## double 402
.quad 0x4079380000000000 ## double 403.5
.quad 0x4079980000000000 ## double 409.5
.quad 0x4079f80000000000 ## double 415.5
.quad 0x407a700000000000 ## double 423
.quad 0x407a600000000000 ## double 422
.quad 0x407a300000000000 ## double 419
.quad 0x407a940000000000 ## double 425.25
.quad 0x407a800000000000 ## double 424
.quad 0x407a1c0000000000 ## double 417.75
.quad 0x407a1c0000000000 ## double 417.75
.quad 0x4079900000000000 ## double 409
.quad 0x4079b00000000000 ## double 411
.quad 0x4079b80000000000 ## double 411.5
.quad 0x40799c0000000000 ## double 409.75
.quad 0x407a780000000000 ## double 423.5
.quad 0x407ac40000000000 ## double 428.25
.quad 0x407b080000000000 ## double 432.5
.quad 0x407b000000000000 ## double 432
.quad 0x407a9c0000000000 ## double 425.75
.quad 0x407adc0000000000 ## double 429.75
.quad 0x407aec0000000000 ## double 430.75
.quad 0x407b040000000000 ## double 432.25
.quad 0x407b440000000000 ## double 436.25
.quad 0x407b300000000000 ## double 435
.quad 0x407ad80000000000 ## double 429.5
.quad 0x407ac00000000000 ## double 428
.quad 0x407a800000000000 ## double 424
.quad 0x407aa80000000000 ## double 426.5
.quad 0x407acc0000000000 ## double 428.75
.quad 0x407b0c0000000000 ## double 432.75
.quad 0x407b580000000000 ## double 437.5
.quad 0x407b700000000000 ## double 439
.quad 0x407b5c0000000000 ## double 437.75
.quad 0x407b680000000000 ## double 438.5
.quad 0x407b300000000000 ## double 435
.quad 0x407ad00000000000 ## double 429
.quad 0x407ace147ae147ae ## double 428.88
.quad 0x407b940000000000 ## double 441.25
.quad 0x407b800000000000 ## double 440
.quad 0x407bae147ae147ae ## double 442.88
.quad 0x407c080000000000 ## double 448.5
.quad 0x407c1c0000000000 ## double 449.75
.quad 0x407c180000000000 ## double 449.5
.quad 0x407bec0000000000 ## double 446.75
.quad 0x407bc00000000000 ## double 444
.quad 0x407ba80000000000 ## double 442.5
.quad 0x407bb40000000000 ## double 443.25
.quad 0x407bd80000000000 ## double 445.5
.quad 0x407bdc0000000000 ## double 445.75
.quad 0x407ba2147ae147ae ## double 442.13
.quad 0x407bc00000000000 ## double 444
.quad 0x407b300000000000 ## double 435
.quad 0x407af80000000000 ## double 431.5
.quad 0x407b000000000000 ## double 432
.quad 0x407b180000000000 ## double 433.5
.quad 0x407ba80000000000 ## double 442.5
.quad 0x407bd00000000000 ## double 445
.quad 0x407bf80000000000 ## double 447.5
.quad 0x407bf00000000000 ## double 447
.quad 0x407bac0000000000 ## double 442.75
.quad 0x407bf40000000000 ## double 447.25
.quad 0x407c2c0000000000 ## double 450.75
.quad 0x407c880000000000 ## double 456.5
.quad 0x407c200000000000 ## double 450
.quad 0x407be00000000000 ## double 446
.quad 0x407b700000000000 ## double 439
.quad 0x407ba00000000000 ## double 442
.quad 0x407b800000000000 ## double 440
.quad 0x407b000000000000 ## double 432
.quad 0x407b000000000000 ## double 432
.quad 0x407b600000000000 ## double 438
.quad 0x407b5c0000000000 ## double 437.75
.quad 0x407bd00000000000 ## double 445
.quad 0x407ba00000000000 ## double 442
.quad 0x407b600000000000 ## double 438
.quad 0x407c080000000000 ## double 448.5
.quad 0x407bcc0000000000 ## double 444.75
.quad 0x407b700000000000 ## double 439
.quad 0x407b700000000000 ## double 439
.quad 0x407ae00000000000 ## double 430
.quad 0x407a840000000000 ## double 424.25
.quad 0x407a8c0000000000 ## double 424.75
.quad 0x407ad00000000000 ## double 429
.quad 0x407a900000000000 ## double 425
.quad 0x407a700000000000 ## double 423
.quad 0x4079f80000000000 ## double 415.5
.quad 0x407a340000000000 ## double 419.25
.quad 0x407a400000000000 ## double 420
.quad 0x407a380000000000 ## double 419.5
.quad 0x4079c80000000000 ## double 412.5
.quad 0x4079dc0000000000 ## double 413.75
.quad 0x4079980000000000 ## double 409.5
.quad 0x4079300000000000 ## double 403
.quad 0x4078f00000000000 ## double 399
.quad 0x4078b80000000000 ## double 395.5
.quad 0x4078680000000000 ## double 390.5
.quad 0x40785c0000000000 ## double 389.75
.quad 0x40788c0000000000 ## double 392.75
.quad 0x40789c0000000000 ## double 393.75
.quad 0x4078d80000000000 ## double 397.5
.quad 0x4079100000000000 ## double 401
.quad 0x4078d00000000000 ## double 397
.quad 0x4078f00000000000 ## double 399
.quad 0x40793c0000000000 ## double 403.75
.quad 0x4079500000000000 ## double 405
.quad 0x4079480000000000 ## double 404.5
.quad 0x4078d80000000000 ## double 397.5
.quad 0x4078f00000000000 ## double 399
.quad 0x4079000000000000 ## double 400
.quad 0x40790c0000000000 ## double 400.75
.quad 0x4079100000000000 ## double 401
.quad 0x4078740000000000 ## double 391.25
.quad 0x4078300000000000 ## double 387
.quad 0x4078240000000000 ## double 386.25
.quad 0x4078340000000000 ## double 387.25
.quad 0x4078a40000000000 ## double 394.25
.quad 0x4078a80000000000 ## double 394.5
.quad 0x4078400000000000 ## double 388
.quad 0x4078180000000000 ## double 385.5
.quad 0x4078140000000000 ## double 385.25
.quad 0x4078180000000000 ## double 385.5
.quad 0x4078600000000000 ## double 390
.quad 0x4078400000000000 ## double 388
.quad 0x4077900000000000 ## double 377
.quad 0x40776c0000000000 ## double 374.75
.quad 0x4077840000000000 ## double 376.25
.quad 0x4077f00000000000 ## double 383
.quad 0x4078480000000000 ## double 388.5
.quad 0x40786e147ae147ae ## double 390.88
.quad 0x4078700000000000 ## double 391
.quad 0x40785c0000000000 ## double 389.75
.quad 0x4078180000000000 ## double 385.5
.quad 0x4077d00000000000 ## double 381
.quad 0x4077cc0000000000 ## double 380.75
.quad 0x4077f80000000000 ## double 383.5
.quad 0x40784c0000000000 ## double 388.75
.quad 0x4078680000000000 ## double 390.5
.quad 0x4078880000000000 ## double 392.5
.quad 0x4078300000000000 ## double 387
.quad 0x4077b40000000000 ## double 379.25
.quad 0x4077bc0000000000 ## double 379.75
.quad 0x4077400000000000 ## double 372
.quad 0x4077c00000000000 ## double 380
.quad 0x4077f80000000000 ## double 383.5
.quad 0x4078480000000000 ## double 388.5
.quad 0x4078a40000000000 ## double 394.25
.quad 0x4079300000000000 ## double 403
.quad 0x407936147ae147ae ## double 403.38
.quad 0x40793c0000000000 ## double 403.75
.quad 0x4079380000000000 ## double 403.5
.quad 0x40797e147ae147ae ## double 407.88
.quad 0x4079780000000000 ## double 407.5
.quad 0x40796c0000000000 ## double 406.75
.quad 0x40796c0000000000 ## double 406.75
.quad 0x40795c0000000000 ## double 405.75
.quad 0x4079700000000000 ## double 407
.quad 0x4079180000000000 ## double 401.5
.quad 0x4078f00000000000 ## double 399
.quad 0x4078fc0000000000 ## double 399.75
.quad 0x4079100000000000 ## double 401
.quad 0x40790e147ae147ae ## double 400.88
.quad 0x4078f40000000000 ## double 399.25
.quad 0x4078d00000000000 ## double 397
.quad 0x4079040000000000 ## double 400.25
.quad 0x4079080000000000 ## double 400.5
.quad 0x4078e00000000000 ## double 398
.quad 0x4078e80000000000 ## double 398.5
.quad 0x4078dc0000000000 ## double 397.75
.quad 0x4079080000000000 ## double 400.5
.quad 0x4079300000000000 ## double 403
.quad 0x4079580000000000 ## double 405.5
.quad 0x40796c0000000000 ## double 406.75
.quad 0x40799c0000000000 ## double 409.75
.quad 0x4079940000000000 ## double 409.25
.quad 0x4079980000000000 ## double 409.5
.quad 0x4079900000000000 ## double 409
.quad 0x4079780000000000 ## double 407.5
.quad 0x4079800000000000 ## double 408
.quad 0x4079d80000000000 ## double 413.5
.quad 0x4079e00000000000 ## double 414
.quad 0x4079e80000000000 ## double 414.5
.quad 0x4079dc0000000000 ## double 413.75
.quad 0x407a080000000000 ## double 416.5
.quad 0x407a140000000000 ## double 417.25
.quad 0x407a580000000000 ## double 421.5
.quad 0x407a6c0000000000 ## double 422.75
.quad 0x407aac0000000000 ## double 426.75
.quad 0x407a980000000000 ## double 425.5
.quad 0x407a900000000000 ## double 425
.quad 0x407a780000000000 ## double 423.5
.quad 0x407a300000000000 ## double 419
.quad 0x407a440000000000 ## double 420.25
.quad 0x407a100000000000 ## double 417
.quad 0x407a080000000000 ## double 416.5
.quad 0x4079b80000000000 ## double 411.5
.quad 0x4079a00000000000 ## double 410
.quad 0x40790c0000000000 ## double 400.75
.quad 0x4078fc0000000000 ## double 399.75
.quad 0x4078e00000000000 ## double 398
.quad 0x4078e2147ae147ae ## double 398.13
.quad 0x4079080000000000 ## double 400.5
.quad 0x4079100000000000 ## double 401
.quad 0x4078c80000000000 ## double 396.5
.quad 0x4078780000000000 ## double 391.5
.quad 0x4078a00000000000 ## double 394
.quad 0x40784c0000000000 ## double 388.75
.quad 0x4078bc0000000000 ## double 395.75
.quad 0x4079100000000000 ## double 401
.quad 0x4078d00000000000 ## double 397
.quad 0x4078ec0000000000 ## double 398.75
.quad 0x4078f80000000000 ## double 399.5
.quad 0x4079400000000000 ## double 404
.quad 0x4079580000000000 ## double 405.5
.quad 0x407952147ae147ae ## double 405.13
.quad 0x40791c0000000000 ## double 401.75
.quad 0x4078b40000000000 ## double 395.25
.quad 0x4078a40000000000 ## double 394.25
.quad 0x4078800000000000 ## double 392
.quad 0x4078a40000000000 ## double 394.25
.quad 0x4078b00000000000 ## double 395
.quad 0x4078980000000000 ## double 393.5
.quad 0x4078d40000000000 ## double 397.25
.quad 0x4078c00000000000 ## double 396
.quad 0x40790c0000000000 ## double 400.75
.quad 0x4078e00000000000 ## double 398
.quad 0x4078ec0000000000 ## double 398.75
.quad 0x4078f80000000000 ## double 399.5
.quad 0x40790c0000000000 ## double 400.75
.quad 0x4078fc0000000000 ## double 399.75
.quad 0x4078900000000000 ## double 393
.quad 0x4078940000000000 ## double 393.25
.quad 0x4078aa147ae147ae ## double 394.63
.quad 0x4078d00000000000 ## double 397
.quad 0x4078b80000000000 ## double 395.5
.quad 0x4078c80000000000 ## double 396.5
.quad 0x4078d80000000000 ## double 397.5
.quad 0x4079100000000000 ## double 401
.quad 0x4079100000000000 ## double 401
.quad 0x4078f00000000000 ## double 399
.quad 0x4079340000000000 ## double 403.25
.quad 0x4079600000000000 ## double 406
.quad 0x4079400000000000 ## double 404
.quad 0x40794c0000000000 ## double 404.75
.quad 0x4079280000000000 ## double 402.5
.quad 0x4079180000000000 ## double 401.5
.quad 0x4078e00000000000 ## double 398
.quad 0x4078b00000000000 ## double 395
.quad 0x4078580000000000 ## double 389.5
.quad 0x4078300000000000 ## double 387
.quad 0x40782c0000000000 ## double 386.75
.quad 0x4078280000000000 ## double 386.5
.quad 0x4078280000000000 ## double 386.5
.quad 0x4078200000000000 ## double 386
.quad 0x40781c0000000000 ## double 385.75
.quad 0x4078300000000000 ## double 387
.quad 0x40785c0000000000 ## double 389.75
.quad 0x4078100000000000 ## double 385
.quad 0x4077fc0000000000 ## double 383.75
.quad 0x4078200000000000 ## double 386
.quad 0x4078240000000000 ## double 386.25
.quad 0x4078300000000000 ## double 387
.quad 0x4078080000000000 ## double 384.5
.quad 0x4078100000000000 ## double 385
.quad 0x4077f80000000000 ## double 383.5
.quad 0x4078180000000000 ## double 385.5
.quad 0x4078540000000000 ## double 389.25
.quad 0x40786c0000000000 ## double 390.75
.quad 0x4078cc0000000000 ## double 396.75
.quad 0x4078f80000000000 ## double 399.5
.quad 0x4078b00000000000 ## double 395
.quad 0x4078e00000000000 ## double 398
.quad 0x4078e00000000000 ## double 398
.quad 0x4078fc0000000000 ## double 399.75
.quad 0x4078980000000000 ## double 393.5
.quad 0x40789c0000000000 ## double 393.75
.quad 0x4078700000000000 ## double 391
.quad 0x4078980000000000 ## double 393.5
.quad 0x40789c0000000000 ## double 393.75
.quad 0x4078580000000000 ## double 389.5
.quad 0x40783c0000000000 ## double 387.75
.quad 0x4077f80000000000 ## double 383.5
.quad 0x4077fa147ae147ae ## double 383.63
.quad 0x4078200000000000 ## double 386
.quad 0x4078100000000000 ## double 385
.quad 0x4078000000000000 ## double 384
.quad 0x4077e80000000000 ## double 382.5
.quad 0x4077cc0000000000 ## double 380.75
.quad 0x4077b00000000000 ## double 379
.quad 0x4077d00000000000 ## double 381
.quad 0x4077900000000000 ## double 377
.quad 0x4077600000000000 ## double 374
.quad 0x4077680000000000 ## double 374.5
.quad 0x4077600000000000 ## double 374
.quad 0x40779c0000000000 ## double 377.75
.quad 0x4077e00000000000 ## double 382
.quad 0x4078100000000000 ## double 385
.quad 0x4078080000000000 ## double 384.5
.quad 0x4077fc0000000000 ## double 383.75
.quad 0x4077dc0000000000 ## double 381.75
.quad 0x4077ac0000000000 ## double 378.75
.quad 0x4077c80000000000 ## double 380.5
.quad 0x40775c0000000000 ## double 373.75
.quad 0x40772c0000000000 ## double 370.75
.quad 0x40773c0000000000 ## double 371.75
.quad 0x4077500000000000 ## double 373
.quad 0x4077300000000000 ## double 371
.quad 0x40773c0000000000 ## double 371.75
.quad 0x4077580000000000 ## double 373.5
.quad 0x40770c0000000000 ## double 368.75
.quad 0x4077400000000000 ## double 372
.quad 0x4077200000000000 ## double 370
.quad 0x4077240000000000 ## double 370.25
.quad 0x40774c0000000000 ## double 372.75
.quad 0x40776c0000000000 ## double 374.75
.quad 0x40776c0000000000 ## double 374.75
.quad 0x4077700000000000 ## double 375
.quad 0x4077440000000000 ## double 372.25
.quad 0x4077300000000000 ## double 371
.quad 0x4077440000000000 ## double 372.25
.quad 0x4077700000000000 ## double 375
.quad 0x40776e147ae147ae ## double 374.88
.quad 0x4077100000000000 ## double 369
.quad 0x40771e147ae147ae ## double 369.88
.quad 0x4077400000000000 ## double 372
.quad 0x4077140000000000 ## double 369.25
.quad 0x4077200000000000 ## double 370
.quad 0x4077200000000000 ## double 370
.quad 0x4076e00000000000 ## double 366
.quad 0x4077200000000000 ## double 370
.quad 0x4076d00000000000 ## double 365
.quad 0x4076340000000000 ## double 355.25
.quad 0x4076180000000000 ## double 353.5
.quad 0x4075ec0000000000 ## double 350.75
.quad 0x4075e00000000000 ## double 350
.quad 0x4075540000000000 ## double 341.25
.quad 0x4075b80000000000 ## double 347.5
.quad 0x4075540000000000 ## double 341.25
.quad 0x40753c0000000000 ## double 339.75
.quad 0x40756c0000000000 ## double 342.75
.quad 0x4075580000000000 ## double 341.5
.quad 0x4075780000000000 ## double 343.5
.quad 0x4075400000000000 ## double 340
.quad 0x4075280000000000 ## double 338.5
.quad 0x4075200000000000 ## double 338
.quad 0x4075140000000000 ## double 337.25
.quad 0x4075400000000000 ## double 340
.quad 0x40753c0000000000 ## double 339.75
.quad 0x4075080000000000 ## double 336.5
.quad 0x40753c0000000000 ## double 339.75
.quad 0x4075900000000000 ## double 345
.quad 0x4075b00000000000 ## double 347
.quad 0x4075c80000000000 ## double 348.5
.quad 0x40753c0000000000 ## double 339.75
.quad 0x40753c0000000000 ## double 339.75
.quad 0x4074800000000000 ## double 328
.quad 0x4074540000000000 ## double 325.25
.quad 0x4074800000000000 ## double 328
.quad 0x40744c0000000000 ## double 324.75
.quad 0x4073e80000000000 ## double 318.5
.quad 0x4073d40000000000 ## double 317.25
.quad 0x4073d40000000000 ## double 317.25
.quad 0x40740c0000000000 ## double 320.75
.quad 0x4074180000000000 ## double 321.5
.quad 0x4073800000000000 ## double 312
.quad 0x4073600000000000 ## double 310
.quad 0x4073180000000000 ## double 305.5
.quad 0x4072e80000000000 ## double 302.5
.quad 0x4072ac0000000000 ## double 298.75
.quad 0x4072600000000000 ## double 294
.quad 0x4072540000000000 ## double 293.25
.quad 0x4072740000000000 ## double 295.25
.quad 0x40727c0000000000 ## double 295.75
.quad 0x4072d00000000000 ## double 301
.quad 0x4072dc0000000000 ## double 301.75
.quad 0x4072e00000000000 ## double 302
.quad 0x4072700000000000 ## double 295
.quad 0x4072880000000000 ## double 296.5
.quad 0x4072800000000000 ## double 296
.quad 0x4072bc0000000000 ## double 299.75
.quad 0x4072e00000000000 ## double 302
.quad 0x4072c00000000000 ## double 300
.quad 0x4072c00000000000 ## double 300
.quad 0x4073280000000000 ## double 306.5
.quad 0x4072f80000000000 ## double 303.5
.quad 0x40727c0000000000 ## double 295.75
.quad 0x4072c00000000000 ## double 300
.quad 0x4072d00000000000 ## double 301
.quad 0x4072d00000000000 ## double 301
.quad 0x4072c80000000000 ## double 300.5
.quad 0x40730c0000000000 ## double 304.75
.quad 0x4072f40000000000 ## double 303.25
.quad 0x4073400000000000 ## double 308
.quad 0x40731c0000000000 ## double 305.75
.quad 0x4073480000000000 ## double 308.5
.quad 0x4073540000000000 ## double 309.25
.quad 0x4073540000000000 ## double 309.25
.quad 0x4073540000000000 ## double 309.25
.quad 0x4073580000000000 ## double 309.5
.quad 0x40739a147ae147ae ## double 313.63
.quad 0x4073b40000000000 ## double 315.25
.quad 0x4073200000000000 ## double 306
.quad 0x4073540000000000 ## double 309.25
.quad 0x4073880000000000 ## double 312.5
.quad 0x4073500000000000 ## double 309
.quad 0x40733c0000000000 ## double 307.75
.quad 0x4073400000000000 ## double 308
.quad 0x4073200000000000 ## double 306
.quad 0x4073230a3d70a3d7 ## double 306.19
.quad 0x4073040000000000 ## double 304.25
.quad 0x4073280000000000 ## double 306.5
.quad 0x4073080000000000 ## double 304.5
.quad 0x4073580000000000 ## double 309.5
.quad 0x40734c0000000000 ## double 308.75
.quad 0x40734c0000000000 ## double 308.75
.quad 0x4073280000000000 ## double 306.5
.quad 0x4073080000000000 ## double 304.5
.quad 0x4072ec0000000000 ## double 302.75
.quad 0x4072e80000000000 ## double 302.5
.quad 0x4072fc0000000000 ## double 303.75
.quad 0x4072c40000000000 ## double 300.25
.quad 0x4073000000000000 ## double 304
.quad 0x40731c0000000000 ## double 305.75
.quad 0x40736c0000000000 ## double 310.75
.quad 0x40737c0000000000 ## double 311.75
.quad 0x4073900000000000 ## double 313
.quad 0x4073400000000000 ## double 308
.quad 0x4072dc0000000000 ## double 301.75
.quad 0x40731c0000000000 ## double 305.75
.quad 0x4073380000000000 ## double 307.5
.quad 0x4073cc0000000000 ## double 316.75
.quad 0x4073d80000000000 ## double 317.5
.quad 0x4073b00000000000 ## double 315
.quad 0x4073e6147ae147ae ## double 318.38
.quad 0x4073ee147ae147ae ## double 318.88
.quad 0x4073ac0000000000 ## double 314.75
.quad 0x4073600000000000 ## double 310
.quad 0x4073600000000000 ## double 310
.quad 0x4073b00000000000 ## double 315
.quad 0x4073e00000000000 ## double 318
.quad 0x4074000000000000 ## double 320
.quad 0x4072880000000000 ## double 296.5
.quad 0x4072980000000000 ## double 297.5
.quad 0x4072380000000000 ## double 291.5
.quad 0x4071f00000000000 ## double 287
.quad 0x4071dc0000000000 ## double 285.75
.quad 0x4071fc0000000000 ## double 287.75
.quad 0x40720c0000000000 ## double 288.75
.quad 0x40724c0000000000 ## double 292.75
.quad 0x40726c0000000000 ## double 294.75
.quad 0x4072800000000000 ## double 296
.quad 0x40729c0000000000 ## double 297.75
.quad 0x4072be147ae147ae ## double 299.88
.quad 0x4072d00000000000 ## double 301
.quad 0x4072800000000000 ## double 296
.quad 0x4072b80000000000 ## double 299.5
.quad 0x4072b80000000000 ## double 299.5
.quad 0x4072dc0000000000 ## double 301.75
.quad 0x4072dc0000000000 ## double 301.75
.quad 0x4072de147ae147ae ## double 301.88
.quad 0x40728c0000000000 ## double 296.75
.quad 0x4072d80000000000 ## double 301.5
.quad 0x4072e00000000000 ## double 302
.quad 0x4072d00000000000 ## double 301
.quad 0x4073a80000000000 ## double 314.5
.quad 0x4073c00000000000 ## double 316
.quad 0x4073c80000000000 ## double 316.5
.quad 0x4073980000000000 ## double 313.5
.quad 0x4073d40000000000 ## double 317.25
.quad 0x4073d80000000000 ## double 317.5
.quad 0x4073f40000000000 ## double 319.25
.quad 0x4073e80000000000 ## double 318.5
.quad 0x4073d80000000000 ## double 317.5
.quad 0x4073a40000000000 ## double 314.25
.quad 0x4073880000000000 ## double 312.5
.quad 0x4073800000000000 ## double 312
.quad 0x4073bc0000000000 ## double 315.75
.quad 0x4073c40000000000 ## double 316.25
.quad 0x4073d80000000000 ## double 317.5
.quad 0x4073cc0000000000 ## double 316.75
.quad 0x407402147ae147ae ## double 320.13
.quad 0x40743c0000000000 ## double 323.75
.quad 0x4074500000000000 ## double 325
.quad 0x4074280000000000 ## double 322.5
.quad 0x4074380000000000 ## double 323.5
.quad 0x40745c0000000000 ## double 325.75
.quad 0x4074200000000000 ## double 322
.quad 0x4073e80000000000 ## double 318.5
.quad 0x4073f80000000000 ## double 319.5
.quad 0x40743c0000000000 ## double 323.75
.quad 0x4074900000000000 ## double 329
.quad 0x40747c0000000000 ## double 327.75
.quad 0x4074900000000000 ## double 329
.quad 0x4074b80000000000 ## double 331.5
.quad 0x4074d00000000000 ## double 333
.quad 0x4075200000000000 ## double 338
.quad 0x40753c0000000000 ## double 339.75
.quad 0x4075400000000000 ## double 340
.quad 0x4075700000000000 ## double 343
.quad 0x4075b80000000000 ## double 347.5
.quad 0x4075dc0000000000 ## double 349.75
.quad 0x407562147ae147ae ## double 342.13
.quad 0x4075380000000000 ## double 339.5
.quad 0x4075880000000000 ## double 344.5
.quad 0x4075300000000000 ## double 339
.quad 0x40757c0000000000 ## double 343.75
.quad 0x4075880000000000 ## double 344.5
.quad 0x4075940000000000 ## double 345.25
.quad 0x4075b00000000000 ## double 347
.quad 0x4075d80000000000 ## double 349.5
.quad 0x40762c0000000000 ## double 354.75
.quad 0x4076100000000000 ## double 353
.quad 0x4076380000000000 ## double 355.5
.quad 0x4076300000000000 ## double 355
.quad 0x4076940000000000 ## double 361.25
.quad 0x4076a00000000000 ## double 362
.quad 0x4076940000000000 ## double 361.25
.quad 0x4076680000000000 ## double 358.5
.quad 0x4076600000000000 ## double 358
.quad 0x40764c0000000000 ## double 356.75
.quad 0x4076080000000000 ## double 352.5
.quad 0x40763c0000000000 ## double 355.75
.quad 0x4076400000000000 ## double 356
.quad 0x4076340000000000 ## double 355.25
.quad 0x40761c0000000000 ## double 353.75
.quad 0x40766c0000000000 ## double 358.75
.quad 0x4076740000000000 ## double 359.25
.quad 0x4076940000000000 ## double 361.25
.quad 0x4076880000000000 ## double 360.5
.quad 0x4076a00000000000 ## double 362
.quad 0x4076c00000000000 ## double 364
.quad 0x4076ac0000000000 ## double 362.75
.quad 0x40768c0000000000 ## double 360.75
.quad 0x4076700000000000 ## double 359
.quad 0x4076600000000000 ## double 358
.quad 0x40767c0000000000 ## double 359.75
.quad 0x4076640000000000 ## double 358.25
.quad 0x4076800000000000 ## double 360
.quad 0x4076700000000000 ## double 359
.quad 0x4076440000000000 ## double 356.25
.quad 0x4076500000000000 ## double 357
.quad 0x4076780000000000 ## double 359.5
.quad 0x40769c0000000000 ## double 361.75
.quad 0x4076a40000000000 ## double 362.25
.quad 0x4076dc0000000000 ## double 365.75
.quad 0x4076c00000000000 ## double 364
.quad 0x4076b40000000000 ## double 363.25
.quad 0x4076cc0000000000 ## double 364.75
.quad 0x4076200000000000 ## double 354
.quad 0x4075d80000000000 ## double 349.5
.quad 0x4075e40000000000 ## double 350.25
.quad 0x4075ec0000000000 ## double 350.75
.quad 0x4075c80000000000 ## double 348.5
.quad 0x4075980000000000 ## double 345.5
.quad 0x40756c0000000000 ## double 342.75
.quad 0x4075240000000000 ## double 338.25
.quad 0x4075280000000000 ## double 338.5
.quad 0x4075340000000000 ## double 339.25
.quad 0x4075080000000000 ## double 336.5
.quad 0x40752c0000000000 ## double 338.75
.quad 0x4075380000000000 ## double 339.5
.quad 0x4074f80000000000 ## double 335.5
.quad 0x4074b00000000000 ## double 331
.quad 0x4074bc0000000000 ## double 331.75
.quad 0x4075140000000000 ## double 337.25
.quad 0x407546147ae147ae ## double 340.38
.quad 0x4075780000000000 ## double 343.5
.quad 0x4075380000000000 ## double 339.5
.quad 0x4075380000000000 ## double 339.5
.quad 0x4075200000000000 ## double 338
.quad 0x4075540000000000 ## double 341.25
.quad 0x40755c0000000000 ## double 341.75
.quad 0x4075180000000000 ## double 337.5
.quad 0x4074b00000000000 ## double 331
.quad 0x4074940000000000 ## double 329.25
.quad 0x40748c0000000000 ## double 328.75
.quad 0x4074800000000000 ## double 328
.quad 0x4074580000000000 ## double 325.5
.quad 0x4074100000000000 ## double 321
.quad 0x4074600000000000 ## double 326
.quad 0x40747c0000000000 ## double 327.75
.quad 0x40741c0000000000 ## double 321.75
.quad 0x4073dc0000000000 ## double 317.75
.quad 0x4073c00000000000 ## double 316
.quad 0x4073ac0000000000 ## double 314.75
.quad 0x4073a00000000000 ## double 314
.quad 0x40739c0000000000 ## double 313.75
.quad 0x4073b00000000000 ## double 315
.quad 0x4073c00000000000 ## double 316
.quad 0x4073dc0000000000 ## double 317.75
.quad 0x4073c80000000000 ## double 316.5
.quad 0x4073a00000000000 ## double 314
.quad 0x4073cc0000000000 ## double 316.75
.quad 0x4073f80000000000 ## double 319.5
.quad 0x4074080000000000 ## double 320.5
.quad 0x4073e40000000000 ## double 318.25
.quad 0x4073cc0000000000 ## double 316.75
.quad 0x4073fc0000000000 ## double 319.75
.quad 0x40740c0000000000 ## double 320.75
.quad 0x4074080000000000 ## double 320.5
.quad 0x4073e80000000000 ## double 318.5
.quad 0x4073bc0000000000 ## double 315.75
.quad 0x4073cc0000000000 ## double 316.75
.quad 0x4073ec0000000000 ## double 318.75
.quad 0x4073e40000000000 ## double 318.25
.quad 0x4073cc0000000000 ## double 316.75
.quad 0x4073bc0000000000 ## double 315.75
.quad 0x4073a00000000000 ## double 314
.quad 0x4073780000000000 ## double 311.5
.quad 0x4073cc0000000000 ## double 316.75
.quad 0x4073f00000000000 ## double 319
.quad 0x4073fe147ae147ae ## double 319.88
.quad 0x4073e00000000000 ## double 318
.quad 0x4073f40000000000 ## double 319.25
.quad 0x4073c00000000000 ## double 316
.quad 0x4073ac0000000000 ## double 314.75
.quad 0x4073b00000000000 ## double 315
.quad 0x4073a40000000000 ## double 314.25
.quad 0x4073500000000000 ## double 309
.quad 0x4073500000000000 ## double 309
.quad 0x4073480000000000 ## double 308.5
.quad 0x4072cc0000000000 ## double 300.75
.quad 0x4072c00000000000 ## double 300
.quad 0x4072c80000000000 ## double 300.5
.quad 0x40728c0000000000 ## double 296.75
.quad 0x4072500000000000 ## double 293
.quad 0x4072480000000000 ## double 292.5
.quad 0x4072740000000000 ## double 295.25
.quad 0x4072540000000000 ## double 293.25
.quad 0x4072800000000000 ## double 296
.quad 0x4072a80000000000 ## double 298.5
.quad 0x4072f00000000000 ## double 303
.quad 0x4072c00000000000 ## double 300
.quad 0x4072b00000000000 ## double 299
.quad 0x4072600000000000 ## double 294
.quad 0x4072880000000000 ## double 296.5
.quad 0x4072c80000000000 ## double 300.5
.quad 0x4072cc0000000000 ## double 300.75
.quad 0x4072bc0000000000 ## double 299.75
.quad 0x4072900000000000 ## double 297
.quad 0x4072c00000000000 ## double 300
.quad 0x4072940000000000 ## double 297.25
.quad 0x4072300000000000 ## double 291
.quad 0x40726c0000000000 ## double 294.75
.quad 0x4072740000000000 ## double 295.25
.quad 0x4072180000000000 ## double 289.5
.quad 0x4072780000000000 ## double 295.5
.quad 0x4072400000000000 ## double 292
.quad 0x4071ec0000000000 ## double 286.75
.quad 0x4072580000000000 ## double 293.5
.quad 0x40726e147ae147ae ## double 294.88
.quad 0x40726c0000000000 ## double 294.75
.quad 0x4072500000000000 ## double 293
.quad 0x40726c0000000000 ## double 294.75
.quad 0x4072a80000000000 ## double 298.5
.quad 0x4073100000000000 ## double 305
.quad 0x4073300000000000 ## double 307
.quad 0x40735c0000000000 ## double 309.75
.quad 0x4073400000000000 ## double 308
.quad 0x4072d00000000000 ## double 301
.quad 0x4072900000000000 ## double 297
.quad 0x4072780000000000 ## double 295.5
.quad 0x40724c0000000000 ## double 292.75
.quad 0x4072400000000000 ## double 292
.quad 0x4072340000000000 ## double 291.25
.quad 0x4072080000000000 ## double 288.5
.quad 0x4071900000000000 ## double 281
.quad 0x4071340000000000 ## double 275.25
.quad 0x407186147ae147ae ## double 280.38
.quad 0x4071cc0000000000 ## double 284.75
.quad 0x4071500000000000 ## double 277
.quad 0x4070f80000000000 ## double 271.5
.quad 0x4070a80000000000 ## double 266.5
.quad 0x4070e80000000000 ## double 270.5
.quad 0x4070f00000000000 ## double 271
.quad 0x4070ec0000000000 ## double 270.75
.quad 0x40714c0000000000 ## double 276.75
.quad 0x4071300000000000 ## double 275
.quad 0x4071200000000000 ## double 274
.quad 0x4070e40000000000 ## double 270.25
.quad 0x4070800000000000 ## double 264
.quad 0x4070940000000000 ## double 265.25
.quad 0x4070d80000000000 ## double 269.5
.quad 0x4070f80000000000 ## double 271.5
.quad 0x4070bc0000000000 ## double 267.75
.quad 0x4070e00000000000 ## double 270
.quad 0x4070c00000000000 ## double 268
.quad 0x40706c0000000000 ## double 262.75
.quad 0x406f400000000000 ## double 250
.quad 0x406e980000000000 ## double 244.75
.quad 0x406e580000000000 ## double 242.75
.quad 0x406e080000000000 ## double 240.25
.quad 0x406d680000000000 ## double 235.25
.quad 0x406cbc28f5c28f5c ## double 229.88
.quad 0x406cc00000000000 ## double 230
.quad 0x406d780000000000 ## double 235.75
.quad 0x406df80000000000 ## double 239.75
.quad 0x406eb00000000000 ## double 245.5
.quad 0x406f200000000000 ## double 249
.quad 0x406f380000000000 ## double 249.75
.quad 0x406f580000000000 ## double 250.75
.quad 0x406f500000000000 ## double 250.5
.quad 0x406f7c28f5c28f5c ## double 251.88
.quad 0x406fa80000000000 ## double 253.25
.quad 0x406fd80000000000 ## double 254.75
.quad 0x406fc00000000000 ## double 254
.quad 0x406fb80000000000 ## double 253.75
.quad 0x406fe00000000000 ## double 255
.quad 0x4070000000000000 ## double 256
.quad 0x4070280000000000 ## double 258.5
.quad 0x4070300000000000 ## double 259
.quad 0x4070240000000000 ## double 258.25
.quad 0x4070680000000000 ## double 262.5
.quad 0x4070400000000000 ## double 260
.quad 0x406ff00000000000 ## double 255.5
.quad 0x406fa00000000000 ## double 253
.quad 0x406f780000000000 ## double 251.75
.quad 0x406f800000000000 ## double 252
.quad 0x406fd00000000000 ## double 254.5
.quad 0x4070000000000000 ## double 256
.quad 0x406f900000000000 ## double 252.5
.quad 0x406ee80000000000 ## double 247.25
.quad 0x406f4c28f5c28f5c ## double 250.38
.quad 0x406fe80000000000 ## double 255.25
.quad 0x406fd80000000000 ## double 254.75
.quad 0x40703c0000000000 ## double 259.75
.quad 0x4070400000000000 ## double 260
.quad 0x4070500000000000 ## double 261
.quad 0x4070600000000000 ## double 262
.quad 0x4070640000000000 ## double 262.25
.quad 0x4070e00000000000 ## double 270
.quad 0x4071140000000000 ## double 273.25
.quad 0x40713c0000000000 ## double 275.75
.quad 0x4071000000000000 ## double 272
.quad 0x40710c0000000000 ## double 272.75
.quad 0x4070f00000000000 ## double 271
.quad 0x4070700000000000 ## double 263
.quad 0x4070400000000000 ## double 260
.quad 0x4070640000000000 ## double 262.25
.quad 0x4070cc0000000000 ## double 268.75
.quad 0x4070d80000000000 ## double 269.5
.quad 0x4071180000000000 ## double 273.5
.quad 0x4071140000000000 ## double 273.25
.quad 0x4071940000000000 ## double 281.25
.quad 0x4071cc0000000000 ## double 284.75
.quad 0x4071e80000000000 ## double 286.5
.quad 0x4071f00000000000 ## double 287
.quad 0x40717c0000000000 ## double 279.75
.quad 0x4070bc0000000000 ## double 267.75
.quad 0x4070480000000000 ## double 260.5
.quad 0x406ee80000000000 ## double 247.25
.quad 0x406ee80000000000 ## double 247.25
.quad 0x406f400000000000 ## double 250
.quad 0x406eb80000000000 ## double 245.75
.quad 0x406ff00000000000 ## double 255.5
.quad 0x4070c80000000000 ## double 268.5
.quad 0x4070f80000000000 ## double 271.5
.quad 0x4070f00000000000 ## double 271
.quad 0x4071280000000000 ## double 274.5
.quad 0x40716c0000000000 ## double 278.75
.quad 0x4072040000000000 ## double 288.25
.quad 0x4072180000000000 ## double 289.5
.quad 0x4072280000000000 ## double 290.5
.quad 0x407222147ae147ae ## double 290.13
.quad 0x4072300000000000 ## double 291
.quad 0x4071c00000000000 ## double 284
.quad 0x40726a147ae147ae ## double 294.63
.quad 0x4072b00000000000 ## double 299
.quad 0x4072c40000000000 ## double 300.25
.quad 0x4072d00000000000 ## double 301
.quad 0x4072bc0000000000 ## double 299.75
.quad 0x4073600000000000 ## double 310
.quad 0x4073780000000000 ## double 311.5
.quad 0x4073a80000000000 ## double 314.5
.quad 0x4073fa147ae147ae ## double 319.63
.quad 0x4074500000000000 ## double 325
.quad 0x40745c0000000000 ## double 325.75
.quad 0x4074380000000000 ## double 323.5
.quad 0x4074500000000000 ## double 325
.quad 0x4074700000000000 ## double 327
.quad 0x4074680000000000 ## double 326.5
.quad 0x4074b00000000000 ## double 331
.quad 0x4075100000000000 ## double 337
.quad 0x4074a80000000000 ## double 330.5
.quad 0x4074400000000000 ## double 324
.quad 0x4074300000000000 ## double 323
.quad 0x4074380000000000 ## double 323.5
.quad 0x4074ac0000000000 ## double 330.75
.quad 0x4075100000000000 ## double 337
.quad 0x4075480000000000 ## double 340.5
.quad 0x4075280000000000 ## double 338.5
.quad 0x40751c0000000000 ## double 337.75
.quad 0x4074f00000000000 ## double 335
.quad 0x4075100000000000 ## double 337
.quad 0x4074800000000000 ## double 328
.quad 0x40742c0000000000 ## double 322.75
.quad 0x4074300000000000 ## double 323
.quad 0x40743c0000000000 ## double 323.75
.quad 0x4074440000000000 ## double 324.25
.quad 0x4074400000000000 ## double 324
.quad 0x40741c0000000000 ## double 321.75
.quad 0x4074900000000000 ## double 329
.quad 0x40748c0000000000 ## double 328.75
.quad 0x4074c00000000000 ## double 332
.quad 0x4074680000000000 ## double 326.5
.quad 0x4074000000000000 ## double 320
.quad 0x407492147ae147ae ## double 329.13
.quad 0x4074c00000000000 ## double 332
.quad 0x4074ec0000000000 ## double 334.75
.quad 0x4074f80000000000 ## double 335.5
.quad 0x4075500000000000 ## double 341
.quad 0x4075900000000000 ## double 345
.quad 0x4075900000000000 ## double 345
.quad 0x4075d00000000000 ## double 349
.quad 0x4075c80000000000 ## double 348.5
.quad 0x4076080000000000 ## double 352.5
.quad 0x4076280000000000 ## double 354.5
.quad 0x40762c0000000000 ## double 354.75
.quad 0x4075dc0000000000 ## double 349.75
.quad 0x4075ec0000000000 ## double 350.75
.quad 0x4076000000000000 ## double 352
.quad 0x4076300000000000 ## double 355
.quad 0x4076300000000000 ## double 355
.quad 0x4075f80000000000 ## double 351.5
.quad 0x4075f80000000000 ## double 351.5
.quad 0x4075940000000000 ## double 345.25
.quad 0x4075380000000000 ## double 339.5
.quad 0x4075ac0000000000 ## double 346.75
.quad 0x4075ac0000000000 ## double 346.75
.quad 0x4075700000000000 ## double 343
.quad 0x40758c0000000000 ## double 344.75
.quad 0x40758c0000000000 ## double 344.75
.quad 0x4075b40000000000 ## double 347.25
.quad 0x40759c0000000000 ## double 345.75
.quad 0x4076200000000000 ## double 354
.quad 0x4076700000000000 ## double 359
.quad 0x4076500000000000 ## double 357
.quad 0x4076480000000000 ## double 356.5
.quad 0x4076500000000000 ## double 357
.quad 0x407632147ae147ae ## double 355.13
.quad 0x4078300000000000 ## double 387
.quad 0x4077d80000000000 ## double 381.5
.quad 0x4077780000000000 ## double 375.5
.quad 0x4077800000000000 ## double 376
.quad 0x40771c0000000000 ## double 369.75
.quad 0x4077280000000000 ## double 370.5
.quad 0x4077180000000000 ## double 369.5
.quad 0x40770c0000000000 ## double 368.75
.quad 0x4077100000000000 ## double 369
.quad 0x4077040000000000 ## double 368.25
.quad 0x4076dc0000000000 ## double 365.75
.quad 0x4076c80000000000 ## double 364.5
.quad 0x4076840000000000 ## double 360.25
.quad 0x4076ac0000000000 ## double 362.75
.quad 0x4076980000000000 ## double 361.5
.quad 0x4076780000000000 ## double 359.5
.quad 0x4076800000000000 ## double 360
.quad 0x4076e00000000000 ## double 366
.quad 0x4076f00000000000 ## double 367
.quad 0x40768c0000000000 ## double 360.75
.quad 0x4076280000000000 ## double 354.5
.quad 0x4076600000000000 ## double 358
.quad 0x4076700000000000 ## double 359
.quad 0x4076840000000000 ## double 360.25
.quad 0x40764c0000000000 ## double 356.75
.quad 0x4076400000000000 ## double 356
.quad 0x4076980000000000 ## double 361.5
.quad 0x4076700000000000 ## double 359
.quad 0x4076800000000000 ## double 360
.quad 0x4076580000000000 ## double 357.5
.quad 0x4076700000000000 ## double 359
.quad 0x4076780000000000 ## double 359.5
.quad 0x4076900000000000 ## double 361
.quad 0x4076580000000000 ## double 357.5
.quad 0x4075f00000000000 ## double 351
.quad 0x4075fc0000000000 ## double 351.75
.quad 0x4075e00000000000 ## double 350
.quad 0x4076280000000000 ## double 354.5
.quad 0x4076540000000000 ## double 357.25
.quad 0x4076b80000000000 ## double 363.5
.quad 0x4076bc0000000000 ## double 363.75
.quad 0x4076bc0000000000 ## double 363.75
.quad 0x4076c80000000000 ## double 364.5
.quad 0x4076dc0000000000 ## double 365.75
.quad 0x4077080000000000 ## double 368.5
.quad 0x40770c0000000000 ## double 368.75
.quad 0x4077080000000000 ## double 368.5
.quad 0x4076d40000000000 ## double 365.25
.quad 0x4076940000000000 ## double 361.25
.quad 0x4076940000000000 ## double 361.25
.quad 0x4076880000000000 ## double 360.5
.quad 0x4076800000000000 ## double 360
.quad 0x4076a40000000000 ## double 362.25
.quad 0x4076780000000000 ## double 359.5
.quad 0x4076800000000000 ## double 360
.quad 0x4076b80000000000 ## double 363.5
.quad 0x4076d00000000000 ## double 365
.quad 0x4076d00000000000 ## double 365
.quad 0x4076480000000000 ## double 356.5
.quad 0x4076540000000000 ## double 357.25
.quad 0x40766c0000000000 ## double 358.75
.quad 0x4076100000000000 ## double 353
.quad 0x4076180000000000 ## double 353.5
.quad 0x4076400000000000 ## double 356
.quad 0x40763c0000000000 ## double 355.75
.quad 0x4076180000000000 ## double 353.5
.quad 0x4075bc0000000000 ## double 347.75
.quad 0x4075de147ae147ae ## double 349.88
.quad 0x4075bc0000000000 ## double 347.75
.quad 0x40759c0000000000 ## double 345.75
.quad 0x4075e80000000000 ## double 350.5
.quad 0x4075d00000000000 ## double 349
.quad 0x4075ac0000000000 ## double 346.75
.quad 0x4075980000000000 ## double 345.5
.quad 0x4075a00000000000 ## double 346
.quad 0x4075b00000000000 ## double 347
.quad 0x4075c40000000000 ## double 348.25
.quad 0x4075f00000000000 ## double 351
.quad 0x4076400000000000 ## double 356
.quad 0x4076580000000000 ## double 357.5
.quad 0x4076840000000000 ## double 360.25
.quad 0x4076700000000000 ## double 359
.quad 0x4076100000000000 ## double 353
.quad 0x4075d80000000000 ## double 349.5
.quad 0x4075b80000000000 ## double 347.5
.quad 0x4075bc0000000000 ## double 347.75
.quad 0x4075b40000000000 ## double 347.25
.quad 0x4075980000000000 ## double 345.5
.quad 0x4075a80000000000 ## double 346.5
.quad 0x4075d80000000000 ## double 349.5
.quad 0x4075280000000000 ## double 338.5
.quad 0x4075180000000000 ## double 337.5
.quad 0x4075480000000000 ## double 340.5
.quad 0x4075580000000000 ## double 341.5
.quad 0x4075940000000000 ## double 345.25
.quad 0x4075980000000000 ## double 345.5
.quad 0x4075de147ae147ae ## double 349.88
.quad 0x4075c00000000000 ## double 348
.quad 0x4075a00000000000 ## double 346
.quad 0x40755c0000000000 ## double 341.75
.quad 0x4075a80000000000 ## double 346.5
.quad 0x4075ec0000000000 ## double 350.75
.quad 0x4075d80000000000 ## double 349.5
.quad 0x4075ae147ae147ae ## double 346.88
.quad 0x4075840000000000 ## double 344.25
.quad 0x4075580000000000 ## double 341.5
.quad 0x4075380000000000 ## double 339.5
.quad 0x40752c0000000000 ## double 338.75
.quad 0x4074f00000000000 ## double 335
.quad 0x4074a80000000000 ## double 330.5
.quad 0x4074e40000000000 ## double 334.25
.quad 0x4075180000000000 ## double 337.5
.quad 0x4075100000000000 ## double 337
.quad 0x4075280000000000 ## double 338.5
.quad 0x4074e80000000000 ## double 334.5
.quad 0x4074e00000000000 ## double 334
.quad 0x4074bc0000000000 ## double 331.75
.quad 0x4074680000000000 ## double 326.5
.quad 0x4074000000000000 ## double 320
.quad 0x4074300000000000 ## double 323
.quad 0x4073c40000000000 ## double 316.25
.quad 0x4073e80000000000 ## double 318.5
.quad 0x4074180000000000 ## double 321.5
.quad 0x4074200000000000 ## double 322
.quad 0x4074500000000000 ## double 325
.quad 0x4074600000000000 ## double 326
.quad 0x4074c00000000000 ## double 332
.quad 0x4074880000000000 ## double 328.5
.quad 0x4074400000000000 ## double 324
.quad 0x4074c00000000000 ## double 332
.quad 0x4074d00000000000 ## double 333
.quad 0x4075000000000000 ## double 336
.quad 0x4074f80000000000 ## double 335.5
.quad 0x4075900000000000 ## double 345
.quad 0x4075f80000000000 ## double 351.5
.quad 0x4076c80000000000 ## double 364.5
.quad 0x4075c00000000000 ## double 348
.quad 0x4075340000000000 ## double 339.25
.quad 0x4075380000000000 ## double 339.5
.quad 0x4074e80000000000 ## double 334.5
.quad 0x40749c0000000000 ## double 329.75
.quad 0x4074580000000000 ## double 325.5
.quad 0x4074680000000000 ## double 326.5
.quad 0x4073b80000000000 ## double 315.5
.quad 0x4073f00000000000 ## double 319
.quad 0x4073dc0000000000 ## double 317.75
.quad 0x4073e00000000000 ## double 318
.quad 0x4073a40000000000 ## double 314.25
.quad 0x4073ac0000000000 ## double 314.75
.quad 0x4073940000000000 ## double 313.25
.quad 0x4073500000000000 ## double 309
.quad 0x4073700000000000 ## double 311
.quad 0x4073900000000000 ## double 313
.quad 0x4073a40000000000 ## double 314.25
.quad 0x4073e80000000000 ## double 318.5
.quad 0x4073f40000000000 ## double 319.25
.quad 0x4074080000000000 ## double 320.5
.quad 0x4074180000000000 ## double 321.5
.quad 0x4074000000000000 ## double 320
.quad 0x4074400000000000 ## double 324
.quad 0x4074280000000000 ## double 322.5
.quad 0x4074940000000000 ## double 329.25
.quad 0x4074900000000000 ## double 329
.quad 0x4074780000000000 ## double 327.5
.quad 0x4074400000000000 ## double 324
.quad 0x4074400000000000 ## double 324
.quad 0x40743c0000000000 ## double 323.75
.quad 0x4074900000000000 ## double 329
.quad 0x4074d00000000000 ## double 333
.quad 0x4074c80000000000 ## double 332.5
.quad 0x4074ee147ae147ae ## double 334.88
.quad 0x4074b80000000000 ## double 331.5
.quad 0x4074840000000000 ## double 328.25
.quad 0x40749c0000000000 ## double 329.75
.quad 0x4074b80000000000 ## double 331.5
.quad 0x4074c80000000000 ## double 332.5
.quad 0x4074bc0000000000 ## double 331.75
.quad 0x4074800000000000 ## double 328
.quad 0x4074780000000000 ## double 327.5
.quad 0x4074880000000000 ## double 328.5
.quad 0x4074a40000000000 ## double 330.25
.quad 0x4074880000000000 ## double 328.5
.quad 0x4074480000000000 ## double 324.5
.quad 0x4073e00000000000 ## double 318
.quad 0x4073c00000000000 ## double 316
.quad 0x4073980000000000 ## double 313.5
.quad 0x4073900000000000 ## double 313
.quad 0x40734c0000000000 ## double 308.75
.quad 0x40734c0000000000 ## double 308.75
.quad 0x40734c0000000000 ## double 308.75
.quad 0x4073740000000000 ## double 311.25
.quad 0x4073800000000000 ## double 312
.quad 0x4073800000000000 ## double 312
.quad 0x4073a80000000000 ## double 314.5
.quad 0x40739c0000000000 ## double 313.75
.quad 0x4073800000000000 ## double 312
.quad 0x4073580000000000 ## double 309.5
.quad 0x4073500000000000 ## double 309
.quad 0x4073700000000000 ## double 311
.quad 0x4073900000000000 ## double 313
.quad 0x4073c00000000000 ## double 316
.quad 0x4073b80000000000 ## double 315.5
.quad 0x4073780000000000 ## double 311.5
.quad 0x4073640000000000 ## double 310.25
.quad 0x4073580000000000 ## double 309.5
.quad 0x4073300000000000 ## double 307
.quad 0x4073180000000000 ## double 305.5
.quad 0x40731c0000000000 ## double 305.75
.quad 0x40730c0000000000 ## double 304.75
.quad 0x4072dc0000000000 ## double 301.75
.quad 0x4072f00000000000 ## double 303
.quad 0x40727c0000000000 ## double 295.75
.quad 0x4072c00000000000 ## double 300
.quad 0x4072f80000000000 ## double 303.5
.quad 0x40733c0000000000 ## double 307.75
.quad 0x4072f80000000000 ## double 303.5
.quad 0x4072bc0000000000 ## double 299.75
.quad 0x4072e80000000000 ## double 302.5
.quad 0x4072e80000000000 ## double 302.5
.quad 0x4072a80000000000 ## double 298.5
.quad 0x4072800000000000 ## double 296
.quad 0x4072940000000000 ## double 297.25
.quad 0x4072a00000000000 ## double 298
.quad 0x4072a80000000000 ## double 298.5
.quad 0x4072b80000000000 ## double 299.5
.quad 0x4072980000000000 ## double 297.5
.quad 0x4072880000000000 ## double 296.5
.quad 0x4072e00000000000 ## double 302
.quad 0x4072c00000000000 ## double 300
.quad 0x4073280000000000 ## double 306.5
.quad 0x4073680000000000 ## double 310.5
.quad 0x4073680000000000 ## double 310.5
.quad 0x40732c0000000000 ## double 306.75
.quad 0x4072f00000000000 ## double 303
.quad 0x4072a00000000000 ## double 298
.quad 0x4072b00000000000 ## double 299
.quad 0x40728c0000000000 ## double 296.75
.quad 0x4072900000000000 ## double 297
.quad 0x4072940000000000 ## double 297.25
.quad 0x4072980000000000 ## double 297.5
.quad 0x4072940000000000 ## double 297.25
.quad 0x4072c00000000000 ## double 300
.quad 0x4072c80000000000 ## double 300.5
.quad 0x4072c00000000000 ## double 300
.quad 0x4072c80000000000 ## double 300.5
.quad 0x4072f00000000000 ## double 303
.quad 0x4072fc0000000000 ## double 303.75
.quad 0x4072f00000000000 ## double 303
.quad 0x4072c00000000000 ## double 300
.quad 0x4073900000000000 ## double 313
.quad 0x40738c0000000000 ## double 312.75
.quad 0x4073580000000000 ## double 309.5
.quad 0x4073480000000000 ## double 308.5
.quad 0x4072fc0000000000 ## double 303.75
.quad 0x4073600000000000 ## double 310
.quad 0x4073700000000000 ## double 311
.quad 0x40732c0000000000 ## double 306.75
.quad 0x4072f80000000000 ## double 303.5
.quad 0x4073380000000000 ## double 307.5
.quad 0x4073800000000000 ## double 312
.quad 0x4073b40000000000 ## double 315.25
.quad 0x4073d00000000000 ## double 317
.quad 0x4073e00000000000 ## double 318
.quad 0x4074180000000000 ## double 321.5
.quad 0x4074400000000000 ## double 324
.quad 0x4074200000000000 ## double 322
.quad 0x40741e147ae147ae ## double 321.88
.quad 0x4074980000000000 ## double 329.5
.quad 0x4074600000000000 ## double 326
.quad 0x4073fc0000000000 ## double 319.75
.quad 0x4073e00000000000 ## double 318
.quad 0x4073fc0000000000 ## double 319.75
.quad 0x4074780000000000 ## double 327.5
.quad 0x4074900000000000 ## double 329
.quad 0x4074880000000000 ## double 328.5
.quad 0x4074840000000000 ## double 328.25
.quad 0x4074ac0000000000 ## double 330.75
.quad 0x4074a40000000000 ## double 330.25
.quad 0x4074ec0000000000 ## double 334.75
.quad 0x4074ec0000000000 ## double 334.75
.quad 0x4074e80000000000 ## double 334.5
.quad 0x4074b00000000000 ## double 331
.quad 0x4074800000000000 ## double 328
.quad 0x40747e147ae147ae ## double 327.88
.quad 0x4074680000000000 ## double 326.5
.quad 0x4074500000000000 ## double 325
.quad 0x40747c0000000000 ## double 327.75
.quad 0x4074740000000000 ## double 327.25
.quad 0x4074700000000000 ## double 327
.quad 0x4074400000000000 ## double 324
.quad 0x4074000000000000 ## double 320
.quad 0x4073c00000000000 ## double 316
.quad 0x4073d00000000000 ## double 317
.quad 0x4073d00000000000 ## double 317
.quad 0x4073d00000000000 ## double 317
.quad 0x40735c0000000000 ## double 309.75
.quad 0x4073380000000000 ## double 307.5
.quad 0x4073a80000000000 ## double 314.5
.quad 0x4073cc0000000000 ## double 316.75
.quad 0x4073d80000000000 ## double 317.5
.quad 0x4074380000000000 ## double 323.5
.quad 0x4074700000000000 ## double 327
.quad 0x4074980000000000 ## double 329.5
.quad 0x4074700000000000 ## double 327
.quad 0x4073f00000000000 ## double 319
.quad 0x4073e80000000000 ## double 318.5
.quad 0x40745c0000000000 ## double 325.75
.quad 0x40747c0000000000 ## double 327.75
.quad 0x4074d00000000000 ## double 333
.quad 0x4074b00000000000 ## double 331
.quad 0x4074c40000000000 ## double 332.25
.quad 0x4074c80000000000 ## double 332.5
.quad 0x40746c0000000000 ## double 326.75
.quad 0x4074a80000000000 ## double 330.5
.quad 0x4074f00000000000 ## double 335
.quad 0x4074f80000000000 ## double 335.5
.quad 0x4075100000000000 ## double 337
.quad 0x4075100000000000 ## double 337
.quad 0x4075100000000000 ## double 337
.quad 0x4074e00000000000 ## double 334
.quad 0x4075180000000000 ## double 337.5
.quad 0x40750c0000000000 ## double 336.75
.quad 0x4075180000000000 ## double 337.5
.quad 0x4075380000000000 ## double 339.5
.quad 0x4075200000000000 ## double 338
.quad 0x4075200000000000 ## double 338
.quad 0x4075500000000000 ## double 341
.quad 0x4075580000000000 ## double 341.5
.quad 0x4075580000000000 ## double 341.5
.quad 0x4075300000000000 ## double 339
.quad 0x4075080000000000 ## double 336.5
.quad 0x4075180000000000 ## double 337.5
.quad 0x4075280000000000 ## double 338.5
.quad 0x4075480000000000 ## double 340.5
.quad 0x4075480000000000 ## double 340.5
.quad 0x40757a147ae147ae ## double 343.63
.quad 0x4075a80000000000 ## double 346.5
.quad 0x40758c0000000000 ## double 344.75
.quad 0x4075900000000000 ## double 345
.quad 0x4075580000000000 ## double 341.5
.quad 0x40758c0000000000 ## double 344.75
.quad 0x4075780000000000 ## double 343.5
.quad 0x4075380000000000 ## double 339.5
.quad 0x4074f80000000000 ## double 335.5
.quad 0x4075500000000000 ## double 341
.quad 0x4075200000000000 ## double 338
.quad 0x4075080000000000 ## double 336.5
.quad 0x4075080000000000 ## double 336.5
.quad 0x4075800000000000 ## double 344
.quad 0x4075480000000000 ## double 340.5
.quad 0x4074c00000000000 ## double 332
.quad 0x4075100000000000 ## double 337
.quad 0x4075a00000000000 ## double 346
.quad 0x4075800000000000 ## double 344
.quad 0x4075ac0000000000 ## double 346.75
.quad 0x4076100000000000 ## double 353
.quad 0x4076600000000000 ## double 358
.quad 0x4076480000000000 ## double 356.5
.quad 0x4076880000000000 ## double 360.5
.quad 0x40766c0000000000 ## double 358.75
.quad 0x4076a80000000000 ## double 362.5
.quad 0x4076cc0000000000 ## double 364.75
.quad 0x4076d00000000000 ## double 365
.quad 0x4076600000000000 ## double 358
.quad 0x4076000000000000 ## double 352
.quad 0x4076400000000000 ## double 356
.quad 0x40767c0000000000 ## double 359.75
.quad 0x4076880000000000 ## double 360.5
.quad 0x4076500000000000 ## double 357
.quad 0x4076600000000000 ## double 358
.quad 0x40767c0000000000 ## double 359.75
.quad 0x4076680000000000 ## double 358.5
.quad 0x4075e00000000000 ## double 350
.quad 0x4076600000000000 ## double 358
.quad 0x4076700000000000 ## double 359
.quad 0x40773c0000000000 ## double 371.75
.quad 0x40773c0000000000 ## double 371.75
.quad 0x40773c0000000000 ## double 371.75
.quad 0x4077280000000000 ## double 370.5
.quad 0x4077180000000000 ## double 369.5
.quad 0x4077280000000000 ## double 370.5
.quad 0x4077700000000000 ## double 375
.quad 0x4077700000000000 ## double 375
.quad 0x4076500000000000 ## double 357
.quad 0x40759c0000000000 ## double 345.75
.quad 0x4074f80000000000 ## double 335.5
.quad 0x4074dc0000000000 ## double 333.75
.quad 0x4074980000000000 ## double 329.5
.quad 0x4074840000000000 ## double 328.25
.quad 0x4074e80000000000 ## double 334.5
.quad 0x4074a00000000000 ## double 330
.quad 0x40746c0000000000 ## double 326.75
.quad 0x4074840000000000 ## double 328.25
.quad 0x4074f00000000000 ## double 335
.quad 0x4074f40000000000 ## double 335.25
.quad 0x40750c0000000000 ## double 336.75
.quad 0x4075280000000000 ## double 338.5
.quad 0x4075700000000000 ## double 343
.quad 0x4075800000000000 ## double 344
.quad 0x40757e147ae147ae ## double 343.88
.quad 0x4075940000000000 ## double 345.25
.quad 0x4075940000000000 ## double 345.25
.quad 0x4075d80000000000 ## double 349.5
.quad 0x4075ec0000000000 ## double 350.75
.quad 0x4075740000000000 ## double 343.25
.quad 0x40752c0000000000 ## double 338.75
.quad 0x4074e80000000000 ## double 334.5
.quad 0x4074b00000000000 ## double 331
.quad 0x40749c0000000000 ## double 329.75
.quad 0x4074b80000000000 ## double 331.5
.quad 0x4074780000000000 ## double 327.5
.quad 0x4083f80000000000 ## double 639
.quad 0x4084060000000000 ## double 640.75
.quad 0x4084200000000000 ## double 644
.quad 0x4084240000000000 ## double 644.5
.quad 0x4084380000000000 ## double 647
.quad 0x40844f0a3d70a3d7 ## double 649.88
.quad 0x40844c0000000000 ## double 649.5
.quad 0x4084400000000000 ## double 648
.quad 0x4084180000000000 ## double 643
.quad 0x4083f40000000000 ## double 638.5
.quad 0x4083f40000000000 ## double 638.5
.quad 0x4084400000000000 ## double 648
.quad 0x4083ec0000000000 ## double 637.5
.quad 0x4083c00000000000 ## double 632
.quad 0x4083280000000000 ## double 613
.quad 0x4082e40000000000 ## double 604.5
.quad 0x4082be0000000000 ## double 599.75
.quad 0x4082a00000000000 ## double 596
.quad 0x4082680000000000 ## double 589
.quad 0x4082980000000000 ## double 595
.quad 0x4082640000000000 ## double 588.5
.quad 0x4082680000000000 ## double 589
.quad 0x4082800000000000 ## double 592
.quad 0x4082b80000000000 ## double 599
.quad 0x4082880000000000 ## double 593
.quad 0x4082510a3d70a3d7 ## double 586.13
.quad 0x4082940000000000 ## double 594.5
.quad 0x4082a80000000000 ## double 597
.quad 0x40828e0000000000 ## double 593.75
.quad 0x4082300000000000 ## double 582
.quad 0x4082700000000000 ## double 590
.quad 0x4082700000000000 ## double 590
.quad 0x4081dc0000000000 ## double 571.5
.quad 0x4081ee0000000000 ## double 573.75
.quad 0x4082200000000000 ## double 580
.quad 0x4082500000000000 ## double 586
.quad 0x4082a40000000000 ## double 596.5
.quad 0x4082900000000000 ## double 594
.quad 0x4082740000000000 ## double 590.5
.quad 0x4082aa0000000000 ## double 597.25
.quad 0x4082c00000000000 ## double 600
.quad 0x4082980000000000 ## double 595
.quad 0x4082600000000000 ## double 588
.quad 0x4082180000000000 ## double 579
.quad 0x4082840000000000 ## double 592.5
.quad 0x4082500000000000 ## double 586
.quad 0x4082080000000000 ## double 577
.quad 0x4081fc0000000000 ## double 575.5
.quad 0x40827c0000000000 ## double 591.5
.quad 0x4082840000000000 ## double 592.5
.quad 0x40827c0000000000 ## double 591.5
.quad 0x4082940000000000 ## double 594.5
.quad 0x4082d00000000000 ## double 602
.quad 0x4082be0000000000 ## double 599.75
.quad 0x4082f00000000000 ## double 606
.quad 0x4083240000000000 ## double 612.5
.quad 0x4083600000000000 ## double 620
.quad 0x4083660000000000 ## double 620.75
.quad 0x4083540000000000 ## double 618.5
.quad 0x40837c0000000000 ## double 623.5
.quad 0x4083900000000000 ## double 626
.quad 0x4083580000000000 ## double 619
.quad 0x4083800000000000 ## double 624
.quad 0x4083800000000000 ## double 624
.quad 0x4083440000000000 ## double 616.5
.quad 0x4083860000000000 ## double 624.75
.quad 0x4083900000000000 ## double 626
.quad 0x4083640000000000 ## double 620.5
.quad 0x4083300000000000 ## double 614
.quad 0x4082e40000000000 ## double 604.5
.quad 0x4082d80000000000 ## double 603
.quad 0x4082b00000000000 ## double 598
.quad 0x4082bc0000000000 ## double 599.5
.quad 0x4082ce0000000000 ## double 601.75
.quad 0x4083400000000000 ## double 616
.quad 0x40839a0000000000 ## double 627.25
.quad 0x40839e0000000000 ## double 627.75
.quad 0x4083960000000000 ## double 626.75
.quad 0x4083940000000000 ## double 626.5
.quad 0x4083580000000000 ## double 619
.quad 0x4083700000000000 ## double 622
.quad 0x4083880000000000 ## double 625
.quad 0x4083900000000000 ## double 626
.quad 0x4083780000000000 ## double 623
.quad 0x4083c40000000000 ## double 632.5
.quad 0x4084300000000000 ## double 646
.quad 0x4084400000000000 ## double 648
.quad 0x4084240000000000 ## double 644.5
.quad 0x4084380000000000 ## double 647
.quad 0x4084380000000000 ## double 647
.quad 0x4084400000000000 ## double 648
.quad 0x4084180000000000 ## double 643
.quad 0x4083c80000000000 ## double 633
.quad 0x4083cc0000000000 ## double 633.5
.quad 0x4083b00000000000 ## double 630
.quad 0x4083400000000000 ## double 616
.quad 0x40833e0000000000 ## double 615.75
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
.section __DATA,__data
.globl _gDataHigh ; @gDataHigh
.p2align 3
_gDataHigh:
.quad 0x405beb851eb851ec ; double 111.68000000000001
.quad 0x405ba00000000000 ; double 110.5
.quad 0x405b870a3d70a3d7 ; double 110.11
.quad 0x405bc66666666666 ; double 111.09999999999999
.quad 0x405c2ae147ae147b ; double 112.67
.quad 0x405bdae147ae147b ; double 111.42
.quad 0x405c366666666666 ; double 112.84999999999999
.quad 0x405c49999999999a ; double 113.15000000000001
.quad 0x405c7ccccccccccd ; double 113.95
.quad 0x405c600000000000 ; double 113.5
.quad 0x405c9f5c28f5c28f ; double 114.48999999999999
.quad 0x405ca66666666666 ; double 114.59999999999999
.quad 0x405c8ae147ae147b ; double 114.17
.quad 0x405c78f5c28f5c29 ; double 113.89
.quad 0x405c8b851eb851ec ; double 114.18000000000001
.quad 0x405c833333333333 ; double 114.05
.quad 0x405c2eb851eb851f ; double 112.73
.quad 0x405cae147ae147ae ; double 114.72
.quad 0x405cf00000000000 ; double 115.75
.quad 0x405d7ccccccccccd ; double 117.95
.quad 0x405db47ae147ae14 ; double 118.81999999999999
.quad 0x405d9851eb851eb8 ; double 118.38
.quad 0x405d400000000000 ; double 117
.quad 0x405d1b851eb851ec ; double 116.43000000000001
.quad 0x405cf70a3d70a3d7 ; double 115.86
.quad 0x405d1eb851eb851f ; double 116.48
.quad 0x405bf851eb851eb8 ; double 111.88
.quad 0x405bde147ae147ae ; double 111.47
.quad 0x405b828f5c28f5c3 ; double 110.04000000000001
.quad 0x405b4ccccccccccd ; double 109.2
.quad 0x405b69999999999a ; double 109.65000000000001
.quad 0x405b666666666666 ; double 109.59999999999999
.quad 0x405b49999999999a ; double 109.15000000000001
.quad 0x405b566666666666 ; double 109.34999999999999
.quad 0x405b6a3d70a3d70a ; double 109.66
.quad 0x405b18f5c28f5c29 ; double 108.39
.quad 0x405ac51eb851eb85 ; double 107.08
.quad 0x405a733333333333 ; double 105.8
.quad 0x405aa51eb851eb85 ; double 106.58
.quad 0x405abae147ae147b ; double 106.92
.quad 0x405a5e147ae147ae ; double 105.47
.quad 0x405a83d70a3d70a4 ; double 106.06
.quad 0x405a7147ae147ae1 ; double 105.77
.quad 0x405a9c28f5c28f5c ; double 106.44
.quad 0x405ac00000000000 ; double 107
.quad 0x405ac147ae147ae1 ; double 107.02
.quad 0x405aaa3d70a3d70a ; double 106.66
.quad 0x405a69999999999a ; double 105.65000000000001
.quad 0x405a4c28f5c28f5c ; double 105.19
.quad 0x405a0f5c28f5c28f ; double 104.23999999999999
.quad 0x4059d0a3d70a3d71 ; double 103.26000000000001
.quad 0x4059cb851eb851ec ; double 103.18000000000001
.quad 0x405a000000000000 ; double 104
.quad 0x4059cd70a3d70a3d ; double 103.20999999999999
.quad 0x4059cae147ae147b ; double 103.17
.quad 0x405a60a3d70a3d71 ; double 105.51000000000001
.quad 0x405aa33333333333 ; double 106.55
.quad 0x405aa5c28f5c28f6 ; double 106.59
.quad 0x405acf5c28f5c28f ; double 107.23999999999999
.quad 0x405aeae147ae147b ; double 107.67
.quad 0x405ac66666666666 ; double 107.09999999999999
.quad 0x405aa851eb851eb8 ; double 106.63
.quad 0x405a5b851eb851ec ; double 105.43000000000001
.quad 0x405a7eb851eb851f ; double 105.98
.quad 0x405ac00000000000 ; double 107
.quad 0x405ad00000000000 ; double 107.25
.quad 0x405b000000000000 ; double 108
.quad 0x405b033333333333 ; double 108.05
.quad 0x405a8eb851eb851f ; double 106.23
.quad 0x405a78f5c28f5c29 ; double 105.89
.quad 0x405a733333333333 ; double 105.8
.quad 0x405a900000000000 ; double 106.25
.quad 0x405a7f5c28f5c28f ; double 105.98999999999999
.quad 0x405a470a3d70a3d7 ; double 105.11
.quad 0x405a300000000000 ; double 104.75
.quad 0x4059d8f5c28f5c29 ; double 103.39
.quad 0x4059cae147ae147b ; double 103.17
.quad 0x4059c1eb851eb852 ; double 103.03
.quad 0x4059c00000000000 ; double 103
.quad 0x4059c8f5c28f5c29 ; double 103.14
.quad 0x4059cae147ae147b ; double 103.17
.quad 0x4059c00000000000 ; double 103
.quad 0x40594ae147ae147b ; double 101.17
.quad 0x4059600000000000 ; double 101.5
.quad 0x40596ccccccccccd ; double 101.7
.quad 0x4058f9999999999a ; double 99.900000000000005
.quad 0x4057eeb851eb851f ; double 95.730000000000003
.quad 0x4057c9999999999a ; double 95.150000000000005
.quad 0x4057b7ae147ae148 ; double 94.870000000000005
.quad 0x4057fa3d70a3d70a ; double 95.909999999999996
.quad 0x40586a3d70a3d70a ; double 97.659999999999996
.quad 0x4058133333333333 ; double 96.299999999999997
.quad 0x4057f00000000000 ; double 95.75
.quad 0x4057f47ae147ae14 ; double 95.819999999999993
.quad 0x40582ccccccccccd ; double 96.700000000000002
.quad 0x4058333333333333 ; double 96.799999999999997
.quad 0x40583c28f5c28f5c ; double 96.939999999999998
.quad 0x4058300000000000 ; double 96.75
.quad 0x40581ccccccccccd ; double 96.450000000000002
.quad 0x40580eb851eb851f ; double 96.230000000000003
.quad 0x4057e00000000000 ; double 95.5
.quad 0x4057b00000000000 ; double 94.75
.quad 0x4057c70a3d70a3d7 ; double 95.109999999999999
.quad 0x4057c147ae147ae1 ; double 95.019999999999996
.quad 0x4057c1eb851eb852 ; double 95.030000000000001
.quad 0x4057d00000000000 ; double 95.25
.quad 0x4057ce147ae147ae ; double 95.219999999999998
.quad 0x4057dc28f5c28f5c ; double 95.439999999999998
.quad 0x4057f3d70a3d70a4 ; double 95.810000000000002
.quad 0x4057a1eb851eb852 ; double 94.530000000000001
.quad 0x4057947ae147ae14 ; double 94.319999999999993
.quad 0x40579ccccccccccd ; double 94.450000000000002
.quad 0x40577e147ae147ae ; double 93.969999999999998
.quad 0x405781eb851eb852 ; double 94.030000000000001
.quad 0x40579e147ae147ae ; double 94.469999999999998
.quad 0x40578ae147ae147b ; double 94.170000000000001
.quad 0x4057800000000000 ; double 94
.quad 0x4057b47ae147ae14 ; double 94.819999999999993
.quad 0x4057b66666666666 ; double 94.849999999999994
.quad 0x405778f5c28f5c29 ; double 93.89
.quad 0x405709999999999a ; double 92.150000000000005
.quad 0x405717ae147ae148 ; double 92.370000000000005
.quad 0x4057433333333333 ; double 93.049999999999997
.quad 0x405785c28f5c28f6 ; double 94.090000000000003
.quad 0x405800a3d70a3d71 ; double 96.010000000000005
.quad 0x4058770a3d70a3d7 ; double 97.859999999999999
.quad 0x40589c28f5c28f5c ; double 98.439999999999998
.quad 0x4058e00000000000 ; double 99.5
.quad 0x4058d7ae147ae148 ; double 99.370000000000005
.quad 0x4058dd70a3d70a3d ; double 99.459999999999993
.quad 0x4058d00000000000 ; double 99.25
.quad 0x4058e147ae147ae1 ; double 99.519999999999996
.quad 0x4058db851eb851ec ; double 99.430000000000007
.quad 0x4058af5c28f5c28f ; double 98.739999999999994
.quad 0x4058cccccccccccd ; double 99.200000000000002
.quad 0x4058eccccccccccd ; double 99.700000000000002
.quad 0x4058ef5c28f5c28f ; double 99.739999999999994
.quad 0x4059170a3d70a3d7 ; double 100.36
.quad 0x405919999999999a ; double 100.40000000000001
.quad 0x40591c28f5c28f5c ; double 100.44
.quad 0x4058eeb851eb851f ; double 99.730000000000003
.quad 0x4058cb851eb851ec ; double 99.180000000000007
.quad 0x4058deb851eb851f ; double 99.480000000000003
.quad 0x4058dccccccccccd ; double 99.450000000000002
.quad 0x4058aa3d70a3d70a ; double 98.659999999999996
.quad 0x4058751eb851eb85 ; double 97.829999999999998
.quad 0x40587ae147ae147b ; double 97.920000000000001
.quad 0x4058651eb851eb85 ; double 97.579999999999998
.quad 0x40585851eb851eb8 ; double 97.379999999999995
.quad 0x40584eb851eb851f ; double 97.230000000000003
.quad 0x4058366666666666 ; double 96.849999999999994
.quad 0x4058fccccccccccd ; double 99.950000000000002
.quad 0x405939999999999a ; double 100.90000000000001
.quad 0x405935c28f5c28f6 ; double 100.84
.quad 0x4058ec28f5c28f5c ; double 99.689999999999998
.quad 0x4058f9999999999a ; double 99.900000000000005
.quad 0x4058c33333333333 ; double 99.049999999999997
.quad 0x4059151eb851eb85 ; double 100.33
.quad 0x4058e00000000000 ; double 99.5
.quad 0x40587ccccccccccd ; double 97.950000000000002
.quad 0x4058b28f5c28f5c3 ; double 98.790000000000006
.quad 0x405899999999999a ; double 98.400000000000005
.quad 0x40587851eb851eb8 ; double 97.879999999999995
.quad 0x405859999999999a ; double 97.400000000000005
.quad 0x40584eb851eb851f ; double 97.230000000000003
.quad 0x4057f3d70a3d70a4 ; double 95.810000000000002
.quad 0x4057feb851eb851f ; double 95.980000000000003
.quad 0x4058200000000000 ; double 96.5
.quad 0x4058200000000000 ; double 96.5
.quad 0x405818f5c28f5c29 ; double 96.39
.quad 0x40580a3d70a3d70a ; double 96.159999999999996
.quad 0x4057f33333333333 ; double 95.799999999999997
.quad 0x4057ee147ae147ae ; double 95.719999999999998
.quad 0x4057d51eb851eb85 ; double 95.329999999999998
.quad 0x4057b66666666666 ; double 94.849999999999994
.quad 0x4057966666666666 ; double 94.349999999999994
.quad 0x405798f5c28f5c29 ; double 94.39
.quad 0x40579b851eb851ec ; double 94.430000000000007
.quad 0x4057b7ae147ae148 ; double 94.870000000000005
.quad 0x4057a8f5c28f5c29 ; double 94.64
.quad 0x40577ccccccccccd ; double 93.950000000000002
.quad 0x4056fa3d70a3d70a ; double 91.909999999999996
.quad 0x40571eb851eb851f ; double 92.480000000000003
.quad 0x4056ea3d70a3d70a ; double 91.659999999999996
.quad 0x4056e47ae147ae14 ; double 91.569999999999993
.quad 0x40574f5c28f5c28f ; double 93.239999999999994
.quad 0x4057700000000000 ; double 93.75
.quad 0x40576f5c28f5c28f ; double 93.739999999999994
.quad 0x40575b851eb851ec ; double 93.430000000000007
.quad 0x4057733333333333 ; double 93.799999999999997
.quad 0x4057833333333333 ; double 94.049999999999997
.quad 0x4057666666666666 ; double 93.599999999999994
.quad 0x405751eb851eb852 ; double 93.280000000000001
.quad 0x4057528f5c28f5c3 ; double 93.290000000000006
.quad 0x40571b851eb851ec ; double 92.430000000000007
.quad 0x4057166666666666 ; double 92.349999999999994
.quad 0x40574c28f5c28f5c ; double 93.189999999999998
.quad 0x40573ae147ae147b ; double 92.920000000000001
.quad 0x405759999999999a ; double 93.400000000000005
.quad 0x40572f5c28f5c28f ; double 92.739999999999994
.quad 0x405715c28f5c28f6 ; double 92.340000000000003
.quad 0x4056fccccccccccd ; double 91.950000000000002
.quad 0x40572b851eb851ec ; double 92.680000000000007
.quad 0x40572b851eb851ec ; double 92.680000000000007
.quad 0x4057000000000000 ; double 92
.quad 0x4056f8f5c28f5c29 ; double 91.89
.quad 0x4056fb851eb851ec ; double 91.930000000000007
.quad 0x4056ff5c28f5c28f ; double 91.989999999999994
.quad 0x4056fccccccccccd ; double 91.950000000000002
.quad 0x4057000000000000 ; double 92
.quad 0x4056accccccccccd ; double 90.700000000000002
.quad 0x4056beb851eb851f ; double 90.980000000000003
.quad 0x4057028f5c28f5c3 ; double 92.040000000000006
.quad 0x4055fc28f5c28f5c ; double 87.939999999999998
.quad 0x4055e66666666666 ; double 87.599999999999994
.quad 0x40558d70a3d70a3d ; double 86.209999999999993
.quad 0x405539999999999a ; double 84.900000000000006
.quad 0x40552c28f5c28f5c ; double 84.689999999999998
.quad 0x40552851eb851eb8 ; double 84.629999999999995
.quad 0x4055051eb851eb85 ; double 84.079999999999998
.quad 0x4054f1eb851eb852 ; double 83.780000000000001
.quad 0x4054cd70a3d70a3d ; double 83.209999999999993
.quad 0x4054d00000000000 ; double 83.25
.quad 0x405490a3d70a3d71 ; double 82.260000000000005
.quad 0x40549e147ae147ae ; double 82.469999999999999
.quad 0x4054b28f5c28f5c3 ; double 82.790000000000006
.quad 0x4054966666666666 ; double 82.349999999999994
.quad 0x4054aae147ae147b ; double 82.670000000000001
.quad 0x4054aa3d70a3d70a ; double 82.659999999999997
.quad 0x405491eb851eb852 ; double 82.280000000000001
.quad 0x405487ae147ae148 ; double 82.120000000000005
.quad 0x4054d00000000000 ; double 83.25
.quad 0x4054f28f5c28f5c3 ; double 83.790000000000006
.quad 0x405485c28f5c28f6 ; double 82.090000000000003
.quad 0x4054cd70a3d70a3d ; double 83.209999999999993
.quad 0x4054e5c28f5c28f6 ; double 83.590000000000003
.quad 0x4054c00000000000 ; double 83
.quad 0x4054accccccccccd ; double 82.700000000000002
.quad 0x4054a00000000000 ; double 82.5
.quad 0x40544ccccccccccd ; double 81.200000000000002
.quad 0x4054370a3d70a3d7 ; double 80.859999999999999
.quad 0x40541851eb851eb8 ; double 80.379999999999995
.quad 0x40542c28f5c28f5c ; double 80.689999999999998
.quad 0x4054628f5c28f5c3 ; double 81.540000000000006
.quad 0x4054647ae147ae14 ; double 81.569999999999993
.quad 0x4054600000000000 ; double 81.5
.quad 0x40546b851eb851ec ; double 81.680000000000007
.quad 0x40545eb851eb851f ; double 81.480000000000003
.quad 0x40541e147ae147ae ; double 80.469999999999999
.quad 0x4054000000000000 ; double 80
.quad 0x4053e00000000000 ; double 79.5
.quad 0x4053de147ae147ae ; double 79.469999999999999
.quad 0x4053e33333333333 ; double 79.549999999999997
.quad 0x4053f851eb851eb8 ; double 79.879999999999995
.quad 0x4053fe147ae147ae ; double 79.969999999999999
.quad 0x4053e3d70a3d70a4 ; double 79.560000000000002
.quad 0x4053d1eb851eb852 ; double 79.280000000000001
.quad 0x405348f5c28f5c29 ; double 77.14
.quad 0x40535f5c28f5c28f ; double 77.489999999999994
.quad 0x4052f1eb851eb852 ; double 75.780000000000001
.quad 0x4053051eb851eb85 ; double 76.079999999999998
.quad 0x405311eb851eb852 ; double 76.280000000000001
.quad 0x405310a3d70a3d71 ; double 76.260000000000005
.quad 0x40530ccccccccccd ; double 76.200000000000002
.quad 0x40533147ae147ae1 ; double 76.769999999999996
.quad 0x40532eb851eb851f ; double 76.730000000000003
.quad 0x40533b851eb851ec ; double 76.930000000000007
.quad 0x40535851eb851eb8 ; double 77.379999999999995
.quad 0x4053600000000000 ; double 77.5
.quad 0x4053533333333333 ; double 77.299999999999997
.quad 0x4053528f5c28f5c3 ; double 77.290000000000006
.quad 0x4053133333333333 ; double 76.299999999999997
.quad 0x40531a3d70a3d70a ; double 76.409999999999997
.quad 0x405309999999999a ; double 76.150000000000006
.quad 0x4052e00000000000 ; double 75.5
.quad 0x4052f851eb851eb8 ; double 75.879999999999995
.quad 0x405343d70a3d70a4 ; double 77.060000000000002
.quad 0x40529ae147ae147b ; double 74.420000000000001
.quad 0x40527c28f5c28f5c ; double 73.939999999999998
.quad 0x4052933333333333 ; double 74.299999999999997
.quad 0x4052d1eb851eb852 ; double 75.280000000000001
.quad 0x4053133333333333 ; double 76.299999999999997
.quad 0x405329999999999a ; double 76.650000000000006
.quad 0x4053351eb851eb85 ; double 76.829999999999998
.quad 0x405385c28f5c28f6 ; double 78.090000000000003
.quad 0x4053a1eb851eb852 ; double 78.530000000000001
.quad 0x405398f5c28f5c29 ; double 78.39
.quad 0x40539147ae147ae1 ; double 78.269999999999996
.quad 0x40537b851eb851ec ; double 77.930000000000007
.quad 0x4053733333333333 ; double 77.799999999999997
.quad 0x4053266666666666 ; double 76.599999999999994
.quad 0x4053666666666666 ; double 77.599999999999994
.quad 0x40536147ae147ae1 ; double 77.519999999999996
.quad 0x405375c28f5c28f6 ; double 77.840000000000003
.quad 0x405397ae147ae148 ; double 78.370000000000005
.quad 0x4053a3d70a3d70a4 ; double 78.560000000000002
.quad 0x4053933333333333 ; double 78.299999999999997
.quad 0x4053933333333333 ; double 78.299999999999997
.quad 0x4053a1eb851eb852 ; double 78.530000000000001
.quad 0x4053b1eb851eb852 ; double 78.780000000000001
.quad 0x4053766666666666 ; double 77.849999999999994
.quad 0x40537b851eb851ec ; double 77.930000000000007
.quad 0x405385c28f5c28f6 ; double 78.090000000000003
.quad 0x4053866666666666 ; double 78.099999999999994
.quad 0x4053e51eb851eb85 ; double 79.579999999999998
.quad 0x40542f5c28f5c28f ; double 80.739999999999994
.quad 0x4053f9999999999a ; double 79.900000000000006
.quad 0x4054028f5c28f5c3 ; double 80.040000000000006
.quad 0x405423d70a3d70a4 ; double 80.560000000000002
.quad 0x405437ae147ae148 ; double 80.870000000000005
.quad 0x4054351eb851eb85 ; double 80.829999999999998
.quad 0x40543d70a3d70a3d ; double 80.959999999999993
.quad 0x4054370a3d70a3d7 ; double 80.859999999999999
.quad 0x40540b851eb851ec ; double 80.180000000000007
.quad 0x4054028f5c28f5c3 ; double 80.040000000000006
.quad 0x4054533333333333 ; double 81.299999999999997
.quad 0x405415c28f5c28f6 ; double 80.340000000000003
.quad 0x4054400000000000 ; double 81
.quad 0x4054566666666666 ; double 81.349999999999994
.quad 0x40548d70a3d70a3d ; double 82.209999999999993
.quad 0x4054d00000000000 ; double 83.25
.quad 0x4054c9999999999a ; double 83.150000000000006
.quad 0x4054ec28f5c28f5c ; double 83.689999999999998
.quad 0x4054c5c28f5c28f6 ; double 83.090000000000003
.quad 0x4054d51eb851eb85 ; double 83.329999999999998
.quad 0x4054d28f5c28f5c3 ; double 83.290000000000006
.quad 0x4054ce147ae147ae ; double 83.219999999999999
.quad 0x4054d5c28f5c28f6 ; double 83.340000000000003
.quad 0x4054c5c28f5c28f6 ; double 83.090000000000003
.quad 0x4054d28f5c28f5c3 ; double 83.290000000000006
.quad 0x4054af5c28f5c28f ; double 82.739999999999994
.quad 0x4054d3d70a3d70a4 ; double 83.310000000000002
.quad 0x4054e7ae147ae148 ; double 83.620000000000005
.quad 0x405519999999999a ; double 84.400000000000006
.quad 0x4054deb851eb851f ; double 83.480000000000003
.quad 0x4054b9999999999a ; double 82.900000000000006
.quad 0x40549a3d70a3d70a ; double 82.409999999999997
.quad 0x4054a00000000000 ; double 82.5
.quad 0x4054b66666666666 ; double 82.849999999999994
.quad 0x4055000000000000 ; double 84
.quad 0x4054e1eb851eb852 ; double 83.530000000000001
.quad 0x4054bae147ae147b ; double 82.920000000000001
.quad 0x4054a66666666666 ; double 82.599999999999994
.quad 0x405457ae147ae148 ; double 81.370000000000005
.quad 0x405483d70a3d70a4 ; double 82.060000000000002
.quad 0x4054af5c28f5c28f ; double 82.739999999999994
.quad 0x4054f33333333333 ; double 83.799999999999997
.quad 0x4055147ae147ae14 ; double 84.319999999999993
.quad 0x40551ccccccccccd ; double 84.450000000000002
.quad 0x4054f1eb851eb852 ; double 83.780000000000001
.quad 0x4054e5c28f5c28f6 ; double 83.590000000000003
.quad 0x4054e66666666666 ; double 83.599999999999994
.quad 0x4054eccccccccccd ; double 83.700000000000002
.quad 0x4054e33333333333 ; double 83.549999999999997
.quad 0x4054d8f5c28f5c29 ; double 83.39
.quad 0x4054d0a3d70a3d71 ; double 83.260000000000005
.quad 0x4054ee147ae147ae ; double 83.719999999999999
.quad 0x40551147ae147ae1 ; double 84.269999999999996
.quad 0x40552b851eb851ec ; double 84.680000000000007
.quad 0x40553f5c28f5c28f ; double 84.989999999999994
.quad 0x4054f00000000000 ; double 83.75
.quad 0x4054da3d70a3d70a ; double 83.409999999999997
.quad 0x4054ee147ae147ae ; double 83.719999999999999
.quad 0x4054f0a3d70a3d71 ; double 83.760000000000005
.quad 0x4054bf5c28f5c28f ; double 82.989999999999994
.quad 0x405499999999999a ; double 82.400000000000006
.quad 0x4054900000000000 ; double 82.25
.quad 0x40547eb851eb851f ; double 81.980000000000003
.quad 0x4054666666666666 ; double 81.599999999999994
.quad 0x4054328f5c28f5c3 ; double 80.790000000000006
.quad 0x405428f5c28f5c29 ; double 80.64
.quad 0x4054351eb851eb85 ; double 80.829999999999998
.quad 0x40540d70a3d70a3d ; double 80.209999999999993
.quad 0x405433d70a3d70a4 ; double 80.810000000000002
.quad 0x4054233333333333 ; double 80.549999999999997
.quad 0x405438f5c28f5c29 ; double 80.89
.quad 0x405429999999999a ; double 80.650000000000006
.quad 0x405455c28f5c28f6 ; double 81.340000000000003
.quad 0x405469999999999a ; double 81.650000000000006
.quad 0x4054300000000000 ; double 80.75
.quad 0x405441eb851eb852 ; double 81.030000000000001
.quad 0x40543a3d70a3d70a ; double 80.909999999999997
.quad 0x4054600000000000 ; double 81.5
.quad 0x40545ccccccccccd ; double 81.450000000000002
.quad 0x40547a3d70a3d70a ; double 81.909999999999997
.quad 0x40545d70a3d70a3d ; double 81.459999999999993
.quad 0x4054333333333333 ; double 80.799999999999997
.quad 0x405439999999999a ; double 80.900000000000006
.quad 0x40540c28f5c28f5c ; double 80.189999999999998
.quad 0x4053fccccccccccd ; double 79.950000000000002
.quad 0x40543147ae147ae1 ; double 80.769999999999996
.quad 0x405465c28f5c28f6 ; double 81.590000000000003
.quad 0x40548f5c28f5c28f ; double 82.239999999999994
.quad 0x4054800000000000 ; double 82
.quad 0x405473d70a3d70a4 ; double 81.810000000000002
.quad 0x40547147ae147ae1 ; double 81.769999999999996
.quad 0x405469999999999a ; double 81.650000000000006
.quad 0x405467ae147ae148 ; double 81.620000000000005
.quad 0x405489999999999a ; double 82.150000000000006
.quad 0x40547ae147ae147b ; double 81.920000000000001
.quad 0x4054c33333333333 ; double 83.049999999999997
.quad 0x405518f5c28f5c29 ; double 84.39
.quad 0x40552ccccccccccd ; double 84.700000000000002
.quad 0x4054ca3d70a3d70a ; double 83.159999999999997
.quad 0x4054dccccccccccd ; double 83.450000000000002
.quad 0x4054fd70a3d70a3d ; double 83.959999999999993
.quad 0x405533d70a3d70a4 ; double 84.810000000000002
.quad 0x405507ae147ae148 ; double 84.120000000000005
.quad 0x4055100000000000 ; double 84.25
.quad 0x405541eb851eb852 ; double 85.030000000000001
.quad 0x4054b9999999999a ; double 82.900000000000006
.quad 0x4054a00000000000 ; double 82.5
.quad 0x4054a33333333333 ; double 82.549999999999997
.quad 0x40549f5c28f5c28f ; double 82.489999999999994
.quad 0x4054c3d70a3d70a4 ; double 83.060000000000002
.quad 0x4054e47ae147ae14 ; double 83.569999999999993
.quad 0x4055200000000000 ; double 84.5
.quad 0x40550ccccccccccd ; double 84.200000000000002
.quad 0x4054ceb851eb851f ; double 83.230000000000003
.quad 0x4055000000000000 ; double 84
.quad 0x4054c66666666666 ; double 83.099999999999994
.quad 0x4054e66666666666 ; double 83.599999999999994
.quad 0x4055000000000000 ; double 84
.quad 0x4054e851eb851eb8 ; double 83.629999999999995
.quad 0x4054fccccccccccd ; double 83.950000000000002
.quad 0x405539999999999a ; double 84.900000000000006
.quad 0x4055d66666666666 ; double 87.349999999999994
.quad 0x4055c66666666666 ; double 87.099999999999994
.quad 0x40563a3d70a3d70a ; double 88.909999999999997
.quad 0x40567ae147ae147b ; double 89.920000000000001
.quad 0x405675c28f5c28f6 ; double 89.840000000000003
.quad 0x405629999999999a ; double 88.650000000000006
.quad 0x405649999999999a ; double 89.150000000000006
.quad 0x40566f5c28f5c28f ; double 89.739999999999994
.quad 0x405668f5c28f5c29 ; double 89.64
.quad 0x40567c28f5c28f5c ; double 89.939999999999998
.quad 0x40565b851eb851ec ; double 89.430000000000007
.quad 0x4056547ae147ae14 ; double 89.319999999999993
.quad 0x405658f5c28f5c29 ; double 89.39
.quad 0x4056051eb851eb85 ; double 88.079999999999998
.quad 0x4055f7ae147ae148 ; double 87.870000000000005
.quad 0x4056000000000000 ; double 88
.quad 0x4055beb851eb851f ; double 86.980000000000003
.quad 0x4055a51eb851eb85 ; double 86.579999999999998
.quad 0x405579999999999a ; double 85.900000000000006
.quad 0x4055400000000000 ; double 85
.quad 0x405535c28f5c28f6 ; double 84.840000000000003
.quad 0x4055133333333333 ; double 84.299999999999997
.quad 0x4054d51eb851eb85 ; double 83.329999999999998
.quad 0x4054f9999999999a ; double 83.900000000000006
.quad 0x4054f66666666666 ; double 83.849999999999994
.quad 0x4054d33333333333 ; double 83.299999999999997
.quad 0x4054c1eb851eb852 ; double 83.030000000000001
.quad 0x405478f5c28f5c29 ; double 81.89
.quad 0x405489999999999a ; double 82.150000000000006
.quad 0x4054933333333333 ; double 82.299999999999997
.quad 0x40549b851eb851ec ; double 82.430000000000007
.quad 0x4054bb851eb851ec ; double 82.930000000000007
.quad 0x4054f7ae147ae148 ; double 83.870000000000005
.quad 0x4054fccccccccccd ; double 83.950000000000002
.quad 0x4054e70a3d70a3d7 ; double 83.609999999999999
.quad 0x4055000000000000 ; double 84
.quad 0x40550c28f5c28f5c ; double 84.189999999999998
.quad 0x40550ccccccccccd ; double 84.200000000000002
.quad 0x4055266666666666 ; double 84.599999999999994
.quad 0x4054bf5c28f5c28f ; double 82.989999999999994
.quad 0x4054ad70a3d70a3d ; double 82.709999999999993
.quad 0x4054cccccccccccd ; double 83.200000000000002
.quad 0x4054bb851eb851ec ; double 82.930000000000007
.quad 0x4054e147ae147ae1 ; double 83.519999999999996
.quad 0x4054866666666666 ; double 82.099999999999994
.quad 0x405447ae147ae148 ; double 81.120000000000005
.quad 0x40541147ae147ae1 ; double 80.269999999999996
.quad 0x40542b851eb851ec ; double 80.680000000000007
.quad 0x40545e147ae147ae ; double 81.469999999999999
.quad 0x4054266666666666 ; double 80.599999999999994
.quad 0x4054200000000000 ; double 80.5
.quad 0x4054233333333333 ; double 80.549999999999997
.quad 0x4053eeb851eb851f ; double 79.730000000000003
.quad 0x4053a66666666666 ; double 78.599999999999994
.quad 0x40539a3d70a3d70a ; double 78.409999999999997
.quad 0x4053870a3d70a3d7 ; double 78.109999999999999
.quad 0x4053a00000000000 ; double 78.5
.quad 0x40539d70a3d70a3d ; double 78.459999999999993
.quad 0x4053e9999999999a ; double 79.650000000000006
.quad 0x4053feb851eb851f ; double 79.980000000000003
.quad 0x4054200000000000 ; double 80.5
.quad 0x405429999999999a ; double 80.650000000000006
.quad 0x405459999999999a ; double 81.400000000000006
.quad 0x40545f5c28f5c28f ; double 81.489999999999994
.quad 0x4054870a3d70a3d7 ; double 82.109999999999999
.quad 0x40545f5c28f5c28f ; double 81.489999999999994
.quad 0x40546147ae147ae1 ; double 81.519999999999996
.quad 0x40544851eb851eb8 ; double 81.129999999999995
.quad 0x40544c28f5c28f5c ; double 81.189999999999998
.quad 0x405400a3d70a3d71 ; double 80.010000000000005
.quad 0x4054147ae147ae14 ; double 80.319999999999993
.quad 0x4054328f5c28f5c3 ; double 80.790000000000006
.quad 0x4054400000000000 ; double 81
.quad 0x4054700000000000 ; double 81.75
.quad 0x40544147ae147ae1 ; double 81.019999999999996
.quad 0x40545ccccccccccd ; double 81.450000000000002
.quad 0x405499999999999a ; double 82.400000000000006
.quad 0x4054c8f5c28f5c29 ; double 83.14
.quad 0x4054d66666666666 ; double 83.349999999999994
.quad 0x4054d33333333333 ; double 83.299999999999997
.quad 0x4054800000000000 ; double 82
.quad 0x40547c28f5c28f5c ; double 81.939999999999998
.quad 0x40549b851eb851ec ; double 82.430000000000007
.quad 0x4054bc28f5c28f5c ; double 82.939999999999998
.quad 0x4054a51eb851eb85 ; double 82.579999999999998
.quad 0x4054b00000000000 ; double 82.75
.quad 0x405500a3d70a3d71 ; double 84.010000000000005
.quad 0x4054fccccccccccd ; double 83.950000000000002
.quad 0x4054f8f5c28f5c29 ; double 83.89
.quad 0x4054ef5c28f5c28f ; double 83.739999999999994
.quad 0x4054f8f5c28f5c29 ; double 83.89
.quad 0x40550ccccccccccd ; double 84.200000000000002
.quad 0x4054f70a3d70a3d7 ; double 83.859999999999999
.quad 0x4054f66666666666 ; double 83.849999999999994
.quad 0x4054fccccccccccd ; double 83.950000000000002
.quad 0x4054fe147ae147ae ; double 83.969999999999999
.quad 0x40550851eb851eb8 ; double 84.129999999999995
.quad 0x4055166666666666 ; double 84.349999999999994
.quad 0x40552147ae147ae1 ; double 84.519999999999996
.quad 0x40552851eb851eb8 ; double 84.629999999999995
.quad 0x40553ccccccccccd ; double 84.950000000000002
.quad 0x40553d70a3d70a3d ; double 84.959999999999993
.quad 0x4055470a3d70a3d7 ; double 85.109999999999999
.quad 0x4054fc28f5c28f5c ; double 83.939999999999998
.quad 0x4054b00000000000 ; double 82.75
.quad 0x4054aae147ae147b ; double 82.670000000000001
.quad 0x4054700000000000 ; double 81.75
.quad 0x40541f5c28f5c28f ; double 80.489999999999994
.quad 0x4053e147ae147ae1 ; double 79.519999999999996
.quad 0x4053e147ae147ae1 ; double 79.519999999999996
.quad 0x405361eb851eb852 ; double 77.530000000000001
.quad 0x405309999999999a ; double 76.150000000000006
.quad 0x4052be147ae147ae ; double 74.969999999999999
.quad 0x4052d51eb851eb85 ; double 75.329999999999998
.quad 0x4052df5c28f5c28f ; double 75.489999999999994
.quad 0x4052eb851eb851ec ; double 75.680000000000007
.quad 0x4052d33333333333 ; double 75.299999999999997
.quad 0x4052b147ae147ae1 ; double 74.769999999999996
.quad 0x4052d9999999999a ; double 75.400000000000006
.quad 0x40533e147ae147ae ; double 76.969999999999999
.quad 0x40535f5c28f5c28f ; double 77.489999999999994
.quad 0x4053400000000000 ; double 77
.quad 0x40533eb851eb851f ; double 76.980000000000003
.quad 0x40536eb851eb851f ; double 77.730000000000003
.quad 0x4053500000000000 ; double 77.25
.quad 0x4053200000000000 ; double 76.5
.quad 0x4052db851eb851ec ; double 75.430000000000007
.quad 0x4052fb851eb851ec ; double 75.930000000000007
.quad 0x4052c33333333333 ; double 75.049999999999997
.quad 0x4052de147ae147ae ; double 75.469999999999999
.quad 0x4052d9999999999a ; double 75.400000000000006
.quad 0x405305c28f5c28f6 ; double 76.090000000000003
.quad 0x4052f9999999999a ; double 75.900000000000006
.quad 0x4053466666666666 ; double 77.099999999999994
.quad 0x405358f5c28f5c29 ; double 77.39
.quad 0x4053600000000000 ; double 77.5
.quad 0x40535a3d70a3d70a ; double 77.409999999999997
.quad 0x40534f5c28f5c28f ; double 77.239999999999994
.quad 0x40535a3d70a3d70a ; double 77.409999999999997
.quad 0x405301eb851eb852 ; double 76.030000000000001
.quad 0x405320a3d70a3d71 ; double 76.510000000000005
.quad 0x40533ccccccccccd ; double 76.950000000000002
.quad 0x405351eb851eb852 ; double 77.280000000000001
.quad 0x405368f5c28f5c29 ; double 77.64
.quad 0x4053347ae147ae14 ; double 76.819999999999993
.quad 0x40529b851eb851ec ; double 74.430000000000007
.quad 0x40529f5c28f5c28f ; double 74.489999999999994
.quad 0x4052770a3d70a3d7 ; double 73.859999999999999
.quad 0x4052733333333333 ; double 73.799999999999997
.quad 0x40526c28f5c28f5c ; double 73.689999999999998
.quad 0x4052b0a3d70a3d71 ; double 74.760000000000005
.quad 0x4052dd70a3d70a3d ; double 75.459999999999993
.quad 0x4052fae147ae147b ; double 75.920000000000001
.quad 0x4053870a3d70a3d7 ; double 78.109999999999999
.quad 0x40534ccccccccccd ; double 77.200000000000002
.quad 0x40533d70a3d70a3d ; double 76.959999999999993
.quad 0x4053528f5c28f5c3 ; double 77.290000000000006
.quad 0x4053451eb851eb85 ; double 77.079999999999998
.quad 0x4053470a3d70a3d7 ; double 77.109999999999999
.quad 0x40534b851eb851ec ; double 77.180000000000007
.quad 0x40533eb851eb851f ; double 76.980000000000003
.quad 0x4052ee147ae147ae ; double 75.719999999999999
.quad 0x4052accccccccccd ; double 74.700000000000002
.quad 0x4052866666666666 ; double 74.099999999999994
.quad 0x4052f7ae147ae148 ; double 75.870000000000005
.quad 0x40534ccccccccccd ; double 77.200000000000002
.quad 0x4053700000000000 ; double 77.75
.quad 0x4053ea3d70a3d70a ; double 79.659999999999997
.quad 0x40555a3d70a3d70a ; double 85.409999999999997
.quad 0x40557e147ae147ae ; double 85.969999999999999
.quad 0x40558f5c28f5c28f ; double 86.239999999999994
.quad 0x4055e3d70a3d70a4 ; double 87.560000000000002
.quad 0x40561d70a3d70a3d ; double 88.459999999999993
.quad 0x40564ccccccccccd ; double 89.200000000000002
.quad 0x40565851eb851eb8 ; double 89.379999999999995
.quad 0x4056951eb851eb85 ; double 90.329999999999998
.quad 0x4056a7ae147ae148 ; double 90.620000000000005
.quad 0x4056f0a3d70a3d71 ; double 91.760000000000005
.quad 0x4056da3d70a3d70a ; double 91.409999999999996
.quad 0x4056b851eb851eb8 ; double 90.879999999999995
.quad 0x4056c47ae147ae14 ; double 91.069999999999993
.quad 0x4056e851eb851eb8 ; double 91.629999999999995
.quad 0x4056e33333333333 ; double 91.549999999999997
.quad 0x4056cae147ae147b ; double 91.170000000000001
.quad 0x405695c28f5c28f6 ; double 90.340000000000003
.quad 0x405691eb851eb852 ; double 90.280000000000001
.quad 0x405679999999999a ; double 89.900000000000006
.quad 0x4056ab851eb851ec ; double 90.680000000000007
.quad 0x4056d9999999999a ; double 91.400000000000005
.quad 0x40571147ae147ae1 ; double 92.269999999999996
.quad 0x4057028f5c28f5c3 ; double 92.040000000000006
.quad 0x40571a3d70a3d70a ; double 92.409999999999996
.quad 0x4057333333333333 ; double 92.799999999999997
.quad 0x4057400000000000 ; double 93
.quad 0x405723d70a3d70a4 ; double 92.560000000000002
.quad 0x405720a3d70a3d71 ; double 92.510000000000005
.quad 0x40574b851eb851ec ; double 93.180000000000007
.quad 0x40574d70a3d70a3d ; double 93.209999999999993
.quad 0x40576eb851eb851f ; double 93.730000000000003
.quad 0x40575b851eb851ec ; double 93.430000000000007
.quad 0x405730a3d70a3d71 ; double 92.760000000000005
.quad 0x4057333333333333 ; double 92.799999999999997
.quad 0x405745c28f5c28f6 ; double 93.090000000000003
.quad 0x40571c28f5c28f5c ; double 92.439999999999998
.quad 0x4057600000000000 ; double 93.5
.quad 0x4057900000000000 ; double 94.25
.quad 0x4057b0a3d70a3d71 ; double 94.760000000000005
.quad 0x4057be147ae147ae ; double 94.969999999999998
.quad 0x4057aae147ae147b ; double 94.670000000000001
.quad 0x40578147ae147ae1 ; double 94.019999999999996
.quad 0x40577e147ae147ae ; double 93.969999999999998
.quad 0x4057466666666666 ; double 93.099999999999994
.quad 0x4057b51eb851eb85 ; double 94.829999999999998
.quad 0x4057a8f5c28f5c29 ; double 94.64
.quad 0x4057b9999999999a ; double 94.900000000000005
.quad 0x4057af5c28f5c28f ; double 94.739999999999994
.quad 0x40577c28f5c28f5c ; double 93.939999999999998
.quad 0x4057966666666666 ; double 94.349999999999994
.quad 0x4057800000000000 ; double 94
.quad 0x40577ccccccccccd ; double 93.950000000000002
.quad 0x405765c28f5c28f6 ; double 93.590000000000003
.quad 0x40570e147ae147ae ; double 92.219999999999998
.quad 0x405737ae147ae148 ; double 92.870000000000005
.quad 0x405725c28f5c28f6 ; double 92.590000000000003
.quad 0x4057366666666666 ; double 92.849999999999994
.quad 0x4057533333333333 ; double 93.299999999999997
.quad 0x405775c28f5c28f6 ; double 93.840000000000003
.quad 0x4057c9999999999a ; double 95.150000000000005
.quad 0x4057d5c28f5c28f6 ; double 95.340000000000003
.quad 0x4057900000000000 ; double 94.25
.quad 0x40580ccccccccccd ; double 96.200000000000002
.quad 0x4057d1eb851eb852 ; double 95.280000000000001
.quad 0x4057f28f5c28f5c3 ; double 95.790000000000006
.quad 0x405805c28f5c28f6 ; double 96.090000000000003
.quad 0x4058333333333333 ; double 96.799999999999997
.quad 0x40583eb851eb851f ; double 96.980000000000003
.quad 0x4058751eb851eb85 ; double 97.829999999999998
.quad 0x40589ae147ae147b ; double 98.420000000000001
.quad 0x4058c66666666666 ; double 99.099999999999994
.quad 0x4058ba3d70a3d70a ; double 98.909999999999996
.quad 0x4058c00000000000 ; double 99
.quad 0x40589e147ae147ae ; double 98.469999999999998
.quad 0x4058a33333333333 ; double 98.549999999999997
.quad 0x40587e147ae147ae ; double 97.969999999999998
.quad 0x4058800000000000 ; double 98
.quad 0x40587eb851eb851f ; double 97.980000000000003
.quad 0x405849999999999a ; double 97.150000000000005
.quad 0x4058647ae147ae14 ; double 97.569999999999993
.quad 0x4058800000000000 ; double 98
.quad 0x405889999999999a ; double 98.150000000000005
.quad 0x40586ccccccccccd ; double 97.700000000000002
.quad 0x40586ccccccccccd ; double 97.700000000000002
.quad 0x40584ccccccccccd ; double 97.200000000000002
.quad 0x40587f5c28f5c28f ; double 97.989999999999994
.quad 0x4058666666666666 ; double 97.599999999999994
.quad 0x4058566666666666 ; double 97.349999999999994
.quad 0x4058900000000000 ; double 98.25
.quad 0x405879999999999a ; double 97.900000000000005
.quad 0x40586851eb851eb8 ; double 97.629999999999995
.quad 0x405831eb851eb852 ; double 96.780000000000001
.quad 0x4058047ae147ae14 ; double 96.069999999999993
.quad 0x4057e9999999999a ; double 95.650000000000005
.quad 0x40581851eb851eb8 ; double 96.379999999999995
.quad 0x4057d851eb851eb8 ; double 95.379999999999995
.quad 0x4057f28f5c28f5c3 ; double 95.790000000000006
.quad 0x4057d7ae147ae148 ; double 95.370000000000005
.quad 0x4057d9999999999a ; double 95.400000000000005
.quad 0x4057ca3d70a3d70a ; double 95.159999999999996
.quad 0x4057eccccccccccd ; double 95.700000000000002
.quad 0x40582851eb851eb8 ; double 96.629999999999995
.quad 0x4057e00000000000 ; double 95.5
.quad 0x4058000000000000 ; double 96
.quad 0x4057e00000000000 ; double 95.5
.quad 0x4057ce147ae147ae ; double 95.219999999999998
.quad 0x4057933333333333 ; double 94.299999999999997
.quad 0x40577ccccccccccd ; double 93.950000000000002
.quad 0x40576ccccccccccd ; double 93.700000000000002
.quad 0x40576147ae147ae1 ; double 93.519999999999996
.quad 0x40572ccccccccccd ; double 92.700000000000002
.quad 0x4056f9999999999a ; double 91.900000000000005
.quad 0x4056ce147ae147ae ; double 91.219999999999999
.quad 0x4056a66666666666 ; double 90.599999999999994
.quad 0x405679999999999a ; double 89.900000000000006
.quad 0x40568f5c28f5c28f ; double 90.239999999999994
.quad 0x40569147ae147ae1 ; double 90.269999999999996
.quad 0x4056647ae147ae14 ; double 89.569999999999993
.quad 0x405639999999999a ; double 88.900000000000006
.quad 0x40561ccccccccccd ; double 88.450000000000002
.quad 0x405630a3d70a3d71 ; double 88.760000000000005
.quad 0x40564c28f5c28f5c ; double 89.189999999999998
.quad 0x40566eb851eb851f ; double 89.730000000000003
.quad 0x405589999999999a ; double 86.150000000000006
.quad 0x4055500000000000 ; double 85.25
.quad 0x40553eb851eb851f ; double 84.980000000000003
.quad 0x40559eb851eb851f ; double 86.480000000000003
.quad 0x40558ccccccccccd ; double 86.200000000000002
.quad 0x4055cccccccccccd ; double 87.200000000000002
.quad 0x4055fa3d70a3d70a ; double 87.909999999999997
.quad 0x4056066666666666 ; double 88.099999999999994
.quad 0x4056066666666666 ; double 88.099999999999994
.quad 0x405601eb851eb852 ; double 88.030000000000001
.quad 0x4056066666666666 ; double 88.099999999999994
.quad 0x4055beb851eb851f ; double 86.980000000000003
.quad 0x40557eb851eb851f ; double 85.980000000000003
.quad 0x40553eb851eb851f ; double 84.980000000000003
.quad 0x405529999999999a ; double 84.650000000000006
.quad 0x40551c28f5c28f5c ; double 84.439999999999998
.quad 0x40552f5c28f5c28f ; double 84.739999999999994
.quad 0x40551147ae147ae1 ; double 84.269999999999996
.quad 0x40555c28f5c28f5c ; double 85.439999999999998
.quad 0x4055870a3d70a3d7 ; double 86.109999999999999
.quad 0x40559b851eb851ec ; double 86.430000000000007
.quad 0x4055a00000000000 ; double 86.5
.quad 0x4055bd70a3d70a3d ; double 86.959999999999993
.quad 0x4055a00000000000 ; double 86.5
.quad 0x4055b851eb851eb8 ; double 86.879999999999995
.quad 0x4055d1eb851eb852 ; double 87.280000000000001
.quad 0x4055c00000000000 ; double 87
.quad 0x4055b28f5c28f5c3 ; double 86.790000000000006
.quad 0x4055a0a3d70a3d71 ; double 86.510000000000005
.quad 0x40555c28f5c28f5c ; double 85.439999999999998
.quad 0x40552c28f5c28f5c ; double 84.689999999999998
.quad 0x405531eb851eb852 ; double 84.780000000000001
.quad 0x405545c28f5c28f6 ; double 85.090000000000003
.quad 0x40552c28f5c28f5c ; double 84.689999999999998
.quad 0x40553f5c28f5c28f ; double 84.989999999999994
.quad 0x40553ccccccccccd ; double 84.950000000000002
.quad 0x4055428f5c28f5c3 ; double 85.040000000000006
.quad 0x40555147ae147ae1 ; double 85.269999999999996
.quad 0x405549999999999a ; double 85.150000000000006
.quad 0x40555ccccccccccd ; double 85.450000000000002
.quad 0x4055500000000000 ; double 85.25
.quad 0x4055566666666666 ; double 85.349999999999994
.quad 0x40554851eb851eb8 ; double 85.129999999999995
.quad 0x4055228f5c28f5c3 ; double 84.540000000000006
.quad 0x405523d70a3d70a4 ; double 84.560000000000002
.quad 0x4054fc28f5c28f5c ; double 83.939999999999998
.quad 0x4054c33333333333 ; double 83.049999999999997
.quad 0x4054feb851eb851f ; double 83.980000000000003
.quad 0x40553f5c28f5c28f ; double 84.989999999999994
.quad 0x4054fd70a3d70a3d ; double 83.959999999999993
.quad 0x405530a3d70a3d71 ; double 84.760000000000005
.quad 0x40559ae147ae147b ; double 86.420000000000001
.quad 0x4055a9999999999a ; double 86.650000000000006
.quad 0x4055b33333333333 ; double 86.799999999999997
.quad 0x4055d8f5c28f5c29 ; double 87.39
.quad 0x4055d9999999999a ; double 87.400000000000006
.quad 0x4055ce147ae147ae ; double 87.219999999999999
.quad 0x405591eb851eb852 ; double 86.280000000000001
.quad 0x4055933333333333 ; double 86.299999999999997
.quad 0x4055651eb851eb85 ; double 85.579999999999998
.quad 0x405583d70a3d70a4 ; double 86.060000000000002
.quad 0x405599999999999a ; double 86.400000000000006
.quad 0x4055c70a3d70a3d7 ; double 87.109999999999999
.quad 0x40559f5c28f5c28f ; double 86.489999999999994
.quad 0x4055766666666666 ; double 85.849999999999994
.quad 0x40559eb851eb851f ; double 86.480000000000003
.quad 0x40552851eb851eb8 ; double 84.629999999999995
.quad 0x40554f5c28f5c28f ; double 85.239999999999994
.quad 0x405585c28f5c28f6 ; double 86.090000000000003
.quad 0x4055500000000000 ; double 85.25
.quad 0x4055200000000000 ; double 84.5
.quad 0x405540a3d70a3d71 ; double 85.010000000000005
.quad 0x40557c28f5c28f5c ; double 85.939999999999998
.quad 0x4055aeb851eb851f ; double 86.730000000000003
.quad 0x4055e33333333333 ; double 87.549999999999997
.quad 0x40561c28f5c28f5c ; double 88.439999999999998
.quad 0x4056200000000000 ; double 88.5
.quad 0x40561f5c28f5c28f ; double 88.489999999999994
.quad 0x405679999999999a ; double 89.900000000000006
.quad 0x40568eb851eb851f ; double 90.230000000000003
.quad 0x4056bae147ae147b ; double 90.920000000000001
.quad 0x4056b5c28f5c28f6 ; double 90.840000000000003
.quad 0x40568f5c28f5c28f ; double 90.239999999999994
.quad 0x40569b851eb851ec ; double 90.430000000000007
.quad 0x4056b33333333333 ; double 90.799999999999997
.quad 0x4056a3d70a3d70a4 ; double 90.560000000000002
.quad 0x4056bb851eb851ec ; double 90.930000000000007
.quad 0x4056cd70a3d70a3d ; double 91.209999999999993
.quad 0x4056a51eb851eb85 ; double 90.579999999999998
.quad 0x4056b00000000000 ; double 90.75
.quad 0x4056a33333333333 ; double 90.549999999999997
.quad 0x4056a00000000000 ; double 90.5
.quad 0x40563f5c28f5c28f ; double 88.989999999999994
.quad 0x40561f5c28f5c28f ; double 88.489999999999994
.quad 0x4056066666666666 ; double 88.099999999999994
.quad 0x405628f5c28f5c29 ; double 88.64
.quad 0x40561eb851eb851f ; double 88.480000000000003
.quad 0x4056366666666666 ; double 88.849999999999994
.quad 0x4056451eb851eb85 ; double 89.079999999999998
.quad 0x4056366666666666 ; double 88.849999999999994
.quad 0x40563ae147ae147b ; double 88.920000000000001
.quad 0x4055f5c28f5c28f6 ; double 87.840000000000003
.quad 0x40560b851eb851ec ; double 88.180000000000007
.quad 0x4055fccccccccccd ; double 87.950000000000002
.quad 0x40563851eb851eb8 ; double 88.879999999999995
.quad 0x40559f5c28f5c28f ; double 86.489999999999994
.quad 0x405581eb851eb852 ; double 86.030000000000001
.quad 0x4055d66666666666 ; double 87.349999999999994
.quad 0x4055da3d70a3d70a ; double 87.409999999999997
.quad 0x4055d00000000000 ; double 87.25
.quad 0x405609999999999a ; double 88.150000000000006
.quad 0x4056066666666666 ; double 88.099999999999994
.quad 0x405659999999999a ; double 89.400000000000006
.quad 0x4056466666666666 ; double 89.099999999999994
.quad 0x4056700000000000 ; double 89.75
.quad 0x40566f5c28f5c28f ; double 89.739999999999994
.quad 0x40562147ae147ae1 ; double 88.519999999999996
.quad 0x40564ccccccccccd ; double 89.200000000000002
.quad 0x405690a3d70a3d71 ; double 90.260000000000005
.quad 0x4056beb851eb851f ; double 90.980000000000003
.quad 0x4056ff5c28f5c28f ; double 91.989999999999994
.quad 0x4056e3d70a3d70a4 ; double 91.560000000000002
.quad 0x4056e70a3d70a3d7 ; double 91.609999999999999
.quad 0x4056e0a3d70a3d71 ; double 91.510000000000005
.quad 0x4056dae147ae147b ; double 91.420000000000001
.quad 0x40571eb851eb851f ; double 92.480000000000003
.quad 0x4057133333333333 ; double 92.299999999999997
.quad 0x4057166666666666 ; double 92.349999999999994
.quad 0x405785c28f5c28f6 ; double 94.090000000000003
.quad 0x4057770a3d70a3d7 ; double 93.859999999999999
.quad 0x4057828f5c28f5c3 ; double 94.040000000000006
.quad 0x4057970a3d70a3d7 ; double 94.359999999999999
.quad 0x405798f5c28f5c29 ; double 94.39
.quad 0x4057651eb851eb85 ; double 93.579999999999998
.quad 0x4057728f5c28f5c3 ; double 93.790000000000006
.quad 0x405797ae147ae148 ; double 94.370000000000005
.quad 0x4057a33333333333 ; double 94.549999999999997
.quad 0x40572b851eb851ec ; double 92.680000000000007
.quad 0x40570f5c28f5c28f ; double 92.239999999999994
.quad 0x40572ae147ae147b ; double 92.670000000000001
.quad 0x4057670a3d70a3d7 ; double 93.609999999999999
.quad 0x4057500000000000 ; double 93.25
.quad 0x40572851eb851eb8 ; double 92.629999999999995
.quad 0x40571f5c28f5c28f ; double 92.489999999999994
.quad 0x40570a3d70a3d70a ; double 92.159999999999996
.quad 0x4056deb851eb851f ; double 91.480000000000003
.quad 0x40573e147ae147ae ; double 92.969999999999998
.quad 0x40574b851eb851ec ; double 93.180000000000007
.quad 0x4057728f5c28f5c3 ; double 93.790000000000006
.quad 0x40572ccccccccccd ; double 92.700000000000002
.quad 0x40572c28f5c28f5c ; double 92.689999999999998
.quad 0x40575851eb851eb8 ; double 93.379999999999995
.quad 0x40573eb851eb851f ; double 92.980000000000003
.quad 0x4057af5c28f5c28f ; double 94.739999999999994
.quad 0x4057d1eb851eb852 ; double 95.280000000000001
.quad 0x40583851eb851eb8 ; double 96.879999999999995
.quad 0x40583eb851eb851f ; double 96.980000000000003
.quad 0x40583ae147ae147b ; double 96.920000000000001
.quad 0x405838f5c28f5c29 ; double 96.89
.quad 0x4058666666666666 ; double 97.599999999999994
.quad 0x4058500000000000 ; double 97.25
.quad 0x40585851eb851eb8 ; double 97.379999999999995
.quad 0x405850a3d70a3d71 ; double 97.260000000000005
.quad 0x405845c28f5c28f6 ; double 97.090000000000003
.quad 0x40585d70a3d70a3d ; double 97.459999999999993
.quad 0x405860a3d70a3d71 ; double 97.510000000000005
.quad 0x4058a66666666666 ; double 98.599999999999994
.quad 0x4058ceb851eb851f ; double 99.230000000000003
.quad 0x4058f147ae147ae1 ; double 99.769999999999996
.quad 0x4059000000000000 ; double 100
.quad 0x405905c28f5c28f6 ; double 100.09
.quad 0x4059133333333333 ; double 100.3
.quad 0x405913d70a3d70a4 ; double 100.31
.quad 0x4058fe147ae147ae ; double 99.969999999999998
.quad 0x4058dc28f5c28f5c ; double 99.439999999999998
.quad 0x4058cf5c28f5c28f ; double 99.239999999999994
.quad 0x405905c28f5c28f6 ; double 100.09
.quad 0x40591b851eb851ec ; double 100.43000000000001
.quad 0x4059000000000000 ; double 100
.quad 0x4058fc28f5c28f5c ; double 99.939999999999998
.quad 0x4058d51eb851eb85 ; double 99.329999999999998
.quad 0x4058a66666666666 ; double 98.599999999999994
.quad 0x4058dae147ae147b ; double 99.420000000000001
.quad 0x4058eae147ae147b ; double 99.670000000000001
.quad 0x4058f66666666666 ; double 99.849999999999994
.quad 0x40588d70a3d70a3d ; double 98.209999999999993
.quad 0x40588a3d70a3d70a ; double 98.159999999999996
.quad 0x4058828f5c28f5c3 ; double 98.040000000000006
.quad 0x40585c28f5c28f5c ; double 97.439999999999998
.quad 0x4057d66666666666 ; double 95.349999999999994
.quad 0x4057e9999999999a ; double 95.650000000000005
.quad 0x40569d70a3d70a3d ; double 90.459999999999993
.quad 0x4056e0a3d70a3d71 ; double 91.510000000000005
.quad 0x405708f5c28f5c29 ; double 92.14
.quad 0x4057166666666666 ; double 92.349999999999994
.quad 0x40574d70a3d70a3d ; double 93.209999999999993
.quad 0x40575851eb851eb8 ; double 93.379999999999995
.quad 0x40574c28f5c28f5c ; double 93.189999999999998
.quad 0x405745c28f5c28f6 ; double 93.090000000000003
.quad 0x4057433333333333 ; double 93.049999999999997
.quad 0x4057366666666666 ; double 92.849999999999994
.quad 0x4057600000000000 ; double 93.5
.quad 0x40576eb851eb851f ; double 93.730000000000003
.quad 0x4057400000000000 ; double 93
.quad 0x4057333333333333 ; double 92.799999999999997
.quad 0x40575c28f5c28f5c ; double 93.439999999999998
.quad 0x4057600000000000 ; double 93.5
.quad 0x4057500000000000 ; double 93.25
.quad 0x40575851eb851eb8 ; double 93.379999999999995
.quad 0x405777ae147ae148 ; double 93.870000000000005
.quad 0x405787ae147ae148 ; double 94.120000000000005
.quad 0x40577e147ae147ae ; double 93.969999999999998
.quad 0x40574a3d70a3d70a ; double 93.159999999999996
.quad 0x40573e147ae147ae ; double 92.969999999999998
.quad 0x40572f5c28f5c28f ; double 92.739999999999994
.quad 0x40570a3d70a3d70a ; double 92.159999999999996
.quad 0x4056de147ae147ae ; double 91.469999999999998
.quad 0x4056d33333333333 ; double 91.299999999999997
.quad 0x4056dc28f5c28f5c ; double 91.439999999999998
.quad 0x4056dc28f5c28f5c ; double 91.439999999999998
.quad 0x4056deb851eb851f ; double 91.480000000000003
.quad 0x4056d70a3d70a3d7 ; double 91.359999999999999
.quad 0x4056ab851eb851ec ; double 90.680000000000007
.quad 0x405685c28f5c28f6 ; double 90.090000000000003
.quad 0x4056766666666666 ; double 89.849999999999994
.quad 0x40566f5c28f5c28f ; double 89.739999999999994
.quad 0x4056366666666666 ; double 88.849999999999994
.quad 0x4056728f5c28f5c3 ; double 89.790000000000006
.quad 0x4056751eb851eb85 ; double 89.829999999999998
.quad 0x40568b851eb851ec ; double 90.180000000000007
.quad 0x4056833333333333 ; double 90.049999999999997
.quad 0x4056dae147ae147b ; double 91.420000000000001
.quad 0x4056deb851eb851f ; double 91.480000000000003
.quad 0x4056ca3d70a3d70a ; double 91.159999999999997
.quad 0x405681eb851eb852 ; double 90.030000000000001
.quad 0x40569e147ae147ae ; double 90.469999999999999
.quad 0x4056666666666666 ; double 89.599999999999994
.quad 0x405665c28f5c28f6 ; double 89.590000000000003
.quad 0x40563851eb851eb8 ; double 88.879999999999995
.quad 0x4056733333333333 ; double 89.799999999999997
.quad 0x4056b5c28f5c28f6 ; double 90.840000000000003
.quad 0x4056ad70a3d70a3d ; double 90.709999999999993
.quad 0x40568ae147ae147b ; double 90.170000000000001
.quad 0x40567c28f5c28f5c ; double 89.939999999999998
.quad 0x405679999999999a ; double 89.900000000000006
.quad 0x4056466666666666 ; double 89.099999999999994
.quad 0x405628f5c28f5c29 ; double 88.64
.quad 0x405619999999999a ; double 88.400000000000006
.quad 0x405640a3d70a3d71 ; double 89.010000000000005
.quad 0x405655c28f5c28f6 ; double 89.340000000000003
.quad 0x405659999999999a ; double 89.400000000000006
.quad 0x40566b851eb851ec ; double 89.680000000000007
.quad 0x40566ccccccccccd ; double 89.700000000000002
.quad 0x4057a28f5c28f5c3 ; double 94.540000000000006
.quad 0x40573ccccccccccd ; double 92.950000000000002
.quad 0x4057628f5c28f5c3 ; double 93.540000000000006
.quad 0x40574b851eb851ec ; double 93.180000000000007
.quad 0x4057666666666666 ; double 93.599999999999994
.quad 0x405737ae147ae148 ; double 92.870000000000005
.quad 0x4056eccccccccccd ; double 91.700000000000002
.quad 0x4056f0a3d70a3d71 ; double 91.760000000000005
.quad 0x4056fccccccccccd ; double 91.950000000000002
.quad 0x40569ccccccccccd ; double 90.450000000000002
.quad 0x4056a66666666666 ; double 90.599999999999994
.quad 0x4056500000000000 ; double 89.25
.quad 0x40568b851eb851ec ; double 90.180000000000007
.quad 0x405688f5c28f5c29 ; double 90.14
.quad 0x4056be147ae147ae ; double 90.969999999999999
.quad 0x4056f47ae147ae14 ; double 91.819999999999993
.quad 0x405705c28f5c28f6 ; double 92.090000000000003
.quad 0x40570f5c28f5c28f ; double 92.239999999999994
.quad 0x40575e147ae147ae ; double 93.469999999999998
.quad 0x40571b851eb851ec ; double 92.430000000000007
.quad 0x4056fc28f5c28f5c ; double 91.939999999999998
.quad 0x40569a3d70a3d70a ; double 90.409999999999997
.quad 0x40567d70a3d70a3d ; double 89.959999999999993
.quad 0x405640a3d70a3d71 ; double 89.010000000000005
.quad 0x40561ae147ae147b ; double 88.420000000000001
.quad 0x4056400000000000 ; double 89
.quad 0x40567e147ae147ae ; double 89.969999999999999
.quad 0x40566a3d70a3d70a ; double 89.659999999999997
.quad 0x405610a3d70a3d71 ; double 88.260000000000005
.quad 0x4056128f5c28f5c3 ; double 88.290000000000006
.quad 0x4055d33333333333 ; double 87.299999999999997
.quad 0x40557d70a3d70a3d ; double 85.959999999999993
.quad 0x4054870a3d70a3d7 ; double 82.109999999999999
.quad 0x405495c28f5c28f6 ; double 82.340000000000003
.quad 0x4054a1eb851eb852 ; double 82.530000000000001
.quad 0x4054a28f5c28f5c3 ; double 82.540000000000006
.quad 0x4054c1eb851eb852 ; double 83.030000000000001
.quad 0x40552e147ae147ae ; double 84.719999999999999
.quad 0x4054f66666666666 ; double 83.849999999999994
.quad 0x4054d3d70a3d70a4 ; double 83.310000000000002
.quad 0x4054f3d70a3d70a4 ; double 83.810000000000002
.quad 0x4054eb851eb851ec ; double 83.680000000000007
.quad 0x40547eb851eb851f ; double 81.980000000000003
.quad 0x4054700000000000 ; double 81.75
.quad 0x40548c28f5c28f5c ; double 82.189999999999998
.quad 0x4054628f5c28f5c3 ; double 81.540000000000006
.quad 0x4054600000000000 ; double 81.5
.quad 0x40545147ae147ae1 ; double 81.269999999999996
.quad 0x4054347ae147ae14 ; double 80.819999999999993
.quad 0x40542b851eb851ec ; double 80.680000000000007
.quad 0x4054551eb851eb85 ; double 81.329999999999998
.quad 0x4054570a3d70a3d7 ; double 81.359999999999999
.quad 0x40545147ae147ae1 ; double 81.269999999999996
.quad 0x4054a28f5c28f5c3 ; double 82.540000000000006
.quad 0x40549ccccccccccd ; double 82.450000000000002
.quad 0x4054b851eb851eb8 ; double 82.879999999999995
.quad 0x4054eccccccccccd ; double 83.700000000000002
.quad 0x4054ef5c28f5c28f ; double 83.739999999999994
.quad 0x4054bf5c28f5c28f ; double 82.989999999999994
.quad 0x4054b147ae147ae1 ; double 82.769999999999996
.quad 0x4054d0a3d70a3d71 ; double 83.260000000000005
.quad 0x4054db851eb851ec ; double 83.430000000000007
.quad 0x40550147ae147ae1 ; double 84.019999999999996
.quad 0x4054fc28f5c28f5c ; double 83.939999999999998
.quad 0x4055c1eb851eb852 ; double 87.030000000000001
.quad 0x4055c00000000000 ; double 87
.quad 0x4055a51eb851eb85 ; double 86.579999999999998
.quad 0x405551eb851eb852 ; double 85.280000000000001
.quad 0x40554e147ae147ae ; double 85.219999999999999
.quad 0x4055966666666666 ; double 86.349999999999994
.quad 0x4055b00000000000 ; double 86.75
.quad 0x40559ccccccccccd ; double 86.450000000000002
.quad 0x40552c28f5c28f5c ; double 84.689999999999998
.quad 0x405538f5c28f5c29 ; double 84.89
.quad 0x4054ea3d70a3d70a ; double 83.659999999999997
.quad 0x40550ccccccccccd ; double 84.200000000000002
.quad 0x405529999999999a ; double 84.650000000000006
.quad 0x40551851eb851eb8 ; double 84.379999999999995
.quad 0x40550a3d70a3d70a ; double 84.159999999999997
.quad 0x40550b851eb851ec ; double 84.180000000000007
.quad 0x4055328f5c28f5c3 ; double 84.790000000000006
.quad 0x40554f5c28f5c28f ; double 85.239999999999994
.quad 0x4055528f5c28f5c3 ; double 85.290000000000006
.quad 0x40553f5c28f5c28f ; double 84.989999999999994
.quad 0x40553ccccccccccd ; double 84.950000000000002
.quad 0x4055233333333333 ; double 84.549999999999997
.quad 0x4055128f5c28f5c3 ; double 84.290000000000006
.quad 0x40553eb851eb851f ; double 84.980000000000003
.quad 0x405509999999999a ; double 84.150000000000006
.quad 0x4054bae147ae147b ; double 82.920000000000001
.quad 0x4054800000000000 ; double 82
.quad 0x4054ceb851eb851f ; double 83.230000000000003
.quad 0x4055100000000000 ; double 84.25
.quad 0x40553d70a3d70a3d ; double 84.959999999999993
.quad 0x40554f5c28f5c28f ; double 85.239999999999994
.quad 0x4056451eb851eb85 ; double 89.079999999999998
.quad 0x40562b851eb851ec ; double 88.680000000000007
.quad 0x405625c28f5c28f6 ; double 88.590000000000003
.quad 0x40562ccccccccccd ; double 88.700000000000002
.quad 0x4055f9999999999a ; double 87.900000000000006
.quad 0x40557ccccccccccd ; double 85.950000000000002
.quad 0x4055b33333333333 ; double 86.799999999999997
.quad 0x40558b851eb851ec ; double 86.180000000000007
.quad 0x4055cf5c28f5c28f ; double 87.239999999999994
.quad 0x40561e147ae147ae ; double 88.469999999999999
.quad 0x405678f5c28f5c29 ; double 89.89
.quad 0x40567d70a3d70a3d ; double 89.959999999999993
.quad 0x405699999999999a ; double 90.400000000000006
.quad 0x4056866666666666 ; double 90.099999999999994
.quad 0x405650a3d70a3d71 ; double 89.260000000000005
.quad 0x4055eccccccccccd ; double 87.700000000000002
.quad 0x4055b1eb851eb852 ; double 86.780000000000001
.quad 0x4055f3d70a3d70a4 ; double 87.810000000000002
.quad 0x405615c28f5c28f6 ; double 88.340000000000003
.quad 0x4056000000000000 ; double 88
.quad 0x4055e47ae147ae14 ; double 87.569999999999993
.quad 0x40558eb851eb851f ; double 86.230000000000003
.quad 0x40556ae147ae147b ; double 85.670000000000001
.quad 0x40559f5c28f5c28f ; double 86.489999999999994
.quad 0x4055600000000000 ; double 85.5
.quad 0x405551eb851eb852 ; double 85.280000000000001
.quad 0x40556ccccccccccd ; double 85.700000000000002
.quad 0x40557e147ae147ae ; double 85.969999999999999
.quad 0x405583d70a3d70a4 ; double 86.060000000000002
.quad 0x405520a3d70a3d71 ; double 84.510000000000005
.quad 0x405519999999999a ; double 84.400000000000006
.quad 0x405519999999999a ; double 84.400000000000006
.quad 0x4054b66666666666 ; double 82.849999999999994
.quad 0x4054047ae147ae14 ; double 80.069999999999993
.quad 0x4054528f5c28f5c3 ; double 81.290000000000006
.quad 0x4053d00000000000 ; double 79.25
.quad 0x40542f5c28f5c28f ; double 80.739999999999994
.quad 0x40542ccccccccccd ; double 80.700000000000002
.quad 0x4054b9999999999a ; double 82.900000000000006
.quad 0x4054a33333333333 ; double 82.549999999999997
.quad 0x4054deb851eb851f ; double 83.480000000000003
.quad 0x40547e147ae147ae ; double 81.969999999999999
.quad 0x4053e3d70a3d70a4 ; double 79.560000000000002
.quad 0x4053e70a3d70a3d7 ; double 79.609999999999999
.quad 0x40547f5c28f5c28f ; double 81.989999999999994
.quad 0x4054951eb851eb85 ; double 82.329999999999998
.quad 0x4054d00000000000 ; double 83.25
.quad 0x4055000000000000 ; double 84
.quad 0x4054d66666666666 ; double 83.349999999999994
.quad 0x405539999999999a ; double 84.900000000000006
.quad 0x40549f5c28f5c28f ; double 82.489999999999994
.quad 0x4054a00000000000 ; double 82.5
.quad 0x4054e00000000000 ; double 83.5
.quad 0x4054aae147ae147b ; double 82.670000000000001
.quad 0x4053deb851eb851f ; double 79.480000000000003
.quad 0x4053ab851eb851ec ; double 78.680000000000007
.quad 0x4052e851eb851eb8 ; double 75.629999999999995
.quad 0x4053151eb851eb85 ; double 76.329999999999998
.quad 0x40535ccccccccccd ; double 77.450000000000002
.quad 0x40537f5c28f5c28f ; double 77.989999999999994
.quad 0x405371eb851eb852 ; double 77.780000000000001
.quad 0x40536eb851eb851f ; double 77.730000000000003
.quad 0x4053700000000000 ; double 77.75
.quad 0x4053c00000000000 ; double 79
.quad 0x40539e147ae147ae ; double 78.469999999999999
.quad 0x4053a5c28f5c28f6 ; double 78.590000000000003
.quad 0x4053c33333333333 ; double 79.049999999999997
.quad 0x4053d7ae147ae148 ; double 79.370000000000005
.quad 0x4053ff5c28f5c28f ; double 79.989999999999994
.quad 0x4054000000000000 ; double 80
.quad 0x4054033333333333 ; double 80.049999999999997
.quad 0x4053eccccccccccd ; double 79.700000000000002
.quad 0x4053e00000000000 ; double 79.5
.quad 0x40535ccccccccccd ; double 77.450000000000002
.quad 0x4053166666666666 ; double 76.349999999999994
.quad 0x405385c28f5c28f6 ; double 78.090000000000003
.quad 0x4053be147ae147ae ; double 78.969999999999999
.quad 0x40538147ae147ae1 ; double 78.019999999999996
.quad 0x405389999999999a ; double 78.150000000000006
.quad 0x40538ccccccccccd ; double 78.200000000000002
.quad 0x4053b9999999999a ; double 78.900000000000006
.quad 0x40538b851eb851ec ; double 78.180000000000007
.quad 0x4053b7ae147ae148 ; double 78.870000000000005
.quad 0x405395c28f5c28f6 ; double 78.340000000000003
.quad 0x4054533333333333 ; double 81.299999999999997
.quad 0x40542ccccccccccd ; double 80.700000000000002
.quad 0x40541e147ae147ae ; double 80.469999999999999
.quad 0x4053e00000000000 ; double 79.5
.quad 0x4054600000000000 ; double 81.5
.quad 0x4054570a3d70a3d7 ; double 81.359999999999999
.quad 0x40543147ae147ae1 ; double 80.769999999999996
.quad 0x4054766666666666 ; double 81.849999999999994
.quad 0x4054ef5c28f5c28f ; double 83.739999999999994
.quad 0x4055ed70a3d70a3d ; double 87.709999999999993
.quad 0x405629999999999a ; double 88.650000000000006
.quad 0x405625c28f5c28f6 ; double 88.590000000000003
.quad 0x40563ccccccccccd ; double 88.950000000000002
.quad 0x4056028f5c28f5c3 ; double 88.040000000000006
.quad 0x4055c147ae147ae1 ; double 87.019999999999996
.quad 0x40556c28f5c28f5c ; double 85.689999999999998
.quad 0x40558b851eb851ec ; double 86.180000000000007
.quad 0x4055333333333333 ; double 84.799999999999997
.quad 0x405469999999999a ; double 81.650000000000006
.quad 0x4054247ae147ae14 ; double 80.569999999999993
.quad 0x40539d70a3d70a3d ; double 78.459999999999993
.quad 0x40535b851eb851ec ; double 77.430000000000007
.quad 0x4053cb851eb851ec ; double 79.180000000000007
.quad 0x40542ccccccccccd ; double 80.700000000000002
.quad 0x4054400000000000 ; double 81
.quad 0x4054600000000000 ; double 81.5
.quad 0x4054000000000000 ; double 80
.quad 0x405415c28f5c28f6 ; double 80.340000000000003
.quad 0x4053ec28f5c28f5c ; double 79.689999999999998
.quad 0x40547147ae147ae1 ; double 81.769999999999996
.quad 0x4054728f5c28f5c3 ; double 81.790000000000006
.quad 0x4054000000000000 ; double 80
.quad 0x40546c28f5c28f5c ; double 81.689999999999998
.quad 0x40549eb851eb851f ; double 82.480000000000003
.quad 0x4054400000000000 ; double 81
.quad 0x4054551eb851eb85 ; double 81.329999999999998
.quad 0x4054b00000000000 ; double 82.75
.quad 0x40553eb851eb851f ; double 84.980000000000003
.quad 0x4055370a3d70a3d7 ; double 84.859999999999999
.quad 0x4055c1eb851eb852 ; double 87.030000000000001
.quad 0x40565d70a3d70a3d ; double 89.459999999999993
.quad 0x4056033333333333 ; double 88.049999999999997
.quad 0x4056070a3d70a3d7 ; double 88.109999999999999
.quad 0x405589999999999a ; double 86.150000000000006
.quad 0x4055900000000000 ; double 86.25
.quad 0x40554ae147ae147b ; double 85.170000000000001
.quad 0x40553ae147ae147b ; double 84.920000000000001
.quad 0x40546d70a3d70a3d ; double 81.709999999999993
.quad 0x4053dccccccccccd ; double 79.450000000000002
.quad 0x40543f5c28f5c28f ; double 80.989999999999994
.quad 0x405419999999999a ; double 80.400000000000006
.quad 0x40543f5c28f5c28f ; double 80.989999999999994
.quad 0x4054151eb851eb85 ; double 80.329999999999998
.quad 0x4053f9999999999a ; double 79.900000000000006
.quad 0x4053970a3d70a3d7 ; double 78.359999999999999
.quad 0x4053d28f5c28f5c3 ; double 79.290000000000006
.quad 0x4054000000000000 ; double 80
.quad 0x40547851eb851eb8 ; double 81.879999999999995
.quad 0x405480a3d70a3d71 ; double 82.010000000000005
.quad 0x4054f3d70a3d70a4 ; double 83.810000000000002
.quad 0x4054200000000000 ; double 80.5
.quad 0x4053f28f5c28f5c3 ; double 79.790000000000006
.quad 0x4053d9999999999a ; double 79.400000000000006
.quad 0x4053433333333333 ; double 77.049999999999997
.quad 0x4053600000000000 ; double 77.5
.quad 0x4052a7ae147ae148 ; double 74.620000000000005
.quad 0x4052e33333333333 ; double 75.549999999999997
.quad 0x4052be147ae147ae ; double 74.969999999999999
.quad 0x4052eb851eb851ec ; double 75.680000000000007
.quad 0x4052f9999999999a ; double 75.900000000000006
.quad 0x4052900000000000 ; double 74.25
.quad 0x4052400000000000 ; double 73
.quad 0x4050c00000000000 ; double 67
.quad 0x40511eb851eb851f ; double 68.480000000000004
.quad 0x404fe66666666666 ; double 63.799999999999997
.quad 0x404ff5c28f5c28f6 ; double 63.920000000000002
.quad 0x404d3d70a3d70a3d ; double 58.479999999999997
.quad 0x404c59999999999a ; double 56.700000000000003
.quad 0x404d0b851eb851ec ; double 58.090000000000003
.quad 0x404d400000000000 ; double 58.5
.quad 0x404dc147ae147ae1 ; double 59.509999999999998
.quad 0x404e947ae147ae14 ; double 61.159999999999997
.quad 0x404f000000000000 ; double 62
.quad 0x404efd70a3d70a3d ; double 61.979999999999997
.quad 0x404df1eb851eb852 ; double 59.890000000000001
.quad 0x404fbeb851eb851f ; double 63.490000000000002
.quad 0x40503851eb851eb8 ; double 64.879999999999995
.quad 0x404fe28f5c28f5c3 ; double 63.770000000000003
.quad 0x404fa66666666666 ; double 63.299999999999997
.quad 0x404fe00000000000 ; double 63.75
.quad 0x405059999999999a ; double 65.400000000000006
.quad 0x40509eb851eb851f ; double 66.480000000000004
.quad 0x4051b9999999999a ; double 70.900000000000006
.quad 0x40525ccccccccccd ; double 73.450000000000002
.quad 0x40522e147ae147ae ; double 72.719999999999999
.quad 0x40523851eb851eb8 ; double 72.879999999999995
.quad 0x4052833333333333 ; double 74.049999999999997
.quad 0x4053600000000000 ; double 77.5
.quad 0x4052e66666666666 ; double 75.599999999999994
.quad 0x4052beb851eb851f ; double 74.980000000000003
.quad 0x40527f5c28f5c28f ; double 73.989999999999994
.quad 0x4052366666666666 ; double 72.849999999999994
.quad 0x40527f5c28f5c28f ; double 73.989999999999994
.quad 0x4052c00000000000 ; double 75
.quad 0x4053166666666666 ; double 76.349999999999994
.quad 0x4053566666666666 ; double 77.349999999999994
.quad 0x405340a3d70a3d71 ; double 77.010000000000005
.quad 0x40541c28f5c28f5c ; double 80.439999999999998
.quad 0x405438f5c28f5c29 ; double 80.89
.quad 0x40545eb851eb851f ; double 81.480000000000003
.quad 0x40549ccccccccccd ; double 82.450000000000002
.quad 0x4054951eb851eb85 ; double 82.329999999999998
.quad 0x4054800000000000 ; double 82
.quad 0x4054b66666666666 ; double 82.849999999999994
.quad 0x4053feb851eb851f ; double 79.980000000000003
.quad 0x40532d70a3d70a3d ; double 76.709999999999993
.quad 0x4052c147ae147ae1 ; double 75.019999999999996
.quad 0x4052733333333333 ; double 73.799999999999997
.quad 0x405228f5c28f5c29 ; double 72.640000000000001
.quad 0x4052933333333333 ; double 74.299999999999997
.quad 0x4052033333333333 ; double 72.049999999999997
.quad 0x405179999999999a ; double 69.900000000000006
.quad 0x40515ae147ae147b ; double 69.420000000000002
.quad 0x4051166666666666 ; double 68.349999999999994
.quad 0x40510eb851eb851f ; double 68.230000000000004
.quad 0x4051accccccccccd ; double 70.700000000000003
.quad 0x4051e66666666666 ; double 71.599999999999994
.quad 0x40522ccccccccccd ; double 72.700000000000003
.quad 0x4051d9999999999a ; double 71.400000000000006
.quad 0x40515e147ae147ae ; double 69.469999999999999
.quad 0x4051800000000000 ; double 70
.quad 0x4051800000000000 ; double 70
.quad 0x4051800000000000 ; double 70
.quad 0x40520c28f5c28f5c ; double 72.189999999999998
.quad 0x40522ccccccccccd ; double 72.700000000000003
.quad 0x40528ccccccccccd ; double 74.200000000000002
.quad 0x4051e66666666666 ; double 71.599999999999994
.quad 0x4051d8f5c28f5c29 ; double 71.390000000000001
.quad 0x4051c00000000000 ; double 71
.quad 0x4051ef5c28f5c28f ; double 71.739999999999995
.quad 0x4051700000000000 ; double 69.75
.quad 0x4051d00000000000 ; double 71.25
.quad 0x4052100000000000 ; double 72.25
.quad 0x40525eb851eb851f ; double 73.480000000000003
.quad 0x405279999999999a ; double 73.900000000000006
.quad 0x4051ad70a3d70a3d ; double 70.709999999999994
.quad 0x405159999999999a ; double 69.400000000000006
.quad 0x40521a3d70a3d70a ; double 72.409999999999997
.quad 0x405267ae147ae148 ; double 73.620000000000005
.quad 0x405209999999999a ; double 72.150000000000006
.quad 0x4051a5c28f5c28f6 ; double 70.590000000000003
.quad 0x4051dccccccccccd ; double 71.450000000000003
.quad 0x4051df5c28f5c28f ; double 71.489999999999995
.quad 0x4051bf5c28f5c28f ; double 70.989999999999995
.quad 0x405285c28f5c28f6 ; double 74.090000000000003
.quad 0x4052cccccccccccd ; double 75.200000000000002
.quad 0x405339999999999a ; double 76.900000000000006
.quad 0x4053700000000000 ; double 77.75
.quad 0x40533147ae147ae1 ; double 76.769999999999996
.quad 0x405359999999999a ; double 77.400000000000006
.quad 0x4052fccccccccccd ; double 75.950000000000002
.quad 0x4053900000000000 ; double 78.25
.quad 0x4053b33333333333 ; double 78.799999999999997
.quad 0x4053b66666666666 ; double 78.849999999999994
.quad 0x40543ccccccccccd ; double 80.950000000000002
.quad 0x40542d70a3d70a3d ; double 80.709999999999993
.quad 0x4053e9999999999a ; double 79.650000000000006
.quad 0x40543a3d70a3d70a ; double 80.909999999999997
.quad 0x4054833333333333 ; double 82.049999999999997
.quad 0x4054900000000000 ; double 82.25
.quad 0x4054800000000000 ; double 82
.quad 0x4054c70a3d70a3d7 ; double 83.109999999999999
.quad 0x4054f00000000000 ; double 83.75
.quad 0x4055433333333333 ; double 85.049999999999997
.quad 0x405518f5c28f5c29 ; double 84.39
.quad 0x4055400000000000 ; double 85
.quad 0x40555d70a3d70a3d ; double 85.459999999999993
.quad 0x405599999999999a ; double 86.400000000000006
.quad 0x4055800000000000 ; double 86
.quad 0x40559f5c28f5c28f ; double 86.489999999999994
.quad 0x40555eb851eb851f ; double 85.480000000000003
.quad 0x4054928f5c28f5c3 ; double 82.290000000000006
.quad 0x4054200000000000 ; double 80.5
.quad 0x4054c00000000000 ; double 83
.quad 0x4054b33333333333 ; double 82.799999999999997
.quad 0x4053600000000000 ; double 77.5
.quad 0x405477ae147ae148 ; double 81.870000000000005
.quad 0x4054e00000000000 ; double 83.5
.quad 0x4055333333333333 ; double 84.799999999999997
.quad 0x405529999999999a ; double 84.650000000000006
.quad 0x40554eb851eb851f ; double 85.230000000000003
.quad 0x4055466666666666 ; double 85.099999999999994
.quad 0x4055bd70a3d70a3d ; double 86.959999999999993
.quad 0x4055bae147ae147b ; double 86.920000000000001
.quad 0x4056100000000000 ; double 88.25
.quad 0x4056451eb851eb85 ; double 89.079999999999998
.quad 0x40563ccccccccccd ; double 88.950000000000002
.quad 0x405681eb851eb852 ; double 90.030000000000001
.quad 0x4056466666666666 ; double 89.099999999999994
.quad 0x4055cccccccccccd ; double 87.200000000000002
.quad 0x4055b66666666666 ; double 86.849999999999994
.quad 0x4055a66666666666 ; double 86.599999999999994
.quad 0x4055fd70a3d70a3d ; double 87.959999999999993
.quad 0x405620a3d70a3d71 ; double 88.510000000000005
.quad 0x40567ccccccccccd ; double 89.950000000000002
.quad 0x40562147ae147ae1 ; double 88.519999999999996
.quad 0x405625c28f5c28f6 ; double 88.590000000000003
.quad 0x4059500000000000 ; double 101.25
.quad 0x405948f5c28f5c29 ; double 101.14
.quad 0x4059600000000000 ; double 101.5
.quad 0x405980a3d70a3d71 ; double 102.01000000000001
.quad 0x405a000000000000 ; double 104
.quad 0x405a48f5c28f5c29 ; double 105.14
.quad 0x4059f28f5c28f5c3 ; double 103.79000000000001
.quad 0x405a6ccccccccccd ; double 105.7
.quad 0x405aaa3d70a3d70a ; double 106.66
.quad 0x405aaccccccccccd ; double 106.7
.quad 0x405ab1eb851eb852 ; double 106.78
.quad 0x405ab9999999999a ; double 106.90000000000001
.quad 0x405b033333333333 ; double 108.05
.quad 0x405b28f5c28f5c29 ; double 108.64
.quad 0x405adccccccccccd ; double 107.45
.quad 0x405afccccccccccd ; double 107.95
.quad 0x405b29999999999a ; double 108.65000000000001
.quad 0x405b366666666666 ; double 108.84999999999999
.quad 0x405a7e147ae147ae ; double 105.97
.quad 0x405ad5c28f5c28f6 ; double 107.34
.quad 0x405ae00000000000 ; double 107.5
.quad 0x405ac5c28f5c28f6 ; double 107.09
.quad 0x405aa00000000000 ; double 106.5
.quad 0x405aa1eb851eb852 ; double 106.53
.quad 0x4059c66666666666 ; double 103.09999999999999
.quad 0x4058e66666666666 ; double 99.599999999999994
.quad 0x405913d70a3d70a4 ; double 100.31
.quad 0x4058cc28f5c28f5c ; double 99.189999999999998
.quad 0x4058d00000000000 ; double 99.25
.quad 0x4058cccccccccccd ; double 99.200000000000002
.quad 0x4058eccccccccccd ; double 99.700000000000002
.quad 0x4059000000000000 ; double 100
.quad 0x4059733333333333 ; double 101.8
.quad 0x405a333333333333 ; double 104.8
.quad 0x405b49999999999a ; double 109.15000000000001
.quad 0x405b1eb851eb851f ; double 108.48
.quad 0x405af8f5c28f5c29 ; double 107.89
.quad 0x405aec28f5c28f5c ; double 107.69
.quad 0x405a6e147ae147ae ; double 105.72
.quad 0x405ac00000000000 ; double 107
.quad 0x405af47ae147ae14 ; double 107.81999999999999
.quad 0x405b200000000000 ; double 108.5
.quad 0x405b000000000000 ; double 108
.quad 0x405b533333333333 ; double 109.3
.quad 0x405afeb851eb851f ; double 107.98
.quad 0x405a700000000000 ; double 105.75
.quad 0x405b23d70a3d70a4 ; double 108.56
.quad 0x405b8d70a3d70a3d ; double 110.20999999999999
.quad 0x405bac28f5c28f5c ; double 110.69
.quad 0x405b6eb851eb851f ; double 109.73
.quad 0x405bd00000000000 ; double 111.25
.quad 0x405c90a3d70a3d71 ; double 114.26000000000001
.quad 0x405cb9999999999a ; double 114.90000000000001
.quad 0x405e233333333333 ; double 120.55
.quad 0x405db00000000000 ; double 118.75
.quad 0x405deccccccccccd ; double 119.7
.quad 0x405e05c28f5c28f6 ; double 120.09
.quad 0x405e8b851eb851ec ; double 122.18000000000001
.quad 0x405f000000000000 ; double 124
.quad 0x405f98f5c28f5c29 ; double 126.39
.quad 0x405f4ccccccccccd ; double 125.2
.quad 0x405f8c28f5c28f5c ; double 126.19
.quad 0x405f666666666666 ; double 125.59999999999999
.quad 0x405f0e147ae147ae ; double 124.22
.quad 0x405e600000000000 ; double 121.5
.quad 0x405ecf5c28f5c28f ; double 123.23999999999999
.quad 0x405ef851eb851eb8 ; double 123.88
.quad 0x405efb851eb851ec ; double 123.93000000000001
.quad 0x405eeccccccccccd ; double 123.7
.quad 0x405e9eb851eb851f ; double 122.48
.quad 0x405ee00000000000 ; double 123.5
.quad 0x405f000000000000 ; double 124
.quad 0x405f2ccccccccccd ; double 124.7
.quad 0x405ebe147ae147ae ; double 122.97
.quad 0x405e88f5c28f5c29 ; double 122.14
.quad 0x405e5eb851eb851f ; double 121.48
.quad 0x405e933333333333 ; double 122.3
.quad 0x405ecd70a3d70a3d ; double 123.20999999999999
.quad 0x405eaf5c28f5c28f ; double 122.73999999999999
.quad 0x405e400000000000 ; double 121
.quad 0x405e400000000000 ; double 121
.quad 0x405e6ccccccccccd ; double 121.7
.quad 0x405e7eb851eb851f ; double 121.98
.quad 0x405d2e147ae147ae ; double 116.72
.quad 0x405cb66666666666 ; double 114.84999999999999
.quad 0x405cfc28f5c28f5c ; double 115.94
.quad 0x405ca33333333333 ; double 114.55
.quad 0x405c9851eb851eb8 ; double 114.38
.quad 0x405d133333333333 ; double 116.3
.quad 0x405d19999999999a ; double 116.40000000000001
.quad 0x405cd7ae147ae148 ; double 115.37
.quad 0x405cd00000000000 ; double 115.25
.quad 0x405d333333333333 ; double 116.8
.quad 0x405cf33333333333 ; double 115.8
.quad 0x405cc00000000000 ; double 115
.quad 0x405d151eb851eb85 ; double 116.33
.quad 0x405d366666666666 ; double 116.84999999999999
.quad 0x405d400000000000 ; double 117
.quad 0x405cb9999999999a ; double 114.90000000000001
.quad 0x405cb9999999999a ; double 114.90000000000001
.quad 0x405ce3d70a3d70a4 ; double 115.56
.quad 0x405ccccccccccccd ; double 115.2
.quad 0x405cb33333333333 ; double 114.8
.quad 0x405ba5c28f5c28f6 ; double 110.59
.quad 0x405b933333333333 ; double 110.3
.quad 0x405b8ae147ae147b ; double 110.17
.quad 0x405bc7ae147ae148 ; double 111.12
.quad 0x405b700000000000 ; double 109.75
.quad 0x405baccccccccccd ; double 110.7
.quad 0x405c066666666666 ; double 112.09999999999999
.quad 0x405bb66666666666 ; double 110.84999999999999
.quad 0x405b300000000000 ; double 108.75
.quad 0x405aac28f5c28f5c ; double 106.69
.quad 0x405a71eb851eb852 ; double 105.78
.quad 0x4059af5c28f5c28f ; double 102.73999999999999
.quad 0x4059f00000000000 ; double 103.75
.quad 0x405aaccccccccccd ; double 106.7
.quad 0x4059c00000000000 ; double 103
.quad 0x4059b0a3d70a3d71 ; double 102.76000000000001
.quad 0x4059400000000000 ; double 101
.quad 0x4058d9999999999a ; double 99.400000000000005
.quad 0x4058533333333333 ; double 97.299999999999997
.quad 0x4058a00000000000 ; double 98.5
.quad 0x4058c00000000000 ; double 99
.quad 0x40589ccccccccccd ; double 98.450000000000002
.quad 0x4058b851eb851eb8 ; double 98.879999999999995
.quad 0x405867ae147ae148 ; double 97.620000000000005
.quad 0x405779999999999a ; double 93.900000000000005
.quad 0x40575eb851eb851f ; double 93.480000000000003
.quad 0x40572d70a3d70a3d ; double 92.709999999999993
.quad 0x4056e00000000000 ; double 91.5
.quad 0x40579ccccccccccd ; double 94.450000000000002
.quad 0x4058000000000000 ; double 96
.quad 0x4057e28f5c28f5c3 ; double 95.540000000000006
.quad 0x4057600000000000 ; double 93.5
.quad 0x4057f00000000000 ; double 95.75
.quad 0x4058366666666666 ; double 96.849999999999994
.quad 0x4058300000000000 ; double 96.75
.quad 0x4057f70a3d70a3d7 ; double 95.859999999999999
.quad 0x405859999999999a ; double 97.400000000000005
.quad 0x405889999999999a ; double 98.150000000000005
.quad 0x40592b851eb851ec ; double 100.68000000000001
.quad 0x4059b51eb851eb85 ; double 102.83
.quad 0x4059c851eb851eb8 ; double 103.13
.quad 0x4059570a3d70a3d7 ; double 101.36
.quad 0x4059d9999999999a ; double 103.40000000000001
.quad 0x405a79999999999a ; double 105.90000000000001
.quad 0x405acae147ae147b ; double 107.17
.quad 0x405b0ccccccccccd ; double 108.2
.quad 0x405af33333333333 ; double 107.8
.quad 0x405a2ccccccccccd ; double 104.7
.quad 0x405a028f5c28f5c3 ; double 104.04000000000001
.quad 0x405a3e147ae147ae ; double 104.97
.quad 0x405a400000000000 ; double 105
.quad 0x405a5f5c28f5c28f ; double 105.48999999999999
.quad 0x405a8147ae147ae1 ; double 106.02
.quad 0x405aa5c28f5c28f6 ; double 106.59
.quad 0x405ab66666666666 ; double 106.84999999999999
.quad 0x405aa8f5c28f5c29 ; double 106.64
.quad 0x405a3ccccccccccd ; double 104.95
.quad 0x405a228f5c28f5c3 ; double 104.54000000000001
.quad 0x405a866666666666 ; double 106.09999999999999
.quad 0x405ad7ae147ae148 ; double 107.37
.quad 0x405ad7ae147ae148 ; double 107.37
.quad 0x405b85c28f5c28f6 ; double 110.09
.quad 0x405b5e147ae147ae ; double 109.47
.quad 0x405b200000000000 ; double 108.5
.quad 0x405ac00000000000 ; double 107
.quad 0x405a900000000000 ; double 106.25
.quad 0x405a7ccccccccccd ; double 105.95
.quad 0x405a800000000000 ; double 106
.quad 0x405a6ccccccccccd ; double 105.7
.quad 0x405a89999999999a ; double 106.15000000000001
.quad 0x405abccccccccccd ; double 106.95
.quad 0x405a6ccccccccccd ; double 105.7
.quad 0x405a800000000000 ; double 106
.quad 0x405ae66666666666 ; double 107.59999999999999
.quad 0x405b251eb851eb85 ; double 108.58
.quad 0x405b5851eb851eb8 ; double 109.38
.quad 0x405b533333333333 ; double 109.3
.quad 0x405af33333333333 ; double 107.8
.quad 0x405a0851eb851eb8 ; double 104.13
.quad 0x405a59999999999a ; double 105.40000000000001
.quad 0x405abccccccccccd ; double 106.95
.quad 0x405bb9999999999a ; double 110.90000000000001
.quad 0x405c50a3d70a3d71 ; double 113.26000000000001
.quad 0x405c8c28f5c28f5c ; double 114.19
.quad 0x405cd9999999999a ; double 115.40000000000001
.quad 0x405d29999999999a ; double 116.65000000000001
.quad 0x405d3ccccccccccd ; double 116.95
.quad 0x405c8b851eb851ec ; double 114.18000000000001
.quad 0x405c833333333333 ; double 114.05
.quad 0x405cb9999999999a ; double 114.90000000000001
.quad 0x405c9b851eb851ec ; double 114.43000000000001
.quad 0x405caccccccccccd ; double 114.7
.quad 0x405cbf5c28f5c28f ; double 114.98999999999999
.quad 0x405d500000000000 ; double 117.25
.quad 0x405cdc28f5c28f5c ; double 115.44
.quad 0x405cdccccccccccd ; double 115.45
.quad 0x405d051eb851eb85 ; double 116.08
.quad 0x405d9e147ae147ae ; double 118.47
.quad 0x405d733333333333 ; double 117.8
.quad 0x405d88f5c28f5c29 ; double 118.14
.quad 0x405d600000000000 ; double 117.5
.quad 0x405d900000000000 ; double 118.25
.quad 0x405dc00000000000 ; double 119
.quad 0x405d666666666666 ; double 117.59999999999999
.quad 0x405cb00000000000 ; double 114.75
.quad 0x405c666666666666 ; double 113.59999999999999
.quad 0x405c770a3d70a3d7 ; double 113.86
.quad 0x405c866666666666 ; double 114.09999999999999
.quad 0x405d65c28f5c28f6 ; double 117.59
.quad 0x405df9999999999a ; double 119.90000000000001
.quad 0x405de66666666666 ; double 119.59999999999999
.quad 0x405dbccccccccccd ; double 118.95
.quad 0x405deccccccccccd ; double 119.7
.quad 0x405df9999999999a ; double 119.90000000000001
.quad 0x405d6b851eb851ec ; double 117.68000000000001
.quad 0x405d45c28f5c28f6 ; double 117.09
.quad 0x405cf33333333333 ; double 115.8
.quad 0x405c89999999999a ; double 114.15000000000001
.quad 0x405c4b851eb851ec ; double 113.18000000000001
.quad 0x405c89999999999a ; double 114.15000000000001
.quad 0x405db9999999999a ; double 118.90000000000001
.quad 0x405d8b851eb851ec ; double 118.18000000000001
.quad 0x405d700000000000 ; double 117.75
.quad 0x405d500000000000 ; double 117.25
.quad 0x405cf70a3d70a3d7 ; double 115.86
.quad 0x405cc66666666666 ; double 115.09999999999999
.quad 0x405dbccccccccccd ; double 118.95
.quad 0x405da9999999999a ; double 118.65000000000001
.quad 0x405d833333333333 ; double 118.05
.quad 0x405d39999999999a ; double 116.90000000000001
.quad 0x405d2ccccccccccd ; double 116.7
.quad 0x405cb66666666666 ; double 114.84999999999999
.quad 0x405cb00000000000 ; double 114.75
.quad 0x405c833333333333 ; double 114.05
.quad 0x405d19999999999a ; double 116.40000000000001
.quad 0x405cf9999999999a ; double 115.90000000000001
.quad 0x405b800000000000 ; double 110
.quad 0x4058f851eb851eb8 ; double 99.879999999999995
.quad 0x4058f8f5c28f5c29 ; double 99.89
.quad 0x4058600000000000 ; double 97.5
.quad 0x40597c28f5c28f5c ; double 101.94
.quad 0x4058f9999999999a ; double 99.900000000000005
.quad 0x4058af5c28f5c28f ; double 98.739999999999994
.quad 0x4059000000000000 ; double 100
.quad 0x4059000000000000 ; double 100
.quad 0x4057833333333333 ; double 94.049999999999997
.quad 0x4057a9999999999a ; double 94.650000000000005
.quad 0x4058866666666666 ; double 98.099999999999994
.quad 0x405863d70a3d70a4 ; double 97.560000000000002
.quad 0x4058447ae147ae14 ; double 97.069999999999993
.quad 0x40589ccccccccccd ; double 98.450000000000002
.quad 0x4059000000000000 ; double 100
.quad 0x4057ea3d70a3d70a ; double 95.659999999999996
.quad 0x4057bf5c28f5c28f ; double 94.989999999999994
.quad 0x4056c00000000000 ; double 91
.quad 0x4056e66666666666 ; double 91.599999999999994
.quad 0x405701eb851eb852 ; double 92.030000000000001
.quad 0x4057333333333333 ; double 92.799999999999997
.quad 0x405779999999999a ; double 93.900000000000005
.quad 0x405877ae147ae148 ; double 97.870000000000005
.quad 0x4058b9999999999a ; double 98.900000000000005
.quad 0x40589c28f5c28f5c ; double 98.439999999999998
.quad 0x405899999999999a ; double 98.400000000000005
.quad 0x405a40a3d70a3d71 ; double 105.01000000000001
.quad 0x405ad5c28f5c28f6 ; double 107.34
.quad 0x405b100000000000 ; double 108.25
.quad 0x405b19999999999a ; double 108.40000000000001
.quad 0x405a666666666666 ; double 105.59999999999999
.quad 0x405ae9999999999a ; double 107.65000000000001
.quad 0x405a99999999999a ; double 106.40000000000001
.quad 0x4059e28f5c28f5c3 ; double 103.54000000000001
.quad 0x405ab7ae147ae148 ; double 106.87
.quad 0x405a733333333333 ; double 105.8
.quad 0x405ae00000000000 ; double 107.5
.quad 0x405bb9999999999a ; double 110.90000000000001
.quad 0x405be00000000000 ; double 111.5
.quad 0x405ce66666666666 ; double 115.59999999999999
.quad 0x405cf00000000000 ; double 115.75
.quad 0x405da8f5c28f5c29 ; double 118.64
.quad 0x405d0ae147ae147b ; double 116.17
.quad 0x405d5851eb851eb8 ; double 117.38
.quad 0x405cee147ae147ae ; double 115.72
.quad 0x405cbf5c28f5c28f ; double 114.98999999999999
.quad 0x405d8ccccccccccd ; double 118.2
.quad 0x405d666666666666 ; double 117.59999999999999
.quad 0x405cec28f5c28f5c ; double 115.69
.quad 0x405c266666666666 ; double 112.59999999999999
.quad 0x405c7f5c28f5c28f ; double 113.98999999999999
.quad 0x405c95c28f5c28f6 ; double 114.34
.quad 0x405d300000000000 ; double 116.75
.quad 0x405d300000000000 ; double 116.75
.quad 0x405ce00000000000 ; double 115.5
.quad 0x405cd00000000000 ; double 115.25
.quad 0x405bc7ae147ae148 ; double 111.12
.quad 0x405bdc28f5c28f5c ; double 111.44
.quad 0x405b7c28f5c28f5c ; double 109.94
.quad 0x405b7c28f5c28f5c ; double 109.94
.quad 0x405c7c28f5c28f5c ; double 113.94
.quad 0x405b800000000000 ; double 110
.quad 0x4058700000000000 ; double 97.75
.quad 0x4057800000000000 ; double 94
.quad 0x40581c28f5c28f5c ; double 96.439999999999998
.quad 0x4057900000000000 ; double 94.25
.quad 0x4057bc28f5c28f5c ; double 94.939999999999998
.quad 0x4057f00000000000 ; double 95.75
.quad 0x40577c28f5c28f5c ; double 93.939999999999998
.quad 0x4057ac28f5c28f5c ; double 94.689999999999998
.quad 0x4058f00000000000 ; double 99.75
.quad 0x4057c00000000000 ; double 95
.quad 0x4055e00000000000 ; double 87.5
.quad 0x4055bc28f5c28f5c ; double 86.939999999999998
.quad 0x40560c28f5c28f5c ; double 88.189999999999998
.quad 0x405607ae147ae148 ; double 88.120000000000005
.quad 0x405663d70a3d70a4 ; double 89.560000000000002
.quad 0x405657ae147ae148 ; double 89.370000000000005
.quad 0x4055fc28f5c28f5c ; double 87.939999999999998
.quad 0x4056000000000000 ; double 88
.quad 0x40579c28f5c28f5c ; double 94.439999999999998
.quad 0x4056a00000000000 ; double 90.5
.quad 0x4056ac28f5c28f5c ; double 90.689999999999998
.quad 0x40576c28f5c28f5c ; double 93.689999999999998
.quad 0x405827ae147ae148 ; double 96.620000000000005
.quad 0x405807ae147ae148 ; double 96.120000000000005
.quad 0x4058900000000000 ; double 98.25
.quad 0x4058c00000000000 ; double 99
.quad 0x4057fc28f5c28f5c ; double 95.939999999999998
.quad 0x405973d70a3d70a4 ; double 101.81
.quad 0x405a300000000000 ; double 104.75
.quad 0x4058cc28f5c28f5c ; double 99.189999999999998
.quad 0x4058d3d70a3d70a4 ; double 99.310000000000002
.quad 0x4058800000000000 ; double 98
.quad 0x4058fc28f5c28f5c ; double 99.939999999999998
.quad 0x4059300000000000 ; double 100.75
.quad 0x4059800000000000 ; double 102
.quad 0x4059700000000000 ; double 101.75
.quad 0x405933d70a3d70a4 ; double 100.81
.quad 0x405a17ae147ae148 ; double 104.37
.quad 0x4059d00000000000 ; double 103.25
.quad 0x4059a00000000000 ; double 102.5
.quad 0x4058ec28f5c28f5c ; double 99.689999999999998
.quad 0x405903d70a3d70a4 ; double 100.06
.quad 0x4058f7ae147ae148 ; double 99.870000000000005
.quad 0x4058dc28f5c28f5c ; double 99.439999999999998
.quad 0x405897ae147ae148 ; double 98.370000000000005
.quad 0x4059000000000000 ; double 100
.quad 0x4059f00000000000 ; double 103.75
.quad 0x4059e00000000000 ; double 103.5
.quad 0x405997ae147ae148 ; double 102.37
.quad 0x4059a7ae147ae148 ; double 102.62
.quad 0x405993d70a3d70a4 ; double 102.31
.quad 0x4058f7ae147ae148 ; double 99.870000000000005
.quad 0x4058c7ae147ae148 ; double 99.120000000000005
.quad 0x4057700000000000 ; double 93.75
.quad 0x40576c28f5c28f5c ; double 93.689999999999998
.quad 0x405737ae147ae148 ; double 92.870000000000005
.quad 0x4056bc28f5c28f5c ; double 90.939999999999998
.quad 0x405797ae147ae148 ; double 94.370000000000005
.quad 0x4057c00000000000 ; double 95
.quad 0x4058100000000000 ; double 96.25
.quad 0x4058a00000000000 ; double 98.5
.quad 0x4058c7ae147ae148 ; double 99.120000000000005
.quad 0x405c77ae147ae148 ; double 113.87
.quad 0x405c47ae147ae148 ; double 113.12
.quad 0x405b9c28f5c28f5c ; double 110.44
.quad 0x405c97ae147ae148 ; double 114.37
.quad 0x405c3c28f5c28f5c ; double 112.94
.quad 0x405d47ae147ae148 ; double 117.12
.quad 0x405dd7ae147ae148 ; double 119.37
.quad 0x405d07ae147ae148 ; double 116.12
.quad 0x405ca3d70a3d70a4 ; double 114.56
.quad 0x405cb7ae147ae148 ; double 114.87
.quad 0x405dd7ae147ae148 ; double 119.37
.quad 0x405de7ae147ae148 ; double 119.62
.quad 0x405d3c28f5c28f5c ; double 116.94
.quad 0x405db7ae147ae148 ; double 118.87
.quad 0x405e13d70a3d70a4 ; double 120.31
.quad 0x405e77ae147ae148 ; double 121.87
.quad 0x405fa7ae147ae148 ; double 126.62
.quad 0x405f13d70a3d70a4 ; double 124.31
.quad 0x405fac28f5c28f5c ; double 126.69
.quad 0x405fa3d70a3d70a4 ; double 126.56
.quad 0x405f900000000000 ; double 126.25
.quad 0x405f800000000000 ; double 126
.quad 0x405fd00000000000 ; double 127.25
.quad 0x406026147ae147ae ; double 129.19
.quad 0x406019eb851eb852 ; double 128.81
.quad 0x405fb7ae147ae148 ; double 126.87
.quad 0x4060080000000000 ; double 128.25
.quad 0x40609e147ae147ae ; double 132.94
.quad 0x4060d00000000000 ; double 134.5
.quad 0x4060dc28f5c28f5c ; double 134.88
.quad 0x4060a9eb851eb852 ; double 133.31
.quad 0x4060de147ae147ae ; double 134.94
.quad 0x4060c6147ae147ae ; double 134.19
.quad 0x4060700000000000 ; double 131.5
.quad 0x4060c80000000000 ; double 134.25
.quad 0x4060a6147ae147ae ; double 133.19
.quad 0x40603c28f5c28f5c ; double 129.88
.quad 0x405f5c28f5c28f5c ; double 125.44
.quad 0x405ef00000000000 ; double 123.75
.quad 0x405e8c28f5c28f5c ; double 122.19
.quad 0x405e8c28f5c28f5c ; double 122.19
.quad 0x405ed7ae147ae148 ; double 123.37
.quad 0x405eb3d70a3d70a4 ; double 122.81
.quad 0x405ebc28f5c28f5c ; double 122.94
.quad 0x405ea7ae147ae148 ; double 122.62
.quad 0x405eec28f5c28f5c ; double 123.69
.quad 0x405e73d70a3d70a4 ; double 121.81
.quad 0x405e100000000000 ; double 120.25
.quad 0x405e33d70a3d70a4 ; double 120.81
.quad 0x405de7ae147ae148 ; double 119.62
.quad 0x405dc00000000000 ; double 119
.quad 0x405d200000000000 ; double 116.5
.quad 0x405d200000000000 ; double 116.5
.quad 0x405cb7ae147ae148 ; double 114.87
.quad 0x405c1c28f5c28f5c ; double 112.44
.quad 0x405c6c28f5c28f5c ; double 113.69
.quad 0x405c200000000000 ; double 112.5
.quad 0x405be00000000000 ; double 111.5
.quad 0x405c17ae147ae148 ; double 112.37
.quad 0x405c57ae147ae148 ; double 113.37
.quad 0x405ce7ae147ae148 ; double 115.62
.quad 0x405d400000000000 ; double 117
.quad 0x405d73d70a3d70a4 ; double 117.81
.quad 0x405b600000000000 ; double 109.5
.quad 0x405a33d70a3d70a4 ; double 104.81
.quad 0x405af7ae147ae148 ; double 107.87
.quad 0x405a37ae147ae148 ; double 104.87
.quad 0x405a7c28f5c28f5c ; double 105.94
.quad 0x405a77ae147ae148 ; double 105.87
.quad 0x405a000000000000 ; double 104
.quad 0x405a3c28f5c28f5c ; double 104.94
.quad 0x405a43d70a3d70a4 ; double 105.06
.quad 0x405a47ae147ae148 ; double 105.12
.quad 0x405a67ae147ae148 ; double 105.62
.quad 0x405b77ae147ae148 ; double 109.87
.quad 0x405c4c28f5c28f5c ; double 113.19
.quad 0x405c97ae147ae148 ; double 114.37
.quad 0x405ccc28f5c28f5c ; double 115.19
.quad 0x405c33d70a3d70a4 ; double 112.81
.quad 0x405cf00000000000 ; double 115.75
.quad 0x405c67ae147ae148 ; double 113.62
.quad 0x405c93d70a3d70a4 ; double 114.31
.quad 0x405cfc28f5c28f5c ; double 115.94
.quad 0x405e2c28f5c28f5c ; double 120.69
.quad 0x405e17ae147ae148 ; double 120.37
.quad 0x405d6c28f5c28f5c ; double 117.69
.quad 0x405dc00000000000 ; double 119
.quad 0x405e000000000000 ; double 120
.quad 0x405dfc28f5c28f5c ; double 119.94
.quad 0x405e1c28f5c28f5c ; double 120.44
.quad 0x405e300000000000 ; double 120.75
.quad 0x405e93d70a3d70a4 ; double 122.31
.quad 0x405e700000000000 ; double 121.75
.quad 0x405ca7ae147ae148 ; double 114.62
.quad 0x405c8c28f5c28f5c ; double 114.19
.quad 0x405b700000000000 ; double 109.75
.quad 0x405b3c28f5c28f5c ; double 108.94
.quad 0x405bb00000000000 ; double 110.75
.quad 0x405bc00000000000 ; double 111
.quad 0x405acc28f5c28f5c ; double 107.19
.quad 0x405c100000000000 ; double 112.25
.quad 0x405b800000000000 ; double 110
.quad 0x405b800000000000 ; double 110
.quad 0x405b500000000000 ; double 109.25
.quad 0x405ae00000000000 ; double 107.5
.quad 0x405ad3d70a3d70a4 ; double 107.31
.quad 0x405b37ae147ae148 ; double 108.87
.quad 0x405b500000000000 ; double 109.25
.quad 0x405a37ae147ae148 ; double 104.87
.quad 0x405a700000000000 ; double 105.75
.quad 0x405ae00000000000 ; double 107.5
.quad 0x405a6c28f5c28f5c ; double 105.69
.quad 0x405ba7ae147ae148 ; double 110.62
.quad 0x405b97ae147ae148 ; double 110.37
.quad 0x405b67ae147ae148 ; double 109.62
.quad 0x405b4c28f5c28f5c ; double 109.19
.quad 0x405bc7ae147ae148 ; double 111.12
.quad 0x405c67ae147ae148 ; double 113.62
.quad 0x405c4c28f5c28f5c ; double 113.19
.quad 0x405c2c28f5c28f5c ; double 112.69
.quad 0x405c47ae147ae148 ; double 113.12
.quad 0x405c7c28f5c28f5c ; double 113.94
.quad 0x405c400000000000 ; double 113
.quad 0x405acc28f5c28f5c ; double 107.19
.quad 0x405a500000000000 ; double 105.25
.quad 0x405b700000000000 ; double 109.75
.quad 0x405cc7ae147ae148 ; double 115.12
.quad 0x405c100000000000 ; double 112.25
.quad 0x405c300000000000 ; double 112.75
.quad 0x405c93d70a3d70a4 ; double 114.31
.quad 0x405de7ae147ae148 ; double 119.62
.quad 0x405e67ae147ae148 ; double 121.62
.quad 0x405f57ae147ae148 ; double 125.37
.quad 0x405f6c28f5c28f5c ; double 125.69
.quad 0x4060000000000000 ; double 128
.quad 0x405f700000000000 ; double 125.75
.quad 0x405fbc28f5c28f5c ; double 126.94
.quad 0x405e97ae147ae148 ; double 122.37
.quad 0x405ef00000000000 ; double 123.75
.quad 0x405ec00000000000 ; double 123
.quad 0x405e9c28f5c28f5c ; double 122.44
.quad 0x405fc00000000000 ; double 127
.quad 0x4060080000000000 ; double 128.25
.quad 0x405e900000000000 ; double 122.25
.quad 0x405cf7ae147ae148 ; double 115.87
.quad 0x405cd7ae147ae148 ; double 115.37
.quad 0x405cac28f5c28f5c ; double 114.69
.quad 0x405c57ae147ae148 ; double 113.37
.quad 0x405bec28f5c28f5c ; double 111.69
.quad 0x405b73d70a3d70a4 ; double 109.81
.quad 0x405b100000000000 ; double 108.25
.quad 0x405b800000000000 ; double 110
.quad 0x405b400000000000 ; double 109
.quad 0x405b400000000000 ; double 109
.quad 0x405b000000000000 ; double 108
.quad 0x405ab7ae147ae148 ; double 106.87
.quad 0x405ac00000000000 ; double 107
.quad 0x405bc00000000000 ; double 111
.quad 0x405b800000000000 ; double 110
.quad 0x405a5c28f5c28f5c ; double 105.44
.quad 0x405a600000000000 ; double 105.5
.quad 0x405a63d70a3d70a4 ; double 105.56
.quad 0x405aa00000000000 ; double 106.5
.quad 0x405b77ae147ae148 ; double 109.87
.quad 0x405bc00000000000 ; double 111
.quad 0x405ba00000000000 ; double 110.5
.quad 0x405c5c28f5c28f5c ; double 113.44
.quad 0x405cfc28f5c28f5c ; double 115.94
.quad 0x405db7ae147ae148 ; double 118.87
.quad 0x405d53d70a3d70a4 ; double 117.31
.quad 0x405d5c28f5c28f5c ; double 117.44
.quad 0x405d17ae147ae148 ; double 116.37
.quad 0x405ddc28f5c28f5c ; double 119.44
.quad 0x405dc7ae147ae148 ; double 119.12
.quad 0x405df00000000000 ; double 119.75
.quad 0x405dc00000000000 ; double 119
.quad 0x405d37ae147ae148 ; double 116.87
.quad 0x405d9c28f5c28f5c ; double 118.44
.quad 0x405d600000000000 ; double 117.5
.quad 0x405cf3d70a3d70a4 ; double 115.81
.quad 0x405c5c28f5c28f5c ; double 113.44
.quad 0x405c33d70a3d70a4 ; double 112.81
.quad 0x405c8c28f5c28f5c ; double 114.19
.quad 0x405d9c28f5c28f5c ; double 118.44
.quad 0x405dfc28f5c28f5c ; double 119.94
.quad 0x405de00000000000 ; double 119.5
.quad 0x405eb7ae147ae148 ; double 122.87
.quad 0x405ec00000000000 ; double 123
.quad 0x405f300000000000 ; double 124.75
.quad 0x405e800000000000 ; double 122
.quad 0x405df00000000000 ; double 119.75
.quad 0x405ed3d70a3d70a4 ; double 123.31
.quad 0x405e400000000000 ; double 121
.quad 0x405e800000000000 ; double 122
.quad 0x405e47ae147ae148 ; double 121.12
.quad 0x405dd7ae147ae148 ; double 119.37
.quad 0x405d7c28f5c28f5c ; double 117.94
.quad 0x405dbc28f5c28f5c ; double 118.94
.quad 0x405df00000000000 ; double 119.75
.quad 0x405ca00000000000 ; double 114.5
.quad 0x405d000000000000 ; double 116
.quad 0x405b600000000000 ; double 109.5
.quad 0x405ba00000000000 ; double 110.5
.quad 0x405ba00000000000 ; double 110.5
.quad 0x405bb00000000000 ; double 110.75
.quad 0x405b800000000000 ; double 110
.quad 0x405b9c28f5c28f5c ; double 110.44
.quad 0x405b93d70a3d70a4 ; double 110.31
.quad 0x405b87ae147ae148 ; double 110.12
.quad 0x405b9c28f5c28f5c ; double 110.44
.quad 0x405bc3d70a3d70a4 ; double 111.06
.quad 0x405b6c28f5c28f5c ; double 109.69
.quad 0x405afc28f5c28f5c ; double 107.94
.quad 0x405b8c28f5c28f5c ; double 110.19
.quad 0x405c300000000000 ; double 112.75
.quad 0x405bfc28f5c28f5c ; double 111.94
.quad 0x405e87ae147ae148 ; double 122.12
.quad 0x405e400000000000 ; double 121
.quad 0x405dcc28f5c28f5c ; double 119.19
.quad 0x405d200000000000 ; double 116.5
.quad 0x405c37ae147ae148 ; double 112.87
.quad 0x405a93d70a3d70a4 ; double 106.31
.quad 0x405a1c28f5c28f5c ; double 104.44
.quad 0x405a200000000000 ; double 104.5
.quad 0x405a3c28f5c28f5c ; double 104.94
.quad 0x405a800000000000 ; double 106
.quad 0x405a400000000000 ; double 105
.quad 0x405b77ae147ae148 ; double 109.87
.quad 0x405b1c28f5c28f5c ; double 108.44
.quad 0x405a47ae147ae148 ; double 105.12
.quad 0x4058900000000000 ; double 98.25
.quad 0x4057b7ae147ae148 ; double 94.870000000000005
.quad 0x4057c00000000000 ; double 95
.quad 0x405817ae147ae148 ; double 96.370000000000005
.quad 0x4058100000000000 ; double 96.25
.quad 0x4058600000000000 ; double 97.5
.quad 0x405843d70a3d70a4 ; double 97.060000000000002
.quad 0x4057e00000000000 ; double 95.5
.quad 0x40577c28f5c28f5c ; double 93.939999999999998
.quad 0x40573c28f5c28f5c ; double 92.939999999999998
.quad 0x40579c28f5c28f5c ; double 94.439999999999998
.quad 0x4057fc28f5c28f5c ; double 95.939999999999998
.quad 0x405833d70a3d70a4 ; double 96.810000000000002
.quad 0x4058b3d70a3d70a4 ; double 98.810000000000002
.quad 0x4058c00000000000 ; double 99
.quad 0x4057d3d70a3d70a4 ; double 95.310000000000002
.quad 0x4057e3d70a3d70a4 ; double 95.560000000000002
.quad 0x4058000000000000 ; double 96
.quad 0x4057c7ae147ae148 ; double 95.120000000000005
.quad 0x4057a7ae147ae148 ; double 94.620000000000005
.quad 0x4057400000000000 ; double 93
.quad 0x405c43d70a3d70a4 ; double 113.06
.quad 0x405b200000000000 ; double 108.5
.quad 0x405b400000000000 ; double 109
.quad 0x405b37ae147ae148 ; double 108.87
.quad 0x405ad00000000000 ; double 107.25
.quad 0x405bb7ae147ae148 ; double 110.87
.quad 0x405cb7ae147ae148 ; double 114.87
.quad 0x405d2c28f5c28f5c ; double 116.69
.quad 0x405cc3d70a3d70a4 ; double 115.06
.quad 0x405dcc28f5c28f5c ; double 119.19
.quad 0x405eb00000000000 ; double 122.75
.quad 0x405ed00000000000 ; double 123.25
.quad 0x405e000000000000 ; double 120
.quad 0x405e43d70a3d70a4 ; double 121.06
.quad 0x405e900000000000 ; double 122.25
.quad 0x405eb3d70a3d70a4 ; double 122.81
.quad 0x405f300000000000 ; double 124.75
.quad 0x405fb3d70a3d70a4 ; double 126.81
.quad 0x405f400000000000 ; double 125
.quad 0x405ff3d70a3d70a4 ; double 127.81
.quad 0x405fd00000000000 ; double 127.25
.quad 0x40603428f5c28f5c ; double 129.63
.quad 0x40605c28f5c28f5c ; double 130.88
.quad 0x40606e147ae147ae ; double 131.44
.quad 0x40608c28f5c28f5c ; double 132.38
.quad 0x4060e00000000000 ; double 135
.quad 0x4060b1eb851eb852 ; double 133.56
.quad 0x4060f1eb851eb852 ; double 135.56
.quad 0x406136147ae147ae ; double 137.69
.quad 0x4061100000000000 ; double 136.5
.quad 0x4060be147ae147ae ; double 133.94
.quad 0x406096147ae147ae ; double 132.69
.quad 0x4060380000000000 ; double 129.75
.quad 0x405fb00000000000 ; double 126.75
.quad 0x4060100000000000 ; double 128.5
.quad 0x405f800000000000 ; double 126
.quad 0x405f67ae147ae148 ; double 125.62
.quad 0x405f4c28f5c28f5c ; double 125.19
.quad 0x405f77ae147ae148 ; double 125.87
.quad 0x405f03d70a3d70a4 ; double 124.06
.quad 0x405ee7ae147ae148 ; double 123.62
.quad 0x405f37ae147ae148 ; double 124.87
.quad 0x405f17ae147ae148 ; double 124.37
.quad 0x405f37ae147ae148 ; double 124.87
.quad 0x40603428f5c28f5c ; double 129.63
.quad 0x406011eb851eb852 ; double 128.56
.quad 0x405ff7ae147ae148 ; double 127.87
.quad 0x405f200000000000 ; double 124.5
.quad 0x405ef3d70a3d70a4 ; double 123.81
.quad 0x405ef00000000000 ; double 123.75
.quad 0x405ea00000000000 ; double 122.5
.quad 0x405f1c28f5c28f5c ; double 124.44
.quad 0x405fc00000000000 ; double 127
.quad 0x405f000000000000 ; double 124
.quad 0x405e77ae147ae148 ; double 121.87
.quad 0x405ec7ae147ae148 ; double 123.12
.quad 0x405f800000000000 ; double 126
.quad 0x405fac28f5c28f5c ; double 126.69
.quad 0x405fa7ae147ae148 ; double 126.62
.quad 0x406016147ae147ae ; double 128.69
.quad 0x405fa00000000000 ; double 126.5
.quad 0x405f47ae147ae148 ; double 125.12
.quad 0x405f5c28f5c28f5c ; double 125.44
.quad 0x4060100000000000 ; double 128.5
.quad 0x4060380000000000 ; double 129.75
.quad 0x40608428f5c28f5c ; double 132.13
.quad 0x40615c28f5c28f5c ; double 138.88
.quad 0x4061300000000000 ; double 137.5
.quad 0x40614428f5c28f5c ; double 138.13
.quad 0x4061500000000000 ; double 138.5
.quad 0x406166147ae147ae ; double 139.19
.quad 0x406156147ae147ae ; double 138.69
.quad 0x40612e147ae147ae ; double 137.44
.quad 0x4060f00000000000 ; double 135.5
.quad 0x4060b00000000000 ; double 133.5
.quad 0x4060bc28f5c28f5c ; double 133.88
.quad 0x4060880000000000 ; double 132.25
.quad 0x406069eb851eb852 ; double 131.31
.quad 0x4060800000000000 ; double 132
.quad 0x405f53d70a3d70a4 ; double 125.31
.quad 0x405f2c28f5c28f5c ; double 124.69
.quad 0x405f700000000000 ; double 125.75
.quad 0x405f1c28f5c28f5c ; double 124.44
.quad 0x405f0c28f5c28f5c ; double 124.19
.quad 0x405ff00000000000 ; double 127.75
.quad 0x405f700000000000 ; double 125.75
.quad 0x405e5c28f5c28f5c ; double 121.44
.quad 0x405e800000000000 ; double 122
.quad 0x405e600000000000 ; double 121.5
.quad 0x405d93d70a3d70a4 ; double 118.31
.quad 0x405d000000000000 ; double 116
.quad 0x405d200000000000 ; double 116.5
.quad 0x405d300000000000 ; double 116.75
.quad 0x405e37ae147ae148 ; double 120.87
.quad 0x405df7ae147ae148 ; double 119.87
.quad 0x405e6c28f5c28f5c ; double 121.69
.quad 0x405d000000000000 ; double 116
.quad 0x405cd3d70a3d70a4 ; double 115.31
.quad 0x405ca7ae147ae148 ; double 114.62
.quad 0x405c77ae147ae148 ; double 113.87
.quad 0x405d300000000000 ; double 116.75
.quad 0x405d37ae147ae148 ; double 116.87
.quad 0x406d9428f5c28f5c ; double 236.63
.quad 0x406c400000000000 ; double 226
.quad 0x406cd6147ae147ae ; double 230.69
.quad 0x406d2c28f5c28f5c ; double 233.38
.quad 0x406d8c28f5c28f5c ; double 236.38
.quad 0x406dd80000000000 ; double 238.75
.quad 0x406e1428f5c28f5c ; double 240.63
.quad 0x406df428f5c28f5c ; double 239.63
.quad 0x406e700000000000 ; double 243.5
.quad 0x406ec00000000000 ; double 246
.quad 0x406c8e147ae147ae ; double 228.44
.quad 0x406bbc28f5c28f5c ; double 221.88
.quad 0x406bbc28f5c28f5c ; double 221.88
.quad 0x406b300000000000 ; double 217.5
.quad 0x406ac00000000000 ; double 214
.quad 0x406a900000000000 ; double 212.5
.quad 0x406afc28f5c28f5c ; double 215.88
.quad 0x406a880000000000 ; double 212.25
.quad 0x406a600000000000 ; double 211
.quad 0x406a200000000000 ; double 209
.quad 0x406a500000000000 ; double 210.5
.quad 0x406ae80000000000 ; double 215.25
.quad 0x406a580000000000 ; double 210.75
.quad 0x4069d1eb851eb852 ; double 206.56
.quad 0x4068d80000000000 ; double 198.75
.quad 0x40657c28f5c28f5c ; double 171.88
.quad 0x4065380000000000 ; double 169.75
.quad 0x406589eb851eb852 ; double 172.31
.quad 0x4066300000000000 ; double 177.5
.quad 0x4066700000000000 ; double 179.5
.quad 0x4066fc28f5c28f5c ; double 183.88
.quad 0x4067000000000000 ; double 184
.quad 0x4066f428f5c28f5c ; double 183.63
.quad 0x406781eb851eb852 ; double 188.06
.quad 0x406781eb851eb852 ; double 188.06
.quad 0x40677428f5c28f5c ; double 187.63
.quad 0x4067580000000000 ; double 186.75
.quad 0x4066fe147ae147ae ; double 183.94
.quad 0x406636147ae147ae ; double 177.69
.quad 0x4066b00000000000 ; double 181.5
.quad 0x4066a80000000000 ; double 181.25
.quad 0x4066580000000000 ; double 178.75
.quad 0x4065fc28f5c28f5c ; double 175.88
.quad 0x4065a80000000000 ; double 173.25
.quad 0x4065500000000000 ; double 170.5
.quad 0x4064f428f5c28f5c ; double 167.63
.quad 0x4065500000000000 ; double 170.5
.quad 0x4066400000000000 ; double 178
.quad 0x4066380000000000 ; double 177.75
.quad 0x4066800000000000 ; double 180
.quad 0x4066f00000000000 ; double 183.5
.quad 0x4066c9eb851eb852 ; double 182.31
.quad 0x4066ee147ae147ae ; double 183.44
.quad 0x40673e147ae147ae ; double 185.94
.quad 0x4066dc28f5c28f5c ; double 182.88
.quad 0x40671c28f5c28f5c ; double 184.88
.quad 0x4066800000000000 ; double 180
.quad 0x40665e147ae147ae ; double 178.94
.quad 0x4066200000000000 ; double 177
.quad 0x40651e147ae147ae ; double 168.94
.quad 0x40655e147ae147ae ; double 170.94
.quad 0x4065300000000000 ; double 169.5
.quad 0x40657c28f5c28f5c ; double 171.88
.quad 0x4065be147ae147ae ; double 173.94
.quad 0x406676147ae147ae ; double 179.69
.quad 0x4066800000000000 ; double 180
.quad 0x40663e147ae147ae ; double 177.94
.quad 0x4065d80000000000 ; double 174.75
.quad 0x4065d00000000000 ; double 174.5
.quad 0x4065c9eb851eb852 ; double 174.31
.quad 0x4066480000000000 ; double 178.25
.quad 0x4066080000000000 ; double 176.25
.quad 0x406659eb851eb852 ; double 178.81
.quad 0x40651c28f5c28f5c ; double 168.88
.quad 0x4065300000000000 ; double 169.5
.quad 0x40651c28f5c28f5c ; double 168.88
.quad 0x4065600000000000 ; double 171
.quad 0x4066100000000000 ; double 176.5
.quad 0x4066200000000000 ; double 177
.quad 0x406696147ae147ae ; double 180.69
.quad 0x4067100000000000 ; double 184.5
.quad 0x4066fe147ae147ae ; double 183.94
.quad 0x4066a9eb851eb852 ; double 181.31
.quad 0x406759eb851eb852 ; double 186.81
.quad 0x4067500000000000 ; double 186.5
.quad 0x4066d428f5c28f5c ; double 182.63
.quad 0x4067300000000000 ; double 185.5
.quad 0x4068c80000000000 ; double 198.25
.quad 0x4068e80000000000 ; double 199.25
.quad 0x4068100000000000 ; double 192.5
.quad 0x4067300000000000 ; double 185.5
.quad 0x40671e147ae147ae ; double 184.94
.quad 0x40676e147ae147ae ; double 187.44
.quad 0x4067c00000000000 ; double 190
.quad 0x4067ae147ae147ae ; double 189.44
.quad 0x4068000000000000 ; double 192
.quad 0x40680c28f5c28f5c ; double 192.38
.quad 0x4068180000000000 ; double 192.75
.quad 0x4067bc28f5c28f5c ; double 189.88
.quad 0x4067500000000000 ; double 186.5
.quad 0x406766147ae147ae ; double 187.19
.quad 0x40679428f5c28f5c ; double 188.63
.quad 0x40679e147ae147ae ; double 188.94
.quad 0x4067be147ae147ae ; double 189.94
.quad 0x40677e147ae147ae ; double 187.94
.quad 0x40672c28f5c28f5c ; double 185.38
.quad 0x4066e00000000000 ; double 183
.quad 0x40665e147ae147ae ; double 178.94
.quad 0x406586147ae147ae ; double 172.19
.quad 0x40653428f5c28f5c ; double 169.63
.quad 0x4064dc28f5c28f5c ; double 166.88
.quad 0x4064b9eb851eb852 ; double 165.81
.quad 0x4064e00000000000 ; double 167
.quad 0x40650e147ae147ae ; double 168.44
.quad 0x4065400000000000 ; double 170
.quad 0x40653e147ae147ae ; double 169.94
.quad 0x40653c28f5c28f5c ; double 169.88
.quad 0x406506147ae147ae ; double 168.19
.quad 0x4064ac28f5c28f5c ; double 165.38
.quad 0x40652c28f5c28f5c ; double 169.38
.quad 0x4065280000000000 ; double 169.25
.quad 0x4065400000000000 ; double 170
.quad 0x406551eb851eb852 ; double 170.56
.quad 0x406546147ae147ae ; double 170.19
.quad 0x4064de147ae147ae ; double 166.94
.quad 0x4064e6147ae147ae ; double 167.19
.quad 0x4064e00000000000 ; double 167
.quad 0x40640428f5c28f5c ; double 160.13
.quad 0x4063fc28f5c28f5c ; double 159.88
.quad 0x4063f428f5c28f5c ; double 159.63
.quad 0x40642c28f5c28f5c ; double 161.38
.quad 0x4063f6147ae147ae ; double 159.69
.quad 0x4063b00000000000 ; double 157.5
.quad 0x4063fc28f5c28f5c ; double 159.88
.quad 0x4063dc28f5c28f5c ; double 158.88
.quad 0x4063980000000000 ; double 156.75
.quad 0x406306147ae147ae ; double 152.19
.quad 0x4062de147ae147ae ; double 150.94
.quad 0x4062b1eb851eb852 ; double 149.56
.quad 0x4062b80000000000 ; double 149.75
.quad 0x4062be147ae147ae ; double 149.94
.quad 0x4062b6147ae147ae ; double 149.69
.quad 0x4062b9eb851eb852 ; double 149.81
.quad 0x4062b00000000000 ; double 149.5
.quad 0x40625c28f5c28f5c ; double 146.88
.quad 0x406259eb851eb852 ; double 146.81
.quad 0x40620e147ae147ae ; double 144.44
.quad 0x4061ee147ae147ae ; double 143.44
.quad 0x4061f00000000000 ; double 143.5
.quad 0x4061f6147ae147ae ; double 143.69
.quad 0x4061ce147ae147ae ; double 142.44
.quad 0x406179eb851eb852 ; double 139.81
.quad 0x4061300000000000 ; double 137.5
.quad 0x4061580000000000 ; double 138.75
.quad 0x4060800000000000 ; double 132
.quad 0x4060500000000000 ; double 130.5
.quad 0x40609c28f5c28f5c ; double 132.88
.quad 0x405ffc28f5c28f5c ; double 127.94
.quad 0x405ee00000000000 ; double 123.5
.quad 0x405eb7ae147ae148 ; double 122.87
.quad 0x405f000000000000 ; double 124
.quad 0x405ef00000000000 ; double 123.75
.quad 0x405f500000000000 ; double 125.25
.quad 0x405f9c28f5c28f5c ; double 126.44
.quad 0x4060700000000000 ; double 131.5
.quad 0x4060c428f5c28f5c ; double 134.13
.quad 0x406106147ae147ae ; double 136.19
.quad 0x4060b6147ae147ae ; double 133.69
.quad 0x4060de147ae147ae ; double 134.94
.quad 0x4060a00000000000 ; double 133
.quad 0x40605428f5c28f5c ; double 130.63
.quad 0x40601428f5c28f5c ; double 128.63
.quad 0x405fd00000000000 ; double 127.25
.quad 0x4060080000000000 ; double 128.25
.quad 0x40605e147ae147ae ; double 130.94
.quad 0x406056147ae147ae ; double 130.69
.quad 0x40603e147ae147ae ; double 129.94
.quad 0x405fa3d70a3d70a4 ; double 126.56
.quad 0x405ea00000000000 ; double 122.5
.quad 0x405fdc28f5c28f5c ; double 127.44
.quad 0x405f7c28f5c28f5c ; double 125.94
.quad 0x405eb7ae147ae148 ; double 122.87
.quad 0x405eac28f5c28f5c ; double 122.69
.quad 0x405ef00000000000 ; double 123.75
.quad 0x405dbc28f5c28f5c ; double 118.94
.quad 0x405ed7ae147ae148 ; double 123.37
.quad 0x405f73d70a3d70a4 ; double 125.81
.quad 0x4060180000000000 ; double 128.75
.quad 0x4060780000000000 ; double 131.75
.quad 0x40605c28f5c28f5c ; double 130.88
.quad 0x406016147ae147ae ; double 128.69
.quad 0x4060000000000000 ; double 128
.quad 0x406041eb851eb852 ; double 130.06
.quad 0x4060600000000000 ; double 131
.quad 0x4060300000000000 ; double 129.5
.quad 0x405f800000000000 ; double 126
.quad 0x405fd7ae147ae148 ; double 127.37
.quad 0x40600c28f5c28f5c ; double 128.38
.quad 0x406031eb851eb852 ; double 129.56
.quad 0x4060100000000000 ; double 128.5
.quad 0x40604428f5c28f5c ; double 130.13
.quad 0x4060c80000000000 ; double 134.25
.quad 0x40605e147ae147ae ; double 130.94
.quad 0x4060280000000000 ; double 129.25
.quad 0x4060c80000000000 ; double 134.25
.quad 0x4060dc28f5c28f5c ; double 134.88
.quad 0x40614428f5c28f5c ; double 138.13
.quad 0x4060b80000000000 ; double 133.75
.quad 0x40600e147ae147ae ; double 128.44
.quad 0x405fbc28f5c28f5c ; double 126.94
.quad 0x405f700000000000 ; double 125.75
.quad 0x405f7c28f5c28f5c ; double 125.94
.quad 0x4060200000000000 ; double 129
.quad 0x40600e147ae147ae ; double 128.44
.quad 0x4060600000000000 ; double 131
.quad 0x405ec00000000000 ; double 123
.quad 0x405e17ae147ae148 ; double 120.37
.quad 0x405dbc28f5c28f5c ; double 118.94
.quad 0x405e000000000000 ; double 120
.quad 0x405e27ae147ae148 ; double 120.62
.quad 0x405e000000000000 ; double 120
.quad 0x405dd00000000000 ; double 119.25
.quad 0x405da00000000000 ; double 118.5
.quad 0x405ce3d70a3d70a4 ; double 115.56
.quad 0x405ca00000000000 ; double 114.5
.quad 0x405cc3d70a3d70a4 ; double 115.06
.quad 0x405d27ae147ae148 ; double 116.62
.quad 0x405d3c28f5c28f5c ; double 116.94
.quad 0x405d13d70a3d70a4 ; double 116.31
.quad 0x405cb3d70a3d70a4 ; double 114.81
.quad 0x405c5c28f5c28f5c ; double 113.44
.quad 0x405cac28f5c28f5c ; double 114.69
.quad 0x405c07ae147ae148 ; double 112.12
.quad 0x405c13d70a3d70a4 ; double 112.31
.quad 0x405b600000000000 ; double 109.5
.quad 0x405b500000000000 ; double 109.25
.quad 0x405b47ae147ae148 ; double 109.12
.quad 0x405c0c28f5c28f5c ; double 112.19
.quad 0x405bf7ae147ae148 ; double 111.87
.quad 0x405cdc28f5c28f5c ; double 115.44
.quad 0x405d33d70a3d70a4 ; double 116.81
.quad 0x405d83d70a3d70a4 ; double 118.06
.quad 0x405de7ae147ae148 ; double 119.62
.quad 0x405de3d70a3d70a4 ; double 119.56
.quad 0x405e000000000000 ; double 120
.quad 0x405dc00000000000 ; double 119
.quad 0x405d0c28f5c28f5c ; double 116.19
.quad 0x405d27ae147ae148 ; double 116.62
.quad 0x405d6c28f5c28f5c ; double 117.69
.quad 0x405d93d70a3d70a4 ; double 118.31
.quad 0x405e3c28f5c28f5c ; double 120.94
.quad 0x405e67ae147ae148 ; double 121.62
.quad 0x405e600000000000 ; double 121.5
.quad 0x405ec00000000000 ; double 123
.quad 0x405ee7ae147ae148 ; double 123.62
.quad 0x405f0c28f5c28f5c ; double 124.19
.quad 0x405f73d70a3d70a4 ; double 125.81
.quad 0x405fc00000000000 ; double 127
.quad 0x405f700000000000 ; double 125.75
.quad 0x405ff00000000000 ; double 127.75
.quad 0x406029eb851eb852 ; double 129.31
.quad 0x405e900000000000 ; double 122.25
.quad 0x405e000000000000 ; double 120
.quad 0x405eb00000000000 ; double 122.75
.quad 0x405e000000000000 ; double 120
.quad 0x405d700000000000 ; double 117.75
.quad 0x405dbc28f5c28f5c ; double 118.94
.quad 0x405d900000000000 ; double 118.25
.quad 0x405db7ae147ae148 ; double 118.87
.quad 0x405d57ae147ae148 ; double 117.37
.quad 0x405d5c28f5c28f5c ; double 117.44
.quad 0x405d37ae147ae148 ; double 116.87
.quad 0x405da00000000000 ; double 118.5
.quad 0x405d27ae147ae148 ; double 116.62
.quad 0x405d7c28f5c28f5c ; double 117.94
.quad 0x405d67ae147ae148 ; double 117.62
.quad 0x405e000000000000 ; double 120
.quad 0x405da00000000000 ; double 118.5
.quad 0x405bd00000000000 ; double 111.25
.quad 0x405afc28f5c28f5c ; double 107.94
.quad 0x405b800000000000 ; double 110
.quad 0x405b73d70a3d70a4 ; double 109.81
.quad 0x405a900000000000 ; double 106.25
.quad 0x405af3d70a3d70a4 ; double 107.81
.quad 0x405aa00000000000 ; double 106.5
.quad 0x405a67ae147ae148 ; double 105.62
.quad 0x405a73d70a3d70a4 ; double 105.81
.quad 0x405aa00000000000 ; double 106.5
.quad 0x405a97ae147ae148 ; double 106.37
.quad 0x405aac28f5c28f5c ; double 106.69
.quad 0x405a37ae147ae148 ; double 104.87
.quad 0x405a3c28f5c28f5c ; double 104.94
.quad 0x405a37ae147ae148 ; double 104.87
.quad 0x405a600000000000 ; double 105.5
.quad 0x405aa7ae147ae148 ; double 106.62
.quad 0x405aac28f5c28f5c ; double 106.69
.quad 0x4059ec28f5c28f5c ; double 103.69
.quad 0x4059d7ae147ae148 ; double 103.37
.quad 0x4059d00000000000 ; double 103.25
.quad 0x4059c00000000000 ; double 103
.quad 0x405993d70a3d70a4 ; double 102.31
.quad 0x405973d70a3d70a4 ; double 101.81
.quad 0x405957ae147ae148 ; double 101.37
.quad 0x405947ae147ae148 ; double 101.12
.quad 0x405927ae147ae148 ; double 100.62
.quad 0x4058f7ae147ae148 ; double 99.870000000000005
.quad 0x40588c28f5c28f5c ; double 98.189999999999998
.quad 0x4058900000000000 ; double 98.25
.quad 0x4058f3d70a3d70a4 ; double 99.810000000000002
.quad 0x405917ae147ae148 ; double 100.37
.quad 0x4059d3d70a3d70a4 ; double 103.31
.quad 0x4059a00000000000 ; double 102.5
.quad 0x405a23d70a3d70a4 ; double 104.56
.quad 0x405a67ae147ae148 ; double 105.62
.quad 0x405a77ae147ae148 ; double 105.87
.quad 0x405a500000000000 ; double 105.25
.quad 0x4059b7ae147ae148 ; double 102.87
.quad 0x4059fc28f5c28f5c ; double 103.94
.quad 0x4059dc28f5c28f5c ; double 103.44
.quad 0x405a27ae147ae148 ; double 104.62
.quad 0x4059e7ae147ae148 ; double 103.62
.quad 0x4059ec28f5c28f5c ; double 103.69
.quad 0x4059e7ae147ae148 ; double 103.62
.quad 0x4059d00000000000 ; double 103.25
.quad 0x4059d00000000000 ; double 103.25
.quad 0x40599c28f5c28f5c ; double 102.44
.quad 0x4058e7ae147ae148 ; double 99.620000000000005
.quad 0x4058e7ae147ae148 ; double 99.620000000000005
.quad 0x4058e3d70a3d70a4 ; double 99.560000000000002
.quad 0x4058f3d70a3d70a4 ; double 99.810000000000002
.quad 0x4059100000000000 ; double 100.25
.quad 0x4059500000000000 ; double 101.25
.quad 0x4058e3d70a3d70a4 ; double 99.560000000000002
.quad 0x4058d3d70a3d70a4 ; double 99.310000000000002
.quad 0x4058800000000000 ; double 98
.quad 0x405877ae147ae148 ; double 97.870000000000005
.quad 0x4058fc28f5c28f5c ; double 99.939999999999998
.quad 0x4059000000000000 ; double 100
.quad 0x405927ae147ae148 ; double 100.62
.quad 0x4059800000000000 ; double 102
.quad 0x405b17ae147ae148 ; double 108.37
.quad 0x405a4c28f5c28f5c ; double 105.19
.quad 0x405a3c28f5c28f5c ; double 104.94
.quad 0x405a000000000000 ; double 104
.quad 0x40599c28f5c28f5c ; double 102.44
.quad 0x405947ae147ae148 ; double 101.12
.quad 0x405a37ae147ae148 ; double 104.87
.quad 0x405a600000000000 ; double 105.5
.quad 0x405a400000000000 ; double 105
.quad 0x405acc28f5c28f5c ; double 107.19
.quad 0x405ab3d70a3d70a4 ; double 106.81
.quad 0x405a700000000000 ; double 105.75
.quad 0x405a4c28f5c28f5c ; double 105.19
.quad 0x405a600000000000 ; double 105.5
.quad 0x4059ec28f5c28f5c ; double 103.69
.quad 0x4059700000000000 ; double 101.75
.quad 0x405933d70a3d70a4 ; double 100.81
.quad 0x405a07ae147ae148 ; double 104.12
.quad 0x405a300000000000 ; double 104.75
.quad 0x4059bc28f5c28f5c ; double 102.94
.quad 0x4059bc28f5c28f5c ; double 102.94
.quad 0x405a600000000000 ; double 105.5
.quad 0x405a500000000000 ; double 105.25
.quad 0x40597c28f5c28f5c ; double 101.94
.quad 0x4059dc28f5c28f5c ; double 103.44
.quad 0x4059fc28f5c28f5c ; double 103.94
.quad 0x405b67ae147ae148 ; double 109.62
.quad 0x405c100000000000 ; double 112.25
.quad 0x405c600000000000 ; double 113.5
.quad 0x405c37ae147ae148 ; double 112.87
.quad 0x405bd3d70a3d70a4 ; double 111.31
.quad 0x405be7ae147ae148 ; double 111.62
.quad 0x405c0c28f5c28f5c ; double 112.19
.quad 0x405c53d70a3d70a4 ; double 113.31
.quad 0x405b7c28f5c28f5c ; double 109.94
.quad 0x405b77ae147ae148 ; double 109.87
.quad 0x405ae00000000000 ; double 107.5
.quad 0x405a700000000000 ; double 105.75
.quad 0x405a97ae147ae148 ; double 106.37
.quad 0x405a400000000000 ; double 105
.quad 0x4059fc28f5c28f5c ; double 103.94
.quad 0x405a17ae147ae148 ; double 104.37
.quad 0x405a37ae147ae148 ; double 104.87
.quad 0x405983d70a3d70a4 ; double 102.06
.quad 0x4058d7ae147ae148 ; double 99.370000000000005
.quad 0x4058e00000000000 ; double 99.5
.quad 0x4059000000000000 ; double 100
.quad 0x405933d70a3d70a4 ; double 100.81
.quad 0x40591c28f5c28f5c ; double 100.44
.quad 0x4059cc28f5c28f5c ; double 103.19
.quad 0x4059fc28f5c28f5c ; double 103.94
.quad 0x4059b00000000000 ; double 102.75
.quad 0x40598c28f5c28f5c ; double 102.19
.quad 0x4058cc28f5c28f5c ; double 99.189999999999998
.quad 0x4058d7ae147ae148 ; double 99.370000000000005
.quad 0x4059900000000000 ; double 102.25
.quad 0x4058fc28f5c28f5c ; double 99.939999999999998
.quad 0x4058a7ae147ae148 ; double 98.620000000000005
.quad 0x4059b3d70a3d70a4 ; double 102.81
.quad 0x4059f7ae147ae148 ; double 103.87
.quad 0x405a7c28f5c28f5c ; double 105.94
.quad 0x405a57ae147ae148 ; double 105.37
.quad 0x4058700000000000 ; double 97.75
.quad 0x4058b7ae147ae148 ; double 98.870000000000005
.quad 0x4059ac28f5c28f5c ; double 102.69
.quad 0x4059b7ae147ae148 ; double 102.87
.quad 0x405a07ae147ae148 ; double 104.12
.quad 0x405a600000000000 ; double 105.5
.quad 0x405a57ae147ae148 ; double 105.37
.quad 0x405a73d70a3d70a4 ; double 105.81
.quad 0x405ab7ae147ae148 ; double 106.87
.quad 0x405ac7ae147ae148 ; double 107.12
.quad 0x405a500000000000 ; double 105.25
.quad 0x405ad00000000000 ; double 107.25
.quad 0x405a23d70a3d70a4 ; double 104.56
.quad 0x405a9c28f5c28f5c ; double 106.44
.quad 0x405adc28f5c28f5c ; double 107.44
.quad 0x405a47ae147ae148 ; double 105.12
.quad 0x4059cc28f5c28f5c ; double 103.19
.quad 0x4059c00000000000 ; double 103
.quad 0x405a1c28f5c28f5c ; double 104.44
.quad 0x405a400000000000 ; double 105
.quad 0x405a47ae147ae148 ; double 105.12
.quad 0x4058e7ae147ae148 ; double 99.620000000000005
.quad 0x4059100000000000 ; double 100.25
.quad 0x4059500000000000 ; double 101.25
.quad 0x4059200000000000 ; double 100.5
.quad 0x4058cc28f5c28f5c ; double 99.189999999999998
.quad 0x4058800000000000 ; double 98
.quad 0x405897ae147ae148 ; double 98.370000000000005
.quad 0x405903d70a3d70a4 ; double 100.06
.quad 0x405a000000000000 ; double 104
.quad 0x405a13d70a3d70a4 ; double 104.31
.quad 0x405a500000000000 ; double 105.25
.quad 0x405a23d70a3d70a4 ; double 104.56
.quad 0x405a47ae147ae148 ; double 105.12
.quad 0x405a13d70a3d70a4 ; double 104.31
.quad 0x4059b7ae147ae148 ; double 102.87
.quad 0x4059f7ae147ae148 ; double 103.87
.quad 0x405a33d70a3d70a4 ; double 104.81
.quad 0x405a17ae147ae148 ; double 104.37
.quad 0x405ac00000000000 ; double 107
.quad 0x405aa7ae147ae148 ; double 106.62
.quad 0x405afc28f5c28f5c ; double 107.94
.quad 0x405b1c28f5c28f5c ; double 108.44
.quad 0x405b000000000000 ; double 108
.quad 0x405a07ae147ae148 ; double 104.12
.quad 0x405a200000000000 ; double 104.5
.quad 0x405a47ae147ae148 ; double 105.12
.quad 0x405a800000000000 ; double 106
.quad 0x405a67ae147ae148 ; double 105.62
.quad 0x405a53d70a3d70a4 ; double 105.31
.quad 0x405ac7ae147ae148 ; double 107.12
.quad 0x405b500000000000 ; double 109.25
.quad 0x405b5c28f5c28f5c ; double 109.44
.quad 0x405afc28f5c28f5c ; double 107.94
.quad 0x405aa00000000000 ; double 106.5
.quad 0x405a7c28f5c28f5c ; double 105.94
.quad 0x405ac00000000000 ; double 107
.quad 0x405a800000000000 ; double 106
.quad 0x405a27ae147ae148 ; double 104.62
.quad 0x405ab7ae147ae148 ; double 106.87
.quad 0x405b1c28f5c28f5c ; double 108.44
.quad 0x405b1c28f5c28f5c ; double 108.44
.quad 0x405ac00000000000 ; double 107
.quad 0x4059e00000000000 ; double 103.5
.quad 0x405a93d70a3d70a4 ; double 106.31
.quad 0x405a500000000000 ; double 105.25
.quad 0x4059700000000000 ; double 101.75
.quad 0x4058800000000000 ; double 98
.quad 0x4058100000000000 ; double 96.25
.quad 0x40581c28f5c28f5c ; double 96.439999999999998
.quad 0x405857ae147ae148 ; double 97.370000000000005
.quad 0x40580c28f5c28f5c ; double 96.189999999999998
.quad 0x40582c28f5c28f5c ; double 96.689999999999998
.quad 0x4057f00000000000 ; double 95.75
.quad 0x4057e00000000000 ; double 95.5
.quad 0x4057c7ae147ae148 ; double 95.120000000000005
.quad 0x405767ae147ae148 ; double 93.620000000000005
.quad 0x4057100000000000 ; double 92.25
.quad 0x4057200000000000 ; double 92.5
.quad 0x4057000000000000 ; double 92
.quad 0x40573c28f5c28f5c ; double 92.939999999999998
.quad 0x4057700000000000 ; double 93.75
.quad 0x4057100000000000 ; double 92.25
.quad 0x4056d7ae147ae148 ; double 91.370000000000005
.quad 0x4056fc28f5c28f5c ; double 91.939999999999998
.quad 0x4056b00000000000 ; double 90.75
.quad 0x4056600000000000 ; double 89.5
.quad 0x4056b00000000000 ; double 90.75
.quad 0x4056800000000000 ; double 90
.quad 0x4056700000000000 ; double 89.75
.quad 0x405617ae147ae148 ; double 88.370000000000005
.quad 0x4055d7ae147ae148 ; double 87.370000000000005
.quad 0x4055f7ae147ae148 ; double 87.870000000000005
.quad 0x4055e7ae147ae148 ; double 87.620000000000005
.quad 0x4055800000000000 ; double 86
.quad 0x405537ae147ae148 ; double 84.870000000000005
.quad 0x405567ae147ae148 ; double 85.620000000000005
.quad 0x4055a7ae147ae148 ; double 86.620000000000005
.quad 0x4055f7ae147ae148 ; double 87.870000000000005
.quad 0x4056100000000000 ; double 88.25
.quad 0x4056800000000000 ; double 90
.quad 0x4056f7ae147ae148 ; double 91.870000000000005
.quad 0x4066680000000000 ; double 179.25
.quad 0x4065dc28f5c28f5c ; double 174.88
.quad 0x4066000000000000 ; double 176
.quad 0x40662c28f5c28f5c ; double 177.38
.quad 0x4065f00000000000 ; double 175.5
.quad 0x40654c28f5c28f5c ; double 170.38
.quad 0x4065b80000000000 ; double 173.75
.quad 0x4065f428f5c28f5c ; double 175.63
.quad 0x4065e00000000000 ; double 175
.quad 0x40662428f5c28f5c ; double 177.13
.quad 0x4065b00000000000 ; double 173.5
.quad 0x4065280000000000 ; double 169.25
.quad 0x4065280000000000 ; double 169.25
.quad 0x4064dc28f5c28f5c ; double 166.88
.quad 0x4064ec28f5c28f5c ; double 167.38
.quad 0x4064c80000000000 ; double 166.25
.quad 0x40645c28f5c28f5c ; double 162.88
.quad 0x40643428f5c28f5c ; double 161.63
.quad 0x40642428f5c28f5c ; double 161.13
.quad 0x4063e00000000000 ; double 159
.quad 0x4062f80000000000 ; double 151.75
.quad 0x40631c28f5c28f5c ; double 152.88
.quad 0x4063a00000000000 ; double 157
.quad 0x4061f80000000000 ; double 143.75
.quad 0x40618428f5c28f5c ; double 140.13
.quad 0x4061a80000000000 ; double 141.25
.quad 0x40618428f5c28f5c ; double 140.13
.quad 0x4061780000000000 ; double 139.75
.quad 0x40614c28f5c28f5c ; double 138.38
.quad 0x4061600000000000 ; double 139
.quad 0x40611c28f5c28f5c ; double 136.88
.quad 0x4060d80000000000 ; double 134.75
.quad 0x4060dc28f5c28f5c ; double 134.88
.quad 0x4061400000000000 ; double 138
.quad 0x40611c28f5c28f5c ; double 136.88
.quad 0x4060b00000000000 ; double 133.5
.quad 0x40606428f5c28f5c ; double 131.13
.quad 0x4060880000000000 ; double 132.25
.quad 0x40612c28f5c28f5c ; double 137.38
.quad 0x4061380000000000 ; double 137.75
.quad 0x4061700000000000 ; double 139.5
.quad 0x4061b428f5c28f5c ; double 141.63
.quad 0x4061a428f5c28f5c ; double 141.13
.quad 0x40614c28f5c28f5c ; double 138.38
.quad 0x40611c28f5c28f5c ; double 136.88
.quad 0x40612428f5c28f5c ; double 137.13
.quad 0x4061780000000000 ; double 139.75
.quad 0x4061580000000000 ; double 138.75
.quad 0x4061980000000000 ; double 140.75
.quad 0x4061e428f5c28f5c ; double 143.13
.quad 0x40620c28f5c28f5c ; double 144.38
.quad 0x4062480000000000 ; double 146.25
.quad 0x4062680000000000 ; double 147.25
.quad 0x4062b80000000000 ; double 149.75
.quad 0x4062600000000000 ; double 147
.quad 0x40627428f5c28f5c ; double 147.63
.quad 0x4062a00000000000 ; double 149
.quad 0x4062500000000000 ; double 146.5
.quad 0x4062580000000000 ; double 146.75
.quad 0x4062580000000000 ; double 146.75
.quad 0x4062380000000000 ; double 145.75
.quad 0x40625c28f5c28f5c ; double 146.88
.quad 0x4062700000000000 ; double 147.5
.quad 0x4062800000000000 ; double 148
.quad 0x4062000000000000 ; double 144
.quad 0x40619c28f5c28f5c ; double 140.88
.quad 0x4061f00000000000 ; double 143.5
.quad 0x40623c28f5c28f5c ; double 145.88
.quad 0x4062300000000000 ; double 145.5
.quad 0x4062780000000000 ; double 147.75
.quad 0x40628428f5c28f5c ; double 148.13
.quad 0x40625428f5c28f5c ; double 146.63
.quad 0x4062280000000000 ; double 145.25
.quad 0x4062980000000000 ; double 148.75
.quad 0x4062c00000000000 ; double 150
.quad 0x4062b80000000000 ; double 149.75
.quad 0x40633428f5c28f5c ; double 153.63
.quad 0x4063580000000000 ; double 154.75
.quad 0x4063ac28f5c28f5c ; double 157.38
.quad 0x4063bc28f5c28f5c ; double 157.88
.quad 0x4063c80000000000 ; double 158.25
.quad 0x4063980000000000 ; double 156.75
.quad 0x4063480000000000 ; double 154.25
.quad 0x4062f80000000000 ; double 151.75
.quad 0x4063400000000000 ; double 154
.quad 0x4064080000000000 ; double 160.25
.quad 0x4064000000000000 ; double 160
.quad 0x40654428f5c28f5c ; double 170.13
.quad 0x4064f428f5c28f5c ; double 167.63
.quad 0x4064b80000000000 ; double 165.75
.quad 0x4064d80000000000 ; double 166.75
.quad 0x40650c28f5c28f5c ; double 168.38
.quad 0x4064f428f5c28f5c ; double 167.63
.quad 0x4064c80000000000 ; double 166.25
.quad 0x4064800000000000 ; double 164
.quad 0x4064400000000000 ; double 162
.quad 0x4064880000000000 ; double 164.25
.quad 0x4064780000000000 ; double 163.75
.quad 0x40646c28f5c28f5c ; double 163.38
.quad 0x4063e428f5c28f5c ; double 159.13
.quad 0x40634c28f5c28f5c ; double 154.38
.quad 0x40636c28f5c28f5c ; double 155.38
.quad 0x4063700000000000 ; double 155.5
.quad 0x40637428f5c28f5c ; double 155.63
.quad 0x4063cc28f5c28f5c ; double 158.38
.quad 0x4063800000000000 ; double 156
.quad 0x40636c28f5c28f5c ; double 155.38
.quad 0x4063cc28f5c28f5c ; double 158.38
.quad 0x4063f80000000000 ; double 159.75
.quad 0x4063e00000000000 ; double 159
.quad 0x40633428f5c28f5c ; double 153.63
.quad 0x4063280000000000 ; double 153.25
.quad 0x40635c28f5c28f5c ; double 154.88
.quad 0x4063d00000000000 ; double 158.5
.quad 0x4063d428f5c28f5c ; double 158.63
.quad 0x40643c28f5c28f5c ; double 161.88
.quad 0x40640428f5c28f5c ; double 160.13
.quad 0x4063c00000000000 ; double 158
.quad 0x40645428f5c28f5c ; double 162.63
.quad 0x4064800000000000 ; double 164
.quad 0x4064c00000000000 ; double 166
.quad 0x4064700000000000 ; double 163.5
.quad 0x4063f428f5c28f5c ; double 159.63
.quad 0x4064080000000000 ; double 160.25
.quad 0x4064180000000000 ; double 160.75
.quad 0x4063d00000000000 ; double 158.5
.quad 0x4063dc28f5c28f5c ; double 158.88
.quad 0x40635c28f5c28f5c ; double 154.88
.quad 0x4063e00000000000 ; double 159
.quad 0x40635c28f5c28f5c ; double 154.88
.quad 0x4062c00000000000 ; double 150
.quad 0x4062300000000000 ; double 145.5
.quad 0x40613428f5c28f5c ; double 137.63
.quad 0x4060fc28f5c28f5c ; double 135.88
.quad 0x4060f80000000000 ; double 135.75
.quad 0x4060dc28f5c28f5c ; double 134.88
.quad 0x4060d00000000000 ; double 134.5
.quad 0x4060e428f5c28f5c ; double 135.13
.quad 0x4060bc28f5c28f5c ; double 133.88
.quad 0x4060700000000000 ; double 131.5
.quad 0x40602c28f5c28f5c ; double 129.38
.quad 0x40603428f5c28f5c ; double 129.63
.quad 0x40602428f5c28f5c ; double 129.13
.quad 0x405fe00000000000 ; double 127.5
.quad 0x405ff00000000000 ; double 127.75
.quad 0x40600428f5c28f5c ; double 128.13
.quad 0x4060000000000000 ; double 128
.quad 0x4060380000000000 ; double 129.75
.quad 0x40603c28f5c28f5c ; double 129.88
.quad 0x4060580000000000 ; double 130.75
.quad 0x4060ec28f5c28f5c ; double 135.38
.quad 0x4060300000000000 ; double 129.5
.quad 0x40602428f5c28f5c ; double 129.13
.quad 0x4060300000000000 ; double 129.5
.quad 0x4060680000000000 ; double 131.25
.quad 0x4060400000000000 ; double 130
.quad 0x40604428f5c28f5c ; double 130.13
.quad 0x4060000000000000 ; double 128
.quad 0x40602c28f5c28f5c ; double 129.38
.quad 0x4060300000000000 ; double 129.5
.quad 0x4060000000000000 ; double 128
.quad 0x405fc7ae147ae148 ; double 127.12
.quad 0x405f900000000000 ; double 126.25
.quad 0x405f77ae147ae148 ; double 125.87
.quad 0x405f500000000000 ; double 125.25
.quad 0x405fc7ae147ae148 ; double 127.12
.quad 0x405f600000000000 ; double 125.5
.quad 0x405ff7ae147ae148 ; double 127.87
.quad 0x405fb7ae147ae148 ; double 126.87
.quad 0x405fb7ae147ae148 ; double 126.87
.quad 0x405f300000000000 ; double 124.75
.quad 0x405f37ae147ae148 ; double 124.87
.quad 0x405f47ae147ae148 ; double 125.12
.quad 0x405f500000000000 ; double 125.25
.quad 0x405ee7ae147ae148 ; double 123.62
.quad 0x405ed7ae147ae148 ; double 123.37
.quad 0x405eb7ae147ae148 ; double 122.87
.quad 0x405d900000000000 ; double 118.25
.quad 0x405d900000000000 ; double 118.25
.quad 0x405dd7ae147ae148 ; double 119.37
.quad 0x405d700000000000 ; double 117.75
.quad 0x405cf7ae147ae148 ; double 115.87
.quad 0x405ca00000000000 ; double 114.5
.quad 0x405ce7ae147ae148 ; double 115.62
.quad 0x405cd7ae147ae148 ; double 115.37
.quad 0x405cc00000000000 ; double 115
.quad 0x405ce00000000000 ; double 115.5
.quad 0x405cf00000000000 ; double 115.75
.quad 0x405c300000000000 ; double 112.75
.quad 0x405c47ae147ae148 ; double 113.12
.quad 0x405c67ae147ae148 ; double 113.62
.quad 0x405c77ae147ae148 ; double 113.87
.quad 0x405c300000000000 ; double 112.75
.quad 0x405bb00000000000 ; double 110.75
.quad 0x405b97ae147ae148 ; double 110.37
.quad 0x405bd00000000000 ; double 111.25
.quad 0x405be7ae147ae148 ; double 111.62
.quad 0x405be00000000000 ; double 111.5
.quad 0x405c200000000000 ; double 112.5
.quad 0x405c500000000000 ; double 113.25
.quad 0x405c700000000000 ; double 113.75
.quad 0x405c77ae147ae148 ; double 113.87
.quad 0x405c37ae147ae148 ; double 112.87
.quad 0x405b87ae147ae148 ; double 110.12
.quad 0x405b57ae147ae148 ; double 109.37
.quad 0x405b57ae147ae148 ; double 109.37
.quad 0x405b400000000000 ; double 109
.quad 0x405b37ae147ae148 ; double 108.87
.quad 0x405b000000000000 ; double 108
.quad 0x405a600000000000 ; double 105.5
.quad 0x405a07ae147ae148 ; double 104.12
.quad 0x4059f00000000000 ; double 103.75
.quad 0x405727ae147ae148 ; double 92.620000000000005
.quad 0x4057500000000000 ; double 93.25
.quad 0x405757ae147ae148 ; double 93.370000000000005
.quad 0x4057d00000000000 ; double 95.25
.quad 0x4057e7ae147ae148 ; double 95.620000000000005
.quad 0x405877ae147ae148 ; double 97.870000000000005
.quad 0x4057f00000000000 ; double 95.75
.quad 0x4057b7ae147ae148 ; double 94.870000000000005
.quad 0x4058000000000000 ; double 96
.quad 0x405867ae147ae148 ; double 97.620000000000005
.quad 0x4058e00000000000 ; double 99.5
.quad 0x4059100000000000 ; double 100.25
.quad 0x4058d00000000000 ; double 99.25
.quad 0x4058d00000000000 ; double 99.25
.quad 0x405907ae147ae148 ; double 100.12
.quad 0x4059500000000000 ; double 101.25
.quad 0x4059900000000000 ; double 102.25
.quad 0x4058f7ae147ae148 ; double 99.870000000000005
.quad 0x4058e7ae147ae148 ; double 99.620000000000005
.quad 0x405907ae147ae148 ; double 100.12
.quad 0x4058f00000000000 ; double 99.75
.quad 0x405907ae147ae148 ; double 100.12
.quad 0x4059700000000000 ; double 101.75
.quad 0x4059900000000000 ; double 102.25
.quad 0x4059a7ae147ae148 ; double 102.62
.quad 0x4059a00000000000 ; double 102.5
.quad 0x4059b00000000000 ; double 102.75
.quad 0x405a100000000000 ; double 104.25
.quad 0x405a07ae147ae148 ; double 104.12
.quad 0x4059f7ae147ae148 ; double 103.87
.quad 0x405a17ae147ae148 ; double 104.37
.quad 0x4059d00000000000 ; double 103.25
.quad 0x4059800000000000 ; double 102
.quad 0x405a37ae147ae148 ; double 104.87
.quad 0x405a97ae147ae148 ; double 106.37
.quad 0x405aa7ae147ae148 ; double 106.62
.quad 0x405aa00000000000 ; double 106.5
.quad 0x405b100000000000 ; double 108.25
.quad 0x405ae7ae147ae148 ; double 107.62
.quad 0x405b17ae147ae148 ; double 108.37
.quad 0x405b57ae147ae148 ; double 109.37
.quad 0x405b77ae147ae148 ; double 109.87
.quad 0x405ba00000000000 ; double 110.5
.quad 0x405bc00000000000 ; double 111
.quad 0x405c07ae147ae148 ; double 112.12
.quad 0x405c200000000000 ; double 112.5
.quad 0x405be7ae147ae148 ; double 111.62
.quad 0x405b500000000000 ; double 109.25
.quad 0x405b77ae147ae148 ; double 109.87
.quad 0x405b37ae147ae148 ; double 108.87
.quad 0x405b200000000000 ; double 108.5
.quad 0x405b17ae147ae148 ; double 108.37
.quad 0x405af00000000000 ; double 107.75
.quad 0x405a800000000000 ; double 106
.quad 0x405ad00000000000 ; double 107.25
.quad 0x405b200000000000 ; double 108.5
.quad 0x405b400000000000 ; double 109
.quad 0x405b57ae147ae148 ; double 109.37
.quad 0x405b200000000000 ; double 108.5
.quad 0x405b900000000000 ; double 110.25
.quad 0x405b300000000000 ; double 108.75
.quad 0x405b37ae147ae148 ; double 108.87
.quad 0x405b200000000000 ; double 108.5
.quad 0x405ae7ae147ae148 ; double 107.62
.quad 0x405af00000000000 ; double 107.75
.quad 0x405ae7ae147ae148 ; double 107.62
.quad 0x405ac7ae147ae148 ; double 107.12
.quad 0x405ad00000000000 ; double 107.25
.quad 0x405e07ae147ae148 ; double 120.12
.quad 0x405d17ae147ae148 ; double 116.37
.quad 0x405c97ae147ae148 ; double 114.37
.quad 0x405ce7ae147ae148 ; double 115.62
.quad 0x405d97ae147ae148 ; double 118.37
.quad 0x405e37ae147ae148 ; double 120.87
.quad 0x405e07ae147ae148 ; double 120.12
.quad 0x405dd7ae147ae148 ; double 119.37
.quad 0x405df7ae147ae148 ; double 119.87
.quad 0x405e200000000000 ; double 120.5
.quad 0x405d87ae147ae148 ; double 118.12
.quad 0x405c300000000000 ; double 112.75
.quad 0x405c700000000000 ; double 113.75
.quad 0x405c17ae147ae148 ; double 112.37
.quad 0x405c700000000000 ; double 113.75
.quad 0x405c000000000000 ; double 112
.quad 0x405cc7ae147ae148 ; double 115.12
.quad 0x405cf00000000000 ; double 115.75
.quad 0x405d77ae147ae148 ; double 117.87
.quad 0x405e37ae147ae148 ; double 120.87
.quad 0x405f600000000000 ; double 125.5
.quad 0x405f47ae147ae148 ; double 125.12
.quad 0x405e000000000000 ; double 120
.quad 0x405db00000000000 ; double 118.75
.quad 0x405d27ae147ae148 ; double 116.62
.quad 0x405d000000000000 ; double 116
.quad 0x405d77ae147ae148 ; double 117.87
.quad 0x405d37ae147ae148 ; double 116.87
.quad 0x405d600000000000 ; double 117.5
.quad 0x405df7ae147ae148 ; double 119.87
.quad 0x405de00000000000 ; double 119.5
.quad 0x405e500000000000 ; double 121.25
.quad 0x405e700000000000 ; double 121.75
.quad 0x405fd7ae147ae148 ; double 127.37
.quad 0x40601c28f5c28f5c ; double 128.88
.quad 0x40601c28f5c28f5c ; double 128.88
.quad 0x405f600000000000 ; double 125.5
.quad 0x405f87ae147ae148 ; double 126.12
.quad 0x405f67ae147ae148 ; double 125.62
.quad 0x405e27ae147ae148 ; double 120.62
.quad 0x405e37ae147ae148 ; double 120.87
.quad 0x405db7ae147ae148 ; double 118.87
.quad 0x405da00000000000 ; double 118.5
.quad 0x405cc00000000000 ; double 115
.quad 0x405cb7ae147ae148 ; double 114.87
.quad 0x405cf7ae147ae148 ; double 115.87
.quad 0x405c97ae147ae148 ; double 114.37
.quad 0x405ca00000000000 ; double 114.5
.quad 0x405ca7ae147ae148 ; double 114.62
.quad 0x405cc7ae147ae148 ; double 115.12
.quad 0x405c17ae147ae148 ; double 112.37
.quad 0x405b77ae147ae148 ; double 109.87
.quad 0x405b97ae147ae148 ; double 110.37
.quad 0x405b77ae147ae148 ; double 109.87
.quad 0x405b400000000000 ; double 109
.quad 0x405aa7ae147ae148 ; double 106.62
.quad 0x405a900000000000 ; double 106.25
.quad 0x405b000000000000 ; double 108
.quad 0x405b400000000000 ; double 109
.quad 0x405a200000000000 ; double 104.5
.quad 0x4059f7ae147ae148 ; double 103.87
.quad 0x405a07ae147ae148 ; double 104.12
.quad 0x4058100000000000 ; double 96.25
.quad 0x4056500000000000 ; double 89.25
.quad 0x4055f00000000000 ; double 87.75
.quad 0x4055a00000000000 ; double 86.5
.quad 0x4055d00000000000 ; double 87.25
.quad 0x4056200000000000 ; double 88.5
.quad 0x4056500000000000 ; double 89.25
.quad 0x405647ae147ae148 ; double 89.120000000000005
.quad 0x4056500000000000 ; double 89.25
.quad 0x4056300000000000 ; double 88.75
.quad 0x405647ae147ae148 ; double 89.120000000000005
.quad 0x4056d00000000000 ; double 91.25
.quad 0x405707ae147ae148 ; double 92.120000000000005
.quad 0x4056f00000000000 ; double 91.75
.quad 0x4056d7ae147ae148 ; double 91.370000000000005
.quad 0x4057000000000000 ; double 92
.quad 0x405707ae147ae148 ; double 92.120000000000005
.quad 0x4056f7ae147ae148 ; double 91.870000000000005
.quad 0x4056c7ae147ae148 ; double 91.120000000000005
.quad 0x4057000000000000 ; double 92
.quad 0x4056f7ae147ae148 ; double 91.870000000000005
.quad 0x405687ae147ae148 ; double 90.120000000000005
.quad 0x4057000000000000 ; double 92
.quad 0x4057c7ae147ae148 ; double 95.120000000000005
.quad 0x4057d7ae147ae148 ; double 95.370000000000005
.quad 0x4058300000000000 ; double 96.75
.quad 0x4058500000000000 ; double 97.25
.quad 0x405837ae147ae148 ; double 96.870000000000005
.quad 0x4057e00000000000 ; double 95.5
.quad 0x4058200000000000 ; double 96.5
.quad 0x405847ae147ae148 ; double 97.120000000000005
.quad 0x4058500000000000 ; double 97.25
.quad 0x4058400000000000 ; double 97
.quad 0x405887ae147ae148 ; double 98.120000000000005
.quad 0x4058600000000000 ; double 97.5
.quad 0x4058600000000000 ; double 97.5
.quad 0x405857ae147ae148 ; double 97.370000000000005
.quad 0x405827ae147ae148 ; double 96.620000000000005
.quad 0x405817ae147ae148 ; double 96.370000000000005
.quad 0x4057b7ae147ae148 ; double 94.870000000000005
.quad 0x4057d7ae147ae148 ; double 95.370000000000005
.quad 0x4057f7ae147ae148 ; double 95.870000000000005
.quad 0x4058300000000000 ; double 96.75
.quad 0x4058100000000000 ; double 96.25
.quad 0x405857ae147ae148 ; double 97.370000000000005
.quad 0x4058800000000000 ; double 98
.quad 0x4058900000000000 ; double 98.25
.quad 0x4058f00000000000 ; double 99.75
.quad 0x405907ae147ae148 ; double 100.12
.quad 0x405927ae147ae148 ; double 100.62
.quad 0x405997ae147ae148 ; double 102.37
.quad 0x4059600000000000 ; double 101.5
.quad 0x4058f7ae147ae148 ; double 99.870000000000005
.quad 0x405867ae147ae148 ; double 97.620000000000005
.quad 0x4058c7ae147ae148 ; double 99.120000000000005
.quad 0x4058b00000000000 ; double 98.75
.quad 0x405827ae147ae148 ; double 96.620000000000005
.quad 0x405827ae147ae148 ; double 96.620000000000005
.quad 0x4058a00000000000 ; double 98.5
.quad 0x4058d00000000000 ; double 99.25
.quad 0x4058c00000000000 ; double 99
.quad 0x4058a00000000000 ; double 98.5
.quad 0x405897ae147ae148 ; double 98.370000000000005
.quad 0x4058900000000000 ; double 98.25
.quad 0x4058400000000000 ; double 97
.quad 0x405787ae147ae148 ; double 94.120000000000005
.quad 0x4057900000000000 ; double 94.25
.quad 0x4057600000000000 ; double 93.5
.quad 0x405737ae147ae148 ; double 92.870000000000005
.quad 0x4056c7ae147ae148 ; double 91.120000000000005
.quad 0x405787ae147ae148 ; double 94.120000000000005
.quad 0x4057d7ae147ae148 ; double 95.370000000000005
.quad 0x4057c00000000000 ; double 95
.quad 0x4057a7ae147ae148 ; double 94.620000000000005
.quad 0x4057d00000000000 ; double 95.25
.quad 0x4057e7ae147ae148 ; double 95.620000000000005
.quad 0x4057d00000000000 ; double 95.25
.quad 0x4057d7ae147ae148 ; double 95.370000000000005
.quad 0x4057a00000000000 ; double 94.5
.quad 0x405777ae147ae148 ; double 93.870000000000005
.quad 0x4057b00000000000 ; double 94.75
.quad 0x405797ae147ae148 ; double 94.370000000000005
.quad 0x4058200000000000 ; double 96.5
.quad 0x405887ae147ae148 ; double 98.120000000000005
.quad 0x405837ae147ae148 ; double 96.870000000000005
.quad 0x4057900000000000 ; double 94.25
.quad 0x4057300000000000 ; double 92.75
.quad 0x4057e7ae147ae148 ; double 95.620000000000005
.quad 0x4058b00000000000 ; double 98.75
.quad 0x4058a00000000000 ; double 98.5
.quad 0x405897ae147ae148 ; double 98.370000000000005
.quad 0x4059000000000000 ; double 100
.quad 0x4059500000000000 ; double 101.25
.quad 0x4059a7ae147ae148 ; double 102.62
.quad 0x4059c00000000000 ; double 103
.quad 0x405a200000000000 ; double 104.5
.quad 0x4059f00000000000 ; double 103.75
.quad 0x4059e00000000000 ; double 103.5
.quad 0x4059a7ae147ae148 ; double 102.62
.quad 0x4059f7ae147ae148 ; double 103.87
.quad 0x405a57ae147ae148 ; double 105.37
.quad 0x405aa7ae147ae148 ; double 106.62
.quad 0x405b200000000000 ; double 108.5
.quad 0x405b100000000000 ; double 108.25
.quad 0x405bd7ae147ae148 ; double 111.37
.quad 0x405c800000000000 ; double 114
.quad 0x405ca7ae147ae148 ; double 114.62
.quad 0x405c300000000000 ; double 112.75
.quad 0x405c07ae147ae148 ; double 112.12
.quad 0x405bb7ae147ae148 ; double 110.87
.quad 0x405b67ae147ae148 ; double 109.62
.quad 0x405b97ae147ae148 ; double 110.37
.quad 0x405b97ae147ae148 ; double 110.37
.quad 0x405b800000000000 ; double 110
.quad 0x405b57ae147ae148 ; double 109.37
.quad 0x405b700000000000 ; double 109.75
.quad 0x405b600000000000 ; double 109.5
.quad 0x405bf00000000000 ; double 111.75
.quad 0x405b87ae147ae148 ; double 110.12
.quad 0x405bd00000000000 ; double 111.25
.quad 0x405bd00000000000 ; double 111.25
.quad 0x405bb7ae147ae148 ; double 110.87
.quad 0x405b67ae147ae148 ; double 109.62
.quad 0x405b900000000000 ; double 110.25
.quad 0x405af7ae147ae148 ; double 107.87
.quad 0x405a37ae147ae148 ; double 104.87
.quad 0x405a17ae147ae148 ; double 104.37
.quad 0x405a600000000000 ; double 105.5
.quad 0x405b27ae147ae148 ; double 108.62
.quad 0x405ae00000000000 ; double 107.5
.quad 0x405a07ae147ae148 ; double 104.12
.quad 0x405a100000000000 ; double 104.25
.quad 0x4059800000000000 ; double 102
.quad 0x4059400000000000 ; double 101
.quad 0x4059500000000000 ; double 101.25
.quad 0x405927ae147ae148 ; double 100.62
.quad 0x4058b7ae147ae148 ; double 98.870000000000005
.quad 0x4058a7ae147ae148 ; double 98.620000000000005
.quad 0x405877ae147ae148 ; double 97.870000000000005
.quad 0x405887ae147ae148 ; double 98.120000000000005
.quad 0x405887ae147ae148 ; double 98.120000000000005
.quad 0x405827ae147ae148 ; double 96.620000000000005
.quad 0x4058ac28f5c28f5c ; double 98.689999999999998
.quad 0x4058d00000000000 ; double 99.25
.quad 0x4058d7ae147ae148 ; double 99.370000000000005
.quad 0x4058c7ae147ae148 ; double 99.120000000000005
.quad 0x4058a7ae147ae148 ; double 98.620000000000005
.quad 0x4058800000000000 ; double 98
.quad 0x4057a7ae147ae148 ; double 94.620000000000005
.quad 0x405767ae147ae148 ; double 93.620000000000005
.quad 0x4057800000000000 ; double 94
.quad 0x405757ae147ae148 ; double 93.370000000000005
.quad 0x4057100000000000 ; double 92.25
.quad 0x4056b00000000000 ; double 90.75
.quad 0x4056900000000000 ; double 90.25
.quad 0x4056b00000000000 ; double 90.75
.quad 0x4056d7ae147ae148 ; double 91.370000000000005
.quad 0x4057300000000000 ; double 92.75
.quad 0x4057600000000000 ; double 93.5
.quad 0x4057d7ae147ae148 ; double 95.370000000000005
.quad 0x4057c00000000000 ; double 95
.quad 0x405767ae147ae148 ; double 93.620000000000005
.quad 0x4057f00000000000 ; double 95.75
.quad 0x4058600000000000 ; double 97.5
.quad 0x405877ae147ae148 ; double 97.870000000000005
.quad 0x405877ae147ae148 ; double 97.870000000000005
.quad 0x405867ae147ae148 ; double 97.620000000000005
.quad 0x4057d7ae147ae148 ; double 95.370000000000005
.quad 0x4057700000000000 ; double 93.75
.quad 0x405807ae147ae148 ; double 96.120000000000005
.quad 0x4057e7ae147ae148 ; double 95.620000000000005
.quad 0x4057f7ae147ae148 ; double 95.870000000000005
.quad 0x4057d7ae147ae148 ; double 95.370000000000005
.quad 0x4057e7ae147ae148 ; double 95.620000000000005
.quad 0x4057c7ae147ae148 ; double 95.120000000000005
.quad 0x4057c00000000000 ; double 95
.quad 0x405797ae147ae148 ; double 94.370000000000005
.quad 0x4057d00000000000 ; double 95.25
.quad 0x4057a7ae147ae148 ; double 94.620000000000005
.quad 0x405807ae147ae148 ; double 96.120000000000005
.quad 0x4057a7ae147ae148 ; double 94.620000000000005
.quad 0x405757ae147ae148 ; double 93.370000000000005
.quad 0x4057900000000000 ; double 94.25
.quad 0x4058100000000000 ; double 96.25
.quad 0x405837ae147ae148 ; double 96.870000000000005
.quad 0x4057f00000000000 ; double 95.75
.quad 0x405777ae147ae148 ; double 93.870000000000005
.quad 0x405777ae147ae148 ; double 93.870000000000005
.quad 0x4057000000000000 ; double 92
.quad 0x405697ae147ae148 ; double 90.370000000000005
.quad 0x4056100000000000 ; double 88.25
.quad 0x4056200000000000 ; double 88.5
.quad 0x4056200000000000 ; double 88.5
.quad 0x4055d7ae147ae148 ; double 87.370000000000005
.quad 0x4055e7ae147ae148 ; double 87.620000000000005
.quad 0x4055b7ae147ae148 ; double 86.870000000000005
.quad 0x4055800000000000 ; double 86
.quad 0x405547ae147ae148 ; double 85.120000000000005
.quad 0x405527ae147ae148 ; double 84.620000000000005
.quad 0x405507ae147ae148 ; double 84.120000000000005
.quad 0x4054e7ae147ae148 ; double 83.620000000000005
.quad 0x4054e00000000000 ; double 83.5
.quad 0x4054e00000000000 ; double 83.5
.quad 0x4054d7ae147ae148 ; double 83.370000000000005
.quad 0x4055300000000000 ; double 84.75
.quad 0x405537ae147ae148 ; double 84.870000000000005
.quad 0x405547ae147ae148 ; double 85.120000000000005
.quad 0x405517ae147ae148 ; double 84.370000000000005
.quad 0x4054d7ae147ae148 ; double 83.370000000000005
.quad 0x4054a7ae147ae148 ; double 82.620000000000005
.quad 0x4055000000000000 ; double 84
.quad 0x4055000000000000 ; double 84
.quad 0x4054e7ae147ae148 ; double 83.620000000000005
.quad 0x4054e7ae147ae148 ; double 83.620000000000005
.quad 0x4054900000000000 ; double 82.25
.quad 0x4054b00000000000 ; double 82.75
.quad 0x4054c00000000000 ; double 83
.quad 0x405467ae147ae148 ; double 81.620000000000005
.quad 0x405457ae147ae148 ; double 81.370000000000005
.quad 0x4054000000000000 ; double 80
.quad 0x405437ae147ae148 ; double 80.870000000000005
.quad 0x4054500000000000 ; double 81.25
.quad 0x4054000000000000 ; double 80
.quad 0x405357ae147ae148 ; double 77.370000000000005
.quad 0x405317ae147ae148 ; double 76.370000000000005
.quad 0x4052d00000000000 ; double 75.25
.quad 0x4052d7ae147ae148 ; double 75.370000000000005
.quad 0x4052d7ae147ae148 ; double 75.370000000000005
.quad 0x4052f00000000000 ; double 75.75
.quad 0x4052a00000000000 ; double 74.5
.quad 0x4052c7ae147ae148 ; double 75.120000000000005
.quad 0x4052f00000000000 ; double 75.75
.quad 0x4053100000000000 ; double 76.25
.quad 0x4052f7ae147ae148 ; double 75.870000000000005
.quad 0x4052e7ae147ae148 ; double 75.620000000000005
.quad 0x4052d7ae147ae148 ; double 75.370000000000005
.quad 0x4052f00000000000 ; double 75.75
.quad 0x4052d00000000000 ; double 75.25
.quad 0x4052b7ae147ae148 ; double 74.870000000000005
.quad 0x405297ae147ae148 ; double 74.370000000000005
.quad 0x4052b7ae147ae148 ; double 74.870000000000005
.quad 0x4052d00000000000 ; double 75.25
.quad 0x405297ae147ae148 ; double 74.370000000000005
.quad 0x405277ae147ae148 ; double 73.870000000000005
.quad 0x405237ae147ae148 ; double 72.870000000000005
.quad 0x4052400000000000 ; double 73
.quad 0x405247ae147ae148 ; double 73.120000000000005
.quad 0x405257ae147ae148 ; double 73.370000000000005
.quad 0x405247ae147ae148 ; double 73.120000000000005
.quad 0x4052b7ae147ae148 ; double 74.870000000000005
.quad 0x4052a00000000000 ; double 74.5
.quad 0x4053200000000000 ; double 76.5
.quad 0x405367ae147ae148 ; double 77.620000000000005
.quad 0x405377ae147ae148 ; double 77.870000000000005
.quad 0x4053800000000000 ; double 78
.quad 0x4053800000000000 ; double 78
.quad 0x405337ae147ae148 ; double 76.870000000000005
.quad 0x405317ae147ae148 ; double 76.370000000000005
.quad 0x405347ae147ae148 ; double 77.120000000000005
.quad 0x4053500000000000 ; double 77.25
.quad 0x4052f7ae147ae148 ; double 75.870000000000005
.quad 0x4052f00000000000 ; double 75.75
.quad 0x4052d00000000000 ; double 75.25
.quad 0x4052a7ae147ae148 ; double 74.620000000000005
.quad 0x405277ae147ae148 ; double 73.870000000000005
.quad 0x4052a00000000000 ; double 74.5
.quad 0x405297ae147ae148 ; double 74.370000000000005
.quad 0x405297ae147ae148 ; double 74.370000000000005
.quad 0x4052a00000000000 ; double 74.5
.quad 0x4052900000000000 ; double 74.25
.quad 0x4052a7ae147ae148 ; double 74.620000000000005
.quad 0x405267ae147ae148 ; double 73.620000000000005
.quad 0x4051e7ae147ae148 ; double 71.620000000000005
.quad 0x4051e7ae147ae148 ; double 71.620000000000005
.quad 0x4051b7ae147ae148 ; double 70.870000000000005
.quad 0x4051c00000000000 ; double 71
.quad 0x4051a7ae147ae148 ; double 70.620000000000005
.quad 0x4051c00000000000 ; double 71
.quad 0x4051d7ae147ae148 ; double 71.370000000000005
.quad 0x4051e00000000000 ; double 71.5
.quad 0x4051e00000000000 ; double 71.5
.quad 0x4051d7ae147ae148 ; double 71.370000000000005
.quad 0x4052000000000000 ; double 72
.quad 0x405217ae147ae148 ; double 72.370000000000005
.quad 0x4051d7ae147ae148 ; double 71.370000000000005
.quad 0x4051b00000000000 ; double 70.75
.quad 0x4051d00000000000 ; double 71.25
.quad 0x4051c00000000000 ; double 71
.quad 0x4051e7ae147ae148 ; double 71.620000000000005
.quad 0x4051b7ae147ae148 ; double 70.870000000000005
.quad 0x4051800000000000 ; double 70
.quad 0x405237ae147ae148 ; double 72.870000000000005
.quad 0x405287ae147ae148 ; double 74.120000000000005
.quad 0x405257ae147ae148 ; double 73.370000000000005
.quad 0x4052700000000000 ; double 73.75
.quad 0x405247ae147ae148 ; double 73.120000000000005
.quad 0x405277ae147ae148 ; double 73.870000000000005
.quad 0x4052400000000000 ; double 73
.quad 0x4052400000000000 ; double 73
.quad 0x405267ae147ae148 ; double 73.620000000000005
.quad 0x405297ae147ae148 ; double 74.370000000000005
.quad 0x4052600000000000 ; double 73.5
.quad 0x4052100000000000 ; double 72.25
.quad 0x4052600000000000 ; double 73.5
.quad 0x4052a7ae147ae148 ; double 74.620000000000005
.quad 0x4052d00000000000 ; double 75.25
.quad 0x4052d00000000000 ; double 75.25
.quad 0x4052e7ae147ae148 ; double 75.620000000000005
.quad 0x405317ae147ae148 ; double 76.370000000000005
.quad 0x4052a7ae147ae148 ; double 74.620000000000005
.quad 0x4052a00000000000 ; double 74.5
.quad 0x405297ae147ae148 ; double 74.370000000000005
.quad 0x4052a7ae147ae148 ; double 74.620000000000005
.quad 0x4052d7ae147ae148 ; double 75.370000000000005
.quad 0x405307ae147ae148 ; double 76.120000000000005
.quad 0x4052f00000000000 ; double 75.75
.quad 0x4052a00000000000 ; double 74.5
.quad 0x4052700000000000 ; double 73.75
.quad 0x405247ae147ae148 ; double 73.120000000000005
.quad 0x405287ae147ae148 ; double 74.120000000000005
.quad 0x4052600000000000 ; double 73.5
.quad 0x405217ae147ae148 ; double 72.370000000000005
.quad 0x4051f7ae147ae148 ; double 71.870000000000005
.quad 0x4051f00000000000 ; double 71.75
.quad 0x4051600000000000 ; double 69.5
.quad 0x405147ae147ae148 ; double 69.120000000000005
.quad 0x4051600000000000 ; double 69.5
.quad 0x4051600000000000 ; double 69.5
.quad 0x405187ae147ae148 ; double 70.120000000000005
.quad 0x405177ae147ae148 ; double 69.870000000000005
.quad 0x405177ae147ae148 ; double 69.870000000000005
.quad 0x405157ae147ae148 ; double 69.370000000000005
.quad 0x405147ae147ae148 ; double 69.120000000000005
.quad 0x4051600000000000 ; double 69.5
.quad 0x4051900000000000 ; double 70.25
.quad 0x4051d7ae147ae148 ; double 71.370000000000005
.quad 0x4051a00000000000 ; double 70.5
.quad 0x4051d00000000000 ; double 71.25
.quad 0x4051d7ae147ae148 ; double 71.370000000000005
.quad 0x4051900000000000 ; double 70.25
.quad 0x405187ae147ae148 ; double 70.120000000000005
.quad 0x405167ae147ae148 ; double 69.620000000000005
.quad 0x405107ae147ae148 ; double 68.120000000000005
.quad 0x4051000000000000 ; double 68
.quad 0x405107ae147ae148 ; double 68.120000000000005
.quad 0x4051100000000000 ; double 68.25
.quad 0x4050e7ae147ae148 ; double 67.620000000000005
.quad 0x4051000000000000 ; double 68
.quad 0x4051100000000000 ; double 68.25
.quad 0x4051600000000000 ; double 69.5
.quad 0x4051700000000000 ; double 69.75
.quad 0x4051800000000000 ; double 70
.quad 0x405187ae147ae148 ; double 70.120000000000005
.quad 0x4051700000000000 ; double 69.75
.quad 0x4050d7ae147ae148 ; double 67.370000000000005
.quad 0x4051100000000000 ; double 68.25
.quad 0x405117ae147ae148 ; double 68.370000000000005
.quad 0x405127ae147ae148 ; double 68.620000000000005
.quad 0x4050b7ae147ae148 ; double 66.870000000000005
.quad 0x405067ae147ae148 ; double 65.620000000000005
.quad 0x4050300000000000 ; double 64.75
.quad 0x4050200000000000 ; double 64.5
.quad 0x405047ae147ae148 ; double 65.120000000000005
.quad 0x405027ae147ae148 ; double 64.620000000000005
.quad 0x4050400000000000 ; double 65
.quad 0x4050200000000000 ; double 64.5
.quad 0x404fb0a3d70a3d71 ; double 63.380000000000003
.quad 0x404f90a3d70a3d71 ; double 63.130000000000003
.quad 0x404fc00000000000 ; double 63.5
.quad 0x404f90a3d70a3d71 ; double 63.130000000000003
.quad 0x404fb0a3d70a3d71 ; double 63.380000000000003
.quad 0x404f90a3d70a3d71 ; double 63.130000000000003
.quad 0x404f600000000000 ; double 62.75
.quad 0x404f800000000000 ; double 63
.quad 0x404f400000000000 ; double 62.5
.quad 0x404f90a3d70a3d71 ; double 63.130000000000003
.quad 0x404f30a3d70a3d71 ; double 62.380000000000003
.quad 0x404f400000000000 ; double 62.5
.quad 0x404f50a3d70a3d71 ; double 62.630000000000003
.quad 0x404c200000000000 ; double 56.25
.quad 0x404c70a3d70a3d71 ; double 56.880000000000003
.quad 0x404cd0a3d70a3d71 ; double 57.630000000000003
.quad 0x404cf0a3d70a3d71 ; double 57.880000000000003
.quad 0x404d400000000000 ; double 58.5
.quad 0x404c600000000000 ; double 56.75
.quad 0x404c10a3d70a3d71 ; double 56.130000000000003
.quad 0x404c90a3d70a3d71 ; double 57.130000000000003
.quad 0x404cf0a3d70a3d71 ; double 57.880000000000003
.quad 0x404cb0a3d70a3d71 ; double 57.380000000000003
.quad 0x404c800000000000 ; double 57
.quad 0x404cf0a3d70a3d71 ; double 57.880000000000003
.quad 0x404d90a3d70a3d71 ; double 59.130000000000003
.quad 0x404e400000000000 ; double 60.5
.quad 0x404ec00000000000 ; double 61.5
.quad 0x404ef0a3d70a3d71 ; double 61.880000000000003
.quad 0x404ed0a3d70a3d71 ; double 61.630000000000003
.quad 0x404e800000000000 ; double 61
.quad 0x404f200000000000 ; double 62.25
.quad 0x404f10a3d70a3d71 ; double 62.130000000000003
.quad 0x404f10a3d70a3d71 ; double 62.130000000000003
.quad 0x404f200000000000 ; double 62.25
.quad 0x404f90a3d70a3d71 ; double 63.130000000000003
.quad 0x404fc00000000000 ; double 63.5
.quad 0x4050300000000000 ; double 64.75
.quad 0x4050400000000000 ; double 65
.quad 0x405007ae147ae148 ; double 64.120000000000005
.quad 0x404f70a3d70a3d71 ; double 62.880000000000003
.quad 0x404f10a3d70a3d71 ; double 62.130000000000003
.quad 0x404f800000000000 ; double 63
.quad 0x404fa00000000000 ; double 63.25
.quad 0x404fc00000000000 ; double 63.5
.quad 0x404ef0a3d70a3d71 ; double 61.880000000000003
.quad 0x404fc00000000000 ; double 63.5
.quad 0x404fd0a3d70a3d71 ; double 63.630000000000003
.quad 0x404fe00000000000 ; double 63.75
.quad 0x405007ae147ae148 ; double 64.120000000000005
.quad 0x405007ae147ae148 ; double 64.120000000000005
.quad 0x405007ae147ae148 ; double 64.120000000000005
.quad 0x404fb0a3d70a3d71 ; double 63.380000000000003
.quad 0x404f400000000000 ; double 62.5
.quad 0x404f30a3d70a3d71 ; double 62.380000000000003
.quad 0x404f10a3d70a3d71 ; double 62.130000000000003
.quad 0x404ef0a3d70a3d71 ; double 61.880000000000003
.quad 0x404ec00000000000 ; double 61.5
.quad 0x404d50a3d70a3d71 ; double 58.630000000000003
.quad 0x404cf0a3d70a3d71 ; double 57.880000000000003
.quad 0x404ce00000000000 ; double 57.75
.quad 0x404d200000000000 ; double 58.25
.quad 0x404d200000000000 ; double 58.25
.quad 0x404cf0a3d70a3d71 ; double 57.880000000000003
.quad 0x404c90a3d70a3d71 ; double 57.130000000000003
.quad 0x404cc00000000000 ; double 57.5
.quad 0x404d200000000000 ; double 58.25
.quad 0x404d50a3d70a3d71 ; double 58.630000000000003
.quad 0x404d30a3d70a3d71 ; double 58.380000000000003
.quad 0x404cf0a3d70a3d71 ; double 57.880000000000003
.quad 0x404d200000000000 ; double 58.25
.quad 0x404da00000000000 ; double 59.25
.quad 0x404da00000000000 ; double 59.25
.quad 0x404dc00000000000 ; double 59.5
.quad 0x404d50a3d70a3d71 ; double 58.630000000000003
.quad 0x404ac00000000000 ; double 53.5
.quad 0x404ae00000000000 ; double 53.75
.quad 0x404b10a3d70a3d71 ; double 54.130000000000003
.quad 0x404b400000000000 ; double 54.5
.quad 0x404af0a3d70a3d71 ; double 53.880000000000003
.quad 0x404ab0a3d70a3d71 ; double 53.380000000000003
.quad 0x404af0a3d70a3d71 ; double 53.880000000000003
.quad 0x404ab0a3d70a3d71 ; double 53.380000000000003
.quad 0x404a90a3d70a3d71 ; double 53.130000000000003
.quad 0x404aa00000000000 ; double 53.25
.quad 0x404ac66666666666 ; double 53.549999999999997
.quad 0x404b10a3d70a3d71 ; double 54.130000000000003
.quad 0x404ac00000000000 ; double 53.5
.quad 0x404b50a3d70a3d71 ; double 54.630000000000003
.quad 0x404b800000000000 ; double 55
.quad 0x404b30a3d70a3d71 ; double 54.380000000000003
.quad 0x404b50a3d70a3d71 ; double 54.630000000000003
.quad 0x404c400000000000 ; double 56.5
.quad 0x404c90a3d70a3d71 ; double 57.130000000000003
.quad 0x404cf0a3d70a3d71 ; double 57.880000000000003
.quad 0x404d50a3d70a3d71 ; double 58.630000000000003
.quad 0x404d70a3d70a3d71 ; double 58.880000000000003
.quad 0x404d200000000000 ; double 58.25
.quad 0x404d200000000000 ; double 58.25
.quad 0x404d200000000000 ; double 58.25
.quad 0x404d30a3d70a3d71 ; double 58.380000000000003
.quad 0x404cf0a3d70a3d71 ; double 57.880000000000003
.quad 0x404c200000000000 ; double 56.25
.quad 0x404c200000000000 ; double 56.25
.quad 0x404be00000000000 ; double 55.75
.quad 0x404b70a3d70a3d71 ; double 54.880000000000003
.quad 0x404a800000000000 ; double 53
.quad 0x404aa00000000000 ; double 53.25
.quad 0x404ab0a3d70a3d71 ; double 53.380000000000003
.quad 0x404ac00000000000 ; double 53.5
.quad 0x404af0a3d70a3d71 ; double 53.880000000000003
.quad 0x404aa00000000000 ; double 53.25
.quad 0x404ab0a3d70a3d71 ; double 53.380000000000003
.quad 0x404ab0a3d70a3d71 ; double 53.380000000000003
.quad 0x404b200000000000 ; double 54.25
.quad 0x404af0a3d70a3d71 ; double 53.880000000000003
.quad 0x404a70a3d70a3d71 ; double 52.880000000000003
.quad 0x404b800000000000 ; double 55
.quad 0x404ba00000000000 ; double 55.25
.quad 0x404b50a3d70a3d71 ; double 54.630000000000003
.quad 0x404b50a3d70a3d71 ; double 54.630000000000003
.quad 0x404ab0a3d70a3d71 ; double 53.380000000000003
.quad 0x404ad0a3d70a3d71 ; double 53.630000000000003
.quad 0x404b200000000000 ; double 54.25
.quad 0x404b600000000000 ; double 54.75
.quad 0x404b50a3d70a3d71 ; double 54.630000000000003
.quad 0x404bd0a3d70a3d71 ; double 55.630000000000003
.quad 0x404c70a3d70a3d71 ; double 56.880000000000003
.quad 0x404c400000000000 ; double 56.5
.quad 0x404cb0a3d70a3d71 ; double 57.380000000000003
.quad 0x404cf0a3d70a3d71 ; double 57.880000000000003
.quad 0x404d10a3d70a3d71 ; double 58.130000000000003
.quad 0x404cc00000000000 ; double 57.5
.quad 0x404d600000000000 ; double 58.75
.quad 0x404db0a3d70a3d71 ; double 59.380000000000003
.quad 0x404d600000000000 ; double 58.75
.quad 0x404bd0a3d70a3d71 ; double 55.630000000000003
.quad 0x404bf0a3d70a3d71 ; double 55.880000000000003
.quad 0x404c800000000000 ; double 57
.quad 0x404cb0a3d70a3d71 ; double 57.380000000000003
.quad 0x404d400000000000 ; double 58.5
.quad 0x404dc00000000000 ; double 59.5
.quad 0x404d800000000000 ; double 59
.quad 0x404d70a3d70a3d71 ; double 58.880000000000003
.quad 0x404db0a3d70a3d71 ; double 59.380000000000003
.quad 0x404dc00000000000 ; double 59.5
.quad 0x404db0a3d70a3d71 ; double 59.380000000000003
.quad 0x404e000000000000 ; double 60
.quad 0x404df0a3d70a3d71 ; double 59.880000000000003
.quad 0x404d90a3d70a3d71 ; double 59.130000000000003
.quad 0x404ce00000000000 ; double 57.75
.quad 0x404cb0a3d70a3d71 ; double 57.380000000000003
.quad 0x404d30a3d70a3d71 ; double 58.380000000000003
.quad 0x404d600000000000 ; double 58.75
.quad 0x404db0a3d70a3d71 ; double 59.380000000000003
.quad 0x404da00000000000 ; double 59.25
.quad 0x404db0a3d70a3d71 ; double 59.380000000000003
.quad 0x404da00000000000 ; double 59.25
.quad 0x404d90a3d70a3d71 ; double 59.130000000000003
.quad 0x404db0a3d70a3d71 ; double 59.380000000000003
.quad 0x404df0a3d70a3d71 ; double 59.880000000000003
.quad 0x404cd0a3d70a3d71 ; double 57.630000000000003
.quad 0x404cd0a3d70a3d71 ; double 57.630000000000003
.quad 0x404cb0a3d70a3d71 ; double 57.380000000000003
.quad 0x404ce00000000000 ; double 57.75
.quad 0x404bc00000000000 ; double 55.5
.quad 0x404b10a3d70a3d71 ; double 54.130000000000003
.quad 0x404b10a3d70a3d71 ; double 54.130000000000003
.quad 0x404b200000000000 ; double 54.25
.quad 0x404b30a3d70a3d71 ; double 54.380000000000003
.quad 0x404b000000000000 ; double 54
.quad 0x404b10a3d70a3d71 ; double 54.130000000000003
.quad 0x404b600000000000 ; double 54.75
.quad 0x404b90a3d70a3d71 ; double 55.130000000000003
.quad 0x404c10a3d70a3d71 ; double 56.130000000000003
.quad 0x404c000000000000 ; double 56
.quad 0x404ba00000000000 ; double 55.25
.quad 0x404aa00000000000 ; double 53.25
.quad 0x404a400000000000 ; double 52.5
.quad 0x404a50a3d70a3d71 ; double 52.630000000000003
.quad 0x404a90a3d70a3d71 ; double 53.130000000000003
.quad 0x404b200000000000 ; double 54.25
.quad 0x404a800000000000 ; double 53
.quad 0x4049d0a3d70a3d71 ; double 51.630000000000003
.quad 0x404a400000000000 ; double 52.5
.quad 0x404a30a3d70a3d71 ; double 52.380000000000003
.quad 0x404910a3d70a3d71 ; double 50.130000000000003
.quad 0x404950a3d70a3d71 ; double 50.630000000000003
.quad 0x4049600000000000 ; double 50.75
.quad 0x404910a3d70a3d71 ; double 50.130000000000003
.quad 0x4049400000000000 ; double 50.5
.quad 0x404990a3d70a3d71 ; double 51.130000000000003
.quad 0x404990a3d70a3d71 ; double 51.130000000000003
.quad 0x4048000000000000 ; double 48
.quad 0x404750a3d70a3d71 ; double 46.630000000000003
.quad 0x404730a3d70a3d71 ; double 46.380000000000003
.quad 0x404750a3d70a3d71 ; double 46.630000000000003
.quad 0x4047200000000000 ; double 46.25
.quad 0x4046800000000000 ; double 45
.quad 0x4046e00000000000 ; double 45.75
.quad 0x4046a00000000000 ; double 45.25
.quad 0x4045d0a3d70a3d71 ; double 43.630000000000003
.quad 0x404650a3d70a3d71 ; double 44.630000000000003
.quad 0x404690a3d70a3d71 ; double 45.130000000000003
.quad 0x404670a3d70a3d71 ; double 44.880000000000003
.quad 0x4045a00000000000 ; double 43.25
.quad 0x4045f0a3d70a3d71 ; double 43.880000000000003
.quad 0x404630a3d70a3d71 ; double 44.380000000000003
.quad 0x404630a3d70a3d71 ; double 44.380000000000003
.quad 0x404630a3d70a3d71 ; double 44.380000000000003
.quad 0x404610a3d70a3d71 ; double 44.130000000000003
.quad 0x404630a3d70a3d71 ; double 44.380000000000003
.quad 0x4046400000000000 ; double 44.5
.quad 0x4046600000000000 ; double 44.75
.quad 0x4045f0a3d70a3d71 ; double 43.880000000000003
.quad 0x404510a3d70a3d71 ; double 42.130000000000003
.quad 0x404510a3d70a3d71 ; double 42.130000000000003
.quad 0x404550a3d70a3d71 ; double 42.630000000000003
.quad 0x4044f0a3d70a3d71 ; double 41.880000000000003
.quad 0x4045000000000000 ; double 42
.quad 0x404530a3d70a3d71 ; double 42.380000000000003
.quad 0x4045400000000000 ; double 42.5
.quad 0x404550a3d70a3d71 ; double 42.630000000000003
.quad 0x4045b0a3d70a3d71 ; double 43.380000000000003
.quad 0x4045b0a3d70a3d71 ; double 43.380000000000003
.quad 0x4046000000000000 ; double 44
.quad 0x4045f0a3d70a3d71 ; double 43.880000000000003
.quad 0x4045f0a3d70a3d71 ; double 43.880000000000003
.quad 0x404630a3d70a3d71 ; double 44.380000000000003
.quad 0x404650a3d70a3d71 ; double 44.630000000000003
.quad 0x404650a3d70a3d71 ; double 44.630000000000003
.quad 0x4046b0a3d70a3d71 ; double 45.380000000000003
.quad 0x4047200000000000 ; double 46.25
.quad 0x404730a3d70a3d71 ; double 46.380000000000003
.quad 0x404730a3d70a3d71 ; double 46.380000000000003
.quad 0x404730a3d70a3d71 ; double 46.380000000000003
.quad 0x4046f0a3d70a3d71 ; double 45.880000000000003
.quad 0x4046600000000000 ; double 44.75
.quad 0x404610a3d70a3d71 ; double 44.130000000000003
.quad 0x4046600000000000 ; double 44.75
.quad 0x404610a3d70a3d71 ; double 44.130000000000003
.quad 0x404650a3d70a3d71 ; double 44.630000000000003
.quad 0x4046600000000000 ; double 44.75
.quad 0x4045a00000000000 ; double 43.25
.quad 0x4045a00000000000 ; double 43.25
.quad 0x4045c00000000000 ; double 43.5
.quad 0x4045800000000000 ; double 43
.quad 0x4044e00000000000 ; double 41.75
.quad 0x4045000000000000 ; double 42
.quad 0x4045400000000000 ; double 42.5
.quad 0x4045c00000000000 ; double 43.5
.quad 0x4045b0a3d70a3d71 ; double 43.380000000000003
.quad 0x4045e00000000000 ; double 43.75
.quad 0x4045e00000000000 ; double 43.75
.quad 0x4046600000000000 ; double 44.75
.quad 0x404630a3d70a3d71 ; double 44.380000000000003
.quad 0x404610a3d70a3d71 ; double 44.130000000000003
.quad 0x4046600000000000 ; double 44.75
.quad 0x4046400000000000 ; double 44.5
.quad 0x4046400000000000 ; double 44.5
.quad 0x404730a3d70a3d71 ; double 46.380000000000003
.quad 0x4046f0a3d70a3d71 ; double 45.880000000000003
.quad 0x4045800000000000 ; double 43
.quad 0x4045d0a3d70a3d71 ; double 43.630000000000003
.quad 0x4046600000000000 ; double 44.75
.quad 0x4045f0a3d70a3d71 ; double 43.880000000000003
.quad 0x4045e00000000000 ; double 43.75
.quad 0x4047000000000000 ; double 46
.quad 0x4047c00000000000 ; double 47.5
.quad 0x4047f0a3d70a3d71 ; double 47.880000000000003
.quad 0x4047f0a3d70a3d71 ; double 47.880000000000003
.quad 0x4048400000000000 ; double 48.5
.quad 0x4048200000000000 ; double 48.25
.quad 0x4047800000000000 ; double 47
.quad 0x4047d0a3d70a3d71 ; double 47.630000000000003
.quad 0x4047800000000000 ; double 47
.quad 0x404810a3d70a3d71 ; double 48.130000000000003
.quad 0x404850a3d70a3d71 ; double 48.630000000000003
.quad 0x4048e00000000000 ; double 49.75
.quad 0x4048f0a3d70a3d71 ; double 49.880000000000003
.quad 0x4049400000000000 ; double 50.5
.quad 0x4049600000000000 ; double 50.75
.quad 0x4048e00000000000 ; double 49.75
.quad 0x404890a3d70a3d71 ; double 49.130000000000003
.quad 0x4048b0a3d70a3d71 ; double 49.380000000000003
.quad 0x4048d0a3d70a3d71 ; double 49.630000000000003
.quad 0x404910a3d70a3d71 ; double 50.130000000000003
.quad 0x4048d0a3d70a3d71 ; double 49.630000000000003
.quad 0x4049200000000000 ; double 50.25
.quad 0x404930a3d70a3d71 ; double 50.380000000000003
.quad 0x4049f0a3d70a3d71 ; double 51.880000000000003
.quad 0x404a400000000000 ; double 52.5
.quad 0x404a800000000000 ; double 53
.quad 0x404a600000000000 ; double 52.75
.quad 0x404ab0a3d70a3d71 ; double 53.380000000000003
.quad 0x404a400000000000 ; double 52.5
.quad 0x404b000000000000 ; double 54
.quad 0x404b30a3d70a3d71 ; double 54.380000000000003
.quad 0x404b30a3d70a3d71 ; double 54.380000000000003
.quad 0x404b000000000000 ; double 54
.quad 0x404ab0a3d70a3d71 ; double 53.380000000000003
.quad 0x404a90a3d70a3d71 ; double 53.130000000000003
.quad 0x404b000000000000 ; double 54
.quad 0x404a600000000000 ; double 52.75
.quad 0x404970a3d70a3d71 ; double 50.880000000000003
.quad 0x4048f0a3d70a3d71 ; double 49.880000000000003
.quad 0x4048a00000000000 ; double 49.25
.quad 0x4048c00000000000 ; double 49.5
.quad 0x404910a3d70a3d71 ; double 50.130000000000003
.quad 0x4048800000000000 ; double 49
.quad 0x4048200000000000 ; double 48.25
.quad 0x404810a3d70a3d71 ; double 48.130000000000003
.quad 0x4047e00000000000 ; double 47.75
.quad 0x404850a3d70a3d71 ; double 48.630000000000003
.quad 0x404870a3d70a3d71 ; double 48.880000000000003
.quad 0x404890a3d70a3d71 ; double 49.130000000000003
.quad 0x404850a3d70a3d71 ; double 48.630000000000003
.quad 0x404850a3d70a3d71 ; double 48.630000000000003
.quad 0x4048c00000000000 ; double 49.5
.quad 0x4048f0a3d70a3d71 ; double 49.880000000000003
.quad 0x4048b0a3d70a3d71 ; double 49.380000000000003
.quad 0x4048b0a3d70a3d71 ; double 49.380000000000003
.quad 0x404910a3d70a3d71 ; double 50.130000000000003
.quad 0x4049600000000000 ; double 50.75
.quad 0x4048600000000000 ; double 48.75
.quad 0x4048a00000000000 ; double 49.25
.quad 0x404830a3d70a3d71 ; double 48.380000000000003
.quad 0x4048800000000000 ; double 49
.quad 0x4049200000000000 ; double 50.25
.quad 0x404950a3d70a3d71 ; double 50.630000000000003
.quad 0x404950a3d70a3d71 ; double 50.630000000000003
.quad 0x404890a3d70a3d71 ; double 49.130000000000003
.quad 0x4048a00000000000 ; double 49.25
.quad 0x4048d0a3d70a3d71 ; double 49.630000000000003
.quad 0x404990a3d70a3d71 ; double 51.130000000000003
.quad 0x4049f0a3d70a3d71 ; double 51.880000000000003
.quad 0x404a30a3d70a3d71 ; double 52.380000000000003
.quad 0x404a200000000000 ; double 52.25
.quad 0x404a50a3d70a3d71 ; double 52.630000000000003
.quad 0x404a800000000000 ; double 53
.quad 0x404a90a3d70a3d71 ; double 53.130000000000003
.quad 0x404a10a3d70a3d71 ; double 52.130000000000003
.quad 0x4049c00000000000 ; double 51.5
.quad 0x404a000000000000 ; double 52
.quad 0x404a600000000000 ; double 52.75
.quad 0x4049d0a3d70a3d71 ; double 51.630000000000003
.quad 0x404970a3d70a3d71 ; double 50.880000000000003
.quad 0x404ae00000000000 ; double 53.75
.quad 0x404b200000000000 ; double 54.25
.quad 0x404ae00000000000 ; double 53.75
.quad 0x404b400000000000 ; double 54.5
.quad 0x404b600000000000 ; double 54.75
.quad 0x404bd0a3d70a3d71 ; double 55.630000000000003
.quad 0x404bb0a3d70a3d71 ; double 55.380000000000003
.quad 0x404c200000000000 ; double 56.25
.quad 0x404be00000000000 ; double 55.75
.quad 0x404bf0a3d70a3d71 ; double 55.880000000000003
.quad 0x404c90a3d70a3d71 ; double 57.130000000000003
.quad 0x404c600000000000 ; double 56.75
.quad 0x404c200000000000 ; double 56.25
.quad 0x404bf0a3d70a3d71 ; double 55.880000000000003
.quad 0x404ba00000000000 ; double 55.25
.quad 0x404bc00000000000 ; double 55.5
.quad 0x404b50a3d70a3d71 ; double 54.630000000000003
.quad 0x404bb0a3d70a3d71 ; double 55.380000000000003
.quad 0x404b70a3d70a3d71 ; double 54.880000000000003
.quad 0x404af0a3d70a3d71 ; double 53.880000000000003
.quad 0x404a000000000000 ; double 52
.quad 0x4049e00000000000 ; double 51.75
.quad 0x4049b0a3d70a3d71 ; double 51.380000000000003
.quad 0x4049600000000000 ; double 50.75
.quad 0x4049a00000000000 ; double 51.25
.quad 0x4049600000000000 ; double 50.75
.quad 0x404970a3d70a3d71 ; double 50.880000000000003
.quad 0x4049d0a3d70a3d71 ; double 51.630000000000003
.quad 0x4049f0a3d70a3d71 ; double 51.880000000000003
.quad 0x404af0a3d70a3d71 ; double 53.880000000000003
.quad 0x404b400000000000 ; double 54.5
.quad 0x404a90a3d70a3d71 ; double 53.130000000000003
.quad 0x404a50a3d70a3d71 ; double 52.630000000000003
.quad 0x4049f0a3d70a3d71 ; double 51.880000000000003
.quad 0x404a600000000000 ; double 52.75
.quad 0x404a800000000000 ; double 53
.quad 0x404a50a3d70a3d71 ; double 52.630000000000003
.quad 0x4049d0a3d70a3d71 ; double 51.630000000000003
.quad 0x404970a3d70a3d71 ; double 50.880000000000003
.quad 0x4049000000000000 ; double 50
.quad 0x404aa00000000000 ; double 53.25
.quad 0x4048a00000000000 ; double 49.25
.quad 0x4048600000000000 ; double 48.75
.quad 0x4047800000000000 ; double 47
.quad 0x4048200000000000 ; double 48.25
.quad 0x404970a3d70a3d71 ; double 50.880000000000003
.quad 0x404910a3d70a3d71 ; double 50.130000000000003
.quad 0x4048800000000000 ; double 49
.quad 0x4048c00000000000 ; double 49.5
.quad 0x4048800000000000 ; double 49
.quad 0x4048f0a3d70a3d71 ; double 49.880000000000003
.quad 0x404830a3d70a3d71 ; double 48.380000000000003
.quad 0x404770a3d70a3d71 ; double 46.880000000000003
.quad 0x404850a3d70a3d71 ; double 48.630000000000003
.quad 0x404890a3d70a3d71 ; double 49.130000000000003
.quad 0x4049000000000000 ; double 50
.quad 0x4049f0a3d70a3d71 ; double 51.880000000000003
.quad 0x4049a00000000000 ; double 51.25
.quad 0x404970a3d70a3d71 ; double 50.880000000000003
.quad 0x404990a3d70a3d71 ; double 51.130000000000003
.quad 0x404a800000000000 ; double 53
.quad 0x404a800000000000 ; double 53
.quad 0x404a50a3d70a3d71 ; double 52.630000000000003
.quad 0x404a000000000000 ; double 52
.quad 0x4049d0a3d70a3d71 ; double 51.630000000000003
.quad 0x404aa00000000000 ; double 53.25
.quad 0x404ab0a3d70a3d71 ; double 53.380000000000003
.quad 0x404b800000000000 ; double 55
.quad 0x404df0a3d70a3d71 ; double 59.880000000000003
.quad 0x404fb0a3d70a3d71 ; double 63.380000000000003
.quad 0x404f70a3d70a3d71 ; double 62.880000000000003
.quad 0x404f30a3d70a3d71 ; double 62.380000000000003
.quad 0x4050100000000000 ; double 64.25
.quad 0x4050700000000000 ; double 65.75
.quad 0x4050d7ae147ae148 ; double 67.370000000000005
.quad 0x4050e7ae147ae148 ; double 67.620000000000005
.quad 0x4051000000000000 ; double 68
.quad 0x4051100000000000 ; double 68.25
.quad 0x405137ae147ae148 ; double 68.870000000000005
.quad 0x405127ae147ae148 ; double 68.620000000000005
.quad 0x4050900000000000 ; double 66.25
.quad 0x4050800000000000 ; double 66
.quad 0x4050600000000000 ; double 65.5
.quad 0x404ff0a3d70a3d71 ; double 63.880000000000003
.quad 0x404f50a3d70a3d71 ; double 62.630000000000003
.quad 0x404ff0a3d70a3d71 ; double 63.880000000000003
.quad 0x405017ae147ae148 ; double 64.370000000000005
.quad 0x4050400000000000 ; double 65
.quad 0x4050600000000000 ; double 65.5
.quad 0x405047ae147ae148 ; double 65.120000000000005
.quad 0x405057ae147ae148 ; double 65.370000000000005
.quad 0x4050700000000000 ; double 65.75
.quad 0x4051000000000000 ; double 68
.quad 0x4050e7ae147ae148 ; double 67.620000000000005
.quad 0x4050a00000000000 ; double 66.5
.quad 0x4050d00000000000 ; double 67.25
.quad 0x405167ae147ae148 ; double 69.620000000000005
.quad 0x405177ae147ae148 ; double 69.870000000000005
.quad 0x4051400000000000 ; double 69
.quad 0x4050d7ae147ae148 ; double 67.370000000000005
.quad 0x4050e7ae147ae148 ; double 67.620000000000005
.quad 0x4050c7ae147ae148 ; double 67.120000000000005
.quad 0x4050d00000000000 ; double 67.25
.quad 0x4051300000000000 ; double 68.75
.quad 0x405157ae147ae148 ; double 69.370000000000005
.quad 0x4051400000000000 ; double 69
.quad 0x405147ae147ae148 ; double 69.120000000000005
.quad 0x405167ae147ae148 ; double 69.620000000000005
.quad 0x4051900000000000 ; double 70.25
.quad 0x405237ae147ae148 ; double 72.870000000000005
.quad 0x405287ae147ae148 ; double 74.120000000000005
.quad 0x4053b7ae147ae148 ; double 78.870000000000005
.quad 0x4054000000000000 ; double 80
.quad 0x4053c00000000000 ; double 79
.quad 0x4053f7ae147ae148 ; double 79.870000000000005
.quad 0x4053e00000000000 ; double 79.5
.quad 0x4054000000000000 ; double 80
.quad 0x405417ae147ae148 ; double 80.370000000000005
.quad 0x4053b7ae147ae148 ; double 78.870000000000005
.quad 0x4054200000000000 ; double 80.5
.quad 0x405447ae147ae148 ; double 81.120000000000005
.quad 0x405457ae147ae148 ; double 81.370000000000005
.quad 0x4054b00000000000 ; double 82.75
.quad 0x4054b7ae147ae148 ; double 82.870000000000005
.quad 0x4054c7ae147ae148 ; double 83.120000000000005
.quad 0x4054f00000000000 ; double 83.75
.quad 0x4054f00000000000 ; double 83.75
.quad 0x405527ae147ae148 ; double 84.620000000000005
.quad 0x4055200000000000 ; double 84.5
.quad 0x4055000000000000 ; double 84
.quad 0x4054f00000000000 ; double 83.75
.quad 0x4055200000000000 ; double 84.5
.quad 0x4055c00000000000 ; double 87
.quad 0x4056300000000000 ; double 88.75
.quad 0x4056100000000000 ; double 88.25
.quad 0x4055e00000000000 ; double 87.5
.quad 0x4055b7ae147ae148 ; double 86.870000000000005
.quad 0x405597ae147ae148 ; double 86.370000000000005
.quad 0x4055a7ae147ae148 ; double 86.620000000000005
.quad 0x4055f00000000000 ; double 87.75
.quad 0x4055e7ae147ae148 ; double 87.620000000000005
.quad 0x4055e7ae147ae148 ; double 87.620000000000005
.quad 0x4055e00000000000 ; double 87.5
.quad 0x4055d7ae147ae148 ; double 87.370000000000005
.quad 0x4055d7ae147ae148 ; double 87.370000000000005
.quad 0x4055c7ae147ae148 ; double 87.120000000000005
.quad 0x4055c7ae147ae148 ; double 87.120000000000005
.quad 0x4055900000000000 ; double 86.25
.quad 0x4055a00000000000 ; double 86.5
.quad 0x4055b7ae147ae148 ; double 86.870000000000005
.quad 0x4056200000000000 ; double 88.5
.quad 0x405627ae147ae148 ; double 88.620000000000005
.quad 0x405647ae147ae148 ; double 89.120000000000005
.quad 0x4056300000000000 ; double 88.75
.quad 0x405647ae147ae148 ; double 89.120000000000005
.quad 0x4056400000000000 ; double 89
.quad 0x405627ae147ae148 ; double 88.620000000000005
.quad 0x4056300000000000 ; double 88.75
.quad 0x405627ae147ae148 ; double 88.620000000000005
.quad 0x405687ae147ae148 ; double 90.120000000000005
.quad 0x4057400000000000 ; double 93
.quad 0x4057b7ae147ae148 ; double 94.870000000000005
.quad 0x4057d00000000000 ; double 95.25
.quad 0x4057c7ae147ae148 ; double 95.120000000000005
.quad 0x4057e00000000000 ; double 95.5
.quad 0x4057b7ae147ae148 ; double 94.870000000000005
.quad 0x4057800000000000 ; double 94
.quad 0x4057500000000000 ; double 93.25
.quad 0x405767ae147ae148 ; double 93.620000000000005
.quad 0x4057500000000000 ; double 93.25
.quad 0x4057800000000000 ; double 94
.quad 0x4057900000000000 ; double 94.25
.quad 0x405757ae147ae148 ; double 93.370000000000005
.quad 0x405857ae147ae148 ; double 97.370000000000005
.quad 0x405917ae147ae148 ; double 100.37
.quad 0x4058d7ae147ae148 ; double 99.370000000000005
.quad 0x405897ae147ae148 ; double 98.370000000000005
.quad 0x4058b7ae147ae148 ; double 98.870000000000005
.quad 0x405877ae147ae148 ; double 97.870000000000005
.quad 0x405897ae147ae148 ; double 98.370000000000005
.quad 0x405877ae147ae148 ; double 97.870000000000005
.quad 0x4058b00000000000 ; double 98.75
.quad 0x4058900000000000 ; double 98.25
.quad 0x4058900000000000 ; double 98.25
.quad 0x4058a00000000000 ; double 98.5
.quad 0x4058a7ae147ae148 ; double 98.620000000000005
.quad 0x4058a7ae147ae148 ; double 98.620000000000005
.quad 0x405857ae147ae148 ; double 97.370000000000005
.quad 0x4058900000000000 ; double 98.25
.quad 0x405877ae147ae148 ; double 97.870000000000005
.quad 0x405817ae147ae148 ; double 96.370000000000005
.quad 0x4057d7ae147ae148 ; double 95.370000000000005
.quad 0x4057e00000000000 ; double 95.5
.quad 0x4057f7ae147ae148 ; double 95.870000000000005
.quad 0x405777ae147ae148 ; double 93.870000000000005
.quad 0x4057800000000000 ; double 94
.quad 0x405787ae147ae148 ; double 94.120000000000005
.quad 0x405777ae147ae148 ; double 93.870000000000005
.quad 0x405707ae147ae148 ; double 92.120000000000005
.quad 0x4056c7ae147ae148 ; double 91.120000000000005
.quad 0x4056c00000000000 ; double 91
.quad 0x4056d00000000000 ; double 91.25
.quad 0x4056b00000000000 ; double 90.75
.quad 0x4056700000000000 ; double 89.75
.quad 0x4056b00000000000 ; double 90.75
.quad 0x4056c00000000000 ; double 91
.quad 0x4056d00000000000 ; double 91.25
.quad 0x4057000000000000 ; double 92
.quad 0x4056e00000000000 ; double 91.5
.quad 0x4056c00000000000 ; double 91
.quad 0x4056e00000000000 ; double 91.5
.quad 0x4057100000000000 ; double 92.25
.quad 0x4056e7ae147ae148 ; double 91.620000000000005
.quad 0x4057300000000000 ; double 92.75
.quad 0x4057200000000000 ; double 92.5
.quad 0x4056f7ae147ae148 ; double 91.870000000000005
.quad 0x405707ae147ae148 ; double 92.120000000000005
.quad 0x4057100000000000 ; double 92.25
.quad 0x405767ae147ae148 ; double 93.620000000000005
.quad 0x405777ae147ae148 ; double 93.870000000000005
.quad 0x405797ae147ae148 ; double 94.370000000000005
.quad 0x4057900000000000 ; double 94.25
.quad 0x405787ae147ae148 ; double 94.120000000000005
.quad 0x405787ae147ae148 ; double 94.120000000000005
.quad 0x4057800000000000 ; double 94
.quad 0x4057400000000000 ; double 93
.quad 0x405707ae147ae148 ; double 92.120000000000005
.quad 0x4056b00000000000 ; double 90.75
.quad 0x405637ae147ae148 ; double 88.870000000000005
.quad 0x4056200000000000 ; double 88.5
.quad 0x4056300000000000 ; double 88.75
.quad 0x4056500000000000 ; double 89.25
.quad 0x4056a7ae147ae148 ; double 90.620000000000005
.quad 0x405687ae147ae148 ; double 90.120000000000005
.quad 0x4056900000000000 ; double 90.25
.quad 0x4056700000000000 ; double 89.75
.quad 0x405627ae147ae148 ; double 88.620000000000005
.quad 0x405637ae147ae148 ; double 88.870000000000005
.quad 0x4056700000000000 ; double 89.75
.quad 0x4055e00000000000 ; double 87.5
.quad 0x405587ae147ae148 ; double 86.120000000000005
.quad 0x405577ae147ae148 ; double 85.870000000000005
.quad 0x4055400000000000 ; double 85
.quad 0x405537ae147ae148 ; double 84.870000000000005
.quad 0x4054c7ae147ae148 ; double 83.120000000000005
.quad 0x405497ae147ae148 ; double 82.370000000000005
.quad 0x4054e00000000000 ; double 83.5
.quad 0x4054e00000000000 ; double 83.5
.quad 0x405537ae147ae148 ; double 84.870000000000005
.quad 0x4054f7ae147ae148 ; double 83.870000000000005
.quad 0x405517ae147ae148 ; double 84.370000000000005
.quad 0x405557ae147ae148 ; double 85.370000000000005
.quad 0x4055800000000000 ; double 86
.quad 0x4055800000000000 ; double 86
.quad 0x4055b00000000000 ; double 86.75
.quad 0x4055a7ae147ae148 ; double 86.620000000000005
.quad 0x4055c7ae147ae148 ; double 87.120000000000005
.quad 0x4056200000000000 ; double 88.5
.quad 0x405637ae147ae148 ; double 88.870000000000005
.quad 0x405667ae147ae148 ; double 89.620000000000005
.quad 0x4056900000000000 ; double 90.25
.quad 0x405647ae147ae148 ; double 89.120000000000005
.quad 0x405617ae147ae148 ; double 88.370000000000005
.quad 0x405617ae147ae148 ; double 88.370000000000005
.quad 0x4055d7ae147ae148 ; double 87.370000000000005
.quad 0x4055d00000000000 ; double 87.25
.quad 0x405607ae147ae148 ; double 88.120000000000005
.quad 0x405667ae147ae148 ; double 89.620000000000005
.quad 0x405627ae147ae148 ; double 88.620000000000005
.quad 0x4055f00000000000 ; double 87.75
.quad 0x405627ae147ae148 ; double 88.620000000000005
.quad 0x405617ae147ae148 ; double 88.370000000000005
.quad 0x405627ae147ae148 ; double 88.620000000000005
.quad 0x4056500000000000 ; double 89.25
.quad 0x4056900000000000 ; double 90.25
.quad 0x405697ae147ae148 ; double 90.370000000000005
.quad 0x4056a7ae147ae148 ; double 90.620000000000005
.quad 0x4056b00000000000 ; double 90.75
.quad 0x4056c7ae147ae148 ; double 91.120000000000005
.quad 0x4056c7ae147ae148 ; double 91.120000000000005
.quad 0x4057500000000000 ; double 93.25
.quad 0x405717ae147ae148 ; double 92.370000000000005
.quad 0x4056b7ae147ae148 ; double 90.870000000000005
.quad 0x405677ae147ae148 ; double 89.870000000000005
.quad 0x405697ae147ae148 ; double 90.370000000000005
.quad 0x4056b7ae147ae148 ; double 90.870000000000005
.quad 0x405707ae147ae148 ; double 92.120000000000005
.quad 0x4057100000000000 ; double 92.25
.quad 0x4056d7ae147ae148 ; double 91.370000000000005
.quad 0x4057000000000000 ; double 92
.quad 0x4057200000000000 ; double 92.5
.quad 0x405787ae147ae148 ; double 94.120000000000005
.quad 0x4057800000000000 ; double 94
.quad 0x4057800000000000 ; double 94
.quad 0x4057900000000000 ; double 94.25
.quad 0x4057f7ae147ae148 ; double 95.870000000000005
.quad 0x4057e7ae147ae148 ; double 95.620000000000005
.quad 0x4057e00000000000 ; double 95.5
.quad 0x405817ae147ae148 ; double 96.370000000000005
.quad 0x405887ae147ae148 ; double 98.120000000000005
.quad 0x405847ae147ae148 ; double 97.120000000000005
.quad 0x4057f00000000000 ; double 95.75
.quad 0x4057200000000000 ; double 92.5
.quad 0x4056b7ae147ae148 ; double 90.870000000000005
.quad 0x4057200000000000 ; double 92.5
.quad 0x4057900000000000 ; double 94.25
.quad 0x4057c00000000000 ; double 95
.quad 0x4057b00000000000 ; double 94.75
.quad 0x405747ae147ae148 ; double 93.120000000000005
.quad 0x4056c7ae147ae148 ; double 91.120000000000005
.quad 0x4056a7ae147ae148 ; double 90.620000000000005
.quad 0x4056c7ae147ae148 ; double 91.120000000000005
.quad 0x4056b00000000000 ; double 90.75
.quad 0x405667ae147ae148 ; double 89.620000000000005
.quad 0x405637ae147ae148 ; double 88.870000000000005
.quad 0x4056800000000000 ; double 90
.quad 0x4056200000000000 ; double 88.5
.quad 0x4055900000000000 ; double 86.25
.quad 0x405577ae147ae148 ; double 85.870000000000005
.quad 0x4055b7ae147ae148 ; double 86.870000000000005
.quad 0x4055c00000000000 ; double 87
.quad 0x4056000000000000 ; double 88
.quad 0x405637ae147ae148 ; double 88.870000000000005
.quad 0x4056500000000000 ; double 89.25
.quad 0x4055f7ae147ae148 ; double 87.870000000000005
.quad 0x4055a00000000000 ; double 86.5
.quad 0x4056700000000000 ; double 89.75
.quad 0x4056900000000000 ; double 90.25
.quad 0x4056c7ae147ae148 ; double 91.120000000000005
.quad 0x4056e00000000000 ; double 91.5
.quad 0x405707ae147ae148 ; double 92.120000000000005
.quad 0x405717ae147ae148 ; double 92.370000000000005
.quad 0x405777ae147ae148 ; double 93.870000000000005
.quad 0x405847ae147ae148 ; double 97.120000000000005
.quad 0x405887ae147ae148 ; double 98.120000000000005
.quad 0x4057e7ae147ae148 ; double 95.620000000000005
.quad 0x4058000000000000 ; double 96
.quad 0x4058300000000000 ; double 96.75
.quad 0x405857ae147ae148 ; double 97.370000000000005
.quad 0x4058300000000000 ; double 96.75
.quad 0x4058700000000000 ; double 97.75
.quad 0x405907ae147ae148 ; double 100.12
.quad 0x405907ae147ae148 ; double 100.12
.quad 0x4058c7ae147ae148 ; double 99.120000000000005
.quad 0x405927ae147ae148 ; double 100.62
.quad 0x4059300000000000 ; double 100.75
.quad 0x405957ae147ae148 ; double 101.37
.quad 0x4059000000000000 ; double 100
.quad 0x4058500000000000 ; double 97.25
.quad 0x4058700000000000 ; double 97.75
.quad 0x4058700000000000 ; double 97.75
.quad 0x4058d7ae147ae148 ; double 99.370000000000005
.quad 0x405917ae147ae148 ; double 100.37
.quad 0x4059100000000000 ; double 100.25
.quad 0x4058d7ae147ae148 ; double 99.370000000000005
.quad 0x4058a7ae147ae148 ; double 98.620000000000005
.quad 0x4058a7ae147ae148 ; double 98.620000000000005
.quad 0x4058c7ae147ae148 ; double 99.120000000000005
.quad 0x4058b00000000000 ; double 98.75
.quad 0x4059200000000000 ; double 100.5
.quad 0x4059400000000000 ; double 101
.quad 0x405917ae147ae148 ; double 100.37
.quad 0x4059500000000000 ; double 101.25
.quad 0x4059f00000000000 ; double 103.75
.quad 0x405a300000000000 ; double 104.75
.quad 0x405967ae147ae148 ; double 101.62
.quad 0x4059200000000000 ; double 100.5
.quad 0x4058f7ae147ae148 ; double 99.870000000000005
.quad 0x4058900000000000 ; double 98.25
.quad 0x4058d7ae147ae148 ; double 99.370000000000005
.quad 0x4058d7ae147ae148 ; double 99.370000000000005
.quad 0x4058e7ae147ae148 ; double 99.620000000000005
.quad 0x4059400000000000 ; double 101
.quad 0x4059a00000000000 ; double 102.5
.quad 0x4059f00000000000 ; double 103.75
.quad 0x405a100000000000 ; double 104.25
.quad 0x405a57ae147ae148 ; double 105.37
.quad 0x405a700000000000 ; double 105.75
.quad 0x405a700000000000 ; double 105.75
.quad 0x405a77ae147ae148 ; double 105.87
.quad 0x405a27ae147ae148 ; double 104.62
.quad 0x405a47ae147ae148 ; double 105.12
.quad 0x405a97ae147ae148 ; double 106.37
.quad 0x405a600000000000 ; double 105.5
.quad 0x405a37ae147ae148 ; double 104.87
.quad 0x405a17ae147ae148 ; double 104.37
.quad 0x4059f00000000000 ; double 103.75
.quad 0x405a000000000000 ; double 104
.quad 0x405957ae147ae148 ; double 101.37
.quad 0x4059500000000000 ; double 101.25
.quad 0x405947ae147ae148 ; double 101.12
.quad 0x4058e7ae147ae148 ; double 99.620000000000005
.quad 0x4058c7ae147ae148 ; double 99.120000000000005
.quad 0x4058d00000000000 ; double 99.25
.quad 0x4058b7ae147ae148 ; double 98.870000000000005
.quad 0x405847ae147ae148 ; double 97.120000000000005
.quad 0x4057f7ae147ae148 ; double 95.870000000000005
.quad 0x4057d00000000000 ; double 95.25
.quad 0x4057e7ae147ae148 ; double 95.620000000000005
.quad 0x4057e7ae147ae148 ; double 95.620000000000005
.quad 0x405807ae147ae148 ; double 96.120000000000005
.quad 0x405817ae147ae148 ; double 96.370000000000005
.quad 0x405817ae147ae148 ; double 96.370000000000005
.quad 0x4058100000000000 ; double 96.25
.quad 0x4057e7ae147ae148 ; double 95.620000000000005
.quad 0x4058a7ae147ae148 ; double 98.620000000000005
.quad 0x4058c00000000000 ; double 99
.quad 0x4058d7ae147ae148 ; double 99.370000000000005
.quad 0x4058f7ae147ae148 ; double 99.870000000000005
.quad 0x4058e00000000000 ; double 99.5
.quad 0x4058e7ae147ae148 ; double 99.620000000000005
.quad 0x4058f00000000000 ; double 99.75
.quad 0x4059200000000000 ; double 100.5
.quad 0x4059300000000000 ; double 100.75
.quad 0x4059500000000000 ; double 101.25
.quad 0x405967ae147ae148 ; double 101.62
.quad 0x4059700000000000 ; double 101.75
.quad 0x405987ae147ae148 ; double 102.12
.quad 0x4059800000000000 ; double 102
.quad 0x405957ae147ae148 ; double 101.37
.quad 0x4059400000000000 ; double 101
.quad 0x405967ae147ae148 ; double 101.62
.quad 0x405957ae147ae148 ; double 101.37
.quad 0x4059c00000000000 ; double 103
.quad 0x405987ae147ae148 ; double 102.12
.quad 0x405937ae147ae148 ; double 100.87
.quad 0x405897ae147ae148 ; double 98.370000000000005
.quad 0x405837ae147ae148 ; double 96.870000000000005
.quad 0x4058e00000000000 ; double 99.5
.quad 0x4058f7ae147ae148 ; double 99.870000000000005
.quad 0x4058f00000000000 ; double 99.75
.quad 0x4058c00000000000 ; double 99
.quad 0x405917ae147ae148 ; double 100.37
.quad 0x405937ae147ae148 ; double 100.87
.quad 0x4059200000000000 ; double 100.5
.quad 0x4058d00000000000 ; double 99.25
.quad 0x4058b7ae147ae148 ; double 98.870000000000005
.quad 0x4058d7ae147ae148 ; double 99.370000000000005
.quad 0x4058a7ae147ae148 ; double 98.620000000000005
.quad 0x4058700000000000 ; double 97.75
.quad 0x4058b00000000000 ; double 98.75
.quad 0x4058d7ae147ae148 ; double 99.370000000000005
.quad 0x4058e7ae147ae148 ; double 99.620000000000005
.quad 0x4058e7ae147ae148 ; double 99.620000000000005
.quad 0x4059000000000000 ; double 100
.quad 0x4058d00000000000 ; double 99.25
.quad 0x4059500000000000 ; double 101.25
.quad 0x4059400000000000 ; double 101
.quad 0x4059000000000000 ; double 100
.quad 0x405967ae147ae148 ; double 101.62
.quad 0x405957ae147ae148 ; double 101.37
.quad 0x4059800000000000 ; double 102
.quad 0x4059e00000000000 ; double 103.5
.quad 0x4059b00000000000 ; double 102.75
.quad 0x4059c00000000000 ; double 103
.quad 0x4059e7ae147ae148 ; double 103.62
.quad 0x405a600000000000 ; double 105.5
.quad 0x405a800000000000 ; double 106
.quad 0x405ad7ae147ae148 ; double 107.37
.quad 0x405aa00000000000 ; double 106.5
.quad 0x405a47ae147ae148 ; double 105.12
.quad 0x405a27ae147ae148 ; double 104.62
.quad 0x405a57ae147ae148 ; double 105.37
.quad 0x405a47ae147ae148 ; double 105.12
.quad 0x405a27ae147ae148 ; double 104.62
.quad 0x405a27ae147ae148 ; double 104.62
.quad 0x405a100000000000 ; double 104.25
.quad 0x405a100000000000 ; double 104.25
.quad 0x405a27ae147ae148 ; double 104.62
.quad 0x405a100000000000 ; double 104.25
.quad 0x405a47ae147ae148 ; double 105.12
.quad 0x405a900000000000 ; double 106.25
.quad 0x405a800000000000 ; double 106
.quad 0x405a800000000000 ; double 106
.quad 0x405a77ae147ae148 ; double 105.87
.quad 0x405a000000000000 ; double 104
.quad 0x4059e00000000000 ; double 103.5
.quad 0x4059e7ae147ae148 ; double 103.62
.quad 0x405aa00000000000 ; double 106.5
.quad 0x405a800000000000 ; double 106
.quad 0x405a200000000000 ; double 104.5
.quad 0x405a600000000000 ; double 105.5
.quad 0x405af7ae147ae148 ; double 107.87
.quad 0x405b100000000000 ; double 108.25
.quad 0x405b47ae147ae148 ; double 109.12
.quad 0x405b37ae147ae148 ; double 108.87
.quad 0x405b47ae147ae148 ; double 109.12
.quad 0x405b57ae147ae148 ; double 109.37
.quad 0x405b900000000000 ; double 110.25
.quad 0x405bb00000000000 ; double 110.75
.quad 0x405bb00000000000 ; double 110.75
.quad 0x405b500000000000 ; double 109.25
.quad 0x405b17ae147ae148 ; double 108.37
.quad 0x405bc00000000000 ; double 111
.quad 0x405bf7ae147ae148 ; double 111.87
.quad 0x405be7ae147ae148 ; double 111.62
.quad 0x405c57ae147ae148 ; double 113.37
.quad 0x405c600000000000 ; double 113.5
.quad 0x405ca00000000000 ; double 114.5
.quad 0x405cb00000000000 ; double 114.75
.quad 0x405c900000000000 ; double 114.25
.quad 0x405c600000000000 ; double 113.5
.quad 0x405c900000000000 ; double 114.25
.quad 0x405c97ae147ae148 ; double 114.37
.quad 0x405ca7ae147ae148 ; double 114.62
.quad 0x405ca7ae147ae148 ; double 114.62
.quad 0x405c67ae147ae148 ; double 113.62
.quad 0x405c27ae147ae148 ; double 112.62
.quad 0x405cc00000000000 ; double 115
.quad 0x405cd7ae147ae148 ; double 115.37
.quad 0x405dc00000000000 ; double 119
.quad 0x40600428f5c28f5c ; double 128.13
.quad 0x405ff00000000000 ; double 127.75
.quad 0x4060580000000000 ; double 130.75
.quad 0x40603428f5c28f5c ; double 129.63
.quad 0x4060280000000000 ; double 129.25
.quad 0x4060500000000000 ; double 130.5
.quad 0x4060b428f5c28f5c ; double 133.63
.quad 0x4060b80000000000 ; double 133.75
.quad 0x4060dc28f5c28f5c ; double 134.88
.quad 0x4060e00000000000 ; double 135
.quad 0x4060900000000000 ; double 132.5
.quad 0x4060700000000000 ; double 131.5
.quad 0x4060700000000000 ; double 131.5
.quad 0x40605c28f5c28f5c ; double 130.88
.quad 0x4060a00000000000 ; double 133
.quad 0x4060e428f5c28f5c ; double 135.13
.quad 0x4061200000000000 ; double 137
.quad 0x4061480000000000 ; double 138.25
.quad 0x4061580000000000 ; double 138.75
.quad 0x4061780000000000 ; double 139.75
.quad 0x40613c28f5c28f5c ; double 137.88
.quad 0x4061080000000000 ; double 136.25
.quad 0x4060e00000000000 ; double 135
.quad 0x4060bc28f5c28f5c ; double 133.88
.quad 0x4060900000000000 ; double 132.5
.quad 0x40603c28f5c28f5c ; double 129.88
.quad 0x4060600000000000 ; double 131
.quad 0x4060400000000000 ; double 130
.quad 0x4060100000000000 ; double 128.5
.quad 0x405fe7ae147ae148 ; double 127.62
.quad 0x405fe7ae147ae148 ; double 127.62
.quad 0x405fc7ae147ae148 ; double 127.12
.quad 0x405fc00000000000 ; double 127
.quad 0x405f57ae147ae148 ; double 125.37
.quad 0x405f400000000000 ; double 125
.quad 0x405ea7ae147ae148 ; double 122.62
.quad 0x405e67ae147ae148 ; double 121.62
.quad 0x405e07ae147ae148 ; double 120.12
.quad 0x405de7ae147ae148 ; double 119.62
.quad 0x405e07ae147ae148 ; double 120.12
.quad 0x405d900000000000 ; double 118.25
.quad 0x405d07ae147ae148 ; double 116.12
.quad 0x405b500000000000 ; double 109.25
.quad 0x405af00000000000 ; double 107.75
.quad 0x405ac7ae147ae148 ; double 107.12
.quad 0x405b100000000000 ; double 108.25
.quad 0x405b17ae147ae148 ; double 108.37
.quad 0x405bb00000000000 ; double 110.75
.quad 0x405b97ae147ae148 ; double 110.37
.quad 0x405bf7ae147ae148 ; double 111.87
.quad 0x405c400000000000 ; double 113
.quad 0x405c77ae147ae148 ; double 113.87
.quad 0x405c700000000000 ; double 113.75
.quad 0x405c600000000000 ; double 113.5
.quad 0x405c77ae147ae148 ; double 113.87
.quad 0x405c77ae147ae148 ; double 113.87
.quad 0x405c900000000000 ; double 114.25
.quad 0x405c900000000000 ; double 114.25
.quad 0x405c97ae147ae148 ; double 114.37
.quad 0x405ca7ae147ae148 ; double 114.62
.quad 0x405c800000000000 ; double 114
.quad 0x405c600000000000 ; double 113.5
.quad 0x405bf00000000000 ; double 111.75
.quad 0x405c200000000000 ; double 112.5
.quad 0x405ca7ae147ae148 ; double 114.62
.quad 0x405ca00000000000 ; double 114.5
.quad 0x405c47ae147ae148 ; double 113.12
.quad 0x405c700000000000 ; double 113.75
.quad 0x405c37ae147ae148 ; double 112.87
.quad 0x405d000000000000 ; double 116
.quad 0x405cb7ae147ae148 ; double 114.87
.quad 0x405cc00000000000 ; double 115
.quad 0x405ca00000000000 ; double 114.5
.quad 0x405ca00000000000 ; double 114.5
.quad 0x405c37ae147ae148 ; double 112.87
.quad 0x405c77ae147ae148 ; double 113.87
.quad 0x405c87ae147ae148 ; double 114.12
.quad 0x405c900000000000 ; double 114.25
.quad 0x405c900000000000 ; double 114.25
.quad 0x405c87ae147ae148 ; double 114.12
.quad 0x405cc7ae147ae148 ; double 115.12
.quad 0x405cb7ae147ae148 ; double 114.87
.quad 0x405c87ae147ae148 ; double 114.12
.quad 0x405c67ae147ae148 ; double 113.62
.quad 0x405c800000000000 ; double 114
.quad 0x405c600000000000 ; double 113.5
.quad 0x405c500000000000 ; double 113.25
.quad 0x405b900000000000 ; double 110.25
.quad 0x405ae7ae147ae148 ; double 107.62
.quad 0x405b100000000000 ; double 108.25
.quad 0x405b400000000000 ; double 109
.quad 0x405b07ae147ae148 ; double 108.12
.quad 0x405b27ae147ae148 ; double 108.62
.quad 0x405ad7ae147ae148 ; double 107.37
.quad 0x405aa7ae147ae148 ; double 106.62
.quad 0x405ab7ae147ae148 ; double 106.87
.quad 0x405af7ae147ae148 ; double 107.87
.quad 0x405ae00000000000 ; double 107.5
.quad 0x405b500000000000 ; double 109.25
.quad 0x405b400000000000 ; double 109
.quad 0x405b100000000000 ; double 108.25
.quad 0x405b300000000000 ; double 108.75
.quad 0x405b400000000000 ; double 109
.quad 0x405a57ae147ae148 ; double 105.37
.quad 0x405967ae147ae148 ; double 101.62
.quad 0x405917ae147ae148 ; double 100.37
.quad 0x405937ae147ae148 ; double 100.87
.quad 0x4059800000000000 ; double 102
.quad 0x4059f7ae147ae148 ; double 103.87
.quad 0x405aa7ae147ae148 ; double 106.62
.quad 0x405b27ae147ae148 ; double 108.62
.quad 0x405b600000000000 ; double 109.5
.quad 0x405b400000000000 ; double 109
.quad 0x405b100000000000 ; double 108.25
.quad 0x405b400000000000 ; double 109
.quad 0x405b97ae147ae148 ; double 110.37
.quad 0x405b600000000000 ; double 109.5
.quad 0x405ac7ae147ae148 ; double 107.12
.quad 0x405aa7ae147ae148 ; double 106.62
.quad 0x405ab00000000000 ; double 106.75
.quad 0x405aa00000000000 ; double 106.5
.quad 0x405a900000000000 ; double 106.25
.quad 0x405af7ae147ae148 ; double 107.87
.quad 0x405b400000000000 ; double 109
.quad 0x405b500000000000 ; double 109.25
.quad 0x405ae7ae147ae148 ; double 107.62
.quad 0x405a700000000000 ; double 105.75
.quad 0x405a400000000000 ; double 105
.quad 0x405af00000000000 ; double 107.75
.quad 0x405b17ae147ae148 ; double 108.37
.quad 0x405af00000000000 ; double 107.75
.quad 0x405ae7ae147ae148 ; double 107.62
.quad 0x405a600000000000 ; double 105.5
.quad 0x4059e00000000000 ; double 103.5
.quad 0x4059f7ae147ae148 ; double 103.87
.quad 0x4059a7ae147ae148 ; double 102.62
.quad 0x405977ae147ae148 ; double 101.87
.quad 0x405a07ae147ae148 ; double 104.12
.quad 0x405a17ae147ae148 ; double 104.37
.quad 0x405a07ae147ae148 ; double 104.12
.quad 0x405a17ae147ae148 ; double 104.37
.quad 0x4059400000000000 ; double 101
.quad 0x405897ae147ae148 ; double 98.370000000000005
.quad 0x4059400000000000 ; double 101
.quad 0x4059700000000000 ; double 101.75
.quad 0x4059b00000000000 ; double 102.75
.quad 0x4059d7ae147ae148 ; double 103.37
.quad 0x405a700000000000 ; double 105.75
.quad 0x405a500000000000 ; double 105.25
.quad 0x405a47ae147ae148 ; double 105.12
.quad 0x405a000000000000 ; double 104
.quad 0x4059e7ae147ae148 ; double 103.62
.quad 0x4059f7ae147ae148 ; double 103.87
.quad 0x4059f7ae147ae148 ; double 103.87
.quad 0x405a400000000000 ; double 105
.quad 0x405a17ae147ae148 ; double 104.37
.quad 0x405b600000000000 ; double 109.5
.quad 0x405bb00000000000 ; double 110.75
.quad 0x405c17ae147ae148 ; double 112.37
.quad 0x405c37ae147ae148 ; double 112.87
.quad 0x405c17ae147ae148 ; double 112.37
.quad 0x405c300000000000 ; double 112.75
.quad 0x405ca7ae147ae148 ; double 114.62
.quad 0x405ce00000000000 ; double 115.5
.quad 0x405d000000000000 ; double 116
.quad 0x405d700000000000 ; double 117.75
.quad 0x405e100000000000 ; double 120.25
.quad 0x405e07ae147ae148 ; double 120.12
.quad 0x405e67ae147ae148 ; double 121.62
.quad 0x405ea00000000000 ; double 122.5
.quad 0x405ec7ae147ae148 ; double 123.12
.quad 0x405e87ae147ae148 ; double 122.12
.quad 0x405e17ae147ae148 ; double 120.37
.quad 0x405dd00000000000 ; double 119.25
.quad 0x405df00000000000 ; double 119.75
.quad 0x405dd00000000000 ; double 119.25
.quad 0x405da00000000000 ; double 118.5
.quad 0x405d67ae147ae148 ; double 117.62
.quad 0x405db00000000000 ; double 118.75
.quad 0x405db00000000000 ; double 118.75
.quad 0x405d87ae147ae148 ; double 118.12
.quad 0x405d97ae147ae148 ; double 118.37
.quad 0x405d87ae147ae148 ; double 118.12
.quad 0x405d800000000000 ; double 118
.quad 0x405d500000000000 ; double 117.25
.quad 0x405de00000000000 ; double 119.5
.quad 0x405dc7ae147ae148 ; double 119.12
.quad 0x405dc7ae147ae148 ; double 119.12
.quad 0x405dc7ae147ae148 ; double 119.12
.quad 0x405e300000000000 ; double 120.75
.quad 0x405e500000000000 ; double 121.25
.quad 0x405e77ae147ae148 ; double 121.87
.quad 0x405e87ae147ae148 ; double 122.12
.quad 0x405e37ae147ae148 ; double 120.87
.quad 0x405e100000000000 ; double 120.25
.quad 0x405e37ae147ae148 ; double 120.87
.quad 0x405e600000000000 ; double 121.5
.quad 0x405e900000000000 ; double 122.25
.quad 0x405e77ae147ae148 ; double 121.87
.quad 0x405e37ae147ae148 ; double 120.87
.quad 0x405e300000000000 ; double 120.75
.quad 0x405e37ae147ae148 ; double 120.87
.quad 0x405e87ae147ae148 ; double 122.12
.quad 0x405df7ae147ae148 ; double 119.87
.quad 0x405d800000000000 ; double 118
.quad 0x405db7ae147ae148 ; double 118.87
.quad 0x405db00000000000 ; double 118.75
.quad 0x405d700000000000 ; double 117.75
.quad 0x405d57ae147ae148 ; double 117.37
.quad 0x405d100000000000 ; double 116.25
.quad 0x405d100000000000 ; double 116.25
.quad 0x405cf00000000000 ; double 115.75
.quad 0x405ce7ae147ae148 ; double 115.62
.quad 0x405ce00000000000 ; double 115.5
.quad 0x405ca00000000000 ; double 114.5
.quad 0x405c37ae147ae148 ; double 112.87
.quad 0x405bd00000000000 ; double 111.25
.quad 0x405bc00000000000 ; double 111
.quad 0x405bd7ae147ae148 ; double 111.37
.quad 0x405ba7ae147ae148 ; double 110.62
.quad 0x405b87ae147ae148 ; double 110.12
.quad 0x405b47ae147ae148 ; double 109.12
.quad 0x405b67ae147ae148 ; double 109.62
.quad 0x405b500000000000 ; double 109.25
.quad 0x405b400000000000 ; double 109
.quad 0x405b800000000000 ; double 110
.quad 0x405b87ae147ae148 ; double 110.12
.quad 0x405b97ae147ae148 ; double 110.37
.quad 0x405b600000000000 ; double 109.5
.quad 0x405b600000000000 ; double 109.5
.quad 0x405b900000000000 ; double 110.25
.quad 0x405bb7ae147ae148 ; double 110.87
.quad 0x405bd00000000000 ; double 111.25
.quad 0x405bd00000000000 ; double 111.25
.quad 0x405b000000000000 ; double 108
.quad 0x405aa00000000000 ; double 106.5
.quad 0x405a97ae147ae148 ; double 106.37
.quad 0x405a77ae147ae148 ; double 105.87
.quad 0x405a97ae147ae148 ; double 106.37
.quad 0x405ab7ae147ae148 ; double 106.87
.quad 0x405af00000000000 ; double 107.75
.quad 0x405ab00000000000 ; double 106.75
.quad 0x405a77ae147ae148 ; double 105.87
.quad 0x405ab00000000000 ; double 106.75
.quad 0x405ab7ae147ae148 ; double 106.87
.quad 0x405a77ae147ae148 ; double 105.87
.quad 0x405a300000000000 ; double 104.75
.quad 0x405a700000000000 ; double 105.75
.quad 0x405a87ae147ae148 ; double 106.12
.quad 0x405ae00000000000 ; double 107.5
.quad 0x405b200000000000 ; double 108.5
.quad 0x405b57ae147ae148 ; double 109.37
.quad 0x405b47ae147ae148 ; double 109.12
.quad 0x405b300000000000 ; double 108.75
.quad 0x405ad7ae147ae148 ; double 107.37
.quad 0x405ac00000000000 ; double 107
.quad 0x405b100000000000 ; double 108.25
.quad 0x405af00000000000 ; double 107.75
.quad 0x405ad7ae147ae148 ; double 107.37
.quad 0x405ad7ae147ae148 ; double 107.37
.quad 0x405a900000000000 ; double 106.25
.quad 0x405a700000000000 ; double 105.75
.quad 0x405a77ae147ae148 ; double 105.87
.quad 0x405a57ae147ae148 ; double 105.37
.quad 0x405a17ae147ae148 ; double 104.37
.quad 0x405a27ae147ae148 ; double 104.62
.quad 0x405a600000000000 ; double 105.5
.quad 0x405a07ae147ae148 ; double 104.12
.quad 0x4059f7ae147ae148 ; double 103.87
.quad 0x405a600000000000 ; double 105.5
.quad 0x405a47ae147ae148 ; double 105.12
.quad 0x4059c7ae147ae148 ; double 103.12
.quad 0x405a200000000000 ; double 104.5
.quad 0x405a27ae147ae148 ; double 104.62
.quad 0x405a100000000000 ; double 104.25
.quad 0x4059d00000000000 ; double 103.25
.quad 0x405997ae147ae148 ; double 102.37
.quad 0x4059d00000000000 ; double 103.25
.quad 0x4059f7ae147ae148 ; double 103.87
.quad 0x4059f00000000000 ; double 103.75
.quad 0x405957ae147ae148 ; double 101.37
.quad 0x4058f7ae147ae148 ; double 99.870000000000005
.quad 0x4058b00000000000 ; double 98.75
.quad 0x4058f00000000000 ; double 99.75
.quad 0x4058a7ae147ae148 ; double 98.620000000000005
.quad 0x405867ae147ae148 ; double 97.620000000000005
.quad 0x4058700000000000 ; double 97.75
.quad 0x4058500000000000 ; double 97.25
.quad 0x405877ae147ae148 ; double 97.870000000000005
.quad 0x405877ae147ae148 ; double 97.870000000000005
.quad 0x4058900000000000 ; double 98.25
.quad 0x4058c7ae147ae148 ; double 99.120000000000005
.quad 0x4058f7ae147ae148 ; double 99.870000000000005
.quad 0x4058e7ae147ae148 ; double 99.620000000000005
.quad 0x405937ae147ae148 ; double 100.87
.quad 0x405917ae147ae148 ; double 100.37
.quad 0x405897ae147ae148 ; double 98.370000000000005
.quad 0x4058d00000000000 ; double 99.25
.quad 0x405917ae147ae148 ; double 100.37
.quad 0x4059000000000000 ; double 100
.quad 0x4059500000000000 ; double 101.25
.quad 0x405937ae147ae148 ; double 100.87
.quad 0x405967ae147ae148 ; double 101.62
.quad 0x405917ae147ae148 ; double 100.37
.quad 0x4058e00000000000 ; double 99.5
.quad 0x405887ae147ae148 ; double 98.120000000000005
.quad 0x4057a7ae147ae148 ; double 94.620000000000005
.quad 0x4057c7ae147ae148 ; double 95.120000000000005
.quad 0x4057d7ae147ae148 ; double 95.370000000000005
.quad 0x4057e00000000000 ; double 95.5
.quad 0x4057f00000000000 ; double 95.75
.quad 0x4057d7ae147ae148 ; double 95.370000000000005
.quad 0x4057c7ae147ae148 ; double 95.120000000000005
.quad 0x4057d00000000000 ; double 95.25
.quad 0x4057d00000000000 ; double 95.25
.quad 0x4057d00000000000 ; double 95.25
.quad 0x4057e00000000000 ; double 95.5
.quad 0x405857ae147ae148 ; double 97.370000000000005
.quad 0x4058400000000000 ; double 97
.quad 0x4058300000000000 ; double 96.75
.quad 0x405887ae147ae148 ; double 98.120000000000005
.quad 0x4058d00000000000 ; double 99.25
.quad 0x4058d00000000000 ; double 99.25
.quad 0x405977ae147ae148 ; double 101.87
.quad 0x4058e00000000000 ; double 99.5
.quad 0x405887ae147ae148 ; double 98.120000000000005
.quad 0x405887ae147ae148 ; double 98.120000000000005
.quad 0x4058a7ae147ae148 ; double 98.620000000000005
.quad 0x4058d7ae147ae148 ; double 99.370000000000005
.quad 0x4059300000000000 ; double 100.75
.quad 0x4059300000000000 ; double 100.75
.quad 0x4059200000000000 ; double 100.5
.quad 0x4058f00000000000 ; double 99.75
.quad 0x4058d7ae147ae148 ; double 99.370000000000005
.quad 0x4058f00000000000 ; double 99.75
.quad 0x4058900000000000 ; double 98.25
.quad 0x4058a00000000000 ; double 98.5
.quad 0x4058d00000000000 ; double 99.25
.quad 0x4058b7ae147ae148 ; double 98.870000000000005
.quad 0x4058a7ae147ae148 ; double 98.620000000000005
.quad 0x4058600000000000 ; double 97.5
.quad 0x4058800000000000 ; double 98
.quad 0x4058700000000000 ; double 97.75
.quad 0x405887ae147ae148 ; double 98.120000000000005
.quad 0x4058b7ae147ae148 ; double 98.870000000000005
.quad 0x4058e7ae147ae148 ; double 99.620000000000005
.quad 0x405917ae147ae148 ; double 100.37
.quad 0x405957ae147ae148 ; double 101.37
.quad 0x4059300000000000 ; double 100.75
.quad 0x405937ae147ae148 ; double 100.87
.quad 0x4059700000000000 ; double 101.75
.quad 0x4059b00000000000 ; double 102.75
.quad 0x405a200000000000 ; double 104.5
.quad 0x405a300000000000 ; double 104.75
.quad 0x405a27ae147ae148 ; double 104.62
.quad 0x405a57ae147ae148 ; double 105.37
.quad 0x4059c00000000000 ; double 103
.quad 0x4059b7ae147ae148 ; double 102.87
.quad 0x405af00000000000 ; double 107.75
.quad 0x405b000000000000 ; double 108
.quad 0x405af00000000000 ; double 107.75
.quad 0x405b100000000000 ; double 108.25
.quad 0x405b57ae147ae148 ; double 109.37
.quad 0x405b500000000000 ; double 109.25
.quad 0x405b07ae147ae148 ; double 108.12
.quad 0x405b47ae147ae148 ; double 109.12
.quad 0x405b100000000000 ; double 108.25
.quad 0x405b100000000000 ; double 108.25
.quad 0x405b700000000000 ; double 109.75
.quad 0x405b57ae147ae148 ; double 109.37
.quad 0x405bd00000000000 ; double 111.25
.quad 0x405c200000000000 ; double 112.5
.quad 0x405d700000000000 ; double 117.75
.quad 0x405d77ae147ae148 ; double 117.87
.quad 0x405d37ae147ae148 ; double 116.87
.quad 0x405d500000000000 ; double 117.25
.quad 0x405d17ae147ae148 ; double 116.37
.quad 0x405d200000000000 ; double 116.5
.quad 0x405d07ae147ae148 ; double 116.12
.quad 0x405d100000000000 ; double 116.25
.quad 0x405cf7ae147ae148 ; double 115.87
.quad 0x405d600000000000 ; double 117.5
.quad 0x405d500000000000 ; double 117.25
.quad 0x405d17ae147ae148 ; double 116.37
.quad 0x405d17ae147ae148 ; double 116.37
.quad 0x405d400000000000 ; double 117
.quad 0x405d67ae147ae148 ; double 117.62
.quad 0x405db00000000000 ; double 118.75
.quad 0x405d900000000000 ; double 118.25
.quad 0x405d67ae147ae148 ; double 117.62
.quad 0x405d600000000000 ; double 117.5
.quad 0x405d97ae147ae148 ; double 118.37
.quad 0x405d87ae147ae148 ; double 118.12
.quad 0x405d200000000000 ; double 116.5
.quad 0x405d07ae147ae148 ; double 116.12
.quad 0x405c87ae147ae148 ; double 114.12
.quad 0x405c77ae147ae148 ; double 113.87
.quad 0x405cd7ae147ae148 ; double 115.37
.quad 0x405ce00000000000 ; double 115.5
.quad 0x405cf00000000000 ; double 115.75
.quad 0x405d17ae147ae148 ; double 116.37
.quad 0x405d200000000000 ; double 116.5
.quad 0x405d200000000000 ; double 116.5
.quad 0x405d900000000000 ; double 118.25
.quad 0x405d900000000000 ; double 118.25
.quad 0x405dc7ae147ae148 ; double 119.12
.quad 0x405dd7ae147ae148 ; double 119.37
.quad 0x405d67ae147ae148 ; double 117.62
.quad 0x405cf00000000000 ; double 115.75
.quad 0x405c800000000000 ; double 114
.quad 0x405cc00000000000 ; double 115
.quad 0x405d200000000000 ; double 116.5
.quad 0x405cd00000000000 ; double 115.25
.quad 0x405c900000000000 ; double 114.25
.quad 0x405c400000000000 ; double 113
.quad 0x405c27ae147ae148 ; double 112.62
.quad 0x405c97ae147ae148 ; double 114.37
.quad 0x405c87ae147ae148 ; double 114.12
.quad 0x405c97ae147ae148 ; double 114.37
.quad 0x405ca7ae147ae148 ; double 114.62
.quad 0x405ce00000000000 ; double 115.5
.quad 0x405d300000000000 ; double 116.75
.quad 0x405d200000000000 ; double 116.5
.quad 0x405cc00000000000 ; double 115
.quad 0x405cd00000000000 ; double 115.25
.quad 0x405cc7ae147ae148 ; double 115.12
.quad 0x405cf7ae147ae148 ; double 115.87
.quad 0x405cc00000000000 ; double 115
.quad 0x405c300000000000 ; double 112.75
.quad 0x405c17ae147ae148 ; double 112.37
.quad 0x405c300000000000 ; double 112.75
.quad 0x405c200000000000 ; double 112.5
.quad 0x405c07ae147ae148 ; double 112.12
.quad 0x405c500000000000 ; double 113.25
.quad 0x405cc7ae147ae148 ; double 115.12
.quad 0x405ca00000000000 ; double 114.5
.quad 0x405c300000000000 ; double 112.75
.quad 0x405bc7ae147ae148 ; double 111.12
.quad 0x405b77ae147ae148 ; double 109.87
.quad 0x405b97ae147ae148 ; double 110.37
.quad 0x405bd00000000000 ; double 111.25
.quad 0x405ba00000000000 ; double 110.5
.quad 0x405b67ae147ae148 ; double 109.62
.quad 0x405ba00000000000 ; double 110.5
.quad 0x405be00000000000 ; double 111.5
.quad 0x405be7ae147ae148 ; double 111.62
.quad 0x405b400000000000 ; double 109
.quad 0x405b600000000000 ; double 109.5
.quad 0x405b67ae147ae148 ; double 109.62
.quad 0x405b97ae147ae148 ; double 110.37
.quad 0x405b700000000000 ; double 109.75
.quad 0x405b87ae147ae148 ; double 110.12
.quad 0x405bb7ae147ae148 ; double 110.87
.quad 0x405bb00000000000 ; double 110.75
.quad 0x405b87ae147ae148 ; double 110.12
.quad 0x405b700000000000 ; double 109.75
.quad 0x405b800000000000 ; double 110
.quad 0x405b900000000000 ; double 110.25
.quad 0x405b87ae147ae148 ; double 110.12
.quad 0x405b77ae147ae148 ; double 109.87
.quad 0x405bf7ae147ae148 ; double 111.87
.quad 0x405c200000000000 ; double 112.5
.quad 0x405c200000000000 ; double 112.5
.quad 0x405bf7ae147ae148 ; double 111.87
.quad 0x405c47ae147ae148 ; double 113.12
.quad 0x405c37ae147ae148 ; double 112.87
.quad 0x405bd00000000000 ; double 111.25
.quad 0x405b77ae147ae148 ; double 109.87
.quad 0x405b900000000000 ; double 110.25
.quad 0x405b87ae147ae148 ; double 110.12
.quad 0x405b700000000000 ; double 109.75
.quad 0x405bd7ae147ae148 ; double 111.37
.quad 0x405bc7ae147ae148 ; double 111.12
.quad 0x405c200000000000 ; double 112.5
.quad 0x405c700000000000 ; double 113.75
.quad 0x405c67ae147ae148 ; double 113.62
.quad 0x405ca7ae147ae148 ; double 114.62
.quad 0x405cf00000000000 ; double 115.75
.quad 0x405cb00000000000 ; double 114.75
.quad 0x405cc7ae147ae148 ; double 115.12
.quad 0x405c97ae147ae148 ; double 114.37
.quad 0x405c600000000000 ; double 113.5
.quad 0x405c37ae147ae148 ; double 112.87
.quad 0x405c47ae147ae148 ; double 113.12
.quad 0x405c47ae147ae148 ; double 113.12
.quad 0x405bf00000000000 ; double 111.75
.quad 0x405c000000000000 ; double 112
.quad 0x405bc00000000000 ; double 111
.quad 0x405c100000000000 ; double 112.25
.quad 0x405bd7ae147ae148 ; double 111.37
.quad 0x405b900000000000 ; double 110.25
.quad 0x405b900000000000 ; double 110.25
.quad 0x405b100000000000 ; double 108.25
.quad 0x405b000000000000 ; double 108
.quad 0x405b200000000000 ; double 108.5
.quad 0x405b600000000000 ; double 109.5
.quad 0x405b600000000000 ; double 109.5
.quad 0x405b700000000000 ; double 109.75
.quad 0x405b97ae147ae148 ; double 110.37
.quad 0x405bb7ae147ae148 ; double 110.87
.quad 0x405b97ae147ae148 ; double 110.37
.quad 0x405b900000000000 ; double 110.25
.quad 0x405b57ae147ae148 ; double 109.37
.quad 0x405ba00000000000 ; double 110.5
.quad 0x405bd7ae147ae148 ; double 111.37
.quad 0x405d200000000000 ; double 116.5
.quad 0x405db00000000000 ; double 118.75
.quad 0x405db00000000000 ; double 118.75
.quad 0x405dd7ae147ae148 ; double 119.37
.quad 0x405de7ae147ae148 ; double 119.62
.quad 0x405da00000000000 ; double 118.5
.quad 0x405df7ae147ae148 ; double 119.87
.quad 0x405e27ae147ae148 ; double 120.62
.quad 0x405e47ae147ae148 ; double 121.12
.quad 0x405e67ae147ae148 ; double 121.62
.quad 0x405e400000000000 ; double 121
.quad 0x405e37ae147ae148 ; double 120.87
.quad 0x405e97ae147ae148 ; double 122.37
.quad 0x405e900000000000 ; double 122.25
.quad 0x405e77ae147ae148 ; double 121.87
.quad 0x405ee00000000000 ; double 123.5
.quad 0x405ee7ae147ae148 ; double 123.62
.quad 0x405f600000000000 ; double 125.5
.quad 0x405f800000000000 ; double 126
.quad 0x405fa7ae147ae148 ; double 126.62
.quad 0x405fb7ae147ae148 ; double 126.87
.quad 0x405fc7ae147ae148 ; double 127.12
.quad 0x405fb00000000000 ; double 126.75
.quad 0x405f900000000000 ; double 126.25
.quad 0x405fa7ae147ae148 ; double 126.62
.quad 0x405fe00000000000 ; double 127.5
.quad 0x40600c28f5c28f5c ; double 128.38
.quad 0x40601428f5c28f5c ; double 128.63
.quad 0x405ff7ae147ae148 ; double 127.87
.quad 0x40601c28f5c28f5c ; double 128.88
.quad 0x40603c28f5c28f5c ; double 129.88
.quad 0x40605c28f5c28f5c ; double 130.88
.quad 0x40605c28f5c28f5c ; double 130.88
.quad 0x4060280000000000 ; double 129.25
.quad 0x405ff7ae147ae148 ; double 127.87
.quad 0x405fc00000000000 ; double 127
.quad 0x405f17ae147ae148 ; double 124.37
.quad 0x405f27ae147ae148 ; double 124.62
.quad 0x405f07ae147ae148 ; double 124.12
.quad 0x405f17ae147ae148 ; double 124.37
.quad 0x405f57ae147ae148 ; double 125.37
.quad 0x405f400000000000 ; double 125
.quad 0x405f200000000000 ; double 124.5
.quad 0x405f47ae147ae148 ; double 125.12
.quad 0x405ef00000000000 ; double 123.75
.quad 0x405f07ae147ae148 ; double 124.12
.quad 0x405e900000000000 ; double 122.25
.quad 0x405ea00000000000 ; double 122.5
.quad 0x405e97ae147ae148 ; double 122.37
.quad 0x405eb00000000000 ; double 122.75
.quad 0x405ec7ae147ae148 ; double 123.12
.quad 0x405ea7ae147ae148 ; double 122.62
.quad 0x405e900000000000 ; double 122.25
.quad 0x405ee7ae147ae148 ; double 123.62
.quad 0x405f000000000000 ; double 124
.quad 0x405ec7ae147ae148 ; double 123.12
.quad 0x405ef7ae147ae148 ; double 123.87
.quad 0x405ee00000000000 ; double 123.5
.quad 0x405ed00000000000 ; double 123.25
.quad 0x405ee7ae147ae148 ; double 123.62
.quad 0x405f07ae147ae148 ; double 124.12
.quad 0x405ed00000000000 ; double 123.25
.quad 0x405e77ae147ae148 ; double 121.87
.quad 0x405e67ae147ae148 ; double 121.62
.quad 0x405e700000000000 ; double 121.75
.quad 0x405e300000000000 ; double 120.75
.quad 0x405e77ae147ae148 ; double 121.87
.quad 0x405e300000000000 ; double 120.75
.quad 0x405e57ae147ae148 ; double 121.37
.quad 0x405eb00000000000 ; double 122.75
.quad 0x405e87ae147ae148 ; double 122.12
.quad 0x405e67ae147ae148 ; double 121.62
.quad 0x405dd7ae147ae148 ; double 119.37
.quad 0x405db00000000000 ; double 118.75
.quad 0x405df00000000000 ; double 119.75
.quad 0x405df00000000000 ; double 119.75
.quad 0x405df00000000000 ; double 119.75
.quad 0x405d300000000000 ; double 116.75
.quad 0x405da00000000000 ; double 118.5
.quad 0x405d500000000000 ; double 117.25
.quad 0x405ce7ae147ae148 ; double 115.62
.quad 0x405d07ae147ae148 ; double 116.12
.quad 0x405d000000000000 ; double 116
.quad 0x405d600000000000 ; double 117.5
.quad 0x405d87ae147ae148 ; double 118.12
.quad 0x405d600000000000 ; double 117.5
.quad 0x405dd00000000000 ; double 119.25
.quad 0x405e17ae147ae148 ; double 120.37
.quad 0x405e000000000000 ; double 120
.quad 0x405e27ae147ae148 ; double 120.62
.quad 0x405e07ae147ae148 ; double 120.12
.quad 0x405e57ae147ae148 ; double 121.37
.quad 0x405eb7ae147ae148 ; double 122.87
.quad 0x405ef7ae147ae148 ; double 123.87
.quad 0x405f17ae147ae148 ; double 124.37
.quad 0x405eb7ae147ae148 ; double 122.87
.quad 0x405e500000000000 ; double 121.25
.quad 0x405e500000000000 ; double 121.25
.quad 0x405ea00000000000 ; double 122.5
.quad 0x405ee00000000000 ; double 123.5
.quad 0x405f27ae147ae148 ; double 124.62
.quad 0x405f27ae147ae148 ; double 124.62
.quad 0x405f37ae147ae148 ; double 124.87
.quad 0x405f300000000000 ; double 124.75
.quad 0x405f17ae147ae148 ; double 124.37
.quad 0x405e77ae147ae148 ; double 121.87
.quad 0x405e100000000000 ; double 120.25
.quad 0x405df00000000000 ; double 119.75
.quad 0x405da00000000000 ; double 118.5
.quad 0x405db00000000000 ; double 118.75
.quad 0x405d200000000000 ; double 116.5
.quad 0x405d100000000000 ; double 116.25
.quad 0x405c87ae147ae148 ; double 114.12
.quad 0x405c97ae147ae148 ; double 114.37
.quad 0x405cd7ae147ae148 ; double 115.37
.quad 0x405cd00000000000 ; double 115.25
.quad 0x405d200000000000 ; double 116.5
.quad 0x405ce00000000000 ; double 115.5
.quad 0x405c87ae147ae148 ; double 114.12
.quad 0x405c67ae147ae148 ; double 113.62
.quad 0x405c67ae147ae148 ; double 113.62
.quad 0x405c57ae147ae148 ; double 113.37
.quad 0x405c77ae147ae148 ; double 113.87
.quad 0x405c67ae147ae148 ; double 113.62
.quad 0x405c57ae147ae148 ; double 113.37
.quad 0x405c87ae147ae148 ; double 114.12
.quad 0x405cc00000000000 ; double 115
.quad 0x405cd7ae147ae148 ; double 115.37
.quad 0x405cf7ae147ae148 ; double 115.87
.quad 0x405cc00000000000 ; double 115
.quad 0x405cd7ae147ae148 ; double 115.37
.quad 0x405cd00000000000 ; double 115.25
.quad 0x405c47ae147ae148 ; double 113.12
.quad 0x405c500000000000 ; double 113.25
.quad 0x405c77ae147ae148 ; double 113.87
.quad 0x405cb00000000000 ; double 114.75
.quad 0x405bc7ae147ae148 ; double 111.12
.quad 0x405c600000000000 ; double 113.5
.quad 0x405c700000000000 ; double 113.75
.quad 0x405c800000000000 ; double 114
.quad 0x405c300000000000 ; double 112.75
.quad 0x405c07ae147ae148 ; double 112.12
.quad 0x405c37ae147ae148 ; double 112.87
.quad 0x405bf00000000000 ; double 111.75
.quad 0x405c47ae147ae148 ; double 113.12
.quad 0x405cc00000000000 ; double 115
.quad 0x405ce00000000000 ; double 115.5
.quad 0x405d17ae147ae148 ; double 116.37
.quad 0x405d300000000000 ; double 116.75
.quad 0x405d77ae147ae148 ; double 117.87
.quad 0x405db00000000000 ; double 118.75
.quad 0x405de7ae147ae148 ; double 119.62
.quad 0x405e67ae147ae148 ; double 121.62
.quad 0x405ee7ae147ae148 ; double 123.62
.quad 0x405f17ae147ae148 ; double 124.37
.quad 0x405f100000000000 ; double 124.25
.quad 0x405f600000000000 ; double 125.5
.quad 0x405fa00000000000 ; double 126.5
.quad 0x405fb7ae147ae148 ; double 126.87
.quad 0x405fa00000000000 ; double 126.5
.quad 0x405f900000000000 ; double 126.25
.quad 0x405ef7ae147ae148 ; double 123.87
.quad 0x405ec7ae147ae148 ; double 123.12
.quad 0x405ed7ae147ae148 ; double 123.37
.quad 0x405e97ae147ae148 ; double 122.37
.quad 0x405e67ae147ae148 ; double 121.62
.quad 0x405ee7ae147ae148 ; double 123.62
.quad 0x405f300000000000 ; double 124.75
.quad 0x405f57ae147ae148 ; double 125.37
.quad 0x405f900000000000 ; double 126.25
.quad 0x405fa00000000000 ; double 126.5
.quad 0x405f700000000000 ; double 125.75
.quad 0x405f67ae147ae148 ; double 125.62
.quad 0x405fd7ae147ae148 ; double 127.37
.quad 0x405fd7ae147ae148 ; double 127.37
.quad 0x4060080000000000 ; double 128.25
.quad 0x405ff7ae147ae148 ; double 127.87
.quad 0x4060300000000000 ; double 129.5
.quad 0x4060280000000000 ; double 129.25
.quad 0x405ff00000000000 ; double 127.75
.quad 0x405ff00000000000 ; double 127.75
.quad 0x40600c28f5c28f5c ; double 128.38
.quad 0x405ff00000000000 ; double 127.75
.quad 0x405f900000000000 ; double 126.25
.quad 0x405f900000000000 ; double 126.25
.quad 0x405f77ae147ae148 ; double 125.87
.quad 0x405ee7ae147ae148 ; double 123.62
.quad 0x405e17ae147ae148 ; double 120.37
.quad 0x405d67ae147ae148 ; double 117.62
.quad 0x405d600000000000 ; double 117.5
.quad 0x405d97ae147ae148 ; double 118.37
.quad 0x405de7ae147ae148 ; double 119.62
.quad 0x405df00000000000 ; double 119.75
.quad 0x405d57ae147ae148 ; double 117.37
.quad 0x405d57ae147ae148 ; double 117.37
.quad 0x405d47ae147ae148 ; double 117.12
.quad 0x405d37ae147ae148 ; double 116.87
.quad 0x405cd00000000000 ; double 115.25
.quad 0x405c97ae147ae148 ; double 114.37
.quad 0x405c77ae147ae148 ; double 113.87
.quad 0x405c67ae147ae148 ; double 113.62
.quad 0x405ca00000000000 ; double 114.5
.quad 0x405c400000000000 ; double 113
.quad 0x405b17ae147ae148 ; double 108.37
.quad 0x405b500000000000 ; double 109.25
.quad 0x405b700000000000 ; double 109.75
.quad 0x405b600000000000 ; double 109.5
.quad 0x405b77ae147ae148 ; double 109.87
.quad 0x405bb00000000000 ; double 110.75
.quad 0x405ba7ae147ae148 ; double 110.62
.quad 0x405ba7ae147ae148 ; double 110.62
.quad 0x405c300000000000 ; double 112.75
.quad 0x405bf00000000000 ; double 111.75
.quad 0x405b97ae147ae148 ; double 110.37
.quad 0x405b57ae147ae148 ; double 109.37
.quad 0x405b77ae147ae148 ; double 109.87
.quad 0x405be00000000000 ; double 111.5
.quad 0x405bb7ae147ae148 ; double 110.87
.quad 0x405bf7ae147ae148 ; double 111.87
.quad 0x405be7ae147ae148 ; double 111.62
.quad 0x405cb00000000000 ; double 114.75
.quad 0x405cc7ae147ae148 ; double 115.12
.quad 0x405c97ae147ae148 ; double 114.37
.quad 0x405c700000000000 ; double 113.75
.quad 0x405c600000000000 ; double 113.5
.quad 0x405ca00000000000 ; double 114.5
.quad 0x405cb00000000000 ; double 114.75
.quad 0x405ca7ae147ae148 ; double 114.62
.quad 0x405c900000000000 ; double 114.25
.quad 0x405c87ae147ae148 ; double 114.12
.quad 0x405c77ae147ae148 ; double 113.87
.quad 0x405d000000000000 ; double 116
.quad 0x405ca00000000000 ; double 114.5
.quad 0x405c97ae147ae148 ; double 114.37
.quad 0x405cf00000000000 ; double 115.75
.quad 0x405d200000000000 ; double 116.5
.quad 0x405c87ae147ae148 ; double 114.12
.quad 0x405c67ae147ae148 ; double 113.62
.quad 0x405c27ae147ae148 ; double 112.62
.quad 0x405b900000000000 ; double 110.25
.quad 0x405b700000000000 ; double 109.75
.quad 0x405b100000000000 ; double 108.25
.quad 0x405b400000000000 ; double 109
.quad 0x405b200000000000 ; double 108.5
.quad 0x405aa7ae147ae148 ; double 106.62
.quad 0x405b300000000000 ; double 108.75
.quad 0x405b17ae147ae148 ; double 108.37
.quad 0x405b97ae147ae148 ; double 110.37
.quad 0x405ba00000000000 ; double 110.5
.quad 0x405c57ae147ae148 ; double 113.37
.quad 0x405c900000000000 ; double 114.25
.quad 0x405c800000000000 ; double 114
.quad 0x405ca7ae147ae148 ; double 114.62
.quad 0x405ce00000000000 ; double 115.5
.quad 0x405cd7ae147ae148 ; double 115.37
.quad 0x405cd7ae147ae148 ; double 115.37
.quad 0x405cf00000000000 ; double 115.75
.quad 0x405ce00000000000 ; double 115.5
.quad 0x405d200000000000 ; double 116.5
.quad 0x405d700000000000 ; double 117.75
.quad 0x405da7ae147ae148 ; double 118.62
.quad 0x405d600000000000 ; double 117.5
.quad 0x405d600000000000 ; double 117.5
.quad 0x405d500000000000 ; double 117.25
.quad 0x405d97ae147ae148 ; double 118.37
.quad 0x405d97ae147ae148 ; double 118.37
.quad 0x405d700000000000 ; double 117.75
.quad 0x405cf00000000000 ; double 115.75
.quad 0x405d77ae147ae148 ; double 117.87
.quad 0x405d300000000000 ; double 116.75
.quad 0x405cf7ae147ae148 ; double 115.87
.quad 0x405ce7ae147ae148 ; double 115.62
.quad 0x405c57ae147ae148 ; double 113.37
.quad 0x405c700000000000 ; double 113.75
.quad 0x405c900000000000 ; double 114.25
.quad 0x405c700000000000 ; double 113.75
.quad 0x405c37ae147ae148 ; double 112.87
.quad 0x405c17ae147ae148 ; double 112.37
.quad 0x405bf00000000000 ; double 111.75
.quad 0x405b500000000000 ; double 109.25
.quad 0x405b17ae147ae148 ; double 108.37
.quad 0x405b600000000000 ; double 109.5
.quad 0x405b500000000000 ; double 109.25
.quad 0x405bf00000000000 ; double 111.75
.quad 0x405ba7ae147ae148 ; double 110.62
.quad 0x405c600000000000 ; double 113.5
.quad 0x405c97ae147ae148 ; double 114.37
.quad 0x405ca00000000000 ; double 114.5
.quad 0x405cb00000000000 ; double 114.75
.quad 0x405c600000000000 ; double 113.5
.quad 0x405c500000000000 ; double 113.25
.quad 0x405bf00000000000 ; double 111.75
.quad 0x405c000000000000 ; double 112
.quad 0x405bd7ae147ae148 ; double 111.37
.quad 0x405ce00000000000 ; double 115.5
.quad 0x405df00000000000 ; double 119.75
.quad 0x405e100000000000 ; double 120.25
.quad 0x405d57ae147ae148 ; double 117.37
.quad 0x405d900000000000 ; double 118.25
.quad 0x405d700000000000 ; double 117.75
.quad 0x405d800000000000 ; double 118
.quad 0x405ef00000000000 ; double 123.75
.quad 0x405f000000000000 ; double 124
.quad 0x405ed00000000000 ; double 123.25
.quad 0x405ee00000000000 ; double 123.5
.quad 0x405e500000000000 ; double 121.25
.quad 0x405d17ae147ae148 ; double 116.37
.quad 0x405d700000000000 ; double 117.75
.quad 0x405d27ae147ae148 ; double 116.62
.quad 0x405d67ae147ae148 ; double 117.62
.quad 0x405df7ae147ae148 ; double 119.87
.quad 0x405e17ae147ae148 ; double 120.37
.quad 0x405d97ae147ae148 ; double 118.37
.quad 0x405de00000000000 ; double 119.5
.quad 0x405d900000000000 ; double 118.25
.quad 0x405de00000000000 ; double 119.5
.quad 0x405dd00000000000 ; double 119.25
.quad 0x405d400000000000 ; double 117
.quad 0x405cf00000000000 ; double 115.75
.quad 0x405be7ae147ae148 ; double 111.62
.quad 0x405ca00000000000 ; double 114.5
.quad 0x405cb00000000000 ; double 114.75
.quad 0x405be00000000000 ; double 111.5
.quad 0x405b600000000000 ; double 109.5
.quad 0x405af00000000000 ; double 107.75
.quad 0x405bb7ae147ae148 ; double 110.87
.quad 0x405c07ae147ae148 ; double 112.12
.quad 0x405c37ae147ae148 ; double 112.87
.quad 0x405c17ae147ae148 ; double 112.37
.quad 0x405d400000000000 ; double 117
.quad 0x405db7ae147ae148 ; double 118.87
.quad 0x405de00000000000 ; double 119.5
.quad 0x405d700000000000 ; double 117.75
.quad 0x405d77ae147ae148 ; double 117.87
.quad 0x405db7ae147ae148 ; double 118.87
.quad 0x405de00000000000 ; double 119.5
.quad 0x405dd00000000000 ; double 119.25
.quad 0x405ed00000000000 ; double 123.25
.quad 0x405ea7ae147ae148 ; double 122.62
.quad 0x405ee00000000000 ; double 123.5
.quad 0x405e07ae147ae148 ; double 120.12
.quad 0x405de00000000000 ; double 119.5
.quad 0x405df00000000000 ; double 119.75
.quad 0x405f200000000000 ; double 124.5
.quad 0x405f27ae147ae148 ; double 124.62
.quad 0x405ea00000000000 ; double 122.5
.quad 0x405ec00000000000 ; double 123
.quad 0x405f37ae147ae148 ; double 124.87
.quad 0x405f400000000000 ; double 125
.quad 0x405ec00000000000 ; double 123
.quad 0x405e000000000000 ; double 120
.quad 0x405da7ae147ae148 ; double 118.62
.quad 0x405d400000000000 ; double 117
.quad 0x405eb00000000000 ; double 122.75
.quad 0x405ea00000000000 ; double 122.5
.quad 0x405f600000000000 ; double 125.5
.quad 0x405e800000000000 ; double 122
.quad 0x4061400000000000 ; double 138
.quad 0x4061a80000000000 ; double 141.25
.quad 0x4062380000000000 ; double 145.75
.quad 0x40626c28f5c28f5c ; double 147.38
.quad 0x4062f00000000000 ; double 151.5
.quad 0x4062e80000000000 ; double 151.25
.quad 0x4062f428f5c28f5c ; double 151.63
.quad 0x40631c28f5c28f5c ; double 152.88
.quad 0x4063200000000000 ; double 153
.quad 0x40638c28f5c28f5c ; double 156.38
.quad 0x4063a80000000000 ; double 157.25
.quad 0x40639c28f5c28f5c ; double 156.88
.quad 0x4063580000000000 ; double 154.75
.quad 0x4062fc28f5c28f5c ; double 151.88
.quad 0x4063380000000000 ; double 153.75
.quad 0x4063c80000000000 ; double 158.25
.quad 0x40639428f5c28f5c ; double 156.63
.quad 0x4063c428f5c28f5c ; double 158.13
.quad 0x4063800000000000 ; double 156
.quad 0x4063780000000000 ; double 155.75
.quad 0x4063900000000000 ; double 156.5
.quad 0x4063d00000000000 ; double 158.5
.quad 0x4063d428f5c28f5c ; double 158.63
.quad 0x4063fc28f5c28f5c ; double 159.88
.quad 0x4064480000000000 ; double 162.25
.quad 0x4064580000000000 ; double 162.75
.quad 0x40642c28f5c28f5c ; double 161.38
.quad 0x4063cc28f5c28f5c ; double 158.38
.quad 0x4063c428f5c28f5c ; double 158.13
.quad 0x4064100000000000 ; double 160.5
.quad 0x4064580000000000 ; double 162.75
.quad 0x4064700000000000 ; double 163.5
.quad 0x40648428f5c28f5c ; double 164.13
.quad 0x40652c28f5c28f5c ; double 169.38
.quad 0x4065300000000000 ; double 169.5
.quad 0x4065000000000000 ; double 168
.quad 0x40650c28f5c28f5c ; double 168.38
.quad 0x4065300000000000 ; double 169.5
.quad 0x4065dc28f5c28f5c ; double 174.88
.quad 0x4065e428f5c28f5c ; double 175.13
.quad 0x4065fc28f5c28f5c ; double 175.88
.quad 0x4065e80000000000 ; double 175.25
.quad 0x4065a80000000000 ; double 173.25
.quad 0x4065bc28f5c28f5c ; double 173.88
.quad 0x4065d80000000000 ; double 174.75
.quad 0x4065e00000000000 ; double 175
.quad 0x4065c00000000000 ; double 174
.quad 0x40658428f5c28f5c ; double 172.13
.quad 0x40653428f5c28f5c ; double 169.63
.quad 0x4064d80000000000 ; double 166.75
.quad 0x4064b428f5c28f5c ; double 165.63
.quad 0x4064680000000000 ; double 163.25
.quad 0x4064200000000000 ; double 161
.quad 0x4064000000000000 ; double 160
.quad 0x40641c28f5c28f5c ; double 160.88
.quad 0x4064600000000000 ; double 163
.quad 0x4064500000000000 ; double 162.5
.quad 0x4064380000000000 ; double 161.75
.quad 0x40645c28f5c28f5c ; double 162.88
.quad 0x40645428f5c28f5c ; double 162.63
.quad 0x4064300000000000 ; double 161.5
.quad 0x4064480000000000 ; double 162.25
.quad 0x40645c28f5c28f5c ; double 162.88
.quad 0x40649428f5c28f5c ; double 164.63
.quad 0x4064bc28f5c28f5c ; double 165.88
.quad 0x4065100000000000 ; double 168.5
.quad 0x4065180000000000 ; double 168.75
.quad 0x4065300000000000 ; double 169.5
.quad 0x40653c28f5c28f5c ; double 169.88
.quad 0x40653c28f5c28f5c ; double 169.88
.quad 0x4064e80000000000 ; double 167.25
.quad 0x4064e80000000000 ; double 167.25
.quad 0x4064dc28f5c28f5c ; double 166.88
.quad 0x4064c00000000000 ; double 166
.quad 0x4064b428f5c28f5c ; double 165.63
.quad 0x4064a428f5c28f5c ; double 165.13
.quad 0x40649428f5c28f5c ; double 164.63
.quad 0x4064b80000000000 ; double 165.75
.quad 0x4064d00000000000 ; double 166.5
.quad 0x4064f80000000000 ; double 167.75
.quad 0x40651428f5c28f5c ; double 168.63
.quad 0x4064f428f5c28f5c ; double 167.63
.quad 0x4064a00000000000 ; double 165
.quad 0x4064700000000000 ; double 163.5
.quad 0x40644428f5c28f5c ; double 162.13
.quad 0x4064400000000000 ; double 162
.quad 0x4064500000000000 ; double 162.5
.quad 0x4064480000000000 ; double 162.25
.quad 0x40640428f5c28f5c ; double 160.13
.quad 0x4063ec28f5c28f5c ; double 159.38
.quad 0x4063e428f5c28f5c ; double 159.13
.quad 0x4064300000000000 ; double 161.5
.quad 0x40643428f5c28f5c ; double 161.63
.quad 0x40643c28f5c28f5c ; double 161.88
.quad 0x4064280000000000 ; double 161.25
.quad 0x40642c28f5c28f5c ; double 161.38
.quad 0x40640c28f5c28f5c ; double 160.38
.quad 0x4063f80000000000 ; double 159.75
.quad 0x40642c28f5c28f5c ; double 161.38
.quad 0x40647c28f5c28f5c ; double 163.88
.quad 0x4064580000000000 ; double 162.75
.quad 0x40644c28f5c28f5c ; double 162.38
.quad 0x4064180000000000 ; double 160.75
.quad 0x4063e80000000000 ; double 159.25
.quad 0x4063d80000000000 ; double 158.75
.quad 0x4063d428f5c28f5c ; double 158.63
.quad 0x4064380000000000 ; double 161.75
.quad 0x4064280000000000 ; double 161.25
.quad 0x4064ac28f5c28f5c ; double 165.38
.quad 0x4064ec28f5c28f5c ; double 167.38
.quad 0x4064e00000000000 ; double 167
.quad 0x4064c428f5c28f5c ; double 166.13
.quad 0x4064ec28f5c28f5c ; double 167.38
.quad 0x4064bc28f5c28f5c ; double 165.88
.quad 0x4064d80000000000 ; double 166.75
.quad 0x4064f428f5c28f5c ; double 167.63
.quad 0x4064f428f5c28f5c ; double 167.63
.quad 0x4064600000000000 ; double 163
.quad 0x40644c28f5c28f5c ; double 162.38
.quad 0x4064300000000000 ; double 161.5
.quad 0x4063f428f5c28f5c ; double 159.63
.quad 0x40639428f5c28f5c ; double 156.63
.quad 0x4063800000000000 ; double 156
.quad 0x40634428f5c28f5c ; double 154.13
.quad 0x4063900000000000 ; double 156.5
.quad 0x4063a00000000000 ; double 157
.quad 0x4063c00000000000 ; double 158
.quad 0x4062d428f5c28f5c ; double 150.63
.quad 0x4063080000000000 ; double 152.25
.quad 0x4062fc28f5c28f5c ; double 151.88
.quad 0x4062dc28f5c28f5c ; double 150.88
.quad 0x4062c80000000000 ; double 150.25
.quad 0x40623428f5c28f5c ; double 145.63
.quad 0x4062700000000000 ; double 147.5
.quad 0x4062880000000000 ; double 148.25
.quad 0x4062c428f5c28f5c ; double 150.13
.quad 0x4062cc28f5c28f5c ; double 150.38
.quad 0x4062d80000000000 ; double 150.75
.quad 0x4062ec28f5c28f5c ; double 151.38
.quad 0x4063000000000000 ; double 152
.quad 0x40632c28f5c28f5c ; double 153.38
.quad 0x4063180000000000 ; double 152.75
.quad 0x40634428f5c28f5c ; double 154.13
.quad 0x40639428f5c28f5c ; double 156.63
.quad 0x4063900000000000 ; double 156.5
.quad 0x40631c28f5c28f5c ; double 152.88
.quad 0x4062c80000000000 ; double 150.25
.quad 0x4062a80000000000 ; double 149.25
.quad 0x40626c28f5c28f5c ; double 147.38
.quad 0x4062800000000000 ; double 148
.quad 0x40626428f5c28f5c ; double 147.13
.quad 0x4062200000000000 ; double 145
.quad 0x4062400000000000 ; double 146
.quad 0x40622428f5c28f5c ; double 145.13
.quad 0x40620c28f5c28f5c ; double 144.38
.quad 0x4061dc28f5c28f5c ; double 142.88
.quad 0x40615c28f5c28f5c ; double 138.88
.quad 0x4061900000000000 ; double 140.5
.quad 0x4061bc28f5c28f5c ; double 141.88
.quad 0x4061ac28f5c28f5c ; double 141.38
.quad 0x40615c28f5c28f5c ; double 138.88
.quad 0x4061800000000000 ; double 140
.quad 0x40619428f5c28f5c ; double 140.63
.quad 0x4061b428f5c28f5c ; double 141.63
.quad 0x4061f00000000000 ; double 143.5
.quad 0x4062080000000000 ; double 144.25
.quad 0x40620428f5c28f5c ; double 144.13
.quad 0x4061780000000000 ; double 139.75
.quad 0x40617428f5c28f5c ; double 139.63
.quad 0x4061700000000000 ; double 139.5
.quad 0x40615428f5c28f5c ; double 138.63
.quad 0x4060d80000000000 ; double 134.75
.quad 0x4060e00000000000 ; double 135
.quad 0x4060d00000000000 ; double 134.5
.quad 0x4060f00000000000 ; double 135.5
.quad 0x4060fc28f5c28f5c ; double 135.88
.quad 0x40612428f5c28f5c ; double 137.13
.quad 0x4061300000000000 ; double 137.5
.quad 0x4060e428f5c28f5c ; double 135.13
.quad 0x4060cc28f5c28f5c ; double 134.38
.quad 0x4060900000000000 ; double 132.5
.quad 0x40602428f5c28f5c ; double 129.13
.quad 0x40605c28f5c28f5c ; double 130.88
.quad 0x4060480000000000 ; double 130.25
.quad 0x4060380000000000 ; double 129.75
.quad 0x405ff7ae147ae148 ; double 127.87
.quad 0x40603c28f5c28f5c ; double 129.88
.quad 0x4060000000000000 ; double 128
.quad 0x405f37ae147ae148 ; double 124.87
.quad 0x405fd00000000000 ; double 127.25
.quad 0x405f500000000000 ; double 125.25
.quad 0x405e47ae147ae148 ; double 121.12
.quad 0x405e57ae147ae148 ; double 121.37
.quad 0x405db00000000000 ; double 118.75
.quad 0x405db00000000000 ; double 118.75
.quad 0x405e47ae147ae148 ; double 121.12
.quad 0x405ea7ae147ae148 ; double 122.62
.quad 0x405ed7ae147ae148 ; double 123.37
.quad 0x405f000000000000 ; double 124
.quad 0x405f17ae147ae148 ; double 124.37
.quad 0x405f000000000000 ; double 124
.quad 0x405ea00000000000 ; double 122.5
.quad 0x405e77ae147ae148 ; double 121.87
.quad 0x405e67ae147ae148 ; double 121.62
.quad 0x405e800000000000 ; double 122
.quad 0x405e900000000000 ; double 122.25
.quad 0x405ea00000000000 ; double 122.5
.quad 0x405ea00000000000 ; double 122.5
.quad 0x405f400000000000 ; double 125
.quad 0x405f77ae147ae148 ; double 125.87
.quad 0x405fc00000000000 ; double 127
.quad 0x40600428f5c28f5c ; double 128.13
.quad 0x4060100000000000 ; double 128.5
.quad 0x405fe7ae147ae148 ; double 127.62
.quad 0x4060080000000000 ; double 128.25
.quad 0x40602c28f5c28f5c ; double 129.38
.quad 0x4060200000000000 ; double 129
.quad 0x40600428f5c28f5c ; double 128.13
.quad 0x4060180000000000 ; double 128.75
.quad 0x4060100000000000 ; double 128.5
.quad 0x4060280000000000 ; double 129.25
.quad 0x40604428f5c28f5c ; double 130.13
.quad 0x40603c28f5c28f5c ; double 129.88
.quad 0x405fd7ae147ae148 ; double 127.37
.quad 0x405fd00000000000 ; double 127.25
.quad 0x405ff7ae147ae148 ; double 127.87
.quad 0x405fe00000000000 ; double 127.5
.quad 0x405f500000000000 ; double 125.25
.quad 0x405f57ae147ae148 ; double 125.37
.quad 0x405ef7ae147ae148 ; double 123.87
.quad 0x405ec00000000000 ; double 123
.quad 0x405ec7ae147ae148 ; double 123.12
.quad 0x405ee7ae147ae148 ; double 123.62
.quad 0x405e600000000000 ; double 121.5
.quad 0x405ec00000000000 ; double 123
.quad 0x405f17ae147ae148 ; double 124.37
.quad 0x405f100000000000 ; double 124.25
.quad 0x405ed00000000000 ; double 123.25
.quad 0x405e77ae147ae148 ; double 121.87
.quad 0x405e800000000000 ; double 122
.quad 0x405f300000000000 ; double 124.75
.quad 0x405fa00000000000 ; double 126.5
.quad 0x405f97ae147ae148 ; double 126.37
.quad 0x405ef00000000000 ; double 123.75
.quad 0x405ec00000000000 ; double 123
.quad 0x405e700000000000 ; double 121.75
.quad 0x405e97ae147ae148 ; double 122.37
.quad 0x405e87ae147ae148 ; double 122.12
.quad 0x405e900000000000 ; double 122.25
.quad 0x405ea00000000000 ; double 122.5
.quad 0x405e600000000000 ; double 121.5
.quad 0x405e57ae147ae148 ; double 121.37
.quad 0x405e500000000000 ; double 121.25
.quad 0x405e97ae147ae148 ; double 122.37
.quad 0x405ed7ae147ae148 ; double 123.37
.quad 0x405ec00000000000 ; double 123
.quad 0x405ec00000000000 ; double 123
.quad 0x405f500000000000 ; double 125.25
.quad 0x405f200000000000 ; double 124.5
.quad 0x405ff7ae147ae148 ; double 127.87
.quad 0x405fe00000000000 ; double 127.5
.quad 0x4060b428f5c28f5c ; double 133.63
.quad 0x4060b428f5c28f5c ; double 133.63
.quad 0x4060a80000000000 ; double 133.25
.quad 0x4060ac28f5c28f5c ; double 133.38
.quad 0x4060f00000000000 ; double 135.5
.quad 0x4060fc28f5c28f5c ; double 135.88
.quad 0x4060e80000000000 ; double 135.25
.quad 0x40611428f5c28f5c ; double 136.63
.quad 0x40613428f5c28f5c ; double 137.63
.quad 0x40616c28f5c28f5c ; double 139.38
.quad 0x40616c28f5c28f5c ; double 139.38
.quad 0x40615428f5c28f5c ; double 138.63
.quad 0x4061480000000000 ; double 138.25
.quad 0x4061600000000000 ; double 139
.quad 0x40617428f5c28f5c ; double 139.63
.quad 0x4061500000000000 ; double 138.5
.quad 0x4061500000000000 ; double 138.5
.quad 0x40617c28f5c28f5c ; double 139.88
.quad 0x4061dc28f5c28f5c ; double 142.88
.quad 0x4062100000000000 ; double 144.5
.quad 0x4062300000000000 ; double 145.5
.quad 0x4061fc28f5c28f5c ; double 143.88
.quad 0x4061a00000000000 ; double 141
.quad 0x4061700000000000 ; double 139.5
.quad 0x40610c28f5c28f5c ; double 136.38
.quad 0x40615c28f5c28f5c ; double 138.88
.quad 0x4061800000000000 ; double 140
.quad 0x40618c28f5c28f5c ; double 140.38
.quad 0x4061c00000000000 ; double 142
.quad 0x4061a80000000000 ; double 141.25
.quad 0x4061800000000000 ; double 140
.quad 0x40614428f5c28f5c ; double 138.13
.quad 0x4061900000000000 ; double 140.5
.quad 0x4061700000000000 ; double 139.5
.quad 0x4061380000000000 ; double 137.75
.quad 0x4060f80000000000 ; double 135.75
.quad 0x4060d00000000000 ; double 134.5
.quad 0x4060e80000000000 ; double 135.25
.quad 0x4060cc28f5c28f5c ; double 134.38
.quad 0x4060980000000000 ; double 132.75
.quad 0x40607428f5c28f5c ; double 131.63
.quad 0x40607428f5c28f5c ; double 131.63
.quad 0x40607c28f5c28f5c ; double 131.88
.quad 0x4060700000000000 ; double 131.5
.quad 0x40607c28f5c28f5c ; double 131.88
.quad 0x40607c28f5c28f5c ; double 131.88
.quad 0x4060ac28f5c28f5c ; double 133.38
.quad 0x4060a80000000000 ; double 133.25
.quad 0x4060a80000000000 ; double 133.25
.quad 0x4060980000000000 ; double 132.75
.quad 0x4060dc28f5c28f5c ; double 134.88
.quad 0x4060e00000000000 ; double 135
.quad 0x4060b428f5c28f5c ; double 133.63
.quad 0x4060c00000000000 ; double 134
.quad 0x4060bc28f5c28f5c ; double 133.88
.quad 0x40608c28f5c28f5c ; double 132.38
.quad 0x4060b00000000000 ; double 133.5
.quad 0x4060c00000000000 ; double 134
.quad 0x40610428f5c28f5c ; double 136.13
.quad 0x40615428f5c28f5c ; double 138.63
.quad 0x4061ac28f5c28f5c ; double 141.38
.quad 0x40623428f5c28f5c ; double 145.63
.quad 0x40623c28f5c28f5c ; double 145.88
.quad 0x4062180000000000 ; double 144.75
.quad 0x40621428f5c28f5c ; double 144.63
.quad 0x40628428f5c28f5c ; double 148.13
.quad 0x4062c428f5c28f5c ; double 150.13
.quad 0x4062c428f5c28f5c ; double 150.13
.quad 0x40629428f5c28f5c ; double 148.63
.quad 0x4062700000000000 ; double 147.5
.quad 0x40626c28f5c28f5c ; double 147.38
.quad 0x40627428f5c28f5c ; double 147.63
.quad 0x4062900000000000 ; double 148.5
.quad 0x40627c28f5c28f5c ; double 147.88
.quad 0x4062600000000000 ; double 147
.quad 0x4062700000000000 ; double 147.5
.quad 0x4062600000000000 ; double 147
.quad 0x4062600000000000 ; double 147
.quad 0x4062980000000000 ; double 148.75
.quad 0x4062bc28f5c28f5c ; double 149.88
.quad 0x4062bc28f5c28f5c ; double 149.88
.quad 0x4062a428f5c28f5c ; double 149.13
.quad 0x4062b00000000000 ; double 149.5
.quad 0x4062980000000000 ; double 148.75
.quad 0x4062a00000000000 ; double 149
.quad 0x4062e00000000000 ; double 151
.quad 0x4062dc28f5c28f5c ; double 150.88
.quad 0x40631c28f5c28f5c ; double 152.88
.quad 0x4063180000000000 ; double 152.75
.quad 0x4063300000000000 ; double 153.5
.quad 0x40635428f5c28f5c ; double 154.63
.quad 0x40633428f5c28f5c ; double 153.63
.quad 0x4063100000000000 ; double 152.5
.quad 0x4062600000000000 ; double 147
.quad 0x4062480000000000 ; double 146.25
.quad 0x40622428f5c28f5c ; double 145.13
.quad 0x40625428f5c28f5c ; double 146.63
.quad 0x40624c28f5c28f5c ; double 146.38
.quad 0x4062280000000000 ; double 145.25
.quad 0x40623428f5c28f5c ; double 145.63
.quad 0x40628428f5c28f5c ; double 148.13
.quad 0x4062a428f5c28f5c ; double 149.13
.quad 0x4062cc28f5c28f5c ; double 150.38
.quad 0x4062f00000000000 ; double 151.5
.quad 0x4062c80000000000 ; double 150.25
.quad 0x4062d00000000000 ; double 150.5
.quad 0x4063300000000000 ; double 153.5
.quad 0x40637c28f5c28f5c ; double 155.88
.quad 0x40637c28f5c28f5c ; double 155.88
.quad 0x4063a80000000000 ; double 157.25
.quad 0x4063b00000000000 ; double 157.5
.quad 0x4063f80000000000 ; double 159.75
.quad 0x40643c28f5c28f5c ; double 161.88
.quad 0x40642c28f5c28f5c ; double 161.38
.quad 0x40640c28f5c28f5c ; double 160.38
.quad 0x4063d00000000000 ; double 158.5
.quad 0x4063680000000000 ; double 155.25
.quad 0x40636428f5c28f5c ; double 155.13
.quad 0x40635c28f5c28f5c ; double 154.88
.quad 0x40635c28f5c28f5c ; double 154.88
.quad 0x40637428f5c28f5c ; double 155.63
.quad 0x4063680000000000 ; double 155.25
.quad 0x40633428f5c28f5c ; double 153.63
.quad 0x40631428f5c28f5c ; double 152.63
.quad 0x40630c28f5c28f5c ; double 152.38
.quad 0x4062f00000000000 ; double 151.5
.quad 0x40631428f5c28f5c ; double 152.63
.quad 0x4063000000000000 ; double 152
.quad 0x4062c80000000000 ; double 150.25
.quad 0x4062c428f5c28f5c ; double 150.13
.quad 0x40631c28f5c28f5c ; double 152.88
.quad 0x4062e428f5c28f5c ; double 151.13
.quad 0x4063000000000000 ; double 152
.quad 0x40632428f5c28f5c ; double 153.13
.quad 0x4062e00000000000 ; double 151
.quad 0x40627c28f5c28f5c ; double 147.88
.quad 0x4062980000000000 ; double 148.75
.quad 0x4062c00000000000 ; double 150
.quad 0x4062dc28f5c28f5c ; double 150.88
.quad 0x4063180000000000 ; double 152.75
.quad 0x40630c28f5c28f5c ; double 152.38
.quad 0x4063300000000000 ; double 153.5
.quad 0x4062e00000000000 ; double 151
.quad 0x4062f00000000000 ; double 151.5
.quad 0x4062dc28f5c28f5c ; double 150.88
.quad 0x40631428f5c28f5c ; double 152.63
.quad 0x40630428f5c28f5c ; double 152.13
.quad 0x4062900000000000 ; double 148.5
.quad 0x4062680000000000 ; double 147.25
.quad 0x4062a428f5c28f5c ; double 149.13
.quad 0x40628c28f5c28f5c ; double 148.38
.quad 0x40632428f5c28f5c ; double 153.13
.quad 0x4063100000000000 ; double 152.5
.quad 0x4063980000000000 ; double 156.75
.quad 0x4063c80000000000 ; double 158.25
.quad 0x4063e428f5c28f5c ; double 159.13
.quad 0x4063d428f5c28f5c ; double 158.63
.quad 0x4064000000000000 ; double 160
.quad 0x4064200000000000 ; double 161
.quad 0x4063e428f5c28f5c ; double 159.13
.quad 0x40640c28f5c28f5c ; double 160.38
.quad 0x4063f80000000000 ; double 159.75
.quad 0x4063bc28f5c28f5c ; double 157.88
.quad 0x40637c28f5c28f5c ; double 155.88
.quad 0x4063a80000000000 ; double 157.25
.quad 0x4063c80000000000 ; double 158.25
.quad 0x4063bc28f5c28f5c ; double 157.88
.quad 0x4063a80000000000 ; double 157.25
.quad 0x4063800000000000 ; double 156
.quad 0x4063600000000000 ; double 155
.quad 0x4063700000000000 ; double 155.5
.quad 0x40635428f5c28f5c ; double 154.63
.quad 0x4063300000000000 ; double 153.5
.quad 0x4062e428f5c28f5c ; double 151.13
.quad 0x40633428f5c28f5c ; double 153.63
.quad 0x4062f80000000000 ; double 151.75
.quad 0x4062e80000000000 ; double 151.25
.quad 0x4062d00000000000 ; double 150.5
.quad 0x4062800000000000 ; double 148
.quad 0x4062c00000000000 ; double 150
.quad 0x4062cc28f5c28f5c ; double 150.38
.quad 0x4062c00000000000 ; double 150
.quad 0x4063880000000000 ; double 156.25
.quad 0x4063900000000000 ; double 156.5
.quad 0x4063180000000000 ; double 152.75
.quad 0x4062bc28f5c28f5c ; double 149.88
.quad 0x4062b428f5c28f5c ; double 149.63
.quad 0x4062e00000000000 ; double 151
.quad 0x4062cc28f5c28f5c ; double 150.38
.quad 0x4063800000000000 ; double 156
.quad 0x4063900000000000 ; double 156.5
.quad 0x40635c28f5c28f5c ; double 154.88
.quad 0x40634c28f5c28f5c ; double 154.38
.quad 0x40635c28f5c28f5c ; double 154.88
.quad 0x4063d80000000000 ; double 158.75
.quad 0x4063d00000000000 ; double 158.5
.quad 0x40637428f5c28f5c ; double 155.63
.quad 0x4063380000000000 ; double 153.75
.quad 0x40635428f5c28f5c ; double 154.63
.quad 0x40636428f5c28f5c ; double 155.13
.quad 0x40639428f5c28f5c ; double 156.63
.quad 0x4063400000000000 ; double 154
.quad 0x40633c28f5c28f5c ; double 153.88
.quad 0x4063500000000000 ; double 154.5
.quad 0x4063300000000000 ; double 153.5
.quad 0x4062f00000000000 ; double 151.5
.quad 0x4062ac28f5c28f5c ; double 149.38
.quad 0x4062ac28f5c28f5c ; double 149.38
.quad 0x4062800000000000 ; double 148
.quad 0x40621c28f5c28f5c ; double 144.88
.quad 0x4061b80000000000 ; double 141.75
.quad 0x4061dc28f5c28f5c ; double 142.88
.quad 0x4061bc28f5c28f5c ; double 141.88
.quad 0x40615428f5c28f5c ; double 138.63
.quad 0x40617c28f5c28f5c ; double 139.88
.quad 0x4061980000000000 ; double 140.75
.quad 0x4061900000000000 ; double 140.5
.quad 0x4061780000000000 ; double 139.75
.quad 0x4061700000000000 ; double 139.5
.quad 0x4061900000000000 ; double 140.5
.quad 0x4061880000000000 ; double 140.25
.quad 0x40616428f5c28f5c ; double 139.13
.quad 0x4061680000000000 ; double 139.25
.quad 0x40615428f5c28f5c ; double 138.63
.quad 0x40614c28f5c28f5c ; double 138.38
.quad 0x4061200000000000 ; double 137
.quad 0x4060f80000000000 ; double 135.75
.quad 0x40610c28f5c28f5c ; double 136.38
.quad 0x4060dc28f5c28f5c ; double 134.88
.quad 0x4060a00000000000 ; double 133
.quad 0x4060980000000000 ; double 132.75
.quad 0x4060a80000000000 ; double 133.25
.quad 0x4060c00000000000 ; double 134
.quad 0x40609428f5c28f5c ; double 132.63
.quad 0x40606428f5c28f5c ; double 131.13
.quad 0x4060580000000000 ; double 130.75
.quad 0x40606c28f5c28f5c ; double 131.38
.quad 0x40606c28f5c28f5c ; double 131.38
.quad 0x40602c28f5c28f5c ; double 129.38
.quad 0x40601c28f5c28f5c ; double 128.88
.quad 0x4060680000000000 ; double 131.25
.quad 0x40605c28f5c28f5c ; double 130.88
.quad 0x40604428f5c28f5c ; double 130.13
.quad 0x405ff7ae147ae148 ; double 127.87
.quad 0x4060200000000000 ; double 129
.quad 0x4060400000000000 ; double 130
.quad 0x4060300000000000 ; double 129.5
.quad 0x4060200000000000 ; double 129
.quad 0x40600428f5c28f5c ; double 128.13
.quad 0x405f900000000000 ; double 126.25
.quad 0x405f300000000000 ; double 124.75
.quad 0x405f57ae147ae148 ; double 125.37
.quad 0x405f300000000000 ; double 124.75
.quad 0x405f400000000000 ; double 125
.quad 0x405f17ae147ae148 ; double 124.37
.quad 0x405f500000000000 ; double 125.25
.quad 0x405fc00000000000 ; double 127
.quad 0x405fa7ae147ae148 ; double 126.62
.quad 0x405f100000000000 ; double 124.25
.quad 0x405ef7ae147ae148 ; double 123.87
.quad 0x405fc00000000000 ; double 127
.quad 0x4060000000000000 ; double 128
.quad 0x40600c28f5c28f5c ; double 128.38
.quad 0x4060000000000000 ; double 128
.quad 0x4060000000000000 ; double 128
.quad 0x4060000000000000 ; double 128
.quad 0x4060100000000000 ; double 128.5
.quad 0x4060100000000000 ; double 128.5
.quad 0x4060000000000000 ; double 128
.quad 0x4060100000000000 ; double 128.5
.quad 0x4060100000000000 ; double 128.5
.quad 0x4060300000000000 ; double 129.5
.quad 0x4060500000000000 ; double 130.5
.quad 0x4060380000000000 ; double 129.75
.quad 0x40601428f5c28f5c ; double 128.63
.quad 0x405fd00000000000 ; double 127.25
.quad 0x405fe00000000000 ; double 127.5
.quad 0x405fe7ae147ae148 ; double 127.62
.quad 0x4060000000000000 ; double 128
.quad 0x40601428f5c28f5c ; double 128.63
.quad 0x40601c28f5c28f5c ; double 128.88
.quad 0x4060080000000000 ; double 128.25
.quad 0x405fc7ae147ae148 ; double 127.12
.quad 0x40600c28f5c28f5c ; double 128.38
.quad 0x40600c28f5c28f5c ; double 128.38
.quad 0x405ff00000000000 ; double 127.75
.quad 0x405f97ae147ae148 ; double 126.37
.quad 0x405f97ae147ae148 ; double 126.37
.quad 0x405fd00000000000 ; double 127.25
.quad 0x40602428f5c28f5c ; double 129.13
.quad 0x405fe00000000000 ; double 127.5
.quad 0x405fd7ae147ae148 ; double 127.37
.quad 0x40601428f5c28f5c ; double 128.63
.quad 0x40601c28f5c28f5c ; double 128.88
.quad 0x4060380000000000 ; double 129.75
.quad 0x4060780000000000 ; double 131.75
.quad 0x4060700000000000 ; double 131.5
.quad 0x4060880000000000 ; double 132.25
.quad 0x4060980000000000 ; double 132.75
.quad 0x40607c28f5c28f5c ; double 131.88
.quad 0x4060580000000000 ; double 130.75
.quad 0x4060800000000000 ; double 132
.quad 0x4060900000000000 ; double 132.5
.quad 0x4060480000000000 ; double 130.25
.quad 0x40603c28f5c28f5c ; double 129.88
.quad 0x40607c28f5c28f5c ; double 131.88
.quad 0x40603c28f5c28f5c ; double 129.88
.quad 0x4060400000000000 ; double 130
.quad 0x4060400000000000 ; double 130
.quad 0x4060480000000000 ; double 130.25
.quad 0x4060180000000000 ; double 128.75
.quad 0x405fb00000000000 ; double 126.75
.quad 0x405f27ae147ae148 ; double 124.62
.quad 0x405f07ae147ae148 ; double 124.12
.quad 0x405ed7ae147ae148 ; double 123.37
.quad 0x405ed00000000000 ; double 123.25
.quad 0x405f200000000000 ; double 124.5
.quad 0x405f37ae147ae148 ; double 124.87
.quad 0x405f27ae147ae148 ; double 124.62
.quad 0x405f57ae147ae148 ; double 125.37
.quad 0x405f400000000000 ; double 125
.quad 0x405f200000000000 ; double 124.5
.quad 0x405f100000000000 ; double 124.25
.quad 0x405ed00000000000 ; double 123.25
.quad 0x405ed7ae147ae148 ; double 123.37
.quad 0x405e47ae147ae148 ; double 121.12
.quad 0x405df00000000000 ; double 119.75
.quad 0x405df00000000000 ; double 119.75
.quad 0x405e37ae147ae148 ; double 120.87
.quad 0x405e67ae147ae148 ; double 121.62
.quad 0x405e200000000000 ; double 120.5
.quad 0x405e47ae147ae148 ; double 121.12
.quad 0x405e100000000000 ; double 120.25
.quad 0x405f87ae147ae148 ; double 126.12
.quad 0x4060080000000000 ; double 128.25
.quad 0x405ff7ae147ae148 ; double 127.87
.quad 0x40603c28f5c28f5c ; double 129.88
.quad 0x4060380000000000 ; double 129.75
.quad 0x40605428f5c28f5c ; double 130.63
.quad 0x40604428f5c28f5c ; double 130.13
.quad 0x4060300000000000 ; double 129.5
.quad 0x4060400000000000 ; double 130
.quad 0x4060300000000000 ; double 129.5
.quad 0x4060480000000000 ; double 130.25
.quad 0x4060780000000000 ; double 131.75
.quad 0x40607428f5c28f5c ; double 131.63
.quad 0x40608c28f5c28f5c ; double 132.38
.quad 0x4060980000000000 ; double 132.75
.quad 0x4060a80000000000 ; double 133.25
.quad 0x4060a80000000000 ; double 133.25
.quad 0x4060680000000000 ; double 131.25
.quad 0x4060300000000000 ; double 129.5
.quad 0x4060280000000000 ; double 129.25
.quad 0x40605c28f5c28f5c ; double 130.88
.quad 0x40605428f5c28f5c ; double 130.63
.quad 0x40605c28f5c28f5c ; double 130.88
.quad 0x4060080000000000 ; double 128.25
.quad 0x405fd00000000000 ; double 127.25
.quad 0x405f900000000000 ; double 126.25
.quad 0x405f67ae147ae148 ; double 125.62
.quad 0x405f67ae147ae148 ; double 125.62
.quad 0x405fb7ae147ae148 ; double 126.87
.quad 0x405fb7ae147ae148 ; double 126.87
.quad 0x405fb00000000000 ; double 126.75
.quad 0x405fc7ae147ae148 ; double 127.12
.quad 0x4060380000000000 ; double 129.75
.quad 0x4060200000000000 ; double 129
.quad 0x40602428f5c28f5c ; double 129.13
.quad 0x4060280000000000 ; double 129.25
.quad 0x4060000000000000 ; double 128
.quad 0x40600428f5c28f5c ; double 128.13
.quad 0x4060400000000000 ; double 130
.quad 0x40604428f5c28f5c ; double 130.13
.quad 0x40604c28f5c28f5c ; double 130.38
.quad 0x4060380000000000 ; double 129.75
.quad 0x40600428f5c28f5c ; double 128.13
.quad 0x405fe7ae147ae148 ; double 127.62
.quad 0x405fa7ae147ae148 ; double 126.62
.quad 0x405f57ae147ae148 ; double 125.37
.quad 0x405fc00000000000 ; double 127
.quad 0x405fd00000000000 ; double 127.25
.quad 0x405fe7ae147ae148 ; double 127.62
.quad 0x4060300000000000 ; double 129.5
.quad 0x4060080000000000 ; double 128.25
.quad 0x405fc7ae147ae148 ; double 127.12
.quad 0x405fe7ae147ae148 ; double 127.62
.quad 0x405fa7ae147ae148 ; double 126.62
.quad 0x405f67ae147ae148 ; double 125.62
.quad 0x4060000000000000 ; double 128
.quad 0x4060180000000000 ; double 128.75
.quad 0x4060400000000000 ; double 130
.quad 0x40605428f5c28f5c ; double 130.63
.quad 0x40606428f5c28f5c ; double 131.13
.quad 0x40603c28f5c28f5c ; double 129.88
.quad 0x4060400000000000 ; double 130
.quad 0x40604c28f5c28f5c ; double 130.38
.quad 0x4060800000000000 ; double 132
.quad 0x4060900000000000 ; double 132.5
.quad 0x4060780000000000 ; double 131.75
.quad 0x4060600000000000 ; double 131
.quad 0x4060ac28f5c28f5c ; double 133.38
.quad 0x4060f00000000000 ; double 135.5
.quad 0x4060fc28f5c28f5c ; double 135.88
.quad 0x40610c28f5c28f5c ; double 136.38
.quad 0x4061180000000000 ; double 136.75
.quad 0x4060d00000000000 ; double 134.5
.quad 0x4060e00000000000 ; double 135
.quad 0x4060f00000000000 ; double 135.5
.quad 0x4060bc28f5c28f5c ; double 133.88
.quad 0x4060c80000000000 ; double 134.25
.quad 0x4060d00000000000 ; double 134.5
.quad 0x4060dc28f5c28f5c ; double 134.88
.quad 0x4060880000000000 ; double 132.25
.quad 0x4060980000000000 ; double 132.75
.quad 0x4060d428f5c28f5c ; double 134.63
.quad 0x4060d428f5c28f5c ; double 134.63
.quad 0x4060a00000000000 ; double 133
.quad 0x4061280000000000 ; double 137.25
.quad 0x40613c28f5c28f5c ; double 137.88
.quad 0x40613428f5c28f5c ; double 137.63
.quad 0x40611428f5c28f5c ; double 136.63
.quad 0x4061480000000000 ; double 138.25
.quad 0x4061480000000000 ; double 138.25
.quad 0x40611428f5c28f5c ; double 136.63
.quad 0x40611c28f5c28f5c ; double 136.88
.quad 0x40613428f5c28f5c ; double 137.63
.quad 0x4061200000000000 ; double 137
.quad 0x4060e80000000000 ; double 135.25
.quad 0x4060c428f5c28f5c ; double 134.13
.quad 0x4060d00000000000 ; double 134.5
.quad 0x40609428f5c28f5c ; double 132.63
.quad 0x40605428f5c28f5c ; double 130.63
.quad 0x4060100000000000 ; double 128.5
.quad 0x405f400000000000 ; double 125
.quad 0x405f27ae147ae148 ; double 124.62
.quad 0x405f600000000000 ; double 125.5
.quad 0x405f67ae147ae148 ; double 125.62
.quad 0x405f500000000000 ; double 125.25
.quad 0x405f000000000000 ; double 124
.quad 0x405ef00000000000 ; double 123.75
.quad 0x405e300000000000 ; double 120.75
.quad 0x405e200000000000 ; double 120.5
.quad 0x405e37ae147ae148 ; double 120.87
.quad 0x405e17ae147ae148 ; double 120.37
.quad 0x405e900000000000 ; double 122.25
.quad 0x405ea7ae147ae148 ; double 122.62
.quad 0x405ef7ae147ae148 ; double 123.87
.quad 0x405f000000000000 ; double 124
.quad 0x405f100000000000 ; double 124.25
.quad 0x405f100000000000 ; double 124.25
.quad 0x405f200000000000 ; double 124.5
.quad 0x405ef00000000000 ; double 123.75
.quad 0x405ef7ae147ae148 ; double 123.87
.quad 0x405f200000000000 ; double 124.5
.quad 0x405ed00000000000 ; double 123.25
.quad 0x405dc00000000000 ; double 119
.quad 0x405e100000000000 ; double 120.25
.quad 0x405de7ae147ae148 ; double 119.62
.quad 0x405dd7ae147ae148 ; double 119.37
.quad 0x405dc7ae147ae148 ; double 119.12
.quad 0x405d900000000000 ; double 118.25
.quad 0x405dc7ae147ae148 ; double 119.12
.quad 0x405de00000000000 ; double 119.5
.quad 0x405e100000000000 ; double 120.25
.quad 0x405e400000000000 ; double 121
.quad 0x405e57ae147ae148 ; double 121.37
.quad 0x405e900000000000 ; double 122.25
.quad 0x405ec7ae147ae148 ; double 123.12
.quad 0x405f37ae147ae148 ; double 124.87
.quad 0x405f47ae147ae148 ; double 125.12
.quad 0x405ed00000000000 ; double 123.25
.quad 0x405ea7ae147ae148 ; double 122.62
.quad 0x405e57ae147ae148 ; double 121.37
.quad 0x405e57ae147ae148 ; double 121.37
.quad 0x405e67ae147ae148 ; double 121.62
.quad 0x405ec7ae147ae148 ; double 123.12
.quad 0x405ed00000000000 ; double 123.25
.quad 0x405ef00000000000 ; double 123.75
.quad 0x405f100000000000 ; double 124.25
.quad 0x405f000000000000 ; double 124
.quad 0x405f600000000000 ; double 125.5
.quad 0x405f700000000000 ; double 125.75
.quad 0x405fc7ae147ae148 ; double 127.12
.quad 0x405ff00000000000 ; double 127.75
.quad 0x405fb7ae147ae148 ; double 126.87
.quad 0x405fa00000000000 ; double 126.5
.quad 0x405fb7ae147ae148 ; double 126.87
.quad 0x405fb00000000000 ; double 126.75
.quad 0x405fc7ae147ae148 ; double 127.12
.quad 0x405f37ae147ae148 ; double 124.87
.quad 0x405f47ae147ae148 ; double 125.12
.quad 0x405fb00000000000 ; double 126.75
.quad 0x405fd7ae147ae148 ; double 127.37
.quad 0x405fd7ae147ae148 ; double 127.37
.quad 0x405fa00000000000 ; double 126.5
.quad 0x405ff00000000000 ; double 127.75
.quad 0x405f900000000000 ; double 126.25
.quad 0x405ec00000000000 ; double 123
.quad 0x405eb00000000000 ; double 122.75
.quad 0x405ed7ae147ae148 ; double 123.37
.quad 0x405e900000000000 ; double 122.25
.quad 0x405e87ae147ae148 ; double 122.12
.quad 0x405e700000000000 ; double 121.75
.quad 0x405e900000000000 ; double 122.25
.quad 0x405e400000000000 ; double 121
.quad 0x405e97ae147ae148 ; double 122.37
.quad 0x405e900000000000 ; double 122.25
.quad 0x405ea00000000000 ; double 122.5
.quad 0x405ee7ae147ae148 ; double 123.62
.quad 0x405ef00000000000 ; double 123.75
.quad 0x405f87ae147ae148 ; double 126.12
.quad 0x405f800000000000 ; double 126
.quad 0x405f800000000000 ; double 126
.quad 0x405f300000000000 ; double 124.75
.quad 0x405f47ae147ae148 ; double 125.12
.quad 0x405fe00000000000 ; double 127.5
.quad 0x405f900000000000 ; double 126.25
.quad 0x40600c28f5c28f5c ; double 128.38
.quad 0x405ff7ae147ae148 ; double 127.87
.quad 0x4060100000000000 ; double 128.5
.quad 0x405ff00000000000 ; double 127.75
.quad 0x405f87ae147ae148 ; double 126.12
.quad 0x405ed7ae147ae148 ; double 123.37
.quad 0x405f200000000000 ; double 124.5
.quad 0x405ed7ae147ae148 ; double 123.37
.quad 0x405ef7ae147ae148 ; double 123.87
.quad 0x405f100000000000 ; double 124.25
.quad 0x405ea7ae147ae148 ; double 122.62
.quad 0x405ec7ae147ae148 ; double 123.12
.quad 0x405f07ae147ae148 ; double 124.12
.quad 0x405f37ae147ae148 ; double 124.87
.quad 0x405f800000000000 ; double 126
.quad 0x405f700000000000 ; double 125.75
.quad 0x405f300000000000 ; double 124.75
.quad 0x405f67ae147ae148 ; double 125.62
.quad 0x405f700000000000 ; double 125.75
.quad 0x405f97ae147ae148 ; double 126.37
.quad 0x405fb00000000000 ; double 126.75
.quad 0x405ee00000000000 ; double 123.5
.quad 0x405ef7ae147ae148 ; double 123.87
.quad 0x405ee7ae147ae148 ; double 123.62
.quad 0x405ee00000000000 ; double 123.5
.quad 0x405ed00000000000 ; double 123.25
.quad 0x405eb00000000000 ; double 122.75
.quad 0x405f37ae147ae148 ; double 124.87
.quad 0x405ed00000000000 ; double 123.25
.quad 0x405e400000000000 ; double 121
.quad 0x405eb00000000000 ; double 122.75
.quad 0x405f000000000000 ; double 124
.quad 0x405e500000000000 ; double 121.25
.quad 0x405cc7ae147ae148 ; double 115.12
.quad 0x405c500000000000 ; double 113.25
.quad 0x405bb7ae147ae148 ; double 110.87
.quad 0x405b57ae147ae148 ; double 109.37
.quad 0x405b67ae147ae148 ; double 109.62
.quad 0x405b300000000000 ; double 108.75
.quad 0x405af00000000000 ; double 107.75
.quad 0x405ac00000000000 ; double 107
.quad 0x405a97ae147ae148 ; double 106.37
.quad 0x405ad00000000000 ; double 107.25
.quad 0x405ad7ae147ae148 ; double 107.37
.quad 0x405b07ae147ae148 ; double 108.12
.quad 0x405b17ae147ae148 ; double 108.37
.quad 0x405ad00000000000 ; double 107.25
.quad 0x405a700000000000 ; double 105.75
.quad 0x405a67ae147ae148 ; double 105.62
.quad 0x405ac7ae147ae148 ; double 107.12
.quad 0x405ad7ae147ae148 ; double 107.37
.quad 0x405ae00000000000 ; double 107.5
.quad 0x405a800000000000 ; double 106
.quad 0x405ae00000000000 ; double 107.5
.quad 0x405ad7ae147ae148 ; double 107.37
.quad 0x405a700000000000 ; double 105.75
.quad 0x405aa00000000000 ; double 106.5
.quad 0x405a67ae147ae148 ; double 105.62
.quad 0x405a200000000000 ; double 104.5
.quad 0x405a600000000000 ; double 105.5
.quad 0x405a900000000000 ; double 106.25
.quad 0x405aa00000000000 ; double 106.5
.quad 0x405b000000000000 ; double 108
.quad 0x4059a00000000000 ; double 102.5
.quad 0x4059f00000000000 ; double 103.75
.quad 0x405977ae147ae148 ; double 101.87
.quad 0x405947ae147ae148 ; double 101.12
.quad 0x405a07ae147ae148 ; double 104.12
.quad 0x405a600000000000 ; double 105.5
.quad 0x405a37ae147ae148 ; double 104.87
.quad 0x405a600000000000 ; double 105.5
.quad 0x405a800000000000 ; double 106
.quad 0x405a900000000000 ; double 106.25
.quad 0x405a800000000000 ; double 106
.quad 0x405ae7ae147ae148 ; double 107.62
.quad 0x405b500000000000 ; double 109.25
.quad 0x405b27ae147ae148 ; double 108.62
.quad 0x405b300000000000 ; double 108.75
.quad 0x405b200000000000 ; double 108.5
.quad 0x405ae7ae147ae148 ; double 107.62
.quad 0x405af7ae147ae148 ; double 107.87
.quad 0x405b07ae147ae148 ; double 108.12
.quad 0x405b400000000000 ; double 109
.quad 0x405b200000000000 ; double 108.5
.quad 0x405b700000000000 ; double 109.75
.quad 0x405bb7ae147ae148 ; double 110.87
.quad 0x405bf00000000000 ; double 111.75
.quad 0x405c37ae147ae148 ; double 112.87
.quad 0x405c500000000000 ; double 113.25
.quad 0x405c100000000000 ; double 112.25
.quad 0x405c07ae147ae148 ; double 112.12
.quad 0x405c57ae147ae148 ; double 113.37
.quad 0x405cb7ae147ae148 ; double 114.87
.quad 0x405c900000000000 ; double 114.25
.quad 0x405c57ae147ae148 ; double 113.37
.quad 0x405cb7ae147ae148 ; double 114.87
.quad 0x405d17ae147ae148 ; double 116.37
.quad 0x405d500000000000 ; double 117.25
.quad 0x405d200000000000 ; double 116.5
.quad 0x405ca7ae147ae148 ; double 114.62
.quad 0x405c900000000000 ; double 114.25
.quad 0x405c600000000000 ; double 113.5
.quad 0x405c100000000000 ; double 112.25
.quad 0x405be7ae147ae148 ; double 111.62
.quad 0x405bd7ae147ae148 ; double 111.37
.quad 0x405bc7ae147ae148 ; double 111.12
.quad 0x405c100000000000 ; double 112.25
.quad 0x405c600000000000 ; double 113.5
.quad 0x405c37ae147ae148 ; double 112.87
.quad 0x405c600000000000 ; double 113.5
.quad 0x405c300000000000 ; double 112.75
.quad 0x405b97ae147ae148 ; double 110.37
.quad 0x405bb00000000000 ; double 110.75
.quad 0x405b800000000000 ; double 110
.quad 0x405b700000000000 ; double 109.75
.quad 0x405bd7ae147ae148 ; double 111.37
.quad 0x405be00000000000 ; double 111.5
.quad 0x405c200000000000 ; double 112.5
.quad 0x405ca7ae147ae148 ; double 114.62
.quad 0x405ca7ae147ae148 ; double 114.62
.quad 0x405cf00000000000 ; double 115.75
.quad 0x405cd7ae147ae148 ; double 115.37
.quad 0x405c77ae147ae148 ; double 113.87
.quad 0x405c500000000000 ; double 113.25
.quad 0x405c400000000000 ; double 113
.quad 0x405c800000000000 ; double 114
.quad 0x405ce00000000000 ; double 115.5
.quad 0x405cd7ae147ae148 ; double 115.37
.quad 0x405c800000000000 ; double 114
.quad 0x405c900000000000 ; double 114.25
.quad 0x405c200000000000 ; double 112.5
.quad 0x405be00000000000 ; double 111.5
.quad 0x405c000000000000 ; double 112
.quad 0x405ba00000000000 ; double 110.5
.quad 0x405b27ae147ae148 ; double 108.62
.quad 0x405b500000000000 ; double 109.25
.quad 0x405b100000000000 ; double 108.25
.quad 0x405be7ae147ae148 ; double 111.62
.quad 0x405be00000000000 ; double 111.5
.quad 0x405c37ae147ae148 ; double 112.87
.quad 0x405be7ae147ae148 ; double 111.62
.quad 0x405bf00000000000 ; double 111.75
.quad 0x405c200000000000 ; double 112.5
.quad 0x405c600000000000 ; double 113.5
.quad 0x405be00000000000 ; double 111.5
.quad 0x405b500000000000 ; double 109.25
.quad 0x405bb7ae147ae148 ; double 110.87
.quad 0x405b77ae147ae148 ; double 109.87
.quad 0x405ba00000000000 ; double 110.5
.quad 0x405b97ae147ae148 ; double 110.37
.quad 0x405bd7ae147ae148 ; double 111.37
.quad 0x405bc00000000000 ; double 111
.quad 0x405b900000000000 ; double 110.25
.quad 0x405ba00000000000 ; double 110.5
.quad 0x405b87ae147ae148 ; double 110.12
.quad 0x405bb7ae147ae148 ; double 110.87
.quad 0x405bb00000000000 ; double 110.75
.quad 0x405bb7ae147ae148 ; double 110.87
.quad 0x405c97ae147ae148 ; double 114.37
.quad 0x405ca00000000000 ; double 114.5
.quad 0x405ca7ae147ae148 ; double 114.62
.quad 0x405cb00000000000 ; double 114.75
.quad 0x405cc00000000000 ; double 115
.quad 0x405cc00000000000 ; double 115
.quad 0x405cf7ae147ae148 ; double 115.87
.quad 0x405d700000000000 ; double 117.75
.quad 0x405d500000000000 ; double 117.25
.quad 0x405d47ae147ae148 ; double 117.12
.quad 0x405dc7ae147ae148 ; double 119.12
.quad 0x405e300000000000 ; double 120.75
.quad 0x405e77ae147ae148 ; double 121.87
.quad 0x405e400000000000 ; double 121
.quad 0x405e200000000000 ; double 120.5
.quad 0x405e900000000000 ; double 122.25
.quad 0x405ec00000000000 ; double 123
.quad 0x405eb00000000000 ; double 122.75
.quad 0x405ef7ae147ae148 ; double 123.87
.quad 0x405f000000000000 ; double 124
.quad 0x405f200000000000 ; double 124.5
.quad 0x405f400000000000 ; double 125
.quad 0x405f000000000000 ; double 124
.quad 0x405ed7ae147ae148 ; double 123.37
.quad 0x405ed7ae147ae148 ; double 123.37
.quad 0x405f37ae147ae148 ; double 124.87
.quad 0x405f27ae147ae148 ; double 124.62
.quad 0x405f300000000000 ; double 124.75
.quad 0x405ef00000000000 ; double 123.75
.quad 0x405f07ae147ae148 ; double 124.12
.quad 0x405ee7ae147ae148 ; double 123.62
.quad 0x405ea7ae147ae148 ; double 122.62
.quad 0x405e97ae147ae148 ; double 122.37
.quad 0x405e37ae147ae148 ; double 120.87
.quad 0x405e67ae147ae148 ; double 121.62
.quad 0x405ec7ae147ae148 ; double 123.12
.quad 0x405f17ae147ae148 ; double 124.37
.quad 0x405f200000000000 ; double 124.5
.quad 0x405e97ae147ae148 ; double 122.37
.quad 0x405df7ae147ae148 ; double 119.87
.quad 0x405dd7ae147ae148 ; double 119.37
.quad 0x405dd00000000000 ; double 119.25
.quad 0x405dc7ae147ae148 ; double 119.12
.quad 0x405dd00000000000 ; double 119.25
.quad 0x405de00000000000 ; double 119.5
.quad 0x405e900000000000 ; double 122.25
.quad 0x405e700000000000 ; double 121.75
.quad 0x405e400000000000 ; double 121
.quad 0x405e700000000000 ; double 121.75
.quad 0x405ed7ae147ae148 ; double 123.37
.quad 0x405f67ae147ae148 ; double 125.62
.quad 0x405f600000000000 ; double 125.5
.quad 0x405ef00000000000 ; double 123.75
.quad 0x405f200000000000 ; double 124.5
.quad 0x405f77ae147ae148 ; double 125.87
.quad 0x405fe7ae147ae148 ; double 127.62
.quad 0x405ff00000000000 ; double 127.75
.quad 0x405fd00000000000 ; double 127.25
.quad 0x405f700000000000 ; double 125.75
.quad 0x405ee7ae147ae148 ; double 123.62
.quad 0x405eb7ae147ae148 ; double 122.87
.quad 0x405ed7ae147ae148 ; double 123.37
.quad 0x405ed00000000000 ; double 123.25
.quad 0x405fa00000000000 ; double 126.5
.quad 0x405fc00000000000 ; double 127
.quad 0x405fd00000000000 ; double 127.25
.quad 0x4060200000000000 ; double 129
.quad 0x4060400000000000 ; double 130
.quad 0x40602c28f5c28f5c ; double 129.38
.quad 0x40601c28f5c28f5c ; double 128.88
.quad 0x4060380000000000 ; double 129.75
.quad 0x40601428f5c28f5c ; double 128.63
.quad 0x4060380000000000 ; double 129.75
.quad 0x4060500000000000 ; double 130.5
.quad 0x4060580000000000 ; double 130.75
.quad 0x40608428f5c28f5c ; double 132.13
.quad 0x40609428f5c28f5c ; double 132.63
.quad 0x4060b80000000000 ; double 133.75
.quad 0x4060bc28f5c28f5c ; double 133.88
.quad 0x4060ac28f5c28f5c ; double 133.38
.quad 0x4060c00000000000 ; double 134
.quad 0x4060c80000000000 ; double 134.25
.quad 0x4060900000000000 ; double 132.5
.quad 0x4060880000000000 ; double 132.25
.quad 0x4060680000000000 ; double 131.25
.quad 0x40604428f5c28f5c ; double 130.13
.quad 0x4060100000000000 ; double 128.5
.quad 0x40600428f5c28f5c ; double 128.13
.quad 0x4060300000000000 ; double 129.5
.quad 0x40602428f5c28f5c ; double 129.13
.quad 0x40600c28f5c28f5c ; double 128.38
.quad 0x4060300000000000 ; double 129.5
.quad 0x405fa7ae147ae148 ; double 126.62
.quad 0x405f300000000000 ; double 124.75
.quad 0x405f07ae147ae148 ; double 124.12
.quad 0x405f200000000000 ; double 124.5
.quad 0x405f37ae147ae148 ; double 124.87
.quad 0x405f100000000000 ; double 124.25
.quad 0x405ed00000000000 ; double 123.25
.quad 0x405ee00000000000 ; double 123.5
.quad 0x405e800000000000 ; double 122
.quad 0x405ed7ae147ae148 ; double 123.37
.quad 0x405eb7ae147ae148 ; double 122.87
.quad 0x405ee00000000000 ; double 123.5
.quad 0x405eb7ae147ae148 ; double 122.87
.quad 0x405e77ae147ae148 ; double 121.87
.quad 0x405df00000000000 ; double 119.75
.quad 0x405de7ae147ae148 ; double 119.62
.quad 0x405de00000000000 ; double 119.5
.quad 0x405dd00000000000 ; double 119.25
.quad 0x405dd00000000000 ; double 119.25
.quad 0x405d900000000000 ; double 118.25
.quad 0x405d37ae147ae148 ; double 116.87
.quad 0x405dc00000000000 ; double 119
.quad 0x405e37ae147ae148 ; double 120.87
.quad 0x405ed00000000000 ; double 123.25
.quad 0x405ea00000000000 ; double 122.5
.quad 0x405ef00000000000 ; double 123.75
.quad 0x405ee00000000000 ; double 123.5
.quad 0x405e800000000000 ; double 122
.quad 0x405e600000000000 ; double 121.5
.quad 0x405db7ae147ae148 ; double 118.87
.quad 0x405dc7ae147ae148 ; double 119.12
.quad 0x405dc00000000000 ; double 119
.quad 0x405de00000000000 ; double 119.5
.quad 0x405df7ae147ae148 ; double 119.87
.quad 0x405e07ae147ae148 ; double 120.12
.quad 0x405df7ae147ae148 ; double 119.87
.quad 0x405e57ae147ae148 ; double 121.37
.quad 0x405e600000000000 ; double 121.5
.quad 0x405e57ae147ae148 ; double 121.37
.quad 0x405e800000000000 ; double 122
.quad 0x405f07ae147ae148 ; double 124.12
.quad 0x405fa00000000000 ; double 126.5
.quad 0x405fc00000000000 ; double 127
.quad 0x405f900000000000 ; double 126.25
.quad 0x405f600000000000 ; double 125.5
.quad 0x405f900000000000 ; double 126.25
.quad 0x405f57ae147ae148 ; double 125.37
.quad 0x405e600000000000 ; double 121.5
.quad 0x405e200000000000 ; double 120.5
.quad 0x405e700000000000 ; double 121.75
.quad 0x405f000000000000 ; double 124
.quad 0x405e700000000000 ; double 121.75
.quad 0x405ee00000000000 ; double 123.5
.quad 0x405ed7ae147ae148 ; double 123.37
.quad 0x405e600000000000 ; double 121.5
.quad 0x405eb00000000000 ; double 122.75
.quad 0x405e700000000000 ; double 121.75
.quad 0x405de7ae147ae148 ; double 119.62
.quad 0x405e500000000000 ; double 121.25
.quad 0x405e400000000000 ; double 121
.quad 0x405e37ae147ae148 ; double 120.87
.quad 0x405e57ae147ae148 ; double 121.37
.quad 0x405eb00000000000 ; double 122.75
.quad 0x405ef7ae147ae148 ; double 123.87
.quad 0x405ef00000000000 ; double 123.75
.quad 0x405f200000000000 ; double 124.5
.quad 0x405ee7ae147ae148 ; double 123.62
.quad 0x405eb00000000000 ; double 122.75
.quad 0x405ea00000000000 ; double 122.5
.quad 0x405eb7ae147ae148 ; double 122.87
.quad 0x405e400000000000 ; double 121
.quad 0x405df00000000000 ; double 119.75
.quad 0x405d500000000000 ; double 117.25
.quad 0x405ca7ae147ae148 ; double 114.62
.quad 0x405ca00000000000 ; double 114.5
.quad 0x405cb00000000000 ; double 114.75
.quad 0x405d100000000000 ; double 116.25
.quad 0x405d07ae147ae148 ; double 116.12
.quad 0x405cb7ae147ae148 ; double 114.87
.quad 0x405c77ae147ae148 ; double 113.87
.quad 0x405bf7ae147ae148 ; double 111.87
.quad 0x405c37ae147ae148 ; double 112.87
.quad 0x405cc00000000000 ; double 115
.quad 0x405cf7ae147ae148 ; double 115.87
.quad 0x405ce7ae147ae148 ; double 115.62
.quad 0x405cc00000000000 ; double 115
.quad 0x405c37ae147ae148 ; double 112.87
.quad 0x405bf00000000000 ; double 111.75
.quad 0x405c200000000000 ; double 112.5
.quad 0x405cb00000000000 ; double 114.75
.quad 0x405cc00000000000 ; double 115
.quad 0x405cd00000000000 ; double 115.25
.quad 0x405d27ae147ae148 ; double 116.62
.quad 0x405d000000000000 ; double 116
.quad 0x405d400000000000 ; double 117
.quad 0x405d67ae147ae148 ; double 117.62
.quad 0x405d900000000000 ; double 118.25
.quad 0x405da00000000000 ; double 118.5
.quad 0x405cf7ae147ae148 ; double 115.87
.quad 0x405d37ae147ae148 ; double 116.87
.quad 0x405cf7ae147ae148 ; double 115.87
.quad 0x405d47ae147ae148 ; double 117.12
.quad 0x405d67ae147ae148 ; double 117.62
.quad 0x405d600000000000 ; double 117.5
.quad 0x405d800000000000 ; double 118
.quad 0x405d500000000000 ; double 117.25
.quad 0x405d47ae147ae148 ; double 117.12
.quad 0x405d77ae147ae148 ; double 117.87
.quad 0x405ce00000000000 ; double 115.5
.quad 0x405cd00000000000 ; double 115.25
.quad 0x405c07ae147ae148 ; double 112.12
.quad 0x405c27ae147ae148 ; double 112.62
.quad 0x405ba00000000000 ; double 110.5
.quad 0x405b800000000000 ; double 110
.quad 0x405b400000000000 ; double 109
.quad 0x405ad7ae147ae148 ; double 107.37
.quad 0x405ab00000000000 ; double 106.75
.quad 0x405a000000000000 ; double 104
.quad 0x4059d7ae147ae148 ; double 103.37
.quad 0x4059b00000000000 ; double 102.75
.quad 0x4059d00000000000 ; double 103.25
.quad 0x4059a00000000000 ; double 102.5
.quad 0x405a37ae147ae148 ; double 104.87
.quad 0x405a200000000000 ; double 104.5
.quad 0x405a000000000000 ; double 104
.quad 0x4059b7ae147ae148 ; double 102.87
.quad 0x4059d00000000000 ; double 103.25
.quad 0x4059c7ae147ae148 ; double 103.12
.quad 0x4059b7ae147ae148 ; double 102.87
.quad 0x405947ae147ae148 ; double 101.12
.quad 0x405947ae147ae148 ; double 101.12
.quad 0x4059000000000000 ; double 100
.quad 0x4058d00000000000 ; double 99.25
.quad 0x4059300000000000 ; double 100.75
.quad 0x4059400000000000 ; double 101
.quad 0x405947ae147ae148 ; double 101.12
.quad 0x4059400000000000 ; double 101
.quad 0x4059a7ae147ae148 ; double 102.62
.quad 0x405997ae147ae148 ; double 102.37
.quad 0x4059d00000000000 ; double 103.25
.quad 0x4059d7ae147ae148 ; double 103.37
.quad 0x4059900000000000 ; double 102.25
.quad 0x4059c00000000000 ; double 103
.quad 0x4059c00000000000 ; double 103
.quad 0x405977ae147ae148 ; double 101.87
.quad 0x4059200000000000 ; double 100.5
.quad 0x4059500000000000 ; double 101.25
.quad 0x4059600000000000 ; double 101.5
.quad 0x4058b00000000000 ; double 98.75
.quad 0x4058e00000000000 ; double 99.5
.quad 0x4058e00000000000 ; double 99.5
.quad 0x4058c00000000000 ; double 99
.quad 0x4059100000000000 ; double 100.25
.quad 0x4059200000000000 ; double 100.5
.quad 0x4058e00000000000 ; double 99.5
.quad 0x4058700000000000 ; double 97.75
.quad 0x405837ae147ae148 ; double 96.870000000000005
.quad 0x4057e00000000000 ; double 95.5
.quad 0x405857ae147ae148 ; double 97.370000000000005
.quad 0x405897ae147ae148 ; double 98.370000000000005
.quad 0x4058600000000000 ; double 97.5
.quad 0x4058000000000000 ; double 96
.quad 0x4058500000000000 ; double 97.25
.quad 0x4058d00000000000 ; double 99.25
.quad 0x4058b7ae147ae148 ; double 98.870000000000005
.quad 0x4058a00000000000 ; double 98.5
.quad 0x4058800000000000 ; double 98
.quad 0x405807ae147ae148 ; double 96.120000000000005
.quad 0x4058100000000000 ; double 96.25
.quad 0x405787ae147ae148 ; double 94.120000000000005
.quad 0x4058600000000000 ; double 97.5
.quad 0x4058b00000000000 ; double 98.75
.quad 0x4059100000000000 ; double 100.25
.quad 0x405917ae147ae148 ; double 100.37
.quad 0x4059300000000000 ; double 100.75
.quad 0x4058f7ae147ae148 ; double 99.870000000000005
.quad 0x4058d00000000000 ; double 99.25
.quad 0x405907ae147ae148 ; double 100.12
.quad 0x405897ae147ae148 ; double 98.370000000000005
.quad 0x4058d00000000000 ; double 99.25
.quad 0x405897ae147ae148 ; double 98.370000000000005
.quad 0x4058a00000000000 ; double 98.5
.quad 0x4058300000000000 ; double 96.75
.quad 0x4057e7ae147ae148 ; double 95.620000000000005
.quad 0x4058400000000000 ; double 97
.quad 0x405847ae147ae148 ; double 97.120000000000005
.quad 0x4058800000000000 ; double 98
.quad 0x4058500000000000 ; double 97.25
.quad 0x405817ae147ae148 ; double 96.370000000000005
.quad 0x405837ae147ae148 ; double 96.870000000000005
.quad 0x4057a00000000000 ; double 94.5
.quad 0x405817ae147ae148 ; double 96.370000000000005
.quad 0x4058000000000000 ; double 96
.quad 0x4057a7ae147ae148 ; double 94.620000000000005
.quad 0x4057a7ae147ae148 ; double 94.620000000000005
.quad 0x4056d00000000000 ; double 91.25
.quad 0x4056e00000000000 ; double 91.5
.quad 0x4058100000000000 ; double 96.25
.quad 0x4057b00000000000 ; double 94.75
.quad 0x4057c7ae147ae148 ; double 95.120000000000005
.quad 0x4058000000000000 ; double 96
.quad 0x4058600000000000 ; double 97.5
.quad 0x405817ae147ae148 ; double 96.370000000000005
.quad 0x4057700000000000 ; double 93.75
.quad 0x405657ae147ae148 ; double 89.370000000000005
.quad 0x4056500000000000 ; double 89.25
.quad 0x4056100000000000 ; double 88.25
.quad 0x4055a00000000000 ; double 86.5
.quad 0x4054f00000000000 ; double 83.75
.quad 0x4054e7ae147ae148 ; double 83.620000000000005
.quad 0x4054b00000000000 ; double 82.75
.quad 0x405487ae147ae148 ; double 82.120000000000005
.quad 0x4054c00000000000 ; double 83
.quad 0x405517ae147ae148 ; double 84.370000000000005
.quad 0x4055200000000000 ; double 84.5
.quad 0x4055200000000000 ; double 84.5
.quad 0x4054c7ae147ae148 ; double 83.120000000000005
.quad 0x4054b00000000000 ; double 82.75
.quad 0x405557ae147ae148 ; double 85.370000000000005
.quad 0x405557ae147ae148 ; double 85.370000000000005
.quad 0x4055a00000000000 ; double 86.5
.quad 0x405577ae147ae148 ; double 85.870000000000005
.quad 0x4054f00000000000 ; double 83.75
.quad 0x4055400000000000 ; double 85
.quad 0x405557ae147ae148 ; double 85.370000000000005
.quad 0x4055600000000000 ; double 85.5
.quad 0x4054c00000000000 ; double 83
.quad 0x4054600000000000 ; double 81.5
.quad 0x4054200000000000 ; double 80.5
.quad 0x4054600000000000 ; double 81.5
.quad 0x405497ae147ae148 ; double 82.370000000000005
.quad 0x405487ae147ae148 ; double 82.120000000000005
.quad 0x405477ae147ae148 ; double 81.870000000000005
.quad 0x4055300000000000 ; double 84.75
.quad 0x4055300000000000 ; double 84.75
.quad 0x4055200000000000 ; double 84.5
.quad 0x4054e00000000000 ; double 83.5
.quad 0x4054c00000000000 ; double 83
.quad 0x405417ae147ae148 ; double 80.370000000000005
.quad 0x405477ae147ae148 ; double 81.870000000000005
.quad 0x405507ae147ae148 ; double 84.120000000000005
.quad 0x405517ae147ae148 ; double 84.370000000000005
.quad 0x4055200000000000 ; double 84.5
.quad 0x405447ae147ae148 ; double 81.120000000000005
.quad 0x405417ae147ae148 ; double 80.370000000000005
.quad 0x4053c00000000000 ; double 79
.quad 0x405307ae147ae148 ; double 76.120000000000005
.quad 0x4052a7ae147ae148 ; double 74.620000000000005
.quad 0x4052a7ae147ae148 ; double 74.620000000000005
.quad 0x4052b7ae147ae148 ; double 74.870000000000005
.quad 0x405317ae147ae148 ; double 76.370000000000005
.quad 0x4053500000000000 ; double 77.25
.quad 0x4053400000000000 ; double 77
.quad 0x4053200000000000 ; double 76.5
.quad 0x405327ae147ae148 ; double 76.620000000000005
.quad 0x405367ae147ae148 ; double 77.620000000000005
.quad 0x4053300000000000 ; double 76.75
.quad 0x4052b00000000000 ; double 74.75
.quad 0x4052b00000000000 ; double 74.75
.quad 0x4052c00000000000 ; double 75
.quad 0x405267ae147ae148 ; double 73.620000000000005
.quad 0x4052800000000000 ; double 74
.quad 0x4052400000000000 ; double 73
.quad 0x405207ae147ae148 ; double 72.120000000000005
.quad 0x4052500000000000 ; double 73.25
.quad 0x405267ae147ae148 ; double 73.620000000000005
.quad 0x4052300000000000 ; double 72.75
.quad 0x4052800000000000 ; double 74
.quad 0x4052000000000000 ; double 72
.quad 0x4051c00000000000 ; double 71
.quad 0x4051c00000000000 ; double 71
.quad 0x4051800000000000 ; double 70
.quad 0x4051500000000000 ; double 69.25
.quad 0x4051a00000000000 ; double 70.5
.quad 0x405187ae147ae148 ; double 70.120000000000005
.quad 0x4051500000000000 ; double 69.25
.quad 0x4051600000000000 ; double 69.5
.quad 0x405127ae147ae148 ; double 68.620000000000005
.quad 0x4050b00000000000 ; double 66.75
.quad 0x4050e7ae147ae148 ; double 67.620000000000005
.quad 0x405087ae147ae148 ; double 66.120000000000005
.quad 0x404fd0a3d70a3d71 ; double 63.630000000000003
.quad 0x404f800000000000 ; double 63
.quad 0x404f70a3d70a3d71 ; double 62.880000000000003
.quad 0x404f90a3d70a3d71 ; double 63.130000000000003
.quad 0x404fe00000000000 ; double 63.75
.quad 0x404fa00000000000 ; double 63.25
.quad 0x4050100000000000 ; double 64.25
.quad 0x4050300000000000 ; double 64.75
.quad 0x405087ae147ae148 ; double 66.120000000000005
.quad 0x4050d00000000000 ; double 67.25
.quad 0x4050c7ae147ae148 ; double 67.120000000000005
.quad 0x4050900000000000 ; double 66.25
.quad 0x405067ae147ae148 ; double 65.620000000000005
.quad 0x4050800000000000 ; double 66
.quad 0x4050900000000000 ; double 66.25
.quad 0x4050a7ae147ae148 ; double 66.620000000000005
.quad 0x4050c00000000000 ; double 67
.quad 0x4050d7ae147ae148 ; double 67.370000000000005
.quad 0x405127ae147ae148 ; double 68.620000000000005
.quad 0x4050e7ae147ae148 ; double 67.620000000000005
.quad 0x4050a7ae147ae148 ; double 66.620000000000005
.quad 0x4050f00000000000 ; double 67.75
.quad 0x4050e00000000000 ; double 67.5
.quad 0x4050b7ae147ae148 ; double 66.870000000000005
.quad 0x4050400000000000 ; double 65
.quad 0x4050b7ae147ae148 ; double 66.870000000000005
.quad 0x404f400000000000 ; double 62.5
.quad 0x404ed0a3d70a3d71 ; double 61.630000000000003
.quad 0x404ea00000000000 ; double 61.25
.quad 0x404e600000000000 ; double 60.75
.quad 0x404e50a3d70a3d71 ; double 60.630000000000003
.quad 0x404e70a3d70a3d71 ; double 60.880000000000003
.quad 0x404ec00000000000 ; double 61.5
.quad 0x404eb0a3d70a3d71 ; double 61.380000000000003
.quad 0x404ec00000000000 ; double 61.5
.quad 0x404e50a3d70a3d71 ; double 60.630000000000003
.quad 0x404ea00000000000 ; double 61.25
.quad 0x404ec00000000000 ; double 61.5
.quad 0x404df0a3d70a3d71 ; double 59.880000000000003
.quad 0x404dd0a3d70a3d71 ; double 59.630000000000003
.quad 0x404d50a3d70a3d71 ; double 58.630000000000003
.quad 0x404da00000000000 ; double 59.25
.quad 0x404e000000000000 ; double 60
.quad 0x404de00000000000 ; double 59.75
.quad 0x404dc00000000000 ; double 59.5
.quad 0x404e10a3d70a3d71 ; double 60.130000000000003
.quad 0x404d800000000000 ; double 59
.quad 0x404d50a3d70a3d71 ; double 58.630000000000003
.quad 0x404df0a3d70a3d71 ; double 59.880000000000003
.quad 0x404e50a3d70a3d71 ; double 60.630000000000003
.quad 0x404ed0a3d70a3d71 ; double 61.630000000000003
.quad 0x404f10a3d70a3d71 ; double 62.130000000000003
.quad 0x404f10a3d70a3d71 ; double 62.130000000000003
.quad 0x404ef0a3d70a3d71 ; double 61.880000000000003
.quad 0x404f10a3d70a3d71 ; double 62.130000000000003
.quad 0x404f30a3d70a3d71 ; double 62.380000000000003
.quad 0x404f30a3d70a3d71 ; double 62.380000000000003
.quad 0x404f50a3d70a3d71 ; double 62.630000000000003
.quad 0x404f50a3d70a3d71 ; double 62.630000000000003
.quad 0x404f800000000000 ; double 63
.quad 0x404f800000000000 ; double 63
.quad 0x404fa00000000000 ; double 63.25
.quad 0x404fa00000000000 ; double 63.25
.quad 0x404fe00000000000 ; double 63.75
.quad 0x4050000000000000 ; double 64
.quad 0x4050100000000000 ; double 64.25
.quad 0x405027ae147ae148 ; double 64.620000000000005
.quad 0x4050300000000000 ; double 64.75
.quad 0x405037ae147ae148 ; double 64.870000000000005
.quad 0x405047ae147ae148 ; double 65.120000000000005
.quad 0x405047ae147ae148 ; double 65.120000000000005
.quad 0x4050500000000000 ; double 65.25
.quad 0x4050500000000000 ; double 65.25
.quad 0x4050300000000000 ; double 64.75
.quad 0x4050300000000000 ; double 64.75
.quad 0x405027ae147ae148 ; double 64.620000000000005
.quad 0x4050500000000000 ; double 65.25
.quad 0x4050700000000000 ; double 65.75
.quad 0x405087ae147ae148 ; double 66.120000000000005
.quad 0x4050700000000000 ; double 65.75
.quad 0x4050300000000000 ; double 64.75
.quad 0x404ff0a3d70a3d71 ; double 63.880000000000003
.quad 0x404ff0a3d70a3d71 ; double 63.880000000000003
.quad 0x405047ae147ae148 ; double 65.120000000000005
.quad 0x405037ae147ae148 ; double 64.870000000000005
.quad 0x405027ae147ae148 ; double 64.620000000000005
.quad 0x4050000000000000 ; double 64
.quad 0x404f600000000000 ; double 62.75
.quad 0x404f400000000000 ; double 62.5
.quad 0x404f400000000000 ; double 62.5
.quad 0x404f10a3d70a3d71 ; double 62.130000000000003
.quad 0x404ef0a3d70a3d71 ; double 61.880000000000003
.quad 0x404ee00000000000 ; double 61.75
.quad 0x404ef0a3d70a3d71 ; double 61.880000000000003
.quad 0x404e90a3d70a3d71 ; double 61.130000000000003
.quad 0x404e50a3d70a3d71 ; double 60.630000000000003
.quad 0x404e400000000000 ; double 60.5
.quad 0x404e200000000000 ; double 60.25
.quad 0x404de00000000000 ; double 59.75
.quad 0x404e10a3d70a3d71 ; double 60.130000000000003
.quad 0x404db0a3d70a3d71 ; double 59.380000000000003
.quad 0x404dd0a3d70a3d71 ; double 59.630000000000003
.quad 0x404dc00000000000 ; double 59.5
.quad 0x404d000000000000 ; double 58
.quad 0x404d30a3d70a3d71 ; double 58.380000000000003
.quad 0x404d10a3d70a3d71 ; double 58.130000000000003
.quad 0x404d70a3d70a3d71 ; double 58.880000000000003
.quad 0x404d70a3d70a3d71 ; double 58.880000000000003
.quad 0x404d10a3d70a3d71 ; double 58.130000000000003
.quad 0x404db0a3d70a3d71 ; double 59.380000000000003
.quad 0x404dd0a3d70a3d71 ; double 59.630000000000003
.quad 0x404dc00000000000 ; double 59.5
.quad 0x404da00000000000 ; double 59.25
.quad 0x404db0a3d70a3d71 ; double 59.380000000000003
.quad 0x404e000000000000 ; double 60
.quad 0x404e70a3d70a3d71 ; double 60.880000000000003
.quad 0x404f30a3d70a3d71 ; double 62.380000000000003
.quad 0x404f600000000000 ; double 62.75
.quad 0x404ef0a3d70a3d71 ; double 61.880000000000003
.quad 0x404f400000000000 ; double 62.5
.quad 0x404f10a3d70a3d71 ; double 62.130000000000003
.quad 0x404eb0a3d70a3d71 ; double 61.380000000000003
.quad 0x404f70a3d70a3d71 ; double 62.880000000000003
.quad 0x404f800000000000 ; double 63
.quad 0x404f800000000000 ; double 63
.quad 0x404fa00000000000 ; double 63.25
.quad 0x404f30a3d70a3d71 ; double 62.380000000000003
.quad 0x404ee00000000000 ; double 61.75
.quad 0x404f000000000000 ; double 62
.quad 0x404f30a3d70a3d71 ; double 62.380000000000003
.quad 0x404ee00000000000 ; double 61.75
.quad 0x404f800000000000 ; double 63
.quad 0x4050000000000000 ; double 64
.quad 0x404fc00000000000 ; double 63.5
.quad 0x404fa00000000000 ; double 63.25
.quad 0x404fc00000000000 ; double 63.5
.quad 0x404fb0a3d70a3d71 ; double 63.380000000000003
.quad 0x4050300000000000 ; double 64.75
.quad 0x4050200000000000 ; double 64.5
.quad 0x404f600000000000 ; double 62.75
.quad 0x404f400000000000 ; double 62.5
.quad 0x404f200000000000 ; double 62.25
.quad 0x404f50a3d70a3d71 ; double 62.630000000000003
.quad 0x404f800000000000 ; double 63
.quad 0x404f000000000000 ; double 62
.quad 0x404f10a3d70a3d71 ; double 62.130000000000003
.quad 0x404f10a3d70a3d71 ; double 62.130000000000003
.quad 0x404e10a3d70a3d71 ; double 60.130000000000003
.quad 0x404d70a3d70a3d71 ; double 58.880000000000003
.quad 0x404d200000000000 ; double 58.25
.quad 0x404d30a3d70a3d71 ; double 58.380000000000003
.quad 0x404d600000000000 ; double 58.75
.quad 0x404c600000000000 ; double 56.75
.quad 0x404ca00000000000 ; double 57.25
.quad 0x404c800000000000 ; double 57
.quad 0x404cf0a3d70a3d71 ; double 57.880000000000003
.quad 0x404d30a3d70a3d71 ; double 58.380000000000003
.quad 0x404cb0a3d70a3d71 ; double 57.380000000000003
.quad 0x404cd0a3d70a3d71 ; double 57.630000000000003
.quad 0x404c800000000000 ; double 57
.quad 0x404cb0a3d70a3d71 ; double 57.380000000000003
.quad 0x404c800000000000 ; double 57
.quad 0x404c70a3d70a3d71 ; double 56.880000000000003
.quad 0x404ca00000000000 ; double 57.25
.quad 0x404c70a3d70a3d71 ; double 56.880000000000003
.quad 0x404c800000000000 ; double 57
.quad 0x404be00000000000 ; double 55.75
.quad 0x404b400000000000 ; double 54.5
.quad 0x404af0a3d70a3d71 ; double 53.880000000000003
.quad 0x404af0a3d70a3d71 ; double 53.880000000000003
.quad 0x404b600000000000 ; double 54.75
.quad 0x404b600000000000 ; double 54.75
.quad 0x404b000000000000 ; double 54
.quad 0x404ae00000000000 ; double 53.75
.quad 0x404bb0a3d70a3d71 ; double 55.380000000000003
.quad 0x404b90a3d70a3d71 ; double 55.130000000000003
.quad 0x404b000000000000 ; double 54
.quad 0x404ae00000000000 ; double 53.75
.quad 0x404b50a3d70a3d71 ; double 54.630000000000003
.quad 0x404b400000000000 ; double 54.5
.quad 0x404b400000000000 ; double 54.5
.quad 0x404a90a3d70a3d71 ; double 53.130000000000003
.quad 0x404a30a3d70a3d71 ; double 52.380000000000003
.quad 0x404970a3d70a3d71 ; double 50.880000000000003
.quad 0x404970a3d70a3d71 ; double 50.880000000000003
.quad 0x404930a3d70a3d71 ; double 50.380000000000003
.quad 0x404990a3d70a3d71 ; double 51.130000000000003
.quad 0x4049b0a3d70a3d71 ; double 51.380000000000003
.quad 0x4049a00000000000 ; double 51.25
.quad 0x404a50a3d70a3d71 ; double 52.630000000000003
.quad 0x404a90a3d70a3d71 ; double 53.130000000000003
.quad 0x404a50a3d70a3d71 ; double 52.630000000000003
.quad 0x404a800000000000 ; double 53
.quad 0x4049e00000000000 ; double 51.75
.quad 0x4049e00000000000 ; double 51.75
.quad 0x404a600000000000 ; double 52.75
.quad 0x404a30a3d70a3d71 ; double 52.380000000000003
.quad 0x404a800000000000 ; double 53
.quad 0x404a90a3d70a3d71 ; double 53.130000000000003
.quad 0x4049e00000000000 ; double 51.75
.quad 0x4048e00000000000 ; double 49.75
.quad 0x404970a3d70a3d71 ; double 50.880000000000003
.quad 0x4049a00000000000 ; double 51.25
.quad 0x404970a3d70a3d71 ; double 50.880000000000003
.quad 0x404990a3d70a3d71 ; double 51.130000000000003
.quad 0x4049d0a3d70a3d71 ; double 51.630000000000003
.quad 0x404a200000000000 ; double 52.25
.quad 0x4049f0a3d70a3d71 ; double 51.880000000000003
.quad 0x4049d0a3d70a3d71 ; double 51.630000000000003
.quad 0x404a30a3d70a3d71 ; double 52.380000000000003
.quad 0x404a70a3d70a3d71 ; double 52.880000000000003
.quad 0x404ab0a3d70a3d71 ; double 53.380000000000003
.quad 0x404b90a3d70a3d71 ; double 55.130000000000003
.quad 0x404c000000000000 ; double 56
.quad 0x404c200000000000 ; double 56.25
.quad 0x404bc00000000000 ; double 55.5
.quad 0x404b800000000000 ; double 55
.quad 0x404b90a3d70a3d71 ; double 55.130000000000003
.quad 0x404bc00000000000 ; double 55.5
.quad 0x404b90a3d70a3d71 ; double 55.130000000000003
.quad 0x404b30a3d70a3d71 ; double 54.380000000000003
.quad 0x404b400000000000 ; double 54.5
.quad 0x404b70a3d70a3d71 ; double 54.880000000000003
.quad 0x404b800000000000 ; double 55
.quad 0x404b200000000000 ; double 54.25
.quad 0x404ba00000000000 ; double 55.25
.quad 0x404b50a3d70a3d71 ; double 54.630000000000003
.quad 0x404b800000000000 ; double 55
.quad 0x404b90a3d70a3d71 ; double 55.130000000000003
.quad 0x404b400000000000 ; double 54.5
.quad 0x404b90a3d70a3d71 ; double 55.130000000000003
.quad 0x404bb0a3d70a3d71 ; double 55.380000000000003
.quad 0x404c400000000000 ; double 56.5
.quad 0x404c600000000000 ; double 56.75
.quad 0x404c10a3d70a3d71 ; double 56.130000000000003
.quad 0x404b800000000000 ; double 55
.quad 0x404b400000000000 ; double 54.5
.quad 0x404b10a3d70a3d71 ; double 54.130000000000003
.quad 0x404b70a3d70a3d71 ; double 54.880000000000003
.quad 0x404c10a3d70a3d71 ; double 56.130000000000003
.quad 0x404c200000000000 ; double 56.25
.quad 0x404c10a3d70a3d71 ; double 56.130000000000003
.quad 0x404c30a3d70a3d71 ; double 56.380000000000003
.quad 0x404c10a3d70a3d71 ; double 56.130000000000003
.quad 0x404c30a3d70a3d71 ; double 56.380000000000003
.quad 0x404c10a3d70a3d71 ; double 56.130000000000003
.quad 0x404c50a3d70a3d71 ; double 56.630000000000003
.quad 0x404c50a3d70a3d71 ; double 56.630000000000003
.quad 0x404cb0a3d70a3d71 ; double 57.380000000000003
.quad 0x404cc00000000000 ; double 57.5
.quad 0x404ca00000000000 ; double 57.25
.quad 0x404c90a3d70a3d71 ; double 57.130000000000003
.quad 0x404d200000000000 ; double 58.25
.quad 0x404d600000000000 ; double 58.75
.quad 0x404d50a3d70a3d71 ; double 58.630000000000003
.quad 0x404d600000000000 ; double 58.75
.quad 0x404d70a3d70a3d71 ; double 58.880000000000003
.quad 0x404d10a3d70a3d71 ; double 58.130000000000003
.quad 0x404ca00000000000 ; double 57.25
.quad 0x404cc00000000000 ; double 57.5
.quad 0x404d600000000000 ; double 58.75
.quad 0x404d200000000000 ; double 58.25
.quad 0x404c600000000000 ; double 56.75
.quad 0x404c50a3d70a3d71 ; double 56.630000000000003
.quad 0x404c50a3d70a3d71 ; double 56.630000000000003
.quad 0x404be00000000000 ; double 55.75
.quad 0x404bc00000000000 ; double 55.5
.quad 0x404bd0a3d70a3d71 ; double 55.630000000000003
.quad 0x404b90a3d70a3d71 ; double 55.130000000000003
.quad 0x404ba00000000000 ; double 55.25
.quad 0x404bf0a3d70a3d71 ; double 55.880000000000003
.quad 0x404bd0a3d70a3d71 ; double 55.630000000000003
.quad 0x404bf0a3d70a3d71 ; double 55.880000000000003
.quad 0x404c50a3d70a3d71 ; double 56.630000000000003
.quad 0x404c70a3d70a3d71 ; double 56.880000000000003
.quad 0x404c70a3d70a3d71 ; double 56.880000000000003
.quad 0x404c70a3d70a3d71 ; double 56.880000000000003
.quad 0x404c200000000000 ; double 56.25
.quad 0x404c400000000000 ; double 56.5
.quad 0x404c70a3d70a3d71 ; double 56.880000000000003
.quad 0x404c90a3d70a3d71 ; double 57.130000000000003
.quad 0x404cb0a3d70a3d71 ; double 57.380000000000003
.quad 0x404c90a3d70a3d71 ; double 57.130000000000003
.quad 0x404ca00000000000 ; double 57.25
.quad 0x404cf0a3d70a3d71 ; double 57.880000000000003
.quad 0x404d000000000000 ; double 58
.quad 0x404d90a3d70a3d71 ; double 59.130000000000003
.quad 0x404dd0a3d70a3d71 ; double 59.630000000000003
.quad 0x404e10a3d70a3d71 ; double 60.130000000000003
.quad 0x404dc00000000000 ; double 59.5
.quad 0x404dd0a3d70a3d71 ; double 59.630000000000003
.quad 0x404d200000000000 ; double 58.25
.quad 0x404d000000000000 ; double 58
.quad 0x404d400000000000 ; double 58.5
.quad 0x404d70a3d70a3d71 ; double 58.880000000000003
.quad 0x404de00000000000 ; double 59.75
.quad 0x404e30a3d70a3d71 ; double 60.380000000000003
.quad 0x404dd0a3d70a3d71 ; double 59.630000000000003
.quad 0x404dc00000000000 ; double 59.5
.quad 0x404d200000000000 ; double 58.25
.quad 0x404d10a3d70a3d71 ; double 58.130000000000003
.quad 0x404d70a3d70a3d71 ; double 58.880000000000003
.quad 0x404d400000000000 ; double 58.5
.quad 0x404d90a3d70a3d71 ; double 59.130000000000003
.quad 0x404de00000000000 ; double 59.75
.quad 0x404e400000000000 ; double 60.5
.quad 0x404e600000000000 ; double 60.75
.quad 0x404d90a3d70a3d71 ; double 59.130000000000003
.quad 0x404d400000000000 ; double 58.5
.quad 0x404d10a3d70a3d71 ; double 58.130000000000003
.quad 0x404c30a3d70a3d71 ; double 56.380000000000003
.quad 0x404bf0a3d70a3d71 ; double 55.880000000000003
.quad 0x404c30a3d70a3d71 ; double 56.380000000000003
.quad 0x404c50a3d70a3d71 ; double 56.630000000000003
.quad 0x404c50a3d70a3d71 ; double 56.630000000000003
.quad 0x404c70a3d70a3d71 ; double 56.880000000000003
.quad 0x404c70a3d70a3d71 ; double 56.880000000000003
.quad 0x404c90a3d70a3d71 ; double 57.130000000000003
.quad 0x404ca00000000000 ; double 57.25
.quad 0x404c70a3d70a3d71 ; double 56.880000000000003
.quad 0x404cd0a3d70a3d71 ; double 57.630000000000003
.quad 0x404cf0a3d70a3d71 ; double 57.880000000000003
.quad 0x404cd0a3d70a3d71 ; double 57.630000000000003
.quad 0x404d10a3d70a3d71 ; double 58.130000000000003
.quad 0x404d200000000000 ; double 58.25
.quad 0x404d30a3d70a3d71 ; double 58.380000000000003
.quad 0x404db0a3d70a3d71 ; double 59.380000000000003
.quad 0x404de00000000000 ; double 59.75
.quad 0x404e200000000000 ; double 60.25
.quad 0x404ea00000000000 ; double 61.25
.quad 0x404eb0a3d70a3d71 ; double 61.380000000000003
.quad 0x404e90a3d70a3d71 ; double 61.130000000000003
.quad 0x404eb0a3d70a3d71 ; double 61.380000000000003
.quad 0x404e800000000000 ; double 61
.quad 0x404eb0a3d70a3d71 ; double 61.380000000000003
.quad 0x404ee00000000000 ; double 61.75
.quad 0x404e200000000000 ; double 60.25
.quad 0x404df0a3d70a3d71 ; double 59.880000000000003
.quad 0x404e000000000000 ; double 60
.quad 0x404e600000000000 ; double 60.75
.quad 0x404fa00000000000 ; double 63.25
.quad 0x404ee00000000000 ; double 61.75
.quad 0x404e400000000000 ; double 60.5
.quad 0x404e50a3d70a3d71 ; double 60.630000000000003
.quad 0x404e800000000000 ; double 61
.quad 0x404f30a3d70a3d71 ; double 62.380000000000003
.quad 0x404f600000000000 ; double 62.75
.quad 0x404fb0a3d70a3d71 ; double 63.380000000000003
.quad 0x404f50a3d70a3d71 ; double 62.630000000000003
.quad 0x404f10a3d70a3d71 ; double 62.130000000000003
.quad 0x404fb0a3d70a3d71 ; double 63.380000000000003
.quad 0x4050100000000000 ; double 64.25
.quad 0x405007ae147ae148 ; double 64.120000000000005
.quad 0x404fa00000000000 ; double 63.25
.quad 0x404fb0a3d70a3d71 ; double 63.380000000000003
.quad 0x405007ae147ae148 ; double 64.120000000000005
.quad 0x4050300000000000 ; double 64.75
.quad 0x4050500000000000 ; double 65.25
.quad 0x4050500000000000 ; double 65.25
.quad 0x4050300000000000 ; double 64.75
.quad 0x405027ae147ae148 ; double 64.620000000000005
.quad 0x405007ae147ae148 ; double 64.120000000000005
.quad 0x404f600000000000 ; double 62.75
.quad 0x404fb0a3d70a3d71 ; double 63.380000000000003
.quad 0x404fa00000000000 ; double 63.25
.quad 0x404f30a3d70a3d71 ; double 62.380000000000003
.quad 0x404f30a3d70a3d71 ; double 62.380000000000003
.quad 0x404f70a3d70a3d71 ; double 62.880000000000003
.quad 0x404fe00000000000 ; double 63.75
.quad 0x4050100000000000 ; double 64.25
.quad 0x4050200000000000 ; double 64.5
.quad 0x405037ae147ae148 ; double 64.870000000000005
.quad 0x4050100000000000 ; double 64.25
.quad 0x404f50a3d70a3d71 ; double 62.630000000000003
.quad 0x404f30a3d70a3d71 ; double 62.380000000000003
.quad 0x404ed0a3d70a3d71 ; double 61.630000000000003
.quad 0x404f200000000000 ; double 62.25
.quad 0x404f30a3d70a3d71 ; double 62.380000000000003
.quad 0x404f400000000000 ; double 62.5
.quad 0x404f10a3d70a3d71 ; double 62.130000000000003
.quad 0x404f600000000000 ; double 62.75
.quad 0x404fa00000000000 ; double 63.25
.quad 0x404fd0a3d70a3d71 ; double 63.630000000000003
.quad 0x405017ae147ae148 ; double 64.370000000000005
.quad 0x4050300000000000 ; double 64.75
.quad 0x4050200000000000 ; double 64.5
.quad 0x4050300000000000 ; double 64.75
.quad 0x4050100000000000 ; double 64.25
.quad 0x405017ae147ae148 ; double 64.370000000000005
.quad 0x4050600000000000 ; double 65.5
.quad 0x4050600000000000 ; double 65.5
.quad 0x405067ae147ae148 ; double 65.620000000000005
.quad 0x405077ae147ae148 ; double 65.870000000000005
.quad 0x4050600000000000 ; double 65.5
.quad 0x405067ae147ae148 ; double 65.620000000000005
.quad 0x405057ae147ae148 ; double 65.370000000000005
.quad 0x405067ae147ae148 ; double 65.620000000000005
.quad 0x4050c7ae147ae148 ; double 67.120000000000005
.quad 0x4050d00000000000 ; double 67.25
.quad 0x4050c00000000000 ; double 67
.quad 0x4050b00000000000 ; double 66.75
.quad 0x4050e00000000000 ; double 67.5
.quad 0x4050b00000000000 ; double 66.75
.quad 0x4051000000000000 ; double 68
.quad 0x4051000000000000 ; double 68
.quad 0x4051700000000000 ; double 69.75
.quad 0x405187ae147ae148 ; double 70.120000000000005
.quad 0x4051e00000000000 ; double 71.5
.quad 0x4051b00000000000 ; double 70.75
.quad 0x4051500000000000 ; double 69.25
.quad 0x405147ae147ae148 ; double 69.120000000000005
.quad 0x4051500000000000 ; double 69.25
.quad 0x4051600000000000 ; double 69.5
.quad 0x4051400000000000 ; double 69
.quad 0x4051300000000000 ; double 68.75
.quad 0x405107ae147ae148 ; double 68.120000000000005
.quad 0x4050d00000000000 ; double 67.25
.quad 0x405037ae147ae148 ; double 64.870000000000005
.quad 0x405067ae147ae148 ; double 65.620000000000005
.quad 0x4050800000000000 ; double 66
.quad 0x4050900000000000 ; double 66.25
.quad 0x405097ae147ae148 ; double 66.370000000000005
.quad 0x4050800000000000 ; double 66
.quad 0x4050200000000000 ; double 64.5
.quad 0x4050a00000000000 ; double 66.5
.quad 0x4050b00000000000 ; double 66.75
.quad 0x4050d00000000000 ; double 67.25
.quad 0x405137ae147ae148 ; double 68.870000000000005
.quad 0x4051600000000000 ; double 69.5
.quad 0x4051400000000000 ; double 69
.quad 0x4051000000000000 ; double 68
.quad 0x4051000000000000 ; double 68
.quad 0x405137ae147ae148 ; double 68.870000000000005
.quad 0x4051700000000000 ; double 69.75
.quad 0x4051a7ae147ae148 ; double 70.620000000000005
.quad 0x405197ae147ae148 ; double 70.370000000000005
.quad 0x4051e7ae147ae148 ; double 71.620000000000005
.quad 0x4052100000000000 ; double 72.25
.quad 0x4052300000000000 ; double 72.75
.quad 0x405227ae147ae148 ; double 72.620000000000005
.quad 0x4051f00000000000 ; double 71.75
.quad 0x4052100000000000 ; double 72.25
.quad 0x4051e7ae147ae148 ; double 71.620000000000005
.quad 0x4051c7ae147ae148 ; double 71.120000000000005
.quad 0x4050f00000000000 ; double 67.75
.quad 0x4050d00000000000 ; double 67.25
.quad 0x4050c7ae147ae148 ; double 67.120000000000005
.quad 0x405107ae147ae148 ; double 68.120000000000005
.quad 0x4051900000000000 ; double 70.25
.quad 0x4050e7ae147ae148 ; double 67.620000000000005
.quad 0x4050d00000000000 ; double 67.25
.quad 0x4050900000000000 ; double 66.25
.quad 0x4050c7ae147ae148 ; double 67.120000000000005
.quad 0x4050b00000000000 ; double 66.75
.quad 0x4050a00000000000 ; double 66.5
.quad 0x4050a7ae147ae148 ; double 66.620000000000005
.quad 0x4050f00000000000 ; double 67.75
.quad 0x405107ae147ae148 ; double 68.120000000000005
.quad 0x405137ae147ae148 ; double 68.870000000000005
.quad 0x4051200000000000 ; double 68.5
.quad 0x405157ae147ae148 ; double 69.370000000000005
.quad 0x4051c7ae147ae148 ; double 71.120000000000005
.quad 0x405197ae147ae148 ; double 70.370000000000005
.quad 0x4051b7ae147ae148 ; double 70.870000000000005
.quad 0x4051b00000000000 ; double 70.75
.quad 0x4051e00000000000 ; double 71.5
.quad 0x4051b7ae147ae148 ; double 70.870000000000005
.quad 0x405197ae147ae148 ; double 70.370000000000005
.quad 0x4051100000000000 ; double 68.25
.quad 0x4051100000000000 ; double 68.25
.quad 0x4050c00000000000 ; double 67
.quad 0x405057ae147ae148 ; double 65.370000000000005
.quad 0x405067ae147ae148 ; double 65.620000000000005
.quad 0x4050200000000000 ; double 64.5
.quad 0x4050100000000000 ; double 64.25
.quad 0x405077ae147ae148 ; double 65.870000000000005
.quad 0x4050f7ae147ae148 ; double 67.870000000000005
.quad 0x4051000000000000 ; double 68
.quad 0x4051400000000000 ; double 69
.quad 0x405117ae147ae148 ; double 68.370000000000005
.quad 0x4050e00000000000 ; double 67.5
.quad 0x4050d7ae147ae148 ; double 67.370000000000005
.quad 0x4050f7ae147ae148 ; double 67.870000000000005
.quad 0x405097ae147ae148 ; double 66.370000000000005
.quad 0x4050900000000000 ; double 66.25
.quad 0x4050a00000000000 ; double 66.5
.quad 0x405097ae147ae148 ; double 66.370000000000005
.quad 0x4050900000000000 ; double 66.25
.quad 0x4050900000000000 ; double 66.25
.quad 0x4050a7ae147ae148 ; double 66.620000000000005
.quad 0x4050d00000000000 ; double 67.25
.quad 0x405107ae147ae148 ; double 68.120000000000005
.quad 0x4050e00000000000 ; double 67.5
.quad 0x4050a00000000000 ; double 66.5
.quad 0x405067ae147ae148 ; double 65.620000000000005
.quad 0x4050800000000000 ; double 66
.quad 0x4050c00000000000 ; double 67
.quad 0x4050f00000000000 ; double 67.75
.quad 0x4050e00000000000 ; double 67.5
.quad 0x405137ae147ae148 ; double 68.870000000000005
.quad 0x4050f00000000000 ; double 67.75
.quad 0x4050f7ae147ae148 ; double 67.870000000000005
.quad 0x4050c00000000000 ; double 67
.quad 0x4051000000000000 ; double 68
.quad 0x405147ae147ae148 ; double 69.120000000000005
.quad 0x405147ae147ae148 ; double 69.120000000000005
.quad 0x4050e00000000000 ; double 67.5
.quad 0x4051000000000000 ; double 68
.quad 0x4050b00000000000 ; double 66.75
.quad 0x4050900000000000 ; double 66.25
.quad 0x4050700000000000 ; double 65.75
.quad 0x405067ae147ae148 ; double 65.620000000000005
.quad 0x405057ae147ae148 ; double 65.370000000000005
.quad 0x4050400000000000 ; double 65
.quad 0x405057ae147ae148 ; double 65.370000000000005
.quad 0x405057ae147ae148 ; double 65.370000000000005
.quad 0x4050800000000000 ; double 66
.quad 0x4050600000000000 ; double 65.5
.quad 0x4050500000000000 ; double 65.25
.quad 0x405027ae147ae148 ; double 64.620000000000005
.quad 0x4050400000000000 ; double 65
.quad 0x405057ae147ae148 ; double 65.370000000000005
.quad 0x405087ae147ae148 ; double 66.120000000000005
.quad 0x4050800000000000 ; double 66
.quad 0x4050700000000000 ; double 65.75
.quad 0x405067ae147ae148 ; double 65.620000000000005
.quad 0x405027ae147ae148 ; double 64.620000000000005
.quad 0x4050500000000000 ; double 65.25
.quad 0x4050400000000000 ; double 65
.quad 0x404f70a3d70a3d71 ; double 62.880000000000003
.quad 0x404f30a3d70a3d71 ; double 62.380000000000003
.quad 0x404f70a3d70a3d71 ; double 62.880000000000003
.quad 0x404f200000000000 ; double 62.25
.quad 0x404f200000000000 ; double 62.25
.quad 0x404ec00000000000 ; double 61.5
.quad 0x404e800000000000 ; double 61
.quad 0x404dd0a3d70a3d71 ; double 59.630000000000003
.quad 0x404dc00000000000 ; double 59.5
.quad 0x404e000000000000 ; double 60
.quad 0x404e200000000000 ; double 60.25
.quad 0x404e200000000000 ; double 60.25
.quad 0x404db0a3d70a3d71 ; double 59.380000000000003
.quad 0x404da00000000000 ; double 59.25
.quad 0x404d30a3d70a3d71 ; double 58.380000000000003
.quad 0x404de00000000000 ; double 59.75
.quad 0x404de00000000000 ; double 59.75
.quad 0x404e30a3d70a3d71 ; double 60.380000000000003
.quad 0x404e30a3d70a3d71 ; double 60.380000000000003
.quad 0x404d90a3d70a3d71 ; double 59.130000000000003
.quad 0x404d30a3d70a3d71 ; double 58.380000000000003
.quad 0x404d30a3d70a3d71 ; double 58.380000000000003
.quad 0x404ce00000000000 ; double 57.75
.quad 0x404cc00000000000 ; double 57.5
.quad 0x404cf0a3d70a3d71 ; double 57.880000000000003
.quad 0x404d30a3d70a3d71 ; double 58.380000000000003
.quad 0x404d10a3d70a3d71 ; double 58.130000000000003
.quad 0x404c90a3d70a3d71 ; double 57.130000000000003
.quad 0x404cb0a3d70a3d71 ; double 57.380000000000003
.quad 0x404bc00000000000 ; double 55.5
.quad 0x404c10a3d70a3d71 ; double 56.130000000000003
.quad 0x404c50a3d70a3d71 ; double 56.630000000000003
.quad 0x404bd0a3d70a3d71 ; double 55.630000000000003
.quad 0x404b800000000000 ; double 55
.quad 0x404b10a3d70a3d71 ; double 54.130000000000003
.quad 0x404a70a3d70a3d71 ; double 52.880000000000003
.quad 0x404ac00000000000 ; double 53.5
.quad 0x404ae00000000000 ; double 53.75
.quad 0x404a800000000000 ; double 53
.quad 0x404aa00000000000 ; double 53.25
.quad 0x404ac00000000000 ; double 53.5
.quad 0x404aa00000000000 ; double 53.25
.quad 0x404a800000000000 ; double 53
.quad 0x404ae00000000000 ; double 53.75
.quad 0x404b30a3d70a3d71 ; double 54.380000000000003
.quad 0x404be00000000000 ; double 55.75
.quad 0x404be00000000000 ; double 55.75
.quad 0x404be00000000000 ; double 55.75
.quad 0x404b30a3d70a3d71 ; double 54.380000000000003
.quad 0x404b70a3d70a3d71 ; double 54.880000000000003
.quad 0x404b90a3d70a3d71 ; double 55.130000000000003
.quad 0x404b90a3d70a3d71 ; double 55.130000000000003
.quad 0x404bb0a3d70a3d71 ; double 55.380000000000003
.quad 0x404bb0a3d70a3d71 ; double 55.380000000000003
.quad 0x404b200000000000 ; double 54.25
.quad 0x404b30a3d70a3d71 ; double 54.380000000000003
.quad 0x404b400000000000 ; double 54.5
.quad 0x4049d0a3d70a3d71 ; double 51.630000000000003
.quad 0x4049e00000000000 ; double 51.75
.quad 0x4049d0a3d70a3d71 ; double 51.630000000000003
.quad 0x404a70a3d70a3d71 ; double 52.880000000000003
.quad 0x404ac00000000000 ; double 53.5
.quad 0x404ae00000000000 ; double 53.75
.quad 0x404b800000000000 ; double 55
.quad 0x404ba00000000000 ; double 55.25
.quad 0x404b600000000000 ; double 54.75
.quad 0x404be00000000000 ; double 55.75
.quad 0x404b70a3d70a3d71 ; double 54.880000000000003
.quad 0x404bf0a3d70a3d71 ; double 55.880000000000003
.quad 0x404c000000000000 ; double 56
.quad 0x404be00000000000 ; double 55.75
.quad 0x404bf0a3d70a3d71 ; double 55.880000000000003
.quad 0x404c30a3d70a3d71 ; double 56.380000000000003
.quad 0x404cc00000000000 ; double 57.5
.quad 0x404bf0a3d70a3d71 ; double 55.880000000000003
.quad 0x404b90a3d70a3d71 ; double 55.130000000000003
.quad 0x404c30a3d70a3d71 ; double 56.380000000000003
.quad 0x404d10a3d70a3d71 ; double 58.130000000000003
.quad 0x404da00000000000 ; double 59.25
.quad 0x404de00000000000 ; double 59.75
.quad 0x404e200000000000 ; double 60.25
.quad 0x404e30a3d70a3d71 ; double 60.380000000000003
.quad 0x404e600000000000 ; double 60.75
.quad 0x404ea00000000000 ; double 61.25
.quad 0x404f10a3d70a3d71 ; double 62.130000000000003
.quad 0x404f200000000000 ; double 62.25
.quad 0x404f200000000000 ; double 62.25
.quad 0x404ef0a3d70a3d71 ; double 61.880000000000003
.quad 0x404f200000000000 ; double 62.25
.quad 0x404f90a3d70a3d71 ; double 63.130000000000003
.quad 0x404fb0a3d70a3d71 ; double 63.380000000000003
.quad 0x404fc00000000000 ; double 63.5
.quad 0x404fe00000000000 ; double 63.75
.quad 0x4050000000000000 ; double 64
.quad 0x405047ae147ae148 ; double 65.120000000000005
.quad 0x405057ae147ae148 ; double 65.370000000000005
.quad 0x405077ae147ae148 ; double 65.870000000000005
.quad 0x4050900000000000 ; double 66.25
.quad 0x4050c7ae147ae148 ; double 67.120000000000005
.quad 0x4050f00000000000 ; double 67.75
.quad 0x4050f00000000000 ; double 67.75
.quad 0x4051000000000000 ; double 68
.quad 0x4051900000000000 ; double 70.25
.quad 0x4051a00000000000 ; double 70.5
.quad 0x405137ae147ae148 ; double 68.870000000000005
.quad 0x4050e00000000000 ; double 67.5
.quad 0x4050e7ae147ae148 ; double 67.620000000000005
.quad 0x4051100000000000 ; double 68.25
.quad 0x4051400000000000 ; double 69
.quad 0x405167ae147ae148 ; double 69.620000000000005
.quad 0x405167ae147ae148 ; double 69.620000000000005
.quad 0x4051600000000000 ; double 69.5
.quad 0x4051c7ae147ae148 ; double 71.120000000000005
.quad 0x4051a7ae147ae148 ; double 70.620000000000005
.quad 0x4051d7ae147ae148 ; double 71.370000000000005
.quad 0x4052000000000000 ; double 72
.quad 0x4051b00000000000 ; double 70.75
.quad 0x4051e00000000000 ; double 71.5
.quad 0x4051d00000000000 ; double 71.25
.quad 0x405177ae147ae148 ; double 69.870000000000005
.quad 0x405177ae147ae148 ; double 69.870000000000005
.quad 0x4051700000000000 ; double 69.75
.quad 0x405197ae147ae148 ; double 70.370000000000005
.quad 0x4051b7ae147ae148 ; double 70.870000000000005
.quad 0x4051900000000000 ; double 70.25
.quad 0x4050d00000000000 ; double 67.25
.quad 0x4051100000000000 ; double 68.25
.quad 0x4050f00000000000 ; double 67.75
.quad 0x4051000000000000 ; double 68
.quad 0x4050e00000000000 ; double 67.5
.quad 0x404fb0a3d70a3d71 ; double 63.380000000000003
.quad 0x404fe00000000000 ; double 63.75
.quad 0x404fc00000000000 ; double 63.5
.quad 0x4050200000000000 ; double 64.5
.quad 0x405027ae147ae148 ; double 64.620000000000005
.quad 0x405027ae147ae148 ; double 64.620000000000005
.quad 0x4050400000000000 ; double 65
.quad 0x405047ae147ae148 ; double 65.120000000000005
.quad 0x405037ae147ae148 ; double 64.870000000000005
.quad 0x4050500000000000 ; double 65.25
.quad 0x4050700000000000 ; double 65.75
.quad 0x405027ae147ae148 ; double 64.620000000000005
.quad 0x4050500000000000 ; double 65.25
.quad 0x405077ae147ae148 ; double 65.870000000000005
.quad 0x405087ae147ae148 ; double 66.120000000000005
.quad 0x4050500000000000 ; double 65.25
.quad 0x4050700000000000 ; double 65.75
.quad 0x4050b00000000000 ; double 66.75
.quad 0x4050a7ae147ae148 ; double 66.620000000000005
.quad 0x4050e7ae147ae148 ; double 67.620000000000005
.quad 0x4050d00000000000 ; double 67.25
.quad 0x405097ae147ae148 ; double 66.370000000000005
.quad 0x405057ae147ae148 ; double 65.370000000000005
.quad 0x405047ae147ae148 ; double 65.120000000000005
.quad 0x4050600000000000 ; double 65.5
.quad 0x4050900000000000 ; double 66.25
.quad 0x4050c00000000000 ; double 67
.quad 0x4050500000000000 ; double 65.25
.quad 0x405027ae147ae148 ; double 64.620000000000005
.quad 0x404f200000000000 ; double 62.25
.quad 0x404ef0a3d70a3d71 ; double 61.880000000000003
.quad 0x404f400000000000 ; double 62.5
.quad 0x404f30a3d70a3d71 ; double 62.380000000000003
.quad 0x404f30a3d70a3d71 ; double 62.380000000000003
.quad 0x404fa00000000000 ; double 63.25
.quad 0x404f800000000000 ; double 63
.quad 0x404f600000000000 ; double 62.75
.quad 0x404f70a3d70a3d71 ; double 62.880000000000003
.quad 0x404f400000000000 ; double 62.5
.quad 0x404f10a3d70a3d71 ; double 62.130000000000003
.quad 0x404f000000000000 ; double 62
.quad 0x404f90a3d70a3d71 ; double 63.130000000000003
.quad 0x404fa00000000000 ; double 63.25
.quad 0x404fe00000000000 ; double 63.75
.quad 0x404fd0a3d70a3d71 ; double 63.630000000000003
.quad 0x405007ae147ae148 ; double 64.120000000000005
.quad 0x4050000000000000 ; double 64
.quad 0x404f70a3d70a3d71 ; double 62.880000000000003
.quad 0x404f30a3d70a3d71 ; double 62.380000000000003
.quad 0x404f70a3d70a3d71 ; double 62.880000000000003
.quad 0x404f90a3d70a3d71 ; double 63.130000000000003
.quad 0x404fb0a3d70a3d71 ; double 63.380000000000003
.quad 0x405017ae147ae148 ; double 64.370000000000005
.quad 0x404f800000000000 ; double 63
.quad 0x404fe00000000000 ; double 63.75
.quad 0x4050200000000000 ; double 64.5
.quad 0x4050700000000000 ; double 65.75
.quad 0x405077ae147ae148 ; double 65.870000000000005
.quad 0x4050c7ae147ae148 ; double 67.120000000000005
.quad 0x4050e7ae147ae148 ; double 67.620000000000005
.quad 0x4050f00000000000 ; double 67.75
.quad 0x4050d7ae147ae148 ; double 67.370000000000005
.quad 0x4051400000000000 ; double 69
.quad 0x4051600000000000 ; double 69.5
.quad 0x405137ae147ae148 ; double 68.870000000000005
.quad 0x4051400000000000 ; double 69
.quad 0x4051300000000000 ; double 68.75
.quad 0x4050e7ae147ae148 ; double 67.620000000000005
.quad 0x4051200000000000 ; double 68.5
.quad 0x4051300000000000 ; double 68.75
.quad 0x4051600000000000 ; double 69.5
.quad 0x4051500000000000 ; double 69.25
.quad 0x405177ae147ae148 ; double 69.870000000000005
.quad 0x4051c00000000000 ; double 71
.quad 0x4051d7ae147ae148 ; double 71.370000000000005
.quad 0x405127ae147ae148 ; double 68.620000000000005
.quad 0x4051200000000000 ; double 68.5
.quad 0x4051500000000000 ; double 69.25
.quad 0x405117ae147ae148 ; double 68.370000000000005
.quad 0x4050b7ae147ae148 ; double 66.870000000000005
.quad 0x4050a7ae147ae148 ; double 66.620000000000005
.quad 0x4050f00000000000 ; double 67.75
.quad 0x4050f7ae147ae148 ; double 67.870000000000005
.quad 0x4051000000000000 ; double 68
.quad 0x4051100000000000 ; double 68.25
.quad 0x4051200000000000 ; double 68.5
.quad 0x405177ae147ae148 ; double 69.870000000000005
.quad 0x4051800000000000 ; double 70
.quad 0x4051800000000000 ; double 70
.quad 0x4051900000000000 ; double 70.25
.quad 0x405187ae147ae148 ; double 70.120000000000005
.quad 0x4051a7ae147ae148 ; double 70.620000000000005
.quad 0x4051a00000000000 ; double 70.5
.quad 0x4051b00000000000 ; double 70.75
.quad 0x4051c7ae147ae148 ; double 71.120000000000005
.quad 0x4051e7ae147ae148 ; double 71.620000000000005
.quad 0x4051f7ae147ae148 ; double 71.870000000000005
.quad 0x4051f7ae147ae148 ; double 71.870000000000005
.quad 0x4052200000000000 ; double 72.5
.quad 0x405217ae147ae148 ; double 72.370000000000005
.quad 0x405197ae147ae148 ; double 70.370000000000005
.quad 0x405197ae147ae148 ; double 70.370000000000005
.quad 0x4051a00000000000 ; double 70.5
.quad 0x4051500000000000 ; double 69.25
.quad 0x405197ae147ae148 ; double 70.370000000000005
.quad 0x4051c7ae147ae148 ; double 71.120000000000005
.quad 0x405157ae147ae148 ; double 69.370000000000005
.quad 0x4051600000000000 ; double 69.5
.quad 0x4051900000000000 ; double 70.25
.quad 0x4051800000000000 ; double 70
.quad 0x4051800000000000 ; double 70
.quad 0x4051700000000000 ; double 69.75
.quad 0x4051600000000000 ; double 69.5
.quad 0x405157ae147ae148 ; double 69.370000000000005
.quad 0x405167ae147ae148 ; double 69.620000000000005
.quad 0x405167ae147ae148 ; double 69.620000000000005
.quad 0x405157ae147ae148 ; double 69.370000000000005
.quad 0x405187ae147ae148 ; double 70.120000000000005
.quad 0x4051800000000000 ; double 70
.quad 0x4051600000000000 ; double 69.5
.quad 0x405197ae147ae148 ; double 70.370000000000005
.quad 0x4051a7ae147ae148 ; double 70.620000000000005
.quad 0x4051800000000000 ; double 70
.quad 0x4051e00000000000 ; double 71.5
.quad 0x405217ae147ae148 ; double 72.370000000000005
.quad 0x405267ae147ae148 ; double 73.620000000000005
.quad 0x4052800000000000 ; double 74
.quad 0x4053300000000000 ; double 76.75
.quad 0x405217ae147ae148 ; double 72.370000000000005
.quad 0x4052200000000000 ; double 72.5
.quad 0x405267ae147ae148 ; double 73.620000000000005
.quad 0x405287ae147ae148 ; double 74.120000000000005
.quad 0x405287ae147ae148 ; double 74.120000000000005
.quad 0x4052a00000000000 ; double 74.5
.quad 0x4052900000000000 ; double 74.25
.quad 0x4052a00000000000 ; double 74.5
.quad 0x4052900000000000 ; double 74.25
.quad 0x4052a00000000000 ; double 74.5
.quad 0x4052700000000000 ; double 73.75
.quad 0x4052600000000000 ; double 73.5
.quad 0x4052800000000000 ; double 74
.quad 0x4052c00000000000 ; double 75
.quad 0x4052d00000000000 ; double 75.25
.quad 0x4053800000000000 ; double 78
.quad 0x4053a00000000000 ; double 78.5
.quad 0x405367ae147ae148 ; double 77.620000000000005
.quad 0x405377ae147ae148 ; double 77.870000000000005
.quad 0x4053900000000000 ; double 78.25
.quad 0x4053900000000000 ; double 78.25
.quad 0x4053800000000000 ; double 78
.quad 0x405317ae147ae148 ; double 76.370000000000005
.quad 0x405317ae147ae148 ; double 76.370000000000005
.quad 0x40730c0000000000 ; double 304.75
.quad 0x407326147ae147ae ; double 306.38
.quad 0x4073340000000000 ; double 307.25
.quad 0x407336147ae147ae ; double 307.38
.quad 0x4073380000000000 ; double 307.5
.quad 0x4073680000000000 ; double 310.5
.quad 0x4073680000000000 ; double 310.5
.quad 0x40734e147ae147ae ; double 308.88
.quad 0x40735c0000000000 ; double 309.75
.quad 0x4073580000000000 ; double 309.5
.quad 0x4073300000000000 ; double 307
.quad 0x4073600000000000 ; double 310
.quad 0x40735c0000000000 ; double 309.75
.quad 0x4073500000000000 ; double 309
.quad 0x407362147ae147ae ; double 310.13
.quad 0x40738c0000000000 ; double 312.75
.quad 0x4073840000000000 ; double 312.25
.quad 0x40735e147ae147ae ; double 309.88
.quad 0x4073d80000000000 ; double 317.5
.quad 0x4074180000000000 ; double 321.5
.quad 0x4073e80000000000 ; double 318.5
.quad 0x4073d80000000000 ; double 317.5
.quad 0x4073a40000000000 ; double 314.25
.quad 0x4073680000000000 ; double 310.5
.quad 0x4073800000000000 ; double 312
.quad 0x4073840000000000 ; double 312.25
.quad 0x40738e147ae147ae ; double 312.88
.quad 0x4073640000000000 ; double 310.25
.quad 0x4073540000000000 ; double 309.25
.quad 0x40738e147ae147ae ; double 312.88
.quad 0x4073a00000000000 ; double 314
.quad 0x4073980000000000 ; double 313.5
.quad 0x407396147ae147ae ; double 313.38
.quad 0x4073a40000000000 ; double 314.25
.quad 0x4074180000000000 ; double 321.5
.quad 0x4074200000000000 ; double 322
.quad 0x4073fc0000000000 ; double 319.75
.quad 0x407406147ae147ae ; double 320.38
.quad 0x40740c0000000000 ; double 320.75
.quad 0x407416147ae147ae ; double 321.38
.quad 0x4073fc0000000000 ; double 319.75
.quad 0x4073b80000000000 ; double 315.5
.quad 0x4073ea147ae147ae ; double 318.63
.quad 0x4073f6147ae147ae ; double 319.38
.quad 0x4073fe147ae147ae ; double 319.88
.quad 0x40740c0000000000 ; double 320.75
.quad 0x4073a80000000000 ; double 314.5
.quad 0x4073d00000000000 ; double 317
.quad 0x4073d80000000000 ; double 317.5
.quad 0x4073c00000000000 ; double 316
.quad 0x40737a147ae147ae ; double 311.63
.quad 0x4073840000000000 ; double 312.25
.quad 0x4073540000000000 ; double 309.25
.quad 0x40734a147ae147ae ; double 308.63
.quad 0x40735c0000000000 ; double 309.75
.quad 0x40737c0000000000 ; double 311.75
.quad 0x4073600000000000 ; double 310
.quad 0x40735e147ae147ae ; double 309.88
.quad 0x4073580000000000 ; double 309.5
.quad 0x40733c0000000000 ; double 307.75
.quad 0x4073280000000000 ; double 306.5
.quad 0x40733c0000000000 ; double 307.75
.quad 0x4072d40000000000 ; double 301.25
.quad 0x4072d80000000000 ; double 301.5
.quad 0x4072d00000000000 ; double 301
.quad 0x4073000000000000 ; double 304
.quad 0x40730c0000000000 ; double 304.75
.quad 0x4072f80000000000 ; double 303.5
.quad 0x4073180000000000 ; double 305.5
.quad 0x40734a147ae147ae ; double 308.63
.quad 0x407346147ae147ae ; double 308.38
.quad 0x4073400000000000 ; double 308
.quad 0x4073480000000000 ; double 308.5
.quad 0x4073440000000000 ; double 308.25
.quad 0x4073600000000000 ; double 310
.quad 0x4073300000000000 ; double 307
.quad 0x4072de147ae147ae ; double 301.88
.quad 0x4072ca147ae147ae ; double 300.63
.quad 0x4072bc0000000000 ; double 299.75
.quad 0x4073280000000000 ; double 306.5
.quad 0x40732a147ae147ae ; double 306.63
.quad 0x4073700000000000 ; double 311
.quad 0x4073700000000000 ; double 311
.quad 0x4073880000000000 ; double 312.5
.quad 0x4073a80000000000 ; double 314.5
.quad 0x4073940000000000 ; double 313.25
.quad 0x4073980000000000 ; double 313.5
.quad 0x4073800000000000 ; double 312
.quad 0x4073740000000000 ; double 311.25
.quad 0x4073900000000000 ; double 313
.quad 0x4073800000000000 ; double 312
.quad 0x40736c0000000000 ; double 310.75
.quad 0x4073800000000000 ; double 312
.quad 0x4073800000000000 ; double 312
.quad 0x4073aa147ae147ae ; double 314.63
.quad 0x4073c40000000000 ; double 316.25
.quad 0x40739e147ae147ae ; double 313.88
.quad 0x4073800000000000 ; double 312
.quad 0x4073380000000000 ; double 307.5
.quad 0x407346147ae147ae ; double 308.38
.quad 0x40731c0000000000 ; double 305.75
.quad 0x4073300000000000 ; double 307
.quad 0x4073500000000000 ; double 309
.quad 0x40734c0000000000 ; double 308.75
.quad 0x4073100000000000 ; double 305
.quad 0x4073280000000000 ; double 306.5
.quad 0x4073400000000000 ; double 308
.quad 0x4073280000000000 ; double 306.5
.quad 0x4073600000000000 ; double 310
.quad 0x4072700000000000 ; double 295
.quad 0x4071ce147ae147ae ; double 284.88
.quad 0x4071d80000000000 ; double 285.5
.quad 0x4071b40000000000 ; double 283.25
.quad 0x4070fe147ae147ae ; double 271.88
.quad 0x407136147ae147ae ; double 275.38
.quad 0x4071300000000000 ; double 275
.quad 0x4071480000000000 ; double 276.5
.quad 0x4071680000000000 ; double 278.5
.quad 0x40716c0000000000 ; double 278.75
.quad 0x4071480000000000 ; double 276.5
.quad 0x4071640000000000 ; double 278.25
.quad 0x4071780000000000 ; double 279.5
.quad 0x407166147ae147ae ; double 278.38
.quad 0x40712a147ae147ae ; double 274.63
.quad 0x4071280000000000 ; double 274.5
.quad 0x4070e00000000000 ; double 270
.quad 0x4070b80000000000 ; double 267.5
.quad 0x4071000000000000 ; double 272
.quad 0x4070fc0000000000 ; double 271.75
.quad 0x4070e80000000000 ; double 270.5
.quad 0x4070c00000000000 ; double 268
.quad 0x4070940000000000 ; double 265.25
.quad 0x4070880000000000 ; double 264.5
.quad 0x4070640000000000 ; double 262.25
.quad 0x4070540000000000 ; double 261.25
.quad 0x4070640000000000 ; double 262.25
.quad 0x407046147ae147ae ; double 260.38
.quad 0x4070880000000000 ; double 264.5
.quad 0x4070980000000000 ; double 265.5
.quad 0x4070ac0000000000 ; double 266.75
.quad 0x4070a00000000000 ; double 266
.quad 0x4070740000000000 ; double 263.25
.quad 0x4070c40000000000 ; double 268.25
.quad 0x4070f80000000000 ; double 271.5
.quad 0x40710c0000000000 ; double 272.75
.quad 0x4071480000000000 ; double 276.5
.quad 0x4071080000000000 ; double 272.5
.quad 0x4071100000000000 ; double 273
.quad 0x40711e147ae147ae ; double 273.88
.quad 0x407132147ae147ae ; double 275.13
.quad 0x4071580000000000 ; double 277.5
.quad 0x4071640000000000 ; double 278.25
.quad 0x4071680000000000 ; double 278.5
.quad 0x4071580000000000 ; double 277.5
.quad 0x40718c0000000000 ; double 280.75
.quad 0x40717c0000000000 ; double 279.75
.quad 0x40717c0000000000 ; double 279.75
.quad 0x4071e00000000000 ; double 286
.quad 0x4072080000000000 ; double 288.5
.quad 0x4072580000000000 ; double 293.5
.quad 0x4072280000000000 ; double 290.5
.quad 0x4071b80000000000 ; double 283.5
.quad 0x4071b00000000000 ; double 283
.quad 0x4071980000000000 ; double 281.5
.quad 0x4071a00000000000 ; double 282
.quad 0x4071a00000000000 ; double 282
.quad 0x4071980000000000 ; double 281.5
.quad 0x4071880000000000 ; double 280.5
.quad 0x4071680000000000 ; double 278.5
.quad 0x4071700000000000 ; double 279
.quad 0x4071a00000000000 ; double 282
.quad 0x4071a80000000000 ; double 282.5
.quad 0x4071980000000000 ; double 281.5
.quad 0x4071c80000000000 ; double 284.5
.quad 0x4071dc0000000000 ; double 285.75
.quad 0x4072080000000000 ; double 288.5
.quad 0x4072340000000000 ; double 291.25
.quad 0x40723e147ae147ae ; double 291.88
.quad 0x4072680000000000 ; double 294.5
.quad 0x407286147ae147ae ; double 296.38
.quad 0x4072d6147ae147ae ; double 301.38
.quad 0x4072ce147ae147ae ; double 300.88
.quad 0x4072f6147ae147ae ; double 303.38
.quad 0x4073080000000000 ; double 304.5
.quad 0x4072e80000000000 ; double 302.5
.quad 0x4072f00000000000 ; double 303
.quad 0x4072d00000000000 ; double 301
.quad 0x40726e147ae147ae ; double 294.88
.quad 0x4072600000000000 ; double 294
.quad 0x4072700000000000 ; double 295
.quad 0x4072840000000000 ; double 296.25
.quad 0x407296147ae147ae ; double 297.38
.quad 0x4072b80000000000 ; double 299.5
.quad 0x4072c00000000000 ; double 300
.quad 0x4072cc0000000000 ; double 300.75
.quad 0x40729a147ae147ae ; double 297.63
.quad 0x40727c0000000000 ; double 295.75
.quad 0x4072bc0000000000 ; double 299.75
.quad 0x4072b6147ae147ae ; double 299.38
.quad 0x4072a80000000000 ; double 298.5
.quad 0x4072380000000000 ; double 291.5
.quad 0x4072380000000000 ; double 291.5
.quad 0x40722c0000000000 ; double 290.75
.quad 0x4072340000000000 ; double 291.25
.quad 0x4072540000000000 ; double 293.25
.quad 0x40721c0000000000 ; double 289.75
.quad 0x4072280000000000 ; double 290.5
.quad 0x40721a147ae147ae ; double 289.63
.quad 0x4072700000000000 ; double 295
.quad 0x4072500000000000 ; double 293
.quad 0x40719c0000000000 ; double 281.75
.quad 0x4071980000000000 ; double 281.5
.quad 0x4071780000000000 ; double 279.5
.quad 0x4071440000000000 ; double 276.25
.quad 0x40714c0000000000 ; double 276.75
.quad 0x4071100000000000 ; double 273
.quad 0x4070f00000000000 ; double 271
.quad 0x4070e40000000000 ; double 270.25
.quad 0x4071000000000000 ; double 272
.quad 0x4070dc0000000000 ; double 269.75
.quad 0x4070ae147ae147ae ; double 266.88
.quad 0x4070e2147ae147ae ; double 270.13
.quad 0x4070bc0000000000 ; double 267.75
.quad 0x4070380000000000 ; double 259.5
.quad 0x40703c0000000000 ; double 259.75
.quad 0x40704c0000000000 ; double 260.75
.quad 0x40704e147ae147ae ; double 260.88
.quad 0x40704a147ae147ae ; double 260.63
.quad 0x4070300000000000 ; double 259
.quad 0x4070380000000000 ; double 259.5
.quad 0x4070180000000000 ; double 257.5
.quad 0x4070440000000000 ; double 260.25
.quad 0x4070640000000000 ; double 262.25
.quad 0x40705e147ae147ae ; double 261.88
.quad 0x4070600000000000 ; double 262
.quad 0x4070680000000000 ; double 262.5
.quad 0x4070a80000000000 ; double 266.5
.quad 0x4070b00000000000 ; double 267
.quad 0x4070c40000000000 ; double 268.25
.quad 0x4070dc0000000000 ; double 269.75
.quad 0x4070e00000000000 ; double 270
.quad 0x4070ea147ae147ae ; double 270.63
.quad 0x4071080000000000 ; double 272.5
.quad 0x4071440000000000 ; double 276.25
.quad 0x4071240000000000 ; double 274.25
.quad 0x4070f6147ae147ae ; double 271.38
.quad 0x4070e00000000000 ; double 270
.quad 0x4070dc0000000000 ; double 269.75
.quad 0x4070aa147ae147ae ; double 266.63
.quad 0x4070c80000000000 ; double 268.5
.quad 0x4070a00000000000 ; double 266
.quad 0x40704c0000000000 ; double 260.75
.quad 0x4070380000000000 ; double 259.5
.quad 0x407066147ae147ae ; double 262.38
.quad 0x40705c0000000000 ; double 261.75
.quad 0x40705e147ae147ae ; double 261.88
.quad 0x4070800000000000 ; double 264
.quad 0x4070680000000000 ; double 262.5
.quad 0x4070a00000000000 ; double 266
.quad 0x4070ac0000000000 ; double 266.75
.quad 0x40708a147ae147ae ; double 264.63
.quad 0x4070a40000000000 ; double 266.25
.quad 0x4070ba147ae147ae ; double 267.63
.quad 0x4070c80000000000 ; double 268.5
.quad 0x4070a40000000000 ; double 266.25
.quad 0x4070a40000000000 ; double 266.25
.quad 0x4070940000000000 ; double 265.25
.quad 0x40704e147ae147ae ; double 260.88
.quad 0x407046147ae147ae ; double 260.38
.quad 0x4070780000000000 ; double 263.5
.quad 0x4070880000000000 ; double 264.5
.quad 0x4070540000000000 ; double 261.25
.quad 0x4070a40000000000 ; double 266.25
.quad 0x4070ba147ae147ae ; double 267.63
.quad 0x4070cc0000000000 ; double 268.75
.quad 0x4070a00000000000 ; double 266
.quad 0x40705c0000000000 ; double 261.75
.quad 0x4070900000000000 ; double 265
.quad 0x4070800000000000 ; double 264
.quad 0x4070240000000000 ; double 258.25
.quad 0x406ff00000000000 ; double 255.5
.quad 0x40701e147ae147ae ; double 257.88
.quad 0x406fc00000000000 ; double 254
.quad 0x406fa00000000000 ; double 253
.quad 0x406fb428f5c28f5c ; double 253.63
.quad 0x406e800000000000 ; double 244
.quad 0x406df00000000000 ; double 239.5
.quad 0x406e400000000000 ; double 242
.quad 0x406e100000000000 ; double 240.5
.quad 0x406e300000000000 ; double 241.5
.quad 0x406e600000000000 ; double 243
.quad 0x406e800000000000 ; double 244
.quad 0x406e300000000000 ; double 241.5
.quad 0x406dcc28f5c28f5c ; double 238.38
.quad 0x406d880000000000 ; double 236.25
.quad 0x406db428f5c28f5c ; double 237.63
.quad 0x406dcc28f5c28f5c ; double 238.38
.quad 0x406e000000000000 ; double 240
.quad 0x406dfc28f5c28f5c ; double 239.88
.quad 0x406de00000000000 ; double 239
.quad 0x406e180000000000 ; double 240.75
.quad 0x406e180000000000 ; double 240.75
.quad 0x406e200000000000 ; double 241
.quad 0x406e8c28f5c28f5c ; double 244.38
.quad 0x406e880000000000 ; double 244.25
.quad 0x406e700000000000 ; double 243.5
.quad 0x406e4428f5c28f5c ; double 242.13
.quad 0x406e600000000000 ; double 243
.quad 0x406e780000000000 ; double 243.75
.quad 0x406eb80000000000 ; double 245.75
.quad 0x406eec28f5c28f5c ; double 247.38
.quad 0x406ed80000000000 ; double 246.75
.quad 0x406e980000000000 ; double 244.75
.quad 0x406e880000000000 ; double 244.25
.quad 0x406f380000000000 ; double 249.75
.quad 0x406f880000000000 ; double 252.25
.quad 0x406f900000000000 ; double 252.5
.quad 0x406fb80000000000 ; double 253.75
.quad 0x407012147ae147ae ; double 257.13
.quad 0x4070180000000000 ; double 257.5
.quad 0x4070000000000000 ; double 256
.quad 0x4070040000000000 ; double 256.25
.quad 0x406ff428f5c28f5c ; double 255.63
.quad 0x407006147ae147ae ; double 256.38
.quad 0x4070040000000000 ; double 256.25
.quad 0x4070200000000000 ; double 258
.quad 0x40701c0000000000 ; double 257.75
.quad 0x4070280000000000 ; double 258.5
.quad 0x4070440000000000 ; double 260.25
.quad 0x407042147ae147ae ; double 260.13
.quad 0x40705e147ae147ae ; double 261.88
.quad 0x40704c0000000000 ; double 260.75
.quad 0x40702c0000000000 ; double 258.75
.quad 0x4070480000000000 ; double 260.5
.quad 0x4070a40000000000 ; double 266.25
.quad 0x4070a6147ae147ae ; double 266.38
.quad 0x4070ae147ae147ae ; double 266.88
.quad 0x4070ae147ae147ae ; double 266.88
.quad 0x40709c0000000000 ; double 265.75
.quad 0x4070aa147ae147ae ; double 266.63
.quad 0x4070b6147ae147ae ; double 267.38
.quad 0x4070b00000000000 ; double 267
.quad 0x4070a40000000000 ; double 266.25
.quad 0x4070b40000000000 ; double 267.25
.quad 0x4070d40000000000 ; double 269.25
.quad 0x4070dc0000000000 ; double 269.75
.quad 0x4070d40000000000 ; double 269.25
.quad 0x4070c00000000000 ; double 268
.quad 0x4070bc0000000000 ; double 267.75
.quad 0x4070c40000000000 ; double 268.25
.quad 0x4070c2147ae147ae ; double 268.13
.quad 0x4070dc0000000000 ; double 269.75
.quad 0x4070ce147ae147ae ; double 268.88
.quad 0x4070b00000000000 ; double 267
.quad 0x4070fc0000000000 ; double 271.75
.quad 0x4070ec0000000000 ; double 270.75
.quad 0x4071100000000000 ; double 273
.quad 0x4071340000000000 ; double 275.25
.quad 0x4071180000000000 ; double 273.5
.quad 0x4070f40000000000 ; double 271.25
.quad 0x4070ea147ae147ae ; double 270.63
.quad 0x4070e40000000000 ; double 270.25
.quad 0x4070c00000000000 ; double 268
.quad 0x4070ac0000000000 ; double 266.75
.quad 0x407086147ae147ae ; double 264.38
.quad 0x40708c0000000000 ; double 264.75
.quad 0x4070a00000000000 ; double 266
.quad 0x4070ba147ae147ae ; double 267.63
.quad 0x4070a6147ae147ae ; double 266.38
.quad 0x4070880000000000 ; double 264.5
.quad 0x40709e147ae147ae ; double 265.88
.quad 0x40709e147ae147ae ; double 265.88
.quad 0x40706c0000000000 ; double 262.75
.quad 0x4070580000000000 ; double 261.5
.quad 0x407066147ae147ae ; double 262.38
.quad 0x40708c0000000000 ; double 264.75
.quad 0x4070880000000000 ; double 264.5
.quad 0x4070840000000000 ; double 264.25
.quad 0x4070900000000000 ; double 265
.quad 0x4070b2147ae147ae ; double 267.13
.quad 0x4070ce147ae147ae ; double 268.88
.quad 0x4070d40000000000 ; double 269.25
.quad 0x4070d40000000000 ; double 269.25
.quad 0x4070a40000000000 ; double 266.25
.quad 0x4070500000000000 ; double 261
.quad 0x40704e147ae147ae ; double 260.88
.quad 0x4070340000000000 ; double 259.25
.quad 0x4070580000000000 ; double 261.5
.quad 0x4070700000000000 ; double 263
.quad 0x4070400000000000 ; double 260
.quad 0x40704c0000000000 ; double 260.75
.quad 0x4070380000000000 ; double 259.5
.quad 0x406f980000000000 ; double 252.75
.quad 0x406f8c28f5c28f5c ; double 252.38
.quad 0x406fa80000000000 ; double 253.25
.quad 0x406fa00000000000 ; double 253
.quad 0x406f580000000000 ; double 250.75
.quad 0x406fac28f5c28f5c ; double 253.38
.quad 0x40701a147ae147ae ; double 257.63
.quad 0x4070340000000000 ; double 259.25
.quad 0x407042147ae147ae ; double 260.13
.quad 0x4070540000000000 ; double 261.25
.quad 0x4070500000000000 ; double 261
.quad 0x4070180000000000 ; double 257.5
.quad 0x40703a147ae147ae ; double 259.63
.quad 0x4070380000000000 ; double 259.5
.quad 0x40702a147ae147ae ; double 258.63
.quad 0x4070180000000000 ; double 257.5
.quad 0x4070340000000000 ; double 259.25
.quad 0x40702c0000000000 ; double 258.75
.quad 0x40702c0000000000 ; double 258.75
.quad 0x4070280000000000 ; double 258.5
.quad 0x4070000000000000 ; double 256
.quad 0x40701a147ae147ae ; double 257.63
.quad 0x4070200000000000 ; double 258
.quad 0x407026147ae147ae ; double 258.38
.quad 0x40702c0000000000 ; double 258.75
.quad 0x4070340000000000 ; double 259.25
.quad 0x4070600000000000 ; double 262
.quad 0x4070680000000000 ; double 262.5
.quad 0x4070540000000000 ; double 261.25
.quad 0x40704a147ae147ae ; double 260.63
.quad 0x4070480000000000 ; double 260.5
.quad 0x40703c0000000000 ; double 259.75
.quad 0x4070400000000000 ; double 260
.quad 0x4070280000000000 ; double 258.5
.quad 0x40702a147ae147ae ; double 258.63
.quad 0x4070300000000000 ; double 259
.quad 0x4070300000000000 ; double 259
.quad 0x4070200000000000 ; double 258
.quad 0x4070680000000000 ; double 262.5
.quad 0x40707e147ae147ae ; double 263.88
.quad 0x4070840000000000 ; double 264.25
.quad 0x407076147ae147ae ; double 263.38
.quad 0x407072147ae147ae ; double 263.13
.quad 0x4070940000000000 ; double 265.25
.quad 0x4070bc0000000000 ; double 267.75
.quad 0x4070c80000000000 ; double 268.5
.quad 0x4070ca147ae147ae ; double 268.63
.quad 0x4070c80000000000 ; double 268.5
.quad 0x4070d00000000000 ; double 269
.quad 0x4070cc0000000000 ; double 268.75
.quad 0x4070ee147ae147ae ; double 270.88
.quad 0x4070d80000000000 ; double 269.5
.quad 0x4070be147ae147ae ; double 267.88
.quad 0x4070cc0000000000 ; double 268.75
.quad 0x4070e6147ae147ae ; double 270.38
.quad 0x4071000000000000 ; double 272
.quad 0x4070f80000000000 ; double 271.5
.quad 0x4070d6147ae147ae ; double 269.38
.quad 0x4070f40000000000 ; double 271.25
.quad 0x4070e2147ae147ae ; double 270.13
.quad 0x4070d40000000000 ; double 269.25
.quad 0x4070d6147ae147ae ; double 269.38
.quad 0x4070b6147ae147ae ; double 267.38
.quad 0x4070cc0000000000 ; double 268.75
.quad 0x4070c40000000000 ; double 268.25
.quad 0x4070a40000000000 ; double 266.25
.quad 0x4070a80000000000 ; double 266.5
.quad 0x4070da147ae147ae ; double 269.63
.quad 0x4070d00000000000 ; double 269
.quad 0x4070e80000000000 ; double 270.5
.quad 0x4070da147ae147ae ; double 269.63
.quad 0x4070ec0000000000 ; double 270.75
.quad 0x4070c2147ae147ae ; double 268.13
.quad 0x4070c80000000000 ; double 268.5
.quad 0x4070f80000000000 ; double 271.5
.quad 0x4071140000000000 ; double 273.25
.quad 0x4071240000000000 ; double 274.25
.quad 0x407126147ae147ae ; double 274.38
.quad 0x4071140000000000 ; double 273.25
.quad 0x40712c0000000000 ; double 274.75
.quad 0x4071380000000000 ; double 275.5
.quad 0x4071240000000000 ; double 274.25
.quad 0x4070ec0000000000 ; double 270.75
.quad 0x40705a147ae147ae ; double 261.63
.quad 0x4070580000000000 ; double 261.5
.quad 0x40703e147ae147ae ; double 259.88
.quad 0x4070640000000000 ; double 262.25
.quad 0x4070580000000000 ; double 261.5
.quad 0x40705c0000000000 ; double 261.75
.quad 0x40707a147ae147ae ; double 263.63
.quad 0x40707e147ae147ae ; double 263.88
.quad 0x4070a6147ae147ae ; double 266.38
.quad 0x40709c0000000000 ; double 265.75
.quad 0x4070ce147ae147ae ; double 268.88
.quad 0x4070b40000000000 ; double 267.25
.quad 0x4070a80000000000 ; double 266.5
.quad 0x4070640000000000 ; double 262.25
.quad 0x407046147ae147ae ; double 260.38
.quad 0x4070540000000000 ; double 261.25
.quad 0x4070440000000000 ; double 260.25
.quad 0x40702c0000000000 ; double 258.75
.quad 0x407026147ae147ae ; double 258.38
.quad 0x40703e147ae147ae ; double 259.88
.quad 0x4070280000000000 ; double 258.5
.quad 0x406fb80000000000 ; double 253.75
.quad 0x406f880000000000 ; double 252.25
.quad 0x406fb00000000000 ; double 253.5
.quad 0x406fb80000000000 ; double 253.75
.quad 0x406fa00000000000 ; double 253
.quad 0x406f800000000000 ; double 252
.quad 0x406f580000000000 ; double 250.75
.quad 0x406f0c28f5c28f5c ; double 248.38
.quad 0x406f200000000000 ; double 249
.quad 0x406ed428f5c28f5c ; double 246.63
.quad 0x406f280000000000 ; double 249.25
.quad 0x406f2428f5c28f5c ; double 249.13
.quad 0x406f0c28f5c28f5c ; double 248.38
.quad 0x406f0428f5c28f5c ; double 248.13
.quad 0x406f3428f5c28f5c ; double 249.63
.quad 0x406f600000000000 ; double 251
.quad 0x406fd00000000000 ; double 254.5
.quad 0x4070080000000000 ; double 256.5
.quad 0x406fec28f5c28f5c ; double 255.38
.quad 0x406fe428f5c28f5c ; double 255.13
.quad 0x406fc80000000000 ; double 254.25
.quad 0x406f9c28f5c28f5c ; double 252.88
.quad 0x406ff00000000000 ; double 255.5
.quad 0x4070140000000000 ; double 257.25
.quad 0x4070100000000000 ; double 257
.quad 0x4070400000000000 ; double 260
.quad 0x4070600000000000 ; double 262
.quad 0x40707c0000000000 ; double 263.75
.quad 0x4070600000000000 ; double 262
.quad 0x4070400000000000 ; double 260
.quad 0x40703c0000000000 ; double 259.75
.quad 0x40705a147ae147ae ; double 261.63
.quad 0x4070600000000000 ; double 262
.quad 0x4070480000000000 ; double 260.5
.quad 0x40703a147ae147ae ; double 259.63
.quad 0x4070740000000000 ; double 263.25
.quad 0x4070e80000000000 ; double 270.5
.quad 0x4070f80000000000 ; double 271.5
.quad 0x40710e147ae147ae ; double 272.88
.quad 0x40710a147ae147ae ; double 272.63
.quad 0x4071080000000000 ; double 272.5
.quad 0x4071380000000000 ; double 275.5
.quad 0x4071600000000000 ; double 278
.quad 0x4071740000000000 ; double 279.25
.quad 0x4071540000000000 ; double 277.25
.quad 0x40714e147ae147ae ; double 276.88
.quad 0x4071380000000000 ; double 275.5
.quad 0x40714c0000000000 ; double 276.75
.quad 0x407162147ae147ae ; double 278.13
.quad 0x4071680000000000 ; double 278.5
.quad 0x4071680000000000 ; double 278.5
.quad 0x4071880000000000 ; double 280.5
.quad 0x4071940000000000 ; double 281.25
.quad 0x4071780000000000 ; double 279.5
.quad 0x40718c0000000000 ; double 280.75
.quad 0x4071aa147ae147ae ; double 282.63
.quad 0x4071da147ae147ae ; double 285.63
.quad 0x4071d80000000000 ; double 285.5
.quad 0x4071c00000000000 ; double 284
.quad 0x4071cc0000000000 ; double 284.75
.quad 0x4071c2147ae147ae ; double 284.13
.quad 0x4071da147ae147ae ; double 285.63
.quad 0x4071e2147ae147ae ; double 286.13
.quad 0x4071d00000000000 ; double 285
.quad 0x4071c6147ae147ae ; double 284.38
.quad 0x4071c80000000000 ; double 284.5
.quad 0x407182147ae147ae ; double 280.13
.quad 0x4071940000000000 ; double 281.25
.quad 0x4071880000000000 ; double 280.5
.quad 0x4071700000000000 ; double 279
.quad 0x4071680000000000 ; double 278.5
.quad 0x4071580000000000 ; double 277.5
.quad 0x40715c0000000000 ; double 277.75
.quad 0x4071400000000000 ; double 276
.quad 0x407142147ae147ae ; double 276.13
.quad 0x4071400000000000 ; double 276
.quad 0x40714a147ae147ae ; double 276.63
.quad 0x4071500000000000 ; double 277
.quad 0x4070ee147ae147ae ; double 270.88
.quad 0x4070fe147ae147ae ; double 271.88
.quad 0x4071200000000000 ; double 274
.quad 0x4071280000000000 ; double 274.5
.quad 0x4071240000000000 ; double 274.25
.quad 0x4070da147ae147ae ; double 269.63
.quad 0x4070e00000000000 ; double 270
.quad 0x4070e40000000000 ; double 270.25
.quad 0x407102147ae147ae ; double 272.13
.quad 0x4070e80000000000 ; double 270.5
.quad 0x4071040000000000 ; double 272.25
.quad 0x4070fc0000000000 ; double 271.75
.quad 0x40714c0000000000 ; double 276.75
.quad 0x40715e147ae147ae ; double 277.88
.quad 0x4071340000000000 ; double 275.25
.quad 0x4071240000000000 ; double 274.25
.quad 0x4070d80000000000 ; double 269.5
.quad 0x4070d80000000000 ; double 269.5
.quad 0x4071100000000000 ; double 273
.quad 0x4071400000000000 ; double 276
.quad 0x4071380000000000 ; double 275.5
.quad 0x4071700000000000 ; double 279
.quad 0x40717a147ae147ae ; double 279.63
.quad 0x40713c0000000000 ; double 275.75
.quad 0x4071100000000000 ; double 273
.quad 0x4070f00000000000 ; double 271
.quad 0x4070f40000000000 ; double 271.25
.quad 0x4070d00000000000 ; double 269
.quad 0x4071000000000000 ; double 272
.quad 0x4071100000000000 ; double 273
.quad 0x407122147ae147ae ; double 274.13
.quad 0x4071480000000000 ; double 276.5
.quad 0x40712c0000000000 ; double 274.75
.quad 0x4071540000000000 ; double 277.25
.quad 0x4071880000000000 ; double 280.5
.quad 0x4071a00000000000 ; double 282
.quad 0x4071880000000000 ; double 280.5
.quad 0x4071700000000000 ; double 279
.quad 0x407182147ae147ae ; double 280.13
.quad 0x4071600000000000 ; double 278
.quad 0x4070fc0000000000 ; double 271.75
.quad 0x407106147ae147ae ; double 272.38
.quad 0x4070e6147ae147ae ; double 270.38
.quad 0x4070a40000000000 ; double 266.25
.quad 0x4070d40000000000 ; double 269.25
.quad 0x4070ce147ae147ae ; double 268.88
.quad 0x4070e80000000000 ; double 270.5
.quad 0x4070e80000000000 ; double 270.5
.quad 0x4070f40000000000 ; double 271.25
.quad 0x40710a147ae147ae ; double 272.63
.quad 0x407122147ae147ae ; double 274.13
.quad 0x4071100000000000 ; double 273
.quad 0x40711c0000000000 ; double 273.75
.quad 0x4071280000000000 ; double 274.5
.quad 0x4071000000000000 ; double 272
.quad 0x40711e147ae147ae ; double 273.88
.quad 0x40711e147ae147ae ; double 273.88
.quad 0x4070fa147ae147ae ; double 271.63
.quad 0x4071200000000000 ; double 274
.quad 0x4071380000000000 ; double 275.5
.quad 0x4071080000000000 ; double 272.5
.quad 0x4070fc0000000000 ; double 271.75
.quad 0x4071100000000000 ; double 273
.quad 0x4070ec0000000000 ; double 270.75
.quad 0x4070f6147ae147ae ; double 271.38
.quad 0x4070b80000000000 ; double 267.5
.quad 0x4070a40000000000 ; double 266.25
.quad 0x4070900000000000 ; double 265
.quad 0x4070880000000000 ; double 264.5
.quad 0x40708e147ae147ae ; double 264.88
.quad 0x40705e147ae147ae ; double 261.88
.quad 0x40704a147ae147ae ; double 260.63
.quad 0x40705c0000000000 ; double 261.75
.quad 0x4070bc0000000000 ; double 267.75
.quad 0x4070d6147ae147ae ; double 269.38
.quad 0x4070ec0000000000 ; double 270.75
.quad 0x4071080000000000 ; double 272.5
.quad 0x4071000000000000 ; double 272
.quad 0x4070c6147ae147ae ; double 268.38
.quad 0x4070a00000000000 ; double 266
.quad 0x4070780000000000 ; double 263.5
.quad 0x40703c0000000000 ; double 259.75
.quad 0x4070140000000000 ; double 257.25
.quad 0x4070800000000000 ; double 264
.quad 0x40708c0000000000 ; double 264.75
.quad 0x4070980000000000 ; double 265.5
.quad 0x4070940000000000 ; double 265.25
.quad 0x407096147ae147ae ; double 265.38
.quad 0x4070f80000000000 ; double 271.5
.quad 0x4071140000000000 ; double 273.25
.quad 0x407172147ae147ae ; double 279.13
.quad 0x40716c0000000000 ; double 278.75
.quad 0x40719a147ae147ae ; double 281.63
.quad 0x40719e147ae147ae ; double 281.88
.quad 0x407186147ae147ae ; double 280.38
.quad 0x4071900000000000 ; double 281
.quad 0x4071900000000000 ; double 281
.quad 0x4071b40000000000 ; double 283.25
.quad 0x4071b80000000000 ; double 283.5
.quad 0x4071d00000000000 ; double 285
.quad 0x4071f80000000000 ; double 287.5
.quad 0x4071f80000000000 ; double 287.5
.quad 0x4071d80000000000 ; double 285.5
.quad 0x4071e2147ae147ae ; double 286.13
.quad 0x4072080000000000 ; double 288.5
.quad 0x4071fc0000000000 ; double 287.75
.quad 0x4071ec0000000000 ; double 286.75
.quad 0x4071da147ae147ae ; double 285.63
.quad 0x4071c00000000000 ; double 284
.quad 0x4071780000000000 ; double 279.5
.quad 0x4071680000000000 ; double 278.5
.quad 0x407182147ae147ae ; double 280.13
.quad 0x40717a147ae147ae ; double 279.63
.quad 0x4071700000000000 ; double 279
.quad 0x407182147ae147ae ; double 280.13
.quad 0x407182147ae147ae ; double 280.13
.quad 0x4071680000000000 ; double 278.5
.quad 0x4071600000000000 ; double 278
.quad 0x4071540000000000 ; double 277.25
.quad 0x40712c0000000000 ; double 274.75
.quad 0x40710c0000000000 ; double 272.75
.quad 0x4070e6147ae147ae ; double 270.38
.quad 0x4071000000000000 ; double 272
.quad 0x40710c0000000000 ; double 272.75
.quad 0x40711c0000000000 ; double 273.75
.quad 0x4071180000000000 ; double 273.5
.quad 0x407132147ae147ae ; double 275.13
.quad 0x40717a147ae147ae ; double 279.63
.quad 0x4071880000000000 ; double 280.5
.quad 0x40718c0000000000 ; double 280.75
.quad 0x4071800000000000 ; double 280
.quad 0x40716e147ae147ae ; double 278.88
.quad 0x4071600000000000 ; double 278
.quad 0x40717c0000000000 ; double 279.75
.quad 0x4071680000000000 ; double 278.5
.quad 0x4071340000000000 ; double 275.25
.quad 0x4071340000000000 ; double 275.25
.quad 0x40713a147ae147ae ; double 275.63
.quad 0x4071580000000000 ; double 277.5
.quad 0x40713e147ae147ae ; double 275.88
.quad 0x40710e147ae147ae ; double 272.88
.quad 0x4071180000000000 ; double 273.5
.quad 0x40711c0000000000 ; double 273.75
.quad 0x407112147ae147ae ; double 273.13
.quad 0x4071380000000000 ; double 275.5
.quad 0x4071380000000000 ; double 275.5
.quad 0x40711e147ae147ae ; double 273.88
.quad 0x4071080000000000 ; double 272.5
.quad 0x4071080000000000 ; double 272.5
.quad 0x40710c0000000000 ; double 272.75
.quad 0x4071380000000000 ; double 275.5
.quad 0x407146147ae147ae ; double 276.38
.quad 0x4071780000000000 ; double 279.5
.quad 0x407186147ae147ae ; double 280.38
.quad 0x407186147ae147ae ; double 280.38
.quad 0x4071800000000000 ; double 280
.quad 0x4071700000000000 ; double 279
.quad 0x407176147ae147ae ; double 279.38
.quad 0x4071600000000000 ; double 278
.quad 0x4071540000000000 ; double 277.25
.quad 0x4071400000000000 ; double 276
.quad 0x4071600000000000 ; double 278
.quad 0x40716e147ae147ae ; double 278.88
.quad 0x4071400000000000 ; double 276
.quad 0x4071400000000000 ; double 276
.quad 0x40714c0000000000 ; double 276.75
.quad 0x4071340000000000 ; double 275.25
.quad 0x4071000000000000 ; double 272
.quad 0x40710e147ae147ae ; double 272.88
.quad 0x4070e80000000000 ; double 270.5
.quad 0x4070e00000000000 ; double 270
.quad 0x4070d00000000000 ; double 269
.quad 0x4070700000000000 ; double 263
.quad 0x4070480000000000 ; double 260.5
.quad 0x40704c0000000000 ; double 260.75
.quad 0x40701e147ae147ae ; double 257.88
.quad 0x406ff80000000000 ; double 255.75
.quad 0x406fe428f5c28f5c ; double 255.13
.quad 0x406fd428f5c28f5c ; double 254.63
.quad 0x406fb428f5c28f5c ; double 253.63
.quad 0x4070040000000000 ; double 256.25
.quad 0x4070280000000000 ; double 258.5
.quad 0x4070240000000000 ; double 258.25
.quad 0x4070080000000000 ; double 256.5
.quad 0x4070140000000000 ; double 257.25
.quad 0x406fc00000000000 ; double 254
.quad 0x406f980000000000 ; double 252.75
.quad 0x406f700000000000 ; double 251.5
.quad 0x406fa00000000000 ; double 253
.quad 0x4070180000000000 ; double 257.5
.quad 0x40701a147ae147ae ; double 257.63
.quad 0x406fe80000000000 ; double 255.25
.quad 0x406fc80000000000 ; double 254.25
.quad 0x406fb80000000000 ; double 253.75
.quad 0x406fe00000000000 ; double 255
.quad 0x4070100000000000 ; double 257
.quad 0x4070140000000000 ; double 257.25
.quad 0x4070380000000000 ; double 259.5
.quad 0x4070240000000000 ; double 258.25
.quad 0x406f680000000000 ; double 251.25
.quad 0x406f400000000000 ; double 250
.quad 0x406f700000000000 ; double 251.5
.quad 0x406f9c28f5c28f5c ; double 252.88
.quad 0x406f980000000000 ; double 252.75
.quad 0x406fcc28f5c28f5c ; double 254.38
.quad 0x40700c0000000000 ; double 256.75
.quad 0x4070000000000000 ; double 256
.quad 0x40703c0000000000 ; double 259.75
.quad 0x4070340000000000 ; double 259.25
.quad 0x4070400000000000 ; double 260
.quad 0x4070640000000000 ; double 262.25
.quad 0x4070600000000000 ; double 262
.quad 0x40705c0000000000 ; double 261.75
.quad 0x4070340000000000 ; double 259.25
.quad 0x4070180000000000 ; double 257.5
.quad 0x40706e147ae147ae ; double 262.88
.quad 0x40707c0000000000 ; double 263.75
.quad 0x4070ec0000000000 ; double 270.75
.quad 0x4070d80000000000 ; double 269.5
.quad 0x4070d40000000000 ; double 269.25
.quad 0x4070fc0000000000 ; double 271.75
.quad 0x4071140000000000 ; double 273.25
.quad 0x4071000000000000 ; double 272
.quad 0x4070b40000000000 ; double 267.25
.quad 0x40707c0000000000 ; double 263.75
.quad 0x40706c0000000000 ; double 262.75
.quad 0x40704c0000000000 ; double 260.75
.quad 0x40705c0000000000 ; double 261.75
.quad 0x4070480000000000 ; double 260.5
.quad 0x4070680000000000 ; double 262.5
.quad 0x407092147ae147ae ; double 265.13
.quad 0x4070600000000000 ; double 262
.quad 0x4070440000000000 ; double 260.25
.quad 0x4070340000000000 ; double 259.25
.quad 0x40703a147ae147ae ; double 259.63
.quad 0x4070680000000000 ; double 262.5
.quad 0x40706c0000000000 ; double 262.75
.quad 0x4070540000000000 ; double 261.25
.quad 0x40709e147ae147ae ; double 265.88
.quad 0x40709e147ae147ae ; double 265.88
.quad 0x4070640000000000 ; double 262.25
.quad 0x4070680000000000 ; double 262.5
.quad 0x407042147ae147ae ; double 260.13
.quad 0x4070180000000000 ; double 257.5
.quad 0x4070340000000000 ; double 259.25
.quad 0x407046147ae147ae ; double 260.38
.quad 0x40704c0000000000 ; double 260.75
.quad 0x4070340000000000 ; double 259.25
.quad 0x407006147ae147ae ; double 256.38
.quad 0x4070580000000000 ; double 261.5
.quad 0x4070680000000000 ; double 262.5
.quad 0x407072147ae147ae ; double 263.13
.quad 0x4070600000000000 ; double 262
.quad 0x4070880000000000 ; double 264.5
.quad 0x4070640000000000 ; double 262.25
.quad 0x4070140000000000 ; double 257.25
.quad 0x406fe80000000000 ; double 255.25
.quad 0x406fe00000000000 ; double 255
.quad 0x4070200000000000 ; double 258
.quad 0x4070480000000000 ; double 260.5
.quad 0x4070200000000000 ; double 258
.quad 0x40701c0000000000 ; double 257.75
.quad 0x4070140000000000 ; double 257.25
.quad 0x4070740000000000 ; double 263.25
.quad 0x4070880000000000 ; double 264.5
.quad 0x4070540000000000 ; double 261.25
.quad 0x4070340000000000 ; double 259.25
.quad 0x40703c0000000000 ; double 259.75
.quad 0x4070100000000000 ; double 257
.quad 0x406f880000000000 ; double 252.25
.quad 0x406fe00000000000 ; double 255
.quad 0x4070000000000000 ; double 256
.quad 0x406ff00000000000 ; double 255.5
.quad 0x406f600000000000 ; double 251
.quad 0x406f900000000000 ; double 252.5
.quad 0x406fa00000000000 ; double 253
.quad 0x406f400000000000 ; double 250
.quad 0x406e580000000000 ; double 242.75
.quad 0x406e5c28f5c28f5c ; double 242.88
.quad 0x406e280000000000 ; double 241.25
.quad 0x406dcc28f5c28f5c ; double 238.38
.quad 0x406d880000000000 ; double 236.25
.quad 0x406cf80000000000 ; double 231.75
.quad 0x406cd80000000000 ; double 230.75
.quad 0x406ce00000000000 ; double 231
.quad 0x406cdc28f5c28f5c ; double 230.88
.quad 0x406cc00000000000 ; double 230
.quad 0x406c500000000000 ; double 226.5
.quad 0x406c280000000000 ; double 225.25
.quad 0x406bf00000000000 ; double 223.5
.quad 0x406c1428f5c28f5c ; double 224.63
.quad 0x406bf00000000000 ; double 223.5
.quad 0x406bf00000000000 ; double 223.5
.quad 0x406b980000000000 ; double 220.75
.quad 0x406b400000000000 ; double 218
.quad 0x406b7c28f5c28f5c ; double 219.88
.quad 0x406bb00000000000 ; double 221.5
.quad 0x406b980000000000 ; double 220.75
.quad 0x406b7c28f5c28f5c ; double 219.88
.quad 0x406b1c28f5c28f5c ; double 216.88
.quad 0x406af80000000000 ; double 215.75
.quad 0x406b380000000000 ; double 217.75
.quad 0x406b400000000000 ; double 218
.quad 0x406afc28f5c28f5c ; double 215.88
.quad 0x406b200000000000 ; double 217
.quad 0x406b9428f5c28f5c ; double 220.63
.quad 0x406b8428f5c28f5c ; double 220.13
.quad 0x406b7428f5c28f5c ; double 219.63
.quad 0x406c080000000000 ; double 224.25
.quad 0x406c680000000000 ; double 227.25
.quad 0x406c500000000000 ; double 226.5
.quad 0x406c400000000000 ; double 226
.quad 0x406c280000000000 ; double 225.25
.quad 0x406bdc28f5c28f5c ; double 222.88
.quad 0x406bd80000000000 ; double 222.75
.quad 0x406bc00000000000 ; double 222
.quad 0x406b980000000000 ; double 220.75
.quad 0x406c380000000000 ; double 225.75
.quad 0x406c280000000000 ; double 225.25
.quad 0x406be80000000000 ; double 223.25
.quad 0x406c200000000000 ; double 225
.quad 0x406c180000000000 ; double 224.75
.quad 0x406b7c28f5c28f5c ; double 219.88
.quad 0x406b1c28f5c28f5c ; double 216.88
.quad 0x406b3428f5c28f5c ; double 217.63
.quad 0x406b400000000000 ; double 218
.quad 0x406b200000000000 ; double 217
.quad 0x406ac80000000000 ; double 214.25
.quad 0x406a980000000000 ; double 212.75
.quad 0x406a980000000000 ; double 212.75
.quad 0x406ab00000000000 ; double 213.5
.quad 0x406ac80000000000 ; double 214.25
.quad 0x406af00000000000 ; double 215.5
.quad 0x406a780000000000 ; double 211.75
.quad 0x406ad80000000000 ; double 214.75
.quad 0x406af80000000000 ; double 215.75
.quad 0x406ab80000000000 ; double 213.75
.quad 0x406ad80000000000 ; double 214.75
.quad 0x406a7c28f5c28f5c ; double 211.88
.quad 0x406a500000000000 ; double 210.5
.quad 0x406a900000000000 ; double 212.5
.quad 0x406a400000000000 ; double 210
.quad 0x406aa00000000000 ; double 213
.quad 0x406a500000000000 ; double 210.5
.quad 0x4069e00000000000 ; double 207
.quad 0x406a280000000000 ; double 209.25
.quad 0x4069d80000000000 ; double 206.75
.quad 0x40694c28f5c28f5c ; double 202.38
.quad 0x4069700000000000 ; double 203.5
.quad 0x4068700000000000 ; double 195.5
.quad 0x4067980000000000 ; double 188.75
.quad 0x4067b80000000000 ; double 189.75
.quad 0x4067e00000000000 ; double 191
.quad 0x4068600000000000 ; double 195
.quad 0x4068880000000000 ; double 196.25
.quad 0x4068200000000000 ; double 193
.quad 0x4068280000000000 ; double 193.25
.quad 0x4067980000000000 ; double 188.75
.quad 0x4067c80000000000 ; double 190.25
.quad 0x4067c00000000000 ; double 190
.quad 0x4067500000000000 ; double 186.5
.quad 0x40669428f5c28f5c ; double 180.63
.quad 0x4066880000000000 ; double 180.25
.quad 0x4066800000000000 ; double 180
.quad 0x4066e00000000000 ; double 183
.quad 0x4066d80000000000 ; double 182.75
.quad 0x4066980000000000 ; double 180.75
.quad 0x40673c28f5c28f5c ; double 185.88
.quad 0x4066f00000000000 ; double 183.5
.quad 0x4066c00000000000 ; double 182
.quad 0x4066f80000000000 ; double 183.75
.quad 0x4066d80000000000 ; double 182.75
.quad 0x4067500000000000 ; double 186.5
.quad 0x4067700000000000 ; double 187.5
.quad 0x4067500000000000 ; double 186.5
.quad 0x4066a00000000000 ; double 181
.quad 0x4066b80000000000 ; double 181.75
.quad 0x4066c00000000000 ; double 182
.quad 0x4066880000000000 ; double 180.25
.quad 0x4066580000000000 ; double 178.75
.quad 0x40664428f5c28f5c ; double 178.13
.quad 0x4066c80000000000 ; double 182.25
.quad 0x4066f80000000000 ; double 183.75
.quad 0x4066c80000000000 ; double 182.25
.quad 0x4066a00000000000 ; double 181
.quad 0x4066c80000000000 ; double 182.25
.quad 0x40672c28f5c28f5c ; double 185.38
.quad 0x4066f00000000000 ; double 183.5
.quad 0x4066f80000000000 ; double 183.75
.quad 0x4067380000000000 ; double 185.75
.quad 0x4067480000000000 ; double 186.25
.quad 0x4067980000000000 ; double 188.75
.quad 0x4067c80000000000 ; double 190.25
.quad 0x4067d80000000000 ; double 190.75
.quad 0x4068680000000000 ; double 195.25
.quad 0x40685c28f5c28f5c ; double 194.88
.quad 0x4068400000000000 ; double 194
.quad 0x4068080000000000 ; double 192.25
.quad 0x4068500000000000 ; double 194.5
.quad 0x4068500000000000 ; double 194.5
.quad 0x4068d80000000000 ; double 198.75
.quad 0x4068e80000000000 ; double 199.25
.quad 0x4069700000000000 ; double 203.5
.quad 0x40693c28f5c28f5c ; double 201.88
.quad 0x4069a80000000000 ; double 205.25
.quad 0x4069f80000000000 ; double 207.75
.quad 0x406a300000000000 ; double 209.5
.quad 0x406a000000000000 ; double 208
.quad 0x406a000000000000 ; double 208
.quad 0x406a280000000000 ; double 209.25
.quad 0x406a400000000000 ; double 210
.quad 0x4069d00000000000 ; double 206.5
.quad 0x4069e00000000000 ; double 207
.quad 0x406a000000000000 ; double 208
.quad 0x4069f00000000000 ; double 207.5
.quad 0x406a580000000000 ; double 210.75
.quad 0x406a600000000000 ; double 211
.quad 0x406a700000000000 ; double 211.5
.quad 0x406a500000000000 ; double 210.5
.quad 0x406aa80000000000 ; double 213.25
.quad 0x406aec28f5c28f5c ; double 215.38
.quad 0x406aa00000000000 ; double 213
.quad 0x406a480000000000 ; double 210.25
.quad 0x406a0c28f5c28f5c ; double 208.38
.quad 0x4069b00000000000 ; double 205.5
.quad 0x406a200000000000 ; double 209
.quad 0x406a100000000000 ; double 208.5
.quad 0x406a180000000000 ; double 208.75
.quad 0x406a780000000000 ; double 211.75
.quad 0x406aa80000000000 ; double 213.25
.quad 0x406a9c28f5c28f5c ; double 212.88
.quad 0x406ae00000000000 ; double 215
.quad 0x406b3428f5c28f5c ; double 217.63
.quad 0x406b400000000000 ; double 218
.quad 0x406b580000000000 ; double 218.75
.quad 0x406b780000000000 ; double 219.75
.quad 0x406b680000000000 ; double 219.25
.quad 0x406af80000000000 ; double 215.75
.quad 0x406ae00000000000 ; double 215
.quad 0x406b100000000000 ; double 216.5
.quad 0x406b600000000000 ; double 219
.quad 0x406b6c28f5c28f5c ; double 219.38
.quad 0x406b000000000000 ; double 216
.quad 0x406b100000000000 ; double 216.5
.quad 0x406b800000000000 ; double 220
.quad 0x406b680000000000 ; double 219.25
.quad 0x406b680000000000 ; double 219.25
.quad 0x406c4c28f5c28f5c ; double 226.38
.quad 0x406c6c28f5c28f5c ; double 227.38
.quad 0x406bf00000000000 ; double 223.5
.quad 0x406b580000000000 ; double 218.75
.quad 0x406ad00000000000 ; double 214.5
.quad 0x406a700000000000 ; double 211.5
.quad 0x406a900000000000 ; double 212.5
.quad 0x406adc28f5c28f5c ; double 214.88
.quad 0x406ae00000000000 ; double 215
.quad 0x406a580000000000 ; double 210.75
.quad 0x406a900000000000 ; double 212.5
.quad 0x406a400000000000 ; double 210
.quad 0x4069800000000000 ; double 204
.quad 0x4069e00000000000 ; double 207
.quad 0x406a400000000000 ; double 210
.quad 0x406a200000000000 ; double 209
.quad 0x406a780000000000 ; double 211.75
.quad 0x406af80000000000 ; double 215.75
.quad 0x406ae80000000000 ; double 215.25
.quad 0x406a9c28f5c28f5c ; double 212.88
.quad 0x406b2c28f5c28f5c ; double 217.38
.quad 0x406a980000000000 ; double 212.75
.quad 0x406a800000000000 ; double 212
.quad 0x406a300000000000 ; double 209.5
.quad 0x4069c80000000000 ; double 206.25
.quad 0x406a900000000000 ; double 212.5
.quad 0x406a500000000000 ; double 210.5
.quad 0x4069300000000000 ; double 201.5
.quad 0x4068cc28f5c28f5c ; double 198.38
.quad 0x4069100000000000 ; double 200.5
.quad 0x4069880000000000 ; double 204.25
.quad 0x4069f80000000000 ; double 207.75
.quad 0x4069f00000000000 ; double 207.5
.quad 0x406a100000000000 ; double 208.5
.quad 0x406a500000000000 ; double 210.5
.quad 0x406a800000000000 ; double 212
.quad 0x406a500000000000 ; double 210.5
.quad 0x4069c80000000000 ; double 206.25
.quad 0x406a400000000000 ; double 210
.quad 0x406b0c28f5c28f5c ; double 216.38
.quad 0x406ad80000000000 ; double 214.75
.quad 0x406b6c28f5c28f5c ; double 219.38
.quad 0x406b580000000000 ; double 218.75
.quad 0x406b480000000000 ; double 218.25
.quad 0x406b200000000000 ; double 217
.quad 0x406b000000000000 ; double 216
.quad 0x406ba00000000000 ; double 221
.quad 0x406b900000000000 ; double 220.5
.quad 0x406b8428f5c28f5c ; double 220.13
.quad 0x406b680000000000 ; double 219.25
.quad 0x406b880000000000 ; double 220.25
.quad 0x406c180000000000 ; double 224.75
.quad 0x406b800000000000 ; double 220
.quad 0x406af80000000000 ; double 215.75
.quad 0x406ac00000000000 ; double 214
.quad 0x406a900000000000 ; double 212.5
.quad 0x406a380000000000 ; double 209.75
.quad 0x406b380000000000 ; double 217.75
.quad 0x406b980000000000 ; double 220.75
.quad 0x406b5c28f5c28f5c ; double 218.88
.quad 0x406b380000000000 ; double 217.75
.quad 0x406b980000000000 ; double 220.75
.quad 0x406ba00000000000 ; double 221
.quad 0x406b480000000000 ; double 218.25
.quad 0x406a500000000000 ; double 210.5
.quad 0x40693c28f5c28f5c ; double 201.88
.quad 0x40693c28f5c28f5c ; double 201.88
.quad 0x4068b80000000000 ; double 197.75
.quad 0x4069400000000000 ; double 202
.quad 0x4068fc28f5c28f5c ; double 199.88
.quad 0x4068580000000000 ; double 194.75
.quad 0x4068200000000000 ; double 193
.quad 0x4067a00000000000 ; double 189
.quad 0x4067f00000000000 ; double 191.5
.quad 0x4067800000000000 ; double 188
.quad 0x4067800000000000 ; double 188
.quad 0x40645c28f5c28f5c ; double 162.88
.quad 0x40649428f5c28f5c ; double 164.63
.quad 0x4064a80000000000 ; double 165.25
.quad 0x4064700000000000 ; double 163.5
.quad 0x4064480000000000 ; double 162.25
.quad 0x4064000000000000 ; double 160
.quad 0x4064180000000000 ; double 160.75
.quad 0x4064900000000000 ; double 164.5
.quad 0x4065400000000000 ; double 170
.quad 0x4065500000000000 ; double 170.5
.quad 0x4065900000000000 ; double 172.5
.quad 0x4065a00000000000 ; double 173
.quad 0x4065100000000000 ; double 168.5
.quad 0x4064e428f5c28f5c ; double 167.13
.quad 0x4064d00000000000 ; double 166.5
.quad 0x4065480000000000 ; double 170.25
.quad 0x4065680000000000 ; double 171.25
.quad 0x40656c28f5c28f5c ; double 171.38
.quad 0x40651c28f5c28f5c ; double 168.88
.quad 0x4064880000000000 ; double 164.25
.quad 0x4064c00000000000 ; double 166
.quad 0x4065180000000000 ; double 168.75
.quad 0x4064f00000000000 ; double 167.5
.quad 0x4065400000000000 ; double 170
.quad 0x4065400000000000 ; double 170
.quad 0x4065600000000000 ; double 171
.quad 0x4065a80000000000 ; double 173.25
.quad 0x40657428f5c28f5c ; double 171.63
.quad 0x4065080000000000 ; double 168.25
.quad 0x4065500000000000 ; double 170.5
.quad 0x4065a00000000000 ; double 173
.quad 0x4065f428f5c28f5c ; double 175.63
.quad 0x4065c00000000000 ; double 174
.quad 0x4065100000000000 ; double 168.5
.quad 0x4064b80000000000 ; double 165.75
.quad 0x4065700000000000 ; double 171.5
.quad 0x4065980000000000 ; double 172.75
.quad 0x4065500000000000 ; double 170.5
.quad 0x4065f00000000000 ; double 175.5
.quad 0x4066580000000000 ; double 178.75
.quad 0x4066b00000000000 ; double 181.5
.quad 0x4066480000000000 ; double 178.25
.quad 0x4065d00000000000 ; double 174.5
.quad 0x4065e80000000000 ; double 175.25
.quad 0x4065a00000000000 ; double 173
.quad 0x40657c28f5c28f5c ; double 171.88
.quad 0x4065d80000000000 ; double 174.75
.quad 0x4066300000000000 ; double 177.5
.quad 0x4067000000000000 ; double 184
.quad 0x4067780000000000 ; double 187.75
.quad 0x4067480000000000 ; double 186.25
.quad 0x4067bc28f5c28f5c ; double 189.88
.quad 0x4067f80000000000 ; double 191.75
.quad 0x4067e00000000000 ; double 191
.quad 0x4067e80000000000 ; double 191.25
.quad 0x4069000000000000 ; double 200
.quad 0x4068800000000000 ; double 196
.quad 0x4067e80000000000 ; double 191.25
.quad 0x4068300000000000 ; double 193.5
.quad 0x4068480000000000 ; double 194.25
.quad 0x4068a00000000000 ; double 197
.quad 0x4067f80000000000 ; double 191.75
.quad 0x4066a80000000000 ; double 181.25
.quad 0x4066f80000000000 ; double 183.75
.quad 0x4066a80000000000 ; double 181.25
.quad 0x4067300000000000 ; double 185.5
.quad 0x4068700000000000 ; double 195.5
.quad 0x4068100000000000 ; double 192.5
.quad 0x4067580000000000 ; double 186.75
.quad 0x4066b00000000000 ; double 181.5
.quad 0x4066a00000000000 ; double 181
.quad 0x4066c00000000000 ; double 182
.quad 0x4067000000000000 ; double 184
.quad 0x40666c28f5c28f5c ; double 179.38
.quad 0x4066700000000000 ; double 179.5
.quad 0x4065800000000000 ; double 172
.quad 0x4064900000000000 ; double 164.5
.quad 0x4064980000000000 ; double 164.75
.quad 0x4063f00000000000 ; double 159.5
.quad 0x4064100000000000 ; double 160.5
.quad 0x4064700000000000 ; double 163.5
.quad 0x4064380000000000 ; double 161.75
.quad 0x4063f00000000000 ; double 159.5
.quad 0x4064d80000000000 ; double 166.75
.quad 0x4064800000000000 ; double 164
.quad 0x4065600000000000 ; double 171
.quad 0x4065000000000000 ; double 168
.quad 0x4065e00000000000 ; double 175
.quad 0x4065b00000000000 ; double 173.5
.quad 0x4065b80000000000 ; double 173.75
.quad 0x4064d00000000000 ; double 166.5
.quad 0x4065100000000000 ; double 168.5
.quad 0x4063f80000000000 ; double 159.75
.quad 0x4063a80000000000 ; double 157.25
.quad 0x4064a00000000000 ; double 165
.quad 0x4065880000000000 ; double 172.25
.quad 0x4065e80000000000 ; double 175.25
.quad 0x4066500000000000 ; double 178.5
.quad 0x4067300000000000 ; double 185.5
.quad 0x4066f00000000000 ; double 183.5
.quad 0x4066800000000000 ; double 180
.quad 0x4068100000000000 ; double 192.5
.quad 0x4068300000000000 ; double 193.5
.quad 0x4068000000000000 ; double 192
.quad 0x4068600000000000 ; double 195
.quad 0x4068480000000000 ; double 194.25
.quad 0x4068900000000000 ; double 196.5
.quad 0x4068680000000000 ; double 195.25
.quad 0x4068900000000000 ; double 196.5
.quad 0x4069000000000000 ; double 200
.quad 0x4069100000000000 ; double 200.5
.quad 0x4068980000000000 ; double 196.75
.quad 0x4069200000000000 ; double 201
.quad 0x4069500000000000 ; double 202.5
.quad 0x4069400000000000 ; double 202
.quad 0x4069780000000000 ; double 203.75
.quad 0x406a380000000000 ; double 209.75
.quad 0x406a580000000000 ; double 210.75
.quad 0x406ae00000000000 ; double 215
.quad 0x406ae00000000000 ; double 215
.quad 0x406a780000000000 ; double 211.75
.quad 0x4069dc28f5c28f5c ; double 206.88
.quad 0x4069700000000000 ; double 203.5
.quad 0x4069780000000000 ; double 203.75
.quad 0x4069f00000000000 ; double 207.5
.quad 0x4069f00000000000 ; double 207.5
.quad 0x4069700000000000 ; double 203.5
.quad 0x406ad00000000000 ; double 214.5
.quad 0x406bd80000000000 ; double 222.75
.quad 0x406bd80000000000 ; double 222.75
.quad 0x406bfc28f5c28f5c ; double 223.88
.quad 0x406b7c28f5c28f5c ; double 219.88
.quad 0x406b800000000000 ; double 220
.quad 0x406bb00000000000 ; double 221.5
.quad 0x406b380000000000 ; double 217.75
.quad 0x406b000000000000 ; double 216
.quad 0x406ba00000000000 ; double 221
.quad 0x406ae00000000000 ; double 215
.quad 0x4069180000000000 ; double 200.75
.quad 0x4069600000000000 ; double 203
.quad 0x4069600000000000 ; double 203
.quad 0x4069480000000000 ; double 202.25
.quad 0x406a2428f5c28f5c ; double 209.13
.quad 0x406aa80000000000 ; double 213.25
.quad 0x406ab00000000000 ; double 213.5
.quad 0x406ab00000000000 ; double 213.5
.quad 0x406ac00000000000 ; double 214
.quad 0x406ae00000000000 ; double 215
.quad 0x406b900000000000 ; double 220.5
.quad 0x406bb00000000000 ; double 221.5
.quad 0x406af80000000000 ; double 215.75
.quad 0x406ab80000000000 ; double 213.75
.quad 0x406b400000000000 ; double 218
.quad 0x406b500000000000 ; double 218.5
.quad 0x406b700000000000 ; double 219.5
.quad 0x406bc00000000000 ; double 222
.quad 0x406c580000000000 ; double 226.75
.quad 0x406c880000000000 ; double 228.25
.quad 0x406c400000000000 ; double 226
.quad 0x406ca00000000000 ; double 229
.quad 0x406cd00000000000 ; double 230.5
.quad 0x406d000000000000 ; double 232
.quad 0x406c780000000000 ; double 227.75
.quad 0x406bd00000000000 ; double 222.5
.quad 0x406bc00000000000 ; double 222
.quad 0x406b600000000000 ; double 219
.quad 0x406ae00000000000 ; double 215
.quad 0x406ad80000000000 ; double 214.75
.quad 0x406b3c28f5c28f5c ; double 217.88
.quad 0x406b700000000000 ; double 219.5
.quad 0x406b580000000000 ; double 218.75
.quad 0x406b180000000000 ; double 216.75
.quad 0x406b880000000000 ; double 220.25
.quad 0x406bb00000000000 ; double 221.5
.quad 0x406b980000000000 ; double 220.75
.quad 0x406b780000000000 ; double 219.75
.quad 0x406bf80000000000 ; double 223.75
.quad 0x406bf00000000000 ; double 223.5
.quad 0x406c100000000000 ; double 224.5
.quad 0x406c000000000000 ; double 224
.quad 0x406cd00000000000 ; double 230.5
.quad 0x406cd80000000000 ; double 230.75
.quad 0x406c900000000000 ; double 228.5
.quad 0x406c900000000000 ; double 228.5
.quad 0x406c100000000000 ; double 224.5
.quad 0x406c980000000000 ; double 228.75
.quad 0x406d000000000000 ; double 232
.quad 0x406cf80000000000 ; double 231.75
.quad 0x406ca00000000000 ; double 229
.quad 0x406c200000000000 ; double 225
.quad 0x406c580000000000 ; double 226.75
.quad 0x406bd00000000000 ; double 222.5
.quad 0x406c780000000000 ; double 227.75
.quad 0x406ccc28f5c28f5c ; double 230.38
.quad 0x406d100000000000 ; double 232.5
.quad 0x406d380000000000 ; double 233.75
.quad 0x406da00000000000 ; double 237
.quad 0x406d700000000000 ; double 235.5
.quad 0x406d500000000000 ; double 234.5
.quad 0x406d300000000000 ; double 233.5
.quad 0x406d000000000000 ; double 232
.quad 0x406d500000000000 ; double 234.5
.quad 0x406d600000000000 ; double 235
.quad 0x406d300000000000 ; double 233.5
.quad 0x406d900000000000 ; double 236.5
.quad 0x406e500000000000 ; double 242.5
.quad 0x406df00000000000 ; double 239.5
.quad 0x406d680000000000 ; double 235.25
.quad 0x406d900000000000 ; double 236.5
.quad 0x406e080000000000 ; double 240.25
.quad 0x406e500000000000 ; double 242.5
.quad 0x406f200000000000 ; double 249
.quad 0x406f580000000000 ; double 250.75
.quad 0x406ef80000000000 ; double 247.75
.quad 0x406e900000000000 ; double 244.5
.quad 0x406ef80000000000 ; double 247.75
.quad 0x406ed80000000000 ; double 246.75
.quad 0x406ef00000000000 ; double 247.5
.quad 0x406f580000000000 ; double 250.75
.quad 0x406f500000000000 ; double 250.5
.quad 0x406f700000000000 ; double 251.5
.quad 0x406fc00000000000 ; double 254
.quad 0x406f700000000000 ; double 251.5
.quad 0x406ed00000000000 ; double 246.5
.quad 0x406e680000000000 ; double 243.25
.quad 0x406e980000000000 ; double 244.75
.quad 0x406e900000000000 ; double 244.5
.quad 0x406e400000000000 ; double 242
.quad 0x406d980000000000 ; double 236.75
.quad 0x406dc80000000000 ; double 238.25
.quad 0x406dc00000000000 ; double 238
.quad 0x406e000000000000 ; double 240
.quad 0x406db80000000000 ; double 237.75
.quad 0x406db00000000000 ; double 237.5
.quad 0x406da00000000000 ; double 237
.quad 0x406dc00000000000 ; double 238
.quad 0x406d700000000000 ; double 235.5
.quad 0x406dc00000000000 ; double 238
.quad 0x406d300000000000 ; double 233.5
.quad 0x406cb00000000000 ; double 229.5
.quad 0x406d000000000000 ; double 232
.quad 0x406cc00000000000 ; double 230
.quad 0x406d300000000000 ; double 233.5
.quad 0x406da80000000000 ; double 237.25
.quad 0x406da80000000000 ; double 237.25
.quad 0x406dd00000000000 ; double 238.5
.quad 0x406df00000000000 ; double 239.5
.quad 0x406e180000000000 ; double 240.75
.quad 0x406e900000000000 ; double 244.5
.quad 0x406f380000000000 ; double 249.75
.quad 0x406f100000000000 ; double 248.5
.quad 0x406e880000000000 ; double 244.25
.quad 0x406f000000000000 ; double 248
.quad 0x406f180000000000 ; double 248.75
.quad 0x406f080000000000 ; double 248.25
.quad 0x406f400000000000 ; double 250
.quad 0x406f380000000000 ; double 249.75
.quad 0x406ea00000000000 ; double 245
.quad 0x406f600000000000 ; double 251
.quad 0x406f700000000000 ; double 251.5
.quad 0x406e900000000000 ; double 244.5
.quad 0x406e480000000000 ; double 242.25
.quad 0x406eb80000000000 ; double 245.75
.quad 0x406e000000000000 ; double 240
.quad 0x406d600000000000 ; double 235
.quad 0x406c880000000000 ; double 228.25
.quad 0x406cd80000000000 ; double 230.75
.quad 0x406cd80000000000 ; double 230.75
.quad 0x406dc00000000000 ; double 238
.quad 0x406eb80000000000 ; double 245.75
.quad 0x406ee00000000000 ; double 247
.quad 0x406f300000000000 ; double 249.5
.quad 0x406fe00000000000 ; double 255
.quad 0x4070040000000000 ; double 256.25
.quad 0x406f100000000000 ; double 248.5
.quad 0x406de00000000000 ; double 239
.quad 0x406e800000000000 ; double 244
.quad 0x406f600000000000 ; double 251
.quad 0x406fc00000000000 ; double 254
.quad 0x406f300000000000 ; double 249.5
.quad 0x406e900000000000 ; double 244.5
.quad 0x406f700000000000 ; double 251.5
.quad 0x406fe00000000000 ; double 255
.quad 0x406fa00000000000 ; double 253
.quad 0x4070780000000000 ; double 263.5
.quad 0x4070a00000000000 ; double 266
.quad 0x4070b00000000000 ; double 267
.quad 0x40704c0000000000 ; double 260.75
.quad 0x40701c0000000000 ; double 257.75
.quad 0x4070480000000000 ; double 260.5
.quad 0x40708c0000000000 ; double 264.75
.quad 0x4070c00000000000 ; double 268
.quad 0x4070f00000000000 ; double 271
.quad 0x4070ec0000000000 ; double 270.75
.quad 0x4070c40000000000 ; double 268.25
.quad 0x4070d80000000000 ; double 269.5
.quad 0x4071440000000000 ; double 276.25
.quad 0x40715c0000000000 ; double 277.75
.quad 0x40712c0000000000 ; double 274.75
.quad 0x4071ca147ae147ae ; double 284.63
.quad 0x4072040000000000 ; double 288.25
.quad 0x4071e00000000000 ; double 286
.quad 0x4072200000000000 ; double 290
.quad 0x4072200000000000 ; double 290
.quad 0x4071cc0000000000 ; double 284.75
.quad 0x4071a00000000000 ; double 282
.quad 0x4071dc0000000000 ; double 285.75
.quad 0x4071780000000000 ; double 279.5
.quad 0x40718c0000000000 ; double 280.75
.quad 0x4071700000000000 ; double 279
.quad 0x4071d00000000000 ; double 285
.quad 0x4071cc0000000000 ; double 284.75
.quad 0x40719c0000000000 ; double 281.75
.quad 0x4071cc0000000000 ; double 284.75
.quad 0x40720c0000000000 ; double 288.75
.quad 0x40723c0000000000 ; double 291.75
.quad 0x4072380000000000 ; double 291.5
.quad 0x40720c0000000000 ; double 288.75
.quad 0x40721c0000000000 ; double 289.75
.quad 0x4072040000000000 ; double 288.25
.quad 0x4072700000000000 ; double 295
.quad 0x4072480000000000 ; double 292.5
.quad 0x4071d00000000000 ; double 285
.quad 0x4071a00000000000 ; double 282
.quad 0x4071840000000000 ; double 280.25
.quad 0x4071c80000000000 ; double 284.5
.quad 0x4071bc0000000000 ; double 283.75
.quad 0x4071c80000000000 ; double 284.5
.quad 0x40705c0000000000 ; double 261.75
.quad 0x4070780000000000 ; double 263.5
.quad 0x4070600000000000 ; double 262
.quad 0x406f700000000000 ; double 251.5
.quad 0x406fc00000000000 ; double 254
.quad 0x4070180000000000 ; double 257.5
.quad 0x40701c0000000000 ; double 257.75
.quad 0x4070500000000000 ; double 261
.quad 0x4070880000000000 ; double 264.5
.quad 0x4070b80000000000 ; double 267.5
.quad 0x4070500000000000 ; double 261
.quad 0x40704c0000000000 ; double 260.75
.quad 0x4070880000000000 ; double 264.5
.quad 0x4070fc0000000000 ; double 271.75
.quad 0x4070ec0000000000 ; double 270.75
.quad 0x4070500000000000 ; double 261
.quad 0x4071d2147ae147ae ; double 285.13
.quad 0x4072a00000000000 ; double 298
.quad 0x4072600000000000 ; double 294
.quad 0x4072480000000000 ; double 292.5
.quad 0x4072380000000000 ; double 291.5
.quad 0x4072800000000000 ; double 296
.quad 0x4072a40000000000 ; double 298.25
.quad 0x4072ac0000000000 ; double 298.75
.quad 0x4072a00000000000 ; double 298
.quad 0x4072c80000000000 ; double 300.5
.quad 0x4072f00000000000 ; double 303
.quad 0x4073040000000000 ; double 304.25
.quad 0x4073100000000000 ; double 305
.quad 0x4073280000000000 ; double 306.5
.quad 0x4073080000000000 ; double 304.5
.quad 0x4072bc0000000000 ; double 299.75
.quad 0x4072c40000000000 ; double 300.25
.quad 0x4072c00000000000 ; double 300
.quad 0x4072cc0000000000 ; double 300.75
.quad 0x4072d40000000000 ; double 301.25
.quad 0x4072e00000000000 ; double 302
.quad 0x40730c0000000000 ; double 304.75
.quad 0x4072f80000000000 ; double 303.5
.quad 0x4072f80000000000 ; double 303.5
.quad 0x4072e00000000000 ; double 302
.quad 0x4073180000000000 ; double 305.5
.quad 0x40732c0000000000 ; double 306.75
.quad 0x4073200000000000 ; double 306
.quad 0x40736c0000000000 ; double 310.75
.quad 0x40738c0000000000 ; double 312.75
.quad 0x4073800000000000 ; double 312
.quad 0x4073a00000000000 ; double 314
.quad 0x4073ac0000000000 ; double 314.75
.quad 0x4074380000000000 ; double 323.5
.quad 0x40740c0000000000 ; double 320.75
.quad 0x40740c0000000000 ; double 320.75
.quad 0x4074240000000000 ; double 322.25
.quad 0x4074280000000000 ; double 322.5
.quad 0x4073980000000000 ; double 313.5
.quad 0x4073c80000000000 ; double 316.5
.quad 0x4073e80000000000 ; double 318.5
.quad 0x4073f40000000000 ; double 319.25
.quad 0x4073c00000000000 ; double 316
.quad 0x4073f80000000000 ; double 319.5
.quad 0x4073f00000000000 ; double 319
.quad 0x4073c80000000000 ; double 316.5
.quad 0x4073f00000000000 ; double 319
.quad 0x4074000000000000 ; double 320
.quad 0x4073a00000000000 ; double 314
.quad 0x4073180000000000 ; double 305.5
.quad 0x4072e00000000000 ; double 302
.quad 0x4072d00000000000 ; double 301
.quad 0x4073000000000000 ; double 304
.quad 0x4073a00000000000 ; double 314
.quad 0x4073f80000000000 ; double 319.5
.quad 0x4073fc0000000000 ; double 319.75
.quad 0x4073a80000000000 ; double 314.5
.quad 0x4073b80000000000 ; double 315.5
.quad 0x4073a2147ae147ae ; double 314.13
.quad 0x4074000000000000 ; double 320
.quad 0x4073f00000000000 ; double 319
.quad 0x4074240000000000 ; double 322.25
.quad 0x4073e00000000000 ; double 318
.quad 0x4073d00000000000 ; double 317
.quad 0x4073fc0000000000 ; double 319.75
.quad 0x4074880000000000 ; double 328.5
.quad 0x4074d80000000000 ; double 333.5
.quad 0x4074a00000000000 ; double 330
.quad 0x4074700000000000 ; double 327
.quad 0x4074540000000000 ; double 325.25
.quad 0x4074c80000000000 ; double 332.5
.quad 0x4073fc0000000000 ; double 319.75
.quad 0x4073d00000000000 ; double 317
.quad 0x40736c0000000000 ; double 310.75
.quad 0x4073a00000000000 ; double 314
.quad 0x4073fc0000000000 ; double 319.75
.quad 0x4074400000000000 ; double 324
.quad 0x4074880000000000 ; double 328.5
.quad 0x4079800000000000 ; double 408
.quad 0x4079080000000000 ; double 400.5
.quad 0x4078c80000000000 ; double 396.5
.quad 0x40789c0000000000 ; double 393.75
.quad 0x40782c0000000000 ; double 386.75
.quad 0x4078b00000000000 ; double 395
.quad 0x4079000000000000 ; double 400
.quad 0x4079200000000000 ; double 402
.quad 0x4079200000000000 ; double 402
.quad 0x4079380000000000 ; double 403.5
.quad 0x4079980000000000 ; double 409.5
.quad 0x4079f80000000000 ; double 415.5
.quad 0x407a700000000000 ; double 423
.quad 0x407a600000000000 ; double 422
.quad 0x407a300000000000 ; double 419
.quad 0x407a940000000000 ; double 425.25
.quad 0x407a800000000000 ; double 424
.quad 0x407a1c0000000000 ; double 417.75
.quad 0x407a1c0000000000 ; double 417.75
.quad 0x4079900000000000 ; double 409
.quad 0x4079b00000000000 ; double 411
.quad 0x4079b80000000000 ; double 411.5
.quad 0x40799c0000000000 ; double 409.75
.quad 0x407a780000000000 ; double 423.5
.quad 0x407ac40000000000 ; double 428.25
.quad 0x407b080000000000 ; double 432.5
.quad 0x407b000000000000 ; double 432
.quad 0x407a9c0000000000 ; double 425.75
.quad 0x407adc0000000000 ; double 429.75
.quad 0x407aec0000000000 ; double 430.75
.quad 0x407b040000000000 ; double 432.25
.quad 0x407b440000000000 ; double 436.25
.quad 0x407b300000000000 ; double 435
.quad 0x407ad80000000000 ; double 429.5
.quad 0x407ac00000000000 ; double 428
.quad 0x407a800000000000 ; double 424
.quad 0x407aa80000000000 ; double 426.5
.quad 0x407acc0000000000 ; double 428.75
.quad 0x407b0c0000000000 ; double 432.75
.quad 0x407b580000000000 ; double 437.5
.quad 0x407b700000000000 ; double 439
.quad 0x407b5c0000000000 ; double 437.75
.quad 0x407b680000000000 ; double 438.5
.quad 0x407b300000000000 ; double 435
.quad 0x407ad00000000000 ; double 429
.quad 0x407ace147ae147ae ; double 428.88
.quad 0x407b940000000000 ; double 441.25
.quad 0x407b800000000000 ; double 440
.quad 0x407bae147ae147ae ; double 442.88
.quad 0x407c080000000000 ; double 448.5
.quad 0x407c1c0000000000 ; double 449.75
.quad 0x407c180000000000 ; double 449.5
.quad 0x407bec0000000000 ; double 446.75
.quad 0x407bc00000000000 ; double 444
.quad 0x407ba80000000000 ; double 442.5
.quad 0x407bb40000000000 ; double 443.25
.quad 0x407bd80000000000 ; double 445.5
.quad 0x407bdc0000000000 ; double 445.75
.quad 0x407ba2147ae147ae ; double 442.13
.quad 0x407bc00000000000 ; double 444
.quad 0x407b300000000000 ; double 435
.quad 0x407af80000000000 ; double 431.5
.quad 0x407b000000000000 ; double 432
.quad 0x407b180000000000 ; double 433.5
.quad 0x407ba80000000000 ; double 442.5
.quad 0x407bd00000000000 ; double 445
.quad 0x407bf80000000000 ; double 447.5
.quad 0x407bf00000000000 ; double 447
.quad 0x407bac0000000000 ; double 442.75
.quad 0x407bf40000000000 ; double 447.25
.quad 0x407c2c0000000000 ; double 450.75
.quad 0x407c880000000000 ; double 456.5
.quad 0x407c200000000000 ; double 450
.quad 0x407be00000000000 ; double 446
.quad 0x407b700000000000 ; double 439
.quad 0x407ba00000000000 ; double 442
.quad 0x407b800000000000 ; double 440
.quad 0x407b000000000000 ; double 432
.quad 0x407b000000000000 ; double 432
.quad 0x407b600000000000 ; double 438
.quad 0x407b5c0000000000 ; double 437.75
.quad 0x407bd00000000000 ; double 445
.quad 0x407ba00000000000 ; double 442
.quad 0x407b600000000000 ; double 438
.quad 0x407c080000000000 ; double 448.5
.quad 0x407bcc0000000000 ; double 444.75
.quad 0x407b700000000000 ; double 439
.quad 0x407b700000000000 ; double 439
.quad 0x407ae00000000000 ; double 430
.quad 0x407a840000000000 ; double 424.25
.quad 0x407a8c0000000000 ; double 424.75
.quad 0x407ad00000000000 ; double 429
.quad 0x407a900000000000 ; double 425
.quad 0x407a700000000000 ; double 423
.quad 0x4079f80000000000 ; double 415.5
.quad 0x407a340000000000 ; double 419.25
.quad 0x407a400000000000 ; double 420
.quad 0x407a380000000000 ; double 419.5
.quad 0x4079c80000000000 ; double 412.5
.quad 0x4079dc0000000000 ; double 413.75
.quad 0x4079980000000000 ; double 409.5
.quad 0x4079300000000000 ; double 403
.quad 0x4078f00000000000 ; double 399
.quad 0x4078b80000000000 ; double 395.5
.quad 0x4078680000000000 ; double 390.5
.quad 0x40785c0000000000 ; double 389.75
.quad 0x40788c0000000000 ; double 392.75
.quad 0x40789c0000000000 ; double 393.75
.quad 0x4078d80000000000 ; double 397.5
.quad 0x4079100000000000 ; double 401
.quad 0x4078d00000000000 ; double 397
.quad 0x4078f00000000000 ; double 399
.quad 0x40793c0000000000 ; double 403.75
.quad 0x4079500000000000 ; double 405
.quad 0x4079480000000000 ; double 404.5
.quad 0x4078d80000000000 ; double 397.5
.quad 0x4078f00000000000 ; double 399
.quad 0x4079000000000000 ; double 400
.quad 0x40790c0000000000 ; double 400.75
.quad 0x4079100000000000 ; double 401
.quad 0x4078740000000000 ; double 391.25
.quad 0x4078300000000000 ; double 387
.quad 0x4078240000000000 ; double 386.25
.quad 0x4078340000000000 ; double 387.25
.quad 0x4078a40000000000 ; double 394.25
.quad 0x4078a80000000000 ; double 394.5
.quad 0x4078400000000000 ; double 388
.quad 0x4078180000000000 ; double 385.5
.quad 0x4078140000000000 ; double 385.25
.quad 0x4078180000000000 ; double 385.5
.quad 0x4078600000000000 ; double 390
.quad 0x4078400000000000 ; double 388
.quad 0x4077900000000000 ; double 377
.quad 0x40776c0000000000 ; double 374.75
.quad 0x4077840000000000 ; double 376.25
.quad 0x4077f00000000000 ; double 383
.quad 0x4078480000000000 ; double 388.5
.quad 0x40786e147ae147ae ; double 390.88
.quad 0x4078700000000000 ; double 391
.quad 0x40785c0000000000 ; double 389.75
.quad 0x4078180000000000 ; double 385.5
.quad 0x4077d00000000000 ; double 381
.quad 0x4077cc0000000000 ; double 380.75
.quad 0x4077f80000000000 ; double 383.5
.quad 0x40784c0000000000 ; double 388.75
.quad 0x4078680000000000 ; double 390.5
.quad 0x4078880000000000 ; double 392.5
.quad 0x4078300000000000 ; double 387
.quad 0x4077b40000000000 ; double 379.25
.quad 0x4077bc0000000000 ; double 379.75
.quad 0x4077400000000000 ; double 372
.quad 0x4077c00000000000 ; double 380
.quad 0x4077f80000000000 ; double 383.5
.quad 0x4078480000000000 ; double 388.5
.quad 0x4078a40000000000 ; double 394.25
.quad 0x4079300000000000 ; double 403
.quad 0x407936147ae147ae ; double 403.38
.quad 0x40793c0000000000 ; double 403.75
.quad 0x4079380000000000 ; double 403.5
.quad 0x40797e147ae147ae ; double 407.88
.quad 0x4079780000000000 ; double 407.5
.quad 0x40796c0000000000 ; double 406.75
.quad 0x40796c0000000000 ; double 406.75
.quad 0x40795c0000000000 ; double 405.75
.quad 0x4079700000000000 ; double 407
.quad 0x4079180000000000 ; double 401.5
.quad 0x4078f00000000000 ; double 399
.quad 0x4078fc0000000000 ; double 399.75
.quad 0x4079100000000000 ; double 401
.quad 0x40790e147ae147ae ; double 400.88
.quad 0x4078f40000000000 ; double 399.25
.quad 0x4078d00000000000 ; double 397
.quad 0x4079040000000000 ; double 400.25
.quad 0x4079080000000000 ; double 400.5
.quad 0x4078e00000000000 ; double 398
.quad 0x4078e80000000000 ; double 398.5
.quad 0x4078dc0000000000 ; double 397.75
.quad 0x4079080000000000 ; double 400.5
.quad 0x4079300000000000 ; double 403
.quad 0x4079580000000000 ; double 405.5
.quad 0x40796c0000000000 ; double 406.75
.quad 0x40799c0000000000 ; double 409.75
.quad 0x4079940000000000 ; double 409.25
.quad 0x4079980000000000 ; double 409.5
.quad 0x4079900000000000 ; double 409
.quad 0x4079780000000000 ; double 407.5
.quad 0x4079800000000000 ; double 408
.quad 0x4079d80000000000 ; double 413.5
.quad 0x4079e00000000000 ; double 414
.quad 0x4079e80000000000 ; double 414.5
.quad 0x4079dc0000000000 ; double 413.75
.quad 0x407a080000000000 ; double 416.5
.quad 0x407a140000000000 ; double 417.25
.quad 0x407a580000000000 ; double 421.5
.quad 0x407a6c0000000000 ; double 422.75
.quad 0x407aac0000000000 ; double 426.75
.quad 0x407a980000000000 ; double 425.5
.quad 0x407a900000000000 ; double 425
.quad 0x407a780000000000 ; double 423.5
.quad 0x407a300000000000 ; double 419
.quad 0x407a440000000000 ; double 420.25
.quad 0x407a100000000000 ; double 417
.quad 0x407a080000000000 ; double 416.5
.quad 0x4079b80000000000 ; double 411.5
.quad 0x4079a00000000000 ; double 410
.quad 0x40790c0000000000 ; double 400.75
.quad 0x4078fc0000000000 ; double 399.75
.quad 0x4078e00000000000 ; double 398
.quad 0x4078e2147ae147ae ; double 398.13
.quad 0x4079080000000000 ; double 400.5
.quad 0x4079100000000000 ; double 401
.quad 0x4078c80000000000 ; double 396.5
.quad 0x4078780000000000 ; double 391.5
.quad 0x4078a00000000000 ; double 394
.quad 0x40784c0000000000 ; double 388.75
.quad 0x4078bc0000000000 ; double 395.75
.quad 0x4079100000000000 ; double 401
.quad 0x4078d00000000000 ; double 397
.quad 0x4078ec0000000000 ; double 398.75
.quad 0x4078f80000000000 ; double 399.5
.quad 0x4079400000000000 ; double 404
.quad 0x4079580000000000 ; double 405.5
.quad 0x407952147ae147ae ; double 405.13
.quad 0x40791c0000000000 ; double 401.75
.quad 0x4078b40000000000 ; double 395.25
.quad 0x4078a40000000000 ; double 394.25
.quad 0x4078800000000000 ; double 392
.quad 0x4078a40000000000 ; double 394.25
.quad 0x4078b00000000000 ; double 395
.quad 0x4078980000000000 ; double 393.5
.quad 0x4078d40000000000 ; double 397.25
.quad 0x4078c00000000000 ; double 396
.quad 0x40790c0000000000 ; double 400.75
.quad 0x4078e00000000000 ; double 398
.quad 0x4078ec0000000000 ; double 398.75
.quad 0x4078f80000000000 ; double 399.5
.quad 0x40790c0000000000 ; double 400.75
.quad 0x4078fc0000000000 ; double 399.75
.quad 0x4078900000000000 ; double 393
.quad 0x4078940000000000 ; double 393.25
.quad 0x4078aa147ae147ae ; double 394.63
.quad 0x4078d00000000000 ; double 397
.quad 0x4078b80000000000 ; double 395.5
.quad 0x4078c80000000000 ; double 396.5
.quad 0x4078d80000000000 ; double 397.5
.quad 0x4079100000000000 ; double 401
.quad 0x4079100000000000 ; double 401
.quad 0x4078f00000000000 ; double 399
.quad 0x4079340000000000 ; double 403.25
.quad 0x4079600000000000 ; double 406
.quad 0x4079400000000000 ; double 404
.quad 0x40794c0000000000 ; double 404.75
.quad 0x4079280000000000 ; double 402.5
.quad 0x4079180000000000 ; double 401.5
.quad 0x4078e00000000000 ; double 398
.quad 0x4078b00000000000 ; double 395
.quad 0x4078580000000000 ; double 389.5
.quad 0x4078300000000000 ; double 387
.quad 0x40782c0000000000 ; double 386.75
.quad 0x4078280000000000 ; double 386.5
.quad 0x4078280000000000 ; double 386.5
.quad 0x4078200000000000 ; double 386
.quad 0x40781c0000000000 ; double 385.75
.quad 0x4078300000000000 ; double 387
.quad 0x40785c0000000000 ; double 389.75
.quad 0x4078100000000000 ; double 385
.quad 0x4077fc0000000000 ; double 383.75
.quad 0x4078200000000000 ; double 386
.quad 0x4078240000000000 ; double 386.25
.quad 0x4078300000000000 ; double 387
.quad 0x4078080000000000 ; double 384.5
.quad 0x4078100000000000 ; double 385
.quad 0x4077f80000000000 ; double 383.5
.quad 0x4078180000000000 ; double 385.5
.quad 0x4078540000000000 ; double 389.25
.quad 0x40786c0000000000 ; double 390.75
.quad 0x4078cc0000000000 ; double 396.75
.quad 0x4078f80000000000 ; double 399.5
.quad 0x4078b00000000000 ; double 395
.quad 0x4078e00000000000 ; double 398
.quad 0x4078e00000000000 ; double 398
.quad 0x4078fc0000000000 ; double 399.75
.quad 0x4078980000000000 ; double 393.5
.quad 0x40789c0000000000 ; double 393.75
.quad 0x4078700000000000 ; double 391
.quad 0x4078980000000000 ; double 393.5
.quad 0x40789c0000000000 ; double 393.75
.quad 0x4078580000000000 ; double 389.5
.quad 0x40783c0000000000 ; double 387.75
.quad 0x4077f80000000000 ; double 383.5
.quad 0x4077fa147ae147ae ; double 383.63
.quad 0x4078200000000000 ; double 386
.quad 0x4078100000000000 ; double 385
.quad 0x4078000000000000 ; double 384
.quad 0x4077e80000000000 ; double 382.5
.quad 0x4077cc0000000000 ; double 380.75
.quad 0x4077b00000000000 ; double 379
.quad 0x4077d00000000000 ; double 381
.quad 0x4077900000000000 ; double 377
.quad 0x4077600000000000 ; double 374
.quad 0x4077680000000000 ; double 374.5
.quad 0x4077600000000000 ; double 374
.quad 0x40779c0000000000 ; double 377.75
.quad 0x4077e00000000000 ; double 382
.quad 0x4078100000000000 ; double 385
.quad 0x4078080000000000 ; double 384.5
.quad 0x4077fc0000000000 ; double 383.75
.quad 0x4077dc0000000000 ; double 381.75
.quad 0x4077ac0000000000 ; double 378.75
.quad 0x4077c80000000000 ; double 380.5
.quad 0x40775c0000000000 ; double 373.75
.quad 0x40772c0000000000 ; double 370.75
.quad 0x40773c0000000000 ; double 371.75
.quad 0x4077500000000000 ; double 373
.quad 0x4077300000000000 ; double 371
.quad 0x40773c0000000000 ; double 371.75
.quad 0x4077580000000000 ; double 373.5
.quad 0x40770c0000000000 ; double 368.75
.quad 0x4077400000000000 ; double 372
.quad 0x4077200000000000 ; double 370
.quad 0x4077240000000000 ; double 370.25
.quad 0x40774c0000000000 ; double 372.75
.quad 0x40776c0000000000 ; double 374.75
.quad 0x40776c0000000000 ; double 374.75
.quad 0x4077700000000000 ; double 375
.quad 0x4077440000000000 ; double 372.25
.quad 0x4077300000000000 ; double 371
.quad 0x4077440000000000 ; double 372.25
.quad 0x4077700000000000 ; double 375
.quad 0x40776e147ae147ae ; double 374.88
.quad 0x4077100000000000 ; double 369
.quad 0x40771e147ae147ae ; double 369.88
.quad 0x4077400000000000 ; double 372
.quad 0x4077140000000000 ; double 369.25
.quad 0x4077200000000000 ; double 370
.quad 0x4077200000000000 ; double 370
.quad 0x4076e00000000000 ; double 366
.quad 0x4077200000000000 ; double 370
.quad 0x4076d00000000000 ; double 365
.quad 0x4076340000000000 ; double 355.25
.quad 0x4076180000000000 ; double 353.5
.quad 0x4075ec0000000000 ; double 350.75
.quad 0x4075e00000000000 ; double 350
.quad 0x4075540000000000 ; double 341.25
.quad 0x4075b80000000000 ; double 347.5
.quad 0x4075540000000000 ; double 341.25
.quad 0x40753c0000000000 ; double 339.75
.quad 0x40756c0000000000 ; double 342.75
.quad 0x4075580000000000 ; double 341.5
.quad 0x4075780000000000 ; double 343.5
.quad 0x4075400000000000 ; double 340
.quad 0x4075280000000000 ; double 338.5
.quad 0x4075200000000000 ; double 338
.quad 0x4075140000000000 ; double 337.25
.quad 0x4075400000000000 ; double 340
.quad 0x40753c0000000000 ; double 339.75
.quad 0x4075080000000000 ; double 336.5
.quad 0x40753c0000000000 ; double 339.75
.quad 0x4075900000000000 ; double 345
.quad 0x4075b00000000000 ; double 347
.quad 0x4075c80000000000 ; double 348.5
.quad 0x40753c0000000000 ; double 339.75
.quad 0x40753c0000000000 ; double 339.75
.quad 0x4074800000000000 ; double 328
.quad 0x4074540000000000 ; double 325.25
.quad 0x4074800000000000 ; double 328
.quad 0x40744c0000000000 ; double 324.75
.quad 0x4073e80000000000 ; double 318.5
.quad 0x4073d40000000000 ; double 317.25
.quad 0x4073d40000000000 ; double 317.25
.quad 0x40740c0000000000 ; double 320.75
.quad 0x4074180000000000 ; double 321.5
.quad 0x4073800000000000 ; double 312
.quad 0x4073600000000000 ; double 310
.quad 0x4073180000000000 ; double 305.5
.quad 0x4072e80000000000 ; double 302.5
.quad 0x4072ac0000000000 ; double 298.75
.quad 0x4072600000000000 ; double 294
.quad 0x4072540000000000 ; double 293.25
.quad 0x4072740000000000 ; double 295.25
.quad 0x40727c0000000000 ; double 295.75
.quad 0x4072d00000000000 ; double 301
.quad 0x4072dc0000000000 ; double 301.75
.quad 0x4072e00000000000 ; double 302
.quad 0x4072700000000000 ; double 295
.quad 0x4072880000000000 ; double 296.5
.quad 0x4072800000000000 ; double 296
.quad 0x4072bc0000000000 ; double 299.75
.quad 0x4072e00000000000 ; double 302
.quad 0x4072c00000000000 ; double 300
.quad 0x4072c00000000000 ; double 300
.quad 0x4073280000000000 ; double 306.5
.quad 0x4072f80000000000 ; double 303.5
.quad 0x40727c0000000000 ; double 295.75
.quad 0x4072c00000000000 ; double 300
.quad 0x4072d00000000000 ; double 301
.quad 0x4072d00000000000 ; double 301
.quad 0x4072c80000000000 ; double 300.5
.quad 0x40730c0000000000 ; double 304.75
.quad 0x4072f40000000000 ; double 303.25
.quad 0x4073400000000000 ; double 308
.quad 0x40731c0000000000 ; double 305.75
.quad 0x4073480000000000 ; double 308.5
.quad 0x4073540000000000 ; double 309.25
.quad 0x4073540000000000 ; double 309.25
.quad 0x4073540000000000 ; double 309.25
.quad 0x4073580000000000 ; double 309.5
.quad 0x40739a147ae147ae ; double 313.63
.quad 0x4073b40000000000 ; double 315.25
.quad 0x4073200000000000 ; double 306
.quad 0x4073540000000000 ; double 309.25
.quad 0x4073880000000000 ; double 312.5
.quad 0x4073500000000000 ; double 309
.quad 0x40733c0000000000 ; double 307.75
.quad 0x4073400000000000 ; double 308
.quad 0x4073200000000000 ; double 306
.quad 0x4073230a3d70a3d7 ; double 306.19
.quad 0x4073040000000000 ; double 304.25
.quad 0x4073280000000000 ; double 306.5
.quad 0x4073080000000000 ; double 304.5
.quad 0x4073580000000000 ; double 309.5
.quad 0x40734c0000000000 ; double 308.75
.quad 0x40734c0000000000 ; double 308.75
.quad 0x4073280000000000 ; double 306.5
.quad 0x4073080000000000 ; double 304.5
.quad 0x4072ec0000000000 ; double 302.75
.quad 0x4072e80000000000 ; double 302.5
.quad 0x4072fc0000000000 ; double 303.75
.quad 0x4072c40000000000 ; double 300.25
.quad 0x4073000000000000 ; double 304
.quad 0x40731c0000000000 ; double 305.75
.quad 0x40736c0000000000 ; double 310.75
.quad 0x40737c0000000000 ; double 311.75
.quad 0x4073900000000000 ; double 313
.quad 0x4073400000000000 ; double 308
.quad 0x4072dc0000000000 ; double 301.75
.quad 0x40731c0000000000 ; double 305.75
.quad 0x4073380000000000 ; double 307.5
.quad 0x4073cc0000000000 ; double 316.75
.quad 0x4073d80000000000 ; double 317.5
.quad 0x4073b00000000000 ; double 315
.quad 0x4073e6147ae147ae ; double 318.38
.quad 0x4073ee147ae147ae ; double 318.88
.quad 0x4073ac0000000000 ; double 314.75
.quad 0x4073600000000000 ; double 310
.quad 0x4073600000000000 ; double 310
.quad 0x4073b00000000000 ; double 315
.quad 0x4073e00000000000 ; double 318
.quad 0x4074000000000000 ; double 320
.quad 0x4072880000000000 ; double 296.5
.quad 0x4072980000000000 ; double 297.5
.quad 0x4072380000000000 ; double 291.5
.quad 0x4071f00000000000 ; double 287
.quad 0x4071dc0000000000 ; double 285.75
.quad 0x4071fc0000000000 ; double 287.75
.quad 0x40720c0000000000 ; double 288.75
.quad 0x40724c0000000000 ; double 292.75
.quad 0x40726c0000000000 ; double 294.75
.quad 0x4072800000000000 ; double 296
.quad 0x40729c0000000000 ; double 297.75
.quad 0x4072be147ae147ae ; double 299.88
.quad 0x4072d00000000000 ; double 301
.quad 0x4072800000000000 ; double 296
.quad 0x4072b80000000000 ; double 299.5
.quad 0x4072b80000000000 ; double 299.5
.quad 0x4072dc0000000000 ; double 301.75
.quad 0x4072dc0000000000 ; double 301.75
.quad 0x4072de147ae147ae ; double 301.88
.quad 0x40728c0000000000 ; double 296.75
.quad 0x4072d80000000000 ; double 301.5
.quad 0x4072e00000000000 ; double 302
.quad 0x4072d00000000000 ; double 301
.quad 0x4073a80000000000 ; double 314.5
.quad 0x4073c00000000000 ; double 316
.quad 0x4073c80000000000 ; double 316.5
.quad 0x4073980000000000 ; double 313.5
.quad 0x4073d40000000000 ; double 317.25
.quad 0x4073d80000000000 ; double 317.5
.quad 0x4073f40000000000 ; double 319.25
.quad 0x4073e80000000000 ; double 318.5
.quad 0x4073d80000000000 ; double 317.5
.quad 0x4073a40000000000 ; double 314.25
.quad 0x4073880000000000 ; double 312.5
.quad 0x4073800000000000 ; double 312
.quad 0x4073bc0000000000 ; double 315.75
.quad 0x4073c40000000000 ; double 316.25
.quad 0x4073d80000000000 ; double 317.5
.quad 0x4073cc0000000000 ; double 316.75
.quad 0x407402147ae147ae ; double 320.13
.quad 0x40743c0000000000 ; double 323.75
.quad 0x4074500000000000 ; double 325
.quad 0x4074280000000000 ; double 322.5
.quad 0x4074380000000000 ; double 323.5
.quad 0x40745c0000000000 ; double 325.75
.quad 0x4074200000000000 ; double 322
.quad 0x4073e80000000000 ; double 318.5
.quad 0x4073f80000000000 ; double 319.5
.quad 0x40743c0000000000 ; double 323.75
.quad 0x4074900000000000 ; double 329
.quad 0x40747c0000000000 ; double 327.75
.quad 0x4074900000000000 ; double 329
.quad 0x4074b80000000000 ; double 331.5
.quad 0x4074d00000000000 ; double 333
.quad 0x4075200000000000 ; double 338
.quad 0x40753c0000000000 ; double 339.75
.quad 0x4075400000000000 ; double 340
.quad 0x4075700000000000 ; double 343
.quad 0x4075b80000000000 ; double 347.5
.quad 0x4075dc0000000000 ; double 349.75
.quad 0x407562147ae147ae ; double 342.13
.quad 0x4075380000000000 ; double 339.5
.quad 0x4075880000000000 ; double 344.5
.quad 0x4075300000000000 ; double 339
.quad 0x40757c0000000000 ; double 343.75
.quad 0x4075880000000000 ; double 344.5
.quad 0x4075940000000000 ; double 345.25
.quad 0x4075b00000000000 ; double 347
.quad 0x4075d80000000000 ; double 349.5
.quad 0x40762c0000000000 ; double 354.75
.quad 0x4076100000000000 ; double 353
.quad 0x4076380000000000 ; double 355.5
.quad 0x4076300000000000 ; double 355
.quad 0x4076940000000000 ; double 361.25
.quad 0x4076a00000000000 ; double 362
.quad 0x4076940000000000 ; double 361.25
.quad 0x4076680000000000 ; double 358.5
.quad 0x4076600000000000 ; double 358
.quad 0x40764c0000000000 ; double 356.75
.quad 0x4076080000000000 ; double 352.5
.quad 0x40763c0000000000 ; double 355.75
.quad 0x4076400000000000 ; double 356
.quad 0x4076340000000000 ; double 355.25
.quad 0x40761c0000000000 ; double 353.75
.quad 0x40766c0000000000 ; double 358.75
.quad 0x4076740000000000 ; double 359.25
.quad 0x4076940000000000 ; double 361.25
.quad 0x4076880000000000 ; double 360.5
.quad 0x4076a00000000000 ; double 362
.quad 0x4076c00000000000 ; double 364
.quad 0x4076ac0000000000 ; double 362.75
.quad 0x40768c0000000000 ; double 360.75
.quad 0x4076700000000000 ; double 359
.quad 0x4076600000000000 ; double 358
.quad 0x40767c0000000000 ; double 359.75
.quad 0x4076640000000000 ; double 358.25
.quad 0x4076800000000000 ; double 360
.quad 0x4076700000000000 ; double 359
.quad 0x4076440000000000 ; double 356.25
.quad 0x4076500000000000 ; double 357
.quad 0x4076780000000000 ; double 359.5
.quad 0x40769c0000000000 ; double 361.75
.quad 0x4076a40000000000 ; double 362.25
.quad 0x4076dc0000000000 ; double 365.75
.quad 0x4076c00000000000 ; double 364
.quad 0x4076b40000000000 ; double 363.25
.quad 0x4076cc0000000000 ; double 364.75
.quad 0x4076200000000000 ; double 354
.quad 0x4075d80000000000 ; double 349.5
.quad 0x4075e40000000000 ; double 350.25
.quad 0x4075ec0000000000 ; double 350.75
.quad 0x4075c80000000000 ; double 348.5
.quad 0x4075980000000000 ; double 345.5
.quad 0x40756c0000000000 ; double 342.75
.quad 0x4075240000000000 ; double 338.25
.quad 0x4075280000000000 ; double 338.5
.quad 0x4075340000000000 ; double 339.25
.quad 0x4075080000000000 ; double 336.5
.quad 0x40752c0000000000 ; double 338.75
.quad 0x4075380000000000 ; double 339.5
.quad 0x4074f80000000000 ; double 335.5
.quad 0x4074b00000000000 ; double 331
.quad 0x4074bc0000000000 ; double 331.75
.quad 0x4075140000000000 ; double 337.25
.quad 0x407546147ae147ae ; double 340.38
.quad 0x4075780000000000 ; double 343.5
.quad 0x4075380000000000 ; double 339.5
.quad 0x4075380000000000 ; double 339.5
.quad 0x4075200000000000 ; double 338
.quad 0x4075540000000000 ; double 341.25
.quad 0x40755c0000000000 ; double 341.75
.quad 0x4075180000000000 ; double 337.5
.quad 0x4074b00000000000 ; double 331
.quad 0x4074940000000000 ; double 329.25
.quad 0x40748c0000000000 ; double 328.75
.quad 0x4074800000000000 ; double 328
.quad 0x4074580000000000 ; double 325.5
.quad 0x4074100000000000 ; double 321
.quad 0x4074600000000000 ; double 326
.quad 0x40747c0000000000 ; double 327.75
.quad 0x40741c0000000000 ; double 321.75
.quad 0x4073dc0000000000 ; double 317.75
.quad 0x4073c00000000000 ; double 316
.quad 0x4073ac0000000000 ; double 314.75
.quad 0x4073a00000000000 ; double 314
.quad 0x40739c0000000000 ; double 313.75
.quad 0x4073b00000000000 ; double 315
.quad 0x4073c00000000000 ; double 316
.quad 0x4073dc0000000000 ; double 317.75
.quad 0x4073c80000000000 ; double 316.5
.quad 0x4073a00000000000 ; double 314
.quad 0x4073cc0000000000 ; double 316.75
.quad 0x4073f80000000000 ; double 319.5
.quad 0x4074080000000000 ; double 320.5
.quad 0x4073e40000000000 ; double 318.25
.quad 0x4073cc0000000000 ; double 316.75
.quad 0x4073fc0000000000 ; double 319.75
.quad 0x40740c0000000000 ; double 320.75
.quad 0x4074080000000000 ; double 320.5
.quad 0x4073e80000000000 ; double 318.5
.quad 0x4073bc0000000000 ; double 315.75
.quad 0x4073cc0000000000 ; double 316.75
.quad 0x4073ec0000000000 ; double 318.75
.quad 0x4073e40000000000 ; double 318.25
.quad 0x4073cc0000000000 ; double 316.75
.quad 0x4073bc0000000000 ; double 315.75
.quad 0x4073a00000000000 ; double 314
.quad 0x4073780000000000 ; double 311.5
.quad 0x4073cc0000000000 ; double 316.75
.quad 0x4073f00000000000 ; double 319
.quad 0x4073fe147ae147ae ; double 319.88
.quad 0x4073e00000000000 ; double 318
.quad 0x4073f40000000000 ; double 319.25
.quad 0x4073c00000000000 ; double 316
.quad 0x4073ac0000000000 ; double 314.75
.quad 0x4073b00000000000 ; double 315
.quad 0x4073a40000000000 ; double 314.25
.quad 0x4073500000000000 ; double 309
.quad 0x4073500000000000 ; double 309
.quad 0x4073480000000000 ; double 308.5
.quad 0x4072cc0000000000 ; double 300.75
.quad 0x4072c00000000000 ; double 300
.quad 0x4072c80000000000 ; double 300.5
.quad 0x40728c0000000000 ; double 296.75
.quad 0x4072500000000000 ; double 293
.quad 0x4072480000000000 ; double 292.5
.quad 0x4072740000000000 ; double 295.25
.quad 0x4072540000000000 ; double 293.25
.quad 0x4072800000000000 ; double 296
.quad 0x4072a80000000000 ; double 298.5
.quad 0x4072f00000000000 ; double 303
.quad 0x4072c00000000000 ; double 300
.quad 0x4072b00000000000 ; double 299
.quad 0x4072600000000000 ; double 294
.quad 0x4072880000000000 ; double 296.5
.quad 0x4072c80000000000 ; double 300.5
.quad 0x4072cc0000000000 ; double 300.75
.quad 0x4072bc0000000000 ; double 299.75
.quad 0x4072900000000000 ; double 297
.quad 0x4072c00000000000 ; double 300
.quad 0x4072940000000000 ; double 297.25
.quad 0x4072300000000000 ; double 291
.quad 0x40726c0000000000 ; double 294.75
.quad 0x4072740000000000 ; double 295.25
.quad 0x4072180000000000 ; double 289.5
.quad 0x4072780000000000 ; double 295.5
.quad 0x4072400000000000 ; double 292
.quad 0x4071ec0000000000 ; double 286.75
.quad 0x4072580000000000 ; double 293.5
.quad 0x40726e147ae147ae ; double 294.88
.quad 0x40726c0000000000 ; double 294.75
.quad 0x4072500000000000 ; double 293
.quad 0x40726c0000000000 ; double 294.75
.quad 0x4072a80000000000 ; double 298.5
.quad 0x4073100000000000 ; double 305
.quad 0x4073300000000000 ; double 307
.quad 0x40735c0000000000 ; double 309.75
.quad 0x4073400000000000 ; double 308
.quad 0x4072d00000000000 ; double 301
.quad 0x4072900000000000 ; double 297
.quad 0x4072780000000000 ; double 295.5
.quad 0x40724c0000000000 ; double 292.75
.quad 0x4072400000000000 ; double 292
.quad 0x4072340000000000 ; double 291.25
.quad 0x4072080000000000 ; double 288.5
.quad 0x4071900000000000 ; double 281
.quad 0x4071340000000000 ; double 275.25
.quad 0x407186147ae147ae ; double 280.38
.quad 0x4071cc0000000000 ; double 284.75
.quad 0x4071500000000000 ; double 277
.quad 0x4070f80000000000 ; double 271.5
.quad 0x4070a80000000000 ; double 266.5
.quad 0x4070e80000000000 ; double 270.5
.quad 0x4070f00000000000 ; double 271
.quad 0x4070ec0000000000 ; double 270.75
.quad 0x40714c0000000000 ; double 276.75
.quad 0x4071300000000000 ; double 275
.quad 0x4071200000000000 ; double 274
.quad 0x4070e40000000000 ; double 270.25
.quad 0x4070800000000000 ; double 264
.quad 0x4070940000000000 ; double 265.25
.quad 0x4070d80000000000 ; double 269.5
.quad 0x4070f80000000000 ; double 271.5
.quad 0x4070bc0000000000 ; double 267.75
.quad 0x4070e00000000000 ; double 270
.quad 0x4070c00000000000 ; double 268
.quad 0x40706c0000000000 ; double 262.75
.quad 0x406f400000000000 ; double 250
.quad 0x406e980000000000 ; double 244.75
.quad 0x406e580000000000 ; double 242.75
.quad 0x406e080000000000 ; double 240.25
.quad 0x406d680000000000 ; double 235.25
.quad 0x406cbc28f5c28f5c ; double 229.88
.quad 0x406cc00000000000 ; double 230
.quad 0x406d780000000000 ; double 235.75
.quad 0x406df80000000000 ; double 239.75
.quad 0x406eb00000000000 ; double 245.5
.quad 0x406f200000000000 ; double 249
.quad 0x406f380000000000 ; double 249.75
.quad 0x406f580000000000 ; double 250.75
.quad 0x406f500000000000 ; double 250.5
.quad 0x406f7c28f5c28f5c ; double 251.88
.quad 0x406fa80000000000 ; double 253.25
.quad 0x406fd80000000000 ; double 254.75
.quad 0x406fc00000000000 ; double 254
.quad 0x406fb80000000000 ; double 253.75
.quad 0x406fe00000000000 ; double 255
.quad 0x4070000000000000 ; double 256
.quad 0x4070280000000000 ; double 258.5
.quad 0x4070300000000000 ; double 259
.quad 0x4070240000000000 ; double 258.25
.quad 0x4070680000000000 ; double 262.5
.quad 0x4070400000000000 ; double 260
.quad 0x406ff00000000000 ; double 255.5
.quad 0x406fa00000000000 ; double 253
.quad 0x406f780000000000 ; double 251.75
.quad 0x406f800000000000 ; double 252
.quad 0x406fd00000000000 ; double 254.5
.quad 0x4070000000000000 ; double 256
.quad 0x406f900000000000 ; double 252.5
.quad 0x406ee80000000000 ; double 247.25
.quad 0x406f4c28f5c28f5c ; double 250.38
.quad 0x406fe80000000000 ; double 255.25
.quad 0x406fd80000000000 ; double 254.75
.quad 0x40703c0000000000 ; double 259.75
.quad 0x4070400000000000 ; double 260
.quad 0x4070500000000000 ; double 261
.quad 0x4070600000000000 ; double 262
.quad 0x4070640000000000 ; double 262.25
.quad 0x4070e00000000000 ; double 270
.quad 0x4071140000000000 ; double 273.25
.quad 0x40713c0000000000 ; double 275.75
.quad 0x4071000000000000 ; double 272
.quad 0x40710c0000000000 ; double 272.75
.quad 0x4070f00000000000 ; double 271
.quad 0x4070700000000000 ; double 263
.quad 0x4070400000000000 ; double 260
.quad 0x4070640000000000 ; double 262.25
.quad 0x4070cc0000000000 ; double 268.75
.quad 0x4070d80000000000 ; double 269.5
.quad 0x4071180000000000 ; double 273.5
.quad 0x4071140000000000 ; double 273.25
.quad 0x4071940000000000 ; double 281.25
.quad 0x4071cc0000000000 ; double 284.75
.quad 0x4071e80000000000 ; double 286.5
.quad 0x4071f00000000000 ; double 287
.quad 0x40717c0000000000 ; double 279.75
.quad 0x4070bc0000000000 ; double 267.75
.quad 0x4070480000000000 ; double 260.5
.quad 0x406ee80000000000 ; double 247.25
.quad 0x406ee80000000000 ; double 247.25
.quad 0x406f400000000000 ; double 250
.quad 0x406eb80000000000 ; double 245.75
.quad 0x406ff00000000000 ; double 255.5
.quad 0x4070c80000000000 ; double 268.5
.quad 0x4070f80000000000 ; double 271.5
.quad 0x4070f00000000000 ; double 271
.quad 0x4071280000000000 ; double 274.5
.quad 0x40716c0000000000 ; double 278.75
.quad 0x4072040000000000 ; double 288.25
.quad 0x4072180000000000 ; double 289.5
.quad 0x4072280000000000 ; double 290.5
.quad 0x407222147ae147ae ; double 290.13
.quad 0x4072300000000000 ; double 291
.quad 0x4071c00000000000 ; double 284
.quad 0x40726a147ae147ae ; double 294.63
.quad 0x4072b00000000000 ; double 299
.quad 0x4072c40000000000 ; double 300.25
.quad 0x4072d00000000000 ; double 301
.quad 0x4072bc0000000000 ; double 299.75
.quad 0x4073600000000000 ; double 310
.quad 0x4073780000000000 ; double 311.5
.quad 0x4073a80000000000 ; double 314.5
.quad 0x4073fa147ae147ae ; double 319.63
.quad 0x4074500000000000 ; double 325
.quad 0x40745c0000000000 ; double 325.75
.quad 0x4074380000000000 ; double 323.5
.quad 0x4074500000000000 ; double 325
.quad 0x4074700000000000 ; double 327
.quad 0x4074680000000000 ; double 326.5
.quad 0x4074b00000000000 ; double 331
.quad 0x4075100000000000 ; double 337
.quad 0x4074a80000000000 ; double 330.5
.quad 0x4074400000000000 ; double 324
.quad 0x4074300000000000 ; double 323
.quad 0x4074380000000000 ; double 323.5
.quad 0x4074ac0000000000 ; double 330.75
.quad 0x4075100000000000 ; double 337
.quad 0x4075480000000000 ; double 340.5
.quad 0x4075280000000000 ; double 338.5
.quad 0x40751c0000000000 ; double 337.75
.quad 0x4074f00000000000 ; double 335
.quad 0x4075100000000000 ; double 337
.quad 0x4074800000000000 ; double 328
.quad 0x40742c0000000000 ; double 322.75
.quad 0x4074300000000000 ; double 323
.quad 0x40743c0000000000 ; double 323.75
.quad 0x4074440000000000 ; double 324.25
.quad 0x4074400000000000 ; double 324
.quad 0x40741c0000000000 ; double 321.75
.quad 0x4074900000000000 ; double 329
.quad 0x40748c0000000000 ; double 328.75
.quad 0x4074c00000000000 ; double 332
.quad 0x4074680000000000 ; double 326.5
.quad 0x4074000000000000 ; double 320
.quad 0x407492147ae147ae ; double 329.13
.quad 0x4074c00000000000 ; double 332
.quad 0x4074ec0000000000 ; double 334.75
.quad 0x4074f80000000000 ; double 335.5
.quad 0x4075500000000000 ; double 341
.quad 0x4075900000000000 ; double 345
.quad 0x4075900000000000 ; double 345
.quad 0x4075d00000000000 ; double 349
.quad 0x4075c80000000000 ; double 348.5
.quad 0x4076080000000000 ; double 352.5
.quad 0x4076280000000000 ; double 354.5
.quad 0x40762c0000000000 ; double 354.75
.quad 0x4075dc0000000000 ; double 349.75
.quad 0x4075ec0000000000 ; double 350.75
.quad 0x4076000000000000 ; double 352
.quad 0x4076300000000000 ; double 355
.quad 0x4076300000000000 ; double 355
.quad 0x4075f80000000000 ; double 351.5
.quad 0x4075f80000000000 ; double 351.5
.quad 0x4075940000000000 ; double 345.25
.quad 0x4075380000000000 ; double 339.5
.quad 0x4075ac0000000000 ; double 346.75
.quad 0x4075ac0000000000 ; double 346.75
.quad 0x4075700000000000 ; double 343
.quad 0x40758c0000000000 ; double 344.75
.quad 0x40758c0000000000 ; double 344.75
.quad 0x4075b40000000000 ; double 347.25
.quad 0x40759c0000000000 ; double 345.75
.quad 0x4076200000000000 ; double 354
.quad 0x4076700000000000 ; double 359
.quad 0x4076500000000000 ; double 357
.quad 0x4076480000000000 ; double 356.5
.quad 0x4076500000000000 ; double 357
.quad 0x407632147ae147ae ; double 355.13
.quad 0x4078300000000000 ; double 387
.quad 0x4077d80000000000 ; double 381.5
.quad 0x4077780000000000 ; double 375.5
.quad 0x4077800000000000 ; double 376
.quad 0x40771c0000000000 ; double 369.75
.quad 0x4077280000000000 ; double 370.5
.quad 0x4077180000000000 ; double 369.5
.quad 0x40770c0000000000 ; double 368.75
.quad 0x4077100000000000 ; double 369
.quad 0x4077040000000000 ; double 368.25
.quad 0x4076dc0000000000 ; double 365.75
.quad 0x4076c80000000000 ; double 364.5
.quad 0x4076840000000000 ; double 360.25
.quad 0x4076ac0000000000 ; double 362.75
.quad 0x4076980000000000 ; double 361.5
.quad 0x4076780000000000 ; double 359.5
.quad 0x4076800000000000 ; double 360
.quad 0x4076e00000000000 ; double 366
.quad 0x4076f00000000000 ; double 367
.quad 0x40768c0000000000 ; double 360.75
.quad 0x4076280000000000 ; double 354.5
.quad 0x4076600000000000 ; double 358
.quad 0x4076700000000000 ; double 359
.quad 0x4076840000000000 ; double 360.25
.quad 0x40764c0000000000 ; double 356.75
.quad 0x4076400000000000 ; double 356
.quad 0x4076980000000000 ; double 361.5
.quad 0x4076700000000000 ; double 359
.quad 0x4076800000000000 ; double 360
.quad 0x4076580000000000 ; double 357.5
.quad 0x4076700000000000 ; double 359
.quad 0x4076780000000000 ; double 359.5
.quad 0x4076900000000000 ; double 361
.quad 0x4076580000000000 ; double 357.5
.quad 0x4075f00000000000 ; double 351
.quad 0x4075fc0000000000 ; double 351.75
.quad 0x4075e00000000000 ; double 350
.quad 0x4076280000000000 ; double 354.5
.quad 0x4076540000000000 ; double 357.25
.quad 0x4076b80000000000 ; double 363.5
.quad 0x4076bc0000000000 ; double 363.75
.quad 0x4076bc0000000000 ; double 363.75
.quad 0x4076c80000000000 ; double 364.5
.quad 0x4076dc0000000000 ; double 365.75
.quad 0x4077080000000000 ; double 368.5
.quad 0x40770c0000000000 ; double 368.75
.quad 0x4077080000000000 ; double 368.5
.quad 0x4076d40000000000 ; double 365.25
.quad 0x4076940000000000 ; double 361.25
.quad 0x4076940000000000 ; double 361.25
.quad 0x4076880000000000 ; double 360.5
.quad 0x4076800000000000 ; double 360
.quad 0x4076a40000000000 ; double 362.25
.quad 0x4076780000000000 ; double 359.5
.quad 0x4076800000000000 ; double 360
.quad 0x4076b80000000000 ; double 363.5
.quad 0x4076d00000000000 ; double 365
.quad 0x4076d00000000000 ; double 365
.quad 0x4076480000000000 ; double 356.5
.quad 0x4076540000000000 ; double 357.25
.quad 0x40766c0000000000 ; double 358.75
.quad 0x4076100000000000 ; double 353
.quad 0x4076180000000000 ; double 353.5
.quad 0x4076400000000000 ; double 356
.quad 0x40763c0000000000 ; double 355.75
.quad 0x4076180000000000 ; double 353.5
.quad 0x4075bc0000000000 ; double 347.75
.quad 0x4075de147ae147ae ; double 349.88
.quad 0x4075bc0000000000 ; double 347.75
.quad 0x40759c0000000000 ; double 345.75
.quad 0x4075e80000000000 ; double 350.5
.quad 0x4075d00000000000 ; double 349
.quad 0x4075ac0000000000 ; double 346.75
.quad 0x4075980000000000 ; double 345.5
.quad 0x4075a00000000000 ; double 346
.quad 0x4075b00000000000 ; double 347
.quad 0x4075c40000000000 ; double 348.25
.quad 0x4075f00000000000 ; double 351
.quad 0x4076400000000000 ; double 356
.quad 0x4076580000000000 ; double 357.5
.quad 0x4076840000000000 ; double 360.25
.quad 0x4076700000000000 ; double 359
.quad 0x4076100000000000 ; double 353
.quad 0x4075d80000000000 ; double 349.5
.quad 0x4075b80000000000 ; double 347.5
.quad 0x4075bc0000000000 ; double 347.75
.quad 0x4075b40000000000 ; double 347.25
.quad 0x4075980000000000 ; double 345.5
.quad 0x4075a80000000000 ; double 346.5
.quad 0x4075d80000000000 ; double 349.5
.quad 0x4075280000000000 ; double 338.5
.quad 0x4075180000000000 ; double 337.5
.quad 0x4075480000000000 ; double 340.5
.quad 0x4075580000000000 ; double 341.5
.quad 0x4075940000000000 ; double 345.25
.quad 0x4075980000000000 ; double 345.5
.quad 0x4075de147ae147ae ; double 349.88
.quad 0x4075c00000000000 ; double 348
.quad 0x4075a00000000000 ; double 346
.quad 0x40755c0000000000 ; double 341.75
.quad 0x4075a80000000000 ; double 346.5
.quad 0x4075ec0000000000 ; double 350.75
.quad 0x4075d80000000000 ; double 349.5
.quad 0x4075ae147ae147ae ; double 346.88
.quad 0x4075840000000000 ; double 344.25
.quad 0x4075580000000000 ; double 341.5
.quad 0x4075380000000000 ; double 339.5
.quad 0x40752c0000000000 ; double 338.75
.quad 0x4074f00000000000 ; double 335
.quad 0x4074a80000000000 ; double 330.5
.quad 0x4074e40000000000 ; double 334.25
.quad 0x4075180000000000 ; double 337.5
.quad 0x4075100000000000 ; double 337
.quad 0x4075280000000000 ; double 338.5
.quad 0x4074e80000000000 ; double 334.5
.quad 0x4074e00000000000 ; double 334
.quad 0x4074bc0000000000 ; double 331.75
.quad 0x4074680000000000 ; double 326.5
.quad 0x4074000000000000 ; double 320
.quad 0x4074300000000000 ; double 323
.quad 0x4073c40000000000 ; double 316.25
.quad 0x4073e80000000000 ; double 318.5
.quad 0x4074180000000000 ; double 321.5
.quad 0x4074200000000000 ; double 322
.quad 0x4074500000000000 ; double 325
.quad 0x4074600000000000 ; double 326
.quad 0x4074c00000000000 ; double 332
.quad 0x4074880000000000 ; double 328.5
.quad 0x4074400000000000 ; double 324
.quad 0x4074c00000000000 ; double 332
.quad 0x4074d00000000000 ; double 333
.quad 0x4075000000000000 ; double 336
.quad 0x4074f80000000000 ; double 335.5
.quad 0x4075900000000000 ; double 345
.quad 0x4075f80000000000 ; double 351.5
.quad 0x4076c80000000000 ; double 364.5
.quad 0x4075c00000000000 ; double 348
.quad 0x4075340000000000 ; double 339.25
.quad 0x4075380000000000 ; double 339.5
.quad 0x4074e80000000000 ; double 334.5
.quad 0x40749c0000000000 ; double 329.75
.quad 0x4074580000000000 ; double 325.5
.quad 0x4074680000000000 ; double 326.5
.quad 0x4073b80000000000 ; double 315.5
.quad 0x4073f00000000000 ; double 319
.quad 0x4073dc0000000000 ; double 317.75
.quad 0x4073e00000000000 ; double 318
.quad 0x4073a40000000000 ; double 314.25
.quad 0x4073ac0000000000 ; double 314.75
.quad 0x4073940000000000 ; double 313.25
.quad 0x4073500000000000 ; double 309
.quad 0x4073700000000000 ; double 311
.quad 0x4073900000000000 ; double 313
.quad 0x4073a40000000000 ; double 314.25
.quad 0x4073e80000000000 ; double 318.5
.quad 0x4073f40000000000 ; double 319.25
.quad 0x4074080000000000 ; double 320.5
.quad 0x4074180000000000 ; double 321.5
.quad 0x4074000000000000 ; double 320
.quad 0x4074400000000000 ; double 324
.quad 0x4074280000000000 ; double 322.5
.quad 0x4074940000000000 ; double 329.25
.quad 0x4074900000000000 ; double 329
.quad 0x4074780000000000 ; double 327.5
.quad 0x4074400000000000 ; double 324
.quad 0x4074400000000000 ; double 324
.quad 0x40743c0000000000 ; double 323.75
.quad 0x4074900000000000 ; double 329
.quad 0x4074d00000000000 ; double 333
.quad 0x4074c80000000000 ; double 332.5
.quad 0x4074ee147ae147ae ; double 334.88
.quad 0x4074b80000000000 ; double 331.5
.quad 0x4074840000000000 ; double 328.25
.quad 0x40749c0000000000 ; double 329.75
.quad 0x4074b80000000000 ; double 331.5
.quad 0x4074c80000000000 ; double 332.5
.quad 0x4074bc0000000000 ; double 331.75
.quad 0x4074800000000000 ; double 328
.quad 0x4074780000000000 ; double 327.5
.quad 0x4074880000000000 ; double 328.5
.quad 0x4074a40000000000 ; double 330.25
.quad 0x4074880000000000 ; double 328.5
.quad 0x4074480000000000 ; double 324.5
.quad 0x4073e00000000000 ; double 318
.quad 0x4073c00000000000 ; double 316
.quad 0x4073980000000000 ; double 313.5
.quad 0x4073900000000000 ; double 313
.quad 0x40734c0000000000 ; double 308.75
.quad 0x40734c0000000000 ; double 308.75
.quad 0x40734c0000000000 ; double 308.75
.quad 0x4073740000000000 ; double 311.25
.quad 0x4073800000000000 ; double 312
.quad 0x4073800000000000 ; double 312
.quad 0x4073a80000000000 ; double 314.5
.quad 0x40739c0000000000 ; double 313.75
.quad 0x4073800000000000 ; double 312
.quad 0x4073580000000000 ; double 309.5
.quad 0x4073500000000000 ; double 309
.quad 0x4073700000000000 ; double 311
.quad 0x4073900000000000 ; double 313
.quad 0x4073c00000000000 ; double 316
.quad 0x4073b80000000000 ; double 315.5
.quad 0x4073780000000000 ; double 311.5
.quad 0x4073640000000000 ; double 310.25
.quad 0x4073580000000000 ; double 309.5
.quad 0x4073300000000000 ; double 307
.quad 0x4073180000000000 ; double 305.5
.quad 0x40731c0000000000 ; double 305.75
.quad 0x40730c0000000000 ; double 304.75
.quad 0x4072dc0000000000 ; double 301.75
.quad 0x4072f00000000000 ; double 303
.quad 0x40727c0000000000 ; double 295.75
.quad 0x4072c00000000000 ; double 300
.quad 0x4072f80000000000 ; double 303.5
.quad 0x40733c0000000000 ; double 307.75
.quad 0x4072f80000000000 ; double 303.5
.quad 0x4072bc0000000000 ; double 299.75
.quad 0x4072e80000000000 ; double 302.5
.quad 0x4072e80000000000 ; double 302.5
.quad 0x4072a80000000000 ; double 298.5
.quad 0x4072800000000000 ; double 296
.quad 0x4072940000000000 ; double 297.25
.quad 0x4072a00000000000 ; double 298
.quad 0x4072a80000000000 ; double 298.5
.quad 0x4072b80000000000 ; double 299.5
.quad 0x4072980000000000 ; double 297.5
.quad 0x4072880000000000 ; double 296.5
.quad 0x4072e00000000000 ; double 302
.quad 0x4072c00000000000 ; double 300
.quad 0x4073280000000000 ; double 306.5
.quad 0x4073680000000000 ; double 310.5
.quad 0x4073680000000000 ; double 310.5
.quad 0x40732c0000000000 ; double 306.75
.quad 0x4072f00000000000 ; double 303
.quad 0x4072a00000000000 ; double 298
.quad 0x4072b00000000000 ; double 299
.quad 0x40728c0000000000 ; double 296.75
.quad 0x4072900000000000 ; double 297
.quad 0x4072940000000000 ; double 297.25
.quad 0x4072980000000000 ; double 297.5
.quad 0x4072940000000000 ; double 297.25
.quad 0x4072c00000000000 ; double 300
.quad 0x4072c80000000000 ; double 300.5
.quad 0x4072c00000000000 ; double 300
.quad 0x4072c80000000000 ; double 300.5
.quad 0x4072f00000000000 ; double 303
.quad 0x4072fc0000000000 ; double 303.75
.quad 0x4072f00000000000 ; double 303
.quad 0x4072c00000000000 ; double 300
.quad 0x4073900000000000 ; double 313
.quad 0x40738c0000000000 ; double 312.75
.quad 0x4073580000000000 ; double 309.5
.quad 0x4073480000000000 ; double 308.5
.quad 0x4072fc0000000000 ; double 303.75
.quad 0x4073600000000000 ; double 310
.quad 0x4073700000000000 ; double 311
.quad 0x40732c0000000000 ; double 306.75
.quad 0x4072f80000000000 ; double 303.5
.quad 0x4073380000000000 ; double 307.5
.quad 0x4073800000000000 ; double 312
.quad 0x4073b40000000000 ; double 315.25
.quad 0x4073d00000000000 ; double 317
.quad 0x4073e00000000000 ; double 318
.quad 0x4074180000000000 ; double 321.5
.quad 0x4074400000000000 ; double 324
.quad 0x4074200000000000 ; double 322
.quad 0x40741e147ae147ae ; double 321.88
.quad 0x4074980000000000 ; double 329.5
.quad 0x4074600000000000 ; double 326
.quad 0x4073fc0000000000 ; double 319.75
.quad 0x4073e00000000000 ; double 318
.quad 0x4073fc0000000000 ; double 319.75
.quad 0x4074780000000000 ; double 327.5
.quad 0x4074900000000000 ; double 329
.quad 0x4074880000000000 ; double 328.5
.quad 0x4074840000000000 ; double 328.25
.quad 0x4074ac0000000000 ; double 330.75
.quad 0x4074a40000000000 ; double 330.25
.quad 0x4074ec0000000000 ; double 334.75
.quad 0x4074ec0000000000 ; double 334.75
.quad 0x4074e80000000000 ; double 334.5
.quad 0x4074b00000000000 ; double 331
.quad 0x4074800000000000 ; double 328
.quad 0x40747e147ae147ae ; double 327.88
.quad 0x4074680000000000 ; double 326.5
.quad 0x4074500000000000 ; double 325
.quad 0x40747c0000000000 ; double 327.75
.quad 0x4074740000000000 ; double 327.25
.quad 0x4074700000000000 ; double 327
.quad 0x4074400000000000 ; double 324
.quad 0x4074000000000000 ; double 320
.quad 0x4073c00000000000 ; double 316
.quad 0x4073d00000000000 ; double 317
.quad 0x4073d00000000000 ; double 317
.quad 0x4073d00000000000 ; double 317
.quad 0x40735c0000000000 ; double 309.75
.quad 0x4073380000000000 ; double 307.5
.quad 0x4073a80000000000 ; double 314.5
.quad 0x4073cc0000000000 ; double 316.75
.quad 0x4073d80000000000 ; double 317.5
.quad 0x4074380000000000 ; double 323.5
.quad 0x4074700000000000 ; double 327
.quad 0x4074980000000000 ; double 329.5
.quad 0x4074700000000000 ; double 327
.quad 0x4073f00000000000 ; double 319
.quad 0x4073e80000000000 ; double 318.5
.quad 0x40745c0000000000 ; double 325.75
.quad 0x40747c0000000000 ; double 327.75
.quad 0x4074d00000000000 ; double 333
.quad 0x4074b00000000000 ; double 331
.quad 0x4074c40000000000 ; double 332.25
.quad 0x4074c80000000000 ; double 332.5
.quad 0x40746c0000000000 ; double 326.75
.quad 0x4074a80000000000 ; double 330.5
.quad 0x4074f00000000000 ; double 335
.quad 0x4074f80000000000 ; double 335.5
.quad 0x4075100000000000 ; double 337
.quad 0x4075100000000000 ; double 337
.quad 0x4075100000000000 ; double 337
.quad 0x4074e00000000000 ; double 334
.quad 0x4075180000000000 ; double 337.5
.quad 0x40750c0000000000 ; double 336.75
.quad 0x4075180000000000 ; double 337.5
.quad 0x4075380000000000 ; double 339.5
.quad 0x4075200000000000 ; double 338
.quad 0x4075200000000000 ; double 338
.quad 0x4075500000000000 ; double 341
.quad 0x4075580000000000 ; double 341.5
.quad 0x4075580000000000 ; double 341.5
.quad 0x4075300000000000 ; double 339
.quad 0x4075080000000000 ; double 336.5
.quad 0x4075180000000000 ; double 337.5
.quad 0x4075280000000000 ; double 338.5
.quad 0x4075480000000000 ; double 340.5
.quad 0x4075480000000000 ; double 340.5
.quad 0x40757a147ae147ae ; double 343.63
.quad 0x4075a80000000000 ; double 346.5
.quad 0x40758c0000000000 ; double 344.75
.quad 0x4075900000000000 ; double 345
.quad 0x4075580000000000 ; double 341.5
.quad 0x40758c0000000000 ; double 344.75
.quad 0x4075780000000000 ; double 343.5
.quad 0x4075380000000000 ; double 339.5
.quad 0x4074f80000000000 ; double 335.5
.quad 0x4075500000000000 ; double 341
.quad 0x4075200000000000 ; double 338
.quad 0x4075080000000000 ; double 336.5
.quad 0x4075080000000000 ; double 336.5
.quad 0x4075800000000000 ; double 344
.quad 0x4075480000000000 ; double 340.5
.quad 0x4074c00000000000 ; double 332
.quad 0x4075100000000000 ; double 337
.quad 0x4075a00000000000 ; double 346
.quad 0x4075800000000000 ; double 344
.quad 0x4075ac0000000000 ; double 346.75
.quad 0x4076100000000000 ; double 353
.quad 0x4076600000000000 ; double 358
.quad 0x4076480000000000 ; double 356.5
.quad 0x4076880000000000 ; double 360.5
.quad 0x40766c0000000000 ; double 358.75
.quad 0x4076a80000000000 ; double 362.5
.quad 0x4076cc0000000000 ; double 364.75
.quad 0x4076d00000000000 ; double 365
.quad 0x4076600000000000 ; double 358
.quad 0x4076000000000000 ; double 352
.quad 0x4076400000000000 ; double 356
.quad 0x40767c0000000000 ; double 359.75
.quad 0x4076880000000000 ; double 360.5
.quad 0x4076500000000000 ; double 357
.quad 0x4076600000000000 ; double 358
.quad 0x40767c0000000000 ; double 359.75
.quad 0x4076680000000000 ; double 358.5
.quad 0x4075e00000000000 ; double 350
.quad 0x4076600000000000 ; double 358
.quad 0x4076700000000000 ; double 359
.quad 0x40773c0000000000 ; double 371.75
.quad 0x40773c0000000000 ; double 371.75
.quad 0x40773c0000000000 ; double 371.75
.quad 0x4077280000000000 ; double 370.5
.quad 0x4077180000000000 ; double 369.5
.quad 0x4077280000000000 ; double 370.5
.quad 0x4077700000000000 ; double 375
.quad 0x4077700000000000 ; double 375
.quad 0x4076500000000000 ; double 357
.quad 0x40759c0000000000 ; double 345.75
.quad 0x4074f80000000000 ; double 335.5
.quad 0x4074dc0000000000 ; double 333.75
.quad 0x4074980000000000 ; double 329.5
.quad 0x4074840000000000 ; double 328.25
.quad 0x4074e80000000000 ; double 334.5
.quad 0x4074a00000000000 ; double 330
.quad 0x40746c0000000000 ; double 326.75
.quad 0x4074840000000000 ; double 328.25
.quad 0x4074f00000000000 ; double 335
.quad 0x4074f40000000000 ; double 335.25
.quad 0x40750c0000000000 ; double 336.75
.quad 0x4075280000000000 ; double 338.5
.quad 0x4075700000000000 ; double 343
.quad 0x4075800000000000 ; double 344
.quad 0x40757e147ae147ae ; double 343.88
.quad 0x4075940000000000 ; double 345.25
.quad 0x4075940000000000 ; double 345.25
.quad 0x4075d80000000000 ; double 349.5
.quad 0x4075ec0000000000 ; double 350.75
.quad 0x4075740000000000 ; double 343.25
.quad 0x40752c0000000000 ; double 338.75
.quad 0x4074e80000000000 ; double 334.5
.quad 0x4074b00000000000 ; double 331
.quad 0x40749c0000000000 ; double 329.75
.quad 0x4074b80000000000 ; double 331.5
.quad 0x4074780000000000 ; double 327.5
.quad 0x4083f80000000000 ; double 639
.quad 0x4084060000000000 ; double 640.75
.quad 0x4084200000000000 ; double 644
.quad 0x4084240000000000 ; double 644.5
.quad 0x4084380000000000 ; double 647
.quad 0x40844f0a3d70a3d7 ; double 649.88
.quad 0x40844c0000000000 ; double 649.5
.quad 0x4084400000000000 ; double 648
.quad 0x4084180000000000 ; double 643
.quad 0x4083f40000000000 ; double 638.5
.quad 0x4083f40000000000 ; double 638.5
.quad 0x4084400000000000 ; double 648
.quad 0x4083ec0000000000 ; double 637.5
.quad 0x4083c00000000000 ; double 632
.quad 0x4083280000000000 ; double 613
.quad 0x4082e40000000000 ; double 604.5
.quad 0x4082be0000000000 ; double 599.75
.quad 0x4082a00000000000 ; double 596
.quad 0x4082680000000000 ; double 589
.quad 0x4082980000000000 ; double 595
.quad 0x4082640000000000 ; double 588.5
.quad 0x4082680000000000 ; double 589
.quad 0x4082800000000000 ; double 592
.quad 0x4082b80000000000 ; double 599
.quad 0x4082880000000000 ; double 593
.quad 0x4082510a3d70a3d7 ; double 586.13
.quad 0x4082940000000000 ; double 594.5
.quad 0x4082a80000000000 ; double 597
.quad 0x40828e0000000000 ; double 593.75
.quad 0x4082300000000000 ; double 582
.quad 0x4082700000000000 ; double 590
.quad 0x4082700000000000 ; double 590
.quad 0x4081dc0000000000 ; double 571.5
.quad 0x4081ee0000000000 ; double 573.75
.quad 0x4082200000000000 ; double 580
.quad 0x4082500000000000 ; double 586
.quad 0x4082a40000000000 ; double 596.5
.quad 0x4082900000000000 ; double 594
.quad 0x4082740000000000 ; double 590.5
.quad 0x4082aa0000000000 ; double 597.25
.quad 0x4082c00000000000 ; double 600
.quad 0x4082980000000000 ; double 595
.quad 0x4082600000000000 ; double 588
.quad 0x4082180000000000 ; double 579
.quad 0x4082840000000000 ; double 592.5
.quad 0x4082500000000000 ; double 586
.quad 0x4082080000000000 ; double 577
.quad 0x4081fc0000000000 ; double 575.5
.quad 0x40827c0000000000 ; double 591.5
.quad 0x4082840000000000 ; double 592.5
.quad 0x40827c0000000000 ; double 591.5
.quad 0x4082940000000000 ; double 594.5
.quad 0x4082d00000000000 ; double 602
.quad 0x4082be0000000000 ; double 599.75
.quad 0x4082f00000000000 ; double 606
.quad 0x4083240000000000 ; double 612.5
.quad 0x4083600000000000 ; double 620
.quad 0x4083660000000000 ; double 620.75
.quad 0x4083540000000000 ; double 618.5
.quad 0x40837c0000000000 ; double 623.5
.quad 0x4083900000000000 ; double 626
.quad 0x4083580000000000 ; double 619
.quad 0x4083800000000000 ; double 624
.quad 0x4083800000000000 ; double 624
.quad 0x4083440000000000 ; double 616.5
.quad 0x4083860000000000 ; double 624.75
.quad 0x4083900000000000 ; double 626
.quad 0x4083640000000000 ; double 620.5
.quad 0x4083300000000000 ; double 614
.quad 0x4082e40000000000 ; double 604.5
.quad 0x4082d80000000000 ; double 603
.quad 0x4082b00000000000 ; double 598
.quad 0x4082bc0000000000 ; double 599.5
.quad 0x4082ce0000000000 ; double 601.75
.quad 0x4083400000000000 ; double 616
.quad 0x40839a0000000000 ; double 627.25
.quad 0x40839e0000000000 ; double 627.75
.quad 0x4083960000000000 ; double 626.75
.quad 0x4083940000000000 ; double 626.5
.quad 0x4083580000000000 ; double 619
.quad 0x4083700000000000 ; double 622
.quad 0x4083880000000000 ; double 625
.quad 0x4083900000000000 ; double 626
.quad 0x4083780000000000 ; double 623
.quad 0x4083c40000000000 ; double 632.5
.quad 0x4084300000000000 ; double 646
.quad 0x4084400000000000 ; double 648
.quad 0x4084240000000000 ; double 644.5
.quad 0x4084380000000000 ; double 647
.quad 0x4084380000000000 ; double 647
.quad 0x4084400000000000 ; double 648
.quad 0x4084180000000000 ; double 643
.quad 0x4083c80000000000 ; double 633
.quad 0x4083cc0000000000 ; double 633.5
.quad 0x4083b00000000000 ; double 630
.quad 0x4083400000000000 ; double 616
.quad 0x40833e0000000000 ; double 615.75
.subsections_via_symbols
| the_stack_data/3898.c | stack |
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
## ses_elmdevname_callback — argument-shuffling thunk (SysV AMD64 ABI).
## Ignores its first two arguments (%rdi, %rsi are overwritten without
## being read) and tail-calls cam_periph_list with arg3/arg4 promoted to
## arg1/arg2: rdx -> rdi, rcx -> rsi. The return value is whatever
## cam_periph_list returns; no callee-saved registers are touched.
.p2align 4, 0x90 ## -- Begin function ses_elmdevname_callback
_ses_elmdevname_callback: ## @ses_elmdevname_callback
.cfi_startproc
## %bb.0:
pushq %rbp ## frame setup kept only for CFI/unwind accuracy
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
movq %rdx, %rdi ## arg3 becomes cam_periph_list's arg1
movq %rcx, %rsi ## arg4 becomes cam_periph_list's arg2
popq %rbp ## tear frame down so rsp points at the return address
jmp _cam_periph_list ## TAILCALL — callee's ret goes straight to our caller
.cfi_endproc
## -- End function
.no_dead_strip _ses_elmdevname_callback
.subsections_via_symbols
| .section __TEXT,__text,regular,pure_instructions
.build_version macos, 13, 0 sdk_version 13, 3
; ses_elmdevname_callback — argument-shuffling thunk (AAPCS64).
; Ignores its first two arguments (x0, x1 are overwritten without being
; read) and tail-calls cam_periph_list with arg3/arg4 promoted to
; arg1/arg2: x2 -> x0, x3 -> x1. Leaf tail call: no stack frame is
; created and x30 (LR) still holds the original caller's return address,
; so cam_periph_list returns directly to our caller.
.p2align 2 ; -- Begin function ses_elmdevname_callback
_ses_elmdevname_callback: ; @ses_elmdevname_callback
.cfi_startproc
; %bb.0:
mov x1, x3 ; arg4 becomes cam_periph_list's arg2
mov x0, x2 ; arg3 becomes cam_periph_list's arg1
b _cam_periph_list ; tail call (b, not bl — LR deliberately unchanged)
.cfi_endproc
; -- End function
.no_dead_strip _ses_elmdevname_callback
.subsections_via_symbols
| AnghaBench/freebsd/sys/cam/scsi/extr_scsi_enc_ses.c_ses_elmdevname_callback.c | anghabench |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.